repo_name (string, length 8–130) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence) |
---|---|---|---|---|
killertux/cwbbus | [
"a57580a72ad2c5ead7b78e9381ccf80fbe8f6e31"
] | [
"cwbbus/datareader.py"
] | [
"from os import makedirs, path\nfrom typing import Union\n\nimport pandas as pd\n\nfrom .filetype import FileType\n\n\nclass DataReader(object):\n\tdef __init__(self):\n\t\t\"\"\"\n\t\tStores all dataframes and provides methods to feed data into the dataframes.\n\t\t\"\"\"\n\t\tself.bus_lines = pd.DataFrame(columns=['id', 'name', 'color', 'card_only', 'category'])\n\t\tself.bus_line_shapes = pd.DataFrame(columns=['id', 'bus_line_id', 'latitude', 'longitude'])\n\t\tself.bus_stops = pd.DataFrame(columns=['number', 'name', 'type', 'latitude', 'longitude'])\n\t\tself.itineraries = pd.DataFrame(columns=['id', 'bus_line_id', 'direction'])\n\t\tself.itinerary_stops = pd.DataFrame(columns=['itinerary_id', 'sequence_number', 'stop_number'])\n\t\tself.bus_lines_schedule_tables = pd.DataFrame(columns=['table_id', 'bus_line_id', 'bus_stop_id', 'day_type',\n\t\t 'time', 'adaptive'])\n\t\tself.vehicles_schedule_tables = pd.DataFrame(columns=['table_id', 'bus_line_id', 'bus_stop_id', 'vehicle_id',\n\t\t 'time'])\n\t\tself.itinerary_stops_extra = pd.DataFrame(columns=['itinerary_id', 'itinerary_name', 'bus_line_id',\n\t\t 'itinerary_stop_id', 'stop_name', 'stop_name_short',\n\t\t 'stop_name_abbr', 'bus_stop_id', 'sequence_number', 'type',\n\t\t 'special_stop'])\n\t\tself.itinerary_distances = pd.DataFrame(columns=['itinerary_stop_id', 'itinerary_next_stop_id', 'distance_m'])\n\t\tself.companies = pd.DataFrame(columns=['id', 'name'])\n\t\tself.itinerary_stops_companies = pd.DataFrame(columns=['itinerary_stop_id', 'company_id'])\n\t\tself.vehicle_log = pd.DataFrame(columns=['timestamp', 'vehicle_id', 'bus_line_id', 'latitude', 'longitude'])\n\t\tself.points_of_interest = pd.DataFrame(columns=['name', 'description', 'category', 'latitude', 'longitude'])\n\n\tdef feed_data(self, file: Union[bytes, str], data_type: FileType):\n\t\t\"\"\"\n\t\tFeeds data into the reader's internal dataframes.\n\t\t:param file: File which contains the data.\n\t\tIf a *bytes* object is provided, the object will be interpreted as the actual decompressed content of the file.\n\t\tAlternatively, if a *str* object is provided, the object will be interpreted as the path to a file in the user's\n\t\toperating system. Supports the same compression types supported by pandas.\n\t\t:param data_type: Type of data. See :class:`FileType` for available types\n\t\t\"\"\"\n\t\t# User provided raw binary data or file path (both are supported by pandas)\n\t\tif isinstance(file, bytes) or isinstance(file, str):\n\t\t\t# pd.read_json can take a long time. 
Therefore, we only read the file if the data_type parameter is valid.\n\t\t\tif data_type == FileType.LINHAS:\n\t\t\t\tfile_data = pd.read_json(file)\n\t\t\t\tself._feed_linhas_json(file_data)\n\t\t\telif data_type == FileType.POIS:\n\t\t\t\tfile_data = pd.read_json(file)\n\t\t\t\tself._feed_pois_json(file_data)\n\t\t\telif data_type == FileType.PONTOS_LINHA:\n\t\t\t\tfile_data = pd.read_json(file)\n\t\t\t\tself._feed_pontos_linha_json(file_data)\n\t\t\telif data_type == FileType.SHAPE_LINHA:\n\t\t\t\tfile_data = pd.read_json(file)\n\t\t\t\tself._feed_shape_linha_json(file_data)\n\t\t\telif data_type == FileType.TABELA_LINHA:\n\t\t\t\tfile_data = pd.read_json(file)\n\t\t\t\tself._feed_tabela_linha_json(file_data)\n\t\t\telif data_type == FileType.TABELA_VEICULO:\n\t\t\t\tfile_data = pd.read_json(file)\n\t\t\t\tself._feed_tabela_veiculo_json(file_data)\n\t\t\telif data_type == FileType.TRECHOS_ITINERARIOS:\n\t\t\t\tfile_data = pd.read_json(file)\n\t\t\t\tself._feed_trechos_itinerarios_json(file_data)\n\t\t\telif data_type == FileType.VEICULOS:\n\t\t\t\tfile_data = pd.read_json(file, lines=True)\n\t\t\t\tself._feed_veiculos_json(file_data)\n\t\t\telse:\n\t\t\t\traise ValueError(\"Invalid data_type parameter\")\n\n\t\t# Unsupported type\n\t\telse:\n\t\t\traise TypeError(\"Expected bytes (file content) or str (file name)\")\n\n\tdef save_dataframe_cache(self, directory_path: str):\n\t\t\"\"\"\n\t\tDumps all data currently stored in the internal dataframes to a cache directory.\n\t\t:param directory_path: Path to the cache directory\n\t\t\"\"\"\n\t\tmakedirs(directory_path, exist_ok=True)\n\t\tself.bus_lines.to_csv(path.join(directory_path, 'bus_lines.csv.xz'), index=False)\n\t\tself.bus_line_shapes.to_csv(path.join(directory_path, 'bus_lines_shapes.csv.xz'), index=False)\n\t\tself.bus_stops.to_csv(path.join(directory_path, 'bus_stops.csv.xz'), index=False)\n\t\tself.itineraries.to_csv(path.join(directory_path, 'itineraries.csv.xz'), index=False)\n\t\tself.itinerary_stops.to_csv(path.join(directory_path, 'itinerary_stops.csv.xz'), index=False)\n\t\tself.bus_lines_schedule_tables.to_csv(path.join(directory_path, 'bus_lines_schedule_tables.csv.xz'), index=False)\n\t\tself.vehicles_schedule_tables.to_csv(path.join(directory_path, 'vehicles_schedule_tables.csv.xz'), index=False)\n\t\tself.itinerary_stops_extra.to_csv(path.join(directory_path, 'itinerary_stops_extra.csv.xz'), index=False)\n\t\tself.itinerary_distances.to_csv(path.join(directory_path, 'itinerary_distances.csv.xz'), index=False)\n\t\tself.companies.to_csv(path.join(directory_path, 'companies.csv.xz'), index=False)\n\t\tself.itinerary_stops_companies.to_csv(path.join(directory_path, 'itinerary_stops_companies.csv.xz'), index=False)\n\t\tself.points_of_interest.to_csv(path.join(directory_path, 'points_of_interest.csv.xz'), index=False)\n\t\tself.vehicle_log.to_csv(path.join(directory_path, 'vehicle_log.csv.xz'), index=False)\n\n\tdef from_dataframe_cache(self, directory_path: str):\n\t\t\"\"\"\n\t\tLoads all data currently stored in the specified cache directory into the internal dataframes.\n\t\t:param directory_path: Path to the cache directory\n\t\t\"\"\"\n\t\tself.bus_lines = pd.read_csv(path.join(directory_path, 'bus_lines.csv.xz'))\n\t\tself.bus_line_shapes = pd.read_csv(path.join(directory_path, 'bus_lines_shapes.csv.xz'), dtype={'bus_line_id': str})\n\t\tself.bus_stops = pd.read_csv(path.join(directory_path, 'bus_stops.csv.xz'))\n\t\tself.itineraries = pd.read_csv(path.join(directory_path, 
'itineraries.csv.xz'))\n\t\tself.itinerary_stops = pd.read_csv(path.join(directory_path, 'itinerary_stops.csv.xz'))\n\t\tself.bus_lines_schedule_tables = pd.read_csv(path.join(directory_path, 'bus_lines_schedule_tables.csv.xz'))\n\t\tself.vehicles_schedule_tables = pd.read_csv(path.join(directory_path, 'vehicles_schedule_tables.csv.xz'))\n\t\tself.itinerary_stops_extra = pd.read_csv(path.join(directory_path, 'itinerary_stops_extra.csv.xz'))\n\t\tself.itinerary_distances = pd.read_csv(path.join(directory_path, 'itinerary_distances.csv.xz'))\n\t\tself.companies = pd.read_csv(path.join(directory_path, 'companies.csv.xz'))\n\t\tself.itinerary_stops_companies = pd.read_csv(path.join(directory_path, 'itinerary_stops_companies.csv.xz'))\n\t\tself.vehicle_log = pd.read_csv(path.join(directory_path, 'vehicle_log.csv.xz'))\n\t\tself.points_of_interest = pd.read_csv(path.join(directory_path, 'points_of_interest.csv.xz'))\n\n\tdef _feed_linhas_json(self, file_data: pd.DataFrame):\n\t\t\"\"\"\n\t\tMerges the data provided into the bus_lines dataframe.\n\t\t:param file_data: Dataframe to merge.\n\t\t\"\"\"\n\t\tbus_line_data = file_data[['COD', 'NOME', 'NOME_COR', 'SOMENTE_CARTAO', 'CATEGORIA_SERVICO']].copy()\n\n\t\tbus_line_data.rename(columns={\n\t\t\t'COD': 'id',\n\t\t\t'NOME': 'name',\n\t\t\t'NOME_COR': 'color',\n\t\t\t'SOMENTE_CARTAO': 'card_only',\n\t\t\t\"CATEGORIA_SERVICO\": 'category'\n\t\t}, inplace=True)\n\n\t\tself.bus_lines = self.bus_lines.merge(bus_line_data, how='outer')\n\n\tdef _feed_pois_json(self, file_data: pd.DataFrame):\n\t\t\"\"\"\n\t\tMerges the data provided into the points_of_interest dataframe.\n\t\t:param file_data: Dataframe to merge.\n\t\t\"\"\"\n\t\tpoi_data = file_data[['POI_NAME', 'POI_DESC', 'POI_CATEGORY_NAME', 'POI_LAT', 'POI_LON']].copy()\n\n\t\tpoi_data.rename(columns={\n\t\t\t'POI_NAME': 'name',\n\t\t\t'POI_DESC': 'description',\n\t\t\t'POI_CATEGORY_NAME': 'category',\n\t\t\t'POI_LAT': 'latitude',\n\t\t\t'POI_LON': 'longitude'\n\t\t}, inplace=True)\n\n\t\tself.points_of_interest = self.points_of_interest.merge(poi_data, how='outer')\n\n\tdef _feed_pontos_linha_json(self, file_data: pd.DataFrame):\n\t\t\"\"\"\n\t\tMerges the data provided into the bus_stops, itineraries and itinerary_stops dataframes.\n\t\t:param file_data: Dataframe to merge.\n\t\t\"\"\"\n\t\tbus_stop_data = file_data[['NUM', 'NOME', 'TIPO', 'LAT', 'LON']].copy()\n\t\titinerary_data = file_data[['ITINERARY_ID', 'COD', 'SENTIDO']].copy()\n\t\titinerary_stops_data = file_data[['ITINERARY_ID', 'SEQ', 'NUM']].copy()\n\n\t\tbus_stop_data.rename(columns={\n\t\t\t'NUM': 'number',\n\t\t\t'NOME': 'name',\n\t\t\t'TIPO': 'type',\n\t\t\t'LAT': 'latitude',\n\t\t\t'LON': 'longitude'\n\t\t}, inplace=True)\n\t\tbus_stop_data.drop_duplicates(inplace=True)\n\n\t\titinerary_data.rename(columns={\n\t\t\t'ITINERARY_ID': 'id',\n\t\t\t'COD': 'bus_line_id',\n\t\t\t'SENTIDO': 'direction'\n\t\t}, inplace=True)\n\t\titinerary_data.drop_duplicates(inplace=True)\n\n\t\titinerary_stops_data.rename(columns={\n\t\t\t'ITINERARY_ID': 'itinerary_id',\n\t\t\t'SEQ': 'sequence_number',\n\t\t\t'NUM': 'stop_number'\n\t\t}, inplace=True)\n\t\titinerary_stops_data.drop_duplicates(inplace=True)\n\n\t\tself.bus_stops = self.bus_stops.merge(bus_stop_data, how='outer')\n\t\tself.itineraries = self.itineraries.merge(itinerary_data, how='outer')\n\t\tself.itinerary_stops = self.itinerary_stops.merge(itinerary_stops_data, how='outer')\n\n\tdef _feed_shape_linha_json(self, file_data: pd.DataFrame):\n\t\t\"\"\"\n\t\tMerges the data provided into 
the bus_line_shapes dataframe.\n\t\t:param file_data: Dataframe to merge.\n\t\t\"\"\"\n\t\tbus_line_shape_data = file_data[['SHP', 'COD', 'LAT', 'LON']].copy()\n\n\t\tbus_line_shape_data.rename(columns={\n\t\t\t'SHP': 'id',\n\t\t\t'COD': 'bus_line_id',\n\t\t\t'LAT': 'latitude',\n\t\t\t'LON': 'longitude'\n\t\t}, inplace=True)\n\n\t\tself.bus_line_shapes = bus_line_shape_data\n\n\tdef _feed_tabela_linha_json(self, file_data: pd.DataFrame):\n\t\t\"\"\"\n\t\tMerges the data provided into the bus_lines_schedule_tables dataframe.\n\t\t:param file_data: Dataframe to merge.\n\t\t\"\"\"\n\t\tschedule_table_data = file_data[['TABELA', 'COD', 'NUM', 'DIA', 'HORA', 'ADAPT']].copy()\n\n\t\tschedule_table_data.rename(columns={\n\t\t\t'TABELA': 'table_id',\n\t\t\t'COD': 'bus_line_id',\n\t\t\t'NUM': 'bus_stop_id',\n\t\t\t'DIA': 'day_type',\n\t\t\t'HORA': 'time',\n\t\t\t'ADAPT': 'adaptive'\n\t\t}, inplace=True)\n\t\tschedule_table_data.replace({'day_type': {\n\t\t\t1: 'weekday',\n\t\t\t2: 'saturday',\n\t\t\t3: 'sunday',\n\t\t\t4: 'holiday'\n\t\t}}, inplace=True)\n\t\t# TODO: Add file date to the data?\n\n\t\tself.bus_lines_schedule_tables = self.bus_lines_schedule_tables.merge(schedule_table_data, how='outer')\n\n\tdef _feed_tabela_veiculo_json(self, file_data: pd.DataFrame):\n\t\t\"\"\"\n\t\tMerges the data provided into the vehicles_schedule_tables dataframe.\n\t\t:param file_data: Dataframe to merge.\n\t\t\"\"\"\n\t\tschedule_table_data = file_data[['TABELA', 'COD_LINHA', 'COD_PONTO', 'HORARIO', 'VEICULO']].copy()\n\n\t\tschedule_table_data.rename(columns={\n\t\t\t'TABELA': 'table_id',\n\t\t\t'COD_LINHA': 'bus_line_id',\n\t\t\t'COD_PONTO': 'bus_stop_id',\n\t\t\t'HORARIO': 'time',\n\t\t\t'VEICULO': 'vehicle_id'\n\t\t}, inplace=True)\n\t\tschedule_table_data['bus_line_id'] = schedule_table_data['bus_line_id'].astype(str)\n\t\t# TODO: Add file date to the data?\n\n\t\tself.vehicles_schedule_tables = self.vehicles_schedule_tables.merge(schedule_table_data, how='outer')\n\n\tdef _feed_trechos_itinerarios_json(self, file_data: pd.DataFrame):\n\t\t\"\"\"\n\t\tMerges the data provided into the itinerary_stops_extra, itinerary_distances, companies and\n\t\titinerary_stops_companies dataframes.\n\t\t:param file_data: Dataframe to merge.\n\t\t\"\"\"\n\t\titinerary_stops_data = file_data[['COD_ITINERARIO', 'NOME_ITINERARIO', 'COD_LINHA', 'CODIGO_URBS', 'STOP_NAME',\n\t\t 'NOME_PTO_PARADA_TH', 'NOME_PTO_ABREVIADO', 'STOP_CODE', 'SEQ_PONTO_TRECHO_A',\n\t\t 'TIPO_TRECHO', 'PTO_ESPECIAL']].copy()\n\t\titinerary_distances_data = file_data[['CODIGO_URBS', 'COD_PTO_TRECHO_B', 'EXTENSAO_TRECHO_A_ATE_B']].copy()\n\t\tcompany_data = file_data[['COD_EMPRESA', 'NOME_EMPRESA']].copy()\n\t\titinerary_stops_company_data = file_data[['CODIGO_URBS', 'COD_EMPRESA']].copy()\n\n\t\titinerary_stops_data.rename(columns={\n\t\t\t'COD_ITINERARIO': 'itinerary_id',\n\t\t\t'NOME_ITINERARIO': 'itinerary_name',\n\t\t\t'COD_LINHA': 'bus_line_id',\n\t\t\t'CODIGO_URBS': 'itinerary_stop_id',\n\t\t\t'STOP_NAME': 'stop_name',\n\t\t\t'NOME_PTO_PARADA_TH': 'stop_name_short',\n\t\t\t'NOME_PTO_ABREVIADO': 'stop_name_abbr',\n\t\t\t'STOP_CODE': 'bus_stop_id',\n\t\t\t'SEQ_PONTO_TRECHO_A': 'sequence_number',\n\t\t\t'TIPO_TRECHO': 'type',\n\t\t\t'PTO_ESPECIAL': 'special_stop'\n\t\t}, inplace=True)\n\t\titinerary_stops_data.drop_duplicates(inplace=True)\n\n\t\titinerary_distances_data.rename(columns={\n\t\t\t'CODIGO_URBS': 'itinerary_stop_id',\n\t\t\t'COD_PTO_TRECHO_B': 'itinerary_next_stop_id',\n\t\t\t'EXTENSAO_TRECHO_A_ATE_B': 'distance_m'\n\t\t}, 
inplace=True)\n\t\titinerary_distances_data.drop_duplicates(inplace=True)\n\n\t\tcompany_data.rename(columns={\n\t\t\t'COD_EMPRESA': 'id',\n\t\t\t'NOME_EMPRESA': 'name'\n\t\t}, inplace=True)\n\t\tcompany_data.drop_duplicates(inplace=True)\n\n\t\titinerary_stops_company_data.rename(columns={\n\t\t\t'CODIGO_URBS': 'itinerary_stop_id',\n\t\t\t'COD_EMPRESA': 'company_id'\n\t\t}, inplace=True)\n\t\titinerary_stops_company_data.drop_duplicates(inplace=True)\n\n\t\tself.itinerary_stops_extra = self.itinerary_stops_extra.merge(itinerary_stops_data, how='outer')\n\t\tself.itinerary_distances = self.itinerary_distances.merge(itinerary_distances_data, how='outer')\n\t\tself.companies = self.companies.merge(company_data, how='outer')\n\t\tself.itinerary_stops_companies = self.itinerary_stops_companies.merge(itinerary_stops_company_data, how='outer')\n\n\tdef _feed_veiculos_json(self, file_data: pd.DataFrame):\n\t\t\"\"\"\n\t\tSets the data provided as the vehicle_log dataframe.\n\t\t:param file_data: Dataframe to set.\n\t\t\"\"\"\n\t\tvehicle_log_data = file_data\n\t\tvehicle_log_data.rename(columns={\n\t\t\t'DTHR': 'timestamp',\n\t\t\t'VEIC': 'vehicle_id',\n\t\t\t'COD_LINHA': 'bus_line_id',\n\t\t\t'LAT': 'latitude',\n\t\t\t'LON': 'longitude'\n\t\t}, inplace=True)\n\n\t\tvehicle_log_data['timestamp'] = pd.to_datetime(vehicle_log_data['timestamp'], format='%d/%m/%Y %H:%M:%S')\n\n\t\t# FIXME: these datasets are too large. How to deal with concatenation?\n\t\t# self.vehicle_log = pd.concat([self.vehicle_log, vehicle_log_data], sort=False)\n\t\tself.vehicle_log = vehicle_log_data\n"
] | [
[
"pandas.to_datetime",
"pandas.DataFrame",
"pandas.read_json"
]
] |
HarvsG/rightmove_floorscraper | [
"85033bcda1878e44cb648ab742fac6f7e66cd62e"
] | [
"package/rightmove_floorscraper/__init__.py"
] | [
"#!/usr/bin/env python3\n\n# Dependencies\nfrom lxml import html, etree\nimport requests\nimport numpy as np\nimport pandas as pd\nimport datetime as dt\n\nclass _GetDataFromURL(object):\n \"\"\"This \"private\" class does all the heavy lifting of fetching data from the\n URL provided, and then returns data to the main `rightmove_data` class\n instance. The reason for this is so that all the validation and web-scraping\n is done when an instance is created, and afterwards the data is accessible\n quickly via methods on the `rightmove_data` instance.\"\"\"\n\n def __init__(self, url):\n \"\"\"Initialize an instance of the scraper by passing a URL from the\n results of a property search on www.rightmove.co.uk.\"\"\"\n self.url = url\n self.first_page = self.make_request(self.url)\n self.validate_url()\n self.get_results = self.__get_results\n\n def validate_url(self):\n \"\"\"Basic validation that the URL at least starts in the right format and\n returns status code 200.\"\"\"\n real_url = \"{}://www.rightmove.co.uk/{}/find.html?\"\n protocols = [\"http\", \"https\"]\n types = [\"property-to-rent\", \"property-for-sale\", \"new-homes-for-sale\"]\n left_urls = [real_url.format(p, t) for p in protocols for t in types]\n conditions = [self.url.startswith(u) for u in left_urls]\n conditions.append(self.first_page[1] == 200)\n if not any(conditions):\n raise ValueError(\"Invalid rightmove URL:\\n\\n\\t{}\".format(self.url))\n\n @property\n def rent_or_sale(self):\n \"\"\"Tag to determine if the search is for properties for rent or sale.\n Required beacuse the Xpaths are different for the target elements.\"\"\"\n if \"/property-for-sale/\" in self.url \\\n or \"/new-homes-for-sale/\" in self.url:\n return \"sale\"\n elif \"/property-to-rent/\" in self.url:\n return \"rent\"\n else:\n raise ValueError(\"Invalid rightmove URL:\\n\\n\\t{}\".format(self.url))\n\n @property\n def results_count(self):\n \"\"\"Returns an integer of the total number of listings as displayed on\n the first page of results. Note that not all listings are available to\n scrape because rightmove limits the number of accessible pages.\"\"\"\n tree = html.fromstring(self.first_page[0])\n xpath = \"\"\"//span[@class=\"searchHeader-resultCount\"]/text()\"\"\"\n try:\n return int(tree.xpath(xpath)[0].replace(\",\", \"\"))\n except:\n print('error extracting the result count header')\n return 1050\n\n @property\n def page_count(self):\n \"\"\"Returns the number of result pages returned by the search URL. There\n are 24 results per page. Note that the website limits results to a\n maximum of 42 accessible pages.\"\"\"\n page_count = self.results_count // 24\n if self.results_count % 24 > 0: page_count += 1\n # Rightmove will return a maximum of 42 results pages, hence:\n if page_count > 42: page_count = 42\n return page_count\n\n @staticmethod\n def make_request(url):\n r = requests.get(url)\n # Minimise the amount returned to reduce overheads:\n return r.content, r.status_code\n\n def get_page(self, request_content):\n \"\"\"Method to scrape data from a single page of search results. 
Used\n iteratively by the `get_results` method to scrape data from every page\n returned by the search.\"\"\"\n # Process the html:\n tree = html.fromstring(request_content)\n\n # Set xpath for price:\n if self.rent_or_sale == \"rent\":\n xp_prices = \"\"\"//span[@class=\"propertyCard-priceValue\"]/text()\"\"\"\n elif self.rent_or_sale == \"sale\":\n xp_prices = \"\"\"//div[@class=\"propertyCard-priceValue\"]/text()\"\"\"\n\n # Set xpaths for listing title, property address, URL, and agent URL:\n xp_titles = \"\"\"//div[@class=\"propertyCard-details\"]\\\n //a[@class=\"propertyCard-link\"]\\\n //h2[@class=\"propertyCard-title\"]/text()\"\"\"\n xp_addresses = \"\"\"//address[@class=\"propertyCard-address\"]//span/text()\"\"\"\n xp_weblinks = \"\"\"//div[@class=\"propertyCard-details\"]\\\n //a[@class=\"propertyCard-link\"]/@href\"\"\"\n xp_agent_urls = \"\"\"//div[@class=\"propertyCard-contactsItem\"]\\\n //div[@class=\"propertyCard-branchLogo\"]\\\n //a[@class=\"propertyCard-branchLogo-link\"]/@href\"\"\"\n \n\n # Create data lists from xpaths:\n price_pcm = tree.xpath(xp_prices)\n titles = tree.xpath(xp_titles)\n addresses = tree.xpath(xp_addresses)\n base = \"http://www.rightmove.co.uk\"\n weblinks = [\"{}{}\".format(base, tree.xpath(xp_weblinks)[w]) \\\n for w in range(len(tree.xpath(xp_weblinks)))]\n agent_urls = [\"{}{}\".format(base, tree.xpath(xp_agent_urls)[a]) \\\n for a in range(len(tree.xpath(xp_agent_urls)))]\n \n #get floorplan from property urls\n floorplan_urls = []\n for weblink in weblinks:\n rc = self.make_request(weblink)\n tree = html.fromstring(rc[0])\n \n xp_floorplan_url = \"\"\"//*[@id=\"floorplanTabs\"]/div[2]/div[2]/img/@src\"\"\"\n floorplan_url = tree.xpath(xp_floorplan_url)\n if floorplan_url == []:\n floorplan_urls.append(np.nan)\n else:\n floorplan_urls.append(floorplan_url[0])\n \n # Store the data in a Pandas DataFrame:\n data = [price_pcm, titles, addresses, weblinks, agent_urls, floorplan_urls]\n temp_df = pd.DataFrame(data)\n temp_df = temp_df.transpose()\n temp_df.columns = [\"price\", \"type\", \"address\", \"url\", \"agent_url\", \"floorplan_url\"]\n \n # Drop empty rows which come from placeholders in the html:\n temp_df = temp_df[temp_df[\"address\"].notnull()]\n return temp_df\n\n @property\n def __get_results(self):\n \"\"\"Pandas DataFrame with all results returned by the search.\"\"\"\n # Create DataFrame of the first page (which has already been requested):\n results = self.get_page(self.first_page[0])\n\n # Iterate through the rest of the pages scraping results:\n if self.page_count > 1:\n for p in range(1, self.page_count + 1, 1):\n\n # Create the URL of the specific results page:\n p_url = \"{}&index={}\".format(str(self.url), str((p * 24)))\n\n # Make the request:\n rc = self.make_request(p_url)\n\n # Requests to scrape lots of pages eventually get status 400, so:\n if rc[1] != 200: break\n\n # Create a temporary dataframe of page results:\n temp_df = self.get_page(rc[0])\n\n # Concatenate the temporary dataframe with the full dataframe:\n frames = [results, temp_df]\n results = pd.concat(frames)\n\n # Reset the index:\n results.reset_index(inplace=True, drop=True)\n\n # Convert price column to numeric type:\n results[\"price\"].replace(regex=True, inplace=True, to_replace=r\"\\D\", value=r\"\")\n results[\"price\"] = pd.to_numeric(results[\"price\"])\n\n # Extract postcodes to a separate column:\n pat = r\"\\b([A-Za-z][A-Za-z]?[0-9][0-9]?[A-Za-z]?)\\b\"\n results[\"postcode\"] = results[\"address\"].astype(str).str.extract(pat, 
expand=True)\n\n # Extract number of bedrooms from \"type\" to a separate column:\n pat = r\"\\b([\\d][\\d]?)\\b\"\n results[\"number_bedrooms\"] = results.type.astype(str).str.extract(pat, expand=True)\n results.loc[results[\"type\"].astype(str).str.contains(\"studio\", case=False), \"number_bedrooms\"] = 0\n\n # Clean up annoying white spaces and newlines in \"type\" column:\n for row in range(len(results)):\n type_str = results.loc[row, \"type\"]\n clean_str = type_str.strip(\"\\n\").strip()\n results.loc[row, \"type\"] = clean_str\n\n # Add column with datetime when the search was run (i.e. now):\n now = dt.datetime.today()\n results[\"search_date\"] = now\n\n return results\n\nclass rightmove_data(object):\n \"\"\"The `rightmove_data` web scraper collects structured data on properties\n returned by a search performed on www.rightmove.co.uk\n\n An instance of the class created with a rightmove URL provides attributes to\n easily access data from the search results, the most useful being\n `get_results`, which returns all results as a Pandas DataFrame object.\n \"\"\"\n def __init__(self, url):\n \"\"\"Initialize the scraper with a URL from the results of a property\n search performed on www.rightmove.co.uk\"\"\"\n self.__request_object = _GetDataFromURL(url)\n self.__url = url\n\n @property\n def url(self):\n return self.__url\n\n @property\n def get_results(self):\n \"\"\"Pandas DataFrame of all results returned by the search.\"\"\"\n return self.__request_object.get_results\n\n @property\n def results_count(self):\n \"\"\"Total number of results returned by `get_results`. Note that the\n rightmove website may state a much higher number of results; this is\n because they artificially restrict the number of results pages that can\n be accessed to 42.\"\"\"\n return len(self.get_results)\n\n @property\n def average_price(self):\n \"\"\"Average price of all results returned by `get_results` (ignoring\n results which don't list a price).\"\"\"\n total = self.get_results[\"price\"].dropna().sum()\n return int(total / self.results_count)\n\n def summary(self, by=\"number_bedrooms\"):\n \"\"\"Pandas DataFrame summarising the the results by mean price and count.\n By default grouped by the `number_bedrooms` column but will accept any\n column name from `get_results` as a grouper.\"\"\"\n df = self.get_results.dropna(axis=0, subset=[\"price\"])\n groupers = {\"price\":[\"count\", \"mean\"]}\n df = df.groupby(df[by]).agg(groupers).astype(int)\n df.columns = df.columns.get_level_values(1)\n df.reset_index(inplace=True)\n if \"number_bedrooms\" in df.columns:\n df[\"number_bedrooms\"] = df[\"number_bedrooms\"].astype(int)\n df.sort_values(by=[\"number_bedrooms\"], inplace=True)\n else:\n df.sort_values(by=[\"count\"], inplace=True, ascending=False)\n return df.reset_index(drop=True)\n"
] | [
[
"pandas.to_numeric",
"pandas.DataFrame",
"pandas.concat"
]
] |
sahiljain11/ICML2019-TREX | [
"82694f2dfe6e3bb0668948ffc531fcde20cdf45b"
] | [
"audio_atari/baselines/baselines/common/custom_reward_wrapper.py"
] | [
"import gym\nimport numpy as np\nfrom baselines.common.vec_env import VecEnvWrapper\nfrom baselines.common.running_mean_std import RunningMeanStd\nfrom baselines.common.trex_utils import preprocess\n#import matplotlib.pyplot as plt\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass AtariNet(nn.Module):\n def __init__(self):\n super().__init__()\n\n self.conv1 = nn.Conv2d(4, 16, 7, stride=3)\n self.conv2 = nn.Conv2d(16, 16, 5, stride=2)\n self.conv3 = nn.Conv2d(16, 16, 3, stride=1)\n self.conv4 = nn.Conv2d(16, 16, 3, stride=1)\n self.fc1 = nn.Linear(784, 64)\n self.fc2 = nn.Linear(64, 1)\n\n\n def forward(self, traj):\n '''calculate cumulative return of trajectory'''\n x = traj.permute(0,3,1,2) #get into NCHW format\n #compute forward pass of reward network\n x = F.leaky_relu(self.conv1(x))\n x = F.leaky_relu(self.conv2(x))\n x = F.leaky_relu(self.conv3(x))\n x = F.leaky_relu(self.conv4(x))\n x = x.reshape(-1, 784)\n x = F.leaky_relu(self.fc1(x))\n r = torch.sigmoid(self.fc2(x))\n return r\n\nclass VecPyTorchAtariReward(VecEnvWrapper):\n def __init__(self, venv, reward_net_path, env_name):\n VecEnvWrapper.__init__(self, venv)\n self.reward_net = AtariNet()\n self.reward_net.load_state_dict(torch.load(reward_net_path))\n self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n self.reward_net.to(self.device)\n\n self.rew_rms = RunningMeanStd(shape=())\n self.epsilon = 1e-8\n self.cliprew = 10.\n self.env_name = env_name\n\n def step_wait(self):\n obs, rews, news, infos = self.venv.step_wait()\n \n #mask and normalize for input to network\n normed_obs = preprocess(obs, self.env_name)\n \n with torch.no_grad():\n rews_network = self.reward_net.forward(torch.from_numpy(np.array(normed_obs)).float().to(self.device)).cpu().numpy().squeeze()\n\n return obs, rews_network, news, infos\n\n def reset(self, **kwargs):\n obs = self.venv.reset()\n\n \n return obs\n\n\nif __name__ == \"__main__\":\n pass\n"
] | [
[
"torch.nn.Linear",
"torch.load",
"torch.no_grad",
"torch.cuda.is_available",
"torch.nn.Conv2d",
"numpy.array"
]
] |
yf225/jax-alpa | [
"22219884fdec81e8325cfe0de0cc0c627291f0a1"
] | [
"jax/experimental/jax2tf/jax2tf.py"
] | [
"# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Experimental module transforms JAX functions to be executed by TensorFlow.\"\"\"\nfrom functools import partial\nimport contextlib\nimport os\nimport re\nimport threading\nfrom typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union\n\nimport jax\nfrom jax import lax\nfrom jax import config\nfrom jax import core, custom_derivatives\nfrom jax import linear_util as lu\nfrom jax import random, tree_util\nfrom jax import numpy as jnp\nfrom jax.experimental import maps\nfrom jax.experimental import pjit\nfrom jax.interpreters import ad\nfrom jax.interpreters import partial_eval\nfrom jax.interpreters import pxla\nfrom jax.interpreters import xla\n\nimport jax._src.prng\nimport jax._src.random\nfrom jax._src import ad_checkpoint\nfrom jax._src import ad_util\nfrom jax._src import api\nfrom jax._src import api_util\nfrom jax._src import dispatch\nfrom jax._src import dtypes\nfrom jax._src import source_info_util\nfrom jax._src import util\nfrom jax._src.lax import control_flow as lax_control_flow\nfrom jax._src.lax import lax as lax_internal\nfrom jax._src.lax import linalg as lax_linalg\nfrom jax._src.lax import slicing as lax_slicing\nfrom jax._src.lax import windowed_reductions as lax_windowed_reductions\nfrom jax._src.lib import xla_client\n\nfrom jax.experimental.jax2tf import shape_poly\nfrom jax.experimental.jax2tf import impl_no_xla\n\nimport numpy as np\nimport tensorflow as tf # type: ignore[import]\n\n# These don't have public equivalents.\n# pylint: disable=g-direct-tensorflow-import\nfrom tensorflow.compiler.tf2xla.python import xla as tfxla # type: ignore[import]\nfrom tensorflow.compiler.xla import xla_data_pb2 # type: ignore[import]\nfrom tensorflow.core.framework import attr_value_pb2 # type: ignore[import]\nfrom tensorflow.compiler.xla.experimental.xla_sharding import xla_sharding # type: ignore[import]\nfrom tensorflow.python.framework import ops as tf_ops # type: ignore[import]\n# pylint: enable=g-direct-tensorflow-import\n\nPolyShape = shape_poly.PolyShape\n\n# A temporary internal flag, to enable the wrapping of jax.jit functions\n# with tf.function(jit_compile=True). See #7389. This change has triggered a\n# number of failures in TF. We keep this until we are confident that it does\n# not create problems.\n# TODO(b/207464757): figure out why this change breaks test\n_WRAP_JAX_JIT_WITH_TF_FUNCTION = False\n\n# The scope name need to be a valid TensorFlow name. 
See\n# https://github.com/tensorflow/tensorflow/blob/r2.3/tensorflow/core/framework/node_def_util.cc#L731\n_VALID_SCOPE_REGEX = re.compile(\"^[A-Za-z0-9.][A-Za-z0-9_.\\\\/>-]*$\")\n_INVALID_SCOPE_CHAR = re.compile(\"[^A-Za-z0-9_.\\\\/>-]\")\n\nmap = util.safe_map\nzip = util.safe_zip\n\n\ndef _sanitize_scope_name(name):\n scope_name = _INVALID_SCOPE_CHAR.sub(\"_\", name)\n if not _VALID_SCOPE_REGEX.match(scope_name):\n scope_name = \".{}\".format(scope_name)\n return scope_name\n\n\n# A value suitable in a TF tracing context: tf.Tensor, tf.Variable,\n# or Python scalar or numpy.ndarray. (A tf.EagerTensor is a tf.Tensor.)\nTfVal = Any\nDType = Any\nPrecisionType = int # Enum xla_data.PrecisionConfig.Precision\n\ndef _is_tfval(v: TfVal) -> bool:\n if isinstance(v, (tf.Tensor, tf.Variable)):\n return True\n try:\n # Include all convertible types, even if not supported on accelerators.\n with tf.device(\"CPU\"):\n tf.constant(v)\n return True\n except:\n return False\n\n\n# The implementation rules for primitives. The rule will be called with the\n# arguments (TfVal) and must return TfVal (or a sequence thereof,\n# if primitive.multiple_results). The vast majority of primitives do not need\n# to worry about core.unit inputs or results. The exception are primarily the\n# control-flow primitives.\ntf_impl: Dict[core.Primitive, Callable[..., Any]] = {}\n\n# Some primitive implementation rules need the abstract values of arguments\n# and the results. This is the case for the primitives implemented using\n# _convert_jax_impl and those that need to adjust the shape of the outputs\n# due to missing TF shape inference rules for TFXLA ops. The rules for these\n# primitives should be added to `tf_impl_with_avals`.\n# The abstract value are passed to the implementation as two special kwargs\n# `_in_avals` (a tuple of core.ShapedArray) and `_out_aval` (a\n# core.ShapedArray, or a tuple thereof when primitive.multiple_results).\ntf_impl_with_avals: Dict[core.Primitive, Callable[..., Any]] = {}\n\n# XLA is not linked in all environments when converting a primitive. If this is\n# the case, we first search for implementation rules for primitives in the\n# following map. These implementations are workarounds, making use of TF ops\n# that do work when XLA is not linked in.\ntf_impl_no_xla = impl_no_xla.tf_impl_no_xla\n\n# In order to ensure that JAX picks up the proper user-frame for source\n# locations we will register the TensorFlow source path as an internal\n# path with source_info_util. The typical stack when a JAX primitive\n# conversion happens is:\n# jax2tf.process_primitive (top of stack)\n# jax tracing machinery ...\n# tf.custom_gradient machinery ...\n# jax2tf.converted_fun\n# tf function machinery ...\n# user code invokes the converted function on TF tensors\n#\n# We need to skip over not only JAX internal frames, but TF internal frames\n# also.\n# We register the TensorFlow source path lazily\n_has_registered_tf_source_path = False\n\nclass _ThreadLocalState(threading.local):\n def __init__(self):\n self.name_stack = \"\"\n # XLA is not linked in all environments; when converting a primitive, if this\n # variable is disabled, we try harder to use only standard TF ops if they are\n # applicable to the concrete use case; if the resulting conversion path ends up\n # requiring a TFXLA operation, an exception is thrown instead.\n self.enable_xla = True\n\n # Keep track if we are inside a call_tf. 
In that context we disable the\n # safety check that we are not inside JAX transformations.\n self.inside_call_tf = False\n\n # Maps dimension variables to TF expressions\n self.shape_env: Sequence[Tuple[str, TfVal]] = ()\n\n # Whether to actually include XLA op metadata in the generated TF ops\n # TODO(b/189306134): implement support for XLA metadata\n self.include_xla_op_metadata = False\n\n # A cache for the tf.convert_to_tensor for constants. We try to preserve\n # sharing for constants, to enable tf.Graph to take advantage of it.\n # See https://github.com/google/jax/issues/7992.\n self.constant_cache = None # None means that we don't use a cache. We\n # may be outside a conversion scope.\n\n\n_thread_local_state = _ThreadLocalState()\n\ndef _get_current_name_stack():\n return _thread_local_state.name_stack\n\[email protected]\ndef inside_call_tf():\n # Set the inside_call_tf flag for a context.\n prev = _thread_local_state.inside_call_tf\n _thread_local_state.inside_call_tf = True\n try:\n yield\n finally:\n _thread_local_state.inside_call_tf = prev\n\n@partial(api_util.api_hook, tag=\"jax2tf_convert\")\ndef convert(fun: Callable,\n *,\n polymorphic_shapes=None,\n with_gradient=True,\n enable_xla=True\n ) -> Callable:\n \"\"\"Transforms `fun` to be executed by TensorFlow.\n\n See\n [README](https://github.com/google/jax/blob/main/jax/experimental/jax2tf/README.md)\n for more details about usage and common problems.\n\n Args:\n fun: Function to be transformed. Its arguments and return value should be\n JAX arrays, or nested standard Python containers (tuple/list/dict) thereof\n (pytrees).\n polymorphic_shapes: Specifies input shapes to be treated polymorphically\n during conversion.\n\n .. warning:: The shape-polymorphic conversion is an experimental feature.\n It is meant to be sound, but it is known to reject some JAX programs\n that are shape polymorphic. The details of this feature can change.\n\n It should be `None` (all arguments are monomorphic), a single PolyShape\n or string (applies to all arguments), or a tuple/list of the same length\n as the function arguments. For each argument the shape specification\n should be `None` (monomorphic argument), or a Python object with the\n same pytree structure as the argument.\n See [how optional parameters are matched to\n arguments](https://jax.readthedocs.io/en/latest/pytrees.html#applying-optional-parameters-to-pytrees).\n\n A shape specification for an array argument should be an object\n `PolyShape(dim0, dim1, ..., dimn)`\n where each `dim` is a dimension specification: a positive integer denoting\n a monomorphic dimension of the given size, or a string denoting a\n dimension variable assumed to range over non-zero dimension sizes, or\n the special placeholder string \"_\" denoting a monomorphic dimension\n whose size is given by the actual argument. 
As a shortcut, an Ellipsis\n suffix in the list of dimension specifications stands for a list of \"_\"\n placeholders.\n\n For convenience, a shape specification can also be given as a string\n representation, e.g.: \"batch, ...\", \"batch, height, width, _\", possibly\n with surrounding parentheses: \"(batch, ...)\".\n\n The conversion fails if it cannot ensure that the it would produce the same\n sequence of TF ops for any non-zero values of the dimension variables.\n\n polymorphic_shapes are only supported for positional arguments; shape\n polymorphism is not supported for keyword arguments.\n\n See [the README](https://github.com/google/jax/blob/main/jax/experimental/jax2tf/README.md#shape-polymorphic-conversion)\n for more details.\n\n in_shapes: DEPRECATED in favor of `polymorphic_shapes`.\n with_gradient: if set (default), add a tf.custom_gradient to the converted\n function, by converting the ``jax.vjp(fun)``. This means that reverse-mode\n TensorFlow AD is supported for the output TensorFlow function, and the\n value of the gradient will be JAX-accurate.\n enable_xla: if set (default), the converter will use the simplest conversion\n and use XLA TF ops when necessary. These ops are known to create issues\n for the TFLite and TFjs converters. For those cases, unset this parameter\n so the converter tries harder to use non-XLA TF ops to convert the\n function and aborts if this is not possible.\n\n Returns:\n A version of `fun` that expects TfVals as arguments (or\n tuple/lists/dicts) thereof, and returns TfVals as outputs, and uses\n only TensorFlow ops.\n \"\"\"\n api._check_callable(fun)\n fun_name = getattr(fun, \"__name__\", \"unknown\")\n name_stack = util.wrap_name(fun_name, \"jax2tf\") + \"/\"\n def converted_fun(*args: TfVal, **kwargs: TfVal) -> TfVal:\n # TODO: is there a better way to check if we are inside a transformation?\n if not core.trace_state_clean() and not _thread_local_state.inside_call_tf:\n # It is Ok to nest convert when we are inside a call_tf\n raise ValueError(\"convert must be used outside all JAX transformations.\" +\n f\"Trace state: {core.thread_local_state.trace_state.trace_stack}\")\n\n # We support kwargs by wrapping the function to take only positional arguments.\n # This is in part because jax.vjp does not support kwargs.\n nr_positional_args = len(args)\n kw_names = kwargs.keys()\n args = tuple(args) + tuple(kwargs[kw] for kw in kw_names)\n\n def fun_no_kwargs(*args_and_kwargs):\n assert len(args_and_kwargs) == nr_positional_args + len(kw_names)\n args = args_and_kwargs[:nr_positional_args]\n kwargs = {kw: args_and_kwargs[nr_positional_args + i]\n for i, kw in enumerate(kw_names)}\n return fun(*args, **kwargs)\n\n def check_arg(a):\n if not _is_tfval(a):\n msg = (f\"Argument {a} of type {type(a)} of jax2tf.convert(f) should \"\n \"be NumPy array, scalar, tf.Variable, or tf.Tensor\")\n raise TypeError(msg)\n\n tree_util.tree_map(check_arg, args)\n\n args_flat, in_tree = tree_util.tree_flatten((args, {}))\n # May need to cast the arguments to have the type assumed by JAX\n args_and_dtypes_flat = tuple(map(_tfval_to_tensor_jax_dtype, args_flat))\n args_flat, arg_dtypes_flat = util.unzip2(args_and_dtypes_flat)\n # Name input tensors; do this after we have cast the arguments\n def _apply_name(a: TfVal, suffix) -> TfVal:\n return tf.identity(a, f\"jax2tf_arg_{suffix}\")\n args_flat = tuple(_apply_name(a, i) for i, a in enumerate(args_flat))\n\n if polymorphic_shapes is None:\n polymorphic_shapes_ = (polymorphic_shapes,) * len(args)\n elif 
isinstance(polymorphic_shapes, (PolyShape, str)):\n polymorphic_shapes_ = (polymorphic_shapes,) * len(args) # type: ignore\n else:\n if not isinstance(polymorphic_shapes, Sequence) or len(polymorphic_shapes) != len(args) - len(kw_names):\n msg = (\"polymorphic_shapes must be a sequence with the same length as the positional argument list \"\n f\"({len(args)}). Got polymorphic_shapes={repr(polymorphic_shapes)}.\")\n raise TypeError(msg)\n polymorphic_shapes_ = tuple(polymorphic_shapes) + (None,) * len(kw_names)\n\n # Expand the polymorphic_shapes to match the argument pytree\n polymorphic_shapes_flat = tuple(api_util.flatten_axes(\"jax2tf.convert polymorphic_shapes\",\n in_tree.children()[0],\n polymorphic_shapes_))\n\n def fix_tf1_shape(arg: TfVal) -> Sequence[Optional[int]]:\n tf_arg_shape = np.shape(arg)\n return tuple(d.value if isinstance(d, tf.compat.v1.Dimension) else d for d in tf_arg_shape)\n args_shapes_flat = tuple(fix_tf1_shape(a) for a in args_flat)\n\n # Construct the abstract values for the flat arguments, possibly based on\n # the input shapes and the polymorphic_shapes if given. May create new shape\n # variables. May cast the args_flat to JAX types, using JAX's interpretation\n # of types of constants.\n args_avals_flat = shape_poly.args_avals(\n args_shapes_flat, arg_dtypes_flat, polymorphic_shapes_flat)\n\n dim_vars, get_dim_values = shape_poly.prepare_dim_var_env(args_avals_flat)\n dim_values, _ = util.unzip2(_interpret_fun(lu.wrap_init(get_dim_values),\n args_flat, args_avals_flat, \"\"))\n shape_env = zip(dim_vars, dim_values)\n\n # This function may take pytrees of TfVals. We can only set\n # tf.custom_gradient on functions that take a flat argument list.\n f = lu.wrap_init(fun_no_kwargs)\n # out_tree_thunk() will be the output tree, after running _interpret_fun.\n flat_fun, out_tree_thunk = api_util.flatten_fun(f, in_tree)\n # out_tree_thunk will be ready after _interpret_fun below.\n\n # Prepare the grad_fn for tf.custom_gradient.\n def converted_grad_fn(*out_cts_flat: TfVal,\n _out_cts_avals: Sequence[core.ShapedArray],\n variables=None):\n if variables:\n raise ValueError(\n \"Unexpected variables used in forward pass. \"\n \"This should not happen for first-order differentiation. \"\n f\"variables={variables}\")\n\n out_tree = out_tree_thunk()\n if polymorphic_shapes is None:\n vjp_polymorphic_shapes = None\n else:\n args_flat_polymorphic_shapes = polymorphic_shapes_flat\n out_cts_flat_polymorphic_shapes = tuple(str(out_aval.shape) # Note: may be polynomials, not just DimVar\n for out_aval in _out_cts_avals) # type: ignore\n vjp_polymorphic_shapes = [\n args_flat_polymorphic_shapes, out_cts_flat_polymorphic_shapes\n ]\n\n def fun_vjp_jax(args_flat_jax, out_cts_flat_jax):\n # One may think that we can get the pullback while we are converting\n # the main function in the first place. That is problematic, because the\n # pullback may contain captured tracers from the conversion of the\n # main function. Those tracers will confuse the conversion of the\n # pullback. So, we construct the vjp anew and we convert it separately.\n args_jax, kwargs_jax = tree_util.tree_unflatten(in_tree, args_flat_jax)\n assert not kwargs_jax\n _, pullback_jax = jax.vjp(fun_no_kwargs, *args_jax)\n\n def fix_out_ct(out_ct_jax, out_ct_aval: core.ShapedArray):\n # If the primal function has outputs of integer or bool types, and if we are\n # under a tf.function context, then TF will pass None in _out_cts_flat\n # in place of these values. 
We should change these to float0 or\n # else JAX gets unhappy. See issue #6975.\n if out_ct_jax is not None:\n return out_ct_jax\n assert core.primal_dtype_to_tangent_dtype(out_ct_aval.dtype) == dtypes.float0, f\"out_ct={out_ct_jax}\"\n # Note that out_ct_aval.shape contains dimension variable from the\n # primal function scope. It is Ok to use them here because we\n # use the same shape variables for the VJP function.\n return jnp.zeros(out_ct_aval.shape, dtype=_tf_np_dtype_for_float0)\n\n out_cts_fixed_flat = tuple(map(fix_out_ct, out_cts_flat_jax, _out_cts_avals))\n\n out_cts_fixed = tree_util.tree_unflatten(out_tree, out_cts_fixed_flat)\n in_cts_jax = pullback_jax(out_cts_fixed)\n\n in_cts_flat_jax, in_cts_tree = tree_util.tree_flatten(in_cts_jax)\n def fix_in_ct(in_ct, arg_aval: core.ShapedArray):\n if jnp.issubdtype(arg_aval.dtype, jnp.inexact):\n return in_ct\n else:\n assert in_ct.dtype == dtypes.float0\n return jnp.zeros(arg_aval.shape, _tf_np_dtype_for_float0)\n\n in_cts_fixed_flat_jax = tuple(map(fix_in_ct, in_cts_flat_jax, args_avals_flat))\n return in_cts_fixed_flat_jax\n\n # TODO: enable higher-order gradients\n with tf.name_scope(\"jax2tf_vjp\"):\n in_cts_flat = convert(\n fun_vjp_jax,\n with_gradient=False,\n polymorphic_shapes=vjp_polymorphic_shapes)(args_flat, out_cts_flat)\n return in_cts_flat\n\n try:\n assert not _thread_local_state.shape_env, f\"Unexpected shape environment {_thread_local_state.shape_env}\"\n\n prev_enable_xla = _thread_local_state.enable_xla\n _thread_local_state.enable_xla = enable_xla\n\n prev_include_xla_op_metadata = _thread_local_state.include_xla_op_metadata\n # TODO(b/189306134): implement support for XLA metadata\n _thread_local_state.include_xla_op_metadata = False\n\n _thread_local_state.shape_env = shape_env\n global _has_registered_tf_source_path\n if not _has_registered_tf_source_path:\n source_info_util.register_exclusion(os.path.dirname(tf.__file__))\n _has_registered_tf_source_path = True\n\n if with_gradient:\n\n @tf.custom_gradient\n def converted_fun_flat_with_custom_gradient(*args_flat: TfVal) -> TfVal:\n out_with_avals = _interpret_fun(flat_fun, args_flat, args_avals_flat,\n name_stack,\n fresh_constant_cache=True)\n outs, out_avals = util.unzip2(out_with_avals)\n return (tuple(outs),\n partial(converted_grad_fn, _out_cts_avals=tuple(out_avals)))\n\n out_flat = converted_fun_flat_with_custom_gradient(*args_flat)\n else:\n out_with_avals = _interpret_fun(flat_fun, args_flat, args_avals_flat,\n name_stack, fresh_constant_cache=True)\n outs, out_avals = util.unzip2(out_with_avals)\n message = (\"The jax2tf-converted function does not support gradients. \"\n \"Use `with_gradient` parameter to enable gradients\")\n # We use PreventGradient, which is propagated through a SavedModel.\n out_flat = [\n tf.raw_ops.PreventGradient(input=o, message=message)\n for o in outs\n ]\n finally:\n _thread_local_state.shape_env = ()\n _thread_local_state.enable_xla = prev_enable_xla\n _thread_local_state.include_xla_op_metadata = prev_include_xla_op_metadata\n\n out_flat = [tf.identity(x, \"jax2tf_out\") for x in out_flat]\n out = tree_util.tree_unflatten(out_tree_thunk(), out_flat)\n return out\n\n return converted_fun\n\n\ndef dtype_of_val(val: TfVal) -> DType:\n \"\"\"Computes the TensorFlow dtype using JAX's typing rules.\n\n If the value is a tf.Tensor, it starts with its dtype. If the value is a\n constant it uses JAX to infer its dtype. 
The resulting dtype follows the\n JAX type inference rules, and depends on the value of the\n JAX_ENABLE_X64 flag.\n\n See README.md for how 64-bit values are treated.\n \"\"\"\n tval, _ = _tfval_to_tensor_jax_dtype(val)\n return tval.dtype\n\n# Internals\n\[email protected]\ndef _extended_name_stack(extra_name_stack: Optional[str]):\n prev_name_stack = _thread_local_state.name_stack\n if extra_name_stack:\n if not prev_name_stack:\n _thread_local_state.name_stack = extra_name_stack\n else:\n _thread_local_state.name_stack = util.extend_name_stack(\n _thread_local_state.name_stack, extra_name_stack)\n try:\n yield\n finally:\n _thread_local_state.name_stack = prev_name_stack\n\n\ndef _interpret_fun(\n fun: lu.WrappedFun, in_vals: Sequence[TfVal],\n in_avals: Sequence[core.ShapedArray],\n extra_name_stack: Optional[str],\n fresh_constant_cache: bool = False\n) -> Sequence[Tuple[TfVal, core.ShapedArray]]:\n with core.new_base_main(TensorFlowTrace) as main: # type: ignore\n fun = _interpret_subtrace(fun, main, in_avals)\n with _extended_name_stack(extra_name_stack):\n with core.new_sublevel():\n out_vals: Sequence[Tuple[TfVal, core.ShapedArray]] = \\\n _call_wrapped_with_new_constant_cache(fun, in_vals,\n fresh_constant_cache=fresh_constant_cache)\n\n del main\n\n return tuple(out_vals)\n\ndef _call_wrapped_with_new_constant_cache(fun: lu.WrappedFun,\n in_vals: Sequence[TfVal],\n fresh_constant_cache: bool = False\n ) -> Sequence[Tuple[TfVal, core.ShapedArray]]:\n try:\n prev_constant_cache = _thread_local_state.constant_cache\n prev_constant_cache_keys = set(prev_constant_cache.keys()) if prev_constant_cache is not None else set()\n # Start a new cache, so that we don't share constants across tf.function\n # boundaries.\n if fresh_constant_cache:\n _thread_local_state.constant_cache = {}\n\n out_vals: Sequence[Tuple[TfVal, core.ShapedArray]] = \\\n fun.call_wrapped(*in_vals)\n finally:\n if prev_constant_cache is not None and not fresh_constant_cache:\n newly_added_keys = set(prev_constant_cache.keys()) - prev_constant_cache_keys\n # Delete the newly added keys\n for k in newly_added_keys:\n del prev_constant_cache[k]\n _thread_local_state.constant_cache = prev_constant_cache\n return out_vals\n\ndef _convert_jax_impl(jax_impl: Callable, *,\n multiple_results=True,\n extra_name_stack: Optional[str] = None) -> Callable:\n \"\"\"Convert the JAX implementation of a primitive.\n\n Args:\n jax_impl: typically the impl-rule for a primitive, with signature\n `(*args: JaxVal, **kwargs) -> Sequence[JaxVal]`. 
This function implements\n a primitive in terms of other primitives.\n multiple_results: whether `jax_impl` returns a sequence of results.\n extra_name_stack: additional element to add to the name stack for the\n converted ops.\n\n Returns:\n a function with signature `(*args: TfVal, _in_avals, _out_aval, **kwargs)\n -> Sequence[TfVal]`.\n \"\"\"\n\n def wrapped(*tf_args: TfVal, _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray,\n **kwargs) -> Sequence[TfVal]:\n\n # We wrap the jax_impl under _interpret_fun to abstract the TF values\n # from jax_impl and turn them into JAX abstract values.\n def jax_impl_jax_args(*jax_args):\n jax_results = jax_impl(*jax_args, **kwargs)\n return jax_results if multiple_results else [jax_results]\n\n tf_results_with_avals = _interpret_fun(\n lu.wrap_init(jax_impl_jax_args), tf_args, _in_avals,\n extra_name_stack)\n tf_results, _ = util.unzip2(tf_results_with_avals)\n return tf_results if multiple_results else tf_results[0]\n\n return wrapped\n\n\[email protected]\ndef _interpret_subtrace(main: core.MainTrace,\n in_avals: Sequence[core.ShapedArray],\n *in_vals: TfVal):\n trace = TensorFlowTrace(main, core.cur_sublevel())\n in_tracers = tuple(\n TensorFlowTracer(trace, val, aval)\n for val, aval in zip(in_vals, in_avals))\n # The outs may be core.unit, see comment in TensorFlowTrace.pure.\n outs = yield in_tracers, {} # type: Sequence[Union[TfVal, core.Unit]]\n out_tracers: Iterable[TensorFlowTracer] = (\n map(trace.full_raise, outs)) # type: ignore\n out_vals_with_avals: Sequence[Tuple[TfVal, core.ShapedArray]] = (\n tuple((t.val, t.aval) for t in out_tracers))\n yield out_vals_with_avals\n\n\ndef _interpret_jaxpr(jaxpr: core.ClosedJaxpr, *args: TfVal,\n extra_name_stack: Optional[str]) -> Sequence[TfVal]:\n \"\"\"Evaluates a Jaxpr with tf.Tensor arguments.\n\n The output is a sequence of TfVal (no `core.unit`), suitable for use with TF.\n \"\"\"\n fun: lu.WrappedFun = lu.wrap_init(core.jaxpr_as_fun(jaxpr))\n out_with_avals = _interpret_fun(fun, args, jaxpr.in_avals, extra_name_stack)\n return tuple(v for v, _ in out_with_avals)\n\n\ndef _aval_to_tf_shape(aval: core.ShapedArray) -> Tuple[Optional[int], ...]:\n \"\"\"Generate a TF shape, possibly containing None for polymorphic dimensions.\"\"\"\n return tuple(map(lambda d: None if shape_poly.is_poly_dim(d) else d,\n aval.shape)) # type: ignore[attr-defined]\n\n# In the TF world, we represent float0 as zeros of this type.\n_tf_np_dtype_for_float0 = np.int32\n\ndef _to_tf_dtype(jax_dtype):\n # Note that converting _to_tf_dtype and _to_jax_dtype are not inverses,\n # due to float0 and 64-bit behavior.\n if jax_dtype == dtypes.float0:\n jax_dtype = _tf_np_dtype_for_float0\n return tf.dtypes.as_dtype(jax_dtype)\n\n\ndef _to_jax_dtype(tf_dtype):\n # Note that converting _to_tf_dtype and _to_jax_dtype are not inverses,\n # due to float0 and 64-bit behavior.\n return dtypes.canonicalize_dtype(tf_dtype.as_numpy_dtype)\n\n\ndef _tfval_to_tensor_jax_dtype(val: TfVal,\n jax_dtype: Optional[DType] = None,\n memoize_constants=False) -> Tuple[TfVal, DType]:\n \"\"\"Converts a scalar, ndarray, or tf.Tensor to a tf.Tensor with proper type.\n\n If `jax_dtype` is missing, uses JAX typing rules.\n See README.md for details regarding 64-bit values.\n\n Args:\n val: a scalar, ndarray, tf.Tensor, or tf.Variable\n jax_dtype: an optional dtype to use. If missing, uses JAX type inference\n rules for constants.\n memoize_constants: whether to memoize TF constants. 
We can't do this\n everywhere, we may be outside of a conversion scope.\n\n Returns:\n a tuple with a tf.Tensor with the type as needed by JAX, and the JAX type.\n \"\"\"\n if isinstance(val, (tf.Tensor, tf.Variable)):\n jax_dtype = jax_dtype or _to_jax_dtype(val.dtype) # Give JAX a chance to pick the type\n conversion_dtype = _to_tf_dtype(jax_dtype)\n if conversion_dtype != val.dtype:\n return tf.cast(val, conversion_dtype), jax_dtype\n else:\n return val, jax_dtype\n else: # A constant\n jax_dtype = jax_dtype or xla.abstractify(val).dtype\n # TODO(document): We assume that the value of a constant does not\n # change through the scope of the function. But it may be an ndarray, ...\n # JAX has the same problem when generating HLO.\n const_key = (id(val), jax_dtype)\n # Since we use id(val) as a cache key, we have to make sure that we keep\n # the previous `val` alive. Otherwise, for an ndarray, it can get garbage\n # collected and reused for a different value, which would create correctness\n # issues. We keep the `val` alive by storing in the cache the pair\n # `(val, tf_val)`.\n do_memoize = (memoize_constants and np.shape(val) and _thread_local_state.constant_cache is not None)\n if do_memoize:\n _, tf_val = _thread_local_state.constant_cache.get(const_key, (None, None))\n else:\n tf_val = None\n if tf_val is None:\n conversion_dtype = _to_tf_dtype(jax_dtype)\n # The float0 type is not known to TF.\n if jax_dtype == dtypes.float0:\n val = np.zeros(np.shape(val), conversion_dtype.as_numpy_dtype)\n tf_val = tf.convert_to_tensor(val, dtype=conversion_dtype)\n if do_memoize:\n _thread_local_state.constant_cache[const_key] = (val, tf_val)\n return tf_val, jax_dtype\n\n\ndef _eval_shape(shape: Sequence[shape_poly.DimSize]) -> Sequence[TfVal]:\n assert all(map(lambda x: x is not None, shape)), (\n f\"Argument shape should be a valid JAX shape but got {shape}\")\n dim_vars, dim_values = util.unzip2(_thread_local_state.shape_env)\n eval_shape, dim_avals = shape_poly.get_shape_evaluator(dim_vars, shape)\n shape_values, _ = util.unzip2(_interpret_fun(lu.wrap_init(eval_shape),\n dim_values, dim_avals, \"\")) # type: ignore\n return shape_values\n\n\n# TODO(b/26854495): pylint doesn't understand slots and inheritance.\n# pylint: disable=assigning-non-slot\n\n\nclass TensorFlowTracer(core.Tracer):\n \"\"\"Tracer class that boxes a TF value and a JAX abstract value.\n\n In addition to the TF value we carry the JAX abstract value because there are\n two cases when it cannot be recovered from the value: (a) when the abstract\n value is core.abstract_unit, in which case the value is tf.nan; (b) when we\n are converting with polymorphic shapes, in which case the shape of the value\n may have dimensions set to `None`, which the JAX abstract value may contain\n more precise information.\n\n When the value has a partially-known shape, the dimensions marked as `None`\n must correspond to non-constant dimensions in the abstract value.\n\n See README.md for details.\n \"\"\"\n # val: TfVal\n # _aval: core.ShapedArray\n __slots__ = [\"val\", \"_aval\"]\n\n def __init__(self, trace: \"TensorFlowTrace\", val: TfVal,\n aval: core.AbstractValue):\n self._trace = trace\n self._aval = aval\n if aval is core.abstract_unit:\n self.val = val\n return\n\n if isinstance(val, (tf.Tensor, tf.Variable)):\n val_shape = val.shape\n\n if config.jax_enable_checks:\n assert len(self._aval.shape) == len(val_shape), f\"_aval.shape={self._aval.shape} different rank than val_shape={val_shape}\"\n # To compare types, we must handle 
float0 in JAX and x64 in TF\n if self._aval.dtype == dtypes.float0:\n assert _to_tf_dtype(self._aval.dtype) == val.dtype, f\"expected {self._aval.dtype} == {val.dtype}\"\n else:\n assert self._aval.dtype == _to_jax_dtype(val.dtype), f\"expected {self._aval.dtype} == {val.dtype}\"\n\n for aval_dim, val_dim in zip(self._aval.shape, val_shape): # type: ignore[attr-defined]\n if val_dim is None:\n assert shape_poly.is_poly_dim(aval_dim), f\"expected {self._aval.shape} == {val_shape}\" # type: ignore[attr-defined]\n elif not shape_poly.is_poly_dim(aval_dim):\n assert aval_dim == val_dim, f\"expected {self._aval.shape} == {val_shape}\" # type: ignore[attr-defined]\n else:\n # We have a TF value with known shape, and the abstract shape is a shape variable.\n try:\n aval_int = int(_eval_shape([aval_dim])) # type: ignore\n except (TypeError, KeyError):\n continue\n assert aval_int == val_dim, f\"expected {self._aval.shape} == {val_shape}. Found {aval_int} != {val_dim}.\" # type: ignore\n\n self.val = _tfval_to_tensor_jax_dtype(val,\n self._aval.dtype,\n memoize_constants=True)[0] # type: ignore[attr-defined]\n\n @property\n def aval(self):\n return self._aval\n\n def full_lower(self):\n return self\n\n\nclass TensorFlowTrace(core.Trace):\n \"\"\"Trace class that underlies the jax2tf transformation.\n\n We are going to ensure that jax2tf.convert is never nested inside other\n transformations. This is sufficient for intended use cases (converting\n fully-transformed JAX code). It also simplifies our job because we do not have\n to handle situations where we apply primitives on a mix of TF values and\n JAX tracers from an outer transformation. E.g., for addition both the TF\n values\n and the JAX tracers have an override and they get confused if they see values\n from the other world.\n\n Hence a TFT trace does not interact with non-TFT traces at lower-level. For\n higher-order control-flow primitives we invoke recursively\n _interpret_fun on the body of the conditional, which will create a nested TFT.\n\n We do want to allow transformations nested inside a TensorFlowTrace (TFT), but\n those will introduce their own MainTrace, and any operations involving those\n will be done on those traces, i.e., not a concern for TFT.\n \"\"\"\n def pure(self, val: Union[TfVal, core.Unit]) -> TensorFlowTracer:\n \"\"\"Lifts a non-Tracer into the TensorFlowTracer.\n\n This function may be called by way of trace.full_raise.\n\n The value may be a core.unit. During JAX transformations we sometimes\n produce a Jaxpr that has arguments of abstract value core.abstract_unit\n and results equal to core.unit. These are arguments and results that are\n not used in the computation.\n\n In TF world, we represent core.unit as NaN. This is safe, as these values\n should never be used.\n \"\"\"\n if val is core.unit:\n return TensorFlowTracer(self, tf.constant(np.nan, tf.float32),\n core.abstract_unit)\n else:\n tf_val, jax_dtype = _tfval_to_tensor_jax_dtype(val, memoize_constants=True)\n return TensorFlowTracer(\n self, val, core.ShapedArray(tf_val.shape, jax_dtype,\n weak_type=dtypes.is_weakly_typed(val)))\n\n def lift(self, val: core.Tracer) -> TensorFlowTracer:\n # This would be called when we need to raise a tracer from a lower-level\n # main into the TensorFlowTrace. 
Since the TensorFlowTrace is never nested\n # inside another transform, there are no lower-level main traces.\n assert False\n\n def sublift(self, val: TensorFlowTracer) -> TensorFlowTracer:\n # This is called when we need to raise a tracer from the same main,\n # but a lower sublevel. This could come from a nested jit.\n return TensorFlowTracer(self, val.val, val._aval)\n\n def process_primitive(self, primitive: core.Primitive,\n tracers: Sequence[TensorFlowTracer],\n params) -> TensorFlowTracer:\n impl, impl_needs_avals = self.get_primitive_impl(primitive)\n args_avals: Sequence[core.ShapedArray] = tuple(t.aval for t in tracers)\n # This is a bit conservative, doing abstract_eval even in op-by-op execution\n # but we needed it for, e.g., shape_polymorphism where only JAX's\n # abstract evaluation rules can properly track polymorphic shapes.\n # Unfortunately under op-by-op execution this is a rare occasion where we\n # need abstract evaluation.\n out_aval = primitive.abstract_eval(*args_avals, **params)\n args_tf: Sequence[TfVal] = [t.val for t in tracers]\n def invoke_impl() -> TfVal:\n if impl_needs_avals:\n return impl(\n *args_tf,\n _in_avals=args_avals, # type: ignore\n _out_aval=out_aval,\n **params)\n else:\n return impl(*args_tf, **params)\n\n if _thread_local_state.include_xla_op_metadata:\n op_metadata = xla.make_op_metadata(primitive, params,\n name_stack=_get_current_name_stack(),\n source_info=source_info_util.current())\n op_metadata_proto = xla_data_pb2.OpMetadata(\n op_type=op_metadata.op_type,\n op_name=op_metadata.op_name,\n source_file=op_metadata.source_file,\n source_line=op_metadata.source_line\n )\n with tf_ops.get_default_graph()._attr_scope(\n {\"_XlaOpMetadata\": attr_value_pb2.AttrValue(\n s=op_metadata_proto.SerializeToString())}):\n val_out = invoke_impl()\n else:\n val_out = invoke_impl()\n\n if primitive.multiple_results:\n out = [\n TensorFlowTracer(self, v, a)\n for v, a in zip(val_out, out_aval)\n ] # type: ignore\n else:\n out = TensorFlowTracer(self, val_out, out_aval) # type: ignore\n\n # Check that the impl rule returned a value of expected shape and dtype\n # TODO: adapt this to match polymorphic shapes\n if config.jax_enable_checks:\n if primitive.multiple_results:\n for o, expected_aval in zip(out, out_aval): # type: ignore\n assert o.aval.strip_weak_type() == expected_aval.strip_weak_type(), (\n f\"{primitive}: out.aval = {o.aval}; expected {expected_aval}\")\n else:\n assert out.aval == out_aval, ( # type: ignore\n f\"{primitive}: out.aval = {out.aval}; expected {out_aval}\"\n ) # type: ignore\n return out # type: ignore\n\n def process_call(self, call_primitive: core.Primitive, fun: lu.WrappedFun,\n tracers: Sequence[TensorFlowTracer], params):\n assert call_primitive.multiple_results\n vals: Sequence[TfVal] = [t.val for t in tracers]\n avals: Sequence[core.ShapedArray] = tuple(t.aval for t in tracers)\n interpreted_fun = _interpret_subtrace(fun, self.main, avals)\n extra_name_stack = None\n if call_primitive == core.named_call_p:\n extra_name_stack = util.wrap_name(params[\"name\"], \"named\")\n elif call_primitive == xla.xla_call_p:\n extra_name_stack = util.wrap_name(params[\"name\"], \"jit\")\n with _extended_name_stack(extra_name_stack):\n with core.new_sublevel():\n if call_primitive == core.named_call_p:\n with tf.name_scope(_sanitize_scope_name(params[\"name\"])):\n vals_out: Sequence[Tuple[TfVal, core.ShapedArray]] = \\\n interpreted_fun.call_wrapped(*vals)\n elif call_primitive == xla.xla_call_p:\n if _WRAP_JAX_JIT_WITH_TF_FUNCTION:\n 
# Make a nested tf.function(jit_compile=True)\n store_tf_res_avals = None\n def f_tf(*tf_args):\n nonlocal store_tf_res_avals\n tf_res_out: Sequence[Tuple[TfVal, core.ShapedArray]] = \\\n _call_wrapped_with_new_constant_cache(interpreted_fun, tf_args,\n fresh_constant_cache=False)\n tf_res_vals, tf_res_avals = util.unzip2(tf_res_out)\n store_tf_res_avals = tf_res_avals\n return tf_res_vals\n tf_vals_out = tf.function(f_tf, autograph=False, jit_compile=True)(*vals)\n vals_out = zip(tf_vals_out, store_tf_res_avals)\n else:\n vals_out = interpreted_fun.call_wrapped(*vals)\n else:\n vals_out = interpreted_fun.call_wrapped(*vals)\n return [TensorFlowTracer(self, v, a) for v, a in vals_out]\n\n def post_process_call(self, call_primitive: core.Primitive,\n out_tracers: Sequence[TensorFlowTracer], params):\n # We encountered a call primitive, e.g., remat_call_p, whose result\n # (out_tracers) include TensorFlowTracer that were not passed through\n # its arguments (captured from the environment).\n vals = tuple(t.val for t in out_tracers)\n main = self.main\n\n def todo(vals: Sequence[TfVal]):\n # TODO: is name_stack correct?\n trace = TensorFlowTrace(main, core.cur_sublevel())\n return [\n TensorFlowTracer(trace, v, out_tracer.aval)\n for v, out_tracer in zip(vals, out_tracers)\n ]\n\n return vals, todo\n\n def process_map(self, map_primitive, f, tracers, params):\n raise NotImplementedError(\"process_map\")\n\n def post_process_map(self, map_primitive, out_tracers, params):\n raise NotImplementedError(\"post_process_map\")\n\n def process_custom_jvp_call(self, prim, fun, jvp, tracers):\n # Drop the custom differentiation rule and act like a call primitive. This\n # behavior is desirable because jax2tf stages code out of the JAX system, so\n # there are no more JAX differentiation transformations to be applied.\n del jvp # Unused.\n return self.process_call(core.call_p, fun, tracers, {})\n\n def post_process_custom_jvp_call(self, out_tracers, _):\n assert False # unreachable assuming jax2tf runs with clean trace state\n\n def process_custom_vjp_call(self, prim, fun, fwd, bwd, tracers, out_trees):\n # Drop the custom differentiation rule and act like a call primitive. 
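# --- Illustrative sketch (not part of jax2tf): the wrapping pattern used above
# when _WRAP_JAX_JIT_WITH_TF_FUNCTION is set -- the interpreted body of a JAX
# `jit` is wrapped in tf.function(..., jit_compile=True) so that the jit
# boundary becomes an XLA-compiled TF function. `double` is a hypothetical
# stand-in for the interpreted function.
import tensorflow as tf

def double(x):
  return tf.math.multiply(x, 2.0)

compiled = tf.function(double, autograph=False, jit_compile=True)
print(compiled(tf.constant([1.0, 2.0])))  # tf.Tensor([2. 4.], shape=(2,), dtype=float32)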
This\n # behavior is desirable because jax2tf stages code out of the JAX system, so\n # there are no more JAX differentiation transformations to be applied.\n del fwd, bwd, out_trees # Unused.\n return self.process_call(core.call_p, fun, tracers, {})\n\n def post_process_custom_vjp_call(self, out_tracers, _):\n assert False # unreachable assuming jax2tf runs with clean trace state\n\n def post_process_custom_vjp_call_fwd(self, *_, **__):\n assert False # unreachable assuming jax2tf runs with clean trace state\n\n def get_primitive_impl(self, p: core.Primitive) -> Tuple[Callable, bool]:\n # Returns the primitive implementation and whether the implementation\n # takes abstract values (see definition of tf_impl_with_avals)\n if not _thread_local_state.enable_xla:\n try:\n return tf_impl_no_xla[p], True # Always require avals.\n except KeyError:\n pass\n try:\n return tf_impl[p], False\n except KeyError:\n try:\n return tf_impl_with_avals[p], True\n except KeyError as err:\n msg = \"TensorFlow interpretation rule for '{}' not implemented\"\n raise NotImplementedError(msg.format(p)) from err\n\ndef _unexpected_primitive(p: core.Primitive, *args, **kwargs):\n assert False, f\"Encountered unexpected primitive {p}\"\n\n\n# Call primitives are inlined\nfor unexpected in [core.call_p, core.named_call_p, xla.xla_call_p,\n partial_eval.remat_call_p, maps.xmap_p]:\n tf_impl[unexpected] = partial(_unexpected_primitive, unexpected)\n\n# Primitives that are not yet implemented must be explicitly declared here.\ntf_not_yet_impl = [\n \"clz\",\n \"igamma_grad_a\",\n \"random_gamma_grad\",\n \"reduce_precision\",\n \"schur\",\n \"name\",\n \"optimization_barrier\",\n \"unreachable\",\n\n # Not high priority?\n \"after_all\",\n \"all_to_all\",\n \"approx_top_k\",\n \"create_token\",\n \"custom_transpose_call\",\n \"custom_vmap_call\",\n \"infeed\",\n \"linear_call\",\n \"outfeed\",\n \"pmax_p\",\n \"pmin\",\n \"ppermute\",\n \"psum\",\n \"pmax\",\n \"pgather\",\n \"reduce_scatter\",\n \"axis_index\",\n \"pdot\",\n \"all_gather\",\n \"lu_pivots_to_permutation\",\n \"xla_pmap\",\n]\n\ntf_impl[ad_util.stop_gradient_p] = tf.stop_gradient\ntf_impl[ad_util.zeros_like_p] = tf.zeros_like\n\n\ndef _add(x: TfVal, y: TfVal) -> TfVal:\n return tf.raw_ops.AddV2(x=x, y=y)\n\n\ntf_impl[ad_util.add_jaxvals_p] = _add\ntf_impl[dispatch.device_put_p] = lambda x, device=None: x\ntf_impl[lax_internal.copy_p] = lambda x: x\n\ndef _neg(x: TfVal) -> TfVal:\n if x.dtype.is_unsigned:\n signed_dtype = _UNSIGNED_TO_SIGNED_TABLE[x.dtype]\n x_signed = tf.cast(x, signed_dtype)\n res_signed = tf.math.negative(x_signed)\n return tf.cast(res_signed, x.dtype)\n else:\n return tf.math.negative(x)\n\ntf_impl[lax.neg_p] = _neg\n\n\ndef _sign(x: TfVal) -> TfVal:\n if x.dtype.is_unsigned:\n # TF and XLA do not support tf.math.sign for unsigned types.\n return tf.where(\n tf.math.equal(x, 0), tf.constant(0, dtype=x.dtype),\n tf.constant(1, dtype=x.dtype))\n else:\n return tf.math.sign(x)\n\n\ntf_impl[lax.sign_p] = _sign\ntf_impl[lax.floor_p] = tf.math.floor\ntf_impl[lax.ceil_p] = tf.math.ceil\n\n\ndef _round(operand, *, rounding_method,\n _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray):\n if rounding_method is lax.RoundingMethod.AWAY_FROM_ZERO:\n # JAX uses a single HLO op Round here\n sign = _sign(operand)\n operand *= sign\n floor = tf.math.floor(operand)\n operand -= floor\n cond = tf.math.equal(operand, tf.constant(np.array(0.5), operand.dtype))\n return sign * (\n tf.where(cond, tf.constant(np.array(1), operand.dtype),\n 
tf.math.round(operand)) + floor)\n else: # rounding_method is RoundingMethod.TO_NEAREST_EVEN\n rounding_fun = _convert_jax_impl(\n lax_internal._round_to_nearest_even, multiple_results=False)\n return rounding_fun(operand, _in_avals=_in_avals, _out_aval=_out_aval)\n\ntf_impl_with_avals[lax.round_p] = _round\ntf_impl[lax.nextafter_p] = tf.math.nextafter\n\n\ndef _population_count(x):\n orig_dtype = x.dtype\n return tf.cast(tf.raw_ops.PopulationCount(x=x), orig_dtype)\n\n\ntf_impl[lax.population_count_p] = _population_count\ntf_impl[lax.is_finite_p] = tf.math.is_finite\n\n\ndef _abs(x: TfVal) -> TfVal:\n # TF and XLA do not support tf.math.abs for unsigned types.\n return tf.math.abs(x) if not x.dtype.is_unsigned else x\n\n\ntf_impl[lax.abs_p] = _abs\ntf_impl[lax.pow_p] = tf.math.pow\n\n\ndef _integer_pow(x, *, y: int, _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray):\n # Follows the implementation in lax._integer_pow_translation_rule\n if y == 0:\n return tf.broadcast_to(\n tf.constant(1, dtype=x.dtype, shape=()), _eval_shape(_out_aval.shape))\n is_reciprocal = y < 0\n if is_reciprocal:\n y = -y\n acc = None\n while y > 0:\n if y & 1:\n acc = x if acc is None else tf.math.multiply(acc, x)\n y >>= 1\n if y > 0:\n x = tf.math.multiply(x, x)\n return tf.math.reciprocal(acc) if is_reciprocal else acc\n\n\ntf_impl_with_avals[lax.integer_pow_p] = _integer_pow\ntf_impl[lax.exp_p] = tf.math.exp\ntf_impl[lax.expm1_p] = tf.math.expm1\ntf_impl[lax.log_p] = tf.math.log\ntf_impl[lax.log1p_p] = tf.math.log1p\ntf_impl[lax.tan_p] = tf.math.tan\ntf_impl[lax.tanh_p] = tf.math.tanh\ntf_impl[lax.sin_p] = tf.math.sin\ntf_impl[lax.sinh_p] = tf.math.sinh\ntf_impl[lax.cos_p] = tf.math.cos\ntf_impl[lax.cosh_p] = tf.math.cosh\ntf_impl_with_avals[lax.acos_p] = _convert_jax_impl(\n lax_internal.acos_impl, multiple_results=False)\ntf_impl_with_avals[lax.asin_p] = _convert_jax_impl(\n lax_internal.asin_impl, multiple_results=False)\ntf_impl_with_avals[lax.atan_p] = _convert_jax_impl(\n lax_internal.atan_impl, multiple_results=False)\n\ndef _atan2(y, x, **kwargs):\n if x.dtype.is_complex or y.dtype.is_complex:\n complex_component_dtype = {\n tf.complex64: tf.float32,\n tf.complex128: tf.float64\n }.get(y.dtype)\n zero = tf.constant(0, complex_component_dtype)\n one = tf.constant(1, complex_component_dtype)\n i = tf.complex(zero, one)\n return -i * tf.math.log((x + i * y)/tf.math.sqrt(x * x + y * y))\n else:\n return tf.math.atan2(y, x)\n\n\ntf_impl[lax.atan2_p] = _atan2\ntf_impl[lax.acosh_p] = tf.math.acosh\ntf_impl[lax.atanh_p] = tf.math.atanh\ntf_impl[lax.asinh_p] = tf.math.asinh\n\ntf_impl[lax.sqrt_p] = tf.math.sqrt\ntf_impl[lax.rsqrt_p] = tf.math.rsqrt\n\ndef _cbrt(x):\n return tf.math.sign(x) * tf.math.pow(tf.math.abs(x), 1/3)\n\ntf_impl[lax.cbrt_p] = _cbrt\n\ntf_impl[lax.lgamma_p] = tf.math.lgamma\ntf_impl[lax.digamma_p] = tf.math.digamma\ntf_impl[lax.igamma_p] = tf.math.igamma\ntf_impl[lax.igammac_p] = tf.math.igammac\ntf_impl[lax.regularized_incomplete_beta_p] = tf.math.betainc\ntf_impl[lax.erf_p] = tf.math.erf\ntf_impl[lax.erfc_p] = tf.math.erfc\ntf_impl[lax.erf_inv_p] = tf.math.erfinv\ntf_impl[lax.bessel_i0e_p] = tf.math.bessel_i0e\ntf_impl[lax.bessel_i1e_p] = tf.math.bessel_i1e\n\ntf_impl[lax.complex_p] = tf.complex\n\n\ndef _conj(x, **kwargs):\n # The only dtypes that are allowed are: float32, float64, complex64, and\n # complex128.\n if x.dtype == tf.float32:\n return tf.cast(x, tf.complex64)\n elif x.dtype == tf.float64:\n return tf.cast(x, tf.complex128)\n else:\n return 
tf.math.conj(x)\n\n\ntf_impl[lax.conj_p] = _conj\ntf_impl[lax.real_p] = tf.math.real\ntf_impl[lax.imag_p] = tf.math.imag\n\ntf_impl[lax.add_p] = _add\ntf_impl[lax.sub_p] = tf.math.subtract\ntf_impl[lax.mul_p] = tf.math.multiply\n\n\ndef _iota(*, dtype, shape, dimension):\n dtype = _to_tf_dtype(dtype)\n # Some dtypes are unsupported, like uint32, so we just fall back to int32.\n # TODO(mattjj, necula): improve tf.range dtype handling\n shape_tf = _eval_shape(shape)\n vec = tf.range(tf.cast(shape_tf[dimension], tf.int32), dtype=tf.int32)\n vec_shape = [-1 if i == dimension else 1 for i in range(len(shape))]\n return tf.cast(tf.broadcast_to(tf.reshape(vec, vec_shape), shape_tf), dtype)\n\n\ntf_impl[lax.iota_p] = _iota\n\n\ndef _div(lhs, rhs):\n if lhs.dtype.is_integer:\n quotient = tf.math.floordiv(lhs, rhs)\n select = tf.math.logical_and(\n tf.not_equal(_sign(lhs), _sign(rhs)),\n tf.not_equal(tf.math.floormod(lhs, rhs), 0))\n return tf.where(select, quotient + 1, quotient)\n else:\n return tf.math.truediv(lhs, rhs)\n\n\ndef _rem(lhs, rhs):\n return _sign(lhs) * tf.math.floormod(_abs(lhs), _abs(rhs))\n\n\ntf_impl[lax.div_p] = _div\ntf_impl[lax.rem_p] = _rem\n\n\ndef _minmax(x: TfVal, y: TfVal, *, is_min: bool,\n _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray,) -> TfVal:\n # For complex numbers use lexicographic ordering, like JAX\n if dtypes.issubdtype(x.dtype.as_numpy_dtype, np.complexfloating):\n return _convert_jax_impl(\n partial(lax_internal._minmax_complex_lowering,\n lax_cmp_pick_x=lax.lt if is_min else lax.gt),\n multiple_results=False)(x, y, _in_avals=_in_avals, _out_aval=_out_aval)\n elif x.dtype.as_numpy_dtype == np.bool_:\n return (tf.math.logical_and if is_min else tf.math.logical_or)(x, y)\n else:\n return (tf.math.minimum if is_min else tf.math.maximum)(x, y)\n\ndef _minmax_scalar(x: TfVal, y: TfVal, *, is_min: bool) -> TfVal:\n # For reducers we will need min/max for scalars only. In that case we\n # can construct the AbstractValues outselves, even in the presence of\n # shape polymorphism.\n assert len(x.shape) == 0 and len(y.shape) == 0, f\"x: {x.shape}, y: {y.shape}\"\n aval = core.ShapedArray((), _to_jax_dtype(x.dtype))\n return _minmax(x, y, is_min=is_min,\n _in_avals=[aval, aval], _out_aval=aval)\n\ntf_impl_with_avals[lax.max_p] = partial(_minmax, is_min=False)\ntf_impl_with_avals[lax.min_p] = partial(_minmax, is_min=True)\n\n# Map from TF signed types to TF unsigned types.\n_SIGNED_TO_UNSIGNED_TABLE = {\n tf.int8: tf.uint8,\n tf.int16: tf.uint16,\n tf.int32: tf.uint32,\n tf.int64: tf.uint64,\n}\n\n# Map from TF unsigned types to TF signed types.\n_UNSIGNED_TO_SIGNED_TABLE = {u: s for s, u in _SIGNED_TO_UNSIGNED_TABLE.items()}\n\n\n# Note: Bitwise operations only yield identical results on unsigned integers!\n# pylint: disable=protected-access\ndef _shift_right_arithmetic_raw(x, y):\n if x.dtype.is_unsigned:\n assert x.dtype == y.dtype\n orig_dtype = x.dtype\n signed_dtype = _UNSIGNED_TO_SIGNED_TABLE[orig_dtype]\n x = tf.cast(x, signed_dtype)\n y = tf.cast(y, signed_dtype)\n res = tf.bitwise.right_shift(x, y)\n return tf.cast(res, orig_dtype)\n else:\n return tf.bitwise.right_shift(x, y)\n\n\ndef _shift_right_arithmetic(x, y):\n # TF shift is \"implementation defined\" if the shift amount is negative\n # or larger or equal to the size of the value. 
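# --- Illustrative sketch (not part of jax2tf): the out-of-bounds behaviour that
# _shift_right_arithmetic below picks for compatibility with XLA -- an invalid
# shift amount is treated as a shift by (bit width - 1), so the result is 0 for
# non-negative inputs and -1 for negative inputs. Plain Python ints stand in
# for int32 tensors here.
def arithmetic_shift_right_xla(x, y, nbits=32):
  clamped = y if 0 <= y < nbits else nbits - 1
  return x >> clamped  # Python's >> on ints is already an arithmetic shift

assert arithmetic_shift_right_xla(-16, 2) == -4
assert arithmetic_shift_right_xla(-16, 99) == -1  # clamped to a shift by 31
assert arithmetic_shift_right_xla(16, 99) == 0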
We implement the XLA\n # semantics to return the shift by the max value (x_bits - 1).\n # TODO: it is likely better to add XlaOps for shifts\n x_bits = 8 * x.dtype.size\n clamp_y = tf.where(_shift_in_bounds(x, y), y, x_bits - 1)\n return _shift_right_arithmetic_raw(x, clamp_y)\n\n\ntf_impl[lax.shift_right_arithmetic_p] = _shift_right_arithmetic\n\n\ndef _shift_right_logical_raw(x, y):\n if x.dtype.is_unsigned:\n return tf.bitwise.right_shift(x, y)\n else:\n assert x.dtype == y.dtype\n orig_dtype = x.dtype\n unsigned_dtype = _SIGNED_TO_UNSIGNED_TABLE[orig_dtype]\n x = tf.cast(x, unsigned_dtype)\n y = tf.cast(y, unsigned_dtype)\n res = tf.bitwise.right_shift(x, y)\n return tf.cast(res, orig_dtype)\n\n\ndef _shift_right_logical(x, y):\n # TF shift is \"implementation defined\" if the shift amount is negative\n # or larger or equal to the size of the value. We implement the XLA semantics\n # to return 0.\n # TODO: it is likely better to add XlaOps for shifts\n return tf.where(\n _shift_in_bounds(x, y), _shift_right_logical_raw(x, y), tf.zeros_like(x))\n\n\ntf_impl[lax.shift_right_logical_p] = _shift_right_logical\n\n\ndef _shift_left(x, y):\n # TF shift is \"implementation defined\" if the shift amount is negative\n # or larger or equal to the size of the value. We implement the XLA semantics\n # to return 0.\n # TODO: it is likely better to add XlaOps for shifts\n return tf.where(\n _shift_in_bounds(x, y), tf.bitwise.left_shift(x, y), tf.zeros_like(x))\n\n\ntf_impl[lax.shift_left_p] = _shift_left\n\n\ndef _shift_in_bounds(x: TfVal, y: TfVal) -> TfVal:\n # Return the TF expression for when y is within bounds (0 <= y < |x|)\n x_bits = 8 * x.dtype.size\n # TF does not have comparisons for uint16 and uint32 (despite what the\n # documentation says)\n y_comp = tf.cast(\n y, _UNSIGNED_TO_SIGNED_TABLE[y.dtype]) if y.dtype.is_unsigned else y\n y_lt_x_bits = tf.math.less(y_comp, x_bits)\n y_ge_0 = tf.math.greater_equal(y_comp, 0)\n return tf.logical_and(y_lt_x_bits, y_ge_0)\n\n\ndef _not(x):\n \"\"\"Computes bitwise not with support for booleans.\n\n Numpy and JAX support bitwise not for booleans by applying a logical not!\n This means that applying bitwise_not yields an unexpected result:\n jnp.bitwise_not(jnp.array([True, False]))\n >> DeviceArray([False, True], dtype=bool)\n\n if you assume that booleans are simply casted to integers.\n jnp.bitwise_not(jnp.array([True, False]).astype(np.int32)).astype(bool)\n >> DeviceArray([True, True], dtype=bool)\n \"\"\"\n if x.dtype == tf.bool:\n return tf.logical_not(x)\n else:\n return tf.bitwise.invert(x)\n\n\ntf_impl[lax.not_p] = _not\n\n\ndef handle_boolean_args(f, argnums: Sequence[int], boolean_f=None):\n \"\"\"Computes functions with some bool args and bool results using int8.\n\n This is needed because some TF ops do not work for bool args, e.g.,\n inequalities, min/max.\n\n Args:\n f: a TF callable to wrap. It will be called with non-boolean arguments.\n argnums: the positional arguments that may be booleans.\n boolean_f: [Optional] a TF callable compatible with boolean\n arguments.\n\n Returns: a TF callable that can take a mix of boolean positional arguments\n (in the positions specified by `argnums`) and some non-boolean positional\n arguments. If there are no boolean arguments, just calls `f`. Otherwise,\n it calls `boolean_f` if defined. 
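# --- Illustrative sketch (not part of jax2tf): the fallback described in this
# docstring, reduced to NumPy. When no dedicated boolean implementation is
# supplied, boolean operands are viewed as int8, the numeric op runs, and the
# result is cast back to bool. Here `np.maximum` plays the role of the wrapped
# numeric op `f`.
import numpy as np

def maximum_with_bool_support(x, y):
  if x.dtype == np.bool_ and y.dtype == np.bool_:
    res = np.maximum(x.astype(np.int8), y.astype(np.int8))
    return res.astype(np.bool_)  # max over {0, 1} behaves like logical OR
  return np.maximum(x, y)

a = np.array([True, False, False])
b = np.array([False, False, True])
assert (maximum_with_bool_support(a, b) == np.array([True, False, True])).all()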
Otherwise, casts the boolean\n arguments to `int8`, calls `f`, then casts the result to `bool`.\n \"\"\"\n argnums = tf.nest.flatten(argnums)\n\n def wrapper(*args: TfVal, **kwargs):\n argnum_types = {args[i].dtype for i in argnums}\n if tf.bool not in argnum_types:\n return f(*args, **kwargs)\n else:\n # All argnums should be boolean\n assert len(argnum_types) == 1, argnum_types\n if boolean_f != None:\n return boolean_f(*args, **kwargs)\n else:\n args_cast = [(tf.cast(a, tf.int8) if i in argnums else a)\n for i, a in enumerate(args)]\n if \"_in_avals\" in kwargs:\n\n def cast_aval(aval):\n assert aval.dtype == np.bool_\n return core.ShapedArray(aval.shape, np.int8)\n\n _in_avals_cast = [\n cast_aval(aval) if i in argnums else aval\n for i, aval in enumerate(kwargs[\"_in_avals\"])\n ]\n _out_aval_cast = tf.nest.map_structure(cast_aval, kwargs[\"_out_aval\"])\n kwargs = dict(\n kwargs, _in_avals=_in_avals_cast, _out_aval=_out_aval_cast)\n out = f(*args_cast, **kwargs)\n return tf.nest.map_structure(lambda o: tf.cast(o, tf.bool), out)\n\n return wrapper\n\n\ntf_impl[lax.or_p] = handle_boolean_args(tf.bitwise.bitwise_or, argnums=(0, 1), boolean_f=tf.logical_or)\ntf_impl[lax.and_p] = handle_boolean_args(tf.bitwise.bitwise_and, argnums=(0, 1), boolean_f=tf.logical_and)\ntf_impl[lax.xor_p] = handle_boolean_args(tf.bitwise.bitwise_xor, argnums=(0, 1), boolean_f=tf.math.logical_xor)\n\ntf_impl[lax.eq_p] = tf.math.equal\ntf_impl[lax.ne_p] = tf.math.not_equal\n\nboolean_greater = lambda x,y: tf.logical_and(x, tf.logical_not(y)) # Only one combo: T,F -> T\nboolean_less = lambda x,y: tf.logical_and(tf.logical_not(x), y) # Only one combo: F,T -> T\nboolean_greater_or_equal = lambda x, y: tf.logical_not(boolean_less(x,y)) # All cases except F,T\nboolean_less_or_equal = lambda x, y: tf.logical_not(boolean_greater(x,y)) # All cases except T,F\n\ntf_impl[lax.gt_p] = handle_boolean_args(tf.math.greater, argnums=(0, 1), boolean_f=boolean_greater)\ntf_impl[lax.lt_p] = handle_boolean_args(tf.math.less, argnums=(0, 1), boolean_f=boolean_less)\ntf_impl[lax.ge_p] = handle_boolean_args(tf.math.greater_equal, argnums=(0, 1), boolean_f=boolean_greater_or_equal)\ntf_impl[lax.le_p] = handle_boolean_args(tf.math.less_equal, argnums=(0, 1), boolean_f=boolean_less_or_equal)\n\ntf_impl[lax.linalg.cholesky_p] = tf.linalg.cholesky\n\n\ndef _convert_element_type(operand, *, new_dtype, weak_type=False):\n old_dtype = operand.dtype.as_numpy_dtype\n if (dtypes.issubdtype(old_dtype, np.complexfloating) and\n not dtypes.issubdtype(new_dtype, np.complexfloating)):\n operand = tf.math.real(operand)\n if (dtypes.issubdtype(old_dtype, np.floating) and\n not (dtypes.issubdtype(new_dtype, np.floating) or dtypes.issubdtype(\n new_dtype, np.complexfloating) or new_dtype == np.bool_)):\n sign = _sign(operand)\n operand = sign * tf.math.floor(sign * operand)\n return tf.dtypes.cast(operand, _to_tf_dtype(new_dtype))\n\n\ntf_impl[lax.convert_element_type_p] = _convert_element_type\n\n\ndef _bitcast_convert_type(operand, new_dtype):\n if operand.dtype == new_dtype:\n return operand\n return tf.bitcast(operand, _to_tf_dtype(new_dtype))\n\n\ntf_impl[lax.bitcast_convert_type_p] = _bitcast_convert_type\n\n\ndef _clamp(minval, operand, maxval, *, _in_avals, _out_aval):\n # The below permits mirroring the behavior of JAX when maxval < minval\n op_shape_tf_val = _eval_shape(_in_avals[1].shape)\n maxval = tf.broadcast_to(maxval, op_shape_tf_val)\n minval = tf.math.minimum(tf.broadcast_to(minval, op_shape_tf_val), maxval)\n return 
tf.clip_by_value(operand, minval, maxval)\n\n\ntf_impl_with_avals[lax.clamp_p] = _clamp\n\n\ndef _concatenate(*operands, dimension):\n return tf.concat(operands, axis=dimension)\n\n\ntf_impl[lax.concatenate_p] = _concatenate\n\n\ndef _conv_general_dimension_numbers_proto(dimension_numbers):\n \"\"\"Converts a ConvDimensionNumbers to an XLA ConvolutionDimensionNumbers.\"\"\"\n assert isinstance(dimension_numbers, lax.ConvDimensionNumbers)\n lhs_spec, rhs_spec, out_spec = dimension_numbers\n proto = xla_data_pb2.ConvolutionDimensionNumbers()\n proto.input_batch_dimension = lhs_spec[0]\n proto.input_feature_dimension = lhs_spec[1]\n proto.output_batch_dimension = out_spec[0]\n proto.output_feature_dimension = out_spec[1]\n proto.kernel_output_feature_dimension = rhs_spec[0]\n proto.kernel_input_feature_dimension = rhs_spec[1]\n proto.input_spatial_dimensions.extend(lhs_spec[2:])\n proto.kernel_spatial_dimensions.extend(rhs_spec[2:])\n proto.output_spatial_dimensions.extend(out_spec[2:])\n return proto\n\n\ndef _precision_config_proto(precision: Optional[Tuple[PrecisionType,\n PrecisionType]]):\n \"\"\"Convert an integer to an XLA.PrecisionConfig.\"\"\"\n if precision is None:\n return None\n\n proto = xla_data_pb2.PrecisionConfig()\n proto.operand_precision.append(int(precision[0]))\n proto.operand_precision.append(int(precision[1]))\n return proto\n\n\ndef _conv_general_dilated(lhs, rhs, *,\n window_strides, padding, lhs_dilation,\n rhs_dilation,\n dimension_numbers: lax.ConvDimensionNumbers,\n feature_group_count: int,\n batch_group_count: int,\n lhs_shape: Sequence[int],\n rhs_shape: Sequence[int],\n precision: Optional[Tuple[PrecisionType, PrecisionType]],\n preferred_element_type: Optional[DType],\n _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray):\n \"\"\"Implementation of lax.conv_general_dilated_p using XlaConv.\"\"\"\n out_tf_shape = _aval_to_tf_shape(_out_aval)\n dnums_proto = _conv_general_dimension_numbers_proto(dimension_numbers)\n precision_config_proto = _precision_config_proto(precision)\n\n def gen_conv(lhs, rhs, preferred_element_type: Optional[DType]):\n tf_version = tuple(int(v) for v in tf.__version__.split(\".\")[:2])\n if tf_version >= (2, 8):\n # TODO(necula): remove when 2.8.0 is the stable TF version (and supports\n # batch_group_count.\n out = tfxla.conv(\n lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation,\n dnums_proto,\n feature_group_count=feature_group_count,\n batch_group_count=batch_group_count,\n precision_config=precision_config_proto,\n preferred_element_type=preferred_element_type,\n use_v2=True)\n else:\n if batch_group_count != 1:\n raise ValueError(\n \"The batch_group_count parameter for conv requires TF version \"\n \"at least 2.8.0. You may want to use tf-nightly.\")\n out = tfxla.conv(\n lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation,\n dnums_proto,\n feature_group_count=feature_group_count,\n precision_config=precision_config_proto,\n preferred_element_type=preferred_element_type,\n use_v2=True)\n # TODO: implement shape inference for XlaConv\n out.set_shape(out_tf_shape)\n if _WRAP_JAX_JIT_WITH_TF_FUNCTION:\n out = tf.stop_gradient(out) # See #7839\n return out\n\n # Follow the lowering for complex convolutions from\n # lax._conv_general_dilated_translation. 
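# --- Illustrative sketch (not part of jax2tf): the three-product (Gauss) trick
# used by the complex-convolution path that follows. For scalars, with
# lhs = a + b*i and rhs = c + d*i:
#   k1 = (a + b) * c,  k2 = a * (d - c),  k3 = b * (c + d)
#   real part = k1 - k3,  imaginary part = k1 + k2
# which matches (a + b*i) * (c + d*i) using three real products instead of
# four; the conversion applies the same identity with real convolutions in
# place of products.
def complex_mul_three_products(a, b, c, d):
  k1 = (a + b) * c
  k2 = a * (d - c)
  k3 = b * (c + d)
  return k1 - k3, k1 + k2  # (real, imag)

re, im = complex_mul_three_products(1.0, 2.0, 3.0, 4.0)
assert complex(re, im) == (1 + 2j) * (3 + 4j)  # -5 + 10j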
We can use the same conversion on all\n # platforms because on XLA:TPU the compiler does the same as a rewrite.\n preferred_float_et: Optional[Any]\n if np.issubdtype(_in_avals[0].dtype, np.complexfloating):\n if preferred_element_type is not None:\n # Convert complex dtype to types used for real and imaginary parts\n assert np.issubdtype(preferred_element_type, np.complexfloating)\n preferred_float_et = (\n np.float64 if preferred_element_type == np.complex128 else np.float32)\n else:\n preferred_float_et = None\n lhs_real, lhs_imag = tf.math.real(lhs), tf.math.imag(lhs)\n rhs_real, rhs_imag = tf.math.real(rhs), tf.math.imag(rhs)\n k1 = gen_conv(_add(lhs_real, lhs_imag), rhs_real, preferred_float_et)\n k2 = gen_conv(lhs_real, tf.math.subtract(rhs_imag, rhs_real),\n preferred_float_et)\n k3 = gen_conv(lhs_imag, _add(rhs_real, rhs_imag), preferred_float_et)\n return tf.complex(tf.math.subtract(k1, k3), _add(k1, k2))\n else:\n return gen_conv(lhs, rhs, preferred_element_type)\n\n\ntf_impl_with_avals[lax.conv_general_dilated_p] = _conv_general_dilated\n\n\ndef _dot_general(lhs, rhs, *, dimension_numbers,\n precision: Optional[Tuple[PrecisionType, PrecisionType]],\n preferred_element_type: Optional[DType],\n _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray):\n \"\"\"Implementation of lax.dot_general_p in terms of tf.linalg.einsum.\"\"\"\n (lhs_contracting, rhs_contracting), (lhs_batch, rhs_batch) = dimension_numbers\n dnums_proto = xla_data_pb2.DotDimensionNumbers()\n dnums_proto.lhs_contracting_dimensions.extend(lhs_contracting)\n dnums_proto.rhs_contracting_dimensions.extend(rhs_contracting)\n dnums_proto.lhs_batch_dimensions.extend(lhs_batch)\n dnums_proto.rhs_batch_dimensions.extend(rhs_batch)\n precision_config_proto = _precision_config_proto(precision)\n res = tfxla.dot_general(\n lhs,\n rhs,\n dnums_proto,\n precision_config_proto,\n preferred_element_type=preferred_element_type,\n use_v2=True)\n if _WRAP_JAX_JIT_WITH_TF_FUNCTION:\n res = tf.stop_gradient(res) # See #7839\n return res\n\n\ntf_impl_with_avals[lax.dot_general_p] = _dot_general\n\n\ndef _broadcast_in_dim(operand, *, shape, broadcast_dimensions,\n _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray):\n # for i in range(len(operand.shape)):\n # result.shape[bcast_dims[i]] <- operand.shape[i]\n # bcast_dims must be strictly increasing.\n # len(bcast_dims) == len(operand.shape)\n op_shape = _in_avals[0].shape\n add_1s_shape = [1] * len(shape)\n for i, broadcast_dim_i in enumerate(broadcast_dimensions):\n add_1s_shape[broadcast_dim_i] = op_shape[i]\n with_1s = tf.reshape(operand, _eval_shape(add_1s_shape))\n return tf.broadcast_to(with_1s, _eval_shape(shape))\n\n\ntf_impl_with_avals[lax.broadcast_in_dim_p] = _broadcast_in_dim\n\n\ndef _reshape(operand, *, new_sizes, dimensions):\n if dimensions is None:\n dimensions = tf.range(tf.rank(operand))\n new_sizes_tf = _eval_shape(new_sizes)\n return tf.reshape(tf.transpose(operand, dimensions), new_sizes_tf)\n\n\ntf_impl[lax.reshape_p] = _reshape\n\n\ndef _squeeze(operand, *, dimensions, _in_avals, _out_aval):\n op_shape = _in_avals[0].shape\n new_shape = tuple(d for i, d in enumerate(op_shape) if i not in dimensions)\n new_shape_tf = _eval_shape(new_shape)\n return tf.reshape(operand, new_shape_tf)\n\n\ntf_impl_with_avals[lax.squeeze_p] = _squeeze\n\n\ndef _pad(operand, padding_value, *, padding_config,\n _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray):\n low, high, interior = util.unzip3(padding_config)\n out = 
tfxla.pad(operand, padding_value, low, high, interior)\n if _WRAP_JAX_JIT_WITH_TF_FUNCTION:\n out = tf.stop_gradient(out) # See #7839\n return out\n\n\ntf_impl_with_avals[lax.pad_p] = _pad\n\n\ndef _rev(operand, *, dimensions):\n return tf.reverse(operand, dimensions)\n\n\ntf_impl[lax.rev_p] = _rev\n\n\ndef _where(which, *cases):\n if which.dtype == tf.bool:\n assert len(cases) <= 2\n return cases if len(cases) == 1 else tf.where(which, cases[1], cases[0])\n\n def _select(offset, cases):\n assert len(cases) > 0\n if len(cases) == 1:\n return cases[0]\n mid = len(cases) // 2\n return tf.where(tf.less(which, offset + mid),\n _select(offset, cases[:mid]),\n _select(mid, cases[mid:]))\n\n return _select(0, cases)\n\n\ntf_impl[lax.select_n_p] = _where\n\n\ndef _transpose(operand, *, permutation):\n return tf.transpose(operand, perm=permutation)\n\n\ntf_impl[lax.transpose_p] = _transpose\n\naxes_to_axis = lambda func: lambda operand, axes: func(operand, axis=axes)\n\n# reduce_sum and reduce_prod are not supported for bool\ntf_impl[lax.reduce_sum_p] = axes_to_axis(tf.reduce_sum)\ntf_impl[lax.reduce_prod_p] = axes_to_axis(tf.reduce_prod)\ntf_impl[lax.reduce_max_p] = handle_boolean_args(\n axes_to_axis(tf.reduce_max), argnums=[0],\n boolean_f=axes_to_axis(tf.reduce_any)) # Max is T if any one is T\ntf_impl[lax.reduce_min_p] = handle_boolean_args(\n axes_to_axis(tf.reduce_min), argnums=[0],\n boolean_f=axes_to_axis(tf.reduce_all)) # Min is F if not all are T\ntf_impl[lax.reduce_or_p] = axes_to_axis(tf.reduce_any)\ntf_impl[lax.reduce_and_p] = axes_to_axis(tf.reduce_all)\n\n\ndef _argminmax(is_min: bool, operand: TfVal, axes: Sequence[int],\n index_dtype: DType,\n _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray):\n # Follow the JAX implementation, using a XlaReduce with a custom comparator\n if is_min:\n extra_name_stack = \"argmin\"\n value_comparator = lax.lt\n get_identity = lax_internal._get_min_identity\n else:\n extra_name_stack = \"argmax\"\n value_comparator = lax.gt\n get_identity = lax_internal._get_max_identity\n\n res = _convert_jax_impl(\n partial(lax_internal._compute_argminmax, value_comparator, get_identity),\n multiple_results=False,\n extra_name_stack=extra_name_stack)(\n operand,\n index_dtype=index_dtype,\n axes=axes,\n _in_avals=_in_avals,\n _out_aval=_out_aval)\n return res\n\n\ntf_impl_with_avals[lax.argmin_p] = partial(_argminmax, True)\ntf_impl_with_avals[lax.argmax_p] = partial(_argminmax, False)\n\n\n_add_fn = tf.function(_add, autograph=False)\n_ge_fn = tf.function(tf.math.greater_equal, autograph=False)\n\n\ndef _select_and_gather_add(\n tangents: TfVal, operand: TfVal, select_prim: core.Primitive,\n window_dimensions: Sequence[int], window_strides: Sequence[int],\n base_dilation: Sequence[int], window_dilation: Sequence[int],\n padding: Sequence[Tuple[int, int]], _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray):\n # Note: this function follows the pattern in\n # jax.lax._select_and_gather_add_translation.\n dtype = operand.dtype\n nbits = dtypes.finfo(dtype.as_numpy_dtype).bits\n\n # Specializing the function for 64 bits. 
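# --- Illustrative sketch (not part of jax2tf): the double-word packing that the
# pack/fst/snd helpers below implement with TF bitwise ops. Two 32-bit words
# are packed into one 64-bit word (the value in the high half, the tangent in
# the low half) so a single reduce_window can carry value/tangent pairs
# together. NumPy scalars stand in for tensors here.
import numpy as np

def pack(a, b):
  return (np.uint64(a) << np.uint64(32)) | np.uint64(b)

def fst(t):  # the high half, used for the comparison
  return np.uint32(t >> np.uint64(32))

def snd(t):  # the low half, returned as the gathered tangent
  return np.uint32(t & np.uint64(0xFFFFFFFF))

packed = pack(np.uint32(7), np.uint32(9))
assert fst(packed) == 7 and snd(packed) == 9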
Only up to 32 bits are supported on TPU,\n # we thus intend to let the code throw a different exception on this platform.\n max_bits = 64\n\n assert nbits <= max_bits\n double_word_reduction = nbits * 2 <= max_bits\n\n const = lambda dtype, x: tf.constant(np.array(x), dtype)\n\n if double_word_reduction:\n word_dtype = lax_internal._UINT_DTYPES[nbits]\n double_word_dtype = lax_internal._UINT_DTYPES[nbits * 2]\n\n # Packs two values into a tuple.\n def pack(a, b):\n a = _bitcast_convert_type(a, word_dtype)\n b = _bitcast_convert_type(b, word_dtype)\n a = _convert_element_type(a, new_dtype=double_word_dtype)\n b = _convert_element_type(b, new_dtype=double_word_dtype)\n a = tf.bitwise.left_shift(a, const(double_word_dtype, nbits))\n return tf.bitwise.bitwise_or(a, b)\n\n # Unpacks the first element of a tuple.\n def fst(t):\n assert t.dtype == double_word_dtype\n st = _shift_right_logical(t, const(double_word_dtype, nbits))\n return _bitcast_convert_type(\n _convert_element_type(st, new_dtype=word_dtype), dtype)\n\n # Unpacks the second element of a tuple.\n def snd(t):\n return _bitcast_convert_type(\n _convert_element_type(t, new_dtype=word_dtype), dtype)\n\n else:\n raise NotImplementedError(\n f\"TODO: need to pack {nbits * 2} bits but this platform can only go up to {max_bits} bits.\"\n )\n\n assert select_prim is lax.ge_p or select_prim is lax.le_p, select_prim\n\n def reducer(x, y):\n which = tf_impl[select_prim]\n return tf_impl[lax.select_n_p](which(fst(x), fst(y)), y, x)\n\n init = -np.inf if select_prim is lax.ge_p else np.inf\n init_identity = lambda x: pack(const(dtype, init), const(dtype, 0))\n\n out = _specialized_reduce_window(\n reducer,\n init_identity,\n pack(operand, tangents),\n window_dimensions=window_dimensions,\n window_strides=window_strides,\n padding=padding,\n base_dilation=base_dilation,\n window_dilation=window_dilation,\n _in_avals=_in_avals,\n _out_aval=_out_aval)\n\n return snd(out)\n\n\ntf_impl_with_avals[lax.select_and_gather_add_p] = _select_and_gather_add\n\n\ndef _get_shape_from_tensor_or_array(x):\n if isinstance(x.shape, tf.TensorShape):\n return tuple(x.shape.as_list())\n return tuple(x.shape)\n\n\ndef _common_reduce_window(operand, init_val, reducer, window_dimensions,\n window_strides, padding, base_dilation,\n window_dilation, _in_avals, _out_aval):\n o_spec = tf.TensorSpec((), dtype=operand.dtype)\n reducer_fn = tf.function(\n reducer, autograph=False).get_concrete_function(o_spec, o_spec)\n\n if not isinstance(init_val, (tf.Tensor, tf.Variable)):\n init_val = tf.constant(init_val, operand.dtype)\n out = tfxla.reduce_window(\n operand,\n init_val,\n reducer_fn,\n window_dimensions,\n window_strides,\n base_dilations=base_dilation,\n window_dilations=window_dilation,\n padding=padding)\n # TODO: implement shape inference for XlaReduceWindow\n out.set_shape(_aval_to_tf_shape(_out_aval))\n if _WRAP_JAX_JIT_WITH_TF_FUNCTION:\n out = tf.stop_gradient(out) # See #7839\n return out\n\n\ndef _reduce_window(*args, jaxpr, consts, window_dimensions,\n window_strides, padding, base_dilation, window_dilation,\n _in_avals, _out_aval):\n \"\"\"TensorFlow implementation of reduce_window.\n\n Args:\n operands: N dimensional arrays containing elements of type T\n init_values: starting values of the reduction\n jaxpr: the jaxpr corresponding to the reduction function\n consts: the constants associated with jaxpr.\n window_dimensions: array of integers for window dimension values\n window_strides: array of integers for window stride values\n padding: array of pairs 
of integers for padding values\n base_dilation: array of integers for base dilation values\n window_dilation: array of integers for window dilation values\n\n Returns:\n The reduced operand.\n \"\"\"\n assert len(consts) == 0, \"Reduction computation cannot have constants\"\n operands, init_values = util.split_list(args, [len(args) // 2])\n\n if len(operands) != 1:\n raise NotImplementedError(\"jax2tf does not support variadic reduce_window\")\n\n def reducer(arg1: TfVal, arg2: TfVal) -> TfVal:\n closed_jaxpr = core.ClosedJaxpr(jaxpr, consts)\n res, = _interpret_jaxpr(closed_jaxpr, arg1, arg2, extra_name_stack=None)\n return res\n\n return (_common_reduce_window(operands[0], init_values[0], reducer,\n window_dimensions, window_strides, padding,\n base_dilation, window_dilation, _in_avals,\n _out_aval[0]),)\n\n\n\ndef _specialized_reduce_window(reducer,\n identity,\n operand,\n *,\n window_dimensions,\n window_strides,\n padding,\n base_dilation,\n window_dilation,\n _in_avals,\n _out_aval,\n name=None):\n \"\"\"Wraps the TensorFlow reduce window operation based on a reducer and an\n\n identity function defining the initial value of the reduction depending on\n the dtype of the operand.\n\n Args:\n reducer: reduction function of type TfVal -> TfVal -> TfVal\n identity: function that takes a TensorFlow dtype as a parameter and returns\n the starting value of the reduction.\n operand: N dimensional array containing elements of type T\n window_dimensions: array of integers for window dimension values\n window_strides: array of integers for window stride values\n padding: array of pairs of integers for padding values\n base_dilation: array of integers for base dilation values\n window_dilation: array of integers for window dilation values\n name: the name of the specialized reduce window primitive for which this\n conversion function is called. 
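# --- Illustrative sketch (not part of jax2tf): what a one-dimensional
# reduce_window computes in the simplest setting (stride 1, no padding, no
# dilation). The real conversions above delegate to tfxla.reduce_window; this
# only makes the reducer/identity contract concrete.
import numpy as np

def reduce_window_1d(operand, init, reducer, window):
  out = []
  for start in range(len(operand) - window + 1):
    acc = init  # the identity for the reduction, e.g. -inf for max, 0 for sum
    for x in operand[start:start + window]:
      acc = reducer(acc, x)
    out.append(acc)
  return np.array(out)

x = np.array([1, 3, 2, 5, 4])
assert (reduce_window_1d(x, -np.inf, max, 2) == [3, 3, 5, 5]).all()
assert (reduce_window_1d(x, 0, lambda a, b: a + b, 3) == [6, 10, 11]).all()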
This information may help to choose a\n different conversion path (optional)\n\n Returns:\n The reduced operand.\n \"\"\"\n return _common_reduce_window(operand, identity(operand.dtype), reducer,\n window_dimensions, window_strides, padding,\n base_dilation, window_dilation, _in_avals,\n _out_aval)\n\n\ndef _get_max_identity(tf_dtype):\n numpy_tf_dtype = tf_dtype.as_numpy_dtype\n if tf_dtype == tf.bfloat16 or dtypes.issubdtype(numpy_tf_dtype, np.inexact):\n return numpy_tf_dtype(-np.inf)\n elif dtypes.issubdtype(numpy_tf_dtype, np.integer):\n return dtypes.iinfo(numpy_tf_dtype).min\n else:\n assert dtypes.issubdtype(\n numpy_tf_dtype, np.bool_), (f\"{tf_dtype} has no defined max identity\")\n return False\n\n\ndef _get_min_identity(tf_dtype):\n numpy_tf_dtype = tf_dtype.as_numpy_dtype\n if tf_dtype == tf.bfloat16 or dtypes.issubdtype(numpy_tf_dtype, np.inexact):\n return numpy_tf_dtype(np.inf)\n elif dtypes.issubdtype(numpy_tf_dtype, np.integer):\n return dtypes.iinfo(numpy_tf_dtype).max\n else:\n assert dtypes.issubdtype(\n numpy_tf_dtype, np.bool_), (f\"{tf_dtype} has no defined min identity\")\n return True\n\n\n# pylint: disable=protected-access\ntf_impl_with_avals[lax.reduce_window_sum_p] = (\n partial(_specialized_reduce_window, _add, lambda x: 0,\n name=\"reduce_window_sum\"))\ntf_impl_with_avals[lax.reduce_window_min_p] = (\n partial(_specialized_reduce_window,\n partial(_minmax_scalar, is_min=True),\n _get_min_identity,\n name=\"reduce_window_min\"))\ntf_impl_with_avals[lax.reduce_window_max_p] = (\n partial(_specialized_reduce_window,\n partial(_minmax_scalar, is_min=False),\n _get_max_identity,\n name=\"reduce_window_max\"))\ntf_impl_with_avals[lax.reduce_window_p] = _reduce_window\n# pylint: enable=protected-access\n\ndef _reduce(*operands: TfVal,\n computation: Callable,\n jaxpr: core.Jaxpr,\n consts: Sequence[Any],\n dimensions: Sequence[int],\n _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray) -> Sequence[TfVal]:\n del computation\n assert not consts\n assert len(operands) % 2 == 0\n # operands: op1, op2, ..., init_val1, init_val2, ...\n # reducer takes op1[i], op2[i], ..., init_val1, init_val2, ...\n nr_operands = len(operands) // 2\n init_vals = operands[nr_operands:]\n operands = operands[0:nr_operands]\n\n reducer_arg_spec = tuple([tf.TensorSpec((), op.dtype) for op in init_vals] * 2)\n\n def reducer_computation(*args: TfVal) -> TfVal:\n closed_jaxpr = core.ClosedJaxpr(jaxpr, consts)\n res = _interpret_jaxpr(closed_jaxpr, *args, extra_name_stack=None)\n return res\n\n xla_reducer_computation = (\n tf.function(reducer_computation,\n autograph=False).get_concrete_function(*reducer_arg_spec))\n\n outs = tfxla.variadic_reduce(operands, init_vals,\n dimensions_to_reduce=dimensions,\n reducer=xla_reducer_computation)\n if _WRAP_JAX_JIT_WITH_TF_FUNCTION:\n outs = tuple(tf.stop_gradient(out) for out in outs) # See #7839\n return outs\n\ntf_impl_with_avals[lax.reduce_p] = _reduce\n\n\n# We use lax._cumred_tpu_translation_rule to convert cummax,\n# cummin, cumsum and cumprod. This is efficient on TPU, but the complexity is\n# O(n^2) on other backends. 
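# --- Illustrative sketch (not part of jax2tf): the trade-off mentioned in the
# comment above. Computing a cumulative sum as an independent reduction per
# output position touches O(n^2) elements, while an associative
# (Hillis-Steele style) scan needs only O(log n) passes of O(n) work each.
import numpy as np

def cumsum_quadratic(x):
  return np.array([x[:i + 1].sum() for i in range(len(x))])  # O(n^2) reads

def cumsum_associative(x):
  x = np.asarray(x, dtype=np.int64)
  shift = 1
  while shift < len(x):
    shifted = np.concatenate([np.zeros(shift, dtype=x.dtype), x[:-shift]])
    x = x + shifted  # each pass combines elements `shift` apart
    shift *= 2
  return x

v = np.arange(1, 9)
assert (cumsum_quadratic(v) == cumsum_associative(v)).all()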
This may be implemented using associative_scan\n# instead to favor different backends.\ndef _cumred(lax_reduce_fn: Callable,\n lax_reduce_window_fn: Callable,\n extra_name_stack: str):\n if config.jax2tf_associative_scan_reductions:\n return _convert_jax_impl(partial(lax_control_flow.associative_scan,\n lax_reduce_fn),\n multiple_results=False,\n extra_name_stack=extra_name_stack)\n else:\n return _convert_jax_impl(partial(lax_control_flow._cumred_tpu_translation_rule,\n lax_reduce_window_fn),\n multiple_results=False,\n extra_name_stack=extra_name_stack)\n\n\ntf_impl_with_avals[lax.cummax_p] = _cumred(\n lax_reduce_window_fn=lax_windowed_reductions._reduce_window_max,\n lax_reduce_fn=lax.max,\n extra_name_stack=\"cummax\")\ntf_impl_with_avals[lax.cummin_p] = _cumred(\n lax_reduce_window_fn=lax_windowed_reductions._reduce_window_min,\n lax_reduce_fn=lax.min,\n extra_name_stack=\"cummin\")\ntf_impl_with_avals[lax.cumsum_p] = _cumred(\n lax_reduce_window_fn=lax_windowed_reductions._reduce_window_sum,\n lax_reduce_fn=lax.add,\n extra_name_stack=\"cumsum\")\ntf_impl_with_avals[lax.cumprod_p] = _cumred(\n lax_reduce_window_fn=lax_windowed_reductions._reduce_window_prod,\n lax_reduce_fn=lax.mul,\n extra_name_stack=\"cumprod\")\n\n\ndef _select_and_scatter(operand, source, init_value, select_jaxpr,\n select_consts, scatter_jaxpr, scatter_consts,\n window_dimensions, window_strides, padding):\n raise NotImplementedError(\"TODO: jax2tf can not convert _select_and_scatter\")\n\n\ntf_impl[lax.select_and_scatter_p] = _select_and_scatter\n\n\n@partial(handle_boolean_args, argnums=(0, 1))\ndef _select_and_scatter_add(source, operand, *, select_prim, window_dimensions,\n window_strides, padding, _in_avals, _out_aval):\n init_value = tf.zeros((), operand.dtype)\n select_fn = (\n tf.function(tf_impl[select_prim], autograph=False).get_concrete_function(\n init_value, init_value))\n scatter_fn = _add_fn.get_concrete_function(init_value, init_value)\n out = tfxla.select_and_scatter(operand, window_dimensions, window_strides,\n padding, source, init_value, select_fn,\n scatter_fn)\n out.set_shape(_aval_to_tf_shape(_out_aval))\n if _WRAP_JAX_JIT_WITH_TF_FUNCTION:\n out = tf.stop_gradient(out) # See #7839\n return out\n\n\ntf_impl_with_avals[lax.select_and_scatter_add_p] = _select_and_scatter_add\n\n\ndef _threefry2x32_jax_impl(*args: TfVal, _in_avals, _out_aval):\n res = _convert_jax_impl(\n partial(jax._src.prng._threefry2x32_lowering, use_rolled_loops=False),\n multiple_results=True, extra_name_stack=\"threefry\")(\n *args, _in_avals=_in_avals, _out_aval=_out_aval)\n return res\n\n\ntf_impl_with_avals[jax._src.prng.threefry2x32_p] = _threefry2x32_jax_impl\n\n# Use the vmap implementation, otherwise on TPU the performance is really bad\n# With use_vmap=True on, we get about the same performance for JAX and jax2tf.\ntf_impl_with_avals[random.random_gamma_p] = _convert_jax_impl(\n partial(jax._src.random._gamma_impl, use_vmap=True),\n multiple_results=False, extra_name_stack=\"random_gamma\")\n\n\ndef _rng_bit_generator(key: TfVal, *, shape, dtype, algorithm) -> Sequence[TfVal]:\n is_uint32_key = key.dtype == _to_tf_dtype(jnp.uint32)\n if is_uint32_key:\n key = tf.reshape(key, (2, 2))\n key = tfxla.bitcast_convert_type(key, _to_tf_dtype(jnp.uint64))\n shape_tf = _eval_shape(shape)\n # JAX uses XLA algorithm enums; tfxla uses tf.random.Algorithm\n if algorithm == lax.RandomAlgorithm.RNG_THREE_FRY:\n algorithm_tf = tf.random.Algorithm.THREEFRY\n elif algorithm == lax.RandomAlgorithm.RNG_PHILOX:\n algorithm_tf = 
tf.random.Algorithm.PHILOX\n elif algorithm == lax.RandomAlgorithm.RNG_DEFAULT:\n algorithm_tf = tf.random.Algorithm.AUTO_SELECT\n else:\n assert False\n (new_key, res) = tfxla.rng_bit_generator(algorithm_tf.value, key, shape_tf,\n dtype=_to_tf_dtype(dtype))\n if is_uint32_key:\n new_key = tfxla.bitcast_convert_type(new_key, _to_tf_dtype(jnp.uint32))\n new_key = tf.reshape(new_key, (4,))\n if _WRAP_JAX_JIT_WITH_TF_FUNCTION:\n # See #7839\n new_key = tf.stop_gradient(new_key)\n res = tf.stop_gradient(res)\n return new_key, res\n\n\ntf_impl[lax.rng_bit_generator_p] = _rng_bit_generator\n\n\ndef _rng_uniform(minval: TfVal, maxval: TfVal, *, shape) -> TfVal:\n shape_tf = _eval_shape(shape)\n return tf.random.uniform(shape_tf, minval=minval, maxval=maxval, dtype=minval.dtype)\n\ntf_impl[lax.rng_uniform_p] = _rng_uniform\n\n\ndef _gather_dimensions_proto(indices_shape, dimension_numbers):\n proto = xla_data_pb2.GatherDimensionNumbers()\n proto.offset_dims.extend(dimension_numbers.offset_dims)\n proto.collapsed_slice_dims.extend(dimension_numbers.collapsed_slice_dims)\n proto.start_index_map.extend(dimension_numbers.start_index_map)\n assert indices_shape\n proto.index_vector_dim = len(indices_shape) - 1\n return proto\n\n\n@partial(handle_boolean_args, argnums=[0])\ndef _gather(operand, start_indices, *, dimension_numbers, slice_sizes: core.Shape,\n indices_are_sorted, unique_indices, mode, fill_value,\n _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray):\n \"\"\"Tensorflow implementation of gather.\"\"\"\n if mode == lax.GatherScatterMode.FILL_OR_DROP:\n gather_fill_fn = _convert_jax_impl(lax_slicing._gather_fill,\n multiple_results=False)\n return gather_fill_fn(\n operand, start_indices, dimension_numbers=dimension_numbers,\n slice_sizes=slice_sizes, unique_indices=unique_indices,\n indices_are_sorted=indices_are_sorted, fill_value=fill_value,\n output_shape=_out_aval.shape, _in_avals=_in_avals, _out_aval=_out_aval)\n\n proto = _gather_dimensions_proto(start_indices.shape, dimension_numbers)\n slice_sizes_tf = _eval_shape(slice_sizes)\n out = tfxla.gather(operand, start_indices, proto, slice_sizes_tf,\n indices_are_sorted)\n out.set_shape(_aval_to_tf_shape(_out_aval))\n if _WRAP_JAX_JIT_WITH_TF_FUNCTION:\n out = tf.stop_gradient(out) # See #7839\n return out\n\n\ntf_impl_with_avals[lax.gather_p] = _gather\n\n\ndef _slice(operand, start_indices, limit_indices, strides, _in_avals,\n _out_aval):\n if strides is None:\n strides = [1] * len(start_indices)\n slices = tuple(\n map(slice, _eval_shape(start_indices), _eval_shape(limit_indices),\n _eval_shape(strides)))\n out = operand[slices]\n # TODO(b/184503314): improve shape inference for __getitem__\n # E.g., operand.shape=(b, 5, 3), start_indices=(0, 1, 1), limit_indices=(b, 5, 3), strides=(1, 2, 1)\n out.set_shape(_aval_to_tf_shape(_out_aval))\n return out\n\n\ntf_impl_with_avals[lax.slice_p] = _slice\n\n\ndef _dynamic_slice(operand, *start_indices, slice_sizes: core.Shape,\n _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray):\n start_indices = tf.stack(start_indices)\n slice_sizes_tf = _eval_shape(slice_sizes)\n\n res = tfxla.dynamic_slice(operand, start_indices, size_indices=slice_sizes_tf)\n if _WRAP_JAX_JIT_WITH_TF_FUNCTION:\n res = tf.stop_gradient(res) # See #7839\n return res\n\n\ntf_impl_with_avals[lax.dynamic_slice_p] = _dynamic_slice\n\n\ndef _dynamic_update_slice(operand, update, *start_indices,\n _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray):\n out = 
tfxla.dynamic_update_slice(operand, update, tf.stack(start_indices))\n if _WRAP_JAX_JIT_WITH_TF_FUNCTION:\n out = tf.stop_gradient(out) # See #7839\n return out\n\n\ntf_impl_with_avals[lax.dynamic_update_slice_p] = _dynamic_update_slice\n\n\ndef _scatter_dimensions_proto(indices_shape, dimension_numbers):\n proto = xla_data_pb2.ScatterDimensionNumbers()\n proto.update_window_dims.extend(dimension_numbers.update_window_dims)\n proto.inserted_window_dims.extend(dimension_numbers.inserted_window_dims)\n proto.scatter_dims_to_operand_dims.extend(\n dimension_numbers.scatter_dims_to_operand_dims)\n assert indices_shape\n proto.index_vector_dim = len(indices_shape) - 1\n return proto\n\n\ndef _scatter(operand, scatter_indices, updates, *, update_jaxpr, update_consts,\n dimension_numbers, indices_are_sorted, unique_indices, mode,\n _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray):\n del unique_indices\n\n if mode == lax.GatherScatterMode.CLIP:\n clip_fn = _convert_jax_impl(lax_slicing._clamp_scatter_indices,\n multiple_results=False)\n scatter_indices = clip_fn(\n operand, scatter_indices, updates, dnums=dimension_numbers,\n _in_avals=_in_avals, _out_aval=_in_avals[1])\n\n assert len(update_consts) == 0, \"Update computation cannot have constants\"\n\n proto = _scatter_dimensions_proto(scatter_indices.shape, dimension_numbers)\n\n def update_computation(arg1: TfVal, arg2: TfVal) -> TfVal:\n closed_jaxpr = core.ClosedJaxpr(update_jaxpr, update_consts)\n res, = _interpret_jaxpr(closed_jaxpr, arg1, arg2, extra_name_stack=None)\n return res\n\n o_spec = tf.TensorSpec((), dtype=operand.dtype)\n xla_update_computation = (\n tf.function(update_computation,\n autograph=False).get_concrete_function(o_spec, o_spec))\n out = tfxla.scatter(\n operand,\n scatter_indices,\n updates,\n xla_update_computation,\n proto,\n indices_are_sorted=indices_are_sorted)\n if _WRAP_JAX_JIT_WITH_TF_FUNCTION:\n out = tf.stop_gradient(out) # See #7839\n return out\n\n\ntf_impl_with_avals[lax.scatter_p] = _scatter\ntf_impl_with_avals[lax.scatter_min_p] = _scatter\ntf_impl_with_avals[lax.scatter_max_p] = _scatter\ntf_impl_with_avals[lax.scatter_mul_p] = _scatter\ntf_impl_with_avals[lax.scatter_add_p] = _scatter\n\n\ndef _cond(index: TfVal, *operands: TfVal, branches: Sequence[core.ClosedJaxpr],\n linear: Sequence[bool]) -> Sequence[TfVal]:\n del linear\n # tf.cond needs lambdas with no arguments.\n branches_tf = [\n partial(_interpret_jaxpr, jaxpr, *operands,\n # Same name stack as the XLA translation of cond_p\n extra_name_stack=f\"branch_{i}_fun\")\n for jaxpr in branches\n for i, jaxpr in enumerate(branches)\n ]\n return tf.switch_case(index, branches_tf)\n\n\ntf_impl[lax.cond_p] = _cond\n\n\ndef _while(*args: TfVal, cond_nconsts: int, cond_jaxpr: core.ClosedJaxpr,\n body_nconsts: int, body_jaxpr: core.ClosedJaxpr) -> Sequence[TfVal]:\n cond_consts, body_consts, init_carry = util.split_list(\n args, [cond_nconsts, body_nconsts])\n if cond_jaxpr.out_avals[0].shape: # type: ignore[attr-defined]\n # The conditional is not a scalar, this must be a batched while\n return _batched_cond_while(\n *args,\n cond_nconsts=cond_nconsts,\n cond_jaxpr=cond_jaxpr,\n body_nconsts=body_nconsts,\n body_jaxpr=body_jaxpr)\n\n # The conditional must return a single value to TF\n def cond_tf_func(*args: TfVal) -> TfVal:\n pred, = _interpret_jaxpr(cond_jaxpr, *cond_consts, *args,\n # Same name stack as the XLA translation of while_p\n extra_name_stack=\"while/cond\")\n return pred\n\n body_tf_func = partial(_interpret_jaxpr, 
body_jaxpr, *body_consts,\n extra_name_stack=\"while/body\")\n return tf.while_loop(cond_tf_func, body_tf_func, init_carry)\n\n\ndef _batched_cond_while(*args: TfVal, cond_nconsts: int,\n cond_jaxpr: core.ClosedJaxpr, body_nconsts: int,\n body_jaxpr: core.ClosedJaxpr) -> Sequence[TfVal]:\n \"\"\"Interprets a while_loop with a batched condition.\n\n A batched while has a conditional that returns a tensor of booleans, and\n a body that returns a list of tensors whose leading dimensions match those\n of the conditional tensor.\n\n We need to turn it into a while with scalar boolean conditional. We will\n expand the loop carry to include a prefix with the current tensor boolean\n condition. We prepend to the loop the first calculation of the tensor boolean\n condition. The loop condition will use a \"reduce_any\" to calculate a scalar\n boolean from the tensor boolean condition. The end of the loop body will\n compute the new carry using a \"tf.where\", and we compute the new tensor\n boolean condition.\n \"\"\"\n cond_consts, body_consts, init_carry = util.split_list(\n args, [cond_nconsts, body_nconsts])\n # Initial computation of batched condition\n init_pred_b, = _interpret_jaxpr(cond_jaxpr, *cond_consts, *init_carry,\n extra_name_stack=\"while/body_pred\")\n assert init_pred_b is not core.unit\n\n def new_cond_tf_func(pred_b: TfVal, *carry: TfVal) -> TfVal:\n pred = tf.reduce_any(pred_b, axis=list(range(len(pred_b.shape))))\n return pred\n\n def new_body_tf_func(pred_b: TfVal, *carry: TfVal) -> Sequence[TfVal]:\n new_carry: Sequence[TfVal] = _interpret_jaxpr(body_jaxpr, *body_consts,\n *carry,\n extra_name_stack=\"while/body\")\n # We repeat those carries for which the loop termination condition is false\n def select_one_carry(new_c: TfVal, c: TfVal, c_aval: core.ShapedArray) -> TfVal:\n pred_b_bcast = _broadcast_in_dim(\n pred_b,\n shape=c_aval.shape, # a JAX shape\n broadcast_dimensions=list(range(len(pred_b.shape))),\n _in_avals=cond_jaxpr.out_avals,\n _out_aval=core.ShapedArray(c_aval.shape, np.bool_))\n return tf.where(pred_b_bcast, new_c, c)\n\n selected_carry: Sequence[TfVal] = list(map(select_one_carry, new_carry, carry, body_jaxpr.out_avals))\n next_pred_b, = _interpret_jaxpr(cond_jaxpr, *cond_consts, *selected_carry,\n extra_name_stack=\"body_pred\")\n return (next_pred_b, *selected_carry)\n\n _, *res_carry = tf.while_loop(new_cond_tf_func, new_body_tf_func,\n (init_pred_b, *init_carry))\n return res_carry\n\n\ntf_impl[lax.while_p] = _while\n\n# We use the scan impl rule to rewrite in terms of while.\ntf_impl_with_avals[lax.scan_p] = _convert_jax_impl(\n lax_control_flow._scan_impl,\n extra_name_stack=\"scan\")\n\ntf_impl_with_avals[ad_checkpoint.remat_p] = \\\n _convert_jax_impl(partial(lax_control_flow._remat_translation_rule,\n # TODO: jax2tf cannot discriminate by platform\n platform=\"cpu\"),\n multiple_results=True,\n extra_name_stack=\"checkpoint\")\n\ndef _top_k(operand: TfVal, k: int) -> Tuple[TfVal, TfVal]:\n # Some types originally incompatible with tf.math.top_k can be promoted\n # to a compatible type without loss of precision.\n def promote_tf_dtype(tf_dtype):\n if tf_dtype in [tf.bool, tf.uint8, tf.uint16]:\n return tf.uint32\n if tf_dtype in [tf.int8, tf.int16]:\n return tf.int32\n if tf_dtype is tf.float16:\n return tf.float32\n return None\n\n conversion_dtype = promote_tf_dtype(operand.dtype)\n if conversion_dtype:\n values, indices = tf.math.top_k(\n tf.dtypes.cast(operand, conversion_dtype), k=k, sorted=True)\n return tf.dtypes.cast(values, operand.dtype), 
indices\n else:\n return tf.math.top_k(operand, k=k, sorted=True)\n\n\ntf_impl[lax.top_k_p] = _top_k\n\n\ndef _sort(*operands: TfVal, dimension: int, is_stable: bool,\n num_keys: int) -> Tuple[TfVal, ...]:\n assert 1 <= num_keys <= len(operands)\n assert 0 <= dimension < len(\n operands[0].shape\n ), f\"Invalid {dimension} for ndim {len(operands[0].shape)}\"\n\n comparator_spec: List[tf.TensorSpec] = []\n comparator_jax_in_avals: List[core.ShapedArray] = []\n for op in operands:\n o_spec = tf.TensorSpec((), dtype=op.dtype)\n comparator_spec.extend([o_spec, o_spec])\n o_aval = core.ShapedArray((), _to_jax_dtype(op.dtype))\n comparator_jax_in_avals.extend([o_aval, o_aval])\n\n # Use the same comparator that JAX uses when compiling to XLA, to get the\n # proper NaN/Inf total order, and the lexicographic ordering.\n # The comparator is a 2N-argument TF function, with arguments [2k] and [2k +1]\n # corresponding to two scalars from operand[k].\n def lexicographic_comparator(*tf_args: TfVal) -> TfVal:\n return _convert_jax_impl(\n lax_internal._sort_lt_comparator, multiple_results=False)(\n *tf_args,\n _in_avals=comparator_jax_in_avals,\n _out_aval=core.ShapedArray((), np.bool_),\n num_keys=num_keys)\n\n xla_comparator_computation = (\n tf.function(lexicographic_comparator,\n autograph=False).get_concrete_function(*comparator_spec))\n results = tfxla.variadic_sort(\n operands,\n dimension=dimension,\n is_stable=is_stable,\n comparator=xla_comparator_computation)\n if _WRAP_JAX_JIT_WITH_TF_FUNCTION:\n results = tuple(tf.stop_gradient(out) for out in results) # See #7839\n return results\n\n\ntf_impl[lax.sort_p] = _sort\n\n\ndef _fft(x, fft_type, fft_lengths):\n FFT, IFFT, RFFT, IRFFT = list(map(xla_client.FftType, [0, 1, 2, 3]))\n if fft_type == IRFFT:\n expected_lengths = x.shape[-len(fft_lengths):-1] + ((x.shape[-1] - 1) * 2,)\n else:\n expected_lengths = x.shape[-len(fft_lengths):]\n if expected_lengths != fft_lengths:\n raise NotImplementedError(\n f\"Unsupported fft_lengths={fft_lengths} for fft_type={fft_type} of \"\n f\"array with shape={x.shape}.\")\n tf_funcs = {\n FFT: [tf.signal.fft, tf.signal.fft2d, tf.signal.fft3d],\n IFFT: [tf.signal.ifft, tf.signal.ifft2d, tf.signal.ifft3d],\n RFFT: [tf.signal.rfft, tf.signal.rfft2d, tf.signal.rfft3d],\n IRFFT: [tf.signal.irfft, tf.signal.irfft2d, tf.signal.irfft3d]\n }\n return tf_funcs[fft_type][len(fft_lengths) - 1](x)\n\n\ntf_impl[lax.fft_p] = _fft\n\n\ndef _qr(operand, full_matrices):\n return tf.linalg.qr(operand, full_matrices=full_matrices)\n\n\ntf_impl[lax.linalg.qr_p] = _qr\n\n\ndef _svd(operand, full_matrices, compute_uv):\n result = tf.linalg.svd(operand, full_matrices, compute_uv)\n if not compute_uv:\n return result,\n s, u, v = result\n return s, u, tf.linalg.adjoint(v)\n\n\ntf_impl[lax.linalg.svd_p] = _svd\n\n\ndef _eig(operand: TfVal, compute_left_eigenvectors: bool,\n compute_right_eigenvectors: bool):\n if compute_left_eigenvectors and compute_right_eigenvectors:\n # TODO(bchetioui): didn't find a 100% reliable, easy and satisfying way to\n # sort the left eigenvectors in the right order. 
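# --- Illustrative sketch (not part of jax2tf): the convention gap that the
# tf.linalg.adjoint(v) call in _svd above bridges. NumPy (like the JAX
# primitive) returns the adjoint V^H directly, whereas tf.linalg.svd returns V
# itself, so the conversion adjoints it before handing it back to JAX.
import numpy as np

a = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
u, s, vh = np.linalg.svd(a, full_matrices=False)  # NumPy/JAX style: V^H
v = vh.conj().T                                   # the V that tf.linalg.svd would return
assert np.allclose(a, u @ np.diag(s) @ v.conj().T)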
The jax.numpy.linalg API\n # suggests to me that left eigenvectors are anyway seldom used, so I\n # think it is acceptable to leave as unimplemented for now.\n msg = (\"Conversion of eig is not implemented when both \"\n \"compute_left_eigenvectors and compute_right_eigenvectors are set \"\n \"to True.\")\n raise NotImplementedError(msg)\n elif not (compute_left_eigenvectors or compute_right_eigenvectors):\n return tuple([tf.linalg.eigvals(operand)])\n elif compute_right_eigenvectors:\n return tuple(tf.linalg.eig(operand))\n else: # compute_left_eigenvectors == True\n wH, vl = tf.linalg.eig(tf.linalg.adjoint(operand))\n wHH = tf.math.conj(wH)\n return tuple([wHH, vl])\n\n\ntf_impl[lax.linalg.eig_p] = _eig\n\n\ndef _eigh(operand: TfVal, lower: bool, _in_avals, _out_aval):\n if operand.shape[-1] == 0:\n v, w = operand, tf.reshape(operand, _eval_shape(_in_avals[0].shape[:-1]))\n else:\n if not lower:\n operand = tf.linalg.adjoint(operand)\n w, v = tf.linalg.eigh(operand)\n cast_type = {\n tf.complex64: tf.float32,\n tf.complex128: tf.float64\n }.get(operand.dtype)\n if cast_type is not None:\n w = tf.cast(w, cast_type)\n return v, w\n\n\ntf_impl_with_avals[lax.linalg.eigh_p] = _eigh\n\n\ndef _lu(operand: TfVal, _in_avals, _out_aval):\n return _convert_jax_impl(lax_linalg._lu_python, extra_name_stack=\"lu\")(\n operand, _in_avals=_in_avals, _out_aval=_out_aval)\n\n\ntf_impl_with_avals[lax.linalg.lu_p] = _lu\n\n\ndef _triangular_solve(a: TfVal, b: TfVal, *, left_side: bool, lower: bool,\n transpose_a: bool, conjugate_a: bool, unit_diagonal: bool,\n _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray):\n if unit_diagonal:\n a_aval, _ = _in_avals\n a_shape = _eval_shape(a_aval.shape)\n a = tf.linalg.set_diag(a, tf.ones(a_shape[:-1], dtype=a.dtype))\n if not left_side:\n rank = len(a.shape)\n transpose_dimensions = list(range(rank - 2)) + [rank - 1, rank - 2]\n a = tf.transpose(a, transpose_dimensions)\n b = tf.transpose(b, transpose_dimensions)\n lower = not lower\n # adjoint == transpose for real dtypes, so special care need only be taken\n # for complex types.\n if a.dtype in [tf.complex64, tf.complex128]:\n if (transpose_a and not conjugate_a) or (not transpose_a and conjugate_a):\n a = tf.math.conj(a)\n result = tf.linalg.triangular_solve(a, b, lower=lower, adjoint=transpose_a)\n if not left_side:\n result = tf.transpose(result, transpose_dimensions)\n return result\n\n\ntf_impl_with_avals[lax.linalg.triangular_solve_p] = _triangular_solve\n\n\ndef _linear_solve(*args: TfVal, const_lengths, jaxprs, _in_avals, _out_aval):\n return _convert_jax_impl(lax_control_flow._custom_linear_solve_impl,\n extra_name_stack=\"linear_solve\")(\n *args,\n const_lengths=const_lengths,\n jaxprs=jaxprs,\n _in_avals=_in_avals,\n _out_aval=_out_aval)\n\n\ntf_impl_with_avals[lax.linear_solve_p] = _linear_solve\n\ndef _tridiagonal_solve(*args: TfVal, _in_avals, _out_aval, **params):\n return _convert_jax_impl(lax_linalg._tridiagonal_solve_jax,\n multiple_results=False,\n extra_name_stack=\"tridiagonal_solve\")(\n *args,\n _in_avals=_in_avals,\n _out_aval=_out_aval)\n\n\ntf_impl_with_avals[lax.linalg.tridiagonal_solve_p] = _tridiagonal_solve\n\ndef _custom_jvp_call_jaxpr(*args: TfVal, fun_jaxpr: core.ClosedJaxpr,\n jvp_jaxpr_thunk: Callable,\n num_consts: int) -> Sequence[TfVal]:\n # TODO(necula): ensure that there is no AD transformation in scope\n return _interpret_jaxpr(fun_jaxpr, *args, extra_name_stack=\"custom_jvp\")\n\n\ntf_impl[custom_derivatives.custom_jvp_call_jaxpr_p] = 
_custom_jvp_call_jaxpr\n\n\ndef _custom_vjp_call_jaxpr(*args: TfVal, fun_jaxpr: core.ClosedJaxpr,\n **_) -> Sequence[TfVal]:\n # TODO(necula): ensure that there is no AD transformation in scope\n return _interpret_jaxpr(fun_jaxpr, *args, extra_name_stack=\"custom_vjp\")\n\n\ntf_impl[custom_derivatives.custom_vjp_call_jaxpr_p] = _custom_vjp_call_jaxpr\n\n\ndef _custom_lin(*args: TfVal, **_) -> Sequence[TfVal]:\n raise TypeError(\"can't apply forward-mode autodiff (jvp) to a custom_vjp \"\n \"function.\")\n\n\ntf_impl[ad.custom_lin_p] = _custom_lin\n\n\ndef split_to_logical_devices(tensor: TfVal,\n partition_dimensions: pxla.PartitionsOrReplicated):\n \"\"\"Like TPUMPStrategy.experimental_split_to_logical_devices.\n\n For jax2tf purposes we want to avoid needing to thread the `strategy` object\n through the generated computation. It seems that the original function needs\n the strategy object only for error checking, which we assume is done upstream\n by JAX.\n\n Args:\n tensor: Input tensor to annotate.\n partition_dimensions: A list of integers, with one integer per tensor\n dimension, specifying in how many parts the dimension should be split. The\n product of integers must equal the number of devices per replica.\n use_sharding_op: whether to use a sharding op, or not.\n\n Returns:\n an annotated tensor.\n \"\"\"\n # TODO: this is only for sharded_jit. Either remove, or implement in terms\n # of _shard_values.\n if partition_dimensions is None:\n return xla_sharding.replicate(tensor, use_sharding_op=True)\n num_partition_splits = np.prod(partition_dimensions)\n tile_assignment = np.arange(num_partition_splits).reshape(\n partition_dimensions)\n return xla_sharding.tile(tensor, tile_assignment, use_sharding_op=True)\n\n\ndef _shard_value(mesh: maps.Mesh,\n val: TfVal,\n aval: core.ShapedArray,\n axis_resources: pjit.ParsedPartitionSpec) -> TfVal:\n \"\"\"Apply sharding to a TfVal.\"\"\"\n sharding_proto: xla_client.OpSharding = pjit.get_aval_sharding_proto(\n aval, axis_resources, mesh)\n # To use xla_sharding.py, we must have a xla_data_pb2.OpSharding.\n xla_sharding_proto: xla_data_pb2.OpSharding = (\n xla_data_pb2.OpSharding(\n type=int(sharding_proto.type),\n tile_assignment_dimensions=sharding_proto.tile_assignment_dimensions,\n tile_assignment_devices=sharding_proto.tile_assignment_devices,\n replicate_on_last_tile_dim=sharding_proto.replicate_on_last_tile_dim,\n last_tile_dims=sharding_proto.last_tile_dims))\n return xla_sharding.Sharding(proto=xla_sharding_proto).apply_to_tensor(\n val, use_sharding_op=True)\n\n\ndef _pjit(*args: TfVal,\n jaxpr: core.ClosedJaxpr,\n in_axis_resources: Sequence[pjit.ParsedPartitionSpec],\n out_axis_resources: Sequence[pjit.ParsedPartitionSpec],\n resource_env: maps.ResourceEnv,\n donated_invars,\n name: str,\n in_positional_semantics,\n out_positional_semantics,\n _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray) -> TfVal:\n del donated_invars\n if resource_env.physical_mesh.is_multi_process:\n raise NotImplementedError(\"jax2tf translation for pjit over multi-process \"\n \"meshes is not supported yet\")\n # TODO: add `name` to the name stack\n shard_value_for_mesh = partial(_shard_value, resource_env.physical_mesh)\n # Apply sharding annotation to the arguments\n sharded_args: Sequence[TfVal] = tuple(\n map(shard_value_for_mesh, args, _in_avals, in_axis_resources))\n results = _interpret_jaxpr(jaxpr, *sharded_args,\n extra_name_stack=util.wrap_name(name, \"pjit\"))\n sharded_results: Sequence[TfVal] = tuple(\n 
map(shard_value_for_mesh, results, _out_aval, out_axis_resources))\n return tuple(sharded_results)\n\n\ntf_impl_with_avals[pjit.pjit_p] = _pjit\n\n\ndef _pjit_sharding_constraint(arg: TfVal, *,\n axis_resources: pjit.ParsedPartitionSpec,\n resource_env: maps.ResourceEnv,\n _in_avals: Sequence[core.ShapedArray],\n _out_aval: core.ShapedArray,\n **kwargs) -> TfVal:\n return _shard_value(resource_env.physical_mesh, arg, _in_avals[0], axis_resources)\n\n\ntf_impl_with_avals[pjit.sharding_constraint_p] = _pjit_sharding_constraint\n\ndef _dimension_size_jax2tf(op: TfVal, *, dimension):\n return tf.shape(op)[dimension]\n\ntf_impl[shape_poly.dimension_size_p] = _dimension_size_jax2tf\n\ndef _dim_as_value_jax2tf(dim: shape_poly.DimSize):\n dim_tf, = _eval_shape((dim,))\n return dim_tf\n\ntf_impl[shape_poly.dim_as_value_p] = _dim_as_value_jax2tf\n\ndef _register_checkpoint_pytrees():\n \"\"\"Registers TF custom container types as pytrees.\"\"\"\n m = tf.Module()\n # The types here are automagically changed by TensorFlow's checkpointing\n # infrastructure.\n m.a = (tf.Module(), tf.Module())\n m.b = [tf.Module(), tf.Module()]\n m.c = {\"a\": tf.Module()}\n tuple_wrapper = type(m.a)\n list_wrapper = type(m.b)\n dict_wrapper = type(m.c)\n\n # TF AutoTrackable swaps container types out for wrappers.\n assert tuple_wrapper is not tuple\n assert list_wrapper is not list\n assert dict_wrapper is not dict\n\n jax.tree_util.register_pytree_node(tuple_wrapper, lambda xs:\n (tuple(xs), None), lambda _, xs: tuple(xs))\n\n jax.tree_util.register_pytree_node(list_wrapper, lambda xs: (tuple(xs), None),\n lambda _, xs: list(xs))\n\n jax.tree_util.register_pytree_node(\n dict_wrapper,\n lambda s: (tuple(s.values()), tuple(s.keys())),\n lambda k, xs: dict(zip(k, xs)))\n\n\n_register_checkpoint_pytrees()\n"
] | [
[
"tensorflow.math.equal",
"tensorflow.logical_and",
"tensorflow.identity",
"tensorflow.__version__.split",
"tensorflow.linalg.eigh",
"tensorflow.function",
"tensorflow.compiler.tf2xla.python.xla.conv",
"tensorflow.bitwise.right_shift",
"tensorflow.math.greater_equal",
"tensorflow.compiler.tf2xla.python.xla.gather",
"tensorflow.linalg.eigvals",
"tensorflow.math.floor",
"tensorflow.compiler.xla.xla_data_pb2.DotDimensionNumbers",
"tensorflow.Module",
"tensorflow.math.real",
"tensorflow.while_loop",
"tensorflow.cast",
"tensorflow.math.floormod",
"tensorflow.compiler.xla.xla_data_pb2.ScatterDimensionNumbers",
"tensorflow.linalg.eig",
"tensorflow.math.round",
"numpy.array",
"tensorflow.compiler.xla.xla_data_pb2.PrecisionConfig",
"tensorflow.complex",
"numpy.issubdtype",
"tensorflow.compiler.tf2xla.python.xla.scatter",
"tensorflow.ones",
"tensorflow.convert_to_tensor",
"tensorflow.math.conj",
"tensorflow.raw_ops.AddV2",
"tensorflow.math.atan2",
"tensorflow.math.floordiv",
"tensorflow.compiler.tf2xla.python.xla.variadic_reduce",
"tensorflow.constant",
"tensorflow.shape",
"tensorflow.math.truediv",
"tensorflow.compiler.tf2xla.python.xla.dot_general",
"tensorflow.rank",
"tensorflow.math.sqrt",
"tensorflow.bitwise.bitwise_or",
"tensorflow.math.abs",
"tensorflow.math.imag",
"tensorflow.stop_gradient",
"tensorflow.bitwise.left_shift",
"tensorflow.compiler.tf2xla.python.xla.reduce_window",
"tensorflow.raw_ops.PopulationCount",
"tensorflow.compiler.xla.experimental.xla_sharding.xla_sharding.Sharding",
"tensorflow.dtypes.cast",
"tensorflow.compiler.tf2xla.python.xla.dynamic_slice",
"tensorflow.nest.flatten",
"tensorflow.less",
"tensorflow.linalg.svd",
"tensorflow.compiler.xla.xla_data_pb2.ConvolutionDimensionNumbers",
"tensorflow.transpose",
"tensorflow.compiler.xla.experimental.xla_sharding.xla_sharding.tile",
"tensorflow.compiler.xla.xla_data_pb2.GatherDimensionNumbers",
"tensorflow.math.sign",
"tensorflow.reverse",
"tensorflow.linalg.adjoint",
"tensorflow.zeros_like",
"numpy.arange",
"numpy.prod",
"tensorflow.math.negative",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.logical_not",
"tensorflow.bitwise.invert",
"tensorflow.random.uniform",
"tensorflow.linalg.triangular_solve",
"tensorflow.math.subtract",
"tensorflow.broadcast_to",
"tensorflow.reshape",
"tensorflow.switch_case",
"tensorflow.name_scope",
"tensorflow.concat",
"tensorflow.dtypes.as_dtype",
"tensorflow.compiler.xla.experimental.xla_sharding.xla_sharding.replicate",
"tensorflow.math.less",
"tensorflow.raw_ops.PreventGradient",
"tensorflow.device",
"tensorflow.clip_by_value",
"tensorflow.compiler.xla.xla_data_pb2.OpMetadata",
"tensorflow.math.reciprocal",
"tensorflow.stack",
"tensorflow.nest.map_structure",
"tensorflow.math.multiply",
"tensorflow.compiler.tf2xla.python.xla.variadic_sort",
"tensorflow.zeros",
"tensorflow.compiler.tf2xla.python.xla.pad",
"numpy.shape",
"tensorflow.compiler.tf2xla.python.xla.select_and_scatter",
"tensorflow.TensorSpec",
"tensorflow.where",
"tensorflow.linalg.qr",
"tensorflow.math.top_k"
]
] |
aakashsingh1210/greyatom-python-for-data-science | [
"c4df27416cfffa574107ba1a7a5dc96ba6d92172"
] | [
"data-visualization/code.py"
] | [
"# --------------\n#Importing header files\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n#Reading the file\ndata=pd.read_csv(path)\n\n#Code starts here\n\n# Step 1 \n#Reading the file\n\n\n#Creating a new variable to store the value counts\nloan_status=data['Loan_Status'].value_counts()\nloan_status.plot(kind=\"bar\")\n#Plotting bar plot\n\n\n\n# Step 2\n#Plotting an unstacked bar plot\nproperty_and_loan=data.groupby([\"Property_Area\",\"Loan_Status\"]).size().unstack()\nproperty_and_loan.plot(kind='bar', stacked=False, figsize=(15,10))\n# Label X-axes and Y-axes\nplt.xlabel('Property Area')\nplt.ylabel('Loan Status')\n# Rotate X-axes labels\nplt.xticks(rotation=45)\n\n#Changing the x-axis label\n\n\n#Changing the y-axis label\n\n\n#Rotating the ticks of X-axis\n\n\n# Step 3\n#Plotting a stacked bar plot\neducation_and_loan=data.groupby([\"Education\",\"Loan_Status\"]).size().unstack()\neducation_and_loan.plot(kind='bar', stacked=True, figsize=(15,10))\n# Label X-axes and Y-axes\nplt.xlabel('Education Status')\nplt.ylabel('Loan Status')\n# Rotate X-axes labels\nplt.xticks(rotation=45)\n\n\n#Changing the x-axis label\n\n\n#Changing the y-axis label\n\n\n#Rotating the ticks of X-axis\n\n\n# Step 4 \n#Subsetting the dataframe based on 'Education' column\ngraduate=data[data['Education'] == 'Graduate']\nnot_graduate=data[data['Education'] == 'Not Graduate']\n#Subsetting the dataframe based on 'Education' column\ngraduate['LoanAmount'].plot(kind='density',label='Graduate')\nnot_graduate['LoanAmount'].plot(kind='density',label='Not Graduate')\n#Plotting density plot for 'Graduate'\n\n\n#Plotting density plot for 'Graduate'\n\n\n#For automatic legend display\n\n\n# Step 5\n#Setting up the subplots\n# Initialize figure and axes\nfig, (ax_1, ax_2,ax_3) = plt.subplots(3,1, figsize=(20,10))\nax_1.scatter(data['ApplicantIncome'],data[\"LoanAmount\"])\nax_1.set_title('Applicant Income')\nax_2.scatter(data['CoapplicantIncome'],data[\"LoanAmount\"])\nax_2.set_title('Coapplicant Income')\ndata['TotalIncome']=data['ApplicantIncome']+data['CoapplicantIncome']\nax_3.scatter(data['TotalIncome'],data[\"LoanAmount\"])\nax_3.set_title('Total Income')\n\n\n#Setting the subplot axis title\n\n\n\n"
] | [
[
"matplotlib.pyplot.xticks",
"pandas.read_csv",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
]
] |
DCichon/straxen | [
"ffcf06ad86471caf11cc831f2ff68d70b59464af"
] | [
"straxen/analyses/pulse_plots.py"
] | [
"import os\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport strax\nimport straxen\n\n\[email protected]_analysis(requires=('raw_records',), warn_beyond_sec=5)\ndef plot_pulses_tpc(context, raw_records, run_id, time_range=None,\n plot_hits=False, plot_median=False,\n max_plots=20, store_pdf=False, path=''):\n plot_pulses(context, raw_records, run_id, time_range,\n plot_hits, plot_median,\n max_plots, store_pdf, path)\n\n\[email protected]_analysis(requires=('raw_records_mv',), warn_beyond_sec=5)\ndef plot_pulses_mv(context, raw_records_mv, run_id, time_range=None,\n plot_hits=False, plot_median=False,\n max_plots=20, store_pdf=False, path=''):\n plot_pulses(context, raw_records_mv, run_id, time_range,\n plot_hits, plot_median,\n max_plots, store_pdf, path, detector_ending='_mv')\n\n\[email protected]_analysis(requires=('raw_records_nv',), warn_beyond_sec=5)\ndef plot_pulses_nv(context, raw_records_nv, run_id, time_range=None,\n plot_hits=False, plot_median=False,\n max_plots=20, store_pdf=False, path=''):\n plot_pulses(context, raw_records_nv, run_id, time_range,\n plot_hits, plot_median,\n max_plots, store_pdf, path, detector_ending='_nv')\n\n\ndef plot_pulses(context, raw_records, run_id, time_range,\n plot_hits=False, plot_median=False,\n max_plots=20, store_pdf=False, path='',\n detector_ending=''):\n \"\"\"\n Plots nveto pulses for a list of records.\n :param context: Context to be used.\n :param plot_hits: If True plot hit boundaries including the left\n and right extension as orange shaded regions.\n :param plot_median: If true plots pulses sample median as dotted\n line.\n :param max_plots: Limits the number of figures. If you would like\n to plot more pulses you should put the plots in a PDF.\n :param store_pdf: If true figures are put to a PDF instead of\n plotting them to your notebook. The file name is automatically\n generated including the time range and run_id.\n :param path: Relative path where the PDF should be stored. By default\n it is the directory of the notebook.\n :param detector_ending: Ending of the corresponding detector. Empty\n string for TPC '_nv' for neutron-veto and '_mv' muon-veto. \n \"\"\"\n # Register records plugin to get settings\n p = context.get_single_plugin(run_id, 'records' + detector_ending)\n\n # Compute strax baseline and baseline_rms:\n records = strax.raw_to_records(raw_records)\n records = strax.sort_by_time(records)\n strax.zero_out_of_bounds(records)\n\n baseline_key = [key for key in p.config.keys() if 'baseline_samples' in key][0]\n\n if isinstance(p.config[baseline_key], int):\n baseline_samples = p.config[baseline_key]\n else:\n baseline_samples = straxen.get_correction_from_cmt(\n run_id, p.config[baseline_key])\n\n strax.baseline(records,\n baseline_samples=baseline_samples,\n flip=True)\n\n nfigs = 1\n if store_pdf and time_range is None:\n raise ValueError(f'Specify time range!')\n if store_pdf:\n from matplotlib.backends.backend_pdf import PdfPages\n fname = f'pulses_{run_id}_{time_range[0]}_{time_range[1]}.pdf'\n fname = os.path.join(path, fname)\n pdf = PdfPages(fname)\n\n hits = None # needed for delete if false\n\n for inds in _yield_pulse_indices(raw_records):\n # Grouped our pulse so now plot:\n rr_pulse = raw_records[inds]\n r_pulse = records[inds]\n\n fig, axes = straxen.plot_single_pulse(rr_pulse, run_id)\n if detector_ending == '_nv':\n # We only store for the nv digitizer baseline values:\n axes.axhline(rr_pulse[0]['baseline'], ls='dashed',\n color='k', label=f'D. 
Bas.: {rr_pulse[0][\"baseline\"]} ADC')\n\n baseline = r_pulse[0]['baseline']\n baseline_rms = r_pulse[0]['baseline_rms']\n axes.axhline(baseline, ls='solid',\n color='k',\n label=f'Strax Bas. +/-RMS:\\n ({baseline:.2f}+/-{baseline_rms:.2f}) ADC')\n xlim = axes.get_xlim()\n axes.fill_between(xlim,\n [baseline + baseline_rms] * 2,\n [baseline - baseline_rms] * 2,\n color='gray', alpha=0.4\n )\n\n # check type of p.hit_thresholds\n if isinstance(p.hit_thresholds, int):\n thr = p.hit_thresholds\n elif isinstance(p.hit_thresholds, np.ndarray):\n thr = p.hit_thresholds[rr_pulse['channel']][0]\n\n if plot_median:\n # Plot median if asked.\n # Have to make pulse again:\n pulse = straxen.matplotlib_utils._make_pulse(rr_pulse)\n median = np.median(pulse)\n axes.axhline(median,\n ls='dotted',\n color='k',\n label=f'Median Bas.: {median:.0f} ADC')\n\n axes.axhline(median - thr,\n ls='dotted', color='orange'\n )\n\n if plot_hits:\n min_amplitude = thr\n\n axes.axhline(baseline - min_amplitude,\n color='orange', label='Hitfinder threshold')\n\n hits = strax.find_hits(r_pulse,\n min_amplitude=min_amplitude\n )\n if detector_ending != '_he':\n # We don't have 'save_outside_hits_he' at all!\n le, re = p.config['save_outside_hits' + detector_ending]\n else:\n le, re = p.config['save_outside_hits']\n start = (hits['time'] - r_pulse[0]['time']) / r_pulse[0]['dt'] - le\n end = (strax.endtime(hits) - r_pulse[0]['time']) / r_pulse[0]['dt'] + re\n\n ylim = axes.get_ylim()\n for s, e in zip(start, end):\n plt.fill_between((s, e), *ylim, alpha=0.2, color='orange')\n axes.set_ylim(*ylim)\n\n plt.legend()\n axes.set_xlim(*xlim)\n\n if store_pdf:\n plt.close()\n pdf.savefig(fig)\n\n nfigs += 1\n if max_plots is not None and nfigs > max_plots:\n break\n\n if store_pdf:\n pdf.close()\n del records, hits\n\n\ndef _yield_pulse_indices(records):\n \"\"\"\n Function which yields indices of records which are within a pulse.\n Note:\n Only finds fragments of the pulse if record_i == 0 is within list\n of records.\n :yields: indices of fragments to make the corresponding pulse.\n \"\"\"\n # Get record links and find start indicies:\n _, next_ri = strax.record_links(records)\n start_ri = np.where(records['record_i'] == 0)[0]\n\n # Loop over pulse start_ri, group fragments by pulses yield for plot:\n for ri in start_ri:\n # Buffer for indices:\n inds = []\n\n tries = 1\n max_tries = 5000\n while ri != -1:\n inds.append(ri)\n ri = next_ri[ri]\n\n tries += 1\n if tries > max_tries:\n raise ValueError('Tried more than 5000 times to find subsequent record.'\n ' Am I stuck in a loop?')\n yield inds\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.backends.backend_pdf.PdfPages",
"numpy.median",
"matplotlib.pyplot.close",
"numpy.where",
"matplotlib.pyplot.fill_between"
]
] |
joeljgeo/landlab | [
"1d2651c76a8a36a7a132f139638192df1823f8fb"
] | [
"landlab/data_record/tests/conftest.py"
] | [
"import pytest\nimport numpy as np\nfrom landlab import RasterModelGrid\nfrom landlab.data_record import DataRecord\n\ngrid = RasterModelGrid((3,3))\n\[email protected]\ndef dr_time():\n time=[0.]\n data_vars={'mean_elevation' : (['time'], np.array([100]))}\n attrs={'time_units' : 'y'}\n return DataRecord(grid=grid,\n time=time,\n data_vars=data_vars,\n attrs=attrs)\n\n\[email protected]\ndef dr_item():\n my_items2 = {'grid_element': np.array(('node', 'link'), dtype=str),\n 'element_id': np.array([1, 3])}\n return DataRecord(grid=grid,\n items=my_items2)\n\[email protected]\ndef dr_2dim():\n time=[0.]\n my_items3 = {'grid_element':np.array([['node'], ['link']]),\n 'element_id': np.array([[1],[3]])}\n my_data_vars = {'mean_elevation' : (['time'], [110.]),\n 'item_size' : (['item_id', 'time'],\n np.array([[0.3], [0.4]]))}\n return DataRecord(grid=grid,\n time=time,\n items=my_items3,\n data_vars=my_data_vars)\n\[email protected]\ndef dr_nodim():\n return DataRecord(grid=grid)\n\n"
] | [
[
"numpy.array"
]
] |
dmsuehir/mlt | [
"896654099af010a8afd80b613b0b6ec96633e401"
] | [
"mlt-templates/tf-dist-mnist/main.py"
] | [
"#\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2018 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# SPDX-License-Identifier: EPL-2.0\n#\n\n\nimport json\nimport logging\nimport os\nimport numpy as np\nimport socket\nimport subprocess\nimport tensorflow as tf\nimport time\n\n# You can turn on the gRPC messages by setting the environment variables below\n# os.environ[\"GRPC_VERBOSITY\"]=\"DEBUG\"\n# os.environ[\"GRPC_TRACE\"] = \"all\"\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"2\" # Get rid of the AVX, SSE\n\n# Define parameters\nFLAGS = tf.app.flags.FLAGS\ntf.app.flags.DEFINE_float(\"learning_rate\", 0.2, \"Initial learning rate.\")\ntf.app.flags.DEFINE_integer(\"steps_to_validate\", 10,\n \"Validate and print loss after this many steps\")\ntf.app.flags.DEFINE_integer(\"is_sync\", 1, \"Synchronous updates?\")\ntf.app.flags.DEFINE_string(\"train_dir\", \"/output\", \"directory to write \"\n \"checkpoint files\")\ntf.app.flags.DEFINE_integer(\"num_epochs\", 5, \"number of epochs\")\ntf.app.flags.DEFINE_integer(\"batch_size\", 1024, \"batch size\")\n\n\ndef create_done_queue(i, worker_list):\n \"\"\"\n Queue used to signal termination of the i\"th ps shard.\n Each worker sets their queue value to 1 when done.\n The parameter server op just checks for this.\n \"\"\"\n\n with tf.device(\"/job:ps/task:{}\".format(i)):\n return tf.FIFOQueue(\n len(worker_list), tf.int32, shared_name=\"done_queue{}\".format(i))\n\n\ndef create_done_queues(ps_list, worker_list):\n return [create_done_queue(i, worker_list) for i in range(len(ps_list))]\n\n\ndef get_epoch(batch_size, x, y, num_classes):\n train_size = x.shape[0]\n image_width = x.shape[1]\n image_height = x.shape[2]\n image_channels = x.shape[3]\n\n epoch_length = train_size - train_size % batch_size\n batch_count = int(epoch_length / batch_size)\n\n # Shuffle and truncate arrays to equal 1 epoch\n zipped = list(zip(x, y))\n np.random.shuffle(zipped)\n data, labels = zip(*zipped)\n data = np.asarray(data)[:epoch_length]\n labels = np.asarray(labels)[:epoch_length]\n\n # Reshape arrays into batch_count batches of length batch_size\n data = data.reshape((batch_count, batch_size, image_width, image_height,\n image_channels))\n labels = labels.reshape((batch_count, batch_size, num_classes))\n\n # Join batches of training examples with batches of labels\n epoch_of_batches = list(zip(data, labels))\n\n return epoch_of_batches\n\n\ndef main(_):\n start_time = time.time()\n\n logging.info(\"TensorFlow version: %s\", tf.__version__)\n logging.info(\"TensorFlow git version: %s\", tf.__git_version__)\n\n tf_config_json = os.environ.get(\"TF_CONFIG\", \"{}\")\n tf_config = json.loads(tf_config_json)\n logging.info(\"tf_config: %s\", tf_config)\n\n task = tf_config.get(\"task\", {})\n task_index = task[\"index\"]\n job_name = task[\"type\"]\n logging.info(\"task: %s\", task)\n\n cluster_spec = tf_config.get(\"cluster\", {})\n logging.info(\"cluster_spec: %s\", cluster_spec)\n worker_list = cluster_spec.get(\"worker\", \"{}\")\n ps_list = 
cluster_spec.get(\"ps\", \"{}\")\n\n logging.info(\"job_name: {}\".format(job_name))\n logging.info(\"task_index: {}\".format(task_index))\n\n # Hyperparameters\n learning_rate = FLAGS.learning_rate\n steps_to_validate = FLAGS.steps_to_validate\n\n num_inter_op_threads = 1\n num_intra_op_threads = 1\n\n config = tf.ConfigProto(\n inter_op_parallelism_threads=num_inter_op_threads,\n intra_op_parallelism_threads=num_intra_op_threads)\n\n run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)\n run_metadata = tf.RunMetadata() # For Tensorflow trace\n\n cluster = tf.train.ClusterSpec(cluster_spec)\n server = tf.train.Server(cluster, job_name=job_name, task_index=task_index)\n\n is_sync = (FLAGS.is_sync == 1) # Synchronous or asynchronous updates\n is_chief = (task_index == 0) # Am I the chief node (always task 0)\n\n if job_name == \"ps\":\n\n sess = tf.Session(server.target, config=config)\n queue = create_done_queue(task_index, worker_list)\n\n logging.info(\"\\n\")\n logging.info(\"*\" * 30)\n logging.info(\"\\nParameter server #{} on this machine.\\n\\nWaiting on \"\n \"workers to finish.\\n\\nPress CTRL-\\\\ to terminate early.\"\n .format(task_index))\n logging.info(\"*\" * 30)\n\n # wait until all workers are done\n for i in range(len(worker_list)):\n sess.run(queue.dequeue())\n logging.info(\"Worker #{} reports job finished.\".format(i))\n\n logging.info(\"Parameter server #{} is quitting\".format(task_index))\n logging.info(\"Training complete.\")\n\n elif job_name == \"worker\":\n\n if is_chief:\n logging.info(\"I am chief worker {} with task #{}\".format(\n worker_list[task_index], task_index))\n else:\n logging.info(\"I am worker {} with task #{}\".format(\n worker_list[task_index], task_index))\n\n with tf.device(\n tf.train.replica_device_setter(\n worker_device=\"/job:worker/task:{}\".format(task_index),\n cluster=cluster)):\n global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n\n \"\"\"\n BEGIN: Data loader\n \"\"\"\n # Load pre-shuffled MNIST data into train and test sets\n (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.\\\n load_data()\n\n x_train = np.expand_dims(x_train, -1)\n x_test = np.expand_dims(x_test, -1)\n\n x_train = x_train / 255.0 # Scale everything between 0 and 1\n x_test = x_test / 255.0 # Scale everything between 0 and 1\n num_classes = 10 # 10 classes for MNIST (0-9)\n\n # One-hot encode the labels so that we can perform categorical\n # cross-entropy loss\n y_train = tf.keras.utils.to_categorical(y_train, num_classes)\n y_test = tf.keras.utils.to_categorical(y_test, num_classes)\n\n epoch = get_epoch(FLAGS.batch_size, x_train, y_train, num_classes)\n num_batches = len(epoch)\n\n logging.info(\"Data loaded: {} batches of size {}\".format(\n len(epoch), FLAGS.batch_size))\n\n \"\"\"\n END: Data loader\n \"\"\"\n\n \"\"\"\n BEGIN: Define our model\n \"\"\"\n # Set keras learning phase to train\n tf.keras.backend.set_learning_phase(True)\n\n # Don't initialize variables on the fly\n tf.keras.backend.manual_variable_initialization(False)\n\n # this placeholder will contain our input digits\n img = tf.placeholder(tf.float32, shape=(None, x_train.shape[1],\n x_train.shape[2], 1))\n\n inputs = tf.keras.layers.Input(tensor=img, name='Images')\n\n # Keras layers can be called on TensorFlow tensors:\n x = tf.keras.layers.Flatten()(inputs)\n layer_1 = tf.keras.layers.Dense(100, activation=\"linear\")(x)\n preds = tf.keras.layers.Dense(10, activation=\"softmax\")(\n layer_1) # output layer with 10 units and a softmax 
activation\n\n model = tf.keras.models.Model(inputs=[inputs], outputs=[preds])\n\n label = tf.placeholder(tf.float32, shape=(None, 10))\n\n loss_value = tf.reduce_mean(\n tf.keras.backend.categorical_crossentropy(label, preds))\n\n values, indices = tf.nn.top_k(preds, 10)\n table = tf.contrib.lookup.index_to_string_table_from_tensor(\n tf.constant([str(i) for i in range(10)]))\n prediction_classes = table.lookup(tf.to_int64(indices))\n\n with tf.name_scope('accuracy'):\n with tf.name_scope('correct_prediction'):\n correct_prediction = tf.equal(tf.argmax(preds, 1),\n tf.argmax(label, 1))\n with tf.name_scope('accuracy'):\n accuracy = tf.reduce_mean(tf.cast(correct_prediction,\n tf.float32))\n\n logging.info(\"Model defined:\")\n logging.info(model.summary())\n\n \"\"\"\n END: Define our model\n \"\"\"\n\n # Define gradient descent optimizer\n optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n\n grads_and_vars = optimizer.compute_gradients(\n loss_value, model.trainable_weights)\n\n if is_sync:\n rep_op = tf.train.SyncReplicasOptimizer(\n optimizer,\n replicas_to_aggregate=len(worker_list),\n total_num_replicas=len(worker_list),\n use_locking=True)\n\n train_op = rep_op.apply_gradients(\n grads_and_vars, global_step=global_step)\n\n init_token_op = rep_op.get_init_tokens_op()\n\n chief_queue_runner = rep_op.get_chief_queue_runner()\n\n else:\n train_op = optimizer.apply_gradients(\n grads_and_vars, global_step=global_step)\n\n init_op = tf.global_variables_initializer()\n\n saver = tf.train.Saver()\n\n # These are the values we wish to print to TensorBoard\n tf.summary.scalar(\"loss\", loss_value)\n tf.summary.histogram(\"loss\", loss_value)\n tf.summary.histogram(\"loss\", loss_value)\n tf.summary.scalar(\"accuracy\", accuracy)\n tf.summary.histogram(\"accuracy\", accuracy)\n tf.summary.histogram(\"accuracy\", accuracy)\n tf.summary.image(\"mnist_images\", img, max_outputs=5)\n\n # Send a signal to the ps when done by simply updating a queue in\n # the shared graph\n enq_ops = []\n for q in create_done_queues(ps_list, worker_list):\n qop = q.enqueue(1)\n enq_ops.append(qop)\n\n # Only the chief does the summary\n if is_chief:\n summary_op = tf.summary.merge_all()\n else:\n summary_op = None\n\n # TODO: Theoretically I can pass the summary_op into\n # the Supervisor and have it handle the TensorBoard\n # log entries. However, doing so seems to hang the code.\n # For now, I just handle the summary calls explicitly.\n sv = tf.train.Supervisor(\n is_chief=is_chief,\n logdir=os.path.join(FLAGS.train_dir, \"run\" +\n time.strftime(\"_%Y%m%d_%H%M%S\")),\n init_op=init_op,\n summary_op=None,\n saver=saver,\n global_step=global_step,\n save_model_secs=20\n ) # Save the model (with weights) every 60 seconds\n\n # TODO:\n # I'd like to use managed_session for this as it is more abstract\n # and probably less sensitive to changes from the TF team. 
However,\n # I am finding that the chief worker hangs on exit if I use\n # managed_session.\n with sv.prepare_or_wait_for_session(\n server.target, config=config) as sess:\n # with sv.managed_session(server.target) as sess:\n if is_chief and is_sync:\n sv.start_queue_runners(sess, [chief_queue_runner])\n sess.run(init_token_op)\n step = 0\n\n # Start TensorBoard on the chief worker\n if is_chief:\n cmd = 'tensorboard --logdir={}'.format(FLAGS.train_dir)\n tensorboard_pid = subprocess.Popen(cmd,\n stdout=subprocess.PIPE,\n shell=True,\n preexec_fn=os.setsid)\n chief_ip = socket.gethostbyname(socket.gethostname())\n logging.info(\"Chief node started TensorBoard http://{}:6006\".\n format(chief_ip))\n\n # Go for a few epochs of training\n NUM_STEPS = FLAGS.num_epochs * num_batches\n while (not sv.should_stop()) and (step < NUM_STEPS):\n batch_idx = step % num_batches # Which batch?\n\n data = epoch[batch_idx][0]\n labels = epoch[batch_idx][1]\n\n # For n workers, break up the batch into n sections\n # Send each worker a different section of the batch\n data_range = int(FLAGS.batch_size / len(worker_list))\n start = data_range * task_index\n end = start + data_range\n\n history, loss_v, acc_val, step = sess.run(\n [train_op, loss_value, accuracy, global_step],\n feed_dict={\n img: data[start:end],\n label: labels[start:end]\n })\n\n if step % steps_to_validate == 0:\n if is_chief:\n summary = sess.run(\n summary_op,\n feed_dict={\n img: data[start:end],\n label: labels[start:end]\n })\n\n sv.summary_computed(sess,\n summary) # Update the summary\n\n logging.info(\"[step: {:,} of {:,}] loss: {:.4f}, \"\n \"accuracy: {:.2f}\" .format(step, NUM_STEPS,\n loss_v, acc_val))\n\n # Shuffle every epoch\n if (batch_idx == 0) and (step > num_batches):\n logging.info(\"Shuffling epoch\")\n epoch = get_epoch(FLAGS.batch_size, x_train, y_train,\n num_classes)\n\n # Send a signal to the ps when done by simply updating a queue in\n # the shared graph\n for op in enq_ops:\n sess.run(\n op\n ) # Send the \"work completed\" signal to the parameter server\n\n logging.info(\"Finished work on this node.\")\n\n sv.request_stop()\n\n logging.info(\"Finished in {} seconds\".format(time.time() - start_time))\n\n\nif __name__ == \"__main__\":\n logging.getLogger().setLevel(logging.INFO)\n tf.app.run()\n"
] | [
[
"tensorflow.summary.scalar",
"tensorflow.keras.layers.Flatten",
"tensorflow.app.flags.DEFINE_string",
"tensorflow.summary.image",
"tensorflow.nn.top_k",
"numpy.asarray",
"tensorflow.keras.datasets.mnist.load_data",
"tensorflow.keras.models.Model",
"tensorflow.name_scope",
"tensorflow.app.flags.DEFINE_float",
"tensorflow.Variable",
"tensorflow.summary.histogram",
"tensorflow.global_variables_initializer",
"numpy.expand_dims",
"tensorflow.keras.layers.Dense",
"tensorflow.app.flags.DEFINE_integer",
"tensorflow.train.Server",
"tensorflow.keras.backend.manual_variable_initialization",
"tensorflow.app.run",
"tensorflow.cast",
"tensorflow.RunOptions",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.ConfigProto",
"tensorflow.keras.utils.to_categorical",
"numpy.random.shuffle",
"tensorflow.placeholder",
"tensorflow.keras.backend.categorical_crossentropy",
"tensorflow.summary.merge_all",
"tensorflow.to_int64",
"tensorflow.train.ClusterSpec",
"tensorflow.RunMetadata",
"tensorflow.keras.backend.set_learning_phase",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.argmax",
"tensorflow.keras.layers.Input"
]
] |
g5t/scipp | [
"d819c930a5e438fd65e42e2e4e737743b8d39d37"
] | [
"python/tests/nexus_helpers.py"
] | [
"from typing import List, Union, Iterator\nimport h5py\nimport numpy as np\nfrom contextlib import contextmanager\n\nh5root = Union[h5py.File, h5py.Group]\n\n\ndef find_by_nx_class(nx_class_name: str, root: h5root) -> List[h5py.Group]:\n \"\"\"\n Find groups in the HDF5 file or group which have the\n requested NX_class attribute\n Recursively searches all subgroups of the provided file or group\n\n :param nx_class_name: Name of the NX class, one of\n https://manual.nexusformat.org/classes/base_classes\n :param root: HDF5 file or group to search\n :return: List of groups matching requested NX_class\n \"\"\"\n groups_with_requested_nx_class = []\n\n def _match_nx_class(name, object):\n if isinstance(object, h5py.Group):\n try:\n try:\n nx_class = object.attrs[\"NX_class\"].decode(\"utf8\")\n except AttributeError:\n nx_class = object.attrs[\"NX_class\"]\n if nx_class == nx_class_name:\n groups_with_requested_nx_class.append(object)\n except AttributeError:\n pass\n\n root.visititems(_match_nx_class)\n return groups_with_requested_nx_class\n\n\n@contextmanager\ndef in_memory_nexus_file_with_event_data() -> Iterator[h5py.File]:\n \"\"\"\n Creates an in-memory NeXus file with an NXentry containing\n an NXevent_data group for use in tests\n \"\"\"\n def _create_nx_class(group_name: str, nx_class_name: str,\n parent: h5root) -> h5py.Group:\n nx_class = parent.create_group(group_name)\n nx_class.attrs[\"NX_class\"] = nx_class_name\n return nx_class\n\n # \"core\" driver means file is \"in-memory\" not on disk.\n # backing_store=False prevents file being written to\n # disk on flush() or close().\n nexus_file = h5py.File('in_memory_events.nxs',\n mode='w',\n driver=\"core\",\n backing_store=False)\n try:\n entry_group = _create_nx_class(\"entry\", \"NXentry\", nexus_file)\n event_group = _create_nx_class(\"events\", \"NXevent_data\", entry_group)\n\n # Add 5 events from 4 pulses\n event_group.create_dataset(\"event_id\", data=np.array([1, 2, 3, 1, 3]))\n event_time_offset_ds = event_group.create_dataset(\n \"event_time_offset\", data=np.array([456, 743, 347, 345, 632]))\n event_time_offset_ds.attrs[\"units\"] = \"ns\"\n event_time_zero_ds = event_group.create_dataset(\n \"event_time_zero\",\n data=np.array([\n 1600766730000000000, 1600766731000000000, 1600766732000000000,\n 1600766733000000000\n ]))\n event_time_zero_ds.attrs[\"units\"] = \"ns\"\n event_group.create_dataset(\"event_index\", data=np.array([0, 3, 3, 5]))\n\n yield nexus_file\n finally:\n nexus_file.close()\n"
] | [
[
"numpy.array"
]
] |
gabloa/probability | [
"7a0ce5e5beff91051028258dfbc7bc6cf0c4998d"
] | [
"tensorflow_probability/python/distributions/zipf_test.py"
] | [
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nimport numpy as np\nfrom scipy import stats\nimport tensorflow.compat.v1 as tf1\nimport tensorflow.compat.v2 as tf\nimport tensorflow_probability as tfp\n\nfrom tensorflow_probability.python.internal import test_util\n\ntfd = tfp.distributions\n\n\n@test_util.test_all_tf_execution_regimes\nclass ZipfTest(test_util.TestCase):\n\n def assertBetween(self, x, minimum, maximum):\n self.assertGreaterEqual(x, minimum)\n self.assertLessEqual(x, maximum)\n\n def assertAllBetween(self, a, minval, maxval, atol=1e-6):\n a = self._GetNdArray(a)\n minval = self._GetNdArray(minval)\n maxval = self._GetNdArray(maxval)\n\n self.assertEqual(a.shape, minval.shape)\n self.assertEqual(a.shape, maxval.shape)\n\n for idx, _ in np.ndenumerate(a):\n self.assertBetween(a[idx], minval[idx] - atol, maxval[idx] + atol)\n\n def testZipfShape(self):\n power = tf.constant([3.0] * 5)\n zipf = tfd.Zipf(power=power, validate_args=True)\n\n self.assertEqual(self.evaluate(zipf.batch_shape_tensor()), (5,))\n self.assertEqual(zipf.batch_shape, tf.TensorShape([5]))\n self.assertAllEqual(self.evaluate(zipf.event_shape_tensor()), [])\n self.assertEqual(zipf.event_shape, tf.TensorShape([]))\n\n def testInvalidPower(self):\n invalid_powers = [-.02, 0.5, -2., .99, 1.]\n for power in invalid_powers:\n with self.assertRaisesOpError(\"Condition x > y\"):\n zipf = tfd.Zipf(power=power, validate_args=True)\n self.evaluate(zipf.mean())\n\n def testNanPower(self):\n zipf = tfd.Zipf(power=np.nan, validate_args=False)\n self.assertAllNan(self.evaluate(zipf.power))\n\n def testValidPower_ImplicitlyConvertsToFloat32(self):\n powers = [2, 10, 1.1]\n for power in powers:\n zipf = tfd.Zipf(power=power, validate_args=True)\n self.assertEqual(zipf.power.dtype, tf.float32)\n\n def testEventDtype(self):\n for power_dtype in [tf.float32, tf.float64]:\n for event_dtype in [tf.int32, tf.int64, tf.float32, tf.float64]:\n power_dtype = tf.float32\n event_dtype = tf.int32\n power = tf.constant(5., dtype=power_dtype)\n zipf = tfd.Zipf(power=power, dtype=event_dtype, validate_args=True)\n self.assertEqual(zipf.dtype, event_dtype)\n self.assertEqual(\n zipf.dtype, zipf.sample(10, seed=test_util.test_seed()).dtype)\n self.assertEqual(\n zipf.dtype, zipf.sample(1, seed=test_util.test_seed()).dtype)\n self.assertEqual(zipf.dtype, zipf.mode().dtype)\n\n def testInvalidEventDtype(self):\n with self.assertRaisesWithPredicateMatch(\n TypeError, \"power.dtype .* not a supported .* type\"):\n power = tf.constant(5., dtype=tf.float16)\n zipf = tfd.Zipf(power=power, dtype=tf.int32, validate_args=True)\n self.evaluate(zipf.sample(seed=test_util.test_seed()))\n\n def testZipfLogPmf_InvalidArgs(self):\n power = tf.constant([4.0])\n # Non-integer samples are 
rejected if validate_args is True and\n # interpolate_nondiscrete is False.\n non_integer_samples = [0.99, 4.5, 5.001, 1e-6, -3, -2, -1]\n for x in non_integer_samples:\n zipf = tfd.Zipf(\n power=power, interpolate_nondiscrete=False, validate_args=True)\n\n with self.assertRaisesOpError(\"Condition (x == y|x >= 0)\"):\n self.evaluate(zipf.log_prob(x))\n\n with self.assertRaisesOpError(\"Condition (x == y|x >= 0)\"):\n self.evaluate(zipf.prob(x))\n\n def testZipfLogPmf_IntegerArgs(self):\n batch_size = 9\n power = tf.constant([3.0] * batch_size)\n power_v = 3.0\n x = np.array([-3., -0., 0., 2., 3., 4., 5., 6., 7.], dtype=np.float32)\n zipf = tfd.Zipf(power=power, validate_args=False)\n log_pmf = zipf.log_prob(x)\n self.assertEqual((batch_size,), log_pmf.shape)\n self.assertAllClose(self.evaluate(log_pmf), stats.zipf.logpmf(x, power_v))\n\n pmf = zipf.prob(x)\n self.assertEqual((batch_size,), pmf.shape)\n self.assertAllClose(self.evaluate(pmf), stats.zipf.pmf(x, power_v))\n\n def testZipfLogPmf_NonIntegerArgs(self):\n batch_size = 12\n power = tf.constant([3.0] * batch_size)\n power_v = 3.0\n x = [-3., -0.5, 0., 2., 2.2, 3., 3.1, 4., 5., 5.5, 6., 7.2]\n\n zipf = tfd.Zipf(power=power, validate_args=False)\n log_pmf = zipf.log_prob(x)\n self.assertEqual((batch_size,), log_pmf.shape)\n\n # Check that log_pmf(x) of tfd.Zipf is between the values of\n # stats.zipf.logpmf for ceil(x) and floor(x).\n log_pmf_values = self.evaluate(log_pmf)\n floor_x = np.floor(x)\n ceil_x = np.ceil(x)\n self.assertAllBetween(log_pmf_values, stats.zipf.logpmf(ceil_x, power_v),\n stats.zipf.logpmf(floor_x, power_v))\n\n # Check that pmf(x) of tfd.Zipf is between the values of stats.zipf.pmf for\n # ceil(x) and floor(x).\n pmf = zipf.prob(x)\n self.assertEqual((batch_size,), pmf.shape)\n\n pmf_values = self.evaluate(pmf)\n self.assertAllBetween(pmf_values, stats.zipf.pmf(ceil_x, power_v),\n stats.zipf.pmf(floor_x, power_v))\n\n def testZipfLogPmf_NonIntegerArgsNoInterpolation(self):\n batch_size = 12\n power = tf.constant([3.0] * batch_size)\n power_v = 3.0\n x = [-3., -0.5, 0., 2., 2.2, 3., 3.1, 4., 5., 5.5, 6., 7.2]\n\n zipf = tfd.Zipf(\n power=power, interpolate_nondiscrete=False, validate_args=False)\n log_pmf = zipf.log_prob(x)\n self.assertEqual((batch_size,), log_pmf.shape)\n\n log_pmf_values = self.evaluate(log_pmf)\n self.assertAllClose(log_pmf_values, stats.zipf.logpmf(x, power_v))\n\n pmf = zipf.prob(x)\n self.assertEqual((batch_size,), pmf.shape)\n\n pmf_values = self.evaluate(pmf)\n self.assertAllClose(pmf_values, stats.zipf.pmf(x, power_v))\n\n def testZipfLogPmfMultidimensional_IntegerArgs(self):\n batch_size = 6\n power = tf.constant([[2.0, 4.0, 5.0]] * batch_size)\n power_v = [2.0, 4.0, 5.0]\n x = np.array([[2.1, 3.5, 4.9, 5., 6.6, 7.]], dtype=np.int32).T\n\n zipf = tfd.Zipf(power=power, validate_args=True)\n log_pmf = zipf.log_prob(x)\n self.assertEqual((6, 3), log_pmf.shape)\n self.assertAllClose(self.evaluate(log_pmf), stats.zipf.logpmf(x, power_v))\n\n pmf = zipf.prob(x)\n self.assertEqual((6, 3), pmf.shape)\n self.assertAllClose(self.evaluate(pmf), stats.zipf.pmf(x, power_v))\n\n def testZipfLogPmfMultidimensional_NonIntegerArgs(self):\n batch_size = 6\n power = tf.constant([[2.0, 4.0, 5.0]] * batch_size)\n power_v = [2.0, 4.0, 5.0]\n x = np.array([[2., 3.2, 4.3, 5.5, 6.9, 7.]], dtype=np.float32).T\n floor_x = np.floor(x)\n ceil_x = np.ceil(x)\n\n zipf = tfd.Zipf(power=power, validate_args=True)\n log_pmf = zipf.log_prob(x)\n self.assertEqual((6, 3), log_pmf.shape)\n self.assertAllBetween(\n 
self.evaluate(log_pmf), stats.zipf.logpmf(ceil_x, power_v),\n stats.zipf.logpmf(floor_x, power_v))\n\n pmf = zipf.prob(x)\n self.assertEqual((6, 3), pmf.shape)\n self.assertAllBetween(\n self.evaluate(pmf), stats.zipf.pmf(ceil_x, power_v),\n stats.zipf.pmf(floor_x, power_v))\n\n def testZipfCdf_IntegerArgs(self):\n batch_size = 12\n power = tf.constant([3.0] * batch_size)\n power_v = 3.0\n x = [-3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8]\n\n zipf = tfd.Zipf(power=power, validate_args=False)\n log_cdf = zipf.log_cdf(x)\n self.assertEqual((batch_size,), log_cdf.shape)\n self.assertAllClose(self.evaluate(log_cdf), stats.zipf.logcdf(x, power_v))\n\n cdf = zipf.cdf(x)\n self.assertEqual((batch_size,), cdf.shape)\n self.assertAllClose(self.evaluate(cdf), stats.zipf.cdf(x, power_v))\n\n def testZipfCdf_NonIntegerArgsNoInterpolation(self):\n batch_size = 12\n power = tf.constant([3.0] * batch_size)\n power_v = 3.0\n x = [-3.5, -0.5, 0., 1, 1.1, 2.2, 3.1, 4., 5., 5.5, 6.4, 7.8]\n\n zipf = tfd.Zipf(\n power=power, interpolate_nondiscrete=False, validate_args=False)\n log_cdf = zipf.log_cdf(x)\n self.assertEqual((batch_size,), log_cdf.shape)\n self.assertAllClose(self.evaluate(log_cdf), stats.zipf.logcdf(x, power_v))\n\n cdf = zipf.cdf(x)\n self.assertEqual((batch_size,), cdf.shape)\n self.assertAllClose(self.evaluate(cdf), stats.zipf.cdf(x, power_v))\n\n def testZipfCdf_NonIntegerArgsInterpolated(self):\n batch_size = 12\n power = tf.constant([3.0] * batch_size)\n power_v = 3.0\n x = [-3.5, -0.5, 0., 1, 1.1, 2.2, 3.1, 4., 5., 5.5, 6.4, 7.8]\n floor_x = np.floor(x)\n ceil_x = np.ceil(x)\n\n zipf = tfd.Zipf(power=power, validate_args=False)\n log_cdf = zipf.log_cdf(x)\n self.assertEqual((batch_size,), log_cdf.shape)\n self.assertAllBetween(\n self.evaluate(log_cdf), stats.zipf.logcdf(floor_x, power_v),\n stats.zipf.logcdf(ceil_x, power_v))\n\n cdf = zipf.cdf(x)\n self.assertEqual((batch_size,), cdf.shape)\n self.assertAllBetween(\n self.evaluate(cdf), stats.zipf.cdf(floor_x, power_v),\n stats.zipf.cdf(ceil_x, power_v))\n\n def testZipfCdf_NonIntegerArgs(self):\n batch_size = 12\n power = tf.constant([3.0] * batch_size)\n power_v = 3.0\n x = [-3.5, -0.5, 0., 1, 1.1, 2.2, 3.1, 4., 5., 5.5, 6.4, 7.8]\n floor_x = np.floor(x)\n ceil_x = np.ceil(x)\n\n zipf = tfd.Zipf(power=power, validate_args=False)\n log_cdf = zipf.log_cdf(x)\n self.assertEqual((batch_size,), log_cdf.shape)\n self.assertAllBetween(\n self.evaluate(log_cdf), stats.zipf.logcdf(floor_x, power_v),\n stats.zipf.logcdf(ceil_x, power_v))\n\n cdf = zipf.cdf(x)\n self.assertEqual((batch_size,), cdf.shape)\n self.assertAllBetween(\n self.evaluate(cdf), stats.zipf.cdf(floor_x, power_v),\n stats.zipf.cdf(ceil_x, power_v))\n\n def testZipfCdfMultidimensional_IntegerArgs(self):\n batch_size = 6\n power = tf.constant([[2.0, 4.0, 5.0]] * batch_size)\n power_v = [2.0, 4.0, 5.0]\n x = np.array([[2., 3., 4., 5., 6., 7.]], dtype=np.float32).T\n\n zipf = tfd.Zipf(power=power, validate_args=True)\n log_cdf = zipf.log_cdf(x)\n self.assertEqual((6, 3), log_cdf.shape)\n self.assertAllClose(self.evaluate(log_cdf), stats.zipf.logcdf(x, power_v))\n\n cdf = zipf.cdf(x)\n self.assertEqual((6, 3), cdf.shape)\n self.assertAllClose(self.evaluate(cdf), stats.zipf.cdf(x, power_v))\n\n def testZipfCdfMultidimensional_NonIntegerArgs(self):\n batch_size = 6\n power = tf.constant([[2.0, 4.0, 5.0]] * batch_size)\n power_v = [2.0, 4.0, 5.0]\n x = np.array([[2.3, 3.5, 4.1, 5.5, 6.8, 7.9]], dtype=np.float32).T\n floor_x = np.floor(x)\n ceil_x = np.ceil(x)\n\n zipf = 
tfd.Zipf(power=power, validate_args=True)\n log_cdf = zipf.log_cdf(x)\n self.assertEqual((6, 3), log_cdf.shape)\n self.assertAllBetween(\n self.evaluate(log_cdf), stats.zipf.logcdf(floor_x, power_v),\n stats.zipf.logcdf(ceil_x, power_v))\n\n cdf = zipf.cdf(x)\n self.assertEqual((6, 3), cdf.shape)\n self.assertAllBetween(\n self.evaluate(cdf), stats.zipf.cdf(floor_x, power_v),\n stats.zipf.cdf(ceil_x, power_v))\n\n def testZipfMean(self):\n power_v = [2.0, 3.0, 2.5]\n zipf = tfd.Zipf(power=power_v, validate_args=True)\n self.assertEqual((3,), zipf.mean().shape)\n self.assertAllClose(self.evaluate(zipf.mean()), stats.zipf.mean(power_v))\n\n def testZipfVariance(self):\n power_v = [4.0, 3.0, 5.5] # var is undefined for power <= 3\n zipf = tfd.Zipf(power=power_v, validate_args=True)\n self.assertEqual((3,), zipf.variance().shape)\n stat_vars = np.vectorize(stats.zipf.var)(power_v)\n self.assertAllClose(self.evaluate(zipf.variance()), stat_vars)\n\n def testZipfStd(self):\n power_v = [4.0, 3.5, 4.5]\n zipf = tfd.Zipf(power=power_v, validate_args=True)\n self.assertEqual((3,), zipf.stddev().shape)\n stat_stddevs = np.vectorize(stats.zipf.std)(power_v)\n self.assertAllClose(self.evaluate(zipf.stddev()), stat_stddevs)\n\n def testZipfMode(self):\n power_v = [10.0, 3.0, 2.5, 3.2, 1.1, 0.05]\n zipf = tfd.Zipf(power=power_v, validate_args=False)\n self.assertEqual((6,), zipf.mode().shape)\n self.assertAllClose(self.evaluate(zipf.mode()), np.ones_like(power_v))\n\n def testZipfSample(self):\n power_v = 5.\n n = int(500e4)\n\n for power_dtype in [tf.float32, tf.float64]:\n power = tf.constant(power_v, dtype=power_dtype)\n for dtype in [tf.int32, tf.int64, tf.float32, tf.float64]:\n zipf = tfd.Zipf(power=power, dtype=dtype, validate_args=True)\n samples = zipf.sample(n, seed=test_util.test_seed())\n sample_values = self.evaluate(samples)\n self.assertEqual((n,), samples.shape)\n self.assertEqual((n,), sample_values.shape)\n self.assertAllClose(\n sample_values.mean(), stats.zipf.mean(power_v), rtol=.01)\n self.assertAllClose(\n sample_values.std(), stats.zipf.std(power_v), rtol=.03)\n\n def testZipfSample_ValidateArgs(self):\n power_v = 3.\n n = int(100e3)\n\n for power_dtype in [tf.float32, tf.float64]:\n power = tf.constant(power_v, dtype=power_dtype)\n\n for dtype in [tf.int32, tf.int64, tf.float32, tf.float64]:\n zipf = tfd.Zipf(power=power, dtype=dtype, validate_args=True)\n samples = zipf.sample(n, seed=test_util.test_seed())\n self.evaluate(samples)\n\n def testZipfSampleMultidimensionalMean(self):\n power_v = np.array([np.arange(5, 15, dtype=np.float32)]) # 1 x 10\n zipf = tfd.Zipf(power=power_v, validate_args=True)\n n = int(100e3)\n samples = zipf.sample(n, seed=test_util.test_seed())\n sample_values = self.evaluate(samples)\n self.assertEqual((n, 1, 10,), samples.shape)\n self.assertEqual((n, 1, 10,), sample_values.shape)\n\n # stats.zipf wants float64 params.\n stats_mean = np.vectorize(stats.zipf.mean)(power_v.astype(np.float64))\n self.assertAllClose(sample_values.mean(axis=0), stats_mean, rtol=.01)\n\n def testZipfSampleMultidimensionalStd(self):\n power_v = np.array([np.arange(5, 10, dtype=np.float32)]) # 1 x 5\n zipf = tfd.Zipf(power=power_v, validate_args=True)\n n = int(100e4)\n samples = zipf.sample(n, seed=test_util.test_seed())\n sample_values = self.evaluate(samples)\n self.assertEqual((n, 1, 5), samples.shape)\n self.assertEqual((n, 1, 5), sample_values.shape)\n\n # stats.zipf wants float64 params.\n stats_std = np.vectorize(stats.zipf.std)(power_v.astype(np.float64))\n 
self.assertAllClose(sample_values.std(axis=0), stats_std, rtol=.04)\n\n # Test that sampling with the same seed twice gives the same results.\n def testZipfSampleMultipleTimes(self):\n n = 1000\n seed = test_util.test_seed()\n power = 1.5\n\n zipf1 = tfd.Zipf(power=power, name=\"zipf1\", validate_args=True)\n tf1.set_random_seed(seed)\n samples1 = self.evaluate(zipf1.sample(n, seed=seed))\n\n zipf2 = tfd.Zipf(power=power, name=\"zipf2\", validate_args=True)\n tf1.set_random_seed(seed)\n samples2 = self.evaluate(zipf2.sample(n, seed=seed))\n\n self.assertAllEqual(samples1, samples2)\n\n def testZipfSample_AvoidsInfiniteLoop(self):\n zipf = tfd.Zipf(power=1., validate_args=False)\n n = 1000\n self.evaluate(zipf.sample(n, seed=test_util.test_seed()))\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n"
] | [
[
"numpy.vectorize",
"numpy.ceil",
"scipy.stats.zipf.logcdf",
"scipy.stats.zipf.cdf",
"numpy.floor",
"numpy.ones_like",
"tensorflow.compat.v2.test.main",
"numpy.ndenumerate",
"numpy.arange",
"scipy.stats.zipf.logpmf",
"tensorflow.compat.v1.set_random_seed",
"tensorflow.compat.v2.TensorShape",
"numpy.array",
"scipy.stats.zipf.mean",
"tensorflow.compat.v2.constant",
"scipy.stats.zipf.std",
"scipy.stats.zipf.pmf"
]
] |
freewind-2016/mmcv | [
"ba30d98a7b2ac20f49aa6a88fecb5fbb97f438e8"
] | [
"mmcv/runner/base_runner.py"
] | [
"# Copyright (c) Open-MMLab. All rights reserved.\nimport copy\nimport logging\nimport os.path as osp\nimport warnings\nfrom abc import ABCMeta, abstractmethod\n\nimport torch\nfrom torch.optim import Optimizer\n\nimport mmcv\nfrom ..parallel import is_module_wrapper\nfrom .checkpoint import load_checkpoint\nfrom .dist_utils import get_dist_info\nfrom .hooks import HOOKS, Hook\nfrom .log_buffer import LogBuffer\nfrom .priority import get_priority\nfrom .utils import get_time_str\n\n\nclass BaseRunner(metaclass=ABCMeta):\n \"\"\"The base class of Runner, a training helper for PyTorch.\n\n All subclasses should implement the following APIs:\n\n - ``run()``\n - ``train()``\n - ``val()``\n - ``save_checkpoint()``\n\n Args:\n model (:obj:`torch.nn.Module`): The model to be run.\n batch_processor (callable): A callable method that process a data\n batch. The interface of this method should be\n `batch_processor(model, data, train_mode) -> dict`\n optimizer (dict or :obj:`torch.optim.Optimizer`): It can be either an\n optimizer (in most cases) or a dict of optimizers (in models that\n requires more than one optimizer, e.g., GAN).\n work_dir (str, optional): The working directory to save checkpoints\n and logs. Defaults to None.\n logger (:obj:`logging.Logger`): Logger used during training.\n Defaults to None. (The default value is just for backward\n compatibility)\n meta (dict | None): A dict records some import information such as\n environment info and seed, which will be logged in logger hook.\n Defaults to None.\n max_epochs (int, optional): Total training epochs.\n max_iters (int, optional): Total training iterations.\n \"\"\"\n\n def __init__(self,\n model,\n batch_processor=None,\n optimizer=None,\n work_dir=None,\n logger=None,\n meta=None,\n max_iters=None,\n max_epochs=None):\n if batch_processor is not None:\n if not callable(batch_processor):\n raise TypeError('batch_processor must be callable, '\n f'but got {type(batch_processor)}')\n warnings.warn('batch_processor is deprecated, please implement '\n 'train_step() and val_step() in the model instead.')\n # raise an error is `batch_processor` is not None and\n # `model.train_step()` exists.\n if is_module_wrapper(model):\n _model = model.module\n else:\n _model = model\n if hasattr(_model, 'train_step') or hasattr(_model, 'val_step'):\n raise RuntimeError(\n 'batch_processor and model.train_step()/model.val_step() '\n 'cannot be both available.')\n else:\n assert hasattr(model, 'train_step')\n\n # check the type of `optimizer`\n if isinstance(optimizer, dict):\n for name, optim in optimizer.items():\n if not isinstance(optim, Optimizer):\n raise TypeError(\n f'optimizer must be a dict of torch.optim.Optimizers, '\n f'but optimizer[\"{name}\"] is a {type(optim)}')\n elif not isinstance(optimizer, Optimizer) and optimizer is not None:\n raise TypeError(\n f'optimizer must be a torch.optim.Optimizer object '\n f'or dict or None, but got {type(optimizer)}')\n\n # check the type of `logger`\n if not isinstance(logger, logging.Logger):\n raise TypeError(f'logger must be a logging.Logger object, '\n f'but got {type(logger)}')\n\n # check the type of `meta`\n if meta is not None and not isinstance(meta, dict):\n raise TypeError(\n f'meta must be a dict or None, but got {type(meta)}')\n\n self.model = model\n self.batch_processor = batch_processor\n self.optimizer = optimizer\n self.logger = logger\n self.meta = meta\n\n # create work_dir\n if mmcv.is_str(work_dir):\n self.work_dir = osp.abspath(work_dir)\n 
mmcv.mkdir_or_exist(self.work_dir)\n elif work_dir is None:\n self.work_dir = None\n else:\n raise TypeError('\"work_dir\" must be a str or None')\n\n # get model name from the model class\n if hasattr(self.model, 'module'):\n self._model_name = self.model.module.__class__.__name__\n else:\n self._model_name = self.model.__class__.__name__\n\n self._rank, self._world_size = get_dist_info()\n self.timestamp = get_time_str()\n self.mode = None\n self._hooks = []\n self._epoch = 0\n self._iter = 0\n self._inner_iter = 0\n\n if max_epochs is not None and max_iters is not None:\n raise ValueError(\n 'Only one of `max_epochs` or `max_iters` can be set.')\n\n self._max_epochs = max_epochs\n self._max_iters = max_iters\n # TODO: Redesign LogBuffer, it is not flexible and elegant enough\n self.log_buffer = LogBuffer()\n\n @property\n def model_name(self):\n \"\"\"str: Name of the model, usually the module class name.\"\"\"\n return self._model_name\n\n @property\n def rank(self):\n \"\"\"int: Rank of current process. (distributed training)\"\"\"\n return self._rank\n\n @property\n def world_size(self):\n \"\"\"int: Number of processes participating in the job.\n (distributed training)\"\"\"\n return self._world_size\n\n @property\n def hooks(self):\n \"\"\"list[:obj:`Hook`]: A list of registered hooks.\"\"\"\n return self._hooks\n\n @property\n def epoch(self):\n \"\"\"int: Current epoch.\"\"\"\n return self._epoch\n\n @property\n def iter(self):\n \"\"\"int: Current iteration.\"\"\"\n return self._iter\n\n @property\n def inner_iter(self):\n \"\"\"int: Iteration in an epoch.\"\"\"\n return self._inner_iter\n\n @property\n def max_epochs(self):\n \"\"\"int: Maximum training epochs.\"\"\"\n return self._max_epochs\n\n @property\n def max_iters(self):\n \"\"\"int: Maximum training iterations.\"\"\"\n return self._max_iters\n\n @abstractmethod\n def train(self):\n pass\n\n @abstractmethod\n def val(self):\n pass\n\n @abstractmethod\n def run(self, data_loaders, workflow, **kwargs):\n pass\n\n @abstractmethod\n def save_checkpoint(self,\n out_dir,\n filename_tmpl,\n save_optimizer=True,\n meta=None,\n create_symlink=True):\n pass\n\n def current_lr(self):\n \"\"\"Get current learning rates.\n\n Returns:\n list[float] | dict[str, list[float]]: Current learning rates of all\n param groups. If the runner has a dict of optimizers, this\n method will return a dict.\n \"\"\"\n if isinstance(self.optimizer, torch.optim.Optimizer):\n lr = [group['lr'] for group in self.optimizer.param_groups]\n elif isinstance(self.optimizer, dict):\n lr = dict()\n for name, optim in self.optimizer.items():\n lr[name] = [group['lr'] for group in optim.param_groups]\n else:\n raise RuntimeError(\n 'lr is not applicable because optimizer does not exist.')\n return lr\n\n def current_momentum(self):\n \"\"\"Get current momentums.\n\n Returns:\n list[float] | dict[str, list[float]]: Current momentums of all\n param groups. 
If the runner has a dict of optimizers, this\n method will return a dict.\n \"\"\"\n\n def _get_momentum(optimizer):\n momentums = []\n for group in optimizer.param_groups:\n if 'momentum' in group.keys():\n momentums.append(group['momentum'])\n elif 'betas' in group.keys():\n momentums.append(group['betas'][0])\n else:\n momentums.append(0)\n return momentums\n\n if self.optimizer is None:\n raise RuntimeError(\n 'momentum is not applicable because optimizer does not exist.')\n elif isinstance(self.optimizer, torch.optim.Optimizer):\n momentums = _get_momentum(self.optimizer)\n elif isinstance(self.optimizer, dict):\n momentums = dict()\n for name, optim in self.optimizer.items():\n momentums[name] = _get_momentum(optim)\n return momentums\n\n def register_hook(self, hook, priority='NORMAL'):\n \"\"\"Register a hook into the hook list.\n\n The hook will be inserted into a priority queue, with the specified\n priority (See :class:`Priority` for details of priorities).\n For hooks with the same priority, they will be triggered in the same\n order as they are registered.\n\n Args:\n hook (:obj:`Hook`): The hook to be registered.\n priority (int or str or :obj:`Priority`): Hook priority.\n Lower value means higher priority.\n \"\"\"\n assert isinstance(hook, Hook)\n if hasattr(hook, 'priority'):\n raise ValueError('\"priority\" is a reserved attribute for hooks')\n priority = get_priority(priority)\n hook.priority = priority\n # insert the hook to a sorted list\n inserted = False\n for i in range(len(self._hooks) - 1, -1, -1):\n if priority >= self._hooks[i].priority:\n self._hooks.insert(i + 1, hook)\n inserted = True\n break\n if not inserted:\n self._hooks.insert(0, hook)\n\n def register_hook_from_cfg(self, hook_cfg):\n \"\"\"Register a hook from its cfg.\n\n Args:\n hook_cfg (dict): Hook config. 
It should have at least keys 'type'\n and 'priority' indicating its type and priority.\n\n Notes:\n The specific hook class to register should not use 'type' and\n 'priority' arguments during initialization.\n \"\"\"\n hook_cfg = hook_cfg.copy()\n priority = hook_cfg.pop('priority', 'NORMAL')\n hook = mmcv.build_from_cfg(hook_cfg, HOOKS)\n self.register_hook(hook, priority=priority)\n\n def call_hook(self, fn_name):\n \"\"\"Call all hooks.\n\n Args:\n fn_name (str): The function name in each hook to be called, such as\n \"before_train_epoch\".\n \"\"\"\n for hook in self._hooks:\n getattr(hook, fn_name)(self)\n\n def load_checkpoint(self, filename, map_location='cpu', strict=False):\n self.logger.info('load checkpoint from %s', filename)\n return load_checkpoint(self.model, filename, map_location, strict,\n self.logger)\n\n def resume(self,\n checkpoint,\n resume_optimizer=True,\n map_location='default'):\n if map_location == 'default':\n if torch.cuda.is_available():\n device_id = torch.cuda.current_device()\n checkpoint = self.load_checkpoint(\n checkpoint,\n map_location=lambda storage, loc: storage.cuda(device_id))\n else:\n checkpoint = self.load_checkpoint(checkpoint)\n else:\n checkpoint = self.load_checkpoint(\n checkpoint, map_location=map_location)\n\n self._epoch = checkpoint['meta']['epoch']\n self._iter = checkpoint['meta']['iter']\n if 'optimizer' in checkpoint and resume_optimizer:\n if isinstance(self.optimizer, Optimizer):\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n elif isinstance(self.optimizer, dict):\n for k in self.optimizer.keys():\n self.optimizer[k].load_state_dict(\n checkpoint['optimizer'][k])\n else:\n raise TypeError(\n 'Optimizer should be dict or torch.optim.Optimizer '\n f'but got {type(self.optimizer)}')\n\n self.logger.info('resumed epoch %d, iter %d', self.epoch, self.iter)\n\n def register_lr_hook(self, lr_config):\n if lr_config is None:\n return\n elif isinstance(lr_config, dict):\n assert 'policy' in lr_config\n policy_type = lr_config.pop('policy')\n # If the type of policy is all in lower case, e.g., 'cyclic',\n # then its first letter will be capitalized, e.g., to be 'Cyclic'.\n # This is for the convenient usage of Lr updater.\n # Since this is not applicable for `\n # CosineAnnealingLrUpdater`,\n # the string will not be changed if it contains capital letters.\n if policy_type == policy_type.lower():\n policy_type = policy_type.title()\n hook_type = policy_type + 'LrUpdaterHook'\n lr_config['type'] = hook_type\n hook = mmcv.build_from_cfg(lr_config, HOOKS)\n else:\n hook = lr_config\n self.register_hook(hook)\n\n def register_momentum_hook(self, momentum_config):\n if momentum_config is None:\n return\n if isinstance(momentum_config, dict):\n assert 'policy' in momentum_config\n policy_type = momentum_config.pop('policy')\n # If the type of policy is all in lower case, e.g., 'cyclic',\n # then its first letter will be capitalized, e.g., to be 'Cyclic'.\n # This is for the convenient usage of momentum updater.\n # Since this is not applicable for\n # `CosineAnnealingMomentumUpdater`,\n # the string will not be changed if it contains capital letters.\n if policy_type == policy_type.lower():\n policy_type = policy_type.title()\n hook_type = policy_type + 'MomentumUpdaterHook'\n momentum_config['type'] = hook_type\n hook = mmcv.build_from_cfg(momentum_config, HOOKS)\n else:\n hook = momentum_config\n self.register_hook(hook)\n\n def register_optimizer_hook(self, optimizer_config):\n if optimizer_config is None:\n return\n if 
isinstance(optimizer_config, dict):\n optimizer_config.setdefault('type', 'OptimizerHook')\n hook = mmcv.build_from_cfg(optimizer_config, HOOKS)\n else:\n hook = optimizer_config\n self.register_hook(hook)\n\n def register_checkpoint_hook(self, checkpoint_config):\n if checkpoint_config is None:\n return\n if isinstance(checkpoint_config, dict):\n checkpoint_config.setdefault('type', 'CheckpointHook')\n hook = mmcv.build_from_cfg(checkpoint_config, HOOKS)\n else:\n hook = checkpoint_config\n self.register_hook(hook)\n\n def register_logger_hooks(self, log_config):\n if log_config is None:\n return\n log_interval = log_config['interval']\n for info in log_config['hooks']:\n logger_hook = mmcv.build_from_cfg(\n info, HOOKS, default_args=dict(interval=log_interval))\n self.register_hook(logger_hook, priority='VERY_LOW')\n\n def register_timer_hook(self, timer_config):\n if timer_config is None:\n return\n if isinstance(timer_config, dict):\n timer_config_ = copy.deepcopy(timer_config)\n hook = mmcv.build_from_cfg(timer_config_, HOOKS)\n else:\n hook = timer_config\n self.register_hook(hook)\n\n def register_training_hooks(self,\n lr_config,\n optimizer_config=None,\n checkpoint_config=None,\n log_config=None,\n momentum_config=None,\n timer_config=dict(type='IterTimerHook')):\n \"\"\"Register default hooks for training.\n\n Default hooks include:\n\n - LrUpdaterHook\n - MomentumUpdaterHook\n - OptimizerStepperHook\n - CheckpointSaverHook\n - IterTimerHook\n - LoggerHook(s)\n \"\"\"\n self.register_lr_hook(lr_config)\n self.register_momentum_hook(momentum_config)\n self.register_optimizer_hook(optimizer_config)\n self.register_checkpoint_hook(checkpoint_config)\n self.register_timer_hook(timer_config)\n self.register_logger_hooks(log_config)\n"
] | [
[
"torch.cuda.is_available",
"torch.cuda.current_device"
]
] |
whzup/quadpy | [
"ca8bd2f9c5a4ae30dc85d8fb79217602bd42525e"
] | [
"quadpy/disk/_albrecht.py"
] | [
"import numpy\nimport sympy\nfrom mpmath import mp\n\nfrom ..helpers import article, fsd, pm, untangle, z\nfrom ._helpers import DiskScheme\n\n_citation = article(\n authors=[\"J. Albrecht\"],\n title=\"Formeln zur numerischen Integration über Kreisbereiche\",\n journal=\"ZAMM\",\n volume=\"40\",\n number=\"10-11\",\n year=\"1960\",\n pages=\"514–517\",\n url=\"https://doi.org/10.1002/zamm.19600401014\",\n)\n\nfrac = sympy.Rational\npi = sympy.pi\ncos = numpy.vectorize(sympy.cos)\nsin = numpy.vectorize(sympy.sin)\nsqrt = numpy.vectorize(sympy.sqrt)\npm_ = numpy.array([+1, -1])\nroots = mp.polyroots\nlinear_solve = mp.lu_solve\n\n\ndef albrecht_1():\n # Equals Albrecht-Collatz, Lether(2)\n alpha = (2 * numpy.arange(4) + 1) * pi / 4\n t = numpy.array([cos(alpha), sin(alpha)]).T\n\n data = [(frac(1, 4), sqrt(frac(1, 2)) * t)]\n\n points, weights = untangle(data)\n weights *= pi\n return DiskScheme(\"Albrecht 1\", weights, points, 3, _citation)\n\n\ndef albrecht_2():\n alpha = (2 * numpy.arange(6) + 1) * pi / 6\n t = numpy.array([cos(alpha), sin(alpha)]).T\n\n data = [(frac(1, 4), z(2)), (frac(1, 8), sqrt(frac(2, 3)) * t)]\n\n points, weights = untangle(data)\n weights *= pi\n return DiskScheme(\"Albrecht 2\", weights, points, 5, _citation)\n\n\ndef albrecht_3():\n alpha = 2 * numpy.arange(4) * pi / 4\n s = numpy.array([cos(alpha), sin(alpha)]).T\n\n alpha = (2 * numpy.arange(4) + 1) * pi / 4\n t = numpy.array([cos(alpha), sin(alpha)]).T\n\n sqrt29 = sqrt(29)\n a1, a2 = (551 + pm_ * 41 * sqrt29) / 6264\n rho1, rho2 = sqrt((27 - pm_ * 3 * sqrt29) / 52)\n\n data = [(frac(2, 27), sqrt(frac(3, 4)) * t), (a1, rho1 * s), (a2, rho2 * s)]\n\n points, weights = untangle(data)\n weights *= pi\n return DiskScheme(\"Albrecht 3\", weights, points, 7, _citation)\n\n\ndef albrecht_4():\n sqrt111 = sqrt(111)\n rho1, rho2 = sqrt((96 - pm_ * 4 * sqrt(111)) / 155)\n\n alpha = 2 * numpy.arange(6) * pi / 6\n s = numpy.array([cos(alpha), sin(alpha)]).T\n\n alpha = (2 * numpy.arange(6) + 1) * pi / 6\n t = numpy.array([cos(alpha), sin(alpha)]).T\n\n B0 = frac(251, 2304)\n B1, B2 = (110297 + pm_ * 5713 * sqrt111) / 2045952\n C = frac(125, 3072)\n\n data = [(B0, z(2)), (B1, rho1 * s), (B2, rho2 * s), (C, sqrt(frac(4, 5)) * t)]\n\n points, weights = untangle(data)\n weights *= pi\n return DiskScheme(\"Albrecht 4\", weights, points, 9, _citation)\n\n\ndef albrecht_5():\n # The values are solutions of\n # 6317094x^3 - 10022245*x^2 + 4149900*x - 336375 = 0\n sigma2 = roots([6317094, -10022245, 4149900, -336375])\n A = numpy.vander(sigma2, increasing=True).T\n b = numpy.array([frac(168899, 1350000), frac(7661, 180000), frac(71, 3000)])\n B = linear_solve(A, b)\n\n sqrt19 = sqrt(19)\n\n # ERR Stroud incorrectly lists sqrt(10) for s1.\n s1, s2 = sqrt((125 - pm_ * 10 * sqrt19) / 366)\n\n # ERR Stroud incorrectly lists 749489_3_.0 instead of 749489_2_.0\n C1, C2 = (7494892 + pm_ * 1053263 * sqrt19) / 205200000\n D = frac(81, 3125)\n\n u = sqrt(frac(5, 6)) * cos(pi / 8)\n v = sqrt(frac(5, 6)) * sin(pi / 8)\n\n data = [\n (B[0], fsd(2, (sqrt(sigma2[0]), 1))),\n (B[1], fsd(2, (sqrt(sigma2[1]), 1))),\n (B[2], fsd(2, (sqrt(sigma2[2]), 1))),\n (C1, pm(2, s1)),\n (C2, pm(2, s2)),\n (D, fsd(2, (u, 1), (v, 1))),\n ]\n\n points, weights = untangle(data)\n weights *= pi\n return DiskScheme(\"Albrecht 5\", weights, points, 11, _citation)\n\n\ndef albrecht_6():\n # The values are solutions of\n # 11025*x^3 - 19020*x^2 + 9370*x - 1212 = 0\n sigma2 = roots([11025, -19020, 9370, -1212])\n A = numpy.vander(sigma2, increasing=True).T\n b = 
numpy.array([frac(1432433, 18849024), frac(1075, 31104), frac(521, 25920)])\n B = linear_solve(A, b)\n\n B0 = frac(2615, 43632)\n C = frac(16807, 933120)\n\n alpha = 2 * numpy.arange(10) * pi / 10\n rs = numpy.array([cos(alpha), sin(alpha)]).T\n\n alpha = (2 * numpy.arange(10) + 1) * pi / 10\n uv = numpy.array([cos(alpha), sin(alpha)]).T\n\n data = [\n (B0, z(2)),\n (B[0], sqrt(sigma2[0]) * rs),\n (B[1], sqrt(sigma2[1]) * rs),\n (B[2], sqrt(sigma2[2]) * rs),\n (C, sqrt(frac(6, 7)) * uv),\n ]\n\n points, weights = untangle(data)\n weights *= pi\n return DiskScheme(\"Albrecht 6\", weights, points, 13, _citation)\n\n\ndef albrecht_7():\n alpha = 2 * numpy.arange(8) * pi / 8\n s = numpy.array([cos(alpha), sin(alpha)]).T\n\n alpha = (2 * numpy.arange(8) + 1) * pi / 8\n t = numpy.array([cos(alpha), sin(alpha)]).T\n\n sqrt21 = sqrt(21)\n wt1, wt2 = (4998 + pm_ * 343 * sqrt21) / 253125\n tau1, tau2 = sqrt((21 - pm_ * sqrt21) / 28)\n\n # The values are solutions of\n # 4960228*x^4 - 10267740*x^3 + 6746490*x^2 - 1476540*x + 70425 = 0\n sigma2 = roots([4960228, -10267740, 6746490, -1476540, 70425])\n A = numpy.vander(sigma2, increasing=True).T\n b = numpy.array(\n [frac(57719, 675000), frac(9427, 270000), frac(193, 9000), frac(113, 7200)]\n )\n ws = linear_solve(A, b)\n\n data = [\n (ws[0], sqrt(sigma2[0]) * s),\n (ws[1], sqrt(sigma2[1]) * s),\n (ws[2], sqrt(sigma2[2]) * s),\n (ws[3], sqrt(sigma2[3]) * s),\n (wt1, tau1 * t),\n (wt2, tau2 * t),\n ]\n\n points, weights = untangle(data)\n weights *= pi\n return DiskScheme(\"Albrecht 7\", weights, points, 15, _citation)\n\n\ndef albrecht_8():\n alpha = 2 * numpy.arange(10) * pi / 10\n s = numpy.array([cos(alpha), sin(alpha)]).T\n\n alpha = (2 * numpy.arange(10) + 1) * pi / 10\n t = numpy.array([cos(alpha), sin(alpha)]).T\n\n m0 = frac(496439663, 13349499975)\n\n sqrt7 = sqrt(7)\n wt1, wt2 = (125504 + pm_ * 16054 * sqrt7) / 8751645\n tau1, tau2 = sqrt((14 - pm_ * sqrt7) / 18)\n\n # The values are solutions of\n # 160901628*x^4 - 364759920*x^3 + 274856190*x^2 - 76570340*x\n # + 6054195 = 0\n sigma2 = roots([160901628, -364759920, 274856190, -76570340, 6054195])\n A = numpy.vander(sigma2, increasing=True).T\n b = numpy.array(\n [\n frac(121827491812, 1802182496625),\n frac(48541, 1666980),\n frac(977, 55566),\n frac(671, 52920),\n ]\n )\n ws = linear_solve(A, b)\n\n data = [\n (m0, z(2)),\n (ws[0], sqrt(sigma2[0]) * s),\n (ws[1], sqrt(sigma2[1]) * s),\n (ws[2], sqrt(sigma2[2]) * s),\n (ws[3], sqrt(sigma2[3]) * s),\n (wt1, tau1 * t),\n (wt2, tau2 * t),\n ]\n\n points, weights = untangle(data)\n weights *= pi\n return DiskScheme(\"Albrecht 8\", weights, points, 17, _citation)\n"
] | [
[
"numpy.array",
"numpy.arange",
"numpy.vectorize",
"numpy.vander"
]
] |
adujardin/OpenIBL | [
"5ab80d65afa42ca22210c4c08983fdc156696bab"
] | [
"ibl/evaluators.py"
] | [
"from __future__ import print_function, absolute_import\nimport time\nfrom collections import OrderedDict\nimport numpy as np\nfrom sklearn.preprocessing import normalize\nfrom sklearn.metrics import pairwise_distances\n\nimport torch\nimport torch.nn.functional as F\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nfrom torch.utils.data import DataLoader\n\nfrom .pca import PCA\nfrom .utils.meters import AverageMeter\nfrom .utils.rerank import re_ranking\nfrom .utils.dist_utils import synchronize\nfrom .utils.serialization import write_json\nfrom .utils.data.preprocessor import Preprocessor\nfrom .utils import to_torch\n\ndef extract_cnn_feature(model, inputs, vlad=True, gpu=None):\n model.eval()\n inputs = to_torch(inputs).cuda(gpu)\n outputs = model(inputs)\n if (isinstance(outputs, list) or isinstance(outputs, tuple)):\n x_pool, x_vlad = outputs\n if vlad:\n outputs = F.normalize(x_vlad, p=2, dim=-1)\n else:\n outputs = F.normalize(x_pool, p=2, dim=-1)\n else:\n outputs = F.normalize(outputs, p=2, dim=-1)\n return outputs\n\ndef extract_features(model, data_loader, dataset, print_freq=10,\n vlad=True, pca=None, gpu=None, sync_gather=False):\n model.eval()\n batch_time = AverageMeter()\n data_time = AverageMeter()\n\n rank = dist.get_rank()\n world_size = dist.get_world_size()\n\n features = []\n\n if (pca is not None):\n pca.load(gpu=gpu)\n\n end = time.time()\n with torch.no_grad():\n for i, (imgs, fnames, _, _, _) in enumerate(data_loader):\n data_time.update(time.time() - end)\n\n outputs = extract_cnn_feature(model, imgs, vlad, gpu=gpu)\n if (pca is not None):\n outputs = pca.infer(outputs)\n outputs = outputs.data.cpu()\n\n features.append(outputs)\n\n batch_time.update(time.time() - end)\n end = time.time()\n\n if ((i + 1) % print_freq == 0 and rank==0):\n print('Extract Features: [{}/{}]\\t'\n 'Time {:.3f} ({:.3f})\\t'\n 'Data {:.3f} ({:.3f})\\t'\n .format(i + 1, len(data_loader),\n batch_time.val, batch_time.avg,\n data_time.val, data_time.avg))\n\n if (pca is not None):\n del pca\n\n if (sync_gather):\n # all gather features in parallel\n # cost more GPU memory but less time\n features = torch.cat(features).cuda(gpu)\n all_features = [torch.empty_like(features) for _ in range(world_size)]\n dist.all_gather(all_features, features)\n del features\n all_features = torch.cat(all_features).cpu()[:len(dataset)]\n features_dict = OrderedDict()\n for fname, output in zip(dataset, all_features):\n features_dict[fname[0]] = output\n del all_features\n else:\n # broadcast features in sequence\n # cost more time but less GPU memory\n bc_features = torch.cat(features).cuda(gpu)\n features_dict = OrderedDict()\n for k in range(world_size):\n bc_features.data.copy_(torch.cat(features))\n if (rank==0):\n print(\"gathering features from rank no.{}\".format(k))\n dist.broadcast(bc_features, k)\n l = bc_features.cpu().size(0)\n for fname, output in zip(dataset[k*l:(k+1)*l], bc_features.cpu()):\n features_dict[fname[0]] = output\n del bc_features, features\n\n return features_dict\n\ndef pairwise_distance(features, query=None, gallery=None, metric=None):\n if query is None and gallery is None:\n n = len(features)\n x = torch.cat(list(features.values()))\n x = x.view(n, -1)\n if metric is not None:\n x = metric.transform(x)\n dist_m = torch.pow(x, 2).sum(dim=1, keepdim=True) * 2\n dist_m = dist_m.expand(n, n) - 2 * torch.mm(x, x.t())\n return dist_m, None, None\n\n if (dist.get_rank()==0):\n print (\"===> Start calculating pairwise distances\")\n x = 
torch.cat([features[f].unsqueeze(0) for f, _, _, _ in query], 0)\n y = torch.cat([features[f].unsqueeze(0) for f, _, _, _ in gallery], 0)\n\n m, n = x.size(0), y.size(0)\n x = x.view(m, -1)\n y = y.view(n, -1)\n if metric is not None:\n x = metric.transform(x)\n y = metric.transform(y)\n dist_m = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(m, n) + \\\n torch.pow(y, 2).sum(dim=1, keepdim=True).expand(n, m).t()\n dist_m.addmm_(1, -2, x, y.t())\n return dist_m, x.numpy(), y.numpy()\n\ndef spatial_nms(pred, db_ids, topN):\n assert(len(pred)==len(db_ids))\n pred_select = pred[:topN]\n pred_pids = [db_ids[i] for i in pred_select]\n # find unique\n seen = set()\n seen_add = seen.add\n pred_pids_unique = [i for i, x in enumerate(pred_pids) if not (x in seen or seen_add(x))]\n return [pred_select[i] for i in pred_pids_unique]\n\ndef evaluate_all(distmat, gt, gallery, recall_topk=[1, 5, 10], nms=False):\n sort_idx = np.argsort(distmat, axis=1)\n del distmat\n db_ids = [db[1] for db in gallery]\n\n if (dist.get_rank()==0):\n print(\"===> Start calculating recalls\")\n correct_at_n = np.zeros(len(recall_topk))\n\n for qIx, pred in enumerate(sort_idx):\n if (nms):\n pred = spatial_nms(pred.tolist(), db_ids, max(recall_topk)*12)\n\n for i, n in enumerate(recall_topk):\n # if in top N then also in top NN, where NN > N\n if np.any(np.in1d(pred[:n], gt[qIx])):\n correct_at_n[i:] += 1\n break\n recalls = correct_at_n / len(gt)\n del sort_idx\n\n if (dist.get_rank()==0):\n print('Recall Scores:')\n for i, k in enumerate(recall_topk):\n print(' top-{:<4}{:12.1%}'.format(k, recalls[i]))\n return recalls\n\n\nclass Evaluator(object):\n def __init__(self, model):\n super(Evaluator, self).__init__()\n self.model = model\n self.rank = dist.get_rank()\n\n def evaluate(self, query_loader, dataset, query, gallery, ground_truth, gallery_loader=None, \\\n vlad=True, pca=None, rerank=False, gpu=None, sync_gather=False, \\\n nms=False, rr_topk=25, lambda_value=0):\n if (gallery_loader is not None):\n features = extract_features(self.model, query_loader, query,\n vlad=vlad, pca=pca, gpu=gpu, sync_gather=sync_gather)\n features_db = extract_features(self.model, gallery_loader, gallery,\n vlad=vlad, pca=pca, gpu=gpu, sync_gather=sync_gather)\n features.update(features_db)\n else:\n features = extract_features(self.model, query_loader, dataset,\n vlad=vlad, pca=pca, gpu=gpu, sync_gather=sync_gather)\n\n distmat, _, _ = pairwise_distance(features, query, gallery)\n recalls = evaluate_all(distmat, ground_truth, gallery, nms=nms)\n if (not rerank):\n return recalls\n\n if (self.rank==0):\n print('Applying re-ranking ...')\n distmat_gg, _, _ = pairwise_distance(features, gallery, gallery)\n distmat_qq, _, _ = pairwise_distance(features, query, query)\n distmat = re_ranking(distmat.numpy(), distmat_qq.numpy(), distmat_gg.numpy(),\n k1=rr_topk, k2=1, lambda_value=lambda_value)\n\n return evaluate_all(distmat, ground_truth, gallery, nms=nms)\n"
] | [
[
"torch.distributed.all_gather",
"torch.empty_like",
"torch.distributed.get_world_size",
"torch.distributed.get_rank",
"torch.distributed.broadcast",
"torch.nn.functional.normalize",
"torch.no_grad",
"numpy.argsort",
"numpy.in1d",
"torch.cat",
"torch.pow"
]
] |
joedomino874/hummingbot | [
"cb3ee5a30a2feb0a55ceca9d200c59662d7e3057"
] | [
"test/hummingbot/strategy/cross_exchange_market_making/test_cross_exchange_market_making.py"
] | [
"import logging\nimport pandas as pd\nfrom typing import List\nimport unittest\n\nfrom decimal import Decimal\nfrom math import floor, ceil\nfrom nose.plugins.attrib import attr\n\nfrom hummingbot.core.clock import Clock, ClockMode\nfrom hummingbot.core.data_type.limit_order import LimitOrder\nfrom hummingbot.core.data_type.order_book import OrderBook\nfrom hummingbot.core.data_type.order_book_row import OrderBookRow\nfrom hummingbot.core.event.events import (\n BuyOrderCompletedEvent,\n BuyOrderCreatedEvent,\n MarketEvent,\n OrderBookTradeEvent,\n OrderFilledEvent,\n OrderType,\n SellOrderCreatedEvent,\n SellOrderCompletedEvent,\n TradeFee,\n TradeType,\n)\nfrom hummingbot.core.event.event_logger import EventLogger\nfrom hummingbot.strategy.cross_exchange_market_making import CrossExchangeMarketMakingStrategy\nfrom hummingbot.strategy.cross_exchange_market_making.cross_exchange_market_pair import CrossExchangeMarketPair\nfrom hummingbot.strategy.market_trading_pair_tuple import MarketTradingPairTuple\nfrom hummingbot.connector.exchange.paper_trade.paper_trade_exchange import QuantizationParams\nfrom test.mock.mock_paper_exchange import MockPaperExchange\n\nlogging.basicConfig(level=logging.ERROR)\n\n\n@attr(\"stable\")\nclass HedgedMarketMakingUnitTest(unittest.TestCase):\n start: pd.Timestamp = pd.Timestamp(\"2019-01-01\", tz=\"UTC\")\n end: pd.Timestamp = pd.Timestamp(\"2019-01-01 01:00:00\", tz=\"UTC\")\n start_timestamp: float = start.timestamp()\n end_timestamp: float = end.timestamp()\n maker_trading_pairs: List[str] = [\"COINALPHA-WETH\", \"COINALPHA\", \"WETH\"]\n taker_trading_pairs: List[str] = [\"COINALPHA-ETH\", \"COINALPHA\", \"ETH\"]\n\n def setUp(self):\n self.clock: Clock = Clock(ClockMode.BACKTEST, 1.0, self.start_timestamp, self.end_timestamp)\n self.min_profitbality = Decimal(\"0.005\")\n self.maker_market: MockPaperExchange = MockPaperExchange()\n self.taker_market: MockPaperExchange = MockPaperExchange()\n self.maker_market.set_balanced_order_book(self.maker_trading_pairs[0], 1.0, 0.5, 1.5, 0.01, 10)\n self.taker_market.set_balanced_order_book(self.taker_trading_pairs[0], 1.0, 0.5, 1.5, 0.001, 4)\n self.maker_market.set_balance(\"COINALPHA\", 5)\n self.maker_market.set_balance(\"WETH\", 5)\n self.maker_market.set_balance(\"QETH\", 5)\n self.taker_market.set_balance(\"COINALPHA\", 5)\n self.taker_market.set_balance(\"ETH\", 5)\n self.maker_market.set_quantization_param(QuantizationParams(self.maker_trading_pairs[0], 5, 5, 5, 5))\n self.taker_market.set_quantization_param(QuantizationParams(self.taker_trading_pairs[0], 5, 5, 5, 5))\n\n self.market_pair: CrossExchangeMarketPair = CrossExchangeMarketPair(\n MarketTradingPairTuple(self.maker_market, *self.maker_trading_pairs),\n MarketTradingPairTuple(self.taker_market, *self.taker_trading_pairs),\n )\n\n logging_options: int = (\n CrossExchangeMarketMakingStrategy.OPTION_LOG_ALL\n & (~CrossExchangeMarketMakingStrategy.OPTION_LOG_NULL_ORDER_SIZE)\n )\n self.strategy: CrossExchangeMarketMakingStrategy = CrossExchangeMarketMakingStrategy()\n self.strategy.init_params(\n [self.market_pair],\n order_size_portfolio_ratio_limit=Decimal(\"0.3\"),\n min_profitability=Decimal(self.min_profitbality),\n logging_options=logging_options,\n slippage_buffer=Decimal(\"0\"),\n )\n self.strategy_with_top_depth_tolerance: CrossExchangeMarketMakingStrategy = CrossExchangeMarketMakingStrategy()\n self.strategy_with_top_depth_tolerance.init_params(\n [self.market_pair],\n order_size_portfolio_ratio_limit=Decimal(\"0.3\"),\n 
min_profitability=Decimal(self.min_profitbality),\n logging_options=logging_options,\n top_depth_tolerance=1,\n slippage_buffer=Decimal(\"0\"),\n )\n self.logging_options = logging_options\n self.clock.add_iterator(self.maker_market)\n self.clock.add_iterator(self.taker_market)\n self.clock.add_iterator(self.strategy)\n\n self.maker_order_fill_logger: EventLogger = EventLogger()\n self.taker_order_fill_logger: EventLogger = EventLogger()\n self.cancel_order_logger: EventLogger = EventLogger()\n self.maker_order_created_logger: EventLogger = EventLogger()\n self.taker_order_created_logger: EventLogger = EventLogger()\n self.maker_market.add_listener(MarketEvent.OrderFilled, self.maker_order_fill_logger)\n self.taker_market.add_listener(MarketEvent.OrderFilled, self.taker_order_fill_logger)\n self.maker_market.add_listener(MarketEvent.OrderCancelled, self.cancel_order_logger)\n self.maker_market.add_listener(MarketEvent.BuyOrderCreated, self.maker_order_created_logger)\n self.maker_market.add_listener(MarketEvent.SellOrderCreated, self.maker_order_created_logger)\n self.taker_market.add_listener(MarketEvent.BuyOrderCreated, self.taker_order_created_logger)\n self.taker_market.add_listener(MarketEvent.SellOrderCreated, self.taker_order_created_logger)\n\n def simulate_maker_market_trade(self, is_buy: bool, quantity: Decimal, price: Decimal):\n maker_trading_pair: str = self.maker_trading_pairs[0]\n order_book: OrderBook = self.maker_market.get_order_book(maker_trading_pair)\n trade_event: OrderBookTradeEvent = OrderBookTradeEvent(\n maker_trading_pair, self.clock.current_timestamp, TradeType.BUY if is_buy else TradeType.SELL, price, quantity\n )\n order_book.apply_trade(trade_event)\n\n @staticmethod\n def simulate_order_book_widening(order_book: OrderBook, top_bid: float, top_ask: float):\n bid_diffs: List[OrderBookRow] = []\n ask_diffs: List[OrderBookRow] = []\n update_id: int = order_book.last_diff_uid + 1\n for row in order_book.bid_entries():\n if row.price > top_bid:\n bid_diffs.append(OrderBookRow(row.price, 0, update_id))\n else:\n break\n for row in order_book.ask_entries():\n if row.price < top_ask:\n ask_diffs.append(OrderBookRow(row.price, 0, update_id))\n else:\n break\n order_book.apply_diffs(bid_diffs, ask_diffs, update_id)\n\n @staticmethod\n def simulate_limit_order_fill(market: MockPaperExchange, limit_order: LimitOrder):\n quote_currency_traded: Decimal = limit_order.price * limit_order.quantity\n base_currency_traded: Decimal = limit_order.quantity\n quote_currency: str = limit_order.quote_currency\n base_currency: str = limit_order.base_currency\n\n if limit_order.is_buy:\n market.set_balance(quote_currency, market.get_balance(quote_currency) - quote_currency_traded)\n market.set_balance(base_currency, market.get_balance(base_currency) + base_currency_traded)\n market.trigger_event(\n MarketEvent.BuyOrderCreated,\n BuyOrderCreatedEvent(\n market.current_timestamp,\n OrderType.LIMIT,\n limit_order.trading_pair,\n limit_order.quantity,\n limit_order.price,\n limit_order.client_order_id\n )\n )\n market.trigger_event(\n MarketEvent.OrderFilled,\n OrderFilledEvent(\n market.current_timestamp,\n limit_order.client_order_id,\n limit_order.trading_pair,\n TradeType.BUY,\n OrderType.LIMIT,\n limit_order.price,\n limit_order.quantity,\n TradeFee(Decimal(0)),\n ),\n )\n market.trigger_event(\n MarketEvent.BuyOrderCompleted,\n BuyOrderCompletedEvent(\n market.current_timestamp,\n limit_order.client_order_id,\n base_currency,\n quote_currency,\n quote_currency,\n 
base_currency_traded,\n quote_currency_traded,\n Decimal(0),\n OrderType.LIMIT,\n ),\n )\n else:\n market.set_balance(quote_currency, market.get_balance(quote_currency) + quote_currency_traded)\n market.set_balance(base_currency, market.get_balance(base_currency) - base_currency_traded)\n market.trigger_event(\n MarketEvent.BuyOrderCreated,\n SellOrderCreatedEvent(\n market.current_timestamp,\n OrderType.LIMIT,\n limit_order.trading_pair,\n limit_order.quantity,\n limit_order.price,\n limit_order.client_order_id\n )\n )\n market.trigger_event(\n MarketEvent.OrderFilled,\n OrderFilledEvent(\n market.current_timestamp,\n limit_order.client_order_id,\n limit_order.trading_pair,\n TradeType.SELL,\n OrderType.LIMIT,\n limit_order.price,\n limit_order.quantity,\n TradeFee(Decimal(0)),\n ),\n )\n market.trigger_event(\n MarketEvent.SellOrderCompleted,\n SellOrderCompletedEvent(\n market.current_timestamp,\n limit_order.client_order_id,\n base_currency,\n quote_currency,\n quote_currency,\n base_currency_traded,\n quote_currency_traded,\n Decimal(0),\n OrderType.LIMIT,\n ),\n )\n\n @staticmethod\n def emit_order_created_event(market: MockPaperExchange, order: LimitOrder):\n event_cls = BuyOrderCreatedEvent if order.is_buy else SellOrderCreatedEvent\n event_tag = MarketEvent.BuyOrderCreated if order.is_buy else MarketEvent.SellOrderCreated\n market.trigger_event(\n event_tag,\n message=event_cls(\n order.creation_timestamp,\n OrderType.LIMIT,\n order.trading_pair,\n order.quantity,\n order.price,\n order.client_order_id,\n )\n )\n\n def test_both_sides_profitable(self):\n self.clock.backtest_til(self.start_timestamp + 5)\n self.assertEqual(1, len(self.strategy.active_bids))\n self.assertEqual(1, len(self.strategy.active_asks))\n\n bid_order: LimitOrder = self.strategy.active_bids[0][1]\n ask_order: LimitOrder = self.strategy.active_asks[0][1]\n self.assertEqual(Decimal(\"0.99451\"), bid_order.price)\n self.assertEqual(Decimal(\"1.0055\"), ask_order.price)\n self.assertEqual(Decimal(\"3.0\"), bid_order.quantity)\n self.assertEqual(Decimal(\"3.0\"), ask_order.quantity)\n\n self.simulate_maker_market_trade(False, Decimal(\"10.0\"), bid_order.price * Decimal(\"0.99\"))\n\n self.clock.backtest_til(self.start_timestamp + 10)\n self.assertEqual(1, len(self.maker_order_fill_logger.event_log))\n self.assertEqual(1, len(self.taker_order_fill_logger.event_log))\n\n maker_fill: OrderFilledEvent = self.maker_order_fill_logger.event_log[0]\n taker_fill: OrderFilledEvent = self.taker_order_fill_logger.event_log[0]\n self.assertEqual(TradeType.BUY, maker_fill.trade_type)\n self.assertEqual(TradeType.SELL, taker_fill.trade_type)\n self.assertAlmostEqual(Decimal(\"0.99451\"), maker_fill.price)\n self.assertAlmostEqual(Decimal(\"0.9995\"), taker_fill.price)\n self.assertAlmostEqual(Decimal(\"3.0\"), maker_fill.amount)\n self.assertAlmostEqual(Decimal(\"3.0\"), taker_fill.amount)\n\n def test_top_depth_tolerance(self): # TODO\n self.clock.remove_iterator(self.strategy)\n self.clock.add_iterator(self.strategy_with_top_depth_tolerance)\n self.clock.backtest_til(self.start_timestamp + 5)\n bid_order: LimitOrder = self.strategy_with_top_depth_tolerance.active_bids[0][1]\n ask_order: LimitOrder = self.strategy_with_top_depth_tolerance.active_asks[0][1]\n\n self.taker_market.trigger_event(\n MarketEvent.BuyOrderCreated,\n BuyOrderCreatedEvent(\n self.start_timestamp + 5,\n OrderType.LIMIT,\n bid_order.trading_pair,\n bid_order.quantity,\n bid_order.price,\n bid_order.client_order_id\n )\n )\n\n 
self.taker_market.trigger_event(\n MarketEvent.SellOrderCreated,\n SellOrderCreatedEvent(\n self.start_timestamp + 5,\n OrderType.LIMIT,\n ask_order.trading_pair,\n ask_order.quantity,\n ask_order.price,\n ask_order.client_order_id\n )\n )\n\n self.assertEqual(Decimal(\"0.99451\"), bid_order.price)\n self.assertEqual(Decimal(\"1.0055\"), ask_order.price)\n self.assertEqual(Decimal(\"3.0\"), bid_order.quantity)\n self.assertEqual(Decimal(\"3.0\"), ask_order.quantity)\n\n self.simulate_order_book_widening(self.taker_market.order_books[self.taker_trading_pairs[0]], 0.99, 1.01)\n\n self.clock.backtest_til(self.start_timestamp + 100)\n\n self.assertEqual(2, len(self.cancel_order_logger.event_log))\n self.assertEqual(1, len(self.strategy_with_top_depth_tolerance.active_bids))\n self.assertEqual(1, len(self.strategy_with_top_depth_tolerance.active_asks))\n\n bid_order = self.strategy_with_top_depth_tolerance.active_bids[0][1]\n ask_order = self.strategy_with_top_depth_tolerance.active_asks[0][1]\n self.assertEqual(Decimal(\"0.98457\"), bid_order.price)\n self.assertEqual(Decimal(\"1.0155\"), ask_order.price)\n\n def test_market_became_wider(self):\n self.clock.backtest_til(self.start_timestamp + 5)\n\n bid_order: LimitOrder = self.strategy.active_bids[0][1]\n ask_order: LimitOrder = self.strategy.active_asks[0][1]\n self.assertEqual(Decimal(\"0.99451\"), bid_order.price)\n self.assertEqual(Decimal(\"1.0055\"), ask_order.price)\n self.assertEqual(Decimal(\"3.0\"), bid_order.quantity)\n self.assertEqual(Decimal(\"3.0\"), ask_order.quantity)\n\n self.taker_market.trigger_event(\n MarketEvent.BuyOrderCreated,\n BuyOrderCreatedEvent(\n self.start_timestamp + 5,\n OrderType.LIMIT,\n bid_order.trading_pair,\n bid_order.quantity,\n bid_order.price,\n bid_order.client_order_id\n )\n )\n\n self.taker_market.trigger_event(\n MarketEvent.SellOrderCreated,\n SellOrderCreatedEvent(\n self.start_timestamp + 5,\n OrderType.LIMIT,\n ask_order.trading_pair,\n ask_order.quantity,\n ask_order.price,\n ask_order.client_order_id\n )\n )\n\n self.simulate_order_book_widening(self.taker_market.order_books[self.taker_trading_pairs[0]], 0.99, 1.01)\n\n self.clock.backtest_til(self.start_timestamp + 100)\n\n self.assertEqual(2, len(self.cancel_order_logger.event_log))\n self.assertEqual(1, len(self.strategy.active_bids))\n self.assertEqual(1, len(self.strategy.active_asks))\n\n bid_order = self.strategy.active_bids[0][1]\n ask_order = self.strategy.active_asks[0][1]\n self.assertEqual(Decimal(\"0.98457\"), bid_order.price)\n self.assertEqual(Decimal(\"1.0155\"), ask_order.price)\n\n def test_market_became_narrower(self):\n self.clock.backtest_til(self.start_timestamp + 5)\n bid_order: LimitOrder = self.strategy.active_bids[0][1]\n ask_order: LimitOrder = self.strategy.active_asks[0][1]\n self.assertEqual(Decimal(\"0.99451\"), bid_order.price)\n self.assertEqual(Decimal(\"1.0055\"), ask_order.price)\n self.assertEqual(Decimal(\"3.0\"), bid_order.quantity)\n self.assertEqual(Decimal(\"3.0\"), ask_order.quantity)\n\n self.maker_market.order_books[self.maker_trading_pairs[0]].apply_diffs(\n [OrderBookRow(0.996, 30, 2)], [OrderBookRow(1.004, 30, 2)], 2)\n\n self.clock.backtest_til(self.start_timestamp + 10)\n self.assertEqual(0, len(self.cancel_order_logger.event_log))\n self.assertEqual(1, len(self.strategy.active_bids))\n self.assertEqual(1, len(self.strategy.active_asks))\n\n bid_order = self.strategy.active_bids[0][1]\n ask_order = self.strategy.active_asks[0][1]\n self.assertEqual(Decimal(\"0.99451\"), bid_order.price)\n 
self.assertEqual(Decimal(\"1.0055\"), ask_order.price)\n\n def test_order_fills_after_cancellation(self): # TODO\n self.clock.backtest_til(self.start_timestamp + 5)\n bid_order: LimitOrder = self.strategy.active_bids[0][1]\n ask_order: LimitOrder = self.strategy.active_asks[0][1]\n self.assertEqual(Decimal(\"0.99451\"), bid_order.price)\n self.assertEqual(Decimal(\"1.0055\"), ask_order.price)\n self.assertEqual(Decimal(\"3.0\"), bid_order.quantity)\n self.assertEqual(Decimal(\"3.0\"), ask_order.quantity)\n\n self.taker_market.trigger_event(\n MarketEvent.BuyOrderCreated,\n BuyOrderCreatedEvent(\n self.start_timestamp + 5,\n OrderType.LIMIT,\n bid_order.trading_pair,\n bid_order.quantity,\n bid_order.price,\n bid_order.client_order_id\n )\n )\n\n self.taker_market.trigger_event(\n MarketEvent.SellOrderCreated,\n SellOrderCreatedEvent(\n self.start_timestamp + 5,\n OrderType.LIMIT,\n ask_order.trading_pair,\n ask_order.quantity,\n ask_order.price,\n ask_order.client_order_id\n )\n )\n\n self.simulate_order_book_widening(self.taker_market.order_books[self.taker_trading_pairs[0]], 0.99, 1.01)\n\n self.clock.backtest_til(self.start_timestamp + 10)\n\n self.assertEqual(2, len(self.cancel_order_logger.event_log))\n self.assertEqual(1, len(self.strategy.active_bids))\n self.assertEqual(1, len(self.strategy.active_asks))\n\n bid_order = self.strategy.active_bids[0][1]\n ask_order = self.strategy.active_asks[0][1]\n self.assertEqual(Decimal(\"0.98457\"), bid_order.price)\n self.assertEqual(Decimal(\"1.0155\"), ask_order.price)\n\n self.clock.backtest_til(self.start_timestamp + 20)\n self.simulate_limit_order_fill(self.maker_market, bid_order)\n self.simulate_limit_order_fill(self.maker_market, ask_order)\n\n self.clock.backtest_til(self.start_timestamp + 25)\n fill_events: List[OrderFilledEvent] = self.taker_order_fill_logger.event_log\n bid_hedges: List[OrderFilledEvent] = [evt for evt in fill_events if evt.trade_type is TradeType.SELL]\n ask_hedges: List[OrderFilledEvent] = [evt for evt in fill_events if evt.trade_type is TradeType.BUY]\n self.assertEqual(1, len(bid_hedges))\n self.assertEqual(1, len(ask_hedges))\n self.assertGreater(\n self.maker_market.get_balance(self.maker_trading_pairs[2]) + self.taker_market.get_balance(self.taker_trading_pairs[2]),\n Decimal(\"10\"),\n )\n self.assertEqual(2, len(self.taker_order_fill_logger.event_log))\n taker_fill1: OrderFilledEvent = self.taker_order_fill_logger.event_log[0]\n self.assertEqual(TradeType.SELL, taker_fill1.trade_type)\n self.assertAlmostEqual(Decimal(\"0.9895\"), taker_fill1.price)\n self.assertAlmostEqual(Decimal(\"3.0\"), taker_fill1.amount)\n taker_fill2: OrderFilledEvent = self.taker_order_fill_logger.event_log[1]\n self.assertEqual(TradeType.BUY, taker_fill2.trade_type)\n self.assertAlmostEqual(Decimal(\"1.0105\"), taker_fill2.price)\n self.assertAlmostEqual(Decimal(\"3.0\"), taker_fill2.amount)\n\n def test_with_conversion(self):\n self.clock.remove_iterator(self.strategy)\n self.market_pair: CrossExchangeMarketPair = CrossExchangeMarketPair(\n MarketTradingPairTuple(self.maker_market, *[\"COINALPHA-QETH\", \"COINALPHA\", \"QETH\"]),\n MarketTradingPairTuple(self.taker_market, *self.taker_trading_pairs),\n )\n self.maker_market.set_balanced_order_book(\"COINALPHA-QETH\", 1.05, 0.55, 1.55, 0.01, 10)\n self.strategy: CrossExchangeMarketMakingStrategy = CrossExchangeMarketMakingStrategy()\n self.strategy.init_params(\n [self.market_pair], Decimal(\"0.01\"),\n order_size_portfolio_ratio_limit=Decimal(\"0.3\"),\n 
logging_options=self.logging_options,\n taker_to_maker_base_conversion_rate=Decimal(\"0.95\")\n )\n self.clock.add_iterator(self.strategy)\n self.clock.backtest_til(self.start_timestamp + 5)\n self.assertEqual(1, len(self.strategy.active_bids))\n self.assertEqual(1, len(self.strategy.active_asks))\n bid_order: LimitOrder = self.strategy.active_bids[0][1]\n ask_order: LimitOrder = self.strategy.active_asks[0][1]\n self.assertAlmostEqual(Decimal(\"1.0417\"), round(bid_order.price, 4))\n self.assertAlmostEqual(Decimal(\"1.0636\"), round(ask_order.price, 4))\n self.assertAlmostEqual(Decimal(\"2.9286\"), round(bid_order.quantity, 4))\n self.assertAlmostEqual(Decimal(\"2.9286\"), round(ask_order.quantity, 4))\n\n def test_maker_price(self):\n buy_taker_price: Decimal = round(self.strategy.get_effective_hedging_price(self.market_pair, False, 3), 4)\n sell_taker_price: Decimal = round(self.strategy.get_effective_hedging_price(self.market_pair, True, 3), 4)\n price_quantum = Decimal(\"0.0001\")\n self.assertEqual(Decimal(\"1.0004\"), buy_taker_price)\n self.assertEqual(Decimal(\"0.9995\"), sell_taker_price)\n self.clock.backtest_til(self.start_timestamp + 5)\n bid_order: LimitOrder = self.strategy.active_bids[0][1]\n ask_order: LimitOrder = self.strategy.active_asks[0][1]\n bid_maker_price = sell_taker_price * (1 - self.min_profitbality)\n bid_maker_price = (floor(bid_maker_price / price_quantum)) * price_quantum\n ask_maker_price = buy_taker_price * (1 + self.min_profitbality)\n ask_maker_price = (ceil(ask_maker_price / price_quantum) * price_quantum)\n self.assertEqual(bid_maker_price, round(bid_order.price, 4))\n self.assertEqual(ask_maker_price, round(ask_order.price, 4))\n self.assertEqual(Decimal(\"3.0\"), bid_order.quantity)\n self.assertEqual(Decimal(\"3.0\"), ask_order.quantity)\n\n def test_with_adjust_orders_enabled(self):\n self.clock.remove_iterator(self.strategy)\n self.clock.remove_iterator(self.maker_market)\n self.maker_market: MockPaperExchange = MockPaperExchange()\n self.maker_market.set_balanced_order_book(self.maker_trading_pairs[0], 1.0, 0.5, 1.5, 0.1, 10)\n self.market_pair: CrossExchangeMarketPair = CrossExchangeMarketPair(\n MarketTradingPairTuple(self.maker_market, *self.maker_trading_pairs),\n MarketTradingPairTuple(self.taker_market, *self.taker_trading_pairs),\n )\n self.strategy: CrossExchangeMarketMakingStrategy = CrossExchangeMarketMakingStrategy()\n self.strategy.init_params(\n [self.market_pair],\n order_size_portfolio_ratio_limit=Decimal(\"0.3\"),\n min_profitability=Decimal(\"0.005\"),\n logging_options=self.logging_options,\n )\n self.maker_market.set_balance(\"COINALPHA\", 5)\n self.maker_market.set_balance(\"WETH\", 5)\n self.maker_market.set_balance(\"QETH\", 5)\n self.maker_market.set_quantization_param(QuantizationParams(self.maker_trading_pairs[0], 4, 4, 4, 4))\n self.clock.add_iterator(self.strategy)\n self.clock.add_iterator(self.maker_market)\n self.clock.backtest_til(self.start_timestamp + 5)\n self.assertEqual(1, len(self.strategy.active_bids))\n self.assertEqual(1, len(self.strategy.active_asks))\n bid_order: LimitOrder = self.strategy.active_bids[0][1]\n ask_order: LimitOrder = self.strategy.active_asks[0][1]\n # place above top bid (at 0.95)\n self.assertAlmostEqual(Decimal(\"0.9500\"), bid_order.price)\n # place below top ask (at 1.05)\n self.assertAlmostEqual(Decimal(\"1.049\"), ask_order.price)\n self.assertAlmostEqual(Decimal(\"3\"), round(bid_order.quantity, 4))\n self.assertAlmostEqual(Decimal(\"3\"), round(ask_order.quantity, 4))\n\n def 
test_with_adjust_orders_disabled(self):\n self.clock.remove_iterator(self.strategy)\n self.clock.remove_iterator(self.maker_market)\n self.maker_market: MockPaperExchange = MockPaperExchange()\n\n self.maker_market.set_balanced_order_book(self.maker_trading_pairs[0], 1.0, 0.5, 1.5, 0.1, 10)\n self.taker_market.set_balanced_order_book(self.taker_trading_pairs[0], 1.0, 0.5, 1.5, 0.001, 20)\n self.market_pair: CrossExchangeMarketPair = CrossExchangeMarketPair(\n MarketTradingPairTuple(self.maker_market, *self.maker_trading_pairs),\n MarketTradingPairTuple(self.taker_market, *self.taker_trading_pairs),\n )\n self.strategy: CrossExchangeMarketMakingStrategy = CrossExchangeMarketMakingStrategy()\n self.strategy.init_params(\n [self.market_pair],\n order_size_portfolio_ratio_limit=Decimal(\"0.3\"),\n min_profitability=Decimal(\"0.005\"),\n logging_options=self.logging_options,\n adjust_order_enabled=False\n )\n self.maker_market.set_balance(\"COINALPHA\", 5)\n self.maker_market.set_balance(\"WETH\", 5)\n self.maker_market.set_balance(\"QETH\", 5)\n self.maker_market.set_quantization_param(QuantizationParams(self.maker_trading_pairs[0], 4, 4, 4, 4))\n self.clock.add_iterator(self.strategy)\n self.clock.add_iterator(self.maker_market)\n self.clock.backtest_til(self.start_timestamp + 5)\n self.assertEqual(1, len(self.strategy.active_bids))\n self.assertEqual(1, len(self.strategy.active_asks))\n bid_order: LimitOrder = self.strategy.active_bids[0][1]\n ask_order: LimitOrder = self.strategy.active_asks[0][1]\n self.assertEqual(Decimal(\"0.9945\"), bid_order.price)\n self.assertEqual(Decimal(\"1.006\"), ask_order.price)\n self.assertAlmostEqual(Decimal(\"3\"), round(bid_order.quantity, 4))\n self.assertAlmostEqual(Decimal(\"3\"), round(ask_order.quantity, 4))\n\n def test_price_and_size_limit_calculation(self):\n self.taker_market.set_balanced_order_book(self.taker_trading_pairs[0], 1.0, 0.5, 1.5, 0.001, 20)\n bid_size = self.strategy.get_market_making_size(self.market_pair, True)\n bid_price = self.strategy.get_market_making_price(self.market_pair, True, bid_size)\n ask_size = self.strategy.get_market_making_size(self.market_pair, False)\n ask_price = self.strategy.get_market_making_price(self.market_pair, False, ask_size)\n self.assertEqual((Decimal(\"0.99451\"), Decimal(\"3\")), (bid_price, bid_size))\n self.assertEqual((Decimal(\"1.0055\"), Decimal(\"3\")), (ask_price, ask_size))\n\n def test_price_and_size_limit_calculation_with_slippage_buffer(self):\n self.taker_market.set_balance(\"ETH\", 3)\n self.taker_market.set_balanced_order_book(\n self.taker_trading_pairs[0],\n mid_price=Decimal(\"1.0\"),\n min_price=Decimal(\"0.5\"),\n max_price=Decimal(\"1.5\"),\n price_step_size=Decimal(\"0.1\"),\n volume_step_size=Decimal(\"100\"),\n )\n self.strategy: CrossExchangeMarketMakingStrategy = CrossExchangeMarketMakingStrategy()\n self.strategy.init_params(\n [self.market_pair],\n order_size_taker_volume_factor=Decimal(\"1\"),\n order_size_taker_balance_factor=Decimal(\"1\"),\n order_size_portfolio_ratio_limit=Decimal(\"1\"),\n min_profitability=Decimal(\"0.25\"),\n logging_options=self.logging_options,\n slippage_buffer=Decimal(\"0\"),\n order_amount=Decimal(\"4\"),\n )\n strategy_with_slippage_buffer: CrossExchangeMarketMakingStrategy = CrossExchangeMarketMakingStrategy()\n strategy_with_slippage_buffer.init_params(\n [self.market_pair],\n order_size_taker_volume_factor=Decimal(\"1\"),\n order_size_taker_balance_factor=Decimal(\"1\"),\n order_size_portfolio_ratio_limit=Decimal(\"1\"),\n 
min_profitability=Decimal(\"0.25\"),\n logging_options=self.logging_options,\n slippage_buffer=Decimal(\"0.25\"),\n order_amount=Decimal(\"4\"),\n )\n\n bid_size = self.strategy.get_market_making_size(self.market_pair, True)\n bid_price = self.strategy.get_market_making_price(self.market_pair, True, bid_size)\n ask_size = self.strategy.get_market_making_size(self.market_pair, False)\n ask_price = self.strategy.get_market_making_price(self.market_pair, False, ask_size)\n slippage_bid_size = strategy_with_slippage_buffer.get_market_making_size(self.market_pair, True)\n slippage_bid_price = strategy_with_slippage_buffer.get_market_making_price(\n self.market_pair, True, slippage_bid_size\n )\n slippage_ask_size = strategy_with_slippage_buffer.get_market_making_size(self.market_pair, False)\n slippage_ask_price = strategy_with_slippage_buffer.get_market_making_price(\n self.market_pair, False, slippage_ask_size\n )\n\n self.assertEqual(Decimal(\"4\"), bid_size) # the user size\n self.assertEqual(Decimal(\"0.75999\"), bid_price) # price = bid_VWAP(4) / profitability = 0.95 / 1.25\n self.assertEqual(Decimal(\"2.8571\"), ask_size) # size = balance / (ask_VWAP(3) * slippage) = 3 / (1.05 * 1)\n self.assertEqual(Decimal(\"1.3125\"), ask_price) # price = ask_VWAP(2.8571) * profitability = 1.05 * 1.25\n self.assertEqual(Decimal(\"4\"), slippage_bid_size) # the user size\n self.assertEqual(Decimal(\"0.75999\"), slippage_bid_price) # price = bid_VWAP(4) / profitability = 0.9 / 1.25\n self.assertEqual(Decimal(\"2.2857\"), slippage_ask_size) # size = balance / (ask_VWAP(3) * slippage) = 3 / (1.05 * 1.25)\n self.assertEqual(Decimal(\"1.3125\"), slippage_ask_price) # price = ask_VWAP(2.2857) * profitability = 1.05 * 1.25\n\n def test_check_if_sufficient_balance_adjusts_including_slippage(self):\n self.taker_market.set_balance(\"COINALPHA\", 4)\n self.taker_market.set_balance(\"ETH\", 3)\n self.taker_market.set_balanced_order_book(\n self.taker_trading_pairs[0],\n mid_price=Decimal(\"1.0\"),\n min_price=Decimal(\"0.5\"),\n max_price=Decimal(\"1.5\"),\n price_step_size=Decimal(\"0.1\"),\n volume_step_size=Decimal(\"1\"),\n )\n strategy_with_slippage_buffer: CrossExchangeMarketMakingStrategy = CrossExchangeMarketMakingStrategy()\n strategy_with_slippage_buffer.init_params(\n [self.market_pair],\n order_size_taker_volume_factor=Decimal(\"1\"),\n order_size_taker_balance_factor=Decimal(\"1\"),\n order_size_portfolio_ratio_limit=Decimal(\"1\"),\n min_profitability=Decimal(\"0.25\"),\n logging_options=self.logging_options,\n slippage_buffer=Decimal(\"0.25\"),\n order_amount=Decimal(\"4\"),\n )\n self.clock.remove_iterator(self.strategy)\n self.clock.add_iterator(strategy_with_slippage_buffer)\n self.clock.backtest_til(self.start_timestamp + 1)\n\n active_bids = strategy_with_slippage_buffer.active_bids\n active_asks = strategy_with_slippage_buffer.active_asks\n\n self.assertEqual(1, len(active_bids))\n self.assertEqual(1, len(active_asks))\n\n active_bid = active_bids[0][1]\n active_ask = active_asks[0][1]\n\n self.emit_order_created_event(self.maker_market, active_bid)\n self.emit_order_created_event(self.maker_market, active_ask)\n\n self.clock.backtest_til(self.start_timestamp + 2)\n\n active_bids = strategy_with_slippage_buffer.active_bids\n active_asks = strategy_with_slippage_buffer.active_asks\n\n self.assertEqual(1, len(active_bids))\n self.assertEqual(1, len(active_asks))\n\n active_bid = active_bids[0][1]\n active_ask = active_asks[0][1]\n bids_quantum = self.taker_market.get_order_size_quantum(\n 
self.taker_trading_pairs[0], active_bid.quantity\n )\n asks_quantum = self.taker_market.get_order_size_quantum(\n self.taker_trading_pairs[0], active_ask.quantity\n )\n\n self.taker_market.set_balance(\"COINALPHA\", Decimal(\"4\") - bids_quantum)\n self.taker_market.set_balance(\"ETH\", Decimal(\"3\") - asks_quantum * 1)\n\n self.clock.backtest_til(self.start_timestamp + 3)\n active_bids = strategy_with_slippage_buffer.active_bids\n active_asks = strategy_with_slippage_buffer.active_asks\n\n self.assertEqual(0, len(active_bids)) # cancelled\n self.assertEqual(0, len(active_asks)) # cancelled\n\n self.clock.backtest_til(self.start_timestamp + 4)\n\n new_active_bids = strategy_with_slippage_buffer.active_bids\n new_active_asks = strategy_with_slippage_buffer.active_asks\n\n self.assertEqual(1, len(new_active_bids))\n self.assertEqual(1, len(new_active_asks))\n\n new_active_bid = new_active_bids[0][1]\n new_active_ask = new_active_asks[0][1]\n\n self.assertEqual(Decimal(str(active_bid.quantity - bids_quantum)), new_active_bid.quantity)\n self.assertEqual(Decimal(str(active_ask.quantity - asks_quantum)), new_active_ask.quantity)\n\n def test_empty_maker_orderbook(self):\n self.clock.remove_iterator(self.strategy)\n self.clock.remove_iterator(self.maker_market)\n self.maker_market: MockPaperExchange = MockPaperExchange()\n\n # Orderbook is empty\n self.maker_market.new_empty_order_book(self.maker_trading_pairs[0])\n self.market_pair: CrossExchangeMarketPair = CrossExchangeMarketPair(\n MarketTradingPairTuple(self.maker_market, *self.maker_trading_pairs),\n MarketTradingPairTuple(self.taker_market, *self.taker_trading_pairs),\n )\n self.strategy: CrossExchangeMarketMakingStrategy = CrossExchangeMarketMakingStrategy()\n self.strategy.init_params(\n [self.market_pair],\n order_amount=1,\n min_profitability=Decimal(\"0.005\"),\n logging_options=self.logging_options,\n adjust_order_enabled=False\n )\n self.maker_market.set_balance(\"COINALPHA\", 5)\n self.maker_market.set_balance(\"WETH\", 5)\n self.maker_market.set_balance(\"QETH\", 5)\n self.maker_market.set_quantization_param(QuantizationParams(self.maker_trading_pairs[0], 4, 4, 4, 4))\n self.clock.add_iterator(self.strategy)\n self.clock.add_iterator(self.maker_market)\n self.clock.backtest_til(self.start_timestamp + 5)\n self.assertEqual(1, len(self.strategy.active_bids))\n self.assertEqual(1, len(self.strategy.active_asks))\n bid_order: LimitOrder = self.strategy.active_bids[0][1]\n ask_order: LimitOrder = self.strategy.active_asks[0][1]\n # Places orders based on taker orderbook\n self.assertEqual(Decimal(\"0.9945\"), bid_order.price)\n self.assertEqual(Decimal(\"1.006\"), ask_order.price)\n self.assertAlmostEqual(Decimal(\"1\"), round(bid_order.quantity, 4))\n self.assertAlmostEqual(Decimal(\"1\"), round(ask_order.quantity, 4))\n"
] | [
[
"pandas.Timestamp"
]
] |
springcoil/lime | [
"fb1288ad50989d2effd569cb7f6b526a56d5c0a7"
] | [
"lime/lime_tabular.py"
] | [
"\"\"\"\nFunctions for explaining classifiers that use tabular data (matrices).\n\"\"\"\nimport collections\nimport json\nimport copy\nimport numpy as np\nimport sklearn\nimport sklearn.preprocessing\nfrom . import lime_base\nfrom . import explanation\n\n\nclass TableDomainMapper(explanation.DomainMapper):\n \"\"\"Maps feature ids to names, generates table views, etc\"\"\"\n def __init__(self, feature_names, feature_values, scaled_row,\n categorical_features, discretized_feature_names=None):\n \"\"\"Init.\n\n Args:\n feature_names: list of feature names, in order\n feature_values: list of strings with the values of the original row\n scaled_row: scaled row\n categorical_features: list of categorical features ids (ints)\n \"\"\"\n self.exp_feature_names = feature_names\n self.discretized_feature_names = discretized_feature_names\n self.feature_names = feature_names\n self.feature_values = feature_values\n self.scaled_row = scaled_row\n self.all_categorical = len(categorical_features) == len(scaled_row)\n self.categorical_features = categorical_features\n\n def map_exp_ids(self, exp):\n \"\"\"Maps ids to feature names.\n\n Args:\n exp: list of tuples [(id, weight), (id,weight)]\n\n Returns:\n list of tuples (feature_name, weight)\n \"\"\"\n names = self.exp_feature_names\n if self.discretized_feature_names is not None:\n names = self.discretized_feature_names\n return [(names[x[0]], x[1]) for x in exp]\n\n def visualize_instance_html(self,\n exp,\n label,\n div_name,\n exp_object_name,\n show_table=True,\n show_all=False):\n \"\"\"Shows the current example in a table format.\n\n Args:\n exp: list of tuples [(id, weight), (id,weight)]\n label: label id (integer)\n div_name: name of div object to be used for rendering(in js)\n exp_object_name: name of js explanation object\n show_table: if False, don't show table visualization.\n show_all: if True, show zero-weighted features in the table.\n \"\"\"\n if not show_table:\n return ''\n weights = [0] * len(self.feature_names)\n for x in exp:\n weights[x[0]] = x[1]\n out_list = list(zip(self.exp_feature_names, self.feature_values,\n weights))\n if not show_all:\n out_list = [out_list[x[0]] for x in exp]\n ret = u'''\n %s.show_raw_tabular(%s, %d, %s);\n ''' % (exp_object_name, json.dumps(out_list), label, div_name)\n return ret\n\n\nclass LimeTabularExplainer(object):\n \"\"\"Explains predictions on tabular (i.e. matrix) data.\n For numerical features, perturb them by sampling from a Normal(0,1) and\n doing the inverse operation of mean-centering and scaling, according to the\n means and stds in the training data. For categorical features, perturb by\n sampling according to the training distribution, and making a binary\n feature that is 1 when the value is the same as the instance being\n explained.\"\"\"\n def __init__(self, training_data, feature_names=None,\n categorical_features=None, categorical_names=None,\n kernel_width=None, verbose=False, class_names=None,\n feature_selection='auto', discretize_continuous=True):\n \"\"\"Init function.\n\n Args:\n training_data: numpy 2d array\n feature_names: list of names (strings) corresponding to the columns\n in the training data.\n categorical_features: list of indices (ints) corresponding to the\n categorical columns. Everything else will be considered\n continuous. 
Values in these columns MUST be integers.\n categorical_names: map from int to list of names, where\n categorical_names[x][y] represents the name of the yth value of\n column x.\n kernel_width: kernel width for the exponential kernel.\n If None, defaults to sqrt(number of columns) * 0.75\n verbose: if true, print local prediction values from linear model\n class_names: list of class names, ordered according to whatever the\n classifier is using. If not present, class names will be '0',\n '1', ...\n feature_selection: feature selection method. can be\n 'forward_selection', 'lasso_path', 'none' or 'auto'.\n See function 'explain_instance_with_data' in lime_base.py for\n details on what each of the options does.\n discretize_continuous: if True, all non-categorical features will\n be discretized into quartiles.\n \"\"\"\n self.categorical_names = categorical_names\n self.categorical_features = categorical_features\n if self.categorical_names is None:\n self.categorical_names = {}\n if self.categorical_features is None:\n self.categorical_features = []\n self.discretizer = None\n if discretize_continuous:\n self.discretizer = QuartileDiscretizer(training_data,\n self.categorical_features,\n feature_names)\n self.categorical_features = range(training_data.shape[1])\n discretized_training_data = self.discretizer.discretize(\n training_data)\n\n if kernel_width is None:\n kernel_width = np.sqrt(training_data.shape[1]) * .75\n kernel_width = float(kernel_width)\n\n def kernel(d): return np.sqrt(np.exp(-(d**2) / kernel_width ** 2))\n self.feature_selection = feature_selection\n self.base = lime_base.LimeBase(kernel, verbose)\n self.scaler = None\n self.class_names = class_names\n self.feature_names = feature_names\n self.scaler = sklearn.preprocessing.StandardScaler(with_mean=False)\n self.scaler.fit(training_data)\n self.feature_values = {}\n self.feature_frequencies = {}\n\n for feature in self.categorical_features:\n feature_count = collections.defaultdict(lambda: 0.0)\n column = training_data[:, feature]\n if self.discretizer is not None:\n column = discretized_training_data[:, feature]\n feature_count[0] = 0.\n feature_count[1] = 0.\n feature_count[2] = 0.\n feature_count[3] = 0.\n for value in column:\n feature_count[value] += 1\n values, frequencies = map(list, zip(*(feature_count.items())))\n self.feature_values[feature] = values\n self.feature_frequencies[feature] = (np.array(frequencies) /\n sum(frequencies))\n self.scaler.mean_[feature] = 0\n self.scaler.scale_[feature] = 1\n\n def explain_instance(self, data_row, classifier_fn, labels=(1,),\n top_labels=None, num_features=10, num_samples=5000,\n distance_metric='euclidean', model_regressor=None):\n \"\"\"Generates explanations for a prediction.\n\n First, we generate neighborhood data by randomly perturbing features\n from the instance (see __data_inverse). We then learn locally weighted\n linear models on this neighborhood data to explain each of the classes\n in an interpretable way (see lime_base.py).\n\n Args:\n data_row: 1d numpy array, corresponding to a row\n classifier_fn: classifier prediction probability function, which\n takes a string and outputs prediction probabilities. 
For\n ScikitClassifiers , this is classifier.predict_proba.\n labels: iterable with labels to be explained.\n top_labels: if not None, ignore labels and produce explanations for\n the K labels with highest prediction probabilities, where K is\n this parameter.\n num_features: maximum number of features present in explanation\n num_samples: size of the neighborhood to learn the linear model\n distance_metric: the distance metric to use for weights.\n model_regressor: sklearn regressor to use in explanation. Defaults\n to Ridge regression in LimeBase. Must have model_regressor.coef_\n and 'sample_weight' as a parameter to model_regressor.fit()\n\n Returns:\n An Explanation object (see explanation.py) with the corresponding\n explanations.\n \"\"\"\n data, inverse = self.__data_inverse(data_row, num_samples)\n scaled_data = (data - self.scaler.mean_) / self.scaler.scale_\n\n distances = sklearn.metrics.pairwise_distances(\n scaled_data,\n scaled_data[0].reshape(1, -1),\n metric=distance_metric\n ).ravel()\n\n yss = classifier_fn(inverse)\n if self.class_names is None:\n self.class_names = [str(x) for x in range(yss[0].shape[0])]\n else:\n self.class_names = list(self.class_names)\n feature_names = copy.deepcopy(self.feature_names)\n if feature_names is None:\n feature_names = [str(x) for x in range(data_row.shape[0])]\n\n def round_stuff(x): return ['%.2f' % a for a in x]\n values = round_stuff(data_row)\n for i in self.categorical_features:\n if self.discretizer is not None and i in self.discretizer.lambdas:\n continue\n name = int(data_row[i])\n if i in self.categorical_names:\n name = self.categorical_names[i][name]\n feature_names[i] = '%s=%s' % (feature_names[i], name)\n values[i] = 'True'\n categorical_features = self.categorical_features\n discretized_feature_names = None\n if self.discretizer is not None:\n categorical_features = range(data.shape[1])\n discretized_instance = self.discretizer.discretize(data_row)\n discretized_feature_names = copy.deepcopy(feature_names)\n for f in self.discretizer.names:\n discretized_feature_names[f] = self.discretizer.names[f][int(\n discretized_instance[f])]\n\n domain_mapper = TableDomainMapper(\n feature_names, values, scaled_data[0],\n categorical_features=categorical_features,\n discretized_feature_names=discretized_feature_names)\n ret_exp = explanation.Explanation(domain_mapper=domain_mapper,\n class_names=self.class_names)\n ret_exp.predict_proba = yss[0]\n if top_labels:\n labels = np.argsort(yss[0])[-top_labels:]\n ret_exp.top_labels = list(labels)\n ret_exp.top_labels.reverse()\n for label in labels:\n (ret_exp.intercept[label],\n ret_exp.local_exp[label],\n ret_exp.score) = self.base.explain_instance_with_data(\n scaled_data, yss, distances, label, num_features,\n model_regressor=model_regressor,\n feature_selection=self.feature_selection)\n return ret_exp\n\n def __data_inverse(self,\n data_row,\n num_samples):\n \"\"\"Generates a neighborhood around a prediction.\n\n For numerical features, perturb them by sampling from a Normal(0,1) and\n doing the inverse operation of mean-centering and scaling, according to\n the means and stds in the training data. 
For categorical features,\n perturb by sampling according to the training distribution, and making\n a binary feature that is 1 when the value is the same as the instance\n being explained.\n\n Args:\n data_row: 1d numpy array, corresponding to a row\n num_samples: size of the neighborhood to learn the linear model\n\n Returns:\n A tuple (data, inverse), where:\n data: dense num_samples * K matrix, where categorical features\n are encoded with either 0 (not equal to the corresponding value\n in data_row) or 1. The first row is the original instance.\n inverse: same as data, except the categorical features are not\n binary, but categorical (as the original data)\n \"\"\"\n data = np.zeros((num_samples, data_row.shape[0]))\n categorical_features = range(data_row.shape[0])\n if self.discretizer is None:\n data = np.random.normal(\n 0, 1, num_samples * data_row.shape[0]).reshape(\n num_samples, data_row.shape[0])\n data = data * self.scaler.scale_ + self.scaler.mean_\n categorical_features = self.categorical_features\n first_row = data_row\n else:\n first_row = self.discretizer.discretize(data_row)\n data[0] = data_row.copy()\n inverse = data.copy()\n for column in categorical_features:\n values = self.feature_values[column]\n freqs = self.feature_frequencies[column]\n inverse_column = np.random.choice(values, size=num_samples,\n replace=True, p=freqs)\n binary_column = np.array([1 if x == first_row[column]\n else 0 for x in inverse_column])\n binary_column[0] = 1\n inverse_column[0] = data[0, column]\n data[:, column] = binary_column\n inverse[:, column] = inverse_column\n if self.discretizer is not None:\n inverse[1:] = self.discretizer.undiscretize(inverse[1:])\n inverse[0] = data_row\n return data, inverse\n\n\nclass QuartileDiscretizer:\n \"\"\"Discretizes data into quartiles.\"\"\"\n def __init__(self, data, categorical_features, feature_names):\n \"\"\"Initializer\n\n Args:\n data: numpy 2d array\n categorical_features: list of indices (ints) corresponding to the\n categorical columns. 
These features will not be discretized.\n Everything else will be considered continuous, and will be\n discretized.\n categorical_names: map from int to list of names, where\n categorical_names[x][y] represents the name of the yth value of\n column x.\n feature_names: list of names (strings) corresponding to the columns\n in the training data.\n \"\"\"\n to_discretize = ([x for x in range(data.shape[1])\n if x not in categorical_features])\n self.names = {}\n self.lambdas = {}\n self.ranges = {}\n self.means = {}\n self.stds = {}\n self.mins = {}\n self.maxs = {}\n for feature in to_discretize:\n qts = np.percentile(data[:, feature], [25, 50, 75])\n boundaries = np.min(data[:, feature]), np.max(data[:, feature])\n name = feature_names[feature]\n self.names[feature] = (\n ['%s <= %.2f' % (name, qts[0]),\n '%.2f < %s <= %.2f' % (qts[0], name, qts[1]),\n '%.2f < %s <= %.2f' % (qts[1], name, qts[2]),\n '%s > %.2f' % (name, qts[2])])\n self.lambdas[feature] = lambda x, qts=qts: np.searchsorted(qts, x)\n discretized = self.lambdas[feature](data[:, feature])\n self.means[feature] = []\n self.stds[feature] = []\n for x in range(4):\n selection = data[discretized == x, feature]\n mean = 0 if len(selection) == 0 else np.mean(selection)\n self.means[feature].append(mean)\n std = 0 if len(selection) == 0 else np.std(selection)\n std += 0.00000000001\n self.stds[feature].append(std)\n self.mins[feature] = [boundaries[0], qts[0], qts[1], qts[2]]\n self.maxs[feature] = [qts[0], qts[1], qts[2], boundaries[1]]\n\n def discretize(self, data):\n \"\"\"Discretizes the data.\n\n Args:\n data: numpy 2d or 1d array\n\n Returns:\n numpy array of same dimension, discretized.\n \"\"\"\n ret = data.copy()\n for feature in self.lambdas:\n if len(data.shape) == 1:\n ret[feature] = int(self.lambdas[feature](ret[feature]))\n else:\n ret[:, feature] = self.lambdas[feature](\n ret[:, feature]).astype(int)\n return ret\n\n def undiscretize(self, data):\n ret = data.copy()\n for feature in self.means:\n mins = self.mins[feature]\n maxs = self.maxs[feature]\n means = self.means[feature]\n stds = self.stds[feature]\n\n def get_inverse(q): return max(\n mins[q],\n min(np.random.normal(means[q], stds[q]), maxs[q]))\n if len(data.shape) == 1:\n q = int(ret[feature])\n ret[feature] = get_inverse(q)\n else:\n ret[:, feature] = (\n [get_inverse(int(x)) for x in ret[:, feature]])\n return ret\n"
] | [
[
"numpy.sqrt",
"numpy.zeros",
"numpy.searchsorted",
"numpy.argsort",
"numpy.random.choice",
"numpy.exp",
"numpy.random.normal",
"numpy.max",
"numpy.min",
"sklearn.preprocessing.StandardScaler",
"numpy.std",
"numpy.array",
"numpy.percentile",
"numpy.mean"
]
] |
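A minimal usage sketch for the LimeTabularExplainer defined in the row above, assuming scikit-learn's iris data and a RandomForestClassifier purely for illustration; only the constructor arguments, the explain_instance call, and the local_exp attribute printed at the end mirror what the source actually shows.

# Hypothetical usage of the LimeTabularExplainer shown above; the model and data
# are illustrative assumptions, not part of the dataset row.
import sklearn.datasets
import sklearn.ensemble

from lime.lime_tabular import LimeTabularExplainer

iris = sklearn.datasets.load_iris()
clf = sklearn.ensemble.RandomForestClassifier(n_estimators=100, random_state=0)
clf.fit(iris.data, iris.target)

explainer = LimeTabularExplainer(
    iris.data,                            # numpy 2d training array
    feature_names=iris.feature_names,
    class_names=iris.target_names,
    discretize_continuous=True,           # quartile discretization, as in the source
)
exp = explainer.explain_instance(
    iris.data[0],                         # 1d row to explain
    clf.predict_proba,                    # classifier_fn, per the docstring above
    num_features=2,
)
# local_exp[label] holds (feature id, weight) pairs, as assigned in explain_instance
print(exp.local_exp[1])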
sdonatti/nnabla | [
"ac4a42e62dd358f16bd79c08a9a9f3d83c0100c9"
] | [
"python/src/nnabla/experimental/graph_converters/batch_normalization_linear.py"
] | [
"import nnabla as nn\nimport numpy as np\n\nfrom .identity import IdentityConverter\nfrom .helpers import GraphInfo\n\n\nclass BatchNormalizationLinearConverter(IdentityConverter):\n \"\"\"\n The parameters of the batch normalization replaced simple scale and bias.\n\n Args:\n black_list (list): Black list of the function list.\n params (:obj:`OrderedDict`): Result of nn.get_parameters().\n name (:obj:`str`): Prefix of the parameter scope.\n\n \"\"\"\n\n def __init__(self,\n black_list=[], params=None,\n name=\"bn-linear\"):\n super(BatchNormalizationLinearConverter, self).__init__(black_list,\n params, name)\n\n def convert(self, vroot, entry_variables):\n \"\"\"\n All functions are replaced with the same `new` function.\n\n Args:\n vroot (:obj:`Variable`): NNabla Variable\n entry_variables (:obj:`Variable`): Entry variable from which the conversion starts.\n \"\"\"\n self.graph_info = GraphInfo(vroot)\n self.entry_variables = entry_variables\n\n cnt = 0\n with nn.parameter_scope(self.name):\n # Function loop in the forward order\n for t, func in enumerate(self.graph_info.funcs):\n if func.name == \"BatchNormalization\":\n bn_func = func\n # TODO: should deal with both?\n if bn_func.info.args[\"batch_stat\"] == False:\n o = self._bn_linear_conversion(bn_func, cnt)\n cnt += 1\n continue\n # Identity conversion\n o = self._identity_conversion(func)\n\n self.end_variable = o\n return self.end_variable\n\n def _bn_linear_conversion(self, bn_func, cnt):\n # Conversion\n eps_data = bn_func.info.args[\"eps\"]\n beta_data = np.squeeze(bn_func.inputs[1].d)\n gamma_data = np.squeeze(bn_func.inputs[2].d)\n mean_data = np.squeeze(bn_func.inputs[3].d)\n var_data = np.squeeze(bn_func.inputs[4].d)\n sigma_data = np.sqrt(var_data + eps_data)\n c0_data = gamma_data / sigma_data\n c1_data = beta_data - (gamma_data * mean_data) / sigma_data\n # Reshape\n oshape = bn_func.inputs[1].shape\n c0_data = c0_data.reshape(oshape)\n c1_data = c1_data.reshape(oshape)\n\n # Inputs\n x = bn_func.inputs[0]\n x = self.input_map[x] if x in self.input_map else x\n\n c0 = nn.parameter.get_parameter_or_create(\"c0-{}-{}\".format(self.name, cnt),\n c0_data.shape, c0_data)\n c1 = nn.parameter.get_parameter_or_create(\"c1-{}-{}\".format(self.name, cnt),\n c1_data.shape, c1_data)\n\n # Function call\n o = c0 * x + c1\n\n # Map output of ref graph to output of new graph\n x = bn_func.outputs[0]\n self.input_map[x] = o\n\n # Store output (just in case)\n self.outputs.append(o)\n\n return o\n"
] | [
[
"numpy.sqrt",
"numpy.squeeze"
]
] |
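The converter above folds inference-mode batch normalization (the batch_stat == False branch) into a single scale and bias: c0 = gamma / sqrt(var + eps) and c1 = beta - gamma * mean / sqrt(var + eps). A NumPy-only check of that identity, with synthetic shapes and values, looks like this:

# Verifies the BN-to-linear folding used in _bn_linear_conversion above.
# All tensors are synthetic; only the algebra mirrors the source.
import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=(8, 4))             # batch of 8 samples, 4 channels
gamma = rng.normal(size=4)
beta = rng.normal(size=4)
mean = rng.normal(size=4)               # running mean (batch_stat=False case)
var = rng.uniform(0.5, 2.0, size=4)     # running variance
eps = 1e-5

# Reference: inference-mode batch normalization
bn = gamma * (x - mean) / np.sqrt(var + eps) + beta

# Folded form y = c0 * x + c1, exactly as computed by the converter
sigma = np.sqrt(var + eps)
c0 = gamma / sigma
c1 = beta - (gamma * mean) / sigma

assert np.allclose(bn, c0 * x + c1)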
Oichii/resnet3D_pulse | [
"d123abfdb14eedc972ab1e0c4c3026fe8c4074af"
] | [
"ResNet_test.py"
] | [
"import os\nimport glob\nimport json\nimport torch\nimport torchvision.transforms as transforms\nimport torch.nn as nn\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.signal import resample, find_peaks\nfrom pulse_sampler import PulseSampler\nfrom pulse_dataset import PulseDataset\nfrom NegPearsonLoss import NegPearson\nfrom ResNet_model import generate_model\nfrom scipy.stats import pearsonr\nimport heartpy as hp\nfrom utils import butter_bandpass_filter, psnr\n\nimport pandas as pd\n\n\nfor i in range(3, 30):\n resume = 'save_temp/transfer_3d_{}.tar'.format(i)\n print(\"initialize model {} ...\".format(i))\n\n seq_len = 32\n\n model = generate_model(34)\n\n model = torch.nn.DataParallel(model)\n model.cuda()\n ss = sum(p.numel() for p in model.parameters())\n print('num params: ', ss)\n if os.path.isfile(resume):\n print(\"=> loading checkpoint '{}'\".format(resume))\n checkpoint = torch.load(resume)\n best_prec1 = checkpoint['best_prec1']\n model.load_state_dict(checkpoint['state_dict'])\n print(\"=> loaded checkpoint (epoch {})\".format(checkpoint['epoch']))\n else:\n print(\"=> no checkpoint found at '{}'\".format(resume))\n\n sequence_list = \"sequence_test.txt\"\n root_dir = 'E:/Datasets_PULSE/set_all/'\n seq_list = []\n end_indexes_test = []\n with open(sequence_list, 'r') as seq_list_file:\n for line in seq_list_file:\n seq_list.append(line.rstrip('\\n'))\n\n # seq_list = ['test_static']\n for s in seq_list:\n sequence_dir = os.path.join(root_dir, s)\n if sequence_dir[-2:len(sequence_dir)] == '_1':\n fr_list = glob.glob(sequence_dir[0:-2] + '/cropped/*.png')\n fr_list = fr_list[0:len(fr_list) // 2]\n elif sequence_dir[-2:len(sequence_dir)] == '_2':\n fr_list = glob.glob(sequence_dir[0:-2] + '/cropped/*.png')\n fr_list = fr_list[len(fr_list) // 2: len(fr_list)]\n else:\n if os.path.exists(sequence_dir + '/cropped/'):\n fr_list = glob.glob(sequence_dir + '/cropped/*.png')\n else:\n fr_list = glob.glob(sequence_dir + '/*.png')\n # print(fr_list)\n end_indexes_test.append(len(fr_list))\n\n end_indexes_test = [0, *end_indexes_test]\n # print(end_indexes_test)\n\n sampler_test = PulseSampler(end_indexes_test, seq_len, False)\n\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n pulse_test = PulseDataset(sequence_list, root_dir, seq_len=seq_len,\n length=len(sampler_test), transform=transforms.Compose([\n transforms.ToTensor(),\n normalize]))\n val_loader = torch.utils.data.DataLoader(pulse_test, batch_size=1, shuffle=False, sampler=sampler_test, pin_memory=True)\n\n model.eval()\n criterion = NegPearson()\n criterion2 = nn.MSELoss()\n\n\n criterion = criterion.cuda()\n\n outputs = []\n reference_ = []\n loss_avg = []\n loss_avg2 = []\n import time\n start = time.time()\n for k, (net_input, target) in enumerate(val_loader):\n net_input = net_input.cuda(non_blocking=True)\n # target = target.squeeze()\n target = target.cuda(non_blocking=True)\n\n with torch.no_grad():\n output = model(net_input)\n\n outputs.append(output[0])\n reference_.append(target[0])\n\n end = time.time()\n print(end-start, len(val_loader))\n\n outputs = torch.cat(outputs)\n\n outputs = (outputs - torch.mean(outputs)) / torch.std(outputs)\n # outputs = (outputs - torch.min(outputs)) / (torch.max(outputs) - torch.min(outputs))\n outputs = outputs.tolist()\n\n reference_ = torch.cat(reference_)\n # reference_ = (reference_ - torch.min(reference_)) / (torch.max(reference_) - torch.min(reference_))\n reference_ = 
(reference_-torch.mean(reference_))/torch.std(reference_)\n reference_ = reference_.tolist()\n\n fs = 30\n lowcut = 1\n highcut = 3\n import pandas as pd\n\n yr = butter_bandpass_filter(outputs, lowcut, highcut, fs, order=4)\n yr = (yr - np.mean(yr)) / np.std(yr)\n # plt.plot(yr, alpha=0.5, label='filtered')\n\n # restored=[]\n # for i in range(len(outputs)-1):\n # restored.append(outputs[i]+outputs[i+1])\n # # print(outputs)\n # import pandas as pd\n # ppg = pd.read_csv(root_dir+'01-01_orginal' + '.txt', sep='\\t')\n # ref_ppg = ppg.loc[:, 'waveform']\n # ref_ppg = resample(ref_ppg, len(fr_list))\n # ref_ppg = (ref_ppg - np.mean(ref_ppg)) /np.std(ref_ppg)\n # print(outputs)\n # print(reference_)\n # plt.subplot(121)\n\n plt.subplots_adjust(right=0.7)\n plt.plot(outputs, alpha=0.7, label='wyjście\\n sieci')\n # plt.plot(yr, label='wyjście\\n sieci po filtracji')\n plt.plot(reference_, '--', label='referencja\\n PPG')\n\n # plt.plot(ref_ppg)\n plt.legend(bbox_to_anchor=(1.02, 1), loc='upper left', borderaxespad=0, fontsize='large')\n # plt.legend(bbox_to_anchor=(0.5, -0.20), loc='upper center', borderaxespad=0, fontsize='large', ncol=3)\n # plt.title('model ze splotem trójwymiarowym'.format(i))\n # plt.plot(restored)\n plt.ylabel('Amplituda', fontsize='large', fontweight='semibold')\n plt.xlabel('Czas [próbka]', fontsize='large', fontweight='semibold')\n plt.grid()\n plt.xlim([350, 550])\n plt.ylim([-2, 3])\n\n plt.savefig('3d.svg', bbox_inches='tight')\n plt.show()\n reference_ = np.array(reference_)\n outputs = np.array(outputs)\n res = pd.DataFrame({'output': yr, 'ref': reference_})\n print(res)\n res.to_csv('splot_spa-temp.csv')\n\n # outputs = (outputs - np.min(outputs)) / (np.max(outputs) - np.min(outputs))\n\n bpm_ref = []\n bpm_out = []\n bmmp_filt = []\n bpm_out2 = []\n hrv_ref = []\n hrv_out = []\n\n win = 255\n for i in range(win, len(reference_), win):\n peaks, _ = find_peaks(reference_[i:i+win], distance=20, height=0.9)\n peaks_out, _ = find_peaks(yr[i:i + win], height=0.95)\n # plt.plot(outputs[i:i+win])\n # plt.plot(yr[i:i + win])\n # plt.plot(peaks_out, outputs[i:i+win][peaks_out], \"x\")\n # plt.show()\n # print(len(peaks_out), len(peaks))\n\n _, measures2 = hp.process(reference_[i:i+win], 30.0)\n bpm_ref.append(30/(win/len(peaks))*win)\n bmmp_filt.append(measures2['bpm'])\n # print(measures2)\n hrv_ref.append(measures2['rmssd'])\n # print(measures2['bpm'], 30/(256/len(peaks))*60, 30/(256/len(peaks_out))*60)\n # print(measures2)\n # _, mm = hp.process(yr[i:i+256], 30.0)\n _, mmm = hp.process(yr[i:i + win], 30.0)\n # print(mm)\n bpm_out.append(mmm['bpm'])\n bpm_out2.append(30/(win/len(peaks_out))*win)\n hrv_out.append(mmm['rmssd'])\n\n plt.plot(bpm_out, label='output')\n plt.plot(bpm_ref, label='referencja')\n plt.plot(bmmp_filt, label='ref2')\n plt.plot(bpm_out2, label='out2')\n\n plt.legend()\n plt.show()\n\n corr, _ = pearsonr(bmmp_filt, bpm_out)\n c = np.corrcoef(bmmp_filt, bpm_out)\n cc = np.corrcoef(bpm_ref, bpm_out2)\n ccc = np.corrcoef(bmmp_filt, bpm_out2)\n print('korelacja pulsu:', c, cc, ccc)\n\n # corr_ = np.correlate(reference_, outputs, 'full')\n # przes = np.argmax(corr_)\n # # cc = pearsonr(reference_[:len(outputs[przes:])], outputs[przes:])\n # # ccc = pearsonr(reference_, yr)\n # # print('korelacja sygnalow po przesunięciu:', cc, ccc)\n # plt.plot(reference_[:len(outputs[przes:])])\n # plt.plot(outputs[przes:])\n # plt.show()\n # corr2, _ = pearsonr(reference_[:len(outputs[przes:])], outputs[przes:])\n # cc = np.corrcoef(reference_[:len(outputs[przes:])], 
outputs[przes:])\n # print('korelacja sygnalow bez przesunięcia', corr2, cc)\n\n plt.subplots_adjust(right=0.7)\n time = np.arange(0, 3, 1 / fs)\n fourier_transform = np.fft.rfft(outputs)\n abs_fourier_transform = np.abs(fourier_transform)\n power_spectrum = np.square(abs_fourier_transform)\n frequency = np.linspace(0, fs / 2, len(power_spectrum))\n plt.semilogy(frequency, power_spectrum, label='wyjście\\n sieci')\n\n fourier_transform = np.fft.rfft(reference_)\n abs_fourier_transform = np.abs(fourier_transform)\n power_spectrum = np.square(abs_fourier_transform)\n plt.xlim(-0.1, 10)\n plt.ylim(10e-6, 10e6)\n plt.semilogy(frequency, power_spectrum, label='referencja\\n PPG')\n plt.ylabel('|A(f)|', fontsize='large', fontweight='semibold')\n plt.xlabel('Częstotliwość f [Hz]', fontsize='large', fontweight='semibold')\n plt.title('Częstitliwościowe widmo mocy')\n plt.legend(bbox_to_anchor=(1.02, 1), loc='upper left', borderaxespad=0)\n plt.show()\n\n reference_ = torch.tensor(reference_)\n outputs = torch.tensor(outputs)\n # yr = torch.tensor(yr)\n pp = psnr(reference_, outputs)\n print('psnr', pp)\n\n criterionMSE = nn.MSELoss()\n criterionMAE = nn.L1Loss()\n mse = criterionMSE(reference_, outputs)\n rmse = torch.sqrt(mse)\n mae = criterionMAE(reference_, outputs)\n print(outputs.shape)\n se = torch.std(outputs-reference_)/np.sqrt(outputs.shape[0])\n print(mae, mse, rmse, se)\n print(hrv_out, hrv_ref)\n o = np.mean(hrv_out)\n r = np.mean(hrv_ref)\n err = abs(o-r)/r\n print(err)\n print()\n o = np.mean(bpm_out2)\n r = np.mean(bpm_ref)\n print(bpm_out2, bpm_ref)\n err = abs(o - r) / r\n print(err)\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.std",
"torch.nn.L1Loss",
"torch.no_grad",
"torch.sqrt",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.plot",
"torch.cat",
"matplotlib.pyplot.savefig",
"numpy.abs",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.title",
"torch.nn.DataParallel",
"numpy.fft.rfft",
"numpy.mean",
"numpy.corrcoef",
"numpy.sqrt",
"torch.mean",
"scipy.stats.pearsonr",
"matplotlib.pyplot.semilogy",
"torch.load",
"torch.tensor",
"numpy.arange",
"matplotlib.pyplot.ylim",
"numpy.std",
"numpy.square",
"matplotlib.pyplot.legend",
"torch.nn.MSELoss",
"matplotlib.pyplot.grid",
"pandas.DataFrame",
"scipy.signal.find_peaks",
"matplotlib.pyplot.show",
"numpy.array",
"matplotlib.pyplot.xlabel"
]
] |
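The evaluation script above turns the network output into a pulse estimate by band-pass filtering it to 1-3 Hz at 30 fps and counting peaks per 255-sample window. The sketch below isolates that post-processing on a synthetic signal, substituting scipy.signal.butter/filtfilt for the repository's butter_bandpass_filter helper and using the plain peaks-per-second conversion to beats per minute:

# Stand-in for the BPM post-processing in ResNet_test.py above. The input signal is
# synthetic, and scipy.signal.butter/filtfilt replace the repo's helper function.
import numpy as np
from scipy.signal import butter, filtfilt, find_peaks

fs = 30                                   # frames per second, as in the script
t = np.arange(0, 60, 1 / fs)              # one minute of "video"
signal = np.sin(2 * np.pi * 1.2 * t)      # 1.2 Hz pulse -> 72 bpm ground truth
signal = signal + 0.3 * np.random.default_rng(0).normal(size=t.shape)

# 4th-order Butterworth band-pass between 1 and 3 Hz (60-180 bpm)
b, a = butter(4, [1.0, 3.0], btype="band", fs=fs)
filtered = filtfilt(b, a, signal)
filtered = (filtered - np.mean(filtered)) / np.std(filtered)

win = 255                                 # samples per analysis window, as in the script
for start in range(0, len(filtered) - win, win):
    segment = filtered[start:start + win]
    peaks, _ = find_peaks(segment, distance=fs // 3)
    bpm = len(peaks) / (win / fs) * 60    # peaks per second, scaled to a minute
    print(f"window at sample {start}: ~{bpm:.0f} bpm")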
will-hossack/Poptics | [
"4093876e158eb16421dfd4e57818210b11381429"
] | [
"examples/analysis/SpotDiagramExample.py"
] | [
"\"\"\"\n Example Programme to for a Spot Digram using high level SpotAnalysis class\n\n\n\"\"\"\n\nfrom poptics.lens import DataBaseLens\nfrom poptics.vector import Angle,Unit3d\nfrom poptics.wavelength import getDefaultWavelength\nfrom poptics.analysis import SpotAnalysis\nfrom poptics.tio import getFloat\nimport matplotlib.pyplot as plt\n\ndef main():\n\n # Get lens from database \n lens = DataBaseLens() \n \n # Get angle of beam and wavelnegth \n angle =getFloat(\"Angle in degrees\",0.0,0.0,15.0)\n u = Unit3d(Angle().setDegrees(angle)) # Angle as unit vectr\n wave = getFloat(\"Wavelength\",getDefaultWavelength())\n\n\n # Get optimal area psf and create a SpotDiagram \n sd = SpotAnalysis(lens,u,0,wavelength = wave)\n\n # Go round loop plotting the sopt diagram as various zplane positions\n\n while True:\n zp = getFloat(\"Delta\",0.0)\n \n sd.draw(zp, True)\n pos = sd.plane.getPoint().z \n print(\"Plane pos \" + str(pos))\n plt.title(\"Spot diagram\")\n plt.show(block = True)\n \n\nmain()\n"
] | [
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.show"
]
] |
Iolaum/Gauss | [
"3b4873f7e21a0e5a0074646438e398f3c3ba3868"
] | [
"scripts/ali/ali01_testing/aa01_non_numerical.py"
] | [
"# coding: utf-8\n__author__ = 'Antonis'\n\n'''\nPython script for Kaggle Competition\n\nThis script focuses on reading the train dataset and getting information for non-numerical values.\nTo run the script you need to have the \"train.csv\" file inside the /dataset folder in the project root.\n'''\n\nimport pandas as pd\nimport numpy as np\nimport warnings\n\nwarnings.filterwarnings('ignore', 'numpy not_equal will not check object identity in the future')\n\n# Get the data from reading the training set\ndata = pd.read_csv('../dataset/train.csv', sep=',', na_values='.') # read csv file, seperated by ;, na values exists\n\n# Find and print the names of all non-numerical features.\nprint(\"Export the features with non-numerical data\")\n\nnon_numerical = []\n\n# data is of pandas's type Dataframe. It is a table that consists columns and rows.\nfor column in data:\n # column_series is of pandas type Series ( One-dimensional ndarray with axis labels)\n column_series = data[column]\n\n # dtype is property of a Series. It declares the data type of the values inside it.\n if column_series.dtype not in ['int64', 'float64']:\n first_item = column_series.iloc[0]\n # Detect NaN values(they are calculated as Objects with type \"float\")\n if type(first_item) is not float:\n non_numerical.append(column)\nprint(\"\")\nprint(non_numerical)\n\nnon_numerical_values = {}\n\nfor column in data:\n column_series = data[column]\n\n if column_series.dtype not in ['int64', 'float64']:\n first_item = column_series.iloc[0]\n\n if type(first_item) is not float:\n if column not in non_numerical_values:\n non_numerical_values[column] = np.unique(column_series.values)\n\nprint(\"\")\nfor item in non_numerical_values:\n print(\"\")\n print(item)\n print(non_numerical_values[item])\n print(\"\")\n# print(non_numerical_values)"
] | [
[
"pandas.read_csv",
"numpy.unique"
]
] |
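The script above flags non-numerical columns by checking each Series' dtype and then collects their unique values with np.unique. The core of that logic on a small synthetic DataFrame (standing in for train.csv, which is not available here), plus the equivalent pandas select_dtypes shortcut:

# Toy reproduction of aa01_non_numerical.py above; the DataFrame is synthetic.
import numpy as np
import pandas as pd

data = pd.DataFrame({
    "Id": [1, 2, 3, 4],
    "V1": [0.5, 1.2, 3.3, 2.1],
    "V2": ["B", "B", "N", "B"],            # non-numerical column
    "V3": ["C", "A", "A", "C"],            # non-numerical column
})

non_numerical = [column for column in data
                 if data[column].dtype not in ["int64", "float64"]]
print(non_numerical)                        # ['V2', 'V3']

non_numerical_values = {column: np.unique(data[column].values)
                        for column in non_numerical}
print(non_numerical_values)

# Equivalent shortcut built into pandas:
print(list(data.select_dtypes(exclude="number").columns))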
SnorlaxSE/handtracking-Application | [
"5af03d9a1797040274ca2a306a7873cd48ea12af"
] | [
"detectVideo.py"
] | [
"from PyQt5 import QtWidgets, QtCore, QtGui\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtCore import QTimer\nimport qdarkstyle\nimport sys\nimport time\n \nfrom utils import detector_utils as detector_utils\nimport tensorflow as tf\nimport datetime\nimport argparse\nimport numpy as np\nimport cv2\nimport os\nimport pdb \n\nfrom postProcess.new_cut_start_end_video import *\n\n \ndetection_graph, sess = detector_utils.load_inference_graph()\n\nclass VideoBox(QWidget):\n \n STATUS_INIT = 0\n STATUS_PLAYING = 1\n STATUS_PAUSE = 2\n\n def __init__(self, video_url=\"\", cutVideoDir=\"\", score_thresh=0.5, fps=25, crop=False, crop_rate=0.8):\n\n super(VideoBox, self).__init__()\n self.playCaptureState = False\n self.video_url = video_url\n self.status = self.STATUS_INIT # 0: INIT 1:PLAYING 2: PAUSE\n self.crop=crop # 如果视频中放下的手仍出现在视野中,crop=True\n self.crop_rate = crop_rate\n self.playCapture = cv2.VideoCapture()\n\n self.scores_list = []\n self.scoresList = [] # save scores_list like Container, prevent from PlayReset() 重置 scores_list\n self.num_frames = 0\n self.srcVideo = \"\"\n self.cutVideoDir = cutVideoDir\n self.score_thresh = score_thresh\n self.start_time = datetime.datetime.now()\n\n self.fps = fps\n # self.im_width, self.im_height = size[0], size[1]\n\n # max number of hands we want to detect/track\n self.num_hands_detect = 2\n\n desktop = QApplication.desktop()\n # print(\"屏幕宽:\" + str(desktop.width()))\n # print(\"屏幕高:\" + str(desktop.height()))\n \n # 窗口框\n win_height = desktop.height() / 1.6\n win_width = desktop.width() / 1.6\n self.resize(win_width, win_height) # width height\n self.setFixedSize(win_width, win_height)\n self.setWindowTitle(\"Hand Detection\")\n\n # 状态label\n self.stateTextEdit = QTextEdit(self)\n self.stateTextEdit.setText(\"Waiting for detectiong...\")\n self.stateTextEdit.setAlignment(Qt.AlignLeft)\n stateTextEdit_height = win_height / 1.1\n stateTextEdit_width = stateTextEdit_height / 2\n self.stateTextEdit.setFixedSize(stateTextEdit_width, stateTextEdit_height) # width height\n # self.stateTextEdit.move(50, 500)\n self.stateTextEdit.setFocusPolicy(QtCore.Qt.NoFocus)\n self.stateTextEdit.moveCursor(QTextCursor.End)\n\n # 帧label\n frameLabel_width = win_width - stateTextEdit_width - 50 \n frameLabel_height = win_height / 1.2\n self.frameLabel = QLabel(self)\n self.frameLabel.setFixedSize(frameLabel_width, frameLabel_height) # width height\n # self.frameLabel.move(10, 10)\n self.init_image = QPixmap(\"src/cat.jpeg\").scaled(self.frameLabel.width(), self.frameLabel.height())\n self.frameLabel.setPixmap(self.init_image)\n\n # 开启视频按键\n self.playButton = QPushButton(self)\n self.playButton.setText(\"Open\")\n self.playButton.setIcon(self.style().standardIcon(QStyle.SP_MediaPlay))\n # self.playButton.move(100, 570)\n self.playButton.clicked.connect(self.slotStart)\n\n # 暂停视频按钮\n self.pauseButton = QPushButton(self)\n self.pauseButton.setText(\"Pause\")\n self.pauseButton.setIcon(self.style().standardIcon(QStyle.SP_MediaPause))\n # self.pauseButton.move(300, 570)\n self.pauseButton.setEnabled(False)\n self.pauseButton.clicked.connect(self.slotPause)\n \n # 停止视频按钮\n self.stopButton = QPushButton(self)\n self.stopButton.setText(\"Stop\")\n self.stopButton.setIcon(self.style().standardIcon(QStyle.SP_MediaStop))\n # self.stopButton.move(500, 570)\n self.stopButton.setEnabled(False)\n self.stopButton.clicked.connect(self.slotStop)\n\n # 裁剪按钮\n self.cutButton = QPushButton(self)\n self.cutButton.setText(\"Cut\")\n 
self.cutButton.setIcon(self.style().standardIcon(QStyle.SP_DialogCloseButton))\n # self.cutButton.move(700, 570)\n self.cutButton.setEnabled(False)\n self.cutButton.clicked.connect(self.slotCut)\n\n frameBox = QVBoxLayout()\n # frameBox.addStretch()\n frameBox.addWidget(self.frameLabel)\n # frameBox.addStretch()\n\n controlBox = QHBoxLayout()\n controlBox.addWidget(self.playButton)\n controlBox.addWidget(self.pauseButton)\n controlBox.addWidget(self.stopButton)\n controlBox.addWidget(self.cutButton)\n\n ViewBox = QVBoxLayout()\n ViewBox.addLayout(frameBox)\n ViewBox.addLayout(controlBox)\n\n layout = QHBoxLayout()\n layout.addLayout(ViewBox)\n layout.addWidget(self.stateTextEdit)\n\n self.setLayout(layout)\n\n # timer 设置\n self.timer = QTimer() #定义定时器\n\n\n def slotStart(self):\n \"\"\" \n Slot function to start the progamme\n \"\"\"\n if self.video_url == \"\":\n info = QMessageBox.information(self,'information', 'Choose a video to Play.', QMessageBox.Yes | QMessageBox.Yes)\n self.video_url, _ = QFileDialog.getOpenFileName(self, \"Open\", \"\", \"*.mp4;;*.MTS;;*.avi;;All Files(*)\")\n\n if not self.video_url == \"\" and os.path.isfile(self.video_url): # \"\"为用户点击取消(cancel)\n\n # Set score_thresh\n clothes_type, ok = QInputDialog.getText(self, 'Advanced', \"演示者穿'短袖' or '长袖':\")\n print(clothes_type, ok)\n if \"长\" in clothes_type:\n self.score_thresh = 0.2\n else:\n self.score_thresh = 0.4\n \n # Set crop\n \n crop_info, ok = QInputDialog.getText(self, 'Advanced', \"演示者手腕处是否始终出现在画面: 'True' or 'False'\")\n print(crop_info, ok)\n if \"true\" in crop_info.lower():\n self.crop = True\n\n # Set crop_rate\n def set_crop_frame():\n crop_rate, ok = QInputDialog.getText(self, 'Advanced', \"预估裁剪比例,0-1,如 '0.8':\")\n print(crop_rate, ok)\n if ok:\n\n try:\n crop_rate = float(crop_rate)\n\n if 0 >= crop_rate or crop_rate > 1:\n set_crop_frame()\n else:\n self.crop_rate = crop_rate\n\n except:\n set_crop_frame()\n\n \n set_crop_frame()\n else:\n self.crop = False\n\n # Reset Something\n self.stateTextEdit.setText(\"Waiting for detectiong...\")\n self.status = VideoBox.STATUS_INIT\n\n self.playCapture.open(self.video_url)\n self.fps = get_video_info(self.video_url)[0]\n # print(\"self.fps\", self.fps)\n self.timer.start(1000/self.fps) # 单位是毫秒,这点要注意,相当于时间每过xxx ms,timer的timeout()就会被触发一次\n self.timer.timeout.connect(self.showFrame)\n self.playCaptureState = True\n self.pauseButton.setEnabled(True)\n self.stopButton.setEnabled(True)\n\n # Reset Something (预防 \"pause\" 后 click \"open\")\n if self.scores_list != []:\n self.scoresList = self.scores_list\n self.scores_list = [] \n self.num_frames = 0\n self.start_time = datetime.datetime.now()\n self.cutButton.setEnabled(False)\n\n \n def slotPause(self):\n \"\"\"\n 点击\"Pause\" 触发的事件处理\n \"\"\"\n \n if self.status is VideoBox.STATUS_PAUSE or self.status is VideoBox.STATUS_INIT:\n # want to pause\n self.timer.stop()\n elif self.status is VideoBox.STATUS_PLAYING:\n # want to play\n self.timer.start(1000/self.fps)\n \n if not self.video_url == '': # 避免多次无效click 'open', 将self.video_url重置\n self.srcVideo = self.video_url # 避免self.video_url重置后,无法 cut\n\n self.video_url = \"\" # make sure 暂停状态 click 'open' 无异常\n\n self.status = (VideoBox.STATUS_PLAYING,\n VideoBox.STATUS_PAUSE,\n VideoBox.STATUS_PLAYING)[self.status]\n\n def slotStop(self):\n \"\"\"\n 点击\"Stop\" 触发的事件处理\n \"\"\"\n if self.playCaptureState:\n self.stateTextEdit.append(\"This video detection has been stopped.\")\n self.playReset()\n QMessageBox.information(self,'information',\"This video detection has 
been stopped.\", QMessageBox.Yes | QMessageBox.Yes)\n \n else:\n self.stateTextEdit.append(\"Please choose a video to Play.\")\n Warming = QMessageBox.about(self, \"About\", \"Please choose a video to show.\")\n\n\n def playReset(self):\n\n \"\"\"\n 仅 click 'stop' 、 play complete 调用\n \"\"\"\n \n self.timer.stop()\n self.playCapture.release()\n self.status = VideoBox.STATUS_INIT\n \n if self.video_url != '': # 避免多次无效click 'open', 将self.video_url重置\n self.srcVideo = self.video_url # 避免self.video_url重置后,无法 cut\n\n self.video_url = \"\" # make sure 暂停状态 click 'open' 无异常\n\n if self.scores_list != []:\n self.scoresList = self.scores_list\n\n self.scores_list = []\n self.playCaptureState = False\n self.pauseButton.setEnabled(False)\n self.stopButton.setEnabled(False)\n self.cutButton.setEnabled(True)\n\n\n def slotCut(self):\n \"\"\" \n 点击\"Cut\" 触发的事件处理\n\n only call when click 'stop' or play complete \n \"\"\"\n\n if self.scoresList == []:\n QMessageBox.information(self,'information',\"Detection Uncompleted.\", QMessageBox.Yes | QMessageBox.Yes)\n return\n\n if self.cutVideoDir == '':\n info = QMessageBox.information(self,'information',\"Choose Output Folder.\", QMessageBox.Yes | QMessageBox.Yes)\n self.cutVideoDir = QtWidgets.QFileDialog.getExistingDirectory(self, \"getExistingDirectory\", \"./\") \n if self.cutVideoDir == '':\n print(\"self.cutVideoDir == '': \", self.cutVideoDir == '')\n info = QMessageBox.information(self,'information',\"The Output Folder Unselected.\", QMessageBox.Yes | QMessageBox.Yes)\n return\n\n # print(\"self.cutVideoDir: \", self.cutVideoDir)\n video_name = os.path.basename(self.srcVideo)\n\n # self.cutVideoDir = os.path.join('outputs', video_name)\n\n if not os.path.exists(self.cutVideoDir):\n os.makedirs(self.cutVideoDir)\n outputDir = os.path.basename(self.cutVideoDir)\n \n # Advanced Setting\n # actionGap, ok = QInputDialog.getText(self, 'Advanced', '动作间隔(一般情况为1,单位为s):')\n # print(actionGap, ok)\n\n self.frameLabel.setPixmap(self.init_image)\n self.playButton.setText(\"Wait\")\n self.playButton.setEnabled(False)\n self.pauseButton.setText(\"for\")\n self.pauseButton.setEnabled(False)\n self.stopButton.setText(\"cutting\")\n self.stopButton.setEnabled(False)\n self.cutButton.setText(\"...\")\n self.cutButton.setEnabled(False)\n\n self.stateTextEdit.append(\"Now start to cut the video \\n'{}'\".format(video_name)) \n QMessageBox.information(self, \"information\", \"Now start to cut the video {}. 
And the Outputs would save at '{}' Folder.\".format(video_name, outputDir), QMessageBox.Yes)\n\n import platform\n if platform.system() == \"Windows\":\n os.system(f\"start {self.cutVideoDir}\") \n elif platform.system() == \"Linux\":\n os.system(f\"nautilus {self.cutVideoDir}\") \n elif platform.system() == \"Darwin\":\n os.system(f\"open {self.cutVideoDir}\") \n # print(platform.system())\n\n prediction_structure_list = []\n for frame_info_dict in self.scoresList:\n for (frame_value, frame_info_value_dict) in frame_info_dict.items():\n if frame_info_value_dict['scores'] != []:\n prediction_structure_list.append(frame_info_dict)\n continue\n\n frame_list = []\n for frame_info_dict in prediction_structure_list:\n for (frame_value, _) in frame_info_dict.items():\n frame_list.append(frame_value)\n # print(\"len(prediction_structure_list): \", len(prediction_structure_list))\n # print(\"len(frame_list): \", len(frame_list))\n\n fps, size, total_frames, rate, total_duration = get_video_info(self.srcVideo) # size: (width, height)\n\n # try:\n # actionGap = float(actionGap)\n # except:\n # actionGap = 1\n actionGap = 1\n normal_frame_gap_threshold = fps * actionGap # 正常动作间隔\n short_frame_gap_threshold = 0.5 * fps * actionGap # 较短动作间隔\n post_frame_gap_threshold = 0.6 * fps * actionGap\n post_action_frame_threshold = 3.5 * fps * actionGap\n \n start_frame_list, end_frame_list, frame_gap_list, duration_list = pick_predict_frame_sections(prediction_structure_list, frame_list, normal_frame_gap_threshold, short_frame_gap_threshold, total_frames, self.im_width, self.im_height, blackBorder=False)\n print('*'*10)\n adaptive_start_frame_list, adaptive_end_frame_list, adaptive_duration_list = adaptive_frame_sections(start_frame_list, end_frame_list, frame_gap_list, duration_list, normal_frame_gap_threshold, total_frames, fps)\n print('**'*10)\n post_adapt_start_frame_list, post_adapt_end_frame_list, post_adapt_duration_frame_list = post_adapt(prediction_structure_list, adaptive_start_frame_list, adaptive_end_frame_list, adaptive_duration_list, post_frame_gap_threshold, post_action_frame_threshold, fps)\n print('***'*10)\n cut_video(self.srcVideo, self.cutVideoDir, post_adapt_start_frame_list, post_adapt_end_frame_list, post_adapt_duration_frame_list, fps, self.stateTextEdit)\n\n self.playButton.setText(\"Open\")\n self.playButton.setEnabled(True)\n self.pauseButton.setText(\"Pause\")\n self.pauseButton.setEnabled(False)\n self.stopButton.setText(\"Stop\")\n self.stopButton.setEnabled(False)\n self.cutButton.setText(\"Cut\")\n self.cutButton.setEnabled(False)\n\n self.stateTextEdit.append(\"Cut Completed.\") \n QMessageBox.about(self, \"About\", \"Cut Completed.\")\n self.cutButton.setEnabled(False)\n\n pass\n\n def showFrame(self):\n \"\"\" \n Slot function to capture frame and process it\n \"\"\"\n\n if self.playCapture.isOpened():\n ret, frame = self.playCapture.read()\n if ret:\n\n # crop frame\n if self.crop:\n # print(\"frame: \", frame.shape)\n frame = frame[:int(frame.shape[0]*self.crop_rate),:,:] # (height, width, bytesPerComponent)\n # print(\"frame: \", frame.shape, type(frame))\n\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n\n boxes, scores = detector_utils.detect_objects(frame, detection_graph, sess)\n\n # Calculate Frames per second (FPS)\n self.num_frames += 1\n elapsed_time = (datetime.datetime.now() - self.start_time).total_seconds()\n # fps = self.num_frames / elapsed_time\n\n score_index = np.where(scores>=self.score_thresh)\n 
self.scores_list.append({self.num_frames:{'scores':list(scores[scores>=self.score_thresh]), 'boxes':list(boxes[score_index]) } })\n\n height, width, bytesPerComponent = frame.shape\n self.im_height, self.im_width = height, width # update crop size \n bytesPerLine = bytesPerComponent * width\n\n # print(frame.data, width, height, bytesPerLine, QImage.Format_RGB888)\n q_image = QImage(frame.data, width, height, bytesPerLine,\n QImage.Format_RGB888).scaled(self.frameLabel.width(), self.frameLabel.height())\n\n # q_image = QImage(frame.data, width, height, bytesPerLine, QImage.Format_RGB888)\n self.frameLabel.setPixmap(QPixmap.fromImage(q_image))\n \n # print(\"frames processed: \", self.num_frames, \"elapsed time: \", elapsed_time, \" scores: \", scores[score_index]) # (100,)\n score = str(scores[score_index][:2])[1:-1]\n if score == '':\n score = \"0\"\n self.stateTextEdit.append(\"frame: {} scores: {}\".format( self.num_frames, score)) # (100,)\n self.stateTextEdit.moveCursor(QTextCursor.End)\n # pdb.set_trace()\n\n else:\n # 判断本地文件播放完毕\n self.stateTextEdit.append(\"Play Completed.\")\n QMessageBox.about(self,'About',\"Play Completed.\")\n self.playReset()\n\n return \n\n else:\n self.stateTextEdit.append(\"open file or capturing device error, try again.\")\n # Warming = QMessageBox.warning(self, \"Warming\", \"open file or capturing device error, try again.\", QMessageBox.Yes)\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-sth', '--scorethreshold', dest='score_thresh', type=float, default=0.5, help='Score threshold for displaying bounding boxes')\n parser.add_argument('-src', '--source', dest='video_source', default=\"\", help='Device index of the camera.')\n parser.add_argument('-wd', '--width',dest='width', type=int, default=720, help='Width of the frames in the video stream.')\n parser.add_argument( '-ht', '--height', dest='height', type=int, default=540, help='Height of the frames in the video stream.')\n parser.add_argument('-ds', '--display', dest='display', type=int, default=1, help='Display the detected images using OpenCV. This reduces FPS')\n parser.add_argument('-num-w', '--num-workers', dest='num_workers', type=int, default=4, help='Number of workers.')\n parser.add_argument( '-q-size', '--queue-size', dest='queue_size', type=int, default=5, help='Size of the queue.')\n parser.add_argument('-crop', '--crop', type=bool, default=False, help='wether crop or not')\n # parser.add_argument('-fps', '--fps', dest='fps', type=int, default=1, help='Show FPS on detection/display visualization')\n args = parser.parse_args()\n\n app = QtWidgets.QApplication(sys.argv)\n app.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())\n app.setWindowIcon(QIcon('./src/Gear.ico'))\n\n my = VideoBox(video_url=\"\", cutVideoDir=\"\", score_thresh=args.score_thresh, crop=args.crop)\n my.show()\n sys.exit(app.exec_())\n\n"
] | [
[
"numpy.where"
]
] |
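Per frame, showFrame above does two small array operations before rendering: optional cropping of the bottom of the image (frame[:int(height * crop_rate)]) and filtering detections whose score passes score_thresh via np.where. Stripped of Qt and TensorFlow, with a synthetic frame and synthetic detector output, that logic reduces to:

# The per-frame array handling from VideoBox.showFrame above, without Qt/TensorFlow.
# Frame contents and detector outputs are synthetic placeholders.
import numpy as np

crop = True
crop_rate = 0.8
score_thresh = 0.4

frame = np.zeros((540, 720, 3), dtype=np.uint8)              # (height, width, channels)
if crop:
    frame = frame[:int(frame.shape[0] * crop_rate), :, :]    # drop the bottom of the image
print(frame.shape)                                           # (432, 720, 3)

boxes = np.random.default_rng(0).uniform(0.0, 1.0, size=(5, 4))   # fake detections
scores = np.array([0.05, 0.62, 0.35, 0.48, 0.10])

score_index = np.where(scores >= score_thresh)
kept = {"scores": list(scores[scores >= score_thresh]),
        "boxes": list(boxes[score_index])}
print(kept["scores"])                                        # [0.62, 0.48]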
FedericoGarza/sktime | [
"b21cdd81453abd34c72b42d4b2273b49d29eba30"
] | [
"sktime/performance_metrics/forecasting/probabilistic/_classes.py"
] | [
"#!/usr/bin/env python3 -u\n# -*- coding: utf-8 -*-\n# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)\n\nimport numpy as np\n\n# TODO: add formal tests\nimport pandas as pd\nfrom sklearn.utils import check_array, check_consistent_length\n\nfrom sktime.datatypes import check_is_scitype, convert\nfrom sktime.performance_metrics.forecasting._classes import _BaseForecastingErrorMetric\n\n\nclass _BaseProbaForecastingErrorMetric(_BaseForecastingErrorMetric):\n \"\"\"Base class for probabilistic forecasting error metrics in sktime.\n\n Extends sktime's BaseMetric to the forecasting interface. Forecasting error\n metrics measure the error (loss) between forecasts and true values. Lower\n values are better.\n \"\"\"\n\n _tags = {\n \"scitype:y_pred\": \"pred_quantiles\",\n \"lower_is_better\": True,\n }\n\n def __init__(self, func=None, name=None, multioutput=\"uniform_average\"):\n self.multioutput = multioutput\n super().__init__(func, name=name)\n\n def __call__(self, y_true, y_pred, **kwargs):\n \"\"\"Calculate metric value using underlying metric function.\n\n Parameters\n ----------\n y_true : pd.Series, pd.DataFrame or np.array of shape (fh,) or \\\n (fh, n_outputs) where fh is the forecasting horizon\n Ground truth (correct) target values.\n\n y_pred : pd.Series, pd.DataFrame or np.array of shape (fh,) or \\\n (fh, n_outputs) where fh is the forecasting horizon\n Forecasted values.\n\n Returns\n -------\n loss : float\n Calculated loss metric.\n \"\"\"\n return self.evaluate(y_true, y_pred, multioutput=self.multioutput, **kwargs)\n\n def evaluate(self, y_true, y_pred, multioutput=None, **kwargs):\n \"\"\"Evaluate the desired metric on given inputs.\n\n Parameters\n ----------\n y_true : pd.Series, pd.DataFrame or np.array of shape (fh,) or \\\n (fh, n_outputs) where fh is the forecasting horizon\n Ground truth (correct) target values.\n\n y_pred : pd.Series, pd.DataFrame or np.array of shape (fh,) or \\\n (fh, n_outputs) where fh is the forecasting horizon\n Forecasted values.\n\n Returns\n -------\n loss : pd.DataFrame of shape (, n_outputs), calculated loss metric.\n \"\"\"\n # Input checks and conversions\n y_true_inner, y_pred_inner, multioutput = self._check_ys(\n y_true, y_pred, multioutput\n )\n # pass to inner function\n return self._evaluate(y_true_inner, y_pred_inner, multioutput, **kwargs)\n\n def _evaluate(self, y_true, y_pred, multioutput, **kwargs):\n # Default implementation relies on implementation of evaluate_by_index\n try:\n index_df = self._evaluate_by_index(y_true, y_pred, multioutput)\n return index_df.mean(axis=0)\n except RecursionError:\n RecursionError(\"Must implement one of _evaluate or _evaluate_by_index\")\n\n def evaluate_by_index(self, y_true, y_pred, multioutput=None, **kwargs):\n \"\"\"Return the metric evaluated at each time point.\n\n Parameters\n ----------\n y_true : pd.Series, pd.DataFrame or np.array of shape (fh,) or \\\n (fh, n_outputs) where fh is the forecasting horizon\n Ground truth (correct) target values.\n\n y_pred : pd.Series, pd.DataFrame or np.array of shape (fh,) or \\\n (fh, n_outputs) where fh is the forecasting horizon\n Forecasted values.\n\n Returns\n -------\n loss : pd.DataFrame of shape (fh, n_outputs), calculated loss metric.\n \"\"\"\n # Input checks and conversions\n y_true_inner, y_pred_inner, multioutput = self._check_ys(\n y_true, y_pred, multioutput\n )\n # pass to inner function\n return self._evaluate_by_index(y_true_inner, y_pred_inner, multioutput)\n\n def _evaluate_by_index(self, y_true, y_pred, 
multioutput, **kwargs):\n \"\"\"Logic for finding the metric evaluated at each index.\n\n By default this uses _evaluate to find jackknifed pseudosamples. This\n estimates the error at each of the time points.\n \"\"\"\n n = y_true.shape[0]\n out_series = pd.Series(index=y_pred.index)\n try:\n x_bar = self.evaluate(y_true, y_pred, multioutput, **kwargs)\n for i in range(n):\n out_series[i] = n * x_bar - (n - 1) * self.evaluate(\n np.vstack((y_true[:i, :], y_true[i + 1 :, :])),\n np.vstack((y_pred[:i, :], y_pred[i + 1 :, :])),\n multioutput,\n )\n return out_series\n except RecursionError:\n RecursionError(\"Must implement one of _evaluate or _evaluate_by_index\")\n\n def _check_consistent_input(self, y_true, y_pred, multioutput):\n check_consistent_length(y_true, y_pred)\n\n y_true = check_array(y_true, ensure_2d=False)\n\n if not isinstance(y_pred, pd.DataFrame):\n ValueError(\"y_pred should be a dataframe.\")\n\n if not all(y_pred.dtypes == float):\n ValueError(\"Data should be numeric.\")\n\n if y_true.ndim == 1:\n y_true = y_true.reshape((-1, 1))\n\n n_outputs = y_true.shape[1]\n\n allowed_multioutput_str = (\"raw_values\", \"uniform_average\", \"variance_weighted\")\n if isinstance(multioutput, str):\n if multioutput not in allowed_multioutput_str:\n raise ValueError(\n \"Allowed 'multioutput' string values are {}. \"\n \"You provided multioutput={!r}\".format(\n allowed_multioutput_str, multioutput\n )\n )\n elif multioutput is not None:\n multioutput = check_array(multioutput, ensure_2d=False)\n if n_outputs == 1:\n raise ValueError(\"Custom weights are useful only in multi-output case.\")\n elif n_outputs != len(multioutput):\n raise ValueError(\n \"There must be equally many custom weights (%d) as outputs (%d).\"\n % (len(multioutput), n_outputs)\n )\n\n return y_true, y_pred, multioutput\n\n def _check_ys(self, y_true, y_pred, multioutput):\n if multioutput is None:\n multioutput = self.multioutput\n valid, msg, metadata = check_is_scitype(\n y_pred, scitype=\"Proba\", return_metadata=True, var_name=\"y_pred\"\n )\n\n if not valid:\n raise TypeError(msg)\n\n y_pred_mtype = metadata[\"mtype\"]\n inner_y_pred_mtype = self.get_tag(\"scitype:y_pred\")\n y_pred_inner = convert(\n y_pred,\n from_type=y_pred_mtype,\n to_type=inner_y_pred_mtype,\n as_scitype=\"Proba\",\n )\n\n y_true, y_pred, multioutput = self._check_consistent_input(\n y_true, y_pred, multioutput\n )\n\n return y_true, y_pred_inner, multioutput\n\n def _get_alpha_from(self, y_pred):\n \"\"\"Fetch the alphas present in y_pred.\"\"\"\n # Only needed for quantile metrics, could put in a quantile mixin?\n\n alphas = np.unique(list(y_pred.columns.get_level_values(1)))\n if not all(((alphas > 0) & (alphas < 1))):\n raise ValueError(\"Alpha must be between 0 and 1.\")\n\n return alphas\n\n def _handle_multioutput(self, loss, multioutput):\n if isinstance(multioutput, str):\n if multioutput == \"raw_values\":\n return loss\n elif multioutput == \"uniform_average\":\n # pass None as weights to np.average: uniform mean\n multioutput = None\n else:\n raise ValueError(\n \"multioutput is expected to be 'raw_values' \"\n \"or 'uniform_average' but we got %r\"\n \" instead.\" % multioutput\n )\n\n if loss.ndim > 1:\n out = np.average(loss, weights=multioutput, axis=1)\n else:\n out = np.average(loss, weights=multioutput)\n return out\n\n\nclass PinballLoss(_BaseProbaForecastingErrorMetric):\n \"\"\"Evaluate the pinball loss at all quantiles given in data.\n\n Parameters\n ----------\n multioutput : string \"uniform_average\" or 
\"raw_values\" determines how multioutput\n results will be treated\n \"\"\"\n\n _tags = {\n \"scitype:y_pred\": \"pred_quantiles\",\n \"lower_is_better\": True,\n }\n\n def __init__(self, multioutput=\"uniform_average\"):\n name = \"PinballLoss\"\n super().__init__(name=name, multioutput=multioutput)\n\n def _evaluate(self, y_true, y_pred, multioutput, **kwargs):\n alphas = self._get_alpha_from(y_pred)\n\n out = [None] * len(alphas)\n for i, alpha in enumerate(alphas):\n alpha_preds = y_pred.iloc[\n :, y_pred.columns.get_level_values(1) == alpha\n ].to_numpy()\n diff = y_true - alpha_preds\n sign = (diff >= 0).astype(diff.dtype)\n loss = alpha * sign * diff - (1 - alpha) * (1 - sign) * diff\n\n loss = np.average(loss, axis=0)\n\n out[i] = self._handle_multioutput(loss, multioutput)\n\n out_df = pd.DataFrame([out], columns=alphas)\n return out_df\n\n def _evaluate_by_index(self, y_true, y_pred, multioutput, **kwargs):\n alphas = self._get_alpha_from(y_pred)\n\n n = len(y_true)\n out = np.full([n, len(alphas)], None)\n for i, alpha in enumerate(alphas):\n alpha_preds = y_pred.iloc[\n :, y_pred.columns.get_level_values(1) == alpha\n ].to_numpy()\n diff = y_true - alpha_preds\n sign = (diff >= 0).astype(diff.dtype)\n loss = alpha * sign * diff - (1 - alpha) * (1 - sign) * diff\n\n out[:, i] = self._handle_multioutput(loss, multioutput)\n\n out_df = pd.DataFrame(out, index=y_pred.index, columns=alphas)\n return out_df\n\n @classmethod\n def get_test_params(self):\n \"\"\"Retrieve test parameters.\"\"\"\n return {}\n"
] | [
[
"numpy.vstack",
"pandas.Series",
"pandas.DataFrame",
"sklearn.utils.check_array",
"sklearn.utils.check_consistent_length",
"numpy.average"
]
] |
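PinballLoss._evaluate above reduces, per quantile column, to the elementwise identity alpha * max(y - q, 0) + (1 - alpha) * max(q - y, 0), written there as alpha * sign * diff - (1 - alpha) * (1 - sign) * diff. A NumPy-only sketch of that core computation, detached from sktime's DataFrame plumbing and using made-up forecasts:

# Minimal NumPy version of the pinball (quantile) loss computed in PinballLoss above.
# y_true and the quantile forecasts are invented for illustration.
import numpy as np

def pinball_loss(y_true, y_pred, alpha):
    """Average pinball loss of quantile forecasts y_pred at level alpha."""
    diff = y_true - y_pred
    sign = (diff >= 0).astype(diff.dtype)
    return np.average(alpha * sign * diff - (1 - alpha) * (1 - sign) * diff, axis=0)

y_true = np.array([3.0, 5.0, 4.0, 6.0])
for alpha, quantile_forecast in [(0.1, np.array([2.0, 4.0, 3.5, 5.0])),
                                 (0.9, np.array([4.5, 6.5, 5.0, 7.5]))]:
    print(alpha, pinball_loss(y_true, quantile_forecast, alpha))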
jennydaman/nighres | [
"9ced74e61db02261e4753a69b03f4479bfdc26b6"
] | [
"nighres/io/io_mesh.py"
] | [
"import nibabel as nb\nimport numpy as np\n\n# TODO: compare with Nilearn functions and possibly extend\n\ndef load_mesh(surf_mesh):\n '''\n Load a mesh into a dictionary with entries\n \"points\", \"faces\" and \"data\"\n\n Parameters\n ----------\n surf_mesh:\n Mesh to be loaded, can be a path to a file\n (currently supported formats are freesurfer geometry formats,\n gii and ASCII-coded vtk, ply or obj) or a dictionary with the\n keys \"points\", \"faces\" and (optionally) \"data\"\n\n Returns\n ----------\n dict\n Dictionary with a numpy array with key \"points\" for a Numpy array of\n the x-y-z coordinates of the mesh vertices and key \"faces\" for a\n Numpy array of the the indices (into points) of the mesh faces.\n Optional \"data\" key is a Numpy array of values sampled on the \"points\".\n\n Notes\n ----------\n Originally created as part of Laminar Python [1]_\n\n References\n -----------\n .. [1] Huntenburg et al. (2017), Laminar Python: Tools for cortical\n depth-resolved analysis of high-resolution brain imaging data in\n Python. DOI: 10.3897/rio.3.e12346\n '''\n \n if surf_mesh.endswith('vtk'):\n points, faces, data = _read_vtk(surf_mesh)\n return {'points': points, 'faces': faces, 'data': data}\n\n elif surf_mesh.endswith('gii'):\n points, faces, data = _read_gifti(surf_mesh)\n return {'points': points, 'faces': faces, 'data': data}\n\n else: \n geom = load_mesh_geometry(surf_mesh)\n return geom\n \n\ndef save_mesh(filename, surf_dict):\n '''\n Saves surface mesh to file\n \n Parameters\n ----------\n filename: str\n Full path and filename under which surfaces data should be saved. The\n extension determines the file format. Currently supported are\n freesurfer geometry formats, gii and ASCII-coded vtk, obj, ply. Note\n that only ASCII-coded vtk currently saves data, the others only save\n the geometry.\n surf_dict: dict\n Surface mesh geometry to be saved. Dictionary with a numpy array with\n key \"points\" for a Numpy array of the x-y-z coordinates of the mesh\n vertices and key \"faces\" for a Numpy array of the the indices\n (into points) of the mesh faces. Optional \"data\" key is a Numpy array \n of values sampled on the \"points\"\n \n Notes\n ----------\n Originally created as part of Laminar Python [1]_\n \n References\n -----------\n .. [1] Huntenburg et al. (2017), Laminar Python: Tools for cortical\n depth-resolved analysis of high-resolution brain imaging data in\n Python. DOI: 10.3897/rio.3.e12346\n '''\n if filename.endswith('vtk'):\n _write_vtk(filename, surf_dict['points'], surf_dict['faces'],\n surf_dict['data'])\n elif filename.endswith('gii'):\n _write_gifti(filename, surf_dict['points'], surf_dict['faces'],\n surf_dict['data'])\n else: \n save_mesh_geometry(filename, surf_dict)\n \n\ndef load_mesh_geometry(surf_mesh):\n '''\n Load a mesh geometry into a dictionary with entries\n \"points\" and \"faces\"\n\n Parameters\n ----------\n surf_mesh:\n Mesh geometry to be loaded, can be a path to a file\n (currently supported formats are freesurfer geometry formats,\n gii and ASCII-coded vtk, ply or obj) or a dictionary with the\n keys \"points\" and \"faces\"\n\n Returns\n ----------\n dict\n Dictionary with a numpy array with key \"points\" for a Numpy array of\n the x-y-z coordinates of the mesh vertices and key \"faces\" for a\n Numpy array of the the indices (into points) of the mesh faces\n\n Notes\n ----------\n Originally created as part of Laminar Python [1]_\n\n References\n -----------\n .. [1] Huntenburg et al. 
(2017), Laminar Python: Tools for cortical\n depth-resolved analysis of high-resolution brain imaging data in\n Python. DOI: 10.3897/rio.3.e12346\n '''\n # if input is a filename, try to load it with nibabel\n if isinstance(surf_mesh, str):\n if (surf_mesh.endswith('orig') or surf_mesh.endswith('pial') or\n surf_mesh.endswith('white') or surf_mesh.endswith('sphere') or\n surf_mesh.endswith('inflated')):\n points, faces = nb.freesurfer.io.read_geometry(surf_mesh)\n elif surf_mesh.endswith('gii'):\n points, faces, = _read_gifti(surf_mesh)\n elif surf_mesh.endswith('vtk'):\n points, faces, _ = _read_vtk(surf_mesh)\n elif surf_mesh.endswith('ply'):\n points, faces = _read_ply(surf_mesh)\n elif surf_mesh.endswith('obj'):\n points, faces = _read_obj(surf_mesh)\n else:\n raise ValueError('Currently supported file formats are freesurfer '\n 'geometry formats and gii, vtk, ply, obj')\n elif isinstance(surf_mesh, dict):\n if ('faces' in surf_mesh and 'points' in surf_mesh):\n points, faces = surf_mesh['points'], surf_mesh['faces']\n else:\n raise ValueError('If surf_mesh is given as a dictionary it '\n 'must contain items with keys \"points\" and '\n '\"faces\"')\n else:\n raise ValueError('Input surf_mesh must be a either filename or a '\n 'dictionary containing items with keys \"points\" '\n 'and \"faces\"')\n return {'points': points, 'faces': faces}\n\n\ndef load_mesh_data(surf_data, gii_darray=None):\n '''\n Loads mesh data into a Numpy array\n\n Parameters\n ----------\n surf_data:\n Mesh data to be loaded, can be a Numpy array or a path to a file.\n Currently supported formats are freesurfer data formats (mgz, curv,\n sulc, thickness, annot, label), nii, gii, ASCII-coded vtk and txt\n gii_darray: int, optional\n Index of gii data array to load (default is to load all)\n\n Returns\n ----------\n np.ndarray\n Numpy array containing the data\n\n Notes\n ----------\n Originally created as part of Laminar Python [1]_\n\n References\n -----------\n .. [1] Huntenburg et al. (2017), Laminar Python: Tools for cortical\n depth-resolved analysis of high-resolution brain imaging data in\n Python. DOI: 10.3897/rio.3.e12346\n '''\n # if the input is a filename, load it\n if isinstance(surf_data, str):\n if (surf_data.endswith('nii') or surf_data.endswith('nii.gz') or\n surf_data.endswith('mgz')):\n data = np.squeeze(nb.load(surf_data).get_data())\n elif (surf_data.endswith('curv') or surf_data.endswith('sulc') or\n surf_data.endswith('thickness')):\n data = nb.freesurfer.io.read_morph_data(surf_data)\n elif surf_data.endswith('annot'):\n data = nb.freesurfer.io.read_annot(surf_data)[0]\n elif surf_data.endswith('label'):\n data = nb.freesurfer.io.read_label(surf_data)\n # check if this works with multiple indices (if dim(data)>1)\n elif surf_data.endswith('gii'):\n _, _, data = _read_gifti(surf_data)\n elif surf_data.endswith('vtk'):\n _, _, data = _read_vtk(surf_data)\n elif surf_data.endswith('txt'):\n data = np.loadtxt(surf_data)\n else:\n raise ValueError('Format of data file not recognized. Currently '\n 'supported formats are freesurfer data formats '\n '(mgz, sulc, curv, thickness, annot, label)'\n 'nii', 'gii, ASCII-coded vtk and txt')\n elif isinstance(surf_data, np.ndarray):\n data = np.squeeze(surf_data)\n return data\n\n\ndef save_mesh_data(filename, surf_data):\n '''\n Saves surface data that is a Numpy array to file\n\n Parameters\n ----------\n filename: str\n Full path and filename under which surfaces data should be saved. The\n extension determines the file format. 
Currently supported are\n freesurfer formats curv, thickness, sulc and ASCII-coded txt'\n surf_data: np.ndarray\n Surface data to be saved\n\n Notes\n ----------\n Originally created as part of Laminar Python [1]_\n\n References\n -----------\n .. [1] Huntenburg et al. (2017), Laminar Python: Tools for cortical\n depth-resolved analysis of high-resolution brain imaging data in\n Python. DOI: 10.3897/rio.3.e12346\n '''\n if isinstance(filename, str) and isinstance(surf_data, np.ndarray):\n if (filename.endswith('curv') or filename.endswith('thickness') or\n filename.endswith('sulc')):\n nb.freesurfer.io.write_morph_data(filename, surf_data)\n print(\"\\nSaving {0}\".format(filename))\n elif filename.endswith('txt'):\n np.savetxt(filename, surf_data)\n print(\"\\nSaving {0}\".format(filename))\n else:\n raise ValueError('File format not recognized. Currently supported '\n 'are freesurfer formats curv, sulc, thickness '\n 'and ASCII coded vtk and txt')\n else:\n raise ValueError('Filename must be a string')\n\n\ndef save_mesh_geometry(filename, surf_dict):\n '''\n Saves surface mesh geometry to file\n\n Parameters\n ----------\n filename: str\n Full path and filename under which surfaces data should be saved. The\n extension determines the file format. Currently supported are\n freesurfer geometry formats, gii and ASCII-coded vtk, obj, ply'\n surf_dict: dict\n Surface mesh geometry to be saved. Dictionary with a numpy array with\n key \"points\" for a Numpy array of the x-y-z coordinates of the mesh\n vertices and key \"faces2 for a Numpy array of the the indices\n (into points) of the mesh faces\n\n Notes\n ----------\n Originally created as part of Laminar Python [1]_\n\n References\n -----------\n .. [1] Huntenburg et al. (2017), Laminar Python: Tools for cortical\n depth-resolved analysis of high-resolution brain imaging data in\n Python. 
DOI: 10.3897/rio.3.e12346\n '''\n if isinstance(filename, str) and isinstance(surf_dict, dict):\n if (filename.endswith('orig') or filename.endswith('pial') or\n filename.endswith('white') or filename.endswith('sphere') or\n filename.endswith('inflated')):\n nb.freesurfer.io.write_geometry(filename, surf_dict['points'],\n surf_dict['faces'])\n print(\"\\nSaving {0}\".format(filename))\n elif filename.endswith('gii'):\n _write_gifti(filename, surf_dict['points'], surf_dict['faces'])\n print(\"\\nSaving {0}\".format(filename))\n elif filename.endswith('vtk'):\n if 'data' in surf_dict.keys():\n _write_vtk(filename, surf_dict['points'], surf_dict['faces'],\n surf_dict['data'])\n print(\"\\nSaving {0}\".format(filename))\n else:\n _write_vtk(filename, surf_dict['points'], surf_dict['faces'])\n print(\"\\nSaving {0}\".format(filename))\n elif filename.endswith('ply'):\n _write_ply(filename, surf_dict['points'], surf_dict['faces'])\n print(\"\\nSaving {0}\".format(filename))\n elif filename.endswith('obj'):\n _write_obj(filename, surf_dict['points'], surf_dict['faces'])\n print(\"\\nSaving {0}\".format(filename))\n print('To view mesh in brainview, run the command:\\n')\n print('average_objects ' + filename + ' ' + filename)\n else:\n raise ValueError('Filename must be a string and surf_dict must be a '\n 'dictionary with keys \"points\" and \"faces\"')\n\n\ndef _read_gifti(file):\n points = nb.gifti.read(file).get_arrays_from_intent(\n nb.nifti1.intent_codes['NIFTI_INTENT_POINTSET'])[0].data\n faces = nb.gifti.read(file).get_arrays_from_intent(\n nb.nifti1.intent_codes['NIFTI_INTENT_TRIANGLE'])[0].data\n \n narrays = len(nb.gifti.read(file).darrays)-2\n if narrays>0:\n data = np.zeros([points.shape[0], narrays])\n n=0;\n for darray in nb.gifti.read(file).darrays:\n if darray.intent is not nb.nifti1.intent_codes['NIFTI_INTENT_POINTSET'] \\\n and darray.intent is not nb.nifti1.intent_codes['NIFTI_INTENT_TRIANGLE']:\n data[:,n] = darray.data\n n=n+1\n else:\n data = None\n\n return points, faces, data\n\n\n# function to read vtk files\n# ideally use pyvtk, but it didn't work for our data, look into why\ndef _read_vtk(file):\n '''\n Reads ASCII coded vtk files using pandas,\n returning vertices, faces and data as three numpy arrays.\n '''\n import pandas as pd\n import csv\n # read full file while dropping empty lines\n try:\n vtk_df = pd.read_csv(file, header=None, engine='python')\n except csv.Error:\n raise ValueError(\n 'This vtk file appears to be binary coded currently only ASCII '\n 'coded vtk files can be read')\n vtk_df = vtk_df.dropna()\n # extract number of vertices and faces\n number_vertices = int(vtk_df[vtk_df[0].str.contains(\n 'POINTS')][0].iloc[0].split()[1])\n number_faces = int(vtk_df[vtk_df[0].str.contains(\n 'POLYGONS')][0].iloc[0].split()[1])\n # read vertices into df and array\n start_vertices = (vtk_df[vtk_df[0].str.contains(\n 'POINTS')].index.tolist()[0]) + 1\n vertex_df = pd.read_csv(file, skiprows=range(start_vertices),\n nrows=number_vertices, sep='\\s*',\n header=None, engine='python')\n if np.array(vertex_df).shape[1] == 3:\n vertex_array = np.array(vertex_df)\n # sometimes the vtk format is weird with 9 indices per line,\n # then it has to be reshaped\n elif np.array(vertex_df).shape[1] == 9:\n vertex_df = pd.read_csv(file, skiprows=range(start_vertices),\n nrows=int(number_vertices / 3) + 1,\n sep='\\s*', header=None, engine='python')\n vertex_array = np.array(vertex_df.iloc[0:1, 0:3])\n vertex_array = np.append(vertex_array, vertex_df.iloc[0:1, 3:6],\n axis=0)\n 
vertex_array = np.append(vertex_array, vertex_df.iloc[0:1, 6:9],\n axis=0)\n for row in range(1, (int(number_vertices / 3) + 1)):\n for col in [0, 3, 6]:\n vertex_array = np.append(vertex_array, np.array(\n vertex_df.iloc[row:(row + 1), col:(col + 3)]), axis=0)\n # strip rows containing nans\n vertex_array = vertex_array[~np.isnan(vertex_array)].reshape(\n number_vertices, 3)\n else:\n print(\"vertex indices out of shape\")\n # read faces into df and array\n start_faces = (vtk_df[vtk_df[0].str.contains(\n 'POLYGONS')].index.tolist()[0]) + 1\n face_df = pd.read_csv(file, skiprows=range(start_faces),\n nrows=number_faces, sep='\\s*', header=None,\n engine='python')\n face_array = np.array(face_df.iloc[:, 1:4])\n # read data into df and array if exists\n if vtk_df[vtk_df[0].str.contains('POINT_DATA')].index.tolist() != []:\n start_data = (vtk_df[vtk_df[0].str.contains(\n 'POINT_DATA')].index.tolist()[0]) + 3\n number_data = number_vertices\n data_df = pd.read_csv(file, skiprows=range(start_data),\n nrows=number_data, sep='\\s*', header=None,\n engine='python')\n data_array = np.array(data_df)\n else:\n data_array = None\n\n return vertex_array, face_array, data_array\n\n\ndef _read_ply(file):\n import pandas as pd\n import csv\n # read full file and drop empty lines\n try:\n ply_df = pd.read_csv(file, header=None, engine='python')\n except csv.Error:\n raise ValueError(\n 'This ply file appears to be binary coded currently only '\n 'ASCII coded ply files can be read')\n ply_df = ply_df.dropna()\n # extract number of vertices and faces, and row that marks end of header\n number_vertices = int(ply_df[ply_df[0].str.contains(\n 'element vertex')][0].iloc[0].split()[2])\n number_faces = int(ply_df[ply_df[0].str.contains(\n 'element face')][0].iloc[0].split()[2])\n end_header = ply_df[ply_df[0].str.contains('end_header')].index.tolist()[0]\n # read vertex coordinates into dict\n vertex_df = pd.read_csv(file, skiprows=range(end_header + 1),\n nrows=number_vertices, sep='\\s*', header=None,\n engine='python')\n vertex_array = np.array(vertex_df)\n # read face indices into dict\n face_df = pd.read_csv(file,\n skiprows=range(end_header + number_vertices + 1),\n nrows=number_faces, sep='\\s*', header=None,\n engine='python')\n face_array = np.array(face_df.iloc[:, 1:4])\n\n return vertex_array, face_array\n\n\n# function to read MNI obj mesh format\ndef _read_obj(file):\n\n def chunks(l, n):\n \"\"\"Yield n-sized chunks from l\"\"\"\n for i in range(0, len(l), n):\n yield l[i:i + n]\n\n def indices(lst, element):\n result = []\n offset = -1\n while True:\n try:\n offset = lst.index(element, offset + 1)\n except ValueError:\n return result\n result.append(offset)\n fp = open(file, 'r')\n n_vert = []\n n_poly = []\n k = 0\n Polys = []\n # Find number of vertices and number of polygons, stored in .obj file.\n # Then extract list of all vertices in polygons\n for i, line in enumerate(fp):\n if i == 0:\n # Number of vertices\n n_vert = int(line.split()[6])\n XYZ = np.zeros([n_vert, 3])\n elif i <= n_vert:\n XYZ[i - 1] = map(float, line.split())\n elif i > 2 * n_vert + 5:\n if not line.strip():\n k = 1\n elif k == 1:\n Polys.extend(line.split())\n Polys = map(int, Polys)\n npPolys = np.array(Polys)\n triangles = np.array(list(chunks(Polys, 3)))\n return XYZ, triangles\n\n\ndef _write_gifti(surf_mesh, points, faces, data=None):\n coord_array = nb.gifti.GiftiDataArray(data=points,\n intent=nb.nifti1.intent_codes[\n 'NIFTI_INTENT_POINTSET'])\n face_array = nb.gifti.GiftiDataArray(data=faces,\n 
intent=nb.nifti1.intent_codes[\n 'NIFTI_INTENT_TRIANGLE'])\n if data is not None:\n data_array = nb.gifti.GiftiDataArray(data=data,\n intent=nb.nifti1.intent_codes[\n 'NIFTI_INTENT_ESTIMATE'])\n gii = nb.gifti.GiftiImage(darrays=[coord_array, face_array, data_array])\n else:\n gii = nb.gifti.GiftiImage(darrays=[coord_array, face_array])\n \n nb.gifti.write(gii, surf_mesh)\n\n\ndef _write_obj(surf_mesh, points, faces):\n # write out MNI - obj format\n n_vert = len(points)\n XYZ = points.tolist()\n Tri = faces.tolist()\n with open(surf_mesh, 'w') as s:\n line1 = \"P 0.3 0.3 0.4 10 1 \" + str(n_vert) + \"\\n\"\n s.write(line1)\n k = -1\n for a in XYZ:\n k += 1\n cor = ' ' + ' '.join(map(str, XYZ[k]))\n s.write('%s\\n' % cor)\n s.write('\\n')\n for a in XYZ:\n s.write(' 0 0 0\\n')\n s.write('\\n')\n l = ' ' + str(len(Tri)) + '\\n'\n s.write(l)\n s.write(' 0 1 1 1 1\\n')\n s.write('\\n')\n nt = len(Tri) * 3\n Triangles = np.arange(3, nt + 1, 3)\n Rounded8 = np.shape(Triangles)[0] / 8\n N8 = 8 * Rounded8\n Triangles8 = Triangles[0:N8]\n RowsOf8 = np.split(Triangles8, N8 / 8)\n for r in RowsOf8:\n L = r.tolist()\n Lint = map(int, L)\n Line = ' ' + ' '.join(map(str, Lint))\n s.write('%s\\n' % Line)\n L = Triangles[N8:].tolist()\n Lint = map(int, L)\n Line = ' ' + ' '.join(map(str, Lint))\n s.write('%s\\n' % Line)\n s.write('\\n')\n ListOfTriangles = np.array(Tri).flatten()\n Rounded8 = np.shape(ListOfTriangles)[0] / 8\n N8 = 8 * Rounded8\n Triangles8 = ListOfTriangles[0:N8]\n ListTri8 = ListOfTriangles[0:N8]\n RowsOf8 = np.split(Triangles8, N8 / 8)\n for r in RowsOf8:\n L = r.tolist()\n Lint = map(int, L)\n Line = ' ' + ' '.join(map(str, Lint))\n s.write('%s\\n' % Line)\n L = ListOfTriangles[N8:].tolist()\n Lint = map(int, L)\n Line = ' ' + ' '.join(map(str, Lint))\n s.write('%s\\n' % Line)\n\n\ndef _write_vtk(filename, vertices, faces, data=None, comment=None):\n '''\n Creates ASCII coded vtk file from numpy arrays using pandas.\n Inputs:\n -------\n (mandatory)\n * filename: str, path to location where vtk file should be stored\n * vertices: numpy array with vertex coordinates, shape (n_vertices, 3)\n * faces: numpy array with face specifications, shape (n_faces, 3)\n (optional)\n * data: numpy array with data points, shape (n_vertices, n_datapoints)\n NOTE: n_datapoints can be =1 but cannot be skipped (n_vertices,)\n * comment: str, is written into the comment section of the vtk file\n Usage:\n ---------------------\n _write_vtk('/path/to/vtk/file.vtk', v_array, f_array)\n '''\n\n import pandas as pd\n # infer number of vertices and faces\n number_vertices = vertices.shape[0]\n number_faces = faces.shape[0]\n if data is not None:\n number_data = data.shape[0]\n # make header and subheader dataframe\n header = ['# vtk DataFile Version 3.0',\n '%s' % comment,\n 'ASCII',\n 'DATASET POLYDATA',\n 'POINTS %i float' % number_vertices\n ]\n header_df = pd.DataFrame(header)\n sub_header = ['POLYGONS %i %i' % (number_faces, 4 * number_faces)]\n sub_header_df = pd.DataFrame(sub_header)\n # make dataframe from vertices\n vertex_df = pd.DataFrame(vertices)\n # make dataframe from faces, appending first row of 3's\n # (indicating the polygons are triangles)\n triangles = np.reshape(3 * (np.ones(number_faces)), (number_faces, 1))\n triangles = triangles.astype(int)\n faces = faces.astype(int)\n faces_df = pd.DataFrame(np.concatenate((triangles, faces), axis=1))\n # write dfs to csv\n header_df.to_csv(filename, header=None, index=False)\n with open(filename, 'a') as f:\n vertex_df.to_csv(f, header=False, 
index=False, float_format='%.3f',\n sep=' ')\n with open(filename, 'a') as f:\n sub_header_df.to_csv(f, header=False, index=False)\n with open(filename, 'a') as f:\n faces_df.to_csv(f, header=False, index=False, float_format='%.0f',\n sep=' ')\n # if there is data append second subheader and data\n if data is not None:\n if len(data.shape)>1:\n datapoints = data.shape[1]\n sub_header2 = ['POINT_DATA %i' % (number_data),\n 'SCALARS Scalars float %i' % (datapoints),\n 'LOOKUP_TABLE default']\n else:\n datapoints = 1\n sub_header2 = ['POINT_DATA %i' % (number_data),\n 'SCALARS Scalars float',\n 'LOOKUP_TABLE default']\n sub_header_df2 = pd.DataFrame(sub_header2)\n data_df = pd.DataFrame(data)\n with open(filename, 'a') as f:\n sub_header_df2.to_csv(f, header=False, index=False)\n with open(filename, 'a') as f:\n data_df.to_csv(f, header=False, index=False, float_format='%.16f',\n sep=' ')\n\n\ndef _write_ply(filename, vertices, faces, comment=None):\n import pandas as pd\n print(\"writing ply format\")\n # infer number of vertices and faces\n number_vertices = vertices.shape[0]\n number_faces = faces.shape[0]\n # make header dataframe\n header = ['ply',\n 'format ascii 1.0',\n 'comment %s' % comment,\n 'element vertex %i' % number_vertices,\n 'property float x',\n 'property float y',\n 'property float z',\n 'element face %i' % number_faces,\n 'property list uchar int vertex_indices',\n 'end_header'\n ]\n header_df = pd.DataFrame(header)\n # make dataframe from vertices\n vertex_df = pd.DataFrame(vertices)\n # make dataframe from faces, adding first row of 3s (indicating triangles)\n triangles = np.reshape(3 * (np.ones(number_faces)), (number_faces, 1))\n triangles = triangles.astype(int)\n faces = faces.astype(int)\n faces_df = pd.DataFrame(np.concatenate((triangles, faces), axis=1))\n # write dfs to csv\n header_df.to_csv(filename, header=None, index=False)\n with open(filename, 'a') as f:\n vertex_df.to_csv(f, header=False, index=False,\n float_format='%.3f', sep=' ')\n with open(filename, 'a') as f:\n faces_df.to_csv(f, header=False, index=False,\n float_format='%.0f', sep=' ')\n"
] | [
[
"numpy.ones",
"numpy.append",
"numpy.zeros",
"numpy.squeeze",
"pandas.read_csv",
"numpy.savetxt",
"pandas.DataFrame",
"numpy.arange",
"numpy.shape",
"numpy.isnan",
"numpy.array",
"numpy.concatenate",
"numpy.loadtxt",
"numpy.split"
]
] |
thanhan/active-hcomp15 | [
"7c82a3fc013e3d677073acfbabcffbed72d546d9"
] | [
"util.py"
] | [
"import xml.etree.ElementTree as ET\nimport numpy as np\n\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.svm import SVC\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics import confusion_matrix\n\nimport matplotlib.pyplot as plt\n\nfrom sklearn.metrics import precision_recall_curve\nfrom sklearn.metrics import average_precision_score\n\nimport scipy\n\nimport csv\n\ndef get_text(a):\n try:\n return a.text\n except AttributeError:\n return ''\n \ndef get_relevant():\n f = open('data/proton-beam-relevant.txt')\n res = np.zeros(4751)\n for line in f:\n x = int(line)\n res[x-1] = 1\n f.close()\n return res\n \ndef get_pub_dic_xml(file_name = 'data/proton-beam-all.xml'):\n tree = ET.parse(file_name)\n root = tree.getroot()[0]\n\n # Create dic of : id -> feature text\n pub_dic = {}\n for pub in root:\n rec_number = int (get_text (pub.find('rec-number')))\n abstract = get_text (pub.find('abstract'))\n title = get_text (pub.find('titles')[0])\n text = title + abstract\n for kw in pub.find('keywords'):\n text = text + kw.text + ' '\n pub_dic[rec_number] = text\n \n return pub_dic\n \n \n\n\ndef get_pub_dic_csv(dataset):\n filename = \"data/\" + dataset + \"-text.csv\"\n f = open(filename)\n f.readline()\n csv_reader = csv.reader(f)\n \n # Create dic of : id -> feature text\n pub_dic = {}\n \n for row in csv_reader: \n (abstract_id, title, publisher, abstract) = tuple(row)[0:4]\n abstract_id = int(abstract_id)\n text = title + abstract\n \n pub_dic[abstract_id] = text\n \n return pub_dic\n \n \ndef get_turk_data(dataset):\n filename = \"data/\" + dataset + \"-turk.csv\"\n f = open(filename)\n first_line = f.readline()\n csv_reader = csv.reader(f)\n \n turk_dic = {}\n rel_dic = {}\n for row in csv_reader:\n #print len(row)\n if dataset == 'omega3':\n (AssignmentId, WorkerId, HITId, AcceptTime, SubmitTime, ApprovalTime, TimeToComplete, PMID, AbstractId, Question2, Question3, Question4, Relevant, Honeypot) = tuple(row)\n else:\n (AssignmentId, WorkerId, HITId, AcceptTime, SubmitTime, ApprovalTime, TimeToComplete, PMID, AbstractId, Question1, Question2, Question3, Question4, Relevant, Honeypot) = tuple(row)\n AbstractId = int(AbstractId)\n if AbstractId not in turk_dic: turk_dic[AbstractId] = []\n turk_dic[AbstractId].append( (Question3, Question4) )\n rel_dic[AbstractId] = Relevant\n \n return (turk_dic, rel_dic)\n\n \nmat = None\nrel = None\nturk_dic = None\n \ndef main(dataset = 'proton-beam'):\n global mat, rel, turk_dic\n \n if dataset == 'proton-beam':\n pub_dic = get_pub_dic_xml() \n # pub_dic_items are already sorted by key\n [rec_nums, texts] = zip(*pub_dic.items())\n rel = get_relevant()\n else:\n pub_dic = get_pub_dic_csv(dataset)\n #[rec_nums, texts] = zip(*pub_dic.items())\n (turk_dic, rel_dic) = get_turk_data(dataset)\n texts = []\n for i in pub_dic.keys():\n if pub_dic.has_key(i) and turk_dic.has_key(i) and rel_dic.has_key(i):\n texts.append(pub_dic[i])\n else:\n if pub_dic.has_key(i): pub_dic.pop(i)\n if turk_dic.has_key(i): turk_dic.pop(i)\n if rel_dic.has_key(i): rel_dic.pop(i)\n \n (_,rel) = zip(*rel_dic.items())\n rel = map(int, rel)\n \n vectorizer = TfidfVectorizer()\n #save_texts = texts\n mat = vectorizer.fit_transform(texts)\n return (pub_dic, texts)\n\n\ndef classify(n = 50):\n #clf = MultinomialNB(fit_prior=False)\n #clf = SVC(gamma=2, C=1, class_weight = {0.0:0.063829777, 1.0:1.0})\n clf = SGDClassifier(loss=\"log\", penalty=\"l1\", class_weight = {0.0:0.022, 1.0:1.0})\n\n 
clf.fit(mat[:n], rel[:n])\n return clf\n\n \ndef confu_mat(rel, turk_rel):\n m = [[0,0],[0,0]]\n for i in range(len(rel)):\n m[rel[i]][turk_rel[i]] += 1\n return m\n \ndef plot_pr(gold, predicted_prob, lb):\n pp1 = predicted_prob[:,1] # prob for class 1\n p, r, th = precision_recall_curve(gold, pp1)\n ap = average_precision_score(gold, pp1)\n plt.plot(r, p, label= lb + ' (area = {0:0.2f})'\n ''.format(ap))\n\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('Recall')\n plt.ylabel('Precision')\n plt.title('Precision and Recall')\n plt.legend(loc=\"upper right\")\n #plt.show()\n \ndef eval_clf(gold, clf, mat, start = 0):\n pp = clf.predict_proba(mat[start:,:])\n pp1 = pp[:,1]\n ap = average_precision_score(gold[start:], pp1)\n return ap\n\ndef train_and_plot(ex = [50,100,200]):\n \"\"\"\n train the classifier with ex[i] examples\n Plot\n \"\"\"\n \n for num in ex:\n clf = classify(num)\n pp = clf.predict_proba(mat)\n plot_pr(rel[2000:], pp[2000:], str(num))\n \n \ndef get_balance_data(mat, rel):\n mat_1 = mat[ np.nonzero(rel == 1)[0] ]\n mat_0 = mat[ np.nonzero(rel == 0)[0] ]\n \n #print mat_1.shape, mat_0.shape\n\n n = min(mat_1.shape[0], mat_0.shape[0])\n \n #shuffle mat_0\n index = np.arange( mat_0.shape[0] )\n np.random.shuffle(index)\n mat_0 = mat_0[index]\n \n #print mat_0.shape\n \n new_mat = scipy.sparse.vstack([mat_1[:n], mat_0[:n]], 'csr')\n new_rel = np.hstack([np.ones((n,)), np.zeros((n,))] )\n \n #print new_mat, new_rel.shape\n \n #shuffle new mat and rel\n index = np.arange(new_mat.shape[0])\n np.random.shuffle(index)\n \n new_mat = new_mat[index]\n new_rel = new_rel[index]\n \n return (new_mat, new_rel)\n \n\n #s = [0, 1, 2, 3, 4, 5, 6, 7,8,9,10,11,12,13,14,15,16,17,18,19,20, 37, 44, 68, 71, 118, 141, 162,183, 189, 248, 249, 255, 267, 268, 324]\n \n #\n #from sklearn.cross_validation import KFold\n #kf = KFold(n, n_folds=10)\n #acc_list = []\n #for train, test in kf:\n # clf.fit(mat[train], rel[train])\n # predicted = clf.predict(mat[test])\n # acc = sum(predicted == rel[test]) * 1.0 / len(rel[test])\n # acc_list.append(acc)\n \n #print 'average accuracy: ', np.average(acc_list)\n\n #for i in range(20, 1000, 20):\n # clf.fit(mat[0:i], rel[0:i])\n # predicted = clf.predict(mat[1000:])\n # acc = sum(predicted == rel[1000:]) * 1.0 / len(rel[1000:])\n # print i, acc\n #from sklearn.svm import SVC\n\n #clf = SVC()\n\n #clf.fit(mat, rel)\n\n \n"
] | [
[
"numpy.ones",
"matplotlib.pyplot.legend",
"numpy.random.shuffle",
"numpy.zeros",
"sklearn.linear_model.SGDClassifier",
"sklearn.feature_extraction.text.TfidfVectorizer",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.title",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.ylim",
"sklearn.metrics.precision_recall_curve",
"sklearn.metrics.average_precision_score",
"numpy.nonzero",
"matplotlib.pyplot.xlabel",
"scipy.sparse.vstack"
]
] |
Rodrigo-Tenorio/nessai | [
"2b4175da61b3a7250d1154a126ad93481836df0d"
] | [
"tests/test_reparameterisations/test_rescale.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nTest the Rescale class.\n\"\"\"\nimport numpy as np\nimport pytest\nfrom unittest.mock import create_autospec\n\nfrom nessai.livepoint import numpy_array_to_live_points\nfrom nessai.reparameterisations import Rescale\n\n\[email protected]()\ndef reparam():\n return create_autospec(Rescale)\n\n\[email protected]('scale', [2, 2.0, [1, 2], {'x': 1, 'y': 2}])\ndef test_init(scale):\n \"\"\"Test the init method with different input types\"\"\"\n parameters = ['x', 'y']\n prior_bounds = {'x': [-1, 1], 'y': [-1, 1]}\n\n reparam = \\\n Rescale(parameters=parameters, scale=scale, prior_bounds=prior_bounds)\n\n assert not set(reparam.scale.keys()) - set(parameters)\n assert isinstance(reparam.scale['x'], float)\n\n\[email protected]('n', [1, 2])\ndef test_reparameterise(reparam, n):\n \"\"\"Test the reparameterise method\"\"\"\n reparam.parameters = ['x', 'y']\n reparam.prime_parameters = ['x_prime', 'y_prime']\n reparam.scale = {'x': -2.0, 'y': 4.0}\n x = numpy_array_to_live_points(np.ones((n, 2)), reparam.parameters)\n x_prime = numpy_array_to_live_points(\n np.zeros((n, 2)), reparam.prime_parameters)\n log_j = np.zeros(n)\n\n x_out, x_prime_out, log_j_out = \\\n Rescale.reparameterise(reparam, x, x_prime, log_j)\n\n assert np.array_equal(x, x_out)\n assert np.array_equal(log_j_out, -np.log(8 * np.ones(n)))\n assert (x_prime_out['x_prime'] == -0.5).all()\n assert (x_prime_out['y_prime'] == 0.25).all()\n\n\[email protected]('scale', [1e60, 1e-60])\ndef test_reparameterise_overflow(reparam, scale):\n \"\"\"Test the reparameterise method with very small and large scales.\n\n Checks precision to 14 decimal places.\n \"\"\"\n reparam.parameters = ['x']\n reparam.prime_parameters = ['x_prime']\n reparam.scale = {'x': scale}\n x_array = np.arange(100.0, dtype=float)\n x = numpy_array_to_live_points(scale * x_array[:, np.newaxis],\n reparam.parameters)\n x_prime = numpy_array_to_live_points(np.ones((x_array.size, 1)),\n reparam.prime_parameters)\n log_j = np.zeros(x.size)\n\n x_out, x_prime_out, log_j_out = \\\n Rescale.reparameterise(reparam, x, x_prime, log_j)\n\n np.testing.assert_array_almost_equal(x_array, x_prime_out['x_prime'],\n decimal=14)\n assert (log_j == -np.log(scale)).all()\n\n\[email protected]('n', [1, 2])\ndef test_inverse_reparameterise(reparam, n):\n \"\"\"Test the inverse reparameterise method\"\"\"\n reparam.parameters = ['x', 'y']\n reparam.prime_parameters = ['x_prime', 'y_prime']\n reparam.scale = {'x': -2.0, 'y': 4.0}\n x = numpy_array_to_live_points(np.zeros((n, 2)), reparam.parameters)\n x_prime = numpy_array_to_live_points(\n np.ones((n, 2)), reparam.prime_parameters)\n x_prime['x_prime'] *= -1\n log_j = np.zeros(n)\n\n x_out, x_prime_out, log_j_out = \\\n Rescale.inverse_reparameterise(reparam, x, x_prime, log_j)\n\n assert np.array_equal(x_prime, x_prime_out)\n assert np.array_equal(log_j_out, np.log(8 * np.ones(n)))\n assert (x_out['x'] == 2.0).all()\n assert (x_out['y'] == 4.0).all()\n\n\[email protected]('scale', [1e60, 1e-60])\ndef test_inverse_reparameterise_overflow(reparam, scale):\n \"\"\"Test the inverse_reparameterise method with very small and large scales.\n \"\"\"\n reparam.parameters = ['x']\n reparam.prime_parameters = ['x_prime']\n reparam.scale = {'x': scale}\n x_array = np.arange(100.0, dtype=float)\n x = numpy_array_to_live_points(np.ones((x_array.size, 1)),\n reparam.parameters)\n x_prime = numpy_array_to_live_points(x_array[:, np.newaxis],\n reparam.prime_parameters)\n log_j = np.zeros(x.size)\n\n x_out, x_prime_out, 
log_j_out = \\\n Rescale.inverse_reparameterise(reparam, x, x_prime, log_j)\n\n np.testing.assert_array_equal(x_array * scale, x_out['x'])\n assert (log_j == np.log(scale)).all()\n\n\ndef test_init_no_scale():\n \"\"\"Make sure an error is raised if the scale is not given\"\"\"\n with pytest.raises(RuntimeError) as excinfo:\n Rescale(scale=None)\n assert 'Must specify a scale!' in str(excinfo.value)\n\n\[email protected]('scale', [[1], [1, 2, 3]])\ndef test_init_incorrect_scale_list(scale):\n \"\"\"Make sure an error is raised if the scale is the incorrect length\"\"\"\n parameters = ['x', 'y']\n prior_bounds = {'x': [-1, 1], 'y': [-1, 1]}\n\n with pytest.raises(RuntimeError) as excinfo:\n Rescale(parameters=parameters, scale=scale, prior_bounds=prior_bounds)\n\n assert 'different length' in str(excinfo.value)\n\n\[email protected]('scale', [{'x': 1}, {'x': 1, 'y': 1, 'z': 1}])\ndef test_init_incorrect_scale_dict(scale):\n \"\"\"Make sure an error is raised if the scale keys to not match the \\\n parameters.\n \"\"\"\n parameters = ['x', 'y']\n prior_bounds = {'x': [-1, 1], 'y': [-1, 1]}\n\n with pytest.raises(RuntimeError) as excinfo:\n Rescale(parameters=parameters, scale=scale, prior_bounds=prior_bounds)\n\n assert 'Mismatched parameters' in str(excinfo.value)\n\n\ndef test_init_incorrect_scale_type():\n \"\"\"Make sure an error is raised if the scale is the incorrect type\"\"\"\n parameters = ['x', 'y']\n prior_bounds = {'x': [-1, 1], 'y': [-1, 1]}\n\n with pytest.raises(TypeError) as excinfo:\n Rescale(parameters=parameters, scale='1', prior_bounds=prior_bounds)\n\n assert 'Scale input must be' in str(excinfo.value)\n"
] | [
[
"numpy.ones",
"numpy.zeros",
"numpy.testing.assert_array_equal",
"numpy.arange",
"numpy.testing.assert_array_almost_equal",
"numpy.log",
"numpy.array_equal"
]
] |
daniosro/DMSP | [
"c5ad5b200c2eece611ab6b326a9a93cc3fd421d1"
] | [
"code/figures/enz_deg/modelling/d34s_DMSP_enz_deg.py"
] | [
"# %% \nimport git\nimport pandas as pd\nimport numpy as np\n\n# Find home directory for repo\nrepo = git.Repo(\"./\", search_parent_directories=True)\nhomedir = repo.working_dir\n\n#Import plotting features\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\nfrom matplotlib.colors import BoundaryNorm\nfrom matplotlib.ticker import MaxNLocator\nfrom matplotlib.ticker import StrMethodFormatter\nfrom matplotlib.ticker import FormatStrFormatter\nimport seaborn as sns\n\n# Set plot style\nsns.set_style(\"ticks\")\nsns.set_context(\"paper\")\n\n # %% \n#Load modelling results\ndf_DMSP = pd.read_csv (f'{homedir}/data/modelling/DMSP_enz_deg.csv')\ndf_DMSP.head()\n#Load DddP data\n# Import table with the raw data for DddP\ndf_dddp = pd.read_csv(f'{homedir}/data/processed/enzymes/dddp_master.csv')\ndf_dddp.head()\n# %% \n#Define slopes (fractionation factors) to apper in the plot\n#Without enzyme degradation\nslope_0 = -4.04\n#With enzyme degradation\nslope_k = -3.98\n\n#Create figure\n\nfig = plt.figure(figsize=(6.1, 4), dpi=192)\n\nax1 = plt.subplot(221)\nax2 = plt.subplot(223)\nax3 = plt.subplot(122)\n\n#Assing plots to axes\nax1.plot(df_DMSP['-ln_fr_no_enz_deg'], df_DMSP['d34s_no_enz_deg'], color ='#0072B2')\nax2.plot(df_DMSP['-ln_fr_enz_deg'], df_DMSP['d34s_enz_deg'], color='#D55E00')\nax3.plot(df_DMSP['Time'], df_DMSP['d34s_no_enz_deg'], \nlabel ='$\\delta^{34}{S}$ without deg', color ='#0072B2')\nax3.plot(df_DMSP['Time'], df_DMSP['d34s_enz_deg'], \nlabel='$\\delta^{34}{S}$ with deg', color ='#D55E00')\n \n#Add axes labels and legends to the first plot\nax1.set_ylabel ('$\\delta ^{34}$S DMSP$_{VCDT}$ (‰)')\nax1.set_xlabel ('-ln $f_{R}$')\nax1.set_yticks(np.linspace(14,28,5))\n\nax1.set_title('Fractionation of S isotopes in \\n DMSP without enz. degradation')\nax1.set_xticks(range(0,6))\nax1.set_xlim(0,5)\nax1.set_yticks(range(14,35,4))\nax1.set_ylim(14,34)\n#Add epsilon\nax1.text(1, 0, '$^{34}$$\\epsilon$ = %s ‰' %(-slope_0), transform=ax1.transAxes,\n verticalalignment='bottom', horizontalalignment='right', fontsize=10)\n\n#Add axes labels and legends to the second plot\nax2.set_ylabel ('$\\delta ^{34}$S DMSP$_{VCDT}$ (‰)')\nax2.set_xlabel ('-ln $f_{R}$')\nax2.set_ylim (14,19)\nax2.set_title('Fractionation of S isotopes in \\n DMSP with enz. degradation')\nax2.set_xticks([0,0.2,0.4,0.6,0.8,1,1.2])\nax2.set_xlim(0,1.2)\nax2.set_yticks(range(14,20,1))\nax2.set_ylim(14,19)\n#Add epsilon\nax2.text(1, 0, '$^{34}$$\\epsilon$ = %s ‰' %(-slope_k), transform=ax2.transAxes,\n verticalalignment='bottom', horizontalalignment='right', fontsize=10)\n\n#Add axes labels and legends\nax3.set_xlabel ('Time (min)')\nax3.set_ylabel ('$\\delta ^{34}$S DMSP$_{VCDT}$ (‰)')\nax3.set_xlim (0,60)\nax3.set_yticks(range(14,35,4))\nax3.set_ylim (14,34)\nax3.yaxis.set_major_formatter(FormatStrFormatter('%.0f'))\n\n#Plot the DddP data on top\n# Group by replicate\ndf_group = df_dddp.groupby(['Replicate'])\n# Define colors\n\n# Loop through replicates\nfor i, (group, data) in enumerate(df_group):\n\n#Plot experimental data\n ax3.scatter(data.Time_min, data.d34S_approx_DMSP, color = '#864410')\n\n#Show legend\nax3.legend()\n#Adjust figure dimensions and save figure\nplt.tight_layout(pad=0.4, w_pad=1, h_pad=1)\n\n# %%\n#Save figure\nfig.savefig(f'{homedir}/figures/enz_deg/modelling/fit_d34s_enz_deg.pdf', \nbbox_inches='tight')\n# %%\n"
] | [
[
"pandas.read_csv",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"matplotlib.ticker.FormatStrFormatter",
"matplotlib.pyplot.subplot",
"numpy.linspace"
]
] |
lukaszplk/bagging | [
"39d2019b8b1cc26399e6470f1a18d4c084ab0056"
] | [
"main.py"
] | [
"# For this basic implementation, we only need these modules\nfrom sklearn.datasets import load_breast_cancer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import BaggingClassifier\n\n# Load the well-known Breast Cancer dataset\n# Split into train and test sets\nx, y = load_breast_cancer(return_X_y=True)\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25, random_state=23)\n\n# For simplicity, we are going to use as base estimator a Decision Tree with fixed parameters\ntree = DecisionTreeClassifier(max_depth=3, random_state=23)\n\n# The baggging ensemble classifier is initialized with:\n# base_estimator = DecisionTree\n# n_estimators = 5 : it's gonna be created 5 subsets to train 5 Decision Tree models\n# max_samples = 50 : it's gonna be taken randomly 50 items with replacement\n# bootstrap = True : means that the sampling is gonna be with replacement\nbagging = BaggingClassifier(base_estimator=tree, n_estimators=5, max_samples=50, bootstrap=True)\n\n# Training\nbagging.fit(x_train, y_train)\n\n# Evaluating\nprint(f\"Train score: {bagging.score(x_train, y_train)}\")\nprint(f\"Test score: {bagging.score(x_test, y_test)}\")\n"
] | [
[
"sklearn.tree.DecisionTreeClassifier",
"sklearn.datasets.load_breast_cancer",
"sklearn.model_selection.train_test_split",
"sklearn.ensemble.BaggingClassifier"
]
] |
RamonPujol/OTSun | [
"0a587980b8465bcc886811de246718e08e6dab06"
] | [
"tests/test_4.py"
] | [
"import sys\r\nimport otsun\r\nimport os\r\nimport FreeCAD\r\nfrom FreeCAD import Base\r\nimport Part\r\nimport numpy as np\r\nnp.random.seed(1)\r\n\r\nimport random\r\nrandom.seed(1)\r\n\r\nimport logging\r\nlogger = otsun.logger\r\nlogger.setLevel(logging.ERROR)\r\n\r\n# create console handler and set level to debug\r\nch = logging.StreamHandler()\r\nch.setLevel(logging.ERROR)\r\n\r\n# create formatter\r\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\r\n\r\n# add formatter to ch\r\nch.setFormatter(formatter)\r\n\r\n# add ch to logger\r\nlogger.addHandler(ch)\r\n\r\nMyProject = 'Perovskite_Stack_200nm.FCStd'\r\nFreeCAD.openDocument(MyProject)\r\n\r\n\r\ndoc = FreeCAD.ActiveDocument\r\n\r\n\r\n# ---\r\n# Materials\r\n# ---\r\notsun.TransparentSimpleLayer(\"Trans\", 1.0)\r\notsun.AbsorberSimpleLayer(\"Abs\", 1.0)\r\notsun.TwoLayerMaterial(\"Trans_Abs\", \"Trans\", \"Abs\")\r\nfile_thin_film = 'Fitxer_OTSun_Exp1a_theta0_90.txt'\r\nfile_Perovskite = 'Perovskite_Leguy.txt'\r\notsun.PolarizedThinFilm(\"ThinFilm\", file_thin_film, \"Vacuum\", file_Perovskite)\r\notsun.PVMaterial(\"PV\", file_Perovskite)\r\nfile_Spiro = 'Spiro_.txt'\r\notsun.WavelengthVolumeMaterial(\"Spiro\", file_Spiro)\r\nfile_Ag = 'Ag_Yang.txt'\r\notsun.MetallicLambertianLayer(\"Ag\", file_Ag)\r\n\r\n# ---\r\n# Constant inputs for Spectral Analysis\r\n# ---\r\npolarization_vector = None\r\nshow_in_doc = None\r\n# show_in_doc = doc\r\n# --------- end\r\n\r\n# ---\r\n# Inputs for Spectral Analysis\r\n# ---\r\nphi = 0.0 # default value zero\r\nphi = phi + 1.E-9\r\ntheta = 0.0 # default value zero\r\ntheta = theta + 1.E-9\r\nwavelength_ini = 500.0 # default value 280.0\r\nwavelength_end = 502.0 # default value 4000.0\r\nwavelength_end = wavelength_end + 1E-4\r\nwavelength_step = 2.0 # default value 10.0\r\nnumber_of_rays = 100 # number of rays per wavelength # default value 1000\r\naperture_collector_Th = 1000. * 1000. * 1.0 # default value zero\r\naperture_collector_PV = 1000. * 1000. 
* 1.0 # default value zero\r\n# for direction of the source two options: Buie model or main_direction \r\ndirection_distribution = None # default option main_direction\r\n# CSR = 0.05\r\n# Buie_model = raytrace.buie_distribution(CSR)\r\n# direction_distribution = Buie_model\r\n# for the internal quantum efficiency two options: constant value =< 1.0, or data file \r\ninternal_quantum_efficiency = 1.0 # default option equal to 1.0\r\n# internal_quantum_efficiency = 'data.txt' \r\ndata_file_spectrum = 'ASTMG173-direct.txt'\r\n# --------- end\r\n\r\n\r\n\r\n# ---\r\n# Magnitudes used for outputs in Spectral Analysis\r\n# ---\r\ncaptured_energy_PV = 0.0\r\ncaptured_energy_Th = 0.0\r\nsource_wavelength = []\r\nTh_energy = []\r\nTh_wavelength = []\r\nTh_points_absorber = []\r\nPV_energy = []\r\nPV_wavelength = []\r\nPV_values = []\r\n# --------- end\r\n\r\n# objects for scene\r\nsel = doc.Objects\r\ncurrent_scene = otsun.Scene(sel)\r\n\r\nfor w in np.arange(wavelength_ini, wavelength_end , wavelength_step):\r\n light_spectrum = w\r\n main_direction = otsun.polar_to_cartesian(phi, theta) * -1.0 # Sun direction vector\r\n emitting_region = otsun.SunWindow(current_scene, main_direction)\r\n l_s = otsun.LightSource(current_scene, emitting_region, light_spectrum, 1.0, direction_distribution, polarization_vector)\r\n exp = otsun.Experiment(current_scene, l_s, number_of_rays, show_in_doc)\r\n exp.run(show_in_doc)\r\n print (\"%s\" % (w)+ '\\n')\r\n Th_energy.append(exp.Th_energy)\r\n Th_wavelength.append(exp.Th_wavelength) \r\n PV_energy.append(exp.PV_energy)\r\n PV_wavelength.append(exp.PV_wavelength)\r\n source_wavelength.append(w) \r\n if exp.PV_values:\r\n PV_values.append(exp.PV_values)\r\n if exp.points_absorber_Th:\r\n Th_points_absorber.append(exp.points_absorber_Th)\r\n captured_energy_PV += exp.captured_energy_PV\r\n captured_energy_Th += exp.captured_energy_Th\r\n\r\n# ---\r\n# Output file for wavelengths emitted by the source\r\n# ---\r\n#data_source_wavelength = np.array(np.concatenate(source_wavelength))\r\ndata_source_wavelength = np.array(source_wavelength)\r\ndata_source_wavelength = data_source_wavelength.T\r\n# --------- end\r\n\r\n# ---\r\n# Output source spectrum for calculation and total energy emitted\r\n# ---\r\nsource_spectrum = otsun.spectrum_to_constant_step(data_file_spectrum, wavelength_step, wavelength_ini, wavelength_end)\r\nenergy_emitted = np.trapz(source_spectrum[:,1], x = source_spectrum[:,0])\r\n# --------- end\r\n\r\n\r\n# ---\r\n# Outputs for thermal absorber materials (Th) in Spectral Analysis\r\n# ---\r\nif captured_energy_Th > 1E-9:\r\n data_Th_points_absorber = np.array(np.concatenate(Th_points_absorber))\r\n table_Th = otsun.make_histogram_from_experiment_results(Th_wavelength, Th_energy, wavelength_step,\r\n aperture_collector_Th,\r\n exp.light_source.emitting_region.aperture)\r\n spectrum_by_table_Th = source_spectrum[:,1] * table_Th[:,1]\t\t\r\n power_absorbed_from_source_Th = np.trapz(spectrum_by_table_Th, x = source_spectrum[:,0])\r\n efficiency_from_source_Th = power_absorbed_from_source_Th / energy_emitted\r\n\r\n # print power_absorbed_from_source_Th * aperture_collector_Th * 1E-6, energy_emitted * exp.light_source.emitting_region.aperture * 1E-6, efficiency_from_source_Th\r\n\r\n# --------- end\t\t\r\n\t\r\n# ---\r\n# Outputs for photovoltaic materials (PV) in Spectral Analysis\r\n# ---\r\n\r\nif captured_energy_PV > 1E-9:\r\n\r\n data_PV_values = np.array(np.concatenate(PV_values))\r\n table_PV = 
otsun.make_histogram_from_experiment_results(PV_wavelength, PV_energy, wavelength_step,\r\n aperture_collector_PV,\r\n exp.light_source.emitting_region.aperture)\r\n spectrum_by_table_PV = source_spectrum[:,1] * table_PV[:,1]\t\t\r\n power_absorbed_from_source_PV = np.trapz(spectrum_by_table_PV, x = source_spectrum[:,0])\r\n efficiency_from_source_PV = power_absorbed_from_source_PV / energy_emitted\r\n iqe = internal_quantum_efficiency\r\n SR = otsun.spectral_response(table_PV, iqe)\r\n ph_cu = otsun.photo_current(SR, source_spectrum)\r\n\r\n # print power_absorbed_from_source_PV * aperture_collector_PV * 1E-6, energy_emitted * exp.light_source.emitting_region.aperture * 1E-6, efficiency_from_source_PV, ph_cu\r\n\r\n# --------- end\r\n\r\nFreeCAD.closeDocument(FreeCAD.ActiveDocument.Name)\r\n\r\nprint (table_Th, table_PV)\r\nprint (0.12 > table_Th[0][1] > 0.0 and 0.12 > table_Th[1][1] > 0.0 and 0.98 > table_PV[0][1] > 0.75 and 0.98 > table_PV[1][1] > 0.75)\r\n\r\ndef test_4():\r\n assert 0.12 > table_Th[0][1] > 0.0 and 0.12 > table_Th[1][1] > 0.0 and 0.98 > table_PV[0][1] > 0.75 and 0.98 > table_PV[1][1] > 0.75\r\n\r\n"
] | [
[
"numpy.random.seed",
"numpy.arange",
"numpy.array",
"numpy.trapz",
"numpy.concatenate"
]
] |
jamiecook/AequilibraE | [
"b1013d59cbeaf6fc4e1a944cf31f20460a2a4156"
] | [
"aequilibrae/paths/all_or_nothing.py"
] | [
"\"\"\"\n -----------------------------------------------------------------------------------------------------------\n Package: AequilibraE\n\n Name: Traffic assignment\n Purpose: Implement traffic assignment algorithms based on Cython's network loading procedures\n\n Original Author: Pedro Camargo ([email protected])\n Contributors:\n Last edited by: Pedro Camargo\n\n Website: www.AequilibraE.com\n Repository: https://github.com/AequilibraE/AequilibraE\n\n Created: 15/09/2013\n Updated: 2017-05-07\n Copyright: (c) AequilibraE authors\n Licence: See LICENSE.TXT\n -----------------------------------------------------------------------------------------------------------\n \"\"\"\n\nimport sys\nsys.dont_write_bytecode = True\n\nimport numpy as np\nimport thread\nfrom multiprocessing.dummy import Pool as ThreadPool\ntry:\n from PyQt4.QtCore import SIGNAL\n pyqt = True\nexcept:\n pyqt = False\n\nfrom multi_threaded_aon import MultiThreadedAoN\ntry:\n from AoN import one_to_all, path_computation\nexcept:\n pass\n\nfrom ..utils import WorkerThread\n\n\nclass allOrNothing(WorkerThread):\n def __init__(self, matrix, graph, results):\n WorkerThread.__init__(self, None)\n\n self.matrix = matrix\n self.graph = graph\n self.results = results\n self.aux_res = MultiThreadedAoN()\n self.report = []\n self.cumulative = 0\n\n if results.__graph_id__ != graph.__id__:\n raise ValueError(\"Results object not prepared. Use --> results.prepare(graph)\")\n\n if results.__graph_id__ is None:\n raise ValueError('The results object was not prepared. Use results.prepare(graph)')\n\n elif results.__graph_id__ != graph.__id__:\n raise ValueError('The results object was prepared for a different graph')\n\n elif matrix.matrix_view is None:\n raise ValueError('Matrix was not prepared for assignment. 
'\n 'Please create a matrix_procedures view with all classes you want to assign')\n\n elif not np.array_equal(matrix.index, graph.centroids):\n raise ValueError('Matrix and graph do not have compatible set of centroids.')\n\n def doWork(self):\n self.execute()\n\n def execute(self):\n if pyqt:\n self.emit(SIGNAL(\"assignment\"), ['zones finalized', 0])\n\n self.aux_res.prepare(self.graph, self.results)\n self.matrix.matrix_view = self.matrix.matrix_view.reshape((self.graph.num_zones, self.graph.num_zones,\n self.results.classes['number']))\n mat = self.matrix.matrix_view\n pool = ThreadPool(self.results.cores)\n all_threads = {'count': 0}\n for orig in self.matrix.index:\n i = int(self.graph.nodes_to_indices[orig])\n if np.nansum(mat[i, :, :]) > 0:\n if self.graph.fs[i] == self.graph.fs[i+1]:\n self.report.append(\"Centroid \" + str(orig) + \" is not connected\")\n else:\n pool.apply_async(self.func_assig_thread, args=(orig, all_threads))\n # self.func_assig_thread(orig, all_threads)\n pool.close()\n pool.join()\n self.results.link_loads = np.sum(self.aux_res.temp_link_loads, axis=2)\n\n if pyqt:\n self.emit(SIGNAL(\"assignment\"), ['text AoN', \"Saving Outputs\"])\n self.emit(SIGNAL(\"assignment\"), ['finished_threaded_procedure', None])\n\n def func_assig_thread(self, O, all_threads):\n if thread.get_ident() in all_threads:\n th = all_threads[thread.get_ident()]\n else:\n all_threads[thread.get_ident()] = all_threads['count']\n th = all_threads['count']\n all_threads['count'] += 1\n x = one_to_all(O, self.matrix, self.graph, self.results, self.aux_res, th)\n self.cumulative += 1\n if x != O:\n self.report.append(x)\n if pyqt:\n self.emit(SIGNAL(\"assignment\"), ['zones finalized', self.cumulative])\n txt = str(self.cumulative) + ' / ' + str(self.matrix.zones)\n self.emit(SIGNAL(\"assignment\"), ['text AoN', txt])"
] | [
[
"numpy.sum",
"numpy.nansum",
"numpy.array_equal"
]
] |
TanmayKumar-EngStud/Natural_Language_Processing | [
"7545e7f9273fd2e2a623d1f8e027b7981cf6ed0b"
] | [
"Latent Dirichlet Allocation/Latent Dirichlet Allocation.py"
] | [
"from sklearn.feature_extraction.text import CountVectorizer\nimport pandas as pd\nimport os\nos.system('clear')\n# This program is to understand the concept of Topic modelling using \n# Latent Dirichlet Allocation\n\nfile = open('shakesphere.txt','r')\n\ncVector = CountVectorizer(max_df=0.95, min_df=2, stop_words='english')\n\n#creating a paragraph from the file\ndoc = file.read()\nvector = cVector.fit_transform([text for text in doc.split('\\n\\n')])\n\nfrom sklearn.decomposition import LatentDirichletAllocation\nLDA = LatentDirichletAllocation(n_components = 7)\nLDA.fit(vector)\n\nfor index,topic in enumerate(LDA.components_):\n print(f'THE TOP 10 WORDS FOR TOPIC #{index}')\n print([cVector.get_feature_names()[i] for i in topic.argsort()[-10:]])\n print('\\n')\n# these are the top 10 topics which the machine will use \n# in order to classify the paragraphs we have in the file\ntopicSet = []\nfor index, topic in enumerate(LDA.components_):\n miniSet =[]\n for i in topic.argsort()[-10:]:\n miniSet.append(cVector.get_feature_names()[i])\n topicSet.append(miniSet) \n\ntopic_results = LDA.transform(vector)\n\n\n\n#we have to create a CSV file\ndataFrame = {'Text':doc.split('\\n\\n')}\ndf = pd.DataFrame(dataFrame)\ndf[\"Topic\"] = topic_results.argmax(axis=1)\ndf.to_csv(\"NewCSV.tsv\", sep='\\t', index=True, header=False, encoding='utf-8')\n\nprint(df.head(5))"
] | [
[
"pandas.DataFrame",
"sklearn.feature_extraction.text.CountVectorizer",
"sklearn.decomposition.LatentDirichletAllocation"
]
] |
FANJIYU0825/federated-learning | [
"5772ca0a321a222eae5d5e29b70fb4a468c28374"
] | [
"main.py"
] | [
"import sys\nsys.path.insert(0,\"/content/federated-learning/\")\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport copy\nimport numpy as np\nfrom torchvision import datasets, transforms\nimport torch\n\nfrom utils.sampling import mnist_iid, mnist_noniid, cifar_iid\nfrom utils.options import args_parser\nfrom models.Update import LocalUpdate\nfrom models.Nets import MLP, CNNMnist, CNNCifar\nfrom models.Fed import FedAvg\nfrom models.test import test_img\n"
] | [
[
"matplotlib.use"
]
] |
Liang813/graphics | [
"71ab1775228a0a292427551350cbb62bfa8bd01a"
] | [
"tensorflow_graphics/projects/points_to_3Dobjects/utils/tf_utils.py"
] | [
"# Copyright 2020 The TensorFlow Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Utility functions for TensorFlow.\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\n\n\ndef gaussian_blur(img, sigma=5):\n \"\"\"Applies gaussian blur to the given image.\n\n Args:\n img: The input image.\n sigma: The gaussian kernel size.\n\n Returns:\n Gaussian blurred image.\n \"\"\"\n kernel_size = 2 * sigma\n def gauss_kernel(channels, kernel_size, sigma):\n ax = tf.range(-kernel_size // 2 + 1.0, kernel_size // 2 + 1.0)\n xx, yy = tf.meshgrid(ax, ax)\n kernel = tf.exp(-(xx ** 2 + yy ** 2) / (2.0 * sigma ** 2))\n kernel = kernel / tf.reduce_sum(kernel)\n kernel = tf.tile(kernel[..., tf.newaxis], [1, 1, channels])\n return kernel\n\n gaussian_kernel = gauss_kernel(tf.shape(img)[-1], kernel_size, sigma)\n gaussian_kernel = gaussian_kernel[..., tf.newaxis]\n blurred_image = tf.nn.depthwise_conv2d(tf.expand_dims(img, axis=0),\n gaussian_kernel, [1, 1, 1, 1],\n padding='SAME', data_format='NHWC')\n return tf.squeeze(blurred_image, axis=0)\n\n\ndef euler_from_rotation_matrix(matrix: tf.Tensor, axis: int) -> tf.float32:\n \"\"\"Extracts the euler angle of a 3D rotation around axis.\n\n Args:\n matrix: The 3D input rotation matrix.\n axis: The rotation axis.\n\n Returns:\n The euler angle in radians around the specified axis.\n \"\"\"\n tf.debugging.assert_integer(axis)\n tf.debugging.assert_less_equal(axis, 2)\n tf.debugging.assert_greater_equal(axis, 0)\n tf.debugging.assert_equal(matrix.shape, [3, 3])\n mask = np.ones((3, 3), dtype=bool)\n mask[axis, :] = False\n mask[:, axis] = False\n matrix2d = tf.reshape(tf.boolean_mask(matrix, mask), [2, 2])\n a = matrix2d[0, 1] if axis == 1 else matrix2d[1, 0]\n euler_angle = tf.math.atan2(a, matrix2d[0, 0])\n return euler_angle\n\n\ndef compute_dot(image_size: tf.Tensor,\n intrinsics: tf.Tensor,\n extrinsics: tf.Tensor,\n axis=1,\n image_intersection=(0.5, 0.75)) -> tf.Tensor:\n \"\"\"Computes the intersection at the ground plane of a ray from the camera.\n\n Args:\n image_size: The size of the camera image.\n intrinsics: The camera intrinsics matrix. Shape: (3, 3)\n extrinsics: The camera extrinsics matrix. Shape: (3, 4)\n axis: The ground plane corresponds to the plane defined by axis = 0.\n image_intersection: The relative image position of the ray intersection.\n\n Returns:\n The intersection. 
Shape: (3, 1)\n \"\"\"\n # Shoot ray through image pixel\n ray_2d = tf.cast(image_size, tf.float32) * [image_intersection[0],\n image_intersection[1],\n 1/image_size[-1]]\n ray_2d = tf.reshape(ray_2d, [3, 1])\n\n # Invert intrinsics matrix K\n k = tf.reshape(intrinsics, [3, 3])\n k_inv = tf.linalg.inv(k)\n\n # Decompose extrinsics into rotation and translation, and inverte\n rt = tf.reshape(extrinsics, [3, 4])\n r = tf.gather(rt, [0, 1, 2], axis=1)\n t = tf.gather(rt, [3], axis=1)\n\n r_inv = tf.linalg.inv(r)\n t_inv = r_inv @ t * (-1)\n\n # Compute ray intersection with the ground plane along specified axis\n ray = r_inv @ k_inv @ ray_2d\n l = t_inv[axis] * -1 / ray[axis] # determine lambda\n l = tf.expand_dims(l, -1)\n\n # this is the same\n dot = ray * l + t_inv\n return dot\n\n\ndef get_next_sample_dataset(dataset_iter):\n \"\"\"Get next sample.\"\"\"\n try:\n sample = next(dataset_iter)\n except (StopIteration, RuntimeError) as e:\n if \"Can't copy Tensor with type\" in str(e):\n sample = None\n elif isinstance(e, StopIteration):\n sample = None\n else:\n raise e\n return sample\n\n\ndef get_devices(gpu_ids):\n \"\"\"Get device.\"\"\"\n if gpu_ids is not None:\n gpu_ids = [f'/gpu:{gpu}' for gpu in gpu_ids.split(',')]\n cpu_ids = [\n f'/cpu:{x.name.split(\":\")[-1]}'\n for x in tf.config.list_physical_devices('CPU')\n ]\n device_ids = [*cpu_ids, *gpu_ids]\n else:\n device_ids = None\n return device_ids\n\n\ndef using_multigpu():\n multigpu = False\n if tf.distribute.has_strategy():\n strategy = tf.distribute.get_strategy()\n if strategy.num_replicas_in_sync > 1:\n multigpu = True\n return multigpu\n\n\ndef compute_batch_size(tensor_dict):\n \"\"\"Compute batch size.\"\"\"\n if using_multigpu():\n dummy_tensor = next(iter(tensor_dict.values())).values\n batch_size = 0\n for ii in range(len(dummy_tensor)):\n batch_size += tf.shape(dummy_tensor[ii])[0]\n else:\n dummy_tensor = next(iter(tensor_dict.values()))\n batch_size = tf.shape(dummy_tensor)[0]\n\n return batch_size\n"
] | [
[
"numpy.ones",
"tensorflow.gather",
"tensorflow.reshape",
"tensorflow.squeeze",
"tensorflow.reduce_sum",
"tensorflow.math.atan2",
"tensorflow.meshgrid",
"tensorflow.shape",
"tensorflow.expand_dims",
"tensorflow.cast",
"tensorflow.debugging.assert_greater_equal",
"tensorflow.linalg.inv",
"tensorflow.boolean_mask",
"tensorflow.tile",
"tensorflow.debugging.assert_less_equal",
"tensorflow.debugging.assert_equal",
"tensorflow.range",
"tensorflow.exp",
"tensorflow.debugging.assert_integer",
"tensorflow.distribute.get_strategy",
"tensorflow.distribute.has_strategy",
"tensorflow.config.list_physical_devices"
]
] |
insomnia94/OnAVOS | [
"1e34ba504519a6b6659e96b3a6e0427860afce1e"
] | [
"crf/crf_youtube.py"
] | [
"#!/usr/bin/env python3\n\nimport pydensecrf.densecrf as dcrf\nfrom pydensecrf.utils import unary_from_softmax\nfrom scipy.ndimage import imread\nfrom scipy.misc import imsave, imresize\nimport pickle\nimport numpy\nimport glob\nimport os\nfrom joblib import Parallel, delayed\nimport sys\n\nimgs_path = \"/data/corpora/youtube-objects/youtube_masks_full/\"\npreds_path_prefix = \"/fastwork/voigtlaender/mywork/data/training/2016-01-13-tf-test/forwarded/\"\n\n\ndef convert_path(inp):\n sp = inp.split(\"/\")\n fwd_idx = sp.index(\"forwarded\")\n\n seq = sp[fwd_idx + 3]\n fn = sp[-1]\n\n mainseq = seq.split(\"_\")[0]\n subseq = seq.split(\"_\")[1]\n im_path = imgs_path + mainseq + \"/data/\" + subseq + \"/shots/001/images/\" + \\\n fn.replace(\".pickle\", \".jpg\")\n # .replace(\"frame\", \"\")\n\n sp[fwd_idx + 1] += \"_crf\"\n sp[-1] = sp[-1].replace(\".pickle\", \".png\")\n out_path = \"/\".join(sp)\n return im_path, out_path\n\n\ndef mkdir_p(d):\n try:\n os.makedirs(d)\n except OSError as err:\n if err.errno != 17:\n raise\n\n\ndef apply_crf(im, pred):\n im = numpy.ascontiguousarray(im)\n if im.shape[:2] != pred.shape[:2]:\n im = imresize(im, pred.shape[:2])\n\n pred = numpy.ascontiguousarray(pred.swapaxes(0, 2).swapaxes(1, 2))\n\n d = dcrf.DenseCRF2D(im.shape[1], im.shape[0], 2) # width, height, nlabels\n unaries = unary_from_softmax(pred, scale=1.0)\n d.setUnaryEnergy(unaries)\n\n d.addPairwiseGaussian(sxy=0.220880737269, compat=1.24845093352)\n d.addPairwiseBilateral(sxy=22.3761305044, srgb=7.70254062277, rgbim=im, compat=1.40326787165)\n processed = d.inference(12)\n res = numpy.argmax(processed, axis=0).reshape(im.shape[:2])\n\n return res\n\n\ndef do_seq(seq, model, save=True):\n preds_path = preds_path_prefix + model + \"/valid/\"\n files = sorted(glob.glob(preds_path + seq + \"/*.pickle\"))\n for f in files:\n pred_path = f\n im_path, out_path = convert_path(f)\n pred = pickle.load(open(pred_path))\n im = imread(im_path)\n res = apply_crf(im, pred).astype(\"uint8\") * 255\n # before = numpy.argmax(pred, axis=2)\n if save:\n dir_ = \"/\".join(out_path.split(\"/\")[:-1])\n mkdir_p(dir_)\n imsave(out_path, res)\n\n print(out_path)\n\n\ndef main():\n seqs = [\"aeroplane_0001\", \"aeroplane_0002\", \"aeroplane_0010\", \"aeroplane_0011\", \"aeroplane_0012\", \"aeroplane_0013\",\n \"bird_0001\", \"bird_0007\", \"bird_0010\", \"bird_0011\", \"bird_0012\", \"bird_0014\", \"boat_0001\", \"boat_0003\",\n \"boat_0004\", \"boat_0005\", \"boat_0006\", \"boat_0007\", \"boat_0008\", \"boat_0009\", \"boat_0010\", \"boat_0011\",\n \"boat_0012\", \"boat_0014\", \"boat_0015\", \"boat_0016\", \"boat_0017\", \"car_0001\", \"car_0002\", \"car_0003\",\n \"car_0004\", \"car_0005\", \"car_0008\", \"car_0009\", \"cat_0001\", \"cat_0002\", \"cat_0003\", \"cat_0004\", \"cat_0006\",\n \"cat_0008\", \"cat_0010\", \"cat_0011\", \"cat_0012\", \"cat_0013\", \"cat_0014\", \"cat_0015\", \"cat_0016\", \"cat_0017\",\n \"cat_0018\", \"cat_0020\", \"cow_0001\", \"cow_0002\", \"cow_0003\", \"cow_0004\", \"cow_0005\", \"cow_0006\", \"cow_0007\",\n \"cow_0008\", \"cow_0009\", \"cow_0010\", \"cow_0011\", \"cow_0012\", \"cow_0013\", \"cow_0014\", \"cow_0015\", \"cow_0016\",\n \"cow_0017\", \"cow_0018\", \"cow_0022\", \"dog_0001\", \"dog_0003\", \"dog_0005\", \"dog_0006\", \"dog_0007\", \"dog_0008\",\n \"dog_0009\", \"dog_0010\", \"dog_0012\", \"dog_0013\", \"dog_0014\", \"dog_0016\", \"dog_0018\", \"dog_0020\", \"dog_0021\",\n \"dog_0022\", \"dog_0023\", \"dog_0025\", \"dog_0026\", \"dog_0027\", \"dog_0028\", \"dog_0030\", 
\"dog_0031\", \"dog_0032\",\n \"dog_0034\", \"dog_0035\", \"dog_0036\", \"horse_0001\", \"horse_0009\", \"horse_0010\", \"horse_0011\", \"horse_0012\",\n \"horse_0014\", \"horse_0018\", \"horse_0020\", \"horse_0021\", \"horse_0022\", \"horse_0024\", \"horse_0025\",\n \"horse_0026\", \"horse_0029\", \"motorbike_0001\", \"motorbike_0002\", \"motorbike_0003\", \"motorbike_0006\",\n \"motorbike_0009\", \"motorbike_0011\", \"motorbike_0012\", \"motorbike_0013\", \"motorbike_0014\", \"train_0001\",\n \"train_0003\", \"train_0008\", \"train_0024\", \"train_0025\"]\n\n save = True\n assert len(sys.argv) == 2\n model = sys.argv[1]\n Parallel(n_jobs=20)(delayed(do_seq)(seq, model, save=save) for seq in seqs)\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"scipy.misc.imsave",
"scipy.misc.imresize",
"numpy.argmax",
"numpy.ascontiguousarray",
"scipy.ndimage.imread"
]
] |
miguelmorin/pandas | [
"ce4ab828d882a0c50f2f63921621ccae0d14b5ae"
] | [
"pandas/core/groupby/groupby.py"
] | [
"import types\nfrom functools import wraps, partial\nimport numpy as np\nimport datetime\nimport collections\nimport warnings\nimport copy\nfrom textwrap import dedent\n\nfrom pandas.compat import (\n zip, range, lzip,\n callable, map\n)\n\nfrom pandas import compat\nfrom pandas.compat.numpy import function as nv\nfrom pandas.compat import set_function_name\n\nfrom pandas.core.dtypes.common import (\n is_numeric_dtype,\n is_timedelta64_dtype, is_datetime64_dtype,\n is_categorical_dtype,\n is_interval_dtype,\n is_datetimelike,\n is_datetime64_any_dtype,\n is_bool, is_integer_dtype,\n is_complex_dtype,\n is_bool_dtype,\n is_scalar,\n is_list_like,\n is_hashable,\n needs_i8_conversion,\n _ensure_float64,\n _ensure_platform_int,\n _ensure_int64,\n _ensure_object,\n _ensure_categorical,\n _ensure_float)\nfrom pandas.core.dtypes.cast import maybe_downcast_to_dtype\nfrom pandas.core.dtypes.generic import ABCSeries\nfrom pandas.core.dtypes.missing import isna, isnull, notna, _maybe_fill\n\nfrom pandas.core.base import (PandasObject, SelectionMixin, GroupByError,\n DataError, SpecificationError)\nfrom pandas.core.index import (Index, MultiIndex,\n CategoricalIndex, _ensure_index)\nfrom pandas.core.arrays import ExtensionArray, Categorical\nfrom pandas.core.frame import DataFrame\nfrom pandas.core.generic import NDFrame, _shared_docs\nfrom pandas.core.internals import BlockManager, make_block\nfrom pandas.core.series import Series\nfrom pandas.core.panel import Panel\nfrom pandas.core.sorting import (get_group_index_sorter, get_group_index,\n compress_group_index, get_flattened_iterator,\n decons_obs_group_ids, get_indexer_dict)\nfrom pandas.util._decorators import (cache_readonly, Substitution,\n Appender, make_signature)\nfrom pandas.io.formats.printing import pprint_thing\nfrom pandas.util._validators import validate_kwargs\n\nimport pandas.core.common as com\nimport pandas.core.algorithms as algorithms\nfrom pandas.core.config import option_context\n\nfrom pandas.plotting._core import boxplot_frame_groupby\n\nfrom pandas._libs import (lib, reduction,\n groupby as libgroupby,\n Timestamp, NaT, iNaT)\nfrom pandas._libs.lib import count_level_2d\n\n_doc_template = \"\"\"\n\n See also\n --------\n pandas.Series.%(name)s\n pandas.DataFrame.%(name)s\n pandas.Panel.%(name)s\n\"\"\"\n\n_apply_docs = dict(\n template=\"\"\"\n Apply function ``func`` group-wise and combine the results together.\n\n The function passed to ``apply`` must take a {input} as its first\n argument and return a dataframe, a series or a scalar. ``apply`` will\n then take care of combining the results back together into a single\n dataframe or series. ``apply`` is therefore a highly flexible\n grouping method.\n\n While ``apply`` is a very flexible method, its downside is that\n using it can be quite a bit slower than using more specific methods.\n Pandas offers a wide range of method that will be much faster\n than using ``apply`` for their specific purposes, so try to use them\n before reaching for ``apply``.\n\n Parameters\n ----------\n func : function\n A callable that takes a {input} as its first argument, and\n returns a dataframe, a series or a scalar. In addition the\n callable may take positional and keyword arguments\n args, kwargs : tuple and dict\n Optional positional and keyword arguments to pass to ``func``\n\n Returns\n -------\n applied : Series or DataFrame\n\n Notes\n -----\n In the current implementation ``apply`` calls func twice on the\n first group to decide whether it can take a fast or slow code\n path. 
This can lead to unexpected behavior if func has\n side-effects, as they will take effect twice for the first\n group.\n\n Examples\n --------\n {examples}\n\n See also\n --------\n pipe : Apply function to the full GroupBy object instead of to each\n group.\n aggregate, transform\n \"\"\",\n dataframe_examples=\"\"\"\n >>> df = pd.DataFrame({'A': 'a a b'.split(), 'B': [1,2,3], 'C': [4,6, 5]})\n >>> g = df.groupby('A')\n\n From ``df`` above we can see that ``g`` has two groups, ``a``, ``b``.\n Calling ``apply`` in various ways, we can get different grouping results:\n\n Example 1: below the function passed to ``apply`` takes a dataframe as\n its argument and returns a dataframe. ``apply`` combines the result for\n each group together into a new dataframe:\n\n >>> g.apply(lambda x: x / x.sum())\n B C\n 0 0.333333 0.4\n 1 0.666667 0.6\n 2 1.000000 1.0\n\n Example 2: The function passed to ``apply`` takes a dataframe as\n its argument and returns a series. ``apply`` combines the result for\n each group together into a new dataframe:\n\n >>> g.apply(lambda x: x.max() - x.min())\n B C\n A\n a 1 2\n b 0 0\n\n Example 3: The function passed to ``apply`` takes a dataframe as\n its argument and returns a scalar. ``apply`` combines the result for\n each group together into a series, including setting the index as\n appropriate:\n\n >>> g.apply(lambda x: x.C.max() - x.B.min())\n A\n a 5\n b 2\n dtype: int64\n \"\"\",\n series_examples=\"\"\"\n >>> ser = pd.Series([0, 1, 2], index='a a b'.split())\n >>> g = ser.groupby(ser.index)\n\n From ``ser`` above we can see that ``g`` has two groups, ``a``, ``b``.\n Calling ``apply`` in various ways, we can get different grouping results:\n\n Example 1: The function passed to ``apply`` takes a series as\n its argument and returns a series. ``apply`` combines the result for\n each group together into a new series:\n\n >>> g.apply(lambda x: x*2 if x.name == 'b' else x/2)\n 0 0.0\n 1 0.5\n 2 4.0\n dtype: float64\n\n Example 2: The function passed to ``apply`` takes a series as\n its argument and returns a scalar. ``apply`` combines the result for\n each group together into a series, including setting the index as\n appropriate:\n\n >>> g.apply(lambda x: x.max() - x.min())\n a 1\n b 0\n dtype: int64\n \"\"\")\n\n_pipe_template = \"\"\"\\\nApply a function ``func`` with arguments to this %(klass)s object and return\nthe function's result.\n\n%(versionadded)s\n\nUse ``.pipe`` when you want to improve readability by chaining together\nfunctions that expect Series, DataFrames, GroupBy or Resampler objects.\nInstead of writing\n\n>>> h(g(f(df.groupby('group')), arg1=a), arg2=b, arg3=c)\n\nYou can write\n\n>>> (df.groupby('group')\n... .pipe(f)\n... .pipe(g, arg1=a)\n... 
.pipe(h, arg2=b, arg3=c))\n\nwhich is much more readable.\n\nParameters\n----------\nfunc : callable or tuple of (callable, string)\n Function to apply to this %(klass)s object or, alternatively,\n a ``(callable, data_keyword)`` tuple where ``data_keyword`` is a\n string indicating the keyword of ``callable`` that expects the\n %(klass)s object.\nargs : iterable, optional\n positional arguments passed into ``func``.\nkwargs : dict, optional\n a dictionary of keyword arguments passed into ``func``.\n\nReturns\n-------\nobject : the return type of ``func``.\n\nNotes\n-----\nSee more `here\n<http://pandas.pydata.org/pandas-docs/stable/groupby.html#piping-function-calls>`_\n\nExamples\n--------\n%(examples)s\n\nSee Also\n--------\npandas.Series.pipe : Apply a function with arguments to a series\npandas.DataFrame.pipe: Apply a function with arguments to a dataframe\napply : Apply function to each group instead of to the\n full %(klass)s object.\n\"\"\"\n\n_transform_template = \"\"\"\nCall function producing a like-indexed %(klass)s on each group and\nreturn a %(klass)s having the same indexes as the original object\nfilled with the transformed values\n\nParameters\n----------\nf : function\n Function to apply to each group\n\nNotes\n-----\nEach group is endowed the attribute 'name' in case you need to know\nwhich group you are working on.\n\nThe current implementation imposes three requirements on f:\n\n* f must return a value that either has the same shape as the input\n subframe or can be broadcast to the shape of the input subframe.\n For example, f returns a scalar it will be broadcast to have the\n same shape as the input subframe.\n* if this is a DataFrame, f must support application column-by-column\n in the subframe. If f also supports application to the entire subframe,\n then a fast path is used starting from the second chunk.\n* f must not mutate groups. Mutation is not supported and may\n produce unexpected results.\n\nReturns\n-------\n%(klass)s\n\nSee also\n--------\naggregate, transform\n\nExamples\n--------\n\n# Same shape\n>>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',\n... 'foo', 'bar'],\n... 'B' : ['one', 'one', 'two', 'three',\n... 'two', 'two'],\n... 'C' : [1, 5, 5, 2, 5, 5],\n... 
'D' : [2.0, 5., 8., 1., 2., 9.]})\n>>> grouped = df.groupby('A')\n>>> grouped.transform(lambda x: (x - x.mean()) / x.std())\n C D\n0 -1.154701 -0.577350\n1 0.577350 0.000000\n2 0.577350 1.154701\n3 -1.154701 -1.000000\n4 0.577350 -0.577350\n5 0.577350 1.000000\n\n# Broadcastable\n>>> grouped.transform(lambda x: x.max() - x.min())\n C D\n0 4 6.0\n1 3 8.0\n2 4 6.0\n3 3 8.0\n4 4 6.0\n5 3 8.0\n\n\"\"\"\n\n\n# special case to prevent duplicate plots when catching exceptions when\n# forwarding methods from NDFrames\n_plotting_methods = frozenset(['plot', 'boxplot', 'hist'])\n\n_common_apply_whitelist = frozenset([\n 'last', 'first',\n 'head', 'tail', 'median',\n 'mean', 'sum', 'min', 'max',\n 'cumcount', 'ngroup',\n 'resample',\n 'rank', 'quantile',\n 'fillna',\n 'mad',\n 'any', 'all',\n 'take',\n 'idxmax', 'idxmin',\n 'shift', 'tshift',\n 'ffill', 'bfill',\n 'pct_change', 'skew',\n 'corr', 'cov', 'diff',\n]) | _plotting_methods\n\n_series_apply_whitelist = ((_common_apply_whitelist |\n {'nlargest', 'nsmallest',\n 'is_monotonic_increasing',\n 'is_monotonic_decreasing'}) -\n {'boxplot'}) | frozenset(['dtype', 'unique'])\n\n_dataframe_apply_whitelist = ((_common_apply_whitelist |\n frozenset(['dtypes', 'corrwith'])) -\n {'boxplot'})\n\n_cython_transforms = frozenset(['cumprod', 'cumsum', 'shift',\n 'cummin', 'cummax'])\n\n_cython_cast_blacklist = frozenset(['rank', 'count', 'size'])\n\n\nclass Grouper(object):\n \"\"\"\n A Grouper allows the user to specify a groupby instruction for a target\n object\n\n This specification will select a column via the key parameter, or if the\n level and/or axis parameters are given, a level of the index of the target\n object.\n\n These are local specifications and will override 'global' settings,\n that is the parameters axis and level which are passed to the groupby\n itself.\n\n Parameters\n ----------\n key : string, defaults to None\n groupby key, which selects the grouping column of the target\n level : name/number, defaults to None\n the level for the target index\n freq : string / frequency object, defaults to None\n This will groupby the specified frequency if the target selection\n (via key or level) is a datetime-like object. 
For full specification\n of available frequencies, please see `here\n <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`_.\n axis : number/name of the axis, defaults to 0\n sort : boolean, default to False\n whether to sort the resulting labels\n\n additional kwargs to control time-like groupers (when ``freq`` is passed)\n\n closed : closed end of interval; 'left' or 'right'\n label : interval boundary to use for labeling; 'left' or 'right'\n convention : {'start', 'end', 'e', 's'}\n If grouper is PeriodIndex\n base, loffset\n\n Returns\n -------\n A specification for a groupby instruction\n\n Examples\n --------\n\n Syntactic sugar for ``df.groupby('A')``\n\n >>> df.groupby(Grouper(key='A'))\n\n Specify a resample operation on the column 'date'\n\n >>> df.groupby(Grouper(key='date', freq='60s'))\n\n Specify a resample operation on the level 'date' on the columns axis\n with a frequency of 60s\n\n >>> df.groupby(Grouper(level='date', freq='60s', axis=1))\n \"\"\"\n _attributes = ('key', 'level', 'freq', 'axis', 'sort')\n\n def __new__(cls, *args, **kwargs):\n if kwargs.get('freq') is not None:\n from pandas.core.resample import TimeGrouper\n cls = TimeGrouper\n return super(Grouper, cls).__new__(cls)\n\n def __init__(self, key=None, level=None, freq=None, axis=0, sort=False):\n self.key = key\n self.level = level\n self.freq = freq\n self.axis = axis\n self.sort = sort\n\n self.grouper = None\n self.obj = None\n self.indexer = None\n self.binner = None\n self._grouper = None\n\n @property\n def ax(self):\n return self.grouper\n\n def _get_grouper(self, obj, validate=True):\n \"\"\"\n Parameters\n ----------\n obj : the subject object\n validate : boolean, default True\n if True, validate the grouper\n\n Returns\n -------\n a tuple of binner, grouper, obj (possibly sorted)\n \"\"\"\n\n self._set_grouper(obj)\n self.grouper, exclusions, self.obj = _get_grouper(self.obj, [self.key],\n axis=self.axis,\n level=self.level,\n sort=self.sort,\n validate=validate)\n return self.binner, self.grouper, self.obj\n\n def _set_grouper(self, obj, sort=False):\n \"\"\"\n given an object and the specifications, setup the internal grouper\n for this particular specification\n\n Parameters\n ----------\n obj : the subject object\n sort : bool, default False\n whether the resulting grouper should be sorted\n \"\"\"\n\n if self.key is not None and self.level is not None:\n raise ValueError(\n \"The Grouper cannot specify both a key and a level!\")\n\n # Keep self.grouper value before overriding\n if self._grouper is None:\n self._grouper = self.grouper\n\n # the key must be a valid info item\n if self.key is not None:\n key = self.key\n # The 'on' is already defined\n if getattr(self.grouper, 'name', None) == key and \\\n isinstance(obj, ABCSeries):\n ax = self._grouper.take(obj.index)\n else:\n if key not in obj._info_axis:\n raise KeyError(\n \"The grouper name {0} is not found\".format(key))\n ax = Index(obj[key], name=key)\n\n else:\n ax = obj._get_axis(self.axis)\n if self.level is not None:\n level = self.level\n\n # if a level is given it must be a mi level or\n # equivalent to the axis name\n if isinstance(ax, MultiIndex):\n level = ax._get_level_number(level)\n ax = Index(ax._get_level_values(level),\n name=ax.names[level])\n\n else:\n if level not in (0, ax.name):\n raise ValueError(\n \"The level {0} is not valid\".format(level))\n\n # possibly sort\n if (self.sort or sort) and not ax.is_monotonic:\n # use stable sort to support first, last, nth\n indexer = self.indexer = 
ax.argsort(kind='mergesort')\n ax = ax.take(indexer)\n obj = obj._take(indexer, axis=self.axis, is_copy=False)\n\n self.obj = obj\n self.grouper = ax\n return self.grouper\n\n @property\n def groups(self):\n return self.grouper.groups\n\n def __repr__(self):\n attrs_list = [\"{}={!r}\".format(attr_name, getattr(self, attr_name))\n for attr_name in self._attributes\n if getattr(self, attr_name) is not None]\n attrs = \", \".join(attrs_list)\n cls_name = self.__class__.__name__\n return \"{}({})\".format(cls_name, attrs)\n\n\nclass GroupByPlot(PandasObject):\n \"\"\"\n Class implementing the .plot attribute for groupby objects\n \"\"\"\n\n def __init__(self, groupby):\n self._groupby = groupby\n\n def __call__(self, *args, **kwargs):\n def f(self):\n return self.plot(*args, **kwargs)\n f.__name__ = 'plot'\n return self._groupby.apply(f)\n\n def __getattr__(self, name):\n def attr(*args, **kwargs):\n def f(self):\n return getattr(self.plot, name)(*args, **kwargs)\n return self._groupby.apply(f)\n return attr\n\n\nclass _GroupBy(PandasObject, SelectionMixin):\n _group_selection = None\n _apply_whitelist = frozenset([])\n\n def __init__(self, obj, keys=None, axis=0, level=None,\n grouper=None, exclusions=None, selection=None, as_index=True,\n sort=True, group_keys=True, squeeze=False,\n observed=None, **kwargs):\n\n self._selection = selection\n\n if isinstance(obj, NDFrame):\n obj._consolidate_inplace()\n\n self.level = level\n\n if not as_index:\n if not isinstance(obj, DataFrame):\n raise TypeError('as_index=False only valid with DataFrame')\n if axis != 0:\n raise ValueError('as_index=False only valid for axis=0')\n\n self.as_index = as_index\n self.keys = keys\n self.sort = sort\n self.group_keys = group_keys\n self.squeeze = squeeze\n self.observed = observed\n self.mutated = kwargs.pop('mutated', False)\n\n if grouper is None:\n grouper, exclusions, obj = _get_grouper(obj, keys,\n axis=axis,\n level=level,\n sort=sort,\n observed=observed,\n mutated=self.mutated)\n\n self.obj = obj\n self.axis = obj._get_axis_number(axis)\n self.grouper = grouper\n self.exclusions = set(exclusions) if exclusions else set()\n\n # we accept no other args\n validate_kwargs('group', kwargs, {})\n\n def __len__(self):\n return len(self.groups)\n\n def __unicode__(self):\n # TODO: Better unicode/repr for GroupBy object\n return object.__repr__(self)\n\n def _assure_grouper(self):\n \"\"\"\n we create the grouper on instantiation\n sub-classes may have a different policy\n \"\"\"\n pass\n\n @property\n def groups(self):\n \"\"\" dict {group name -> group labels} \"\"\"\n self._assure_grouper()\n return self.grouper.groups\n\n @property\n def ngroups(self):\n self._assure_grouper()\n return self.grouper.ngroups\n\n @property\n def indices(self):\n \"\"\" dict {group name -> group indices} \"\"\"\n self._assure_grouper()\n return self.grouper.indices\n\n def _get_indices(self, names):\n \"\"\"\n safe get multiple indices, translate keys for\n datelike to underlying repr\n \"\"\"\n\n def get_converter(s):\n # possibly convert to the actual key types\n # in the indices, could be a Timestamp or a np.datetime64\n if isinstance(s, (Timestamp, datetime.datetime)):\n return lambda key: Timestamp(key)\n elif isinstance(s, np.datetime64):\n return lambda key: Timestamp(key).asm8\n else:\n return lambda key: key\n\n if len(names) == 0:\n return []\n\n if len(self.indices) > 0:\n index_sample = next(iter(self.indices))\n else:\n index_sample = None # Dummy sample\n\n name_sample = names[0]\n if isinstance(index_sample, 
tuple):\n if not isinstance(name_sample, tuple):\n msg = (\"must supply a tuple to get_group with multiple\"\n \" grouping keys\")\n raise ValueError(msg)\n if not len(name_sample) == len(index_sample):\n try:\n # If the original grouper was a tuple\n return [self.indices[name] for name in names]\n except KeyError:\n # turns out it wasn't a tuple\n msg = (\"must supply a a same-length tuple to get_group\"\n \" with multiple grouping keys\")\n raise ValueError(msg)\n\n converters = [get_converter(s) for s in index_sample]\n names = [tuple(f(n) for f, n in zip(converters, name))\n for name in names]\n\n else:\n converter = get_converter(index_sample)\n names = [converter(name) for name in names]\n\n return [self.indices.get(name, []) for name in names]\n\n def _get_index(self, name):\n \"\"\" safe get index, translate keys for datelike to underlying repr \"\"\"\n return self._get_indices([name])[0]\n\n @cache_readonly\n def _selected_obj(self):\n\n if self._selection is None or isinstance(self.obj, Series):\n if self._group_selection is not None:\n return self.obj[self._group_selection]\n return self.obj\n else:\n return self.obj[self._selection]\n\n def _reset_group_selection(self):\n \"\"\"\n Clear group based selection. Used for methods needing to return info on\n each group regardless of whether a group selection was previously set.\n \"\"\"\n if self._group_selection is not None:\n self._group_selection = None\n # GH12839 clear cached selection too when changing group selection\n self._reset_cache('_selected_obj')\n\n def _set_group_selection(self):\n \"\"\"\n Create group based selection. Used when selection is not passed\n directly but instead via a grouper.\n \"\"\"\n grp = self.grouper\n if self.as_index and getattr(grp, 'groupings', None) is not None and \\\n self.obj.ndim > 1:\n ax = self.obj._info_axis\n groupers = [g.name for g in grp.groupings\n if g.level is None and g.in_axis]\n\n if len(groupers):\n self._group_selection = ax.difference(Index(groupers)).tolist()\n # GH12839 clear selected obj cache when group selection changes\n self._reset_cache('_selected_obj')\n\n def _set_result_index_ordered(self, result):\n # set the result index on the passed values object and\n # return the new object, xref 8046\n\n # the values/counts are repeated according to the group index\n # shortcut if we have an already ordered grouper\n if not self.grouper.is_monotonic:\n index = Index(np.concatenate(\n self._get_indices(self.grouper.result_index)))\n result.set_axis(index, axis=self.axis, inplace=True)\n result = result.sort_index(axis=self.axis)\n\n result.set_axis(self.obj._get_axis(self.axis), axis=self.axis,\n inplace=True)\n return result\n\n def _dir_additions(self):\n return self.obj._dir_additions() | self._apply_whitelist\n\n def __getattr__(self, attr):\n if attr in self._internal_names_set:\n return object.__getattribute__(self, attr)\n if attr in self.obj:\n return self[attr]\n if hasattr(self.obj, attr):\n return self._make_wrapper(attr)\n\n raise AttributeError(\"%r object has no attribute %r\" %\n (type(self).__name__, attr))\n\n @Substitution(klass='GroupBy',\n versionadded='.. 
versionadded:: 0.21.0',\n examples=\"\"\"\\\n>>> df = pd.DataFrame({'A': 'a b a b'.split(), 'B': [1, 2, 3, 4]})\n>>> df\n A B\n0 a 1\n1 b 2\n2 a 3\n3 b 4\n\nTo get the difference between each groups maximum and minimum value in one\npass, you can do\n\n>>> df.groupby('A').pipe(lambda x: x.max() - x.min())\n B\nA\na 2\nb 2\"\"\")\n @Appender(_pipe_template)\n def pipe(self, func, *args, **kwargs):\n return com._pipe(self, func, *args, **kwargs)\n\n plot = property(GroupByPlot)\n\n def _make_wrapper(self, name):\n if name not in self._apply_whitelist:\n is_callable = callable(getattr(self._selected_obj, name, None))\n kind = ' callable ' if is_callable else ' '\n msg = (\"Cannot access{0}attribute {1!r} of {2!r} objects, try \"\n \"using the 'apply' method\".format(kind, name,\n type(self).__name__))\n raise AttributeError(msg)\n\n # need to setup the selection\n # as are not passed directly but in the grouper\n self._set_group_selection()\n\n f = getattr(self._selected_obj, name)\n if not isinstance(f, types.MethodType):\n return self.apply(lambda self: getattr(self, name))\n\n f = getattr(type(self._selected_obj), name)\n\n def wrapper(*args, **kwargs):\n # a little trickery for aggregation functions that need an axis\n # argument\n kwargs_with_axis = kwargs.copy()\n if 'axis' not in kwargs_with_axis or \\\n kwargs_with_axis['axis'] is None:\n kwargs_with_axis['axis'] = self.axis\n\n def curried_with_axis(x):\n return f(x, *args, **kwargs_with_axis)\n\n def curried(x):\n return f(x, *args, **kwargs)\n\n # preserve the name so we can detect it when calling plot methods,\n # to avoid duplicates\n curried.__name__ = curried_with_axis.__name__ = name\n\n # special case otherwise extra plots are created when catching the\n # exception below\n if name in _plotting_methods:\n return self.apply(curried)\n\n try:\n return self.apply(curried_with_axis)\n except Exception:\n try:\n return self.apply(curried)\n except Exception:\n\n # related to : GH3688\n # try item-by-item\n # this can be called recursively, so need to raise\n # ValueError\n # if we don't have this method to indicated to aggregate to\n # mark this column as an error\n try:\n return self._aggregate_item_by_item(name,\n *args, **kwargs)\n except (AttributeError):\n raise ValueError\n\n return wrapper\n\n def get_group(self, name, obj=None):\n \"\"\"\n Constructs NDFrame from group with provided name\n\n Parameters\n ----------\n name : object\n the name of the group to get as a DataFrame\n obj : NDFrame, default None\n the NDFrame to take the DataFrame out of. If\n it is None, the object groupby was called on will\n be used\n\n Returns\n -------\n group : type of obj\n \"\"\"\n if obj is None:\n obj = self._selected_obj\n\n inds = self._get_index(name)\n if not len(inds):\n raise KeyError(name)\n\n return obj._take(inds, axis=self.axis)\n\n def __iter__(self):\n \"\"\"\n Groupby iterator\n\n Returns\n -------\n Generator yielding sequence of (name, subsetted object)\n for each group\n \"\"\"\n return self.grouper.get_iterator(self.obj, axis=self.axis)\n\n @Appender(_apply_docs['template']\n .format(input=\"dataframe\",\n examples=_apply_docs['dataframe_examples']))\n def apply(self, func, *args, **kwargs):\n\n func = self._is_builtin_func(func)\n\n # this is needed so we don't try and wrap strings. 
If we could\n # resolve functions to their callable functions prior, this\n # wouldn't be needed\n if args or kwargs:\n if callable(func):\n\n @wraps(func)\n def f(g):\n with np.errstate(all='ignore'):\n return func(g, *args, **kwargs)\n else:\n raise ValueError('func must be a callable if args or '\n 'kwargs are supplied')\n else:\n f = func\n\n # ignore SettingWithCopy here in case the user mutates\n with option_context('mode.chained_assignment', None):\n return self._python_apply_general(f)\n\n def _python_apply_general(self, f):\n keys, values, mutated = self.grouper.apply(f, self._selected_obj,\n self.axis)\n\n return self._wrap_applied_output(\n keys,\n values,\n not_indexed_same=mutated or self.mutated)\n\n def _iterate_slices(self):\n yield self._selection_name, self._selected_obj\n\n def transform(self, func, *args, **kwargs):\n raise com.AbstractMethodError(self)\n\n def _cumcount_array(self, ascending=True):\n \"\"\"\n Parameters\n ----------\n ascending : bool, default True\n If False, number in reverse, from length of group - 1 to 0.\n\n Notes\n -----\n this is currently implementing sort=False\n (though the default is sort=True) for groupby in general\n \"\"\"\n ids, _, ngroups = self.grouper.group_info\n sorter = get_group_index_sorter(ids, ngroups)\n ids, count = ids[sorter], len(ids)\n\n if count == 0:\n return np.empty(0, dtype=np.int64)\n\n run = np.r_[True, ids[:-1] != ids[1:]]\n rep = np.diff(np.r_[np.nonzero(run)[0], count])\n out = (~run).cumsum()\n\n if ascending:\n out -= np.repeat(out[run], rep)\n else:\n out = np.repeat(out[np.r_[run[1:], True]], rep) - out\n\n rev = np.empty(count, dtype=np.intp)\n rev[sorter] = np.arange(count, dtype=np.intp)\n return out[rev].astype(np.int64, copy=False)\n\n def _try_cast(self, result, obj, numeric_only=False):\n \"\"\"\n try to cast the result to our obj original type,\n we may have roundtripped thru object in the mean-time\n\n if numeric_only is True, then only try to cast numerics\n and not datetimelikes\n\n \"\"\"\n if obj.ndim > 1:\n dtype = obj.values.dtype\n else:\n dtype = obj.dtype\n\n if not is_scalar(result):\n if numeric_only and is_numeric_dtype(dtype) or not numeric_only:\n result = maybe_downcast_to_dtype(result, dtype)\n\n return result\n\n def _transform_should_cast(self, func_nm):\n \"\"\"\n Parameters:\n -----------\n func_nm: str\n The name of the aggregation function being performed\n\n Returns:\n --------\n bool\n Whether transform should attempt to cast the result of aggregation\n \"\"\"\n return (self.size().fillna(0) > 0).any() and (func_nm not in\n _cython_cast_blacklist)\n\n def _cython_transform(self, how, numeric_only=True, **kwargs):\n output = collections.OrderedDict()\n for name, obj in self._iterate_slices():\n is_numeric = is_numeric_dtype(obj.dtype)\n if numeric_only and not is_numeric:\n continue\n\n try:\n result, names = self.grouper.transform(obj.values, how,\n **kwargs)\n except NotImplementedError:\n continue\n except AssertionError as e:\n raise GroupByError(str(e))\n if self._transform_should_cast(how):\n output[name] = self._try_cast(result, obj)\n else:\n output[name] = result\n\n if len(output) == 0:\n raise DataError('No numeric types to aggregate')\n\n return self._wrap_transformed_output(output, names)\n\n def _cython_agg_general(self, how, alt=None, numeric_only=True,\n min_count=-1):\n output = {}\n for name, obj in self._iterate_slices():\n is_numeric = is_numeric_dtype(obj.dtype)\n if numeric_only and not is_numeric:\n continue\n\n try:\n result, names = 
self.grouper.aggregate(obj.values, how,\n min_count=min_count)\n except AssertionError as e:\n raise GroupByError(str(e))\n output[name] = self._try_cast(result, obj)\n\n if len(output) == 0:\n raise DataError('No numeric types to aggregate')\n\n return self._wrap_aggregated_output(output, names)\n\n def _python_agg_general(self, func, *args, **kwargs):\n func = self._is_builtin_func(func)\n f = lambda x: func(x, *args, **kwargs)\n\n # iterate through \"columns\" ex exclusions to populate output dict\n output = {}\n for name, obj in self._iterate_slices():\n try:\n result, counts = self.grouper.agg_series(obj, f)\n output[name] = self._try_cast(result, obj, numeric_only=True)\n except TypeError:\n continue\n\n if len(output) == 0:\n return self._python_apply_general(f)\n\n if self.grouper._filter_empty_groups:\n\n mask = counts.ravel() > 0\n for name, result in compat.iteritems(output):\n\n # since we are masking, make sure that we have a float object\n values = result\n if is_numeric_dtype(values.dtype):\n values = _ensure_float(values)\n\n output[name] = self._try_cast(values[mask], result)\n\n return self._wrap_aggregated_output(output)\n\n def _wrap_applied_output(self, *args, **kwargs):\n raise com.AbstractMethodError(self)\n\n def _concat_objects(self, keys, values, not_indexed_same=False):\n from pandas.core.reshape.concat import concat\n\n def reset_identity(values):\n # reset the identities of the components\n # of the values to prevent aliasing\n for v in com._not_none(*values):\n ax = v._get_axis(self.axis)\n ax._reset_identity()\n return values\n\n if not not_indexed_same:\n result = concat(values, axis=self.axis)\n ax = self._selected_obj._get_axis(self.axis)\n\n if isinstance(result, Series):\n result = result.reindex(ax)\n else:\n\n # this is a very unfortunate situation\n # we have a multi-index that is NOT lexsorted\n # and we have a result which is duplicated\n # we can't reindex, so we resort to this\n # GH 14776\n if isinstance(ax, MultiIndex) and not ax.is_unique:\n indexer = algorithms.unique1d(\n result.index.get_indexer_for(ax.values))\n result = result.take(indexer, axis=self.axis)\n else:\n result = result.reindex(ax, axis=self.axis)\n\n elif self.group_keys:\n\n values = reset_identity(values)\n if self.as_index:\n\n # possible MI return case\n group_keys = keys\n group_levels = self.grouper.levels\n group_names = self.grouper.names\n\n result = concat(values, axis=self.axis, keys=group_keys,\n levels=group_levels, names=group_names,\n sort=False)\n else:\n\n # GH5610, returns a MI, with the first level being a\n # range index\n keys = list(range(len(values)))\n result = concat(values, axis=self.axis, keys=keys)\n else:\n values = reset_identity(values)\n result = concat(values, axis=self.axis)\n\n if (isinstance(result, Series) and\n getattr(self, '_selection_name', None) is not None):\n\n result.name = self._selection_name\n\n return result\n\n def _apply_filter(self, indices, dropna):\n if len(indices) == 0:\n indices = np.array([], dtype='int64')\n else:\n indices = np.sort(np.concatenate(indices))\n if dropna:\n filtered = self._selected_obj.take(indices, axis=self.axis)\n else:\n mask = np.empty(len(self._selected_obj.index), dtype=bool)\n mask.fill(False)\n mask[indices.astype(int)] = True\n # mask fails to broadcast when passed to where; broadcast manually.\n mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T\n filtered = self._selected_obj.where(mask) # Fill with NaNs.\n return filtered\n\n\nclass GroupBy(_GroupBy):\n\n \"\"\"\n Class for 
grouping and aggregating relational data. See aggregate,\n transform, and apply functions on this object.\n\n It's easiest to use obj.groupby(...) to use GroupBy, but you can also do:\n\n ::\n\n grouped = groupby(obj, ...)\n\n Parameters\n ----------\n obj : pandas object\n axis : int, default 0\n level : int, default None\n Level of MultiIndex\n groupings : list of Grouping objects\n Most users should ignore this\n exclusions : array-like, optional\n List of columns to exclude\n name : string\n Most users should ignore this\n\n Notes\n -----\n After grouping, see aggregate, apply, and transform functions. Here are\n some other brief notes about usage. When grouping by multiple groups, the\n result index will be a MultiIndex (hierarchical) by default.\n\n Iteration produces (key, group) tuples, i.e. chunking the data by group. So\n you can write code like:\n\n ::\n\n grouped = obj.groupby(keys, axis=axis)\n for key, group in grouped:\n # do something with the data\n\n Function calls on GroupBy, if not specially implemented, \"dispatch\" to the\n grouped data. So if you group a DataFrame and wish to invoke the std()\n method on each group, you can simply do:\n\n ::\n\n df.groupby(mapper).std()\n\n rather than\n\n ::\n\n df.groupby(mapper).aggregate(np.std)\n\n You can pass arguments to these \"wrapped\" functions, too.\n\n See the online documentation for full exposition on these topics and much\n more\n\n Returns\n -------\n **Attributes**\n groups : dict\n {group name -> group labels}\n len(grouped) : int\n Number of groups\n \"\"\"\n _apply_whitelist = _common_apply_whitelist\n\n def _bool_agg(self, val_test, skipna):\n \"\"\"Shared func to call any / all Cython GroupBy implementations\"\"\"\n\n def objs_to_bool(vals):\n try:\n vals = vals.astype(np.bool)\n except ValueError: # for objects\n vals = np.array([bool(x) for x in vals])\n\n return vals.view(np.uint8)\n\n def result_to_bool(result):\n return result.astype(np.bool, copy=False)\n\n return self._get_cythonized_result('group_any_all', self.grouper,\n aggregate=True,\n cython_dtype=np.uint8,\n needs_values=True,\n needs_mask=True,\n pre_processing=objs_to_bool,\n post_processing=result_to_bool,\n val_test=val_test, skipna=skipna)\n\n @Substitution(name='groupby')\n @Appender(_doc_template)\n def any(self, skipna=True):\n \"\"\"\n Returns True if any value in the group is truthful, else False\n\n Parameters\n ----------\n skipna : bool, default True\n Flag to ignore nan values during truth testing\n \"\"\"\n return self._bool_agg('any', skipna)\n\n @Substitution(name='groupby')\n @Appender(_doc_template)\n def all(self, skipna=True):\n \"\"\"Returns True if all values in the group are truthful, else False\n\n Parameters\n ----------\n skipna : bool, default True\n Flag to ignore nan values during truth testing\n \"\"\"\n return self._bool_agg('all', skipna)\n\n @Substitution(name='groupby')\n @Appender(_doc_template)\n def count(self):\n \"\"\"Compute count of group, excluding missing values\"\"\"\n\n # defined here for API doc\n raise NotImplementedError\n\n @Substitution(name='groupby')\n @Appender(_doc_template)\n def mean(self, *args, **kwargs):\n \"\"\"\n Compute mean of groups, excluding missing values\n\n For multiple groupings, the result index will be a MultiIndex\n \"\"\"\n nv.validate_groupby_func('mean', args, kwargs, ['numeric_only'])\n try:\n return self._cython_agg_general('mean', **kwargs)\n except GroupByError:\n raise\n except Exception: # pragma: no cover\n self._set_group_selection()\n f = lambda x: 
x.mean(axis=self.axis, **kwargs)\n return self._python_agg_general(f)\n\n @Substitution(name='groupby')\n @Appender(_doc_template)\n def median(self, **kwargs):\n \"\"\"\n Compute median of groups, excluding missing values\n\n For multiple groupings, the result index will be a MultiIndex\n \"\"\"\n try:\n return self._cython_agg_general('median', **kwargs)\n except GroupByError:\n raise\n except Exception: # pragma: no cover\n\n self._set_group_selection()\n\n def f(x):\n if isinstance(x, np.ndarray):\n x = Series(x)\n return x.median(axis=self.axis, **kwargs)\n return self._python_agg_general(f)\n\n @Substitution(name='groupby')\n @Appender(_doc_template)\n def std(self, ddof=1, *args, **kwargs):\n \"\"\"\n Compute standard deviation of groups, excluding missing values\n\n For multiple groupings, the result index will be a MultiIndex\n\n Parameters\n ----------\n ddof : integer, default 1\n degrees of freedom\n \"\"\"\n\n # TODO: implement at Cython level?\n nv.validate_groupby_func('std', args, kwargs)\n return np.sqrt(self.var(ddof=ddof, **kwargs))\n\n @Substitution(name='groupby')\n @Appender(_doc_template)\n def var(self, ddof=1, *args, **kwargs):\n \"\"\"\n Compute variance of groups, excluding missing values\n\n For multiple groupings, the result index will be a MultiIndex\n\n Parameters\n ----------\n ddof : integer, default 1\n degrees of freedom\n \"\"\"\n nv.validate_groupby_func('var', args, kwargs)\n if ddof == 1:\n return self._cython_agg_general('var', **kwargs)\n else:\n self._set_group_selection()\n f = lambda x: x.var(ddof=ddof, **kwargs)\n return self._python_agg_general(f)\n\n @Substitution(name='groupby')\n @Appender(_doc_template)\n def sem(self, ddof=1):\n \"\"\"\n Compute standard error of the mean of groups, excluding missing values\n\n For multiple groupings, the result index will be a MultiIndex\n\n Parameters\n ----------\n ddof : integer, default 1\n degrees of freedom\n \"\"\"\n\n return self.std(ddof=ddof) / np.sqrt(self.count())\n\n @Substitution(name='groupby')\n @Appender(_doc_template)\n def size(self):\n \"\"\"Compute group sizes\"\"\"\n result = self.grouper.size()\n\n if isinstance(self.obj, Series):\n result.name = getattr(self.obj, 'name', None)\n return result\n\n @classmethod\n def _add_numeric_operations(cls):\n \"\"\" add numeric operations to the GroupBy generically \"\"\"\n\n def groupby_function(name, alias, npfunc,\n numeric_only=True, _convert=False,\n min_count=-1):\n\n _local_template = \"Compute %(f)s of group values\"\n\n @Substitution(name='groupby', f=name)\n @Appender(_doc_template)\n @Appender(_local_template)\n def f(self, **kwargs):\n if 'numeric_only' not in kwargs:\n kwargs['numeric_only'] = numeric_only\n if 'min_count' not in kwargs:\n kwargs['min_count'] = min_count\n self._set_group_selection()\n try:\n return self._cython_agg_general(\n alias, alt=npfunc, **kwargs)\n except AssertionError as e:\n raise SpecificationError(str(e))\n except Exception:\n result = self.aggregate(\n lambda x: npfunc(x, axis=self.axis))\n if _convert:\n result = result._convert(datetime=True)\n return result\n\n set_function_name(f, name, cls)\n\n return f\n\n def first_compat(x, axis=0):\n\n def first(x):\n\n x = np.asarray(x)\n x = x[notna(x)]\n if len(x) == 0:\n return np.nan\n return x[0]\n\n if isinstance(x, DataFrame):\n return x.apply(first, axis=axis)\n else:\n return first(x)\n\n def last_compat(x, axis=0):\n\n def last(x):\n\n x = np.asarray(x)\n x = x[notna(x)]\n if len(x) == 0:\n return np.nan\n return x[-1]\n\n if isinstance(x, 
DataFrame):\n return x.apply(last, axis=axis)\n else:\n return last(x)\n\n cls.sum = groupby_function('sum', 'add', np.sum, min_count=0)\n cls.prod = groupby_function('prod', 'prod', np.prod, min_count=0)\n cls.min = groupby_function('min', 'min', np.min, numeric_only=False)\n cls.max = groupby_function('max', 'max', np.max, numeric_only=False)\n cls.first = groupby_function('first', 'first', first_compat,\n numeric_only=False)\n cls.last = groupby_function('last', 'last', last_compat,\n numeric_only=False)\n\n @Substitution(name='groupby')\n @Appender(_doc_template)\n def ohlc(self):\n \"\"\"\n Compute sum of values, excluding missing values\n For multiple groupings, the result index will be a MultiIndex\n \"\"\"\n\n return self._apply_to_column_groupbys(\n lambda x: x._cython_agg_general('ohlc'))\n\n @Appender(DataFrame.describe.__doc__)\n def describe(self, **kwargs):\n self._set_group_selection()\n result = self.apply(lambda x: x.describe(**kwargs))\n if self.axis == 1:\n return result.T\n return result.unstack()\n\n @Substitution(name='groupby')\n @Appender(_doc_template)\n def resample(self, rule, *args, **kwargs):\n \"\"\"\n Provide resampling when using a TimeGrouper\n Return a new grouper with our resampler appended\n \"\"\"\n from pandas.core.resample import get_resampler_for_grouping\n return get_resampler_for_grouping(self, rule, *args, **kwargs)\n\n @Substitution(name='groupby')\n @Appender(_doc_template)\n def rolling(self, *args, **kwargs):\n \"\"\"\n Return a rolling grouper, providing rolling\n functionality per group\n\n \"\"\"\n from pandas.core.window import RollingGroupby\n return RollingGroupby(self, *args, **kwargs)\n\n @Substitution(name='groupby')\n @Appender(_doc_template)\n def expanding(self, *args, **kwargs):\n \"\"\"\n Return an expanding grouper, providing expanding\n functionality per group\n\n \"\"\"\n from pandas.core.window import ExpandingGroupby\n return ExpandingGroupby(self, *args, **kwargs)\n\n def _fill(self, direction, limit=None):\n \"\"\"Shared function for `pad` and `backfill` to call Cython method\n\n Parameters\n ----------\n direction : {'ffill', 'bfill'}\n Direction passed to underlying Cython function. `bfill` will cause\n values to be filled backwards. `ffill` and any other values will\n default to a forward fill\n limit : int, default None\n Maximum number of consecutive values to fill. 
If `None`, this\n method will convert to -1 prior to passing to Cython\n\n Returns\n -------\n `Series` or `DataFrame` with filled values\n\n See Also\n --------\n pad\n backfill\n \"\"\"\n # Need int value for Cython\n if limit is None:\n limit = -1\n\n return self._get_cythonized_result('group_fillna_indexer',\n self.grouper, needs_mask=True,\n cython_dtype=np.int64,\n result_is_index=True,\n direction=direction, limit=limit)\n\n @Substitution(name='groupby')\n def pad(self, limit=None):\n \"\"\"\n Forward fill the values\n\n Parameters\n ----------\n limit : integer, optional\n limit of how many values to fill\n\n See Also\n --------\n Series.pad\n DataFrame.pad\n Series.fillna\n DataFrame.fillna\n \"\"\"\n return self._fill('ffill', limit=limit)\n ffill = pad\n\n @Substitution(name='groupby')\n def backfill(self, limit=None):\n \"\"\"\n Backward fill the values\n\n Parameters\n ----------\n limit : integer, optional\n limit of how many values to fill\n\n See Also\n --------\n Series.backfill\n DataFrame.backfill\n Series.fillna\n DataFrame.fillna\n \"\"\"\n return self._fill('bfill', limit=limit)\n bfill = backfill\n\n @Substitution(name='groupby')\n @Appender(_doc_template)\n def nth(self, n, dropna=None):\n \"\"\"\n Take the nth row from each group if n is an int, or a subset of rows\n if n is a list of ints.\n\n If dropna, will take the nth non-null row, dropna is either\n Truthy (if a Series) or 'all', 'any' (if a DataFrame);\n this is equivalent to calling dropna(how=dropna) before the\n groupby.\n\n Parameters\n ----------\n n : int or list of ints\n a single nth value for the row or a list of nth values\n dropna : None or str, optional\n apply the specified dropna operation before counting which row is\n the nth row. Needs to be None, 'any' or 'all'\n\n Examples\n --------\n\n >>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2],\n ... 'B': [np.nan, 2, 3, 4, 5]}, columns=['A', 'B'])\n >>> g = df.groupby('A')\n >>> g.nth(0)\n B\n A\n 1 NaN\n 2 3.0\n >>> g.nth(1)\n B\n A\n 1 2.0\n 2 5.0\n >>> g.nth(-1)\n B\n A\n 1 4.0\n 2 5.0\n >>> g.nth([0, 1])\n B\n A\n 1 NaN\n 1 2.0\n 2 3.0\n 2 5.0\n\n Specifying ``dropna`` allows count ignoring NaN\n\n >>> g.nth(0, dropna='any')\n B\n A\n 1 2.0\n 2 3.0\n\n NaNs denote group exhausted when using dropna\n\n >>> g.nth(3, dropna='any')\n B\n A\n 1 NaN\n 2 NaN\n\n Specifying ``as_index=False`` in ``groupby`` keeps the original index.\n\n >>> df.groupby('A', as_index=False).nth(1)\n A B\n 1 1 2.0\n 4 2 5.0\n \"\"\"\n\n if isinstance(n, int):\n nth_values = [n]\n elif isinstance(n, (set, list, tuple)):\n nth_values = list(set(n))\n if dropna is not None:\n raise ValueError(\n \"dropna option with a list of nth values is not supported\")\n else:\n raise TypeError(\"n needs to be an int or a list/set/tuple of ints\")\n\n nth_values = np.array(nth_values, dtype=np.intp)\n self._set_group_selection()\n\n if not dropna:\n mask = np.in1d(self._cumcount_array(), nth_values) | \\\n np.in1d(self._cumcount_array(ascending=False) + 1, -nth_values)\n\n out = self._selected_obj[mask]\n if not self.as_index:\n return out\n\n ids, _, _ = self.grouper.group_info\n out.index = self.grouper.result_index[ids[mask]]\n\n return out.sort_index() if self.sort else out\n\n if dropna not in ['any', 'all']:\n if isinstance(self._selected_obj, Series) and dropna is True:\n warnings.warn(\"the dropna={dropna} keyword is deprecated,\"\n \"use dropna='all' instead. 
\"\n \"For a Series groupby, dropna must be \"\n \"either None, 'any' or 'all'.\".format(\n dropna=dropna),\n FutureWarning,\n stacklevel=2)\n dropna = 'all'\n else:\n # Note: when agg-ing picker doesn't raise this,\n # just returns NaN\n raise ValueError(\"For a DataFrame groupby, dropna must be \"\n \"either None, 'any' or 'all', \"\n \"(was passed %s).\" % (dropna),)\n\n # old behaviour, but with all and any support for DataFrames.\n # modified in GH 7559 to have better perf\n max_len = n if n >= 0 else - 1 - n\n dropped = self.obj.dropna(how=dropna, axis=self.axis)\n\n # get a new grouper for our dropped obj\n if self.keys is None and self.level is None:\n\n # we don't have the grouper info available\n # (e.g. we have selected out\n # a column that is not in the current object)\n axis = self.grouper.axis\n grouper = axis[axis.isin(dropped.index)]\n\n else:\n\n # create a grouper with the original parameters, but on the dropped\n # object\n grouper, _, _ = _get_grouper(dropped, key=self.keys,\n axis=self.axis, level=self.level,\n sort=self.sort,\n mutated=self.mutated)\n\n grb = dropped.groupby(grouper, as_index=self.as_index, sort=self.sort)\n sizes, result = grb.size(), grb.nth(n)\n mask = (sizes < max_len).values\n\n # set the results which don't meet the criteria\n if len(result) and mask.any():\n result.loc[mask] = np.nan\n\n # reset/reindex to the original groups\n if len(self.obj) == len(dropped) or \\\n len(result) == len(self.grouper.result_index):\n result.index = self.grouper.result_index\n else:\n result = result.reindex(self.grouper.result_index)\n\n return result\n\n @Substitution(name='groupby')\n def ngroup(self, ascending=True):\n \"\"\"\n Number each group from 0 to the number of groups - 1.\n\n This is the enumerative complement of cumcount. Note that the\n numbers given to the groups match the order in which the groups\n would be seen when iterating over the groupby object, not the\n order they are first observed.\n\n .. versionadded:: 0.20.2\n\n Parameters\n ----------\n ascending : bool, default True\n If False, number in reverse, from number of group - 1 to 0.\n\n Examples\n --------\n\n >>> df = pd.DataFrame({\"A\": list(\"aaabba\")})\n >>> df\n A\n 0 a\n 1 a\n 2 a\n 3 b\n 4 b\n 5 a\n >>> df.groupby('A').ngroup()\n 0 0\n 1 0\n 2 0\n 3 1\n 4 1\n 5 0\n dtype: int64\n >>> df.groupby('A').ngroup(ascending=False)\n 0 1\n 1 1\n 2 1\n 3 0\n 4 0\n 5 1\n dtype: int64\n >>> df.groupby([\"A\", [1,1,2,3,2,1]]).ngroup()\n 0 0\n 1 0\n 2 1\n 3 3\n 4 2\n 5 0\n dtype: int64\n\n See also\n --------\n .cumcount : Number the rows in each group.\n \"\"\"\n\n self._set_group_selection()\n\n index = self._selected_obj.index\n result = Series(self.grouper.group_info[0], index)\n if not ascending:\n result = self.ngroups - 1 - result\n return result\n\n @Substitution(name='groupby')\n def cumcount(self, ascending=True):\n \"\"\"\n Number each item in each group from 0 to the length of that group - 1.\n\n Essentially this is equivalent to\n\n >>> self.apply(lambda x: Series(np.arange(len(x)), x.index))\n\n Parameters\n ----------\n ascending : bool, default True\n If False, number in reverse, from length of group - 1 to 0.\n\n Examples\n --------\n\n >>> df = pd.DataFrame([['a'], ['a'], ['a'], ['b'], ['b'], ['a']],\n ... 
columns=['A'])\n >>> df\n A\n 0 a\n 1 a\n 2 a\n 3 b\n 4 b\n 5 a\n >>> df.groupby('A').cumcount()\n 0 0\n 1 1\n 2 2\n 3 0\n 4 1\n 5 3\n dtype: int64\n >>> df.groupby('A').cumcount(ascending=False)\n 0 3\n 1 2\n 2 1\n 3 1\n 4 0\n 5 0\n dtype: int64\n\n See also\n --------\n .ngroup : Number the groups themselves.\n \"\"\"\n\n self._set_group_selection()\n\n index = self._selected_obj.index\n cumcounts = self._cumcount_array(ascending=ascending)\n return Series(cumcounts, index)\n\n @Substitution(name='groupby')\n @Appender(_doc_template)\n def rank(self, method='average', ascending=True, na_option='keep',\n pct=False, axis=0):\n \"\"\"\n Provides the rank of values within each group.\n\n Parameters\n ----------\n method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'\n * average: average rank of group\n * min: lowest rank in group\n * max: highest rank in group\n * first: ranks assigned in order they appear in the array\n * dense: like 'min', but rank always increases by 1 between groups\n ascending : boolean, default True\n False for ranks by high (1) to low (N)\n na_option : {'keep', 'top', 'bottom'}, default 'keep'\n * keep: leave NA values where they are\n * top: smallest rank if ascending\n * bottom: smallest rank if descending\n pct : boolean, default False\n Compute percentage rank of data within each group\n axis : int, default 0\n The axis of the object over which to compute the rank.\n\n Returns\n -----\n DataFrame with ranking of values within each group\n \"\"\"\n return self._cython_transform('rank', numeric_only=False,\n ties_method=method, ascending=ascending,\n na_option=na_option, pct=pct, axis=axis)\n\n @Substitution(name='groupby')\n @Appender(_doc_template)\n def cumprod(self, axis=0, *args, **kwargs):\n \"\"\"Cumulative product for each group\"\"\"\n nv.validate_groupby_func('cumprod', args, kwargs,\n ['numeric_only', 'skipna'])\n if axis != 0:\n return self.apply(lambda x: x.cumprod(axis=axis, **kwargs))\n\n return self._cython_transform('cumprod', **kwargs)\n\n @Substitution(name='groupby')\n @Appender(_doc_template)\n def cumsum(self, axis=0, *args, **kwargs):\n \"\"\"Cumulative sum for each group\"\"\"\n nv.validate_groupby_func('cumsum', args, kwargs,\n ['numeric_only', 'skipna'])\n if axis != 0:\n return self.apply(lambda x: x.cumsum(axis=axis, **kwargs))\n\n return self._cython_transform('cumsum', **kwargs)\n\n @Substitution(name='groupby')\n @Appender(_doc_template)\n def cummin(self, axis=0, **kwargs):\n \"\"\"Cumulative min for each group\"\"\"\n if axis != 0:\n return self.apply(lambda x: np.minimum.accumulate(x, axis))\n\n return self._cython_transform('cummin', numeric_only=False)\n\n @Substitution(name='groupby')\n @Appender(_doc_template)\n def cummax(self, axis=0, **kwargs):\n \"\"\"Cumulative max for each group\"\"\"\n if axis != 0:\n return self.apply(lambda x: np.maximum.accumulate(x, axis))\n\n return self._cython_transform('cummax', numeric_only=False)\n\n def _get_cythonized_result(self, how, grouper, aggregate=False,\n cython_dtype=None, needs_values=False,\n needs_mask=False, needs_ngroups=False,\n result_is_index=False,\n pre_processing=None, post_processing=None,\n **kwargs):\n \"\"\"Get result for Cythonized functions\n\n Parameters\n ----------\n how : str, Cythonized function name to be called\n grouper : Grouper object containing pertinent group info\n aggregate : bool, default False\n Whether the result should be aggregated to match the number of\n groups\n cython_dtype : default None\n Type of the array that will be 
modified by the Cython call. If\n `None`, the type will be inferred from the values of each slice\n needs_values : bool, default False\n Whether the values should be a part of the Cython call\n signature\n needs_mask : bool, default False\n Whether boolean mask needs to be part of the Cython call\n signature\n needs_ngroups : bool, default False\n Whether number of groups is part of the Cython call signature\n result_is_index : bool, default False\n Whether the result of the Cython operation is an index of\n values to be retrieved, instead of the actual values themselves\n pre_processing : function, default None\n Function to be applied to `values` prior to passing to Cython\n Raises if `needs_values` is False\n post_processing : function, default None\n Function to be applied to result of Cython function\n **kwargs : dict\n Extra arguments to be passed back to Cython funcs\n\n Returns\n -------\n `Series` or `DataFrame` with filled values\n \"\"\"\n if result_is_index and aggregate:\n raise ValueError(\"'result_is_index' and 'aggregate' cannot both \"\n \"be True!\")\n if post_processing:\n if not callable(pre_processing):\n raise ValueError(\"'post_processing' must be a callable!\")\n if pre_processing:\n if not callable(pre_processing):\n raise ValueError(\"'pre_processing' must be a callable!\")\n if not needs_values:\n raise ValueError(\"Cannot use 'pre_processing' without \"\n \"specifying 'needs_values'!\")\n\n labels, _, ngroups = grouper.group_info\n output = collections.OrderedDict()\n base_func = getattr(libgroupby, how)\n\n for name, obj in self._iterate_slices():\n if aggregate:\n result_sz = ngroups\n else:\n result_sz = len(obj.values)\n\n if not cython_dtype:\n cython_dtype = obj.values.dtype\n\n result = np.zeros(result_sz, dtype=cython_dtype)\n func = partial(base_func, result, labels)\n if needs_values:\n vals = obj.values\n if pre_processing:\n vals = pre_processing(vals)\n func = partial(func, vals)\n\n if needs_mask:\n mask = isnull(obj.values).view(np.uint8)\n func = partial(func, mask)\n\n if needs_ngroups:\n func = partial(func, ngroups)\n\n func(**kwargs) # Call func to modify indexer values in place\n\n if result_is_index:\n result = algorithms.take_nd(obj.values, result)\n\n if post_processing:\n result = post_processing(result)\n\n output[name] = result\n\n if aggregate:\n return self._wrap_aggregated_output(output)\n else:\n return self._wrap_transformed_output(output)\n\n @Substitution(name='groupby')\n @Appender(_doc_template)\n def shift(self, periods=1, freq=None, axis=0):\n \"\"\"\n Shift each group by periods observations\n\n Parameters\n ----------\n periods : integer, default 1\n number of periods to shift\n freq : frequency string\n axis : axis to shift, default 0\n \"\"\"\n\n if freq is not None or axis != 0:\n return self.apply(lambda x: x.shift(periods, freq, axis))\n\n return self._get_cythonized_result('group_shift_indexer',\n self.grouper, cython_dtype=np.int64,\n needs_ngroups=True,\n result_is_index=True,\n periods=periods)\n\n @Substitution(name='groupby')\n @Appender(_doc_template)\n def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None,\n axis=0):\n \"\"\"Calcuate pct_change of each value to previous entry in group\"\"\"\n if freq is not None or axis != 0:\n return self.apply(lambda x: x.pct_change(periods=periods,\n fill_method=fill_method,\n limit=limit, freq=freq,\n axis=axis))\n\n filled = getattr(self, fill_method)(limit=limit).drop(\n self.grouper.names, axis=1)\n shifted = filled.shift(periods=periods, 
freq=freq)\n\n return (filled / shifted) - 1\n\n @Substitution(name='groupby')\n @Appender(_doc_template)\n def head(self, n=5):\n \"\"\"\n Returns first n rows of each group.\n\n Essentially equivalent to ``.apply(lambda x: x.head(n))``,\n except ignores as_index flag.\n\n Examples\n --------\n\n >>> df = DataFrame([[1, 2], [1, 4], [5, 6]],\n columns=['A', 'B'])\n >>> df.groupby('A', as_index=False).head(1)\n A B\n 0 1 2\n 2 5 6\n >>> df.groupby('A').head(1)\n A B\n 0 1 2\n 2 5 6\n \"\"\"\n self._reset_group_selection()\n mask = self._cumcount_array() < n\n return self._selected_obj[mask]\n\n @Substitution(name='groupby')\n @Appender(_doc_template)\n def tail(self, n=5):\n \"\"\"\n Returns last n rows of each group\n\n Essentially equivalent to ``.apply(lambda x: x.tail(n))``,\n except ignores as_index flag.\n\n Examples\n --------\n\n >>> df = DataFrame([['a', 1], ['a', 2], ['b', 1], ['b', 2]],\n columns=['A', 'B'])\n >>> df.groupby('A').tail(1)\n A B\n 1 a 2\n 3 b 2\n >>> df.groupby('A').head(1)\n A B\n 0 a 1\n 2 b 1\n \"\"\"\n self._reset_group_selection()\n mask = self._cumcount_array(ascending=False) < n\n return self._selected_obj[mask]\n\n\nGroupBy._add_numeric_operations()\n\n\n@Appender(GroupBy.__doc__)\ndef groupby(obj, by, **kwds):\n if isinstance(obj, Series):\n klass = SeriesGroupBy\n elif isinstance(obj, DataFrame):\n klass = DataFrameGroupBy\n else: # pragma: no cover\n raise TypeError('invalid type: %s' % type(obj))\n\n return klass(obj, by, **kwds)\n\n\ndef _get_axes(group):\n if isinstance(group, Series):\n return [group.index]\n else:\n return group.axes\n\n\ndef _is_indexed_like(obj, axes):\n if isinstance(obj, Series):\n if len(axes) > 1:\n return False\n return obj.index.equals(axes[0])\n elif isinstance(obj, DataFrame):\n return obj.index.equals(axes[0])\n\n return False\n\n\nclass BaseGrouper(object):\n \"\"\"\n This is an internal Grouper class, which actually holds\n the generated groups\n\n Parameters\n ----------\n axis : int\n the axis to group\n groupings : array of grouping\n all the grouping instances to handle in this grouper\n for example for grouper list to groupby, need to pass the list\n sort : boolean, default True\n whether this grouper will give sorted result or not\n group_keys : boolean, default True\n mutated : boolean, default False\n indexer : intp array, optional\n the indexer created by Grouper\n some groupers (TimeGrouper) will sort its axis and its\n group_info is also sorted, so need the indexer to reorder\n\n \"\"\"\n\n def __init__(self, axis, groupings, sort=True, group_keys=True,\n mutated=False, indexer=None):\n self._filter_empty_groups = self.compressed = len(groupings) != 1\n self.axis = axis\n self.groupings = groupings\n self.sort = sort\n self.group_keys = group_keys\n self.mutated = mutated\n self.indexer = indexer\n\n @property\n def shape(self):\n return tuple(ping.ngroups for ping in self.groupings)\n\n def __iter__(self):\n return iter(self.indices)\n\n @property\n def nkeys(self):\n return len(self.groupings)\n\n def get_iterator(self, data, axis=0):\n \"\"\"\n Groupby iterator\n\n Returns\n -------\n Generator yielding sequence of (name, subsetted object)\n for each group\n \"\"\"\n splitter = self._get_splitter(data, axis=axis)\n keys = self._get_group_keys()\n for key, (i, group) in zip(keys, splitter):\n yield key, group\n\n def _get_splitter(self, data, axis=0):\n comp_ids, _, ngroups = self.group_info\n return get_splitter(data, comp_ids, ngroups, axis=axis)\n\n def _get_group_keys(self):\n if len(self.groupings) == 
1:\n return self.levels[0]\n else:\n comp_ids, _, ngroups = self.group_info\n\n # provide \"flattened\" iterator for multi-group setting\n return get_flattened_iterator(comp_ids,\n ngroups,\n self.levels,\n self.labels)\n\n def apply(self, f, data, axis=0):\n mutated = self.mutated\n splitter = self._get_splitter(data, axis=axis)\n group_keys = self._get_group_keys()\n\n # oh boy\n f_name = com._get_callable_name(f)\n if (f_name not in _plotting_methods and\n hasattr(splitter, 'fast_apply') and axis == 0):\n try:\n values, mutated = splitter.fast_apply(f, group_keys)\n return group_keys, values, mutated\n except reduction.InvalidApply:\n # we detect a mutation of some kind\n # so take slow path\n pass\n except Exception:\n # raise this error to the caller\n pass\n\n result_values = []\n for key, (i, group) in zip(group_keys, splitter):\n object.__setattr__(group, 'name', key)\n\n # group might be modified\n group_axes = _get_axes(group)\n res = f(group)\n if not _is_indexed_like(res, group_axes):\n mutated = True\n result_values.append(res)\n\n return group_keys, result_values, mutated\n\n @cache_readonly\n def indices(self):\n \"\"\" dict {group name -> group indices} \"\"\"\n if len(self.groupings) == 1:\n return self.groupings[0].indices\n else:\n label_list = [ping.labels for ping in self.groupings]\n keys = [com._values_from_object(ping.group_index)\n for ping in self.groupings]\n return get_indexer_dict(label_list, keys)\n\n @property\n def labels(self):\n return [ping.labels for ping in self.groupings]\n\n @property\n def levels(self):\n return [ping.group_index for ping in self.groupings]\n\n @property\n def names(self):\n return [ping.name for ping in self.groupings]\n\n def size(self):\n \"\"\"\n Compute group sizes\n\n \"\"\"\n ids, _, ngroup = self.group_info\n ids = _ensure_platform_int(ids)\n if ngroup:\n out = np.bincount(ids[ids != -1], minlength=ngroup)\n else:\n out = ids\n return Series(out,\n index=self.result_index,\n dtype='int64')\n\n @cache_readonly\n def groups(self):\n \"\"\" dict {group name -> group labels} \"\"\"\n if len(self.groupings) == 1:\n return self.groupings[0].groups\n else:\n to_groupby = lzip(*(ping.grouper for ping in self.groupings))\n to_groupby = Index(to_groupby)\n return self.axis.groupby(to_groupby)\n\n @cache_readonly\n def is_monotonic(self):\n # return if my group orderings are monotonic\n return Index(self.group_info[0]).is_monotonic\n\n @cache_readonly\n def group_info(self):\n comp_ids, obs_group_ids = self._get_compressed_labels()\n\n ngroups = len(obs_group_ids)\n comp_ids = _ensure_int64(comp_ids)\n return comp_ids, obs_group_ids, ngroups\n\n @cache_readonly\n def label_info(self):\n # return the labels of items in original grouped axis\n labels, _, _ = self.group_info\n if self.indexer is not None:\n sorter = np.lexsort((labels, self.indexer))\n labels = labels[sorter]\n return labels\n\n def _get_compressed_labels(self):\n all_labels = [ping.labels for ping in self.groupings]\n if len(all_labels) > 1:\n group_index = get_group_index(all_labels, self.shape,\n sort=True, xnull=True)\n return compress_group_index(group_index, sort=self.sort)\n\n ping = self.groupings[0]\n return ping.labels, np.arange(len(ping.group_index))\n\n @cache_readonly\n def ngroups(self):\n return len(self.result_index)\n\n @property\n def recons_labels(self):\n comp_ids, obs_ids, _ = self.group_info\n labels = (ping.labels for ping in self.groupings)\n return decons_obs_group_ids(\n comp_ids, obs_ids, self.shape, labels, xnull=True)\n\n @cache_readonly\n def 
result_index(self):\n if not self.compressed and len(self.groupings) == 1:\n return self.groupings[0].result_index.rename(self.names[0])\n\n labels = self.recons_labels\n levels = [ping.result_index for ping in self.groupings]\n result = MultiIndex(levels=levels,\n labels=labels,\n verify_integrity=False,\n names=self.names)\n return result\n\n def get_group_levels(self):\n if not self.compressed and len(self.groupings) == 1:\n return [self.groupings[0].result_index]\n\n name_list = []\n for ping, labels in zip(self.groupings, self.recons_labels):\n labels = _ensure_platform_int(labels)\n levels = ping.result_index.take(labels)\n\n name_list.append(levels)\n\n return name_list\n\n # ------------------------------------------------------------\n # Aggregation functions\n\n _cython_functions = {\n 'aggregate': {\n 'add': 'group_add',\n 'prod': 'group_prod',\n 'min': 'group_min',\n 'max': 'group_max',\n 'mean': 'group_mean',\n 'median': {\n 'name': 'group_median'\n },\n 'var': 'group_var',\n 'first': {\n 'name': 'group_nth',\n 'f': lambda func, a, b, c, d, e: func(a, b, c, d, 1, -1)\n },\n 'last': 'group_last',\n 'ohlc': 'group_ohlc',\n },\n\n 'transform': {\n 'cumprod': 'group_cumprod',\n 'cumsum': 'group_cumsum',\n 'cummin': 'group_cummin',\n 'cummax': 'group_cummax',\n 'rank': {\n 'name': 'group_rank',\n 'f': lambda func, a, b, c, d, **kwargs: func(\n a, b, c, d,\n kwargs.get('ties_method', 'average'),\n kwargs.get('ascending', True),\n kwargs.get('pct', False),\n kwargs.get('na_option', 'keep')\n )\n }\n }\n }\n\n _cython_arity = {\n 'ohlc': 4, # OHLC\n }\n\n _name_functions = {\n 'ohlc': lambda *args: ['open', 'high', 'low', 'close']\n }\n\n def _is_builtin_func(self, arg):\n \"\"\"\n if we define an builtin function for this argument, return it,\n otherwise return the arg\n \"\"\"\n return SelectionMixin._builtin_table.get(arg, arg)\n\n def _get_cython_function(self, kind, how, values, is_numeric):\n\n dtype_str = values.dtype.name\n\n def get_func(fname):\n # see if there is a fused-type version of function\n # only valid for numeric\n f = getattr(libgroupby, fname, None)\n if f is not None and is_numeric:\n return f\n\n # otherwise find dtype-specific version, falling back to object\n for dt in [dtype_str, 'object']:\n f = getattr(libgroupby, \"%s_%s\" % (fname, dtype_str), None)\n if f is not None:\n return f\n\n ftype = self._cython_functions[kind][how]\n\n if isinstance(ftype, dict):\n func = afunc = get_func(ftype['name'])\n\n # a sub-function\n f = ftype.get('f')\n if f is not None:\n\n def wrapper(*args, **kwargs):\n return f(afunc, *args, **kwargs)\n\n # need to curry our sub-function\n func = wrapper\n\n else:\n func = get_func(ftype)\n\n if func is None:\n raise NotImplementedError(\"function is not implemented for this\"\n \"dtype: [how->%s,dtype->%s]\" %\n (how, dtype_str))\n return func\n\n def _cython_operation(self, kind, values, how, axis, min_count=-1,\n **kwargs):\n assert kind in ['transform', 'aggregate']\n\n # can we do this operation with our cython functions\n # if not raise NotImplementedError\n\n # we raise NotImplemented if this is an invalid operation\n # entirely, e.g. 
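The _cython_arity and _name_functions tables above are what make 'ohlc' special: one input column fans out to four outputs, and the column names come from _name_functions rather than from the caller. A short usage example:

import pandas as pd

s = pd.Series([1, 3, 0, 2, 5, 4], index=list('aaabbb'))

# 'ohlc' has cython arity 4; open/high/low/close are supplied internally
print(s.groupby(level=0).ohlc())
#    open  high  low  close
# a     1     3    0      0
# b     2     5    4      4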
adding datetimes\n\n # categoricals are only 1d, so we\n # are not setup for dim transforming\n if is_categorical_dtype(values):\n raise NotImplementedError(\n \"categoricals are not support in cython ops ATM\")\n elif is_datetime64_any_dtype(values):\n if how in ['add', 'prod', 'cumsum', 'cumprod']:\n raise NotImplementedError(\n \"datetime64 type does not support {} \"\n \"operations\".format(how))\n elif is_timedelta64_dtype(values):\n if how in ['prod', 'cumprod']:\n raise NotImplementedError(\n \"timedelta64 type does not support {} \"\n \"operations\".format(how))\n\n arity = self._cython_arity.get(how, 1)\n\n vdim = values.ndim\n swapped = False\n if vdim == 1:\n values = values[:, None]\n out_shape = (self.ngroups, arity)\n else:\n if axis > 0:\n swapped = True\n values = values.swapaxes(0, axis)\n if arity > 1:\n raise NotImplementedError(\"arity of more than 1 is not \"\n \"supported for the 'how' argument\")\n out_shape = (self.ngroups,) + values.shape[1:]\n\n is_datetimelike = needs_i8_conversion(values.dtype)\n is_numeric = is_numeric_dtype(values.dtype)\n\n if is_datetimelike:\n values = values.view('int64')\n is_numeric = True\n elif is_bool_dtype(values.dtype):\n values = _ensure_float64(values)\n elif is_integer_dtype(values):\n # we use iNaT for the missing value on ints\n # so pre-convert to guard this condition\n if (values == iNaT).any():\n values = _ensure_float64(values)\n else:\n values = values.astype('int64', copy=False)\n elif is_numeric and not is_complex_dtype(values):\n values = _ensure_float64(values)\n else:\n values = values.astype(object)\n\n try:\n func = self._get_cython_function(\n kind, how, values, is_numeric)\n except NotImplementedError:\n if is_numeric:\n values = _ensure_float64(values)\n func = self._get_cython_function(\n kind, how, values, is_numeric)\n else:\n raise\n\n if how == 'rank':\n out_dtype = 'float'\n else:\n if is_numeric:\n out_dtype = '%s%d' % (values.dtype.kind, values.dtype.itemsize)\n else:\n out_dtype = 'object'\n\n labels, _, _ = self.group_info\n\n if kind == 'aggregate':\n result = _maybe_fill(np.empty(out_shape, dtype=out_dtype),\n fill_value=np.nan)\n counts = np.zeros(self.ngroups, dtype=np.int64)\n result = self._aggregate(\n result, counts, values, labels, func, is_numeric,\n is_datetimelike, min_count)\n elif kind == 'transform':\n result = _maybe_fill(np.empty_like(values, dtype=out_dtype),\n fill_value=np.nan)\n\n # TODO: min_count\n result = self._transform(\n result, values, labels, func, is_numeric, is_datetimelike,\n **kwargs)\n\n if is_integer_dtype(result) and not is_datetimelike:\n mask = result == iNaT\n if mask.any():\n result = result.astype('float64')\n result[mask] = np.nan\n\n if kind == 'aggregate' and \\\n self._filter_empty_groups and not counts.all():\n if result.ndim == 2:\n try:\n result = lib.row_bool_subset(\n result, (counts > 0).view(np.uint8))\n except ValueError:\n result = lib.row_bool_subset_object(\n _ensure_object(result),\n (counts > 0).view(np.uint8))\n else:\n result = result[counts > 0]\n\n if vdim == 1 and arity == 1:\n result = result[:, 0]\n\n if how in self._name_functions:\n # TODO\n names = self._name_functions[how]()\n else:\n names = None\n\n if swapped:\n result = result.swapaxes(0, axis)\n\n return result, names\n\n def aggregate(self, values, how, axis=0, min_count=-1):\n return self._cython_operation('aggregate', values, how, axis,\n min_count=min_count)\n\n def transform(self, values, how, axis=0, **kwargs):\n return self._cython_operation('transform', values, how, axis, 
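The 'rank' wrapper in the transform table above forwards ties_method, ascending, pct and na_option through **kwargs to group_rank. On the public side these arrive via GroupBy.rank, where (to the best of my reading) ties_method is spelled method:

import pandas as pd

s = pd.Series([3.0, 1.0, 2.0, 2.0], index=list('aabb'))

# keyword arguments are passed straight through to the cython kernel
print(s.groupby(level=0).rank(method='average', ascending=True,
                              pct=False, na_option='keep'))
# a    2.0
# a    1.0
# b    1.5
# b    1.5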
**kwargs)\n\n def _aggregate(self, result, counts, values, comp_ids, agg_func,\n is_numeric, is_datetimelike, min_count=-1):\n if values.ndim > 3:\n # punting for now\n raise NotImplementedError(\"number of dimensions is currently \"\n \"limited to 3\")\n elif values.ndim > 2:\n for i, chunk in enumerate(values.transpose(2, 0, 1)):\n\n chunk = chunk.squeeze()\n agg_func(result[:, :, i], counts, chunk, comp_ids,\n min_count)\n else:\n agg_func(result, counts, values, comp_ids, min_count)\n\n return result\n\n def _transform(self, result, values, comp_ids, transform_func,\n is_numeric, is_datetimelike, **kwargs):\n\n comp_ids, _, ngroups = self.group_info\n if values.ndim > 3:\n # punting for now\n raise NotImplementedError(\"number of dimensions is currently \"\n \"limited to 3\")\n elif values.ndim > 2:\n for i, chunk in enumerate(values.transpose(2, 0, 1)):\n\n chunk = chunk.squeeze()\n transform_func(result[:, :, i], values,\n comp_ids, is_datetimelike, **kwargs)\n else:\n transform_func(result, values, comp_ids, is_datetimelike, **kwargs)\n\n return result\n\n def agg_series(self, obj, func):\n try:\n return self._aggregate_series_fast(obj, func)\n except Exception:\n return self._aggregate_series_pure_python(obj, func)\n\n def _aggregate_series_fast(self, obj, func):\n func = self._is_builtin_func(func)\n\n if obj.index._has_complex_internals:\n raise TypeError('Incompatible index for Cython grouper')\n\n group_index, _, ngroups = self.group_info\n\n # avoids object / Series creation overhead\n dummy = obj._get_values(slice(None, 0)).to_dense()\n indexer = get_group_index_sorter(group_index, ngroups)\n obj = obj._take(indexer).to_dense()\n group_index = algorithms.take_nd(\n group_index, indexer, allow_fill=False)\n grouper = reduction.SeriesGrouper(obj, func, group_index, ngroups,\n dummy)\n result, counts = grouper.get_result()\n return result, counts\n\n def _aggregate_series_pure_python(self, obj, func):\n\n group_index, _, ngroups = self.group_info\n\n counts = np.zeros(ngroups, dtype=int)\n result = None\n\n splitter = get_splitter(obj, group_index, ngroups, axis=self.axis)\n\n for label, group in splitter:\n res = func(group)\n if result is None:\n if (isinstance(res, (Series, Index, np.ndarray))):\n raise ValueError('Function does not reduce')\n result = np.empty(ngroups, dtype='O')\n\n counts[label] = group.shape[0]\n result[label] = res\n\n result = lib.maybe_convert_objects(result, try_float=0)\n return result, counts\n\n\ndef generate_bins_generic(values, binner, closed):\n \"\"\"\n Generate bin edge offsets and bin labels for one array using another array\n which has bin edge values. Both arrays must be sorted.\n\n Parameters\n ----------\n values : array of values\n binner : a comparable array of values representing bins into which to bin\n the first array. 
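agg_series above tries the Cython SeriesGrouper first and falls back to the pure-python loop; on either path the user function has to reduce each group to a scalar. A small illustration:

import pandas as pd

s = pd.Series([1.0, 2.0, 3.0, 4.0])
key = ['x', 'x', 'y', 'y']

# a reducing UDF works on either path
print(s.groupby(key).agg(lambda g: g.max() - g.min()))
# x    1.0
# y    1.0

# a UDF returning a Series per group does not reduce and is rejected
# ('Function does not reduce' / 'Must produce aggregated value',
# depending on which path ends up raising):
# s.groupby(key).agg(lambda g: g + 1)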
Note, 'values' end-points must fall within 'binner'\n end-points.\n closed : which end of bin is closed; left (default), right\n\n Returns\n -------\n bins : array of offsets (into 'values' argument) of bins.\n Zero and last edge are excluded in result, so for instance the first\n bin is values[0:bin[0]] and the last is values[bin[-1]:]\n \"\"\"\n lenidx = len(values)\n lenbin = len(binner)\n\n if lenidx <= 0 or lenbin <= 0:\n raise ValueError(\"Invalid length for values or for binner\")\n\n # check binner fits data\n if values[0] < binner[0]:\n raise ValueError(\"Values falls before first bin\")\n\n if values[lenidx - 1] > binner[lenbin - 1]:\n raise ValueError(\"Values falls after last bin\")\n\n bins = np.empty(lenbin - 1, dtype=np.int64)\n\n j = 0 # index into values\n bc = 0 # bin count\n\n # linear scan, presume nothing about values/binner except that it fits ok\n for i in range(0, lenbin - 1):\n r_bin = binner[i + 1]\n\n # count values in current bin, advance to next bin\n while j < lenidx and (values[j] < r_bin or\n (closed == 'right' and values[j] == r_bin)):\n j += 1\n\n bins[bc] = j\n bc += 1\n\n return bins\n\n\nclass BinGrouper(BaseGrouper):\n\n \"\"\"\n This is an internal Grouper class\n\n Parameters\n ----------\n bins : the split index of binlabels to group the item of axis\n binlabels : the label list\n filter_empty : boolean, default False\n mutated : boolean, default False\n indexer : a intp array\n\n Examples\n --------\n bins: [2, 4, 6, 8, 10]\n binlabels: DatetimeIndex(['2005-01-01', '2005-01-03',\n '2005-01-05', '2005-01-07', '2005-01-09'],\n dtype='datetime64[ns]', freq='2D')\n\n the group_info, which contains the label of each item in grouped\n axis, the index of label in label list, group number, is\n\n (array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4]), array([0, 1, 2, 3, 4]), 5)\n\n means that, the grouped axis has 10 items, can be grouped into 5\n labels, the first and second items belong to the first label, the\n third and forth items belong to the second label, and so on\n\n \"\"\"\n\n def __init__(self, bins, binlabels, filter_empty=False, mutated=False,\n indexer=None):\n self.bins = _ensure_int64(bins)\n self.binlabels = _ensure_index(binlabels)\n self._filter_empty_groups = filter_empty\n self.mutated = mutated\n self.indexer = indexer\n\n @cache_readonly\n def groups(self):\n \"\"\" dict {group name -> group labels} \"\"\"\n\n # this is mainly for compat\n # GH 3881\n result = {}\n for key, value in zip(self.binlabels, self.bins):\n if key is not NaT:\n result[key] = value\n return result\n\n @property\n def nkeys(self):\n return 1\n\n def get_iterator(self, data, axis=0):\n \"\"\"\n Groupby iterator\n\n Returns\n -------\n Generator yielding sequence of (name, subsetted object)\n for each group\n \"\"\"\n if isinstance(data, NDFrame):\n slicer = lambda start, edge: data._slice(\n slice(start, edge), axis=axis)\n length = len(data.axes[axis])\n else:\n slicer = lambda start, edge: data[slice(start, edge)]\n length = len(data)\n\n start = 0\n for edge, label in zip(self.bins, self.binlabels):\n if label is not NaT:\n yield label, slicer(start, edge)\n start = edge\n\n if start < length:\n yield self.binlabels[-1], slicer(start, None)\n\n @cache_readonly\n def indices(self):\n indices = collections.defaultdict(list)\n\n i = 0\n for label, bin in zip(self.binlabels, self.bins):\n if i < bin:\n if label is not NaT:\n indices[label] = list(range(i, bin))\n i = bin\n return indices\n\n @cache_readonly\n def group_info(self):\n ngroups = self.ngroups\n obs_group_ids = 
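A standalone sketch of the linear scan performed by generate_bins_generic above, assuming (as the docstring requires) that both arrays are sorted and the values' end-points fall within the binner's end-points:

import numpy as np

def generate_bins_sketch(values, binner, closed='left'):
    # for each right bin edge, count how many sorted values fall before it
    # (or at it, when closed='right'); bins holds cumulative offsets
    bins = np.empty(len(binner) - 1, dtype=np.int64)
    j = 0
    for i in range(len(binner) - 1):
        r_bin = binner[i + 1]
        while j < len(values) and (values[j] < r_bin or
                                   (closed == 'right' and values[j] == r_bin)):
            j += 1
        bins[i] = j
    return bins

print(generate_bins_sketch(np.arange(10), np.array([0, 3, 6, 9]), 'right'))
# [ 4  7 10]  -> values[0:4], values[4:7], values[7:10]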
np.arange(ngroups)\n rep = np.diff(np.r_[0, self.bins])\n\n rep = _ensure_platform_int(rep)\n if ngroups == len(self.bins):\n comp_ids = np.repeat(np.arange(ngroups), rep)\n else:\n comp_ids = np.repeat(np.r_[-1, np.arange(ngroups)], rep)\n\n return comp_ids.astype('int64', copy=False), \\\n obs_group_ids.astype('int64', copy=False), ngroups\n\n @cache_readonly\n def ngroups(self):\n return len(self.result_index)\n\n @cache_readonly\n def result_index(self):\n if len(self.binlabels) != 0 and isna(self.binlabels[0]):\n return self.binlabels[1:]\n\n return self.binlabels\n\n @property\n def levels(self):\n return [self.binlabels]\n\n @property\n def names(self):\n return [self.binlabels.name]\n\n @property\n def groupings(self):\n return [Grouping(lvl, lvl, in_axis=False, level=None, name=name)\n for lvl, name in zip(self.levels, self.names)]\n\n def agg_series(self, obj, func):\n dummy = obj[:0]\n grouper = reduction.SeriesBinGrouper(obj, func, self.bins, dummy)\n return grouper.get_result()\n\n # ----------------------------------------------------------------------\n # cython aggregation\n\n _cython_functions = copy.deepcopy(BaseGrouper._cython_functions)\n\n\nclass Grouping(object):\n\n \"\"\"\n Holds the grouping information for a single key\n\n Parameters\n ----------\n index : Index\n grouper :\n obj :\n name :\n level :\n observed : boolean, default False\n If we are a Categorical, use the observed values\n in_axis : if the Grouping is a column in self.obj and hence among\n Groupby.exclusions list\n\n Returns\n -------\n **Attributes**:\n * indices : dict of {group -> index_list}\n * labels : ndarray, group labels\n * ids : mapping of label -> group\n * counts : array of group counts\n * group_index : unique groups\n * groups : dict of {group -> label_list}\n \"\"\"\n\n def __init__(self, index, grouper=None, obj=None, name=None, level=None,\n sort=True, observed=None, in_axis=False):\n\n self.name = name\n self.level = level\n self.grouper = _convert_grouper(index, grouper)\n self.all_grouper = None\n self.index = index\n self.sort = sort\n self.obj = obj\n self.observed = observed\n self.in_axis = in_axis\n\n # right place for this?\n if isinstance(grouper, (Series, Index)) and name is None:\n self.name = grouper.name\n\n if isinstance(grouper, MultiIndex):\n self.grouper = grouper.values\n\n # we have a single grouper which may be a myriad of things,\n # some of which are dependent on the passing in level\n\n if level is not None:\n if not isinstance(level, int):\n if level not in index.names:\n raise AssertionError('Level %s not in index' % str(level))\n level = index.names.index(level)\n\n if self.name is None:\n self.name = index.names[level]\n\n self.grouper, self._labels, self._group_index = \\\n index._get_grouper_for_level(self.grouper, level)\n\n # a passed Grouper like, directly get the grouper in the same way\n # as single grouper groupby, use the group_info to get labels\n elif isinstance(self.grouper, Grouper):\n # get the new grouper; we already have disambiguated\n # what key/level refer to exactly, don't need to\n # check again as we have by this point converted these\n # to an actual value (rather than a pd.Grouper)\n _, grouper, _ = self.grouper._get_grouper(self.obj, validate=False)\n if self.name is None:\n self.name = grouper.result_index.name\n self.obj = self.grouper.obj\n self.grouper = grouper\n\n else:\n if self.grouper is None and self.name is not None:\n self.grouper = self.obj[self.name]\n\n elif isinstance(self.grouper, (list, tuple)):\n self.grouper = 
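BinGrouper is the grouper that time-based operations such as resample build internally (that wiring is not shown in this snippet, so treat it as background). The docstring example above corresponds roughly to:

import numpy as np
import pandas as pd

idx = pd.date_range('2005-01-01', periods=10, freq='D')
s = pd.Series(np.arange(10), index=idx)

# a 2-day resample yields bins = [2, 4, 6, 8, 10] over the sorted axis and
# binlabels = DatetimeIndex(['2005-01-01', '2005-01-03', ...], freq='2D')
print(s.resample('2D').sum())
# 2005-01-01     1
# 2005-01-03     5
# 2005-01-05     9
# 2005-01-07    13
# 2005-01-09    17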
com._asarray_tuplesafe(self.grouper)\n\n # a passed Categorical\n elif is_categorical_dtype(self.grouper):\n\n # observed can be True/False/None\n # we treat None as False. If in the future\n # we need to warn if observed is not passed\n # then we have this option\n # gh-20583\n\n self.all_grouper = self.grouper\n self.grouper = self.grouper._codes_for_groupby(\n self.sort, observed)\n categories = self.grouper.categories\n\n # we make a CategoricalIndex out of the cat grouper\n # preserving the categories / ordered attributes\n self._labels = self.grouper.codes\n if observed:\n codes = algorithms.unique1d(self.grouper.codes)\n else:\n codes = np.arange(len(categories))\n\n self._group_index = CategoricalIndex(\n Categorical.from_codes(\n codes=codes,\n categories=categories,\n ordered=self.grouper.ordered))\n\n # we are done\n if isinstance(self.grouper, Grouping):\n self.grouper = self.grouper.grouper\n\n # no level passed\n elif not isinstance(self.grouper,\n (Series, Index, ExtensionArray, np.ndarray)):\n if getattr(self.grouper, 'ndim', 1) != 1:\n t = self.name or str(type(self.grouper))\n raise ValueError(\"Grouper for '%s' not 1-dimensional\" % t)\n self.grouper = self.index.map(self.grouper)\n if not (hasattr(self.grouper, \"__len__\") and\n len(self.grouper) == len(self.index)):\n errmsg = ('Grouper result violates len(labels) == '\n 'len(data)\\nresult: %s' %\n pprint_thing(self.grouper))\n self.grouper = None # Try for sanity\n raise AssertionError(errmsg)\n\n # if we have a date/time-like grouper, make sure that we have\n # Timestamps like\n if getattr(self.grouper, 'dtype', None) is not None:\n if is_datetime64_dtype(self.grouper):\n from pandas import to_datetime\n self.grouper = to_datetime(self.grouper)\n elif is_timedelta64_dtype(self.grouper):\n from pandas import to_timedelta\n self.grouper = to_timedelta(self.grouper)\n\n def __repr__(self):\n return 'Grouping({0})'.format(self.name)\n\n def __iter__(self):\n return iter(self.indices)\n\n _labels = None\n _group_index = None\n\n @property\n def ngroups(self):\n return len(self.group_index)\n\n @cache_readonly\n def indices(self):\n # we have a list of groupers\n if isinstance(self.grouper, BaseGrouper):\n return self.grouper.indices\n\n values = _ensure_categorical(self.grouper)\n return values._reverse_indexer()\n\n @property\n def labels(self):\n if self._labels is None:\n self._make_labels()\n return self._labels\n\n @cache_readonly\n def result_index(self):\n if self.all_grouper is not None:\n all_categories = self.all_grouper.categories\n\n # we re-order to the original category orderings\n if self.sort:\n return self.group_index.set_categories(all_categories)\n\n # we are not sorting, so add unobserved to the end\n categories = self.group_index.categories\n return self.group_index.add_categories(\n all_categories[~all_categories.isin(categories)])\n\n return self.group_index\n\n @property\n def group_index(self):\n if self._group_index is None:\n self._make_labels()\n return self._group_index\n\n def _make_labels(self):\n if self._labels is None or self._group_index is None:\n # we have a list of groupers\n if isinstance(self.grouper, BaseGrouper):\n labels = self.grouper.label_info\n uniques = self.grouper.result_index\n else:\n labels, uniques = algorithms.factorize(\n self.grouper, sort=self.sort)\n uniques = Index(uniques, name=self.name)\n self._labels = labels\n self._group_index = uniques\n\n @cache_readonly\n def groups(self):\n return self.index.groupby(Categorical.from_codes(self.labels,\n 
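The categorical branch above decides whether unobserved categories keep a slot in group_index (observed=None is treated as False, gh-20583). In user-facing terms, roughly:

import pandas as pd

cat = pd.Categorical(['a', 'a', 'b'], categories=['a', 'b', 'c'])
df = pd.DataFrame({'key': cat, 'val': [1, 2, 3]})

# observed not passed (treated as False): unobserved 'c' keeps a slot
# in the result index
print(df.groupby('key').val.sum())

# observed=True: codes are reduced to the observed categories only,
# so 'c' disappears from the result
print(df.groupby('key', observed=True).val.sum())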
self.group_index))\n\n\ndef _get_grouper(obj, key=None, axis=0, level=None, sort=True,\n observed=None, mutated=False, validate=True):\n \"\"\"\n create and return a BaseGrouper, which is an internal\n mapping of how to create the grouper indexers.\n This may be composed of multiple Grouping objects, indicating\n multiple groupers\n\n Groupers are ultimately index mappings. They can originate as:\n index mappings, keys to columns, functions, or Groupers\n\n Groupers enable local references to axis,level,sort, while\n the passed in axis, level, and sort are 'global'.\n\n This routine tries to figure out what the passing in references\n are and then creates a Grouping for each one, combined into\n a BaseGrouper.\n\n If observed & we have a categorical grouper, only show the observed\n values\n\n If validate, then check for key/level overlaps\n\n \"\"\"\n group_axis = obj._get_axis(axis)\n\n # validate that the passed single level is compatible with the passed\n # axis of the object\n if level is not None:\n # TODO: These if-block and else-block are almost same.\n # MultiIndex instance check is removable, but it seems that there are\n # some processes only for non-MultiIndex in else-block,\n # eg. `obj.index.name != level`. We have to consider carefully whether\n # these are applicable for MultiIndex. Even if these are applicable,\n # we need to check if it makes no side effect to subsequent processes\n # on the outside of this condition.\n # (GH 17621)\n if isinstance(group_axis, MultiIndex):\n if is_list_like(level) and len(level) == 1:\n level = level[0]\n\n if key is None and is_scalar(level):\n # Get the level values from group_axis\n key = group_axis.get_level_values(level)\n level = None\n\n else:\n # allow level to be a length-one list-like object\n # (e.g., level=[0])\n # GH 13901\n if is_list_like(level):\n nlevels = len(level)\n if nlevels == 1:\n level = level[0]\n elif nlevels == 0:\n raise ValueError('No group keys passed!')\n else:\n raise ValueError('multiple levels only valid with '\n 'MultiIndex')\n\n if isinstance(level, compat.string_types):\n if obj.index.name != level:\n raise ValueError('level name %s is not the name of the '\n 'index' % level)\n elif level > 0 or level < -1:\n raise ValueError('level > 0 or level < -1 only valid with '\n ' MultiIndex')\n\n # NOTE: `group_axis` and `group_axis.get_level_values(level)`\n # are same in this section.\n level = None\n key = group_axis\n\n # a passed-in Grouper, directly convert\n if isinstance(key, Grouper):\n binner, grouper, obj = key._get_grouper(obj, validate=False)\n if key.key is None:\n return grouper, [], obj\n else:\n return grouper, set([key.key]), obj\n\n # already have a BaseGrouper, just return it\n elif isinstance(key, BaseGrouper):\n return key, [], obj\n\n # In the future, a tuple key will always mean an actual key,\n # not an iterable of keys. In the meantime, we attempt to provide\n # a warning. We can assume that the user wanted a list of keys when\n # the key is not in the index. We just have to be careful with\n # unhashble elements of `key`. 
Any unhashable elements implies that\n # they wanted a list of keys.\n # https://github.com/pandas-dev/pandas/issues/18314\n is_tuple = isinstance(key, tuple)\n all_hashable = is_tuple and is_hashable(key)\n\n if is_tuple:\n if ((all_hashable and key not in obj and set(key).issubset(obj))\n or not all_hashable):\n # column names ('a', 'b') -> ['a', 'b']\n # arrays like (a, b) -> [a, b]\n msg = (\"Interpreting tuple 'by' as a list of keys, rather than \"\n \"a single key. Use 'by=[...]' instead of 'by=(...)'. In \"\n \"the future, a tuple will always mean a single key.\")\n warnings.warn(msg, FutureWarning, stacklevel=5)\n key = list(key)\n\n if not isinstance(key, list):\n keys = [key]\n match_axis_length = False\n else:\n keys = key\n match_axis_length = len(keys) == len(group_axis)\n\n # what are we after, exactly?\n any_callable = any(callable(g) or isinstance(g, dict) for g in keys)\n any_groupers = any(isinstance(g, Grouper) for g in keys)\n any_arraylike = any(isinstance(g, (list, tuple, Series, Index, np.ndarray))\n for g in keys)\n\n try:\n if isinstance(obj, DataFrame):\n all_in_columns_index = all(g in obj.columns or g in obj.index.names\n for g in keys)\n else:\n all_in_columns_index = False\n except Exception:\n all_in_columns_index = False\n\n if not any_callable and not all_in_columns_index and \\\n not any_arraylike and not any_groupers and \\\n match_axis_length and level is None:\n keys = [com._asarray_tuplesafe(keys)]\n\n if isinstance(level, (tuple, list)):\n if key is None:\n keys = [None] * len(level)\n levels = level\n else:\n levels = [level] * len(keys)\n\n groupings = []\n exclusions = []\n\n # if the actual grouper should be obj[key]\n def is_in_axis(key):\n if not _is_label_like(key):\n try:\n obj._data.items.get_loc(key)\n except Exception:\n return False\n\n return True\n\n # if the grouper is obj[name]\n def is_in_obj(gpr):\n try:\n return id(gpr) == id(obj[gpr.name])\n except Exception:\n return False\n\n for i, (gpr, level) in enumerate(zip(keys, levels)):\n\n if is_in_obj(gpr): # df.groupby(df['name'])\n in_axis, name = True, gpr.name\n exclusions.append(name)\n\n elif is_in_axis(gpr): # df.groupby('name')\n if gpr in obj:\n if validate:\n stacklevel = 5 # Number of stack levels from df.groupby\n obj._check_label_or_level_ambiguity(\n gpr, stacklevel=stacklevel)\n in_axis, name, gpr = True, gpr, obj[gpr]\n exclusions.append(name)\n elif obj._is_level_reference(gpr):\n in_axis, name, level, gpr = False, None, gpr, None\n else:\n raise KeyError(gpr)\n elif isinstance(gpr, Grouper) and gpr.key is not None:\n # Add key to exclusions\n exclusions.append(gpr.key)\n in_axis, name = False, None\n else:\n in_axis, name = False, None\n\n if is_categorical_dtype(gpr) and len(gpr) != obj.shape[axis]:\n raise ValueError(\n (\"Length of grouper ({len_gpr}) and axis ({len_axis})\"\n \" must be same length\"\n .format(len_gpr=len(gpr), len_axis=obj.shape[axis])))\n\n # create the Grouping\n # allow us to passing the actual Grouping as the gpr\n ping = Grouping(group_axis,\n gpr,\n obj=obj,\n name=name,\n level=level,\n sort=sort,\n observed=observed,\n in_axis=in_axis) \\\n if not isinstance(gpr, Grouping) else gpr\n\n groupings.append(ping)\n\n if len(groupings) == 0:\n raise ValueError('No group keys passed!')\n\n # create the internals grouper\n grouper = BaseGrouper(group_axis, groupings, sort=sort, mutated=mutated)\n return grouper, exclusions, obj\n\n\ndef _is_label_like(val):\n return (isinstance(val, (compat.string_types, tuple)) or\n (val is not None and 
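The tuple handling above means a tuple of column names is still accepted as a list of keys, but with a FutureWarning pointing users toward an explicit list:

import pandas as pd

df = pd.DataFrame({'a': [1, 1, 2], 'b': [3, 4, 4], 'c': [5, 6, 7]})

# ('a', 'b') is not itself a column but is a subset of the columns, so it is
# interpreted as ['a', 'b'] and the FutureWarning above is emitted
print(df.groupby(('a', 'b')).c.sum())

# the forward-compatible spelling
print(df.groupby(['a', 'b']).c.sum())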
is_scalar(val)))\n\n\ndef _convert_grouper(axis, grouper):\n if isinstance(grouper, dict):\n return grouper.get\n elif isinstance(grouper, Series):\n if grouper.index.equals(axis):\n return grouper._values\n else:\n return grouper.reindex(axis)._values\n elif isinstance(grouper, (list, Series, Index, np.ndarray)):\n if len(grouper) != len(axis):\n raise ValueError('Grouper and axis must be same length')\n return grouper\n else:\n return grouper\n\n\ndef _whitelist_method_generator(klass, whitelist):\n \"\"\"\n Yields all GroupBy member defs for DataFrame/Series names in _whitelist.\n\n Parameters\n ----------\n klass - class where members are defined. Should be Series or DataFrame\n\n whitelist - list of names of klass methods to be constructed\n\n Returns\n -------\n The generator yields a sequence of strings, each suitable for exec'ing,\n that define implementations of the named methods for DataFrameGroupBy\n or SeriesGroupBy.\n\n Since we don't want to override methods explicitly defined in the\n base class, any such name is skipped.\n \"\"\"\n\n method_wrapper_template = \\\n \"\"\"def %(name)s(%(sig)s) :\n \\\"\"\"\n %(doc)s\n \\\"\"\"\n f = %(self)s.__getattr__('%(name)s')\n return f(%(args)s)\"\"\"\n property_wrapper_template = \\\n \"\"\"@property\ndef %(name)s(self) :\n \\\"\"\"\n %(doc)s\n \\\"\"\"\n return self.__getattr__('%(name)s')\"\"\"\n for name in whitelist:\n # don't override anything that was explicitly defined\n # in the base class\n if hasattr(GroupBy, name):\n continue\n # ugly, but we need the name string itself in the method.\n f = getattr(klass, name)\n doc = f.__doc__\n doc = doc if type(doc) == str else ''\n if isinstance(f, types.MethodType):\n wrapper_template = method_wrapper_template\n decl, args = make_signature(f)\n # pass args by name to f because otherwise\n # GroupBy._make_wrapper won't know whether\n # we passed in an axis parameter.\n args_by_name = ['{0}={0}'.format(arg) for arg in args[1:]]\n params = {'name': name,\n 'doc': doc,\n 'sig': ','.join(decl),\n 'self': args[0],\n 'args': ','.join(args_by_name)}\n else:\n wrapper_template = property_wrapper_template\n params = {'name': name, 'doc': doc}\n yield wrapper_template % params\n\n\nclass SeriesGroupBy(GroupBy):\n #\n # Make class defs of attributes on SeriesGroupBy whitelist\n _apply_whitelist = _series_apply_whitelist\n for _def_str in _whitelist_method_generator(Series,\n _series_apply_whitelist):\n exec(_def_str)\n\n @property\n def _selection_name(self):\n \"\"\"\n since we are a series, we by definition only have\n a single name, but may be the result of a selection or\n the name of our object\n \"\"\"\n if self._selection is None:\n return self.obj.name\n else:\n return self._selection\n\n _agg_doc = dedent(\"\"\"\n Examples\n --------\n\n >>> s = Series([1, 2, 3, 4])\n\n >>> s\n 0 1\n 1 2\n 2 3\n 3 4\n dtype: int64\n\n >>> s.groupby([1, 1, 2, 2]).min()\n 1 1\n 2 3\n dtype: int64\n\n >>> s.groupby([1, 1, 2, 2]).agg('min')\n 1 1\n 2 3\n dtype: int64\n\n >>> s.groupby([1, 1, 2, 2]).agg(['min', 'max'])\n min max\n 1 1 2\n 2 3 4\n\n See also\n --------\n pandas.Series.groupby.apply\n pandas.Series.groupby.transform\n pandas.Series.aggregate\n\n \"\"\")\n\n @Appender(_apply_docs['template']\n .format(input='series',\n examples=_apply_docs['series_examples']))\n def apply(self, func, *args, **kwargs):\n return super(SeriesGroupBy, self).apply(func, *args, **kwargs)\n\n @Appender(_agg_doc)\n @Appender(_shared_docs['aggregate'] % dict(\n klass='Series',\n versionadded='',\n axis=''))\n def 
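_convert_grouper above turns a dict into its .get method, so a plain mapping from axis label to group key is a valid grouper:

import pandas as pd

s = pd.Series([10, 20, 30], index=['a', 'b', 'c'])

# the dict becomes dict.get, applied to each index label
print(s.groupby({'a': 'x', 'b': 'x', 'c': 'y'}).sum())
# x    30
# y    30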
aggregate(self, func_or_funcs, *args, **kwargs):\n _level = kwargs.pop('_level', None)\n if isinstance(func_or_funcs, compat.string_types):\n return getattr(self, func_or_funcs)(*args, **kwargs)\n\n if isinstance(func_or_funcs, collections.Iterable):\n # Catch instances of lists / tuples\n # but not the class list / tuple itself.\n ret = self._aggregate_multiple_funcs(func_or_funcs,\n (_level or 0) + 1)\n else:\n cyfunc = self._is_cython_func(func_or_funcs)\n if cyfunc and not args and not kwargs:\n return getattr(self, cyfunc)()\n\n if self.grouper.nkeys > 1:\n return self._python_agg_general(func_or_funcs, *args, **kwargs)\n\n try:\n return self._python_agg_general(func_or_funcs, *args, **kwargs)\n except Exception:\n result = self._aggregate_named(func_or_funcs, *args, **kwargs)\n\n index = Index(sorted(result), name=self.grouper.names[0])\n ret = Series(result, index=index)\n\n if not self.as_index: # pragma: no cover\n print('Warning, ignoring as_index=True')\n\n # _level handled at higher\n if not _level and isinstance(ret, dict):\n from pandas import concat\n ret = concat(ret, axis=1)\n return ret\n\n agg = aggregate\n\n def _aggregate_multiple_funcs(self, arg, _level):\n if isinstance(arg, dict):\n\n # show the deprecation, but only if we\n # have not shown a higher level one\n # GH 15931\n if isinstance(self._selected_obj, Series) and _level <= 1:\n warnings.warn(\n (\"using a dict on a Series for aggregation\\n\"\n \"is deprecated and will be removed in a future \"\n \"version\"),\n FutureWarning, stacklevel=3)\n\n columns = list(arg.keys())\n arg = list(arg.items())\n elif any(isinstance(x, (tuple, list)) for x in arg):\n arg = [(x, x) if not isinstance(x, (tuple, list)) else x\n for x in arg]\n\n # indicated column order\n columns = lzip(*arg)[0]\n else:\n # list of functions / function names\n columns = []\n for f in arg:\n if isinstance(f, compat.string_types):\n columns.append(f)\n else:\n # protect against callables without names\n columns.append(com._get_callable_name(f))\n arg = lzip(columns, arg)\n\n results = {}\n for name, func in arg:\n obj = self\n if name in results:\n raise SpecificationError('Function names must be unique, '\n 'found multiple named %s' % name)\n\n # reset the cache so that we\n # only include the named selection\n if name in self._selected_obj:\n obj = copy.copy(obj)\n obj._reset_cache()\n obj._selection = name\n results[name] = obj.aggregate(func)\n\n if isinstance(list(compat.itervalues(results))[0],\n DataFrame):\n\n # let higher level handle\n if _level:\n return results\n return list(compat.itervalues(results))[0]\n return DataFrame(results, columns=columns)\n\n def _wrap_output(self, output, index, names=None):\n \"\"\" common agg/transform wrapping logic \"\"\"\n output = output[self._selection_name]\n\n if names is not None:\n return DataFrame(output, index=index, columns=names)\n else:\n name = self._selection_name\n if name is None:\n name = self._selected_obj.name\n return Series(output, index=index, name=name)\n\n def _wrap_aggregated_output(self, output, names=None):\n return self._wrap_output(output=output,\n index=self.grouper.result_index,\n names=names)\n\n def _wrap_transformed_output(self, output, names=None):\n return self._wrap_output(output=output,\n index=self.obj.index,\n names=names)\n\n def _wrap_applied_output(self, keys, values, not_indexed_same=False):\n if len(keys) == 0:\n # GH #6265\n return Series([], name=self._selection_name, index=keys)\n\n def _get_index():\n if self.grouper.nkeys > 1:\n index = 
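_aggregate_multiple_funcs above handles both a list of functions (one output column per function) and a dict on a Series, the latter being deprecated (GH 15931). For example:

import pandas as pd

s = pd.Series([1, 2, 3, 4])
g = s.groupby([1, 1, 2, 2])

# list of functions -> one column per function
print(g.agg(['min', 'max']))
#    min  max
# 1    1    2
# 2    3    4

# dict of {name: func} on a SeriesGroupBy still works here but emits a
# FutureWarning (GH 15931)
print(g.agg({'smallest': 'min'}))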
MultiIndex.from_tuples(keys, names=self.grouper.names)\n else:\n index = Index(keys, name=self.grouper.names[0])\n return index\n\n if isinstance(values[0], dict):\n # GH #823\n index = _get_index()\n result = DataFrame(values, index=index).stack()\n result.name = self._selection_name\n return result\n\n if isinstance(values[0], (Series, dict)):\n return self._concat_objects(keys, values,\n not_indexed_same=not_indexed_same)\n elif isinstance(values[0], DataFrame):\n # possible that Series -> DataFrame by applied function\n return self._concat_objects(keys, values,\n not_indexed_same=not_indexed_same)\n else:\n # GH #6265\n return Series(values, index=_get_index(),\n name=self._selection_name)\n\n def _aggregate_named(self, func, *args, **kwargs):\n result = {}\n\n for name, group in self:\n group.name = name\n output = func(group, *args, **kwargs)\n if isinstance(output, (Series, Index, np.ndarray)):\n raise Exception('Must produce aggregated value')\n result[name] = self._try_cast(output, group)\n\n return result\n\n @Substitution(klass='Series', selected='A.')\n @Appender(_transform_template)\n def transform(self, func, *args, **kwargs):\n func = self._is_cython_func(func) or func\n\n # if string function\n if isinstance(func, compat.string_types):\n if func in _cython_transforms:\n # cythonized transform\n return getattr(self, func)(*args, **kwargs)\n else:\n # cythonized aggregation and merge\n return self._transform_fast(\n lambda: getattr(self, func)(*args, **kwargs), func)\n\n # reg transform\n klass = self._selected_obj.__class__\n results = []\n wrapper = lambda x: func(x, *args, **kwargs)\n for name, group in self:\n object.__setattr__(group, 'name', name)\n res = wrapper(group)\n\n if hasattr(res, 'values'):\n res = res.values\n\n indexer = self._get_index(name)\n s = klass(res, indexer)\n results.append(s)\n\n from pandas.core.reshape.concat import concat\n result = concat(results).sort_index()\n\n # we will only try to coerce the result type if\n # we have a numeric dtype, as these are *always* udfs\n # the cython take a different path (and casting)\n dtype = self._selected_obj.dtype\n if is_numeric_dtype(dtype):\n result = maybe_downcast_to_dtype(result, dtype)\n\n result.name = self._selected_obj.name\n result.index = self._selected_obj.index\n return result\n\n def _transform_fast(self, func, func_nm):\n \"\"\"\n fast version of transform, only applicable to\n builtin/cythonizable functions\n \"\"\"\n if isinstance(func, compat.string_types):\n func = getattr(self, func)\n\n ids, _, ngroup = self.grouper.group_info\n cast = self._transform_should_cast(func_nm)\n out = algorithms.take_1d(func().values, ids)\n if cast:\n out = self._try_cast(out, self.obj)\n return Series(out, index=self.obj.index, name=self.obj.name)\n\n def filter(self, func, dropna=True, *args, **kwargs): # noqa\n \"\"\"\n Return a copy of a Series excluding elements from groups that\n do not satisfy the boolean criterion specified by func.\n\n Parameters\n ----------\n func : function\n To apply to each group. Should return True or False.\n dropna : Drop groups that do not pass the filter. True by default;\n if False, groups that evaluate False are filled with NaNs.\n\n Examples\n --------\n >>> import pandas as pd\n >>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',\n ... 'foo', 'bar'],\n ... 'B' : [1, 2, 3, 4, 5, 6],\n ... 
'C' : [2.0, 5., 8., 1., 2., 9.]})\n >>> grouped = df.groupby('A')\n >>> df.groupby('A').B.filter(lambda x: x.mean() > 3.)\n 1 2\n 3 4\n 5 6\n Name: B, dtype: int64\n\n Returns\n -------\n filtered : Series\n \"\"\"\n if isinstance(func, compat.string_types):\n wrapper = lambda x: getattr(x, func)(*args, **kwargs)\n else:\n wrapper = lambda x: func(x, *args, **kwargs)\n\n # Interpret np.nan as False.\n def true_and_notna(x, *args, **kwargs):\n b = wrapper(x, *args, **kwargs)\n return b and notna(b)\n\n try:\n indices = [self._get_index(name) for name, group in self\n if true_and_notna(group)]\n except ValueError:\n raise TypeError(\"the filter must return a boolean result\")\n except TypeError:\n raise TypeError(\"the filter must return a boolean result\")\n\n filtered = self._apply_filter(indices, dropna)\n return filtered\n\n def nunique(self, dropna=True):\n \"\"\" Returns number of unique elements in the group \"\"\"\n ids, _, _ = self.grouper.group_info\n\n val = self.obj.get_values()\n\n try:\n sorter = np.lexsort((val, ids))\n except TypeError: # catches object dtypes\n assert val.dtype == object, \\\n 'val.dtype must be object, got %s' % val.dtype\n val, _ = algorithms.factorize(val, sort=False)\n sorter = np.lexsort((val, ids))\n _isna = lambda a: a == -1\n else:\n _isna = isna\n\n ids, val = ids[sorter], val[sorter]\n\n # group boundaries are where group ids change\n # unique observations are where sorted values change\n idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]\n inc = np.r_[1, val[1:] != val[:-1]]\n\n # 1st item of each group is a new unique observation\n mask = _isna(val)\n if dropna:\n inc[idx] = 1\n inc[mask] = 0\n else:\n inc[mask & np.r_[False, mask[:-1]]] = 0\n inc[idx] = 1\n\n out = np.add.reduceat(inc, idx).astype('int64', copy=False)\n if len(ids):\n # NaN/NaT group exists if the head of ids is -1,\n # so remove it from res and exclude its index from idx\n if ids[0] == -1:\n res = out[1:]\n idx = idx[np.flatnonzero(idx)]\n else:\n res = out\n else:\n res = out[1:]\n ri = self.grouper.result_index\n\n # we might have duplications among the bins\n if len(res) != len(ri):\n res, out = np.zeros(len(ri), dtype=out.dtype), res\n res[ids[idx]] = out\n\n return Series(res,\n index=ri,\n name=self._selection_name)\n\n @Appender(Series.describe.__doc__)\n def describe(self, **kwargs):\n self._set_group_selection()\n result = self.apply(lambda x: x.describe(**kwargs))\n if self.axis == 1:\n return result.T\n return result.unstack()\n\n def value_counts(self, normalize=False, sort=True, ascending=False,\n bins=None, dropna=True):\n\n from pandas.core.reshape.tile import cut\n from pandas.core.reshape.merge import _get_join_indexers\n\n if bins is not None and not np.iterable(bins):\n # scalar bins cannot be done at top level\n # in a backward compatible way\n return self.apply(Series.value_counts,\n normalize=normalize,\n sort=sort,\n ascending=ascending,\n bins=bins)\n\n ids, _, _ = self.grouper.group_info\n val = self.obj.get_values()\n\n # groupby removes null keys from groupings\n mask = ids != -1\n ids, val = ids[mask], val[mask]\n\n if bins is None:\n lab, lev = algorithms.factorize(val, sort=True)\n llab = lambda lab, inc: lab[inc]\n else:\n\n # lab is a Categorical with categories an IntervalIndex\n lab = cut(Series(val), bins, include_lowest=True)\n lev = lab.cat.categories\n lab = lev.take(lab.cat.codes)\n llab = lambda lab, inc: lab[inc]._multiindex.labels[-1]\n\n if is_interval_dtype(lab):\n # TODO: should we do this inside II?\n sorter = 
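nunique above sorts (ids, values) with lexsort and counts boundaries; dropna controls whether a missing value counts as its own observation:

import pandas as pd

s = pd.Series(['x', 'x', 'y', None], index=[1, 1, 2, 2])
g = s.groupby(level=0)

print(g.nunique())               # 1 -> 1, 2 -> 1
print(g.nunique(dropna=False))   # 1 -> 1, 2 -> 2 (None counted)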
np.lexsort((lab.left, lab.right, ids))\n else:\n sorter = np.lexsort((lab, ids))\n\n ids, lab = ids[sorter], lab[sorter]\n\n # group boundaries are where group ids change\n idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]\n\n # new values are where sorted labels change\n lchanges = llab(lab, slice(1, None)) != llab(lab, slice(None, -1))\n inc = np.r_[True, lchanges]\n inc[idx] = True # group boundaries are also new values\n out = np.diff(np.nonzero(np.r_[inc, True])[0]) # value counts\n\n # num. of times each group should be repeated\n rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx))\n\n # multi-index components\n labels = list(map(rep, self.grouper.recons_labels)) + [llab(lab, inc)]\n levels = [ping.group_index for ping in self.grouper.groupings] + [lev]\n names = self.grouper.names + [self._selection_name]\n\n if dropna:\n mask = labels[-1] != -1\n if mask.all():\n dropna = False\n else:\n out, labels = out[mask], [label[mask] for label in labels]\n\n if normalize:\n out = out.astype('float')\n d = np.diff(np.r_[idx, len(ids)])\n if dropna:\n m = ids[lab == -1]\n np.add.at(d, m, -1)\n acc = rep(d)[mask]\n else:\n acc = rep(d)\n out /= acc\n\n if sort and bins is None:\n cat = ids[inc][mask] if dropna else ids[inc]\n sorter = np.lexsort((out if ascending else -out, cat))\n out, labels[-1] = out[sorter], labels[-1][sorter]\n\n if bins is None:\n mi = MultiIndex(levels=levels, labels=labels, names=names,\n verify_integrity=False)\n\n if is_integer_dtype(out):\n out = _ensure_int64(out)\n return Series(out, index=mi, name=self._selection_name)\n\n # for compat. with libgroupby.value_counts need to ensure every\n # bin is present at every index level, null filled with zeros\n diff = np.zeros(len(out), dtype='bool')\n for lab in labels[:-1]:\n diff |= np.r_[True, lab[1:] != lab[:-1]]\n\n ncat, nbin = diff.sum(), len(levels[-1])\n\n left = [np.repeat(np.arange(ncat), nbin),\n np.tile(np.arange(nbin), ncat)]\n\n right = [diff.cumsum() - 1, labels[-1]]\n\n _, idx = _get_join_indexers(left, right, sort=False, how='left')\n out = np.where(idx != -1, out[idx], 0)\n\n if sort:\n sorter = np.lexsort((out if ascending else -out, left[0]))\n out, left[-1] = out[sorter], left[-1][sorter]\n\n # build the multi-index w/ full levels\n labels = list(map(lambda lab: np.repeat(lab[diff], nbin), labels[:-1]))\n labels.append(left[-1])\n\n mi = MultiIndex(levels=levels, labels=labels, names=names,\n verify_integrity=False)\n\n if is_integer_dtype(out):\n out = _ensure_int64(out)\n return Series(out, index=mi, name=self._selection_name)\n\n def count(self):\n \"\"\" Compute count of group, excluding missing values \"\"\"\n ids, _, ngroups = self.grouper.group_info\n val = self.obj.get_values()\n\n mask = (ids != -1) & ~isna(val)\n ids = _ensure_platform_int(ids)\n out = np.bincount(ids[mask], minlength=ngroups or 0)\n\n return Series(out,\n index=self.grouper.result_index,\n name=self._selection_name,\n dtype='int64')\n\n def _apply_to_column_groupbys(self, func):\n \"\"\" return a pass thru \"\"\"\n return func(self)\n\n def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None):\n \"\"\"Calculate percent change of each value to previous entry in group\"\"\"\n filled = getattr(self, fill_method)(limit=limit)\n shifted = filled.shift(periods=periods, freq=freq)\n\n return (filled / shifted) - 1\n\n\nclass NDFrameGroupBy(GroupBy):\n\n def _iterate_slices(self):\n if self.axis == 0:\n # kludge\n if self._selection is None:\n slice_axis = self.obj.columns\n else:\n slice_axis = 
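count() above masks both null keys (ids == -1) and null values before the bincount, so only non-missing values in valid groups are counted:

import numpy as np
import pandas as pd

s = pd.Series([1.0, np.nan, 3.0, np.nan], index=['a', 'a', 'b', 'b'])

print(s.groupby(level=0).count())
# a    1
# b    1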
self._selection_list\n slicer = lambda x: self.obj[x]\n else:\n slice_axis = self.obj.index\n slicer = self.obj.xs\n\n for val in slice_axis:\n if val in self.exclusions:\n continue\n yield val, slicer(val)\n\n def _cython_agg_general(self, how, alt=None, numeric_only=True,\n min_count=-1):\n new_items, new_blocks = self._cython_agg_blocks(\n how, alt=alt, numeric_only=numeric_only, min_count=min_count)\n return self._wrap_agged_blocks(new_items, new_blocks)\n\n def _wrap_agged_blocks(self, items, blocks):\n obj = self._obj_with_exclusions\n\n new_axes = list(obj._data.axes)\n\n # more kludge\n if self.axis == 0:\n new_axes[0], new_axes[1] = new_axes[1], self.grouper.result_index\n else:\n new_axes[self.axis] = self.grouper.result_index\n\n # Make sure block manager integrity check passes.\n assert new_axes[0].equals(items)\n new_axes[0] = items\n\n mgr = BlockManager(blocks, new_axes)\n\n new_obj = type(obj)(mgr)\n\n return self._post_process_cython_aggregate(new_obj)\n\n _block_agg_axis = 0\n\n def _cython_agg_blocks(self, how, alt=None, numeric_only=True,\n min_count=-1):\n # TODO: the actual managing of mgr_locs is a PITA\n # here, it should happen via BlockManager.combine\n\n data, agg_axis = self._get_data_to_aggregate()\n\n if numeric_only:\n data = data.get_numeric_data(copy=False)\n\n new_blocks = []\n new_items = []\n deleted_items = []\n for block in data.blocks:\n\n locs = block.mgr_locs.as_array\n try:\n result, _ = self.grouper.aggregate(\n block.values, how, axis=agg_axis, min_count=min_count)\n except NotImplementedError:\n # generally if we have numeric_only=False\n # and non-applicable functions\n # try to python agg\n\n if alt is None:\n # we cannot perform the operation\n # in an alternate way, exclude the block\n deleted_items.append(locs)\n continue\n\n # call our grouper again with only this block\n obj = self.obj[data.items[locs]]\n s = groupby(obj, self.grouper)\n result = s.aggregate(lambda x: alt(x, axis=self.axis))\n newb = result._data.blocks[0]\n\n finally:\n\n # see if we can cast the block back to the original dtype\n result = block._try_coerce_and_cast_result(result)\n newb = block.make_block(result)\n\n new_items.append(locs)\n new_blocks.append(newb)\n\n if len(new_blocks) == 0:\n raise DataError('No numeric types to aggregate')\n\n # reset the locs in the blocks to correspond to our\n # current ordering\n indexer = np.concatenate(new_items)\n new_items = data.items.take(np.sort(indexer))\n\n if len(deleted_items):\n\n # we need to adjust the indexer to account for the\n # items we have removed\n # really should be done in internals :<\n\n deleted = np.concatenate(deleted_items)\n ai = np.arange(len(data))\n mask = np.zeros(len(data))\n mask[deleted] = 1\n indexer = (ai - mask.cumsum())[indexer]\n\n offset = 0\n for b in new_blocks:\n loc = len(b.mgr_locs)\n b.mgr_locs = indexer[offset:(offset + loc)]\n offset += loc\n\n return new_items, new_blocks\n\n def _get_data_to_aggregate(self):\n obj = self._obj_with_exclusions\n if self.axis == 0:\n return obj.swapaxes(0, 1)._data, 1\n else:\n return obj._data, self.axis\n\n def _post_process_cython_aggregate(self, obj):\n # undoing kludge from below\n if self.axis == 0:\n obj = obj.swapaxes(0, 1)\n return obj\n\n def aggregate(self, arg, *args, **kwargs):\n\n _level = kwargs.pop('_level', None)\n result, how = self._aggregate(arg, _level=_level, *args, **kwargs)\n if how is None:\n return result\n\n if result is None:\n\n # grouper specific aggregations\n if self.grouper.nkeys > 1:\n return 
self._python_agg_general(arg, *args, **kwargs)\n else:\n\n # try to treat as if we are passing a list\n try:\n assert not args and not kwargs\n result = self._aggregate_multiple_funcs(\n [arg], _level=_level, _axis=self.axis)\n result.columns = Index(\n result.columns.levels[0],\n name=self._selected_obj.columns.name)\n except Exception:\n result = self._aggregate_generic(arg, *args, **kwargs)\n\n if not self.as_index:\n self._insert_inaxis_grouper_inplace(result)\n result.index = np.arange(len(result))\n\n return result._convert(datetime=True)\n\n agg = aggregate\n\n def _aggregate_generic(self, func, *args, **kwargs):\n if self.grouper.nkeys != 1:\n raise AssertionError('Number of keys must be 1')\n\n axis = self.axis\n obj = self._obj_with_exclusions\n\n result = {}\n if axis != obj._info_axis_number:\n try:\n for name, data in self:\n result[name] = self._try_cast(func(data, *args, **kwargs),\n data)\n except Exception:\n return self._aggregate_item_by_item(func, *args, **kwargs)\n else:\n for name in self.indices:\n try:\n data = self.get_group(name, obj=obj)\n result[name] = self._try_cast(func(data, *args, **kwargs),\n data)\n except Exception:\n wrapper = lambda x: func(x, *args, **kwargs)\n result[name] = data.apply(wrapper, axis=axis)\n\n return self._wrap_generic_output(result, obj)\n\n def _wrap_aggregated_output(self, output, names=None):\n raise com.AbstractMethodError(self)\n\n def _aggregate_item_by_item(self, func, *args, **kwargs):\n # only for axis==0\n\n obj = self._obj_with_exclusions\n result = {}\n cannot_agg = []\n errors = None\n for item in obj:\n try:\n data = obj[item]\n colg = SeriesGroupBy(data, selection=item,\n grouper=self.grouper)\n result[item] = self._try_cast(\n colg.aggregate(func, *args, **kwargs), data)\n except ValueError:\n cannot_agg.append(item)\n continue\n except TypeError as e:\n cannot_agg.append(item)\n errors = e\n continue\n\n result_columns = obj.columns\n if cannot_agg:\n result_columns = result_columns.drop(cannot_agg)\n\n # GH6337\n if not len(result_columns) and errors is not None:\n raise errors\n\n return DataFrame(result, columns=result_columns)\n\n def _decide_output_index(self, output, labels):\n if len(output) == len(labels):\n output_keys = labels\n else:\n output_keys = sorted(output)\n try:\n output_keys.sort()\n except Exception: # pragma: no cover\n pass\n\n if isinstance(labels, MultiIndex):\n output_keys = MultiIndex.from_tuples(output_keys,\n names=labels.names)\n\n return output_keys\n\n def _wrap_applied_output(self, keys, values, not_indexed_same=False):\n from pandas.core.index import _all_indexes_same\n from pandas.core.tools.numeric import to_numeric\n\n if len(keys) == 0:\n return DataFrame(index=keys)\n\n key_names = self.grouper.names\n\n # GH12824.\n def first_not_none(values):\n try:\n return next(com._not_none(*values))\n except StopIteration:\n return None\n\n v = first_not_none(values)\n\n if v is None:\n # GH9684. 
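_aggregate_item_by_item above is the per-column fallback: columns whose aggregation raises are dropped from the result instead of failing the whole operation (GH6337 keeps the error only if nothing survives). A sketch of the visible effect, assuming the usual nuisance-column behaviour of this version:

import pandas as pd

df = pd.DataFrame({'key': ['a', 'a', 'b'],
                   'num': [1, 2, 3],
                   'txt': ['x', 'y', 'z']})

# the UDF raises TypeError on the string column, so that column drops out
# of the result rather than failing the aggregation
print(df.groupby('key').agg(lambda g: g.sum() / len(g)))
#      num
# key
# a    1.5
# b    3.0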
If all values are None, then this will throw an error.\n # We'd prefer it return an empty dataframe.\n return DataFrame()\n elif isinstance(v, DataFrame):\n return self._concat_objects(keys, values,\n not_indexed_same=not_indexed_same)\n elif self.grouper.groupings is not None:\n if len(self.grouper.groupings) > 1:\n key_index = self.grouper.result_index\n\n else:\n ping = self.grouper.groupings[0]\n if len(keys) == ping.ngroups:\n key_index = ping.group_index\n key_index.name = key_names[0]\n\n key_lookup = Index(keys)\n indexer = key_lookup.get_indexer(key_index)\n\n # reorder the values\n values = [values[i] for i in indexer]\n else:\n\n key_index = Index(keys, name=key_names[0])\n\n # don't use the key indexer\n if not self.as_index:\n key_index = None\n\n # make Nones an empty object\n v = first_not_none(values)\n if v is None:\n return DataFrame()\n elif isinstance(v, NDFrame):\n values = [\n x if x is not None else\n v._constructor(**v._construct_axes_dict())\n for x in values\n ]\n\n v = values[0]\n\n if isinstance(v, (np.ndarray, Index, Series)):\n if isinstance(v, Series):\n applied_index = self._selected_obj._get_axis(self.axis)\n all_indexed_same = _all_indexes_same([\n x.index for x in values\n ])\n singular_series = (len(values) == 1 and\n applied_index.nlevels == 1)\n\n # GH3596\n # provide a reduction (Frame -> Series) if groups are\n # unique\n if self.squeeze:\n\n # assign the name to this series\n if singular_series:\n values[0].name = keys[0]\n\n # GH2893\n # we have series in the values array, we want to\n # produce a series:\n # if any of the sub-series are not indexed the same\n # OR we don't have a multi-index and we have only a\n # single values\n return self._concat_objects(\n keys, values, not_indexed_same=not_indexed_same\n )\n\n # still a series\n # path added as of GH 5545\n elif all_indexed_same:\n from pandas.core.reshape.concat import concat\n return concat(values)\n\n if not all_indexed_same:\n # GH 8467\n return self._concat_objects(\n keys, values, not_indexed_same=True,\n )\n\n try:\n if self.axis == 0:\n # GH6124 if the list of Series have a consistent name,\n # then propagate that name to the result.\n index = v.index.copy()\n if index.name is None:\n # Only propagate the series name to the result\n # if all series have a consistent name. 
If the\n # series do not have a consistent name, do\n # nothing.\n names = {v.name for v in values}\n if len(names) == 1:\n index.name = list(names)[0]\n\n # normally use vstack as its faster than concat\n # and if we have mi-columns\n if (isinstance(v.index, MultiIndex) or\n key_index is None or\n isinstance(key_index, MultiIndex)):\n stacked_values = np.vstack(map(np.asarray, values))\n result = DataFrame(stacked_values, index=key_index,\n columns=index)\n else:\n # GH5788 instead of stacking; concat gets the\n # dtypes correct\n from pandas.core.reshape.concat import concat\n result = concat(values, keys=key_index,\n names=key_index.names,\n axis=self.axis).unstack()\n result.columns = index\n else:\n stacked_values = np.vstack(map(np.asarray, values))\n result = DataFrame(stacked_values.T, index=v.index,\n columns=key_index)\n\n except (ValueError, AttributeError):\n # GH1738: values is list of arrays of unequal lengths fall\n # through to the outer else caluse\n return Series(values, index=key_index,\n name=self._selection_name)\n\n # if we have date/time like in the original, then coerce dates\n # as we are stacking can easily have object dtypes here\n so = self._selected_obj\n if (so.ndim == 2 and so.dtypes.apply(is_datetimelike).any()):\n result = result.apply(\n lambda x: to_numeric(x, errors='ignore'))\n date_cols = self._selected_obj.select_dtypes(\n include=['datetime', 'timedelta']).columns\n date_cols = date_cols.intersection(result.columns)\n result[date_cols] = (result[date_cols]\n ._convert(datetime=True,\n coerce=True))\n else:\n result = result._convert(datetime=True)\n\n return self._reindex_output(result)\n\n # values are not series or array-like but scalars\n else:\n # only coerce dates if we find at least 1 datetime\n coerce = any(isinstance(x, Timestamp) for x in values)\n # self._selection_name not passed through to Series as the\n # result should not take the name of original selection\n # of columns\n return (Series(values, index=key_index)\n ._convert(datetime=True,\n coerce=coerce))\n\n else:\n # Handle cases like BinGrouper\n return self._concat_objects(keys, values,\n not_indexed_same=not_indexed_same)\n\n def _transform_general(self, func, *args, **kwargs):\n from pandas.core.reshape.concat import concat\n\n applied = []\n obj = self._obj_with_exclusions\n gen = self.grouper.get_iterator(obj, axis=self.axis)\n fast_path, slow_path = self._define_paths(func, *args, **kwargs)\n\n path = None\n for name, group in gen:\n object.__setattr__(group, 'name', name)\n\n if path is None:\n # Try slow path and fast path.\n try:\n path, res = self._choose_path(fast_path, slow_path, group)\n except TypeError:\n return self._transform_item_by_item(obj, fast_path)\n except ValueError:\n msg = 'transform must return a scalar value for each group'\n raise ValueError(msg)\n else:\n res = path(group)\n\n if isinstance(res, Series):\n\n # we need to broadcast across the\n # other dimension; this will preserve dtypes\n # GH14457\n if not np.prod(group.shape):\n continue\n elif res.index.is_(obj.index):\n r = concat([res] * len(group.columns), axis=1)\n r.columns = group.columns\n r.index = group.index\n else:\n r = DataFrame(\n np.concatenate([res.values] * len(group.index)\n ).reshape(group.shape),\n columns=group.columns, index=group.index)\n\n applied.append(r)\n else:\n applied.append(res)\n\n concat_index = obj.columns if self.axis == 0 else obj.index\n concatenated = concat(applied, join_axes=[concat_index],\n axis=self.axis, verify_integrity=False)\n return 
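_wrap_applied_output above decides the shape of GroupBy.apply results from what the UDF returned per group: scalars give a Series, same-indexed Series stack into a DataFrame, and DataFrames are concatenated. Roughly:

import pandas as pd

df = pd.DataFrame({'key': ['a', 'a', 'b'], 'x': [1.0, 2.0, 4.0]})
g = df.groupby('key')

print(g.apply(lambda d: d.x.sum()))            # scalar per group -> Series
print(g.apply(lambda d: d.x.describe()))       # same-indexed Series -> DataFrame
print(g.apply(lambda d: d.assign(y=d.x * 2)))  # DataFrame per group -> concat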
self._set_result_index_ordered(concatenated)\n\n @Substitution(klass='DataFrame', selected='')\n @Appender(_transform_template)\n def transform(self, func, *args, **kwargs):\n\n # optimized transforms\n func = self._is_cython_func(func) or func\n if isinstance(func, compat.string_types):\n if func in _cython_transforms:\n # cythonized transform\n return getattr(self, func)(*args, **kwargs)\n else:\n # cythonized aggregation and merge\n result = getattr(self, func)(*args, **kwargs)\n else:\n return self._transform_general(func, *args, **kwargs)\n\n # a reduction transform\n if not isinstance(result, DataFrame):\n return self._transform_general(func, *args, **kwargs)\n\n obj = self._obj_with_exclusions\n # nuiscance columns\n if not result.columns.equals(obj.columns):\n return self._transform_general(func, *args, **kwargs)\n\n return self._transform_fast(result, obj, func)\n\n def _transform_fast(self, result, obj, func_nm):\n \"\"\"\n Fast transform path for aggregations\n \"\"\"\n # if there were groups with no observations (Categorical only?)\n # try casting data to original dtype\n cast = self._transform_should_cast(func_nm)\n\n # for each col, reshape to to size of original frame\n # by take operation\n ids, _, ngroup = self.grouper.group_info\n output = []\n for i, _ in enumerate(result.columns):\n res = algorithms.take_1d(result.iloc[:, i].values, ids)\n if cast:\n res = self._try_cast(res, obj.iloc[:, i])\n output.append(res)\n\n return DataFrame._from_arrays(output, columns=result.columns,\n index=obj.index)\n\n def _define_paths(self, func, *args, **kwargs):\n if isinstance(func, compat.string_types):\n fast_path = lambda group: getattr(group, func)(*args, **kwargs)\n slow_path = lambda group: group.apply(\n lambda x: getattr(x, func)(*args, **kwargs), axis=self.axis)\n else:\n fast_path = lambda group: func(group, *args, **kwargs)\n slow_path = lambda group: group.apply(\n lambda x: func(x, *args, **kwargs), axis=self.axis)\n return fast_path, slow_path\n\n def _choose_path(self, fast_path, slow_path, group):\n path = slow_path\n res = slow_path(group)\n\n # if we make it here, test if we can use the fast path\n try:\n res_fast = fast_path(group)\n\n # compare that we get the same results\n if res.shape == res_fast.shape:\n res_r = res.values.ravel()\n res_fast_r = res_fast.values.ravel()\n mask = notna(res_r)\n if (res_r[mask] == res_fast_r[mask]).all():\n path = fast_path\n\n except Exception:\n pass\n return path, res\n\n def _transform_item_by_item(self, obj, wrapper):\n # iterate through columns\n output = {}\n inds = []\n for i, col in enumerate(obj):\n try:\n output[col] = self[col].transform(wrapper)\n inds.append(i)\n except Exception:\n pass\n\n if len(output) == 0: # pragma: no cover\n raise TypeError('Transform function invalid for data types')\n\n columns = obj.columns\n if len(output) < len(obj.columns):\n columns = columns.take(inds)\n\n return DataFrame(output, index=obj.index, columns=columns)\n\n def filter(self, func, dropna=True, *args, **kwargs): # noqa\n \"\"\"\n Return a copy of a DataFrame excluding elements from groups that\n do not satisfy the boolean criterion specified by func.\n\n Parameters\n ----------\n f : function\n Function to apply to each subframe. Should return True or False.\n dropna : Drop groups that do not pass the filter. 
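transform above first tries the cythonized aggregate-then-broadcast path (_transform_fast) for string names of known aggregations, and falls back to _transform_general for arbitrary callables:

import pandas as pd

df = pd.DataFrame({'key': ['a', 'a', 'b'], 'val': [1.0, 2.0, 4.0]})
g = df.groupby('key')

# string name of a cython aggregation -> fast path: aggregate, then take the
# group result back onto the original rows via the group ids
print(g.transform('mean'))

# arbitrary callable -> general path, applied group by group
print(g.transform(lambda x: x - x.mean()))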
True by default;\n if False, groups that evaluate False are filled with NaNs.\n\n Notes\n -----\n Each subframe is endowed the attribute 'name' in case you need to know\n which group you are working on.\n\n Examples\n --------\n >>> import pandas as pd\n >>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',\n ... 'foo', 'bar'],\n ... 'B' : [1, 2, 3, 4, 5, 6],\n ... 'C' : [2.0, 5., 8., 1., 2., 9.]})\n >>> grouped = df.groupby('A')\n >>> grouped.filter(lambda x: x['B'].mean() > 3.)\n A B C\n 1 bar 2 5.0\n 3 bar 4 1.0\n 5 bar 6 9.0\n\n Returns\n -------\n filtered : DataFrame\n \"\"\"\n\n indices = []\n\n obj = self._selected_obj\n gen = self.grouper.get_iterator(obj, axis=self.axis)\n\n for name, group in gen:\n object.__setattr__(group, 'name', name)\n\n res = func(group, *args, **kwargs)\n\n try:\n res = res.squeeze()\n except AttributeError: # allow e.g., scalars and frames to pass\n pass\n\n # interpret the result of the filter\n if is_bool(res) or (is_scalar(res) and isna(res)):\n if res and notna(res):\n indices.append(self._get_index(name))\n else:\n # non scalars aren't allowed\n raise TypeError(\"filter function returned a %s, \"\n \"but expected a scalar bool\" %\n type(res).__name__)\n\n return self._apply_filter(indices, dropna)\n\n\nclass DataFrameGroupBy(NDFrameGroupBy):\n _apply_whitelist = _dataframe_apply_whitelist\n #\n # Make class defs of attributes on DataFrameGroupBy whitelist.\n for _def_str in _whitelist_method_generator(DataFrame, _apply_whitelist):\n exec(_def_str)\n\n _block_agg_axis = 1\n\n _agg_doc = dedent(\"\"\"\n Examples\n --------\n\n >>> df = pd.DataFrame({'A': [1, 1, 2, 2],\n ... 'B': [1, 2, 3, 4],\n ... 'C': np.random.randn(4)})\n\n >>> df\n A B C\n 0 1 1 0.362838\n 1 1 2 0.227877\n 2 2 3 1.267767\n 3 2 4 -0.562860\n\n The aggregation is for each column.\n\n >>> df.groupby('A').agg('min')\n B C\n A\n 1 1 0.227877\n 2 3 -0.562860\n\n Multiple aggregations\n\n >>> df.groupby('A').agg(['min', 'max'])\n B C\n min max min max\n A\n 1 1 2 0.227877 0.362838\n 2 3 4 -0.562860 1.267767\n\n Select a column for aggregation\n\n >>> df.groupby('A').B.agg(['min', 'max'])\n min max\n A\n 1 1 2\n 2 3 4\n\n Different aggregations per column\n\n >>> df.groupby('A').agg({'B': ['min', 'max'], 'C': 'sum'})\n B C\n min max sum\n A\n 1 1 2 0.590716\n 2 3 4 0.704907\n\n See also\n --------\n pandas.DataFrame.groupby.apply\n pandas.DataFrame.groupby.transform\n pandas.DataFrame.aggregate\n\n \"\"\")\n\n @Appender(_agg_doc)\n @Appender(_shared_docs['aggregate'] % dict(\n klass='DataFrame',\n versionadded='',\n axis=''))\n def aggregate(self, arg, *args, **kwargs):\n return super(DataFrameGroupBy, self).aggregate(arg, *args, **kwargs)\n\n agg = aggregate\n\n def _gotitem(self, key, ndim, subset=None):\n \"\"\"\n sub-classes to define\n return a sliced object\n\n Parameters\n ----------\n key : string / list of selections\n ndim : 1,2\n requested ndim of result\n subset : object, default None\n subset to act on\n \"\"\"\n\n if ndim == 2:\n if subset is None:\n subset = self.obj\n return DataFrameGroupBy(subset, self.grouper, selection=key,\n grouper=self.grouper,\n exclusions=self.exclusions,\n as_index=self.as_index)\n elif ndim == 1:\n if subset is None:\n subset = self.obj[key]\n return SeriesGroupBy(subset, selection=key,\n grouper=self.grouper)\n\n raise AssertionError(\"invalid ndim for _gotitem\")\n\n def _wrap_generic_output(self, result, obj):\n result_index = self.grouper.levels[0]\n\n if self.axis == 0:\n return DataFrame(result, index=obj.columns,\n 
columns=result_index).T\n else:\n return DataFrame(result, index=obj.index,\n columns=result_index)\n\n def _get_data_to_aggregate(self):\n obj = self._obj_with_exclusions\n if self.axis == 1:\n return obj.T._data, 1\n else:\n return obj._data, 1\n\n def _insert_inaxis_grouper_inplace(self, result):\n # zip in reverse so we can always insert at loc 0\n izip = zip(* map(reversed, (\n self.grouper.names,\n self.grouper.get_group_levels(),\n [grp.in_axis for grp in self.grouper.groupings])))\n\n for name, lev, in_axis in izip:\n if in_axis:\n result.insert(0, name, lev)\n\n def _wrap_aggregated_output(self, output, names=None):\n agg_axis = 0 if self.axis == 1 else 1\n agg_labels = self._obj_with_exclusions._get_axis(agg_axis)\n\n output_keys = self._decide_output_index(output, agg_labels)\n\n if not self.as_index:\n result = DataFrame(output, columns=output_keys)\n self._insert_inaxis_grouper_inplace(result)\n result = result._consolidate()\n else:\n index = self.grouper.result_index\n result = DataFrame(output, index=index, columns=output_keys)\n\n if self.axis == 1:\n result = result.T\n\n return self._reindex_output(result)._convert(datetime=True)\n\n def _wrap_transformed_output(self, output, names=None):\n return DataFrame(output, index=self.obj.index)\n\n def _wrap_agged_blocks(self, items, blocks):\n if not self.as_index:\n index = np.arange(blocks[0].values.shape[1])\n mgr = BlockManager(blocks, [items, index])\n result = DataFrame(mgr)\n\n self._insert_inaxis_grouper_inplace(result)\n result = result._consolidate()\n else:\n index = self.grouper.result_index\n mgr = BlockManager(blocks, [items, index])\n result = DataFrame(mgr)\n\n if self.axis == 1:\n result = result.T\n\n return self._reindex_output(result)._convert(datetime=True)\n\n def _reindex_output(self, result):\n \"\"\"\n if we have categorical groupers, then we want to make sure that\n we have a fully reindex-output to the levels. These may have not\n participated in the groupings (e.g. may have all been\n nan groups)\n\n This can re-expand the output space\n \"\"\"\n\n # TODO(jreback): remove completely\n # when observed parameter is defaulted to True\n # gh-20583\n\n if self.observed:\n return result\n\n groupings = self.grouper.groupings\n if groupings is None:\n return result\n elif len(groupings) == 1:\n return result\n elif not any(isinstance(ping.grouper, (Categorical, CategoricalIndex))\n for ping in groupings):\n return result\n\n levels_list = [ping.group_index for ping in groupings]\n index, _ = MultiIndex.from_product(\n levels_list, names=self.grouper.names).sortlevel()\n\n if self.as_index:\n d = {self.obj._get_axis_name(self.axis): index, 'copy': False}\n return result.reindex(**d)\n\n # GH 13204\n # Here, the categorical in-axis groupers, which need to be fully\n # expanded, are columns in `result`. 
An idea is to do:\n # result = result.set_index(self.grouper.names)\n # .reindex(index).reset_index()\n # but special care has to be taken because of possible not-in-axis\n # groupers.\n # So, we manually select and drop the in-axis grouper columns,\n # reindex `result`, and then reset the in-axis grouper columns.\n\n # Select in-axis groupers\n in_axis_grps = [(i, ping.name) for (i, ping)\n in enumerate(groupings) if ping.in_axis]\n g_nums, g_names = zip(*in_axis_grps)\n\n result = result.drop(labels=list(g_names), axis=1)\n\n # Set a temp index and reindex (possibly expanding)\n result = result.set_index(self.grouper.result_index\n ).reindex(index, copy=False)\n\n # Reset in-axis grouper columns\n # (using level numbers `g_nums` because level names may not be unique)\n result = result.reset_index(level=g_nums)\n\n return result.reset_index(drop=True)\n\n def _iterate_column_groupbys(self):\n for i, colname in enumerate(self._selected_obj.columns):\n yield colname, SeriesGroupBy(self._selected_obj.iloc[:, i],\n selection=colname,\n grouper=self.grouper,\n exclusions=self.exclusions)\n\n def _apply_to_column_groupbys(self, func):\n from pandas.core.reshape.concat import concat\n return concat(\n (func(col_groupby) for _, col_groupby\n in self._iterate_column_groupbys()),\n keys=self._selected_obj.columns, axis=1)\n\n def _fill(self, direction, limit=None):\n \"\"\"Overridden method to join grouped columns in output\"\"\"\n res = super(DataFrameGroupBy, self)._fill(direction, limit=limit)\n output = collections.OrderedDict(\n (grp.name, grp.grouper) for grp in self.grouper.groupings)\n\n from pandas import concat\n return concat((self._wrap_transformed_output(output), res), axis=1)\n\n def count(self):\n \"\"\" Compute count of group, excluding missing values \"\"\"\n from pandas.core.dtypes.missing import _isna_ndarraylike as isna\n\n data, _ = self._get_data_to_aggregate()\n ids, _, ngroups = self.grouper.group_info\n mask = ids != -1\n\n val = ((mask & ~isna(np.atleast_2d(blk.get_values())))\n for blk in data.blocks)\n loc = (blk.mgr_locs for blk in data.blocks)\n\n counter = partial(count_level_2d, labels=ids, max_bin=ngroups, axis=1)\n blk = map(make_block, map(counter, val), loc)\n\n return self._wrap_agged_blocks(data.items, list(blk))\n\n def nunique(self, dropna=True):\n \"\"\"\n Return DataFrame with number of distinct observations per group for\n each column.\n\n .. versionadded:: 0.20.0\n\n Parameters\n ----------\n dropna : boolean, default True\n Don't include NaN in the counts.\n\n Returns\n -------\n nunique: DataFrame\n\n Examples\n --------\n >>> df = pd.DataFrame({'id': ['spam', 'egg', 'egg', 'spam',\n ... 'ham', 'ham'],\n ... 'value1': [1, 5, 5, 2, 5, 5],\n ... 
'value2': list('abbaxy')})\n >>> df\n id value1 value2\n 0 spam 1 a\n 1 egg 5 b\n 2 egg 5 b\n 3 spam 2 a\n 4 ham 5 x\n 5 ham 5 y\n\n >>> df.groupby('id').nunique()\n id value1 value2\n id\n egg 1 1 1\n ham 1 1 2\n spam 1 2 1\n\n # check for rows with the same id but conflicting values\n >>> df.groupby('id').filter(lambda g: (g.nunique() > 1).any())\n id value1 value2\n 0 spam 1 a\n 3 spam 2 a\n 4 ham 5 x\n 5 ham 5 y\n \"\"\"\n\n obj = self._selected_obj\n\n def groupby_series(obj, col=None):\n return SeriesGroupBy(obj,\n selection=col,\n grouper=self.grouper).nunique(dropna=dropna)\n\n if isinstance(obj, Series):\n results = groupby_series(obj)\n else:\n from pandas.core.reshape.concat import concat\n results = [groupby_series(obj[col], col) for col in obj.columns]\n results = concat(results, axis=1)\n\n if not self.as_index:\n results.index = com._default_index(len(results))\n return results\n\n boxplot = boxplot_frame_groupby\n\n\nclass PanelGroupBy(NDFrameGroupBy):\n\n def aggregate(self, arg, *args, **kwargs):\n return super(PanelGroupBy, self).aggregate(arg, *args, **kwargs)\n\n agg = aggregate\n\n def _iterate_slices(self):\n if self.axis == 0:\n # kludge\n if self._selection is None:\n slice_axis = self._selected_obj.items\n else:\n slice_axis = self._selection_list\n slicer = lambda x: self._selected_obj[x]\n else:\n raise NotImplementedError(\"axis other than 0 is not supported\")\n\n for val in slice_axis:\n if val in self.exclusions:\n continue\n\n yield val, slicer(val)\n\n def aggregate(self, arg, *args, **kwargs):\n \"\"\"\n Aggregate using input function or dict of {column -> function}\n\n Parameters\n ----------\n arg : function or dict\n Function to use for aggregating groups. If a function, must either\n work when passed a Panel or when passed to Panel.apply. 
If\n pass a dict, the keys must be DataFrame column names\n\n Returns\n -------\n aggregated : Panel\n \"\"\"\n if isinstance(arg, compat.string_types):\n return getattr(self, arg)(*args, **kwargs)\n\n return self._aggregate_generic(arg, *args, **kwargs)\n\n def _wrap_generic_output(self, result, obj):\n if self.axis == 0:\n new_axes = list(obj.axes)\n new_axes[0] = self.grouper.result_index\n elif self.axis == 1:\n x, y, z = obj.axes\n new_axes = [self.grouper.result_index, z, x]\n else:\n x, y, z = obj.axes\n new_axes = [self.grouper.result_index, y, x]\n\n result = Panel._from_axes(result, new_axes)\n\n if self.axis == 1:\n result = result.swapaxes(0, 1).swapaxes(0, 2)\n elif self.axis == 2:\n result = result.swapaxes(0, 2)\n\n return result\n\n def _aggregate_item_by_item(self, func, *args, **kwargs):\n obj = self._obj_with_exclusions\n result = {}\n\n if self.axis > 0:\n for item in obj:\n try:\n itemg = DataFrameGroupBy(obj[item],\n axis=self.axis - 1,\n grouper=self.grouper)\n result[item] = itemg.aggregate(func, *args, **kwargs)\n except (ValueError, TypeError):\n raise\n new_axes = list(obj.axes)\n new_axes[self.axis] = self.grouper.result_index\n return Panel._from_axes(result, new_axes)\n else:\n raise ValueError(\"axis value must be greater than 0\")\n\n def _wrap_aggregated_output(self, output, names=None):\n raise com.AbstractMethodError(self)\n\n\n# ----------------------------------------------------------------------\n# Splitting / application\n\n\nclass DataSplitter(object):\n\n def __init__(self, data, labels, ngroups, axis=0):\n self.data = data\n self.labels = _ensure_int64(labels)\n self.ngroups = ngroups\n\n self.axis = axis\n\n @cache_readonly\n def slabels(self):\n # Sorted labels\n return algorithms.take_nd(self.labels, self.sort_idx, allow_fill=False)\n\n @cache_readonly\n def sort_idx(self):\n # Counting sort indexer\n return get_group_index_sorter(self.labels, self.ngroups)\n\n def __iter__(self):\n sdata = self._get_sorted_data()\n\n if self.ngroups == 0:\n # we are inside a generator, rather than raise StopIteration\n # we merely return signal the end\n return\n\n starts, ends = lib.generate_slices(self.slabels, self.ngroups)\n\n for i, (start, end) in enumerate(zip(starts, ends)):\n # Since I'm now compressing the group ids, it's now not \"possible\"\n # to produce empty slices because such groups would not be observed\n # in the data\n # if start >= end:\n # raise AssertionError('Start %s must be less than end %s'\n # % (str(start), str(end)))\n yield i, self._chop(sdata, slice(start, end))\n\n def _get_sorted_data(self):\n return self.data._take(self.sort_idx, axis=self.axis)\n\n def _chop(self, sdata, slice_obj):\n return sdata.iloc[slice_obj]\n\n def apply(self, f):\n raise com.AbstractMethodError(self)\n\n\nclass SeriesSplitter(DataSplitter):\n\n def _chop(self, sdata, slice_obj):\n return sdata._get_values(slice_obj).to_dense()\n\n\nclass FrameSplitter(DataSplitter):\n\n def __init__(self, data, labels, ngroups, axis=0):\n super(FrameSplitter, self).__init__(data, labels, ngroups, axis=axis)\n\n def fast_apply(self, f, names):\n # must return keys::list, values::list, mutated::bool\n try:\n starts, ends = lib.generate_slices(self.slabels, self.ngroups)\n except Exception:\n # fails when all -1\n return [], True\n\n sdata = self._get_sorted_data()\n results, mutated = reduction.apply_frame_axis0(sdata, f, names,\n starts, ends)\n\n return results, mutated\n\n def _chop(self, sdata, slice_obj):\n if self.axis == 0:\n return sdata.iloc[slice_obj]\n 
else:\n return sdata._slice(slice_obj, axis=1) # .loc[:, slice_obj]\n\n\nclass NDFrameSplitter(DataSplitter):\n\n def __init__(self, data, labels, ngroups, axis=0):\n super(NDFrameSplitter, self).__init__(data, labels, ngroups, axis=axis)\n\n self.factory = data._constructor\n\n def _get_sorted_data(self):\n # this is the BlockManager\n data = self.data._data\n\n # this is sort of wasteful but...\n sorted_axis = data.axes[self.axis].take(self.sort_idx)\n sorted_data = data.reindex_axis(sorted_axis, axis=self.axis)\n\n return sorted_data\n\n def _chop(self, sdata, slice_obj):\n return self.factory(sdata.get_slice(slice_obj, axis=self.axis))\n\n\ndef get_splitter(data, *args, **kwargs):\n if isinstance(data, Series):\n klass = SeriesSplitter\n elif isinstance(data, DataFrame):\n klass = FrameSplitter\n else:\n klass = NDFrameSplitter\n\n return klass(data, *args, **kwargs)\n"
] | [
[
"numpy.diff",
"pandas.core.reshape.concat.concat",
"numpy.asarray",
"pandas._libs.reduction.SeriesBinGrouper",
"pandas.core.resample.get_resampler_for_grouping",
"pandas.core.dtypes.common.is_datetime64_any_dtype",
"pandas.core.dtypes.missing._isna_ndarraylike",
"pandas.core.window.ExpandingGroupby",
"pandas.to_timedelta",
"pandas._libs.reduction.apply_frame_axis0",
"numpy.repeat",
"pandas.core.dtypes.common.is_scalar",
"pandas.util._decorators.Appender",
"numpy.errstate",
"pandas.core.internals.BlockManager",
"numpy.array",
"pandas.core.base.SelectionMixin._builtin_table.get",
"pandas.util._validators.validate_kwargs",
"pandas.compat.iteritems",
"pandas.compat.map",
"pandas._libs.lib.generate_slices",
"pandas.core.index._all_indexes_same",
"pandas.compat.numpy.function.validate_groupby_func",
"pandas.core.dtypes.common.is_hashable",
"pandas.core.tools.numeric.to_numeric",
"pandas.io.formats.printing.pprint_thing",
"pandas.core.algorithms.take_1d",
"pandas.to_datetime",
"pandas.compat.lzip",
"numpy.add.at",
"pandas.core.dtypes.common.is_timedelta64_dtype",
"pandas.core.algorithms.unique1d",
"pandas.core.index.MultiIndex.from_product",
"pandas.core.dtypes.missing.isnull",
"pandas.core.base.SpecificationError",
"pandas.core.sorting.compress_group_index",
"pandas.core.arrays.Categorical.from_codes",
"pandas.core.sorting.get_flattened_iterator",
"pandas.core.dtypes.common._ensure_float64",
"numpy.concatenate",
"pandas.core.dtypes.common.is_integer_dtype",
"pandas.compat.zip",
"pandas.core.common.AbstractMethodError",
"pandas.core.dtypes.common.is_list_like",
"pandas.compat.itervalues",
"pandas.core.dtypes.common.is_categorical_dtype",
"numpy.iterable",
"numpy.empty_like",
"pandas.core.panel.Panel._from_axes",
"numpy.nonzero",
"pandas.core.index._ensure_index",
"pandas.core.dtypes.common._ensure_object",
"numpy.zeros",
"pandas.core.dtypes.common._ensure_categorical",
"numpy.arange",
"numpy.lexsort",
"numpy.prod",
"numpy.sort",
"pandas.core.frame.DataFrame._from_arrays",
"pandas.core.sorting.get_indexer_dict",
"pandas.core.dtypes.common.is_bool",
"pandas.core.dtypes.common._ensure_platform_int",
"pandas.core.sorting.get_group_index_sorter",
"pandas.core.sorting.decons_obs_group_ids",
"pandas.core.series.Series",
"pandas.core.common._pipe",
"pandas.core.common._get_callable_name",
"pandas.compat.range",
"pandas.core.config.option_context",
"numpy.minimum.accumulate",
"numpy.maximum.accumulate",
"pandas.core.dtypes.common.is_complex_dtype",
"pandas._libs.Timestamp",
"pandas.core.dtypes.common.is_numeric_dtype",
"pandas.core.common._asarray_tuplesafe",
"pandas.core.window.RollingGroupby",
"pandas._libs.lib.maybe_convert_objects",
"pandas.compat.callable",
"pandas.core.index.MultiIndex",
"pandas.core.common._values_from_object",
"pandas.core.dtypes.cast.maybe_downcast_to_dtype",
"pandas._libs.reduction.SeriesGrouper",
"pandas.core.dtypes.missing.notna",
"numpy.where",
"pandas.core.dtypes.common.needs_i8_conversion",
"pandas.core.dtypes.common.is_datetime64_dtype",
"numpy.flatnonzero",
"pandas.core.dtypes.common._ensure_int64",
"pandas.core.common._not_none",
"pandas.util._decorators.make_signature",
"pandas.core.base.DataError",
"numpy.bincount",
"pandas.compat.set_function_name",
"pandas.core.dtypes.common.is_bool_dtype",
"pandas.core.algorithms.take_nd",
"pandas.core.dtypes.common.is_interval_dtype",
"pandas.core.algorithms.factorize",
"numpy.add.reduceat",
"pandas.util._decorators.Substitution",
"numpy.empty",
"pandas.core.dtypes.common._ensure_float",
"pandas.core.index.MultiIndex.from_tuples",
"pandas.core.index.Index",
"pandas.core.reshape.merge._get_join_indexers",
"pandas.core.sorting.get_group_index",
"pandas.core.frame.DataFrame"
]
] |
jhlee900958/PhySG | [
"8aaeaeea53595e039782ddad74491176f943be68"
] | [
"code/evaluation/eval.py"
] | [
"import sys\nsys.path.append('../code')\nimport argparse\nimport GPUtil\nimport os\nfrom pyhocon import ConfigFactory\nimport torch\nimport numpy as np\nimport cvxpy as cp\nfrom PIL import Image\nimport math\n\nimport utils.general as utils\nimport utils.plots as plt\nfrom utils import rend_util\nfrom utils import vis_util\nfrom model.sg_render import compute_envmap\nimport imageio\n# import pyexr\n\n\ndef evaluate(**kwargs):\n torch.set_default_dtype(torch.float32)\n\n conf = ConfigFactory.parse_file(kwargs['conf'])\n exps_folder_name = kwargs['exps_folder_name']\n evals_folder_name = kwargs['evals_folder_name']\n\n expname = conf.get_string('train.expname') + '-' + kwargs['expname']\n\n if kwargs['timestamp'] == 'latest':\n if os.path.exists(os.path.join('../', kwargs['exps_folder_name'], expname)):\n timestamps = os.listdir(os.path.join('../', kwargs['exps_folder_name'], expname))\n if (len(timestamps)) == 0:\n print('WRONG EXP FOLDER')\n exit()\n else:\n timestamp = sorted(timestamps)[-1]\n else:\n print('WRONG EXP FOLDER')\n exit()\n else:\n timestamp = kwargs['timestamp']\n\n utils.mkdir_ifnotexists(os.path.join('../', evals_folder_name))\n expdir = os.path.join('../', exps_folder_name, expname)\n evaldir = os.path.join('../', evals_folder_name, expname, os.path.basename(kwargs['data_split_dir']))\n\n model = utils.get_class(conf.get_string('train.model_class'))(conf=conf.get_config('model'))\n if torch.cuda.is_available():\n model.cuda()\n\n eval_dataset = utils.get_class(conf.get_string('train.dataset_class'))(kwargs['gamma'],\n kwargs['data_split_dir'],\n train_cameras=False)\n\n eval_dataloader = torch.utils.data.DataLoader(eval_dataset,\n batch_size=1,\n shuffle=False,\n collate_fn=eval_dataset.collate_fn\n )\n total_pixels = eval_dataset.total_pixels\n img_res = eval_dataset.img_res\n\n old_checkpnts_dir = os.path.join(expdir, timestamp, 'checkpoints')\n ckpt_path = os.path.join(old_checkpnts_dir, 'ModelParameters', str(kwargs['checkpoint']) + \".pth\")\n saved_model_state = torch.load(ckpt_path)\n model.load_state_dict(saved_model_state[\"model_state_dict\"])\n epoch = saved_model_state['epoch']\n print('Loaded checkpoint: ', ckpt_path)\n\n if kwargs['geometry'].endswith('.pth'):\n print('Reloading geometry from: ', kwargs['geometry'])\n geometry = torch.load(kwargs['geometry'])['model_state_dict']\n geometry = {k: v for k, v in geometry.items() if 'implicit_network' in k}\n print(geometry.keys())\n model_dict = model.state_dict()\n model_dict.update(geometry)\n model.load_state_dict(model_dict)\n\n #####################################################################################################\n # reset lighting\n #####################################################################################################\n relight = False\n if kwargs['light_sg'].endswith('.npy'):\n print('Loading light from: ', kwargs['light_sg'])\n model.envmap_material_network.load_light(kwargs['light_sg'])\n evaldir = evaldir + '_relight'\n relight = True\n\n edit_diffuse = False\n if len(kwargs['diffuse_albedo']) > 0:\n print('Setting diffuse albedo to: ', kwargs['diffuse_albedo'])\n evaldir = evaldir + '_editdiffuse'\n edit_diffuse = True\n\n utils.mkdir_ifnotexists(evaldir)\n print('Output directory is: ', evaldir)\n\n with open(os.path.join(evaldir, 'ckpt_path.txt'), 'w') as fp:\n fp.write(ckpt_path + '\\n')\n\n ####################################################################################################################\n print(\"evaluating...\")\n model.eval()\n\n # extract 
mesh\n if (not edit_diffuse) and (not relight) and eval_dataset.has_groundtruth:\n with torch.no_grad():\n mesh = plt.get_surface_high_res_mesh(\n sdf=lambda x: model.implicit_network(x)[:, 0],\n resolution=kwargs['resolution']\n )\n\n # Taking the biggest connected component\n components = mesh.split(only_watertight=False)\n areas = np.array([c.area for c in components], dtype=np.float)\n mesh_clean = components[areas.argmax()]\n mesh_clean.export('{0}/mesh.obj'.format(evaldir), 'obj')\n\n\n # generate images\n images_dir = evaldir\n\n all_frames = []\n psnrs = []\n for data_index, (indices, model_input, ground_truth) in enumerate(eval_dataloader):\n if eval_dataset.has_groundtruth:\n out_img_name = os.path.basename(eval_dataset.image_paths[indices[0]])[:-4]\n else:\n out_img_name = '{}'.format(indices[0])\n\n if len(kwargs['view_name']) > 0 and out_img_name != kwargs['view_name']:\n print('Skipping: ', out_img_name)\n continue\n\n print('Evaluating data_index: ', data_index, len(eval_dataloader))\n model_input[\"intrinsics\"] = model_input[\"intrinsics\"].cuda()\n model_input[\"uv\"] = model_input[\"uv\"].cuda()\n model_input[\"object_mask\"] = model_input[\"object_mask\"].cuda()\n model_input['pose'] = model_input['pose'].cuda()\n\n split = utils.split_input(model_input, total_pixels)\n res = []\n for s in split:\n out = model(s)\n res.append({\n 'points': out['points'].detach(),\n 'idr_rgb_values': out['idr_rgb_values'].detach(),\n 'sg_rgb_values': out['sg_rgb_values'].detach(),\n 'normal_values': out['normal_values'].detach(),\n 'network_object_mask': out['network_object_mask'].detach(),\n 'object_mask': out['object_mask'].detach(),\n 'sg_diffuse_albedo_values': out['sg_diffuse_albedo_values'].detach(),\n 'sg_diffuse_rgb_values': out['sg_diffuse_rgb_values'].detach(),\n 'sg_specular_rgb_values': out['sg_specular_rgb_values'].detach(),\n })\n\n batch_size = ground_truth['rgb'].shape[0]\n model_outputs = utils.merge_output(res, total_pixels, batch_size)\n\n ### re-render with updated diffuse albedo\n if edit_diffuse:\n diffuse_albedo = imageio.imread(kwargs['diffuse_albedo']).astype(np.float32)[:, :, :3]\n if not kwargs['diffuse_albedo'].endswith('.exr'):\n diffuse_albedo /= 255.\n diffuse_albedo = torch.from_numpy(diffuse_albedo).cuda().reshape((-1, 3))\n\n ray_dirs, _ = rend_util.get_camera_params(model_input[\"uv\"],\n model_input['pose'],\n model_input[\"intrinsics\"])\n sg_ret = model.render_sg_rgb(mask=model_outputs['network_object_mask'],\n normals=model_outputs['normal_values'],\n view_dirs=-ray_dirs.reshape((-1, 3)),\n diffuse_albedo=diffuse_albedo)\n for x in sorted(sg_ret.keys()):\n assert (x in model_outputs)\n model_outputs[x] = sg_ret[x]\n\n tonemap_img = lambda x: np.power(x, 1./eval_dataset.gamma)\n clip_img = lambda x: np.clip(x, 0., 1.)\n\n assert (batch_size == 1)\n\n if kwargs['write_idr']:\n rgb_eval = model_outputs['idr_rgb_values']\n rgb_eval = rgb_eval.reshape(batch_size, total_pixels, 3)\n rgb_eval = plt.lin2img(rgb_eval, img_res).detach().cpu().numpy()[0]\n rgb_eval = rgb_eval.transpose(1, 2, 0)\n rgb_eval = clip_img(tonemap_img(rgb_eval))\n img = Image.fromarray((rgb_eval * 255).astype(np.uint8))\n img.save('{0}/idr_rgb_{1}.png'.format(images_dir, out_img_name))\n\n rgb_eval = model_outputs['sg_rgb_values']\n rgb_eval = rgb_eval.reshape(batch_size, total_pixels, 3)\n rgb_eval = plt.lin2img(rgb_eval, img_res).detach().cpu().numpy()[0]\n rgb_eval = rgb_eval.transpose(1, 2, 0)\n if kwargs['save_exr']:\n imageio.imwrite('{0}/sg_rgb_{1}.exr'.format(images_dir, 
out_img_name), rgb_eval)\n # pyexr.write('{0}/sg_rgb_{1}.exr'.format(images_dir, out_img_name), rgb_eval)\n # np.save('{0}/sg_rgb_{1}.npy'.format(images_dir, out_img_name), rgb_eval)\n\n else:\n rgb_eval = clip_img(tonemap_img(rgb_eval))\n img = Image.fromarray((rgb_eval * 255).astype(np.uint8))\n img.save('{0}/sg_rgb_{1}.png'.format(images_dir, out_img_name))\n\n all_frames.append(np.array(img))\n\n # network_object_mask = model_outputs['network_object_mask']\n # network_object_mask = network_object_mask.reshape(batch_size, total_pixels, 3)\n # network_object_mask = plt.lin2img(network_object_mask, img_res).detach().cpu().numpy()[0]\n # network_object_mask = network_object_mask.transpose(1, 2, 0)\n # img = Image.fromarray((network_object_mask * 255).astype(np.uint8))\n # img.save('{0}/object_mask_{1}.png'.format(images_dir, out_img_name))\n\n normal = model_outputs['normal_values']\n normal = normal.reshape(batch_size, total_pixels, 3)\n normal = (normal + 1.) / 2.\n normal = plt.lin2img(normal, img_res).detach().cpu().numpy()[0]\n normal = normal.transpose(1, 2, 0)\n if kwargs['save_exr']:\n imageio.imwrite('{0}/normal_{1}.exr'.format(images_dir, out_img_name), normal)\n # pyexr.write('{0}/normal_{1}.exr'.format(images_dir, out_img_name), normal)\n # np.save('{0}/normal_{1}.npy'.format(images_dir, out_img_name), normal)\n\n else:\n img = Image.fromarray((normal * 255).astype(np.uint8))\n img.save('{0}/normal_{1}.png'.format(images_dir, out_img_name))\n\n if (not relight) and eval_dataset.has_groundtruth:\n depth = torch.ones(batch_size * total_pixels).cuda().float()\n network_object_mask = model_outputs['network_object_mask'] & model_outputs['object_mask']\n depth_valid = rend_util.get_depth(model_outputs['points'].reshape(batch_size, total_pixels, 3),\n model_input['pose']).reshape(-1)[network_object_mask]\n depth[network_object_mask] = depth_valid\n depth[~network_object_mask] = 0.98 * depth_valid.min()\n assert (batch_size == 1)\n network_object_mask = network_object_mask.float().reshape(img_res[0], img_res[1]).cpu()\n depth = depth.reshape(img_res[0], img_res[1]).cpu()\n\n if kwargs['save_exr']:\n depth = depth * network_object_mask\n depth = depth.numpy()\n imageio.imwrite('{0}/depth_{1}.exr'.format(images_dir, out_img_name), depth)\n # pyexr.write('{0}/depth_{1}.exr'.format(images_dir, out_img_name), depth)\n # np.save('{0}/depth_{1}.npy'.format(images_dir, out_img_name), depth)\n\n else:\n depth = vis_util.colorize(depth, cmap_name='jet')\n depth = depth * network_object_mask.unsqueeze(-1) + (1. 
- network_object_mask.unsqueeze(-1))\n depth = depth.numpy()\n img = Image.fromarray((depth * 255).astype(np.uint8))\n img.save('{0}/depth_{1}.png'.format(images_dir, out_img_name))\n\n # write lighting and materials\n envmap = compute_envmap(lgtSGs=model.envmap_material_network.get_light(), H=256, W=512, upper_hemi=model.envmap_material_network.upper_hemi)\n envmap = envmap.cpu().numpy()\n imageio.imwrite(os.path.join(images_dir, 'envmap.exr'), envmap)\n\n roughness, specular_reflectance = model.envmap_material_network.get_base_materials()\n with open(os.path.join(images_dir, 'relight_material.txt'), 'w') as fp:\n for i in range(roughness.shape[0]):\n fp.write('Material {}:\\n'.format(i))\n fp.write('\\troughness: {}\\n'.format(roughness[i, 0].item()))\n fp.write('\\tspecular_reflectance: ')\n for j in range(3):\n fp.write('{}, '.format(specular_reflectance[i, j].item()))\n fp.write('\\n\\n')\n\n rgb_gt = ground_truth['rgb']\n rgb_gt = plt.lin2img(rgb_gt, img_res).numpy()[0].transpose(1, 2, 0)\n if kwargs['save_exr']:\n imageio.imwrite('{0}/gt_{1}.exr'.format(images_dir, out_img_name), rgb_gt)\n # pyexr.write('{0}/gt_{1}.exr'.format(images_dir, out_img_name), rgb_gt)\n # np.save('{0}/gt_{1}.npy'.format(images_dir, out_img_name), rgb_gt)\n\n else:\n rgb_gt = clip_img(tonemap_img(rgb_gt))\n img = Image.fromarray((rgb_gt * 255).astype(np.uint8))\n img.save('{0}/gt_{1}.png'.format(images_dir, out_img_name))\n\n mask = model_input['object_mask']\n mask = plt.lin2img(mask.unsqueeze(-1), img_res).cpu().numpy()[0]\n mask = mask.transpose(1, 2, 0)\n rgb_eval_masked = rgb_eval * mask\n rgb_gt_masked = rgb_gt * mask\n\n psnr = calculate_psnr(rgb_eval_masked, rgb_gt_masked, mask)\n psnrs.append(psnr)\n\n # verbose mode\n rgb_eval = model_outputs['sg_diffuse_albedo_values']\n rgb_eval = rgb_eval.reshape(batch_size, total_pixels, 3)\n rgb_eval = plt.lin2img(rgb_eval, img_res).detach().cpu().numpy()[0]\n rgb_eval = rgb_eval.transpose(1, 2, 0)\n if kwargs['save_exr']:\n imageio.imwrite('{0}/sg_diffuse_albedo_{1}.exr'.format(images_dir, out_img_name), rgb_eval)\n # pyexr.write('{0}/sg_diffuse_albedo_{1}.exr'.format(images_dir, out_img_name), rgb_eval)\n # np.save('{0}/sg_diffuse_albedo_{1}.npy'.format(images_dir, out_img_name), rgb_eval)\n\n else:\n rgb_eval = clip_img(rgb_eval)\n img = Image.fromarray((rgb_eval * 255).astype(np.uint8))\n img.save('{0}/sg_diffuse_albedo_{1}.png'.format(images_dir, out_img_name))\n\n rgb_eval = model_outputs['sg_diffuse_rgb_values']\n rgb_eval = rgb_eval.reshape(batch_size, total_pixels, 3)\n rgb_eval = plt.lin2img(rgb_eval, img_res).detach().cpu().numpy()[0]\n rgb_eval = rgb_eval.transpose(1, 2, 0)\n if kwargs['save_exr']:\n imageio.imwrite('{0}/sg_diffuse_rgb_{1}.exr'.format(images_dir, out_img_name), rgb_eval)\n # pyexr.write('{0}/sg_diffuse_rgb_{1}.exr'.format(images_dir, out_img_name), rgb_eval)\n # np.save('{0}/sg_diffuse_rgb_{1}.npy'.format(images_dir, out_img_name), rgb_eval)\n\n else:\n rgb_eval = clip_img(rgb_eval)\n img = Image.fromarray((rgb_eval * 255).astype(np.uint8))\n img.save('{0}/sg_diffuse_rgb_{1}.png'.format(images_dir, out_img_name))\n\n rgb_eval = model_outputs['sg_specular_rgb_values']\n rgb_eval = rgb_eval.reshape(batch_size, total_pixels, 3)\n rgb_eval = plt.lin2img(rgb_eval, img_res).detach().cpu().numpy()[0]\n rgb_eval = rgb_eval.transpose(1, 2, 0)\n if kwargs['save_exr']:\n imageio.imwrite('{0}/sg_specular_rgb_{1}.exr'.format(images_dir, out_img_name), rgb_eval)\n # pyexr.write('{0}/sg_specular_rgb_{1}.exr'.format(images_dir, out_img_name), 
rgb_eval)\n # np.save('{0}/sg_specular_rgb_{1}.npy'.format(images_dir, out_img_name), rgb_eval)\n\n else:\n rgb_eval = clip_img(rgb_eval)\n img = Image.fromarray((rgb_eval * 255).astype(np.uint8))\n img.save('{0}/sg_specular_rgb_{1}.png'.format(images_dir, out_img_name))\n\n if not kwargs['save_exr']:\n imageio.mimwrite(os.path.join(images_dir, 'video_rgb.mp4'), all_frames, fps=15, quality=9)\n print('Done rendering', images_dir)\n\n if len(psnrs) > 0:\n psnrs = np.array(psnrs).astype(np.float64)\n # print(\"RENDERING EVALUATION {2}: psnr mean = {0} ; psnr std = {1}\".format(\"%.2f\" % psnrs.mean(), \"%.2f\" % psnrs.std(), scan_id))\n print(\"RENDERING EVALUATION: psnr mean = {0} ; psnr std = {1}\".format(\"%.2f\" % psnrs.mean(), \"%.2f\" % psnrs.std()))\n\n\ndef get_cameras_accuracy(pred_Rs, gt_Rs, pred_ts, gt_ts,):\n ''' Align predicted pose to gt pose and print cameras accuracy'''\n\n # find rotation\n d = pred_Rs.shape[-1]\n n = pred_Rs.shape[0]\n\n Q = torch.addbmm(torch.zeros(d, d, dtype=torch.double), gt_Rs, pred_Rs.transpose(1, 2))\n Uq, _, Vq = torch.svd(Q)\n sv = torch.ones(d, dtype=torch.double)\n sv[-1] = torch.det(Uq @ Vq.transpose(0, 1))\n R_opt = Uq @ torch.diag(sv) @ Vq.transpose(0, 1)\n R_fixed = torch.bmm(R_opt.repeat(n, 1, 1), pred_Rs)\n\n # find translation\n pred_ts = pred_ts @ R_opt.transpose(0, 1)\n c_opt = cp.Variable()\n t_opt = cp.Variable((1, d))\n\n constraints = []\n obj = cp.Minimize(cp.sum(\n cp.norm(gt_ts.numpy() - (c_opt * pred_ts.numpy() + np.ones((n, 1), dtype=np.double) @ t_opt), axis=1)))\n prob = cp.Problem(obj, constraints)\n prob.solve()\n t_fixed = c_opt.value * pred_ts.numpy() + np.ones((n, 1), dtype=np.double) * t_opt.value\n\n # Calculate transaltion error\n t_error = np.linalg.norm(t_fixed - gt_ts.numpy(), axis=-1)\n t_error = t_error\n t_error_mean = np.mean(t_error)\n t_error_medi = np.median(t_error)\n\n # Calculate rotation error\n R_error = compare_rotations(R_fixed, gt_Rs)\n\n R_error = R_error.numpy()\n R_error_mean = np.mean(R_error)\n R_error_medi = np.median(R_error)\n\n print('CAMERAS EVALUATION: R error mean = {0} ; t error mean = {1} ; R error median = {2} ; t error median = {3}'\n .format(\"%.2f\" % R_error_mean, \"%.2f\" % t_error_mean, \"%.2f\" % R_error_medi, \"%.2f\" % t_error_medi))\n\n # return alignment and aligned pose\n return R_opt.numpy(), t_opt.value, c_opt.value, R_fixed.numpy(), t_fixed\n\ndef compare_rotations(R1, R2):\n cos_err = (torch.bmm(R1, R2.transpose(1, 2))[:, torch.arange(3), torch.arange(3)].sum(dim=-1) - 1) / 2\n cos_err[cos_err > 1] = 1\n cos_err[cos_err < -1] = -1\n return cos_err.acos() * 180 / np.pi\n\ndef calculate_psnr(img1, img2, mask):\n # img1 and img2 have range [0, 1]\n img1 = img1.astype(np.float64)\n img2 = img2.astype(np.float64)\n mse = np.mean((img1 - img2)**2) * (img2.shape[0] * img2.shape[1]) / mask.sum()\n if mse == 0:\n return float('inf')\n return 20 * math.log10(1.0 / math.sqrt(mse))\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--conf', type=str, default='./confs/default.conf')\n parser.add_argument('--data_split_dir', type=str, default='')\n parser.add_argument('--gamma', type=float, default=1., help='gamma correction coefficient')\n\n parser.add_argument('--save_exr', default=False, action=\"store_true\", help='')\n\n parser.add_argument('--light_sg', type=str, default='', help='')\n parser.add_argument('--geometry', type=str, default='', help='')\n parser.add_argument('--diffuse_albedo', type=str, default='', help='')\n 
parser.add_argument('--view_name', type=str, default='', help='')\n\n parser.add_argument('--expname', type=str, default='', help='The experiment name to be evaluated.')\n parser.add_argument('--exps_folder', type=str, default='exps', help='The experiments folder name.')\n parser.add_argument('--timestamp', default='latest', type=str, help='The experiemnt timestamp to test.')\n parser.add_argument('--checkpoint', default='latest',type=str,help='The trained model checkpoint to test')\n\n parser.add_argument('--write_idr', default=False, action=\"store_true\", help='')\n\n parser.add_argument('--resolution', default=512, type=int, help='Grid resolution for marching cube')\n parser.add_argument('--is_uniform_grid', default=False, action=\"store_true\", help='If set, evaluate marching cube with uniform grid.')\n\n parser.add_argument('--gpu', type=str, default='auto', help='GPU to use [default: GPU auto]')\n\n opt = parser.parse_args()\n\n if opt.gpu == \"auto\":\n deviceIDs = GPUtil.getAvailable(order='memory', limit=1, maxLoad=0.5, maxMemory=0.5, includeNan=False, excludeID=[], excludeUUID=[])\n gpu = deviceIDs[0]\n else:\n gpu = opt.gpu\n\n if (not gpu == 'ignore'):\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = '{0}'.format(gpu)\n\n evaluate(conf=opt.conf,\n write_idr=opt.write_idr,\n gamma=opt.gamma,\n data_split_dir=opt.data_split_dir,\n expname=opt.expname,\n exps_folder_name=opt.exps_folder,\n evals_folder_name='evals',\n timestamp=opt.timestamp,\n checkpoint=opt.checkpoint,\n resolution=opt.resolution,\n save_exr=opt.save_exr,\n light_sg=opt.light_sg,\n geometry=opt.geometry,\n view_name=opt.view_name,\n diffuse_albedo=opt.diffuse_albedo,\n )\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.ones",
"numpy.ones",
"torch.load",
"torch.svd",
"torch.no_grad",
"torch.diag",
"numpy.median",
"torch.set_default_dtype",
"torch.cuda.is_available",
"numpy.power",
"numpy.clip",
"torch.from_numpy",
"torch.arange",
"torch.zeros",
"numpy.array",
"numpy.mean"
]
] |
akeaveny/robo-gym-robot-servers | [
"072ddf11292190f9770591853f66902e094c5df4"
] | [
"ur_robot_server/scripts/objects_controller.py"
] | [
"#!/usr/bin/env python\nimport rospy\nfrom std_msgs.msg import Bool\nfrom geometry_msgs.msg import Pose, Twist, TransformStamped\nfrom gazebo_msgs.msg import ModelState\nfrom scipy import signal, interpolate \nimport numpy as np \nimport copy\nimport os\nimport random\n\nimport tf, tf.msg\nimport json\n\nmove = False \nclass ObjectsController:\n def __init__(self):\n\n self.real_robot = rospy.get_param(\"real_robot\")\n\n # Objects Model State publisher\n if not self.real_robot:\n self.set_model_state_pub = rospy.Publisher('gazebo/set_model_state', ModelState, queue_size=1)\n\n # Objects position update frequency (Hz)\n self.update_rate = 100 \n\n # move_objects subscriber\n rospy.Subscriber(\"move_objects\", Bool, self.callback_move_objects)\n \n self.publish_tf = rospy.get_param(\"publish_objects_tf\")\n\n # Objects TF Publisher\n if self.publish_tf:\n self.reference_frame = rospy.get_param(\"reference_frame\", \"base\")\n self.pub_tf = rospy.Publisher(\"/tf\", tf.msg.tfMessage)\n\n object_trajectory_file_name = rospy.get_param(\"object_trajectory_file_name\")\n if object_trajectory_file_name != 'no_file':\n file_path = os.path.join(os.path.dirname(__file__),'../object_trajectories', object_trajectory_file_name + '.json')\n\n # Load object trajectory file \n with open(file_path, 'r') as json_file:\n self.p = json.load(json_file)\n \n def callback_move_objects(self,data):\n global move \n if data.data == True:\n move = True\n else:\n move = False\n \n def get_fixed_position(self, x, y, z):\n \"\"\"Generate trajectory for object in a fixed position\n\n Args:\n x (float): x coordinate (m).\n y (float): y coordinate (m).\n z (float): z coordinate (m).\n\n Returns:\n list: x coordinate function\n list: y coordinate function\n list: z coordinate function\n \"\"\" \n x_function = [x]\n y_function = [y]\n z_function = [z]\n self.samples_len = 1\n \n return x_function, y_function, z_function \n\n def get_triangle_wave(self, x, y, amplitude, frequency, offset):\n\n \"\"\"Generate samples of triangle wave function with amplitude in the z axis direction.\n\n Args:\n x (float): x coordinate (m).\n y (float): y coordinate (m).\n amplitude (float): amplitude of the triangle wave (m).\n frequency (float): frequency of the triangle wave (Hz).\n offset (float): offset from the ground of the zero of the triangle wave (m).\n\n\n Returns:\n np.array: Samples of the x coordinate of the function over time.\n np.array: Samples of the y coordinate of the function over time.\n np.array: Samples of the z coordinate of the function over time.\n\n \"\"\"\n\n # Create array with time samples over 1 full function period\n sampling_rate = copy.deepcopy(self.update_rate)\n self.samples_len = int(sampling_rate / frequency)\n t = np.linspace(0, (1/frequency), self.samples_len)\n\n x_function = np.full(self.samples_len, x)\n y_function = np.full(self.samples_len, y)\n z_function = offset + amplitude * signal.sawtooth(2 * np.pi * frequency * t, 0.5)\n\n return x_function, y_function, z_function\n \n def get_3d_spline(self, x_min, x_max, y_min, y_max, z_min, z_max, n_points = 10, n_sampling_points = 4000):\n \n \"\"\"Generate samples of the cartesian coordinates of a 3d spline.\n\n Args:\n x_min (float): min x coordinate of random points used to interpolate spline (m).\n x_max (float): max x coordinate of random points used to interpolate spline (m).\n y_min (float): min y coordinate of random points used to interpolate spline (m).\n y_max (float): max y coordinate of random points used to interpolate spline (m).\n z_min 
(float): min z coordinate of random points used to interpolate spline (m).\n z_max (float): max z coordinate of random points used to interpolate spline (m).\n n_points (int): number of random points used to interpolate the 3d spline.\n n_sampling_points (int): number of the samples to take over the whole length of the spline.\n\n Returns:\n np.array: Samples of the x coordinate of the function over time.\n np.array: Samples of the y coordinate of the function over time.\n np.array: Samples of the z coordinate of the function over time.\n\n \"\"\"\n\n # Convert number of points to int\n n_points = int(n_points)\n # Convert number of sampling points to int\n # By increasing the number of sampling points the speed of the object decreases\n n_sampling_points = int(n_sampling_points)\n # Create array with time samples over 1 full function period\n\n self.samples_len = n_sampling_points\n\n\n x = np.random.uniform(x_min,x_max,n_points)\n y = np.random.uniform(y_min,y_max,n_points)\n z = np.random.uniform(z_min,z_max,n_points)\n\n # set last point equal to first to have a closed trajectory\n x[n_points-1] = x[0]\n y[n_points-1] = y[0]\n z[n_points-1] = z[0]\n\n smoothness = 0\n tck, u = interpolate.splprep([x,y,z], s=smoothness)\n u_fine = np.linspace(0,1,n_sampling_points)\n x_function, y_function, z_function = interpolate.splev(u_fine, tck)\n\n return x_function, y_function, z_function\n\n def get_3d_spline_ur5_workspace(self, x_min, x_max, y_min, y_max, z_min, z_max, n_points = 10, n_sampling_points = 4000):\n \n \"\"\"Generate samples of the cartesian coordinates of a 3d spline that do not cross a vertical \n cylinder of radius r_min centered in 0,0.\n\n Args:\n x_min (float): min x coordinate of random points used to interpolate spline (m).\n x_max (float): max x coordinate of random points used to interpolate spline (m).\n y_min (float): min y coordinate of random points used to interpolate spline (m).\n y_max (float): max y coordinate of random points used to interpolate spline (m).\n z_min (float): min z coordinate of random points used to interpolate spline (m).\n z_max (float): max z coordinate of random points used to interpolate spline (m).\n n_points (int): number of random points used to interpolate the 3d spline.\n n_sampling_points (int): number of the samples to take over the whole length of the spline.\n\n Returns:\n np.array: Samples of the x coordinate of the function over time.\n np.array: Samples of the y coordinate of the function over time.\n np.array: Samples of the z coordinate of the function over time.\n\n \"\"\"\n\n r_min_cylinder = 0.2 \n r_min_sphere_base = 0.35 \n\n # Convert number of points to int\n n_points = int(n_points)\n # Convert number of sampling points to int\n # By increasing the number of sampling points the speed of the object decreases\n n_sampling_points = int(n_sampling_points)\n # Create array with time samples over 1 full function period\n\n self.samples_len = n_sampling_points\n \n search = True\n while search:\n x = np.random.uniform(x_min,x_max,n_points)\n y = np.random.uniform(y_min,y_max,n_points)\n z = np.random.uniform(z_min,z_max,n_points)\n\n # set first point oustide of square of size 0.5m centered in 0,0\n x[0] = random.choice([np.random.uniform(-1.0,-0.5),np.random.uniform(0.5,1.0)])\n y[0] = random.choice([np.random.uniform(-1.0,-0.5),np.random.uniform(0.5,1.0)])\n\n # set last point equal to first to have a closed trajectory\n x[n_points-1] = x[0]\n y[n_points-1] = y[0]\n z[n_points-1] = z[0]\n\n smoothness = 0\n tck, u = 
interpolate.splprep([x,y,z], s=smoothness)\n u_fine = np.linspace(0,1,n_sampling_points)\n x_function, y_function, z_function = interpolate.splev(u_fine, tck)\n \n search = False\n for i in range(len(x_function)):\n if (x_function[i]**2+y_function[i]**2)**(1/2) <= r_min_cylinder or \\\n (x_function[i]**2+y_function[i]**2+z_function[i]**2)**(1/2) <= r_min_sphere_base :\n search = True\n\n return x_function, y_function, z_function\n\n def get_fixed_trajectory(self, trajectory_id):\n # file_name = \"obstacle_trajectories.yaml\"\n\n\n trajectory_name = \"trajectory_\" + str(int(trajectory_id))\n x_function = self.p[trajectory_name][\"x\"]\n y_function = self.p[trajectory_name][\"y\"]\n z_function = self.p[trajectory_name][\"z\"]\n\n # self.samples_len = self.p[trajectory_name][\"n_sampling_points\"]\n self.samples_len = 4000\n return x_function, y_function, z_function \n\n def objects_state_update_loop(self):\n \n while not rospy.is_shutdown():\n if move:\n self.n_objects = int(rospy.get_param(\"n_objects\", 1))\n # Initialization of ModelState() messages\n if not self.real_robot:\n objects_model_state = [ModelState() for i in range(self.n_objects)]\n # Get objects model names\n for i in range(self.n_objects):\n objects_model_state[i].model_name = rospy.get_param(\"object_\" + repr(i) +\"_model_name\")\n rospy.loginfo(rospy.get_param(\"object_\" + repr(i) +\"_model_name\"))\n if self.publish_tf:\n # Initialization of Objects tf frames names\n objects_tf_frame = [rospy.get_param(\"object_\" + repr(i) +\"_frame\") for i in range(self.n_objects)]\n\n # Generate Movement Trajectories\n objects_trajectories = []\n for i in range(self.n_objects):\n function = rospy.get_param(\"object_\" + repr(i) +\"_function\")\n if function == \"fixed_position\":\n x = rospy.get_param(\"object_\" + repr(i) + \"_x\")\n y = rospy.get_param(\"object_\" + repr(i) + \"_y\")\n z = rospy.get_param(\"object_\" + repr(i) + \"_z\")\n x_trajectory, y_trajectory, z_trajectory = self.get_fixed_position(x,y,z)\n elif function == \"triangle_wave\":\n x = rospy.get_param(\"object_\" + repr(i) + \"_x\")\n y = rospy.get_param(\"object_\" + repr(i) + \"_y\")\n a = rospy.get_param(\"object_\" + repr(i) + \"_z_amplitude\")\n f = rospy.get_param(\"object_\" + repr(i) + \"_z_frequency\")\n o = rospy.get_param(\"object_\" + repr(i) + \"_z_offset\")\n x_trajectory, y_trajectory, z_trajectory = self.get_triangle_wave(x, y, a, f, o)\n elif function == \"3d_spline\":\n x_min = rospy.get_param(\"object_\" + repr(i) + \"_x_min\")\n x_max = rospy.get_param(\"object_\" + repr(i) + \"_x_max\")\n y_min = rospy.get_param(\"object_\" + repr(i) + \"_y_min\")\n y_max = rospy.get_param(\"object_\" + repr(i) + \"_y_max\")\n z_min = rospy.get_param(\"object_\" + repr(i) + \"_z_min\")\n z_max = rospy.get_param(\"object_\" + repr(i) + \"_z_max\")\n n_points = rospy.get_param(\"object_\" + repr(i) + \"_n_points\")\n n_sampling_points = rospy.get_param(\"n_sampling_points\")\n x_trajectory, y_trajectory, z_trajectory = self.get_3d_spline(x_min, x_max, y_min, y_max, z_min, z_max, n_points, n_sampling_points)\n elif function == \"3d_spline_ur5_workspace\":\n x_min = rospy.get_param(\"object_\" + repr(i) + \"_x_min\")\n x_max = rospy.get_param(\"object_\" + repr(i) + \"_x_max\")\n y_min = rospy.get_param(\"object_\" + repr(i) + \"_y_min\")\n y_max = rospy.get_param(\"object_\" + repr(i) + \"_y_max\")\n z_min = rospy.get_param(\"object_\" + repr(i) + \"_z_min\")\n z_max = rospy.get_param(\"object_\" + repr(i) + \"_z_max\")\n n_points = 
rospy.get_param(\"object_\" + repr(i) + \"_n_points\")\n n_sampling_points = rospy.get_param(\"n_sampling_points\")\n x_trajectory, y_trajectory, z_trajectory = self.get_3d_spline_ur5_workspace(x_min, x_max, y_min, y_max, z_min, z_max, n_points, n_sampling_points)\n elif function == \"fixed_trajectory\":\n trajectory_id = rospy.get_param(\"object_\" + repr(i) + \"_trajectory_id\")\n x_trajectory, y_trajectory, z_trajectory = self.get_fixed_trajectory(trajectory_id)\n objects_trajectories.append([x_trajectory, y_trajectory, z_trajectory])\n\n # Move objects \n s = 0 \n while move: \n s = s % self.samples_len\n for i in range(self.n_objects):\n if not self.real_robot:\n objects_model_state[i].pose.position.x = objects_trajectories[i][0][s]\n objects_model_state[i].pose.position.y = objects_trajectories[i][1][s]\n objects_model_state[i].pose.position.z = objects_trajectories[i][2][s]\n self.set_model_state_pub.publish(objects_model_state[i])\n if self.publish_tf:\n t = TransformStamped()\n t.header.frame_id = self.reference_frame\n t.header.stamp = rospy.Time.now()\n t.child_frame_id = objects_tf_frame[i]\n t.transform.translation.x = objects_trajectories[i][0][s]\n t.transform.translation.y = objects_trajectories[i][1][s]\n t.transform.translation.z = objects_trajectories[i][2][s]\n t.transform.rotation.x = 0.0\n t.transform.rotation.y = 0.0\n t.transform.rotation.z = 0.0\n t.transform.rotation.w = 1.0\n tfm = tf.msg.tfMessage([t])\n self.pub_tf.publish(tfm)\n\n rospy.Rate(self.update_rate).sleep()\n s = s + 1\n\n # Move objects up in the air \n for i in range(self.n_objects):\n if not self.real_robot:\n objects_model_state[i].pose.position.x = i\n objects_model_state[i].pose.position.y = 0.0\n objects_model_state[i].pose.position.z = 3.0\n self.set_model_state_pub.publish(objects_model_state[i]) \n if self.publish_tf:\n t = TransformStamped()\n t.header.frame_id = self.reference_frame\n t.header.stamp = rospy.Time.now()\n t.child_frame_id = objects_tf_frame[i]\n t.transform.translation.x = i\n t.transform.translation.y = 0.0\n t.transform.translation.z = 3.0\n t.transform.rotation.x = 0.0\n t.transform.rotation.y = 0.0\n t.transform.rotation.z = 0.0\n t.transform.rotation.w = 1.0\n tfm = tf.msg.tfMessage([t])\n self.pub_tf.publish(tfm)\n rospy.Rate(self.update_rate).sleep()\n else:\n pass \n\n\n\nif __name__ == '__main__':\n try:\n rospy.init_node('objects_controller')\n oc = ObjectsController()\n oc.objects_state_update_loop()\n except rospy.ROSInterruptException:\n pass\n"
] | [
[
"numpy.random.uniform",
"scipy.interpolate.splev",
"scipy.interpolate.splprep",
"numpy.full",
"numpy.linspace",
"scipy.signal.sawtooth"
]
] |
marcobaga/leap | [
"975fa34aad4e3e578feea4dee3db45c061c29cdb"
] | [
"railrl/state_distance/tdm_networks.py"
] | [
"import numpy as np\nimport torch\n\nfrom railrl.state_distance.policies import UniversalPolicy\nfrom railrl.torch.networks import TanhMlpPolicy, FlattenMlp\nfrom railrl.torch.sac.policies import TanhGaussianPolicy\n\n\nclass TdmQf(FlattenMlp):\n def __init__(\n self,\n env,\n vectorized,\n structure='norm_difference',\n learn_offset=False,\n observation_dim=None,\n action_dim=None,\n goal_dim=None,\n norm_order=1,\n output_dim=None,\n **flatten_mlp_kwargs\n ):\n \"\"\"\n :param env:\n :param hidden_sizes:\n :param vectorized: Boolean. Vectorized or not?\n :param structure: String defining output structure of network:\n - 'norm_difference': Q = -||g - f(inputs)||\n - 'squared_difference': Q = -(g - f(inputs))^2\n - 'squared_difference_offset': Q = -(goal - f(inputs))^2 + f2(s, goal, tau)\n - 'none': Q = f(inputs)\n\n :param kwargs:\n \"\"\"\n assert structure in [\n 'norm_difference',\n 'squared_difference',\n 'none',\n ]\n self.save_init_params(locals())\n\n if observation_dim is None:\n self.observation_dim = env.observation_space.low.size\n else:\n self.observation_dim = observation_dim\n\n if action_dim is None:\n self.action_dim = env.action_space.low.size\n else:\n self.action_dim = action_dim\n\n if goal_dim is None:\n self.goal_dim = env.goal_dim\n else:\n self.goal_dim = goal_dim\n\n if output_dim is None:\n output_dim = self.goal_dim if vectorized else 1\n\n super().__init__(\n input_size=(\n self.observation_dim + self.action_dim + self.goal_dim + 1\n ),\n output_size=output_dim,\n **flatten_mlp_kwargs\n )\n self.env = env\n self.vectorized = vectorized\n self.structure = structure\n self.norm_order = norm_order\n self.learn_offset = learn_offset\n if learn_offset:\n self.offset_network = FlattenMlp(\n input_size=(\n self.observation_dim + self.action_dim + self.goal_dim + 1\n ),\n output_size=self.goal_dim if vectorized else 1,\n **flatten_mlp_kwargs\n )\n\n def forward(\n self,\n observations,\n actions,\n goals,\n num_steps_left,\n return_predictions=False\n ):\n predictions = super().forward(\n observations, actions, goals, num_steps_left\n )\n if return_predictions:\n return predictions\n\n if self.structure == 'norm_difference':\n output = - torch.abs(goals - predictions)\n elif self.structure == 'squared_difference':\n output = - (goals - predictions)**2\n elif self.structure == 'none':\n output = predictions\n else:\n raise TypeError(\"Invalid structure: {}\".format(self.structure))\n if not self.vectorized:\n output = - torch.norm(output, p=self.norm_order, dim=1, keepdim=True)\n\n if self.learn_offset:\n offset = self.offset_network(\n observations, actions, goals, num_steps_left\n )\n output = output + offset\n\n return output\n\nclass TdmVf(FlattenMlp):\n def __init__(\n self,\n env,\n vectorized,\n structure='norm_difference',\n observation_dim=None,\n goal_dim=None,\n norm_order=1,\n output_dim=None,\n **kwargs\n ):\n assert structure in [\n 'norm_difference',\n 'squared_difference',\n 'none',\n ]\n self.save_init_params(locals())\n\n if observation_dim is None:\n self.observation_dim = env.observation_space.low.size\n else:\n self.observation_dim = observation_dim\n\n if goal_dim is None:\n self.goal_dim = env.goal_dim\n else:\n self.goal_dim = goal_dim\n\n if output_dim is None:\n output_dim = self.goal_dim if vectorized else 1\n\n super().__init__(\n input_size= self.observation_dim + self.goal_dim + 1,\n output_size=output_dim,\n **kwargs\n )\n self.env = env\n self.vectorized = vectorized\n self.structure = structure\n self.norm_order = norm_order\n\n def 
forward(\n self,\n observations,\n goals,\n num_steps_left,\n ):\n predictions = super().forward(\n observations, goals, num_steps_left\n )\n\n if self.structure == 'norm_difference':\n output = - torch.abs(goals - predictions)\n elif self.structure == 'squared_difference':\n output = - (goals - predictions)**2\n elif self.structure == 'none':\n output = predictions\n else:\n raise TypeError(\"Invalid structure: {}\".format(self.structure))\n if not self.vectorized:\n output = - torch.norm(output, p=self.norm_order, dim=1, keepdim=True)\n\n return output\n\n\nclass TdmPolicy(TanhMlpPolicy):\n \"\"\"\n Rather than giving `g`, give `g - goalify(s)` as input.\n \"\"\"\n def __init__(\n self,\n env,\n observation_dim=None,\n action_dim=None,\n goal_dim=None,\n reward_scale=None,\n **kwargs\n ):\n self.save_init_params(locals())\n\n if observation_dim is None:\n self.observation_dim = env.observation_space.low.size\n else:\n self.observation_dim = observation_dim\n\n if action_dim is None:\n self.action_dim = env.action_space.low.size\n else:\n self.action_dim = action_dim\n\n if goal_dim is None:\n self.goal_dim = env.goal_dim\n else:\n self.goal_dim = goal_dim\n\n self.reward_scale = reward_scale\n\n super().__init__(\n input_size=self.observation_dim + self.goal_dim + 1,\n output_size=self.action_dim,\n **kwargs\n )\n self.env = env\n\n def forward(\n self,\n observations,\n goals,\n num_steps_left,\n return_preactivations=False,\n ):\n flat_input = torch.cat((observations, goals, num_steps_left), dim=1)\n return super().forward(\n flat_input,\n return_preactivations=return_preactivations,\n )\n\n def get_action(self, ob_np, goal_np, tau_np):\n actions = self.eval_np(\n ob_np[None],\n goal_np[None],\n tau_np[None],\n )\n return actions[0, :], {}\n\n def get_actions(self, ob_np, goal_np, tau_np):\n actions = self.eval_np(\n ob_np,\n goal_np,\n tau_np,\n )\n return actions\n\nclass StochasticTdmPolicy(TanhGaussianPolicy, UniversalPolicy):\n def __init__(\n self,\n env,\n observation_dim=None,\n action_dim=None,\n goal_dim=None,\n reward_scale=None,\n **kwargs\n ):\n self.save_init_params(locals())\n\n if observation_dim is None:\n self.observation_dim = env.observation_space.low.size\n else:\n self.observation_dim = observation_dim\n\n if action_dim is None:\n self.action_dim = env.action_space.low.size\n else:\n self.action_dim = action_dim\n\n if goal_dim is None:\n self.goal_dim = env.goal_dim\n else:\n self.goal_dim = goal_dim\n\n self.reward_scale = reward_scale\n\n super().__init__(\n obs_dim=self.observation_dim + self.goal_dim + 1,\n action_dim=self.action_dim,\n **kwargs\n )\n self.env = env\n\n def forward(\n self,\n observations,\n goals,\n num_steps_left,\n **kwargs\n ):\n flat_input = torch.cat((observations, goals, num_steps_left), dim=1)\n return super().forward(flat_input, **kwargs)\n\n def get_action(self, ob_np, goal_np, tau_np, deterministic=False):\n actions = self.get_actions(\n ob_np[None],\n goal_np[None],\n tau_np[None],\n deterministic=deterministic\n )\n return actions[0, :], {}\n\n def get_actions(self, obs_np, goals_np, taus_np, deterministic=False):\n return self.eval_np(\n obs_np, goals_np, taus_np, deterministic=deterministic\n )[0]\n"
] | [
[
"torch.cat",
"torch.abs",
"torch.norm"
]
] |
philhchen/OpenNMT-evidential-softmax | [
"87709ce1cf7bda783aed4a64c096fa23282e7aa9"
] | [
"onmt/modules/structured_attention.py"
] | [
"import torch.nn as nn\nimport torch\nimport torch.cuda\nfrom onmt.utils.logging import init_logger\n\n\nclass MatrixTree(nn.Module):\n \"\"\"Implementation of the matrix-tree theorem for computing marginals\n of non-projective dependency parsing. This attention layer is used\n in the paper \"Learning Structured Text Representations.\"\n\n\n :cite:`DBLP:journals/corr/LiuL17d`\n \"\"\"\n\n def __init__(self, eps=1e-5):\n self.eps = eps\n super(MatrixTree, self).__init__()\n\n def forward(self, input):\n laplacian = input.exp() + self.eps\n output = input.clone()\n for b in range(input.size(0)):\n lap = laplacian[b].masked_fill(torch.eye(input.size(1)).cuda().ne(0), 0)\n lap = -lap + torch.diag(lap.sum(0))\n # store roots on diagonal\n lap[0] = input[b].diag().exp()\n inv_laplacian = lap.inverse()\n\n factor = (\n inv_laplacian.diag().unsqueeze(1).expand_as(input[b]).transpose(0, 1)\n )\n term1 = input[b].exp().mul(factor).clone()\n term2 = input[b].exp().mul(inv_laplacian.transpose(0, 1)).clone()\n term1[:, 0] = 0\n term2[0] = 0\n output[b] = term1 - term2\n roots_output = input[b].diag().exp().mul(inv_laplacian.transpose(0, 1)[0])\n output[b] = output[b] + torch.diag(roots_output)\n return output\n\n\nif __name__ == \"__main__\":\n logger = init_logger(\"StructuredAttention.log\")\n dtree = MatrixTree()\n q = torch.rand(1, 5, 5).cuda()\n marg = dtree.forward(q)\n logger.info(marg.sum(1))\n"
] | [
[
"torch.rand",
"torch.diag"
]
] |
qtwang/SEAnet | [
"0cd38ff770bae8c4fe4f8d5f227a645b8c4c0ec9"
] | [
"util/data.py"
] | [
"# coding = utf-8\n\nimport os\nimport struct\nimport platform\nimport subprocess\nfrom os.path import isfile\nfrom pathlib import Path\nfrom ctypes import CDLL, c_char_p, c_long\nfrom _ctypes import dlclose\n\nimport torch\nimport numpy as np\nfrom torch.utils.data import Dataset\n\nfrom util.conf import Configuration\n \n\nclass TSDataset(Dataset):\n def __init__(self, data):\n self.data = data\n\n def __len__(self):\n return self.data.shape[0]\n \n def __getitem__(self, indices):\n return self.data[indices]\n\n\n\ndef getSamples(conf: Configuration):\n dim_series = conf.getHP('dim_series')\n size_train = conf.getHP('size_train')\n size_val = conf.getHP('size_val')\n device = conf.getHP('device')\n\n train_path = conf.getHP('train_path')\n val_path = conf.getHP('val_path')\n\n if os.path.exists(train_path) and os.path.exists(val_path):\n train_samples = torch.from_numpy(np.fromfile(train_path, dtype=np.float32, count=dim_series * size_train))\n val_samples = torch.from_numpy(np.fromfile(val_path, dtype=np.float32, count=dim_series * size_val))\n else:\n if conf.getHP('sampling_name') == 'coconut' or conf.getHP('sampling_name') == 'uniform':\n train_samples, val_samples = sample(conf)\n else:\n raise ValueError('sampling {:s} is not supported'.format(conf.getHP('sampling_name')))\n\n if conf.getHP('encoder') == 'gru' or conf.getHP('encoder') == 'lstm':\n train_samples = train_samples.view([-1, dim_series, 1])\n val_samples = val_samples.view([-1, dim_series, 1])\n else:\n train_samples = train_samples.view([-1, 1, dim_series])\n val_samples = val_samples.view([-1, 1, dim_series])\n\n train_samples = train_samples.to(device)\n val_samples = val_samples.to(device)\n\n return train_samples, val_samples\n\n\n# def loadTrainValCocunut(dataset_name, dataset_path, dataset_size, train_size, val_size, series_length=256, sax_length=16, sax_cardinality=8):\ndef sample(conf: Configuration):\n dataset_path = conf.getHP('database_path')\n\n train_path = conf.getHP('train_path')\n val_path = conf.getHP('val_path')\n train_indices_path = conf.getHP('train_indices_path')\n val_indices_path = conf.getHP('val_indices_path')\n\n os.makedirs(Path(train_path).parent, exist_ok=True)\n os.makedirs(Path(val_path).parent, exist_ok=True)\n os.makedirs(Path(train_indices_path).parent, exist_ok=True)\n os.makedirs(Path(val_indices_path).parent, exist_ok=True)\n\n dim_coconut = conf.getHP('dim_coconut')\n dim_series = conf.getHP('dim_series')\n size_train = conf.getHP('size_train')\n size_val = conf.getHP('size_val')\n size_db = conf.getHP('size_db')\n\n sampling_method = conf.getHP('sampling_name')\n\n if sampling_method == 'coconut':\n if not (os.path.exists(train_indices_path) and isfile(train_indices_path)) or not (os.path.exists(val_indices_path) and isfile(val_indices_path)):\n c_functions = CDLL(conf.getHP('coconut_libpath'))\n\n return_code = c_functions.sample_coconut(c_char_p(dataset_path.encode('ASCII')), \n c_long(size_db),\n c_char_p(train_indices_path.encode('ASCII')), \n size_train,\n c_char_p(val_indices_path.encode('ASCII')), \n size_val, \n dim_series, \n conf.getHP('coconut_cardinality'),\n dim_coconut)\n dlclose(c_functions._handle)\n \n if return_code != 0:\n print(return_code)\n elif sampling_method == 'uniform':\n if not (os.path.exists(train_indices_path) and isfile(train_indices_path)) or not (os.path.exists(val_indices_path) and isfile(val_indices_path)):\n train_sample_indices = np.random.randint(0, size_db, size=size_train, dtype=np.int64)\n val_samples_indices = np.random.randint(0, 
size_db, size=size_val, dtype=np.int64)\n\n train_sample_indices.tofile(train_indices_path)\n val_samples_indices.tofile(val_indices_path)\n else:\n raise ValueError('sampling {:s} is not supported'.format(sampling_method))\n\n train_sample_indices = np.fromfile(train_indices_path, dtype=np.int64)\n assert len(train_sample_indices) == size_train\n \n loaded = []\n for index in train_sample_indices:\n sequence = np.fromfile(dataset_path, dtype=np.float32, count=dim_series, offset=4 * dim_series * index)\n\n if not np.isnan(np.sum(sequence)):\n loaded.append(sequence) \n\n train_samples = np.asarray(loaded, dtype=np.float32)\n train_samples.tofile(train_path)\n train_samples = torch.from_numpy(train_samples)\n \n val_samples_indices = np.fromfile(val_indices_path, dtype=np.int64)\n assert len(val_samples_indices) == size_val\n\n loaded = []\n for index in val_samples_indices:\n sequence = np.fromfile(dataset_path, dtype=np.float32, count=dim_series, offset=4 * dim_series * index)\n\n if not np.isnan(np.sum(sequence)):\n loaded.append(sequence) \n\n val_samples = np.asarray(loaded, dtype=np.float32)\n val_samples.tofile(val_path)\n val_samples = torch.from_numpy(val_samples)\n\n return train_samples, val_samples\n\n\n\nclass FileContainer(object):\n def __init__(self, filename, binary=True):\n self.filename = filename\n self.binary = binary\n if self.binary:\n self.f = open(filename, \"wb\")\n else:\n self.f = open(filename, \"w\")\n\n def write(self, ts):\n if self.binary:\n s = struct.pack('f' * len(ts), *ts)\n self.f.write(s)\n else:\n self.f.write(\" \".join(map(str, ts)) + \"\\n\")\n\n def close(self):\n self.f.close()\n\n\n\ndef embedData(model, data_filepath, embedding_filepath, data_size, batch_size = 2000, original_dim = 256, \n embedded_dim = 16, device = 'cuda', is_rnn = False, encoder = ''):\n if encoder == 'gru' or encoder == 'lstm':\n is_rnn = True\n\n num_segments = int(data_size / batch_size)\n\n if data_size < batch_size:\n num_segments = 1\n batch_size = data_size\n else: \n assert data_size % batch_size == 0\n\n nan_replacement_original = np.array([0.] * original_dim).reshape([original_dim, 1] if is_rnn else [1, original_dim])\n nan_replacement_embedding = [0.] * embedded_dim\n\n writer = FileContainer(embedding_filepath)\n \n try:\n with torch.no_grad():\n total_nans = 0\n\n for segment in range(num_segments):\n batch = np.fromfile(data_filepath, dtype=np.float32, count=original_dim * batch_size, offset=4 * original_dim * batch_size * segment)\n\n if is_rnn:\n batch = batch.reshape([-1, original_dim, 1])\n else:\n batch = batch.reshape([-1, 1, original_dim])\n\n nan_indices = set()\n for i, sequence in zip(range(batch.shape[0]), batch):\n if np.isnan(np.sum(sequence)):\n nan_indices.add(i)\n batch[i] = nan_replacement_original\n\n embedding = model.encode(torch.from_numpy(batch).to(device)).detach().cpu().numpy()\n\n for i in nan_indices:\n embedding[i] = nan_replacement_embedding\n\n writer.write(embedding.flatten())\n \n total_nans += len(nan_indices)\n\n print('nans = {:d}'.format(total_nans))\n finally:\n writer.close()\n"
] | [
[
"numpy.sum",
"numpy.fromfile",
"torch.no_grad",
"numpy.asarray",
"torch.from_numpy",
"numpy.array",
"numpy.random.randint"
]
] |
ellenjkr/LattesQualis | [
"4fa149ea9e1c58e12b03bd1b88474a0cc2c6d534"
] | [
"lattes_qualis/Backup/lattes_qualis/lattes_qualis.py"
] | [
"import pandas as pd\nfrom autor import Author\nfrom excel import ExcelFile\nfrom individuos import Student, Egress\nfrom verifica_autores import em_lista_autores, trata_exceçoes\nfrom valores import ND, quadrennium\nfrom PyscopusModified import ScopusModified\nfrom pprint import pprint\nfrom excecoes import excecoes_artigos_scopus\n\ndef calcula_AE(data_frame, lista_egressos, lista_alunos):\n\tegressos_nomes = []\n\tfor egresso in lista_egressos:\n\t\tegressos_nomes.append(trata_exceçoes(egresso.name.strip()))\n\talunos_nomes = []\n\tfor aluno in lista_alunos:\n\t\talunos_nomes.append(trata_exceçoes(aluno.name.strip()))\n\n\tAE_quantidade = 0\n\tfor index, row in data_frame.iterrows():\n\t\tAE = False\n\t\tfor coluna in row.index:\n\t\t\tif \"Autor\" in str(coluna):\n\t\t\t\tif data_frame[coluna][index] != \"\":\n\t\t\t\t\tfor pos_egresso, egresso in enumerate(egressos_nomes):\n\t\t\t\t\t\tif data_frame[coluna][index] == egresso:\n\t\t\t\t\t\t\tif lista_egressos[pos_egresso].period[str(int(data_frame[\"Ano\"][index]))[2:]] == True:\n\t\t\t\t\t\t\t\tAE = True\n\n\t\t\t\t\tfor pos_aluno, aluno in enumerate(alunos_nomes):\n\t\t\t\t\t\tif data_frame[coluna][index] == aluno:\n\t\t\t\t\t\t\tif lista_alunos[pos_aluno].period[str(data_frame[\"Ano\"][index])[2:]] == True:\n\t\t\t\t\t\t\t\tAE = True\n\t\tif AE == True:\n\t\t\tAE_quantidade += 1\n\t\n\treturn AE_quantidade\n\ndef calcula_quantidade(data_frame, aux_porc, lista_egressos, lista_alunos):\n\tqtd_AE = calcula_AE(data_frame, lista_egressos, lista_alunos)\n\tqtd = len(data_frame.index)\n\n\tporc = f\"{aux_porc * qtd:.2f}%\"\n\n\ttry:\n\t\tporc_AE = f\"{100/qtd * qtd_AE:.2f}%\"\n\texcept ZeroDivisionError:\n\t\tporc_AE = \"0%\"\n\n\n\treturn (qtd, qtd_AE, porc, porc_AE)\n\ndef get_indicadores(info, lista_egressos, lista_alunos, geral = False):\n\tdata_frame = pd.DataFrame(info)\n\tporcentagens = []\n\ttotal_artigos = len(data_frame[\"Tipo\"])\n\tif total_artigos != 0:\n\t\taux_porc = 100/total_artigos\n\telse:\n\t\taux_porc = 0\n\tporcentagens_AE = []\n\n\tperiodicos = data_frame.loc[data_frame[\"Tipo\"] == \"Periódico\"]\n\tperiodicos, AE_periodicos, porc_periodicos, porc_AE_periodicos = calcula_quantidade(periodicos, aux_porc, lista_egressos, lista_alunos)\n\t\n\tanais = data_frame.loc[data_frame[\"Tipo\"] == \"Anais\"]\n\tanais, AE_anais, porc_anais, porc_AE_anais = calcula_quantidade(anais, aux_porc, lista_egressos, lista_alunos)\n\t\n\ta1 = data_frame.loc[data_frame[\"Qualis 2019\"] == \"A1\"]\n\ta1, AE_a1, porc_a1, porc_AE_a1 = calcula_quantidade(a1, aux_porc, lista_egressos, lista_alunos)\n\n\ta2 = data_frame.loc[data_frame[\"Qualis 2019\"] == \"A2\"]\n\ta2, AE_a2, porc_a2, porc_AE_a2 = calcula_quantidade(a2, aux_porc, lista_egressos, lista_alunos)\n\n\ta3 = data_frame.loc[data_frame[\"Qualis 2019\"] == \"A3\"]\n\ta3, AE_a3, porc_a3, porc_AE_a3 = calcula_quantidade(a3, aux_porc, lista_egressos, lista_alunos)\n\n\ta4 = data_frame.loc[data_frame[\"Qualis 2019\"] == \"A4\"]\n\ta4, AE_a4, porc_a4, porc_AE_a4 = calcula_quantidade(a4, aux_porc, lista_egressos, lista_alunos)\n\n\ta1_a4 = a1 + a2 + a3 + a4\n\tAE_a1_a4 = AE_a1 + AE_a2 + AE_a3 + AE_a4\n\tporc_a1_a4 = f\"{aux_porc * a1_a4:.2f}%\"\n\ttry:\n\t\tporc_AE_a1_a4 = f\"{100/a1_a4 * AE_a1_a4:.2f}%\"\n\texcept ZeroDivisionError:\n\t\tporc_AE_a1_a4 = \"0%\"\n\t\n\n\tb1 = data_frame.loc[data_frame[\"Qualis 2019\"] == \"B1\"]\n\tb1, AE_b1, porc_b1, porc_AE_b1 = calcula_quantidade(b1, aux_porc, lista_egressos, lista_alunos)\n\n\tb2 = data_frame.loc[data_frame[\"Qualis 2019\"] == 
\"B2\"]\n\tb2, AE_b2, porc_b2, porc_AE_b2 = calcula_quantidade(b2, aux_porc, lista_egressos, lista_alunos)\n\n\tb3 = data_frame.loc[data_frame[\"Qualis 2019\"] == \"B3\"]\n\tb3, AE_b3, porc_b3, porc_AE_b3 = calcula_quantidade(b3, aux_porc, lista_egressos, lista_alunos)\n\n\tb4 = data_frame.loc[data_frame[\"Qualis 2019\"] == \"B4\"]\n\tb4, AE_b4, porc_b4, porc_AE_b4 = calcula_quantidade(b4, aux_porc, lista_egressos, lista_alunos)\n\n\tb1_b4 = b1 + b2 + b3 + b4\n\tAE_b1_b4 = AE_b1 + AE_b2 + AE_b3 + AE_b4\n\tporc_b1_b4 = f\"{aux_porc * b1_b4:.2f}%\"\n\ttry:\n\t\tporc_AE_b1_b4 = f\"{100/b1_b4 * AE_b1_b4:.2f}%\"\n\texcept ZeroDivisionError:\n\t\tporc_AE_b1_b4 = \"0%\"\n\n\toutros = data_frame.loc[((data_frame[\"Qualis 2019\"] != \"A1\") & (data_frame[\"Qualis 2019\"] != \"A2\") & (data_frame[\"Qualis 2019\"] != \"A3\") & (data_frame[\"Qualis 2019\"] != \"A4\"))]\n\toutros = outros.loc[((outros[\"Qualis 2019\"] != \"B1\") & (outros[\"Qualis 2019\"] != \"B2\") & (outros[\"Qualis 2019\"] != \"B3\") & (outros[\"Qualis 2019\"] != \"B4\"))]\n\toutros, AE_outros, porc_outros, porc_AE_outros = calcula_quantidade(outros, aux_porc, lista_egressos, lista_alunos)\n\n\tporcentagens.append(porc_periodicos)\n\tporcentagens.append(porc_anais)\n\tporcentagens.append(porc_a1_a4)\n\tporcentagens.append(porc_a1)\n\tporcentagens.append(porc_a2)\n\tporcentagens.append(porc_a3)\n\tporcentagens.append(porc_a4)\n\tporcentagens.append(porc_b1_b4)\n\tporcentagens.append(porc_b1)\n\tporcentagens.append(porc_b2)\n\tporcentagens.append(porc_b3)\n\tporcentagens.append(porc_b4)\n\tporcentagens.append(porc_outros)\n\n\tporcentagens_AE.append(porc_AE_periodicos)\n\tporcentagens_AE.append(porc_AE_anais)\n\tporcentagens_AE.append(porc_AE_a1_a4)\n\tporcentagens_AE.append(porc_AE_a1)\n\tporcentagens_AE.append(porc_AE_a2)\n\tporcentagens_AE.append(porc_AE_a3)\n\tporcentagens_AE.append(porc_AE_a4)\n\tporcentagens_AE.append(porc_AE_b1_b4)\n\tporcentagens_AE.append(porc_AE_b1)\n\tporcentagens_AE.append(porc_AE_b2)\n\tporcentagens_AE.append(porc_AE_b3)\n\tporcentagens_AE.append(porc_AE_b4)\n\tporcentagens_AE.append(porc_AE_outros)\n\n\ttipo_qualis = [\"Periódicos\", \"Anais\", \"A1-A4\", \"A1\", \"A2\", \"A3\", \"A4\", \"B1-B4\", \"B1\", \"B2\", \"B3\", \"B4\", \"Outros\"]\n\ttabela = {\"Tipo/Qualis\": tipo_qualis, \"Quantidade\": [], \"Porcentagem\": [], \"Quantidade com alunos/egressos\":[], \"Porcentagem alunos/egressos\":[]}\n\n\ttabela[\"Tipo/Qualis\"].append(None)\n\ttabela[\"Tipo/Qualis\"].append(\"Índice\")\n\ttabela[\"Tipo/Qualis\"].append(\"Irestrito\")\n\ttabela[\"Tipo/Qualis\"].append(\"Igeral\")\n\n\ttabela[\"Quantidade\"].append(periodicos)\n\ttabela[\"Quantidade\"].append(anais)\n\ttabela[\"Quantidade\"].append(a1_a4)\n\ttabela[\"Quantidade\"].append(a1)\n\ttabela[\"Quantidade\"].append(a2)\n\ttabela[\"Quantidade\"].append(a3)\n\ttabela[\"Quantidade\"].append(a4)\n\ttabela[\"Quantidade\"].append(b1_b4)\n\ttabela[\"Quantidade\"].append(b1)\n\ttabela[\"Quantidade\"].append(b2)\n\ttabela[\"Quantidade\"].append(b3)\n\ttabela[\"Quantidade\"].append(b4)\n\ttabela[\"Quantidade\"].append(outros)\n\ttabela[\"Quantidade\"].append(None)\n\n\tIrestrito = a1 + (a2 * 0.875) + (a3 * 0.75) + (a4 * 0.625)\n\tif Irestrito != 0:\n\t\tIrestrito = round(Irestrito, 2)\n\t\tIrestrito_medio = round((Irestrito/ND), 2)\n\telse:\n\t\tIrestrito_medio = 0\n\n\tIgeral = Irestrito + (b1 * 0.5) + (b2 * 0.2) + (b3 * 0.1) + (b4 * 0.05)\n\tif Igeral != 0:\n\t\tIgeral = round(Igeral, 2)\n\t\tIgeral_medio = round((Igeral/ND), 2)\n\telse:\n\t\tIgeral_medio = 
0\n\ttabela[\"Quantidade\"].append(\"Acumulado\")\n\ttabela[\"Quantidade\"].append(Irestrito)\n\ttabela[\"Quantidade\"].append(Igeral)\n\n\ttabela[\"Quantidade com alunos/egressos\"].append(AE_periodicos)\n\ttabela[\"Quantidade com alunos/egressos\"].append(AE_anais)\n\ttabela[\"Quantidade com alunos/egressos\"].append(AE_a1_a4)\n\ttabela[\"Quantidade com alunos/egressos\"].append(AE_a1)\n\ttabela[\"Quantidade com alunos/egressos\"].append(AE_a2)\n\ttabela[\"Quantidade com alunos/egressos\"].append(AE_a3)\n\ttabela[\"Quantidade com alunos/egressos\"].append(AE_a4)\n\ttabela[\"Quantidade com alunos/egressos\"].append(AE_b1_b4)\n\ttabela[\"Quantidade com alunos/egressos\"].append(AE_b1)\n\ttabela[\"Quantidade com alunos/egressos\"].append(AE_b2)\n\ttabela[\"Quantidade com alunos/egressos\"].append(AE_b3)\n\ttabela[\"Quantidade com alunos/egressos\"].append(AE_b4)\n\ttabela[\"Quantidade com alunos/egressos\"].append(AE_outros)\n\ttabela[\"Quantidade com alunos/egressos\"].append(None)\n\ttabela[\"Quantidade com alunos/egressos\"].append(None)\n\ttabela[\"Quantidade com alunos/egressos\"].append(None)\n\ttabela[\"Quantidade com alunos/egressos\"].append(None)\n\n\ttabela[\"Porcentagem alunos/egressos\"] = porcentagens_AE\n\ttabela[\"Porcentagem alunos/egressos\"].append(None)\n\ttabela[\"Porcentagem alunos/egressos\"].append(None)\n\ttabela[\"Porcentagem alunos/egressos\"].append(None)\n\ttabela[\"Porcentagem alunos/egressos\"].append(None)\n\n\ttabela[\"Porcentagem\"] = porcentagens\n\ttabela[\"Porcentagem\"].append(None)\n\tif geral:\n\t\ttabela[\"Porcentagem\"].append(\"Média por docente\")\n\t\ttabela[\"Porcentagem\"].append(Irestrito_medio)\n\t\ttabela[\"Porcentagem\"].append(Igeral_medio)\n\telse:\n\t\ttabela[\"Porcentagem\"].append(None)\n\t\ttabela[\"Porcentagem\"].append(None)\n\t\ttabela[\"Porcentagem\"].append(None)\n\t\n\treturn pd.DataFrame(tabela)\n\ndef read_files():\n\t# Read files - People\n\ttry:\n\t\tprofessors = pd.read_csv(\"UNIVALI - PPGC - Professores.csv\", sep=\";\", encoding='iso-8859-1')\n\texcept:\n\t\tprofessors = pd.read_csv(\"UNIVALI - PPGC - Professores.csv\", sep=\";\", encoding='utf-8')\n\ttry:\n\t\tegress = pd.read_csv(\"planilha_egressos_lattes.CSV\", sep=\";\", encoding='iso-8859-1')\n\texcept:\n\t\tegress = pd.read_csv(\"planilha_egressos_lattes.CSV\", sep=\";\", encoding='utf-8')\n\ttry:\n\t\tstudents = pd.read_csv(\"Planilha - Levantamento alunos ativos.CSV\", sep=\";\", encoding='iso-8859-1')\n\texcept:\n\t\tstudents = pd.read_csv(\"Planilha - Levantamento alunos ativos.CSV\", sep=\";\", encoding='utf-8')\n\t\n\n\t# Read files - Qualis\n\ttry:\n\t\tqualis_cc2016_file = pd.read_csv(\"Qualis/QualisCC_2013_2016.csv\", sep=\";\", encoding='iso-8859-1')\n\texcept:\n\t\tqualis_cc2016_file = pd.read_csv(\"Qualis/QualisCC_2013_2016.csv\", sep=\";\", encoding='utf-8')\n\ttry:\n\t\tqualis_xx2020_file = pd.read_csv(\"Qualis/QualisXX_2020.csv\", sep=\";\", encoding='iso-8859-1')\n\texcept:\n\t\tqualis_xx2020_file = pd.read_csv(\"Qualis/QualisXX_2020.csv\", sep=\";\", encoding='utf-8')\n\ttry:\n\t\tqualis_cc2016_eventos = pd.read_csv(\"Qualis/QualisCC_eventos_2016.csv\", sep=\";\", encoding='iso-8859-1')\n\texcept:\n\t\tqualis_cc2016_eventos = pd.read_csv(\"Qualis/QualisCC_eventos_2016.csv\", sep=\";\", encoding='utf-8')\n\ttry:\n\t\tqualis_xx2020_eventos = pd.read_csv(\"Qualis/QualisXX_eventos_2020.csv\", sep=\";\", encoding='iso-8859-1')\n\texcept:\n\t\tqualis_xx2020_eventos = pd.read_csv(\"QualisXX_eventos_2020.csv\", sep=\";\", encoding='utf-8')\n\n\n\t# Read 
file - Exceptions\n\ttry:\n\t\texceptions = pd.read_csv(\"excecoes.csv\", sep=\";\", encoding='iso-8859-1')\n\texcept:\n\t\texceptions = pd.read_csv(\"excecoes.csv\", sep=\";\", encoding='utf-8')\n\n\treturn (professors, egress, students, qualis_cc2016_file, qualis_xx2020_file, qualis_cc2016_eventos, qualis_xx2020_eventos, exceptions)\n\n\nclass Data():\n\tdef __init__(self, professors, egress, students, qualis_2016, qualis_2020, qualis_2016_events, qualis_2020_events, exceptions):\n\t\tsuper(Data, self).__init__()\n\t\tself.professors = professors\n\t\tself.egress = egress\n\t\tself.students = students\n\t\tself.qualis_2016 = qualis_2016\n\t\tself.qualis_2020 = qualis_2020\n\t\tself.qualis_2016_events = qualis_2016_events\n\t\tself.qualis_2020_events = qualis_2020_events\n\t\tself.exceptions = exceptions\n\n\t\tself.reports = {'Author':[], 'Report':[]} # Reports by author\n\t\tself.authors_dict = {\"Author\":[], \"A/E\":[]} # Dictionary of authors (Professors, Students and Egress)\n\t\tself.art_prof = pd.DataFrame() # Articles by professor\n\t\tself.authors_mean = [] # List with the \"mean of authors by article\" of each professor\n\t\tself.authors_indicators = [] # Indicators of each professor\n\t\tself.general_indicators = [] # Indicators for all professors together\n\n\n\tdef treat_data(self):\n\t\t# Get the list of egress and students with their names and active-period\n\t\tegress = Egress(self.egress, quadrennium)\n\t\tself.egress_list = egress.get_egress_list()\n\t\tstudents = Student(self.students, quadrennium)\n\t\tself.students_list = students.get_students_list()\n\n\t\t# Lowercase events\n\t\tfor pos, i in enumerate(self.qualis_2016_events['Nome Padrão']):\n\t\t\tself.qualis_2016_events['Nome Padrão'][pos] = str(self.qualis_2016_events['Nome Padrão'][pos]).lower()\n\t\tfor pos, i in enumerate(self.qualis_2020_events['Nome Padrão']):\n\t\t\tself.qualis_2020_events['Nome Padrão'][pos] = str(self.qualis_2020_events['Nome Padrão'][pos]).lower()\n\n\t\t# Remove \"-\" from ISSN\n\t\tfor i in range(len(self.qualis_2016[\"ISSN\"])):\n\t\t\tself.qualis_2016[\"ISSN\"][i] = self.qualis_2016[\"ISSN\"][i].replace(\"-\", \"\")\n\t\tfor i in range(len(self.qualis_2020[\"ISSN\"])):\n\t\t\tself.qualis_2020[\"ISSN\"][i] = self.qualis_2020[\"ISSN\"][i].replace(\"-\", \"\")\n\n\n\tdef get_author_period(self, pos):\n\t\tperiod = {quadrennium[0]: False, quadrennium[1]: False, quadrennium[2]: False, quadrennium[3]: False}\n\t\tstart = str(self.professors[\"Início do Vínculo\"][pos])[8:]\n\t\tend = str(self.professors[\"Fim do Vínculo\"][pos])\n\n\t\tif end == \"-\":\n\t\t\tend = quadrennium[3]\n\t\telse:\n\t\t\tend = str(self.professors[\"Fim do Vínculo\"][pos])[8:]\n\n\t\tstart_position = None\n\t\tend_position = None\n\t\tfor pos, key in enumerate(period.keys()): # For each year of the quadrennium\n\t\t\tif pos == 0 and int(start) < int(quadrennium[0]): # If the start year is lower than the first year of the quadrennium\n\t\t\t\tstart = quadrennium[0]\n\t\t\tif key == start:\n\t\t\t\tstart_position = pos # The position of the start year on the quadrennium\n\t\t\tif key == end:\n\t\t\t\tend_position = pos # The position of the end year on the quadrennium\n\n\t\tfor pos, key in enumerate(period.keys()):\n\t\t\tif pos >= start_position and pos <= end_position: # The start year, the end year and the years in between are true\n\t\t\t\tperiod[key] = True\n\n\t\treturn period\n\n\n\tdef get_authors_reports(self):\n\t\t# Iterates through the professors \n\t\tfor pos, professor in 
enumerate(self.professors[\"Nome\"]):\n\t\t\tif str(professor) != 'nan':\n\t\t\t\tprofessor = str(professor)\n\n\t\t\t\tperiod = self.get_author_period(pos) # Get the period of valid publications\n\t\t\t\tauthor = Author(professor, period, self.qualis_2016, self.qualis_2020, self.qualis_2016_events, self.qualis_2020_events, self.professors, self.authors_dict[\"Author\"])\n\t\t\t\t# print(professor)\n\t\t\t\t# print(pd.DataFrame(author.info))\n\t\t\t\tself.authors_dict[\"Author\"] = author.lista_autores # Updates the authors list\n\t\t\t\tself.reports['Author'].append(professor) # Adds the professor to the list of reports\n\t\t\t\tself.reports['Report'].append(pd.DataFrame(author.info)) # Adds the professor's report to the list of reports\n\t\t\t\tself.authors_mean.append(author.get_media_autores()) # Adds the \"mean of authors by article\" to the list of means\n\n\tdef treat_names(self): # Looks for convergence between names written in different ways and replaces for the right name\n\t\tegress_names = []\n\t\tfor egress in self.egress_list:\n\t\t\tegress_names.append(trata_exceçoes(egress.name.strip()))\n\n\t\tstudents_names = []\n\t\tfor student in self.students_list:\n\t\t\tstudents_names.append(trata_exceçoes(student.name.strip()))\n\n\t\tfor pos, report in enumerate(self.reports[\"Report\"]):\n\t\t\t# df = pd.DataFrame(report)\n\t\t\t# for index, row in df.iterrows():\n\t\t\tfor index, row in report.iterrows():\n\t\t\t\tfor column in row.index:\n\t\t\t\t\tif \"Autor\" in str(column): # Goes through the authors columns\n\t\t\t\t\t\tif self.reports[\"Report\"][pos][column][index] != \" \":\n\t\t\t\t\t\t\t_, self.reports[\"Report\"][pos][column][index] = em_lista_autores(self.authors_dict[\"Author\"], str(self.reports[\"Report\"][pos][column][index]))\n\t\t\t\t\t\t\t_, self.reports[\"Report\"][pos][column][index] = em_lista_autores(egress_names, str(self.reports[\"Report\"][pos][column][index]))\n\t\t\t\t\t\t\t_, self.reports[\"Report\"][pos][column][index] = em_lista_autores(students_names, str(self.reports[\"Report\"][pos][column][index]))\n\t\n\tdef get_art_prof(self):\n\t\tfor pos, report in enumerate(self.reports[\"Report\"]):\n\t\t\tname_column = [self.reports[\"Author\"][pos] for i in range(len(report))] # Generates a column with the name of the author for each article\n\n\t\t\treport_copy = report.copy() # A copy of the report\n\t\t\treport_copy.insert(loc=0, column='Nome', value=name_column) # Adds the name_column\n\n\t\t\tif pos == 0:\n\t\t\t\tself.art_prof = report_copy\n\t\t\telse:\n\t\t\t\tself.art_prof = pd.concat([self.art_prof, report_copy], ignore_index=True, sort=False) # Puts the reports together, in one dataframe\n\t\n\t\t# Replace \"nan\" values with \" \"\n\t\tfor col in self.art_prof.columns:\n\t\t\tif \"Autor\" in col:\n\t\t\t\tfor pos, i in enumerate(self.art_prof[col]):\n\t\t\t\t\tif str(i) == \"NaN\" or str(i) == \"nan\":\n\t\t\t\t\t\tself.art_prof.loc[pos, col] = \" \"\n\n\n\tdef update_authors_dict(self):\n\t\tegress_names = []\n\t\tfor egress in self.egress_list:\n\t\t\tegress_names.append(trata_exceçoes(egress.name.strip())) # Gets the egress' name\n\n\t\tstudents_names = []\n\t\tfor student in self.students_list:\n\t\t\tstudents_names.append(trata_exceçoes(student.name.strip())) # Gets the student's name\n\n\t\t# Looks for egress or students and marks them with a X in the \"A/E\" column\n\t\tfor author in self.authors_dict[\"Author\"]:\n\t\t\tif author in egress_names or author in 
students_names:\n\t\t\t\tself.authors_dict[\"A/E\"].append(\"X\")\n\t\t\telse:\n\t\t\t\tself.authors_dict[\"A/E\"].append(\"\")\n\n\n\tdef get_indicators(self):\n\t\tfor report in self.reports[\"Report\"]:\n\t\t\tself.authors_indicators.append(get_indicadores(report, self.egress_list, self.students_list))\n\t\tself.general_indicators = get_indicadores(self.art_prof, self.egress_list, self.students_list, geral=True)\n\n\n\tdef analyze_journal_classifications(self):\n\t\tself.journals_a1_a4 = [] # Journals A1-A4\n\t\tself.journals_a1_a4_ae = [] # Journals A1-A4 with students and/or egress\n\n\t\tfor pos, report in enumerate(self.reports[\"Report\"]):\n\t\t\t# Separates by journal classifications \n\t\t\tjournals = report.loc[report[\"Tipo\"] == \"Periódico\"] # All the publications in journals\n\t\t\tjournals_a1 = journals.loc[journals[\"Qualis 2019\"] == \"A1\"]\n\t\t\tjournals_a2 = journals.loc[journals[\"Qualis 2019\"] == \"A2\"]\n\t\t\tjournals_a3 = journals.loc[journals[\"Qualis 2019\"] == \"A3\"]\n\t\t\tjournals_a4 = journals.loc[journals[\"Qualis 2019\"] == \"A4\"]\n\t\t\tjournals_a1_a4 = pd.concat([journals_a1, journals_a2, journals_a3, journals_a4], ignore_index=True, sort=False)\n\n\t\t\t# Calculates the amount of articles A1-A4 with and without students/egress\n\t\t\tamount_journals_a1_a4 = len(journals_a1_a4.index)\n\t\t\tself.journals_a1_a4.append(amount_journals_a1_a4)\n\t\t\tamount_journals_a1_a4_ae = calcula_AE(journals_a1_a4, self.egress_list, self.students_list)\n\t\t\tself.journals_a1_a4_ae.append(amount_journals_a1_a4_ae)\n\n\tdef analyze_journals(self):\n\t\tall_publications = self.art_prof.copy()\n\t\tall_publications = all_publications.drop_duplicates(subset=\"Título\")\n\n\t\tself.journals = all_publications.loc[all_publications[\"Tipo\"] == \"Periódico\"] # All the publications in journals\n\t\tself.journals.loc[:, 'Quantidade'] = self.journals[\"Nome de Publicação\"].map(self.journals[\"Nome de Publicação\"].value_counts()) # Calculates the number of times the journal appears and add that number to a column\n\t\t\n\t\tcolumns = [\"Nome de Publicação\", \"ISSN/SIGLA\", \"Qualis CC 2016\", \"Qualis 2019\", \"Scopus 2019\", \"Quantidade\"] # The columns we're gonna use\n\n\t\tdrop_columns = []\n\t\tfor column in self.journals.columns:\n\t\t\tif column not in columns:\n\t\t\t\tdrop_columns.append(column)\n\t\tself.journals = self.journals.drop(columns=drop_columns)\n\t\tself.journals = self.journals.rename(columns={\"ISSN/SIGLA\": \"ISSN\"})\n\t\tself.journals = self.journals.drop_duplicates(subset=\"ISSN\") # Drop all the duplicated journals\n\n\tdef analyze_journal_metrics(self):\n\t\tself.journal_metrics = pd.DataFrame(columns=[\"Métrica\", \"Qtd.\", \"Qtd. 
%\"])\n\t\tself.journal_metrics[\"Métrica\"] = [\"Quantidade de periódicos diferentes\", \"Quantidade de periódicos A1-A4\", \"Quantidade de periódicos B1-B4\", \"Quantidade de periódicos Não Qualis\"]\n\n\t\tamount = []\n\t\tamount_perc = []\n\t\tamount.append(len(self.journals.index))\n\n\t\tjournals_a1 = self.journals.loc[self.journals[\"Qualis 2019\"] == \"A1\"]\n\t\tjournals_a2 = self.journals.loc[self.journals[\"Qualis 2019\"] == \"A2\"]\n\t\tjournals_a3 = self.journals.loc[self.journals[\"Qualis 2019\"] == \"A3\"]\n\t\tjournals_a4 = self.journals.loc[self.journals[\"Qualis 2019\"] == \"A4\"]\n\t\tjournals_a1_a4 = pd.concat([journals_a1, journals_a2, journals_a3, journals_a4], ignore_index=True, sort=False)\n\t\tamount.append(len(journals_a1_a4.index))\n\n\t\tjournals_b1 = self.journals.loc[self.journals[\"Qualis 2019\"] == \"B1\"]\n\t\tjournals_b2 = self.journals.loc[self.journals[\"Qualis 2019\"] == \"B2\"]\n\t\tjournals_b3 = self.journals.loc[self.journals[\"Qualis 2019\"] == \"B3\"]\n\t\tjournals_b4 = self.journals.loc[self.journals[\"Qualis 2019\"] == \"B4\"]\n\t\tjournals_b1_b4 = pd.concat([journals_b1, journals_b2, journals_b3, journals_b4], ignore_index=True, sort=False)\n\t\tamount.append(len(journals_b1_b4.index))\n\n\t\tothers = self.journals.loc[((self.journals[\"Qualis 2019\"] == \"-\") | (self.journals[\"Qualis 2019\"] == \"NP\") | (self.journals[\"Qualis 2019\"] == \"C\"))]\n\t\tamount.append(len(others.index))\n\t\tself.journal_metrics[\"Qtd.\"] = amount\n\n\t\tfor i in self.journal_metrics[\"Qtd.\"]:\n\t\t\tamount_perc.append(f\"{round(100/self.journal_metrics['Qtd.'][0] * i, 1)}%\")\n\t\t\n\t\tself.journal_metrics[\"Qtd. %\"] = amount_perc\n\n\n\tdef analyze_proceedings(self):\n\t\tall_publications = self.art_prof.copy()\n\t\tall_publications = all_publications.drop_duplicates(subset=\"Título\")\n\n\t\tself.proceedings = all_publications.loc[all_publications[\"Tipo\"] == \"Anais\"]\n\t\tself.proceedings.loc[:, 'Quantidade'] = self.proceedings[\"Nome de Publicação\"].map(self.proceedings[\"Nome de Publicação\"].value_counts())\n\t\tcolumns = [\"Nome de Publicação\", \"ISSN/SIGLA\", \"Qualis CC 2016\", \"Qualis 2019\", \"Scopus 2019\", \"Quantidade\"]\n\t\tdrop_columns = []\n\t\tfor column in self.proceedings.columns:\n\t\t\tif column not in columns:\n\t\t\t\tdrop_columns.append(column)\n\t\tself.proceedings = self.proceedings.drop(columns=drop_columns)\n\t\tself.proceedings = self.proceedings.rename(columns={\"ISSN/SIGLA\": \"SIGLA\"})\n\t\tself.proceedings = self.proceedings.drop_duplicates(subset=\"SIGLA\")\n\n\n\tdef analyze_proceedings_metrics(self):\n\t\tself.proceedings_metrics = pd.DataFrame(columns=[\"Métrica\", \"Qtd.\", \"Qtd. 
%\"])\n\t\tself.proceedings_metrics[\"Métrica\"] = [\"Quantidade de eventos diferentes\", \"Quantidade de eventos A1-A4\", \"Quantidade de eventos B1-B4\", \"Quantidade de eventos Não Qualis\"]\n\n\t\tamount = []\n\t\tamount_perc = []\n\t\tamount.append(len(self.proceedings.index))\n\n\t\tproceedings_a1 = self.proceedings.loc[self.proceedings[\"Qualis 2019\"] == \"A1\"]\n\t\tproceedings_a2 = self.proceedings.loc[self.proceedings[\"Qualis 2019\"] == \"A2\"]\n\t\tproceedings_a3 = self.proceedings.loc[self.proceedings[\"Qualis 2019\"] == \"A3\"]\n\t\tproceedings_a4 = self.proceedings.loc[self.proceedings[\"Qualis 2019\"] == \"A4\"]\n\t\tproceedings_a1_a4 = pd.concat([proceedings_a1, proceedings_a2, proceedings_a3, proceedings_a4], ignore_index=True, sort=False)\n\t\tamount.append(len(proceedings_a1_a4.index))\n\n\t\tproceedings_b1 = self.proceedings.loc[self.proceedings[\"Qualis 2019\"] == \"B1\"]\n\t\tproceedings_b2 = self.proceedings.loc[self.proceedings[\"Qualis 2019\"] == \"B2\"]\n\t\tproceedings_b3 = self.proceedings.loc[self.proceedings[\"Qualis 2019\"] == \"B3\"]\n\t\tproceedings_b4 = self.proceedings.loc[self.proceedings[\"Qualis 2019\"] == \"B4\"]\n\t\tproceedings_b1_b4 = pd.concat([proceedings_b1, proceedings_b2, proceedings_b3, proceedings_b4], ignore_index=True, sort=False)\n\t\tamount.append(len(proceedings_b1_b4.index))\n\n\t\tothers = self.proceedings.loc[((self.proceedings[\"Qualis 2019\"] == \"-\") | (self.proceedings[\"Qualis 2019\"] == \"NP\") | (self.proceedings[\"Qualis 2019\"] == \"C\"))]\n\t\tamount.append(len(others.index))\n\n\t\tself.proceedings_metrics[\"Qtd.\"] = amount\n\n\t\tfor i in self.proceedings_metrics[\"Qtd.\"]:\n\t\t\tamount_perc.append(f\"{round(100/self.proceedings_metrics['Qtd.'][0] * i, 1)}%\")\n\t\t\n\t\tself.proceedings_metrics[\"Qtd. 
%\"] = amount_perc\n\n\tdef get_artppg(self):\n\t\tself.artppg = self.art_prof.copy().drop(columns=\"Nome\")\n\n\tdef get_scopus_citations(self):\n\t\t# scopus_articles = {\"Title\":[], \"Citations\":[]}\n\t\t# scopus = ScopusModified('2f8a856ea2c32c265b4c5a9895e6900d')\n\t\t# for pos, author_id in enumerate(self.professors[\"ID Scopus\"]):\n\t\t# \ttry:\n\t\t# \t\tsearch = scopus.search(f\"AU-ID ({author_id})\")\n\t\t# \t\tdocs_array = []\n\t\t# \t\tfor doc in search['scopus_id']: # Gets the documents\n\t\t# \t\t\tdocs_array.append(doc)\n\t\t\t\t\n\t\t# \t\t# ============== TO RETRIEVE MORE DATA THAN THE LIMIT ====================\n\t\t# \t\t# The limit is 25 by request\n\n\t\t# \t\tdone = 0\n\t\t# \t\tnot_done = 25\n\t\t# \t\tcitations_temp = []\n\t\t# \t\twhile not_done < len(docs_array) + 25: \n\t\t# \t\t\tcitations_temp.append(scopus.retrieve_citation(scopus_id_array=docs_array[done:not_done], year_range=[2017, 2020])) # Retrieve the citations data\n\t\t# \t\t\tdone += 25\n\t\t# \t\t\tnot_done += 25\n\t\t# \t\t# ========================================================================\n\n\t\t# \t\tcitations = citations_temp[0]\n\t\t# \t\tfor pos, citation in enumerate(citations_temp):\n\t\t# \t\t\tif pos != 0:\n\t\t# \t\t\t\tcitations = citations.append(citation, ignore_index = True)\n\t\t# \t\tfor pos2, titulo in enumerate(search['title']):\n\t\t# \t\t\ttitulo = excecoes_artigos_scopus(titulo)\n\t\t# \t\t\tscopus_articles[\"Titulo\"].append(titulo)\n\t\t# \t\t\tscopus_articles[\"Citações\"].append(citations[\"range_citation\"][pos2])\n\t\t# \texcept:\n\t\t# \t\tpass\n\t\tcitations = []\n\t\tfor title in self.artppg[\"Título\"]:\n\t\t\tcitations.append(\"-\")\n\t\t\t# pos = None\n\t\t\t# for i, title2 in enumerate(scopus_articles[\"Title\"]):\n\t\t\t# \tif title.lower().strip() in title2 or title2 in title.lower().strip():\n\t\t\t# \t\tpos = i\n\t\t\t# if pos != None:\n\t\t\t# \tcitations.append(str(scopus_articles[\"Citations\"][pos]))\n\t\t\t# else:\n\t\t\t# \tcitations.append(\"-\")\n\n\t\tself.artppg.insert(8, 'Citações', citations)\n\n\n\nif __name__ == '__main__':\n\t\n\tprofessors, egress, students, qualis_2016, qualis_2020, qualis_2016_events, qualis_2020_events, exceptions = read_files()\n\n\tdata = Data(professors, egress, students, qualis_2016, qualis_2020, qualis_2016_events, qualis_2020_events, exceptions)\n\tdata.treat_data()\n\tdata.get_authors_reports()\n\tdata.treat_names()\n\tdata.get_art_prof()\n\tdata.update_authors_dict()\n\tdata.get_indicators()\n\tdata.analyze_journal_classifications()\n\tdata.analyze_journals()\n\tdata.analyze_journal_metrics()\n\tdata.analyze_proceedings()\n\tdata.analyze_proceedings_metrics()\n\tdata.get_artppg()\n\tdata.get_scopus_citations()\n\n\t# excel = ExcelFile(relatorios, pd.DataFrame(dic_autores), art_prof_df, artppg, medias_autores, indicadores_autores, indicadores_geral, lista_egressos, lista_alunos, excecoes, per_a1_a4, per_a1_a4_ae, periodicos, anais, periodicos_metricas, anais_metricas)\n\texcel = ExcelFile(data.reports, pd.DataFrame(data.authors_dict), data.art_prof, data.artppg, data.authors_mean, data.authors_indicators, data.general_indicators, data.egress_list, data.students_list, data.exceptions, data.journals_a1_a4, data.journals_a1_a4_ae, data.journals, data.proceedings, data.journal_metrics, data.proceedings_metrics)\n\n\texcel.salva_arquivo()\n\n\t\n\t"
] | [
[
"pandas.read_csv",
"pandas.DataFrame",
"pandas.concat"
]
] |
saulgold/miflora | [
"dc19d812dda1fc90b43e3e3bd72841fd62ca4003"
] | [
"plot_utils.py"
] | [
"#https://blog.heptanalytics.com/flask-plotly-dashboard/\nimport plotly\nimport plotly.graph_objs as go\nimport pandas as pd\nimport numpy as np\nimport json\nfrom config_utils.config import load_configs\nimport pydevd_pycharm\nimport os\n#pydevd_pycharm.settrace('192.168.0.68', port=1025, stdoutToServer=True, stderrToServer=True)\n\ndef create_plot():\n \n configs = load_configs()\n data_dir = configs['data_dir']\n csvs = os.listdir(data_dir)\n\n csv_path = os.path.join(data_dir,csvs[0])\n sensor_data = pd.read_csv(csv_path)\n graphs = []\n for k in sensor_data.keys():\n if k != 'time':\n fig = go.Figure()\n fig.add_trace(\n go.Scatter(\n x=sensor_data['time'],\n y=sensor_data[k],\n name=csvs[0],\n )\n )\n for i,f in enumerate(csvs):\n if i !=0:\n csv_path = os.path.join(data_dir, f)\n sensor_data = pd.read_csv(csv_path)\n fig.add_trace(\n go.Scatter(\n x=sensor_data['time'],\n y=sensor_data[k],\n name=f\n )\n )\n\n\n\n fig.update_layout(title_text=k)\n data = fig\n graphJSON = json.dumps(data, cls=plotly.utils.PlotlyJSONEncoder)\n graphs.append(graphJSON)\n\n\n\n return graphs\ncreate_plot()"
] | [
[
"pandas.read_csv"
]
] |
lzlniu/Pubmed-Ref-Cite | [
"f6866a8882fcf36cf05056d4d1002a8213bf31f6"
] | [
"logcite.py"
] | [
"import sys\nimport numpy as np\nimport pandas as pd\n\nprint(\"input file:\", sys.argv[1])\nprint(\"output file:\", sys.argv[2])\ncitations = pd.read_csv(sys.argv[1], sep='\\t', header=None)\nalpha=float(sys.argv[3])\nprint(\"alpha:\", alpha)\npd.concat([citations[0], 1+np.log(alpha+citations[1])], axis=1, join=\"inner\").to_csv(sys.argv[2], sep='\\t', header=False, index=False)\nprint(\"successfully saved the output file\")\n\n"
] | [
[
"pandas.read_csv",
"numpy.log"
]
] |
TaoWangzj/PCFAN | [
"f6ddc8fd2e72a45431891acf0b25135499c84485"
] | [
"utils.py"
] | [
"\"\"\"\npaper: Pyramid Channel-based Feature Attention Network for image dehazing \nfile: utils.py\nabout: all utilities\nauthor: Tao Wang\ndate: 12/01/2021\n\"\"\"\n\n# --- Imports --- #\nimport time\nimport torch\nimport torch.nn.functional as F\nimport torchvision.utils as utils\nfrom math import log10\nfrom skimage import measure\nfrom torch.autograd import Variable\nimport os\n\ndef to_psnr(dehaze, gt):\n mse = F.mse_loss(dehaze, gt, reduction='none')\n mse_split = torch.split(mse, 1, dim=0)\n mse_list = [torch.mean(torch.squeeze(mse_split[ind])).item() for ind in range(len(mse_split))]\n\n intensity_max = 1.0\n psnr_list = [10.0 * log10(intensity_max / mse) for mse in mse_list]\n return psnr_list\n\n\ndef to_ssim_skimage(dehaze, gt):\n dehaze_list = torch.split(dehaze, 1, dim=0)\n gt_list = torch.split(gt, 1, dim=0)\n\n dehaze_list_np = [dehaze_list[ind].permute(0, 2, 3, 1).data.cpu().numpy().squeeze() for ind in range(len(dehaze_list))]\n gt_list_np = [gt_list[ind].permute(0, 2, 3, 1).data.cpu().numpy().squeeze() for ind in range(len(dehaze_list))]\n ssim_list = [measure.compare_ssim(dehaze_list_np[ind], gt_list_np[ind], data_range=1, multichannel=True) for ind in range(len(dehaze_list))]\n\n return ssim_list\n\n\ndef validation(net, val_data_loader, device, category, save_tag=False):\n \"\"\"\n :param net: PCFAN\n :param val_data_loader: validation loader\n :param device: The GPU that loads the network\n :param category: indoor or outdoor test dataset\n :param save_tag: tag of saving image or not\n :return: average PSNR value\n \"\"\"\n psnr_list = []\n ssim_list = []\n\n for batch_id, val_data in enumerate(val_data_loader):\n\n with torch.no_grad():\n haze, gt, image_name = Variable(val_data['hazy_image']), Variable(val_data['clear_image']),val_data['haze_name']\n haze = haze.to(device)\n gt = gt.to(device)\n dehaze = net(haze)\n\n # --- Calculate the average PSNR --- #\n psnr_list.extend(to_psnr(dehaze, gt))\n\n # --- Calculate the average SSIM --- #\n ssim_list.extend(to_ssim_skimage(dehaze, gt))\n\n # --- Save image --- #\n if save_tag:\n path = './results/{}_results'.format(category)\n if not os.path.exists(path):\n os.makedirs(path)\n save_image(dehaze, image_name, category)\n\n avr_psnr = sum(psnr_list) / len(psnr_list)\n avr_ssim = sum(ssim_list) / len(ssim_list)\n return avr_psnr, avr_ssim\n\n\ndef save_image(dehaze, image_name, category):\n dehaze_images = torch.split(dehaze, 1, dim=0)\n batch_num = len(dehaze_images)\n for ind in range(batch_num):\n utils.save_image(dehaze_images[ind], './results/{}_results/{}'.format(category, image_name[ind][:-3] + 'png'))\n\n\ndef print_log(epoch, num_epochs, train_psnr, val_psnr, val_ssim, category):\n print('Epoch [{0}/{1}], Train_PSNR:{2:.2f}, Val_PSNR:{3:.2f}, Val_SSIM:{4:.4f}'\n .format(epoch, num_epochs, train_psnr, val_psnr, val_ssim))\n\n # --- Write the training log --- #\n with open('./logs/{}_log.txt'.format(category), 'a') as f:\n print('Date: {0}s, Epoch: [{1}/{2}], Train_PSNR: {3:.2f}, Val_PSNR: {4:.2f}, Val_SSIM: {5:.4f}'\n .format(time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()),\n epoch, num_epochs, train_psnr, val_psnr, val_ssim), file=f)"
] | [
[
"torch.nn.functional.mse_loss",
"torch.split",
"torch.autograd.Variable",
"torch.no_grad",
"torch.squeeze"
]
] |
zy-zhou/MLCS | [
"478c95efb63d6c285a22c469fa7773e8f801052a"
] | [
"Utils.py"
] | [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jul 6 18:51:41 2019\r\n\r\n@author: Zhou\r\n\"\"\"\r\nimport json\r\nfrom math import exp\r\nimport numpy as np\r\nimport torch\r\nimport torch.nn.functional as F\r\nfrom nltk.translate.bleu_score import sentence_bleu, SmoothingFunction\r\nfrom nltk.translate.meteor_score import single_meteor_score\r\nfrom multiprocessing import Pool\r\nfrom time import time\r\nfrom rouge import Rouge\r\n\r\ndef parallel(func, data, workers=5, chunksize=None, star=False, **kwargs):\r\n print('Initializing multi-process...')\r\n begin = time()\r\n pool = Pool(workers, **kwargs)\r\n if star:\r\n results = pool.starmap(func, data, chunksize=chunksize)\r\n else:\r\n results = pool.map(func, data, chunksize=chunksize)\r\n pool.close()\r\n pool.join()\r\n gap = time() - begin\r\n print('Done.')\r\n print('Elapsed time: {} min {:.2f} sec'.format(int(gap // 60), gap % 60))\r\n return results\r\n\r\ndef load(path, is_json=False, key=None, encoding='utf-8', errors='ignore', drop_list=()):\r\n print('Loading...')\r\n with open(path, 'r', encoding=encoding, errors=errors) as f:\r\n lines = f.readlines()\r\n if not is_json:\r\n if not drop_list:\r\n return lines\r\n else:\r\n return [line for i, line in enumerate(lines) if not i in drop_list]\r\n \r\n if key is None:\r\n return [json.loads(line) for i, line in enumerate(lines) if not i in drop_list]\r\n else:\r\n return [json.loads(line)[key] for i, line in enumerate(lines) if not i in drop_list]\r\n\r\ndef save(data, path, is_json=False, encoding='utf-8'):\r\n print('Saving...')\r\n with open(path, 'w', encoding=encoding) as f:\r\n for line in data:\r\n if is_json:\r\n line = json.dumps(line)\r\n f.write(line + '\\n')\r\n\r\ndef sequence_mask(lengths, maxlen=None, dtype=None):\r\n maxlen = maxlen or lengths.max()\r\n row = torch.arange(maxlen, dtype=lengths.dtype, device=lengths.device)\r\n col = lengths.unsqueeze(-1)\r\n result = torch.lt(row, col)\r\n if dtype is not None:\r\n result = result.type(dtype)\r\n return result\r\n\r\ndef sequence_loss(logits_or_probs=None, targets=None, is_probs=False, pad_id=1, reduction='mean'):\r\n ''' shape of logits or probs: [batch_size, max_steps, vocab_size]\r\n shape of targets: [batch_size, max_steps] '''\r\n if reduction == 'none':\r\n batch_size = targets.shape[0]\r\n lengths = torch.count_nonzero(targets.not_equal(pad_id), -1)\r\n \r\n targets = targets.reshape(-1)\r\n outputs = logits_or_probs.view(-1, logits_or_probs.shape[-1])\r\n if is_probs:\r\n loss = F.nll_loss(outputs.log(), targets, ignore_index=pad_id, reduction=reduction)\r\n else:\r\n loss = F.cross_entropy(outputs, targets, ignore_index=pad_id, reduction=reduction)\r\n \r\n if reduction == 'none':\r\n loss = loss.reshape(batch_size, -1)\r\n loss = loss.sum(-1) / lengths\r\n return loss\r\n\r\ndef sample_with_temp(logits, sampling_temp=1.0, keep_topk=-1):\r\n ''' Select next tokens randomly from the top k possible next tokens.\r\n shape of logits: [batch_size, vocab_size]\r\n shape of returned tokens and scores: [batch_size, 1]'''\r\n if sampling_temp == 0.0 or keep_topk == 1:\r\n topk_scores, topk_ids = logits.topk(1, dim=-1)\r\n if sampling_temp > 0:\r\n topk_scores /= sampling_temp\r\n else:\r\n logits = torch.div(logits, sampling_temp)\r\n\r\n if keep_topk > 0:\r\n top_values, top_indices = torch.topk(logits, keep_topk, dim=1)\r\n kth_best = top_values[:, -1].view([-1, 1])\r\n kth_best = kth_best.repeat([1, logits.shape[1]]).float()\r\n ignore = torch.lt(logits, kth_best)\r\n logits = 
logits.masked_fill(ignore, float('-inf'))\r\n\r\n probs = F.softmax(logits, -1)\r\n dist = torch.distributions.Multinomial(probs=probs, total_count=1)\r\n# dist = torch.distributions.Multinomial(logits=logits, total_count=1)\r\n topk_ids = torch.argmax(dist.sample(), dim=1, keepdim=True)\r\n topk_scores = probs.gather(dim=1, index=topk_ids)\r\n# topk_scores = logits.gather(dim=1, index=topk_ids)\r\n return topk_ids, topk_scores\r\n\r\ndef sample_with_probs(probs, keep_topk=-1):\r\n ''' Select next tokens randomly from the top k possible next tokens.\r\n shape of probs: [batch_size, vocab_size]\r\n shape of returned tokens and scores: [batch_size, 1]'''\r\n if keep_topk == 1:\r\n topk_scores, topk_ids = probs.topk(1, dim=-1)\r\n else:\r\n if keep_topk > 0:\r\n top_values, top_indices = torch.topk(probs, keep_topk, dim=1)\r\n kth_best = top_values[:, -1].view([-1, 1])\r\n kth_best = kth_best.repeat([1, probs.shape[1]]).float()\r\n ignore = torch.lt(probs, kth_best)\r\n probs = probs.masked_fill(ignore, float('-inf'))\r\n\r\n dist = torch.distributions.Multinomial(probs=probs, total_count=1)\r\n topk_ids = torch.argmax(dist.sample(), dim=1, keepdim=True)\r\n topk_scores = probs.gather(dim=1, index=topk_ids)\r\n return topk_ids, topk_scores\r\n\r\ndef tile(x, count, dim=0):\r\n perm = list(range(x.dim()))\r\n if dim != 0:\r\n perm[0], perm[dim] = perm[dim], perm[0]\r\n x = x.permute(perm).contiguous()\r\n out_size = list(x.shape)\r\n out_size[0] *= count\r\n batch = x.shape[0]\r\n x = x.reshape(batch, -1).transpose(0, 1) \\\r\n .repeat(count, 1).transpose(0, 1) \\\r\n .reshape(*out_size)\r\n if dim != 0:\r\n x = x.permute(perm).contiguous()\r\n return x\r\n\r\ndef tuple_map(fn, t, **kwargs):\r\n if t is None:\r\n return None\r\n if type(t) not in {list, tuple}:\r\n return fn(t, **kwargs)\r\n return tuple(tuple_map(fn, s, **kwargs) for s in t)\r\n\r\ndef batch_bleu(hypotheses, references, smooth_method=3, n=4, average=True):\r\n ' expect tokenized inputs '\r\n assert len(hypotheses) == len(references)\r\n cc = SmoothingFunction()\r\n smooth = getattr(cc, 'method' + str(smooth_method))\r\n weights = [1. / n] * n\r\n scores = [sentence_bleu([ref], hyp, weights, smoothing_function=smooth) \\\r\n for hyp, ref in zip(hypotheses, references)]\r\n return np.mean(scores) if average else scores\r\n\r\ndef batch_meteor(hypotheses, references, alpha=0.85, beta=0.2, gamma=0.6, average=True):\r\n assert len(hypotheses) == len(references)\r\n scores = [single_meteor_score(ref, hyp, alpha=alpha, beta=beta, gamma=gamma) \\\r\n for hyp, ref in zip(hypotheses, references)]\r\n return np.mean(scores) if average else scores\r\n\r\ndef batch_rouge(hypotheses, references, metrics=['rouge-l'], average=True):\r\n assert len(hypotheses) == len(references)\r\n rouge = Rouge(metrics=metrics, max_n=4)\r\n if average:\r\n scores = rouge.get_scores(hypotheses, references)\r\n else:\r\n scores = [rouge.get_scores(hyp, ref) for hyp, ref in zip(hypotheses, references)]\r\n return scores\r\n\r\ndef perplexity(loss):\r\n return exp(min(loss, 100))\r\n\r\ndef bce_loss(logits, targets, reduction='mean'):\r\n return F.binary_cross_entropy_with_logits(logits, targets, reduction=reduction)"
] | [
[
"torch.distributions.Multinomial",
"torch.nn.functional.softmax",
"torch.lt",
"torch.div",
"torch.topk",
"torch.arange",
"torch.nn.functional.cross_entropy",
"numpy.mean",
"torch.nn.functional.binary_cross_entropy_with_logits"
]
] |
Shalmalee15/Differential_Reddening | [
"af2655484f0fc346e3967cf738b19bd4991c9842"
] | [
"my_reddening_code.py"
] | [
"# This is a part of the program which removes the effect of the Differential Reddening from the main sequence of the masive star clusters. \n# Reference: A. Milone et al (2012)\n# The steps: 1. Plot a CMD, 2. Rotate the main sequence using theta = A_Filter_1/(A_Filter_I - A_Filter_II); A = Absorption Coefficients (Ref. Jansen et al 1#994)\n\nimport math\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import *\nimport pandas as pd\n\n# Read the data from the data file\nmy_data = np.loadtxt('cluster.dat')\nx_data = my_data[:,2] # Separate the columns \ny_data = my_data[:,0] # Separate the columns\n#print(x_data, y_data)\n#print(x_data.shape, y_data.shape)\nprint(\"********* THIS IS CMD FOR NGC 1783 ************\")\nplt.figure()\nplt.scatter(x_data, y_data, 0.3, 'black')\nplt.xlim(0,3.0)\nplt.ylim(18, 23)\nplt.gca().invert_yaxis()\nplt.xlabel(\"Colour\")\nplt.ylabel(\"Magnitude\")\nplt.title('CMD of NGC 1783')\n# Choose an arbitrary point on CMD \nplt.show()\n\n#Calculate the rotation angle \n# theta = np.radians(1.0928526307169) # theta = Af435w/(Af435w - Af814w); A = Absorption Coefficients (Ref. Jansen et al 1994)\ntheta = 1.1780330682095217\nxcos = (x_data - 0.4) * np.cos(theta) # old_x * cos(theta)\nycos = (y_data - 20) * np.cos(theta) # old_y * cos(theta)\nxsin = (x_data - 0.4) * np.sin(theta) # old_x * sin(theta)\nysin = (y_data - 20) * np.sin(theta) # old_y * sin(theta)\n\nxx_data = xcos + ysin # Will store the new X_coordinates \nyy_data = -xsin + ycos # Will store the new Y_coordinates\nprint(xx_data, yy_data)\n\nprint(\"****************** THIS IS A TRIAL PLOT FOR DEREDDENING PROCESS ***************\")\nplt.figure()\nplt.scatter(xx_data, yy_data, 0.3, 'red')\nplt.xlim(-1,4)\nplt.ylim(0,0.8)\nplt.gca().invert_yaxis()\nplt.xlabel(\"abscissa\")\nplt.ylabel(\"ordinate\")\nplt.title('Rotated CMD')\nplt.show()\n\n# Define a dataframe for x data and y data\ndf= pd.DataFrame({'x_data':xx_data,'y_data':yy_data})\n# df1 will contain only those values which are in the range [X:-1 to 4] and [Y: 0 to 0.8]\ndf1 = df[(df['x_data']<=4.0) & (df['x_data']>= -1.0) & (df['y_data']<=0.8) & (df['y_data']>=0.0)]\n#print(df1)\nbins = np.linspace(0.0, 0.8, num=10) # These are number of bins\n#print(bins)\n# md will contain the x and y median points which will be calculated by the following iteration loop. \nmd = pd.DataFrame(np.zeros(((len(bins)-1), 2)), columns=['x_med','y_med']) # Save the median points after the loop calculation.\n#print(md)\n\n# Iteration over the bins to get the median points for each y bins\nfor i in range(len(bins)-1):\n tempdf = df1[(df1['y_data'] >= bins[i]) & (df1['y_data'] <= bins[i+1]) ]\n x_median = np.median(tempdf['x_data'])\n y_median = np.median(tempdf['y_data'])\n md.iloc[i]=[x_median, y_median]\n#print(md)\nprint(\"************* THIS IS FAMOUS FIDUCIAL LINE *****************\")\nplt.figure()\nplt.plot(md['x_med'], md['y_med'], 'black')\nplt.scatter(df1['x_data'], df1['y_data'], 0.3, zorder=0) # zorder= is used when you are lazy. \nplt.xlim(-1,4)\nplt.ylim(0.0, 0.78)\nplt.gca().invert_yaxis() # Again. When you are lazy, use this. Being lazy is good and easy. \nplt.title('Fiducial Line through the Main Sequence')\nplt.show()\n"
] | [
[
"numpy.sin",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.gca",
"pandas.DataFrame",
"numpy.cos",
"numpy.median",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.ylim",
"numpy.loadtxt",
"matplotlib.pyplot.plot",
"numpy.linspace",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.scatter"
]
] |
GProulx/ml-agents | [
"78a218b4254a74c5580aa3451ee9e391d65122ff"
] | [
"ml-agents/mlagents/trainers/sac/optimizer.py"
] | [
"import logging\nimport numpy as np\nfrom typing import Dict, List, Optional, Any, Mapping\n\nfrom mlagents.tf_utils import tf\n\nfrom mlagents.trainers.sac.network import SACPolicyNetwork, SACTargetNetwork\nfrom mlagents.trainers.models import LearningRateSchedule, EncoderType, ModelUtils\nfrom mlagents.trainers.common.tf_optimizer import TFOptimizer\nfrom mlagents.trainers.tf_policy import TFPolicy\nfrom mlagents.trainers.buffer import AgentBuffer\nfrom mlagents_envs.timers import timed\n\nEPSILON = 1e-6 # Small value to avoid divide by zero\n\nLOGGER = logging.getLogger(\"mlagents.trainers\")\n\nPOLICY_SCOPE = \"\"\nTARGET_SCOPE = \"target_network\"\n\n\nclass SACOptimizer(TFOptimizer):\n def __init__(self, policy: TFPolicy, trainer_params: Dict[str, Any]):\n \"\"\"\n Takes a Unity environment and model-specific hyper-parameters and returns the\n appropriate PPO agent model for the environment.\n :param brain: Brain parameters used to generate specific network graph.\n :param lr: Learning rate.\n :param lr_schedule: Learning rate decay schedule.\n :param h_size: Size of hidden layers\n :param init_entcoef: Initial value for entropy coefficient. Set lower to learn faster,\n set higher to explore more.\n :return: a sub-class of PPOAgent tailored to the environment.\n :param max_step: Total number of training steps.\n :param normalize: Whether to normalize vector observation input.\n :param use_recurrent: Whether to use an LSTM layer in the network.\n :param num_layers: Number of hidden layers between encoded input and policy & value layers\n :param tau: Strength of soft-Q update.\n :param m_size: Size of brain memory.\n \"\"\"\n # Create the graph here to give more granular control of the TF graph to the Optimizer.\n policy.create_tf_graph()\n\n with policy.graph.as_default():\n with tf.variable_scope(\"\"):\n super().__init__(policy, trainer_params)\n lr = float(trainer_params[\"learning_rate\"])\n lr_schedule = LearningRateSchedule(\n trainer_params.get(\"learning_rate_schedule\", \"constant\")\n )\n self.policy = policy\n self.act_size = self.policy.act_size\n h_size = int(trainer_params[\"hidden_units\"])\n max_step = float(trainer_params[\"max_steps\"])\n num_layers = int(trainer_params[\"num_layers\"])\n vis_encode_type = EncoderType(\n trainer_params.get(\"vis_encode_type\", \"simple\")\n )\n self.tau = trainer_params.get(\"tau\", 0.005)\n self.burn_in_ratio = float(trainer_params.get(\"burn_in_ratio\", 0.0))\n\n # Non-exposed SAC parameters\n self.discrete_target_entropy_scale = (\n 0.2\n ) # Roughly equal to e-greedy 0.05\n self.continuous_target_entropy_scale = 1.0\n\n self.init_entcoef = trainer_params.get(\"init_entcoef\", 1.0)\n stream_names = list(self.reward_signals.keys())\n # Use to reduce \"survivor bonus\" when using Curiosity or GAIL.\n self.gammas = [\n _val[\"gamma\"] for _val in trainer_params[\"reward_signals\"].values()\n ]\n self.use_dones_in_backup = {\n name: tf.Variable(1.0) for name in stream_names\n }\n self.disable_use_dones = {\n name: self.use_dones_in_backup[name].assign(0.0)\n for name in stream_names\n }\n\n if num_layers < 1:\n num_layers = 1\n\n self.target_init_op: List[tf.Tensor] = []\n self.target_update_op: List[tf.Tensor] = []\n self.update_batch_policy: Optional[tf.Operation] = None\n self.update_batch_value: Optional[tf.Operation] = None\n self.update_batch_entropy: Optional[tf.Operation] = None\n\n self.policy_network = SACPolicyNetwork(\n policy=self.policy,\n m_size=self.policy.m_size, # 3x policy.m_size\n h_size=h_size,\n 
normalize=self.policy.normalize,\n use_recurrent=self.policy.use_recurrent,\n num_layers=num_layers,\n stream_names=stream_names,\n vis_encode_type=vis_encode_type,\n )\n self.target_network = SACTargetNetwork(\n policy=self.policy,\n m_size=self.policy.m_size, # 1x policy.m_size\n h_size=h_size,\n normalize=self.policy.normalize,\n use_recurrent=self.policy.use_recurrent,\n num_layers=num_layers,\n stream_names=stream_names,\n vis_encode_type=vis_encode_type,\n )\n # The optimizer's m_size is 3 times the policy (Q1, Q2, and Value)\n self.m_size = 3 * self.policy.m_size\n self._create_inputs_and_outputs()\n self.learning_rate = ModelUtils.create_learning_rate(\n lr_schedule, lr, self.policy.global_step, int(max_step)\n )\n self._create_losses(\n self.policy_network.q1_heads,\n self.policy_network.q2_heads,\n lr,\n int(max_step),\n stream_names,\n discrete=not self.policy.use_continuous_act,\n )\n self._create_sac_optimizer_ops()\n\n self.selected_actions = (\n self.policy.selected_actions\n ) # For GAIL and other reward signals\n if self.policy.normalize:\n target_update_norm = self.target_network.copy_normalization(\n self.policy.running_mean,\n self.policy.running_variance,\n self.policy.normalization_steps,\n )\n # Update the normalization of the optimizer when the policy does.\n self.policy.update_normalization_op = tf.group(\n [self.policy.update_normalization_op, target_update_norm]\n )\n\n self.policy.initialize_or_load()\n\n self.stats_name_to_update_name = {\n \"Losses/Value Loss\": \"value_loss\",\n \"Losses/Policy Loss\": \"policy_loss\",\n \"Losses/Q1 Loss\": \"q1_loss\",\n \"Losses/Q2 Loss\": \"q2_loss\",\n \"Policy/Entropy Coeff\": \"entropy_coef\",\n \"Policy/Learning Rate\": \"learning_rate\",\n }\n\n self.update_dict = {\n \"value_loss\": self.total_value_loss,\n \"policy_loss\": self.policy_loss,\n \"q1_loss\": self.q1_loss,\n \"q2_loss\": self.q2_loss,\n \"entropy_coef\": self.ent_coef,\n \"entropy\": self.policy.entropy,\n \"update_batch\": self.update_batch_policy,\n \"update_value\": self.update_batch_value,\n \"update_entropy\": self.update_batch_entropy,\n \"learning_rate\": self.learning_rate,\n }\n\n def _create_inputs_and_outputs(self) -> None:\n \"\"\"\n Assign the higher-level SACModel's inputs and outputs to those of its policy or\n target network.\n \"\"\"\n self.vector_in = self.policy.vector_in\n self.visual_in = self.policy.visual_in\n self.next_vector_in = self.target_network.vector_in\n self.next_visual_in = self.target_network.visual_in\n self.sequence_length_ph = self.policy.sequence_length_ph\n self.next_sequence_length_ph = self.target_network.sequence_length_ph\n if not self.policy.use_continuous_act:\n self.action_masks = self.policy_network.action_masks\n else:\n self.output_pre = self.policy_network.output_pre\n\n # Don't use value estimate during inference.\n self.value = tf.identity(\n self.policy_network.value, name=\"value_estimate_unused\"\n )\n self.value_heads = self.policy_network.value_heads\n self.dones_holder = tf.placeholder(\n shape=[None], dtype=tf.float32, name=\"dones_holder\"\n )\n\n if self.policy.use_recurrent:\n self.memory_in = self.policy_network.memory_in\n self.memory_out = self.policy_network.memory_out\n if not self.policy.use_continuous_act:\n self.prev_action = self.policy_network.prev_action\n self.next_memory_in = self.target_network.memory_in\n\n def _create_losses(\n self,\n q1_streams: Dict[str, tf.Tensor],\n q2_streams: Dict[str, tf.Tensor],\n lr: tf.Tensor,\n max_step: int,\n stream_names: List[str],\n discrete: 
bool = False,\n ) -> None:\n \"\"\"\n Creates training-specific Tensorflow ops for SAC models.\n :param q1_streams: Q1 streams from policy network\n :param q1_streams: Q2 streams from policy network\n :param lr: Learning rate\n :param max_step: Total number of training steps.\n :param stream_names: List of reward stream names.\n :param discrete: Whether or not to use discrete action losses.\n \"\"\"\n\n if discrete:\n self.target_entropy = [\n self.discrete_target_entropy_scale * np.log(i).astype(np.float32)\n for i in self.act_size\n ]\n discrete_action_probs = tf.exp(self.policy.all_log_probs)\n per_action_entropy = discrete_action_probs * self.policy.all_log_probs\n else:\n self.target_entropy = (\n -1\n * self.continuous_target_entropy_scale\n * np.prod(self.act_size[0]).astype(np.float32)\n )\n\n self.rewards_holders = {}\n self.min_policy_qs = {}\n\n for name in stream_names:\n if discrete:\n _branched_mpq1 = self._apply_as_branches(\n self.policy_network.q1_pheads[name] * discrete_action_probs\n )\n branched_mpq1 = tf.stack(\n [\n tf.reduce_sum(_br, axis=1, keep_dims=True)\n for _br in _branched_mpq1\n ]\n )\n _q1_p_mean = tf.reduce_mean(branched_mpq1, axis=0)\n\n _branched_mpq2 = self._apply_as_branches(\n self.policy_network.q2_pheads[name] * discrete_action_probs\n )\n branched_mpq2 = tf.stack(\n [\n tf.reduce_sum(_br, axis=1, keep_dims=True)\n for _br in _branched_mpq2\n ]\n )\n _q2_p_mean = tf.reduce_mean(branched_mpq2, axis=0)\n\n self.min_policy_qs[name] = tf.minimum(_q1_p_mean, _q2_p_mean)\n else:\n self.min_policy_qs[name] = tf.minimum(\n self.policy_network.q1_pheads[name],\n self.policy_network.q2_pheads[name],\n )\n\n rewards_holder = tf.placeholder(\n shape=[None], dtype=tf.float32, name=\"{}_rewards\".format(name)\n )\n self.rewards_holders[name] = rewards_holder\n\n q1_losses = []\n q2_losses = []\n # Multiple q losses per stream\n expanded_dones = tf.expand_dims(self.dones_holder, axis=-1)\n for i, name in enumerate(stream_names):\n _expanded_rewards = tf.expand_dims(self.rewards_holders[name], axis=-1)\n\n q_backup = tf.stop_gradient(\n _expanded_rewards\n + (1.0 - self.use_dones_in_backup[name] * expanded_dones)\n * self.gammas[i]\n * self.target_network.value_heads[name]\n )\n\n if discrete:\n # We need to break up the Q functions by branch, and update them individually.\n branched_q1_stream = self._apply_as_branches(\n self.policy.selected_actions * q1_streams[name]\n )\n branched_q2_stream = self._apply_as_branches(\n self.policy.selected_actions * q2_streams[name]\n )\n\n # Reduce each branch into scalar\n branched_q1_stream = [\n tf.reduce_sum(_branch, axis=1, keep_dims=True)\n for _branch in branched_q1_stream\n ]\n branched_q2_stream = [\n tf.reduce_sum(_branch, axis=1, keep_dims=True)\n for _branch in branched_q2_stream\n ]\n\n q1_stream = tf.reduce_mean(branched_q1_stream, axis=0)\n q2_stream = tf.reduce_mean(branched_q2_stream, axis=0)\n\n else:\n q1_stream = q1_streams[name]\n q2_stream = q2_streams[name]\n\n _q1_loss = 0.5 * tf.reduce_mean(\n tf.to_float(self.policy.mask)\n * tf.squared_difference(q_backup, q1_stream)\n )\n\n _q2_loss = 0.5 * tf.reduce_mean(\n tf.to_float(self.policy.mask)\n * tf.squared_difference(q_backup, q2_stream)\n )\n\n q1_losses.append(_q1_loss)\n q2_losses.append(_q2_loss)\n\n self.q1_loss = tf.reduce_mean(q1_losses)\n self.q2_loss = tf.reduce_mean(q2_losses)\n\n # Learn entropy coefficient\n if discrete:\n # Create a log_ent_coef for each branch\n self.log_ent_coef = tf.get_variable(\n \"log_ent_coef\",\n dtype=tf.float32,\n 
initializer=np.log([self.init_entcoef] * len(self.act_size)).astype(\n np.float32\n ),\n trainable=True,\n )\n else:\n self.log_ent_coef = tf.get_variable(\n \"log_ent_coef\",\n dtype=tf.float32,\n initializer=np.log(self.init_entcoef).astype(np.float32),\n trainable=True,\n )\n\n self.ent_coef = tf.exp(self.log_ent_coef)\n if discrete:\n # We also have to do a different entropy and target_entropy per branch.\n branched_per_action_ent = self._apply_as_branches(per_action_entropy)\n branched_ent_sums = tf.stack(\n [\n tf.reduce_sum(_lp, axis=1, keep_dims=True) + _te\n for _lp, _te in zip(branched_per_action_ent, self.target_entropy)\n ],\n axis=1,\n )\n self.entropy_loss = -tf.reduce_mean(\n tf.to_float(self.policy.mask)\n * tf.reduce_mean(\n self.log_ent_coef\n * tf.squeeze(tf.stop_gradient(branched_ent_sums), axis=2),\n axis=1,\n )\n )\n\n # Same with policy loss, we have to do the loss per branch and average them,\n # so that larger branches don't get more weight.\n # The equivalent KL divergence from Eq 10 of Haarnoja et al. is also pi*log(pi) - Q\n branched_q_term = self._apply_as_branches(\n discrete_action_probs * self.policy_network.q1_p\n )\n\n branched_policy_loss = tf.stack(\n [\n tf.reduce_sum(self.ent_coef[i] * _lp - _qt, axis=1, keep_dims=True)\n for i, (_lp, _qt) in enumerate(\n zip(branched_per_action_ent, branched_q_term)\n )\n ]\n )\n self.policy_loss = tf.reduce_mean(\n tf.to_float(self.policy.mask) * tf.squeeze(branched_policy_loss)\n )\n\n # Do vbackup entropy bonus per branch as well.\n branched_ent_bonus = tf.stack(\n [\n tf.reduce_sum(self.ent_coef[i] * _lp, axis=1, keep_dims=True)\n for i, _lp in enumerate(branched_per_action_ent)\n ]\n )\n value_losses = []\n for name in stream_names:\n v_backup = tf.stop_gradient(\n self.min_policy_qs[name]\n - tf.reduce_mean(branched_ent_bonus, axis=0)\n )\n value_losses.append(\n 0.5\n * tf.reduce_mean(\n tf.to_float(self.policy.mask)\n * tf.squared_difference(\n self.policy_network.value_heads[name], v_backup\n )\n )\n )\n\n else:\n self.entropy_loss = -tf.reduce_mean(\n self.log_ent_coef\n * tf.to_float(self.policy.mask)\n * tf.stop_gradient(\n tf.reduce_sum(\n self.policy.all_log_probs + self.target_entropy,\n axis=1,\n keep_dims=True,\n )\n )\n )\n batch_policy_loss = tf.reduce_mean(\n self.ent_coef * self.policy.all_log_probs - self.policy_network.q1_p,\n axis=1,\n )\n self.policy_loss = tf.reduce_mean(\n tf.to_float(self.policy.mask) * batch_policy_loss\n )\n\n value_losses = []\n for name in stream_names:\n v_backup = tf.stop_gradient(\n self.min_policy_qs[name]\n - tf.reduce_sum(self.ent_coef * self.policy.all_log_probs, axis=1)\n )\n value_losses.append(\n 0.5\n * tf.reduce_mean(\n tf.to_float(self.policy.mask)\n * tf.squared_difference(\n self.policy_network.value_heads[name], v_backup\n )\n )\n )\n self.value_loss = tf.reduce_mean(value_losses)\n\n self.total_value_loss = self.q1_loss + self.q2_loss + self.value_loss\n\n self.entropy = self.policy_network.entropy\n\n def _apply_as_branches(self, concat_logits: tf.Tensor) -> List[tf.Tensor]:\n \"\"\"\n Takes in a concatenated set of logits and breaks it up into a list of non-concatenated logits, one per\n action branch\n \"\"\"\n action_idx = [0] + list(np.cumsum(self.act_size))\n branches_logits = [\n concat_logits[:, action_idx[i] : action_idx[i + 1]]\n for i in range(len(self.act_size))\n ]\n return branches_logits\n\n def _create_sac_optimizer_ops(self) -> None:\n \"\"\"\n Creates the Adam optimizers and update ops for SAC, including\n the policy, value, and 
entropy updates, as well as the target network update.\n \"\"\"\n policy_optimizer = self.create_optimizer_op(\n learning_rate=self.learning_rate, name=\"sac_policy_opt\"\n )\n entropy_optimizer = self.create_optimizer_op(\n learning_rate=self.learning_rate, name=\"sac_entropy_opt\"\n )\n value_optimizer = self.create_optimizer_op(\n learning_rate=self.learning_rate, name=\"sac_value_opt\"\n )\n\n self.target_update_op = [\n tf.assign(target, (1 - self.tau) * target + self.tau * source)\n for target, source in zip(\n self.target_network.value_vars, self.policy_network.value_vars\n )\n ]\n LOGGER.debug(\"value_vars\")\n self.print_all_vars(self.policy_network.value_vars)\n LOGGER.debug(\"targvalue_vars\")\n self.print_all_vars(self.target_network.value_vars)\n LOGGER.debug(\"critic_vars\")\n self.print_all_vars(self.policy_network.critic_vars)\n LOGGER.debug(\"q_vars\")\n self.print_all_vars(self.policy_network.q_vars)\n LOGGER.debug(\"policy_vars\")\n policy_vars = self.policy.get_trainable_variables()\n self.print_all_vars(policy_vars)\n\n self.target_init_op = [\n tf.assign(target, source)\n for target, source in zip(\n self.target_network.value_vars, self.policy_network.value_vars\n )\n ]\n\n self.update_batch_policy = policy_optimizer.minimize(\n self.policy_loss, var_list=policy_vars\n )\n\n # Make sure policy is updated first, then value, then entropy.\n with tf.control_dependencies([self.update_batch_policy]):\n self.update_batch_value = value_optimizer.minimize(\n self.total_value_loss, var_list=self.policy_network.critic_vars\n )\n # Add entropy coefficient optimization operation\n with tf.control_dependencies([self.update_batch_value]):\n self.update_batch_entropy = entropy_optimizer.minimize(\n self.entropy_loss, var_list=self.log_ent_coef\n )\n\n def print_all_vars(self, variables):\n for _var in variables:\n LOGGER.debug(_var)\n\n @timed\n def update(self, batch: AgentBuffer, num_sequences: int) -> Dict[str, float]:\n \"\"\"\n Updates model using buffer.\n :param num_sequences: Number of trajectories in batch.\n :param batch: Experience mini-batch.\n :param update_target: Whether or not to update target value network\n :param reward_signal_batches: Minibatches to use for updating the reward signals,\n indexed by name. If none, don't update the reward signals.\n :return: Output from update process.\n \"\"\"\n feed_dict = self._construct_feed_dict(self.policy, batch, num_sequences)\n stats_needed = self.stats_name_to_update_name\n update_stats: Dict[str, float] = {}\n update_vals = self._execute_model(feed_dict, self.update_dict)\n for stat_name, update_name in stats_needed.items():\n update_stats[stat_name] = update_vals[update_name]\n # Update target network. By default, target update happens at every policy update.\n self.sess.run(self.target_update_op)\n return update_stats\n\n def update_reward_signals(\n self, reward_signal_minibatches: Mapping[str, Dict], num_sequences: int\n ) -> Dict[str, float]:\n \"\"\"\n Only update the reward signals.\n :param reward_signal_batches: Minibatches to use for updating the reward signals,\n indexed by name. 
If none, don't update the reward signals.\n \"\"\"\n # Collect feed dicts for all reward signals.\n feed_dict: Dict[tf.Tensor, Any] = {}\n update_dict: Dict[str, tf.Tensor] = {}\n update_stats: Dict[str, float] = {}\n stats_needed: Dict[str, str] = {}\n if reward_signal_minibatches:\n self.add_reward_signal_dicts(\n feed_dict,\n update_dict,\n stats_needed,\n reward_signal_minibatches,\n num_sequences,\n )\n update_vals = self._execute_model(feed_dict, update_dict)\n for stat_name, update_name in stats_needed.items():\n update_stats[stat_name] = update_vals[update_name]\n return update_stats\n\n def add_reward_signal_dicts(\n self,\n feed_dict: Dict[tf.Tensor, Any],\n update_dict: Dict[str, tf.Tensor],\n stats_needed: Dict[str, str],\n reward_signal_minibatches: Mapping[str, Dict],\n num_sequences: int,\n ) -> None:\n \"\"\"\n Adds the items needed for reward signal updates to the feed_dict and stats_needed dict.\n :param feed_dict: Feed dict needed update\n :param update_dit: Update dict that needs update\n :param stats_needed: Stats needed to get from the update.\n :param reward_signal_minibatches: Minibatches to use for updating the reward signals,\n indexed by name.\n \"\"\"\n for name, r_batch in reward_signal_minibatches.items():\n feed_dict.update(\n self.reward_signals[name].prepare_update(\n self.policy, r_batch, num_sequences\n )\n )\n update_dict.update(self.reward_signals[name].update_dict)\n stats_needed.update(self.reward_signals[name].stats_name_to_update_name)\n\n def _construct_feed_dict(\n self, policy: TFPolicy, batch: AgentBuffer, num_sequences: int\n ) -> Dict[tf.Tensor, Any]:\n \"\"\"\n Builds the feed dict for updating the SAC model.\n :param model: The model to update. May be different when, e.g. using multi-GPU.\n :param batch: Mini-batch to use to update.\n :param num_sequences: Number of LSTM sequences in batch.\n \"\"\"\n # Do an optional burn-in for memories\n num_burn_in = int(self.burn_in_ratio * self.policy.sequence_length)\n burn_in_mask = np.ones((self.policy.sequence_length), dtype=np.float32)\n burn_in_mask[range(0, num_burn_in)] = 0\n burn_in_mask = np.tile(burn_in_mask, num_sequences)\n feed_dict = {\n policy.batch_size_ph: num_sequences,\n policy.sequence_length_ph: self.policy.sequence_length,\n self.next_sequence_length_ph: self.policy.sequence_length,\n self.policy.mask_input: batch[\"masks\"] * burn_in_mask,\n }\n for name in self.reward_signals:\n feed_dict[self.rewards_holders[name]] = batch[\"{}_rewards\".format(name)]\n\n if self.policy.use_continuous_act:\n feed_dict[self.policy_network.external_action_in] = batch[\"actions\"]\n else:\n feed_dict[policy.output] = batch[\"actions\"]\n if self.policy.use_recurrent:\n feed_dict[policy.prev_action] = batch[\"prev_action\"]\n feed_dict[policy.action_masks] = batch[\"action_mask\"]\n if self.policy.use_vec_obs:\n feed_dict[policy.vector_in] = batch[\"vector_obs\"]\n feed_dict[self.next_vector_in] = batch[\"next_vector_in\"]\n if self.policy.vis_obs_size > 0:\n for i, _ in enumerate(policy.visual_in):\n _obs = batch[\"visual_obs%d\" % i]\n feed_dict[policy.visual_in[i]] = _obs\n for i, _ in enumerate(self.next_visual_in):\n _obs = batch[\"next_visual_obs%d\" % i]\n feed_dict[self.next_visual_in[i]] = _obs\n if self.policy.use_recurrent:\n feed_dict[policy.memory_in] = [\n batch[\"memory\"][i]\n for i in range(0, len(batch[\"memory\"]), self.policy.sequence_length)\n ]\n feed_dict[self.policy_network.memory_in] = self._make_zero_mem(\n self.m_size, batch.num_experiences\n )\n 
feed_dict[self.target_network.memory_in] = self._make_zero_mem(\n self.m_size // 3, batch.num_experiences\n )\n feed_dict[self.dones_holder] = batch[\"done\"]\n return feed_dict\n"
] | [
[
"numpy.ones",
"numpy.tile",
"numpy.cumsum",
"numpy.log",
"numpy.prod"
]
] |
casassg/tfx | [
"07690d4581e44485b05be1c95cc1502c7b952911"
] | [
"tfx/orchestration/experimental/core/task_manager_test.py"
] | [
"# Copyright 2020 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for tfx.orchestration.experimental.core.task_manager.\"\"\"\n\nimport contextlib\nimport functools\nimport os\nimport threading\n\nfrom absl import logging\nfrom absl.testing.absltest import mock\nimport tensorflow as tf\nfrom tfx.orchestration import data_types_utils\nfrom tfx.orchestration import metadata\nfrom tfx.orchestration.experimental.core import async_pipeline_task_gen as asptg\nfrom tfx.orchestration.experimental.core import constants\nfrom tfx.orchestration.experimental.core import pipeline_state as pstate\nfrom tfx.orchestration.experimental.core import service_jobs\nfrom tfx.orchestration.experimental.core import task as task_lib\nfrom tfx.orchestration.experimental.core import task_manager as tm\nfrom tfx.orchestration.experimental.core import task_queue as tq\nfrom tfx.orchestration.experimental.core import task_scheduler as ts\nfrom tfx.orchestration.experimental.core import test_utils\nfrom tfx.proto.orchestration import execution_result_pb2\nfrom tfx.proto.orchestration import pipeline_pb2\nfrom tfx.utils import status as status_lib\n\nfrom ml_metadata.proto import metadata_store_pb2\n\n\ndef _test_exec_node_task(node_id, pipeline_id, pipeline=None):\n node_uid = task_lib.NodeUid(\n pipeline_uid=task_lib.PipelineUid(pipeline_id=pipeline_id),\n node_id=node_id)\n return test_utils.create_exec_node_task(node_uid, pipeline=pipeline)\n\n\ndef _test_cancel_node_task(node_id, pipeline_id):\n node_uid = task_lib.NodeUid(\n pipeline_uid=task_lib.PipelineUid(pipeline_id=pipeline_id),\n node_id=node_id)\n return task_lib.CancelNodeTask(node_uid=node_uid)\n\n\nclass _Collector:\n\n def __init__(self):\n self._lock = threading.Lock()\n self.scheduled_tasks = []\n self.cancelled_tasks = []\n\n def add_scheduled_task(self, task):\n with self._lock:\n self.scheduled_tasks.append(task)\n\n def add_cancelled_task(self, task):\n with self._lock:\n self.cancelled_tasks.append(task)\n\n\nclass _FakeTaskScheduler(ts.TaskScheduler):\n\n def __init__(self, block_nodes, collector, **kwargs):\n super(_FakeTaskScheduler, self).__init__(**kwargs)\n # For these nodes, `schedule` will block until `cancel` is called.\n self._block_nodes = block_nodes\n self._collector = collector\n self._cancel = threading.Event()\n\n def schedule(self):\n logging.info('_FakeTaskScheduler: scheduling task: %s', self.task)\n self._collector.add_scheduled_task(self.task)\n if self.task.node_uid.node_id in self._block_nodes:\n self._cancel.wait()\n code = status_lib.Code.CANCELLED\n else:\n code = status_lib.Code.OK\n return ts.TaskSchedulerResult(\n status=status_lib.Status(\n code=code, message='_FakeTaskScheduler result'))\n\n def cancel(self):\n logging.info('_FakeTaskScheduler: cancelling task: %s', self.task)\n self._collector.add_cancelled_task(self.task)\n self._cancel.set()\n\n\nclass TaskManagerTest(test_utils.TfxTest):\n\n def setUp(self):\n super(TaskManagerTest, self).setUp()\n\n # Create a 
pipeline IR containing deployment config for testing.\n deployment_config = pipeline_pb2.IntermediateDeploymentConfig()\n executor_spec = pipeline_pb2.ExecutorSpec.PythonClassExecutorSpec(\n class_path='trainer.TrainerExecutor')\n deployment_config.executor_specs['Trainer'].Pack(executor_spec)\n deployment_config.executor_specs['Transform'].Pack(executor_spec)\n deployment_config.executor_specs['Evaluator'].Pack(executor_spec)\n pipeline = pipeline_pb2.Pipeline()\n pipeline.nodes.add().pipeline_node.node_info.id = 'Trainer'\n pipeline.nodes.add().pipeline_node.node_info.id = 'Transform'\n pipeline.nodes.add().pipeline_node.node_info.id = 'Evaluator'\n pipeline.pipeline_info.id = 'test-pipeline'\n pipeline.deployment_config.Pack(deployment_config)\n\n ts.TaskSchedulerRegistry.clear()\n\n self._deployment_config = deployment_config\n self._pipeline = pipeline\n self._type_url = deployment_config.executor_specs['Trainer'].type_url\n\n @contextlib.contextmanager\n def _task_manager(self, task_queue):\n with tm.TaskManager(\n mock.Mock(),\n task_queue,\n max_active_task_schedulers=1000,\n max_dequeue_wait_secs=0.1,\n process_all_queued_tasks_before_exit=True) as task_manager:\n yield task_manager\n\n @mock.patch.object(tm, '_publish_execution_results')\n def test_task_handling(self, mock_publish):\n collector = _Collector()\n\n # Register a fake task scheduler.\n ts.TaskSchedulerRegistry.register(\n self._type_url,\n functools.partial(\n _FakeTaskScheduler,\n block_nodes={'Trainer', 'Transform'},\n collector=collector))\n\n task_queue = tq.TaskQueue()\n\n # Enqueue some tasks.\n trainer_exec_task = _test_exec_node_task(\n 'Trainer', 'test-pipeline', pipeline=self._pipeline)\n task_queue.enqueue(trainer_exec_task)\n task_queue.enqueue(_test_cancel_node_task('Trainer', 'test-pipeline'))\n\n with self._task_manager(task_queue) as task_manager:\n # Enqueue more tasks after task manager starts.\n transform_exec_task = _test_exec_node_task(\n 'Transform', 'test-pipeline', pipeline=self._pipeline)\n task_queue.enqueue(transform_exec_task)\n evaluator_exec_task = _test_exec_node_task(\n 'Evaluator', 'test-pipeline', pipeline=self._pipeline)\n task_queue.enqueue(evaluator_exec_task)\n task_queue.enqueue(_test_cancel_node_task('Transform', 'test-pipeline'))\n\n self.assertTrue(task_manager.done())\n self.assertIsNone(task_manager.exception())\n\n # Ensure that all exec and cancellation tasks were processed correctly.\n self.assertCountEqual(\n [trainer_exec_task, transform_exec_task, evaluator_exec_task],\n collector.scheduled_tasks)\n self.assertCountEqual([trainer_exec_task, transform_exec_task],\n collector.cancelled_tasks)\n\n result_ok = ts.TaskSchedulerResult(\n status=status_lib.Status(\n code=status_lib.Code.OK, message='_FakeTaskScheduler result'))\n result_cancelled = ts.TaskSchedulerResult(\n status=status_lib.Status(\n code=status_lib.Code.CANCELLED,\n message='_FakeTaskScheduler result'))\n mock_publish.assert_has_calls([\n mock.call(\n mlmd_handle=mock.ANY,\n task=trainer_exec_task,\n result=result_cancelled),\n mock.call(\n mlmd_handle=mock.ANY,\n task=transform_exec_task,\n result=result_cancelled),\n mock.call(\n mlmd_handle=mock.ANY, task=evaluator_exec_task, result=result_ok),\n ],\n any_order=True)\n self.assertLen(mock_publish.mock_calls, 3)\n\n @mock.patch.object(tm, '_publish_execution_results')\n def test_exceptions_are_surfaced(self, mock_publish):\n\n def _publish(**kwargs):\n task = kwargs['task']\n assert task_lib.is_exec_node_task(task)\n if task.node_uid.node_id == 
'Transform':\n raise ValueError('test error')\n return mock.DEFAULT\n\n mock_publish.side_effect = _publish\n\n collector = _Collector()\n\n # Register a fake task scheduler.\n ts.TaskSchedulerRegistry.register(\n self._type_url,\n functools.partial(\n _FakeTaskScheduler, block_nodes={}, collector=collector))\n\n task_queue = tq.TaskQueue()\n\n with self._task_manager(task_queue) as task_manager:\n transform_task = _test_exec_node_task(\n 'Transform', 'test-pipeline', pipeline=self._pipeline)\n trainer_task = _test_exec_node_task(\n 'Trainer', 'test-pipeline', pipeline=self._pipeline)\n task_queue.enqueue(transform_task)\n task_queue.enqueue(trainer_task)\n\n self.assertTrue(task_manager.done())\n exception = task_manager.exception()\n self.assertIsNotNone(exception)\n self.assertIsInstance(exception, tm.TasksProcessingError)\n self.assertLen(exception.errors, 1)\n self.assertEqual('test error', str(exception.errors[0]))\n\n self.assertCountEqual([transform_task, trainer_task],\n collector.scheduled_tasks)\n result_ok = ts.TaskSchedulerResult(\n status=status_lib.Status(\n code=status_lib.Code.OK, message='_FakeTaskScheduler result'))\n mock_publish.assert_has_calls([\n mock.call(mlmd_handle=mock.ANY, task=transform_task, result=result_ok),\n mock.call(mlmd_handle=mock.ANY, task=trainer_task, result=result_ok),\n ],\n any_order=True)\n\n\nclass _FakeComponentScheduler(ts.TaskScheduler):\n\n def __init__(self, return_result, exception, **kwargs):\n super(_FakeComponentScheduler, self).__init__(**kwargs)\n self.exception = exception\n self.return_result = return_result\n\n def schedule(self):\n if self.exception:\n raise self.exception\n return self.return_result\n\n def cancel(self):\n pass\n\n\ndef _make_executor_output(task, code=status_lib.Code.OK, msg=''):\n assert task_lib.is_exec_node_task(task)\n executor_output = execution_result_pb2.ExecutorOutput()\n for key, artifacts in task.output_artifacts.items():\n for artifact in artifacts:\n executor_output.output_artifacts[key].artifacts.add().CopyFrom(\n artifact.mlmd_artifact)\n executor_output.execution_result.code = code\n executor_output.execution_result.result_message = msg\n return executor_output\n\n\nclass TaskManagerE2ETest(test_utils.TfxTest):\n \"\"\"Test end-to-end from task generation to publication of results to MLMD.\"\"\"\n\n def setUp(self):\n super(TaskManagerE2ETest, self).setUp()\n pipeline_root = os.path.join(\n os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),\n self.id())\n\n # Makes sure multiple connections within a test always connect to the same\n # MLMD instance.\n metadata_path = os.path.join(pipeline_root, 'metadata', 'metadata.db')\n self._metadata_path = metadata_path\n connection_config = metadata.sqlite_metadata_connection_config(\n metadata_path)\n connection_config.sqlite.SetInParent()\n self._mlmd_connection = metadata.Metadata(\n connection_config=connection_config)\n\n # Sets up the pipeline.\n pipeline = pipeline_pb2.Pipeline()\n self.load_proto_from_text(\n os.path.join(\n os.path.dirname(__file__), 'testdata', 'async_pipeline.pbtxt'),\n pipeline)\n\n # Extracts components.\n self._example_gen = pipeline.nodes[0].pipeline_node\n self._transform = pipeline.nodes[1].pipeline_node\n self._trainer = pipeline.nodes[2].pipeline_node\n\n # Pack deployment config for testing.\n deployment_config = pipeline_pb2.IntermediateDeploymentConfig()\n executor_spec = pipeline_pb2.ExecutorSpec.PythonClassExecutorSpec(\n class_path='fake.ClassPath')\n 
deployment_config.executor_specs[self._trainer.node_info.id].Pack(\n executor_spec)\n deployment_config.executor_specs[self._transform.node_info.id].Pack(\n executor_spec)\n self._type_url = deployment_config.executor_specs[\n self._trainer.node_info.id].type_url\n pipeline.deployment_config.Pack(deployment_config)\n self._pipeline = pipeline\n self._pipeline_info = pipeline.pipeline_info\n self._pipeline_runtime_spec = pipeline.runtime_spec\n self._pipeline_runtime_spec.pipeline_root.field_value.string_value = (\n pipeline_root)\n\n ts.TaskSchedulerRegistry.clear()\n self._task_queue = tq.TaskQueue()\n\n # Run fake example-gen to prepare downstreams component triggers.\n test_utils.fake_example_gen_run(self._mlmd_connection, self._example_gen, 1,\n 1)\n\n # Task generator should produce a task to run transform.\n with self._mlmd_connection as m:\n pipeline_state = pstate.PipelineState.new(m, self._pipeline)\n tasks = asptg.AsyncPipelineTaskGenerator(\n m, pipeline_state, self._task_queue.contains_task_id,\n service_jobs.DummyServiceJobManager()).generate()\n self.assertLen(tasks, 1)\n self._task = tasks[0]\n self.assertEqual('my_transform', self._task.node_uid.node_id)\n self.assertTrue(os.path.exists(self._task.stateful_working_dir))\n self._output_artifact_uri = self._task.output_artifacts['transform_graph'][\n 0].uri\n self.assertTrue(os.path.exists(self._output_artifact_uri))\n self._task_queue.enqueue(self._task)\n\n # There should be 1 active execution in MLMD.\n with self._mlmd_connection as m:\n executions = m.store.get_executions()\n active_executions = [\n e for e in executions\n if e.last_known_state == metadata_store_pb2.Execution.RUNNING\n ]\n self.assertLen(active_executions, 1)\n\n # Active execution id.\n self._execution_id = active_executions[0].id\n\n def _register_task_scheduler(self, return_result, exception=None):\n ts.TaskSchedulerRegistry.register(\n self._type_url,\n functools.partial(\n _FakeComponentScheduler,\n return_result=return_result,\n exception=exception))\n\n def _run_task_manager(self):\n with self._mlmd_connection as m:\n with tm.TaskManager(\n m,\n self._task_queue,\n 1000,\n max_dequeue_wait_secs=0.1,\n process_all_queued_tasks_before_exit=True) as task_manager:\n pass\n return task_manager\n\n def _get_execution(self):\n with self._mlmd_connection as m:\n executions = m.store.get_executions_by_id([self._execution_id])\n return executions[0]\n\n def test_successful_execution_resulting_in_executor_output(self):\n # Register a fake task scheduler that returns a successful execution result\n # and `OK` task scheduler status.\n self._register_task_scheduler(\n ts.TaskSchedulerResult(\n status=status_lib.Status(code=status_lib.Code.OK),\n output=ts.ExecutorNodeOutput(\n executor_output=_make_executor_output(self._task, code=0))))\n task_manager = self._run_task_manager()\n self.assertTrue(task_manager.done())\n self.assertIsNone(task_manager.exception())\n\n # Check that the task was processed and MLMD execution marked successful.\n self.assertTrue(self._task_queue.is_empty())\n execution = self._get_execution()\n self.assertEqual(metadata_store_pb2.Execution.COMPLETE,\n execution.last_known_state)\n\n # Check that stateful working dir is removed.\n self.assertFalse(os.path.exists(self._task.stateful_working_dir))\n # Output artifact URI remains as execution was successful.\n self.assertTrue(os.path.exists(self._output_artifact_uri))\n\n def test_successful_execution_resulting_in_output_artifacts(self):\n # Register a fake task scheduler that returns a 
successful execution result\n # and `OK` task scheduler status.\n self._register_task_scheduler(\n ts.TaskSchedulerResult(\n status=status_lib.Status(code=status_lib.Code.OK),\n output=ts.ImporterNodeOutput(\n output_artifacts=self._task.output_artifacts)))\n task_manager = self._run_task_manager()\n self.assertTrue(task_manager.done())\n self.assertIsNone(task_manager.exception())\n\n # Check that the task was processed and MLMD execution marked successful.\n self.assertTrue(self._task_queue.is_empty())\n execution = self._get_execution()\n self.assertEqual(metadata_store_pb2.Execution.COMPLETE,\n execution.last_known_state)\n\n # Check that stateful working dir is removed.\n self.assertFalse(os.path.exists(self._task.stateful_working_dir))\n # Output artifact URI remains as execution was successful.\n self.assertTrue(os.path.exists(self._output_artifact_uri))\n\n def test_scheduler_failure(self):\n # Register a fake task scheduler that returns a failure status.\n self._register_task_scheduler(\n ts.TaskSchedulerResult(\n status=status_lib.Status(\n code=status_lib.Code.ABORTED, message='foobar error')))\n task_manager = self._run_task_manager()\n self.assertTrue(task_manager.done())\n self.assertIsNone(task_manager.exception())\n\n # Check that the task was processed and MLMD execution marked failed.\n self.assertTrue(self._task_queue.is_empty())\n execution = self._get_execution()\n self.assertEqual(metadata_store_pb2.Execution.FAILED,\n execution.last_known_state)\n self.assertEqual(\n 'foobar error',\n data_types_utils.get_metadata_value(\n execution.custom_properties[constants.EXECUTION_ERROR_MSG_KEY]))\n\n # Check that stateful working dir and output artifact URI are removed.\n self.assertFalse(os.path.exists(self._task.stateful_working_dir))\n self.assertFalse(os.path.exists(self._output_artifact_uri))\n\n def test_executor_failure(self):\n # Register a fake task scheduler that returns success but the executor\n # was cancelled.\n self._register_task_scheduler(\n ts.TaskSchedulerResult(\n status=status_lib.Status(code=status_lib.Code.OK),\n output=ts.ExecutorNodeOutput(\n executor_output=_make_executor_output(\n self._task,\n code=status_lib.Code.FAILED_PRECONDITION,\n msg='foobar error'))))\n task_manager = self._run_task_manager()\n self.assertTrue(task_manager.done())\n self.assertIsNone(task_manager.exception())\n\n # Check that the task was processed and MLMD execution marked failed.\n self.assertTrue(self._task_queue.is_empty())\n execution = self._get_execution()\n self.assertEqual(metadata_store_pb2.Execution.FAILED,\n execution.last_known_state)\n self.assertEqual(\n 'foobar error',\n data_types_utils.get_metadata_value(\n execution.custom_properties[constants.EXECUTION_ERROR_MSG_KEY]))\n\n # Check that stateful working dir and output artifact URI are removed.\n self.assertFalse(os.path.exists(self._task.stateful_working_dir))\n self.assertFalse(os.path.exists(self._output_artifact_uri))\n\n def test_scheduler_raises_exception(self):\n # Register a fake task scheduler that raises an exception in `schedule`.\n self._register_task_scheduler(None, exception=ValueError('test exception'))\n task_manager = self._run_task_manager()\n self.assertTrue(task_manager.done())\n self.assertIsNone(task_manager.exception())\n\n # Check that the task was processed and MLMD execution marked failed.\n self.assertTrue(self._task_queue.is_empty())\n execution = self._get_execution()\n self.assertEqual(metadata_store_pb2.Execution.FAILED,\n execution.last_known_state)\n\n # Check that stateful 
working dir and output artifact URI are removed.\n self.assertFalse(os.path.exists(self._task.stateful_working_dir))\n self.assertFalse(os.path.exists(self._output_artifact_uri))\n\n\nif __name__ == '__main__':\n tf.test.main()\n"
] | [
[
"tensorflow.test.main"
]
] |
Dev514/scikit-learn | [
"a13bad38182cab956b073dd1a48821e32180fd6c"
] | [
"sklearn/manifold/tests/test_t_sne.py"
] | [
"import sys\nfrom io import StringIO\nimport numpy as np\nfrom numpy.testing import assert_allclose\nimport scipy.sparse as sp\nimport pytest\n\nfrom sklearn.neighbors import NearestNeighbors\nfrom sklearn.neighbors import kneighbors_graph\nfrom sklearn.exceptions import EfficiencyWarning\nfrom sklearn.utils._testing import ignore_warnings\nfrom sklearn.utils._testing import assert_almost_equal\nfrom sklearn.utils._testing import assert_array_equal\nfrom sklearn.utils._testing import assert_array_almost_equal\nfrom sklearn.utils._testing import skip_if_32bit\nfrom sklearn.utils import check_random_state\nfrom sklearn.manifold._t_sne import _joint_probabilities\nfrom sklearn.manifold._t_sne import _joint_probabilities_nn\nfrom sklearn.manifold._t_sne import _kl_divergence\nfrom sklearn.manifold._t_sne import _kl_divergence_bh\nfrom sklearn.manifold._t_sne import _gradient_descent\nfrom sklearn.manifold._t_sne import trustworthiness\nfrom sklearn.manifold import TSNE\n\n# mypy error: Module 'sklearn.manifold' has no attribute '_barnes_hut_tsne'\nfrom sklearn.manifold import _barnes_hut_tsne # type: ignore\nfrom sklearn.manifold._utils import _binary_search_perplexity\nfrom sklearn.datasets import make_blobs\nfrom scipy.optimize import check_grad\nfrom scipy.spatial.distance import pdist\nfrom scipy.spatial.distance import squareform\nfrom sklearn.metrics.pairwise import pairwise_distances\nfrom sklearn.metrics.pairwise import manhattan_distances\nfrom sklearn.metrics.pairwise import cosine_distances\n\n\nx = np.linspace(0, 1, 10)\nxx, yy = np.meshgrid(x, x)\nX_2d_grid = np.hstack(\n [\n xx.ravel().reshape(-1, 1),\n yy.ravel().reshape(-1, 1),\n ]\n)\n\npytestmark = pytest.mark.filterwarnings(\n \"ignore:The PCA initialization in TSNE will change to have the standard deviation\",\n)\n\n\ndef test_gradient_descent_stops():\n # Test stopping conditions of gradient descent.\n class ObjectiveSmallGradient:\n def __init__(self):\n self.it = -1\n\n def __call__(self, _, compute_error=True):\n self.it += 1\n return (10 - self.it) / 10.0, np.array([1e-5])\n\n def flat_function(_, compute_error=True):\n return 0.0, np.ones(1)\n\n # Gradient norm\n old_stdout = sys.stdout\n sys.stdout = StringIO()\n try:\n _, error, it = _gradient_descent(\n ObjectiveSmallGradient(),\n np.zeros(1),\n 0,\n n_iter=100,\n n_iter_without_progress=100,\n momentum=0.0,\n learning_rate=0.0,\n min_gain=0.0,\n min_grad_norm=1e-5,\n verbose=2,\n )\n finally:\n out = sys.stdout.getvalue()\n sys.stdout.close()\n sys.stdout = old_stdout\n assert error == 1.0\n assert it == 0\n assert \"gradient norm\" in out\n\n # Maximum number of iterations without improvement\n old_stdout = sys.stdout\n sys.stdout = StringIO()\n try:\n _, error, it = _gradient_descent(\n flat_function,\n np.zeros(1),\n 0,\n n_iter=100,\n n_iter_without_progress=10,\n momentum=0.0,\n learning_rate=0.0,\n min_gain=0.0,\n min_grad_norm=0.0,\n verbose=2,\n )\n finally:\n out = sys.stdout.getvalue()\n sys.stdout.close()\n sys.stdout = old_stdout\n assert error == 0.0\n assert it == 11\n assert \"did not make any progress\" in out\n\n # Maximum number of iterations\n old_stdout = sys.stdout\n sys.stdout = StringIO()\n try:\n _, error, it = _gradient_descent(\n ObjectiveSmallGradient(),\n np.zeros(1),\n 0,\n n_iter=11,\n n_iter_without_progress=100,\n momentum=0.0,\n learning_rate=0.0,\n min_gain=0.0,\n min_grad_norm=0.0,\n verbose=2,\n )\n finally:\n out = sys.stdout.getvalue()\n sys.stdout.close()\n sys.stdout = old_stdout\n assert error == 0.0\n assert it == 10\n 
assert \"Iteration 10\" in out\n\n\ndef test_binary_search():\n # Test if the binary search finds Gaussians with desired perplexity.\n random_state = check_random_state(0)\n data = random_state.randn(50, 5)\n distances = pairwise_distances(data).astype(np.float32)\n desired_perplexity = 25.0\n P = _binary_search_perplexity(distances, desired_perplexity, verbose=0)\n P = np.maximum(P, np.finfo(np.double).eps)\n mean_perplexity = np.mean(\n [np.exp(-np.sum(P[i] * np.log(P[i]))) for i in range(P.shape[0])]\n )\n assert_almost_equal(mean_perplexity, desired_perplexity, decimal=3)\n\n\ndef test_binary_search_underflow():\n # Test if the binary search finds Gaussians with desired perplexity.\n # A more challenging case than the one above, producing numeric\n # underflow in float precision (see issue #19471 and PR #19472).\n random_state = check_random_state(42)\n data = random_state.randn(1, 90).astype(np.float32) + 100\n desired_perplexity = 30.0\n P = _binary_search_perplexity(data, desired_perplexity, verbose=0)\n perplexity = 2 ** -np.nansum(P[0, 1:] * np.log2(P[0, 1:]))\n assert_almost_equal(perplexity, desired_perplexity, decimal=3)\n\n\ndef test_binary_search_neighbors():\n # Binary perplexity search approximation.\n # Should be approximately equal to the slow method when we use\n # all points as neighbors.\n n_samples = 200\n desired_perplexity = 25.0\n random_state = check_random_state(0)\n data = random_state.randn(n_samples, 2).astype(np.float32, copy=False)\n distances = pairwise_distances(data)\n P1 = _binary_search_perplexity(distances, desired_perplexity, verbose=0)\n\n # Test that when we use all the neighbors the results are identical\n n_neighbors = n_samples - 1\n nn = NearestNeighbors().fit(data)\n distance_graph = nn.kneighbors_graph(n_neighbors=n_neighbors, mode=\"distance\")\n distances_nn = distance_graph.data.astype(np.float32, copy=False)\n distances_nn = distances_nn.reshape(n_samples, n_neighbors)\n P2 = _binary_search_perplexity(distances_nn, desired_perplexity, verbose=0)\n\n indptr = distance_graph.indptr\n P1_nn = np.array(\n [\n P1[k, distance_graph.indices[indptr[k] : indptr[k + 1]]]\n for k in range(n_samples)\n ]\n )\n assert_array_almost_equal(P1_nn, P2, decimal=4)\n\n # Test that the highest P_ij are the same when fewer neighbors are used\n for k in np.linspace(150, n_samples - 1, 5):\n k = int(k)\n topn = k * 10 # check the top 10 * k entries out of k * k entries\n distance_graph = nn.kneighbors_graph(n_neighbors=k, mode=\"distance\")\n distances_nn = distance_graph.data.astype(np.float32, copy=False)\n distances_nn = distances_nn.reshape(n_samples, k)\n P2k = _binary_search_perplexity(distances_nn, desired_perplexity, verbose=0)\n assert_array_almost_equal(P1_nn, P2, decimal=2)\n idx = np.argsort(P1.ravel())[::-1]\n P1top = P1.ravel()[idx][:topn]\n idx = np.argsort(P2k.ravel())[::-1]\n P2top = P2k.ravel()[idx][:topn]\n assert_array_almost_equal(P1top, P2top, decimal=2)\n\n\ndef test_binary_perplexity_stability():\n # Binary perplexity search should be stable.\n # The binary_search_perplexity had a bug wherein the P array\n # was uninitialized, leading to sporadically failing tests.\n n_neighbors = 10\n n_samples = 100\n random_state = check_random_state(0)\n data = random_state.randn(n_samples, 5)\n nn = NearestNeighbors().fit(data)\n distance_graph = nn.kneighbors_graph(n_neighbors=n_neighbors, mode=\"distance\")\n distances = distance_graph.data.astype(np.float32, copy=False)\n distances = distances.reshape(n_samples, n_neighbors)\n last_P = None\n 
desired_perplexity = 3\n for _ in range(100):\n P = _binary_search_perplexity(distances.copy(), desired_perplexity, verbose=0)\n P1 = _joint_probabilities_nn(distance_graph, desired_perplexity, verbose=0)\n # Convert the sparse matrix to a dense one for testing\n P1 = P1.toarray()\n if last_P is None:\n last_P = P\n last_P1 = P1\n else:\n assert_array_almost_equal(P, last_P, decimal=4)\n assert_array_almost_equal(P1, last_P1, decimal=4)\n\n\ndef test_gradient():\n # Test gradient of Kullback-Leibler divergence.\n random_state = check_random_state(0)\n\n n_samples = 50\n n_features = 2\n n_components = 2\n alpha = 1.0\n\n distances = random_state.randn(n_samples, n_features).astype(np.float32)\n distances = np.abs(distances.dot(distances.T))\n np.fill_diagonal(distances, 0.0)\n X_embedded = random_state.randn(n_samples, n_components).astype(np.float32)\n\n P = _joint_probabilities(distances, desired_perplexity=25.0, verbose=0)\n\n def fun(params):\n return _kl_divergence(params, P, alpha, n_samples, n_components)[0]\n\n def grad(params):\n return _kl_divergence(params, P, alpha, n_samples, n_components)[1]\n\n assert_almost_equal(check_grad(fun, grad, X_embedded.ravel()), 0.0, decimal=5)\n\n\ndef test_trustworthiness():\n # Test trustworthiness score.\n random_state = check_random_state(0)\n\n # Affine transformation\n X = random_state.randn(100, 2)\n assert trustworthiness(X, 5.0 + X / 10.0) == 1.0\n\n # Randomly shuffled\n X = np.arange(100).reshape(-1, 1)\n X_embedded = X.copy()\n random_state.shuffle(X_embedded)\n assert trustworthiness(X, X_embedded) < 0.6\n\n # Completely different\n X = np.arange(5).reshape(-1, 1)\n X_embedded = np.array([[0], [2], [4], [1], [3]])\n assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 0.2)\n\n\n@pytest.mark.parametrize(\"method\", [\"exact\", \"barnes_hut\"])\n@pytest.mark.parametrize(\"init\", (\"random\", \"pca\"))\ndef test_preserve_trustworthiness_approximately(method, init):\n # Nearest neighbors should be preserved approximately.\n random_state = check_random_state(0)\n n_components = 2\n X = random_state.randn(50, n_components).astype(np.float32)\n tsne = TSNE(\n n_components=n_components,\n init=init,\n random_state=0,\n method=method,\n n_iter=700,\n learning_rate=\"auto\",\n )\n X_embedded = tsne.fit_transform(X)\n t = trustworthiness(X, X_embedded, n_neighbors=1)\n assert t > 0.85\n\n\ndef test_optimization_minimizes_kl_divergence():\n \"\"\"t-SNE should give a lower KL divergence with more iterations.\"\"\"\n random_state = check_random_state(0)\n X, _ = make_blobs(n_features=3, random_state=random_state)\n kl_divergences = []\n for n_iter in [250, 300, 350]:\n tsne = TSNE(\n n_components=2,\n init=\"random\",\n perplexity=10,\n learning_rate=100.0,\n n_iter=n_iter,\n random_state=0,\n )\n tsne.fit_transform(X)\n kl_divergences.append(tsne.kl_divergence_)\n assert kl_divergences[1] <= kl_divergences[0]\n assert kl_divergences[2] <= kl_divergences[1]\n\n\n@pytest.mark.parametrize(\"method\", [\"exact\", \"barnes_hut\"])\ndef test_fit_csr_matrix(method):\n # X can be a sparse matrix.\n rng = check_random_state(0)\n X = rng.randn(50, 2)\n X[(rng.randint(0, 50, 25), rng.randint(0, 2, 25))] = 0.0\n X_csr = sp.csr_matrix(X)\n tsne = TSNE(\n n_components=2,\n init=\"random\",\n perplexity=10,\n learning_rate=100.0,\n random_state=0,\n method=method,\n n_iter=750,\n )\n X_embedded = tsne.fit_transform(X_csr)\n assert_allclose(trustworthiness(X_csr, X_embedded, n_neighbors=1), 1.0, rtol=1.1e-1)\n\n\ndef 
test_preserve_trustworthiness_approximately_with_precomputed_distances():\n # Nearest neighbors should be preserved approximately.\n random_state = check_random_state(0)\n for i in range(3):\n X = random_state.randn(80, 2)\n D = squareform(pdist(X), \"sqeuclidean\")\n tsne = TSNE(\n n_components=2,\n perplexity=2,\n learning_rate=100.0,\n early_exaggeration=2.0,\n metric=\"precomputed\",\n random_state=i,\n verbose=0,\n n_iter=500,\n init=\"random\",\n )\n X_embedded = tsne.fit_transform(D)\n t = trustworthiness(D, X_embedded, n_neighbors=1, metric=\"precomputed\")\n assert t > 0.95\n\n\ndef test_trustworthiness_not_euclidean_metric():\n # Test trustworthiness with a metric different from 'euclidean' and\n # 'precomputed'\n random_state = check_random_state(0)\n X = random_state.randn(100, 2)\n assert trustworthiness(X, X, metric=\"cosine\") == trustworthiness(\n pairwise_distances(X, metric=\"cosine\"), X, metric=\"precomputed\"\n )\n\n\n@pytest.mark.filterwarnings(\"ignore:The default learning rate in TSNE\")\n@pytest.mark.filterwarnings(\"ignore:The default initialization in TSNE\")\ndef test_early_exaggeration_too_small():\n # Early exaggeration factor must be >= 1.\n tsne = TSNE(early_exaggeration=0.99)\n with pytest.raises(ValueError, match=\"early_exaggeration .*\"):\n tsne.fit_transform(np.array([[0.0], [0.0]]))\n\n\n@pytest.mark.filterwarnings(\"ignore:The default learning rate in TSNE\")\n@pytest.mark.filterwarnings(\"ignore:The default initialization in TSNE\")\ndef test_too_few_iterations():\n # Number of gradient descent iterations must be at least 200.\n tsne = TSNE(n_iter=199)\n with pytest.raises(ValueError, match=\"n_iter .*\"):\n tsne.fit_transform(np.array([[0.0], [0.0]]))\n\n\n@pytest.mark.filterwarnings(\"ignore:The default learning rate in TSNE\")\n@pytest.mark.parametrize(\n \"method, retype\",\n [\n (\"exact\", np.asarray),\n (\"barnes_hut\", np.asarray),\n (\"barnes_hut\", sp.csr_matrix),\n ],\n)\n@pytest.mark.parametrize(\n \"D, message_regex\",\n [\n ([[0.0], [1.0]], \".* square distance matrix\"),\n ([[0.0, -1.0], [1.0, 0.0]], \".* positive.*\"),\n ],\n)\ndef test_bad_precomputed_distances(method, D, retype, message_regex):\n tsne = TSNE(\n metric=\"precomputed\",\n method=method,\n init=\"random\",\n random_state=42,\n )\n with pytest.raises(ValueError, match=message_regex):\n tsne.fit_transform(retype(D))\n\n\n@pytest.mark.filterwarnings(\"ignore:The default learning rate in TSNE\")\ndef test_exact_no_precomputed_sparse():\n tsne = TSNE(\n metric=\"precomputed\",\n method=\"exact\",\n init=\"random\",\n random_state=42,\n )\n with pytest.raises(TypeError, match=\"sparse\"):\n tsne.fit_transform(sp.csr_matrix([[0, 5], [5, 0]]))\n\n\n@pytest.mark.filterwarnings(\"ignore:The default learning rate in TSNE\")\ndef test_high_perplexity_precomputed_sparse_distances():\n # Perplexity should be less than 50\n dist = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0]])\n bad_dist = sp.csr_matrix(dist)\n tsne = TSNE(metric=\"precomputed\", init=\"random\", random_state=42)\n msg = \"3 neighbors per samples are required, but some samples have only 1\"\n with pytest.raises(ValueError, match=msg):\n tsne.fit_transform(bad_dist)\n\n\n@ignore_warnings(category=EfficiencyWarning)\ndef test_sparse_precomputed_distance():\n \"\"\"Make sure that TSNE works identically for sparse and dense matrix\"\"\"\n random_state = check_random_state(0)\n X = random_state.randn(100, 2)\n\n D_sparse = kneighbors_graph(X, n_neighbors=100, mode=\"distance\", include_self=True)\n D = pairwise_distances(X)\n assert sp.issparse(D_sparse)\n assert_almost_equal(D_sparse.A, D)\n\n 
tsne = TSNE(\n metric=\"precomputed\", random_state=0, init=\"random\", learning_rate=\"auto\"\n )\n Xt_dense = tsne.fit_transform(D)\n\n for fmt in [\"csr\", \"lil\"]:\n Xt_sparse = tsne.fit_transform(D_sparse.asformat(fmt))\n assert_almost_equal(Xt_dense, Xt_sparse)\n\n\n@pytest.mark.filterwarnings(\"ignore:The default learning rate in TSNE\")\n@pytest.mark.filterwarnings(\"ignore:The default initialization in TSNE\")\ndef test_non_positive_computed_distances():\n # Computed distance matrices must be positive.\n def metric(x, y):\n return -1\n\n # Negative computed distances should be caught even if result is squared\n tsne = TSNE(metric=metric, method=\"exact\")\n X = np.array([[0.0, 0.0], [1.0, 1.0]])\n with pytest.raises(ValueError, match=\"All distances .*metric given.*\"):\n tsne.fit_transform(X)\n\n\n@pytest.mark.filterwarnings(\"ignore:The default learning rate in TSNE\")\ndef test_init_not_available():\n # 'init' must be 'pca', 'random', or numpy array.\n tsne = TSNE(init=\"not available\")\n m = \"'init' must be 'pca', 'random', or a numpy array\"\n with pytest.raises(ValueError, match=m):\n tsne.fit_transform(np.array([[0.0], [1.0]]))\n\n\ndef test_init_ndarray():\n # Initialize TSNE with ndarray and test fit\n tsne = TSNE(init=np.zeros((100, 2)), learning_rate=\"auto\")\n X_embedded = tsne.fit_transform(np.ones((100, 5)))\n assert_array_equal(np.zeros((100, 2)), X_embedded)\n\n\ndef test_init_ndarray_precomputed():\n # Initialize TSNE with ndarray and metric 'precomputed'\n # Make sure no FutureWarning is thrown from _fit\n tsne = TSNE(\n init=np.zeros((100, 2)),\n metric=\"precomputed\",\n learning_rate=50.0,\n )\n tsne.fit(np.zeros((100, 100)))\n\n\n@pytest.mark.filterwarnings(\"ignore:The default learning rate in TSNE\")\n@pytest.mark.filterwarnings(\"ignore:The default initialization in TSNE\")\ndef test_distance_not_available():\n # 'metric' must be valid.\n tsne = TSNE(metric=\"not available\", method=\"exact\")\n with pytest.raises(ValueError, match=\"Unknown metric not available.*\"):\n tsne.fit_transform(np.array([[0.0], [1.0]]))\n\n tsne = TSNE(metric=\"not available\", method=\"barnes_hut\")\n with pytest.raises(ValueError, match=\"Metric 'not available' not valid.*\"):\n tsne.fit_transform(np.array([[0.0], [1.0]]))\n\n\n@pytest.mark.filterwarnings(\"ignore:The default learning rate in TSNE\")\n@pytest.mark.filterwarnings(\"ignore:The default initialization in TSNE\")\ndef test_method_not_available():\n # 'method' must be 'barnes_hut' or 'exact'\n tsne = TSNE(method=\"not available\")\n with pytest.raises(ValueError, match=\"'method' must be 'barnes_hut' or \"):\n tsne.fit_transform(np.array([[0.0], [1.0]]))\n\n\n@pytest.mark.filterwarnings(\"ignore:The default learning rate in TSNE\")\n@pytest.mark.filterwarnings(\"ignore:The default initialization in TSNE\")\ndef test_angle_out_of_range_checks():\n # check the angle parameter range\n for angle in [-1, -1e-6, 1 + 1e-6, 2]:\n tsne = TSNE(angle=angle)\n with pytest.raises(ValueError, match=\"'angle' must be between 0.0 - 1.0\"):\n tsne.fit_transform(np.array([[0.0], [1.0]]))\n\n\n@pytest.mark.filterwarnings(\"ignore:The default learning rate in TSNE\")\ndef test_pca_initialization_not_compatible_with_precomputed_kernel():\n # Precomputed distance matrices cannot use PCA initialization.\n tsne = TSNE(metric=\"precomputed\", init=\"pca\")\n with pytest.raises(\n ValueError,\n match='The parameter init=\"pca\" cannot be used with metric=\"precomputed\".',\n ):\n tsne.fit_transform(np.array([[0.0], [1.0]]))\n\n\ndef test_pca_initialization_not_compatible_with_sparse_input():\n # Sparse input matrices cannot use PCA 
initialization.\n tsne = TSNE(init=\"pca\", learning_rate=100.0)\n with pytest.raises(TypeError, match=\"PCA initialization.*\"):\n tsne.fit_transform(sp.csr_matrix([[0, 5], [5, 0]]))\n\n\[email protected](\"ignore:The default learning rate in TSNE\")\[email protected](\"ignore:The default initialization in TSNE\")\ndef test_n_components_range():\n # barnes_hut method should only be used with n_components <= 3\n tsne = TSNE(n_components=4, method=\"barnes_hut\")\n with pytest.raises(ValueError, match=\"'n_components' should be .*\"):\n tsne.fit_transform(np.array([[0.0], [1.0]]))\n\n\ndef test_early_exaggeration_used():\n # check that the ``early_exaggeration`` parameter has an effect\n random_state = check_random_state(0)\n n_components = 2\n methods = [\"exact\", \"barnes_hut\"]\n X = random_state.randn(25, n_components).astype(np.float32)\n for method in methods:\n tsne = TSNE(\n n_components=n_components,\n perplexity=1,\n learning_rate=100.0,\n init=\"pca\",\n random_state=0,\n method=method,\n early_exaggeration=1.0,\n n_iter=250,\n )\n X_embedded1 = tsne.fit_transform(X)\n tsne = TSNE(\n n_components=n_components,\n perplexity=1,\n learning_rate=100.0,\n init=\"pca\",\n random_state=0,\n method=method,\n early_exaggeration=10.0,\n n_iter=250,\n )\n X_embedded2 = tsne.fit_transform(X)\n\n assert not np.allclose(X_embedded1, X_embedded2)\n\n\ndef test_n_iter_used():\n # check that the ``n_iter`` parameter has an effect\n random_state = check_random_state(0)\n n_components = 2\n methods = [\"exact\", \"barnes_hut\"]\n X = random_state.randn(25, n_components).astype(np.float32)\n for method in methods:\n for n_iter in [251, 500]:\n tsne = TSNE(\n n_components=n_components,\n perplexity=1,\n learning_rate=0.5,\n init=\"random\",\n random_state=0,\n method=method,\n early_exaggeration=1.0,\n n_iter=n_iter,\n )\n tsne.fit_transform(X)\n\n assert tsne.n_iter_ == n_iter - 1\n\n\ndef test_answer_gradient_two_points():\n # Test the tree with only a single set of children.\n #\n # These tests & answers have been checked against the reference\n # implementation by LvdM.\n pos_input = np.array([[1.0, 0.0], [0.0, 1.0]])\n pos_output = np.array(\n [[-4.961291e-05, -1.072243e-04], [9.259460e-05, 2.702024e-04]]\n )\n neighbors = np.array([[1], [0]])\n grad_output = np.array(\n [[-2.37012478e-05, -6.29044398e-05], [2.37012478e-05, 6.29044398e-05]]\n )\n _run_answer_test(pos_input, pos_output, neighbors, grad_output)\n\n\ndef test_answer_gradient_four_points():\n # Four points tests the tree with multiple levels of children.\n #\n # These tests & answers have been checked against the reference\n # implementation by LvdM.\n pos_input = np.array([[1.0, 0.0], [0.0, 1.0], [5.0, 2.0], [7.3, 2.2]])\n pos_output = np.array(\n [\n [6.080564e-05, -7.120823e-05],\n [-1.718945e-04, -4.000536e-05],\n [-2.271720e-04, 8.663310e-05],\n [-1.032577e-04, -3.582033e-05],\n ]\n )\n neighbors = np.array([[1, 2, 3], [0, 2, 3], [1, 0, 3], [1, 2, 0]])\n grad_output = np.array(\n [\n [5.81128448e-05, -7.78033454e-06],\n [-5.81526851e-05, 7.80976444e-06],\n [4.24275173e-08, -3.69569698e-08],\n [-2.58720939e-09, 7.52706374e-09],\n ]\n )\n _run_answer_test(pos_input, pos_output, neighbors, grad_output)\n\n\ndef test_skip_num_points_gradient():\n # Test the kwargs option skip_num_points.\n #\n # Skip num points should make it such that the Barnes_hut gradient\n # is not calculated for indices below skip_num_point.\n # Aside from skip_num_points=2 and the first two gradient rows\n # being set to zero, these data points are the same 
as in\n # test_answer_gradient_four_points()\n pos_input = np.array([[1.0, 0.0], [0.0, 1.0], [5.0, 2.0], [7.3, 2.2]])\n pos_output = np.array(\n [\n [6.080564e-05, -7.120823e-05],\n [-1.718945e-04, -4.000536e-05],\n [-2.271720e-04, 8.663310e-05],\n [-1.032577e-04, -3.582033e-05],\n ]\n )\n neighbors = np.array([[1, 2, 3], [0, 2, 3], [1, 0, 3], [1, 2, 0]])\n grad_output = np.array(\n [\n [0.0, 0.0],\n [0.0, 0.0],\n [4.24275173e-08, -3.69569698e-08],\n [-2.58720939e-09, 7.52706374e-09],\n ]\n )\n _run_answer_test(pos_input, pos_output, neighbors, grad_output, False, 0.1, 2)\n\n\ndef _run_answer_test(\n pos_input,\n pos_output,\n neighbors,\n grad_output,\n verbose=False,\n perplexity=0.1,\n skip_num_points=0,\n):\n distances = pairwise_distances(pos_input).astype(np.float32)\n args = distances, perplexity, verbose\n pos_output = pos_output.astype(np.float32)\n neighbors = neighbors.astype(np.int64, copy=False)\n pij_input = _joint_probabilities(*args)\n pij_input = squareform(pij_input).astype(np.float32)\n grad_bh = np.zeros(pos_output.shape, dtype=np.float32)\n\n from scipy.sparse import csr_matrix\n\n P = csr_matrix(pij_input)\n\n neighbors = P.indices.astype(np.int64)\n indptr = P.indptr.astype(np.int64)\n\n _barnes_hut_tsne.gradient(\n P.data, pos_output, neighbors, indptr, grad_bh, 0.5, 2, 1, skip_num_points=0\n )\n assert_array_almost_equal(grad_bh, grad_output, decimal=4)\n\n\n@pytest.mark.filterwarnings(\"ignore:The default learning rate in TSNE\")\n@pytest.mark.filterwarnings(\"ignore:The default initialization in TSNE\")\ndef test_verbose():\n # Verbose options write to stdout.\n random_state = check_random_state(0)\n tsne = TSNE(verbose=2)\n X = random_state.randn(5, 2)\n\n old_stdout = sys.stdout\n sys.stdout = StringIO()\n try:\n tsne.fit_transform(X)\n finally:\n out = sys.stdout.getvalue()\n sys.stdout.close()\n sys.stdout = old_stdout\n\n assert \"[t-SNE]\" in out\n assert \"nearest neighbors...\" in out\n assert \"Computed conditional probabilities\" in out\n assert \"Mean sigma\" in out\n assert \"early exaggeration\" in out\n\n\n@pytest.mark.filterwarnings(\"ignore:The default learning rate in TSNE\")\n@pytest.mark.filterwarnings(\"ignore:The default initialization in TSNE\")\ndef test_chebyshev_metric():\n # t-SNE should allow metrics that cannot be squared (issue #3526).\n random_state = check_random_state(0)\n tsne = TSNE(metric=\"chebyshev\")\n X = random_state.randn(5, 2)\n tsne.fit_transform(X)\n\n\n@pytest.mark.filterwarnings(\"ignore:The default learning rate in TSNE\")\n@pytest.mark.filterwarnings(\"ignore:The default initialization in TSNE\")\ndef test_reduction_to_one_component():\n # t-SNE should allow reduction to one component (issue #4154).\n random_state = check_random_state(0)\n tsne = TSNE(n_components=1)\n X = random_state.randn(5, 2)\n X_embedded = tsne.fit(X).embedding_\n assert np.all(np.isfinite(X_embedded))\n\n\n@pytest.mark.parametrize(\"method\", [\"barnes_hut\", \"exact\"])\n@pytest.mark.parametrize(\"dt\", [np.float32, np.float64])\ndef test_64bit(method, dt):\n # Ensure 64bit arrays are handled correctly.\n random_state = check_random_state(0)\n\n X = random_state.randn(10, 2).astype(dt, copy=False)\n tsne = TSNE(\n n_components=2,\n perplexity=2,\n learning_rate=100.0,\n random_state=0,\n method=method,\n verbose=0,\n n_iter=300,\n init=\"random\",\n )\n X_embedded = tsne.fit_transform(X)\n effective_type = X_embedded.dtype\n\n # tsne cython code is only single precision, so the output will\n # always be single precision, irrespectively of the input dtype\n assert effective_type == np.float32\n\n\[email 
protected](\"method\", [\"barnes_hut\", \"exact\"])\ndef test_kl_divergence_not_nan(method):\n # Ensure kl_divergence_ is computed at last iteration\n # even though n_iter % n_iter_check != 0, i.e. 1003 % 50 != 0\n random_state = check_random_state(0)\n\n X = random_state.randn(50, 2)\n tsne = TSNE(\n n_components=2,\n perplexity=2,\n learning_rate=100.0,\n random_state=0,\n method=method,\n verbose=0,\n n_iter=503,\n init=\"random\",\n )\n tsne.fit_transform(X)\n\n assert not np.isnan(tsne.kl_divergence_)\n\n\ndef test_barnes_hut_angle():\n # When Barnes-Hut's angle=0 this corresponds to the exact method.\n angle = 0.0\n perplexity = 10\n n_samples = 100\n for n_components in [2, 3]:\n n_features = 5\n degrees_of_freedom = float(n_components - 1.0)\n\n random_state = check_random_state(0)\n data = random_state.randn(n_samples, n_features)\n distances = pairwise_distances(data)\n params = random_state.randn(n_samples, n_components)\n P = _joint_probabilities(distances, perplexity, verbose=0)\n kl_exact, grad_exact = _kl_divergence(\n params, P, degrees_of_freedom, n_samples, n_components\n )\n\n n_neighbors = n_samples - 1\n distances_csr = (\n NearestNeighbors()\n .fit(data)\n .kneighbors_graph(n_neighbors=n_neighbors, mode=\"distance\")\n )\n P_bh = _joint_probabilities_nn(distances_csr, perplexity, verbose=0)\n kl_bh, grad_bh = _kl_divergence_bh(\n params,\n P_bh,\n degrees_of_freedom,\n n_samples,\n n_components,\n angle=angle,\n skip_num_points=0,\n verbose=0,\n )\n\n P = squareform(P)\n P_bh = P_bh.toarray()\n assert_array_almost_equal(P_bh, P, decimal=5)\n assert_almost_equal(kl_exact, kl_bh, decimal=3)\n\n\n@skip_if_32bit\ndef test_n_iter_without_progress():\n # Use a dummy negative n_iter_without_progress and check output on stdout\n random_state = check_random_state(0)\n X = random_state.randn(100, 10)\n for method in [\"barnes_hut\", \"exact\"]:\n tsne = TSNE(\n n_iter_without_progress=-1,\n verbose=2,\n learning_rate=1e8,\n random_state=0,\n method=method,\n n_iter=351,\n init=\"random\",\n )\n tsne._N_ITER_CHECK = 1\n tsne._EXPLORATION_N_ITER = 0\n\n old_stdout = sys.stdout\n sys.stdout = StringIO()\n try:\n tsne.fit_transform(X)\n finally:\n out = sys.stdout.getvalue()\n sys.stdout.close()\n sys.stdout = old_stdout\n\n # The output needs to contain the value of n_iter_without_progress\n assert \"did not make any progress during the last -1 episodes. 
Finished.\" in out\n\n\[email protected](\"ignore:The default learning rate in TSNE\")\[email protected](\"ignore:The default initialization in TSNE\")\ndef test_min_grad_norm():\n # Make sure that the parameter min_grad_norm is used correctly\n random_state = check_random_state(0)\n X = random_state.randn(100, 2)\n min_grad_norm = 0.002\n tsne = TSNE(min_grad_norm=min_grad_norm, verbose=2, random_state=0, method=\"exact\")\n\n old_stdout = sys.stdout\n sys.stdout = StringIO()\n try:\n tsne.fit_transform(X)\n finally:\n out = sys.stdout.getvalue()\n sys.stdout.close()\n sys.stdout = old_stdout\n\n lines_out = out.split(\"\\n\")\n\n # extract the gradient norm from the verbose output\n gradient_norm_values = []\n for line in lines_out:\n # When the computation is Finished just an old gradient norm value\n # is repeated that we do not need to store\n if \"Finished\" in line:\n break\n\n start_grad_norm = line.find(\"gradient norm\")\n if start_grad_norm >= 0:\n line = line[start_grad_norm:]\n line = line.replace(\"gradient norm = \", \"\").split(\" \")[0]\n gradient_norm_values.append(float(line))\n\n # Compute how often the gradient norm is smaller than min_grad_norm\n gradient_norm_values = np.array(gradient_norm_values)\n n_smaller_gradient_norms = len(\n gradient_norm_values[gradient_norm_values <= min_grad_norm]\n )\n\n # The gradient norm can be smaller than min_grad_norm at most once,\n # because in the moment it becomes smaller the optimization stops\n assert n_smaller_gradient_norms <= 1\n\n\[email protected](\"ignore:The default learning rate in TSNE\")\[email protected](\"ignore:The default initialization in TSNE\")\ndef test_accessible_kl_divergence():\n # Ensures that the accessible kl_divergence matches the computed value\n random_state = check_random_state(0)\n X = random_state.randn(50, 2)\n tsne = TSNE(\n n_iter_without_progress=2, verbose=2, random_state=0, method=\"exact\", n_iter=500\n )\n\n old_stdout = sys.stdout\n sys.stdout = StringIO()\n try:\n tsne.fit_transform(X)\n finally:\n out = sys.stdout.getvalue()\n sys.stdout.close()\n sys.stdout = old_stdout\n\n # The output needs to contain the accessible kl_divergence as the error at\n # the last iteration\n for line in out.split(\"\\n\")[::-1]:\n if \"Iteration\" in line:\n _, _, error = line.partition(\"error = \")\n if error:\n error, _, _ = error.partition(\",\")\n break\n assert_almost_equal(tsne.kl_divergence_, float(error), decimal=5)\n\n\[email protected](\"method\", [\"barnes_hut\", \"exact\"])\ndef test_uniform_grid(method):\n \"\"\"Make sure that TSNE can approximately recover a uniform 2D grid\n\n Due to ties in distances between point in X_2d_grid, this test is platform\n dependent for ``method='barnes_hut'`` due to numerical imprecision.\n\n Also, t-SNE is not assured to converge to the right solution because bad\n initialization can lead to convergence to bad local minimum (the\n optimization problem is non-convex). To avoid breaking the test too often,\n we re-run t-SNE from the final point when the convergence is not good\n enough.\n \"\"\"\n seeds = range(3)\n n_iter = 500\n for seed in seeds:\n tsne = TSNE(\n n_components=2,\n init=\"random\",\n random_state=seed,\n perplexity=50,\n n_iter=n_iter,\n method=method,\n learning_rate=\"auto\",\n )\n Y = tsne.fit_transform(X_2d_grid)\n\n try_name = \"{}_{}\".format(method, seed)\n try:\n assert_uniform_grid(Y, try_name)\n except AssertionError:\n # If the test fails a first time, re-run with init=Y to see if\n # this was caused by a bad initialization. 
Note that this will\n # also run an early_exaggeration step.\n try_name += \":rerun\"\n tsne.init = Y\n Y = tsne.fit_transform(X_2d_grid)\n assert_uniform_grid(Y, try_name)\n\n\ndef assert_uniform_grid(Y, try_name=None):\n # Ensure that the resulting embedding leads to approximately\n # uniformly spaced points: the distance to the closest neighbors\n # should be non-zero and approximately constant.\n nn = NearestNeighbors(n_neighbors=1).fit(Y)\n dist_to_nn = nn.kneighbors(return_distance=True)[0].ravel()\n assert dist_to_nn.min() > 0.1\n\n smallest_to_mean = dist_to_nn.min() / np.mean(dist_to_nn)\n largest_to_mean = dist_to_nn.max() / np.mean(dist_to_nn)\n\n assert smallest_to_mean > 0.5, try_name\n assert largest_to_mean < 2, try_name\n\n\ndef test_bh_match_exact():\n # check that the ``barnes_hut`` method match the exact one when\n # ``angle = 0`` and ``perplexity > n_samples / 3``\n random_state = check_random_state(0)\n n_features = 10\n X = random_state.randn(30, n_features).astype(np.float32)\n X_embeddeds = {}\n n_iter = {}\n for method in [\"exact\", \"barnes_hut\"]:\n tsne = TSNE(\n n_components=2,\n method=method,\n learning_rate=1.0,\n init=\"random\",\n random_state=0,\n n_iter=251,\n perplexity=30.0,\n angle=0,\n )\n # Kill the early_exaggeration\n tsne._EXPLORATION_N_ITER = 0\n X_embeddeds[method] = tsne.fit_transform(X)\n n_iter[method] = tsne.n_iter_\n\n assert n_iter[\"exact\"] == n_iter[\"barnes_hut\"]\n assert_allclose(X_embeddeds[\"exact\"], X_embeddeds[\"barnes_hut\"], rtol=1e-4)\n\n\ndef test_gradient_bh_multithread_match_sequential():\n # check that the bh gradient with different num_threads gives the same\n # results\n\n n_features = 10\n n_samples = 30\n n_components = 2\n degrees_of_freedom = 1\n\n angle = 3\n perplexity = 5\n\n random_state = check_random_state(0)\n data = random_state.randn(n_samples, n_features).astype(np.float32)\n params = random_state.randn(n_samples, n_components)\n\n n_neighbors = n_samples - 1\n distances_csr = (\n NearestNeighbors()\n .fit(data)\n .kneighbors_graph(n_neighbors=n_neighbors, mode=\"distance\")\n )\n P_bh = _joint_probabilities_nn(distances_csr, perplexity, verbose=0)\n kl_sequential, grad_sequential = _kl_divergence_bh(\n params,\n P_bh,\n degrees_of_freedom,\n n_samples,\n n_components,\n angle=angle,\n skip_num_points=0,\n verbose=0,\n num_threads=1,\n )\n for num_threads in [2, 4]:\n kl_multithread, grad_multithread = _kl_divergence_bh(\n params,\n P_bh,\n degrees_of_freedom,\n n_samples,\n n_components,\n angle=angle,\n skip_num_points=0,\n verbose=0,\n num_threads=num_threads,\n )\n\n assert_allclose(kl_multithread, kl_sequential, rtol=1e-6)\n assert_allclose(grad_multithread, grad_multithread)\n\n\ndef test_tsne_with_different_distance_metrics():\n \"\"\"Make sure that TSNE works for different distance metrics\"\"\"\n random_state = check_random_state(0)\n n_components_original = 3\n n_components_embedding = 2\n X = random_state.randn(50, n_components_original).astype(np.float32)\n metrics = [\"manhattan\", \"cosine\"]\n dist_funcs = [manhattan_distances, cosine_distances]\n for metric, dist_func in zip(metrics, dist_funcs):\n X_transformed_tsne = TSNE(\n metric=metric,\n n_components=n_components_embedding,\n random_state=0,\n n_iter=300,\n init=\"random\",\n learning_rate=\"auto\",\n ).fit_transform(X)\n X_transformed_tsne_precomputed = TSNE(\n metric=\"precomputed\",\n n_components=n_components_embedding,\n random_state=0,\n n_iter=300,\n init=\"random\",\n learning_rate=\"auto\",\n ).fit_transform(dist_func(X))\n 
assert_array_equal(X_transformed_tsne, X_transformed_tsne_precomputed)\n\n\n# TODO: Remove in 1.2\[email protected](\"init\", [None, \"random\", \"pca\"])\ndef test_tsne_init_futurewarning(init):\n \"\"\"Make sure that a FutureWarning is only raised when the\n init is not specified or is 'pca'.\"\"\"\n random_state = check_random_state(0)\n\n X = random_state.randn(5, 2)\n kwargs = dict(learning_rate=200.0, init=init)\n tsne = TSNE(**{k: v for k, v in kwargs.items() if v is not None})\n\n if init is None:\n with pytest.warns(FutureWarning, match=\"The default initialization.*\"):\n tsne.fit_transform(X)\n elif init == \"pca\":\n with pytest.warns(FutureWarning, match=\"The PCA initialization.*\"):\n tsne.fit_transform(X)\n else:\n with pytest.warns(None) as record:\n tsne.fit_transform(X)\n assert not record\n\n\n# TODO: Remove in 1.2\[email protected](\"learning_rate\", [None, 200.0])\ndef test_tsne_learning_rate_futurewarning(learning_rate):\n \"\"\"Make sure that a FutureWarning is only raised when the learning rate\n is not specified\"\"\"\n random_state = check_random_state(0)\n\n X = random_state.randn(5, 2)\n kwargs = dict(learning_rate=learning_rate, init=\"random\")\n tsne = TSNE(**{k: v for k, v in kwargs.items() if v is not None})\n\n if learning_rate is None:\n with pytest.warns(FutureWarning, match=\"The default learning rate.*\"):\n tsne.fit_transform(X)\n else:\n with pytest.warns(None) as record:\n tsne.fit_transform(X)\n assert not record\n\n\[email protected](\"ignore:The default initialization in TSNE\")\ndef test_tsne_negative_learning_rate():\n \"\"\"Make sure that negative learning rate results in a ValueError\"\"\"\n random_state = check_random_state(0)\n X = random_state.randn(5, 2)\n with pytest.raises(ValueError, match=\"'learning_rate' must be.*\"):\n TSNE(learning_rate=-50.0).fit_transform(X)\n\n\[email protected](\"method\", [\"exact\", \"barnes_hut\"])\ndef test_tsne_n_jobs(method):\n \"\"\"Make sure that the n_jobs parameter doesn't impact the output\"\"\"\n random_state = check_random_state(0)\n n_features = 10\n X = random_state.randn(30, n_features)\n X_tr_ref = TSNE(\n n_components=2,\n method=method,\n perplexity=30.0,\n angle=0,\n n_jobs=1,\n random_state=0,\n init=\"random\",\n learning_rate=\"auto\",\n ).fit_transform(X)\n X_tr = TSNE(\n n_components=2,\n method=method,\n perplexity=30.0,\n angle=0,\n n_jobs=2,\n random_state=0,\n init=\"random\",\n learning_rate=\"auto\",\n ).fit_transform(X)\n\n assert_allclose(X_tr_ref, X_tr)\n\n\[email protected](\"ignore:The PCA initialization in TSNE will change\")\n# FIXME: remove in 1.3 after deprecation of `square_distances`\ndef test_tsne_deprecation_square_distances():\n \"\"\"Check that we raise a warning regarding the removal of\n `square_distances`.\n\n Also check the parameters do not have any effect.\n \"\"\"\n random_state = check_random_state(0)\n X = random_state.randn(30, 10)\n tsne = TSNE(\n n_components=2,\n init=\"pca\",\n learning_rate=\"auto\",\n perplexity=30.0,\n angle=0,\n n_jobs=1,\n random_state=0,\n square_distances=True,\n )\n warn_msg = (\n \"The parameter `square_distances` has not effect and will be removed in\"\n \" version 1.3\"\n )\n with pytest.warns(FutureWarning, match=warn_msg):\n X_trans_1 = tsne.fit_transform(X)\n\n tsne = TSNE(\n n_components=2,\n init=\"pca\",\n learning_rate=\"auto\",\n perplexity=30.0,\n angle=0,\n n_jobs=1,\n random_state=0,\n )\n X_trans_2 = tsne.fit_transform(X)\n assert_allclose(X_trans_1, X_trans_2)\n"
] | [
[
"numpy.ones",
"scipy.spatial.distance.pdist",
"sklearn.metrics.pairwise.pairwise_distances",
"sklearn.manifold._t_sne._kl_divergence_bh",
"numpy.log",
"numpy.fill_diagonal",
"sklearn.manifold._barnes_hut_tsne.gradient",
"numpy.meshgrid",
"numpy.isfinite",
"sklearn.manifold._t_sne._joint_probabilities_nn",
"numpy.allclose",
"sklearn.neighbors.NearestNeighbors",
"sklearn.utils._testing.assert_array_equal",
"sklearn.manifold.TSNE",
"sklearn.manifold._t_sne.trustworthiness",
"numpy.isnan",
"numpy.linspace",
"numpy.mean",
"sklearn.utils._testing.assert_array_almost_equal",
"sklearn.utils.check_random_state",
"numpy.zeros",
"numpy.arange",
"sklearn.manifold._utils._binary_search_perplexity",
"sklearn.utils._testing.ignore_warnings",
"numpy.finfo",
"sklearn.datasets.make_blobs",
"scipy.spatial.distance.squareform",
"numpy.log2",
"sklearn.manifold._t_sne._joint_probabilities",
"scipy.sparse.issparse",
"sklearn.utils._testing.assert_almost_equal",
"scipy.sparse.csr_matrix",
"sklearn.neighbors.kneighbors_graph",
"sklearn.manifold._t_sne._kl_divergence",
"numpy.testing.assert_allclose",
"numpy.array"
]
] |
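The row above embeds scikit-learn's t-SNE test suite, which repeatedly contrasts the `exact` and `barnes_hut` solvers (for example, `test_bh_match_exact` checks that they coincide when `angle=0`). As a minimal, self-contained sketch of that idea — not part of the dataset row itself, with toy data and parameter values chosen purely for illustration:

```python
# Illustrative sketch only (not from the dataset row above): comparing the
# "exact" and "barnes_hut" TSNE solvers on toy data, loosely in the spirit
# of test_bh_match_exact. All numeric settings here are arbitrary choices.
import numpy as np
from sklearn.manifold import TSNE
from sklearn.utils import check_random_state

rng = check_random_state(0)
X = rng.randn(30, 10).astype(np.float32)

embeddings = {}
for method in ("exact", "barnes_hut"):
    tsne = TSNE(
        n_components=2,
        method=method,
        init="random",
        learning_rate=200.0,
        perplexity=25.0,
        angle=0.0,   # with angle=0, barnes_hut approximates the exact gradient
        random_state=0,
    )
    embeddings[method] = tsne.fit_transform(X)

# The real test additionally disables the early-exaggeration phase before
# asserting closeness with assert_allclose; here we just report the gap.
print(np.abs(embeddings["exact"] - embeddings["barnes_hut"]).max())
```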
jacob-parnell-rozetta/pegasus | [
"ae08e41b32b1429e9f24b8a3b97dbb4d17bd2546"
] | [
"pegasus/data/datasets.py"
] | [
"# Copyright 2020 The PEGASUS Authors..\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Basic Dataset Class.\"\"\"\n# pylint: disable=g-long-lambda\n\nimport logging\n\nimport tensorflow as tf\nimport tensorflow_datasets as tfds\n\n_DATASETS = {}\n\n\ndef get_dataset(dataset_name):\n if dataset_name not in _DATASETS:\n raise ValueError(\"Dataset name %s is not found in registered datasets.\" %\n dataset_name)\n return _DATASETS[dataset_name]()\n\n\ndef register(dataset_name):\n \"\"\"Decorator for registering a dataset.\"\"\"\n\n def decorator(decorator_dataset_class, decorator_dataset_name):\n _DATASETS[decorator_dataset_name] = decorator_dataset_class\n return decorator_dataset_class\n\n return lambda dataset_class: decorator(dataset_class, dataset_name)\n\n\nclass BaseDataset(object):\n \"\"\"Dataset Class.\"\"\"\n\n @property\n def is_supervised(self):\n # set to false for pretraining corpus dataset.\n return True\n\n @property\n def num_examples(self):\n return\n\n def build(self, input_pattern, shuffle_files):\n \"\"\"Build dataset.\n\n Args:\n input_pattern: input format.\n shuffle_files: whether to shuffle files list.\n\n Returns:\n Tuple of (tf.data.Dataset, number_of_examples)\n \"\"\"\n raise NotImplementedError()\n\n\nclass FilesDataset(BaseDataset):\n \"\"\"Files Dataset.\n\n Load data from files directly.\n reader_fn create serialized examples tf.data.Dataset from filenames.\n parser_fn parse serialzied examples into dictionary of tensors.\n \"\"\"\n\n @property\n def reader_fn(self):\n raise NotImplementedError()\n\n def parser_fn(self, serialized_example):\n \"\"\"Parse serialized examples.\"\"\"\n if self.is_supervised:\n features = tf.io.parse_single_example(\n serialized_example,\n features={\n \"inputs\": tf.io.FixedLenFeature([], tf.string),\n \"targets\": tf.io.FixedLenFeature([], tf.string),\n })\n return {\n \"inputs\": features[\"inputs\"],\n \"targets\": features[\"targets\"],\n \"supervised\": tf.constant(True)\n }\n else:\n features = tf.io.parse_single_example(\n serialized_example,\n features={\n \"text\": tf.io.FixedLenFeature([], tf.string),\n })\n return {\n \"inputs\": features[\"text\"],\n \"targets\": tf.constant(\"\"),\n \"supervised\": tf.constant(False)\n }\n\n def build(self, input_pattern, shuffle_files):\n \"\"\"Build dataset.\n\n Args:\n input_pattern: input file pattern.\n shuffle_files: whether to shuffle files list.\n\n Returns:\n Tuple of (tf.data.Dataset, number_of_examples)\n \"\"\"\n filenames = sorted(tf.gfile.Glob(input_pattern))\n if not filenames:\n raise ValueError(\"Can't not find files with pattern: %s.\" % input_pattern)\n dataset = tf.data.Dataset.from_tensor_slices(filenames)\n if shuffle_files:\n dataset = dataset.shuffle(len(filenames))\n options = tf.data.Options()\n options.experimental_deterministic = not shuffle_files\n dataset = dataset.with_options(options)\n dataset = dataset.interleave(\n self.reader_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n dataset = dataset.map(\n 
self.parser_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n return dataset, self.num_examples\n\n\nclass TFDSDataset(BaseDataset):\n \"\"\"TFDS Dataset Class.\"\"\"\n\n @property\n def is_supervised(self):\n return True\n\n @property\n def data_dir(self):\n return\n\n @property\n def s3_enabled(self):\n return True\n\n def override_build(self, build):\n return build\n\n def load(self, build, split, shuffle_files):\n dataset, info = tfds.load(\n self.override_build(build),\n as_supervised=self.is_supervised,\n split=split,\n with_info=True,\n shuffle_files=False, # shuffle_files, # False for 2xfine-tune\n data_dir=\"/data/jsparnel/tensorflow_datasets/\")\n num_examples = self.num_examples or info.splits[split].num_examples\n return dataset, num_examples\n\n def transform(self, dataset):\n if self.is_supervised:\n return dataset.map(lambda x, y: {\n \"inputs\": x,\n \"targets\": y,\n \"supervised\": tf.constant(True),\n })\n else:\n return dataset.map(\n lambda d: {\n \"inputs\": d[\"text\"],\n \"targets\": tf.constant(\"\"),\n \"supervised\": tf.constant(False),\n })\n\n def build(self, input_pattern, shuffle_files):\n \"\"\"Build dataset.\n\n Args:\n input_pattern: input patterns have more than two parts separated by\n hyphens. The first part is the name of tfds, could be xxx/yyy. The\n second part is split type among train, validation, or test. Rest are the\n key arguments.\n For example a valid dataset would be:\n big_patent/all-train-shard_100-take_200\n shuffle_files: whether to shuffle files list.\n\n Returns:\n Tuple of (tf.data.Dataset, number_of_examples)\n \"\"\"\n args = input_pattern.split(\"-\")\n build_name, split = args[0:2]\n kwargs = [seg.split(\"_\") for seg in args[2:]]\n kwargs = {k: v for k, v in kwargs}\n\n if split not in [\"train\", \"validation\", \"test\"]:\n raise ValueError(\"Split type %s is not supported. Supported types are: \"\n \"train, validation, test.\" % split)\n dataset, num_examples = self.load(build_name, split, shuffle_files)\n dataset = self.transform(dataset)\n\n if \"shard\" in kwargs:\n dataset = dataset.shard(int(kwargs.pop(\"shard\")), 0)\n if \"take\" in kwargs:\n num_examples = int(kwargs.pop(\"take\"))\n dataset = dataset.take(num_examples)\n if num_examples <= 10000:\n dataset = dataset.cache()\n if kwargs:\n raise ValueError(\"Unused keys: %s\" % \",\".join(kwargs.keys()))\n\n num_examples = int(num_examples)\n logging.info(\"Number of examples for config %s %s is %d\", build_name, split,\n num_examples)\n return dataset, num_examples\n\n def _split_train_80_10_10(self, build, split, shuffle_files):\n \"\"\"One of the default setting to build dataset.\"\"\"\n # Those supervised datasets have a single dataset and do not provide\n # train/validation/test splits. 
We split the dataset 80/10/10.\n split_patterns = {\n \"train\": \"train[:80%]\",\n \"validation\": \"train[80%:90%]\",\n \"test\": \"train[90%:]\"\n }\n dataset, info = tfds.load(\n self.override_build(build),\n as_supervised=self.is_supervised,\n split=split_patterns[split],\n shuffle_files=shuffle_files,\n with_info=True,\n data_dir=self.data_dir)\n if split == \"train\":\n num_examples = info.splits[\"train\"].num_examples * 0.8\n elif split == \"validation\":\n num_examples = info.splits[\"train\"].num_examples * 0.1\n else:\n num_examples = info.splits[\"train\"].num_examples * 0.1\n return dataset, num_examples\n\n def _split_train_98_1_1(self, build, split, shuffle_files):\n \"\"\"One of the default setting to build dataset.\"\"\"\n # Those large pretraining datasets have a single dataset and do not provide\n # train/validation/test splits. We split the dataset 98/01/01.\n if self.s3_enabled:\n split_patterns = {\n \"train\": \"train[:98%]\",\n \"validation\": \"train[98%:99%]\",\n \"test\": \"train[99%:]\"\n }\n else:\n split_patterns = {\n \"train\": tfds.Split.TRAIN.subsplit(tfds.percent[:98]),\n \"validation\": tfds.Split.TRAIN.subsplit(tfds.percent[98:99]),\n \"test\": tfds.Split.TRAIN.subsplit(tfds.percent[99:]),\n }\n dataset = tfds.load(\n self.override_build(build),\n as_supervised=self.is_supervised,\n split=split_patterns[split],\n shuffle_files=shuffle_files,\n data_dir=self.data_dir)\n if self.num_examples is None:\n raise ValueError(\"Must set valid num examples.\")\n num_examples = int(self.num_examples * (0.98 if split == \"train\" else 0.01))\n return dataset, num_examples\n\n def _split_validation_50_50(self, build, split, shuffle_files):\n \"\"\"One of the default setting to build dataset.\"\"\"\n # Those large pretraining datasets have not have test set.\n # We split the validation dataset 50/50 as validation/test.\n split_patterns = {\n \"train\": \"train\",\n \"validation\": \"validation[50%:]\",\n \"test\": \"validation[50%:]\"\n }\n dataset, info = tfds.load(\n self.override_build(build),\n as_supervised=self.is_supervised,\n split=split_patterns[split],\n shuffle_files=shuffle_files,\n with_info=True,\n data_dir=self.data_dir)\n if split == \"train\":\n num_examples = info.splits[\"train\"].num_examples\n elif split == \"validation\":\n num_examples = info.splits[\"validation\"].num_examples * 0.5\n else:\n num_examples = info.splits[\"validation\"].num_examples * 0.5\n return dataset, num_examples\n"
] | [
[
"tensorflow.data.Options",
"tensorflow.gfile.Glob",
"tensorflow.io.FixedLenFeature",
"tensorflow.constant",
"tensorflow.data.Dataset.from_tensor_slices"
]
] |
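The row above is PEGASUS's dataset registry; its `TFDSDataset` helpers encode split conventions such as the 80/10/10 carve-up of a single `train` split. A minimal sketch of that slicing convention expressed as a direct `tfds.load` call — the dataset name used here is only a stand-in and does not appear in the row:

```python
# Illustrative sketch only (not from the dataset row above): the 80/10/10
# split convention used by TFDSDataset._split_train_80_10_10, written as a
# plain tfds.load call. "ag_news_subset" is a stand-in dataset name.
import tensorflow_datasets as tfds

split_patterns = {
    "train": "train[:80%]",
    "validation": "train[80%:90%]",
    "test": "train[90%:]",
}

dataset, info = tfds.load(
    "ag_news_subset",
    split=split_patterns["validation"],
    as_supervised=True,
    with_info=True,
    shuffle_files=False,
)

# Roughly 10% of the original train split, mirroring the class above.
print(int(info.splits["train"].num_examples * 0.1))
```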
ksteimel/allennlp | [
"dcd8d9e9671da5a87de51f2bb42ceb3abdce8b3b"
] | [
"allennlp/models/multitask.py"
] | [
"from collections import defaultdict\nimport inspect\nfrom typing import Any, Dict, List, Set, Union, Mapping\n\nfrom overrides import overrides\nimport torch\n\nfrom allennlp.data import Vocabulary, TextFieldTensors\nfrom allennlp.modules import Backbone\nfrom allennlp.models.model import Model\nfrom allennlp.models.heads import Head\nfrom allennlp.nn import InitializerApplicator\n\n\ndef get_forward_arguments(module: torch.nn.Module) -> Set[str]:\n signature = inspect.signature(module.forward)\n return set([arg for arg in signature.parameters if arg != \"self\"])\n\n\[email protected](\"multitask\")\nclass MultiTaskModel(Model):\n \"\"\"\n A `MultiTaskModel` consists of a `Backbone` that encodes its inputs in some way, then a\n collection of `Heads` that make predictions from the backbone-encoded inputs. The predictions\n of each `Head` are combined to compute a joint loss, which is then used for training.\n\n This model works by taking `**kwargs` in `forward`, and passing the right arguments from that to\n the backbone and to each head. By default, we use `inspect` to try to figure out getting the\n right arguments to the right modules, but we allow you to specify these arguments yourself in\n case our inference code gets it wrong.\n\n It is the caller's responsibility to make sure that the backbone and all heads are compatible with\n each other, and with the input data that comes from a `MultiTaskDatasetReader`. We give some\n arguments in this class and in `MultiTaskDatasetReader` to help with plumbing the arguments in\n complex cases (e.g., you can change argument names so that they match what the backbone and\n heads expect).\n\n # Parameters\n\n vocab: `Vocab`\n backbone: `Backbone`\n heads: `Dict[str, Head]`\n loss_weights: `Dict[str, float]`, optional (default = `equal weighting`)\n If you want, you can specify a weight for each head, which we will multiply the loss by when\n aggregating across heads. This is equivalent in many cases to specifying a separate\n learning rate per head, and just putting a weighting on the loss is much easier than\n figuring out the right way to specify that in the optimizer.\n arg_name_mapping: `Dict[str, Dict[str, str]]`, optional (default = `identity mapping`)\n The mapping changes the names in the `**kwargs` dictionary passed to `forward` before\n passing on the arguments to the backbone and heads. This is keyed by component, and the\n top-level keys must match the keys passed in the `heads` parameter, plus a \"backbone\" key\n for the backbone. If you are using dataset readers that use dataset-specific names for\n their keys, this lets you change them to be consistent. For example, this dictionary might\n end up looking like this: `{\"backbone\": {\"question\": \"text\", \"review\": \"text\"},\n \"classifier1\": {\"sentiment\": \"label\"}, \"classifier2\": {\"topic\": \"label\"}}`.\n Though in this particular example, we have two different inputs mapping to the same key in\n the backbone; this will work, as long are you are careful that you don't give both of those\n inputs in the same batch. If we see overlapping keys, we will crash. If you want to be able\n to do this kind of mixed training in the same batch, you need to handle that in your data\n code, not here; we won't handle complex batching inside this model.\n allowed_arguments: `Dict[str, Set[str]]`, optional (default = `inferred`)\n The list of arguments that should be passed from `**kwargs` to the `forward` method for the\n backbone and each head. 
If you provide this, the keys in here should match the keys given\n in the `heads` parameter, plus a \"backbone\" key for the backbone arguments. If not given,\n we will use the `inspect` module to figure this out. The only time that this inference\n might fail is if you have optional arguments that you want to be ignored, or\n something. You very likely don't need to worry about this argument.\n initializer: `InitializerApplicator`, optional (default=`InitializerApplicator()`)\n If provided, will be used to initialize the model parameters.\n \"\"\"\n\n default_predictor = \"multitask\"\n\n def __init__(\n self,\n vocab: Vocabulary,\n backbone: Backbone,\n heads: Dict[str, Head],\n *,\n loss_weights: Dict[str, float] = None,\n arg_name_mapping: Dict[str, Dict[str, str]] = None,\n allowed_arguments: Dict[str, Set[str]] = None,\n initializer: InitializerApplicator = InitializerApplicator(),\n **kwargs,\n ):\n super().__init__(vocab, **kwargs)\n self._backbone = backbone\n self._heads = torch.nn.ModuleDict(heads)\n self._heads_called: Set[str] = set()\n self._arg_name_mapping = arg_name_mapping or defaultdict(dict)\n\n self._allowed_arguments = allowed_arguments or {\n \"backbone\": get_forward_arguments(backbone),\n **{key: get_forward_arguments(heads[key]) for key in heads},\n }\n self._loss_weights = loss_weights or defaultdict(lambda: 1.0)\n initializer(self)\n\n def forward(self, **kwargs) -> Dict[str, torch.Tensor]: # type: ignore\n if \"task\" not in kwargs:\n raise ValueError(\n \"Instances for multitask training need to contain a MetadataField with \"\n \"the name 'task' to indicate which task they belong to. Usually the \"\n \"MultitaskDataLoader provides this field and you don't have to do anything.\"\n )\n\n task_indices_just_for_mypy: Mapping[str, List[int]] = defaultdict(lambda: [])\n for i, task in enumerate(kwargs[\"task\"]):\n task_indices_just_for_mypy[task].append(i)\n task_indices: Dict[str, torch.LongTensor] = {\n task: torch.LongTensor(indices) for task, indices in task_indices_just_for_mypy.items()\n }\n\n def make_inputs_for_task(\n task: str, whole_batch_input: Union[torch.Tensor, TextFieldTensors, List]\n ):\n if isinstance(whole_batch_input, dict):\n for k1, v1 in whole_batch_input.items():\n for k2, v2 in v1.items():\n whole_batch_input[k1][k2] = make_inputs_for_task(task, v2)\n\n return whole_batch_input\n\n if isinstance(whole_batch_input, torch.Tensor):\n task_indices[task] = task_indices[task].to(whole_batch_input.device)\n return torch.index_select(whole_batch_input, 0, task_indices[task])\n else:\n return [whole_batch_input[i] for i in task_indices[task]]\n\n backbone_arguments = self._get_arguments(kwargs, \"backbone\")\n backbone_outputs = self._backbone(**backbone_arguments)\n combined_arguments = {**backbone_outputs, **kwargs}\n\n outputs = {**backbone_outputs}\n loss = None\n for head_name in self._heads:\n if head_name not in task_indices:\n continue\n\n head_arguments = self._get_arguments(combined_arguments, head_name)\n head_arguments = {\n key: make_inputs_for_task(head_name, value) for key, value in head_arguments.items()\n }\n\n head_outputs = self._heads[head_name](**head_arguments)\n for key in head_outputs:\n outputs[f\"{head_name}_{key}\"] = head_outputs[key]\n\n if \"loss\" in head_outputs:\n self._heads_called.add(head_name)\n head_loss = self._loss_weights[head_name] * head_outputs[\"loss\"]\n if loss is None:\n loss = head_loss\n else:\n loss += head_loss\n\n if loss is not None:\n outputs[\"loss\"] = loss\n\n return outputs\n\n def 
_get_arguments(self, available_args: Dict[str, Any], component: str) -> Dict[str, Any]:\n \"\"\"\n Given a list of things we might want to pass to a component (where \"component\" is either the\n backbone or a head), this method figures out which things we should actually pass, by\n mapping names and looking at allowed arguments.\n \"\"\"\n allowed_args = self._allowed_arguments[component]\n name_mapping = self._arg_name_mapping.get(component, {})\n kept_arguments = {}\n for key, value in available_args.items():\n new_key = name_mapping.get(key, key)\n if new_key in allowed_args:\n if new_key in kept_arguments:\n raise ValueError(\n f\"Got duplicate argument {new_key} for {component}. This likely means that\"\n \" you mapped multiple inputs to the same name. This is generally ok for\"\n \" the backbone, but you have to be sure each batch only gets one of those\"\n \" inputs. This is typically not ok for heads, and means something is not\"\n \" set up right.\"\n )\n kept_arguments[new_key] = value\n return kept_arguments\n\n @overrides\n def get_metrics(self, reset: bool = False) -> Dict[str, float]:\n metrics = {}\n for head_name in self._heads_called:\n for key, value in self._heads[head_name].get_metrics(reset).items():\n metrics[f\"{head_name}_{key}\"] = value\n if reset:\n self._heads_called.clear()\n return metrics\n\n @overrides\n def make_output_human_readable(\n self, output_dict: Dict[str, torch.Tensor]\n ) -> Dict[str, torch.Tensor]:\n output_dict = self._backbone.make_output_human_readable(output_dict)\n for head_name, head in self._heads.items():\n head_outputs = {}\n for key, value in output_dict.items():\n if key.startswith(head_name):\n head_outputs[key.replace(f\"{head_name}_\", \"\")] = value\n readable_head_outputs = head.make_output_human_readable(head_outputs)\n for key, value in readable_head_outputs.items():\n output_dict[f\"{head_name}_{key}\"] = value\n return output_dict\n"
] | [
[
"torch.nn.ModuleDict",
"torch.index_select",
"torch.LongTensor"
]
] |
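The row above is AllenNLP's `MultiTaskModel`, whose `forward` groups batch indices by task and routes backbone outputs to each head with `torch.index_select`. A stripped-down sketch of that routing step in isolation — the task names and tensor contents are invented for illustration:

```python
# Illustrative sketch only (not from the dataset row above): routing rows of
# a batch to per-task heads with torch.index_select, as MultiTaskModel.forward
# does. The task labels and tensor values are made up.
from collections import defaultdict
import torch

tasks = ["tagger", "classifier", "tagger", "classifier"]    # one label per instance
encoded = torch.arange(12, dtype=torch.float32).view(4, 3)  # stand-in backbone output

task_indices = defaultdict(list)
for i, task in enumerate(tasks):
    task_indices[task].append(i)

per_task_inputs = {
    task: torch.index_select(encoded, 0, torch.LongTensor(indices))
    for task, indices in task_indices.items()
}

print(per_task_inputs["tagger"].shape)  # torch.Size([2, 3]) -> rows 0 and 2
print(per_task_inputs["classifier"])    # rows 1 and 3 of the batch
```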
dpiponi/jax | [
"c40f5a991b50edee7820830f783218f7128feccc"
] | [
"jax/lax/lax_control_flow.py"
] | [
"# coding=utf-8\n# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nControl flow primitives.\n\"\"\"\n\n\nimport collections\nimport functools\nimport inspect\nimport itertools\nimport operator\nimport threading\nfrom typing import Callable, Sequence\n\nimport numpy as onp\n\nimport jax\nfrom jax import core\nfrom jax import dtypes\nfrom jax import util\nfrom jax.lax import lax\nfrom jax import linear_util as lu\nfrom jax.abstract_arrays import ConcreteArray, ShapedArray, raise_to_shaped\nfrom jax.api_util import flatten_fun_nokwargs, apply_flat_fun_nokwargs\nfrom jax.core import get_aval\nfrom jax.interpreters import ad\nfrom jax.interpreters import partial_eval as pe\nfrom jax.interpreters import xla\nfrom jax.interpreters import batching\nfrom jax.interpreters import masking\nfrom jax.lib import xla_bridge as xb\nfrom jax.lib import xla_client\nfrom jax.util import (partial, unzip2, unzip4, safe_map, safe_zip, split_list,\n split_dict, cache, extend_name_stack)\nfrom jax.tree_util import (tree_flatten, tree_unflatten, treedef_is_leaf,\n treedef_children, treedef_tuple, tree_leaves,\n tree_map, tree_multimap)\nfrom jax import ad_util\n\nxops = xla_client.ops\n\n_map = safe_map\nzip = safe_zip\n_reduce = functools.reduce\n\n@cache()\ndef _initial_style_untyped_jaxpr(fun: Callable, in_tree, in_avals):\n in_pvals = [pe.PartialVal.unknown(aval) for aval in in_avals]\n wrapped_fun, out_tree = flatten_fun_nokwargs(lu.wrap_init(fun), in_tree)\n with core.initial_style_staging():\n jaxpr, out_pvals, consts = pe.trace_to_jaxpr(\n wrapped_fun, in_pvals, instantiate=True, stage_out=False)\n return jaxpr, out_pvals, consts, out_tree\n\n@cache()\ndef _initial_style_jaxpr(fun: Callable, in_tree, in_avals):\n jaxpr, out_pvals, consts, out_tree = _initial_style_untyped_jaxpr(\n fun, in_tree, in_avals)\n out_avals = _map(raise_to_shaped, unzip2(out_pvals)[0])\n const_avals = tuple(raise_to_shaped(core.get_aval(c)) for c in consts)\n typed_jaxpr = core.TypedJaxpr(pe.convert_constvars_jaxpr(jaxpr),\n (), const_avals + in_avals, out_avals)\n return typed_jaxpr, consts, out_tree()\n\ndef _initial_style_jaxprs_with_common_consts(funs: Sequence[Callable],\n in_tree, in_avals):\n # When staging the branches of a conditional into jaxprs, constants are\n # extracted from each branch and converted to jaxpr arguments. To use the\n # staged jaxprs as the branches to a conditional *primitive*, we need for\n # their (input) signatures to match. 
This function \"joins\" the staged jaxprs:\n # for each one, it makes another that accepts *all* constants, but only uses\n # those that it needs (dropping the rest).\n\n jaxprs, all_out_pvals, all_consts, all_out_trees = unzip4([\n _initial_style_untyped_jaxpr(fun, in_tree, in_avals) for fun in funs])\n\n newvar = core.gensym('_') # TODO(frostig): safer gensym\n all_const_avals = tuple(\n tuple(raise_to_shaped(core.get_aval(c)) for c in consts)\n for consts in all_consts)\n unused_const_vars = tuple(\n tuple(newvar(aval) for aval in const_avals)\n for const_avals in all_const_avals)\n\n def pad_jaxpr_constvars(i, jaxpr):\n prefix = util.concatenate(unused_const_vars[:i])\n suffix = util.concatenate(unused_const_vars[i+1:])\n constvars = prefix + jaxpr.constvars + suffix\n return core.Jaxpr(constvars=constvars, invars=jaxpr.invars,\n outvars=jaxpr.outvars, eqns=jaxpr.eqns)\n\n const_avals = tuple(util.concatenate(all_const_avals))\n\n def type_and_const_convert_jaxpr(jaxpr, out_pvals):\n out_avals = _map(raise_to_shaped, unzip2(out_pvals)[0])\n return core.TypedJaxpr(pe.convert_constvars_jaxpr(jaxpr),\n (), const_avals + in_avals, out_avals)\n\n jaxprs = [pad_jaxpr_constvars(i, jaxpr) for i, jaxpr in enumerate(jaxprs)]\n typed_jaxprs = _map(type_and_const_convert_jaxpr, jaxprs, all_out_pvals)\n\n return (tuple(typed_jaxprs),\n tuple(util.concatenate(all_consts)),\n tuple(out_tree() for out_tree in all_out_trees))\n\ndef _abstractify(x):\n return raise_to_shaped(core.get_aval(x))\n\ndef typecheck(aval, x):\n aval = raise_to_shaped(aval).strip_weak_type()\n try:\n return aval == core.lattice_join(aval, core.get_aval(x)).strip_weak_type()\n except TypeError:\n return False\n\ndef typematch(aval1, aval2):\n return (raise_to_shaped(aval1).strip_weak_type() ==\n raise_to_shaped(aval2).strip_weak_type())\n\ndef _disable_jit_impl(prim, interp, *args, **kwargs):\n if jax.api._jit_is_disabled():\n return interp(*args, **kwargs)\n else:\n return xla.apply_primitive(prim, *args, **kwargs)\n\n\n### fori_loop and while_loop\n\ndef _fori_cond_fun(loop_carry):\n i, upper, _ = loop_carry\n return lax.lt(i, upper)\n\n@cache()\ndef _fori_body_fun(body_fun):\n def while_body_fun(loop_carry):\n i, upper, x = loop_carry\n return lax.add(i, lax._const(i, 1)), upper, body_fun(i, x)\n return while_body_fun\n\n@cache()\ndef _fori_scan_body_fun(body_fun):\n def scanned_fun(loop_carry, _):\n i, upper, x = loop_carry\n return (lax.add(i, lax._const(i, 1)), upper, body_fun(i, x)), None\n return scanned_fun\n\ndef fori_loop(lower, upper, body_fun, init_val):\n \"\"\"Loop from ``lower`` to ``upper`` by reduction to ``while_loop``.\n\n The type signature in brief is\n\n .. code-block:: haskell\n\n fori_loop :: Int -> Int -> ((int, a) -> a) -> a -> a\n\n The semantics of ``fori_loop`` are given by this Python implementation::\n\n def fori_loop(lower, upper, body_fun, init_val):\n val = init_val\n for i in range(lower, upper):\n val = body_fun(i, val)\n return val\n\n Unlike that Python version, ``fori_loop`` is implemented in terms of a call to\n ``while_loop``. See the docstring for ``while_loop`` for more information.\n\n Also unlike the Python analogue, the loop-carried value ``val`` must hold a\n fixed shape and dtype across all iterations (and not just be consistent up to\n NumPy rank/shape broadcasting and dtype promotion rules, for example). 
In\n other words, the type ``a`` in the type signature above represents an array\n with a fixed shape and dtype (or a nested tuple/list/dict container data\n structure with a fixed structure and arrays with fixed shape and dtype at the\n leaves).\n\n Args:\n lower: an integer representing the loop index lower bound (inclusive)\n upper: an integer representing the loop index upper bound (exclusive)\n body_fun: function of type ``(int, a) -> a``.\n init_val: initial loop carry value of type ``a``.\n\n Returns:\n Loop value from the final iteration, of type ``a``.\n \"\"\"\n # TODO(phawkins): perhaps do more type checking here, better error messages.\n lower_dtype = dtypes.canonicalize_dtype(lax.dtype(lower))\n upper_dtype = dtypes.canonicalize_dtype(lax.dtype(upper))\n if lower_dtype != upper_dtype:\n msg = (\"lower and upper arguments to fori_loop must have equal types, \"\n \"got {} and {}\")\n raise TypeError(msg.format(lower_dtype.name, upper_dtype.name))\n\n # If we can specialize on the trip count, call scan instead of a while_loop\n # to enable efficient reverse-mode differentiation.\n try:\n lower_ = int(lower)\n upper_ = int(upper)\n except TypeError:\n use_scan = False\n else:\n use_scan = False # TODO(mattjj): re-enable this\n\n if use_scan:\n (_, _, result), _ = scan(_fori_scan_body_fun(body_fun),\n (lower, upper, init_val), None,\n length=upper_ - lower_)\n else:\n _, _, result = while_loop(_fori_cond_fun, _fori_body_fun(body_fun),\n (lower, upper, init_val))\n return result\n\n\ndef while_loop(cond_fun, body_fun, init_val):\n \"\"\"Call ``body_fun`` repeatedly in a loop while ``cond_fun`` is True.\n\n The type signature in brief is\n\n .. code-block:: haskell\n\n while_loop :: (a -> Bool) -> (a -> a) -> a -> a\n\n The semantics of ``while_loop`` are given by this Python implementation::\n\n def while_loop(cond_fun, body_fun, init_val):\n val = init_val\n while cond_fun(val):\n val = body_fun(val)\n return val\n\n Unlike that Python version, ``while_loop`` is a JAX primitive and is lowered\n to a single XLA While HLO. That makes it useful for reducing compilation times\n for jit-compiled functions, since native Python loop constructs in an ``@jit``\n function are unrolled, leading to large XLA computations.\n\n Also unlike the Python analogue, the loop-carried value ``val`` must hold a\n fixed shape and dtype across all iterations (and not just be consistent up to\n NumPy rank/shape broadcasting and dtype promotion rules, for example). In\n other words, the type ``a`` in the type signature above represents an array\n with a fixed shape and dtype (or a nested tuple/list/dict container data\n structure with a fixed structure and arrays with fixed shape and dtype at the\n leaves).\n\n Another difference from using Python-native loop constructs is that\n ``while_loop`` is not reverse-mode differentiable because XLA computations\n require static bounds on memory requirements.\n\n Args:\n cond_fun: function of type ``a -> Bool``.\n body_fun: function of type ``a -> a``.\n init_val: value of type ``a``, a type that can be a scalar, array, or any\n pytree (nested Python tuple/list/dict) thereof, representing the initial\n loop carry value.\n\n Returns:\n The output from the final iteration of body_fun, of type ``a``.\n \"\"\"\n if jax.api._jit_is_disabled():\n try:\n val = init_val\n while cond_fun(val):\n val = body_fun(val)\n return val\n except core.ConcretizationTypeError:\n # Can't run this while_loop in Python (e.g. 
because there's a vmap\n # transformation on it), so we fall back to the primitive version.\n pass\n\n init_vals, in_tree = tree_flatten((init_val,))\n init_avals = tuple(_map(_abstractify, init_vals))\n cond_jaxpr, cond_consts, cond_tree = _initial_style_jaxpr(cond_fun, in_tree, init_avals)\n body_jaxpr, body_consts, body_tree = _initial_style_jaxpr(body_fun, in_tree, init_avals)\n if not treedef_is_leaf(cond_tree) or len(cond_jaxpr.out_avals) != 1:\n msg = \"cond_fun must return a boolean scalar, but got pytree {}.\"\n raise TypeError(msg.format(cond_tree))\n if cond_jaxpr.out_avals[0].strip_weak_type() != ShapedArray((), onp.bool_):\n msg = \"cond_fun must return a boolean scalar, but got output type(s) {}.\"\n raise TypeError(msg.format(cond_jaxpr.out_avals))\n\n in_tree_children = in_tree.children()\n assert len(in_tree_children) == 1\n _check_tree_and_avals(\"body_fun output and input\",\n # Extract the subtree and avals for the first element of the return tuple\n body_tree, body_jaxpr.out_avals,\n in_tree_children[0], init_avals)\n outs = while_p.bind(*itertools.chain(cond_consts, body_consts, init_vals),\n cond_nconsts=len(cond_consts), cond_jaxpr=cond_jaxpr,\n body_nconsts=len(body_consts), body_jaxpr=body_jaxpr)\n return tree_unflatten(body_tree, outs)\n\ndef _while_loop_abstract_eval(*args, **kwargs):\n return _map(raise_to_shaped, kwargs[\"body_jaxpr\"].out_avals)\n\ndef _while_loop_translation_rule(c, axis_env, name_stack, avals, backend, *args,\n cond_jaxpr, body_jaxpr, cond_nconsts, body_nconsts):\n cond_consts, body_consts, init_vals = split_list(args, [cond_nconsts, body_nconsts])\n batched = bool(cond_jaxpr.out_avals[0].shape)\n\n # Since jaxprs don't have tuples and have multiple return values, but we need\n # the HLO While loop to take a single tuple input and output a single boolean\n # (for the cond computation) or a single tuple output (for the body\n # computation), we build XLA computations that handle the tuple munging before\n # generating a Call into the computations formed from the jaxprs.\n\n init_carry = xops.Tuple(c, cond_consts + body_consts + init_vals)\n\n cond_c = xb.make_computation_builder(\"cond_computation\")\n cond_carry = xb.parameter(cond_c, 0, c.get_shape(init_carry))\n cond_carry_elts = [xops.GetTupleElement(cond_carry, i) for i in range(len(args))]\n x, _, z = split_list(cond_carry_elts, [cond_nconsts, body_nconsts])\n pred, = xla.jaxpr_subcomp(cond_c, cond_jaxpr.jaxpr, backend, axis_env,\n _map(partial(xb.constant, cond_c),\n cond_jaxpr.literals),\n extend_name_stack(name_stack, 'cond'), *(x + z))\n if batched:\n scalar = ShapedArray((), onp.bool_)\n or_ = xla.primitive_subcomputation(lax.or_p, scalar, scalar)\n pred = xops.Reduce(cond_c, [pred], [xb.constant(cond_c, onp.array(False))], or_,\n list(range(cond_jaxpr.out_avals[0].ndim)))\n\n body_c = xb.make_computation_builder(\"body_computation\")\n body_carry = xb.parameter(body_c, 0, c.get_shape(init_carry))\n body_carry_elts = [xops.GetTupleElement(body_carry, i) for i in range(len(args))]\n x, y, z = split_list(body_carry_elts, [cond_nconsts, body_nconsts])\n new_z = xla.jaxpr_subcomp(body_c, body_jaxpr.jaxpr, backend, axis_env,\n _map(partial(xb.constant, body_c), body_jaxpr.literals),\n extend_name_stack(name_stack, 'body'), *(y + z))\n if batched:\n body_pred, = xla.jaxpr_subcomp(body_c, cond_jaxpr.jaxpr, backend, axis_env,\n _map(partial(xb.constant, body_c), cond_jaxpr.literals),\n extend_name_stack(name_stack, 'body_pred'), *(x + z))\n new_z = _map(partial(_pred_bcast_select, body_c, 
body_pred), new_z, z)\n assert _map(body_c.get_shape, new_z) == _map(body_c.get_shape, z) # no broadcast\n new_carry = xops.Tuple(body_c, list(itertools.chain(x, y, new_z)))\n\n ans = xops.While(cond_c.build(pred), body_c.build(new_carry), init_carry)\n ans_elts = [xops.GetTupleElement(ans, i) for i in range(len(args))]\n _, _, z = split_list(ans_elts, [cond_nconsts, body_nconsts])\n return xops.Tuple(c, z)\n\ndef _pred_bcast_select(c, pred, x, y):\n pred_shape = c.get_shape(pred).dimensions()\n x_shape = c.get_shape(x).dimensions()\n y_shape = c.get_shape(y).dimensions()\n assert x_shape == y_shape\n assert pred_shape == x_shape[:len(pred_shape)] == y_shape[:len(pred_shape)]\n bcast_pred = xops.BroadcastInDim(pred, x_shape, list(range(len(pred_shape))))\n return xops.Select(bcast_pred, x, y)\n\ndef _while_loop_batching_rule(args, dims, cond_nconsts, cond_jaxpr,\n body_nconsts, body_jaxpr):\n size, = {x.shape[d] for x, d in zip(args, dims) if d is not batching.not_mapped}\n orig_batched = [d is not batching.not_mapped for d in dims]\n cconst_bat, bconst_bat, init_bat = split_list(orig_batched, [cond_nconsts, body_nconsts])\n\n # Fixpoint computation of which carry are batched: either\n # batched from init, or the carry out is batched. Each iteration promotes\n # at least one carry to batched. We need at most len(carry) iterations,\n # but we need one last iteration to prepare the jaxpr based on the final\n # carry_bat.\n carry_bat = init_bat\n for _ in range(1 + len(carry_bat)):\n batched = bconst_bat + carry_bat\n body_jaxpr_batched, carry_bat_out = batching.batch_jaxpr(\n body_jaxpr, size, batched, instantiate=carry_bat)\n cond_jaxpr_batched, (pred_bat,) = batching.batch_jaxpr(\n cond_jaxpr, size, cconst_bat + carry_bat,\n instantiate=bool(cond_jaxpr.out_avals[0].shape))\n carry_bat_out = _map(partial(operator.or_, pred_bat), carry_bat_out)\n if carry_bat_out == carry_bat:\n break\n else:\n carry_bat = _map(operator.or_, carry_bat, carry_bat_out)\n else:\n assert False, \"Fixpoint not reached\"\n\n consts, init = split_list(args, [cond_nconsts + body_nconsts])\n const_dims, init_dims = split_list(dims, [cond_nconsts + body_nconsts])\n new_consts = [batching.moveaxis(x, d, 0) if d is not batching.not_mapped and d != 0\n else x for x, d in zip(consts, const_dims)]\n new_init = [batching.broadcast(x, size, 0) if now_bat and not was_bat\n else batching.moveaxis(x, d, 0) if now_bat and d != 0 else x\n for x, d, was_bat, now_bat in zip(init, init_dims, init_bat, carry_bat)]\n\n outs = while_p.bind(*(new_consts + new_init),\n cond_nconsts=cond_nconsts, cond_jaxpr=cond_jaxpr_batched,\n body_nconsts=body_nconsts, body_jaxpr=body_jaxpr_batched)\n out_bdims = [0 if b else batching.not_mapped for b in carry_bat]\n return outs, out_bdims\n\ndef _while_loop_jvp(primals, tangents, cond_nconsts, cond_jaxpr, body_nconsts,\n body_jaxpr):\n nonzeros = [t is not ad_util.zero for t in tangents]\n cconst_nz, bconst_nz, init_nz = split_list(nonzeros, [cond_nconsts, body_nconsts])\n\n carry_nz = init_nz\n for _ in range(1 + len(carry_nz)):\n body_nonzeros = bconst_nz + carry_nz\n body_jvp, nonzeros_out = ad.jvp_jaxpr(\n body_jaxpr, body_nonzeros, instantiate=carry_nz)\n if nonzeros_out == carry_nz:\n break\n carry_nz = _map(operator.or_, carry_nz, nonzeros_out)\n else:\n assert False, \"Fixpoint not reached\"\n\n nonzeros = cconst_nz + body_nonzeros\n tangents = [ad.instantiate_zeros(x, t) if t is ad_util.zero and nz else t\n for x, t, nz in zip(primals, tangents, nonzeros)]\n\n cconst, bconst, init = 
split_list(primals, [cond_nconsts, body_nconsts])\n _, bconst_dot, init_dot = split_list(tangents, [cond_nconsts, body_nconsts])\n bconst_dot = _prune_zeros(bconst_dot)\n init_dot = _prune_zeros(init_dot)\n\n num_carry = len(primals) - cond_nconsts - body_nconsts\n\n body_jvp_rearranged = ad.rearrange_binders(\n body_jvp,\n [body_nconsts, num_carry], [len(bconst_dot), len(init_dot)],\n [num_carry], [len(init_dot)])\n\n newvar = core.gensym('')\n invars_aug = (\n cond_jaxpr.jaxpr.invars + [newvar(get_aval(x)) for x in init_dot])\n cond_jaxpr_augmented = core.Jaxpr(cond_jaxpr.jaxpr.constvars,\n invars_aug,\n cond_jaxpr.jaxpr.outvars,\n cond_jaxpr.jaxpr.eqns)\n in_avals_aug = (cond_jaxpr.in_avals[:cond_nconsts] +\n body_jvp_rearranged.in_avals[body_nconsts + len(bconst_dot):])\n cond_jaxpr_augmented = core.TypedJaxpr(cond_jaxpr_augmented,\n cond_jaxpr.literals,\n in_avals_aug,\n cond_jaxpr.out_avals)\n\n out = while_p.bind(\n *(cconst + bconst + bconst_dot + init + init_dot),\n cond_nconsts=cond_nconsts,\n cond_jaxpr=cond_jaxpr_augmented,\n body_nconsts=len(bconst) + len(bconst_dot),\n body_jaxpr=body_jvp_rearranged)\n\n out_carry, out_carry_dot = split_list(out, [num_carry])\n out_tangents_iter = iter(out_carry_dot)\n out_tangents = [next(out_tangents_iter) if nz else ad_util.zero\n for nz in nonzeros_out]\n return out_carry, out_tangents\n\ndef _while_partial_eval(trace: pe.JaxprTrace, *tracers: pe.Tracer, cond_nconsts: int,\n cond_jaxpr: pe.TypedJaxpr, body_nconsts: int,\n body_jaxpr: pe.TypedJaxpr) -> Sequence[pe.Tracer]:\n \"\"\"An implementation of partial evaluation for while.\n As long as some carry (and hence output) are known and the output\n of `cond_jaxpr` is known, we use a portion of the loop body to compute the known\n outputs of the `while_loop`. For the unknown outputs we generate Jaxpr to run\n the whole while, including recomputing the known parts.\n\n This means that we don't actually save any computation by partial\n evaluation if there are unknown outputs.\n\n What this achieves is that we can give a proper error for reverse\n differentiation of `while`, because in that use of partial evaluation the\n primal inputs are considered \"known\", and only the tangent computation is\n unknown (see issue #2129).\n \"\"\"\n unknowns = [not t.pval.is_known() for t in tracers]\n params = dict(cond_nconsts=cond_nconsts, cond_jaxpr=cond_jaxpr,\n body_nconsts=body_nconsts, body_jaxpr=body_jaxpr)\n\n cond_consts_uk, body_consts_uk, carry_init_uk = split_list(unknowns, [cond_nconsts, body_nconsts])\n # Fixpoint computation of unknown carry. Each iteration promotes\n # at least one carry to unknown. We need one last iteration to prepare the jaxpr.\n carry_uk = carry_init_uk\n for _ in range(1 + len(carry_uk)):\n body_jaxpr_known, _, carry_out_uk = pe.partial_eval_jaxpr(\n body_jaxpr, body_consts_uk + carry_uk, instantiate=carry_uk,\n trace_type=trace.master.trace_type)\n if carry_out_uk == carry_uk:\n break\n else:\n carry_uk = _map(operator.or_, carry_uk, carry_out_uk)\n else:\n assert False, \"Fixpoint not reached\"\n\n cond_jaxpr_known, _, cond_uk = pe.partial_eval_jaxpr(\n cond_jaxpr, cond_consts_uk + carry_uk, instantiate=False,\n trace_type=trace.master.trace_type)\n\n if cond_uk[0] or all([not uk for uk in unknowns]) or all(unknowns):\n # If conditional is unknown, or all inputs are known, or all are unknown,\n # just do the default processing.\n return trace.default_process_primitive(while_p, tracers, params)\n\n # Run the known part of the while. 
Prepare the inputs, as constants (if known), or\n # as core.unit.\n in_consts = [ core.unit if uk else t.pval.get_known()\n for uk, t in zip(cond_consts_uk + body_consts_uk + carry_uk,\n tracers)]\n # There should be no residuals for the cond_jaxpr_known\n assert 1 == len(cond_jaxpr_known.out_avals)\n # We ignore the residuals from the body_jaxpr_known, so the type of inputs matches\n # the type of outputs; residuals are at the end\n if len(body_jaxpr_known.out_avals) > len(body_jaxpr.out_avals):\n # TODO(necula): this is not quite enough; we should drop the residual computations also\n body_jaxpr_known.out_avals = body_jaxpr_known.out_avals[:len(body_jaxpr.out_avals)]\n body_jaxpr_known.jaxpr.outvars = body_jaxpr_known.jaxpr.outvars[:len(body_jaxpr.out_avals)]\n out_known = while_p.bind(\n *in_consts,\n cond_nconsts=cond_nconsts,\n cond_jaxpr=cond_jaxpr_known,\n body_nconsts=body_nconsts,\n body_jaxpr=body_jaxpr_known)\n\n # Run the whole while_loop to get all the outputs, then merge with known ones\n out_all: Sequence[pe.Tracer] = trace.default_process_primitive(while_p, tracers, params)\n out_tracers: Sequence[pe.Tracer] = [\n out_unknown if uk\n else pe.JaxprTracer(trace, pe.PartialVal.known(known), out_unknown.recipe)\n for uk, out_unknown, known in zip(carry_uk, out_all, out_known)]\n\n return out_tracers\n\ndef _while_transpose_error(*_, **kwargs):\n raise ValueError(\"Reverse-mode differentiation does not work for \"\n \"lax.while_loop or lax.fori_loop. \"\n \"Try using lax.scan instead.\")\n\nwhile_p = lax.Primitive('while')\nwhile_p.multiple_results = True\nwhile_p.def_impl(partial(xla.apply_primitive, while_p))\nwhile_p.def_abstract_eval(_while_loop_abstract_eval)\nad.primitive_jvps[while_p] = _while_loop_jvp\npe.custom_partial_eval_rules[while_p] = _while_partial_eval\nxla.initial_style_translations[while_p] = _while_loop_translation_rule\nad.primitive_transposes[while_p] = _while_transpose_error\nbatching.primitive_batchers[while_p] = _while_loop_batching_rule\n\n\n### cond\n\ndef cond(*args, **kwargs):\n \"\"\"Conditionally apply ``true_fun`` or ``false_fun``.\n\n Has equivalent semantics to this Python implementation::\n\n def cond(pred, true_fun, false_fun, operand):\n if pred:\n return true_fun(operand)\n else:\n return false_fun(operand)\n\n Pred must be a scalar type.\n\n Arguments:\n pred: Boolean scalar type, indicating which branch function to\n apply. 
Collections (list, tuple) are not supported.\n true_fun: Function (A -> B), to be applied if `pred` is True.\n false_fun: Function (A -> B), to be applied if `pred` is False.\n operand: Operand (A) input to either branch depending on `pred`.\n \"\"\"\n\n # detect an attempt to call the former, deprecated cond\n try:\n ba = inspect.signature(_cond_with_per_branch_args).bind(*args, **kwargs)\n except TypeError:\n pass\n else:\n return _cond_with_per_branch_args(*ba.args)\n\n return _cond(*args, **kwargs)\n\ndef _cond(pred, true_fun: Callable, false_fun: Callable, operand):\n if len(onp.shape(pred)) != 0:\n raise TypeError(\n f\"Pred must be a scalar, got {pred} of shape {onp.shape(pred)}.\")\n\n try:\n pred_dtype = dtypes.result_type(pred)\n except TypeError as err:\n msg = (\"Pred type must be either boolean or number, got {}.\")\n raise TypeError(msg.format(pred)) from err\n\n if pred_dtype.kind != 'b':\n if pred_dtype.kind in 'iuf':\n pred = pred != 0\n else:\n msg = (\"Pred type must be either boolean or number, got {}.\")\n raise TypeError(msg.format(pred_dtype))\n\n if jax.api._jit_is_disabled() and isinstance(core.get_aval(pred), ConcreteArray):\n if pred:\n return true_fun(operand)\n else:\n return false_fun(operand)\n\n ops, ops_tree = tree_flatten((operand,))\n ops_avals = tuple(_map(_abstractify, ops))\n\n jaxprs, consts, out_trees = _initial_style_jaxprs_with_common_consts(\n (true_fun, false_fun), ops_tree, ops_avals)\n true_jaxpr, false_jaxpr = jaxprs\n out_tree, false_out_tree = out_trees\n\n _check_tree_and_avals(\"true_fun and false_fun output\",\n out_tree, true_jaxpr.out_avals,\n false_out_tree, false_jaxpr.out_avals)\n\n linear = (False,) * (len(consts) + len(ops))\n out = cond_p.bind(\n pred, *consts, *ops,\n true_jaxpr=true_jaxpr, false_jaxpr=false_jaxpr, linear=linear)\n return tree_unflatten(out_tree, out)\n\ndef _cond_with_per_branch_args(pred,\n true_operand, true_fun: Callable,\n false_operand, false_fun: Callable):\n \"\"\"Conditionally apply ``true_fun`` or ``false_fun``.\n\n Has equivalent semantics to this Python implementation::\n\n def cond(pred, true_operand, true_fun, false_operand, false_fun):\n if pred:\n return true_fun(true_operand)\n else:\n return false_fun(false_operand)\n\n Pred has to be a scalar type, collection types (list, tuple) are not supported\n \"\"\"\n return _cond(pred,\n lambda op: true_fun(op[0]),\n lambda op: false_fun(op[1]),\n (true_operand, false_operand))\n\ndef _cond_abstract_eval(*args, **kwargs):\n return _map(raise_to_shaped, kwargs[\"true_jaxpr\"].out_avals)\n\ndef _cond_translation_rule(c, axis_env, name_stack, avals, backend,\n pred, *args, true_jaxpr, false_jaxpr, linear):\n del linear # Unused.\n\n def make_computation(name, jaxpr, op_shape):\n c = xb.make_computation_builder(name + '_comp')\n op = xb.parameter(c, 0, op_shape)\n ops = [xops.GetTupleElement(op, i) for i in range(len(jaxpr.in_avals))]\n outs = xla.jaxpr_subcomp(c, jaxpr.jaxpr, backend, axis_env,\n _map(partial(xb.constant, c), jaxpr.literals),\n extend_name_stack(name_stack, name + '_fun'), *ops)\n return c.build(xops.Tuple(c, outs))\n\n op = xops.Tuple(c, args)\n op_shape = c.get_shape(op)\n true_c = make_computation('true', true_jaxpr, op_shape)\n false_c = make_computation('false', false_jaxpr, op_shape)\n return xops.Conditional(pred, op, true_c, op, false_c)\n\ndef _cond_pred_bcast_select(pred, x, y):\n if core.get_aval(x) is core.get_aval(y) is core.abstract_unit:\n return x\n else:\n bcast_pred = lax.broadcast_in_dim(pred, onp.shape(x), 
list(range(onp.ndim(pred))))\n return lax.select(bcast_pred, x, y)\n\ndef _cond_batching_rule(args, dims, true_jaxpr, false_jaxpr, linear):\n # TODO: maybe avoid moving arg axes to front if we're promoting to select?\n size, = {x.shape[d] for x, d in zip(args, dims) if d is not batching.not_mapped}\n args = [batching.moveaxis(x, d, 0) if d is not batching.not_mapped and d != 0\n else x for x, d in zip(args, dims)]\n orig_bat = [d is not batching.not_mapped for d in dims]\n del dims\n pred, *ops = args\n pred_bat, *bat = orig_bat\n\n _, true_out_bat = batching.batch_jaxpr(true_jaxpr, size, bat, False)\n _, false_out_bat = batching.batch_jaxpr(false_jaxpr, size, bat, False)\n out_bat = [a or b for a, b in zip(true_out_bat, false_out_bat)]\n\n true_jaxpr_batched, _ = batching.batch_jaxpr(true_jaxpr, size, bat, out_bat)\n false_jaxpr_batched, _ = batching.batch_jaxpr(false_jaxpr, size, bat, out_bat)\n\n if pred_bat:\n true_out = core.jaxpr_as_fun(true_jaxpr_batched)(*ops)\n false_out = core.jaxpr_as_fun(false_jaxpr_batched)(*ops)\n true_out = [batching.broadcast(x, size, 0) if not b else x\n for x, b in zip(true_out, out_bat)]\n false_out = [batching.broadcast(x, size, 0) if not b else x\n for x, b in zip(false_out, out_bat)]\n return [_cond_pred_bcast_select(pred, t, f)\n for t, f in zip(true_out, false_out)], [0] * len(true_out)\n else:\n out_dims = [0 if b else batching.not_mapped for b in out_bat]\n out = cond_p.bind(\n pred, *ops,\n true_jaxpr=true_jaxpr_batched, false_jaxpr=false_jaxpr_batched,\n linear=linear)\n return out, out_dims\n\ndef _cond_jvp(primals, tangents, true_jaxpr, false_jaxpr, linear):\n nonzeros = [t is not ad_util.zero for t in tangents]\n\n pred_nz, *ops_nz = nonzeros\n assert pred_nz is False\n\n _, true_out_nz = ad.jvp_jaxpr(true_jaxpr, ops_nz, instantiate=False)\n _, false_out_nz = ad.jvp_jaxpr(false_jaxpr, ops_nz, instantiate=False)\n out_nz = [a or b for a, b in zip(true_out_nz, false_out_nz)]\n\n true_jvp, _ = ad.jvp_jaxpr(true_jaxpr, ops_nz, instantiate=out_nz)\n false_jvp, _ = ad.jvp_jaxpr(false_jaxpr, ops_nz, instantiate=out_nz)\n\n pred, *ops = primals\n _, *ops_dot = tangents\n ops_dot = _prune_zeros(ops_dot)\n\n ops_lin = tuple(linear)\n linear_jvp = ops_lin + (True,) * len(ops_dot)\n out = cond_p.bind(\n pred, *ops, *ops_dot,\n true_jaxpr=true_jvp, false_jaxpr=false_jvp, linear=linear_jvp)\n out_primals, out_tangents = split_list(out, [len(out_nz)])\n out_tangents_iter = iter(out_tangents)\n out_tangents = [\n next(out_tangents_iter) if nz else ad_util.zero for nz in out_nz]\n return out_primals, out_tangents\n\ndef _cond_partial_eval(trace, *tracers, true_jaxpr, false_jaxpr, linear):\n unknowns = [t.pval[0] is not None for t in tracers]\n\n pred_uk, *ops_uk = unknowns\n\n if pred_uk:\n # When the predicate is unknown, we stage out the whole cond.\n params = dict(true_jaxpr=true_jaxpr, false_jaxpr=false_jaxpr, linear=linear)\n return trace.default_process_primitive(cond_p, tracers, params)\n\n _, _, t_out_uks = pe.partial_eval_jaxpr(true_jaxpr, ops_uk, instantiate=False,\n trace_type=trace.master.trace_type)\n _, _, f_out_uks = pe.partial_eval_jaxpr(false_jaxpr, ops_uk, instantiate=False,\n trace_type=trace.master.trace_type)\n out_uks = [a or b for a, b in zip(t_out_uks, f_out_uks)]\n\n true_jaxpr_1, true_jaxpr_2, _ = pe.partial_eval_jaxpr(\n true_jaxpr, ops_uk, instantiate=out_uks,\n trace_type=trace.master.trace_type)\n false_jaxpr_1, false_jaxpr_2, _ = pe.partial_eval_jaxpr(\n false_jaxpr, ops_uk, instantiate=out_uks,\n 
trace_type=trace.master.trace_type)\n\n num_t_res = len(true_jaxpr_1.out_avals) - len(out_uks)\n num_f_res = len(false_jaxpr_1.out_avals) - len(out_uks)\n\n assert len(true_jaxpr.in_avals) == len(false_jaxpr.in_avals)\n assert len(true_jaxpr.in_avals) == len(tracers) - 1\n assert len(true_jaxpr.in_avals) == len(ops_uk)\n\n # Move the residuals to front\n move = [False] * len(ops_uk) + [True] * num_t_res\n true_jaxpr_2 = pe.move_binders_to_front(true_jaxpr_2, move)\n move = [False] * len(ops_uk) + [True] * num_f_res\n false_jaxpr_2 = pe.move_binders_to_front(false_jaxpr_2, move)\n\n # TODO(frostig,mattjj): pe.partial_eval_jaxpr should raise to shaped avals\n t_res_avals = _map(raise_to_shaped, true_jaxpr_2.in_avals[:num_t_res])\n f_res_avals = _map(raise_to_shaped, false_jaxpr_2.in_avals[:num_f_res])\n\n assert len(true_jaxpr_2.out_avals) == len(false_jaxpr_2.out_avals)\n num_outs = len(true_jaxpr_2.out_avals)\n\n # TODO(frostig): support joining a list of jaxpr/aval pairs rather than only a\n # true/false pair special case, in preparation for switch\n false_jaxpr_1 = _join_cond_outputs(\n false_jaxpr_1, num_outs, t_res_avals, zeros_on_left=False)\n true_jaxpr_1 = _join_cond_outputs(\n true_jaxpr_1, num_outs, f_res_avals, zeros_on_left=True)\n\n false_jaxpr_2, true_jaxpr_2 = _join_cond_pe_staged_jaxpr_inputs(\n [false_jaxpr_2, true_jaxpr_2], [f_res_avals, t_res_avals])\n\n # TODO(frostig,mattjj): reinstate this assertion once pe.partial_eval_jaxpr\n # raises to shaped avals\n # assert true_jaxpr_1.out_avals == false_jaxpr_1.out_avals\n num_res = num_t_res + num_f_res\n\n _, in_consts = unzip2([t.pval for t in tracers])\n out_consts_res = cond_p.bind(\n *in_consts, true_jaxpr=true_jaxpr_1, false_jaxpr=false_jaxpr_1,\n linear=linear)\n out_consts, res = split_list(out_consts_res, [len(out_consts_res) - num_res])\n\n # TODO(frostig,mattjj): remove raised_to_shaped of avals once\n # pe.partial_eval_jaxpr handles it\n out_avals = _map(raise_to_shaped, true_jaxpr_2.out_avals)\n out_pvs = [aval if uk else None for aval, uk in zip(out_avals, out_uks)]\n\n pred_tracer = trace.instantiate_const(tracers[0])\n\n ops_tracers = [trace.instantiate_const(t) if uk\n else trace.new_instantiated_literal(core.unit)\n for uk, t in zip(unknowns[1:], tracers[1:])]\n\n res_tracers = _map(trace.new_instantiated_const, res)\n\n out_tracers = [pe.JaxprTracer(trace, pe.PartialVal((pv, const)), None)\n for pv, const in zip(out_pvs, out_consts)]\n\n linear_2 = (False,) * num_res + linear\n params = dict(true_jaxpr=true_jaxpr_2, false_jaxpr=false_jaxpr_2,\n linear=linear_2)\n eqn = pe.new_eqn_recipe(\n [pred_tracer] + res_tracers + ops_tracers, out_tracers, cond_p, params)\n for t in out_tracers: t.recipe = eqn\n return out_tracers\n\ndef _join_cond_outputs(jaxpr, num_prefix, zeros_avals, zeros_on_left):\n @lu.wrap_init\n def f_aug(*args):\n prefix_and_rest = core.jaxpr_as_fun(jaxpr)(*args)\n prefix, rest = split_list(prefix_and_rest, [num_prefix])\n zeros = [ad_util.zeros_like_aval(a) for a in zeros_avals]\n if zeros_on_left:\n return prefix + zeros + rest\n else:\n return prefix + rest + zeros\n\n return _make_typed_jaxpr(f_aug, jaxpr.in_avals)\n\ndef _join_cond_pe_staged_jaxpr_inputs(jaxprs, res_avals_per_jaxpr):\n # When partially evaluating conditionals, each branch produces residuals\n # depending on the computation carried out by the branch, and a corresponding\n # staged jaxpr that accepts those residuals as its first few inputs. 
To use\n # these staged jaxprs as the branches of another conditional, we need for\n # their (input) signatures to match. This function \"joins\" the staged jaxprs:\n # for each one, it makes another that accepts *all* residuals, but still only\n # uses those that it needs (dropping the rest).\n\n newvar = core.gensym('~') # TODO(frostig): safer gensym\n unused_res_vars = tuple(\n tuple(newvar(aval) for aval in res_avals)\n for res_avals in res_avals_per_jaxpr)\n\n def pad_jaxpr_res_avals(i, jaxpr):\n res_vars_prefix = util.concatenate(unused_res_vars[:i])\n res_vars_suffix = util.concatenate(unused_res_vars[i+1:])\n res_avals_prefix = util.concatenate(res_avals_per_jaxpr[:i])\n res_avals_suffix = util.concatenate(res_avals_per_jaxpr[i+1:])\n\n res_avals = res_avals_per_jaxpr[i]\n num_res = len(res_avals)\n res_vars = jaxpr.jaxpr.invars[:num_res]\n\n non_res_vars = jaxpr.jaxpr.invars[num_res:]\n non_res_avals = jaxpr.in_avals[num_res:]\n\n aug_invars = res_vars_prefix + res_vars + res_vars_suffix + non_res_vars\n aug_avals = res_avals_prefix + res_avals + res_avals_suffix + non_res_avals\n\n jaxpr_aug = core.Jaxpr(jaxpr.jaxpr.constvars, aug_invars,\n jaxpr.jaxpr.outvars, jaxpr.jaxpr.eqns)\n jaxpr_aug = core.TypedJaxpr(jaxpr_aug, jaxpr.literals, aug_avals,\n jaxpr.out_avals)\n return jaxpr_aug\n\n return [pad_jaxpr_res_avals(i, jaxpr) for i, jaxpr in enumerate(jaxprs)]\n\ndef _transpose_cond_jaxpr(jaxpr, num_res):\n num_non_res = len(jaxpr.in_avals) - num_res\n res_avals, primal_avals = split_list(jaxpr.in_avals, [num_res])\n primal_avals = _map(raise_to_shaped, primal_avals)\n\n @lu.wrap_init\n def transposed(*args):\n res, cts_out = split_list(args, [num_res])\n primals = res + [ad.UndefinedPrimal(aval) for aval in primal_avals]\n cts_in = ad.backward_pass(\n jaxpr.jaxpr, jaxpr.literals, primals, cts_out)\n _, cts_in = split_list(cts_in, [num_res])\n return _map(ad.instantiate_zeros_aval, primal_avals, cts_in)\n\n return _make_typed_jaxpr(transposed, res_avals + jaxpr.out_avals)\n\ndef _cond_transpose(cts, *args, true_jaxpr, false_jaxpr, linear):\n pred, *ops = args\n in_avals = _map(raise_to_shaped, true_jaxpr.in_avals)\n num_res = len(ops) - sum(linear)\n\n t_jaxpr_trans = _transpose_cond_jaxpr(true_jaxpr, num_res)\n f_jaxpr_trans = _transpose_cond_jaxpr(false_jaxpr, num_res)\n lin_in_avals = _map(raise_to_shaped, [a for a, l in zip(in_avals, linear) if l])\n assert t_jaxpr_trans.out_avals == f_jaxpr_trans.out_avals == lin_in_avals\n\n res = ops[:num_res]\n cts = _map(ad.instantiate_zeros_aval, true_jaxpr.out_avals, cts)\n linear_trans = (False,) * num_res + (True,) * len(cts)\n\n out = cond_p.bind(\n pred, *res, *cts,\n true_jaxpr=t_jaxpr_trans, false_jaxpr=f_jaxpr_trans,\n linear=linear_trans)\n assert all(_map(typecheck, lin_in_avals, out))\n\n out_iter = iter(out)\n out = [next(out_iter) if l else None for l in linear]\n assert next(out_iter, None) is None\n return [None] + out\n\ndef cond_bind(*args, true_jaxpr, false_jaxpr, linear):\n if not core.skip_checks:\n assert len(linear) + 1 == len(args)\n assert len(args) == 1 + len(true_jaxpr.in_avals)\n assert len(true_jaxpr.in_avals) == len(false_jaxpr.in_avals)\n assert len(true_jaxpr.out_avals) == len(false_jaxpr.out_avals)\n assert all(_map(typematch, true_jaxpr.in_avals, false_jaxpr.in_avals))\n assert all(_map(typematch, true_jaxpr.out_avals, false_jaxpr.out_avals))\n pred, *ops = args\n assert all(_map(typecheck, true_jaxpr.in_avals, ops))\n assert all(_map(typecheck, false_jaxpr.in_avals, ops))\n 
core.check_jaxpr(true_jaxpr.jaxpr)\n core.check_jaxpr(false_jaxpr.jaxpr)\n return core.Primitive.bind(cond_p, *args, true_jaxpr=true_jaxpr,\n false_jaxpr=false_jaxpr, linear=linear)\n\ncond_p = lax.Primitive('cond')\ncond_p.multiple_results = True\ncond_p.def_impl(partial(xla.apply_primitive, cond_p))\ncond_p.def_abstract_eval(_cond_abstract_eval)\ncond_p.def_custom_bind(cond_bind)\nad.primitive_jvps[cond_p] = _cond_jvp\nad.primitive_transposes[cond_p] = _cond_transpose\npe.custom_partial_eval_rules[cond_p] = _cond_partial_eval\nbatching.primitive_batchers[cond_p] = _cond_batching_rule\nxla.initial_style_translations[cond_p] = _cond_translation_rule\n\n\n### scan\n\ndef scan(f, init, xs, length=None, reverse=False):\n \"\"\"Scan a function over leading array axes while carrying along state.\n\n The type signature in brief is\n\n .. code-block:: haskell\n\n scan :: (c -> a -> (c, b)) -> c -> [a] -> (c, [b])\n\n where we use [t] here to denote the type t with an additional leading axis.\n That is, if t is an array type then [t] represents the type with an additional\n leading axis, and if t is a pytree (container) type with array leaves then [t]\n represents the type with the same pytree structure and corresponding leaves\n each with an additional leading axis.\n\n When ``a`` is an array type or None, and ``b`` is an array type, the semantics\n of ``scan`` are given roughly by this Python implementation::\n\n def scan(f, init, xs, length=None):\n if xs is None:\n xs = [None] * length\n carry = init\n ys = []\n for x in xs:\n carry, y = f(carry, x)\n ys.append(y)\n return carry, np.stack(ys)\n\n Unlike that Python version, both ``a`` and ``b`` may be arbitrary pytree\n types, and so multiple arrays can be scanned over at once and produce multiple\n output arrays. (None is actually an empty pytree.)\n\n Also unlike that Python version, ``scan`` is a JAX primitive and is lowered to\n a single XLA While HLO. That makes it useful for reducing compilation times\n for jit-compiled functions, since native Python loop constructs in an ``@jit``\n function are unrolled, leading to large XLA computations.\n\n Finally, the loop-carried value ``carry`` must hold a fixed shape and dtype\n across all iterations (and not just be consistent up to NumPy rank/shape\n broadcasting and dtype promotion rules, for example). In other words, the type\n ``c`` in the type signature above represents an array with a fixed shape and\n dtype (or a nested tuple/list/dict container data structure with a fixed\n structure and arrays with fixed shape and dtype at the leaves).\n\n Args:\n f: a Python function to be scanned of type ``c -> a -> (c, b)``, meaning\n that ``f`` accepts two arguments where the first is a value of the loop\n carry and the second is a slice of ``xs`` along its leading axis, and that\n ``f`` returns a pair where the first element represents a new value for\n the loop carry and the second represents a slice of the output.\n init: an initial loop carry value of type ``c``, which can be a scalar,\n array, or any pytree (nested Python tuple/list/dict) thereof, representing\n the initial loop carry value. 
This value must have the same structure as\n the first element of the pair returned by ``f``.\n xs: the value of type ``[a]`` over which to scan along the leading axis,\n where ``[a]`` can be an array or any pytree (nested Python\n tuple/list/dict) thereof with consistent leading axis sizes.\n length: optional integer specifying the number of loop iterations, which\n must agree with the sizes of leading axes of the arrays in ``xs`` (but can\n be used to perform scans where no input ``xs`` are needed).\n reverse: optional boolean specifying whether to run the scan iteration\n forward (the default) or in reverse, equivalent to reversing the leading\n axes of the arrays in both ``xs`` and in ``ys``.\n\n Returns:\n A pair of type ``(c, [b])`` where the first element represents the final\n loop carry value and the second element represents the stacked outputs of\n the second output of ``f`` when scanned over the leading axis of the inputs.\n \"\"\"\n init_flat, init_tree = tree_flatten(init)\n xs_flat, xs_tree = tree_flatten(xs)\n in_flat, in_tree = tree_flatten((init, xs))\n\n try:\n lengths = [x.shape[0] for x in xs_flat]\n except AttributeError as err:\n msg = \"scan got value with no leading axis to scan over: {}.\"\n raise ValueError(\n msg.format(', '.join(str(x) for x in xs_flat\n if not hasattr(x, 'shape')))) from err\n\n if length is not None:\n length = int(length)\n if not all(length == l for l in lengths):\n msg = (\"scan got `length` argument of {} which disagrees with \"\n \"leading axis sizes {}.\")\n raise ValueError(msg.format(length, [x.shape[0] for x in xs_flat]))\n else:\n unique_lengths = set(lengths)\n if len(unique_lengths) > 1:\n msg = \"scan got values with different leading axis sizes: {}.\"\n raise ValueError(msg.format(', '.join(str(x.shape[0]) for x in xs_flat)))\n elif len(unique_lengths) == 0:\n msg = \"scan got no values to scan over and `length` not provided.\"\n raise ValueError(msg)\n else:\n length, = unique_lengths\n\n if jax.api._jit_is_disabled():\n carry = init\n ys = []\n maybe_reversed = reversed if reverse else lambda x: x\n for i in maybe_reversed(range(length)):\n xs_slice = [_index_array(i, core.get_aval(x), x) for x in xs_flat]\n carry, y = f(carry, tree_unflatten(xs_tree, xs_slice))\n ys.append(y)\n stack = lambda y, *ys: (y if core.get_aval(y) is core.abstract_unit\n else jax.numpy.stack((y, *ys)))\n ys = tree_multimap(stack, *maybe_reversed(ys))\n return carry, ys\n\n carry_avals = tuple(_map(_abstractify, init_flat))\n x_shapes = [masking.padded_shape_as_value(x.shape[1:]) for x in xs_flat]\n x_dtypes = [x.dtype for x in xs_flat]\n x_avals = tuple(_map(ShapedArray, x_shapes, x_dtypes))\n jaxpr, consts, out_tree = _initial_style_jaxpr(f, in_tree, carry_avals + x_avals)\n out_tree_children = out_tree.children()\n if len(out_tree_children) != 2:\n msg = \"scan body output must be a pair, got {}.\"\n raise TypeError(msg.format(tree_unflatten(out_tree, jaxpr.out_avals)))\n _check_tree_and_avals(\"scan carry output and input\",\n # Extract the subtree and avals for the first element of the return tuple\n out_tree_children[0], jaxpr.out_avals[:out_tree_children[0].num_leaves],\n init_tree, carry_avals)\n\n out = scan_p.bind(*itertools.chain(consts, in_flat),\n reverse=reverse, length=length, jaxpr=jaxpr,\n num_consts=len(consts), num_carry=len(init_flat),\n linear=(False,) * (len(consts) + len(in_flat)))\n return tree_unflatten(out_tree, out)\n\ndef _scan_impl(*args, reverse, length, num_consts, num_carry, jaxpr, linear):\n consts, init, xs = 
split_list(args, [num_consts, num_carry])\n _, _, x_avals = split_list(jaxpr.in_avals, [num_consts, num_carry])\n _, y_avals = split_list(jaxpr.out_avals, [num_carry])\n\n def cond_fun(vals):\n i, *_ = vals\n return i < length\n\n def body_fun(vals):\n [i], carry, ys = split_list(vals, [1, num_carry])\n i_ = length - i - 1 if reverse else i\n x = _map(partial(_index_array, i_), x_avals, xs)\n out_flat = core.jaxpr_as_fun(jaxpr)(*(consts + carry + x))\n carry_out, y_updates = split_list(out_flat, [num_carry])\n ys_out = _map(partial(_update_array, i_), y_avals, ys, y_updates)\n return [i + 1] + carry_out + ys_out\n\n ys_init = _map(partial(_empty_array, length), y_avals)\n if length == 0:\n return init + ys_init\n else:\n init_val = [lax._const(length, 0)] + init + ys_init\n _, *outs = while_loop(cond_fun, body_fun, init_val)\n return outs\n\ndef _index_array(i, aval, x):\n if aval is core.abstract_unit:\n return core.unit\n else:\n return lax.dynamic_index_in_dim(x, i, keepdims=False)\n\ndef _empty_array(sz, aval):\n if aval is core.abstract_unit:\n return core.unit\n else:\n return lax.full((sz,) + aval.shape, 0, aval.dtype)\n\ndef _update_array(i, aval, xs, x):\n if aval is core.abstract_unit:\n return core.unit\n else:\n return lax.dynamic_update_index_in_dim(xs, x, i, 0)\n\ndef _scan_abstract_eval(*args, reverse, length, num_consts, num_carry, jaxpr, linear):\n carry_avals, y_avals = split_list(jaxpr.out_avals, [num_carry])\n ys_avals = [ShapedArray((length,) + aval.shape, aval.dtype)\n if aval is not core.abstract_unit else aval for aval in y_avals]\n return carry_avals + ys_avals\n\ndef _scan_jvp(primals, tangents, reverse, length, jaxpr, num_consts, num_carry,\n linear):\n num_xs = len(jaxpr.in_avals) - num_carry - num_consts\n num_ys = len(jaxpr.out_avals) - num_carry\n nonzeros = [t is not ad_util.zero for t in tangents]\n const_nz, init_nz, xs_nz = split_list(nonzeros, [num_consts, num_carry])\n\n # Fixpoint computation of which carry are not ad.zero: either\n # non-zero from init, or the carry out is non-zero. Each iteration promotes\n # at least one carry to non-zero. 
We need at most len(carry) iterations,\n # but we need one last iteration to prepare the jaxpr based on the final\n # carry_nz.\n carry_nz = init_nz\n for _ in range(1 + len(carry_nz)):\n nonzeros = const_nz + carry_nz + xs_nz\n jaxpr_jvp, nonzeros_out = ad.jvp_jaxpr(\n jaxpr, nonzeros, instantiate=carry_nz + [False] * num_ys)\n carry_nz_out, ys_nz = nonzeros_out[:num_carry], nonzeros_out[num_carry:]\n if carry_nz_out == carry_nz:\n break\n else:\n carry_nz = _map(operator.or_, carry_nz, carry_nz_out)\n else:\n assert False, \"Fixpoint not reached\"\n\n tangents = [ad.instantiate_zeros(x, t) if t is ad_util.zero and nz else t\n for x, t, nz in zip(primals, tangents, nonzeros)]\n\n consts, init, xs = split_list(primals, [num_consts, num_carry])\n all_tangents = split_list(tangents, [num_consts, num_carry])\n consts_dot, init_dot, xs_dot = _map(_prune_zeros, all_tangents)\n\n jaxpr_jvp_rearranged = ad.rearrange_binders(\n jaxpr_jvp,\n [num_consts, num_carry, num_xs], [len(consts_dot), len(init_dot), len(xs_dot)],\n [num_carry, num_ys], [len(init_dot), sum(nonzeros_out) - len(init_dot)])\n\n consts_linear, init_linear, xs_linear = split_list(linear, [num_consts, num_carry])\n jaxpr_jvp_linear = tuple(consts_linear + [True] * len(consts_dot)\n + init_linear + [True] * len(init_dot)\n + xs_linear + [True] * len(xs_dot))\n\n out_flat = scan_p.bind(\n *(consts + consts_dot + init + init_dot + xs + xs_dot),\n reverse=reverse, length=length, jaxpr=jaxpr_jvp_rearranged,\n num_consts=num_consts+len(consts_dot), num_carry=num_carry+len(init_dot),\n linear=jaxpr_jvp_linear)\n\n carry, carry_dot, ys, ys_dot = split_list(out_flat, [num_carry, len(init_dot), num_ys])\n primals_out = carry + ys\n tangents_out_iter = iter(carry_dot + ys_dot)\n tangents_out = [next(tangents_out_iter) if nz else ad_util.zero\n for nz in nonzeros_out]\n return primals_out, tangents_out\n\ndef _prune_zeros(ts):\n return [t for t in ts if t is not ad_util.zero]\n\ndef _scan_partial_eval(trace, *tracers, reverse, length, num_consts, num_carry,\n jaxpr, linear):\n if trace.master.trace_type is pe.StagingJaxprTrace:\n params = {\"reverse\": reverse, \"length\": length, \"num_consts\": num_consts,\n \"num_carry\": num_carry, \"jaxpr\": jaxpr, \"linear\": linear}\n return trace.default_process_primitive(scan_p, tracers, params)\n\n num_xs = len(jaxpr.in_avals) - num_carry - num_consts\n num_ys = len(jaxpr.out_avals) - num_carry\n\n unknowns = [t.pval[0] is not None for t in tracers]\n const_uk, init_uk, xs_uk = split_list(unknowns, [num_consts, num_carry])\n\n # Fixpoint computation of which carry are unknown (not a constant): either\n # unknown from init, or the carry out is unknown. Each iteration promotes\n # at least one carry to unknown. 
We need at most len(carry) iterations,\n # but we need one last iteration to prepare the jaxpr based on the final\n # carry_uk.\n carry_uk = init_uk\n for _ in range(1 + len(carry_uk)):\n unknowns = const_uk + carry_uk + xs_uk\n jaxpr_1, jaxpr_2, out_uk = pe.partial_eval_jaxpr(\n jaxpr, unknowns, instantiate=carry_uk + [False] * num_ys,\n trace_type=trace.master.trace_type)\n carry_uk_out, ys_uk = out_uk[:num_carry], out_uk[num_carry:]\n if carry_uk_out == carry_uk:\n break\n else:\n carry_uk = _map(operator.or_, carry_uk, carry_uk_out)\n else:\n assert False, \"Fixpoint not reached\"\n num_res = len(jaxpr_1.out_avals) - len(jaxpr_2.out_avals)\n\n # The residuals are treated as extensive outputs of jaxpr_1 (and extensive\n # inputs to jaxpr_2), but residuals that are loop-invariant can be hoisted.\n # TODO(mattjj): hoist other loop-invariant values here too (instantiate=False)\n invariant_pvals = [pe.PartialVal.known(core.unit if uk else t.pval[1])\n for uk, t in zip(unknowns[:num_consts], tracers[:num_consts])]\n other_pvals = [pe.PartialVal.unknown(a) for a in jaxpr_1.in_avals[num_consts:]]\n in_pvals_1 = invariant_pvals + other_pvals\n untyped_jaxpr_1, out_pvals_1, consts_1 = pe.trace_to_jaxpr(\n lu.wrap_init(core.jaxpr_as_fun(jaxpr_1)), in_pvals_1,\n instantiate=[True] * (num_carry + num_ys) + [False] * num_res)\n const_avals_1 = [raise_to_shaped(core.get_aval(c)) for c in consts_1]\n in_avals_1 = [core.abstract_unit] * num_consts + jaxpr_1.in_avals[num_consts:]\n out_avals_1 = [core.abstract_unit if pv is None else pv for pv, c in out_pvals_1]\n\n # TODO(cjfj): Explain the need for the code below.\n for var in untyped_jaxpr_1.invars[:num_consts]:\n var.aval = core.abstract_unit\n\n jaxpr_1_opt = pe.TypedJaxpr(pe.convert_constvars_jaxpr(untyped_jaxpr_1),\n (), const_avals_1 + in_avals_1, out_avals_1)\n num_consts_1 = num_consts + len(consts_1)\n # any now-known residuals are intensive, so we want to revise jaxpr_2 to take\n # those inputs as constants rather than as extensive inputs\n _, _, res_pvals = split_list(out_pvals_1, [num_carry, num_ys])\n intensive_residuals = [const for pv, const in res_pvals if pv is None]\n move = [False] * len(jaxpr_1.in_avals) + [pv is None for pv, _ in res_pvals]\n jaxpr_2_opt = pe.move_binders_to_front(jaxpr_2, move)\n num_consts_2 = num_consts + len(intensive_residuals)\n\n in_consts = (list(consts_1) + [core.unit] * num_consts +\n [core.unit if uk else t.pval[1]\n for uk, t in zip(unknowns[num_consts:], tracers[num_consts:])])\n linear_1 = ([False] * len(consts_1) + [True] * num_consts +\n [lin or uk for uk, lin\n in zip(unknowns[num_consts:], linear[num_consts:])])\n out_flat = scan_p.bind(\n *in_consts, reverse=reverse, length=length, jaxpr=jaxpr_1_opt,\n num_consts=num_consts_1, num_carry=num_carry, linear=tuple(linear_1))\n out_carry, ys, res_and_units = split_list(out_flat, [num_carry, num_ys])\n extensive_residuals = [r for r, (pv, _) in zip(res_and_units, res_pvals) if pv is not None]\n\n new_tracers = [trace.instantiate_const(t) if uk else trace.new_instantiated_literal(core.unit)\n for uk, t in zip(unknowns, tracers)]\n carry_avals, y_avals = split_list(jaxpr.out_avals, [num_carry])\n ys_avals = _map(partial(_promote_aval_rank, length), y_avals)\n out_avals = carry_avals + ys_avals\n out_pvs = [aval if uk else None for aval, uk in zip(out_avals, out_uk)]\n\n out_consts = out_carry + ys\n int_res_tracers = _map(trace.new_instantiated_const, intensive_residuals)\n ext_res_tracers = _map(trace.new_instantiated_const, extensive_residuals)\n 
out_tracers = [pe.JaxprTracer(trace, pe.PartialVal((pv, const)), None)\n for pv, const in zip(out_pvs, out_consts)]\n linear_2 = ([False] * len(int_res_tracers) +\n [lin or not uk for uk, lin in zip(unknowns, linear)] +\n [False] * len(ext_res_tracers))\n eqn = pe.new_eqn_recipe(int_res_tracers + new_tracers + ext_res_tracers,\n out_tracers, scan_p,\n dict(reverse=reverse, length=length, jaxpr=jaxpr_2_opt,\n num_consts=num_consts_2,\n num_carry=num_carry, linear=tuple(linear_2)))\n for t in out_tracers: t.recipe = eqn\n return out_tracers\n\ndef _promote_aval_rank(sz, aval):\n if aval is core.abstract_unit:\n return core.abstract_unit\n else:\n return ShapedArray((sz,) + aval.shape, aval.dtype)\n\ndef _scan_transpose(cts, *args, reverse, length, num_consts, num_carry, jaxpr, linear):\n # we've only implemented transposing scans with specific lin/nonlin patterns\n consts_lin, init_lin, xs_lin = split_list(linear, [num_consts, num_carry])\n num_ires = len(consts_lin) - sum(consts_lin)\n num_eres = len(xs_lin) - sum(xs_lin)\n if consts_lin != [False] * num_ires + [True] * (len(consts_lin) - num_ires):\n raise NotImplementedError\n if xs_lin != [True] * (len(xs_lin) - num_eres) + [False] * num_eres:\n raise NotImplementedError\n if not all(init_lin):\n pass # TODO(mattjj): error check https://github.com/google/jax/issues/1963\n\n consts, _, xs = split_list(args, [num_consts, num_carry])\n ires, _ = split_list(consts, [num_ires])\n _, eres = split_list(xs, [sum(xs_lin)])\n assert not any(ad.is_undefined_primal(r) for r in ires)\n assert not any(ad.is_undefined_primal(r) for r in eres)\n\n carry_avals, y_avals = split_list(jaxpr.out_avals, [num_carry])\n ys_avals = _map(partial(_promote_aval_rank, length), y_avals)\n ct_carry, ct_ys = split_list(cts, [num_carry])\n ct_carry = _map(ad.instantiate_zeros_aval, carry_avals, ct_carry)\n ct_ys = _map(ad.instantiate_zeros_aval, ys_avals, ct_ys)\n ct_consts = _map(ad_util.zeros_like_aval, jaxpr.in_avals[num_ires:num_consts])\n\n # jaxpr :: [ires, T d] -> [T c] -> [T a, eres] -> ([T c], [T b])\n # jaxpr_trans :: [ires] -> [CT d, CT c] -> [CT b, eres] -> ([CT d, CT c], [CT a])\n jaxpr_trans = _transpose_scan_jaxpr(\n num_ires, num_consts - num_ires, num_eres, jaxpr)\n linear_trans = ([False] * num_ires +\n [True] * (len(ct_consts) + len(ct_carry) + len(ct_ys)) +\n [False] * num_eres)\n\n outs = scan_p.bind(\n *(ires + ct_consts + ct_carry + ct_ys + eres), reverse=not reverse,\n length=length, jaxpr=jaxpr_trans, num_consts=num_ires,\n num_carry=num_consts-num_ires+num_carry, linear=tuple(linear_trans))\n ct_consts, ct_init, ct_xs = split_list(outs, [num_consts - num_ires, num_carry])\n return [None] * num_ires + ct_consts + ct_init + ct_xs + [None] * num_eres\n\n# transpose_scan_jaxpr :: ([res1, c, a, res2] -> b)\n# -> ([res1, CT c, CT b, res2] -> [CT c, CT a])\ndef _transpose_scan_jaxpr(num_res1, num_c, num_res2, jaxpr):\n num_a = len(jaxpr.in_avals) - num_res1 - num_c - num_res2\n res1_avals, c_avals, a_avals, res2_avals = split_list(\n jaxpr.in_avals, [num_res1, num_c, num_a])\n num_b = len(jaxpr.out_avals)\n b_avals = list(jaxpr.out_avals)\n\n @lu.wrap_init\n def transposed(*res1_cbar_bbar_res2):\n res1, c_bar, b_bar, res2 = split_list(\n res1_cbar_bbar_res2, [num_res1, num_c, num_b])\n primals = (res1 + [ad.UndefinedPrimal(aval) for aval in c_avals] +\n [ad.UndefinedPrimal(aval) for aval in a_avals] + res2)\n cbar_abar = ad.backward_pass(jaxpr.jaxpr, jaxpr.literals, primals,\n b_bar)\n _, new_c_bar, a_bar, _ = split_list(cbar_abar, [num_res1, num_c, 
num_a])\n a_bar = _map(ad.instantiate_zeros_aval, a_avals, a_bar)\n c_bar = _map(ad.instantiate_zeros_aval, c_avals,\n _map(ad.add_tangents, c_bar, new_c_bar))\n return c_bar + a_bar\n return _make_typed_jaxpr(transposed, res1_avals + c_avals + b_avals + res2_avals)\n\ndef _make_typed_jaxpr(traceable: lu.WrappedFun, in_avals: Sequence[core.AbstractValue]):\n pvals = [pe.PartialVal.unknown(aval) for aval in in_avals]\n jaxpr, pvals_out, consts = pe.trace_to_jaxpr(traceable, pvals, instantiate=True)\n out_avals, _ = unzip2(pvals_out)\n return core.TypedJaxpr(jaxpr, consts, in_avals, _map(raise_to_shaped, out_avals))\n\n\ndef _scan_batching_rule(args, dims, reverse, length, jaxpr, num_consts,\n num_carry, linear):\n num_ys = len(jaxpr.out_avals) - num_carry\n size, = {x.shape[d] for x, d in zip(args, dims) if d is not batching.not_mapped}\n orig_batched = [d is not batching.not_mapped for d in dims]\n const_batched, init_batched, xs_batched = split_list(orig_batched, [num_consts, num_carry])\n\n # Fixpoint computation of which carry are batched: either\n # batched from init, or the carry out is batched. Each iteration promotes\n # at least one carry to batched. We need at most len(carry) iterations,\n # but we need one last iteration to prepare the jaxpr based on the final\n # carry_batched.\n carry_batched = init_batched\n for _ in range(1 + len(carry_batched)):\n batched = const_batched + carry_batched + xs_batched\n jaxpr_batched, batched_out = batching.batch_jaxpr(\n jaxpr, size, batched, instantiate=carry_batched + [False] * num_ys)\n carry_batched_out, ys_batched = batched_out[:num_carry], batched_out[num_carry:]\n if carry_batched_out == carry_batched:\n break\n else:\n carry_batched = _map(operator.or_, carry_batched, carry_batched_out)\n else:\n assert False, \"Fixpoint not reached\"\n\n consts, init, xs = split_list(args, [num_consts, num_carry])\n consts_bdims, init_bdims, xs_bdims = split_list(dims, [num_consts, num_carry])\n new_consts = [batching.moveaxis(x, d, 0) if d is not batching.not_mapped and d != 0\n else x for x, d in zip(consts, consts_bdims)]\n new_init = [batching.broadcast(x, size, 0) if now_batched and not was_batched\n else batching.moveaxis(x, d, 0) if now_batched else x\n for x, d, was_batched, now_batched in\n zip(init, init_bdims, init_batched, carry_batched)]\n new_xs = [batching.moveaxis(x, d, 1) if d is not batching.not_mapped and d != 1\n else x for x, d in zip(xs, xs_bdims)]\n new_args = new_consts + new_init + new_xs\n\n outs = scan_p.bind(*new_args, reverse=reverse, length=length, jaxpr=jaxpr_batched,\n num_consts=num_consts, num_carry=num_carry, linear=linear)\n carry_bdims = [0 if b else batching.not_mapped for b in carry_batched]\n ys_bdims = [1 if b else batching.not_mapped for b in ys_batched]\n return outs, carry_bdims + ys_bdims\n\ndef _scan_shape_rule(shapes, reverse, length, jaxpr,\n num_consts, num_carry, linear):\n const_shexprs, init_shexprs, xs_shexprs = split_list(shapes, [num_consts, num_carry])\n _, y_avals = split_list(jaxpr.out_avals, [num_carry])\n ys_shapes = [(length,) + tuple(y_aval.shape) for y_aval in y_avals]\n return init_shexprs + ys_shapes\n\ndef _scan_masking_rule(shape_envs, padded_vals, shape_exprs, reverse, length,\n jaxpr, num_consts, num_carry, linear):\n out_shape = _scan_shape_rule(shape_exprs, reverse, length, jaxpr,\n num_consts, num_carry, linear)\n dynamic_length = length.evaluate(shape_envs.logical)\n masked_jaxpr = _masked_scan_jaxpr(jaxpr, num_consts, num_carry)\n consts, init, xs = split_list(padded_vals, 
[num_consts, num_carry])\n max_length, = {x.shape[0] for x in xs}\n const_linear, init_linear, xs_linear = split_list(linear, [num_consts, num_carry])\n out_vals = scan_p.bind(\n *itertools.chain([dynamic_length] + consts, [0], init, xs),\n reverse=reverse, length=max_length, jaxpr=masked_jaxpr,\n num_consts=1 + num_consts, num_carry=1 + num_carry,\n linear=tuple([False] + const_linear + [False] + init_linear + xs_linear))\n return out_vals[1:], out_shape\n\ndef _masked_scan_jaxpr(jaxpr, num_consts, num_carry):\n fun = core.jaxpr_as_fun(jaxpr)\n\n @lu.wrap_init\n def masked(*args):\n [dynamic_length], consts, [i], carry, xs = split_list(\n args, [1, num_consts, 1, num_carry])\n out = fun(*(consts + carry + xs))\n new_carry, ys = split_list(out, [num_carry])\n new_carry = [lax.select(i < dynamic_length, new_c, c)\n for new_c, c in zip(new_carry, carry)]\n return [i + 1] + new_carry + ys\n\n aval = ShapedArray((), dtypes.int_)\n const_avals, carry_avals, x_avals = split_list(jaxpr.in_avals, [num_consts, num_carry])\n return _make_typed_jaxpr(masked, [aval] + const_avals + [aval] + carry_avals + x_avals)\n\ndef scan_bind(*args, reverse, length, num_consts, num_carry, jaxpr, linear):\n if not core.skip_checks:\n assert len(linear) == len(args)\n consts, init, xs = split_list(args, [num_consts, num_carry])\n consts_avals, init_avals, x_avals = split_list(jaxpr.in_avals, [num_consts, num_carry])\n xs_avals = _map(partial(_promote_aval_rank, length), x_avals)\n assert all(_map(typecheck, consts_avals, consts)), (consts, consts_avals)\n assert all(_map(typecheck, init_avals, init))\n # assert all(_map(typecheck, xs_avals, xs))\n carry_avals, _ = split_list(jaxpr.out_avals, [num_carry])\n assert all(_map(typematch, init_avals, carry_avals))\n core.check_jaxpr(jaxpr.jaxpr)\n return core.Primitive.bind(scan_p, *args, reverse=reverse, length=length,\n jaxpr=jaxpr, num_consts=num_consts,\n num_carry=num_carry, linear=linear)\n\nscan_p = core.Primitive(\"scan\")\nscan_p.multiple_results = True\nscan_p.def_custom_bind(scan_bind)\nscan_p.def_impl(_scan_impl)\n# scan_p.def_impl(partial(xla.apply_primitive, scan_p)) # TODO(mattjj): re-enable\nscan_p.def_abstract_eval(_scan_abstract_eval)\nad.primitive_jvps[scan_p] = _scan_jvp\nad.primitive_transposes[scan_p] = _scan_transpose\npe.custom_partial_eval_rules[scan_p] = _scan_partial_eval\nxla.initial_style_translations[scan_p] = \\\n xla.lower_fun_initial_style(_scan_impl)\nbatching.primitive_batchers[scan_p] = _scan_batching_rule\nmasking.shape_parameterized_primitive_rules[scan_p] = _scan_masking_rule\n\n\ndef map(f, xs):\n \"\"\"Map a function over leading array axes.\n\n Like Python's builtin map, except inputs and outputs are in the form of\n stacked arrays. 
Consider using the ``jax.vmap`` transform instead, unless you\n need to apply a function element by element for reduced memory usage or\n heterogeneous computation with other control flow primitives.\n\n When ``xs`` is an array type, the semantics of ``map`` are given by this\n Python implementation::\n\n def map(f, xs):\n return np.stack([f(x) for x in xs])\n\n Like ``scan``, ``map`` is implemented in terms of JAX primitives so many of\n the same advantages over a Python loop apply: ``xs`` may be an arbitrary\n nested pytree type, and the mapped computation is compiled only once.\n\n Args:\n f: a Python function to apply element-wise over the first axis or axes of\n ``xs``.\n xs: values over which to map along the leading axis.\n\n Returns:\n Mapped values.\n \"\"\"\n g = lambda _, x: ((), f(x))\n _, ys = scan(g, (), xs)\n return ys\n\n\ndef _concat_masking_rule(padded_vals, logical_shapes, dimension):\n result = lax.concatenate(padded_vals, dimension) # fragmented\n offset = 0\n for padded_val, logical_shape in zip(padded_vals, logical_shapes):\n result = _memcpy(dimension, logical_shape[dimension], padded_val,\n result, offset)\n offset = offset + logical_shape[dimension]\n return result\n\ndef _memcpy(axis, num, src, dst, offset):\n def body(i, dst):\n update = lax.dynamic_index_in_dim(src, i, axis)\n return lax.dynamic_update_index_in_dim(dst, update, i + offset, axis)\n return fori_loop(0, num, body, dst)\n\nmasking.masking_rules[lax.concatenate_p] = _concat_masking_rule\n\n\ndef _check_tree(func_name, expected_name, actual_tree, expected_tree):\n if actual_tree != expected_tree:\n raise TypeError(\n \"{}() output pytree structure must match {}, got {} and {}.\"\n .format(func_name, expected_name, actual_tree, expected_tree))\n\n\ndef _check_tree_and_avals(what, tree1, avals1, tree2, avals2):\n \"\"\"Raises TypeError if (tree1, avals1) does not match (tree2, avals2).\n\n Corresponding `tree` and `avals` must match in the sense that the number of\n leaves in `tree` must be equal to the length of `avals`. 
`what` will be\n prepended to details of the mismatch in TypeError.\n \"\"\"\n if tree1 != tree2:\n msg = (\"{} must have same type structure, got {} and {}.\")\n raise TypeError(msg.format(what, tree1, tree2))\n if not all(safe_map(typematch, avals1, avals2)):\n msg = (\"{} must have identical types, \"\n \"got\\n{}\\nand\\n{}.\")\n raise TypeError(msg.format(what, tree_unflatten(tree1, avals1),\n tree_unflatten(tree2, avals2)))\n\n\ndef _stop_gradient_fun(f):\n \"\"\"Create a version of f() that stops all gradients.\"\"\"\n def wrapper(*args, **kwargs):\n args_flat, in_args_tree = tree_flatten((args, kwargs))\n args_avals = tuple(_map(_abstractify, args_flat))\n g = lambda a, b: f(*a, **b)\n jaxpr, consts, out_tree = _initial_style_jaxpr(g, in_args_tree, args_avals)\n out = core.jaxpr_as_fun(jaxpr)(*lax.stop_gradient(consts + tuple(args_flat)))\n return tree_unflatten(out_tree, out)\n return wrapper\n\n\n_RootTuple = collections.namedtuple('_RootTuple', 'f, solve, l_and_s')\n\n\ndef _split_root_args(args, const_lengths):\n params_list = split_list(args, list(const_lengths))\n return _RootTuple(*params_list[:-1]), params_list[-1]\n\n\ndef custom_root(f, initial_guess, solve, tangent_solve):\n \"\"\"Differentiably solve for a roots of a function.\n\n This is a low-level routine, mostly intended for internal use in JAX.\n Gradients of custom_root() are defined with respect to closed-over variables\n from the provided function ``f`` via the implicit function theorem:\n https://en.wikipedia.org/wiki/Implicit_function_theorem\n\n Args:\n f: function for which to find a root. Should accept a single argument,\n return a tree of arrays with the same structure as its input.\n initial_guess: initial guess for a zero of f.\n solve: function to solve for the roots of f. Should take two positional\n arguments, f and initial_guess, and return a solution with the same\n structure as initial_guess such that func(solution) = 0. In other words,\n the following is assumed to be true (but not checked)::\n\n solution = solve(f, initial_guess)\n error = f(solution)\n assert all(error == 0)\n\n tangent_solve: function to solve the tangent system. 
Should take two\n positional arguments, a linear function ``g`` (the function ``f``\n linearized at its root) and a tree of array(s) ``y`` with the same\n structure as initial_guess, and return a solution ``x`` such that\n ``g(x)=y``:\n\n - For scalar ``y``, use ``lambda g, y: y / g(1.0)``.\n - For vector ``y``, you could use a linear solve with the Jacobian, if\n dimensionality of ``y`` is not too large:\n ``lambda g, y: np.linalg.solve(jacobian(g)(y), y)``.\n\n Returns:\n The result of calling solve(f, initial_guess) with gradients defined via\n implicit differentiation assuming ``f(solve(f, initial_guess)) == 0``.\n \"\"\"\n guess_flat, in_args_tree = tree_flatten((initial_guess,))\n guess_avals = tuple(_map(_abstractify, guess_flat))\n f_jaxpr, f_consts, out_tree = _initial_style_jaxpr(\n f, in_args_tree, guess_avals)\n\n in_tree, = treedef_children(in_args_tree)\n _check_tree(\"f\", \"initial_guess\", out_tree, in_tree)\n\n solve_jaxpr, solve_consts, solution_tree = _initial_style_jaxpr(\n partial(solve, _stop_gradient_fun(f)), in_args_tree, guess_avals)\n _check_tree(\"solve\", \"initial_guess\", solution_tree, in_tree)\n\n def linearize_and_solve(x, b):\n unchecked_zeros, f_jvp = jax.linearize(f, x)\n return tangent_solve(f_jvp, b)\n\n l_and_s_jaxpr, l_and_s_consts, out_tree = _initial_style_jaxpr(\n linearize_and_solve, treedef_tuple((in_tree,) * 2), guess_avals * 2)\n _check_tree(\"tangent_solve\", \"x\", out_tree, in_tree)\n\n all_consts = [f_consts, solve_consts, l_and_s_consts]\n const_lengths = _RootTuple(*_map(len, all_consts))\n jaxprs = _RootTuple(f_jaxpr, solve_jaxpr, l_and_s_jaxpr)\n\n out_flat = _custom_root(\n const_lengths, jaxprs, *(_flatten(all_consts) + guess_flat))\n return tree_unflatten(out_tree, out_flat)\n\n\n@partial(jax.custom_jvp, nondiff_argnums=(0, 1))\ndef _custom_root(const_lengths, jaxprs, *args):\n params, initial_guess = _split_root_args(args, const_lengths)\n solution = core.jaxpr_as_fun(jaxprs.solve)(*(params.solve + initial_guess))\n return solution\n\n\n@_custom_root.defjvp\ndef _root_jvp(const_lengths, jaxprs, primals, tangents):\n params, _ = _split_root_args(primals, const_lengths)\n solution = _custom_root(const_lengths, jaxprs, *primals)\n\n params_dot, _ = _split_root_args(tangents, const_lengths)\n\n # F(m, u) = 0 # system of equations in u, parameterized by m\n # # solution is u*(m) defined in a neighborhood\n # F(m, u*(m)) = 0 # satisfied in a neighborhood\n #\n # ∂_0 F(m, u*(m)) + ∂_1 F(m, u*(m)) ∂ u*(m) = 0 # implied by line above\n # ∂ u*(m) = - (∂_1 F(m, u*(m)))^{-1} ∂_0 F(m, u*(m)) # rearrange\n #\n # ∂ u*(m)[v] = - (∂_1 F(m, u*(m)))^{-1} [∂_0 F(m, u*(m))[v]] # jvp\n\n f = core.jaxpr_as_fun(jaxprs.f)\n linearize_and_solve = partial(\n core.jaxpr_as_fun(jaxprs.l_and_s), *params.l_and_s)\n f_at_solution = lambda *params: f(*itertools.chain(params, solution))\n _, rhs = ad.jvp(lu.wrap_init(f_at_solution)).call_wrapped(\n params.f, params_dot.f)\n solution_dot = _map(\n operator.neg, linearize_and_solve(*itertools.chain(solution, rhs)))\n\n return solution, solution_dot\n\n\nclass _LinearSolveTuple(collections.namedtuple(\n '_LinearSolveTuple', 'matvec, vecmat, solve, transpose_solve')):\n\n def transpose(self):\n return type(self)(self.vecmat, self.matvec, self.transpose_solve, self.solve)\n\n\ndef _split_linear_solve_args(args, const_lengths):\n params_list = split_list(args, list(const_lengths))\n return _LinearSolveTuple(*params_list[:-1]), params_list[-1]\n\n\ndef _transpose_function(linear_fun, primals):\n \"\"\"Transpose a linear 
function.\"\"\"\n # TODO(shoyer): can we use something more direct than the vjp machinery?\n # It's particularly awkward that we need the second argument to give\n # particular values of the primals, which are entirely arbitrary.\n _, vjp_fun = jax.vjp(linear_fun, primals)\n\n def transposed_fun(x):\n (y,) = vjp_fun(x)\n return y\n\n return transposed_fun\n\n\ndef _flatten(args):\n return [x for arg in args for x in arg]\n\n\ndef _check_shapes(func_name, expected_name, actual, expected, tree):\n actual_shapes = _map(onp.shape, actual)\n expected_shapes = _map(onp.shape, expected)\n if actual_shapes != expected_shapes:\n actual_shape_tree = tree_unflatten(tree, actual_shapes)\n act_shape_tree = tree_unflatten(tree, actual_shapes)\n raise ValueError('{}() output shapes must match {}, got {} and {}'\n .format(func_name, expected_name,\n tree_unflatten(tree, actual_shapes),\n tree_unflatten(tree, expected_shapes)))\n\n\ndef custom_linear_solve(\n matvec, b, solve, transpose_solve=None, symmetric=False):\n \"\"\"Perform a matrix-free linear solve with implicitly defined gradients.\n\n This function allows for overriding or defining gradients for a linear\n solve directly via implicit differentiation at the solution, rather than by\n differentiating *through* the solve operation. This can sometimes be much faster\n or more numerically stable, or differentiating through the solve operation\n may not even be implemented (e.g., if ``solve`` uses ``lax.while_loop``).\n\n Required invariant::\n\n x = solve(matvec, b) # solve the linear equation\n assert matvec(x) == b # not checked\n\n Args:\n matvec: linear function to invert. Must be differentiable.\n b: constant right handle side of the equation. May be any nested structure\n of arrays.\n solve: higher level function that solves for solution to the linear\n equation, i.e., ``solve(matvec, x)) == x`` for all ``x`` of the same form\n as ``b``. This function need not be differentiable.\n transpose_solve: higher level function for solving the transpose linear\n equation, i.e., ``transpose_solve(vecmat, x) == x``, where ``vecmat`` is\n the transpose of the linear map ``matvec`` (computed automatically with\n autodiff). 
Required for backwards mode automatic differentiation, unless\n ``symmetric=True``, in which case ``solve`` provides the default value.\n symmetric: bool indicating if it is safe to assume the linear map\n corresponds to a symmetric matrix, i.e., ``matvec == vecmat``.\n\n Returns:\n Result of ``solve(matvec, b)``, with gradients defined assuming that the\n solution ``x`` satisfies the linear equation ``matvec(x) == b``.\n \"\"\"\n if transpose_solve is None and symmetric:\n transpose_solve = solve\n\n b_flat, in_args_tree = tree_flatten((b,))\n b_avals = tuple(_map(_abstractify, b_flat))\n matvec_jaxpr, matvec_consts, out_tree = _initial_style_jaxpr(\n matvec, in_args_tree, b_avals)\n\n tree, = treedef_children(in_args_tree)\n _check_tree(\"matvec\", \"b\", out_tree, tree)\n\n solve_jaxpr, solve_consts, out_tree = _initial_style_jaxpr(\n partial(solve, matvec), in_args_tree, b_avals)\n _check_tree(\"solve\", \"b\", out_tree, tree)\n\n if transpose_solve is None:\n vecmat_jaxpr = tr_solve_jaxpr = None\n vecmat_consts = tr_solve_consts = []\n else:\n if symmetric:\n vecmat = matvec\n vecmat_jaxpr = matvec_jaxpr\n vecmat_consts = matvec_consts\n else:\n vecmat = _transpose_function(matvec, b)\n vecmat_jaxpr, vecmat_consts, out_tree = _initial_style_jaxpr(\n vecmat, in_args_tree, b_avals)\n assert out_tree == tree\n\n tr_solve_jaxpr, tr_solve_consts, out_tree = _initial_style_jaxpr(\n partial(transpose_solve, vecmat), in_args_tree, b_avals)\n _check_tree(\"transpose_solve\", \"b\", out_tree, tree)\n\n all_consts = [matvec_consts, vecmat_consts, solve_consts, tr_solve_consts]\n const_lengths = _LinearSolveTuple(*_map(len, all_consts))\n jaxprs = _LinearSolveTuple(\n matvec_jaxpr, vecmat_jaxpr, solve_jaxpr, tr_solve_jaxpr)\n\n out_flat = linear_solve_p.bind(\n *(_flatten(all_consts) + b_flat),\n const_lengths=const_lengths, jaxprs=jaxprs, tree=tree)\n return tree_unflatten(tree, out_flat)\n\n\ndef _linear_solve_abstract_eval(*args, **kwargs):\n return _map(raise_to_shaped, args[sum(kwargs['const_lengths']):])\n\n\ndef _custom_linear_solve_impl(*args, **kwargs):\n const_lengths, jaxprs, tree = split_dict(\n kwargs, ['const_lengths', 'jaxprs', 'tree'])\n params, b = _split_linear_solve_args(args, const_lengths)\n x = core.jaxpr_as_fun(jaxprs.solve)(*(params.solve + b))\n _check_shapes('solve', 'b', x, b, tree)\n return x\n\n\ndef _tangent_linear_map(func, params, params_dot, *x):\n \"\"\"Compute the tangent of a linear map.\n\n Assuming ``func(*params, *x)`` is linear in ``x`` and computes ``A @ x``,\n this function computes ``∂A @ x``.\n \"\"\"\n assert any(p is not ad_util.zero for p in params_dot)\n zeros = [ad_util.zero] * len(x)\n _, out_tangent = ad.jvp(lu.wrap_init(func)).call_wrapped(\n params + list(x), params_dot + zeros)\n return out_tangent\n\n\ndef _custom_linear_solve_jvp(primals, tangents, const_lengths, jaxprs, tree):\n # A x - b = 0\n # ∂A x + A ∂x - ∂b = 0\n # ∂x = A^{-1} (∂b - ∂A x)\n\n kwargs = dict(const_lengths=const_lengths, jaxprs=jaxprs, tree=tree)\n x = linear_solve_p.bind(*primals, **kwargs)\n\n params, _ = _split_linear_solve_args(primals, const_lengths)\n params_dot, b_dot = _split_linear_solve_args(tangents, const_lengths)\n\n if all(p is ad_util.zero for p in params_dot.matvec):\n # no need to evaluate matvec_tangents\n rhs = b_dot\n else:\n matvec_tangents = _tangent_linear_map(\n core.jaxpr_as_fun(jaxprs.matvec), params.matvec, params_dot.matvec, *x)\n _check_shapes(\"matvec\", \"b\", matvec_tangents, x, tree)\n rhs = _map(ad.add_tangents, b_dot, _map(operator.neg, 
matvec_tangents))\n\n x_dot = linear_solve_p.bind(*(_flatten(params) + rhs), **kwargs)\n\n return x, x_dot\n\n\ndef _linear_solve_transpose_rule(cotangent, *primals, **kwargs):\n const_lengths, jaxprs, tree = split_dict(\n kwargs, ['const_lengths', 'jaxprs', 'tree'])\n\n if jaxprs.transpose_solve is None:\n raise TypeError('transpose_solve required for backwards mode automatic '\n 'differentiation of custom_linear_solve')\n\n params, b = _split_linear_solve_args(primals, const_lengths)\n assert all(ad.is_undefined_primal(x) for x in b)\n cotangent_b = linear_solve_p.bind(\n *(_flatten(params.transpose()) + cotangent),\n const_lengths=const_lengths.transpose(), jaxprs=jaxprs.transpose(),\n tree=tree)\n return [None] * sum(const_lengths) + cotangent_b\n\n\ndef _linear_solve_batching_rule(args, dims, **kwargs):\n const_lengths, jaxprs, tree = split_dict(kwargs,\n [\"const_lengths\", \"jaxprs\", \"tree\"])\n orig_bat = [d is not batching.not_mapped for d in dims]\n size, = {\n a.shape[d] for a, d in zip(args, dims) if d is not batching.not_mapped\n }\n\n params, b = _split_linear_solve_args(args, const_lengths)\n params_dims, b_dims = _split_linear_solve_args(dims, const_lengths)\n params_bat, orig_b_bat = _split_linear_solve_args(orig_bat, const_lengths)\n\n (matvec, vecmat, solve, solve_t) = jaxprs\n (matvec_bat, vecmat_bat, solve_bat, solve_t_bat) = params_bat\n\n # Fixpoint computation of which parts of x and b are batched; we need to\n # ensure this is consistent between all four jaxprs\n b_bat = orig_b_bat\n x_bat = [False] * len(solve.out_avals)\n for i in range(1 + len(orig_b_bat) + len(solve.out_avals)):\n # Apply vecmat and solve -> new batched parts of x\n solve_jaxpr_batched, solve_x_bat = batching.batch_jaxpr(\n solve, size, solve_bat + b_bat, instantiate=x_bat)\n if vecmat is None:\n vecmat_jaxpr_batched = None\n x_bat_out = solve_x_bat\n else:\n vecmat_jaxpr_batched, vecmat_x_bat = batching.batch_jaxpr(\n vecmat, size, vecmat_bat + b_bat, instantiate=x_bat)\n x_bat_out = _map(operator.or_, vecmat_x_bat, solve_x_bat)\n # Apply matvec and solve_t -> new batched parts of b\n matvec_jaxpr_batched, matvec_b_bat = batching.batch_jaxpr(\n matvec, size, matvec_bat + x_bat_out, instantiate=b_bat)\n if solve_t is None:\n solve_t_jaxpr_batched = None\n b_bat_out = _map(operator.or_, matvec_b_bat, orig_b_bat)\n else:\n solve_t_jaxpr_batched, solve_t_b_bat = batching.batch_jaxpr(\n solve_t, size, solve_t_bat + x_bat_out, instantiate=b_bat)\n b_bat_out = _map(lambda m, s, o: m or s or o, matvec_b_bat, solve_t_b_bat,\n orig_b_bat)\n if x_bat_out == x_bat and b_bat_out == b_bat:\n break\n else:\n x_bat = x_bat_out\n b_bat = b_bat_out\n else:\n assert False, \"Fixedpoint not reached\"\n\n batched_jaxprs = _LinearSolveTuple(matvec_jaxpr_batched, vecmat_jaxpr_batched,\n solve_jaxpr_batched, solve_t_jaxpr_batched)\n\n # Move batched axes to the front\n new_params = [\n batching.moveaxis(x, d, 0)\n if d is not batching.not_mapped and d != 0 else x\n for x, d in zip(_flatten(params), _flatten(params_dims))\n ]\n # Broadcast out b if necessary\n new_b = [\n batching.broadcast(x, size, 0) if now_bat and not was_bat else\n batching.moveaxis(x, d, 0) if now_bat and d != 0 else x\n for x, d, was_bat, now_bat in zip(b, b_dims, orig_b_bat, b_bat)\n ]\n\n outs = linear_solve_p.bind(\n *(new_params + new_b),\n const_lengths=const_lengths,\n jaxprs=batched_jaxprs,\n tree=tree)\n out_dims = [0 if batched else batching.not_mapped for batched in b_bat]\n return outs, out_dims\n\n\nlinear_solve_p = 
core.Primitive('custom_linear_solve')\nlinear_solve_p.multiple_results = True\nlinear_solve_p.def_impl(_custom_linear_solve_impl)\nlinear_solve_p.def_abstract_eval(_linear_solve_abstract_eval)\nad.primitive_jvps[linear_solve_p] = _custom_linear_solve_jvp\nxla.initial_style_translations[linear_solve_p] = \\\n xla.lower_fun_initial_style(_custom_linear_solve_impl)\nad.primitive_transposes[linear_solve_p] = _linear_solve_transpose_rule\nbatching.primitive_batchers[linear_solve_p] = _linear_solve_batching_rule\n\n\ndef _interleave(a, b):\n \"\"\"Given two Tensors of static shape, interleave them along the first axis.\"\"\"\n # TODO(mattjj)\n import jax.numpy as np\n # [a b c ...] [d e f ...] -> [a d b e c f ...]\n half_num_elems = b.shape[0]\n\n if a.shape[0] > b.shape[0]:\n return np.concatenate(\n [np.reshape(np.stack([a[: -1], b], axis=1),\n (2 * half_num_elems,) + a.shape[1:]),\n a[-1:]], axis=0)\n else:\n return np.reshape(np.stack([a, b], axis=1),\n (2 * half_num_elems,) + a.shape[1:])\n\ndef associative_scan(fn, elems):\n \"\"\"Perform a scan with an associative binary operation, in parallel.\n\n Args:\n fn: Python callable implementing an associative binary operation with\n signature `r = fn(a, b)`. This must satisfy associativity:\n `fn(a, fn(b, c)) == fn(fn(a, b), c)`. The inputs and result are\n (possibly nested structures of) `Tensor`(s), matching `elems`. Each\n `Tensor` has a leading batch dimension in place of `num_elems`; the `fn`\n is expected to map over this dimension. The result `r` has the same shape\n (and structure) as the two inputs `a` and `b`.\n elems: A (possibly nested structure of) `Tensor`(s), each with leading\n dimension `num_elems`, which must be known statically.\n Returns:\n result: A (possibly nested structure of) `Tensor`(s) of the same shape\n and structure as `elems`, in which the `k`th element is the result of\n recursively applying `fn` to combine the first `k` elements of\n `elems`. For example, given `elems = [a, b, c, ...]`, the result\n would be `[a, fn(a, b), fn(fn(a, b), c), ...]`.\n\n #### Examples\n\n ```python\n # Example 1: Partials sums of numbers.\n\n np.associative_scan(operator.add, np.arange(0, 4))\n # ==> [ 0, 1, 3, 6]\n\n # Example 2: Partial products of random matrices.\n\n np.associative_scan(np.matmul, matrices)\n ```\n \"\"\"\n elems_flat, tree = tree_flatten(elems)\n\n def lowered_fn(a_flat, b_flat):\n # Lower `fn` to operate on flattened sequences of elems.\n a = tree_unflatten(tree, a_flat)\n b = tree_unflatten(tree, b_flat)\n c = fn(a, b)\n c_flat, _ = tree_flatten(c)\n return c_flat\n\n # Check that all inputs have a consistent leading dimension `num_elems`.\n num_elems = int(elems_flat[0].shape[0])\n\n if not all(int(elem.shape[0]) == num_elems for elem in elems_flat[1:]):\n raise ValueError('Input `Tensor`s must have the same first dimension.'\n ' (saw: {})'.format([elems.shape for elem in elems_flat]))\n\n if num_elems < 2:\n return elems\n\n # Summary of algorithm:\n #\n # Consider elements of `_scan(elems)` at odd indices. That's the same as first\n # summing successive pairs of elements of `elems` and performing a scan on\n # that half sized tensor. We perform the latter scan by recursion.\n #\n # Now consider the even elements of `_scan(elems)`. 
These can be computed\n # from the odd elements of `_scan(elems)` by adding each odd element of\n # `_scan(elems)` to the matching even element in the original `elems`.\n #\n # We return the odd and even elements interleaved.\n #\n # For the base case of the recursion we return the first element\n # of `elems` followed by the sum of the first two elements computed as\n # a (small two-down-to-one) reduction step.\n def _scan(elems):\n \"\"\"Perform scan on `elems`.\"\"\"\n\n num_elems = elems[0].shape[0]\n\n reduced_elems = lowered_fn([elem[0:-1:2] for elem in elems],\n [elem[1::2] for elem in elems])\n\n if reduced_elems[0].shape[0] == 1:\n # Base case has either 2 or 3 elements.\n if num_elems == 2:\n return [lax.concatenate([elem[0:1], reduced_elem], dimension=0)\n for (reduced_elem, elem) in zip(reduced_elems, elems)]\n elif num_elems == 3:\n reduced_reduced_elems = lowered_fn(\n reduced_elems,\n [elem[2:3] for elem in elems])\n return [\n lax.concatenate([elem[0:1], reduced_elem, reduced_reduced_elem],\n dimension=0)\n for (reduced_reduced_elem, reduced_elem, elem)\n in zip(reduced_reduced_elems, reduced_elems, elems)]\n\n # Recursively compute scan for partially reduced tensors.\n odd_elems = _scan(reduced_elems)\n\n if num_elems % 2 == 0:\n results = lowered_fn([odd_elem[:-1] for odd_elem in odd_elems],\n [elem[2::2] for elem in elems])\n else:\n results = lowered_fn([odd_elem for odd_elem in odd_elems],\n [elem[2::2] for elem in elems])\n\n # The first element of a scan is the same as the first element\n # of the original `elems`.\n even_elems = [lax.concatenate([elem[0:1], result], dimension=0)\n for (elem, result) in zip(elems, results)]\n return tuple(_map(_interleave, even_elems, odd_elems))\n\n scans = _scan(elems_flat)\n\n return tree_unflatten(tree, scans)\n"
] | [
[
"numpy.array",
"numpy.shape",
"numpy.ndim"
]
] |
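The comment block in the record above describes the odd/even recursion behind `associative_scan`: combine adjacent pairs, scan the half-sized problem recursively, then interleave the results. A minimal sketch of that recursion for a 1-D array, using plain NumPy rather than `lax` (the function name `assoc_scan` and the NumPy-only setting are assumptions for illustration, not part of the file above):

```python
# Sketch of the odd/even associative-scan recursion described in the comments above.
import numpy as np

def assoc_scan(fn, x):
    """Inclusive scan of the associative op `fn` over a 1-D array `x`."""
    n = x.shape[0]
    if n < 2:
        return x
    # Combine adjacent pairs, then scan the half-sized problem recursively.
    reduced = fn(x[0:-1:2], x[1::2])
    odd = assoc_scan(fn, reduced)           # results landing at indices 1, 3, 5, ...
    if n % 2 == 0:
        even_tail = fn(odd[:-1], x[2::2])   # results landing at indices 2, 4, ...
    else:
        even_tail = fn(odd, x[2::2])
    even = np.concatenate([x[:1], even_tail])
    # Interleave even- and odd-position results.
    out = np.empty(n, dtype=x.dtype)
    out[0::2] = even
    out[1::2] = odd
    return out

print(assoc_scan(np.add, np.arange(4)))     # [0 1 3 6]
```

The printed result matches the partial-sums example given in the docstring above.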
clolsonus/dmd | [
"b57441cab716b996c386c5e5b32358434e1e9166"
] | [
"curt_mypydmd3.py"
] | [
"#!/usr/bin/env python3\n\nimport argparse\nimport cv2\nimport json\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport skvideo.io # pip install scikit-video\n\nfrom pydmd import DMD\n\nparser = argparse.ArgumentParser(description='virtual choir')\nparser.add_argument('video', help='video file')\nparser.add_argument('--scale', type=float, default=1.0, help='scale input')\nparser.add_argument(\"--skip-frames\", type=int, default=0)\nparser.add_argument('--write', action='store_true', help='write out video')\nargs = parser.parse_args()\n\n# pathname work\nabspath = os.path.abspath(args.video)\nfilename, ext = os.path.splitext(abspath)\ndirname = os.path.dirname(args.video)\nmode_video = filename + \"_modes.mp4\"\n\nmetadata = skvideo.io.ffprobe(args.video)\n#print(metadata.keys())\nprint(json.dumps(metadata[\"video\"], indent=4))\nfps_string = metadata['video']['@avg_frame_rate']\n(num, den) = fps_string.split('/')\nfps = float(num) / float(den)\ncodec = metadata['video']['@codec_long_name']\n#w = int(round(int(metadata['video']['@width']) * scale))\n#h = int(round(int(metadata['video']['@height']) * scale))\nif \"@duration\" in metadata[\"video\"]:\n total_frames = int(round(float(metadata['video']['@duration']) * fps))\nelse:\n total_frames = 1\n\nprint('fps:', fps)\nprint('codec:', codec)\n#print('output size:', w, 'x', h)\nprint('total frames:', total_frames)\n\nprint(\"Opening \", args.video)\nreader = skvideo.io.FFmpegReader(args.video, inputdict={}, outputdict={})\n\nrows = 3\ncols = 3\nmax_rank = ((rows*cols) * 2) - 2\nprint(\"max rank:\", max_rank)\n\n# process video at this scale factor\nscale = args.scale\ndmd_size = 200\nwindow_size = 64\n\ndef draw_mode(label, mode, shape):\n real = np.abs(mode.real)\n equalized = 255 * (real / np.max(real))\n (h, w) = shape[:2]\n big = cv2.resize(np.flipud(equalized.reshape((dmd_size,dmd_size)).astype('uint8')), (w, h), interpolation=cv2.INTER_AREA)\n cv2.imshow(label, big)\n return big\n\ncounter = 0\n\nX = []\ndmd = DMD(svd_rank=max_rank)\n\ninputdict = {\n '-r': str(fps)\n}\nlossless = {\n # See all options: https://trac.ffmpeg.org/wiki/Encode/H.264\n '-vcodec': 'libx264', # use the h.264 codec\n '-crf': '0', # set the constant rate factor to 0, (lossless)\n '-preset': 'veryslow', # maximum compression\n '-r': str(fps) # match input fps\n}\nsane = {\n # See all options: https://trac.ffmpeg.org/wiki/Encode/H.264\n '-vcodec': 'libx264', # use the h.264 codec\n '-crf': '17', # visually lossless (or nearly so)\n '-preset': 'medium', # default compression\n '-r': str(fps) # match input fps\n}\nif args.write:\n mode_writer = skvideo.io.FFmpegWriter(mode_video, inputdict=inputdict,\n outputdict=sane)\n\nprint(\"collecting video frames\")\nfor frame in reader.nextFrame():\n counter += 1\n if counter <= args.skip_frames:\n continue\n \n frame = frame[:,:,::-1] # convert from RGB to BGR (to make opencv happy)\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n scaled = cv2.resize(gray, (0,0), fx=scale, fy=scale, interpolation=cv2.INTER_AREA)\n cv2.imshow(\"input\", scaled)\n\n small = cv2.resize(scaled, (dmd_size,dmd_size), interpolation=cv2.INTER_AREA)\n\n X.append( np.flipud(small) )\n while len(X) > window_size:\n del X[0]\n\n dmd.fit(np.array(X))\n print(dmd.modes.shape)\n if len(dmd.eigs):\n #print(dmd.eigs)\n idx = np.argsort(np.abs(dmd.eigs-1))\n #idx = np.argsort(np.abs(dmd.eigs.imag))\n print(idx)\n print(dmd.eigs)\n print(dmd.eigs[idx[0]])\n print(dmd.reconstructed_data.shape)\n\n #for i in range(len(idx)):\n # 
draw_mode(\"freq index: %d\" % i, dmd.modes[:,idx[i]], scaled.shape)\n\n big = 255 * dmd.reconstructed_data[:,-1] / np.max(dmd.reconstructed_data[:,-1]) # avoid overflow\n big = cv2.resize(np.flipud(big.reshape((dmd_size,dmd_size)).astype('uint8')), (scaled.shape[1], scaled.shape[0]), interpolation=cv2.INTER_AREA)\n big = 255 * ( big / np.max(big) )\n cv2.imshow(\"reconstructed\", big.astype('uint8'))\n \n def draw_text(img, label, x, y, subscale=1.0, just=\"center\"):\n font_scale = subscale * h / 700\n size = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX,\n font_scale, 1)\n if just == \"center\":\n locx = int(x - size[0][0]*0.5)\n locy = int(y + size[0][1]*1.5)\n elif just == \"lower-right\":\n locx = int(x - size[0][0])\n locy = int(y - size[0][1])\n\n cv2.putText(img, label, (locx, locy),\n cv2.FONT_HERSHEY_SIMPLEX, font_scale, (255, 255, 255),\n 1, cv2.LINE_AA)\n\n (h, w) = scaled.shape[:2]\n grid = np.zeros( (h*rows, w*cols) ).astype('uint8')\n grid[0:h,0:w] = scaled\n draw_text(grid, \"Original\", w*0.5, 0)\n r = 0\n c = 1\n for i in range(0, max_rank, 2):\n if i >= len(idx):\n break\n #print(i)\n if c >= cols:\n r += 1\n c = 0\n #print(\"grid:\", r, c, \"i:\", i)\n grid[r*h:(r+1)*h,c*w:(c+1)*w] = draw_mode(\"a\", dmd.modes[:,idx[i]], scaled.shape)\n #grid[r*h:(r+1)*h,c*w:(c+1)*w] = scaled\n eig = dmd.eigs[idx[i]]\n label = \"Mode: %d (%.4f + %.4fj)\" % (i, eig.real, eig.imag)\n draw_text(grid, label, (c+0.5)*w, r*h)\n c += 1\n draw_text(grid, \"www.uav.aem.umn.edu\", w*(rows-0.03), h*(cols-0.03), just=\"lower-right\")\n cv2.imshow(\"grid\", grid)\n if args.write:\n mode_writer.writeFrame(grid)\n \n if 0xFF & cv2.waitKey(1) == 27:\n break\n\nif False:\n (h, w) = X[0].shape\n print(w,h)\n\n print(\"running dmd\")\n dmd = DMD(svd_rank=5)\n dmd.fit(np.array(X))\n\n dmd.plot_modes_2D(figsize=(12,5))\n\n print(X[0].shape[0])\n x1 = np.array(list(range(w)))\n x2 = np.array(list(range(h)))\n x1grid, x2grid = np.meshgrid(x1, x2)\n fig = plt.figure(figsize=(18,12))\n for id_subplot, snapshot in enumerate(dmd.reconstructed_data.T[:16], start=1):\n plt.subplot(4, 4, id_subplot)\n plt.pcolor(x1grid, x2grid, snapshot.reshape(x1grid.shape).real, vmin=-1, vmax=1)\n\n for eig in dmd.eigs:\n print('Eigenvalue {}: distance from unit circle {}'.format(eig, np.abs(eig.imag**2+eig.real**2 - 1)))\n\n dmd.plot_eigs(show_axes=True, show_unit_circle=True)\n\n for mode in dmd.modes.T:\n plt.plot(mode.real)\n plt.title('Modes')\n plt.show()\n\n for dynamic in dmd.dynamics:\n plt.plot(dynamic.real)\n plt.title('Dynamics')\n plt.show()\n\n fig = plt.figure(figsize=(17,6))\n\n for n, mode, dynamic in zip(range(131, 133), dmd.modes.T, dmd.dynamics):\n plt.subplot(n)\n plt.pcolor(x1grid, x2grid, (mode.reshape(-1, 1).dot(dynamic.reshape(1, -1))).real.T)\n\n plt.subplot(133)\n plt.pcolor(x1grid, x2grid, dmd.reconstructed_data.T.real)\n plt.colorbar()\n\n plt.show()\n\n"
] | [
[
"matplotlib.pyplot.plot",
"numpy.flipud",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.abs",
"matplotlib.pyplot.pcolor",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"numpy.max",
"numpy.array",
"matplotlib.pyplot.colorbar",
"numpy.meshgrid"
]
] |
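The script above ranks DMD modes with `np.argsort(np.abs(dmd.eigs - 1))`, so eigenvalues closest to 1+0j (the slowest-changing, most background-like dynamics) come first. A small standalone sketch of that ranking; the eigenvalues here are made up purely for illustration:

```python
# Ranking DMD eigenvalues by distance from 1+0j, as curt_mypydmd3.py does.
import numpy as np

eigs = np.array([0.99 + 0.01j, 0.70 + 0.60j, 1.00 + 0.00j, -0.95 + 0.00j])
idx = np.argsort(np.abs(eigs - 1))   # same expression as in the script
print(idx)                           # [2 0 1 3]
print(eigs[idx[0]])                  # the most "stationary" eigenvalue, (1+0j)
```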
akharche/numba-dppy | [
"f12dac64b149bd72f305f341ff64b796bbb648c1"
] | [
"numba_dppy/tests/njit_tests/dpnp/test_numpy_array_ops.py"
] | [
"################################################################################\n# Numba-DPPY\n#\n# Copyright 2020-2021 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n################################################################################\n\nimport dpctl\nimport numpy as np\nimport pytest\nfrom numba import njit\n\nimport numba_dppy as dppy\nfrom numba_dppy.tests._helper import (\n dpnp_debug,\n filter_strings,\n is_gen12,\n skip_no_dpnp,\n)\n\nfrom ._helper import wrapper_function\n\npytestmark = skip_no_dpnp\n\nlist_of_dtypes = [\n np.int32,\n np.int64,\n np.float32,\n np.float64,\n]\n\n\[email protected](params=list_of_dtypes)\ndef input_arrays(request):\n # The size of input and out arrays to be used\n N = 10\n a = np.array(np.random.random(N), request.param)\n b = np.array(np.random.random(N), request.param)\n return a, b\n\n\nlist_of_shape = [\n (10),\n (5, 2),\n]\n\n\[email protected](params=list_of_shape)\ndef get_shape(request):\n return request.param\n\n\nlist_of_unary_ops = [\n \"sum\",\n \"prod\",\n \"max\",\n \"min\",\n \"mean\",\n \"argmax\",\n \"argmin\",\n \"argsort\",\n \"copy\",\n \"cumsum\",\n \"cumprod\",\n]\n\n\[email protected](params=list_of_unary_ops)\ndef unary_op(request):\n return (\n wrapper_function(\"a\", f\"a.{request.param}()\", globals()),\n request.param,\n )\n\n\[email protected](\"filter_str\", filter_strings)\ndef test_unary_ops(filter_str, unary_op, input_arrays, get_shape, capfd):\n a = input_arrays[0]\n op, name = unary_op\n if name != \"argsort\" and name != \"copy\":\n a = np.reshape(a, get_shape)\n if name == \"cumprod\" and (\n filter_str == \"opencl:cpu:0\"\n or a.dtype == np.int32\n or is_gen12(filter_str)\n ):\n pytest.skip()\n if name == \"cumsum\" and (\n filter_str == \"opencl:cpu:0\"\n or a.dtype == np.int32\n or is_gen12(filter_str)\n ):\n pytest.skip()\n if name == \"mean\" and is_gen12(filter_str):\n pytest.skip()\n if name == \"argmax\" and is_gen12(filter_str):\n pytest.skip()\n\n actual = np.empty(shape=a.shape, dtype=a.dtype)\n expected = np.empty(shape=a.shape, dtype=a.dtype)\n\n f = njit(op)\n device = dpctl.SyclDevice(filter_str)\n with dpctl.device_context(device), dpnp_debug():\n actual = f(a)\n captured = capfd.readouterr()\n assert \"dpnp implementation\" in captured.out\n\n expected = op(a)\n np.testing.assert_allclose(actual, expected, rtol=1e-3, atol=0)\n\n\nlist_of_indices = [\n np.array([0, 2, 5]),\n np.array([0, 5]),\n]\n\n\[email protected](params=list_of_indices)\ndef indices(request):\n return request.param\n\n\ndef get_take_fn():\n return wrapper_function(\"a, ind\", \"a.take(ind)\", globals())\n\n\[email protected](\"filter_str\", filter_strings)\ndef test_take(filter_str, input_arrays, indices, capfd):\n a = input_arrays[0]\n fn = get_take_fn()\n\n actual = np.empty(shape=a.shape, dtype=a.dtype)\n expected = np.empty(shape=a.shape, dtype=a.dtype)\n\n f = njit(fn)\n device = dpctl.SyclDevice(filter_str)\n with dpctl.device_context(device), dpnp_debug():\n actual = f(a, indices)\n 
captured = capfd.readouterr()\n assert \"dpnp implementation\" in captured.out\n\n expected = fn(a, indices)\n np.testing.assert_allclose(actual, expected, rtol=1e-3, atol=0)\n"
] | [
[
"numpy.empty",
"numpy.reshape",
"numpy.random.random",
"numpy.testing.assert_allclose",
"numpy.array"
]
] |
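The test above builds each operation through the repo-specific `wrapper_function` helper and runs it under `dpctl.device_context`. A reduced sketch of the same pytest fixture/parametrize pattern, using plain NumPy method dispatch via `getattr` so it runs without `numba_dppy` or `dpctl` (the NumPy-only comparison is an assumption for illustration, not the repo's helper):

```python
# Reduced fixture/parametrize pattern: array-method result vs. module-level NumPy function.
import numpy as np
import pytest

list_of_dtypes = [np.int32, np.int64, np.float32, np.float64]

@pytest.fixture(params=list_of_dtypes)
def input_array(request):
    return np.array(np.random.random(10), request.param)

@pytest.mark.parametrize("op_name", ["sum", "prod", "max", "min"])
def test_unary_ops(input_array, op_name):
    result = getattr(input_array, op_name)()          # e.g. a.sum()
    expected = getattr(np, op_name)(input_array)      # e.g. np.sum(a)
    np.testing.assert_allclose(result, expected, rtol=1e-3, atol=0)
```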
nane121/HacktoberFest2020 | [
"29eb99754ee93f643d4b0bd7e18570079e718d59"
] | [
"ML from sratch/logistic_regression.py"
] | [
"import numpy as np\n\nclass LogisticRegression:\n \n \n def __init__(self, lr = 0.001, n_iters = 1000):\n self.lr = lr\n self.n_iters = n_iters\n self.weights = None\n self.bias = None\n \n def fit(self, X, y):\n #init parameters\n n_samples, n_features = X.shape\n self.weights = np.zeros(n_features)\n self.bias = 0\n \n #gradient descent\n for _ in range(self.n_iters): \n linear_model = np.dot(X, self.weights) + self.bias\n y_predicted = self._sigmoid(linear_model)\n \n dw = (1 / n_samples) * np.dot(X.T, (y_predicted - y))\n db = (1 / n_samples) * np.sum(y_predicted - y)\n\n self.weights -= self.lr * dw\n self.bias -= self.lr * db\n\n \n def predict(self, X):\n linear_model = np.dot(X, self.weights) + self.bias\n y_predicted = self._sigmoid(linear_model)\n y_predicted_cls = (1 if i > 0.5 else 0 for i in y_predicted)\n return y_predicted_cls\n\n def _sigmoid(self, x):\n return 1 / (1 + np.exp(-x))"
] | [
[
"numpy.sum",
"numpy.dot",
"numpy.zeros",
"numpy.exp"
]
] |
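A quick usage sketch for the class above on a toy two-blob problem; the synthetic data and hyperparameters are invented for illustration. Note that `predict` returns a generator expression, so the caller has to materialise it (e.g. with `list()`) before comparing labels:

```python
# Assumes the LogisticRegression class above is in scope,
# e.g. from logistic_regression import LogisticRegression
import numpy as np

rng = np.random.default_rng(0)
X = np.vstack([rng.normal(-2.0, 1.0, (50, 2)), rng.normal(2.0, 1.0, (50, 2))])
y = np.array([0] * 50 + [1] * 50)

model = LogisticRegression(lr=0.1, n_iters=2000)
model.fit(X, y)
preds = np.array(list(model.predict(X)))   # predict() yields a generator expression
print("training accuracy:", (preds == y).mean())
```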
beyond007008/test | [
"ba4302a8d65ac8b63627bcfa8e3b23871fa2c390"
] | [
"tensorflowTUT/tensorflow8_feeds.py"
] | [
"# View more python learning tutorial on my Youtube and Youku channel!!!\n\n# Youtube video tutorial: https://www.youtube.com/channel/UCdyjiB5H8Pu7aDTNVXTTpcg\n# Youku video tutorial: http://i.youku.com/pythontutorial\n\n\"\"\"\nPlease note, this code is only for python 3+. If you are using python 2+, please modify the code accordingly.\n\"\"\"\nfrom __future__ import print_function\nimport tensorflow as tf\n\ninput1 = tf.placeholder(tf.float32)\ninput2 = tf.placeholder(tf.float32)\nouput = tf.multiply(input1, input2)\n\nwith tf.Session() as sess:\n print(sess.run(ouput, feed_dict={input1: [7.], input2: [2.]}))\n"
] | [
[
"tensorflow.placeholder",
"tensorflow.multiply",
"tensorflow.Session"
]
] |
PortfolioCollection/Character-Recogniser | [
"f826d6d932a5f19580ff0cdc6cd40e3a7e05ae35"
] | [
"-KNN Approach-/KNN_Tester.py"
] | [
"#--------Hopping-------#\nimport os\nimport sys\nsys.path.append('../_Core Functions_')\nimport Hop\n#----CUSTOM CLASSES-----#\nimport Extractor\nimport KNN_Trainer\n#---SUPPORT LIBRARIES---#\nimport numpy as np\nimport time\nimport re\nimport time\n\ndef test_image(answer_array,index,filename):\n image = Extractor.getImage(filename)\n optimal_number = test_one(image)\n Hop.go_to_TestImages()\n if answer_array[index] == int(optimal_number):\n return 1\n return 0\n\ndef test_one(image):\n FOLDER_NAME = \"-KNN Approach-\"\n test = Extractor.ImageToMatrix(image)\n\n Hop.go_to_approach(\"/\"+FOLDER_NAME)\n \n best_score = 100\n optimal_number = -1\n \n grayscale = Extractor.ImageToMatrix(image)\n r = np.zeros((grayscale.shape[0], grayscale.shape[1]), dtype=int)\n \n lr = KNN_Trainer.read_image(grayscale)\n lean = KNN_Trainer.record_left_right(lr)\n segments = KNN_Trainer.record_segment(lr)\n outside = KNN_Trainer.inside_outside(lr,grayscale)\n\n neighbors = open(\"save.txt\")\n \n for line in neighbors:\n match = \"line ([0-9]*): lean\\(([0-9].[0-9]*)\\) segment\\(([0-9].[0-9]*)\\) outside\\(([0-9].[0-9]*)\\) class\\(([0-9])\\)\"\n string = re.match(match, line)\n train_line,train_lean,train_segments,train_outside,train_number = string.group(1),string.group(2),string.group(3),string.group(4),string.group(5)\n score = abs(lean-float(train_lean))+abs(segments-float(train_segments))\n if score < best_score:\n best_score = score\n optimal_number = train_number\n return optimal_number\n \n \ndef test_loop(num_tests=10000):\n file = open('status.txt', 'w')\n file.write(str(time.strftime(\"%Y-%m-%d %H:%M:%S\", time.gmtime()))+\"\\n\")\n file.flush()\n STOP_AT = min(num_tests,10000)\n PERCENTILE = STOP_AT/100\n \n answer_array = []\n Hop.go_to_home()\n answers = open(\"mnist-test-labels.txt\", \"r\")\n \n index = 0\n for line in answers:\n answer_array.append(int(line.strip()))\n\n index = 0\n correct = 0\n percent = 1\n Hop.go_to_TestImages()\n start_time = time.time()\n for filename in os.listdir(os.getcwd()):\n correct += test_image(answer_array, index, filename)\n index+=1\n if index % PERCENTILE == 0:\n print(str(percent) + \"%\")\n percent += 1\n file.write(str(index)+\": \"+str(round(correct/index*100,2))+\"%\\n\")\n file.flush()\n if index == STOP_AT:\n break\n file.write(\"done\")\n file.flush()\n file.close()\n duration = (time.time()-start_time)\n print(\"Seconds:\"+str(duration))\n print(str(correct/index*100)+\"% correct\")\n\n \n\nif __name__ == \"__main__\":\n os.chdir(\"..\")\n Hop.set_project_path()\n Hop.go_to_approach(\"/-KNN Approach-\")\n test_loop(50)\n\n \n\n \n"
] | [
[
"numpy.zeros"
]
] |
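`test_one` above scores each stored digit by parsing a `save.txt` line with a regex and summing the absolute feature differences (an L1 distance over the `lean` and `segment` features). A sketch of that scoring step on a couple of hand-made lines instead of the real file (the neighbour lines and test features are invented for illustration):

```python
# The regex parse + L1 scoring loop from KNN_Tester.test_one, on synthetic lines.
import re

match = r"line ([0-9]*): lean\(([0-9].[0-9]*)\) segment\(([0-9].[0-9]*)\) outside\(([0-9].[0-9]*)\) class\(([0-9])\)"
neighbors = [
    "line 1: lean(0.40) segment(0.25) outside(0.10) class(3)",
    "line 2: lean(0.90) segment(0.70) outside(0.55) class(8)",
]
lean, segments = 0.42, 0.30          # features extracted from the test image

best_score, optimal_number = 100, -1
for line in neighbors:
    g = re.match(match, line)
    score = abs(lean - float(g.group(2))) + abs(segments - float(g.group(3)))
    if score < best_score:
        best_score, optimal_number = score, g.group(5)
print(optimal_number)                # "3" -- the closest stored digit
```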
CTPLab/IID_representation_learning | [
"b9dc13536963f9af332b039f7cc772e2f1090c62"
] | [
"restyle/models/encoders/map2style.py"
] | [
"import numpy as np\nfrom torch import nn\nfrom torch.nn import Conv2d, Module\n\nfrom models.stylegan2.model import EqualLinear\n\n\nclass GradualStyleBlock(Module):\n def __init__(self, in_c, out_c, spatial):\n super(GradualStyleBlock, self).__init__()\n self.out_c = out_c\n self.spatial = spatial\n num_pools = int(np.log2(spatial))\n modules = []\n modules += [Conv2d(in_c, out_c, kernel_size=3, stride=2, padding=1),\n nn.LeakyReLU()]\n for i in range(num_pools - 1):\n modules += [\n Conv2d(out_c, out_c, kernel_size=3, stride=2, padding=1),\n nn.LeakyReLU()\n ]\n self.convs = nn.Sequential(*modules)\n self.linear = EqualLinear(out_c, out_c, lr_mul=1)\n self.norm = nn.LayerNorm([out_c], elementwise_affine=False)\n\n def forward(self, x):\n x = self.convs(x)\n x = x.view(-1, self.out_c)\n x = self.linear(x)\n x = self.norm(x)\n return x\n\n\nclass GradualNoiseBlock(Module):\n def __init__(self, in_c, out_c, stride, affine):\n super(GradualNoiseBlock, self).__init__()\n self.conv = nn.Conv2d(in_c, out_c, kernel_size=3,\n stride=stride, padding=1, bias=False)\n self.norm = nn.InstanceNorm2d(out_c, affine=True)\n self.relu = nn.LeakyReLU()\n self.conv1 = nn.Conv2d(out_c, 1, kernel_size=3,\n stride=1, padding=1, bias=False)\n self.norm1 = nn.InstanceNorm2d(1, affine=affine)\n self.downsample = nn.Conv2d(in_c, 1, kernel_size=3,\n stride=2, padding=1, bias=False)\n\n def forward(self, x):\n identity = self.downsample(x)\n x = self.conv(x)\n x = self.norm(x)\n x = self.relu(x)\n\n y = self.conv1(x) + identity\n y = self.norm1(y)\n return x, y\n"
] | [
[
"numpy.log2",
"torch.nn.LayerNorm",
"torch.nn.Conv2d",
"torch.nn.Sequential",
"torch.nn.InstanceNorm2d",
"torch.nn.LeakyReLU"
]
] |
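`GradualStyleBlock` sizes its stack with `num_pools = int(np.log2(spatial))`: one initial stride-2 conv plus `num_pools - 1` more, so a `spatial x spatial` feature map is halved down to `1 x 1` before the `view(-1, self.out_c)` flatten. A tiny sketch of that arithmetic using the standard Conv2d output-size formula (plain Python, no torch, spatial size chosen for illustration):

```python
# Why int(np.log2(spatial)) stride-2 convs reduce spatial x spatial to 1 x 1.
import numpy as np

spatial, kernel, stride, padding = 16, 3, 2, 1
num_pools = int(np.log2(spatial))                        # 4 stride-2 convs in total
size = spatial
for _ in range(num_pools):
    size = (size + 2 * padding - kernel) // stride + 1   # Conv2d output-size formula
print(num_pools, size)                                   # 4 1
```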
Rishabhdhyani/Emotion-Age-Gender-Detector | [
"7aa5dc722e1102e8ffcfcb33f06b35194cccc523"
] | [
"demo.py"
] | [
"from pathlib import Path\nimport cv2\nimport dlib\nimport numpy as np\nimport argparse\nfrom contextlib import contextmanager\nfrom wide_resnet import WideResNet\nfrom keras.utils.data_utils import get_file\n\npretrained_model = \"https://github.com/yu4u/age-gender-estimation/releases/download/v0.5/weights.28-3.73.hdf5\"\nmodhash = 'fbe63257a054c1c5466cfd7bf14646d6'\n\n\ndef get_args():\n parser = argparse.ArgumentParser(description=\"This script detects faces from web cam input, \"\n \"and estimates age and gender for the detected faces.\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"--weight_file\", type=str, default=None,\n help=\"path to weight file (e.g. weights.28-3.73.hdf5)\")\n parser.add_argument(\"--depth\", type=int, default=16,\n help=\"depth of network\")\n parser.add_argument(\"--width\", type=int, default=8,\n help=\"width of network\")\n parser.add_argument(\"--margin\", type=float, default=0.4,\n help=\"margin around detected face for age-gender estimation\")\n parser.add_argument(\"--image_dir\", type=str, default=None,\n help=\"target image directory; if set, images in image_dir are used instead of webcam\")\n args = parser.parse_args()\n return args\n\n\ndef draw_label(image, point, label, font=cv2.FONT_HERSHEY_SIMPLEX,\n font_scale=0.8, thickness=1):\n size = cv2.getTextSize(label, font, font_scale, thickness)[0]\n x, y = point\n cv2.rectangle(image, (x, y - size[1]), (x + size[0], y), (255, 0, 0), cv2.FILLED)\n cv2.putText(image, label, point, font, font_scale, (255, 255, 255), thickness, lineType=cv2.LINE_AA)\n\n\n@contextmanager\ndef video_capture(*args, **kwargs):\n cap = cv2.VideoCapture(*args, **kwargs)\n try:\n yield cap\n finally:\n cap.release()\n\n\ndef yield_images():\n # capture video\n with video_capture(0) as cap:\n cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)\n cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)\n\n while True:\n # get video frame\n ret, img = cap.read()\n\n if not ret:\n raise RuntimeError(\"Failed to capture image\")\n\n yield img\n\n\ndef yield_images_from_dir(image_dir):\n image_dir = Path(image_dir)\n\n for image_path in image_dir.glob(\"*.*\"):\n img = cv2.imread(str(image_path), 1)\n\n if img is not None:\n h, w, _ = img.shape\n r = 640 / max(w, h)\n yield cv2.resize(img, (int(w * r), int(h * r)))\n\n\ndef main():\n args = get_args()\n depth = args.depth\n k = args.width\n weight_file = args.weight_file\n margin = args.margin\n image_dir = args.image_dir\n\n if not weight_file:\n weight_file = get_file(\"weights.28-3.73.hdf5\", pretrained_model, cache_subdir=\"pretrained_models\",\n file_hash=modhash, cache_dir=Path(__file__).resolve().parent)\n\n # for face detection\n detector = dlib.get_frontal_face_detector()\n\n # load model and weights\n img_size = 64\n model = WideResNet(img_size, depth=depth, k=k)()\n model.load_weights(weight_file)\n\n image_generator = yield_images_from_dir(image_dir) if image_dir else yield_images()\n\n for img in image_generator:\n input_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img_h, img_w, _ = np.shape(input_img)\n\n # detect faces using dlib detector\n detected = detector(input_img, 1)\n faces = np.empty((len(detected), img_size, img_size, 3))\n\n if len(detected) > 0:\n for i, d in enumerate(detected):\n x1, y1, x2, y2, w, h = d.left(), d.top(), d.right() + 1, d.bottom() + 1, d.width(), d.height()\n xw1 = max(int(x1 - margin * w), 0)\n yw1 = max(int(y1 - margin * h), 0)\n xw2 = min(int(x2 + margin * w), img_w - 1)\n yw2 = min(int(y2 + margin * h), img_h - 1)\n 
cv2.rectangle(img, (x1, y1), (x2, y2), (255, 0, 0), 2)\n # cv2.rectangle(img, (xw1, yw1), (xw2, yw2), (255, 0, 0), 2)\n faces[i, :, :, :] = cv2.resize(img[yw1:yw2 + 1, xw1:xw2 + 1, :], (img_size, img_size))\n\n # predict ages and genders of the detected faces\n results = model.predict(faces)\n predicted_genders = results[0]\n ages = np.arange(0, 101).reshape(101, 1)\n predicted_ages = results[1].dot(ages).flatten()\n\n # draw results\n for i, d in enumerate(detected):\n label = \"{}, {}\".format(int(predicted_ages[i]),\n \"F\" if predicted_genders[i][0] > 0.5 else \"M\")\n draw_label(img, (d.left(), d.top()), label)\n\n cv2.imshow(\"result\", img)\n key = cv2.waitKey(-1) if image_dir else cv2.waitKey(30)\n\n if key == 27: # ESC\n break\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.arange",
"numpy.shape"
]
] |
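In `demo.py` the age head outputs a probability distribution over 101 age bins, and `results[1].dot(ages)` converts it to a single number by taking the expectation over `np.arange(0, 101)`. A standalone sketch with a made-up distribution (the probabilities here are invented for illustration):

```python
# Expected age from a softmax over 101 age bins, as in demo.py.
import numpy as np

probs = np.zeros((1, 101))
probs[0, 28:33] = [0.1, 0.2, 0.4, 0.2, 0.1]   # fake distribution peaked at 30
ages = np.arange(0, 101).reshape(101, 1)
predicted_age = probs.dot(ages).flatten()      # same expression as in demo.py
print(predicted_age)                           # [30.]
```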
CarsonSlovoka/query_attendance | [
"eb94258a4ef0258d05c2bec06656b1b054ed0232"
] | [
"query_attendance.py"
] | [
"\"\"\"\nprepared:\n 1. chromedriver.exe: download from https://chromedriver.chromium.org/downloads\n #. put ``chromedriver.exe`` to {executable}/Scripts/\n\nUSAGE::\n employee_id_1234 password --action=1 --debug=0\n\"\"\"\nfrom os.path import abspath, dirname\nfrom os import path, startfile\nfrom time import sleep\nfrom sys import executable\n\nfrom selenium import webdriver\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.common.exceptions import NoSuchElementException, UnexpectedAlertPresentException\n\nfrom pandas import DataFrame\nfrom enum import Enum\n\nfrom argparse import ArgumentParser\nfrom configparser import ConfigParser, ExtendedInterpolation\n\ntry:\n import colorama\n from colorama import Fore, Back\n colorama.init(autoreset=True)\nexcept ImportError as _e:\n colorama = _e\n\nBACKGROUND_MODE = True\n\n\nclass URL:\n __slots__ = []\n EMPLOYEE = \"\"\n\n\nclass Action(Enum):\n __slots__ = []\n QUERY_ATTENDANCE = 1\n\n\ndef highlight_print(msg: str) -> None:\n if isinstance(colorama, ImportError):\n print(msg)\n else:\n print(Back.LIGHTYELLOW_EX + Fore.RED + msg)\n\n\ndef main(args):\n try:\n args.action = Action(args.action)\n except ValueError:\n highlight_print(f'ERROR. wrong action number: {args.action}') if args.debug else None\n return\n\n dict_run = {Action.QUERY_ATTENDANCE: lambda web_driver: query_attendance(web_driver), }\n\n try:\n web = login(args.url, args.username, args.password)\n except UnexpectedAlertPresentException as e:\n highlight_print(f'ERROR. MSG:{e.alert_text}') if args.debug else None\n return\n dict_run[args.action](web)\n\n\ndef login(url, username, password):\n global BACKGROUND_MODE\n chrome_options = webdriver.ChromeOptions()\n # chrome_options.add_experimental_option(\"detach\", True) # It still exists when the program ends.\n chrome_options.add_argument(\"--start-maximized\") if not BACKGROUND_MODE else None\n chrome_options.add_argument(\"headless\") if BACKGROUND_MODE else None\n # chrome_options.add_argument('window-size=2560,1440')\n chrome_driver_exe_path = abspath(abspath(path.join(dirname(executable), r'Scripts\\chromedriver.exe')))\n assert path.exists(chrome_driver_exe_path), 'chromedriver.exe not found!'\n web = webdriver.Chrome(executable_path=chrome_driver_exe_path, options=chrome_options)\n web.set_window_position(-9999, 0) if not BACKGROUND_MODE else None\n web.implicitly_wait(3) # global setting ``maximum wait time``\n web.get(url)\n try:\n entry_username = web.find_element_by_name('username')\n entry_password = web.find_element_by_name('password')\n except NoSuchElementException: # has been login before.\n print('NoSuchElementException: identifierId')\n web.maximize_window() if not BACKGROUND_MODE else None\n return\n\n entry_username.send_keys(username)\n entry_password.send_keys(password)\n btn_commit = web.find_element_by_name('imageField')\n # btn_commit.click()\n webdriver.ActionChains(web).move_to_element(btn_commit).click(btn_commit).perform()\n sleep(2)\n web.maximize_window()\n return web\n\n\ndef query_attendance(web):\n page_root = web.window_handles[0] # This means the original page that will be empty after logging\n page_home = web.window_handles[1]\n web.switch_to.window(page_home)\n\n if '個人專區':\n personal_area = web.find_element_by_css_selector('#T_PM000600')\n personal_area.click()\n label_query_attendance = web.find_element_by_xpath('//*[@id=\"mtDropDown5\"]/div/div[2]/div/table/tbody/tr[2]/td[1]')\n label_query_attendance.click()\n\n if '異常回報':\n error_report = 
web.find_element_by_xpath('//*[@id=\"mtDropDown6\"]/div/div[2]/div/table/tbody/tr[4]/td[1]')\n webdriver.ActionChains(web).move_to_element(error_report).click(error_report).perform()\n\n if 'Enter Query Page...':\n web.switch_to.frame('frmMAIN')\n select_begin_month = Select(web.find_element_by_name(\"selM_A\"))\n\n if 'select pre-month':\n begin_month_value = int(select_begin_month.first_selected_option.text)\n if begin_month_value == 1: # change year\n input_year = web.find_element_by_name('txtY_A')\n begin_year = int(input_year.get_attribute('value')) - 1\n input_year.clear()\n input_year.send_keys(str(begin_year))\n begin_month_value = begin_month_value - 1 if begin_month_value != 1 else 12\n select_begin_month.select_by_value(str(begin_month_value))\n\n if 'commit':\n Select(web.find_element_by_name(\"sltDataType\")).select_by_visible_text(\"異常刷卡資料\") # or 全部刷卡資料\n go_btn = web.find_element_by_xpath('/html/body/form/table/tbody/tr/td[2]/table[2]/tbody/tr[1]/td/table/tbody/tr[2]/td/table/tbody/tr/td/table/tbody/tr/td[3]/table/tbody/tr[2]/td[3]')\n go_btn.click()\n\n if 'select last page':\n web.switch_to.frame('mainFrame')\n\n select_page = Select(web.find_element_by_name('selPage'))\n n_last_page = len(select_page.options)\n select_page.select_by_visible_text(str(n_last_page))\n\n if 'get report':\n web.switch_to.default_content()\n web.switch_to.frame('frmMAIN')\n web.switch_to.frame('mainFrame')\n \"\"\"\n Important!\n The error of \"element is not attached to the page document\" still happened even the elements you obviously can found.\n That is because the page was changed.\n so you need reload it again. (switch_to)\n \"\"\"\n tbody = web.find_element_by_xpath('/html/body/form/table[1]/tbody') # https://stackoverflow.com/questions/24795198/get-all-child-elements\n list_rows = [[cell.text for cell in row.find_elements_by_tag_name('td')] for row in tbody.find_elements_by_tag_name('tr')]\n columns_title = list_rows[0]\n list_rows = list_rows[1:] # max data count: 17\n\n list_rows = [e for e in list_rows if not all(data == ' ' for data in e)]\n df = DataFrame(list_rows, columns=columns_title)\n df.sort_values(['應刷卡時段'], ascending=[False], inplace=True)\n df.to_csv('temp.txt', index=False)\n startfile('temp.txt')\n\n\nif __name__ == '__main__':\n highlight_print(f'{executable}')\n config = ConfigParser(interpolation=ExtendedInterpolation())\n read_result = config.read(['config.ini'], encoding='utf-8')\n arg_parser = ArgumentParser()\n if len(read_result) == 0: # file exists\n arg_parser.add_argument(\"username\", help=\"username\")\n arg_parser.add_argument(\"password\", help=\"password\")\n arg_parser.add_argument(\"URL\", dest='URL', help=\"URL\")\n arg_parser.add_argument(\"--action\", help=\"action\", dest=\"action\", default=None)\n arg_parser.add_argument(\"--debug\", help=\"debug\", dest=\"debug\", default=False)\n else:\n arg_parser.add_argument(\"--username\", help=\"username\", dest='username', default=config['Required']['username'])\n arg_parser.add_argument(\"--password\", help=\"password\", dest='password', default=config['Required']['password'])\n arg_parser.add_argument(\"--URL\", help='URL', dest='url', default=config['Required']['URL'])\n arg_parser.add_argument(\"--action\", help=\"action\", dest=\"action\", default=config['Option']['action'])\n arg_parser.add_argument(\"--debug\", help=\"debug\", dest=\"debug\", default=config['Option']['debug'])\n\n g_args = arg_parser.parse_args()\n g_args.debug = int(g_args.debug)\n g_args.action = int(g_args.action)\n 
main(g_args)\n"
] | [
[
"pandas.DataFrame"
]
] |
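The `__main__` block above falls back to `config.ini` values as argparse defaults when the file is readable. A reduced sketch of that pattern with an in-memory config string instead of a file; the two options shown are a trimmed subset chosen for illustration:

```python
# Config-file values used as argparse defaults, as query_attendance.py does.
from argparse import ArgumentParser
from configparser import ConfigParser, ExtendedInterpolation

config = ConfigParser(interpolation=ExtendedInterpolation())
config.read_string("[Required]\nusername = employee_1234\n[Option]\naction = 1\n")

parser = ArgumentParser()
parser.add_argument("--username", default=config["Required"]["username"])
parser.add_argument("--action", default=config["Option"]["action"])
args = parser.parse_args([])          # no CLI overrides, so the config values win
print(args.username, int(args.action))
```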
loveagri/a_journey_into_math_of_ml | [
"e081b67d51a8dc74daa55bb0de35de86acdaa536"
] | [
"04_transformer_tutorial_2nd_part/BERT_tutorial/Sentiment_Training.py"
] | [
"from torch.optim import Adam\nfrom torch.utils.data import DataLoader\n\nfrom dataset.sentiment_dataset_v2 import CLSDataset\nfrom models.bert_sentiment_analysis_v2 import *\nfrom sklearn import metrics\nfrom metrics import *\n\nimport tqdm\nimport pandas as pd\nimport numpy as np\nimport configparser\nimport os\nimport json\n\n\nclass Sentiment_trainer:\n def __init__(self, max_seq_len,\n batch_size,\n lr, # 学习率\n with_cuda=True, # 是否使用GPU, 如未找到GPU, 则自动切换CPU\n ):\n config_ = configparser.ConfigParser()\n config_.read(\"./config/sentiment_model_config.ini\")\n self.config = config_[\"DEFAULT\"]\n self.vocab_size = int(self.config[\"vocab_size\"])\n self.batch_size = batch_size\n self.lr = lr\n # 加载字典\n with open(self.config[\"word2idx_path\"], \"r\", encoding=\"utf-8\") as f:\n self.word2idx = json.load(f)\n # 判断是否有可用GPU\n cuda_condition = torch.cuda.is_available() and with_cuda\n self.device = torch.device(\"cuda:0\" if cuda_condition else \"cpu\")\n # 允许的最大序列长度\n self.max_seq_len = max_seq_len\n # 定义模型超参数\n bertconfig = BertConfig(vocab_size=self.vocab_size)\n # 初始化BERT情感分析模型\n self.bert_model = Bert_Sentiment_Analysis(config=bertconfig)\n # 将模型发送到计算设备(GPU或CPU)\n self.bert_model.to(self.device)\n # 声明训练数据集, 按照pytorch的要求定义数据集class\n train_dataset = CLSDataset(corpus_path=self.config[\"train_corpus_path\"],\n word2idx=self.word2idx,\n max_seq_len=self.max_seq_len,\n data_regularization=True\n )\n self.train_dataloader = DataLoader(train_dataset,\n batch_size=self.batch_size,\n num_workers=0,\n collate_fn=lambda x: x # 这里为了动态padding\n )\n # 声明测试数据集\n test_dataset = CLSDataset(corpus_path=self.config[\"test_corpus_path\"],\n word2idx=self.word2idx,\n max_seq_len=self.max_seq_len,\n data_regularization=False\n )\n self.test_dataloader = DataLoader(test_dataset,\n batch_size=self.batch_size,\n num_workers=0,\n collate_fn=lambda x: x)\n # 初始化位置编码\n self.hidden_dim = bertconfig.hidden_size\n self.positional_enc = self.init_positional_encoding()\n # 扩展位置编码的维度, 留出batch维度,\n # 即positional_enc: [batch_size, embedding_dimension]\n self.positional_enc = torch.unsqueeze(self.positional_enc, dim=0)\n\n # 声明需要优化的参数, 并传入Adam优化器\n self.optim_parameters = list(self.bert_model.parameters())\n\n # all_parameters = list(self.bert_model.named_parameters())\n # lis_ = [\"dense.weight\", \"dense.bias\", \"final_dense.weight\", \"final_dense.bias\"]\n # # self.optim_parameters = [i[1] for i in all_parameters if i[0] in lis_]\n # self.optim_parameters = list(self.bert_model.parameters())\n\n self.init_optimizer(lr=self.lr)\n if not os.path.exists(self.config[\"state_dict_dir\"]):\n os.mkdir(self.config[\"state_dict_dir\"])\n\n def init_optimizer(self, lr):\n # 用指定的学习率初始化优化器\n self.optimizer = torch.optim.Adam(self.optim_parameters, lr=lr, weight_decay=1e-3)\n\n def init_positional_encoding(self):\n position_enc = np.array([\n [pos / np.power(10000, 2 * i / self.hidden_dim) for i in range(self.hidden_dim)]\n if pos != 0 else np.zeros(self.hidden_dim) for pos in range(self.max_seq_len)])\n\n position_enc[1:, 0::2] = np.sin(position_enc[1:, 0::2]) # dim 2i\n position_enc[1:, 1::2] = np.cos(position_enc[1:, 1::2]) # dim 2i+1\n denominator = np.sqrt(np.sum(position_enc**2, axis=1, keepdims=True))\n # 归一化\n position_enc = position_enc / (denominator + 1e-8)\n position_enc = torch.from_numpy(position_enc).type(torch.FloatTensor)\n return position_enc\n\n\n def load_model(self, model, dir_path=\"../output\", load_bert=False):\n checkpoint_dir = self.find_most_recent_state_dict(dir_path)\n checkpoint = 
torch.load(checkpoint_dir)\n # 情感分析模型刚开始训练的时候, 需要载入预训练的BERT,\n # 这是我们不载入模型原本用于训练Next Sentence的pooler\n # 而是重新初始化了一个\n if load_bert:\n checkpoint[\"model_state_dict\"] = {k[5:]: v for k, v in checkpoint[\"model_state_dict\"].items()\n if k[:4] == \"bert\" and \"pooler\" not in k}\n model.load_state_dict(checkpoint[\"model_state_dict\"], strict=False)\n torch.cuda.empty_cache()\n model.to(self.device)\n print(\"{} loaded!\".format(checkpoint_dir))\n\n def train(self, epoch):\n # 一个epoch的训练\n self.bert_model.train()\n self.iteration(epoch, self.train_dataloader, train=True)\n\n def test(self, epoch):\n # 一个epoch的测试, 并返回测试集的auc\n self.bert_model.eval()\n with torch.no_grad():\n return self.iteration(epoch, self.test_dataloader, train=False)\n\n def padding(self, output_dic_lis):\n \"\"\"动态padding, 以当前mini batch内最大的句长进行补齐长度\"\"\"\n text_input = [i[\"text_input\"] for i in output_dic_lis]\n text_input = torch.nn.utils.rnn.pad_sequence(text_input, batch_first=True)\n label = torch.cat([i[\"label\"] for i in output_dic_lis])\n return {\"text_input\": text_input,\n \"label\": label}\n\n def iteration(self, epoch, data_loader, train=True, df_name=\"df_log.pickle\"):\n # 初始化一个pandas DataFrame进行训练日志的存储\n df_path = self.config[\"state_dict_dir\"] + \"/\" + df_name\n if not os.path.isfile(df_path):\n df = pd.DataFrame(columns=[\"epoch\", \"train_loss\", \"train_auc\",\n \"test_loss\", \"test_auc\"\n ])\n df.to_pickle(df_path)\n print(\"log DataFrame created!\")\n\n # 进度条显示\n str_code = \"train\" if train else \"test\"\n data_iter = tqdm.tqdm(enumerate(data_loader),\n desc=\"EP_%s:%d\" % (str_code, epoch),\n total=len(data_loader),\n bar_format=\"{l_bar}{r_bar}\")\n\n total_loss = 0\n # 存储所有预测的结果和标记, 用来计算auc\n all_predictions, all_labels = [], []\n\n for i, data in data_iter:\n # padding\n data = self.padding(data)\n # 将数据发送到计算设备\n data = {key: value.to(self.device) for key, value in data.items()}\n # 根据padding之后文本序列的长度截取相应长度的位置编码,\n # 并发送到计算设备\n positional_enc = self.positional_enc[:, :data[\"text_input\"].size()[-1], :].to(self.device)\n\n # 正向传播, 得到预测结果和loss\n predictions, loss = self.bert_model.forward(text_input=data[\"text_input\"],\n positional_enc=positional_enc,\n labels=data[\"label\"]\n )\n # 提取预测的结果和标记, 并存到all_predictions, all_labels里\n # 用来计算auc\n predictions = predictions.detach().cpu().numpy().reshape(-1).tolist()\n labels = data[\"label\"].cpu().numpy().reshape(-1).tolist()\n all_predictions.extend(predictions)\n all_labels.extend(labels)\n # 计算auc\n fpr, tpr, thresholds = metrics.roc_curve(y_true=all_labels,\n y_score=all_predictions)\n auc = metrics.auc(fpr, tpr)\n\n # 反向传播\n if train:\n # 清空之前的梯度\n self.optimizer.zero_grad()\n # 反向传播, 获取新的梯度\n loss.backward()\n # 用获取的梯度更新模型参数\n self.optimizer.step()\n\n # 为计算当前epoch的平均loss\n total_loss += loss.item()\n\n if train:\n log_dic = {\n \"epoch\": epoch,\n \"train_loss\": total_loss/(i+1), \"train_auc\": auc,\n \"test_loss\": 0, \"test_auc\": 0\n }\n\n else:\n log_dic = {\n \"epoch\": epoch,\n \"train_loss\": 0, \"train_auc\": 0,\n \"test_loss\": total_loss/(i+1), \"test_auc\": auc\n }\n\n if i % 10 == 0:\n data_iter.write(str({k: v for k, v in log_dic.items() if v != 0}))\n\n threshold_ = find_best_threshold(all_predictions, all_labels)\n print(str_code + \" best threshold: \" + str(threshold_))\n\n # 将当前epoch的情况记录到DataFrame里\n if train:\n df = pd.read_pickle(df_path)\n df = df.append([log_dic])\n df.reset_index(inplace=True, drop=True)\n df.to_pickle(df_path)\n else:\n log_dic = {k: v for k, v in log_dic.items() if v != 0 and k != 
\"epoch\"}\n df = pd.read_pickle(df_path)\n df.reset_index(inplace=True, drop=True)\n for k, v in log_dic.items():\n df.at[epoch, k] = v\n df.to_pickle(df_path)\n # 返回auc, 作为early stop的衡量标准\n return auc\n\n def find_most_recent_state_dict(self, dir_path):\n \"\"\"\n :param dir_path: 存储所有模型文件的目录\n :return: 返回最新的模型文件路径, 按模型名称最后一位数进行排序\n \"\"\"\n dic_lis = [i for i in os.listdir(dir_path)]\n if len(dic_lis) == 0:\n raise FileNotFoundError(\"can not find any state dict in {}!\".format(dir_path))\n dic_lis = [i for i in dic_lis if \"model\" in i]\n dic_lis = sorted(dic_lis, key=lambda k: int(k.split(\".\")[-1]))\n return dir_path + \"/\" + dic_lis[-1]\n\n def save_state_dict(self, model, epoch, state_dict_dir=\"../output\", file_path=\"bert.model\"):\n \"\"\"存储当前模型参数\"\"\"\n if not os.path.exists(state_dict_dir):\n os.mkdir(state_dict_dir)\n save_path = state_dict_dir + \"/\" + file_path + \".epoch.{}\".format(str(epoch))\n model.to(\"cpu\")\n torch.save({\"model_state_dict\": model.state_dict()}, save_path)\n print(\"{} saved!\".format(save_path))\n model.to(self.device)\n\n\nif __name__ == '__main__':\n def init_trainer(dynamic_lr, batch_size=24):\n trainer = Sentiment_trainer(max_seq_len=300,\n batch_size=batch_size,\n lr=dynamic_lr,\n with_cuda=True,)\n return trainer, dynamic_lr\n\n start_epoch = 0\n train_epoches = 9999\n trainer, dynamic_lr = init_trainer(dynamic_lr=1e-06, batch_size=24)\n\n\n all_auc = []\n threshold = 999\n patient = 10\n best_loss = 999999999\n for epoch in range(start_epoch, start_epoch + train_epoches):\n if epoch == start_epoch and epoch == 0:\n # 第一个epoch的训练需要加载预训练的BERT模型\n trainer.load_model(trainer.bert_model, dir_path=\"./bert_state_dict\", load_bert=True)\n elif epoch == start_epoch:\n trainer.load_model(trainer.bert_model, dir_path=trainer.config[\"state_dict_dir\"])\n print(\"train with learning rate {}\".format(str(dynamic_lr)))\n # 训练一个epoch\n trainer.train(epoch)\n # 保存当前epoch模型参数\n trainer.save_state_dict(trainer.bert_model, epoch,\n state_dict_dir=trainer.config[\"state_dict_dir\"],\n file_path=\"sentiment.model\")\n\n auc = trainer.test(epoch)\n\n all_auc.append(auc)\n best_auc = max(all_auc)\n if all_auc[-1] < best_auc:\n threshold += 1\n dynamic_lr *= 0.8\n trainer.init_optimizer(lr=dynamic_lr)\n else:\n # 如果\n threshold = 0\n\n if threshold >= patient:\n print(\"epoch {} has the lowest loss\".format(start_epoch + np.argmax(np.array(all_auc))))\n print(\"early stop!\")\n break"
] | [
[
"torch.utils.data.DataLoader",
"numpy.sum",
"pandas.read_pickle",
"numpy.zeros",
"sklearn.metrics.roc_curve",
"sklearn.metrics.auc",
"pandas.DataFrame",
"numpy.cos",
"numpy.power",
"numpy.array",
"numpy.sin"
]
] |
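`init_positional_encoding` above builds the classic sinusoidal positional encoding and then row-normalises it. The same construction as a standalone NumPy function; the `300`/`384` sizes in the call are placeholders for illustration, the real values come from `max_seq_len` and `BertConfig.hidden_size`:

```python
# The (row-normalised) sinusoidal positional encoding from Sentiment_trainer.
import numpy as np

def positional_encoding(max_seq_len, hidden_dim):
    pe = np.array([
        [pos / np.power(10000, 2 * i / hidden_dim) for i in range(hidden_dim)]
        if pos != 0 else np.zeros(hidden_dim) for pos in range(max_seq_len)])
    pe[1:, 0::2] = np.sin(pe[1:, 0::2])   # even dimensions
    pe[1:, 1::2] = np.cos(pe[1:, 1::2])   # odd dimensions
    denominator = np.sqrt(np.sum(pe ** 2, axis=1, keepdims=True))
    return pe / (denominator + 1e-8)      # row-normalise, as the trainer does

print(positional_encoding(300, 384).shape)   # (300, 384)
```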
rouzbeh-afrasiabi/Image-Classifier | [
"01518aff07c37f09c6e3a860c7ead92a0e607c71"
] | [
"utils.py"
] | [
"import argparse\nimport os\nimport sys\nimport torch\nimport json\nfrom datetime import datetime\nimport numpy as np\n\ncwd = os.getcwd()\ncurrent_time = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')\n\n\nclass Bunch(object):\n def __init__(self, adict):\n self.__dict__.update(adict) \n \ndef check_file(filename,folder): \n exists=False\n if(check_folder(folder)): \n for root, dirs, files in os.walk(folder):\n if(filename in [file for file in files]):\n exists=True\n else:\n exists=False\n return exists\n\ndef check_folder(foldername): \n exists=False\n for root, dirs, files in os.walk(cwd):\n if(foldername in [dir for dir in dirs]):\n exists=True\n while(not exists):\n print('folder '+foldername,' does not exist in '+cwd)\n user_input = input(\"Create folder? (Y/N)\")\n if(user_input ==\"Y\"):\n os.mkdir(cwd+'/'+foldername)\n exists=True\n break\n else:\n print('Using default folder ','saved_data')\n if(not check_folder('saved_data')):\n os.mkdir(cwd+'/saved_data')\n exists=True\n break\n return exists\n\ndef search_checkpoint(folder):\n checkpoint_files=[]\n log_files=[]\n for root, dirs, files in os.walk(folder):\n for dir in dirs:\n for child_root, child_dirs, child_files in os.walk(dir):\n for filename in child_files:\n if('.pth' in filename):\n if (os.path.join(folder,dir, filename) not in checkpoint_files):\n checkpoint_files.append(os.path.join(folder,dir, filename))\n if('.log' in filename):\n if (os.path.join(folder,dir, filename) not in checkpoint_files):\n log_files.append(os.path.join(folder,dir, filename))\n return(checkpoint_files,log_files)\n \ndef select_checkpoint(new_command_args,checkpoint_files,log_files):\n user_input=0\n checkpoint_to_load=None\n if(not new_command_args.checkpoint):\n print('following checkpoint files were found, please select one to continue\\n')\n for i,x in enumerate(checkpoint_files):\n print([i+1], os.path.basename(x))\n while(user_input==0):\n try:\n user_input = int(input(\"Select checkpoint to load\"+str([1,len(checkpoint_files)])+':'))\n except ValueError:\n print('Value not integer')\n continue\n if(user_input<=len(checkpoint_files) and user_input>=1):\n checkpoint_to_load=(checkpoint_files[user_input-1])\n break\n else:\n user_input=0\n continue\n else:\n if(any(os.path.basename(new_command_args.checkpoint) in x for x in checkpoint_files)):\n index=int(np.where(os.path.basename(new_command_args.checkpoint) in x for x in checkpoint_files)[0])\n checkpoint_to_load=checkpoint_files[index]\n log_found=False\n print(checkpoint_to_load)\n for i,x in enumerate(log_files):\n with open(x,\"r\") as F:\n for k,line in enumerate(F):\n if (os.path.basename(checkpoint_to_load) in line ):\n log_found=True\n timestamp=os.path.basename(checkpoint_to_load).split('.')[-2]\n specs=json.loads(line)\n command_args=Bunch(specs[timestamp])\n if(not log_found):\n print('could not find the checkpoint log associated with this checkpoint')\n command_args=''\n return(command_args,checkpoint_to_load)\n \ndef get_input():\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n command_parser = argparse.ArgumentParser(description='Here you can train a model for flower type catagorization')\n command_parser.add_argument('data_dir', action=\"store\", nargs='*', default=\"/home/workspace/aipnd-project/flowers/\")\n command_parser.add_argument('--arch', action=\"store\", dest=\"model_arch\", default=\"resnet\")\n command_parser.add_argument('--save_dir', action=\"store\", dest=\"save_dir\", default='saved_data')\n 
command_parser.add_argument('--file_name', action=\"store\", dest=\"file_name\", default=\"checkpoint\")\n command_parser.add_argument('--learning_rate', action=\"store\", dest=\"learning_rate\", default=0.001)\n command_parser.add_argument('--momentum', action=\"store\", dest=\"momentum\", default=0.9)\n command_parser.add_argument('--epochs', action=\"store\", dest=\"epochs\", type=int, default=1)\n command_parser.add_argument('--hidden_units', action=\"store\",dest=\"hidden_units\", type=int, default=1020)\n command_parser.add_argument('--gpu',action='store_true',dest=\"use_gpu\",default=False)\n command_parser.add_argument('--batch', action=\"store\", dest=\"batch_size\", type=int, default=64)\n command_parser.add_argument('--shuffle_off',action='store_false',dest=\"shuffle\",default=True) \n command_parser.add_argument('--print_every',action='store',dest=\"print_every\",default=10)\n \n command_args = command_parser.parse_args() \n if (command_args.file_name!=\"\"):\n command_args.file_name=command_args.file_name+\".\"+command_args.model_arch+'.'+current_time+\".pth\"\n while(command_args.use_gpu and 'cpu' in str(device)):\n print('Warning: GPU is not available, switching to CPU. Training will take much longer using CPU\\n')\n user_input = input(\"Continue? (Y/N)\")\n if(user_input=='Y'):\n break\n if(user_input=='N'):\n print('Please turn GPU on and try again')\n user_input = input(\"Try again? (Y/N)\")\n if (user_input=='Y'):\n device=torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n continue\n else:\n print('Using CPU then')\n command_args.use_gpu=False\n break\n else:\n print(\"Use Y for yes and N for No, try again\")\n continue\n command_args.use_gpu=False\n while (check_file(command_args.file_name,command_args.save_dir)):\n print('Warning: This will overwrite '+command_args.file_name)\n user_input = input(\"Continue? (Y/N)\")\n if(user_input=='Y'):\n break\n if (user_input=='N'):\n new_name = input(\"Please provide new name: \")\n if(new_name!=command_args.file_name and new_name!='' and not check_file(new_name,command_args.save_dir)):\n command_args.file_name=new_name\n break\n else:\n continue\n \n else:\n print(\"Use Y for yes and N for No, try again\")\n continue\n log_file=os.path.join(cwd, command_args.save_dir+\"/input_log.log\")\n with open(log_file, 'a') as f:\n if(os.stat(log_file).st_size != 0):\n f.write('\\n'+json.dumps({current_time: vars(command_args)}))\n else:\n f.write(json.dumps({command_args.file_name: vars(command_args)}))\n return(command_args)\n\ndef get_input_predict():\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n command_parser = argparse.ArgumentParser(description='Here you can perform inference based on a pre-trained model')\n command_parser.add_argument('file_path', action=\"store\", default=\"\")\n command_parser.add_argument('--checkpoint', action=\"store\", default=\"\")\n command_parser.add_argument('--gpu',action='store_true',dest=\"use_gpu\",default=False)\n command_parser.add_argument('--top_k', action=\"store\", dest=\"top_k\", type=int, default=3)\n command_parser.add_argument('--category_names', action=\"store\", dest=\"category_names\", default='cat_to_name.json') \n \n command_args = command_parser.parse_args() \n while(command_args.use_gpu and 'cpu' in str(device)):\n print('Warning: GPU is not available, switching to CPU. Training will take much longer using CPU\\n')\n user_input = input(\"Continue? 
(Y/N)\")\n if(user_input=='Y'):\n break\n if(user_input=='N'):\n print('Please turn GPU on and try again')\n user_input = input(\"Try again? (Y/N)\")\n if (user_input=='Y'):\n device=torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n continue\n else:\n print('Using CPU then')\n command_args.use_gpu=False\n break\n else:\n print(\"Use Y for yes and N for No, try again\")\n continue\n command_args.use_gpu=False \n return(command_args)"
] | [
[
"torch.cuda.is_available"
]
] |
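`select_checkpoint` above looks up a requested checkpoint by basename, but the lookup passes a generator expression to `np.where`. A sketch of a plain enumerate-based lookup that expresses the same intent directly; the file names here are invented for illustration:

```python
# Finding the index of a requested checkpoint by basename match.
import os

checkpoint_files = [
    "saved_data/run1/checkpoint.resnet.2019-01-01.pth",
    "saved_data/run2/checkpoint.vgg.2019-02-01.pth",
]
requested = "checkpoint.vgg.2019-02-01.pth"

matches = [i for i, x in enumerate(checkpoint_files)
           if requested in os.path.basename(x)]
checkpoint_to_load = checkpoint_files[matches[0]] if matches else None
print(checkpoint_to_load)   # saved_data/run2/checkpoint.vgg.2019-02-01.pth
```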
TomDonoghue/psychopy_templates | [
"3c3cbd75eee512c16be992957ee6ee242ab3666e"
] | [
"offline/offline_functions_template.py"
] | [
"\"\"\"\nTHIS IS A TEMPLATE FOR AN OFFLINE EXPERIMENT.\nPARTS IN ALL CAPS ARE NOTES ON THE TEMPLATE, AND NEED UPDATING TO RUN.\n\nClasses & Functions to run the .... experiment.\n\nNotes:\n- Here, set up to use LSL for sending event markers. This can be changed.\n- The default set up is to use LSL for sending event markers. This can be changed.\n\"\"\"\n\nimport random\nimport datetime\n\nimport numpy as np\n\nimport pylsl\nfrom psychopy import visual, core, data, event\n\n###################################################################################################\n####################################### Experiment - Classes ######################################\n###################################################################################################\n\nclass ExpInfo(object):\n \"\"\"Class to store experiment run parameters.\"\"\"\n\n def __init__(self):\n\n # Initialize subject number field\n self.subnum = int()\n\n # Experiment version\n self.runversion = ''\n self.dateversion = ''\n self.eegsystem = ''\n\n # Run time\n self.datetimenow = datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n\n # Experiment Block Settings\n self.nblocks = int()\n self.block_number = 1\n self.ntrials_per_block = int()\n\n # Thresholding Settings - FOR BASIC STAIRCASE. CHANGE FOR OTHER/NO THRESHOLDING\n self.nthresh_trials = int()\n self.nreversals = int()\n self.step_sizes = [] # LIST OF STEP SIZES - INTS OR FLOATS\n\n # Settings for train\n self.ntrain_1 = int() # Number of trials in first set of practice trials\n self.ntrain_2 = int() # Number of trials in second set of practice trials\n\n # Check current lsl version\n self.lsl_protocol_version = pylsl.protocol_version()\n self.lsl_library_version = pylsl.library_version()\n\n # Set booleans for what have been checked\n self.check_monitor = False\n\n\n def update_block_number(self):\n \"\"\"Increment block number after running a block.\"\"\"\n\n self.block_number += 1\n\n\nclass RunInfo(object):\n \"\"\"Class to store information details used to run individual trials.\"\"\"\n\n def __init__(self):\n\n # Trial stimuli settings - ADD EXPERIMENT SPECIFIC VARIABLES HERE\n self.wait_time = float() # Time to wait for a response (in seconds)\n self.iti = float() # Time to wait between trials (in seconds)\n\n # Initialize file name vars\n self.dat_fn = None\n self.dat_f = None\n\n # Initialize clock var\n self.clock = None\n\n\n def make_files(self, subnum):\n \"\"\"Initialize data files and write headers.\"\"\"\n\n # Experiment Data File\n self.dat_fn = \"PATH_TO_SAVE_FILE_TO\" + str(subnum) + \"_exp.csv\"\n self.dat_f = open(self.dat_fn, 'w')\n self.dat_f.write('PUT HEADERS HERE')\n self.dat_f.close()\n\n\n def make_clock(self):\n \"\"\"Make the clock to use for the experiment.\"\"\"\n self.clock = core.Clock()\n\n\nclass Stim(object):\n \"\"\"Class to store all the stimuli used in the experiment.\"\"\"\n\n def __init__(self):\n\n # Boolean for whether stimuli have been created\n self.stim = False\n\n # ADD ALL PARAMETERS TO CREATE STIMULI HERE\n # DON'T ACTUALLY CREATE THE STIM AT INIT\n self.fix_height = int() # <- EXAMPLE PARAM\n\n # INITIALIZE VARS THAT WILL STORE STIM\n self.fix = None\n\n\n def make_stim(self, mywin):\n \"\"\"Create the stimuli for the experiment.\"\"\"\n\n # Set boolean that stim have been created\n self.stim = True\n\n # Fixation Cross - KEEP IF YOU NEED A FIXATION CROSS\n self.fix = visual.TextStim(mywin, '+', height=self.fix_height, pos=[0, 0])\n\n # CREATE OTHER STIMULI\n\n\n def update_stim(self, 
new_stim_param):\n \"\"\"Update stimuli. Used in behavioral thresholding.\n\n Parameters\n ----------\n self : Stim() object\n Object of all stimuli for the task.\n new_stim_param : float\n New value to update stimuli to.\n\n NOTE: MIGHT HAVE OTHER INPUTS, SUCH AS WHICH SIDE YOU ARE UPDATING.\n \"\"\"\n\n # UPDATE STIMULI\n pass\n\n\nclass Block(object):\n \"\"\"Class to store information to run a block of trials.\"\"\"\n\n def __init__(self):\n\n # Initialize arrays to store trial parameters for a block\n # INITIALIZE ARRAYS FOR TRIAL PARAMETERS HERE\n # FOR EXAMPLE, TRIAL CONDITION, SIDE OF PRESENTATION, ETC.\n pass\n\n\nclass Inds(object):\n \"\"\"Class to store index number for data storage.\n\n Notes:\n - An index object is used to be able to index data with\n meaningful variables, instead of 'magic' numbers.\n \"\"\"\n\n def __init__(self):\n\n # Index numbers in trial output data vector\n self.block = 0\n self.trial = 1\n # ADD MORE INDICES HERE\n\n\n###################################################################################################\n##################################### Experiment - Functions ######################################\n###################################################################################################\n\ndef run_block(mywin, marker_outlet, exinfo, run, stim):\n \"\"\"Runs a blocks of trials in the experiment.\n\n Parameters\n ----------\n mywin : psychopy Window object\n Pointer to psychopy visual window.\n marker_outlet : pylsl StreamOutlet() object\n LSL output stream to send event markers.\n exinfo : ExpInfo() object\n Object of information about the experiment.\n run : RunInfo() object\n Object of information / parameters to run task trials.\n stim : Stim() object\n Object of all stimuli for the task.\n\n Returns\n -------\n exp_block_data : 2d array\n Matrix of experiment data from the block.\n \"\"\"\n\n # Get block settings\n ntrials_block = exinfo.ntrials_per_block\n block_num = exinfo.block_number\n\n # Set up matrix for block data\n exp_block_data = np.zeros(shape=(ntrials_block, LEN_TRIAL_DAT_OUT))\n\n # Get block trial run info\n block, order = make_block_trials(ntrials_block)\n # ^ SETTINGS FROM HERE SHOULD BE PUT INTO TRIAL_INFO TO SET TRIAL PARAMETERS\n\n # Send marker for start of block\n marker_outlet.push_sample(pylsl.vectorstr([\"Start Block\"]))\n\n # Beginning of block message\n message = visual.TextStim(mywin, text='')\n disp_text(mywin, message, 'Press space when you are ready to start a block of trials.')\n\n # Pause before block starts, then start with a fixation cross\n core.wait(1.0)\n stim.fix.draw()\n core.wait(1.5)\n\n # Loop through trials\n for trial in range(0, ntrials_block):\n trial_info = [block_num, ETC] # <- LIST OF INFORMATION TO RUN THE TRIAL\n # FOR EXAMPLE: [BLOCK_NUM, TRIAL_NUM, RUN_TYPE, SIDE, ETC.]\n trial_dat = run_trial(mywin, marker_outlet, run, stim, trial_info)\n exp_block_data[trial, :] = trial_dat\n iti = run.iti + random.random() # Adds jitter to ITI\n core.wait(iti)\n\n # Send marker for end of block\n marker_outlet.push_sample(pylsl.vectorstr([\"End Block\"]))\n\n # End of block message\n disp_text(mywin, message, 'That is the end of this block of trials.')\n\n # Return block data\n return exp_block_data\n\n\ndef make_block_trials(ntrials_block):\n \"\"\"Creates a matrix of pseudo-random balanced trial parameters for a block of trials.\n\n Parameters\n ----------\n ntrials_block : int\n Number of trials in the block.\n\n Returns\n -------\n block : 2d array\n Matrix of trial 
parameters (this is NOT random).\n order : 1d array\n Randomized order to run the trials in.\n \"\"\"\n\n ## CREATE VECTORS OF TRIAL PARAMETER SETTINGS FOR A BLOCK OF TRIALS\n # FOR EXAMPLE: COND_VEC = NP.APPEND(NP.ZEROS(NTRIAL_BLOCK/2), NP.ONES(NTRIAL_BLOCK/2))\n # ^ CREATES A VECTOR TO HAVE 50% OF EACH OF TWO TRIAL CONDITIONS\n\n # Collect run details into block object\n block = Block()\n # ADD BLOCK RUN\n # EXAMPLE: block.CONDITION = COND_VEC\n\n # Set up array for run order\n order = range(0, len(ntrials_block))\n random.shuffle(order)\n\n return block, order\n\n\ndef run_trial(mywin, marker_outlet, run, stim, trial_info):\n \"\"\"This function runs experiment trials.\n\n Parameters\n ----------\n mywin : psychopy Window object\n Pointer to psychopy visual window.\n marker_outlet : pylsl StreamOutlet() object\n LSL output stream to send event markers.\n run : RunInfo() object\n Object of information / parameters to run task trials.\n stim : Stim() object\n Object of all stimuli for the task.\n trial_info : 1d array\n Vector of trial parameters.\n trial_info: [] <- LIST OF TRIAL INPUT PARAMETERS\n\n Returns\n -------\n trial_dat : 1d array\n Vector of trial data about behavior.\n trial_dat: [] <- VECTOR OF TRIAL DATA. SET UP FOR WHAT YOU WANT TO COLLECT.\n \"\"\"\n\n # Get index object\n ind = Inds()\n\n # Get trial parameters\n # PULL OUT TRIAL SETTINGS FROM TRIAL_INFO\n # FOR EXAMPLE, TRIAL_TYPE, SIDE etc.\n\n # Get fixation cross\n fix = stim.fix\n\n # Set up trial, pull out stimuli to use for the trial\n ## USE INPUT TRIAL PARAMETERS TO PULL OUT REQUIRED STIM\n\n # Run the trial\n fix.draw()\n # DRAW ANY TRIAL MARKERS\n mywin.flip()\n marker_outlet.push_sample(pylsl.vectorstr([\"Markers\"]))\n core.wait(WAIT_TIME + random.random()/10.)\n\n # Present stimuli to subject\n # DRAW STIMULI\n # FLIP SCREEN\n pres_time = run.clock.getTime() # In seconds\n\n # Draw any markers and fixation after the presentation\n fix.draw()\n # DRAW ANY POST TRIAL MARKERS\n mywin.flip()\n\n # Wait for response\n resp = event.waitKeys(maxWait=run.wait_time, timeStamped=run.clock)\n\n # After the wait period, draw only fixation\n fix.draw()\n mywin.flip()\n\n # Check if detected trial\n if resp is None:\n saw = 0\n rt = None\n marker_outlet.push_sample(pylsl.vectorstr([\"MissTrial\"]))\n else:\n saw = 1\n rt = resp[0][1] - pres_time # Time stamp\n marker_outlet.push_sample(pylsl.vectorstr([\"HitTrial\"]))\n\n # Output data - trial data\n trial_dat = [] # <- SET LIST OF DATA YOU WANT TO COLLECT / RETURN\n\n ## Print data to files\n strlist_exp = [] # <- MAKE A LIST OF STRINGS OF SAME DATA FOR FILE\n strlist_exp = \",\".join(strlist_exp)\n dat_f = open(run.dat_fn, 'a')\n dat_f.write(strlist_exp + '\\n')\n dat_f.close()\n\n return trial_dat\n\n\ndef train(mywin, marker_outlet, exinfo, run, stim):\n \"\"\"Trains the participant on the task.\n\n Parameters\n ----------\n mywin : psychopy Window object\n Pointer to psychopy visual window.\n marker_outlet : pylsl StreamOutlet() object\n LSL output stream to send event markers.\n exinfo : ExpInfo() object\n Object of information about the experiment.\n run : RunInfo() object\n Object of information / parameters to run task trials.\n stim : Stim() object\n Object of all stimuli for the task.\n\n Returns\n -------\n train_exp_data : 2d array\n Matrix of trial data from train trials.\n train_meth_data : 2d array\n Matrix of method data from train trials.\n \"\"\"\n\n # Get index object\n ind = Inds()\n\n # Get number of practice trials per practice block\n n_prac_1 = 
exinfo.ntrain_1\n n_prac_2 = exinfo.ntrain_2\n\n # Set up trial parameters\n block_num = -1\n trial_num = 0\n trial_info = [block_num, trial_num, ETC] # <- TRIAL RUN SETTINGS FOR PRACTICE TRIALS\n\n # Set up and display text to explain the task\n message = visual.TextStim(mywin, text='')\n disp_text(mywin, message, \"INSTRUCTIONS\")\n\n # Run a Test Trial to show an example\n disp_text(mywin, message, \"Lets try a test trial.\")\n initial_trial = run_trial(mywin, marker_outlet, run, stim, trial_info)\n core.wait(1.0)\n\n # Initialize matrices for train data (train block 1)\n train_exp_1 = np.zeros(shape=(n_prac_1, LEN_TRIAL_DAT_OUT))\n\n # Run a practice block trials\n disp_text(mywin, message, \"Lets try a few more practice trials.\")\n\n for trial in range(0, n_prac_1):\n\n # Run trial\n train_exp_1[trial, :] = run_trial(mywin, marker_outlet, run, stim, trial_info)\n core.wait(1.0)\n\n # Present more instructions IF NEEDED\n disp_text(mywin, message, \"MORE INSTRUCTIONS\")\n\n # Initialize matrices for train data (train block 2)\n train_exp_2 = np.zeros(shape=(n_prac_2, LEN_TRIAL_DAT_OUT))\n\n # Run another practice block of trials\n for trial in range(0, n_prac_2):\n\n # Run trial\n train_exp_2[trial, :] = run_trial(mywin, marker_outlet, run, stim, trial_info)\n core.wait(1.0)\n\n # Collect all train data together\n train_exp_data = np.vstack([initial_trial, train_exp_1, train_exp_2])\n\n # Pause to ask the subject if they have any questions about the task\n str_message = (\"If you have any questions about the task \"\n \"please ask the experimenter now.\")\n disp_text(mywin, message, str_message)\n\n return train_exp_data\n\n\ndef disp_text(mywin, message, text):\n \"\"\"Displays text on screen, waits for a keypress, then clears text and continues.\n\n Parameters\n ----------\n mywin : psychopy Window object\n Pointer to psychopy visual window.\n message : psychopy visual.TextStim() object\n Psychopy text stimulus object.\n text : str\n Words to display (string).\n \"\"\"\n\n # Set the given text in text object\n message.setText(text)\n\n # Display text, flip screen and wait for key press\n message.draw()\n mywin.flip()\n _ = event.waitKeys()\n mywin.flip()\n\n return\n\n\ndef threshold_staircase(mywin, marker_outlet, exinfo, run, stim):\n \"\"\"Run a psychophysical staircase to set stimuli parameters.\n Here - looking to set parameters for 50 percent detection rate.\n\n Parameters\n ----------\n mywin : psychopy Window object\n Pointer to psychopy visual window.\n marker_outlet : pylsl StreamOutlet() object\n LSL output stream to send event markers.\n exinfo : ExpInfo() object\n Object of information about the experiment.\n run : RunInfo() object\n Object of information / parameters to run task trials.\n stim : Stim() object\n Object of all stimuli for the task.\n\n Returns\n -------\n thresh_exp_data : 2d array\n Experiment data from thresholding trials.\n stim : Stim() object\n Stim object updated with thresholded luminances.\n \"\"\"\n\n # Get index object\n ind = Inds()\n\n # Set up trial information for thresholding trials\n block_num = 0\n trial_num = 1\n trial_info = [block_num, trial_num, ETC] # <- TRIAL RUN SETTINGS FOR THRESH TRIALS\n\n # Set up staircase condition parameters\n stair_conds = [] # <- LIST OF DICTIONARIES TO INITIALIZE STAIRCASES\n\n # Set up staircase handler\n staircases = data.MultiStairHandler(\n stairType='simple', conditions=stair_conds,\n nTrials=exinfo.nthresh_trials)\n\n # Initialize matrices to store thresholding data\n thresh_exp_data = np.zeros([0, 
LEN_TRIAL_DAT_OUT])\n\n # Initialize message\n message = visual.TextStim(mywin, text='')\n break_message = (\"You may take a short break. \"\n \"Press when you are ready for a new block of trials.\")\n\n # Pause before block starts, then start with a fixation cross\n core.wait(1.0)\n stim.fix.draw()\n core.wait(1.5)\n\n # Run threshold trial\n for thresh_param, cond in staircases:\n\n # SET UP TRIAL\n # MIGHT NEED TO EXTRACT CONDITION, ETC.\n\n # Update stimulus luminance\n stim.update_stim(THRESH_PARAM)\n\n # Run trial\n exp_trial_dat = run_trial(mywin, marker_outlet, run, stim, trial_info)\n\n # Inter-trial interval\n core.wait(run.iti + random.random())\n\n # Add subject response to staircase\n staircases.addResponse(exp_trial_dat[RESP_INDEX])\n\n # Add data to thresh data matrices\n thresh_exp_data = np.vstack([thresh_exp_data, exp_trial_dat])\n\n # Increment trial number\n trial_num += 1\n\n # Take a break after a certain number of trials\n if trial_num % 48 == 0:\n disp_text(mywin, message, break_message)\n\n # Pull out results from staircases\n # DEPENDING HOW YOU ARE USING STAIRCASES, THIS MIGHT REQUIRED AVERAGING OVER\n # MULTIPLE STAIRCASES, AND/OR A SET OF REVERSAL POINTS IN EACH STAIRCASE\n UPDATE_PARAMS = None\n\n # Update stim to be used for main experiment\n stim.update_stim(UPDATE_PARAMS)\n\n # Save staircase object\n staircases.saveAsPickle(\"PATH_TO_SAVE_FILE_TO\" + str(exinfo.subnum) + \"_thresh\")\n\n return thresh_exp_data, thresh_meth_data, stim\n\n\ndef check_monitor(mywin, exinfo):\n \"\"\"Checks details about the monitor, and stores in an ExpInfo() object.\n\n Parameters\n ----------\n mywin : psychopy Window object\n Pointer to psychopy visual window.\n exinfo : ExpInfo() object\n Object of information about the experiment.\n\n Returns\n -------\n exinfo : ExpInfo() object\n Object of information about the experiment.\n \"\"\"\n\n # Update that monitor check has been run\n exinfo.check_monitor = True\n\n # Check frame rate and time per frame and save to exinfo\n exinfo.mon_frame_rate = mywin.getActualFrameRate()\n exinfo.mon_ms_frame = mywin.getMsPerFrame()\n\n return exinfo\n\n\ndef experiment_log(exinfo, run, stim):\n \"\"\"Writes out a log (txt file) with relevant run information.\n\n Parameters\n ----------\n exinfo : ExpInfo() object\n Object of information about the experiment.\n run : RunInfo() object\n Object of information / parameters to run task trials.\n stim : Stim() object\n Object of all stimuli for the task.\n \"\"\"\n\n ## Set up & open file for experiment log\n logfilename = \"PATH_TO_SAVE_FILE_TO\" + str(exinfo.subnum) + '_ExpLog.txt'\n logfile = open(logfilename, 'w')\n\n ## Write to file\n # Basic Information\n logfile.write('\\n RUN INFORMATION \\n')\n logfile.write('Run Version: ' + str(exinfo.runversion) + '\\n')\n logfile.write('Date of Current Version: ' + str(exinfo.dateversion) + '\\n')\n logfile.write('Subject Number: ' + str(exinfo.subnum) + '\\n')\n logfile.write('Date: ' + str(exinfo.datetimenow) + '\\n')\n\n # Software Information\n logfile.write('\\n SOFTWARE INFORMATION \\n')\n logfile.write('Pylsl Protocol Version: ' + str(exinfo.lsl_protocol_version) + '\\n')\n logfile.write('Pylsl Library Version: ' + str(exinfo.lsl_library_version) + '\\n')\n\n # Monitor / display information\n if exinfo.check_monitor:\n logfile.write('\\n MONITOR INFORMATION \\n')\n logfile.write('Monitor Frame Rate: ' + str(exinfo.mon_frame_rate) + '\\n')\n logfile.write('Monitor ms per frame: ' + str(exinfo.mon_ms_frame) + '\\n')\n\n # Experiment 
Information - THESE FIELDS MAY NEED UPDATING\n logfile.write('\\n EXPERIMENT INFORMATION \\n')\n logfile.write('Number of exp blocks: ' + str(exinfo.nblocks) + '\\n')\n logfile.write('Number trials per block: ' + str(exinfo.ntrials_per_block) + '\\n')\n logfile.write('Thresholding - Number Reversals: ' + str(exinfo.nreversals) + '\\n')\n logfile.write('Thresholding - Step Sizes: ' + str(exinfo.step_sizes) + '\\n')\n logfile.write('Thresholding - Min Number Trials: ' + str(exinfo.nthresh_trials) + '\\n')\n\n # Presentation Information - ADD FIELDS FOR EXPERIMENT SPECIFIC PRESENTATION INFORMATION\n logfile.write('\\n PRESENTATION INFORMATION \\n')\n\n # Close log file\n logfile.close()\n"
] | [
[
"numpy.vstack",
"numpy.zeros"
]
] |
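The psychopy task template in the row above is annotated only with `numpy.zeros` and `numpy.vstack`. The sketch below shows that accumulation pattern in isolation (preallocate one matrix per block, fill one row per trial, stack blocks at the end); the sizes `N_TRIALS`/`N_FIELDS` and the placeholder trial vectors are assumptions, not values from the template.

```python
import numpy as np

# Hypothetical stand-ins for ntrials_per_block and LEN_TRIAL_DAT_OUT
N_TRIALS, N_FIELDS = 4, 3

# Preallocate one matrix per block, as run_block() does with np.zeros
block_1 = np.zeros(shape=(N_TRIALS, N_FIELDS))
block_2 = np.zeros(shape=(N_TRIALS, N_FIELDS))

for trial in range(N_TRIALS):
    # Each run_trial() call returns a vector of trial data; placeholder values here
    block_1[trial, :] = [0, trial, 1.0]
    block_2[trial, :] = [1, trial, 0.0]

# train() concatenates its practice blocks the same way with np.vstack
all_trials = np.vstack([block_1, block_2])
print(all_trials.shape)  # (8, 3)
```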
smit-s/arviz | [
"6a51574efc2dfa652d489091121a3c46da64d12e"
] | [
"arviz/data/io_pymc3.py"
] | [
"\"\"\"PyMC3-specific conversion code.\"\"\"\nimport logging\nimport warnings\nfrom typing import Dict, List, Any, Optional, Iterable, Union, TYPE_CHECKING\nfrom types import ModuleType\n\nimport numpy as np\nimport xarray as xr\nfrom .. import utils\nfrom .inference_data import InferenceData, concat\nfrom .base import requires, dict_to_dataset, generate_dims_coords, make_attrs, CoordSpec, DimSpec\n\nif TYPE_CHECKING:\n import pymc3 as pm\n from pymc3 import MultiTrace, Model # pylint: disable=invalid-name\n import theano\n from typing import Set # pylint: disable=ungrouped-imports\nelse:\n MultiTrace = Any # pylint: disable=invalid-name\n Model = Any # pylint: disable=invalid-name\n\n___all__ = [\"\"]\n\n_log = logging.getLogger(__name__)\n\nCoords = Dict[str, List[Any]]\nDims = Dict[str, List[str]]\n# random variable object ...\nVar = Any # pylint: disable=invalid-name\n\n\ndef _monkey_patch_pymc3(pm: ModuleType) -> None: # pylint: disable=invalid-name\n assert pm.__name__ == \"pymc3\"\n\n def fixed_eq(self, other):\n \"\"\"Use object identity for MultiObservedRV equality.\"\"\"\n return self is other\n\n if tuple([int(x) for x in pm.__version__.split(\".\")]) < (3, 9): # type: ignore\n pm.model.MultiObservedRV.__eq__ = fixed_eq # type: ignore\n\n\nclass PyMC3Converter: # pylint: disable=too-many-instance-attributes\n \"\"\"Encapsulate PyMC3 specific logic.\"\"\"\n\n model = None # type: Optional[pm.Model]\n nchains = None # type: int\n ndraws = None # type: int\n posterior_predictive = None # Type: Optional[Dict[str, np.ndarray]]\n predictions = None # Type: Optional[Dict[str, np.ndarray]]\n prior = None # Type: Optional[Dict[str, np.ndarray]]\n\n def __init__(\n self,\n *,\n trace=None,\n prior=None,\n posterior_predictive=None,\n log_likelihood=True,\n predictions=None,\n coords: Optional[Coords] = None,\n dims: Optional[Dims] = None,\n model=None\n ):\n import pymc3\n import theano\n\n _monkey_patch_pymc3(pymc3)\n\n self.pymc3 = pymc3\n self.theano = theano\n\n self.trace = trace\n\n # this permits us to get the model from command-line argument or from with model:\n try:\n self.model = self.pymc3.modelcontext(model or self.model)\n except TypeError:\n self.model = None\n\n # This next line is brittle and may not work forever, but is a secret\n # way to access the model from the trace.\n if trace is not None:\n if self.model is None:\n self.model = list(self.trace._straces.values())[ # pylint: disable=protected-access\n 0\n ].model\n self.nchains = trace.nchains if hasattr(trace, \"nchains\") else 1\n self.ndraws = len(trace)\n else:\n self.nchains = self.ndraws = 0\n\n if self.model is None:\n warnings.warn(\n \"Using `from_pymc3` without the model will be deprecated in a future release. \"\n \"Not using the model will return less accurate and less useful results. 
\"\n \"Make sure you use the model argument or call from_pymc3 within a model context.\",\n PendingDeprecationWarning,\n )\n\n self.prior = prior\n self.posterior_predictive = posterior_predictive\n self.log_likelihood = log_likelihood\n self.predictions = predictions\n\n def arbitrary_element(dct: Dict[Any, np.ndarray]) -> np.ndarray:\n return next(iter(dct.values()))\n\n if trace is None:\n # if you have a posterior_predictive built with keep_dims,\n # you'll lose here, but there's nothing I can do about that.\n self.nchains = 1\n get_from = None\n if predictions is not None:\n get_from = predictions\n elif posterior_predictive is not None:\n get_from = posterior_predictive\n elif prior is not None:\n get_from = prior\n if get_from is None:\n # pylint: disable=line-too-long\n raise ValueError(\n \"When constructing InferenceData must have at least\"\n \" one of trace, prior, posterior_predictive or predictions.\"\n )\n\n aelem = arbitrary_element(get_from)\n self.ndraws = aelem.shape[0]\n\n self.coords = coords\n self.dims = dims\n self.observations = self.find_observations()\n\n def find_observations(self) -> Optional[Dict[str, Var]]:\n \"\"\"If there are observations available, return them as a dictionary.\"\"\"\n has_observations = False\n if self.model is not None:\n if any((hasattr(obs, \"observations\") for obs in self.model.observed_RVs)):\n has_observations = True\n if has_observations:\n assert self.model is not None\n return {obs.name: obs.observations for obs in self.model.observed_RVs}\n return None\n\n def log_likelihood_vals_point(self, point, var, log_like_fun):\n \"\"\"Compute log likelihood for each observed point.\"\"\"\n log_like_val = utils.one_de(log_like_fun(point))\n if var.missing_values:\n log_like_val = np.where(var.observations.mask, np.nan, log_like_val)\n return log_like_val\n\n @requires(\"trace\")\n @requires(\"model\")\n def _extract_log_likelihood(self):\n \"\"\"Compute log likelihood of each observation.\"\"\"\n # If we have predictions, then we have a thinned trace which does not\n # support extracting a log likelihood.\n if self.log_likelihood is True:\n cached = [(var, var.logp_elemwise) for var in self.model.observed_RVs]\n else:\n cached = [\n (var, var.logp_elemwise)\n for var in self.model.observed_RVs\n if var.name in self.log_likelihood\n ]\n try:\n log_likelihood_dict = self.pymc3.sampling._DefaultTrace( # pylint: disable=protected-access\n len(self.trace.chains)\n )\n except AttributeError:\n raise AttributeError(\n \"Installed version of ArviZ requires PyMC3>=3.8. 
Please upgrade with \"\n \"`pip install pymc3>=3.8` or `conda install -c conda-forge pymc3>=3.8`.\"\n )\n for var, log_like_fun in cached:\n for chain in self.trace.chains:\n log_like_chain = [\n self.log_likelihood_vals_point(point, var, log_like_fun)\n for point in self.trace.points([chain])\n ]\n log_likelihood_dict.insert(var.name, np.stack(log_like_chain), chain)\n return log_likelihood_dict.trace_dict\n\n @requires(\"trace\")\n def posterior_to_xarray(self):\n \"\"\"Convert the posterior to an xarray dataset.\"\"\"\n var_names = self.pymc3.util.get_default_varnames( # pylint: disable=no-member\n self.trace.varnames, include_transformed=False\n )\n data = {}\n for var_name in var_names:\n data[var_name] = np.array(self.trace.get_values(var_name, combine=False, squeeze=False))\n return dict_to_dataset(data, library=self.pymc3, coords=self.coords, dims=self.dims)\n\n @requires(\"trace\")\n def sample_stats_to_xarray(self):\n \"\"\"Extract sample_stats from PyMC3 trace.\"\"\"\n data = {}\n rename_key = {\"model_logp\": \"lp\"}\n data = {}\n for stat in self.trace.stat_names:\n name = rename_key.get(stat, stat)\n data[name] = np.array(self.trace.get_sampler_stats(stat, combine=False))\n\n return dict_to_dataset(data, library=self.pymc3, dims=None, coords=self.coords)\n\n @requires(\"trace\")\n @requires(\"model\")\n def log_likelihood_to_xarray(self):\n \"\"\"Extract log likelihood and log_p data from PyMC3 trace.\"\"\"\n if self.predictions or not self.log_likelihood:\n return None\n try:\n data = self._extract_log_likelihood()\n except TypeError:\n warnings.warn(\n \"\"\"Could not compute log_likelihood, it will be omitted.\n Check your model object or set log_likelihood=False\"\"\"\n )\n return None\n return dict_to_dataset(data, library=self.pymc3, dims=self.dims, coords=self.coords)\n\n def translate_posterior_predictive_dict_to_xarray(self, dct) -> xr.Dataset:\n \"\"\"Take Dict of variables to numpy ndarrays (samples) and translate into dataset.\"\"\"\n data = {}\n for k, ary in dct.items():\n shape = ary.shape\n if shape[0] == self.nchains and shape[1] == self.ndraws:\n data[k] = ary\n elif shape[0] == self.nchains * self.ndraws:\n data[k] = ary.reshape((self.nchains, self.ndraws, *shape[1:]))\n else:\n data[k] = utils.expand_dims(ary)\n # pylint: disable=line-too-long\n _log.warning(\n \"posterior predictive variable %s's shape not compatible with number of chains and draws. 
\"\n \"This can mean that some draws or even whole chains are not represented.\",\n k,\n )\n return dict_to_dataset(data, library=self.pymc3, coords=self.coords, dims=self.dims)\n\n @requires([\"posterior_predictive\"])\n def posterior_predictive_to_xarray(self):\n \"\"\"Convert posterior_predictive samples to xarray.\"\"\"\n return self.translate_posterior_predictive_dict_to_xarray(self.posterior_predictive)\n\n @requires([\"predictions\"])\n def predictions_to_xarray(self):\n \"\"\"Convert predictions (out of sample predictions) to xarray.\"\"\"\n return self.translate_posterior_predictive_dict_to_xarray(self.predictions)\n\n def priors_to_xarray(self):\n \"\"\"Convert prior samples (and if possible prior predictive too) to xarray.\"\"\"\n if self.prior is None:\n return {\"prior\": None, \"prior_predictive\": None}\n if self.observations is not None:\n prior_predictive_vars = list(self.observations.keys())\n prior_vars = [key for key in self.prior.keys() if key not in prior_predictive_vars]\n else:\n prior_vars = list(self.prior.keys())\n prior_predictive_vars = None\n\n priors_dict = {}\n for group, var_names in zip(\n (\"prior\", \"prior_predictive\"), (prior_vars, prior_predictive_vars)\n ):\n priors_dict[group] = (\n None\n if var_names is None\n else dict_to_dataset(\n {k: utils.expand_dims(self.prior[k]) for k in var_names},\n library=self.pymc3,\n coords=self.coords,\n dims=self.dims,\n )\n )\n return priors_dict\n\n @requires(\"observations\")\n @requires(\"model\")\n def observed_data_to_xarray(self):\n \"\"\"Convert observed data to xarray.\"\"\"\n if self.predictions:\n return None\n if self.dims is None:\n dims = {}\n else:\n dims = self.dims\n observed_data = {}\n for name, vals in self.observations.items():\n if hasattr(vals, \"get_value\"):\n vals = vals.get_value()\n vals = utils.one_de(vals)\n val_dims = dims.get(name)\n val_dims, coords = generate_dims_coords(\n vals.shape, name, dims=val_dims, coords=self.coords\n )\n # filter coords based on the dims\n coords = {key: xr.IndexVariable((key,), data=coords[key]) for key in val_dims}\n observed_data[name] = xr.DataArray(vals, dims=val_dims, coords=coords)\n return xr.Dataset(data_vars=observed_data, attrs=make_attrs(library=self.pymc3))\n\n @requires([\"trace\", \"predictions\"])\n @requires(\"model\")\n def constant_data_to_xarray(self):\n \"\"\"Convert constant data to xarray.\"\"\"\n # For constant data, we are concerned only with deterministics and data.\n # The constant data vars must be either pm.Data (TensorSharedVariable) or pm.Deterministic\n constant_data_vars = {} # type: Dict[str, Var]\n for var in self.model.deterministics:\n ancestors = self.theano.tensor.gof.graph.ancestors(var.owner.inputs)\n # no dependency on a random variable\n if not any((isinstance(a, self.pymc3.model.PyMC3Variable) for a in ancestors)):\n constant_data_vars[var.name] = var\n\n def is_data(name, var) -> bool:\n assert self.model is not None\n return (\n var not in self.model.deterministics\n and var not in self.model.observed_RVs\n and var not in self.model.free_RVs\n and var not in self.model.potentials\n and (self.observations is None or name not in self.observations)\n )\n\n # I don't know how to find pm.Data, except that they are named variables that aren't\n # observed or free RVs, nor are they deterministics, and then we eliminate observations.\n for name, var in self.model.named_vars.items():\n if is_data(name, var):\n constant_data_vars[name] = var\n\n if not constant_data_vars:\n return None\n if self.dims is None:\n dims = 
{}\n else:\n dims = self.dims\n constant_data = {}\n for name, vals in constant_data_vars.items():\n if hasattr(vals, \"get_value\"):\n vals = vals.get_value()\n # this might be a Deterministic, and must be evaluated\n elif hasattr(self.model[name], \"eval\"):\n vals = self.model[name].eval()\n vals = np.atleast_1d(vals)\n val_dims = dims.get(name)\n val_dims, coords = generate_dims_coords(\n vals.shape, name, dims=val_dims, coords=self.coords\n )\n # filter coords based on the dims\n coords = {key: xr.IndexVariable((key,), data=coords[key]) for key in val_dims}\n try:\n constant_data[name] = xr.DataArray(vals, dims=val_dims, coords=coords)\n except ValueError as e: # pylint: disable=invalid-name\n raise ValueError(\"Error translating constant_data variable %s: %s\" % (name, e))\n return xr.Dataset(data_vars=constant_data, attrs=make_attrs(library=self.pymc3))\n\n def to_inference_data(self):\n \"\"\"Convert all available data to an InferenceData object.\n\n Note that if groups can not be created (e.g., there is no `trace`, so\n the `posterior` and `sample_stats` can not be extracted), then the InferenceData\n will not have those groups.\n \"\"\"\n id_dict = {\n \"posterior\": self.posterior_to_xarray(),\n \"sample_stats\": self.sample_stats_to_xarray(),\n \"log_likelihood\": self.log_likelihood_to_xarray(),\n \"posterior_predictive\": self.posterior_predictive_to_xarray(),\n \"predictions\": self.predictions_to_xarray(),\n **self.priors_to_xarray(),\n \"observed_data\": self.observed_data_to_xarray(),\n }\n if self.predictions:\n id_dict[\"predictions_constant_data\"] = self.constant_data_to_xarray()\n else:\n id_dict[\"constant_data\"] = self.constant_data_to_xarray()\n return InferenceData(**id_dict)\n\n\ndef from_pymc3(\n trace: Optional[MultiTrace] = None,\n *,\n prior: Optional[Dict[str, Any]] = None,\n posterior_predictive: Optional[Dict[str, Any]] = None,\n log_likelihood: Union[bool, Iterable[str]] = True,\n coords: Optional[CoordSpec] = None,\n dims: Optional[DimSpec] = None,\n model: Optional[Model] = None\n) -> InferenceData:\n \"\"\"Convert pymc3 data into an InferenceData object.\n\n All three of them are optional arguments, but at least one of ``trace``,\n ``prior`` and ``posterior_predictive`` must be present.\n\n Parameters\n ----------\n trace : pymc3.MultiTrace, optional\n Trace generated from MCMC sampling.\n prior : dict, optional\n Dictionary with the variable names as keys, and values numpy arrays\n containing prior and prior predictive samples.\n posterior_predictive : dict, optional\n Dictionary with the variable names as keys, and values numpy arrays\n containing posterior predictive samples.\n log_likelihood : bool or array_like of str, optional\n List of variables to calculate `log_likelihood`. Defaults to True which calculates\n `log_likelihood` for all observed variables. If set to False, log_likelihood is skipped.\n coords : dict of {str: array-like}, optional\n Map of coordinate names to coordinate values\n dims : dict of {str: list of str}, optional\n Map of variable names to the coordinate names to use to index its dimensions.\n model : pymc3.Model, optional\n Model used to generate ``trace``. 
It is not necessary to pass ``model`` if in\n ``with`` context.\n\n Returns\n -------\n InferenceData\n \"\"\"\n return PyMC3Converter(\n trace=trace,\n prior=prior,\n posterior_predictive=posterior_predictive,\n log_likelihood=log_likelihood,\n coords=coords,\n dims=dims,\n model=model,\n ).to_inference_data()\n\n\n### Later I could have this return ``None`` if the ``idata_orig`` argument is supplied. But\n### perhaps we should have an inplace argument?\ndef from_pymc3_predictions(\n predictions,\n posterior_trace: Optional[MultiTrace] = None,\n model: Optional[Model] = None,\n coords=None,\n dims=None,\n idata_orig: Optional[InferenceData] = None,\n inplace: bool = False,\n) -> InferenceData:\n \"\"\"Translate out-of-sample predictions into ``InferenceData``.\n\n Parameters\n ----------\n predictions: Dict[str, np.ndarray]\n The predictions are the return value of ``pymc3.sample_posterior_predictive``,\n a dictionary of strings (variable names) to numpy ndarrays (draws).\n posterior_trace: pm.MultiTrace\n This should be a trace that has been thinned appropriately for\n ``pymc3.sample_posterior_predictive``. Specifically, any variable whose shape is\n a deterministic function of the shape of any predictor (explanatory, independent, etc.)\n variables must be *removed* from this trace.\n model: pymc3.Model\n This argument is *not* optional, unlike in conventional uses of ``from_pymc3``.\n The reason is that the posterior_trace argument is likely to supply an incorrect\n value of model.\n coords: Dict[str, array-like[Any]]\n Coordinates for the variables. Map from coordinate names to coordinate values.\n dims: Dict[str, array-like[str]]\n Map from variable name to ordered set of coordinate names.\n idata_orig: InferenceData, optional\n If supplied, then modify this inference data in place, adding ``predictions`` and\n (if available) ``predictions_constant_data`` groups. If this is not supplied, make a\n fresh InferenceData\n inplace: boolean, optional\n If idata_orig is supplied and inplace is True, merge the predictions into idata_orig,\n rather than returning a fresh InferenceData object.\n\n Returns\n -------\n InferenceData:\n May be modified ``idata_orig``.\n \"\"\"\n if inplace and not idata_orig:\n raise ValueError(\n (\n \"Do not pass True for inplace unless passing\"\n \"an existing InferenceData as idata_orig\"\n )\n )\n new_idata = PyMC3Converter(\n trace=posterior_trace, predictions=predictions, model=model, coords=coords, dims=dims\n ).to_inference_data()\n if idata_orig is None:\n return new_idata\n elif inplace:\n concat([idata_orig, new_idata], dim=None, inplace=True)\n return idata_orig\n else:\n # if we are not returning in place, then merge the old groups into the new inference\n # data and return that.\n concat([new_idata, idata_orig], dim=None, copy=True, inplace=True)\n return new_idata\n"
] | [
[
"numpy.stack",
"numpy.where",
"numpy.atleast_1d"
]
] |
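The ArviZ converter above is annotated with `numpy.stack`, `numpy.where` and `numpy.atleast_1d`. The rough sketch below reproduces only its shape handling with made-up chain/draw counts: `translate_posterior_predictive_dict_to_xarray` reshapes arrays flattened over chains and draws, `_extract_log_likelihood` stacks one array per chain, and `log_likelihood_vals_point` masks missing observations with `np.where`.

```python
import numpy as np

# Made-up sample counts standing in for converter.nchains / converter.ndraws
nchains, ndraws = 2, 5
rng = np.random.default_rng(0)

# A posterior-predictive array that arrives flattened over chains and draws
flat = rng.normal(size=(nchains * ndraws, 3))
if flat.shape[0] == nchains * ndraws:
    # Same reshape translate_posterior_predictive_dict_to_xarray applies
    per_chain = flat.reshape((nchains, ndraws, *flat.shape[1:]))

# _extract_log_likelihood instead builds one array per chain and stacks them
chains = [rng.normal(size=(ndraws, 3)) for _ in range(nchains)]
stacked = np.stack(chains)
assert per_chain.shape == stacked.shape == (nchains, ndraws, 3)

# log_likelihood_vals_point blanks out masked (missing) observations this way
missing_mask = np.array([True, False, True])
log_like = np.where(missing_mask, np.nan, np.array([0.1, 0.2, 0.3]))
```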
tlatkowski/tf-feature-selection | [
"86a62e28211f854944822b4aefffae582fa060d8"
] | [
"methods/selection.py"
] | [
"import tensorflow as tf\n\n\ndef fisher(data, num_instances: list, top_k_features=2):\n \"\"\"\n Performs Fisher feature selection method according to the following formula:\n D(f) = (m1(f) - m2(f) / (std1(f) - std2(f))\n\n :param data:\n :param num_instances:\n :param top_k_features:\n :return: the list of most significant features.\n \"\"\"\n assert len(num_instances) == 2, \"Fisher selection method can be performed for two-class problems.\"\n\n data = tf.convert_to_tensor(data)\n num_features = data.get_shape().as_list()[-1]\n if top_k_features > num_features:\n top_k_features = num_features\n class1, class2 = tf.split(data, num_instances)\n\n with tf.name_scope('fisher_selection'):\n mean1, std1 = tf.nn.moments(class1, axes=0)\n mean2, std2 = tf.nn.moments(class2, axes=0)\n fisher_coeffs = tf.abs(mean1 - mean2) / (std1 + std2)\n selected_features = tf.nn.top_k(fisher_coeffs, k=top_k_features)\n\n return selected_features\n\n\ndef feature_correlation_with_class(data, num_instances: list, top_k_features=10):\n \"\"\"\n Makes feature correlation with class selection according to the following formula:\n D(f) = [(m1(f) - m(f))^2 + (m2(f) - m(f))^2] / 2*sigma(f)^2\n :return: the list of most significant features.\n \"\"\"\n data = tf.convert_to_tensor(data)\n num_features = data.get_shape().as_list()[-1]\n if top_k_features > num_features:\n top_k_features = num_features\n class1, class2 = tf.split(data, num_instances)\n\n with tf.name_scope('corr_selection'):\n mean1, std1 = tf.nn.moments(class1, axes=0)\n mean2, std2 = tf.nn.moments(class2, axes=0)\n mean, std = tf.nn.moments(data, axes=0)\n corr_coeffs = (tf.square(mean1 - mean) + tf.square(mean2 - mean)) / 2 * tf.square(std)\n selected_features = tf.nn.top_k(corr_coeffs, k=top_k_features)\n\n return selected_features\n\n\ndef t_test(data, num_instances: list, top_k_features=10):\n \"\"\"\n Makes feature correlation with class selection according to the following formula:\n D(f) = [(m1(f) - m(f))^2 + (m2(f) - m(f))^2] / 2*sigma(f)^2\n :return: the list of most significant features.\n \"\"\"\n data = tf.convert_to_tensor(data)\n num_features = data.get_shape().as_list()[-1]\n if top_k_features > num_features:\n top_k_features = num_features\n class1, class2 = tf.split(data, num_instances)\n\n with tf.name_scope('t_test_selection'):\n mean1, std1 = tf.nn.moments(class1, axes=0)\n mean2, std2 = tf.nn.moments(class2, axes=0)\n t_test_coeffs = tf.abs(mean1 - mean2) / tf.sqrt(\n tf.square(std1) / num_instances[0] + tf.square(std2) / num_instances[1])\n selected_features = tf.nn.top_k(t_test_coeffs, k=top_k_features)\n\n return selected_features\n\n\ndef random(data, num_instances: list, top_k_features=10):\n data = tf.convert_to_tensor(data)\n num_features = data.get_shape().as_list()[-1]\n if top_k_features > num_features:\n top_k_features = num_features\n class1, class2 = tf.split(data, num_instances)\n\n with tf.name_scope('random_selection'):\n pass"
] | [
[
"tensorflow.nn.moments",
"tensorflow.nn.top_k",
"tensorflow.name_scope",
"tensorflow.abs",
"tensorflow.convert_to_tensor",
"tensorflow.square",
"tensorflow.split"
]
] |
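The selection module above computes its Fisher criterion with `tf.nn.moments`, which returns a mean and a *variance* (despite the local names `std1`/`std2`). The sketch below re-expresses what `fisher()` evaluates in plain NumPy so it runs without TensorFlow; the toy data and the `num_instances=[6, 4]` split are assumptions.

```python
import numpy as np

# Toy two-class data: first 6 rows are class 1, the rest class 2,
# mirroring the num_instances split fisher() expects
rng = np.random.default_rng(0)
data = rng.normal(size=(10, 5))
num_instances = [6, 4]
class1, class2 = data[:num_instances[0]], data[num_instances[0]:]

# tf.nn.moments(x, axes=0) returns per-feature mean and variance
mean1, var1 = class1.mean(axis=0), class1.var(axis=0)
mean2, var2 = class2.mean(axis=0), class2.var(axis=0)

# What the graph actually evaluates: |m1 - m2| / (v1 + v2)
fisher_coeffs = np.abs(mean1 - mean2) / (var1 + var2)

# tf.nn.top_k equivalent: indices of the k most discriminative features
top_k = 2
selected = np.argsort(fisher_coeffs)[::-1][:top_k]
print(selected, fisher_coeffs[selected])
```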
josemariamoreira/BrainSpace | [
"d7e8e65c6463a81146e7fcfcca902feef04d329d"
] | [
"brainspace/mesh/mesh_operations.py"
] | [
"\"\"\"\nBasic functions on surface meshes.\n\"\"\"\n\n# Author: Oualid Benkarim <[email protected]>\n# License: BSD 3 clause\n\n\nimport warnings\nimport numpy as np\n\nfrom vtk import (vtkDataObject, vtkThreshold, vtkGeometryFilter,\n vtkAppendPolyData)\n\nfrom .array_operations import get_connected_components\nfrom ..vtk_interface import wrap_vtk, serial_connect, get_output\nfrom ..vtk_interface.pipeline import connect\nfrom ..vtk_interface.decorators import wrap_input\n\n\nASSOC_CELLS = vtkDataObject.FIELD_ASSOCIATION_CELLS\nASSOC_POINTS = vtkDataObject.FIELD_ASSOCIATION_POINTS\n\n\n@wrap_input(0)\ndef _surface_selection(surf, array_name, low=-np.inf, upp=np.inf,\n use_cell=False, keep=True):\n \"\"\"Selection of points or cells meeting some thresholding criteria.\n\n Parameters\n ----------\n surf : vtkPolyData or BSPolyData\n Input surface.\n array_name : str or ndarray\n Array used to perform selection.\n low : float or -np.inf\n Lower threshold. Default is -np.inf.\n upp : float or np.inf\n Upper threshold. Default is +np.inf.\n use_cell : bool, optional\n If True, apply selection to cells. Otherwise, use points.\n Default is False.\n keep : bool, optional\n If True, elements within the thresholds (inclusive) are kept.\n Otherwise, are discarded. Default is True.\n\n Returns\n -------\n surf_selected : BSPolyData\n Surface after thresholding.\n\n \"\"\"\n\n if low > upp:\n raise ValueError('Threshold limits are not valid: {0} -- {1}'.\n format(low, upp))\n\n at = 'c' if use_cell else 'p'\n if isinstance(array_name, np.ndarray):\n drop_array = True\n array = array_name\n array_name = surf.append_array(array, at=at)\n else:\n drop_array = False\n array = surf.get_array(name=array_name, at=at, return_name=False)\n\n if array.ndim > 1:\n raise ValueError('Arrays has more than one dimension.')\n\n if low == -np.inf:\n low = array.min()\n if upp == np.inf:\n upp = array.max()\n\n if keep is False:\n raise ValueError(\"Don't support 'keep=False'.\")\n\n # tf = wrap_vtk(vtkThreshold, invert=not keep)\n tf = wrap_vtk(vtkThreshold)\n tf.ThresholdBetween(low, upp)\n if use_cell:\n tf.SetInputArrayToProcess(0, 0, 0, ASSOC_CELLS, array_name)\n else:\n tf.SetInputArrayToProcess(0, 0, 0, ASSOC_POINTS, array_name)\n\n gf = wrap_vtk(vtkGeometryFilter(), merging=False)\n surf_sel = serial_connect(surf, tf, gf)\n\n # Check results\n mask = np.logical_and(array >= low, array <= upp)\n if keep:\n n_expected = np.count_nonzero(mask)\n else:\n n_expected = np.count_nonzero(~mask)\n\n n_sel = surf_sel.n_cells if use_cell else surf_sel.n_points\n if n_expected != n_sel:\n element = 'cells' if use_cell else 'points'\n warnings.warn('The number of selected {0} is different than expected. '\n 'This may be due to the topology after after selection: '\n 'expected={1}, selected={2}.'.\n format(element, n_expected, n_sel))\n\n if drop_array:\n surf.remove_array(name=array_name, at=at)\n surf_sel.remove_array(name=array_name, at=at)\n\n return surf_sel\n\n\n@wrap_input(0)\ndef _surface_mask(surf, mask, use_cell=False):\n \"\"\"Selection fo points or cells meeting some criteria.\n\n Parameters\n ----------\n surf : vtkPolyData or BSPolyData\n Input surface.\n mask : str or ndarray\n Binary boolean or integer array. Zero or False elements are\n discarded.\n use_cell : bool, optional\n If True, apply selection to cells. 
Otherwise, use points.\n Default is False.\n\n Returns\n -------\n surf_masked : BSPolyData\n PolyData after masking.\n\n \"\"\"\n\n if isinstance(mask, np.ndarray):\n if np.issubdtype(mask.dtype, np.bool_):\n mask = mask.astype(np.uint8)\n else:\n mask = surf.get_array(name=mask, at='c' if use_cell else 'p')\n\n if np.any(np.unique(mask) > 1):\n raise ValueError('Cannot work with non-binary mask.')\n\n return _surface_selection(surf, mask, low=1, upp=1, use_cell=use_cell,\n keep=True)\n\n\ndef drop_points(surf, array_name, low=-np.inf, upp=np.inf):\n \"\"\"Remove surface points whose values fall within the threshold.\n\n Cells corresponding to these points are also removed.\n\n Parameters\n ----------\n surf : vtkPolyData or BSPolyData\n Input surface.\n array_name : str or 1D ndarray\n Array used to perform selection. If str, it must be an array in\n the PointData attributes of the PolyData.\n low : float or -np.inf\n Lower threshold. Default is -np.inf.\n upp : float or np.inf\n Upper threshold. Default is np.inf.\n\n Returns\n -------\n surf_selected : vtkPolyData or BSPolyData\n PolyData after thresholding.\n\n See Also\n --------\n :func:`drop_cells`\n :func:`select_points`\n :func:`mask_points`\n\n \"\"\"\n\n return _surface_selection(surf, array_name, low=low, upp=upp, keep=False)\n\n\ndef drop_cells(surf, array_name, low=-np.inf, upp=np.inf):\n \"\"\"Remove surface cells whose values fall within the threshold.\n\n Points corresponding to these cells are also removed.\n\n Parameters\n ----------\n surf : vtkPolyData or BSPolyData\n Input surface.\n array_name : str or 1D ndarray\n Array used to perform selection. If str, it must be an array in\n the CellData attributes of the PolyData.\n low : float or -np.inf\n Lower threshold. Default is -np.inf.\n upp : float or np.inf\n Upper threshold. Default is np.inf.\n\n Returns\n -------\n surf_selected : vtkPolyData or BSPolyData\n PolyData after thresholding.\n\n See Also\n --------\n :func:`drop_points`\n :func:`select_cells`\n :func:`mask_cells`\n\n \"\"\"\n\n return _surface_selection(surf, array_name, low=low, upp=upp, use_cell=True,\n keep=False)\n\n\ndef select_points(surf, array_name, low=-np.inf, upp=np.inf):\n \"\"\"Select surface points whose values fall within the threshold.\n\n Cells corresponding to these points are also kept.\n\n Parameters\n ----------\n surf : vtkPolyData or BSPolyData\n Input surface.\n array_name : str or 1D ndarray\n Array used to perform selection. If str, it must be an array in\n the PointData attributes of the PolyData.\n low : float or -np.inf\n Lower threshold. Default is -np.inf.\n upp : float or np.inf\n Upper threshold. Default is np.inf.\n\n Returns\n -------\n surf_selected : vtkPolyData or BSPolyData\n PolyData after selection.\n\n See Also\n --------\n :func:`select_cells`\n :func:`drop_points`\n :func:`mask_points`\n\n \"\"\"\n\n return _surface_selection(surf, array_name, low=low, upp=upp, keep=True)\n\n\ndef select_cells(surf, array_name, low=-np.inf, upp=np.inf):\n \"\"\"Select surface cells whose values fall within the threshold.\n\n Points corresponding to these cells are also kept.\n\n Parameters\n ----------\n surf : vtkPolyData or BSPolyData\n Input surface.\n array_name : str or 1D ndarray\n Array used to perform selection. If str, it must be an array in\n the CellData attributes of the PolyData.\n low : float or -np.inf\n Lower threshold. Default is -np.inf.\n upp : float or np.inf\n Upper threshold. 
Default is np.inf.\n\n Returns\n -------\n surf_selected : vtkPolyData or BSPolyData\n PolyData after selection.\n\n See Also\n --------\n :func:`select_points`\n :func:`drop_cells`\n :func:`mask_cells`\n\n \"\"\"\n\n return _surface_selection(surf, array_name, low=low, upp=upp, use_cell=True,\n keep=True)\n\n\ndef mask_points(surf, mask):\n \"\"\"Mask surface points.\n\n Cells corresponding to these points are also kept.\n\n Parameters\n ----------\n surf : vtkPolyData or BSPolyData\n Input surface.\n mask : 1D ndarray\n Binary boolean array. Zero elements are discarded.\n\n Returns\n -------\n surf_masked : vtkPolyData or BSPolyData\n PolyData after masking.\n\n See Also\n --------\n :func:`mask_cells`\n :func:`drop_points`\n :func:`select_points`\n\n \"\"\"\n\n return _surface_mask(surf, mask)\n\n\ndef mask_cells(surf, mask):\n \"\"\"Mask surface cells.\n\n Points corresponding to these cells are also kept.\n\n Parameters\n ----------\n surf : vtkPolyData or BSPolyData\n Input surface.\n mask : 1D ndarray\n Binary boolean array. Zero elements are discarded.\n\n Returns\n -------\n surf_masked : vtkPolyData or BSPolyData\n PolyData after masking.\n\n See Also\n --------\n :func:`mask_points`\n :func:`drop_cells`\n :func:`select_cells`\n\n \"\"\"\n\n return _surface_mask(surf, mask, use_cell=True)\n\n\ndef combine_surfaces(*surfs):\n \"\"\" Combine surfaces.\n\n Parameters\n ----------\n surfs : sequence of vtkPolyData and/or BSPolyData\n Input surfaces.\n\n Returns\n -------\n res : BSPolyData\n Combination of input surfaces.\n\n See Also\n --------\n :func:`split_surface`\n\n \"\"\"\n\n alg = vtkAppendPolyData()\n for s in surfs:\n alg = connect(s, alg, add_conn=True)\n return get_output(alg)\n\n\n@wrap_input(0)\ndef split_surface(surf, labeling=None):\n \"\"\" Split surface according to the labeling.\n\n Parameters\n ----------\n surf : vtkPolyData or BSPolyData\n Input surface.\n labeling : str, 1D ndarray or None, optional\n Array used to perform the splitting. If str, it must be an array in\n the PointData attributes of `surf`. If None, split surface in its\n connected components. Default is None.\n\n Returns\n -------\n res : dict[int, BSPolyData]\n Dictionary of sub-surfaces for each label.\n\n See Also\n --------\n :func:`combine_surfaces`\n :func:`mask_points`\n\n \"\"\"\n\n if labeling is None:\n labeling = get_connected_components(surf)\n elif isinstance(labeling, str):\n labeling = surf.get_array(labeling, at='p')\n\n ulab = np.unique(labeling)\n return {l: mask_points(surf, labeling == l) for l in ulab}\n"
] | [
[
"numpy.logical_and",
"numpy.issubdtype",
"numpy.unique",
"numpy.count_nonzero"
]
] |
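The BrainSpace thresholding helpers above touch NumPy only through `np.logical_and`, `np.count_nonzero`, `np.issubdtype` and `np.unique`. The sketch below isolates that bookkeeping, the expected-element check from `_surface_selection` and the binary-mask validation from `_surface_mask`, on a made-up scalar array, with no VTK or surface involved.

```python
import numpy as np

# Made-up per-point scalar array and thresholds, standing in for the
# PointData array _surface_selection thresholds on
array = np.array([0.2, 1.5, 3.0, 0.7, 2.2])
low, upp = 0.5, 2.5

# Elements inside [low, upp] are kept; their count is the expected
# number of points left after vtkThreshold runs
mask = np.logical_and(array >= low, array <= upp)
n_expected = np.count_nonzero(mask)  # 3

# _surface_mask casts boolean masks to uint8 and rejects non-binary ones
if np.issubdtype(mask.dtype, np.bool_):
    int_mask = mask.astype(np.uint8)
if np.any(np.unique(int_mask) > 1):
    raise ValueError("Cannot work with non-binary mask.")

print(n_expected, int_mask)
```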
JaviAibar/DeepSpeechUnity | [
"b65dd29a6c71e4c274f521aa8b3a4c0af1240f90"
] | [
"Transcripter_Data/StreamingAssets/Dependencies/client.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function\n\nimport argparse\nimport numpy as np\nimport shlex\nimport subprocess\nimport sys\nimport wave\nimport ffmpeg\nimport json\nimport os\nfrom pathlib import Path\n\nfrom deepspeech import Model, version\nfrom timeit import default_timer as timer\n\ntry:\n from shhlex import quote\nexcept ImportError:\n from pipes import quote\n\nexport_json = False\ndebug_mode = False\ndef print_debug(text):\n if debug_mode:\n print(text)\n\ndef token_to_string(token):\n return \"Text: \" + (token.text if token.text else \"\\\" \\\"\") + \", start_time: \"+str(token.start_time)+\", timestep: \"+str(token.timestep)\n\n\nprint(\"Executed client.py\")\ndef convert_samplerate(audio_path, desired_sample_rate):\n prev_path = os.getcwd()\n mono_path = get_original_name(audio_path)\n os.chdir(\"./sox\")\n sox_cmd = \"\\'\"+str(os.getcwd()).replace(\"/\", \"\\\\\")+'\\sox.exe\\' {} --type raw --bits 16 --channels 1 --rate {} --encoding signed-integer --endian little --compression 0.0 --no-dither - '.format(quote(audio_path.replace(\"/\", \"\\\\\")), desired_sample_rate)\n try:\n output = subprocess.check_output(shlex.split(sox_cmd), stderr=subprocess.PIPE)\n except subprocess.CalledProcessError as e:\n raise RuntimeError('SoX returned non-zero status: {}'.format(e.stderr))\n except OSError as e:\n raise OSError(e.errno, 'SoX not found, use {}hz files or install it: {}'.format(desired_sample_rate, e.strerror))\n\n os.chdir(prev_path)\n return desired_sample_rate, np.frombuffer(output, np.int16)\n\n\ndef metadata_to_string(metadata):\n return ''.join(token.text for token in metadata.tokens)\n\n\ndef words_from_candidate_transcript(metadata):\n word = \"\"\n word_list = []\n word_start_time = 0\n # Loop through each character\n for i, token in enumerate(metadata.tokens):\n # Append character to word if it's not a space\n if token.text != \" \":\n if len(word) == 0:\n # Log the start time of the new word\n word_start_time = token.start_time\n\n word = word + token.text\n # Word boundary is either a space or the last character in the array\n if token.text == \" \" or i == len(metadata.tokens) - 1:\n word_duration = token.start_time - word_start_time\n\n if word_duration < 0:\n word_duration = 0\n\n each_word = dict()\n each_word[\"word\"] = word\n each_word[\"start_time\"] = round(word_start_time, 4)\n each_word[\"duration\"] = round(word_duration, 4)\n\n word_list.append(each_word)\n # Reset\n word = \"\"\n word_start_time = 0\n\n return word_list\n\n\ndef metadata_json_output(metadata):\n json_result = dict()\n json_result[\"transcripts\"] = [{\n \"confidence\": transcript.confidence,\n \"words\": words_from_candidate_transcript(transcript),\n } for transcript in metadata.transcripts]\n return json.dumps(json_result, indent=2)\n\n\n\nclass VersionAction(argparse.Action):\n def __init__(self, *args, **kwargs):\n super(VersionAction, self).__init__(nargs=0, *args, **kwargs)\n\n def __call__(self, *args, **kwargs):\n print('DeepSpeech ', version())\n exit(0)\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Running DeepSpeech inference.')\n parser.add_argument('--model', required=True,\n help='Path to the model (protocol buffer binary file)')\n parser.add_argument('--scorer', required=False,\n help='Path to the external scorer file')\n parser.add_argument('--audio', required=True,\n help='Path to the audio file to run (WAV format)')\n parser.add_argument('--beam_width', type=int,\n help='Beam 
width for the CTC decoder')\n parser.add_argument('--lm_alpha', type=float,\n help='Language model weight (lm_alpha). If not specified, use default from the scorer package.')\n parser.add_argument('--lm_beta', type=float,\n help='Word insertion bonus (lm_beta). If not specified, use default from the scorer package.')\n parser.add_argument('--version', action=VersionAction,\n help='Print version and exits')\n parser.add_argument('--extended', required=False, action='store_true',\n help='Output string from extended metadata')\n parser.add_argument('--json', required=False, action='store_true',\n help='Output json from metadata with timestamp of each word')\n parser.add_argument('--candidate_transcripts', type=int, default=3,\n help='Number of candidate transcripts to include in JSON output')\n parser.add_argument('--hot_words', type=str,\n help='Hot-words and their boosts.')\n parser.add_argument('--output', type=str,\n help='Output folder path where the srt file will be saved.')\n parser.add_argument('--srt_name', type=str,\n help='Name the output srt file will have.')\n parser.add_argument('--fake_video', type=str,\n help='It will create a video using subprocess and an image.')\n parser.add_argument('--video_to_audio', type=str,\n help='It will create a video using subprocess and an image.')\n args = parser.parse_args()\n\n print('Loading model from file {}'.format(args.model), file=sys.stderr)\n model_load_start = timer()\n # sphinx-doc: python_ref_model_start\n ds = Model(args.model)\n # sphinx-doc: python_ref_model_stop\n model_load_end = timer() - model_load_start\n print('Loaded model in {:.3}s.'.format(model_load_end), file=sys.stderr)\n\n if args.beam_width:\n ds.setBeamWidth(args.beam_width)\n\n desired_sample_rate = ds.sampleRate()\n\n if args.scorer:\n print('Loading scorer from files {}'.format(args.scorer), file=sys.stderr)\n scorer_load_start = timer()\n ds.enableExternalScorer(args.scorer)\n scorer_load_end = timer() - scorer_load_start\n print('Loaded scorer in {:.3}s.'.format(scorer_load_end), file=sys.stderr)\n\n if args.lm_alpha and args.lm_beta:\n ds.setScorerAlphaBeta(args.lm_alpha, args.lm_beta)\n\n if args.hot_words:\n print('Adding hot-words', file=sys.stderr)\n for word_boost in args.hot_words.split(','):\n word,boost = word_boost.split(':')\n ds.addHotWord(word,float(boost))\n\n if args.audio[-3:] != \"wav\":\n print(\"Video detectect, extracting audio file\")\n args.audio = get_audio_from_file(args.audio)\n\n fin = wave.open(args.audio, 'rb')\n fs_orig = fin.getframerate()\n is_mono = fin.getnchannels() == 1\n\n if fs_orig != desired_sample_rate or not is_mono:\n print('Warning: original sample rate ({}) is different than {}hz. 
Resampling might produce erratic speech recognition.'.format(fs_orig, desired_sample_rate), file=sys.stderr)\n fs_new, audio = convert_samplerate(args.audio, desired_sample_rate)\n else:\n audio = np.frombuffer(fin.readframes(fin.getnframes()), np.int16)\n\n audio_length = fin.getnframes() * (1/fs_orig)\n fin.close()\n\n print('Running inference.', file=sys.stderr)\n inference_start = timer()\n # sphinx-doc: python_ref_inference_start\n if args.extended:\n #metadata = ds.sttWithMetadata(audio, 1)\n #print(\"metadata\")\n #print(metadata)\n #for id, transcript in metadata.transcripts:\n # print(i)\n # print(transcript.confidence)\n # print(words_from_candidate_transcript(transcript))\n #metadata = metadata.transcripts[0].tokens\n obj = ds.sttWithMetadata(audio, 1)\n metadata = obj.transcripts[0].tokens\n #################################################\n if len(metadata) == 0:\n sys.exit(\"Sorry, this audio seems unintelligible or too short\")\n\n res = process_captions(metadata)\n print_debug(\"Finished reading, the final result:\")\n print_debug(res)\n #f = open(\"output.srt\", \"w\")\n\n path_segmented = get_original_name(args.audio)\n path_to_save = generate_path(path_segmented, args.output, args.srt_name)\n\n if os.path.exists(path_to_save):\n os.remove(path_to_save)\n f = open(path_to_save, \"w\")\n f.write(res)\n f.close()\n print(\"Successfully saved in \"+path_to_save)\n if export_json:\n jsonfile = open(\"results.json\", \"w\")\n json.dumps(data, jsonfile)\n jsonfile.close()\n\n #print(metadata)\n #print(metadata_to_string(ds.sttWithMetadata(audio, 1).transcripts[0]))\n elif args.json:\n print(metadata_json_output(ds.sttWithMetadata(audio, args.candidate_transcripts)))\n else:\n print(ds.stt(audio))\n # sphinx-doc: python_ref_inference_stop\n inference_end = timer() - inference_start\n print('Inference took %0.3fs for %0.3fs audio file.' 
% (inference_end, audio_length), file=sys.stderr)\n\n\ndef process_captions(metadata):\n index = 1\n threshold_soft = 0.05 # if exceed and possible, next line\n threshold_hard = 0.7 # if exceed, next block\n offset = 2\n limit_characters_line = 35\n #s = \"\"\n word = \"\"\n line = [\"\", \"\"]\n res = \"\"\n #new_block = True\n new_word = True\n prev = metadata[0].start_time\n block_start_time = metadata[0].start_time\n block_end_time = 0\n before_read_time = metadata[0].start_time\n after_read_time = 0\n line_count = 0 # Each block has a maximum of 2 lines\n data = []\n for id, t in enumerate(metadata):\n if export_json:\n data.append({\"text\":t.text, \"start_time\": t.start_time, \"timestep\": t.timestep})\n #block_end_time = t.start_time\n #if new_block:\n # before_read_time = t.start_time\n # new_block = False\n if new_word:\n print_debug(\"\\nNew word started at \"+str(t.start_time))\n before_read_time = t.start_time\n new_word = False\n\n #if id > 23 and id < 90: print(t.text, t.start_time)\n\n # As we didn't found an space nor the space is long, we're still with the same word\n if t.text != \" \" and t.start_time - prev < threshold_hard: # and t.start_time - prev < threshold_hard:\n print_debug(\"Added letter to the word \" + token_to_string(t))\n prev = t.start_time\n word += t.text\n # Word change\n else:\n if t.start_time - prev >= threshold_hard: # Silence greater than threshold\n print_debug(\"There was a great silence: \"+str(t.start_time - prev)+\", it shouldn't be greater than \"+str(threshold_hard)+\" to keep with the previous LINE\")\n line[line_count] += word\n res += str(index) + \"\\n\" + time_format(block_start_time) +\" --> \"+ time_format(prev) + \"\\n\" + line[0] + \"\\n\" + line[1]+\"\\n\\n\"\n print_debug(\"We save the already stated word with its timestamp\\n\")\n\n prev = t.start_time\n block_start_time = t.start_time\n line_count = 0\n line = [\"\", \"\"]\n word = t.text\n #new_block = True\n index += 1\n else: # word read\n #after_read_time = t.start_time\n print_debug(\"Complete read word: \" + word)\n #print(\"id; \", id, \"len meta: \", len(metadata))\n if len(line[line_count]) + len(word) > limit_characters_line: # Characters per line reached (new line or new block required)\n if line_count == 0 : # and metadata[id+1].start_time - t.start_time < threshold_hard\n print_debug(\"Line reached its limit, Saving the word in the next LINE\")\n line_count += 1\n line[line_count] += word + \" \"\n word = \"\"\n print_debug(\"Lines result so far is:\\n\\tLinea 1: \" +line[0]+ \"\\nLinea 2: \"+line[1]+\"\\n\")\n else: # We're on the second line, therefore, we need a new block\n print_debug(\"Line reached its limit, but we're at the second line, so we need a new BLOCK\")\n print_debug(\"Lines result so far is:\\n\\tLinea 1: \" +str(line[0])+ \"\\nLinea 2: \" + line[1])\n print_debug(\"We save both line to the final result\\n\")\n line_count = 0\n res += str(index) + \"\\n\" + time_format(block_start_time) +\" --> \"+ time_format(after_read_time) + \"\\n\" + line[0] + \"\\n\" + line[1]+\"\\n\\n\"\n block_start_time = before_read_time\n line = [\"\", \"\"]\n line[line_count] = word + \" \"\n word = \"\"\n #new_block = True\n index += 1\n\n else: # trivial case: we can save the word and continue reading.\n print_debug(\"Finished reading word: \" + word)\n line[line_count] += word + \" \"\n word = \"\"\n new_word = True\n after_read_time = t.start_time\n line[line_count] += word\n res += str(index) + \"\\n\" + time_format(block_start_time) +\" --> \"+ 
time_format(metadata[len(metadata)-1].start_time) + \"\\n\" + line[0] + \"\\n\" + line[1]+\"\\n\\n\"\n return res\n\ndef time_format(seconds):\n mon, sec = divmod(seconds, 60)\n hr, mon = divmod(mon, 60)\n return (\"{0:02.0f}:{1:02.0f}:{2:0>6.3f}\".format(hr, mon, sec)).replace(\".\", \",\")\n\ndef generate_path(path_segmented, output_path, output_name):\n if output_path:\n path_to_save = output_path\n else:\n path_to_save = path_segmented[0]\n\n if output_name:\n path_to_save = path_to_save + \"/\" + (output_name if output_name[-3:] == \"srt\" else output_name + \".srt\")\n else:\n path_to_save = path_to_save + \"/\" + path_segmented[1] + \".srt\"\n return path_to_save\n\ndef get_original_name(original_path):\n pathFragms = original_path.split(\"/\")\n fileName = pathFragms[len(pathFragms)-1].split(\".\")[0]\n pathFragms.pop()\n return [\"/\".join(pathFragms), fileName]\n\ndef get_audio_from_file(audio_path):\n original_path = get_original_name(audio_path)\n new_path = original_path[0] + \"/\" + original_path[1] + \".wav\"\n stream = ffmpeg.input(audio_path)\n stream = ffmpeg.output(stream.audio, new_path, ar=16000, ac=1)\n stream = ffmpeg.overwrite_output(stream)\n ffmpeg.run(stream)\n return new_path\n\ndef prueba():\n print(\"esteeee \"+str(os.getcwd()))\n\n print(\"esteeee2 \"+str(os.getcwd()))\n\n stream = ffmpeg.input('test.mp4')\n stream = ffmpeg.output(stream.audio, 'test.wav', ar=16000)\n stream = ffmpeg.overwrite_output(stream)\n ffmpeg.run(stream)\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.frombuffer"
]
] |
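The DeepSpeech client above is annotated only with `numpy.frombuffer`, which it uses to turn raw WAV frames into the int16 vector the model consumes. A minimal sketch of that step follows; `audio.wav` is a placeholder path and the file is assumed to already be 16-bit PCM mono at the model's sample rate.

```python
import wave
import numpy as np

# "audio.wav" is a placeholder; client.py takes the path from --audio
with wave.open("audio.wav", "rb") as fin:
    sample_rate = fin.getframerate()
    n_frames = fin.getnframes()
    # Raw little-endian 16-bit frames -> int16 sample vector for ds.stt()
    audio = np.frombuffer(fin.readframes(n_frames), np.int16)

audio_length = n_frames / sample_rate  # duration in seconds, as in client.py
print(sample_rate, audio.shape, audio_length)
```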
AITrading2020/zipline_1.3.0_pro | [
"8825c3ea029b3e75b10084b98aa4fa7609e68a75"
] | [
"zipline/utils/factory.py"
] | [
"#\n# Copyright 2016 Quantopian, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"\nFactory functions to prepare useful data.\n\"\"\"\nimport pandas as pd\nimport numpy as np\nfrom datetime import timedelta, datetime\nfrom trading_calendars import get_calendar\n\nfrom zipline.sources import SpecificEquityTrades\nfrom zipline.finance.trading import SimulationParameters\nfrom zipline.sources.test_source import create_trade\n\n\ndef create_simulation_parameters(year=2006,\n start=None,\n end=None,\n capital_base=float(\"1.0e5\"),\n num_days=None,\n data_frequency='daily',\n emission_rate='daily',\n trading_calendar=None):\n\n if not trading_calendar:\n trading_calendar = get_calendar(\"NYSE\")\n\n if start is None:\n start = pd.Timestamp(\"{0}-01-01\".format(year), tz='UTC')\n elif type(start) == datetime:\n start = pd.Timestamp(start)\n\n if end is None:\n if num_days:\n start_index = trading_calendar.all_sessions.searchsorted(start)\n end = trading_calendar.all_sessions[start_index + num_days - 1]\n else:\n end = pd.Timestamp(\"{0}-12-31\".format(year), tz='UTC')\n elif type(end) == datetime:\n end = pd.Timestamp(end)\n\n sim_params = SimulationParameters(\n start_session=start,\n end_session=end,\n capital_base=capital_base,\n data_frequency=data_frequency,\n emission_rate=emission_rate,\n trading_calendar=trading_calendar,\n )\n\n return sim_params\n\n\ndef get_next_trading_dt(current, interval, trading_calendar):\n next_dt = pd.Timestamp(current).tz_convert(trading_calendar.tz)\n\n while True:\n # Convert timestamp to naive before adding day, otherwise the when\n # stepping over EDT an hour is added.\n next_dt = pd.Timestamp(next_dt.replace(tzinfo=None))\n next_dt = next_dt + interval\n next_dt = pd.Timestamp(next_dt, tz=trading_calendar.tz)\n next_dt_utc = next_dt.tz_convert('UTC')\n if trading_calendar.is_open_on_minute(next_dt_utc):\n break\n next_dt = next_dt_utc.tz_convert(trading_calendar.tz)\n\n return next_dt_utc\n\n\ndef create_trade_history(sid, prices, amounts, interval, sim_params,\n trading_calendar, source_id=\"test_factory\"):\n trades = []\n current = sim_params.first_open\n\n oneday = timedelta(days=1)\n use_midnight = interval >= oneday\n for price, amount in zip(prices, amounts):\n if use_midnight:\n trade_dt = current.replace(hour=0, minute=0)\n else:\n trade_dt = current\n trade = create_trade(sid, price, amount, trade_dt, source_id)\n trades.append(trade)\n current = get_next_trading_dt(current, interval, trading_calendar)\n\n assert len(trades) == len(prices)\n return trades\n\n\ndef create_returns_from_range(sim_params):\n return pd.Series(index=sim_params.sessions,\n data=np.random.rand(len(sim_params.sessions)))\n\n\ndef create_returns_from_list(returns, sim_params):\n return pd.Series(index=sim_params.sessions[:len(returns)],\n data=returns)\n\n\ndef create_daily_trade_source(sids, sim_params, env, trading_calendar,\n concurrent=False):\n \"\"\"\n creates trade_count trades for each sid in sids list.\n first trade will be on 
sim_params.start_session, and daily\n thereafter for each sid. Thus, two sids should result in two trades per\n day.\n \"\"\"\n return create_trade_source(\n sids,\n timedelta(days=1),\n sim_params,\n env=env,\n trading_calendar=trading_calendar,\n concurrent=concurrent,\n )\n\n\ndef create_trade_source(sids, trade_time_increment, sim_params, env,\n trading_calendar, concurrent=False):\n\n # If the sim_params define an end that is during market hours, that will be\n # used as the end of the data source\n if trading_calendar.is_open_on_minute(sim_params.end_session):\n end = sim_params.end_session\n # Otherwise, the last_close after the end_session is used as the end of the\n # data source\n else:\n end = sim_params.last_close\n\n args = tuple()\n kwargs = {\n 'sids': sids,\n 'start': sim_params.first_open,\n 'end': end,\n 'delta': trade_time_increment,\n 'filter': sids,\n 'concurrent': concurrent,\n 'env': env,\n 'trading_calendar': trading_calendar,\n }\n source = SpecificEquityTrades(*args, **kwargs)\n\n return source\n"
] | [
[
"pandas.Timestamp"
]
] |
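The zipline factory above leans on `pandas.Timestamp` to normalise its `start`/`end` arguments. The snippet below shows just that normalisation (string defaults localised to UTC, caller-supplied datetimes wrapped); the year and dates are arbitrary.

```python
from datetime import datetime
import pandas as pd

year = 2006  # arbitrary, matching create_simulation_parameters' default

# Default bounds: strings parsed and localised to UTC
start = pd.Timestamp("{0}-01-01".format(year), tz="UTC")
end = pd.Timestamp("{0}-12-31".format(year), tz="UTC")

# A caller-supplied datetime is simply wrapped in a Timestamp
custom_start = pd.Timestamp(datetime(year, 3, 1))

print(start, end, custom_start)
```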
amanchokshi/mwa-satellites | [
"f9e8de353e7eddf28ed715c01d7d3fb5336f0f18"
] | [
"paper_plots/presentation/waterfall.py"
] | [
"import os\nimport sys\nimport time\nimport argparse\nimport numpy as np\n\nsys.path.append(\"../../decode_rf_data\")\nimport matplotlib.pyplot as plt\nfrom colormap import spectral\nimport rf_data as rf\n\nparser = argparse.ArgumentParser(\n description=\"\"\"\n Will plot waterfall plots\n \"\"\"\n)\n\nparser.add_argument(\n \"--rf_dir\",\n metavar=\"\\b\",\n default=\"./../../../data/\",\n help=\"Path to rf data directory. Default=./../../../data/\",\n)\nparser.add_argument(\n \"--ref_name\",\n metavar=\"\\b\",\n default=\"rf0XX_2019-10-10-02:30\",\n help=\"Name of ref data file. Default=rf0XX_2019-10-10-02:30\",\n)\nparser.add_argument(\n \"--tile_name\",\n metavar=\"\\b\",\n default=\"S10XX_2019-10-10-02:30\",\n help=\"Name of tile data file. Default=S10XX_2019-10-10-02:30\",\n)\nparser.add_argument(\n \"--out_dir\",\n metavar=\"\\b\",\n default=\"./../../outputs/paper_plots/\",\n help=\"Output dir. Default=./../../outputs/paper_plots/\",\n)\n\nargs = parser.parse_args()\nrf_dir = args.rf_dir\nref_name = args.ref_name\ntile_name = args.tile_name\nout_dir = args.out_dir\n\n# Make output dir if it doesn't exist\nos.makedirs(os.path.dirname(out_dir), exist_ok=True)\n\n\n# read in raw data\nref_p, ref_t = rf.read_data(f\"{rf_dir}/{ref_name}.txt\")\ntile_p, tile_t = rf.read_data(f\"{rf_dir}/{tile_name}.txt\")\n\n# scale median to zero\nref_p_median = np.median(ref_p)\ntile_p_median = np.median(tile_p)\nr_image = ref_p - ref_p_median\nt_image = tile_p - tile_p_median\n\n# setting dynamic range of waterfall to be 30 dB above the median\nvmin = 0\nvmax = 30\n\n# Custom spectral colormap\ncmap = spectral()\n\nnice_fonts = {\n # Use LaTeX to write all text\n \"text.usetex\": True,\n \"font.family\": \"sans-serif\",\n # Use 10pt font in plots, to match 10pt font in document\n \"axes.labelsize\": 10,\n \"font.size\": 10,\n # Make the legend/label fonts a little smaller\n \"legend.fontsize\": 8,\n \"xtick.labelsize\": 8,\n \"ytick.labelsize\": 8,\n \"ytick.color\": \"#696969\",\n \"xtick.color\": \"#696969\",\n \"axes.labelcolor\": \"#696969\",\n \"axes.edgecolor\": \"#696969\",\n}\n\nplt.rcParams.update(nice_fonts)\n\nfig = plt.figure(figsize=(7, 4.2))\n# fig, axs = plt.subplots(1,2, figsize=(7,5))\n# axs = axs.ravel()\n\nax1 = fig.add_axes([0.00, 0.0, 0.46, 1])\nax2 = fig.add_axes([0.50, 0.0, 0.46, 1])\ncax = fig.add_axes([0.98, 0.0, 0.015, 1])\n\nax1.imshow(t_image, vmin=vmin, vmax=vmax, interpolation=\"none\", cmap=cmap)\nax1.set_aspect(\"auto\")\nimage = ax1.get_images()[0]\ncbar = fig.colorbar(image, cax=cax, label=\"Power [dB]\")\n\n# Number of time steps on y-axis\nnumber_t = 5\nt_step = int(len(tile_t) / (number_t - 1))\ntimes = list(tile_t)\ntimes = times[::t_step]\n\nt_tz = []\n\n# Convert UNIX time to local HH:MM time\nfor i in range(len(times)):\n\n perth_t = float(times[i]) + 28800 # 28800=+8GMT @ PERTH\n hms = time.strftime(\"%H:%M\", time.gmtime(perth_t))\n t_tz.append(hms)\n\n# Frequency: x-axis\nstart_freq = 137.15\nstop_freq = 138.55\n\n# X-axis stuff\nx_ax = t_image.shape[1]\nfreqs = np.arange(start_freq, stop_freq, 0.25)\nx_ticks = np.arange(0, x_ax, (0.25 / 0.0125)) # .0125MHz/ch\nax1.set_xticks(x_ticks)\nax1.set_xticklabels(freqs)\nax1.set_xlabel(\"Freqency [MHz]\")\n\n# Y-axis stuff\ny_ax = t_image.shape[0]\ny_ticks = np.arange(0, y_ax, t_step)\nax1.set_yticks(y_ticks)\nax1.set_yticklabels(t_tz)\nax1.set_ylabel(\"MWA local time [HH:MM]\")\n\n\nax2.imshow(r_image, vmin=vmin, vmax=vmax, interpolation=\"none\", cmap=cmap)\nax2.set_aspect(\"auto\")\n# Number of time steps on 
y-axis\nnumber_t = 5\nt_step = int(len(ref_t) / (number_t - 1))\ntimes = list(ref_t)\ntimes = times[::t_step]\n\nt_tz = []\n\n# Convert UNIX time to local HH:MM time\nfor i in range(len(times)):\n\n perth_t = float(times[i]) + 28800 # 28800=+8GMT @ PERTH\n hms = time.strftime(\"%H:%M\", time.gmtime(perth_t))\n t_tz.append(hms)\n\n# Frequency: x-axis\nstart_freq = 137.15\nstop_freq = 138.55\n\n# X-axis stuff\nx_ax = r_image.shape[1]\nfreqs = np.arange(start_freq, stop_freq, 0.25)\nx_ticks = np.arange(0, x_ax, (0.25 / 0.0125)) # .0125MHz/ch\nax2.set_xticks(x_ticks)\nax2.set_xticklabels(freqs)\nax2.set_xlabel(\"Freqency [MHz]\")\n\n# Y-axis stuff\nax2.set_yticklabels([])\nax2.set_yticks([])\n\n\nplt.savefig(f\"waterfall.png\", dpi=144, transparent=True, bbox_inches=\"tight\")\n"
] | [
[
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"numpy.median",
"matplotlib.pyplot.rcParams.update",
"numpy.arange"
]
] |
Shmarkus/facenet | [
"ff3eaecc389e0636463ccb318b395c5f99eb39b4"
] | [
"tmp/visualize.py"
] | [
"\"\"\"Visualize individual feature channels and their combinations to explore the space of patterns learned by the neural network\nBased on http://nbviewer.jupyter.org/github/tensorflow/tensorflow/blob/master/tensorflow/examples/tutorials/deepdream/deepdream.ipynb\n\"\"\"\n# MIT License\n#\n# Copyright (c) 2016 David Sandberg\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport numpy as np\nimport sys\nimport argparse\nimport tensorflow.compat.v1 as tf\ntf.disable_v2_behavior()\nimport importlib\nfrom scipy import misc\n\ndef main(args):\n\n network = importlib.import_module(args.model_def, 'inference')\n\n # Start with a gray image with a little noise\n np.random.seed(seed=args.seed)\n img_noise = np.random.uniform(size=(args.image_size,args.image_size,3)) + 100.0\n\n sess = tf.Session()\n\n t_input = tf.placeholder(np.float32, shape=(args.image_size,args.image_size,3), name='input') # define the input tensor\n image_mean = 117.0\n t_preprocessed = tf.expand_dims(t_input-image_mean, 0)\n\n # Build the inference graph\n network.inference(t_preprocessed, 1.0,\n phase_train=True, weight_decay=0.0)\n\n # Create a saver for restoring variables\n saver = tf.train.Saver(tf.global_variables())\n\n # Restore the parameters\n saver.restore(sess, args.model_file)\n\n layers = [op.name for op in tf.get_default_graph().get_operations() if op.type=='Conv2D']\n feature_nums = {layer: int(T(layer).get_shape()[-1]) for layer in layers}\n\n print('Number of layers: %d' % len(layers))\n\n for layer in sorted(feature_nums.keys()):\n print('%s%d' % ((layer+': ').ljust(40), feature_nums[layer]))\n\n # Picking some internal layer. 
Note that we use outputs before applying the ReLU nonlinearity\n # to have non-zero gradients for features with negative initial activations.\n layer = 'InceptionResnetV1/Repeat_2/block8_3/Conv2d_1x1/Conv2D'\n #layer = 'incept4b/in4_conv1x1_31/Conv2D'\n result_dir = '../data/'\n print('Number of features in layer \"%s\": %d' % (layer, feature_nums[layer]))\n channels = range(feature_nums[layer])\n np.random.shuffle(channels)\n for i in range(32):\n print('Rendering feature %d' % channels[i])\n channel = channels[i]\n img = render_naive(sess, t_input, T(layer)[:,:,:,channel], img_noise)\n filename = '%s_%03d.png' % (layer.replace('/', '_'), channel)\n misc.imsave(os.path.join(result_dir, filename), img)\n\n\ndef T(layer):\n '''Helper for getting layer output tensor'''\n return tf.get_default_graph().get_tensor_by_name('%s:0' % layer)\n\ndef visstd(a, s=0.1):\n '''Normalize the image range for visualization'''\n return (a-a.mean())/max(a.std(), 1e-4)*s + 0.5\n\ndef render_naive(sess, t_input, t_obj, img0, iter_n=20, step=1.0):\n t_score = tf.reduce_mean(t_obj) # defining the optimization objective\n t_grad = tf.gradients(t_score, t_input)[0] # behold the power of automatic differentiation!\n\n img = img0.copy()\n for _ in range(iter_n):\n g, _ = sess.run([t_grad, t_score], {t_input:img})\n # normalizing the gradient, so the same step size should work\n g /= g.std()+1e-8 # for different layers and networks\n img += g*step\n return visstd(img)\n\ndef parse_arguments(argv):\n parser = argparse.ArgumentParser()\n\n parser.add_argument('model_file', type=str,\n help='Directory containing the graph definition and checkpoint files.')\n parser.add_argument('--model_def', type=str,\n help='Model definition. Points to a module containing the definition of the inference graph.',\n default='models.nn4')\n parser.add_argument('--image_size', type=int,\n help='Image size (height, width) in pixels.', default=96)\n parser.add_argument('--seed', type=int,\n help='Random seed.', default=666)\n return parser.parse_args(argv)\n\nif __name__ == '__main__':\n main(parse_arguments(sys.argv[1:]))\n"
] | [
[
"tensorflow.compat.v1.placeholder",
"numpy.random.uniform",
"tensorflow.compat.v1.Session",
"numpy.random.shuffle",
"numpy.random.seed",
"tensorflow.compat.v1.reduce_mean",
"tensorflow.compat.v1.global_variables",
"tensorflow.compat.v1.gradients",
"tensorflow.compat.v1.expand_dims",
"tensorflow.compat.v1.get_default_graph",
"tensorflow.compat.v1.disable_v2_behavior"
]
] |
idelbrid/gpytorch | [
"092d523027a844939ba85d7ea8c8c7b7511843d5"
] | [
"gpytorch/lazy/kronecker_product_lazy_tensor.py"
] | [
"#!/usr/bin/env python3\n\nimport torch\nimport operator\nfrom .lazy_tensor import LazyTensor\nfrom .non_lazy_tensor import lazify\nfrom ..utils.broadcasting import _matmul_broadcast_shape\nfrom ..utils.memoize import cached\nfrom functools import reduce\n\n\ndef _prod(iterable):\n return reduce(operator.mul, iterable, 1)\n\n\ndef _matmul(lazy_tensors, kp_shape, rhs):\n output_shape = _matmul_broadcast_shape(kp_shape, rhs.shape)\n output_batch_shape = output_shape[:-2]\n\n res = rhs.contiguous().expand(*output_batch_shape, *rhs.shape[-2:])\n num_cols = rhs.size(-1)\n for lazy_tensor in lazy_tensors:\n res = res.view(*output_batch_shape, lazy_tensor.size(-1), -1)\n factor = lazy_tensor._matmul(res)\n factor = factor.view(*output_batch_shape, lazy_tensor.size(-2), -1, num_cols).transpose(-3, -2)\n res = factor.reshape(*output_batch_shape, -1, num_cols)\n return res\n\n\ndef _t_matmul(lazy_tensors, kp_shape, rhs):\n kp_t_shape = (*kp_shape[:-2], kp_shape[-1], kp_shape[-2])\n output_shape = _matmul_broadcast_shape(kp_t_shape, rhs.shape)\n output_batch_shape = torch.Size(output_shape[:-2])\n\n res = rhs.contiguous().expand(*output_batch_shape, *rhs.shape[-2:])\n num_cols = rhs.size(-1)\n for lazy_tensor in lazy_tensors:\n res = res.view(*output_batch_shape, lazy_tensor.size(-2), -1)\n factor = lazy_tensor._t_matmul(res)\n factor = factor.view(*output_batch_shape, lazy_tensor.size(-1), -1, num_cols).transpose(-3, -2)\n res = factor.reshape(*output_batch_shape, -1, num_cols)\n return res\n\n\nclass KroneckerProductLazyTensor(LazyTensor):\n def __init__(self, *lazy_tensors):\n try:\n lazy_tensors = tuple(lazify(lazy_tensor) for lazy_tensor in lazy_tensors)\n except TypeError:\n raise RuntimeError(\"KroneckerProductLazyTensor is intended to wrap lazy tensors.\")\n for prev_lazy_tensor, curr_lazy_tensor in zip(lazy_tensors[:-1], lazy_tensors[1:]):\n if prev_lazy_tensor.batch_shape != curr_lazy_tensor.batch_shape:\n raise RuntimeError(\n \"KroneckerProductLazyTensor expects lazy tensors with the \"\n \"same batch shapes. 
Got {}.\".format([lv.batch_shape for lv in lazy_tensors])\n )\n super(KroneckerProductLazyTensor, self).__init__(*lazy_tensors)\n self.lazy_tensors = lazy_tensors\n\n def _get_indices(self, row_index, col_index, *batch_indices):\n row_factor = self.size(-2)\n col_factor = self.size(-1)\n\n res = None\n for lazy_tensor in self.lazy_tensors:\n sub_row_size = lazy_tensor.size(-2)\n sub_col_size = lazy_tensor.size(-1)\n\n row_factor //= sub_row_size\n col_factor //= sub_col_size\n sub_res = lazy_tensor._get_indices(\n row_index.div(row_factor).fmod(sub_row_size),\n col_index.div(col_factor).fmod(sub_col_size),\n *batch_indices\n )\n res = sub_res if res is None else (sub_res * res)\n\n return res\n\n def _matmul(self, rhs):\n is_vec = rhs.ndimension() == 1\n if is_vec:\n rhs = rhs.unsqueeze(-1)\n\n res = _matmul(self.lazy_tensors, self.shape, rhs.contiguous())\n\n if is_vec:\n res = res.squeeze(-1)\n return res\n\n def _t_matmul(self, rhs):\n is_vec = rhs.ndimension() == 1\n if is_vec:\n rhs = rhs.unsqueeze(-1)\n\n res = _t_matmul(self.lazy_tensors, self.shape, rhs.contiguous())\n\n if is_vec:\n res = res.squeeze(-1)\n return res\n\n def _expand_batch(self, batch_shape):\n return self.__class__(*[lazy_tensor._expand_batch(batch_shape) for lazy_tensor in self.lazy_tensors])\n\n @cached(name=\"size\")\n def _size(self):\n left_size = _prod(lazy_tensor.size(-2) for lazy_tensor in self.lazy_tensors)\n right_size = _prod(lazy_tensor.size(-1) for lazy_tensor in self.lazy_tensors)\n return torch.Size((*self.lazy_tensors[0].batch_shape, left_size, right_size))\n\n def _transpose_nonbatch(self):\n return self.__class__(*(lazy_tensor._transpose_nonbatch() for lazy_tensor in self.lazy_tensors), **self._kwargs)\n"
] | [
[
"torch.Size"
]
] |
syuoni/eznlp | [
"9d1397d8e9630c099295712cbcffa495353a3268"
] | [
"eznlp/model/decoder/sequence_tagging.py"
] | [
"# -*- coding: utf-8 -*-\nfrom typing import List\nfrom collections import Counter\nimport torch\n\nfrom ...wrapper import TargetWrapper, Batch\nfrom ...utils import ChunksTagsTranslator\nfrom ...nn.utils import unpad_seqs\nfrom ...nn.modules import CombinedDropout, CRF\nfrom ...nn.init import reinit_layer_\nfrom ...metrics import precision_recall_f1_report\nfrom .base import DecoderMixinBase, SingleDecoderConfigBase, DecoderBase\n\n\nclass SequenceTaggingDecoderMixin(DecoderMixinBase):\n @property\n def scheme(self):\n return self._scheme\n \n @scheme.setter\n def scheme(self, scheme: str):\n self._scheme = scheme\n self.translator = ChunksTagsTranslator(scheme=scheme)\n \n @property\n def idx2tag(self):\n return self._idx2tag\n \n @idx2tag.setter\n def idx2tag(self, idx2tag: List[str]):\n self._idx2tag = idx2tag\n self.tag2idx = {t: i for i, t in enumerate(self.idx2tag)} if idx2tag is not None else None\n \n @property\n def voc_dim(self):\n return len(self.tag2idx)\n \n @property\n def pad_idx(self):\n return self.tag2idx['<pad>']\n \n def exemplify(self, data_entry: dict, training: bool=True):\n return {'tags_obj': Tags(data_entry, self, training=training)}\n \n def batchify(self, batch_examples: List[dict]):\n return {'tags_objs': [ex['tags_obj'] for ex in batch_examples]}\n \n def retrieve(self, batch: Batch):\n return [tags_obj.chunks for tags_obj in batch.tags_objs]\n \n def evaluate(self, y_gold: List[List[tuple]], y_pred: List[List[tuple]]):\n \"\"\"Micro-F1 for entity recognition. \n \n References\n ----------\n https://www.clips.uantwerpen.be/conll2000/chunking/output.html\n \"\"\"\n scores, ave_scores = precision_recall_f1_report(y_gold, y_pred)\n return ave_scores['micro']['f1']\n\n\n\nclass Tags(TargetWrapper):\n \"\"\"A wrapper of tags with underlying chunks. 
\n \n Parameters\n ----------\n data_entry: dict\n {'tokens': TokenSequence, \n 'chunks': List[tuple]}\n \"\"\"\n def __init__(self, data_entry: dict, config: SequenceTaggingDecoderMixin, training: bool=True):\n super().__init__(training)\n \n self.chunks = data_entry.get('chunks', None)\n if self.chunks is not None:\n self.tags = config.translator.chunks2tags(data_entry['chunks'], len(data_entry['tokens']))\n self.tag_ids = torch.tensor([config.tag2idx[t] for t in self.tags], dtype=torch.long)\n\n\n\nclass SequenceTaggingDecoderConfig(SingleDecoderConfigBase, SequenceTaggingDecoderMixin):\n def __init__(self, **kwargs):\n self.in_drop_rates = kwargs.pop('in_drop_rates', (0.5, 0.0, 0.0))\n \n self.scheme = kwargs.pop('scheme', 'BIOES')\n self.idx2tag = kwargs.pop('idx2tag', None)\n \n self.use_crf = kwargs.pop('use_crf', True)\n super().__init__(**kwargs)\n \n \n @property\n def name(self):\n return self._name_sep.join([self.scheme, self.criterion])\n \n def __repr__(self):\n repr_attr_dict = {key: getattr(self, key) for key in ['in_dim', 'in_drop_rates', 'scheme', 'criterion']}\n return self._repr_non_config_attrs(repr_attr_dict)\n \n @property\n def criterion(self):\n if self.use_crf:\n return \"CRF\"\n else:\n return super().criterion\n \n def instantiate_criterion(self, **kwargs):\n if self.criterion.lower().startswith('crf'):\n return CRF(tag_dim=self.voc_dim, pad_idx=self.pad_idx, batch_first=True)\n else:\n return super().instantiate_criterion(**kwargs)\n \n \n def build_vocab(self, *partitions):\n counter = Counter()\n for data in partitions:\n for data_entry in data:\n curr_tags = self.translator.chunks2tags(data_entry['chunks'], len(data_entry['tokens']))\n counter.update(curr_tags)\n self.idx2tag = ['<pad>'] + list(counter.keys())\n \n \n def instantiate(self):\n return SequenceTaggingDecoder(self)\n\n\n\nclass SequenceTaggingDecoder(DecoderBase, SequenceTaggingDecoderMixin):\n def __init__(self, config: SequenceTaggingDecoderConfig):\n super().__init__()\n self.scheme = config.scheme\n self.idx2tag = config.idx2tag\n \n self.dropout = CombinedDropout(*config.in_drop_rates)\n self.hid2logit = torch.nn.Linear(config.in_dim, config.voc_dim)\n reinit_layer_(self.hid2logit, 'sigmoid')\n \n self.criterion = config.instantiate_criterion(ignore_index=config.pad_idx, reduction='sum')\n \n \n def forward(self, batch: Batch, full_hidden: torch.Tensor):\n # logits: (batch, step, tag_dim)\n logits = self.hid2logit(self.dropout(full_hidden))\n \n if isinstance(self.criterion, CRF):\n batch_tag_ids = torch.nn.utils.rnn.pad_sequence([tags_obj.tag_ids for tags_obj in batch.tags_objs], \n batch_first=True, \n padding_value=self.criterion.pad_idx)\n losses = self.criterion(logits, batch_tag_ids, mask=batch.mask)\n \n else:\n losses = [self.criterion(lg[:slen], tags_obj.tag_ids) for lg, tags_obj, slen in zip(logits, batch.tags_objs, batch.seq_lens.cpu().tolist())]\n # `torch.stack`: Concatenates sequence of tensors along a new dimension. 
\n losses = torch.stack(losses, dim=0)\n \n return losses\n \n \n def decode_tags(self, batch: Batch, full_hidden: torch.Tensor):\n # logits: (batch, step, tag_dim)\n logits = self.hid2logit(full_hidden)\n \n if isinstance(self.criterion, CRF):\n # List of List of predicted-tag-ids\n batch_tag_ids = self.criterion.decode(logits, mask=batch.mask)\n \n else:\n best_paths = logits.argmax(dim=-1)\n batch_tag_ids = unpad_seqs(best_paths, batch.seq_lens)\n \n return [[self.idx2tag[i] for i in tag_ids] for tag_ids in batch_tag_ids]\n \n \n def decode(self, batch: Batch, full_hidden: torch.Tensor):\n batch_tags = self.decode_tags(batch, full_hidden)\n return [self.translator.tags2chunks(tags) for tags in batch_tags]\n"
] | [
[
"torch.stack",
"torch.nn.utils.rnn.pad_sequence",
"torch.tensor",
"torch.nn.Linear"
]
] |
frederikschubert/atari-representation-learning | [
"35743d4e2c765c8be4b771271b2c738c8859af92"
] | [
"scripts/run_rl.py"
] | [
"import multiprocessing\n\nmultiprocessing.set_start_method(\"spawn\", True)\nimport os\nimport sys\nimport time\nfrom collections import deque\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport wandb\n\nfrom a2c_ppo_acktr import algo, utils\nfrom a2c_ppo_acktr.model import NNBase, Policy\nfrom a2c_ppo_acktr.storage import RolloutStorage\nfrom aari.envs import make_vec_envs\nfrom src.encoders import ImpalaCNN, NatureCNN\nfrom src.utils import get_argparser\n\n\ndef get_envs(\n env_name, seed=42, num_processes=1, num_frame_stack=1, downsample=False, color=False\n):\n return make_vec_envs(\n env_name, seed, num_processes, num_frame_stack, downsample, color\n )\n\n\ndef get_encoder(args, observation_shape, device):\n if args.encoder_type == \"Nature\":\n encoder = NatureCNN(observation_shape[0], args)\n elif args.encoder_type == \"Impala\":\n encoder = ImpalaCNN(observation_shape[0], args)\n\n if args.weights_path == \"None\":\n sys.stderr.write(\n \"Training without loading in encoder weights! Are sure you want to do that??\"\n )\n else:\n print(\n \"Print loading in encoder weights from probe of type {} from the following path: {}\".format(\n args.method, args.weights_path\n )\n )\n encoder.load_state_dict(torch.load(args.weights_path, map_location=device))\n encoder.eval()\n return encoder\n\nclass SimpleBase(NNBase):\n def __init__(self, num_inputs, recurrent=False, hidden_size=256, encoder=None):\n super().__init__(recurrent, num_inputs, hidden_size)\n init_ = lambda m: utils.init(\n m, nn.init.orthogonal_, lambda x: nn.init.constant_(x, 0), np.sqrt(2)\n )\n\n if recurrent:\n num_inputs = hidden_size\n\n # self.actor = init_(nn.Linear(num_inputs, hidden_size)), nn.Tanh(),\n self.encoder = encoder\n self.critic_linear = init_(nn.Linear(num_inputs, 1))\n self.train()\n\n def forward(self, inputs, rnn_hxs, masks):\n if args.weights_path == \"None\":\n x = self.encoder(inputs)\n else:\n with torch.no_grad():\n x = self.encoder(inputs)\n if self.is_recurrent:\n x, rnn_hxs = self._forward_gru(x, rnn_hxs, masks)\n return self.critic_linear(x), x, rnn_hxs\n\ndef get_agent(args, envs, encoder, device):\n\n actor_critic = Policy(\n [encoder.feature_size], envs.action_space, base=SimpleBase, base_kwargs={\"encoder\": encoder}\n )\n actor_critic.to(device)\n agent = algo.PPO(\n actor_critic,\n args.ppo_clip_param,\n args.ppo_epoch,\n args.ppo_num_mini_batch,\n args.ppo_value_loss_coef,\n args.ppo_entropy_coef,\n lr=args.ppo_lr,\n eps=args.ppo_eps,\n max_grad_norm=args.ppo_max_grad_norm,\n )\n return agent, actor_critic\n\n\ndef train(args, envs, encoder, agent, actor_critic, device):\n rollouts = RolloutStorage(\n args.num_steps,\n args.num_processes,\n envs.observation_space.shape,\n envs.action_space,\n actor_critic.recurrent_hidden_state_size,\n )\n\n obs = envs.reset()\n rollouts.obs[0].copy_(obs)\n rollouts.to(device)\n\n episode_rewards = deque(maxlen=10)\n\n start = time.time()\n num_updates = int(args.num_env_steps) // args.num_steps // args.num_processes\n for j in range(num_updates):\n\n if args.ppo_use_linear_lr_decay:\n # decrease learning rate linearly\n utils.update_linear_schedule(agent.optimizer, j, num_updates, args.ppo_lr)\n\n for step in range(args.num_steps):\n # Sample actions\n with torch.no_grad():\n value, action, action_log_probs, recurrent_hidden_states, actor_features, dist_entropy = actor_critic.act(\n rollouts.obs[step],\n rollouts.recurrent_hidden_states[step],\n rollouts.masks[step],\n )\n\n # Obser reward and next obs\n obs, reward, done, infos = 
envs.step(action)\n\n # TODO: Check that the encoder is not updated\n # TODO: Analyze features of vae and infonce-st encoder\n\n for info in infos:\n if \"episode\" in info.keys():\n episode_rewards.append(info[\"episode\"][\"r\"])\n\n # If done then clean the history of observations.\n masks = torch.FloatTensor([[0.0] if done_ else [1.0] for done_ in done])\n bad_masks = torch.FloatTensor(\n [[0.0] if \"bad_transition\" in info.keys() else [1.0] for info in infos]\n )\n rollouts.insert(\n obs,\n recurrent_hidden_states,\n action,\n action_log_probs,\n value,\n reward,\n masks,\n bad_masks,\n )\n\n with torch.no_grad():\n next_value = actor_critic.get_value(\n rollouts.obs[-1],\n rollouts.recurrent_hidden_states[-1],\n rollouts.masks[-1],\n )\n\n rollouts.compute_returns(\n next_value, False, args.ppo_gamma, 0.0, args.use_proper_time_limits\n )\n\n value_loss, action_loss, dist_entropy = agent.update(rollouts)\n\n rollouts.after_update()\n\n # save for every interval-th episode or for the last epoch\n if j % args.save_interval == 0 or j == num_updates - 1:\n torch.save(\n [actor_critic, getattr(utils.get_vec_normalize(envs), \"ob_rms\", None)],\n os.path.join(wandb.run.dir, args.env_name + \".pt\"),\n )\n\n if j % args.log_interval == 0 and len(episode_rewards) > 1:\n total_num_steps = (j + 1) * args.num_processes * args.num_steps\n end = time.time()\n print(\n \"Updates {}, num timesteps {}, FPS {} \\n Last {} training episodes: mean/median reward {:.1f}/{:.1f}, min/max reward {:.1f}/{:.1f}\\n\".format(\n j,\n total_num_steps,\n int(total_num_steps / (end - start)),\n len(episode_rewards),\n np.mean(episode_rewards),\n np.median(episode_rewards),\n np.min(episode_rewards),\n np.max(episode_rewards),\n )\n )\n wandb.log(\n {\n \"updates\": j,\n \"total_num_steps\": total_num_steps,\n \"fps\": int(total_num_steps / (end - start)),\n \"episode_rewards_mean\": np.mean(episode_rewards),\n \"episode_rewards_median\": np.median(episode_rewards),\n \"episode_rewards_min\": np.min(episode_rewards),\n \"episode_rewards_max\": np.max(episode_rewards),\n \"entropy\": dist_entropy,\n \"value_loss\": value_loss,\n \"policy_loss\": action_loss,\n }\n )\n\n\ndef run_rl(args):\n device = torch.device(\n \"cuda:\" + str(args.cuda_id) if torch.cuda.is_available() else \"cpu\"\n )\n envs = get_envs(\n env_name=args.env_name,\n seed=args.seed,\n num_processes=args.num_processes,\n num_frame_stack=args.num_frame_stack,\n downsample=not args.no_downsample,\n color=args.color,\n )\n encoder = get_encoder(args, envs.observation_space.shape, device)\n agent, actor_critic = get_agent(args, envs, encoder, device)\n wandb.watch(actor_critic)\n train(args, envs, encoder, agent, actor_critic, device)\n\n\nif __name__ == \"__main__\":\n parser = get_argparser()\n args = parser.parse_args()\n tags = [\"rl\"]\n wandb.init(project=args.wandb_proj, tags=tags)\n config = {}\n config.update(vars(args))\n wandb.config.update(config)\n run_rl(args)\n"
] | [
[
"torch.FloatTensor",
"torch.nn.Linear",
"torch.load",
"torch.nn.init.constant_",
"torch.no_grad",
"numpy.median",
"torch.cuda.is_available",
"numpy.max",
"numpy.min",
"numpy.sqrt",
"numpy.mean"
]
] |
ubc-vision/mist | [
"ef8ab358e83dc306f356910578c5a7c1f8d193d8"
] | [
"datasets/mnist.py"
] | [
"import os\nimport torch\nimport torch.utils.data\nimport numpy as np\nimport skimage.io\nimport skimage.transform\n\nMNIST_CLASSES = (\n \"0\",\n \"1\",\n \"2\",\n \"3\",\n \"4\",\n \"5\",\n \"6\",\n \"7\",\n \"8\",\n \"9\"\n)\n\nclass MNISTMetaData():\n def __init__(self):\n self.cls = MNIST_CLASSES\n def get_num_class(self):\n return len(self.cls)\n def get_class_name(self, class_id):\n return self.cls[class_id]\n\nclass MNIST(torch.utils.data.Dataset):\n def __init__(self, config, mode):\n self.root_dir = config.dataset_dir+'/'+config.dataset+'/'\n self.image_paths = np.genfromtxt(self.root_dir + mode + '.txt', delimiter=',', dtype='str', encoding='utf-8')\n self.labels = np.genfromtxt(self.root_dir + mode +'_labels.txt', delimiter=',', dtype='int', encoding='utf-8')\n self.keypoints = np.load(self.root_dir + mode +'_keypoints.npy')\n self.num_kp = config.k\n self.image_size =config.image_size\n\n def __len__(self):\n return len(self.image_paths)\n\n def __getitem__(self, idx):\n # load image\n img_name = os.path.join(self.root_dir, self.image_paths[idx])\n image = skimage.io.imread(img_name)\n image = skimage.transform.resize(image,(self.image_size,self.image_size))\n image = torch.from_numpy(image).permute(2, 0, 1).float()\n image = torch.clamp(image, 0.0, 1.0)\n\n # load keypoints\n keypoints = torch.from_numpy(self.keypoints[idx].copy())\n keypoints[:,2] = keypoints[:,2] * 2.0\n keypoints = torch.cat((keypoints,keypoints[:,[2]]), axis=-1)\n stride = self.image_size/image.shape[1]\n keypoints = keypoints*stride\n\n # load label\n labels = torch.from_numpy(self.labels[idx])\n\n return image, keypoints, labels"
] | [
[
"numpy.load",
"torch.from_numpy",
"numpy.genfromtxt",
"torch.cat",
"torch.clamp"
]
] |
Jun-bitacademy/PyPortfolioOpt | [
"a1ffaf403b1a7ba992480838993986241d067d52"
] | [
"From_Colab/Strategies/def_Momentum1mo_eff.py"
] | [
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport FinanceDataReader as fdr\nfrom pykrx import stock\nimport datetime\nimport requests\n# from datetime import timedelta # 마이크로초 전, 마이크로초 후 를 구하고 싶다면 timedelta\nfrom dateutil.relativedelta import relativedelta # 몇달 전, 몇달 후, 몇년 전, 몇년 후 를 구하고 싶다면 relativedelta\nfrom pypfopt.efficient_frontier import EfficientFrontier\nfrom pypfopt import risk_models\nfrom pypfopt import expected_returns\nfrom pypfopt.discrete_allocation import DiscreteAllocation, get_latest_prices\nfrom pypfopt import plotting\nimport warnings\nwarnings.filterwarnings(action='ignore')\nfrom Class_Strategies import Strategies as st\n\n# 모멘텀 1 mo\nmomentum_1month_rank = st.momentum_1month()\n\ndef Momentum1mo_eff():\n # 종목 이름 및 코드\n kospi_temp = fdr.StockListing('KOSPI')[['Symbol', 'Name']]\n kosdaq_temp = fdr.StockListing('KOSDAQ')[['Symbol', 'Name']]\n code_name_dict = pd.concat([kospi_temp, kosdaq_temp])\n code_name_dict = code_name_dict.set_index('Symbol').to_dict().get('Name') # {'095570': 'AJ네트웍스',\n\n assets = np.array(momentum_1month_rank.index[:30])\n start_date = datetime.datetime.today() - relativedelta(years=3)\n start_date = start_date.strftime('%Y%m%d')\n today = datetime.datetime.today().strftime(\"%Y%m%d\")\n end_date = today\n df = pd.DataFrame()\n\n for s in assets:\n df[s] = fdr.DataReader(s, start_date, end_date)['Close']\n\n # drop null\n dfnull = df.dropna(axis=1)\n\n # 수익률의 공분산\n mu = expected_returns.mean_historical_return(dfnull)\n S = risk_models.sample_cov(dfnull)\n # print(plotting.plot_covariance(S))\n\n # 포폴 최적화 (Max sharp ratio)\n vol_limit = 0.3\n ef = EfficientFrontier(mu, S, solver=\"SCS\")\n weights = ef.efficient_risk(vol_limit)\n cleaned_weights = ef.clean_weights()\n print(ef.portfolio_performance(verbose=True))\n\n one_million = 1000000\n portfolio_val = 15 * one_million\n latest_prices = get_latest_prices(dfnull)\n weights = cleaned_weights\n da = DiscreteAllocation(weights, latest_prices, total_portfolio_value=portfolio_val)\n allocation, leftover = da.lp_portfolio(verbose=False)\n rmse = da._allocation_rmse_error(verbose=False)\n\n # 각 종목별 실제 투자 금액\n inv_total_price = {}\n for i in allocation.keys():\n inv_total_price[i] = latest_prices.loc[i] * allocation[i]\n inv_total_price\n\n # 총 투자금액\n investment = 0\n for i in inv_total_price.values():\n investment += i\n print(investment)\n\n # 각 종목별 실제 투자 비중\n inv_total_weight = {}\n for i in allocation.keys():\n inv_total_weight[i] = inv_total_price[i] / investment\n inv_total_weight\n\n # 투자비중의 합계\n investment_w = 0\n for i in inv_total_weight.values():\n investment_w += i\n print(investment_w)\n\n # 결과값으로 불러올 값을 리스트로 저장\n name_list = [] # 종목명(회사이름)\n total_price_stock = [] # 각 종목별 실제 투자 금액\n total_weight_stock = [] # 각 종목별 실제 투자 비중\n for i in allocation.keys(): # i = 포트폴리오에 할당된 종목의 종목코드\n name_list.append(code_name_dict.get(i))\n total_price_stock.append(inv_total_price.get(i))\n total_weight_stock.append(inv_total_weight.get(i))\n\n # Get the discrete allocation values\n discrete_allocation_list = []\n for symbol in allocation:\n discrete_allocation_list.append(allocation.get(symbol))\n print(discrete_allocation_list)\n\n portfolio_df = pd.DataFrame(columns=['종목명', '종목코드', '수량(주)', '투자금액(원)', '투자비중'])\n portfolio_df['종목명'] = name_list\n portfolio_df['종목코드'] = allocation\n portfolio_df['수량(주)'] = discrete_allocation_list\n portfolio_df['투자금액(원)'] = total_price_stock\n portfolio_df['투자비중'] = total_weight_stock\n portfolio_df_sorted = 
portfolio_df.sort_values('투자비중', ascending=False)\n portfolio_df_sorted = portfolio_df_sorted.reset_index(drop=True)\n # 투자 금액에 따라 최적화된 포트폴리오 종목별 수량\n portfolio_df_sorted.loc[\"합계\", 2:] = portfolio_df_sorted.sum()\n\n ################# 코스피랑 비교 ####################\n # 각 일자별, 종목별 종가에 해당 weights를 곱해주기\n for i, weight in cleaned_weights.items():\n dfnull[i] = dfnull[i] * weight\n\n # 일자별 종목의 (종가*비중) 합계를 Port열에 저장\n dfnull['Port'] = dfnull.sum(axis=1)\n\n # 일자별 종가의 전일대비 변동률(수익률)을 portfolio라는 데이터프레임으로 저장\n portfolio = dfnull[['Port']].pct_change()\n\n # 코스피지수 불러오기\n kospi = fdr.DataReader('KS11', start_date, end_date)[['Close']]\n\n # 코스피지수의 변동률(수익률) 구하기\n # 변동률(수익률) = (당일가격-전일가격) / 전일가격\n # 7/20의 변동률(수익률) = (7/20 가격-7-19 가격) / 7/19 가격\n kospi_pct = kospi.pct_change()\n\n # 코스피와 포트폴리오 합치기\n result = kospi_pct.join(portfolio)\n\n # 1열을 0으로 (Nan 값을 0으로)\n result.iloc[0] = 0\n\n # 열 이름 변경\n result.columns = ['KOSPI', 'PORTFOLIO']\n\n # 1에서 시작해서, 전일대비 변동률(수익률)을 적용하여 수치화하기\n wealth = (1 + result).cumprod()\n\n"
] | [
[
"numpy.array",
"pandas.DataFrame",
"pandas.concat"
]
] |
vishalbelsare/graphsim | [
"1ecd23608fe562d5f363cae2323c1916e82ba4e9"
] | [
"graphsim/iter/SimRank.py"
] | [
"\"\"\"\nSimRank similarity measure.\n\"\"\"\n#!/usr/bin/env python\n# Copyright (C) 2015 by\n# Xiaming Chen <[email protected]>\n# All rights reserved.\n# BSD license.\nimport itertools\n\nimport numpy as np\nimport networkx as nx\nfrom typedecorator import params, returns\n\n__author__ = \"Xiaming Chen\"\n__email__ = \"[email protected]\"\n\n__all__ = [ 'simrank', 'simrank_bipartite' ]\n\n\n@params(G=nx.Graph, r=float, max_iter=int, eps=float)\ndef simrank(G, r=0.8, max_iter=100, eps=1e-4):\n \"\"\" Algorithm of G. Jeh and J. Widom. SimRank: A Measure\n of Structural-Context Similarity. In KDD'02.\n\n Thanks to Jon Tedesco's answer in SO question #9767773.\n \"\"\"\n if isinstance(G, nx.MultiGraph):\n assert(\"The SimRank of MultiGraph is not supported.\")\n\n if isinstance(G, nx.MultiDiGraph):\n assert(\"The SimRank of MultiDiGraph is not supported.\")\n\n directed = False\n if isinstance(G, nx.DiGraph):\n directed = True\n\n nodes = G.nodes()\n nodes_i = {}\n for (k, v) in [(nodes[i], i) for i in range(0, len(nodes))]:\n nodes_i[k] = v\n\n sim_prev = np.zeros(len(nodes))\n sim = np.identity(len(nodes))\n\n for i in range(max_iter):\n if np.allclose(sim, sim_prev, atol=eps):\n break\n\n sim_prev = np.copy(sim)\n for u, v in itertools.product(nodes, nodes):\n if u is v: continue\n\n if directed:\n u_ns, v_ns = G.predecessors(u), G.predecessors(v)\n else:\n u_ns, v_ns = G.neighbors(u), G.neighbors(v)\n\n # Evaluating the similarity of current nodes pair\n if len(u_ns) == 0 or len(v_ns) == 0:\n sim[nodes_i[u]][nodes_i[v]] = 0\n else:\n s_uv = sum([sim_prev[nodes_i[u_n]][nodes_i[v_n]] for u_n, v_n in itertools.product(u_ns, v_ns)])\n sim[nodes_i[u]][nodes_i[v]] = (r * s_uv) / (len(u_ns) * len(v_ns))\n\n print(\"Converge after %d iterations (eps=%f).\" % (i, eps))\n\n return sim\n\n\n@params(G=nx.DiGraph, r=float, max_iter=int, eps=float)\ndef simrank_bipartite(G, r=0.8, max_iter=100, eps=1e-4):\n \"\"\" A bipartite version in the paper.\n \"\"\"\n if not nx.is_bipartite(G):\n assert(\"A bipartie graph is required.\")\n\n nodes = G.nodes()\n nodes_i = {}\n for (k, v) in [(nodes[i], i) for i in range(0, len(nodes))]:\n nodes_i[k] = v\n\n sim_prev = np.zeros(len(nodes))\n sim = np.identity(len(nodes))\n\n lns = {}\n rns = {}\n for n in nodes:\n preds = G.predecessors(n)\n succs = G.successors(n)\n if len(preds) == 0:\n lns[n] = succs\n else:\n rns[n] = preds\n\n def _update_partite(ns):\n for u, v in itertools.product(ns.keys(), ns.keys()):\n if u is v: continue\n u_ns, v_ns = ns[u], ns[v]\n if len(u_ns) == 0 or len(v_ns) == 0:\n sim[nodes_i[u]][nodes_i[v]] = 0\n else:\n s_uv = sum([sim_prev[nodes_i[u_n]][nodes_i[v_n]] for u_n, v_n in itertools.product(u_ns, v_ns)])\n sim[nodes_i[u]][nodes_i[v]] = (r * s_uv) / (len(u_ns) * len(v_ns))\n\n for i in range(max_iter):\n if np.allclose(sim, sim_prev, atol=eps):\n break\n sim_prev = np.copy(sim)\n _update_partite(lns)\n _update_partite(rns)\n\n print(\"Converge after %d iterations (eps=%f).\" % (i, eps))\n\n return sim\n\n\nif __name__ == '__main__':\n # Example university web graph in the paper\n G = nx.DiGraph()\n G.add_edges_from([(1,2), (1,3), (2,4), (4,1), (3,5), (5,3)])\n print(simrank(G))\n\n # Example bipartie graph of cake-bakers in the paper\n G = nx.DiGraph()\n G.add_edges_from([(1,3), (1,4), (1,5), (2,4), (2,5), (2,6)])\n print(simrank_bipartite(G))\n"
] | [
[
"numpy.allclose",
"numpy.copy"
]
] |
tencent-youtu-visionseed/yt-visionseed-sdk-python | [
"647252c80d27d6ded8cf83434b2ee69a2c829895"
] | [
"example/example-plot.py"
] | [
"# -*- coding:utf-8 -*-\n# author: chenliang @ Youtu Lab, Tencent\nfrom visionseed import YtVisionSeed, YtDataLink\nimport serial\nimport matplotlib\nimport matplotlib.pyplot as plt\n\n\nvs = YtVisionSeed( serial.Serial(\"/dev/ttyACM0\",115200,timeout=0.5) )\n\nclass Chart:\n def __init__(self, maxlen=100, title='', xlabel='frame', ylabel=''):\n self.maxlen = maxlen\n self.x = []\n self.data = []\n self.labels = []\n\n plt.ion()\n self.fig, self.ax = plt.subplots()\n self.ax.set(xlabel=xlabel, ylabel=ylabel, title=title)\n self.ax.grid()\n self.line, = self.ax.plot([], [], 'r-') # Returns a tuple of line objects, thus the comma\n\n def onData(self, value, label=''):\n # Data for plotting\n if (self.maxlen > 0 and len(self.data) > self.maxlen):\n self.data.pop(0)\n self.labels.pop(0)\n else:\n self.x.append(len(self.data))\n\n self.data.append(value)\n self.labels.append(label)\n # print(self.data)\n\n self.ax.set_xticklabels(self.labels)\n self.line.set_xdata(self.x)\n self.line.set_ydata(self.data)\n self.ax.relim() # Recalculate limits\n self.ax.autoscale_view(True,True,True) #Autoscale\n\n self.fig.canvas.draw()\n\n def idle(self):\n self.fig.canvas.flush_events()\n\ndef main():\n chart = Chart(100, 'mouth', 'frame', 'openess')\n while True:\n result, msg = vs.recvRunOnce()\n\n if result:\n YtVisionSeedModel = YtDataLink.YtVisionSeedModel\n count = result.getResult([YtVisionSeedModel.FACE_DETECTION])\n for i in range(count):\n line = ''\n # 获取检测框\n rect = result.getResult([YtVisionSeedModel.FACE_DETECTION, i])\n if (rect):\n line += 'rect: (%d, %d, %d, %d) ' % (rect.x, rect.y, rect.w, rect.h)\n\n # 获取人脸识别结果\n faceName = result.getResult([YtVisionSeedModel.FACE_DETECTION, i, YtVisionSeedModel.FACE_RECOGNITION])\n if (faceName):\n line += 'name: %s (confidence: %.3f) ' % (faceName.str, faceName.conf)\n\n # 获取轨迹ID\n traceId = result.getResult([YtVisionSeedModel.FACE_DETECTION, i, YtVisionSeedModel.DETECTION_TRACE])\n if not (traceId is None):\n line += 'traceId: %d ' % traceId\n\n # 获取90点关键点\n shape = result.getResult([YtVisionSeedModel.FACE_DETECTION, i, YtVisionSeedModel.FACE_LANDMARK])\n if (shape):\n faceShape = shape.faceShape\n l1 = (faceShape.mouth[0] - faceShape.mouth[6]).length()\n l2 = (faceShape.mouth[3] - faceShape.mouth[9]).length()\n ratio = (l2 / (l1 + 0.01))\n line += 'mouth: ' + ('open' if ratio > 1 else 'close')\n chart.onData(ratio)\n\n print(line)\n\n chart.idle()\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"matplotlib.pyplot.ion",
"matplotlib.pyplot.subplots"
]
] |
urasakikeisuke/seamseg | [
"2b3a6d8aaaa895df4949e263e97c2f8b83332b88"
] | [
"seamseg/utils/panoptic.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n\nimport torch\n\nfrom .bbx import invert_roi_bbx\nfrom .misc import Empty\nfrom .roi_sampling import roi_sampling\n\n\nclass PanopticPreprocessing:\n def __init__(self,\n score_threshold=0.5,\n overlap_threshold=0.5,\n min_stuff_area=64 * 64):\n self.score_threshold = score_threshold\n self.overlap_threshold = overlap_threshold\n self.min_stuff_area = min_stuff_area\n\n def __call__(self, sem_pred, bbx_pred, cls_pred, obj_pred, msk_pred, num_stuff):\n img_size = [sem_pred.size(0), sem_pred.size(1)]\n\n # Initialize outputs\n occupied = torch.zeros_like(sem_pred, dtype=torch.uint8)\n msk = torch.zeros_like(sem_pred)\n cat = [255]\n obj = [0]\n iscrowd = [0]\n\n # Process thing\n try:\n if bbx_pred is None or cls_pred is None or obj_pred is None or msk_pred is None:\n raise Empty\n\n # Remove low-confidence instances\n keep = obj_pred > self.score_threshold\n if not keep.any():\n raise Empty\n obj_pred, bbx_pred, cls_pred, msk_pred = obj_pred[keep], bbx_pred[keep], cls_pred[keep], msk_pred[keep]\n\n # Up-sample masks\n bbx_inv = invert_roi_bbx(bbx_pred, list(msk_pred.shape[-2:]), img_size)\n bbx_idx = torch.arange(0, msk_pred.size(0), dtype=torch.long, device=msk_pred.device)\n msk_pred = roi_sampling(msk_pred.unsqueeze(1).sigmoid(), bbx_inv, bbx_idx, tuple(img_size), padding=\"zero\")\n msk_pred = msk_pred.squeeze(1) > 0.5\n\n # Sort by score\n idx = torch.argsort(obj_pred, descending=True)\n\n # Process instances\n for msk_i, cls_i, obj_i in zip(msk_pred[idx], cls_pred[idx], obj_pred[idx]):\n # Check intersection\n intersection = occupied & msk_i\n if intersection.float().sum() / msk_i.float().sum() > self.overlap_threshold:\n continue\n\n # Add non-intersecting part to output\n msk_i = msk_i - intersection\n msk[msk_i] = len(cat)\n cat.append(cls_i.item() + num_stuff)\n obj.append(obj_i.item())\n iscrowd.append(0)\n\n # Update occupancy mask\n occupied += msk_i\n except Empty:\n pass\n\n # Process stuff\n for cls_i in range(sem_pred.max().item() + 1):\n msk_i = sem_pred == cls_i\n\n # Remove occupied part and check remaining area\n msk_i = msk_i & ~occupied\n if msk_i.float().sum() < self.min_stuff_area:\n continue\n\n # Add non-intersecting part to output\n msk[msk_i] = len(cat)\n cat.append(cls_i)\n obj.append(1)\n iscrowd.append(cls_i >= num_stuff)\n\n # Update occupancy mask\n occupied += msk_i\n\n # Wrap in tensors\n cat = torch.tensor(cat, dtype=torch.long)\n obj = torch.tensor(obj, dtype=torch.float)\n iscrowd = torch.tensor(iscrowd, dtype=torch.uint8)\n\n return msk.cpu(), cat, obj, iscrowd\n\n\ndef panoptic_stats(msk_gt, cat_gt, panoptic_pred, num_classes, _num_stuff):\n # Move gt to CPU\n msk_gt, cat_gt = msk_gt.cpu(), cat_gt.cpu()\n msk_pred, cat_pred, _, iscrowd_pred = panoptic_pred\n\n # Convert crowd predictions to void\n msk_remap = msk_pred.new_zeros(cat_pred.numel())\n msk_remap[~iscrowd_pred] = torch.arange(\n 0, (~iscrowd_pred).long().sum().item(), dtype=msk_remap.dtype, device=msk_remap.device)\n msk_pred = msk_remap[msk_pred]\n cat_pred = cat_pred[~iscrowd_pred]\n\n iou = msk_pred.new_zeros(num_classes, dtype=torch.double)\n tp = msk_pred.new_zeros(num_classes, dtype=torch.double)\n fp = msk_pred.new_zeros(num_classes, dtype=torch.double)\n fn = msk_pred.new_zeros(num_classes, dtype=torch.double)\n\n if cat_gt.numel() > 1:\n msk_gt = msk_gt.view(-1)\n msk_pred = msk_pred.view(-1)\n\n # Compute confusion matrix\n confmat = msk_pred.new_zeros(cat_gt.numel(), cat_pred.numel(), dtype=torch.double)\n 
confmat.view(-1).index_add_(0, msk_gt * cat_pred.numel() + msk_pred,\n confmat.new_ones(msk_gt.numel()))\n\n # track potentially valid FP, i.e. those that overlap with void_gt <= 0.5\n num_pred_pixels = confmat.sum(0)\n valid_fp = (confmat[0] / num_pred_pixels) <= 0.5\n\n # compute IoU without counting void pixels (both in gt and pred)\n _iou = confmat / ((num_pred_pixels - confmat[0]).unsqueeze(0) + confmat.sum(1).unsqueeze(1) - confmat)\n\n # flag TP matches, i.e. same class and iou > 0.5\n matches = ((cat_gt.unsqueeze(1) == cat_pred.unsqueeze(0)) & (_iou > 0.5))\n\n # remove potential match of void_gt against void_pred\n matches[0, 0] = 0\n\n _iou = _iou[matches]\n tp_i, _ = matches.max(1)\n fn_i = ~tp_i\n fn_i[0] = 0 # remove potential fn match due to void against void\n fp_i = ~matches.max(0)[0] & valid_fp\n fp_i[0] = 0 # remove potential fp match due to void against void\n\n # Compute per instance classes for each tp, fp, fn\n tp_cat = cat_gt[tp_i]\n fn_cat = cat_gt[fn_i]\n fp_cat = cat_pred[fp_i]\n\n # Accumulate per class counts\n if tp_cat.numel() > 0:\n tp.index_add_(0, tp_cat, tp.new_ones(tp_cat.numel()))\n if fp_cat.numel() > 0:\n fp.index_add_(0, fp_cat, fp.new_ones(fp_cat.numel()))\n if fn_cat.numel() > 0:\n fn.index_add_(0, fn_cat, fn.new_ones(fn_cat.numel()))\n if tp_cat.numel() > 0:\n iou.index_add_(0, tp_cat, _iou)\n\n # note else branch is not needed because if cat_gt has only void we don't penalize predictions\n return iou, tp, fp, fn\n"
] | [
[
"torch.zeros_like",
"torch.argsort",
"torch.tensor"
]
] |
martins0n/etna | [
"51e9cec5183da2499ca247b0e2db215507246ceb"
] | [
"tests/test_transforms/test_missing_values/test_impute_transform.py"
] | [
"from copy import deepcopy\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nfrom etna.datasets import TSDataset\nfrom etna.models import NaiveModel\nfrom etna.transforms.missing_values import TimeSeriesImputerTransform\nfrom etna.transforms.missing_values.imputation import _OneSegmentTimeSeriesImputerTransform\n\n\[email protected]\ndef ts_nans_beginning(example_reg_tsds):\n \"\"\"Example dataset with NaNs at the beginning.\"\"\"\n ts = deepcopy(example_reg_tsds)\n\n # nans at the beginning (shouldn't be filled)\n ts.loc[ts.index[:5], pd.IndexSlice[\"segment_1\", \"target\"]] = np.NaN\n\n # nans in the middle (should be filled)\n ts.loc[ts.index[8], pd.IndexSlice[\"segment_1\", \"target\"]] = np.NaN\n ts.loc[ts.index[10], pd.IndexSlice[\"segment_2\", \"target\"]] = np.NaN\n ts.loc[ts.index[40], pd.IndexSlice[\"segment_2\", \"target\"]] = np.NaN\n return ts\n\n\ndef test_wrong_init_one_segment():\n \"\"\"Check that imputer for one segment fails to init with wrong imputing strategy.\"\"\"\n with pytest.raises(ValueError):\n _ = _OneSegmentTimeSeriesImputerTransform(\n in_column=\"target\", strategy=\"wrong_strategy\", window=-1, seasonality=1, default_value=None\n )\n\n\ndef test_wrong_init_two_segments(all_date_present_df_two_segments):\n \"\"\"Check that imputer for two segments fails to fit_transform with wrong imputing strategy.\"\"\"\n with pytest.raises(ValueError):\n _ = TimeSeriesImputerTransform(strategy=\"wrong_strategy\")\n\n\[email protected]\[email protected](\"fill_strategy\", [\"mean\", \"zero\", \"running_mean\", \"forward_fill\", \"seasonal\"])\ndef test_all_dates_present_impute(all_date_present_df: pd.DataFrame, fill_strategy: str):\n \"\"\"Check that imputer does nothing with series without gaps.\"\"\"\n imputer = _OneSegmentTimeSeriesImputerTransform(\n in_column=\"target\", strategy=fill_strategy, window=-1, seasonality=1, default_value=None\n )\n result = imputer.fit_transform(all_date_present_df)\n np.testing.assert_array_equal(all_date_present_df[\"target\"], result[\"target\"])\n\n\[email protected]\[email protected](\"fill_strategy\", [\"mean\", \"zero\", \"running_mean\", \"forward_fill\", \"seasonal\"])\ndef test_all_dates_present_impute_two_segments(all_date_present_df_two_segments: pd.DataFrame, fill_strategy: str):\n \"\"\"Check that imputer does nothing with series without gaps.\"\"\"\n imputer = TimeSeriesImputerTransform(strategy=fill_strategy)\n result = imputer.fit_transform(all_date_present_df_two_segments)\n for segment in result.columns.get_level_values(\"segment\"):\n np.testing.assert_array_equal(all_date_present_df_two_segments[segment][\"target\"], result[segment][\"target\"])\n\n\[email protected](\"fill_strategy\", [\"zero\", \"mean\", \"running_mean\", \"forward_fill\", \"seasonal\"])\ndef test_all_missing_impute_fail(df_all_missing: pd.DataFrame, fill_strategy: str):\n \"\"\"Check that imputer can't fill nans if all values are nans.\"\"\"\n imputer = _OneSegmentTimeSeriesImputerTransform(\n in_column=\"target\", strategy=fill_strategy, window=-1, seasonality=1, default_value=None\n )\n with pytest.raises(ValueError, match=\"Series hasn't non NaN values which means it is empty and can't be filled\"):\n _ = imputer.fit_transform(df_all_missing)\n\n\[email protected](\"fill_strategy\", [\"mean\", \"running_mean\", \"forward_fill\", \"seasonal\"])\ndef test_all_missing_impute_fail_two_segments(df_all_missing_two_segments: pd.DataFrame, fill_strategy: str):\n \"\"\"Check that imputer can't fill nans if all values are nans.\"\"\"\n imputer 
= TimeSeriesImputerTransform(strategy=fill_strategy)\n with pytest.raises(ValueError, match=\"Series hasn't non NaN values which means it is empty and can't be filled\"):\n _ = imputer.fit_transform(df_all_missing_two_segments)\n\n\ndef test_one_missing_value_zero(df_with_missing_value_x_index: pd.DataFrame):\n \"\"\"Check that imputer with zero-strategy works correctly in case of one missing value in data.\"\"\"\n df, idx = df_with_missing_value_x_index\n imputer = _OneSegmentTimeSeriesImputerTransform(\n in_column=\"target\", strategy=\"zero\", window=-1, seasonality=1, default_value=None\n )\n result = imputer.fit_transform(df)[\"target\"]\n assert result.loc[idx] == 0\n assert not result.isna().any()\n\n\ndef test_range_missing_zero(df_with_missing_range_x_index: pd.DataFrame):\n \"\"\"Check that imputer with zero-strategy works correctly in case of range of missing values in data.\"\"\"\n df, rng = df_with_missing_range_x_index\n imputer = _OneSegmentTimeSeriesImputerTransform(\n in_column=\"target\", strategy=\"zero\", window=-1, seasonality=1, default_value=None\n )\n result = imputer.fit_transform(df)[\"target\"]\n expected_series = pd.Series(index=rng, data=[0 for _ in rng], name=\"target\")\n np.testing.assert_array_almost_equal(result.loc[rng].reset_index(drop=True), expected_series)\n assert not result.isna().any()\n\n\ndef test_one_missing_value_mean(df_with_missing_value_x_index: pd.DataFrame):\n \"\"\"Check that imputer with mean-strategy works correctly in case of one missing value in data.\"\"\"\n df, idx = df_with_missing_value_x_index\n imputer = _OneSegmentTimeSeriesImputerTransform(\n in_column=\"target\", strategy=\"mean\", window=-1, seasonality=1, default_value=None\n )\n expected_value = df[\"target\"].mean()\n result = imputer.fit_transform(df)[\"target\"]\n assert result.loc[idx] == expected_value\n assert not result.isna().any()\n\n\ndef test_range_missing_mean(df_with_missing_range_x_index):\n \"\"\"Check that imputer with mean-strategy works correctly in case of range of missing values in data.\"\"\"\n df, rng = df_with_missing_range_x_index\n imputer = _OneSegmentTimeSeriesImputerTransform(\n in_column=\"target\", strategy=\"mean\", window=-1, seasonality=1, default_value=None\n )\n result = imputer.fit_transform(df)[\"target\"]\n expected_value = df[\"target\"].mean()\n expected_series = pd.Series(index=rng, data=[expected_value for _ in rng], name=\"target\")\n np.testing.assert_array_almost_equal(result.loc[rng].reset_index(drop=True), expected_series)\n assert not result.isna().any()\n\n\ndef test_one_missing_value_forward_fill(df_with_missing_value_x_index):\n \"\"\"Check that imputer with forward-fill-strategy works correctly in case of one missing value in data.\"\"\"\n df, idx = df_with_missing_value_x_index\n imputer = _OneSegmentTimeSeriesImputerTransform(\n in_column=\"target\", strategy=\"forward_fill\", window=-1, seasonality=1, default_value=None\n )\n result = imputer.fit_transform(df)[\"target\"]\n\n timestamps = np.array(sorted(df.index))\n timestamp_idx = np.where(timestamps == idx)[0][0]\n expected_value = df.loc[timestamps[timestamp_idx - 1], \"target\"]\n assert result.loc[idx] == expected_value\n assert not result.isna().any()\n\n\ndef test_range_missing_forward_fill(df_with_missing_range_x_index: pd.DataFrame):\n \"\"\"Check that imputer with forward-fill-strategy works correctly in case of range of missing values in data.\"\"\"\n df, rng = df_with_missing_range_x_index\n imputer = _OneSegmentTimeSeriesImputerTransform(\n 
in_column=\"target\", strategy=\"forward_fill\", window=-1, seasonality=1, default_value=None\n )\n result = imputer.fit_transform(df)[\"target\"]\n\n timestamps = np.array(sorted(df.index))\n rng = [pd.Timestamp(x) for x in rng]\n timestamp_idx = min(np.where([x in rng for x in timestamps])[0])\n expected_value = df.loc[timestamps[timestamp_idx - 1], \"target\"]\n expected_series = pd.Series(index=rng, data=[expected_value for _ in rng], name=\"target\")\n np.testing.assert_array_almost_equal(result.loc[rng], expected_series)\n assert not result.isna().any()\n\n\[email protected](\"window\", [1, -1, 2])\ndef test_one_missing_value_running_mean(df_with_missing_value_x_index: pd.DataFrame, window: int):\n \"\"\"Check that imputer with running-mean-strategy works correctly in case of one missing value in data.\"\"\"\n df, idx = df_with_missing_value_x_index\n timestamps = np.array(sorted(df.index))\n timestamp_idx = np.where(timestamps == idx)[0][0]\n imputer = _OneSegmentTimeSeriesImputerTransform(\n in_column=\"target\", strategy=\"running_mean\", window=window, seasonality=1, default_value=None\n )\n if window == -1:\n expected_value = df.loc[: timestamps[timestamp_idx - 1], \"target\"].mean()\n else:\n expected_value = df.loc[timestamps[timestamp_idx - window] : timestamps[timestamp_idx - 1], \"target\"].mean()\n result = imputer.fit_transform(df)[\"target\"]\n assert result.loc[idx] == expected_value\n assert not result.isna().any()\n\n\[email protected](\"window\", [1, -1, 2])\ndef test_range_missing_running_mean(df_with_missing_range_x_index: pd.DataFrame, window: int):\n \"\"\"Check that imputer with running-mean-strategy works correctly in case of range of missing values in data.\"\"\"\n df, rng = df_with_missing_range_x_index\n timestamps = np.array(sorted(df.index))\n timestamp_idxs = np.where([x in rng for x in timestamps])[0]\n imputer = _OneSegmentTimeSeriesImputerTransform(\n in_column=\"target\", strategy=\"running_mean\", window=window, seasonality=1, default_value=None\n )\n result = imputer.fit_transform(df)[\"target\"]\n\n assert not result.isna().any()\n for idx in timestamp_idxs:\n if window == -1:\n expected_value = result.loc[: timestamps[idx - 1]].mean()\n else:\n expected_value = result.loc[timestamps[idx - window] : timestamps[idx - 1]].mean()\n assert result.loc[timestamps[idx]] == expected_value\n\n\[email protected]\ndef sample_ts():\n timestamp = pd.date_range(start=\"2020-01-01\", end=\"2020-01-11\", freq=\"D\")\n df1 = pd.DataFrame()\n df1[\"timestamp\"] = timestamp\n df1[\"segment\"] = \"segment_1\"\n df1[\"target\"] = np.arange(-1, 10)\n\n df2 = pd.DataFrame()\n df2[\"timestamp\"] = timestamp\n df2[\"segment\"] = \"segment_2\"\n df2[\"target\"] = np.arange(0, 110, 10)\n\n df = pd.concat([df1, df2], ignore_index=True)\n ts = TSDataset(df=TSDataset.to_dataset(df), freq=\"D\")\n return ts\n\n\[email protected]\ndef ts_to_fill(sample_ts):\n \"\"\"TSDataset with nans to fill with imputer.\"\"\"\n ts = deepcopy(sample_ts)\n ts.df.loc[[\"2020-01-01\", \"2020-01-03\", \"2020-01-08\", \"2020-01-09\"], pd.IndexSlice[:, \"target\"]] = np.NaN\n return ts\n\n\[email protected](\n \"window, seasonality, expected\",\n [\n (\n 1,\n 3,\n np.array(\n [[np.NaN, 0, np.NaN, 2, 3, 4, 5, 3, 4, 8, 9], [np.NaN, 10, np.NaN, 30, 40, 50, 60, 40, 50, 90, 100]]\n ).T,\n ),\n (\n 3,\n 1,\n np.array(\n [[np.NaN, 0, 0, 2, 3, 4, 5, 4, 13 / 3, 8, 9], [np.NaN, 10, 10, 30, 40, 50, 60, 50, 160 / 3, 90, 100]]\n ).T,\n ),\n (\n 3,\n 3,\n np.array(\n [[np.NaN, 0, np.NaN, 2, 3, 4, 5, 3 / 2, 4, 8, 
9], [np.NaN, 10, np.NaN, 30, 40, 50, 60, 25, 50, 90, 100]]\n ).T,\n ),\n (\n -1,\n 3,\n np.array(\n [[np.NaN, 0, np.NaN, 2, 3, 4, 5, 3 / 2, 4, 8, 9], [np.NaN, 10, np.NaN, 30, 40, 50, 60, 25, 50, 90, 100]]\n ).T,\n ),\n ],\n)\ndef test_missing_values_seasonal(ts_to_fill, window: int, seasonality: int, expected: np.ndarray):\n ts = deepcopy(ts_to_fill)\n imputer = TimeSeriesImputerTransform(\n in_column=\"target\", strategy=\"seasonal\", window=window, seasonality=seasonality, default_value=None\n )\n ts.fit_transform([imputer])\n result = ts.df.loc[pd.IndexSlice[:], pd.IndexSlice[:, \"target\"]].values\n\n np.testing.assert_array_equal(result, expected)\n\n\[email protected](\n \"window, seasonality, default_value, expected\",\n [\n (\n 1,\n 3,\n 100,\n np.array([[np.NaN, 0, 100, 2, 3, 4, 5, 3, 4, 8, 9], [np.NaN, 10, 100, 30, 40, 50, 60, 40, 50, 90, 100]]).T,\n ),\n ],\n)\ndef test_default_value(ts_to_fill, window: int, seasonality: int, default_value: float, expected: np.ndarray):\n ts = deepcopy(ts_to_fill)\n imputer = TimeSeriesImputerTransform(\n in_column=\"target\", strategy=\"seasonal\", window=window, seasonality=seasonality, default_value=default_value\n )\n ts.fit_transform([imputer])\n result = ts.df.loc[pd.IndexSlice[:], pd.IndexSlice[:, \"target\"]].values\n\n np.testing.assert_array_equal(result, expected)\n\n\[email protected](\"fill_strategy\", [\"mean\", \"zero\", \"running_mean\", \"forward_fill\", \"seasonal\"])\ndef test_inverse_transform_one_segment(df_with_missing_range_x_index: pd.DataFrame, fill_strategy: str):\n \"\"\"Check that transform + inverse_transform don't change original df for one segment.\"\"\"\n df, rng = df_with_missing_range_x_index\n imputer = _OneSegmentTimeSeriesImputerTransform(\n in_column=\"target\", strategy=fill_strategy, window=-1, seasonality=1, default_value=None\n )\n transform_result = imputer.fit_transform(df)\n inverse_transform_result = imputer.inverse_transform(transform_result)\n np.testing.assert_array_equal(df, inverse_transform_result)\n\n\[email protected](\"fill_strategy\", [\"mean\", \"zero\", \"running_mean\", \"forward_fill\", \"seasonal\"])\ndef test_inverse_transform_many_segments(df_with_missing_range_x_index_two_segments: pd.DataFrame, fill_strategy: str):\n \"\"\"Check that transform + inverse_transform don't change original df for two segments.\"\"\"\n df, rng = df_with_missing_range_x_index_two_segments\n imputer = TimeSeriesImputerTransform(strategy=fill_strategy)\n transform_result = imputer.fit_transform(df)\n inverse_transform_result = imputer.inverse_transform(transform_result)\n np.testing.assert_array_equal(df, inverse_transform_result)\n\n\[email protected](\"fill_strategy\", [\"mean\", \"zero\", \"running_mean\", \"forward_fill\", \"seasonal\"])\ndef test_inverse_transform_in_forecast(df_with_missing_range_x_index_two_segments: pd.DataFrame, fill_strategy: str):\n \"\"\"Check that inverse_transform doesn't change anything in forecast.\"\"\"\n df, rng = df_with_missing_range_x_index_two_segments\n ts = TSDataset(df, freq=pd.infer_freq(df.index))\n imputer = TimeSeriesImputerTransform(strategy=fill_strategy)\n model = NaiveModel()\n ts.fit_transform(transforms=[imputer])\n model.fit(ts)\n ts_test = ts.make_future(3)\n assert np.all(ts_test[:, :, \"target\"].isna())\n ts_forecast = model.forecast(ts_test)\n for segment in ts.segments:\n true_value = ts[:, segment, \"target\"].values[-1]\n assert np.all(ts_forecast[:, segment, \"target\"] == true_value)\n\n\[email protected](\"fill_strategy\", [\"mean\", \"zero\", 
\"running_mean\", \"forward_fill\", \"seasonal\"])\ndef test_fit_transform_nans_at_the_beginning(fill_strategy, ts_nans_beginning):\n \"\"\"Check that transform doesn't fill NaNs at the beginning.\"\"\"\n imputer = TimeSeriesImputerTransform(in_column=\"target\", strategy=fill_strategy)\n df_init = ts_nans_beginning.to_pandas()\n ts_nans_beginning.fit_transform([imputer])\n df_filled = ts_nans_beginning.to_pandas()\n for segment in ts_nans_beginning.segments:\n df_segment_init = df_init.loc[:, pd.IndexSlice[segment, \"target\"]]\n df_segment_filled = df_filled.loc[:, pd.IndexSlice[segment, \"target\"]]\n first_valid_index = df_segment_init.first_valid_index()\n assert df_segment_init[:first_valid_index].equals(df_segment_filled[:first_valid_index])\n assert not df_segment_filled[first_valid_index:].isna().any()\n\n\[email protected](\"fill_strategy\", [\"mean\", \"zero\", \"running_mean\", \"forward_fill\", \"seasonal\"])\ndef test_fit_transform_nans_at_the_end(fill_strategy, ts_diff_endings):\n \"\"\"Check that transform correctly works with NaNs at the end.\"\"\"\n imputer = TimeSeriesImputerTransform(in_column=\"target\", strategy=fill_strategy)\n ts_diff_endings.fit_transform([imputer])\n assert (ts_diff_endings[:, :, \"target\"].isna()).sum().sum() == 0\n"
] | [
[
"pandas.infer_freq",
"pandas.Series",
"pandas.date_range",
"pandas.DataFrame",
"numpy.testing.assert_array_equal",
"numpy.arange",
"numpy.testing.assert_array_almost_equal",
"numpy.all",
"pandas.concat",
"numpy.array",
"numpy.where",
"pandas.Timestamp"
]
] |
cle-ros/RoutingNetworks | [
"0f1fe1221c67a224a02bca6247d3c4488ede0a04"
] | [
"PytorchRouting/DecisionLayers/Decision.py"
] | [
"\"\"\"\nThis file defines class DecisionModule.\n\n@author: Clemens Rosenbaum :: [email protected]\n@created: 6/7/18\n\"\"\"\nimport abc\nimport copy\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom torch.distributions.distribution import Distribution\n\nfrom .PolicyStorage import ApproxPolicyStorage, TabularPolicyStorage\nfrom PytorchRouting.RewardFunctions.PerAction.PerActionBaseReward import PerActionBaseReward\n\n\nclass Decision(nn.Module, metaclass=abc.ABCMeta):\n \"\"\"\n Class DecisionModule defines the base class for all decision modules.\n \"\"\"\n\n def __init__(\n self,\n num_selections,\n in_features,\n num_agents=1,\n exploration=0.1,\n policy_storage_type='approx',\n detach=True,\n approx_hidden_dims=(),\n policy_net=None,\n additional_reward_func=PerActionBaseReward(),\n set_pg_temp=False,\n **kwargs\n ):\n nn.Module.__init__(self)\n self._in_features = in_features\n self._num_selections = num_selections\n self._num_agents = num_agents\n self._exploration = exploration\n self._detach = detach\n self._pol_type = policy_storage_type\n self._pol_hidden_dims = approx_hidden_dims\n self._policy = self._construct_policy_storage(\n self._num_selections, self._pol_type, policy_net, self._pol_hidden_dims)\n self.additional_reward_func = additional_reward_func\n self._dist_dim = 1\n self._set_pg_temp = set_pg_temp\n self._pg_temperature = 1.\n\n def set_exploration(self, exploration):\n self._exploration = exploration\n\n @abc.abstractmethod\n def _forward(self, xs, prior_action):\n return torch.zeros(1, 1), [], torch.zeros(1, 1)\n\n @staticmethod\n def _eval_stochastic_are_exp(actions, dist):\n if len(dist.shape) == 3:\n dist = dist[:, :, 0]\n return (torch.max(dist, dim=1)[1].view(-1) == actions.view(-1)).byte()\n\n @abc.abstractmethod\n def _forward(self, xs, prior_action):\n return torch.zeros(1, 1), [], torch.zeros(1, 1)\n\n @staticmethod\n def _loss(self, is_terminal, state, next_state, action, next_action, reward, cum_return, final_reward):\n pass\n\n def _construct_policy_storage(self, out_dim, policy_storage_type, approx_module, approx_hidden_dims, in_dim=None):\n in_dim = in_dim or self._in_features\n if approx_module is not None:\n policy = nn.ModuleList(\n [ApproxPolicyStorage(approx=copy.deepcopy(approx_module), detach=self._detach)\n for _ in range(self._num_agents)]\n )\n elif policy_storage_type in ('approx', 0):\n policy = nn.ModuleList(\n [ApproxPolicyStorage(\n in_features=in_dim,\n num_selections=out_dim,\n hidden_dims=approx_hidden_dims,\n detach=self._detach)\n for _ in range(self._num_agents)]\n )\n elif policy_storage_type in ('tabular', 1):\n policy = nn.ModuleList(\n [TabularPolicyStorage(num_selections=out_dim)\n for _ in range(self._num_agents)]\n )\n else:\n raise ValueError(f'Policy storage type {policy_storage_type} not understood.')\n return policy\n\n def forward(self, xs, mxs, prior_actions=None, mask=None, update_target=None):\n \"\"\"\n The forward method of DecisionModule takes a batch of inputs, and a list of metainformation, and\n append the decision made to the metainformation objects.\n :param xs:\n :param mxs:\n :param prior_actions: prior actions that select the agent\n :param mask: a torch.ByteTensor that determines if the trajectory is active. 
if it is not, no action\n will be executed\n :param update_target: (only relevant for GumbelSoftmax) if specified, this will include the gradientflow\n in update_target, and will thus return update_target\n :return: xs OR update_target, if specified, with potentially an attached backward object\n \"\"\"\n # input checking\n assert len(xs) == len(mxs)\n batch_size = xs.size(0)\n assert self._num_agents == 1 or prior_actions is not None, \\\n 'Decision makers with more than one action have to have prior_actions provided.'\n assert mask is None or mask.max() == 1, \\\n 'Please check that a batch being passed in has at least one active (non terminated) trajectory.'\n # computing the termination mask and the prior actions if not passed in\n mask = torch.ones(batch_size, dtype=torch.uint8, device=xs.device) \\\n if mask is None else mask\n prior_actions = torch.zeros(batch_size, dtype=torch.long, device=xs.device) \\\n if prior_actions is None or len(prior_actions) == 0 else prior_actions.reshape(-1)\n ys = xs.clone() if update_target is None else update_target.clone() # required as in-place ops follow\n # initializing the return vars\n actions = torch.zeros(batch_size, dtype=torch.long, device=xs.device)\n are_exp = torch.zeros(batch_size, dtype=torch.uint8, device=xs.device)\n dists = torch.zeros((batch_size, self._num_selections, 5), device=xs.device)\n # \"clustering\" by agent\n for i in torch.arange(0, prior_actions.max() + 1, device=xs.device):\n if i not in prior_actions:\n continue\n # computing the mask as the currently computed agent on the active trajectories\n m = ((prior_actions == i) * mask)\n if not any(m):\n continue\n # selecting the actions\n y, a, e, d = self._forward(xs[m], i)\n # merging the results\n ys[m], actions[m], are_exp[m], dists[m, :, :d.size(-1)] = \\\n y, a.view(-1), e.view(-1), d.view(d.size(0), d.size(1), -1)\n actions = actions.view(-1) # flattens the actions tensor, but does not produce a scalar\n assert len(actions) == len(are_exp) == dists.size(0) == len(mxs)\n # amending the metas\n for ia, a, e, d, mx in zip(mask, actions, are_exp, dists.split(1, dim=0), mxs):\n if ia:\n mx.append('actions', a, new_step=True)\n mx.append('is_exploratory', e.squeeze())\n mx.append('states', d)\n mx.append('loss_funcs', self._loss)\n mx.append('reward_func', self.additional_reward_func)\n self.additional_reward_func.register(d, a)\n return ys, mxs, actions\n\n"
] | [
[
"torch.zeros",
"torch.ones",
"torch.nn.Module.__init__",
"torch.max"
]
] |
janmalec/openmc | [
"4a4ac4c351d41fe153ca3341820cc507e484ce50"
] | [
"tests/regression_tests/deplete/example_geometry.py"
] | [
"\"\"\"An example file showing how to make a geometry.\n\nThis particular example creates a 3x3 geometry, with 8 regular pins and one\nGd-157 2 wt-percent enriched. All pins are segmented.\n\"\"\"\n\nfrom collections import OrderedDict\nimport math\n\nimport numpy as np\nimport openmc\n\n\ndef density_to_mat(dens_dict):\n \"\"\"Generates an OpenMC material from a cell ID and self.number_density.\n\n Parameters\n ----------\n dens_dict : dict\n Dictionary mapping nuclide names to densities\n\n Returns\n -------\n openmc.Material\n The OpenMC material filled with nuclides.\n\n \"\"\"\n mat = openmc.Material()\n for key in dens_dict:\n mat.add_nuclide(key, 1.0e-24*dens_dict[key])\n mat.set_density('sum')\n\n return mat\n\n\ndef generate_initial_number_density():\n \"\"\" Generates initial number density.\n\n These results were from a CASMO5 run in which the gadolinium pin was\n loaded with 2 wt percent of Gd-157.\n \"\"\"\n\n # Concentration to be used for all fuel pins\n fuel_dict = OrderedDict()\n fuel_dict['U235'] = 1.05692e21\n fuel_dict['U234'] = 1.00506e19\n fuel_dict['U238'] = 2.21371e22\n fuel_dict['O16'] = 4.62954e22\n fuel_dict['O17'] = 1.127684e20\n fuel_dict['Xe135'] = 1.0e10\n fuel_dict['Xe136'] = 1.0e10\n fuel_dict['Gd156'] = 1.0e10\n fuel_dict['Gd157'] = 1.0e10\n # fuel_dict['O18'] = 9.51352e19 # Does not exist in ENDF71, merged into 17\n\n # Concentration to be used for the gadolinium fuel pin\n fuel_gd_dict = OrderedDict()\n fuel_gd_dict['U235'] = 1.03579e21\n fuel_gd_dict['U238'] = 2.16943e22\n fuel_gd_dict['Gd156'] = 3.95517E+10\n fuel_gd_dict['Gd157'] = 1.08156e20\n fuel_gd_dict['O16'] = 4.64035e22\n fuel_dict['Xe136'] = 1.0e10\n fuel_dict['Xe135'] = 1.0e10\n # There are a whole bunch of 1e-10 stuff here.\n\n # Concentration to be used for cladding\n clad_dict = OrderedDict()\n clad_dict['O16'] = 3.07427e20\n clad_dict['O17'] = 7.48868e17\n clad_dict['Cr50'] = 3.29620e18\n clad_dict['Cr52'] = 6.35639e19\n clad_dict['Cr53'] = 7.20763e18\n clad_dict['Cr54'] = 1.79413e18\n clad_dict['Fe54'] = 5.57350e18\n clad_dict['Fe56'] = 8.74921e19\n clad_dict['Fe57'] = 2.02057e18\n clad_dict['Fe58'] = 2.68901e17\n clad_dict['Cr50'] = 3.29620e18\n clad_dict['Cr52'] = 6.35639e19\n clad_dict['Cr53'] = 7.20763e18\n clad_dict['Cr54'] = 1.79413e18\n clad_dict['Ni58'] = 2.51631e19\n clad_dict['Ni60'] = 9.69278e18\n clad_dict['Ni61'] = 4.21338e17\n clad_dict['Ni62'] = 1.34341e18\n clad_dict['Ni64'] = 3.43127e17\n clad_dict['Zr90'] = 2.18320e22\n clad_dict['Zr91'] = 4.76104e21\n clad_dict['Zr92'] = 7.27734e21\n clad_dict['Zr94'] = 7.37494e21\n clad_dict['Zr96'] = 1.18814e21\n clad_dict['Sn112'] = 4.67352e18\n clad_dict['Sn114'] = 3.17992e18\n clad_dict['Sn115'] = 1.63814e18\n clad_dict['Sn116'] = 7.00546e19\n clad_dict['Sn117'] = 3.70027e19\n clad_dict['Sn118'] = 1.16694e20\n clad_dict['Sn119'] = 4.13872e19\n clad_dict['Sn120'] = 1.56973e20\n clad_dict['Sn122'] = 2.23076e19\n clad_dict['Sn124'] = 2.78966e19\n\n # Gap concentration\n # Funny enough, the example problem uses air.\n gap_dict = OrderedDict()\n gap_dict['O16'] = 7.86548e18\n gap_dict['O17'] = 2.99548e15\n gap_dict['N14'] = 3.38646e19\n gap_dict['N15'] = 1.23717e17\n\n # Concentration to be used for coolant\n # No boron\n cool_dict = OrderedDict()\n cool_dict['H1'] = 4.68063e22\n cool_dict['O16'] = 2.33427e22\n cool_dict['O17'] = 8.89086e18\n\n # Store these dictionaries in the initial conditions dictionary\n initial_density = OrderedDict()\n initial_density['fuel_gd'] = fuel_gd_dict\n initial_density['fuel'] = fuel_dict\n 
initial_density['gap'] = gap_dict\n initial_density['clad'] = clad_dict\n initial_density['cool'] = cool_dict\n\n # Set up libraries to use\n temperature = OrderedDict()\n sab = OrderedDict()\n\n # Toggle betweeen MCNP and NNDC data\n MCNP = False\n\n if MCNP:\n temperature['fuel_gd'] = 900.0\n temperature['fuel'] = 900.0\n # We approximate temperature of everything as 600K, even though it was\n # actually 580K.\n temperature['gap'] = 600.0\n temperature['clad'] = 600.0\n temperature['cool'] = 600.0\n else:\n temperature['fuel_gd'] = 293.6\n temperature['fuel'] = 293.6\n temperature['gap'] = 293.6\n temperature['clad'] = 293.6\n temperature['cool'] = 293.6\n\n sab['cool'] = 'c_H_in_H2O'\n\n # Set up burnable materials\n burn = OrderedDict()\n burn['fuel_gd'] = True\n burn['fuel'] = True\n burn['gap'] = False\n burn['clad'] = False\n burn['cool'] = False\n\n return temperature, sab, initial_density, burn\n\n\ndef segment_pin(n_rings, n_wedges, r_fuel, r_gap, r_clad):\n \"\"\" Calculates a segmented pin.\n\n Separates a pin with n_rings and n_wedges. All cells have equal volume.\n Pin is centered at origin.\n \"\"\"\n\n # Calculate all the volumes of interest\n v_fuel = math.pi * r_fuel**2\n v_gap = math.pi * r_gap**2 - v_fuel\n v_clad = math.pi * r_clad**2 - v_fuel - v_gap\n v_ring = v_fuel / n_rings\n v_segment = v_ring / n_wedges\n\n # Compute ring radiuses\n r_rings = np.zeros(n_rings)\n\n for i in range(n_rings):\n r_rings[i] = math.sqrt(1.0/(math.pi) * v_ring * (i+1))\n\n # Compute thetas\n theta = np.linspace(0, 2*math.pi, n_wedges + 1)\n\n # Compute surfaces\n fuel_rings = [openmc.ZCylinder(x0=0, y0=0, r=r_rings[i])\n for i in range(n_rings)]\n\n fuel_wedges = [openmc.Plane(a=math.cos(theta[i]), b=math.sin(theta[i]))\n for i in range(n_wedges)]\n\n gap_ring = openmc.ZCylinder(x0=0, y0=0, r=r_gap)\n clad_ring = openmc.ZCylinder(x0=0, y0=0, r=r_clad)\n\n # Create cells\n fuel_cells = []\n if n_wedges == 1:\n for i in range(n_rings):\n cell = openmc.Cell(name='fuel')\n if i == 0:\n cell.region = -fuel_rings[0]\n else:\n cell.region = +fuel_rings[i-1] & -fuel_rings[i]\n fuel_cells.append(cell)\n else:\n for i in range(n_rings):\n for j in range(n_wedges):\n cell = openmc.Cell(name='fuel')\n if i == 0:\n if j != n_wedges-1:\n cell.region = (-fuel_rings[0]\n & +fuel_wedges[j]\n & -fuel_wedges[j+1])\n else:\n cell.region = (-fuel_rings[0]\n & +fuel_wedges[j]\n & -fuel_wedges[0])\n else:\n if j != n_wedges-1:\n cell.region = (+fuel_rings[i-1]\n & -fuel_rings[i]\n & +fuel_wedges[j]\n & -fuel_wedges[j+1])\n else:\n cell.region = (+fuel_rings[i-1]\n & -fuel_rings[i]\n & +fuel_wedges[j]\n & -fuel_wedges[0])\n fuel_cells.append(cell)\n\n # Gap ring\n gap_cell = openmc.Cell(name='gap')\n gap_cell.region = +fuel_rings[-1] & -gap_ring\n fuel_cells.append(gap_cell)\n\n # Clad ring\n clad_cell = openmc.Cell(name='clad')\n clad_cell.region = +gap_ring & -clad_ring\n fuel_cells.append(clad_cell)\n\n # Moderator\n mod_cell = openmc.Cell(name='cool')\n mod_cell.region = +clad_ring\n fuel_cells.append(mod_cell)\n\n # Form universe\n fuel_u = openmc.Universe()\n fuel_u.add_cells(fuel_cells)\n\n return fuel_u, v_segment, v_gap, v_clad\n\n\ndef generate_geometry(n_rings, n_wedges):\n \"\"\" Generates example geometry.\n\n This function creates the initial geometry, a 9 pin reflective problem.\n One pin, containing gadolinium, is discretized into sectors.\n\n In addition to what one would do with the general OpenMC geometry code, it\n is necessary to create a dictionary, volume, that maps a cell ID to a\n 
volume. Further, by naming cells the same as the above materials, the code\n can automatically handle the mapping.\n\n Parameters\n ----------\n n_rings : int\n Number of rings to generate for the geometry\n n_wedges : int\n Number of wedges to generate for the geometry\n \"\"\"\n\n pitch = 1.26197\n r_fuel = 0.412275\n r_gap = 0.418987\n r_clad = 0.476121\n\n n_pin = 3\n\n # This table describes the 'fuel' to actual type mapping\n # It's not necessary to do it this way. Just adjust the initial conditions\n # below.\n mapping = ['fuel', 'fuel', 'fuel',\n 'fuel', 'fuel_gd', 'fuel',\n 'fuel', 'fuel', 'fuel']\n\n # Form pin cell\n fuel_u, v_segment, v_gap, v_clad = segment_pin(n_rings, n_wedges, r_fuel, r_gap, r_clad)\n\n # Form lattice\n all_water_c = openmc.Cell(name='cool')\n all_water_u = openmc.Universe(cells=(all_water_c, ))\n\n lattice = openmc.RectLattice()\n lattice.pitch = [pitch]*2\n lattice.lower_left = [-pitch*n_pin/2, -pitch*n_pin/2]\n lattice_array = [[fuel_u for i in range(n_pin)] for j in range(n_pin)]\n lattice.universes = lattice_array\n lattice.outer = all_water_u\n\n # Bound universe\n x_low = openmc.XPlane(-pitch*n_pin/2, 'reflective')\n x_high = openmc.XPlane(pitch*n_pin/2, 'reflective')\n y_low = openmc.YPlane(-pitch*n_pin/2, 'reflective')\n y_high = openmc.YPlane(pitch*n_pin/2, 'reflective')\n z_low = openmc.ZPlane(-10, 'reflective')\n z_high = openmc.ZPlane(10, 'reflective')\n\n # Compute bounding box\n lower_left = [-pitch*n_pin/2, -pitch*n_pin/2, -10]\n upper_right = [pitch*n_pin/2, pitch*n_pin/2, 10]\n\n root_c = openmc.Cell(fill=lattice)\n root_c.region = (+x_low & -x_high\n & +y_low & -y_high\n & +z_low & -z_high)\n root_u = openmc.Universe(universe_id=0, cells=(root_c, ))\n geometry = openmc.Geometry(root_u)\n\n v_cool = pitch**2 - (v_gap + v_clad + n_rings * n_wedges * v_segment)\n\n # Store volumes for later usage\n volume = {'fuel': v_segment, 'gap': v_gap, 'clad': v_clad, 'cool': v_cool}\n\n return geometry, volume, mapping, lower_left, upper_right\n\n\ndef generate_problem(n_rings=5, n_wedges=8):\n \"\"\" Merges geometry and materials.\n\n This function initializes the materials for each cell using the dictionaries\n provided by generate_initial_number_density. 
It is assumed a cell named\n 'fuel' will have further region differentiation (see mapping).\n\n Parameters\n ----------\n n_rings : int, optional\n Number of rings to generate for the geometry\n n_wedges : int, optional\n Number of wedges to generate for the geometry\n \"\"\"\n\n # Get materials dictionary, geometry, and volumes\n temperature, sab, initial_density, burn = generate_initial_number_density()\n geometry, volume, mapping, lower_left, upper_right = generate_geometry(n_rings, n_wedges)\n\n # Apply distribmats, fill geometry\n cells = geometry.root_universe.get_all_cells()\n for cell_id in cells:\n cell = cells[cell_id]\n if cell.name == 'fuel':\n\n omc_mats = []\n\n for cell_type in mapping:\n omc_mat = density_to_mat(initial_density[cell_type])\n\n if cell_type in sab:\n omc_mat.add_s_alpha_beta(sab[cell_type])\n omc_mat.temperature = temperature[cell_type]\n omc_mat.depletable = burn[cell_type]\n omc_mat.volume = volume['fuel']\n\n omc_mats.append(omc_mat)\n\n cell.fill = omc_mats\n elif cell.name != '':\n omc_mat = density_to_mat(initial_density[cell.name])\n\n if cell.name in sab:\n omc_mat.add_s_alpha_beta(sab[cell.name])\n omc_mat.temperature = temperature[cell.name]\n omc_mat.depletable = burn[cell.name]\n omc_mat.volume = volume[cell.name]\n\n cell.fill = omc_mat\n\n return geometry, lower_left, upper_right\n"
] | [
[
"numpy.linspace",
"numpy.zeros"
]
] |
khieu/cifar10_challenge | [
"332682fbf07f1ecae2cd3d4bb07ee7e1c6fb3c18"
] | [
"run_natural_instances.py"
] | [
"\"\"\"Evaluates a model against examples from a .npy file as specified\r\n in config.json\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom datetime import datetime\r\nimport json\r\nimport math\r\nimport os\r\nimport sys\r\nimport time\r\n\r\nimport tensorflow as tf\r\nimport numpy as np\r\n\r\nfrom model import Model\r\nimport cifar10_input\r\n\r\nwith open('config.json') as config_file:\r\n config = json.load(config_file)\r\n\r\ndata_path = config['data_path']\r\n\r\ndef run_attack(checkpoint, x_adv, epsilon):\r\n cifar = cifar10_input.CIFAR10Data(data_path)\r\n\r\n model = Model(mode='eval')\r\n\r\n saver = tf.train.Saver()\r\n\r\n num_eval_examples = 10000\r\n eval_batch_size = 100\r\n\r\n num_batches = int(math.ceil(num_eval_examples / eval_batch_size))\r\n total_corr = 0\r\n\r\n x_nat = cifar.eval_data.xs\r\n l_inf = np.amax(np.abs(x_nat - x_adv))\r\n\r\n #if l_inf > epsilon + 0.0001:\r\n # print('maximum perturbation found: {}'.format(l_inf))\r\n # print('maximum perturbation allowed: {}'.format(epsilon))\r\n # return\r\n\r\n y_pred = [] # label accumulator\r\n\r\n with tf.Session() as sess:\r\n # Restore the checkpoint\r\n saver.restore(sess, checkpoint)\r\n\r\n # Iterate over the samples batch-by-batch\r\n for ibatch in range(num_batches):\r\n bstart = ibatch * eval_batch_size\r\n bend = min(bstart + eval_batch_size, num_eval_examples)\r\n\r\n x_batch = x_adv[bstart:bend, :]\r\n y_batch = cifar.eval_data.ys[bstart:bend]\r\n\r\n dict_adv = {model.x_input: x_batch,\r\n model.y_input: y_batch}\r\n cur_corr, y_pred_batch = sess.run([model.num_correct, model.predictions],\r\n feed_dict=dict_adv)\r\n\r\n total_corr += cur_corr\r\n y_pred.append(y_pred_batch)\r\n\r\n accuracy = total_corr / num_eval_examples\r\n\r\n print('Accuracy: {:.2f}%'.format(100.0 * accuracy))\r\n y_pred = np.concatenate(y_pred, axis=0)\r\n np.save('pred.npy', y_pred)\r\n print('Output saved at pred.npy')\r\n\r\nif __name__ == '__main__':\r\n import json\r\n\r\n with open('config.json') as config_file:\r\n config = json.load(config_file)\r\n\r\n model_dir = config['model_dir']\r\n\r\n checkpoint = tf.train.latest_checkpoint(model_dir)\r\n x_adv = np.load(config['natural_test_path'])\r\n\r\n if checkpoint is None:\r\n print('No checkpoint found')\r\n elif x_adv.shape != (10000, 32, 32, 3):\r\n print('Invalid shape: expected (10000, 32, 32, 3), found {}'.format(x_adv.shape))\r\n elif np.amax(x_adv) > 255.0001 or np.amin(x_adv) < -0.0001:\r\n print('Invalid pixel range. Expected [0, 255], found [{}, {}]'.format(\r\n np.amin(x_adv),\r\n np.amax(x_adv)))\r\n else:\r\n run_attack(checkpoint, x_adv, config['epsilon'])\r\n"
] | [
[
"numpy.save",
"numpy.load",
"numpy.abs",
"tensorflow.train.latest_checkpoint",
"numpy.amin",
"tensorflow.train.Saver",
"tensorflow.Session",
"numpy.amax",
"numpy.concatenate"
]
] |
MiCigo/matplotlib-tutorial | [
"31b7a8fd8ec334fa16609a894558ce23be035787"
] | [
"basic-train/basic-2.py"
] | [
"# -*- coding: utf-8 -*-\n'''\n@Description: code\n@Author: MiCi\n@Date: 2020-03-07 22:46:46\n@LastEditTime: 2020-03-12 10:47:55\n@LastEditors: MiCi\n'''\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef basic_label_2():\n x = np.linspace(-3, 3, 50)\n y1 = x*2 + 1\n y2 = x**2\n # label属性设定 图例名称\n plt.plot(x, y1, label='y1 name')\n plt.plot(x, y2, color='red', linewidth=1.0,\n linestyle='--', label='y2 name')\n\n plt.xlim((-1, 2))\n plt.ylim((-2, 3))\n plt.xlabel('X')\n plt.ylabel('Y')\n new_ticks = np.linspace(-1, 2, 5)\n plt.xticks(new_ticks)\n plt.yticks([-2, -1.8, 1, 3], ['first', 'second', 'third', 'fourth'])\n\n ax = plt.gca()\n ax.spines['right'].set_color('none')\n ax.spines['top'].set_color('none')\n ax.xaxis.set_ticks_position('bottom')\n ax.spines['bottom'].set_position(('data', 0))\n ax.yaxis.set_ticks_position('left')\n ax.spines['left'].set_position(('data', 0))\n\n # 绘制图例,通过loc参数设定图例位置\n # 'best': 0, 'upper right': 1, 'upper left': 2, 'lower left': 3\n # 'lower right': 4, 'right': 5, 'center left': 6, 'center right': 7\n # 'lower center': 8, 'upper center': 9, 'center': 10\n plt.legend(loc=0)\n\n tempx = 0.5\n tempy = 2*tempx + 1\n # 再画一条垂直线\n plt.plot([tempx, tempx, ], [0, tempy, ], 'k--', linewidth=2)\n # scatter在图中画点,设定size与color\n plt.scatter([tempx, ], [tempy, ], s=50, color='b')\n\n # annotate添加注释,xytext + textcoords表示对于标注的描述和偏离\n # arrowprops对图中箭头类型和箭头弧度的设置\n plt.annotate(\n '2x+1=%s' % tempy, xy=(tempx, tempy),\n xycoords='data', xytext=(+30, -30),\n textcoords='offset points', fontsize=16,\n arrowprops=dict(\n arrowstyle='->', connectionstyle='arc3,rad=.2'\n )\n )\n\n # text,直接通过设置注释\n plt.text(-1, 1, 'test test', fontdict={'size': 16, 'color': 'b'})\n\n plt.show()\n\n\ndef basic_label_2_practice():\n print('write your anwser')\n\n\ndef basic_label_2_practice_answer():\n return\n\n\nif __name__ == '__main__':\n print('Start learn basic label 2')\n basic_label_2()\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.text",
"matplotlib.pyplot.plot",
"numpy.linspace",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.scatter"
]
] |
generoso/mlflow | [
"715e07e2c45109ef179d220f4e25848e92f79102"
] | [
"mlflow/types/schema.py"
] | [
"import json\nfrom enum import Enum\n\nimport numpy as np\nimport pandas as pd\nimport string\nfrom typing import Dict, Any, List, Union, Optional\n\nfrom mlflow.exceptions import MlflowException\nfrom mlflow.utils.annotations import deprecated\n\n\ndef _pandas_string_type():\n try:\n return pd.StringDtype()\n except AttributeError:\n return np.object\n\n\nclass DataType(Enum):\n \"\"\"\n MLflow data types.\n \"\"\"\n\n def __new__(cls, value, numpy_type, spark_type, pandas_type=None):\n res = object.__new__(cls)\n res._value_ = value\n res._numpy_type = numpy_type\n res._spark_type = spark_type\n res._pandas_type = pandas_type if pandas_type is not None else numpy_type\n return res\n\n # NB: We only use pandas extension type for strings. There are also pandas extension types for\n # integers and boolean values. We do not use them here for now as most downstream tools are\n # most likely to use / expect native numpy types and would not be compatible with the extension\n # types.\n boolean = (1, np.dtype(\"bool\"), \"BooleanType\")\n \"\"\"Logical data (True, False) .\"\"\"\n integer = (2, np.dtype(\"int32\"), \"IntegerType\")\n \"\"\"32b signed integer numbers.\"\"\"\n long = (3, np.dtype(\"int64\"), \"LongType\")\n \"\"\"64b signed integer numbers. \"\"\"\n float = (4, np.dtype(\"float32\"), \"FloatType\")\n \"\"\"32b floating point numbers. \"\"\"\n double = (5, np.dtype(\"float64\"), \"DoubleType\")\n \"\"\"64b floating point numbers. \"\"\"\n string = (6, np.dtype(\"str\"), \"StringType\", _pandas_string_type())\n \"\"\"Text data.\"\"\"\n binary = (7, np.dtype(\"bytes\"), \"BinaryType\", np.object)\n \"\"\"Sequence of raw bytes.\"\"\"\n datetime = (8, np.dtype(\"datetime64\"), \"TimestampType\")\n \"\"\"64b datetime data.\"\"\"\n\n def __repr__(self):\n return self.name\n\n def to_numpy(self) -> np.dtype:\n \"\"\"Get equivalent numpy data type. \"\"\"\n return self._numpy_type\n\n def to_pandas(self) -> np.dtype:\n \"\"\"Get equivalent pandas data type. 
\"\"\"\n return self._pandas_type\n\n def to_spark(self):\n import pyspark.sql.types\n\n return getattr(pyspark.sql.types, self._spark_type)()\n\n\nclass ColSpec(object):\n \"\"\"\n Specification of name and type of a single column in a dataset.\n \"\"\"\n\n def __init__(\n self, type: DataType, name: Optional[str] = None # pylint: disable=redefined-builtin\n ):\n self._name = name\n try:\n self._type = DataType[type] if isinstance(type, str) else type\n except KeyError:\n raise MlflowException(\n \"Unsupported type '{0}', expected instance of DataType or \"\n \"one of {1}\".format(type, [t.name for t in DataType])\n )\n if not isinstance(self.type, DataType):\n raise TypeError(\n \"Expected mlflow.models.signature.Datatype or str for the 'type' \"\n \"argument, but got {}\".format(self.type.__class__)\n )\n\n @property\n def type(self) -> DataType:\n \"\"\"The column data type.\"\"\"\n return self._type\n\n @property\n def name(self) -> Optional[str]:\n \"\"\"The column name or None if the columns is unnamed.\"\"\"\n return self._name\n\n def to_dict(self) -> Dict[str, Any]:\n if self.name is None:\n return {\"type\": self.type.name}\n else:\n return {\"name\": self.name, \"type\": self.type.name}\n\n def __eq__(self, other) -> bool:\n if isinstance(other, ColSpec):\n names_eq = (self.name is None and other.name is None) or self.name == other.name\n return names_eq and self.type == other.type\n return False\n\n def __repr__(self) -> str:\n if self.name is None:\n return repr(self.type)\n else:\n return \"{name}: {type}\".format(name=repr(self.name), type=repr(self.type))\n\n\nclass TensorInfo(object):\n \"\"\"\n Representation of the shape and type of a Tensor.\n \"\"\"\n\n def __init__(\n self, dtype: np.dtype, shape: Union[tuple, list],\n ):\n if not isinstance(dtype, np.dtype):\n raise TypeError(\n \"Expected `type` to be instance of `{0}`, received `{1}`\".format(\n np.dtype, type.__class__\n )\n )\n # Throw if size information exists flexible numpy data types\n if dtype.char in [\"U\", \"S\"] and not dtype.name.isalpha():\n raise MlflowException(\n \"MLflow does not support size information in flexible numpy data types. Use\"\n ' np.dtype(\"{0}\") instead'.format(dtype.name.rstrip(string.digits))\n )\n\n if not isinstance(shape, (tuple, list)):\n raise TypeError(\n \"Expected `shape` to be instance of `{0}` or `{1}`, received `{2}`\".format(\n tuple, list, shape.__class__\n )\n )\n self._dtype = dtype\n self._shape = tuple(shape)\n\n @property\n def dtype(self) -> np.dtype:\n \"\"\"\n A unique character code for each of the 21 different numpy built-in types.\n See https://numpy.org/devdocs/reference/generated/numpy.dtype.html#numpy.dtype for details.\n \"\"\"\n return self._dtype\n\n @property\n def shape(self) -> tuple:\n \"\"\"The tensor shape\"\"\"\n return self._shape\n\n def to_dict(self) -> Dict[str, Any]:\n return {\"dtype\": self._dtype.name, \"shape\": self._shape}\n\n @classmethod\n def from_json_dict(cls, **kwargs):\n \"\"\"\n Deserialize from a json loaded dictionary.\n The dictionary is expected to contain `dtype` and `shape` keys.\n \"\"\"\n if not {\"dtype\", \"shape\"} <= set(kwargs.keys()):\n raise MlflowException(\n \"Missing keys in TensorSpec JSON. 
Expected to find keys `dtype` and `shape`\"\n )\n tensor_type = np.dtype(kwargs[\"dtype\"])\n tensor_shape = tuple(kwargs[\"shape\"])\n return cls(tensor_type, tensor_shape)\n\n def __repr__(self) -> str:\n return \"Tensor({type}, {shape})\".format(type=repr(self.dtype.name), shape=repr(self.shape))\n\n\nclass TensorSpec(object):\n \"\"\"\n Specification used to represent a dataset stored as a Tensor.\n \"\"\"\n\n def __init__(\n self,\n type: np.dtype, # pylint: disable=redefined-builtin\n shape: Union[tuple, list],\n name: Optional[str] = None,\n ):\n self._name = name\n self._tensorInfo = TensorInfo(type, shape)\n\n @property\n def type(self) -> np.dtype:\n \"\"\"\n A unique character code for each of the 21 different numpy built-in types.\n See https://numpy.org/devdocs/reference/generated/numpy.dtype.html#numpy.dtype for details.\n \"\"\"\n return self._tensorInfo.dtype\n\n @property\n def name(self) -> Optional[str]:\n \"\"\"The tensor name or None if the tensor is unnamed.\"\"\"\n return self._name\n\n @property\n def shape(self) -> tuple:\n \"\"\"The tensor shape\"\"\"\n return self._tensorInfo.shape\n\n def to_dict(self) -> Dict[str, Any]:\n if self.name is None:\n return {\"type\": \"tensor\", \"tensor-spec\": self._tensorInfo.to_dict()}\n else:\n return {\"name\": self.name, \"type\": \"tensor\", \"tensor-spec\": self._tensorInfo.to_dict()}\n\n @classmethod\n def from_json_dict(cls, **kwargs):\n \"\"\"\n Deserialize from a json loaded dictionary.\n The dictionary is expected to contain `type` and `tensor-spec` keys.\n \"\"\"\n if not {\"tensor-spec\", \"type\"} <= set(kwargs.keys()):\n raise MlflowException(\n \"Missing keys in TensorSpec JSON. Expected to find keys `tensor-spec` and `type`\"\n )\n if kwargs[\"type\"] != \"tensor\":\n raise MlflowException(\"Type mismatch, TensorSpec expects `tensor` as the type\")\n tensor_info = TensorInfo.from_json_dict(**kwargs[\"tensor-spec\"])\n return cls(\n tensor_info.dtype, tensor_info.shape, kwargs[\"name\"] if \"name\" in kwargs else None\n )\n\n def __eq__(self, other) -> bool:\n if isinstance(other, TensorSpec):\n names_eq = (self.name is None and other.name is None) or self.name == other.name\n return names_eq and self.type == other.type and self.shape == other.shape\n return False\n\n def __repr__(self) -> str:\n if self.name is None:\n return repr(self._tensorInfo)\n else:\n return \"{name}: {info}\".format(name=repr(self.name), info=repr(self._tensorInfo))\n\n\nclass Schema(object):\n \"\"\"\n Specification of a dataset.\n\n Schema is represented as a list of :py:class:`ColSpec` or :py:class:`TensorSpec`. A combination\n of `ColSpec` and `TensorSpec` is not allowed.\n\n The dataset represented by a schema can be named, with unique non empty names for every input.\n In the case of :py:class:`ColSpec`, the dataset columns can be unnamed with implicit integer\n index defined by their list indices.\n Combination of named and unnamed data inputs are not allowed.\n \"\"\"\n\n def __init__(self, inputs: List[Union[ColSpec, TensorSpec]]):\n if not (\n all(map(lambda x: x.name is None, inputs))\n or all(map(lambda x: x.name is not None, inputs))\n ):\n raise MlflowException(\n \"Creating Schema with a combination of named and unnamed inputs \"\n \"is not allowed. 
Got input names {}\".format([x.name for x in inputs])\n )\n if not (\n all(map(lambda x: isinstance(x, TensorSpec), inputs))\n or all(map(lambda x: isinstance(x, ColSpec), inputs))\n ):\n raise MlflowException(\n \"Creating Schema with a combination of {0} and {1} is not supported. \"\n \"Please choose one of {0} or {1}\".format(ColSpec.__class__, TensorSpec.__class__)\n )\n if (\n all(map(lambda x: isinstance(x, TensorSpec), inputs))\n and len(inputs) > 1\n and any(map(lambda x: x.name is None, inputs))\n ):\n raise MlflowException(\n \"Creating Schema with multiple unnamed TensorSpecs is not supported. \"\n \"Please provide names for each TensorSpec.\"\n )\n self._inputs = inputs\n\n @property\n def inputs(self) -> List[Union[ColSpec, TensorSpec]]:\n \"\"\"Representation of a dataset that defines this schema.\"\"\"\n return self._inputs\n\n @property\n @deprecated(alternative=\"mlflow.types.Schema.inputs\", since=\"1.14\")\n def columns(self) -> List[ColSpec]:\n \"\"\"\n .. deprecated:: 1.14\n Please use :func:`mlflow.types.Schema.inputs`\n The list of columns that defines this schema.\n\n \"\"\"\n if self.is_tensor_spec():\n raise MlflowException(\"Not supported by TensorSpec, use `inputs` instead\")\n return self._inputs\n\n def is_tensor_spec(self) -> bool:\n \"\"\"Return true iff this schema is specified using TensorSpec\"\"\"\n return self.inputs and isinstance(self.inputs[0], TensorSpec)\n\n def input_names(self) -> List[Union[str, int]]:\n \"\"\"Get list of data names or range of indices if the schema has no names.\"\"\"\n return [x.name or i for i, x in enumerate(self.inputs)]\n\n @deprecated(alternative=\"mlflow.types.Schema.input_names\", since=\"1.14\")\n def column_names(self) -> List[Union[str, int]]:\n \"\"\"\n .. deprecated:: 1.14\n Please use :func:`mlflow.types.Schema.input_names()`\n Get list of column names or range of indices if the schema has no column names.\n\n \"\"\"\n if self.is_tensor_spec():\n raise MlflowException(\"Not supported by TensorSpec, use input_names() instead\")\n return [x.name or i for i, x in enumerate(self.columns)]\n\n def has_input_names(self) -> bool:\n \"\"\"Return true iff this schema declares names, false otherwise. \"\"\"\n return self.inputs and self.inputs[0].name is not None\n\n @deprecated(alternative=\"mlflow.types.Schema.has_input_names\", since=\"1.14\")\n def has_column_names(self) -> bool:\n \"\"\"\n .. deprecated:: 1.14\n Please use :func:`mlflow.types.Schema.has_input_names()`\n Return true iff this schema declares column names, false otherwise.\n\n \"\"\"\n if self.is_tensor_spec():\n raise MlflowException(\"Not supported by TensorSpec, use has_input_names() instead\")\n return self.columns and self.columns[0].name is not None\n\n def input_types(self) -> List[Union[DataType, np.dtype]]:\n \"\"\" Get types of the represented dataset.\"\"\"\n return [x.type for x in self.inputs]\n\n @deprecated(alternative=\"mlflow.types.Schema.input_types\", since=\"1.14\")\n def column_types(self) -> List[DataType]:\n \"\"\"\n .. deprecated:: 1.14\n Please use :func:`mlflow.types.Schema.input_types()`\n Get types of the represented dataset. 
Unsupported by TensorSpec.\n\n \"\"\"\n if self.is_tensor_spec():\n raise MlflowException(\"TensorSpec only supports numpy types, use numpy_types() instead\")\n return [x.type for x in self.columns]\n\n def numpy_types(self) -> List[np.dtype]:\n \"\"\" Convenience shortcut to get the datatypes as numpy types.\"\"\"\n if self.is_tensor_spec():\n return [x.type for x in self.inputs]\n return [x.type.to_numpy() for x in self.inputs]\n\n def pandas_types(self) -> List[np.dtype]:\n \"\"\" Convenience shortcut to get the datatypes as pandas types. Unsupported by TensorSpec.\"\"\"\n if self.is_tensor_spec():\n raise MlflowException(\"TensorSpec only supports numpy types, use numpy_types() instead\")\n return [x.type.to_pandas() for x in self.inputs]\n\n def as_spark_schema(self):\n \"\"\"Convert to Spark schema. If this schema is a single unnamed column, it is converted\n directly the corresponding spark data type, otherwise it's returned as a struct (missing\n column names are filled with an integer sequence).\n Unsupported by TensorSpec.\n \"\"\"\n if self.is_tensor_spec():\n raise MlflowException(\"TensorSpec cannot be converted to spark dataframe\")\n if len(self.inputs) == 1 and self.inputs[0].name is None:\n return self.inputs[0].type.to_spark()\n from pyspark.sql.types import StructType, StructField\n\n return StructType(\n [\n StructField(name=col.name or str(i), dataType=col.type.to_spark())\n for i, col in enumerate(self.inputs)\n ]\n )\n\n def to_json(self) -> str:\n \"\"\"Serialize into json string.\"\"\"\n return json.dumps([x.to_dict() for x in self.inputs])\n\n def to_dict(self) -> List[Dict[str, Any]]:\n \"\"\"Serialize into a jsonable dictionary.\"\"\"\n return [x.to_dict() for x in self.inputs]\n\n @classmethod\n def from_json(cls, json_str: str):\n \"\"\" Deserialize from a json string.\"\"\"\n\n def read_input(x: dict):\n return TensorSpec.from_json_dict(**x) if x[\"type\"] == \"tensor\" else ColSpec(**x)\n\n return cls([read_input(x) for x in json.loads(json_str)])\n\n def __eq__(self, other) -> bool:\n if isinstance(other, Schema):\n return self.inputs == other.inputs\n else:\n return False\n\n def __repr__(self) -> str:\n return repr(self.inputs)\n"
] | [
[
"numpy.dtype",
"pandas.StringDtype"
]
] |
pierre-haessig/matplotlib | [
"0d945044ca3fbf98cad55912584ef80911f330c6"
] | [
"examples/pylab_examples/ginput_manual_clabel.py"
] | [
"#!/usr/bin/env python\n# -*- noplot -*-\n\nfrom __future__ import print_function\n\"\"\"\nThis provides examples of uses of interactive functions, such as ginput,\nwaitforbuttonpress and manual clabel placement.\n\nThis script must be run interactively using a backend that has a\ngraphical user interface (for example, using GTKAgg backend, but not\nPS backend).\n\nSee also ginput_demo.py\n\"\"\"\nimport time\nimport matplotlib\nimport numpy as np\nimport matplotlib.cm as cm\nimport matplotlib.mlab as mlab\nimport matplotlib.pyplot as plt\n\ndef tellme(s):\n print(s)\n plt.title(s,fontsize=16)\n plt.draw()\n\n##################################################\n# Define a triangle by clicking three points\n##################################################\nplt.clf()\nplt.axis([-1.,1.,-1.,1.])\nplt.setp(plt.gca(),autoscale_on=False)\n\ntellme('You will define a triangle, click to begin')\n\nplt.waitforbuttonpress()\n\nhappy = False\nwhile not happy:\n pts = []\n while len(pts) < 3:\n tellme('Select 3 corners with mouse')\n pts = np.asarray( plt.ginput(3,timeout=-1) )\n if len(pts) < 3:\n tellme('Too few points, starting over')\n time.sleep(1) # Wait a second\n\n ph = plt.fill( pts[:,0], pts[:,1], 'r', lw=2 )\n\n tellme('Happy? Key click for yes, mouse click for no')\n\n happy = plt.waitforbuttonpress()\n\n # Get rid of fill\n if not happy:\n for p in ph: p.remove()\n\n##################################################\n# Now contour according to distance from triangle\n# corners - just an example\n##################################################\n\n# Define a nice function of distance from individual pts\ndef f(x,y,pts):\n z = np.zeros_like(x)\n for p in pts:\n z = z + 1/(np.sqrt((x-p[0])**2+(y-p[1])**2))\n return 1/z\n\nX,Y = np.meshgrid( np.linspace(-1,1,51), np.linspace(-1,1,51) )\nZ = f(X,Y,pts)\n\nCS = plt.contour( X, Y, Z, 20 )\n\ntellme( 'Use mouse to select contour label locations, middle button to finish' )\nCL = plt.clabel( CS, manual=True )\n\n##################################################\n# Now do a zoom\n##################################################\ntellme( 'Now do a nested zoom, click to begin' )\nplt.waitforbuttonpress()\n\nhappy = False\nwhile not happy:\n tellme( 'Select two corners of zoom, middle mouse button to finish' )\n pts = np.asarray( plt.ginput(2,timeout=-1) )\n\n happy = len(pts) < 2\n if happy: break\n\n pts = np.sort(pts,axis=0)\n plt.axis( pts.T.ravel() )\n\ntellme('All Done!')\nplt.show()\n"
] | [
[
"numpy.sqrt",
"numpy.zeros_like",
"matplotlib.pyplot.ginput",
"numpy.sort",
"matplotlib.pyplot.clabel",
"matplotlib.pyplot.draw",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.title",
"matplotlib.pyplot.contour",
"matplotlib.pyplot.show",
"matplotlib.pyplot.fill",
"matplotlib.pyplot.waitforbuttonpress",
"numpy.linspace"
]
] |
ejgenc/Data-Analysis_Istanbul-Health-Tourism | [
"34b9838690ca640c6a7a60f63eb2f51983ec46ef"
] | [
"src/data_visualization/visualize_bivariate_analysis_htourism_center_count_at_district_level.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\n------ What is this file? ------\n\nThis script targets one file:\n - geographic_distribution_of_htourism_centers.shp\n \nThe script produces a small-multiples scatterplot visualization of htourism center count\nper distict.\n\nReturns a small multiples view of two scatterplots related to htourism center\ncount at district level.\n\"\"\"\n#%% --- Import Required Packages ---\n\nimport os\nfrom pathlib import Path # To wrap around filepaths\nimport geopandas as gpd\nfrom scipy.stats import pearsonr\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n#%% --- Set proper directory to assure integration with doit ---\n\nabspath = os.path.abspath(__file__)\ndname = os.path.dirname(abspath)\nos.chdir(dname)\n\n#%% --- Import Data ---\n\n#Import htourism centers data - aggregated at the district level\nimport_fp = Path(\"../../data/final/geographic_distribution_of_htourism_centers.shp\")\nhtourism_gdf_agg = gpd.read_file(import_fp, encoding = \"utf-8-sig\")\n\n#%% --- Fill missing values with zero ---\n\nhtourism_gdf_agg.fillna(0, inplace = True)\n\n#%% --- Get pearson's r ---\n\nresults = []\ndependent_variable = htourism_gdf_agg.loc[:,\"htourism_c\"]\nindependent_variables = [htourism_gdf_agg.loc[:,\"population\"],\n htourism_gdf_agg.loc[:,\"yearly_ave\"]]\nlabels = [\"Population\", \"Yearly average household income (in thousand TL)\"]\n\nfor independent_variable in independent_variables:\n result = pearsonr(independent_variable, dependent_variable)\n results.append(result)\n \n \n#%% --- Visualization One: Small multiples scatterplot\n\nwith plt.style.context('matplotlib_stylesheet_ejg_fixes'):\n \n # --- Create figure and axes ---\n fig_1 = plt.figure(figsize = (10.80,10.80))\n \n districts_to_label_list = [\n [\"Esenyurt\", \"Kucukcekmece\", \"Bahcelievler\",\n \"Uskudar\", \"Kadikoy\", \"Atasehir\", \"Besiktas\",\n \"Sisli\", \"Bakirkoy\", \"Beyoglu\", \"Fatih\"],\n [\"Kadikoy\",\"Besiktas\", \"Bakirkoy\",\"Adalar\",\n \"Sisli\", \"Sariyer\",\"Uskudar\",\"Atasehir\",\n \"Maltepe\",\"Fatih\", \"Beyoglu\",\"Bahcelievler\"]\n ]\n\n i = 1\n for independent_variable in independent_variables:\n ax = fig_1.add_subplot(2,1,i)\n i += 1\n ax.scatter(independent_variable, dependent_variable,\n s = 40,\n color = \"#02b72e\",\n marker = \"s\")\n \n ax.set_xlabel(labels[i -2],\n fontfamily = \"Arial\",\n fontsize = 16,\n fontweight = \"bold\")\n \n ax.text(x = 0.90, y = 0.90,\n s = \"r = {:.2f}\".format(results[i - 2][0]),\n transform = ax.transAxes,\n fontfamily = \"Arial\",\n fontsize = 14,\n fontweight = \"bold\")\n \n districts_to_label = (lambda x: districts_to_label_list[0] if np.array_equal(independent_variable,htourism_gdf_agg.loc[:,\"population\"]) else districts_to_label_list[1])(independent_variable)\n districts_to_label_mask = htourism_gdf_agg.loc[:,\"district_e\"].isin(districts_to_label)\n\n districts_to_label_xy_df = htourism_gdf_agg.loc[districts_to_label_mask,[\"district_e\",\"htourism_c\",\"population\",\"yearly_ave\"]]\n \n for idx, row in districts_to_label_xy_df.iterrows():\n x = (lambda x: row[\"yearly_ave\"] + 2 if np.array_equal(independent_variable,htourism_gdf_agg.loc[:,\"yearly_ave\"]) else row[\"population\"] + 3)(independent_variable)\n y = row[\"htourism_c\"] #To align it properly\n ax.annotate(s = row[\"district_e\"],\n xy = (x,y),\n horizontalalignment='left',\n verticalalignment = \"center\")\n \n fig_1.text(x = 0.05, y = 0.225,\n s = \"Number of institutions related to health tourism\",\n fontfamily = \"Arial\",\n fontsize = 16,\n 
fontweight = \"bold\",\n rotation = 90)\n \n#%% --- Export Figures ---\n\ncurrent_filename_split = os.path.basename(__file__).split(\".\")[0].split(\"_\")\ncurrent_filename_complete = \"_\".join(current_filename_split)\n\nmkdir_path = Path(\"../../media/figures/raw/{}\".format(current_filename_complete))\nos.mkdir(mkdir_path)\n\nfile_extensions = [\".png\", \".svg\"]\n\nfor file_extension in file_extensions:\n filename_extended = \"scatterplot\" + file_extension\n export_fp = Path.joinpath(mkdir_path, filename_extended)\n fig_1.savefig(export_fp,\n bbox_inches = \"tight\")"
] | [
[
"matplotlib.pyplot.figure",
"numpy.array_equal",
"scipy.stats.pearsonr",
"matplotlib.pyplot.style.context"
]
] |
bhumikapaharia/xarray | [
"39e586f96b8f23d3703a781c59c2ee01eb9d598a"
] | [
"xarray/tests/test_dask.py"
] | [
"import operator\nimport pickle\nimport sys\nfrom contextlib import suppress\nfrom distutils.version import LooseVersion\nfrom textwrap import dedent\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nimport xarray as xr\nimport xarray.ufuncs as xu\nfrom xarray import DataArray, Dataset, Variable\nfrom xarray.core import duck_array_ops\nfrom xarray.testing import assert_chunks_equal\nfrom xarray.tests import mock\n\nfrom ..core.duck_array_ops import lazy_array_equiv\nfrom . import (\n assert_allclose,\n assert_array_equal,\n assert_equal,\n assert_frame_equal,\n assert_identical,\n raise_if_dask_computes,\n requires_pint_0_15,\n requires_scipy_or_netCDF4,\n)\nfrom .test_backends import create_tmp_file\n\ndask = pytest.importorskip(\"dask\")\nda = pytest.importorskip(\"dask.array\")\ndd = pytest.importorskip(\"dask.dataframe\")\n\nON_WINDOWS = sys.platform == \"win32\"\n\n\ndef test_raise_if_dask_computes():\n data = da.from_array(np.random.RandomState(0).randn(4, 6), chunks=(2, 2))\n with pytest.raises(RuntimeError, match=r\"Too many computes\"):\n with raise_if_dask_computes():\n data.compute()\n\n\nclass DaskTestCase:\n def assertLazyAnd(self, expected, actual, test):\n with dask.config.set(scheduler=\"synchronous\"):\n test(actual, expected)\n\n if isinstance(actual, Dataset):\n for k, v in actual.variables.items():\n if k in actual.dims:\n assert isinstance(v.data, np.ndarray)\n else:\n assert isinstance(v.data, da.Array)\n elif isinstance(actual, DataArray):\n assert isinstance(actual.data, da.Array)\n for k, v in actual.coords.items():\n if k in actual.dims:\n assert isinstance(v.data, np.ndarray)\n else:\n assert isinstance(v.data, da.Array)\n elif isinstance(actual, Variable):\n assert isinstance(actual.data, da.Array)\n else:\n assert False\n\n\nclass TestVariable(DaskTestCase):\n def assertLazyAndIdentical(self, expected, actual):\n self.assertLazyAnd(expected, actual, assert_identical)\n\n def assertLazyAndAllClose(self, expected, actual):\n self.assertLazyAnd(expected, actual, assert_allclose)\n\n @pytest.fixture(autouse=True)\n def setUp(self):\n self.values = np.random.RandomState(0).randn(4, 6)\n self.data = da.from_array(self.values, chunks=(2, 2))\n\n self.eager_var = Variable((\"x\", \"y\"), self.values)\n self.lazy_var = Variable((\"x\", \"y\"), self.data)\n\n def test_basics(self):\n v = self.lazy_var\n assert self.data is v.data\n assert self.data.chunks == v.chunks\n assert_array_equal(self.values, v)\n\n def test_copy(self):\n self.assertLazyAndIdentical(self.eager_var, self.lazy_var.copy())\n self.assertLazyAndIdentical(self.eager_var, self.lazy_var.copy(deep=True))\n\n def test_chunk(self):\n for chunks, expected in [\n ({}, ((2, 2), (2, 2, 2))),\n (3, ((3, 1), (3, 3))),\n ({\"x\": 3, \"y\": 3}, ((3, 1), (3, 3))),\n ({\"x\": 3}, ((3, 1), (2, 2, 2))),\n ({\"x\": (3, 1)}, ((3, 1), (2, 2, 2))),\n ]:\n rechunked = self.lazy_var.chunk(chunks)\n assert rechunked.chunks == expected\n self.assertLazyAndIdentical(self.eager_var, rechunked)\n\n def test_indexing(self):\n u = self.eager_var\n v = self.lazy_var\n self.assertLazyAndIdentical(u[0], v[0])\n self.assertLazyAndIdentical(u[:1], v[:1])\n self.assertLazyAndIdentical(u[[0, 1], [0, 1, 2]], v[[0, 1], [0, 1, 2]])\n\n @pytest.mark.skipif(\n LooseVersion(dask.__version__) < LooseVersion(\"2021.04.1\"),\n reason=\"Requires dask v2021.04.1 or later\",\n )\n @pytest.mark.parametrize(\n \"expected_data, index\",\n [\n (da.array([99, 2, 3, 4]), 0),\n (da.array([99, 99, 99, 4]), slice(2, None, -1)),\n (da.array([99, 99, 
3, 99]), [0, -1, 1]),\n (da.array([99, 99, 99, 4]), np.arange(3)),\n (da.array([1, 99, 99, 99]), [False, True, True, True]),\n (da.array([1, 99, 99, 99]), np.arange(4) > 0),\n (da.array([99, 99, 99, 99]), Variable((\"x\"), da.array([1, 2, 3, 4])) > 0),\n ],\n )\n def test_setitem_dask_array(self, expected_data, index):\n arr = Variable((\"x\"), da.array([1, 2, 3, 4]))\n expected = Variable((\"x\"), expected_data)\n arr[index] = 99\n assert_identical(arr, expected)\n\n @pytest.mark.skipif(\n LooseVersion(dask.__version__) >= LooseVersion(\"2021.04.1\"),\n reason=\"Requires dask v2021.04.0 or earlier\",\n )\n def test_setitem_dask_array_error(self):\n with pytest.raises(TypeError, match=r\"stored in a dask array\"):\n v = self.lazy_var\n v[:1] = 0\n\n def test_squeeze(self):\n u = self.eager_var\n v = self.lazy_var\n self.assertLazyAndIdentical(u[0].squeeze(), v[0].squeeze())\n\n def test_equals(self):\n v = self.lazy_var\n assert v.equals(v)\n assert isinstance(v.data, da.Array)\n assert v.identical(v)\n assert isinstance(v.data, da.Array)\n\n def test_transpose(self):\n u = self.eager_var\n v = self.lazy_var\n self.assertLazyAndIdentical(u.T, v.T)\n\n def test_shift(self):\n u = self.eager_var\n v = self.lazy_var\n self.assertLazyAndIdentical(u.shift(x=2), v.shift(x=2))\n self.assertLazyAndIdentical(u.shift(x=-2), v.shift(x=-2))\n assert v.data.chunks == v.shift(x=1).data.chunks\n\n def test_roll(self):\n u = self.eager_var\n v = self.lazy_var\n self.assertLazyAndIdentical(u.roll(x=2), v.roll(x=2))\n assert v.data.chunks == v.roll(x=1).data.chunks\n\n def test_unary_op(self):\n u = self.eager_var\n v = self.lazy_var\n self.assertLazyAndIdentical(-u, -v)\n self.assertLazyAndIdentical(abs(u), abs(v))\n self.assertLazyAndIdentical(u.round(), v.round())\n\n def test_binary_op(self):\n u = self.eager_var\n v = self.lazy_var\n self.assertLazyAndIdentical(2 * u, 2 * v)\n self.assertLazyAndIdentical(u + u, v + v)\n self.assertLazyAndIdentical(u[0] + u, v[0] + v)\n\n def test_repr(self):\n expected = dedent(\n \"\"\"\\\n <xarray.Variable (x: 4, y: 6)>\n {!r}\"\"\".format(\n self.lazy_var.data\n )\n )\n assert expected == repr(self.lazy_var)\n\n def test_pickle(self):\n # Test that pickling/unpickling does not convert the dask\n # backend to numpy\n a1 = Variable([\"x\"], build_dask_array(\"x\"))\n a1.compute()\n assert not a1._in_memory\n assert kernel_call_count == 1\n a2 = pickle.loads(pickle.dumps(a1))\n assert kernel_call_count == 1\n assert_identical(a1, a2)\n assert not a1._in_memory\n assert not a2._in_memory\n\n def test_reduce(self):\n u = self.eager_var\n v = self.lazy_var\n self.assertLazyAndAllClose(u.mean(), v.mean())\n self.assertLazyAndAllClose(u.std(), v.std())\n with raise_if_dask_computes():\n actual = v.argmax(dim=\"x\")\n self.assertLazyAndAllClose(u.argmax(dim=\"x\"), actual)\n with raise_if_dask_computes():\n actual = v.argmin(dim=\"x\")\n self.assertLazyAndAllClose(u.argmin(dim=\"x\"), actual)\n self.assertLazyAndAllClose((u > 1).any(), (v > 1).any())\n self.assertLazyAndAllClose((u < 1).all(\"x\"), (v < 1).all(\"x\"))\n with pytest.raises(NotImplementedError, match=r\"only works along an axis\"):\n v.median()\n with pytest.raises(NotImplementedError, match=r\"only works along an axis\"):\n v.median(v.dims)\n with raise_if_dask_computes():\n v.reduce(duck_array_ops.mean)\n\n def test_missing_values(self):\n values = np.array([0, 1, np.nan, 3])\n data = da.from_array(values, chunks=(2,))\n\n eager_var = Variable(\"x\", values)\n lazy_var = Variable(\"x\", data)\n 
self.assertLazyAndIdentical(eager_var, lazy_var.fillna(lazy_var))\n self.assertLazyAndIdentical(Variable(\"x\", range(4)), lazy_var.fillna(2))\n self.assertLazyAndIdentical(eager_var.count(), lazy_var.count())\n\n def test_concat(self):\n u = self.eager_var\n v = self.lazy_var\n self.assertLazyAndIdentical(u, Variable.concat([v[:2], v[2:]], \"x\"))\n self.assertLazyAndIdentical(u[:2], Variable.concat([v[0], v[1]], \"x\"))\n self.assertLazyAndIdentical(u[:2], Variable.concat([u[0], v[1]], \"x\"))\n self.assertLazyAndIdentical(u[:2], Variable.concat([v[0], u[1]], \"x\"))\n self.assertLazyAndIdentical(\n u[:3], Variable.concat([v[[0, 2]], v[[1]]], \"x\", positions=[[0, 2], [1]])\n )\n\n def test_missing_methods(self):\n v = self.lazy_var\n try:\n v.argsort()\n except NotImplementedError as err:\n assert \"dask\" in str(err)\n try:\n v[0].item()\n except NotImplementedError as err:\n assert \"dask\" in str(err)\n\n @pytest.mark.filterwarnings(\"ignore::PendingDeprecationWarning\")\n def test_univariate_ufunc(self):\n u = self.eager_var\n v = self.lazy_var\n self.assertLazyAndAllClose(np.sin(u), xu.sin(v))\n\n @pytest.mark.filterwarnings(\"ignore::PendingDeprecationWarning\")\n def test_bivariate_ufunc(self):\n u = self.eager_var\n v = self.lazy_var\n self.assertLazyAndAllClose(np.maximum(u, 0), xu.maximum(v, 0))\n self.assertLazyAndAllClose(np.maximum(u, 0), xu.maximum(0, v))\n\n def test_compute(self):\n u = self.eager_var\n v = self.lazy_var\n\n assert dask.is_dask_collection(v)\n (v2,) = dask.compute(v + 1)\n assert not dask.is_dask_collection(v2)\n\n assert ((u + 1).data == v2.data).all()\n\n def test_persist(self):\n u = self.eager_var\n v = self.lazy_var + 1\n\n (v2,) = dask.persist(v)\n assert v is not v2\n assert len(v2.__dask_graph__()) < len(v.__dask_graph__())\n assert v2.__dask_keys__() == v.__dask_keys__()\n assert dask.is_dask_collection(v)\n assert dask.is_dask_collection(v2)\n\n self.assertLazyAndAllClose(u + 1, v)\n self.assertLazyAndAllClose(u + 1, v2)\n\n @requires_pint_0_15(reason=\"Need __dask_tokenize__\")\n def test_tokenize_duck_dask_array(self):\n import pint\n\n unit_registry = pint.UnitRegistry()\n\n q = unit_registry.Quantity(self.data, \"meter\")\n variable = xr.Variable((\"x\", \"y\"), q)\n\n token = dask.base.tokenize(variable)\n post_op = variable + 5 * unit_registry.meter\n\n assert dask.base.tokenize(variable) != dask.base.tokenize(post_op)\n # Immutability check\n assert dask.base.tokenize(variable) == token\n\n\nclass TestDataArrayAndDataset(DaskTestCase):\n def assertLazyAndIdentical(self, expected, actual):\n self.assertLazyAnd(expected, actual, assert_identical)\n\n def assertLazyAndAllClose(self, expected, actual):\n self.assertLazyAnd(expected, actual, assert_allclose)\n\n def assertLazyAndEqual(self, expected, actual):\n self.assertLazyAnd(expected, actual, assert_equal)\n\n @pytest.fixture(autouse=True)\n def setUp(self):\n self.values = np.random.randn(4, 6)\n self.data = da.from_array(self.values, chunks=(2, 2))\n self.eager_array = DataArray(\n self.values, coords={\"x\": range(4)}, dims=(\"x\", \"y\"), name=\"foo\"\n )\n self.lazy_array = DataArray(\n self.data, coords={\"x\": range(4)}, dims=(\"x\", \"y\"), name=\"foo\"\n )\n\n def test_rechunk(self):\n chunked = self.eager_array.chunk({\"x\": 2}).chunk({\"y\": 2})\n assert chunked.chunks == ((2,) * 2, (2,) * 3)\n self.assertLazyAndIdentical(self.lazy_array, chunked)\n\n def test_new_chunk(self):\n chunked = self.eager_array.chunk()\n assert 
chunked.data.name.startswith(\"xarray-<this-array>\")\n\n def test_lazy_dataset(self):\n lazy_ds = Dataset({\"foo\": ((\"x\", \"y\"), self.data)})\n assert isinstance(lazy_ds.foo.variable.data, da.Array)\n\n def test_lazy_array(self):\n u = self.eager_array\n v = self.lazy_array\n\n self.assertLazyAndAllClose(u, v)\n self.assertLazyAndAllClose(-u, -v)\n self.assertLazyAndAllClose(u.T, v.T)\n self.assertLazyAndAllClose(u.mean(), v.mean())\n self.assertLazyAndAllClose(1 + u, 1 + v)\n\n actual = xr.concat([v[:2], v[2:]], \"x\")\n self.assertLazyAndAllClose(u, actual)\n\n def test_compute(self):\n u = self.eager_array\n v = self.lazy_array\n\n assert dask.is_dask_collection(v)\n (v2,) = dask.compute(v + 1)\n assert not dask.is_dask_collection(v2)\n\n assert ((u + 1).data == v2.data).all()\n\n def test_persist(self):\n u = self.eager_array\n v = self.lazy_array + 1\n\n (v2,) = dask.persist(v)\n assert v is not v2\n assert len(v2.__dask_graph__()) < len(v.__dask_graph__())\n assert v2.__dask_keys__() == v.__dask_keys__()\n assert dask.is_dask_collection(v)\n assert dask.is_dask_collection(v2)\n\n self.assertLazyAndAllClose(u + 1, v)\n self.assertLazyAndAllClose(u + 1, v2)\n\n def test_concat_loads_variables(self):\n # Test that concat() computes not-in-memory variables at most once\n # and loads them in the output, while leaving the input unaltered.\n d1 = build_dask_array(\"d1\")\n c1 = build_dask_array(\"c1\")\n d2 = build_dask_array(\"d2\")\n c2 = build_dask_array(\"c2\")\n d3 = build_dask_array(\"d3\")\n c3 = build_dask_array(\"c3\")\n # Note: c is a non-index coord.\n # Index coords are loaded by IndexVariable.__init__.\n ds1 = Dataset(data_vars={\"d\": (\"x\", d1)}, coords={\"c\": (\"x\", c1)})\n ds2 = Dataset(data_vars={\"d\": (\"x\", d2)}, coords={\"c\": (\"x\", c2)})\n ds3 = Dataset(data_vars={\"d\": (\"x\", d3)}, coords={\"c\": (\"x\", c3)})\n\n assert kernel_call_count == 0\n out = xr.concat(\n [ds1, ds2, ds3], dim=\"n\", data_vars=\"different\", coords=\"different\"\n )\n # each kernel is computed exactly once\n assert kernel_call_count == 6\n # variables are loaded in the output\n assert isinstance(out[\"d\"].data, np.ndarray)\n assert isinstance(out[\"c\"].data, np.ndarray)\n\n out = xr.concat([ds1, ds2, ds3], dim=\"n\", data_vars=\"all\", coords=\"all\")\n # no extra kernel calls\n assert kernel_call_count == 6\n assert isinstance(out[\"d\"].data, dask.array.Array)\n assert isinstance(out[\"c\"].data, dask.array.Array)\n\n out = xr.concat([ds1, ds2, ds3], dim=\"n\", data_vars=[\"d\"], coords=[\"c\"])\n # no extra kernel calls\n assert kernel_call_count == 6\n assert isinstance(out[\"d\"].data, dask.array.Array)\n assert isinstance(out[\"c\"].data, dask.array.Array)\n\n out = xr.concat([ds1, ds2, ds3], dim=\"n\", data_vars=[], coords=[])\n # variables are loaded once as we are validing that they're identical\n assert kernel_call_count == 12\n assert isinstance(out[\"d\"].data, np.ndarray)\n assert isinstance(out[\"c\"].data, np.ndarray)\n\n out = xr.concat(\n [ds1, ds2, ds3],\n dim=\"n\",\n data_vars=\"different\",\n coords=\"different\",\n compat=\"identical\",\n )\n # compat=identical doesn't do any more kernel calls than compat=equals\n assert kernel_call_count == 18\n assert isinstance(out[\"d\"].data, np.ndarray)\n assert isinstance(out[\"c\"].data, np.ndarray)\n\n # When the test for different turns true halfway through,\n # stop computing variables as it would not have any benefit\n ds4 = Dataset(data_vars={\"d\": (\"x\", [2.0])}, coords={\"c\": (\"x\", [2.0])})\n out = 
xr.concat(\n [ds1, ds2, ds4, ds3], dim=\"n\", data_vars=\"different\", coords=\"different\"\n )\n # the variables of ds1 and ds2 were computed, but those of ds3 didn't\n assert kernel_call_count == 22\n assert isinstance(out[\"d\"].data, dask.array.Array)\n assert isinstance(out[\"c\"].data, dask.array.Array)\n # the data of ds1 and ds2 was loaded into numpy and then\n # concatenated to the data of ds3. Thus, only ds3 is computed now.\n out.compute()\n assert kernel_call_count == 24\n\n # Finally, test that originals are unaltered\n assert ds1[\"d\"].data is d1\n assert ds1[\"c\"].data is c1\n assert ds2[\"d\"].data is d2\n assert ds2[\"c\"].data is c2\n assert ds3[\"d\"].data is d3\n assert ds3[\"c\"].data is c3\n\n # now check that concat() is correctly using dask name equality to skip loads\n out = xr.concat(\n [ds1, ds1, ds1], dim=\"n\", data_vars=\"different\", coords=\"different\"\n )\n assert kernel_call_count == 24\n # variables are not loaded in the output\n assert isinstance(out[\"d\"].data, dask.array.Array)\n assert isinstance(out[\"c\"].data, dask.array.Array)\n\n out = xr.concat(\n [ds1, ds1, ds1], dim=\"n\", data_vars=[], coords=[], compat=\"identical\"\n )\n assert kernel_call_count == 24\n # variables are not loaded in the output\n assert isinstance(out[\"d\"].data, dask.array.Array)\n assert isinstance(out[\"c\"].data, dask.array.Array)\n\n out = xr.concat(\n [ds1, ds2.compute(), ds3],\n dim=\"n\",\n data_vars=\"all\",\n coords=\"different\",\n compat=\"identical\",\n )\n # c1,c3 must be computed for comparison since c2 is numpy;\n # d2 is computed too\n assert kernel_call_count == 28\n\n out = xr.concat(\n [ds1, ds2.compute(), ds3],\n dim=\"n\",\n data_vars=\"all\",\n coords=\"all\",\n compat=\"identical\",\n )\n # no extra computes\n assert kernel_call_count == 30\n\n # Finally, test that originals are unaltered\n assert ds1[\"d\"].data is d1\n assert ds1[\"c\"].data is c1\n assert ds2[\"d\"].data is d2\n assert ds2[\"c\"].data is c2\n assert ds3[\"d\"].data is d3\n assert ds3[\"c\"].data is c3\n\n def test_groupby(self):\n u = self.eager_array\n v = self.lazy_array\n\n expected = u.groupby(\"x\").mean(...)\n with raise_if_dask_computes():\n actual = v.groupby(\"x\").mean(...)\n self.assertLazyAndAllClose(expected, actual)\n\n def test_rolling(self):\n u = self.eager_array\n v = self.lazy_array\n\n expected = u.rolling(x=2).mean()\n with raise_if_dask_computes():\n actual = v.rolling(x=2).mean()\n self.assertLazyAndAllClose(expected, actual)\n\n def test_groupby_first(self):\n u = self.eager_array\n v = self.lazy_array\n\n for coords in [u.coords, v.coords]:\n coords[\"ab\"] = (\"x\", [\"a\", \"a\", \"b\", \"b\"])\n with pytest.raises(NotImplementedError, match=r\"dask\"):\n v.groupby(\"ab\").first()\n expected = u.groupby(\"ab\").first()\n with raise_if_dask_computes():\n actual = v.groupby(\"ab\").first(skipna=False)\n self.assertLazyAndAllClose(expected, actual)\n\n def test_reindex(self):\n u = self.eager_array.assign_coords(y=range(6))\n v = self.lazy_array.assign_coords(y=range(6))\n\n for kwargs in [\n {\"x\": [2, 3, 4]},\n {\"x\": [1, 100, 2, 101, 3]},\n {\"x\": [2.5, 3, 3.5], \"y\": [2, 2.5, 3]},\n ]:\n expected = u.reindex(**kwargs)\n actual = v.reindex(**kwargs)\n self.assertLazyAndAllClose(expected, actual)\n\n def test_to_dataset_roundtrip(self):\n u = self.eager_array\n v = self.lazy_array\n\n expected = u.assign_coords(x=u[\"x\"])\n self.assertLazyAndEqual(expected, v.to_dataset(\"x\").to_array(\"x\"))\n\n def test_merge(self):\n def 
duplicate_and_merge(array):\n return xr.merge([array, array.rename(\"bar\")]).to_array()\n\n expected = duplicate_and_merge(self.eager_array)\n actual = duplicate_and_merge(self.lazy_array)\n self.assertLazyAndEqual(expected, actual)\n\n @pytest.mark.filterwarnings(\"ignore::PendingDeprecationWarning\")\n def test_ufuncs(self):\n u = self.eager_array\n v = self.lazy_array\n self.assertLazyAndAllClose(np.sin(u), xu.sin(v))\n\n def test_where_dispatching(self):\n a = np.arange(10)\n b = a > 3\n x = da.from_array(a, 5)\n y = da.from_array(b, 5)\n expected = DataArray(a).where(b)\n self.assertLazyAndEqual(expected, DataArray(a).where(y))\n self.assertLazyAndEqual(expected, DataArray(x).where(b))\n self.assertLazyAndEqual(expected, DataArray(x).where(y))\n\n def test_simultaneous_compute(self):\n ds = Dataset({\"foo\": (\"x\", range(5)), \"bar\": (\"x\", range(5))}).chunk()\n\n count = [0]\n\n def counting_get(*args, **kwargs):\n count[0] += 1\n return dask.get(*args, **kwargs)\n\n ds.load(scheduler=counting_get)\n\n assert count[0] == 1\n\n def test_stack(self):\n data = da.random.normal(size=(2, 3, 4), chunks=(1, 3, 4))\n arr = DataArray(data, dims=(\"w\", \"x\", \"y\"))\n stacked = arr.stack(z=(\"x\", \"y\"))\n z = pd.MultiIndex.from_product([np.arange(3), np.arange(4)], names=[\"x\", \"y\"])\n expected = DataArray(data.reshape(2, -1), {\"z\": z}, dims=[\"w\", \"z\"])\n assert stacked.data.chunks == expected.data.chunks\n self.assertLazyAndEqual(expected, stacked)\n\n def test_dot(self):\n eager = self.eager_array.dot(self.eager_array[0])\n lazy = self.lazy_array.dot(self.lazy_array[0])\n self.assertLazyAndAllClose(eager, lazy)\n\n @pytest.mark.skipif(LooseVersion(dask.__version__) >= \"2.0\", reason=\"no meta\")\n def test_dataarray_repr_legacy(self):\n data = build_dask_array(\"data\")\n nonindex_coord = build_dask_array(\"coord\")\n a = DataArray(data, dims=[\"x\"], coords={\"y\": (\"x\", nonindex_coord)})\n expected = dedent(\n \"\"\"\\\n <xarray.DataArray 'data' (x: 1)>\n {!r}\n Coordinates:\n y (x) int64 dask.array<chunksize=(1,), meta=np.ndarray>\n Dimensions without coordinates: x\"\"\".format(\n data\n )\n )\n assert expected == repr(a)\n assert kernel_call_count == 0 # should not evaluate dask array\n\n @pytest.mark.skipif(LooseVersion(dask.__version__) < \"2.0\", reason=\"needs meta\")\n def test_dataarray_repr(self):\n data = build_dask_array(\"data\")\n nonindex_coord = build_dask_array(\"coord\")\n a = DataArray(data, dims=[\"x\"], coords={\"y\": (\"x\", nonindex_coord)})\n expected = dedent(\n \"\"\"\\\n <xarray.DataArray 'data' (x: 1)>\n {!r}\n Coordinates:\n y (x) int64 dask.array<chunksize=(1,), meta=np.ndarray>\n Dimensions without coordinates: x\"\"\".format(\n data\n )\n )\n assert expected == repr(a)\n assert kernel_call_count == 0 # should not evaluate dask array\n\n @pytest.mark.skipif(LooseVersion(dask.__version__) < \"2.0\", reason=\"needs meta\")\n def test_dataset_repr(self):\n data = build_dask_array(\"data\")\n nonindex_coord = build_dask_array(\"coord\")\n ds = Dataset(data_vars={\"a\": (\"x\", data)}, coords={\"y\": (\"x\", nonindex_coord)})\n expected = dedent(\n \"\"\"\\\n <xarray.Dataset>\n Dimensions: (x: 1)\n Coordinates:\n y (x) int64 dask.array<chunksize=(1,), meta=np.ndarray>\n Dimensions without coordinates: x\n Data variables:\n a (x) int64 dask.array<chunksize=(1,), meta=np.ndarray>\"\"\"\n )\n assert expected == repr(ds)\n assert kernel_call_count == 0 # should not evaluate dask array\n\n def test_dataarray_pickle(self):\n # Test that 
pickling/unpickling converts the dask backend\n # to numpy in neither the data variable nor the non-index coords\n data = build_dask_array(\"data\")\n nonindex_coord = build_dask_array(\"coord\")\n a1 = DataArray(data, dims=[\"x\"], coords={\"y\": (\"x\", nonindex_coord)})\n a1.compute()\n assert not a1._in_memory\n assert not a1.coords[\"y\"]._in_memory\n assert kernel_call_count == 2\n a2 = pickle.loads(pickle.dumps(a1))\n assert kernel_call_count == 2\n assert_identical(a1, a2)\n assert not a1._in_memory\n assert not a2._in_memory\n assert not a1.coords[\"y\"]._in_memory\n assert not a2.coords[\"y\"]._in_memory\n\n def test_dataset_pickle(self):\n # Test that pickling/unpickling converts the dask backend\n # to numpy in neither the data variables nor the non-index coords\n data = build_dask_array(\"data\")\n nonindex_coord = build_dask_array(\"coord\")\n ds1 = Dataset(data_vars={\"a\": (\"x\", data)}, coords={\"y\": (\"x\", nonindex_coord)})\n ds1.compute()\n assert not ds1[\"a\"]._in_memory\n assert not ds1[\"y\"]._in_memory\n assert kernel_call_count == 2\n ds2 = pickle.loads(pickle.dumps(ds1))\n assert kernel_call_count == 2\n assert_identical(ds1, ds2)\n assert not ds1[\"a\"]._in_memory\n assert not ds2[\"a\"]._in_memory\n assert not ds1[\"y\"]._in_memory\n assert not ds2[\"y\"]._in_memory\n\n def test_dataarray_getattr(self):\n # ipython/jupyter does a long list of getattr() calls to when trying to\n # represent an object.\n # Make sure we're not accidentally computing dask variables.\n data = build_dask_array(\"data\")\n nonindex_coord = build_dask_array(\"coord\")\n a = DataArray(data, dims=[\"x\"], coords={\"y\": (\"x\", nonindex_coord)})\n with suppress(AttributeError):\n getattr(a, \"NOTEXIST\")\n assert kernel_call_count == 0\n\n def test_dataset_getattr(self):\n # Test that pickling/unpickling converts the dask backend\n # to numpy in neither the data variables nor the non-index coords\n data = build_dask_array(\"data\")\n nonindex_coord = build_dask_array(\"coord\")\n ds = Dataset(data_vars={\"a\": (\"x\", data)}, coords={\"y\": (\"x\", nonindex_coord)})\n with suppress(AttributeError):\n getattr(ds, \"NOTEXIST\")\n assert kernel_call_count == 0\n\n def test_values(self):\n # Test that invoking the values property does not convert the dask\n # backend to numpy\n a = DataArray([1, 2]).chunk()\n assert not a._in_memory\n assert a.values.tolist() == [1, 2]\n assert not a._in_memory\n\n def test_from_dask_variable(self):\n # Test array creation from Variable with dask backend.\n # This is used e.g. 
in broadcast()\n a = DataArray(self.lazy_array.variable, coords={\"x\": range(4)}, name=\"foo\")\n self.assertLazyAndIdentical(self.lazy_array, a)\n\n @requires_pint_0_15(reason=\"Need __dask_tokenize__\")\n def test_tokenize_duck_dask_array(self):\n import pint\n\n unit_registry = pint.UnitRegistry()\n\n q = unit_registry.Quantity(self.data, unit_registry.meter)\n data_array = xr.DataArray(\n data=q, coords={\"x\": range(4)}, dims=(\"x\", \"y\"), name=\"foo\"\n )\n\n token = dask.base.tokenize(data_array)\n post_op = data_array + 5 * unit_registry.meter\n\n assert dask.base.tokenize(data_array) != dask.base.tokenize(post_op)\n # Immutability check\n assert dask.base.tokenize(data_array) == token\n\n\nclass TestToDaskDataFrame:\n def test_to_dask_dataframe(self):\n # Test conversion of Datasets to dask DataFrames\n x = np.random.randn(10)\n y = np.arange(10, dtype=\"uint8\")\n t = list(\"abcdefghij\")\n\n ds = Dataset(\n {\"a\": (\"t\", da.from_array(x, chunks=4)), \"b\": (\"t\", y), \"t\": (\"t\", t)}\n )\n\n expected_pd = pd.DataFrame({\"a\": x, \"b\": y}, index=pd.Index(t, name=\"t\"))\n\n # test if 1-D index is correctly set up\n expected = dd.from_pandas(expected_pd, chunksize=4)\n actual = ds.to_dask_dataframe(set_index=True)\n # test if we have dask dataframes\n assert isinstance(actual, dd.DataFrame)\n\n # use the .equals from pandas to check dataframes are equivalent\n assert_frame_equal(expected.compute(), actual.compute())\n\n # test if no index is given\n expected = dd.from_pandas(expected_pd.reset_index(drop=False), chunksize=4)\n\n actual = ds.to_dask_dataframe(set_index=False)\n\n assert isinstance(actual, dd.DataFrame)\n assert_frame_equal(expected.compute(), actual.compute())\n\n def test_to_dask_dataframe_2D(self):\n # Test if 2-D dataset is supplied\n w = np.random.randn(2, 3)\n ds = Dataset({\"w\": ((\"x\", \"y\"), da.from_array(w, chunks=(1, 2)))})\n ds[\"x\"] = (\"x\", np.array([0, 1], np.int64))\n ds[\"y\"] = (\"y\", list(\"abc\"))\n\n # dask dataframes do not (yet) support multiindex,\n # but when it does, this would be the expected index:\n exp_index = pd.MultiIndex.from_arrays(\n [[0, 0, 0, 1, 1, 1], [\"a\", \"b\", \"c\", \"a\", \"b\", \"c\"]], names=[\"x\", \"y\"]\n )\n expected = pd.DataFrame({\"w\": w.reshape(-1)}, index=exp_index)\n # so for now, reset the index\n expected = expected.reset_index(drop=False)\n actual = ds.to_dask_dataframe(set_index=False)\n\n assert isinstance(actual, dd.DataFrame)\n assert_frame_equal(expected, actual.compute())\n\n @pytest.mark.xfail(raises=NotImplementedError)\n def test_to_dask_dataframe_2D_set_index(self):\n # This will fail until dask implements MultiIndex support\n w = da.from_array(np.random.randn(2, 3), chunks=(1, 2))\n ds = Dataset({\"w\": ((\"x\", \"y\"), w)})\n ds[\"x\"] = (\"x\", np.array([0, 1], np.int64))\n ds[\"y\"] = (\"y\", list(\"abc\"))\n\n expected = ds.compute().to_dataframe()\n actual = ds.to_dask_dataframe(set_index=True)\n assert isinstance(actual, dd.DataFrame)\n assert_frame_equal(expected, actual.compute())\n\n def test_to_dask_dataframe_coordinates(self):\n # Test if coordinate is also a dask array\n x = np.random.randn(10)\n t = np.arange(10) * 2\n\n ds = Dataset(\n {\n \"a\": (\"t\", da.from_array(x, chunks=4)),\n \"t\": (\"t\", da.from_array(t, chunks=4)),\n }\n )\n\n expected_pd = pd.DataFrame({\"a\": x}, index=pd.Index(t, name=\"t\"))\n expected = dd.from_pandas(expected_pd, chunksize=4)\n actual = ds.to_dask_dataframe(set_index=True)\n assert isinstance(actual, dd.DataFrame)\n 
assert_frame_equal(expected.compute(), actual.compute())\n\n def test_to_dask_dataframe_not_daskarray(self):\n # Test if DataArray is not a dask array\n x = np.random.randn(10)\n y = np.arange(10, dtype=\"uint8\")\n t = list(\"abcdefghij\")\n\n ds = Dataset({\"a\": (\"t\", x), \"b\": (\"t\", y), \"t\": (\"t\", t)})\n\n expected = pd.DataFrame({\"a\": x, \"b\": y}, index=pd.Index(t, name=\"t\"))\n\n actual = ds.to_dask_dataframe(set_index=True)\n assert isinstance(actual, dd.DataFrame)\n assert_frame_equal(expected, actual.compute())\n\n def test_to_dask_dataframe_no_coordinate(self):\n x = da.from_array(np.random.randn(10), chunks=4)\n ds = Dataset({\"x\": (\"dim_0\", x)})\n\n expected = ds.compute().to_dataframe().reset_index()\n actual = ds.to_dask_dataframe()\n assert isinstance(actual, dd.DataFrame)\n assert_frame_equal(expected, actual.compute())\n\n expected = ds.compute().to_dataframe()\n actual = ds.to_dask_dataframe(set_index=True)\n assert isinstance(actual, dd.DataFrame)\n assert_frame_equal(expected, actual.compute())\n\n def test_to_dask_dataframe_dim_order(self):\n values = np.array([[1, 2], [3, 4]], dtype=np.int64)\n ds = Dataset({\"w\": ((\"x\", \"y\"), values)}).chunk(1)\n\n expected = ds[\"w\"].to_series().reset_index()\n actual = ds.to_dask_dataframe(dim_order=[\"x\", \"y\"])\n assert isinstance(actual, dd.DataFrame)\n assert_frame_equal(expected, actual.compute())\n\n expected = ds[\"w\"].T.to_series().reset_index()\n actual = ds.to_dask_dataframe(dim_order=[\"y\", \"x\"])\n assert isinstance(actual, dd.DataFrame)\n assert_frame_equal(expected, actual.compute())\n\n with pytest.raises(ValueError, match=r\"does not match the set of dimensions\"):\n ds.to_dask_dataframe(dim_order=[\"x\"])\n\n\[email protected](\"method\", [\"load\", \"compute\"])\ndef test_dask_kwargs_variable(method):\n x = Variable(\"y\", da.from_array(np.arange(3), chunks=(2,)))\n # args should be passed on to da.Array.compute()\n with mock.patch.object(\n da.Array, \"compute\", return_value=np.arange(3)\n ) as mock_compute:\n getattr(x, method)(foo=\"bar\")\n mock_compute.assert_called_with(foo=\"bar\")\n\n\[email protected](\"method\", [\"load\", \"compute\", \"persist\"])\ndef test_dask_kwargs_dataarray(method):\n data = da.from_array(np.arange(3), chunks=(2,))\n x = DataArray(data)\n if method in [\"load\", \"compute\"]:\n dask_func = \"dask.array.compute\"\n else:\n dask_func = \"dask.persist\"\n # args should be passed on to \"dask_func\"\n with mock.patch(dask_func) as mock_func:\n getattr(x, method)(foo=\"bar\")\n mock_func.assert_called_with(data, foo=\"bar\")\n\n\[email protected](\"method\", [\"load\", \"compute\", \"persist\"])\ndef test_dask_kwargs_dataset(method):\n data = da.from_array(np.arange(3), chunks=(2,))\n x = Dataset({\"x\": ((\"y\"), data)})\n if method in [\"load\", \"compute\"]:\n dask_func = \"dask.array.compute\"\n else:\n dask_func = \"dask.persist\"\n # args should be passed on to \"dask_func\"\n with mock.patch(dask_func) as mock_func:\n getattr(x, method)(foo=\"bar\")\n mock_func.assert_called_with(data, foo=\"bar\")\n\n\nkernel_call_count = 0\n\n\ndef kernel(name):\n \"\"\"Dask kernel to test pickling/unpickling and __repr__.\n Must be global to make it pickleable.\n \"\"\"\n global kernel_call_count\n kernel_call_count += 1\n return np.ones(1, dtype=np.int64)\n\n\ndef build_dask_array(name):\n global kernel_call_count\n kernel_call_count = 0\n return dask.array.Array(\n dask={(name, 0): (kernel, name)}, name=name, chunks=((1,),), dtype=np.int64\n )\n\n\[email 
protected](\n \"persist\", [lambda x: x.persist(), lambda x: dask.persist(x)[0]]\n)\ndef test_persist_Dataset(persist):\n ds = Dataset({\"foo\": (\"x\", range(5)), \"bar\": (\"x\", range(5))}).chunk()\n ds = ds + 1\n n = len(ds.foo.data.dask)\n\n ds2 = persist(ds)\n\n assert len(ds2.foo.data.dask) == 1\n assert len(ds.foo.data.dask) == n # doesn't mutate in place\n\n\[email protected](\n \"persist\", [lambda x: x.persist(), lambda x: dask.persist(x)[0]]\n)\ndef test_persist_DataArray(persist):\n x = da.arange(10, chunks=(5,))\n y = DataArray(x)\n z = y + 1\n n = len(z.data.dask)\n\n zz = persist(z)\n\n assert len(z.data.dask) == n\n assert len(zz.data.dask) == zz.data.npartitions\n\n\ndef test_dataarray_with_dask_coords():\n import toolz\n\n x = xr.Variable(\"x\", da.arange(8, chunks=(4,)))\n y = xr.Variable(\"y\", da.arange(8, chunks=(4,)) * 2)\n data = da.random.random((8, 8), chunks=(4, 4)) + 1\n array = xr.DataArray(data, dims=[\"x\", \"y\"])\n array.coords[\"xx\"] = x\n array.coords[\"yy\"] = y\n\n assert dict(array.__dask_graph__()) == toolz.merge(\n data.__dask_graph__(), x.__dask_graph__(), y.__dask_graph__()\n )\n\n (array2,) = dask.compute(array)\n assert not dask.is_dask_collection(array2)\n\n assert all(isinstance(v._variable.data, np.ndarray) for v in array2.coords.values())\n\n\ndef test_basic_compute():\n ds = Dataset({\"foo\": (\"x\", range(5)), \"bar\": (\"x\", range(5))}).chunk({\"x\": 2})\n for get in [dask.threaded.get, dask.multiprocessing.get, dask.local.get_sync, None]:\n with dask.config.set(scheduler=get):\n ds.compute()\n ds.foo.compute()\n ds.foo.variable.compute()\n\n\ndef test_dask_layers_and_dependencies():\n ds = Dataset({\"foo\": (\"x\", range(5)), \"bar\": (\"x\", range(5))}).chunk()\n\n x = dask.delayed(ds)\n assert set(x.__dask_graph__().dependencies).issuperset(\n ds.__dask_graph__().dependencies\n )\n assert set(x.foo.__dask_graph__().dependencies).issuperset(\n ds.__dask_graph__().dependencies\n )\n\n\ndef make_da():\n da = xr.DataArray(\n np.ones((10, 20)),\n dims=[\"x\", \"y\"],\n coords={\"x\": np.arange(10), \"y\": np.arange(100, 120)},\n name=\"a\",\n ).chunk({\"x\": 4, \"y\": 5})\n da.x.attrs[\"long_name\"] = \"x\"\n da.attrs[\"test\"] = \"test\"\n da.coords[\"c2\"] = 0.5\n da.coords[\"ndcoord\"] = da.x * 2\n da.coords[\"cxy\"] = (da.x * da.y).chunk({\"x\": 4, \"y\": 5})\n\n return da\n\n\ndef make_ds():\n map_ds = xr.Dataset()\n map_ds[\"a\"] = make_da()\n map_ds[\"b\"] = map_ds.a + 50\n map_ds[\"c\"] = map_ds.x + 20\n map_ds = map_ds.chunk({\"x\": 4, \"y\": 5})\n map_ds[\"d\"] = (\"z\", [1, 1, 1, 1])\n map_ds[\"z\"] = [0, 1, 2, 3]\n map_ds[\"e\"] = map_ds.x + map_ds.y\n map_ds.coords[\"c1\"] = 0.5\n map_ds.coords[\"cx\"] = (\"x\", np.arange(len(map_ds.x)))\n map_ds.coords[\"cx\"].attrs[\"test2\"] = \"test2\"\n map_ds.attrs[\"test\"] = \"test\"\n map_ds.coords[\"xx\"] = map_ds[\"a\"] * map_ds.y\n\n map_ds.x.attrs[\"long_name\"] = \"x\"\n map_ds.y.attrs[\"long_name\"] = \"y\"\n\n return map_ds\n\n\n# fixtures cannot be used in parametrize statements\n# instead use this workaround\n# https://docs.pytest.org/en/latest/deprecations.html#calling-fixtures-directly\[email protected]\ndef map_da():\n return make_da()\n\n\[email protected]\ndef map_ds():\n return make_ds()\n\n\ndef test_unify_chunks(map_ds):\n ds_copy = map_ds.copy()\n ds_copy[\"cxy\"] = ds_copy.cxy.chunk({\"y\": 10})\n\n with pytest.raises(ValueError, match=r\"inconsistent chunks\"):\n ds_copy.chunks\n\n expected_chunks = {\"x\": (4, 4, 2), \"y\": (5, 5, 5, 5)}\n with 
raise_if_dask_computes():\n actual_chunks = ds_copy.unify_chunks().chunks\n assert actual_chunks == expected_chunks\n assert_identical(map_ds, ds_copy.unify_chunks())\n\n out_a, out_b = xr.unify_chunks(ds_copy.cxy, ds_copy.drop_vars(\"cxy\"))\n assert out_a.chunks == ((4, 4, 2), (5, 5, 5, 5))\n assert out_b.chunks == expected_chunks\n\n # Test unordered dims\n da = ds_copy[\"cxy\"]\n out_a, out_b = xr.unify_chunks(da.chunk({\"x\": -1}), da.T.chunk({\"y\": -1}))\n assert out_a.chunks == ((4, 4, 2), (5, 5, 5, 5))\n assert out_b.chunks == ((5, 5, 5, 5), (4, 4, 2))\n\n # Test mismatch\n with pytest.raises(ValueError, match=r\"Dimension 'x' size mismatch: 10 != 2\"):\n xr.unify_chunks(da, da.isel(x=slice(2)))\n\n\[email protected](\"obj\", [make_ds(), make_da()])\[email protected](\n \"transform\", [lambda x: x.compute(), lambda x: x.unify_chunks()]\n)\ndef test_unify_chunks_shallow_copy(obj, transform):\n obj = transform(obj)\n unified = obj.unify_chunks()\n assert_identical(obj, unified) and obj is not obj.unify_chunks()\n\n\[email protected](\"obj\", [make_da()])\ndef test_auto_chunk_da(obj):\n actual = obj.chunk(\"auto\").data\n expected = obj.data.rechunk(\"auto\")\n np.testing.assert_array_equal(actual, expected)\n assert actual.chunks == expected.chunks\n\n\ndef test_map_blocks_error(map_da, map_ds):\n def bad_func(darray):\n return (darray * darray.x + 5 * darray.y)[:1, :1]\n\n with pytest.raises(ValueError, match=r\"Received dimension 'x' of length 1\"):\n xr.map_blocks(bad_func, map_da).compute()\n\n def returns_numpy(darray):\n return (darray * darray.x + 5 * darray.y).values\n\n with pytest.raises(TypeError, match=r\"Function must return an xarray DataArray\"):\n xr.map_blocks(returns_numpy, map_da)\n\n with pytest.raises(TypeError, match=r\"args must be\"):\n xr.map_blocks(operator.add, map_da, args=10)\n\n with pytest.raises(TypeError, match=r\"kwargs must be\"):\n xr.map_blocks(operator.add, map_da, args=[10], kwargs=[20])\n\n def really_bad_func(darray):\n raise ValueError(\"couldn't do anything.\")\n\n with pytest.raises(Exception, match=r\"Cannot infer\"):\n xr.map_blocks(really_bad_func, map_da)\n\n ds_copy = map_ds.copy()\n ds_copy[\"cxy\"] = ds_copy.cxy.chunk({\"y\": 10})\n\n with pytest.raises(ValueError, match=r\"inconsistent chunks\"):\n xr.map_blocks(bad_func, ds_copy)\n\n with pytest.raises(TypeError, match=r\"Cannot pass dask collections\"):\n xr.map_blocks(bad_func, map_da, kwargs=dict(a=map_da.chunk()))\n\n\[email protected](\"obj\", [make_da(), make_ds()])\ndef test_map_blocks(obj):\n def func(obj):\n result = obj + obj.x + 5 * obj.y\n return result\n\n with raise_if_dask_computes():\n actual = xr.map_blocks(func, obj)\n expected = func(obj)\n assert_chunks_equal(expected.chunk(), actual)\n assert_identical(actual, expected)\n\n\[email protected](\"obj\", [make_da(), make_ds()])\ndef test_map_blocks_convert_args_to_list(obj):\n expected = obj + 10\n with raise_if_dask_computes():\n actual = xr.map_blocks(operator.add, obj, [10])\n assert_chunks_equal(expected.chunk(), actual)\n assert_identical(actual, expected)\n\n\ndef test_map_blocks_dask_args():\n da1 = xr.DataArray(\n np.ones((10, 20)),\n dims=[\"x\", \"y\"],\n coords={\"x\": np.arange(10), \"y\": np.arange(20)},\n ).chunk({\"x\": 5, \"y\": 4})\n\n # check that block shapes are the same\n def sumda(da1, da2):\n assert da1.shape == da2.shape\n return da1 + da2\n\n da2 = da1 + 1\n with raise_if_dask_computes():\n mapped = xr.map_blocks(sumda, da1, args=[da2])\n xr.testing.assert_equal(da1 + da2, mapped)\n\n # 
one dimension in common\n da2 = (da1 + 1).isel(x=1, drop=True)\n with raise_if_dask_computes():\n mapped = xr.map_blocks(operator.add, da1, args=[da2])\n xr.testing.assert_equal(da1 + da2, mapped)\n\n # test that everything works when dimension names are different\n da2 = (da1 + 1).isel(x=1, drop=True).rename({\"y\": \"k\"})\n with raise_if_dask_computes():\n mapped = xr.map_blocks(operator.add, da1, args=[da2])\n xr.testing.assert_equal(da1 + da2, mapped)\n\n with pytest.raises(ValueError, match=r\"Chunk sizes along dimension 'x'\"):\n xr.map_blocks(operator.add, da1, args=[da1.chunk({\"x\": 1})])\n\n with pytest.raises(ValueError, match=r\"indexes along dimension 'x' are not equal\"):\n xr.map_blocks(operator.add, da1, args=[da1.reindex(x=np.arange(20))])\n\n # reduction\n da1 = da1.chunk({\"x\": -1})\n da2 = da1 + 1\n with raise_if_dask_computes():\n mapped = xr.map_blocks(lambda a, b: (a + b).sum(\"x\"), da1, args=[da2])\n xr.testing.assert_equal((da1 + da2).sum(\"x\"), mapped)\n\n # reduction with template\n da1 = da1.chunk({\"x\": -1})\n da2 = da1 + 1\n with raise_if_dask_computes():\n mapped = xr.map_blocks(\n lambda a, b: (a + b).sum(\"x\"), da1, args=[da2], template=da1.sum(\"x\")\n )\n xr.testing.assert_equal((da1 + da2).sum(\"x\"), mapped)\n\n\[email protected](\"obj\", [make_da(), make_ds()])\ndef test_map_blocks_add_attrs(obj):\n def add_attrs(obj):\n obj = obj.copy(deep=True)\n obj.attrs[\"new\"] = \"new\"\n obj.cxy.attrs[\"new2\"] = \"new2\"\n return obj\n\n expected = add_attrs(obj)\n with raise_if_dask_computes():\n actual = xr.map_blocks(add_attrs, obj)\n\n assert_identical(actual, expected)\n\n # when template is specified, attrs are copied from template, not set by function\n with raise_if_dask_computes():\n actual = xr.map_blocks(add_attrs, obj, template=obj)\n assert_identical(actual, obj)\n\n\ndef test_map_blocks_change_name(map_da):\n def change_name(obj):\n obj = obj.copy(deep=True)\n obj.name = \"new\"\n return obj\n\n expected = change_name(map_da)\n with raise_if_dask_computes():\n actual = xr.map_blocks(change_name, map_da)\n\n assert_identical(actual, expected)\n\n\[email protected](\"obj\", [make_da(), make_ds()])\ndef test_map_blocks_kwargs(obj):\n expected = xr.full_like(obj, fill_value=np.nan)\n with raise_if_dask_computes():\n actual = xr.map_blocks(xr.full_like, obj, kwargs=dict(fill_value=np.nan))\n assert_chunks_equal(expected.chunk(), actual)\n assert_identical(actual, expected)\n\n\ndef test_map_blocks_to_array(map_ds):\n with raise_if_dask_computes():\n actual = xr.map_blocks(lambda x: x.to_array(), map_ds)\n\n # to_array does not preserve name, so cannot use assert_identical\n assert_equal(actual, map_ds.to_array())\n\n\[email protected](\n \"func\",\n [\n lambda x: x,\n lambda x: x.to_dataset(),\n lambda x: x.drop_vars(\"x\"),\n lambda x: x.expand_dims(k=[1, 2, 3]),\n lambda x: x.expand_dims(k=3),\n lambda x: x.assign_coords(new_coord=(\"y\", x.y.data * 2)),\n lambda x: x.astype(np.int32),\n lambda x: x.x,\n ],\n)\ndef test_map_blocks_da_transformations(func, map_da):\n with raise_if_dask_computes():\n actual = xr.map_blocks(func, map_da)\n\n assert_identical(actual, func(map_da))\n\n\[email protected](\n \"func\",\n [\n lambda x: x,\n lambda x: x.drop_vars(\"cxy\"),\n lambda x: x.drop_vars(\"a\"),\n lambda x: x.drop_vars(\"x\"),\n lambda x: x.expand_dims(k=[1, 2, 3]),\n lambda x: x.expand_dims(k=3),\n lambda x: x.rename({\"a\": \"new1\", \"b\": \"new2\"}),\n lambda x: x.x,\n ],\n)\ndef test_map_blocks_ds_transformations(func, map_ds):\n with 
raise_if_dask_computes():\n actual = xr.map_blocks(func, map_ds)\n\n assert_identical(actual, func(map_ds))\n\n\[email protected](\"obj\", [make_da(), make_ds()])\ndef test_map_blocks_da_ds_with_template(obj):\n func = lambda x: x.isel(x=[1])\n template = obj.isel(x=[1, 5, 9])\n with raise_if_dask_computes():\n actual = xr.map_blocks(func, obj, template=template)\n assert_identical(actual, template)\n\n with raise_if_dask_computes():\n actual = obj.map_blocks(func, template=template)\n assert_identical(actual, template)\n\n\ndef test_map_blocks_template_convert_object():\n da = make_da()\n func = lambda x: x.to_dataset().isel(x=[1])\n template = da.to_dataset().isel(x=[1, 5, 9])\n with raise_if_dask_computes():\n actual = xr.map_blocks(func, da, template=template)\n assert_identical(actual, template)\n\n ds = da.to_dataset()\n func = lambda x: x.to_array().isel(x=[1])\n template = ds.to_array().isel(x=[1, 5, 9])\n with raise_if_dask_computes():\n actual = xr.map_blocks(func, ds, template=template)\n assert_identical(actual, template)\n\n\[email protected](\"obj\", [make_da(), make_ds()])\ndef test_map_blocks_errors_bad_template(obj):\n with pytest.raises(ValueError, match=r\"unexpected coordinate variables\"):\n xr.map_blocks(lambda x: x.assign_coords(a=10), obj, template=obj).compute()\n with pytest.raises(ValueError, match=r\"does not contain coordinate variables\"):\n xr.map_blocks(lambda x: x.drop_vars(\"cxy\"), obj, template=obj).compute()\n with pytest.raises(ValueError, match=r\"Dimensions {'x'} missing\"):\n xr.map_blocks(lambda x: x.isel(x=1), obj, template=obj).compute()\n with pytest.raises(ValueError, match=r\"Received dimension 'x' of length 1\"):\n xr.map_blocks(lambda x: x.isel(x=[1]), obj, template=obj).compute()\n with pytest.raises(TypeError, match=r\"must be a DataArray\"):\n xr.map_blocks(lambda x: x.isel(x=[1]), obj, template=(obj,)).compute()\n with pytest.raises(ValueError, match=r\"map_blocks requires that one block\"):\n xr.map_blocks(\n lambda x: x.isel(x=[1]).assign_coords(x=10), obj, template=obj.isel(x=[1])\n ).compute()\n with pytest.raises(ValueError, match=r\"Expected index 'x' to be\"):\n xr.map_blocks(\n lambda a: a.isel(x=[1]).assign_coords(x=[120]), # assign bad index values\n obj,\n template=obj.isel(x=[1, 5, 9]),\n ).compute()\n\n\ndef test_map_blocks_errors_bad_template_2(map_ds):\n with pytest.raises(ValueError, match=r\"unexpected data variables {'xyz'}\"):\n xr.map_blocks(lambda x: x.assign(xyz=1), map_ds, template=map_ds).compute()\n\n\[email protected](\"obj\", [make_da(), make_ds()])\ndef test_map_blocks_object_method(obj):\n def func(obj):\n result = obj + obj.x + 5 * obj.y\n return result\n\n with raise_if_dask_computes():\n expected = xr.map_blocks(func, obj)\n actual = obj.map_blocks(func)\n\n assert_identical(expected, actual)\n\n\ndef test_map_blocks_hlg_layers():\n # regression test for #3599\n ds = xr.Dataset(\n {\n \"x\": ((\"a\",), dask.array.ones(10, chunks=(5,))),\n \"z\": ((\"b\",), dask.array.ones(10, chunks=(5,))),\n }\n )\n mapped = ds.map_blocks(lambda x: x)\n\n xr.testing.assert_equal(mapped, ds)\n\n\ndef test_make_meta(map_ds):\n from ..core.parallel import make_meta\n\n meta = make_meta(map_ds)\n\n for variable in map_ds._coord_names:\n assert variable in meta._coord_names\n assert meta.coords[variable].shape == (0,) * meta.coords[variable].ndim\n\n for variable in map_ds.data_vars:\n assert variable in meta.data_vars\n assert meta.data_vars[variable].shape == (0,) * meta.data_vars[variable].ndim\n\n\ndef 
test_identical_coords_no_computes():\n lons2 = xr.DataArray(da.zeros((10, 10), chunks=2), dims=(\"y\", \"x\"))\n a = xr.DataArray(\n da.zeros((10, 10), chunks=2), dims=(\"y\", \"x\"), coords={\"lons\": lons2}\n )\n b = xr.DataArray(\n da.zeros((10, 10), chunks=2), dims=(\"y\", \"x\"), coords={\"lons\": lons2}\n )\n with raise_if_dask_computes():\n c = a + b\n assert_identical(c, a)\n\n\[email protected](\n \"obj\", [make_da(), make_da().compute(), make_ds(), make_ds().compute()]\n)\[email protected](\n \"transform\",\n [\n lambda x: x.reset_coords(),\n lambda x: x.reset_coords(drop=True),\n lambda x: x.isel(x=1),\n lambda x: x.attrs.update(new_attrs=1),\n lambda x: x.assign_coords(cxy=1),\n lambda x: x.rename({\"x\": \"xnew\"}),\n lambda x: x.rename({\"cxy\": \"cxynew\"}),\n ],\n)\ndef test_token_changes_on_transform(obj, transform):\n with raise_if_dask_computes():\n assert dask.base.tokenize(obj) != dask.base.tokenize(transform(obj))\n\n\[email protected](\n \"obj\", [make_da(), make_da().compute(), make_ds(), make_ds().compute()]\n)\ndef test_token_changes_when_data_changes(obj):\n with raise_if_dask_computes():\n t1 = dask.base.tokenize(obj)\n\n # Change data_var\n if isinstance(obj, DataArray):\n obj *= 2\n else:\n obj[\"a\"] *= 2\n with raise_if_dask_computes():\n t2 = dask.base.tokenize(obj)\n assert t2 != t1\n\n # Change non-index coord\n obj.coords[\"ndcoord\"] *= 2\n with raise_if_dask_computes():\n t3 = dask.base.tokenize(obj)\n assert t3 != t2\n\n # Change IndexVariable\n obj = obj.assign_coords(x=obj.x * 2)\n with raise_if_dask_computes():\n t4 = dask.base.tokenize(obj)\n assert t4 != t3\n\n\[email protected](\"obj\", [make_da().compute(), make_ds().compute()])\ndef test_token_changes_when_buffer_changes(obj):\n with raise_if_dask_computes():\n t1 = dask.base.tokenize(obj)\n\n if isinstance(obj, DataArray):\n obj[0, 0] = 123\n else:\n obj[\"a\"][0, 0] = 123\n with raise_if_dask_computes():\n t2 = dask.base.tokenize(obj)\n assert t2 != t1\n\n obj.coords[\"ndcoord\"][0] = 123\n with raise_if_dask_computes():\n t3 = dask.base.tokenize(obj)\n assert t3 != t2\n\n\[email protected](\n \"transform\",\n [lambda x: x, lambda x: x.copy(deep=False), lambda x: x.copy(deep=True)],\n)\[email protected](\"obj\", [make_da(), make_ds(), make_ds().variables[\"a\"]])\ndef test_token_identical(obj, transform):\n with raise_if_dask_computes():\n assert dask.base.tokenize(obj) == dask.base.tokenize(transform(obj))\n assert dask.base.tokenize(obj.compute()) == dask.base.tokenize(\n transform(obj.compute())\n )\n\n\ndef test_recursive_token():\n \"\"\"Test that tokenization is invoked recursively, and doesn't just rely on the\n output of str()\n \"\"\"\n a = np.ones(10000)\n b = np.ones(10000)\n b[5000] = 2\n assert str(a) == str(b)\n assert dask.base.tokenize(a) != dask.base.tokenize(b)\n\n # Test DataArray and Variable\n da_a = DataArray(a)\n da_b = DataArray(b)\n assert dask.base.tokenize(da_a) != dask.base.tokenize(da_b)\n\n # Test Dataset\n ds_a = da_a.to_dataset(name=\"x\")\n ds_b = da_b.to_dataset(name=\"x\")\n assert dask.base.tokenize(ds_a) != dask.base.tokenize(ds_b)\n\n # Test IndexVariable\n da_a = DataArray(a, dims=[\"x\"], coords={\"x\": a})\n da_b = DataArray(a, dims=[\"x\"], coords={\"x\": b})\n assert dask.base.tokenize(da_a) != dask.base.tokenize(da_b)\n\n\n@requires_scipy_or_netCDF4\ndef test_normalize_token_with_backend(map_ds):\n with create_tmp_file(allow_cleanup_failure=ON_WINDOWS) as tmp_file:\n map_ds.to_netcdf(tmp_file)\n read = xr.open_dataset(tmp_file)\n assert not 
dask.base.tokenize(map_ds) == dask.base.tokenize(read)\n read.close()\n\n\[email protected](\n \"compat\", [\"broadcast_equals\", \"equals\", \"identical\", \"no_conflicts\"]\n)\ndef test_lazy_array_equiv_variables(compat):\n var1 = xr.Variable((\"y\", \"x\"), da.zeros((10, 10), chunks=2))\n var2 = xr.Variable((\"y\", \"x\"), da.zeros((10, 10), chunks=2))\n var3 = xr.Variable((\"y\", \"x\"), da.zeros((20, 10), chunks=2))\n\n with raise_if_dask_computes():\n assert getattr(var1, compat)(var2, equiv=lazy_array_equiv)\n # values are actually equal, but we don't know that till we compute, return None\n with raise_if_dask_computes():\n assert getattr(var1, compat)(var2 / 2, equiv=lazy_array_equiv) is None\n\n # shapes are not equal, return False without computes\n with raise_if_dask_computes():\n assert getattr(var1, compat)(var3, equiv=lazy_array_equiv) is False\n\n # if one or both arrays are numpy, return None\n assert getattr(var1, compat)(var2.compute(), equiv=lazy_array_equiv) is None\n assert (\n getattr(var1.compute(), compat)(var2.compute(), equiv=lazy_array_equiv) is None\n )\n\n with raise_if_dask_computes():\n assert getattr(var1, compat)(var2.transpose(\"y\", \"x\"))\n\n\[email protected](\n \"compat\", [\"broadcast_equals\", \"equals\", \"identical\", \"no_conflicts\"]\n)\ndef test_lazy_array_equiv_merge(compat):\n da1 = xr.DataArray(da.zeros((10, 10), chunks=2), dims=(\"y\", \"x\"))\n da2 = xr.DataArray(da.zeros((10, 10), chunks=2), dims=(\"y\", \"x\"))\n da3 = xr.DataArray(da.ones((20, 10), chunks=2), dims=(\"y\", \"x\"))\n\n with raise_if_dask_computes():\n xr.merge([da1, da2], compat=compat)\n # shapes are not equal; no computes necessary\n with raise_if_dask_computes(max_computes=0):\n with pytest.raises(ValueError):\n xr.merge([da1, da3], compat=compat)\n with raise_if_dask_computes(max_computes=2):\n xr.merge([da1, da2 / 2], compat=compat)\n\n\[email protected](\"ignore::FutureWarning\") # transpose_coords\[email protected](\"obj\", [make_da(), make_ds()])\[email protected](\n \"transform\",\n [\n lambda a: a.assign_attrs(new_attr=\"anew\"),\n lambda a: a.assign_coords(cxy=a.cxy),\n lambda a: a.copy(),\n lambda a: a.isel(x=np.arange(a.sizes[\"x\"])),\n lambda a: a.isel(x=slice(None)),\n lambda a: a.loc[dict(x=slice(None))],\n lambda a: a.loc[dict(x=np.arange(a.sizes[\"x\"]))],\n lambda a: a.loc[dict(x=a.x)],\n lambda a: a.sel(x=a.x),\n lambda a: a.sel(x=a.x.values),\n lambda a: a.transpose(...),\n lambda a: a.squeeze(), # no dimensions to squeeze\n lambda a: a.sortby(\"x\"), # \"x\" is already sorted\n lambda a: a.reindex(x=a.x),\n lambda a: a.reindex_like(a),\n lambda a: a.rename({\"cxy\": \"cnew\"}).rename({\"cnew\": \"cxy\"}),\n lambda a: a.pipe(lambda x: x),\n lambda a: xr.align(a, xr.zeros_like(a))[0],\n # assign\n # swap_dims\n # set_index / reset_index\n ],\n)\ndef test_transforms_pass_lazy_array_equiv(obj, transform):\n with raise_if_dask_computes():\n assert_equal(obj, transform(obj))\n\n\ndef test_more_transforms_pass_lazy_array_equiv(map_da, map_ds):\n with raise_if_dask_computes():\n assert_equal(map_ds.cxy.broadcast_like(map_ds.cxy), map_ds.cxy)\n assert_equal(xr.broadcast(map_ds.cxy, map_ds.cxy)[0], map_ds.cxy)\n assert_equal(map_ds.map(lambda x: x), map_ds)\n assert_equal(map_ds.set_coords(\"a\").reset_coords(\"a\"), map_ds)\n assert_equal(map_ds.update({\"a\": map_ds.a}), map_ds)\n\n # fails because of index error\n # assert_equal(\n # map_ds.rename_dims({\"x\": \"xnew\"}).rename_dims({\"xnew\": \"x\"}), map_ds\n # )\n\n assert_equal(\n 
map_ds.rename_vars({\"cxy\": \"cnew\"}).rename_vars({\"cnew\": \"cxy\"}), map_ds\n    )\n\n    assert_equal(map_da._from_temp_dataset(map_da._to_temp_dataset()), map_da)\n    assert_equal(map_da.astype(map_da.dtype), map_da)\n    assert_equal(map_da.transpose(\"y\", \"x\", transpose_coords=False).cxy, map_da.cxy)\n\n\ndef test_optimize():\n    # https://github.com/pydata/xarray/issues/3698\n    a = dask.array.ones((10, 4), chunks=(5, 2))\n    arr = xr.DataArray(a).chunk(5)\n    (arr2,) = dask.optimize(arr)\n    arr2.compute()\n\n\n# The graph_manipulation module is in dask since 2021.2 but it became usable with\n# xarray only since 2021.3\n@pytest.mark.skipif(LooseVersion(dask.__version__) <= \"2021.02.0\", reason=\"new module\")\ndef test_graph_manipulation():\n    \"\"\"dask.graph_manipulation passes an optional parameter, \"rename\", to the rebuilder\n    function returned by __dask_postperist__; also, the dsk passed to the rebuilder is\n    a HighLevelGraph whereas with dask.persist() and dask.optimize() it's a plain dict.\n    \"\"\"\n    import dask.graph_manipulation as gm\n\n    v = Variable([\"x\"], [1, 2]).chunk(-1).chunk(1) * 2\n    da = DataArray(v)\n    ds = Dataset({\"d1\": v[0], \"d2\": v[1], \"d3\": (\"x\", [3, 4])})\n\n    v2, da2, ds2 = gm.clone(v, da, ds)\n\n    assert_equal(v2, v)\n    assert_equal(da2, da)\n    assert_equal(ds2, ds)\n\n    for a, b in ((v, v2), (da, da2), (ds, ds2)):\n        assert a.__dask_layers__() != b.__dask_layers__()\n        assert len(a.__dask_layers__()) == len(b.__dask_layers__())\n        assert a.__dask_graph__().keys() != b.__dask_graph__().keys()\n        assert len(a.__dask_graph__()) == len(b.__dask_graph__())\n        assert a.__dask_graph__().layers.keys() != b.__dask_graph__().layers.keys()\n        assert len(a.__dask_graph__().layers) == len(b.__dask_graph__().layers)\n\n    # Above we performed a slice operation; adding the two slices back together creates\n    # a diamond-shaped dependency graph, which in turn will trigger a collision in layer\n    # names if we were to use HighLevelGraph.cull() instead of\n    # HighLevelGraph.cull_layers() in Dataset.__dask_postpersist__().\n    assert_equal(ds2.d1 + ds2.d2, ds.d1 + ds.d2)\n"
] | [
[
"numpy.ones",
"pandas.MultiIndex.from_arrays",
"numpy.maximum",
"numpy.random.randn",
"numpy.testing.assert_array_equal",
"numpy.arange",
"numpy.random.RandomState",
"numpy.array",
"numpy.sin",
"pandas.Index"
]
] |
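
The row above stores xarray's dask test suite together with the numpy/pandas entry points it touches. As a quick illustration (not part of the dataset row, and assuming xarray and dask are installed), here is a minimal sketch of the lazy-evaluation pattern those tests exercise; the toy shapes and the `lambda` are invented for the example:

```python
# Illustrative sketch only: wrap a dask array in a DataArray, transform it lazily,
# then compute. Uses only public xarray/dask calls.
import dask.array as da
import xarray as xr

data = da.ones((10, 20), chunks=(5, 5))               # dask-backed, nothing computed yet
arr = xr.DataArray(data, dims=["x", "y"], name="a")

result = xr.map_blocks(lambda block: block + 1, arr)  # still lazy
assert isinstance(result.data, da.Array)              # a task graph, not values

computed = result.compute()                           # kernels run here
assert float(computed[0, 0]) == 2.0
```

Computation is deferred until `.compute()`, which is the behaviour the `raise_if_dask_computes()` guards in the stored test file are checking.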
chenyu-2020/PaddleNLP | [
"14c3209118b2cadcce9a8f66b760c9cddb3a02ad"
] | [
"examples/machine_reading_comprehension/SQuAD/run_squad.py"
] | [
"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n# Copyright 2018 The HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport random\nimport time\nimport json\nimport math\n\nfrom functools import partial\nimport numpy as np\nimport paddle\n\nfrom paddle.io import DataLoader\nfrom args import parse_args\n\nimport paddlenlp as ppnlp\n\nfrom paddlenlp.data import Pad, Stack, Tuple, Dict\nfrom paddlenlp.transformers import BertForQuestionAnswering, BertTokenizer, ErnieForQuestionAnswering, ErnieTokenizer\nfrom paddlenlp.transformers import LinearDecayWithWarmup\nfrom paddlenlp.metrics.squad import squad_evaluate, compute_prediction\nfrom paddlenlp.datasets import load_dataset\n\nMODEL_CLASSES = {\n \"bert\": (BertForQuestionAnswering, BertTokenizer),\n \"ernie\": (ErnieForQuestionAnswering, ErnieTokenizer)\n}\n\n\ndef prepare_train_features(examples, tokenizer, args):\n # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results\n # in one example possible giving several features when a context is long, each of those features having a\n # context that overlaps a bit the context of the previous feature.\n #NOTE: Almost the same functionality as HuggingFace's prepare_train_features function. The main difference is\n # that HugggingFace uses ArrowTable as basic data structure, while we use list of dictionary instead.\n contexts = [examples[i]['context'] for i in range(len(examples))]\n questions = [examples[i]['question'] for i in range(len(examples))]\n\n tokenized_examples = tokenizer(\n questions,\n contexts,\n stride=args.doc_stride,\n max_seq_len=args.max_seq_length)\n\n # Let's label those examples!\n for i, tokenized_example in enumerate(tokenized_examples):\n # We will label impossible answers with the index of the CLS token.\n input_ids = tokenized_example[\"input_ids\"]\n cls_index = input_ids.index(tokenizer.cls_token_id)\n\n # The offset mappings will give us a map from token to character position in the original context. 
This will\n # help us compute the start_positions and end_positions.\n offsets = tokenized_example['offset_mapping']\n\n # Grab the sequence corresponding to that example (to know what is the context and what is the question).\n sequence_ids = tokenized_example['token_type_ids']\n\n # One example can give several spans, this is the index of the example containing this span of text.\n sample_index = tokenized_example['overflow_to_sample']\n answers = examples[sample_index]['answers']\n answer_starts = examples[sample_index]['answer_starts']\n\n # If no answers are given, set the cls_index as answer.\n if len(answer_starts) == 0:\n tokenized_examples[i][\"start_positions\"] = cls_index\n tokenized_examples[i][\"end_positions\"] = cls_index\n else:\n # Start/end character index of the answer in the text.\n start_char = answer_starts[0]\n end_char = start_char + len(answers[0])\n\n # Start token index of the current span in the text.\n token_start_index = 0\n while sequence_ids[token_start_index] != 1:\n token_start_index += 1\n\n # End token index of the current span in the text.\n token_end_index = len(input_ids) - 1\n while sequence_ids[token_end_index] != 1:\n token_end_index -= 1\n # Minus one more to reach actual text\n token_end_index -= 1\n\n # Detect if the answer is out of the span (in which case this feature is labeled with the CLS index).\n if not (offsets[token_start_index][0] <= start_char and\n offsets[token_end_index][1] >= end_char):\n tokenized_examples[i][\"start_positions\"] = cls_index\n tokenized_examples[i][\"end_positions\"] = cls_index\n else:\n # Otherwise move the token_start_index and token_end_index to the two ends of the answer.\n # Note: we could go after the last offset if the answer is the last word (edge case).\n while token_start_index < len(offsets) and offsets[\n token_start_index][0] <= start_char:\n token_start_index += 1\n tokenized_examples[i][\"start_positions\"] = token_start_index - 1\n while offsets[token_end_index][1] >= end_char:\n token_end_index -= 1\n tokenized_examples[i][\"end_positions\"] = token_end_index + 1\n\n return tokenized_examples\n\n\ndef prepare_validation_features(examples, tokenizer, args):\n # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results\n # in one example possible giving several features when a context is long, each of those features having a\n # context that overlaps a bit the context of the previous feature.\n #NOTE: Almost the same functionality as HuggingFace's prepare_train_features function. 
The main difference is\n # that HugggingFace uses ArrowTable as basic data structure, while we use list of dictionary instead.\n contexts = [examples[i]['context'] for i in range(len(examples))]\n questions = [examples[i]['question'] for i in range(len(examples))]\n\n tokenized_examples = tokenizer(\n questions,\n contexts,\n stride=args.doc_stride,\n max_seq_len=args.max_seq_length)\n\n # For validation, there is no need to compute start and end positions\n for i, tokenized_example in enumerate(tokenized_examples):\n # Grab the sequence corresponding to that example (to know what is the context and what is the question).\n sequence_ids = tokenized_example['token_type_ids']\n\n # One example can give several spans, this is the index of the example containing this span of text.\n sample_index = tokenized_example['overflow_to_sample']\n tokenized_examples[i][\"example_id\"] = examples[sample_index]['id']\n\n # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token\n # position is part of the context or not.\n tokenized_examples[i][\"offset_mapping\"] = [\n (o if sequence_ids[k] == 1 else None)\n for k, o in enumerate(tokenized_example[\"offset_mapping\"])\n ]\n\n return tokenized_examples\n\n\ndef set_seed(args):\n random.seed(args.seed)\n np.random.seed(args.seed)\n paddle.seed(args.seed)\n\n\[email protected]_grad()\ndef evaluate(model, data_loader, args):\n model.eval()\n\n all_start_logits = []\n all_end_logits = []\n tic_eval = time.time()\n\n for batch in data_loader:\n input_ids, token_type_ids = batch\n start_logits_tensor, end_logits_tensor = model(input_ids,\n token_type_ids)\n\n for idx in range(start_logits_tensor.shape[0]):\n if len(all_start_logits) % 1000 == 0 and len(all_start_logits):\n print(\"Processing example: %d\" % len(all_start_logits))\n print('time per 1000:', time.time() - tic_eval)\n tic_eval = time.time()\n\n all_start_logits.append(start_logits_tensor.numpy()[idx])\n all_end_logits.append(end_logits_tensor.numpy()[idx])\n\n all_predictions, all_nbest_json, scores_diff_json = compute_prediction(\n data_loader.dataset.data, data_loader.dataset.new_data,\n (all_start_logits, all_end_logits), args.version_2_with_negative,\n args.n_best_size, args.max_answer_length,\n args.null_score_diff_threshold)\n\n # Can also write all_nbest_json and scores_diff_json files if needed\n with open('prediction.json', \"w\", encoding='utf-8') as writer:\n writer.write(\n json.dumps(\n all_predictions, ensure_ascii=False, indent=4) + \"\\n\")\n\n squad_evaluate(\n examples=data_loader.dataset.data,\n preds=all_predictions,\n na_probs=scores_diff_json)\n\n model.train()\n\n\nclass CrossEntropyLossForSQuAD(paddle.nn.Layer):\n def __init__(self):\n super(CrossEntropyLossForSQuAD, self).__init__()\n\n def forward(self, y, label):\n start_logits, end_logits = y\n start_position, end_position = label\n start_position = paddle.unsqueeze(start_position, axis=-1)\n end_position = paddle.unsqueeze(end_position, axis=-1)\n start_loss = paddle.nn.functional.softmax_with_cross_entropy(\n logits=start_logits, label=start_position, soft_label=False)\n start_loss = paddle.mean(start_loss)\n end_loss = paddle.nn.functional.softmax_with_cross_entropy(\n logits=end_logits, label=end_position, soft_label=False)\n end_loss = paddle.mean(end_loss)\n\n loss = (start_loss + end_loss) / 2\n return loss\n\n\ndef run(args):\n paddle.set_device(args.device)\n if paddle.distributed.get_world_size() > 1:\n paddle.distributed.init_parallel_env()\n rank = 
paddle.distributed.get_rank()\n args.model_type = args.model_type.lower()\n model_class, tokenizer_class = MODEL_CLASSES[args.model_type]\n tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path)\n\n set_seed(args)\n if rank == 0:\n if os.path.exists(args.model_name_or_path):\n print(\"init checkpoint from %s\" % args.model_name_or_path)\n\n model = model_class.from_pretrained(args.model_name_or_path)\n\n if paddle.distributed.get_world_size() > 1:\n model = paddle.DataParallel(model)\n\n if args.do_train:\n if args.train_file:\n train_ds = load_dataset('squad', data_files=args.train_file)\n elif args.version_2_with_negative:\n train_ds = load_dataset('squad', splits='train_v2')\n else:\n train_ds = load_dataset('squad', splits='train_v1')\n train_ds.map(partial(\n prepare_train_features, tokenizer=tokenizer, args=args),\n batched=True)\n train_batch_sampler = paddle.io.DistributedBatchSampler(\n train_ds, batch_size=args.batch_size, shuffle=True)\n train_batchify_fn = lambda samples, fn=Dict({\n \"input_ids\": Pad(axis=0, pad_val=tokenizer.pad_token_id),\n \"token_type_ids\": Pad(axis=0, pad_val=tokenizer.pad_token_type_id),\n \"start_positions\": Stack(dtype=\"int64\"),\n \"end_positions\": Stack(dtype=\"int64\")\n }): fn(samples)\n\n train_data_loader = DataLoader(\n dataset=train_ds,\n batch_sampler=train_batch_sampler,\n collate_fn=train_batchify_fn,\n return_list=True)\n\n num_training_steps = args.max_steps if args.max_steps > 0 else len(\n train_data_loader) * args.num_train_epochs\n num_train_epochs = math.ceil(num_training_steps /\n len(train_data_loader))\n\n lr_scheduler = LinearDecayWithWarmup(\n args.learning_rate, num_training_steps, args.warmup_proportion)\n\n # Generate parameter names needed to perform weight decay.\n # All bias and LayerNorm parameters are excluded.\n decay_params = [\n p.name for n, p in model.named_parameters()\n if not any(nd in n for nd in [\"bias\", \"norm\"])\n ]\n optimizer = paddle.optimizer.AdamW(\n learning_rate=lr_scheduler,\n epsilon=args.adam_epsilon,\n parameters=model.parameters(),\n weight_decay=args.weight_decay,\n apply_decay_param_fun=lambda x: x in decay_params)\n criterion = CrossEntropyLossForSQuAD()\n\n global_step = 0\n tic_train = time.time()\n for epoch in range(num_train_epochs):\n for step, batch in enumerate(train_data_loader):\n global_step += 1\n input_ids, token_type_ids, start_positions, end_positions = batch\n\n logits = model(\n input_ids=input_ids, token_type_ids=token_type_ids)\n loss = criterion(logits, (start_positions, end_positions))\n\n if global_step % args.logging_steps == 0:\n print(\n \"global step %d, epoch: %d, batch: %d, loss: %f, speed: %.2f step/s\"\n % (global_step, epoch + 1, step + 1, loss,\n args.logging_steps / (time.time() - tic_train)))\n tic_train = time.time()\n loss.backward()\n optimizer.step()\n lr_scheduler.step()\n optimizer.clear_grad()\n\n if global_step % args.save_steps == 0 or global_step == num_training_steps:\n if rank == 0:\n output_dir = os.path.join(args.output_dir,\n \"model_%d\" % global_step)\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n # need better way to get inner model of DataParallel\n model_to_save = model._layers if isinstance(\n model, paddle.DataParallel) else model\n model_to_save.save_pretrained(output_dir)\n tokenizer.save_pretrained(output_dir)\n print('Saving checkpoint to:', output_dir)\n if global_step == num_training_steps:\n break\n\n if args.do_predict and rank == 0:\n if args.predict_file:\n dev_ds = load_dataset('squad', 
data_files=args.predict_file)\n elif args.version_2_with_negative:\n dev_ds = load_dataset('squad', splits='dev_v2')\n else:\n dev_ds = load_dataset('squad', splits='dev_v1')\n\n dev_ds.map(partial(\n prepare_validation_features, tokenizer=tokenizer, args=args),\n batched=True)\n dev_batch_sampler = paddle.io.BatchSampler(\n dev_ds, batch_size=args.batch_size, shuffle=False)\n\n dev_batchify_fn = lambda samples, fn=Dict({\n \"input_ids\": Pad(axis=0, pad_val=tokenizer.pad_token_id),\n \"token_type_ids\": Pad(axis=0, pad_val=tokenizer.pad_token_type_id)\n }): fn(samples)\n\n dev_data_loader = DataLoader(\n dataset=dev_ds,\n batch_sampler=dev_batch_sampler,\n collate_fn=dev_batchify_fn,\n return_list=True)\n\n evaluate(model, dev_data_loader, args)\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n run(args)\n"
] | [
[
"numpy.random.seed"
]
] |
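
The `apis` column for this row records only `numpy.random.seed`, which `run_squad.py` calls inside its `set_seed` helper. A hypothetical, trimmed-down sketch of that seeding pattern, reduced to the numpy part so it runs without paddle installed:

```python
# Illustrative sketch only: reproducible draws via the numpy global RNG.
import random

import numpy as np


def set_seed(seed: int) -> None:
    random.seed(seed)     # Python's built-in RNG
    np.random.seed(seed)  # numpy's legacy global RNG (the API recorded above)


set_seed(42)
first = np.random.rand(3)
set_seed(42)
second = np.random.rand(3)
assert (first == second).all()  # same seed, identical draws
```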
ivyshihwork/python | [
"f0324c0d61f98948316f79886084adb44752b9b2"
] | [
"games/06_us-states-game/test.py"
] | [
"import os\nimport pandas\nimport csv\n\ndirname = os.path.dirname(__file__)\nstate_file = os.path.join(dirname, '50_states.csv')\n\nstate_coors = pandas.read_csv(state_file)\nstate='ohio'\n\nstate_info = state_coors[state_coors.state.str.lower() == state]\n# state_name = state_info.values[0,0].lower()\n# state_x = state_info.values[0,1]\n# state_y = state_info.values[0,2]\n\nstate_name = state_info.state.values[0]\nstate_x = int(state_info.x)\nstate_y = int(state_info.y)\n\nprint(state_name)\nprint(state_x)\nprint(state_y)\n\n# def rightState(guess_state):\n# if state_coors[state_coors.state.str.lower() == guess_state].state.values[0].lower() == guess_state:\n# global right_answer\n# right_answer += 1\n# return True\n# else:\n# return False\n#\n# right_answer = 0\n#\n# print(rightState(state))\n#\n#\n# state_names = [ state.lower() for state in state_coors.state.to_list()]\n# print(state_names)\n"
] | [
[
"pandas.read_csv"
]
] |
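
This row's script looks a state up in `50_states.csv` via `pandas.read_csv` and a lowercase string match. A small self-contained sketch of the same lookup pattern, with an in-memory CSV and made-up coordinates standing in for the real file:

```python
# Illustrative sketch only: read_csv + boolean-mask lookup, as in test.py.
import io

import pandas as pd

csv_text = "state,x,y\nOhio,112,-80\nTexas,-40,-130\n"     # stand-in for 50_states.csv
state_coors = pd.read_csv(io.StringIO(csv_text))

guess = "ohio"
match = state_coors[state_coors.state.str.lower() == guess]
print(match.state.values[0], int(match.x.values[0]), int(match.y.values[0]))
# -> Ohio 112 -80
```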
munrojm/pymatgen | [
"95514da2c1f4bd0ee897e657de768ca987fe05e9"
] | [
"pymatgen/analysis/chemenv/utils/coordination_geometry_utils.py"
] | [
"# coding: utf-8\n# Copyright (c) Pymatgen Development Team.\n# Distributed under the terms of the MIT License.\n\n\n\"\"\"\nThis module contains some utility functions and classes that are used in the chemenv package.\n\"\"\"\n\n__author__ = \"David Waroquiers\"\n__copyright__ = \"Copyright 2012, The Materials Project\"\n__credits__ = \"Geoffroy Hautier\"\n__version__ = \"2.0\"\n__maintainer__ = \"David Waroquiers\"\n__email__ = \"[email protected]\"\n__date__ = \"Feb 20, 2016\"\n\nimport math\n\nimport numpy as np\nfrom numpy.linalg import norm\nfrom scipy.integrate import quad\nfrom scipy.interpolate import UnivariateSpline\nfrom scipy.spatial import ConvexHull\n\nfrom pymatgen.analysis.chemenv.utils.chemenv_errors import SolidAngleError\n\n\ndef get_lower_and_upper_f(surface_calculation_options):\n \"\"\"Get the lower and upper functions defining a surface in the distance-angle space of neighbors.\n\n :param surface_calculation_options: Options for the surface.\n :return: Dictionary containing the \"lower\" and \"upper\" functions for the surface.\n \"\"\"\n mindist = surface_calculation_options[\"distance_bounds\"][\"lower\"]\n maxdist = surface_calculation_options[\"distance_bounds\"][\"upper\"]\n minang = surface_calculation_options[\"angle_bounds\"][\"lower\"]\n maxang = surface_calculation_options[\"angle_bounds\"][\"upper\"]\n if surface_calculation_options[\"type\"] == \"standard_elliptic\":\n lower_and_upper_functions = quarter_ellipsis_functions(xx=(mindist, maxang), yy=(maxdist, minang))\n elif surface_calculation_options[\"type\"] == \"standard_diamond\":\n deltadist = surface_calculation_options[\"distance_bounds\"][\"delta\"]\n deltaang = surface_calculation_options[\"angle_bounds\"][\"delta\"]\n lower_and_upper_functions = diamond_functions(\n xx=(mindist, maxang), yy=(maxdist, minang), x_y0=deltadist, y_x0=deltaang\n )\n elif surface_calculation_options[\"type\"] == \"standard_spline\":\n lower_points = surface_calculation_options[\"lower_points\"]\n upper_points = surface_calculation_options[\"upper_points\"]\n degree = surface_calculation_options[\"degree\"]\n lower_and_upper_functions = spline_functions(\n lower_points=lower_points, upper_points=upper_points, degree=degree\n )\n else:\n raise ValueError(\n 'Surface calculation of type \"{}\" ' \"is not implemented\".format(surface_calculation_options[\"type\"])\n )\n return lower_and_upper_functions\n\n\ndef function_comparison(f1, f2, x1, x2, numpoints_check=500):\n \"\"\"\n Method that compares two functions\n\n Args:\n f1: First function to compare\n f2: Second function to compare\n x1: Lower bound of the interval to compare\n x2: Upper bound of the interval to compare\n numpoints_check: Number of points used to compare the functions\n\n Returns:\n Whether the function are equal (\"=\"), f1 is always lower than f2 (\"<\"), f1 is always larger than f2 (\">\"),\n f1 is always lower than or equal to f2 (\"<\"), f1 is always larger than or equal to f2 (\">\") on the\n interval [x1, x2]. If the two functions cross, a RuntimeError is thrown (i.e. 
we expect to compare\n functions that do not cross...)\n \"\"\"\n xx = np.linspace(x1, x2, num=numpoints_check)\n y1 = f1(xx)\n y2 = f2(xx)\n if np.all(y1 < y2):\n return \"<\"\n if np.all(y1 > y2):\n return \">\"\n if np.all(y1 == y2):\n return \"=\"\n if np.all(y1 <= y2):\n return \"<=\"\n if np.all(y1 >= y2):\n return \">=\"\n raise RuntimeError(\"Error in comparing functions f1 and f2 ...\")\n\n\ndef quarter_ellipsis_functions(xx, yy):\n \"\"\"\n Method that creates two quarter-ellipse functions based on points xx and yy. The ellipsis is supposed to\n be aligned with the axes. The two ellipsis pass through the two points xx and yy.\n\n Args:\n xx:\n First point\n yy:\n Second point\n\n Returns:\n A dictionary with the lower and upper quarter ellipsis functions.\n \"\"\"\n npxx = np.array(xx)\n npyy = np.array(yy)\n if np.any(npxx == npyy):\n raise RuntimeError(\"Invalid points for quarter_ellipsis_functions\")\n if np.all(npxx < npyy) or np.all(npxx > npyy):\n if npxx[0] < npyy[0]:\n p1 = npxx\n p2 = npyy\n else:\n p1 = npyy\n p2 = npxx\n c_lower = np.array([p1[0], p2[1]])\n c_upper = np.array([p2[0], p1[1]])\n b2 = (p2[1] - p1[1]) ** 2\n else:\n if npxx[0] < npyy[0]:\n p1 = npxx\n p2 = npyy\n else:\n p1 = npyy\n p2 = npxx\n c_lower = np.array([p2[0], p1[1]])\n c_upper = np.array([p1[0], p2[1]])\n b2 = (p1[1] - p2[1]) ** 2\n b2overa2 = b2 / (p2[0] - p1[0]) ** 2\n\n def lower(x):\n return c_lower[1] - np.sqrt(b2 - b2overa2 * (x - c_lower[0]) ** 2)\n\n def upper(x):\n return c_upper[1] + np.sqrt(b2 - b2overa2 * (x - c_upper[0]) ** 2)\n\n return {\"lower\": lower, \"upper\": upper}\n\n\ndef spline_functions(lower_points, upper_points, degree=3):\n \"\"\"\n Method that creates two (upper and lower) spline functions based on points lower_points and upper_points.\n\n Args:\n lower_points:\n Points defining the lower function.\n upper_points:\n Points defining the upper function.\n degree:\n Degree for the spline function\n\n Returns:\n A dictionary with the lower and upper spline functions.\n \"\"\"\n lower_xx = np.array([pp[0] for pp in lower_points])\n lower_yy = np.array([pp[1] for pp in lower_points])\n upper_xx = np.array([pp[0] for pp in upper_points])\n upper_yy = np.array([pp[1] for pp in upper_points])\n\n lower_spline = UnivariateSpline(lower_xx, lower_yy, k=degree, s=0)\n upper_spline = UnivariateSpline(upper_xx, upper_yy, k=degree, s=0)\n\n def lower(x):\n return lower_spline(x)\n\n def upper(x):\n return upper_spline(x)\n\n return {\"lower\": lower, \"upper\": upper}\n\n\ndef diamond_functions(xx, yy, y_x0, x_y0):\n r\"\"\"\n Method that creates two upper and lower functions based on points xx and yy\n as well as intercepts defined by y_x0 and x_y0. 
The resulting functions\n form kind of a distorted diamond-like structure aligned from\n point xx to point yy.\n\n Schematically :\n\n xx is symbolized by x, yy is symbolized by y, y_x0 is equal to the distance\n from x to a, x_y0 is equal to the distance from x to b, the lines a-p and\n b-q are parallel to the line x-y such that points p and q are\n obtained automatically.\n In case of an increasing diamond the lower function is x-b-q and the upper\n function is a-p-y while in case of a\n decreasing diamond, the lower function is a-p-y and the upper function is\n x-b-q.\n\n Increasing diamond | Decreasing diamond\n p--y x----b\n / /| |\\ \\\n / / | | \\ q\n / / | a \\ |\n a / | \\ \\ |\n | / q \\ \\ |\n |/ / \\ \\|\n x----b p--y\n\n Args:\n xx:\n First point\n yy:\n Second point\n\n Returns:\n A dictionary with the lower and upper diamond functions.\n \"\"\"\n npxx = np.array(xx)\n npyy = np.array(yy)\n if np.any(npxx == npyy):\n raise RuntimeError(\"Invalid points for diamond_functions\")\n if np.all(npxx < npyy) or np.all(npxx > npyy):\n if npxx[0] < npyy[0]:\n p1 = npxx\n p2 = npyy\n else:\n p1 = npyy\n p2 = npxx\n else:\n if npxx[0] < npyy[0]:\n p1 = npxx\n p2 = npyy\n else:\n p1 = npyy\n p2 = npxx\n slope = (p2[1] - p1[1]) / (p2[0] - p1[0])\n if slope > 0.0:\n x_bpoint = p1[0] + x_y0\n myy = p1[1]\n bq_intercept = myy - slope * x_bpoint\n myx = p1[0]\n myy = p1[1] + y_x0\n ap_intercept = myy - slope * myx\n x_ppoint = (p2[1] - ap_intercept) / slope\n\n def lower(x):\n return np.where(x <= x_bpoint, p1[1] * np.ones_like(x), slope * x + bq_intercept)\n\n def upper(x):\n return np.where(x >= x_ppoint, p2[1] * np.ones_like(x), slope * x + ap_intercept)\n\n else:\n x_bpoint = p1[0] + x_y0\n myy = p1[1]\n bq_intercept = myy - slope * x_bpoint\n myx = p1[0]\n myy = p1[1] - y_x0\n ap_intercept = myy - slope * myx\n x_ppoint = (p2[1] - ap_intercept) / slope\n\n def lower(x):\n return np.where(x >= x_ppoint, p2[1] * np.ones_like(x), slope * x + ap_intercept)\n\n def upper(x):\n return np.where(x <= x_bpoint, p1[1] * np.ones_like(x), slope * x + bq_intercept)\n\n return {\"lower\": lower, \"upper\": upper}\n\n\ndef rectangle_surface_intersection(\n rectangle,\n f_lower,\n f_upper,\n bounds_lower=None,\n bounds_upper=None,\n check=True,\n numpoints_check=500,\n):\n \"\"\"\n Method to calculate the surface of the intersection of a rectangle (aligned with axes) and another surface\n defined by two functions f_lower and f_upper.\n\n Args:\n rectangle:\n Rectangle defined as : ((x1, x2), (y1, y2)).\n f_lower:\n Function defining the lower bound of the surface.\n f_upper:\n Function defining the upper bound of the surface.\n bounds_lower:\n Interval in which the f_lower function is defined.\n bounds_upper:\n Interval in which the f_upper function is defined.\n check:\n Whether to check if f_lower is always lower than f_upper.\n numpoints_check:\n Number of points used to check whether f_lower is always lower than f_upper\n\n Returns:\n The surface of the intersection of the rectangle and the surface defined by f_lower and f_upper.\n \"\"\"\n x1 = np.min(rectangle[0])\n x2 = np.max(rectangle[0])\n y1 = np.min(rectangle[1])\n y2 = np.max(rectangle[1])\n # Check that f_lower is allways lower than f_upper between x1 and x2 if no bounds are given or between the bounds\n # of the f_lower and f_upper functions if they are given.\n if check:\n if bounds_lower is not None:\n if bounds_upper is not None:\n if not all(np.array(bounds_lower) == np.array(bounds_upper)):\n raise ValueError(\"Bounds should be 
identical for both f_lower and f_upper\")\n if \"<\" not in function_comparison(\n f1=f_lower,\n f2=f_upper,\n x1=bounds_lower[0],\n x2=bounds_lower[1],\n numpoints_check=numpoints_check,\n ):\n raise RuntimeError(\n \"Function f_lower is not allways lower or equal to function f_upper within \"\n \"the domain defined by the functions bounds.\"\n )\n else:\n raise ValueError(\"Bounds are given for f_lower but not for f_upper\")\n elif bounds_upper is not None:\n if bounds_lower is None:\n raise ValueError(\"Bounds are given for f_upper but not for f_lower\")\n if \"<\" not in function_comparison(\n f1=f_lower,\n f2=f_upper,\n x1=bounds_lower[0],\n x2=bounds_lower[1],\n numpoints_check=numpoints_check,\n ):\n raise RuntimeError(\n \"Function f_lower is not allways lower or equal to function f_upper within \"\n \"the domain defined by the functions bounds.\"\n )\n else:\n if \"<\" not in function_comparison(f1=f_lower, f2=f_upper, x1=x1, x2=x2, numpoints_check=numpoints_check):\n raise RuntimeError(\n \"Function f_lower is not allways lower or equal to function f_upper within \"\n \"the domain defined by x1 and x2.\"\n )\n if bounds_lower is None:\n raise NotImplementedError(\"Bounds should be given right now ...\")\n if x2 < bounds_lower[0] or x1 > bounds_lower[1]:\n return 0.0, 0.0\n if x1 < bounds_lower[0]:\n xmin = bounds_lower[0]\n else:\n xmin = x1\n if x2 > bounds_lower[1]:\n xmax = bounds_lower[1]\n else:\n xmax = x2\n\n def diff(x):\n flwx = f_lower(x)\n fupx = f_upper(x)\n minup = np.min([fupx, y2 * np.ones_like(fupx)], axis=0)\n maxlw = np.max([flwx, y1 * np.ones_like(flwx)], axis=0)\n zeros = np.zeros_like(fupx)\n upper = np.where(y2 >= flwx, np.where(y1 <= fupx, minup, zeros), zeros)\n lower = np.where(y1 <= fupx, np.where(y2 >= flwx, maxlw, zeros), zeros)\n return upper - lower\n\n return quad(diff, xmin, xmax)\n\n\ndef my_solid_angle(center, coords):\n \"\"\"\n Helper method to calculate the solid angle of a set of coords from the\n center.\n\n Args:\n center:\n Center to measure solid angle from.\n coords:\n List of coords to determine solid angle.\n\n Returns:\n The solid angle.\n \"\"\"\n o = np.array(center)\n r = [np.array(c) - o for c in coords]\n r.append(r[0])\n n = [np.cross(r[i + 1], r[i]) for i in range(len(r) - 1)]\n n.append(np.cross(r[1], r[0]))\n phi = 0.0\n for i in range(len(n) - 1):\n try:\n value = math.acos(-np.dot(n[i], n[i + 1]) / (np.linalg.norm(n[i]) * np.linalg.norm(n[i + 1])))\n except ValueError:\n mycos = -np.dot(n[i], n[i + 1]) / (np.linalg.norm(n[i]) * np.linalg.norm(n[i + 1]))\n if 0.999999999999 < mycos < 1.000000000001:\n value = math.acos(1.0)\n elif -0.999999999999 > mycos > -1.000000000001:\n value = math.acos(-1.0)\n else:\n raise SolidAngleError(mycos)\n phi += value\n return phi + (3 - len(r)) * math.pi\n\n\ndef vectorsToMatrix(aa, bb):\n \"\"\"\n Performs the vector multiplication of the elements of two vectors, constructing the 3x3 matrix.\n :param aa: One vector of size 3\n :param bb: Another vector of size 3\n :return: A 3x3 matrix M composed of the products of the elements of aa and bb :\n M_ij = aa_i * bb_j\n \"\"\"\n MM = np.zeros([3, 3], np.float_)\n for ii in range(3):\n for jj in range(3):\n MM[ii, jj] = aa[ii] * bb[jj]\n return MM\n\n\ndef matrixTimesVector(MM, aa):\n \"\"\"\n\n :param MM: A matrix of size 3x3\n :param aa: A vector of size 3\n :return: A vector of size 3 which is the product of the matrix by the vector\n \"\"\"\n bb = np.zeros(3, np.float_)\n for ii in range(3):\n bb[ii] = np.sum(MM[ii, :] * aa)\n return 
bb\n\n\ndef rotateCoords(coords, R):\n \"\"\"\n Rotate the list of points using rotation matrix R\n :param coords: List of points to be rotated\n :param R: Rotation matrix\n :return: List of rotated points\n \"\"\"\n newlist = []\n for pp in coords:\n rpp = matrixTimesVector(R, pp)\n newlist.append(rpp)\n return newlist\n\n\ndef rotateCoordsOpt(coords, R):\n \"\"\"\n Rotate the list of points using rotation matrix R\n :param coords: List of points to be rotated\n :param R: Rotation matrix\n :return: List of rotated points\n \"\"\"\n return [np.dot(R, pp) for pp in coords]\n\n\ndef changebasis(uu, vv, nn, pps):\n \"\"\"\n For a list of points given in standard coordinates (in terms of e1, e2 and e3), returns the same list\n expressed in the basis (uu, vv, nn), which is supposed to be orthonormal.\n :param uu: First vector of the basis\n :param vv: Second vector of the basis\n :param nn: Third vector of the bais\n :param pps: List of points in basis (e1, e2, e3)\n :return: List of points in basis (uu, vv, nn)\n \"\"\"\n MM = np.zeros([3, 3], np.float_)\n for ii in range(3):\n MM[ii, 0] = uu[ii]\n MM[ii, 1] = vv[ii]\n MM[ii, 2] = nn[ii]\n PP = np.linalg.inv(MM)\n newpps = []\n for pp in pps:\n newpps.append(matrixTimesVector(PP, pp))\n return newpps\n\n\ndef collinear(p1, p2, p3=None, tolerance=0.25):\n \"\"\"\n Checks if the three points p1, p2 and p3 are collinear or not within a given tolerance. The collinearity is\n checked by computing the area of the triangle defined by the three points p1, p2 and p3. If the area of this\n triangle is less than (tolerance x largest_triangle), then the three points are considered collinear. The\n largest_triangle is defined as the right triangle whose legs are the two smallest distances between the three\n points ie, its area is : 0.5 x (min(|p2-p1|,|p3-p1|,|p3-p2|) x secondmin(|p2-p1|,|p3-p1|,|p3-p2|))\n :param p1: First point\n :param p2: Second point\n :param p3: Third point (origin [0.0, 0.0, 0.0 if not given])\n :param tolerance: Area tolerance for the collinearity test (0.25 gives about 0.125 deviation from the line)\n :return: True if the three points are considered as collinear within the given tolerance, False otherwise\n \"\"\"\n if p3 is None:\n triangle_area = 0.5 * np.linalg.norm(np.cross(p1, p2))\n dist = np.sort([np.linalg.norm(p2 - p1), np.linalg.norm(p1), np.linalg.norm(p2)])\n else:\n triangle_area = 0.5 * np.linalg.norm(np.cross(p1 - p3, p2 - p3))\n dist = np.sort([np.linalg.norm(p2 - p1), np.linalg.norm(p3 - p1), np.linalg.norm(p3 - p2)])\n largest_triangle_area = 0.5 * dist[0] * dist[1]\n return triangle_area < tolerance * largest_triangle_area\n\n\ndef anticlockwise_sort(pps):\n \"\"\"\n Sort a list of 2D points in anticlockwise order\n :param pps: List of points to be sorted\n :return: Sorted list of points\n \"\"\"\n newpps = []\n angles = np.zeros(len(pps), np.float_)\n for ipp, pp in enumerate(pps):\n angles[ipp] = np.arctan2(pp[1], pp[0])\n iisorted = np.argsort(angles)\n for ii in range(len(pps)):\n newpps.append(pps[iisorted[ii]])\n return newpps\n\n\ndef anticlockwise_sort_indices(pps):\n \"\"\"\n Returns the indices that would sort a list of 2D points in anticlockwise order\n :param pps: List of points to be sorted\n :return: Indices of the sorted list of points\n \"\"\"\n angles = np.zeros(len(pps), np.float_)\n for ipp, pp in enumerate(pps):\n angles[ipp] = np.arctan2(pp[1], pp[0])\n return np.argsort(angles)\n\n\ndef sort_separation(separation):\n \"\"\"Sort a separation.\n\n :param separation: Initial separation.\n 
:return: Sorted list of separation.\n \"\"\"\n if len(separation[0]) > len(separation[2]):\n return [sorted(separation[2]), sorted(separation[1]), sorted(separation[0])]\n return [sorted(separation[0]), sorted(separation[1]), sorted(separation[2])]\n\n\ndef sort_separation_tuple(separation):\n \"\"\"Sort a separation\n\n :param separation: Initial separation\n :return: Sorted tuple of separation\n \"\"\"\n if len(separation[0]) > len(separation[2]):\n return (\n tuple(sorted(separation[2])),\n tuple(sorted(separation[1])),\n tuple(sorted(separation[0])),\n )\n return (\n tuple(sorted(separation[0])),\n tuple(sorted(separation[1])),\n tuple(sorted(separation[2])),\n )\n\n\ndef separation_in_list(separation_indices, separation_indices_list):\n \"\"\"\n Checks if the separation indices of a plane are already in the list\n :param separation_indices: list of separation indices (three arrays of integers)\n :param separation_indices_list: list of the list of separation indices to be compared to\n :return: True if the separation indices are already in the list, False otherwise\n \"\"\"\n sorted_separation = sort_separation(separation_indices)\n for sep in separation_indices_list:\n if len(sep[1]) == len(sorted_separation[1]) and np.allclose(sorted_separation[1], sep[1]):\n return True\n return False\n\n\ndef is_anion_cation_bond(valences, ii, jj):\n \"\"\"\n Checks if two given sites are an anion and a cation.\n :param valences: list of site valences\n :param ii: index of a site\n :param jj: index of another site\n :return: True if one site is an anion and the other is a cation (from the valences)\n \"\"\"\n if valences == \"undefined\":\n return True\n if valences[ii] == 0 or valences[jj] == 0:\n return True\n return (valences[ii] > 0 > valences[jj]) or (valences[jj] > 0 > valences[ii])\n\n\nclass Plane:\n \"\"\"\n Class used to describe a plane\n \"\"\"\n\n TEST_2D_POINTS = [\n np.array([0, 0], np.float_),\n np.array([1, 0], np.float_),\n np.array([0, 1], np.float_),\n np.array([-1, 0], np.float_),\n np.array([0, -1], np.float_),\n np.array([0, 2], np.float_),\n np.array([2, 0], np.float_),\n np.array([0, -2], np.float_),\n np.array([-2, 0], np.float_),\n np.array([1, 1], np.float_),\n np.array([2, 2], np.float_),\n np.array([-1, -1], np.float_),\n np.array([-2, -2], np.float_),\n np.array([1, 2], np.float_),\n np.array([1, -2], np.float_),\n np.array([-1, 2], np.float_),\n np.array([-1, -2], np.float_),\n np.array([2, 1], np.float_),\n np.array([2, -1], np.float_),\n np.array([-2, 1], np.float_),\n np.array([-2, -1], np.float_),\n ]\n\n def __init__(self, coefficients, p1=None, p2=None, p3=None):\n \"\"\"\n Initializes a plane from the 4 coefficients a, b, c and d of ax + by + cz + d = 0\n :param coefficients: abcd coefficients of the plane\n \"\"\"\n # Initializes the normal vector\n self.normal_vector = np.array([coefficients[0], coefficients[1], coefficients[2]], np.float_)\n normv = np.linalg.norm(self.normal_vector)\n self.normal_vector /= normv\n nonzeros = np.argwhere(self.normal_vector != 0.0).flatten()\n zeros = list(set(range(3)) - set(nonzeros))\n if len(nonzeros) == 0:\n raise ValueError(\"Normal vector is equal to 0.0\")\n if self.normal_vector[nonzeros[0]] < 0.0:\n self.normal_vector = -self.normal_vector\n dd = -np.float_(coefficients[3]) / normv\n else:\n dd = np.float_(coefficients[3]) / normv\n self._coefficients = np.array(\n [self.normal_vector[0], self.normal_vector[1], self.normal_vector[2], dd],\n np.float_,\n )\n self._crosses_origin = np.isclose(dd, 0.0, atol=1e-7, 
rtol=0.0)\n self.p1 = p1\n self.p2 = p2\n self.p3 = p3\n # Initializes 3 points belonging to the plane (useful for some methods)\n if self.p1 is None:\n self.init_3points(nonzeros, zeros)\n self.vector_to_origin = dd * self.normal_vector\n self.e1 = None\n self.e2 = None\n self.e3 = self.normal_vector\n\n def init_3points(self, nonzeros, zeros):\n \"\"\"Initialialize three random points on this plane.\n\n :param nonzeros: Indices of plane coefficients ([a, b, c]) that are not zero.\n :param zeros: Indices of plane coefficients ([a, b, c]) that are equal to zero.\n :return: None\n \"\"\"\n if len(nonzeros) == 3:\n self.p1 = np.array([-self.d / self.a, 0.0, 0.0], np.float_)\n self.p2 = np.array([0.0, -self.d / self.b, 0.0], np.float_)\n self.p3 = np.array([0.0, 0.0, -self.d / self.c], np.float_)\n elif len(nonzeros) == 2:\n self.p1 = np.zeros(3, np.float_)\n self.p1[nonzeros[1]] = -self.d / self.coefficients[nonzeros[1]]\n self.p2 = np.array(self.p1)\n self.p2[zeros[0]] = 1.0\n self.p3 = np.zeros(3, np.float_)\n self.p3[nonzeros[0]] = -self.d / self.coefficients[nonzeros[0]]\n elif len(nonzeros) == 1:\n self.p1 = np.zeros(3, np.float_)\n self.p1[nonzeros[0]] = -self.d / self.coefficients[nonzeros[0]]\n self.p2 = np.array(self.p1)\n self.p2[zeros[0]] = 1.0\n self.p3 = np.array(self.p1)\n self.p3[zeros[1]] = 1.0\n\n def __str__(self):\n \"\"\"\n String representation of the Plane object\n :return: String representation of the Plane object\n \"\"\"\n outs = [\"Plane object\"]\n outs.append(\" => Normal vector : {nn}\".format(nn=self.normal_vector))\n outs.append(\" => Equation of the plane ax + by + cz + d = 0\")\n outs.append(\" with a = {v}\".format(v=self._coefficients[0]))\n outs.append(\" b = {v}\".format(v=self._coefficients[1]))\n outs.append(\" c = {v}\".format(v=self._coefficients[2]))\n outs.append(\" d = {v}\".format(v=self._coefficients[3]))\n return \"\\n\".join(outs)\n\n def is_in_plane(self, pp, dist_tolerance):\n \"\"\"\n Determines if point pp is in the plane within the tolerance dist_tolerance\n :param pp: point to be tested\n :param dist_tolerance: tolerance on the distance to the plane within which point pp is considered in the plane\n :return: True if pp is in the plane, False otherwise\n \"\"\"\n return np.abs(np.dot(self.normal_vector, pp) + self._coefficients[3]) <= dist_tolerance\n\n def is_same_plane_as(self, plane):\n \"\"\"\n Checks whether the plane is identical to another Plane \"plane\"\n :param plane: Plane to be compared to\n :return: True if the two facets are identical, False otherwise\n \"\"\"\n return np.allclose(self._coefficients, plane.coefficients)\n\n def is_in_list(self, plane_list):\n \"\"\"\n Checks whether the plane is identical to one of the Planes in the plane_list list of Planes\n :param plane_list: List of Planes to be compared to\n :return: True if the plane is in the list, False otherwise\n \"\"\"\n for plane in plane_list:\n if self.is_same_plane_as(plane):\n return True\n return False\n\n def indices_separate(self, points, dist_tolerance):\n \"\"\"\n Returns three lists containing the indices of the points lying on one side of the plane, on the plane\n and on the other side of the plane. 
The dist_tolerance parameter controls the tolerance to which a point\n is considered to lie on the plane or not (distance to the plane)\n :param points: list of points\n :param dist_tolerance: tolerance to which a point is considered to lie on the plane\n or not (distance to the plane)\n :return: The lists of indices of the points on one side of the plane, on the plane and\n on the other side of the plane\n \"\"\"\n side1 = []\n inplane = []\n side2 = []\n for ip, pp in enumerate(points):\n if self.is_in_plane(pp, dist_tolerance):\n inplane.append(ip)\n else:\n if np.dot(pp + self.vector_to_origin, self.normal_vector) < 0.0:\n side1.append(ip)\n else:\n side2.append(ip)\n return [side1, inplane, side2]\n\n def distance_to_point(self, point):\n \"\"\"\n Computes the absolute distance from the plane to the point\n :param point: Point for which distance is computed\n :return: Distance between the plane and the point\n \"\"\"\n return np.abs(np.dot(self.normal_vector, point) + self.d)\n\n def distances(self, points):\n \"\"\"\n Computes the distances from the plane to each of the points. Positive distances are on the side of the\n normal of the plane while negative distances are on the other side\n :param points: Points for which distances are computed\n :return: Distances from the plane to the points (positive values on the side of the normal to the plane,\n negative values on the other side)\n \"\"\"\n return [np.dot(self.normal_vector, pp) + self.d for pp in points]\n\n def distances_indices_sorted(self, points, sign=False):\n \"\"\"\n Computes the distances from the plane to each of the points. Positive distances are on the side of the\n normal of the plane while negative distances are on the other side. Indices sorting the points from closest\n to furthest is also computed.\n :param points: Points for which distances are computed\n :param sign: Whether to add sign information in the indices sorting the points distances\n :return: Distances from the plane to the points (positive values on the side of the normal to the plane,\n negative values on the other side), as well as indices of the points from closest to furthest. For the\n latter, when the sign parameter is True, items of the sorting list are given as tuples of (index, sign).\n \"\"\"\n distances = [np.dot(self.normal_vector, pp) + self.d for pp in points]\n indices = sorted(range(len(distances)), key=lambda k: np.abs(distances[k]))\n if sign:\n indices = [(ii, int(np.sign(distances[ii]))) for ii in indices]\n return distances, indices\n\n def distances_indices_groups(self, points, delta=None, delta_factor=0.05, sign=False):\n \"\"\"\n Computes the distances from the plane to each of the points. Positive distances are on the side of the\n normal of the plane while negative distances are on the other side. Indices sorting the points from closest\n to furthest is also computed. Grouped indices are also given, for which indices of the distances that are\n separated by less than delta are grouped together. 
The delta parameter is either set explictly or taken as\n a fraction (using the delta_factor parameter) of the maximal point distance.\n :param points: Points for which distances are computed\n :param delta: Distance interval for which two points are considered in the same group.\n :param delta_factor: If delta is None, the distance interval is taken as delta_factor times the maximal\n point distance.\n :param sign: Whether to add sign information in the indices sorting the points distances\n :return: Distances from the plane to the points (positive values on the side of the normal to the plane,\n negative values on the other side), as well as indices of the points from closest to furthest and\n grouped indices of distances separated by less than delta. For the sorting list and the grouped\n indices, when the sign parameter is True, items are given as tuples of (index, sign).\n \"\"\"\n distances, indices = self.distances_indices_sorted(points=points)\n if delta is None:\n delta = delta_factor * np.abs(distances[indices[-1]])\n iends = [\n ii\n for ii, idist in enumerate(indices, start=1)\n if ii == len(distances) or (np.abs(distances[indices[ii]]) - np.abs(distances[idist]) > delta)\n ]\n if sign:\n indices = [(ii, int(np.sign(distances[ii]))) for ii in indices]\n grouped_indices = [indices[iends[ii - 1] : iend] if ii > 0 else indices[:iend] for ii, iend in enumerate(iends)]\n return distances, indices, grouped_indices\n\n def projectionpoints(self, pps):\n \"\"\"\n Projects each points in the point list pps on plane and returns the list of projected points\n :param pps: List of points to project on plane\n :return: List of projected point on plane\n \"\"\"\n return [pp - np.dot(pp - self.p1, self.normal_vector) * self.normal_vector for pp in pps]\n\n def orthonormal_vectors(self):\n \"\"\"\n Returns a list of three orthogonal vectors, the two first being parallel to the plane and the\n third one is the normal vector of the plane\n :return: List of orthogonal vectors\n :raise: ValueError if all the coefficients are zero or if there is some other strange error\n \"\"\"\n if self.e1 is None:\n diff = self.p2 - self.p1\n self.e1 = diff / norm(diff)\n self.e2 = np.cross(self.e3, self.e1)\n return [self.e1, self.e2, self.e3]\n\n def orthonormal_vectors_old(self):\n \"\"\"\n Returns a list of three orthogonal vectors, the two first being parallel to the plane and the\n third one is the normal vector of the plane\n :return: List of orthogonal vectors\n :raise: ValueError if all the coefficients are zero or if there is some other strange error\n \"\"\"\n if self.e1 is None:\n imax = np.argmax(np.abs(self.normal_vector))\n if imax == 0:\n self.e1 = np.array([self.e3[1], -self.e3[0], 0.0]) / np.sqrt(self.e3[0] ** 2 + self.e3[1] ** 2)\n elif imax == 1:\n self.e1 = np.array([0.0, self.e3[2], -self.e3[1]]) / np.sqrt(self.e3[1] ** 2 + self.e3[2] ** 2)\n elif imax == 2:\n self.e1 = np.array([-self.e3[2], 0.0, self.e3[0]]) / np.sqrt(self.e3[0] ** 2 + self.e3[2] ** 2)\n else:\n raise ValueError(\"Only three values in the normal vector, should not be here ...\")\n self.e2 = np.cross(self.e3, self.e1)\n return [self.e1, self.e2, self.e3]\n\n def project_and_to2dim_ordered_indices(self, pps, plane_center=\"mean\"):\n \"\"\"\n Projects each points in the point list pps on plane and returns the indices that would sort the\n list of projected points in anticlockwise order\n :param pps: List of points to project on plane\n :return: List of indices that would sort the list of projected points\n \"\"\"\n pp2d = 
self.project_and_to2dim(pps, plane_center)\n return anticlockwise_sort_indices(pp2d)\n\n def project_and_to2dim(self, pps, plane_center):\n \"\"\"\n Projects the list of points pps to the plane and changes the basis from 3D to the 2D basis of the plane\n :param pps: List of points to be projected\n :return: :raise:\n \"\"\"\n proj = self.projectionpoints(pps)\n [u1, u2, u3] = self.orthonormal_vectors()\n PP = np.array([[u1[0], u2[0], u3[0]], [u1[1], u2[1], u3[1]], [u1[2], u2[2], u3[2]]])\n xypps = []\n for pp in proj:\n xyzpp = np.dot(pp, PP)\n xypps.append(xyzpp[0:2])\n if str(plane_center) == str(\"mean\"):\n mean = np.zeros(2, np.float_)\n for pp in xypps:\n mean += pp\n mean /= len(xypps)\n xypps = [pp - mean for pp in xypps]\n elif plane_center is not None:\n projected_plane_center = self.projectionpoints([plane_center])[0]\n xy_projected_plane_center = np.dot(projected_plane_center, PP)[0:2]\n xypps = [pp - xy_projected_plane_center for pp in xypps]\n return xypps\n\n def fit_error(self, points, fit=\"least_square_distance\"):\n \"\"\"Evaluate the error for a list of points with respect to this plane.\n\n :param points: List of points.\n :param fit: Type of fit error.\n :return: Error for a list of points with respect to this plane.\n \"\"\"\n if fit == \"least_square_distance\":\n return self.fit_least_square_distance_error(points)\n if fit == \"maximum_distance\":\n return self.fit_maximum_distance_error(points)\n return None\n\n def fit_least_square_distance_error(self, points):\n \"\"\"Evaluate the sum of squared distances error for a list of points with respect to this plane.\n\n :param points: List of points.\n :return: Sum of squared distances error for a list of points with respect to this plane.\n \"\"\"\n return np.sum([self.distance_to_point(pp) ** 2.0 for pp in points])\n\n def fit_maximum_distance_error(self, points):\n \"\"\"Evaluate the max distance error for a list of points with respect to this plane.\n\n :param points: List of points.\n :return: Max distance error for a list of points with respect to this plane.\n \"\"\"\n return np.max([self.distance_to_point(pp) for pp in points])\n\n @property\n def coefficients(self):\n \"\"\"Return a copy of the plane coefficients.\n\n :return: Plane coefficients as a numpy array.\n \"\"\"\n return np.copy(self._coefficients)\n\n @property\n def abcd(self):\n \"\"\"Return a tuple with the plane coefficients.\n\n :return: Tuple with the plane coefficients.\n \"\"\"\n return (\n self._coefficients[0],\n self._coefficients[1],\n self._coefficients[2],\n self._coefficients[3],\n )\n\n @property\n def a(self):\n \"\"\"Coefficient a of the plane.\"\"\"\n return self._coefficients[0]\n\n @property\n def b(self):\n \"\"\"Coefficient b of the plane.\"\"\"\n return self._coefficients[1]\n\n @property\n def c(self):\n \"\"\"Coefficient c of the plane.\"\"\"\n return self._coefficients[2]\n\n @property\n def d(self):\n \"\"\"Coefficient d of the plane.\"\"\"\n return self._coefficients[3]\n\n @property\n def distance_to_origin(self):\n \"\"\"Distance of the plane to the origin.\"\"\"\n return self._coefficients[3]\n\n @property\n def crosses_origin(self):\n \"\"\"Whether this plane crosses the origin (i.e. 
coefficient d is 0.0).\"\"\"\n return self._crosses_origin\n\n @classmethod\n def from_2points_and_origin(cls, p1, p2):\n \"\"\"Initializes plane from two points and the origin.\n\n :param p1: First point.\n :param p2: Second point.\n :return: Plane.\n \"\"\"\n return cls.from_3points(p1, p2, np.zeros(3))\n\n @classmethod\n def from_3points(cls, p1, p2, p3):\n \"\"\"Initializes plane from three points.\n\n :param p1: First point.\n :param p2: Second point.\n :param p3: Third point.\n :return: Plane.\n \"\"\"\n nn = np.cross(p1 - p3, p2 - p3)\n normal_vector = nn / norm(nn)\n nonzeros = np.argwhere(normal_vector != 0.0)\n if normal_vector[nonzeros[0, 0]] < 0.0:\n normal_vector = -normal_vector\n dd = -np.dot(normal_vector, p1)\n coefficients = np.array([normal_vector[0], normal_vector[1], normal_vector[2], dd], np.float_)\n return cls(coefficients, p1=p1, p2=p2, p3=p3)\n\n @classmethod\n def from_npoints(cls, points, best_fit=\"least_square_distance\"):\n \"\"\"Initializes plane from a list of points.\n\n If the number of points is larger than 3, will use a least square fitting or max distance fitting.\n\n :param points: List of points.\n :param best_fit: Type of fitting procedure for more than 3 points.\n :return: Plane\n \"\"\"\n if len(points) == 2:\n return cls.from_2points_and_origin(points[0], points[1])\n if len(points) == 3:\n return cls.from_3points(points[0], points[1], points[2])\n if best_fit == \"least_square_distance\":\n return cls.from_npoints_least_square_distance(points)\n if best_fit == \"maximum_distance\":\n return cls.from_npoints_maximum_distance(points)\n return None\n\n @classmethod\n def from_npoints_least_square_distance(cls, points):\n \"\"\"Initializes plane from a list of points using a least square fitting procedure.\n\n :param points: List of points.\n :return: Plane.\n \"\"\"\n mean_point = np.array([sum([pp[ii] for pp in points]) for ii in range(3)], np.float_)\n mean_point /= len(points)\n AA = np.zeros((len(points), 3), np.float_)\n for ii, pp in enumerate(points):\n for jj in range(3):\n AA[ii, jj] = pp[jj] - mean_point[jj]\n [UU, SS, Vt] = np.linalg.svd(AA)\n imin = np.argmin(SS)\n normal_vector = Vt[imin]\n nonzeros = np.argwhere(normal_vector != 0.0)\n if normal_vector[nonzeros[0, 0]] < 0.0:\n normal_vector = -normal_vector\n dd = -np.dot(normal_vector, mean_point)\n coefficients = np.array([normal_vector[0], normal_vector[1], normal_vector[2], dd], np.float_)\n return cls(coefficients)\n\n @classmethod\n def perpendicular_bisector(cls, p1, p2):\n \"\"\"Initialize a plane from the perpendicular bisector of two points.\n\n The perpendicular bisector of two points is the plane perpendicular to the vector joining these two points\n and passing through the middle of the segment joining the two points.\n\n :param p1: First point.\n :param p2: Second point.\n :return: Plane.\n \"\"\"\n middle_point = 0.5 * (p1 + p2)\n normal_vector = p2 - p1\n dd = -np.dot(normal_vector, middle_point)\n return cls(np.array([normal_vector[0], normal_vector[1], normal_vector[2], dd], np.float_))\n\n @classmethod\n def from_npoints_maximum_distance(cls, points):\n \"\"\"Initializes plane from a list of points using a max distance fitting procedure.\n\n :param points: List of points.\n :return: Plane.\n \"\"\"\n convex_hull = ConvexHull(points)\n heights = []\n ipoints_heights = []\n for isimplex, simplex in enumerate(convex_hull.simplices):\n cc = convex_hull.equations[isimplex]\n plane = Plane.from_coefficients(cc[0], cc[1], cc[2], cc[3])\n distances = 
[plane.distance_to_point(pp) for pp in points]\n ipoint_height = np.argmax(distances)\n heights.append(distances[ipoint_height])\n ipoints_heights.append(ipoint_height)\n imin_height = np.argmin(heights)\n normal_vector = convex_hull.equations[imin_height, 0:3]\n cc = convex_hull.equations[imin_height]\n highest_point = points[ipoints_heights[imin_height]]\n middle_point = (\n Plane.from_coefficients(cc[0], cc[1], cc[2], cc[3]).projectionpoints([highest_point])[0] + highest_point\n ) / 2\n dd = -np.dot(normal_vector, middle_point)\n return cls(np.array([normal_vector[0], normal_vector[1], normal_vector[2], dd], np.float_))\n\n @classmethod\n def from_coefficients(cls, a, b, c, d):\n \"\"\"Initialize plane from its coefficients.\n\n :param a: a coefficient of the plane.\n :param b: b coefficient of the plane.\n :param c: c coefficient of the plane.\n :param d: d coefficient of the plane.\n :return: Plane.\n \"\"\"\n return cls(np.array([a, b, c, d], np.float_))\n"
] | [
[
"numpy.sum",
"scipy.spatial.ConvexHull",
"numpy.argwhere",
"numpy.any",
"numpy.argsort",
"numpy.cross",
"numpy.isclose",
"numpy.copy",
"numpy.ones_like",
"scipy.interpolate.UnivariateSpline",
"numpy.allclose",
"numpy.argmin",
"numpy.abs",
"numpy.where",
"numpy.linspace",
"numpy.sqrt",
"numpy.float_",
"numpy.zeros",
"numpy.argmax",
"numpy.all",
"numpy.max",
"numpy.min",
"numpy.linalg.norm",
"scipy.integrate.quad",
"numpy.zeros_like",
"numpy.arctan2",
"numpy.sign",
"numpy.linalg.inv",
"numpy.linalg.svd",
"numpy.array",
"numpy.dot"
]
] |
ehsanvds/Imitation_Learning | [
"7a27d4c83d93299f504b3c44364d1f387d788d7b"
] | [
"main.py"
] | [
"\"\"\"\r\nImitation Learning\r\n@author: Ehsan\r\n\"\"\"\r\nimport numpy as np\r\nimport pandas as pd\r\nimport tensorflow as tf\r\nimport os\r\nimport general_functions\r\nimport network_functions\r\n\r\n#%% set parameters\r\nmeasure_path = r'...\\All_measurements_truncated.csv'\r\nimage_dir = r'...\\images'\r\nweights_path = r'...\\model_weights.ckpt'\r\nsave_path = r'...\\whole_model.h5'\r\next = '.png' # extension format of images\r\nn_msr_param = 4 # number of parameters for the measurements\r\ncat_columns = ['throttle_fl', 'brake_fl'] # categorical columns in the measurements\r\n\r\n#%% Main\r\nif __name__=='__main__':\r\n # available device\r\n print('Available GPUs:', tf.config.experimental.list_physical_devices('GPU'))\r\n \r\n # reading the images\r\n print('Reading the images ...')\r\n input_images = []\r\n files = filelist(image_dir, ext)\r\n for k in files:\r\n input_images.append(read_image(os.path.join(image_dir,k))) \r\n input_images = tf.convert_to_tensor(input_images, dtype=tf.float32)\r\n # visualize(image, augm_img)\r\n \r\n # reading the measurements\r\n print('Reading the measurements ...')\r\n df_measure = pd.read_csv(measure_path, index_col=None, header='infer')\r\n df_measure = normalize(df_measure,cat_columns)\r\n for i in cat_columns:\r\n df_measure[i] = pd.Categorical(df_measure[i])\r\n control_output = df_measure.iloc[:,0:3]\r\n control_output = tf.convert_to_tensor(control_output.values, dtype=tf.float32)\r\n input_measure = df_measure.iloc[:,3:]\r\n input_measure = tf.convert_to_tensor(input_measure.values, dtype=tf.float32)\r\n \r\n # building the model\r\n print('Building the model ...')\r\n img_input = tf.keras.Input(shape=tuple(np.array(tf.shape(input_images[0]))), name='img_input')\r\n msr_input = tf.keras.Input(shape=(n_msr_param,), name='msr_input')\r\n model = full_network(img_input, msr_input)\r\n model = tf.keras.Model(inputs=[img_input, msr_input], outputs=model)\r\n model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0002),\r\n loss='mean_squared_error', metrics=['accuracy'])\r\n print(model.summary())\r\n\r\n # creating a callback\r\n callback = tf.keras.callbacks.ModelCheckpoint(filepath=weights_path,\r\n save_weights_only=True, verbose=1)\r\n \r\n # training\r\n print('Training the model ...')\r\n # Pleas make sure to use gast 0.2.2\r\n input_db = tf.data.Dataset.from_tensor_slices({'img_input':input_images, 'msr_input':input_measure})\r\n augm_input_db = (input_db.map(augment, num_parallel_calls=tf.data.experimental.AUTOTUNE))\r\n control_db = tf.data.Dataset.from_tensor_slices(control_output)\r\n dataset = tf.data.Dataset.zip((augm_input_db,control_db)).batch(batch_size).prefetch(tf.data.experimental.AUTOTUNE)\r\n model.fit(dataset, epochs=8, callbacks=[callback])\r\n \r\n # saving the whole model\r\n print('Saving the model ...')\r\n model.save(save_path)\r\n"
] | [
[
"tensorflow.keras.optimizers.Adam",
"tensorflow.shape",
"tensorflow.config.experimental.list_physical_devices",
"pandas.read_csv",
"tensorflow.keras.Model",
"pandas.Categorical",
"tensorflow.convert_to_tensor",
"tensorflow.data.Dataset.zip",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.keras.Input"
]
] |
Alex-Greenen/SpectralNeuralAnimation | [
"0f5569f6f78705b11350ff18ccecb1205c642ffd"
] | [
"HighFrequency/Vizualise.py"
] | [
"from ProcessData.Skeleton import Skeleton\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport torch\nfrom typing import Tuple\n\nfrom HighFrequency.LossType import HighFrequencyLossType\nfrom HighFrequency.DataBase import DataBase\nfrom ProcessData.Utils import *\nimport Format\n\ndef plotState(last_pose:torch.tensor, next_pose:torch.tensor, latent_true:torch.tensor, latent_interpolated:torch.tensor, errors: Tuple[float], database:DataBase) -> None:\n \n fig = plt.figure(figsize=(12,8), dpi= 100)\n skeleton = Format.skeleton\n parents = skeleton._parents\n rotation = Format.rotation\n\n # Truth Figure\n ax = fig.add_subplot(1, 2 if errors!=None else 1, 1, projection='3d')\n ax.set_xlim3d(-100, 100)\n ax.set_ylim3d(-100, 100)\n ax.set_zlim3d(0, 200)\n\n for l in latent_true:\n pose_gt = database.AE_network.decoder(l.unsqueeze(0))\n R = torch.split(pose_gt, database.poseDimDiv, dim=-1)[0]\n R = reformat(R)\n R = correct(R, rotation)\n X = np.reshape(getX(R, skeleton, rotation).clone().detach().cpu().numpy()[0], (-1,3))\n for i in range(len(X)):\n p = parents[i]\n if p != -1:\n ax.plot([X[i,2], X[p,2]], [X[i,1], X[p,1]], [X[i,0], X[p,0]], alpha = 0.5, linewidth=0.5, c='black')\n \n for l in latent_interpolated:\n pose_gt = database.AE_network.decoder(l.unsqueeze(0))\n R = torch.split(pose_gt, database.poseDimDiv, dim=-1)[0]\n R = reformat(R)\n R = correct(R, rotation)\n X = np.reshape(getX(R, skeleton, rotation).clone().detach().cpu().numpy()[0], (-1,3))\n for i in range(len(X)):\n p = parents[i]\n if p != -1:\n ax.plot([X[i,2], X[p,2]], [X[i,1], X[p,1]], [X[i,0], X[p,0]], alpha = 1.0, linewidth=0.5, c='green')\n \n R = torch.split(last_pose, database.poseDimDiv, dim=-1)[0]\n R = reformat(R.unsqueeze(0))\n R = correct(R, rotation)\n X = np.reshape(getX(R, skeleton, rotation).clone().detach().cpu().numpy()[0], (-1,3))\n for i in range(len(X)):\n p = parents[i]\n if p != -1:\n ax.plot([X[i,2], X[p,2]], [X[i,1], X[p,1]], [X[i,0], X[p,0]], alpha = 1.0, linewidth=0.5, c='red')\n\n R = torch.split(next_pose, database.poseDimDiv, dim=-1)[0]\n R = reformat(R.unsqueeze(0))\n R = correct(R, rotation)\n X = np.reshape(getX(R, skeleton, rotation).clone().detach().cpu().numpy()[0], (-1,3))\n for i in range(len(X)):\n p = parents[i]\n if p != -1:\n ax.plot([X[i,2], X[p,2]], [X[i,1], X[p,1]], [X[i,0], X[p,0]], alpha = 1.0, linewidth=0.5, c='red')\n\n # Errors\n if errors!= None:\n ax = fig.add_subplot(1, 2, 2)\n ax.bar([str(l.name) for l in list(HighFrequencyLossType)], errors)\n plt.xticks(rotation= 45, fontsize = 10) \n\n plt.show(block=False)\n "
] | [
[
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show",
"torch.split",
"matplotlib.pyplot.xticks"
]
] |
westonsteimel/polars | [
"67c2cb78f4ce228142dfadd4b8e6f1ebd0399acd"
] | [
"py-polars/polars/functions.py"
] | [
"from typing import Union, TextIO, Optional, List, BinaryIO\nimport numpy as np\nfrom pathlib import Path\nfrom .frame import DataFrame\nfrom .series import Series\nfrom .lazy import LazyFrame\nimport pyarrow as pa\nimport pyarrow.parquet\nimport pyarrow.csv\nimport pyarrow.compute\nimport builtins\nimport urllib.request\nimport io\n\nfrom typing import Dict\nfrom .datatypes import DataType\n\n\ndef _process_http_file(path: str) -> io.BytesIO:\n with urllib.request.urlopen(path) as f:\n return io.BytesIO(f.read())\n\n\ndef _prepare_file_arg(\n file: Union[str, TextIO, Path, BinaryIO]\n) -> Union[str, TextIO, Path, BinaryIO]:\n \"\"\"\n Utility for read_[csv, parquet]. (not to be used by scan_[csv, parquet]).\n\n Does one of:\n - A path.Path object is converted to a string\n - a raw file on the web is downloaded into a buffer.\n \"\"\"\n if isinstance(file, Path):\n file = str(file)\n\n if isinstance(file, str) and file.startswith(\"http\"):\n file = _process_http_file(file)\n\n return file\n\n\ndef get_dummies(df: DataFrame) -> DataFrame:\n return df.to_dummies()\n\n\ndef read_csv(\n file: Union[str, TextIO, Path],\n infer_schema_length: int = 100,\n batch_size: int = 64,\n has_headers: bool = True,\n ignore_errors: bool = False,\n stop_after_n_rows: Optional[int] = None,\n skip_rows: int = 0,\n projection: Optional[List[int]] = None,\n sep: str = \",\",\n columns: Optional[List[str]] = None,\n rechunk: bool = True,\n encoding: str = \"utf8\",\n n_threads: Optional[int] = None,\n dtype: \"Optional[Dict[str, DataType]]\" = None,\n new_columns: \"Optional[List[str]]\" = None,\n use_pyarrow: bool = True,\n) -> \"DataFrame\":\n \"\"\"\n Read into a DataFrame from a csv file.\n\n Parameters\n ----------\n file\n Path to a file or a file like object.\n infer_schema_length\n Maximum number of lines to read to infer schema.\n batch_size\n Number of lines to read into the buffer at once. Modify this to change performance.\n has_headers\n If the CSV file has headers or not.\n ignore_errors\n Try to keep reading lines if some lines yield errors.\n stop_after_n_rows\n After n rows are read from the CSV stop reading. During multi-threaded parsing, an upper bound of `n` rows\n cannot be guaranteed.\n skip_rows\n Start reading after `skip_rows`.\n projection\n Indexes of columns to select\n sep\n Delimiter/ value seperator\n columns\n Columns to project/ select\n rechunk\n Make sure that all columns are contiguous in memory by aggregating the chunks into a single array.\n encoding\n - \"utf8\"\n _ \"utf8-lossy\"\n n_threads\n Number of threads to use in csv parsing. 
Defaults to the number of physical cpu's of you system.\n dtype\n Overwrite the dtypes during inference\n use_pyarrow\n Use pyarrow's native CSV parser.\n\n Returns\n -------\n DataFrame\n \"\"\"\n file = _prepare_file_arg(file)\n\n if (\n use_pyarrow\n and dtype is None\n and has_headers\n and projection is None\n and sep == \",\"\n and columns is None\n and stop_after_n_rows is None\n and not ignore_errors\n and n_threads is None\n and encoding == \"utf8\"\n ):\n tbl = pa.csv.read_csv(file, pa.csv.ReadOptions(skip_rows=skip_rows))\n return from_arrow_table(tbl, rechunk)\n\n df = DataFrame.read_csv(\n file=file,\n infer_schema_length=infer_schema_length,\n batch_size=batch_size,\n has_headers=has_headers,\n ignore_errors=ignore_errors,\n stop_after_n_rows=stop_after_n_rows,\n skip_rows=skip_rows,\n projection=projection,\n sep=sep,\n columns=columns,\n rechunk=rechunk,\n encoding=encoding,\n n_threads=n_threads,\n dtype=dtype,\n )\n if new_columns:\n df.columns = new_columns\n return df\n\n\ndef scan_csv(\n file: Union[str, Path],\n has_headers: bool = True,\n ignore_errors: bool = False,\n sep: str = \",\",\n skip_rows: int = 0,\n stop_after_n_rows: \"Optional[int]\" = None,\n cache: bool = True,\n dtype: \"Optional[Dict[str, DataType]]\" = None,\n) -> \"LazyFrame\":\n \"\"\"\n Lazily read from a csv file.\n\n This allows the query optimizer to push down predicates and projections to the scan level,\n thereby potentially reducing memory overhead.\n\n Parameters\n ----------\n file\n Path to a file\n has_headers\n If the CSV file has headers or not.\n ignore_errors\n Try to keep reading lines if some lines yield errors.\n sep\n Delimiter/ value separator\n skip_rows\n Start reading after `skip_rows`.\n stop_after_n_rows\n After n rows are read from the CSV stop reading.\n During multi-threaded parsing, an upper bound of `n` rows\n cannot be guaranteed.\n cache\n Cache the result after reading\n dtype\n Overwrite the dtypes during inference\n \"\"\"\n if isinstance(file, Path):\n file = str(file)\n return LazyFrame.scan_csv(\n file=file,\n has_headers=has_headers,\n sep=sep,\n ignore_errors=ignore_errors,\n skip_rows=skip_rows,\n stop_after_n_rows=stop_after_n_rows,\n cache=cache,\n dtype=dtype,\n )\n\n\ndef scan_parquet(\n file: Union[str, Path],\n stop_after_n_rows: \"Optional[int]\" = None,\n cache: bool = True,\n) -> \"LazyFrame\":\n \"\"\"\n Lazily read from a parquet file.\n\n This allows the query optimizer to push down predicates and projections to the scan level,\n thereby potentially reducing memory overhead.\n\n Parameters\n ----------\n file\n Path to a file\n stop_after_n_rows\n After n rows are read from the parquet stops reading.\n cache\n Cache the result after reading\n \"\"\"\n if isinstance(file, Path):\n file = str(file)\n return LazyFrame.scan_parquet(\n file=file, stop_after_n_rows=stop_after_n_rows, cache=cache\n )\n\n\ndef read_ipc(file: Union[str, BinaryIO, Path]) -> \"DataFrame\":\n \"\"\"\n Read into a DataFrame from Arrow IPC stream format. 
This is also called the feather format.\n\n Parameters\n ----------\n file\n Path to a file or a file like object.\n\n Returns\n -------\n DataFrame\n \"\"\"\n file = _prepare_file_arg(file)\n return DataFrame.read_ipc(file)\n\n\ndef read_parquet(\n source: \"Union[str, BinaryIO, Path, List[str]]\",\n stop_after_n_rows: \"Optional[int]\" = None,\n memory_map=True,\n columns: Optional[List[str]] = None,\n **kwargs\n) -> \"DataFrame\":\n \"\"\"\n Read into a DataFrame from a parquet file.\n\n Parameters\n ----------\n source\n Path to a file | list of files, or a file like object. If the path is a directory, that directory will be used\n as partition aware scan.\n stop_after_n_rows\n After n rows are read from the parquet stops reading. Note: this cannot be used in partition aware parquet reads.\n memory_map\n Memory map underlying file. This will likely increase performance.\n columns\n Columns to project / select\n **kwargs\n kwargs for [pyarrow.parquet.read_table](https://arrow.apache.org/docs/python/generated/pyarrow.parquet.read_table.html)\n\n Returns\n -------\n DataFrame\n \"\"\"\n source = _prepare_file_arg(source)\n if stop_after_n_rows is not None:\n return DataFrame.read_parquet(source, stop_after_n_rows=stop_after_n_rows)\n else:\n return from_arrow_table(\n pa.parquet.read_table(\n source, memory_map=memory_map, columns=columns, **kwargs\n )\n )\n\n\ndef arg_where(mask: \"Series\"):\n \"\"\"\n Get index values where Boolean mask evaluate True.\n\n Parameters\n ----------\n mask\n Boolean Series\n\n Returns\n -------\n UInt32 Series\n \"\"\"\n return mask.arg_true()\n\n\ndef from_arrow_table(table: pa.Table, rechunk: bool = True) -> \"DataFrame\":\n \"\"\"\n Create a DataFrame from an arrow Table\n\n Parameters\n ----------\n table\n Arrow Table\n rechunk\n Make sure that all data is contiguous.\n \"\"\"\n return DataFrame.from_arrow(table, rechunk)\n\n\ndef from_pandas(\n df: \"pandas.DataFrame\", rechunk: bool = True # noqa: F821\n) -> \"DataFrame\":\n \"\"\"\n Convert from a pandas DataFrame to a polars DataFrame\n\n Parameters\n ----------\n df\n DataFrame to convert\n rechunk\n Make sure that all data is contiguous.\n\n Returns\n -------\n A Polars DataFrame\n \"\"\"\n\n # Note: we first tried to infer the schema via pyarrow and then modify the schema if needed.\n # However arrow 3.0 determines the type of a string like this:\n # pa.array(array).type\n # needlessly allocating and failing when the string is too large for the string dtype.\n data = {}\n\n for (name, dtype) in zip(df.columns, df.dtypes):\n if dtype == \"object\" and isinstance(df[name][0], str):\n data[name] = pa.array(df[name], pa.large_utf8())\n elif dtype == \"datetime64[ns]\":\n data[name] = pa.compute.cast(\n pa.array(np.array(df[name].values, dtype=\"datetime64[ms]\")), pa.date64()\n )\n else:\n data[name] = pa.array(df[name])\n\n table = pa.table(data)\n return from_arrow_table(table, rechunk)\n\n\ndef concat(dfs: \"List[DataFrame]\", rechunk=True) -> \"DataFrame\":\n \"\"\"\n Aggregate all the Dataframe in a List of DataFrames to a single DataFrame\n\n Parameters\n ----------\n dfs\n DataFrames to concatenate\n rechunk\n rechunk the final DataFrame\n \"\"\"\n assert len(dfs) > 0\n df = dfs[0]\n for i in builtins.range(1, len(dfs)):\n try:\n df = df.vstack(dfs[i], in_place=False)\n # could have a double borrow (one mutable one ref)\n except RuntimeError:\n df.vstack(dfs[i].clone(), in_place=True)\n\n if rechunk:\n return df.rechunk()\n return df\n\n\ndef arange(\n lower: int, upper: int, step: 
Optional[int] = None, name: Optional[str] = None\n) -> Series:\n \"\"\"\n Create a Series that ranges from lower bound to upper bound.\n Parameters\n ----------\n lower\n Lower bound value.\n upper\n Upper bound value.\n step\n Optional step size. If none given, the step size will be 1.\n name\n Name of the Series\n \"\"\"\n if name is None:\n name = \"\"\n return Series(name, np.arange(lower, upper, step), nullable=False)\n\n\ndef repeat(\n val: \"Union[int, float, str]\", n: int, name: Optional[str] = None\n) -> \"Series\":\n \"\"\"\n Repeat a single value n times and collect into a Series.\n\n Parameters\n ----------\n val\n Value to repeat.\n n\n Number of repeats.\n name\n Optional name of the Series.\n \"\"\"\n if name is None:\n name = \"\"\n if isinstance(val, str):\n s = Series._repeat(name, val, n)\n s.rename(name)\n return s\n else:\n return Series.from_arrow(name, pa.repeat(val, n))\n"
] | [
[
"numpy.arange",
"numpy.array"
]
] |
matthieuvigne/pinocchio | [
"01f211eceda3ac2e5edc8cf101690afb6f3184d3"
] | [
"doc/d-practical-exercises/src/robot_hand.py"
] | [
"from math import pi\n\nimport numpy as np\nfrom numpy.linalg import norm, pinv\n\nimport pinocchio as se3\nfrom pinocchio.utils import cross, zero, rotate, eye\nfrom display import Display\n\n\nclass Visual(object):\n '''\n Class representing one 3D mesh of the robot, to be attached to a joint. The class contains:\n * the name of the 3D objects inside Gepetto viewer.\n * the ID of the joint in the kinematic tree to which the body is attached.\n * the placement of the body with respect to the joint frame.\n This class is only used in the list Robot.visuals (see below).\n\n The visual are supposed mostly to be capsules. In that case, the object also contains\n radius and length of the capsule.\n The collision checking computes collision test, distance, and witness points.\n Using the supporting robot, the collision Jacobian returns a 1xN matrix corresponding\n to the normal direction.\n '''\n def __init__(self, name, jointParent, placement, radius=.1, length=None):\n '''Length and radius are used in case of capsule objects'''\n self.name = name # Name in gepetto viewer\n self.jointParent = jointParent # ID (int) of the joint\n self.placement = placement # placement of the body wrt joint, i.e. bodyMjoint\n if length is not None:\n self.length = length\n self.radius = radius\n\n def place(self, display, oMjoint):\n oMbody = oMjoint * self.placement\n display.place(self.name, oMbody, False)\n\n def isCapsule(self):\n return hasattr(self, 'length') and hasattr(self, 'radius')\n\n def collision(self, c2, data=None, oMj1=None, oMj2=None):\n if data is not None:\n oMj1 = data.oMi[self.jointParent]\n oMj2 = data.oMi[c2.jointParent]\n M1 = oMj1 * self.placement\n M2 = oMj2 * c2.placement\n\n assert(self.isCapsule() and c2.isCapsule())\n l1 = self.length\n r1 = self.radius\n l2 = c2.length\n r2 = c2.radius\n\n a1 = M1.act(np.matrix([0, 0, -l1 / 2]).T)\n b1 = M2.act(np.matrix([0, 0, -l2 / 2]).T)\n a2 = M1.act(np.matrix([0, 0, +l1 / 2]).T)\n b2 = M2.act(np.matrix([0, 0, +l2 / 2]).T)\n\n ab = pinv(np.hstack([a1 - a2, b2 - b1])) * (b2 - a2)\n\n if all(0 <= ab <= 1):\n asat = bsat = False\n pa = a2 + ab[0, 0] * (a1 - a2)\n pb = b2 + ab[1, 0] * (b1 - b2)\n else:\n asat = bsat = True\n i = np.argmin(np.vstack([ab, 1 - ab]))\n\n pa = a2 if i == 0 else a1\n pb = b2 if i == 1 else b1\n if i == 0 or i == 2: # fix a to pa, search b\n b = (pinv(b1 - b2) * (pa - b2))[0, 0]\n if b < 0:\n pb = b2\n elif b > 1:\n pb = b1\n else:\n pb = b2 + b * (b1 - b2)\n bsat = False\n else: # fix b\n a = (pinv(a1 - a2) * (pb - a2))[0, 0]\n if a < 0:\n pa = a2\n elif a > 1:\n pa = a1\n else:\n pa = a2 + a * (a1 - a2)\n asat = False\n\n dist = norm(pa - pb) - (r1 + r2)\n if norm(pa - pb) > 1e-3:\n # Compute witness points\n ab = pa - pb\n ab /= norm(ab)\n wa = pa - ab * r1\n wb = pb + ab * r2\n\n # Compute normal matrix\n x = np.matrix([1., 0, 0]).T\n r1 = cross(ab, x)\n if norm(r1) < 1e-2:\n x = np.matrix([0, 1., 0]).T\n r1 = cross(ab, x)\n r1 /= norm(r1)\n r2 = cross(ab, r1)\n R = np.hstack([r1, r2, ab])\n\n self.dist = dist\n c2.dist = dist\n self.w = wa\n c2.w = wb\n self.R = R\n c2.R = R\n\n return dist\n\n def jacobian(self, c2, robot, q):\n Ja = se3.jacobian(robot.model, robot.data, q, self.jointParent, False, True)\n Jb = se3.jacobian(robot.model, robot.data, q, c2.jointParent, False, True)\n\n Xa = se3.SE3(self.R, self.w).action\n Xb = se3.SE3(c2.R, c2.w).action\n\n J = (Xa * Ja)[2, :] - (Xb * Jb)[2, :]\n return J\n\n def displayCollision(self, viewer, name='world/wa'):\n viewer.viewer.gui.setVisibility(name, 'ON')\n 
viewer.place(name, se3.SE3(self.R, self.w))\n\n\nclass Robot(object):\n '''\n Define a class Robot with 7DOF (shoulder=3 + elbow=1 + wrist=3).\n The configuration is nq=7. The velocity is the same.\n The members of the class are:\n * viewer: a display encapsulating a gepetto viewer client to create 3D objects and place them.\n * model: the kinematic tree of the robot.\n * data: the temporary variables to be used by the kinematic algorithms.\n * visuals: the list of all the 'visual' 3D objects to render the robot, each element of the list being\n an object Visual (see above).\n\n CollisionPairs is a list of visual indexes.\n Reference to the collision pair is used in the collision test and jacobian of the collision\n (which are simply proxy method to methods of the visual class).\n '''\n\n def __init__(self):\n self.viewer = Display()\n self.visuals = []\n self.model = se3.Model()\n self.createHand()\n self.data = self.model.createData()\n self.q0 = zero(self.model.nq)\n # self.q0[3] = 1.0\n self.v0 = zero(self.model.nv)\n self.collisionPairs = []\n\n def createHand(self, root_id=0, prefix='', joint_placement=None):\n def trans(x, y, z):\n return se3.SE3(eye(3), np.matrix([x, y, z]).T)\n\n def inertia(m, c):\n return se3.Inertia(m, np.matrix(c, np.double).T, eye(3) * m ** 2)\n\n def joint_name(body):\n return prefix + body + '_joint'\n\n def body_name(body):\n return 'world/' + prefix + body\n\n color = [red, green, blue, transparency] = [1, 1, 0.78, 1.0]\n joint_id = root_id\n cm = 1e-2\n\n joint_placement = joint_placement if joint_placement is not None else se3.SE3.Identity()\n joint_id = self.model.addJoint(joint_id, se3.JointModelRY(), joint_placement, joint_name('wrist'))\n self.model.appendBodyToJoint(joint_id, inertia(3, [0, 0, 0]), se3.SE3.Identity())\n\n L, W, H = 3 * cm, 5 * cm, 1 * cm\n self.viewer.viewer.gui.addSphere(body_name('wrist'), .02, color)\n self.viewer.viewer.gui.addBox(body_name('wpalm'), L / 2, W / 2, H, color)\n self.visuals.append(Visual(body_name('wpalm'), joint_id, trans(L / 2, 0, 0)))\n\n self.viewer.viewer.gui.addCapsule(body_name('wpalmb'), H, W, color)\n self.visuals.append(Visual(body_name('wpalmb'), joint_id, se3.SE3(rotate('x', pi / 2), zero(3)), H, W))\n\n self.viewer.viewer.gui.addCapsule(body_name('wpalmt'), H, W, color)\n pos = se3.SE3(rotate('x', pi / 2), np.matrix([L, 0, 0]).T)\n self.visuals.append(Visual(body_name('wpalmt'), joint_id, pos, H, W))\n\n self.viewer.viewer.gui.addCapsule(body_name('wpalml'), H, L, color)\n pos = se3.SE3(rotate('y', pi / 2), np.matrix([L / 2, -W / 2, 0]).T)\n self.visuals.append(Visual(body_name('wpalml'), joint_id, pos, H, L))\n\n self.viewer.viewer.gui.addCapsule(body_name('wpalmr'), H, L, color)\n pos = se3.SE3(rotate('y', pi / 2), np.matrix([L / 2, +W / 2, 0]).T)\n self.visuals.append(Visual(body_name('wpalmr'), joint_id, pos, H, L))\n\n joint_placement = se3.SE3(eye(3), np.matrix([5 * cm, 0, 0]).T)\n joint_id = self.model.addJoint(joint_id, se3.JointModelRY(), joint_placement, joint_name('palm'))\n self.model.appendBodyToJoint(joint_id, inertia(2, [0, 0, 0]), se3.SE3.Identity())\n self.viewer.viewer.gui.addCapsule(body_name('palm2'), 1 * cm, W, color)\n self.visuals.append(Visual(body_name('palm2'), joint_id, se3.SE3(rotate('x', pi / 2), zero(3)), H, W))\n\n FL = 4 * cm\n palmIdx = joint_id\n\n joint_placement = se3.SE3(eye(3), np.matrix([2 * cm, W / 2, 0]).T)\n joint_id = self.model.addJoint(palmIdx, se3.JointModelRY(), joint_placement, joint_name('finger11'))\n self.model.appendBodyToJoint(joint_id, inertia(.5, 
[0, 0, 0]), se3.SE3.Identity())\n self.viewer.viewer.gui.addCapsule(body_name('finger11'), H, FL - 2 * H, color)\n pos = se3.SE3(rotate('y', pi / 2), np.matrix([FL / 2 - H, 0, 0]).T)\n self.visuals.append(Visual(body_name('finger11'), joint_id, pos, H, FL - 2 * H))\n\n joint_placement = se3.SE3(eye(3), np.matrix([FL, 0, 0]).T)\n joint_id = self.model.addJoint(joint_id, se3.JointModelRY(), joint_placement, joint_name('finger12'))\n self.model.appendBodyToJoint(joint_id, inertia(.5, [0, 0, 0]), se3.SE3.Identity())\n\n self.viewer.viewer.gui.addCapsule(body_name('finger12'), H, FL - 2 * H, color)\n pos = se3.SE3(rotate('y', pi / 2), np.matrix([FL / 2 - H, 0, 0]).T)\n self.visuals.append(Visual(body_name('finger12'), joint_id, pos, H, FL - 2 * H))\n\n joint_placement = se3.SE3(eye(3), np.matrix([FL - 2 * H, 0, 0]).T)\n joint_id = self.model.addJoint(joint_id, se3.JointModelRY(), joint_placement, joint_name('finger13'))\n self.model.appendBodyToJoint(joint_id, inertia(.3, [0, 0, 0]), se3.SE3.Identity())\n self.viewer.viewer.gui.addSphere(body_name('finger13'), H, color)\n self.visuals.append(Visual(body_name('finger13'), joint_id, trans(2 * H, 0, 0), H, 0))\n\n joint_placement = se3.SE3(eye(3), np.matrix([2 * cm, 0, 0]).T)\n joint_id = self.model.addJoint(palmIdx, se3.JointModelRY(), joint_placement, joint_name('finger21'))\n self.model.appendBodyToJoint(joint_id, inertia(.5, [0, 0, 0]), se3.SE3.Identity())\n self.viewer.viewer.gui.addCapsule(body_name('finger21'), H, FL - 2 * H, color)\n pos = se3.SE3(rotate('y', pi / 2), np.matrix([FL / 2 - H, 0, 0]).T)\n self.visuals.append(Visual(body_name('finger21'), joint_id, pos, H, FL - 2 * H))\n\n joint_placement = se3.SE3(eye(3), np.matrix([FL, 0, 0]).T)\n joint_id = self.model.addJoint(joint_id, se3.JointModelRY(), joint_placement, joint_name('finger22'))\n self.model.appendBodyToJoint(joint_id, inertia(.5, [0, 0, 0]), se3.SE3.Identity())\n self.viewer.viewer.gui.addCapsule(body_name('finger22'), H, FL - 2 * H, color)\n pos = se3.SE3(rotate('y', pi / 2), np.matrix([FL / 2 - H, 0, 0]).T)\n self.visuals.append(Visual(body_name('finger22'), joint_id, pos, H, FL - 2 * H))\n\n joint_placement = se3.SE3(eye(3), np.matrix([FL - H, 0, 0]).T)\n joint_id = self.model.addJoint(joint_id, se3.JointModelRY(), joint_placement, joint_name('finger23'))\n self.model.appendBodyToJoint(joint_id, inertia(.3, [0, 0, 0]), se3.SE3.Identity())\n self.viewer.viewer.gui.addSphere(body_name('finger23'), H, color)\n self.visuals.append(Visual(body_name('finger23'), joint_id, trans(H, 0, 0), H, 0))\n\n joint_placement = se3.SE3(eye(3), np.matrix([2 * cm, -W / 2, 0]).T)\n joint_id = self.model.addJoint(palmIdx, se3.JointModelRY(), joint_placement, joint_name('finger31'))\n self.model.appendBodyToJoint(joint_id, inertia(.5, [0, 0, 0]), se3.SE3.Identity())\n self.viewer.viewer.gui.addCapsule(body_name('finger31'), H, FL - 2 * H, color)\n pos = se3.SE3(rotate('y', pi / 2), np.matrix([FL / 2 - H, 0, 0]).T)\n self.visuals.append(Visual(body_name('finger31'), joint_id, pos, H, FL - 2 * H))\n\n joint_placement = se3.SE3(eye(3), np.matrix([FL, 0, 0]).T)\n joint_id = self.model.addJoint(joint_id, se3.JointModelRY(), joint_placement, joint_name('finger32'))\n self.model.appendBodyToJoint(joint_id, inertia(.5, [0, 0, 0]), se3.SE3.Identity())\n self.viewer.viewer.gui.addCapsule(body_name('finger32'), H, FL - 2 * H, color)\n pos = se3.SE3(rotate('y', pi / 2), np.matrix([FL / 2 - H, 0, 0]).T)\n self.visuals.append(Visual(body_name('finger32'), joint_id, pos, H, FL - 2 * H))\n\n joint_placement 
= se3.SE3(eye(3), np.matrix([FL - 2 * H, 0, 0]).T)\n joint_id = self.model.addJoint(joint_id, se3.JointModelRY(), joint_placement, joint_name('finger33'))\n self.model.appendBodyToJoint(joint_id, inertia(.3, [0, 0, 0]), se3.SE3.Identity())\n self.viewer.viewer.gui.addSphere(body_name('finger33'), H, color)\n self.visuals.append(Visual(body_name('finger33'), joint_id, trans(2 * H, 0, 0), H, 0))\n\n joint_placement = se3.SE3(eye(3), np.matrix([1 * cm, -W / 2 - H * 1.5, 0]).T)\n joint_id = self.model.addJoint(1, se3.JointModelRY(), joint_placement, joint_name('thumb1'))\n self.model.appendBodyToJoint(joint_id, inertia(.5, [0, 0, 0]), se3.SE3.Identity())\n self.viewer.viewer.gui.addCapsule(body_name('thumb1'), H, 2 * cm, color)\n pos = se3.SE3(rotate('z', pi / 3) * rotate('x', pi / 2), np.matrix([1 * cm, -1 * cm, 0]).T)\n self.visuals.append(Visual(body_name('thumb1'), joint_id, pos, 2 * cm))\n\n joint_placement = se3.SE3(rotate('z', pi / 3) * rotate('x', pi), np.matrix([3 * cm, -1.8 * cm, 0]).T)\n joint_id = self.model.addJoint(joint_id, se3.JointModelRZ(), joint_placement, joint_name('thumb2'))\n self.model.appendBodyToJoint(joint_id, inertia(.4, [0, 0, 0]), se3.SE3.Identity())\n self.viewer.viewer.gui.addCapsule(body_name('thumb2'), H, FL - 2 * H, color)\n pos = se3.SE3(rotate('x', pi / 3), np.matrix([-0.7 * cm, .8 * cm, -0.5 * cm]).T)\n self.visuals.append(Visual(body_name('thumb2'), joint_id, pos, H, FL - 2 * H))\n\n # Prepare some patches to represent collision points. Yet unvisible.\n for i in range(10):\n self.viewer.viewer.gui.addCylinder('world/wa%i' % i, .01, .003, [1.0, 0, 0, 1])\n self.viewer.viewer.gui.addCylinder('world/wb%i' % i, .01, .003, [1.0, 0, 0, 1])\n self.viewer.viewer.gui.setVisibility('world/wa%i' % i, 'OFF')\n self.viewer.viewer.gui.setVisibility('world/wb%i' % i, 'OFF')\n\n def checkCollision(self, pairIndex):\n ia, ib = self.collisionPairs[pairIndex]\n va = self.visuals[ia]\n vb = self.visuals[ib]\n dist = va.collision(vb, self.data)\n return dist\n\n def collisionJacobian(self, pairIndex, q):\n ia, ib = self.collisionPairs[pairIndex]\n va = self.visuals[ia]\n vb = self.visuals[ib]\n return va.jacobian(vb, self, q)\n\n def displayCollision(self, pairIndex, meshIndex, onlyOne=False):\n ia, ib = self.collisionPairs[pairIndex]\n va = self.visuals[ia]\n vb = self.visuals[ib]\n va.displayCollision(self.viewer, 'world/wa%i' % meshIndex)\n vb.displayCollision(self.viewer, 'world/wb%i' % meshIndex)\n self.viewer.viewer.gui.setVisibility('world/wa%i' % meshIndex, 'ON')\n self.viewer.viewer.gui.setVisibility('world/wb%i' % meshIndex, 'ON')\n\n def display(self, q):\n se3.forwardKinematics(self.model, self.data, q)\n for visual in self.visuals:\n visual.place(self.viewer, self.data.oMi[visual.jointParent])\n self.viewer.viewer.gui.refresh()\n"
] | [
[
"numpy.vstack",
"numpy.matrix",
"numpy.hstack",
"numpy.linalg.pinv",
"numpy.linalg.norm"
]
] |
jolibrain/pytorch-CycleGAN-and-pix2pix | [
"43465d660d445e020067979fa8d592a1b480c869"
] | [
"models/cycle_gan_semantic_model.py"
] | [
"import torch\nimport itertools\nfrom util.image_pool import ImagePool\nfrom .base_model import BaseModel\nfrom . import networks\nfrom torch.autograd import Variable\nimport numpy as np\nfrom .modules import loss\nfrom util.util import gaussian\n\nclass CycleGANSemanticModel(BaseModel):\n #def name(self):\n # return 'CycleGANModel'\n\n # new, copied from cyclegan model\n @staticmethod\n def modify_commandline_options(parser, is_train=True):\n \"\"\"Add new dataset-specific options, and rewrite default values for existing options.\n\n Parameters:\n parser -- original option parser\n is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.\n\n Returns:\n the modified parser.\n\n For CycleGAN, in addition to GAN losses, we introduce lambda_A, lambda_B, and lambda_identity for the following losses.\n A (source domain), B (target domain).\n Generators: G_A: A -> B; G_B: B -> A.\n Discriminators: D_A: G_A(A) vs. B; D_B: G_B(B) vs. A.\n Forward cycle loss: lambda_A * ||G_B(G_A(A)) - A|| (Eqn. (2) in the paper)\n Backward cycle loss: lambda_B * ||G_A(G_B(B)) - B|| (Eqn. (2) in the paper)\n Identity loss (optional): lambda_identity * (||G_A(B) - B|| * lambda_B + ||G_B(A) - A|| * lambda_A) (Sec 5.2 \"Photo generation from paintings\" in the paper)\n Dropout is not used in the original CycleGAN paper.\n \"\"\"\n parser.set_defaults(no_dropout=False) # default CycleGAN did not use dropout, beniz: we do\n if is_train:\n parser.add_argument('--lambda_A', type=float, default=10.0, help='weight for cycle loss (A -> B -> A)')\n parser.add_argument('--lambda_B', type=float, default=10.0, help='weight for cycle loss (B -> A -> B)')\n parser.add_argument('--lambda_identity', type=float, default=0.5, help='use identity mapping. Setting lambda_identity other than 0 has an effect of scaling the weight of the identity mapping loss. For example, if the weight of the identity loss should be 10 times smaller than the weight of the reconstruction loss, please set lambda_identity = 0.1')\n parser.add_argument('--rec_noise', type=float, default=0.0, help='whether to add noise to reconstruction')\n\n return parser\n \n def __init__(self, opt):\n BaseModel.__init__(self, opt)\n\n # specify the training losses you want to print out. The program will call base_model.get_current_losses\n self.loss_names = ['D_A', 'G_A', 'cycle_A', 'idt_A', \n 'D_B', 'G_B', 'cycle_B', 'idt_B', \n 'sem_AB', 'sem_BA', 'CLS']\n # specify the images you want to save/display. The program will call base_model.get_current_visuals\n visual_names_A = ['real_A', 'fake_B', 'rec_A']\n visual_names_B = ['real_B', 'fake_A', 'rec_B']\n if self.isTrain and self.opt.lambda_identity > 0.0:\n visual_names_A.append('idt_B')\n visual_names_B.append('idt_A') # beniz: inverted for original\n\n self.visual_names = visual_names_A + visual_names_B\n # specify the models you want to save to the disk. 
The program will call base_model.save_networks and base_model.load_networks\n if self.isTrain:\n self.model_names = ['G_A', 'G_B', 'D_A', 'D_B', 'CLS']\n else: # during test time, only load Gs\n self.model_names = ['G_A', 'G_B']\n\n # load/define networks\n # The naming conversion is different from those used in the paper\n # Code (paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)\n self.netG_A = networks.define_G(opt.input_nc, opt.output_nc,\n opt.ngf, opt.netG, opt.norm, \n not opt.no_dropout, opt.G_spectral, opt.init_type, opt.init_gain, self.gpu_ids)\n self.netG_B = networks.define_G(opt.output_nc, opt.input_nc,\n opt.ngf, opt.netG, opt.norm, \n not opt.no_dropout, opt.G_spectral, opt.init_type, opt.init_gain, self.gpu_ids)\n\n if self.isTrain:\n #use_sigmoid = opt.no_lsgan\n self.netD_A = networks.define_D(opt.output_nc, opt.ndf,\n opt.netD,\n opt.n_layers_D, opt.norm, opt.D_dropout, opt.D_spectral, #use_sigmoid, \n opt.init_type, opt.init_gain, self.gpu_ids)\n self.netD_B = networks.define_D(opt.input_nc, opt.ndf,\n opt.netD,\n opt.n_layers_D, opt.norm, opt.D_dropout, opt.D_spectral, #use_sigmoid, \n opt.init_type, opt.init_gain, self.gpu_ids)\n self.netCLS = networks.define_C(opt.output_nc, opt.ndf,opt.crop_size,\n init_type=opt.init_type, init_gain=opt.init_gain,\n gpu_ids=self.gpu_ids, nclasses=opt.semantic_nclasses)\n \n if self.isTrain:\n if opt.lambda_identity > 0.0: # only works when input and output images have the same number of channels\n assert(opt.input_nc == opt.output_nc)\n self.fake_A_pool = ImagePool(opt.pool_size) # create image buffer to store previously generated images\n self.fake_B_pool = ImagePool(opt.pool_size) # create image buffer to store previously generated images\n # define loss functions\n self.criterionGAN = loss.GANLoss(opt.gan_mode).to(self.device)\n self.criterionCycle = torch.nn.L1Loss()\n self.criterionIdt = torch.nn.L1Loss()\n self.criterionCLS = torch.nn.modules.CrossEntropyLoss()\n # initialize optimizers\n self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()),\n lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizer_D = torch.optim.Adam(itertools.chain(self.netD_A.parameters(), self.netD_B.parameters()),\n lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizer_CLS = torch.optim.Adam(self.netCLS.parameters(), lr=1e-3, betas=(opt.beta1, 0.999))\n self.optimizers = []\n self.optimizers.append(self.optimizer_G)\n self.optimizers.append(self.optimizer_D)\n #beniz: not adding optimizers CLS (?)\n\n self.rec_noise = opt.rec_noise\n\n def set_input(self, input):\n AtoB = self.opt.direction == 'AtoB'\n self.real_A = input['A' if AtoB else 'B'].to(self.device)\n self.real_B = input['B' if AtoB else 'A'].to(self.device)\n self.image_paths = input['A_paths' if AtoB else 'B_paths']\n #print(input['B'])\n if 'A_label' in input:# and 'B_label' in input:\n #self.input_A_label = input['A_label' if AtoB else 'B_label'].to(self.device)\n self.input_A_label = input['A_label'].to(self.device)\n #self.input_B_label = input['B_label' if AtoB else 'A_label'].to(self.device) # beniz: unused\n #self.image_paths = input['B_paths'] # Hack!! 
forcing the labels to corresopnd to B domain\n\n\n def forward(self):\n self.fake_B = self.netG_A(self.real_A)\n\n if self.rec_noise > 0.0:\n self.fake_B_noisy1 = gaussian(self.fake_B, self.rec_noise)\n self.rec_A= self.netG_B(self.fake_B_noisy1)\n else:\n self.rec_A = self.netG_B(self.fake_B)\n\n self.fake_A = self.netG_B(self.real_B)\n\n if self.rec_noise > 0.0:\n self.fake_A_noisy1 = gaussian(self.fake_A, self.rec_noise)\n self.rec_B = self.netG_A(self.fake_A_noisy1)\n else:\n self.rec_B = self.netG_A(self.fake_A)\n\n if self.isTrain:\n # Forward all four images through classifier\n # Keep predictions from fake images only\n #print('real_A shape=',self.real_A.shape)\n #print('real_A=',self.real_A)\n self.pred_real_A = self.netCLS(self.real_A)\n _,self.gt_pred_A = self.pred_real_A.max(1)\n pred_real_B = self.netCLS(self.real_B)\n _,self.gt_pred_B = pred_real_B.max(1)\n self.pred_fake_A = self.netCLS(self.fake_A)\n self.pred_fake_B = self.netCLS(self.fake_B)\n\n _,self.pfB = self.pred_fake_B.max(1) #beniz: unused ?\n \n\n def backward_D_basic(self, netD, real, fake):\n # Real\n pred_real = netD(real)\n loss_D_real = self.criterionGAN(pred_real, True)\n # Fake\n pred_fake = netD(fake.detach())\n loss_D_fake = self.criterionGAN(pred_fake, False)\n # Combined loss\n loss_D = (loss_D_real + loss_D_fake) * 0.5\n # backward\n loss_D.backward()\n return loss_D\n \n def backward_CLS(self):\n label_A = self.input_A_label\n # forward only real source image through semantic classifier\n pred_A = self.netCLS(self.real_A) \n self.loss_CLS = self.criterionCLS(pred_A, label_A)\n self.loss_CLS.backward()\n\n def backward_D_A(self):\n fake_B = self.fake_B_pool.query(self.fake_B)\n self.loss_D_A = self.backward_D_basic(self.netD_A, self.real_B, fake_B)\n\n def backward_D_B(self):\n fake_A = self.fake_A_pool.query(self.fake_A)\n self.loss_D_B = self.backward_D_basic(self.netD_B, self.real_A, fake_A)\n\n def backward_G(self):\n lambda_idt = self.opt.lambda_identity\n lambda_A = self.opt.lambda_A\n lambda_B = self.opt.lambda_B\n # Identity loss\n if lambda_idt > 0:\n # G_A should be identity if real_B is fed.\n self.idt_A = self.netG_A(self.real_B)\n\n self.loss_idt_A = self.criterionIdt(self.idt_A, self.real_B) * lambda_B * lambda_idt\n # G_B should be identity if real_A is fed.\n self.idt_B = self.netG_B(self.real_A)\n self.loss_idt_B = self.criterionIdt(self.idt_B, self.real_A) * lambda_A * lambda_idt\n else:\n self.loss_idt_A = 0\n self.loss_idt_B = 0\n\n # GAN loss D_A(G_A(A))\n self.loss_G_A = self.criterionGAN(self.netD_A(self.fake_B), True) # removed the factor 2...\n # GAN loss D_B(G_B(B))\n self.loss_G_B = self.criterionGAN(self.netD_B(self.fake_A), True)\n # Forward cycle loss\n self.loss_cycle_A = self.criterionCycle(self.rec_A, self.real_A) * lambda_A\n # Backward cycle loss\n self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * lambda_B\n # combined loss standard cyclegan\n self.loss_G = self.loss_G_A + self.loss_G_B + self.loss_cycle_A + self.loss_cycle_B + self.loss_idt_A + self.loss_idt_B\n\n # semantic loss AB\n #print('fake_B=',self.pred_fake_B)\n #print('input_A_label=',self.input_A_label)\n #print(self.pred_fake_B.shape,self.input_A_label.shape)\n self.loss_sem_AB = self.criterionCLS(self.pred_fake_B, self.input_A_label)\n #self.loss_sem_AB = self.criterionCLS(self.pred_fake_B, self.gt_pred_A)\n # semantic loss BA\n self.loss_sem_BA = self.criterionCLS(self.pred_fake_A, self.gt_pred_B)\n #self.loss_sem_BA = 0\n #self.loss_sem_BA = self.criterionCLS(self.pred_fake_A, 
self.pfB) # beniz\n \n # only use semantic loss when classifier has reasonably low loss\n #if True:\n if not hasattr(self, 'loss_CLS') or self.loss_CLS.detach().item() > 1.0:\n self.loss_sem_AB = 0 * self.loss_sem_AB \n self.loss_sem_BA = 0 * self.loss_sem_BA \n \n self.loss_G += self.loss_sem_BA + self.loss_sem_AB\n self.loss_G.backward()\n\n def optimize_parameters(self):\n \"\"\"Calculate losses, gradients, and update network weights; called in every training iteration\"\"\"\n # forward\n self.forward() # compute fake images and reconstruction images.\n # G_A and G_B\n self.set_requires_grad([self.netD_A, self.netD_B], False) # Ds require no gradients when optimizing Gs\n self.set_requires_grad([self.netG_A, self.netG_B], True)\n self.optimizer_G.zero_grad() # set G_A and G_B's gradients to zero\n self.backward_G() # calculate gradients for G_A and G_B\n self.optimizer_G.step() # update G_A and G_B's weights\n # D_A and D_B\n self.set_requires_grad([self.netD_A, self.netD_B], True)\n self.optimizer_D.zero_grad() # set D_A and D_B's gradients to zero\n self.backward_D_A() # calculate gradients for D_A\n self.backward_D_B() # calculate graidents for D_B\n self.optimizer_D.step() # update D_A and D_B's weights\n # CLS\n self.set_requires_grad([self.netD_A, self.netD_B], False)\n self.set_requires_grad([self.netCLS], True)\n self.optimizer_CLS.zero_grad()\n self.backward_CLS()\n self.optimizer_CLS.step()\n\n\n"
] | [
[
"torch.nn.modules.CrossEntropyLoss",
"torch.nn.L1Loss"
]
] |
shivamvats/stable-baselines3 | [
"d67a3bc800389212f94f274c4cf6036c78923105"
] | [
"stable_baselines3/sac/sac.py"
] | [
"from typing import Any, Dict, List, Optional, Tuple, Type, Union\n\nimport gym\nimport numpy as np\nimport torch as th\nfrom torch.nn import functional as F\n\nfrom stable_baselines3.common.buffers import ReplayBuffer\nfrom stable_baselines3.common.noise import ActionNoise\nfrom stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm\nfrom stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule\nfrom stable_baselines3.common.utils import polyak_update\nfrom stable_baselines3.sac.policies import SACPolicy\n\n\nclass SAC(OffPolicyAlgorithm):\n \"\"\"\n Soft Actor-Critic (SAC)\n Off-Policy Maximum Entropy Deep Reinforcement Learning with a Stochastic Actor,\n This implementation borrows code from original implementation (https://github.com/haarnoja/sac)\n from OpenAI Spinning Up (https://github.com/openai/spinningup), from the softlearning repo\n (https://github.com/rail-berkeley/softlearning/)\n and from Stable Baselines (https://github.com/hill-a/stable-baselines)\n Paper: https://arxiv.org/abs/1801.01290\n Introduction to SAC: https://spinningup.openai.com/en/latest/algorithms/sac.html\n\n Note: we use double q target and not value target as discussed\n in https://github.com/hill-a/stable-baselines/issues/270\n\n :param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)\n :param env: The environment to learn from (if registered in Gym, can be str)\n :param learning_rate: learning rate for adam optimizer,\n the same learning rate will be used for all networks (Q-Values, Actor and Value function)\n it can be a function of the current progress remaining (from 1 to 0)\n :param buffer_size: size of the replay buffer\n :param learning_starts: how many steps of the model to collect transitions for before learning starts\n :param batch_size: Minibatch size for each gradient update\n :param tau: the soft update coefficient (\"Polyak update\", between 0 and 1)\n :param gamma: the discount factor\n :param train_freq: Update the model every ``train_freq`` steps. Alternatively pass a tuple of frequency and unit\n like ``(5, \"step\")`` or ``(2, \"episode\")``.\n :param gradient_steps: How many gradient steps to do after each rollout (see ``train_freq``)\n Set to ``-1`` means to do as many gradient steps as steps done in the environment\n during the rollout.\n :param action_noise: the action noise type (None by default), this can help\n for hard exploration problem. Cf common.noise for the different action noise type.\n :param replay_buffer_class: Replay buffer class to use (for instance ``HerReplayBuffer``).\n If ``None``, it will be automatically selected.\n :param replay_buffer_kwargs: Keyword arguments to pass to the replay buffer on creation.\n :param optimize_memory_usage: Enable a memory efficient variant of the replay buffer\n at a cost of more complexity.\n See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195\n :param ent_coef: Entropy regularization coefficient. (Equivalent to\n inverse of reward scale in the original SAC paper.) 
Controlling exploration/exploitation trade-off.\n Set it to 'auto' to learn it automatically (and 'auto_0.1' for using 0.1 as initial value)\n :param target_update_interval: update the target network every ``target_network_update_freq``\n gradient steps.\n :param target_entropy: target entropy when learning ``ent_coef`` (``ent_coef = 'auto'``)\n :param use_sde: Whether to use generalized State Dependent Exploration (gSDE)\n instead of action noise exploration (default: False)\n :param sde_sample_freq: Sample a new noise matrix every n steps when using gSDE\n Default: -1 (only sample at the beginning of the rollout)\n :param use_sde_at_warmup: Whether to use gSDE instead of uniform sampling\n during the warm up phase (before learning starts)\n :param create_eval_env: Whether to create a second environment that will be\n used for evaluating the agent periodically. (Only available when passing string for the environment)\n :param policy_kwargs: additional arguments to be passed to the policy on creation\n :param verbose: the verbosity level: 0 no output, 1 info, 2 debug\n :param seed: Seed for the pseudo random generators\n :param device: Device (cpu, cuda, ...) on which the code should be run.\n Setting it to auto, the code will be run on the GPU if possible.\n :param _init_setup_model: Whether or not to build the network at the creation of the instance\n \"\"\"\n\n def __init__(\n self,\n policy: Union[str, Type[SACPolicy]],\n env: Union[GymEnv, str],\n learning_rate: Union[float, Schedule] = 3e-4,\n buffer_size: int = 1_000_000, # 1e6\n learning_starts: int = 100,\n batch_size: int = 256,\n tau: float = 0.005,\n gamma: float = 0.99,\n train_freq: Union[int, Tuple[int, str]] = 1,\n gradient_steps: int = 1,\n action_noise: Optional[ActionNoise] = None,\n replay_buffer_class: Optional[ReplayBuffer] = None,\n replay_buffer_kwargs: Optional[Dict[str, Any]] = None,\n optimize_memory_usage: bool = False,\n ent_coef: Union[str, float] = \"auto\",\n target_update_interval: int = 1,\n target_entropy: Union[str, float] = \"auto\",\n use_sde: bool = False,\n sde_sample_freq: int = -1,\n use_sde_at_warmup: bool = False,\n tensorboard_log: Optional[str] = None,\n create_eval_env: bool = False,\n policy_kwargs: Optional[Dict[str, Any]] = None,\n verbose: int = 0,\n seed: Optional[int] = None,\n device: Union[th.device, str] = \"auto\",\n _init_setup_model: bool = True,\n ):\n\n super(SAC, self).__init__(\n policy,\n env,\n SACPolicy,\n learning_rate,\n buffer_size,\n learning_starts,\n batch_size,\n tau,\n gamma,\n train_freq,\n gradient_steps,\n action_noise,\n replay_buffer_class=replay_buffer_class,\n replay_buffer_kwargs=replay_buffer_kwargs,\n policy_kwargs=policy_kwargs,\n tensorboard_log=tensorboard_log,\n verbose=verbose,\n device=device,\n create_eval_env=create_eval_env,\n seed=seed,\n use_sde=use_sde,\n sde_sample_freq=sde_sample_freq,\n use_sde_at_warmup=use_sde_at_warmup,\n optimize_memory_usage=optimize_memory_usage,\n supported_action_spaces=(gym.spaces.Box),\n )\n\n self.target_entropy = target_entropy\n self.log_ent_coef = None # type: Optional[th.Tensor]\n # Entropy coefficient / Entropy temperature\n # Inverse of the reward scale\n self.ent_coef = ent_coef\n self.target_update_interval = target_update_interval\n self.ent_coef_optimizer = None\n\n if _init_setup_model:\n self._setup_model()\n\n def _setup_model(self) -> None:\n super(SAC, self)._setup_model()\n self._create_aliases()\n # Target entropy is used when learning the entropy coefficient\n if self.target_entropy == \"auto\":\n 
# automatically set target entropy if needed\n self.target_entropy = -np.prod(self.env.action_space.shape).astype(np.float32)\n else:\n # Force conversion\n # this will also throw an error for unexpected string\n self.target_entropy = float(self.target_entropy)\n\n # The entropy coefficient or entropy can be learned automatically\n # see Automating Entropy Adjustment for Maximum Entropy RL section\n # of https://arxiv.org/abs/1812.05905\n if isinstance(self.ent_coef, str) and self.ent_coef.startswith(\"auto\"):\n # Default initial value of ent_coef when learned\n init_value = 1.0\n if \"_\" in self.ent_coef:\n init_value = float(self.ent_coef.split(\"_\")[1])\n assert init_value > 0.0, \"The initial value of ent_coef must be greater than 0\"\n\n # Note: we optimize the log of the entropy coeff which is slightly different from the paper\n # as discussed in https://github.com/rail-berkeley/softlearning/issues/37\n self.log_ent_coef = th.log(th.ones(1, device=self.device) * init_value).requires_grad_(True)\n self.ent_coef_optimizer = th.optim.Adam([self.log_ent_coef], lr=self.lr_schedule(1))\n else:\n # Force conversion to float\n # this will throw an error if a malformed string (different from 'auto')\n # is passed\n self.ent_coef_tensor = th.tensor(float(self.ent_coef)).to(self.device)\n\n def _create_aliases(self) -> None:\n self.actor = self.policy.actor\n self.critic = self.policy.critic\n self.critic_target = self.policy.critic_target\n\n def train(self, gradient_steps: int, batch_size: int = 64) -> None:\n # Switch to train mode (this affects batch norm / dropout)\n self.policy.set_training_mode(True)\n # Update optimizers learning rate\n optimizers = [self.actor.optimizer, self.critic.optimizer]\n if self.ent_coef_optimizer is not None:\n optimizers += [self.ent_coef_optimizer]\n\n # Update learning rate according to lr schedule\n self._update_learning_rate(optimizers)\n\n ent_coef_losses, ent_coefs = [], []\n actor_losses, critic_losses = [], []\n\n for gradient_step in range(gradient_steps):\n # Sample replay buffer\n replay_data = self.replay_buffer.sample(batch_size, env=self._vec_normalize_env)\n\n # We need to sample because `log_std` may have changed between two gradient steps\n if self.use_sde:\n self.actor.reset_noise()\n\n # Action by the current actor for the sampled state\n actions_pi, log_prob = self.actor.action_log_prob(replay_data.observations)\n log_prob = log_prob.reshape(-1, 1)\n\n ent_coef_loss = None\n if self.ent_coef_optimizer is not None:\n # Important: detach the variable from the graph\n # so we don't change it with other losses\n # see https://github.com/rail-berkeley/softlearning/issues/60\n ent_coef = th.exp(self.log_ent_coef.detach())\n ent_coef_loss = -(self.log_ent_coef * (log_prob + self.target_entropy).detach()).mean()\n ent_coef_losses.append(ent_coef_loss.item())\n else:\n ent_coef = self.ent_coef_tensor\n\n ent_coefs.append(ent_coef.item())\n\n # Optimize entropy coefficient, also called\n # entropy temperature or alpha in the paper\n if ent_coef_loss is not None:\n self.ent_coef_optimizer.zero_grad()\n ent_coef_loss.backward()\n self.ent_coef_optimizer.step()\n\n with th.no_grad():\n # Select action according to policy\n next_actions, next_log_prob = self.actor.action_log_prob(replay_data.next_observations)\n # Compute the next Q values: min over all critics targets\n next_q_values = th.cat(self.critic_target(replay_data.next_observations, next_actions), dim=1)\n next_q_values, _ = th.min(next_q_values, dim=1, keepdim=True)\n # add entropy term\n 
next_q_values = next_q_values - ent_coef * next_log_prob.reshape(-1, 1)\n # td error + entropy term\n target_q_values = replay_data.rewards + (1 - replay_data.dones) * self.gamma * next_q_values\n\n # Get current Q-values estimates for each critic network\n # using action from the replay buffer\n current_q_values = self.critic(replay_data.observations, replay_data.actions)\n\n # Compute critic loss\n critic_loss = 0.5 * sum([F.mse_loss(current_q, target_q_values) for current_q in current_q_values])\n critic_losses.append(critic_loss.item())\n\n # Optimize the critic\n self.critic.optimizer.zero_grad()\n critic_loss.backward()\n self.critic.optimizer.step()\n\n # Compute actor loss\n # Alternative: actor_loss = th.mean(log_prob - qf1_pi)\n # Mean over all critic networks\n q_values_pi = th.cat(self.critic.forward(replay_data.observations, actions_pi), dim=1)\n min_qf_pi, _ = th.min(q_values_pi, dim=1, keepdim=True)\n actor_loss = (ent_coef * log_prob - min_qf_pi).mean()\n actor_losses.append(actor_loss.item())\n\n # Optimize the actor\n self.actor.optimizer.zero_grad()\n actor_loss.backward()\n self.actor.optimizer.step()\n\n # Update target networks\n if gradient_step % self.target_update_interval == 0:\n polyak_update(self.critic.parameters(), self.critic_target.parameters(), self.tau)\n\n self._n_updates += gradient_steps\n\n self.logger.record(\"train/n_updates\", self._n_updates, exclude=\"tensorboard\")\n self.logger.record(\"train/ent_coef\", np.mean(ent_coefs))\n self.logger.record(\"train/actor_loss\", np.mean(actor_losses))\n self.logger.record(\"train/critic_loss\", np.mean(critic_losses))\n if len(ent_coef_losses) > 0:\n self.logger.record(\"train/ent_coef_loss\", np.mean(ent_coef_losses))\n\n def learn(\n self,\n total_timesteps: int,\n callback: MaybeCallback = None,\n log_interval: int = 4,\n eval_env: Optional[GymEnv] = None,\n eval_freq: int = -1,\n n_eval_episodes: int = 5,\n tb_log_name: str = \"SAC\",\n eval_log_path: Optional[str] = None,\n reset_num_timesteps: bool = True,\n render: Optional[bool] = False,\n ) -> OffPolicyAlgorithm:\n\n return super(SAC, self).learn(\n total_timesteps=total_timesteps,\n callback=callback,\n log_interval=log_interval,\n eval_env=eval_env,\n eval_freq=eval_freq,\n n_eval_episodes=n_eval_episodes,\n tb_log_name=tb_log_name,\n eval_log_path=eval_log_path,\n reset_num_timesteps=reset_num_timesteps,\n render=render,\n )\n\n def _excluded_save_params(self) -> List[str]:\n return super(SAC, self)._excluded_save_params() + [\"actor\", \"critic\", \"critic_target\"]\n\n def _get_torch_save_params(self) -> Tuple[List[str], List[str]]:\n state_dicts = [\"policy\", \"actor.optimizer\", \"critic.optimizer\"]\n if self.ent_coef_optimizer is not None:\n saved_pytorch_variables = [\"log_ent_coef\"]\n state_dicts.append(\"ent_coef_optimizer\")\n else:\n saved_pytorch_variables = [\"ent_coef_tensor\"]\n return state_dicts, saved_pytorch_variables\n"
] | [
[
"torch.nn.functional.mse_loss",
"torch.min",
"torch.ones",
"torch.no_grad",
"numpy.prod",
"numpy.mean"
]
] |
crotsu/LearningPython | [
"53b8e0a268a9bb7e912fd34f1fd33c90efb85c8d"
] | [
"3dplot.py"
] | [
"from mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nx = np.arange(-3, 3, 0.25)\ny = np.arange(-3, 3, 0.25)\nX, Y = np.meshgrid(x, y)\nZ = 6*X*X + 4*X*Y + 3*Y*Y\n\nfig = plt.figure()\nax = Axes3D(fig)\nax.plot_wireframe(X,Y,Z) \n\nxx = np.linspace(-3, 3, 100)\nyy = np.linspace(-3, 3, 100)\nzeros = np.zeros(100)\n\nplt.plot(xx,zeros)\nplt.plot(zeros,yy)\n\nplt.show()\n"
] | [
[
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.meshgrid",
"numpy.linspace"
]
] |
tiancity-NJU/Person-Reid | [
"153e5695acca533229793ef96d0e7cb01dbc243d"
] | [
"reid/models/inception.py"
] | [
"from __future__ import absolute_import\r\n\r\nimport torch\r\nfrom torch import nn\r\nfrom torch.nn import functional as F\r\nfrom torch.nn import init\r\n\r\n\r\n__all__ = ['InceptionNet', 'inception']\r\n\r\n\r\ndef _make_conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1,\r\n bias=False):\r\n conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,\r\n stride=stride, padding=padding, bias=bias)\r\n bn = nn.BatchNorm2d(out_planes)\r\n relu = nn.ReLU(inplace=True)\r\n return nn.Sequential(conv, bn, relu)\r\n\r\n\r\nclass Block(nn.Module):\r\n def __init__(self, in_planes, out_planes, pool_method, stride):\r\n super(Block, self).__init__()\r\n self.branches = nn.ModuleList([\r\n nn.Sequential(\r\n _make_conv(in_planes, out_planes, kernel_size=1, padding=0),\r\n _make_conv(out_planes, out_planes, stride=stride)\r\n ),\r\n nn.Sequential(\r\n _make_conv(in_planes, out_planes, kernel_size=1, padding=0),\r\n _make_conv(out_planes, out_planes),\r\n _make_conv(out_planes, out_planes, stride=stride))\r\n ])\r\n\r\n if pool_method == 'Avg':\r\n assert stride == 1\r\n self.branches.append(\r\n _make_conv(in_planes, out_planes, kernel_size=1, padding=0))\r\n self.branches.append(nn.Sequential(\r\n nn.AvgPool2d(kernel_size=3, stride=1, padding=1),\r\n _make_conv(in_planes, out_planes, kernel_size=1, padding=0)))\r\n else:\r\n self.branches.append(\r\n nn.MaxPool2d(kernel_size=3, stride=stride, padding=1))\r\n\r\n def forward(self, x):\r\n return torch.cat([b(x) for b in self.branches], 1)\r\n\r\n\r\nclass InceptionNet(nn.Module):\r\n def __init__(self, cut_at_pooling=False, num_features=256, norm=False,\r\n dropout=0, num_classes=0):\r\n super(InceptionNet, self).__init__()\r\n self.cut_at_pooling = cut_at_pooling\r\n\r\n self.conv1 = _make_conv(3, 32)\r\n self.conv2 = _make_conv(32, 32)\r\n self.conv3 = _make_conv(32, 32)\r\n self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2, padding=1)\r\n self.in_planes = 32\r\n self.inception4a = self._make_inception(64, 'Avg', 1)\r\n self.inception4b = self._make_inception(64, 'Max', 2)\r\n self.inception5a = self._make_inception(128, 'Avg', 1)\r\n self.inception5b = self._make_inception(128, 'Max', 2)\r\n self.inception6a = self._make_inception(256, 'Avg', 1)\r\n self.inception6b = self._make_inception(256, 'Max', 2)\r\n\r\n if not self.cut_at_pooling:\r\n self.num_features = num_features\r\n self.norm = norm\r\n self.dropout = dropout\r\n self.has_embedding = num_features > 0\r\n self.num_classes = num_classes\r\n\r\n self.avgpool = nn.AdaptiveAvgPool2d(1)\r\n\r\n if self.has_embedding:\r\n self.feat = nn.Linear(self.in_planes, self.num_features)\r\n self.feat_bn = nn.BatchNorm1d(self.num_features)\r\n else:\r\n # Change the num_features to CNN output channels\r\n self.num_features = self.in_planes\r\n if self.dropout > 0:\r\n self.drop = nn.Dropout(self.dropout)\r\n if self.num_classes > 0:\r\n self.classifier = nn.Linear(self.num_features, self.num_classes)\r\n\r\n self.reset_params()\r\n\r\n def forward(self, x):\r\n x = self.conv1(x)\r\n x = self.conv2(x)\r\n x = self.conv3(x)\r\n x = self.pool3(x)\r\n x = self.inception4a(x)\r\n x = self.inception4b(x)\r\n x = self.inception5a(x)\r\n x = self.inception5b(x)\r\n x = self.inception6a(x)\r\n x = self.inception6b(x)\r\n\r\n if self.cut_at_pooling:\r\n return x\r\n\r\n x = self.avgpool(x)\r\n x = x.view(x.size(0), -1)\r\n\r\n if self.has_embedding:\r\n x = self.feat(x)\r\n x = self.feat_bn(x)\r\n if self.norm:\r\n x = F.normalize(x)\r\n elif self.has_embedding:\r\n x = F.relu(x)\r\n if 
self.dropout > 0:\r\n x = self.drop(x)\r\n if self.num_classes > 0:\r\n x = self.classifier(x)\r\n return x\r\n\r\n def _make_inception(self, out_planes, pool_method, stride):\r\n block = Block(self.in_planes, out_planes, pool_method, stride)\r\n self.in_planes = (out_planes * 4 if pool_method == 'Avg' else\r\n out_planes * 2 + self.in_planes)\r\n return block\r\n\r\n def reset_params(self):\r\n for m in self.modules():\r\n if isinstance(m, nn.Conv2d):\r\n init.kaiming_normal(m.weight, mode='fan_out')\r\n if m.bias is not None:\r\n init.constant(m.bias, 0)\r\n elif isinstance(m, nn.BatchNorm2d):\r\n init.constant(m.weight, 1)\r\n init.constant(m.bias, 0)\r\n elif isinstance(m, nn.Linear):\r\n init.normal(m.weight, std=0.001)\r\n if m.bias is not None:\r\n init.constant(m.bias, 0)\r\n\r\n\r\ndef inception(**kwargs):\r\n return InceptionNet(**kwargs)\r\n"
] | [
[
"torch.nn.MaxPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.Linear",
"torch.nn.init.constant",
"torch.nn.BatchNorm1d",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.functional.normalize",
"torch.nn.init.normal",
"torch.nn.functional.relu",
"torch.nn.Conv2d",
"torch.nn.Sequential",
"torch.nn.AvgPool2d",
"torch.nn.init.kaiming_normal",
"torch.nn.ReLU",
"torch.nn.Dropout"
]
] |
aesim-tech/simba-python-examples | [
"704591ea48155e83bb79b40ad66de197be4c580b"
] | [
"1. Run Simulation/1. Run Simulation.py"
] | [
"#%% Load modules\r\nfrom aesim.simba import DesignExamples\r\nimport matplotlib.pyplot as plt\r\n\r\n#%% Load project\r\nflybackConverter = DesignExamples.DCDC_Flyback()\r\n\r\n#%% Get the job object and solve the system\r\njob = flybackConverter.TransientAnalysis.NewJob()\r\nstatus = job.Run()\r\n\r\n#%% Get results\r\nt = job.TimePoints\r\nVout = job.GetSignalByName('R2 - Instantaneous Voltage').DataPoints\r\n\r\n#%% Plot Curve\r\nfig, ax = plt.subplots()\r\nax.set_title(flybackConverter.Name)\r\nax.set_ylabel('Vout (V)')\r\nax.set_xlabel('time (s)')\r\nax.plot(t,Vout)\r\n\r\n# %%\r\n"
] | [
[
"matplotlib.pyplot.subplots"
]
] |
emrejilta/web-scraper | [
"a2b60fe125a317c5ab444bbefc3a5da83dc40f4c"
] | [
"scraper.py"
] | [
"#!/usr/bin/env python3\n\nfrom bs4 import BeautifulSoup\nimport requests\nimport pandas as pd\n\nurl = \"https://redis.io/commands\"\n\n# Gets the entire html\ncontent = requests.get(url).text\n\n# Parse the html content\nsoup = BeautifulSoup(content, \"lxml\")\n\n# D\nsection = soup.find_all(\"section\", attrs={\"id\": \"commands\"})\ncontainer = section[0].find_all(\"div\", attrs={\"class\": \"container\"})\n\n# Due to 2 div's named \"container\", the second one is chosen\ncommands = container[1].find_all(\"li\")\n\n# The headings of the table\nheadings = [\"Command\", \"Arguments\", \"Summary\"]\n\n# The matrix to be shown as table\nall_rows = []\n\nfor i in range(len(commands)):\n row = []\n\n # Commands\n command = commands[i].find_all(\"span\", attrs={\"class\": \"command\"})\n row.append(command[0].contents[0].strip())\n\n # Parameters\n args = commands[i].findAll(\"span\", attrs={\"class\": \"args\"})\n row.append(args[0].text.strip())\n\n # Description\n summary = commands[i].findAll(\"span\", attrs={\"class\": \"summary\"})\n row.append(summary[0].text)\n\n all_rows.append(row)\n\n\n\n\ndf = pd.DataFrame(data=all_rows, columns=headings)\ndf.to_csv(\"Redis-Commands.csv\")\n"
] | [
[
"pandas.DataFrame"
]
] |
zhaominyiz/RFDA-PyTorch | [
"4e172d49d96fbf3626c15a8b85103450ec9b1312"
] | [
"test_yuv_RF.py"
] | [
"# @ author: Minyi Zhao\nimport os\nos.environ['CUDA_VISIBLE_DEVICES'] = \"0\"\nimport time\nimport yaml\nimport argparse\nimport torch\nimport os.path as op\nimport numpy as np\nfrom collections import OrderedDict\nfrom tqdm import tqdm\n\nimport utils\nimport dataset\nfrom net_rfda import RFDA\nfrom skimage.metrics import peak_signal_noise_ratio, structural_similarity\n\nneedSSIM = True\ndef receive_arg():\n \"\"\"Process all hyper-parameters and experiment settings.\n \n Record in opts_dict.\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--opt_path', type=str, default='option.yml', \n help='Path to option YAML file.'\n )\n args = parser.parse_args()\n \n with open(args.opt_path, 'r') as fp:\n opts_dict = yaml.load(fp, Loader=yaml.FullLoader)\n\n opts_dict['opt_path'] = args.opt_path\n\n if opts_dict['train']['exp_name'] == None:\n opts_dict['train']['exp_name'] = utils.get_timestr()\n\n opts_dict['train']['log_path'] = op.join(\n \"exp\", opts_dict['train']['exp_name'], \"log_test.log\"\n )\n opts_dict['train']['checkpoint_save_path_pre'] = op.join(\n \"exp\", opts_dict['train']['exp_name'], \"ckp_\"\n )\n opts_dict['test']['restore_iter'] = int(\n opts_dict['test']['restore_iter']\n )\n opts_dict['test']['checkpoint_save_path'] = (\n f\"{opts_dict['train']['checkpoint_save_path_pre']}\"\n f\"{opts_dict['test']['restore_iter']}\"\n '.pt'\n )\n\n return opts_dict\n\n\ndef main():\n # ==========\n # parameters\n # ==========\n\n opts_dict = receive_arg()\n unit = opts_dict['test']['criterion']['unit']\n PSNRS = []\n SSIMS = []\n # ==========\n # open logger\n # ==========\n\n log_fp = open(opts_dict['train']['log_path'], 'w')\n msg = (\n f\"{'<' * 10} Test {'>' * 10}\\n\"\n f\"Timestamp: [{utils.get_timestr()}]\\n\"\n f\"\\n{'<' * 10} Options {'>' * 10}\\n\"\n f\"{utils.dict2str(opts_dict['test'])}\"\n )\n print(msg)\n log_fp.write(msg + '\\n')\n log_fp.flush()\n radius_real = 3\n # ========== \n # Ensure reproducibility or Speed up\n # ==========\n\n #torch.backends.cudnn.benchmark = False # if reproduce\n #torch.backends.cudnn.deterministic = True # if reproduce\n torch.backends.cudnn.benchmark = True # speed up\n\n # ==========\n # create test data prefetchers\n # ==========\n \n # create datasets\n test_ds_type = opts_dict['dataset']['test']['type']\n radius = opts_dict['network']['radius']\n assert test_ds_type in dataset.__all__, \\\n \"Not implemented!\"\n test_ds_cls = getattr(dataset, test_ds_type)\n test_ds = test_ds_cls(\n opts_dict=opts_dict['dataset']['test'], \n radius=radius\n )\n\n test_num = len(test_ds)\n test_vid_num = test_ds.get_vid_num()\n\n # create datasamplers\n test_sampler = None # no need to sample test data\n\n # create dataloaders\n test_loader = utils.create_dataloader(\n dataset=test_ds, \n opts_dict=opts_dict, \n sampler=test_sampler, \n phase='val'\n )\n assert test_loader is not None\n\n # create dataloader prefetchers\n test_prefetcher = utils.CPUPrefetcher(test_loader)\n\n # ==========\n # create & load model\n # ==========\n\n model = RTVQE(opts_dict=opts_dict['network'])\n\n checkpoint_save_path = opts_dict['test']['checkpoint_save_path']\n msg = f'loading model {checkpoint_save_path}...'\n print(msg)\n log_fp.write(msg + '\\n')\n checkpoint_save_path = '/remote-home/myzhao/MM_CKPS/Final_QP37.pt'\n checkpoint = torch.load(checkpoint_save_path)\n if 'module.' 
in list(checkpoint['state_dict'].keys())[0]: # multi-gpu training\n new_state_dict = OrderedDict()\n for k, v in checkpoint['state_dict'].items():\n name = k[7:] # remove module\n new_state_dict[name] = v\n model.load_state_dict(new_state_dict)\n else: # single-gpu training\n model.load_state_dict(checkpoint['state_dict'])\n \n msg = f'> model {checkpoint_save_path} loaded.'\n print(msg)\n log_fp.write(msg + '\\n')\n\n model = model.cuda()\n model.eval()\n\n # ==========\n # define criterion\n # ==========\n\n # define criterion\n assert opts_dict['test']['criterion'].pop('type') == \\\n 'PSNR', \"Not implemented.\"\n criterion = utils.PSNR()\n # ==========\n # validation\n # ==========\n \n # create timer\n total_timer = utils.Timer()\n\n # create counters\n per_aver_dict = dict()\n ori_aver_dict = dict()\n name_vid_dict = dict()\n if needSSIM:\n per_aver_dict_ssim = dict()\n ori_aver_dict_ssim = dict()\n for index_vid in range(test_vid_num):\n per_aver_dict[index_vid] = utils.Counter()\n ori_aver_dict[index_vid] = utils.Counter()\n if needSSIM:\n per_aver_dict_ssim[index_vid] = utils.Counter()\n ori_aver_dict_ssim[index_vid] = utils.Counter()\n name_vid_dict[index_vid] = \"\"\n\n pbar = tqdm(\n total=7980, \n ncols=opts_dict['test']['pbar_len']\n )\n\n # fetch the first batch\n test_prefetcher.reset()\n val_data = test_prefetcher.next()\n\n with torch.no_grad():\n while val_data is not None:\n # get data\n gt_data = val_data['gt'] # (B [RGB] H W)\n lq_data = val_data['lq'] # (B T [RGB] H W)\n index_vid = val_data['index_vid'].item()\n name_vid = val_data['name_vid'][0] # bs must be 1!\n \n b, t, c, _, _ = lq_data.shape\n\n for i in range(t):\n neighbor_list = list(range(i - radius_real, i + radius_real + 1))\n neighbor_list = list(np.clip(neighbor_list, 0, t - 1))\n frm_list = []\n # print(neighbor_list,'vs',lq_data.size())\n for _i in neighbor_list:\n # print(\"i=\",i)\n # print(lq_data[:,int(i),:...].size())\n frm_list.append(lq_data[:,int(_i),...])\n frm_list = torch.cat(frm_list,1).cuda()\n if i==0:\n # print(\"first size\",first.size())\n enhanced_data,hint = model(frm_list) # (B [RGB] H W)\n else:\n enhanced_data,hint = model(frm_list,hint)\n\n # eval\n batch_ori = criterion(lq_data[0, i, ...].cuda(), gt_data[0,i,...].cuda())\n batch_perf = criterion(enhanced_data[0], gt_data[0,i,...].cuda())\n enhanced_data = enhanced_data[0].cpu().squeeze().numpy()\n lq_data2 = lq_data[0, i, ...].cpu().squeeze().numpy()\n gt_data2 = gt_data[0,i].cpu().squeeze().numpy()\n # print(lq_data.shape,\"vs\",gt_data.shape)\n # batch_ori = peak_signal_noise_ratio(lq_data, gt_data, data_range=1.0)\n # batch_perf = peak_signal_noise_ratio(enhanced_data, gt_data, data_range=1.0)\n PSNRS.append(batch_perf-batch_ori)\n if needSSIM:\n batch_ori_ssim = structural_similarity(lq_data2, gt_data2, data_range=1.0)\n batch_perf_ssim = structural_similarity(enhanced_data, gt_data2, data_range=1.0)\n SSIMS.append(batch_perf_ssim-batch_ori_ssim)\n print(\"Ave PSNR:\",sum(PSNRS)/len(PSNRS))\n if needSSIM:\n print(\"Ave SSIM:\",sum(SSIMS)/len(SSIMS)*100.0)\n # Calculate SSIM\n # display\n pbar.set_description(\n \"{:s}: [{:.3f}] {:s} -> [{:.3f}] {:s}\"\n .format(name_vid, batch_ori, unit, batch_perf, unit)\n )\n pbar.update()\n\n # log\n per_aver_dict[index_vid].accum(volume=batch_perf)\n ori_aver_dict[index_vid].accum(volume=batch_ori)\n if needSSIM:\n per_aver_dict_ssim[index_vid].accum(volume=batch_perf_ssim)\n ori_aver_dict_ssim[index_vid].accum(volume=batch_ori_ssim)\n if name_vid_dict[index_vid] == \"\":\n 
name_vid_dict[index_vid] = name_vid\n else:\n assert name_vid_dict[index_vid] == name_vid, \"Something wrong.\"\n\n # fetch next batch\n val_data = test_prefetcher.next()\n \n # end of val\n pbar.close()\n\n # log\n msg = '\\n' + '<' * 10 + ' Results ' + '>' * 10\n print(msg)\n log_fp.write(msg + '\\n')\n for index_vid in range(test_vid_num):\n per = per_aver_dict[index_vid].get_ave()\n ori = ori_aver_dict[index_vid].get_ave()\n name_vid = name_vid_dict[index_vid]\n msg = \"PSNR: {:s}: [{:.3f}] {:s} -> [{:.3f}] {:s} Delta:[{:.3f}]\".format(\n name_vid, ori, unit, per, unit,(per-ori)\n )\n print(msg)\n log_fp.write(msg + '\\n')\n if needSSIM:\n per_ssim = per_aver_dict_ssim[index_vid].get_ave()\n ori_ssim = ori_aver_dict_ssim[index_vid].get_ave()\n msg = \"SSIM: {:s}: [{:.3f}] {:s} -> [{:.3f}] {:s} Delta:[{:.3f}] \".format(\n name_vid, ori_ssim*100.0, unit, per_ssim*100.0, unit,(per_ssim-ori_ssim)*100.0\n )\n print(msg)\n log_fp.write(msg + '\\n')\n ave_per = np.mean([\n per_aver_dict[index_vid].get_ave() for index_vid in range(test_vid_num)\n ])\n ave_ori = np.mean([\n ori_aver_dict[index_vid].get_ave() for index_vid in range(test_vid_num)\n ])\n if needSSIM:\n ave_per_ssim = np.mean([\n per_aver_dict_ssim[index_vid].get_ave() for index_vid in range(test_vid_num)\n ])\n ave_ori_ssim = np.mean([\n ori_aver_dict_ssim[index_vid].get_ave() for index_vid in range(test_vid_num)\n ])\n msg = (\n f\"{'> ori: [{:.3f}] {:s}'.format(ave_ori, unit)}\\n\"\n f\"{'> ave: [{:.3f}] {:s}'.format(ave_per, unit)}\\n\"\n f\"{'> delta: [{:.3f}] {:s}'.format(ave_per - ave_ori, unit)}\"\n )\n print(msg)\n log_fp.write(msg + '\\n'+'SSIM:\\n')\n log_fp.flush()\n if needSSIM:\n msg = (\n f\"{'> ori: [{:.3f}] {:s}'.format(ave_ori_ssim*100.0, unit)}\\n\"\n f\"{'> ave: [{:.3f}] {:s}'.format(ave_per_ssim*100.0, unit)}\\n\"\n f\"{'> delta: [{:.3f}] {:s}'.format(ave_per_ssim*100.0 - ave_ori_ssim*100.0, unit)}\"\n )\n print(msg)\n log_fp.write(msg + '\\n')\n log_fp.flush()\n\n # ==========\n # final log & close logger\n # ==========\n\n total_time = total_timer.get_interval() / 3600\n msg = \"TOTAL TIME: [{:.1f}] h\".format(total_time)\n print(msg)\n log_fp.write(msg + '\\n')\n \n msg = (\n f\"\\n{'<' * 10} Goodbye {'>' * 10}\\n\"\n f\"Timestamp: [{utils.get_timestr()}]\"\n )\n print(msg)\n log_fp.write(msg + '\\n')\n PSNRS = np.array(PSNRS)\n # print(\"STD\",std(PSNRS),\"PVD\",pvd(PSNRS))\n # log_fp.write(\"STD: \"+str(std(PSNRS))+\"PVD:\"+str(pvd(PSNRS)))\n log_fp.close()\n \n\nif __name__ == '__main__':\n main()"
] | [
[
"torch.load",
"torch.no_grad",
"numpy.clip",
"numpy.array",
"torch.cat"
]
] |
zabaras/MH-MDGM | [
"6b3d0e0b8662da9d5ef62813113b444c18b70b49"
] | [
"Inversion/Channel/mdgm_mcmc.py"
] | [
"import numpy as np \nfrom forward import Prediction\nimport matplotlib.pyplot as plt\nimport os\nimport scipy\nfrom matplotlib import ticker,cm\nfrom latent import latent_c\n\nclass inference(object):\n def __init__(self,step,burn_in,dim,obs,sigma,true_permeability,true_pressure,obs_position,scale,device):\n self.dim = dim\n self.burn_in = burn_in\n self.step = step\n self.obs = obs\n self.sigma = sigma\n self.true_permeability = true_permeability\n self.true_pressure = true_pressure\n self.pos = obs_position\n self.plot_interval = 1000\n self.num_obs = 64\n self.prediction = Prediction(scale,device)\n\n def pCN(self,old_params,beta,dim):\n '''\n Here the prior distribution is standart Normal\n '''\n new_params = np.sqrt(1-beta*beta)*old_params+beta*np.random.randn(dim,)\n return new_params\n\n def fx(self,model,exp,zc,zf): \n '''\n compute the permeability field X using MDGM, and compute f(X) using forward model\n ''' \n input = self.prediction.permeability(exp,model,zc,zf=zf)\n like , pre = self.prediction.forward(input)\n f_x = np.reshape(like,(1,-1))\n return f_x,input,pre \n\n def Comp_log_likelihood(self,obs,sigma,exp,model,zc,zf=None):\n '''\n compute likelihood function for inference\n '''\n f_x,input,pre = self.fx(model,exp,zc,zf)\n e=obs-f_x\n log_likelihood = -0.5 * np.sum( np.power(e/sigma,2.0))\n return f_x,input,pre, log_likelihood\n \n def pCN_MH(self,init_c,init_state_f,beta,exp,noise,output_dir):\n '''\n pCN MH refer to 'Variational data assimilation using targetted random walks'\n and we have '-' in the log-likelihood\n MH algorithm with MDGM, two proposal distributions for latent zc and zf respectively,\n where zc can capture global feature, zf is latent variable for parameter local adaption.\n proposal for zc with small step size(0.01)\n proposal for zf with big step size(first 50% state using 0.08, rest with 0.04)\n '''\n coarse_beta = 0.01\n model = self.prediction.model(exp)\n old_params_c = init_c\n dim_c=old_params_c.shape[0]\n accept_num = 0\n dim = dim_c+self.dim\n samples = np.zeros([dim,self.step])\n fx_obs = np.zeros([self.num_obs,self.step])\n log_likelihood = np.zeros([1,self.step])\n old_params_f = init_state_f\n old_fx,old_input,old_pre,old_log_l= self.Comp_log_likelihood(self.obs,self.sigma,exp,model,old_params_c,zf = old_params_f)\n old_params = np.concatenate((old_params_c,old_params_f),axis=0)\n samples[:,0] = old_params.reshape([dim,])\n fx_obs[:,0] = old_fx\n log_likelihood[:,0] = old_log_l\n self.plot_para_field(0,old_input,old_pre,self.true_permeability, self.true_pressure,beta,'pCN',exp,noise,output_dir)\n for i in range(1,self.step):\n if i > 0.5*self.step:\n beta = 0.04\n new_params_c = self.pCN(old_params_c,coarse_beta,dim_c)\n new_params_f = self.pCN(old_params_f,beta,self.dim)\n new_fx,new_input, new_pre, new_log_l= self.Comp_log_likelihood(self.obs,self.sigma,exp,model,new_params_c,zf = new_params_f) \n new_params = np.concatenate((new_params_c,new_params_f),axis=0)\n ratio2 = np.exp(new_log_l - old_log_l)\n alpha = min(1,ratio2)\n np.random.seed(i)\n z = np.random.rand()\n if z<= alpha:\n print('-----------------------------------------------------------------')\n log_likelihood[:,i] = new_log_l\n fx_obs[:,i] = new_fx\n old_fx= new_fx\n samples[:,i] = new_params\n old_input = new_input\n old_pre = new_pre\n old_params = new_params\n old_params_c = new_params_c \n old_params_f = new_params_f\n old_log_l = new_log_l\n accept_num = accept_num +1\n elif z>alpha:\n samples[:,i] = old_params\n fx_obs[:,i] = old_fx\n log_likelihood[:,i] = 
old_log_l\n if (i+1)%self.plot_interval == 0:\n self.plot_para_field(i,old_input,old_pre,self.true_permeability, self.true_pressure,beta,'pCN',exp,noise,output_dir)\n\n post_samples = samples[:,self.burn_in:]\n accept_ratio = (accept_num/self.step)\n print('accept_ratio:',accept_ratio)\n accept = [accept_ratio,accept_num]\n return samples, post_samples,fx_obs,accept,log_likelihood\n \n\n def plot_para_field(self,i,input,pre_pressure, true_permeability, true_pressure,beta,type,exp,noise,output_dir):\n '''\n Plot Markov chain state(log permeability)\n '''\n samples = [np.log(true_permeability),input.data.cpu().numpy(), true_pressure.reshape(64,64),pre_pressure]\n fig, _ = plt.subplots(2,2, figsize=(6,6))\n vmin1 = [np.amin(samples[0]), np.amin(samples[0]),np.amin(samples[2]), np.amin(samples[2])]\n vmax1 = [np.amax(samples[0]), np.amax(samples[0]),np.amax(samples[2]), np.amax(samples[2])] \n for j, ax in enumerate(fig.axes):\n ax.set_aspect('equal')\n ax.set_axis_off()\n cax = ax.imshow(samples[j], cmap='jet',vmin=vmin1[j],vmax=vmax1[j])\n cbar = plt.colorbar(cax, ax=ax, fraction=0.046, pad=0.04,\n format=ticker.ScalarFormatter(useMathText=True))\n plt.savefig(output_dir+f'/{type}_step_size{beta}_state_{i+1}.pdf')\n plt.close()"
] | [
[
"numpy.sqrt",
"numpy.zeros",
"matplotlib.pyplot.savefig",
"numpy.reshape",
"numpy.random.seed",
"numpy.random.randn",
"matplotlib.pyplot.subplots",
"numpy.exp",
"matplotlib.ticker.ScalarFormatter",
"numpy.amin",
"numpy.log",
"numpy.power",
"numpy.random.rand",
"matplotlib.pyplot.close",
"numpy.amax",
"numpy.concatenate"
]
] |