repo_name | hexsha | file_path | code | apis |
---|---|---|---|---|
gbravoi/monte-carlo-tree-search | [
"578df8df925e5f569e7354daff6642e1781389b6"
] | [
"checkers/utils.py"
] | [
"\"\"\"\nMartin Kersner, [email protected]\nseoulai.com\n2018\n\nAdapted by Gabriela B. to work with python 2.7 and ROS\n\"\"\"\nimport random\n\n\nimport numpy as np\n\nfrom base import Constants\nfrom rules import Rules\n\n\nclass BoardEncoding(object):\n def __init__(self):\n self._constants = Constants()\n self._encoding = {}\n\n self.empty = 0\n self.dark = 20\n self.dark_king = 21\n self.light = 10\n self.light_king = 11\n\n def __getitem__(self, name):\n return self._encoding[name]\n\n @property\n def empty(self):\n return self._encoding[self._constants.EMPTY]\n\n @empty.setter\n def empty(self, value):\n self._encoding[self._constants.EMPTY] = value\n\n @property\n def dark(self):\n return self._encoding[self._constants.DARK]\n\n @dark.setter\n def dark(self, value):\n self._encoding[self._constants.DARK] = value\n\n @property\n def dark_king(self):\n return self._encoding[self._constants.DARK_KING]\n\n @dark_king.setter\n def dark_king(self, value):\n self._encoding[self._constants.DARK_KING] = value\n\n @property\n def light(self):\n return self._encoding[self._constants.LIGHT]\n\n @light.setter\n def light(self, value):\n self._encoding[self._constants.LIGHT] = value\n\n @property\n def light_king(self):\n return self._encoding[self._constants.LIGHT_KING]\n\n @light_king.setter\n def light_king(self, value):\n self._encoding[self._constants.LIGHT_KING] = value\n\n\ndef board_list2numpy(\n board_list,\n encoding) :\n \"\"\"Convert the state of game (`board_list`) into 2D NumPy Array using `encoding`.\n\n Args:\n board_list: (List[List[Piece]]) State of the game.\n encoding: (BoardEncoding) Optional argument. If not given default encoding will be utilized.\n\n Returns:\n board_numpy: (np.array)\n \"\"\"\n board_size = len(board_list)\n constants = Constants()\n board_numpy = encoding[constants.EMPTY] * np.ones((board_size, board_size))\n\n for row in range(board_size):\n for col in range(board_size):\n if board_list[row][col] is not None:\n ptype = board_list[row][col].ptype\n king = board_list[row][col].king\n\n if ptype == constants.LIGHT:\n if king:\n piece_type = constants.LIGHT_KING\n else:\n piece_type = constants.LIGHT\n else: # DARK\n if king:\n piece_type = constants.DARK_KING\n else:\n piece_type = constants.DARK\n\n board_numpy[row][col] = encoding[piece_type]\n\n return board_numpy\n\n\ndef generate_random_move(\n board,\n ptype,\n board_size):\n \"\"\"Generate random move from all `ptype` valid moves but does not execute it.\n\n Args:\n board: (List[List[Piece]]) State of the game.\n ptype: (int) type of piece for which random move will be generated\n board_size: (int) size of board\n \"\"\"\n valid_moves = Rules.generate_valid_moves(board, ptype, board_size)\n rand_from_row, rand_from_col = random.choice(list(valid_moves.keys()))\n rand_to_row, rand_to_col = random.choice(valid_moves[(rand_from_row, rand_from_col)])\n return rand_from_row, rand_from_col, rand_to_row, rand_to_col\n\n\n#new functions\ndef print_board(board_list):\n\t\"\"\" \n\tprint board for debugging putposes\n receives board as a board_list: List[List],\n\t\"\"\"\n\tnumpy_board=board_list2numpy(board_list)\n\tprint(numpy_board)\n"
] | [
[
"numpy.ones"
]
] |
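
The `apis` entry for this row is `numpy.ones`, which `board_list2numpy` uses to seed an all-empty board before overwriting occupied squares. A minimal sketch of that pattern, with illustrative encoding values mirroring the `BoardEncoding` defaults in the file (not code from the repo itself):

```python
import numpy as np

# Illustrative encoding values, matching the BoardEncoding defaults in utils.py.
EMPTY, DARK, LIGHT = 0, 20, 10

board_size = 8
board = EMPTY * np.ones((board_size, board_size))  # start from an all-EMPTY board
board[0, 1] = DARK    # then overwrite occupied squares, as board_list2numpy does
board[7, 6] = LIGHT
print(board)
```
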
darrenluc93/web-scraping-challenge | [
"50a9a21161ab0920038c8e0d6a9390bb8e35c5f5"
] | [
"scrape_mars.py"
] | [
"#Import Libraries\n#Web Scraping tools \nfrom bs4 import BeautifulSoup as bs\nfrom selenium import webdriver\n#from splinter import Browser\n\n#DataFrame tools\nimport pandas as pd\n\n#Misc tools for web scraping\nimport time\nimport requests\n\n#Function to initianilze browser.\ndef init_browser():\n\n #Settings for headless mode.\n options = webdriver.ChromeOptions()\n options.add_argument('headless')\n\n #path to the driver and load the options.\n browser = webdriver.Chrome(\"/usr/local/bin/chromedriver\",chrome_options = options)\n\n #returns the brower.\n return browser\n\ndef scrapper():\n\n #Call browser function\n browser = init_browser()\n #Dictionary to store all the results.\n marsInfo_dict = {}\n\n #Code to get NASA Mars News ----------------------------------------------------------------------------------------------\n try:\n\n url = \"https://mars.nasa.gov/news/?page=0&per_page=40&order=publish_date+desc%2Ccreated_at+desc&search=&year=2020%3Apublish_date&category=19%2C165%2C184%2C204&blank_scope=Latest\"\n\n #splinter option - open url\n #browser.visit(url)\n\n #Open url.\n browser.get(url)\n\n #Time to let the website load all the elements\n time.sleep(4) \n\n #splinter option - save HTML \n #html = browser.html \n\n #save the html source.\n html = browser.page_source\n\n #Use bs4 to parse the html response.\n soup = bs(html, \"html.parser\")\n\n #Collect the latest news title\n news_title = soup.find_all('li', class_=\"slide\")[0].find(class_=\"content_title\").text\n news_p = soup.find_all('li', class_=\"slide\")[0].text\n\n marsInfo_dict['news_title'] = news_title\n marsInfo_dict['news_p'] = news_p\n \n except :\n print(f\"Problem at website {url}\")\n\n #Code to get JPL Mars Space Images - Featured Image ---------------------------------------------------------------------------------\n try:\n\n url = \"https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars\"\n\n #splinter option - open url\n #browser.visit(url)\n\n #Opens the url.\n browser.get(url)\n\n #splinter option - FULL IMAGE BUTTON\n #browser.click_link_by_id(\"full_image\")\n\n #Interact with the FULL IMAGE BUTTON\n browser.find_element_by_id(\"full_image\").click()\n\n time.sleep(4)\n\n #splinter option - save HTML \n #html = browser.html \n\n #save the html source.\n html = browser.page_source\n\n #Use bs4 to parse the html response.\n soup = bs(html, \"html.parser\")\n\n featured_image_url = \"https://www.jpl.nasa.gov/\" + soup.find_all('img', class_=\"fancybox-image\")[0]['src']\n\n marsInfo_dict['featured_image_url'] = featured_image_url\n \n except :\n print(f\"Problem at website {url}\")\n \n #Mars Weather ------------------------------------------------------------------------------------------------------------------------\n try:\n url = \"https://twitter.com/marswxreport?lang=en\"\n \n #splinter option - open url\n #browser.visit(url)\n\n #Open the url.\n browser.get(url)\n\n #Time to let the website load all the elements\n time.sleep(4)\n\n #splinter option - save HTML \n #html = browser.html \n\n #save the html source.\n html = browser.page_source\n\n #Use bs4 to parse the html response.\n soup = bs(html, \"html.parser\")\n\n mars_weather = soup.find_all('article', class_=\"css-1dbjc4n r-1loqt21 r-18u37iz r-1ny4l3l r-o7ynqc r-6416eg\")[0].text.strip().replace('Mars Weather@MarsWxReport·19hInSight ','')\n\n marsInfo_dict['mars_weather'] = mars_weather\n \n except :\n print(mars_weather)\n print(f\"Problem at website {url}\")\n\n # Mars 
Facts--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n try:\n url = 'http://space-facts.com/mars/'\n\n #Load url to pandas read html.\n tables = pd.read_html(url)\n\n #Tables\n marsFacts_df = tables[0]\n earthMars_df = tables[1]\n\n #Rename columns\n marsFacts_df.columns = ['Facts', 'Values']\n\n\n #Outpout\n html_outputFacts = marsFacts_df.to_html(index = False)\n html_outputFacts = html_outputFacts.replace('\\n', '')\n\n html_outputMarsEarth = earthMars_df.to_html(index = False)\n html_outputMarsEarth = html_outputMarsEarth.replace('\\n', '')\n\n marsInfo_dict['html_outputFacts'] = html_outputFacts\n marsInfo_dict['html_outputMarsEarth'] = html_outputMarsEarth\n\n except :\n print(f\"Problem at website {url}\")\n\n #hemisphereImages ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n try:\n temp_list = []\n\n url = \"https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars\"\n\n #splinter option - open url\n #browser.visit(url)\n\n #Opens the url.\n browser.get(url)\n\n time.sleep(4)\n\n #splinter option - save HTML \n #html = browser.html \n\n #save the html source.\n html = browser.page_source\n\n # close web browser\n browser.close()\n\n #Use bs4 to parse the html response.\n soup = bs(html, \"html.parser\")\n\n links = soup.find_all('div', class_=\"description\")\n\n for link in links:\n\n highDef_url = f\"https://astrogeology.usgs.gov{link.find('a')['href']}\"\n\n responseHighDef = requests.get(highDef_url)\n\n soupHighDef = bs(responseHighDef.text, 'html.parser')\n\n highDef_url = soupHighDef.find_all(\"div\", class_=\"downloads\")[0].find('a')['href']\n\n title = link.find('h3').text \n\n temp_list.append({\"title\" : title, \"img_url\" : highDef_url})\n\n marsInfo_dict['hemisphere_image_urls'] = temp_list\n\n except :\n print(f\"Problem at website {url}\")\n\n return marsInfo_dict"
] | [
[
"pandas.read_html"
]
] |
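
The only API flagged for this row is `pandas.read_html`, which `scrapper()` uses for the Mars facts section. A minimal sketch of that step (requires network access and an HTML parser such as `lxml`; the URL is the one used in the script):

```python
import pandas as pd

url = "http://space-facts.com/mars/"
tables = pd.read_html(url)          # one DataFrame per <table> element on the page
mars_facts_df = tables[0]
mars_facts_df.columns = ["Facts", "Values"]
html_output = mars_facts_df.to_html(index=False).replace("\n", "")
print(html_output[:80])
```
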
Sethan/deeplearning-graphics | [
"ce164847a323d3f07cfe241f4bbed6029777c58d"
] | [
"ssd/modeling/backbone/basic.py"
] | [
"import torch\n\n\nclass BasicModel(torch.nn.Module):\n \"\"\"\n This is a basic backbone for SSD.\n The feature extractor outputs a list of 6 feature maps, with the sizes:\n [shape(-1, output_channels[0], 38, 38),\n shape(-1, output_channels[1], 19, 19),\n shape(-1, output_channels[2], 10, 10),\n shape(-1, output_channels[3], 5, 5),\n shape(-1, output_channels[3], 3, 3),\n shape(-1, output_channels[4], 1, 1)]\n where \"output_channels\" is the same as cfg.BACKBONE.OUT_CHANNELS\n \"\"\"\n def __init__(self, cfg):\n super().__init__()\n image_size = cfg.INPUT.IMAGE_SIZE\n output_channels = cfg.MODEL.BACKBONE.OUT_CHANNELS\n self.output_channels = output_channels\n image_channels = cfg.MODEL.BACKBONE.INPUT_CHANNELS\n self.output_feature_size = cfg.MODEL.PRIORS.FEATURE_MAPS\n self.num_filters = [32,64]\n \n \n self.feature_extractor38 = torch.nn.Sequential(\n #part 1 38x38\n torch.nn.Conv2d(\n in_channels=image_channels,\n out_channels=self.num_filters[0],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.num_filters[0]),\n torch.nn.MaxPool2d(2, stride=2),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.05),\n torch.nn.Conv2d(\n in_channels=self.num_filters[0],\n out_channels=self.num_filters[1],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n \n torch.nn.BatchNorm2d(self.num_filters[1]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.06),\n torch.nn.Conv2d(\n in_channels=self.num_filters[1],\n out_channels=self.num_filters[1],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.num_filters[1]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.07),\n torch.nn.Conv2d(\n in_channels=self.num_filters[1],\n out_channels=self.num_filters[1],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.num_filters[1]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.08),\n torch.nn.Conv2d(\n in_channels=self.num_filters[1],\n out_channels=self.num_filters[1],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.num_filters[1]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.09),\n torch.nn.Conv2d(\n in_channels=self.num_filters[1],\n out_channels=self.num_filters[1],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.num_filters[1]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.01),\n torch.nn.Conv2d(\n in_channels=self.num_filters[1],\n out_channels=self.num_filters[1],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.num_filters[1]),\n torch.nn.MaxPool2d(2, stride=2),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.11),\n torch.nn.Conv2d(\n in_channels=self.num_filters[1],\n out_channels=self.num_filters[1],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.num_filters[1]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.12),\n torch.nn.Conv2d(\n in_channels=self.num_filters[1],\n out_channels=self.num_filters[1],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.num_filters[1]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.13),\n torch.nn.Conv2d(\n in_channels=self.num_filters[1],\n out_channels=self.num_filters[1],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.num_filters[1]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.14),\n torch.nn.Conv2d(\n in_channels=self.num_filters[1],\n out_channels=self.output_channels[0],\n kernel_size=3,\n stride=2,\n padding=1\n )\n )\n \n self.feature_extractor19 = torch.nn.Sequential(\n \n #part 2 19x19\n torch.nn.BatchNorm2d(self.output_channels[0]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.15),\n torch.nn.Conv2d(\n 
in_channels=self.output_channels[0],\n out_channels=self.output_channels[0],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[0]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.16),\n torch.nn.Conv2d(\n in_channels=self.output_channels[0],\n out_channels=self.output_channels[0],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[0]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.17),\n torch.nn.Conv2d(\n in_channels=self.output_channels[0],\n out_channels=self.output_channels[0],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[0]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.18),\n torch.nn.Conv2d(\n in_channels=self.output_channels[0],\n out_channels=self.output_channels[0],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[0]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.19),\n torch.nn.Conv2d(\n in_channels=self.output_channels[0],\n out_channels=self.output_channels[0],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[0]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.2),\n torch.nn.Conv2d(\n in_channels=self.output_channels[0],\n out_channels=self.output_channels[0],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[0]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.21),\n torch.nn.Conv2d(\n in_channels=self.output_channels[0],\n out_channels=self.output_channels[0],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[0]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.22),\n torch.nn.Conv2d(\n in_channels=self.output_channels[0],\n out_channels=self.output_channels[1],\n kernel_size=3,\n stride=2,\n padding=1\n ))\n \n self.feature_extractor9 = torch.nn.Sequential(\n \n #part 3 10x10\n torch.nn.BatchNorm2d(self.output_channels[1]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.23),\n torch.nn.Conv2d(\n in_channels=self.output_channels[1],\n out_channels=self.output_channels[1],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[1]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.24),\n torch.nn.Conv2d(\n in_channels=self.output_channels[1],\n out_channels=self.output_channels[1],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[1]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.25),\n torch.nn.Conv2d(\n in_channels=self.output_channels[1],\n out_channels=self.output_channels[1],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[1]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.26),\n torch.nn.Conv2d(\n in_channels=self.output_channels[1],\n out_channels=self.output_channels[1],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[1]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.27),\n torch.nn.Conv2d(\n in_channels=self.output_channels[1],\n out_channels=self.output_channels[1],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[1]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.28),\n torch.nn.Conv2d(\n in_channels=self.output_channels[1],\n out_channels=self.output_channels[1],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[1]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.29),\n torch.nn.Conv2d(\n in_channels=self.output_channels[1],\n out_channels=self.output_channels[1],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n 
torch.nn.BatchNorm2d(self.output_channels[1]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.30),\n torch.nn.Conv2d(\n in_channels=self.output_channels[1],\n out_channels=self.output_channels[2],\n kernel_size=3,\n stride=2,\n padding=1\n ))\n \n self.feature_extractor5 = torch.nn.Sequential(\n #part 4 5x5\n torch.nn.BatchNorm2d(self.output_channels[2]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.31),\n torch.nn.Conv2d(\n in_channels=self.output_channels[2],\n out_channels=self.output_channels[2],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[2]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.32),\n torch.nn.Conv2d(\n in_channels=self.output_channels[2],\n out_channels=self.output_channels[2],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[2]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.33),\n torch.nn.Conv2d(\n in_channels=self.output_channels[2],\n out_channels=self.output_channels[2],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[2]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.34),\n torch.nn.Conv2d(\n in_channels=self.output_channels[2],\n out_channels=self.output_channels[2],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[2]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.35),\n torch.nn.Conv2d(\n in_channels=self.output_channels[2],\n out_channels=self.output_channels[2],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[2]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.36),\n torch.nn.Conv2d(\n in_channels=self.output_channels[2],\n out_channels=self.output_channels[2],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[2]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.37),\n torch.nn.Conv2d(\n in_channels=self.output_channels[2],\n out_channels=self.output_channels[2],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[2]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.38),\n torch.nn.Conv2d(\n in_channels=self.output_channels[2],\n out_channels=self.output_channels[3],\n kernel_size=3,\n stride=2,\n padding=1\n ))\n \n self.feature_extractor3 = torch.nn.Sequential(\n \n #part 5 3x3\n torch.nn.BatchNorm2d(self.output_channels[3]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.39),\n torch.nn.Conv2d(\n in_channels=self.output_channels[3],\n out_channels=self.output_channels[3],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[3]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.40),\n torch.nn.Conv2d(\n in_channels=self.output_channels[3],\n out_channels=self.output_channels[3],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.41),\n torch.nn.Conv2d(\n in_channels=self.output_channels[3],\n out_channels=self.output_channels[3],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[3]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.42),\n torch.nn.Conv2d(\n in_channels=self.output_channels[3],\n out_channels=self.output_channels[3],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[3]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.43),\n torch.nn.Conv2d(\n in_channels=self.output_channels[3],\n out_channels=self.output_channels[3],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[3]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.44),\n torch.nn.Conv2d(\n 
in_channels=self.output_channels[3],\n out_channels=self.output_channels[3],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[3]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.45),\n torch.nn.Conv2d(\n in_channels=self.output_channels[3],\n out_channels=self.output_channels[3],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[3]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.46),\n torch.nn.Conv2d(\n in_channels=self.output_channels[3],\n out_channels=self.output_channels[4],\n kernel_size=3,\n stride=2,\n padding=1\n ))\n \n self.feature_extractor1 = torch.nn.Sequential(\n \n #part 6 1x1\n torch.nn.BatchNorm2d(self.output_channels[4]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.48),\n torch.nn.Conv2d(\n in_channels=self.output_channels[4],\n out_channels=self.output_channels[4],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[4]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.49),\n torch.nn.Conv2d(\n in_channels=self.output_channels[4],\n out_channels=self.output_channels[4],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[4]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.50),\n torch.nn.Conv2d(\n in_channels=self.output_channels[4],\n out_channels=self.output_channels[4],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[4]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.51),\n torch.nn.Conv2d(\n in_channels=self.output_channels[4],\n out_channels=self.output_channels[4],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[4]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.52),\n torch.nn.Conv2d(\n in_channels=self.output_channels[4],\n out_channels=self.output_channels[4],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[4]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.53),\n torch.nn.Conv2d(\n in_channels=self.output_channels[4],\n out_channels=self.output_channels[4],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[4]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.54),\n torch.nn.Conv2d(\n in_channels=self.output_channels[4],\n out_channels=self.output_channels[4],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[4]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.55),\n torch.nn.Conv2d(\n in_channels=self.output_channels[4],\n out_channels=self.output_channels[5],\n kernel_size=3,\n stride=1,\n padding=0\n ))\n def forward(self, x):\n \"\"\"\n The forward functiom should output features with shape:\n [shape(-1, output_channels[0], 38, 38),\n shape(-1, output_channels[1], 19, 19),\n shape(-1, output_channels[2], 10, 10),\n shape(-1, output_channels[3], 5, 5),\n shape(-1, output_channels[3], 3, 3),\n shape(-1, output_channels[4], 1, 1)]\n We have added assertion tests to check this, iteration through out_features,\n where out_features[0] should have the shape:\n shape(-1, output_channels[0], 38, 38),\n \"\"\"\n \n out_features = []\n out = self.feature_extractor38(x)\n out_features.append(out)\n out = self.feature_extractor19(out)\n out_features.append(out)\n out = self.feature_extractor9(out)\n out_features.append(out)\n out = self.feature_extractor5(out)\n out_features.append(out)\n out = self.feature_extractor3(out)\n out_features.append(out)\n out = self.feature_extractor1(out)\n out_features.append(out)\n feature_list = [38,19,10,5,3,1]\n for idx, feature in 
enumerate(out_features):\n expected_shape = (self.output_channels[idx], feature_list[idx], feature_list[idx])\n assert feature.shape[1:] == expected_shape, \\\n f\"Expected shape: {expected_shape}, got: {feature.shape[1:]} at output IDX: {idx}\"\n return tuple(out_features)\n\n"
] | [
[
"torch.nn.MaxPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.Dropout2d",
"torch.nn.Conv2d",
"torch.nn.ELU"
]
] |
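
The APIs listed for this row are the building blocks of the repeated Conv → BatchNorm → (MaxPool) → ELU → Dropout unit in `BasicModel`. A minimal sketch of one such unit with illustrative channel counts, not the exact cfg-driven sizes:

```python
import torch

# One Conv -> BatchNorm -> MaxPool -> ELU -> Dropout unit, as repeated in BasicModel.
block = torch.nn.Sequential(
    torch.nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, stride=1, padding=1),
    torch.nn.BatchNorm2d(32),
    torch.nn.MaxPool2d(2, stride=2),
    torch.nn.ELU(),
    torch.nn.Dropout2d(0.05),
)

x = torch.rand(1, 3, 300, 300)      # SSD300-style input
print(block(x).shape)               # torch.Size([1, 32, 150, 150])
```
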
lego0901/pytea | [
"8ede650def2e68f4610ba816451d8b9e28f09f76"
] | [
"packages/pytea/pytest/unit_tests/passes/pass_argmax_dim01.py"
] | [
"'''\npass_argmax_dim01.py\nCopyright (c) Seoul National University\nLicensed under the MIT license.\nAuthor: Woo Sung Song\n\ntorch.Tensor.argmax with dim parameter.\n! This is not available since maximum stack size exceeding error has been occured\n'''\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\na = torch.rand(2, 3)\n#m = a.argmax(dim=1)\n\n# shape assertion\n#m + torch.rand(2, 4, 5)"
] | [
[
"torch.rand"
]
] |
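
The test above only executes `torch.rand`; the `argmax(dim=1)` call it is meant to exercise is commented out. A small sketch of what the uncommented check would look like (shapes follow the test file):

```python
import torch

a = torch.rand(2, 3)     # uniform samples in [0, 1), shape (2, 3)
m = a.argmax(dim=1)      # index of the row-wise maximum, shape (2,)
print(m.shape)           # torch.Size([2])
```
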
jgharris7/DocClass | [
"9ef62e655272cca8374187040eb3dd73f3f82b72"
] | [
"model/app/LearnTfidfCNB.py"
] | [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Mar 22 22:43:22 2021\r\n\r\n@author: jgharris\r\n\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Mar 22 21:09:34 2021\r\n\r\n@author: jgharris\r\n\"\"\"\r\n\r\nroot='C:/Users/jgharris/DocClass/'\r\n\r\ndataFile='/data/shuffled-full-set-hashed.csv'\r\n\r\n\r\n\r\nimport statistics as stat\r\nimport pandas as pd\r\n \r\nfrom sklearn.model_selection import train_test_split\r\n \r\nfrom sklearn.metrics import accuracy_score\r\n \r\nimport matplotlib\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pickle\r\nfrom DocClfTfidfCNB import DocClfTfidfCNB\r\nfrom Documents import Documents\r\n\r\n\r\n#dataFile='/test/testshort.csv'\r\n\r\nmodelName=\"nbtfidfv0\"\r\nmaxlines=80000000\r\ntestsize=.3\r\nrandom_state=45\r\nMAXSTRINGLENGH=4000\r\nFIRSTSTRINGLENGTH=80\r\nconf_mat=[]\r\ndef main(): \r\n # Set up corpus for training \r\n corpus=Documents()\r\n corpus.readFromFile(root+dataFile,maxline=maxlines)\r\n ''' \r\n model1=DocClfComplNB(maxStringLength=MAXSTRINGLENGH, \\\r\n firstStringLength=FIRSTSTRINGLENGTH)\r\n '''\r\n model1=DocClfTfidfCNB(maxStringLength=MAXSTRINGLENGH, \\\r\n firstStringLength=FIRSTSTRINGLENGTH)\r\n print()\r\n # split into test and training sets\r\n xtrain,xtest,ytrain,ytest=\\\r\n train_test_split(corpus.words,corpus.y,test_size=testsize, \\\r\n random_state=random_state)\r\n ytrainpred=model1.fit(xtrain,ytrain)\r\n ytestpred=model1.predict(xtest)\r\n\r\n trainAccuracy=accuracy_score(ytrain,ytrainpred)\r\n testAccuracy=accuracy_score(ytest,ytestpred)\r\n controlAccuracy=accuracy_score(np.random.permutation(ytest),ytestpred)\r\n \r\n \r\n global conf_mat\r\n conf_mat =model1.confidence(ytest, ytestpred)\r\n print(model1.confidence)\r\n print()\r\n print( np.unique(ytestpred,return_counts=True))\r\n print()\r\n \r\n [print(\"%-20s\" % key +\" %5.3f\" % value) for key,value in model1.confidence.items()]\r\n for row in range(0,conf_mat.shape[0]):\r\n print( [\" %4d\" % conf_mat[row,col] for col in range(0,conf_mat.shape[1])])\r\n \r\n rowsum=conf_mat.sum(axis=0)\r\n colsum=conf_mat.sum(axis=1)\r\n labels=[]\r\n [labels.append(key) for key in model1.confidence.keys()]\r\n print(\"item rowsum colsum\")\r\n for ic in range(0,conf_mat.shape[0]):\r\n print(\"%-25s\" % labels[ic] + \" %5d\" % rowsum[ic]+ \" %5d\" % colsum[ic])\r\n \r\n print(\"\")\r\n print('train=%6.2f test=%6.2f control=%6.2f' % \r\n (trainAccuracy,testAccuracy,controlAccuracy))\r\n # compute accuracy given predicted value\r\n \r\n \r\n pickle.dump(model1,open(root+modelName+\".pckmdl\",\"wb\"))\r\n \r\n print(ytestpred[0])\r\n print(xtest[0][0:20])\r\n testfile=open(root+modelName+\"testdata.txt\",\"wt\")\r\n \r\n testfile.write(ytestpred[0])\r\n testfile.write(\"\\n\")\r\n testfile.write(xtest[0])\r\n testfile.write(\"\\n\")\r\n testfile.write(ytestpred[10])\r\n testfile.write(\"\\n\")\r\n testfile.write(xtest[10])\r\n testfile.write(\"\\n\")\r\n testfile.close()\r\n print( model1.message)\r\n \r\n \r\n \r\nif __name__=='__main__':\r\n main()\r\n "
] | [
[
"numpy.random.permutation",
"sklearn.metrics.accuracy_score",
"numpy.unique",
"sklearn.model_selection.train_test_split"
]
] |
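
The APIs flagged for this row cover the train/test split, accuracy scoring, and the shuffled-label control computed in `main()`. A self-contained sketch of that evaluation pattern on toy data (the real script feeds `corpus.words` and `corpus.y`):

```python
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

X = np.arange(100).reshape(-1, 1)              # toy features
y = np.random.randint(0, 4, size=100)          # toy labels

xtrain, xtest, ytrain, ytest = train_test_split(X, y, test_size=0.3, random_state=45)
ypred = np.random.permutation(ytest)           # control: shuffled labels
print(accuracy_score(ytest, ypred))            # chance-level accuracy
print(np.unique(ypred, return_counts=True))    # predicted class counts
```
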
dampierch/herv | [
"9f1ce0e676977b6c8d25fdf446c0807826b80bea"
] | [
"scripts/gdc_req_legacy.py"
] | [
"'''\nthis script queries the gdc legacy archive via the search and retrieve api and\nreturns msi_status object (from files endpoint on legacy)\n-- get uuids of xml files with the msi annotations from legacy server\n-- download each xml file\n-- parse xml files to extract msi annotations for each subject\n\nscript should be called from within gdc_ann_make, which itself should be called\nas part of snakemake pipeline\n-- usage: snakemake setup_tcga\n'''\n\n\nimport io\nimport json\nimport os\nimport pandas as pd\nimport requests\nimport re\nimport subprocess\nimport glob\nimport xml.etree.ElementTree as ET\n\n\nmodname = 'gdc_req_legacy'\n\n\ndef set_filters():\n '''\n set filters for gdc legacy files endpoint search\n -- json format\n -- for files.data_type, values for MSI status are 'Auxiliary test' and\n 'Microsatellite instability'\n -- here use 'Auxiliary test' per TCGAbiolinks examples\n '''\n filters = {\n 'op':'and',\n 'content':[\n {'op':'or',\n 'content':[\n {'op':'in',\n 'content':{\n 'field':'cases.project.project_id',\n 'value':'TCGA-COAD'\n }\n },\n {'op':'in',\n 'content':{\n 'field':'cases.project.project_id',\n 'value':'TCGA-READ'\n }\n }\n ]\n },\n {'op':'and',\n 'content':[\n {'op':'in',\n 'content':{\n 'field':'files.data_category',\n 'value':'Other'\n }\n },\n {'op':'in',\n 'content':{\n 'field':'files.data_type',\n 'value':'Auxiliary test'\n }\n },\n {'op':'in',\n 'content':{\n 'field':'files.access',\n 'value':'open'\n }\n }\n ]\n }\n ]\n }\n filters = json.dumps(filters)\n return filters\n\n\ndef set_fields():\n '''\n set fields for extraction from endpoint\n '''\n fields = [\n 'file_name',\n 'file_id',\n 'md5sum',\n 'file_size',\n 'state'\n ]\n fields = ','.join(fields)\n return fields\n\n\ndef set_params(filters,fields):\n '''\n set parameters for https get request to endpoint\n -- set size parameter empirically to a level greater than number of target\n cases to get all records at once\n '''\n params = {\n 'filters': filters,\n 'fields': fields,\n 'format': 'TSV',\n 'size': '1500'\n }\n return params\n\n\ndef get_results(endpoint,params):\n '''\n given an endpoint and parameters, execute https GET request for xml file_id\n entities and build results dataframe with msi results\n '''\n response = requests.get(endpoint, params=params)\n object = io.StringIO(response.content.decode('utf-8'))\n results = pd.read_table(object)\n return results\n\n\ndef download_xml_uuid(files_res,dest):\n '''\n download xml files one at a time by uuid\n '''\n file_count = 0\n for uuid in files_res.id:\n cmd = ' '.join(['gdc-client download',uuid,'-d',dest])\n subprocess.call(cmd, shell=True)\n print(' '.join([uuid,'downloaded']))\n file_count = file_count + 1\n print(' '.join([str(file_count),'files downloaded']))\n\n\ndef download_xml_manifest(files_res,dest):\n '''\n -- create manifest object\n -- write manifest to file\n -- use manifest for bulk download\n '''\n select = ['file_id', 'file_name', 'md5sum', 'file_size', 'state']\n manifest = files_res[select]\n manifest.columns = ['id', 'filename', 'md5', 'size', 'state']\n manifest = manifest.sort_values(by=['id'])\n out_file = dest + 'manifest.tsv'\n manifest.to_csv(out_file, sep='\\t', index=False)\n cmd = ' '.join(['gdc-client download','-m',out_file,'-d',dest])\n subprocess.call(cmd, shell=True)\n print('manifest downloaded')\n\n\ndef parse_xml(files_res,dest):\n '''\n parse xml files to extract msi status\n '''\n msi_dict = {}\n msi_dict['subject_id'] = []\n msi_dict['msi_status'] = []\n tag1 = 
'mononucleotide_and_dinucleotide_marker_panel_analysis_status'\n tag2 = 'mononucleotide_marker_panel_analysis_status'\n file_count = 0\n for uuid in files_res.id:\n pattern = dest + uuid + '/*.xml'\n fn = glob.glob(pattern)[0]\n tree = ET.parse(fn)\n for elem in tree.getiterator():\n if 'bcr_patient_barcode' in elem.tag:\n subject_id = elem.text\n if tag1 in elem.tag and elem.text != None:\n msi_status = elem.text\n elif tag2 in elem.tag and elem.text != None:\n msi_status = elem.text\n msi_dict['subject_id'].append(subject_id)\n msi_dict['msi_status'].append(msi_status)\n file_count = file_count + 1\n print(' '.join([str(file_count),'files parsed']))\n msi_res = pd.DataFrame.from_dict(msi_dict)\n return msi_res\n\n\ndef check_outpath(out_path):\n '''\n check for presence of absence of out_path and make directory if absent\n '''\n l = out_path.strip('/').split('/')\n d = ''\n for e in l:\n d = d + '/' + e\n if os.path.exists(d):\n print(d,'present')\n else:\n print(d,'absent')\n print('making',d,'now')\n os.mkdir(d)\n\n\ndef main():\n endpoint = 'https://api.gdc.cancer.gov/legacy/files/'\n filters = set_filters()\n fields = set_fields()\n params = set_params(filters, fields)\n files_res = get_results(endpoint, params)\n dest = os.environ['ann_dir'] + 'tcga/msi/'\n check_outpath(dest)\n download_xml_manifest(files_res, dest)\n msi_res = parse_xml(files_res, dest)\n return msi_res\n\n\nif __name__ == '__main__':\n print('This script is not meant to be run as main. See usage statment:')\n print('usage: snakemake setup_tcga')\nelse:\n msi_res = main()\n"
] | [
[
"pandas.read_table",
"pandas.DataFrame.from_dict"
]
] |
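
The two APIs flagged here are how `get_results` parses the GDC TSV response and how `parse_xml` turns the collected MSI annotations into a DataFrame. A minimal offline sketch of both calls (the TSV body and IDs below are made up):

```python
import io
import pandas as pd

# Parse a TSV response body, as get_results does with the GDC files endpoint.
tsv = "id\tfile_name\tmd5sum\nabc123\taux_test.xml\tdeadbeef\n"
files_res = pd.read_table(io.StringIO(tsv))

# Build the MSI results table, as parse_xml does.
msi_dict = {"subject_id": ["TCGA-AA-0001"], "msi_status": ["MSS"]}
msi_res = pd.DataFrame.from_dict(msi_dict)
print(files_res.shape, msi_res.shape)
```
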
spacegoing/t2t_caps | [
"ded708b738fa8966eb7544708c4a785479da4c3c"
] | [
"tensor2tensor/layers/discretization.py"
] | [
"# coding=utf-8\n# Copyright 2018 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Discretization bottlenecks used to train discrete latent variables.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom functools import partial\n# Dependency imports\nfrom tensor2tensor.layers import common_layers\nimport tensorflow as tf\nfrom tensorflow.python.training import moving_averages\n\n\ndef project_hidden(x, projection_tensors, hidden_size, num_blocks):\n \"\"\"Project encoder hidden state into block_dim using projection tensors.\n\n Args:\n x: Encoder hidden state of shape [-1, hidden_size].\n projection_tensors: Projection tensors used to project the hidden state.\n hidden_size: Dimension of the latent space.\n num_blocks: Number of blocks in DVQ.\n\n Returns:\n Projected states of shape [-1, num_blocks, block_dim].\n \"\"\"\n x = tf.reshape(x, shape=[1, -1, hidden_size])\n x_tiled = tf.reshape(\n tf.tile(x, multiples=[num_blocks, 1, 1]),\n shape=[num_blocks, -1, hidden_size])\n x_projected = tf.matmul(x_tiled, projection_tensors)\n x_projected = tf.transpose(x_projected, perm=[1, 0, 2])\n return x_projected\n\n\ndef slice_hidden(x, hidden_size, num_blocks):\n \"\"\"Slice encoder hidden state into block_dim.\n\n Args:\n x: Encoder hidden state of shape [-1, hidden_size].\n hidden_size: Dimension of the latent space.\n num_blocks: Number of blocks in DVQ.\n\n Returns:\n Sliced states of shape [-1, num_blocks, block_dim].\n \"\"\"\n block_dim = int(hidden_size // num_blocks)\n x_sliced = tf.reshape(x, shape=[-1, num_blocks, block_dim])\n return x_sliced\n\n\ndef nearest_neighbor(x,\n means,\n block_v_size,\n random_top_k=1,\n soft_em=False,\n num_samples=1):\n \"\"\"Find the nearest element in means to elements in x.\n\n Args:\n x: Batch of encoder continuous latent states sliced/projected into shape\n [-1, num_blocks, block_dim].\n means: Embedding table of shpae [num_blocks, block_v_size, block_dim].\n block_v_size: Number of table entries per block.\n random_top_k: Noisy top-k if this is bigger than 1 (Default: 1).\n soft_em: If True then use soft EM rather than hard EM (Default: False).\n num_samples: Number of samples to take in soft EM (Default: 1).\n\n Returns:\n Tensor with nearest element in mean encoded in one-hot notation\n and distances.\n \"\"\"\n x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keep_dims=True)\n means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keep_dims=True)\n scalar_prod = tf.matmul(\n tf.transpose(x, perm=[1, 0, 2]), tf.transpose(means, perm=[0, 2, 1]))\n scalar_prod = tf.transpose(scalar_prod, perm=[1, 0, 2])\n dist = x_norm_sq + tf.transpose(\n means_norm_sq, perm=[2, 0, 1]) - 2 * scalar_prod\n\n # computing cluster probabilities\n if soft_em:\n num_blocks = common_layers.shape_list(dist)[1]\n nearest_idx = tf.stack(\n [\n tf.multinomial(-dist[:, i, :], num_samples=num_samples)\n for i in range(num_blocks)\n ],\n axis=1)\n nearest_hot = 
tf.one_hot(nearest_idx, depth=block_v_size)\n nearest_hot = tf.reduce_mean(nearest_hot, axis=-2)\n else:\n if random_top_k > 1:\n _, top_k_idx = tf.nn.top_k(-dist, k=random_top_k)\n nearest_idx = tf.gather(\n top_k_idx,\n tf.random_uniform(\n [1], minval=0, maxval=random_top_k - 1, dtype=tf.int32),\n axis=-1)\n else:\n nearest_idx = tf.argmax(-dist, axis=-1)\n nearest_hot = tf.one_hot(nearest_idx, block_v_size)\n return nearest_hot\n\n\ndef embedding_lookup(x,\n means,\n num_blocks,\n block_v_size,\n random_top_k=1,\n soft_em=False,\n num_samples=1):\n \"\"\"Compute nearest neighbors and loss for training the embeddings via DVQ.\n\n Args:\n x: Batch of encoder continuous latent states sliced/projected into shape\n [-1, num_blocks, block_dim].\n means: Embedding table of shape [num_blocks, block_v_size, block_dim].\n num_blocks: Number of blocks in DVQ.\n block_v_size: Number of table entries per block.\n random_top_k: Noisy top-k if this is bigger than 1 (Default: 1).\n soft_em: If True then use soft EM rather than hard EM (Default: False).\n num_samples: Number of samples to use for soft EM (Default: 1).\n\n Returns:\n The nearest neighbor in one hot form, the nearest neighbor itself, the\n commitment loss, embedding training loss and distances.\n \"\"\"\n x_means_hot = nearest_neighbor(\n x,\n means,\n block_v_size,\n random_top_k,\n soft_em=soft_em,\n num_samples=num_samples)\n x_means_hot_flat = tf.reshape(x_means_hot, [-1, num_blocks, block_v_size])\n x_means = tf.matmul(tf.transpose(x_means_hot_flat, perm=[1, 0, 2]), means)\n x_means = tf.transpose(x_means, [1, 0, 2])\n q_loss = tf.reduce_mean(tf.square((tf.stop_gradient(x) - x_means)))\n e_loss = tf.reduce_mean(tf.square(x - tf.stop_gradient(x_means)))\n return x_means_hot, x_means, q_loss, e_loss\n\n\ndef bit_to_int(x_bit, num_bits, base=2):\n \"\"\"Turn x_bit representing numbers bitwise (lower-endian) to int tensor.\n\n Args:\n x_bit: Tensor containing numbers in a particular base to be converted to\n int.\n num_bits: Number of bits in the representation.\n base: Base of the representation.\n\n Returns:\n Integer representation of this number.\n \"\"\"\n x_l = tf.stop_gradient(tf.to_int32(tf.reshape(x_bit, [-1, num_bits])))\n x_labels = []\n for i in range(num_bits):\n x_labels.append(x_l[:, i] * tf.to_int32(base)**tf.to_int32(i))\n res = sum(x_labels)\n return tf.to_int32(tf.reshape(res, common_layers.shape_list(x_bit)[:-1]))\n\n\ndef int_to_bit(x_int, num_bits, base=2):\n \"\"\"Turn x_int representing numbers into a bitwise (lower-endian) tensor.\n\n Args:\n x_int: Tensor containing integer to be converted into base notation.\n num_bits: Number of bits in the representation.\n base: Base of the representation.\n\n Returns:\n Corresponding number expressed in base.\n \"\"\"\n x_l = tf.to_int32(tf.expand_dims(x_int, axis=-1))\n x_labels = []\n for i in range(num_bits):\n x_labels.append(\n tf.floormod(\n tf.floordiv(tf.to_int32(x_l),\n tf.to_int32(base)**i), tf.to_int32(base)))\n res = tf.concat(x_labels, axis=-1)\n return tf.to_float(res)\n\n\ndef int_to_bit_embed(x_int, num_bits, embedding_size, base=2):\n \"\"\"Turn x_int into a bitwise (lower-endian) tensor and embed densly.\"\"\"\n shape = common_layers.shape_list(x_int)\n inputs = int_to_bit(x_int, num_bits, base=base)\n inputs = tf.reshape(inputs, shape[:-1] + [shape[-1] * 8])\n inputs = 2.0 * tf.to_float(inputs) - 1.0 # Move from 0/1 to -1/1.\n return tf.layers.dense(inputs, embedding_size, name=\"int_to_bit_embed\")\n\n\ndef embed(x,\n hidden_size,\n z_size,\n 
filter_size,\n name,\n bottleneck_kind=\"dvq\",\n soft_em=False,\n num_blocks=2,\n num_residuals=1,\n block_v_size=None,\n means=None):\n \"\"\"Embedding function that takes discrete latent and returns embedding.\n\n Args:\n x: Input to the discretization bottleneck.\n hidden_size: Dimension of the latent state.\n z_size: Number of bits used to produce discrete code; discrete codes range\n from 1 to 2**z_size.\n filter_size: Filter size to be used for the embedding function.\n name: Name for the bottleneck scope.\n bottleneck_kind: Kind of discretization bottleneck to use; one of dvq,\n semhash, gumbel-softmax (Default: dvq).\n soft_em: If True then it uses a multi-sample version of EM (Default: False).\n num_blocks: Number of blocks in DVQ (Default: 2).\n num_residuals: Number of residuals (Default: 1).\n block_v_size: Number of embedding entries per block (Default: None).\n means: The embedding table for dvq (Default: None).\n\n Returns:\n Continuous embedding to be passed on to the decoder.\n\n Raises:\n ValueError: For unknown or missing arguments.\n \"\"\"\n with tf.variable_scope(name, reuse=tf.AUTO_REUSE):\n if bottleneck_kind == \"semhash\":\n c = int_to_bit(x, z_size)\n h1a = tf.layers.dense(c, filter_size, name=\"vch1a\")\n h1b = tf.layers.dense(1.0 - c, filter_size, name=\"vch1b\")\n h1 = h1a + h1b\n elif bottleneck_kind == \"gumbel-softmax\":\n hot = tf.one_hot(x, 2**z_size)\n h1 = tf.layers.dense(hot, hidden_size, name=\"dae_dense\")\n elif bottleneck_kind == \"dvq\":\n if block_v_size is None:\n raise ValueError(\"Bottleneck kind is dvq but block_v_size is None.\")\n\n if soft_em:\n assert num_residuals == 1\n x_hot_flat = tf.reshape(x, shape=[-1, num_blocks, block_v_size])\n h1 = tf.matmul(tf.transpose(x_hot_flat, perm=[1, 0, 2]), means[0])\n h1 = tf.transpose(h1, perm=[1, 0, 2])\n new_shape = common_layers.shape_list(x)\n new_shape[-1] = hidden_size\n h1 = tf.reshape(h1, shape=new_shape)\n else:\n shape_x = common_layers.shape_list(x)\n x_flat = tf.reshape(x, [-1, 1])\n c = int_to_bit(x_flat, num_bits=z_size, base=2)\n shape = common_layers.shape_list(c)\n new_shape = shape\n new_shape[-1] = num_residuals\n new_shape.append(num_blocks)\n new_shape.append(int(z_size / (num_residuals * num_blocks)))\n c = tf.to_int32(tf.reshape(c, shape=new_shape))\n h1_shape = shape_x\n h1_shape.append(hidden_size)\n h1 = tf.zeros(dtype=tf.float32, shape=h1_shape)\n for i in range(num_residuals):\n c_residual = bit_to_int(\n c[:, :, i, :, :],\n num_bits=int(z_size / (num_residuals * num_blocks)),\n base=2)\n c_hot = tf.one_hot(c_residual, depth=block_v_size, axis=-1)\n c_hot_flat = tf.reshape(c_hot, shape=[-1, num_blocks, block_v_size])\n h1_residual = tf.matmul(\n tf.transpose(c_hot_flat, perm=[1, 0, 2]), means[i])\n h1_residual = tf.transpose(h1_residual, perm=[1, 0, 2])\n h1_residual = tf.reshape(h1_residual, shape=h1_shape)\n h1 += h1_residual\n elif bottleneck_kind == \"rounding\":\n h1 = x\n else:\n raise ValueError(\"Unknown bottleneck kind.\")\n\n h2 = tf.layers.dense(tf.nn.relu(h1), filter_size, name=\"vch2\")\n return tf.layers.dense(tf.nn.relu(h2), hidden_size, name=\"vcfin\")\n\n\ndef vae(x, name, z_size):\n \"\"\"Simple variational autoencoder without discretization.\n\n Args:\n x: Input to the discretization bottleneck.\n name: Name for the bottleneck scope.\n z_size: Number of bits used to produce discrete code; discrete codes range\n from 1 to 2**z_size.\n\n Returns:\n Embedding function, latent, loss, mu and log_simga.\n \"\"\"\n with tf.variable_scope(name):\n mu = 
tf.layers.dense(x, z_size, name=\"mu\")\n log_sigma = tf.layers.dense(x, z_size, name=\"log_sigma\")\n shape = common_layers.shape_list(x)\n epsilon = tf.random_normal([shape[0], shape[1], 1, z_size])\n z = mu + tf.exp(log_sigma / 2) * epsilon\n kl = 0.5 * tf.reduce_mean(\n tf.exp(log_sigma) + tf.square(mu) - 1. - log_sigma, axis=-1)\n free_bits = z_size // 4\n kl_loss = tf.reduce_mean(tf.maximum(kl - free_bits, 0.0))\n return z, kl_loss, mu, log_sigma\n\n\ndef top_k_softmax(x, k):\n \"\"\"Calculate softmax(x), select top-k and rescale to sum to 1.\n\n Args:\n x: Input to softmax over.\n k: Number of top-k to select.\n\n Returns:\n softmax(x) and maximum item.\n \"\"\"\n x = tf.nn.softmax(x)\n top_x, _ = tf.nn.top_k(x, k=k + 1)\n min_top = tf.reduce_min(top_x, axis=-1, keep_dims=True)\n x = tf.nn.relu((x - min_top) + 1e-12)\n x /= tf.reduce_sum(x, axis=-1, keep_dims=True)\n return x, tf.reduce_max(top_x, axis=-1)\n\n\ndef gumbel_sample(shape):\n \"\"\"Sample from the Gumbel distribution, protect from overflows.\n\n Args:\n shape: Shape of Gumbel samples.\n\n Returns:\n Noise drawn from Gumbel distribution.\n \"\"\"\n uniform_samples = tf.random_uniform(shape, minval=0.00001, maxval=0.99998)\n return -tf.log(-tf.log(uniform_samples))\n\n\ndef gumbel_softmax(x,\n name,\n z_size,\n mode,\n softmax_k=0,\n kl_warmup_steps=150000,\n summary=True):\n \"\"\"Gumbel softmax discretization bottleneck.\n\n Args:\n x: Input to the discretization bottleneck.\n name: Name for the bottleneck scope.\n z_size: Number of bits used to produce discrete code; discrete codes range\n from 1 to 2**z_size.\n mode: Mode represents whether we are training or testing for bottlenecks\n that differ in behavior (Default: None).\n softmax_k: If > 1 then do top-k softmax (Default: 0).\n kl_warmup_steps: Number of steps for kl warmup (Default: 150000).\n summary: If True, then write summaries (Default: True).\n\n Returns:\n Embedding function, discrete code and loss.\n \"\"\"\n with tf.variable_scope(name):\n m = tf.layers.dense(x, 2**z_size, name=\"mask\")\n if softmax_k > 0:\n m, kl = top_k_softmax(m, softmax_k)\n return m, m, 1.0 - tf.reduce_mean(kl)\n logsm = tf.nn.log_softmax(m)\n\n # Gumbel-softmax sample.\n gumbel_samples = gumbel_sample(common_layers.shape_list(m))\n steps = kl_warmup_steps\n gumbel_samples *= common_layers.inverse_exp_decay(steps // 5) * 0.5\n temperature = 1.2 - common_layers.inverse_lin_decay(steps)\n\n # 10% of the time keep reasonably high temperature to keep learning.\n temperature = tf.cond(\n tf.less(tf.random_uniform([]), 0.9), lambda: temperature,\n lambda: tf.random_uniform([], minval=0.5, maxval=1.0))\n s = tf.nn.softmax((logsm + gumbel_samples) / temperature)\n m = tf.nn.softmax(m)\n kl = -tf.reduce_max(logsm, axis=-1)\n\n if summary:\n tf.summary.histogram(\"max-log\", tf.reshape(kl, [-1]))\n\n # Calculate the argmax and construct hot vectors.\n maxvec = tf.reshape(tf.argmax(m, axis=-1), [-1])\n maxvhot = tf.stop_gradient(tf.one_hot(maxvec, 2**z_size))\n\n # Add losses that prevent too few being used.\n distrib = tf.reshape(logsm, [-1, 2**z_size]) * maxvhot\n d_mean = tf.reduce_mean(distrib, axis=[0], keep_dims=True)\n d_variance = tf.reduce_mean(tf.square(distrib - d_mean), axis=[0])\n d_dev = -tf.reduce_mean(d_variance)\n ret = s\n\n if mode != tf.contrib.learn.ModeKeys.TRAIN:\n ret = tf.reshape(maxvhot, common_layers.shape_list(s)) # Just hot @eval.\n return m, ret, d_dev * 5.0 + tf.reduce_mean(kl) * 0.002\n\n\ndef discrete_bottleneck(x,\n hidden_size,\n z_size,\n filter_size,\n 
name,\n mode=None,\n startup_steps=50000,\n bottleneck_kind=\"dvq\",\n num_blocks=2,\n num_residuals=1,\n reshape_method=\"slice\",\n projection_tensors=None,\n means=None,\n beta=0.25,\n noise_dev=1.,\n decay=0.999,\n discrete_mix=0.5,\n random_top_k=1,\n soft_em=False,\n num_samples=1,\n epsilon=1e-5,\n softmax_k=0,\n kl_warmup_steps=150000,\n ema=True,\n ema_count=None,\n ema_means=None,\n summary=True):\n \"\"\"Discretization bottleneck for latent variables.\n\n Args:\n x: Input to the discretization bottleneck.\n hidden_size: Dimension of the latent state.\n z_size: Number of bits used to produce discrete code; discrete codes range\n from 1 to 2**z_size.\n filter_size: Filter size to be used for the embedding function.\n name: Name for the bottleneck scope.\n mode: Mode represents whether we are training or testing for bottlenecks\n that differ in behavior (Default: None).\n startup_steps: Number of steps after which latent predictor is trained\n (Default: 50000).\n bottleneck_kind: Kind of discretization bottleneck to use; one of dvq,\n semhash, gumbel-softmax (Default: dvq).\n num_blocks: Number of blocks to use for decomposed vector\n quantization (Default: 2).\n num_residuals: Number of residual units used to compute nearest\n neighbors (Default: 1).\n reshape_method: Method to reshape for DVQ (Default: slice).\n projection_tensors: If the reshape method is project, then these are the\n tensors used to project (Default: None).\n means: The embedding table for dvq (Default: None).\n beta: Beta factor for the DVQ loss (Default: 0.25).\n noise_dev: Stddev for noise added for semhash (Default: 0).\n decay: Decay factor for the exponential moving average (Default: 0.999).\n discrete_mix: Factor for mixing discrete and non-discrete input for semhash\n (Default: 0.5).\n random_top_k: Noisy top-k for DVQ (Default: 1).\n soft_em: If True then use soft EM rather than hard EM (Default: False).\n num_samples: Number of samples for soft EM (Default: 1).\n epsilon: Epsilon parameter for DVQ (Default: 1e-5).\n softmax_k: If > 1 then do top-k softmax (Default: 0).\n kl_warmup_steps: Number of steps for kl warmup (Default: 150000).\n ema: If True update embeddings using exponential moving averages (Default:\n True).\n ema_count: Table of counts for each embedding corresponding to how many\n examples in a batch it was the closest to (Default: None).\n ema_means: Exponentially averaged version of the embeddings (Default: None).\n summary: If True, then write summaries (Default: True).\n\n Returns:\n Embedding to pass to the decoder, discrete latent, loss, and the embedding\n function.\n\n Raises:\n ValueError: If projection_tensors is None for reshape_method project, or\n ema_count or ema_means is None if we are using ema, or unknown args.\n \"\"\"\n block_v_size = None\n if bottleneck_kind == \"dvq\":\n # Define the dvq parameters\n assert means is not None\n\n # Check block dimensions add up\n if hidden_size % num_blocks != 0:\n raise ValueError(\"num_blocks does not divide hidden size\")\n\n if z_size % num_residuals != 0:\n raise ValueError(\"num_residuals does not divide embedding table size\")\n\n z_size_per_residual = int(z_size / num_residuals)\n\n if z_size_per_residual % num_blocks != 0:\n raise ValueError(\"num_blocks does not divide embedding table size\")\n\n block_v_size = 2**(z_size_per_residual / num_blocks)\n block_v_size = int(block_v_size)\n\n # Set the reshape method corresponding to projections or slices\n if reshape_method == \"slice\":\n reshape_fn = partial(\n slice_hidden, 
hidden_size=hidden_size, num_blocks=num_blocks)\n elif reshape_method == \"project\":\n if projection_tensors is None:\n raise ValueError(\n \"Projection tensors is None for reshape_method project\")\n reshape_fn = partial(\n project_hidden,\n projection_tensors=projection_tensors,\n hidden_size=hidden_size,\n num_blocks=num_blocks)\n else:\n raise ValueError(\"Unknown reshape_method\")\n\n # Check if the ema settings make sense\n if ema:\n if ema_count is None:\n raise ValueError(\"ema_count is None but ema is True\")\n if ema_means is None:\n raise ValueError(\"ema_means is None but ema is True\")\n\n with tf.variable_scope(name, reuse=tf.AUTO_REUSE):\n l = tf.constant(0.0)\n if bottleneck_kind == \"dense\":\n c = tf.layers.dense(x, z_size, name=\"vcc\")\n h1 = tf.layers.dense(c, filter_size, name=\"vch1\")\n elif bottleneck_kind == \"vae\":\n c, l, _, _ = vae(x, z_size, \"vae\")\n h1 = tf.layers.dense(c, filter_size, name=\"vch1\")\n elif bottleneck_kind == \"semhash\":\n c = tf.layers.dense(x, z_size, name=\"vcc\")\n y_clean = common_layers.saturating_sigmoid(c)\n if summary:\n tf.summary.histogram(\"y_clean\", tf.reshape(y_clean, [-1]))\n if noise_dev > 0 and mode == tf.estimator.ModeKeys.TRAIN:\n noise = tf.truncated_normal(\n common_layers.shape_list(c), mean=0.0, stddev=noise_dev)\n y = common_layers.saturating_sigmoid(c + noise)\n else:\n y = y_clean\n d = tf.to_float(tf.less(0.5, y))\n y_discrete = tf.stop_gradient(d) + y - tf.stop_gradient(y)\n pd = common_layers.inverse_exp_decay(startup_steps * 2)\n pd *= discrete_mix\n pd = pd if mode == tf.estimator.ModeKeys.TRAIN else 1.0\n c = tf.where(\n tf.less(tf.random_uniform([common_layers.shape_list(y)[0]]), pd),\n y_discrete, y)\n h1a = tf.layers.dense(c, filter_size, name=\"vch1a\")\n h1b = tf.layers.dense(1.0 - c, filter_size, name=\"vch1b\")\n h1 = h1a + h1b\n dx = tf.to_int32(tf.stop_gradient(d))\n c = bit_to_int(dx, z_size)\n elif bottleneck_kind == \"gumbel-softmax\":\n _, hot, l = gumbel_softmax(x, name, z_size, mode, softmax_k,\n kl_warmup_steps, summary)\n c = tf.argmax(hot, axis=-1)\n h1 = tf.layers.dense(hot, hidden_size, name=\"dae_dense\")\n elif bottleneck_kind == \"dvq\":\n x_reshaped = reshape_fn(x)\n x_res = x_reshaped\n x_means_hot = []\n x_means = 0\n l = 0\n for i in range(num_residuals):\n x_means_hot_res, x_means_res, q_loss_res, e_loss_res = embedding_lookup(\n x_res, means[i], num_blocks, block_v_size, random_top_k, soft_em,\n num_samples)\n # Update the ema variables\n if ema:\n tf.logging.info(\"Using EMA with beta = {}\".format(beta))\n updated_ema_count_res = moving_averages.assign_moving_average(\n ema_count[i],\n tf.reduce_sum(\n tf.reshape(\n x_means_hot_res, shape=[-1, num_blocks, block_v_size]),\n axis=0),\n decay,\n zero_debias=False)\n\n dw = tf.matmul(\n tf.transpose(x_means_hot_res, perm=[1, 2, 0]),\n tf.transpose(x_res, perm=[1, 0, 2]))\n\n updated_ema_means_res = moving_averages.assign_moving_average(\n ema_means[i], dw, decay, zero_debias=False)\n n = tf.reduce_sum(updated_ema_count_res, axis=-1, keep_dims=True)\n updated_ema_count_res = ((updated_ema_count_res + epsilon) /\n (n + 2**z_size * epsilon) * n)\n # pylint: disable=g-no-augmented-assignment\n updated_ema_means_res = updated_ema_means_res / tf.expand_dims(\n updated_ema_count_res, axis=-1)\n # pylint: enable=g-no-augmented-assignment\n\n with tf.control_dependencies([e_loss_res]):\n update_means_res = tf.assign(means[i], updated_ema_means_res)\n with tf.control_dependencies([update_means_res]):\n l += beta * e_loss_res\n else:\n l 
+= q_loss_res + beta * e_loss_res\n\n # Update the residuals\n x_res -= x_means_res\n x_means += x_means_res\n x_means_hot.append(x_means_hot_res)\n\n # Get the discrete latent representation\n x_means_hot = tf.stack(x_means_hot, axis=1)\n x_means_idx = tf.argmax(x_means_hot, axis=-1)\n\n # Get the binary representation\n x_means_bits = int_to_bit(\n x_means_idx,\n num_bits=int(z_size / (num_residuals * num_blocks)),\n base=2)\n shape = common_layers.shape_list(x_means_bits)\n new_shape = shape[:-2]\n new_shape[-1] = z_size\n x_means_bits = tf.reshape(x_means_bits, shape=new_shape)\n c = bit_to_int(tf.to_int32(x_means_bits), num_bits=z_size, base=2)\n\n # Adjust shape of c\n shape_x = common_layers.shape_list(x)\n new_shape = shape_x[:-1]\n c = tf.reshape(c, new_shape)\n\n # If we are doing soft EM then c is x_means_hot\n if soft_em:\n c = x_means_hot\n new_shape.append(block_v_size)\n c = tf.reshape(c, new_shape)\n\n x_means = tf.reshape(x_means, shape_x)\n x_reshaped = tf.reshape(x_reshaped, shape_x)\n h1 = x_reshaped + tf.stop_gradient(x_means - x_reshaped)\n else:\n raise ValueError(\"Unknown discretization method.\")\n\n h2 = tf.layers.dense(tf.nn.relu(h1), filter_size, name=\"vch2\")\n res = tf.layers.dense(tf.nn.relu(h2), hidden_size, name=\"vcfin\")\n\n embed_fn = partial(\n embed,\n hidden_size=hidden_size,\n z_size=z_size,\n filter_size=filter_size,\n name=name,\n bottleneck_kind=bottleneck_kind,\n soft_em=soft_em,\n num_blocks=num_blocks,\n num_residuals=num_residuals,\n block_v_size=block_v_size,\n means=means)\n return res, c, l, embed_fn\n\n\n# New API for discretization bottlenecks:\n# * Each method is separate and provides 2 functions:\n# * The [method]_bottleneck function returns discretized state.\n# * The [method]_unbottleneck function moves from discretized state to dense.\n\n\ndef tanh_discrete_bottleneck(x, bottleneck_size, bottleneck_noise,\n discretize_warmup_steps, mode):\n \"\"\"Simple discretization through tanh, flip bottleneck_noise many bits.\"\"\"\n x = tf.tanh(tf.layers.dense(x, bottleneck_size,\n name=\"tanh_discrete_bottleneck\"))\n d = x + tf.stop_gradient(2.0 * tf.to_float(tf.less(0.0, x)) - 1.0 - x)\n if mode == tf.estimator.ModeKeys.TRAIN:\n noise = tf.random_uniform(common_layers.shape_list(x))\n noise = 2.0 * tf.to_float(tf.less(bottleneck_noise, noise)) - 1.0\n d *= noise\n d = common_layers.mix(d, x, discretize_warmup_steps,\n mode == tf.estimator.ModeKeys.TRAIN)\n return d\n\n\ndef tanh_discrete_unbottleneck(x, hidden_size):\n \"\"\"Simple un-discretization from tanh.\"\"\"\n x = tf.layers.dense(x, hidden_size, name=\"tanh_discrete_unbottleneck\")\n return x\n\n\ndef isemhash_bottleneck(x, bottleneck_size, bottleneck_noise,\n discretize_warmup_steps, mode,\n isemhash_noise_dev=0.5, isemhash_mix_prob=0.5):\n \"\"\"Improved semantic hashing bottleneck.\"\"\"\n with tf.variable_scope(\"isemhash_bottleneck\"):\n x = tf.layers.dense(x, bottleneck_size, name=\"dense\")\n y = common_layers.saturating_sigmoid(x)\n if isemhash_noise_dev > 0 and mode == tf.estimator.ModeKeys.TRAIN:\n noise = tf.truncated_normal(\n common_layers.shape_list(x), mean=0.0, stddev=isemhash_noise_dev)\n y = common_layers.saturating_sigmoid(x + noise)\n d = tf.to_float(tf.less(0.5, y)) + y - tf.stop_gradient(y)\n d = 2.0 * d - 1.0 # Move from [0, 1] to [-1, 1].\n if mode == tf.estimator.ModeKeys.TRAIN: # Flip some bits.\n noise = tf.random_uniform(common_layers.shape_list(x))\n noise = 2.0 * tf.to_float(tf.less(bottleneck_noise, noise)) - 1.0\n d *= noise\n d = 
common_layers.mix(d, 2.0 * y - 1.0, discretize_warmup_steps,\n mode == tf.estimator.ModeKeys.TRAIN,\n max_prob=isemhash_mix_prob)\n return d\n\n\ndef isemhash_unbottleneck(x, hidden_size, isemhash_filter_size_multiplier=1.0):\n \"\"\"Improved semantic hashing un-bottleneck.\"\"\"\n filter_size = int(hidden_size * isemhash_filter_size_multiplier)\n x = 0.5 * (x - 1.0) # Move from [-1, 1] to [0, 1].\n with tf.variable_scope(\"isemhash_unbottleneck\"):\n h1a = tf.layers.dense(x, filter_size, name=\"hidden1a\")\n h1b = tf.layers.dense(1.0 - x, filter_size, name=\"hidden1b\")\n h2 = tf.layers.dense(tf.nn.relu(h1a + h1b), filter_size, name=\"hidden2\")\n return tf.layers.dense(tf.nn.relu(h2), hidden_size, name=\"final\")\n\n\ndef parametrized_bottleneck(x, hparams):\n \"\"\"Meta-function calling all the above bottlenecks with hparams.\"\"\"\n if hparams.bottleneck_kind == \"tanh_discrete\":\n return tanh_discrete_bottleneck(\n x, hparams.bottleneck_size, hparams.bottleneck_noise * 0.5,\n hparams.discretize_warmup_steps, hparams.mode)\n if hparams.bottleneck_kind == \"isemhash\":\n return isemhash_bottleneck(\n x, hparams.bottleneck_size, hparams.bottleneck_noise * 0.5,\n hparams.discretize_warmup_steps, hparams.mode,\n hparams.isemhash_noise_dev, hparams.isemhash_mix_prob)\n raise ValueError(\"Unsupported hparams.bottleneck_kind %s\"\n % hparams.bottleneck_kind)\n\n\ndef parametrized_unbottleneck(x, hidden_size, hparams):\n \"\"\"Meta-function calling all the above un-bottlenecks with hparams.\"\"\"\n if hparams.bottleneck_kind == \"tanh_discrete\":\n return tanh_discrete_unbottleneck(x, hidden_size)\n if hparams.bottleneck_kind == \"isemhash\":\n return isemhash_unbottleneck(\n x, hidden_size, hparams.isemhash_filter_size_multiplier)\n raise ValueError(\"Unsupported hparams.bottleneck_kind %s\"\n % hparams.bottleneck_kind)\n"
] | [
[
"tensorflow.reduce_max",
"tensorflow.reshape",
"tensorflow.nn.top_k",
"tensorflow.variable_scope",
"tensorflow.matmul",
"tensorflow.one_hot",
"tensorflow.concat",
"tensorflow.random_normal",
"tensorflow.nn.softmax",
"tensorflow.reduce_sum",
"tensorflow.multinomial",
"tensorflow.python.training.moving_averages.assign_moving_average",
"tensorflow.nn.log_softmax",
"tensorflow.less",
"tensorflow.reduce_min",
"tensorflow.constant",
"tensorflow.transpose",
"tensorflow.stack",
"tensorflow.to_float",
"tensorflow.expand_dims",
"tensorflow.random_uniform",
"tensorflow.assign",
"tensorflow.tile",
"tensorflow.layers.dense",
"tensorflow.control_dependencies",
"tensorflow.zeros",
"tensorflow.reduce_mean",
"tensorflow.stop_gradient",
"tensorflow.to_int32",
"tensorflow.exp",
"tensorflow.square",
"tensorflow.argmax",
"tensorflow.log",
"tensorflow.nn.relu",
"tensorflow.maximum"
]
] |
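The file in the record above implements several discretization bottlenecks (semantic hashing, Gumbel-Softmax, and a decomposed vector quantizer with optional EMA codebook updates). As a reading aid, here is a minimal NumPy sketch of the EMA codebook-update pattern used in the `dvq` branch; all names, shapes, and constants are illustrative and not taken from the file.

```python
import numpy as np

def ema_codebook_update(x, codebook, ema_count, ema_sum, decay=0.99, epsilon=1e-5):
    """One EMA update step for a vector-quantization codebook.

    x:         (batch, dim) encoder outputs
    codebook:  (K, dim) current code vectors
    ema_count: (K,) running EMA of cluster sizes
    ema_sum:   (K, dim) running EMA of per-cluster sums
    """
    # Nearest-code assignment as hard one-hot encodings.
    dists = ((x[:, None, :] - codebook[None, :, :]) ** 2).sum(-1)   # (batch, K)
    assign = np.eye(codebook.shape[0])[dists.argmin(-1)]            # (batch, K)

    # EMA of cluster sizes and cluster sums.
    ema_count = decay * ema_count + (1 - decay) * assign.sum(0)
    ema_sum = decay * ema_sum + (1 - decay) * assign.T @ x

    # Laplace smoothing so rarely used codes do not divide by zero.
    n = ema_count.sum()
    count = (ema_count + epsilon) / (n + codebook.shape[0] * epsilon) * n
    codebook = ema_sum / count[:, None]
    return codebook, ema_count, ema_sum
```

The smoothing step mirrors the `(updated_ema_count + epsilon) / (n + 2**z_size * epsilon) * n` expression in the record, with the codebook size written generically as `K`.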
amaarquadri/perfect-information-game | [
"6755f9633935be762d039ece9c0b646c64de6ab8"
] | [
"perfect_information_game/tablebases/symmetry_transform.py"
] | [
"import numpy as np\nfrom perfect_information_game.games import Chess\nfrom perfect_information_game.utils import iter_product\nfrom perfect_information_game.tablebases import get_verified_chess_subclass\n\n\nclass SymmetryTransform:\n # noinspection PyChainedComparisons\n PAWNLESS_UNIQUE_SQUARE_INDICES = [(i, j) for i, j in iter_product(Chess.BOARD_SHAPE)\n if i < 4 and j < 4 and i <= j]\n UNIQUE_SQUARE_INDICES = [(i, j) for i, j in iter_product(Chess.BOARD_SHAPE) if j < 4]\n\n def __init__(self, GameClass, state):\n self.GameClass = get_verified_chess_subclass(GameClass)\n self.flip_colors = self.flip_i = self.flip_j = self.flip_diagonal = False\n\n if self.should_swap_colours(state):\n # black is attacking, so switch white and black\n self.flip_colors = True\n i, j = self.GameClass.get_king_pos(state, self.GameClass.BLACK_SLICE)\n i = self.GameClass.ROWS - 1 - i\n else:\n i, j = self.GameClass.get_king_pos(state, self.GameClass.WHITE_SLICE)\n\n pawnless = np.all(state[:, :, self.GameClass.WHITE_PAWN] == 0) and \\\n np.all(state[:, :, self.GameClass.BLACK_PAWN] == 0)\n\n if pawnless and not (i < 4):\n self.flip_i = True\n i = self.GameClass.ROWS - 1 - i\n if not (j < 4): # horizontal flipping can be done, even with pawns\n self.flip_j = True\n j = self.GameClass.COLUMNS - 1 - j\n if pawnless and not (i <= j):\n self.flip_diagonal = True\n\n def should_swap_colours(self, state):\n heuristic = self.GameClass.heuristic(state)\n if heuristic > 0:\n # white is up in material, so don't swap colours\n return False\n if heuristic < 0:\n # black is up in material, so swap colours\n return True\n # compare the number of pawns on each rank, from most advanced to least advanced pawns\n # no need to check second rank pawns, because if everything else is equal they must be equal too\n for rank in range(7, 2, -1):\n if np.sum(state[rank - 1, :, self.GameClass.BLACK_PAWN]) > \\\n np.sum(state[8 - rank, :, self.GameClass.WHITE_PAWN]):\n # black has more pawns than white on this rank, so swap colours\n return True\n return False\n\n @staticmethod\n def identity(GameClass):\n identity = SymmetryTransform(GameClass, GameClass.STARTING_STATE)\n identity.flip_colors = identity.flip_i = identity.flip_j = identity.flip_diagonal = False\n return identity\n\n @staticmethod\n def random(GameClass, descriptor):\n \"\"\"\n Returns a random symmetry transform for the given descriptor.\n \"\"\"\n random = SymmetryTransform.identity(GameClass)\n pawnless = 'p' not in descriptor and 'P' not in descriptor\n\n random.flip_colors = np.random.random() < 0.5\n random.flip_j = np.random.random() < 0.5\n if pawnless:\n random.flip_i = np.random.random() < 0.5\n random.flip_diagonal = np.random.random() < 0.5\n return random\n\n def is_identity(self):\n return not self.flip_colors and not self.flip_i and not self.flip_j and not self.flip_diagonal\n\n def transform_state(self, state):\n if self.flip_colors:\n state = self.flip_state_colors(self.GameClass, state)\n if self.flip_i:\n state = self.flip_state_i(state)\n if self.flip_j:\n state = self.flip_state_j(state)\n if self.flip_diagonal:\n state = self.flip_state_diagonal(state)\n return state\n\n def untransform_state(self, state):\n # since all transform_funcs are their own inverses, we can just run through them in reverse\n if self.flip_diagonal:\n state = self.flip_state_diagonal(state)\n if self.flip_j:\n state = self.flip_state_j(state)\n if self.flip_i:\n state = self.flip_state_i(state)\n if self.flip_colors:\n state = self.flip_state_colors(self.GameClass, state)\n 
return state\n\n def transform_outcome(self, outcome):\n return -outcome if self.flip_colors else outcome\n\n @staticmethod\n def flip_state_colors(GameClass, state):\n special_layers = np.copy(state[..., -2:])\n special_layers[..., -1] = 1 - special_layers[..., -1] # flip whose turn it is\n new_state = np.concatenate((state[..., GameClass.BLACK_SLICE], state[..., GameClass.WHITE_SLICE],\n special_layers),\n axis=-1)\n # need to flip board vertically after flipping colours\n # this ensures that the pawns move in the correct directions\n return SymmetryTransform.flip_state_i(new_state)\n\n @staticmethod\n def flip_state_i(state):\n return np.flip(state, axis=0)\n\n @staticmethod\n def flip_state_j(state):\n return np.flip(state, axis=1)\n\n @staticmethod\n def flip_state_diagonal(state):\n return np.rot90(np.flip(state, axis=1), axes=(0, 1))\n"
] | [
[
"numpy.sum",
"numpy.copy",
"numpy.random.random",
"numpy.all",
"numpy.flip",
"numpy.concatenate"
]
] |
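Every flip used by `SymmetryTransform` in the record above is its own inverse, which is why `untransform_state` can simply replay the flips in reverse order. A toy check of that property (illustrative, not from the repository):

```python
import numpy as np

board = np.arange(8 * 8 * 3).reshape(8, 8, 3)  # stand-in (rows, cols, channels) state

flip_i = lambda s: np.flip(s, axis=0)                            # vertical flip
flip_j = lambda s: np.flip(s, axis=1)                            # horizontal flip
flip_diag = lambda s: np.rot90(np.flip(s, axis=1), axes=(0, 1))  # equals a transpose of the board plane

for f in (flip_i, flip_j, flip_diag):
    assert np.array_equal(f(f(board)), board)  # each transform is an involution
```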
tremblerz/enas | [
"329ee3f8beb5e715bf2dad1182cfb5120b3485f9"
] | [
"src/ptb/ptb_enas_controller.py"
] | [
"\n\n\n\nimport sys\nimport os\nimport time\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom src.utils import get_train_ops\nfrom src.common_ops import stack_lstm\n\nfrom tensorflow.python.training import moving_averages\n\nclass PTBEnasController(object):\n def __init__(self,\n rhn_depth=5,\n lstm_size=32,\n lstm_num_layers=2,\n lstm_keep_prob=1.0,\n tanh_constant=None,\n temperature=None,\n num_funcs=2,\n lr_init=1e-3,\n lr_dec_start=0,\n lr_dec_every=100,\n lr_dec_rate=0.9,\n l2_reg=0,\n entropy_weight=None,\n clip_mode=None,\n grad_bound=None,\n bl_dec=0.999,\n optim_algo=\"adam\",\n sync_replicas=False,\n num_aggregate=None,\n num_replicas=None,\n name=\"controller\"):\n\n print(\"-\" * 80)\n print(\"Building PTBEnasController\")\n\n self.rhn_depth = rhn_depth\n self.lstm_size = lstm_size\n self.lstm_num_layers = lstm_num_layers \n self.lstm_keep_prob = lstm_keep_prob\n self.tanh_constant = tanh_constant\n self.temperature = temperature\n self.num_funcs = num_funcs\n self.lr_init = lr_init\n self.lr_dec_start = lr_dec_start\n self.lr_dec_every = lr_dec_every\n self.lr_dec_rate = lr_dec_rate\n self.l2_reg = l2_reg\n self.entropy_weight = entropy_weight\n self.clip_mode = clip_mode\n self.grad_bound = grad_bound\n self.bl_dec = bl_dec\n self.optim_algo = optim_algo\n self.sync_replicas = sync_replicas\n self.num_aggregate = num_aggregate\n self.num_replicas = num_replicas\n self.name = name\n\n self._create_params()\n self._build_sampler()\n\n def _create_params(self):\n initializer = tf.random_uniform_initializer(minval=-0.1, maxval=0.1)\n with tf.variable_scope(self.name, initializer=initializer):\n with tf.variable_scope(\"lstm\"):\n self.w_lstm = []\n for layer_id in range(self.lstm_num_layers):\n with tf.variable_scope(\"layer_{}\".format(layer_id)):\n w = tf.get_variable(\"w\", [2 * self.lstm_size, 4 * self.lstm_size])\n self.w_lstm.append(w)\n\n num_funcs = self.num_funcs\n with tf.variable_scope(\"embedding\"):\n self.g_emb = tf.get_variable(\"g_emb\", [1, self.lstm_size])\n self.w_emb = tf.get_variable(\"w\", [num_funcs, self.lstm_size])\n\n with tf.variable_scope(\"softmax\"):\n self.w_soft = tf.get_variable(\"w\", [self.lstm_size, num_funcs])\n\n with tf.variable_scope(\"attention\"):\n self.attn_w_1 = tf.get_variable(\"w_1\", [self.lstm_size, self.lstm_size])\n self.attn_w_2 = tf.get_variable(\"w_2\", [self.lstm_size, self.lstm_size])\n self.attn_v = tf.get_variable(\"v\", [self.lstm_size, 1])\n\n def _build_sampler(self):\n \"\"\"Build the sampler ops and the log_prob ops.\"\"\"\n\n arc_seq = []\n sample_log_probs = []\n sample_entropy = []\n all_h = []\n all_h_w = []\n\n # sampler ops\n inputs = self.g_emb\n prev_c, prev_h = [], []\n for _ in range(self.lstm_num_layers):\n prev_c.append(tf.zeros([1, self.lstm_size], dtype=tf.float32))\n prev_h.append(tf.zeros([1, self.lstm_size], dtype=tf.float32))\n\n # used = tf.zeros([self.rhn_depth, 2], dtype=tf.int32)\n for layer_id in range(self.rhn_depth):\n next_c, next_h = stack_lstm(inputs, prev_c, prev_h, self.w_lstm)\n prev_c, prev_h = next_c, next_h\n all_h.append(next_h[-1])\n all_h_w.append(tf.matmul(next_h[-1], self.attn_w_1))\n\n if layer_id > 0:\n query = tf.matmul(next_h[-1], self.attn_w_2)\n query = query + tf.concat(all_h_w[:-1], axis=0)\n query = tf.tanh(query)\n logits = tf.matmul(query, self.attn_v)\n logits = tf.reshape(logits, [1, layer_id])\n\n if self.temperature is not None:\n logits /= self.temperature\n if self.tanh_constant is not None:\n logits = self.tanh_constant * tf.tanh(logits)\n diff = 
tf.to_float(layer_id - tf.range(0, layer_id)) ** 2\n logits -= tf.reshape(diff, [1, layer_id]) / 6.0\n\n skip_index = tf.multinomial(logits, 1)\n skip_index = tf.to_int32(skip_index)\n skip_index = tf.reshape(skip_index, [1])\n arc_seq.append(skip_index)\n\n log_prob = tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits=logits, labels=skip_index)\n sample_log_probs.append(log_prob)\n\n entropy = log_prob * tf.exp(-log_prob)\n sample_entropy.append(tf.stop_gradient(entropy))\n\n inputs = tf.nn.embedding_lookup(\n tf.concat(all_h[:-1], axis=0), skip_index)\n inputs /= (0.1 + tf.to_float(layer_id - skip_index))\n else:\n inputs = self.g_emb\n\n next_c, next_h = stack_lstm(inputs, prev_c, prev_h, self.w_lstm)\n prev_c, prev_h = next_c, next_h\n logits = tf.matmul(next_h[-1], self.w_soft)\n if self.temperature is not None:\n logits /= self.temperature\n if self.tanh_constant is not None:\n logits = self.tanh_constant * tf.tanh(logits)\n func = tf.multinomial(logits, 1)\n func = tf.to_int32(func)\n func = tf.reshape(func, [1])\n arc_seq.append(func)\n log_prob = tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits=logits, labels=func)\n sample_log_probs.append(log_prob)\n entropy = log_prob * tf.exp(-log_prob)\n sample_entropy.append(tf.stop_gradient(entropy))\n inputs = tf.nn.embedding_lookup(self.w_emb, func)\n\n arc_seq = tf.concat(arc_seq, axis=0)\n self.sample_arc = arc_seq\n\n self.sample_log_probs = tf.concat(sample_log_probs, axis=0)\n self.ppl = tf.exp(tf.reduce_mean(self.sample_log_probs))\n\n sample_entropy = tf.concat(sample_entropy, axis=0)\n self.sample_entropy = tf.reduce_sum(sample_entropy)\n\n self.all_h = all_h\n\n def build_trainer(self, child_model):\n # actor\n self.valid_loss = tf.to_float(child_model.rl_loss)\n self.valid_loss = tf.stop_gradient(self.valid_loss)\n self.valid_ppl = tf.exp(self.valid_loss)\n self.reward = 80.0 / self.valid_ppl\n\n if self.entropy_weight is not None:\n self.reward += self.entropy_weight * self.sample_entropy\n\n # or baseline\n self.sample_log_probs = tf.reduce_sum(self.sample_log_probs)\n self.baseline = tf.Variable(0.0, dtype=tf.float32, trainable=False)\n baseline_update = tf.assign_sub(\n self.baseline, (1 - self.bl_dec) * (self.baseline - self.reward))\n\n with tf.control_dependencies([baseline_update]):\n self.reward = tf.identity(self.reward)\n self.loss = self.sample_log_probs * (self.reward - self.baseline)\n\n self.train_step = tf.Variable(\n 0, dtype=tf.int32, trainable=False, name=\"train_step\")\n tf_variables = [var\n for var in tf.trainable_variables() if var.name.startswith(self.name)]\n\n self.train_op, self.lr, self.grad_norm, self.optimizer = get_train_ops(\n self.loss,\n tf_variables,\n self.train_step,\n clip_mode=self.clip_mode,\n grad_bound=self.grad_bound,\n l2_reg=self.l2_reg,\n lr_init=self.lr_init,\n lr_dec_start=self.lr_dec_start,\n lr_dec_every=self.lr_dec_every,\n lr_dec_rate=self.lr_dec_rate,\n optim_algo=self.optim_algo,\n sync_replicas=self.sync_replicas,\n num_aggregate=self.num_aggregate,\n num_replicas=self.num_replicas)\n\n"
] | [
[
"tensorflow.reshape",
"tensorflow.variable_scope",
"tensorflow.matmul",
"tensorflow.concat",
"tensorflow.Variable",
"tensorflow.identity",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.reduce_sum",
"tensorflow.assign_sub",
"tensorflow.multinomial",
"tensorflow.random_uniform_initializer",
"tensorflow.to_float",
"tensorflow.tanh",
"tensorflow.control_dependencies",
"tensorflow.nn.embedding_lookup",
"tensorflow.zeros",
"tensorflow.range",
"tensorflow.reduce_mean",
"tensorflow.stop_gradient",
"tensorflow.to_int32",
"tensorflow.exp",
"tensorflow.trainable_variables",
"tensorflow.get_variable"
]
] |
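`build_trainer` in the record above is a plain REINFORCE update with an exponential-moving-average reward baseline controlled by `bl_dec`. A dependency-free sketch of that baseline update, with illustrative reward values:

```python
def update_baseline(baseline, reward, bl_dec=0.999):
    # baseline <- baseline - (1 - bl_dec) * (baseline - reward): an EMA of past rewards
    return baseline - (1 - bl_dec) * (baseline - reward)

baseline, rewards = 0.0, [1.0, 1.2, 0.8]
for r in rewards:
    baseline = update_baseline(baseline, r)
    advantage = r - baseline   # what multiplies the summed sampled log-probabilities
```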
HuguesMoreau/Sensors_similariy | [
"4b8592049c83b03a11f5c57fab247290ee29b8f5"
] | [
"models/SHL_2018/transforms.py"
] | [
"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nThis file contains diverse preprocessing functions (mostly norms ans spectrograms),\r\nand basic tests and visualizations.\r\nIf you are to work with any IPython console (ex: with Jupyter or spyder), is is advised\r\nto launch a '%matplotlib qt' ,to get clean widow\r\n\"\"\"\r\n\r\n\r\nif __name__ == '__main__': # this is used to launch the file from anywhere\r\n import sys\r\n sys.path.append(\"../..\")\r\n\r\nimport numpy as np\r\nimport torch\r\nimport scipy.signal, scipy.interpolate, scipy.ndimage\r\n\r\n\r\nfrom param import classes_names, fs, duration_window, duration_overlap, spectro_batch_size\r\nfrom models.SHL_2018 import Datasets\r\n\r\nif __name__ == \"__main__\":\r\n import matplotlib.pyplot as plt\r\n n_classes = len(classes_names)\r\n # We will need this for the tests\r\n DS = Datasets.SignalsDataSet(mode='train', transform=None)\r\n\r\n\r\n#%% transform functions\r\n\r\n\"\"\"In all following functions, the input parameter (data) is, by default,\r\n a dict of numpy arrays, containing signal names (eg. \"Gyr_z\") as keys, and 1-dimensional\r\n arrays as values\r\n\r\nMost of this part contains basic visualizations to make sure the preprocessing is correct\"\"\"\r\n\r\n\r\n\r\n\r\nclass TemporalTransform():\r\n \"\"\" create the base transform to use to each element of the data\r\n\r\n Parameters\r\n ----------\r\n signal_name: a string (ex: 'Gyr_y', 'Ori_x')\r\n If the string ends by \"_norm\" (ex: \"Mag_norm\"), the output will\r\n be the norm of the three (or four) axis of the signal.\r\n\r\n Returns\r\n -------\r\n a function with input: a dict of (_, 6000) arrays (key example: 'Gyr_y')\r\n and output: an array with the same shape.\r\n \"\"\"\r\n def __init__(self, signal_name):\r\n super(TemporalTransform, self).__init__()\r\n self.signal_name = signal_name\r\n\r\n def __call__(self, data):\r\n \"\"\"\r\n Parameters\r\n ----------\r\n data: a dict of (B, 6000) arrays (key example: 'Gyr_y')\r\n\r\n Returns\r\n -------\r\n an array with shape (B, 6000), where B depends on the input shape.\r\n \"\"\"\r\n if self.signal_name[-2:] in ['_x', '_y', '_z', '_w'] or self.signal_name == \"Pressure\":\r\n processed_signal = data[self.signal_name]\r\n elif self.signal_name[-5:] == '_norm':\r\n suffix_location = self.signal_name.index(\"_\") # 4 if signal_name == \"LAcc\", 3 otherwise\r\n sensor = self.signal_name[:suffix_location] # ex: 'Acc', 'LAcc'\r\n if sensor == \"Ori\":\r\n # in that case, data[sensor+\"_x\"]**2 + data[sensor+\"_y\"]**2 + data[sensor+\"_z\"]**2 should be 1.0\r\n processed_signal = np.sqrt(data[sensor+\"_x\"]**2 + data[sensor+\"_y\"]**2 + data[sensor+\"_z\"]**2 \\\r\n + data[sensor+\"_w\"]**2)\r\n else :\r\n processed_signal = np.sqrt(data[sensor+\"_x\"]**2 + data[sensor+\"_y\"]**2 + data[sensor+\"_z\"]**2)\r\n else :\r\n raise ValueError(\"unknown signal name: '{}'. 
Signal names should end with either '_x', '_y', '_z', '_w', or '_norm'\".format(signal_name))\r\n return processed_signal\r\n\r\n\r\n\r\n def __str__(self):\r\n \"\"\"purely for visual purposes, so that we can print() the function\"\"\"\r\n str_to_return = \"Temporal_transform\"\r\n str_to_return += f\"\\n\\t Signal: {self.signal_name}\"\r\n return str_to_return\r\n\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n # plot one figure per sensor\r\n # on each figure, one subplot per class,\r\n # to find one instance per each class, we start looking at index = index0\r\n index0 = 0\r\n\r\n for tested_signal_name in [\"Acc_norm\", \"Ori_norm\", \"Mag_norm\", \"LAcc_x\"]:\r\n # plot 1 segment from each class.\r\n plt.figure()\r\n\r\n if tested_signal_name != 'Pressure':\r\n suffix_location = tested_signal_name.index(\"_\")\r\n tested_sensor = tested_signal_name[:suffix_location] # ex: 'Acc', 'LAcc'\r\n else:\r\n tested_sensor = 'Pressure'\r\n\r\n sensor_axis = [tested_sensor + axis for axis in [\"_x\", \"_y\", \"_z\"]] if tested_sensor != 'Pressure' else ['Pressure']\r\n if tested_sensor == \"Ori\" : sensor_axis.append(tested_sensor+\"_w\")\r\n temporal_transform = TemporalTransform(tested_signal_name)\r\n remaining_classes = classes_names.copy()\r\n index = index0\r\n\r\n while len(remaining_classes)>0:\r\n data_tensor, class_tensor = DS[index] # data is a dict of 2D tensors (1,nb)\r\n data_cpu = {signal:data_tensor[signal].to(torch.device('cpu')).detach().numpy() for signal in data_tensor.keys()}\r\n class_index = int(class_tensor)\r\n class_name = classes_names[class_index-1]\r\n\r\n if class_name in remaining_classes:\r\n\r\n remaining_classes.remove(class_name)\r\n plt.subplot(2, 4, n_classes - len(remaining_classes))\r\n for k,signal in enumerate(sensor_axis):\r\n\r\n if k==0: # compute the temporal axis once\r\n nb = data_cpu[signal].shape[1]\r\n x_t = np.linspace(0, nb/fs, nb)\r\n\r\n plt.plot(x_t, data_cpu[signal][0,:])\r\n selected_signal = temporal_transform(data_cpu)\r\n error_message_dtype = \"One of the signals does not have the correct type: {}, {} \\n dtype should be float32, is actually {}\".format(tested_signal_name, str(temporal_transform), selected_signal.dtype)\r\n assert (selected_signal.dtype == 'float32'), error_message_dtype\r\n\r\n plt.plot(x_t, selected_signal[0,:], '--')\r\n plt.xlabel(\"t (s)\")\r\n legend = sensor_axis + [tested_signal_name+' (selected)']\r\n plt.legend(legend)\r\n plt.title(\"{} ({}, index={})\".format(tested_sensor, classes_names[class_index-1], index))\r\n index +=1\r\n plt.show()\r\n\r\n\r\n\r\n\r\n#%%\r\n\r\n# ---------------- Spectrogram transforms ---------------------\r\n\r\n\r\n# Interpolation functions\r\ndef interpol_log(f, t, spectrogram, out_size):\r\n \"\"\"interpolates the spectrogram in input using a linear axis for the timestamps and a LOG axis for the frequencies\r\n\r\n Parameters\r\n ----------\r\n f : numpy array, shape: (F_in,), frequencies of the spectrogram\r\n t : numpy array, shape: (T_in,), timestamps of the spectrogram\r\n spectrogram : (B, F_in, T_in), B is batch size; 3D numpy array\r\n\r\n out_size : couple of ints (F_out, T_out)\r\n\r\n Returns\r\n -------\r\n f_interpolated : numpy array, shape: (F_out,), frequencies of the spectrogram AFTER interpolation\r\n t_interpolated : numpy array, shape: (T_out,), timestamps of the spectrogram AFTER interpolation\r\n a spectrogram, where the f axis (second dimension) has been re-interpolated\r\n using a log axis\r\n\r\n \"\"\"\r\n B = spectrogram.shape[0]\r\n out_f, out_t 
= out_size\r\n\r\n log_f = np.log(f+f[1]) # log between 0.2 Hz and 50.2 Hz\r\n\r\n log_f_normalized = (log_f-log_f[0])/(log_f[-1]-log_f[0]) # between 0.0 and 1.0\r\n t_normalized = (t-t[0])/(t[-1]-t[0])\r\n\r\n rescaled_f = out_f*log_f_normalized # 0 and 48\r\n # rescaled_f = (out_f-1)*log_f_normalized ??\r\n rescaled_t = out_t*t_normalized\r\n\r\n spectrogram_interpolated = np.zeros( (B, out_f, out_t), dtype='float32')\r\n index_f, index_t = np.arange(out_f), np.arange(out_t) # between 0 and 47\r\n\r\n for i in range(B):\r\n spectrogram_fn = scipy.interpolate.interp2d(rescaled_t, rescaled_f, spectrogram[i,:,:], copy=False)\r\n # interp2d returns a 2D function\r\n spectrogram_interpolated[i,:,:] = spectrogram_fn(index_t, index_f) # care to the order\r\n\r\n f_fn = scipy.interpolate.interp1d(rescaled_f, f, copy=False)\r\n f_interpolated = f_fn(index_f)\r\n\r\n t_fn = scipy.interpolate.interp1d(rescaled_t, t, copy=False)\r\n t_interpolated = t_fn(index_t)\r\n\r\n\r\n return f_interpolated, t_interpolated, spectrogram_interpolated\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#%%\r\n# ---------------- The spectrogram class --------------\r\nclass SpectrogramTransform():\r\n \"\"\" create the transform to work with spectrograms. This class behaves\r\n essentially the same as TempralTransform, except the created transform\r\n returns a dict of 3d array instead of 2d\r\n\r\n\r\n Parameters\r\n ----------\r\n signal_name: a string signal (ex: 'Gyr_y', 'Ori_x')\r\n If the string ends by \"_norm\" (ex: \"Mag_norm\"), the output will\r\n be the norm of the three (or four) axis of the signal.\r\n\r\n Returns\r\n -------\r\n a function with input: data : a dict of (_, 6000) arrays (key example: 'Gyr_y')\r\n and output: a dictionnary of 2d arrays.\r\n\r\n \"\"\"\r\n def __init__(self, signal_name):\r\n super(SpectrogramTransform, self).__init__()\r\n\r\n self.temporal_transform = TemporalTransform(signal_name)\r\n self.fs = fs\r\n self.duration_window = duration_window\r\n self.duration_overlap = duration_overlap\r\n self.spectro_batch_size = spectro_batch_size # these values were loaded from the param file\r\n self.signal_name = signal_name\r\n self.out_size = (48, 48)\r\n\r\n def __call__(self, data):\r\n \"\"\"\r\n Parameters\r\n ----------\r\n data : a dict of (B, 6000) arrays (key example: 'Gyr_y')\r\n\r\n Returns\r\n -------\r\n An array with shape (B, F, T), where B (dataset size) depends on the\r\n input shape, and F and T are equal to 48 here.\r\n \"\"\"\r\n temporal_signal = self.temporal_transform(data)\r\n del data # free some memory\r\n fs = self.fs\r\n nperseg = int(self.duration_window * fs)\r\n noverlap = int(self.duration_overlap * fs)\r\n\r\n spectro_batch_size = self.spectro_batch_size\r\n # turning 13,000 temporal signals into (550, 500) array\r\n # spectrograms at once is too much: a single (13000, 550, 500) array,\r\n # with simple precision requires 7.15 Go !\r\n # This is why we work with batches of 1000 instead. 
For each batch,\r\n # we compute the complete sectrogram (1000 x 550 x 500), then\r\n # interpolate it to smaller sizes, before working wit the following batch.\r\n\r\n current_spectro_batch_size = temporal_signal.shape[0]\r\n\r\n if current_spectro_batch_size < spectro_batch_size :\r\n f, t, spectrogram = scipy.signal.spectrogram(temporal_signal, fs=fs, nperseg=nperseg, noverlap=noverlap)\r\n f_interpolated, t_interpolated, interpolated_spectrogram = interpol_log(f, t, spectrogram, self.out_size)\r\n # f, t, and possibly out_size will be ignored when the function does not need them\r\n else :\r\n n_batches = (current_spectro_batch_size-1)//spectro_batch_size +1\r\n nb_interp_f, nb_interp_t = self.out_size\r\n interpolated_spectrogram = np.zeros((current_spectro_batch_size, nb_interp_f, nb_interp_t), dtype='float32')\r\n for i in range(n_batches):\r\n i_min = i * spectro_batch_size\r\n i_max = (i+1) * spectro_batch_size # does not matter if it goes beyond current_spectro_batch_size\r\n this_temporal_signal = temporal_signal[i_min:i_max,:]\r\n f, t, spectrogram = scipy.signal.spectrogram(this_temporal_signal, fs=fs, nperseg=nperseg, noverlap=noverlap)\r\n f_interpolated, t_interpolated, interpolated_spectrogram[i_min:i_max,:,:] = interpol_log(f, t, spectrogram, self.out_size)\r\n del temporal_signal\r\n np.log(interpolated_spectrogram + 1e-10, dtype='float32', out=interpolated_spectrogram) # in-place operation\r\n self.f_interpolated = f_interpolated\r\n self.t_interpolated = t_interpolated\r\n return interpolated_spectrogram\r\n\r\n\r\n\r\n def __str__(self):\r\n \"\"\"purely for visual purposes, so that we can print() the function\"\"\"\r\n str_to_return = \"Spectrogram transform\"\r\n str_to_return += f\"\\n\\t Signals: {self.signal_name}\"\r\n str_to_return += f\"\\n\\t Output size: {self.out_size}\"\r\n str_to_return += f\"\\n\\t Interpolation: log-interpolation\"\r\n str_to_return += \"\\n\\t Log-power\"\r\n return str_to_return\r\n\r\n# end of class SpectrogramTransform():\r\n\r\n\r\n\r\n#%%\r\nif __name__ == \"__main__\":\r\n fontdict = {'fontsize':10}\r\n n_ticks = 10\r\n\r\n # we plot the raw spectrogram and two interpolated spectrograms for the following classes\r\n selected_classes = [\"Run\", \"Walk\"]\r\n remaining_classes = selected_classes.copy()\r\n nsel = len(selected_classes)\r\n index = 3204 # where to tart the search\r\n plt.figure(figsize=(12,8))\r\n signal_name = \"Acc_norm\"\r\n temporal_transform = TemporalTransform(signal_name) # we will plot the result\r\n spectrogram_transform = SpectrogramTransform(signal_name)\r\n\r\n while len(remaining_classes)>0:\r\n data_tensor, class_tensor = DS[index]\r\n data_cpu = {signal:data_tensor[signal].cpu().detach().numpy() for signal in data_tensor.keys()}\r\n class_index = int(class_tensor)\r\n class_name = classes_names[class_index-1]\r\n\r\n if class_name in remaining_classes:\r\n remaining_classes.remove(class_name)\r\n i_class = nsel - len(remaining_classes) # between 1 and n\r\n\r\n temporal_signal = temporal_transform(data_cpu)\r\n nb = temporal_signal.shape[1]\r\n x_t = np.linspace(0, nb/fs, nb)\r\n plt.subplot(2,nsel,i_class)\r\n plt.plot(x_t, temporal_signal[0,:])\r\n plt.title(f'{class_name} (index={index})', fontdict)\r\n plt.xlabel(\"t (sec)\")\r\n plt.ylabel(signal_name)\r\n\r\n data_tensor, _ = DS[index] # we need to recreate data because the variable is deleted\r\n data_cpu = {signal:data_tensor[signal].to(torch.device('cpu')).detach().numpy() for signal in data_tensor.keys()}\r\n spectrogram_interpolated = 
spectrogram_transform(data_cpu)\r\n f_interpolated = spectrogram_transform.f_interpolated\r\n t_interpolated = spectrogram_transform.t_interpolated\r\n\r\n plt.subplot(2,nsel,i_class + nsel)\r\n t_interpolated = spectrogram_transform.t_interpolated\r\n f_interpolated = spectrogram_transform.f_interpolated\r\n matrix_shape = spectrogram_interpolated.shape\r\n time_list = [f'{t_interpolated[i]:.0f}' for i in np.round(np.linspace(0, matrix_shape[2]-1,n_ticks)).astype(int)]\r\n freq_list = [f'{f_interpolated[i]:.1f}' for i in np.round(np.linspace(0, matrix_shape[1]-1,n_ticks)).astype(int)]\r\n\r\n plt.xticks(np.linspace(0, matrix_shape[2]-1, n_ticks), time_list)\r\n plt.yticks(np.linspace(0, matrix_shape[1]-1, n_ticks), freq_list)\r\n plt.imshow(spectrogram_interpolated[0,:,:])\r\n\r\n plt.ylabel(\"f (Hz)\")\r\n plt.xlabel(\"t (s)\")\r\n plt.colorbar()\r\n\r\n index += 1\r\n\r\n plt.show()\r\n\r\n\r\n#%%\r\n\r\n\r\n\r\n\r\n"
] | [
[
"matplotlib.pyplot.legend",
"numpy.zeros",
"matplotlib.pyplot.figure",
"torch.device",
"numpy.arange",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"numpy.log",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.colorbar",
"numpy.sqrt",
"matplotlib.pyplot.plot",
"numpy.linspace",
"matplotlib.pyplot.xlabel"
]
] |
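`interpol_log` in the record above resamples each spectrogram on a logarithmic frequency axis before interpolation to the fixed output size. The coordinate change alone looks like the sketch below (illustrative values, no SciPy calls):

```python
import numpy as np

f = np.linspace(0.0, 50.0, 251)   # linear spectrogram frequencies in Hz
out_f = 48                        # number of output frequency bins

log_f = np.log(f + f[1])          # shift by one bin so log(0) never occurs
log_f_normalized = (log_f - log_f[0]) / (log_f[-1] - log_f[0])  # rescaled to [0, 1]
rescaled_f = out_f * log_f_normalized  # positions of the input bins on the output grid
# interp2d / interp1d are then evaluated at np.arange(out_f) against these positions.
```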
aditya2592/PoseCNN | [
"da9eaae850eed7521a2a48a4d27474d655caab42"
] | [
"lib/rpn_layer/proposal_target_layer.py"
] | [
"# --------------------------------------------------------\n# Faster R-CNN\n# Copyright (c) 2015 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ross Girshick, Sean Bell and Xinlei Chen\n# --------------------------------------------------------\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport numpy.random as npr\nfrom fcn.config import cfg\nfrom utils.bbox_transform import bbox_transform\nfrom utils.cython_bbox import bbox_overlaps\n\ndef proposal_target_layer(rpn_rois, rpn_scores, gt_boxes, poses, _num_classes):\n \"\"\"\n Assign object detection proposals to ground-truth targets. Produces proposal\n classification labels and bounding-box regression targets.\n \"\"\"\n\n # Proposal ROIs (0, x1, y1, x2, y2) coming from RPN\n # (i.e., rpn.proposal_layer.ProposalLayer), or any other source\n all_rois = rpn_rois\n all_scores = rpn_scores\n\n # Include ground-truth boxes in the set of candidate rois\n if cfg.TRAIN.USE_GT:\n zeros = np.zeros((gt_boxes.shape[0], 1), dtype=gt_boxes.dtype)\n all_rois = np.vstack(\n (all_rois, np.hstack((zeros, gt_boxes[:, :-1])))\n )\n # not sure if it a wise appending, but anyway i am not using it\n all_scores = np.vstack((all_scores, zeros))\n\n num_images = 1\n rois_per_image = cfg.TRAIN.BATCH_SIZE / num_images\n fg_rois_per_image = np.round(cfg.TRAIN.FG_FRACTION * rois_per_image)\n\n # Sample rois with classification labels and bounding box regression\n # targets\n labels, rois, roi_scores, bbox_targets, bbox_inside_weights, poses_target, poses_weight = _sample_rois(\n all_rois, all_scores, gt_boxes, poses, fg_rois_per_image,\n rois_per_image, _num_classes)\n\n rois = rois.reshape(-1, 5)\n roi_scores = roi_scores.reshape(-1)\n labels = labels.reshape(-1, 1)\n bbox_targets = bbox_targets.reshape(-1, _num_classes * 4)\n bbox_inside_weights = bbox_inside_weights.reshape(-1, _num_classes * 4)\n bbox_outside_weights = np.array(bbox_inside_weights > 0).astype(np.float32)\n\n return rois, roi_scores, labels, bbox_targets, bbox_inside_weights, bbox_outside_weights, poses_target, poses_weight\n\n\ndef _get_bbox_regression_labels(bbox_target_data, num_classes):\n \"\"\"Bounding-box regression targets (bbox_target_data) are stored in a\n compact form N x (class, tx, ty, tw, th)\n\n This function expands those targets into the 4-of-4*K representation used\n by the network (i.e. 
only one class has non-zero targets).\n\n Returns:\n bbox_target (ndarray): N x 4K blob of regression targets\n bbox_inside_weights (ndarray): N x 4K blob of loss weights\n \"\"\"\n\n clss = bbox_target_data[:, 0]\n bbox_targets = np.zeros((clss.size, 4 * num_classes), dtype=np.float32)\n bbox_inside_weights = np.zeros(bbox_targets.shape, dtype=np.float32)\n inds = np.where(clss > 0)[0]\n for ind in inds:\n cls = clss[ind]\n start = int(4 * cls)\n end = start + 4\n bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]\n bbox_inside_weights[ind, start:end] = cfg.TRAIN.BBOX_INSIDE_WEIGHTS\n return bbox_targets, bbox_inside_weights\n\n\ndef _compute_targets(ex_rois, gt_rois, labels):\n \"\"\"Compute bounding-box regression targets for an image.\"\"\"\n\n assert ex_rois.shape[0] == gt_rois.shape[0]\n assert ex_rois.shape[1] == 4\n assert gt_rois.shape[1] == 4\n\n targets = bbox_transform(ex_rois, gt_rois)\n if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:\n # Optionally normalize targets by a precomputed mean and stdev\n targets = ((targets - np.array(cfg.TRAIN.BBOX_NORMALIZE_MEANS))\n / np.array(cfg.TRAIN.BBOX_NORMALIZE_STDS))\n return np.hstack(\n (labels[:, np.newaxis], targets)).astype(np.float32, copy=False)\n\n\ndef _compute_pose_targets(quaternions, labels, num_classes):\n \"\"\"Compute pose regression targets for an image.\"\"\"\n\n num = quaternions.shape[0]\n poses_target = np.zeros((num, 4 * num_classes), dtype=np.float32)\n poses_weight = np.zeros((num, 4 * num_classes), dtype=np.float32)\n\n for i in xrange(num):\n cls = labels[i]\n if cls > 0:\n start = int(4 * cls)\n end = start + 4\n poses_target[i, start:end] = quaternions[i, :]\n poses_weight[i, start:end] = 1.0\n\n return poses_target, poses_weight\n\n\ndef _sample_rois(all_rois, all_scores, gt_boxes, poses, fg_rois_per_image, rois_per_image, num_classes):\n \"\"\"Generate a random sample of RoIs comprising foreground and background\n examples.\n \"\"\"\n # overlaps: (rois x gt_boxes)\n overlaps = bbox_overlaps(\n np.ascontiguousarray(all_rois[:, 1:5], dtype=np.float),\n np.ascontiguousarray(gt_boxes[:, :4], dtype=np.float))\n gt_assignment = overlaps.argmax(axis=1)\n max_overlaps = overlaps.max(axis=1)\n labels = gt_boxes[gt_assignment, 4]\n quaternions = poses[gt_assignment, 6:10]\n\n # Select foreground RoIs as those with >= FG_THRESH overlap\n fg_inds = np.where(max_overlaps >= cfg.TRAIN.FG_THRESH)[0]\n # Guard against the case when an image has fewer than fg_rois_per_image\n # Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)\n bg_inds = np.where((max_overlaps < cfg.TRAIN.BG_THRESH_HI) &\n (max_overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]\n\n # Small modification to the original version where we ensure a fixed number of regions are sampled\n if fg_inds.size > 0 and bg_inds.size > 0:\n fg_rois_per_image = min(fg_rois_per_image, fg_inds.size)\n fg_inds = npr.choice(fg_inds, size=int(fg_rois_per_image), replace=False)\n bg_rois_per_image = rois_per_image - fg_rois_per_image\n to_replace = bg_inds.size < bg_rois_per_image\n bg_inds = npr.choice(bg_inds, size=int(bg_rois_per_image), replace=to_replace)\n elif fg_inds.size > 0:\n to_replace = fg_inds.size < rois_per_image\n fg_inds = npr.choice(fg_inds, size=int(rois_per_image), replace=to_replace)\n fg_rois_per_image = rois_per_image\n elif bg_inds.size > 0:\n to_replace = bg_inds.size < rois_per_image\n bg_inds = npr.choice(bg_inds, size=int(rois_per_image), replace=to_replace)\n fg_rois_per_image = 0\n else:\n import pdb\n pdb.set_trace()\n\n # The indices 
that we're selecting (both fg and bg)\n keep_inds = np.append(fg_inds, bg_inds)\n # Select sampled values from various arrays:\n labels = labels[keep_inds]\n # Clamp labels for the background RoIs to 0\n labels[int(fg_rois_per_image):] = 0\n rois = all_rois[keep_inds]\n roi_scores = all_scores[keep_inds]\n\n # pose regression targets and weights\n poses_target, poses_weight = _compute_pose_targets(quaternions[keep_inds], labels, num_classes)\n\n bbox_target_data = _compute_targets(\n rois[:, 1:5], gt_boxes[gt_assignment[keep_inds], :4], labels)\n\n bbox_targets, bbox_inside_weights = \\\n _get_bbox_regression_labels(bbox_target_data, num_classes)\n\n return labels, rois, roi_scores, bbox_targets, bbox_inside_weights, poses_target, poses_weight\n"
] | [
[
"numpy.vstack",
"numpy.append",
"numpy.zeros",
"numpy.ascontiguousarray",
"numpy.hstack",
"numpy.round",
"numpy.where",
"numpy.array"
]
] |
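`_get_bbox_regression_labels` in the record above stores class-specific regression targets in a 4K-wide layout: four slots per class, with only the ground-truth class non-zero. A compact NumPy restatement, with the inside weight left as a parameter instead of being read from `cfg`:

```python
import numpy as np

def expand_bbox_targets(bbox_target_data, num_classes, inside_weight=1.0):
    """bbox_target_data: (N, 5) rows of (class, tx, ty, tw, th)."""
    clss = bbox_target_data[:, 0].astype(int)
    targets = np.zeros((clss.size, 4 * num_classes), dtype=np.float32)
    weights = np.zeros_like(targets)
    for ind in np.where(clss > 0)[0]:              # background (class 0) keeps all zeros
        start = 4 * clss[ind]
        targets[ind, start:start + 4] = bbox_target_data[ind, 1:]
        weights[ind, start:start + 4] = inside_weight
    return targets, weights
```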
dendisuhubdy/Vitis-AI | [
"524f65224c52314155dafc011d488ed30e458fcb"
] | [
"alveo/apps/whole_app_acceleration/classification/test_classify_pp.py"
] | [
"# Copyright 2019 Xilinx Inc.\n# Copyright 2019 Xilinx Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\nfrom six import itervalues, iteritems\nfrom ctypes import *\nimport numpy as np\n\nimport os, sys\nfrom vai.dpuv1.rt import xdnn, xdnn_io\nfrom vai.dpuv1.rt.vitis.python.dpu.runner import Runner\nimport waa_rt\n\nimport multiprocessing as mp\nimport ctypes\n\n\ndef pre_process(q,args):\n\n xclbin_p=str(args['xclbin']+\"/xdnn_v3_96x16_2pe_8b_9mb_bank03.xclbin\")\n kernelName_p=\"pp_pipeline_accel\"\n deviceIdx_p=args['deviceid']\n fpga_pp = waa_rt.PreProcess(xclbin_p,kernelName_p,deviceIdx_p, 0)\n batch_sz = args['batch_sz']\n img_paths = xdnn_io.getFilePaths(args['images'])\n print(\"Pre-processing handle created. Populating Queue\")\n for i in range(0, len(img_paths), batch_sz):\n for j, p in enumerate(img_paths[i:i + batch_sz]):\n arr, ht = fpga_pp.preprocess_input(p)\n q.put(arr)\n print(\"Queue populated\")\n\n\ndef process_xdnn(q,args):\n runner = Runner(args['vitis_rundir'])\n inTensors = runner.get_input_tensors()\n outTensors = runner.get_output_tensors()\n batch_sz = args['batch_sz']\n if batch_sz == -1:\n # use Runner's suggested batch size\n batch_sz = inTensors[0].dims[0]\n\n if args['golden']:\n goldenMap = xdnn_io.getGoldenMap(args['golden'])\n top5Count = 0\n top1Count = 0\n\n fpgaBlobs = []\n for io in [inTensors, outTensors]:\n blobs = []\n for t in io:\n shape = (batch_sz,) + tuple([t.dims[i] for i in range(t.ndims)][1:])\n blobs.append(np.empty((shape), dtype=np.float32, order='C'))\n fpgaBlobs.append(blobs)\n\n img_paths = xdnn_io.getFilePaths(args['images'])\n labels = xdnn_io.get_labels(args['labels'])\n xdnnCPUOp = xdnn.XDNNCPUOp(\"%s/weights.h5\" % args['vitis_rundir'])\n fcOutput = np.empty((batch_sz, args['outsz'],), dtype=np.float32, order='C')\n\n fpgaInput = fpgaBlobs[0][0]\n for i in range(0, len(img_paths), batch_sz):\n pl = []\n # fill tensor input data from image file\n for j, p in enumerate(img_paths[i:i + batch_sz]):\n\n img, _ = q.get(), None\n pl.append(p)\n np.copyto(fpgaInput[j], img)\n\n jid = runner.execute_async(fpgaBlobs[0], fpgaBlobs[1])\n runner.wait(jid)\n\n xdnnCPUOp.computeFC(fpgaBlobs[1][0], fcOutput)\n softmaxOut = xdnnCPUOp.computeSoftmax(fcOutput)\n if args['golden']:\n for j,p in enumerate(img_paths[i:i + batch_sz]):\n top1Count += xdnn_io.isTopK(softmaxOut[j], goldenMap, p, labels, 1)\n top5Count += xdnn_io.isTopK(softmaxOut[j], goldenMap, p, labels, 5)\n else:\n xdnn_io.printClassification(softmaxOut, pl, labels)\n\n if args['golden']:\n print ( (\"\\nAverage accuracy (n=%d) Top-1: %.1f%%, Top-5: %.1f%%\\n\") % (len(img_paths), float(top1Count)/float(len(img_paths))*100., float(top5Count)/float(len(img_paths))*100.) 
)\n\nif __name__ == '__main__':\n print(\"\\n\\n\\n\\n\\n\\n\\n\\n\" + '\\33[32m' + \"Running Inference with HW Pre-processing\" + '\\33[0m') \n\n args = xdnn_io.processCommandLine()\n\t\t#Create a queue for passing the pre-processed data\n q = mp.Queue()\n\t\t#Creating a process to run HW pre-processing kernel\n p_preprocess = mp.Process(target=pre_process,args=(q,args))\n\t\t#Process to run XDNN\n p_xdnn = mp.Process(target=process_xdnn,args=(q,args))\n\n p_preprocess.start()\n p_xdnn.start()\n p_preprocess.join()\n p_xdnn.join()\n"
] | [
[
"numpy.empty",
"numpy.copyto"
]
] |
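The script in the record above splits work across two processes joined by a `multiprocessing.Queue`: one fills the queue with pre-processed images, the other drains it and runs inference. A generic skeleton of that layout (no Xilinx APIs; the sentinel-based shutdown is added here for illustration and is not in the original):

```python
import multiprocessing as mp

def producer(q, n_items=4):
    for i in range(n_items):
        q.put(i)        # stand-in for fpga_pp.preprocess_input(path)
    q.put(None)         # sentinel so the consumer knows when to stop

def consumer(q):
    while True:
        item = q.get()
        if item is None:
            break
        print("processing", item)   # stand-in for the runner.execute_async(...) step

if __name__ == "__main__":
    q = mp.Queue()
    p1 = mp.Process(target=producer, args=(q,))
    p2 = mp.Process(target=consumer, args=(q,))
    p1.start(); p2.start()
    p1.join(); p2.join()
```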
loveredcarrot/ssl_multi_seg | [
"5315dbcc2c44e8effab28699c1491dd67b7ce00b"
] | [
"code/networks/Unet.py"
] | [
"# -*- coding: utf-8 -*- \n# @Time : 2021/4/8 15:52\n# @Author : aurorazeng\n# @File : Unet.py \n# @license: (C) Copyright 2021-2026, aurorazeng; No reprobaiction without permission.\n\n\n\"\"\"\nThe implementation is borrowed from: https://github.com/HiLab-git/PyMIC\n\"\"\"\nfrom __future__ import division, print_function\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.distributions.uniform import Uniform\n\n\nclass ConvBlock(nn.Module):\n \"\"\"two convolution layers with batch norm and leaky relu\"\"\"\n\n def __init__(self, in_channels, out_channels, dropout_p):\n super(ConvBlock, self).__init__()\n self.conv_conv = nn.Sequential(\n nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),\n nn.BatchNorm2d(out_channels),\n # nn.LeakyReLU(),\n nn.ReLU(),\n nn.Dropout(dropout_p),\n nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),\n nn.BatchNorm2d(out_channels),\n # nn.LeakyReLU()\n nn.ReLU()\n )\n\n def forward(self, x):\n return self.conv_conv(x)\n\n\nclass DownBlock(nn.Module):\n \"\"\"Downsampling followed by ConvBlock\"\"\"\n\n def __init__(self, in_channels, out_channels, dropout_p):\n super(DownBlock, self).__init__()\n self.maxpool_conv = nn.Sequential(\n nn.MaxPool2d(2),\n ConvBlock(in_channels, out_channels, dropout_p)\n\n )\n\n def forward(self, x):\n return self.maxpool_conv(x)\n\n\nclass UpBlock(nn.Module):\n \"\"\"Upssampling followed by ConvBlock\"\"\"\n\n def __init__(self, in_channels1, in_channels2, out_channels, dropout_p,\n bilinear=True):\n super(UpBlock, self).__init__()\n self.bilinear = bilinear\n if bilinear:\n self.conv1x1 = nn.Conv2d(in_channels1, in_channels2, kernel_size=1)\n self.up = nn.Upsample(\n scale_factor=2, mode='bilinear', align_corners=True)\n else:\n self.up = nn.ConvTranspose2d(\n in_channels1, in_channels2, kernel_size=2, stride=2)\n self.conv = ConvBlock(in_channels2 * 2, out_channels, dropout_p)\n\n def forward(self, x1, x2):\n if self.bilinear:\n x1 = self.conv1x1(x1)\n x1 = self.up(x1)\n x = torch.cat([x2, x1], dim=1)\n return self.conv(x)\n\n\nclass Encoder(nn.Module):\n def __init__(self, params):\n super(Encoder, self).__init__()\n self.params = params\n self.in_chns = self.params['in_chns']\n self.ft_chns = self.params['feature_chns']\n self.n_class = self.params['class_num']\n self.bilinear = self.params['bilinear']\n self.dropout = self.params['dropout']\n assert (len(self.ft_chns) == 5)\n self.in_conv = ConvBlock(\n self.in_chns, self.ft_chns[0], self.dropout[0])\n self.down1 = DownBlock(\n self.ft_chns[0], self.ft_chns[1], self.dropout[1])\n self.down2 = DownBlock(\n self.ft_chns[1], self.ft_chns[2], self.dropout[2])\n self.down3 = DownBlock(\n self.ft_chns[2], self.ft_chns[3], self.dropout[3])\n self.down4 = DownBlock(\n self.ft_chns[3], self.ft_chns[4], self.dropout[4])\n\n def forward(self, x):\n x0 = self.in_conv(x)\n x1 = self.down1(x0)\n x2 = self.down2(x1)\n x3 = self.down3(x2)\n x4 = self.down4(x3)\n return [x0, x1, x2, x3, x4]\n\n\nclass Decoder(nn.Module):\n def __init__(self, params):\n super(Decoder, self).__init__()\n self.params = params\n self.in_chns = self.params['in_chns']\n self.ft_chns = self.params['feature_chns']\n self.n_class = self.params['class_num']\n self.bilinear = self.params['bilinear']\n assert (len(self.ft_chns) == 5)\n\n self.up1 = UpBlock(\n self.ft_chns[4], self.ft_chns[3], self.ft_chns[3], dropout_p=0.0)\n self.up2 = UpBlock(\n self.ft_chns[3], self.ft_chns[2], self.ft_chns[2], dropout_p=0.0)\n self.up3 = UpBlock(\n self.ft_chns[2], self.ft_chns[1], 
self.ft_chns[1], dropout_p=0.0)\n self.up4 = UpBlock(\n self.ft_chns[1], self.ft_chns[0], self.ft_chns[0], dropout_p=0.0)\n\n self.out_conv = nn.Conv2d(self.ft_chns[0], self.n_class,\n kernel_size=1, padding=0)\n\n def forward(self, feature):\n x0 = feature[0]\n x1 = feature[1]\n x2 = feature[2]\n x3 = feature[3]\n x4 = feature[4]\n\n x = self.up1(x4, x3)\n x = self.up2(x, x2)\n x = self.up3(x, x1)\n x = self.up4(x, x0)\n output = self.out_conv(x)\n return output\n\n\nclass UNet(nn.Module):\n def __init__(self, in_chns, class_num):\n super(UNet, self).__init__()\n\n params = {'in_chns': in_chns,\n # 'feature_chns': [16, 32, 64, 128, 256],\n 'feature_chns': [32, 64, 128, 256, 512],\n 'dropout': [0, 0, 0, 0, 0],\n 'class_num': class_num,\n 'bilinear': False,\n 'acti_func': 'relu'}\n\n self.encoder = Encoder(params)\n self.decoder = Decoder(params)\n\n def forward(self, x):\n feature = self.encoder(x)\n output = self.decoder(feature)\n return output\n\n\nclass UNetWithDrop(nn.Module):\n def __init__(self, in_chns, class_num):\n super(UNetWithDrop, self).__init__()\n\n params = {'in_chns': in_chns,\n # 'feature_chns': [16, 32, 64, 128, 256],\n 'feature_chns': [32, 64, 128, 256, 512],\n 'dropout': [0.05, 0.1, 0.2, 0.3, 0.5],\n 'class_num': class_num,\n 'bilinear': False,\n 'acti_func': 'relu'}\n\n self.encoder = Encoder(params)\n self.decoder = Decoder(params)\n\n def forward(self, x):\n feature = self.encoder(x)\n output = self.decoder(feature)\n return output\n"
] | [
[
"torch.nn.BatchNorm2d",
"torch.nn.MaxPool2d",
"torch.nn.Dropout",
"torch.nn.Upsample",
"torch.nn.Conv2d",
"torch.nn.ReLU",
"torch.cat",
"torch.nn.ConvTranspose2d"
]
] |
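A hedged usage sketch for the `UNet` in the record above: input height and width must be divisible by 16 because of the four 2x2 max-pool stages, and the output has `class_num` channels at the input resolution. The import path is assumed from the repository layout shown above.

```python
import torch
from networks.Unet import UNet   # assumed module path matching code/networks/Unet.py above

model = UNet(in_chns=1, class_num=2)
x = torch.randn(2, 1, 256, 256)  # (batch, channels, H, W), H and W divisible by 16
logits = model(x)                # -> torch.Size([2, 2, 256, 256])
```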
Atica57/DALLE-pytorch | [
"4fa108271aeb1972fcb118390ec15b656f2c328a"
] | [
"train_dalle.py"
] | [
"import argparse\nfrom random import choice\nfrom pathlib import Path\n\n# torch\n\nimport torch\nfrom torch.optim import Adam\nfrom torch.nn.utils import clip_grad_norm_\n\n# vision imports\n\nfrom PIL import Image\nfrom torchvision import transforms as T\nfrom torch.utils.data import DataLoader, Dataset\nfrom torchvision.datasets import ImageFolder\nfrom torchvision.utils import make_grid, save_image\n\n# dalle related classes and utils\n\nfrom dalle_pytorch import OpenAIDiscreteVAE, DiscreteVAE, DALLE\nfrom dalle_pytorch.simple_tokenizer import tokenize, tokenizer, VOCAB_SIZE\n\n# argument parsing\n\nparser = argparse.ArgumentParser()\n\ngroup = parser.add_mutually_exclusive_group(required = False)\n\ngroup.add_argument('--vae_path', type = str,\n help='path to your trained discrete VAE')\n\ngroup.add_argument('--dalle_path', type = str,\n help='path to your partially trained DALL-E')\n\nparser.add_argument('--image_text_folder', type = str, required = True,\n help='path to your folder of images and text for learning the DALL-E')\n\nargs = parser.parse_args()\n\n# helpers\n\ndef exists(val):\n return val is not None\n\n# constants\n\nVAE_PATH = args.vae_path\nDALLE_PATH = args.dalle_path\nRESUME = exists(DALLE_PATH)\n\nEPOCHS = 20\nBATCH_SIZE = 4\nLEARNING_RATE = 3e-4\nGRAD_CLIP_NORM = 0.5\n\nMODEL_DIM = 512\nTEXT_SEQ_LEN = 256\nDEPTH = 2\nHEADS = 4\nDIM_HEAD = 64\n\n# reconstitute vae\n\nif RESUME:\n dalle_path = Path(DALLE_PATH)\n assert dalle_path.exists(), 'DALL-E model file does not exist'\n\n loaded_obj = torch.load(str(dalle_path))\n\n dalle_params, vae_params, weights = loaded_obj['hparams'], loaded_obj['vae_params'], loaded_obj['weights']\n\n vae = DiscreteVAE(**vae_params)\n\n dalle_params = dict(\n vae = vae,\n **dalle_params\n )\n\n IMAGE_SIZE = vae_params['image_size']\n\nelse:\n if exists(VAE_PATH):\n vae_path = Path(VAE_PATH)\n assert vae_path.exists(), 'VAE model file does not exist'\n\n loaded_obj = torch.load(str(vae_path))\n\n vae_params, weights = loaded_obj['hparams'], loaded_obj['weights']\n\n vae = DiscreteVAE(**vae_params)\n vae.load_state_dict(weights)\n else:\n print('using OpenAIs pretrained VAE for encoding images to tokens')\n vae_params = None\n\n vae = OpenAIDiscreteVAE()\n\n IMAGE_SIZE = vae.image_size\n\n dalle_params = dict(\n vae = vae,\n num_text_tokens = VOCAB_SIZE,\n text_seq_len = TEXT_SEQ_LEN,\n dim = MODEL_DIM,\n depth = DEPTH,\n heads = HEADS,\n dim_head = DIM_HEAD\n )\n\n# helpers\n\ndef save_model(path):\n save_obj = {\n 'hparams': dalle_params,\n 'vae_params': vae_params,\n 'weights': dalle.state_dict()\n }\n\n torch.save(save_obj, path)\n\n# dataset loading\n\nclass TextImageDataset(Dataset):\n def __init__(self, folder, text_len = 256, image_size = 128):\n super().__init__()\n path = Path(folder)\n\n text_files = [*path.glob('**/*.txt')]\n\n image_files = [\n *path.glob('**/*.png'),\n *path.glob('**/*.jpg'),\n *path.glob('**/*.jpeg')\n ]\n\n text_files = {t.stem: t for t in text_files}\n image_files = {i.stem: i for i in image_files}\n\n keys = (image_files.keys() & text_files.keys())\n\n self.keys = list(keys)\n self.text_files = {k: v for k, v in text_files.items() if k in keys}\n self.image_files = {k: v for k, v in image_files.items() if k in keys}\n\n self.image_tranform = T.Compose([\n T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),\n T.CenterCrop(image_size),\n T.Resize(image_size),\n T.ToTensor(),\n T.Lambda(lambda t: t.expand(3, -1, -1)),\n T.Normalize((0.5,) * 3, (0.5,) * 3)\n ])\n\n def __len__(self):\n 
return len(self.keys)\n\n def __getitem__(self, ind):\n key = self.keys[ind]\n text_file = self.text_files[key]\n image_file = self.image_files[key]\n\n image = Image.open(image_file)\n descriptions = text_file.read_text().split('\\n')\n descriptions = list(filter(lambda t: len(t) > 0, descriptions))\n description = choice(descriptions)\n\n tokenized_text = tokenize(description).squeeze(0)\n mask = tokenized_text != 0\n\n image_tensor = self.image_tranform(image)\n return tokenized_text, image_tensor, mask\n\n# create dataset and dataloader\n\nds = TextImageDataset(\n args.image_text_folder,\n text_len = TEXT_SEQ_LEN,\n image_size = IMAGE_SIZE\n)\n\nassert len(ds) > 0, 'dataset is empty'\nprint(f'{len(ds)} image-text pairs found for training')\n\ndl = DataLoader(ds, batch_size = BATCH_SIZE, shuffle = True, drop_last = True)\n\n# initialize DALL-E\n\ndalle = DALLE(**dalle_params).cuda()\n\nif RESUME:\n dalle.load_state_dict(weights)\n\n# optimizer\n\nopt = Adam(dalle.parameters(), lr = LEARNING_RATE)\n\n# experiment tracker\n\nimport wandb\n\nwandb.config.depth = DEPTH\nwandb.config.heads = HEADS\nwandb.config.dim_head = DIM_HEAD\n\nwandb.init(project = 'dalle_train_transformer', resume = RESUME)\n\n# training\n\nfor epoch in range(EPOCHS):\n for i, (text, images, mask) in enumerate(dl):\n text, images, mask = map(lambda t: t.cuda(), (text, images, mask))\n\n loss = dalle(text, images, mask = mask, return_loss = True)\n\n loss.backward()\n clip_grad_norm_(dalle.parameters(), GRAD_CLIP_NORM)\n\n opt.step()\n opt.zero_grad()\n\n log = {}\n\n if i % 10 == 0:\n print(epoch, i, f'loss - {loss.item()}')\n\n log = {\n **log,\n 'epoch': epoch,\n 'iter': i,\n 'loss': loss.item()\n }\n\n if i % 100 == 0:\n sample_text = text[:1]\n token_list = sample_text.masked_select(sample_text != 0).tolist()\n decoded_text = tokenizer.decode(token_list)\n\n image = dalle.generate_images(\n text[:1],\n mask = mask[:1],\n filter_thres = 0.9 # topk sampling at 0.9\n )\n\n save_model(f'./dalle.pt')\n wandb.save(f'./dalle.pt')\n\n log = {\n **log,\n 'image': wandb.Image(image, caption = decoded_text)\n }\n\n wandb.log(log)\n\nsave_model(f'./dalle-final.pt')\nwandb.save('./dalle-final.pt')\nwandb.finish()\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.save"
]
] |
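The training loop in the record above follows the standard pattern: forward pass returning a loss, backward, gradient-norm clipping, optimizer step, zero grad. Factored out as a generic helper for reference (illustrative; the real forward call is `dalle(text, images, mask=mask, return_loss=True)`):

```python
import torch
from torch.nn.utils import clip_grad_norm_

def train_step(model, batch, opt, grad_clip_norm=0.5):
    loss = model(*batch)                          # must return a scalar loss tensor
    loss.backward()
    clip_grad_norm_(model.parameters(), grad_clip_norm)
    opt.step()
    opt.zero_grad()
    return loss.item()
```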
jhuebotter/CartpoleSNNdemo | [
"d18a85cbc45bff48295c46c9cd8c9fc00192318c"
] | [
"CartPole/_CartPole_mathematical_helpers.py"
] | [
"\"\"\"\nSmall general mathematical functions.\nThis file was necessary to make CartPole module self-contained.\n\"\"\"\n\nfrom math import fmod\nimport numpy as np\n\n\n# Wraps the angle into range [-π, π]\ndef wrap_angle_rad(angle: float) -> float:\n Modulo = fmod(angle, 2 * np.pi) # positive modulo\n if Modulo < -np.pi:\n angle = Modulo + 2 * np.pi\n elif Modulo > np.pi:\n angle = Modulo - 2 * np.pi\n else:\n angle = Modulo\n return angle\n\n\ndef wrap_angle_rad_inplace(angle: np.ndarray) -> None:\n Modulo = np.fmod(angle, 2 * np.pi) # positive modulo\n neg_wrap, pos_wrap = Modulo < -np.pi, Modulo > np.pi\n angle[neg_wrap] = Modulo[neg_wrap] + 2 * np.pi\n angle[pos_wrap] = Modulo[pos_wrap] - 2 * np.pi\n angle[~(neg_wrap | pos_wrap)] = Modulo[~(neg_wrap | pos_wrap)]\n\n\ndef conditional_decorator(dec, cond):\n def decorator(func):\n return dec(func) if cond else func\n return decorator\n\n\n"
] | [
[
"numpy.fmod"
]
] |
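`wrap_angle_rad` in the record above maps any angle into [-π, π] via `fmod`. A quick vectorised check of the same convention using an alternative closed form (boundary values may land on -π instead of π, but stay inside the interval):

```python
import numpy as np

angles = np.array([3.5 * np.pi, -2.5 * np.pi, 0.1])
wrapped = np.mod(angles + np.pi, 2 * np.pi) - np.pi   # alternative one-liner
assert np.all((wrapped >= -np.pi) & (wrapped <= np.pi))
# e.g. 3.5*pi -> -0.5*pi, matching wrap_angle_rad(3.5*pi)
```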
augustehirth/Cirq | [
"e616710a0fa243524a9f6d7bc0d35e6b952fe3d0"
] | [
"cirq-google/cirq_google/serialization/op_serializer_test.py"
] | [
"# Copyright 2019 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Dict, List\n\nimport copy\nimport numpy as np\nimport pytest\nimport sympy\n\nfrom google.protobuf import json_format\n\nimport cirq\nimport cirq_google as cg\nfrom cirq_google.api import v2\n\n\nDEFAULT_TOKEN = 'test_tag'\n\n\ndef op_proto(json: Dict) -> v2.program_pb2.Operation:\n op = v2.program_pb2.Operation()\n json_format.ParseDict(json, op)\n return op\n\n\nclass GateWithAttribute(cirq.SingleQubitGate):\n def __init__(self, val):\n self.val = val\n\n\nclass GateWithProperty(cirq.SingleQubitGate):\n def __init__(self, val, not_req=None):\n self._val = val\n self._not_req = not_req\n\n @property\n def val(self):\n return self._val\n\n\nclass GateWithMethod(cirq.SingleQubitGate):\n def __init__(self, val):\n self._val = val\n\n def get_val(self):\n return self._val\n\n\nclass SubclassGate(GateWithAttribute):\n\n pass\n\n\ndef get_val(op):\n return op.gate.get_val()\n\n\nTEST_CASES = (\n (float, 1.0, {'arg_value': {'float_value': 1.0}}),\n (str, 'abc', {'arg_value': {'string_value': 'abc'}}),\n (float, 1, {'arg_value': {'float_value': 1.0}}),\n (List[bool], [True, False], {'arg_value': {'bool_values': {'values': [True, False]}}}),\n (List[bool], (True, False), {'arg_value': {'bool_values': {'values': [True, False]}}}),\n (\n List[bool],\n np.array([True, False], dtype=bool),\n {'arg_value': {'bool_values': {'values': [True, False]}}},\n ),\n (sympy.Symbol, sympy.Symbol('x'), {'symbol': 'x'}),\n (float, sympy.Symbol('x'), {'symbol': 'x'}),\n (\n float,\n sympy.Symbol('x') - sympy.Symbol('y'),\n {\n 'func': {\n 'type': 'add',\n 'args': [\n {'symbol': 'x'},\n {\n 'func': {\n 'type': 'mul',\n 'args': [{'arg_value': {'float_value': -1.0}}, {'symbol': 'y'}],\n }\n },\n ],\n }\n },\n ),\n)\n\n\[email protected](('val_type', 'val', 'arg_value'), TEST_CASES)\ndef test_to_proto_attribute(val_type, val, arg_value):\n serializer = cg.GateOpSerializer(\n gate_type=GateWithAttribute,\n serialized_gate_id='my_gate',\n args=[\n cg.SerializingArg(serialized_name='my_val', serialized_type=val_type, op_getter='val')\n ],\n )\n q = cirq.GridQubit(1, 2)\n result = serializer.to_proto(GateWithAttribute(val)(q), arg_function_language='linear')\n expected = op_proto(\n {'gate': {'id': 'my_gate'}, 'args': {'my_val': arg_value}, 'qubits': [{'id': '1_2'}]}\n )\n assert result == expected\n\n\[email protected](('val_type', 'val', 'arg_value'), TEST_CASES)\ndef test_to_proto_property(val_type, val, arg_value):\n serializer = cg.GateOpSerializer(\n gate_type=GateWithProperty,\n serialized_gate_id='my_gate',\n args=[\n cg.SerializingArg(serialized_name='my_val', serialized_type=val_type, op_getter='val')\n ],\n )\n q = cirq.GridQubit(1, 2)\n result = serializer.to_proto(GateWithProperty(val)(q), arg_function_language='linear')\n expected = op_proto(\n {'gate': {'id': 'my_gate'}, 'args': {'my_val': arg_value}, 'qubits': [{'id': '1_2'}]}\n )\n assert result == expected\n\n\[email protected](('val_type', 'val', 
'arg_value'), TEST_CASES)\ndef test_to_proto_callable(val_type, val, arg_value):\n serializer = cg.GateOpSerializer(\n gate_type=GateWithMethod,\n serialized_gate_id='my_gate',\n args=[\n cg.SerializingArg(serialized_name='my_val', serialized_type=val_type, op_getter=get_val)\n ],\n )\n q = cirq.GridQubit(1, 2)\n result = serializer.to_proto(GateWithMethod(val)(q), arg_function_language='linear')\n expected = op_proto(\n {'gate': {'id': 'my_gate'}, 'args': {'my_val': arg_value}, 'qubits': [{'id': '1_2'}]}\n )\n assert result == expected\n\n\ndef test_to_proto_gate_predicate():\n serializer = cg.GateOpSerializer(\n gate_type=GateWithAttribute,\n serialized_gate_id='my_gate',\n args=[cg.SerializingArg(serialized_name='my_val', serialized_type=float, op_getter='val')],\n can_serialize_predicate=lambda x: x.gate.val == 1,\n )\n q = cirq.GridQubit(1, 2)\n assert serializer.to_proto(GateWithAttribute(0)(q)) is None\n assert serializer.to_proto(GateWithAttribute(1)(q)) is not None\n assert not serializer.can_serialize_operation(GateWithAttribute(0)(q))\n assert serializer.can_serialize_operation(GateWithAttribute(1)(q))\n\n\ndef test_to_proto_gate_mismatch():\n serializer = cg.GateOpSerializer(\n gate_type=GateWithProperty,\n serialized_gate_id='my_gate',\n args=[cg.SerializingArg(serialized_name='my_val', serialized_type=float, op_getter='val')],\n )\n q = cirq.GridQubit(1, 2)\n with pytest.raises(ValueError, match='GateWithAttribute.*GateWithProperty'):\n serializer.to_proto(GateWithAttribute(1.0)(q))\n\n\ndef test_to_proto_unsupported_type():\n serializer = cg.GateOpSerializer(\n gate_type=GateWithProperty,\n serialized_gate_id='my_gate',\n args=[cg.SerializingArg(serialized_name='my_val', serialized_type=bytes, op_getter='val')],\n )\n q = cirq.GridQubit(1, 2)\n with pytest.raises(ValueError, match='bytes'):\n serializer.to_proto(GateWithProperty(b's')(q))\n\n\ndef test_to_proto_named_qubit_supported():\n serializer = cg.GateOpSerializer(\n gate_type=GateWithProperty,\n serialized_gate_id='my_gate',\n args=[cg.SerializingArg(serialized_name='my_val', serialized_type=float, op_getter='val')],\n )\n q = cirq.NamedQubit('a')\n arg_value = 1.0\n result = serializer.to_proto(GateWithProperty(arg_value)(q))\n\n expected = op_proto(\n {\n 'gate': {'id': 'my_gate'},\n 'args': {'my_val': {'arg_value': {'float_value': arg_value}}},\n 'qubits': [{'id': 'a'}],\n }\n )\n assert result == expected\n\n\ndef test_to_proto_line_qubit_supported():\n serializer = cg.GateOpSerializer(\n gate_type=GateWithProperty,\n serialized_gate_id='my_gate',\n args=[cg.SerializingArg(serialized_name='my_val', serialized_type=float, op_getter='val')],\n )\n q = cirq.LineQubit('10')\n arg_value = 1.0\n result = serializer.to_proto(GateWithProperty(arg_value)(q))\n\n expected = op_proto(\n {\n 'gate': {'id': 'my_gate'},\n 'args': {'my_val': {'arg_value': {'float_value': arg_value}}},\n 'qubits': [{'id': '10'}],\n }\n )\n assert result == expected\n\n\ndef test_to_proto_required_but_not_present():\n serializer = cg.GateOpSerializer(\n gate_type=GateWithProperty,\n serialized_gate_id='my_gate',\n args=[\n cg.SerializingArg(\n serialized_name='my_val', serialized_type=float, op_getter=lambda x: None\n )\n ],\n )\n q = cirq.GridQubit(1, 2)\n with pytest.raises(ValueError, match='required'):\n serializer.to_proto(GateWithProperty(1.0)(q))\n\n\ndef test_to_proto_no_getattr():\n serializer = cg.GateOpSerializer(\n gate_type=GateWithProperty,\n serialized_gate_id='my_gate',\n args=[cg.SerializingArg(serialized_name='my_val', 
serialized_type=float, op_getter='nope')],\n )\n q = cirq.GridQubit(1, 2)\n with pytest.raises(ValueError, match='does not have'):\n serializer.to_proto(GateWithProperty(1.0)(q))\n\n\ndef test_to_proto_not_required_ok():\n serializer = cg.GateOpSerializer(\n gate_type=GateWithProperty,\n serialized_gate_id='my_gate',\n args=[\n cg.SerializingArg(serialized_name='my_val', serialized_type=float, op_getter='val'),\n cg.SerializingArg(\n serialized_name='not_req',\n serialized_type=float,\n op_getter='not_req',\n required=False,\n ),\n ],\n )\n expected = op_proto(\n {\n 'gate': {'id': 'my_gate'},\n 'args': {'my_val': {'arg_value': {'float_value': 0.125}}},\n 'qubits': [{'id': '1_2'}],\n }\n )\n\n q = cirq.GridQubit(1, 2)\n assert serializer.to_proto(GateWithProperty(0.125)(q)) == expected\n\n\[email protected](\n ('val_type', 'val'),\n (\n (float, 's'),\n (str, 1.0),\n (sympy.Symbol, 1.0),\n (List[bool], [1.0]),\n (List[bool], 'a'),\n (List[bool], (1.0,)),\n ),\n)\ndef test_to_proto_type_mismatch(val_type, val):\n serializer = cg.GateOpSerializer(\n gate_type=GateWithProperty,\n serialized_gate_id='my_gate',\n args=[\n cg.SerializingArg(serialized_name='my_val', serialized_type=val_type, op_getter='val')\n ],\n )\n q = cirq.GridQubit(1, 2)\n with pytest.raises(ValueError, match=str(type(val))):\n serializer.to_proto(GateWithProperty(val)(q))\n\n\ndef test_can_serialize_operation_subclass():\n serializer = cg.GateOpSerializer(\n gate_type=GateWithAttribute,\n serialized_gate_id='my_gate',\n args=[cg.SerializingArg(serialized_name='my_val', serialized_type=float, op_getter='val')],\n can_serialize_predicate=lambda x: x.gate.val == 1,\n )\n q = cirq.GridQubit(1, 1)\n assert serializer.can_serialize_operation(SubclassGate(1)(q))\n assert not serializer.can_serialize_operation(SubclassGate(0)(q))\n\n\ndef test_defaults_not_serialized():\n serializer = cg.GateOpSerializer(\n gate_type=GateWithAttribute,\n serialized_gate_id='my_gate',\n args=[\n cg.SerializingArg(\n serialized_name='my_val', serialized_type=float, default=1.0, op_getter='val'\n )\n ],\n )\n q = cirq.GridQubit(1, 2)\n no_default = op_proto(\n {\n 'gate': {'id': 'my_gate'},\n 'args': {'my_val': {'arg_value': {'float_value': 0.125}}},\n 'qubits': [{'id': '1_2'}],\n }\n )\n assert no_default == serializer.to_proto(GateWithAttribute(0.125)(q))\n with_default = op_proto({'gate': {'id': 'my_gate'}, 'qubits': [{'id': '1_2'}]})\n assert with_default == serializer.to_proto(GateWithAttribute(1.0)(q))\n\n\ndef test_token_serialization():\n serializer = cg.GateOpSerializer(\n gate_type=GateWithAttribute,\n serialized_gate_id='my_gate',\n args=[cg.SerializingArg(serialized_name='my_val', serialized_type=float, op_getter='val')],\n )\n q = cirq.GridQubit(1, 2)\n tag = cg.CalibrationTag('my_token')\n expected = op_proto(\n {\n 'gate': {'id': 'my_gate'},\n 'args': {'my_val': {'arg_value': {'float_value': 0.125}}},\n 'qubits': [{'id': '1_2'}],\n 'token_value': 'my_token',\n }\n )\n assert expected == serializer.to_proto(GateWithAttribute(0.125)(q).with_tags(tag))\n\n\nONE_CONSTANT = [v2.program_pb2.Constant(string_value='my_token')]\nTWO_CONSTANTS = [\n v2.program_pb2.Constant(string_value='other_token'),\n v2.program_pb2.Constant(string_value='my_token'),\n]\n\n\[email protected](\n ('constants', 'expected_index', 'expected_constants'),\n (\n ([], 0, ONE_CONSTANT),\n (ONE_CONSTANT, 0, ONE_CONSTANT),\n (TWO_CONSTANTS, 1, TWO_CONSTANTS),\n ),\n)\ndef test_token_serialization_with_constant_reference(constants, expected_index, expected_constants):\n 
serializer = cg.GateOpSerializer(\n gate_type=GateWithAttribute,\n serialized_gate_id='my_gate',\n args=[cg.SerializingArg(serialized_name='my_val', serialized_type=float, op_getter='val')],\n )\n # Make a local copy since we are modifying the array in-place.\n constants = copy.copy(constants)\n q = cirq.GridQubit(1, 2)\n tag = cg.CalibrationTag('my_token')\n expected = op_proto(\n {\n 'gate': {'id': 'my_gate'},\n 'args': {'my_val': {'arg_value': {'float_value': 0.125}}},\n 'qubits': [{'id': '1_2'}],\n 'token_constant_index': expected_index,\n }\n )\n assert expected == serializer.to_proto(\n GateWithAttribute(0.125)(q).with_tags(tag), constants=constants\n )\n assert constants == expected_constants\n\n\ndef default_circuit_proto():\n op1 = v2.program_pb2.Operation()\n op1.gate.id = 'x_pow'\n op1.args['half_turns'].arg_value.string_value = 'k'\n op1.qubits.add().id = '1_1'\n\n op2 = v2.program_pb2.Operation()\n op2.gate.id = 'x_pow'\n op2.args['half_turns'].arg_value.float_value = 1.0\n op2.qubits.add().id = '1_2'\n op2.token_constant_index = 0\n\n return v2.program_pb2.Circuit(\n scheduling_strategy=v2.program_pb2.Circuit.MOMENT_BY_MOMENT,\n moments=[\n v2.program_pb2.Moment(\n operations=[op1, op2],\n ),\n ],\n )\n\n\ndef default_circuit():\n return cirq.FrozenCircuit(\n cirq.X(cirq.GridQubit(1, 1)) ** sympy.Symbol('k'),\n cirq.X(cirq.GridQubit(1, 2)).with_tags(DEFAULT_TOKEN),\n cirq.measure(cirq.GridQubit(1, 1), key='m'),\n )\n\n\ndef test_circuit_op_serializer_properties():\n serializer = cg.CircuitOpSerializer()\n assert serializer.internal_type == cirq.FrozenCircuit\n assert serializer.serialized_id == 'circuit'\n\n\ndef test_can_serialize_circuit_op():\n serializer = cg.CircuitOpSerializer()\n assert serializer.can_serialize_operation(cirq.CircuitOperation(default_circuit()))\n assert not serializer.can_serialize_operation(cirq.X(cirq.GridQubit(1, 1)))\n\n\ndef test_circuit_op_to_proto_errors():\n serializer = cg.CircuitOpSerializer()\n to_serialize = cirq.CircuitOperation(default_circuit())\n\n constants = [\n v2.program_pb2.Constant(string_value=DEFAULT_TOKEN),\n v2.program_pb2.Constant(circuit_value=default_circuit_proto()),\n ]\n raw_constants = {\n DEFAULT_TOKEN: 0,\n default_circuit(): 1,\n }\n\n with pytest.raises(ValueError, match='CircuitOp serialization requires a constants list'):\n serializer.to_proto(to_serialize)\n\n with pytest.raises(ValueError, match='CircuitOp serialization requires a constants list'):\n serializer.to_proto(to_serialize, constants=constants)\n\n with pytest.raises(ValueError, match='CircuitOp serialization requires a constants list'):\n serializer.to_proto(to_serialize, raw_constants=raw_constants)\n\n with pytest.raises(ValueError, match='Serializer expected CircuitOperation'):\n serializer.to_proto(\n v2.program_pb2.Operation(), constants=constants, raw_constants=raw_constants\n )\n\n bad_raw_constants = {cirq.FrozenCircuit(): 0}\n with pytest.raises(ValueError, match='Encountered a circuit not in the constants table'):\n serializer.to_proto(to_serialize, constants=constants, raw_constants=bad_raw_constants)\n\n with pytest.raises(ValueError, match='Cannot serialize repetitions of type'):\n serializer.to_proto(\n to_serialize ** sympy.Symbol('a'), constants=constants, raw_constants=raw_constants\n )\n\n\[email protected]('repetitions', [1, 5, ['a', 'b', 'c']])\ndef test_circuit_op_to_proto(repetitions):\n serializer = cg.CircuitOpSerializer()\n if isinstance(repetitions, int):\n repetition_ids = None\n else:\n repetition_ids = repetitions\n 
repetitions = len(repetition_ids)\n to_serialize = cirq.CircuitOperation(\n circuit=default_circuit(),\n qubit_map={cirq.GridQubit(1, 1): cirq.GridQubit(1, 2)},\n measurement_key_map={'m': 'results'},\n param_resolver={'k': 1.0},\n repetitions=repetitions,\n repetition_ids=repetition_ids,\n )\n\n constants = [\n v2.program_pb2.Constant(string_value=DEFAULT_TOKEN),\n v2.program_pb2.Constant(circuit_value=default_circuit_proto()),\n ]\n raw_constants = {\n DEFAULT_TOKEN: 0,\n default_circuit(): 1,\n }\n\n repetition_spec = v2.program_pb2.RepetitionSpecification()\n if repetition_ids is None:\n repetition_spec.repetition_count = repetitions\n else:\n for rep_id in repetition_ids:\n repetition_spec.repetition_ids.ids.append(rep_id)\n\n qubit_map = v2.program_pb2.QubitMapping()\n q_p1 = qubit_map.entries.add()\n q_p1.key.id = '1_1'\n q_p1.value.id = '1_2'\n\n measurement_key_map = v2.program_pb2.MeasurementKeyMapping()\n meas_p1 = measurement_key_map.entries.add()\n meas_p1.key.string_key = 'm'\n meas_p1.value.string_key = 'results'\n\n arg_map = v2.program_pb2.ArgMapping()\n arg_p1 = arg_map.entries.add()\n arg_p1.key.arg_value.string_value = 'k'\n arg_p1.value.arg_value.float_value = 1.0\n\n expected = v2.program_pb2.CircuitOperation(\n circuit_constant_index=1,\n repetition_specification=repetition_spec,\n qubit_map=qubit_map,\n measurement_key_map=measurement_key_map,\n arg_map=arg_map,\n )\n actual = serializer.to_proto(to_serialize, constants=constants, raw_constants=raw_constants)\n assert actual == expected\n"
] | [
[
"numpy.array"
]
] |
LiyrAstroph/CDNest | [
"afb6b869ce1c4ebd76662b20310f1d9d3db4e26e"
] | [
"tests/rastrigin_accept_action.py"
] | [
"#\n# sample from a Rastrigin test function\n# this is to illustrate how to use accept_action in CDNest to avoid repeat calculations.\n#\n# A 2D Rastrigin function looks\n# \n# logL=-(10.0*2 + (coords[0]**2 - 10*np.cos(2.0*np.pi*coords[0])) + (coords[1]**2 - 10*np.cos(2.0*np.pi*coords[1])) ) \n#\n# Every perturb, only one parameter is updated, so that the terms related to the rest parameters \n# do not need to recalculate, just use the values in the previous step.\n#\n# In this example, we use an array to record values of the term \"(coords[0]**2 - 10*np.cos(2.0*np.pi*coords[0]))\"\n# in every accepted perturb.\n#\n\nfrom mpi4py import MPI\nimport numpy as np\nimport cydnest\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\n\ncomm = MPI.COMM_WORLD\nrank = comm.Get_rank()\n\ndef randh():\n \"\"\"\n generate from the heavy-tailed distribution.\n \"\"\"\n return 10.0**(1.5 - 3*np.abs(np.random.randn()/np.sqrt(-np.log(np.random.rand()))))*np.random.randn()\n\ndef wrap(x, a, b):\n assert b > a\n return (x - a)%(b - a) + a\n\nclass Model(object):\n\n def __init__(self, num_params=1, num_particles=1):\n \"\"\"\n intialize the model\n \"\"\"\n # number of particles each core holds\n self.num_particles = num_particles\n\n # number of parameters\n self.num_params = num_params \n\n # parameter ranges, a list\n self.param_range = [[-5.12, 5.12]]*num_params\n\n # parameter prior type.\n # three types: Uniform, Gaussian, Log \n self.prior_type = [\"Uniform\"]*num_params\n\n # parameter prior information. used when the prior is Gaussian\n # indicate the mean and standard deviation of the Gaussian prior\n self.prior_info = [[0.0, 1.0]]*num_params\n \n # which parameter being perturbed \n # which particle being perturbed\n self.which_param_update = 0\n self.which_particle_update = 0\n\n # perturbed values and accepted values for all particles\n self.value_perturb = [0.0]*self.num_particles\n self.value_accept = [0.0]*self.num_particles\n\n def accept_action(self):\n \"\"\"\n action taken when a perturb is accepted\n record the accepted values from the perturbed values\n \"\"\"\n\n # note \"which_particle_update\" is updated and \"which_param_update\" is updated\n if self.which_param_update < 1:\n self.value_accept[self.which_particle_update] = self.value_perturb[self.which_particle_update]\n \n def kill_action(self, i, i_copy):\n \"\"\"\n cdnest kill a particle when it is not updated for a long time.\n action taken when a particle is killed: i particle is killed,\n copy i_copy particle's values to i particle's values\n this function is needed, since we record some accepted values \n \"\"\"\n self.value_accept[i] = self.value_accept[i_copy]\n return\n \n # users can define their own functions to generate \n # the initial parameter values \n # this is optinal. if not defined, cydnest will use the internal \n # function. 
\n def from_prior(self):\n \"\"\"\n generate initial values of model parameters from priors\n \"\"\"\n coords = np.zeros(self.num_params)\n for i in range(self.num_params):\n if self.prior_type[i] == \"Uniform\":\n coords[i] = np.random.uniform(self.param_range[i][0], self.param_range[i][1])\n elif self.prior_type[i] == \"Gaussian\":\n coords[i] = np.random.randn() * self.prior_info[i][1] + self.prior_info[0]\n wrap(coords[i], self.param_range[i][0], self.param_range[i][1])\n elif self.prior_type[i] == \"Log\": # LOG prior\n coords[i] = np.random.uniform(np.log(self.param_range[i][0]), np.log(self.param_range[i][1]))\n coords[i] = np.exp(coords[i])\n\n return coords\n\n # users can define their own functions to perturb \n # parameter values for sampling \n # this is optinal. if not defined, cydnest will use the internal \n # function. \n def perturb(self, coords):\n \"\"\"\n perturb the parameters\n \"\"\"\n i = np.random.randint(self.num_params)\n \n # record which parameter is updated\n self.which_param_update = i\n\n LogH = 0.0 # prior ratio: ln(prior(new)/prior(old)) = ln(prior(new)) - ln(prior(old))\n width = (self.param_range[i][1]-self.param_range[i][0])\n if self.prior_type[i] == \"Uniform\":\n coords[i] += width*randh()\n coords[i] = wrap(coords[i], self.param_range[i][0], self.param_range[i][1])\n elif self.prior_type[i] == \"Gaussian\": \n LogH -= ( -0.5* (coords[i] - self.prior_info[i][0])**2/self.prior_info[i][1]**2 ) # ln(Gaussian)\n coords[i] += width*randh()\n coords[i] = wrap(coords[i], self.param_range[i][0], self.param_range[i][1])\n LogH += ( -0.5* (coords[i] - self.prior_info[i][0])**2/self.prior_info[i][1]**2 )\n elif self.prior_type[i] == \"Log\":\n LogH -= ( -np.log(coords[i]) ) # ln(1/x) = -ln(x)\n coords[i] += width*randh()\n coords[i] = wrap(coords[i], self.param_range[i][0], self.param_range[i][1])\n LogH += ( -np.log(coords[i]) )\n return LogH \n \n def log_likelihood_initial(self, coords):\n \"\"\"\n calculate likelihood at initial start\n \"\"\" \n self.which_particle_update = cydnest.get_which_particle_update()\n self.value_accept[self.which_particle_update] = coords[0]**2 - 10*np.cos(2.0*np.pi*coords[0])\n value = self.value_accept[self.which_particle_update]\n return -(10.0*2 + (value) + (coords[1]**2 - 10*np.cos(2.0*np.pi*coords[1])) )\n\n def log_likelihood(self, coords):\n \"\"\"\n calculate likelihood\n \"\"\"\n # get which particle is being updated, and save it to self model\n\n self.which_particle_update = cydnest.get_which_particle_update()\n \n value = 0.0\n if self.which_param_update < 1: # when 0-th parameter update, recalculate\n self.value_perturb[self.which_particle_update] = coords[0]**2 - 10*np.cos(2.0*np.pi*coords[0])\n value = self.value_perturb[self.which_particle_update]\n else: # otherwise, use the accepted value\n value = self.value_accept[self.which_particle_update]\n\n return -(10.0*2 + (value) + (coords[1]**2 - 10*np.cos(2.0*np.pi*coords[1])) )\n\n# create a model\nmodel = Model(num_params=2, num_particles=2)\n\n# create a dnest sampler\n# max_num_save is the number of samples to generate\n# max_num_levels is the number of levels \n# ptol is the likelihood tolerance in loge()\nsampler = cydnest.sampler(model, sample_dir=\"./\", max_num_saves = 10000, ptol=0.1, num_particles=model.num_particles)\n#\n# The full argument lists look like:\n# sampler = cydnest.sampler(model, sample_dir=\"./\", max_num_saves = 10000, ptol=0.1, \n# num_particles=1, thread_steps_factor = 10, \n# max_num_levels = 0, lam = 10, beta = 100\n# 
new_level_interval_factor = 2, save_interval_factor = 2)\n#\n\n\n# run sampler\nlogz = sampler.run()\ncomm.Barrier()\n\n# ouput evidence\nif rank == 0:\n print(\"Evidence:\", logz)\n\n psample = np.loadtxt(sampler.get_sample_dir() +\"/posterior_sample\" + sampler.get_sample_tag() + \".txt\")\n psample_info = np.loadtxt(sampler.get_sample_dir() +\"/posterior_sample_info\" + sampler.get_sample_tag() + \".txt\")\n\n fig = plt.figure(figsize=(15, 12))\n ax = fig.add_subplot(111, projection='3d')\n \n\n X = np.arange(-1.5, 1.5, 0.01)\n Y = np.arange(-1.5, 1.5, 0.01)\n X, Y = np.meshgrid(X, Y)\n Z = -(10.0*2 + (X**2 - 10*np.cos(2.0*np.pi*X)) + (Y**2 - 10*np.cos(2.0*np.pi*Y)) )\n ax.plot_surface(X, Y, Z, cmap=cm.ocean, rstride=2, cstride=2, linewidth=0, antialiased=False, zorder=0)\n\n idx = np.where((np.abs(psample[:, 0]) <1.4) & (np.abs(psample[:, 1]) <1.4))\n ax.plot(psample[idx[0], 0], psample[idx[0], 1], psample_info[idx[0]], ls='none', marker='+', zorder=10)\n ax.set_xlim(-1.5, 1.5)\n ax.set_ylim(-1.5, 1.5)\n ax.set_xlabel(r'$\\theta_1$')\n ax.set_ylabel(r'$\\theta_2$')\n ax.set_zlabel(r'$\\log L$')\n fig.savefig(\"fig_rastrigin.jpg\", bbox_inches='tight')\n plt.show()\n\n # do postprocess, plot, show the properties of sampling \n cydnest.postprocess(sampler.get_sample_dir(), sampler.get_sample_tag(), temperature=1.0, doplot=True)"
] | [
[
"numpy.random.uniform",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.random.randn",
"numpy.cos",
"numpy.abs",
"numpy.exp",
"numpy.arange",
"matplotlib.pyplot.show",
"numpy.log",
"numpy.random.rand",
"numpy.meshgrid",
"numpy.random.randint"
]
] |
echo-ray/catalyst | [
"8b4274d17f0a42ee4d1d5e09d30fb0919aea2a51"
] | [
"catalyst/marketplace/marketplace.py"
] | [
"from __future__ import print_function\n\nimport glob\nimport json\nimport os\nimport re\nimport shutil\nimport sys\nimport time\nimport webbrowser\n\nimport bcolz\nimport logbook\nimport pandas as pd\nimport requests\nfrom requests_toolbelt import MultipartDecoder\nfrom requests_toolbelt.multipart.decoder import \\\n NonMultipartContentTypeException\n\nfrom catalyst.constants import (\n LOG_LEVEL, AUTH_SERVER, ETH_REMOTE_NODE, MARKETPLACE_CONTRACT,\n MARKETPLACE_CONTRACT_ABI, ENIGMA_CONTRACT, ENIGMA_CONTRACT_ABI)\nfrom catalyst.exchange.utils.stats_utils import set_print_settings\nfrom catalyst.marketplace.marketplace_errors import (\n MarketplacePubAddressEmpty, MarketplaceDatasetNotFound,\n MarketplaceNoAddressMatch, MarketplaceHTTPRequest,\n MarketplaceNoCSVFiles, MarketplaceRequiresPython3)\nfrom catalyst.marketplace.utils.auth_utils import get_key_secret, \\\n get_signed_headers\nfrom catalyst.marketplace.utils.bundle_utils import merge_bundles\nfrom catalyst.marketplace.utils.eth_utils import bin_hex, from_grains, \\\n to_grains\nfrom catalyst.marketplace.utils.path_utils import get_bundle_folder, \\\n get_data_source_folder, get_marketplace_folder, \\\n get_user_pubaddr, get_temp_bundles_folder, extract_bundle\nfrom catalyst.utils.paths import ensure_directory\n\nif sys.version_info.major < 3:\n import urllib\nelse:\n import urllib.request as urllib\n\nlog = logbook.Logger('Marketplace', level=LOG_LEVEL)\n\n\nclass Marketplace:\n def __init__(self):\n global Web3\n try:\n from web3 import Web3, HTTPProvider\n except ImportError:\n raise MarketplaceRequiresPython3()\n\n self.addresses = get_user_pubaddr()\n\n if self.addresses[0]['pubAddr'] == '':\n raise MarketplacePubAddressEmpty(\n filename=os.path.join(\n get_marketplace_folder(), 'addresses.json')\n )\n self.default_account = self.addresses[0]['pubAddr']\n\n self.web3 = Web3(HTTPProvider(ETH_REMOTE_NODE))\n\n contract_url = urllib.urlopen(MARKETPLACE_CONTRACT)\n\n self.mkt_contract_address = Web3.toChecksumAddress(\n contract_url.readline().decode(\n contract_url.info().get_content_charset()).strip())\n\n abi_url = urllib.urlopen(MARKETPLACE_CONTRACT_ABI)\n abi = json.load(abi_url)\n\n self.mkt_contract = self.web3.eth.contract(\n self.mkt_contract_address,\n abi=abi,\n )\n\n contract_url = urllib.urlopen(ENIGMA_CONTRACT)\n\n self.eng_contract_address = Web3.toChecksumAddress(\n contract_url.readline().decode(\n contract_url.info().get_content_charset()).strip())\n\n abi_url = urllib.urlopen(ENIGMA_CONTRACT_ABI)\n abi = json.load(abi_url)\n\n self.eng_contract = self.web3.eth.contract(\n self.eng_contract_address,\n abi=abi,\n )\n\n # def get_data_sources_map(self):\n # return [\n # dict(\n # name='Marketcap',\n # desc='The marketcap value in USD.',\n # start_date=pd.to_datetime('2017-01-01'),\n # end_date=pd.to_datetime('2018-01-15'),\n # data_frequencies=['daily'],\n # ),\n # dict(\n # name='GitHub',\n # desc='The rate of development activity on GitHub.',\n # start_date=pd.to_datetime('2017-01-01'),\n # end_date=pd.to_datetime('2018-01-15'),\n # data_frequencies=['daily', 'hour'],\n # ),\n # dict(\n # name='Influencers',\n # desc='Tweets & related sentiments by selected influencers.',\n # start_date=pd.to_datetime('2017-01-01'),\n # end_date=pd.to_datetime('2018-01-15'),\n # data_frequencies=['daily', 'hour', 'minute'],\n # ),\n # ]\n\n def to_text(self, hex):\n return Web3.toText(hex).rstrip('\\0')\n\n def choose_pubaddr(self):\n if len(self.addresses) == 1:\n address = self.addresses[0]['pubAddr']\n address_i = 0\n 
print('Using {} for this transaction.'.format(address))\n else:\n while True:\n for i in range(0, len(self.addresses)):\n print('{}\\t{}\\t{}'.format(\n i,\n self.addresses[i]['pubAddr'],\n self.addresses[i]['desc'])\n )\n address_i = int(input('Choose your address associated with '\n 'this transaction: [default: 0] ') or 0)\n if not (0 <= address_i < len(self.addresses)):\n print('Please choose a number between 0 and {}\\n'.format(\n len(self.addresses) - 1))\n else:\n address = Web3.toChecksumAddress(\n self.addresses[address_i]['pubAddr'])\n break\n\n return address, address_i\n\n def sign_transaction(self, tx):\n\n url = 'https://www.myetherwallet.com/#offline-transaction'\n print('\\nVisit {url} and enter the following parameters:\\n\\n'\n 'From Address:\\t\\t{_from}\\n'\n '\\n\\tClick the \"Generate Information\" button\\n\\n'\n 'To Address:\\t\\t{to}\\n'\n 'Value / Amount to Send:\\t{value}\\n'\n 'Gas Limit:\\t\\t{gas}\\n'\n 'Gas Price:\\t\\t[Accept the default value]\\n'\n 'Nonce:\\t\\t\\t{nonce}\\n'\n 'Data:\\t\\t\\t{data}\\n'.format(\n url=url,\n _from=tx['from'],\n to=tx['to'],\n value=tx['value'],\n gas=tx['gas'],\n nonce=tx['nonce'],\n data=tx['data'], )\n )\n\n webbrowser.open_new(url)\n\n signed_tx = input('Copy and Paste the \"Signed Transaction\" '\n 'field here:\\n')\n\n if signed_tx.startswith('0x'):\n signed_tx = signed_tx[2:]\n\n return signed_tx\n\n def check_transaction(self, tx_hash):\n\n if 'ropsten' in ETH_REMOTE_NODE:\n etherscan = 'https://ropsten.etherscan.io/tx/'\n elif 'rinkeby' in ETH_REMOTE_NODE:\n etherscan = 'https://rinkeby.etherscan.io/tx/'\n else:\n etherscan = 'https://etherscan.io/tx/'\n etherscan = '{}{}'.format(etherscan, tx_hash)\n\n print('\\nYou can check the outcome of your transaction here:\\n'\n '{}\\n\\n'.format(etherscan))\n\n def _list(self):\n data_sources = self.mkt_contract.functions.getAllProviders().call()\n\n data = []\n for index, data_source in enumerate(data_sources):\n if index > 0:\n if 'test' not in Web3.toText(data_source).lower():\n data.append(\n dict(\n dataset=self.to_text(data_source)\n )\n )\n return pd.DataFrame(data)\n\n def list(self):\n df = self._list()\n\n set_print_settings()\n if df.empty:\n print('There are no datasets available yet.')\n else:\n print(df)\n\n def subscribe(self, dataset=None):\n\n if dataset is None:\n\n df_sets = self._list()\n if df_sets.empty:\n print('There are no datasets available yet.')\n return\n\n set_print_settings()\n while True:\n print(df_sets)\n dataset_num = input('Choose the dataset you want to '\n 'subscribe to [0..{}]: '.format(\n df_sets.size - 1))\n try:\n dataset_num = int(dataset_num)\n except ValueError:\n print('Enter a number between 0 and {}'.format(\n df_sets.size - 1))\n else:\n if dataset_num not in range(0, df_sets.size):\n print('Enter a number between 0 and {}'.format(\n df_sets.size - 1))\n else:\n dataset = df_sets.iloc[dataset_num]['dataset']\n break\n\n dataset = dataset.lower()\n\n address = self.choose_pubaddr()[0]\n provider_info = self.mkt_contract.functions.getDataProviderInfo(\n Web3.toHex(dataset)\n ).call()\n\n if not provider_info[4]:\n print('The requested \"{}\" dataset is not registered in '\n 'the Data Marketplace.'.format(dataset))\n return\n\n grains = provider_info[1]\n price = from_grains(grains)\n\n subscribed = self.mkt_contract.functions.checkAddressSubscription(\n address, Web3.toHex(dataset)\n ).call()\n\n if subscribed[5]:\n print(\n '\\nYou are already subscribed to the \"{}\" dataset.\\n'\n 'Your subscription started on {} UTC, and is 
valid until '\n '{} UTC.'.format(\n dataset,\n pd.to_datetime(subscribed[3], unit='s', utc=True),\n pd.to_datetime(subscribed[4], unit='s', utc=True)\n )\n )\n return\n\n print('\\nThe price for a monthly subscription to this dataset is'\n ' {} ENG'.format(price))\n\n print(\n 'Checking that the ENG balance in {} is greater than {} '\n 'ENG... '.format(address, price), end=''\n )\n\n wallet_address = address[2:]\n balance = self.web3.eth.call({\n 'from': address,\n 'to': self.eng_contract_address,\n 'data': '0x70a08231000000000000000000000000{}'.format(\n wallet_address\n )\n })\n\n try:\n balance = Web3.toInt(balance) # web3 >= 4.0.0b7\n except TypeError:\n balance = Web3.toInt(hexstr=balance) # web3 <= 4.0.0b6\n\n if balance > grains:\n print('OK.')\n else:\n print('FAIL.\\n\\nAddress {} balance is {} ENG,\\nwhich is lower '\n 'than the price of the dataset that you are trying to\\n'\n 'buy: {} ENG. Get enough ENG to cover the costs of the '\n 'monthly\\nsubscription for what you are trying to buy, '\n 'and try again.'.format(\n address, from_grains(balance), price))\n return\n\n while True:\n agree_pay = input('Please confirm that you agree to pay {} ENG '\n 'for a monthly subscription to the dataset \"{}\" '\n 'starting today. [default: Y] '.format(\n price, dataset)) or 'y'\n if agree_pay.lower() not in ('y', 'n'):\n print(\"Please answer Y or N.\")\n else:\n if agree_pay.lower() == 'y':\n break\n else:\n return\n\n print('Ready to subscribe to dataset {}.\\n'.format(dataset))\n print('In order to execute the subscription, you will need to sign '\n 'two different transactions:\\n'\n '1. First transaction is to authorize the Marketplace contract '\n 'to spend {} ENG on your behalf.\\n'\n '2. Second transaction is the actual subscription for the '\n 'desired dataset'.format(price))\n\n tx = self.eng_contract.functions.approve(\n self.mkt_contract_address,\n grains,\n ).buildTransaction(\n {'from': address,\n 'nonce': self.web3.eth.getTransactionCount(address)}\n )\n\n signed_tx = self.sign_transaction(tx)\n try:\n tx_hash = '0x{}'.format(\n bin_hex(self.web3.eth.sendRawTransaction(signed_tx))\n )\n print(\n '\\nThis is the TxHash for this transaction: {}'.format(tx_hash)\n )\n\n except Exception as e:\n print('Unable to subscribe to data source: {}'.format(e))\n return\n\n self.check_transaction(tx_hash)\n\n print('Waiting for the first transaction to succeed...')\n\n while True:\n try:\n if self.web3.eth.getTransactionReceipt(tx_hash).status:\n break\n else:\n print('\\nTransaction failed. Aborting...')\n return\n except AttributeError:\n pass\n for i in range(0, 10):\n print('.', end='', flush=True)\n time.sleep(1)\n\n print('\\nFirst transaction successful!\\n'\n 'Now processing second transaction.')\n\n tx = self.mkt_contract.functions.subscribe(\n Web3.toHex(dataset),\n ).buildTransaction({\n 'from': address,\n 'nonce': self.web3.eth.getTransactionCount(address)})\n\n signed_tx = self.sign_transaction(tx)\n\n try:\n tx_hash = '0x{}'.format(bin_hex(\n self.web3.eth.sendRawTransaction(signed_tx)))\n print('\\nThis is the TxHash for this transaction: '\n '{}'.format(tx_hash))\n\n except Exception as e:\n print('Unable to subscribe to data source: {}'.format(e))\n return\n\n self.check_transaction(tx_hash)\n\n print('Waiting for the second transaction to succeed...')\n\n while True:\n try:\n if self.web3.eth.getTransactionReceipt(tx_hash).status:\n break\n else:\n print('\\nTransaction failed. 
Aborting...')\n return\n except AttributeError:\n pass\n for i in range(0, 10):\n print('.', end='', flush=True)\n time.sleep(1)\n\n print('\\nSecond transaction successful!\\n'\n 'You have successfully subscribed to dataset {} with'\n 'address {}.\\n'\n 'You can now ingest this dataset anytime during the '\n 'next month by running the following command:\\n'\n 'catalyst marketplace ingest --dataset={}'.format(\n dataset, address, dataset))\n\n def process_temp_bundle(self, ds_name, path):\n \"\"\"\n Merge the temp bundle into the main bundle for the specified\n data source.\n\n Parameters\n ----------\n ds_name\n path\n\n Returns\n -------\n\n \"\"\"\n tmp_bundle = extract_bundle(path)\n bundle_folder = get_data_source_folder(ds_name)\n ensure_directory(bundle_folder)\n if os.listdir(bundle_folder):\n zsource = bcolz.ctable(rootdir=tmp_bundle, mode='r')\n ztarget = bcolz.ctable(rootdir=bundle_folder, mode='r')\n merge_bundles(zsource, ztarget)\n\n else:\n os.rename(tmp_bundle, bundle_folder)\n\n pass\n\n def ingest(self, ds_name=None, start=None, end=None, force_download=False):\n\n if ds_name is None:\n\n df_sets = self._list()\n if df_sets.empty:\n print('There are no datasets available yet.')\n return\n\n set_print_settings()\n while True:\n print(df_sets)\n dataset_num = input('Choose the dataset you want to '\n 'ingest [0..{}]: '.format(\n df_sets.size - 1))\n try:\n dataset_num = int(dataset_num)\n except ValueError:\n print('Enter a number between 0 and {}'.format(\n df_sets.size - 1))\n else:\n if dataset_num not in range(0, df_sets.size):\n print('Enter a number between 0 and {}'.format(\n df_sets.size - 1))\n else:\n ds_name = df_sets.iloc[dataset_num]['dataset']\n break\n\n # ds_name = ds_name.lower()\n\n # TODO: catch error conditions\n provider_info = self.mkt_contract.functions.getDataProviderInfo(\n Web3.toHex(ds_name)\n ).call()\n\n if not provider_info[4]:\n print('The requested \"{}\" dataset is not registered in '\n 'the Data Marketplace.'.format(ds_name))\n return\n\n address, address_i = self.choose_pubaddr()\n fns = self.mkt_contract.functions\n check_sub = fns.checkAddressSubscription(\n address, Web3.toHex(ds_name)\n ).call()\n\n if check_sub[0] != address or self.to_text(check_sub[1]) != ds_name:\n print('You are not subscribed to dataset \"{}\" with address {}. 
'\n 'Plese subscribe first.'.format(ds_name, address))\n return\n\n if not check_sub[5]:\n print('Your subscription to dataset \"{}\" expired on {} UTC.'\n 'Please renew your subscription by running:\\n'\n 'catalyst marketplace subscribe --dataset={}'.format(\n ds_name,\n pd.to_datetime(check_sub[4], unit='s', utc=True),\n ds_name)\n )\n\n if 'key' in self.addresses[address_i]:\n key = self.addresses[address_i]['key']\n secret = self.addresses[address_i]['secret']\n else:\n key, secret = get_key_secret(address)\n\n headers = get_signed_headers(ds_name, key, secret)\n log.debug('Starting download of dataset for ingestion...')\n r = requests.post(\n '{}/marketplace/ingest'.format(AUTH_SERVER),\n headers=headers,\n stream=True,\n )\n if r.status_code == 200:\n target_path = get_temp_bundles_folder()\n try:\n decoder = MultipartDecoder.from_response(r)\n for part in decoder.parts:\n h = part.headers[b'Content-Disposition'].decode('utf-8')\n # Extracting the filename from the header\n name = re.search(r'filename=\"(.*)\"', h).group(1)\n\n filename = os.path.join(target_path, name)\n with open(filename, 'wb') as f:\n # for chunk in part.content.iter_content(\n # chunk_size=1024):\n # if chunk: # filter out keep-alive new chunks\n # f.write(chunk)\n f.write(part.content)\n\n self.process_temp_bundle(ds_name, filename)\n\n except NonMultipartContentTypeException:\n response = r.json()\n raise MarketplaceHTTPRequest(\n request='ingest dataset',\n error=response,\n )\n else:\n raise MarketplaceHTTPRequest(\n request='ingest dataset',\n error=r.status_code,\n )\n\n log.info('{} ingested successfully'.format(ds_name))\n\n def get_dataset(self, ds_name, start=None, end=None):\n ds_name = ds_name.lower()\n\n # TODO: filter ctable by start and end date\n bundle_folder = get_data_source_folder(ds_name)\n z = bcolz.ctable(rootdir=bundle_folder, mode='r')\n\n df = z.todataframe() # type: pd.DataFrame\n df.set_index(['date', 'symbol'], drop=True, inplace=True)\n\n # TODO: implement the filter more carefully\n # if start and end is None:\n # df = df.xs(start, level=0)\n\n return df\n\n def clean(self, ds_name=None, data_frequency=None):\n\n if ds_name is None:\n mktplace_root = get_marketplace_folder()\n folders = [os.path.basename(f.rstrip('/'))\n for f in glob.glob('{}/*/'.format(mktplace_root))\n if 'temp_bundles' not in f]\n\n while True:\n for idx, f in enumerate(folders):\n print('{}\\t{}'.format(idx, f))\n dataset_num = input('Choose the dataset you want to '\n 'clean [0..{}]: '.format(\n len(folders) - 1))\n try:\n dataset_num = int(dataset_num)\n except ValueError:\n print('Enter a number between 0 and {}'.format(\n len(folders) - 1))\n else:\n if dataset_num not in range(0, len(folders)):\n print('Enter a number between 0 and {}'.format(\n len(folders) - 1))\n else:\n ds_name = folders[dataset_num]\n break\n\n ds_name = ds_name.lower()\n\n if data_frequency is None:\n folder = get_data_source_folder(ds_name)\n\n else:\n folder = get_bundle_folder(ds_name, data_frequency)\n\n shutil.rmtree(folder)\n pass\n\n def create_metadata(self, key, secret, ds_name, data_frequency, desc,\n has_history=True, has_live=True):\n \"\"\"\n\n Returns\n -------\n\n \"\"\"\n headers = get_signed_headers(ds_name, key, secret)\n r = requests.post(\n '{}/marketplace/register'.format(AUTH_SERVER),\n json=dict(\n ds_name=ds_name,\n desc=desc,\n data_frequency=data_frequency,\n has_history=has_history,\n has_live=has_live,\n ),\n headers=headers,\n )\n\n if r.status_code != 200:\n raise MarketplaceHTTPRequest(\n 
request='register', error=r.status_code\n )\n\n if 'error' in r.json():\n raise MarketplaceHTTPRequest(\n request='upload file', error=r.json()['error']\n )\n\n def register(self):\n while True:\n desc = input('Enter the name of the dataset to register: ')\n dataset = desc.lower()\n provider_info = self.mkt_contract.functions.getDataProviderInfo(\n Web3.toHex(dataset)\n ).call()\n\n if provider_info[4]:\n print('There is already a dataset registered under '\n 'the name \"{}\". Please choose a different '\n 'name.'.format(dataset))\n else:\n break\n\n price = int(\n input(\n 'Enter the price for a monthly subscription to '\n 'this dataset in ENG: '\n )\n )\n while True:\n freq = input('Enter the data frequency [daily, hourly, minute]: ')\n if freq.lower() not in ('daily', 'hourly', 'minute'):\n print('Not a valid frequency.')\n else:\n break\n\n while True:\n reg_pub = input(\n 'Does it include historical data? [default: Y]: '\n ) or 'y'\n if reg_pub.lower() not in ('y', 'n'):\n print('Please answer Y or N.')\n else:\n if reg_pub.lower() == 'y':\n has_history = True\n else:\n has_history = False\n break\n\n while True:\n reg_pub = input(\n 'Doest it include live data? [default: Y]: '\n ) or 'y'\n if reg_pub.lower() not in ('y', 'n'):\n print('Please answer Y or N.')\n else:\n if reg_pub.lower() == 'y':\n has_live = True\n else:\n has_live = False\n break\n\n address, address_i = self.choose_pubaddr()\n if 'key' in self.addresses[address_i]:\n key = self.addresses[address_i]['key']\n secret = self.addresses[address_i]['secret']\n else:\n key, secret = get_key_secret(address)\n\n grains = to_grains(price)\n\n tx = self.mkt_contract.functions.register(\n Web3.toHex(dataset),\n grains,\n address,\n ).buildTransaction(\n {'from': address,\n 'nonce': self.web3.eth.getTransactionCount(address)}\n )\n\n signed_tx = self.sign_transaction(tx)\n\n try:\n tx_hash = '0x{}'.format(\n bin_hex(self.web3.eth.sendRawTransaction(signed_tx))\n )\n print(\n '\\nThis is the TxHash for this transaction: {}'.format(tx_hash)\n )\n\n except Exception as e:\n print('Unable to register the requested dataset: {}'.format(e))\n return\n\n self.check_transaction(tx_hash)\n\n print('Waiting for the transaction to succeed...')\n\n while True:\n try:\n if self.web3.eth.getTransactionReceipt(tx_hash).status:\n break\n else:\n print('\\nTransaction failed. 
Aborting...')\n return\n except AttributeError:\n pass\n for i in range(0, 10):\n print('.', end='', flush=True)\n time.sleep(1)\n\n print('\\nWarming up the {} dataset'.format(dataset))\n self.create_metadata(\n key=key,\n secret=secret,\n ds_name=dataset,\n data_frequency=freq,\n desc=desc,\n has_history=has_history,\n has_live=has_live,\n )\n print('\\n{} registered successfully'.format(dataset))\n\n def publish(self, dataset, datadir, watch):\n dataset = dataset.lower()\n provider_info = self.mkt_contract.functions.getDataProviderInfo(\n Web3.toHex(dataset)\n ).call()\n\n if not provider_info[4]:\n raise MarketplaceDatasetNotFound(dataset=dataset)\n\n match = next(\n (l for l in self.addresses if l['pubAddr'] == provider_info[0]),\n None\n )\n if not match:\n raise MarketplaceNoAddressMatch(\n dataset=dataset,\n address=provider_info[0])\n\n print('Using address: {} to publish this dataset.'.format(\n provider_info[0]))\n\n if 'key' in match:\n key = match['key']\n secret = match['secret']\n else:\n key, secret = get_key_secret(provider_info[0])\n\n headers = get_signed_headers(dataset, key, secret)\n filenames = glob.glob(os.path.join(datadir, '*.csv'))\n\n if not filenames:\n raise MarketplaceNoCSVFiles(datadir=datadir)\n\n files = []\n for file in filenames:\n files.append(('file', open(file, 'rb')))\n\n r = requests.post('{}/marketplace/publish'.format(AUTH_SERVER),\n files=files,\n headers=headers)\n\n if r.status_code != 200:\n raise MarketplaceHTTPRequest(request='upload file',\n error=r.status_code)\n\n if 'error' in r.json():\n raise MarketplaceHTTPRequest(request='upload file',\n error=r.json()['error'])\n\n print('Dataset {} uploaded successfully.'.format(dataset))\n"
] | [
[
"pandas.to_datetime",
"pandas.DataFrame"
]
] |
microsoft/Protein-Folding | [
"f534b2dd1e3f192fbcdadf234f25828c7f458a58"
] | [
"coevolution_transformer/model/msa_embeddings.py"
] | [
"# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\nimport math\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\n\n\nclass PositionalEncoding(nn.Module):\n def __init__(self, d_model, max_len=1 << 13):\n super(PositionalEncoding, self).__init__()\n self.ninp = d_model\n pe = torch.zeros(max_len, d_model)\n position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)\n div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))\n pe[:, 0::2] = torch.sin(position * div_term)\n pe[:, 1::2] = torch.cos(position * div_term) # (L, C)\n self.register_buffer(\"pe\", pe)\n\n def forward(self, idx):\n \"\"\"\n idx: (B, L)\n return: (B, L, C)\n \"\"\"\n return self.pe[idx]\n\n\nclass MSAEmbeddings(nn.Module):\n def __init__(self, msa_gap, embed_dim, dropout):\n super(MSAEmbeddings, self).__init__()\n self.embed_dim = embed_dim\n self.onehot = nn.Embedding(24, 24)\n self.onehot.weight.data = torch.eye(24)\n self.onehot.weight.requires_grad = False\n self.msa_embeddings = nn.Linear((msa_gap * 2 + 2) * 24 + 2, embed_dim)\n self.position_embeddings = PositionalEncoding(embed_dim)\n self.layer_norm = nn.LayerNorm(embed_dim)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, seq_ids, msa_ids, position_ids):\n \"\"\"\n seq_ids: (B, L)\n msa_ids: (B, K, *, L)\n position_ids: (B, L)\n return: (B, K, L, C)\n \"\"\"\n B, K, _, L = msa_ids.shape\n seq = self.onehot(seq_ids)\n msa_ids = msa_ids.transpose(-2, -1)\n boundary = msa_ids[..., -2:].float()\n msa = self.onehot(msa_ids[..., :-2]).reshape(B, K, L, -1)\n msa = torch.cat([seq[:, None].repeat(1, msa.shape[1], 1, 1), msa, boundary], dim=-1)\n msa_emb = self.msa_embeddings(msa)\n pos_emb = self.position_embeddings(position_ids)\n embeddings = msa_emb * math.sqrt(self.embed_dim) + pos_emb[:, None]\n embeddings = self.layer_norm(embeddings)\n embeddings = self.dropout(embeddings)\n return embeddings\n"
] | [
[
"torch.nn.Linear",
"torch.cos",
"torch.nn.Embedding",
"torch.sin",
"torch.nn.LayerNorm",
"torch.arange",
"torch.zeros",
"torch.eye",
"torch.nn.Dropout"
]
] |
sunhuaibo/HLA-HED | [
"bb0672e62a20baad80f5f154c9220bf8e5b8b28c"
] | [
"hla_hed.py"
] | [
"#!/usr/bin/env python\n# -*- coding=utf-8 -*-\n\n# =====================================\n# Author: Huaibo Sun\n# E-mail: [email protected]\n# date: 2022-03-31\n# =====================================\n\nimport os\nimport pandas as pd\nfrom Bio import SeqIO\nfrom pathlib import Path\nfrom itertools import combinations\nfrom argparse import ArgumentParser, RawDescriptionHelpFormatter\n\ndef get_opt():\n \"\"\"\n Input HLA file format\n \n Sample A1 A2 B1 B2 C1 C2\n p1 A*01:01 A*01:03 B*07:01 B*07:02 C*01:01 C*01:02\n p2 A*01:01 A*01:03 B*07:01 B*07:02 C*01:01 C*01:02\n \n If you use this tool, please cite the following three papers.\n \n Grantham R. Amino acid difference formula to help explain protein evolution. Science. 1974 Sep 6;185(4154):862-4. doi: 10.1126/science.185.4154.862. PMID: 4843792.\n Pierini F, Lenz TL. Divergent Allele Advantage at Human MHC Genes: Signatures of Past and Ongoing Selection. Mol Biol Evol. 2018 Sep 1;35(9):2145-2158. doi: 10.1093/molbev/msy116. PMID: 29893875; PMCID: PMC6106954.\n Chowell D, Krishna C, Pierini F, Makarov V, Rizvi NA, Kuo F, Morris LGT, Riaz N, Lenz TL, Chan TA. Evolutionary divergence of HLA class I genotype impacts efficacy of cancer immunotherapy. Nat Med. 2019 Nov;25(11):1715-1720. doi: 10.1038/s41591-019-0639-4. Epub 2019 Nov 7. PMID: 31700181; PMCID: PMC7938381.\n \n \"\"\"\n \n script = os.path.dirname(os.path.abspath(__file__))\n parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter, epilog=get_opt.__doc__)\n parser.add_argument(\"-d\", default=f\"{script}/database/grantham_matrix.txt\", help=\"Distance matrix for all amino acids, default: database/grantham_matrix.txt. (reference: DOI: 10.1126/science.185.4154.862)\")\n parser.add_argument(\"-f\", default=f\"{script}/database/ABC_prot.fa\", help=\"Amino acid sequences in fasta format, default: database/ABC_prot.fa.\")\n parser.add_argument(\"-i\", required=True, help=\"Input file of tab-delimited with individual HLA typing.\")\n parser.add_argument(\"-p\", action=\"store_true\", help=\"Paired HED score.\")\n parser.add_argument(\"-o\", required=True, help=\"Output file name.\")\n\n parse = parser.parse_args()\n return(parse)\n\ndef check_file(infile):\n if not infile.exists:\n raise Exception(f\"{str(infile)} file is not exist\")\n\ndef read_fasta(infile):\n infile = Path(infile)\n check_file(infile)\n record = SeqIO.parse(infile, \"fasta\")\n seq_array = {seq.id: str(seq.seq) for seq in record}\n seq_len = [len(value) for value in seq_array.values()]\n if len(set(seq_len)) != 1:\n raise Exception(\"Input sequences length is not equality\")\n return(seq_array)\n\ndef read_aa(infile):\n infile = Path(infile)\n check_file(infile)\n df = pd.read_csv(infile, header=0, sep=\"\\t\", index_col=0)\n aa_pairwise_dis = df.to_dict()\n return(aa_pairwise_dis)\n\ndef calculate_distange(hla1, hla2, sequence, distance):\n seq_hla1 = sequence.get(hla1, False)\n seq_hla2 = sequence.get(hla2, False)\n if not seq_hla1 or not seq_hla2:\n return(\"NA\")\n else:\n seq_len = len(seq_hla1)\n dis = 0\n for i in range(seq_len):\n aa1 = seq_hla1[i]\n aa2 = seq_hla2[i]\n dis += distance[aa1][aa2]\n dis = dis / seq_len\n return(dis)\n\n\ndef main():\n opt = get_opt()\n seq_array = read_fasta(opt.f)\n aa_pairwise_dis = read_aa(opt.d)\n\n infile = Path(opt.i)\n outfile = Path(opt.o)\n check_file(infile)\n\n df = pd.read_csv(infile, header=0, sep=\"\\t\")\n \n\n if opt.p:\n df2 = pd.melt(df, id_vars=[\"Sample\"], value_vars=[\"A1\", \"A2\", \"B1\",\"B2\", \"C1\",\"C2\"])\n alleles = 
set(df2[\"value\"].values.tolist())\n alleles_pair = combinations(alleles, 2)\n \n outheader = [\"Allele1\",\"Allele2\",\"HED\"]\n with open(outfile, \"w\") as fw:\n fw.write(\"\\t\".join(outheader) + \"\\n\")\n for allele1, allele2 in alleles_pair:\n dis_hla_pair = calculate_distange(allele1, allele2, seq_array, aa_pairwise_dis)\n outline = [allele1, allele2, dis_hla_pair]\n outline = [str(x) for x in outline]\n\n fw.write(\"\\t\".join(outline) + \"\\n\")\n else:\n outheader = [\"Sample\",\"HED_A\",\"HED_B\",\"HED_C\",\"Mean_HE\"]\n with open(outfile, \"w\") as fw:\n fw.write(\"\\t\".join(outheader) + \"\\n\")\n for _, line in df.iterrows():\n hla_a1 = line[\"A1\"]\n hla_a2 = line[\"A2\"]\n dis_hla_a = calculate_distange(hla_a1, hla_a2, seq_array, aa_pairwise_dis)\n\n hla_b1 = line[\"B1\"]\n hla_b2 = line[\"B2\"]\n dis_hla_b = calculate_distange(hla_b1, hla_b2, seq_array, aa_pairwise_dis)\n \n hla_c1 = line[\"C1\"]\n hla_c2 = line[\"C2\"]\n dis_hla_c = calculate_distange(hla_c1, hla_c2, seq_array, aa_pairwise_dis)\n\n if dis_hla_a == \"NA\" or dis_hla_b == \"NA\" or dis_hla_c == \"NA\":\n dis_mean = \"NA\"\n else:\n dis_mean = (dis_hla_a + dis_hla_b + dis_hla_c) / 3\n\n outline = [line[\"Sample\"], dis_hla_a, dis_hla_b, dis_hla_c, dis_mean]\n outline = [str(x) for x in outline]\n\n fw.write(\"\\t\".join(outline) + \"\\n\")\n\nif __name__ == \"__main__\":\n main()"
] | [
[
"pandas.read_csv",
"pandas.melt"
]
] |
LeiShi/Synthetic-Diagnostics-Platform | [
"5f1cb5c29d182490acbd4f3c167f0e09ec211236"
] | [
"src/python3/sdp/math/interpolation.py"
] | [
"\"\"\"This module contains some useful interpolation methods\n\"\"\"\n\nimport numpy as np\nfrom scipy.interpolate import BarycentricInterpolator\n\nclass InterpolationError(Exception):\n def __init__(self,value):\n self.value = value\n def __str__(self):\n return repr(self.value)\n\nclass OutofBoundError(InterpolationError, ValueError):\n def __init__(self, value):\n self.value = value\n def __str__(self):\n return repr(self.value)\n\ndef linear_3d_3point(X,Y,Z,x,y,tol = 1e-8):\n \"\"\"3D interpolation method\n Linearly interpolate the value of z for given x,y.\n By using 3 points data, the unknown value of z is assumed on the same plane.\n The method used here is the cross product method. From P(x1,y1,z1),Q(x2,y2,z2),and R(x3,y3,z3), construct 2 vectors on the plane, PQ(x2-x1,y2-y1,z2-z1) and PR(x3-x1,y3-y1,z3-z1). Then do the cross product, PQ*PR = N. This gives the normal vector of the plane. The plane's equation is then 'N dot X = d', where X is an arbitary point and d to be determined. d can be easily gotten from any one of the given points, say P. d = N dot P. Then the equation of the plane is found. The equation can be written as 'ax+by+cz = d', then z can be solved for given x and y.\n \n Arguments:\n x1,y1,z1: coordinates of the first point\n x2,y2,z2: the second point\n x3,y3,z3: the third point\n x,y: the x,y coordinates for the wanted\n\n return value:\n interpolated z value on given (x,y)\n \"\"\"\n x1,x2,x3 = X[0],X[1],X[2]\n y1,y2,y3 = Y[0],Y[1],Y[2]\n z0 = np.max(Z)\n z1,z2,z3 = Z[0]/z0,Z[1]/z0,Z[2]/z0\n\n\n Nx = (y2-y1)*(z3-z1)-(y3-y1)*(z2-z1)\n Ny = (x3-x1)*(z2-z1)-(x2-x1)*(z3-z1)\n Nz = (x2-x1)*(y3-y1)-(x3-x1)*(y2-y1)\n\n z_base = (x2-x1)*(y3-y1)\n\n print(Nx,Ny,Nz,z_base)\n\n if(np.absolute(Nz/z_base) <= tol ):\n raise InterpolationError('3 points interpolation failed: given points are on a plane vertical to XY plane, no z value being able to interpolated.')\n\n d = Nx*x1 + Ny*y1 + Nz*z1\n print(d, d-Nx*x-Ny*y)\n\n return (d - Nx*x - Ny*y)/float(Nz)*z0\n\n\ndef trilinear_interp(X,Y,Z,F,x, fill_value=0.0):\n \"\"\" Trilinear interpolation (3D) for 1 point on a cubic mesh\n See Wikipedia for a better description than the following:\n First choose a direction and interpolate all the corners along this \n direction (so 8pts -> 4pts) at the value of the wanted point.\n Choose a second direction and interpolate the 4pts at the wanted point\n (4pts -> 2pts).\n Finish with the interpolation along the last line\n \n Arguments:\n X -- 1D array containing the X coordinate of F\n Y -- 1D array containing the Y coordinate of F\n Z -- 1D array containing the Z coordinate of F\n F -- 3D array containing the data\n x -- position (3D) where the interpolation is wanted\n\n return value:\n interpolated z value on given (x,y)\n \"\"\"\n raise NameError('Does not work, should use RegularGridInterpolator')\n if len(x.shape) == 1:\n # if outside the box, put the value to fill_value\n if x[0] < X[0] or x[1] < Y[0] or x[2] < Z[0]\\\n or x[0] > X[-1] or x[1] > Y[-1] or x[2] > Z[-1]:\n return fill_value\n else:\n # First find the x,y,z coordinate of the corner of the cube\n indx = np.where(X < x[0])[0].max()\n indy = np.where(Y < x[1])[0].max()\n indz = np.where(Z < x[2])[0].max()\n\n # relative coordinates\n rx = (x[0]-X[indx])/(X[indx+1]-X[indx])\n ry = (x[1]-Y[indy])/(Y[indy+1]-Y[indy])\n rz = (x[2]-Z[indz])/(Z[indz+1]-Z[indz])\n \n # compute the first linear interpolation\n temp = 1-rx\n c00 = F[indx,indy,indz]*temp + F[indx+1,indy,indz]*rx\n c10 = F[indx,indy+1,indz]*temp + 
F[indx+1,indy+1,indz]*rx\n c01 = F[indx,indy,indz+1]*temp + F[indx+1,indy,indz+1]*rx\n c11 = F[indx,indy+1,indz+1]*temp + F[indx+1,indy+1,indz+1]*rx\n \n # compute the second linear interpolation\n temp = 1-ry\n c0 = c00*temp + c10*ry\n c1 = c01*temp + c11*ry\n \n # compute the last linear interpolation\n return c0*(1-rz) + c1*rz\n elif len(x.shape) == 2:\n \"\"\"this part is the same that before but with a mesh (not only one point).\n the comments will be only for trick due to the shape of the positions\n abd not on the method (look the first part for them)\n \"\"\"\n G = np.zeros(len(x[:,0]))\n # First find the x,y,z coordinate of the corner of the cube\n ind = ~((x[:,0] < X[0]) | (x[:,1] < Y[0]) | (x[:,2] < Z[0]) |\n (x[:,0] > X[-1]) | (x[:,1] > Y[-1]) | (x[:,2] > Z[-1]))\n\n G[~ind] = fill_value\n indx = np.where(X <= x[ind,0])[0].max()\n indy = np.where(Y <= x[ind,1])[0].max()\n indz = np.where(Z <= x[ind,2])[0].max()\n \n # relative coordinates\n rx = (x[ind,0]-X[indx])/(X[indx+1]-X[indx])\n ry = (x[ind,1]-Y[indy])/(Y[indy+1]-Y[indy])\n rz = (x[ind,2]-Z[indz])/(Z[indz+1]-Z[indz])\n \n # compute the first linear interpolation\n temp = 1-rx\n c00 = F[indx,indy,indz]*temp + F[indx+1,indy,indz]*rx\n c10 = F[indx,indy+1,indz]*temp + F[indx+1,indy+1,indz]*rx\n c01 = F[indx,indy,indz+1]*temp + F[indx+1,indy,indz+1]*rx\n c11 = F[indx,indy+1,indz+1]*temp + F[indx+1,indy+1,indz+1]*rx\n \n # compute the second linear interpolation\n temp = 1-ry\n c0 = c00*temp + c10*ry\n c1 = c01*temp + c11*ry\n \n # compute the last linear interpolation\n G[ind] = c0*(1-rz) + c1*rz\n return G\n else:\n raise NameError('Error: wrong shape of the position to interpolate')\n \n\n# BarycentricInterpolator with boundary check\nclass BoundaryWarnBarycentricInterpolator(BarycentricInterpolator):\n \"\"\"Barycentric Interpolator with Boundary Check. Based on \n :py:class:`scipy.interpolate.BarycentricInterpolator`.\n \n The boundary is set as minimun x and maximum x. If called with x outside \n the available range, a OutofBoundError will be raised.\n \n __init__(xi, yi=None, axis=0, bound_error=True, fill_value=0)\n \n :param xi: x coordinates for interpolation\n :type xi: array of float\n :param yi: Optional, y values on each xi location. If not given, need to be\n provided later using :py:method`set_yi` method.\n :type yi: array of float\n :param int axis: the axis of yi along which the interpolator will be \n created.\n :param bool bound_error: If True, out of bound interpolation will result a\n OutofBoundError. Otherwise fill_value will be used\n . Default to be True\n :param float fill_value: If bound_error is False, out of bound values will\n be automatically filled with fill_value.\n \n see :py:class:`scipy.interpolate.BarycentricInterpolator` for further \n information. \n \"\"\"\n \n def __init__(self, xi, yi=None, axis=0, bound_error=True, fill_value=0):\n \n self._xmin = np.min(xi)\n self._xmax = np.max(xi)\n self._bound_error = bound_error\n self._fill_value = fill_value\n \n super(BoundaryWarnBarycentricInterpolator, self).__init__(xi, yi, axis)\n \n \n \n def __call__(self, x):\n if (self._bound_error):\n if np.any(x < self._xmin) or np.any(x > self._xmax):\n raise OutofBoundError('x out of bound! 
xmin: {}, xmax: {}'.\\\n format(self._xmin, self._xmax))\n return super(BoundaryWarnBarycentricInterpolator, self).__call__(x)\n else:\n outbound_idx = np.logical_or(x < self._xmin, x > self._xmax)\n result = np.empty_like(x)\n result[~outbound_idx] = super(BoundaryWarnBarycentricInterpolator, \n self).__call__(x[~outbound_idx]) \n result[outbound_idx] = self._fill_value\n return result\n \n\n def add_xi(self, xi, yi=None):\n super(BoundaryWarnBarycentricInterpolator, self).add_xi(xi, yi)\n self._xmin = np.min( [np.min(xi), self._xmin] )\n self._xmax = np.max( [np.max(xi), self._xmax] )\n \n \n def set_yi(self, yi, axis=None):\n yi = np.array(yi)\n if not self._bound_error:\n assert yi.ndim == 1\n super(BoundaryWarnBarycentricInterpolator, self).set_yi(yi, axis)\n \n\n "
] | [
[
"numpy.logical_or",
"numpy.any",
"numpy.empty_like",
"numpy.max",
"numpy.min",
"numpy.absolute",
"numpy.array",
"numpy.where"
]
] |
mayureeb/fakenews | [
"c47a72c8bbe4d413b309da0c662da784c002fe3f"
] | [
"Code/sentiment_analysis.py"
] | [
"import pandas as pd\nfrom textblob import TextBlob\n\npd.options.mode.chained_assignment = None # ignores the SettingWithCopy Warning\ndf = pd.read_csv('INPUT.csv', encoding = 'utf8')\ndf['polarity'] = 0.0\ndf['subjectivity'] = 0.0\nfor i in range(0, len(df.index)):\n print(i)\n blob = TextBlob(str(df['text'][i]))\n df['subjectivity'][i] = blob.sentiment.subjectivity\n df['polarity'][i] = blob.sentiment.polarity\n\nprint(df.head())\ndf.to_csv('OUTPUT.csv', encoding = 'utf8')\n"
] | [
[
"pandas.read_csv"
]
] |
LutzGross/fingal | [
"4b6fcc02871e7ba1a98f37ffd18f1a16a5fe6a48"
] | [
"bin/specsim3d/spectralsim.py"
] | [
"#-------------------------------------------------------------------------------\r\n# Name: Spectralsim\r\n# Purpose: Simulation of standard normal random fields\r\n#\r\n# Author: Dr.-Ing. S. Hoerning\r\n#\r\n# Created: 02.05.2018, Centre for Natural Gas, EAIT,\r\n# The University of Queensland, Brisbane, QLD, Australia\r\n#-------------------------------------------------------------------------------\r\n\r\nimport numpy as np\r\nfrom . import covariancefunction as covfun\r\n\r\n\r\nclass spectral_random_field(object):\r\n def __init__(self,\r\n domainsize = (100,100),\r\n covmod = '1.0 Exp(2.)',\r\n periodic = False,\r\n ):\r\n\r\n self.counter = 0\r\n self.periodic = periodic\r\n # create self.xyz for plotting 3d\r\n if len(domainsize) == 3:\r\n self.xyz = np.mgrid[[slice(0,n,1) for n in domainsize]].reshape(3,-1).T\r\n # adjust domainsize by cutoff for non-perjodic output\r\n self.cutoff = 0\r\n if not self.periodic:\r\n cutoff = covfun.find_maximum_range(covmod)\r\n cutoffs = []\r\n for dim in domainsize:\r\n tsize = dim + cutoff\r\n # find closest multiple of 8 that is larger than tsize\r\n m8 = np.int(np.ceil(tsize/8.)*8.)\r\n cutoffs.append(m8 - dim)\r\n\r\n self.cutoff = np.array(cutoffs)\r\n\r\n\r\n self.domainsize = np.array(domainsize)+self.cutoff\r\n self.covmod = covmod\r\n self.ndim = len(self.domainsize)\r\n self.npoints = np.prod(self.domainsize)\r\n\r\n self.grid = np.mgrid[[slice(0,n,1) for n in self.domainsize]]\r\n\r\n # ensure periodicity of domain\r\n for i in range(self.ndim):\r\n self.domainsize = self.domainsize[:,np.newaxis]\r\n self.grid = np.min((self.grid,np.array(self.domainsize)-self.grid),axis=0)\r\n\r\n # compute distances from origin (--> wavenumbers in fourier space)\r\n self.h = ((self.grid**2).sum(axis=0))**0.5\r\n # covariances (in fourier space!!!)\r\n self.Q = covfun.Covariogram(self.h, self.covmod)\r\n\r\n # FFT of covariances\r\n self.FFTQ = np.abs(np.fft.fftn(self.Q))\r\n\r\n # eigenvalues of decomposition\r\n self.sqrtFFTQ = np.sqrt(self.FFTQ / self.npoints)\r\n\r\n self.Y = self.simnew()\r\n\r\n\r\n\r\n def simnew(self):\r\n self.counter += 1\r\n # compute random field via inverse fourier transform\r\n real = np.random.standard_normal(size=self.sqrtFFTQ.shape)\r\n imag = np.random.standard_normal(size=self.sqrtFFTQ.shape)\r\n epsilon = real + 1j*imag\r\n rand = epsilon * self.sqrtFFTQ\r\n self.Y = np.real(np.fft.ifftn(rand))*self.npoints\r\n\r\n if not self.periodic:\r\n # readjust domainsize to correct size (--> no boundary effects...)\r\n gridslice = [slice(0,(self.domainsize.squeeze()-self.cutoff)[i],1)\r\n for i in range(self.ndim)]\r\n self.Y = self.Y[tuple(gridslice)]\r\n self.Y = self.Y.reshape(self.domainsize.squeeze()-self.cutoff)\r\n\r\n return self.Y\r\n\r\n\r\n# TEST CASE\r\nif __name__ == \"__main__\":\r\n from mpl_toolkits.mplot3d import Axes3D\r\n import matplotlib.pyplot as plt\r\n domain = (30, 30, 30)\r\n covmod = '1.0 Exp(4.)'\r\n spec = spectral_random_field(domainsize = domain, covmod = covmod)\r\n field3d = spec.simnew()\r\n\r\n xyz = np.mgrid[[slice(0 , n, 1) for n in domain]].reshape(3,-1).T\r\n\r\n fig = plt.figure()\r\n ax = fig.add_subplot(111, projection='3d')\r\n ax.scatter(xyz[:,0], xyz[:,1], xyz[:,2], c=field3d.flatten())\r\n plt.show()\r\n\r\n"
] | [
[
"numpy.random.standard_normal",
"numpy.ceil",
"matplotlib.pyplot.figure",
"numpy.fft.ifftn",
"matplotlib.pyplot.show",
"numpy.prod",
"numpy.sqrt",
"numpy.fft.fftn",
"numpy.array"
]
] |
echaussidon/redrock | [
"9a3d4f0aed8c0792f2cc731dbdf04a99018083bf"
] | [
"py/redrock/templates.py"
] | [
"\"\"\"\nClasses and functions for templates.\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nimport sys\nfrom glob import glob\nimport os\nimport traceback\n\nimport numpy as np\nfrom astropy.io import fits\n\nfrom .utils import native_endian, elapsed, transmission_Lyman\n\nfrom .rebin import rebin_template, trapz_rebin\n\n\nclass Template(object):\n \"\"\"A spectral Template PCA object.\n\n The template data is read from a redrock-format template file.\n Alternatively, the data can be specified in the constructor.\n\n Args:\n filename (str): the path to the template file, either absolute or\n relative to the RR_TEMPLATE_DIR environment variable.\n\n \"\"\"\n def __init__(self, filename=None, spectype=None, redshifts=None,\n wave=None, flux=None, subtype=None):\n\n if filename is not None:\n fx = None\n if os.path.exists(filename):\n fx = fits.open(filename, memmap=False)\n else:\n xfilename = os.path.join(os.getenv('RR_TEMPLATE_DIR'), filename)\n if os.path.exists(xfilename):\n fx = fits.open(xfilename, memmap=False)\n else:\n raise IOError('unable to find '+filename)\n\n hdr = fx['BASIS_VECTORS'].header\n if 'VERSION' in hdr:\n self._version = hdr['VERSION']\n else:\n self._version = 'unknown'\n\n self.wave = np.asarray(hdr['CRVAL1'] + \\\n hdr['CDELT1']*np.arange(hdr['NAXIS1']), dtype=np.float64)\n if 'LOGLAM' in hdr and hdr['LOGLAM'] != 0:\n self.wave = 10**self.wave\n\n self.flux = np.asarray(native_endian(fx['BASIS_VECTORS'].data),\n dtype=np.float64)\n\n self._redshifts = None\n\n ## find out if redshift info is present in the file\n old_style_templates = True\n try:\n self._redshifts = native_endian(fx['REDSHIFTS'].data)\n old_style_templates = False\n except KeyError:\n pass\n\n fx.close()\n\n self._rrtype = hdr['RRTYPE'].strip().upper()\n if old_style_templates:\n if self._rrtype == 'GALAXY':\n # redshifts = 10**np.arange(np.log10(1+0.005),\n # np.log10(1+2.0), 1.5e-4) - 1\n self._redshifts = 10**np.arange(np.log10(1-0.005),\n np.log10(1+1.7), 3e-4) - 1\n elif self._rrtype == 'STAR':\n self._redshifts = np.arange(-0.002, 0.00201, 4e-5)\n elif self._rrtype == 'QSO':\n self._redshifts = 10**np.arange(np.log10(1+0.05),\n np.log10(1+6.0), 5e-4) - 1\n else:\n raise ValueError(\"Unknown redshift range to use for \"\n \"template type {}\".format(self._rrtype))\n zmin = self._redshifts[0]\n zmax = self._redshifts[-1]\n print(\"DEBUG: Using default redshift range {:.4f}-{:.4f} for \"\n \"{}\".format(zmin, zmax, os.path.basename(filename)))\n else:\n zmin = self._redshifts[0]\n zmax = self._redshifts[-1]\n print(\"DEBUG: Using redshift range {:.4f}-{:.4f} for \"\n \"{}\".format(zmin, zmax, os.path.basename(filename)))\n\n self._subtype = None\n if 'RRSUBTYP' in hdr:\n self._subtype = hdr['RRSUBTYP'].strip().upper()\n else:\n self._subtype = ''\n\n else:\n self._rrtype = spectype\n self._redshifts = redshifts\n self.wave = wave\n self.flux = flux\n self._subtype = subtype\n\n self._nbasis = self.flux.shape[0]\n self._nwave = self.flux.shape[1]\n\n\n @property\n def nbasis(self):\n return self._nbasis\n\n @property\n def nwave(self):\n return self._nwave\n\n @property\n def template_type(self):\n return self._rrtype\n\n @property\n def sub_type(self):\n return self._subtype\n\n @property\n def full_type(self):\n \"\"\"Return formatted type:subtype string.\n \"\"\"\n if self._subtype != '':\n return '{}:::{}'.format(self._rrtype, self._subtype)\n else:\n return self._rrtype\n\n @property\n def redshifts(self):\n return self._redshifts\n\n\n def eval(self, 
coeff, wave, z):\n \"\"\"Return template for given coefficients, wavelengths, and redshift\n\n Args:\n coeff : array of coefficients length self.nbasis\n wave : wavelengths at which to evaluate template flux\n z : redshift at which to evaluate template flux\n\n Returns:\n template flux array\n\n Notes:\n A single factor of (1+z)^-1 is applied to the resampled flux\n to conserve integrated flux after redshifting.\n\n \"\"\"\n assert len(coeff) == self.nbasis\n flux = self.flux.T.dot(coeff).T / (1+z)\n return trapz_rebin(self.wave*(1+z), flux, wave)\n\n\n\n\ndef find_templates(template_dir=None):\n \"\"\"Return list of redrock-\\*.fits template files\n\n Search directories in this order, returning results from first one found:\n - template_dir\n - $RR_TEMPLATE_DIR\n - <redrock_code>/templates/\n\n Args:\n template_dir (str): optional directory containing the templates.\n\n Returns:\n list: a list of template files.\n\n \"\"\"\n if template_dir is None:\n if 'RR_TEMPLATE_DIR' in os.environ:\n template_dir = os.environ['RR_TEMPLATE_DIR']\n else:\n thisdir = os.path.dirname(__file__)\n tempdir = os.path.join(os.path.abspath(thisdir), 'templates')\n if os.path.exists(tempdir):\n template_dir = tempdir\n\n if template_dir is None:\n raise IOError(\"ERROR: can't find template_dir, $RR_TEMPLATE_DIR, or {rrcode}/templates/\")\n else:\n print('DEBUG: Read templates from {}'.format(template_dir) )\n\n return sorted(glob(os.path.join(template_dir, 'rrtemplate-*.fits')))\n\n\nclass DistTemplatePiece(object):\n \"\"\"One piece of the distributed template data.\n\n This is a simple container for storing interpolated templates for a set of\n redshift values. It is used for communicating the interpolated templates\n between processes.\n\n In the MPI case, each process will store at most two of these\n simultaneously. This is the data that is computed on a single process and\n passed between processes.\n\n Args:\n index (int): the chunk index of this piece- this corresponds to\n the process rank that originally computed this piece.\n redshifts (array): the redshift range contained in this piece.\n data (list): a list of dictionaries, one for each redshift, and\n each containing the 2D interpolated template values for all\n \"wavehash\" keys.\n\n \"\"\"\n def __init__(self, index, redshifts, data):\n self.index = index\n self.redshifts = redshifts\n self.data = data\n\n\ndef _mp_rebin_template(template, dwave, zlist, qout):\n \"\"\"Function for multiprocessing version of rebinning.\n \"\"\"\n try:\n results = dict()\n for z in zlist:\n binned = rebin_template(template, z, dwave)\n results[z] = binned\n qout.put(results)\n except:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n lines = traceback.format_exception(exc_type, exc_value, exc_traceback)\n lines = [ \"MP rebin: {}\".format(x) for x in lines ]\n print(\"\".join(lines))\n sys.stdout.flush()\n return\n\n\nclass DistTemplate(object):\n \"\"\"Distributed template data interpolated to all redshifts.\n\n For a given template, the redshifts are distributed among the\n processes in the communicator. 
Then each process will rebin the\n template to those redshifts for the wavelength grids specified by\n dwave.\n\n Args:\n template (Template): the template to distribute\n dwave (dict): the keys are the \"wavehash\" and the values\n are a 1D array containing the wavelength grid.\n mp_procs (int): if not using MPI, restrict the number of\n multiprocesses to this.\n comm (mpi4py.MPI.Comm): (optional) the MPI communicator.\n\n \"\"\"\n def __init__(self, template, dwave, mp_procs=1, comm=None):\n self._comm = comm\n self._template = template\n self._dwave = dwave\n\n self._comm_rank = 0\n self._comm_size = 1\n if self._comm is not None:\n self._comm_rank = self._comm.rank\n self._comm_size = self._comm.size\n\n self._distredshifts = np.array_split(self._template.redshifts,\n self._comm_size)\n\n myz = self._distredshifts[self._comm_rank]\n nz = len(myz)\n\n data = list()\n\n # In the case of not using MPI (comm == None), one process is rebinning\n # all the templates. In that scenario, use multiprocessing\n # workers to do the rebinning.\n\n if self._comm is not None:\n # MPI case- compute our local redshifts\n for z in myz:\n binned = rebin_template(self._template, z, self._dwave)\n data.append(binned)\n else:\n # We don't have MPI, so use multiprocessing\n import multiprocessing as mp\n\n qout = mp.Queue()\n work = np.array_split(myz, mp_procs)\n procs = list()\n for i in range(mp_procs):\n p = mp.Process(target=_mp_rebin_template,\n args=(self._template, self._dwave, work[i], qout))\n procs.append(p)\n p.start()\n\n # Extract the output into a single list\n results = dict()\n for i in range(mp_procs):\n res = qout.get()\n results.update(res)\n for z in myz:\n data.append(results[z])\n\n # Correct spectra for Lyman-series\n for i, z in enumerate(myz):\n for k in list(self._dwave.keys()):\n T = transmission_Lyman(z,self._dwave[k])\n for vect in range(data[i][k].shape[1]):\n data[i][k][:,vect] *= T\n\n self._piece = DistTemplatePiece(self._comm_rank, myz, data)\n\n\n @property\n def comm(self):\n return self._comm\n\n @property\n def template(self):\n return self._template\n\n @property\n def local(self):\n return self._piece\n\n\n def cycle(self):\n \"\"\"Pass our piece of data to the next process.\n\n If we have returned to our original data, then return True, otherwise\n return False.\n\n Args:\n Nothing\n\n Returns (bool):\n Whether we have finished (True) else False.\n\n \"\"\"\n # If we are not using MPI, this function is a no-op, so just return.\n if self._comm is None:\n return True\n\n rank = self._comm_rank\n nproc = self._comm_size\n\n to_proc = rank + 1\n if to_proc >= nproc:\n to_proc = 0\n\n from_proc = rank - 1\n if from_proc < 0:\n from_proc = nproc - 1\n\n # Send our data and get a request handle for later checking.\n\n req = self._comm.isend(self._piece, to_proc)\n\n # Receive our data\n\n incoming = self._comm.recv(source=from_proc)\n\n # Wait for send to finishself._comm_rank = self._comm.rank\n\n req.wait()\n\n # Now replace our local piece with the new one\n\n self._piece = incoming\n\n # Are we done?\n\n done = False\n if self._piece.index == rank:\n done = True\n\n return done\n\n\ndef load_dist_templates(dwave, templates=None, comm=None, mp_procs=1):\n \"\"\"Read and distribute templates from disk.\n\n This reads one or more template files from disk and distributes them among\n an MPI communicator. Each process will locally store interpolated data\n for a redshift slice of each template. 
For a single redshift, the template\n is interpolated to the wavelength grids specified by \"dwave\".\n\n As an example, imagine 3 templates with independent redshift ranges. Also\n imagine that the communicator has 2 processes. This function would return\n a list of 3 DistTemplate objects. Within each of those objects, the 2\n processes store the interpolated data for a subset of the redshift range:\n\n DistTemplate #1: zmin1 <---- p0 ----> | <---- p1 ----> zmax1\n DistTemplate #2: zmin2 <-- p0 --> | <-- p1 --> zmax2\n DistTemplate #3: zmin3 <--- p0 ---> | <--- p1 ---> zmax3\n\n Args:\n dwave (dict): the dictionary of wavelength grids. Keys are the\n \"wavehash\" and values are an array of wavelengths.\n templates (str or None): if None, find all templates from the\n redrock template directory. If a path to a file is specified,\n load that single template. If a path to a directory is given,\n load all templates in that directory.\n comm (mpi4py.MPI.Comm): (optional) the MPI communicator.\n mp_procs (int): if not using MPI, restrict the number of\n multiprocesses to this.\n\n Returns:\n list: a list of DistTemplate objects.\n\n \"\"\"\n timer = elapsed(None, \"\", comm=comm)\n\n template_files = None\n\n if (comm is None) or (comm.rank == 0):\n # Only one process needs to do this\n if templates is not None:\n if os.path.isfile(templates):\n # we are using just a single file\n template_files = [ templates ]\n elif os.path.isdir(templates):\n # this is a template dir\n template_files = find_templates(template_dir=templates)\n else:\n print(\"{} is neither a file nor a directory\"\\\n .format(templates))\n sys.stdout.flush()\n if comm is not None:\n comm.Abort()\n else:\n template_files = find_templates()\n\n if comm is not None:\n template_files = comm.bcast(template_files, root=0)\n\n template_data = list()\n if (comm is None) or (comm.rank == 0):\n for t in template_files:\n template_data.append(Template(filename=t))\n\n if comm is not None:\n template_data = comm.bcast(template_data, root=0)\n\n timer = elapsed(timer, \"Read and broadcast of {} templates\"\\\n .format(len(template_files)), comm=comm)\n\n # Compute the interpolated templates in a distributed way with every\n # process generating a slice of the redshift range.\n\n dtemplates = list()\n for t in template_data:\n dtemplates.append(DistTemplate(t, dwave, mp_procs=mp_procs, comm=comm))\n\n timer = elapsed(timer, \"Rebinning templates\", comm=comm)\n\n return dtemplates\n"
] | [
[
"numpy.arange",
"numpy.array_split",
"numpy.log10"
]
] |
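A minimal usage sketch (illustration only, not a dataset record) for the redrock template loader in the record above, assuming the redrock package is importable and that RR_TEMPLATE_DIR (or the packaged templates/ directory) contains rrtemplate-*.fits files:

from redrock.templates import find_templates, Template

# Locate template files via $RR_TEMPLATE_DIR or the packaged templates/ directory
template_files = find_templates()
templates = [Template(filename=f) for f in template_files]
for t in templates:
    # full_type, nbasis and redshifts are properties defined in templates.py above
    print(t.full_type, t.nbasis, "basis vectors,", len(t.redshifts), "redshifts")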
guissy/StockRecommendSystem | [
"2e8694d0bb2ceaa42585ee7414564d921cc5a854"
] | [
"Source/FetchData/Fetch_Data_Stock_CHN_Daily.py"
] | [
"import sys, os, time, datetime, warnings, configparser\nimport pandas as pd\nimport numpy as np\nimport tushare as ts\nimport concurrent.futures\nfrom tqdm import tqdm\n\ncur_path = os.path.dirname(os.path.abspath(__file__))\nfor _ in range(2):\n root_path = cur_path[0:cur_path.rfind('/', 0, len(cur_path))]\n cur_path = root_path\nsys.path.append(root_path + \"/\" + 'Source/DataBase/')\nfrom Source.DataBase.DB_API import queryStock, storeStock, queryStockList, storeStockList, queryStockPublishDay, storePublishDay\n\ndef getStocksList(root_path):\n try:\n df = queryStockList(root_path, \"DB_STOCK\", \"SHEET_CHN_DAILY\")\n df.index = df.index.astype(str).str.zfill(6)\n except Exception as e:\n df = pd.DataFrame()\n\n if df.empty == False: return df\n import subprocess\n subprocess.Popen('brew services restart mongodb'.split())\n stock_info = ts.get_stock_basics()\n listData = pd.DataFrame(stock_info)\n #listData.index.name = 'symbol'\n #listData.index = listData.index.astype(str).str.zfill(6) #[str(symbol).zfill(6) for symbol in listData.index] #listData.index.astype(str).str.zfill(6)\n #print(listData.index)\n #listData['symbol'] = listData['symbol'].str.strip()\n storeStockList(root_path, \"DB_STOCK\", \"SHEET_CHN_DAILY\", listData)\n df = queryStockList(root_path, \"DB_STOCK\", \"SHEET_CHN_DAILY\")\n df.index = df.index.astype(str).str.zfill(6)\n return df\n\ndef getSingleStock(symbol):\n repeat_times = 1\n message = \"\"\n df = pd.DataFrame()\n\n for _ in range(repeat_times): \n try:\n data = ts.get_hist_data(symbol)\n data.sort_index(ascending=True, inplace=True)\n return data, \"\"\n except Exception as e:\n message = symbol + \" fetch exception: \" + str(e)\n continue \n return df, message\n\ndef getSingleStockByTime(symbol, from_date, till_date):\n start = from_date.split('-')\n start_y, start_m, start_d = start[0], start[1], start[2] # starting date\n\n end = till_date.split('-')\n end_y, end_m, end_d = end[0], end[1], end[2] # until now\n \n repeat_times = 1\n message = \"\"\n df = pd.DataFrame()\n\n for _ in range(repeat_times): \n try:\n data = ts.get_hist_data(symbol, from_date, till_date)\n data.sort_index(ascending=True, inplace=True)\n return data, \"\"\n except Exception as e:\n message = symbol + \" fetch exception: \" + str(e)\n continue \n return df, message\n\ndef judgeOpenDaysInRange(from_date, to_date):\n holidays=[\"2017-01-01\", \"2017-01-02\",\n \"2017-01-27\", \"2017-01-28\", \"2017-01-29\", \"2017-01-30\", \"2017-01-31\", \"2017-02-01\", \"2017-02-02\",\n \"2017-04-02\", \"2017-04-03\", \"2017-04-04\",\n \"2017-05-01\",\n \"2017-05-28\", \"2017-05-29\", \"2017-05-30\",\n \"2017-10-01\", \"2017-10-02\", \"2017-10-03\", \"2017-10-04\", \"2017-10-05\",\"2017-10-06\",\"2017-10-07\",\"2017-10-08\"]\n\n #holidays = cal.holidays(from_date, to_date)\n duedays = pd.bdate_range(from_date, to_date)\n df = pd.DataFrame()\n df['date'] = duedays\n df['holiday'] = duedays.isin(holidays)\n opendays = df[df['holiday'] == False]\n return opendays\n\ndef judgeNeedPostDownload(from_date, to_date):\n today = datetime.datetime.now()\n start_date = pd.Timestamp(from_date)\n end_date = pd.Timestamp(to_date)\n\n if start_date > today: return False \n if end_date > today: to_date = today.strftime(\"%Y-%m-%d\")\n dateList = judgeOpenDaysInRange(from_date, to_date)\n if len(dateList) > 0: return True\n return False\n\n\ndef updateSingleStockData(root_path, symbol, force_check):\n startTime = time.time()\n message = \"\"\n\n if len(symbol) == 0: return startTime, message\n\n till_date = 
(datetime.datetime.now()).strftime(\"%Y-%m-%d\")\n end_date = pd.Timestamp(till_date)\n \n stockData, lastUpdateTime = queryStock(root_path, \"DB_STOCK\", \"SHEET_CHN_DAILY\", symbol)\n\n if stockData.empty:\n stockData, message = getSingleStock(symbol)\n if stockData.empty == False:\n storeStock(root_path, \"DB_STOCK\", \"SHEET_CHN_DAILY\", symbol, stockData)\n return startTime, message\n\n modified = False\n first_date = pd.Timestamp(stockData.index[0])\n last_date = pd.Timestamp(stockData.index[-1])\n updateOnce = end_date > lastUpdateTime\n \n if end_date > last_date and (updateOnce or force_check):\n to_date = (last_date + datetime.timedelta(days=1)).strftime(\"%Y-%m-%d\")\n if judgeNeedPostDownload(to_date, till_date):\n message = message + \", download post data from \" + to_date + \" to \" + till_date\n moreStockData, tempMessage = getSingleStockByTime(symbol, to_date, till_date)\n message = message + tempMessage\n if len(moreStockData) > 0:\n if isinstance(moreStockData.index, pd.DatetimeIndex):\n moreStockData.index = moreStockData.index.strftime(\"%Y-%m-%d\")\n modified = True\n stockData = pd.concat([stockData, moreStockData])\n stockData.index.name = 'date'\n \n if modified:\n stockData = stockData[~stockData.index.duplicated(keep='first')]\n storeStock(root_path, \"DB_STOCK\", \"SHEET_CHN_DAILY\", symbol, stockData)\n elif updateOnce:\n stockData = stockData[~stockData.index.duplicated(keep='first')]\n storeStock(root_path, \"DB_STOCK\", \"SHEET_CHN_DAILY\", symbol, stockData)\n message = message + \", nothing updated\"\n else:\n message = \"\"\n\n return startTime, message\n\ndef updateStockData_CHN(root_path, storeType, force_check = False):\n\n symbols = getStocksList(root_path).index.values.tolist()\n\n pbar = tqdm(total=len(symbols))\n\n\n if storeType == 2:\n for symbol in symbols:\n startTime, message = updateSingleStockData(root_path, symbol, force_check)\n outMessage = '%-*s fetched in: %.4s seconds' % (6, symbol, (time.time() - startTime))\n pbar.set_description(outMessage)\n pbar.update(1)\n\n if storeType == 1:\n log_errors = []\n log_update = []\n with concurrent.futures.ThreadPoolExecutor(max_workers=8) as executor:\n # Start the load operations and mark each future with its URL\n future_to_stock = {executor.submit(updateSingleStockData, root_path, symbol, force_check): symbol for symbol in symbols}\n for future in concurrent.futures.as_completed(future_to_stock):\n stock = future_to_stock[future]\n try:\n startTime, message = future.result()\n except Exception as exc:\n startTime = time.time()\n log_errors.append('%r generated an exception: %s' % (stock, exc))\n else:\n if len(message) > 0: log_update.append(message)\n outMessage = '%-*s fetched in: %.4s seconds' % (6, stock, (time.time() - startTime))\n pbar.set_description(outMessage)\n pbar.update(1)\n if len(log_errors) > 0: print(log_errors)\n # if len(log_update) > 0: print(log_update)\n\n pbar.close()\n return symbols\n\nif __name__ == \"__main__\":\n pd.set_option('precision', 3)\n pd.set_option('display.width',1000)\n warnings.filterwarnings('ignore', category=pd.io.pytables.PerformanceWarning)\n\n config = configparser.ConfigParser()\n config.read(root_path + \"/\" + \"config.ini\")\n storeType = int(config.get('Setting', 'StoreType'))\n\n if storeType == 1:\n from Start_DB_Server import StartServer, ShutdownServer\n # start database server (async)\n thread = StartServer(root_path)\n \n # wait for db start, the standard procedure should listen to \n # the completed event of function \"StartServer\"\n 
time.sleep(5)\n \n updateStockData_CHN(root_path, storeType)\n\n if storeType == 1:\n # stop database server (sync)\n time.sleep(5)\n ShutdownServer()\n"
] | [
[
"pandas.bdate_range",
"pandas.DataFrame",
"pandas.set_option",
"pandas.concat",
"pandas.Timestamp"
]
] |
ambareeshravi/TrafficSignClassifier_API | [
"8628057439ee70f6d827abf931071e9b6539bd5b"
] | [
"utils.py"
] | [
"'''\nAuthor: Ambareesh Ravi\nDate: Jul 31, 2021\nTitle: utils.py\nDescription:\n Contains utility and helper functions for the project\n'''\n\n# Libraries imports\nimport numpy as np\nimport pandas as pd\nimport os\nfrom tqdm import tqdm\nfrom time import time\nfrom glob import glob\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nimport argparse\nimport cv2\n\n# Global variables\nMANUAL_SEED = 42\nnp.random.seed(42)\n\ndef INFO(s):\n '''\n Prints information in a particular format\n\n Args:\n s - string <str> to be printed\n\n Returns:\n -\n\n Exception:\n -\n '''\n print(\"-\"*40)\n print(\"INFO:\", s)\n print(\"-\"*40)\n\ndef read_directory_content(path):\n '''\n Reads all files in a directory given a path\n \n Args:\n path - path for the directory as <str>\n \n Returns:\n sorted list of files in the directory\n \n Exception:\n -\n '''\n if \"*\" not in path: path = os.path.join(path, \"*\")\n return sorted(glob(path))\n \ndef create_directory(path):\n '''\n Creates a directory given a path if the path does not exist\n \n Args:\n path - path for the directory as <str>\n \n Returns:\n -\n \n Exception:\n -\n '''\n # Create a directory\n if not os.path.exists(path): os.mkdir(path)\n\ndef save_image(array, path, resize = False, extension = \".png\"):\n '''\n Saves an array into an image file\n \n Args:\n array - image as a <np.array>\n path - path for the image as <str>\n resize - [optional] to resize image to given size - <tuple> of <int> (w,h)\n extension - [optional] type of image file as <str>\n \n Returns:\n -\n \n Exception:\n -\n '''\n # Add image extension\n if extension not in path:\n path = path.split(\".\")[0] + extension\n \n # Save image into a file using PIL Image handle\n img = Image.fromarray(array)\n # Resize image if reaquired\n if resize: img = img.resize(resize)\n # Save image\n img.save(path)\n \ndef read_image(image_path):\n '''\n Reads an image from the given path as a PIL.Image handle\n \n Args:\n image_path - path for the image as <str>\n \n Returns:\n -\n \n Exception:\n -\n '''\n return Image.open(image_path)\n\nclass Visualizer:\n def __init__(self,):\n '''\n Initializes the class to visualize results in comparison with the inputs\n \n Args:\n -\n \n Returns:\n -\n \n Exception:\n -\n '''\n pass\n \n def gray2color(self, x):\n '''\n Converts a single channel grayscale image to coloured 3 channel format\n \n Args:\n x - input as <np.array>\n \n Returns:\n -\n \n Exception:\n -\n '''\n return np.repeat(np.expand_dims(x, axis = -1), 3, axis = -1)\n \n def visualize_composite(self, input_image, label, prediction, margin = 8, save_path = None):\n '''\n Function to visualize input, label, prediction together in an image\n \n Args:\n input_image - input RGB image as <np.array>\n label - label binary mask Grayscale image as <np.array>\n prediction - predicted binary mask Grayscale image as <np.array>\n margin - margin between images in terms of pixels in <int>\n save_path - path to save the file <str>\n \n Returns:\n -\n \n Exception:\n -\n '''\n rounded_pred = np.round(prediction)\n margin = np.ones((label.shape[0], margin, 3))\n composite = np.hstack((input_image, margin, self.gray2color(label), margin, self.gray2color(rounded_pred)))\n img = Image.fromarray((composite*255).astype(np.uint8))\n if save_path: save_image()\n return img\n"
] | [
[
"numpy.round",
"numpy.ones",
"numpy.expand_dims",
"numpy.random.seed"
]
] |
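A short, hedged sketch of the Visualizer class from the utils.py record above (illustration only, not a dataset record). Dummy random arrays stand in for a real image, label mask and prediction, and the import path assumes utils.py sits at the repository root:

import numpy as np
from utils import Visualizer

vis = Visualizer()
inp = np.random.rand(64, 64, 3)            # RGB input scaled to [0, 1]
lbl = np.random.randint(0, 2, (64, 64))    # binary ground-truth mask
prd = np.random.rand(64, 64)               # predicted mask probabilities
# Returns a PIL.Image with input | label | rounded prediction side by side
composite = vis.visualize_composite(inp, lbl, prd)
composite.save("composite.png")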
kadeng/tensorflow_project_workspace | [
"dee284fb2d1796329895130a075cd57a62ea873f"
] | [
"tensorflow/contrib/learn/python/learn/estimators/dnn.py"
] | [
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Deep Neural Network estimators.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport six\n\nfrom tensorflow.contrib import layers\nfrom tensorflow.contrib.framework import deprecated\nfrom tensorflow.contrib.framework import deprecated_arg_values\nfrom tensorflow.contrib.framework.python.framework import experimental\nfrom tensorflow.contrib.framework.python.ops import variables as contrib_variables\nfrom tensorflow.contrib.layers.python.layers import optimizers\nfrom tensorflow.contrib.learn.python.learn import evaluable\nfrom tensorflow.contrib.learn.python.learn import metric_spec\nfrom tensorflow.contrib.learn.python.learn import monitors as monitor_lib\nfrom tensorflow.contrib.learn.python.learn import trainable\nfrom tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined\nfrom tensorflow.contrib.learn.python.learn.estimators import estimator\nfrom tensorflow.contrib.learn.python.learn.estimators import head as head_lib\nfrom tensorflow.contrib.learn.python.learn.estimators import model_fn\nfrom tensorflow.contrib.learn.python.learn.estimators import prediction_key\nfrom tensorflow.contrib.learn.python.learn.utils import export\nfrom tensorflow.python.ops import nn\nfrom tensorflow.python.ops import partitioned_variables\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.summary import summary\n\n_CENTERED_BIAS_WEIGHT = \"centered_bias_weight\"\n\n# The default learning rate of 0.05 is a historical artifact of the initial\n# implementation, but seems a reasonable choice.\n_LEARNING_RATE = 0.05\n\n\ndef _get_feature_dict(features):\n if isinstance(features, dict):\n return features\n return {\"\": features}\n\n\ndef _get_optimizer(optimizer):\n if callable(optimizer):\n return optimizer()\n else:\n return optimizer\n\n\ndef _add_hidden_layer_summary(value, tag):\n summary.scalar(\"%s_fraction_of_zero_values\" % tag, nn.zero_fraction(value))\n summary.histogram(\"%s_activation\" % tag, value)\n\n\ndef _dnn_model_fn(features, labels, mode, params, config=None):\n \"\"\"Deep Neural Net model_fn.\n\n Args:\n features: `Tensor` or dict of `Tensor` (depends on data passed to `fit`).\n labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of\n dtype `int32` or `int64` in the range `[0, n_classes)`.\n mode: Defines whether this is training, evaluation or prediction.\n See `ModeKeys`.\n params: A dict of hyperparameters.\n The following hyperparameters are expected:\n * head: A `_Head` instance.\n * hidden_units: List of hidden units per layer.\n * feature_columns: An iterable containing all the feature columns used by\n the model.\n * optimizer: string, `Optimizer` object, or callable that defines the\n optimizer to use for training. 
If `None`, will use the Adagrad\n optimizer with a default learning rate of 0.05.\n * activation_fn: Activation function applied to each layer. If `None`,\n will use `tf.nn.relu`.\n * dropout: When not `None`, the probability we will drop out a given\n coordinate.\n * gradient_clip_norm: A float > 0. If provided, gradients are\n clipped to their global norm with this clipping ratio.\n * embedding_lr_multipliers: Optional. A dictionary from\n `EmbeddingColumn` to a `float` multiplier. Multiplier will be used to\n multiply with learning rate for the embedding variables.\n config: `RunConfig` object to configure the runtime settings.\n\n Returns:\n predictions: A dict of `Tensor` objects.\n loss: A scalar containing the loss of the step.\n train_op: The op for training.\n \"\"\"\n head = params[\"head\"]\n hidden_units = params[\"hidden_units\"]\n feature_columns = params[\"feature_columns\"]\n optimizer = params.get(\"optimizer\") or \"Adagrad\"\n activation_fn = params.get(\"activation_fn\")\n dropout = params.get(\"dropout\")\n gradient_clip_norm = params.get(\"gradient_clip_norm\")\n num_ps_replicas = config.num_ps_replicas if config else 0\n embedding_lr_multipliers = params.get(\"embedding_lr_multipliers\", {})\n\n features = _get_feature_dict(features)\n parent_scope = \"dnn\"\n\n input_layer_partitioner = (partitioned_variables.min_max_variable_partitioner(\n max_partitions=num_ps_replicas, min_slice_size=64 << 20))\n input_layer_scope = parent_scope + \"/input_from_feature_columns\"\n with variable_scope.variable_scope(\n input_layer_scope,\n values=list(six.itervalues(features)),\n partitioner=input_layer_partitioner) as scope:\n net = layers.input_from_feature_columns(\n columns_to_tensors=features,\n feature_columns=feature_columns,\n weight_collections=[parent_scope],\n scope=scope)\n\n hidden_layer_partitioner = (\n partitioned_variables.min_max_variable_partitioner(\n max_partitions=num_ps_replicas))\n for layer_id, num_hidden_units in enumerate(hidden_units):\n with variable_scope.variable_scope(\n parent_scope + \"/hiddenlayer_%d\" % layer_id,\n values=[net],\n partitioner=hidden_layer_partitioner) as scope:\n net = layers.fully_connected(\n net,\n num_hidden_units,\n activation_fn=activation_fn,\n variables_collections=[parent_scope],\n scope=scope)\n if dropout is not None and mode == model_fn.ModeKeys.TRAIN:\n net = layers.dropout(net, keep_prob=(1.0 - dropout))\n _add_hidden_layer_summary(net, scope.name)\n\n with variable_scope.variable_scope(\n parent_scope + \"/logits\",\n values=[net],\n partitioner=hidden_layer_partitioner) as scope:\n logits = layers.fully_connected(\n net,\n head.logits_dimension,\n activation_fn=None,\n variables_collections=[parent_scope],\n scope=scope)\n _add_hidden_layer_summary(logits, scope.name)\n\n def _train_op_fn(loss):\n \"\"\"Returns the op to optimize the loss.\"\"\"\n return optimizers.optimize_loss(\n loss=loss,\n global_step=contrib_variables.get_global_step(),\n learning_rate=_LEARNING_RATE,\n optimizer=_get_optimizer(optimizer),\n gradient_multipliers=(\n dnn_linear_combined._extract_embedding_lr_multipliers( # pylint: disable=protected-access\n embedding_lr_multipliers, parent_scope, input_layer_scope)),\n clip_gradients=gradient_clip_norm,\n name=parent_scope,\n # Empty summaries to prevent optimizers from logging the training_loss.\n summaries=[])\n\n return head.head_ops(features, labels, mode, _train_op_fn, logits)\n\n\nclass DNNClassifier(evaluable.Evaluable, trainable.Trainable):\n \"\"\"A classifier for TensorFlow DNN 
models.\n\n Example:\n\n ```python\n sparse_feature_a = sparse_column_with_hash_bucket(...)\n sparse_feature_b = sparse_column_with_hash_bucket(...)\n\n sparse_feature_a_emb = embedding_column(sparse_id_column=sparse_feature_a,\n ...)\n sparse_feature_b_emb = embedding_column(sparse_id_column=sparse_feature_b,\n ...)\n\n estimator = DNNClassifier(\n feature_columns=[sparse_feature_a_emb, sparse_feature_b_emb],\n hidden_units=[1024, 512, 256])\n\n # Or estimator using the ProximalAdagradOptimizer optimizer with\n # regularization.\n estimator = DNNClassifier(\n feature_columns=[sparse_feature_a_emb, sparse_feature_b_emb],\n hidden_units=[1024, 512, 256],\n optimizer=tf.train.ProximalAdagradOptimizer(\n learning_rate=0.1,\n l1_regularization_strength=0.001\n ))\n\n # Input builders\n def input_fn_train: # returns x, y (where y represents label's class index).\n pass\n estimator.fit(input_fn=input_fn_train)\n\n def input_fn_eval: # returns x, y (where y represents label's class index).\n pass\n estimator.evaluate(input_fn=input_fn_eval)\n estimator.predict(x=x) # returns predicted labels (i.e. label's class index).\n ```\n\n Input of `fit` and `evaluate` should have following features,\n otherwise there will be a `KeyError`:\n\n * if `weight_column_name` is not `None`, a feature with\n `key=weight_column_name` whose value is a `Tensor`.\n * for each `column` in `feature_columns`:\n - if `column` is a `SparseColumn`, a feature with `key=column.name`\n whose `value` is a `SparseTensor`.\n - if `column` is a `WeightedSparseColumn`, two features: the first with\n `key` the id column name, the second with `key` the weight column name.\n Both features' `value` must be a `SparseTensor`.\n - if `column` is a `RealValuedColumn`, a feature with `key=column.name`\n whose `value` is a `Tensor`.\n \"\"\"\n\n def __init__(self,\n hidden_units,\n feature_columns,\n model_dir=None,\n n_classes=2,\n weight_column_name=None,\n optimizer=None,\n activation_fn=nn.relu,\n dropout=None,\n gradient_clip_norm=None,\n enable_centered_bias=False,\n config=None,\n feature_engineering_fn=None,\n embedding_lr_multipliers=None):\n \"\"\"Initializes a DNNClassifier instance.\n\n Args:\n hidden_units: List of hidden units per layer. All layers are fully\n connected. Ex. `[64, 32]` means first layer has 64 nodes and second one\n has 32.\n feature_columns: An iterable containing all the feature columns used by\n the model. All items in the set should be instances of classes derived\n from `FeatureColumn`.\n model_dir: Directory to save model parameters, graph and etc. This can\n also be used to load checkpoints from the directory into a estimator to\n continue training a previously saved model.\n n_classes: number of label classes. Default is binary classification.\n It must be greater than 1. Note: Class labels are integers representing\n the class index (i.e. values from 0 to n_classes-1). For arbitrary\n label values (e.g. string labels), convert to class indices first.\n weight_column_name: A string defining feature column name representing\n weights. It is used to down weight or boost examples during training. It\n will be multiplied by the loss of the example.\n optimizer: An instance of `tf.Optimizer` used to train the model. If\n `None`, will use an Adagrad optimizer.\n activation_fn: Activation function applied to each layer. If `None`, will\n use `tf.nn.relu`.\n dropout: When not `None`, the probability we will drop out a given\n coordinate.\n gradient_clip_norm: A float > 0. 
If provided, gradients are\n clipped to their global norm with this clipping ratio. See\n `tf.clip_by_global_norm` for more details.\n enable_centered_bias: A bool. If True, estimator will learn a centered\n bias variable for each class. Rest of the model structure learns the\n residual after centered bias.\n config: `RunConfig` object to configure the runtime settings.\n feature_engineering_fn: Feature engineering function. Takes features and\n labels which are the output of `input_fn` and\n returns features and labels which will be fed\n into the model.\n embedding_lr_multipliers: Optional. A dictionary from `EmbeddingColumn` to\n a `float` multiplier. Multiplier will be used to multiply with\n learning rate for the embedding variables.\n\n Returns:\n A `DNNClassifier` estimator.\n\n Raises:\n ValueError: If `n_classes` < 2.\n \"\"\"\n self._hidden_units = hidden_units\n self._feature_columns = tuple(feature_columns or [])\n self._enable_centered_bias = enable_centered_bias\n self._estimator = estimator.Estimator(\n model_fn=_dnn_model_fn,\n model_dir=model_dir,\n config=config,\n params={\n \"head\":\n head_lib._multi_class_head( # pylint: disable=protected-access\n n_classes,\n weight_column_name=weight_column_name,\n enable_centered_bias=enable_centered_bias),\n \"hidden_units\":\n hidden_units,\n \"feature_columns\":\n self._feature_columns,\n \"optimizer\":\n optimizer,\n \"activation_fn\":\n activation_fn,\n \"dropout\":\n dropout,\n \"gradient_clip_norm\":\n gradient_clip_norm,\n \"embedding_lr_multipliers\":\n embedding_lr_multipliers,\n },\n feature_engineering_fn=feature_engineering_fn)\n\n def fit(self,\n x=None,\n y=None,\n input_fn=None,\n steps=None,\n batch_size=None,\n monitors=None,\n max_steps=None):\n \"\"\"See trainable.Trainable. Note: Labels must be integer class indices.\"\"\"\n # TODO(roumposg): Remove when deprecated monitors are removed.\n hooks = monitor_lib.replace_monitors_with_hooks(monitors, self)\n self._estimator.fit(x=x,\n y=y,\n input_fn=input_fn,\n steps=steps,\n batch_size=batch_size,\n monitors=hooks,\n max_steps=max_steps)\n return self\n\n def evaluate(self,\n x=None,\n y=None,\n input_fn=None,\n feed_fn=None,\n batch_size=None,\n steps=None,\n metrics=None,\n name=None,\n checkpoint_path=None,\n hooks=None):\n \"\"\"See evaluable.Evaluable. Note: Labels must be integer class indices.\"\"\"\n return self._estimator.evaluate(\n x=x,\n y=y,\n input_fn=input_fn,\n feed_fn=feed_fn,\n batch_size=batch_size,\n steps=steps,\n metrics=metrics,\n name=name,\n checkpoint_path=checkpoint_path,\n hooks=hooks)\n\n @deprecated_arg_values(\n estimator.AS_ITERABLE_DATE,\n estimator.AS_ITERABLE_INSTRUCTIONS,\n as_iterable=False)\n def predict(self, x=None, input_fn=None, batch_size=None, as_iterable=True):\n \"\"\"Returns predicted classes for given features.\n\n Args:\n x: features.\n input_fn: Input function. If set, x must be None.\n batch_size: Override default batch size.\n as_iterable: If True, return an iterable which keeps yielding predictions\n for each example until inputs are exhausted. Note: The inputs must\n terminate if you want the iterable to terminate (e.g. be sure to pass\n num_epochs=1 if you are using something like read_batch_features).\n\n Returns:\n Numpy array of predicted classes with shape [batch_size] (or an iterable\n of predicted classes if as_iterable is True). Each predicted class is\n represented by its class index (i.e. 
integer from 0 to n_classes-1).\n \"\"\"\n key = prediction_key.PredictionKey.CLASSES\n preds = self._estimator.predict(\n x=x,\n input_fn=input_fn,\n batch_size=batch_size,\n outputs=[key],\n as_iterable=as_iterable)\n if as_iterable:\n return (pred[key] for pred in preds)\n return preds[key].reshape(-1)\n\n @deprecated_arg_values(\n estimator.AS_ITERABLE_DATE,\n estimator.AS_ITERABLE_INSTRUCTIONS,\n as_iterable=False)\n def predict_proba(self,\n x=None,\n input_fn=None,\n batch_size=None,\n as_iterable=True):\n \"\"\"Returns prediction probabilities for given features.\n\n Args:\n x: features.\n input_fn: Input function. If set, x and y must be None.\n batch_size: Override default batch size.\n as_iterable: If True, return an iterable which keeps yielding predictions\n for each example until inputs are exhausted. Note: The inputs must\n terminate if you want the iterable to terminate (e.g. be sure to pass\n num_epochs=1 if you are using something like read_batch_features).\n\n Returns:\n Numpy array of predicted probabilities with shape [batch_size, n_classes]\n (or an iterable of predicted probabilities if as_iterable is True).\n \"\"\"\n key = prediction_key.PredictionKey.PROBABILITIES\n preds = self._estimator.predict(\n x=x,\n input_fn=input_fn,\n batch_size=batch_size,\n outputs=[key],\n as_iterable=as_iterable)\n if as_iterable:\n return (pred[key] for pred in preds)\n return preds[key]\n\n def _get_predict_ops(self, features):\n \"\"\"See `Estimator` class.\"\"\"\n # This method exists to support some models that use the legacy interface.\n # pylint: disable=protected-access\n return self._estimator._get_predict_ops(features)\n\n def get_variable_names(self):\n \"\"\"Returns list of all variable names in this model.\n\n Returns:\n List of names.\n \"\"\"\n return self._estimator.get_variable_names()\n\n def get_variable_value(self, name):\n \"\"\"Returns value of the variable given by name.\n\n Args:\n name: string, name of the tensor.\n\n Returns:\n `Tensor` object.\n \"\"\"\n return self._estimator.get_variable_value(name)\n\n def export(self,\n export_dir,\n input_fn=None,\n input_feature_key=None,\n use_deprecated_input_fn=True,\n signature_fn=None,\n default_batch_size=1,\n exports_to_keep=None):\n \"\"\"See BaseEstimator.export.\"\"\"\n\n def default_input_fn(unused_estimator, examples):\n return layers.parse_feature_columns_from_examples(examples,\n self._feature_columns)\n\n return self._estimator.export(\n export_dir=export_dir,\n input_fn=input_fn or default_input_fn,\n input_feature_key=input_feature_key,\n use_deprecated_input_fn=use_deprecated_input_fn,\n signature_fn=(signature_fn or\n export.classification_signature_fn_with_prob),\n prediction_key=prediction_key.PredictionKey.PROBABILITIES,\n default_batch_size=default_batch_size,\n exports_to_keep=exports_to_keep)\n\n @experimental\n def export_savedmodel(self,\n export_dir_base,\n input_fn,\n default_output_alternative_key=None,\n assets_extra=None,\n as_text=False,\n exports_to_keep=None):\n return self._estimator.export_savedmodel(\n export_dir_base,\n input_fn,\n default_output_alternative_key=default_output_alternative_key,\n assets_extra=assets_extra,\n as_text=as_text,\n exports_to_keep=exports_to_keep)\n\n @property\n def model_dir(self):\n return self._estimator.model_dir\n\n @property\n @deprecated(\"2016-10-30\",\n \"This method will be removed after the deprecation date. 
\"\n \"To inspect variables, use get_variable_names() and \"\n \"get_variable_value().\")\n def weights_(self):\n hiddenlayer_weights = [\n self.get_variable_value(\"dnn/hiddenlayer_%d/weights\" % i)\n for i, _ in enumerate(self._hidden_units)\n ]\n logits_weights = [self.get_variable_value(\"dnn/logits/weights\")]\n return hiddenlayer_weights + logits_weights\n\n @property\n @deprecated(\"2016-10-30\",\n \"This method will be removed after the deprecation date. \"\n \"To inspect variables, use get_variable_names() and \"\n \"get_variable_value().\")\n def bias_(self):\n hiddenlayer_bias = [\n self.get_variable_value(\"dnn/hiddenlayer_%d/biases\" % i)\n for i, _ in enumerate(self._hidden_units)\n ]\n logits_bias = [self.get_variable_value(\"dnn/logits/biases\")]\n if self._enable_centered_bias:\n centered_bias = [self.get_variable_value(_CENTERED_BIAS_WEIGHT)]\n else:\n centered_bias = []\n return hiddenlayer_bias + logits_bias + centered_bias\n\n @property\n def config(self):\n return self._estimator.config\n\n\nclass DNNRegressor(evaluable.Evaluable, trainable.Trainable):\n \"\"\"A regressor for TensorFlow DNN models.\n\n Example:\n\n ```python\n sparse_feature_a = sparse_column_with_hash_bucket(...)\n sparse_feature_b = sparse_column_with_hash_bucket(...)\n\n sparse_feature_a_emb = embedding_column(sparse_id_column=sparse_feature_a,\n ...)\n sparse_feature_b_emb = embedding_column(sparse_id_column=sparse_feature_b,\n ...)\n\n estimator = DNNRegressor(\n feature_columns=[sparse_feature_a, sparse_feature_b],\n hidden_units=[1024, 512, 256])\n\n # Or estimator using the ProximalAdagradOptimizer optimizer with\n # regularization.\n estimator = DNNRegressor(\n feature_columns=[sparse_feature_a, sparse_feature_b],\n hidden_units=[1024, 512, 256],\n optimizer=tf.train.ProximalAdagradOptimizer(\n learning_rate=0.1,\n l1_regularization_strength=0.001\n ))\n\n # Input builders\n def input_fn_train: # returns x, y\n pass\n estimator.fit(input_fn=input_fn_train)\n\n def input_fn_eval: # returns x, y\n pass\n estimator.evaluate(input_fn=input_fn_eval)\n estimator.predict(x=x)\n ```\n\n Input of `fit` and `evaluate` should have following features,\n otherwise there will be a `KeyError`:\n\n * if `weight_column_name` is not `None`, a feature with\n `key=weight_column_name` whose value is a `Tensor`.\n * for each `column` in `feature_columns`:\n - if `column` is a `SparseColumn`, a feature with `key=column.name`\n whose `value` is a `SparseTensor`.\n - if `column` is a `WeightedSparseColumn`, two features: the first with\n `key` the id column name, the second with `key` the weight column name.\n Both features' `value` must be a `SparseTensor`.\n - if `column` is a `RealValuedColumn`, a feature with `key=column.name`\n whose `value` is a `Tensor`.\n \"\"\"\n\n def __init__(self,\n hidden_units,\n feature_columns,\n model_dir=None,\n weight_column_name=None,\n optimizer=None,\n activation_fn=nn.relu,\n dropout=None,\n gradient_clip_norm=None,\n enable_centered_bias=False,\n config=None,\n feature_engineering_fn=None,\n label_dimension=1,\n embedding_lr_multipliers=None):\n \"\"\"Initializes a `DNNRegressor` instance.\n\n Args:\n hidden_units: List of hidden units per layer. All layers are fully\n connected. Ex. `[64, 32]` means first layer has 64 nodes and second one\n has 32.\n feature_columns: An iterable containing all the feature columns used by\n the model. 
All items in the set should be instances of classes derived\n from `FeatureColumn`.\n model_dir: Directory to save model parameters, graph and etc. This can\n also be used to load checkpoints from the directory into a estimator to\n continue training a previously saved model.\n weight_column_name: A string defining feature column name representing\n weights. It is used to down weight or boost examples during training. It\n will be multiplied by the loss of the example.\n optimizer: An instance of `tf.Optimizer` used to train the model. If\n `None`, will use an Adagrad optimizer.\n activation_fn: Activation function applied to each layer. If `None`, will\n use `tf.nn.relu`.\n dropout: When not `None`, the probability we will drop out a given\n coordinate.\n gradient_clip_norm: A `float` > 0. If provided, gradients are clipped\n to their global norm with this clipping ratio. See\n `tf.clip_by_global_norm` for more details.\n enable_centered_bias: A bool. If True, estimator will learn a centered\n bias variable for each class. Rest of the model structure learns the\n residual after centered bias.\n config: `RunConfig` object to configure the runtime settings.\n feature_engineering_fn: Feature engineering function. Takes features and\n labels which are the output of `input_fn` and\n returns features and labels which will be fed\n into the model.\n label_dimension: Dimension of the label for multilabels. Defaults to 1.\n embedding_lr_multipliers: Optional. A dictionary from `EbeddingColumn` to\n a `float` multiplier. Multiplier will be used to multiply with\n learning rate for the embedding variables.\n\n Returns:\n A `DNNRegressor` estimator.\n \"\"\"\n self._feature_columns = tuple(feature_columns or [])\n self._estimator = estimator.Estimator(\n model_fn=_dnn_model_fn,\n model_dir=model_dir,\n config=config,\n params={\n \"head\":\n head_lib._regression_head( # pylint: disable=protected-access\n label_dimension=label_dimension,\n weight_column_name=weight_column_name,\n enable_centered_bias=enable_centered_bias),\n \"hidden_units\":\n hidden_units,\n \"feature_columns\":\n self._feature_columns,\n \"optimizer\":\n optimizer,\n \"activation_fn\":\n activation_fn,\n \"dropout\":\n dropout,\n \"gradient_clip_norm\":\n gradient_clip_norm,\n \"embedding_lr_multipliers\":\n embedding_lr_multipliers,\n },\n feature_engineering_fn=feature_engineering_fn)\n\n def fit(self,\n x=None,\n y=None,\n input_fn=None,\n steps=None,\n batch_size=None,\n monitors=None,\n max_steps=None):\n \"\"\"See trainable.Trainable.\"\"\"\n # TODO(roumposg): Remove when deprecated monitors are removed.\n hooks = monitor_lib.replace_monitors_with_hooks(monitors, self)\n self._estimator.fit(x=x,\n y=y,\n input_fn=input_fn,\n steps=steps,\n batch_size=batch_size,\n monitors=hooks,\n max_steps=max_steps)\n return self\n\n def evaluate(self,\n x=None,\n y=None,\n input_fn=None,\n feed_fn=None,\n batch_size=None,\n steps=None,\n metrics=None,\n name=None,\n checkpoint_path=None,\n hooks=None):\n \"\"\"See evaluable.Evaluable.\"\"\"\n # TODO(zakaria): remove once deprecation is finished (b/31229024)\n custom_metrics = {}\n if metrics:\n for key, metric in six.iteritems(metrics):\n if (not isinstance(metric, metric_spec.MetricSpec) and\n not isinstance(key, tuple)):\n custom_metrics[(key, prediction_key.PredictionKey.SCORES)] = metric\n else:\n custom_metrics[key] = metric\n\n return self._estimator.evaluate(\n x=x,\n y=y,\n input_fn=input_fn,\n feed_fn=feed_fn,\n batch_size=batch_size,\n steps=steps,\n metrics=custom_metrics,\n 
name=name,\n checkpoint_path=checkpoint_path,\n hooks=hooks)\n\n @deprecated_arg_values(\n estimator.AS_ITERABLE_DATE,\n estimator.AS_ITERABLE_INSTRUCTIONS,\n as_iterable=False)\n def predict(self, x=None, input_fn=None, batch_size=None, as_iterable=True):\n \"\"\"Returns predicted scores for given features.\n\n Args:\n x: features.\n input_fn: Input function. If set, x must be None.\n batch_size: Override default batch size.\n as_iterable: If True, return an iterable which keeps yielding predictions\n for each example until inputs are exhausted. Note: The inputs must\n terminate if you want the iterable to terminate (e.g. be sure to pass\n num_epochs=1 if you are using something like read_batch_features).\n\n Returns:\n Numpy array of predicted scores (or an iterable of predicted scores if\n as_iterable is True). If `label_dimension == 1`, the shape of the output\n is `[batch_size]`, otherwise the shape is `[batch_size, label_dimension]`.\n \"\"\"\n key = prediction_key.PredictionKey.SCORES\n preds = self._estimator.predict(\n x=x,\n input_fn=input_fn,\n batch_size=batch_size,\n outputs=[key],\n as_iterable=as_iterable)\n if as_iterable:\n return (pred[key] for pred in preds)\n return preds[key]\n\n def _get_predict_ops(self, features):\n \"\"\"See `Estimator` class.\"\"\"\n # This method exists to support some models that use the legacy interface.\n # pylint: disable=protected-access\n return self._estimator._get_predict_ops(features)\n\n def get_variable_names(self):\n \"\"\"Returns list of all variable names in this model.\n\n Returns:\n List of names.\n \"\"\"\n return self._estimator.get_variable_names()\n\n def get_variable_value(self, name):\n \"\"\"Returns value of the variable given by name.\n\n Args:\n name: string, name of the tensor.\n\n Returns:\n `Tensor` object.\n \"\"\"\n return self._estimator.get_variable_value(name)\n\n def export(self,\n export_dir,\n input_fn=None,\n input_feature_key=None,\n use_deprecated_input_fn=True,\n signature_fn=None,\n default_batch_size=1,\n exports_to_keep=None):\n \"\"\"See BaseEstimator.export.\"\"\"\n\n def default_input_fn(unused_estimator, examples):\n return layers.parse_feature_columns_from_examples(examples,\n self._feature_columns)\n\n return self._estimator.export(\n export_dir=export_dir,\n input_fn=input_fn or default_input_fn,\n input_feature_key=input_feature_key,\n use_deprecated_input_fn=use_deprecated_input_fn,\n signature_fn=signature_fn or export.regression_signature_fn,\n prediction_key=prediction_key.PredictionKey.SCORES,\n default_batch_size=default_batch_size,\n exports_to_keep=exports_to_keep)\n\n @property\n def model_dir(self):\n return self._estimator.model_dir\n\n @property\n def config(self):\n return self._estimator.config\n"
] | [
[
"tensorflow.contrib.layers.input_from_feature_columns",
"tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined._extract_embedding_lr_multipliers",
"tensorflow.contrib.layers.parse_feature_columns_from_examples",
"tensorflow.contrib.layers.fully_connected",
"tensorflow.contrib.layers.dropout",
"tensorflow.python.ops.variable_scope.variable_scope",
"tensorflow.python.summary.summary.histogram",
"tensorflow.contrib.learn.python.learn.estimators.head._regression_head",
"tensorflow.contrib.framework.deprecated",
"tensorflow.python.ops.nn.zero_fraction",
"tensorflow.python.ops.partitioned_variables.min_max_variable_partitioner",
"tensorflow.contrib.framework.deprecated_arg_values",
"tensorflow.contrib.framework.python.ops.variables.get_global_step",
"tensorflow.contrib.learn.python.learn.estimators.head._multi_class_head",
"tensorflow.contrib.learn.python.learn.monitors.replace_monitors_with_hooks"
]
] |
EmbeddedML-EDAGroup/Q-PPG | [
"ed42829d0a456db4f0b31d63ba8b22ba483c7b08"
] | [
"precision_search/model/TEMPONet_float.py"
] | [
"#*----------------------------------------------------------------------------*\n#* Copyright (C) 2021 Politecnico di Torino, Italy *\n#* SPDX-License-Identifier: Apache-2.0 *\n#* *\n#* Licensed under the Apache License, Version 2.0 (the \"License\"); *\n#* you may not use this file except in compliance with the License. *\n#* You may obtain a copy of the License at *\n#* *\n#* http://www.apache.org/licenses/LICENSE-2.0 *\n#* *\n#* Unless required by applicable law or agreed to in writing, software *\n#* distributed under the License is distributed on an \"AS IS\" BASIS, *\n#* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *\n#* See the License for the specific language governing permissions and *\n#* limitations under the License. *\n#* *\n#* Author: Alessio Burrello *\n#*----------------------------------------------------------------------------*\n\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom base import BaseModel\nfrom math import ceil\nimport sys\nsys.path.append(\"..\")\nfrom models import quant_module_1d as qm\n\n__all__ = ['TempoNetfloat']\n\n\ndef TempoNetfloat(**kwargs):\n return TEMPONet(**kwargs)\n\n\nclass TEMPONet(BaseModel):\n \"\"\"\n TEMPONet architecture:\n Three repeated instances of TemporalConvBlock and ConvBlock organized as follows:\n - TemporalConvBlock\n - ConvBlock\n Two instances of Regressor followed by a final Linear layer with a single neuron.\n \"\"\"\n\n def __init__(self, dataset_name='PPG_Dalia', dataset_args={}):\n super(TEMPONet, self).__init__()\n\n self.dil = [\n 2, 2, 1,\n 4, 4,\n 8, 8\n ]\n self.rf = [\n 5, 5, 5,\n 9, 9,\n 17, 17\n ]\n self.ch = [\n 32, 32, 64,\n 64, 64, 128,\n 128, 128, 128,\n 256, 128\n ]\n\n # 1st instance of two TempConvBlocks and ConvBlock\n k_tcb00 = ceil(self.rf[0] / self.dil[0])\n self.tcb00 = TempConvBlock(\n ch_in=4,\n ch_out=self.ch[0],\n k_size=k_tcb00,\n dil=self.dil[0],\n pad=((k_tcb00 - 1) * self.dil[0] + 1) // 2\n )\n k_tcb01 = ceil(self.rf[1] / self.dil[1])\n self.tcb01 = TempConvBlock(\n ch_in=self.ch[0],\n ch_out=self.ch[1],\n k_size=k_tcb01,\n dil=self.dil[1],\n pad=((k_tcb01 - 1) * self.dil[1] + 1) // 2\n )\n k_cb0 = ceil(self.rf[2] / self.dil[2])\n self.cb0 = ConvBlock(\n ch_in=self.ch[1],\n ch_out=self.ch[2],\n k_size=k_cb0,\n strd=1,\n pad=((k_cb0 - 1) * self.dil[2] + 1) // 2,\n dilation=self.dil[2]\n )\n\n # 2nd instance of two TempConvBlocks and ConvBlock\n k_tcb10 = ceil(self.rf[3] / self.dil[3])\n self.tcb10 = TempConvBlock(\n ch_in=self.ch[2],\n ch_out=self.ch[3],\n k_size=k_tcb10,\n dil=self.dil[3],\n pad=((k_tcb10 - 1) * self.dil[3] + 1) // 2\n )\n k_tcb11 = ceil(self.rf[4] / self.dil[4])\n self.tcb11 = TempConvBlock(\n ch_in=self.ch[3],\n ch_out=self.ch[4],\n k_size=k_tcb11,\n dil=self.dil[4],\n pad=((k_tcb11 - 1) * self.dil[4] + 1) // 2\n )\n self.cb1 = ConvBlock(\n ch_in=self.ch[4],\n ch_out=self.ch[5],\n k_size=5,\n strd=2,\n pad=2\n )\n\n # 3td instance of TempConvBlock and ConvBlock\n k_tcb20 = ceil(self.rf[5] / self.dil[5])\n self.tcb20 = TempConvBlock(\n ch_in=self.ch[5],\n ch_out=self.ch[6],\n k_size=k_tcb20,\n dil=self.dil[5],\n pad=((k_tcb20 - 1) * self.dil[5] + 1) // 2\n )\n k_tcb21 = ceil(self.rf[6] / self.dil[6])\n self.tcb21 = TempConvBlock(\n ch_in=self.ch[6],\n ch_out=self.ch[7],\n k_size=k_tcb21,\n dil=self.dil[6],\n pad=((k_tcb21 - 1) * self.dil[6] + 1) // 2\n )\n self.cb2 = ConvBlock(\n ch_in=self.ch[7],\n ch_out=self.ch[8],\n k_size=5,\n strd=4,\n pad=4\n )\n\n # 1st instance of regressor\n self.regr0 = Regressor(\n ft_in=self.ch[8] * 4,\n 
ft_out=self.ch[9]\n )\n\n # 2nd instance of regressor\n self.regr1 = Regressor(\n ft_in=self.ch[9],\n ft_out=self.ch[10]\n )\n\n self.out_neuron = nn.Linear(\n in_features=self.ch[10],\n out_features=1\n )\n\n def forward(self, x):\n x = self.cb0(\n self.tcb01(\n self.tcb00(\n x\n )\n )\n )\n x = self.cb1(\n self.tcb11(\n self.tcb10(\n x\n )\n )\n )\n x = self.cb2(\n self.tcb21(\n self.tcb20(\n x\n )\n )\n )\n\n x = x.flatten(1)\n x = self.regr0(\n x\n )\n x = self.regr1(\n x\n )\n\n x = self.out_neuron(\n x\n )\n return x\n\n\nclass TempConvBlock(BaseModel):\n \"\"\"\n Temporal Convolutional Block composed of one temporal convolutional layers.\n The block is composed of :\n - Conv1d layer\n - Chomp1d layer\n - ReLU layer\n - BatchNorm1d layer\n\n :param ch_in: Number of input channels\n :param ch_out: Number of output channels\n :param k_size: Kernel size\n :param dil: Amount of dilation\n :param pad: Amount of padding\n \"\"\"\n\n def __init__(self, ch_in, ch_out, k_size, dil, pad):\n super(TempConvBlock, self).__init__()\n\n self.tcn0 = nn.Conv1d(\n in_channels=ch_in,\n out_channels=ch_out,\n kernel_size=k_size,\n dilation=dil,\n bias = False,\n padding=pad\n )\n self.relu0 = nn.ReLU6()\n self.bn0 = nn.BatchNorm1d(\n num_features=ch_out\n )\n\n def forward(self, x):\n x = self.relu0(self.bn0(self.tcn0(x)))\n return x\n\n\nclass ConvBlock(BaseModel):\n \"\"\"\n Convolutional Block composed of:\n - Conv1d layer\n - AvgPool1d layer\n - ReLU layer\n - BatchNorm1d layer\n\n :param ch_in: Number of input channels\n :param ch_out: Number of output channels\n :param k_size: Kernel size\n :param strd: Amount of stride\n :param pad: Amount of padding\n \"\"\"\n\n def __init__(self, ch_in, ch_out, k_size, strd, pad, dilation=1):\n super(ConvBlock, self).__init__()\n\n self.conv0 = nn.Conv1d(\n in_channels=ch_in,\n out_channels=ch_out,\n kernel_size=k_size,\n stride=strd,\n dilation=dilation,\n bias = False,\n padding=pad\n )\n self.pool0 = nn.AvgPool1d(\n kernel_size=2,\n stride=2,\n padding=0\n )\n self.relu0 = nn.ReLU6()\n self.bn0 = nn.BatchNorm1d(ch_out)\n\n def forward(self, x):\n x = self.relu0(self.bn0(self.pool0(self.conv0(x))))\n return x\n\n\nclass Regressor(BaseModel):\n \"\"\"\n Regressor block composed of :\n - Linear layer\n - ReLU layer\n - BatchNorm1d layer\n\n :param ft_in: Number of input channels\n :param ft_out: Number of output channels\n \"\"\"\n\n def __init__(self, ft_in, ft_out):\n super(Regressor, self).__init__()\n self.ft_in = ft_in\n self.ft_out = ft_out\n\n self.fc0 = nn.Linear(\n in_features=ft_in,\n out_features=ft_out,\n bias = False\n )\n\n self.relu0 = nn.ReLU6()\n self.bn0 = nn.BatchNorm1d(\n num_features=ft_out\n )\n\n def forward(self, x):\n x = self.relu0(self.bn0(self.fc0(x)))\n return x\n\n\nclass Chomp1d(BaseModel):\n \"\"\"\n Module that perform a chomping operation on the input tensor.\n It is used to chomp the amount of zero-padding added on the right of the input tensor, this operation is necessary to compute causal convolutions.\n :param chomp_size: amount of padding 0s to be removed\n \"\"\"\n\n def __init__(self, chomp_size):\n super(Chomp1d, self).__init__()\n self.chomp_size = chomp_size\n\n def forward(self, x):\n return x[:, :, :-self.chomp_size].contiguous()\n\n"
] | [
[
"torch.nn.AvgPool1d",
"torch.nn.Linear",
"torch.nn.BatchNorm1d",
"torch.nn.ReLU6",
"torch.nn.Conv1d"
]
] |
ai-systems/crossmodal_embedding | [
"5c61775531fd350c48a965450ab5e99b28deec5e"
] | [
"crossmodal_embedding/tasks/crossmodal/training_star_task.py"
] | [
"from prefect import Task\nfrom loguru import logger\nfrom tqdm import tqdm\nfrom crossmodal_embedding.models import CrossModalEmbedding, SiameseNet\nfrom crossmodal_embedding.models import InputData, InputDataTest\nfrom sklearn.metrics import precision_recall_fscore_support, f1_score\nimport torch.optim as optim\nimport torch.nn as nn\nimport torch\nimport torch.nn as nn\nfrom crossmodal_embedding.util.evaluation import (\n compute_map_basic,\n compute_map_with_unification,\n)\nfrom torch.utils.data import WeightedRandomSampler\nimport sys\nimport json\n\nfrom torch.utils.tensorboard import SummaryWriter\n\n\nclass TrainingTaskStar(Task):\n def create_weights(self, df):\n positives = 0\n negatives = 0\n weights = list()\n for index, row in df.iterrows():\n if row[\"score\"] == 0:\n negatives = negatives + 1\n else:\n positives = positives + 1\n\n weight_positive = 1.0 / float(positives)\n weight_negative = 1.0 / float(negatives)\n\n for index, row in df.iterrows():\n if row[\"score\"] == 0:\n weights.append(weight_negative)\n else:\n weights.append(weight_positive)\n return torch.tensor(weights)\n\n def run(\n self,\n train,\n test,\n dev,\n num_negatives,\n output_log,\n output_model,\n vocab_size,\n batch_size=10,\n num_epochs=5,\n learning_rate=0.0001,\n max_sequence_len=100,\n hidden_size=10,\n out_embedding=128,\n attention_heads=5,\n word_embedding=50,\n decay=0.01,\n ):\n\n logger.info(f\" Negative Examples: {num_negatives}\")\n logger.info(\"Let's train the Cross-Modal Embedding ! (^・ω・^ )\")\n # Device configuration\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n # Check for multi_GPUS\n multiple_gpus = 0\n\n train_class_weight = self.create_weights(train)\n\n train_dataset = InputData(train)\n logger.info(f\"TRAIN: {len(train_dataset)}\")\n dev_dataset = InputData(dev)\n logger.info(f\"DEV: {len(dev_dataset)}\")\n test_dataset = InputDataTest(test, vocab_size)\n logger.info(f\"TEST: {len(test_dataset)}\")\n sampler_train = WeightedRandomSampler(\n train_class_weight, len(train_class_weight)\n )\n\n # Data loader\n\n train_loader = torch.utils.data.DataLoader(\n dataset=train_dataset, batch_size=batch_size, sampler=sampler_train,\n )\n\n dev_loader = torch.utils.data.DataLoader(\n dataset=dev_dataset, batch_size=batch_size, shuffle=False\n )\n\n test_loader = torch.utils.data.DataLoader(\n dataset=test_dataset, batch_size=batch_size, shuffle=False\n )\n\n model = SiameseNet(\n out_embedding,\n batch_size,\n vocab_size,\n max_len=max_sequence_len,\n hidden_size=hidden_size,\n out_embedding=out_embedding,\n device=device,\n attention_heads=attention_heads,\n word_embedding=word_embedding,\n )\n\n if torch.cuda.device_count() > 1:\n logger.info(\n f\"**********Let's use {torch.cuda.device_count()} GPUs!********\"\n )\n multiple_gpus = 1\n model = nn.DataParallel(model)\n else:\n logger.info(\"********* Only one GPU *******\")\n\n model = model.to(device)\n\n # Loss and optimizer\n criterion = nn.NLLLoss()\n\n optimizer = torch.optim.AdamW(\n model.parameters(), lr=learning_rate, weight_decay=decay\n )\n\n scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n optimizer, \"min\", verbose=True, patience=1, cooldown=3\n )\n\n # Train the model\n best_value = 0\n all_best = dict()\n result_dict = dict()\n total_step = len(train_loader)\n for epoch in tqdm(range(num_epochs), desc=f\"Epoch\"):\n epoch_loss = 0.0\n running_loss = 0.0\n\n model.train()\n t = tqdm(iter(train_loader), leave=False, total=len(train_loader))\n for (\n i,\n (statement1, st1_mask, 
st1_len, statement2, st2_mask, st2_len, score),\n ) in enumerate(t):\n\n # Move tensors to the configured device\n statement1 = statement1.to(device)\n st1_mask = st1_mask.to(device)\n st1_len = st1_len.to(device)\n statement2 = statement2.to(device)\n st2_mask = st2_mask.to(device)\n st2_len = st2_len.to(device)\n\n score = score.to(device)\n optimizer.zero_grad()\n sim = model(\n statement1, st1_mask, st1_len, statement2, st2_mask, st2_len\n )\n\n loss = criterion(sim, score)\n\n loss.backward()\n optimizer.step()\n\n epoch_loss += loss.item()\n\n # print statistics\n running_loss += loss.item()\n if i % 10 == 0: \n t.set_description(\"loss: {:.4f}\".format(running_loss / 10))\n running_loss = 0\n\n logger.info(\n f\"********Epoch: {epoch+1} *****Loss: {epoch_loss / len(train_loader)}\"\n )\n result_dict[epoch] = dict()\n result_dict[epoch][\"train_loss\"] = epoch_loss / len(train_loader)\n\n scheduler.step(epoch_loss / len(train_loader))\n if (epoch + 1) % 1 == 0:\n model.eval()\n with torch.no_grad():\n\n logger.info(\"Evaluating on Train set!\")\n t = tqdm(iter(train_loader), leave=False, total=len(train_loader))\n y_pred_list = []\n y_real_list = []\n for (\n i,\n (\n statement1,\n st1_mask,\n st1_len,\n statement2,\n st2_mask,\n st2_len,\n score,\n ),\n ) in enumerate(t):\n\n # Move tensors to the configured device\n statement1 = statement1.to(device)\n st1_mask = st1_mask.to(device)\n st1_len = st1_len.to(device)\n statement2 = statement2.to(device)\n st2_mask = st2_mask.to(device)\n st2_len = st2_len.to(device)\n\n y_real_list.extend(score.cpu().tolist())\n score = score.to(device)\n\n sim = model(\n statement1, st1_mask, st1_len, statement2, st2_mask, st2_len\n )\n y_dev_pred = torch.argmax(sim, dim=1)\n # y_dev_pred = torch.argmax(sim, dim=1)\n y_pred_list.extend(y_dev_pred.cpu().tolist())\n\n f1_value = f1_score(y_real_list, y_pred_list)\n (precision, recall, _, _,) = precision_recall_fscore_support(\n y_real_list, y_pred_list, average=\"binary\"\n )\n # logger.info(\"**** TRAINING SET **** \")\n # logger.info(f\"F1-value: {f1_value}\")\n # logger.info(f\"Precision: {precision}\")\n # logger.info(f\"Recall: {recall}\")\n\n logger.info(\"Evaluating on Dev set!\")\n\n t = tqdm(iter(dev_loader), leave=False, total=len(dev_loader))\n y_pred_list = []\n y_real_list = []\n epoch_test_loss = 0.0\n for (\n i,\n (\n statement1,\n st1_mask,\n st1_len,\n statement2,\n st2_mask,\n st2_len,\n score,\n ),\n ) in enumerate(t):\n\n statement1 = statement1.to(device)\n st1_mask = st1_mask.to(device)\n st1_len = st1_len.to(device)\n statement2 = statement2.to(device)\n st2_mask = st2_mask.to(device)\n st2_len = st2_len.to(device)\n\n y_real_list.extend(score.cpu().tolist())\n score = score.to(device)\n\n sim = model(\n statement1, st1_mask, st2_len, statement2, st2_mask, st2_len\n )\n loss_test = criterion(sim, score)\n epoch_test_loss += loss_test.item()\n y_dev_pred = torch.argmax(sim, dim=1)\n y_pred_list.extend(y_dev_pred.cpu().tolist())\n\n logger.info(f\"DEV LOSS: {epoch_test_loss / len(dev_loader)}\")\n # scheduler.step(epoch_test_loss / len(dev_loader))\n f1_value = f1_score(y_real_list, y_pred_list)\n (precision, recall, _, _,) = precision_recall_fscore_support(\n y_real_list, y_pred_list, average=\"binary\"\n )\n # logger.info(\"**** DEV SET **** \")\n # logger.info(f\"F1-value: {f1_value}\")\n # logger.info(f\"Precision: {precision.tolist()}\")\n # logger.info(f\"Recall: {recall.tolist()}\")\n result_dict[epoch][\"f1\"] = f1_value\n result_dict[epoch][\"precision\"] = 
precision.tolist()\n result_dict[epoch][\"recall\"] = recall.tolist()\n\n if f1_value > best_value:\n best_value = f1_value\n model = model.to(\"cpu\")\n if multiple_gpus:\n torch.save(\n model.module.state_dict(), f\"./models/{output_model}\",\n )\n else:\n torch.save(\n model.state_dict(), f\"./models/{output_model}\",\n )\n\n all_best[\"f1\"] = f1_value\n all_best[\"precision\"] = precision.tolist()\n all_best[\"recall\"] = recall.tolist()\n model = model.to(device)\n best_model = model\n\n with torch.no_grad():\n best_model.eval()\n logger.info(\"Evaluating on Test set!\")\n all_embeddings = dict()\n t = tqdm(iter(test_loader), leave=False, total=len(test_loader))\n y_pred_list = []\n y_real_list = []\n for (\n i,\n (statement1, st1_mask, st1_len, statement2, st2_mask, st2_len, score),\n ) in enumerate(t):\n\n # Move tensors to the configured device\n statement1 = statement1.to(device)\n st1_mask = st1_mask.to(device)\n st1_len = st1_len.to(device)\n statement2 = statement2.to(device)\n st2_mask = st2_mask.to(device)\n st2_len = st2_len.to(device)\n\n y_real_list.extend(score.cpu().tolist())\n score = score.to(device)\n\n sim = best_model(\n statement1, st1_mask, st1_len, statement2, st2_mask, st2_len\n )\n # y_dev_pred = torch.round(sim)\n y_dev_pred = torch.argmax(sim, dim=1)\n y_pred_list.extend(y_dev_pred.cpu().tolist())\n\n f1_value = f1_score(y_real_list, y_pred_list)\n (precision, recall, _, _,) = precision_recall_fscore_support(\n y_real_list, y_pred_list, average=\"binary\"\n )\n\n logger.info(\"****** PARAMETERS ********\")\n logger.info(f\"Num negatives: {num_negatives}\")\n logger.info(f\"Batch_size: {batch_size}\")\n logger.info(f\"Max len: {max_sequence_len}\")\n logger.info(f\"Word embedding: {word_embedding}\")\n logger.info(f\"Out embedding: {out_embedding}\")\n logger.info(f\"Hidden Size: {hidden_size}\")\n logger.info(f\"Decay: {decay}\")\n logger.info(f\"ATT heads: {attention_heads}\")\n logger.info(f\"Learning rate: {learning_rate}\")\n logger.info(\"****** BEST RESULTS TEST******\")\n logger.info(f\"F1 SCORE {f1_value}\")\n logger.info(f\"PRECISION: {precision}\")\n logger.info(f\"RECALL: {recall}\")\n all_best[\"f1_test\"] = f1_value\n all_best[\"precision_test\"] = precision.tolist()\n all_best[\"recall_test\"] = recall.tolist()\n\n logger.info(\"******** BEST RESULTS DEV **********\")\n logger.info(all_best)\n\n with open(f\"./logs/{output_log}\", \"w\") as f:\n json.dump(result_dict, f)\n with open(f\"./logs/best_{output_log}\", \"w\") as f:\n json.dump(result_dict, f)\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.nn.NLLLoss",
"sklearn.metrics.precision_recall_fscore_support",
"torch.argmax",
"torch.no_grad",
"torch.tensor",
"torch.cuda.device_count",
"sklearn.metrics.f1_score",
"torch.cuda.is_available",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.nn.DataParallel"
]
] |
YetheYe/Mask_RCNN | [
"6895c617af13ecbf0bb27790e29a6271725cb34f"
] | [
"config.py"
] | [
"\"\"\"\nMask R-CNN\nBase Configurations class.\n\nCopyright (c) 2017 Matterport, Inc.\nLicensed under the MIT License (see LICENSE for details)\nWritten by Waleed Abdulla\n\"\"\"\n\nimport math\nimport numpy as np\n\n\n# Base Configuration Class\n# Don't use this class directly. Instead, sub-class it and override\n# the configurations you need to change.\n\nclass Config(object):\n \"\"\"Base configuration class. For custom configurations, create a\n sub-class that inherits from this one and override properties\n that need to be changed.\n \"\"\"\n # Name the configurations. For example, 'COCO', 'Experiment 3', ...etc.\n # Useful if your code needs to do things differently depending on which\n # experiment is running.\n NAME = None # Override in sub-classes\n\n # NUMBER OF GPUs to use. For CPU training, use 1\n GPU_COUNT = 1\n\n # Number of images to train with on each GPU. A 12GB GPU can typically\n # handle 2 images of 1024x1024px.\n # Adjust based on your GPU memory and image sizes. Use the highest\n # number that your GPU can handle for best performance.\n IMAGES_PER_GPU = 2\n\n # Number of training steps per epoch\n # This doesn't need to match the size of the training set. Tensorboard\n # updates are saved at the end of each epoch, so setting this to a\n # smaller number means getting more frequent TensorBoard updates.\n # Validation stats are also calculated at each epoch end and they\n # might take a while, so don't set this too small to avoid spending\n # a lot of time on validation stats.\n STEPS_PER_EPOCH = 1000\n\n # Number of validation steps to run at the end of every training epoch.\n # A bigger number improves accuracy of validation stats, but slows\n # down the training.\n VALIDATION_STEPS = 50\n\n # Backbone network architecture\n # Supported values are: resnet50, resnet101\n BACKBONE = \"resnet101\"\n\n # The strides of each layer of the FPN Pyramid. These values\n # are based on a Resnet101 backbone.\n BACKBONE_STRIDES = [4, 8, 16, 32, 64]\n\n # Number of classification classes (including background)\n NUM_CLASSES = 1 # Override in sub-classes\n\n # Length of square anchor side in pixels\n RPN_ANCHOR_SCALES = (128, 256, 512)\n\n # Ratios of anchors at each cell (width/height)\n # A value of 1 represents a square anchor, and 0.5 is a wide anchor\n RPN_ANCHOR_RATIOS = [0.5, 1, 2]\n\n # Anchor stride\n # If 1 then anchors are created for each cell in the backbone feature map.\n # If 2, then anchors are created for every other cell, and so on.\n RPN_ANCHOR_STRIDE = 1\n\n # Non-max suppression threshold to filter RPN proposals.\n # You can increase this during training to generate more propsals.\n RPN_NMS_THRESHOLD = 0.7\n\n # How many anchors per image to use for RPN training\n RPN_TRAIN_ANCHORS_PER_IMAGE = 256\n\n # ROIs kept after non-maximum supression (training and inference)\n POST_NMS_ROIS_TRAINING = 2000\n POST_NMS_ROIS_INFERENCE = 1000\n\n # If enabled, resizes instance masks to a smaller size to reduce\n # memory load. Recommended when using high-resolution images.\n USE_MINI_MASK = True\n MINI_MASK_SHAPE = (56, 56) # (height, width) of the mini-mask\n\n # Input image resizing\n # Images are resized such that the small side is IMAGE_MIN_DIM and\n # the long side is <= IMAGE_MAX_DIM. 
If both conditions can't be\n # satisfied at the same time then IMAGE_MAX_DIM is enforced.\n # Resizing modes:\n # none: No resizing\n # square: Pad with zeros to make it a square (MAX_DIM, MAX_DIM)\n # TODO: currently, only 'square' mode is supported\n IMAGE_RESIZE_MODE = \"square\"\n IMAGE_MIN_DIM = 800\n IMAGE_MAX_DIM = 1024\n\n # Image mean (RGB)\n MEAN_PIXEL = np.array([123.7, 116.8, 103.9])\n\n # Number of ROIs per image to feed to classifier/mask heads\n # The Mask RCNN paper uses 512 but often the RPN doesn't generate\n # enough positive proposals to fill this and keep a positive:negative\n # ratio of 1:3. You can increase the number of proposals by adjusting\n # the RPN NMS threshold.\n TRAIN_ROIS_PER_IMAGE = 200\n\n # Percent of positive ROIs used to train classifier/mask heads\n ROI_POSITIVE_RATIO = 0.33\n\n # Pooled ROIs\n POOL_SIZE = 7\n MASK_POOL_SIZE = 14\n MASK_SHAPE = [28, 28]\n\n # Maximum number of ground truth instances to use in one image\n MAX_GT_INSTANCES = 100\n\n # Bounding box refinement standard deviation for RPN and final detections.\n RPN_BBOX_STD_DEV = np.array([0.1, 0.1, 0.2, 0.2])\n BBOX_STD_DEV = np.array([0.1, 0.1, 0.2, 0.2])\n\n # Max number of final detections\n DETECTION_MAX_INSTANCES = 100\n\n # Minimum probability value to accept a detected instance\n # ROIs below this threshold are skipped\n DETECTION_MIN_CONFIDENCE = 0.5\n\n # Non-maximum suppression threshold for detection\n DETECTION_NMS_THRESHOLD = 0.3\n\n # Learning rate and momentum\n # The Mask RCNN paper uses lr=0.02, but on TensorFlow it causes\n # weights to explode. Likely due to differences in optimzer\n # implementation.\n LEARNING_RATE = 0.001\n LEARNING_MOMENTUM = 0.9\n\n # Weight decay regularization\n WEIGHT_DECAY = 0.0001\n\n # Use RPN ROIs or externally generated ROIs for training\n # Keep this True for most situations. Set to False if you want to train\n # the head branches on ROI generated by code rather than the ROIs from\n # the RPN. For example, to debug the classifier head without having to\n # train the RPN.\n USE_RPN_ROIS = True\n\n # Train or freeze batch normalization layers\n # None: Train BN layers. This is the normal mode\n # False: Freeze BN layers. Good when using a small batch size\n # True: (don't use). Set layer in training mode even when inferencing\n TRAIN_BN = False # Defaulting to False since batch size is often small\n\n # Gradient norm clipping\n GRADIENT_CLIP_NORM = 5.0\n\n def __init__(self):\n \"\"\"Set values of computed attributes.\"\"\"\n # Effective batch size\n self.BATCH_SIZE = self.IMAGES_PER_GPU * self.GPU_COUNT\n\n # Input image size\n if self.IMAGE_RESIZE_MODE == \"crop\":\n self.IMAGE_SHAPE = np.array([self.IMAGE_MIN_DIM, self.IMAGE_MIN_DIM, 3])\n else:\n self.IMAGE_SHAPE = np.array([self.IMAGE_MAX_DIM, self.IMAGE_MAX_DIM, 3])\n\n # Image meta data length\n # See compose_image_meta() for details\n self.IMAGE_META_SIZE = 1 + 3 + 3 + 4 + 1 + self.NUM_CLASSES\n\n def display(self):\n \"\"\"Display Configuration values.\"\"\"\n print(\"\\nConfigurations:\")\n for a in dir(self):\n if not a.startswith(\"__\") and not callable(getattr(self, a)):\n print(\"{:30} {}\".format(a, getattr(self, a)))\n print(\"\\n\")\n"
] | [
[
"numpy.array"
]
] |
ogrenenmakine/VCL-PL-Semi-Supervised-Learning-from-Noisy-Web-Data-with-Variational-Contrastive-Learning | [
"baef25837ce7e073d03f69a095d1992aa18dd2d5"
] | [
"recognition/alexnet_PD_finetuning.py"
] | [
"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\nimport math\nfrom torch import nn\nfrom torch.autograd import Variable\nimport torch\nimport torch.nn.functional as F\nimport torchvision\nimport torch.utils.data as data\nimport torchvision.transforms as transforms\nimport torchvision.utils as vutils\nimport numpy as np\nfrom PIL import Image\nimport os\nimport matplotlib.pyplot as plt\nimport time\nfrom torchsummary import summary\nimport config\nfrom facenet_pytorch import training\nfrom torch.utils.data import DataLoader, SubsetRandomSampler\nfrom torch import optim\nfrom torch.optim.lr_scheduler import MultiStepLR\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torchvision import datasets, transforms\nfrom PIL import Image\nimport glob\nfrom utils.collate import collate_custom\nimport torchvision.models as models\nfrom util import AverageMeter, learning_rate_decay, Logger\nimport collections\n\n# In[ ]:\n\ntransform = transforms.Compose([\n transforms.RandomHorizontalFlip(),\n transforms.RandomApply([\n transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1)\n ], p=0.8),\n transforms.RandomGrayscale(0.2),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])\n ])\n\n\n# Root directory for dataset\ndata_root = \"/home/mehmetyavuz/datasets/CelebA128/\"\nattr_root = \"/home/mehmetyavuz/datasets/list_attr_celeba.txt\"\n# Number of workers for dataloader\nworkers = 8\n\n# Batch size during training\nbatch_size = 64\n\n# Spatial size of training images. All images will be resized to this\n# size using a transformer.\nimage_size = (128,128)\nepochs = 100\n\n\n# In[ ]:\n\n\nclass CelebA(data.Dataset):\n def __init__(self, data_path, attr_path, image_size, mode, selected_attrs):\n super(CelebA, self).__init__()\n self.data_path = data_path\n att_list = open(attr_path, 'r', encoding='utf-8').readlines()[1].split()\n atts = [att_list.index(att) + 1 for att in selected_attrs]\n images = np.loadtxt(attr_path, skiprows=2, usecols=[0], dtype=np.str)\n labels = np.loadtxt(attr_path, skiprows=2, usecols=atts, dtype=np.int)\n \n self.tf = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ])\n self.tf_a = transforms.Compose([\n transforms.RandomHorizontalFlip(),\n transforms.RandomApply([\n transforms.ColorJitter(hue=.05, saturation=.05),\n ], p=0.8),\n transforms.RandomGrayscale(0.2),\n ]) \n if mode == 'train':\n self.images = images[:1627]\n self.labels = labels[:1627]\n\n if mode == 'valid':\n self.images = images[162770:182637]\n self.labels = labels[162770:182637]\n\n if mode == 'test':\n self.images = images[182637:]\n self.labels = labels[182637:]\n \n self.length = len(self.images)\n def __getitem__(self, index):\n if index < 16277:\n img = self.tf(self.tf_a(Image.open(os.path.join(self.data_path, self.images[index]))))\n else:\n img = self.tf(Image.open(os.path.join(self.data_path, self.images[index])))\n att = torch.tensor((self.labels[index] + 1) // 2)\n return img, att.to(torch.float32)\n def __len__(self):\n return self.length\n\n\n# In[ ]:\n\n\nattrs_default = [\"5_o_Clock_Shadow\", \"Arched_Eyebrows\", \"Attractive\", \"Bags_Under_Eyes\", \"Bald\", \"Bangs\", \"Big_Lips\", \"Big_Nose\", \"Black_Hair\", \"Blond_Hair\", \"Blurry\", \"Brown_Hair\", \"Bushy_Eyebrows\", \"Chubby\", \"Double_Chin\", \"Eyeglasses\", \"Goatee\", \"Gray_Hair\", \"Heavy_Makeup\", \"High_Cheekbones\", \"Male\", \"Mouth_Slightly_Open\", \"Mustache\", \"Narrow_Eyes\", \"No_Beard\", 
\"Oval_Face\", \"Pale_Skin\", \"Pointy_Nose\", \"Receding_Hairline\", \"Rosy_Cheeks\", \"Sideburns\", \"Smiling\", \"Straight_Hair\", \"Wavy_Hair\", \"Wearing_Earrings\", \"Wearing_Hat\", \"Wearing_Lipstick\", \"Wearing_Necklace\", \"Wearing_Necktie\", \"Young\"]\n\n\n# In[ ]:\n\n\ndataset = CelebA(data_root, attr_root, image_size, 'train', attrs_default)\ntrain_loader = torch.utils.data.DataLoader(dataset, num_workers=workers, \n batch_size=batch_size, pin_memory=True, collate_fn=collate_custom,\n drop_last=True, shuffle=True)\ndataset = CelebA(data_root, attr_root, image_size, 'valid', attrs_default)\nval_loader = torch.utils.data.DataLoader(dataset,\n batch_size=batch_size,\n shuffle=False,\n num_workers=workers)\ndataset = CelebA(data_root, attr_root, image_size, 'test', attrs_default)\ntest_loader = torch.utils.data.DataLoader(dataset,\n batch_size=batch_size,\n shuffle=False,\n num_workers=workers)\n\n\n# In[ ]:\n\n\n# Decide which device we want to run on\ndevice = torch.device(\"cuda:0\")\n\n\n# In[ ]:\n\nresnet = models.__dict__['alexnet'](pretrained=True)\nresnet.classifier[6] = nn.Linear(4096,40,bias=True)\nresnet = torch.nn.DataParallel(resnet)\nresnet.cuda()\n\nresnet.load_state_dict(torch.load('alexnet_pseudolabeling_001_0_normal.pth'))\n\n\n# In[ ]:\n\n\noptimizer = optim.Adam(resnet.parameters(), lr=0.00001)\nscheduler = None\n\n\n# In[ ]:\n\n\nloss_fn = torch.nn.BCEWithLogitsLoss()\nmetrics = {\n 'acc': training.accuracy_ml\n} \n\n\n# In[ ]:\n\n\nprint('\\n\\nInitial')\nprint('-' * 10)\n\nval_loss = 1\nfor epoch in range(epochs):\n print('\\nEpoch {}/{}'.format(epoch + 1, epochs))\n print('-' * 10)\n\n resnet.train() \n training.pass_epoch(\n resnet, loss_fn, train_loader, optimizer, scheduler,\n batch_metrics=metrics, show_running=True, device=device,\n #writer=writer\n )\n \n #if epoch + 1 >= 30:\n resnet.eval()\n val_metrics = training.pass_epoch(\n resnet, loss_fn, val_loader,\n batch_metrics=metrics, show_running=True, device=device,\n #writer=writer\n )\n\n if val_metrics[0].item() < val_loss:\n val_loss = val_metrics[0].item()\n print('Test set Accuracy Lowest Validation Loss:')\n training.pass_epoch(\n resnet, loss_fn, test_loader,\n batch_metrics=metrics, show_running=True, device=device,\n #writer=writer\n )\n torch.save(resnet.state_dict(), \"alexnet_PD_001_0_normal.pth\")\n\n#writer.close()\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.nn.Linear",
"torch.load",
"torch.tensor",
"torch.nn.BCEWithLogitsLoss",
"torch.nn.DataParallel",
"torch.device",
"numpy.loadtxt"
]
] |
banboooo044/optimization | [
"a15614b367712d6046311eac311214d27999fc7c"
] | [
"module/LP.py"
] | [
"# date : 2/11/2019\n# author : takeshi\nimport pandas as pd\nimport numpy as np\nfrom IPython.display import display\n\ndef linprog(c,A,comp,b,maximize=True):\n '''\n Maximize(or Minimize) a linear objective function subject to linear equality and inequality constraints.\n\n Linear Programming is intended to solve the following problem form:\n\n Maximize: c * x\n\n Subject to: A * x [comp] b , (x >= 0)\n\n Parameters\n ----------\n c : array_like\n Coefficients of the linear objective function to be maximized.\n A : array_like\n 2-D array which, when matrix-multiplied by x, \n gives the values of constraints at x.\n comp : array_like\n 1-D array of values representing a sign of equality in each constraint (row).\n if value is -1 , it means (<=)\n if value is 0 , it means (=)\n if value is 1 , it means (=>)\n b : array_like\n 1-D array of values representing the RHS of each constraint (row).\n\n maximize : bool, optional\n If True, the linear objective function is to be maximized.\n If False, the linear objective function is to be minimized.\n (the default is True)\n \n Returns\n -------\n pandas.DataFrame\n final simplex table. \n Optimal solution is table['Values'] , and Optimal value is table['z','Values'].\n if x is (1 * n) matrix , x_i ( i >= n ) is Slack Variable.\n '''\n\n # optimize\n def optimize(table,target):\n if not __debug__:\n if target == 'w':\n print(\"Phase 1 : find initial solution\")\n else:\n if maximize:\n print(\"Phase 2 : Maximize the liner objective function\")\n else:\n print(\"Phase 2 : Minimize the liner objective function\")\n baseIndex = table.index.values\n nonBaseIndex = np.setdiff1d(np.vectorize(lambda i : 'x' + str(i))(np.arange(len(table.columns)-1)) ,baseIndex)\n for i in range(100000):\n if not __debug__:\n print(\"roop {0}\".foramt(i))\n display(table)\n nonBaseTable = table.loc[target,nonBaseIndex]\n if ((nonBaseTable < -1e-8).values.sum()) == 0:\n return table\n # 新たな基底変数\n nextIndex = (nonBaseTable.map(lambda x: -x)).idxmax(axis=1)\n # 取り替えられる基底変数\n idx = table.index.get_loc(target)\n tmpLine = (table['Value'].iloc[:idx] / table.loc[ : ,nextIndex].iloc[:idx] )\n prevIndex = str(tmpLine.map(lambda x: float('inf') if x < 0 else x ).idxmin())\n nonBaseIndex[np.where(nonBaseIndex == nextIndex)] = prevIndex\n table = table.rename(index={prevIndex : nextIndex})\n table.loc[nextIndex] /= table.at[nextIndex,nextIndex]\n pivotLine = table.loc[nextIndex]\n unPivotIndex = list(table.index.drop(nextIndex))\n table.loc[unPivotIndex] = table.loc[unPivotIndex].apply(lambda x: x - (x.at[nextIndex]*pivotLine) ,axis=1)\n\n print(\"cannot find base solutions\")\n\n if not maximize: \n c = (-c)\n n,m = A.shape\n slackVariableNum = 0\n artificialVariableNum = 0\n slackVariable = [0] * n\n artificialVariable = [0] * n\n for i in range(n):\n # bの値を全て正の値にしておく\n if b[i] < 0:\n A[i] = -A[i]\n comp[i] = -comp[i]\n b[i] = -b[i]\n # < ( -> スラック変数を導入 )\n if comp[i] == -1:\n slackVariableNum += 1\n slackVariable[i] = 1\n # = ( -> 人為変数を導入 )\n elif comp[i] == 0:\n artificialVariableNum += 1\n artificialVariable[i] = 1\n # > ( -> スラック変数,人為変数を導入 )\n else:\n slackVariableNum += 1\n artificialVariableNum += 1\n slackVariable[i] = -1\n artificialVariable[i] = 1\n\n variableNum = c.shape[0] + slackVariableNum + artificialVariableNum\n addVariableNum = slackVariableNum + artificialVariableNum\n\n # Valueを求める.\n baseIndex = np.empty(n)\n baseValue = np.empty(n)\n A_ = np.append(A , np.zeros((n,addVariableNum)),axis=1)\n slackIter = c.shape[0] \n artificialIter = c.shape[0] + 
slackVariableNum\n\n # (スラック変数 < 人為変数) の優先順位で基底変数に選ぶ.\n # すると , i 本目の制約条件式のみに登場する変数を選ぶことができる.\n # baseIndex[i] := i 本目の制約条件式のみに登場する変数の番号\n # baseValue[i] := i本目の制約条件式のみに登場する変数の値 ( = Value = b[i] ) となる.\n for i in range(n):\n if slackVariable[i] != 0:\n A_[i,slackIter] = slackVariable[i]\n # 1の場合\n if slackVariable[i] > 0:\n baseIndex[i],baseValue[i] = slackIter, b[i]\n slackIter += 1\n \n if artificialVariable[i] != 0:\n A_[i,artificialIter] = artificialVariable[i]\n baseIndex[i],baseValue[i] = artificialIter, b[i]\n artificialIter += 1 \n\n # フェーズ1 (Valueを見つける)\n # 目的関数の値をzとおく\n # Valueの列を追加\n exA = np.append(baseValue.reshape(n,1),A_,axis=1)\n # zの行を追加\n c_ = np.array([0]*(c.shape[0] + slackVariableNum) + [-1]*(artificialVariableNum))\n c_ = c_[np.vectorize(int)(baseIndex)]\n w = (c_ @ exA).reshape(1,variableNum+1)\n z = np.append(np.append(np.zeros(1),-c),np.array([0]*addVariableNum)).reshape(1,variableNum+1)\n table = np.append(np.append(exA,w,axis=0),z,axis=0)\n # データフレームにする\n df = pd.DataFrame(table,\n columns=['Value']+[ 'x' + str(i) for i in range(variableNum)],\n index= list(np.vectorize(lambda i: 'x' + str(int(i)))(baseIndex)) + ['w','z']\n )\n table = optimize(df,'w')\n if artificialVariableNum != 0:\n table = table.iloc[:,:-artificialVariableNum]\n variableNum -= artificialVariableNum\n table = table.drop('w')\n result = optimize(table,'z')\n if not maximize:\n result['Value']['z'] = -result['Value']['z']\n return result\n\n## Example\nif __name__ == '__main__':\n # maximize 2 * x_0 + 3 * x_1\n # constraints : \n # 1 * x_0 + 2 * x_1 <= 10\n # 2 * x_0 + 1 * x_0 <= 8\n # ( x_0 >= 0 , x_1 >= 0)\n\n c = np.array([ 2,3])\n A = np.array([ [1,2],\n [2,1] ])\n comp = np.array([-1,-1])\n b = np.array([10,8])\n\n # solve\n df = linprog(c,A,comp,b,True)\n # result\n print(df)\n "
] | [
[
"numpy.vectorize",
"numpy.append",
"numpy.empty",
"numpy.zeros",
"numpy.array",
"numpy.where"
]
] |
qftphys/Software-for-visualising-magnetic-layers | [
"7e4c5680b8e87aa677bdf4c912cbccdcb11b09a3"
] | [
"Widgets/openGL_widgets/VectorGLContext.py"
] | [
"from PyQt5.QtWidgets import QWidget\n\nfrom Widgets.openGL_widgets.AbstractGLContext import AbstractGLContext\n\nfrom ColorPolicy import ColorPolicy\n\nfrom ctypes import c_void_p\nfrom PyQt5.Qt import Qt\nfrom PyQt5.QtCore import QPoint, QThread\n\nfrom cython_modules.color_policy import multi_iteration_normalize\nfrom pattern_types.Patterns import AbstractGLContextDecorators\n\nimport numpy as np\nimport OpenGL.GLU as glu\nimport OpenGL.GL as gl\nimport math as mt\nfrom multiprocessing import Pool\nfrom ColorPolicy import ColorPolicy\n\nclass VectorGLContext(AbstractGLContext, QWidget):\n def __init__(self, data_dict):\n super().__init__()\n super().shareData(**data_dict)\n self.prerendering_calculation()\n # self.drawing_function = self.slow_arrow_draw\n self.drawing_function = self.vbo_arrow_draw\n\n def prerendering_calculation(self):\n super().prerendering_calculation()\n if self.normalize:\n VectorGLContext.normalize_specification(self.color_vectors, vbo=True)\n self.interleaved = ColorPolicy.apply_vbo_interleave_format(self.vectors_list,\n self.color_vectors)\n self.buffers = None\n ## pad the color\n self.color_vectors = ColorPolicy.apply_vbo_format(self.color_vectors, k=2)\n self.color_vertices = len(self.vectors_list)\n self.vertices = self.color_vertices*2\n self.color_buffer_len = len(self.color_vectors[0])*4\n self.inter_buffer_len = len(self.interleaved[0])*4\n\n self.__FLOAT_BYTE_SIZE__ = 8\n\n @AbstractGLContextDecorators.recording_decorator\n def slow_arrow_draw(self):\n gl.glLineWidth(2*self.scale)\n gl.glPointSize(3*self.scale)\n for vector, color in zip(self.vectors_list,\n self.color_vectors[self.i]):\n if not np.any(color):\n continue\n self.base_arrow(vector, color)\n\n def base_arrow(self, vector, color):\n gl.glColor3f(*color)\n gl.glBegin(gl.GL_LINES)\n gl.glVertex3f(*vector)\n gl.glVertex3f(vector[0]+color[0], vector[1]+color[1],\n vector[2]+color[2])\n gl.glEnd()\n gl.glBegin(gl.GL_POINTS)\n gl.glVertex3f(vector[0]+color[0], vector[1]+color[1],\n vector[2]+color[2])\n gl.glEnd()\n\n def standard_vbo_draw(self):\n gl.glEnableClientState(gl.GL_COLOR_ARRAY)\n gl.glEnableClientState(gl.GL_VERTEX_ARRAY)\n\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.buffers[1])\n gl.glColorPointer(3, gl.GL_FLOAT, 0, None)\n\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.buffers[0])\n gl.glVertexPointer(3, gl.GL_FLOAT, 0, None)\n gl.glDrawArrays(gl.GL_LINES, 0, int(self.vertices))\n\n # now the points\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.buffers[1])\n gl.glColorPointer(3, gl.GL_FLOAT, 3*self.__FLOAT_BYTE_SIZE__, None)\n\n # stride is 3 bytes (3 floats) VVVCCCVVVCCC etc...\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.buffers[0])\n # offset is at 3 indices, so points at 4th vector 3(vertices)*4\n gl.glVertexPointer(3, gl.GL_FLOAT, 3*self.__FLOAT_BYTE_SIZE__,\n c_void_p(4*3))\n gl.glDrawArrays(gl.GL_POINTS, 0, int(self.color_vertices))\n\n gl.glDisableClientState(gl.GL_COLOR_ARRAY)\n gl.glDisableClientState(gl.GL_VERTEX_ARRAY)\n\n\n def vbo_arrow_draw(self):\n if self.buffers is None:\n self.buffers = self.create_vbo()\n else:\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.buffers[0])\n gl.glBufferSubData(gl.GL_ARRAY_BUFFER, 0, self.inter_buffer_len,\n np.array(self.interleaved[self.i],\n dtype='float32').flatten())\n\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.buffers[1])\n gl.glBufferSubData(gl.GL_ARRAY_BUFFER, 0, self.color_buffer_len,\n np.array(self.color_vectors[self.i],\n dtype='float32').flatten())\n\n self.standard_vbo_draw()\n\n def create_vbo(self):\n buffers = gl.glGenBuffers(2)\n 
gl.glLineWidth(2*self.scale)\n gl.glPointSize(3*self.scale)\n # vertices buffer\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, buffers[0])\n gl.glBufferData(gl.GL_ARRAY_BUFFER,\n np.array(self.interleaved[self.i],\n dtype='float32').flatten(),\n gl.GL_DYNAMIC_DRAW)\n # color buffer\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, buffers[1])\n gl.glBufferData(gl.GL_ARRAY_BUFFER,\n np.array(self.color_vectors[self.i],\n dtype='float32').flatten(),\n gl.GL_DYNAMIC_DRAW)\n return buffers\n"
] | [
[
"numpy.array",
"numpy.any"
]
] |
BitJetKit/universe | [
"cc9ce6ec241821bfb0f3b85dd455bd36e4ee7a8c"
] | [
"universe/rewarder/rewarder_session.py"
] | [
"from autobahn.twisted import websocket\nimport logging\nimport numpy as np\nimport threading\nimport time\n\nfrom twisted.python import failure\nfrom twisted.internet import defer, endpoints\nimport twisted.internet.error\n\nfrom universe import utils\nfrom universe.twisty import reactor\nfrom universe.rewarder import connection_timer, env_status, reward_buffer, rewarder_client\nfrom universe.utils import display\n\nlogger = logging.getLogger(__name__)\nextra_logger = logging.getLogger('universe.extra.'+__name__)\n\ndef _ping(client):\n return client.send('v0.control.ping', {}, expect_reply=True)\n\nclass RewarderSession(object):\n def __init__(self):\n self.lock = threading.RLock()\n\n self.i = 0\n\n # Mutated by main thread exclusively\n self.names_by_id = {}\n self.reward_buffers = {}\n self.env_statuses = {}\n self.errors = {}\n self.networks = {}\n\n self.clients = {}\n\n def close(self, name=None, reason=u'closed by RewarderSession.close'):\n if name is None:\n names = list(self.names_by_id.values())\n else:\n logger.info('[%s] Closing rewarder connection', name)\n names = [name]\n self.ids_by_name = {name: id for id, name in self.names_by_id.items()}\n\n for name in names:\n with self.lock:\n id = self.ids_by_name.pop(name, None)\n if id is None:\n # already closed\n continue\n\n del self.names_by_id[id]\n del self.reward_buffers[id]\n del self.env_statuses[id]\n self.errors.pop(id, None)\n\n network = self.networks.pop(id)\n network.close()\n\n client = self.clients.pop(id, None)\n if client is not None:\n reactor.callFromThread(client.close, reason=reason)\n\n def connect(self, name, address, label, password, env_id=None, seed=None, fps=60,\n start_timeout=None, observer=False, skip_network_calibration=False):\n if name in self.reward_buffers:\n self.close(name, reason='closing previous connection to reconnect with the same name')\n\n network = Network()\n self.names_by_id[self.i] = name\n self.reward_buffers[self.i] = reward_buffer.RewardBuffer(label)\n self.env_statuses[self.i] = env_status.EnvStatus(label=label, primary=False)\n self.networks[self.i] = network\n\n reactor.callFromThread(self._connect,\n name=name,\n address=address,\n env_id=env_id,\n seed=seed,\n fps=fps,\n i=self.i,\n network=network,\n env_status=self.env_statuses[self.i],\n reward_buffer=self.reward_buffers[self.i],\n label=label,\n start_timeout=start_timeout,\n password=password,\n observer=observer,\n skip_network_calibration=skip_network_calibration,\n )\n self.i += 1\n return network\n\n def _already_closed(self, i):\n # Lock must be held\n return i not in self.names_by_id\n\n # Call only from Twisted thread\n\n # TODO: probably time to convert to kwargs\n @defer.inlineCallbacks\n def _connect(self, name, address, env_id, seed, fps, i, network, env_status, reward_buffer,\n label, password, start_timeout,\n observer, skip_network_calibration,\n attempt=0, elapsed_sleep_time=0,\n ):\n endpoint = endpoints.clientFromString(reactor, 'tcp:'+address)\n factory = websocket.WebSocketClientFactory('ws://'+address)\n factory.protocol = rewarder_client.RewarderClient\n\n assert password, \"Missing password: {} for rewarder session\".format(password)\n factory.headers = {'authorization': utils.basic_auth_encode(password), 'openai-observer': 'true' if observer else 'false'}\n factory.i = i\n\n # Various important objects\n factory.endpoint = endpoint\n factory.env_status = env_status\n factory.reward_buffer = reward_buffer\n\n # Helpful strings\n factory.label = label\n factory.address = address\n\n # Arguments to 
always send to the remote reset call\n factory.arg_env_id = env_id\n factory.arg_fps = fps\n\n def record_error(e):\n if isinstance(e, failure.Failure):\n e = e.value\n\n # logger.error('[%s] Recording rewarder error: %s', factory.label, e)\n with self.lock:\n # drop error on the floor if we're already closed\n if self._already_closed(factory.i):\n extra_logger.info('[%s] Ignoring error for already closed connection: %s', label, e)\n elif factory.i not in self.clients:\n extra_logger.info('[%s] Received error for connection which has not been fully initialized: %s', label, e)\n # We could handle this better, but right now we\n # just mark this as a fatal error for the\n # backend. Often it actually is.\n self.errors[factory.i] = e\n else:\n extra_logger.info('[%s] Recording fatal error for connection: %s', label, e)\n self.errors[factory.i] = e\n\n def retriable_error(e, error_message):\n if isinstance(e, failure.Failure):\n e = e.value\n\n if self._already_closed(factory.i):\n logger.error('[%s] Got error, but giving up on reconnecting, since %d already disconnected', factory.label, factory.i)\n return\n\n # Also need to handle DNS errors, so let's just handle everything for now.\n #\n # reason.trap(twisted.internet.error.ConnectError, error.ConnectionError)\n if elapsed_sleep_time < start_timeout:\n sleep = min((2 * attempt+1), 10)\n logger.error('[%s] Waiting on rewarder: %s. Retry in %ds (slept %ds/%ds): %s', factory.label, error_message, sleep, elapsed_sleep_time, start_timeout, e)\n reactor.callLater(\n sleep, self._connect, name=name, address=address,\n env_id=env_id, seed=seed, fps=fps, i=i, network=network,\n env_status=env_status, reward_buffer=reward_buffer, label=label,\n attempt=attempt+1, elapsed_sleep_time=elapsed_sleep_time+sleep,\n start_timeout=start_timeout, password=password,\n observer=observer, skip_network_calibration=skip_network_calibration,\n )\n else:\n logger.error('[%s] %s. Retries exceeded (slept %ds/%ds): %s', factory.label, error_message, elapsed_sleep_time, start_timeout, e)\n record_error(e)\n\n factory.record_error = record_error\n\n try:\n retry_msg = 'establish rewarder TCP connection'\n client = yield endpoint.connect(factory)\n extra_logger.info('[%s] Rewarder TCP connection established', factory.label)\n\n retry_msg = 'complete WebSocket handshake'\n yield client.waitForWebsocketConnection()\n extra_logger.info('[%s] Websocket client successfully connected', factory.label)\n\n if not skip_network_calibration:\n retry_msg = 'run network calibration'\n yield network.calibrate(client)\n extra_logger.info('[%s] Network calibration complete', factory.label)\n\n retry_msg = ''\n\n if factory.arg_env_id is not None:\n # We aren't picky about episode ID: we may have\n # already receieved an env.describe message\n # telling us about a resetting environment, which\n # we don't need to bump post.\n #\n # tl;dr hardcoding 0.0 here avoids a double reset.\n reply = yield self._send_env_reset(client, seed=seed, episode_id='0')\n else:\n # No env_id requested, so we just proceed without a reset\n reply = None\n # We're connected and have measured the\n # network. 
Mark everything as ready to go.\n with self.lock:\n if factory.i not in self.names_by_id:\n # ID has been popped!\n logger.info('[%s] Rewarder %d started, but has already been closed', factory.label, factory.i)\n client.close(reason='RewarderSession: double-closing, client was closed while RewarderSession was starting')\n elif reply is None:\n logger.info('[%s] Attached to running environment without reset', factory.label)\n else:\n context, req, rep = reply\n logger.info('[%s] Initial reset complete: episode_id=%s', factory.label, rep['headers']['episode_id'])\n self.clients[factory.i] = client\n except Exception as e:\n if retry_msg:\n retriable_error(e, 'failed to ' + retry_msg)\n else:\n record_error(e)\n\n def pop_errors(self):\n errors = {}\n with self.lock:\n if self.errors:\n for i, error in self.errors.items():\n name = self.names_by_id[i]\n errors[name] = error\n self.errors.clear()\n return errors\n\n def reset(self, seed=None, env_id=None):\n with self.lock:\n for i, reward_buffer in self.reward_buffers.items():\n reward_buffer.mask()\n reactor.callFromThread(self._reset, seed=seed, env_id=env_id)\n\n def _reset(self, seed=None, env_id=None):\n with self.lock:\n for client in self.clients.values():\n d = self._send_env_reset(client, seed=seed, env_id=env_id)\n # Total hack to capture the variable in the closure\n def callbacks(client):\n def success(reply): pass\n def fail(reason): client.factory.record_error(reason)\n return success, fail\n success, fail = callbacks(client)\n d.addCallback(success)\n d.addErrback(fail)\n\n def _send_env_reset(self, client, seed=None, episode_id=None, env_id=None):\n if episode_id is None:\n episode_id = client.factory.env_status.episode_id\n logger.info('[%s] Sending reset for env_id=%s fps=%s episode_id=%s', client.factory.label, client.factory.arg_env_id, client.factory.arg_fps, episode_id)\n return client.send_reset(\n env_id=client.factory.arg_env_id if env_id is None else env_id,\n seed=seed,\n fps=client.factory.arg_fps,\n episode_id=episode_id)\n\n def pop(self, warn=True, peek_d=None):\n reward_d = {}\n done_d = {}\n info_d = {}\n err_d = self.pop_errors()\n\n for i, reward_buffer in self.reward_buffers.items():\n name = self.names_by_id[i]\n\n reward, done, info = reward_buffer.pop(peek_d.get(name))\n reward_d[name] = reward\n done_d[name] = done\n info_d[name] = info\n\n # TODO: use FPS here rather than 60\n if warn and any(info.get('stats.reward.count', 0) > 60 for info in info_d.values()):\n logger.warn('WARNING: returning more than 60 aggregated rewards: %s. Either your agent is not keeping up with the framerate, or you should have called \".reset()\" to clear pending rewards and reset the environments to a known state.',\n {name: '{} (episode_id={})'.format(info['stats.reward.count'], info.get('env_status.episode_id')) for name, info in info_d.items()})\n\n return reward_d, done_d, info_d, err_d\n\n def wait(self, timeout=None):\n deadline = time.time() + timeout\n for client in self.clients:\n if timeout is not None:\n remaining_timeout = deadline - time.time()\n else:\n remaining_timeout = None\n client.reward_buffer.wait_for_step(timeout=remaining_timeout)\n\n # Hack to test actions over websockets\n # TODO: Carve websockets out of rewarder pkg (into vnc_env? 
- and move this there)\n def send_action(self, action_n, env_id):\n reactor.callFromThread(self._send_action, env_id, action_n)\n return self.pop_errors()\n\n def _send_action(self, env_id, action_n):\n with self.lock:\n for n, client in zip(action_n, self.clients.values()):\n self._send_env_action(client, env_id, action_n[n])\n\n def _send_env_action(self, client, env_id, action_n):\n if len(action_n) == 0:\n # Hack to skip empty actions. TODO: Find source (throttle?) and fix\n return\n message = {\n 'env_id': env_id,\n 'action': action_n,\n }\n client.send('v0.agent.action', message, expect_reply=False)\n\n def rewards_count(self):\n # TODO: any reason to lock these?\n return [client.reward_buffer.count for client in self.clients]\n\n def pop_observation(self):\n return [client.reward_buffer.pop_observation() for client in self.clients]\n\n # def _connection_time(self):\n # deferreds = []\n # for client in self.clients:\n # endpoint = client.factory.endpoint\n # d = connection_timer.start(endpoint)\n # deferreds.append(d)\n\n # d = defer.DeferredList(deferreds, fireOnOneErrback=True, consumeErrors=True)\n # return d\n\n# Run this in Twisty therad\nclass Network(object):\n def __init__(self):\n self.connection_samples = 10\n self.application_ping_samples = 10\n\n self.connection_time_m = None\n self.lock = threading.Lock()\n\n self.recalibrate = None\n self.client = None\n\n self._ntpdate_reversed_clock_skew = None\n self._reversed_clock_skew = None\n\n def active(self):\n with self.lock:\n return self._reversed_clock_skew is not None\n\n # Used by external consumers\n def reversed_clock_skew(self):\n with self.lock:\n if self._ntpdate_clock_skew is not None:\n return self._ntpdate_reversed_clock_skew\n else:\n return self._reversed_clock_skew\n\n def _report(self):\n connection_time = display.display_timestamps(self.connection_time_m)\n if self._ntpdate_clock_skew is not None:\n ntpdate_clock_skew = display.display_timestamp(self._ntpdate_clock_skew[0])\n else:\n ntpdate_clock_skew = None\n clock_skew = display.display_timestamps_pair(self.clock_skew_m)\n application_rtt = display.display_timestamps(self.application_rtt_m)\n request_overhead = display.display_timestamps(self.request_overhead_m)\n response_overhead = display.display_timestamps(self.response_overhead_m)\n\n extra_logger.info('[%s] Network calibration: ntpdate_clock_skew=%s clock_skew=%s connection_time=%s application_rtt=%s request_overhead=%s response_overhead=%s',\n self.client.factory.label, ntpdate_clock_skew, clock_skew, connection_time, application_rtt,\n request_overhead, response_overhead)\n\n def _start(self):\n def calibrate():\n d = defer.Deferred()\n def fail(reason):\n logger.error('[%s] Could not recalibrate network: %s', self.client.factory.label, reason)\n d.addErrback(fail)\n self._start_measure_connection_time(d)\n self._start()\n self.recalibrate = reactor.callLater(5 * 60, calibrate)\n\n def close(self):\n if self.recalibrate:\n try:\n self.recalibrate.cancel()\n except twisted.internet.error.AlreadyCalled:\n pass\n\n # Called externally\n def calibrate(self, client):\n d = defer.Deferred()\n def success(res):\n # If we succeed, kick off the periodic 5 minute\n # recalibrations.\n self._start()\n return res\n d.addCallback(success)\n\n self.client = client\n\n # Kinda a hack. 
Idea is to try using the ntpdate -q offset if\n # we can.\n skew = self._start_measure_clock_skew()\n def succeed(offset):\n with self.lock:\n self._ntpdate_clock_skew = np.array([offset, offset])\n self._ntpdate_reversed_clock_skew = np.array([-offset, -offset])\n self._start_measure_connection_time(d)\n skew.addCallback(succeed)\n\n def fail(reason):\n with self.lock:\n self._ntpdate_clock_skew = None\n self._ntpdate_reversed_clock_skew = None\n\n extra_logger.info('[%s] Could not determine clock skew with ntpdate; falling back to application-level ping: %s', self.client.factory.label, reason.value)\n self._start_measure_connection_time(d)\n skew.addErrback(fail)\n\n return d\n\n def _start_measure_connection_time(self, d):\n connection_time_m = np.zeros(self.connection_samples)\n self._measure_connection_time(d, connection_time_m, 0)\n\n def _measure_connection_time(self, d, connection_time_m, i):\n extra_logger.debug('[%s] Measuring connection time (%d/%d)', self.client.factory.label, i+1, len(connection_time_m))\n endpoint = self.client.factory.endpoint\n timer = connection_timer.start(endpoint)\n\n def success(delta):\n connection_time_m[i] = delta\n if i+1 < len(connection_time_m):\n self._measure_connection_time(d, connection_time_m, i+1)\n else:\n self.connection_time_m = connection_time_m\n self._start_measure_application_ping(d)\n def fail(reason):\n d.errback(reason)\n timer.addCallback(success)\n timer.addErrback(fail)\n\n def _start_measure_application_ping(self, d=None):\n clock_skew_m = np.zeros((self.application_ping_samples, 2))\n request_overhead_m = np.zeros((self.application_ping_samples))\n response_overhead_m = np.zeros((self.application_ping_samples))\n application_rtt_m = np.zeros((self.application_ping_samples))\n\n self._measure_application_ping(d, clock_skew_m, request_overhead_m, response_overhead_m, application_rtt_m, 0)\n\n def _measure_application_ping(self, d, clock_skew_m, request_overhead_m, response_overhead_m, application_rtt_m, i):\n extra_logger.debug('[%s] Issuing an application-level ping (%d/%d)', self.client.factory.label, i+1, len(clock_skew_m))\n start = time.time()\n ping = _ping(self.client)\n\n def success(res):\n context, request, response = res\n end = time.time()\n\n request_sent_at = request['headers']['sent_at'] # local\n response_sent_at = response['headers']['sent_at'] # remote\n response_received_at = context['start'] # local\n\n # We try to put bounds on clock skew by subtracting\n # local and remote times, for local and remote events\n # that are causally related.\n #\n # For example, suppose that the following local/remote\n # logical timestamps apply to a request (for a system\n # with clock skew of 100):\n #\n # request_sent local: 0 remote: 100\n # request_recieved local: 1 remote: 101\n # response_sent local: 2 remote: 102\n # response_received local: 3 remote: 103\n #\n # Then:\n #\n # # Remote event *after* local is upper bound\n # request_recieved.remote - request_sent.local = 101\n # # Remote event *before* local is lower bound\n # response_sent.remote - response_received.local = 102 - 3 = 99\n #\n # There's danger of further clock drift over time, but\n # we don't need these to be fully accurate, and this\n # should be fine for now.\n clock_skew_m[i, :] = (response_sent_at-response_received_at, response_sent_at-request_sent_at)\n request_overhead_m[i] = request_sent_at - start\n response_overhead_m[i] = end - response_received_at\n application_rtt_m[i] = response_received_at - request_sent_at\n\n if i+1 < 
len(clock_skew_m):\n self._measure_application_ping(d, clock_skew_m, request_overhead_m, response_overhead_m, application_rtt_m, i+1)\n else:\n self.clock_skew_m = clock_skew_m\n self.request_overhead_m = request_overhead_m\n self.response_overhead_m = response_overhead_m\n self.application_rtt_m = application_rtt_m\n\n self._report()\n self._update_exposed_metrics()\n\n # Ok, all done!\n if d is not None:\n d.callback(self)\n ping.addCallback(success)\n ping.addErrback(d.errback)\n\n def _update_exposed_metrics(self):\n with self.lock:\n self._clock_skew = self.clock_skew_m.mean(axis=0) # add to local time to get remote time, as (min, max) values\n self._reversed_clock_skew = -self._clock_skew[[1, 0]] # add to remote time to get local time, in format (min, max)\n\n\n def _start_measure_clock_skew(self):\n host = self.client.factory.address.split(':')[0]\n return connection_timer.measure_clock_skew(self.client.factory.label, host)\n"
] | [
[
"numpy.array",
"numpy.zeros"
]
] |
chrhenning/uncertainty_based_ood | [
"13c0b9910966544527497497f6ff0441d5334591"
] | [
"nngp/nngp.py"
] | [
"#!/usr/bin/env python3\n# Copyright 2021 Christian Henning\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# @title :nngp/nngp.py\n# @author :ch\n# @contact :[email protected]\n# @created :04/19/2021\n# @version :1.0\n# @python_version :3.8.5\nr\"\"\"\nDeep Neural Network as Gaussian Process\n---------------------------------------\n\nThe module :mod:`nngp.nngp` implements helper functions for Bayesian inference\nwith Gaussian Processes with a focus on kernels derived from neural network\narchitectures when taken to the infinite-width limit\n(cf. :mod:`nngp.mlp_kernel`).\n\nSpecifically, we consider a Gaussian Process\n:math:`\\mathcal{GP}\\big(\\mu(x), k(x, x')\\big)` with mean function\n:math:`\\mu(\\cdot)` and kernel :math:`k(\\cdot, \\cdot)`. Unless specified\notherwise, we assume the mean function to be :math:`\\mu(x) = 0`. Note, that any\nmultivariate Gaussian prescribed by the :math:`\\mathcal{GP}` at a given set of\ninput locations is consistent (marginalization from any superset of locations\nwill always lead to the same distribution) and adheres exchangibility (order of\ninput locations doesn't affect the distribution except for repositioning the\ncorresponding function values).\n\nFor any given set of inputs :math:`X = x_1, \\dots, x_n`, the\n:math:`\\mathcal{GP}` allows us to specify a prior distribution over function\nvalues :math:`p(f_1, \\dots, f_n; x_1, \\dots, x_n) \\equiv p(F; X)`.\n\nIn addition to inputs :math:`x` and function values :math:`f`, we consider\nobservations :math:`y`, which are obtained via a likelihood function\n:math:`p(y \\mid f)`.\n\nUsing the prior distribution over functions (the :math:`\\mathcal{GP}`) and a\ndataset :math:`\\mathcal{D} = \\{(x_n, y_n)\\}_{n=1}^N` with inputs :math:`X` and\ntargets :math:`Y`, one can form a posterior distribution over function values\n:math:`f` at an unknown location :math:`x^*` via\n\n.. math::\n\n p(f \\mid \\mathcal{D}; x^*) = p(f \\mid Y; x^* X) = \\frac{1}{p(Y; X)} \\\n \\int p(Y \\mid F) p(F, f; X, x^*) \\, dF\n\nPlease see\n`Rasmussen and Williams <http://www.gaussianprocess.org/gpml/chapters/RW.pdf>`__\nfor a broader introduction into Gaussian Processes.\n\"\"\"\nimport torch\nfrom warnings import warn\n\ndef inference_with_isotropic_gaussian_ll(Y, K_train, K_test, K_all, var=1e-10,\n L_mat=None, return_cov=False):\n r\"\"\"Bayesian inference with Gaussian likelihood and :math:`\\mathcal{GP}`\n prior.\n\n Here, we consider the case\n :math:`p(Y \\mid F) = \\mathcal{N}(Y; F, \\sigma_\\epsilon^2 I)`, where the\n posterior predictive :math:`p(f \\mid \\mathcal{D}; x^*)` can be analytically\n computed\n\n .. 
math::\n\n p(f \\mid \\mathcal{D}; x^*) &= \\mathcal{N}(f; \\mu^*, \\Sigma^*) \\\\ \\\n \\mu^* &= K(x^*, X) \\big( K(X, X) + \\sigma_\\epsilon^2 I \\big)^{-1} Y \\\\ \\\n \\Sigma^* &= k(x^*, x^*) - K(x^*, X) \\big( K(X, X) + \\\n \\sigma_\\epsilon^2 I \\big)^{-1} K(X, x^*)\n\n Args:\n Y (torch.Tensor): The labels :math:`Y` from the training set encoded as\n vector of shape ``[m]`` or ``[m, 1]``.\n K_train (torch.Tensor): The training data kernel matrix :math:`K(X, X)`.\n K_test (torch.Tensor): The test data kernel values :math:`k(x^*, x^*)`.\n This is a vector either of shape ``[n]``, where ``n`` is the number\n test points, or of shape ``[n, 1]``.\n K_all (torch.Tensor): The kernel values between train and test points\n :math:`K(x^*, X)`. This is expected to be matrix of shape ``[n,m]``,\n where ``m`` is the number of training and ``n`` the number of test\n points, or simply a vector of shape ``[m]``, if there is only one\n test point.\n var (float): The variance :math:`\\sigma_\\epsilon^2` of the likelihood.\n L_mat (torch.Tensor, optional): The matrix :math:`L` resulting from a\n Cholesky decomposition of :math:`K(X, X) + \\sigma_\\epsilon^2 I`.\n If provided, the arguments ``K_train`` and ``var`` are ignored.\n\n The function :func:`cholesky_adaptive_noise` may be helpful to\n compute ``L_mat``.\n return_cov (bool): If ``True``, the return value ``cov`` will be the\n full covariance matrix. However, this option requires ``K_test``\n to be the full ``[n, n]`` kernel matrix.\n\n Returns:\n (tuple): Tuple containing:\n\n - **mean** (torch.Tensor): A tensor of shape ``[n]``, where ``n`` is the\n number of test points. The tensor encodes the mean for each test point\n of the posterior predictive :math:`\\mu^*`.\n - **cov** (torch.Tensor): Same as ``mean`` but encoding the variance\n :math:`\\Sigma^*` of each test point, i.e., the diagonal of the full\n covariance matrix.\n \"\"\"\n m = K_train.shape[0] if L_mat is None else L_mat.shape[0]\n n = K_test.shape[0]\n assert Y.numel() == m\n assert K_all.numel() == m*n\n\n if Y.ndim == 1:\n Y = Y.view(-1, 1)\n if return_cov:\n assert K_test.numel() == n*n and K_test.ndim == 2\n elif K_test.ndim == 2:\n K_test = K_test.view(-1)\n if K_all.ndim == 1:\n assert n == 1\n K_all = K_all.view(n, m)\n\n #inv_K = torch.linalg.inv(K_train + var * torch.eye(m).to(K_train.device))\n #mu = torch.matmul(K_all, torch.matmul(inv_K, Y))\n #if return_cov:\n # sigma = K_test - torch.matmul(K_all, torch.matmul(inv_K, K_all.T))\n #else:\n # #sigma = K_test - torch.bmm(K_all.view(n, 1, m), torch.matmul(inv_K,\n # # K_all.view(n, m, 1))).squeeze()\n # sigma = K_test - (K_all * torch.matmul(inv_K,\n # K_all.view(n, m, 1)).squeeze(dim=2)).sum(dim=1)\n\n # Note, direct matrix inversion is considered extremely numerically\n # unstable. Therefore, Rasmussen et al. 
propose the use of Cholesky\n # decomposition, see Appendix A.4 in\n # http://www.gaussianprocess.org/gpml/chapters/RW.pdf\n if L_mat is None:\n L = torch.linalg.cholesky(K_train + \\\n var * torch.eye(m).to(K_train.device))\n else:\n L = L_mat\n alpha = torch.triangular_solve(torch.triangular_solve(Y, L, upper=False)[0],\n L, upper=False, transpose=True)[0]\n mu = torch.matmul(K_all, alpha)\n\n v = torch.triangular_solve(K_all.T, L, upper=False)[0]\n if return_cov:\n sigma = K_test - torch.matmul(v.T, v)\n else:\n sigma = K_test - (v * v).sum(dim=0)\n\n if torch.any(sigma < 0):\n sigma[sigma < 0] = 1e-5\n warn('Some entries of the covariance matrix are negative and set to ' +\n '1e-5!')\n\n return mu.squeeze(), sigma\n\ndef gen_inference_kernels(X_train, X_test, kernel_func, compute_K_train=True,\n full_K_test=False):\n r\"\"\"Generate the kernel matrices required for inference.\n\n This function generates the kernel matrices / vectors :math:`K(X, X)`,\n :math:`K(x^*, X)` and :math:`K(x^*, x^*)`, where :math:`X` are training\n inputs and :math:`x^*` are unseen points.\n\n Thus, the function can be seen as helper function for functions like\n :func:`inference_with_isotropic_gaussian_ll`.\n\n Args:\n X_train (torch.Tensor): A batch of ``m`` training inputs. The tensor\n should have shape ``[m, d_in]``, where ``d_in`` is the input\n dimensionality. For scalar inputs, one may also pass a tensor of\n shape ``[m]``.\n X_test (torch.Tensor):A batch of ``n`` unseen test inputs.\n kernel_func (func): The kernel function :math:`k(x, x')`. It is expected\n to have an interface for a single input ``X`` as described in\n the docstring of function:`nngp.mlp_kernel.init_kernel`.\n\n .. code-block:: python\n\n def kernel_func(X):\n # Compute kernel values.\n return K\n\n compute_K_train (bool): Whether the kernel matrix :math:`K(X, X)`\n should be computed. If ``False``, the return value ``K_train`` is\n ``None``.\n full_K_test (bool): Whether the full kernel matrix :math:`K(x^*, x^*)`\n of shape ``[n, n]`` should be computed.\n\n Returns:\n (tuple): Tuple containing:\n\n - **K_train** (torch.Tensor or None): :math:`K(X, X)`, a tensor of\n shape ``[m, m]``.\n - **K_test** (torch.Tensor): :math:`K(x^*, x^*)`, a tensor of shape\n ``[n]``\n - **K_all** (torch.Tensor): :math:`K(x^*, X)`, a tensor of shape\n ``[n,m]``\n \"\"\"\n if compute_K_train:\n K_train = kernel_func(X_train)\n else:\n K_train = None\n\n if full_K_test:\n K_test = kernel_func(X_test)\n else:\n K_test = kernel_func((X_test, X_test))\n\n # Contruct tuples between all train samples and all test samples.\n if X_train.ndim == 1: # `d_in == 1`\n X_train = X_train.view(-1, 1)\n if X_test.ndim == 1:\n X_test = X_test.view(-1, 1)\n\n m = X_train.shape[0]\n n = X_test.shape[0]\n\n X_all = (X_train.repeat(n, 1),\n X_test.view(n, 1, -1).repeat(1, m, 1).view(n*m, -1))\n K_all = kernel_func(X_all)\n\n K_all = K_all.view(n, m)\n\n return K_train, K_test, K_all\n\ndef cholesky_adaptive_noise(K_train, var=1e-10, var_step=2.):\n r\"\"\"Cholesky decomposition of a kernel matrix with noise perturbation.\n\n This function computes the Cholesky decomposition of:\n\n .. math::\n\n L L^T = K(X, X) + \\sigma_\\epsilon^2 I\n\n As kernel matrices :math:`K(X, X)` may easily be (numerically) singular,\n tuning the noise :math:`\\sigma_\\epsilon^2` is crucial. 
Therefore, this\n method will iteratively increase the noise level until the matrix becomes\n non-singular.\n\n Args:\n (....): See docstring of method :meth:`kernel_efficient`.\n var (float or list): The initial variance :math:`\\sigma_\\epsilon^2`.\n If a list of values is provided, then each value in this list is\n consecutively tested until a non-singular matrix is constructed.\n Note, we assume that the list is sorted from small to large. If none\n of the elements in this list will lead to a non-singular matrix, an\n exception is raised.\n var_step (float): If ``var`` is a single value, then the value specified\n here will be iteratively multiplied to increase the variance\n :math:`\\sigma_\\epsilon^2` (therefore ``var_step > 1`` is required).\n\n Returns:\n (tuple): Tuple containing:\n - **L** (torch.Tensor): The matrix :math:`L` resulting from the\n successful Cholesky decomposition.\n - **var_chosen** (float): The variance :math:`\\sigma_\\epsilon^2` that\n was chosen to obtain ``L``.\n \"\"\"\n m = K_train.shape[0]\n\n if not isinstance(var, (list, tuple)):\n assert var_step > 1.\n\n i = 0\n while True:\n if isinstance(var, (list, tuple)):\n if i >= len(var):\n raise RuntimeError('List of variances didn\\'t contain high ' +\n 'enough values.')\n curr_var = var[i]\n else:\n if i == 0:\n curr_var = var\n else:\n curr_var *= var_step\n\n try:\n L = torch.linalg.cholesky(K_train + curr_var * torch.eye(m).to( \\\n K_train.device))\n except:\n i += 1\n continue\n\n return L, curr_var\n\nif __name__ == '__main__':\n pass\n\n\n"
] | [
[
"torch.triangular_solve",
"torch.eye",
"torch.any",
"torch.matmul"
]
] |
cheneyveron/PaddleX | [
"86f73fc6a66b12c638f642524bfd1cf730e26c4b"
] | [
"paddlex/ppdet/modeling/assigners/atss_assigner.py"
] | [
"# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport paddle\nimport paddle.nn as nn\nimport paddle.nn.functional as F\n\nfrom paddlex.ppdet.core.workspace import register\nfrom ..ops import iou_similarity\nfrom ..bbox_utils import bbox_center\nfrom .utils import (pad_gt, check_points_inside_bboxes, compute_max_iou_anchor,\n compute_max_iou_gt)\n\n\n@register\nclass ATSSAssigner(nn.Layer):\n \"\"\"Bridging the Gap Between Anchor-based and Anchor-free Detection\n via Adaptive Training Sample Selection\n \"\"\"\n __shared__ = ['num_classes']\n\n def __init__(self,\n topk=9,\n num_classes=80,\n force_gt_matching=False,\n eps=1e-9):\n super(ATSSAssigner, self).__init__()\n self.topk = topk\n self.num_classes = num_classes\n self.force_gt_matching = force_gt_matching\n self.eps = eps\n\n def _gather_topk_pyramid(self, gt2anchor_distances, num_anchors_list,\n pad_gt_mask):\n pad_gt_mask = pad_gt_mask.tile([1, 1, self.topk]).astype(paddle.bool)\n gt2anchor_distances_list = paddle.split(\n gt2anchor_distances, num_anchors_list, axis=-1)\n num_anchors_index = np.cumsum(num_anchors_list).tolist()\n num_anchors_index = [0, ] + num_anchors_index[:-1]\n is_in_topk_list = []\n topk_idxs_list = []\n for distances, anchors_index in zip(gt2anchor_distances_list,\n num_anchors_index):\n num_anchors = distances.shape[-1]\n topk_metrics, topk_idxs = paddle.topk(\n distances, self.topk, axis=-1, largest=False)\n topk_idxs_list.append(topk_idxs + anchors_index)\n topk_idxs = paddle.where(pad_gt_mask, topk_idxs,\n paddle.zeros_like(topk_idxs))\n is_in_topk = F.one_hot(topk_idxs, num_anchors).sum(axis=-2)\n is_in_topk = paddle.where(is_in_topk > 1,\n paddle.zeros_like(is_in_topk),\n is_in_topk)\n is_in_topk_list.append(\n is_in_topk.astype(gt2anchor_distances.dtype))\n is_in_topk_list = paddle.concat(is_in_topk_list, axis=-1)\n topk_idxs_list = paddle.concat(topk_idxs_list, axis=-1)\n return is_in_topk_list, topk_idxs_list\n\n @paddle.no_grad()\n def forward(self,\n anchor_bboxes,\n num_anchors_list,\n gt_labels,\n gt_bboxes,\n bg_index,\n gt_scores=None):\n r\"\"\"This code is based on\n https://github.com/fcjian/TOOD/blob/master/mmdet/core/bbox/assigners/atss_assigner.py\n\n The assignment is done in following steps\n 1. compute iou between all bbox (bbox of all pyramid levels) and gt\n 2. compute center distance between all bbox and gt\n 3. on each pyramid level, for each gt, select k bbox whose center\n are closest to the gt center, so we total select k*l bbox as\n candidates for each gt\n 4. get corresponding iou for the these candidates, and compute the\n mean and std, set mean + std as the iou threshold\n 5. select these candidates whose iou are greater than or equal to\n the threshold as positive\n 6. limit the positive sample's center in gt\n 7. 
if an anchor box is assigned to multiple gts, the one with the\n highest iou will be selected.\n Args:\n anchor_bboxes (Tensor, float32): pre-defined anchors, shape(L, 4),\n \"xmin, xmax, ymin, ymax\" format\n num_anchors_list (List): num of anchors in each level\n gt_labels (Tensor|List[Tensor], int64): Label of gt_bboxes, shape(B, n, 1)\n gt_bboxes (Tensor|List[Tensor], float32): Ground truth bboxes, shape(B, n, 4)\n bg_index (int): background index\n gt_scores (Tensor|List[Tensor]|None, float32) Score of gt_bboxes,\n shape(B, n, 1), if None, then it will initialize with one_hot label\n Returns:\n assigned_labels (Tensor): (B, L)\n assigned_bboxes (Tensor): (B, L, 4)\n assigned_scores (Tensor): (B, L, C)\n \"\"\"\n gt_labels, gt_bboxes, pad_gt_scores, pad_gt_mask = pad_gt(\n gt_labels, gt_bboxes, gt_scores)\n assert gt_labels.ndim == gt_bboxes.ndim and \\\n gt_bboxes.ndim == 3\n\n num_anchors, _ = anchor_bboxes.shape\n batch_size, num_max_boxes, _ = gt_bboxes.shape\n\n # negative batch\n if num_max_boxes == 0:\n assigned_labels = paddle.full([batch_size, num_anchors], bg_index)\n assigned_bboxes = paddle.zeros([batch_size, num_anchors, 4])\n assigned_scores = paddle.zeros(\n [batch_size, num_anchors, self.num_classes])\n return assigned_labels, assigned_bboxes, assigned_scores\n\n # 1. compute iou between gt and anchor bbox, [B, n, L]\n ious = iou_similarity(gt_bboxes.reshape([-1, 4]), anchor_bboxes)\n ious = ious.reshape([batch_size, -1, num_anchors])\n\n # 2. compute center distance between all anchors and gt, [B, n, L]\n gt_centers = bbox_center(gt_bboxes.reshape([-1, 4])).unsqueeze(1)\n anchor_centers = bbox_center(anchor_bboxes)\n gt2anchor_distances = (gt_centers - anchor_centers.unsqueeze(0)) \\\n .norm(2, axis=-1).reshape([batch_size, -1, num_anchors])\n\n # 3. on each pyramid level, selecting topk closest candidates\n # based on the center distance, [B, n, L]\n is_in_topk, topk_idxs = self._gather_topk_pyramid(\n gt2anchor_distances, num_anchors_list, pad_gt_mask)\n\n # 4. get corresponding iou for the these candidates, and compute the\n # mean and std, 5. set mean + std as the iou threshold\n iou_candidates = ious * is_in_topk\n iou_threshold = paddle.index_sample(\n iou_candidates.flatten(stop_axis=-2),\n topk_idxs.flatten(stop_axis=-2))\n iou_threshold = iou_threshold.reshape([batch_size, num_max_boxes, -1])\n iou_threshold = iou_threshold.mean(axis=-1, keepdim=True) + \\\n iou_threshold.std(axis=-1, keepdim=True)\n is_in_topk = paddle.where(\n iou_candidates > iou_threshold.tile([1, 1, num_anchors]),\n is_in_topk, paddle.zeros_like(is_in_topk))\n\n # 6. check the positive sample's center in gt, [B, n, L]\n is_in_gts = check_points_inside_bboxes(anchor_centers, gt_bboxes)\n\n # select positive sample, [B, n, L]\n mask_positive = is_in_topk * is_in_gts * pad_gt_mask\n\n # 7. if an anchor box is assigned to multiple gts,\n # the one with the highest iou will be selected.\n mask_positive_sum = mask_positive.sum(axis=-2)\n if mask_positive_sum.max() > 1:\n mask_multiple_gts = (mask_positive_sum.unsqueeze(1) > 1).tile(\n [1, num_max_boxes, 1])\n is_max_iou = compute_max_iou_anchor(ious)\n mask_positive = paddle.where(mask_multiple_gts, is_max_iou,\n mask_positive)\n mask_positive_sum = mask_positive.sum(axis=-2)\n # 8. 
make sure every gt_bbox matches the anchor\n if self.force_gt_matching:\n is_max_iou = compute_max_iou_gt(ious) * pad_gt_mask\n mask_max_iou = (is_max_iou.sum(-2, keepdim=True) == 1).tile(\n [1, num_max_boxes, 1])\n mask_positive = paddle.where(mask_max_iou, is_max_iou,\n mask_positive)\n mask_positive_sum = mask_positive.sum(axis=-2)\n assigned_gt_index = mask_positive.argmax(axis=-2)\n assert mask_positive_sum.max() == 1, \\\n (\"one anchor just assign one gt, but received not equals 1. \"\n \"Received: %f\" % mask_positive_sum.max().item())\n\n # assigned target\n batch_ind = paddle.arange(\n end=batch_size, dtype=gt_labels.dtype).unsqueeze(-1)\n assigned_gt_index = assigned_gt_index + batch_ind * num_max_boxes\n assigned_labels = paddle.gather(\n gt_labels.flatten(), assigned_gt_index.flatten(), axis=0)\n assigned_labels = assigned_labels.reshape([batch_size, num_anchors])\n assigned_labels = paddle.where(\n mask_positive_sum > 0, assigned_labels,\n paddle.full_like(assigned_labels, bg_index))\n\n assigned_bboxes = paddle.gather(\n gt_bboxes.reshape([-1, 4]), assigned_gt_index.flatten(), axis=0)\n assigned_bboxes = assigned_bboxes.reshape([batch_size, num_anchors, 4])\n\n assigned_scores = F.one_hot(assigned_labels, self.num_classes)\n if gt_scores is not None:\n gather_scores = paddle.gather(\n pad_gt_scores.flatten(), assigned_gt_index.flatten(), axis=0)\n gather_scores = gather_scores.reshape([batch_size, num_anchors])\n gather_scores = paddle.where(mask_positive_sum > 0, gather_scores,\n paddle.zeros_like(gather_scores))\n assigned_scores *= gather_scores.unsqueeze(-1)\n\n return assigned_labels, assigned_bboxes, assigned_scores\n"
] | [
[
"numpy.cumsum"
]
] |
lucasmtz/ACAR-Net | [
"08a224625f04bbf595baaeb1c79ec491642e0059"
] | [
"models/heads/linear.py"
] | [
"import torch\nimport torch.nn as nn\nimport torchvision\n\n__all__ = [\"linear\"]\n\n\nclass LinearHead(nn.Module):\n def __init__(self, width, roi_spatial=7, num_classes=60, dropout=0.0, bias=False):\n super().__init__()\n\n self.roi_spatial = roi_spatial\n self.roi_maxpool = nn.MaxPool2d(roi_spatial)\n\n self.fc = nn.Linear(width, num_classes, bias=bias)\n\n if dropout > 0:\n self.dp = nn.Dropout(dropout)\n else:\n self.dp = None\n\n # data: features, rois\n # returns: outputs\n def forward(self, data):\n if not isinstance(data[\"features\"], list):\n features = [data[\"features\"]]\n else:\n features = data[\"features\"]\n\n roi_features = []\n for f in features:\n sp = f.shape\n h, w = sp[3:]\n feats = nn.AdaptiveAvgPool3d((1, h, w))(f).view(-1, sp[1], h, w)\n\n rois = data[\"rois\"].clone()\n rois[:, 1] = rois[:, 1] * w\n rois[:, 2] = rois[:, 2] * h\n rois[:, 3] = rois[:, 3] * w\n rois[:, 4] = rois[:, 4] * h\n rois = rois.detach()\n roi_feats = torchvision.ops.roi_align(feats, rois, (self.roi_spatial, self.roi_spatial))\n roi_feats = self.roi_maxpool(roi_feats).view(-1, sp[1])\n\n roi_features.append(roi_feats)\n\n roi_features = torch.cat(roi_features, dim=1)\n if self.dp is not None:\n roi_features = self.dp(roi_features)\n outputs = self.fc(roi_features)\n\n return {\"outputs\": outputs}\n\n\ndef linear(**kwargs):\n model = LinearHead(**kwargs)\n return model\n"
] | [
[
"torch.nn.MaxPool2d",
"torch.nn.Linear",
"torch.nn.AdaptiveAvgPool3d",
"torch.cat",
"torch.nn.Dropout"
]
] |
873040/Abhishek | [
"2ddd716e66bc5cc6e6f0787508dd07da0e02e75a",
"2ddd716e66bc5cc6e6f0787508dd07da0e02e75a"
] | [
"research/delf/delf/python/examples/detector.py",
"official/nlp/transformer/transformer_main.py"
] | [
"# Copyright 2019 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Module to construct object detector function.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\n\ndef MakeDetector(sess, model_dir, import_scope=None):\n \"\"\"Creates a function to detect objects in an image.\n\n Args:\n sess: TensorFlow session to use.\n model_dir: Directory where SavedModel is located.\n import_scope: Optional scope to use for model.\n\n Returns:\n Function that receives an image and returns detection results.\n \"\"\"\n tf.saved_model.loader.load(\n sess, [tf.saved_model.tag_constants.SERVING],\n model_dir,\n import_scope=import_scope)\n import_scope_prefix = import_scope + '/' if import_scope is not None else ''\n input_images = sess.graph.get_tensor_by_name('%sinput_images:0' %\n import_scope_prefix)\n boxes = sess.graph.get_tensor_by_name('%sdetection_boxes:0' %\n import_scope_prefix)\n scores = sess.graph.get_tensor_by_name('%sdetection_scores:0' %\n import_scope_prefix)\n class_indices = sess.graph.get_tensor_by_name('%sdetection_classes:0' %\n import_scope_prefix)\n\n def DetectorFn(images):\n \"\"\"Receives an image and returns detected boxes.\n\n Args:\n images: Uint8 array with shape (batch, height, width 3) containing a batch\n of RGB images.\n\n Returns:\n Tuple (boxes, scores, class_indices).\n \"\"\"\n return sess.run([boxes, scores, class_indices],\n feed_dict={input_images: images})\n\n return DetectorFn\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Train and evaluate the Transformer model.\n\nSee README for description of setting the training schedule and evaluating the\nBLEU score.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport tempfile\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\nimport tensorflow as tf\n\nfrom official.modeling import performance\nfrom official.nlp.transformer import compute_bleu\nfrom official.nlp.transformer import data_pipeline\nfrom official.nlp.transformer import metrics\nfrom official.nlp.transformer import misc\nfrom official.nlp.transformer import optimizer\nfrom official.nlp.transformer import transformer\nfrom official.nlp.transformer import translate\nfrom official.nlp.transformer.utils import tokenizer\nfrom official.utils.flags import core as flags_core\nfrom official.utils.logs import logger\nfrom official.utils.misc import distribution_utils\nfrom official.utils.misc import keras_utils\n\nINF = int(1e9)\nBLEU_DIR = \"bleu\"\n_SINGLE_SAMPLE = 1\n\n\ndef translate_and_compute_bleu(model,\n params,\n subtokenizer,\n bleu_source,\n bleu_ref,\n distribution_strategy=None):\n \"\"\"Translate file and report the cased and uncased bleu scores.\n\n Args:\n model: A Keras model, used to generate the translations.\n params: A dictionary, containing the translation related parameters.\n subtokenizer: A subtokenizer object, used for encoding and decoding source\n and translated lines.\n bleu_source: A file containing source sentences for translation.\n bleu_ref: A file containing the reference for the translated sentences.\n distribution_strategy: A platform distribution strategy, used for TPU based\n translation.\n\n Returns:\n uncased_score: A float, the case insensitive BLEU score.\n cased_score: A float, the case sensitive BLEU score.\n \"\"\"\n # Create temporary file to store translation.\n tmp = tempfile.NamedTemporaryFile(delete=False)\n tmp_filename = tmp.name\n\n translate.translate_file(\n model,\n params,\n subtokenizer,\n bleu_source,\n output_file=tmp_filename,\n print_all_translations=False,\n distribution_strategy=distribution_strategy)\n\n # Compute uncased and cased bleu scores.\n uncased_score = compute_bleu.bleu_wrapper(bleu_ref, tmp_filename, False)\n cased_score = compute_bleu.bleu_wrapper(bleu_ref, tmp_filename, True)\n os.remove(tmp_filename)\n return uncased_score, cased_score\n\n\ndef evaluate_and_log_bleu(model,\n params,\n bleu_source,\n bleu_ref,\n vocab_file,\n distribution_strategy=None):\n \"\"\"Calculate and record the BLEU score.\n\n Args:\n model: A Keras model, used to generate the translations.\n params: A dictionary, containing the translation related parameters.\n bleu_source: A file containing source sentences for translation.\n bleu_ref: A file 
containing the reference for the translated sentences.\n vocab_file: A file containing the vocabulary for translation.\n distribution_strategy: A platform distribution strategy, used for TPU based\n translation.\n\n Returns:\n uncased_score: A float, the case insensitive BLEU score.\n cased_score: A float, the case sensitive BLEU score.\n \"\"\"\n subtokenizer = tokenizer.Subtokenizer(vocab_file)\n\n uncased_score, cased_score = translate_and_compute_bleu(\n model, params, subtokenizer, bleu_source, bleu_ref, distribution_strategy)\n\n logging.info(\"Bleu score (uncased): %s\", uncased_score)\n logging.info(\"Bleu score (cased): %s\", cased_score)\n return uncased_score, cased_score\n\n\nclass TransformerTask(object):\n \"\"\"Main entry of Transformer model.\"\"\"\n\n def __init__(self, flags_obj):\n \"\"\"Init function of TransformerMain.\n\n Args:\n flags_obj: Object containing parsed flag values, i.e., FLAGS.\n\n Raises:\n ValueError: if not using static batch for input data on TPU.\n \"\"\"\n self.flags_obj = flags_obj\n self.predict_model = None\n\n # Add flag-defined parameters to params object\n num_gpus = flags_core.get_num_gpus(flags_obj)\n self.params = params = misc.get_model_params(flags_obj.param_set, num_gpus)\n\n params[\"num_gpus\"] = num_gpus\n params[\"use_ctl\"] = flags_obj.use_ctl\n params[\"data_dir\"] = flags_obj.data_dir\n params[\"model_dir\"] = flags_obj.model_dir\n params[\"static_batch\"] = flags_obj.static_batch\n params[\"max_length\"] = flags_obj.max_length\n params[\"decode_batch_size\"] = flags_obj.decode_batch_size\n params[\"decode_max_length\"] = flags_obj.decode_max_length\n params[\"padded_decode\"] = flags_obj.padded_decode\n params[\"num_parallel_calls\"] = (\n flags_obj.num_parallel_calls or tf.data.experimental.AUTOTUNE)\n\n params[\"use_synthetic_data\"] = flags_obj.use_synthetic_data\n params[\"batch_size\"] = flags_obj.batch_size or params[\"default_batch_size\"]\n params[\"repeat_dataset\"] = None\n params[\"dtype\"] = flags_core.get_tf_dtype(flags_obj)\n params[\"enable_tensorboard\"] = flags_obj.enable_tensorboard\n params[\"enable_metrics_in_training\"] = flags_obj.enable_metrics_in_training\n params[\"steps_between_evals\"] = flags_obj.steps_between_evals\n params[\"enable_checkpointing\"] = flags_obj.enable_checkpointing\n\n self.distribution_strategy = distribution_utils.get_distribution_strategy(\n distribution_strategy=flags_obj.distribution_strategy,\n num_gpus=num_gpus,\n all_reduce_alg=flags_obj.all_reduce_alg,\n num_packs=flags_obj.num_packs,\n tpu_address=flags_obj.tpu or \"\")\n if self.use_tpu:\n params[\"num_replicas\"] = self.distribution_strategy.num_replicas_in_sync\n if not params[\"static_batch\"]:\n raise ValueError(\"TPU requires static batch for input data.\")\n else:\n logging.info(\"Running transformer with num_gpus = %d\", num_gpus)\n\n if self.distribution_strategy:\n logging.info(\"For training, using distribution strategy: %s\",\n self.distribution_strategy)\n else:\n logging.info(\"Not using any distribution strategy.\")\n\n performance.set_mixed_precision_policy(\n params[\"dtype\"],\n flags_core.get_loss_scale(flags_obj, default_for_fp16=\"dynamic\"))\n\n @property\n def use_tpu(self):\n if self.distribution_strategy:\n return isinstance(self.distribution_strategy,\n tf.distribute.experimental.TPUStrategy)\n return False\n\n def train(self):\n \"\"\"Trains the model.\"\"\"\n params = self.params\n flags_obj = self.flags_obj\n # Sets config options.\n 
keras_utils.set_session_config(enable_xla=flags_obj.enable_xla)\n\n _ensure_dir(flags_obj.model_dir)\n with distribution_utils.get_strategy_scope(self.distribution_strategy):\n model = transformer.create_model(params, is_train=True)\n opt = self._create_optimizer()\n\n current_step = 0\n checkpoint = tf.train.Checkpoint(model=model, optimizer=opt)\n latest_checkpoint = tf.train.latest_checkpoint(flags_obj.model_dir)\n if latest_checkpoint:\n checkpoint.restore(latest_checkpoint)\n logging.info(\"Loaded checkpoint %s\", latest_checkpoint)\n current_step = opt.iterations.numpy()\n\n if params[\"use_ctl\"]:\n train_loss_metric = tf.keras.metrics.Mean(\n \"training_loss\", dtype=tf.float32)\n if params[\"enable_tensorboard\"]:\n summary_writer = tf.compat.v2.summary.create_file_writer(\n flags_obj.model_dir)\n else:\n summary_writer = tf.compat.v2.summary.create_noop_writer()\n train_metrics = [train_loss_metric]\n if params[\"enable_metrics_in_training\"]:\n train_metrics = train_metrics + model.metrics\n else:\n model.compile(opt)\n\n model.summary()\n\n if self.use_tpu:\n # Different from experimental_distribute_dataset,\n # experimental_distribute_datasets_from_function requires\n # per-replica/local batch size.\n params[\"batch_size\"] /= self.distribution_strategy.num_replicas_in_sync\n train_ds = (\n self.distribution_strategy\n .experimental_distribute_datasets_from_function(\n lambda ctx: data_pipeline.train_input_fn(params, ctx)))\n else:\n train_ds = data_pipeline.train_input_fn(params)\n map_data_fn = data_pipeline.map_data_for_transformer_fn\n train_ds = train_ds.map(\n map_data_fn, num_parallel_calls=params[\"num_parallel_calls\"])\n if params[\"use_ctl\"]:\n train_ds_iterator = iter(train_ds)\n\n callbacks = self._create_callbacks(flags_obj.model_dir, 0, params)\n\n # Only TimeHistory callback is supported for CTL\n if params[\"use_ctl\"]:\n callbacks = [cb for cb in callbacks\n if isinstance(cb, keras_utils.TimeHistory)]\n\n # TODO(b/139418525): Refactor the custom training loop logic.\n @tf.function\n def train_steps(iterator, steps):\n \"\"\"Training steps function for TPU runs.\n\n Args:\n iterator: The input iterator of the training dataset.\n steps: An integer, the number of training steps.\n\n Returns:\n A float, the loss value.\n \"\"\"\n\n def _step_fn(inputs):\n \"\"\"Per-replica step function.\"\"\"\n inputs, targets = inputs\n with tf.GradientTape() as tape:\n logits = model([inputs, targets], training=True)\n loss = metrics.transformer_loss(logits, targets,\n params[\"label_smoothing\"],\n params[\"vocab_size\"])\n # Scales the loss, which results in using the average loss across all\n # of the replicas for backprop.\n scaled_loss = loss / self.distribution_strategy.num_replicas_in_sync\n\n # De-dupes variables due to keras tracking issues.\n tvars = list({id(v): v for v in model.trainable_variables}.values())\n grads = tape.gradient(scaled_loss, tvars)\n opt.apply_gradients(zip(grads, tvars))\n # For reporting, the metric takes the mean of losses.\n train_loss_metric.update_state(loss)\n\n for _ in tf.range(steps):\n train_loss_metric.reset_states()\n self.distribution_strategy.run(\n _step_fn, args=(next(iterator),))\n\n cased_score, uncased_score = None, None\n cased_score_history, uncased_score_history = [], []\n while current_step < flags_obj.train_steps:\n remaining_steps = flags_obj.train_steps - current_step\n train_steps_per_eval = (\n remaining_steps if remaining_steps < flags_obj.steps_between_evals\n else flags_obj.steps_between_evals)\n 
current_iteration = current_step // flags_obj.steps_between_evals\n\n logging.info(\n \"Start train iteration at global step:{}\".format(current_step))\n history = None\n if params[\"use_ctl\"]:\n if not self.use_tpu:\n raise NotImplementedError(\n \"Custom training loop on GPUs is not implemented.\")\n\n # Runs training steps.\n with summary_writer.as_default():\n for cb in callbacks:\n cb.on_epoch_begin(current_iteration)\n cb.on_batch_begin(0)\n\n train_steps(\n train_ds_iterator,\n tf.convert_to_tensor(train_steps_per_eval, dtype=tf.int32))\n current_step += train_steps_per_eval\n train_loss = train_loss_metric.result().numpy().astype(float)\n logging.info(\"Train Step: %d/%d / loss = %s\", current_step,\n flags_obj.train_steps, train_loss)\n\n for cb in callbacks:\n cb.on_batch_end(train_steps_per_eval - 1)\n cb.on_epoch_end(current_iteration)\n\n if params[\"enable_tensorboard\"]:\n for metric_obj in train_metrics:\n tf.compat.v2.summary.scalar(metric_obj.name, metric_obj.result(),\n current_step)\n summary_writer.flush()\n\n for cb in callbacks:\n cb.on_train_end()\n\n if flags_obj.enable_checkpointing:\n # avoid check-pointing when running for benchmarking.\n checkpoint_name = checkpoint.save(\n os.path.join(flags_obj.model_dir,\n \"ctl_step_{}.ckpt\".format(current_step)))\n logging.info(\"Saved checkpoint to %s\", checkpoint_name)\n else:\n if self.use_tpu:\n raise NotImplementedError(\n \"Keras model.fit on TPUs is not implemented.\")\n history = model.fit(\n train_ds,\n initial_epoch=current_iteration,\n epochs=current_iteration + 1,\n steps_per_epoch=train_steps_per_eval,\n callbacks=callbacks,\n # If TimeHistory is enabled, progress bar would be messy. Increase\n # the verbose level to get rid of it.\n verbose=(2 if flags_obj.enable_time_history else 1))\n current_step += train_steps_per_eval\n logging.info(\"Train history: {}\".format(history.history))\n\n logging.info(\"End train iteration at global step:{}\".format(current_step))\n\n if (flags_obj.bleu_source and flags_obj.bleu_ref):\n uncased_score, cased_score = self.eval()\n cased_score_history.append([current_iteration + 1, cased_score])\n uncased_score_history.append([current_iteration + 1, uncased_score])\n\n stats = ({\n \"loss\": train_loss\n } if history is None else misc.build_stats(history, callbacks))\n if uncased_score and cased_score:\n stats[\"bleu_uncased\"] = uncased_score\n stats[\"bleu_cased\"] = cased_score\n stats[\"bleu_uncased_history\"] = uncased_score_history\n stats[\"bleu_cased_history\"] = cased_score_history\n return stats\n\n def eval(self):\n \"\"\"Evaluates the model.\"\"\"\n distribution_strategy = self.distribution_strategy if self.use_tpu else None\n\n # We only want to create the model under DS scope for TPU case.\n # When 'distribution_strategy' is None, a no-op DummyContextManager will\n # be used.\n with distribution_utils.get_strategy_scope(distribution_strategy):\n if not self.predict_model:\n self.predict_model = transformer.create_model(self.params, False)\n self._load_weights_if_possible(\n self.predict_model,\n tf.train.latest_checkpoint(self.flags_obj.model_dir))\n self.predict_model.summary()\n return evaluate_and_log_bleu(\n self.predict_model, self.params, self.flags_obj.bleu_source,\n self.flags_obj.bleu_ref, self.flags_obj.vocab_file,\n distribution_strategy)\n\n def predict(self):\n \"\"\"Predicts result from the model.\"\"\"\n params = self.params\n flags_obj = self.flags_obj\n\n with tf.name_scope(\"model\"):\n model = transformer.create_model(params, is_train=False)\n 
self._load_weights_if_possible(\n model, tf.train.latest_checkpoint(self.flags_obj.model_dir))\n model.summary()\n subtokenizer = tokenizer.Subtokenizer(flags_obj.vocab_file)\n\n ds = data_pipeline.eval_input_fn(params)\n ds = ds.map(lambda x, y: x).take(_SINGLE_SAMPLE)\n ret = model.predict(ds)\n val_outputs, _ = ret\n length = len(val_outputs)\n for i in range(length):\n translate.translate_from_input(val_outputs[i], subtokenizer)\n\n def _create_callbacks(self, cur_log_dir, init_steps, params):\n \"\"\"Creates a list of callbacks.\"\"\"\n sfunc = optimizer.LearningRateFn(params[\"learning_rate\"],\n params[\"hidden_size\"],\n params[\"learning_rate_warmup_steps\"])\n scheduler_callback = optimizer.LearningRateScheduler(sfunc, init_steps)\n callbacks = misc.get_callbacks(params[\"steps_between_evals\"])\n callbacks.append(scheduler_callback)\n if params[\"enable_checkpointing\"]:\n ckpt_full_path = os.path.join(cur_log_dir, \"cp-{epoch:04d}.ckpt\")\n callbacks.append(\n tf.keras.callbacks.ModelCheckpoint(\n ckpt_full_path, save_weights_only=True))\n return callbacks\n\n def _load_weights_if_possible(self, model, init_weight_path=None):\n \"\"\"Loads model weights when it is provided.\"\"\"\n if init_weight_path:\n logging.info(\"Load weights: {}\".format(init_weight_path))\n # TODO(b/139414977): Having the same variable restoring method for both\n # TPU and GPU.\n if self.use_tpu:\n checkpoint = tf.train.Checkpoint(\n model=model, optimizer=self._create_optimizer())\n checkpoint.restore(init_weight_path)\n else:\n model.load_weights(init_weight_path)\n else:\n logging.info(\"Weights not loaded from path:{}\".format(init_weight_path))\n\n def _create_optimizer(self):\n \"\"\"Creates optimizer.\"\"\"\n params = self.params\n lr_schedule = optimizer.LearningRateSchedule(\n params[\"learning_rate\"], params[\"hidden_size\"],\n params[\"learning_rate_warmup_steps\"])\n opt = tf.keras.optimizers.Adam(\n lr_schedule if self.use_tpu else params[\"learning_rate\"],\n params[\"optimizer_adam_beta1\"],\n params[\"optimizer_adam_beta2\"],\n epsilon=params[\"optimizer_adam_epsilon\"])\n\n opt = performance.configure_optimizer(\n opt,\n use_float16=params[\"dtype\"] == tf.float16,\n use_graph_rewrite=self.flags_obj.fp16_implementation == \"graph_rewrite\",\n loss_scale=flags_core.get_loss_scale(\n self.flags_obj, default_for_fp16=\"dynamic\"))\n\n return opt\n\n\ndef _ensure_dir(log_dir):\n \"\"\"Makes log dir if not existed.\"\"\"\n if not tf.io.gfile.exists(log_dir):\n tf.io.gfile.makedirs(log_dir)\n\n\ndef main(_):\n flags_obj = flags.FLAGS\n with logger.benchmark_context(flags_obj):\n task = TransformerTask(flags_obj)\n\n # Execute flag override logic for better model performance\n if flags_obj.tf_gpu_thread_mode:\n keras_utils.set_gpu_thread_mode_and_count(\n per_gpu_thread_count=flags_obj.per_gpu_thread_count,\n gpu_thread_mode=flags_obj.tf_gpu_thread_mode,\n num_gpus=flags_obj.num_gpus,\n datasets_num_private_threads=flags_obj.datasets_num_private_threads)\n\n if flags_obj.mode == \"train\":\n task.train()\n elif flags_obj.mode == \"predict\":\n task.predict()\n elif flags_obj.mode == \"eval\":\n task.eval()\n else:\n raise ValueError(\"Invalid mode {}\".format(flags_obj.mode))\n\n\nif __name__ == \"__main__\":\n logging.set_verbosity(logging.INFO)\n misc.define_transformer_flags()\n app.run(main)\n"
] | [
[
"tensorflow.saved_model.loader.load"
],
[
"tensorflow.io.gfile.exists",
"tensorflow.keras.optimizers.Adam",
"tensorflow.io.gfile.makedirs",
"tensorflow.range",
"tensorflow.compat.v2.summary.create_noop_writer",
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.train.latest_checkpoint",
"tensorflow.name_scope",
"tensorflow.GradientTape",
"tensorflow.compat.v2.summary.create_file_writer",
"tensorflow.convert_to_tensor",
"tensorflow.train.Checkpoint",
"tensorflow.keras.metrics.Mean"
]
] |
tonyduan/ge-vae | [
"fe3325cb643900d09536b3e1d964443d25625781"
] | [
"src/models/ep.py"
] | [
"import torch\nimport torch.nn as nn\nfrom torch.distributions import Bernoulli\nfrom src.modules.attn import MAB, PMA, SAB, ISAB, ISABStack \nfrom src.utils import *\nfrom src.modules.mlp import *\n\n\nclass EdgePredictor(nn.Module):\n\n def __init__(self, embedding_dim, device):\n super().__init__()\n self.pairwise_query = ISABStack(8, embedding_dim, 256, num_heads = 4, \n num_inds = 16, device = device)\n self.device = device\n self.baseline = nn.Parameter(torch.zeros(1, device = device))\n self.scale1 = nn.Parameter(torch.zeros(1, device = device))\n\n def forward(self, E, V):\n mask = construct_embedding_mask(V).byte()\n Z1 = self.pairwise_query(E, mask)\n F = Z1 @ Z1.transpose(1, 2)\n return F * torch.exp(self.scale1) + self.baseline #+ \\\n\n def log_prob_per_edge(self, E, A, V):\n mask = construct_adjacency_mask(V)\n counts = V * (V - 1) / 2\n loss = Bernoulli(logits = self.forward(E, V)).log_prob(A)\n loss = torch.sum(torch.triu(loss, diagonal = 1) * mask, dim = (1, 2))\n return loss #/ counts\n\n"
] | [
[
"torch.zeros",
"torch.triu",
"torch.exp"
]
] |
kunalghosh/Multi_Fidelity_Prediction_GP | [
"c858554f5c1f0c4aafa12cf7c441bd2d56b115f5"
] | [
"mfgp/task2/init_train_idxs.py"
] | [
"# Run `init_train_idxs.py <int: dataset size> <int: initial training set size>`:\n# Creates a `train_idxs.npz` file with the initial set of training indices. \n# e.g `python init_train_idxs.py 64000 1000`\n\nimport sys\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\n\ndataset_size = int(sys.argv[1])\ninit_trainset_size = int(sys.argv[2])\nvalidation_set_size = 500 # usually training set is much larger so 500 is reasonable\n\nnp.random.seed(1)\n\ntrain_idxs, remaining_idxs = train_test_split(range(dataset_size), train_size = init_trainset_size, random_state=0)\nvalid_idxs, test_idxs = train_test_split(remaining_idxs, train_size = 500, random_state=0)\n\n# save the values in train_idxs.npy\nnp.savez(\"train_idxs.npz\", train_idxs=train_idxs, valid_idxs=valid_idxs, test_idxs=test_idxs)\n"
] | [
[
"numpy.savez",
"numpy.random.seed",
"sklearn.model_selection.train_test_split"
]
] |
hhy-ee/PedestrianDetection-NohNMS | [
"482078a6bd0ff8cf03fbf7f6988e475f75c56e57"
] | [
"tools/visualize_json_results.py"
] | [
"#!/usr/bin/env python\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\nimport argparse\nimport json\nimport numpy as np\nimport os\nfrom collections import defaultdict\nimport cv2\nimport tqdm\nfrom fvcore.common.file_io import PathManager\n\nfrom detectron2.data import DatasetCatalog, MetadataCatalog\nfrom detectron2.structures import Boxes, BoxMode, Instances\nfrom detectron2.utils.logger import setup_logger\nfrom detectron2.utils.visualizer import Visualizer\n\n\ndef create_instances(predictions, image_size):\n ret = Instances(image_size)\n\n score = np.asarray([x[\"score\"] for x in predictions])\n chosen = (score > args.conf_threshold).nonzero()[0]\n if chosen.shape[0] == 0:\n return None\n score = score[chosen]\n bbox = np.asarray([predictions[i][\"bbox\"] for i in chosen])\n bbox = BoxMode.convert(bbox, BoxMode.XYWH_ABS, BoxMode.XYXY_ABS)\n\n labels = np.asarray([dataset_id_map(predictions[i][\"category_id\"]) for i in chosen])\n\n ret.scores = score\n ret.pred_boxes = Boxes(bbox)\n ret.pred_classes = labels\n\n try:\n ret.pred_masks = [predictions[i][\"segmentation\"] for i in chosen]\n except KeyError:\n pass\n return ret\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=\"A script that visualizes the json predictions from COCO or LVIS dataset.\"\n )\n parser.add_argument(\"--input\", required=True, help=\"JSON file produced by the model\")\n parser.add_argument(\"--output\", required=True, help=\"output directory\")\n parser.add_argument(\"--dataset\", help=\"name of the dataset\", default=\"coco_2017_val\")\n parser.add_argument(\"--conf-threshold\", default=0.5, type=float, help=\"confidence threshold\")\n args = parser.parse_args()\n\n logger = setup_logger()\n\n with PathManager.open(args.input, \"r\") as f:\n predictions = json.load(f)\n\n pred_by_image = defaultdict(list)\n for p in predictions:\n pred_by_image[p[\"image_id\"]].append(p)\n\n dicts = list(DatasetCatalog.get(args.dataset))\n metadata = MetadataCatalog.get(args.dataset)\n if hasattr(metadata, \"thing_dataset_id_to_contiguous_id\"):\n\n def dataset_id_map(ds_id):\n return metadata.thing_dataset_id_to_contiguous_id[ds_id]\n\n elif \"lvis\" in args.dataset:\n # LVIS results are in the same format as COCO results, but have a different\n # mapping from dataset category id to contiguous category id in [0, #categories - 1]\n def dataset_id_map(ds_id):\n return ds_id - 1\n\n else:\n raise ValueError(\"Unsupported dataset: {}\".format(args.dataset))\n\n os.makedirs(args.output, exist_ok=True)\n\n for dic in tqdm.tqdm(dicts):\n img = cv2.imread(dic[\"file_name\"], cv2.IMREAD_COLOR)[:, :, ::-1]\n basename = os.path.basename(dic[\"file_name\"])\n\n predictions = create_instances(pred_by_image[dic[\"image_id\"]], img.shape[:2])\n if predictions is not None:\n vis = Visualizer(img, metadata)\n vis_pred = vis.draw_instance_predictions(predictions).get_image()\n else:\n vis_pred = img\n\n vis = Visualizer(img, metadata)\n vis_gt = vis.draw_dataset_dict(dic).get_image()\n\n concat = np.concatenate((vis_pred, vis_gt), axis=0)\n cv2.imwrite(os.path.join(args.output, basename), concat[:, :, ::-1])\n"
] | [
[
"numpy.concatenate",
"numpy.asarray"
]
] |
joshchang1112/gcnn-survey-paper | [
"591af8d6c4374378831cab2cdec79575e2540d79"
] | [
"utils/data_utils.py"
] | [
"#Copyright 2018 Google LLC\n#\n#Licensed under the Apache License, Version 2.0 (the \"License\");\n#you may not use this file except in compliance with the License.\n#You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n#Unless required by applicable law or agreed to in writing, software\n#distributed under the License is distributed on an \"AS IS\" BASIS,\n#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#See the License for the specific language governing permissions and\n#limitations under the License.\n\n\n\"\"\"Utils functions to load and process citation data.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport os\nimport pickle as pkl\nimport sys\n\nimport networkx as nx\nimport numpy as np\nimport scipy.sparse as sp\nfrom scipy.sparse.linalg.eigen.arpack import eigsh\nimport tensorflow as tf\nfrom third_party.gcn.gcn.utils import normalize_adj\nfrom third_party.gcn.gcn.utils import parse_index_file\nfrom third_party.gcn.gcn.utils import sample_mask\nfrom third_party.gcn.gcn.utils import sparse_to_tuple\nfrom third_party.gcn.gcn.utils import preprocess_features\n\n\ndef load_test_edge_mask(dataset_str, data_path, drop_edge_prop):\n \"\"\"Remove test edges by loading edge masks.\"\"\"\n edge_mask_path = os.path.join(\n data_path, 'emask.{}.remove{}.npz'.format(dataset_str, drop_edge_prop))\n with tf.gfile.Open(edge_mask_path) as f:\n mask = sp.load_npz(f)\n return mask\n\n\ndef load_edge_masks(dataset_str, data_path, adj_true, drop_edge_prop):\n \"\"\"Loads adjacency matrix as sparse matrix and masks for val & test links.\n\n Args:\n dataset_str: dataset to use\n data_path: path to data folder\n adj_true: true adjacency matrix in dense format,\n drop_edge_prop: proportion of edges to remove.\n\n Returns:\n adj_matrix: adjacency matrix\n train_mask: mask for train edges\n val_mask: mask for val edges\n test_mask: mask for test edges\n \"\"\"\n edge_mask_path = os.path.join(\n data_path, 'emask.{}.remove{}.'.format(dataset_str, drop_edge_prop))\n val_mask = sp.load_npz(edge_mask_path + 'val.npz')\n test_mask = sp.load_npz(edge_mask_path + 'test.npz')\n train_mask = 1. - val_mask.todense() - test_mask.todense()\n # remove val and test edges from true A\n adj_train = np.multiply(adj_true, train_mask)\n train_mask -= np.eye(train_mask.shape[0])\n return adj_train, sparse_to_tuple(val_mask), sparse_to_tuple(\n val_mask), sparse_to_tuple(test_mask)\n\n\ndef add_top_k_edges(data, edge_mask_path, gae_scores_path, topk, nb_nodes,\n norm_adj):\n \"\"\"Loads GAE scores and adds topK edges to train adjacency.\"\"\"\n test_mask = sp.load_npz(os.path.join(edge_mask_path, 'test_mask.npz'))\n train_mask = 1. 
- test_mask.todense()\n # remove val and test edges from true A\n adj_train_curr = np.multiply(data['adj_true'], train_mask)\n # Predict test edges using precomputed scores\n scores = np.load(os.path.join(gae_scores_path, 'gae_scores.npy'))\n # scores_mask = 1 - np.eye(nb_nodes)\n scores_mask = np.zeros((nb_nodes, nb_nodes))\n scores_mask[:140, 140:] = 1.\n scores_mask[140:, :140] = 1.\n scores = np.multiply(scores, scores_mask).reshape((-1,))\n threshold = scores[np.argsort(-scores)[topk]]\n adj_train_curr += 1 * (scores > threshold).reshape((nb_nodes, nb_nodes))\n adj_train_curr = 1 * (adj_train_curr > 0)\n if norm_adj:\n adj_train_norm = normalize_adj(data['adj_train'])\n else:\n adj_train_norm = sp.coo_matrix(data['adj_train'])\n return adj_train_curr, sparse_to_tuple(adj_train_norm)\n\n\ndef process_adj(adj, model_name):\n \"\"\"Symmetrically normalize adjacency matrix.\"\"\"\n if model_name == 'Cheby':\n laplacian = sp.eye(adj.shape[0]) - normalize_adj(adj - sp.eye(adj.shape[0]))\n # TODO(chamii): compare with\n # adj)\n largest_eigval, _ = eigsh(laplacian, 1, which='LM')\n laplacian_norm = (2. / largest_eigval[0]) * laplacian - sp.eye(adj.shape[0])\n return laplacian_norm\n else:\n return normalize_adj(adj)\n\n\ndef load_data(dataset_str, data_path):\n if dataset_str in ['cora', 'citeseer', 'pubmed']:\n return load_citation_data(dataset_str, data_path)\n else:\n return load_ppi_data(data_path)\n\n\ndef load_ppi_data(data_path):\n \"\"\"Load PPI dataset.\"\"\"\n with tf.gfile.Open(os.path.join(data_path, 'ppi.edges.npz')) as f:\n adj = sp.load_npz(f)\n\n with tf.gfile.Open(os.path.join(data_path, 'ppi.features.norm.npy')) as f:\n features = np.load(f)\n\n with tf.gfile.Open(os.path.join(data_path, 'ppi.labels.npz')) as f:\n labels = sp.load_npz(f).todense()\n\n train_mask = np.load(\n tf.gfile.Open(os.path.join(data_path, 'ppi.train_mask.npy'))) > 0\n val_mask = np.load(\n tf.gfile.Open(os.path.join(data_path, 'ppi.test_mask.npy'))) > 0\n test_mask = np.load(\n tf.gfile.Open(os.path.join(data_path, 'ppi.test_mask.npy'))) > 0\n\n return adj, features, labels, train_mask, val_mask, test_mask\n\n\ndef load_citation_data(dataset_str, data_path):\n \"\"\"Load data.\"\"\"\n names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']\n objects = {}\n for name in names:\n with tf.gfile.Open(\n os.path.join(data_path, 'ind.{}.{}'.format(dataset_str, name)),\n 'rb') as f:\n if sys.version_info > (3, 0):\n objects[name] = pkl.load(f) # , encoding='latin1') comment to pass lint\n else:\n objects[name] = pkl.load(f)\n\n test_idx_reorder = parse_index_file(\n os.path.join(data_path, 'ind.{}.test.index'.format(dataset_str)))\n test_idx_range = np.sort(test_idx_reorder)\n\n if dataset_str == 'citeseer':\n # Fix citeseer dataset (there are some isolated nodes in the graph)\n # Find isolated nodes, add them as zero-vecs into the right position\n test_idx_range_full = range(\n min(test_idx_reorder),\n max(test_idx_reorder) + 1)\n tx_extended = sp.lil_matrix((len(test_idx_range_full),\n objects['x'].shape[1]))\n tx_extended[test_idx_range - min(test_idx_range), :] = objects['tx']\n objects['tx'] = tx_extended\n ty_extended = np.zeros((len(test_idx_range_full),\n objects['y'].shape[1]))\n ty_extended[test_idx_range - min(test_idx_range), :] = objects['ty']\n objects['ty'] = ty_extended\n\n features = sp.vstack((objects['allx'], objects['tx'])).tolil()\n features[test_idx_reorder, :] = features[test_idx_range, :]\n adj = nx.adjacency_matrix(nx.from_dict_of_lists(objects['graph']))\n\n labels = 
np.vstack((objects['ally'], objects['ty']))\n labels[test_idx_reorder, :] = labels[test_idx_range, :]\n\n idx_test = test_idx_range.tolist()\n idx_train = range(len(objects['y']))\n idx_val = range(len(objects['y']), len(objects['y']) + 500)\n\n train_mask = sample_mask(idx_train, labels.shape[0])\n val_mask = sample_mask(idx_val, labels.shape[0])\n test_mask = sample_mask(idx_test, labels.shape[0])\n\n features = preprocess_features(features)\n return adj, features, labels, train_mask, val_mask, test_mask\n\n\ndef construct_feed_dict(adj_normalized, adj, features, placeholders):\n # construct feed dictionary\n feed_dict = dict()\n feed_dict.update({placeholders['features']: features})\n feed_dict.update({placeholders['adj']: adj_normalized})\n feed_dict.update({placeholders['adj_orig']: adj})\n return feed_dict\n\n\ndef mask_val_test_edges(adj, prop):\n \"\"\"Function to mask test and val edges.\"\"\"\n # NOTE: Splits are randomized and results might slightly\n # deviate from reported numbers in the paper.\n\n # Remove diagonal elements\n adj = adj - sp.dia_matrix(\n (adj.diagonal()[np.newaxis, :], [0]), shape=adj.shape)\n adj.eliminate_zeros()\n # Check that diag is zero:\n assert np.diag(adj.todense()).sum() == 0\n\n adj_triu = sp.triu(adj)\n adj_tuple = sparse_to_tuple(adj_triu)\n edges = adj_tuple[0]\n edges_all = sparse_to_tuple(adj)[0]\n num_test = int(np.floor(edges.shape[0] * prop))\n # num_val = int(np.floor(edges.shape[0] * 0.05)) # we keep 5% for validation\n # we keep 10% of training edges for validation\n num_val = int(np.floor((edges.shape[0] - num_test) * 0.05))\n\n all_edge_idx = range(edges.shape[0])\n np.random.shuffle(all_edge_idx)\n val_edge_idx = all_edge_idx[:num_val]\n test_edge_idx = all_edge_idx[num_val:(num_val + num_test)]\n test_edges = edges[test_edge_idx]\n val_edges = edges[val_edge_idx]\n train_edges = np.delete(\n edges, np.hstack([test_edge_idx, val_edge_idx]), axis=0)\n\n def ismember(a, b, tol=5):\n rows_close = np.all(np.round(a - b[:, None], tol) == 0, axis=-1)\n return np.any(rows_close)\n\n test_edges_false = []\n while len(test_edges_false) < len(test_edges):\n idx_i = np.random.randint(0, adj.shape[0])\n idx_j = np.random.randint(0, adj.shape[0])\n if idx_i == idx_j:\n continue\n if ismember([idx_i, idx_j], edges_all):\n continue\n if test_edges_false:\n if ismember([idx_j, idx_i], np.array(test_edges_false)):\n continue\n if ismember([idx_i, idx_j], np.array(test_edges_false)):\n continue\n test_edges_false.append([idx_i, idx_j])\n\n val_edges_false = []\n while len(val_edges_false) < len(val_edges):\n idx_i = np.random.randint(0, adj.shape[0])\n idx_j = np.random.randint(0, adj.shape[0])\n if idx_i == idx_j:\n continue\n if ismember([idx_i, idx_j], train_edges):\n continue\n if ismember([idx_j, idx_i], train_edges):\n continue\n if ismember([idx_i, idx_j], val_edges):\n continue\n if ismember([idx_j, idx_i], val_edges):\n continue\n if val_edges_false:\n if ismember([idx_j, idx_i], np.array(val_edges_false)):\n continue\n if ismember([idx_i, idx_j], np.array(val_edges_false)):\n continue\n val_edges_false.append([idx_i, idx_j])\n\n assert ~ismember(test_edges_false, edges_all)\n assert ~ismember(val_edges_false, edges_all)\n assert ~ismember(val_edges, train_edges)\n assert ~ismember(test_edges, train_edges)\n assert ~ismember(val_edges, test_edges)\n\n data = np.ones(train_edges.shape[0])\n\n # Re-build adj matrix\n adj_train = sp.csr_matrix((data, (train_edges[:, 0], train_edges[:, 1])),\n shape=adj.shape)\n adj_train = adj_train + 
adj_train.T\n\n # NOTE: these edge lists only contain single direction of edge!\n num_nodes = adj.shape[0]\n val_mask = np.zeros((num_nodes, num_nodes))\n for i, j in val_edges:\n val_mask[i, j] = 1\n val_mask[j, i] = 1\n for i, j in val_edges_false:\n val_mask[i, j] = 1\n val_mask[j, i] = 1\n test_mask = np.zeros((num_nodes, num_nodes))\n for i, j in test_edges:\n test_mask[i, j] = 1\n test_mask[j, i] = 1\n for i, j in test_edges_false:\n test_mask[i, j] = 1\n test_mask[j, i] = 1\n return adj_train, sparse_to_tuple(val_mask), sparse_to_tuple(test_mask)\n\n\ndef mask_test_edges(adj, prop):\n \"\"\"Function to mask test edges.\n\n Args:\n adj: scipy sparse matrix\n prop: proportion of edges to remove (float in [0, 1])\n\n Returns:\n adj_train: adjacency with edges removed\n test_edges: list of positive and negative test edges\n \"\"\"\n # Remove diagonal elements\n adj = adj - sp.dia_matrix(\n (adj.diagonal()[np.newaxis, :], [0]), shape=adj.shape)\n adj.eliminate_zeros()\n # Check that diag is zero:\n assert np.diag(adj.todense()).sum() == 0\n\n adj_triu = sp.triu(adj)\n adj_tuple = sparse_to_tuple(adj_triu)\n edges = adj_tuple[0]\n edges_all = sparse_to_tuple(adj)[0]\n num_test = int(np.floor(edges.shape[0] * prop))\n\n all_edge_idx = range(edges.shape[0])\n np.random.shuffle(all_edge_idx)\n test_edge_idx = all_edge_idx[:num_test]\n test_edges = edges[test_edge_idx]\n train_edges = np.delete(edges, test_edge_idx, axis=0)\n\n def ismember(a, b, tol=5):\n rows_close = np.all(np.round(a - b[:, None], tol) == 0, axis=-1)\n return np.any(rows_close)\n\n test_edges_false = []\n while len(test_edges_false) < len(test_edges):\n idx_i = np.random.randint(0, adj.shape[0])\n idx_j = np.random.randint(0, adj.shape[0])\n if idx_i == idx_j:\n continue\n if ismember([idx_i, idx_j], edges_all):\n continue\n if test_edges_false:\n if ismember([idx_j, idx_i], np.array(test_edges_false)):\n continue\n if ismember([idx_i, idx_j], np.array(test_edges_false)):\n continue\n test_edges_false.append([idx_i, idx_j])\n\n assert ~ismember(test_edges_false, edges_all)\n assert ~ismember(test_edges, train_edges)\n\n data = np.ones(train_edges.shape[0])\n\n # Re-build adj matrix\n adj_train = sp.csr_matrix((data, (train_edges[:, 0], train_edges[:, 1])),\n shape=adj.shape)\n adj_train = adj_train + adj_train.T\n\n # NOTE: these edge lists only contain single direction of edge!\n num_nodes = adj.shape[0]\n test_mask = np.zeros((num_nodes, num_nodes))\n for i, j in test_edges:\n test_mask[i, j] = 1\n test_mask[j, i] = 1\n for i, j in test_edges_false:\n test_mask[i, j] = 1\n test_mask[j, i] = 1\n return adj_train, sparse_to_tuple(test_mask)\n"
] | [
[
"numpy.ones",
"numpy.multiply",
"numpy.any",
"numpy.argsort",
"scipy.sparse.linalg.eigen.arpack.eigsh",
"numpy.vstack",
"numpy.delete",
"numpy.random.randint",
"numpy.eye",
"numpy.load",
"numpy.zeros",
"scipy.sparse.load_npz",
"scipy.sparse.eye",
"scipy.sparse.coo_matrix",
"numpy.hstack",
"numpy.sort",
"numpy.array",
"scipy.sparse.vstack",
"numpy.random.shuffle",
"scipy.sparse.csr_matrix",
"numpy.floor",
"scipy.sparse.triu",
"numpy.round",
"tensorflow.gfile.Open"
]
] |
yexianyi/AI_Practice | [
"80499ab3a06ac055641aa069fe1e37864c9e41c4"
] | [
"MachineLearning/DecisionTree/loan_delinquency.py"
] | [
"'''\nDecision Tree\nPredict if it is possible to default on the loan\n'''\nimport numpy as np\nfrom sklearn import tree\n\ndata = np.genfromtxt(\"exercise.csv\", delimiter=\",\")\n# get train data set\nx_data = data[1:, 1:-1]\n# get test data set\ny_data = data[1:, -1]\n\nprint(x_data)\nprint(y_data)\n\n# Create decision tree\ndtree = tree.DecisionTreeClassifier(min_samples_leaf=5)\ndtree.fit(x_data, y_data)\nprint(dtree.score(x_data, y_data))\n"
] | [
[
"sklearn.tree.DecisionTreeClassifier",
"numpy.genfromtxt"
]
] |
ffilotto/meshio | [
"4413be41e6a63e33273665986f42dab80d585d10"
] | [
"test/test_flac3d.py"
] | [
"import copy\nimport pathlib\nimport sys\n\nimport helpers\nimport numpy\nimport pytest\n\nimport meshio\n\n\[email protected](\n \"mesh, binary, data\",\n [\n (helpers.tet_mesh, False, []),\n (helpers.hex_mesh, False, []),\n (helpers.tet_mesh, False, [1, 2]),\n (helpers.tet_mesh, True, []),\n (helpers.hex_mesh, True, []),\n (helpers.tet_mesh, True, [1, 2]),\n ],\n)\ndef test(mesh, binary, data):\n if data:\n mesh = copy.deepcopy(mesh)\n mesh.cell_data[\"flac3d:zone\"] = [numpy.array(data)]\n helpers.write_read(\n lambda f, m: meshio.flac3d.write(f, m, binary=binary),\n meshio.flac3d.read,\n mesh,\n 1.0e-15,\n )\n\n\n# the failure perhaps has to do with dictionary ordering\[email protected](sys.version_info < (3, 6), reason=\"Fails with 3.5\")\[email protected](\n \"filename\", [\"flac3d_mesh_ex.f3grid\", \"flac3d_mesh_ex_bin.f3grid\"],\n)\ndef test_reference_file(filename):\n this_dir = pathlib.Path(__file__).resolve().parent\n filename = this_dir / \"meshes\" / \"flac3d\" / filename\n\n mesh = meshio.read(filename)\n\n # points\n assert numpy.isclose(mesh.points.sum(), 307.0)\n\n # cells\n ref_num_cells = [\n (\"hexahedron\", 45),\n (\"pyramid\", 9),\n (\"hexahedron\", 18),\n (\"wedge\", 9),\n (\"hexahedron\", 6),\n (\"wedge\", 3),\n (\"hexahedron\", 6),\n (\"wedge\", 3),\n (\"pyramid\", 6),\n (\"tetra\", 3),\n ]\n assert [(k, len(v)) for k, v in mesh.cells] == ref_num_cells\n # Cell data\n ref_sum_cell_data = [45, 9, 18, 9, 6, 3, 6, 3, 6, 3]\n assert [len(arr) for arr in mesh.cell_data[\"flac3d:zone\"]] == ref_sum_cell_data\n"
] | [
[
"numpy.array"
]
] |
aha66/xarray | [
"3cbd21aa8fd3a57c0dd324f2a276d83829518331"
] | [
"xarray/core/dataset.py"
] | [
"import copy\nimport datetime\nimport functools\nimport inspect\nimport sys\nimport warnings\nfrom collections import defaultdict\nfrom distutils.version import LooseVersion\nfrom html import escape\nfrom numbers import Number\nfrom operator import methodcaller\nfrom pathlib import Path\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n DefaultDict,\n Dict,\n Hashable,\n Iterable,\n Iterator,\n List,\n Mapping,\n MutableMapping,\n Optional,\n Sequence,\n Set,\n Tuple,\n TypeVar,\n Union,\n cast,\n overload,\n)\n\nimport numpy as np\nimport pandas as pd\n\nimport xarray as xr\n\nfrom ..coding.cftimeindex import _parse_array_of_cftime_strings\nfrom ..plot.dataset_plot import _Dataset_PlotMethods\nfrom . import (\n alignment,\n dtypes,\n duck_array_ops,\n formatting,\n formatting_html,\n groupby,\n ops,\n resample,\n rolling,\n utils,\n weighted,\n)\nfrom .alignment import _broadcast_helper, _get_broadcast_dims_map_common_coords, align\nfrom .common import (\n DataWithCoords,\n ImplementsDatasetReduce,\n _contains_datetime_like_objects,\n)\nfrom .coordinates import (\n DatasetCoordinates,\n assert_coordinate_consistent,\n remap_label_indexers,\n)\nfrom .duck_array_ops import datetime_to_numeric\nfrom .indexes import (\n Indexes,\n default_indexes,\n isel_variable_and_index,\n propagate_indexes,\n remove_unused_levels_categories,\n roll_index,\n)\nfrom .indexing import is_fancy_indexer\nfrom .merge import (\n dataset_merge_method,\n dataset_update_method,\n merge_coordinates_without_align,\n merge_data_and_coords,\n)\nfrom .missing import get_clean_interp_index\nfrom .options import OPTIONS, _get_keep_attrs\nfrom .pycompat import is_duck_dask_array, sparse_array_type\nfrom .utils import (\n Default,\n Frozen,\n HybridMappingProxy,\n SortedKeysDict,\n _default,\n decode_numpy_dict_values,\n drop_dims_from_indexers,\n either_dict_or_kwargs,\n hashable,\n infix_dims,\n is_dict_like,\n is_scalar,\n maybe_wrap_array,\n)\nfrom .variable import (\n IndexVariable,\n Variable,\n as_variable,\n assert_unique_multiindex_level_names,\n broadcast_variables,\n)\n\nif TYPE_CHECKING:\n from ..backends import AbstractDataStore, ZarrStore\n from .dataarray import DataArray\n from .merge import CoercibleMapping\n\n T_DSorDA = TypeVar(\"T_DSorDA\", DataArray, \"Dataset\")\n\n try:\n from dask.delayed import Delayed\n except ImportError:\n Delayed = None\n\n\n# list of attributes of pd.DatetimeIndex that are ndarrays of time info\n_DATETIMEINDEX_COMPONENTS = [\n \"year\",\n \"month\",\n \"day\",\n \"hour\",\n \"minute\",\n \"second\",\n \"microsecond\",\n \"nanosecond\",\n \"date\",\n \"time\",\n \"dayofyear\",\n \"weekofyear\",\n \"dayofweek\",\n \"quarter\",\n]\n\n\ndef _get_virtual_variable(\n variables, key: Hashable, level_vars: Mapping = None, dim_sizes: Mapping = None\n) -> Tuple[Hashable, Hashable, Variable]:\n \"\"\"Get a virtual variable (e.g., 'time.year' or a MultiIndex level)\n from a dict of xarray.Variable objects (if possible)\n \"\"\"\n if level_vars is None:\n level_vars = {}\n if dim_sizes is None:\n dim_sizes = {}\n\n if key in dim_sizes:\n data = pd.Index(range(dim_sizes[key]), name=key)\n variable = IndexVariable((key,), data)\n return key, key, variable\n\n if not isinstance(key, str):\n raise KeyError(key)\n\n split_key = key.split(\".\", 1)\n var_name: Optional[str]\n if len(split_key) == 2:\n ref_name, var_name = split_key\n elif len(split_key) == 1:\n ref_name, var_name = key, None\n else:\n raise KeyError(key)\n\n if ref_name in level_vars:\n dim_var = 
variables[level_vars[ref_name]]\n ref_var = dim_var.to_index_variable().get_level_variable(ref_name)\n else:\n ref_var = variables[ref_name]\n\n if var_name is None:\n virtual_var = ref_var\n var_name = key\n else:\n if _contains_datetime_like_objects(ref_var):\n ref_var = xr.DataArray(ref_var)\n data = getattr(ref_var.dt, var_name).data\n else:\n data = getattr(ref_var, var_name).data\n virtual_var = Variable(ref_var.dims, data)\n\n return ref_name, var_name, virtual_var\n\n\ndef calculate_dimensions(variables: Mapping[Hashable, Variable]) -> Dict[Hashable, int]:\n \"\"\"Calculate the dimensions corresponding to a set of variables.\n\n Returns dictionary mapping from dimension names to sizes. Raises ValueError\n if any of the dimension sizes conflict.\n \"\"\"\n dims: Dict[Hashable, int] = {}\n last_used = {}\n scalar_vars = {k for k, v in variables.items() if not v.dims}\n for k, var in variables.items():\n for dim, size in zip(var.dims, var.shape):\n if dim in scalar_vars:\n raise ValueError(\n \"dimension %r already exists as a scalar variable\" % dim\n )\n if dim not in dims:\n dims[dim] = size\n last_used[dim] = k\n elif dims[dim] != size:\n raise ValueError(\n \"conflicting sizes for dimension %r: \"\n \"length %s on %r and length %s on %r\"\n % (dim, size, k, dims[dim], last_used[dim])\n )\n return dims\n\n\ndef merge_indexes(\n indexes: Mapping[Hashable, Union[Hashable, Sequence[Hashable]]],\n variables: Mapping[Hashable, Variable],\n coord_names: Set[Hashable],\n append: bool = False,\n) -> Tuple[Dict[Hashable, Variable], Set[Hashable]]:\n \"\"\"Merge variables into multi-indexes.\n\n Not public API. Used in Dataset and DataArray set_index\n methods.\n \"\"\"\n vars_to_replace: Dict[Hashable, Variable] = {}\n vars_to_remove: List[Hashable] = []\n dims_to_replace: Dict[Hashable, Hashable] = {}\n error_msg = \"{} is not the name of an existing variable.\"\n\n for dim, var_names in indexes.items():\n if isinstance(var_names, str) or not isinstance(var_names, Sequence):\n var_names = [var_names]\n\n names: List[Hashable] = []\n codes: List[List[int]] = []\n levels: List[List[int]] = []\n current_index_variable = variables.get(dim)\n\n for n in var_names:\n try:\n var = variables[n]\n except KeyError:\n raise ValueError(error_msg.format(n))\n if (\n current_index_variable is not None\n and var.dims != current_index_variable.dims\n ):\n raise ValueError(\n \"dimension mismatch between %r %s and %r %s\"\n % (dim, current_index_variable.dims, n, var.dims)\n )\n\n if current_index_variable is not None and append:\n current_index = current_index_variable.to_index()\n if isinstance(current_index, pd.MultiIndex):\n names.extend(current_index.names)\n codes.extend(current_index.codes)\n levels.extend(current_index.levels)\n else:\n names.append(\"%s_level_0\" % dim)\n cat = pd.Categorical(current_index.values, ordered=True)\n codes.append(cat.codes)\n levels.append(cat.categories)\n\n if not len(names) and len(var_names) == 1:\n idx = pd.Index(variables[var_names[0]].values)\n\n else: # MultiIndex\n for n in var_names:\n try:\n var = variables[n]\n except KeyError:\n raise ValueError(error_msg.format(n))\n names.append(n)\n cat = pd.Categorical(var.values, ordered=True)\n codes.append(cat.codes)\n levels.append(cat.categories)\n\n idx = pd.MultiIndex(levels, codes, names=names)\n for n in names:\n dims_to_replace[n] = dim\n\n vars_to_replace[dim] = IndexVariable(dim, idx)\n vars_to_remove.extend(var_names)\n\n new_variables = {k: v for k, v in variables.items() if k not in vars_to_remove}\n 
new_variables.update(vars_to_replace)\n\n # update dimensions if necessary, GH: 3512\n for k, v in new_variables.items():\n if any(d in dims_to_replace for d in v.dims):\n new_dims = [dims_to_replace.get(d, d) for d in v.dims]\n new_variables[k] = v._replace(dims=new_dims)\n new_coord_names = coord_names | set(vars_to_replace)\n new_coord_names -= set(vars_to_remove)\n return new_variables, new_coord_names\n\n\ndef split_indexes(\n dims_or_levels: Union[Hashable, Sequence[Hashable]],\n variables: Mapping[Hashable, Variable],\n coord_names: Set[Hashable],\n level_coords: Mapping[Hashable, Hashable],\n drop: bool = False,\n) -> Tuple[Dict[Hashable, Variable], Set[Hashable]]:\n \"\"\"Extract (multi-)indexes (levels) as variables.\n\n Not public API. Used in Dataset and DataArray reset_index\n methods.\n \"\"\"\n if isinstance(dims_or_levels, str) or not isinstance(dims_or_levels, Sequence):\n dims_or_levels = [dims_or_levels]\n\n dim_levels: DefaultDict[Any, List[Hashable]] = defaultdict(list)\n dims = []\n for k in dims_or_levels:\n if k in level_coords:\n dim_levels[level_coords[k]].append(k)\n else:\n dims.append(k)\n\n vars_to_replace = {}\n vars_to_create: Dict[Hashable, Variable] = {}\n vars_to_remove = []\n\n for d in dims:\n index = variables[d].to_index()\n if isinstance(index, pd.MultiIndex):\n dim_levels[d] = index.names\n else:\n vars_to_remove.append(d)\n if not drop:\n vars_to_create[str(d) + \"_\"] = Variable(d, index, variables[d].attrs)\n\n for d, levs in dim_levels.items():\n index = variables[d].to_index()\n if len(levs) == index.nlevels:\n vars_to_remove.append(d)\n else:\n vars_to_replace[d] = IndexVariable(d, index.droplevel(levs))\n\n if not drop:\n for lev in levs:\n idx = index.get_level_values(lev)\n vars_to_create[idx.name] = Variable(d, idx, variables[d].attrs)\n\n new_variables = dict(variables)\n for v in set(vars_to_remove):\n del new_variables[v]\n new_variables.update(vars_to_replace)\n new_variables.update(vars_to_create)\n new_coord_names = (coord_names | set(vars_to_create)) - set(vars_to_remove)\n\n return new_variables, new_coord_names\n\n\ndef _assert_empty(args: tuple, msg: str = \"%s\") -> None:\n if args:\n raise ValueError(msg % args)\n\n\ndef _check_chunks_compatibility(var, chunks, preferred_chunks):\n for dim in var.dims:\n if dim not in chunks or (dim not in preferred_chunks):\n continue\n\n preferred_chunks_dim = preferred_chunks.get(dim)\n chunks_dim = chunks.get(dim)\n\n if isinstance(chunks_dim, int):\n chunks_dim = (chunks_dim,)\n else:\n chunks_dim = chunks_dim[:-1]\n\n if any(s % preferred_chunks_dim for s in chunks_dim):\n warnings.warn(\n f\"Specified Dask chunks {chunks[dim]} would separate \"\n f\"on disks chunk shape {preferred_chunks[dim]} for dimension {dim}. \"\n \"This could degrade performance. 
\"\n \"Consider rechunking after loading instead.\",\n stacklevel=2,\n )\n\n\ndef _get_chunk(var, chunks):\n # chunks need to be explicity computed to take correctly into accout\n # backend preferred chunking\n import dask.array as da\n\n if isinstance(var, IndexVariable):\n return {}\n\n if isinstance(chunks, int) or (chunks == \"auto\"):\n chunks = dict.fromkeys(var.dims, chunks)\n\n preferred_chunks = var.encoding.get(\"preferred_chunks\", {})\n preferred_chunks_list = [\n preferred_chunks.get(dim, shape) for dim, shape in zip(var.dims, var.shape)\n ]\n\n chunks_list = [\n chunks.get(dim, None) or preferred_chunks.get(dim, None) for dim in var.dims\n ]\n\n output_chunks_list = da.core.normalize_chunks(\n chunks_list,\n shape=var.shape,\n dtype=var.dtype,\n previous_chunks=preferred_chunks_list,\n )\n\n output_chunks = dict(zip(var.dims, output_chunks_list))\n _check_chunks_compatibility(var, output_chunks, preferred_chunks)\n\n return output_chunks\n\n\ndef _maybe_chunk(\n name,\n var,\n chunks,\n token=None,\n lock=None,\n name_prefix=\"xarray-\",\n overwrite_encoded_chunks=False,\n):\n from dask.base import tokenize\n\n if chunks is not None:\n chunks = {dim: chunks[dim] for dim in var.dims if dim in chunks}\n if var.ndim:\n # when rechunking by different amounts, make sure dask names change\n # by provinding chunks as an input to tokenize.\n # subtle bugs result otherwise. see GH3350\n token2 = tokenize(name, token if token else var._data, chunks)\n name2 = f\"{name_prefix}{name}-{token2}\"\n var = var.chunk(chunks, name=name2, lock=lock)\n\n if overwrite_encoded_chunks and var.chunks is not None:\n var.encoding[\"chunks\"] = tuple(x[0] for x in var.chunks)\n return var\n else:\n return var\n\n\ndef as_dataset(obj: Any) -> \"Dataset\":\n \"\"\"Cast the given object to a Dataset.\n\n Handles Datasets, DataArrays and dictionaries of variables. A new Dataset\n object is only created if the provided object is not already one.\n \"\"\"\n if hasattr(obj, \"to_dataset\"):\n obj = obj.to_dataset()\n if not isinstance(obj, Dataset):\n obj = Dataset(obj)\n return obj\n\n\ndef _get_func_args(func, param_names):\n \"\"\"Use `inspect.signature` to try accessing `func` args. 
Otherwise, ensure\n they are provided by user.\n \"\"\"\n try:\n func_args = inspect.signature(func).parameters\n except ValueError:\n func_args = {}\n if not param_names:\n raise ValueError(\n \"Unable to inspect `func` signature, and `param_names` was not provided.\"\n )\n if param_names:\n params = param_names\n else:\n params = list(func_args)[1:]\n if any(\n [(p.kind in [p.VAR_POSITIONAL, p.VAR_KEYWORD]) for p in func_args.values()]\n ):\n raise ValueError(\n \"`param_names` must be provided because `func` takes variable length arguments.\"\n )\n return params, func_args\n\n\ndef _initialize_curvefit_params(params, p0, bounds, func_args):\n \"\"\"Set initial guess and bounds for curvefit.\n Priority: 1) passed args 2) func signature 3) scipy defaults\n \"\"\"\n\n def _initialize_feasible(lb, ub):\n # Mimics functionality of scipy.optimize.minpack._initialize_feasible\n lb_finite = np.isfinite(lb)\n ub_finite = np.isfinite(ub)\n p0 = np.nansum(\n [\n 0.5 * (lb + ub) * int(lb_finite & ub_finite),\n (lb + 1) * int(lb_finite & ~ub_finite),\n (ub - 1) * int(~lb_finite & ub_finite),\n ]\n )\n return p0\n\n param_defaults = {p: 1 for p in params}\n bounds_defaults = {p: (-np.inf, np.inf) for p in params}\n for p in params:\n if p in func_args and func_args[p].default is not func_args[p].empty:\n param_defaults[p] = func_args[p].default\n if p in bounds:\n bounds_defaults[p] = tuple(bounds[p])\n if param_defaults[p] < bounds[p][0] or param_defaults[p] > bounds[p][1]:\n param_defaults[p] = _initialize_feasible(bounds[p][0], bounds[p][1])\n if p in p0:\n param_defaults[p] = p0[p]\n return param_defaults, bounds_defaults\n\n\nclass DataVariables(Mapping[Hashable, \"DataArray\"]):\n __slots__ = (\"_dataset\",)\n\n def __init__(self, dataset: \"Dataset\"):\n self._dataset = dataset\n\n def __iter__(self) -> Iterator[Hashable]:\n return (\n key\n for key in self._dataset._variables\n if key not in self._dataset._coord_names\n )\n\n def __len__(self) -> int:\n return len(self._dataset._variables) - len(self._dataset._coord_names)\n\n def __contains__(self, key: Hashable) -> bool:\n return key in self._dataset._variables and key not in self._dataset._coord_names\n\n def __getitem__(self, key: Hashable) -> \"DataArray\":\n if key not in self._dataset._coord_names:\n return cast(\"DataArray\", self._dataset[key])\n raise KeyError(key)\n\n def __repr__(self) -> str:\n return formatting.data_vars_repr(self)\n\n @property\n def variables(self) -> Mapping[Hashable, Variable]:\n all_variables = self._dataset.variables\n return Frozen({k: all_variables[k] for k in self})\n\n def _ipython_key_completions_(self):\n \"\"\"Provide method for the key-autocompletions in IPython. 
\"\"\"\n return [\n key\n for key in self._dataset._ipython_key_completions_()\n if key not in self._dataset._coord_names\n ]\n\n\nclass _LocIndexer:\n __slots__ = (\"dataset\",)\n\n def __init__(self, dataset: \"Dataset\"):\n self.dataset = dataset\n\n def __getitem__(self, key: Mapping[Hashable, Any]) -> \"Dataset\":\n if not utils.is_dict_like(key):\n raise TypeError(\"can only lookup dictionaries from Dataset.loc\")\n return self.dataset.sel(key)\n\n\nclass Dataset(Mapping, ImplementsDatasetReduce, DataWithCoords):\n \"\"\"A multi-dimensional, in memory, array database.\n\n A dataset resembles an in-memory representation of a NetCDF file,\n and consists of variables, coordinates and attributes which\n together form a self describing dataset.\n\n Dataset implements the mapping interface with keys given by variable\n names and values given by DataArray objects for each variable name.\n\n One dimensional variables with name equal to their dimension are\n index coordinates used for label based indexing.\n\n To load data from a file or file-like object, use the `open_dataset`\n function.\n\n Parameters\n ----------\n data_vars : dict-like, optional\n A mapping from variable names to :py:class:`~xarray.DataArray`\n objects, :py:class:`~xarray.Variable` objects or to tuples of\n the form ``(dims, data[, attrs])`` which can be used as\n arguments to create a new ``Variable``. Each dimension must\n have the same length in all variables in which it appears.\n\n The following notations are accepted:\n\n - mapping {var name: DataArray}\n - mapping {var name: Variable}\n - mapping {var name: (dimension name, array-like)}\n - mapping {var name: (tuple of dimension names, array-like)}\n - mapping {dimension name: array-like}\n (it will be automatically moved to coords, see below)\n\n Each dimension must have the same length in all variables in\n which it appears.\n coords : dict-like, optional\n Another mapping in similar form as the `data_vars` argument,\n except the each item is saved on the dataset as a \"coordinate\".\n These variables have an associated meaning: they describe\n constant/fixed/independent quantities, unlike the\n varying/measured/dependent quantities that belong in\n `variables`. Coordinates values may be given by 1-dimensional\n arrays or scalars, in which case `dims` do not need to be\n supplied: 1D arrays will be assumed to give index values along\n the dimension with the same name.\n\n The following notations are accepted:\n\n - mapping {coord name: DataArray}\n - mapping {coord name: Variable}\n - mapping {coord name: (dimension name, array-like)}\n - mapping {coord name: (tuple of dimension names, array-like)}\n - mapping {dimension name: array-like}\n (the dimension name is implicitly set to be the same as the\n coord name)\n\n The last notation implies that the coord name is the same as\n the dimension name.\n\n attrs : dict-like, optional\n Global attributes to save on this dataset.\n\n Examples\n --------\n Create data:\n\n >>> np.random.seed(0)\n >>> temperature = 15 + 8 * np.random.randn(2, 2, 3)\n >>> precipitation = 10 * np.random.rand(2, 2, 3)\n >>> lon = [[-99.83, -99.32], [-99.79, -99.23]]\n >>> lat = [[42.25, 42.21], [42.63, 42.59]]\n >>> time = pd.date_range(\"2014-09-06\", periods=3)\n >>> reference_time = pd.Timestamp(\"2014-09-05\")\n\n Initialize a dataset with multiple dimensions:\n\n >>> ds = xr.Dataset(\n ... data_vars=dict(\n ... temperature=([\"x\", \"y\", \"time\"], temperature),\n ... precipitation=([\"x\", \"y\", \"time\"], precipitation),\n ... 
),\n ... coords=dict(\n ... lon=([\"x\", \"y\"], lon),\n ... lat=([\"x\", \"y\"], lat),\n ... time=time,\n ... reference_time=reference_time,\n ... ),\n ... attrs=dict(description=\"Weather related data.\"),\n ... )\n >>> ds\n <xarray.Dataset>\n Dimensions: (time: 3, x: 2, y: 2)\n Coordinates:\n lon (x, y) float64 -99.83 -99.32 -99.79 -99.23\n lat (x, y) float64 42.25 42.21 42.63 42.59\n * time (time) datetime64[ns] 2014-09-06 2014-09-07 2014-09-08\n reference_time datetime64[ns] 2014-09-05\n Dimensions without coordinates: x, y\n Data variables:\n temperature (x, y, time) float64 29.11 18.2 22.83 ... 18.28 16.15 26.63\n precipitation (x, y, time) float64 5.68 9.256 0.7104 ... 7.992 4.615 7.805\n Attributes:\n description: Weather related data.\n\n Find out where the coldest temperature was and what values the\n other variables had:\n\n >>> ds.isel(ds.temperature.argmin(...))\n <xarray.Dataset>\n Dimensions: ()\n Coordinates:\n lon float64 -99.32\n lat float64 42.21\n time datetime64[ns] 2014-09-08\n reference_time datetime64[ns] 2014-09-05\n Data variables:\n temperature float64 7.182\n precipitation float64 8.326\n Attributes:\n description: Weather related data.\n \"\"\"\n\n _attrs: Optional[Dict[Hashable, Any]]\n _cache: Dict[str, Any]\n _coord_names: Set[Hashable]\n _dims: Dict[Hashable, int]\n _encoding: Optional[Dict[Hashable, Any]]\n _close: Optional[Callable[[], None]]\n _indexes: Optional[Dict[Hashable, pd.Index]]\n _variables: Dict[Hashable, Variable]\n\n __slots__ = (\n \"_attrs\",\n \"_cache\",\n \"_coord_names\",\n \"_dims\",\n \"_encoding\",\n \"_close\",\n \"_indexes\",\n \"_variables\",\n \"__weakref__\",\n )\n\n _groupby_cls = groupby.DatasetGroupBy\n _rolling_cls = rolling.DatasetRolling\n _coarsen_cls = rolling.DatasetCoarsen\n _resample_cls = resample.DatasetResample\n _weighted_cls = weighted.DatasetWeighted\n\n def __init__(\n self,\n # could make a VariableArgs to use more generally, and refine these\n # categories\n data_vars: Mapping[Hashable, Any] = None,\n coords: Mapping[Hashable, Any] = None,\n attrs: Mapping[Hashable, Any] = None,\n ):\n # TODO(shoyer): expose indexes as a public argument in __init__\n\n if data_vars is None:\n data_vars = {}\n if coords is None:\n coords = {}\n\n both_data_and_coords = set(data_vars) & set(coords)\n if both_data_and_coords:\n raise ValueError(\n \"variables %r are found in both data_vars and coords\"\n % both_data_and_coords\n )\n\n if isinstance(coords, Dataset):\n coords = coords.variables\n\n variables, coord_names, dims, indexes, _ = merge_data_and_coords(\n data_vars, coords, compat=\"broadcast_equals\"\n )\n\n self._attrs = dict(attrs) if attrs is not None else None\n self._close = None\n self._encoding = None\n self._variables = variables\n self._coord_names = coord_names\n self._dims = dims\n self._indexes = indexes\n\n @classmethod\n def load_store(cls, store, decoder=None) -> \"Dataset\":\n \"\"\"Create a new dataset from the contents of a backends.*DataStore\n object\n \"\"\"\n variables, attributes = store.load()\n if decoder:\n variables, attributes = decoder(variables, attributes)\n obj = cls(variables, attrs=attributes)\n obj.set_close(store.close)\n return obj\n\n @property\n def variables(self) -> Mapping[Hashable, Variable]:\n \"\"\"Low level interface to Dataset contents as dict of Variable objects.\n\n This ordered dictionary is frozen to prevent mutation that could\n violate Dataset invariants. 
It contains all variable objects\n constituting the Dataset, including both data variables and\n coordinates.\n \"\"\"\n return Frozen(self._variables)\n\n @property\n def attrs(self) -> Dict[Hashable, Any]:\n \"\"\"Dictionary of global attributes on this dataset\"\"\"\n if self._attrs is None:\n self._attrs = {}\n return self._attrs\n\n @attrs.setter\n def attrs(self, value: Mapping[Hashable, Any]) -> None:\n self._attrs = dict(value)\n\n @property\n def encoding(self) -> Dict:\n \"\"\"Dictionary of global encoding attributes on this dataset\"\"\"\n if self._encoding is None:\n self._encoding = {}\n return self._encoding\n\n @encoding.setter\n def encoding(self, value: Mapping) -> None:\n self._encoding = dict(value)\n\n @property\n def dims(self) -> Mapping[Hashable, int]:\n \"\"\"Mapping from dimension names to lengths.\n\n Cannot be modified directly, but is updated when adding new variables.\n\n Note that type of this object differs from `DataArray.dims`.\n See `Dataset.sizes` and `DataArray.sizes` for consistently named\n properties.\n \"\"\"\n return Frozen(SortedKeysDict(self._dims))\n\n @property\n def sizes(self) -> Mapping[Hashable, int]:\n \"\"\"Mapping from dimension names to lengths.\n\n Cannot be modified directly, but is updated when adding new variables.\n\n This is an alias for `Dataset.dims` provided for the benefit of\n consistency with `DataArray.sizes`.\n\n See Also\n --------\n DataArray.sizes\n \"\"\"\n return self.dims\n\n def load(self, **kwargs) -> \"Dataset\":\n \"\"\"Manually trigger loading and/or computation of this dataset's data\n from disk or a remote source into memory and return this dataset.\n Unlike compute, the original dataset is modified and returned.\n\n Normally, it should not be necessary to call this method in user code,\n because all xarray functions should either work on deferred data or\n load data automatically. 
However, this method can be necessary when\n working with many file objects on disk.\n\n Parameters\n ----------\n **kwargs : dict\n Additional keyword arguments passed on to ``dask.compute``.\n\n See Also\n --------\n dask.compute\n \"\"\"\n # access .data to coerce everything to numpy or dask arrays\n lazy_data = {\n k: v._data for k, v in self.variables.items() if is_duck_dask_array(v._data)\n }\n if lazy_data:\n import dask.array as da\n\n # evaluate all the dask arrays simultaneously\n evaluated_data = da.compute(*lazy_data.values(), **kwargs)\n\n for k, data in zip(lazy_data, evaluated_data):\n self.variables[k].data = data\n\n # load everything else sequentially\n for k, v in self.variables.items():\n if k not in lazy_data:\n v.load()\n\n return self\n\n def __dask_tokenize__(self):\n from dask.base import normalize_token\n\n return normalize_token(\n (type(self), self._variables, self._coord_names, self._attrs)\n )\n\n def __dask_graph__(self):\n graphs = {k: v.__dask_graph__() for k, v in self.variables.items()}\n graphs = {k: v for k, v in graphs.items() if v is not None}\n if not graphs:\n return None\n else:\n try:\n from dask.highlevelgraph import HighLevelGraph\n\n return HighLevelGraph.merge(*graphs.values())\n except ImportError:\n from dask import sharedict\n\n return sharedict.merge(*graphs.values())\n\n def __dask_keys__(self):\n import dask\n\n return [\n v.__dask_keys__()\n for v in self.variables.values()\n if dask.is_dask_collection(v)\n ]\n\n def __dask_layers__(self):\n import dask\n\n return sum(\n [\n v.__dask_layers__()\n for v in self.variables.values()\n if dask.is_dask_collection(v)\n ],\n (),\n )\n\n @property\n def __dask_optimize__(self):\n import dask.array as da\n\n return da.Array.__dask_optimize__\n\n @property\n def __dask_scheduler__(self):\n import dask.array as da\n\n return da.Array.__dask_scheduler__\n\n def __dask_postcompute__(self):\n return self._dask_postcompute, ()\n\n def __dask_postpersist__(self):\n return self._dask_postpersist, ()\n\n def _dask_postcompute(self, results: \"Iterable[Variable]\") -> \"Dataset\":\n import dask\n\n variables = {}\n results_iter = iter(results)\n\n for k, v in self._variables.items():\n if dask.is_dask_collection(v):\n rebuild, args = v.__dask_postcompute__()\n v = rebuild(next(results_iter), *args)\n variables[k] = v\n\n return Dataset._construct_direct(\n variables,\n self._coord_names,\n self._dims,\n self._attrs,\n self._indexes,\n self._encoding,\n self._close,\n )\n\n def _dask_postpersist(\n self, dsk: Mapping, *, rename: Mapping[str, str] = None\n ) -> \"Dataset\":\n from dask import is_dask_collection\n from dask.highlevelgraph import HighLevelGraph\n from dask.optimization import cull\n\n variables = {}\n\n for k, v in self._variables.items():\n if not is_dask_collection(v):\n variables[k] = v\n continue\n\n if isinstance(dsk, HighLevelGraph):\n # dask >= 2021.3\n # __dask_postpersist__() was called by dask.highlevelgraph.\n # Don't use dsk.cull(), as we need to prevent partial layers:\n # https://github.com/dask/dask/issues/7137\n layers = v.__dask_layers__()\n if rename:\n layers = [rename.get(k, k) for k in layers]\n dsk2 = dsk.cull_layers(layers)\n elif rename: # pragma: nocover\n # At the moment of writing, this is only for forward compatibility.\n # replace_name_in_key requires dask >= 2021.3.\n from dask.base import flatten, replace_name_in_key\n\n keys = [\n replace_name_in_key(k, rename) for k in flatten(v.__dask_keys__())\n ]\n dsk2, _ = cull(dsk, keys)\n else:\n # __dask_postpersist__() 
was called by dask.optimize or dask.persist\n dsk2, _ = cull(dsk, v.__dask_keys__())\n\n rebuild, args = v.__dask_postpersist__()\n # rename was added in dask 2021.3\n kwargs = {\"rename\": rename} if rename else {}\n variables[k] = rebuild(dsk2, *args, **kwargs)\n\n return Dataset._construct_direct(\n variables,\n self._coord_names,\n self._dims,\n self._attrs,\n self._indexes,\n self._encoding,\n self._close,\n )\n\n def compute(self, **kwargs) -> \"Dataset\":\n \"\"\"Manually trigger loading and/or computation of this dataset's data\n from disk or a remote source into memory and return a new dataset.\n Unlike load, the original dataset is left unaltered.\n\n Normally, it should not be necessary to call this method in user code,\n because all xarray functions should either work on deferred data or\n load data automatically. However, this method can be necessary when\n working with many file objects on disk.\n\n Parameters\n ----------\n **kwargs : dict\n Additional keyword arguments passed on to ``dask.compute``.\n\n See Also\n --------\n dask.compute\n \"\"\"\n new = self.copy(deep=False)\n return new.load(**kwargs)\n\n def _persist_inplace(self, **kwargs) -> \"Dataset\":\n \"\"\"Persist all Dask arrays in memory\"\"\"\n # access .data to coerce everything to numpy or dask arrays\n lazy_data = {\n k: v._data for k, v in self.variables.items() if is_duck_dask_array(v._data)\n }\n if lazy_data:\n import dask\n\n # evaluate all the dask arrays simultaneously\n evaluated_data = dask.persist(*lazy_data.values(), **kwargs)\n\n for k, data in zip(lazy_data, evaluated_data):\n self.variables[k].data = data\n\n return self\n\n def persist(self, **kwargs) -> \"Dataset\":\n \"\"\"Trigger computation, keeping data as dask arrays\n\n This operation can be used to trigger computation on underlying dask\n arrays, similar to ``.compute()`` or ``.load()``. However this\n operation keeps the data as dask arrays. This is particularly useful\n when using the dask.distributed scheduler and you want to load a large\n amount of data into distributed memory.\n\n Parameters\n ----------\n **kwargs : dict\n Additional keyword arguments passed on to ``dask.persist``.\n\n See Also\n --------\n dask.persist\n \"\"\"\n new = self.copy(deep=False)\n return new._persist_inplace(**kwargs)\n\n @classmethod\n def _construct_direct(\n cls,\n variables,\n coord_names,\n dims=None,\n attrs=None,\n indexes=None,\n encoding=None,\n close=None,\n ):\n \"\"\"Shortcut around __init__ for internal use when we want to skip\n costly validation\n \"\"\"\n if dims is None:\n dims = calculate_dimensions(variables)\n obj = object.__new__(cls)\n obj._variables = variables\n obj._coord_names = coord_names\n obj._dims = dims\n obj._indexes = indexes\n obj._attrs = attrs\n obj._close = close\n obj._encoding = encoding\n return obj\n\n def _replace(\n self,\n variables: Dict[Hashable, Variable] = None,\n coord_names: Set[Hashable] = None,\n dims: Dict[Any, int] = None,\n attrs: Union[Dict[Hashable, Any], None, Default] = _default,\n indexes: Union[Dict[Any, pd.Index], None, Default] = _default,\n encoding: Union[dict, None, Default] = _default,\n inplace: bool = False,\n ) -> \"Dataset\":\n \"\"\"Fastpath constructor for internal use.\n\n Returns an object with optionally with replaced attributes.\n\n Explicitly passed arguments are *not* copied when placed on the new\n dataset. 
It is up to the caller to ensure that they have the right type\n and are not used elsewhere.\n \"\"\"\n if inplace:\n if variables is not None:\n self._variables = variables\n if coord_names is not None:\n self._coord_names = coord_names\n if dims is not None:\n self._dims = dims\n if attrs is not _default:\n self._attrs = attrs\n if indexes is not _default:\n self._indexes = indexes\n if encoding is not _default:\n self._encoding = encoding\n obj = self\n else:\n if variables is None:\n variables = self._variables.copy()\n if coord_names is None:\n coord_names = self._coord_names.copy()\n if dims is None:\n dims = self._dims.copy()\n if attrs is _default:\n attrs = copy.copy(self._attrs)\n if indexes is _default:\n indexes = copy.copy(self._indexes)\n if encoding is _default:\n encoding = copy.copy(self._encoding)\n obj = self._construct_direct(\n variables, coord_names, dims, attrs, indexes, encoding\n )\n return obj\n\n def _replace_with_new_dims(\n self,\n variables: Dict[Hashable, Variable],\n coord_names: set = None,\n attrs: Union[Dict[Hashable, Any], None, Default] = _default,\n indexes: Union[Dict[Hashable, pd.Index], None, Default] = _default,\n inplace: bool = False,\n ) -> \"Dataset\":\n \"\"\"Replace variables with recalculated dimensions.\"\"\"\n dims = calculate_dimensions(variables)\n return self._replace(\n variables, coord_names, dims, attrs, indexes, inplace=inplace\n )\n\n def _replace_vars_and_dims(\n self,\n variables: Dict[Hashable, Variable],\n coord_names: set = None,\n dims: Dict[Hashable, int] = None,\n attrs: Union[Dict[Hashable, Any], None, Default] = _default,\n inplace: bool = False,\n ) -> \"Dataset\":\n \"\"\"Deprecated version of _replace_with_new_dims().\n\n Unlike _replace_with_new_dims(), this method always recalculates\n indexes from variables.\n \"\"\"\n if dims is None:\n dims = calculate_dimensions(variables)\n return self._replace(\n variables, coord_names, dims, attrs, indexes=None, inplace=inplace\n )\n\n def _overwrite_indexes(self, indexes: Mapping[Any, pd.Index]) -> \"Dataset\":\n if not indexes:\n return self\n\n variables = self._variables.copy()\n new_indexes = dict(self.indexes)\n for name, idx in indexes.items():\n variables[name] = IndexVariable(name, idx)\n new_indexes[name] = idx\n obj = self._replace(variables, indexes=new_indexes)\n\n # switch from dimension to level names, if necessary\n dim_names: Dict[Hashable, str] = {}\n for dim, idx in indexes.items():\n if not isinstance(idx, pd.MultiIndex) and idx.name != dim:\n dim_names[dim] = idx.name\n if dim_names:\n obj = obj.rename(dim_names)\n return obj\n\n def copy(self, deep: bool = False, data: Mapping = None) -> \"Dataset\":\n \"\"\"Returns a copy of this dataset.\n\n If `deep=True`, a deep copy is made of each of the component variables.\n Otherwise, a shallow copy of each of the component variable is made, so\n that the underlying memory region of the new dataset is the same as in\n the original dataset.\n\n Use `data` to create a new object with the same structure as\n original but entirely new data.\n\n Parameters\n ----------\n deep : bool, optional\n Whether each component variable is loaded into memory and copied onto\n the new object. Default is False.\n data : dict-like, optional\n Data to use in the new object. Each item in `data` must have same\n shape as corresponding data variable in original. 
When `data` is\n used, `deep` is ignored for the data variables and only used for\n coords.\n\n Returns\n -------\n object : Dataset\n New object with dimensions, attributes, coordinates, name, encoding,\n and optionally data copied from original.\n\n Examples\n --------\n Shallow copy versus deep copy\n\n >>> da = xr.DataArray(np.random.randn(2, 3))\n >>> ds = xr.Dataset(\n ... {\"foo\": da, \"bar\": (\"x\", [-1, 2])},\n ... coords={\"x\": [\"one\", \"two\"]},\n ... )\n >>> ds.copy()\n <xarray.Dataset>\n Dimensions: (dim_0: 2, dim_1: 3, x: 2)\n Coordinates:\n * x (x) <U3 'one' 'two'\n Dimensions without coordinates: dim_0, dim_1\n Data variables:\n foo (dim_0, dim_1) float64 1.764 0.4002 0.9787 2.241 1.868 -0.9773\n bar (x) int64 -1 2\n\n >>> ds_0 = ds.copy(deep=False)\n >>> ds_0[\"foo\"][0, 0] = 7\n >>> ds_0\n <xarray.Dataset>\n Dimensions: (dim_0: 2, dim_1: 3, x: 2)\n Coordinates:\n * x (x) <U3 'one' 'two'\n Dimensions without coordinates: dim_0, dim_1\n Data variables:\n foo (dim_0, dim_1) float64 7.0 0.4002 0.9787 2.241 1.868 -0.9773\n bar (x) int64 -1 2\n\n >>> ds\n <xarray.Dataset>\n Dimensions: (dim_0: 2, dim_1: 3, x: 2)\n Coordinates:\n * x (x) <U3 'one' 'two'\n Dimensions without coordinates: dim_0, dim_1\n Data variables:\n foo (dim_0, dim_1) float64 7.0 0.4002 0.9787 2.241 1.868 -0.9773\n bar (x) int64 -1 2\n\n Changing the data using the ``data`` argument maintains the\n structure of the original object, but with the new data. Original\n object is unaffected.\n\n >>> ds.copy(data={\"foo\": np.arange(6).reshape(2, 3), \"bar\": [\"a\", \"b\"]})\n <xarray.Dataset>\n Dimensions: (dim_0: 2, dim_1: 3, x: 2)\n Coordinates:\n * x (x) <U3 'one' 'two'\n Dimensions without coordinates: dim_0, dim_1\n Data variables:\n foo (dim_0, dim_1) int64 0 1 2 3 4 5\n bar (x) <U1 'a' 'b'\n\n >>> ds\n <xarray.Dataset>\n Dimensions: (dim_0: 2, dim_1: 3, x: 2)\n Coordinates:\n * x (x) <U3 'one' 'two'\n Dimensions without coordinates: dim_0, dim_1\n Data variables:\n foo (dim_0, dim_1) float64 7.0 0.4002 0.9787 2.241 1.868 -0.9773\n bar (x) int64 -1 2\n\n See Also\n --------\n pandas.DataFrame.copy\n \"\"\"\n if data is None:\n variables = {k: v.copy(deep=deep) for k, v in self._variables.items()}\n elif not utils.is_dict_like(data):\n raise ValueError(\"Data must be dict-like\")\n else:\n var_keys = set(self.data_vars.keys())\n data_keys = set(data.keys())\n keys_not_in_vars = data_keys - var_keys\n if keys_not_in_vars:\n raise ValueError(\n \"Data must only contain variables in original \"\n \"dataset. Extra variables: {}\".format(keys_not_in_vars)\n )\n keys_missing_from_data = var_keys - data_keys\n if keys_missing_from_data:\n raise ValueError(\n \"Data must contain all variables in original \"\n \"dataset. 
Data is missing {}\".format(keys_missing_from_data)\n )\n variables = {\n k: v.copy(deep=deep, data=data.get(k))\n for k, v in self._variables.items()\n }\n\n attrs = copy.deepcopy(self._attrs) if deep else copy.copy(self._attrs)\n\n return self._replace(variables, attrs=attrs)\n\n @property\n def _level_coords(self) -> Dict[str, Hashable]:\n \"\"\"Return a mapping of all MultiIndex levels and their corresponding\n coordinate name.\n \"\"\"\n level_coords: Dict[str, Hashable] = {}\n for name, index in self.indexes.items():\n if isinstance(index, pd.MultiIndex):\n level_names = index.names\n (dim,) = self.variables[name].dims\n level_coords.update({lname: dim for lname in level_names})\n return level_coords\n\n def _copy_listed(self, names: Iterable[Hashable]) -> \"Dataset\":\n \"\"\"Create a new Dataset with the listed variables from this dataset and\n the all relevant coordinates. Skips all validation.\n \"\"\"\n variables: Dict[Hashable, Variable] = {}\n coord_names = set()\n indexes: Dict[Hashable, pd.Index] = {}\n\n for name in names:\n try:\n variables[name] = self._variables[name]\n except KeyError:\n ref_name, var_name, var = _get_virtual_variable(\n self._variables, name, self._level_coords, self.dims\n )\n variables[var_name] = var\n if ref_name in self._coord_names or ref_name in self.dims:\n coord_names.add(var_name)\n if (var_name,) == var.dims:\n indexes[var_name] = var.to_index()\n\n needed_dims: Set[Hashable] = set()\n for v in variables.values():\n needed_dims.update(v.dims)\n\n dims = {k: self.dims[k] for k in needed_dims}\n\n # preserves ordering of coordinates\n for k in self._variables:\n if k not in self._coord_names:\n continue\n\n if set(self.variables[k].dims) <= needed_dims:\n variables[k] = self._variables[k]\n coord_names.add(k)\n if k in self.indexes:\n indexes[k] = self.indexes[k]\n\n return self._replace(variables, coord_names, dims, indexes=indexes)\n\n def _construct_dataarray(self, name: Hashable) -> \"DataArray\":\n \"\"\"Construct a DataArray by indexing this dataset\"\"\"\n from .dataarray import DataArray\n\n try:\n variable = self._variables[name]\n except KeyError:\n _, name, variable = _get_virtual_variable(\n self._variables, name, self._level_coords, self.dims\n )\n\n needed_dims = set(variable.dims)\n\n coords: Dict[Hashable, Variable] = {}\n # preserve ordering\n for k in self._variables:\n if k in self._coord_names and set(self.variables[k].dims) <= needed_dims:\n coords[k] = self.variables[k]\n\n if self._indexes is None:\n indexes = None\n else:\n indexes = {k: v for k, v in self._indexes.items() if k in coords}\n\n return DataArray(variable, coords, name=name, indexes=indexes, fastpath=True)\n\n def __copy__(self) -> \"Dataset\":\n return self.copy(deep=False)\n\n def __deepcopy__(self, memo=None) -> \"Dataset\":\n # memo does nothing but is required for compatibility with\n # copy.deepcopy\n return self.copy(deep=True)\n\n @property\n def _attr_sources(self) -> Iterable[Mapping[Hashable, Any]]:\n \"\"\"Places to look-up items for attribute-style access\"\"\"\n yield from self._item_sources\n yield self.attrs\n\n @property\n def _item_sources(self) -> Iterable[Mapping[Hashable, Any]]:\n \"\"\"Places to look-up items for key-completion\"\"\"\n yield self.data_vars\n yield HybridMappingProxy(keys=self._coord_names, mapping=self.coords)\n\n # virtual coordinates\n yield HybridMappingProxy(keys=self.dims, mapping=self)\n\n # uses empty dict -- everything here can already be found in self.coords.\n yield HybridMappingProxy(keys=self._level_coords, 
mapping={})\n\n def __contains__(self, key: object) -> bool:\n \"\"\"The 'in' operator will return true or false depending on whether\n 'key' is an array in the dataset or not.\n \"\"\"\n return key in self._variables\n\n def __len__(self) -> int:\n return len(self.data_vars)\n\n def __bool__(self) -> bool:\n return bool(self.data_vars)\n\n def __iter__(self) -> Iterator[Hashable]:\n return iter(self.data_vars)\n\n def __array__(self, dtype=None):\n raise TypeError(\n \"cannot directly convert an xarray.Dataset into a \"\n \"numpy array. Instead, create an xarray.DataArray \"\n \"first, either with indexing on the Dataset or by \"\n \"invoking the `to_array()` method.\"\n )\n\n @property\n def nbytes(self) -> int:\n return sum(v.nbytes for v in self.variables.values())\n\n @property\n def loc(self) -> _LocIndexer:\n \"\"\"Attribute for location based indexing. Only supports __getitem__,\n and only when the key is a dict of the form {dim: labels}.\n \"\"\"\n return _LocIndexer(self)\n\n # FIXME https://github.com/python/mypy/issues/7328\n @overload\n def __getitem__(self, key: Mapping) -> \"Dataset\": # type: ignore[misc]\n ...\n\n @overload\n def __getitem__(self, key: Hashable) -> \"DataArray\": # type: ignore[misc]\n ...\n\n @overload\n def __getitem__(self, key: Any) -> \"Dataset\":\n ...\n\n def __getitem__(self, key):\n \"\"\"Access variables or coordinates this dataset as a\n :py:class:`~xarray.DataArray`.\n\n Indexing with a list of names will return a new ``Dataset`` object.\n \"\"\"\n if utils.is_dict_like(key):\n return self.isel(**cast(Mapping, key))\n\n if hashable(key):\n return self._construct_dataarray(key)\n else:\n return self._copy_listed(np.asarray(key))\n\n def __setitem__(self, key: Hashable, value) -> None:\n \"\"\"Add an array to this dataset.\n\n If value is a `DataArray`, call its `select_vars()` method, rename it\n to `key` and merge the contents of the resulting dataset into this\n dataset.\n\n If value is an `Variable` object (or tuple of form\n ``(dims, data[, attrs])``), add it to this dataset as a new\n variable.\n \"\"\"\n if utils.is_dict_like(key):\n raise NotImplementedError(\n \"cannot yet use a dictionary as a key to set Dataset values\"\n )\n\n self.update({key: value})\n\n def __delitem__(self, key: Hashable) -> None:\n \"\"\"Remove a variable from this dataset.\"\"\"\n del self._variables[key]\n self._coord_names.discard(key)\n if key in self.indexes:\n assert self._indexes is not None\n del self._indexes[key]\n self._dims = calculate_dimensions(self._variables)\n\n # mutable objects should not be hashable\n # https://github.com/python/mypy/issues/4266\n __hash__ = None # type: ignore[assignment]\n\n def _all_compat(self, other: \"Dataset\", compat_str: str) -> bool:\n \"\"\"Helper function for equals and identical\"\"\"\n\n # some stores (e.g., scipy) do not seem to preserve order, so don't\n # require matching order for equality\n def compat(x: Variable, y: Variable) -> bool:\n return getattr(x, compat_str)(y)\n\n return self._coord_names == other._coord_names and utils.dict_equiv(\n self._variables, other._variables, compat=compat\n )\n\n def broadcast_equals(self, other: \"Dataset\") -> bool:\n \"\"\"Two Datasets are broadcast equal if they are equal after\n broadcasting all variables against each other.\n\n For example, variables that are scalar in one dataset but non-scalar in\n the other dataset can still be broadcast equal if the the non-scalar\n variable is a constant.\n\n See Also\n --------\n Dataset.equals\n Dataset.identical\n \"\"\"\n 
try:\n return self._all_compat(other, \"broadcast_equals\")\n except (TypeError, AttributeError):\n return False\n\n def equals(self, other: \"Dataset\") -> bool:\n \"\"\"Two Datasets are equal if they have matching variables and\n coordinates, all of which are equal.\n\n Datasets can still be equal (like pandas objects) if they have NaN\n values in the same locations.\n\n This method is necessary because `v1 == v2` for ``Dataset``\n does element-wise comparisons (like numpy.ndarrays).\n\n See Also\n --------\n Dataset.broadcast_equals\n Dataset.identical\n \"\"\"\n try:\n return self._all_compat(other, \"equals\")\n except (TypeError, AttributeError):\n return False\n\n def identical(self, other: \"Dataset\") -> bool:\n \"\"\"Like equals, but also checks all dataset attributes and the\n attributes on all variables and coordinates.\n\n See Also\n --------\n Dataset.broadcast_equals\n Dataset.equals\n \"\"\"\n try:\n return utils.dict_equiv(self.attrs, other.attrs) and self._all_compat(\n other, \"identical\"\n )\n except (TypeError, AttributeError):\n return False\n\n @property\n def indexes(self) -> Indexes:\n \"\"\"Mapping of pandas.Index objects used for label based indexing\"\"\"\n if self._indexes is None:\n self._indexes = default_indexes(self._variables, self._dims)\n return Indexes(self._indexes)\n\n @property\n def coords(self) -> DatasetCoordinates:\n \"\"\"Dictionary of xarray.DataArray objects corresponding to coordinate\n variables\n \"\"\"\n return DatasetCoordinates(self)\n\n @property\n def data_vars(self) -> DataVariables:\n \"\"\"Dictionary of DataArray objects corresponding to data variables\"\"\"\n return DataVariables(self)\n\n def set_coords(self, names: \"Union[Hashable, Iterable[Hashable]]\") -> \"Dataset\":\n \"\"\"Given names of one or more variables, set them as coordinates\n\n Parameters\n ----------\n names : hashable or iterable of hashable\n Name(s) of variables in this dataset to convert into coordinates.\n\n Returns\n -------\n Dataset\n\n See Also\n --------\n Dataset.swap_dims\n \"\"\"\n # TODO: allow inserting new coordinates with this method, like\n # DataFrame.set_index?\n # nb. check in self._variables, not self.data_vars to insure that the\n # operation is idempotent\n if isinstance(names, str) or not isinstance(names, Iterable):\n names = [names]\n else:\n names = list(names)\n self._assert_all_in_dataset(names)\n obj = self.copy()\n obj._coord_names.update(names)\n return obj\n\n def reset_coords(\n self,\n names: \"Union[Hashable, Iterable[Hashable], None]\" = None,\n drop: bool = False,\n ) -> \"Dataset\":\n \"\"\"Given names of coordinates, reset them to become variables\n\n Parameters\n ----------\n names : hashable or iterable of hashable, optional\n Name(s) of non-index coordinates in this dataset to reset into\n variables. 
By default, all non-index coordinates are reset.\n drop : bool, optional\n If True, remove coordinates instead of converting them into\n variables.\n\n Returns\n -------\n Dataset\n \"\"\"\n if names is None:\n names = self._coord_names - set(self.dims)\n else:\n if isinstance(names, str) or not isinstance(names, Iterable):\n names = [names]\n else:\n names = list(names)\n self._assert_all_in_dataset(names)\n bad_coords = set(names) & set(self.dims)\n if bad_coords:\n raise ValueError(\n \"cannot remove index coordinates with reset_coords: %s\" % bad_coords\n )\n obj = self.copy()\n obj._coord_names.difference_update(names)\n if drop:\n for name in names:\n del obj._variables[name]\n return obj\n\n def dump_to_store(self, store: \"AbstractDataStore\", **kwargs) -> None:\n \"\"\"Store dataset contents to a backends.*DataStore object.\"\"\"\n from ..backends.api import dump_to_store\n\n # TODO: rename and/or cleanup this method to make it more consistent\n # with to_netcdf()\n dump_to_store(self, store, **kwargs)\n\n def to_netcdf(\n self,\n path=None,\n mode: str = \"w\",\n format: str = None,\n group: str = None,\n engine: str = None,\n encoding: Mapping = None,\n unlimited_dims: Iterable[Hashable] = None,\n compute: bool = True,\n invalid_netcdf: bool = False,\n ) -> Union[bytes, \"Delayed\", None]:\n \"\"\"Write dataset contents to a netCDF file.\n\n Parameters\n ----------\n path : str, Path or file-like, optional\n Path to which to save this dataset. File-like objects are only\n supported by the scipy engine. If no path is provided, this\n function returns the resulting netCDF file as bytes; in this case,\n we need to use scipy, which does not support netCDF version 4 (the\n default format becomes NETCDF3_64BIT).\n mode : {\"w\", \"a\"}, default: \"w\"\n Write ('w') or append ('a') mode. If mode='w', any existing file at\n this location will be overwritten. If mode='a', existing variables\n will be overwritten.\n format : {\"NETCDF4\", \"NETCDF4_CLASSIC\", \"NETCDF3_64BIT\", \\\n \"NETCDF3_CLASSIC\"}, optional\n File format for the resulting netCDF file:\n\n * NETCDF4: Data is stored in an HDF5 file, using netCDF4 API\n features.\n * NETCDF4_CLASSIC: Data is stored in an HDF5 file, using only\n netCDF 3 compatible API features.\n * NETCDF3_64BIT: 64-bit offset version of the netCDF 3 file format,\n which fully supports 2+ GB files, but is only compatible with\n clients linked against netCDF version 3.6.0 or later.\n * NETCDF3_CLASSIC: The classic netCDF 3 file format. It does not\n handle 2+ GB files very well.\n\n All formats are supported by the netCDF4-python library.\n scipy.io.netcdf only supports the last two formats.\n\n The default format is NETCDF4 if you are saving a file to disk and\n have the netCDF4-python library available. Otherwise, xarray falls\n back to using scipy to write netCDF files and defaults to the\n NETCDF3_64BIT format (scipy does not support netCDF4).\n group : str, optional\n Path to the netCDF4 group in the given file to open (only works for\n format='NETCDF4'). The group(s) will be created if necessary.\n engine : {\"netcdf4\", \"scipy\", \"h5netcdf\"}, optional\n Engine to use when writing netCDF files. 
If not provided, the\n default engine is chosen based on available dependencies, with a\n preference for 'netcdf4' if writing to a file on disk.\n encoding : dict, optional\n Nested dictionary with variable names as keys and dictionaries of\n variable specific encodings as values, e.g.,\n ``{\"my_variable\": {\"dtype\": \"int16\", \"scale_factor\": 0.1,\n \"zlib\": True}, ...}``\n\n The `h5netcdf` engine supports both the NetCDF4-style compression\n encoding parameters ``{\"zlib\": True, \"complevel\": 9}`` and the h5py\n ones ``{\"compression\": \"gzip\", \"compression_opts\": 9}``.\n This allows using any compression plugin installed in the HDF5\n library, e.g. LZF.\n\n unlimited_dims : iterable of hashable, optional\n Dimension(s) that should be serialized as unlimited dimensions.\n By default, no dimensions are treated as unlimited dimensions.\n Note that unlimited_dims may also be set via\n ``dataset.encoding[\"unlimited_dims\"]``.\n compute: bool, default: True\n If true compute immediately, otherwise return a\n ``dask.delayed.Delayed`` object that can be computed later.\n invalid_netcdf: bool, default: False\n Only valid along with ``engine=\"h5netcdf\"``. If True, allow writing\n hdf5 files which are invalid netcdf as described in\n https://github.com/shoyer/h5netcdf.\n \"\"\"\n if encoding is None:\n encoding = {}\n from ..backends.api import to_netcdf\n\n return to_netcdf(\n self,\n path,\n mode,\n format=format,\n group=group,\n engine=engine,\n encoding=encoding,\n unlimited_dims=unlimited_dims,\n compute=compute,\n invalid_netcdf=invalid_netcdf,\n )\n\n def to_zarr(\n self,\n store: Union[MutableMapping, str, Path] = None,\n chunk_store: Union[MutableMapping, str, Path] = None,\n mode: str = None,\n synchronizer=None,\n group: str = None,\n encoding: Mapping = None,\n compute: bool = True,\n consolidated: bool = False,\n append_dim: Hashable = None,\n region: Mapping[str, slice] = None,\n ) -> \"ZarrStore\":\n \"\"\"Write dataset contents to a zarr group.\n\n .. note:: Experimental\n The Zarr backend is new and experimental. Please report any\n unexpected behavior via github issues.\n\n Parameters\n ----------\n store : MutableMapping, str or Path, optional\n Store or path to directory in file system.\n chunk_store : MutableMapping, str or Path, optional\n Store or path to directory in file system only for Zarr array chunks.\n Requires zarr-python v2.4.0 or later.\n mode : {\"w\", \"w-\", \"a\", None}, optional\n Persistence mode: \"w\" means create (overwrite if exists);\n \"w-\" means create (fail if exists);\n \"a\" means override existing variables (create if does not exist).\n If ``append_dim`` is set, ``mode`` can be omitted as it is\n internally set to ``\"a\"``. Otherwise, ``mode`` will default to\n `w-` if not set.\n synchronizer : object, optional\n Zarr array synchronizer.\n group : str, optional\n Group path. (a.k.a. `path` in zarr terminology.)\n encoding : dict, optional\n Nested dictionary with variable names as keys and dictionaries of\n variable specific encodings as values, e.g.,\n ``{\"my_variable\": {\"dtype\": \"int16\", \"scale_factor\": 0.1,}, ...}``\n compute : bool, optional\n If True write array data immediately, otherwise return a\n ``dask.delayed.Delayed`` object that can be computed to write\n array data later. 
Metadata is always updated eagerly.\n consolidated : bool, optional\n If True, apply zarr's `consolidate_metadata` function to the store\n after writing metadata.\n append_dim : hashable, optional\n If set, the dimension along which the data will be appended. All\n other dimensions on overriden variables must remain the same size.\n region : dict, optional\n Optional mapping from dimension names to integer slices along\n dataset dimensions to indicate the region of existing zarr array(s)\n in which to write this dataset's data. For example,\n ``{'x': slice(0, 1000), 'y': slice(10000, 11000)}`` would indicate\n that values should be written to the region ``0:1000`` along ``x``\n and ``10000:11000`` along ``y``.\n\n Two restrictions apply to the use of ``region``:\n\n - If ``region`` is set, _all_ variables in a dataset must have at\n least one dimension in common with the region. Other variables\n should be written in a separate call to ``to_zarr()``.\n - Dimensions cannot be included in both ``region`` and\n ``append_dim`` at the same time. To create empty arrays to fill\n in with ``region``, use a separate call to ``to_zarr()`` with\n ``compute=False``. See \"Appending to existing Zarr stores\" in\n the reference documentation for full details.\n\n References\n ----------\n https://zarr.readthedocs.io/\n\n Notes\n -----\n Zarr chunking behavior:\n If chunks are found in the encoding argument or attribute\n corresponding to any DataArray, those chunks are used.\n If a DataArray is a dask array, it is written with those chunks.\n If not other chunks are found, Zarr uses its own heuristics to\n choose automatic chunk sizes.\n \"\"\"\n from ..backends.api import to_zarr\n\n if encoding is None:\n encoding = {}\n\n return to_zarr(\n self,\n store=store,\n chunk_store=chunk_store,\n mode=mode,\n synchronizer=synchronizer,\n group=group,\n encoding=encoding,\n compute=compute,\n consolidated=consolidated,\n append_dim=append_dim,\n region=region,\n )\n\n def __repr__(self) -> str:\n return formatting.dataset_repr(self)\n\n def _repr_html_(self):\n if OPTIONS[\"display_style\"] == \"text\":\n return f\"<pre>{escape(repr(self))}</pre>\"\n return formatting_html.dataset_repr(self)\n\n def info(self, buf=None) -> None:\n \"\"\"\n Concise summary of a Dataset variables and attributes.\n\n Parameters\n ----------\n buf : file-like, default: sys.stdout\n writable buffer\n\n See Also\n --------\n pandas.DataFrame.assign\n ncdump : netCDF's ncdump\n \"\"\"\n if buf is None: # pragma: no cover\n buf = sys.stdout\n\n lines = []\n lines.append(\"xarray.Dataset {\")\n lines.append(\"dimensions:\")\n for name, size in self.dims.items():\n lines.append(f\"\\t{name} = {size} ;\")\n lines.append(\"\\nvariables:\")\n for name, da in self.variables.items():\n dims = \", \".join(da.dims)\n lines.append(f\"\\t{da.dtype} {name}({dims}) ;\")\n for k, v in da.attrs.items():\n lines.append(f\"\\t\\t{name}:{k} = {v} ;\")\n lines.append(\"\\n// global attributes:\")\n for k, v in self.attrs.items():\n lines.append(f\"\\t:{k} = {v} ;\")\n lines.append(\"}\")\n\n buf.write(\"\\n\".join(lines))\n\n @property\n def chunks(self) -> Mapping[Hashable, Tuple[int, ...]]:\n \"\"\"Block dimensions for this dataset's data or None if it's not a dask\n array.\n \"\"\"\n chunks: Dict[Hashable, Tuple[int, ...]] = {}\n for v in self.variables.values():\n if v.chunks is not None:\n for dim, c in zip(v.dims, v.chunks):\n if dim in chunks and c != chunks[dim]:\n raise ValueError(\n f\"Object has inconsistent chunks along dimension {dim}. 
\"\n \"This can be fixed by calling unify_chunks().\"\n )\n chunks[dim] = c\n return Frozen(SortedKeysDict(chunks))\n\n def chunk(\n self,\n chunks: Union[\n Number,\n str,\n Mapping[Hashable, Union[None, Number, str, Tuple[Number, ...]]],\n ] = {}, # {} even though it's technically unsafe, is being used intentionally here (#4667)\n name_prefix: str = \"xarray-\",\n token: str = None,\n lock: bool = False,\n ) -> \"Dataset\":\n \"\"\"Coerce all arrays in this dataset into dask arrays with the given\n chunks.\n\n Non-dask arrays in this dataset will be converted to dask arrays. Dask\n arrays will be rechunked to the given chunk sizes.\n\n If neither chunks is not provided for one or more dimensions, chunk\n sizes along that dimension will not be updated; non-dask arrays will be\n converted into dask arrays with a single block.\n\n Parameters\n ----------\n chunks : int, 'auto' or mapping, optional\n Chunk sizes along each dimension, e.g., ``5`` or\n ``{\"x\": 5, \"y\": 5}``.\n name_prefix : str, optional\n Prefix for the name of any new dask arrays.\n token : str, optional\n Token uniquely identifying this dataset.\n lock : optional\n Passed on to :py:func:`dask.array.from_array`, if the array is not\n already as dask array.\n\n Returns\n -------\n chunked : xarray.Dataset\n \"\"\"\n if chunks is None:\n warnings.warn(\n \"None value for 'chunks' is deprecated. \"\n \"It will raise an error in the future. Use instead '{}'\",\n category=FutureWarning,\n )\n chunks = {}\n\n if isinstance(chunks, (Number, str)):\n chunks = dict.fromkeys(self.dims, chunks)\n\n bad_dims = chunks.keys() - self.dims.keys()\n if bad_dims:\n raise ValueError(\n \"some chunks keys are not dimensions on this \" \"object: %s\" % bad_dims\n )\n\n variables = {\n k: _maybe_chunk(k, v, chunks, token, lock, name_prefix)\n for k, v in self.variables.items()\n }\n return self._replace(variables)\n\n def _validate_indexers(\n self, indexers: Mapping[Hashable, Any], missing_dims: str = \"raise\"\n ) -> Iterator[Tuple[Hashable, Union[int, slice, np.ndarray, Variable]]]:\n \"\"\"Here we make sure\n + indexer has a valid keys\n + indexer is in a valid data type\n + string indexers are cast to the appropriate date type if the\n associated index is a DatetimeIndex or CFTimeIndex\n \"\"\"\n from .dataarray import DataArray\n\n indexers = drop_dims_from_indexers(indexers, self.dims, missing_dims)\n\n # all indexers should be int, slice, np.ndarrays, or Variable\n for k, v in indexers.items():\n if isinstance(v, (int, slice, Variable)):\n yield k, v\n elif isinstance(v, DataArray):\n yield k, v.variable\n elif isinstance(v, tuple):\n yield k, as_variable(v)\n elif isinstance(v, Dataset):\n raise TypeError(\"cannot use a Dataset as an indexer\")\n elif isinstance(v, Sequence) and len(v) == 0:\n yield k, np.empty((0,), dtype=\"int64\")\n else:\n v = np.asarray(v)\n\n if v.dtype.kind in \"US\":\n index = self.indexes[k]\n if isinstance(index, pd.DatetimeIndex):\n v = v.astype(\"datetime64[ns]\")\n elif isinstance(index, xr.CFTimeIndex):\n v = _parse_array_of_cftime_strings(v, index.date_type)\n\n if v.ndim > 1:\n raise IndexError(\n \"Unlabeled multi-dimensional array cannot be \"\n \"used for indexing: {}\".format(k)\n )\n yield k, v\n\n def _validate_interp_indexers(\n self, indexers: Mapping[Hashable, Any]\n ) -> Iterator[Tuple[Hashable, Variable]]:\n \"\"\"Variant of _validate_indexers to be used for interpolation\"\"\"\n for k, v in self._validate_indexers(indexers):\n if isinstance(v, Variable):\n if v.ndim == 1:\n yield k, 
v.to_index_variable()\n else:\n yield k, v\n elif isinstance(v, int):\n yield k, Variable((), v)\n elif isinstance(v, np.ndarray):\n if v.ndim == 0:\n yield k, Variable((), v)\n elif v.ndim == 1:\n yield k, IndexVariable((k,), v)\n else:\n raise AssertionError() # Already tested by _validate_indexers\n else:\n raise TypeError(type(v))\n\n def _get_indexers_coords_and_indexes(self, indexers):\n \"\"\"Extract coordinates and indexes from indexers.\n\n Only coordinate with a name different from any of self.variables will\n be attached.\n \"\"\"\n from .dataarray import DataArray\n\n coords_list = []\n for k, v in indexers.items():\n if isinstance(v, DataArray):\n if v.dtype.kind == \"b\":\n if v.ndim != 1: # we only support 1-d boolean array\n raise ValueError(\n \"{:d}d-boolean array is used for indexing along \"\n \"dimension {!r}, but only 1d boolean arrays are \"\n \"supported.\".format(v.ndim, k)\n )\n # Make sure in case of boolean DataArray, its\n # coordinate also should be indexed.\n v_coords = v[v.values.nonzero()[0]].coords\n else:\n v_coords = v.coords\n coords_list.append(v_coords)\n\n # we don't need to call align() explicitly or check indexes for\n # alignment, because merge_variables already checks for exact alignment\n # between dimension coordinates\n coords, indexes = merge_coordinates_without_align(coords_list)\n assert_coordinate_consistent(self, coords)\n\n # silently drop the conflicted variables.\n attached_coords = {k: v for k, v in coords.items() if k not in self._variables}\n attached_indexes = {\n k: v for k, v in indexes.items() if k not in self._variables\n }\n return attached_coords, attached_indexes\n\n def isel(\n self,\n indexers: Mapping[Hashable, Any] = None,\n drop: bool = False,\n missing_dims: str = \"raise\",\n **indexers_kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"Returns a new dataset with each array indexed along the specified\n dimension(s).\n\n This method selects values from each array using its `__getitem__`\n method, except this method does not require knowing the order of\n each array's dimensions.\n\n Parameters\n ----------\n indexers : dict, optional\n A dict with keys matching dimensions and values given\n by integers, slice objects or arrays.\n indexer can be a integer, slice, array-like or DataArray.\n If DataArrays are passed as indexers, xarray-style indexing will be\n carried out. 
See :ref:`indexing` for the details.\n One of indexers or indexers_kwargs must be provided.\n drop : bool, optional\n If ``drop=True``, drop coordinates variables indexed by integers\n instead of making them scalar.\n missing_dims : {\"raise\", \"warn\", \"ignore\"}, default: \"raise\"\n What to do if dimensions that should be selected from are not present in the\n Dataset:\n - \"raise\": raise an exception\n - \"warning\": raise a warning, and ignore the missing dimensions\n - \"ignore\": ignore the missing dimensions\n **indexers_kwargs : {dim: indexer, ...}, optional\n The keyword arguments form of ``indexers``.\n One of indexers or indexers_kwargs must be provided.\n\n Returns\n -------\n obj : Dataset\n A new Dataset with the same contents as this dataset, except each\n array and dimension is indexed by the appropriate indexers.\n If indexer DataArrays have coordinates that do not conflict with\n this object, then these coordinates will be attached.\n In general, each array's data will be a view of the array's data\n in this dataset, unless vectorized indexing was triggered by using\n an array indexer, in which case the data will be a copy.\n\n See Also\n --------\n Dataset.sel\n DataArray.isel\n \"\"\"\n indexers = either_dict_or_kwargs(indexers, indexers_kwargs, \"isel\")\n if any(is_fancy_indexer(idx) for idx in indexers.values()):\n return self._isel_fancy(indexers, drop=drop, missing_dims=missing_dims)\n\n # Much faster algorithm for when all indexers are ints, slices, one-dimensional\n # lists, or zero or one-dimensional np.ndarray's\n indexers = drop_dims_from_indexers(indexers, self.dims, missing_dims)\n\n variables = {}\n dims: Dict[Hashable, Tuple[int, ...]] = {}\n coord_names = self._coord_names.copy()\n indexes = self._indexes.copy() if self._indexes is not None else None\n\n for var_name, var_value in self._variables.items():\n var_indexers = {k: v for k, v in indexers.items() if k in var_value.dims}\n if var_indexers:\n var_value = var_value.isel(var_indexers)\n if drop and var_value.ndim == 0 and var_name in coord_names:\n coord_names.remove(var_name)\n if indexes:\n indexes.pop(var_name, None)\n continue\n if indexes and var_name in indexes:\n if var_value.ndim == 1:\n indexes[var_name] = var_value.to_index()\n else:\n del indexes[var_name]\n variables[var_name] = var_value\n dims.update(zip(var_value.dims, var_value.shape))\n\n return self._construct_direct(\n variables=variables,\n coord_names=coord_names,\n dims=dims,\n attrs=self._attrs,\n indexes=indexes,\n encoding=self._encoding,\n close=self._close,\n )\n\n def _isel_fancy(\n self,\n indexers: Mapping[Hashable, Any],\n *,\n drop: bool,\n missing_dims: str = \"raise\",\n ) -> \"Dataset\":\n # Note: we need to preserve the original indexers variable in order to merge the\n # coords below\n indexers_list = list(self._validate_indexers(indexers, missing_dims))\n\n variables: Dict[Hashable, Variable] = {}\n indexes: Dict[Hashable, pd.Index] = {}\n\n for name, var in self.variables.items():\n var_indexers = {k: v for k, v in indexers_list if k in var.dims}\n if drop and name in var_indexers:\n continue # drop this variable\n\n if name in self.indexes:\n new_var, new_index = isel_variable_and_index(\n name, var, self.indexes[name], var_indexers\n )\n if new_index is not None:\n indexes[name] = new_index\n elif var_indexers:\n new_var = var.isel(indexers=var_indexers)\n else:\n new_var = var.copy(deep=False)\n\n variables[name] = new_var\n\n coord_names = self._coord_names & variables.keys()\n selected = 
self._replace_with_new_dims(variables, coord_names, indexes)\n\n # Extract coordinates from indexers\n coord_vars, new_indexes = selected._get_indexers_coords_and_indexes(indexers)\n variables.update(coord_vars)\n indexes.update(new_indexes)\n coord_names = self._coord_names & variables.keys() | coord_vars.keys()\n return self._replace_with_new_dims(variables, coord_names, indexes=indexes)\n\n def sel(\n self,\n indexers: Mapping[Hashable, Any] = None,\n method: str = None,\n tolerance: Number = None,\n drop: bool = False,\n **indexers_kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"Returns a new dataset with each array indexed by tick labels\n along the specified dimension(s).\n\n In contrast to `Dataset.isel`, indexers for this method should use\n labels instead of integers.\n\n Under the hood, this method is powered by using pandas's powerful Index\n objects. This makes label based indexing essentially just as fast as\n using integer indexing.\n\n It also means this method uses pandas's (well documented) logic for\n indexing. This means you can use string shortcuts for datetime indexes\n (e.g., '2000-01' to select all values in January 2000). It also means\n that slices are treated as inclusive of both the start and stop values,\n unlike normal Python indexing.\n\n Parameters\n ----------\n indexers : dict, optional\n A dict with keys matching dimensions and values given\n by scalars, slices or arrays of tick labels. For dimensions with\n multi-index, the indexer may also be a dict-like object with keys\n matching index level names.\n If DataArrays are passed as indexers, xarray-style indexing will be\n carried out. See :ref:`indexing` for the details.\n One of indexers or indexers_kwargs must be provided.\n method : {None, \"nearest\", \"pad\", \"ffill\", \"backfill\", \"bfill\"}, optional\n Method to use for inexact matches:\n\n * None (default): only exact matches\n * pad / ffill: propagate last valid index value forward\n * backfill / bfill: propagate next valid index value backward\n * nearest: use nearest valid index value\n tolerance : optional\n Maximum distance between original and new labels for inexact\n matches. 
The values of the index at the matching locations must\n satisfy the equation ``abs(index[indexer] - target) <= tolerance``.\n drop : bool, optional\n If ``drop=True``, drop coordinates variables in `indexers` instead\n of making them scalar.\n **indexers_kwargs : {dim: indexer, ...}, optional\n The keyword arguments form of ``indexers``.\n One of indexers or indexers_kwargs must be provided.\n\n Returns\n -------\n obj : Dataset\n A new Dataset with the same contents as this dataset, except each\n variable and dimension is indexed by the appropriate indexers.\n If indexer DataArrays have coordinates that do not conflict with\n this object, then these coordinates will be attached.\n In general, each array's data will be a view of the array's data\n in this dataset, unless vectorized indexing was triggered by using\n an array indexer, in which case the data will be a copy.\n\n See Also\n --------\n Dataset.isel\n DataArray.sel\n \"\"\"\n indexers = either_dict_or_kwargs(indexers, indexers_kwargs, \"sel\")\n pos_indexers, new_indexes = remap_label_indexers(\n self, indexers=indexers, method=method, tolerance=tolerance\n )\n result = self.isel(indexers=pos_indexers, drop=drop)\n return result._overwrite_indexes(new_indexes)\n\n def head(\n self,\n indexers: Union[Mapping[Hashable, int], int] = None,\n **indexers_kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"Returns a new dataset with the first `n` values of each array\n for the specified dimension(s).\n\n Parameters\n ----------\n indexers : dict or int, default: 5\n A dict with keys matching dimensions and integer values `n`\n or a single integer `n` applied over all dimensions.\n One of indexers or indexers_kwargs must be provided.\n **indexers_kwargs : {dim: n, ...}, optional\n The keyword arguments form of ``indexers``.\n One of indexers or indexers_kwargs must be provided.\n\n See Also\n --------\n Dataset.tail\n Dataset.thin\n DataArray.head\n \"\"\"\n if not indexers_kwargs:\n if indexers is None:\n indexers = 5\n if not isinstance(indexers, int) and not is_dict_like(indexers):\n raise TypeError(\"indexers must be either dict-like or a single integer\")\n if isinstance(indexers, int):\n indexers = {dim: indexers for dim in self.dims}\n indexers = either_dict_or_kwargs(indexers, indexers_kwargs, \"head\")\n for k, v in indexers.items():\n if not isinstance(v, int):\n raise TypeError(\n \"expected integer type indexer for \"\n \"dimension %r, found %r\" % (k, type(v))\n )\n elif v < 0:\n raise ValueError(\n \"expected positive integer as indexer \"\n \"for dimension %r, found %s\" % (k, v)\n )\n indexers_slices = {k: slice(val) for k, val in indexers.items()}\n return self.isel(indexers_slices)\n\n def tail(\n self,\n indexers: Union[Mapping[Hashable, int], int] = None,\n **indexers_kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"Returns a new dataset with the last `n` values of each array\n for the specified dimension(s).\n\n Parameters\n ----------\n indexers : dict or int, default: 5\n A dict with keys matching dimensions and integer values `n`\n or a single integer `n` applied over all dimensions.\n One of indexers or indexers_kwargs must be provided.\n **indexers_kwargs : {dim: n, ...}, optional\n The keyword arguments form of ``indexers``.\n One of indexers or indexers_kwargs must be provided.\n\n See Also\n --------\n Dataset.head\n Dataset.thin\n DataArray.tail\n \"\"\"\n if not indexers_kwargs:\n if indexers is None:\n indexers = 5\n if not isinstance(indexers, int) and not is_dict_like(indexers):\n raise TypeError(\"indexers must be either 
dict-like or a single integer\")\n if isinstance(indexers, int):\n indexers = {dim: indexers for dim in self.dims}\n indexers = either_dict_or_kwargs(indexers, indexers_kwargs, \"tail\")\n for k, v in indexers.items():\n if not isinstance(v, int):\n raise TypeError(\n \"expected integer type indexer for \"\n \"dimension %r, found %r\" % (k, type(v))\n )\n elif v < 0:\n raise ValueError(\n \"expected positive integer as indexer \"\n \"for dimension %r, found %s\" % (k, v)\n )\n indexers_slices = {\n k: slice(-val, None) if val != 0 else slice(val)\n for k, val in indexers.items()\n }\n return self.isel(indexers_slices)\n\n def thin(\n self,\n indexers: Union[Mapping[Hashable, int], int] = None,\n **indexers_kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"Returns a new dataset with each array indexed along every `n`-th\n value for the specified dimension(s)\n\n Parameters\n ----------\n indexers : dict or int\n A dict with keys matching dimensions and integer values `n`\n or a single integer `n` applied over all dimensions.\n One of indexers or indexers_kwargs must be provided.\n **indexers_kwargs : {dim: n, ...}, optional\n The keyword arguments form of ``indexers``.\n One of indexers or indexers_kwargs must be provided.\n\n See Also\n --------\n Dataset.head\n Dataset.tail\n DataArray.thin\n \"\"\"\n if (\n not indexers_kwargs\n and not isinstance(indexers, int)\n and not is_dict_like(indexers)\n ):\n raise TypeError(\"indexers must be either dict-like or a single integer\")\n if isinstance(indexers, int):\n indexers = {dim: indexers for dim in self.dims}\n indexers = either_dict_or_kwargs(indexers, indexers_kwargs, \"thin\")\n for k, v in indexers.items():\n if not isinstance(v, int):\n raise TypeError(\n \"expected integer type indexer for \"\n \"dimension %r, found %r\" % (k, type(v))\n )\n elif v < 0:\n raise ValueError(\n \"expected positive integer as indexer \"\n \"for dimension %r, found %s\" % (k, v)\n )\n elif v == 0:\n raise ValueError(\"step cannot be zero\")\n indexers_slices = {k: slice(None, None, val) for k, val in indexers.items()}\n return self.isel(indexers_slices)\n\n def broadcast_like(\n self, other: Union[\"Dataset\", \"DataArray\"], exclude: Iterable[Hashable] = None\n ) -> \"Dataset\":\n \"\"\"Broadcast this DataArray against another Dataset or DataArray.\n This is equivalent to xr.broadcast(other, self)[1]\n\n Parameters\n ----------\n other : Dataset or DataArray\n Object against which to broadcast this array.\n exclude : iterable of hashable, optional\n Dimensions that must not be broadcasted\n\n \"\"\"\n if exclude is None:\n exclude = set()\n else:\n exclude = set(exclude)\n args = align(other, self, join=\"outer\", copy=False, exclude=exclude)\n\n dims_map, common_coords = _get_broadcast_dims_map_common_coords(args, exclude)\n\n return _broadcast_helper(args[1], exclude, dims_map, common_coords)\n\n def reindex_like(\n self,\n other: Union[\"Dataset\", \"DataArray\"],\n method: str = None,\n tolerance: Number = None,\n copy: bool = True,\n fill_value: Any = dtypes.NA,\n ) -> \"Dataset\":\n \"\"\"Conform this object onto the indexes of another object, filling in\n missing values with ``fill_value``. The default fill value is NaN.\n\n Parameters\n ----------\n other : Dataset or DataArray\n Object with an 'indexes' attribute giving a mapping from dimension\n names to pandas.Index objects, which provides coordinates upon\n which to index the variables in this dataset. The indexes on this\n other object need not be the same as the indexes on this\n dataset. 
Any mis-matched index values will be filled in with\n NaN, and any mis-matched dimension names will simply be ignored.\n method : {None, \"nearest\", \"pad\", \"ffill\", \"backfill\", \"bfill\"}, optional\n Method to use for filling index values from other not found in this\n dataset:\n\n * None (default): don't fill gaps\n * pad / ffill: propagate last valid index value forward\n * backfill / bfill: propagate next valid index value backward\n * nearest: use nearest valid index value\n tolerance : optional\n Maximum distance between original and new labels for inexact\n matches. The values of the index at the matching locations must\n satisfy the equation ``abs(index[indexer] - target) <= tolerance``.\n copy : bool, optional\n If ``copy=True``, data in the return value is always copied. If\n ``copy=False`` and reindexing is unnecessary, or can be performed\n with only slice operations, then the output may share memory with\n the input. In either case, a new xarray object is always returned.\n fill_value : scalar or dict-like, optional\n Value to use for newly missing values. If a dict-like maps\n variable names to fill values.\n\n Returns\n -------\n reindexed : Dataset\n Another dataset, with this dataset's data but coordinates from the\n other object.\n\n See Also\n --------\n Dataset.reindex\n align\n \"\"\"\n indexers = alignment.reindex_like_indexers(self, other)\n return self.reindex(\n indexers=indexers,\n method=method,\n copy=copy,\n fill_value=fill_value,\n tolerance=tolerance,\n )\n\n def reindex(\n self,\n indexers: Mapping[Hashable, Any] = None,\n method: str = None,\n tolerance: Number = None,\n copy: bool = True,\n fill_value: Any = dtypes.NA,\n **indexers_kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"Conform this object onto a new set of indexes, filling in\n missing values with ``fill_value``. The default fill value is NaN.\n\n Parameters\n ----------\n indexers : dict, optional\n Dictionary with keys given by dimension names and values given by\n arrays of coordinates tick labels. Any mis-matched coordinate\n values will be filled in with NaN, and any mis-matched dimension\n names will simply be ignored.\n One of indexers or indexers_kwargs must be provided.\n method : {None, \"nearest\", \"pad\", \"ffill\", \"backfill\", \"bfill\"}, optional\n Method to use for filling index values in ``indexers`` not found in\n this dataset:\n\n * None (default): don't fill gaps\n * pad / ffill: propagate last valid index value forward\n * backfill / bfill: propagate next valid index value backward\n * nearest: use nearest valid index value\n tolerance : optional\n Maximum distance between original and new labels for inexact\n matches. The values of the index at the matching locations must\n satisfy the equation ``abs(index[indexer] - target) <= tolerance``.\n copy : bool, optional\n If ``copy=True``, data in the return value is always copied. If\n ``copy=False`` and reindexing is unnecessary, or can be performed\n with only slice operations, then the output may share memory with\n the input. In either case, a new xarray object is always returned.\n fill_value : scalar or dict-like, optional\n Value to use for newly missing values. 
If a dict-like,\n maps variable names (including coordinates) to fill values.\n sparse : bool, default: False\n use sparse-array.\n **indexers_kwargs : {dim: indexer, ...}, optional\n Keyword arguments in the same form as ``indexers``.\n One of indexers or indexers_kwargs must be provided.\n\n Returns\n -------\n reindexed : Dataset\n Another dataset, with this dataset's data but replaced coordinates.\n\n See Also\n --------\n Dataset.reindex_like\n align\n pandas.Index.get_indexer\n\n Examples\n --------\n Create a dataset with some fictional data.\n\n >>> import xarray as xr\n >>> import pandas as pd\n >>> x = xr.Dataset(\n ... {\n ... \"temperature\": (\"station\", 20 * np.random.rand(4)),\n ... \"pressure\": (\"station\", 500 * np.random.rand(4)),\n ... },\n ... coords={\"station\": [\"boston\", \"nyc\", \"seattle\", \"denver\"]},\n ... )\n >>> x\n <xarray.Dataset>\n Dimensions: (station: 4)\n Coordinates:\n * station (station) <U7 'boston' 'nyc' 'seattle' 'denver'\n Data variables:\n temperature (station) float64 10.98 14.3 12.06 10.9\n pressure (station) float64 211.8 322.9 218.8 445.9\n >>> x.indexes\n station: Index(['boston', 'nyc', 'seattle', 'denver'], dtype='object', name='station')\n\n Create a new index and reindex the dataset. By default values in the new index that\n do not have corresponding records in the dataset are assigned `NaN`.\n\n >>> new_index = [\"boston\", \"austin\", \"seattle\", \"lincoln\"]\n >>> x.reindex({\"station\": new_index})\n <xarray.Dataset>\n Dimensions: (station: 4)\n Coordinates:\n * station (station) <U7 'boston' 'austin' 'seattle' 'lincoln'\n Data variables:\n temperature (station) float64 10.98 nan 12.06 nan\n pressure (station) float64 211.8 nan 218.8 nan\n\n We can fill in the missing values by passing a value to the keyword `fill_value`.\n\n >>> x.reindex({\"station\": new_index}, fill_value=0)\n <xarray.Dataset>\n Dimensions: (station: 4)\n Coordinates:\n * station (station) <U7 'boston' 'austin' 'seattle' 'lincoln'\n Data variables:\n temperature (station) float64 10.98 0.0 12.06 0.0\n pressure (station) float64 211.8 0.0 218.8 0.0\n\n We can also use different fill values for each variable.\n\n >>> x.reindex(\n ... {\"station\": new_index}, fill_value={\"temperature\": 0, \"pressure\": 100}\n ... )\n <xarray.Dataset>\n Dimensions: (station: 4)\n Coordinates:\n * station (station) <U7 'boston' 'austin' 'seattle' 'lincoln'\n Data variables:\n temperature (station) float64 10.98 0.0 12.06 0.0\n pressure (station) float64 211.8 100.0 218.8 100.0\n\n Because the index is not monotonically increasing or decreasing, we cannot use arguments\n to the keyword method to fill the `NaN` values.\n\n >>> x.reindex({\"station\": new_index}, method=\"nearest\")\n Traceback (most recent call last):\n ...\n raise ValueError('index must be monotonic increasing or decreasing')\n ValueError: index must be monotonic increasing or decreasing\n\n To further illustrate the filling functionality in reindex, we will create a\n dataset with a monotonically increasing index (for example, a sequence of dates).\n\n >>> x2 = xr.Dataset(\n ... {\n ... \"temperature\": (\n ... \"time\",\n ... [15.57, 12.77, np.nan, 0.3081, 16.59, 15.12],\n ... ),\n ... \"pressure\": (\"time\", 500 * np.random.rand(6)),\n ... },\n ... coords={\"time\": pd.date_range(\"01/01/2019\", periods=6, freq=\"D\")},\n ... )\n >>> x2\n <xarray.Dataset>\n Dimensions: (time: 6)\n Coordinates:\n * time (time) datetime64[ns] 2019-01-01 2019-01-02 ... 
2019-01-06\n Data variables:\n temperature (time) float64 15.57 12.77 nan 0.3081 16.59 15.12\n pressure (time) float64 481.8 191.7 395.9 264.4 284.0 462.8\n\n Suppose we decide to expand the dataset to cover a wider date range.\n\n >>> time_index2 = pd.date_range(\"12/29/2018\", periods=10, freq=\"D\")\n >>> x2.reindex({\"time\": time_index2})\n <xarray.Dataset>\n Dimensions: (time: 10)\n Coordinates:\n * time (time) datetime64[ns] 2018-12-29 2018-12-30 ... 2019-01-07\n Data variables:\n temperature (time) float64 nan nan nan 15.57 ... 0.3081 16.59 15.12 nan\n pressure (time) float64 nan nan nan 481.8 ... 264.4 284.0 462.8 nan\n\n The index entries that did not have a value in the original data frame (for example, `2018-12-29`)\n are by default filled with NaN. If desired, we can fill in the missing values using one of several options.\n\n For example, to back-propagate the last valid value to fill the `NaN` values,\n pass `bfill` as an argument to the `method` keyword.\n\n >>> x3 = x2.reindex({\"time\": time_index2}, method=\"bfill\")\n >>> x3\n <xarray.Dataset>\n Dimensions: (time: 10)\n Coordinates:\n * time (time) datetime64[ns] 2018-12-29 2018-12-30 ... 2019-01-07\n Data variables:\n temperature (time) float64 15.57 15.57 15.57 15.57 ... 16.59 15.12 nan\n pressure (time) float64 481.8 481.8 481.8 481.8 ... 284.0 462.8 nan\n\n Please note that the `NaN` value present in the original dataset (at index value `2019-01-03`)\n will not be filled by any of the value propagation schemes.\n\n >>> x2.where(x2.temperature.isnull(), drop=True)\n <xarray.Dataset>\n Dimensions: (time: 1)\n Coordinates:\n * time (time) datetime64[ns] 2019-01-03\n Data variables:\n temperature (time) float64 nan\n pressure (time) float64 395.9\n >>> x3.where(x3.temperature.isnull(), drop=True)\n <xarray.Dataset>\n Dimensions: (time: 2)\n Coordinates:\n * time (time) datetime64[ns] 2019-01-03 2019-01-07\n Data variables:\n temperature (time) float64 nan nan\n pressure (time) float64 395.9 nan\n\n This is because filling while reindexing does not look at dataset values, but only compares\n the original and desired indexes. 
If you do want to fill in the `NaN` values present in the\n original dataset, use the :py:meth:`~Dataset.fillna()` method.\n\n \"\"\"\n return self._reindex(\n indexers,\n method,\n tolerance,\n copy,\n fill_value,\n sparse=False,\n **indexers_kwargs,\n )\n\n def _reindex(\n self,\n indexers: Mapping[Hashable, Any] = None,\n method: str = None,\n tolerance: Number = None,\n copy: bool = True,\n fill_value: Any = dtypes.NA,\n sparse: bool = False,\n **indexers_kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"\n same to _reindex but support sparse option\n \"\"\"\n indexers = utils.either_dict_or_kwargs(indexers, indexers_kwargs, \"reindex\")\n\n bad_dims = [d for d in indexers if d not in self.dims]\n if bad_dims:\n raise ValueError(\"invalid reindex dimensions: %s\" % bad_dims)\n\n variables, indexes = alignment.reindex_variables(\n self.variables,\n self.sizes,\n self.indexes,\n indexers,\n method,\n tolerance,\n copy=copy,\n fill_value=fill_value,\n sparse=sparse,\n )\n coord_names = set(self._coord_names)\n coord_names.update(indexers)\n return self._replace_with_new_dims(variables, coord_names, indexes=indexes)\n\n def interp(\n self,\n coords: Mapping[Hashable, Any] = None,\n method: str = \"linear\",\n assume_sorted: bool = False,\n kwargs: Mapping[str, Any] = None,\n **coords_kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"Multidimensional interpolation of Dataset.\n\n Parameters\n ----------\n coords : dict, optional\n Mapping from dimension names to the new coordinates.\n New coordinate can be a scalar, array-like or DataArray.\n If DataArrays are passed as new coordinates, their dimensions are\n used for the broadcasting. Missing values are skipped.\n method : str, optional\n {\"linear\", \"nearest\"} for multidimensional array,\n {\"linear\", \"nearest\", \"zero\", \"slinear\", \"quadratic\", \"cubic\"}\n for 1-dimensional array. \"linear\" is used by default.\n assume_sorted : bool, optional\n If False, values of coordinates that are interpolated over can be\n in any order and they are sorted first. If True, interpolated\n coordinates are assumed to be an array of monotonically increasing\n values.\n kwargs : dict, optional\n Additional keyword arguments passed to scipy's interpolator. Valid\n options and their behavior depend on if 1-dimensional or\n multi-dimensional interpolation is used.\n **coords_kwargs : {dim: coordinate, ...}, optional\n The keyword arguments form of ``coords``.\n One of coords or coords_kwargs must be provided.\n\n Returns\n -------\n interpolated : Dataset\n New dataset on the new coordinates.\n\n Notes\n -----\n scipy is required.\n\n See Also\n --------\n scipy.interpolate.interp1d\n scipy.interpolate.interpn\n\n Examples\n --------\n >>> ds = xr.Dataset(\n ... data_vars={\n ... \"a\": (\"x\", [5, 7, 4]),\n ... \"b\": (\n ... (\"x\", \"y\"),\n ... [[1, 4, 2, 9], [2, 7, 6, np.nan], [6, np.nan, 5, 8]],\n ... ),\n ... },\n ... coords={\"x\": [0, 1, 2], \"y\": [10, 12, 14, 16]},\n ... )\n >>> ds\n <xarray.Dataset>\n Dimensions: (x: 3, y: 4)\n Coordinates:\n * x (x) int64 0 1 2\n * y (y) int64 10 12 14 16\n Data variables:\n a (x) int64 5 7 4\n b (x, y) float64 1.0 4.0 2.0 9.0 2.0 7.0 6.0 nan 6.0 nan 5.0 8.0\n\n 1D interpolation with the default method (linear):\n\n >>> ds.interp(x=[0, 0.75, 1.25, 1.75])\n <xarray.Dataset>\n Dimensions: (x: 4, y: 4)\n Coordinates:\n * y (y) int64 10 12 14 16\n * x (x) float64 0.0 0.75 1.25 1.75\n Data variables:\n a (x) float64 5.0 6.5 6.25 4.75\n b (x, y) float64 1.0 4.0 2.0 nan 1.75 6.25 ... 
nan 5.0 nan 5.25 nan\n\n 1D interpolation with a different method:\n\n >>> ds.interp(x=[0, 0.75, 1.25, 1.75], method=\"nearest\")\n <xarray.Dataset>\n Dimensions: (x: 4, y: 4)\n Coordinates:\n * y (y) int64 10 12 14 16\n * x (x) float64 0.0 0.75 1.25 1.75\n Data variables:\n a (x) float64 5.0 7.0 7.0 4.0\n b (x, y) float64 1.0 4.0 2.0 9.0 2.0 7.0 ... 6.0 nan 6.0 nan 5.0 8.0\n\n 1D extrapolation:\n\n >>> ds.interp(\n ... x=[1, 1.5, 2.5, 3.5],\n ... method=\"linear\",\n ... kwargs={\"fill_value\": \"extrapolate\"},\n ... )\n <xarray.Dataset>\n Dimensions: (x: 4, y: 4)\n Coordinates:\n * y (y) int64 10 12 14 16\n * x (x) float64 1.0 1.5 2.5 3.5\n Data variables:\n a (x) float64 7.0 5.5 2.5 -0.5\n b (x, y) float64 2.0 7.0 6.0 nan 4.0 nan ... 4.5 nan 12.0 nan 3.5 nan\n\n 2D interpolation:\n\n >>> ds.interp(x=[0, 0.75, 1.25, 1.75], y=[11, 13, 15], method=\"linear\")\n <xarray.Dataset>\n Dimensions: (x: 4, y: 3)\n Coordinates:\n * x (x) float64 0.0 0.75 1.25 1.75\n * y (y) int64 11 13 15\n Data variables:\n a (x) float64 5.0 6.5 6.25 4.75\n b (x, y) float64 2.5 3.0 nan 4.0 5.625 nan nan nan nan nan nan nan\n \"\"\"\n from . import missing\n\n if kwargs is None:\n kwargs = {}\n\n coords = either_dict_or_kwargs(coords, coords_kwargs, \"interp\")\n indexers = dict(self._validate_interp_indexers(coords))\n\n if coords:\n # This avoids broadcasting over coordinates that are both in\n # the original array AND in the indexing array. It essentially\n # forces interpolation along the shared coordinates.\n sdims = (\n set(self.dims)\n .intersection(*[set(nx.dims) for nx in indexers.values()])\n .difference(coords.keys())\n )\n indexers.update({d: self.variables[d] for d in sdims})\n\n obj = self if assume_sorted else self.sortby([k for k in coords])\n\n def maybe_variable(obj, k):\n # workaround to get variable for dimension without coordinate.\n try:\n return obj._variables[k]\n except KeyError:\n return as_variable((k, range(obj.dims[k])))\n\n def _validate_interp_indexer(x, new_x):\n # In the case of datetimes, the restrictions placed on indexers\n # used with interp are stronger than those which are placed on\n # isel, so we need an additional check after _validate_indexers.\n if _contains_datetime_like_objects(\n x\n ) and not _contains_datetime_like_objects(new_x):\n raise TypeError(\n \"When interpolating over a datetime-like \"\n \"coordinate, the coordinates to \"\n \"interpolate to must be either datetime \"\n \"strings or datetimes. 
\"\n \"Instead got\\n{}\".format(new_x)\n )\n return x, new_x\n\n variables: Dict[Hashable, Variable] = {}\n for name, var in obj._variables.items():\n if name in indexers:\n continue\n\n if var.dtype.kind in \"uifc\":\n var_indexers = {\n k: _validate_interp_indexer(maybe_variable(obj, k), v)\n for k, v in indexers.items()\n if k in var.dims\n }\n variables[name] = missing.interp(var, var_indexers, method, **kwargs)\n elif all(d not in indexers for d in var.dims):\n # keep unrelated object array\n variables[name] = var\n\n coord_names = obj._coord_names & variables.keys()\n indexes = {k: v for k, v in obj.indexes.items() if k not in indexers}\n selected = self._replace_with_new_dims(\n variables.copy(), coord_names, indexes=indexes\n )\n\n # attach indexer as coordinate\n variables.update(indexers)\n for k, v in indexers.items():\n assert isinstance(v, Variable)\n if v.dims == (k,):\n indexes[k] = v.to_index()\n\n # Extract coordinates from indexers\n coord_vars, new_indexes = selected._get_indexers_coords_and_indexes(coords)\n variables.update(coord_vars)\n indexes.update(new_indexes)\n\n coord_names = obj._coord_names & variables.keys() | coord_vars.keys()\n return self._replace_with_new_dims(variables, coord_names, indexes=indexes)\n\n def interp_like(\n self,\n other: Union[\"Dataset\", \"DataArray\"],\n method: str = \"linear\",\n assume_sorted: bool = False,\n kwargs: Mapping[str, Any] = None,\n ) -> \"Dataset\":\n \"\"\"Interpolate this object onto the coordinates of another object,\n filling the out of range values with NaN.\n\n Parameters\n ----------\n other : Dataset or DataArray\n Object with an 'indexes' attribute giving a mapping from dimension\n names to an 1d array-like, which provides coordinates upon\n which to index the variables in this dataset. Missing values are skipped.\n method : str, optional\n {\"linear\", \"nearest\"} for multidimensional array,\n {\"linear\", \"nearest\", \"zero\", \"slinear\", \"quadratic\", \"cubic\"}\n for 1-dimensional array. 'linear' is used by default.\n assume_sorted : bool, optional\n If False, values of coordinates that are interpolated over can be\n in any order and they are sorted first. 
If True, interpolated\n coordinates are assumed to be an array of monotonically increasing\n values.\n kwargs : dict, optional\n Additional keyword passed to scipy's interpolator.\n\n Returns\n -------\n interpolated : Dataset\n Another dataset by interpolating this dataset's data along the\n coordinates of the other object.\n\n Notes\n -----\n scipy is required.\n If the dataset has object-type coordinates, reindex is used for these\n coordinates instead of the interpolation.\n\n See Also\n --------\n Dataset.interp\n Dataset.reindex_like\n \"\"\"\n if kwargs is None:\n kwargs = {}\n coords = alignment.reindex_like_indexers(self, other)\n\n numeric_coords: Dict[Hashable, pd.Index] = {}\n object_coords: Dict[Hashable, pd.Index] = {}\n for k, v in coords.items():\n if v.dtype.kind in \"uifcMm\":\n numeric_coords[k] = v\n else:\n object_coords[k] = v\n\n ds = self\n if object_coords:\n # We do not support interpolation along object coordinate.\n # reindex instead.\n ds = self.reindex(object_coords)\n return ds.interp(numeric_coords, method, assume_sorted, kwargs)\n\n # Helper methods for rename()\n def _rename_vars(self, name_dict, dims_dict):\n variables = {}\n coord_names = set()\n for k, v in self.variables.items():\n var = v.copy(deep=False)\n var.dims = tuple(dims_dict.get(dim, dim) for dim in v.dims)\n name = name_dict.get(k, k)\n if name in variables:\n raise ValueError(f\"the new name {name!r} conflicts\")\n variables[name] = var\n if k in self._coord_names:\n coord_names.add(name)\n return variables, coord_names\n\n def _rename_dims(self, name_dict):\n return {name_dict.get(k, k): v for k, v in self.dims.items()}\n\n def _rename_indexes(self, name_dict, dims_set):\n if self._indexes is None:\n return None\n indexes = {}\n for k, v in self.indexes.items():\n new_name = name_dict.get(k, k)\n if new_name not in dims_set:\n continue\n if isinstance(v, pd.MultiIndex):\n new_names = [name_dict.get(k, k) for k in v.names]\n index = v.rename(names=new_names)\n else:\n index = v.rename(new_name)\n indexes[new_name] = index\n return indexes\n\n def _rename_all(self, name_dict, dims_dict):\n variables, coord_names = self._rename_vars(name_dict, dims_dict)\n dims = self._rename_dims(dims_dict)\n indexes = self._rename_indexes(name_dict, dims.keys())\n return variables, coord_names, dims, indexes\n\n def rename(\n self,\n name_dict: Mapping[Hashable, Hashable] = None,\n **names: Hashable,\n ) -> \"Dataset\":\n \"\"\"Returns a new object with renamed variables and dimensions.\n\n Parameters\n ----------\n name_dict : dict-like, optional\n Dictionary whose keys are current variable or dimension names and\n whose values are the desired names.\n **names : optional\n Keyword form of ``name_dict``.\n One of name_dict or names must be provided.\n\n Returns\n -------\n renamed : Dataset\n Dataset with renamed variables and dimensions.\n\n See Also\n --------\n Dataset.swap_dims\n Dataset.rename_vars\n Dataset.rename_dims\n DataArray.rename\n \"\"\"\n name_dict = either_dict_or_kwargs(name_dict, names, \"rename\")\n for k in name_dict.keys():\n if k not in self and k not in self.dims:\n raise ValueError(\n \"cannot rename %r because it is not a \"\n \"variable or dimension in this dataset\" % k\n )\n\n variables, coord_names, dims, indexes = self._rename_all(\n name_dict=name_dict, dims_dict=name_dict\n )\n assert_unique_multiindex_level_names(variables)\n return self._replace(variables, coord_names, dims=dims, indexes=indexes)\n\n def rename_dims(\n self, dims_dict: Mapping[Hashable, Hashable] = None, 
**dims: Hashable\n ) -> \"Dataset\":\n \"\"\"Returns a new object with renamed dimensions only.\n\n Parameters\n ----------\n dims_dict : dict-like, optional\n Dictionary whose keys are current dimension names and\n whose values are the desired names. The desired names must\n not be the name of an existing dimension or Variable in the Dataset.\n **dims : optional\n Keyword form of ``dims_dict``.\n One of dims_dict or dims must be provided.\n\n Returns\n -------\n renamed : Dataset\n Dataset with renamed dimensions.\n\n See Also\n --------\n Dataset.swap_dims\n Dataset.rename\n Dataset.rename_vars\n DataArray.rename\n \"\"\"\n dims_dict = either_dict_or_kwargs(dims_dict, dims, \"rename_dims\")\n for k, v in dims_dict.items():\n if k not in self.dims:\n raise ValueError(\n \"cannot rename %r because it is not a \"\n \"dimension in this dataset\" % k\n )\n if v in self.dims or v in self:\n raise ValueError(\n f\"Cannot rename {k} to {v} because {v} already exists. \"\n \"Try using swap_dims instead.\"\n )\n\n variables, coord_names, sizes, indexes = self._rename_all(\n name_dict={}, dims_dict=dims_dict\n )\n return self._replace(variables, coord_names, dims=sizes, indexes=indexes)\n\n def rename_vars(\n self, name_dict: Mapping[Hashable, Hashable] = None, **names: Hashable\n ) -> \"Dataset\":\n \"\"\"Returns a new object with renamed variables including coordinates\n\n Parameters\n ----------\n name_dict : dict-like, optional\n Dictionary whose keys are current variable or coordinate names and\n whose values are the desired names.\n **names : optional\n Keyword form of ``name_dict``.\n One of name_dict or names must be provided.\n\n Returns\n -------\n renamed : Dataset\n Dataset with renamed variables including coordinates\n\n See Also\n --------\n Dataset.swap_dims\n Dataset.rename\n Dataset.rename_dims\n DataArray.rename\n \"\"\"\n name_dict = either_dict_or_kwargs(name_dict, names, \"rename_vars\")\n for k in name_dict:\n if k not in self:\n raise ValueError(\n \"cannot rename %r because it is not a \"\n \"variable or coordinate in this dataset\" % k\n )\n variables, coord_names, dims, indexes = self._rename_all(\n name_dict=name_dict, dims_dict={}\n )\n return self._replace(variables, coord_names, dims=dims, indexes=indexes)\n\n def swap_dims(\n self, dims_dict: Mapping[Hashable, Hashable] = None, **dims_kwargs\n ) -> \"Dataset\":\n \"\"\"Returns a new object with swapped dimensions.\n\n Parameters\n ----------\n dims_dict : dict-like\n Dictionary whose keys are current dimension names and whose values\n are new names.\n **dims_kwargs : {existing_dim: new_dim, ...}, optional\n The keyword arguments form of ``dims_dict``.\n One of dims_dict or dims_kwargs must be provided.\n\n Returns\n -------\n swapped : Dataset\n Dataset with swapped dimensions.\n\n Examples\n --------\n >>> ds = xr.Dataset(\n ... data_vars={\"a\": (\"x\", [5, 7]), \"b\": (\"x\", [0.1, 2.4])},\n ... coords={\"x\": [\"a\", \"b\"], \"y\": (\"x\", [0, 1])},\n ... 
)\n >>> ds\n <xarray.Dataset>\n Dimensions: (x: 2)\n Coordinates:\n * x (x) <U1 'a' 'b'\n y (x) int64 0 1\n Data variables:\n a (x) int64 5 7\n b (x) float64 0.1 2.4\n\n >>> ds.swap_dims({\"x\": \"y\"})\n <xarray.Dataset>\n Dimensions: (y: 2)\n Coordinates:\n x (y) <U1 'a' 'b'\n * y (y) int64 0 1\n Data variables:\n a (y) int64 5 7\n b (y) float64 0.1 2.4\n\n >>> ds.swap_dims({\"x\": \"z\"})\n <xarray.Dataset>\n Dimensions: (z: 2)\n Coordinates:\n x (z) <U1 'a' 'b'\n y (z) int64 0 1\n Dimensions without coordinates: z\n Data variables:\n a (z) int64 5 7\n b (z) float64 0.1 2.4\n\n See Also\n --------\n Dataset.rename\n DataArray.swap_dims\n \"\"\"\n # TODO: deprecate this method in favor of a (less confusing)\n # rename_dims() method that only renames dimensions.\n\n dims_dict = either_dict_or_kwargs(dims_dict, dims_kwargs, \"swap_dims\")\n for k, v in dims_dict.items():\n if k not in self.dims:\n raise ValueError(\n \"cannot swap from dimension %r because it is \"\n \"not an existing dimension\" % k\n )\n if v in self.variables and self.variables[v].dims != (k,):\n raise ValueError(\n \"replacement dimension %r is not a 1D \"\n \"variable along the old dimension %r\" % (v, k)\n )\n\n result_dims = {dims_dict.get(dim, dim) for dim in self.dims}\n\n coord_names = self._coord_names.copy()\n coord_names.update({dim for dim in dims_dict.values() if dim in self.variables})\n\n variables: Dict[Hashable, Variable] = {}\n indexes: Dict[Hashable, pd.Index] = {}\n for k, v in self.variables.items():\n dims = tuple(dims_dict.get(dim, dim) for dim in v.dims)\n if k in result_dims:\n var = v.to_index_variable()\n if k in self.indexes:\n indexes[k] = self.indexes[k]\n else:\n new_index = var.to_index()\n if new_index.nlevels == 1:\n # make sure index name matches dimension name\n new_index = new_index.rename(k)\n indexes[k] = new_index\n else:\n var = v.to_base_variable()\n var.dims = dims\n variables[k] = var\n\n return self._replace_with_new_dims(variables, coord_names, indexes=indexes)\n\n def expand_dims(\n self,\n dim: Union[None, Hashable, Sequence[Hashable], Mapping[Hashable, Any]] = None,\n axis: Union[None, int, Sequence[int]] = None,\n **dim_kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"Return a new object with an additional axis (or axes) inserted at\n the corresponding position in the array shape. The new object is a\n view into the underlying array, not a copy.\n\n If dim is already a scalar coordinate, it will be promoted to a 1D\n coordinate consisting of a single value.\n\n Parameters\n ----------\n dim : hashable, sequence of hashable, mapping, or None\n Dimensions to include on the new variable. If provided as hashable\n or sequence of hashable, then dimensions are inserted with length\n 1. If provided as a mapping, then the keys are the new dimensions\n and the values are either integers (giving the length of the new\n dimensions) or array-like (giving the coordinates of the new\n dimensions).\n axis : int, sequence of int, or None\n Axis position(s) where new axis is to be inserted (position(s) on\n the result array). If a list (or tuple) of integers is passed,\n multiple axes are inserted. In this case, dim arguments should be\n same length list. If axis=None is passed, all the axes will be\n inserted to the start of the result array.\n **dim_kwargs : int or sequence or ndarray\n The keywords are arbitrary dimensions being inserted and the values\n are either the lengths of the new dims (if int is given), or their\n coordinates. 
Note, this is an alternative to passing a dict to the\n dim kwarg and will only be used if dim is None.\n\n Returns\n -------\n expanded : same type as caller\n This object, but with an additional dimension(s).\n \"\"\"\n if dim is None:\n pass\n elif isinstance(dim, Mapping):\n # We're later going to modify dim in place; don't tamper with\n # the input\n dim = dict(dim)\n elif isinstance(dim, int):\n raise TypeError(\n \"dim should be hashable or sequence of hashables or mapping\"\n )\n elif isinstance(dim, str) or not isinstance(dim, Sequence):\n dim = {dim: 1}\n elif isinstance(dim, Sequence):\n if len(dim) != len(set(dim)):\n raise ValueError(\"dims should not contain duplicate values.\")\n dim = {d: 1 for d in dim}\n\n dim = either_dict_or_kwargs(dim, dim_kwargs, \"expand_dims\")\n assert isinstance(dim, MutableMapping)\n\n if axis is None:\n axis = list(range(len(dim)))\n elif not isinstance(axis, Sequence):\n axis = [axis]\n\n if len(dim) != len(axis):\n raise ValueError(\"lengths of dim and axis should be identical.\")\n for d in dim:\n if d in self.dims:\n raise ValueError(f\"Dimension {d} already exists.\")\n if d in self._variables and not utils.is_scalar(self._variables[d]):\n raise ValueError(\n \"{dim} already exists as coordinate or\"\n \" variable name.\".format(dim=d)\n )\n\n variables: Dict[Hashable, Variable] = {}\n coord_names = self._coord_names.copy()\n # If dim is a dict, then ensure that the values are either integers\n # or iterables.\n for k, v in dim.items():\n if hasattr(v, \"__iter__\"):\n # If the value for the new dimension is an iterable, then\n # save the coordinates to the variables dict, and set the\n # value within the dim dict to the length of the iterable\n # for later use.\n variables[k] = xr.IndexVariable((k,), v)\n coord_names.add(k)\n dim[k] = variables[k].size\n elif isinstance(v, int):\n pass # Do nothing if the dimensions value is just an int\n else:\n raise TypeError(\n \"The value of new dimension {k} must be \"\n \"an iterable or an int\".format(k=k)\n )\n\n for k, v in self._variables.items():\n if k not in dim:\n if k in coord_names: # Do not change coordinates\n variables[k] = v\n else:\n result_ndim = len(v.dims) + len(axis)\n for a in axis:\n if a < -result_ndim or result_ndim - 1 < a:\n raise IndexError(\n f\"Axis {a} of variable {k} is out of bounds of the \"\n f\"expanded dimension size {result_ndim}\"\n )\n\n axis_pos = [a if a >= 0 else result_ndim + a for a in axis]\n if len(axis_pos) != len(set(axis_pos)):\n raise ValueError(\"axis should not contain duplicate values\")\n # We need to sort them to make sure `axis` equals to the\n # axis positions of the result array.\n zip_axis_dim = sorted(zip(axis_pos, dim.items()))\n\n all_dims = list(zip(v.dims, v.shape))\n for d, c in zip_axis_dim:\n all_dims.insert(d, c)\n variables[k] = v.set_dims(dict(all_dims))\n else:\n # If dims includes a label of a non-dimension coordinate,\n # it will be promoted to a 1D coordinate with a single value.\n variables[k] = v.set_dims(k).to_index_variable()\n\n new_dims = self._dims.copy()\n new_dims.update(dim)\n\n return self._replace_vars_and_dims(\n variables, dims=new_dims, coord_names=coord_names\n )\n\n def set_index(\n self,\n indexes: Mapping[Hashable, Union[Hashable, Sequence[Hashable]]] = None,\n append: bool = False,\n **indexes_kwargs: Union[Hashable, Sequence[Hashable]],\n ) -> \"Dataset\":\n \"\"\"Set Dataset (multi-)indexes using one or more existing coordinates\n or variables.\n\n Parameters\n ----------\n indexes : {dim: index, ...}\n 
Mapping from names matching dimensions and values given\n by (lists of) the names of existing coordinates or variables to set\n as new (multi-)index.\n append : bool, optional\n If True, append the supplied index(es) to the existing index(es).\n Otherwise replace the existing index(es) (default).\n **indexes_kwargs : optional\n The keyword arguments form of ``indexes``.\n One of indexes or indexes_kwargs must be provided.\n\n Returns\n -------\n obj : Dataset\n Another dataset, with this dataset's data but replaced coordinates.\n\n Examples\n --------\n >>> arr = xr.DataArray(\n ... data=np.ones((2, 3)),\n ... dims=[\"x\", \"y\"],\n ... coords={\"x\": range(2), \"y\": range(3), \"a\": (\"x\", [3, 4])},\n ... )\n >>> ds = xr.Dataset({\"v\": arr})\n >>> ds\n <xarray.Dataset>\n Dimensions: (x: 2, y: 3)\n Coordinates:\n * x (x) int64 0 1\n * y (y) int64 0 1 2\n a (x) int64 3 4\n Data variables:\n v (x, y) float64 1.0 1.0 1.0 1.0 1.0 1.0\n >>> ds.set_index(x=\"a\")\n <xarray.Dataset>\n Dimensions: (x: 2, y: 3)\n Coordinates:\n * x (x) int64 3 4\n * y (y) int64 0 1 2\n Data variables:\n v (x, y) float64 1.0 1.0 1.0 1.0 1.0 1.0\n\n See Also\n --------\n Dataset.reset_index\n Dataset.swap_dims\n \"\"\"\n indexes = either_dict_or_kwargs(indexes, indexes_kwargs, \"set_index\")\n variables, coord_names = merge_indexes(\n indexes, self._variables, self._coord_names, append=append\n )\n return self._replace_vars_and_dims(variables, coord_names=coord_names)\n\n def reset_index(\n self,\n dims_or_levels: Union[Hashable, Sequence[Hashable]],\n drop: bool = False,\n ) -> \"Dataset\":\n \"\"\"Reset the specified index(es) or multi-index level(s).\n\n Parameters\n ----------\n dims_or_levels : str or list\n Name(s) of the dimension(s) and/or multi-index level(s) that will\n be reset.\n drop : bool, optional\n If True, remove the specified indexes and/or multi-index levels\n instead of extracting them as new coordinates (default: False).\n\n Returns\n -------\n obj : Dataset\n Another dataset, with this dataset's data but replaced coordinates.\n\n See Also\n --------\n Dataset.set_index\n \"\"\"\n variables, coord_names = split_indexes(\n dims_or_levels,\n self._variables,\n self._coord_names,\n cast(Mapping[Hashable, Hashable], self._level_coords),\n drop=drop,\n )\n return self._replace_vars_and_dims(variables, coord_names=coord_names)\n\n def reorder_levels(\n self,\n dim_order: Mapping[Hashable, Sequence[int]] = None,\n **dim_order_kwargs: Sequence[int],\n ) -> \"Dataset\":\n \"\"\"Rearrange index levels using input order.\n\n Parameters\n ----------\n dim_order : optional\n Mapping from names matching dimensions and values given\n by lists representing new level orders. 
Every given dimension\n must have a multi-index.\n **dim_order_kwargs : optional\n The keyword arguments form of ``dim_order``.\n One of dim_order or dim_order_kwargs must be provided.\n\n Returns\n -------\n obj : Dataset\n Another dataset, with this dataset's data but replaced\n coordinates.\n \"\"\"\n dim_order = either_dict_or_kwargs(dim_order, dim_order_kwargs, \"reorder_levels\")\n variables = self._variables.copy()\n indexes = dict(self.indexes)\n for dim, order in dim_order.items():\n coord = self._variables[dim]\n index = self.indexes[dim]\n if not isinstance(index, pd.MultiIndex):\n raise ValueError(f\"coordinate {dim} has no MultiIndex\")\n new_index = index.reorder_levels(order)\n variables[dim] = IndexVariable(coord.dims, new_index)\n indexes[dim] = new_index\n\n return self._replace(variables, indexes=indexes)\n\n def _stack_once(self, dims, new_dim):\n if ... in dims:\n dims = list(infix_dims(dims, self.dims))\n variables = {}\n for name, var in self.variables.items():\n if name not in dims:\n if any(d in var.dims for d in dims):\n add_dims = [d for d in dims if d not in var.dims]\n vdims = list(var.dims) + add_dims\n shape = [self.dims[d] for d in vdims]\n exp_var = var.set_dims(vdims, shape)\n stacked_var = exp_var.stack(**{new_dim: dims})\n variables[name] = stacked_var\n else:\n variables[name] = var.copy(deep=False)\n\n # consider dropping levels that are unused?\n levels = [self.get_index(dim) for dim in dims]\n idx = utils.multiindex_from_product_levels(levels, names=dims)\n variables[new_dim] = IndexVariable(new_dim, idx)\n\n coord_names = set(self._coord_names) - set(dims) | {new_dim}\n\n indexes = {k: v for k, v in self.indexes.items() if k not in dims}\n indexes[new_dim] = idx\n\n return self._replace_with_new_dims(\n variables, coord_names=coord_names, indexes=indexes\n )\n\n def stack(\n self,\n dimensions: Mapping[Hashable, Sequence[Hashable]] = None,\n **dimensions_kwargs: Sequence[Hashable],\n ) -> \"Dataset\":\n \"\"\"\n Stack any number of existing dimensions into a single new dimension.\n\n New dimensions will be added at the end, and the corresponding\n coordinate variables will be combined into a MultiIndex.\n\n Parameters\n ----------\n dimensions : mapping of hashable to sequence of hashable\n Mapping of the form `new_name=(dim1, dim2, ...)`. Names of new\n dimensions, and the existing dimensions that they replace. An\n ellipsis (`...`) will be replaced by all unlisted dimensions.\n Passing a list containing an ellipsis (`stacked_dim=[...]`) will stack over\n all dimensions.\n **dimensions_kwargs\n The keyword arguments form of ``dimensions``.\n One of dimensions or dimensions_kwargs must be provided.\n\n Returns\n -------\n stacked : Dataset\n Dataset with stacked data.\n\n See Also\n --------\n Dataset.unstack\n \"\"\"\n dimensions = either_dict_or_kwargs(dimensions, dimensions_kwargs, \"stack\")\n result = self\n for new_dim, dims in dimensions.items():\n result = result._stack_once(dims, new_dim)\n return result\n\n def to_stacked_array(\n self,\n new_dim: Hashable,\n sample_dims: Sequence[Hashable],\n variable_dim: str = \"variable\",\n name: Hashable = None,\n ) -> \"DataArray\":\n \"\"\"Combine variables of differing dimensionality into a DataArray\n without broadcasting.\n\n This method is similar to Dataset.to_array but does not broadcast the\n variables.\n\n Parameters\n ----------\n new_dim : hashable\n Name of the new stacked coordinate\n sample_dims : sequence of hashable\n Dimensions that **will not** be stacked. 
Each array in the dataset\n must share these dimensions. For machine learning applications,\n these define the dimensions over which samples are drawn.\n variable_dim : str, optional\n Name of the level in the stacked coordinate which corresponds to\n the variables.\n name : str, optional\n Name of the new data array.\n\n Returns\n -------\n stacked : DataArray\n DataArray with the specified dimensions and data variables\n stacked together. The stacked coordinate is named ``new_dim``\n and represented by a MultiIndex object with a level containing the\n data variable names. The name of this level is controlled using\n the ``variable_dim`` argument.\n\n See Also\n --------\n Dataset.to_array\n Dataset.stack\n DataArray.to_unstacked_dataset\n\n Examples\n --------\n >>> data = xr.Dataset(\n ... data_vars={\n ... \"a\": ((\"x\", \"y\"), [[0, 1, 2], [3, 4, 5]]),\n ... \"b\": (\"x\", [6, 7]),\n ... },\n ... coords={\"y\": [\"u\", \"v\", \"w\"]},\n ... )\n\n >>> data\n <xarray.Dataset>\n Dimensions: (x: 2, y: 3)\n Coordinates:\n * y (y) <U1 'u' 'v' 'w'\n Dimensions without coordinates: x\n Data variables:\n a (x, y) int64 0 1 2 3 4 5\n b (x) int64 6 7\n\n >>> data.to_stacked_array(\"z\", sample_dims=[\"x\"])\n <xarray.DataArray 'a' (x: 2, z: 4)>\n array([[0, 1, 2, 6],\n [3, 4, 5, 7]])\n Coordinates:\n * z (z) MultiIndex\n - variable (z) object 'a' 'a' 'a' 'b'\n - y (z) object 'u' 'v' 'w' nan\n Dimensions without coordinates: x\n\n \"\"\"\n stacking_dims = tuple(dim for dim in self.dims if dim not in sample_dims)\n\n for variable in self:\n dims = self[variable].dims\n dims_include_sample_dims = set(sample_dims) <= set(dims)\n if not dims_include_sample_dims:\n raise ValueError(\n \"All variables in the dataset must contain the \"\n \"dimensions {}.\".format(dims)\n )\n\n def ensure_stackable(val):\n assign_coords = {variable_dim: val.name}\n for dim in stacking_dims:\n if dim not in val.dims:\n assign_coords[dim] = None\n\n expand_dims = set(stacking_dims).difference(set(val.dims))\n expand_dims.add(variable_dim)\n # must be list for .expand_dims\n expand_dims = list(expand_dims)\n\n return (\n val.assign_coords(**assign_coords)\n .expand_dims(expand_dims)\n .stack({new_dim: (variable_dim,) + stacking_dims})\n )\n\n # concatenate the arrays\n stackable_vars = [ensure_stackable(self[key]) for key in self.data_vars]\n data_array = xr.concat(stackable_vars, dim=new_dim)\n\n # coerce the levels of the MultiIndex to have the same type as the\n # input dimensions. 
This code is messy, so it might be better to just\n # input a dummy value for the singleton dimension.\n idx = data_array.indexes[new_dim]\n levels = [idx.levels[0]] + [\n level.astype(self[level.name].dtype) for level in idx.levels[1:]\n ]\n new_idx = idx.set_levels(levels)\n data_array[new_dim] = IndexVariable(new_dim, new_idx)\n\n if name is not None:\n data_array.name = name\n\n return data_array\n\n def _unstack_once(self, dim: Hashable, fill_value) -> \"Dataset\":\n index = self.get_index(dim)\n index = remove_unused_levels_categories(index)\n\n variables: Dict[Hashable, Variable] = {}\n indexes = {k: v for k, v in self.indexes.items() if k != dim}\n\n for name, var in self.variables.items():\n if name != dim:\n if dim in var.dims:\n if isinstance(fill_value, Mapping):\n fill_value_ = fill_value[name]\n else:\n fill_value_ = fill_value\n\n variables[name] = var._unstack_once(\n index=index, dim=dim, fill_value=fill_value_\n )\n else:\n variables[name] = var\n\n for name, lev in zip(index.names, index.levels):\n variables[name] = IndexVariable(name, lev)\n indexes[name] = lev\n\n coord_names = set(self._coord_names) - {dim} | set(index.names)\n\n return self._replace_with_new_dims(\n variables, coord_names=coord_names, indexes=indexes\n )\n\n def _unstack_full_reindex(\n self, dim: Hashable, fill_value, sparse: bool\n ) -> \"Dataset\":\n index = self.get_index(dim)\n index = remove_unused_levels_categories(index)\n full_idx = pd.MultiIndex.from_product(index.levels, names=index.names)\n\n # take a shortcut in case the MultiIndex was not modified.\n if index.equals(full_idx):\n obj = self\n else:\n obj = self._reindex(\n {dim: full_idx}, copy=False, fill_value=fill_value, sparse=sparse\n )\n\n new_dim_names = index.names\n new_dim_sizes = [lev.size for lev in index.levels]\n\n variables: Dict[Hashable, Variable] = {}\n indexes = {k: v for k, v in self.indexes.items() if k != dim}\n\n for name, var in obj.variables.items():\n if name != dim:\n if dim in var.dims:\n new_dims = dict(zip(new_dim_names, new_dim_sizes))\n variables[name] = var.unstack({dim: new_dims})\n else:\n variables[name] = var\n\n for name, lev in zip(new_dim_names, index.levels):\n variables[name] = IndexVariable(name, lev)\n indexes[name] = lev\n\n coord_names = set(self._coord_names) - {dim} | set(new_dim_names)\n\n return self._replace_with_new_dims(\n variables, coord_names=coord_names, indexes=indexes\n )\n\n def unstack(\n self,\n dim: Union[Hashable, Iterable[Hashable]] = None,\n fill_value: Any = dtypes.NA,\n sparse: bool = False,\n ) -> \"Dataset\":\n \"\"\"\n Unstack existing dimensions corresponding to MultiIndexes into\n multiple new dimensions.\n\n New dimensions will be added at the end.\n\n Parameters\n ----------\n dim : hashable or iterable of hashable, optional\n Dimension(s) over which to unstack. By default unstacks all\n MultiIndexes.\n fill_value : scalar or dict-like, default: nan\n value to be filled. If a dict-like, maps variable names to\n fill values. 
If not provided or if the dict-like does not\n contain all variables, the dtype's NA value will be used.\n sparse : bool, default: False\n use sparse-array if True\n\n Returns\n -------\n unstacked : Dataset\n Dataset with unstacked data.\n\n See Also\n --------\n Dataset.stack\n \"\"\"\n if dim is None:\n dims = [\n d for d in self.dims if isinstance(self.get_index(d), pd.MultiIndex)\n ]\n else:\n if isinstance(dim, str) or not isinstance(dim, Iterable):\n dims = [dim]\n else:\n dims = list(dim)\n\n missing_dims = [d for d in dims if d not in self.dims]\n if missing_dims:\n raise ValueError(\n \"Dataset does not contain the dimensions: %s\" % missing_dims\n )\n\n non_multi_dims = [\n d for d in dims if not isinstance(self.get_index(d), pd.MultiIndex)\n ]\n if non_multi_dims:\n raise ValueError(\n \"cannot unstack dimensions that do not \"\n \"have a MultiIndex: %s\" % non_multi_dims\n )\n\n result = self.copy(deep=False)\n for dim in dims:\n\n if (\n # Dask arrays don't support assignment by index, which the fast unstack\n # function requires.\n # https://github.com/pydata/xarray/pull/4746#issuecomment-753282125\n any(is_duck_dask_array(v.data) for v in self.variables.values())\n # Sparse doesn't currently support (though we could special-case\n # it)\n # https://github.com/pydata/sparse/issues/422\n or any(\n isinstance(v.data, sparse_array_type)\n for v in self.variables.values()\n )\n or sparse\n # numpy full_like only added `shape` in 1.17\n or LooseVersion(np.__version__) < LooseVersion(\"1.17\")\n # Until https://github.com/pydata/xarray/pull/4751 is resolved,\n # we check explicitly whether it's a numpy array. Once that is\n # resolved, explicitly exclude pint arrays.\n # # pint doesn't implement `np.full_like` in a way that's\n # # currently compatible.\n # # https://github.com/pydata/xarray/pull/4746#issuecomment-753425173\n # # or any(\n # # isinstance(v.data, pint_array_type) for v in self.variables.values()\n # # )\n or any(\n not isinstance(v.data, np.ndarray) for v in self.variables.values()\n )\n ):\n result = result._unstack_full_reindex(dim, fill_value, sparse)\n else:\n result = result._unstack_once(dim, fill_value)\n return result\n\n def update(self, other: \"CoercibleMapping\") -> \"Dataset\":\n \"\"\"Update this dataset's variables with those from another dataset.\n\n Just like :py:meth:`dict.update` this is a in-place operation.\n\n Parameters\n ----------\n other : Dataset or mapping\n Variables with which to update this dataset. One of:\n\n - Dataset\n - mapping {var name: DataArray}\n - mapping {var name: Variable}\n - mapping {var name: (dimension name, array-like)}\n - mapping {var name: (tuple of dimension names, array-like)}\n\n Returns\n -------\n updated : Dataset\n Updated dataset. 
Note that since the update is in-place this is the input\n dataset.\n\n It is deprecated since version 0.17 and scheduled to be removed in 0.19.\n\n Raises\n ------\n ValueError\n If any dimensions would have inconsistent sizes in the updated\n dataset.\n\n See Also\n --------\n Dataset.assign\n \"\"\"\n merge_result = dataset_update_method(self, other)\n return self._replace(inplace=True, **merge_result._asdict())\n\n def merge(\n self,\n other: Union[\"CoercibleMapping\", \"DataArray\"],\n overwrite_vars: Union[Hashable, Iterable[Hashable]] = frozenset(),\n compat: str = \"no_conflicts\",\n join: str = \"outer\",\n fill_value: Any = dtypes.NA,\n combine_attrs: str = \"override\",\n ) -> \"Dataset\":\n \"\"\"Merge the arrays of two datasets into a single dataset.\n\n This method generally does not allow for overriding data, with the\n exception of attributes, which are ignored on the second dataset.\n Variables with the same name are checked for conflicts via the equals\n or identical methods.\n\n Parameters\n ----------\n other : Dataset or mapping\n Dataset or variables to merge with this dataset.\n overwrite_vars : hashable or iterable of hashable, optional\n If provided, update variables of these name(s) without checking for\n conflicts in this dataset.\n compat : {\"broadcast_equals\", \"equals\", \"identical\", \\\n \"no_conflicts\"}, optional\n String indicating how to compare variables of the same name for\n potential conflicts:\n\n - 'broadcast_equals': all values must be equal when variables are\n broadcast against each other to ensure common dimensions.\n - 'equals': all values and dimensions must be the same.\n - 'identical': all values, dimensions and attributes must be the\n same.\n - 'no_conflicts': only values which are not null in both datasets\n must be equal. The returned dataset then contains the combination\n of all non-null values.\n join : {\"outer\", \"inner\", \"left\", \"right\", \"exact\"}, optional\n Method for joining ``self`` and ``other`` along shared dimensions:\n\n - 'outer': use the union of the indexes\n - 'inner': use the intersection of the indexes\n - 'left': use indexes from ``self``\n - 'right': use indexes from ``other``\n - 'exact': error instead of aligning non-equal indexes\n fill_value : scalar or dict-like, optional\n Value to use for newly missing values. 
If a dict-like, maps\n variable names (including coordinates) to fill values.\n combine_attrs : {\"drop\", \"identical\", \"no_conflicts\", \"drop_conflicts\", \\\n \"override\"}, default: \"override\"\n String indicating how to combine attrs of the objects being merged:\n\n - \"drop\": empty attrs on returned Dataset.\n - \"identical\": all attrs must be the same on every object.\n - \"no_conflicts\": attrs from all objects are combined, any that have\n the same name must also have the same value.\n - \"drop_conflicts\": attrs from all objects are combined, any that have\n the same name but different values are dropped.\n - \"override\": skip comparing and copy attrs from the first dataset to\n the result.\n\n Returns\n -------\n merged : Dataset\n Merged dataset.\n\n Raises\n ------\n MergeError\n If any variables conflict (see ``compat``).\n \"\"\"\n other = other.to_dataset() if isinstance(other, xr.DataArray) else other\n merge_result = dataset_merge_method(\n self,\n other,\n overwrite_vars=overwrite_vars,\n compat=compat,\n join=join,\n fill_value=fill_value,\n combine_attrs=combine_attrs,\n )\n return self._replace(**merge_result._asdict())\n\n def _assert_all_in_dataset(\n self, names: Iterable[Hashable], virtual_okay: bool = False\n ) -> None:\n bad_names = set(names) - set(self._variables)\n if virtual_okay:\n bad_names -= self.virtual_variables\n if bad_names:\n raise ValueError(\n \"One or more of the specified variables \"\n \"cannot be found in this dataset\"\n )\n\n def drop_vars(\n self, names: Union[Hashable, Iterable[Hashable]], *, errors: str = \"raise\"\n ) -> \"Dataset\":\n \"\"\"Drop variables from this dataset.\n\n Parameters\n ----------\n names : hashable or iterable of hashable\n Name(s) of variables to drop.\n errors : {\"raise\", \"ignore\"}, optional\n If 'raise' (default), raises a ValueError error if any of the variable\n passed are not in the dataset. 
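# A minimal sketch contrasting ``update`` and ``merge`` above, assuming two
# small in-memory datasets: ``merge`` returns a new object, ``update`` mutates.
import xarray as xr

base = xr.Dataset({"t": ("x", [1, 2, 3])}, coords={"x": [0, 1, 2]})
extra = xr.Dataset({"p": ("x", [9, 8, 7])}, coords={"x": [0, 1, 2]})

merged = base.merge(extra)  # new Dataset containing both "t" and "p"
base.update(extra)          # adds "p" to ``base`` in place (return value deprecated)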
If 'ignore', any given names that are in the\n dataset are dropped and no error is raised.\n\n Returns\n -------\n dropped : Dataset\n\n \"\"\"\n # the Iterable check is required for mypy\n if is_scalar(names) or not isinstance(names, Iterable):\n names = {names}\n else:\n names = set(names)\n if errors == \"raise\":\n self._assert_all_in_dataset(names)\n\n variables = {k: v for k, v in self._variables.items() if k not in names}\n coord_names = {k for k in self._coord_names if k in variables}\n indexes = {k: v for k, v in self.indexes.items() if k not in names}\n return self._replace_with_new_dims(\n variables, coord_names=coord_names, indexes=indexes\n )\n\n def drop(self, labels=None, dim=None, *, errors=\"raise\", **labels_kwargs):\n \"\"\"Backward compatible method based on `drop_vars` and `drop_sel`\n\n Using either `drop_vars` or `drop_sel` is encouraged\n\n See Also\n --------\n Dataset.drop_vars\n Dataset.drop_sel\n \"\"\"\n if errors not in [\"raise\", \"ignore\"]:\n raise ValueError('errors must be either \"raise\" or \"ignore\"')\n\n if is_dict_like(labels) and not isinstance(labels, dict):\n warnings.warn(\n \"dropping coordinates using `drop` is be deprecated; use drop_vars.\",\n FutureWarning,\n stacklevel=2,\n )\n return self.drop_vars(labels, errors=errors)\n\n if labels_kwargs or isinstance(labels, dict):\n if dim is not None:\n raise ValueError(\"cannot specify dim and dict-like arguments.\")\n labels = either_dict_or_kwargs(labels, labels_kwargs, \"drop\")\n\n if dim is None and (is_scalar(labels) or isinstance(labels, Iterable)):\n warnings.warn(\n \"dropping variables using `drop` will be deprecated; using drop_vars is encouraged.\",\n PendingDeprecationWarning,\n stacklevel=2,\n )\n return self.drop_vars(labels, errors=errors)\n if dim is not None:\n warnings.warn(\n \"dropping labels using list-like labels is deprecated; using \"\n \"dict-like arguments with `drop_sel`, e.g. `ds.drop_sel(dim=[labels]).\",\n DeprecationWarning,\n stacklevel=2,\n )\n return self.drop_sel({dim: labels}, errors=errors, **labels_kwargs)\n\n warnings.warn(\n \"dropping labels using `drop` will be deprecated; using drop_sel is encouraged.\",\n PendingDeprecationWarning,\n stacklevel=2,\n )\n return self.drop_sel(labels, errors=errors)\n\n def drop_sel(self, labels=None, *, errors=\"raise\", **labels_kwargs):\n \"\"\"Drop index labels from this dataset.\n\n Parameters\n ----------\n labels : mapping of hashable to Any\n Index labels to drop\n errors : {\"raise\", \"ignore\"}, optional\n If 'raise' (default), raises a ValueError error if\n any of the index labels passed are not\n in the dataset. 
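# A minimal sketch of the drop helpers in this part of the file: ``drop_vars``
# (above) removes whole variables, while ``drop_sel``/``drop_isel`` (just
# below) remove labels or integer positions along a dimension.
import numpy as np
import xarray as xr

ds = xr.Dataset(
    {"A": (("x", "y"), np.arange(6).reshape(2, 3)), "B": ("y", [7, 8, 9])},
    coords={"y": ["a", "b", "c"]},
)
ds.drop_vars("B")          # dataset without variable "B"
ds.drop_sel(y=["a", "c"])  # keep only label "b" along y
ds.drop_isel(y=0)          # drop the first position along y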
If 'ignore', any given labels that are in the\n dataset are dropped and no error is raised.\n **labels_kwargs : {dim: label, ...}, optional\n The keyword arguments form of ``dim`` and ``labels``\n\n Returns\n -------\n dropped : Dataset\n\n Examples\n --------\n >>> data = np.arange(6).reshape(2, 3)\n >>> labels = [\"a\", \"b\", \"c\"]\n >>> ds = xr.Dataset({\"A\": ([\"x\", \"y\"], data), \"y\": labels})\n >>> ds\n <xarray.Dataset>\n Dimensions: (x: 2, y: 3)\n Coordinates:\n * y (y) <U1 'a' 'b' 'c'\n Dimensions without coordinates: x\n Data variables:\n A (x, y) int64 0 1 2 3 4 5\n >>> ds.drop_sel(y=[\"a\", \"c\"])\n <xarray.Dataset>\n Dimensions: (x: 2, y: 1)\n Coordinates:\n * y (y) <U1 'b'\n Dimensions without coordinates: x\n Data variables:\n A (x, y) int64 1 4\n >>> ds.drop_sel(y=\"b\")\n <xarray.Dataset>\n Dimensions: (x: 2, y: 2)\n Coordinates:\n * y (y) <U1 'a' 'c'\n Dimensions without coordinates: x\n Data variables:\n A (x, y) int64 0 2 3 5\n \"\"\"\n if errors not in [\"raise\", \"ignore\"]:\n raise ValueError('errors must be either \"raise\" or \"ignore\"')\n\n labels = either_dict_or_kwargs(labels, labels_kwargs, \"drop_sel\")\n\n ds = self\n for dim, labels_for_dim in labels.items():\n # Don't cast to set, as it would harm performance when labels\n # is a large numpy array\n if utils.is_scalar(labels_for_dim):\n labels_for_dim = [labels_for_dim]\n labels_for_dim = np.asarray(labels_for_dim)\n try:\n index = self.get_index(dim)\n except KeyError:\n raise ValueError(\"dimension %r does not have coordinate labels\" % dim)\n new_index = index.drop(labels_for_dim, errors=errors)\n ds = ds.loc[{dim: new_index}]\n return ds\n\n def drop_isel(self, indexers=None, **indexers_kwargs):\n \"\"\"Drop index positions from this Dataset.\n\n Parameters\n ----------\n indexers : mapping of hashable to Any\n Index locations to drop\n **indexers_kwargs : {dim: position, ...}, optional\n The keyword arguments form of ``dim`` and ``positions``\n\n Returns\n -------\n dropped : Dataset\n\n Raises\n ------\n IndexError\n\n Examples\n --------\n >>> data = np.arange(6).reshape(2, 3)\n >>> labels = [\"a\", \"b\", \"c\"]\n >>> ds = xr.Dataset({\"A\": ([\"x\", \"y\"], data), \"y\": labels})\n >>> ds\n <xarray.Dataset>\n Dimensions: (x: 2, y: 3)\n Coordinates:\n * y (y) <U1 'a' 'b' 'c'\n Dimensions without coordinates: x\n Data variables:\n A (x, y) int64 0 1 2 3 4 5\n >>> ds.drop_isel(y=[0, 2])\n <xarray.Dataset>\n Dimensions: (x: 2, y: 1)\n Coordinates:\n * y (y) <U1 'b'\n Dimensions without coordinates: x\n Data variables:\n A (x, y) int64 1 4\n >>> ds.drop_isel(y=1)\n <xarray.Dataset>\n Dimensions: (x: 2, y: 2)\n Coordinates:\n * y (y) <U1 'a' 'c'\n Dimensions without coordinates: x\n Data variables:\n A (x, y) int64 0 2 3 5\n \"\"\"\n\n indexers = either_dict_or_kwargs(indexers, indexers_kwargs, \"drop_isel\")\n\n ds = self\n dimension_index = {}\n for dim, pos_for_dim in indexers.items():\n # Don't cast to set, as it would harm performance when labels\n # is a large numpy array\n if utils.is_scalar(pos_for_dim):\n pos_for_dim = [pos_for_dim]\n pos_for_dim = np.asarray(pos_for_dim)\n index = self.get_index(dim)\n new_index = index.delete(pos_for_dim)\n dimension_index[dim] = new_index\n ds = ds.loc[dimension_index]\n return ds\n\n def drop_dims(\n self, drop_dims: Union[Hashable, Iterable[Hashable]], *, errors: str = \"raise\"\n ) -> \"Dataset\":\n \"\"\"Drop dimensions and associated variables from this dataset.\n\n Parameters\n ----------\n drop_dims : hashable or iterable of hashable\n 
Dimension or dimensions to drop.\n errors : {\"raise\", \"ignore\"}, optional\n If 'raise' (default), raises a ValueError error if any of the\n dimensions passed are not in the dataset. If 'ignore', any given\n labels that are in the dataset are dropped and no error is raised.\n\n Returns\n -------\n obj : Dataset\n The dataset without the given dimensions (or any variables\n containing those dimensions)\n errors : {\"raise\", \"ignore\"}, optional\n If 'raise' (default), raises a ValueError error if\n any of the dimensions passed are not\n in the dataset. If 'ignore', any given dimensions that are in the\n dataset are dropped and no error is raised.\n \"\"\"\n if errors not in [\"raise\", \"ignore\"]:\n raise ValueError('errors must be either \"raise\" or \"ignore\"')\n\n if isinstance(drop_dims, str) or not isinstance(drop_dims, Iterable):\n drop_dims = {drop_dims}\n else:\n drop_dims = set(drop_dims)\n\n if errors == \"raise\":\n missing_dims = drop_dims - set(self.dims)\n if missing_dims:\n raise ValueError(\n \"Dataset does not contain the dimensions: %s\" % missing_dims\n )\n\n drop_vars = {k for k, v in self._variables.items() if set(v.dims) & drop_dims}\n return self.drop_vars(drop_vars)\n\n def transpose(self, *dims: Hashable) -> \"Dataset\":\n \"\"\"Return a new Dataset object with all array dimensions transposed.\n\n Although the order of dimensions on each array will change, the dataset\n dimensions themselves will remain in fixed (sorted) order.\n\n Parameters\n ----------\n *dims : hashable, optional\n By default, reverse the dimensions on each array. Otherwise,\n reorder the dimensions to this order.\n\n Returns\n -------\n transposed : Dataset\n Each array in the dataset (including) coordinates will be\n transposed to the given order.\n\n Notes\n -----\n This operation returns a view of each array's data. It is\n lazy for dask-backed DataArrays but not for numpy-backed DataArrays\n -- the data will be fully loaded into memory.\n\n See Also\n --------\n numpy.transpose\n DataArray.transpose\n \"\"\"\n if dims:\n if set(dims) ^ set(self.dims) and ... not in dims:\n raise ValueError(\n \"arguments to transpose (%s) must be \"\n \"permuted dataset dimensions (%s)\" % (dims, tuple(self.dims))\n )\n ds = self.copy()\n for name, var in self._variables.items():\n var_dims = tuple(dim for dim in dims if dim in (var.dims + (...,)))\n ds._variables[name] = var.transpose(*var_dims)\n return ds\n\n def dropna(\n self,\n dim: Hashable,\n how: str = \"any\",\n thresh: int = None,\n subset: Iterable[Hashable] = None,\n ):\n \"\"\"Returns a new dataset with dropped labels for missing values along\n the provided dimension.\n\n Parameters\n ----------\n dim : hashable\n Dimension along which to drop missing values. Dropping along\n multiple dimensions simultaneously is not yet supported.\n how : {\"any\", \"all\"}, default: \"any\"\n * any : if any NA values are present, drop that label\n * all : if all values are NA, drop that label\n thresh : int, default: None\n If supplied, require this many non-NA values.\n subset : iterable of hashable, optional\n Which variables to check for missing values. By default, all\n variables in the dataset are checked.\n\n Returns\n -------\n Dataset\n \"\"\"\n # TODO: consider supporting multiple dimensions? 
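# A minimal sketch for ``drop_dims`` and ``transpose`` above: dropping a
# dimension removes every variable that uses it, while transposing only
# reorders axes.
import numpy as np
import xarray as xr

ds = xr.Dataset({"u": (("t", "x"), np.zeros((4, 3))), "v": ("x", np.ones(3))})
ds.drop_dims("t")       # only "v" survives; "u" depends on dimension "t"
ds.transpose("x", "t")  # each variable lists "x" before "t" where applicable
ds.transpose("x", ...)  # Ellipsis keeps the remaining dimensions in order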
Or not, given that\n # there are some ugly edge cases, e.g., pandas's dropna differs\n # depending on the order of the supplied axes.\n\n if dim not in self.dims:\n raise ValueError(\"%s must be a single dataset dimension\" % dim)\n\n if subset is None:\n subset = iter(self.data_vars)\n\n count = np.zeros(self.dims[dim], dtype=np.int64)\n size = np.int_(0) # for type checking\n\n for k in subset:\n array = self._variables[k]\n if dim in array.dims:\n dims = [d for d in array.dims if d != dim]\n count += np.asarray(array.count(dims)) # type: ignore[attr-defined]\n size += np.prod([self.dims[d] for d in dims])\n\n if thresh is not None:\n mask = count >= thresh\n elif how == \"any\":\n mask = count == size\n elif how == \"all\":\n mask = count > 0\n elif how is not None:\n raise ValueError(\"invalid how option: %s\" % how)\n else:\n raise TypeError(\"must specify how or thresh\")\n\n return self.isel({dim: mask})\n\n def fillna(self, value: Any) -> \"Dataset\":\n \"\"\"Fill missing values in this object.\n\n This operation follows the normal broadcasting and alignment rules that\n xarray uses for binary arithmetic, except the result is aligned to this\n object (``join='left'``) instead of aligned to the intersection of\n index coordinates (``join='inner'``).\n\n Parameters\n ----------\n value : scalar, ndarray, DataArray, dict or Dataset\n Used to fill all matching missing values in this dataset's data\n variables. Scalars, ndarrays or DataArrays arguments are used to\n fill all data with aligned coordinates (for DataArrays).\n Dictionaries or datasets match data variables and then align\n coordinates if necessary.\n\n Returns\n -------\n Dataset\n\n Examples\n --------\n >>> import numpy as np\n >>> import xarray as xr\n >>> ds = xr.Dataset(\n ... {\n ... \"A\": (\"x\", [np.nan, 2, np.nan, 0]),\n ... \"B\": (\"x\", [3, 4, np.nan, 1]),\n ... \"C\": (\"x\", [np.nan, np.nan, np.nan, 5]),\n ... \"D\": (\"x\", [np.nan, 3, np.nan, 4]),\n ... },\n ... coords={\"x\": [0, 1, 2, 3]},\n ... 
)\n >>> ds\n <xarray.Dataset>\n Dimensions: (x: 4)\n Coordinates:\n * x (x) int64 0 1 2 3\n Data variables:\n A (x) float64 nan 2.0 nan 0.0\n B (x) float64 3.0 4.0 nan 1.0\n C (x) float64 nan nan nan 5.0\n D (x) float64 nan 3.0 nan 4.0\n\n Replace all `NaN` values with 0s.\n\n >>> ds.fillna(0)\n <xarray.Dataset>\n Dimensions: (x: 4)\n Coordinates:\n * x (x) int64 0 1 2 3\n Data variables:\n A (x) float64 0.0 2.0 0.0 0.0\n B (x) float64 3.0 4.0 0.0 1.0\n C (x) float64 0.0 0.0 0.0 5.0\n D (x) float64 0.0 3.0 0.0 4.0\n\n Replace all `NaN` elements in column ‘A’, ‘B’, ‘C’, and ‘D’, with 0, 1, 2, and 3 respectively.\n\n >>> values = {\"A\": 0, \"B\": 1, \"C\": 2, \"D\": 3}\n >>> ds.fillna(value=values)\n <xarray.Dataset>\n Dimensions: (x: 4)\n Coordinates:\n * x (x) int64 0 1 2 3\n Data variables:\n A (x) float64 0.0 2.0 0.0 0.0\n B (x) float64 3.0 4.0 1.0 1.0\n C (x) float64 2.0 2.0 2.0 5.0\n D (x) float64 3.0 3.0 3.0 4.0\n \"\"\"\n if utils.is_dict_like(value):\n value_keys = getattr(value, \"data_vars\", value).keys()\n if not set(value_keys) <= set(self.data_vars.keys()):\n raise ValueError(\n \"all variables in the argument to `fillna` \"\n \"must be contained in the original dataset\"\n )\n out = ops.fillna(self, value)\n return out\n\n def interpolate_na(\n self,\n dim: Hashable = None,\n method: str = \"linear\",\n limit: int = None,\n use_coordinate: Union[bool, Hashable] = True,\n max_gap: Union[\n int, float, str, pd.Timedelta, np.timedelta64, datetime.timedelta\n ] = None,\n **kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"Fill in NaNs by interpolating according to different methods.\n\n Parameters\n ----------\n dim : str\n Specifies the dimension along which to interpolate.\n method : str, optional\n String indicating which method to use for interpolation:\n\n - 'linear': linear interpolation (Default). Additional keyword\n arguments are passed to :py:func:`numpy.interp`\n - 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'polynomial':\n are passed to :py:func:`scipy.interpolate.interp1d`. If\n ``method='polynomial'``, the ``order`` keyword argument must also be\n provided.\n - 'barycentric', 'krog', 'pchip', 'spline', 'akima': use their\n respective :py:class:`scipy.interpolate` classes.\n use_coordinate : bool, str, default: True\n Specifies which index to use as the x values in the interpolation\n formulated as `y = f(x)`. If False, values are treated as if\n eqaully-spaced along ``dim``. If True, the IndexVariable `dim` is\n used. If ``use_coordinate`` is a string, it specifies the name of a\n coordinate variariable to use as the index.\n limit : int, default: None\n Maximum number of consecutive NaNs to fill. Must be greater than 0\n or None for no limit. This filling is done regardless of the size of\n the gap in the data. To only interpolate over gaps less than a given length,\n see ``max_gap``.\n max_gap : int, float, str, pandas.Timedelta, numpy.timedelta64, datetime.timedelta, default: None\n Maximum size of gap, a continuous sequence of NaNs, that will be filled.\n Use None for no limit. When interpolating along a datetime64 dimension\n and ``use_coordinate=True``, ``max_gap`` can be one of the following:\n\n - a string that is valid input for pandas.to_timedelta\n - a :py:class:`numpy.timedelta64` object\n - a :py:class:`pandas.Timedelta` object\n - a :py:class:`datetime.timedelta` object\n\n Otherwise, ``max_gap`` must be an int or a float. Use of ``max_gap`` with unlabeled\n dimensions has not been implemented yet. 
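# A minimal sketch for ``dropna`` (defined a little earlier), assuming a tiny
# dataset with gaps: labels along one dimension are kept or dropped based on
# how many missing values they carry.
import numpy as np
import xarray as xr

ds = xr.Dataset(
    {"a": ("x", [0.0, np.nan, 2.0, np.nan]), "b": ("x", [1.0, np.nan, np.nan, 4.0])}
)
ds.dropna("x")             # how="any": keep positions with no NaN in any variable
ds.dropna("x", how="all")  # drop only positions where every variable is NaN
ds.dropna("x", thresh=1)   # keep positions with at least one non-NaN value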
Gap length is defined as the difference\n between coordinate values at the first data point after a gap and the last value\n before a gap. For gaps at the beginning (end), gap length is defined as the difference\n between coordinate values at the first (last) valid data point and the first (last) NaN.\n For example, consider::\n\n <xarray.DataArray (x: 9)>\n array([nan, nan, nan, 1., nan, nan, 4., nan, nan])\n Coordinates:\n * x (x) int64 0 1 2 3 4 5 6 7 8\n\n The gap lengths are 3-0 = 3; 6-3 = 3; and 8-6 = 2 respectively\n kwargs : dict, optional\n parameters passed verbatim to the underlying interpolation function\n\n Returns\n -------\n interpolated: Dataset\n Filled in Dataset.\n\n See Also\n --------\n numpy.interp\n scipy.interpolate\n\n Examples\n --------\n >>> ds = xr.Dataset(\n ... {\n ... \"A\": (\"x\", [np.nan, 2, 3, np.nan, 0]),\n ... \"B\": (\"x\", [3, 4, np.nan, 1, 7]),\n ... \"C\": (\"x\", [np.nan, np.nan, np.nan, 5, 0]),\n ... \"D\": (\"x\", [np.nan, 3, np.nan, -1, 4]),\n ... },\n ... coords={\"x\": [0, 1, 2, 3, 4]},\n ... )\n >>> ds\n <xarray.Dataset>\n Dimensions: (x: 5)\n Coordinates:\n * x (x) int64 0 1 2 3 4\n Data variables:\n A (x) float64 nan 2.0 3.0 nan 0.0\n B (x) float64 3.0 4.0 nan 1.0 7.0\n C (x) float64 nan nan nan 5.0 0.0\n D (x) float64 nan 3.0 nan -1.0 4.0\n\n >>> ds.interpolate_na(dim=\"x\", method=\"linear\")\n <xarray.Dataset>\n Dimensions: (x: 5)\n Coordinates:\n * x (x) int64 0 1 2 3 4\n Data variables:\n A (x) float64 nan 2.0 3.0 1.5 0.0\n B (x) float64 3.0 4.0 2.5 1.0 7.0\n C (x) float64 nan nan nan 5.0 0.0\n D (x) float64 nan 3.0 1.0 -1.0 4.0\n\n >>> ds.interpolate_na(dim=\"x\", method=\"linear\", fill_value=\"extrapolate\")\n <xarray.Dataset>\n Dimensions: (x: 5)\n Coordinates:\n * x (x) int64 0 1 2 3 4\n Data variables:\n A (x) float64 1.0 2.0 3.0 1.5 0.0\n B (x) float64 3.0 4.0 2.5 1.0 7.0\n C (x) float64 20.0 15.0 10.0 5.0 0.0\n D (x) float64 5.0 3.0 1.0 -1.0 4.0\n \"\"\"\n from .missing import _apply_over_vars_with_dim, interp_na\n\n new = _apply_over_vars_with_dim(\n interp_na,\n self,\n dim=dim,\n method=method,\n limit=limit,\n use_coordinate=use_coordinate,\n max_gap=max_gap,\n **kwargs,\n )\n return new\n\n def ffill(self, dim: Hashable, limit: int = None) -> \"Dataset\":\n \"\"\"Fill NaN values by propogating values forward\n\n *Requires bottleneck.*\n\n Parameters\n ----------\n dim : Hashable\n Specifies the dimension along which to propagate values when\n filling.\n limit : int, default: None\n The maximum number of consecutive NaN values to forward fill. In\n other words, if there is a gap with more than this number of\n consecutive NaNs, it will only be partially filled. Must be greater\n than 0 or None for no limit.\n\n Returns\n -------\n Dataset\n \"\"\"\n from .missing import _apply_over_vars_with_dim, ffill\n\n new = _apply_over_vars_with_dim(ffill, self, dim=dim, limit=limit)\n return new\n\n def bfill(self, dim: Hashable, limit: int = None) -> \"Dataset\":\n \"\"\"Fill NaN values by propogating values backward\n\n *Requires bottleneck.*\n\n Parameters\n ----------\n dim : str\n Specifies the dimension along which to propagate values when\n filling.\n limit : int, default: None\n The maximum number of consecutive NaN values to backward fill. In\n other words, if there is a gap with more than this number of\n consecutive NaNs, it will only be partially filled. 
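# A minimal sketch for ``ffill`` and ``bfill`` (both require the optional
# bottleneck dependency), assuming a one-dimensional variable with gaps.
import numpy as np
import xarray as xr

ds = xr.Dataset({"a": ("x", [np.nan, 1.0, np.nan, np.nan, 4.0])})
ds.ffill("x")           # propagate the last valid value forward through gaps
ds.bfill("x", limit=1)  # fill backward, at most one consecutive NaN per gap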
Must be greater\n than 0 or None for no limit.\n\n Returns\n -------\n Dataset\n \"\"\"\n from .missing import _apply_over_vars_with_dim, bfill\n\n new = _apply_over_vars_with_dim(bfill, self, dim=dim, limit=limit)\n return new\n\n def combine_first(self, other: \"Dataset\") -> \"Dataset\":\n \"\"\"Combine two Datasets, default to data_vars of self.\n\n The new coordinates follow the normal broadcasting and alignment rules\n of ``join='outer'``. Vacant cells in the expanded coordinates are\n filled with np.nan.\n\n Parameters\n ----------\n other : Dataset\n Used to fill all matching missing values in this array.\n\n Returns\n -------\n Dataset\n \"\"\"\n out = ops.fillna(self, other, join=\"outer\", dataset_join=\"outer\")\n return out\n\n def reduce(\n self,\n func: Callable,\n dim: Union[Hashable, Iterable[Hashable]] = None,\n keep_attrs: bool = None,\n keepdims: bool = False,\n numeric_only: bool = False,\n **kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"Reduce this dataset by applying `func` along some dimension(s).\n\n Parameters\n ----------\n func : callable\n Function which can be called in the form\n `f(x, axis=axis, **kwargs)` to return the result of reducing an\n np.ndarray over an integer valued axis.\n dim : str or sequence of str, optional\n Dimension(s) over which to apply `func`. By default `func` is\n applied over all dimensions.\n keep_attrs : bool, optional\n If True, the dataset's attributes (`attrs`) will be copied from\n the original object to the new one. If False (default), the new\n object will be returned without attributes.\n keepdims : bool, default: False\n If True, the dimensions which are reduced are left in the result\n as dimensions of size one. Coordinates that use these dimensions\n are removed.\n numeric_only : bool, optional\n If True, only apply ``func`` to variables with a numeric dtype.\n **kwargs : Any\n Additional keyword arguments passed on to ``func``.\n\n Returns\n -------\n reduced : Dataset\n Dataset with this object's DataArrays replaced with new DataArrays\n of summarized data and the indicated dimension(s) removed.\n \"\"\"\n if \"axis\" in kwargs:\n raise ValueError(\n \"passing 'axis' to Dataset reduce methods is ambiguous.\"\n \" Please use 'dim' instead.\"\n )\n\n if dim is None or dim is ...:\n dims = set(self.dims)\n elif isinstance(dim, str) or not isinstance(dim, Iterable):\n dims = {dim}\n else:\n dims = set(dim)\n\n missing_dimensions = [d for d in dims if d not in self.dims]\n if missing_dimensions:\n raise ValueError(\n \"Dataset does not contain the dimensions: %s\" % missing_dimensions\n )\n\n if keep_attrs is None:\n keep_attrs = _get_keep_attrs(default=False)\n\n variables: Dict[Hashable, Variable] = {}\n for name, var in self._variables.items():\n reduce_dims = [d for d in var.dims if d in dims]\n if name in self.coords:\n if not reduce_dims:\n variables[name] = var\n else:\n if (\n not numeric_only\n or np.issubdtype(var.dtype, np.number)\n or (var.dtype == np.bool_)\n ):\n if len(reduce_dims) == 1:\n # unpack dimensions for the benefit of functions\n # like np.argmin which can't handle tuple arguments\n (reduce_dims,) = reduce_dims\n elif len(reduce_dims) == var.ndim:\n # prefer to aggregate over axis=None rather than\n # axis=(0, 1) if they will be equivalent, because\n # the former is often more efficient\n reduce_dims = None # type: ignore[assignment]\n variables[name] = var.reduce(\n func,\n dim=reduce_dims,\n keep_attrs=keep_attrs,\n keepdims=keepdims,\n **kwargs,\n )\n\n coord_names = {k for k in self.coords if k in 
variables}\n indexes = {k: v for k, v in self.indexes.items() if k in variables}\n attrs = self.attrs if keep_attrs else None\n return self._replace_with_new_dims(\n variables, coord_names=coord_names, attrs=attrs, indexes=indexes\n )\n\n def map(\n self,\n func: Callable,\n keep_attrs: bool = None,\n args: Iterable[Any] = (),\n **kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"Apply a function to each variable in this dataset\n\n Parameters\n ----------\n func : callable\n Function which can be called in the form `func(x, *args, **kwargs)`\n to transform each DataArray `x` in this dataset into another\n DataArray.\n keep_attrs : bool, optional\n If True, the dataset's attributes (`attrs`) will be copied from\n the original object to the new one. If False, the new object will\n be returned without attributes.\n args : tuple, optional\n Positional arguments passed on to `func`.\n **kwargs : Any\n Keyword arguments passed on to `func`.\n\n Returns\n -------\n applied : Dataset\n Resulting dataset from applying ``func`` to each data variable.\n\n Examples\n --------\n >>> da = xr.DataArray(np.random.randn(2, 3))\n >>> ds = xr.Dataset({\"foo\": da, \"bar\": (\"x\", [-1, 2])})\n >>> ds\n <xarray.Dataset>\n Dimensions: (dim_0: 2, dim_1: 3, x: 2)\n Dimensions without coordinates: dim_0, dim_1, x\n Data variables:\n foo (dim_0, dim_1) float64 1.764 0.4002 0.9787 2.241 1.868 -0.9773\n bar (x) int64 -1 2\n >>> ds.map(np.fabs)\n <xarray.Dataset>\n Dimensions: (dim_0: 2, dim_1: 3, x: 2)\n Dimensions without coordinates: dim_0, dim_1, x\n Data variables:\n foo (dim_0, dim_1) float64 1.764 0.4002 0.9787 2.241 1.868 0.9773\n bar (x) float64 1.0 2.0\n \"\"\"\n if keep_attrs is None:\n keep_attrs = _get_keep_attrs(default=False)\n variables = {\n k: maybe_wrap_array(v, func(v, *args, **kwargs))\n for k, v in self.data_vars.items()\n }\n if keep_attrs:\n for k, v in variables.items():\n v._copy_attrs_from(self.data_vars[k])\n attrs = self.attrs if keep_attrs else None\n return type(self)(variables, attrs=attrs)\n\n def apply(\n self,\n func: Callable,\n keep_attrs: bool = None,\n args: Iterable[Any] = (),\n **kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"\n Backward compatible implementation of ``map``\n\n See Also\n --------\n Dataset.map\n \"\"\"\n warnings.warn(\n \"Dataset.apply may be deprecated in the future. Using Dataset.map is encouraged\",\n PendingDeprecationWarning,\n stacklevel=2,\n )\n return self.map(func, keep_attrs, args, **kwargs)\n\n def assign(\n self, variables: Mapping[Hashable, Any] = None, **variables_kwargs: Hashable\n ) -> \"Dataset\":\n \"\"\"Assign new data variables to a Dataset, returning a new object\n with all the original variables in addition to the new ones.\n\n Parameters\n ----------\n variables : mapping of hashable to Any\n Mapping from variables names to the new values. If the new values\n are callable, they are computed on the Dataset and assigned to new\n data variables. If the values are not callable, (e.g. a DataArray,\n scalar, or array), they are simply assigned.\n **variables_kwargs\n The keyword arguments form of ``variables``.\n One of variables or variables_kwargs must be provided.\n\n Returns\n -------\n ds : Dataset\n A new Dataset with the new variables in addition to all the\n existing variables.\n\n Notes\n -----\n Since ``kwargs`` is a dictionary, the order of your arguments may not\n be preserved, and so the order of the new variables is not well\n defined. 
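# A minimal sketch for ``reduce`` and ``map`` above: both apply a function per
# variable, but ``reduce`` collapses the given dimension(s) while ``map`` keeps
# every variable's shape.
import numpy as np
import xarray as xr

ds = xr.Dataset(
    {"a": (("x", "y"), np.arange(6.0).reshape(2, 3)), "b": ("y", [-1.0, 2.0, -3.0])}
)
ds.reduce(np.mean, dim="y")  # each variable loses its "y" dimension
ds.map(np.fabs)              # same shapes, absolute values everywhere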
Assigning multiple variables within the same ``assign`` is\n possible, but you cannot reference other variables created within the\n same ``assign`` call.\n\n See Also\n --------\n pandas.DataFrame.assign\n\n Examples\n --------\n >>> x = xr.Dataset(\n ... {\n ... \"temperature_c\": (\n ... (\"lat\", \"lon\"),\n ... 20 * np.random.rand(4).reshape(2, 2),\n ... ),\n ... \"precipitation\": ((\"lat\", \"lon\"), np.random.rand(4).reshape(2, 2)),\n ... },\n ... coords={\"lat\": [10, 20], \"lon\": [150, 160]},\n ... )\n >>> x\n <xarray.Dataset>\n Dimensions: (lat: 2, lon: 2)\n Coordinates:\n * lat (lat) int64 10 20\n * lon (lon) int64 150 160\n Data variables:\n temperature_c (lat, lon) float64 10.98 14.3 12.06 10.9\n precipitation (lat, lon) float64 0.4237 0.6459 0.4376 0.8918\n\n Where the value is a callable, evaluated on dataset:\n\n >>> x.assign(temperature_f=lambda x: x.temperature_c * 9 / 5 + 32)\n <xarray.Dataset>\n Dimensions: (lat: 2, lon: 2)\n Coordinates:\n * lat (lat) int64 10 20\n * lon (lon) int64 150 160\n Data variables:\n temperature_c (lat, lon) float64 10.98 14.3 12.06 10.9\n precipitation (lat, lon) float64 0.4237 0.6459 0.4376 0.8918\n temperature_f (lat, lon) float64 51.76 57.75 53.7 51.62\n\n Alternatively, the same behavior can be achieved by directly referencing an existing dataarray:\n\n >>> x.assign(temperature_f=x[\"temperature_c\"] * 9 / 5 + 32)\n <xarray.Dataset>\n Dimensions: (lat: 2, lon: 2)\n Coordinates:\n * lat (lat) int64 10 20\n * lon (lon) int64 150 160\n Data variables:\n temperature_c (lat, lon) float64 10.98 14.3 12.06 10.9\n precipitation (lat, lon) float64 0.4237 0.6459 0.4376 0.8918\n temperature_f (lat, lon) float64 51.76 57.75 53.7 51.62\n\n \"\"\"\n variables = either_dict_or_kwargs(variables, variables_kwargs, \"assign\")\n data = self.copy()\n # do all calculations first...\n results = data._calc_assign_results(variables)\n # ... and then assign\n data.update(results)\n return data\n\n def to_array(self, dim=\"variable\", name=None):\n \"\"\"Convert this dataset into an xarray.DataArray\n\n The data variables of this dataset will be broadcast against each other\n and stacked along the first axis of the new array. 
All coordinates of\n this dataset will remain coordinates.\n\n Parameters\n ----------\n dim : str, optional\n Name of the new dimension.\n name : str, optional\n Name of the new data array.\n\n Returns\n -------\n array : xarray.DataArray\n \"\"\"\n from .dataarray import DataArray\n\n data_vars = [self.variables[k] for k in self.data_vars]\n broadcast_vars = broadcast_variables(*data_vars)\n data = duck_array_ops.stack([b.data for b in broadcast_vars], axis=0)\n\n coords = dict(self.coords)\n coords[dim] = list(self.data_vars)\n indexes = propagate_indexes(self._indexes)\n\n dims = (dim,) + broadcast_vars[0].dims\n\n return DataArray(\n data, coords, dims, attrs=self.attrs, name=name, indexes=indexes\n )\n\n def _normalize_dim_order(\n self, dim_order: List[Hashable] = None\n ) -> Dict[Hashable, int]:\n \"\"\"\n Check the validity of the provided dimensions if any and return the mapping\n between dimension name and their size.\n\n Parameters\n ----------\n dim_order\n Dimension order to validate (default to the alphabetical order if None).\n\n Returns\n -------\n result\n Validated dimensions mapping.\n\n \"\"\"\n if dim_order is None:\n dim_order = list(self.dims)\n elif set(dim_order) != set(self.dims):\n raise ValueError(\n \"dim_order {} does not match the set of dimensions of this \"\n \"Dataset: {}\".format(dim_order, list(self.dims))\n )\n\n ordered_dims = {k: self.dims[k] for k in dim_order}\n\n return ordered_dims\n\n def _to_dataframe(self, ordered_dims: Mapping[Hashable, int]):\n columns = [k for k in self.variables if k not in self.dims]\n data = [\n self._variables[k].set_dims(ordered_dims).values.reshape(-1)\n for k in columns\n ]\n index = self.coords.to_index([*ordered_dims])\n return pd.DataFrame(dict(zip(columns, data)), index=index)\n\n def to_dataframe(self, dim_order: List[Hashable] = None) -> pd.DataFrame:\n \"\"\"Convert this dataset into a pandas.DataFrame.\n\n Non-index variables in this dataset form the columns of the\n DataFrame. The DataFrame is indexed by the Cartesian product of\n this dataset's indices.\n\n Parameters\n ----------\n dim_order\n Hierarchical dimension order for the resulting dataframe. All\n arrays are transposed to this order and then written out as flat\n vectors in contiguous order, so the last dimension in this list\n will be contiguous in the resulting DataFrame. This has a major\n influence on which operations are efficient on the resulting\n dataframe.\n\n If provided, must include all dimensions of this dataset. By\n default, dimensions are sorted alphabetically.\n\n Returns\n -------\n result\n Dataset as a pandas DataFrame.\n\n \"\"\"\n\n ordered_dims = self._normalize_dim_order(dim_order=dim_order)\n\n return self._to_dataframe(ordered_dims=ordered_dims)\n\n def _set_sparse_data_from_dataframe(\n self, idx: pd.Index, arrays: List[Tuple[Hashable, np.ndarray]], dims: tuple\n ) -> None:\n from sparse import COO\n\n if isinstance(idx, pd.MultiIndex):\n coords = np.stack([np.asarray(code) for code in idx.codes], axis=0)\n is_sorted = idx.is_lexsorted()\n shape = tuple(lev.size for lev in idx.levels)\n else:\n coords = np.arange(idx.size).reshape(1, -1)\n is_sorted = True\n shape = (idx.size,)\n\n for name, values in arrays:\n # In virtually all real use cases, the sparse array will now have\n # missing values and needs a fill_value. 
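# A minimal sketch for ``to_array`` and ``to_dataframe`` above: the former
# stacks data variables along a new dimension, the latter flattens the dataset
# onto a (possibly Multi-)indexed pandas DataFrame.
import numpy as np
import xarray as xr

ds = xr.Dataset(
    {"a": (("x", "y"), np.arange(4).reshape(2, 2)), "b": (("x", "y"), np.ones((2, 2)))},
    coords={"x": [0, 1], "y": [10, 20]},
)
arr = ds.to_array(dim="variable")           # dims become ("variable", "x", "y")
df = ds.to_dataframe(dim_order=["y", "x"])  # MultiIndex (y, x); columns "a", "b"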
For consistency, don't\n # special case the rare exceptions (e.g., dtype=int without a\n # MultiIndex).\n dtype, fill_value = dtypes.maybe_promote(values.dtype)\n values = np.asarray(values, dtype=dtype)\n\n data = COO(\n coords,\n values,\n shape,\n has_duplicates=False,\n sorted=is_sorted,\n fill_value=fill_value,\n )\n self[name] = (dims, data)\n\n def _set_numpy_data_from_dataframe(\n self, idx: pd.Index, arrays: List[Tuple[Hashable, np.ndarray]], dims: tuple\n ) -> None:\n if not isinstance(idx, pd.MultiIndex):\n for name, values in arrays:\n self[name] = (dims, values)\n return\n\n # NB: similar, more general logic, now exists in\n # variable.unstack_once; we could consider combining them at some\n # point.\n\n shape = tuple(lev.size for lev in idx.levels)\n indexer = tuple(idx.codes)\n\n # We already verified that the MultiIndex has all unique values, so\n # there are missing values if and only if the size of output arrays is\n # larger that the index.\n missing_values = np.prod(shape) > idx.shape[0]\n\n for name, values in arrays:\n # NumPy indexing is much faster than using DataFrame.reindex() to\n # fill in missing values:\n # https://stackoverflow.com/a/35049899/809705\n if missing_values:\n dtype, fill_value = dtypes.maybe_promote(values.dtype)\n data = np.full(shape, fill_value, dtype)\n else:\n # If there are no missing values, keep the existing dtype\n # instead of promoting to support NA, e.g., keep integer\n # columns as integers.\n # TODO: consider removing this special case, which doesn't\n # exist for sparse=True.\n data = np.zeros(shape, values.dtype)\n data[indexer] = values\n self[name] = (dims, data)\n\n @classmethod\n def from_dataframe(cls, dataframe: pd.DataFrame, sparse: bool = False) -> \"Dataset\":\n \"\"\"Convert a pandas.DataFrame into an xarray.Dataset\n\n Each column will be converted into an independent variable in the\n Dataset. If the dataframe's index is a MultiIndex, it will be expanded\n into a tensor product of one-dimensional indices (filling in missing\n values with NaN). This method will produce a Dataset very similar to\n that on which the 'to_dataframe' method was called, except with\n possibly redundant dimensions (since all dataset variables will have\n the same dimensionality)\n\n Parameters\n ----------\n dataframe : DataFrame\n DataFrame from which to copy data and indices.\n sparse : bool, default: False\n If true, create a sparse arrays instead of dense numpy arrays. This\n can potentially save a large amount of memory if the DataFrame has\n a MultiIndex. 
Requires the sparse package (sparse.pydata.org).\n\n Returns\n -------\n New Dataset.\n\n See Also\n --------\n xarray.DataArray.from_series\n pandas.DataFrame.to_xarray\n \"\"\"\n # TODO: Add an option to remove dimensions along which the variables\n # are constant, to enable consistent serialization to/from a dataframe,\n # even if some variables have different dimensionality.\n\n if not dataframe.columns.is_unique:\n raise ValueError(\"cannot convert DataFrame with non-unique columns\")\n\n idx = remove_unused_levels_categories(dataframe.index)\n\n if isinstance(idx, pd.MultiIndex) and not idx.is_unique:\n raise ValueError(\n \"cannot convert a DataFrame with a non-unique MultiIndex into xarray\"\n )\n\n # Cast to a NumPy array first, in case the Series is a pandas Extension\n # array (which doesn't have a valid NumPy dtype)\n # TODO: allow users to control how this casting happens, e.g., by\n # forwarding arguments to pandas.Series.to_numpy?\n arrays = [(k, np.asarray(v)) for k, v in dataframe.items()]\n\n obj = cls()\n\n if isinstance(idx, pd.MultiIndex):\n dims = tuple(\n name if name is not None else \"level_%i\" % n\n for n, name in enumerate(idx.names)\n )\n for dim, lev in zip(dims, idx.levels):\n obj[dim] = (dim, lev)\n else:\n index_name = idx.name if idx.name is not None else \"index\"\n dims = (index_name,)\n obj[index_name] = (dims, idx)\n\n if sparse:\n obj._set_sparse_data_from_dataframe(idx, arrays, dims)\n else:\n obj._set_numpy_data_from_dataframe(idx, arrays, dims)\n return obj\n\n def to_dask_dataframe(self, dim_order=None, set_index=False):\n \"\"\"\n Convert this dataset into a dask.dataframe.DataFrame.\n\n The dimensions, coordinates and data variables in this dataset form\n the columns of the DataFrame.\n\n Parameters\n ----------\n dim_order : list, optional\n Hierarchical dimension order for the resulting dataframe. All\n arrays are transposed to this order and then written out as flat\n vectors in contiguous order, so the last dimension in this list\n will be contiguous in the resulting DataFrame. This has a major\n influence on which operations are efficient on the resulting dask\n dataframe.\n\n If provided, must include all dimensions of this dataset. By\n default, dimensions are sorted alphabetically.\n set_index : bool, optional\n If set_index=True, the dask DataFrame is indexed by this dataset's\n coordinate. 
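# A minimal sketch for ``from_dataframe`` above: a MultiIndexed frame is
# expanded to the full product of its levels, with missing cells set to NaN
# (or, with ``sparse=True``, stored sparsely; that path needs the sparse package).
import pandas as pd
import xarray as xr

df = pd.DataFrame(
    {"value": [1.0, 2.0, 3.0]},
    index=pd.MultiIndex.from_tuples(
        [("a", 0), ("a", 1), ("b", 0)], names=["letter", "num"]
    ),
)
ds = xr.Dataset.from_dataframe(df)  # the absent ("b", 1) cell becomes NaN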
Since dask DataFrames do not support multi-indexes,\n set_index only works if the dataset only contains one dimension.\n\n Returns\n -------\n dask.dataframe.DataFrame\n \"\"\"\n\n import dask.array as da\n import dask.dataframe as dd\n\n ordered_dims = self._normalize_dim_order(dim_order=dim_order)\n\n columns = list(ordered_dims)\n columns.extend(k for k in self.coords if k not in self.dims)\n columns.extend(self.data_vars)\n\n series_list = []\n for name in columns:\n try:\n var = self.variables[name]\n except KeyError:\n # dimension without a matching coordinate\n size = self.dims[name]\n data = da.arange(size, chunks=size, dtype=np.int64)\n var = Variable((name,), data)\n\n # IndexVariable objects have a dummy .chunk() method\n if isinstance(var, IndexVariable):\n var = var.to_base_variable()\n\n dask_array = var.set_dims(ordered_dims).chunk(self.chunks).data\n series = dd.from_array(dask_array.reshape(-1), columns=[name])\n series_list.append(series)\n\n df = dd.concat(series_list, axis=1)\n\n if set_index:\n dim_order = [*ordered_dims]\n\n if len(dim_order) == 1:\n (dim,) = dim_order\n df = df.set_index(dim)\n else:\n # triggers an error about multi-indexes, even if only one\n # dimension is passed\n df = df.set_index(dim_order)\n\n return df\n\n def to_dict(self, data=True):\n \"\"\"\n Convert this dataset to a dictionary following xarray naming\n conventions.\n\n Converts all variables and attributes to native Python objects\n Useful for converting to json. To avoid datetime incompatibility\n use decode_times=False kwarg in xarrray.open_dataset.\n\n Parameters\n ----------\n data : bool, optional\n Whether to include the actual data in the dictionary. When set to\n False, returns just the schema.\n\n See Also\n --------\n Dataset.from_dict\n \"\"\"\n d = {\n \"coords\": {},\n \"attrs\": decode_numpy_dict_values(self.attrs),\n \"dims\": dict(self.dims),\n \"data_vars\": {},\n }\n for k in self.coords:\n d[\"coords\"].update({k: self[k].variable.to_dict(data=data)})\n for k in self.data_vars:\n d[\"data_vars\"].update({k: self[k].variable.to_dict(data=data)})\n return d\n\n @classmethod\n def from_dict(cls, d):\n \"\"\"\n Convert a dictionary into an xarray.Dataset.\n\n Input dict can take several forms:\n\n .. 
code:: python\n\n d = {\n \"t\": {\"dims\": (\"t\"), \"data\": t},\n \"a\": {\"dims\": (\"t\"), \"data\": x},\n \"b\": {\"dims\": (\"t\"), \"data\": y},\n }\n\n d = {\n \"coords\": {\"t\": {\"dims\": \"t\", \"data\": t, \"attrs\": {\"units\": \"s\"}}},\n \"attrs\": {\"title\": \"air temperature\"},\n \"dims\": \"t\",\n \"data_vars\": {\n \"a\": {\"dims\": \"t\", \"data\": x},\n \"b\": {\"dims\": \"t\", \"data\": y},\n },\n }\n\n where \"t\" is the name of the dimesion, \"a\" and \"b\" are names of data\n variables and t, x, and y are lists, numpy.arrays or pandas objects.\n\n Parameters\n ----------\n d : dict-like\n Mapping with a minimum structure of\n ``{\"var_0\": {\"dims\": [..], \"data\": [..]}, \\\n ...}``\n\n Returns\n -------\n obj : xarray.Dataset\n\n See also\n --------\n Dataset.to_dict\n DataArray.from_dict\n \"\"\"\n\n if not {\"coords\", \"data_vars\"}.issubset(set(d)):\n variables = d.items()\n else:\n import itertools\n\n variables = itertools.chain(\n d.get(\"coords\", {}).items(), d.get(\"data_vars\", {}).items()\n )\n try:\n variable_dict = {\n k: (v[\"dims\"], v[\"data\"], v.get(\"attrs\")) for k, v in variables\n }\n except KeyError as e:\n raise ValueError(\n \"cannot convert dict without the key \"\n \"'{dims_data}'\".format(dims_data=str(e.args[0]))\n )\n obj = cls(variable_dict)\n\n # what if coords aren't dims?\n coords = set(d.get(\"coords\", {})) - set(d.get(\"dims\", {}))\n obj = obj.set_coords(coords)\n\n obj.attrs.update(d.get(\"attrs\", {}))\n\n return obj\n\n @staticmethod\n def _unary_op(f):\n @functools.wraps(f)\n def func(self, *args, **kwargs):\n variables = {}\n keep_attrs = kwargs.pop(\"keep_attrs\", None)\n if keep_attrs is None:\n keep_attrs = _get_keep_attrs(default=True)\n for k, v in self._variables.items():\n if k in self._coord_names:\n variables[k] = v\n else:\n variables[k] = f(v, *args, **kwargs)\n if keep_attrs:\n variables[k].attrs = v._attrs\n attrs = self._attrs if keep_attrs else None\n return self._replace_with_new_dims(variables, attrs=attrs)\n\n return func\n\n @staticmethod\n def _binary_op(f, reflexive=False, join=None):\n @functools.wraps(f)\n def func(self, other):\n from .dataarray import DataArray\n\n if isinstance(other, groupby.GroupBy):\n return NotImplemented\n align_type = OPTIONS[\"arithmetic_join\"] if join is None else join\n if isinstance(other, (DataArray, Dataset)):\n self, other = align(self, other, join=align_type, copy=False)\n g = f if not reflexive else lambda x, y: f(y, x)\n ds = self._calculate_binary_op(g, other, join=align_type)\n return ds\n\n return func\n\n @staticmethod\n def _inplace_binary_op(f):\n @functools.wraps(f)\n def func(self, other):\n from .dataarray import DataArray\n\n if isinstance(other, groupby.GroupBy):\n raise TypeError(\n \"in-place operations between a Dataset and \"\n \"a grouped object are not permitted\"\n )\n # we don't actually modify arrays in-place with in-place Dataset\n # arithmetic -- this lets us automatically align things\n if isinstance(other, (DataArray, Dataset)):\n other = other.reindex_like(self, copy=False)\n g = ops.inplace_to_noninplace_op(f)\n ds = self._calculate_binary_op(g, other, inplace=True)\n self._replace_with_new_dims(\n ds._variables,\n ds._coord_names,\n attrs=ds._attrs,\n indexes=ds._indexes,\n inplace=True,\n )\n return self\n\n return func\n\n def _calculate_binary_op(self, f, other, join=\"inner\", inplace=False):\n def apply_over_both(lhs_data_vars, rhs_data_vars, lhs_vars, rhs_vars):\n if inplace and set(lhs_data_vars) != set(rhs_data_vars):\n 
raise ValueError(\n \"datasets must have the same data variables \"\n \"for in-place arithmetic operations: %s, %s\"\n % (list(lhs_data_vars), list(rhs_data_vars))\n )\n\n dest_vars = {}\n\n for k in lhs_data_vars:\n if k in rhs_data_vars:\n dest_vars[k] = f(lhs_vars[k], rhs_vars[k])\n elif join in [\"left\", \"outer\"]:\n dest_vars[k] = f(lhs_vars[k], np.nan)\n for k in rhs_data_vars:\n if k not in dest_vars and join in [\"right\", \"outer\"]:\n dest_vars[k] = f(rhs_vars[k], np.nan)\n return dest_vars\n\n if utils.is_dict_like(other) and not isinstance(other, Dataset):\n # can't use our shortcut of doing the binary operation with\n # Variable objects, so apply over our data vars instead.\n new_data_vars = apply_over_both(\n self.data_vars, other, self.data_vars, other\n )\n return Dataset(new_data_vars)\n\n other_coords = getattr(other, \"coords\", None)\n ds = self.coords.merge(other_coords)\n\n if isinstance(other, Dataset):\n new_vars = apply_over_both(\n self.data_vars, other.data_vars, self.variables, other.variables\n )\n else:\n other_variable = getattr(other, \"variable\", other)\n new_vars = {k: f(self.variables[k], other_variable) for k in self.data_vars}\n ds._variables.update(new_vars)\n ds._dims = calculate_dimensions(ds._variables)\n return ds\n\n def _copy_attrs_from(self, other):\n self.attrs = other.attrs\n for v in other.variables:\n if v in self.variables:\n self.variables[v].attrs = other.variables[v].attrs\n\n def diff(self, dim, n=1, label=\"upper\"):\n \"\"\"Calculate the n-th order discrete difference along given axis.\n\n Parameters\n ----------\n dim : str\n Dimension over which to calculate the finite difference.\n n : int, optional\n The number of times values are differenced.\n label : str, optional\n The new coordinate in dimension ``dim`` will have the\n values of either the minuend's or subtrahend's coordinate\n for values 'upper' and 'lower', respectively. Other\n values are not supported.\n\n Returns\n -------\n difference : same type as caller\n The n-th order finite difference of this object.\n .. 
note::\n `n` matches numpy's behavior and is different from pandas' first\n argument named `periods`.\n\n Examples\n --------\n >>> ds = xr.Dataset({\"foo\": (\"x\", [5, 5, 6, 6])})\n >>> ds.diff(\"x\")\n <xarray.Dataset>\n Dimensions: (x: 3)\n Dimensions without coordinates: x\n Data variables:\n foo (x) int64 0 1 0\n >>> ds.diff(\"x\", 2)\n <xarray.Dataset>\n Dimensions: (x: 2)\n Dimensions without coordinates: x\n Data variables:\n foo (x) int64 1 -1\n\n See Also\n --------\n Dataset.differentiate\n \"\"\"\n if n == 0:\n return self\n if n < 0:\n raise ValueError(f\"order `n` must be non-negative but got {n}\")\n\n # prepare slices\n kwargs_start = {dim: slice(None, -1)}\n kwargs_end = {dim: slice(1, None)}\n\n # prepare new coordinate\n if label == \"upper\":\n kwargs_new = kwargs_end\n elif label == \"lower\":\n kwargs_new = kwargs_start\n else:\n raise ValueError(\"The 'label' argument has to be either 'upper' or 'lower'\")\n\n variables = {}\n\n for name, var in self.variables.items():\n if dim in var.dims:\n if name in self.data_vars:\n variables[name] = var.isel(**kwargs_end) - var.isel(**kwargs_start)\n else:\n variables[name] = var.isel(**kwargs_new)\n else:\n variables[name] = var\n\n indexes = dict(self.indexes)\n if dim in indexes:\n indexes[dim] = indexes[dim][kwargs_new[dim]]\n\n difference = self._replace_with_new_dims(variables, indexes=indexes)\n\n if n > 1:\n return difference.diff(dim, n - 1)\n else:\n return difference\n\n def shift(self, shifts=None, fill_value=dtypes.NA, **shifts_kwargs):\n \"\"\"Shift this dataset by an offset along one or more dimensions.\n\n Only data variables are moved; coordinates stay in place. This is\n consistent with the behavior of ``shift`` in pandas.\n\n Parameters\n ----------\n shifts : mapping of hashable to int\n Integer offset to shift along each of the given dimensions.\n Positive offsets shift to the right; negative offsets shift to the\n left.\n fill_value : scalar or dict-like, optional\n Value to use for newly missing values. If a dict-like, maps\n variable names (including coordinates) to fill values.\n **shifts_kwargs\n The keyword arguments form of ``shifts``.\n One of shifts or shifts_kwargs must be provided.\n\n Returns\n -------\n shifted : Dataset\n Dataset with the same coordinates and attributes but shifted data\n variables.\n\n See Also\n --------\n roll\n\n Examples\n --------\n >>> ds = xr.Dataset({\"foo\": (\"x\", list(\"abcde\"))})\n >>> ds.shift(x=2)\n <xarray.Dataset>\n Dimensions: (x: 5)\n Dimensions without coordinates: x\n Data variables:\n foo (x) object nan nan 'a' 'b' 'c'\n \"\"\"\n shifts = either_dict_or_kwargs(shifts, shifts_kwargs, \"shift\")\n invalid = [k for k in shifts if k not in self.dims]\n if invalid:\n raise ValueError(\"dimensions %r do not exist\" % invalid)\n\n variables = {}\n for name, var in self.variables.items():\n if name in self.data_vars:\n fill_value_ = (\n fill_value.get(name, dtypes.NA)\n if isinstance(fill_value, dict)\n else fill_value\n )\n\n var_shifts = {k: v for k, v in shifts.items() if k in var.dims}\n variables[name] = var.shift(fill_value=fill_value_, shifts=var_shifts)\n else:\n variables[name] = var\n\n return self._replace(variables)\n\n def roll(self, shifts=None, roll_coords=None, **shifts_kwargs):\n \"\"\"Roll this dataset by an offset along one or more dimensions.\n\n Unlike shift, roll may rotate all variables, including coordinates\n if specified. 
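# A minimal sketch of what the binary-operation helpers earlier in this
# section do: datasets are aligned (default join="inner") before the op is
# applied to each data variable; scalars simply broadcast.
import xarray as xr

a = xr.Dataset({"v": ("x", [1.0, 2.0, 3.0])}, coords={"x": [0, 1, 2]})
b = xr.Dataset({"v": ("x", [10.0, 20.0, 30.0])}, coords={"x": [1, 2, 3]})
summed = a + b   # inner join on "x": only labels 1 and 2 remain
scaled = a * 2   # a scalar applies to every data variable, coords untouched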
The direction of rotation is consistent with\n :py:func:`numpy.roll`.\n\n Parameters\n ----------\n shifts : dict, optional\n A dict with keys matching dimensions and values given\n by integers to rotate each of the given dimensions. Positive\n offsets roll to the right; negative offsets roll to the left.\n roll_coords : bool\n Indicates whether to roll the coordinates by the offset\n The current default of roll_coords (None, equivalent to True) is\n deprecated and will change to False in a future version.\n Explicitly pass roll_coords to silence the warning.\n **shifts_kwargs : {dim: offset, ...}, optional\n The keyword arguments form of ``shifts``.\n One of shifts or shifts_kwargs must be provided.\n Returns\n -------\n rolled : Dataset\n Dataset with the same coordinates and attributes but rolled\n variables.\n\n See Also\n --------\n shift\n\n Examples\n --------\n >>> ds = xr.Dataset({\"foo\": (\"x\", list(\"abcde\"))})\n >>> ds.roll(x=2)\n <xarray.Dataset>\n Dimensions: (x: 5)\n Dimensions without coordinates: x\n Data variables:\n foo (x) <U1 'd' 'e' 'a' 'b' 'c'\n \"\"\"\n shifts = either_dict_or_kwargs(shifts, shifts_kwargs, \"roll\")\n invalid = [k for k in shifts if k not in self.dims]\n if invalid:\n raise ValueError(\"dimensions %r do not exist\" % invalid)\n\n if roll_coords is None:\n warnings.warn(\n \"roll_coords will be set to False in the future.\"\n \" Explicitly set roll_coords to silence warning.\",\n FutureWarning,\n stacklevel=2,\n )\n roll_coords = True\n\n unrolled_vars = () if roll_coords else self.coords\n\n variables = {}\n for k, v in self.variables.items():\n if k not in unrolled_vars:\n variables[k] = v.roll(\n **{k: s for k, s in shifts.items() if k in v.dims}\n )\n else:\n variables[k] = v\n\n if roll_coords:\n indexes = {}\n for k, v in self.indexes.items():\n (dim,) = self.variables[k].dims\n if dim in shifts:\n indexes[k] = roll_index(v, shifts[dim])\n else:\n indexes[k] = v\n else:\n indexes = dict(self.indexes)\n\n return self._replace(variables, indexes=indexes)\n\n def sortby(self, variables, ascending=True):\n \"\"\"\n Sort object by labels or values (along an axis).\n\n Sorts the dataset, either along specified dimensions,\n or according to values of 1-D dataarrays that share dimension\n with calling object.\n\n If the input variables are dataarrays, then the dataarrays are aligned\n (via left-join) to the calling object prior to sorting by cell values.\n NaNs are sorted to the end, following Numpy convention.\n\n If multiple sorts along the same dimension is\n given, numpy's lexsort is performed along that dimension:\n https://docs.scipy.org/doc/numpy/reference/generated/numpy.lexsort.html\n and the FIRST key in the sequence is used as the primary sort key,\n followed by the 2nd key, etc.\n\n Parameters\n ----------\n variables : str, DataArray, or list of str or DataArray\n 1D DataArray objects or name(s) of 1D variable(s) in\n coords/data_vars whose values are used to sort the dataset.\n ascending : bool, optional\n Whether to sort by ascending or descending order.\n\n Returns\n -------\n sorted : Dataset\n A new dataset where all the specified dims are sorted by dim\n labels.\n \"\"\"\n from .dataarray import DataArray\n\n if not isinstance(variables, list):\n variables = [variables]\n else:\n variables = variables\n variables = [v if isinstance(v, DataArray) else self[v] for v in variables]\n aligned_vars = align(self, *variables, join=\"left\")\n aligned_self = aligned_vars[0]\n aligned_other_vars = aligned_vars[1:]\n vars_by_dim = 
defaultdict(list)\n for data_array in aligned_other_vars:\n if data_array.ndim != 1:\n raise ValueError(\"Input DataArray is not 1-D.\")\n (key,) = data_array.dims\n vars_by_dim[key].append(data_array)\n\n indices = {}\n for key, arrays in vars_by_dim.items():\n order = np.lexsort(tuple(reversed(arrays)))\n indices[key] = order if ascending else order[::-1]\n return aligned_self.isel(**indices)\n\n def quantile(\n self,\n q,\n dim=None,\n interpolation=\"linear\",\n numeric_only=False,\n keep_attrs=None,\n skipna=True,\n ):\n \"\"\"Compute the qth quantile of the data along the specified dimension.\n\n Returns the qth quantiles(s) of the array elements for each variable\n in the Dataset.\n\n Parameters\n ----------\n q : float or array-like of float\n Quantile to compute, which must be between 0 and 1 inclusive.\n dim : str or sequence of str, optional\n Dimension(s) over which to apply quantile.\n interpolation : {\"linear\", \"lower\", \"higher\", \"midpoint\", \"nearest\"}, default: \"linear\"\n This optional parameter specifies the interpolation method to\n use when the desired quantile lies between two data points\n ``i < j``:\n\n * linear: ``i + (j - i) * fraction``, where ``fraction`` is\n the fractional part of the index surrounded by ``i`` and\n ``j``.\n * lower: ``i``.\n * higher: ``j``.\n * nearest: ``i`` or ``j``, whichever is nearest.\n * midpoint: ``(i + j) / 2``.\n keep_attrs : bool, optional\n If True, the dataset's attributes (`attrs`) will be copied from\n the original object to the new one. If False (default), the new\n object will be returned without attributes.\n numeric_only : bool, optional\n If True, only apply ``func`` to variables with a numeric dtype.\n skipna : bool, optional\n Whether to skip missing values when aggregating.\n\n Returns\n -------\n quantiles : Dataset\n If `q` is a single quantile, then the result is a scalar for each\n variable in data_vars. If multiple percentiles are given, first\n axis of the result corresponds to the quantile and a quantile\n dimension is added to the return Dataset. The other dimensions are\n the dimensions that remain after the reduction of the array.\n\n See Also\n --------\n numpy.nanquantile, numpy.quantile, pandas.Series.quantile, DataArray.quantile\n\n Examples\n --------\n >>> ds = xr.Dataset(\n ... {\"a\": ((\"x\", \"y\"), [[0.7, 4.2, 9.4, 1.5], [6.5, 7.3, 2.6, 1.9]])},\n ... coords={\"x\": [7, 9], \"y\": [1, 1.5, 2, 2.5]},\n ... )\n >>> ds.quantile(0) # or ds.quantile(0, dim=...)\n <xarray.Dataset>\n Dimensions: ()\n Coordinates:\n quantile float64 0.0\n Data variables:\n a float64 0.7\n >>> ds.quantile(0, dim=\"x\")\n <xarray.Dataset>\n Dimensions: (y: 4)\n Coordinates:\n * y (y) float64 1.0 1.5 2.0 2.5\n quantile float64 0.0\n Data variables:\n a (y) float64 0.7 4.2 2.6 1.5\n >>> ds.quantile([0, 0.5, 1])\n <xarray.Dataset>\n Dimensions: (quantile: 3)\n Coordinates:\n * quantile (quantile) float64 0.0 0.5 1.0\n Data variables:\n a (quantile) float64 0.7 3.4 9.4\n >>> ds.quantile([0, 0.5, 1], dim=\"x\")\n <xarray.Dataset>\n Dimensions: (quantile: 3, y: 4)\n Coordinates:\n * y (y) float64 1.0 1.5 2.0 2.5\n * quantile (quantile) float64 0.0 0.5 1.0\n Data variables:\n a (quantile, y) float64 0.7 4.2 2.6 1.5 3.6 ... 
1.7 6.5 7.3 9.4 1.9\n \"\"\"\n\n if isinstance(dim, str):\n dims = {dim}\n elif dim in [None, ...]:\n dims = set(self.dims)\n else:\n dims = set(dim)\n\n _assert_empty(\n [d for d in dims if d not in self.dims],\n \"Dataset does not contain the dimensions: %s\",\n )\n\n q = np.asarray(q, dtype=np.float64)\n\n variables = {}\n for name, var in self.variables.items():\n reduce_dims = [d for d in var.dims if d in dims]\n if reduce_dims or not var.dims:\n if name not in self.coords:\n if (\n not numeric_only\n or np.issubdtype(var.dtype, np.number)\n or var.dtype == np.bool_\n ):\n if len(reduce_dims) == var.ndim:\n # prefer to aggregate over axis=None rather than\n # axis=(0, 1) if they will be equivalent, because\n # the former is often more efficient\n reduce_dims = None\n variables[name] = var.quantile(\n q,\n dim=reduce_dims,\n interpolation=interpolation,\n keep_attrs=keep_attrs,\n skipna=skipna,\n )\n\n else:\n variables[name] = var\n\n # construct the new dataset\n coord_names = {k for k in self.coords if k in variables}\n indexes = {k: v for k, v in self.indexes.items() if k in variables}\n if keep_attrs is None:\n keep_attrs = _get_keep_attrs(default=False)\n attrs = self.attrs if keep_attrs else None\n new = self._replace_with_new_dims(\n variables, coord_names=coord_names, attrs=attrs, indexes=indexes\n )\n return new.assign_coords(quantile=q)\n\n def rank(self, dim, pct=False, keep_attrs=None):\n \"\"\"Ranks the data.\n\n Equal values are assigned a rank that is the average of the ranks that\n would have been otherwise assigned to all of the values within\n that set.\n Ranks begin at 1, not 0. If pct is True, computes percentage ranks.\n\n NaNs in the input array are returned as NaNs.\n\n The `bottleneck` library is required.\n\n Parameters\n ----------\n dim : str\n Dimension over which to compute rank.\n pct : bool, optional\n If True, compute percentage ranks, otherwise compute integer ranks.\n keep_attrs : bool, optional\n If True, the dataset's attributes (`attrs`) will be copied from\n the original object to the new one. If False (default), the new\n object will be returned without attributes.\n\n Returns\n -------\n ranked : Dataset\n Variables that do not depend on `dim` are dropped.\n \"\"\"\n if dim not in self.dims:\n raise ValueError(\"Dataset does not contain the dimension: %s\" % dim)\n\n variables = {}\n for name, var in self.variables.items():\n if name in self.data_vars:\n if dim in var.dims:\n variables[name] = var.rank(dim, pct=pct)\n else:\n variables[name] = var\n\n coord_names = set(self.coords)\n if keep_attrs is None:\n keep_attrs = _get_keep_attrs(default=False)\n attrs = self.attrs if keep_attrs else None\n return self._replace(variables, coord_names, attrs=attrs)\n\n def differentiate(self, coord, edge_order=1, datetime_unit=None):\n \"\"\" Differentiate with the second order accurate central\n differences.\n\n .. note::\n This feature is limited to simple cartesian geometry, i.e. coord\n must be one dimensional.\n\n Parameters\n ----------\n coord : str\n The coordinate to be used to compute the gradient.\n edge_order : {1, 2}, default: 1\n N-th order accurate differences at the boundaries.\n datetime_unit : None or {\"Y\", \"M\", \"W\", \"D\", \"h\", \"m\", \"s\", \"ms\", \\\n \"us\", \"ns\", \"ps\", \"fs\", \"as\"}, default: None\n Unit to compute gradient. 
Only valid for datetime coordinate.\n\n Returns\n -------\n differentiated: Dataset\n\n See also\n --------\n numpy.gradient: corresponding numpy function\n \"\"\"\n from .variable import Variable\n\n if coord not in self.variables and coord not in self.dims:\n raise ValueError(f\"Coordinate {coord} does not exist.\")\n\n coord_var = self[coord].variable\n if coord_var.ndim != 1:\n raise ValueError(\n \"Coordinate {} must be 1 dimensional but is {}\"\n \" dimensional\".format(coord, coord_var.ndim)\n )\n\n dim = coord_var.dims[0]\n if _contains_datetime_like_objects(coord_var):\n if coord_var.dtype.kind in \"mM\" and datetime_unit is None:\n datetime_unit, _ = np.datetime_data(coord_var.dtype)\n elif datetime_unit is None:\n datetime_unit = \"s\" # Default to seconds for cftime objects\n coord_var = coord_var._to_numeric(datetime_unit=datetime_unit)\n\n variables = {}\n for k, v in self.variables.items():\n if k in self.data_vars and dim in v.dims and k not in self.coords:\n if _contains_datetime_like_objects(v):\n v = v._to_numeric(datetime_unit=datetime_unit)\n grad = duck_array_ops.gradient(\n v.data, coord_var, edge_order=edge_order, axis=v.get_axis_num(dim)\n )\n variables[k] = Variable(v.dims, grad)\n else:\n variables[k] = v\n return self._replace(variables)\n\n def integrate(\n self, coord: Union[Hashable, Sequence[Hashable]], datetime_unit: str = None\n ) -> \"Dataset\":\n \"\"\"Integrate along the given coordinate using the trapezoidal rule.\n\n .. note::\n This feature is limited to simple cartesian geometry, i.e. coord\n must be one dimensional.\n\n Parameters\n ----------\n coord : hashable, or sequence of hashable\n Coordinate(s) used for the integration.\n datetime_unit : {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns', \\\n 'ps', 'fs', 'as'}, optional\n Specify the unit if datetime coordinate is used.\n\n Returns\n -------\n integrated : Dataset\n\n See also\n --------\n DataArray.integrate\n numpy.trapz : corresponding numpy function\n\n Examples\n --------\n >>> ds = xr.Dataset(\n ... data_vars={\"a\": (\"x\", [5, 5, 6, 6]), \"b\": (\"x\", [1, 2, 1, 0])},\n ... coords={\"x\": [0, 1, 2, 3], \"y\": (\"x\", [1, 7, 3, 5])},\n ... 
)\n >>> ds\n <xarray.Dataset>\n Dimensions: (x: 4)\n Coordinates:\n * x (x) int64 0 1 2 3\n y (x) int64 1 7 3 5\n Data variables:\n a (x) int64 5 5 6 6\n b (x) int64 1 2 1 0\n >>> ds.integrate(\"x\")\n <xarray.Dataset>\n Dimensions: ()\n Data variables:\n a float64 16.5\n b float64 3.5\n >>> ds.integrate(\"y\")\n <xarray.Dataset>\n Dimensions: ()\n Data variables:\n a float64 20.0\n b float64 4.0\n \"\"\"\n if not isinstance(coord, (list, tuple)):\n coord = (coord,)\n result = self\n for c in coord:\n result = result._integrate_one(c, datetime_unit=datetime_unit)\n return result\n\n def _integrate_one(self, coord, datetime_unit=None):\n from .variable import Variable\n\n if coord not in self.variables and coord not in self.dims:\n raise ValueError(f\"Coordinate {coord} does not exist.\")\n\n coord_var = self[coord].variable\n if coord_var.ndim != 1:\n raise ValueError(\n \"Coordinate {} must be 1 dimensional but is {}\"\n \" dimensional\".format(coord, coord_var.ndim)\n )\n\n dim = coord_var.dims[0]\n if _contains_datetime_like_objects(coord_var):\n if coord_var.dtype.kind in \"mM\" and datetime_unit is None:\n datetime_unit, _ = np.datetime_data(coord_var.dtype)\n elif datetime_unit is None:\n datetime_unit = \"s\" # Default to seconds for cftime objects\n coord_var = coord_var._replace(\n data=datetime_to_numeric(coord_var.data, datetime_unit=datetime_unit)\n )\n\n variables = {}\n coord_names = set()\n for k, v in self.variables.items():\n if k in self.coords:\n if dim not in v.dims:\n variables[k] = v\n coord_names.add(k)\n else:\n if k in self.data_vars and dim in v.dims:\n if _contains_datetime_like_objects(v):\n v = datetime_to_numeric(v, datetime_unit=datetime_unit)\n integ = duck_array_ops.trapz(\n v.data, coord_var.data, axis=v.get_axis_num(dim)\n )\n v_dims = list(v.dims)\n v_dims.remove(dim)\n variables[k] = Variable(v_dims, integ)\n else:\n variables[k] = v\n indexes = {k: v for k, v in self.indexes.items() if k in variables}\n return self._replace_with_new_dims(\n variables, coord_names=coord_names, indexes=indexes\n )\n\n @property\n def real(self):\n return self.map(lambda x: x.real, keep_attrs=True)\n\n @property\n def imag(self):\n return self.map(lambda x: x.imag, keep_attrs=True)\n\n plot = utils.UncachedAccessor(_Dataset_PlotMethods)\n\n def filter_by_attrs(self, **kwargs):\n \"\"\"Returns a ``Dataset`` with variables that match specific conditions.\n\n Can pass in ``key=value`` or ``key=callable``. A Dataset is returned\n containing only the variables for which all the filter tests pass.\n These tests are either ``key=value`` for which the attribute ``key``\n has the exact value ``value`` or the callable passed into\n ``key=callable`` returns True. 
The callable will be passed a single\n value, either the value of the attribute ``key`` or ``None`` if the\n DataArray does not have an attribute with the name ``key``.\n\n Parameters\n ----------\n **kwargs\n key : str\n Attribute name.\n value : callable or obj\n If value is a callable, it should return a boolean in the form\n of bool = func(attr) where attr is da.attrs[key].\n Otherwise, value will be compared to the each\n DataArray's attrs[key].\n\n Returns\n -------\n new : Dataset\n New dataset with variables filtered by attribute.\n\n Examples\n --------\n >>> # Create an example dataset:\n >>> temp = 15 + 8 * np.random.randn(2, 2, 3)\n >>> precip = 10 * np.random.rand(2, 2, 3)\n >>> lon = [[-99.83, -99.32], [-99.79, -99.23]]\n >>> lat = [[42.25, 42.21], [42.63, 42.59]]\n >>> dims = [\"x\", \"y\", \"time\"]\n >>> temp_attr = dict(standard_name=\"air_potential_temperature\")\n >>> precip_attr = dict(standard_name=\"convective_precipitation_flux\")\n >>> ds = xr.Dataset(\n ... {\n ... \"temperature\": (dims, temp, temp_attr),\n ... \"precipitation\": (dims, precip, precip_attr),\n ... },\n ... coords={\n ... \"lon\": ([\"x\", \"y\"], lon),\n ... \"lat\": ([\"x\", \"y\"], lat),\n ... \"time\": pd.date_range(\"2014-09-06\", periods=3),\n ... \"reference_time\": pd.Timestamp(\"2014-09-05\"),\n ... },\n ... )\n >>> # Get variables matching a specific standard_name.\n >>> ds.filter_by_attrs(standard_name=\"convective_precipitation_flux\")\n <xarray.Dataset>\n Dimensions: (time: 3, x: 2, y: 2)\n Coordinates:\n lon (x, y) float64 -99.83 -99.32 -99.79 -99.23\n lat (x, y) float64 42.25 42.21 42.63 42.59\n * time (time) datetime64[ns] 2014-09-06 2014-09-07 2014-09-08\n reference_time datetime64[ns] 2014-09-05\n Dimensions without coordinates: x, y\n Data variables:\n precipitation (x, y, time) float64 5.68 9.256 0.7104 ... 7.992 4.615 7.805\n >>> # Get all variables that have a standard_name attribute.\n >>> standard_name = lambda v: v is not None\n >>> ds.filter_by_attrs(standard_name=standard_name)\n <xarray.Dataset>\n Dimensions: (time: 3, x: 2, y: 2)\n Coordinates:\n lon (x, y) float64 -99.83 -99.32 -99.79 -99.23\n lat (x, y) float64 42.25 42.21 42.63 42.59\n * time (time) datetime64[ns] 2014-09-06 2014-09-07 2014-09-08\n reference_time datetime64[ns] 2014-09-05\n Dimensions without coordinates: x, y\n Data variables:\n temperature (x, y, time) float64 29.11 18.2 22.83 ... 18.28 16.15 26.63\n precipitation (x, y, time) float64 5.68 9.256 0.7104 ... 
7.992 4.615 7.805\n\n \"\"\"\n selection = []\n for var_name, variable in self.variables.items():\n has_value_flag = False\n for attr_name, pattern in kwargs.items():\n attr_value = variable.attrs.get(attr_name)\n if (callable(pattern) and pattern(attr_value)) or attr_value == pattern:\n has_value_flag = True\n else:\n has_value_flag = False\n break\n if has_value_flag is True:\n selection.append(var_name)\n return self[selection]\n\n def unify_chunks(self) -> \"Dataset\":\n \"\"\"Unify chunk size along all chunked dimensions of this Dataset.\n\n Returns\n -------\n Dataset with consistent chunk sizes for all dask-array variables\n\n See Also\n --------\n dask.array.core.unify_chunks\n \"\"\"\n\n try:\n self.chunks\n except ValueError: # \"inconsistent chunks\"\n pass\n else:\n # No variables with dask backend, or all chunks are already aligned\n return self.copy()\n\n # import dask is placed after the quick exit test above to allow\n # running this method if dask isn't installed and there are no chunks\n import dask.array\n\n ds = self.copy()\n\n dims_pos_map = {dim: index for index, dim in enumerate(ds.dims)}\n\n dask_array_names = []\n dask_unify_args = []\n for name, variable in ds.variables.items():\n if isinstance(variable.data, dask.array.Array):\n dims_tuple = [dims_pos_map[dim] for dim in variable.dims]\n dask_array_names.append(name)\n dask_unify_args.append(variable.data)\n dask_unify_args.append(dims_tuple)\n\n _, rechunked_arrays = dask.array.core.unify_chunks(*dask_unify_args)\n\n for name, new_array in zip(dask_array_names, rechunked_arrays):\n ds.variables[name]._data = new_array\n\n return ds\n\n def map_blocks(\n self,\n func: \"Callable[..., T_DSorDA]\",\n args: Sequence[Any] = (),\n kwargs: Mapping[str, Any] = None,\n template: Union[\"DataArray\", \"Dataset\"] = None,\n ) -> \"T_DSorDA\":\n \"\"\"\n Apply a function to each block of this Dataset.\n\n .. warning::\n This method is experimental and its signature may change.\n\n Parameters\n ----------\n func : callable\n User-provided function that accepts a Dataset as its first\n parameter. The function will receive a subset or 'block' of this Dataset (see below),\n corresponding to one chunk along each chunked dimension. ``func`` will be\n executed as ``func(subset_dataset, *subset_args, **kwargs)``.\n\n This function must return either a single DataArray or a single Dataset.\n\n This function cannot add a new chunked dimension.\n args : sequence\n Passed to func after unpacking and subsetting any xarray objects by blocks.\n xarray objects in args must be aligned with obj, otherwise an error is raised.\n kwargs : mapping\n Passed verbatim to func after unpacking. xarray objects, if any, will not be\n subset to blocks. Passing dask collections in kwargs is not allowed.\n template : DataArray or Dataset, optional\n xarray object representing the final result after compute is called. If not provided,\n the function will be first run on mocked-up data, that looks like this object but\n has sizes 0, to determine properties of the returned object such as dtype,\n variable names, attributes, new dimensions and new indexes (if any).\n ``template`` must be provided if the function changes the size of existing dimensions.\n When provided, ``attrs`` on variables in `template` are copied over to the result. 
Any\n ``attrs`` set by ``func`` will be ignored.\n\n Returns\n -------\n A single DataArray or Dataset with dask backend, reassembled from the outputs of the\n function.\n\n Notes\n -----\n This function is designed for when ``func`` needs to manipulate a whole xarray object\n subset to each block. Each block is loaded into memory. In the more common case where\n ``func`` can work on numpy arrays, it is recommended to use ``apply_ufunc``.\n\n If none of the variables in this object is backed by dask arrays, calling this function is\n equivalent to calling ``func(obj, *args, **kwargs)``.\n\n See Also\n --------\n dask.array.map_blocks, xarray.apply_ufunc, xarray.Dataset.map_blocks\n xarray.DataArray.map_blocks\n\n Examples\n --------\n Calculate an anomaly from climatology using ``.groupby()``. Using\n ``xr.map_blocks()`` allows for parallel operations with knowledge of ``xarray``,\n its indices, and its methods like ``.groupby()``.\n\n >>> def calculate_anomaly(da, groupby_type=\"time.month\"):\n ... gb = da.groupby(groupby_type)\n ... clim = gb.mean(dim=\"time\")\n ... return gb - clim\n ...\n >>> time = xr.cftime_range(\"1990-01\", \"1992-01\", freq=\"M\")\n >>> month = xr.DataArray(time.month, coords={\"time\": time}, dims=[\"time\"])\n >>> np.random.seed(123)\n >>> array = xr.DataArray(\n ... np.random.rand(len(time)),\n ... dims=[\"time\"],\n ... coords={\"time\": time, \"month\": month},\n ... ).chunk()\n >>> ds = xr.Dataset({\"a\": array})\n >>> ds.map_blocks(calculate_anomaly, template=ds).compute()\n <xarray.Dataset>\n Dimensions: (time: 24)\n Coordinates:\n * time (time) object 1990-01-31 00:00:00 ... 1991-12-31 00:00:00\n month (time) int64 1 2 3 4 5 6 7 8 9 10 11 12 1 2 3 4 5 6 7 8 9 10 11 12\n Data variables:\n a (time) float64 0.1289 0.1132 -0.0856 ... 0.2287 0.1906 -0.05901\n\n Note that one must explicitly use ``args=[]`` and ``kwargs={}`` to pass arguments\n to the function being applied in ``xr.map_blocks()``:\n\n >>> ds.map_blocks(\n ... calculate_anomaly,\n ... kwargs={\"groupby_type\": \"time.year\"},\n ... template=ds,\n ... )\n <xarray.Dataset>\n Dimensions: (time: 24)\n Coordinates:\n * time (time) object 1990-01-31 00:00:00 ... 
1991-12-31 00:00:00\n month (time) int64 dask.array<chunksize=(24,), meta=np.ndarray>\n Data variables:\n a (time) float64 dask.array<chunksize=(24,), meta=np.ndarray>\n \"\"\"\n from .parallel import map_blocks\n\n return map_blocks(func, self, args, kwargs, template)\n\n def polyfit(\n self,\n dim: Hashable,\n deg: int,\n skipna: bool = None,\n rcond: float = None,\n w: Union[Hashable, Any] = None,\n full: bool = False,\n cov: Union[bool, str] = False,\n ):\n \"\"\"\n Least squares polynomial fit.\n\n This replicates the behaviour of `numpy.polyfit` but differs by skipping\n invalid values when `skipna = True`.\n\n Parameters\n ----------\n dim : hashable\n Coordinate along which to fit the polynomials.\n deg : int\n Degree of the fitting polynomial.\n skipna : bool, optional\n If True, removes all invalid values before fitting each 1D slices of the array.\n Default is True if data is stored in a dask.array or if there is any\n invalid values, False otherwise.\n rcond : float, optional\n Relative condition number to the fit.\n w : hashable or Any, optional\n Weights to apply to the y-coordinate of the sample points.\n Can be an array-like object or the name of a coordinate in the dataset.\n full : bool, optional\n Whether to return the residuals, matrix rank and singular values in addition\n to the coefficients.\n cov : bool or str, optional\n Whether to return to the covariance matrix in addition to the coefficients.\n The matrix is not scaled if `cov='unscaled'`.\n\n Returns\n -------\n polyfit_results : Dataset\n A single dataset which contains (for each \"var\" in the input dataset):\n\n [var]_polyfit_coefficients\n The coefficients of the best fit for each variable in this dataset.\n [var]_polyfit_residuals\n The residuals of the least-square computation for each variable (only included if `full=True`)\n When the matrix rank is deficient, np.nan is returned.\n [dim]_matrix_rank\n The effective rank of the scaled Vandermonde coefficient matrix (only included if `full=True`)\n The rank is computed ignoring the NaN values that might be skipped.\n [dim]_singular_values\n The singular values of the scaled Vandermonde coefficient matrix (only included if `full=True`)\n [var]_polyfit_covariance\n The covariance matrix of the polynomial coefficient estimates (only included if `full=False` and `cov=True`)\n\n Warns\n -----\n RankWarning\n The rank of the coefficient matrix in the least-squares fit is deficient.\n The warning is not raised with in-memory (not dask) data and `full=True`.\n\n See Also\n --------\n numpy.polyfit\n numpy.polyval\n xarray.polyval\n \"\"\"\n variables = {}\n skipna_da = skipna\n\n x = get_clean_interp_index(self, dim, strict=False)\n xname = \"{}_\".format(self[dim].name)\n order = int(deg) + 1\n lhs = np.vander(x, order)\n\n if rcond is None:\n rcond = (\n x.shape[0] * np.core.finfo(x.dtype).eps # type: ignore[attr-defined]\n )\n\n # Weights:\n if w is not None:\n if isinstance(w, Hashable):\n w = self.coords[w]\n w = np.asarray(w)\n if w.ndim != 1:\n raise TypeError(\"Expected a 1-d array for weights.\")\n if w.shape[0] != lhs.shape[0]:\n raise TypeError(\"Expected w and {} to have the same length\".format(dim))\n lhs *= w[:, np.newaxis]\n\n # Scaling\n scale = np.sqrt((lhs * lhs).sum(axis=0))\n lhs /= scale\n\n degree_dim = utils.get_temp_dimname(self.dims, \"degree\")\n\n rank = np.linalg.matrix_rank(lhs)\n\n if full:\n rank = xr.DataArray(rank, name=xname + \"matrix_rank\")\n variables[rank.name] = rank\n sing = np.linalg.svd(lhs, compute_uv=False)\n sing = 
xr.DataArray(\n sing,\n dims=(degree_dim,),\n coords={degree_dim: np.arange(rank - 1, -1, -1)},\n name=xname + \"singular_values\",\n )\n variables[sing.name] = sing\n\n for name, da in self.data_vars.items():\n if dim not in da.dims:\n continue\n\n if is_duck_dask_array(da.data) and (\n rank != order or full or skipna is None\n ):\n # Current algorithm with dask and skipna=False neither supports\n # deficient ranks nor does it output the \"full\" info (issue dask/dask#6516)\n skipna_da = True\n elif skipna is None:\n skipna_da = bool(np.any(da.isnull()))\n\n dims_to_stack = [dimname for dimname in da.dims if dimname != dim]\n stacked_coords: Dict[Hashable, DataArray] = {}\n if dims_to_stack:\n stacked_dim = utils.get_temp_dimname(dims_to_stack, \"stacked\")\n rhs = da.transpose(dim, *dims_to_stack).stack(\n {stacked_dim: dims_to_stack}\n )\n stacked_coords = {stacked_dim: rhs[stacked_dim]}\n scale_da = scale[:, np.newaxis]\n else:\n rhs = da\n scale_da = scale\n\n if w is not None:\n rhs *= w[:, np.newaxis]\n\n with warnings.catch_warnings():\n if full: # Copy np.polyfit behavior\n warnings.simplefilter(\"ignore\", np.RankWarning)\n else: # Raise only once per variable\n warnings.simplefilter(\"once\", np.RankWarning)\n\n coeffs, residuals = duck_array_ops.least_squares(\n lhs, rhs.data, rcond=rcond, skipna=skipna_da\n )\n\n if isinstance(name, str):\n name = \"{}_\".format(name)\n else:\n # Thus a ReprObject => polyfit was called on a DataArray\n name = \"\"\n\n coeffs = xr.DataArray(\n coeffs / scale_da,\n dims=[degree_dim] + list(stacked_coords.keys()),\n coords={degree_dim: np.arange(order)[::-1], **stacked_coords},\n name=name + \"polyfit_coefficients\",\n )\n if dims_to_stack:\n coeffs = coeffs.unstack(stacked_dim)\n variables[coeffs.name] = coeffs\n\n if full or (cov is True):\n residuals = xr.DataArray(\n residuals if dims_to_stack else residuals.squeeze(),\n dims=list(stacked_coords.keys()),\n coords=stacked_coords,\n name=name + \"polyfit_residuals\",\n )\n if dims_to_stack:\n residuals = residuals.unstack(stacked_dim)\n variables[residuals.name] = residuals\n\n if cov:\n Vbase = np.linalg.inv(np.dot(lhs.T, lhs))\n Vbase /= np.outer(scale, scale)\n if cov == \"unscaled\":\n fac = 1\n else:\n if x.shape[0] <= order:\n raise ValueError(\n \"The number of data points must exceed order to scale the covariance matrix.\"\n )\n fac = residuals / (x.shape[0] - order)\n covariance = xr.DataArray(Vbase, dims=(\"cov_i\", \"cov_j\")) * fac\n variables[name + \"polyfit_covariance\"] = covariance\n\n return Dataset(data_vars=variables, attrs=self.attrs.copy())\n\n def pad(\n self,\n pad_width: Mapping[Hashable, Union[int, Tuple[int, int]]] = None,\n mode: str = \"constant\",\n stat_length: Union[\n int, Tuple[int, int], Mapping[Hashable, Tuple[int, int]]\n ] = None,\n constant_values: Union[\n int, Tuple[int, int], Mapping[Hashable, Tuple[int, int]]\n ] = None,\n end_values: Union[\n int, Tuple[int, int], Mapping[Hashable, Tuple[int, int]]\n ] = None,\n reflect_type: str = None,\n **pad_width_kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"Pad this dataset along one or more dimensions.\n\n .. 
warning::\n This function is experimental and its behaviour is likely to change\n especially regarding padding of dimension coordinates (or IndexVariables).\n\n When using one of the modes (\"edge\", \"reflect\", \"symmetric\", \"wrap\"),\n coordinates will be padded with the same mode, otherwise coordinates\n are padded using the \"constant\" mode with fill_value dtypes.NA.\n\n Parameters\n ----------\n pad_width : mapping of hashable to tuple of int\n Mapping with the form of {dim: (pad_before, pad_after)}\n describing the number of values padded along each dimension.\n {dim: pad} is a shortcut for pad_before = pad_after = pad\n mode : str, default: \"constant\"\n One of the following string values (taken from numpy docs).\n\n 'constant' (default)\n Pads with a constant value.\n 'edge'\n Pads with the edge values of array.\n 'linear_ramp'\n Pads with the linear ramp between end_value and the\n array edge value.\n 'maximum'\n Pads with the maximum value of all or part of the\n vector along each axis.\n 'mean'\n Pads with the mean value of all or part of the\n vector along each axis.\n 'median'\n Pads with the median value of all or part of the\n vector along each axis.\n 'minimum'\n Pads with the minimum value of all or part of the\n vector along each axis.\n 'reflect'\n Pads with the reflection of the vector mirrored on\n the first and last values of the vector along each\n axis.\n 'symmetric'\n Pads with the reflection of the vector mirrored\n along the edge of the array.\n 'wrap'\n Pads with the wrap of the vector along the axis.\n The first values are used to pad the end and the\n end values are used to pad the beginning.\n stat_length : int, tuple or mapping of hashable to tuple, default: None\n Used in 'maximum', 'mean', 'median', and 'minimum'. Number of\n values at edge of each axis used to calculate the statistic value.\n {dim_1: (before_1, after_1), ... dim_N: (before_N, after_N)} unique\n statistic lengths along each dimension.\n ((before, after),) yields same before and after statistic lengths\n for each dimension.\n (stat_length,) or int is a shortcut for before = after = statistic\n length for all axes.\n Default is ``None``, to use the entire axis.\n constant_values : scalar, tuple or mapping of hashable to tuple, default: 0\n Used in 'constant'. The values to set the padded values for each\n axis.\n ``{dim_1: (before_1, after_1), ... dim_N: (before_N, after_N)}`` unique\n pad constants along each dimension.\n ``((before, after),)`` yields same before and after constants for each\n dimension.\n ``(constant,)`` or ``constant`` is a shortcut for ``before = after = constant`` for\n all dimensions.\n Default is 0.\n end_values : scalar, tuple or mapping of hashable to tuple, default: 0\n Used in 'linear_ramp'. The values used for the ending value of the\n linear_ramp and that will form the edge of the padded array.\n ``{dim_1: (before_1, after_1), ... dim_N: (before_N, after_N)}`` unique\n end values along each dimension.\n ``((before, after),)`` yields same before and after end values for each\n axis.\n ``(constant,)`` or ``constant`` is a shortcut for ``before = after = constant`` for\n all axes.\n Default is 0.\n reflect_type : {\"even\", \"odd\"}, optional\n Used in \"reflect\", and \"symmetric\". The \"even\" style is the\n default with an unaltered reflection around the edge value. 
For\n the \"odd\" style, the extended part of the array is created by\n subtracting the reflected values from two times the edge value.\n **pad_width_kwargs\n The keyword arguments form of ``pad_width``.\n One of ``pad_width`` or ``pad_width_kwargs`` must be provided.\n\n Returns\n -------\n padded : Dataset\n Dataset with the padded coordinates and data.\n\n See Also\n --------\n Dataset.shift, Dataset.roll, Dataset.bfill, Dataset.ffill, numpy.pad, dask.array.pad\n\n Notes\n -----\n By default when ``mode=\"constant\"`` and ``constant_values=None``, integer types will be\n promoted to ``float`` and padded with ``np.nan``. To avoid type promotion\n specify ``constant_values=np.nan``\n\n Examples\n --------\n >>> ds = xr.Dataset({\"foo\": (\"x\", range(5))})\n >>> ds.pad(x=(1, 2))\n <xarray.Dataset>\n Dimensions: (x: 8)\n Dimensions without coordinates: x\n Data variables:\n foo (x) float64 nan 0.0 1.0 2.0 3.0 4.0 nan nan\n \"\"\"\n pad_width = either_dict_or_kwargs(pad_width, pad_width_kwargs, \"pad\")\n\n if mode in (\"edge\", \"reflect\", \"symmetric\", \"wrap\"):\n coord_pad_mode = mode\n coord_pad_options = {\n \"stat_length\": stat_length,\n \"constant_values\": constant_values,\n \"end_values\": end_values,\n \"reflect_type\": reflect_type,\n }\n else:\n coord_pad_mode = \"constant\"\n coord_pad_options = {}\n\n variables = {}\n for name, var in self.variables.items():\n var_pad_width = {k: v for k, v in pad_width.items() if k in var.dims}\n if not var_pad_width:\n variables[name] = var\n elif name in self.data_vars:\n variables[name] = var.pad(\n pad_width=var_pad_width,\n mode=mode,\n stat_length=stat_length,\n constant_values=constant_values,\n end_values=end_values,\n reflect_type=reflect_type,\n )\n else:\n variables[name] = var.pad(\n pad_width=var_pad_width,\n mode=coord_pad_mode,\n **coord_pad_options, # type: ignore[arg-type]\n )\n\n return self._replace_vars_and_dims(variables)\n\n def idxmin(\n self,\n dim: Hashable = None,\n skipna: bool = None,\n fill_value: Any = dtypes.NA,\n keep_attrs: bool = None,\n ) -> \"Dataset\":\n \"\"\"Return the coordinate label of the minimum value along a dimension.\n\n Returns a new `Dataset` named after the dimension with the values of\n the coordinate labels along that dimension corresponding to minimum\n values along that dimension.\n\n In comparison to :py:meth:`~Dataset.argmin`, this returns the\n coordinate label while :py:meth:`~Dataset.argmin` returns the index.\n\n Parameters\n ----------\n dim : str, optional\n Dimension over which to apply `idxmin`. This is optional for 1D\n variables, but required for variables with 2 or more dimensions.\n skipna : bool or None, default: None\n If True, skip missing values (as marked by NaN). By default, only\n skips missing values for ``float``, ``complex``, and ``object``\n dtypes; other dtypes either do not have a sentinel missing value\n (``int``) or ``skipna=True`` has not been implemented\n (``datetime64`` or ``timedelta64``).\n fill_value : Any, default: NaN\n Value to be filled in case all of the values along a dimension are\n null. By default this is NaN. The fill value and result are\n automatically converted to a compatible dtype if possible.\n Ignored if ``skipna`` is False.\n keep_attrs : bool, default: False\n If True, the attributes (``attrs``) will be copied from the\n original object to the new one. 
If False (default), the new object\n will be returned without attributes.\n\n Returns\n -------\n reduced : Dataset\n New `Dataset` object with `idxmin` applied to its data and the\n indicated dimension removed.\n\n See Also\n --------\n DataArray.idxmin, Dataset.idxmax, Dataset.min, Dataset.argmin\n\n Examples\n --------\n >>> array1 = xr.DataArray(\n ... [0, 2, 1, 0, -2], dims=\"x\", coords={\"x\": [\"a\", \"b\", \"c\", \"d\", \"e\"]}\n ... )\n >>> array2 = xr.DataArray(\n ... [\n ... [2.0, 1.0, 2.0, 0.0, -2.0],\n ... [-4.0, np.NaN, 2.0, np.NaN, -2.0],\n ... [np.NaN, np.NaN, 1.0, np.NaN, np.NaN],\n ... ],\n ... dims=[\"y\", \"x\"],\n ... coords={\"y\": [-1, 0, 1], \"x\": [\"a\", \"b\", \"c\", \"d\", \"e\"]},\n ... )\n >>> ds = xr.Dataset({\"int\": array1, \"float\": array2})\n >>> ds.min(dim=\"x\")\n <xarray.Dataset>\n Dimensions: (y: 3)\n Coordinates:\n * y (y) int64 -1 0 1\n Data variables:\n int int64 -2\n float (y) float64 -2.0 -4.0 1.0\n >>> ds.argmin(dim=\"x\")\n <xarray.Dataset>\n Dimensions: (y: 3)\n Coordinates:\n * y (y) int64 -1 0 1\n Data variables:\n int int64 4\n float (y) int64 4 0 2\n >>> ds.idxmin(dim=\"x\")\n <xarray.Dataset>\n Dimensions: (y: 3)\n Coordinates:\n * y (y) int64 -1 0 1\n Data variables:\n int <U1 'e'\n float (y) object 'e' 'a' 'c'\n \"\"\"\n return self.map(\n methodcaller(\n \"idxmin\",\n dim=dim,\n skipna=skipna,\n fill_value=fill_value,\n keep_attrs=keep_attrs,\n )\n )\n\n def idxmax(\n self,\n dim: Hashable = None,\n skipna: bool = None,\n fill_value: Any = dtypes.NA,\n keep_attrs: bool = None,\n ) -> \"Dataset\":\n \"\"\"Return the coordinate label of the maximum value along a dimension.\n\n Returns a new `Dataset` named after the dimension with the values of\n the coordinate labels along that dimension corresponding to maximum\n values along that dimension.\n\n In comparison to :py:meth:`~Dataset.argmax`, this returns the\n coordinate label while :py:meth:`~Dataset.argmax` returns the index.\n\n Parameters\n ----------\n dim : str, optional\n Dimension over which to apply `idxmax`. This is optional for 1D\n variables, but required for variables with 2 or more dimensions.\n skipna : bool or None, default: None\n If True, skip missing values (as marked by NaN). By default, only\n skips missing values for ``float``, ``complex``, and ``object``\n dtypes; other dtypes either do not have a sentinel missing value\n (``int``) or ``skipna=True`` has not been implemented\n (``datetime64`` or ``timedelta64``).\n fill_value : Any, default: NaN\n Value to be filled in case all of the values along a dimension are\n null. By default this is NaN. The fill value and result are\n automatically converted to a compatible dtype if possible.\n Ignored if ``skipna`` is False.\n keep_attrs : bool, default: False\n If True, the attributes (``attrs``) will be copied from the\n original object to the new one. If False (default), the new object\n will be returned without attributes.\n\n Returns\n -------\n reduced : Dataset\n New `Dataset` object with `idxmax` applied to its data and the\n indicated dimension removed.\n\n See Also\n --------\n DataArray.idxmax, Dataset.idxmin, Dataset.max, Dataset.argmax\n\n Examples\n --------\n >>> array1 = xr.DataArray(\n ... [0, 2, 1, 0, -2], dims=\"x\", coords={\"x\": [\"a\", \"b\", \"c\", \"d\", \"e\"]}\n ... )\n >>> array2 = xr.DataArray(\n ... [\n ... [2.0, 1.0, 2.0, 0.0, -2.0],\n ... [-4.0, np.NaN, 2.0, np.NaN, -2.0],\n ... [np.NaN, np.NaN, 1.0, np.NaN, np.NaN],\n ... ],\n ... dims=[\"y\", \"x\"],\n ... 
coords={\"y\": [-1, 0, 1], \"x\": [\"a\", \"b\", \"c\", \"d\", \"e\"]},\n ... )\n >>> ds = xr.Dataset({\"int\": array1, \"float\": array2})\n >>> ds.max(dim=\"x\")\n <xarray.Dataset>\n Dimensions: (y: 3)\n Coordinates:\n * y (y) int64 -1 0 1\n Data variables:\n int int64 2\n float (y) float64 2.0 2.0 1.0\n >>> ds.argmax(dim=\"x\")\n <xarray.Dataset>\n Dimensions: (y: 3)\n Coordinates:\n * y (y) int64 -1 0 1\n Data variables:\n int int64 1\n float (y) int64 0 2 2\n >>> ds.idxmax(dim=\"x\")\n <xarray.Dataset>\n Dimensions: (y: 3)\n Coordinates:\n * y (y) int64 -1 0 1\n Data variables:\n int <U1 'b'\n float (y) object 'a' 'c' 'c'\n \"\"\"\n return self.map(\n methodcaller(\n \"idxmax\",\n dim=dim,\n skipna=skipna,\n fill_value=fill_value,\n keep_attrs=keep_attrs,\n )\n )\n\n def argmin(self, dim=None, **kwargs):\n \"\"\"Indices of the minima of the member variables.\n\n If there are multiple minima, the indices of the first one found will be\n returned.\n\n Parameters\n ----------\n dim : str, optional\n The dimension over which to find the minimum. By default, finds minimum over\n all dimensions - for now returning an int for backward compatibility, but\n this is deprecated, in future will be an error, since DataArray.argmin will\n return a dict with indices for all dimensions, which does not make sense for\n a Dataset.\n keep_attrs : bool, optional\n If True, the attributes (`attrs`) will be copied from the original\n object to the new one. If False (default), the new object will be\n returned without attributes.\n skipna : bool, optional\n If True, skip missing values (as marked by NaN). By default, only\n skips missing values for float dtypes; other dtypes either do not\n have a sentinel missing value (int) or skipna=True has not been\n implemented (object, datetime64 or timedelta64).\n\n Returns\n -------\n result : Dataset\n\n See Also\n --------\n DataArray.argmin\n \"\"\"\n if dim is None:\n warnings.warn(\n \"Once the behaviour of DataArray.argmin() and Variable.argmin() without \"\n \"dim changes to return a dict of indices of each dimension, for \"\n \"consistency it will be an error to call Dataset.argmin() with no argument,\"\n \"since we don't return a dict of Datasets.\",\n DeprecationWarning,\n stacklevel=2,\n )\n if (\n dim is None\n or (not isinstance(dim, Sequence) and dim is not ...)\n or isinstance(dim, str)\n ):\n # Return int index if single dimension is passed, and is not part of a\n # sequence\n argmin_func = getattr(duck_array_ops, \"argmin\")\n return self.reduce(argmin_func, dim=dim, **kwargs)\n else:\n raise ValueError(\n \"When dim is a sequence or ..., DataArray.argmin() returns a dict. \"\n \"dicts cannot be contained in a Dataset, so cannot call \"\n \"Dataset.argmin() with a sequence or ... for dim\"\n )\n\n def argmax(self, dim=None, **kwargs):\n \"\"\"Indices of the maxima of the member variables.\n\n If there are multiple maxima, the indices of the first one found will be\n returned.\n\n Parameters\n ----------\n dim : str, optional\n The dimension over which to find the maximum. By default, finds maximum over\n all dimensions - for now returning an int for backward compatibility, but\n this is deprecated, in future will be an error, since DataArray.argmax will\n return a dict with indices for all dimensions, which does not make sense for\n a Dataset.\n keep_attrs : bool, optional\n If True, the attributes (`attrs`) will be copied from the original\n object to the new one. 
If False (default), the new object will be\n returned without attributes.\n skipna : bool, optional\n If True, skip missing values (as marked by NaN). By default, only\n skips missing values for float dtypes; other dtypes either do not\n have a sentinel missing value (int) or skipna=True has not been\n implemented (object, datetime64 or timedelta64).\n\n Returns\n -------\n result : Dataset\n\n See Also\n --------\n DataArray.argmax\n\n \"\"\"\n if dim is None:\n warnings.warn(\n \"Once the behaviour of DataArray.argmin() and Variable.argmin() without \"\n \"dim changes to return a dict of indices of each dimension, for \"\n \"consistency it will be an error to call Dataset.argmin() with no argument,\"\n \"since we don't return a dict of Datasets.\",\n DeprecationWarning,\n stacklevel=2,\n )\n if (\n dim is None\n or (not isinstance(dim, Sequence) and dim is not ...)\n or isinstance(dim, str)\n ):\n # Return int index if single dimension is passed, and is not part of a\n # sequence\n argmax_func = getattr(duck_array_ops, \"argmax\")\n return self.reduce(argmax_func, dim=dim, **kwargs)\n else:\n raise ValueError(\n \"When dim is a sequence or ..., DataArray.argmin() returns a dict. \"\n \"dicts cannot be contained in a Dataset, so cannot call \"\n \"Dataset.argmin() with a sequence or ... for dim\"\n )\n\n def query(\n self,\n queries: Mapping[Hashable, Any] = None,\n parser: str = \"pandas\",\n engine: str = None,\n missing_dims: str = \"raise\",\n **queries_kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"Return a new dataset with each array indexed along the specified\n dimension(s), where the indexers are given as strings containing\n Python expressions to be evaluated against the data variables in the\n dataset.\n\n Parameters\n ----------\n queries : dict, optional\n A dict with keys matching dimensions and values given by strings\n containing Python expressions to be evaluated against the data variables\n in the dataset. The expressions will be evaluated using the pandas\n eval() function, and can contain any valid Python expressions but cannot\n contain any Python statements.\n parser : {\"pandas\", \"python\"}, default: \"pandas\"\n The parser to use to construct the syntax tree from the expression.\n The default of 'pandas' parses code slightly different than standard\n Python. Alternatively, you can parse an expression using the 'python'\n parser to retain strict Python semantics.\n engine: {\"python\", \"numexpr\", None}, default: None\n The engine used to evaluate the expression. 
Supported engines are:\n - None: tries to use numexpr, falls back to python\n - \"numexpr\": evaluates expressions using numexpr\n - \"python\": performs operations as if you had eval’d in top level python\n missing_dims : {\"raise\", \"warn\", \"ignore\"}, default: \"raise\"\n What to do if dimensions that should be selected from are not present in the\n Dataset:\n - \"raise\": raise an exception\n - \"warning\": raise a warning, and ignore the missing dimensions\n - \"ignore\": ignore the missing dimensions\n **queries_kwargs : {dim: query, ...}, optional\n The keyword arguments form of ``queries``.\n One of queries or queries_kwargs must be provided.\n\n Returns\n -------\n obj : Dataset\n A new Dataset with the same contents as this dataset, except each\n array and dimension is indexed by the results of the appropriate\n queries.\n\n See Also\n --------\n Dataset.isel\n pandas.eval\n\n \"\"\"\n\n # allow queries to be given either as a dict or as kwargs\n queries = either_dict_or_kwargs(queries, queries_kwargs, \"query\")\n\n # check queries\n for dim, expr in queries.items():\n if not isinstance(expr, str):\n msg = f\"expr for dim {dim} must be a string to be evaluated, {type(expr)} given\"\n raise ValueError(msg)\n\n # evaluate the queries to create the indexers\n indexers = {\n dim: pd.eval(expr, resolvers=[self], parser=parser, engine=engine)\n for dim, expr in queries.items()\n }\n\n # apply the selection\n return self.isel(indexers, missing_dims=missing_dims)\n\n def curvefit(\n self,\n coords: Union[Union[str, \"DataArray\"], Iterable[Union[str, \"DataArray\"]]],\n func: Callable[..., Any],\n reduce_dims: Union[Hashable, Iterable[Hashable]] = None,\n skipna: bool = True,\n p0: Dict[str, Any] = None,\n bounds: Dict[str, Any] = None,\n param_names: Sequence[str] = None,\n kwargs: Dict[str, Any] = None,\n ):\n \"\"\"\n Curve fitting optimization for arbitrary functions.\n\n Wraps `scipy.optimize.curve_fit` with `apply_ufunc`.\n\n Parameters\n ----------\n coords : DataArray, str or sequence of DataArray, str\n Independent coordinate(s) over which to perform the curve fitting. Must share\n at least one dimension with the calling object. When fitting multi-dimensional\n functions, supply `coords` as a sequence in the same order as arguments in\n `func`. To fit along existing dimensions of the calling object, `coords` can\n also be specified as a str or sequence of strs.\n func : callable\n User specified function in the form `f(x, *params)` which returns a numpy\n array of length `len(x)`. `params` are the fittable parameters which are optimized\n by scipy curve_fit. `x` can also be specified as a sequence containing multiple\n coordinates, e.g. `f((x0, x1), *params)`.\n reduce_dims : str or sequence of str\n Additional dimension(s) over which to aggregate while fitting. For example,\n calling `ds.curvefit(coords='time', reduce_dims=['lat', 'lon'], ...)` will\n aggregate all lat and lon points and fit the specified function along the\n time dimension.\n skipna : bool, optional\n Whether to skip missing values when fitting. Default is True.\n p0 : dictionary, optional\n Optional dictionary of parameter names to initial guesses passed to the\n `curve_fit` `p0` arg. If none or only some parameters are passed, the rest will\n be assigned initial values following the default scipy behavior.\n bounds : dictionary, optional\n Optional dictionary of parameter names to bounding values passed to the\n `curve_fit` `bounds` arg. 
If none or only some parameters are passed, the rest\n will be unbounded following the default scipy behavior.\n param_names: seq, optional\n Sequence of names for the fittable parameters of `func`. If not supplied,\n this will be automatically determined by arguments of `func`. `param_names`\n should be manually supplied when fitting a function that takes a variable\n number of parameters.\n kwargs : dictionary\n Additional keyword arguments to passed to scipy curve_fit.\n\n Returns\n -------\n curvefit_results : Dataset\n A single dataset which contains:\n\n [var]_curvefit_coefficients\n The coefficients of the best fit.\n [var]_curvefit_covariance\n The covariance matrix of the coefficient estimates.\n\n See also\n --------\n Dataset.polyfit\n scipy.optimize.curve_fit\n \"\"\"\n from scipy.optimize import curve_fit\n\n if p0 is None:\n p0 = {}\n if bounds is None:\n bounds = {}\n if kwargs is None:\n kwargs = {}\n\n if not reduce_dims:\n reduce_dims_ = []\n elif isinstance(reduce_dims, str) or not isinstance(reduce_dims, Iterable):\n reduce_dims_ = [reduce_dims]\n else:\n reduce_dims_ = list(reduce_dims)\n\n if (\n isinstance(coords, str)\n or isinstance(coords, xr.DataArray)\n or not isinstance(coords, Iterable)\n ):\n coords = [coords]\n coords_ = [self[coord] if isinstance(coord, str) else coord for coord in coords]\n\n # Determine whether any coords are dims on self\n for coord in coords_:\n reduce_dims_ += [c for c in self.dims if coord.equals(self[c])]\n reduce_dims_ = list(set(reduce_dims_))\n preserved_dims = list(set(self.dims) - set(reduce_dims_))\n if not reduce_dims_:\n raise ValueError(\n \"No arguments to `coords` were identified as a dimension on the calling \"\n \"object, and no dims were supplied to `reduce_dims`. This would result \"\n \"in fitting on scalar data.\"\n )\n\n # Broadcast all coords with each other\n coords_ = xr.broadcast(*coords_)\n coords_ = [\n coord.broadcast_like(self, exclude=preserved_dims) for coord in coords_\n ]\n\n params, func_args = _get_func_args(func, param_names)\n param_defaults, bounds_defaults = _initialize_curvefit_params(\n params, p0, bounds, func_args\n )\n n_params = len(params)\n kwargs.setdefault(\"p0\", [param_defaults[p] for p in params])\n kwargs.setdefault(\n \"bounds\",\n [\n [bounds_defaults[p][0] for p in params],\n [bounds_defaults[p][1] for p in params],\n ],\n )\n\n def _wrapper(Y, *coords_, **kwargs):\n # Wrap curve_fit with raveled coordinates and pointwise NaN handling\n x = np.vstack([c.ravel() for c in coords_])\n y = Y.ravel()\n if skipna:\n mask = np.all([np.any(~np.isnan(x), axis=0), ~np.isnan(y)], axis=0)\n x = x[:, mask]\n y = y[mask]\n if not len(y):\n popt = np.full([n_params], np.nan)\n pcov = np.full([n_params, n_params], np.nan)\n return popt, pcov\n x = np.squeeze(x)\n popt, pcov = curve_fit(func, x, y, **kwargs)\n return popt, pcov\n\n result = xr.Dataset()\n for name, da in self.data_vars.items():\n if name is xr.core.dataarray._THIS_ARRAY:\n name = \"\"\n else:\n name = f\"{str(name)}_\"\n\n popt, pcov = xr.apply_ufunc(\n _wrapper,\n da,\n *coords_,\n vectorize=True,\n dask=\"parallelized\",\n input_core_dims=[reduce_dims_ for d in range(len(coords_) + 1)],\n output_core_dims=[[\"param\"], [\"cov_i\", \"cov_j\"]],\n dask_gufunc_kwargs={\n \"output_sizes\": {\n \"param\": n_params,\n \"cov_i\": n_params,\n \"cov_j\": n_params,\n },\n },\n output_dtypes=(np.float64, np.float64),\n exclude_dims=set(reduce_dims_),\n kwargs=kwargs,\n )\n result[name + \"curvefit_coefficients\"] = popt\n result[name + 
\"curvefit_covariance\"] = pcov\n\n result = result.assign_coords(\n {\"param\": params, \"cov_i\": params, \"cov_j\": params}\n )\n result.attrs = self.attrs.copy()\n\n return result\n\n\nops.inject_all_ops_and_reduce_methods(Dataset, array_only=False)\n"
] | [
[
"scipy.optimize.curve_fit",
"numpy.issubdtype",
"numpy.vander",
"numpy.asarray",
"numpy.int_",
"pandas.Categorical",
"numpy.isfinite",
"numpy.linalg.matrix_rank",
"numpy.isnan",
"numpy.zeros",
"pandas.MultiIndex.from_product",
"numpy.arange",
"numpy.core.finfo",
"numpy.prod",
"pandas.Index",
"numpy.empty",
"numpy.squeeze",
"numpy.linalg.svd",
"pandas.MultiIndex",
"pandas.eval",
"numpy.dot",
"numpy.full",
"numpy.outer",
"numpy.datetime_data"
]
] |
mimikaTU/pandas | [
"4fb963b6a3261940de5891323a8d217087a2a9a1"
] | [
"pandas/util/testing.py"
] | [
"from __future__ import division\n# pylint: disable-msg=W0402\n\nimport re\nimport string\nimport sys\nimport tempfile\nimport warnings\nimport inspect\nimport os\nimport subprocess\nimport locale\nimport traceback\n\nfrom datetime import datetime\nfrom functools import wraps\nfrom contextlib import contextmanager\n\nfrom numpy.random import randn, rand\nimport numpy as np\n\nimport pandas as pd\nfrom pandas.core.arrays import ExtensionArray\nfrom pandas.core.dtypes.missing import array_equivalent\nfrom pandas.core.dtypes.common import (\n is_datetimelike_v_numeric,\n is_datetimelike_v_object,\n is_number, is_bool,\n needs_i8_conversion,\n is_categorical_dtype,\n is_interval_dtype,\n is_sequence,\n is_list_like)\nfrom pandas.io.formats.printing import pprint_thing\nfrom pandas.core.algorithms import take_1d\nimport pandas.core.common as com\n\nimport pandas.compat as compat\nfrom pandas.compat import (\n filter, map, zip, range, unichr, lrange, lmap, lzip, u, callable, Counter,\n raise_with_traceback, httplib, StringIO, PY3)\n\nfrom pandas import (bdate_range, CategoricalIndex, Categorical, IntervalIndex,\n DatetimeIndex, TimedeltaIndex, PeriodIndex, RangeIndex,\n Index, MultiIndex,\n Series, DataFrame, Panel)\n\nfrom pandas._libs import testing as _testing\nfrom pandas.io.common import urlopen\n\n\nN = 30\nK = 4\n_RAISE_NETWORK_ERROR_DEFAULT = False\n\n# set testing_mode\n_testing_mode_warnings = (DeprecationWarning, compat.ResourceWarning)\n\n\ndef set_testing_mode():\n # set the testing mode filters\n testing_mode = os.environ.get('PANDAS_TESTING_MODE', 'None')\n if 'deprecate' in testing_mode:\n warnings.simplefilter('always', _testing_mode_warnings)\n\n\ndef reset_testing_mode():\n # reset the testing mode filters\n testing_mode = os.environ.get('PANDAS_TESTING_MODE', 'None')\n if 'deprecate' in testing_mode:\n warnings.simplefilter('ignore', _testing_mode_warnings)\n\n\nset_testing_mode()\n\n\ndef reset_display_options():\n \"\"\"\n Reset the display options for printing and representing objects.\n \"\"\"\n\n pd.reset_option('^display.', silent=True)\n\n\ndef round_trip_pickle(obj, path=None):\n \"\"\"\n Pickle an object and then read it again.\n\n Parameters\n ----------\n obj : pandas object\n The object to pickle and then re-read.\n path : str, default None\n The path where the pickled object is written and then read.\n\n Returns\n -------\n round_trip_pickled_object : pandas object\n The original object that was pickled and then re-read.\n \"\"\"\n\n if path is None:\n path = u('__{random_bytes}__.pickle'.format(random_bytes=rands(10)))\n with ensure_clean(path) as path:\n pd.to_pickle(obj, path)\n return pd.read_pickle(path)\n\n\ndef round_trip_pathlib(writer, reader, path=None):\n \"\"\"\n Write an object to file specified by a pathlib.Path and read it back\n\n Parameters\n ----------\n writer : callable bound to pandas object\n IO writing function (e.g. DataFrame.to_csv )\n reader : callable\n IO reading function (e.g. 
pd.read_csv )\n path : str, default None\n The path where the object is written and then read.\n\n Returns\n -------\n round_trip_object : pandas object\n The original object that was serialized and then re-read.\n \"\"\"\n\n import pytest\n Path = pytest.importorskip('pathlib').Path\n if path is None:\n path = '___pathlib___'\n with ensure_clean(path) as path:\n writer(Path(path))\n obj = reader(Path(path))\n return obj\n\n\ndef round_trip_localpath(writer, reader, path=None):\n \"\"\"\n Write an object to file specified by a py.path LocalPath and read it back\n\n Parameters\n ----------\n writer : callable bound to pandas object\n IO writing function (e.g. DataFrame.to_csv )\n reader : callable\n IO reading function (e.g. pd.read_csv )\n path : str, default None\n The path where the object is written and then read.\n\n Returns\n -------\n round_trip_object : pandas object\n The original object that was serialized and then re-read.\n \"\"\"\n import pytest\n LocalPath = pytest.importorskip('py.path').local\n if path is None:\n path = '___localpath___'\n with ensure_clean(path) as path:\n writer(LocalPath(path))\n obj = reader(LocalPath(path))\n return obj\n\n\n@contextmanager\ndef decompress_file(path, compression):\n \"\"\"\n Open a compressed file and return a file object\n\n Parameters\n ----------\n path : str\n The path where the file is read from\n\n compression : {'gzip', 'bz2', 'zip', 'xz', None}\n Name of the decompression to use\n\n Returns\n -------\n f : file object\n \"\"\"\n\n if compression is None:\n f = open(path, 'rb')\n elif compression == 'gzip':\n import gzip\n f = gzip.open(path, 'rb')\n elif compression == 'bz2':\n import bz2\n f = bz2.BZ2File(path, 'rb')\n elif compression == 'xz':\n lzma = compat.import_lzma()\n f = lzma.LZMAFile(path, 'rb')\n elif compression == 'zip':\n import zipfile\n zip_file = zipfile.ZipFile(path)\n zip_names = zip_file.namelist()\n if len(zip_names) == 1:\n f = zip_file.open(zip_names.pop())\n else:\n raise ValueError('ZIP file {} error. Only one file per ZIP.'\n .format(path))\n else:\n msg = 'Unrecognized compression type: {}'.format(compression)\n raise ValueError(msg)\n\n yield f\n f.close()\n\n\ndef assert_almost_equal(left, right, check_exact=False,\n check_dtype='equiv', check_less_precise=False,\n **kwargs):\n \"\"\"\n Check that the left and right objects are approximately equal.\n\n Parameters\n ----------\n left : object\n right : object\n check_exact : bool, default False\n Whether to compare number exactly.\n check_dtype: bool, default True\n check dtype if both a and b are the same type\n check_less_precise : bool or int, default False\n Specify comparison precision. 
Only used when check_exact is False.\n 5 digits (False) or 3 digits (True) after decimal points are compared.\n If int, then specify the digits to compare\n \"\"\"\n if isinstance(left, pd.Index):\n return assert_index_equal(left, right, check_exact=check_exact,\n exact=check_dtype,\n check_less_precise=check_less_precise,\n **kwargs)\n\n elif isinstance(left, pd.Series):\n return assert_series_equal(left, right, check_exact=check_exact,\n check_dtype=check_dtype,\n check_less_precise=check_less_precise,\n **kwargs)\n\n elif isinstance(left, pd.DataFrame):\n return assert_frame_equal(left, right, check_exact=check_exact,\n check_dtype=check_dtype,\n check_less_precise=check_less_precise,\n **kwargs)\n\n else:\n # other sequences\n if check_dtype:\n if is_number(left) and is_number(right):\n # do not compare numeric classes, like np.float64 and float\n pass\n elif is_bool(left) and is_bool(right):\n # do not compare bool classes, like np.bool_ and bool\n pass\n else:\n if (isinstance(left, np.ndarray) or\n isinstance(right, np.ndarray)):\n obj = 'numpy array'\n else:\n obj = 'Input'\n assert_class_equal(left, right, obj=obj)\n return _testing.assert_almost_equal(\n left, right,\n check_dtype=check_dtype,\n check_less_precise=check_less_precise,\n **kwargs)\n\n\ndef _check_isinstance(left, right, cls):\n \"\"\"\n Helper method for our assert_* methods that ensures that\n the two objects being compared have the right type before\n proceeding with the comparison.\n\n Parameters\n ----------\n left : The first object being compared.\n right : The second object being compared.\n cls : The class type to check against.\n\n Raises\n ------\n AssertionError : Either `left` or `right` is not an instance of `cls`.\n \"\"\"\n\n err_msg = \"{name} Expected type {exp_type}, found {act_type} instead\"\n cls_name = cls.__name__\n\n if not isinstance(left, cls):\n raise AssertionError(err_msg.format(name=cls_name, exp_type=cls,\n act_type=type(left)))\n if not isinstance(right, cls):\n raise AssertionError(err_msg.format(name=cls_name, exp_type=cls,\n act_type=type(right)))\n\n\ndef assert_dict_equal(left, right, compare_keys=True):\n\n _check_isinstance(left, right, dict)\n return _testing.assert_dict_equal(left, right, compare_keys=compare_keys)\n\n\ndef randbool(size=(), p=0.5):\n return rand(*size) <= p\n\n\nRANDS_CHARS = np.array(list(string.ascii_letters + string.digits),\n dtype=(np.str_, 1))\nRANDU_CHARS = np.array(list(u(\"\").join(map(unichr, lrange(1488, 1488 + 26))) +\n string.digits), dtype=(np.unicode_, 1))\n\n\ndef rands_array(nchars, size, dtype='O'):\n \"\"\"Generate an array of byte strings.\"\"\"\n retval = (np.random.choice(RANDS_CHARS, size=nchars * np.prod(size))\n .view((np.str_, nchars)).reshape(size))\n if dtype is None:\n return retval\n else:\n return retval.astype(dtype)\n\n\ndef randu_array(nchars, size, dtype='O'):\n \"\"\"Generate an array of unicode strings.\"\"\"\n retval = (np.random.choice(RANDU_CHARS, size=nchars * np.prod(size))\n .view((np.unicode_, nchars)).reshape(size))\n if dtype is None:\n return retval\n else:\n return retval.astype(dtype)\n\n\ndef rands(nchars):\n \"\"\"\n Generate one random byte string.\n\n See `rands_array` if you want to create an array of random strings.\n\n \"\"\"\n return ''.join(np.random.choice(RANDS_CHARS, nchars))\n\n\ndef randu(nchars):\n \"\"\"\n Generate one random unicode string.\n\n See `randu_array` if you want to create an array of random unicode strings.\n\n \"\"\"\n return ''.join(np.random.choice(RANDU_CHARS, 
nchars))\n\n\ndef close(fignum=None):\n from matplotlib.pyplot import get_fignums, close as _close\n\n if fignum is None:\n for fignum in get_fignums():\n _close(fignum)\n else:\n _close(fignum)\n\n\n# -----------------------------------------------------------------------------\n# locale utilities\n\n\ndef check_output(*popenargs, **kwargs):\n # shamelessly taken from Python 2.7 source\n r\"\"\"Run command with arguments and return its output as a byte string.\n\n If the exit code was non-zero it raises a CalledProcessError. The\n CalledProcessError object will have the return code in the returncode\n attribute and output in the output attribute.\n\n The arguments are the same as for the Popen constructor. Example:\n\n >>> check_output([\"ls\", \"-l\", \"/dev/null\"])\n 'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\\n'\n\n The stdout argument is not allowed as it is used internally.\n To capture standard error in the result, use stderr=STDOUT.\n\n >>> check_output([\"/bin/sh\", \"-c\",\n ... \"ls -l non_existent_file ; exit 0\"],\n ... stderr=STDOUT)\n 'ls: non_existent_file: No such file or directory\\n'\n \"\"\"\n if 'stdout' in kwargs:\n raise ValueError('stdout argument not allowed, it will be overridden.')\n process = subprocess.Popen(stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n *popenargs, **kwargs)\n output, unused_err = process.communicate()\n retcode = process.poll()\n if retcode:\n cmd = kwargs.get(\"args\")\n if cmd is None:\n cmd = popenargs[0]\n raise subprocess.CalledProcessError(retcode, cmd, output=output)\n return output\n\n\ndef _default_locale_getter():\n try:\n raw_locales = check_output(['locale -a'], shell=True)\n except subprocess.CalledProcessError as e:\n raise type(e)(\"{exception}, the 'locale -a' command cannot be found \"\n \"on your system\".format(exception=e))\n return raw_locales\n\n\ndef get_locales(prefix=None, normalize=True,\n locale_getter=_default_locale_getter):\n \"\"\"Get all the locales that are available on the system.\n\n Parameters\n ----------\n prefix : str\n If not ``None`` then return only those locales with the prefix\n provided. For example to get all English language locales (those that\n start with ``\"en\"``), pass ``prefix=\"en\"``.\n normalize : bool\n Call ``locale.normalize`` on the resulting list of available locales.\n If ``True``, only locales that can be set without throwing an\n ``Exception`` are returned.\n locale_getter : callable\n The function to use to retrieve the current locales. This should return\n a string with each locale separated by a newline character.\n\n Returns\n -------\n locales : list of strings\n A list of locale strings that can be set with ``locale.setlocale()``.\n For example::\n\n locale.setlocale(locale.LC_ALL, locale_string)\n\n On error will return None (no locale available, e.g. 
Windows)\n\n \"\"\"\n try:\n raw_locales = locale_getter()\n except Exception:\n return None\n\n try:\n # raw_locales is \"\\n\" separated list of locales\n # it may contain non-decodable parts, so split\n # extract what we can and then rejoin.\n raw_locales = raw_locales.split(b'\\n')\n out_locales = []\n for x in raw_locales:\n if PY3:\n out_locales.append(str(\n x, encoding=pd.options.display.encoding))\n else:\n out_locales.append(str(x))\n\n except TypeError:\n pass\n\n if prefix is None:\n return _valid_locales(out_locales, normalize)\n\n found = re.compile('{prefix}.*'.format(prefix=prefix)) \\\n .findall('\\n'.join(out_locales))\n return _valid_locales(found, normalize)\n\n\n@contextmanager\ndef set_locale(new_locale, lc_var=locale.LC_ALL):\n \"\"\"Context manager for temporarily setting a locale.\n\n Parameters\n ----------\n new_locale : str or tuple\n A string of the form <language_country>.<encoding>. For example to set\n the current locale to US English with a UTF8 encoding, you would pass\n \"en_US.UTF-8\".\n\n Notes\n -----\n This is useful when you want to run a particular block of code under a\n particular locale, without globally setting the locale. This probably isn't\n thread-safe.\n \"\"\"\n current_locale = locale.getlocale()\n\n try:\n locale.setlocale(lc_var, new_locale)\n\n try:\n normalized_locale = locale.getlocale()\n except ValueError:\n yield new_locale\n else:\n if com._all_not_none(*normalized_locale):\n yield '.'.join(normalized_locale)\n else:\n yield new_locale\n finally:\n locale.setlocale(lc_var, current_locale)\n\n\ndef _can_set_locale(lc):\n \"\"\"Check to see if we can set a locale without throwing an exception.\n\n Parameters\n ----------\n lc : str\n The locale to attempt to set.\n\n Returns\n -------\n isvalid : bool\n Whether the passed locale can be set\n \"\"\"\n try:\n with set_locale(lc):\n pass\n except locale.Error: # horrible name for a Exception subclass\n return False\n else:\n return True\n\n\ndef _valid_locales(locales, normalize):\n \"\"\"Return a list of normalized locales that do not throw an ``Exception``\n when set.\n\n Parameters\n ----------\n locales : str\n A string where each locale is separated by a newline.\n normalize : bool\n Whether to call ``locale.normalize`` on each locale.\n\n Returns\n -------\n valid_locales : list\n A list of valid locales.\n \"\"\"\n if normalize:\n normalizer = lambda x: locale.normalize(x.strip())\n else:\n normalizer = lambda x: x.strip()\n\n return list(filter(_can_set_locale, map(normalizer, locales)))\n\n# -----------------------------------------------------------------------------\n# Stdout / stderr decorators\n\n\ndef capture_stdout(f):\n \"\"\"\n Decorator to capture stdout in a buffer so that it can be checked\n (or suppressed) during testing.\n\n Parameters\n ----------\n f : callable\n The test that is capturing stdout.\n\n Returns\n -------\n f : callable\n The decorated test ``f``, which captures stdout.\n\n Examples\n --------\n\n >>> from pandas.util.testing import capture_stdout\n >>>\n >>> import sys\n >>>\n >>> @capture_stdout\n ... def test_print_pass():\n ... print(\"foo\")\n ... out = sys.stdout.getvalue()\n ... assert out == \"foo\\n\"\n >>>\n >>> @capture_stdout\n ... def test_print_fail():\n ... print(\"foo\")\n ... out = sys.stdout.getvalue()\n ... 
assert out == \"bar\\n\"\n ...\n AssertionError: assert 'foo\\n' == 'bar\\n'\n \"\"\"\n\n @wraps(f)\n def wrapper(*args, **kwargs):\n try:\n sys.stdout = StringIO()\n f(*args, **kwargs)\n finally:\n sys.stdout = sys.__stdout__\n\n return wrapper\n\n\ndef capture_stderr(f):\n \"\"\"\n Decorator to capture stderr in a buffer so that it can be checked\n (or suppressed) during testing.\n\n Parameters\n ----------\n f : callable\n The test that is capturing stderr.\n\n Returns\n -------\n f : callable\n The decorated test ``f``, which captures stderr.\n\n Examples\n --------\n\n >>> from pandas.util.testing import capture_stderr\n >>>\n >>> import sys\n >>>\n >>> @capture_stderr\n ... def test_stderr_pass():\n ... sys.stderr.write(\"foo\")\n ... out = sys.stderr.getvalue()\n ... assert out == \"foo\\n\"\n >>>\n >>> @capture_stderr\n ... def test_stderr_fail():\n ... sys.stderr.write(\"foo\")\n ... out = sys.stderr.getvalue()\n ... assert out == \"bar\\n\"\n ...\n AssertionError: assert 'foo\\n' == 'bar\\n'\n \"\"\"\n\n @wraps(f)\n def wrapper(*args, **kwargs):\n try:\n sys.stderr = StringIO()\n f(*args, **kwargs)\n finally:\n sys.stderr = sys.__stderr__\n\n return wrapper\n\n# -----------------------------------------------------------------------------\n# Console debugging tools\n\n\ndef debug(f, *args, **kwargs):\n from pdb import Pdb as OldPdb\n try:\n from IPython.core.debugger import Pdb\n kw = dict(color_scheme='Linux')\n except ImportError:\n Pdb = OldPdb\n kw = {}\n pdb = Pdb(**kw)\n return pdb.runcall(f, *args, **kwargs)\n\n\ndef pudebug(f, *args, **kwargs):\n import pudb\n return pudb.runcall(f, *args, **kwargs)\n\n\ndef set_trace():\n from IPython.core.debugger import Pdb\n try:\n Pdb(color_scheme='Linux').set_trace(sys._getframe().f_back)\n except Exception:\n from pdb import Pdb as OldPdb\n OldPdb().set_trace(sys._getframe().f_back)\n\n# -----------------------------------------------------------------------------\n# contextmanager to ensure the file cleanup\n\n\n@contextmanager\ndef ensure_clean(filename=None, return_filelike=False):\n \"\"\"Gets a temporary path and agrees to remove on close.\n\n Parameters\n ----------\n filename : str (optional)\n if None, creates a temporary file which is then removed when out of\n scope. if passed, creates temporary file with filename as ending.\n return_filelike : bool (default False)\n if True, returns a file-like which is *always* cleaned. 
Necessary for\n savefig and other functions which want to append extensions.\n \"\"\"\n filename = filename or ''\n fd = None\n\n if return_filelike:\n f = tempfile.TemporaryFile(suffix=filename)\n try:\n yield f\n finally:\n f.close()\n else:\n # don't generate tempfile if using a path with directory specified\n if len(os.path.dirname(filename)):\n raise ValueError(\"Can't pass a qualified name to ensure_clean()\")\n\n try:\n fd, filename = tempfile.mkstemp(suffix=filename)\n except UnicodeEncodeError:\n import pytest\n pytest.skip('no unicode file names on this system')\n\n try:\n yield filename\n finally:\n try:\n os.close(fd)\n except Exception as e:\n print(\"Couldn't close file descriptor: {fdesc} (file: {fname})\"\n .format(fdesc=fd, fname=filename))\n try:\n if os.path.exists(filename):\n os.remove(filename)\n except Exception as e:\n print(\"Exception on removing file: {error}\".format(error=e))\n\n\ndef get_data_path(f=''):\n \"\"\"Return the path of a data file, these are relative to the current test\n directory.\n \"\"\"\n # get our callers file\n _, filename, _, _, _, _ = inspect.getouterframes(inspect.currentframe())[1]\n base_dir = os.path.abspath(os.path.dirname(filename))\n return os.path.join(base_dir, 'data', f)\n\n# -----------------------------------------------------------------------------\n# Comparators\n\n\ndef equalContents(arr1, arr2):\n \"\"\"Checks if the set of unique elements of arr1 and arr2 are equivalent.\n \"\"\"\n return frozenset(arr1) == frozenset(arr2)\n\n\ndef assert_index_equal(left, right, exact='equiv', check_names=True,\n check_less_precise=False, check_exact=True,\n check_categorical=True, obj='Index'):\n \"\"\"Check that left and right Index are equal.\n\n Parameters\n ----------\n left : Index\n right : Index\n exact : bool / string {'equiv'}, default False\n Whether to check the Index class, dtype and inferred_type\n are identical. If 'equiv', then RangeIndex can be substituted for\n Int64Index as well.\n check_names : bool, default True\n Whether to check the names attribute.\n check_less_precise : bool or int, default False\n Specify comparison precision. 
Only used when check_exact is False.\n 5 digits (False) or 3 digits (True) after decimal points are compared.\n If int, then specify the digits to compare\n check_exact : bool, default True\n Whether to compare number exactly.\n check_categorical : bool, default True\n Whether to compare internal Categorical exactly.\n obj : str, default 'Index'\n Specify object name being compared, internally used to show appropriate\n assertion message\n \"\"\"\n\n def _check_types(l, r, obj='Index'):\n if exact:\n assert_class_equal(left, right, exact=exact, obj=obj)\n assert_attr_equal('dtype', l, r, obj=obj)\n # allow string-like to have different inferred_types\n if l.inferred_type in ('string', 'unicode'):\n assert r.inferred_type in ('string', 'unicode')\n else:\n assert_attr_equal('inferred_type', l, r, obj=obj)\n\n def _get_ilevel_values(index, level):\n # accept level number only\n unique = index.levels[level]\n labels = index.labels[level]\n filled = take_1d(unique.values, labels, fill_value=unique._na_value)\n values = unique._shallow_copy(filled, name=index.names[level])\n return values\n\n # instance validation\n _check_isinstance(left, right, Index)\n\n # class / dtype comparison\n _check_types(left, right, obj=obj)\n\n # level comparison\n if left.nlevels != right.nlevels:\n msg1 = '{obj} levels are different'.format(obj=obj)\n msg2 = '{nlevels}, {left}'.format(nlevels=left.nlevels, left=left)\n msg3 = '{nlevels}, {right}'.format(nlevels=right.nlevels, right=right)\n raise_assert_detail(obj, msg1, msg2, msg3)\n\n # length comparison\n if len(left) != len(right):\n msg1 = '{obj} length are different'.format(obj=obj)\n msg2 = '{length}, {left}'.format(length=len(left), left=left)\n msg3 = '{length}, {right}'.format(length=len(right), right=right)\n raise_assert_detail(obj, msg1, msg2, msg3)\n\n # MultiIndex special comparison for little-friendly error messages\n if left.nlevels > 1:\n for level in range(left.nlevels):\n # cannot use get_level_values here because it can change dtype\n llevel = _get_ilevel_values(left, level)\n rlevel = _get_ilevel_values(right, level)\n\n lobj = 'MultiIndex level [{level}]'.format(level=level)\n assert_index_equal(llevel, rlevel,\n exact=exact, check_names=check_names,\n check_less_precise=check_less_precise,\n check_exact=check_exact, obj=lobj)\n # get_level_values may change dtype\n _check_types(left.levels[level], right.levels[level], obj=obj)\n\n if check_exact:\n if not left.equals(right):\n diff = np.sum((left.values != right.values)\n .astype(int)) * 100.0 / len(left)\n msg = '{obj} values are different ({pct} %)'.format(\n obj=obj, pct=np.round(diff, 5))\n raise_assert_detail(obj, msg, left, right)\n else:\n _testing.assert_almost_equal(left.values, right.values,\n check_less_precise=check_less_precise,\n check_dtype=exact,\n obj=obj, lobj=left, robj=right)\n\n # metadata comparison\n if check_names:\n assert_attr_equal('names', left, right, obj=obj)\n if isinstance(left, pd.PeriodIndex) or isinstance(right, pd.PeriodIndex):\n assert_attr_equal('freq', left, right, obj=obj)\n if (isinstance(left, pd.IntervalIndex) or\n isinstance(right, pd.IntervalIndex)):\n assert_attr_equal('closed', left, right, obj=obj)\n\n if check_categorical:\n if is_categorical_dtype(left) or is_categorical_dtype(right):\n assert_categorical_equal(left.values, right.values,\n obj='{obj} category'.format(obj=obj))\n\n\ndef assert_class_equal(left, right, exact=True, obj='Input'):\n \"\"\"checks classes are equal.\"\"\"\n\n def repr_class(x):\n if isinstance(x, Index):\n # 
return Index as it is to include values in the error message\n return x\n\n try:\n return x.__class__.__name__\n except AttributeError:\n return repr(type(x))\n\n if exact == 'equiv':\n if type(left) != type(right):\n # allow equivalence of Int64Index/RangeIndex\n types = set([type(left).__name__, type(right).__name__])\n if len(types - set(['Int64Index', 'RangeIndex'])):\n msg = '{obj} classes are not equivalent'.format(obj=obj)\n raise_assert_detail(obj, msg, repr_class(left),\n repr_class(right))\n elif exact:\n if type(left) != type(right):\n msg = '{obj} classes are different'.format(obj=obj)\n raise_assert_detail(obj, msg, repr_class(left),\n repr_class(right))\n\n\ndef assert_attr_equal(attr, left, right, obj='Attributes'):\n \"\"\"checks attributes are equal. Both objects must have attribute.\n\n Parameters\n ----------\n attr : str\n Attribute name being compared.\n left : object\n right : object\n obj : str, default 'Attributes'\n Specify object name being compared, internally used to show appropriate\n assertion message\n \"\"\"\n\n left_attr = getattr(left, attr)\n right_attr = getattr(right, attr)\n\n if left_attr is right_attr:\n return True\n elif (is_number(left_attr) and np.isnan(left_attr) and\n is_number(right_attr) and np.isnan(right_attr)):\n # np.nan\n return True\n\n try:\n result = left_attr == right_attr\n except TypeError:\n # datetimetz on rhs may raise TypeError\n result = False\n if not isinstance(result, bool):\n result = result.all()\n\n if result:\n return True\n else:\n msg = 'Attribute \"{attr}\" are different'.format(attr=attr)\n raise_assert_detail(obj, msg, left_attr, right_attr)\n\n\ndef assert_is_valid_plot_return_object(objs):\n import matplotlib.pyplot as plt\n if isinstance(objs, (pd.Series, np.ndarray)):\n for el in objs.ravel():\n msg = ('one of \\'objs\\' is not a matplotlib Axes instance, type '\n 'encountered {name!r}').format(name=el.__class__.__name__)\n assert isinstance(el, (plt.Axes, dict)), msg\n else:\n assert isinstance(objs, (plt.Artist, tuple, dict)), \\\n ('objs is neither an ndarray of Artist instances nor a '\n 'single Artist instance, tuple, or dict, \"objs\" is a {name!r}'\n ).format(name=objs.__class__.__name__)\n\n\ndef isiterable(obj):\n return hasattr(obj, '__iter__')\n\n\ndef is_sorted(seq):\n if isinstance(seq, (Index, Series)):\n seq = seq.values\n # sorting does not change precisions\n return assert_numpy_array_equal(seq, np.sort(np.array(seq)))\n\n\ndef assert_categorical_equal(left, right, check_dtype=True,\n obj='Categorical', check_category_order=True):\n \"\"\"Test that Categoricals are equivalent.\n\n Parameters\n ----------\n left, right : Categorical\n Categoricals to compare\n check_dtype : bool, default True\n Check that integer dtype of the codes are the same\n obj : str, default 'Categorical'\n Specify object name being compared, internally used to show appropriate\n assertion message\n check_category_order : bool, default True\n Whether the order of the categories should be compared, which\n implies identical integer codes. If False, only the resulting\n values are compared. 
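(As an illustrative sketch, ``Categorical(['a', 'b'], categories=['a', 'b'])`` and ``Categorical(['a', 'b'], categories=['b', 'a'])`` compare equal when ``check_category_order=False`` but raise under the default.) 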
The ordered attribute is\n checked regardless.\n \"\"\"\n _check_isinstance(left, right, Categorical)\n\n if check_category_order:\n assert_index_equal(left.categories, right.categories,\n obj='{obj}.categories'.format(obj=obj))\n assert_numpy_array_equal(left.codes, right.codes,\n check_dtype=check_dtype,\n obj='{obj}.codes'.format(obj=obj))\n else:\n assert_index_equal(left.categories.sort_values(),\n right.categories.sort_values(),\n obj='{obj}.categories'.format(obj=obj))\n assert_index_equal(left.categories.take(left.codes),\n right.categories.take(right.codes),\n obj='{obj}.values'.format(obj=obj))\n\n assert_attr_equal('ordered', left, right, obj=obj)\n\n\ndef raise_assert_detail(obj, message, left, right, diff=None):\n if isinstance(left, np.ndarray):\n left = pprint_thing(left)\n elif is_categorical_dtype(left):\n left = repr(left)\n if isinstance(right, np.ndarray):\n right = pprint_thing(right)\n elif is_categorical_dtype(right):\n right = repr(right)\n\n msg = \"\"\"{obj} are different\n\n{message}\n[left]: {left}\n[right]: {right}\"\"\".format(obj=obj, message=message, left=left, right=right)\n\n if diff is not None:\n msg += \"\\n[diff]: {diff}\".format(diff=diff)\n\n raise AssertionError(msg)\n\n\ndef assert_numpy_array_equal(left, right, strict_nan=False,\n check_dtype=True, err_msg=None,\n obj='numpy array', check_same=None):\n \"\"\" Checks that 'np.ndarray' is equivalent\n\n Parameters\n ----------\n left : np.ndarray or iterable\n right : np.ndarray or iterable\n strict_nan : bool, default False\n If True, consider NaN and None to be different.\n check_dtype: bool, default True\n check dtype if both a and b are np.ndarray\n err_msg : str, default None\n If provided, used as assertion message\n obj : str, default 'numpy array'\n Specify object name being compared, internally used to show appropriate\n assertion message\n check_same : None|'copy'|'same', default None\n Ensure left and right refer/do not refer to the same memory area\n \"\"\"\n\n # instance validation\n # Show a detailed error message when classes are different\n assert_class_equal(left, right, obj=obj)\n # both classes must be an np.ndarray\n _check_isinstance(left, right, np.ndarray)\n\n def _get_base(obj):\n return obj.base if getattr(obj, 'base', None) is not None else obj\n\n left_base = _get_base(left)\n right_base = _get_base(right)\n\n if check_same == 'same':\n if left_base is not right_base:\n msg = \"{left!r} is not {right!r}\".format(\n left=left_base, right=right_base)\n raise AssertionError(msg)\n elif check_same == 'copy':\n if left_base is right_base:\n msg = \"{left!r} is {right!r}\".format(\n left=left_base, right=right_base)\n raise AssertionError(msg)\n\n def _raise(left, right, err_msg):\n if err_msg is None:\n if left.shape != right.shape:\n raise_assert_detail(obj, '{obj} shapes are different'\n .format(obj=obj), left.shape, right.shape)\n\n diff = 0\n for l, r in zip(left, right):\n # count up differences\n if not array_equivalent(l, r, strict_nan=strict_nan):\n diff += 1\n\n diff = diff * 100.0 / left.size\n msg = '{obj} values are different ({pct} %)'.format(\n obj=obj, pct=np.round(diff, 5))\n raise_assert_detail(obj, msg, left, right)\n\n raise AssertionError(err_msg)\n\n # compare shape and values\n if not array_equivalent(left, right, strict_nan=strict_nan):\n _raise(left, right, err_msg)\n\n if check_dtype:\n if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):\n assert_attr_equal('dtype', left, right, obj=obj)\n\n return True\n\n\ndef 
assert_extension_array_equal(left, right):\n \"\"\"Check that left and right ExtensionArrays are equal.\n\n Parameters\n ----------\n left, right : ExtensionArray\n The two arrays to compare\n\n Notes\n -----\n Missing values are checked separately from valid values.\n A mask of missing values is computed for each and checked to match.\n The remaining all-valid values are cast to object dtype and checked.\n \"\"\"\n assert isinstance(left, ExtensionArray)\n assert left.dtype == right.dtype\n left_na = left.isna()\n right_na = right.isna()\n assert_numpy_array_equal(left_na, right_na)\n\n left_valid = left[~left_na].astype(object)\n right_valid = right[~right_na].astype(object)\n\n assert_numpy_array_equal(left_valid, right_valid)\n\n\n# This could be refactored to use the NDFrame.equals method\ndef assert_series_equal(left, right, check_dtype=True,\n check_index_type='equiv',\n check_series_type=True,\n check_less_precise=False,\n check_names=True,\n check_exact=False,\n check_datetimelike_compat=False,\n check_categorical=True,\n obj='Series'):\n \"\"\"Check that left and right Series are equal.\n\n Parameters\n ----------\n left : Series\n right : Series\n check_dtype : bool, default True\n Whether to check the Series dtype is identical.\n check_index_type : bool / string {'equiv'}, default 'equiv'\n Whether to check the Index class, dtype and inferred_type\n are identical.\n check_series_type : bool, default True\n Whether to check the Series class is identical.\n check_less_precise : bool or int, default False\n Specify comparison precision. Only used when check_exact is False.\n 5 digits (False) or 3 digits (True) after decimal points are compared.\n If int, then specify the digits to compare\n check_exact : bool, default False\n Whether to compare number exactly.\n check_names : bool, default True\n Whether to check the Series and Index names attribute.\n check_datetimelike_compat : bool, default False\n Compare datetime-like which is comparable ignoring dtype.\n check_categorical : bool, default True\n Whether to compare internal Categorical exactly.\n obj : str, default 'Series'\n Specify object name being compared, internally used to show appropriate\n assertion message\n \"\"\"\n\n # instance validation\n _check_isinstance(left, right, Series)\n\n if check_series_type:\n # ToDo: There are some tests using rhs is sparse\n # lhs is dense. Should use assert_class_equal in future\n assert isinstance(left, type(right))\n # assert_class_equal(left, right, obj=obj)\n\n # length comparison\n if len(left) != len(right):\n msg1 = '{len}, {left}'.format(len=len(left), left=left.index)\n msg2 = '{len}, {right}'.format(len=len(right), right=right.index)\n raise_assert_detail(obj, 'Series length are different', msg1, msg2)\n\n # index comparison\n assert_index_equal(left.index, right.index, exact=check_index_type,\n check_names=check_names,\n check_less_precise=check_less_precise,\n check_exact=check_exact,\n check_categorical=check_categorical,\n obj='{obj}.index'.format(obj=obj))\n\n if check_dtype:\n # We want to skip exact dtype checking when `check_categorical`\n # is False. 
We'll still raise if only one is a `Categorical`,\n # regardless of `check_categorical`\n if (is_categorical_dtype(left) and is_categorical_dtype(right) and\n not check_categorical):\n pass\n else:\n assert_attr_equal('dtype', left, right)\n\n if check_exact:\n assert_numpy_array_equal(left.get_values(), right.get_values(),\n check_dtype=check_dtype,\n obj='{obj}'.format(obj=obj),)\n elif check_datetimelike_compat:\n # we want to check only if we have compat dtypes\n # e.g. integer and M|m are NOT compat, but we can simply check\n # the values in that case\n if (is_datetimelike_v_numeric(left, right) or\n is_datetimelike_v_object(left, right) or\n needs_i8_conversion(left) or\n needs_i8_conversion(right)):\n\n # datetimelike may have different objects (e.g. datetime.datetime\n # vs Timestamp) but will compare equal\n if not Index(left.values).equals(Index(right.values)):\n msg = ('[datetimelike_compat=True] {left} is not equal to '\n '{right}.').format(left=left.values, right=right.values)\n raise AssertionError(msg)\n else:\n assert_numpy_array_equal(left.get_values(), right.get_values(),\n check_dtype=check_dtype)\n elif is_interval_dtype(left) or is_interval_dtype(right):\n # TODO: big hack here\n left = pd.IntervalIndex(left)\n right = pd.IntervalIndex(right)\n assert_index_equal(left, right, obj='{obj}.index'.format(obj=obj))\n\n else:\n _testing.assert_almost_equal(left.get_values(), right.get_values(),\n check_less_precise=check_less_precise,\n check_dtype=check_dtype,\n obj='{obj}'.format(obj=obj))\n\n # metadata comparison\n if check_names:\n assert_attr_equal('name', left, right, obj=obj)\n\n if check_categorical:\n if is_categorical_dtype(left) or is_categorical_dtype(right):\n assert_categorical_equal(left.values, right.values,\n obj='{obj} category'.format(obj=obj))\n\n\n# This could be refactored to use the NDFrame.equals method\ndef assert_frame_equal(left, right, check_dtype=True,\n check_index_type='equiv',\n check_column_type='equiv',\n check_frame_type=True,\n check_less_precise=False,\n check_names=True,\n by_blocks=False,\n check_exact=False,\n check_datetimelike_compat=False,\n check_categorical=True,\n check_like=False,\n obj='DataFrame'):\n \"\"\"Check that left and right DataFrame are equal.\n\n Parameters\n ----------\n left : DataFrame\n right : DataFrame\n check_dtype : bool, default True\n Whether to check the DataFrame dtype is identical.\n check_index_type : bool / string {'equiv'}, default False\n Whether to check the Index class, dtype and inferred_type\n are identical.\n check_column_type : bool / string {'equiv'}, default False\n Whether to check the columns class, dtype and inferred_type\n are identical.\n check_frame_type : bool, default False\n Whether to check the DataFrame class is identical.\n check_less_precise : bool or int, default False\n Specify comparison precision. Only used when check_exact is False.\n 5 digits (False) or 3 digits (True) after decimal points are compared.\n If int, then specify the digits to compare\n check_names : bool, default True\n Whether to check the Index names attribute.\n by_blocks : bool, default False\n Specify how to compare internal data. 
If False, compare by columns.\n If True, compare by blocks.\n check_exact : bool, default False\n Whether to compare number exactly.\n check_datetimelike_compat : bool, default False\n Compare datetime-like which is comparable ignoring dtype.\n check_categorical : bool, default True\n Whether to compare internal Categorical exactly.\n check_like : bool, default False\n If true, ignore the order of rows & columns\n obj : str, default 'DataFrame'\n Specify object name being compared, internally used to show appropriate\n assertion message\n \"\"\"\n\n # instance validation\n _check_isinstance(left, right, DataFrame)\n\n if check_frame_type:\n # ToDo: There are some tests using rhs is SparseDataFrame\n # lhs is DataFrame. Should use assert_class_equal in future\n assert isinstance(left, type(right))\n # assert_class_equal(left, right, obj=obj)\n\n # shape comparison\n if left.shape != right.shape:\n raise_assert_detail(obj,\n 'DataFrame shape mismatch',\n '{shape!r}'.format(shape=left.shape),\n '{shape!r}'.format(shape=right.shape))\n\n if check_like:\n left, right = left.reindex_like(right), right\n\n # index comparison\n assert_index_equal(left.index, right.index, exact=check_index_type,\n check_names=check_names,\n check_less_precise=check_less_precise,\n check_exact=check_exact,\n check_categorical=check_categorical,\n obj='{obj}.index'.format(obj=obj))\n\n # column comparison\n assert_index_equal(left.columns, right.columns, exact=check_column_type,\n check_names=check_names,\n check_less_precise=check_less_precise,\n check_exact=check_exact,\n check_categorical=check_categorical,\n obj='{obj}.columns'.format(obj=obj))\n\n # compare by blocks\n if by_blocks:\n rblocks = right._to_dict_of_blocks()\n lblocks = left._to_dict_of_blocks()\n for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):\n assert dtype in lblocks\n assert dtype in rblocks\n assert_frame_equal(lblocks[dtype], rblocks[dtype],\n check_dtype=check_dtype, obj='DataFrame.blocks')\n\n # compare by columns\n else:\n for i, col in enumerate(left.columns):\n assert col in right\n lcol = left.iloc[:, i]\n rcol = right.iloc[:, i]\n assert_series_equal(\n lcol, rcol, check_dtype=check_dtype,\n check_index_type=check_index_type,\n check_less_precise=check_less_precise,\n check_exact=check_exact, check_names=check_names,\n check_datetimelike_compat=check_datetimelike_compat,\n check_categorical=check_categorical,\n obj='DataFrame.iloc[:, {idx}]'.format(idx=i))\n\n\ndef assert_panel_equal(left, right,\n check_dtype=True,\n check_panel_type=False,\n check_less_precise=False,\n check_names=False,\n by_blocks=False,\n obj='Panel'):\n \"\"\"Check that left and right Panels are equal.\n\n Parameters\n ----------\n left : Panel (or nd)\n right : Panel (or nd)\n check_dtype : bool, default True\n Whether to check the Panel dtype is identical.\n check_panel_type : bool, default False\n Whether to check the Panel class is identical.\n check_less_precise : bool or int, default False\n Specify comparison precision. Only used when check_exact is False.\n 5 digits (False) or 3 digits (True) after decimal points are compared.\n If int, then specify the digits to compare\n check_names : bool, default True\n Whether to check the Index names attribute.\n by_blocks : bool, default False\n Specify how to compare internal data. 
If False, compare by columns.\n If True, compare by blocks.\n obj : str, default 'Panel'\n Specify the object name being compared, internally used to show\n the appropriate assertion message.\n \"\"\"\n\n if check_panel_type:\n assert_class_equal(left, right, obj=obj)\n\n for axis in left._AXIS_ORDERS:\n left_ind = getattr(left, axis)\n right_ind = getattr(right, axis)\n assert_index_equal(left_ind, right_ind, check_names=check_names)\n\n if by_blocks:\n rblocks = right._to_dict_of_blocks()\n lblocks = left._to_dict_of_blocks()\n for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):\n assert dtype in lblocks\n assert dtype in rblocks\n array_equivalent(lblocks[dtype].values, rblocks[dtype].values)\n else:\n\n # can potentially be slow\n for i, item in enumerate(left._get_axis(0)):\n msg = \"non-matching item (right) '{item}'\".format(item=item)\n assert item in right, msg\n litem = left.iloc[i]\n ritem = right.iloc[i]\n assert_frame_equal(litem, ritem,\n check_less_precise=check_less_precise,\n check_names=check_names)\n\n for i, item in enumerate(right._get_axis(0)):\n msg = \"non-matching item (left) '{item}'\".format(item=item)\n assert item in left, msg\n\n\n# -----------------------------------------------------------------------------\n# Sparse\n\n\ndef assert_sp_array_equal(left, right, check_dtype=True):\n \"\"\"Check that the left and right SparseArray are equal.\n\n Parameters\n ----------\n left : SparseArray\n right : SparseArray\n check_dtype : bool, default True\n Whether to check the data dtype is identical.\n \"\"\"\n\n _check_isinstance(left, right, pd.SparseArray)\n\n assert_numpy_array_equal(left.sp_values, right.sp_values,\n check_dtype=check_dtype)\n\n # SparseIndex comparison\n assert isinstance(left.sp_index, pd._libs.sparse.SparseIndex)\n assert isinstance(right.sp_index, pd._libs.sparse.SparseIndex)\n\n if not left.sp_index.equals(right.sp_index):\n raise_assert_detail('SparseArray.index', 'index are not equal',\n left.sp_index, right.sp_index)\n\n assert_attr_equal('fill_value', left, right)\n if check_dtype:\n assert_attr_equal('dtype', left, right)\n assert_numpy_array_equal(left.values, right.values,\n check_dtype=check_dtype)\n\n\ndef assert_sp_series_equal(left, right, check_dtype=True, exact_indices=True,\n check_series_type=True, check_names=True,\n obj='SparseSeries'):\n \"\"\"Check that the left and right SparseSeries are equal.\n\n Parameters\n ----------\n left : SparseSeries\n right : SparseSeries\n check_dtype : bool, default True\n Whether to check the Series dtype is identical.\n exact_indices : bool, default True\n check_series_type : bool, default True\n Whether to check the SparseSeries class is identical.\n check_names : bool, default True\n Whether to check the SparseSeries name attribute.\n obj : str, default 'SparseSeries'\n Specify the object name being compared, internally used to show\n the appropriate assertion message.\n \"\"\"\n _check_isinstance(left, right, pd.SparseSeries)\n\n if check_series_type:\n assert_class_equal(left, right, obj=obj)\n\n assert_index_equal(left.index, right.index,\n obj='{obj}.index'.format(obj=obj))\n\n assert_sp_array_equal(left.block.values, right.block.values)\n\n if check_names:\n assert_attr_equal('name', left, right)\n if check_dtype:\n assert_attr_equal('dtype', left, right)\n\n assert_numpy_array_equal(left.values, right.values)\n\n\ndef assert_sp_frame_equal(left, right, check_dtype=True, exact_indices=True,\n check_frame_type=True, obj='SparseDataFrame'):\n \"\"\"Check that the left 
and right SparseDataFrame are equal.\n\n Parameters\n ----------\n left : SparseDataFrame\n right : SparseDataFrame\n check_dtype : bool, default True\n Whether to check the Series dtype is identical.\n exact_indices : bool, default True\n SparseSeries SparseIndex objects must be exactly the same,\n otherwise just compare dense representations.\n check_frame_type : bool, default True\n Whether to check the SparseDataFrame class is identical.\n obj : str, default 'SparseDataFrame'\n Specify the object name being compared, internally used to show\n the appropriate assertion message.\n \"\"\"\n _check_isinstance(left, right, pd.SparseDataFrame)\n\n if check_frame_type:\n assert_class_equal(left, right, obj=obj)\n\n assert_index_equal(left.index, right.index,\n obj='{obj}.index'.format(obj=obj))\n assert_index_equal(left.columns, right.columns,\n obj='{obj}.columns'.format(obj=obj))\n\n for col, series in compat.iteritems(left):\n assert (col in right)\n # trade-off?\n\n if exact_indices:\n assert_sp_series_equal(series, right[col],\n check_dtype=check_dtype)\n else:\n assert_series_equal(series.to_dense(), right[col].to_dense(),\n check_dtype=check_dtype)\n\n assert_attr_equal('default_fill_value', left, right, obj=obj)\n\n # do I care?\n # assert(left.default_kind == right.default_kind)\n\n for col in right:\n assert (col in left)\n\n# -----------------------------------------------------------------------------\n# Others\n\n\ndef assert_contains_all(iterable, dic):\n for k in iterable:\n assert k in dic, \"Did not contain item: '{key!r}'\".format(key=k)\n\n\ndef assert_copy(iter1, iter2, **eql_kwargs):\n \"\"\"\n iter1, iter2: iterables that produce elements\n comparable with assert_almost_equal\n\n Checks that the elements are equal, but not\n the same object. 
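For example (an illustrative doctest with equal values but distinct objects):\n\n    >>> import numpy as np\n    >>> assert_copy([np.array([1, 2])], [np.array([1, 2])])\n\n    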
(Does not check that items\n in sequences are also not the same object)\n \"\"\"\n for elem1, elem2 in zip(iter1, iter2):\n assert_almost_equal(elem1, elem2, **eql_kwargs)\n msg = (\"Expected object {obj1!r} and object {obj2!r} to be \"\n \"different objects, but they were the same object.\"\n ).format(obj1=type(elem1), obj2=type(elem2))\n assert elem1 is not elem2, msg\n\n\ndef getCols(k):\n return string.ascii_uppercase[:k]\n\n\ndef getArangeMat():\n return np.arange(N * K).reshape((N, K))\n\n\n# make index\ndef makeStringIndex(k=10, name=None):\n return Index(rands_array(nchars=10, size=k), name=name)\n\n\ndef makeUnicodeIndex(k=10, name=None):\n return Index(randu_array(nchars=10, size=k), name=name)\n\n\ndef makeCategoricalIndex(k=10, n=3, name=None, **kwargs):\n \"\"\" make a length k index or n categories \"\"\"\n x = rands_array(nchars=4, size=n)\n return CategoricalIndex(np.random.choice(x, k), name=name, **kwargs)\n\n\ndef makeIntervalIndex(k=10, name=None, **kwargs):\n \"\"\" make a length k IntervalIndex \"\"\"\n x = np.linspace(0, 100, num=(k + 1))\n return IntervalIndex.from_breaks(x, name=name, **kwargs)\n\n\ndef makeBoolIndex(k=10, name=None):\n if k == 1:\n return Index([True], name=name)\n elif k == 2:\n return Index([False, True], name=name)\n return Index([False, True] + [False] * (k - 2), name=name)\n\n\ndef makeIntIndex(k=10, name=None):\n return Index(lrange(k), name=name)\n\n\ndef makeUIntIndex(k=10, name=None):\n return Index([2**63 + i for i in lrange(k)], name=name)\n\n\ndef makeRangeIndex(k=10, name=None, **kwargs):\n return RangeIndex(0, k, 1, name=name, **kwargs)\n\n\ndef makeFloatIndex(k=10, name=None):\n values = sorted(np.random.random_sample(k)) - np.random.random_sample(1)\n return Index(values * (10 ** np.random.randint(0, 9)), name=name)\n\n\ndef makeDateIndex(k=10, freq='B', name=None, **kwargs):\n dt = datetime(2000, 1, 1)\n dr = bdate_range(dt, periods=k, freq=freq, name=name)\n return DatetimeIndex(dr, name=name, **kwargs)\n\n\ndef makeTimedeltaIndex(k=10, freq='D', name=None, **kwargs):\n return TimedeltaIndex(start='1 day', periods=k, freq=freq,\n name=name, **kwargs)\n\n\ndef makePeriodIndex(k=10, name=None, **kwargs):\n dt = datetime(2000, 1, 1)\n dr = PeriodIndex(start=dt, periods=k, freq='B', name=name, **kwargs)\n return dr\n\n\ndef makeMultiIndex(k=10, names=None, **kwargs):\n return MultiIndex.from_product(\n (('foo', 'bar'), (1, 2)), names=names, **kwargs)\n\n\ndef all_index_generator(k=10):\n \"\"\"Generator which can be iterated over to get instances of all the various\n index classes.\n\n Parameters\n ----------\n k: length of each of the index instances\n \"\"\"\n all_make_index_funcs = [makeIntIndex, makeFloatIndex, makeStringIndex,\n makeUnicodeIndex, makeDateIndex, makePeriodIndex,\n makeTimedeltaIndex, makeBoolIndex, makeRangeIndex,\n makeIntervalIndex,\n makeCategoricalIndex]\n for make_index_func in all_make_index_funcs:\n yield make_index_func(k=k)\n\n\ndef index_subclass_makers_generator():\n make_index_funcs = [\n makeDateIndex, makePeriodIndex,\n makeTimedeltaIndex, makeRangeIndex,\n makeIntervalIndex, makeCategoricalIndex,\n makeMultiIndex\n ]\n for make_index_func in make_index_funcs:\n yield make_index_func\n\n\ndef all_timeseries_index_generator(k=10):\n \"\"\"Generator which can be iterated over to get instances of all the classes\n which represent time-seires.\n\n Parameters\n ----------\n k: length of each of the index instances\n \"\"\"\n make_index_funcs = [makeDateIndex, makePeriodIndex, makeTimedeltaIndex]\n for 
make_index_func in make_index_funcs:\n yield make_index_func(k=k)\n\n\n# make series\ndef makeFloatSeries(name=None):\n index = makeStringIndex(N)\n return Series(randn(N), index=index, name=name)\n\n\ndef makeStringSeries(name=None):\n index = makeStringIndex(N)\n return Series(randn(N), index=index, name=name)\n\n\ndef makeObjectSeries(name=None):\n dateIndex = makeDateIndex(N)\n dateIndex = Index(dateIndex, dtype=object)\n index = makeStringIndex(N)\n return Series(dateIndex, index=index, name=name)\n\n\ndef getSeriesData():\n index = makeStringIndex(N)\n return {c: Series(randn(N), index=index) for c in getCols(K)}\n\n\ndef makeTimeSeries(nper=None, freq='B', name=None):\n if nper is None:\n nper = N\n return Series(randn(nper), index=makeDateIndex(nper, freq=freq), name=name)\n\n\ndef makePeriodSeries(nper=None, name=None):\n if nper is None:\n nper = N\n return Series(randn(nper), index=makePeriodIndex(nper), name=name)\n\n\ndef getTimeSeriesData(nper=None, freq='B'):\n return {c: makeTimeSeries(nper, freq) for c in getCols(K)}\n\n\ndef getPeriodData(nper=None):\n return {c: makePeriodSeries(nper) for c in getCols(K)}\n\n\n# make frame\ndef makeTimeDataFrame(nper=None, freq='B'):\n data = getTimeSeriesData(nper, freq)\n return DataFrame(data)\n\n\ndef makeDataFrame():\n data = getSeriesData()\n return DataFrame(data)\n\n\ndef getMixedTypeDict():\n index = Index(['a', 'b', 'c', 'd', 'e'])\n\n data = {\n 'A': [0., 1., 2., 3., 4.],\n 'B': [0., 1., 0., 1., 0.],\n 'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],\n 'D': bdate_range('1/1/2009', periods=5)\n }\n\n return index, data\n\n\ndef makeMixedDataFrame():\n return DataFrame(getMixedTypeDict()[1])\n\n\ndef makePeriodFrame(nper=None):\n data = getPeriodData(nper)\n return DataFrame(data)\n\n\ndef makePanel(nper=None):\n with warnings.catch_warnings(record=True):\n cols = ['Item' + c for c in string.ascii_uppercase[:K - 1]]\n data = {c: makeTimeDataFrame(nper) for c in cols}\n return Panel.fromDict(data)\n\n\ndef makePeriodPanel(nper=None):\n with warnings.catch_warnings(record=True):\n cols = ['Item' + c for c in string.ascii_uppercase[:K - 1]]\n data = {c: makePeriodFrame(nper) for c in cols}\n return Panel.fromDict(data)\n\n\ndef makeCustomIndex(nentries, nlevels, prefix='#', names=False, ndupe_l=None,\n idx_type=None):\n \"\"\"Create an index/multindex with given dimensions, levels, names, etc'\n\n nentries - number of entries in index\n nlevels - number of levels (> 1 produces multindex)\n prefix - a string prefix for labels\n names - (Optional), bool or list of strings. 
if True will use default\n names, if false will use no names, if a list is given, the name of\n each level in the index will be taken from the list.\n ndupe_l - (Optional), list of ints, the number of rows for which the\n label will repeated at the corresponding level, you can specify just\n the first few, the rest will use the default ndupe_l of 1.\n len(ndupe_l) <= nlevels.\n idx_type - \"i\"/\"f\"/\"s\"/\"u\"/\"dt\"/\"p\"/\"td\".\n If idx_type is not None, `idx_nlevels` must be 1.\n \"i\"/\"f\" creates an integer/float index,\n \"s\"/\"u\" creates a string/unicode index\n \"dt\" create a datetime index.\n \"td\" create a datetime index.\n\n if unspecified, string labels will be generated.\n \"\"\"\n\n if ndupe_l is None:\n ndupe_l = [1] * nlevels\n assert (is_sequence(ndupe_l) and len(ndupe_l) <= nlevels)\n assert (names is None or names is False or\n names is True or len(names) is nlevels)\n assert idx_type is None or \\\n (idx_type in ('i', 'f', 's', 'u', 'dt', 'p', 'td') and nlevels == 1)\n\n if names is True:\n # build default names\n names = [prefix + str(i) for i in range(nlevels)]\n if names is False:\n # pass None to index constructor for no name\n names = None\n\n # make singelton case uniform\n if isinstance(names, compat.string_types) and nlevels == 1:\n names = [names]\n\n # specific 1D index type requested?\n idx_func = dict(i=makeIntIndex, f=makeFloatIndex,\n s=makeStringIndex, u=makeUnicodeIndex,\n dt=makeDateIndex, td=makeTimedeltaIndex,\n p=makePeriodIndex).get(idx_type)\n if idx_func:\n idx = idx_func(nentries)\n # but we need to fill in the name\n if names:\n idx.name = names[0]\n return idx\n elif idx_type is not None:\n raise ValueError('\"{idx_type}\" is not a legal value for `idx_type`, '\n 'use \"i\"/\"f\"/\"s\"/\"u\"/\"dt/\"p\"/\"td\".'\n .format(idx_type=idx_type))\n\n if len(ndupe_l) < nlevels:\n ndupe_l.extend([1] * (nlevels - len(ndupe_l)))\n assert len(ndupe_l) == nlevels\n\n assert all(x > 0 for x in ndupe_l)\n\n tuples = []\n for i in range(nlevels):\n def keyfunc(x):\n import re\n numeric_tuple = re.sub(r\"[^\\d_]_?\", \"\", x).split(\"_\")\n return lmap(int, numeric_tuple)\n\n # build a list of lists to create the index from\n div_factor = nentries // ndupe_l[i] + 1\n cnt = Counter()\n for j in range(div_factor):\n label = '{prefix}_l{i}_g{j}'.format(prefix=prefix, i=i, j=j)\n cnt[label] = ndupe_l[i]\n # cute Counter trick\n result = list(sorted(cnt.elements(), key=keyfunc))[:nentries]\n tuples.append(result)\n\n tuples = lzip(*tuples)\n\n # convert tuples to index\n if nentries == 1:\n # we have a single level of tuples, i.e. a regular Index\n index = Index(tuples[0], name=names[0])\n elif nlevels == 1:\n name = None if names is None else names[0]\n index = Index((x[0] for x in tuples), name=name)\n else:\n index = MultiIndex.from_tuples(tuples, names=names)\n return index\n\n\ndef makeCustomDataframe(nrows, ncols, c_idx_names=True, r_idx_names=True,\n c_idx_nlevels=1, r_idx_nlevels=1, data_gen_f=None,\n c_ndupe_l=None, r_ndupe_l=None, dtype=None,\n c_idx_type=None, r_idx_type=None):\n \"\"\"\n nrows, ncols - number of data rows/cols\n c_idx_names, idx_names - False/True/list of strings, yields No names ,\n default names or uses the provided names for the levels of the\n corresponding index. You can provide a single string when\n c_idx_nlevels ==1.\n c_idx_nlevels - number of levels in columns index. > 1 will yield MultiIndex\n r_idx_nlevels - number of levels in rows index. 
> 1 will yield MultiIndex\n data_gen_f - a function f(row,col) which return the data value\n at that position, the default generator used yields values of the form\n \"RxCy\" based on position.\n c_ndupe_l, r_ndupe_l - list of integers, determines the number\n of duplicates for each label at a given level of the corresponding\n index. The default `None` value produces a multiplicity of 1 across\n all levels, i.e. a unique index. Will accept a partial list of length\n N < idx_nlevels, for just the first N levels. If ndupe doesn't divide\n nrows/ncol, the last label might have lower multiplicity.\n dtype - passed to the DataFrame constructor as is, in case you wish to\n have more control in conjuncion with a custom `data_gen_f`\n r_idx_type, c_idx_type - \"i\"/\"f\"/\"s\"/\"u\"/\"dt\"/\"td\".\n If idx_type is not None, `idx_nlevels` must be 1.\n \"i\"/\"f\" creates an integer/float index,\n \"s\"/\"u\" creates a string/unicode index\n \"dt\" create a datetime index.\n \"td\" create a timedelta index.\n\n if unspecified, string labels will be generated.\n\n Examples:\n\n # 5 row, 3 columns, default names on both, single index on both axis\n >> makeCustomDataframe(5,3)\n\n # make the data a random int between 1 and 100\n >> mkdf(5,3,data_gen_f=lambda r,c:randint(1,100))\n\n # 2-level multiindex on rows with each label duplicated\n # twice on first level, default names on both axis, single\n # index on both axis\n >> a=makeCustomDataframe(5,3,r_idx_nlevels=2,r_ndupe_l=[2])\n\n # DatetimeIndex on row, index with unicode labels on columns\n # no names on either axis\n >> a=makeCustomDataframe(5,3,c_idx_names=False,r_idx_names=False,\n r_idx_type=\"dt\",c_idx_type=\"u\")\n\n # 4-level multindex on rows with names provided, 2-level multindex\n # on columns with default labels and default names.\n >> a=makeCustomDataframe(5,3,r_idx_nlevels=4,\n r_idx_names=[\"FEE\",\"FI\",\"FO\",\"FAM\"],\n c_idx_nlevels=2)\n\n >> a=mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)\n \"\"\"\n\n assert c_idx_nlevels > 0\n assert r_idx_nlevels > 0\n assert r_idx_type is None or \\\n (r_idx_type in ('i', 'f', 's',\n 'u', 'dt', 'p', 'td') and r_idx_nlevels == 1)\n assert c_idx_type is None or \\\n (c_idx_type in ('i', 'f', 's',\n 'u', 'dt', 'p', 'td') and c_idx_nlevels == 1)\n\n columns = makeCustomIndex(ncols, nlevels=c_idx_nlevels, prefix='C',\n names=c_idx_names, ndupe_l=c_ndupe_l,\n idx_type=c_idx_type)\n index = makeCustomIndex(nrows, nlevels=r_idx_nlevels, prefix='R',\n names=r_idx_names, ndupe_l=r_ndupe_l,\n idx_type=r_idx_type)\n\n # by default, generate data based on location\n if data_gen_f is None:\n data_gen_f = lambda r, c: \"R{rows}C{cols}\".format(rows=r, cols=c)\n\n data = [[data_gen_f(r, c) for c in range(ncols)] for r in range(nrows)]\n\n return DataFrame(data, index, columns, dtype=dtype)\n\n\ndef _create_missing_idx(nrows, ncols, density, random_state=None):\n if random_state is None:\n random_state = np.random\n else:\n random_state = np.random.RandomState(random_state)\n\n # below is cribbed from scipy.sparse\n size = int(np.round((1 - density) * nrows * ncols))\n # generate a few more to ensure unique values\n min_rows = 5\n fac = 1.02\n extra_size = min(size + min_rows, fac * size)\n\n def _gen_unique_rand(rng, _extra_size):\n ind = rng.rand(int(_extra_size))\n return np.unique(np.floor(ind * nrows * ncols))[:size]\n\n ind = _gen_unique_rand(random_state, extra_size)\n while ind.size < size:\n extra_size *= 1.05\n ind = _gen_unique_rand(random_state, extra_size)\n\n j = np.floor(ind * 1. 
/ nrows).astype(int)\n i = (ind - j * nrows).astype(int)\n return i.tolist(), j.tolist()\n\n\ndef makeMissingCustomDataframe(nrows, ncols, density=.9, random_state=None,\n c_idx_names=True, r_idx_names=True,\n c_idx_nlevels=1, r_idx_nlevels=1,\n data_gen_f=None,\n c_ndupe_l=None, r_ndupe_l=None, dtype=None,\n c_idx_type=None, r_idx_type=None):\n \"\"\"\n Parameters\n ----------\n Density : float, optional\n Float in (0, 1) that gives the percentage of non-missing numbers in\n the DataFrame.\n random_state : {np.random.RandomState, int}, optional\n Random number generator or random seed.\n\n See makeCustomDataframe for descriptions of the rest of the parameters.\n \"\"\"\n df = makeCustomDataframe(nrows, ncols, c_idx_names=c_idx_names,\n r_idx_names=r_idx_names,\n c_idx_nlevels=c_idx_nlevels,\n r_idx_nlevels=r_idx_nlevels,\n data_gen_f=data_gen_f,\n c_ndupe_l=c_ndupe_l, r_ndupe_l=r_ndupe_l,\n dtype=dtype, c_idx_type=c_idx_type,\n r_idx_type=r_idx_type)\n\n i, j = _create_missing_idx(nrows, ncols, density, random_state)\n df.values[i, j] = np.nan\n return df\n\n\ndef makeMissingDataframe(density=.9, random_state=None):\n df = makeDataFrame()\n i, j = _create_missing_idx(*df.shape, density=density,\n random_state=random_state)\n df.values[i, j] = np.nan\n return df\n\n\ndef add_nans(panel):\n I, J, N = panel.shape\n for i, item in enumerate(panel.items):\n dm = panel[item]\n for j, col in enumerate(dm.columns):\n dm[col][:i + j] = np.NaN\n return panel\n\n\ndef add_nans_panel4d(panel4d):\n for l, label in enumerate(panel4d.labels):\n panel = panel4d[label]\n add_nans(panel)\n return panel4d\n\n\nclass TestSubDict(dict):\n\n def __init__(self, *args, **kwargs):\n dict.__init__(self, *args, **kwargs)\n\n\ndef optional_args(decorator):\n \"\"\"allows a decorator to take optional positional and keyword arguments.\n Assumes that taking a single, callable, positional argument means that\n it is decorating a function, i.e. 
something like this::\n\n @my_decorator\n def function(): pass\n\n Calls decorator with decorator(f, *args, **kwargs)\"\"\"\n\n @wraps(decorator)\n def wrapper(*args, **kwargs):\n def dec(f):\n return decorator(f, *args, **kwargs)\n\n is_decorating = not kwargs and len(args) == 1 and callable(args[0])\n if is_decorating:\n f = args[0]\n args = []\n return dec(f)\n else:\n return dec\n\n return wrapper\n\n\n# skip tests on exceptions with this message\n_network_error_messages = (\n # 'urlopen error timed out',\n # 'timeout: timed out',\n # 'socket.timeout: timed out',\n 'timed out',\n 'Server Hangup',\n 'HTTP Error 503: Service Unavailable',\n '502: Proxy Error',\n 'HTTP Error 502: internal error',\n 'HTTP Error 502',\n 'HTTP Error 503',\n 'HTTP Error 403',\n 'HTTP Error 400',\n 'Temporary failure in name resolution',\n 'Name or service not known',\n 'Connection refused',\n 'certificate verify',\n)\n\n# or this e.errno/e.reason.errno\n_network_errno_vals = (\n 101, # Network is unreachable\n 111, # Connection refused\n 110, # Connection timed out\n 104, # Connection reset Error\n 54, # Connection reset by peer\n 60, # urllib.error.URLError: [Errno 60] Connection timed out\n)\n\n# Both of the above shouldn't mask real issues such as 404's\n# or refused connections (changed DNS).\n# But some tests (test_data yahoo) contact incredibly flakey\n# servers.\n\n# and conditionally raise on these exception types\n_network_error_classes = (IOError, httplib.HTTPException)\n\nif sys.version_info >= (3, 3):\n _network_error_classes += (TimeoutError,) # noqa\n\n\ndef can_connect(url, error_classes=_network_error_classes):\n \"\"\"Try to connect to the given url. True if succeeds, False if IOError\n raised\n\n Parameters\n ----------\n url : basestring\n The URL to try to connect to\n\n Returns\n -------\n connectable : bool\n Return True if no IOError (unable to connect) or URLError (bad url) was\n raised\n \"\"\"\n try:\n with urlopen(url):\n pass\n except error_classes:\n return False\n else:\n return True\n\n\n@optional_args\ndef network(t, url=\"http://www.google.com\",\n raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT,\n check_before_test=False,\n error_classes=_network_error_classes,\n skip_errnos=_network_errno_vals,\n _skip_on_messages=_network_error_messages,\n ):\n \"\"\"\n Label a test as requiring network connection and, if an error is\n encountered, only raise if it does not find a network connection.\n\n In comparison to ``network``, this assumes an added contract to your test:\n you must assert that, under normal conditions, your test will ONLY fail if\n it does not have network connectivity.\n\n You can call this in 3 ways: as a standard decorator, with keyword\n arguments, or with a positional argument that is the url to check.\n\n Parameters\n ----------\n t : callable\n The test requiring network connectivity.\n url : path\n The url to test via ``pandas.io.common.urlopen`` to check\n for connectivity. Defaults to 'http://www.google.com'.\n raise_on_error : bool\n If True, never catches errors.\n check_before_test : bool\n If True, checks connectivity before running the test case.\n error_classes : tuple or Exception\n error classes to ignore. If not in ``error_classes``, raises the error.\n defaults to IOError. 
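(For instance, passing ``error_classes=(IOError, ValueError)``, an illustrative value rather than the default, would also treat value errors raised while fetching the URL as network errors.) 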
Be careful about changing the error classes here.\n skip_errnos : iterable of int\n Any exception that has .errno or .reason.erno set to one\n of these values will be skipped with an appropriate\n message.\n _skip_on_messages: iterable of string\n any exception e for which one of the strings is\n a substring of str(e) will be skipped with an appropriate\n message. Intended to suppress errors where an errno isn't available.\n\n Notes\n -----\n * ``raise_on_error`` supercedes ``check_before_test``\n\n Returns\n -------\n t : callable\n The decorated test ``t``, with checks for connectivity errors.\n\n Example\n -------\n\n Tests decorated with @network will fail if it's possible to make a network\n connection to another URL (defaults to google.com)::\n\n >>> from pandas.util.testing import network\n >>> from pandas.io.common import urlopen\n >>> @network\n ... def test_network():\n ... with urlopen(\"rabbit://bonanza.com\"):\n ... pass\n Traceback\n ...\n URLError: <urlopen error unknown url type: rabit>\n\n You can specify alternative URLs::\n\n >>> @network(\"http://www.yahoo.com\")\n ... def test_something_with_yahoo():\n ... raise IOError(\"Failure Message\")\n >>> test_something_with_yahoo()\n Traceback (most recent call last):\n ...\n IOError: Failure Message\n\n If you set check_before_test, it will check the url first and not run the\n test on failure::\n\n >>> @network(\"failing://url.blaher\", check_before_test=True)\n ... def test_something():\n ... print(\"I ran!\")\n ... raise ValueError(\"Failure\")\n >>> test_something()\n Traceback (most recent call last):\n ...\n\n Errors not related to networking will always be raised.\n \"\"\"\n from pytest import skip\n t.network = True\n\n @compat.wraps(t)\n def wrapper(*args, **kwargs):\n if check_before_test and not raise_on_error:\n if not can_connect(url, error_classes):\n skip()\n try:\n return t(*args, **kwargs)\n except Exception as e:\n errno = getattr(e, 'errno', None)\n if not errno and hasattr(errno, \"reason\"):\n errno = getattr(e.reason, 'errno', None)\n\n if errno in skip_errnos:\n skip(\"Skipping test due to known errno\"\n \" and error {error}\".format(error=e))\n\n try:\n e_str = traceback.format_exc(e)\n except Exception:\n e_str = str(e)\n\n if any(m.lower() in e_str.lower() for m in _skip_on_messages):\n skip(\"Skipping test because exception \"\n \"message is known and error {error}\".format(error=e))\n\n if not isinstance(e, error_classes):\n raise\n\n if raise_on_error or can_connect(url, error_classes):\n raise\n else:\n skip(\"Skipping test due to lack of connectivity\"\n \" and error {error}\".format(e))\n\n return wrapper\n\n\nwith_connectivity_check = network\n\n\nclass SimpleMock(object):\n\n \"\"\"\n Poor man's mocking object\n\n Note: only works for new-style classes, assumes __getattribute__ exists.\n\n >>> a = type(\"Duck\",(),{})\n >>> a.attr1,a.attr2 =\"fizz\",\"buzz\"\n >>> b = SimpleMock(a,\"attr1\",\"bar\")\n >>> b.attr1 == \"bar\" and b.attr2 == \"buzz\"\n True\n >>> a.attr1 == \"fizz\" and a.attr2 == \"buzz\"\n True\n \"\"\"\n\n def __init__(self, obj, *args, **kwds):\n assert(len(args) % 2 == 0)\n attrs = kwds.get(\"attrs\", {})\n for k, v in zip(args[::2], args[1::2]):\n # dict comprehensions break 2.6\n attrs[k] = v\n self.attrs = attrs\n self.obj = obj\n\n def __getattribute__(self, name):\n attrs = object.__getattribute__(self, \"attrs\")\n obj = object.__getattribute__(self, \"obj\")\n return attrs.get(name, type(obj).__getattribute__(obj, name))\n\n\n@contextmanager\ndef 
stdin_encoding(encoding=None):\n \"\"\"\n Context manager for running bits of code while emulating an arbitrary\n stdin encoding.\n\n >>> import sys\n >>> _encoding = sys.stdin.encoding\n >>> with stdin_encoding('AES'): sys.stdin.encoding\n 'AES'\n >>> sys.stdin.encoding==_encoding\n True\n\n \"\"\"\n import sys\n\n _stdin = sys.stdin\n sys.stdin = SimpleMock(sys.stdin, \"encoding\", encoding)\n yield\n sys.stdin = _stdin\n\n\ndef assert_raises_regex(_exception, _regexp, _callable=None,\n *args, **kwargs):\n r\"\"\"\n Check that the specified Exception is raised and that the error message\n matches a given regular expression pattern. This may be a regular\n expression object or a string containing a regular expression suitable\n for use by `re.search()`. This is a port of the `assertRaisesRegexp`\n function from unittest in Python 2.7.\n\n Examples\n --------\n >>> assert_raises_regex(ValueError, 'invalid literal for.*XYZ', int, 'XYZ')\n >>> import re\n >>> assert_raises_regex(ValueError, re.compile('literal'), int, 'XYZ')\n\n If an exception of a different type is raised, it bubbles up.\n\n >>> assert_raises_regex(TypeError, 'literal', int, 'XYZ')\n Traceback (most recent call last):\n ...\n ValueError: invalid literal for int() with base 10: 'XYZ'\n >>> dct = dict()\n >>> assert_raises_regex(KeyError, 'pear', dct.__getitem__, 'apple')\n Traceback (most recent call last):\n ...\n AssertionError: \"pear\" does not match \"'apple'\"\n\n You can also use this in a with statement.\n >>> with assert_raises_regex(TypeError, 'unsupported operand type\\(s\\)'):\n ... 1 + {}\n >>> with assert_raises_regex(TypeError, 'banana'):\n ... 'apple'[0] = 'b'\n Traceback (most recent call last):\n ...\n AssertionError: \"banana\" does not match \"'str' object does not support \\\nitem assignment\"\n \"\"\"\n manager = _AssertRaisesContextmanager(exception=_exception, regexp=_regexp)\n if _callable is not None:\n with manager:\n _callable(*args, **kwargs)\n else:\n return manager\n\n\nclass _AssertRaisesContextmanager(object):\n \"\"\"\n Context manager behind `assert_raises_regex`.\n \"\"\"\n\n def __init__(self, exception, regexp=None):\n \"\"\"\n Initialize an _AssertRaisesContextManager instance.\n\n Parameters\n ----------\n exception : class\n The expected Exception class.\n regexp : str, default None\n The regex to compare against the Exception message.\n \"\"\"\n\n self.exception = exception\n\n if regexp is not None and not hasattr(regexp, \"search\"):\n regexp = re.compile(regexp, re.DOTALL)\n\n self.regexp = regexp\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, trace_back):\n expected = self.exception\n\n if not exc_type:\n exp_name = getattr(expected, \"__name__\", str(expected))\n raise AssertionError(\"{name} not raised.\".format(name=exp_name))\n\n return self.exception_matches(exc_type, exc_value, trace_back)\n\n def exception_matches(self, exc_type, exc_value, trace_back):\n \"\"\"\n Check that the Exception raised matches the expected Exception\n and expected error message regular expression.\n\n Parameters\n ----------\n exc_type : class\n The type of Exception raised.\n exc_value : Exception\n The instance of `exc_type` raised.\n trace_back : stack trace object\n The traceback object associated with `exc_value`.\n\n Returns\n -------\n is_matched : bool\n Whether or not the Exception raised matches the expected\n Exception class and expected error message regular expression.\n\n Raises\n ------\n AssertionError : The error message provided does not 
match\n the expected error message regular expression.\n \"\"\"\n\n if issubclass(exc_type, self.exception):\n if self.regexp is not None:\n val = str(exc_value)\n\n if not self.regexp.search(val):\n msg = '\"{pat}\" does not match \"{val}\"'.format(\n pat=self.regexp.pattern, val=val)\n e = AssertionError(msg)\n raise_with_traceback(e, trace_back)\n\n return True\n else:\n # Failed, so allow Exception to bubble up.\n return False\n\n\n@contextmanager\ndef assert_produces_warning(expected_warning=Warning, filter_level=\"always\",\n clear=None, check_stacklevel=True):\n \"\"\"\n Context manager for running code expected to either raise a specific\n warning, or not raise any warnings. Verifies that the code raises the\n expected warning, and that it does not raise any other unexpected\n warnings. It is basically a wrapper around ``warnings.catch_warnings``.\n\n Parameters\n ----------\n expected_warning : {Warning, False, None}, default Warning\n The type of Exception raised. ``exception.Warning`` is the base\n class for all warnings. To check that no warning is returned,\n specify ``False`` or ``None``.\n filter_level : str, default \"always\"\n Specifies whether warnings are ignored, displayed, or turned\n into errors.\n Valid values are:\n\n * \"error\" - turns matching warnings into exceptions\n * \"ignore\" - discard the warning\n * \"always\" - always emit a warning\n * \"default\" - print the warning the first time it is generated\n from each location\n * \"module\" - print the warning the first time it is generated\n from each module\n * \"once\" - print the warning the first time it is generated\n\n clear : str, default None\n If not ``None`` then remove any previously raised warnings from\n the ``__warningsregistry__`` to ensure that no warning messages are\n suppressed by this context manager. If ``None`` is specified,\n the ``__warningsregistry__`` keeps track of which warnings have been\n shown, and does not show them again.\n check_stacklevel : bool, default True\n If True, displays the line that called the function containing\n the warning to show were the function is called. Otherwise, the\n line that implements the function is displayed.\n\n Examples\n --------\n >>> import warnings\n >>> with assert_produces_warning():\n ... warnings.warn(UserWarning())\n ...\n >>> with assert_produces_warning(False):\n ... warnings.warn(RuntimeWarning())\n ...\n Traceback (most recent call last):\n ...\n AssertionError: Caused unexpected warning(s): ['RuntimeWarning'].\n >>> with assert_produces_warning(UserWarning):\n ... 
warnings.warn(RuntimeWarning())\n Traceback (most recent call last):\n ...\n AssertionError: Did not see expected warning of class 'UserWarning'.\n\n ..warn:: This is *not* thread-safe.\n \"\"\"\n with warnings.catch_warnings(record=True) as w:\n\n if clear is not None:\n # make sure that we are clearning these warnings\n # if they have happened before\n # to guarantee that we will catch them\n if not is_list_like(clear):\n clear = [clear]\n for m in clear:\n try:\n m.__warningregistry__.clear()\n except Exception:\n pass\n\n saw_warning = False\n warnings.simplefilter(filter_level)\n yield w\n extra_warnings = []\n\n for actual_warning in w:\n if (expected_warning and issubclass(actual_warning.category,\n expected_warning)):\n saw_warning = True\n\n if check_stacklevel and issubclass(actual_warning.category,\n (FutureWarning,\n DeprecationWarning)):\n from inspect import getframeinfo, stack\n caller = getframeinfo(stack()[2][0])\n msg = (\"Warning not set with correct stacklevel. \"\n \"File where warning is raised: {actual} != \"\n \"{caller}. Warning message: {message}\"\n ).format(actual=actual_warning.filename,\n caller=caller.filename,\n message=actual_warning.message)\n assert actual_warning.filename == caller.filename, msg\n else:\n extra_warnings.append(actual_warning.category.__name__)\n if expected_warning:\n msg = \"Did not see expected warning of class {name!r}.\".format(\n name=expected_warning.__name__)\n assert saw_warning, msg\n assert not extra_warnings, (\"Caused unexpected warning(s): {extra!r}.\"\n ).format(extra=extra_warnings)\n\n\nclass RNGContext(object):\n \"\"\"\n Context manager to set the numpy random number generator speed. Returns\n to the original value upon exiting the context manager.\n\n Parameters\n ----------\n seed : int\n Seed for numpy.random.seed\n\n Examples\n --------\n\n with RNGContext(42):\n np.random.randn()\n \"\"\"\n\n def __init__(self, seed):\n self.seed = seed\n\n def __enter__(self):\n\n self.start_state = np.random.get_state()\n np.random.seed(self.seed)\n\n def __exit__(self, exc_type, exc_value, traceback):\n\n np.random.set_state(self.start_state)\n\n\n@contextmanager\ndef use_numexpr(use, min_elements=None):\n from pandas.core.computation import expressions as expr\n if min_elements is None:\n min_elements = expr._MIN_ELEMENTS\n\n olduse = expr._USE_NUMEXPR\n oldmin = expr._MIN_ELEMENTS\n expr.set_use_numexpr(use)\n expr._MIN_ELEMENTS = min_elements\n yield\n expr._MIN_ELEMENTS = oldmin\n expr.set_use_numexpr(olduse)\n\n\ndef test_parallel(num_threads=2, kwargs_list=None):\n \"\"\"Decorator to run the same function multiple times in parallel.\n\n Parameters\n ----------\n num_threads : int, optional\n The number of times the function is run in parallel.\n kwargs_list : list of dicts, optional\n The list of kwargs to update original\n function kwargs on different threads.\n Notes\n -----\n This decorator does not pass the return value of the decorated function.\n\n Original from scikit-image:\n\n https://github.com/scikit-image/scikit-image/pull/1519\n\n \"\"\"\n\n assert num_threads > 0\n has_kwargs_list = kwargs_list is not None\n if has_kwargs_list:\n assert len(kwargs_list) == num_threads\n import threading\n\n def wrapper(func):\n @wraps(func)\n def inner(*args, **kwargs):\n if has_kwargs_list:\n update_kwargs = lambda i: dict(kwargs, **kwargs_list[i])\n else:\n update_kwargs = lambda i: kwargs\n threads = []\n for i in range(num_threads):\n updated_kwargs = update_kwargs(i)\n thread = threading.Thread(target=func, args=args,\n 
kwargs=updated_kwargs)\n threads.append(thread)\n for thread in threads:\n thread.start()\n for thread in threads:\n thread.join()\n return inner\n return wrapper\n\n\nclass SubclassedSeries(Series):\n _metadata = ['testattr', 'name']\n\n @property\n def _constructor(self):\n return SubclassedSeries\n\n @property\n def _constructor_expanddim(self):\n return SubclassedDataFrame\n\n\nclass SubclassedDataFrame(DataFrame):\n _metadata = ['testattr']\n\n @property\n def _constructor(self):\n return SubclassedDataFrame\n\n @property\n def _constructor_sliced(self):\n return SubclassedSeries\n\n\nclass SubclassedSparseSeries(pd.SparseSeries):\n _metadata = ['testattr']\n\n @property\n def _constructor(self):\n return SubclassedSparseSeries\n\n @property\n def _constructor_expanddim(self):\n return SubclassedSparseDataFrame\n\n\nclass SubclassedSparseDataFrame(pd.SparseDataFrame):\n _metadata = ['testattr']\n\n @property\n def _constructor(self):\n return SubclassedSparseDataFrame\n\n @property\n def _constructor_sliced(self):\n return SubclassedSparseSeries\n\n\nclass SubclassedCategorical(Categorical):\n\n @property\n def _constructor(self):\n return SubclassedCategorical\n\n\n@contextmanager\ndef patch(ob, attr, value):\n \"\"\"Temporarily patch an attribute of an object.\n\n Parameters\n ----------\n ob : any\n The object to patch. This must support attribute assignment for `attr`.\n attr : str\n The name of the attribute to patch.\n value : any\n The temporary attribute to assign.\n\n Examples\n --------\n >>> class C(object):\n ... attribute = 'original'\n ...\n >>> C.attribute\n 'original'\n >>> with patch(C, 'attribute', 'patched'):\n ... in_context = C.attribute\n ...\n >>> in_context\n 'patched'\n >>> C.attribute # the value is reset when the context manager exists\n 'original'\n\n Correctly replaces attribute when the manager exits with an exception.\n >>> with patch(C, 'attribute', 'patched'):\n ... in_context = C.attribute\n ... raise ValueError()\n Traceback (most recent call last):\n ...\n ValueError\n >>> in_context\n 'patched'\n >>> C.attribute\n 'original'\n \"\"\"\n noattr = object() # mark that the attribute never existed\n old = getattr(ob, attr, noattr)\n setattr(ob, attr, value)\n try:\n yield\n finally:\n if old is noattr:\n delattr(ob, attr)\n else:\n setattr(ob, attr, old)\n\n\n@contextmanager\ndef set_timezone(tz):\n \"\"\"Context manager for temporarily setting a timezone.\n\n Parameters\n ----------\n tz : str\n A string representing a valid timezone.\n\n Examples\n --------\n\n >>> from datetime import datetime\n >>> from dateutil.tz import tzlocal\n >>> tzlocal().tzname(datetime.now())\n 'IST'\n\n >>> with set_timezone('US/Eastern'):\n ... 
tzlocal().tzname(datetime.now())\n ...\n 'EDT'\n \"\"\"\n\n import os\n import time\n\n def setTZ(tz):\n if tz is None:\n try:\n del os.environ['TZ']\n except KeyError:\n pass\n else:\n os.environ['TZ'] = tz\n time.tzset()\n\n orig_tz = os.environ.get('TZ')\n setTZ(tz)\n try:\n yield\n finally:\n setTZ(orig_tz)\n\n\ndef _make_skipna_wrapper(alternative, skipna_alternative=None):\n \"\"\"Create a function for calling on an array.\n\n Parameters\n ----------\n alternative : function\n The function to be called on the array with no NaNs.\n Only used when 'skipna_alternative' is None.\n skipna_alternative : function\n The function to be called on the original array\n\n Returns\n -------\n skipna_wrapper : function\n \"\"\"\n if skipna_alternative:\n def skipna_wrapper(x):\n return skipna_alternative(x.values)\n else:\n def skipna_wrapper(x):\n nona = x.dropna()\n if len(nona) == 0:\n return np.nan\n return alternative(nona)\n\n return skipna_wrapper\n"
] | [
[
"pandas.compat.iteritems",
"pandas.Series",
"pandas.core.dtypes.common.is_number",
"pandas.IntervalIndex",
"pandas._libs.testing.assert_almost_equal",
"pandas.bdate_range",
"pandas.compat.map",
"numpy.random.seed",
"pandas.io.common.urlopen",
"matplotlib.pyplot.get_fignums",
"numpy.random.RandomState",
"pandas.compat.zip",
"pandas.RangeIndex",
"pandas.reset_option",
"pandas.core.common._all_not_none",
"pandas.core.dtypes.common.is_list_like",
"pandas.PeriodIndex",
"pandas.core.dtypes.common.is_categorical_dtype",
"pandas.compat.wraps",
"pandas.compat.callable",
"numpy.random.choice",
"pandas.io.formats.printing.pprint_thing",
"pandas.core.algorithms.take_1d",
"pandas.compat.StringIO",
"numpy.random.rand",
"numpy.isnan",
"pandas.core.computation.expressions.set_use_numexpr",
"pandas.core.dtypes.common.is_datetimelike_v_object",
"pandas.core.dtypes.common.needs_i8_conversion",
"numpy.linspace",
"pandas.compat.import_lzma",
"pandas.compat.lzip",
"pandas.compat.raise_with_traceback",
"numpy.random.set_state",
"pandas.read_pickle",
"pandas.compat.Counter",
"pandas.core.dtypes.common.is_datetimelike_v_numeric",
"numpy.random.get_state",
"pandas._libs.testing.assert_dict_equal",
"pandas.MultiIndex.from_product",
"pandas.IntervalIndex.from_breaks",
"pandas.core.dtypes.common.is_interval_dtype",
"pandas.TimedeltaIndex",
"numpy.arange",
"pandas.MultiIndex.from_tuples",
"matplotlib.pyplot.close",
"pandas.to_pickle",
"numpy.prod",
"pandas.Index",
"numpy.array",
"pandas.core.dtypes.common.is_bool",
"pandas.compat.u",
"numpy.random.random_sample",
"pandas.DatetimeIndex",
"pandas.Panel.fromDict",
"pandas.DataFrame",
"numpy.random.randn",
"pandas.compat.lmap",
"numpy.floor",
"pandas.compat.range",
"pandas.compat.lrange",
"numpy.round",
"pandas.core.dtypes.common.is_sequence",
"numpy.random.randint",
"pandas.core.dtypes.missing.array_equivalent"
]
] |
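(Illustrative note, not a dataset row: the record above pairs the source of pandas' testing utilities with the NumPy/pandas API names detected in it. Below is a minimal sketch of how dotted API names could be recovered from a record's `code` string; the function name `extract_api_calls` and the alias-mapping approach are assumptions for illustration only, covering `import x as y` style imports and attribute-style calls, not however the dataset's `apis` column was actually produced.)

import ast

def extract_api_calls(source: str) -> set:
    """Return dotted names of attribute-style calls found in `source` (rough sketch)."""
    tree = ast.parse(source)

    # Map local aliases back to full module names, e.g. {"np": "numpy", "pd": "pandas"}.
    aliases = {}
    for node in ast.walk(tree):
        if isinstance(node, ast.Import):
            for name in node.names:
                aliases[name.asname or name.name] = name.name

    calls = set()
    for node in ast.walk(tree):
        if isinstance(node, ast.Call) and isinstance(node.func, ast.Attribute):
            # Rebuild the dotted path, e.g. np.random.seed -> numpy.random.seed.
            parts, cur = [], node.func
            while isinstance(cur, ast.Attribute):
                parts.append(cur.attr)
                cur = cur.value
            if isinstance(cur, ast.Name):
                root = aliases.get(cur.id, cur.id)
                calls.add(".".join([root] + list(reversed(parts))))
    return calls

Applied to the code string above, this recovers names such as "numpy.random.seed". It would miss calls made through `from pandas import DataFrame`-style imports and would also report method calls on local objects, so the output needs filtering against a known package list; the dataset's own `apis` column is presumably built with more complete handling.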
ScottBrian/scottbrian_algo1 | [
"57cd8fc5674507db51b1c887d5f9a68462b0ca9d"
] | [
"tests/test_scottbrian_algo1/test_algo_api.py"
] | [
"\"\"\"test_algo_api.py module.\"\"\"\n\n# from datetime import datetime, timedelta\nimport pytest\n# import sys\n# from pathlib import Path\nimport numpy as np\nimport pandas as pd # type: ignore\nimport string\nimport math\n\nfrom typing import Any, List, NamedTuple\n# from typing_extensions import Final\n\nfrom ibapi.tag_value import TagValue # type: ignore\nfrom ibapi.contract import ComboLeg # type: ignore\nfrom ibapi.contract import DeltaNeutralContract\nfrom ibapi.contract import Contract, ContractDetails\n\nfrom scottbrian_algo1.algo_api import AlgoApp, AlreadyConnected, \\\n DisconnectLockHeld, ConnectTimeout, RequestTimeout, DisconnectDuringRequest\n\nfrom scottbrian_algo1.algo_maps import get_contract_dict, get_contract_obj\nfrom scottbrian_algo1.algo_maps import get_contract_details_obj\n\n# from scottbrian_utils.diag_msg import diag_msg\n# from scottbrian_utils.file_catalog import FileCatalog\n\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\n###############################################################################\n# TestAlgoAppConnect class\n###############################################################################\nclass TestAlgoAppConnect:\n \"\"\"TestAlgoAppConnect class.\"\"\"\n\n def test_mock_connect_to_ib(self,\n algo_app: \"AlgoApp\"\n ) -> None:\n \"\"\"Test connecting to IB.\n\n Args:\n algo_app: pytest fixture instance of AlgoApp (see conftest.py)\n\n \"\"\"\n verify_algo_app_initialized(algo_app)\n\n # we are testing connect_to_ib and the subsequent code that gets\n # control as a result, such as getting the first requestID and then\n # starting a separate thread for the run loop.\n logger.debug(\"about to connect\")\n algo_app.connect_to_ib(\"127.0.0.1\",\n algo_app.PORT_FOR_LIVE_TRADING,\n client_id=0)\n\n # verify that algo_app is connected and alive with a valid reqId\n verify_algo_app_connected(algo_app)\n\n algo_app.disconnect_from_ib()\n verify_algo_app_disconnected(algo_app)\n\n def test_mock_connect_to_ib_with_timeout(self,\n algo_app: \"AlgoApp\",\n mock_ib: Any\n ) -> None:\n \"\"\"Test connecting to IB.\n\n Args:\n algo_app: pytest fixture instance of AlgoApp (see conftest.py)\n mock_ib: pytest fixture of contract_descriptions\n\n \"\"\"\n verify_algo_app_initialized(algo_app)\n\n # we are testing connect_to_ib with a simulated timeout\n logger.debug(\"about to connect\")\n with pytest.raises(ConnectTimeout):\n algo_app.connect_to_ib(\"127.0.0.1\",\n mock_ib.PORT_FOR_REQID_TIMEOUT,\n client_id=0)\n\n # verify that algo_app is not connected\n verify_algo_app_disconnected(algo_app)\n assert algo_app.request_id == 0\n\n def test_connect_to_ib_already_connected(self,\n algo_app: \"AlgoApp\",\n mock_ib: Any\n ) -> None:\n \"\"\"Test connecting to IB.\n\n Args:\n algo_app: pytest fixture instance of AlgoApp (see conftest.py)\n mock_ib: pytest fixture of contract_descriptions\n\n \"\"\"\n verify_algo_app_initialized(algo_app)\n\n # first, connect normally to mock_ib\n logger.debug(\"about to connect\")\n algo_app.connect_to_ib(\"127.0.0.1\",\n algo_app.PORT_FOR_PAPER_TRADING,\n client_id=0)\n # verify that algo_app is connected\n verify_algo_app_connected(algo_app)\n\n # try to connect again - should get error\n with pytest.raises(AlreadyConnected):\n algo_app.connect_to_ib(\"127.0.0.1\",\n algo_app.PORT_FOR_PAPER_TRADING,\n client_id=0)\n\n # verify that algo_app is still connected and alive with a valid reqId\n verify_algo_app_connected(algo_app)\n\n algo_app.disconnect_from_ib()\n verify_algo_app_disconnected(algo_app)\n\n def 
test_connect_to_ib_with_lock_held(self,\n algo_app: \"AlgoApp\",\n mock_ib: Any\n ) -> None:\n \"\"\"Test connecting to IB with disconnect lock held.\n\n Args:\n algo_app: pytest fixture instance of AlgoApp (see conftest.py)\n mock_ib: pytest fixture of contract_descriptions\n\n \"\"\"\n verify_algo_app_initialized(algo_app)\n\n # obtain the disconnect lock\n logger.debug(\"about to obtain disconnect lock\")\n algo_app.disconnect_lock.acquire()\n\n # try to connect - should get error\n with pytest.raises(DisconnectLockHeld):\n algo_app.connect_to_ib(\"127.0.0.1\",\n algo_app.PORT_FOR_LIVE_TRADING,\n client_id=0)\n\n # verify that algo_app is still simply initialized\n verify_algo_app_initialized(algo_app)\n\n # def test_real_connect_to_IB(self) -> None:\n # \"\"\"Test connecting to IB.\n #\n # Args:\n # algo_app: instance of AlgoApp from conftest pytest fixture\n # monkeypatch: pytest fixture\n #\n # \"\"\"\n # proj_dir = Path.cwd().resolve().parents[1] # back two directories\n # test_cat = \\\n # FileCatalog({'symbols': Path(proj_dir / 't_datasets/symbols.csv')\n # })\n # algo_app = AlgoApp(test_cat)\n # verify_algo_app_initialized(algo_app)\n #\n # # we are testing connect_to_ib and the subsequent code that gets\n # # control as a result, such as getting the first requestID and then\n # # starting a separate thread for the run loop.\n # logger.debug(\"about to connect\")\n # connect_ans = algo_app.connect_to_ib(\"127.0.0.1\", 7496, client_id=0)\n #\n # # verify that algo_app is connected and alive with a valid reqId\n # assert connect_ans\n # assert algo_app.run_thread.is_alive()\n # assert algo_app.isConnected()\n # assert algo_app.request_id == 1\n #\n # algo_app.disconnect_from_ib()\n # assert not algo_app.run_thread.is_alive()\n # assert not algo_app.isConnected()\n\n\n###############################################################################\n# connect disconnect verification\n###############################################################################\ndef verify_algo_app_initialized(algo_app: \"AlgoApp\") -> None:\n \"\"\"Helper function to verify the also_app instance is initialized.\n\n Args:\n algo_app: instance of AlgoApp that is to be checked\n\n \"\"\"\n assert len(algo_app.ds_catalog) > 0\n assert algo_app.request_id == 0\n assert algo_app.symbols.empty\n assert algo_app.stock_symbols.empty\n assert algo_app.response_complete_event.is_set() is False\n assert algo_app.nextValidId_event.is_set() is False\n assert algo_app.__repr__() == 'AlgoApp(ds_catalog)'\n # assert algo_app.run_thread is None\n\n\ndef verify_algo_app_connected(algo_app: \"AlgoApp\") -> None:\n \"\"\"Helper function to verify we are connected to ib.\n\n Args:\n algo_app: instance of AlgoApp that is to be checked\n\n \"\"\"\n assert algo_app.run_thread.is_alive()\n assert algo_app.isConnected()\n assert algo_app.request_id == 1\n\n\ndef verify_algo_app_disconnected(algo_app: \"AlgoApp\") -> None:\n \"\"\"Helper function to verify we are disconnected from ib.\n\n Args:\n algo_app: instance of AlgoApp that is to be checked\n\n \"\"\"\n assert not algo_app.run_thread.is_alive()\n assert not algo_app.isConnected()\n\n\n###############################################################################\n###############################################################################\n# matching symbols\n###############################################################################\n###############################################################################\nclass ExpCounts(NamedTuple):\n \"\"\"NamedTuple 
for the expected counts.\"\"\"\n sym_non_recursive: int\n sym_recursive: int\n stock_sym_non_recursive: int\n stock_sym_recursive: int\n\n\nclass SymDfs:\n \"\"\"Saved sym dfs.\"\"\"\n def __init__(self,\n mock_sym_df: Any,\n sym_df: Any,\n mock_stock_sym_df: Any,\n stock_sym_df: Any) -> None:\n \"\"\"Initialize the SymDfs.\n\n Args:\n mock_sym_df: mock sym DataFrame\n sym_df: symbol DataFrame\n mock_stock_sym_df: mock stock symbol DataFrame\n stock_sym_df: stock symbols dataFrame\n\n \"\"\"\n self.mock_sym_df = mock_sym_df\n self.sym_df = sym_df\n self.mock_stock_sym_df = mock_stock_sym_df\n self.stock_sym_df = stock_sym_df\n\n\nclass TestAlgoAppMatchingSymbols:\n \"\"\"TestAlgoAppMatchingSymbols class.\"\"\"\n def test_request_symbols_all_combos(self,\n algo_app: \"AlgoApp\",\n mock_ib: Any) -> None:\n \"\"\"Test request_symbols with all patterns.\n\n Args:\n algo_app: pytest fixture instance of AlgoApp (see conftest.py)\n mock_ib: pytest fixture of contract_descriptions\n\n \"\"\"\n verify_algo_app_initialized(algo_app)\n\n logger.debug(\"about to connect\")\n algo_app.connect_to_ib(\"127.0.0.1\",\n algo_app.PORT_FOR_LIVE_TRADING,\n client_id=0)\n verify_algo_app_connected(algo_app)\n algo_app.request_throttle_secs = 0.01\n\n try:\n for idx, search_pattern in enumerate(\n mock_ib.search_patterns()):\n exp_counts = get_exp_number(search_pattern, mock_ib)\n # verify symbol table has zero entries for the symbol\n logger.info(\"calling verify_match_symbols req_type 1 \"\n \"sym %s num %d\", search_pattern, idx)\n algo_app.symbols = pd.DataFrame()\n algo_app.stock_symbols = pd.DataFrame()\n verify_match_symbols(algo_app,\n mock_ib,\n search_pattern,\n exp_counts=exp_counts,\n req_type=1)\n\n logger.info(\"calling verify_match_symbols req_type 2 \"\n \"sym %s num %d\", search_pattern, idx)\n algo_app.symbols = pd.DataFrame()\n algo_app.stock_symbols = pd.DataFrame()\n verify_match_symbols(algo_app,\n mock_ib,\n search_pattern,\n exp_counts=exp_counts,\n req_type=2)\n finally:\n logger.debug('disconnecting')\n algo_app.disconnect_from_ib()\n logger.debug('verifying disconnected')\n verify_algo_app_disconnected(algo_app)\n logger.debug('disconnected - test case returning')\n\n def test_request_symbols_zero_result(self,\n algo_app: \"AlgoApp\",\n mock_ib: Any\n ) -> None:\n \"\"\"Test request_symbols with pattern that finds exactly 1 symbol.\n\n Args:\n algo_app: instance of AlgoApp from conftest pytest fixture\n mock_ib: pytest fixture of contract_descriptions\n\n \"\"\"\n verify_algo_app_initialized(algo_app)\n logger.debug(\"about to connect\")\n algo_app.connect_to_ib(\"127.0.0.1\",\n algo_app.PORT_FOR_LIVE_TRADING,\n client_id=0)\n verify_algo_app_connected(algo_app)\n algo_app.request_throttle_secs = 0.01\n\n try:\n exp_counts = ExpCounts(0, 0, 0, 0)\n\n # verify symbol table has zero entries for the symbols\n for idx, search_pattern in enumerate(\n mock_ib.no_find_search_patterns()):\n logger.info(\"calling verify_match_symbols req_type 1 \"\n \"sym %s num %d\", search_pattern, idx)\n verify_match_symbols(algo_app,\n mock_ib,\n search_pattern,\n exp_counts=exp_counts,\n req_type=1)\n\n logger.info(\"calling verify_match_symbols req_type 2 \"\n \"sym %s num %d\", search_pattern, idx)\n verify_match_symbols(algo_app,\n mock_ib,\n search_pattern,\n exp_counts=exp_counts,\n req_type=2)\n\n finally:\n logger.debug('disconnecting')\n algo_app.disconnect_from_ib()\n logger.debug('verifying disconnected')\n verify_algo_app_disconnected(algo_app)\n logger.debug('disconnected - test case 
returning')\n\n def test_get_symbols_timeout(self,\n algo_app: \"AlgoApp\",\n mock_ib: Any) -> None:\n \"\"\"Test get_symbols gets timeout.\n\n Args:\n algo_app: instance of AlgoApp from conftest pytest fixture\n mock_ib: pytest fixture of contract_descriptions\n\n \"\"\"\n verify_algo_app_initialized(algo_app)\n try:\n logger.debug(\"about to connect\")\n algo_app.connect_to_ib(\"127.0.0.1\",\n mock_ib.PORT_FOR_SIMULATE_REQUEST_TIMEOUT,\n client_id=0)\n verify_algo_app_connected(algo_app)\n\n with pytest.raises(RequestTimeout):\n algo_app.request_symbols('A')\n\n finally:\n logger.debug('disconnecting')\n algo_app.disconnect_from_ib()\n logger.debug('verifying disconnected')\n verify_algo_app_disconnected(algo_app)\n logger.debug('disconnected - test case returning')\n\n def test_get_symbols_disconnect(self,\n algo_app: \"AlgoApp\",\n mock_ib: Any) -> None:\n \"\"\"Test get_symbols gets disconnected while waiting.\n\n Args:\n algo_app: instance of AlgoApp from conftest pytest fixture\n mock_ib: pytest fixture of contract_descriptions\n\n \"\"\"\n verify_algo_app_initialized(algo_app)\n try:\n logger.debug(\"about to connect\")\n algo_app.connect_to_ib(\"127.0.0.1\",\n mock_ib.\n PORT_FOR_SIMULATE_REQUEST_DISCONNECT,\n client_id=0)\n verify_algo_app_connected(algo_app)\n\n with pytest.raises(DisconnectDuringRequest):\n algo_app.request_symbols('A')\n\n finally:\n logger.debug('disconnecting')\n algo_app.disconnect_from_ib()\n logger.debug('verifying disconnected')\n verify_algo_app_disconnected(algo_app)\n logger.debug('disconnected - test case returning')\n\n def test_get_symbols(self,\n algo_app: \"AlgoApp\",\n mock_ib: Any) -> None:\n \"\"\"Test get_symbols with pattern that finds no symbols.\n\n Args:\n algo_app: instance of AlgoApp from conftest pytest fixture\n mock_ib: pytest fixture of contract_descriptions\n\n \"\"\"\n verify_algo_app_initialized(algo_app)\n try:\n logger.debug(\"about to connect\")\n algo_app.connect_to_ib(\"127.0.0.1\",\n algo_app.PORT_FOR_LIVE_TRADING,\n client_id=0)\n verify_algo_app_connected(algo_app)\n algo_app.request_throttle_secs = 0.01\n\n sym_dfs = SymDfs(pd.DataFrame(),\n pd.DataFrame(),\n pd.DataFrame(),\n pd.DataFrame())\n # full_stock_sym_match_descs = pd.DataFrame()\n # stock_symbols_ds = pd.DataFrame()\n # full_sym_match_descs = pd.DataFrame()\n # symbols_ds = pd.DataFrame()\n # we need to loop from A to Z\n for letter in string.ascii_uppercase:\n logger.debug(\"about to verify_get_symbols for letter %s\",\n letter)\n # full_stock_sym_match_descs, stock_symbols_ds,\\\n # full_sym_match_descs, symbols_ds = \\\n sym_dfs = verify_get_symbols(letter,\n algo_app,\n mock_ib,\n sym_dfs)\n\n finally:\n logger.debug('disconnecting')\n algo_app.disconnect_from_ib()\n logger.debug('verifying disconnected')\n verify_algo_app_disconnected(algo_app)\n logger.debug('disconnected - test case returning')\n\n def test_get_symbols_with_connect_disconnect(self,\n algo_app: \"AlgoApp\",\n mock_ib: Any) -> None:\n \"\"\"Test get_symbols with pattern that finds no symbols.\n\n Args:\n algo_app: instance of AlgoApp from conftest pytest fixture\n mock_ib: pytest fixture of contract_descriptions\n\n \"\"\"\n verify_algo_app_initialized(algo_app)\n\n sym_dfs = SymDfs(pd.DataFrame(),\n pd.DataFrame(),\n pd.DataFrame(),\n pd.DataFrame())\n # full_stock_sym_match_descs = pd.DataFrame()\n # full_sym_match_descs = pd.DataFrame()\n # stock_symbols_ds = pd.DataFrame()\n # symbols_ds = pd.DataFrame()\n # we need to loop from A to Z\n for letter in string.ascii_uppercase:\n try:\n 
logger.debug(\"about to connect\")\n algo_app.connect_to_ib(\"127.0.0.1\",\n algo_app.PORT_FOR_LIVE_TRADING,\n client_id=0)\n verify_algo_app_connected(algo_app)\n algo_app.request_throttle_secs = 0.01\n\n logger.debug(\"about to verify_get_symbols for letter %s\",\n letter)\n # full_stock_sym_match_descs, stock_symbols_ds, \\\n # full_sym_match_descs, symbols_ds = \\\n sym_dfs = verify_get_symbols(letter,\n algo_app,\n mock_ib,\n sym_dfs)\n\n finally:\n logger.debug('disconnecting')\n algo_app.disconnect_from_ib()\n logger.debug('verifying disconnected')\n verify_algo_app_disconnected(algo_app)\n\n\n###############################################################################\n# matching symbols verification\n###############################################################################\ndef verify_match_symbols(algo_app: \"AlgoApp\",\n mock_ib: Any,\n pattern: str,\n exp_counts: ExpCounts,\n req_type: int = 1) -> None:\n \"\"\"Verify that we find symbols correctly.\n\n Args:\n algo_app: instance of AlgoApp from conftest pytest fixture\n mock_ib: pytest fixture of contract_descriptions\n pattern: symbols to use for searching\n exp_counts: recursive and non-recursive matches expected\n req_type: indicates which request to do\n\n \"\"\"\n assert req_type == 1 or req_type == 2\n if req_type == 1:\n logger.debug(\"about to request_symbols for %s\", pattern)\n algo_app.request_symbols(pattern)\n # assert algo_app.request_id == 2\n else: # req_type == 2:\n logger.debug(\"about to get_symbols_recursive for %s\", pattern)\n algo_app.get_symbols_recursive(pattern)\n assert algo_app.request_id >= 2\n # algo_app.stock_symbols.drop_duplicates(inplace=True)\n\n logger.debug(\"getting stock_sym_match_descs\")\n symbol_starts_with_pattern = \\\n mock_ib.contract_descriptions['symbol'].map(\n lambda symbol: symbol.startswith(pattern))\n stock_sym_match_descs = mock_ib.contract_descriptions.loc[\n symbol_starts_with_pattern\n & (mock_ib.contract_descriptions['secType'] == 'STK')\n & (mock_ib.contract_descriptions['currency'] == 'USD')\n & (if_opt_in_derivativeSecTypes(mock_ib.contract_descriptions)),\n ['conId', 'symbol', 'secType', 'primaryExchange', 'currency',\n 'derivativeSecTypes']\n ]\n\n sym_match_descs = mock_ib.contract_descriptions.loc[\n symbol_starts_with_pattern\n & ((mock_ib.contract_descriptions['secType'] != 'STK')\n | (mock_ib.contract_descriptions['currency'] != 'USD')\n | if_opt_not_in_derivativeSecTypes(mock_ib.contract_descriptions)\n ),\n ['conId', 'symbol', 'secType', 'primaryExchange', 'currency',\n 'derivativeSecTypes']\n ]\n\n logger.debug(\"verifying results counts\")\n\n if req_type == 1:\n assert len(algo_app.stock_symbols) \\\n == exp_counts.stock_sym_non_recursive\n assert len(algo_app.symbols) == exp_counts.sym_non_recursive\n assert len(stock_sym_match_descs) == exp_counts.stock_sym_recursive\n assert len(sym_match_descs) == exp_counts.sym_recursive\n else:\n assert len(algo_app.stock_symbols) == exp_counts.stock_sym_recursive\n assert len(algo_app.symbols) == exp_counts.sym_recursive\n assert len(stock_sym_match_descs) == exp_counts.stock_sym_recursive\n assert len(sym_match_descs) == exp_counts.sym_recursive\n\n logger.debug(\"verifying results match DataFrame\")\n if exp_counts.stock_sym_recursive > 0:\n if req_type == 1:\n stock_sym_match_descs = stock_sym_match_descs.iloc[\n 0:exp_counts.stock_sym_non_recursive]\n stock_sym_match_descs = stock_sym_match_descs.set_index(\n ['conId']).sort_index()\n\n algo_app.stock_symbols.sort_index(inplace=True)\n comp_df = 
algo_app.stock_symbols.compare(stock_sym_match_descs)\n assert comp_df.empty\n\n if exp_counts.sym_recursive > 0:\n if req_type == 1:\n sym_match_descs = sym_match_descs.iloc[\n 0:exp_counts.sym_non_recursive]\n sym_match_descs = sym_match_descs.set_index(\n ['conId']).sort_index()\n\n algo_app.symbols.sort_index(inplace=True)\n comp_df = algo_app.symbols.compare(sym_match_descs)\n assert comp_df.empty\n logger.debug(\"all results verified for req_type %d\", req_type)\n\n\ndef if_opt_in_derivativeSecTypes(df: Any) -> Any:\n \"\"\"Find the symbols that have options.\n\n Args:\n df: pandas DataFrame of symbols\n\n Returns:\n array of boolean values used in pandas loc function\n\n \"\"\"\n ret_array = np.full(len(df), False)\n for i in range(len(df)):\n if 'OPT' in df.iloc[i].derivativeSecTypes:\n ret_array[i] = True\n return ret_array\n\n\ndef if_opt_not_in_derivativeSecTypes(df: Any) -> Any:\n \"\"\"Find the symbols that do not have options.\n\n Args:\n df: pandas DataFrame of symbols\n\n Returns:\n array of boolean values used in pandas loc function\n\n \"\"\"\n ret_array = np.full(len(df), True)\n for i in range(len(df)):\n if 'OPT' in df.iloc[i].derivativeSecTypes:\n ret_array[i] = False\n return ret_array\n\n\ndef get_exp_number(search_pattern: str, mock_ib: Any) -> ExpCounts:\n \"\"\"Helper function to get number of expected symbols.\n\n Args:\n search_pattern: search arg as string of one or more chars\n mock_ib: mock of ib\n\n Returns:\n number of expected matches for recursive and non-recursive requests\n \"\"\"\n combo_factor = (1 + 3 + 3**2 + 3**3)\n if len(search_pattern) > 4:\n # 5 or more chars will never match (for our mock setup)\n return ExpCounts(0, 0, 0, 0)\n if search_pattern[0] not in string.ascii_uppercase[0:17]:\n return ExpCounts(0, 0, 0, 0) # not in A-Q, inclusive\n if len(search_pattern) >= 2:\n if search_pattern[1] not in string.ascii_uppercase[1:3] + '.':\n return ExpCounts(0, 0, 0, 0) # not in 'BC.'\n combo_factor = (1 + 3 + 3**2)\n if len(search_pattern) >= 3:\n if search_pattern[2] not in string.ascii_uppercase[2:5]:\n return ExpCounts(0, 0, 0, 0) # not in 'CDE'\n combo_factor = (1 + 3)\n if len(search_pattern) == 4:\n if search_pattern[3] not in string.ascii_uppercase[3:5] + '.':\n return ExpCounts(0, 0, 0, 0) # not in 'DE.'\n combo_factor = 1\n\n num_stock_sym_combos = 0\n num_sym_combos = 0\n combo = mock_ib.get_combos(search_pattern[0])\n\n for item in combo:\n if item[0] == 'STK' and item[2] == 'USD' and 'OPT' in item[3]:\n num_stock_sym_combos += 1\n else:\n num_sym_combos += 1\n exp_stock_sym_recursive = num_stock_sym_combos * combo_factor\n exp_sym_recursive = num_sym_combos * combo_factor\n exp_stock_sym_non_recursive = \\\n math.ceil(min(16, len(combo) * combo_factor)\n * (num_stock_sym_combos / len(combo)))\n exp_sym_non_recursive = \\\n math.floor(min(16, len(combo) * combo_factor)\n * (num_sym_combos / len(combo)))\n\n return ExpCounts(exp_sym_non_recursive,\n exp_sym_recursive,\n exp_stock_sym_non_recursive,\n exp_stock_sym_recursive\n )\n\n\ndef verify_get_symbols(letter: str,\n algo_app: \"AlgoApp\",\n mock_ib: Any,\n sym_dfs: SymDfs) -> SymDfs:\n \"\"\"Verify get_symbols.\n\n Args:\n letter: the single letter we are collecting symbols for\n algo_app: instance of AlgoApp from conftest pytest fixture\n mock_ib: pytest fixture of contract_descriptions\n sym_dfs: saved DataFrames between calls\n\n Returns:\n updated sym_dfs\n\n \"\"\"\n if letter != 'A':\n # verify the symbol_status ds\n symbols_status_path = \\\n 
algo_app.ds_catalog.get_path('symbols_status')\n logger.info('symbols_status_path: %s', symbols_status_path)\n\n assert symbols_status_path.exists()\n symbols_status = pd.read_csv(symbols_status_path,\n header=0,\n index_col=0)\n test_letter = symbols_status.iloc[0, 0]\n assert test_letter == letter\n\n exp_counts = get_exp_number(letter, mock_ib)\n logger.debug(\"about to get_symbols for %s\", letter)\n algo_app.get_symbols()\n assert algo_app.request_id >= 2\n\n logger.debug(\"getting stock_sym_match_descs for %s\", letter)\n symbol_starts_with_pattern = \\\n mock_ib.contract_descriptions['symbol'].map(\n lambda symbol: symbol.startswith(letter))\n stock_sym_match_descs = mock_ib.contract_descriptions.loc[\n symbol_starts_with_pattern\n & (mock_ib.contract_descriptions['secType'] == 'STK')\n & (mock_ib.contract_descriptions['currency'] == 'USD')\n & (if_opt_in_derivativeSecTypes(\n mock_ib.contract_descriptions)),\n ['conId', 'symbol', 'secType', 'primaryExchange', 'currency',\n 'derivativeSecTypes']\n ]\n\n sym_match_descs = mock_ib.contract_descriptions.loc[\n symbol_starts_with_pattern\n & ((mock_ib.contract_descriptions['secType'] != 'STK')\n | (mock_ib.contract_descriptions['currency'] != 'USD')\n | if_opt_not_in_derivativeSecTypes(mock_ib.contract_descriptions)\n ),\n ['conId', 'symbol', 'secType', 'primaryExchange', 'currency',\n 'derivativeSecTypes']\n ]\n # we expect the stock_symbols to accumulate and grow, so the\n # number should now be what was there from the previous\n # iteration of this loop plus what we just now added\n assert len(stock_sym_match_descs) == exp_counts.stock_sym_recursive\n assert len(algo_app.stock_symbols) == (\n exp_counts.stock_sym_recursive + len(sym_dfs.stock_sym_df))\n\n assert len(sym_match_descs) == exp_counts.sym_recursive\n assert len(algo_app.symbols) == (\n exp_counts.sym_recursive + len(sym_dfs.sym_df))\n\n if exp_counts.stock_sym_recursive > 0:\n stock_sym_match_descs = stock_sym_match_descs.set_index(\n ['conId']).sort_index()\n sym_dfs.mock_stock_sym_df \\\n = sym_dfs.mock_stock_sym_df.append(stock_sym_match_descs)\n sym_dfs.mock_stock_sym_df.sort_index(inplace=True)\n\n # check the data set\n stock_symbols_path = algo_app.ds_catalog.get_path('stock_symbols')\n logger.info('stock_symbols_path: %s', stock_symbols_path)\n\n sym_dfs.stock_sym_df = pd.read_csv(stock_symbols_path,\n header=0,\n index_col=0,\n converters={\n 'derivativeSecTypes':\n lambda x: eval(x)})\n comp_df = algo_app.stock_symbols.compare(sym_dfs.stock_sym_df)\n assert comp_df.empty\n\n comp_df = algo_app.stock_symbols.compare(sym_dfs.mock_stock_sym_df)\n assert comp_df.empty\n\n if exp_counts.sym_recursive > 0:\n sym_match_descs = sym_match_descs.set_index(\n ['conId']).sort_index()\n sym_dfs.mock_sym_df = \\\n sym_dfs.mock_sym_df.append(sym_match_descs)\n sym_dfs.mock_sym_df.sort_index(inplace=True)\n\n # check the data set\n symbols_path = \\\n algo_app.ds_catalog.get_path('symbols')\n logger.info('symbols_path: %s', symbols_path)\n\n sym_dfs.sym_df = pd.read_csv(symbols_path,\n header=0,\n index_col=0,\n converters={\n 'derivativeSecTypes':\n lambda x: eval(x)})\n\n comp_df = algo_app.symbols.compare(sym_dfs.sym_df)\n assert comp_df.empty\n\n comp_df = algo_app.symbols.compare(sym_dfs.mock_sym_df)\n assert comp_df.empty\n\n return sym_dfs\n\n\n###############################################################################\n###############################################################################\n# error 
path\n###############################################################################\n###############################################################################\nclass TestErrorPath:\n \"\"\"Class to test error path.\"\"\"\n def test_error_path_by_request_when_not_connected(self,\n algo_app: \"AlgoApp\",\n capsys: Any) -> None:\n \"\"\"Test the error callback by any request while not connected.\n\n Args:\n algo_app: instance of AlgoApp from conftest pytest fixture\n capsys: pytest fixture to capture print output\n\n \"\"\"\n verify_algo_app_initialized(algo_app)\n logger.debug('verifying disconnected')\n verify_algo_app_disconnected(algo_app)\n\n logger.debug(\"about to request time\")\n algo_app.reqCurrentTime()\n captured = capsys.readouterr().out\n assert captured == 'Error: -1 504 Not connected' + '\\n'\n\n\n###############################################################################\n###############################################################################\n# contract details\n###############################################################################\n###############################################################################\nclass TestAlgoAppContractDetails:\n \"\"\"TestAlgoAppContractDetails class.\"\"\"\n\n def test_get_contract_details_0_entries(self,\n algo_app: \"AlgoApp\",\n mock_ib: Any\n ) -> None:\n \"\"\"Test contract details for non-existent conId.\n\n Args:\n algo_app: pytest fixture instance of AlgoApp (see conftest.py)\n mock_ib: pytest fixture of contract_descriptions\n\n \"\"\"\n verify_algo_app_initialized(algo_app)\n\n logger.debug(\"about to connect\")\n algo_app.connect_to_ib(\"127.0.0.1\",\n algo_app.PORT_FOR_LIVE_TRADING,\n client_id=0)\n\n # verify that algo_app is connected and alive with a valid reqId\n verify_algo_app_connected(algo_app)\n\n contract = Contract() # create an empty contract with conId of 0\n algo_app.get_contract_details(contract)\n\n verify_contract_details(contract, algo_app, mock_ib, [])\n\n algo_app.disconnect_from_ib()\n verify_algo_app_disconnected(algo_app)\n\n def test_get_contract_details_1_entry(self,\n algo_app: \"AlgoApp\",\n mock_ib: Any\n ) -> None:\n \"\"\"Test contract details for 1 entry.\n\n Args:\n algo_app: pytest fixture instance of AlgoApp (see conftest.py)\n mock_ib: pytest fixture of contract_descriptions\n\n \"\"\"\n verify_algo_app_initialized(algo_app)\n\n logger.debug(\"about to connect\")\n algo_app.connect_to_ib(\"127.0.0.1\",\n algo_app.PORT_FOR_LIVE_TRADING,\n client_id=0)\n\n # verify that algo_app is connected and alive with a valid reqId\n verify_algo_app_connected(algo_app)\n\n contract = Contract() # create an empty contract with conId of 0\n contract.conId = 7001\n algo_app.get_contract_details(contract)\n\n verify_contract_details(contract, algo_app, mock_ib, [7001])\n\n algo_app.disconnect_from_ib()\n verify_algo_app_disconnected(algo_app)\n\n def test_get_contract_details_2_entries(self,\n algo_app: \"AlgoApp\",\n mock_ib: Any\n ) -> None:\n \"\"\"Test contract details for 2 entries.\n\n Args:\n algo_app: pytest fixture instance of AlgoApp (see conftest.py)\n mock_ib: pytest fixture of contract_descriptions\n\n \"\"\"\n verify_algo_app_initialized(algo_app)\n\n logger.debug(\"about to connect\")\n algo_app.connect_to_ib(\"127.0.0.1\",\n algo_app.PORT_FOR_LIVE_TRADING,\n client_id=0)\n\n # verify that algo_app is connected and alive with a valid reqId\n verify_algo_app_connected(algo_app)\n\n contract = Contract() # create an empty contract with conId of 0\n contract.conId = 
7001\n algo_app.get_contract_details(contract)\n\n verify_contract_details(contract, algo_app, mock_ib, [7001])\n\n contract.conId = 7002\n algo_app.get_contract_details(contract)\n\n verify_contract_details(contract, algo_app, mock_ib, [7001, 7002])\n\n algo_app.disconnect_from_ib()\n verify_algo_app_disconnected(algo_app)\n\n def test_get_contract_details_duplicates(self,\n algo_app: \"AlgoApp\",\n mock_ib: Any\n ) -> None:\n \"\"\"Test contract details for 3 entries plus a duplicate.\n\n Args:\n algo_app: pytest fixture instance of AlgoApp (see conftest.py)\n mock_ib: pytest fixture of contract_descriptions\n\n \"\"\"\n verify_algo_app_initialized(algo_app)\n\n logger.debug(\"about to connect\")\n algo_app.connect_to_ib(\"127.0.0.1\",\n algo_app.PORT_FOR_LIVE_TRADING,\n client_id=0)\n\n # verify that algo_app is connected and alive with a valid reqId\n verify_algo_app_connected(algo_app)\n\n contract = Contract() # create an empty contract with conId of 0\n contract.conId = 7001\n algo_app.get_contract_details(contract)\n\n verify_contract_details(contract, algo_app, mock_ib, [7001])\n\n contract.conId = 7002\n algo_app.get_contract_details(contract)\n\n verify_contract_details(contract, algo_app, mock_ib, [7001, 7002])\n\n contract.conId = 7001 # try to add 7001 again\n algo_app.get_contract_details(contract)\n\n verify_contract_details(contract, algo_app, mock_ib, [7001, 7002])\n\n contract.conId = 7003\n algo_app.get_contract_details(contract)\n\n verify_contract_details(contract, algo_app, mock_ib,\n [7001, 7002, 7003])\n\n contract.conId = 7002 # another duplicate\n algo_app.get_contract_details(contract)\n\n verify_contract_details(contract, algo_app, mock_ib,\n [7001, 7002, 7003])\n\n algo_app.disconnect_from_ib()\n verify_algo_app_disconnected(algo_app)\n\n def test_get_contract_details_many_entries(self,\n algo_app: \"AlgoApp\",\n mock_ib: Any\n ) -> None:\n \"\"\"Test contract details for many entries.\n\n Args:\n algo_app: pytest fixture instance of AlgoApp (see conftest.py)\n mock_ib: pytest fixture of contract_descriptions\n\n \"\"\"\n verify_algo_app_initialized(algo_app)\n\n logger.debug(\"about to connect\")\n algo_app.connect_to_ib(\"127.0.0.1\",\n algo_app.PORT_FOR_LIVE_TRADING,\n client_id=0)\n\n # verify that algo_app is connected and alive with a valid reqId\n verify_algo_app_connected(algo_app)\n\n try:\n conId_list = []\n for conId in range(7001, 7033):\n contract = Contract() # create an empty contract\n contract.conId = conId\n conId_list.append(conId)\n algo_app.get_contract_details(contract)\n\n verify_contract_details(contract,\n algo_app,\n mock_ib,\n conId_list)\n finally:\n algo_app.disconnect_from_ib()\n verify_algo_app_disconnected(algo_app)\n\n\n###############################################################################\n# contract details verification\n###############################################################################\ndef verify_contract_details(contract: \"Contract\",\n algo_app: \"AlgoApp\",\n mock_ib: Any,\n conId_list: List[int]) -> None:\n \"\"\"Verify contract details.\n\n Args:\n contract: the contract used to get details\n algo_app: instance of AlgoApp from conftest pytest fixture\n mock_ib: pytest fixture of contract_descriptions\n conId_list: list of con ids\n\n \"\"\"\n assert len(algo_app.contract_details) == len(conId_list)\n\n if len(conId_list) > 0:\n # first, save the algo_app contracts and contract_details\n contracts_ds = algo_app.contracts\n contract_details_ds = algo_app.contract_details\n\n # next, reload algo_app 
contracts and contract_details from csv\n # so we can test that they were saved and restored\n # correctly (i.e., we will compare them against\n # what we just loaded)\n contracts_path = algo_app.ds_catalog.get_path('contracts')\n logger.info('contracts_path: %s', contracts_path)\n algo_app.contracts = algo_app.load_contracts(contracts_path)\n algo_app.load_contract_details()\n\n # print('contract_details_ds:\\n', contract_details_ds)\n # print('contract_details_ds.__dict__:\\n',\n # contract_details_ds.__dict__)\n\n for conId in conId_list:\n # match_desc = mock_ib.contract_descriptions.loc[\n # mock_ib.contract_descriptions['conId'] == conId]\n\n # match_desc = match_desc.iloc[0]\n\n contract1 = get_contract_obj(\n algo_app.contracts.loc[conId].to_dict())\n\n contract2 = get_contract_obj(contracts_ds.loc[conId].to_dict())\n\n compare_contracts(contract1,\n contract2)\n\n contract3 = get_contract_from_mock_desc(conId, mock_ib)\n\n compare_contracts(contract1,\n contract3)\n\n contract_details1 = get_contract_details_obj(\n algo_app.contract_details.loc[conId].to_dict())\n\n contract_details2 = get_contract_details_obj(\n contract_details_ds.loc[conId].to_dict())\n\n compare_contract_details(contract_details1,\n contract_details2)\n\n contract_details3 = \\\n get_contract_details_from_mock_desc(conId, mock_ib)\n\n compare_contract_details(contract_details1,\n contract_details3)\n\n\n###############################################################################\n###############################################################################\n# TestExtraContractFields\n###############################################################################\n###############################################################################\nclass TestExtraContractFields:\n \"\"\"TestExtraContractFields class.\"\"\"\n\n ###########################################################################\n # test_contract_combo_legs\n ###########################################################################\n def test_contract_extra_fields(self,\n algo_app: \"AlgoApp\",\n mock_ib: Any\n ) -> None:\n \"\"\"Test combo legs in contract.\n\n Args:\n algo_app: pytest fixture instance of AlgoApp (see conftest.py)\n mock_ib: pytest fixture of contract_descriptions\n\n \"\"\"\n num_contracts = 50\n contract_list = []\n contract_df = pd.DataFrame()\n # get the path for saving/loading the combo legs contract df\n extra_contract_path = \\\n algo_app.ds_catalog.get_path('extra_contract')\n logger.info('extra_contract_path: %s', extra_contract_path)\n\n for i in range(num_contracts):\n conId = 7001 + i\n contract = get_contract_from_mock_desc(conId,\n mock_ib,\n include_extra_details=True)\n\n # add combo legs\n combo_leg_list = build_combo_legs(i, mock_ib)\n if combo_leg_list:\n contract.comboLegs = combo_leg_list\n elif i % 2 == 1: # empty list\n # empty list for odd, None for even\n contract.comboLegs = []\n\n contract_list.append(contract)\n contract_dict = get_contract_dict(contract)\n contract_df = \\\n contract_df.append(pd.DataFrame(contract_dict,\n index=[contract.conId]))\n # Save dataframe to csv\n contract_df.to_csv(extra_contract_path)\n\n # read dataframe from csv\n contract_df2 = algo_app.load_contracts(extra_contract_path)\n\n for i in range(num_contracts):\n contract1 = contract_list[i]\n contract_dict2 = contract_df2.iloc[i].to_dict()\n contract2 = get_contract_obj(contract_dict2)\n\n compare_contracts(contract1, 
contract2)\n\n\n###############################################################################\n# build_combo_legs\n###############################################################################\ndef build_combo_legs(idx: int,\n mock_ib: Any) -> List[ComboLeg]:\n \"\"\"Build the combo leg list for a contract.\n\n Args:\n idx: the index of the entry being built\n mock_ib: pytest fixture of contract_descriptions\n\n Returns:\n list with zero or more ComboLeg items\n\n \"\"\"\n num_combo_legs = idx % 4 # vary the number built from 0 to 3\n combo_leg_list = []\n for j in range(num_combo_legs):\n combo_leg = ComboLeg()\n combo_leg.conId = \\\n mock_ib.combo_legs.cl_conId.iloc[idx + j]\n combo_leg.ratio = \\\n mock_ib.combo_legs.cl_ratio.iloc[idx + j]\n combo_leg.action = \\\n mock_ib.combo_legs.cl_action.iloc[idx + j]\n combo_leg.exchange = \\\n mock_ib.combo_legs.cl_exchange.iloc[idx + j]\n combo_leg.openClose = \\\n mock_ib.combo_legs.cl_openClose.iloc[idx + j]\n combo_leg.shortSaleSlot = \\\n mock_ib.combo_legs.cl_shortSaleSlot.iloc[idx + j]\n combo_leg.designatedLocation = \\\n mock_ib.combo_legs.cl_designatedLocation.iloc[idx + j]\n combo_leg.exemptCode = \\\n mock_ib.combo_legs.cl_exemptCode.iloc[idx + j]\n\n combo_leg_list.append(combo_leg)\n\n return combo_leg_list\n\n\n###############################################################################\n# get_contract_from_mock_desc\n###############################################################################\ndef get_contract_from_mock_desc(conId: int,\n mock_ib: Any,\n include_extra_details: bool = False\n ) -> Contract:\n \"\"\"Build and return a contract from the mock description.\n\n Args:\n conId: index of mock_desc and mock_dnc to use\n mock_ib: contains contract data frames\n include_extra_details: include more details beyond what is\n returned for reqContractDetails\n\n Returns:\n Contract with fields from input mock_desc and mock_dnc\n\n \"\"\"\n ret_con = Contract()\n ret_con.conId = mock_ib.contract_descriptions.at[conId, 'conId'] # cd\n ret_con.symbol = mock_ib.contract_descriptions.at[conId, 'symbol'] # cd\n ret_con.secType = mock_ib.contract_descriptions.at[conId, 'secType'] # cd\n\n if mock_ib.contract_descriptions.at[conId, 'lastTradeDateOrContractMonth']:\n split_date = \\\n mock_ib.contract_descriptions.at[\n conId, 'lastTradeDateOrContractMonth'].split()\n if len(split_date) > 0: # very well better be!\n ret_con.lastTradeDateOrContractMonth = split_date[0]\n\n ret_con.strike = mock_ib.contract_descriptions.at[conId, 'strike'] # cd\n ret_con.right = mock_ib.contract_descriptions.at[conId, 'right'] # cd\n ret_con.multiplier = \\\n mock_ib.contract_descriptions.at[conId, 'multiplier'] # cd\n ret_con.exchange = \\\n mock_ib.contract_descriptions.at[conId, 'exchange'] # cd\n ret_con.primaryExchange = \\\n mock_ib.contract_descriptions.at[conId, 'primaryExchange'] # cd\n ret_con.currency = \\\n mock_ib.contract_descriptions.at[conId, 'currency'] # cd\n ret_con.localSymbol = \\\n mock_ib.contract_descriptions.at[conId, 'localSymbol'] # cd\n ret_con.tradingClass = \\\n mock_ib.contract_descriptions.at[conId, 'tradingClass'] # cd\n\n ###########################################################################\n # following fields are not included with reqContractDetails\n ###########################################################################\n if include_extra_details:\n ret_con.includeExpired = \\\n mock_ib.contract_descriptions.at[conId, 'includeExpired']\n ret_con.secIdType = mock_ib.contract_descriptions.at[conId,\n 
'secIdType']\n ret_con.secId = mock_ib.contract_descriptions.at[conId, 'secId']\n\n # combos\n ret_con.comboLegsDescrip = \\\n mock_ib.contract_descriptions.at[conId, 'comboLegsDescrip']\n # ret_con.comboLegs = mock_ib.contract_descriptions.comboLegs\n\n # build a delta_neutral_contract every third time\n if (conId % 3) == 0:\n delta_neutral_contract = DeltaNeutralContract()\n # item() is used to convert numpy.int64 to python int\n delta_neutral_contract.conId = \\\n mock_ib.delta_neutral_contract.at[conId, 'conId']\n delta_neutral_contract.delta = \\\n mock_ib.delta_neutral_contract.at[conId, 'delta']\n delta_neutral_contract.price = \\\n mock_ib.delta_neutral_contract.at[conId, 'price']\n\n ret_con.deltaNeutralContract = delta_neutral_contract\n\n return ret_con\n\n\n###############################################################################\n# get_contract_details_from_mock_desc\n###############################################################################\ndef get_contract_details_from_mock_desc(conId: int,\n mock_ib: Any\n ) -> ContractDetails:\n \"\"\"Build and return a contract_details from the mock description.\n\n Args:\n conId: index of entry to use\n mock_ib: DataFrame with values for contract_details\n\n Returns:\n ContractDetails with fields from input mock_desc\n\n \"\"\"\n ret_con = ContractDetails()\n ret_con.contract = get_contract_from_mock_desc(conId, mock_ib)\n ret_con.marketName = \\\n mock_ib.contract_descriptions.at[conId, 'marketName'] # cd\n ret_con.minTick = mock_ib.contract_descriptions.at[conId, 'minTick'] # cd\n ret_con.orderTypes = \\\n mock_ib.contract_descriptions.at[conId, 'orderTypes'] # cd\n ret_con.validExchanges = \\\n mock_ib.contract_descriptions.at[conId, 'validExchanges'] # cd\n ret_con.priceMagnifier = \\\n mock_ib.contract_descriptions.at[conId, 'priceMagnifier'] # cd\n ret_con.underConId = \\\n mock_ib.contract_descriptions.at[conId, 'underConId'] # cd\n ret_con.longName = mock_ib.contract_descriptions.at[conId,\n 'longName'] # cd\n ret_con.contractMonth = \\\n mock_ib.contract_descriptions.at[conId, 'contractMonth'] # cd\n ret_con.industry = mock_ib.contract_descriptions.at[conId,\n 'industry'] # cd\n ret_con.category = mock_ib.contract_descriptions.at[conId,\n 'category'] # cd\n ret_con.subcategory = \\\n mock_ib.contract_descriptions.at[conId, 'subcategory'] # cd\n ret_con.timeZoneId = \\\n mock_ib.contract_descriptions.at[conId, 'timeZoneId'] # cd\n ret_con.tradingHours = \\\n mock_ib.contract_descriptions.at[conId, 'tradingHours'] # cd\n ret_con.liquidHours = \\\n mock_ib.contract_descriptions.at[conId, 'liquidHours'] # cd\n ret_con.evRule = mock_ib.contract_descriptions.at[conId, 'evRule'] # cd\n ret_con.evMultiplier = \\\n mock_ib.contract_descriptions.at[conId, 'evMultiplier'] # cd\n ret_con.mdSizeMultiplier = \\\n mock_ib.contract_descriptions.at[conId, 'mdSizeMultiplier'] # cd\n ret_con.aggGroup = mock_ib.contract_descriptions.at[conId,\n 'aggGroup'] # cd\n ret_con.underSymbol = \\\n mock_ib.contract_descriptions.at[conId, 'underSymbol'] # cd\n ret_con.underSecType = \\\n mock_ib.contract_descriptions.at[conId, 'underSecType'] # cd\n ret_con.marketRuleIds = \\\n mock_ib.contract_descriptions.at[conId, 'marketRuleIds'] # cd\n\n secIdList = mock_ib.contract_descriptions.at[conId, 'secIdList']\n new_secIdList = []\n for j in range(0,\n 2 * mock_ib.contract_descriptions.at[conId,\n 'secIdListCount'],\n 2):\n tag = secIdList[j]\n value = secIdList[j+1]\n tag_value = TagValue(tag, value)\n new_secIdList.append(tag_value)\n 
ret_con.secIdList = new_secIdList # cd\n\n ret_con.realExpirationDate = \\\n mock_ib.contract_descriptions.at[conId, 'realExpirationDate'] # cd\n\n # last trade time come from lastTradeDate as 'date time' (i.e., 2 items)\n if mock_ib.contract_descriptions.at[conId, 'lastTradeDateOrContractMonth']:\n split_date = \\\n mock_ib.contract_descriptions.at[\n conId, 'lastTradeDateOrContractMonth'].split()\n if len(split_date) > 1:\n ret_con.lastTradeTime = split_date[1]\n\n ret_con.stockType = mock_ib.contract_descriptions.at[conId,\n 'stockType'] # cd\n\n return ret_con\n\n\n###############################################################################\n# compare_tag_value\n###############################################################################\ndef compare_tag_value(tag_value1: TagValue,\n tag_value2: TagValue\n ) -> None:\n \"\"\"Compare two tag_value objects for equality.\n\n Args:\n tag_value1: tag_value 1\n tag_value2: tag_value 2\n\n \"\"\"\n assert tag_value1.tag == tag_value2.tag\n\n assert isinstance(tag_value1.tag, str)\n\n assert isinstance(tag_value2.tag, str)\n\n assert tag_value1.value == tag_value2.value\n\n assert isinstance(tag_value1.value, str)\n\n assert isinstance(tag_value2.value, str)\n\n\n###############################################################################\n# compare_combo_legs\n###############################################################################\ndef compare_combo_legs(cl1: ComboLeg,\n cl2: ComboLeg\n ) -> None:\n \"\"\"Compare two combo leg objects for equality.\n\n Args:\n cl1: combo leg 1\n cl2: combo leg 2\n\n \"\"\"\n assert cl1.conId == cl2.conId\n\n assert cl1.ratio == cl2.ratio\n\n assert cl1.action == cl2.action\n\n assert cl1.exchange == cl2.exchange\n\n assert cl1.openClose == cl2.openClose\n\n assert cl1.shortSaleSlot == cl2.shortSaleSlot\n\n assert cl1.designatedLocation == cl2.designatedLocation\n\n assert cl1.exemptCode == cl2.exemptCode\n\n verify_combo_leg_types(cl1)\n verify_combo_leg_types(cl1)\n\n\n###############################################################################\n# verify_combo_leg_types\n###############################################################################\ndef verify_combo_leg_types(combo_leg: ComboLeg) -> None:\n \"\"\"Verify that combo_leg fields are correct type.\n\n Args:\n combo_leg: combo_leg to verify\n\n \"\"\"\n assert isinstance(combo_leg.conId, (int, np.int64))\n\n assert isinstance(combo_leg.ratio, (int, np.int64))\n\n assert isinstance(combo_leg.action, str)\n\n assert isinstance(combo_leg.exchange, str)\n\n assert isinstance(combo_leg.openClose, (int, np.int64))\n\n assert isinstance(combo_leg.shortSaleSlot, (int, np.int64))\n\n assert isinstance(combo_leg.designatedLocation, str)\n\n assert isinstance(combo_leg.exemptCode, (int, np.int64))\n\n\n###############################################################################\n# compare_delta_neutral_contracts\n###############################################################################\ndef compare_delta_neutral_contracts(con1: DeltaNeutralContract,\n con2: DeltaNeutralContract\n ) -> None:\n \"\"\"Compare two delta neutral contracts for equality.\n\n Args:\n con1: contract 1\n con2: contract 2\n\n \"\"\"\n assert con1.conId == con2.conId\n\n assert isinstance(con1.conId, (int, np.int64))\n\n assert isinstance(con2.conId, int)\n\n assert con1.delta == con2.delta\n\n assert isinstance(con1.delta, float)\n\n assert isinstance(con2.delta, float)\n\n assert con1.price == con2.price\n\n assert isinstance(con1.price, float)\n\n assert 
isinstance(con2.price, float)\n\n\n###############################################################################\n# compare_contracts\n###############################################################################\ndef compare_contracts(con1: Contract, con2: Contract) -> None:\n \"\"\"Compare two contracts for equality.\n\n Args:\n con1: contract 1\n con2: contract 2\n\n \"\"\"\n assert con1.conId == con2.conId\n\n assert con1.symbol == con2.symbol\n\n assert con1.secType == con2.secType\n\n assert (con1.lastTradeDateOrContractMonth\n == con2.lastTradeDateOrContractMonth)\n\n assert con1.strike == con2.strike\n\n assert con1.right == con2.right\n\n assert con1.multiplier == con2.multiplier\n\n assert con1.exchange == con2.exchange\n\n assert con1.primaryExchange == con2.primaryExchange\n\n assert con1.currency == con2.currency\n\n assert con1.localSymbol == con2.localSymbol\n\n assert con1.tradingClass == con2.tradingClass\n\n assert con1.includeExpired == con2.includeExpired\n\n assert con1.secIdType == con2.secIdType\n\n assert con1.secId == con2.secId\n\n # combos\n assert con1.comboLegsDescrip == con2.comboLegsDescrip\n\n if con1.comboLegs and con2.comboLegs:\n assert len(con1.comboLegs) == len(con2.comboLegs)\n\n for i in range(len(con1.comboLegs)):\n compare_combo_legs(con1.comboLegs[i],\n con2.comboLegs[i])\n else: # check whether one contract has it and the other does not\n assert not (con1.comboLegs or con2.comboLegs)\n\n if con1.deltaNeutralContract and con2.deltaNeutralContract:\n compare_delta_neutral_contracts(con1.deltaNeutralContract,\n con2.deltaNeutralContract)\n else: # check whether one contract has it and one does not\n assert not (con1.deltaNeutralContract or con2.deltaNeutralContract)\n\n verify_contract_types(con1)\n verify_contract_types(con2)\n\n\n###############################################################################\n# verify_contract_types\n###############################################################################\ndef verify_contract_types(contract: Contract) -> None:\n \"\"\"Verify that contract fields are correct type.\n\n Args:\n contract: contract to verify\n\n \"\"\"\n assert isinstance(contract.conId, (int, np.int64))\n\n assert isinstance(contract.symbol, str)\n\n assert isinstance(contract.secType, str)\n\n assert isinstance(contract.lastTradeDateOrContractMonth, str)\n\n assert isinstance(contract.strike, float)\n\n assert isinstance(contract.right, str)\n\n assert isinstance(contract.multiplier, str)\n\n assert isinstance(contract.exchange, str)\n\n assert isinstance(contract.primaryExchange, str)\n\n assert isinstance(contract.currency, str)\n\n assert isinstance(contract.localSymbol, str)\n\n assert isinstance(contract.tradingClass, str)\n\n assert isinstance(contract.includeExpired, (bool, np.bool_))\n\n assert isinstance(contract.secIdType, str)\n\n assert isinstance(contract.secId, str)\n\n # combos\n assert isinstance(contract.comboLegsDescrip, str)\n\n assert isinstance(contract.comboLegs, (list, type(None)))\n\n if contract.comboLegs:\n for combo_leg in contract.comboLegs:\n assert isinstance(combo_leg, ComboLeg)\n\n assert isinstance(contract.deltaNeutralContract,\n (DeltaNeutralContract, type(None)))\n\n\n###############################################################################\n# compare_contract_details\n###############################################################################\ndef compare_contract_details(con1: ContractDetails,\n con2: ContractDetails\n ) -> None:\n \"\"\"Compare two contract_details for 
equality.\n\n Args:\n con1: contract_details 1\n con2: contract_details 2\n\n \"\"\"\n if con1.contract and con2.contract:\n compare_contracts(con1.contract, con2.contract)\n\n else: # check whether one contract_details has it, one does not\n assert not (con1.contract or con2.contract)\n\n assert con1.marketName == con2.marketName\n\n assert con1.minTick == con2.minTick\n\n assert con1.orderTypes == con2.orderTypes\n\n assert con1.validExchanges == con2.validExchanges\n\n assert con1.priceMagnifier == con2.priceMagnifier\n\n assert con1.underConId == con2.underConId\n\n assert con1.longName == con2.longName\n\n assert con1.contractMonth == con2.contractMonth\n\n assert con1.industry == con2.industry\n\n assert con1.category == con2.category\n\n assert con1.subcategory == con2.subcategory\n\n assert con1.timeZoneId == con2.timeZoneId\n\n assert con1.tradingHours == con2.tradingHours\n\n assert con1.liquidHours == con2.liquidHours\n\n assert con1.evRule == con2.evRule\n\n assert con1.evMultiplier == con2.evMultiplier\n\n assert con1.mdSizeMultiplier == con2.mdSizeMultiplier\n\n assert con1.aggGroup == con2.aggGroup\n\n assert con1.underSymbol == con2.underSymbol\n\n assert con1.underSecType == con2.underSecType\n\n assert con1.marketRuleIds == con2.marketRuleIds\n\n if con1.secIdList and con2.secIdList:\n assert len(con1.secIdList) == len(con2.secIdList)\n for i in range(len(con1.secIdList)):\n compare_tag_value(con1.secIdList[i], con2.secIdList[i])\n else: # check whether one contract_details has it, one does not\n assert not (con1.secIdList or con2.secIdList)\n\n assert con1.realExpirationDate == con2.realExpirationDate\n\n assert con1.lastTradeTime == con2.lastTradeTime\n\n assert con1.stockType == con2.stockType\n\n # BOND values\n assert con1.cusip == con2.cusip\n\n assert con1.ratings == con2.ratings\n\n assert con1.descAppend == con2.descAppend\n\n assert con1.bondType == con2.bondType\n\n assert con1.couponType == con2.couponType\n\n assert con1.callable == con2.callable\n\n assert con1.putable == con2.putable\n\n assert con1.coupon == con2.coupon\n\n assert con1.convertible == con2.convertible\n\n assert con1.maturity == con2.maturity\n\n assert con1.issueDate == con2.issueDate\n\n assert con1.nextOptionDate == con2.nextOptionDate\n\n assert con1.nextOptionType == con2.nextOptionType\n\n assert con1.nextOptionPartial == con2.nextOptionPartial\n\n assert con1.notes == con2.notes\n\n\n###############################################################################\n# fundamental data\n###############################################################################\n# class TestAlgoAppFundamentalData:\n# \"\"\"TestAlgoAppContractDetails class.\"\"\"\n#\n# def test_get_contract_details_0_entries(self,\n# algo_app: \"AlgoApp\",\n# mock_ib: Any\n# ) -> None:\n# \"\"\"Test contract details for non-existent conId.\n#\n# Args:\n# algo_app: pytest fixture instance of AlgoApp (see conftest.py)\n# mock_ib: pytest fixture of contract_descriptions\n#\n# \"\"\"\n# verify_algo_app_initialized(algo_app)\n#\n# logger.debug(\"about to connect\")\n# algo_app.connect_to_ib(\"127.0.0.1\",\n# algo_app.PORT_FOR_LIVE_TRADING,\n# client_id=0)\n#\n# # verify that algo_app is connected and alive with a valid reqId\n# verify_algo_app_connected(algo_app)\n#\n# contract = Contract() # create an empty contract with conId of 0\n# algo_app.get_contract_details(contract)\n#\n# verify_contract_details(contract, algo_app, mock_ib, [0])\n#\n# algo_app.disconnect_from_ib()\n# verify_algo_app_disconnected(algo_app)\n"
] | [
[
"pandas.read_csv",
"pandas.DataFrame"
]
] |
bsierieb1/SCDCdm_public | [
"db610c1bda904f79a8142da767cf8e62d1cd8d32"
] | [
"paper_simulation_scripts/run_one_job.py"
] | [
"\"\"\"\nThis script is executed in each job on the server to run simulation studies on all the parameters that are passed to it\n\"\"\"\nimport sys\nimport ast\nimport numpy as np\n\nfrom scdcdm.util import multi_parameter_sampling as mult\n\n# Convert string parameters to lists\ncases = ast.literal_eval(sys.argv[1])\nprint(\"cases:\", cases)\nK = ast.literal_eval(sys.argv[2])\nprint(\"K:\", K)\nn_total = ast.literal_eval(sys.argv[3])\nprint(\"n_total:\", n_total)\nn_samples = ast.literal_eval(sys.argv[4])\nprint(\"n_samples:\", n_samples)\nprint(sys.argv[5])\nb_true = ast.literal_eval(sys.argv[5])\nprint(\"b_true:\", b_true)\nw_true = ast.literal_eval(sys.argv[6])\nprint(\"w_true:\", w_true)\nnum_results = ast.literal_eval(sys.argv[7])\nprint(\"num_results:\", num_results)\nn = ast.literal_eval(sys.argv[8])\nprint(\"n:\", n)\n\n# Run simulation study\n\np = mult.MultiParamSimulation(cases, K, n_total, n_samples, b_true, w_true, num_results,\n baseline_index=4, formula=\"x_0\")\n\np.simulate()\n\np.save(path=\"/home/icb/johannes.ostner/compositional_diff/compositionalDiff-johannes_tests_2/benchmark_results/overall_benchmark/\",\n filename=\"result_b_\" + str(np.round(b_true, 3)).replace(\" \", \" \") + \"_w_\" + str(w_true) + \"_round_\" + str(n))\n"
] | [
[
"numpy.round"
]
] |
yupeijei1997/unif | [
"16685a89446e6ce14080439162a9bfd0c75f0521"
] | [
"uf/application/uda.py"
] | [
"# coding:=utf-8\n# Copyright 2021 Tencent. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the 'License');\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an 'AS IS' BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n''' Applications based on UDA. '''\n\nimport numpy as np\n\nfrom uf.tools import tf\nfrom .base import ClassifierModule\nfrom .bert import BERTClassifier, get_bert_config, get_key_to_depths\nfrom uf.modeling.bert import BERTEncoder\nfrom uf.modeling.uda import UDADecoder\nfrom uf.tokenization.word_piece import get_word_piece_tokenizer\nimport uf.utils as utils\nimport uf.modeling.util as util\n\n\n\nclass UDAClassifier(BERTClassifier, ClassifierModule):\n ''' Single-label classifier on UDA. '''\n _INFER_ATTRIBUTES = BERTClassifier._INFER_ATTRIBUTES\n\n def __init__(self,\n config_file,\n vocab_file,\n max_seq_length=128,\n label_size=None,\n init_checkpoint=None,\n output_dir=None,\n gpu_ids=None,\n drop_pooler=False,\n uda_softmax_temp=-1,\n uda_confidence_thresh=-1,\n tsa_schedule='linear',\n do_lower_case=True,\n truncate_method='LIFO'):\n super(ClassifierModule, self).__init__(\n init_checkpoint, output_dir, gpu_ids)\n\n self.batch_size = 0\n self.max_seq_length = max_seq_length\n self.label_size = label_size\n self.truncate_method = truncate_method\n self._drop_pooler = drop_pooler\n self._uda_softmax_temp = uda_softmax_temp\n self._uda_confidence_thresh = uda_confidence_thresh\n self._tsa_schedule = tsa_schedule\n self._id_to_label = None\n self.__init_args__ = locals()\n\n self.bert_config = get_bert_config(config_file)\n self.tokenizer = get_word_piece_tokenizer(vocab_file, do_lower_case)\n self._key_to_depths = get_key_to_depths(\n self.bert_config.num_hidden_layers)\n\n if '[CLS]' not in self.tokenizer.vocab:\n self.tokenizer.add('[CLS]')\n self.bert_config.vocab_size += 1\n tf.logging.info('Add necessary token `[CLS]` into vocabulary.')\n if '[SEP]' not in self.tokenizer.vocab:\n self.tokenizer.add('[SEP]')\n self.bert_config.vocab_size += 1\n tf.logging.info('Add necessary token `[SEP]` into vocabulary.')\n\n def convert(self, X=None, y=None, sample_weight=None, X_tokenized=None,\n is_training=False):\n self._assert_legal(X, y, sample_weight, X_tokenized)\n\n # simplified when not training\n if not is_training:\n return super().convert(\n X, y, sample_weight, X_tokenized, is_training)\n\n if is_training:\n assert y is not None, '`y` can\\'t be None.'\n\n n_inputs = None\n data = {}\n\n # convert X\n if X or X_tokenized:\n tokenized = False if X else X_tokenized\n (input_ids, input_mask, segment_ids,\n aug_input_ids, aug_input_mask, aug_segment_ids,\n is_supervised) = self._convert_X_reimp(\n X_tokenized if tokenized else X, y, tokenized=tokenized)\n data['input_ids'] = np.array(input_ids, dtype=np.int32)\n data['input_mask'] = np.array(input_mask, dtype=np.int32)\n data['segment_ids'] = np.array(segment_ids, dtype=np.int32)\n data['aug_input_ids'] = np.array(aug_input_ids, dtype=np.int32)\n data['aug_input_mask'] = np.array(aug_input_mask, dtype=np.int32)\n data['aug_segment_ids'] = np.array(aug_segment_ids, dtype=np.int32)\n data['is_supervised'] = 
np.array(is_supervised, dtype=np.int32)\n n_inputs = len(input_ids)\n\n if n_inputs < self.batch_size:\n self.batch_size = max(n_inputs, len(self._gpu_ids))\n\n # convert y\n if y:\n label_ids = self._convert_y(y)\n data['label_ids'] = np.array(label_ids, dtype=np.int32)\n\n # convert sample_weight\n if is_training or y:\n sample_weight = self._convert_sample_weight(\n sample_weight, n_inputs)\n data['sample_weight'] = np.array(sample_weight, dtype=np.float32)\n\n return data\n\n def _convert_X_reimp(self, X_target, y, tokenized):\n\n # tokenize input texts\n sup_ori_input_tokens = []\n aug_input_tokens = []\n is_supervised = []\n for ex_id, example in enumerate(X_target):\n try:\n label = y[ex_id]\n\n if label is None:\n assert len(example) == 2\n sup_ori_input_tokens.append(\n self._convert_x(example[0], tokenized))\n aug_input_tokens.append(\n self._convert_x(example[1], tokenized))\n is_supervised.append(0)\n else:\n sup_ori_input_tokens.append(\n self._convert_x(example, tokenized))\n aug_input_tokens.append([])\n is_supervised.append(1)\n except AssertionError:\n raise AssertionError (\n 'Must have exactly two inputs for an '\n 'unsupervised example, respectively original '\n 'and augmented.')\n except Exception:\n raise ValueError(\n 'Wrong input format (line %d): \\'%s\\'. '\n % (ex_id, example))\n\n input_ids = []\n input_mask = []\n segment_ids = []\n for ex_id, segments in enumerate(sup_ori_input_tokens):\n _input_tokens = ['[CLS]']\n _input_ids = []\n _input_mask = [1]\n _segment_ids = [0]\n\n utils.truncate_segments(\n segments, self.max_seq_length - len(segments) - 1,\n truncate_method=self.truncate_method)\n for s_id, segment in enumerate(segments):\n _segment_id = min(s_id, 1)\n _input_tokens.extend(segment + ['[SEP]'])\n _input_mask.extend([1] * (len(segment) + 1))\n _segment_ids.extend([_segment_id] * (len(segment) + 1))\n\n _input_ids = self.tokenizer.convert_tokens_to_ids(_input_tokens)\n\n # padding\n for _ in range(self.max_seq_length - len(_input_ids)):\n _input_ids.append(0)\n _input_mask.append(0)\n _segment_ids.append(0)\n\n input_ids.append(_input_ids)\n input_mask.append(_input_mask)\n segment_ids.append(_segment_ids)\n\n aug_input_ids = []\n aug_input_mask = []\n aug_segment_ids = []\n for ex_id, segments in enumerate(aug_input_tokens):\n _input_tokens = ['[CLS]']\n _input_ids = []\n _input_mask = [1]\n _segment_ids = [0]\n\n utils.truncate_segments(\n segments, self.max_seq_length - len(segments) - 1,\n truncate_method=self.truncate_method)\n for s_id, segment in enumerate(segments):\n _segment_id = min(s_id, 1)\n _input_tokens.extend(segment + ['[SEP]'])\n _input_mask.extend([1] * (len(segment) + 1))\n _segment_ids.extend([_segment_id] * (len(segment) + 1))\n\n _input_ids = self.tokenizer.convert_tokens_to_ids(_input_tokens)\n\n # padding\n for _ in range(self.max_seq_length - len(_input_ids)):\n _input_ids.append(0)\n _input_mask.append(0)\n _segment_ids.append(0)\n\n aug_input_ids.append(_input_ids)\n aug_input_mask.append(_input_mask)\n aug_segment_ids.append(_segment_ids)\n\n return (input_ids, input_mask, segment_ids,\n aug_input_ids, aug_input_mask, aug_segment_ids,\n is_supervised)\n\n def _convert_y(self, y):\n label_set = set(y)\n if None in label_set:\n label_set -= {None}\n\n # automatically set `label_size`\n if self.label_size:\n assert len(label_set) <= self.label_size, (\n 'Number of unique `y`s exceeds `label_size`.')\n else:\n self.label_size = len(label_set)\n\n # automatically set `id_to_label`\n if not self._id_to_label:\n 
self._id_to_label = list(label_set)\n try:\n # Allign if user inputs continual integers.\n # e.g. [2, 0, 1]\n self._id_to_label = list(sorted(self._id_to_label))\n except Exception:\n pass\n if len(self._id_to_label) < self.label_size:\n for i in range(len(self._id_to_label), self.label_size):\n self._id_to_label.append(i)\n\n # automatically set `label_to_id` for prediction\n self._label_to_id = {\n label: index for index, label in enumerate(self._id_to_label)}\n\n label_ids = [self._label_to_id[label]\n if label is not None else -1 for label in y]\n return label_ids\n\n def _set_placeholders(self, target, on_export=False, **kwargs):\n self.placeholders = {\n 'input_ids': utils.get_placeholder(\n target, 'input_ids',\n [None, self.max_seq_length], tf.int32),\n 'input_mask': utils.get_placeholder(\n target, 'input_mask',\n [None, self.max_seq_length], tf.int32),\n 'segment_ids': utils.get_placeholder(\n target, 'segment_ids',\n [None, self.max_seq_length], tf.int32),\n 'label_ids': utils.get_placeholder(\n target, 'label_ids', [None], tf.int32),\n }\n if kwargs.get('is_training'):\n self.placeholders['aug_input_ids'] = utils.get_placeholder(\n target, 'aug_input_ids',\n [None, self.max_seq_length], tf.int32)\n self.placeholders['aug_input_mask'] = utils.get_placeholder(\n target, 'aug_input_mask',\n [None, self.max_seq_length], tf.int32)\n self.placeholders['aug_segment_ids'] = utils.get_placeholder(\n target, 'aug_segment_ids',\n [None, self.max_seq_length], tf.int32)\n self.placeholders['is_supervised'] = utils.get_placeholder(\n target, 'is_supervised',\n [None], tf.float32)\n if not on_export:\n self.placeholders['sample_weight'] = \\\n utils.get_placeholder(\n target, 'sample_weight',\n [None], tf.float32)\n\n def _forward(self, is_training, split_placeholders, **kwargs):\n\n if not is_training:\n return super()._forward(is_training, split_placeholders, **kwargs)\n\n aug_input_ids = tf.boolean_mask(\n split_placeholders['aug_input_ids'],\n mask=(1.0 - split_placeholders['is_supervised']),\n axis=0)\n aug_input_mask = tf.boolean_mask(\n split_placeholders['aug_input_mask'],\n mask=(1.0 - split_placeholders['is_supervised']),\n axis=0)\n aug_segment_ids = tf.boolean_mask(\n split_placeholders['aug_segment_ids'],\n mask=(1.0 - split_placeholders['is_supervised']),\n axis=0)\n input_ids = tf.concat(\n [split_placeholders['input_ids'],\n aug_input_ids], axis=0)\n input_mask = tf.concat(\n [split_placeholders['input_mask'],\n aug_input_mask], axis=0)\n segment_ids = tf.concat(\n [split_placeholders['segment_ids'],\n aug_segment_ids], axis=0)\n encoder = BERTEncoder(\n bert_config=self.bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n scope='bert',\n drop_pooler=self._drop_pooler,\n **kwargs)\n encoder_output = encoder.get_pooled_output()\n\n label_ids = split_placeholders['label_ids']\n is_expanded = tf.zeros_like(label_ids, dtype=tf.float32)\n batch_size = util.get_shape_list(aug_input_ids)[0]\n aug_is_expanded = tf.ones((batch_size), dtype=tf.float32)\n is_expanded = tf.concat([is_expanded, aug_is_expanded], axis=0)\n decoder = UDADecoder(\n is_training=is_training,\n input_tensor=encoder_output,\n is_supervised=split_placeholders['is_supervised'],\n is_expanded=is_expanded,\n label_ids=label_ids,\n label_size=self.label_size,\n sample_weight=split_placeholders.get('sample_weight'),\n scope='cls/seq_relationship',\n global_step=self._global_step,\n num_train_steps=self.total_steps,\n 
uda_softmax_temp=self._uda_softmax_temp,\n uda_confidence_thresh=self._uda_confidence_thresh,\n tsa_schedule=self._tsa_schedule,\n **kwargs)\n (total_loss, losses, probs, preds) = decoder.get_forward_outputs()\n return (total_loss, losses, probs, preds)\n\n def _get_fit_ops(self, as_feature=False):\n ops = [self._train_op,\n self._preds['preds'],\n self._losses['supervised'],\n self._losses['unsupervised'],\n ]\n if as_feature:\n ops.extend([self.placeholders['is_supervised'],\n self.placeholders['label_ids']])\n return ops\n\n def _get_fit_info(self, output_arrays, feed_dict, as_feature=False):\n\n if as_feature:\n batch_is_sup = output_arrays[-2]\n batch_labels = output_arrays[-1]\n else:\n batch_is_sup = feed_dict[self.placeholders['is_supervised']]\n batch_labels = feed_dict[self.placeholders['label_ids']]\n\n # accuracy\n batch_preds = output_arrays[1]\n accuracy = np.sum((batch_preds == batch_labels) * batch_is_sup) / \\\n np.sum(batch_is_sup)\n\n # supervised loss\n batch_sup_losses = output_arrays[2]\n sup_loss = np.mean(batch_sup_losses)\n\n # supervised loss\n batch_unsup_losses = output_arrays[3]\n unsup_loss = np.mean(batch_unsup_losses)\n\n info = ''\n info += ', accuracy %.4f' % accuracy\n info += ', supervised loss %.6f' % sup_loss\n info += ', unsupervised loss %.6f' % unsup_loss\n\n return info\n"
] | [
[
"numpy.array",
"numpy.sum",
"numpy.mean"
]
] |
ege-erdil/logistic-fit | [
"7c6cc9ed35877ed8d142dd75b7b98658e19cf7cb"
] | [
"logistic_fit.py"
] | [
"from autograd import grad\r\nimport autograd.numpy as np\r\nfrom scipy.stats import logistic, norm\r\nfrom scipy.optimize import minimize\r\n\r\ndef logistic_pdf(x, loc, scale):\r\n y = (x - loc)/scale\r\n return np.exp(-y)/(scale * (1 + np.exp(-y))**2)\r\n\r\ndef logistic_cdf(x, loc, scale):\r\n y = (x-loc)/scale\r\n if y < -100:\r\n return 0\r\n elif y > 100:\r\n return 1\r\n else:\r\n return 1/(1 + np.exp(-y))\r\n\r\ndef logistic_logpdf(x, loc, scale):\r\n y = (x - loc)/scale\r\n if y < -250:\r\n return y - np.log(scale)\r\n elif y > 250:\r\n return -y - np.log(scale)\r\n else:\r\n return -y - np.log(scale) - 2 * np.log(1 + np.exp(-y))\r\n\r\ndef square_dist(a1, a2):\r\n s = 0\r\n for k in range(len(a1)):\r\n s += (a1[k] - a2[k])**2\r\n return s\r\n\r\ndef log_likelihood_logistic(data, params):\r\n n = len(data)\r\n c = (len(params) + 1)//3\r\n r = 0\r\n\r\n if (len(params) + 1) % 3 != 0:\r\n print(\"Parameters specified incorrectly!\")\r\n return None\r\n\r\n else:\r\n weights = [1]\r\n for k in range(c-1):\r\n weights.append(np.exp(params[2*c + k]))\r\n s = np.sum(weights)\r\n for x in data:\r\n pdf_list = [logistic_logpdf(x, params[2*j], np.exp(params[2*j+1])) for j in range(c)]\r\n pdf_list_avg = np.sum(pdf_list)/c\r\n pdf_list_n = [weights[j] * np.exp(pdf_list[j] - pdf_list_avg) for j in range(c)]\r\n \r\n r += (pdf_list_avg + np.log(np.sum(pdf_list_n)/s))/n\r\n return r\r\n\r\ndef cdf_loss(percentiles, params):\r\n n = len(percentiles)\r\n c = (len(params) + 1)//3\r\n r = 0\r\n\r\n if (len(params) + 1) % 3 != 0:\r\n print(\"Parameters specified incorrectly!\")\r\n return None\r\n\r\n else:\r\n weights = [1]\r\n for k in range(c-1):\r\n weights.append(np.exp(params[2*c + k]))\r\n s = np.sum(weights)\r\n for q in range(1, n):\r\n cdf_list = [logistic_cdf(percentiles[q-1], params[2*j], np.exp(params[2*j+1])) for j in range(c)]\r\n cdf_list_n = [weights[j] * cdf_list[j] for j in range(c)]\r\n \r\n r += (np.sum(cdf_list_n)/s - q/n)**2/n\r\n return r\r\n\r\n\r\ndef estimate(data, bins=20, num = 1, tol = 0.01, maxiter = 100):\r\n fit_params = np.zeros(3*num - 1)\r\n a = np.average(data)\r\n s = np.log(np.std(data))\r\n percentiles = [np.percentile(data, k) for k in range(100//bins, 100, 100//bins)]\r\n for i in range(num):\r\n fit_params[2*i] = np.random.normal(loc=a, scale=np.exp(s), size=1)\r\n fit_params[2*i+1] = np.random.normal(loc=s - np.log(num), scale=1, size=1)\r\n\r\n def training_loss(params):\r\n return cdf_loss(percentiles, params) + 0.0001 * np.dot(params[2*num:], params[2*num:])\r\n \r\n training_loss_jac = grad(training_loss)\r\n\r\n res = minimize(training_loss, jac=training_loss_jac, x0=fit_params, method=\"BFGS\", options = {\"maxiter\": maxiter, \"gtol\": tol})\r\n print(res)\r\n final_params = res.x\r\n for i in range(num):\r\n final_params[2*i+1] = np.exp(final_params[2*i+1])\r\n results = []\r\n for i in range(num):\r\n results.append(final_params[2*i])\r\n results.append(logistic.isf(0.25, loc=final_params[2*i], scale=final_params[2*i+1]) - final_params[2*i])\r\n\r\n for i in range(num-1):\r\n results.append(final_params[2*num + i])\r\n\r\n return results\r\n\r\ndef estimate_log(data, num = 1, tol = 0.01, maxiter = 100):\r\n fit_params = np.zeros(3*num - 1)\r\n a = np.average(data)\r\n s = np.log(np.std(data))\r\n for i in range(num):\r\n fit_params[2*i] = np.random.normal(loc=a, scale=np.exp(s), size=1)\r\n fit_params[2*i+1] = np.random.normal(loc=s - np.log(num), scale=1, size=1)\r\n \r\n def training_likelihood(params):\r\n return log_likelihood_logistic(data, 
params)\r\n\r\n def training_loss(params):\r\n return -log_likelihood_logistic(data, params)\r\n \r\n training_likelihood_jac = grad(training_likelihood)\r\n training_loss_jac = grad(training_loss)\r\n\r\n res = minimize(training_loss, jac=training_loss_jac, x0=fit_params, method=\"BFGS\", options = {\"maxiter\": maxiter, \"gtol\": tol})\r\n print(res)\r\n final_params = res.x\r\n for i in range(num):\r\n final_params[2*i+1] = np.exp(final_params[2*i+1])\r\n results = []\r\n for i in range(num):\r\n results.append(final_params[2*i])\r\n results.append(logistic.isf(0.25, loc=final_params[2*i], scale=final_params[2*i+1]) - final_params[2*i])\r\n\r\n for i in range(num-1):\r\n results.append(final_params[2*num + i])\r\n\r\n return results\r\n\r\ndef estimate_powell(data, num = 1, tol = 0.01, maxiter = 100):\r\n fit_params = np.zeros(3*num - 1)\r\n a = np.average(data)\r\n s = np.log(np.std(data))\r\n for i in range(num):\r\n fit_params[2*i] = np.random.normal(loc=a, scale=np.exp(s), size=1)\r\n fit_params[2*i+1] = np.random.normal(loc=s - np.log(num), scale=1, size=1)\r\n \r\n def training_likelihood(params):\r\n return log_likelihood_logistic(data, params)\r\n\r\n def training_loss(params):\r\n return -log_likelihood_logistic(data, params)\r\n \r\n training_likelihood_jac = grad(training_likelihood)\r\n training_loss_jac = grad(training_loss)\r\n\r\n res = minimize(training_loss, x0=fit_params, method=\"Powell\", tol=tol, options = {\"maxiter\": maxiter})\r\n print(res)\r\n final_params = res.x\r\n for i in range(num):\r\n final_params[2*i+1] = np.exp(final_params[2*i+1])\r\n results = []\r\n for i in range(num):\r\n results.append(final_params[2*i])\r\n results.append(logistic.isf(0.25, loc=final_params[2*i], scale=final_params[2*i+1]) - final_params[2*i])\r\n\r\n for i in range(num-1):\r\n results.append(final_params[2*num + i])\r\n\r\n return results\r\n"
] | [
[
"scipy.optimize.minimize",
"scipy.stats.logistic.isf"
]
] |
Dangaran/home_station_project | [
"890b342e79e3dd493a8f418ed9283f0d444e5073"
] | [
"info_summary/get_summary_pdf.py"
] | [
"import requests\nimport pandas as pd\nfrom plotnine import *\nimport json\nimport time\nfrom fpdf import FPDF\nfrom datetime import datetime\n\n\n# change pandas display options\npd.options.display.max_columns = 101\npd.options.display.max_rows = 200\npd.options.display.precision = 7\n\n# get aemet and home information\nlast_day = {\n 'date_start': int(time.time()) - 86400,\n 'date_end': int(time.time())\n}\nresponse_aemet = requests.post('url_to_aws_lambda/get-aemet-data', json=last_day)\naemet_info = json.loads(response_aemet.text)\n\nresponse_home = requests.post('url_to_aws_lambda/get-home-data', json=last_day)\nhome_info = json.loads(response_home.text)\n\n\n# merge dataframes\naemet_info_df = pd.DataFrame(aemet_info)\naemet_info_df.sort_values(by=\"timestamp\", inplace=True)\n\nhome_info_df = pd.DataFrame(home_info)\nhome_info_df.sort_values(by=\"timestamp\", inplace=True)\n\nlast_day_info = pd.merge(aemet_info_df, home_info_df, on='timestamp', suffixes=(\"_aemet\", \"_home\"))\n\nlast_day_info = last_day_info.iloc[100:124, :]\n\n\n\n# -----------------------------------------------------------\n# \n# TEMPERATURE ANALYSIS\n#\n# -----------------------------------------------------------\n# prepare data for plotting\nhome_temp_threshold = 20\n# transform hour column to string and sort them \nlast_day_info['hour'] = last_day_info['hour'].astype(str) \nlast_day_info['hour'] = pd.Categorical(last_day_info['hour'], categories=last_day_info['hour'])\n\n# melt data to plot temperatures\ntemp_data_to_plot = last_day_info.melt(id_vars=['hour'], value_vars=['thermal_sensation', 'temperature_aemet', 'temperature_home'], var_name='temp_loc', value_name='temp_value')\n\n# change temp_loc to more readable strings for plotting\ntemp_data_to_plot['temp_loc'].replace({'thermal_sensation': 'Thermal sensation (outside)', \n 'temperature_aemet': 'Temperature (outside)',\n 'temperature_home': 'Temperature (home)',}, inplace=True)\n\n# get home data\nhome_temp_plot = temp_data_to_plot.loc[temp_data_to_plot.temp_loc == 'Temperature (home)', :]\n\n# make the plot\ntemp_plot = ggplot(temp_data_to_plot, aes(x = 'hour', y = 'temp_value', color = 'temp_loc', group = 'temp_loc')) +\\\n geom_line() +\\\n geom_point(size = .5) +\\\n geom_point(aes(x='hour', y='temp_value'), size = .5, color = ['#FF6633' if value <= home_temp_threshold else '#64f564' for value in list(home_temp_plot['temp_value'])], data = home_temp_plot) +\\\n geom_hline(aes(yintercept= home_temp_threshold), size = 1, linetype = 'dotted', alpha = .2) +\\\n labs(title = 'Differences in temperature between outside and inside your house', x = 'Hour', y = 'Temperature (ºC)', color='') +\\\n scale_color_manual(values = ['#64f564', '#e6454a', '#6bb8ff']) +\\\n theme_classic() +\\\n theme(plot_title=element_text(face='bold', ha= 'center', size = 10))\n\nggsave(plot=temp_plot, filename='./today_plots/temp_plot.png', dpi=100)\n\n\n\n\n# -----------------------------------------------------------\n# \n# HUMIDITY ANALYSIS\n#\n# -----------------------------------------------------------\n# prepare plot\nhum_data_to_plot = last_day_info.melt(id_vars=['hour'], value_vars=['humidity_home', 'humidity_aemet'], var_name='hum_loc', value_name='hum_value')\nhum_data_to_plot.hum_value = pd.to_numeric(hum_data_to_plot.hum_value, errors = 'raise')\nhum_data_to_plot['hum_loc'].replace({'humidity_aemet': 'Humidity (outside)',\n 'humidity_home': 'Humidity (home)',}, inplace=True)\n\n\n# create the plot\nhum_plot = ggplot(hum_data_to_plot, aes(x = 'hour', y = 'hum_value', 
fill = 'hum_loc')) +\\\n geom_bar(stat = 'identity', position='dodge', color = 'grey') +\\\n labs(title = 'Differences in humidity between outside and inside your house', x = 'Hour', y = 'Relative humidity (%)', fill='') +\\\n scale_fill_manual(values = ['#9da6d4', '#4f66e0']) +\\\n theme_classic() +\\\n theme(plot_title=element_text(face='bold', ha= 'center', size = 10))\n\nggsave(plot=hum_plot, filename='./today_plots/hum_plot.png', dpi=100)\n\n\n\n# -----------------------------------------------------------\n# \n# WIND ANALYSIS\n#\n# -----------------------------------------------------------\n# Wind information\n# avg and max speed\navg_wind_speed = round(last_day_info.avg_wind_speed.apply(lambda x: int(x)).mean(), 2)\nmax_wind_speed = round(last_day_info.max_wind_speed.apply(lambda x: int(x)).max(), 2)\n\n# prepare plot\n# count number of cardinal directions \ncardinal_dir_list = ['N', 'NE', 'E', 'SE', 'S', 'SO', 'O', 'NO']\nwind_dir_df = last_day_info.wind_direction.value_counts().to_frame()\nwind_dir_df.reset_index(inplace =True)\nwind_dir_df.rename(columns = {'index': 'cardinal_direction'}, inplace = True)\nwind_dir_df\n\n# complete cardinal column\nmissing_dir = list(set(cardinal_dir_list) - set(wind_dir_df.cardinal_direction.to_list()))\nfor direction in missing_dir:\n wind_dir_df = wind_dir_df.append({'cardinal_direction': direction,\n 'wind_direction': 0}, ignore_index=True)\n\nwind_dir_df\n# create column with correct order to plot\nwind_dir_df = wind_dir_df.sort_values(by = 'cardinal_direction').reset_index(drop = True)\nwind_dir_df['cardinal_order'] = [2, 0, 1, 7, 6, 4, 3, 5]\nwind_dir_df = wind_dir_df.sort_values(by = 'cardinal_order')\nwind_dir_df.index = wind_dir_df.cardinal_order\n\n\n# create x and y axis\nwind_dir_df['x_axis'] = [0,\n int(wind_dir_df.loc[wind_dir_df.cardinal_direction == 'NE', 'wind_direction']),\n int(wind_dir_df.loc[wind_dir_df.cardinal_direction == 'E', 'wind_direction']), \n int(wind_dir_df.loc[wind_dir_df.cardinal_direction == 'SE', 'wind_direction']),\n 0,\n int(-wind_dir_df.loc[wind_dir_df.cardinal_direction == 'SO', 'wind_direction']),\n int(-wind_dir_df.loc[wind_dir_df.cardinal_direction == 'O', 'wind_direction']),\n int(-wind_dir_df.loc[wind_dir_df.cardinal_direction == 'NO', 'wind_direction'])] \n\nwind_dir_df['y_axis'] = [int(wind_dir_df.loc[wind_dir_df.cardinal_direction == 'N', 'wind_direction']),\n int(wind_dir_df.loc[wind_dir_df.cardinal_direction == 'NE', 'wind_direction']),\n 0,\n int(-wind_dir_df.loc[wind_dir_df.cardinal_direction == 'SE', 'wind_direction']),\n int(-wind_dir_df.loc[wind_dir_df.cardinal_direction == 'S', 'wind_direction']),\n int(-wind_dir_df.loc[wind_dir_df.cardinal_direction == 'SO', 'wind_direction']),\n 0,\n int(wind_dir_df.loc[wind_dir_df.cardinal_direction == 'NO', 'wind_direction'])] \n\n# remove 0 columns to plot\nwind_dir_df = wind_dir_df.loc[wind_dir_df.wind_direction != 0, :]\n\n# create the plot\nwind_plot = ggplot(aes(x = 'x_axis', y = 'y_axis'), wind_dir_df) +\\\n geom_point(size = .3, color = 'darkgreen') +\\\n geom_polygon(alpha = .2) +\\\n xlim(-24, 24) +\\\n ylim(-24, 24) +\\\n geom_segment(aes(x=0, xend=22, y=0, yend=0), alpha = 0.1, linetype = 'dotted', arrow = arrow()) +\\\n geom_segment(aes(x=0, xend=-22, y=0, yend=0), alpha = 0.1, linetype = 'dotted', arrow = arrow()) +\\\n geom_segment(aes(x=0, xend=0, y=0, yend=22), alpha = 0.1, linetype = 'dotted', arrow = arrow()) +\\\n geom_segment(aes(x=0, xend=0, y=0, yend=-22), alpha = 0.1, linetype = 'dotted', arrow = arrow()) +\\\n annotate('text', 
x=23, y= 0, label = 'E', color = 'darkgreen') +\\\n annotate('text', x=-23.3, y= 0, label = 'O', color = 'darkgreen') +\\\n annotate('text', x=0, y= 24, label = 'N', color = 'darkgreen') +\\\n annotate('text', x=0, y= -24, label = 'S', color = 'darkgreen') +\\\n labs(title = 'Wind direction over the last 24 hours', x = '', y = '') +\\\n theme_classic() +\\\n theme(plot_title=element_text(face='bold', ha= 'center', size = 15),\n panel_grid_major = element_blank(), \n panel_grid_minor = element_blank(), \n panel_background = element_blank(),\n axis_line = element_blank(),\n axis_ticks_major = element_blank(),\n axis_text = element_blank())\n \nggsave(plot=wind_plot, filename='./today_plots/wind_plot.png', dpi=100)\n\n\n\n\n# -----------------------------------------------------------\n# \n# SKY ANALYSIS\n#\n# -----------------------------------------------------------\nmost_common_sky = last_day_info.sky_condition.value_counts().idxmax()\nsnow_probability = round(last_day_info.snow_probability.apply(lambda x: int(x)).mean(), 2)\nprecipitation_probability = round(last_day_info.precipitation_probability.apply(lambda x: int(x)).mean(), 2)\nmost_common_warning_lvl = last_day_info.warning_level.value_counts().idxmax()\ntotal_precipitation = round(last_day_info.precipitation.apply(lambda x: int(x)).sum(), 2)\n\n\n\n\n# -----------------------------------------------------------\n# \n# PEOPLE ANALYSIS\n#\n# -----------------------------------------------------------\n# Check number of people\npeople_df = last_day_info.loc[:, ['hour', 'pic_name']]\npeople_df.pic_name = people_df.pic_name.fillna('No_0_data')\npeople_df['people_count'] = people_df.pic_name.apply(lambda x: int(x.split('_')[1]))\n\nhours_with_people_at_home = people_df.loc[people_df.people_count > 0].shape[0]\nmost_people_in_room = people_df.people_count.value_counts(ascending = True).index[0]\n\nrows_with_most_people = people_df.loc[people_df.people_count == most_people_in_room]\nhours_with_most_people = rows_with_most_people.hour.to_list()\npics_names = rows_with_most_people.pic_name.to_list()\n\n\n\n\n# -----------------------------------------------------------\n# \n# PDF CREATION\n#\n# -----------------------------------------------------------\n# export information in pdf\n# extract date\ntoday_timestamp = int(last_day_info.timestamp.reset_index(drop =True)[5])\ntoday_date = datetime.utcfromtimestamp(today_timestamp).strftime('%d/%m/%Y')\n\n\n# create pdf to export\npdf = FPDF()\npdf.add_page()\npdf.set_xy(0, 5)\npdf.set_font('arial', 'B', 12)\npdf.cell(0, 10, 'Home report from {}'.format(today_date), 0, 2, 'C') # title\npdf.cell(5)\n# subtitle\npdf.set_font('arial', '', 10)\npdf.cell(0, 10, 'This report was extracted from the information gathered by the sensors from your Raspberry and Aemet.', 0, 2, 'C')\npdf.set_font('arial', 'B', 12)\n\n# First analysis - Temperature and Humidity\npdf.cell(60, 10, 'Temperature Analysis:', 0, 0, 'R')\npdf.cell(85, 10, 'Humidity Analysis:', 0, 2, 'R')\n\npdf.image('./today_plots/temp_plot.png', x = 3, y = 35, w = 110, h = 70, type = '', link = '')\npdf.image('./today_plots/hum_plot.png', x = 110, y = 35, w = 100, h = 70, type = '', link = '')\n\n# second analysis - Sky and wind\npdf.set_x(60)\npdf.set_y(110)\n\npdf.cell(0, 10, 'Sky Analysis:', 0, 2, 'L')\n\npdf.set_font('arial', '', 10)\npdf.cell(0, 7, 'Most common sky in 24 hours: {}'.format(most_common_sky), 0, 2, 'L')\npdf.cell(0, 7, 'Most common warning level in 24 hours: {}'.format(most_common_warning_lvl), 0, 2, 'L')\npdf.cell(0, 7, 
'Probability of Precipitation in 24 hours: {} %'.format(precipitation_probability), 0, 2, 'L')\npdf.cell(0, 7, 'Probability of Snow in 24 hours: {} %'.format(snow_probability), 0, 2, 'L')\npdf.cell(0, 7, 'Total Precipitation in 24 hours: {} mm'.format(total_precipitation), 0, 2, 'L')\n\npdf.image('./today_plots/wind_plot.png', x = 110, y = 112, w = 70, h = 60, type = '', link = '')\n\n# third analysis - Pictures from people\npdf.set_y(170)\n\npdf.set_font('arial', 'B', 12)\npdf.cell(0, 10, 'Camera Analysis:', 0, 2, 'L')\n\npdf.set_font('arial', '', 10)\npdf.cell(0, 7, 'Number of hours with people at home: {}'.format(hours_with_people_at_home), 0, 2, 'L')\npdf.cell(0, 7, 'How many people were in the room at the time of maximum capacity?: {}'.format(most_people_in_room), 0, 2, 'L')\npdf.cell(0, 7, 'How many hours was the house with the maximum number of people?: {}'.format(rows_with_most_people.shape[0]), 0, 2, 'L')\npdf.cell(0, 7, 'What were the hours when the house had the maximum number of people?: {}'.format(', '.join(hours_with_most_people)), 0, 2, 'L')\npdf.cell(0, 7, 'What are the pictura names that correspond to those hours?: {}'.format(', '.join(pics_names)), 0, 2, 'L')\n\npdf.image('../rapsberry/camera/images/{}'.format(pics_names[0]), x = 15, y = 200, w = 70, h = 60, type = '', link = '')\n\n# save output\npdf.output('test.pdf', 'F')\n\n\n"
] | [
[
"pandas.to_numeric",
"pandas.DataFrame",
"pandas.merge",
"pandas.Categorical"
]
] |
Tbarkin121/Tensegrity_IsaacGym | [
"0b6b5227e76b18396862c242a4e8e743248844b3"
] | [
"training/utils/utils.py"
] | [
"# Copyright (c) 2018-2021, NVIDIA Corporation\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n# python\n\nimport numpy as np\nimport torch\nimport random\nimport os\n\ndef set_np_formatting():\n \"\"\" formats numpy print \"\"\"\n np.set_printoptions(edgeitems=30, infstr='inf',\n linewidth=4000, nanstr='nan', precision=2,\n suppress=False, threshold=10000, formatter=None)\n\n\ndef set_seed(seed, torch_deterministic=False):\n \"\"\" set seed across modules \"\"\"\n if seed == -1 and torch_deterministic:\n seed = 42\n elif seed == -1:\n seed = np.random.randint(0, 10000)\n print(\"Setting seed: {}\".format(seed))\n\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n\n if torch_deterministic:\n # refer to https://docs.nvidia.com/cuda/cublas/index.html#cublasApi_reproducibility\n os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.deterministic = True\n torch.use_deterministic_algorithms(True)\n else:\n torch.backends.cudnn.benchmark = True\n torch.backends.cudnn.deterministic = False\n\n return seed\n\n# EOF\n"
] | [
[
"torch.cuda.manual_seed_all",
"torch.cuda.manual_seed",
"torch.manual_seed",
"numpy.random.seed",
"numpy.set_printoptions",
"torch.use_deterministic_algorithms",
"numpy.random.randint"
]
] |
monperrus/iFixR | [
"5548f3ba91341dc9e73057269f8c01a0b1b6fc68"
] | [
"code/common/preprocessing.py"
] | [
"from nltk.tokenize import RegexpTokenizer\n# from stop_words import get_stop_words\nfrom nltk.stem.porter import PorterStemmer\nfrom string import punctuation\nimport re\nfrom nltk.corpus import stopwords\nen_stop = stopwords.words('english')\nfrom nltk.corpus import wordnet\nimport html\n\nfrom common.commons import *\nCODE_PATH = os.environ[\"CODE_PATH\"]\n\nimport spacy\nnlp = spacy.load('en_core_web_lg', disable=['parser', 'tagger', 'ner'])\nnlp.max_length =100000000\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\n\nimport sys\n\ndef preprocessingCodeElementsList(res):\n printDetail = False\n if isinstance(res, list):\n merged = str()\n for r in res:\n if isinstance(r, list):\n merged = merged + ' ' + ' '.join(r)\n else:\n merged = merged +' ' + r\n else:\n merged=res\n\n res = html.unescape(merged)\n\n tokens = getTokens(res,printDetail)\n\n stripped = []\n for t in tokens:\n splits = re.split('\\.|\\(|\\)|:|>|<|:|=|/|\\\\\\\\|\\'|-',t)\n for s in splits:\n stripped.append(s)\n punc = removeEndingPunct(stripped,printDetail)\n\n non_empty = [i for i in punc if i != '']\n\n stripped = removeEndingPunct(non_empty,printDetail)\n\n camelCase = handleCamelCase(stripped,printDetail,True)\n\n underScore = handleUnderScore(camelCase,printDetail,True)\n\n lower = [i.lower() for i in underScore]\n\n stopped_tokens = [i for i in lower if not i in en_stop]\n\n stem2 = stem(stopped_tokens,printDetail)\n if printDetail:\n print('=====CLEANED=========')\n print(stem2)\n\n return stem2\n\ndef preprocessingNL(res):\n\n printDetail = False\n\n if isinstance(res, list):\n merged = str()\n for r in res:\n if isinstance(r, list):\n merged = merged + ' ' + ' '.join(r)\n else:\n merged = merged +' ' + r\n else:\n merged=res\n\n res = html.unescape(merged)\n html_decoded_string = res.replace(\"&\", \"&\").replace(\""\", '\"').replace(\"'\", \"'\").replace(\">\",\n \">\").replace(\n \"<\", \"<\")\n html_decoded_string = re.sub(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', '',html_decoded_string)\n\n tokens = getTokens(html_decoded_string,printDetail)\n\n stripped = []\n for t in tokens:\n splits = re.split('\\.|\\(|\\)|:|>|<|:|=|/|\\\\\\\\|\\'|-',t)\n for s in splits:\n stripped.append(s)\n punc = removeEndingPunct(stripped,printDetail)\n\n non_empty = [i for i in punc if i != '']\n\n stripped = removeEndingPunct(non_empty,printDetail)\n\n camelCase = handleCamelCase(stripped,printDetail,True)\n\n underScore = handleUnderScore(camelCase,printDetail,True)\n\n lower = [i.lower() for i in underScore]\n\n stopped_tokens = [i for i in lower if not i in en_stop]\n\n nonDigit = [i for i in stopped_tokens if (not i.isdigit())]\n\n doc = nlp(' '.join(nonDigit))\n newWord = []\n for token in doc:\n if(token.text in nlp.vocab):\n newWord.append(token.text)\n\n stem2 = stem(newWord,printDetail)\n\n if printDetail:\n print('=====CLEANED=========')\n print(stem2)\n\n return stem2\n\ndef getTokens(re,printDetail=False):\n tokenizer = RegexpTokenizer(r'\\S+')\n tokens = tokenizer.tokenize(re)\n if printDetail:\n print('=====TOKENS=========')\n print(tokens)\n\n return tokens\n\ndef charLength(x, l=3):\n if x.isalpha() and len(x) >= l:\n return True\n else:\n return False\n\n\ndef removeEndingPunct(re,printDetail):\n stripped = [i.strip(punctuation) for i in re]\n if printDetail:\n print('=====removeEndingPunct=========')\n print(stripped)\n return stripped\n\ndef handleCamelCase(re,printDetail=False,keepOriginal = 
False):\n camelCased = list()\n\n for i in re:\n listOfCC = camel_case_split(i)\n camelCased.extend(listOfCC)\n if i not in listOfCC and keepOriginal:\n camelCased.append(i)\n\n if printDetail:\n print('=====CAMEL CASE=========')\n print(camelCased)\n return camelCased\n\ndef handleUnderScore(re,printDetail=False,keepOriginal = False):\n underScored = list()\n for i in re:\n listOfCC = i.split('_')\n underScored.extend(listOfCC)\n if i not in listOfCC and keepOriginal:\n underScored.append(i)\n\n if printDetail:\n print('=====UNDER SCORE=========')\n print(underScored)\n\n return underScored\n\ndef camel_case_split(identifier):\n matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', identifier)\n res = [m.group(0) for m in matches]\n\n return res\n\ndef stem(res,printDetail):\n p_stemmer = PorterStemmer()\n stemmed_tokens = [p_stemmer.stem(i.strip()) for i in res if i]\n if printDetail:\n print('=====STEMMED=========')\n print(stemmed_tokens)\n return stemmed_tokens\n\ndef isEnglish(word_to_test):\n if not wordnet.synsets(word_to_test):\n #Not an English Word\n #TODO\n word_to_test\n #print word_to_test\n else:\n return word_to_test\n\n\ndef dummy_fun(doc):\n return doc\n\ndef calculateTfIdfCodeElementsList(aCorpus):\n global progress\n progress = 0\n v = TfidfVectorizer(tokenizer=dummy_fun,stop_words=None,lowercase=False,sublinear_tf=True)#,max_df=0.7,min_df=3)\n m = v.fit(aCorpus)\n return v\n\ndef calculateTfIdfNLList(aCorpus):\n global progress\n progress = 0\n v = TfidfVectorizer(tokenizer=dummy_fun,stop_words=None,lowercase=False,sublinear_tf=True)#,max_df=0.7,min_df=3)\n m = v.fit(aCorpus)\n return v\n\ndef getDTMNL(x,v,corpus):\n ind =x.name\n v.tokenizer = dummy_fun\n return v.transform([corpus[ind]])\ndef getDTMCE(x,v,corpus):\n ind =x.name\n v.tokenizer = dummy_fun\n return v.transform([corpus[ind]])\n\ndef getBRDTM(x,v,corpus):\n ind =x.name\n v.tokenizer = dummy_fun\n return v.transform([corpus[ind]])\n\n\ndef getBRDTMCEs(x,v,corpus):\n ind =x.name\n v.tokenizer = dummy_fun\n return v.transform([corpus[ind]])\n"
] | [
[
"sklearn.feature_extraction.text.TfidfVectorizer"
]
] |
egilbertson-ucsf/algHW2 | [
"eec0f4e42e27d4c7633cc907d6f523285fadd79c"
] | [
"hw2skeleton/k_means.py"
] | [
"from hw2skeleton import cluster as cl\nfrom hw2skeleton import io\nimport sklearn.metrics as sk\nimport os\nimport pandas as pd\nimport numpy as np\nimport math\naa3 = \"ALA CYS ASP GLU PHE GLY HIS ILE LYS LEU MET ASN PRO GLN ARG SER THR VAL TRP TYR\".split()\naa_df = pd.DataFrame(0, index=list(aa3), columns=['Count'])\n\n\ndef calc_avg_site_length(sites):\n '''\n calculate the average size of an active site\n for use in generating random sites\n '''\n ss = []\n for site in sites:\n ss.append(len(site.residues))\n\n return [sum(ss) / len(sites), max(ss), min(ss)]\n\n\ndef generate_random_site(sites):\n '''\n generate a random site by filling in a 1x20 vector repr of amino acids with counts\n '''\n lens = calc_avg_site_length(sites)\n num_res = np.random.randint(lens[2],lens[1])\n site = aa_df.copy()\n\n for pos in range(num_res):\n aa = np.random.randint(0,19)\n site.iloc[aa] += 1\n\n return site\n\ndef generate_k_random_centroids(k, sites):\n '''\n generate k random sites using above function\n '''\n centroids = {}\n for i in range(k):\n centroids[i] = generate_random_site(sites)\n return centroids\n\ndef assign_single_site_to_cluster(site, centroids):\n '''\n check which cluster centroid is closest to the given site and assign the\n site to that cluster\n '''\n loc = site.counts\n dists = {}\n for c in centroids.keys():\n dist = cl.compute_similarity(loc, centroids[c])\n dists[dist] = c\n closest = dists[min(dists.keys())]\n return closest\n\ndef assign_all_sites_to_cluster(sites, centroids, clusters):\n '''\n loop through all sites and assign them to the appropriate clusters\n '''\n for site in sites:\n close = assign_single_site_to_cluster(site, centroids)\n if close not in clusters:\n clusters[close] = [site]\n else:\n clusters[close].append(site)\n for cent in centroids:\n if cent not in clusters:\n clusters[cent] = []\n return clusters\n\ndef compute_cluster_center(cluster_list, sites_dict):\n '''\n compute the center of a cluster by taking the average of the vector representations\n of all sites in the cluster\n '''\n sites = aa_df.copy()\n for j in cluster_list:\n if isinstance(j, str):\n sites += sites_dict[j].counts\n else:\n sites += j.counts\n return sites / len(sites)\n\ndef get_new_centroids(clusters, sites_dict=None):\n '''\n use the compute_cluster_center function to get the new centroids after updating\n assignments\n '''\n centroids = {}\n for cluster in clusters.keys():\n centroids[cluster] = compute_cluster_center(clusters[cluster], sites_dict)\n return centroids\n\ndef check_change_in_centroids(old_centroids, new_centroids):\n ''' check how far the centroids have moved '''\n diff = 0\n for c in old_centroids.keys():\n diff += cl.compute_similarity(old_centroids[c], new_centroids[c])\n return diff\n\ndef one_full_k_means(sites, k):\n ''' using all above functions, one full iteration of k means'''\n centroids = generate_k_random_centroids(k, sites)\n clusters = {}\n clusters = assign_all_sites_to_cluster(sites, centroids, clusters)\n new_centroids = get_new_centroids(clusters)\n old_diff = check_change_in_centroids(centroids, new_centroids)\n new_diff = 0\n while old_diff - new_diff > 0.00001:\n old_diff = check_change_in_centroids(centroids, new_centroids)\n centroids = new_centroids.copy()\n clusters = {}\n clusters = assign_all_sites_to_cluster(sites, centroids, clusters)\n new_centroids = get_new_centroids(clusters)\n new_diff = check_change_in_centroids(centroids, new_centroids)\n return clusters, centroids\n\ndef compute_similarity_matrix(sites):\n ''' copy of 
computer similarity matrix from utils '''\n\n simMat = []\n names = []\n for i in range(len(sites)):\n names.append(sites[i].name)\n row = []\n for j in range(len(sites)):\n row.append(cl.compute_similarity(sites[i].counts,sites[j].counts))\n simMat.append(row)\n simMat = pd.DataFrame(simMat, columns = names, index = names)\n\n return simMat\n\ndef make_cluster_assign_df(clusters, simMat):\n ''' make a nice df repr of the cluster assignments'''\n assgn = pd.DataFrame(index = simMat.index, columns = ['Cluster Assignment'])\n for cluster in clusters.keys():\n for site in clusters[cluster]:\n assgn.loc[site.name] = cluster\n return assgn\n\ndef avg_sl(sites, k, simMat):\n ''' average silhouette_score for i random starts of k means for k clusters'''\n\n scores = []\n c_list = []\n for i in range(1):\n clusters, centroids = one_full_k_means(sites, k)\n assgn = make_cluster_assign_df(clusters, simMat)\n c_list.append(clusters)\n scores.append(sk.silhouette_score(simMat, assgn['Cluster Assignment'], metric='precomputed'))\n return scores, clusters\n\n\n\ndef k_means(sites=None):\n ''' run k means '''\n sites = io.read_active_sites('data')\n simMat = compute_similarity_matrix(sites)\n points = [[],[]]\n clusters = []\n for i in range(2,5):\n points[0].append(i)\n temp = avg_sl(sites, i , simMat)\n points[1].append(temp[0])\n clusters.append(temp[1])\n\n return clusters[points[1].index(max(points[1]))], max(points[1])\n"
] | [
[
"numpy.random.randint",
"pandas.DataFrame",
"sklearn.metrics.silhouette_score"
]
] |
rhong3/CPTAC-UCEC | [
"ec83fbee234b5ad3df6524cdd960b5f0f3da9ea9"
] | [
"Scripts/Legacy/line1prep.py"
] | [
"import pandas as pd\n\nlabels = pd.read_csv('../Fusion_dummy_His_MUT_joined.csv', header=0)\n# line = pd.read_csv('../../Line1.csv', header=0)\nline = pd.read_csv('../EC_cyclin_expression.csv', header=0)\n\n# line['name'] = line['Proteomics_Participant_ID']\n# line = line.drop(['Proteomics_Participant_ID', 'Histologic_type', 'Genomics_subtype', 'TP53_TP53'], axis=1)\n# labels = labels.join(line.set_index('name'), on='name')\n# labels['LINE1_ORF1p'] = (labels['LINE1_ORF1p'].dropna() > 0).astype(int)\n# labels['RAD50-S635'] = (labels['RAD50-S635'].dropna() > 0).astype(int)\n# labels['NBN-S343'] = (labels['NBN-S343'].dropna() > 0).astype(int)\n# labels['ATR-T1989'] = (labels['ATR-T1989'].dropna() > 0).astype(int)\n# labels['ATM-S1981'] = (labels['ATM-S1981'].dropna() > 0).astype(int)\n\nline['name'] = line['Sample_ID'].str.slice(start=0, stop=9)\n\nline = line.drop(['Sample_ID', 'Genomic_subtype'], axis=1)\nlabels = labels.join(line.set_index('name'), on='name')\nlabels['CCND1'] = (labels['CCND1'].dropna() > 0).astype(int)\nlabels['CCNE1'] = (labels['CCNE1'].dropna() > 0).astype(int)\nlabels['CCNA2'] = (labels['CCNA2'].dropna() > 0).astype(int)\nlabels['CCNB1'] = (labels['CCNB1'].dropna() > 0).astype(int)\n\nlabels.to_csv('../Fusion_dummy_His_MUT_joined.csv', index=False)\n"
] | [
[
"pandas.read_csv"
]
] |
dd-dos/sentence-transformers | [
"8f9c36b788e15141f723d80fea67ed16785cd18e"
] | [
"sentence_transformers/datasets/SentenceLabelDataset.py"
] | [
"from torch.utils.data import Dataset\nfrom typing import List\nimport bisect\nimport torch\nimport logging\nimport numpy as np\nfrom tqdm import tqdm\nfrom .. import SentenceTransformer\nfrom ..readers.InputExample import InputExample\nfrom multiprocessing import Pool, cpu_count\nimport multiprocessing\n\nclass SentenceLabelDataset(Dataset):\n \"\"\"\n Dataset for training with triplet loss.\n This dataset takes a list of sentences grouped by their label and uses this grouping to dynamically select a\n positive example from the same group and a negative example from the other sentences for a selected anchor sentence.\n\n This dataset should be used in combination with dataset_reader.LabelSentenceReader\n\n One iteration over this dataset selects every sentence as anchor once.\n\n This also uses smart batching like SentenceDataset.\n \"\"\"\n\n def __init__(self, examples: List[InputExample], model: SentenceTransformer, provide_positive: bool = True,\n provide_negative: bool = True,\n parallel_tokenization: bool = True,\n max_processes: int = 4,\n chunk_size: int = 5000):\n \"\"\"\n Converts input examples to a SentenceLabelDataset usable to train the model with\n SentenceTransformer.smart_batching_collate as the collate_fn for the DataLoader\n\n Assumes only one sentence per InputExample and labels as integers from 0 to max_num_labels\n and should be used in combination with dataset_reader.LabelSentenceReader.\n\n Labels with only one example are ignored.\n\n smart_batching_collate as collate_fn is required because it transforms the tokenized texts to the tensors.\n\n :param examples:\n the input examples for the training\n :param model\n the Sentence BERT model for the conversion\n :param provide_positive:\n set this to False, if you don't need a positive example (e.g. for BATCH_HARD_TRIPLET_LOSS).\n :param provide_negative:\n set this to False, if you don't need a negative example (e.g. for BATCH_HARD_TRIPLET_LOSS\n or MULTIPLE_NEGATIVES_RANKING_LOSS).\n :param parallel_tokenization\n If true, multiple processes will be started for the tokenization\n :param max_processes\n Maximum number of processes started for tokenization. Cannot be larger can cpu_count()\n :param chunk_size\n #chunk_size number of examples are send to each process. Larger values increase overall tokenization speed\n \"\"\"\n self.model = model\n self.groups_right_border = []\n self.grouped_inputs = []\n self.grouped_labels = []\n self.num_labels = 0\n self.max_processes = min(max_processes, cpu_count())\n self.chunk_size = chunk_size\n self.parallel_tokenization = parallel_tokenization\n\n if self.parallel_tokenization:\n if multiprocessing.get_start_method() != 'fork':\n logging.info(\"Parallel tokenization is only available on Unix systems which allow to fork processes. 
Fall back to sequential tokenization\")\n self.parallel_tokenization = False\n\n self.convert_input_examples(examples, model)\n\n self.idxs = np.arange(len(self.grouped_inputs))\n\n self.provide_positive = provide_positive\n self.provide_negative = provide_negative\n\n\n def convert_input_examples(self, examples: List[InputExample], model: SentenceTransformer):\n \"\"\"\n Converts input examples to a SentenceLabelDataset.\n\n Assumes only one sentence per InputExample and labels as integers from 0 to max_num_labels\n and should be used in combination with dataset_reader.LabelSentenceReader.\n\n Labels with only one example are ignored.\n\n :param examples:\n the input examples for the training\n :param model\n the Sentence Transformer model for the conversion\n :param is_pretokenized\n If set to true, no tokenization will be applied. It is expected that the input is tokenized via model.tokenize\n \"\"\"\n\n inputs = []\n labels = []\n\n label_sent_mapping = {}\n too_long = 0\n label_type = None\n\n logging.info(\"Start tokenization\")\n if not self.parallel_tokenization or self.max_processes == 1 or len(examples) <= self.chunk_size:\n tokenized_texts = [self.tokenize_example(example) for example in examples]\n else:\n logging.info(\"Use multi-process tokenization with {} processes\".format(self.max_processes))\n self.model.to('cpu')\n with Pool(self.max_processes) as p:\n tokenized_texts = list(p.imap(self.tokenize_example, examples, chunksize=self.chunk_size))\n\n # Group examples and labels\n # Add examples with the same label to the same dict\n for ex_index, example in enumerate(tqdm(examples, desc=\"Convert dataset\")):\n if label_type is None:\n if isinstance(example.label, int):\n label_type = torch.long\n elif isinstance(example.label, float):\n label_type = torch.float\n tokenized_text = tokenized_texts[ex_index][0]\n\n if hasattr(model, 'max_seq_length') and model.max_seq_length is not None and model.max_seq_length > 0 and len(tokenized_text) > model.max_seq_length:\n too_long += 1\n\n if example.label in label_sent_mapping:\n label_sent_mapping[example.label].append(ex_index)\n else:\n label_sent_mapping[example.label] = [ex_index]\n\n inputs.append(tokenized_text)\n labels.append(example.label)\n\n # Group sentences, such that sentences with the same label\n # are besides each other. 
Only take labels with at least 2 examples\n distinct_labels = list(label_sent_mapping.keys())\n for i in range(len(distinct_labels)):\n label = distinct_labels[i]\n if len(label_sent_mapping[label]) >= 2:\n self.grouped_inputs.extend([inputs[j] for j in label_sent_mapping[label]])\n self.grouped_labels.extend([labels[j] for j in label_sent_mapping[label]])\n self.groups_right_border.append(len(self.grouped_inputs)) #At which position does this label group / bucket end?\n self.num_labels += 1\n\n self.grouped_labels = torch.tensor(self.grouped_labels, dtype=label_type)\n logging.info(\"Num sentences: %d\" % (len(self.grouped_inputs)))\n logging.info(\"Sentences longer than max_seqence_length: {}\".format(too_long))\n logging.info(\"Number of labels with >1 examples: {}\".format(len(distinct_labels)))\n\n\n def tokenize_example(self, example):\n if example.texts_tokenized is not None:\n return example.texts_tokenized\n\n return [self.model.tokenize(text) for text in example.texts]\n\n def __getitem__(self, item):\n if not self.provide_positive and not self.provide_negative:\n return [self.grouped_inputs[item]], self.grouped_labels[item]\n\n # Anchor element\n anchor = self.grouped_inputs[item]\n\n # Check start and end position for this label in our list of grouped sentences\n group_idx = bisect.bisect_right(self.groups_right_border, item)\n left_border = 0 if group_idx == 0 else self.groups_right_border[group_idx - 1]\n right_border = self.groups_right_border[group_idx]\n\n if self.provide_positive:\n positive_item_idx = np.random.choice(np.concatenate([self.idxs[left_border:item], self.idxs[item + 1:right_border]]))\n positive = self.grouped_inputs[positive_item_idx]\n else:\n positive = []\n\n if self.provide_negative:\n negative_item_idx = np.random.choice(np.concatenate([self.idxs[0:left_border], self.idxs[right_border:]]))\n negative = self.grouped_inputs[negative_item_idx]\n else:\n negative = []\n\n return [anchor, positive, negative], self.grouped_labels[item]\n\n\n def __len__(self):\n return len(self.grouped_inputs)"
] | [
[
"numpy.concatenate",
"torch.tensor"
]
] |
erfanMhi/Cooperative-Coevolution-Transfer-Optimization | [
"e75b7930bd8b55a160668b1039ac154a0d0270d7"
] | [
"main_multi.py"
] | [
"\nimport argparse\nimport os\nimport queue\n\nimport multiprocessing as mp\n# import SharedArray as sa\nimport numpy as np\n\n\nfrom copy import deepcopy\nfrom time import time\nfrom pprint import pprint\nfrom utils.data_manipulators import *\nfrom evolution.operators import *\nfrom to.probabilistic_model import ProbabilisticModel\nfrom to.mixture_model import MixtureModel\nfrom evolution.chromosome import *\n\n\nclass EAProcess(mp.Process):\n def __init__(self, dims, psize, gen, problem, shared_queue, \n shared_array, t_lock, list_lock, return_list, transfer_interval=2):\n super(EAProcess, self).__init__()\n self.dims = dims\n self.psize = psize\n print('hi')\n self.gen = gen\n self.problem = problem\n self.shared_queue = shared_queue\n self.shared_array = shared_array\n # self.shared_lock = shared_lock\n self.t_lock = t_lock\n self.list_lock = list_lock\n self.transfer_interval = transfer_interval\n self.reinitialize()\n self.return_list = return_list\n\n def reinitialize(self):\n\n self.fitness_hist = np.zeros((self.gen, self.psize))\n self.fitness_time = np.zeros((self.gen))\n\n init_func = lambda n: np.round(np.random.rand(n))\n self.pop = get_pop_init(self.psize, self.dims, init_func)\n\n def _ea(self):\n \n start = time()\n\n for i in range(self.psize): self.pop[i].fitness_calc(self.problem)\n\n self.bestfitness = np.max(self.pop).fitness\n self.fitness = Chromosome.fitness_to_numpy(self.pop)\n self.fitness_hist[0, :] = self.fitness\n\n self.fitness_time[0] = start - time()\n\n\n \n for i in range(1, self.gen):\n start = time()\n\n if i%self.transfer_interval == 0 and i//self.transfer_interval == 1:\n print('transfer start')\n self.t_lock.release()\n\n \n if i%self.transfer_interval == 0:\n recieved_pops = None\n try:\n while True:\n if recieved_pops is None:\n recieved_pops = list(self.shared_queue.get(block=True))\n else:\n recieved_pops += list(self.shared_queue.get(block=False))\n \n except queue.Empty:\n print('Queue is empty now')\n print('recieved_pops: ', len(recieved_pops))\n self.pop = total_selection_pop(np.concatenate((self.pop, recieved_pops)), self.psize)\n\n offsprings = total_crossover(self.pop)\n\n for j in range(self.psize): offsprings[j].mutation(1/self.dims)\n\n # Fitness Calculation\n cfitness = np.zeros(self.psize)\n for j in range(self.psize): \n cfitness[j] = offsprings[j].fitness_calc(self.problem)\n\n\n self.pop, self.fitness = total_selection(np.concatenate((self.pop, offsprings)),\n np.concatenate((self.fitness, cfitness)), self.psize)\n\n self.fitness_hist[i, :] = self.fitness\n\n if self.fitness[0] > self.bestfitness:\n self.bestfitness = self.fitness[0]\n\n print('Generation %d best fitness = %f' % (i, self.bestfitness))\n\n self.list_lock.acquire()\n self.shared_array[:] = Chromosome.genes_to_list(self.pop)\n self.list_lock.release()\n\n self.fitness_time[i] = time() - start\n\n print('Shared Array is now available')\n\n self.return_list.append([self.fitness_time, self.fitness_hist]) \n \n\n\n def run(self):\n\n # When target array is prepared it will be unlocked\n print ('called run method in process: %s' %self.name)\n self._ea()\n return\n\n\nclass TransferProcess(mp.Process):\n def __init__(self, dims, problem, mutation_strength,\n sample_size, sub_sample_size, src_models,\n shared_queue, shared_array, t_lock,\n list_lock, transfer_interval=2):\n super(TransferProcess, self).__init__()\n self.dims = dims\n self.problem = problem\n self.src_models = src_models\n self.mutation_strength = mutation_strength\n self.sample_size = sample_size\n 
self.sub_sample_size = sub_sample_size\n self.shared_queue = shared_queue\n self.shared_array = shared_array\n # self.shared_lock = shared_lock\n self.t_lock = t_lock\n self.list_lock = list_lock\n self.transfer_interval = transfer_interval\n self.reinitialize()\n \n def reinitialize(self):\n\n # self.fitness_hist = np.zeros((self.gen, self.psize))\n # self.fitness_time = np.zeros((self.gen))\n\n dims_s2 = len(self.src_models)+1\n self.second_specie = StrategyChromosome(dims_s2)\n\n def _transfer_ea(self):\n prev_samples = None\n genes_differ = None\n\n target_model = ProbabilisticModel(modelType='umd')\n\n self.list_lock.acquire()\n target_array = np.array(self.shared_array[:])\n self.list_lock.release()\n\n target_model.buildModel(target_array)\n\n _, sampled_offsprings, prev_samples = \\\n self.second_specie.fitness_calc(self.problem, self.src_models, target_model, self.sample_size,\n self.sub_sample_size, mutation_vec=genes_differ, prev_samples=deepcopy(prev_samples),\n efficient_version=True)\n\n self.shared_queue.put(sampled_offsprings)\n\n while True:\n offspring = deepcopy(self.second_specie)\n\n genes_differ = offspring.mutation(self.mutation_strength, 0, 1)\n\n target_model = ProbabilisticModel(modelType='umd')\n\n self.list_lock.acquire()\n target_array = np.array(self.shared_array[:])\n self.list_lock.release()\n\n target_model.buildModel(target_array)\n\n _, sampled_offsprings, prev_samples_tmp = \\\n offspring.fitness_calc(self.problem, self.src_models, target_model, self.sample_size,\n self.sub_sample_size, mutation_vec=genes_differ, prev_samples=deepcopy(prev_samples),\n efficient_version=True)\n\n self.shared_queue.put(sampled_offsprings)\n \n self.second_specie, self.mutation_strength, is_off_selected = selection_adoption(self.second_specie, offspring, self.mutation_strength)\n\n if is_off_selected:\n prev_samples = prev_samples_tmp\n # second_species_gen_num += 1\n # while True:\n\n\n\n def run(self):\n\n self.t_lock.acquire()\n print ('called run method in process: %s' %self.name)\n self._transfer_ea()\n return\n\ndef get_args():\n parser = argparse.ArgumentParser(description='CoOperative CoEvolution Transfer Optimization Algorithm for Solving Multi-location Inventory Planning with Lateral Transshipments')\n\n\n parser.add_argument('--stop_condition', default=True, \n type=bool, nargs='?',\n help=\"Stop after i number of iteraction if fitness didn't changed\")\n\n parser.add_argument('--reps', default=1,\n type=int, nargs='?',\n help='Number of repetition')\n\n parser.add_argument('--delta', default=2,\n type=int, nargs='?',\n help='Step for switiching between transfer optimization and evolutionary operations')\n \n # parser.add_argument('--buildmodel', default=True,\n # type=bool, nargs='?',\n # help='Should we build source models?')\n\n parser.add_argument('--src_version', default='v1',\n type=str, nargs='?',\n help='What version of source models should be used?')\n\n parser.add_argument('--s1_psize', default=50,\n type=int, nargs='?',\n help='Population size for the first species?')\n \n # parser.add_argument('--s2_psize', default=20,\n # type=int, nargs='?',\n # help='Population size for the second species?')\n\n parser.add_argument('--sample_size', default=50,\n type=int, nargs='?',\n help='Number of samples generated from each AlphaChromosome?')\n\n parser.add_argument('--sub_sample_size', default=50,\n type=int, nargs='?',\n help='How many samples should we take from sample_size number of samples generated?') \n \n # parser.add_argument('-v', dest='version', 
default='v1',\n # type=str, nargs='?',\n # help='What version should be executed?')\n\n parser.add_argument('--mutation_strength', default=1,\n type=int, nargs='?',\n help='The same step-size which we use in evolution strategy')\n \n parser.add_argument('--injection_type', default='elite',\n type=str, nargs='?',\n help='What method do you want to use for injection of species 2 to species 1?')\n\n parser.add_argument('--to_repititon_num', default=1,\n type=int, nargs='?',\n help='How many time should we repeat the transferring step in evolution strategy?')\n \n parser.add_argument('--selection_version', default='v1',\n type=str, nargs='?',\n help='What selection version should we use in evolution strategy E(1 + 1)?')\n\n parser.add_argument('-c', default=2,\n type=int, nargs='?',\n help='Parameter of E(1 + 1) algorithm selection')\n\n parser.add_argument('--efficient_version', default=False,\n type=bool, nargs='?',\n help='Efficient version of evaluation strategy version?')\n\n parser.add_argument('--transfer_repeat_num', default=None,\n type=int, nargs='?',\n help=''' Number of times transfer optimization should be run.\n if it is None, it will be repeated in every delta iteration''')\n\n\n # parser.add_argument('-q', dest='matrix_num', default='a',\n # type=str, nargs='?',\n # help='T^0_H matrix selector for section b')\n\n return parser.parse_args()\n\ndef main_multi(args):\n\n # constants\n models_path = 'models'\n source_models_path = os.path.join(models_path, 'knapsack_source_models')\n knapsack_problem_path = 'problems/knapsack'\n\n dims = 1000\n psize = args.s1_psize\n mutation_strength = args.mutation_strength\n reps = args.reps\n transfer_interval = args.delta\n sub_sample_size = args.sub_sample_size\n sample_size = args.sample_size\n gen = 100\n\n # Loading Target Problem\n target_problem = Tools.load_from_file(os.path.join(knapsack_problem_path, 'KP_uc_ak'))\n\n # Loading Source Models\n src_models = Tools.load_from_file(source_models_path + '_{}'.format(args.src_version))\n\n main_m = mp.Manager()\n return_list = main_m.list()\n for i in range(reps):\n # Shared Variables\n m = mp.Manager()\n shared_queue = m.Queue()\n shared_array = m.list([[0 for j in range(dims)] for i in range(psize)])\n # prep_lock = m.Lock() # This lock is used for starting transfer learning\n # prep_lock.acquire()\n list_lock = m.Lock() # \\\\ for synchronozing read & write of the list\n # q_lock = m.Lock() # \\\\ for synchronozing put & get of the queue\n transfer_lock = m.Lock() # \\\\ will synchronize the transfer_interval for EAProcess\n transfer_lock.acquire()\n\n\n ea_process = EAProcess(dims, psize, gen, target_problem, shared_queue,\n shared_array, transfer_lock, list_lock, return_list,\n transfer_interval=transfer_interval)\n \n \n tr_process = TransferProcess(dims, target_problem, mutation_strength,\n sample_size, sub_sample_size, src_models,\n shared_queue, shared_array, transfer_lock,\n list_lock, transfer_interval=transfer_interval) \n\n ea_process.start()\n tr_process.start()\n\n ea_process.join()\n tr_process.terminate()\n tr_process.join()\n \n Tools.save_to_file(args.save_path, return_list[:])\n\n\nif __name__ == '__main__':\n args = get_args()\n main_multi(args)\n \n"
] | [
[
"numpy.zeros",
"numpy.max",
"numpy.random.rand",
"numpy.array",
"numpy.concatenate"
]
] |
Juan0001/yellowbrick-docs-zh | [
"36275d9704fc2a946c5bec5f802106bb5281efd1"
] | [
"tests/dataset.py"
] | [
"# tests.dataset\n# Helper functions for tests that utilize downloadable datasets.\n#\n# Author: Benjamin Bengfort <[email protected]>\n# Created: Thu Oct 13 19:55:53 2016 -0400\n#\n# Copyright (C) 2016 District Data Labs\n# For license information, see LICENSE.txt\n#\n# ID: dataset.py [8f4de77] [email protected] $\n\n\"\"\"\nHelper functions for tests that utilize downloadable datasets.\n\"\"\"\n\n##########################################################################\n## Imports\n##########################################################################\n\nimport os\nimport shutil\nimport hashlib\nimport zipfile\nimport numpy as np\n\nfrom sklearn.datasets.base import Bunch\n\ntry:\n import requests\nexcept ImportError:\n requests = None\n\n\n##########################################################################\n## Fixtures\n##########################################################################\n\nDATASETS = {\n 'concrete': {\n 'url': 'https://s3.amazonaws.com/ddl-data-lake/yellowbrick/concrete.zip',\n 'signature': 'b9ea5f26a7bb272a040e2f1a993b26babbf8dc4a04ab8198bb315ca66d71f10d',\n 'type': 'numpy',\n },\n 'energy': {\n 'url': 'https://s3.amazonaws.com/ddl-data-lake/yellowbrick/energy.zip',\n 'signature': '19fb86f3bcdde208eed46944172cb643ef6a7d58da103fb568fae43205ed89d3',\n 'type': 'numpy',\n },\n 'credit': {\n 'url': 'https://s3.amazonaws.com/ddl-data-lake/yellowbrick/credit.zip',\n 'signature': '4a91339c69f55e18f3f48004328fbcb7868070b618208fed099920427b084e5e',\n 'type': 'numpy',\n },\n 'occupancy': {\n 'url': 'https://s3.amazonaws.com/ddl-data-lake/yellowbrick/occupancy.zip',\n 'signature': '429cfe376dc9929a1fa528da89f0e1626e34e19695f3f555d8954025bbc522b8',\n 'type': 'numpy',\n },\n 'mushroom': {\n 'url': 'https://s3.amazonaws.com/ddl-data-lake/yellowbrick/mushroom.zip',\n 'signature': '884c43cb70db35d211c67b1cf6a3683b2b4569393d2789d5c07840da4dc85ba8',\n 'type': 'numpy',\n },\n 'hobbies': {\n 'url': 'https://s3.amazonaws.com/ddl-data-lake/yellowbrick/hobbies.zip',\n 'signature': '415c8f68df1486d5d84a1d1757a5aa3035aef5ad63ede5013c261d622fbd29d8',\n 'type': 'corpus',\n },\n 'game': {\n 'url': 'https://s3.amazonaws.com/ddl-data-lake/yellowbrick/game.zip',\n 'signature': 'b1bd85789a014a898daa34cb5f89ceab6d2cd6488a2e572187e34aa4ec21a43b',\n 'type': 'numpy',\n },\n 'bikeshare': {\n 'url': 'https://s3.amazonaws.com/ddl-data-lake/yellowbrick/bikeshare.zip',\n 'signature': 'a9b440f65549746dff680c92ff8bdca3c7265f09db1cf09e708e6e26fc8aba44',\n 'type': 'numpy',\n },\n}\n\nFIXTURES = os.path.join(os.path.dirname(__file__), \"fixtures\")\n\n\n##########################################################################\n## Test Cases that Require Download\n##########################################################################\n\nclass DatasetMixin(object):\n \"\"\"\n Mixin for unittest.TestCase class to download datasets from S3 for\n testing real world machine learning visual diagnostics.\n \"\"\"\n\n @staticmethod\n def sha256sum(path, blocksize=65536):\n \"\"\"\n Computes the SHA256 signature of a file to verify that the file has not\n been modified in transit and that it is the correct version of the data.\n \"\"\"\n sig = hashlib.sha256()\n with open(path, 'rb') as f:\n buf = f.read(blocksize)\n while len(buf) > 0:\n sig.update(buf)\n buf = f.read(blocksize)\n return sig.hexdigest()\n\n\n @staticmethod\n def download_data(url, path=FIXTURES, signature=None, extract=True):\n \"\"\"\n Downloads the zipped data set specified at the given URL, saving it to\n the output path 
specified. This function verifies the download with the\n given signature (if supplied) and extracts the zip file if requested.\n \"\"\"\n if requests is None:\n raise ImportError(\n \"The requests module is required to download data --\\n\"\n \"please install it with pip install requests.\"\n )\n\n # Create the output directory if it does not exist\n if not os.path.exists(path):\n os.mkdir(path)\n\n # Get the name of the file from the URL\n name = os.path.basename(url)\n dlpath = os.path.join(path, name)\n\n # Fetch the response in a streaming fashion and write it to disk.\n response = requests.get(url, stream=True)\n with open(dlpath, 'wb') as f:\n for chunk in response.iter_content(65536):\n f.write(chunk)\n\n # If verify, compare the signature\n if signature is not None:\n dlsignature = DatasetMixin.sha256sum(dlpath)\n if signature != dlsignature:\n raise ValueError(\n \"Download signature does not match hardcoded signature!\"\n )\n\n # If extract, extract the zipfile.\n if extract:\n zf = zipfile.ZipFile(dlpath)\n zf.extractall(path)\n\n\n @staticmethod\n def download_all(path=FIXTURES, verify=True, extract=True):\n \"\"\"\n Downloads all the example datasets. If verify is True then compare the\n download signature with the hardcoded signature. If extract is True then\n extract the contents of the zipfile to the given path.\n \"\"\"\n for name, meta in DATASETS.items():\n url = meta['url']\n signature = meta['signature'] if verify else None\n\n DatasetMixin.download_data(\n url, path=path, signature=signature, extract=extract\n )\n\n @staticmethod\n def remove_all(fixtures=FIXTURES):\n \"\"\"\n Removes all the downloaded datasets as clean up\n \"\"\"\n shutil.rmtree(fixtures)\n\n @staticmethod\n def load_data(name, fixtures=FIXTURES):\n \"\"\"\n Loads the numpy matrix from the specified data set, downloads it if\n it hasn't already been downloaded.\n \"\"\"\n # Just in case this is a corpus data set, then do that instead.\n if DATASETS[name]['type'] == 'corpus':\n return DatasetMixin.load_corpus(name, fixtures)\n\n path = os.path.join(fixtures, name, \"{}.csv\".format(name))\n if not os.path.exists(path):\n DatasetMixin.download_all(path=fixtures)\n\n return np.genfromtxt(path, dtype=float, delimiter=',', names=True)\n\n @staticmethod\n def load_corpus(name, fixtures=FIXTURES):\n \"\"\"\n Loads a sklearn Bunch with the corpus and downloads it if it hasn't\n already been downloaded. Used to test text visualizers.\n \"\"\"\n path = os.path.join(fixtures, name)\n if not os.path.exists(path):\n DatasetMixin.download_all(path=fixtures)\n\n # Read the directories in the directory as the categories.\n categories = [\n cat for cat in os.listdir(path)\n if os.path.isdir(os.path.join(path, cat))\n ]\n\n files = [] # holds the file names relative to the root\n data = [] # holds the text read from the file\n target = [] # holds the string of the category\n\n # Load the data from the files in the corpus\n for cat in categories:\n for name in os.listdir(os.path.join(path, cat)):\n files.append(os.path.join(path, cat, name))\n target.append(cat)\n\n with open(os.path.join(path, cat, name), 'r') as f:\n data.append(f.read())\n\n # Return the data bunch for use similar to the newsgroups example\n return Bunch(\n categories=categories,\n files=files,\n data=data,\n target=target,\n )\n"
] | [
[
"numpy.genfromtxt",
"sklearn.datasets.base.Bunch"
]
] |
takluyver/xray | [
"80c30ae343a2171c541da0387fed3926004030a7"
] | [
"test/test_conventions.py"
] | [
"import numpy as np\nimport pandas as pd\nfrom datetime import datetime\nimport warnings\n\nfrom xray import conventions\nfrom . import TestCase, requires_netCDF4\n\n\nclass TestMaskedAndScaledArray(TestCase):\n def test(self):\n x = conventions.MaskedAndScaledArray(np.arange(3), fill_value=0)\n self.assertEqual(x.dtype, np.dtype('float'))\n self.assertEqual(x.shape, (3,))\n self.assertEqual(x.size, 3)\n self.assertEqual(x.ndim, 1)\n self.assertEqual(len(x), 3)\n self.assertArrayEqual([np.nan, 1, 2], x)\n\n x = conventions.MaskedAndScaledArray(np.arange(3), add_offset=1)\n self.assertArrayEqual(np.arange(3) + 1, x)\n\n x = conventions.MaskedAndScaledArray(np.arange(3), scale_factor=2)\n self.assertArrayEqual(2 * np.arange(3), x)\n\n x = conventions.MaskedAndScaledArray(np.array([-99, -1, 0, 1, 2]), -99, 0.01, 1)\n expected = np.array([np.nan, 0.99, 1, 1.01, 1.02])\n self.assertArrayEqual(expected, x)\n\n def test_0d(self):\n x = conventions.MaskedAndScaledArray(np.array(0), fill_value=0)\n self.assertTrue(np.isnan(x))\n self.assertTrue(np.isnan(x[...]))\n\n x = conventions.MaskedAndScaledArray(np.array(0), fill_value=10)\n self.assertEqual(0, x[...])\n\n\nclass TestCharToStringArray(TestCase):\n def test(self):\n array = np.array(list('abc'))\n actual = conventions.CharToStringArray(array)\n expected = np.array('abc')\n self.assertEqual(actual.dtype, expected.dtype)\n self.assertEqual(actual.shape, expected.shape)\n self.assertEqual(actual.size, expected.size)\n self.assertEqual(actual.ndim, expected.ndim)\n with self.assertRaises(TypeError):\n len(actual)\n self.assertArrayEqual(expected, actual)\n with self.assertRaises(IndexError):\n actual[:2]\n self.assertEqual(str(actual), 'abc')\n\n array = np.array([list('abc'), list('cdf')])\n actual = conventions.CharToStringArray(array)\n expected = np.array(['abc', 'cdf'])\n self.assertEqual(actual.dtype, expected.dtype)\n self.assertEqual(actual.shape, expected.shape)\n self.assertEqual(actual.size, expected.size)\n self.assertEqual(actual.ndim, expected.ndim)\n self.assertEqual(len(actual), len(expected))\n self.assertArrayEqual(expected, actual)\n self.assertArrayEqual(expected[:1], actual[:1])\n with self.assertRaises(IndexError):\n actual[:, :2]\n\n\nclass TestDatetime(TestCase):\n @requires_netCDF4\n def test_cf_datetime(self):\n import netCDF4 as nc4\n for num_dates, units in [\n (np.arange(100), 'days since 2000-01-01'),\n (np.arange(100).reshape(10, 10), 'days since 2000-01-01'),\n (12300 + np.arange(50), 'hours since 1680-01-01 00:00:00'),\n (10, 'days since 2000-01-01'),\n ([10], 'days since 2000-01-01'),\n ([[10]], 'days since 2000-01-01'),\n ([10, 10], 'days since 2000-01-01'),\n (0, 'days since 1000-01-01'),\n ([0], 'days since 1000-01-01'),\n ([[0]], 'days since 1000-01-01'),\n (np.arange(20), 'days since 1000-01-01'),\n (np.arange(0, 100000, 10000), 'days since 1900-01-01')\n ]:\n for calendar in ['standard', 'gregorian', 'proleptic_gregorian']:\n expected = nc4.num2date(num_dates, units, calendar)\n actual = conventions.decode_cf_datetime(num_dates, units, calendar)\n if (isinstance(actual, np.ndarray)\n and np.issubdtype(actual.dtype, np.datetime64)):\n self.assertEqual(actual.dtype, np.dtype('M8[ns]'))\n # For some reason, numpy 1.8 does not compare ns precision\n # datetime64 arrays as equal to arrays of datetime objects,\n # but it works for us precision. 
Thus, convert to us\n # precision for the actual array equal comparison...\n actual_cmp = actual.astype('M8[us]')\n else:\n actual_cmp = actual\n self.assertArrayEqual(expected, actual_cmp)\n encoded, _, _ = conventions.encode_cf_datetime(actual, units, calendar)\n self.assertArrayEqual(num_dates, np.around(encoded))\n if (hasattr(num_dates, 'ndim') and num_dates.ndim == 1\n and '1000' not in units):\n # verify that wrapping with a pandas.Index works\n # note that it *does not* currently work to even put\n # non-datetime64 compatible dates into a pandas.Index :(\n encoded, _, _ = conventions.encode_cf_datetime(\n pd.Index(actual), units, calendar)\n self.assertArrayEqual(num_dates, np.around(encoded))\n\n @requires_netCDF4\n def test_cf_datetime_nan(self):\n for num_dates, units, expected_list in [\n ([np.nan], 'days since 2000-01-01', ['NaT']),\n ([np.nan, 0], 'days since 2000-01-01',\n ['NaT', '2000-01-01T00:00:00Z']),\n ([np.nan, 0, 1], 'days since 2000-01-01',\n ['NaT', '2000-01-01T00:00:00Z', '2000-01-02T00:00:00Z']),\n ]:\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore', 'All-NaN')\n actual = conventions.decode_cf_datetime(num_dates, units)\n expected = np.array(expected_list, dtype='datetime64[ns]')\n self.assertArrayEqual(expected, actual)\n\n def test_guess_time_units(self):\n for dates, expected in [(pd.date_range('1900-01-01', periods=5),\n 'days since 1900-01-01 00:00:00'),\n (pd.date_range('1900-01-01 12:00:00', freq='H',\n periods=2),\n 'hours since 1900-01-01 12:00:00'),\n (['1900-01-01', '1900-01-02',\n '1900-01-02 00:00:01'],\n 'seconds since 1900-01-01 00:00:00')]:\n self.assertEquals(expected, conventions.guess_time_units(dates))\n"
] | [
[
"pandas.date_range",
"numpy.dtype",
"numpy.issubdtype",
"numpy.arange",
"numpy.isnan",
"numpy.array",
"numpy.around",
"pandas.Index"
]
] |
Gabvaztor/tensorflowCode | [
"e206ea4544552b87c2d43274cea3182f6b385a87"
] | [
"src/examples/animations/AnimationGif.py"
] | [
"#IMPORTAMOS LIBRERIAS.\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport animatplot as amp\n\n#INTRODUCIMOS DATOS.\nx = np.linspace(0, 1, 50)\nt = np.linspace(0, 1, 20)\n\n\nX, T = np.meshgrid(x, t)\nY = np.zeros(int(51*(X+T)))\n\n#CREAMOS OBJETO \"timeline\".\ntimeline = amp.Timeline(t, units='s', fps=60)\n\n#GENERAMOS ANIMACIÓN.\nblock = amp.blocks.Line(X, Y, marker=\".\", linestyle=\"-\", color=\"r\")\nanim = amp.Animation([block],timeline)\n\n#DEFINICIÓN DE ETIQUETAS PARA TITULO Y EJES.\nplt.title(\"Sine Wave\")\nplt.xlabel(\"x\")\nplt.ylabel(\"y\")\n\n#GUARDAMOS ANIMACIÓN.\n#anim.save_gif('graph_anim.gif')\n\n#INTRODUCIMOS LÍNEA DE TIEMPO\n#Y BOTÓN PAUSE/PLAY\nanim.controls()\n\n#REPRESENTAMOS GRÁFICA.\nplt.show()"
] | [
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"numpy.meshgrid",
"numpy.linspace",
"matplotlib.pyplot.xlabel"
]
] |
edgargmartinez/OpenPNM | [
"c68745993b3e9895f53938164a9cf6305500748e"
] | [
"tests/unit/models/physics/MeniscusTest.py"
] | [
"import openpnm as op\nimport openpnm.models.physics as pm\nimport scipy as sp\n\n\nclass MeniscusTest:\n\n def setup_class(self):\n sp.random.seed(1)\n self.net = op.network.Cubic(shape=[5, 1, 5], spacing=5e-5)\n self.geo = op.geometry.StickAndBall(network=self.net,\n pores=self.net.pores(),\n throats=self.net.throats())\n self.phase = op.phases.Water(network=self.net)\n self.phys = op.physics.Standard(network=self.net,\n phase=self.phase,\n geometry=self.geo)\n\n def test_toroidal_touch(self):\n phys = self.phys\n r_tor = 1e-6\n self.geo['throat.touch_length'] = 2e-6\n phys.add_model(propname='throat.tor_max',\n model=pm.meniscus.purcell,\n mode='max',\n r_toroid=r_tor)\n phys.add_model(propname='throat.tor_touch',\n model=pm.meniscus.purcell,\n mode='touch',\n r_toroid=r_tor)\n assert sp.any(phys['throat.tor_touch'] < phys['throat.tor_max'])\n\n def test_sinusoidal_touch(self):\n phys = self.phys\n self.geo['throat.amplitude'] = 5e-6\n self.geo['throat.touch_length'] = 1e-6\n phys.add_model(propname='throat.sin_pressure_max',\n model=pm.meniscus.sinusoidal,\n mode='max')\n phys.add_model(propname='throat.sin_pressure_touch',\n model=pm.meniscus.sinusoidal,\n mode='touch')\n h = phys.check_data_health()\n for check in h.values():\n if len(check) > 0:\n assert 1 == 2\n assert sp.any((phys['throat.sin_pressure_touch'] <\n phys['throat.sin_pressure_max']))\n\n def test_sinusoidal(self):\n phys = self.phys\n self.geo['throat.amplitude'] = 5e-6\n phys.add_model(propname='throat.sin_pressure',\n model=pm.meniscus.sinusoidal,\n mode='max')\n phys.add_model(propname='throat.sin_meniscus',\n model=pm.meniscus.sinusoidal,\n mode='men',\n target_Pc=5000)\n h = phys.check_data_health()\n for check in h.values():\n if len(check) > 0:\n assert 1 == 2\n\n def test_toroidal(self):\n phys = self.phys\n r_tor = 1e-6\n phys.add_model(propname='throat.purcell_pressure',\n model=pm.capillary_pressure.purcell,\n r_toroid=r_tor)\n phys.add_model(propname='throat.tor_pressure',\n model=pm.meniscus.purcell,\n mode='max',\n r_toroid=r_tor,\n num_points=1000)\n phys.add_model(propname='throat.tor_meniscus',\n model=pm.meniscus.purcell,\n mode='men',\n r_toroid=r_tor,\n target_Pc=5000)\n a = sp.around(phys['throat.purcell_pressure'], 10)\n b = sp.around(phys['throat.tor_pressure'], 10)\n assert sp.allclose(a, b)\n h = phys.check_data_health()\n for check in h.values():\n if len(check) > 0:\n assert 1 == 2\n\n def test_general_toroidal(self):\n phys = self.phys\n r_tor = 1e-6\n phys.add_model(propname='throat.purcell_pressure',\n model=pm.capillary_pressure.purcell,\n r_toroid=r_tor)\n phys['throat.scale_a'] = r_tor\n phys['throat.scale_b'] = r_tor\n phys.add_model(propname='throat.general_pressure',\n model=pm.meniscus.general_toroidal,\n mode='max',\n num_points=1000)\n a = sp.around(phys['throat.purcell_pressure'], 10)\n b = sp.around(phys['throat.general_pressure'], 10)\n assert sp.allclose(a, b)\n h = phys.check_data_health()\n for check in h.values():\n if len(check) > 0:\n assert 1 == 2\n\n\nif __name__ == '__main__':\n\n t = MeniscusTest()\n self = t\n t.setup_class()\n for item in t.__dir__():\n if item.startswith('test'):\n print('running test: '+item)\n t.__getattribute__(item)()\n"
] | [
[
"scipy.any",
"scipy.random.seed",
"scipy.allclose",
"scipy.around"
]
] |
naver/cog | [
"5b34ca90757116b9cfae11d8838927ba73e1ede8"
] | [
"logreg.py"
] | [
"# ImageNet-CoG Benchmark\n# Copyright 2021-present NAVER Corp.\n# 3-Clause BSD License\n\nimport argparse\nimport copy\nimport logging\nimport math\nimport os\nimport shutil\nimport time\n\nimport optuna\nimport torch as th\n\nimport feature_ops\nimport metrics\nimport utils\nfrom iterators import TorchIterator\nfrom meters import AverageMeter, ProgressMeter\n\nlogger = logging.getLogger()\n\n\nclass LogReg:\n \"\"\"\n Logistic regression classifier with mini-batch SGD.\n \"\"\"\n\n def __init__(self, args, cfg):\n self.args = args\n self.cfg = cfg\n\n # load the training set features\n trainset = feature_ops.load_feature_set(\n args.train_features_path, \"train\", cfg.CLF.NORM_FTS\n )\n\n if args.val:\n # randomly split the training set into train + val\n logger.info(\"Splitting the training set into train and val\")\n trainset, testset = feature_ops.split_trainset(trainset, cfg.CLF.VAL_PERC)\n else:\n # load the test set\n testset = feature_ops.load_feature_set(args.test_features_path, \"test\", cfg.CLF.NORM_FTS)\n\n if cfg.CLF.N_SHOT > 0:\n logger.info(\n \"Simulating few-shot learning setting, {} images per class.\".format(\n cfg.CLF.N_SHOT\n )\n )\n trainset = feature_ops.make_fewshot_dataset(trainset, cfg.CLF.N_SHOT)\n\n self.trainset = trainset\n self.testset = testset\n self.trainset.print_info()\n self.testset.print_info()\n\n # determine number of cases\n if len(list(self.trainset.y.shape)) == 1:\n classes = th.unique(self.trainset.y)\n assert th.all(classes == th.unique(self.testset.y))\n args.n_classes = classes.size(0)\n\n # move all features to the device\n if args.device == \"cuda\":\n feature_ops.move_data_to_cuda([self.trainset, self.testset])\n\n def __call__(self, trial=None):\n \"\"\"\n The function called by Optuna.\n \"\"\"\n # empty the cache allocated in the previous call\n th.cuda.empty_cache()\n\n args = copy.deepcopy(self.args)\n cfg = self.cfg\n\n x_train = self.trainset.x\n y_train = self.trainset.y\n x_test = self.testset.x\n y_test = self.testset.y\n\n # create training and test set iterators\n train_iter = TorchIterator((x_train, y_train), cfg.CLF.BATCH_SIZE, shuffle=True)\n test_iter = TorchIterator((x_test, y_test), cfg.CLF.BATCH_SIZE, shuffle=False)\n\n # define logistic classifier\n model = th.nn.Linear(x_train.size(1), args.n_classes).to(args.device)\n crit = th.nn.CrossEntropyLoss().to(args.device)\n\n # sample a learning rate and weight decay\n if trial is not None:\n lr_intv = cfg.CLF.LR_INTV\n wd_intv = cfg.CLF.WD_INTV\n args.lr = trial.suggest_loguniform(\"lr\", lr_intv[0], lr_intv[1])\n args.wd = trial.suggest_loguniform(\"wd\", wd_intv[0], wd_intv[1])\n optim = th.optim.SGD(\n model.parameters(), lr=args.lr, momentum=args.mom, weight_decay=args.wd\n )\n\n args.exp_dir = os.path.join(\n args.output_dir,\n \"{}-lr-{}_wd-{}\".format(\"val\" if args.val else \"final\", args.lr, args.wd),\n )\n os.makedirs(args.exp_dir, exist_ok=True)\n\n # write the model definition into exp_dir\n utils.write_to_file(str(model), os.path.join(args.exp_dir, \"model.txt\"))\n\n # logs computed during training / evaluation\n args.logs = {\n \"train/loss\": [],\n \"train/top1\": [],\n \"train/top5\": [],\n \"test/loss\": [],\n \"test/top1\": [],\n \"test/top5\": [],\n \"lr\": [],\n }\n\n # predictions over the evaluation sets\n args.preds = []\n\n for epoch in range(cfg.CLF.N_EPOCHS):\n if not args.val:\n logger.info(f\"**Epoch:{epoch}**\")\n args.epoch = epoch\n train_stat = train(train_iter, model, crit, optim, epoch, args)\n validate(test_iter, model, crit, args)\n 
adjust_learning_rate(optim, args, cfg)\n\n # if something went wrong during training\n # e.g. SGD diverged\n if train_stat == -1:\n break\n\n # save the logs\n utils.save_pickle(args.logs, f\"{args.exp_dir}/logs.pkl\")\n\n # save the predictions\n utils.save_pickle(args.preds, f\"{args.exp_dir}/preds.pkl\")\n\n # save the whole args, for ease of access\n utils.save_pickle(vars(args), f\"{args.exp_dir}/args.pkl\")\n\n # save also the final model\n th.save(\n {\n \"model\": model.state_dict(),\n },\n f\"{args.exp_dir}/model.pth\",\n )\n\n # return the last test accuracy\n return args.logs[\"test/top1\"][-1]\n\n\ndef train(train_loader, model, criterion, optimizer, epoch, args):\n \"\"\"\n Train the classifier for one epoch.\n \"\"\"\n batch_time = AverageMeter(\"Time\", \":6.3f\")\n losses = AverageMeter(\"Loss\", \":.4e\")\n top1 = AverageMeter(\"Acc@1\", \":6.2f\")\n top5 = AverageMeter(\"Acc@5\", \":6.2f\")\n progress = ProgressMeter(\n len(train_loader),\n [batch_time, losses, top1, top5],\n prefix=\"Epoch: [{}]\".format(epoch),\n )\n\n # switch to train mode\n model.train()\n\n end = time.time()\n for i, (fts, lbls) in enumerate(train_loader):\n fts = fts.to(args.device)\n lbls = lbls.to(args.device)\n\n # compute output\n output = model(fts)\n loss = criterion(output, lbls)\n\n if not th.isfinite(loss):\n logger.info(\"Loss ({}) is not finite, terminating\".format(loss.item()))\n optimizer.zero_grad()\n return -1\n\n # measure accuracy and record loss\n acc1, acc5 = metrics.accuracy(output, lbls, topk=(1, 5))\n losses.update(loss.item(), fts.size(0))\n top1.update(acc1.item(), fts.size(0))\n top5.update(acc5.item(), fts.size(0))\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if (not args.val) and (i % args.print_freq == 0):\n progress.display(i)\n\n args.logs[\"train/loss\"].append(losses.avg)\n args.logs[\"train/top1\"].append(top1.avg)\n args.logs[\"train/top5\"].append(top5.avg)\n return 0\n\n\ndef validate(val_loader, model, criterion, args):\n losses = AverageMeter(\"Loss\", \":.4e\")\n top1 = AverageMeter(\"Acc@1\", \":6.2f\")\n top5 = AverageMeter(\"Acc@5\", \":6.2f\")\n\n # switch to evaluate mode\n model.eval()\n\n # keep predictions per class\n preds = th.ones(len(val_loader.tensors[0]), dtype=th.int32, device=args.device) * -1.\n six = 0\n\n with th.no_grad():\n for i, (fts, lbls) in enumerate(val_loader):\n fts = fts.to(args.device)\n lbls = lbls.to(args.device)\n bs = fts.size(0)\n\n # compute output\n output = model(fts)\n loss = criterion(output, lbls)\n\n # store the predicted classes\n preds[six:six + bs] = th.argmax(output, dim=1)\n six += bs\n\n # measure accuracy and record loss\n acc1, acc5 = metrics.accuracy(output, lbls, topk=(1, 5))\n losses.update(loss.item(), bs)\n top1.update(acc1[0].item(), bs)\n top5.update(acc5[0].item(), bs)\n\n # make sure that there is no invalid prediction\n assert th.all(preds >= 0).item()\n args.preds.append(preds.detach().cpu())\n\n args.logs[\"test/loss\"].append(losses.avg)\n args.logs[\"test/top1\"].append(top1.avg)\n args.logs[\"test/top5\"].append(top5.avg)\n\n if not args.val:\n logger.info(\n \" * Acc@1:{top1.avg:.3f} - Acc@5:{top5.avg:.3f}\".format(\n top1=top1, top5=top5\n )\n )\n\n\ndef adjust_learning_rate(optimizer, args, cfg):\n \"\"\"Decay the learning rate based on cosine schedule\"\"\"\n lr = args.lr\n lr *= 0.5 * (1.0 + math.cos(math.pi * args.epoch / cfg.CLF.N_EPOCHS))\n for 
param_group in optimizer.param_groups:\n param_group[\"lr\"] = lr\n args.logs[\"lr\"].append(lr)\n\n\ndef save_checkpoint(state, is_best, filename=\"checkpoint.pth.tar\"):\n th.save(state, filename)\n if is_best:\n shutil.copyfile(filename, \"model_best.pth.tar\")\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--model', type=utils.none_or_string_flag,\n help='Name of the model in the <model_title>_<architecture_name> form.'\n 'See the table of models in ./prepare_models/README.md for all the model names we support.'\n 'This is an optional argument that needs to be set along with --models_root_dir and --dataset.'\n 'When these three arguments are set, the script will load features from:'\n '<models_root_dir>/<model_title>/<architecture_name>/<dataset>/features_*/X_Y.pth.'\n 'If you would like to load pre-extracted features from somewhere else'\n 'then ignore this argument and provide the --train_features_dir and --test_features_dir arguments accordingly')\n parser.add_argument('--models_root_dir', type=utils.none_or_string_flag,\n help='Root directory for all models, see prepare_models/README.md for a detailed explanation.'\n 'This is an optional argument that needs to be set along with --model and --dataset.'\n 'Please see the help message for the --model argument as well.')\n parser.add_argument(\"--dataset\", type=utils.none_or_string_flag,\n help=\"On which dataset to learn classifiers\"\n 'Possible values are (\"in1k\", \"cog_l1\", \"cog_l2\", \"cog_l3\", \"cog_l4\", \"cog_l5\")'\n 'This is an optional argument that needs to be set along with --models_root_dir and --model.'\n 'Please see the help message for the --model argument as well.')\n parser.add_argument('--train_features_dir', type=utils.none_or_string_flag,\n help='Path to the directory containing pre-extracted training set features.'\n 'We expect a features file \"X_Y.pth\" under <train_features_dir>.'\n 'This is an optional argument that needs to be set if --models_root_dir, --model and --dataset are not set.')\n parser.add_argument('--test_features_dir', type=utils.none_or_string_flag,\n help='Path to the directory containing pre-extracted test set features.'\n 'We expect a features file \"X_Y.pth\" under <test_features_dir>.'\n 'This is an optional argument that needs to be set if --models_root_dir, --model and --dataset are not set.')\n parser.add_argument('--output_dir', type=utils.none_or_string_flag,\n help='Where to log program logs.'\n 'This is an optional argument that needs to be set if --models_root_dir is not set.'\n 'If not provided, we try to save the logs under'\n '<models_root_dir>/<model_title>/<architecture_name>/<dataset>/eval_logreg/seed*')\n # learning rate and momentum are tuned in this program, do not manually set.\n parser.add_argument(\"--lr\", type=float, default=0.0, help=\"initial learning rate\")\n parser.add_argument(\"--wd\", type=float, default=0.0, help=\"weight decay\")\n parser.add_argument(\"--mom\", type=float, default=0.9, help=\"momentum\")\n # program-related options\n parser.add_argument(\"--print_freq\", default=100, type=int, help=\"print frequency (default: 10)\")\n parser.add_argument(\"--device\", type=str, default=\"cuda\")\n # optionally to overwrite the default config\n parser.add_argument(\"opts\", default=None,\n help=\"see configs/default.py for all options\",\n nargs=argparse.REMAINDER)\n args = parser.parse_args()\n\n if args.device == \"cuda\" and not th.cuda.is_available():\n print(\"CUDA is not available, I will run on 
CPU.\")\n args.device = \"cpu\"\n\n # load the config file\n # create output directory,\n # locate pre-extracted features,\n # initialize program logger,\n # save args and cfg\n # this function sets the following arg variables:\n # - train_features_path, type=str\n # - test_features_path, type=str\n # - output_dir, type=str\n args, cfg = utils.init_program(args, _for=\"logreg\")\n\n # tune hyper-parameters with optuna\n logger.info(\"Running Optuna...\")\n hps_sampler = optuna.samplers.TPESampler(multivariate=True, seed=cfg.EVAL.SEED)\n study = optuna.create_study(sampler=hps_sampler, direction=\"maximize\")\n\n args.val = True\n logreg = LogReg(args, cfg)\n study.optimize(logreg, n_trials=cfg.CLF.N_TRIALS, n_jobs=1, show_progress_bar=False)\n utils.save_pickle(study, os.path.join(args.output_dir, \"study.pkl\"))\n\n logger.info(\"\")\n logger.info(\"*\" * 50)\n logger.info(\"Hyper-parameter search ended\")\n logger.info(\"best_trial:\")\n logger.info(str(study.best_trial))\n logger.info(\"best_params:\")\n logger.info(str(study.best_params))\n logger.info(\"*\" * 50)\n logger.info(\"\")\n\n # train the final classifier with the tuned hyper-parameters\n del logreg\n th.cuda.empty_cache()\n args.lr = study.best_params[\"lr\"]\n args.wd = study.best_params[\"wd\"]\n args.val = False\n logreg = LogReg(args, cfg)\n logreg()\n"
] | [
[
"torch.cuda.empty_cache",
"torch.argmax",
"torch.save",
"torch.no_grad",
"torch.nn.CrossEntropyLoss",
"torch.all",
"torch.cuda.is_available",
"torch.unique",
"torch.isfinite"
]
] |
RomainClaret/msc.ml.labs | [
"4e6b8e1c1ab841ab8ebbaee13f6ae43e9a1c44a5"
] | [
"lab4/predict_income_romain_claret_and_sylvain_robert-nicoud_lab4.py"
] | [
"#!/usr/bin/env python3\n# 12.04.21\n# Assignment lab 04\n\n# Master Class: Machine Learning (5MI2018)\n# Faculty of Economic Science\n# University of Neuchatel (Switzerland)\n# Lab 4, see ML21_Exercise_4.pdf for more information\n\n# https://github.com/RomainClaret/msc.ml.labs\n\n# Authors: \n# - Romain Claret @RomainClaret\n# - Sylvain Robert-Nicoud @Nic0uds\n\nimport warnings\nimport pickle\nimport pandas as pd\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.metrics import accuracy_score\n\nwarnings.filterwarnings(\"ignore\")\n\n\n# SPLITING ADULT.TEST FILE IN SUBFILES\n#spliting the adult.test file into several files to simulate weeks\n\nfilename = 'adult.test'\nfile_handler = open(filename, 'r').readlines()[1:]\nprefix_file = \"adult_2021_cw_\"\nweek_number = 1\nsplit_into = 10\nline_count = 0\nfile_length = len(file_handler)\n\nfor i in range(0,file_length):\n if i % ((file_length)//split_into) == 0 and i+((file_length//split_into)//2) < file_length:\n open(str(prefix_file)+str(week_number) + \".csv\", \"w+\").writelines(file_handler[i:i+(file_length//split_into)])\n week_number += 1\n\n\n# RUN PIPELINE MODEL FROM OTHER FILE\n#input file, and save the predictions into a different file.\n#Example:\n#Let's say you have the input data weekly in the file adult_2021_cw_12.csv.\n#This second script should read the input from this file and use the classifier to make predictions and write those predictions in the file adult_2021_cw_12_pred.csv .\n\n# load pipeline model\npipeline_model = pickle.load( open(\"grid_search_model.pickle\", \"rb\" ))\n\nweeks_count = 10\nfilename = 'adult.test'\nprefix_file = \"adult_2021_cw_\"\n\n# get the features names and the values of the categories from adult.names (build a dictionary)\ndata_dict = {}\nwith open('adult.names') as f:\n for l in f:\n if l[0] == '|' or ':' not in l: continue\n c = l.split(':')\n if c[1].startswith(' continuous'): data_dict[c[0]] = \"\"\n else: data_dict[c[0]] = c[1].replace(\"\\n\",\"\").replace(\".\",\"\").replace(\" \",\"\").split(\",\")\n \nheader = list(data_dict.keys())+['income']\n\n# for each week based on a count and a naming convention\nfor i in range (weeks_count):\n filename = str(prefix_file)+str(i+1)+\".csv\"\n df_weekly = pd.read_table(filename, sep=r',\\s', na_values='?', skiprows=[0], header=None, names=header).dropna()\n \n drop_list = [\"education\", \"occupation\", \"relationship\"]\n df_weekly = df_weekly.drop(columns=drop_list)\n \n dict_replace = {\n 'marital-status' : {\n 'Never-married': 'Not-Married',\n 'Married-civ-spouse': 'Married',\n 'Divorced': 'Not-Married',\n 'Married-spouse-absent': 'Married',\n 'Separated': 'Married',\n 'Married-AF-spouse': 'Married',\n 'Widowed': 'Not-Married'\n },\n 'workclass': {\n 'State-gov': 'Government',\n 'Self-emp-not-inc': 'Self-Employment',\n 'Federal-gov': 'Government',\n 'Local-gov': 'Government',\n 'Self-emp-inc': 'Self-Employment'\n }\n }\n\n df_weekly.replace(dict_replace, inplace=True)\n \n df_weekly[\"income\"].replace({\"<=50K.\": \"<=50K\", \">50K.\": \">50K\"}, inplace=True)\n \n for l in [\"marital-status\", \"sex\", \"income\"]:\n l_enc = LabelEncoder()\n encoder_weekly = l_enc.fit(df_weekly[l])\n df_weekly[\"encoded_\"+l] = encoder_weekly.transform(df_weekly[l])\n \n y_hat_dtree_weekly = pipeline_model.predict(df_weekly)\n \n pref_filename = str(prefix_file)+str(i+1)+\"_pred.csv\"\n print(pref_filename, \"accuracy_score:\",accuracy_score(df_weekly[\"encoded_income\"],y_hat_dtree_weekly),\"\\n\")\n \n # save the prediction into file\n 
pd.DataFrame(y_hat_dtree_weekly).to_csv(str(pref_filename),header=[\"pred_income\"], index=None)\n \n # lab 03 results:\n # adult_2021_cw_1.csv accuracy_score: 0.8293736501079914 \n # adult_2021_cw_2.csv accuracy_score: 0.8503253796095445 \n # adult_2021_cw_3.csv accuracy_score: 0.8427807486631016 \n # adult_2021_cw_4.csv accuracy_score: 0.8307860262008734 \n # adult_2021_cw_5.csv accuracy_score: 0.8507462686567164 \n # adult_2021_cw_6.csv accuracy_score: 0.854978354978355 \n # adult_2021_cw_7.csv accuracy_score: 0.8545454545454545 \n # adult_2021_cw_8.csv accuracy_score: 0.8514531754574811 \n # adult_2021_cw_9.csv accuracy_score: 0.8296943231441049 \n # adult_2021_cw_10.csv accuracy_score: 0.8574537540805223 "
] | [
[
"pandas.read_table",
"sklearn.metrics.accuracy_score",
"pandas.DataFrame",
"sklearn.preprocessing.LabelEncoder"
]
] |
YunYang1994/CodeFun | [
"36fcdbfb4ed55fbb8f8dbc6f900842cc7bb9f068"
] | [
"detect_image.py"
] | [
"#! /usr/bin/env python\n# coding=utf-8\n#================================================================\n# Copyright (C) 2020 * Ltd. All rights reserved.\n#\n# Editor : VIM\n# File name : detect_image.py\n# Author : YunYang1994\n# Created date: 2020-03-19 14:05:53\n# Description :\n#\n#================================================================\n\n\nimport os\nimport cv2\nimport time\nimport numpy as np\nimport tensorflow as tf\n\nfrom PIL import Image, ImageFont, ImageDraw\nfrom mtcnn import pnet, rnet, onet\nfrom models import IResnet\nfrom utils import detect_face, align_face, recognize_face\n\nmodel = IResnet(tflite_model=\"IResnet.tflite\")\nfont = ImageFont.truetype('weghts/HuaWenXinWei-1.ttf', 30)\nimage = cv2.imread(\"/Users/yangyun/多人照片/5.jpg\")\n\nimage_h, image_w, _ = image.shape\n\norg_image = image.copy()\nimage = cv2.cvtColor(image ,cv2.COLOR_BGR2RGB)\ntotal_boxes, points = detect_face(image, 20, pnet, rnet, onet, [0.6, 0.7, 0.9], 0.709)\n\nfor idx, (bounding_box, keypoints) in enumerate(zip(total_boxes, points.T)):\n bounding_boxes = {\n 'box': [int(bounding_box[0]), int(bounding_box[1]),\n int(bounding_box[2]-bounding_box[0]), int(bounding_box[3]-bounding_box[1])],\n 'confidence': bounding_box[-1],\n 'keypoints': {\n 'left_eye': (int(keypoints[0]), int(keypoints[5])),\n 'right_eye': (int(keypoints[1]), int(keypoints[6])),\n 'nose': (int(keypoints[2]), int(keypoints[7])),\n 'mouth_left': (int(keypoints[3]), int(keypoints[8])),\n 'mouth_right': (int(keypoints[4]), int(keypoints[9])),\n }\n }\n\n bounding_box = bounding_boxes['box']\n keypoints = bounding_boxes['keypoints']\n\n cv2.circle(org_image,(keypoints['left_eye']), 2, (255,0,0), 3)\n cv2.circle(org_image,(keypoints['right_eye']), 2, (255,0,0), 3)\n cv2.circle(org_image,(keypoints['nose']), 2, (255,0,0), 3)\n cv2.circle(org_image,(keypoints['mouth_left']), 2, (255,0,0), 3)\n cv2.circle(org_image,(keypoints['mouth_right']),2, (255,0,0), 3)\n cv2.rectangle(org_image,\n (bounding_box[0], bounding_box[1]),\n (bounding_box[0]+bounding_box[2], bounding_box[1] + bounding_box[3]),\n (0,255,0), 2)\n # align face and extract it out\n align_image = align_face(image, keypoints)\n\n marigin = 16\n xmin = max(bounding_box[0] - marigin, 0)\n ymin = max(bounding_box[1] - marigin, 0)\n xmax = min(bounding_box[0] + bounding_box[2] + marigin, image_w)\n ymax = min(bounding_box[1] + bounding_box[3] + marigin, image_h)\n\n crop_image = align_image[ymin:ymax, xmin:xmax, :]\n if crop_image is not None:\n t1 = time.time()\n embedding = model(crop_image)\n person = recognize_face(embedding)\n\n org_image_pil = Image.fromarray(org_image)\n draw = ImageDraw.Draw(org_image_pil)\n text_size = draw.textsize(person, font)\n draw.text((bounding_box[0], bounding_box[1]-16), person, fill=(0, 0, 255), font=font)\n org_image = np.array(org_image_pil)\n\n t2 = time.time()\n print(\"time: %.2fms\" %((t2-t1)*1000))\n\norg_image = cv2.cvtColor(org_image, cv2.COLOR_BGR2RGB)\nimage = Image.fromarray(org_image)\nimage.show()\n# image.save(\"test.png\")\n"
] | [
[
"numpy.array"
]
] |
lRomul/argus-bengali-ai | [
"e64374230f5390a17305769126ff4bfc9a2a8644"
] | [
"src/draw.py"
] | [
"import time\nimport random\nimport numpy as np\nfrom pathlib import Path\nfrom PIL import Image, ImageDraw, ImageFont, ImageFilter\n\nimport torch\nfrom torch.utils.data import Dataset\n\nfrom src import config\n\n\ndef draw_grapheme(grapheme, font_path, size=(137, 236)):\n height, width = size\n image = Image.new('RGB', (width, height))\n draw = ImageDraw.Draw(image)\n font_size = np.random.randint(70, 110)\n font = ImageFont.truetype(str(font_path), font_size)\n w, h = draw.textsize(grapheme, font=font)\n width_ratio = np.random.uniform(1.5, 2.5)\n height_ratio = np.random.uniform(2.5, 3.5)\n fill = np.random.randint(200, 255)\n draw.text(((width - w) / width_ratio, (height - h) / height_ratio),\n grapheme, font=font, fill=fill)\n image = image.filter(ImageFilter.BLUR)\n return np.array(image)[:, :, 0]\n\n\ndef get_draw_data():\n graphemes = []\n for grapheme_root_idx, grapheme_root in config.class_map['grapheme_root'].items():\n for vowel_diacritic_idx, vowel_diacritic in config.class_map['vowel_diacritic'].items():\n for consonant_diacritic_idx, consonant_diacritic in config.class_map['consonant_diacritic'].items():\n consonant_diacritic, grapheme_root, vowel_diacritic = [c if c != '0' else '' for c in\n [consonant_diacritic, grapheme_root,\n vowel_diacritic]]\n\n grapheme = consonant_diacritic + grapheme_root + vowel_diacritic\n graphemes.append({\n 'grapheme': grapheme,\n 'grapheme_root': grapheme_root_idx,\n 'vowel_diacritic': vowel_diacritic_idx,\n 'consonant_diacritic': consonant_diacritic_idx\n })\n return graphemes\n\n\nclass BengaliDrawDataset(Dataset):\n def __init__(self,\n fonts_dir,\n transform=None,\n mixer=None):\n self.fonts_dir = fonts_dir\n self.transform = transform\n self.mixer = mixer\n self.data = get_draw_data()\n self.font_paths = sorted(Path(fonts_dir).glob('*.ttf'))\n\n def __len__(self):\n return len(self.data)\n\n def get_sample(self, idx):\n sample = self.data[idx]\n\n font_path = np.random.choice(self.font_paths)\n image = draw_grapheme(sample['grapheme'], font_path,\n size=config.raw_image_shape)\n\n grapheme = torch.tensor(sample['grapheme_root'], dtype=torch.int64)\n vowel = torch.tensor(sample['vowel_diacritic'], dtype=torch.int64)\n consonant = torch.tensor(sample['consonant_diacritic'], dtype=torch.int64)\n target = grapheme, vowel, consonant\n\n return image, target\n\n def _set_random_seed(self, idx):\n seed = int(time.time() * 1000.0) + idx\n random.seed(seed)\n np.random.seed(seed % (2**32 - 1))\n\n @torch.no_grad()\n def __getitem__(self, idx):\n self._set_random_seed(idx)\n\n image, target = self.get_sample(idx)\n if self.mixer is not None:\n image, target = self.mixer(self, image, target)\n if self.transform is not None:\n image = self.transform(image)\n return image, target\n"
] | [
[
"numpy.random.uniform",
"torch.no_grad",
"torch.tensor",
"numpy.random.choice",
"numpy.random.seed",
"numpy.array",
"numpy.random.randint"
]
] |
dd-dos/Emotion-detection | [
"23eb94cbceb70890cf6b0f63e84d80eae7336c85"
] | [
"src/dataset_prepare.py"
] | [
"import numpy as np\nimport pandas as pd \nfrom PIL import Image\nfrom tqdm import tqdm\nimport os\n\n# convert string to integer\ndef atoi(s):\n n = 0\n for i in s:\n n = n*10 + ord(i) - ord(\"0\")\n return n\n\n# making folders\nouter_names = ['test','train']\ninner_names = ['angry', 'disgusted', 'fearful', 'happy', 'sad', 'surprised', 'neutral']\nos.makedirs('data', exist_ok=True)\nfor outer_name in outer_names:\n os.makedirs(os.path.join('data',outer_name), exist_ok=True)\n for inner_name in inner_names:\n os.makedirs(os.path.join('data',outer_name,inner_name), exist_ok=True)\n\n# to keep count of each category\nangry = 0\ndisgusted = 0\nfearful = 0\nhappy = 0\nsad = 0\nsurprised = 0\nneutral = 0\nangry_test = 0\ndisgusted_test = 0\nfearful_test = 0\nhappy_test = 0\nsad_test = 0\nsurprised_test = 0\nneutral_test = 0\n\ndf = pd.read_csv('./fer2013.csv')\nmat = np.zeros((48,48),dtype=np.uint8)\nprint(\"Saving images...\")\n\n# read the csv file line by line\nfor i in tqdm(range(len(df))):\n txt = df['pixels'][i]\n words = txt.split()\n \n # the image size is 48x48\n for j in range(2304):\n xind = j // 48\n yind = j % 48\n mat[xind][yind] = atoi(words[j])\n\n img = Image.fromarray(mat)\n # train\n if i < 28709:\n if df['emotion'][i] == 0:\n img.save('./data/train/angry/im'+str(angry)+'.png')\n angry += 1\n elif df['emotion'][i] == 1:\n img.save('./data/train/disgusted/im'+str(disgusted)+'.png')\n disgusted += 1\n elif df['emotion'][i] == 2:\n img.save('./data/train/fearful/im'+str(fearful)+'.png')\n fearful += 1\n elif df['emotion'][i] == 3:\n img.save('./data/train/happy/im'+str(happy)+'.png')\n happy += 1\n elif df['emotion'][i] == 4:\n img.save('./data/train/sad/im'+str(sad)+'.png')\n sad += 1\n elif df['emotion'][i] == 5:\n img.save('./data/train/surprised/im'+str(surprised)+'.png')\n surprised += 1\n elif df['emotion'][i] == 6:\n img.save('./data/train/neutral/im'+str(neutral)+'.png')\n neutral += 1\n\n # test\n else:\n if df['emotion'][i] == 0:\n img.save('./data/test/angry/im'+str(angry_test)+'.png')\n angry_test += 1\n elif df['emotion'][i] == 1:\n img.save('./data/test/disgusted/im'+str(disgusted_test)+'.png')\n disgusted_test += 1\n elif df['emotion'][i] == 2:\n img.save('./data/test/fearful/im'+str(fearful_test)+'.png')\n fearful_test += 1\n elif df['emotion'][i] == 3:\n img.save('./data/test/happy/im'+str(happy_test)+'.png')\n happy_test += 1\n elif df['emotion'][i] == 4:\n img.save('./data/test/sad/im'+str(sad_test)+'.png')\n sad_test += 1\n elif df['emotion'][i] == 5:\n img.save('./data/test/surprised/im'+str(surprised_test)+'.png')\n surprised_test += 1\n elif df['emotion'][i] == 6:\n img.save('./data/test/neutral/im'+str(neutral_test)+'.png')\n neutral_test += 1\n\nprint(\"Done!\")"
] | [
[
"pandas.read_csv",
"numpy.zeros"
]
] |
tzachar/addons | [
"e352207da32e4670a36a295ea477c476118cb0d9"
] | [
"tensorflow_addons/layers/normalizations.py"
] | [
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Orginal implementation from keras_contrib/layer/normalization\n# =============================================================================\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport logging\nimport tensorflow as tf\n\n\[email protected]_keras_serializable(package='Addons')\nclass GroupNormalization(tf.keras.layers.Layer):\n \"\"\"Group normalization layer.\n\n Group Normalization divides the channels into groups and computes\n within each group the mean and variance for normalization.\n Empirically, its accuracy is more stable than batch norm in a wide\n range of small batch sizes, if learning rate is adjusted linearly\n with batch sizes.\n\n Relation to Layer Normalization:\n If the number of groups is set to 1, then this operation becomes identical\n to Layer Normalization.\n\n Relation to Instance Normalization:\n If the number of groups is set to the\n input dimension (number of groups is equal\n to number of channels), then this operation becomes\n identical to Instance Normalization.\n\n Arguments\n groups: Integer, the number of groups for Group Normalization.\n Can be in the range [1, N] where N is the input dimension.\n The input dimension must be divisible by the number of groups.\n axis: Integer, the axis that should be normalized.\n epsilon: Small float added to variance to avoid dividing by zero.\n center: If True, add offset of `beta` to normalized tensor.\n If False, `beta` is ignored.\n scale: If True, multiply by `gamma`.\n If False, `gamma` is not used.\n beta_initializer: Initializer for the beta weight.\n gamma_initializer: Initializer for the gamma weight.\n beta_regularizer: Optional regularizer for the beta weight.\n gamma_regularizer: Optional regularizer for the gamma weight.\n beta_constraint: Optional constraint for the beta weight.\n gamma_constraint: Optional constraint for the gamma weight.\n\n Input shape\n Arbitrary. 
Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n Output shape\n Same shape as input.\n References\n - [Group Normalization](https://arxiv.org/abs/1803.08494)\n \"\"\"\n\n def __init__(self,\n groups=2,\n axis=-1,\n epsilon=1e-3,\n center=True,\n scale=True,\n beta_initializer='zeros',\n gamma_initializer='ones',\n beta_regularizer=None,\n gamma_regularizer=None,\n beta_constraint=None,\n gamma_constraint=None,\n **kwargs):\n super(GroupNormalization, self).__init__(**kwargs)\n self.supports_masking = True\n self.groups = groups\n self.axis = axis\n self.epsilon = epsilon\n self.center = center\n self.scale = scale\n self.beta_initializer = tf.keras.initializers.get(beta_initializer)\n self.gamma_initializer = tf.keras.initializers.get(gamma_initializer)\n self.beta_regularizer = tf.keras.regularizers.get(beta_regularizer)\n self.gamma_regularizer = tf.keras.regularizers.get(gamma_regularizer)\n self.beta_constraint = tf.keras.constraints.get(beta_constraint)\n self.gamma_constraint = tf.keras.constraints.get(gamma_constraint)\n self._check_axis()\n\n def build(self, input_shape):\n\n self._check_if_input_shape_is_none(input_shape)\n self._set_number_of_groups_for_instance_norm(input_shape)\n self._check_size_of_dimensions(input_shape)\n self._create_input_spec(input_shape)\n\n self._add_gamma_weight(input_shape)\n self._add_beta_weight(input_shape)\n self.built = True\n super(GroupNormalization, self).build(input_shape)\n\n def call(self, inputs):\n\n input_shape = tf.keras.backend.int_shape(inputs)\n tensor_input_shape = tf.shape(inputs)\n\n reshaped_inputs, group_shape = self._reshape_into_groups(\n inputs, input_shape, tensor_input_shape)\n\n normalized_inputs = self._apply_normalization(reshaped_inputs,\n input_shape)\n\n outputs = tf.reshape(normalized_inputs, tensor_input_shape)\n\n return outputs\n\n def get_config(self):\n config = {\n 'groups':\n self.groups,\n 'axis':\n self.axis,\n 'epsilon':\n self.epsilon,\n 'center':\n self.center,\n 'scale':\n self.scale,\n 'beta_initializer':\n tf.keras.initializers.serialize(self.beta_initializer),\n 'gamma_initializer':\n tf.keras.initializers.serialize(self.gamma_initializer),\n 'beta_regularizer':\n tf.keras.regularizers.serialize(self.beta_regularizer),\n 'gamma_regularizer':\n tf.keras.regularizers.serialize(self.gamma_regularizer),\n 'beta_constraint':\n tf.keras.constraints.serialize(self.beta_constraint),\n 'gamma_constraint':\n tf.keras.constraints.serialize(self.gamma_constraint)\n }\n base_config = super(GroupNormalization, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n def _reshape_into_groups(self, inputs, input_shape, tensor_input_shape):\n\n group_shape = [tensor_input_shape[i] for i in range(len(input_shape))]\n group_shape[self.axis] = input_shape[self.axis] // self.groups\n group_shape.insert(1, self.groups)\n group_shape = tf.stack(group_shape)\n reshaped_inputs = tf.reshape(inputs, group_shape)\n return reshaped_inputs, group_shape\n\n def _apply_normalization(self, reshaped_inputs, input_shape):\n\n group_shape = tf.keras.backend.int_shape(reshaped_inputs)\n group_reduction_axes = list(range(len(group_shape)))\n # Remember the ordering of the tensor is [batch, group , steps]. 
Jump\n # the first 2 to calculate the variance and the mean\n mean, variance = tf.nn.moments(\n reshaped_inputs, group_reduction_axes[2:], keepdims=True)\n\n gamma, beta = self._get_reshaped_weights(input_shape)\n normalized_inputs = tf.nn.batch_normalization(\n reshaped_inputs,\n mean=mean,\n variance=variance,\n scale=gamma,\n offset=beta,\n variance_epsilon=self.epsilon)\n return normalized_inputs\n\n def _get_reshaped_weights(self, input_shape):\n broadcast_shape = self._create_broadcast_shape(input_shape)\n gamma = None\n beta = None\n if self.scale:\n gamma = tf.reshape(self.gamma, broadcast_shape)\n\n if self.center:\n beta = tf.reshape(self.beta, broadcast_shape)\n return gamma, beta\n\n def _check_if_input_shape_is_none(self, input_shape):\n dim = input_shape[self.axis]\n if dim is None:\n raise ValueError('Axis ' + str(self.axis) + ' of '\n 'input tensor should have a defined dimension '\n 'but the layer received an input with shape ' +\n str(input_shape) + '.')\n\n def _set_number_of_groups_for_instance_norm(self, input_shape):\n dim = input_shape[self.axis]\n\n if self.groups == -1:\n self.groups = dim\n\n def _check_size_of_dimensions(self, input_shape):\n\n dim = input_shape[self.axis]\n if dim < self.groups:\n raise ValueError(\n 'Number of groups (' + str(self.groups) + ') cannot be '\n 'more than the number of channels (' + str(dim) + ').')\n\n if dim % self.groups != 0:\n raise ValueError(\n 'Number of groups (' + str(self.groups) + ') must be a '\n 'multiple of the number of channels (' + str(dim) + ').')\n\n def _check_axis(self):\n\n if self.axis == 0:\n raise ValueError(\n \"You are trying to normalize your batch axis. Do you want to \"\n \"use tf.layer.batch_normalization instead\")\n\n def _create_input_spec(self, input_shape):\n\n dim = input_shape[self.axis]\n self.input_spec = tf.keras.layers.InputSpec(\n ndim=len(input_shape), axes={self.axis: dim})\n\n def _add_gamma_weight(self, input_shape):\n\n dim = input_shape[self.axis]\n shape = (dim,)\n\n if self.scale:\n self.gamma = self.add_weight(\n shape=shape,\n name='gamma',\n initializer=self.gamma_initializer,\n regularizer=self.gamma_regularizer,\n constraint=self.gamma_constraint)\n else:\n self.gamma = None\n\n def _add_beta_weight(self, input_shape):\n\n dim = input_shape[self.axis]\n shape = (dim,)\n\n if self.center:\n self.beta = self.add_weight(\n shape=shape,\n name='beta',\n initializer=self.beta_initializer,\n regularizer=self.beta_regularizer,\n constraint=self.beta_constraint)\n else:\n self.beta = None\n\n def _create_broadcast_shape(self, input_shape):\n broadcast_shape = [1] * len(input_shape)\n broadcast_shape[self.axis] = input_shape[self.axis] // self.groups\n broadcast_shape.insert(1, self.groups)\n return broadcast_shape\n\n\[email protected]_keras_serializable(package='Addons')\nclass InstanceNormalization(GroupNormalization):\n \"\"\"Instance normalization layer.\n\n Instance Normalization is an specific case of ```GroupNormalization```since\n it normalizes all features of one channel. The Groupsize is equal to the\n channel size. 
Empirically, its accuracy is more stable than batch norm in a\n wide range of small batch sizes, if learning rate is adjusted linearly\n with batch sizes.\n\n Arguments\n axis: Integer, the axis that should be normalized.\n epsilon: Small float added to variance to avoid dividing by zero.\n center: If True, add offset of `beta` to normalized tensor.\n If False, `beta` is ignored.\n scale: If True, multiply by `gamma`.\n If False, `gamma` is not used.\n beta_initializer: Initializer for the beta weight.\n gamma_initializer: Initializer for the gamma weight.\n beta_regularizer: Optional regularizer for the beta weight.\n gamma_regularizer: Optional regularizer for the gamma weight.\n beta_constraint: Optional constraint for the beta weight.\n gamma_constraint: Optional constraint for the gamma weight.\n\n Input shape\n Arbitrary. Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n Output shape\n Same shape as input.\n\n References\n - [Instance Normalization: The Missing Ingredient for Fast Stylization]\n (https://arxiv.org/abs/1607.08022)\n \"\"\"\n\n def __init__(self, **kwargs):\n if \"groups\" in kwargs:\n logging.warning(\"The given value for groups will be overwritten.\")\n\n kwargs[\"groups\"] = -1\n super(InstanceNormalization, self).__init__(**kwargs)\n"
] | [
[
"tensorflow.nn.moments",
"tensorflow.keras.initializers.get",
"tensorflow.stack",
"tensorflow.shape",
"tensorflow.reshape",
"tensorflow.nn.batch_normalization",
"tensorflow.keras.constraints.get",
"tensorflow.keras.backend.int_shape",
"tensorflow.keras.constraints.serialize",
"tensorflow.keras.utils.register_keras_serializable",
"tensorflow.keras.regularizers.serialize",
"tensorflow.keras.initializers.serialize",
"tensorflow.keras.regularizers.get"
]
] |
mutazag/mdsi | [
"efecc8f650ddf6866154389f98d4ce0a9803db18"
] | [
"misc/learnpy/k-means/loadiris.py"
] | [
"import pandas as pd\nfrom sklearn import datasets\n\n\n# load iris data set\niris = datasets.load_iris()\nprint(iris)\n\nspecies = [iris.target_names[x] for x in iris.target]\n\niris = pd.DataFrame(iris['data'], columns = ['Sepal_Length', 'Sepal_Width', 'Petal_Length', 'Petal_Width']) \niris['Species'] = species\n\n\niris.head()\niris.dtypes\n\n\n# quick count\niris['count'] = 1\niris[['Species', 'count']].groupby('Species').count()\niris.groupby('Species').count()\n\n\n\n# plot the data set \n# %matplotlib inline\ndef plot_iris(iris, col1, col2):\n print(\"plot_iris\")\n import seaborn as sns\n import matplotlib.pyplot as plt\n sns.lmplot(x = col1, y=col2, \n data = iris, \n hue = \"Species\", \n fit_reg=False)\n plt.xlabel(col1)\n plt.ylabel(col2)\n plt.title('Iris species show by color')\n plt.show() \n\nplot_iris(iris, 'Petal_Width', 'Sepal_Length') \n\nplot_iris(iris, 'Sepal_Width', 'Sepal_Length')\n\n\n# preparing numeric featurs by scaling\n\nfrom sklearn.preprocessing import scale\n\nimport pandas as pd \n\nnum_cols = ['Sepal_Length', 'Sepal_Width', 'Petal_Length', 'Petal_Width']\niris_scaled = scale(iris[num_cols])\niris_scaled = pd.DataFrame(iris_scaled, columns = num_cols)\nprint(iris_scaled.describe().round(3))\n\n# coding string col 'species' as numeric using a dictionary \nlevels = {'setosa':0, \n 'versicolor':1, \n 'virginica':2}\n\n# add coded species to the new scaled iris data frame \niris_scaled['Species'] = [levels[x] for x in iris['Species']]\niris_scaled.head()\nplot_iris(iris_scaled, 'Sepal_Width', 'Sepal_Length')\n\n\n\n## split the data into training and tes using Bernoulli sampling \n\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\n\nnp.random.seed(3456) \niris_split = train_test_split(np.asmatrix(iris_scaled), test_size = 75)\n\niris_train_features = iris_split[0][:,:4]\niris_train_labels = np.ravel(iris_split[0][:,4])\n\niris_test_features = iris_split[1][:,:4]\niris_test_labels = np.ravel(iris_split[1][:,4])\n\nprint(iris_train_features.shape)\nprint(iris_train_labels.shape)\n\nprint(iris_test_features.shape)\nprint(iris_test_labels.shape)\n\n# Train and Eval KNN model \n\n#fit model \nfrom sklearn.neighbors import KNeighborsClassifier\nKNN_mod = KNeighborsClassifier(n_neighbors=3) # this is K \nKNN_mod.fit(iris_train_features, iris_train_labels)\n\n#test model on test data set\niris_test = pd.DataFrame(iris_test_features, columns = num_cols)\niris_test['predicted'] = KNN_mod.predict(iris_test_features)\niris_test['actuals'] = iris_test_labels\niris_test['correct'] = [1 if x == z else 0 for x, z in zip(iris_test['predicted'], iris_test_labels)]\n\n# calculate some accuracy measure \naccuracy = 100 * float(sum(iris_test['correct'])) / float(iris_test.shape[0])\nprint(accuracy)\n\niris_test[iris_test.correct != 1]\niris_test.loc[iris_test[\"correct\"] != 1]\n\n\n\n\n# plotting the predicted values and highliting incorrectly classified observations \n\nlevels = {0:'setosa', 1:'versicolor', 2:'virginica'}\niris_test['Species'] = [levels[x] for x in iris_test['predicted']]\nmarkers = {1:'^', 0:'o'}\ncolors = {'setosa':'blue', 'versicolor':'green', 'virginica':'red'}\ndef plot_shapes(df, col1,col2, markers, colors):\n import matplotlib.pyplot as plt\n import seaborn as sns\n ax = plt.figure(figsize=(6, 6)).gca() # define plot axis\n for m in markers: # iterate over marker dictioary keys\n for c in colors: # iterate over color dictionary keys\n df_temp = df[(df['correct'] == m) & (df['Species'] == c)]\n sns.regplot(x = col1, y = col2, \n 
data = df_temp, \n fit_reg = False, \n scatter_kws={'color': colors[c]},\n marker = markers[m],\n ax = ax)\n plt.xlabel(col1)\n plt.ylabel(col2)\n plt.title('Iris species by color')\n return 'Done'\nplot_shapes(iris_test, 'Petal_Width', 'Sepal_Length', markers, colors)\nplot_shapes(iris_test, 'Sepal_Width', 'Sepal_Length', markers, colors)"
] | [
[
"matplotlib.pyplot.figure",
"sklearn.preprocessing.scale",
"pandas.DataFrame",
"numpy.random.seed",
"numpy.ravel",
"matplotlib.pyplot.title",
"numpy.asmatrix",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"sklearn.neighbors.KNeighborsClassifier",
"matplotlib.pyplot.xlabel",
"sklearn.datasets.load_iris"
]
] |
urialon/bottleneck | [
"481fbb95edc6ae711da40b6305b40c12ce6a6d29"
] | [
"run-gat-2-8.py"
] | [
"import main\nfrom common import Task, STOP, GNN_TYPE\nfrom attrdict import AttrDict\nfrom experiment import Experiment\nimport torch\n\noverride_params = {\n 2: {'batch_size': 64, 'eval_every': 1000},\n 3: {'batch_size': 64},\n 4: {'batch_size': 1024},\n 5: {'batch_size': 1024},\n 6: {'batch_size': 1024},\n 7: {'batch_size': 2048},\n 8: {'batch_size': 1024, 'accum_grad': 2}, # effective batch size of 2048, with less GPU memory\n}\n\n\nclass Results:\n def __init__(self, train_acc, test_acc, epoch):\n self.train_acc = train_acc\n self.test_acc = test_acc\n self.epoch = epoch\n\n\nif __name__ == '__main__':\n\n task = Task.DICTIONARY\n gnn_type = GNN_TYPE.GAT\n stopping_criterion = STOP.TRAIN\n min_depth = 2\n max_depth = 8\n\n results_all_depths = {}\n for depth in range(min_depth, max_depth + 1):\n num_layers = depth + 1\n args = main.get_fake_args(task=task, depth=depth, num_layers=num_layers, loader_workers=7,\n type=gnn_type, stop=stopping_criterion,\n no_activation=True, no_residual=False)\n if depth in override_params:\n for key, value in AttrDict(override_params[depth]).items():\n args[key] = value\n train_acc, test_acc, epoch = Experiment(args).run()\n torch.cuda.empty_cache()\n results_all_depths[depth] = Results(train_acc=train_acc, test_acc=test_acc, epoch=epoch)\n print()\n\n print(f'Task: {task}')\n print('depth, train_acc, test_acc, epoch, train_acc, test_acc, epoch,')\n for depth in range(min_depth, max_depth + 1):\n res = results_all_depths[depth]\n print(f'{depth}, {res.train_acc}, {res.test_acc}, {res.epoch}')\n"
] | [
[
"torch.cuda.empty_cache"
]
] |
savinshynu/turbo_seti | [
"7d756f130af5a323403affcdcb9f9bfa62325836"
] | [
"test/fb_cases_util.py"
] | [
"r'''\nUtility functions for test_fb_cases.py\n'''\n\nfrom os import mkdir, remove\nfrom os.path import dirname\nfrom shutil import rmtree\nimport logging\nimport pandas as pd\nimport numpy as np\nimport setigen as stg\nfrom turbo_seti.find_doppler.find_doppler import FindDoppler\nfrom fb_cases_def import HERE, DEBUGGING, RTOL_DIFF, TestResultRecord, SetigenParms\n\nDF_REFERENCE = HERE + '/fb_dat_reference.txt'\nSEP = r'\\s+'\n\n\ndef initialize(arg_dir):\n r'''\n Recreate working directory, TESTDIR.\n Load result reference tables (2).\n '''\n rmtree(arg_dir, ignore_errors=True)\n mkdir(arg_dir)\n df = pd.read_csv(DF_REFERENCE, sep=SEP, engine='python', comment='#')\n nrows = len(df)\n if nrows < 1:\n raise ValueError('initialize: Empty reference table')\n if nrows % 2 != 0:\n raise ValueError('initialize: Reference table row count ({}) is not divisible by 2'\n .format(nrows))\n if DEBUGGING:\n print('initialize: Test case reference results: \\n', df)\n ref_tophit_1 = []\n ref_tophit_2 = []\n jj = 0\n while jj < nrows:\n record = TestResultRecord()\n record.fdir = int(df['fdir'][jj])\n record.drsign = int(df['drsign'][jj])\n record.tophit_id = int(df['tophit'][jj])\n record.drate = float(df['drate'][jj])\n record.snr = float(df['snr'][jj])\n record.freq = float(df['freq'][jj])\n record.index = int(df['index'][jj])\n ref_tophit_1.append(record)\n if DEBUGGING:\n print('initialize: appended for hit_1:\\n', record.to_string() )\n jj += 1\n del record\n record = TestResultRecord()\n record.fdir = int(df['fdir'][jj])\n record.drsign = int(df['drsign'][jj])\n record.tophit_id = int(df['tophit'][jj])\n record.drate = float(df['drate'][jj])\n record.snr = float(df['snr'][jj])\n record.freq = float(df['freq'][jj])\n record.index = int(df['index'][jj])\n ref_tophit_2.append(record)\n if DEBUGGING:\n print('initialize: appended for hit_2:\\n', record.to_string() )\n jj += 1\n if DEBUGGING:\n print('initialize: {} test cases loaded.'.format(len(ref_tophit_1)))\n return ref_tophit_1, ref_tophit_2\n\n\ndef generate_fil_file(outpath, flag_fascending, flag_sign_drift_rate):\n r'''\n Using setigen, generate a filterbank file.\n\n Parameters:\n outpath - full path of where to store the resultant filterbank file.\n flag_fascending - use an ascending (+1) or descending (-1) sequence of frequencies\n flag_sign_drift_rate - use a positive (+1) or negative (-1) drift rate\n '''\n if DEBUGGING:\n print('generate_fil_file: flag_fascending={}, flag_sign_drift_rate={}'\n .format(flag_fascending, flag_sign_drift_rate))\n\n # Set up setigne parameters\n stg_parms = SetigenParms()\n if flag_sign_drift_rate < 0:\n stg_parms.drift_rate_1 = -stg_parms.drift_rate_1\n stg_parms.drift_rate_2 = -stg_parms.drift_rate_2\n stg_parms.drift_rate_3 = -stg_parms.drift_rate_3\n stg_parms.drift_rate_4 = -stg_parms.drift_rate_4\n stg_parms.drift_rate_5 = -stg_parms.drift_rate_5\n\n # Instantiate a setigen Frame object\n frame = stg.Frame(fchans=stg_parms.fchans,\n tchans=stg_parms.tchans,\n df=stg_parms.df,\n dt=stg_parms.dt,\n fch1=stg_parms.fch1,\n ascending=(flag_fascending > 0))\n\n # Add noise to stg object.\n frame.add_noise(x_mean=0, x_std=stg_parms.noise_std, noise_type='gaussian')\n\n # Signal 1 will be detected.\n signal_1_intensity = frame.get_intensity(snr=stg_parms.snr_1)\n frame.add_constant_signal(f_start=frame.get_frequency(stg_parms.signal_start_1),\n drift_rate=stg_parms.drift_rate_1,\n level=signal_1_intensity,\n width=stg_parms.width_1,\n f_profile_type='gaussian')\n\n # Signal 2 will be detected.\n 
signal_2_intensity = frame.get_intensity(snr=stg_parms.snr_2)\n frame.add_constant_signal(f_start=frame.get_frequency(stg_parms.signal_start_2),\n drift_rate=stg_parms.drift_rate_2,\n level=signal_2_intensity,\n width=stg_parms.width_2,\n f_profile_type='gaussian')\n\n # Signal 3 is a symmetric signal with three Gaussians\n # that will fall below the SNR requirements.\n signal_3_intensity = frame.get_intensity(snr=stg_parms.snr_3)\n frame.add_signal(stg.constant_path(f_start=frame.get_frequency(stg_parms.signal_start_3),\n drift_rate=stg_parms.drift_rate_3),\n stg.constant_t_profile(level=1),\n stg.multiple_gaussian_f_profile(width=stg_parms.width_3),\n stg.constant_bp_profile(level=signal_3_intensity))\n\n # Signal 4 is a symmetric signal with three Gaussians\n # that will be drifting too quickly.\n signal_4_intensity = frame.get_intensity(snr=stg_parms.snr_4)\n frame.add_signal(stg.constant_path(f_start=frame.get_frequency(stg_parms.signal_start_4),\n drift_rate=stg_parms.drift_rate_4),\n stg.constant_t_profile(level=1),\n stg.multiple_gaussian_f_profile(width=stg_parms.width_4),\n stg.constant_bp_profile(level=signal_4_intensity))\n\n # Signal 5 is similar to signal 4 but drifting in the opposite direction.\n signal_5_intensity = frame.get_intensity(snr=stg_parms.snr_5)\n frame.add_signal(stg.constant_path(f_start=frame.get_frequency(stg_parms.signal_start_5),\n drift_rate=stg_parms.drift_rate_5),\n stg.constant_t_profile(level=1),\n stg.multiple_gaussian_f_profile(width=stg_parms.width_5),\n stg.constant_bp_profile(level=signal_5_intensity))\n\n # Save the frame as a filterbank file.\n frame.save_fil(filename=outpath)\n print(\"generate_fil_file: generated {}\".format(outpath))\n del frame\n\n\ndef make_one_dat_file(arg_path_fil, min_drift=0.0, max_drift=4.0, min_snr=25.0, remove_h5=True):\n r'''\n Make a single DAT file:\n * Instantiate the FindDoppler class object.\n * With the object, search the H5, creating the DAT file\n and a LOG file (not used).\n '''\n if max_drift is None:\n raise ValueError('make_one_dat_file: max_drift not set')\n woutdir = dirname(arg_path_fil)\n fdop = FindDoppler(datafile=arg_path_fil,\n min_drift=min_drift,\n max_drift=max_drift,\n snr=min_snr,\n log_level_int=logging.WARNING,\n out_dir=woutdir)\n fdop.search()\n path_h5_file = arg_path_fil.replace('.fil', '.h5')\n if remove_h5:\n remove(path_h5_file)\n\n\ndef get_case_results(arg_path_dat):\n r'''From the DAT file, extract the data for all top hits.'''\n df = pd.read_csv(arg_path_dat, header=None, sep=SEP, engine='python', comment='#')\n nrows = len(df)\n if nrows != 2:\n raise ValueError('get_case_results: Expected 2 rows in DAT but observed {} rows'\n .format(nrows))\n\n obs_tophit_1 = TestResultRecord()\n obs_tophit_1.tophit_id = int(df[0][0]) # 1st col, 1st row\n obs_tophit_1.drate = float(df[1][0])\n obs_tophit_1.snr = float(df[2][0])\n obs_tophit_1.freq = float(df[4][0])\n obs_tophit_1.index = int(df[5][0])\n\n obs_tophit_2 = TestResultRecord()\n obs_tophit_2.tophit_id = int(df[0][1]) # 1st col, 2nd row\n obs_tophit_2.drate = float(df[1][1])\n obs_tophit_2.snr = float(df[2][1])\n obs_tophit_2.freq = float(df[4][1])\n obs_tophit_2.index = int(df[5][1])\n\n return obs_tophit_1, obs_tophit_2\n\n\ndef case_comparison(obs_tophit, ref_tophit, max_drift):\n r'''Compare DAT file observations to the reference.'''\n if obs_tophit is None:\n if ref_tophit is None:\n return # success, both None\n # ref_tophit defined, obs_tophit is None\n raise ValueError('case_comparison: FAILED, max_drift={}\\nobs_tophit is 
None\\nref_tophit:::{}'\n .format(max_drift, ref_tophit.to_string()))\n if ref_tophit is None: # obs_tophit defined, ref_tophit is None\n raise ValueError('case_comparison: FAILED, max_drift={}\\nref_tophit is None\\nobs_tophit:::{}'\n .format(max_drift, obs_tophit.to_string()))\n\n if obs_tophit.tophit_id == ref_tophit.tophit_id \\\n and np.isclose(obs_tophit.drate, ref_tophit.drate, rtol=RTOL_DIFF) \\\n and np.isclose(obs_tophit.snr, ref_tophit.snr, rtol=RTOL_DIFF) \\\n and np.isclose(obs_tophit.freq, ref_tophit.freq, rtol=RTOL_DIFF) \\\n and obs_tophit.index == ref_tophit.index:\n return # success\n\n # Some field(s) did not compare correctly.\n raise ValueError('case_comparison: FAILED, max_drift={}\\nobs_tophit:::{}\\nref_tophit:::{}'\n .format(max_drift, obs_tophit.to_string(), ref_tophit.to_string()))\n\nif __name__ == '__main__':\n # __main__ is a developer unit test, not normally to be executed.\n from fb_cases_def import TESTDIR, PATH_FIL_FILE, MIN_SNR\n rmtree(TESTDIR, ignore_errors=True)\n mkdir(TESTDIR)\n generate_fil_file(PATH_FIL_FILE, -1, -1)\n make_one_dat_file(PATH_FIL_FILE, max_drift=5, min_snr=MIN_SNR)\n"
] | [
[
"pandas.read_csv",
"numpy.isclose"
]
] |
DebeshJha/tensorflow-1 | [
"2b5a225c49d25273532d11c424d37ce394d7579a"
] | [
"tensorflow/python/ipu/utils.py"
] | [
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\"\"\"\nGeneral utilities\n~~~~~~~~~~~~~~~~~\n\"\"\"\n\nimport collections\nfrom enum import Enum\nimport os\nimport time\nimport numpy as np\n\nfrom tensorflow.compiler.plugin.poplar.driver.config_pb2 import IpuOptions\nfrom tensorflow.compiler.plugin.poplar.driver.trace_pb2 import IpuTraceEvent\nfrom tensorflow.compiler.plugin.poplar.driver import config_pb2\nfrom tensorflow.compiler.plugin.poplar.ops import gen_ipu_ops\n# pylint: disable=unused-import\n# These imports are only here to make it easier for the Tensorflow Wheel users\n# to use these functions:\n# ```\n# from tensorflow.python import ipu\n# ...\n# ipu.utils.export_variables_from_live_session(...)\n# ```\nfrom tensorflow.compiler.plugin.poplar.tools.tensorflow_weights_extractor import (\n export_variables_from_live_session, export_variables_from_live_model,\n import_data_in_live_session, import_data_in_live_model)\n# pylint: enable=unused-import\nfrom tensorflow.compat.v1 import executing_eagerly\nfrom tensorflow.core.framework import attr_value_pb2\nfrom tensorflow.python.client import session as session_lib\nfrom tensorflow.python.distribute import values\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.util import deprecation\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.ipu import ipu_infeed_queue\nfrom tensorflow.python.ipu import dataset_extractor\n\n\nclass SelectionOrder(Enum):\n \"\"\"Depending on the communication pattern of the model, the order in\n which the IPUs are selected and mapped to shards can impact the performance.\n\n For example, given a model which executes on multiple IPUs:\n\n .. code-block:: python\n\n def sharded_graph(pa, pb, pc, pd):\n with ipu.scopes.ipu_shard(0):\n o1 = pa + pb\n with ipu.scopes.ipu_shard(1):\n o2 = o1 + pc\n with ipu.scopes.ipu_shard(2):\n o3 = o2 + pd\n return o3\n\n and a typical machine with 8 Graphcore C2 cards:\n\n .. 
code-block:: none\n\n _______ _______\n | | | |\n | 14 |=============| 15 |\n |_______| |_______|\n || ||\n _______ _______\n | | | |\n | 12 |=============| 13 |\n |_______| |_______|\n || ||\n _______ _______\n | | | |\n | 10 |=============| 11 |\n |_______| |_______|\n || ||\n _______ _______\n | | | |\n | 8 |=============| 9 |\n |_______| |_______|\n || ||\n _______ _______\n | | | |\n | 6 |=============| 7 |\n |_______| |_______|\n || ||\n _______ _______\n | | | |\n | 4 |=============| 5 |\n |_______| |_______|\n || ||\n _______ _______\n | | | |\n | 2 |=============| 3 |\n |_______| |_______|\n || ||\n _______ _______\n | | | |\n | 0 |=============| 1 |\n |_______| |_______|\n\n (where each numbered square represents an IPU with the given device ID and the\n == and || connections represent IPUs being directly connected via IPU-Links)\n\n we can see that the `ipu_shard(0)` directly communicates with `ipu_shard(1)`\n and that `ipu_shard(1)` directly communicates with `ipu_shard(2)`.\n If the shards 0, 1, 2 were mapped to IPUs 0, 1, 2 in that order, then the\n communication between shards 1 and 2 would not have a direct connection via an\n IPU-Link and would have to perform a \"hop\" via an IPU.\n If the shards 0, 1, 2 were mapped to IPUs 0, 1, 3 in that order, then the\n communication between shards 1 and 2 would have a direct connection via an\n IPU-Link which will reduce the communication cost.\n\n This Enum class is used to control the order in which the IPUs are selected.\n Currently, the following IPU selection orderings are supported:\n\n * `AUTO`: automatically try and select the best selection given the network.\n * `ZIGZAG`: follow the natural ordering of IPUs. In the above example, the\n IPUs would be selected in the following order:\n `0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15`.\n * `SNAKE`: select IPUs such that each consecutive shard is directly\n connected via IPU-Links to the shard before and after. In the above example,\n the IPUs would be selected in the following order:\n `0, 1, 3, 2, 4, 5, 7, 6, 8, 9, 11, 10, 12, 13, 15, 14`.\n * `HOOF`: select IPUs such that each consecutive shard is directly\n connected via IPU-Links to the shard before and after and the last and first\n shard are on the same C2 cards. 
In the above example, the IPUs would be\n selected in the following order:\n `0, 2, 4, 6, 8, 10, 12, 14, 15, 13, 11, 9, 7, 5, 3, 1`.\n\n The `SNAKE` and `HOOF` IPU selection orders are particularly beneficial for\n pipelined models.\n \"\"\"\n AUTO = config_pb2.IpuSelectionOrder.Value(\"AUTO\")\n ZIGZAG = config_pb2.IpuSelectionOrder.Value(\"ZIGZAG\")\n SNAKE = config_pb2.IpuSelectionOrder.Value(\"SNAKE\")\n HOOF = config_pb2.IpuSelectionOrder.Value(\"HOOF\")\n\n\nclass ExecutionProfileType(Enum):\n \"\"\"The execution profile type indicates the desired information in the\n execution profile.\n\n * `NO_PROFILE` indicates that there should be no execution profiling.\n * `DEVICE_PROFILE` indicates that the execution profile should contain only\n device wide events.\n * `IPU_PROFILE` indicates that the profile should contain IPU level\n execution events.\n * `TILE_PROFILE` indicates that the profile should contain Tile level\n execution events.\n \"\"\"\n NO_PROFILE = config_pb2.IpuExecutionProfileType.Value(\"NO_PROFILE\")\n DEVICE_PROFILE = config_pb2.IpuExecutionProfileType.Value(\"DEVICE_PROFILE\")\n IPU_PROFILE = config_pb2.IpuExecutionProfileType.Value(\"IPU_PROFILE\")\n TILE_PROFILE = config_pb2.IpuExecutionProfileType.Value(\"TILE_PROFILE\")\n\n\nclass DeviceConnectionType(Enum):\n \"\"\"Enumeration to describe the mechanism used to attach to the Poplar\n device.\n\n * `ALWAYS` indicates that the system will attach when configuring the\n device.\n * `ON_DEMAND` will defer connection to when the IPU is needed.\n * `NEVER` will never try to attach to a device. Used when compiling offline.\n \"\"\"\n ALWAYS = config_pb2.IpuDeviceConnectionType.Value(\"ALWAYS\")\n ON_DEMAND = config_pb2.IpuDeviceConnectionType.Value(\"ON_DEMAND\")\n NEVER = config_pb2.IpuDeviceConnectionType.Value(\"NEVER\")\n\n\ndef configure_ipu_system(config, device=\"cpu\"):\n \"\"\"Configure an IPU system. Passing an IpuOptions protobuf created by the\n ``create_ipu_config`` function.\n\n Args:\n config: An IpuOptions configuration protobuf\n device: The CPU device which is local to the IPU hardware\n\n Returns:\n None\n \"\"\"\n if not isinstance(config, config_pb2.IpuOptions):\n raise Exception(\"`config` must be an IpuOptions instance\")\n\n g = ops.Graph()\n with g.as_default():\n with ops.device(device):\n cfg_op = gen_ipu_ops.ipu_configure_hardware(config.SerializeToString())\n\n with session_lib.Session(graph=g) as sess:\n sess.run(cfg_op)\n\n\ndef get_ipu_config(session=None):\n \"\"\"Get the configuration of an IPU system.\n\n Args:\n session: An optional session on which to execute.\n\n Returns:\n A list of IpuOption instances, one for each PoplarExecutor.\n \"\"\"\n configurations = None\n\n # Get the serialized output.\n if executing_eagerly():\n assert not session, \"No session is required for eager execution.\"\n configurations = gen_ipu_ops.ipu_get_configuration().numpy()\n else:\n s = session if session else session_lib.Session()\n configurations = s.run(gen_ipu_ops.ipu_get_configuration())\n\n # Deserialize and determine if a valid config exists,\n # i.e. 
user has succesfully called ipu_configure_hardware.\n deserialized = []\n valid = False\n for conf in configurations:\n # Deserialize.\n opt = IpuOptions()\n opt.ParseFromString(conf)\n deserialized.append(opt)\n\n valid |= len(opt.device_config) > 0\n\n if not valid:\n raise RuntimeError(\"No IPU devices configured.\")\n\n return deserialized\n\n\ndef get_num_of_ipus_in_device(ipu_device, device=\"cpu\"):\n \"\"\"Get the number of physical IPUs\n\n Args:\n ipu_device: The IPU device for which to get the number of devices for.\n device: The CPU device which is local to the IPU hardware.\n\n Returns:\n A number of physical IPUs configured for a particular TF device.\n \"\"\"\n\n g = ops.Graph()\n with g.as_default():\n with ops.device(device):\n cfg_op = gen_ipu_ops.ipu_get_num_devices(ipu_device)\n\n with session_lib.Session(graph=g) as sess:\n return sess.run(cfg_op)\n\n\ndef running_on_ipu_model():\n \"\"\" Check if XLA is configured to run on the ipu model.\n\n Returns:\n True if XLA is configured to run on the ipu model.\n False if XLA is configured to run on real hardware.\n \"\"\"\n return \"--use_ipu_model\" in os.environ.get(\"TF_POPLAR_FLAGS\", \"\")\n\n\[email protected]_args(None, \"Use set_optimization_options() instead.\",\n \"max_cross_replica_sum_buffer_size\",\n \"max_inter_ipu_copies_buffer_size\")\ndef create_ipu_config(profiling=False,\n enable_ipu_events=False,\n use_poplar_text_report=False,\n use_poplar_cbor_report=False,\n profile_execution=None,\n enable_poplar_serialized_graph=False,\n report_every_nth_execution=0,\n max_report_size=0x10000000,\n report_directory=\"\",\n scheduler_selection=\"\",\n always_rearrange_copies_on_the_host=False,\n merge_infeed_io_copies=False,\n disable_graph_convolution_caching=False,\n disable_graph_outlining=False,\n retain_control_dependencies=False,\n max_cross_replica_sum_buffer_size=0,\n max_inter_ipu_copies_buffer_size=0,\n max_scheduler_lookahead_depth=5,\n max_scheduler_search_space_size=64,\n prefetch_data_streams=True,\n selection_order=None,\n enable_experimental_remote_buffer_embedding=False):\n \"\"\"Create an empty IPU session configuration structure.\n\n Args:\n profiling: Enable compilation reports, and IPU trace events.\n enable_ipu_events: Enable IPU trace events without poplar reports.\n use_poplar_text_report: Enable the Poplar textual report summary.\n use_poplar_cbor_report: Enable the Poplar CBOR reports.\n profile_execution: Include Poplar execution profiles in the execution\n events. Can only be enabled if `profiling` is also enabled. If set, can be\n `True`, 'False`, or a member of the `ExecutionProfileType` enumeration.\n A `True` value indicates `ExecutionProfileType.DEVICE_PROFILE`.\n enable_poplar_serialized_graph: Create the Poplar serialized graph and\n include in the IPU compilation trace events.\n report_every_nth_execution: Only produce an execution report on every Nth\n execution. 0 = One report only.\n max_report_size: The maximum size of Poplar profiles to include in the\n profile events.\n report_directory: When set, reports will be written to files in this\n directory, instead of being written into the events. The events will\n contain the full paths of the report files.\n scheduler_selection: When set, this forces the compiler to use a specific\n scheduler when ordering the instructions. 
See the documentation for a\n list of valid schedulers.\n always_rearrange_copies_on_the_host: *** Experimental Flag ***\n The data which is streamed to/from the device might be stored in different\n layouts on the device and on the host. If that is the case the\n rearrangment is performed on the device by default. By enabling this\n option the rearrangment will be performed on the host at the expense of\n latency.\n merge_infeed_io_copies: When true, this flag will merge the streamed\n host->device input copies into one larger copy. This may reduce the time\n to copy data from the host, at the expense of increasing the live tensor\n memory on the device.\n disable_graph_convolution_caching: By default, the convolution operation\n searches for an equivalent cached operation, and uses this instead of\n creating a new convolution. Setting this flag forces the creation of a\n new convolution. This can improve runtime at the expense of graph size.\n disable_graph_outlining: By default, some operations, such as matrix\n multiplications, which occur in the graph multiple times but with\n different input tensors might be optimised to reduce the total code size\n of the graph at the expense of the execution time. Setting this flag will\n disable these optimisations. This option is not valid for the convolution\n operation (also see disable_graph_convolution_caching)\n retain_control_dependencies: Deprecated.\n max_cross_replica_sum_buffer_size: The maximum number of bytes that can be\n waiting before a cross replica sum op is scheduled.\n max_inter_ipu_copies_buffer_size: The maximum number of bytes that can be\n waiting before a inter IPU copy between IPUs is scheduled.\n max_scheduler_lookahead_depth: The maximum distance to look into the future\n when considering valid schedules.\n max_scheduler_search_space_size: The maximum number of nodes to consider\n when building the tree of future schedules.\n prefetch_data_streams: When set to true, the prefetching of data for data\n streams on the host will be overlapped with execution on the IPU.\n selection_order: the order in which IPUs are selected and mapped to physical\n IPU devices when using a multi-IPU devices (see `SelectionOrder`). 
When\n not specified, then automatic selection order is used, otherwise an\n instance of `SelectionOrder`.\n enable_experimental_remote_buffer_embedding: When set to true,\n `HostEmbedding` will make use of poplar remote buffers.\n\n Returns:\n An IpuOptions configuration protobuf, suitable for passing to\n ``configure_ipu_system``\n \"\"\"\n if profiling and enable_ipu_events:\n raise Exception(\n \"`profiling` and `enable_ipu_events` are mutually exclusive\")\n\n if retain_control_dependencies:\n raise Exception(\"`retain_control_dependencies` is deprecated\")\n\n selection_order = selection_order if selection_order else SelectionOrder.AUTO\n profile_execution = profile_execution if profile_execution \\\n else ExecutionProfileType.NO_PROFILE\n\n if isinstance(profile_execution, (np.bool_, bool)):\n if profile_execution:\n profile_execution = ExecutionProfileType.DEVICE_PROFILE\n else:\n profile_execution = ExecutionProfileType.NO_PROFILE\n\n if (profile_execution != ExecutionProfileType.NO_PROFILE and not profiling):\n raise Exception(\"`profiling` is required when `profile_execution` is set\")\n\n if not isinstance(profile_execution, ExecutionProfileType):\n raise Exception(\"`profile_execution` must be True, False, or an \"\n \"ExecutionProfileType instance\")\n\n opts = config_pb2.IpuOptions()\n\n # Default initialize IpuOptions() attributes here.\n opts.creator_id = config_pb2.IpuOptionsCreator.IPU_UTILS\n opts.ipu_model_config.compile_ipu_code = True\n opts.enable_multi_slice_combiner = False\n opts.enable_matmul_combiner = False\n opts.enable_gather_simplifier = False\n opts.device_connection_type = DeviceConnectionType.ALWAYS.value\n opts.speed_size_config.allow_recompute = False\n\n # Configure IpuOptions according to the passed arguments.\n opts.profiling.enable_ipu_trace_events = profiling or enable_ipu_events\n opts.profiling.enable_compilation_trace = profiling\n opts.profiling.enable_io_trace = profiling\n opts.profiling.execution_trace_type = profile_execution.value\n opts.profiling.enable_poplar_reports_text = use_poplar_text_report\n opts.profiling.enable_poplar_reports_cbor = use_poplar_cbor_report\n opts.profiling.enable_poplar_graph = enable_poplar_serialized_graph\n opts.profiling.report_every_nth_execution = report_every_nth_execution\n opts.profiling.max_report_size = max_report_size\n opts.profiling.report_directory = report_directory\n\n opts.speed_size_config.always_rearrange_copies_on_the_host = \\\n always_rearrange_copies_on_the_host\n opts.speed_size_config.merge_infeed_io_copies = merge_infeed_io_copies\n opts.speed_size_config.disable_graph_convolution_caching = \\\n disable_graph_convolution_caching\n opts.speed_size_config.disable_graph_outlining = \\\n disable_graph_outlining\n opts.speed_size_config.scheduler_selection = scheduler_selection\n\n opts.max_cross_replica_sum_buffer_size = max_cross_replica_sum_buffer_size\n opts.max_inter_ipu_copies_buffer_size = max_inter_ipu_copies_buffer_size\n\n opts.max_scheduler_lookahead_depth = max_scheduler_lookahead_depth\n opts.max_scheduler_search_space_size = max_scheduler_search_space_size\n\n opts.prefetch_data_streams = prefetch_data_streams\n opts.selection_order = selection_order.value\n\n opts.verified_transfers.enabled = False\n opts = set_verification_options(opts, VerificationOptions())\n\n opts.enable_experimental_remote_buffer_embedding = \\\n enable_experimental_remote_buffer_embedding\n\n return opts\n\n\ndef set_serialization_options(opts, output_folder=\"\"):\n \"\"\" Enable / disable the 
serialization to disk of the compiled executables.\n\n .. code-block:: python\n\n # Create a device that will save to disk all the compiled executables.\n opts = create_ipu_config()\n opts = set_serialization_options(opts,\n output_folder=\"/tmp/my_network\")\n ipu.utils.configure_ipu_system(opts)\n with tf.Session() as s:\n ...\n\n Args:\n output_folder: Where to save the compiled executables.\n Set to \"\" to disable serialization.\n\n Returns:\n The IpuOptions configuration protobuf.\n \"\"\"\n opts.serialization_folder = output_folder\n return opts\n\n\ndef set_optimization_options(opts,\n combine_embedding_lookups=False,\n combine_matmuls=False,\n max_cross_replica_sum_buffer_size=0,\n max_reduce_scatter_buffer_size=0,\n max_inter_ipu_copies_buffer_size=0,\n max_send_recv_cluster_size=0,\n gather_simplifier=False,\n triangular_solve_expander_block_size=0):\n \"\"\"Set the IPU options related to performance / optimizations.\n\n .. code-block:: python\n\n # Create a device with fusion for multiSlices sharing the same input\n # enabled.\n opts = create_ipu_config()\n opts = set_optimization_options(opts,\n combine_embedding_lookups=True)\n ipu.utils.configure_ipu_system(opts)\n with tf.Session() as s:\n ...\n\n Args:\n combine_embedding_lookups: Fuse embedding lookups on the same tensor. This\n might improve performance but increase memory usage.\n combine_matmuls: Fuse matmul operations if they share the same weights or\n the same input.\n max_cross_replica_sum_buffer_size: The maximum number of bytes that can be\n waiting before a cross replica sum op is scheduled.\n max_reduce_scatter_buffer_size: The maximum number of bytes that can be\n waiting before a reduce scatter op is scheduled.\n max_inter_ipu_copies_buffer_size: The maximum number of bytes that can be\n waiting before a inter IPU copy between IPUs is scheduled.\n max_send_recv_cluster_size: The maximum number of bytes that can be waiting\n before a cluster of send/recv instructions to/from the host is scheduled.\n These are lowered to stream copies that can be merged by Poplar.\n gather_simplifier: Will enable more aggressive optimisation\n for embedding lookups.\n triangular_solve_expander_block_size: Defines size for triangular solver\n expander blocks. 0 - implementation defined default.\n\n Returns:\n The IpuOptions configuration protobuf.\n \"\"\"\n # Internally embedding lookups are implemented using multiSlice operations.\n opts.enable_multi_slice_combiner = combine_embedding_lookups\n opts.enable_matmul_combiner = combine_matmuls\n opts.max_cross_replica_sum_buffer_size = max_cross_replica_sum_buffer_size\n opts.max_reduce_scatter_buffer_size = max_reduce_scatter_buffer_size\n opts.max_inter_ipu_copies_buffer_size = max_inter_ipu_copies_buffer_size\n opts.max_send_recv_cluster_size = max_send_recv_cluster_size\n opts.enable_gather_simplifier = gather_simplifier\n opts.triangular_solve_expander_block_size = \\\n triangular_solve_expander_block_size\n\n return opts\n\n\ndef set_norm_options(opts, use_stable_statistics=False):\n \"\"\"Set the IPU options related to norms.\n\n Args:\n use_stable_statistics: If True, computes the mean first and subtracts\n the activations by it before computing the variance. 
The\n implementation with this flag set to True is slower than when set\n to False.\n\n Returns:\n The IpuOptions configuration protobuf.\n \"\"\"\n opts.use_stable_norm_statistics = use_stable_statistics\n\n return opts\n\n\ndef set_transfer_options(opts, use_verified_transfers=False):\n \"\"\"Set the IPU options related to Poplar data transfers.\n\n Args:\n opts: An IpuOptions session control protobuf.\n use_verified_transfers: If True, use Poplar's verified transfers.\n\n Returns:\n The IpuOptions configuration protobuf.\n \"\"\"\n opts.verified_transfers.enabled = use_verified_transfers\n\n return opts\n\n\nclass KeyId:\n def __init__(self, key=0, start_id=-1):\n self.key = key\n self.start_id = start_id\n\n\nclass VerificationOptions:\n \"\"\"Store pairs of key / id to use for each type of data used in the graph.\n Does nothing unless verified transfers have been enabled by calling\n `set_transfer_options(opts, use_verified_transfers=True)`\n and an instance of this class has been set by calling\n `set_verification_options`:\n\n .. code-block:: python\n\n o = VerificationOptions()\n o.inputs.key = 1\n o.infeeds[\"infeed\"].key = 3\n set_verification_options(opts, o)\n\n \"\"\"\n def __init__(self):\n self.inputs = KeyId()\n self.input_parameters = KeyId()\n self.outputs = KeyId()\n self.output_parameters = KeyId()\n self.infeeds = collections.defaultdict(KeyId)\n self.outfeeds = collections.defaultdict(KeyId)\n self.checkpoint_in = KeyId(0, 0)\n self.checkpoint_out = KeyId(0, 0)\n\n\ndef set_verification_options(opts, verification_options):\n \"\"\"Set the pairs or key / id to use for each type of data used in the graph\n when verified transfers are enabled.\n\n .. code-block:: python\n\n # Create a device which will use verified transfers with different keys.\n opts = create_ipu_config()\n opts = set_transfer_options(opts, use_verified_transfers=True)\n o = VerificationOptions()\n o.input_parameters = KeyId(1)\n o.infeeds[\"training_feed\"] = KeyId(2)\n opts = set_verification_options(opts, o)\n ipu.utils.configure_ipu_system(opts)\n with tf.Session() as s:\n ...\n\n Args:\n opts: An IpuOptions session control protobuf.\n verification_options: a VerificationOptions object that contains\n the keys / ids to use.\n \"\"\"\n if not isinstance(verification_options, VerificationOptions):\n raise Exception(\n \"`verification_options` must be of type VerificationOptions\")\n\n def _cp_key_and_id(src, dst):\n dst.key = src.key\n dst.start_id = src.start_id\n\n for attr in [\n \"inputs\", \"input_parameters\", \"outputs\", \"output_parameters\",\n \"checkpoint_in\", \"checkpoint_out\"\n ]:\n _cp_key_and_id(getattr(verification_options, attr),\n getattr(opts.verified_transfers, attr))\n\n for name, options in verification_options.infeeds.items():\n _cp_key_and_id(options, opts.verified_transfers.infeeds[name])\n\n for name, options in verification_options.outfeeds.items():\n _cp_key_and_id(options, opts.verified_transfers.outfeeds[name])\n\n return opts\n\n\ndef set_compilation_options(opts, compilation_options=None):\n \"\"\"Set the IPU compilation options for the session.\n\n .. 
code-block:: python\n\n # Create a device with debug execution profile flag set to \"compute_sets\"\n opts = create_ipu_config()\n opts = set_compilation_options(opts,\n compilation_options={\"debug.instrument\": \"true\",\n \"debug.allowOutOfMemory\": \"true\"})\n ipu.utils.configure_ipu_system(opts)\n with tf.Session() as s:\n ...\n\n Args:\n opts: An IpuOptions session control protobuf.\n compilation_options: A dictionary of poplar compilation option flags to be\n sent to the executor.\n\n Returns:\n The IpuOptions configuration protobuf, with engine compilation options set.\n \"\"\"\n if compilation_options:\n if not isinstance(compilation_options, dict):\n raise Exception(\"`compilation_options` must be a dictionary\")\n\n for (option_name, value) in compilation_options.items():\n compilation_option = opts.compilation_options.add()\n compilation_option.option = option_name\n compilation_option.value = value\n\n return opts\n\n\ndef set_convolution_options(opts, convolution_options=None):\n \"\"\"Set the IPU convolution options for the session.\n\n .. code-block:: python\n\n # Set \"availableMemoryProportion\" flag to \"0.1\"\n opts = create_ipu_config()\n opts = set_convolution_options(opts,\n convolution_options={\"availableMemoryProportion\": \"0.1\"})\n ipu.utils.configure_ipu_system(opts)\n with tf.Session() as s:\n ...\n\n Args:\n opts: An IpuOptions session control protobuf.\n convolution_options: A dictionary of poplar option flags for\n convolutions. The \"availableMemoryProportion\" flag indicates the\n proportion of tile memory to be made available as\n temporary memory for convolutions (float between 0 and 1.0).\n Less temporary memory will generally result in a convolution that\n takes more cycles to complete. However, because always live memory\n (such as control code and vertex state) is not tracked when planning it,\n a convolution using less temporary memory may use more memory overall,\n due to an increase of always live memory.\n\n Returns:\n The IpuOptions configuration protobuf, with convolution options set.\n \"\"\"\n if convolution_options:\n if not isinstance(convolution_options, dict):\n raise Exception(\"`convolution_options` must be a dictionary\")\n\n for (option_name, value) in convolution_options.items():\n opt = opts.convolution_options.add()\n opt.option = option_name\n opt.value = value\n\n return opts\n\n\ndef set_matmul_options(opts, matmul_options=None, clear_pass_type=False):\n \"\"\"Set the IPU matrix multiplication options for the session.\n\n .. code-block:: python\n\n # Set \"availableMemoryProportion\" flag to \"0.5\"\n opts = create_ipu_config()\n opts = set_matmul_options(opts,\n matmul_options={\"availableMemoryProportion\": \"0.5\"})\n ipu.utils.configure_ipu_system(opts)\n with tf.Session() as s:\n ...\n\n Args:\n opts: An IpuOptions session control protobuf.\n matmul_options: A dictionary containing the poplar option flag\n \"availableMemoryProportion\" for the matrix multiplication operations.\n It indicates the proportion of tile memory to be made available as\n temporary memory for the matrix multiplications (float between 0 and 1.0).\n Less temporary memory will generally result in a multiplication that\n takes more cycles to complete. 
However, because always live memory\n (like code and vertex state) is not tracked when planning it,\n a multiplication using less temporary memory may use more memory overall,\n due to an increase of always live memory.\n clear_pass_type: When set to True, the Pass type will not\n be set in the options passed to the poplar operation.\n\n Returns:\n The IpuOptions configuration protobuf, with matmul options set.\n \"\"\"\n if matmul_options:\n if not isinstance(matmul_options, dict):\n raise Exception(\"`matmul_options` must be a dictionary\")\n\n for (option_name, value) in matmul_options.items():\n opt = opts.matmul_options.add()\n opt.option = option_name\n opt.value = value\n\n opts.clear_matmul_pass_type = clear_pass_type\n\n return opts\n\n\ndef set_pooling_options(opts, pooling_options=None):\n \"\"\"Set the IPU pooling compilation options for the session.\n\n .. code-block:: python\n\n # Set \"poolUseIntrospectiveMapping\" flag to \"false\"\n opts = create_ipu_config()\n opts = set_pooling_options(opts,\n pooling_options={\"poolUseIntrospectiveMapping\": \"false\"})\n ipu.utils.configure_ipu_system(opts)\n with tf.Session() as s:\n ...\n\n Args:\n opts: An IpuOptions session control protobuf.\n pooling_options: A dictionary of poplar option flags for the pooling\n operation.\n\n Returns:\n The IpuOptions configuration protobuf, with pooling options set.\n \"\"\"\n if pooling_options:\n if not isinstance(pooling_options, dict):\n raise Exception(\"`pooling_options` must be a dictionary\")\n\n for (option_name, value) in pooling_options.items():\n opt = opts.pooling_options.add()\n opt.option = option_name\n opt.value = value\n\n return opts\n\n\[email protected]_args(\n None, \"report_options is deprecated, use graph_options and\"\n \" execution_options instead\", \"report_options\")\ndef set_report_options(opts,\n report_options=None,\n graph_options=None,\n execution_options=None):\n \"\"\"Set the options used to influence Poplar graph and execution reports\n generation.\n\n\n .. 
code-block:: python\n\n opts = create_ipu_config()\n opts = set_report_options(opts,\n report_options={\"reportOption1\": \"false\"},\n graph_options={\"graphOptions\": \"false\"},\n execution_options={\"executionOptions\": \"false\"})\n ipu.utils.configure_ipu_system(opts)\n with tf.Session() as s:\n ...\n\n Args:\n opts: An IpuOptions session control protobuf.\n report_options: (Deprecated) A dictionary of poplar option flags for\n the report generation.\n graph_options: A dictionary of poplar option flags for the graph report\n generation.\n execution_options: A dictionary of poplar option flags for the execution\n report generation.\n\n Returns:\n The IpuOptions configuration protobuf, with convolution options set.\n \"\"\"\n def use_report_options():\n if report_options:\n if not isinstance(report_options, dict):\n raise Exception(\"`report_options` must be a dictionary\")\n return report_options\n\n if not graph_options:\n graph_options = use_report_options()\n\n if graph_options:\n if not isinstance(graph_options, dict):\n raise Exception(\"`graph_options` must be a dictionary\")\n\n for (option_name, value) in graph_options.items():\n opt = opts.profiling.graph_options.add()\n opt.option = option_name\n opt.value = value\n\n if not execution_options:\n execution_options = use_report_options()\n\n if execution_options:\n if not isinstance(execution_options, dict):\n raise Exception(\"`execution_options` must be a dictionary\")\n\n for (option_name, value) in execution_options.items():\n opt = opts.profiling.execution_options.add()\n opt.option = option_name\n opt.value = value\n\n return opts\n\n\ndef set_ipu_model_options(opts, compile_ipu_code=True):\n \"\"\"Set the IPU Model options.\n\n Args:\n compile_ipu_code: Whether or not to actually compile real IPU code for\n modelling.\n\n Returns:\n The IpuOptions configuration protobuf, with IPU model options set.\n \"\"\"\n opts.ipu_model_config.compile_ipu_code = compile_ipu_code\n\n return opts\n\n\[email protected]_args(\n None,\n \"Pipelining recomputation will recompute all the non-stateful operations \"\n \"when recomputation is enabled.\",\n \"allow_stateful_recompute\",\n)\ndef set_recomputation_options(opts,\n allow_recompute=True,\n allow_stateful_recompute=None): # pylint: disable=unused-argument\n \"\"\"Set re-computation options.\n\n Args:\n allow_recompute: Whether or not to re-compute instructions during training.\n If this is enabled then we will attempt to pattern match\n instructions/pipeline stages in the forward pass and recompute them in the\n backward pass to avoid having to preserve activations which increase the\n maximum memory liveness. Enabling this option can reduce memory usage at\n the expense of extra computation. 
Any stateful operations cannot be\n recomputed.\n allow_stateful_recompute: Deprecated.\n\n Returns:\n The IpuOptions configuration protobuf.\n \"\"\"\n\n opts.speed_size_config.allow_recompute = allow_recompute\n\n return opts\n\n\ndef set_floating_point_behaviour_options(opts,\n inv=True,\n div0=True,\n oflo=True,\n esr=True,\n nanoo=True):\n \"\"\"Set the IPU floating point control behaviour bits\n\n See the Poplar API documentation for poplar::FloatingPointBehaviour.\n\n Args:\n inv: If true a floating point invalid operation (defined by IEEE 754)\n will cause an exception.\n div0: If true a floating point divide by zero operation will cause an\n exception.\n oflo: If true a floating point overflow will cause an exception.\n esr: Enable stochastic rounding.\n nanoo: Enable Not-a-Number on overflow mode.\n \"\"\"\n opts.floating_point_behaviour.flags_set = True\n opts.floating_point_behaviour.inv = inv\n opts.floating_point_behaviour.div0 = div0\n opts.floating_point_behaviour.oflo = oflo\n opts.floating_point_behaviour.esr = esr\n opts.floating_point_behaviour.nanoo = nanoo\n\n return opts\n\n\ndef set_gcl_options(opts, num_io_tiles=0, gcl_options=None):\n \"\"\"Set the IPU options for the Graphcore Communication Library.\n\n Args:\n num_io_tiles: Number of tiles to reserve per IPU for the GCL collective\n operations.\n gcl_options: A dictionary with options for configuring the GCL collective\n operations.\n\n Returns:\n The IpuOptions configuration protobuf.\n \"\"\"\n opts.gcl_num_io_tiles = num_io_tiles\n\n if gcl_options:\n if not isinstance(gcl_options, dict):\n raise TypeError(\"`gcl_options` must be a dictionary\")\n\n for (option_name, value) in gcl_options.items():\n opt = opts.gcl_options.add()\n opt.option = option_name\n opt.value = value\n\n return opts\n\n\ndef auto_select_ipus(opts, num_ipus):\n \"\"\"Configure the IPUs to be used by the session.\n\n The configuration describes a system consisting of multiple Tensorflow\n devices, each with control of one of more IPUs. The devices will be labeled\n ``/device:IPU:0``, ``/device:IPU:1`` and so on.\n\n Each device can control a specific number of IPUs, given by the ``num_ipus``\n parameter. The system will automatically select IPU configurations from the\n available IPUs, where they match the desired number of IPUs.\n\n Examples:\n\n\n .. code-block:: python\n\n # Create a single device, with one IPU\n opts = create_ipu_config()\n opts = auto_select_ipus(opts, num_ipus=1)\n ipu.utils.configure_ipu_system(opts)\n with tf.Session() as s:\n ...\n\n .. code-block:: python\n\n # Create two devices, with 2 IPUs per device.\n opts = create_ipu_config()\n opts = auto_select_ipus(opts, num_ipus=[2,2])\n ipu.utils.configure_ipu_system(opts)\n with tf.Session() as s:\n ...\n\n .. 
code-block:: python\n\n # Create two devices, with 1 IPU in the first device and 2 IPUs\n # in the second device.\n opts = create_ipu_config()\n opts = auto_select_ipus(opts, num_ipus=[1,2])\n ipu.utils.configure_ipu_system(opts)\n with tf.Session() as s:\n ...\n\n Args:\n opts: An IpuOptions session control protobuf.\n num_ipus: List of IPUs per Tensorflow device\n\n Returns:\n The IpuOptions configuration protobuf, configured for auto-selecting a set\n of IPU devices.\n \"\"\"\n if opts.device_config:\n raise Exception(\"IPU devices have already been configured.\")\n\n if not isinstance(num_ipus, (int, list, tuple)):\n raise Exception(\"`num_ipus` must be an integer, list or tuple.\")\n\n if isinstance(num_ipus, int):\n dev = opts.device_config.add()\n dev.auto_count = num_ipus\n else:\n for n in num_ipus:\n dev = opts.device_config.add()\n dev.auto_count = n\n\n return opts\n\n\ndef select_ipus(opts, indices):\n \"\"\"Configure the IPUs to be used by the session.\n\n The configuration describes a system consisting of multiple Tensorflow\n devices, each with control of one of more IPUs. The Tensorflow devices will be\n labeled ``/device:IPU:0``, ``/device:IPU:1`` and so on.\n\n Each Tensorflow device uses a specific configuration consisting of one or more\n IPUs from the list of devices. These can be found by running the Graphcore\n utility ``gc-info -l``. For instance, the following listing shows the device\n configurations available on a system with 16 IPUs.\n\n .. code-block:: shell\n\n user@host:~$ gc-info -l\n Graphcore device listing:\n\n -+- Id: [0], type: [PCIe], PCI Domain: [0000:1a:00.0]\n -+- Id: [1], type: [PCIe], PCI Domain: [0000:1b:00.0]\n -+- Id: [2], type: [PCIe], PCI Domain: [0000:23:00.0]\n -+- Id: [3], type: [PCIe], PCI Domain: [0000:24:00.0]\n -+- Id: [4], type: [PCIe], PCI Domain: [0000:3d:00.0]\n -+- Id: [5], type: [PCIe], PCI Domain: [0000:3e:00.0]\n -+- Id: [6], type: [PCIe], PCI Domain: [0000:43:00.0]\n -+- Id: [7], type: [PCIe], PCI Domain: [0000:44:00.0]\n -+- Id: [8], type: [PCIe], PCI Domain: [0000:8b:00.0]\n -+- Id: [9], type: [PCIe], PCI Domain: [0000:8c:00.0]\n -+- Id: [10], type: [PCIe], PCI Domain: [0000:8e:00.0]\n -+- Id: [11], type: [PCIe], PCI Domain: [0000:8f:00.0]\n -+- Id: [12], type: [PCIe], PCI Domain: [0000:b8:00.0]\n -+- Id: [13], type: [PCIe], PCI Domain: [0000:b9:00.0]\n -+- Id: [14], type: [PCIe], PCI Domain: [0000:ba:00.0]\n -+- Id: [15], type: [PCIe], PCI Domain: [0000:bb:00.0]\n -+- Id: [16], type: [Multi IPU]\n |--- PCIe Id: [5], DNC Id: [0], PCI Domain: [0000:3e:00.0]\n |--- PCIe Id: [7], DNC Id: [1], PCI Domain: [0000:44:00.0]\n -+- Id: [17], type: [Multi IPU]\n |--- PCIe Id: [4], DNC Id: [0], PCI Domain: [0000:3d:00.0]\n |--- PCIe Id: [6], DNC Id: [1], PCI Domain: [0000:43:00.0]\n -+- Id: [18], type: [Multi IPU]\n |--- PCIe Id: [3], DNC Id: [0], PCI Domain: [0000:24:00.0]\n |--- PCIe Id: [1], DNC Id: [1], PCI Domain: [0000:1b:00.0]\n -+- Id: [19], type: [Multi IPU]\n |--- PCIe Id: [2], DNC Id: [0], PCI Domain: [0000:23:00.0]\n |--- PCIe Id: [0], DNC Id: [1], PCI Domain: [0000:1a:00.0]\n -+- Id: [20], type: [Multi IPU]\n |--- PCIe Id: [13], DNC Id: [0], PCI Domain: [0000:b9:00.0]\n |--- PCIe Id: [15], DNC Id: [1], PCI Domain: [0000:bb:00.0]\n -+- Id: [21], type: [Multi IPU]\n |--- PCIe Id: [12], DNC Id: [0], PCI Domain: [0000:b8:00.0]\n |--- PCIe Id: [14], DNC Id: [1], PCI Domain: [0000:ba:00.0]\n -+- Id: [22], type: [Multi IPU]\n |--- PCIe Id: [9], DNC Id: [0], PCI Domain: [0000:8c:00.0]\n |--- PCIe Id: [11], DNC Id: [1], PCI Domain: 
[0000:8f:00.0]\n -+- Id: [23], type: [Multi IPU]\n |--- PCIe Id: [10], DNC Id: [0], PCI Domain: [0000:8e:00.0]\n |--- PCIe Id: [8], DNC Id: [1], PCI Domain: [0000:8b:00.0]\n -+- Id: [24], type: [Multi IPU]\n |--- PCIe Id: [5], DNC Id: [0], PCI Domain: [0000:3e:00.0]\n |--- PCIe Id: [7], DNC Id: [1], PCI Domain: [0000:44:00.0]\n |--- PCIe Id: [4], DNC Id: [2], PCI Domain: [0000:3d:00.0]\n |--- PCIe Id: [6], DNC Id: [3], PCI Domain: [0000:43:00.0]\n -+- Id: [25], type: [Multi IPU]\n |--- PCIe Id: [3], DNC Id: [0], PCI Domain: [0000:24:00.0]\n |--- PCIe Id: [1], DNC Id: [1], PCI Domain: [0000:1b:00.0]\n |--- PCIe Id: [2], DNC Id: [2], PCI Domain: [0000:23:00.0]\n |--- PCIe Id: [0], DNC Id: [3], PCI Domain: [0000:1a:00.0]\n -+- Id: [26], type: [Multi IPU]\n |--- PCIe Id: [13], DNC Id: [0], PCI Domain: [0000:b9:00.0]\n |--- PCIe Id: [15], DNC Id: [1], PCI Domain: [0000:bb:00.0]\n |--- PCIe Id: [12], DNC Id: [2], PCI Domain: [0000:b8:00.0]\n |--- PCIe Id: [14], DNC Id: [3], PCI Domain: [0000:ba:00.0]\n -+- Id: [27], type: [Multi IPU]\n |--- PCIe Id: [9], DNC Id: [0], PCI Domain: [0000:8c:00.0]\n |--- PCIe Id: [11], DNC Id: [1], PCI Domain: [0000:8f:00.0]\n |--- PCIe Id: [10], DNC Id: [2], PCI Domain: [0000:8e:00.0]\n |--- PCIe Id: [8], DNC Id: [3], PCI Domain: [0000:8b:00.0]\n -+- Id: [28], type: [Multi IPU]\n |--- PCIe Id: [5], DNC Id: [0], PCI Domain: [0000:3e:00.0]\n |--- PCIe Id: [7], DNC Id: [1], PCI Domain: [0000:44:00.0]\n |--- PCIe Id: [4], DNC Id: [2], PCI Domain: [0000:3d:00.0]\n |--- PCIe Id: [6], DNC Id: [3], PCI Domain: [0000:43:00.0]\n |--- PCIe Id: [3], DNC Id: [4], PCI Domain: [0000:24:00.0]\n |--- PCIe Id: [1], DNC Id: [5], PCI Domain: [0000:1b:00.0]\n |--- PCIe Id: [2], DNC Id: [6], PCI Domain: [0000:23:00.0]\n |--- PCIe Id: [0], DNC Id: [7], PCI Domain: [0000:1a:00.0]\n -+- Id: [29], type: [Multi IPU]\n |--- PCIe Id: [13], DNC Id: [0], PCI Domain: [0000:b9:00.0]\n |--- PCIe Id: [15], DNC Id: [1], PCI Domain: [0000:bb:00.0]\n |--- PCIe Id: [12], DNC Id: [2], PCI Domain: [0000:b8:00.0]\n |--- PCIe Id: [14], DNC Id: [3], PCI Domain: [0000:ba:00.0]\n |--- PCIe Id: [9], DNC Id: [4], PCI Domain: [0000:8c:00.0]\n |--- PCIe Id: [11], DNC Id: [5], PCI Domain: [0000:8f:00.0]\n |--- PCIe Id: [10], DNC Id: [6], PCI Domain: [0000:8e:00.0]\n |--- PCIe Id: [8], DNC Id: [7], PCI Domain: [0000:8b:00.0]\n -+- Id: [30], type: [Multi IPU]\n |--- PCIe Id: [5], DNC Id: [0], PCI Domain: [0000:3e:00.0]\n |--- PCIe Id: [7], DNC Id: [1], PCI Domain: [0000:44:00.0]\n |--- PCIe Id: [4], DNC Id: [2], PCI Domain: [0000:3d:00.0]\n |--- PCIe Id: [6], DNC Id: [3], PCI Domain: [0000:43:00.0]\n |--- PCIe Id: [3], DNC Id: [4], PCI Domain: [0000:24:00.0]\n |--- PCIe Id: [1], DNC Id: [5], PCI Domain: [0000:1b:00.0]\n |--- PCIe Id: [2], DNC Id: [6], PCI Domain: [0000:23:00.0]\n |--- PCIe Id: [0], DNC Id: [7], PCI Domain: [0000:1a:00.0]\n |--- PCIe Id: [13], DNC Id: [8], PCI Domain: [0000:b9:00.0]\n |--- PCIe Id: [15], DNC Id: [9], PCI Domain: [0000:bb:00.0]\n |--- PCIe Id: [12], DNC Id: [10], PCI Domain: [0000:b8:00.0]\n |--- PCIe Id: [14], DNC Id: [11], PCI Domain: [0000:ba:00.0]\n |--- PCIe Id: [9], DNC Id: [12], PCI Domain: [0000:8c:00.0]\n |--- PCIe Id: [11], DNC Id: [13], PCI Domain: [0000:8f:00.0]\n |--- PCIe Id: [10], DNC Id: [14], PCI Domain: [0000:8e:00.0]\n |--- PCIe Id: [8], DNC Id: [15], PCI Domain: [0000:8b:00.0]\n\n Examples based on the listing above:\n\n .. 
code-block:: python\n\n # Create a single device with 1 IPU at PCI address 0000:1a:00.0 by using\n # IPU configuration index 0\n opts = create_ipu_config()\n opts = select_ipus(opts, indices=[0])\n ipu.utils.configure_ipu_system(opts)\n with tf.Session() as s:\n ...\n\n .. code-block:: python\n\n # Create a single device with 1 IPU at PCI address 0000:8b:00.0 by using\n # IPU configuration index 8\n opts = create_ipu_config()\n opts = select_ipus(opts, indices=[8])\n ipu.utils.configure_ipu_system(opts)\n with tf.Session() as s:\n ...\n\n .. code-block:: python\n\n # Create two TensorFlow devices, with one IPU each, being devices at\n # indices 0 and 1\n opts = create_ipu_config()\n opts = select_ipus(opts, indices=[0, 1])\n ipu.utils.configure_ipu_system(opts)\n with tf.Session() as s:\n ...\n\n .. code-block:: python\n\n # Create two TensorFlow devices, with four IPUs each. The device\n # configurations at indices 24 (0000:3e:00.0, 0000:44:00.0, 0000:3d:00.0,\n # 000:43:00.0) and 25 (0000:24:00.0, 0000:1b:00.0, 0000:23:00.0,\n # 00:1a:00.0)\n opts = create_ipu_config()\n opts = select_ipus(opts, indices=[24, 25])\n ipu.utils.configure_ipu_system(opts)\n with tf.Session() as s:\n ...\n\n .. code-block:: python\n\n # Create four TensorFlow devices each with one IPU, at addresses\n # 0000:1a:00.0, 0000:1b:00.0, 0000:23:00.0, 0000:24:00.0.\n opts = create_ipu_config()\n opts = select_ipus(opts, indices=[0, 1, 2, 3])\n ipu.utils.configure_ipu_system(opts)\n with tf.Session() as s:\n ...\n\n Args:\n opts: An IpuOptions session control protobuf.\n indices: List of IPU configuration indices.\n Returns:\n The IpuOptions configuration protobuf, with a number of devices selected by\n IPU configuration index.\n \"\"\"\n\n if opts.device_config:\n raise Exception(\"IPU devices have already been configured.\")\n\n if not isinstance(indices, (list, tuple)):\n raise Exception(\"`indices` must be a list or tuple.\")\n\n if len(set(indices)) != len(indices):\n raise Exception(\"All device indeicies in `indices` must be unique.\")\n\n for i in indices:\n dev = opts.device_config.add()\n dev.cfg_index = i\n\n return opts\n\n\ndef set_ipu_connection_type(opts, connection_type=None, ipu_version=None):\n \"\"\" Configure when to attach to the device.\n\n .. code-block:: python\n\n # Compile without attaching to the device.\n opts = create_ipu_config()\n opts = set_ipu_connection_type(opts,\n DeviceConnectionType.ON_DEMAND))\n ipu.utils.configure_ipu_system(opts)\n with tf.Session() as s:\n ...\n\n Args:\n opts: An IpuOptions session control protobuf.\n connection_type: One of `DeviceConnectionType`.\n Defaults to `DeviceConnectionType.ALWAYS` if None.\n\n ipu_version: Version of the IPU hardware used. 
Required if the\n `connection_type` provided is `DeviceConnectionType.NEVER`.\n Returns:\n The IpuOptions configuration protobuf.\n \"\"\"\n connection_type = connection_type if connection_type \\\n else DeviceConnectionType.ALWAYS\n\n if connection_type == DeviceConnectionType.NEVER and ipu_version is None:\n raise Exception(\"`ipu_version` must be set when `connection_type` is set \"\n \"to `DeviceConnectionType.NEVER`\")\n opts.device_connection_type = connection_type.value\n\n if ipu_version is not None:\n opts.ipu_version = ipu_version\n opts.has_ipu_version = True\n\n return opts\n\n\ndef reset_ipu_seed(seed, device=\"/device:IPU:0\", cpu_device=\"cpu\"):\n \"\"\"Reset the seed used to generate stateful random numbers and perform\n stochastic rounding.\n\n Args:\n seed: The new random number generator seed.\n device: The device to which the seed will be applied.\n cpu_device: The CPU device which is on the same hardware to the IPU device.\n\n Returns:\n None\n \"\"\"\n g = ops.Graph()\n with g.as_default():\n with ops.device(cpu_device):\n cfg_op = gen_ipu_ops.ipu_reset_seed(device, seed)\n\n with session_lib.Session(graph=g) as sess:\n sess.run(cfg_op)\n\n\ndef extract_all_strings_from_event_trace(events):\n \"\"\"Extract a concatenation of all data strings from an IPU event trace.\n\n Args:\n events: An array of IPU events as returned from the ``ipu_compile_summary``\n operation.\n\n Returns:\n A string containing the concatenation of all of the data fields of the\n events.\n\n \"\"\"\n result = \"\"\n for e in events:\n evt = IpuTraceEvent.FromString(e)\n\n result = result + (\"-\" * 70) + \"\\n=> @ \" + \\\n time.strftime('%F %T %z', time.localtime(evt.timestamp)) + \": \"\n\n if evt.type == IpuTraceEvent.COMPILE_BEGIN:\n evt_str = \"Compile begin: \" + \\\n evt.compile_begin.module_name.decode('utf-8') + \"\\n\"\n elif evt.type == IpuTraceEvent.COMPILE_END:\n evt_str = \"Compile end: \" + \\\n evt.compile_end.module_name.decode('utf-8') + \"\\n\" + \\\n \"Duration: \" + str(evt.compile_end.duration) + \" us\\n\" + \\\n evt.compile_end.compilation_report.decode('utf-8')\n elif evt.type == IpuTraceEvent.HOST_TO_DEVICE_TRANSFER:\n evt_str = \"Host->Device\\n\" + \\\n evt.data_transfer.data_transfer.decode('utf-8') + \"\\n\"\n elif evt.type == IpuTraceEvent.DEVICE_TO_HOST_TRANSFER:\n evt_str = \"Device->Host\\n\" + \\\n evt.data_transfer.data_transfer.decode('utf-8') + \"\\n\"\n elif evt.type == IpuTraceEvent.LOAD_ENGINE:\n evt_str = \"Load engine: \" + \\\n evt.load_engine.module_name.decode('utf-8') + \"\\n\"\n elif evt.type == IpuTraceEvent.EXECUTE:\n evt_str = \"Execute: \" + \\\n evt.execute.module_name.decode('utf-8') + \"\\n\" + \\\n evt.execute.execution_report.decode('utf-8')\n else:\n evt_str = \"Unknown event\"\n\n result = result + evt_str + '\\n'\n\n return result\n\n\ndef extract_all_types_from_event_trace(events):\n \"\"\"Return a list of the types of each event in an event trace tensor\n\n Args:\n events: A tensor containing a list of IPU events as protobuf strings\n\n Returns:\n A list containing the type of each event\n \"\"\"\n result = []\n for e in events:\n evt = IpuTraceEvent.FromString(e)\n result += [evt.type]\n return result\n\n\ndef extract_all_events(events):\n \"\"\"Extract a list containing each event as an event object\n\n Args:\n events: A tensor containing a list of IPU events as protobuf strings\n\n Returns:\n A list containing IpuTraceEvent objects\n \"\"\"\n result = []\n for e in events:\n evt = IpuTraceEvent.FromString(e)\n result += [evt]\n 
return result\n\n\ndef extract_compile_reports(events):\n \"\"\"Get a list of all compiler reports in the event list.\n\n Args:\n events: A list of trace event serialized protobufs.\n\n Returns:\n A list of tuples containing the module name and report.\"\"\"\n result = []\n for e in events:\n evt = IpuTraceEvent.FromString(e)\n if evt.type == IpuTraceEvent.COMPILE_END:\n try:\n module = evt.compile_end.module_name.decode('utf-8')\n rep = evt.compile_end.compilation_report.decode('utf-8')\n if rep:\n result += [(module, rep)]\n except UnicodeDecodeError:\n pass\n return result\n\n\ndef extract_poplar_serialized_graphs(events):\n \"\"\"Get a list of all poplar serialized graphs in the event list.\n\n Args:\n events: A list of trace event serialized protobufs.\n\n Returns:\n A list of tuples containing the module name and report.\"\"\"\n result = []\n for e in events:\n evt = IpuTraceEvent.FromString(e)\n if evt.type == IpuTraceEvent.COMPILE_END:\n try:\n rep = evt.compile_end.poplar_graph.decode('utf-8')\n except UnicodeDecodeError:\n rep = evt.compile_end.poplar_graph\n\n module = evt.compile_end.module_name.decode('utf-8')\n if rep:\n result += [(module, rep)]\n return result\n\n\ndef extract_execute_reports(events):\n \"\"\"Get a list of all compiler reports in the event list.\n\n Args:\n events: A list of trace event serialized protobufs.\n\n Returns:\n A list of tuples containing the module name and report.\"\"\"\n result = []\n for e in events:\n evt = IpuTraceEvent.FromString(e)\n if evt.type == IpuTraceEvent.EXECUTE:\n try:\n module = evt.execute.module_name.decode('utf-8')\n rep = evt.execute.execution_report.decode('utf-8')\n if rep:\n result += [(module, rep)]\n except UnicodeDecodeError:\n pass\n return result\n\n\ndef move_variable_initialization_to_cpu(graph=None):\n \"\"\"For all variables in the VARIABLES collection, move any initialization\n ops onto the CPU.\n\n Args:\n graph: Operations are moved around on this graph. 
The default graph will be\n used if not specified.\n\n Returns:\n None\n \"\"\"\n if not graph:\n graph = ops.get_default_graph()\n\n with ops.device(\"/device:CPU:0\"):\n control_flow_ops.no_op(name=\"cpu\")\n variables = []\n for v in graph.get_collection('variables'):\n # We assume a distribution strategy knows better how to\n # initialize its own variables, so skip those.\n if not isinstance(v, values.DistributedVariable):\n variables.append(v)\n\n def _uses_resource(op):\n \"\"\" Helper to determine if an op uses a resource \"\"\"\n return any(input_tensor.dtype == 'resource' for input_tensor in op.inputs)\n\n init_ops = []\n dep_ops = [v.initializer.inputs[1].op for v in variables]\n visited = set()\n\n # Depth-first search up the graph starting from all variables in VARIABLES\n # Place all touched ops on the CPU, but do not touch or search ops that use\n # resource tensors, otherwise device colocation could be violated.\n while dep_ops:\n op = dep_ops.pop()\n if op not in visited and not _uses_resource(op):\n visited.add(op)\n init_ops += [op]\n dep_ops += [x.op for x in op.inputs]\n\n # pylint: disable=protected-access\n for op in init_ops:\n op._set_device('/device:CPU:0')\n op._set_attr(\n '_class',\n attr_value_pb2.AttrValue(list=attr_value_pb2.AttrValue.ListValue(\n s=[b'loc:@cpu'])))\n op._set_attr('_XlaCompile', attr_value_pb2.AttrValue(b=False))\n op._set_attr('_XlaScope', attr_value_pb2.AttrValue(s=b''))\n # pylint: enable=protected-access\n\n return\n\n\ndef export_dataset_to_file(dataset_or_infeed,\n output_filename,\n num_elements,\n feed_name=\"\",\n apply_options=True):\n \"\"\"Export as binary `num_elements` from the given `infeed` to the specified\n `output_filename`.\n\n If the infeed elements are tuples then one file per tuple element will be\n created.\n For example, if `dataset` looks like\n\n .. code-block:: python\n\n [{ \"a\": A_0, \"b\": B_0}, { \"a\": A_1, \"b\": B_1}, ...]\n\n then `export_dataset_to_file(dataset, \"my_dataset.bin\", 100)` will generate:\n\n .. 
code-block:: python\n\n my_dataset.0.bin # Contains tensors [ A_0, A_1, ..., A_99]\n my_dataset.1.bin # Contains tensors [ B_0, B_1, ..., B_99]\n\n Args:\n dataset_or_infeed: An unary dataset with the same input and output\n structure or an `IPUInfeedQueue`.\n output_filename: Where to export the tensors to.\n num_elements: Number of elements to export from the dataset.\n feed_name: Specify the feed name.\n apply_options: Whether to apply optimization options which can improve the\n dataset performance.\n \"\"\"\n assert isinstance(dataset_or_infeed,\n (dataset_ops.Dataset, ipu_infeed_queue.IPUInfeedQueue))\n if isinstance(dataset_or_infeed, ipu_infeed_queue.IPUInfeedQueue):\n dataset = dataset_or_infeed._dataset # pylint: disable=protected-access\n feed_name = feed_name or dataset_or_infeed._id # pylint: disable=protected-access\n else:\n dataset = dataset_or_infeed\n if apply_options:\n dataset = dataset._apply_options() # pylint: disable=protected-access\n\n extractor = dataset_extractor.dataset_extractor(dataset, num_elements,\n output_filename, feed_name)\n with ops.device(\"cpu\"), session_lib.Session() as sess:\n sess.run(extractor)\n\n\ndef export_inputs_to_file(inputs, output_filename, feed_dict):\n \"\"\"Export as binary the list of `inputs` provided to the specified\n `output_filename`.\n\n Args:\n inputs: List of graph inputs to export.\n output_filename: Where to export the tensors to.\n feed_dict: Feed dictionary containing the inputs' values.\n \"\"\"\n\n with ops.device(\"cpu\"), session_lib.Session() as sess:\n sess.run(dataset_extractor.export_variables(inputs, output_filename),\n feed_dict)\n"
] | [
[
"tensorflow.python.ipu.dataset_extractor.dataset_extractor",
"tensorflow.compiler.plugin.poplar.driver.config_pb2.IpuDeviceConnectionType.Value",
"tensorflow.python.ipu.dataset_extractor.export_variables",
"tensorflow.python.framework.ops.Graph",
"tensorflow.compiler.plugin.poplar.driver.config_pb2.IpuOptions",
"tensorflow.compat.v1.executing_eagerly",
"tensorflow.python.client.session.Session",
"tensorflow.core.framework.attr_value_pb2.AttrValue.ListValue",
"tensorflow.python.framework.ops.device",
"tensorflow.compiler.plugin.poplar.driver.trace_pb2.IpuTraceEvent.FromString",
"tensorflow.compiler.plugin.poplar.ops.gen_ipu_ops.ipu_get_configuration",
"tensorflow.compiler.plugin.poplar.driver.config_pb2.IpuExecutionProfileType.Value",
"tensorflow.compiler.plugin.poplar.ops.gen_ipu_ops.ipu_reset_seed",
"tensorflow.core.framework.attr_value_pb2.AttrValue",
"tensorflow.compiler.plugin.poplar.driver.config_pb2.IpuSelectionOrder.Value",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.compiler.plugin.poplar.ops.gen_ipu_ops.ipu_get_num_devices",
"tensorflow.python.util.deprecation.deprecated_args",
"tensorflow.python.ops.control_flow_ops.no_op"
]
] |
Lakonik/EPro-PnP | [
"931df847190ce10eddd1dc3e3168ce1a2f295ffa"
] | [
"EPro-PnP-Det/epropnp_det/core/bbox_3d/misc.py"
] | [
"\"\"\"\nCopyright (C) 2010-2022 Alibaba Group Holding Limited.\nThis file is modified from\nhttps://github.com/tjiiv-cprg/MonoRUn\n\"\"\"\n\nimport math\nimport numpy as np\nimport torch\nfrom pytorch3d.structures.meshes import Meshes\n\nfrom epropnp_det.ops.iou3d.iou3d_utils import nms_gpu\n\n\ndef gen_unit_noc(num_pts, device=None):\n indices = torch.arange(0, num_pts, dtype=torch.float32, device=device) + 0.5\n phi = torch.arccos(1 - 2 * indices / num_pts)\n theta = math.pi * (1 + 5**0.5) * indices\n xyz = torch.stack(\n (torch.cos(theta) * torch.sin(phi),\n torch.sin(theta) * torch.sin(phi),\n torch.cos(phi)), dim=-1)\n return xyz\n\n\ndef project_to_image_r_mat(\n x3d, r_mat, t_vec, cam_intrinsic, img_shapes, z_min=0.5, allowed_border=200,\n return_z=False, return_clip_mask=False):\n \"\"\"\n Args:\n x3d (torch.Tensor): shape (*, num_points, 3)\n r_mat (torch.Tensor): shape (*, 3, 3)\n t_vec (torch.Tensor): shape (*, 3) in format [x, y, z]\n cam_intrinsic (torch.Tensor): shape (*, 3, 3)\n img_shapes (torch.Tensor): shape (*, 2)\n\n Returns:\n Tensor: x2d_proj, shape (*, num_points, 2)\n \"\"\"\n proj_r_mats = cam_intrinsic @ r_mat # (*, 3, 3)\n proj_t_vecs = cam_intrinsic @ t_vec.unsqueeze(-1) # (*, 3, 1)\n # (*, num_points, 3) = ((*, 3, 3) @ (*, 3, num_points) + (*, 3, 1)).T\n xyz_proj = (proj_r_mats @ x3d.transpose(-1, -2) + proj_t_vecs).transpose(-1, -2)\n z_proj = xyz_proj[..., 2:] # (*, num_points, 1)\n if return_clip_mask:\n z_clip_mask = z_proj < z_min\n z_proj = z_proj.clamp(min=z_min)\n x2d_proj = xyz_proj[..., :2] / z_proj # (*, num_points, 2)\n # clip to border\n x2d_min = -allowed_border - 0.5 # Number\n x2d_max = img_shapes[..., None, [1, 0]] + (allowed_border - 0.5) # (*, 1, 2)\n if return_clip_mask:\n x2d_clip_mask = (x2d_proj < x2d_min) | (x2d_proj > x2d_max)\n clip_mask = z_clip_mask.squeeze(-1) | x2d_clip_mask.any(-1) # (*, num_points)\n x2d_proj = torch.min(x2d_proj.clamp(min=x2d_min), x2d_max)\n if not return_z:\n if not return_clip_mask:\n return x2d_proj\n else:\n return x2d_proj, clip_mask\n else:\n if not return_clip_mask:\n return x2d_proj, z_proj\n else:\n return x2d_proj, z_proj, clip_mask\n\n\ndef project_to_image(\n x3d, pose, cam_intrinsic, img_shapes, z_min=0.5, allowed_border=200,\n return_z=False, return_clip_mask=False):\n \"\"\"\n Args:\n x3d (torch.Tensor): shape (*, num_points, 3)\n pose (torch.Tensor): shape (*, 4) in format [x, y, z, yaw]\n cam_intrinsic (torch.Tensor): shape (*, 3, 3)\n img_shapes (torch.Tensor): shape (*, 2)\n\n Returns:\n Tensor: x2d_proj, shape (*, num_points, 2)\n \"\"\"\n r_mat = yaw_to_rot_mat(pose[..., 3])\n t_vec = pose[..., :3]\n return project_to_image_r_mat(x3d, r_mat, t_vec, cam_intrinsic, img_shapes, z_min,\n allowed_border, return_z, return_clip_mask)\n\n\ndef yaw_to_rot_mat(yaw):\n \"\"\"\n Args:\n yaw: (*)\n\n Returns:\n rot_mats: (*, 3, 3)\n \"\"\"\n if isinstance(yaw, torch.Tensor):\n pkg = torch\n device_kwarg = dict(device=yaw.device)\n else:\n pkg = np\n device_kwarg = dict()\n sin_yaw = pkg.sin(yaw)\n cos_yaw = pkg.cos(yaw)\n # [[ cos_yaw, 0, sin_yaw],\n # [ 0, 1, 0],\n # [-sin_yaw, 0, cos_yaw]]\n rot_mats = pkg.zeros(yaw.shape + (3, 3), dtype=pkg.float32, **device_kwarg)\n rot_mats[..., 0, 0] = cos_yaw\n rot_mats[..., 2, 2] = cos_yaw\n rot_mats[..., 0, 2] = sin_yaw\n rot_mats[..., 2, 0] = -sin_yaw\n rot_mats[..., 1, 1] = 1\n return rot_mats\n\n\ndef rot_mat_to_yaw(rot_mat):\n \"\"\"\n Args:\n rot_mat: (*, 3, 3)\n\n Returns:\n yaw: (*)\n \"\"\"\n if isinstance(rot_mat, torch.Tensor):\n atan2 = 
torch.atan2\n else:\n atan2 = np.arctan2\n yaw = atan2(rot_mat[..., 0, 2] - rot_mat[..., 2, 0], rot_mat[..., 0, 0] + rot_mat[..., 2, 2])\n return yaw\n\n\ndef box_mesh():\n return Meshes(\n verts=[torch.tensor([[-1, -1, 1],\n [ 1, -1, 1],\n [-1, 1, 1],\n [ 1, 1, 1],\n [-1, -1, -1],\n [ 1, -1, -1],\n [-1, 1, -1],\n [ 1, 1, -1]], dtype=torch.float32)],\n faces=[torch.tensor([[0, 1, 2],\n [1, 3, 2],\n [2, 3, 7],\n [2, 7, 6],\n [1, 7, 3],\n [1, 5, 7],\n [6, 7, 4],\n [7, 5, 4],\n [0, 4, 1],\n [1, 4, 5],\n [2, 6, 4],\n [0, 2, 4]], dtype=torch.int)])\n\n\ndef compute_box_3d(bbox_3d):\n \"\"\"\n Args:\n bbox_3d: (*, 7)\n\n Returns:\n corners: (*, 8, 3)\n edge_corner_idx: (12, 2)\n \"\"\"\n bs = bbox_3d.shape[:-1]\n rotation_matrix = yaw_to_rot_mat(bbox_3d[..., 6]) # (*bs, 3, 3)\n edge_corner_idx = np.array([[0, 1],\n [1, 2],\n [2, 3],\n [3, 0],\n [4, 5],\n [5, 6],\n [6, 7],\n [7, 4],\n [0, 4],\n [1, 5],\n [2, 6],\n [3, 7]])\n corners = np.array([[ 0.5, 0.5, 0.5],\n [ 0.5, 0.5, -0.5],\n [-0.5, 0.5, -0.5],\n [-0.5, 0.5, 0.5],\n [ 0.5, -0.5, 0.5],\n [ 0.5, -0.5, -0.5],\n [-0.5, -0.5, -0.5],\n [-0.5, -0.5, 0.5]], dtype=np.float32)\n if isinstance(bbox_3d, torch.Tensor):\n edge_corner_idx = torch.from_numpy(edge_corner_idx).to(device=bbox_3d.device)\n corners = torch.from_numpy(corners).to(device=bbox_3d.device)\n corners = corners * bbox_3d[..., None, :3] # (*bs, 8, 3)\n corners = (rotation_matrix[..., None, :, :] @ corners[..., None]).reshape(*bs, 8, 3) \\\n + bbox_3d[..., None, 3:6]\n return corners, edge_corner_idx\n\n\ndef edge_intersection(corners, edge_corner_idx, clip_axis, clip_val, op, edge_valid_mask=None):\n \"\"\"\n Args:\n corners: (bs, 8, 3/2)\n edge_corner_idx: (12, 2)\n clip_val: (bs, )\n edge_valid_mask: (bs, 12)\n \"\"\"\n if op == 'greater':\n op = torch.greater\n elif op == 'less':\n op = torch.less\n if edge_valid_mask is None:\n edge_valid_mask = corners.new_ones(\n (corners.size(0), edge_corner_idx.size(0)), dtype=torch.bool)\n corners_inside = op(corners[..., clip_axis], clip_val[:, None]) # (bs, 8)\n # compute z intersection\n edges_0_inside = corners_inside[:, edge_corner_idx[:, 0]] # (bs, 12)\n edges_1_inside = corners_inside[:, edge_corner_idx[:, 1]] # (bs, 12)\n edges_clipped = (edges_0_inside ^ edges_1_inside) & edge_valid_mask # (bs, 12)\n edges_clipped_idx = edges_clipped.nonzero() # (num_nonzero, 2) in [bs_ind, edge_ind]\n if edges_clipped_idx.shape[0] > 0:\n edge_corner_idx_to_clip = edge_corner_idx[edges_clipped_idx[:, 1], :] # (num_nonzero, 2)\n edges_0 = corners[edges_clipped_idx[:, 0], edge_corner_idx_to_clip[:, 0], :] # (num_nonzero, 3)\n edges_1 = corners[edges_clipped_idx[:, 0], edge_corner_idx_to_clip[:, 1], :] # (num_nonzero, 3)\n axval0 = edges_0[:, clip_axis] # (num_nonzero, )\n axval1 = edges_1[:, clip_axis]\n clip_val_ = clip_val[edges_clipped_idx[:, 0]]\n weight_0 = axval1 - clip_val_ # (num_nonzero, )\n weight_1 = clip_val_ - axval0\n intersection = (edges_0 * weight_0[:, None] + edges_1 * weight_1[:, None]\n ) * (1 / (axval1 - axval0)).clamp(min=-1e6, max=1e6)[:, None] # (num_nonzero, 3)\n clip_idx = torch.where(op(axval0, clip_val_),\n edge_corner_idx_to_clip[:, 1],\n edge_corner_idx_to_clip[:, 0]) # (num_nonzero, )\n corners[edges_clipped_idx[:, 0], clip_idx, :] = intersection # replace clipped corners with intersection\n corners_inside[edges_clipped_idx[:, 0], clip_idx] = True\n edge_valid_mask &= corners_inside[:, edge_corner_idx[:, 0]] & corners_inside[:, edge_corner_idx[:, 1]]\n else:\n edge_valid_mask &= edges_0_inside & edges_1_inside\n return 
corners, corners_inside, edge_valid_mask\n\n\ndef bboxes_3d_to_2d(bbox_3d, cam_intrinsic, imsize, z_clip=0.1, min_size=4.0, clip=False):\n \"\"\"\n Args:\n bbox_3d: (bs, 7)\n cam_intrinsic: (bs, 3, 3)\n imsize: (bs, 2) in [h, w]\n \"\"\"\n assert bbox_3d.dim() == 2\n bs = bbox_3d.size(0)\n if bs > 0:\n # (bs, 8, 3), (12, 2)\n corners, edge_corner_idx = compute_box_3d(bbox_3d)\n corners, in_front, edge_valid_mask = edge_intersection(\n corners, edge_corner_idx, 2, corners.new_tensor([z_clip]).expand(bs), 'greater')\n pts_2d = corners @ cam_intrinsic.transpose(-1, -2)\n pts_2d = pts_2d[..., :2] / pts_2d[..., 2:].clamp(min=z_clip) + 0.5 # (bs, 8, 2)\n in_canvas = in_front\n if clip:\n pts_2d, in_canvas_x0, edge_valid_mask = edge_intersection(\n pts_2d, edge_corner_idx, 0, corners.new_tensor([0]).expand(bs), 'greater', edge_valid_mask)\n pts_2d, in_canvas_y0, edge_valid_mask = edge_intersection(\n pts_2d, edge_corner_idx, 1, corners.new_tensor([0]).expand(bs), 'greater', edge_valid_mask)\n pts_2d, in_canvas_x1, edge_valid_mask = edge_intersection(\n pts_2d, edge_corner_idx, 0, imsize[:, 1], 'less', edge_valid_mask)\n pts_2d, in_canvas_y1, edge_valid_mask = edge_intersection(\n pts_2d, edge_corner_idx, 1, imsize[:, 0], 'less', edge_valid_mask)\n in_canvas = in_canvas & in_canvas_x0 & in_canvas_x1 & in_canvas_y0 & in_canvas_y1 # (bs, 8)\n not_in_canvas = ~in_canvas\n pts_2d[not_in_canvas] = imsize[:, None, [1, 0]].expand(-1, 8, -1)[not_in_canvas]\n x0y0 = pts_2d.min(dim=1)[0].clamp(min=0) # (bs, 2)\n pts_2d[not_in_canvas] = 0\n x1y1 = torch.minimum(pts_2d.max(dim=1)[0], imsize[:, [1, 0]])\n bbox = torch.cat((x0y0, x1y1), dim=1) # (bs, 4)\n bbox_valid_mask = (x1y1 - x0y0).min(dim=1)[0] >= min_size # (bs, )\n else:\n bbox = bbox_3d.new_empty((0, 4))\n bbox_valid_mask = bbox_3d.new_empty((0, ), dtype=torch.bool)\n return bbox, bbox_valid_mask\n\n\ndef xywhr2xyxyr(boxes_xywhr):\n \"\"\"Convert a rotated boxes in XYWHR format to XYXYR format.\n\n Args:\n boxes_xywhr (torch.Tensor): Rotated boxes in XYWHR format.\n\n Returns:\n torch.Tensor: Converted boxes in XYXYR format.\n \"\"\"\n boxes = torch.zeros_like(boxes_xywhr)\n half_w = boxes_xywhr[:, 2] / 2 # l in bbox_3d\n half_h = boxes_xywhr[:, 3] / 2 # w in bbox_3d\n # x in cam coord\n boxes[:, 0] = boxes_xywhr[:, 0] - half_w\n # z in cam coord, mirrored_direction\n boxes[:, 1] = boxes_xywhr[:, 1] - half_h\n boxes[:, 2] = boxes_xywhr[:, 0] + half_w\n boxes[:, 3] = boxes_xywhr[:, 1] + half_h\n boxes[:, 4] = boxes_xywhr[:, 4]\n return boxes\n\n\ndef batched_bev_nms(bbox_3d, batch_inds, nms_thr=0.25):\n \"\"\"\n Args:\n bbox_3d (Tensor): tensor shape (N, 8+),\n in format [l, h, w, x, y, z, ry, score, ind, *]\n batch_inds (Tensor): tensor shape (N, )\n nms_thr (float)\n\n Returns:\n Tuple:\n bbox_3d_out (Tensor)\n keep_inds (Tensor)\n \"\"\"\n n = bbox_3d.size(0)\n if n > 1:\n boxes_for_nms = xywhr2xyxyr(\n bbox_3d[:, [3, 5, 0, 2, 6]])\n offset_unit = (boxes_for_nms[:, :4].max() - boxes_for_nms[:, :4].min()) * 2\n boxes_for_nms[:, :4] = boxes_for_nms[:, :4] + (offset_unit * batch_inds)[:, None]\n keep_inds = nms_gpu(\n boxes_for_nms, bbox_3d[:, 7], nms_thr)\n else:\n keep_inds = bbox_3d.new_zeros(0, dtype=torch.int64)\n bbox_3d_out = bbox_3d[keep_inds]\n return bbox_3d_out, keep_inds\n"
] | [
[
"torch.cos",
"torch.zeros_like",
"torch.tensor",
"torch.sin",
"torch.arange",
"torch.from_numpy",
"torch.arccos",
"numpy.array",
"torch.cat"
]
] |
mengjian0502/GroupLasso_Quant | [
"1c54c940739babf86e362ffc57752c2aa4c8986d"
] | [
"models/resnet_cifar_quant.py"
] | [
"\"\"\"\nResNet on CIFAR10\n\"\"\"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn import init\nfrom .quant import ClippedReLU, int_conv2d, int_linear\nfrom .mpdr_score import get_mpdr_score\nimport math\n\nclass DownsampleA(nn.Module):\n\n def __init__(self, nIn, nOut, stride):\n super(DownsampleA, self).__init__()\n assert stride == 2\n self.avg = nn.AvgPool2d(kernel_size=1, stride=stride)\n\n def forward(self, x):\n x = self.avg(x)\n return torch.cat((x, x.mul(0)), 1)\n\n\nclass ResNetBasicblock(nn.Module):\n expansion = 1\n \"\"\"\n RexNet basicblock (https://github.com/facebook/fb.resnet.torch/blob/master/models/resnet.lua)\n \"\"\"\n def __init__(self, inplanes, planes, stride=1, downsample=None, wbit=4, abit=4, alpha_init=10, mode='mean', k=2, ch_group=16, push=False):\n super(ResNetBasicblock, self).__init__() \n # self.conv_a = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False) # quantization\n self.conv_a = int_conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False, nbit=wbit, mode=mode, k=k, ch_group=ch_group, push=push) # quantization\n self.bn_a = nn.BatchNorm2d(planes)\n self.relu1 = ClippedReLU(num_bits=abit, alpha=alpha_init, inplace=True) # Clipped ReLU function 4 - bits\n # self.relu1 = nn.ReLU(inplace=True)\n\n self.conv_b = int_conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False, nbit=wbit, mode=mode, k=k, ch_group=ch_group, push=push) # quantization\n # self.conv_b = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False) # quantization\n self.bn_b = nn.BatchNorm2d(planes)\n self.relu2 = ClippedReLU(num_bits=abit, alpha=alpha_init, inplace=True) # Clipped ReLU function 4 - bits\n self.downsample = downsample\n\n def forward(self, x):\n residual = x\n\n basicblock = self.conv_a(x)\n basicblock = self.bn_a(basicblock)\n basicblock = self.relu1(basicblock)\n\n basicblock = self.conv_b(basicblock)\n basicblock = self.bn_b(basicblock)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n \n return self.relu2(residual + basicblock)\n\n\nclass CifarResNet(nn.Module):\n \"\"\"\n ResNet optimized for the Cifar dataset, as specified in\n https://arxiv.org/abs/1512.03385.pdf\n \"\"\"\n def __init__(self, depth, num_classes, wbit=4, abit=4, alpha_init=10, mode='mean', k=2, ch_group=16, push=False):\n \"\"\" Constructor\n Args:\n depth: number of layers.\n num_classes: number of classes\n base_width: base width\n \"\"\"\n super(CifarResNet, self).__init__()\n\n block = ResNetBasicblock\n \n\n #Model type specifies number of layers for CIFAR-10 and CIFAR-100 model\n assert (depth - 2) % 6 == 0, 'depth should be one of 20, 32, 44, 56, 110'\n layer_blocks = (depth - 2) // 6\n print ('CifarResNet : Depth : {} , Layers for each block : {}'.format(depth, layer_blocks))\n self.num_classes = num_classes\n self.ch_group = ch_group\n # self.conv_1_3x3 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)\n self.conv_1_3x3 = int_conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False, nbit=wbit, mode=mode, k=k, ch_group=ch_group, push=False) # skip the push process for the first conv layer\n self.relu0 = ClippedReLU(num_bits=abit, alpha=alpha_init, inplace=True)\n self.bn_1 = nn.BatchNorm2d(16)\n\n self.inplanes = 16\n self.stage_1 = self._make_layer(block, 16, layer_blocks, 1, wbit=wbit, abit=abit, alpha_init=alpha_init, mode=mode, k=k, ch_group=ch_group, push=push)\n self.stage_2 = self._make_layer(block, 32, layer_blocks, 2, 
wbit=wbit, abit=abit, alpha_init=alpha_init, mode=mode, k=k, ch_group=ch_group, push=push)\n self.stage_3 = self._make_layer(block, 64, layer_blocks, 2, wbit=wbit, abit=abit, alpha_init=alpha_init, mode=mode, k=k, ch_group=ch_group, push=push)\n self.avgpool = nn.AvgPool2d(8)\n self.classifier = int_linear(64*block.expansion, num_classes, nbit=wbit, mode=mode, k=k, ch_group=ch_group, push=False) # skip the push process for the last fc layer\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n #m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n init.kaiming_normal_(m.weight)\n m.bias.data.zero_()\n\n def _make_layer(self, block, planes, blocks, stride=1, wbit=4, abit=4, alpha_init=10, mode='mean', k=2, ch_group=16, push=False):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n int_conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False, nbit=wbit, mode=mode, k=k, ch_group=ch_group, push=False),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample, wbit=wbit, abit=abit, alpha_init=alpha_init, mode=mode, k=k, ch_group=ch_group, push=push))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes, wbit=wbit, abit=abit, alpha_init=alpha_init, mode=mode, k=k, ch_group=ch_group, push=push))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv_1_3x3(x)\n x = self.relu0(self.bn_1(x))\n x = self.stage_1(x)\n x = self.stage_2(x)\n x = self.stage_3(x)\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n return self.classifier(x)\n\n def get_group_val(self):\n val = torch.Tensor()\n\n if torch.cuda.is_available():\n val = val.cuda()\n\n count = 0\n for m in self.modules():\n if isinstance(m, int_conv2d):\n kw = m.weight.size(2)\n if kw != 1:\n if not count in [0]:\n w_l = m.weight\n num_group = w_l.size(0) * w_l.size(1) // self.ch_group\n w_l = w_l.view(w_l.size(0), w_l.size(1) // self.ch_group, self.ch_group, kw, kw)\n w_l = w_l.contiguous().view((num_group, self.ch_group*kw*kw))\n \n g = w_l.pow(2).sum(dim=1).pow(1/2)\n val = torch.cat((val.view(-1), g.view(-1)))\n count += 1\n return val\n\n def get_global_thre(self, ratio):\n grp_val = self.get_group_val()\n # grp_mean = grp_val.mean()\n\n # threshold = ratio * grp_mean\n sorted_block_values, indices = torch.sort(grp_val.contiguous().view(-1))\n thre_index = int(grp_val.data.numel() * ratio)\n threshold = sorted_block_values[thre_index]\n return threshold\n\n def get_group_mp(self):\n val = torch.Tensor()\n\n if torch.cuda.is_available():\n val = val.cuda()\n\n count = 0\n for m in self.modules():\n if isinstance(m, int_conv2d):\n kw = m.weight.size(2)\n if kw != 1:\n if not count in [0]:\n w_l = m.weight\n num_group = w_l.size(0) * w_l.size(1) // self.ch_group\n w_l = w_l.view(w_l.size(0), w_l.size(1) // self.ch_group, self.ch_group, kw, kw)\n w_l = w_l.contiguous().view((num_group, self.ch_group*kw*kw))\n\n g = w_l.abs().mean(dim=1)\n val = torch.cat((val.view(-1), g.view(-1)))\n count += 1\n return val\n\n def get_global_mp_thre(self, ratio):\n grp_val = self.get_group_mp()\n sorted_block_values, indices = torch.sort(grp_val.contiguous().view(-1))\n thre_index = int(grp_val.data.numel() * ratio)\n 
threshold = sorted_block_values[thre_index]\n return threshold\n\n def get_group_mpdr(self):\n val = torch.Tensor()\n\n if torch.cuda.is_available():\n val = val.cuda()\n\n count = 0\n for m in self.modules():\n if isinstance(m, int_conv2d):\n kw = m.weight.size(2)\n if kw != 1:\n if not count in [0]:\n w_l = get_mpdr_score(m.weight)\n\n num_group = w_l.size(0) * w_l.size(1) // self.ch_group\n w_l = w_l.view(w_l.size(0), w_l.size(1) // self.ch_group, self.ch_group, kw, kw)\n w_l = w_l.contiguous().view((num_group, self.ch_group*kw*kw))\n\n g = w_l.mean(dim=1) # compute the mean of the mpdr score\n val = torch.cat((val.view(-1), g.view(-1)))\n count += 1\n return val\n\n def get_global_mpdr_thre(self, ratio):\n grp_val = self.get_group_mpdr()\n sorted_block_values, indices = torch.sort(grp_val.contiguous().view(-1))\n thre_index = int(grp_val.data.numel() * ratio)\n threshold = sorted_block_values[thre_index]\n return threshold\n\nclass resnet20_quant:\n base=CifarResNet\n args = list()\n kwargs = {'depth': 20}\n\nclass resnet32_quant:\n base=CifarResNet\n args = list()\n kwargs = {'depth': 32}\n"
] | [
[
"torch.nn.BatchNorm2d",
"torch.nn.init.kaiming_normal_",
"torch.cuda.is_available",
"torch.nn.Sequential",
"torch.nn.AvgPool2d",
"torch.Tensor"
]
] |
clabrugere/numpy-basics | [
"81efb4b8ac58fc17dc8f6c676004bbc3a99a92c3"
] | [
"models/utils.py"
] | [
"import numpy as np\n\n\ndef confusion_matrix(y_true, y_hat, threshold=.5):\n \n def _to_class(y):\n return np.array([1 if i >= threshold else 0 for i in y])\n \n n_classes = len(np.unique(y_true))\n cm = np.zeros((n_classes, n_classes))\n y_hat = _to_class(y_hat)\n \n for a, p in zip(y_true, y_hat):\n cm[a, p] += 1\n \n return cm\n\ndef f1_score(cm):\n precision = cm[0, 0] / cm[0, :].sum()\n recall = cm[0, 0] / cm[:, 0].sum()\n return 2 * (precision * recall) / (precision + recall)"
] | [
[
"numpy.array",
"numpy.unique",
"numpy.zeros"
]
] |
sergevkim/sonata | [
"2250b60174628ee76fb7d54bf50e4b8b07b505d5"
] | [
"sonata/datamodules/base_datamodule.py"
] | [
"from abc import ABC, abstractmethod\nfrom pathlib import Path\n\nimport torch\nfrom torch import Tensor\nfrom torch.utils.data import Dataset, DataLoader\n\n\nclass BaseDataModule(ABC):\n def __init__(\n self,\n data_path: Path,\n batch_size: int,\n num_workers: int,\n ):\n super().__init__()\n self.data_path = data_path\n self.batch_size = batch_size\n self.num_workers = num_workers\n\n @staticmethod\n def prepare_data(\n data_path: Path,\n ):\n pass\n\n @abstractmethod\n def setup(\n self,\n val_ratio: float,\n ) -> None:\n pass\n\n def train_dataloader(self) -> DataLoader:\n train_dataloader = DataLoader(\n dataset=self.train_dataset,\n batch_size=self.batch_size,\n num_workers=self.num_workers,\n )\n\n return train_dataloader\n\n def val_dataloader(self) -> DataLoader:\n val_dataloader = DataLoader(\n dataset=self.val_dataset,\n batch_size=self.batch_size,\n num_workers=self.num_workers,\n )\n\n return val_dataloader\n\n def test_dataloader(self):\n pass\n\n"
] | [
[
"torch.utils.data.DataLoader"
]
] |
gpescia/MyNetKet | [
"958510966a5870d9d491de0628903cf1fc210921"
] | [
"netket/operator/boson.py"
] | [
"# Copyright 2021 The NetKet Authors - All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom netket.utils.types import DType\n\nfrom netket.hilbert import AbstractHilbert\n\nfrom ._local_operator import LocalOperator as _LocalOperator\n\n\ndef destroy(\n hilbert: AbstractHilbert, site: int, dtype: DType = float\n) -> _LocalOperator:\n \"\"\"\n Builds the boson destruction operator :math:`\\\\hat{a}` acting on the `site`-th of the\n Hilbert space `hilbert`.\n\n If `hilbert` is a non-Bosonic space of local dimension M, it is considered\n as a bosonic space of local dimension M.\n\n Args:\n hilbert: The hilbert space\n site: the site on which this operator acts\n\n Returns:\n The resulting Local Operator\n \"\"\"\n import numpy as np\n\n N = hilbert.size_at_index(site)\n\n D = np.array([np.sqrt(m) for m in np.arange(1, N)])\n mat = np.diag(D, 1)\n return _LocalOperator(hilbert, mat, [site], dtype=dtype)\n\n\ndef create(hilbert: AbstractHilbert, site: int, dtype: DType = float) -> _LocalOperator:\n \"\"\"\n Builds the boson creation operator :math:`\\\\hat{a}^\\\\dagger` acting on the `site`-th of the\n Hilbert space `hilbert`.\n\n If `hilbert` is a non-Bosonic space of local dimension M, it is considered\n as a bosonic space of local dimension M.\n\n Args:\n hilbert: The hilbert space\n site: the site on which this operator acts\n\n Returns:\n The resulting Local Operator\n \"\"\"\n import numpy as np\n\n N = hilbert.size_at_index(site)\n\n D = np.array([np.sqrt(m) for m in np.arange(1, N)])\n mat = np.diag(D, -1)\n return _LocalOperator(hilbert, mat, [site], dtype=dtype)\n\n\ndef number(hilbert: AbstractHilbert, site: int, dtype: DType = float) -> _LocalOperator:\n \"\"\"\n Builds the number operator :math:`\\\\hat{a}^\\\\dagger\\\\hat{a}` acting on the `site`-th of the\n Hilbert space `hilbert`.\n\n If `hilbert` is a non-Bosonic space of local dimension M, it is considered\n as a bosonic space of local dimension M.\n\n Args:\n hilbert: The hilbert space\n site: the site on which this operator acts\n\n Returns:\n The resulting Local Operator\n \"\"\"\n import numpy as np\n\n N = hilbert.size_at_index(site)\n\n D = np.array([m for m in np.arange(0, N)])\n mat = np.diag(D, 0)\n return _LocalOperator(hilbert, mat, [site], dtype=dtype)\n\n\ndef proj(\n hilbert: AbstractHilbert, site: int, n: int, dtype: DType = float\n) -> _LocalOperator:\n \"\"\"\n Builds the projector operator :math:`|n\\\\rangle\\\\langle n |` acting on the `site`-th of the\n Hilbert space `hilbert` and collapsing on the state with `n` bosons.\n\n If `hilbert` is a non-Bosonic space of local dimension M, it is considered\n as a bosonic space of local dimension M.\n\n Args:\n hilbert: The hilbert space\n site: the site on which this operator acts\n n: the state on which to project\n\n Returns:\n the resulting operator\n \"\"\"\n import numpy as np\n\n N = hilbert.size_at_index(site)\n\n if n >= N:\n raise ValueError(\"Cannot project on a state above the cutoff.\")\n\n D = np.array([0 for m in 
np.arange(0, N)])\n D[n] = 1\n mat = np.diag(D, 0)\n return _LocalOperator(hilbert, mat, [site], dtype=dtype)\n\n\n# clean up the module\ndel AbstractHilbert, DType\n"
] | [
[
"numpy.sqrt",
"numpy.arange",
"numpy.diag"
]
] |
kingoflolz/DALL-E | [
"d3f3e9a57a31b1e1cc74a449a9e6e5a0442f0ac7"
] | [
"examples/pure_jax.py"
] | [
"import io\n\nimport jax\nimport requests\nimport PIL\nfrom PIL import ImageOps\n\nimport numpy as np\nimport jax.numpy as jnp\n\nfrom dall_e_jax import get_encoder, get_decoder, map_pixels, unmap_pixels\n\ntarget_image_size = 256\n\n\ndef download_image(url):\n resp = requests.get(url)\n resp.raise_for_status()\n return PIL.Image.open(io.BytesIO(resp.content))\n\n\ndef preprocess(img):\n img = ImageOps.fit(img, [target_image_size,] * 2, method=0, bleed=0.0, centering=(0.5, 0.5))\n\n img = np.expand_dims(np.transpose(np.array(img).astype(np.float32)/255, (2, 0, 1)), 0)\n return map_pixels(img)\n\n\njax_enc_fn, jax_enc_params = get_encoder(\"encoder.pkl\")\njax_dec_fn, jax_dec_params = get_decoder(\"decoder.pkl\")\n\nx = preprocess(download_image('https://assets.bwbx.io/images/users/iqjWHBFdfxIU/iKIWgaiJUtss/v2/1000x-1.jpg'))\n\nz_logits = jax_enc_fn(jax_enc_params, x)\n\nz = jnp.argmax(z_logits, axis=1)\nz = jnp.transpose(jax.nn.one_hot(z, num_classes=8192), (0, 3, 1, 2))\n\nx_stats = jax_dec_fn(jax_dec_params, z)\n\nx_rec = unmap_pixels(jax.nn.sigmoid(x_stats[:, :3]))\nx_rec = np.transpose((np.array(x_rec[0]) * 255).astype(np.uint8), (1, 2, 0))\n\nPIL.Image.fromarray(x_rec).save('reconstructed.png')\n"
] | [
[
"numpy.array"
]
] |
williamsashbee/Confident_classifier | [
"cba3ef862b310afc3af6c4a62b524f032f45549e"
] | [
"src/run_joint_confidence_cdcOriginalGan.py"
] | [
"##############################################\n# This code is based on samples from pytorch #\n##############################################\n# Writer: Kimin Lee \n\nfrom __future__ import print_function\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport data_loader\nimport numpy as np\nimport torchvision.utils as vutils\nimport models\nfrom torchvision import datasets, transforms\nfrom torch.autograd import Variable\n\nimport os\n\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\" # see issue #152\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"5\"\n\n# Training settings\nparser = argparse.ArgumentParser(description='Training code - joint confidence')\nparser.add_argument('--batch-size', type=int, default=128, help='input batch size for training')\nparser.add_argument('--epochs', type=int, default=100, help='number of epochs to train')\nparser.add_argument('--lr', type=float, default=0.0002, help='learning rate')\nparser.add_argument('--no-cuda', action='store_true', default=False, help='disables CUDA training')\nparser.add_argument('--seed', type=int, default=1, help='random seed')\nparser.add_argument('--log-interval', type=int, default=100,\n help='how many batches to wait before logging training status')\nparser.add_argument('--dataset', default='mnist', help='cifar10 | svhn')\nparser.add_argument('--dataroot', required=True, help='path to dataset')\nparser.add_argument('--imageSize', type=int, default=32, help='the height / width of the input image to network')\nparser.add_argument('--outf', default='.', help='folder to output images and model checkpoints')\nparser.add_argument('--wd', type=float, default=0.0, help='weight decay')\nparser.add_argument('--droprate', type=float, default=0.1, help='learning rate decay')\nparser.add_argument('--decreasing_lr', default='60', help='decreasing strategy')\nparser.add_argument('--num_classes', type=int, default=10, help='the # of classes')\nparser.add_argument('--beta', type=float, default=1, help='penalty parameter for KL term')\n\nargs = parser.parse_args()\n\nif args.dataset == 'cifar10':\n args.beta = 0.1\n args.batch_size = 64\n\nprint(args)\nargs.cuda = not args.no_cuda and torch.cuda.is_available()\nprint(\"Random Seed: \", args.seed)\ntorch.manual_seed(args.seed)\n\nif args.cuda:\n torch.cuda.manual_seed(args.seed)\n\nkwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}\n\nprint('load data: ', args.dataset)\nif args.dataset=='mnist':\n transform = transforms.Compose([\n transforms.Scale(32),\n transforms.ToTensor(),\n transforms.Lambda(lambda x: x.repeat(3, 1, 1)),\n transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))\n ])\n train_loader = torch.utils.data.DataLoader(\n datasets.MNIST('data', train=True, download=True, transform=transform),\n batch_size=128, shuffle=True)\n test_loader = torch.utils.data.DataLoader(\n datasets.MNIST('data', train=False, download=True, transform=transform),\n batch_size=128, shuffle=True)\nelse:\n train_loader, test_loader = data_loader.getTargetDataSet(args.dataset, args.batch_size, args.imageSize, args.dataroot)\n\n\ntransform = transforms.Compose([\n transforms.Scale(32),\n transforms.ToTensor(),\n transforms.Lambda(lambda x: x.repeat(3, 1, 1)),\n transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))\n])\n\ntrain_loader_mnist = torch.utils.data.DataLoader(\n datasets.MNIST('data', train=True, download=True, transform=transform),\n batch_size=128, shuffle=True)\n\nprint('Load model')\nmodel = 
models.vgg13()\nprint(model)\n\nprint('load GAN')\nnz = 100\nG = models.cdcOriginalGenerator(1, nz, 64, 3) # ngpu, nz, ngf, nc\nD = models.cdcOriginalDiscriminator(1, 3, 64) # ngpu, nc, ndf\nG.weight_init(mean=0.0, std=0.02)\nD.weight_init(mean=0.0, std=0.02)\n\n# Initial setup for GAN\nreal_label = 1\nfake_label = 0\ncriterion = nn.BCELoss()\nnz = 100\n\n\nprint('Setup optimizer')\nlr = 0.0002\nbatch_size = 128\noptimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wd)\nG_optimizer = optim.Adam(G.parameters(), lr=lr, betas=(0.5, 0.999))\nD_optimizer = optim.Adam(D.parameters(), lr=lr, betas=(0.5, 0.999))\n\ndecreasing_lr = list(map(int, args.decreasing_lr.split(',')))\n\nonehot = torch.zeros(10, 10).cuda()\nonehot = onehot.scatter_(1, torch.cuda.LongTensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]).view(10, 1), 1).view(10, 10, 1, 1)\nimg_size = 32\nnum_labels = 10\nfraction = 1\nfill = torch.zeros([num_labels, num_labels, img_size / fraction, img_size / fraction]).cuda()\nfor i in range(num_labels):\n fill[i, i, :, :] = 1\nfill = fill.cuda()\n# os.environ[\"CUDA_LAUNCH_BLOCKING\"]=\"1\"\n\n# Binary Cross Entropy loss\nBCE_loss = nn.BCELoss()\n# fixed_noise = torch.FloatTensor(64, nz, 1, 1).normal_(0, 1)\nfixed_noise = torch.randn((64, 100)).view(-1, 100, 1, 1)\nfixed_label = None\n\nif args.cuda:\n model.cuda()\n D.cuda()\n G.cuda()\n criterion.cuda()\n fixed_noise = fixed_noise.cuda()\n\nfirst = True\ndef train(epoch):\n model.train()\n # D_train_loss = 0\n # G_train_loss = 3\n trg = 0\n trd = 0\n i = 0\n\n for batch_idx, (data, y_labels) in enumerate(train_loader):\n uniform_dist = torch.Tensor(data.size(0), args.num_classes).fill_((1. / args.num_classes)).cuda()\n x_ = data.cuda()\n assert x_[0, :, :, :].shape == (3, 32, 32)\n global first\n if first:\n global fixed_noise\n global fixed_label\n\n first = False\n fixed_label = onehot[y_labels.squeeze()[:64]]\n print(\"saving fixed_label!\")\n vutils.save_image(data[:64],\n '{}/{}jointConfidencerealReference{}.png'.format(args.outf, args.dataset, epoch),\n normalize=True)\n\n # train discriminator D\n D.zero_grad()\n y_ = y_labels\n mini_batch = x_.size()[0]\n\n y_real_ = torch.ones(mini_batch)\n y_fake_ = torch.zeros(mini_batch)\n y_real_, y_fake_ = Variable(y_real_.cuda()), Variable(y_fake_.cuda())\n\n y_fill_ = fill[y_.squeeze().tolist()]\n # y_fill_ = fill[y_]\n\n assert y_fill_[0, y_.squeeze().tolist()[0], :, :].sum() == (img_size / fraction) ** 2\n assert y_fill_.sum() == (img_size / fraction) ** 2 * mini_batch\n\n x_, y_fill_ = Variable(x_.cuda()), Variable(y_fill_.cuda())\n\n D_result = D(x_, y_fill_).squeeze()\n D_real_loss = BCE_loss(D_result, y_real_)\n\n z_ = torch.randn((mini_batch, 100)).view(-1, 100, 1, 1)\n y_ = (torch.rand(mini_batch, 1) * num_labels).type(torch.LongTensor).squeeze()\n y_label_ = onehot[y_]\n y_fill_ = fill[y_]\n assert y_label_[0, y_[0]] == 1\n assert y_label_.shape == (mini_batch, 10, 1, 1)\n\n assert y_fill_[0, y_[0], :, :].sum() == (img_size / fraction) ** 2\n assert y_fill_.sum() == (img_size / fraction) ** 2 * mini_batch\n\n z_, y_label_, y_fill_ = Variable(z_.cuda()), Variable(y_label_.cuda()), Variable(y_fill_.cuda())\n\n G_result = G(z_, y_label_)\n D_result = D(G_result, y_fill_).squeeze()\n\n D_fake_loss = BCE_loss(D_result, y_fake_)\n D_fake_score = D_result.data.mean()\n\n D_train_loss = D_real_loss + D_fake_loss\n trg += 1\n if D_train_loss > .1:\n trd += 1\n D_train_loss.backward()\n D_optimizer.step()\n\n # D_losses.append(D_train_loss.item())\n\n # train generator G\n 
G.zero_grad()\n\n z_ = torch.randn((mini_batch, 100)).view(-1, 100, 1, 1)\n y_ = (torch.rand(mini_batch, 1) * num_labels).type(torch.LongTensor).squeeze()\n y_label_ = onehot[y_]\n y_fill_ = fill[y_]\n\n z_, y_label_, y_fill_ = Variable(z_.cuda()), Variable(y_label_.cuda()), Variable(y_fill_.cuda())\n\n assert y_label_[0, y_[0]] == 1\n assert y_label_.shape == (mini_batch, 10, 1, 1)\n\n assert y_fill_[0, y_[0], :, :].sum() == (img_size / fraction) ** 2\n assert y_fill_.sum() == (img_size / fraction) ** 2 * mini_batch\n\n G_result = G(z_, y_label_)\n D_result = D(G_result, y_fill_).squeeze()\n\n G_train_loss = BCE_loss(D_result, y_real_)\n\n # minimize the true distribution\n KL_fake_output = F.log_softmax(model(G_result))\n errG_KL = F.kl_div(KL_fake_output, uniform_dist) * args.num_classes\n generator_loss = G_train_loss + args.beta * errG_KL # 12.0, .65, 0e-8\n generator_loss.backward()\n\n G_optimizer.step()\n # G_losses.append(G_train_loss.item())\n ###########################\n # (3) Update classifier #\n ###########################\n # cross entropy loss\n\n optimizer.zero_grad()\n x_ = Variable(x_)\n\n output = F.log_softmax(model(x_))\n loss = F.nll_loss(output.cuda(), y_labels.type(torch.cuda.LongTensor).squeeze())\n\n # KL divergence\n\n ####\n z_ = torch.randn((data.shape[0], 100)).view(-1, 100, 1, 1).cuda()\n y_ = (torch.rand(data.shape[0], 1) * num_labels).type(torch.LongTensor).squeeze().cuda()\n y_label_ = onehot[y_]\n y_fill_ = fill[y_]\n\n assert y_label_[0, y_[0]] == 1\n assert y_label_.shape == (data.shape[0], 10, 1, 1)\n\n assert y_fill_[0, y_[0], :, :].sum() == (img_size / fraction) ** 2\n assert y_fill_.sum() == (img_size / fraction) ** 2 * data.shape[0]\n\n G_result = G(z_, y_label_)\n # !!!#D_result = D(G_result, y_fill_).squeeze()\n\n ####\n KL_fake_output = F.log_softmax(model(G_result))\n KL_loss_fake = F.kl_div(KL_fake_output, uniform_dist) * args.num_classes\n\n total_loss = loss + args.beta * KL_loss_fake\n # total_loss = loss\n total_loss.backward()\n optimizer.step()\n\n if batch_idx % args.log_interval == 0:\n print(\n \"Epoch {} , Descriminator loss {:.6f} Generator loss {:.6f} traingenerator {:.6f} traindiscriminator {:.6f}\".format(\n epoch, D_train_loss, G_train_loss, trg, trd))\n print('Classification Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}, KL fake Loss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. * batch_idx / len(train_loader), loss.data.item(), KL_loss_fake.data.item()))\n\n # print('Classification Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}, KL fake Loss: {:.6f}'.format(\n # epoch, batch_idx * len(data), len(train_loader.dataset),\n # 100. 
* batch_idx / len(train_loader), loss.data.item(), KL_loss_fake.data.item()))\n fake = G(fixed_noise.cuda(), fixed_label)\n vutils.save_image(fake.data, '%s/MNISTcDCgan_samples_epoch_%03d.png' % (args.outf, epoch), normalize=True)\n\n\ndef test(epoch):\n model.eval()\n test_loss = 0\n correct = 0\n total = 0\n for data, target in test_loader:\n total += data.size(0)\n if args.cuda:\n data, target = data.cuda(), target.cuda()\n # data, target = Variable(data, volatile=True), Variable(target)\n output = F.log_softmax(model(data))\n target = target.type(\n torch.LongTensor) # https://discuss.pytorch.org/t/runtimeerror-multi-target-not-supported-newbie/10216/4\n if args.cuda:\n output = output.cuda()\n target = target.cuda()\n target = torch.squeeze(target)\n\n test_loss += F.nll_loss(output, target).data.item()\n pred = output.data.max(1)[1] # get the index of the max log-probability\n correct += pred.eq(target.data).cpu().sum()\n\n test_loss = test_loss\n test_loss /= len(test_loader) # loss function already averages over batch size\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\n test_loss, correct, total,\n 100. * correct / total))\n\n\nfor epoch in range(1, args.epochs + 1):\n train(epoch)\n test(epoch)\n if epoch in decreasing_lr:\n G_optimizer.param_groups[0]['lr'] *= args.droprate\n D_optimizer.param_groups[0]['lr'] *= args.droprate\n optimizer.param_groups[0]['lr'] *= args.droprate\n if epoch % 20 == 0:\n # do checkpointing\n torch.save(G.state_dict(), '%s/netG_epoch_%d.pth' % (args.outf, epoch))\n torch.save(D.state_dict(), '%s/netD_epoch_%d.pth' % (args.outf, epoch))\n torch.save(model.state_dict(), '%s/model_epoch_%d.pth' % (args.outf, epoch))\n"
] | [
[
"torch.ones",
"torch.cuda.manual_seed",
"torch.randn",
"torch.manual_seed",
"torch.autograd.Variable",
"torch.nn.functional.nll_loss",
"torch.rand",
"torch.nn.functional.kl_div",
"torch.cuda.is_available",
"torch.zeros",
"torch.nn.BCELoss",
"torch.squeeze",
"torch.cuda.LongTensor"
]
] |
7125messi/rencommend_system_learning | [
"4a8bcef241c4c0357cfbe4d1a9828b847974b69c"
] | [
"Chapter2/LFM.py"
] | [
"# 导入包\nimport random\nimport math\nimport numpy as np\nimport time\nfrom tqdm import tqdm\nfrom tqdm import trange\n\n# 1 通用函数定义\n## 定义装饰器,监控运行时间\ndef timmer(func):\n def wrapper(*args, **kwargs):\n start_time = time.time()\n res = func(*args, **kwargs)\n stop_time = time.time()\n print('Func {},run time:{}'.format(func.__name__,stop_time - start_time))\n return res\n return wrapper\n\n## 数据处理相关\n### load data\n### split data\nclass Dataset():\n def __init__(self,fp):\n self.data = self.loadData(fp)\n\n @timmer\n def loadData(self,fp):\n data = []\n for l in open(fp):\n data.append(tuple(map(int, l.strip().split('::')[:2])))\n return data\n\n @timmer\n def splitData(self, M, k, seed=1):\n '''\n :params: data, 加载的所有(user, item)数据条目\n :params: M, 划分的数目,最后需要取M折的平均\n :params: k, 本次是第几次划分,k~[0, M)\n :params: seed, random的种子数,对于不同的k应设置成一样的\n :return: train, test\n '''\n train , test = [], []\n random.seed(seed)\n for user, item in self.data:\n # 这里与书中的不一致,本人认为取M-1较为合理,因randint是左右都覆盖的\n if random.randint(0, M-1) == k: \n test.append((user, item))\n else:\n train.append((user, item))\n\n # 处理成字典的形式,user->set(items)\n def convert_dict(data):\n data_dict = {}\n for user, item in data:\n if user not in data_dict:\n data_dict[user] = set()\n data_dict[user].add(item)\n data_dict = {k: list(data_dict[k]) for k in data_dict}\n return data_dict\n\n return convert_dict(train), convert_dict(test)\n\n## 评价指标\n### Precision\n### Recall\n### Coverage\n### Popularity(Novelty)\n\nclass Metric():\n def __init__(self, train, test, GetRecommendation):\n '''\n :params: train, 训练数据\n :params: test, 测试数据\n :params: GetRecommendation, 为某个用户获取推荐物品的接口函数\n '''\n self.train = train\n self.test = test\n self.GetRecommendation = GetRecommendation\n self.recs = self.getRec()\n \n # 为test中的每个用户进行推荐\n def getRec(self):\n recs = {}\n for user in self.test:\n rank = self.GetRecommendation(user)\n recs[user] = rank\n return recs\n \n # 定义精确率指标计算方式\n def precision(self):\n all, hit = 0, 0\n for user in self.test:\n test_items = set(self.test[user])\n rank = self.recs[user]\n for item, score in rank:\n if item in test_items:\n hit += 1\n all += len(rank)\n return round(hit / all * 100, 2)\n \n # 定义召回率指标计算方式\n def recall(self):\n all, hit = 0, 0\n for user in self.test:\n test_items = set(self.test[user])\n rank = self.recs[user]\n for item, score in rank:\n if item in test_items:\n hit += 1\n all += len(test_items)\n return round(hit / all * 100, 2)\n \n # 定义覆盖率指标计算方式\n def coverage(self):\n all_item, recom_item = set(), set()\n for user in self.test:\n for item in self.train[user]:\n all_item.add(item)\n rank = self.recs[user]\n for item, score in rank:\n recom_item.add(item)\n return round(len(recom_item) / len(all_item) * 100, 2)\n \n # 定义新颖度指标计算方式\n def popularity(self):\n # 计算物品的流行度\n item_pop = {}\n for user in self.train:\n for item in self.train[user]:\n if item not in item_pop:\n item_pop[item] = 0\n item_pop[item] += 1\n\n num, pop = 0, 0\n for user in self.test:\n rank = self.recs[user]\n for item, score in rank:\n # 取对数,防止因长尾问题带来的被流行物品所主导\n pop += math.log(1 + item_pop[item])\n num += 1\n return round(pop / num, 6)\n \n def eval(self):\n metric = {'Precision': self.precision(),\n 'Recall': self.recall(),\n 'Coverage': self.coverage(),\n 'Popularity': self.popularity()}\n print('Metric:', metric)\n return metric\n\n# 2 LFM算法实现\ndef LFM(train,ratio,K,lr,step,lmbda,N):\n '''\n :params: train, 训练数据\n :params: ratio, 负采样的正负比例\n :params: K, 隐语义个数\n :params: lr, 初始学习率\n :params: step, 迭代次数\n :params: lmbda, 正则化系数\n :params: 
N, 推荐TopN物品的个数\n :return: GetRecommendation, 获取推荐结果的接口\n '''\n all_items = {}\n for user in train:\n for item in train[user]:\n if item not in all_items:\n all_items[item] = 0\n all_items[item] += 1\n\n all_items = list(all_items.items())\n items = [x[0] for x in all_items]\n pops = [x[1] for x in all_items]\n\n # 负采样函数(按照流行度就行采样)\n def nSample(data,ratio):\n new_data = {}\n # 正样本\n for user in data:\n if user not in new_data:\n new_data[user] = {}\n for item in data[user]:\n new_data[user][item] = 1\n # 负样本\n for user in new_data:\n seen = set(new_data[user])\n pos_num = len(seen)\n item = np.random.choice(items, int(pos_num * ratio * 3), pops)\n item = [x for x in item if x not in seen][:int(pos_num * ratio)]\n new_data[user].update({x: 0 for x in item})\n \n return new_data\n\n # 训练\n P, Q = {}, {}\n for user in train:\n P[user] = np.random.random(K)\n for item in items:\n Q[item] = np.random.random(K)\n \n for s in trange(step):\n data = nSample(train, ratio)\n for user in data:\n for item in data[user]:\n eui = data[user][item] - (P[user] * Q[item]).sum()\n P[user] += lr * (Q[item] * eui - lmbda * P[user])\n Q[item] += lr * (P[user] * eui - lmbda * Q[item])\n lr *= 0.9 # 调整学习率\n \n # 获取接口函数\n def GetRecommendation(user):\n seen_items = set(train[user])\n recs = {}\n for item in items:\n if item not in seen_items:\n recs[item] = (P[user] * Q[item]).sum()\n recs = list(sorted(recs.items(), key=lambda x: x[1], reverse=True))[:N]\n return recs\n \n return GetRecommendation\n\n# 3 LFM实验\n## M=8, N=10, ratio=[1, 2, 3, 5, 10, 20]\n\nclass Experiment():\n def __init__(self, M, N, ratio=1,\n K=100, lr=0.02, step=100, lmbda=0.01, fp='../dataset/ml-1m/ratings.dat'):\n '''\n :params: M, 进行多少次实验\n :params: N, TopN推荐物品的个数\n :params: ratio, 正负样本比例\n :params: K, 隐语义个数\n :params: lr, 学习率\n :params: step, 训练步数\n :params: lmbda, 正则化系数\n :params: fp, 数据文件路径\n '''\n self.M = M\n self.K = K\n self.N = N\n self.ratio = ratio\n self.lr = lr\n self.step = step\n self.lmbda = lmbda\n self.fp = fp\n self.alg = LFM\n \n # 定义单次实验\n @timmer\n def worker(self, train, test):\n '''\n :params: train, 训练数据集\n :params: test, 测试数据集\n :return: 各指标的值\n '''\n getRecommendation = self.alg(train, self.ratio, self.K, \n self.lr, self.step, self.lmbda, self.N)\n metric = Metric(train, test, getRecommendation)\n return metric.eval()\n \n # 多次实验取平均\n @timmer\n def run(self):\n metrics = {'Precision': 0, 'Recall': 0, \n 'Coverage': 0, 'Popularity': 0}\n dataset = Dataset(self.fp)\n for ii in range(self.M):\n train, test = dataset.splitData(self.M, ii)\n print('Experiment {}:'.format(ii))\n metric = self.worker(train, test)\n metrics = {k: metrics[k]+metric[k] for k in metrics}\n metrics = {k: metrics[k] / self.M for k in metrics}\n print('Average Result (M={}, N={}, ratio={}): {}'.format(\\\n self.M, self.N, self.ratio, metrics))\n\n# LFM实验(运行时间较长,这里没贴实验结果)\nM, N = 8, 10\nfor r in [1, 2, 3, 5, 10, 20]:\n exp = Experiment(M, N, ratio=r)\n exp.run()"
] | [
[
"numpy.random.random"
]
] |
JCSDA/mpas-jedi | [
"e0780d1fd295912ee4cfb758854c52b6764d4ab9"
] | [
"graphics/basic_plot_functions.py"
] | [
"#!/usr/bin/env python3\n\nfrom copy import deepcopy\nimport cartopy.crs as ccrs\nimport datetime as dt\nimport logging\nfrom pandas.plotting import register_matplotlib_converters\nregister_matplotlib_converters()\nimport matplotlib\nmatplotlib.use('AGG')\nimport matplotlib.axes as maxes\nimport matplotlib.cm as cm\nimport matplotlib.colors as colors\nfrom matplotlib.colors import BoundaryNorm\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as mticker\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nimport numpy as np\nimport plot_utils as pu\nimport var_utils as vu\nimport os\n\n_logger = logging.getLogger(__name__)\n\ncmGray = plt.cm.get_cmap(\"gist_gray\")\ncmRainbow = plt.cm.get_cmap(\"gist_rainbow\")\ncmSpectral = plt.cm.get_cmap(\"nipy_spectral\")\ncmHeat = plt.cm.get_cmap(\"gist_heat\")\ncmOcean = plt.cm.get_cmap(\"ocean\")\ncmNCAR = plt.cm.get_cmap(\"gist_ncar\")\n\nWhiteBlack1 = cmGray(np.linspace(1.0,0.0,17)) # white to black (-90 to -74 C)\nBlackRed = cmHeat(np.linspace(0.0,0.5,10)) #black to red (-74 to -65 C)\nROYG = cmSpectral(np.linspace(0.9,0.43,27)) # red, orange, yellow, green, blue (-65 to -39 C)\n#GreenBlue = cmNCAR(np.linspace(0.05,0.1,8)) # green to blue (-39 to -32 C)\n#BlueCyan = cmRainbow(np.linspace(0.8,0.6,13)) # blue to cyan (-32 to -20 C)\nGreenBlueCyan = cmNCAR(np.linspace(0.05,0.2,20)) # green to blue (-39 to -20 C)\n#WhiteBlack2 = cmGray(np.linspace(0.9,0.0,51)) # white to black (-20 to 30 C)\nMVW = cmNCAR(np.linspace(0.8,0.98,21)) # magenta to violet to white (-20 to 0 C)\nWhiteBlack2 = cmGray(np.linspace(0.9,0.0,31)) # white to black (0 to 30 C)\n\n#btcolors = np.concatenate((WhiteBlack1, BlackRed, ROYG, GreenBlue, BlueCyan, WhiteBlack2))\n#btcolors = np.concatenate((WhiteBlack1, BlackRed, ROYG, GreenBlueCyan, WhiteBlack2))\nbtcolors = np.concatenate((WhiteBlack1, BlackRed, ROYG, GreenBlueCyan, MVW, WhiteBlack2))\n\nbtCMap = colors.ListedColormap(btcolors)\n\n#This script includes basic plotting functions.\n\ndistriZooms = {}\n\n#Full Earth\ndistriZooms['default'] = {\n 'cLon': None,\n 'minLon': -180,\n 'maxLon': 180,\n 'minLat': -90,\n 'maxLat': 90,\n}\ndistriZooms['abi'] = {\n 'cLon': -75.2,\n 'minLon': None,\n 'maxLon': None,\n 'minLat': None,\n 'maxLat': None,\n}\ndistriZooms['ahi'] = {\n 'cLon': 140.7,\n 'minLon': None,\n 'maxLon': None,\n 'minLat': None,\n 'maxLat': None,\n}\n\ndef plotDistri(lats,lons,values, \\\n ObsType,VarName,var_unit,out_name,nstation,levbin, \\\n dmin=None,dmax=None,dotsize=6,color=\"rainbow\"):\n#================================================================\n#INPUTS:\n# lats - latitude\n# lons - longitude\n# values - values will be plotted\n# ObsType - observation type\n# VarName - variable name\n# var_unit - variable units\n# out_name - will be included in output file name. 
It can be experiment name.\n# nstation - station numbers for sondes.\n# levbin - plot all levels together (levbin=all); or plot every level.\n# dmin, dmax - min/max values of colorbars, optional\n# dotsize - dot size, optional\n# color - color scheme, optional\n#================================================================\n# For some plots that need to change longitude from [-180,180] to [0,360]\n# tmp = np.logical_not(lons > 0)\n# lons[tmp] = lons[tmp] + 360\n\n#set map=======================================================================\n cLon = distriZooms['default']['cLon']\n minLon = distriZooms['default']['minLon']\n maxLon = distriZooms['default']['maxLon']\n minLat = distriZooms['default']['minLat']\n maxLat = distriZooms['default']['maxLat']\n\n for key, val in distriZooms.items():\n if key in ObsType:\n cLon = val['cLon']\n minLon = val['minLon']\n maxLon = val['maxLon']\n minLat = val['minLat']\n maxLat = val['maxLat']\n\n if cLon is not None:\n fig = plt.figure(figsize=(5,5))\n ax = fig.add_subplot(projection=ccrs.Orthographic(cLon))\n else:\n fig = plt.figure(figsize=(8,8))\n ax = fig.add_subplot(projection=ccrs.PlateCarree())\n\n ax.set_global()\n\n#draw points onto map =========================================================\n if color == \"BT\":\n if (\"abi\" in ObsType or \"ahi\" in ObsType):\n cm = btCMap\n if dmin is None: dmin = 183\n if dmax is None: dmax = 303\n else:\n cm = plt.cm.get_cmap(\"gist_ncar\")\n if dmin is None: dmin = 190\n if dmax is None: dmax = 270\n else:\n cm = plt.cm.get_cmap(color)\n\n finite = np.isfinite(values)\n if (((\"abi\" in ObsType or \"ahi\" in ObsType)\n and finite.sum() > 4e4)\n or \"model\" in ObsType):\n # option 1: smoothed contours (note: color bar is not quite right)\n # sc=m.contourf(lons[finite], lats[finite], values[finite],\n # cm.N, cmap = cm, vmin = dmin, vmax = dmax,\n # latlon = True, tri = True, extend='both')\n\n # option 2: pixel contours\n # first sort by longitude to avoid bug for cyclic projections in basemap\n lonsPlot = lons[finite]\n lonsPlot[lonsPlot > 180.0] -= 360.0 # fixes latitude swap bug for cyclic projections\n latsPlot = lats[finite]\n valuesPlot = values[finite]\n lonSort = np.argsort(lonsPlot)\n\n p = plt.pcolor(lonsPlot[lonSort], latsPlot[lonSort], valuesPlot[lonSort],\n transform = ccrs.PlateCarree(),\n cmap = cm, vmin = dmin, vmax = dmax,\n latlon = True, tri = True)\n\n else:\n p=ax.scatter(lons[finite], lats[finite], c=values[finite],\n transform = ccrs.PlateCarree(),\n cmap= cm, s = dotsize)\n ax.gridlines(draw_labels=True, xlocs=np.arange(-180,180,60),linestyle='--')\n\n ax.coastlines()\n\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"bottom\",size=\"5%\", pad=0.3,axes_class=plt.Axes)\n\n #fig.add_axes(cax)\n plt.colorbar(p,cax=cax,orientation='horizontal') #,cax=cax,ax=ax,orientation='horizontal')\n\n#set title ===================================================================\n if nstation == 0 or ObsType == 'satwind':\n plt.text(0.5, 1.15, '%s %s %s nlocs:%s' \\\n %(ObsType,VarName,var_unit,len(values[~np.isnan(values)])), \\\n horizontalalignment='center', \\\n fontsize=12, transform = ax.transAxes)\n else:\n if ObsType[:6] == 'gnssro':\n plt.text(0.5, 1.15, '%s %s %s nlocs:%s nprofile:%s' \\\n %(ObsType,VarName,var_unit,len(values[~np.isnan(values)]),nstation), \\\n horizontalalignment='center', \\\n fontsize=12, transform = ax.transAxes)\n elif ObsType == 'aircraft':\n plt.text(0.5, 1.15, '%s %s %s nlocs:%s nflight:%s' \\\n 
%(ObsType,VarName,var_unit,len(values[~np.isnan(values)]),nstation), \\\n horizontalalignment='center', \\\n fontsize=12, transform = ax.transAxes)\n else:\n plt.text(0.5, 1.15, '%s %s %s nlocs:%s nstation:%s' \\\n %(ObsType,VarName,var_unit,len(values[~np.isnan(values)]),nstation), \\\n horizontalalignment='center', \\\n fontsize=12, transform = ax.transAxes)\n\n plt.savefig('distri_%s_%s_%s.png'%(VarName,out_name,levbin),dpi=200,bbox_inches='tight')\n plt.close()\n\n\ndef scatterMapFields(\n lonVals, latVals, fields,\n filename,\n minLon = -180., maxLon = 180.,\n minLat = -90., maxLat = 90.,\n cLon = None,\n projection = 'default',\n dmin = None, dmax = None,\n markers = {},\n sizes = {},\n cmap = 'gist_ncar',\n cbarType = None,\n c = {},\n logVLim = 1.e-12,\n ):\n\n # setup map\n cLons = np.asarray([])\n lonVals_180 = {}\n\n for name in lonVals.keys():\n cLon = None\n\n # 0 < longitude <= 360\n lonVals_360 = deepcopy(lonVals[name])\n while np.max(lonVals_360) >= 360.0:\n lonVals_360[lonVals_360 >= 360.0] -= 360.0\n while np.min(lonVals_360) < 0.0:\n lonVals_360[lonVals_360 < 0.0] += 360.0\n\n # -180 < longitude <= 180\n lonVals_180[name] = deepcopy(lonVals_360)\n lonVals_180[name][lonVals_180[name] > 180.0] -= 360.0\n\n for lon in [lonVals_360, lonVals_180[name]]:\n if np.max(lon) - np.min(lon) <= 180.0:\n cLon = 0.5*(np.max(lon) + np.min(lon))\n\n cLons = np.append(cLons, cLon)\n\n anycLonNone = np.any([c is None for c in cLons])\n\n if anycLonNone:\n # plot entire Earth\n fig = plt.figure(figsize=(5,5))\n ax = fig.add_subplot(projection=ccrs.Mollweide(0.0))\n\n else:\n # plot single projected side of Earth\n cLon = cLons[0]\n if cLon > 180.0: cLon-=360.0\n fig = plt.figure(figsize=(5,5))\n ax = fig.add_subplot(projection=ccrs.Orthographic(cLon))\n\n assert (cbarType is None or cbarType in ['Log', 'SymLog']), \\\n 'scatterMapFields: invalid cbarType: '+cbarType\n\n for name, field in fields.items():\n f = c=c.get(name, field)\n finite = np.isfinite(f)\n lons = lonVals_180[name][finite]\n lats = latVals[name][finite]\n f = f[finite]\n\n ## transform to pcolormesh and cartopy conventions\n # longitude monotonically increasing\n lonSort = np.argsort(lons)\n lons = lons[lonSort]\n lats = lats[lonSort]\n f = f[lonSort]\n\n if dmin is None:\n vmin = f.min()\n else:\n vmin = dmin\n if dmax is None:\n vmax = f.max()\n else:\n vmax = dmax\n\n if cbarType is None:\n norm = None\n elif cbarType == 'Log':\n if vmin <= logVLim: vmin = logVLim\n f[f < vmin] = vmin\n norm=colors.LogNorm(vmin=vmin, vmax=vmax)\n elif cbarType == 'SymLog':\n norm=colors.SymLogNorm(vmin=vmin, vmax=vmax,\n linthresh=1.e-4*vmax, linscale=1.0, base=10)\n\n sc = ax.scatter(lons, lats, c=f,\n s = sizes.get(name, 1),\n cmap = cmap,\n norm = norm,\n marker = markers.get(name, '.'), linewidth = 0,\n transform=ccrs.PlateCarree(),\n )\n\n # show full projection extent\n ax.set_global()\n\n # add coastlines\n ax.coastlines()\n\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"bottom\",size=\"5%\", pad=0.3,axes_class=plt.Axes)\n cb = plt.colorbar(sc, cax=cax, orientation='horizontal')\n\n plt.savefig(filename, dpi=200, bbox_inches='tight')\n plt.close()\n\ndef plotTimeserial2D(Stats,xlabeltime,ylevels,VarName):\n#================================================================\n#INPUTS:\n# Stats - statistics\n# xlabeltime - time labels for x-axis \n# ylevels - vertical levels for y-axis\n# VarName - variable name\n#================================================================\n zgrid = 
np.loadtxt(\"/glade/work/jban/pandac/fix_input/graphics/zgrid_v55.txt\")\n\n fig, ax1 = plt.subplots()\n\n xarray = range(len(xlabeltime))\n valuemin = np.amin(Stats)\n valuemax = np.amax(Stats)\n # yonggangyu introduce epsilon and xi for plotting absolutely zero field,\n # solving vmin, vcenter, vmax ascending order issue\n epsilon = 1.e-8\n if (valuemin > 0 or valuemax < 0):\n color = 'rainbow'\n plt.contourf(xarray,ylevels,Stats,40,vmin=valuemin, vmax=valuemax,cmap=color)\n xi=-1\n else:\n cmap = 'coolwarm'\n if ( -valuemin < epsilon and valuemax < epsilon ):\n xi=1\n valuemin = -epsilon\n valuemax = epsilon\n elif ( -valuemin < epsilon and valuemax > epsilon ):\n xi=2\n valuemin = -epsilon\n elif ( -valuemin > epsilon and valuemax < epsilon ):\n xi=3\n valuemax = epsilon\n else:\n xi=4\n #print('xi= '+str(xi)+' valuemin= ',str(valuemin)+' valuemax= ',str(valuemax))\n norm = matplotlib.colors.DivergingNorm(vmin=valuemin, vcenter=0, vmax=valuemax)\n plt.contourf(xarray,ylevels,Stats,40,vmin=valuemin, vmax=valuemax,norm=norm,cmap=cmap)\n xarray = range(len(xlabeltime))\n major_ticks = np.arange(0, 56, 5)\n ax1.set_yticks(major_ticks)\n ax1.set_ylim([0,54])\n ax1.set_ylabel('Vertical level',fontsize=15)\n\n ax2 = ax1.twinx()\n ax2.set_yticks(major_ticks-1)\n ax2.set_yticklabels((zgrid[::5]).astype(int))\n\n ax2.set_ylabel('Height (m)',fontsize=13)\n\n FCDay = ''.join(VarName.split(\"_\")[1:][:-3])\n if (FCDay == 'day0.0'):\n ax1.set_xlabel('Analysis Time',fontsize=15)\n ax1.set_xticks(xarray[::4])\n ax1.set_xticklabels(xlabeltime[::4],rotation=90)\n elif (FCDay == 'day0.25'):\n ax1.set_xlabel( '6h Forecast',fontsize=15)\n ax1.set_xticks(xarray[::4])\n ax1.set_xticklabels(xlabeltime[::4],rotation=90)\n else:\n ax1.set_xlabel( 'Lead Time',fontsize=15)\n\n plt.colorbar(extend='both',orientation=\"horizontal\",pad=0.2)\n ax1.grid(True)\n region = ''.join(VarName.split(\"_\")[2:][:-2])\n var = ''.join(VarName.split(\"_\")[3:][:-1])\n stats = ''.join(VarName.split(\"_\")[4:])\n plt.title(stats+' variable:'+vu.varDictModel[var][1]+'('+ vu.varDictModel[var][0]+') '+region, fontsize = 12)\n plt.savefig(VarName+'_TS_2d.png',dpi=200,bbox_inches='tight')\n plt.close()\n\nmaxLegendEntries = 12\n\n###############################################################################\nlenWarnSer = 0\nnanWarnSer = 0\ndef plotSeries(fig, \\\n linesVals, xVals, \\\n linesLabel, \\\n title=\"\", dataLabel=\"y\", \\\n sciticks=False, logscale= False, signdef=False, \\\n indepLabel=\"x\", invert_ind_axis=False, \\\n ny=1, nx=1, nplots=1, iplot=0, \\\n linesValsMinCI=None, linesValsMaxCI=None, \\\n dmin=np.NaN, dmax=np.NaN, \\\n lineAttribOffset=0, \\\n legend_inside=True,\n interiorLabels=True):\n\n# ARGUMENTS\n# fig - matplotlib figure object\n# linesVals - dependent variable (list of arrays)\n# xVals - independent variable on x-axis (array)\n# linesLabel - legend label for linesVals (list)\n\n# title - subplot title, optional\n# dataLabel - label for linesVals, optional\n# sciticks - whether linesVals needs scientific formatting for ticks, optional\n# logscale - y-axis is scaled logarithmically, optional, overrides sciticks\n# signdef - whether linesVals is positive/negative definite, optional\n# indepLabel - label for xVals, optional\n# invert_ind_axis - whether to invert x-axis orientation, optional\n\n# ny, nx - number of subplots in x/y direction, optional\n# nplots - total number of subplots, optional\n# iplot - this subplot index (starting at 0), optional\n\n# linesValsMinCI - minimum error bound for 
linesVals (list of arrays), optional\n# linesValsMaxCI - maximum error bound for linesVals (list of arrays), optional\n# Note: linesValsMinCI and linesValsMaxCI must be specified together\n\n# lineAttribOffset - offset for selecting line attributes, optional\n# dmin, dmax - min/max values of linesVals, optional\n# legend_inside - whether legend should be placed inside the subplot, optional\n\n ax = fig.add_subplot(ny, nx, iplot+1)\n\n #title\n ax.set_title(title,fontsize=5)\n\n #add lines\n plotVals = np.asarray([])\n nLines = 0\n for iline, lineVals in enumerate(linesVals):\n if np.all(np.isnan(lineVals)):\n global nanWarnSer\n if nanWarnSer==0:\n _logger.warning(\"skipping all-NaN data\")\n _logger.warning(title+\"; \"+indepLabel+\"; \"+linesLabel[iline])\n nanWarnSer=nanWarnSer+1\n continue\n if len(lineVals)!=len(xVals):\n global lenWarnSer\n if lenWarnSer==0:\n _logger.warning(\"skipping data where len(x)!=len(y)\")\n _logger.warning(title+\"; \"+indepLabel+\"; \"+linesLabel[iline])\n lenWarnSer=lenWarnSer+1\n continue\n\n # Plot line for each lineVals that has non-missing data\n pColor = pu.plotColor(len(linesVals),iline+lineAttribOffset)\n\n ax.plot(xVals, lineVals, \\\n color=pColor, \\\n label=linesLabel[iline], \\\n ls=pu.plotLineStyle(len(linesVals),iline+lineAttribOffset), \\\n linewidth=0.5)\n nLines += 1\n plotVals = np.append(plotVals, lineVals)\n\n # Add shaded error regions if specified\n if linesValsMinCI is not None and \\\n linesValsMaxCI is not None:\n\n # test statistical significance versus zero\n if signdef:\n significant = np.empty(len(lineVals))\n significant[:] = np.NaN\n else:\n significant = np.multiply(linesValsMinCI[iline], linesValsMaxCI[iline])\n significant = np.array([x if np.isfinite(x) else -1.0 for x in significant])\n\n lineArr = np.array(lineVals)\n xArr = np.array(xVals)\n negsiginds = np.array([i for i,x in enumerate(significant)\n if (x > 0.0 and lineArr[i] < 0.0)],dtype=int)\n if len(negsiginds) > 0:\n ax.plot(xArr[negsiginds], lineArr[negsiginds], \\\n color=pColor, \\\n ls='', \\\n marker='v', \\\n markersize=1.5)\n\n possiginds = np.array([i for i,x in enumerate(significant)\n if (x > 0.0 and lineArr[i] > 0.0)],dtype=int)\n if len(possiginds) > 0:\n ax.plot(xArr[possiginds], lineArr[possiginds], \\\n color=pColor, \\\n ls='', \\\n marker='^', \\\n markersize=1.5)\n\n ax.plot(xVals, linesValsMinCI[iline], \\\n color=pColor, \\\n alpha=0.4, \\\n ls='-', \\\n linewidth=0.5)\n ax.plot(xVals, linesValsMaxCI[iline], \\\n color=pColor, \\\n alpha=0.4, \\\n ls='-', \\\n linewidth=0.5)\n ax.fill_between(xVals, linesValsMinCI[iline], linesValsMaxCI[iline], \\\n color=pColor, \\\n edgecolor=pColor, \\\n linewidth=0.0, alpha = 0.1)\n ax.fill_between(xVals, linesValsMinCI[iline], linesValsMaxCI[iline], \\\n where=significant > 0.0, \\\n color=pColor, \\\n edgecolor=pColor, \\\n linewidth=0.2, alpha = 0.3)\n\n if nLines == 0:\n ax.tick_params(axis='x',labelbottom=False)\n ax.tick_params(axis='y',labelleft=False)\n return\n\n # add horizontal zero line for unbounded quantities\n if not signdef:\n ax.plot([xVals[0], xVals[-1]], [0., 0.], ls=\"--\", c=\".3\", \\\n linewidth=0.7,markersize=0)\n\n # standardize x-limits\n mindval, maxdval = pu.get_clean_ax_limits(dmin,dmax,plotVals,signdef)\n\n #axes settings\n ax.xaxis.set_tick_params(labelsize=3)\n ax.yaxis.set_tick_params(labelsize=3)\n\n isLogScale = logscale\n if logscale:\n nonzero = np.logical_and(np.greater(np.abs(plotVals), 0.), np.isfinite(plotVals))\n if nonzero.sum() > 0:\n vmin = 
np.nanmin(np.abs(plotVals[nonzero]))\n vmax = np.nanmax(np.abs(plotVals[nonzero]))\n if signdef:\n # log tick labels look bad for single decade\n if vmax / vmin > 10.0:\n ax.set_yscale('log')\n else:\n isLogScale = False\n else:\n ax.set_yscale('symlog')\n else:\n isLogScale = False\n\n if isLogScale and np.isfinite(maxdval) and maxdval > 0.:\n ax.set_ylim(None, maxdval)\n if np.abs(vmin) > 0.:\n ax.set_ylim(vmin, None)\n\n if not isLogScale:\n if sciticks:\n ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n if (np.isfinite(mindval) and\n np.isfinite(maxdval)):\n ax.set_ylim(mindval,maxdval)\n if maxdval-mindval < 1.0 or \\\n maxdval-mindval > 100.0:\n ax.tick_params(axis='y',rotation=-35)\n ax.yaxis.get_offset_text().set_fontsize(3)\n\n #handle interior subplot ticks/labels\n ix = int(iplot)%int(nx)\n iy = int(iplot)/int(nx)\n if not interiorLabels \\\n and (iy < ny-2 or ( iy == ny-2 and (int(nplots)%int(nx)==0 or ix <= (int(nplots)%int(nx) - 1)) )):\n ax.tick_params(axis='x',labelbottom=False)\n if interiorLabels or ix == 0:\n ax.set_xlabel(indepLabel,fontsize=4)\n if interiorLabels or iy == ny-1:\n ax.set_ylabel(dataLabel,fontsize=4)\n\n #legend\n if nLines <= maxLegendEntries:\n if legend_inside:\n #INSIDE AXES\n lh = ax.legend(loc='best',fontsize=3,frameon=True,\\\n framealpha=0.4,ncol=1)\n lh.get_frame().set_linewidth(0.0)\n elif ix==nx-1 or iplot==nplots-1:\n #OUTSIDE AXES\n ax.legend(loc='upper left',fontsize=3,frameon=False, \\\n bbox_to_anchor=(1.02, 1), borderaxespad=0)\n\n if invert_ind_axis:\n ax.invert_xaxis()\n\n ax.grid()\n\n return\n\n###############################################################################\nlenWarnProf = 0\nnanWarnProf = 0\ndef plotProfile(fig, \\\n linesVals, yVals, \\\n linesLabel, \\\n title=\"\", dataLabel=\"x\", \\\n sciticks=False, logscale=False, signdef=False, \\\n indepLabel=\"y\", invert_ind_axis=False, \\\n ny=1, nx=1, nplots=1, iplot=0, \\\n linesValsMinCI=None, linesValsMaxCI=None, \\\n dmin=np.NaN, dmax=np.NaN, \\\n lineAttribOffset=0, \\\n legend_inside=True,\n interiorLabels=True):\n\n# ARGUMENTS\n# fig - matplotlib figure object\n# linesVals - dependent variable (list of arrays)\n# yVals - independent variable on y-axis (array)\n# linesLabel - legend label for linesVals (list)\n\n# title - subplot title, optional\n# dataLabel - label for linesVals, optional\n# sciticks - whether linesVals needs scientific formatting for ticks, optional\n# logscale - x-axis is scaled logarithmically, optional, overrides sciticks\n# signdef - whether linesVals is positive/negative definite, optional\n# indepLabel - label for yVals, optional\n# invert_ind_axis - whether to invert y-axis orientation, optional\n\n# ny, nx - number of subplots in x/y direction, optional\n# nplots - total number of subplots, optional\n# iplot - this subplot index (starting at 0), optional\n\n# linesValsMinCI - minimum error bound for linesVals (list of arrays), optional\n# linesValsMaxCI - maximum error bound for linesVals (list of arrays), optional\n# Note: linesValsMinCI and linesValsMaxCI must be specified together\n\n# lineAttribOffset - offset for selecting line attributes, optional\n# dmin, dmax - min/max values of linesVals, optional\n# legend_inside - whether legend should be placed inside the subplot, optional\n\n ax = fig.add_subplot(ny, nx, iplot+1)\n\n #title\n ax.set_title(title,fontsize=5)\n\n #add lines\n plotVals = np.asarray([])\n nLines = 0\n for iline, lineVals in enumerate(linesVals):\n if np.all(np.isnan(lineVals)):\n global nanWarnProf\n 
if nanWarnProf==0:\n _logger.warning(\"skipping all-NaN data\")\n _logger.warning(title+\"; \"+dataLabel+\"; \"+linesLabel[iline])\n nanWarnProf=nanWarnProf+1\n continue\n if len(lineVals)!=len(yVals):\n global lenWarnProf\n if lenWarnProf==0:\n _logger.warning(\"skipping data where len(x)!=len(y)\")\n _logger.warning(title+\"; \"+dataLabel+\"; \"+linesLabel[iline])\n lenWarnProf=lenWarnProf+1\n continue\n\n # Plot line for each lineVals that has non-missing data\n pColor = pu.plotColor(len(linesVals),iline+lineAttribOffset)\n\n ax.plot(lineVals, yVals, \\\n color=pColor, \\\n label=linesLabel[iline], \\\n ls=pu.plotLineStyle(len(linesVals),iline+lineAttribOffset), \\\n linewidth=0.5)\n nLines += 1\n plotVals = np.append(plotVals,lineVals)\n\n # Add shaded error regions if specified\n if linesValsMinCI is not None and \\\n linesValsMaxCI is not None:\n\n # test statistical significance versus zero\n if signdef:\n significant = np.empty(len(lineVals))\n significant[:] = np.NaN\n else:\n significant = np.multiply(linesValsMinCI[iline], linesValsMaxCI[iline])\n significant = np.array([x if np.isfinite(x) else -1.0 for x in significant])\n\n lineArr = np.array(lineVals)\n yArr = np.array(yVals)\n negsiginds = np.array([i for i,x in enumerate(significant)\n if (x > 0.0 and lineArr[i] < 0.0)],dtype=int)\n if len(negsiginds) > 0:\n ax.plot(lineArr[negsiginds], yArr[negsiginds], \\\n color=pColor, \\\n ls='', \\\n marker='<', \\\n markersize=1.5)\n\n possiginds = np.array([i for i,x in enumerate(significant)\n if (x > 0.0 and lineArr[i] > 0.0)],dtype=int)\n if len(possiginds) > 0:\n ax.plot(lineArr[possiginds], yArr[possiginds], \\\n color=pColor, \\\n ls='', \\\n marker='>', \\\n markersize=1.5)\n\n ax.plot(linesValsMinCI[iline], yVals, \\\n color=pColor, \\\n alpha=0.4, \\\n ls='-', \\\n linewidth=0.5)\n ax.plot(linesValsMaxCI[iline], yVals, \\\n color=pColor, \\\n alpha=0.4, \\\n ls='-', \\\n linewidth=0.5)\n ax.fill_betweenx(yVals, linesValsMinCI[iline], linesValsMaxCI[iline], \\\n color=pColor, \\\n edgecolor=pColor, \\\n linewidth=0.0, alpha = 0.1)\n ax.fill_betweenx(yVals, linesValsMinCI[iline], linesValsMaxCI[iline], \\\n where=significant > 0.0, \\\n color=pColor, \\\n edgecolor=pColor, \\\n linewidth=0.2, alpha = 0.3)\n\n if nLines == 0:\n ax.tick_params(axis='x',labelbottom=False)\n ax.tick_params(axis='y',labelleft=False)\n return\n\n # add vertical zero line for unbounded quantities\n if not signdef:\n ax.plot([0., 0.], [yVals[0], yVals[-1]], ls=\"--\", c=\".3\", \\\n linewidth=0.7,markersize=0)\n\n # standardize x-limits\n mindval, maxdval = pu.get_clean_ax_limits(dmin,dmax,plotVals,signdef)\n\n #axes settings\n ax.xaxis.set_tick_params(labelsize=3)\n ax.yaxis.set_tick_params(labelsize=3)\n\n isLogScale = logscale\n if logscale:\n nonzero = np.logical_and(np.greater(np.abs(plotVals), 0.), np.isfinite(plotVals))\n if nonzero.sum() > 0:\n vmin = np.nanmin(np.abs(plotVals[nonzero]))\n vmax = np.nanmax(np.abs(plotVals[nonzero]))\n if signdef:\n # log tick labels look bad for single decade\n if vmax / vmin > 10.0:\n ax.set_xscale('log')\n else:\n isLogScale = False\n else:\n ax.set_xscale('symlog')\n else:\n isLogScale = False\n\n if isLogScale and np.isfinite(maxdval) and maxdval > 0.:\n ax.set_xlim(None, maxdval)\n if np.abs(mindval) > 0.:\n ax.set_xlim(mindval, None)\n\n if not isLogScale:\n if sciticks:\n ax.ticklabel_format(style='sci', axis='x', scilimits=(0,0))\n if (np.isfinite(mindval) and\n np.isfinite(maxdval)):\n ax.set_xlim(mindval,maxdval)\n if maxdval-mindval < 1.0 or \\\n 
maxdval-mindval > 100.0:\n ax.tick_params(axis='x',rotation=-35)\n ax.xaxis.get_offset_text().set_fontsize(3)\n\n\n #handle interior subplot ticks/labels\n ix = int(iplot)%int(nx)\n iy = int(iplot)/int(nx)\n if not interiorLabels \\\n and (iy < ny-2 or ( iy == ny-2 and (int(nplots)%int(nx)==0 or ix <= (int(nplots)%int(nx) - 1)) )):\n ax.tick_params(axis='x',labelbottom=False)\n if interiorLabels or ix == 0:\n ax.set_xlabel(dataLabel,fontsize=4)\n if interiorLabels or iy == ny-1:\n ax.set_ylabel(indepLabel,fontsize=4)\n\n #legend\n if nLines <= maxLegendEntries:\n if legend_inside:\n #INSIDE AXES\n lh = ax.legend(loc='best',fontsize=3,frameon=True,\\\n framealpha=0.4,ncol=1)\n lh.get_frame().set_linewidth(0.0)\n elif ix==nx-1 or iplot==nplots-1:\n #OUTSIDE AXES\n ax.legend(loc='upper left',fontsize=3,frameon=False, \\\n bbox_to_anchor=(1.02, 1), borderaxespad=0)\n\n if invert_ind_axis:\n ax.invert_yaxis()\n\n ax.grid()\n\n return\n\n\n###############################################################################\nlenWarnTS=0\nnanWarnTS=0\ndef plotTimeSeries(fig, \\\n xsDates, linesVals, \\\n linesLabel, \\\n title=\"\", dataLabel=\"\", \\\n sciticks=False, logscale = False, signdef=False, \\\n ny=1, nx=1, nplots=1, iplot=0, \\\n linesValsMinCI=None, linesValsMaxCI=None, \\\n dmin=np.NaN, dmax=np.NaN, \\\n lineAttribOffset=0, \\\n legend_inside=True,\n interiorLabels=True):\n\n# ARGUMENTS\n# fig - matplotlib figure object\n# xsDates - date x-values (list/array or list of lists/arrays\n# of float seconds, dt.timedelta, dt.datetime)\n# linesVals - dependent variable (list of arrays)\n# linesLabel - legend label for linesVals (list)\n\n# title - subplot title, optional\n# dataLabel - label for linesVals, optional\n# sciticks - whether linesVals needs scientific formatting for ticks, optional\n# logscale - y-axis is scaled logarithmically, optional, overrides sciticks\n# signdef - whether linesVals is positive/negative definite, optional\n\n# ny, nx - number of subplots in x/y direction, optional\n# nplots - total number of subplots, optional\n# iplot - this subplot index (starting at 0), optional\n\n# linesValsMinCI - minimum error bound for linesVals (list of arrays), optional\n# linesValsMaxCI - maximum error bound for linesVals (list of arrays), optional\n# Note: linesValsMinCI and linesValsMaxCI must be specified together\n\n# lineAttribOffset - offset for selecting line attributes, optional\n# dmin, dmax - min/max values of linesVals, optional\n# legend_inside - whether legend should be placed inside the subplot, optional\n\n ax = fig.add_subplot(ny, nx, iplot+1)\n\n #title\n ax.set_title(title,fontsize=5)\n\n #add lines\n plotVals = np.asarray([])\n nLines = 0\n jline = 0\n for iline, lineVals in enumerate(linesVals):\n if np.all(np.isnan(lineVals)):\n global nanWarnTS\n if nanWarnTS==0:\n _logger.warning(\"skipping all-NaN data\")\n _logger.warning(title+\"; \"+dataLabel+\"; \"+linesLabel[iline])\n nanWarnTS=nanWarnTS+1\n continue\n\n #float xVals\n if isinstance(xsDates[0],(list,np.ndarray)):\n xVals = pu.TDeltas2Seconds(xsDates[min([iline,len(xsDates)-1])])\n else:\n xVals = pu.TDeltas2Seconds(xsDates)\n\n if len(lineVals)!=len(xVals):\n global lenWarnTS\n if lenWarnTS==0:\n _logger.warning(\"skipping data where len(x)!=len(y)\")\n _logger.warning(title+\"; \"+dataLabel+\"; \"+linesLabel[iline])\n lenWarnTS=lenWarnTS+1\n continue\n\n if jline == 0:\n minX = xVals[0]\n maxX = xVals[-1]\n else:\n minX = min([xVals[0], minX])\n maxX = max([xVals[-1], maxX])\n jline += 1\n\n # Plot line 
for each lineVals that has non-missing data\n pColor = pu.plotColor(len(linesVals),iline+lineAttribOffset)\n\n ax.plot(xVals, lineVals, \\\n label=linesLabel[iline], \\\n color=pColor, \\\n ls=pu.plotLineStyle(len(linesVals),iline+lineAttribOffset), \\\n linewidth=0.5)\n nLines += 1\n plotVals = np.append(plotVals, lineVals)\n\n # Add shaded CI regions if specified\n if linesValsMinCI is not None and \\\n linesValsMaxCI is not None:\n\n # test statistical significance versus zero\n if signdef:\n significant = np.empty(len(lineVals))\n significant[:] = np.NaN\n else:\n significant = np.multiply(linesValsMinCI[iline], linesValsMaxCI[iline])\n significant = np.array([x if np.isfinite(x) else -1.0 for x in significant])\n\n lineArr = np.array(lineVals)\n xArr = np.array(xVals)\n negsiginds = np.array([i for i,x in enumerate(significant)\n if (x > 0.0 and lineArr[i] < 0.0)],dtype=int)\n if len(negsiginds) > 0:\n ax.plot(xArr[negsiginds], lineArr[negsiginds], \\\n color=pColor, \\\n ls='', \\\n marker='v', \\\n markersize=1.5)\n\n possiginds = np.array([i for i,x in enumerate(significant)\n if (x > 0.0 and lineArr[i] > 0.0)],dtype=int)\n if len(possiginds) > 0:\n ax.plot(xArr[possiginds], lineArr[possiginds], \\\n color=pColor, \\\n ls='', \\\n marker='^', \\\n markersize=1.5)\n\n ax.plot(xVals, linesValsMinCI[iline], \\\n color=pColor, \\\n alpha=0.4, \\\n ls='-', \\\n linewidth=0.5)\n ax.plot(xVals, linesValsMaxCI[iline], \\\n color=pColor, \\\n alpha=0.4, \\\n ls='-', \\\n linewidth=0.5)\n ax.fill_between(xVals, linesValsMinCI[iline], linesValsMaxCI[iline], \\\n color=pColor, \\\n edgecolor=pColor, \\\n linewidth=0.0, alpha = 0.1)\n ax.fill_between(xVals, linesValsMinCI[iline], linesValsMaxCI[iline], \\\n where=significant > 0.0, \\\n color=pColor, \\\n edgecolor=pColor, \\\n linewidth=0.2, alpha = 0.3)\n\n if nLines == 0:\n ax.tick_params(axis='x',labelbottom=False)\n ax.tick_params(axis='y',labelleft=False)\n return\n\n # standardize y-limits\n mindval, maxdval = pu.get_clean_ax_limits(dmin,dmax,plotVals,signdef)\n\n # add horizontal zero line for unbounded quantities\n if not signdef:\n ax.plot([minX, maxX], [0., 0.], ls=\"--\", c=\".3\", \\\n linewidth=0.7,markersize=0)\n\n #axes settings\n if isinstance(xsDates[0],(list,np.ndarray)):\n pu.format_x_for_dates(ax, xsDates[0])\n else:\n pu.format_x_for_dates(ax, xsDates)\n\n ax.xaxis.set_tick_params(labelsize=3)\n ax.yaxis.set_tick_params(labelsize=3)\n isLogScale = logscale\n if logscale:\n nonzero = np.logical_and(np.greater(np.abs(plotVals), 0.), np.isfinite(plotVals))\n if nonzero.sum() > 0:\n vmin = np.nanmin(np.abs(plotVals[nonzero]))\n vmax = np.nanmax(np.abs(plotVals[nonzero]))\n if signdef:\n # log tick labels look bad for single decade\n if vmax / vmin > 10.0:\n ax.set_yscale('log')\n else:\n isLogScale = False\n else:\n ax.set_yscale('symlog')\n else:\n isLogScale = False\n\n if isLogScale and np.isfinite(maxdval) and maxdval > 0.:\n ax.set_ylim(None, maxdval)\n if np.abs(vmin) > 0.:\n ax.set_ylim(vmin, None)\n\n if not isLogScale:\n if sciticks:\n ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n if (np.isfinite(mindval) and\n np.isfinite(maxdval)):\n ax.set_ylim(mindval,maxdval)\n if maxdval-mindval < 1.0 or \\\n maxdval-mindval > 100.0:\n ax.tick_params(axis='y',rotation=-35)\n ax.yaxis.get_offset_text().set_fontsize(3)\n\n ax.grid()\n\n #handle interior subplot ticks/labels\n ix = int(iplot)%int(nx)\n iy = int(iplot)/int(nx)\n if not interiorLabels \\\n and (iy < ny-2 or ( iy == ny-2 and (int(nplots)%int(nx)==0 or 
ix <= (int(nplots)%int(nx) - 1)) )):\n ax.tick_params(axis='x',labelbottom=False)\n if interiorLabels or ix == 0:\n ax.set_ylabel(dataLabel,fontsize=4)\n\n #legend\n if nLines <= maxLegendEntries:\n if legend_inside:\n #INSIDE AXES\n nlcol = np.int(np.ceil(np.sqrt(nLines)))\n lh = ax.legend(loc='best',fontsize=3,frameon=True,\\\n framealpha=0.4,ncol=nlcol)\n lh.get_frame().set_linewidth(0.0)\n elif ix==nx-1 or iplot==nplots-1:\n #OUTSIDE AXES\n ax.legend(loc='upper left',fontsize=3,frameon=False, \\\n bbox_to_anchor=(1.02, 1), borderaxespad=0)\n\n\n return\n\n\n###############################################################################\ndef plotTimeSeries2D(fig, \\\n xDates, yVals, contourVals, \\\n title=\"\", clabel=\"\", \\\n sciticks=False, logscale=False, signdef=False, \\\n dataLabel=\"y\", invert_ind_axis=False, \\\n ny=1, nx=1, nplots=1, iplot=0, \\\n dmin=np.NaN, dmax=np.NaN,\n interiorLabels=True):\n\n# ARGUMENTS\n# fig - matplotlib figure object\n# xDates - date x-values (array of float seconds, dt.timedelta, dt.datetime)\n# yVals - second independent variable\n# contourVals - dependent variable (2d array)\n\n# title - subplot title, optional\n# clabel - label for dependent variable, optional\n# sciticks - whether contourVals needs scientific formatting for ticks, optional\n# logscale - whether contours are spaced logarithmically, optional, overrides sciticks\n# signdef - whether contourVals is positive/negative definite, optional\n# dataLabel - label for yVals, optional\n# invert_ind_axis - whether to invert y-axis orientation, optional\n\n# ny, nx - number of subplots in x/y direction, optional\n# nplots - total number of subplots, optional\n# iplot - this subplot index (starting at 0), optional\n\n# dmin, dmax - min/max values of contourVals, optional\n\n ax = fig.add_subplot(ny, nx, iplot+1)\n\n if (np.isnan(contourVals)).all():\n ax.tick_params(axis='x',labelbottom=False)\n ax.tick_params(axis='y',labelleft=False)\n return\n\n xVals = pu.TDeltas2Seconds(xDates)\n\n # standardize c-limits\n mindval, maxdval = pu.get_clean_ax_limits(dmin,dmax,contourVals,signdef)\n if signdef:\n cmapName = 'BuPu'\n nlevs = 18\n\n # scientific contours\n cint = contourVals.astype(int)\n isInt = np.all((contourVals - cint) == 0)\n if isInt:\n minscid = np.nanmax(np.array([1., dmin]))\n else:\n minscid = maxdval*1.e-5\n lognorm = colors.LogNorm(vmin=minscid, vmax=maxdval)\n else:\n cmapName = 'seismic'\n nlevs = 28\n\n # scientific contours\n lognorm = colors.SymLogNorm(vmin=mindval, vmax=maxdval,\n linthresh=1.e-3*maxdval, linscale=1.3, base=10)\n\n # plot contour\n # option 1: smoothed contours\n #cp = ax.contourf(xVals, yVals, contourVals, nlevs, cmap=cmapName, extend='both', \\\n # vmin=mindval, vmax=maxdval)\n\n # option 2: pixel contours\n cmap = plt.get_cmap(cmapName)\n cmap.set_bad(color = 'k', alpha = 1.0)\n if logscale:\n norm = lognorm\n else:\n levels = mticker.MaxNLocator(nbins=nlevs).tick_values(mindval,maxdval)\n norm = BoundaryNorm(levels, ncolors=cmap.N, clip=True)\n xVals_pcolor, yVals_pcolor = transformXY_for_pcolor(xVals,yVals)\n cp = ax.pcolormesh(xVals_pcolor, yVals_pcolor, contourVals, cmap=cmap, norm=norm)\n\n #title\n ax.set_title(title,fontsize=5)\n\n #axes settings\n pu.format_x_for_dates(ax, xDates)\n ax.xaxis.set_tick_params(labelsize=3)\n ax.yaxis.set_tick_params(labelsize=3)\n\n #handle interior subplot ticks/labels\n ix = int(iplot)%int(nx)\n iy = int(iplot)/int(nx)\n if not interiorLabels \\\n and (iy < ny-2 or ( iy == ny-2 and (int(nplots)%int(nx)==0 or ix 
<= (int(nplots)%int(nx) - 1)) )):\n ax.tick_params(axis='x',labelbottom=False)\n if interiorLabels or ix == 0:\n ax.set_ylabel(dataLabel,fontsize=4)\n if interiorLabels or ix == nx-1:\n #colorbar\n m = plt.cm.ScalarMappable(cmap=cmap)\n m.set_array(contourVals)\n m.set_norm(norm)\n if (np.isfinite(mindval) and\n np.isfinite(maxdval) and\n not logscale):\n m.set_clim(mindval,maxdval)\n cb = plt.colorbar(m, ax=ax)\n #scientific formatting\n if sciticks and not logscale:\n cb.ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n cb.ax.yaxis.get_offset_text().set_fontsize(3)\n\n cb.ax.tick_params(labelsize=3)\n cb.set_label(clabel,fontsize=5)\n\n if invert_ind_axis:\n ax.invert_yaxis()\n\n # optionally add a grid\n #ax.grid()\n\n return\n\n\n###############################################################################\ndef transformXY_for_pcolor(xs,ys):\n # adjust centered x and y values to edges to work with pcolormesh \n # note: works best for regularly spaced data\n xs_diff = xs[1] - xs[0]\n # extend xs by 2\n # fill in first endpoint\n xs_extend = [xs[0]-xs_diff]\n # fill in internal values\n for x in xs: xs_extend.append(x)\n # fill in last endpoint\n xs_extend.append(xs_extend[-1]+(xs[-1]-xs[-2]))\n # calculate the midpoints\n xs_pcolormesh_midpoints = []\n for ii, x in enumerate(xs_extend[:-1]):\n xs_pcolormesh_midpoints.append(x+0.5*(xs_extend[ii+1] - xs_extend[ii]))\n\n ys_diff = ys[1] - ys[0]\n # extend ys by 2\n # fill in first endpoint\n ys_extend = [ys[0]-ys_diff]\n # fill in internal values\n for y in ys: ys_extend.append(y)\n # fill in last endpoint\n ys_extend.append(ys_extend[-1]+(ys[-1]-ys[-2]))\n # calculate the midpoints\n ys_pcolormesh_midpoints = []\n for ii, y in enumerate(ys_extend[:-1]):\n ys_pcolormesh_midpoints.append(y+0.5*(ys_extend[ii+1] - ys_extend[ii]))\n\n return xs_pcolormesh_midpoints, ys_pcolormesh_midpoints\n\n\n###############################################################################\nlenWarnPDF = 0\nnanWarnPDF = 0\ndef plotPDF(fig,\n countsVals, xVals,\n countsLabel,\n title=\"\",\n indepLabel=\"x\",\n ny=1, nx=1, nplots=1, iplot=0,\n lineAttribOffset=1,\n legend_inside=True,\n interiorLabels=True):\n\n# ARGUMENTS\n# fig - matplotlib figure object\n# countsVals - list of arrays, each containing counts across xVals\n# xVals - independent variable on x-axis (array)\n# countsLabel - legend label for countsVals (list)\n\n# title - subplot title, optional\n# indepLabel - label for xVals, optional\n\n# ny, nx - number of subplots in x/y direction, optional\n# nplots - total number of subplots, optional\n# iplot - this subplot index (starting at 0), optional\n\n# lineAttribOffset - offset for selecting line attributes, optional\n# legend_inside - whether legend should be placed inside the subplot, optional\n\n ax = fig.add_subplot(ny, nx, iplot+1)\n\n #title\n ax.set_title(title,fontsize=5)\n\n #add counts\n plotVals = []\n nPDFs = 0\n for ihist, countVals in enumerate(countsVals):\n if np.all(np.isnan(countVals)):\n global nanWarnPDF\n if nanWarnPDF==0:\n _logger.warning(\"skipping all-NaN data\")\n _logger.warning(title+\"; \"+indepLabel+\"; \"+countsLabel[ihist])\n nanWarnPDF=nanWarnPDF+1\n continue\n if len(countVals)!=len(xVals):\n global lenWarnPDF\n if lenWarnPDF==0:\n _logger.warning(\"skipping data where len(x)!=len(y)\")\n _logger.warning(title+\"; \"+indepLabel+\"; \"+countsLabel[ihist])\n lenWarnPDF=lenWarnPDF+1\n continue\n\n # Plot line for each countVals that has non-missing data\n\n # assume constant dx between bins\n dx = 
xVals[1] - xVals[0]\n\n ax.plot(xVals, np.divide(countVals,np.sum(countVals)*dx),\n color=pu.plotColor(len(countsVals),ihist+lineAttribOffset),\n label=countsLabel[ihist],\n ls=pu.plotLineStyle(len(countsVals),ihist+lineAttribOffset),\n linewidth=0.5)\n nPDFs = nPDFs + 1\n plotVals.append(countVals)\n\n\n if nPDFs == 0:\n ax.tick_params(axis='x',labelbottom=False)\n ax.tick_params(axis='y',labelleft=False)\n return\n\n # add a standard normal pdf\n from scipy.stats import norm\n ax.plot(xVals, norm.pdf(xVals),\n color='k',\n ls='-',\n linewidth=0.35,\n label='N(0,1)'\n )\n\n #axes settings\n ax.xaxis.set_tick_params(labelsize=3)\n ax.yaxis.set_tick_params(labelsize=3)\n plt.yscale('log')\n ax.set_ylim(bottom=1.e-6)\n\n #handle interior subplot ticks/labels\n ix = int(iplot)%int(nx)\n iy = int(iplot)/int(nx)\n if not interiorLabels \\\n and (iy < ny-2 or ( iy == ny-2 and (int(nplots)%int(nx)==0 or ix <= (int(nplots)%int(nx) - 1)) )):\n ax.tick_params(axis='x',labelbottom=False)\n if interiorLabels or ix == 0:\n ax.set_xlabel(indepLabel,fontsize=4)\n ax.set_ylabel('PDF',fontsize=4)\n\n #legend\n if legend_inside:\n #INSIDE AXES\n lh = ax.legend(loc='best',fontsize=3,frameon=True,\\\n framealpha=0.4,ncol=1)\n lh.get_frame().set_linewidth(0.0)\n elif ix==nx-1 or iplot==nplots-1:\n #OUTSIDE AXES\n ax.legend(loc='upper left',fontsize=3,frameon=False, \\\n bbox_to_anchor=(1.02, 1), borderaxespad=0)\n\n ax.grid()\n\n return\n\n\n###############################################################################\nlenWarnRamp = 0\nnanWarnRamp = 0\ndef plotfitRampComposite(fig,\n xVals,\n countVals,\n meanVals,\n rmsVals,\n stdVals,\n title=\"\", dataLabel=\"y\", \\\n indepLabel=\"x\",\n ny=1, nx=1, nplots=1, iplot=0,\n lineAttribOffset=1,\n legend_inside=True,\n interiorLabels=True):\n\n# ARGUMENTS\n# fig - matplotlib figure object\n# countVals - Count of quantity (array)\n# meanVals - Mean of quantity (array)\n# rmsVals - RMS of quantity (array)\n# stdVals - STD of quantity (array)\n\n# xVals - independent variable on x-axis (array)\n\n# title - subplot title, optional\n# dataLabel - label for y-axis, optional\n# indepLabel - label for xVals, optional\n\n# ny, nx - number of subplots in x/y direction, optional\n# nplots - total number of subplots, optional\n# iplot - this subplot index (starting at 0), optional\n\n# lineAttribOffset - offset for selecting line attributes, optional\n# legend_inside - whether legend should be placed inside the subplot, optional\n\n ax = fig.add_subplot(ny, nx, iplot+1)\n ix = int(iplot)%int(nx)\n iy = int(iplot)/int(nx)\n\n #title\n ax.set_title(title,fontsize=5)\n\n #add lines\n plotVals = []\n nLines = 0\n linesLabel = ['RMS','STD','Mean']\n for iline, lineVals in enumerate([rmsVals,stdVals,meanVals]):\n if np.all(np.isnan(lineVals)):\n global nanWarnRamp\n if nanWarnRamp==0:\n _logger.warning(\"skipping all-NaN data\")\n _logger.warning(title+\"; \"+indepLabel+\"; \"+linesLabel[iline])\n nanWarnRamp=nanWarnRamp+1\n continue\n if len(lineVals)!=len(xVals):\n global lenWarnRamp\n if lenWarnRamp==0:\n _logger.warning(\"skipping data where len(x)!=len(y)\")\n _logger.warning(title+\"; \"+indepLabel+\"; \"+linesLabel[iline])\n lenWarnRamp=lenWarnRamp+1\n continue\n\n # Plot line for each lineVals that has non-missing data\n pColor = pu.plotColor(4,iline+lineAttribOffset)\n\n ax.plot(xVals, lineVals,\n color=pColor,\n label=linesLabel[iline],\n ls=pu.plotLineStyle(4,iline+lineAttribOffset),\n linewidth=0.6)\n nLines += 1\n plotVals.append(lineVals)\n\n if nLines == 0:\n 
ax.tick_params(axis='x',labelbottom=False)\n ax.tick_params(axis='y',labelleft=False)\n return\n\n # Add fit for stdVals here using info from countVals\n ind0 = np.argmax(countVals)\n\n indexMaxX4Std = 0\n for ii, std in enumerate(stdVals):\n if np.isfinite(std): indexMaxX4Std = ii\n indexMaxX = indexMaxX4Std\n maxCount = 0\n for ii, count in enumerate(countVals):\n if count > maxCount: maxCount = count\n if count < 0.002*maxCount:\n indexMaxX = ii\n break\n if indexMaxX > indexMaxX4Std:\n ind1 = np.argmax(stdVals[0:indexMaxX4Std])\n else:\n ind1 = np.argmax(stdVals[0:indexMaxX])\n\n weights = [0.2]*(ind1-ind0+1)\n weights[0] = 1.0\n p = np.polyfit(xVals[ind0:ind1+1],stdVals[ind0:ind1+1],1,\n w=weights)\n\n X0 = xVals[ind0]\n ERR0 = X0 * p[0] + p[1]\n\n # X1 = xVals[ind1]\n # ERR1 = X1 * p[0] + p[1]\n ERR1 = stdVals[ind1]\n X1 = (ERR1 - p[1]) / p[0]\n\n\n ERRfitDict = {\n 'bu':{\n 'X': [round(X0,2), round(X1,2)],\n 'ERR': [round(ERR0,2), round(ERR1,2)],\n },\n 'YAML':{\n 'X0': [round(X0,2)],\n 'X1': [round(X1,2)],\n 'ERR0': [round(ERR0,2)],\n 'ERR1': [round(ERR1,2)],\n },\n }\n\n fitX = np.asarray([0.0] + ERRfitDict['bu']['X'] + [xVals[indexMaxX4Std]])\n fitERR = np.asarray([ERR0] + ERRfitDict['bu']['ERR'] + [ERR1])\n\n plotVals.append(fitERR)\n\n pColor = pu.plotColor(4,1+lineAttribOffset)\n\n ax.plot(fitX, fitERR,\n color=pColor,\n label='Fit-STD',\n ls='-.',\n linewidth=1.2,\n marker='+',\n ms=1.5\n )\n\n #axes settings\n ax.xaxis.set_tick_params(labelsize=3)\n ax.yaxis.set_tick_params(labelsize=3)\n\n # standardize x-limits\n mindval, maxdval = pu.get_clean_ax_limits(plotVals=plotVals)\n if (np.isfinite(mindval) and\n np.isfinite(maxdval)):\n ax.set_ylim(mindval,maxdval)\n\n #handle interior subplot ticks/labels\n if not interiorLabels \\\n and (iy < ny-2 or ( iy == ny-2 and (int(nplots)%int(nx)==0 or ix <= (int(nplots)%int(nx) - 1)) )):\n ax.tick_params(axis='x',labelbottom=False)\n if interiorLabels or ix == 0:\n ax.set_xlabel(indepLabel,fontsize=4)\n if interiorLabels or iy == ny-1:\n ax.set_ylabel(dataLabel,fontsize=4)\n\n #legend\n if legend_inside:\n #INSIDE AXES\n lh = ax.legend(loc='best',fontsize=3,frameon=True,\\\n framealpha=0.4,ncol=1)\n lh.get_frame().set_linewidth(0.0)\n elif ix==nx-1 or iplot==nplots-1:\n #OUTSIDE AXES\n ax.legend(loc='upper left',fontsize=3,frameon=False, \\\n bbox_to_anchor=(1.02, 1), borderaxespad=0)\n\n ax.grid()\n\n # Add count on RHS y-axis\n ax2 = ax.twinx()\n color = 'black'\n if interiorLabels or ix == nx:\n ax2.set_ylabel('Count',fontsize=4,color=color)\n ax2.plot(xVals[:indexMaxX4Std], countVals[:indexMaxX4Std],\n color=color,\n label='Count',\n ls=':',\n linewidth=0.5)\n ax2.tick_params(axis='y', labelcolor=color)\n ax2.yaxis.set_tick_params(labelsize=3)\n plt.yscale('log')\n ax2.set_ylim(bottom=100.)\n\n return ERRfitDict\n"
] | [
[
"numpy.sum",
"numpy.multiply",
"matplotlib.colors.BoundaryNorm",
"matplotlib.pyplot.contourf",
"numpy.any",
"matplotlib.pyplot.yscale",
"numpy.asarray",
"numpy.argsort",
"pandas.plotting.register_matplotlib_converters",
"matplotlib.colors.DivergingNorm",
"numpy.amax",
"numpy.polyfit",
"numpy.isfinite",
"numpy.append",
"matplotlib.colors.SymLogNorm",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.cm.ScalarMappable",
"numpy.abs",
"matplotlib.pyplot.title",
"matplotlib.colors.LogNorm",
"matplotlib.pyplot.get_cmap",
"numpy.isnan",
"matplotlib.use",
"numpy.linspace",
"numpy.sqrt",
"matplotlib.pyplot.subplots",
"numpy.argmax",
"numpy.arange",
"matplotlib.pyplot.cm.get_cmap",
"numpy.all",
"numpy.max",
"matplotlib.pyplot.close",
"numpy.min",
"matplotlib.pyplot.colorbar",
"matplotlib.ticker.MaxNLocator",
"matplotlib.colors.ListedColormap",
"numpy.amin",
"numpy.array",
"numpy.concatenate",
"numpy.loadtxt"
]
] |
Christoper-Harvey/1st-Capstone | [
"93630a4d5f4a2d939c8b5f74f11b5b33052e3f72"
] | [
"DeepReinforcementLearning/funcs.py"
] | [
"import numpy as np\nimport random\n\nimport loggers as lg\n\nfrom game import Game, GameState\nfrom model import Residual_CNN\n\nfrom agent import Agent, User\n\nimport config\n\ndef playMatchesBetweenVersions(env, run_version, player1version, player2version, EPISODES, logger, turns_until_tau0, goes_first = 0):\n \n if player1version == -1:\n player1 = User('player1', env.state_size, env.action_size)\n else:\n player1_NN = Residual_CNN(config.REG_CONST, config.LEARNING_RATE, env.input_shape, env.action_size, config.HIDDEN_CNN_LAYERS)\n\n if player1version > 0:\n player1_network = player1_NN.read(env.name, run_version, player1version)\n player1_NN.model.set_weights(player1_network.get_weights()) \n player1 = Agent('player1', env.state_size, env.action_size, config.p1_MCTS_SIMS, config.CPUCT, player1_NN)\n\n if player2version == -1:\n player2 = User('player2', env.state_size, env.action_size)\n else:\n player2_NN = Residual_CNN(config.REG_CONST, config.LEARNING_RATE, env.input_shape, env.action_size, config.HIDDEN_CNN_LAYERS)\n \n if player2version > 0:\n player2_network = player2_NN.read(env.name, run_version, player2version)\n player2_NN.model.set_weights(player2_network.get_weights())\n player2 = Agent('player2', env.state_size, env.action_size, config.p2_MCTS_SIMS, config.CPUCT, player2_NN)\n\n scores, memory, points, sp_scores = playMatches(player1, player2, EPISODES, logger, turns_until_tau0, None, goes_first)\n\n return (scores, memory, points, sp_scores)\n\n\ndef playMatches(player1, player2, EPISODES, logger, turns_until_tau0, memory = None, goes_first = 0):\n\n env = Game()\n scores = {player1.name:0, \"drawn\": 0, player2.name:0}\n sp_scores = {'sp':0, \"drawn\": 0, 'nsp':0}\n points = {player1.name:[], player2.name:[]}\n\n for e in range(EPISODES):\n\n logger.info('====================')\n logger.info('EPISODE %d OF %d', e+1, EPISODES)\n logger.info('====================')\n\n print (str(e+1) + ' ', end='')\n\n state = env.reset()\n \n done = 0\n turn = 0\n player1.mcts = None\n player2.mcts = None\n\n if goes_first == 0:\n player1Starts = random.randint(0,1) * 2 - 1\n else:\n player1Starts = goes_first\n\n if player1Starts == 1:\n players = {1:{\"agent\": player1, \"name\":player1.name}\n , -1: {\"agent\": player2, \"name\":player2.name}\n }\n logger.info(player1.name + ' plays as X')\n else:\n players = {1:{\"agent\": player2, \"name\":player2.name}\n , -1: {\"agent\": player1, \"name\":player1.name}\n }\n logger.info(player2.name + ' plays as X')\n logger.info('--------------')\n\n env.gameState.render(logger)\n\n while done == 0:\n turn = turn + 1\n \n #### Run the MCTS algo and return an action\n if turn < turns_until_tau0:\n action, pi, MCTS_value, NN_value = players[state.playerTurn]['agent'].act(state, 1)\n else:\n action, pi, MCTS_value, NN_value = players[state.playerTurn]['agent'].act(state, 0)\n\n if memory != None:\n ####Commit the move to memory\n memory.commit_stmemory(env.identities, state, pi)\n\n\n logger.info('action: %d', action)\n for r in range(env.grid_shape[0]):\n logger.info(['----' if x == 0 else '{0:.2f}'.format(np.round(x,2)) for x in pi[env.grid_shape[1]*r : (env.grid_shape[1]*r + env.grid_shape[1])]])\n logger.info('MCTS perceived value for %s: %f', state.pieces[str(state.playerTurn)] ,np.round(MCTS_value,2))\n logger.info('NN perceived value for %s: %f', state.pieces[str(state.playerTurn)] ,np.round(NN_value,2))\n logger.info('====================')\n\n ### Do the action\n state, value, done, _ = env.step(action) #the value of the newState from the 
POV of the new playerTurn i.e. -1 if the previous player played a winning move\n \n env.gameState.render(logger)\n\n if done == 1: \n if memory != None:\n #### If the game is finished, assign the values correctly to the game moves\n for move in memory.stmemory:\n if move['playerTurn'] == state.playerTurn:\n move['value'] = value\n else:\n move['value'] = -value\n \n memory.commit_ltmemory()\n \n if value == 1:\n logger.info('%s WINS!', players[state.playerTurn]['name'])\n scores[players[state.playerTurn]['name']] = scores[players[state.playerTurn]['name']] + 1\n if state.playerTurn == 1: \n sp_scores['sp'] = sp_scores['sp'] + 1\n else:\n sp_scores['nsp'] = sp_scores['nsp'] + 1\n\n elif value == -1:\n logger.info('%s WINS!', players[-state.playerTurn]['name'])\n scores[players[-state.playerTurn]['name']] = scores[players[-state.playerTurn]['name']] + 1\n \n if state.playerTurn == 1: \n sp_scores['nsp'] = sp_scores['nsp'] + 1\n else:\n sp_scores['sp'] = sp_scores['sp'] + 1\n\n else:\n logger.info('DRAW...')\n scores['drawn'] = scores['drawn'] + 1\n sp_scores['drawn'] = sp_scores['drawn'] + 1\n\n pts = state.score\n points[players[state.playerTurn]['name']].append(pts[0])\n points[players[-state.playerTurn]['name']].append(pts[1])\n\n return (scores, memory, points, sp_scores)\n"
] | [
[
"numpy.round"
]
] |
sebasj13/topas-create-graphs | [
"5ccdbcbbe39461917cc015aa59805e518421431c"
] | [
"topasgraphsim/src/functions/dp.py"
] | [
"import numpy as np\nimport scipy.integrate as integrate\nimport scipy.interpolate as interpolate\n\n\ndef calculate_parameters(axis, dose, cax=False):\n\n \"\"\"\n A function to calculate the relevant\n descriptive parameters of dose profiles.\n \"\"\"\n\n interpolated_axis = np.linspace(axis[0], axis[-1], len(axis) * 100)\n akima_dose_interpolator = interpolate.Akima1DInterpolator(axis, dose)\n interpolated_dose = np.flip(akima_dose_interpolator.__call__(interpolated_axis))\n\n D0 = (\n interpolated_dose[int(len(interpolated_dose) / 2)]\n + interpolated_dose[int(len(interpolated_dose) / 2) - 1]\n ) / 2\n XL20 = interpolated_axis[: int(len(interpolated_axis) / 2)][\n (\n np.abs(\n interpolated_dose[: int(len(interpolated_axis) / 2)] - 0.2 * max(dose)\n )\n ).argmin()\n ]\n XL50 = interpolated_axis[: int(len(interpolated_axis) / 2)][\n (\n np.abs(\n interpolated_dose[: int(len(interpolated_axis) / 2)] - 0.5 * max(dose)\n )\n ).argmin()\n ]\n XL80 = interpolated_axis[: int(len(interpolated_axis) / 2)][\n (\n np.abs(\n interpolated_dose[: int(len(interpolated_axis) / 2)] - 0.8 * max(dose)\n )\n ).argmin()\n ]\n XR20 = interpolated_axis[int(len(interpolated_axis) / 2) :][\n (\n np.abs(\n interpolated_dose[\n int(len(interpolated_axis) / 2) : len(interpolated_axis)\n ]\n - 0.2 * max(dose)\n )\n ).argmin()\n ]\n XR50 = interpolated_axis[int(len(interpolated_axis) / 2) :][\n (\n np.abs(\n interpolated_dose[\n int(len(interpolated_axis) / 2) : len(interpolated_axis)\n ]\n - 0.5 * max(dose)\n )\n ).argmin()\n ]\n XR80 = interpolated_axis[int(len(interpolated_axis) / 2) :][\n (\n np.abs(\n interpolated_dose[\n int(len(interpolated_axis) / 2) : len(interpolated_axis)\n ]\n - 0.8 * max(dose)\n )\n ).argmin()\n ]\n\n HWB = round(abs(XR50 - XL50), 3)\n CAXdev = round(XL50 + 0.5 * HWB, 3)\n\n Dose80 = [value for value in dose if value >= 0.8 * max(dose)]\n\n if cax == True:\n return CAXdev\n\n flat_krieger = round(\n max([value for value in dose if value >= 0.95 * max(dose)])\n - min([value for value in dose if value >= 0.95 * max(dose)]) / D0,\n 5,\n )\n flat_stddev = round(np.std(Dose80), 3)\n\n if len(Dose80) % 2 != 0:\n Dose80 = (\n Dose80[0 : int(len(Dose80) / 2)]\n + Dose80[int(len(Dose80) / 2) + 1 : len(Dose80)]\n )\n\n S = round(\n max(\n [Dose80[i - 1] / Dose80[len(Dose80) - i] for i in range(1, len(Dose80) + 1)]\n ),\n 3,\n )\n\n Lpenumbra = round(abs(XL80 - XL20 + CAXdev), 3)\n Rpenumbra = round(abs(XR80 - XR20 + CAXdev), 3)\n\n XL20index = np.where(interpolated_axis == XL20)[0][0]\n XL80index = np.where(interpolated_axis == XL80)[0][0]\n XR20index = np.where(interpolated_axis == XR20)[0][0]\n XR80index = np.where(interpolated_axis == XR80)[0][0]\n Lintegral = round(\n abs(\n integrate.simps(\n interpolated_dose[XL20index:XL80index],\n interpolated_axis[XL20index:XL80index],\n )\n ),\n 3,\n )\n Rintegral = round(\n abs(\n integrate.simps(\n interpolated_dose[XR80index:XR20index],\n interpolated_axis[XR80index:XR20index],\n )\n ),\n 3,\n )\n\n if CAXdev > 150:\n raise Exception\n\n return [\n HWB,\n CAXdev,\n flat_krieger,\n flat_stddev,\n S,\n Lpenumbra,\n Rpenumbra,\n Lintegral,\n Rintegral,\n ]\n"
] | [
[
"numpy.std",
"numpy.where",
"scipy.interpolate.Akima1DInterpolator",
"scipy.integrate.simps"
]
] |
simondlevy/gym-copter | [
"7236769b7586b92026d4b47f12363258c84d9508"
] | [
"nengo/copter.py"
] | [
"'''\nQuadcopter class for Nengo adaptive controller\n\nCopyright (C) 2021 Xuan Choo, Simon D. Levy\n\nMIT License\n'''\n\nimport nengo\nimport gym\nimport numpy as np\n\nfrom adaptive import run\n\n\nclass Copter:\n\n def __init__(self, seed=None):\n\n self.env = gym.make('gym_copter:Hover1D-v0')\n self.reset(seed)\n\n def reset(self, seed):\n\n self.state = self.env.reset()\n\n def step(self, u):\n\n u = np.clip(u, 0, 1)\n\n self.env.render()\n\n z, dz, = self.state\n\n # Negate for NED => ENU\n z, dz = -z, -dz\n\n print('%f | %+3.3f %+3.3f' % (u, z, dz))\n\n self.state, _reward, _done, _info = self.env.step((u,))\n\n return z, dz\n\n def set_extra_force(self, force):\n\n self.extra_mass = force\n\n def generate_html(self, desired):\n '''\n Copter is simulated externally\n '''\n return None\n\n\nwith nengo.Network(seed=3) as model:\n\n run(Copter, 'Copter', 'Position', 'Wind Force')\n"
] | [
[
"numpy.clip"
]
] |
chasingegg/Data_Science | [
"a499866ff92aa1107057b20563564bdd89fc370f"
] | [
"Python/textrank/textrank.py"
] | [
"#!/usr/src/env python\n# -*- coding: utf-8 -*-\n# TextRank 博客 http://xiaosheng.me/2017/04/08/article49/\n# 从PageRank转变而来,可以用来做关键字的提取。TextRank的计算公式其实跟PageRank可以认为是一样的\n# 只不过就是要考虑权重的因素(算PageRank的时候就是均摊权值)\n# 在TextRank构建的图中,节点是句子,权值就是两个句子的相似程度 \n\n# 提取关键字的时候,单词作为图的节点,把权值都设成1,此时其实退化成PageRank\n# 把文本拆分成单词,将这一些单词设定一个简单的滑动窗口,每个窗口内的任意两个单词之间存在一条边\n\n# 如果是要提取关键句,一般认为所有句子都是相邻的,不需要窗口提取。相似程度的计算公式一般是重合\n# 单词数量除以总单词数量\n\nimport sys\nimport pandas as pd\nimport jieba.analyse\n\ndef textrank(data, topK):\n idList, titleList, abstractList = data['id'], data['title'], data['abstract']\n ids, title, keys = [], [], []\n for i in range(len(idList)):\n text = '%s。%s' % (titleList[i], abstractList[i]) #拼接\n jieba.analyse.set_stop_words('data/stopWord.txt')\n print(\"\\\"\", titleList[i], \"\\\"\", \" 10 keywords - TextRank :\")\n keywords = jieba.analyse.textrank(text, topK = topK, allowPOS=('n','nz','v','vd','vn','l','a','d'))\n word_split = \" \".join(keywords)\n print(word_split)\n keys.append(word_split.encode(\"utf-8\"))\n ids.append(idList[i])\n title.append(titleList[i])\n result = pd.DataFrame({\"id\":ids, \"title\":title, \"key\":keys}, columns=['id', 'title', 'key'])\n return result\n\nif __name__ == \"__main__\":\n dataFile = 'data/sample_data.csv'\n data = pd.read_csv(dataFile)\n result = textrank(data, 10)\n result.to_csv(\"result/keys_textrank.csv\", index=False)"
] | [
[
"pandas.read_csv",
"pandas.DataFrame"
]
] |
1in1/Python-Baseball | [
"4c76d65330ff7eb88c87057be02bbddb50dd325b"
] | [
"stats/data.py"
] | [
"import os\nimport glob\nimport pandas as pd\n\ngame_files = glob.glob(os.path.join(os.getcwd(), 'games', '*.EVE'))\ngame_files.sort()\n\ngame_frames = []\nfor game_file in game_files:\n game_frame = pd.read_csv(game_file, names=['type','multi2','multi3','multi4','multi5','multi6','event'])\n game_frames.append(game_frame)\n\ngames = pd.concat(game_frames)\ngames.loc[games['multi5'] == '??', ['multi5']] = ''\n\nidentifiers = games['multi2'].str.extract(r'(.LS(\\d{4})\\d{5})')\nidentifiers = identifiers.fillna(method='ffill')\nidentifiers.columns = ['game_id', 'year']\ngames = pd.concat([games, identifiers], axis=1, sort=False)\ngames = games.fillna(' ')\ngames.loc[:, 'type'] = pd.Categorical(games.loc[:, 'type'])\nprint(games.head())\n\n"
] | [
[
"pandas.read_csv",
"pandas.concat",
"pandas.Categorical"
]
] |
RobertRosca/PyFstat | [
"1c9568bb3dc87c3d33aeb41b3f572e9990665372"
] | [
"examples/other_examples/PyFstat_example_twoF_cumulative.py"
] | [
"\"\"\"\nCumulative coherent 2F\n======================\n\nCompute the cumulative coherent F-statistic of a signal candidate.\n\"\"\"\n\n\nimport os\nimport numpy as np\nimport pyfstat\n\nfrom pyfstat.helper_functions import get_predict_fstat_parameters_from_dict\n\nlabel = \"PyFstat_example_twoF_cumulative\"\noutdir = os.path.join(\"PyFstat_example_data\", label)\n\n# Properties of the GW data\ngw_data = {\n \"sqrtSX\": 1e-23,\n \"tstart\": 1000000000,\n \"duration\": 100 * 86400,\n \"detectors\": \"H1,L1\",\n \"Band\": 4,\n \"Tsft\": 1800,\n}\n\n# Properties of the signal\ndepth = 100\nphase_parameters = {\n \"F0\": 30.0,\n \"F1\": -1e-10,\n \"F2\": 0,\n \"Alpha\": np.radians(83.6292),\n \"Delta\": np.radians(22.0144),\n \"tref\": gw_data[\"tstart\"],\n \"asini\": 10,\n \"period\": 10 * 3600 * 24,\n \"tp\": gw_data[\"tstart\"] + gw_data[\"duration\"] / 2.0,\n \"ecc\": 0,\n \"argp\": 0,\n}\namplitude_parameters = {\n \"h0\": gw_data[\"sqrtSX\"] / depth,\n \"cosi\": 1,\n \"phi\": np.pi,\n \"psi\": np.pi / 8,\n}\n\nPFS_input = get_predict_fstat_parameters_from_dict(\n {**phase_parameters, **amplitude_parameters}\n)\n\n# Let me grab tref here, since it won't really be needed in phase_parameters\ntref = phase_parameters.pop(\"tref\")\ndata = pyfstat.BinaryModulatedWriter(\n label=label,\n outdir=outdir,\n tref=tref,\n **gw_data,\n **phase_parameters,\n **amplitude_parameters,\n)\ndata.make_data()\n\n# The predicted twoF, given by lalapps_predictFstat can be accessed by\ntwoF = data.predict_fstat()\nprint(\"Predicted twoF value: {}\\n\".format(twoF))\n\n# Create a search object for each of the possible SFT combinations\n# (H1 only, L1 only, H1 + L1).\nifo_constraints = [\"L1\", \"H1\", None]\ncompute_fstat_per_ifo = [\n pyfstat.ComputeFstat(\n sftfilepattern=os.path.join(\n data.outdir,\n (f\"{ifo_constraint[0]}*.sft\" if ifo_constraint is not None else \"*.sft\"),\n ),\n tref=data.tref,\n binary=phase_parameters.get(\"asini\", 0),\n minCoverFreq=-0.5,\n maxCoverFreq=-0.5,\n )\n for ifo_constraint in ifo_constraints\n]\n\nfor ind, compute_f_stat in enumerate(compute_fstat_per_ifo):\n compute_f_stat.plot_twoF_cumulative(\n label=label + (f\"_{ifo_constraints[ind]}\" if ind < 2 else \"_H1L1\"),\n outdir=outdir,\n savefig=True,\n CFS_input=phase_parameters,\n PFS_input=PFS_input,\n custom_ax_kwargs={\n \"title\": \"How does 2F accumulate over time?\",\n \"label\": \"Cumulative 2F\"\n + (f\" {ifo_constraints[ind]}\" if ind < 2 else \" H1 + L1\"),\n },\n )\n"
] | [
[
"numpy.radians"
]
] |
bugface/transformers | [
"ba286fe7d51db12ad663effac83bed8199dd7141"
] | [
"src/transformers/models/unispeech/modeling_unispeech.py"
] | [
"# coding=utf-8\n# Copyright 2021 The Fairseq Authors and the HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" PyTorch UniSpeech model.\"\"\"\n\nimport math\nimport warnings\nfrom dataclasses import dataclass\nfrom typing import Optional, Tuple, Union\n\nimport numpy as np\nimport torch\nimport torch.utils.checkpoint\nfrom torch import nn\nfrom torch.nn import CrossEntropyLoss\n\nfrom ...activations import ACT2FN\nfrom ...deepspeed import is_deepspeed_zero3_enabled\nfrom ...modeling_outputs import BaseModelOutput, CausalLMOutput, SequenceClassifierOutput, Wav2Vec2BaseModelOutput\nfrom ...modeling_utils import PreTrainedModel\nfrom ...pytorch_utils import torch_int_div\nfrom ...utils import (\n ModelOutput,\n add_code_sample_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_model_forward,\n logging,\n replace_return_docstrings,\n)\nfrom .configuration_unispeech import UniSpeechConfig\n\n\nlogger = logging.get_logger(__name__)\n\n\n_HIDDEN_STATES_START_POSITION = 2\n\n# General docstring\n_CONFIG_FOR_DOC = \"UniSpeechConfig\"\n_PROCESSOR_FOR_DOC = \"Wav2Vec2Processor\"\n\n# Base docstring\n_CHECKPOINT_FOR_DOC = \"patrickvonplaten/unispeech-large-1500h-cv-timit\"\n_EXPECTED_OUTPUT_SHAPE = [1, 292, 1024]\n\n# CTC docstring\n_CTC_EXPECTED_OUTPUT = \"'mister quilter is the apposl of the midle classes and weare glad to welcom his gosepl'\"\n_CTC_EXPECTED_LOSS = 17.17\n\n# Audio class docstring\n_FEAT_EXTRACTOR_FOR_DOC = \"Wav2Vec2FeatureExtractor\"\n_SEQ_CLASS_CHECKPOINT = \"hf-internal-testing/tiny-random-unispeech\"\n_SEQ_CLASS_EXPECTED_OUTPUT = \"'LABEL_0'\" # TODO(anton) - could you quickly fine-tune a KS WavLM Model\n_SEQ_CLASS_EXPECTED_LOSS = 0.66 # TODO(anton) - could you quickly fine-tune a KS WavLM Model\n\nUNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"microsoft/unispeech-large-1500h-cv\",\n \"microsoft/unispeech-large-multi-lingual-1500h-cv\",\n # See all UniSpeech models at https://huggingface.co/models?filter=unispeech\n]\n\n\n@dataclass\nclass UniSpeechForPreTrainingOutput(ModelOutput):\n \"\"\"\n Output type of [`UniSpeechForPreTrainingOutput`], with potential hidden states and attentions.\n\n Args:\n loss (*optional*, returned when model is in train mode, `torch.FloatTensor` of shape `(1,)`):\n Total loss as the sum of the contrastive loss (L_m) and the diversity loss (L_d) as stated in the [official\n paper](https://arxiv.org/pdf/2006.11477.pdf) . 
(classification) loss.\n projected_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`):\n Hidden-states of the model projected to *config.proj_codevector_dim* that can be used to predict the masked\n projected quantized states.\n projected_quantized_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`):\n Quantized extracted feature vectors projected to *config.proj_codevector_dim* representing the positive\n target vectors for contrastive loss.\n hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of\n shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n loss: Optional[torch.FloatTensor] = None\n projected_states: torch.FloatTensor = None\n projected_quantized_states: torch.FloatTensor = None\n codevector_perplexity: torch.FloatTensor = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2._compute_mask_indices\ndef _compute_mask_indices(\n shape: Tuple[int, int],\n mask_prob: float,\n mask_length: int,\n attention_mask: Optional[torch.LongTensor] = None,\n min_masks: int = 0,\n) -> np.ndarray:\n \"\"\"\n Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for\n ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on\n CPU as part of the preprocessing during training.\n\n Args:\n shape: The shape for which to compute masks. This should be of a tuple of size 2 where\n the first element is the batch size and the second element is the length of the axis to span.\n mask_prob: The percentage of the whole axis (between 0 and 1) which will be masked. The number of\n independently generated mask spans of length `mask_length` is computed by\n `mask_prob*shape[1]/mask_length`. 
Note that due to overlaps, `mask_prob` is an upper bound and the\n actual percentage will be smaller.\n mask_length: size of the mask\n min_masks: minimum number of masked spans\n attention_mask: A (right-padded) attention mask which independently shortens the feature axis of\n each batch dimension.\n \"\"\"\n batch_size, sequence_length = shape\n\n if mask_length < 1:\n raise ValueError(\"`mask_length` has to be bigger than 0.\")\n\n if mask_length > sequence_length:\n raise ValueError(\n f\"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}\"\n f\" and `sequence_length`: {sequence_length}`\"\n )\n\n # epsilon is used for probabilistic rounding\n epsilon = np.random.rand(1).item()\n\n def compute_num_masked_span(input_length):\n \"\"\"Given input length, compute how many spans should be masked\"\"\"\n num_masked_span = int(mask_prob * input_length / mask_length + epsilon)\n num_masked_span = max(num_masked_span, min_masks)\n\n # make sure num masked span <= sequence_length\n if num_masked_span * mask_length > sequence_length:\n num_masked_span = sequence_length // mask_length\n\n # make sure num_masked span is also <= input_length - (mask_length - 1)\n if input_length - (mask_length - 1) < num_masked_span:\n num_masked_span = max(input_length - (mask_length - 1), 0)\n\n return num_masked_span\n\n # compute number of masked spans in batch\n input_lengths = (\n attention_mask.sum(-1).detach().tolist()\n if attention_mask is not None\n else [sequence_length for _ in range(batch_size)]\n )\n\n # SpecAugment mask to fill\n spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=np.bool)\n spec_aug_mask_idxs = []\n\n max_num_masked_span = compute_num_masked_span(sequence_length)\n\n if max_num_masked_span == 0:\n return spec_aug_mask\n\n for input_length in input_lengths:\n # compute num of masked spans for this input\n num_masked_span = compute_num_masked_span(input_length)\n\n # get random indices to mask\n spec_aug_mask_idx = np.random.choice(\n np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False\n )\n\n # pick first sampled index that will serve as a dummy index to pad vector\n # to ensure same dimension for all batches due to probabilistic rounding\n # Picking first sample just pads those vectors twice.\n if len(spec_aug_mask_idx) == 0:\n # this case can only happen if `input_length` is strictly smaller then\n # `sequence_length` in which case the last token has to be a padding\n # token which we can use as a dummy mask id\n dummy_mask_idx = sequence_length - 1\n else:\n dummy_mask_idx = spec_aug_mask_idx[0]\n\n spec_aug_mask_idx = np.concatenate(\n [spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx]\n )\n spec_aug_mask_idxs.append(spec_aug_mask_idx)\n\n spec_aug_mask_idxs = np.array(spec_aug_mask_idxs)\n\n # expand masked indices to masked spans\n spec_aug_mask_idxs = np.broadcast_to(\n spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length)\n )\n spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length)\n\n # add offset to the starting indexes so that that indexes now create a span\n offsets = np.arange(mask_length)[None, None, :]\n offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape(\n batch_size, max_num_masked_span * mask_length\n )\n spec_aug_mask_idxs = spec_aug_mask_idxs + offsets\n\n # ensure that we cannot have indices larger than sequence_length\n if 
spec_aug_mask_idxs.max() > sequence_length - 1:\n spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1\n\n # scatter indices to mask\n np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1)\n\n return spec_aug_mask\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2NoLayerNormConvLayer with Wav2Vec2->UniSpeech\nclass UniSpeechNoLayerNormConvLayer(nn.Module):\n def __init__(self, config, layer_id=0):\n super().__init__()\n self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1\n self.out_conv_dim = config.conv_dim[layer_id]\n\n self.conv = nn.Conv1d(\n self.in_conv_dim,\n self.out_conv_dim,\n kernel_size=config.conv_kernel[layer_id],\n stride=config.conv_stride[layer_id],\n bias=config.conv_bias,\n )\n self.activation = ACT2FN[config.feat_extract_activation]\n\n def forward(self, hidden_states):\n hidden_states = self.conv(hidden_states)\n hidden_states = self.activation(hidden_states)\n return hidden_states\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2LayerNormConvLayer with Wav2Vec2->UniSpeech\nclass UniSpeechLayerNormConvLayer(nn.Module):\n def __init__(self, config, layer_id=0):\n super().__init__()\n self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1\n self.out_conv_dim = config.conv_dim[layer_id]\n\n self.conv = nn.Conv1d(\n self.in_conv_dim,\n self.out_conv_dim,\n kernel_size=config.conv_kernel[layer_id],\n stride=config.conv_stride[layer_id],\n bias=config.conv_bias,\n )\n self.layer_norm = nn.LayerNorm(self.out_conv_dim, elementwise_affine=True)\n self.activation = ACT2FN[config.feat_extract_activation]\n\n def forward(self, hidden_states):\n hidden_states = self.conv(hidden_states)\n\n hidden_states = hidden_states.transpose(-2, -1)\n hidden_states = self.layer_norm(hidden_states)\n hidden_states = hidden_states.transpose(-2, -1)\n\n hidden_states = self.activation(hidden_states)\n return hidden_states\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2GroupNormConvLayer with Wav2Vec2->UniSpeech\nclass UniSpeechGroupNormConvLayer(nn.Module):\n def __init__(self, config, layer_id=0):\n super().__init__()\n self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1\n self.out_conv_dim = config.conv_dim[layer_id]\n\n self.conv = nn.Conv1d(\n self.in_conv_dim,\n self.out_conv_dim,\n kernel_size=config.conv_kernel[layer_id],\n stride=config.conv_stride[layer_id],\n bias=config.conv_bias,\n )\n self.activation = ACT2FN[config.feat_extract_activation]\n\n self.layer_norm = nn.GroupNorm(num_groups=self.out_conv_dim, num_channels=self.out_conv_dim, affine=True)\n\n def forward(self, hidden_states):\n hidden_states = self.conv(hidden_states)\n hidden_states = self.layer_norm(hidden_states)\n hidden_states = self.activation(hidden_states)\n return hidden_states\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2PositionalConvEmbedding with Wav2Vec2->UniSpeech\nclass UniSpeechPositionalConvEmbedding(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.conv = nn.Conv1d(\n config.hidden_size,\n config.hidden_size,\n kernel_size=config.num_conv_pos_embeddings,\n padding=config.num_conv_pos_embeddings // 2,\n groups=config.num_conv_pos_embedding_groups,\n )\n\n if is_deepspeed_zero3_enabled():\n import deepspeed\n\n with deepspeed.zero.GatheredParameters(self.conv.weight, modifier_rank=0):\n self.conv = nn.utils.weight_norm(self.conv, name=\"weight\", dim=2)\n 
deepspeed.zero.register_external_parameter(self, self.conv.weight_v)\n deepspeed.zero.register_external_parameter(self, self.conv.weight_g)\n else:\n self.conv = nn.utils.weight_norm(self.conv, name=\"weight\", dim=2)\n\n self.padding = UniSpeechSamePadLayer(config.num_conv_pos_embeddings)\n self.activation = ACT2FN[config.feat_extract_activation]\n\n def forward(self, hidden_states):\n hidden_states = hidden_states.transpose(1, 2)\n\n hidden_states = self.conv(hidden_states)\n hidden_states = self.padding(hidden_states)\n hidden_states = self.activation(hidden_states)\n\n hidden_states = hidden_states.transpose(1, 2)\n return hidden_states\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2SamePadLayer with Wav2Vec2->UniSpeech\nclass UniSpeechSamePadLayer(nn.Module):\n def __init__(self, num_conv_pos_embeddings):\n super().__init__()\n self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0\n\n def forward(self, hidden_states):\n if self.num_pad_remove > 0:\n hidden_states = hidden_states[:, :, : -self.num_pad_remove]\n return hidden_states\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureEncoder with Wav2Vec2->UniSpeech\nclass UniSpeechFeatureEncoder(nn.Module):\n \"\"\"Construct the features from raw audio waveform\"\"\"\n\n def __init__(self, config):\n super().__init__()\n\n if config.feat_extract_norm == \"group\":\n conv_layers = [UniSpeechGroupNormConvLayer(config, layer_id=0)] + [\n UniSpeechNoLayerNormConvLayer(config, layer_id=i + 1)\n for i in range(config.num_feat_extract_layers - 1)\n ]\n elif config.feat_extract_norm == \"layer\":\n conv_layers = [\n UniSpeechLayerNormConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers)\n ]\n else:\n raise ValueError(\n f\"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']\"\n )\n self.conv_layers = nn.ModuleList(conv_layers)\n self.gradient_checkpointing = False\n self._requires_grad = True\n\n def _freeze_parameters(self):\n for param in self.parameters():\n param.requires_grad = False\n self._requires_grad = False\n\n def forward(self, input_values):\n hidden_states = input_values[:, None]\n\n # make sure hidden_states require grad for gradient_checkpointing\n if self._requires_grad and self.training:\n hidden_states.requires_grad = True\n\n for conv_layer in self.conv_layers:\n if self._requires_grad and self.gradient_checkpointing and self.training:\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(conv_layer),\n hidden_states,\n )\n else:\n hidden_states = conv_layer(hidden_states)\n\n return hidden_states\n\n\nclass UniSpeechFeatureExtractor(UniSpeechFeatureEncoder):\n def __init__(self, config):\n super().__init__(config)\n warnings.warn(\n f\"The class `{self.__class__.__name__}` has been depreciated \"\n \"and will be removed in Transformers v5. 
\"\n f\"Use `{self.__class__.__bases__[0].__name__}` instead.\",\n FutureWarning,\n )\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureProjection with Wav2Vec2->UniSpeech\nclass UniSpeechFeatureProjection(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.layer_norm = nn.LayerNorm(config.conv_dim[-1], eps=config.layer_norm_eps)\n self.projection = nn.Linear(config.conv_dim[-1], config.hidden_size)\n self.dropout = nn.Dropout(config.feat_proj_dropout)\n\n def forward(self, hidden_states):\n # non-projected hidden states are needed for quantization\n norm_hidden_states = self.layer_norm(hidden_states)\n hidden_states = self.projection(norm_hidden_states)\n hidden_states = self.dropout(hidden_states)\n return hidden_states, norm_hidden_states\n\n\n# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->UniSpeech\nclass UniSpeechAttention(nn.Module):\n \"\"\"Multi-headed attention from 'Attention Is All You Need' paper\"\"\"\n\n def __init__(\n self,\n embed_dim: int,\n num_heads: int,\n dropout: float = 0.0,\n is_decoder: bool = False,\n bias: bool = True,\n ):\n super().__init__()\n self.embed_dim = embed_dim\n self.num_heads = num_heads\n self.dropout = dropout\n self.head_dim = embed_dim // num_heads\n\n if (self.head_dim * num_heads) != self.embed_dim:\n raise ValueError(\n f\"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}\"\n f\" and `num_heads`: {num_heads}).\"\n )\n self.scaling = self.head_dim**-0.5\n self.is_decoder = is_decoder\n\n self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n\n def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):\n return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n key_value_states: Optional[torch.Tensor] = None,\n past_key_value: Optional[Tuple[torch.Tensor]] = None,\n attention_mask: Optional[torch.Tensor] = None,\n layer_head_mask: Optional[torch.Tensor] = None,\n output_attentions: bool = False,\n ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:\n \"\"\"Input shape: Batch x Time x Channel\"\"\"\n\n # if key_value_states are provided this layer is used as a cross-attention layer\n # for the decoder\n is_cross_attention = key_value_states is not None\n\n bsz, tgt_len, _ = hidden_states.size()\n\n # get query proj\n query_states = self.q_proj(hidden_states) * self.scaling\n # get key, value proj\n if is_cross_attention and past_key_value is not None:\n # reuse k,v, cross_attentions\n key_states = past_key_value[0]\n value_states = past_key_value[1]\n elif is_cross_attention:\n # cross_attentions\n key_states = self._shape(self.k_proj(key_value_states), -1, bsz)\n value_states = self._shape(self.v_proj(key_value_states), -1, bsz)\n elif past_key_value is not None:\n # reuse k, v, self_attention\n key_states = self._shape(self.k_proj(hidden_states), -1, bsz)\n value_states = self._shape(self.v_proj(hidden_states), -1, bsz)\n key_states = torch.cat([past_key_value[0], key_states], dim=2)\n value_states = torch.cat([past_key_value[1], value_states], dim=2)\n else:\n # self_attention\n key_states = self._shape(self.k_proj(hidden_states), -1, bsz)\n value_states = self._shape(self.v_proj(hidden_states), -1, bsz)\n\n if self.is_decoder:\n # 
if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.\n # Further calls to cross_attention layer can then reuse all cross-attention\n # key/value_states (first \"if\" case)\n # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of\n # all previous decoder key/value_states. Further calls to uni-directional self-attention\n # can concat previous decoder key/value_states to current projected key/value_states (third \"elif\" case)\n # if encoder bi-directional self-attention `past_key_value` is always `None`\n past_key_value = (key_states, value_states)\n\n proj_shape = (bsz * self.num_heads, -1, self.head_dim)\n query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)\n key_states = key_states.view(*proj_shape)\n value_states = value_states.view(*proj_shape)\n\n src_len = key_states.size(1)\n attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))\n\n if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):\n raise ValueError(\n f\"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is\"\n f\" {attn_weights.size()}\"\n )\n\n if attention_mask is not None:\n if attention_mask.size() != (bsz, 1, tgt_len, src_len):\n raise ValueError(\n f\"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}\"\n )\n attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask\n attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)\n\n attn_weights = nn.functional.softmax(attn_weights, dim=-1)\n\n if layer_head_mask is not None:\n if layer_head_mask.size() != (self.num_heads,):\n raise ValueError(\n f\"Head mask for a single layer should be of size {(self.num_heads,)}, but is\"\n f\" {layer_head_mask.size()}\"\n )\n attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)\n attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)\n\n if output_attentions:\n # this operation is a bit awkward, but it's required to\n # make sure that attn_weights keeps its gradient.\n # In order to do so, attn_weights have to be reshaped\n # twice and have to be reused in the following\n attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)\n attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)\n else:\n attn_weights_reshaped = None\n\n attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)\n\n attn_output = torch.bmm(attn_probs, value_states)\n\n if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):\n raise ValueError(\n f\"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is\"\n f\" {attn_output.size()}\"\n )\n\n attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)\n attn_output = attn_output.transpose(1, 2)\n\n # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be\n # partitioned aross GPUs when using tensor-parallelism.\n attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)\n\n attn_output = self.out_proj(attn_output)\n\n return attn_output, attn_weights_reshaped, past_key_value\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeedForward with Wav2Vec2->UniSpeech\nclass UniSpeechFeedForward(nn.Module):\n def __init__(self, config):\n super().__init__()\n 
self.intermediate_dropout = nn.Dropout(config.activation_dropout)\n\n self.intermediate_dense = nn.Linear(config.hidden_size, config.intermediate_size)\n if isinstance(config.hidden_act, str):\n self.intermediate_act_fn = ACT2FN[config.hidden_act]\n else:\n self.intermediate_act_fn = config.hidden_act\n\n self.output_dense = nn.Linear(config.intermediate_size, config.hidden_size)\n self.output_dropout = nn.Dropout(config.hidden_dropout)\n\n def forward(self, hidden_states):\n hidden_states = self.intermediate_dense(hidden_states)\n hidden_states = self.intermediate_act_fn(hidden_states)\n hidden_states = self.intermediate_dropout(hidden_states)\n\n hidden_states = self.output_dense(hidden_states)\n hidden_states = self.output_dropout(hidden_states)\n return hidden_states\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2EncoderLayer with Wav2Vec2->UniSpeech\nclass UniSpeechEncoderLayer(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.attention = UniSpeechAttention(\n embed_dim=config.hidden_size,\n num_heads=config.num_attention_heads,\n dropout=config.attention_dropout,\n is_decoder=False,\n )\n self.dropout = nn.Dropout(config.hidden_dropout)\n self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.feed_forward = UniSpeechFeedForward(config)\n self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n\n def forward(self, hidden_states, attention_mask=None, output_attentions=False):\n attn_residual = hidden_states\n hidden_states, attn_weights, _ = self.attention(\n hidden_states, attention_mask=attention_mask, output_attentions=output_attentions\n )\n hidden_states = self.dropout(hidden_states)\n hidden_states = attn_residual + hidden_states\n\n hidden_states = self.layer_norm(hidden_states)\n hidden_states = hidden_states + self.feed_forward(hidden_states)\n hidden_states = self.final_layer_norm(hidden_states)\n\n outputs = (hidden_states,)\n\n if output_attentions:\n outputs += (attn_weights,)\n\n return outputs\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2EncoderLayerStableLayerNorm with Wav2Vec2->UniSpeech\nclass UniSpeechEncoderLayerStableLayerNorm(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.attention = UniSpeechAttention(\n embed_dim=config.hidden_size,\n num_heads=config.num_attention_heads,\n dropout=config.attention_dropout,\n is_decoder=False,\n )\n self.dropout = nn.Dropout(config.hidden_dropout)\n self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.feed_forward = UniSpeechFeedForward(config)\n self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n\n def forward(self, hidden_states, attention_mask=None, output_attentions=False):\n attn_residual = hidden_states\n hidden_states = self.layer_norm(hidden_states)\n hidden_states, attn_weights, _ = self.attention(\n hidden_states, attention_mask=attention_mask, output_attentions=output_attentions\n )\n hidden_states = self.dropout(hidden_states)\n hidden_states = attn_residual + hidden_states\n hidden_states = hidden_states + self.feed_forward(self.final_layer_norm(hidden_states))\n\n outputs = (hidden_states,)\n\n if output_attentions:\n outputs += (attn_weights,)\n\n return outputs\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Encoder with Wav2Vec2->UniSpeech\nclass UniSpeechEncoder(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n 
self.pos_conv_embed = UniSpeechPositionalConvEmbedding(config)\n self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout)\n self.layers = nn.ModuleList([UniSpeechEncoderLayer(config) for _ in range(config.num_hidden_layers)])\n self.gradient_checkpointing = False\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n output_attentions=False,\n output_hidden_states=False,\n return_dict=True,\n ):\n all_hidden_states = () if output_hidden_states else None\n all_self_attentions = () if output_attentions else None\n\n if attention_mask is not None:\n # make sure padded tokens output 0\n hidden_states[~attention_mask] = 0.0\n\n # extend attention_mask\n attention_mask = (1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)) * -10000.0\n attention_mask = attention_mask.expand(\n attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]\n )\n\n position_embeddings = self.pos_conv_embed(hidden_states)\n hidden_states = hidden_states + position_embeddings\n hidden_states = self.layer_norm(hidden_states)\n hidden_states = self.dropout(hidden_states)\n\n deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()\n\n for layer in self.layers:\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)\n dropout_probability = np.random.uniform(0, 1)\n\n skip_the_layer = True if self.training and (dropout_probability < self.config.layerdrop) else False\n if not skip_the_layer or deepspeed_zero3_is_enabled:\n # under deepspeed zero3 all gpus must run in sync\n if self.gradient_checkpointing and self.training:\n # create gradient checkpointing function\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs, output_attentions)\n\n return custom_forward\n\n layer_outputs = torch.utils.checkpoint.checkpoint(\n create_custom_forward(layer),\n hidden_states,\n attention_mask,\n )\n else:\n layer_outputs = layer(\n hidden_states, attention_mask=attention_mask, output_attentions=output_attentions\n )\n hidden_states = layer_outputs[0]\n\n if skip_the_layer:\n layer_outputs = (None, None)\n\n if output_attentions:\n all_self_attentions = all_self_attentions + (layer_outputs[1],)\n\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)\n return BaseModelOutput(\n last_hidden_state=hidden_states,\n hidden_states=all_hidden_states,\n attentions=all_self_attentions,\n )\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2EncoderStableLayerNorm with Wav2Vec2->UniSpeech\nclass UniSpeechEncoderStableLayerNorm(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.pos_conv_embed = UniSpeechPositionalConvEmbedding(config)\n self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout)\n self.layers = nn.ModuleList(\n [UniSpeechEncoderLayerStableLayerNorm(config) for _ in range(config.num_hidden_layers)]\n )\n self.gradient_checkpointing = False\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n output_attentions=False,\n output_hidden_states=False,\n return_dict=True,\n ):\n all_hidden_states = () if output_hidden_states else None\n all_self_attentions = () if 
output_attentions else None\n\n if attention_mask is not None:\n # make sure padded tokens are not attended to\n hidden_states[~attention_mask] = 0\n\n # extend attention_mask\n attention_mask = (1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)) * -10000.0\n attention_mask = attention_mask.expand(\n attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]\n )\n\n position_embeddings = self.pos_conv_embed(hidden_states)\n hidden_states = hidden_states + position_embeddings\n hidden_states = self.dropout(hidden_states)\n\n deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()\n\n for layer in self.layers:\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)\n dropout_probability = np.random.uniform(0, 1)\n\n skip_the_layer = True if self.training and (dropout_probability < self.config.layerdrop) else False\n if not skip_the_layer or deepspeed_zero3_is_enabled:\n # under deepspeed zero3 all gpus must run in sync\n # XXX: could optimize this like synced_gpus in generate_utils but not sure if it's worth the code complication\n if self.gradient_checkpointing and self.training:\n # create gradient checkpointing function\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs, output_attentions)\n\n return custom_forward\n\n layer_outputs = torch.utils.checkpoint.checkpoint(\n create_custom_forward(layer),\n hidden_states,\n attention_mask,\n )\n else:\n layer_outputs = layer(\n hidden_states, attention_mask=attention_mask, output_attentions=output_attentions\n )\n hidden_states = layer_outputs[0]\n\n if skip_the_layer:\n layer_outputs = (None, None)\n\n if output_attentions:\n all_self_attentions = all_self_attentions + (layer_outputs[1],)\n\n hidden_states = self.layer_norm(hidden_states)\n\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)\n return BaseModelOutput(\n last_hidden_state=hidden_states,\n hidden_states=all_hidden_states,\n attentions=all_self_attentions,\n )\n\n\nclass UniSpeechGumbelVectorQuantizer(nn.Module):\n \"\"\"\n Vector quantization using gumbel softmax. 
See [CATEGORICAL REPARAMETERIZATION WITH\n GUMBEL-SOFTMAX](https://arxiv.org/pdf/1611.01144.pdf) for more information.\n \"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.num_groups = config.num_codevector_groups\n self.num_vars = config.num_codevectors_per_group\n\n if config.codevector_dim % self.num_groups != 0:\n raise ValueError(\n f\"`config.codevector_dim {config.codevector_dim} must be divisible by `config.num_codevector_groups`\"\n f\" {self.num_groups} for concatenation\"\n )\n\n # storage for codebook variables (codewords)\n self.codevectors = nn.Parameter(\n torch.FloatTensor(1, self.num_groups * self.num_vars, config.codevector_dim // self.num_groups)\n )\n self.weight_proj = nn.Linear(config.conv_dim[-1], self.num_groups * self.num_vars)\n\n # can be decayed for training\n self.temperature = 2\n\n @staticmethod\n def _compute_perplexity(probs):\n marginal_probs = probs.mean(dim=0)\n perplexity = torch.exp(-torch.sum(marginal_probs * torch.log(marginal_probs + 1e-7), dim=-1)).sum()\n return perplexity\n\n def forward(self, hidden_states):\n batch_size, sequence_length, hidden_size = hidden_states.shape\n\n # project to codevector dim\n hidden_states = self.weight_proj(hidden_states)\n hidden_states = hidden_states.view(batch_size * sequence_length * self.num_groups, -1)\n\n if self.training:\n # sample code vector probs via gumbel in differentiateable way\n codevector_probs = nn.functional.gumbel_softmax(\n hidden_states.float(), tau=self.temperature, hard=True\n ).type_as(hidden_states)\n\n # compute perplexity\n codevector_soft_dist = torch.softmax(\n hidden_states.view(batch_size * sequence_length, self.num_groups, -1).float(), dim=-1\n )\n perplexity = self._compute_perplexity(codevector_soft_dist)\n else:\n # take argmax in non-differentiable way\n # comptute hard codevector distribution (one hot)\n codevector_idx = hidden_states.argmax(dim=-1)\n codevector_probs = hidden_states.new_zeros(*hidden_states.shape).scatter_(\n -1, codevector_idx.view(-1, 1), 1.0\n )\n codevector_probs = codevector_probs.view(batch_size * sequence_length, self.num_groups, -1)\n\n perplexity = self._compute_perplexity(codevector_probs)\n\n codevector_probs = codevector_probs.view(batch_size * sequence_length, -1)\n # use probs to retrieve codevectors\n codevectors_per_group = codevector_probs.unsqueeze(-1) * self.codevectors\n codevectors = codevectors_per_group.view(batch_size * sequence_length, self.num_groups, self.num_vars, -1)\n codevectors = codevectors.sum(-2).view(batch_size, sequence_length, -1)\n\n return codevectors, perplexity\n\n\nclass UniSpeechPreTrainedModel(PreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = UniSpeechConfig\n base_model_prefix = \"unispeech\"\n main_input_name = \"input_values\"\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n supports_gradient_checkpointing = True\n\n def _init_weights(self, module):\n \"\"\"Initialize the weights\"\"\"\n # gumbel softmax requires special init\n if isinstance(module, UniSpeechGumbelVectorQuantizer):\n module.weight_proj.weight.data.normal_(mean=0.0, std=1)\n module.weight_proj.bias.data.zero_()\n nn.init.uniform_(module.codevectors)\n elif isinstance(module, UniSpeechPositionalConvEmbedding):\n nn.init.normal_(\n module.conv.weight,\n mean=0,\n std=2 * math.sqrt(1 / (module.conv.kernel_size[0] * module.conv.in_channels)),\n )\n nn.init.constant_(module.conv.bias, 0)\n elif 
isinstance(module, UniSpeechFeatureProjection):\n k = math.sqrt(1 / module.projection.in_features)\n nn.init.uniform_(module.projection.weight, a=-k, b=k)\n nn.init.uniform_(module.projection.bias, a=-k, b=k)\n elif isinstance(module, nn.Linear):\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n\n if module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n elif isinstance(module, nn.Conv1d):\n nn.init.kaiming_normal_(module.weight)\n\n if module.bias is not None:\n k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0]))\n nn.init.uniform_(module.bias, a=-k, b=k)\n\n def _get_feat_extract_output_lengths(self, input_lengths: Union[torch.LongTensor, int]):\n \"\"\"\n Computes the output length of the convolutional layers\n \"\"\"\n\n def _conv_out_length(input_length, kernel_size, stride):\n # 1D convolutional layer output length formula taken\n # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html\n return torch_int_div(input_length - kernel_size, stride) + 1\n\n for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride):\n input_lengths = _conv_out_length(input_lengths, kernel_size, stride)\n\n return input_lengths\n\n def _get_feature_vector_attention_mask(self, feature_vector_length: int, attention_mask: torch.LongTensor):\n # Effectively attention_mask.sum(-1), but not inplace to be able to run\n # on inference mode.\n non_padded_lengths = attention_mask.cumsum(dim=-1)[:, -1]\n output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths).to(torch.long)\n batch_size = attention_mask.shape[0]\n\n attention_mask = torch.zeros(\n (batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device\n )\n # these two operations makes sure that all values before the output lengths idxs are attended to\n attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1\n attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()\n return attention_mask\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, (UniSpeechEncoder, UniSpeechEncoderStableLayerNorm, UniSpeechFeatureEncoder)):\n module.gradient_checkpointing = value\n\n\nUNISPEECH_START_DOCSTRING = r\"\"\"\n UniSpeech was proposed in [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled\n Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei,\n Michael Zeng, Xuedong Huang.\n\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving etc.).\n\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`UniSpeechConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. 
Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\"\"\"\n\n\nUNISPEECH_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):\n Float values of input raw speech waveform. Values can be obtained by loading a *.flac* or *.wav* audio file\n into an array of type *List[float]* or a *numpy.ndarray*, *e.g.* via the soundfile library (*pip install\n soundfile*). To prepare the array into *input_values*, the [`UniSpeechProcessor`] should be used for\n padding and conversion into a tensor of type *torch.FloatTensor*. See [`UniSpeechProcessor.__call__`] for\n details.\n attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0,\n 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n\n <Tip warning={true}>\n\n `attention_mask` should only be passed if the corresponding processor has `config.return_attention_mask ==\n True`. For all models whose processor has `config.return_attention_mask == False`, `attention_mask` should\n **not** be passed to avoid degraded performance when doing batched inference. For such models\n `input_values` should simply be padded with 0 and passed without `attention_mask`. Be aware that these\n models also yield slightly different results depending on whether `input_values` is padded or not.\n\n </Tip>\n\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare UniSpeech Model transformer outputting raw hidden-states without any specific head on top.\",\n UNISPEECH_START_DOCSTRING,\n)\nclass UniSpeechModel(UniSpeechPreTrainedModel):\n def __init__(self, config: UniSpeechConfig):\n super().__init__(config)\n self.config = config\n self.feature_extractor = UniSpeechFeatureEncoder(config)\n self.feature_projection = UniSpeechFeatureProjection(config)\n\n if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0:\n self.masked_spec_embed = nn.Parameter(torch.FloatTensor(config.hidden_size).uniform_())\n\n if config.do_stable_layer_norm:\n self.encoder = UniSpeechEncoderStableLayerNorm(config)\n else:\n self.encoder = UniSpeechEncoder(config)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Model._mask_hidden_states\n def _mask_hidden_states(\n self,\n hidden_states: torch.FloatTensor,\n mask_time_indices: Optional[torch.FloatTensor] = None,\n attention_mask: Optional[torch.LongTensor] = None,\n ):\n \"\"\"\n Masks extracted features along time axis and/or along feature axis according to\n [SpecAugment](https://arxiv.org/abs/1904.08779).\n \"\"\"\n\n # `config.apply_spec_augment` can set masking to False\n if not getattr(self.config, \"apply_spec_augment\", True):\n return hidden_states\n\n # generate indices & apply SpecAugment along time axis\n batch_size, sequence_length, hidden_size = hidden_states.size()\n\n if mask_time_indices is not None:\n # apply SpecAugment along time axis with given mask_time_indices\n hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)\n elif self.config.mask_time_prob > 0 and self.training:\n mask_time_indices = _compute_mask_indices(\n (batch_size, sequence_length),\n mask_prob=self.config.mask_time_prob,\n mask_length=self.config.mask_time_length,\n attention_mask=attention_mask,\n min_masks=self.config.mask_time_min_masks,\n )\n mask_time_indices = torch.tensor(mask_time_indices, device=hidden_states.device, dtype=torch.bool)\n hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)\n\n if self.config.mask_feature_prob > 0 and self.training:\n # generate indices & apply SpecAugment along feature axis\n mask_feature_indices = _compute_mask_indices(\n (batch_size, hidden_size),\n mask_prob=self.config.mask_feature_prob,\n mask_length=self.config.mask_feature_length,\n min_masks=self.config.mask_feature_min_masks,\n )\n mask_feature_indices = torch.tensor(mask_feature_indices, device=hidden_states.device, dtype=torch.bool)\n mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1)\n hidden_states[mask_feature_indices] = 0\n\n return hidden_states\n\n @add_start_docstrings_to_model_forward(UNISPEECH_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n processor_class=_PROCESSOR_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=Wav2Vec2BaseModelOutput,\n config_class=_CONFIG_FOR_DOC,\n modality=\"audio\",\n expected_output=_EXPECTED_OUTPUT_SHAPE,\n )\n def forward(\n self,\n input_values: Optional[torch.Tensor],\n attention_mask: Optional[torch.Tensor] = None,\n mask_time_indices: Optional[torch.FloatTensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n 
return_dict: Optional[bool] = None,\n ) -> Union[Tuple, Wav2Vec2BaseModelOutput]:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n extract_features = self.feature_extractor(input_values)\n extract_features = extract_features.transpose(1, 2)\n\n if attention_mask is not None:\n # compute reduced attention_mask corresponding to feature vectors\n attention_mask = self._get_feature_vector_attention_mask(extract_features.shape[1], attention_mask)\n\n hidden_states, extract_features = self.feature_projection(extract_features)\n hidden_states = self._mask_hidden_states(\n hidden_states, mask_time_indices=mask_time_indices, attention_mask=attention_mask\n )\n\n encoder_outputs = self.encoder(\n hidden_states,\n attention_mask=attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n hidden_states = encoder_outputs[0]\n\n if not return_dict:\n return (hidden_states, extract_features) + encoder_outputs[1:]\n\n return Wav2Vec2BaseModelOutput(\n last_hidden_state=hidden_states,\n extract_features=extract_features,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"UniSpeech Model with a vector-quantization module and ctc loss for pre-training.\"\"\", UNISPEECH_START_DOCSTRING\n)\nclass UniSpeechForPreTraining(UniSpeechPreTrainedModel):\n def __init__(self, config: UniSpeechConfig):\n super().__init__(config)\n self.unispeech = UniSpeechModel(config)\n self.dropout_features = nn.Dropout(config.feat_quantizer_dropout)\n\n self.quantizer = UniSpeechGumbelVectorQuantizer(config)\n self.project_q = nn.Linear(config.codevector_dim, config.proj_codevector_dim)\n self.project_hid = nn.Linear(config.proj_codevector_dim, config.hidden_size)\n\n self.ctc_proj = nn.Linear(config.hidden_size, config.num_ctc_classes)\n self.dropout = nn.Dropout(config.final_dropout)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def set_gumbel_temperature(self, temperature: int):\n \"\"\"\n Set the Gumbel softmax temperature to a given value. 
Only necessary for training\n \"\"\"\n self.quantizer.temperature = temperature\n\n def freeze_feature_extractor(self):\n \"\"\"\n Calling this function will disable the gradient computation for the feature encoder so that its parameters will\n not be updated during training.\n \"\"\"\n warnings.warn(\n \"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5.\"\n \"Please use the equivalent `freeze_feature_encoder` method instead.\",\n FutureWarning,\n )\n self.freeze_feature_encoder()\n\n def freeze_feature_encoder(self):\n \"\"\"\n Calling this function will disable the gradient computation for the feature encoder so that its parameter will\n not be updated during training.\n \"\"\"\n self.unispeech.feature_extractor._freeze_parameters()\n\n @staticmethod\n def compute_contrastive_logits(\n target_features: torch.FloatTensor,\n negative_features: torch.FloatTensor,\n predicted_features: torch.FloatTensor,\n temperature: int = 1,\n ):\n \"\"\"\n Compute logits for contrastive loss based using cosine similarity as the distance measure between\n `[positive_feature, negative_features]` and `[predicted_features]`. Additionally, temperature can be applied.\n \"\"\"\n target_features = torch.cat([target_features, negative_features], dim=0)\n\n logits = torch.cosine_similarity(predicted_features.float(), target_features.float(), dim=-1)\n logits = logits.type_as(target_features)\n\n # apply temperature\n logits = logits / temperature\n return logits\n\n @add_start_docstrings_to_model_forward(UNISPEECH_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=UniSpeechForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_values: Optional[torch.Tensor],\n attention_mask: Optional[torch.Tensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, UniSpeechForPreTrainingOutput]:\n r\"\"\"\n mask_time_indices (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices to mask extracted features for contrastive loss. When in training mode, model learns to predict\n masked extracted features in *config.proj_codevector_dim* space.\n sampled_negative_indices (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_negatives)`, *optional*):\n Indices indicating which quantized target vectors are used as negative sampled vectors in contrastive loss.\n Required input for pre-training.\n\n Returns:\n\n Example:\n\n ```python\n >>> import torch\n >>> from transformers import Wav2Vec2FeatureExtractor, UniSpeechForPreTraining\n >>> from transformers.models.unispeech.modeling_unispeech import _compute_mask_indices\n\n >>> feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(\n ... \"hf-internal-testing/tiny-random-unispeech-sat\"\n ... 
)\n >>> model = UniSpeechForPreTraining.from_pretrained(\"microsoft/unispeech-large-1500h-cv\")\n >>> # TODO: Add full pretraining example\n ```\"\"\"\n\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.unispeech(\n input_values,\n attention_mask=attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n transformer_features = outputs[0]\n\n # quantize all (unmasked) extracted features and project to final vq dim\n extract_features = self.dropout_features(outputs[1])\n quantized_features, codevector_perplexity = self.quantizer(extract_features)\n\n # project quantized features twice\n quantized_features = self.project_q(quantized_features)\n quantized_features = self.project_hid(quantized_features)\n\n prob_replace_matrix = torch.empty(transformer_features.size(0), transformer_features.size(1)).fill_(\n self.config.replace_prob\n )\n prob_replace_matrix = prob_replace_matrix.transpose(0, 1)\n sampled_replace_matrix = torch.bernoulli(prob_replace_matrix).bool().to(transformer_features.device)\n sampled_replace_matrix = sampled_replace_matrix.transpose(0, 1)\n sampled_replace_matrix = sampled_replace_matrix.unsqueeze(-1)\n logits = transformer_features.masked_fill(sampled_replace_matrix, 0.0) + (\n quantized_features.masked_fill(~sampled_replace_matrix, 0.0)\n )\n\n # project to ctc units\n logits = self.dropout(logits)\n logits = self.ctc_proj(logits)\n\n # TODO(PVP) - add negative sampling & loss computation\n loss = None\n if not return_dict:\n if loss is not None:\n return (loss, transformer_features, quantized_features, codevector_perplexity) + outputs[2:]\n return (transformer_features, quantized_features, codevector_perplexity) + outputs[2:]\n\n return UniSpeechForPreTrainingOutput(\n loss=loss,\n projected_states=transformer_features,\n projected_quantized_states=quantized_features,\n codevector_perplexity=codevector_perplexity,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"UniSpeech Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).\"\"\",\n UNISPEECH_START_DOCSTRING,\n)\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForCTC with Wav2Vec2->UniSpeech, wav2vec2->unispeech, WAV_2_VEC_2->UNISPEECH\nclass UniSpeechForCTC(UniSpeechPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.unispeech = UniSpeechModel(config)\n self.dropout = nn.Dropout(config.final_dropout)\n\n if config.vocab_size is None:\n raise ValueError(\n f\"You are trying to instantiate {self.__class__} with a configuration that \"\n \"does not define the vocabulary size of the language model head. Please \"\n \"instantiate the model as follows: `UniSpeechForCTC.from_pretrained(..., vocab_size=vocab_size)`. 
\"\n \"or define `vocab_size` of your model's configuration.\"\n )\n output_hidden_size = (\n config.output_hidden_size if hasattr(config, \"add_adapter\") and config.add_adapter else config.hidden_size\n )\n self.lm_head = nn.Linear(output_hidden_size, config.vocab_size)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def freeze_feature_extractor(self):\n \"\"\"\n Calling this function will disable the gradient computation for the feature encoder so that its parameter will\n not be updated during training.\n \"\"\"\n warnings.warn(\n \"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5.\"\n \"Please use the equivalent `freeze_feature_encoder` method instead.\",\n FutureWarning,\n )\n self.freeze_feature_encoder()\n\n def freeze_feature_encoder(self):\n \"\"\"\n Calling this function will disable the gradient computation for the feature encoder so that its parameter will\n not be updated during training.\n \"\"\"\n self.unispeech.feature_extractor._freeze_parameters()\n\n @add_start_docstrings_to_model_forward(UNISPEECH_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n processor_class=_PROCESSOR_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=CausalLMOutput,\n config_class=_CONFIG_FOR_DOC,\n expected_output=_CTC_EXPECTED_OUTPUT,\n expected_loss=_CTC_EXPECTED_LOSS,\n )\n def forward(\n self,\n input_values: Optional[torch.Tensor],\n attention_mask: Optional[torch.Tensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n labels: Optional[torch.Tensor] = None,\n ) -> Union[Tuple, CausalLMOutput]:\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*):\n Labels for connectionist temporal classification. Note that `target_length` has to be smaller or equal to\n the sequence length of the output logits. 
Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`.\n All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ...,\n config.vocab_size - 1]`.\n \"\"\"\n\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.unispeech(\n input_values,\n attention_mask=attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n hidden_states = outputs[0]\n hidden_states = self.dropout(hidden_states)\n\n logits = self.lm_head(hidden_states)\n\n loss = None\n if labels is not None:\n\n if labels.max() >= self.config.vocab_size:\n raise ValueError(f\"Label values must be <= vocab_size: {self.config.vocab_size}\")\n\n # retrieve loss input_lengths from attention_mask\n attention_mask = (\n attention_mask if attention_mask is not None else torch.ones_like(input_values, dtype=torch.long)\n )\n input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long)\n\n # assuming that padded tokens are filled with -100\n # when not being attended to\n labels_mask = labels >= 0\n target_lengths = labels_mask.sum(-1)\n flattened_targets = labels.masked_select(labels_mask)\n\n # ctc_loss doesn't support fp16\n log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1)\n\n with torch.backends.cudnn.flags(enabled=False):\n loss = nn.functional.ctc_loss(\n log_probs,\n flattened_targets,\n input_lengths,\n target_lengths,\n blank=self.config.pad_token_id,\n reduction=self.config.ctc_loss_reduction,\n zero_infinity=self.config.ctc_zero_infinity,\n )\n\n if not return_dict:\n output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]\n return ((loss,) + output) if loss is not None else output\n\n return CausalLMOutput(\n loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions\n )\n\n\n@add_start_docstrings(\n \"\"\"\n UniSpeech Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like\n SUPERB Keyword Spotting.\n \"\"\",\n UNISPEECH_START_DOCSTRING,\n)\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForSequenceClassification with Wav2Vec2->UniSpeech, wav2vec2->unispeech, WAV_2_VEC_2->UNISPEECH\nclass UniSpeechForSequenceClassification(UniSpeechPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n if hasattr(config, \"add_adapter\") and config.add_adapter:\n raise ValueError(\n \"Sequence classification does not support the use of UniSpeech adapters (config.add_adapter=True)\"\n )\n self.unispeech = UniSpeechModel(config)\n num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings\n if config.use_weighted_layer_sum:\n self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)\n self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size)\n self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def freeze_feature_extractor(self):\n \"\"\"\n Calling this function will disable the gradient computation for the feature encoder so that its parameters will\n not be updated during training.\n \"\"\"\n warnings.warn(\n \"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5.\"\n \"Please use the equivalent `freeze_feature_encoder` method instead.\",\n FutureWarning,\n )\n 
self.freeze_feature_encoder()\n\n def freeze_feature_encoder(self):\n \"\"\"\n Calling this function will disable the gradient computation for the feature encoder so that its parameter will\n not be updated during training.\n \"\"\"\n self.unispeech.feature_extractor._freeze_parameters()\n\n def freeze_base_model(self):\n \"\"\"\n Calling this function will disable the gradient computation for the base model so that its parameters will not\n be updated during training. Only the classification head will be updated.\n \"\"\"\n for param in self.unispeech.parameters():\n param.requires_grad = False\n\n @add_start_docstrings_to_model_forward(UNISPEECH_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n processor_class=_FEAT_EXTRACTOR_FOR_DOC,\n checkpoint=_SEQ_CLASS_CHECKPOINT,\n output_type=SequenceClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n modality=\"audio\",\n expected_output=_SEQ_CLASS_EXPECTED_OUTPUT,\n expected_loss=_SEQ_CLASS_EXPECTED_LOSS,\n )\n def forward(\n self,\n input_values: Optional[torch.Tensor],\n attention_mask: Optional[torch.Tensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n labels: Optional[torch.Tensor] = None,\n ) -> Union[Tuple, SequenceClassifierOutput]:\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,\n config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If\n `config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states\n\n outputs = self.unispeech(\n input_values,\n attention_mask=attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n if self.config.use_weighted_layer_sum:\n hidden_states = outputs[_HIDDEN_STATES_START_POSITION]\n hidden_states = torch.stack(hidden_states, dim=1)\n norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)\n hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)\n else:\n hidden_states = outputs[0]\n\n hidden_states = self.projector(hidden_states)\n if attention_mask is None:\n pooled_output = hidden_states.mean(dim=1)\n else:\n padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask)\n hidden_states[~padding_mask] = 0.0\n pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1)\n\n logits = self.classifier(pooled_output)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))\n\n if not return_dict:\n output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n"
] | [
[
"numpy.ones",
"torch.stack",
"torch.nn.functional.softmax",
"torch.log",
"torch.nn.ModuleList",
"torch.cat",
"torch.nn.Dropout",
"torch.bmm",
"torch.nn.init.kaiming_normal_",
"torch.nn.GroupNorm",
"torch.nn.functional.dropout",
"torch.backends.cudnn.flags",
"torch.nn.LayerNorm",
"torch.arange",
"numpy.random.rand",
"torch.bernoulli",
"numpy.random.uniform",
"torch.ones_like",
"torch.ones",
"torch.nn.functional.ctc_loss",
"numpy.zeros",
"torch.nn.init.uniform_",
"torch.tensor",
"numpy.arange",
"torch.nn.Conv1d",
"numpy.broadcast_to",
"torch.nn.utils.weight_norm",
"numpy.array",
"torch.FloatTensor",
"torch.nn.functional.log_softmax",
"torch.nn.Linear",
"torch.nn.init.constant_",
"torch.nn.CrossEntropyLoss",
"torch.zeros",
"numpy.put_along_axis"
]
] |
sashuIya/ssd.pytorch | [
"fe7d8722414fef4cce32f67422c896ef0c45d6bc"
] | [
"layers/box_utils.py"
] | [
"import torch\n\n\ndef point_form(boxes):\n \"\"\" Convert prior_boxes to (xmin, ymin, xmax, ymax)\n representation for comparison to point form ground truth data.\n Args:\n boxes: (tensor) center-size default boxes from priorbox layers.\n Return:\n boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes.\n \"\"\"\n return torch.cat((boxes[:, :2] - boxes[:, 2:]/2, # xmin, ymin\n boxes[:, :2] + boxes[:, 2:]/2), 1) # xmax, ymax\n\n\ndef center_size(boxes):\n \"\"\" Convert prior_boxes to (cx, cy, w, h)\n representation for comparison to center-size form ground truth data.\n Args:\n boxes: (tensor) point_form boxes\n Return:\n boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes.\n \"\"\"\n return torch.cat((boxes[:, 2:] + boxes[:, :2])/2, # cx, cy\n boxes[:, 2:] - boxes[:, :2], 1) # w, h\n\n\ndef intersect(box_a, box_b):\n \"\"\" We resize both tensors to [A,B,2] without new malloc:\n [A,2] -> [A,1,2] -> [A,B,2]\n [B,2] -> [1,B,2] -> [A,B,2]\n Then we compute the area of intersect between box_a and box_b.\n Args:\n box_a: (tensor) bounding boxes, Shape: [A,4].\n box_b: (tensor) bounding boxes, Shape: [B,4].\n Return:\n (tensor) intersection area, Shape: [A,B].\n \"\"\"\n A = box_a.size(0)\n B = box_b.size(0)\n max_xy = torch.min(box_a[:, 2:].unsqueeze(1).expand(A, B, 2),\n box_b[:, 2:].unsqueeze(0).expand(A, B, 2))\n min_xy = torch.max(box_a[:, :2].unsqueeze(1).expand(A, B, 2),\n box_b[:, :2].unsqueeze(0).expand(A, B, 2))\n inter = torch.clamp((max_xy - min_xy), min=0)\n return inter[:, :, 0] * inter[:, :, 1]\n\n\ndef jaccard(box_a, box_b):\n \"\"\"Compute the jaccard overlap of two sets of boxes. The jaccard overlap\n is simply the intersection over union of two boxes. Here we operate on\n ground truth boxes and default boxes.\n E.g.:\n A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B)\n Args:\n box_a: (tensor) Ground truth bounding boxes, Shape: [num_objects,4]\n box_b: (tensor) Prior boxes from priorbox layers, Shape: [num_priors,4]\n Return:\n jaccard overlap: (tensor) Shape: [box_a.size(0), box_b.size(0)]\n \"\"\"\n inter = intersect(box_a, box_b)\n area_a = ((box_a[:, 2]-box_a[:, 0]) *\n (box_a[:, 3]-box_a[:, 1])).unsqueeze(1).expand_as(inter) # [A,B]\n area_b = ((box_b[:, 2]-box_b[:, 0]) *\n (box_b[:, 3]-box_b[:, 1])).unsqueeze(0).expand_as(inter) # [A,B]\n union = area_a + area_b - inter\n return inter / union # [A,B]\n\n\ndef match(threshold, truths, priors, variances, labels, loc_t, conf_t, idx):\n \"\"\"Match each prior box with the ground truth box of the highest jaccard\n overlap, encode the bounding boxes, then return the matched indices\n corresponding to both confidence and location preds.\n Args:\n threshold: (float) The overlap threshold used when mathing boxes.\n truths: (tensor) Ground truth boxes, Shape: [num_obj, num_priors].\n priors: (tensor) Prior boxes from priorbox layers, Shape: [n_priors,4].\n variances: (tensor) Variances corresponding to each prior coord,\n Shape: [num_priors, 4].\n labels: (tensor) All the class labels for the image, Shape: [num_obj].\n loc_t: (tensor) Tensor to be filled w/ endcoded location targets.\n conf_t: (tensor) Tensor to be filled w/ matched indices for conf preds.\n idx: (int) current batch index\n Return:\n The matched indices corresponding to 1)location and 2)confidence preds.\n \"\"\"\n # jaccard index\n overlaps = jaccard(\n truths,\n point_form(priors)\n )\n # (Bipartite Matching)\n # [1,num_objects] best prior for each ground truth\n best_prior_overlap, best_prior_idx = overlaps.max(1)\n # [1,num_priors] best 
ground truth for each prior\n best_truth_overlap, best_truth_idx = overlaps.max(0)\n best_truth_idx.squeeze_(0)\n best_truth_overlap.squeeze_(0)\n best_prior_idx.squeeze_(1)\n best_prior_overlap.squeeze_(1)\n best_truth_overlap.index_fill_(0, best_prior_idx, 2) # ensure best prior\n # TODO refactor: index best_prior_idx with long tensor\n # ensure every gt matches with its prior of max overlap\n for j in range(best_prior_idx.size(0)):\n best_truth_idx[best_prior_idx[j]] = j\n matches = truths[best_truth_idx] # Shape: [num_priors,4]\n conf = labels[best_truth_idx] + 1 # Shape: [num_priors]\n conf[best_truth_overlap < threshold] = 0 # label as background\n loc = encode(matches, priors, variances)\n loc_t[idx] = loc # [num_priors,4] encoded offsets to learn\n conf_t[idx] = conf # [num_priors] top class label for each prior\n\n\ndef encode(matched, priors, variances):\n \"\"\"Encode the variances from the priorbox layers into the ground truth boxes\n we have matched (based on jaccard overlap) with the prior boxes.\n Args:\n matched: (tensor) Coords of ground truth for each prior in point-form\n Shape: [num_priors, 4].\n priors: (tensor) Prior boxes in center-offset form\n Shape: [num_priors,4].\n variances: (list[float]) Variances of priorboxes\n Return:\n encoded boxes (tensor), Shape: [num_priors, 4]\n \"\"\"\n\n # dist b/t match center and prior's center\n g_cxcy = (matched[:, :2] + matched[:, 2:])/2 - priors[:, :2]\n # encode variance\n g_cxcy /= (variances[0] * priors[:, 2:])\n # match wh / prior wh\n g_wh = (matched[:, 2:] - matched[:, :2]) / priors[:, 2:]\n g_wh = torch.log(g_wh) / variances[1]\n # return target for smooth_l1_loss\n return torch.cat([g_cxcy, g_wh], 1) # [num_priors,4]\n\n\n# Adapted from https://github.com/Hakuyume/chainer-ssd\ndef decode(loc, priors, variances):\n \"\"\"Decode locations from predictions using priors to undo\n the encoding we did for offset regression at train time.\n Args:\n loc (tensor): location predictions for loc layers,\n Shape: [num_priors,4]\n priors (tensor): Prior boxes in center-offset form.\n Shape: [num_priors,4].\n variances: (list[float]) Variances of priorboxes\n Return:\n decoded bounding box predictions\n \"\"\"\n\n boxes = torch.cat((\n priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:],\n priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])), 1)\n boxes[:, :2] -= boxes[:, 2:] / 2\n boxes[:, 2:] += boxes[:, :2]\n return boxes\n\n\ndef log_sum_exp(x):\n \"\"\"Utility function for computing log_sum_exp while determining\n This will be used to determine unaveraged confidence loss across\n all examples in a batch.\n Args:\n x (Variable(tensor)): conf_preds from conf layers\n \"\"\"\n x_max = x.data.max()\n return torch.log(torch.sum(torch.exp(x-x_max), 1)) + x_max\n\n\n# Original author: Francisco Massa:\n# https://github.com/fmassa/object-detection.torch\n# Ported to PyTorch by Max deGroot (02/01/2017)\ndef nms(boxes, scores, overlap=0.5, top_k=200):\n \"\"\"Apply non-maximum suppression at test time to avoid detecting too many\n overlapping bounding boxes for a given object.\n Args:\n boxes: (tensor) The location preds for the img, Shape: [num_priors,4].\n scores: (tensor) The class predscores for the img, Shape:[num_priors].\n overlap: (float) The overlap thresh for suppressing unnecessary boxes.\n top_k: (int) The Maximum number of box preds to consider.\n Return:\n The indices of the kept boxes with respect to num_priors.\n \"\"\"\n\n keep = scores.new(scores.size(0)).zero_().long()\n if boxes.numel() == 0:\n return keep\n x1 
= boxes[:, 0]\n y1 = boxes[:, 1]\n x2 = boxes[:, 2]\n y2 = boxes[:, 3]\n area = torch.mul(x2 - x1, y2 - y1)\n v, idx = scores.sort(0) # sort in ascending order\n # I = I[v >= 0.01]\n idx = idx[-top_k:] # indices of the top-k largest vals\n xx1 = boxes.new()\n yy1 = boxes.new()\n xx2 = boxes.new()\n yy2 = boxes.new()\n w = boxes.new()\n h = boxes.new()\n\n # keep = torch.Tensor()\n count = 0\n while idx.numel() > 0:\n i = idx[-1] # index of current largest val\n # keep.append(i)\n keep[count] = i\n count += 1\n if idx.size(0) == 1:\n break\n idx = idx[:-1] # remove kept element from view\n # load bboxes of next highest vals\n torch.index_select(x1, 0, idx, out=xx1)\n torch.index_select(y1, 0, idx, out=yy1)\n torch.index_select(x2, 0, idx, out=xx2)\n torch.index_select(y2, 0, idx, out=yy2)\n # store element-wise max with next highest score\n xx1 = torch.clamp(xx1, min=x1[i])\n yy1 = torch.clamp(yy1, min=y1[i])\n xx2 = torch.clamp(xx2, max=x2[i])\n yy2 = torch.clamp(yy2, max=y2[i])\n w.resize_as_(xx2)\n h.resize_as_(yy2)\n w = xx2 - xx1\n h = yy2 - yy1\n # check sizes of xx1 and xx2.. after each iteration\n w = torch.clamp(w, min=0.0)\n h = torch.clamp(h, min=0.0)\n inter = w*h\n # IoU = i / (area(a) + area(b) - i)\n rem_areas = torch.index_select(area, 0, idx) # load remaining areas)\n union = (rem_areas - inter) + area[i]\n IoU = inter/union # store result in iou\n # keep only elements with an IoU <= overlap\n idx = idx[IoU.le(overlap)]\n return keep, count\n"
] | [
[
"torch.exp",
"torch.mul",
"torch.log",
"torch.index_select",
"torch.cat",
"torch.clamp"
]
] |
meet-seth/Coursera-Deep-Learning | [
"195fad43e99de5efe6491817ad2b79e12665cc2a"
] | [
"Natural Language Processing with Attention Models/Week 4 - Chatbot/w4_unittest.py"
] | [
"import numpy as np\nimport trax\n#from trax import layers as tl\n#from trax.fastmath import numpy as fastnp\n#from trax.supervised import training\n\n# UNIT TEST for UNQ_C1\ndef test_get_conversation(target):\n\n data = {'file1.json': {'log':[{'text': 'hi'},\n {'text': 'hello'},\n {'text': 'nice'}]},\n 'file2.json':{'log':[{'text': 'a b'}, \n {'text': ''}, \n {'text': 'good '}, \n {'text': 'no?'}]}}\n \n res1 = target('file1.json', data)\n res2 = target('file2.json', data)\n \n expected1 = ' Person 1: hi Person 2: hello Person 1: nice'\n expected2 = ' Person 1: a b Person 2: Person 1: good Person 2: no?'\n\n success = 0\n fails = 0\n \n try:\n assert res1 == expected1\n success += 1\n except ValueError:\n print('Error in test 1 \\nResult : ', res1, 'x \\nExpected: ', expected1)\n fails += 1\n try:\n assert res2 == expected2\n success += 1\n except:\n print('Error in test 2 \\nResult : ', res2, ' \\nExpected: ', expected2)\n fails += 1\n \n if fails == 0:\n print(\"\\033[92m All tests passed\")\n else:\n print('\\033[92m', success,\" Tests passed\")\n print('\\033[91m', fails, \" Tests failed\")\n\n\n# UNIT TEST for UNQ_C2\ndef test_reversible_layer_forward(target):\n f1 = lambda x: x + 2\n g1 = lambda x: x * 3\n \n f2 = lambda x: x + 1\n g2 = lambda x: x * 2\n \n input_vector1 = np.array([1, 2, 3, 4, 5, 6, 7, 8])\n expected1 = np.array([8, 10, 12, 14, 29, 36, 43, 50])\n \n input_vector2 = np.array([1] * 128)\n expected2 = np.array([3] * 64 + [7] * 64)\n \n success = 0\n fails = 0\n try:\n res = target(input_vector1, f1, g1)\n assert isinstance(res, np.ndarray)\n success += 1\n except:\n print('Wrong type! Output is not of type np.ndarray')\n fails += 1\n try:\n res = target(input_vector1, f1, g1)\n assert np.allclose(res, expected1)\n success += 1\n except ValueError:\n print('Error in test 1 \\nResult : ', res, 'x \\nExpected: ', expected1)\n fails += 1\n try:\n res = target(input_vector2, f2, g2)\n assert np.allclose(res, expected2)\n success += 1\n except:\n print('Error in test 2 \\nResult : ', res, ' \\nExpected: ', expected2)\n fails += 1\n \n if fails == 0:\n print(\"\\033[92m All tests passed\")\n else:\n print('\\033[92m', success,\" Tests passed\")\n print('\\033[91m', fails, \" Tests failed\")\n\n\n# UNIT TEST for UNQ_C3\ndef test_reversible_layer_reverse(target):\n \n f1 = lambda x: x + 2\n g1 = lambda x: x * 3\n \n f2 = lambda x: x + 1\n g2 = lambda x: x * 2\n \n input_vector1 = np.array([1, 2, 3, 4, 5, 6, 7, 8])\n expected1 = np.array([-3, 0, 3, 6, 2, 0, -2, -4])\n \n input_vector2 = np.array([1] * 128)\n expected2 = np.array([1] * 64 + [-1] * 64)\n \n success = 0\n fails = 0\n try:\n res = target(input_vector1, f1, g1)\n assert isinstance(res, np.ndarray)\n success += 1\n except:\n print('Wrong type! 
Output is not of type np.ndarray')\n fails += 1\n try:\n res = target(input_vector1, f1, g1)\n assert np.allclose(res, expected1)\n success += 1\n except ValueError:\n print('Error in test 1 \\nResult : ', res, 'x \\nExpected: ', expected1)\n fails += 1\n try:\n res = target(input_vector2, f2, g2)\n assert np.allclose(res, expected2)\n success += 1\n except:\n print('Error in test 2 \\nResult : ', res, ' \\nExpected: ', expected2)\n fails += 1\n \n if fails == 0:\n print(\"\\033[92m All tests passed\")\n else:\n print('\\033[92m', success,\" Tests passed\")\n print('\\033[91m', fails, \" Tests failed\")\n \n\n# UNIT TEST for UNQ_C4\ndef test_ReformerLM(target):\n test_cases = [\n {\n \"name\":\"layer_len_check\",\n \"expected\":11,\n \"error\":\"We found {} layers in your model. It should be 11.\\nCheck the LSTM stack before the dense layer\"\n },\n {\n \"name\":\"simple_test_check\",\n \"expected\":\"Serial[ShiftRight(1)Embedding_train_512DropoutPositionalEncodingDup_out2ReversibleSerial_in2_out2[ReversibleHalfResidualV2_in2_out2[Serial[LayerNorm]SelfAttention]ReversibleSwap_in2_out2ReversibleHalfResidualV2_in2_out2[Serial[LayerNormDense_2048DropoutFastGeluDense_512Dropout]]ReversibleSwap_in2_out2ReversibleHalfResidualV2_in2_out2[Serial[LayerNorm]SelfAttention]ReversibleSwap_in2_out2ReversibleHalfResidualV2_in2_out2[Serial[LayerNormDense_2048DropoutFastGeluDense_512Dropout]]ReversibleSwap_in2_out2]Concatenate_in2LayerNormDropoutDense_trainLogSoftmax]\",\n \"error\":\"The ReformerLM is not defined properly.\"\n }\n ]\n temp_model = target('train')\n \n success = 0\n fails = 0\n \n for test_case in test_cases:\n try:\n if test_case['name'] == \"simple_test_check\":\n assert test_case[\"expected\"] == str(temp_model).replace(' ', '').replace('\\n','')\n success += 1\n if test_case['name'] == \"layer_len_check\":\n if test_case[\"expected\"] == len(temp_model.sublayers):\n success += 1\n else:\n print(test_case[\"error\"].format(len(temp_model.sublayers))) \n fails += 1\n except:\n print(test_case['error'])\n fails += 1\n \n if fails == 0:\n print(\"\\033[92m All tests passed\")\n else:\n print('\\033[92m', success,\" Tests passed\")\n print('\\033[91m', fails, \" Tests failed\")\n\n\n# UNIT TEST for UNQ_C5\ndef test_tasks(train_task, eval_task):\n target = train_task\n success = 0\n fails = 0\n \n # Test the labeled data parameter for train_task\n try:\n strlabel = str(target._labeled_data)\n assert (\"generator\" in strlabel) and (\"add_loss_weights\" in strlabel)\n success += 1\n except:\n fails += 1\n print(\"Wrong labeled data parameter in train_task\")\n \n # Test the cross entropy loss data parameter\n try:\n strlabel = str(target._loss_layer)\n assert(strlabel == \"CrossEntropyLoss_in3\")\n success += 1\n except:\n fails += 1\n print(\"Wrong loss functions. 
CrossEntropyLoss_in3 was expected\")\n \n # Test the optimizer parameter\n try:\n assert(isinstance(target.optimizer, trax.optimizers.adam.Adam))\n success += 1\n except:\n fails += 1\n print(\"Wrong optimizer\")\n \n # Test the schedule parameter\n try:\n assert(isinstance(target._lr_schedule,trax.supervised.lr_schedules._BodyAndTail))\n success += 1\n except:\n fails += 1\n print(\"Wrong learning rate schedule type\")\n \n # Test the _n_steps_per_checkpoint parameter\n try:\n assert(target._n_steps_per_checkpoint==10)\n success += 1\n except:\n fails += 1\n print(\"Wrong checkpoint step frequency\")\n \n target = eval_task\n # Test the labeled data parameter for eval_task\n try:\n strlabel = str(target._labeled_data)\n assert (\"generator\" in strlabel) and (\"add_loss_weights\" in strlabel)\n success += 1\n except:\n fails += 1\n print(\"Wrong labeled data parameter in eval_task\")\n \n # Test the metrics in eval_task \n try:\n strlabel = str(target._metrics).replace(' ', '')\n assert(strlabel == \"[CrossEntropyLoss_in3,Accuracy_in3]\")\n success += 1\n except:\n fails += 1\n print(f\"Wrong metrics. found {strlabel} but expected [CrossEntropyLoss_in3,Accuracy_in3]\")\n \n \n if fails == 0:\n print(\"\\033[92m All tests passed\")\n else:\n print('\\033[92m', success,\" Tests passed\")\n print('\\033[91m', fails, \" Tests failed\")\n \n\n"
] | [
[
"numpy.array",
"numpy.allclose"
]
] |
PeterDomanski/agents | [
"63c1c76f16f2068a637b26282c34a8825583e73e"
] | [
"tf_agents/bandits/agents/neural_linucb_agent_test.py"
] | [
"# coding=utf-8\n# Copyright 2018 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for tf_agents.bandits.agents.neural_linucb_agent.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nfrom absl.testing import parameterized\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_probability as tfp\nfrom tf_agents.bandits.agents import neural_linucb_agent\nfrom tf_agents.bandits.agents import utils as bandit_utils\nfrom tf_agents.bandits.drivers import driver_utils\nfrom tf_agents.bandits.policies import policy_utilities\nfrom tf_agents.networks import network\nfrom tf_agents.specs import tensor_spec\nfrom tf_agents.trajectories import policy_step\nfrom tf_agents.trajectories import time_step\nfrom tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import # TF internal\n\n\ntfd = tfp.distributions\n\n\nclass DummyNet(network.Network):\n\n def __init__(self, observation_spec, encoding_dim=10):\n super(DummyNet, self).__init__(\n observation_spec, state_spec=(), name='DummyNet')\n context_dim = observation_spec.shape[0]\n self._layers.append(\n tf.keras.layers.Dense(\n encoding_dim,\n kernel_initializer=tf.compat.v1.initializers.constant(\n np.ones([context_dim, encoding_dim])),\n bias_initializer=tf.compat.v1.initializers.constant(\n np.zeros([encoding_dim]))))\n\n def call(self, inputs, step_type=None, network_state=()):\n del step_type\n inputs = tf.cast(inputs, tf.float32)\n for layer in self.layers:\n inputs = layer(inputs)\n return inputs, network_state\n\n\ndef test_cases():\n return parameterized.named_parameters(\n {\n 'testcase_name': '_batch1_contextdim10',\n 'batch_size': 1,\n 'context_dim': 10,\n }, {\n 'testcase_name': '_batch4_contextdim5',\n 'batch_size': 4,\n 'context_dim': 5,\n })\n\n\ndef _get_initial_and_final_steps(batch_size, context_dim):\n observation = np.array(range(batch_size * context_dim)).reshape(\n [batch_size, context_dim])\n reward = np.random.uniform(0.0, 1.0, [batch_size])\n initial_step = time_step.TimeStep(\n tf.constant(\n time_step.StepType.FIRST, dtype=tf.int32, shape=[batch_size],\n name='step_type'),\n tf.constant(0.0, dtype=tf.float32, shape=[batch_size], name='reward'),\n tf.constant(1.0, dtype=tf.float32, shape=[batch_size], name='discount'),\n tf.constant(observation, dtype=tf.float32,\n shape=[batch_size, context_dim], name='observation'))\n final_step = time_step.TimeStep(\n tf.constant(\n time_step.StepType.LAST, dtype=tf.int32, shape=[batch_size],\n name='step_type'),\n tf.constant(reward, dtype=tf.float32, shape=[batch_size], name='reward'),\n tf.constant(1.0, dtype=tf.float32, shape=[batch_size], name='discount'),\n tf.constant(observation + 100.0, dtype=tf.float32,\n shape=[batch_size, context_dim], name='observation'))\n return initial_step, final_step\n\n\ndef _get_initial_and_final_steps_with_action_mask(batch_size,\n context_dim,\n num_actions=None):\n observation = 
np.array(range(batch_size * context_dim)).reshape(\n [batch_size, context_dim])\n observation = tf.constant(observation, dtype=tf.float32)\n mask = 1 - tf.eye(batch_size, num_columns=num_actions, dtype=tf.int32)\n reward = np.random.uniform(0.0, 1.0, [batch_size])\n initial_step = time_step.TimeStep(\n tf.constant(\n time_step.StepType.FIRST,\n dtype=tf.int32,\n shape=[batch_size],\n name='step_type'),\n tf.constant(0.0, dtype=tf.float32, shape=[batch_size], name='reward'),\n tf.constant(1.0, dtype=tf.float32, shape=[batch_size], name='discount'),\n (observation, mask))\n final_step = time_step.TimeStep(\n tf.constant(\n time_step.StepType.LAST,\n dtype=tf.int32,\n shape=[batch_size],\n name='step_type'),\n tf.constant(reward, dtype=tf.float32, shape=[batch_size], name='reward'),\n tf.constant(1.0, dtype=tf.float32, shape=[batch_size], name='discount'),\n (observation + 100.0, mask))\n return initial_step, final_step\n\n\ndef _get_action_step(action):\n return policy_step.PolicyStep(\n action=tf.convert_to_tensor(action),\n info=policy_utilities.PolicyInfo())\n\n\ndef _get_experience(initial_step, action_step, final_step):\n single_experience = driver_utils.trajectory_for_bandit(\n initial_step, action_step, final_step)\n # Adds a 'time' dimension.\n return tf.nest.map_structure(\n lambda x: tf.expand_dims(tf.convert_to_tensor(x), 1),\n single_experience)\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass NeuralLinUCBAgentTest(tf.test.TestCase, parameterized.TestCase):\n\n def setUp(self):\n super(NeuralLinUCBAgentTest, self).setUp()\n tf.compat.v1.enable_resource_variables()\n\n @test_cases()\n def testInitializeAgentNumTrainSteps0(self, batch_size, context_dim):\n num_actions = 5\n observation_spec = tensor_spec.TensorSpec([context_dim], tf.float32)\n time_step_spec = time_step.time_step_spec(observation_spec)\n action_spec = tensor_spec.BoundedTensorSpec(\n dtype=tf.int32, shape=(), minimum=0, maximum=num_actions - 1)\n\n encoder = DummyNet(observation_spec)\n agent = neural_linucb_agent.NeuralLinUCBAgent(\n time_step_spec=time_step_spec,\n action_spec=action_spec,\n encoding_network=encoder,\n encoding_network_num_train_steps=0,\n encoding_dim=10,\n optimizer=None)\n self.evaluate(agent.initialize())\n\n @test_cases()\n def testInitializeAgentNumTrainSteps10(self, batch_size, context_dim):\n num_actions = 5\n observation_spec = tensor_spec.TensorSpec([context_dim], tf.float32)\n time_step_spec = time_step.time_step_spec(observation_spec)\n action_spec = tensor_spec.BoundedTensorSpec(\n dtype=tf.int32, shape=(), minimum=0, maximum=num_actions - 1)\n\n encoder = DummyNet(observation_spec)\n agent = neural_linucb_agent.NeuralLinUCBAgent(\n time_step_spec=time_step_spec,\n action_spec=action_spec,\n encoding_network=encoder,\n encoding_network_num_train_steps=10,\n encoding_dim=10,\n optimizer=None)\n self.evaluate(agent.initialize())\n\n @test_cases()\n def testNeuralLinUCBUpdateNumTrainSteps0(self, batch_size=1, context_dim=10):\n \"\"\"Check NeuralLinUCBAgent updates when behaving like LinUCB.\"\"\"\n\n # Construct a `Trajectory` for the given action, observation, reward.\n num_actions = 5\n initial_step, final_step = _get_initial_and_final_steps(\n batch_size, context_dim)\n action = np.random.randint(num_actions, size=batch_size, dtype=np.int32)\n action_step = _get_action_step(action)\n experience = _get_experience(initial_step, action_step, final_step)\n\n # Construct an agent and perform the update.\n observation_spec = tensor_spec.TensorSpec([context_dim], tf.float32)\n 
time_step_spec = time_step.time_step_spec(observation_spec)\n action_spec = tensor_spec.BoundedTensorSpec(\n dtype=tf.int32, shape=(), minimum=0, maximum=num_actions - 1)\n encoder = DummyNet(observation_spec)\n encoding_dim = 10\n agent = neural_linucb_agent.NeuralLinUCBAgent(\n time_step_spec=time_step_spec,\n action_spec=action_spec,\n encoding_network=encoder,\n encoding_network_num_train_steps=0,\n encoding_dim=encoding_dim,\n optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=1e-2))\n\n loss_info = agent.train(experience)\n self.evaluate(agent.initialize())\n self.evaluate(tf.compat.v1.global_variables_initializer())\n self.evaluate(loss_info)\n final_a = self.evaluate(agent.cov_matrix)\n final_b = self.evaluate(agent.data_vector)\n\n # Compute the expected updated estimates.\n observations_list = tf.dynamic_partition(\n data=tf.reshape(tf.cast(experience.observation, tf.float64),\n [batch_size, context_dim]),\n partitions=tf.convert_to_tensor(action),\n num_partitions=num_actions)\n rewards_list = tf.dynamic_partition(\n data=tf.reshape(tf.cast(experience.reward, tf.float64), [batch_size]),\n partitions=tf.convert_to_tensor(action),\n num_partitions=num_actions)\n expected_a_updated_list = []\n expected_b_updated_list = []\n for _, (observations_for_arm, rewards_for_arm) in enumerate(zip(\n observations_list, rewards_list)):\n\n encoded_observations_for_arm, _ = encoder(observations_for_arm)\n encoded_observations_for_arm = tf.cast(\n encoded_observations_for_arm, dtype=tf.float64)\n\n num_samples_for_arm_current = tf.cast(\n tf.shape(rewards_for_arm)[0], tf.float64)\n num_samples_for_arm_total = num_samples_for_arm_current\n\n # pylint: disable=cell-var-from-loop\n def true_fn():\n a_new = tf.matmul(\n encoded_observations_for_arm,\n encoded_observations_for_arm,\n transpose_a=True)\n b_new = bandit_utils.sum_reward_weighted_observations(\n rewards_for_arm, encoded_observations_for_arm)\n return a_new, b_new\n def false_fn():\n return (tf.zeros([encoding_dim, encoding_dim], dtype=tf.float64),\n tf.zeros([encoding_dim], dtype=tf.float64))\n a_new, b_new = tf.cond(\n tf.squeeze(num_samples_for_arm_total) > 0,\n true_fn,\n false_fn)\n\n expected_a_updated_list.append(self.evaluate(a_new))\n expected_b_updated_list.append(self.evaluate(b_new))\n\n # Check that the actual updated estimates match the expectations.\n self.assertAllClose(expected_a_updated_list, final_a)\n self.assertAllClose(expected_b_updated_list, final_b)\n\n @test_cases()\n def testNeuralLinUCBUpdateNumTrainSteps10(self, batch_size=1, context_dim=10):\n \"\"\"Check NeuralLinUCBAgent updates when behaving like eps-greedy.\"\"\"\n\n # Construct a `Trajectory` for the given action, observation, reward.\n num_actions = 5\n initial_step, final_step = _get_initial_and_final_steps(\n batch_size, context_dim)\n action = np.random.randint(num_actions, size=batch_size, dtype=np.int32)\n action_step = _get_action_step(action)\n experience = _get_experience(initial_step, action_step, final_step)\n\n # Construct an agent and perform the update.\n observation_spec = tensor_spec.TensorSpec([context_dim], tf.float32)\n time_step_spec = time_step.time_step_spec(observation_spec)\n action_spec = tensor_spec.BoundedTensorSpec(\n dtype=tf.int32, shape=(), minimum=0, maximum=num_actions - 1)\n encoder = DummyNet(observation_spec)\n encoding_dim = 10\n variable_collection = neural_linucb_agent.NeuralLinUCBVariableCollection(\n num_actions, encoding_dim)\n agent = neural_linucb_agent.NeuralLinUCBAgent(\n 
time_step_spec=time_step_spec,\n action_spec=action_spec,\n encoding_network=encoder,\n encoding_network_num_train_steps=10,\n encoding_dim=encoding_dim,\n variable_collection=variable_collection,\n optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=0.001))\n\n loss_info, _ = agent.train(experience)\n self.evaluate(agent.initialize())\n self.evaluate(tf.compat.v1.global_variables_initializer())\n loss_value = self.evaluate(loss_info)\n self.assertGreater(loss_value, 0.0)\n\n @test_cases()\n def testNeuralLinUCBUpdateNumTrainSteps10MaskedActions(\n self, batch_size=1, context_dim=10):\n \"\"\"Check updates when behaving like eps-greedy and using masked actions.\"\"\"\n\n # Construct a `Trajectory` for the given action, observation, reward.\n num_actions = 5\n initial_step, final_step = _get_initial_and_final_steps_with_action_mask(\n batch_size, context_dim, num_actions)\n action = np.random.randint(num_actions, size=batch_size, dtype=np.int32)\n action_step = _get_action_step(action)\n experience = _get_experience(initial_step, action_step, final_step)\n\n # Construct an agent and perform the update.\n observation_spec = (tensor_spec.TensorSpec([context_dim], tf.float32),\n tensor_spec.TensorSpec([num_actions], tf.int32))\n time_step_spec = time_step.time_step_spec(observation_spec)\n action_spec = tensor_spec.BoundedTensorSpec(\n dtype=tf.int32, shape=(), minimum=0, maximum=num_actions - 1)\n encoder = DummyNet(observation_spec[0])\n encoding_dim = 10\n agent = neural_linucb_agent.NeuralLinUCBAgent(\n time_step_spec=time_step_spec,\n action_spec=action_spec,\n encoding_network=encoder,\n encoding_network_num_train_steps=10,\n encoding_dim=encoding_dim,\n optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=0.001),\n observation_and_action_constraint_splitter=lambda x: (x[0], x[1]))\n\n loss_info, _ = agent.train(experience)\n self.evaluate(agent.initialize())\n self.evaluate(tf.compat.v1.global_variables_initializer())\n loss_value = self.evaluate(loss_info)\n self.assertGreater(loss_value, 0.0)\n\n def testInitializeRestoreVariableCollection(self):\n if not tf.executing_eagerly():\n self.skipTest('Test only works in eager mode.')\n num_actions = 5\n encoding_dim = 7\n variable_collection = neural_linucb_agent.NeuralLinUCBVariableCollection(\n num_actions=num_actions, encoding_dim=encoding_dim)\n self.evaluate(tf.compat.v1.global_variables_initializer())\n self.evaluate(variable_collection.num_samples_list)\n checkpoint = tf.train.Checkpoint(variable_collection=variable_collection)\n checkpoint_dir = self.get_temp_dir()\n checkpoint_prefix = os.path.join(checkpoint_dir, 'checkpoint')\n checkpoint.save(file_prefix=checkpoint_prefix)\n\n variable_collection.actions_from_reward_layer.assign(False)\n\n latest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir)\n checkpoint_load_status = checkpoint.restore(latest_checkpoint)\n self.evaluate(checkpoint_load_status.initialize_or_restore())\n self.assertEqual(\n self.evaluate(variable_collection.actions_from_reward_layer), True)\n\n\nif __name__ == '__main__':\n tf.test.main()\n"
] | [
[
"numpy.ones",
"tensorflow.eye",
"tensorflow.compat.v1.train.AdamOptimizer",
"tensorflow.matmul",
"tensorflow.compat.v1.enable_resource_variables",
"tensorflow.squeeze",
"tensorflow.convert_to_tensor",
"tensorflow.executing_eagerly",
"tensorflow.constant",
"tensorflow.test.main",
"numpy.random.uniform",
"tensorflow.shape",
"numpy.zeros",
"tensorflow.cast",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.zeros",
"tensorflow.train.latest_checkpoint",
"tensorflow.train.Checkpoint",
"numpy.random.randint"
]
] |
marcelkotze007/mk007---ML-Python-library | [
"307e51762fc821588206440daa2c18a6128f4aec"
] | [
"util.py"
] | [
"# https://deeplearningcourses.com/c/data-science-supervised-machine-learning-in-python\n# https://www.udemy.com/data-science-supervised-machine-learning-in-python\nfrom __future__ import print_function, division\nfrom builtins import range, input\n# Note: you may need to update your version of future\n# sudo pip install -U future\n\n\nimport numpy as np\nimport pandas as pd\n\ndef get_data(limit=None):\n print(\"Reading in and transforming data...\")\n df = pd.read_csv('train.csv')\n data = df.values\n np.random.shuffle(data)\n X = data[:, 1:]\n print(X[4000])\n X = data[:, 1:] / 255.0 # data is from 0..255\n print(X[4000])\n Y = data[:, 0]\n if limit is not None:\n X, Y = X[:limit], Y[:limit]\n return X, Y\n\ndef get_xor():\n X = np.zeros((200, 2))\n X[:50] = np.random.random((50, 2)) / 2 + 0.5 # (0.5-1, 0.5-1)\n X[50:100] = np.random.random((50, 2)) / 2 # (0-0.5, 0-0.5)\n X[100:150] = np.random.random((50, 2)) / 2 + np.array([[0, 0.5]]) # (0-0.5, 0.5-1)\n X[150:] = np.random.random((50, 2)) / 2 + np.array([[0.5, 0]]) # (0.5-1, 0-0.5)\n Y = np.array([0]*100 + [1]*100)\n return X, Y\n\ndef get_donut():\n N = 200\n R_inner = 5\n R_outer = 10\n\n # distance from origin is radius + random normal\n # angle theta is uniformly distributed between (0, 2pi)\n R1 = np.random.randn(N//2) + R_inner\n theta = 2*np.pi*np.random.random(N//2)\n X_inner = np.concatenate([[R1 * np.cos(theta)], [R1 * np.sin(theta)]]).T\n\n R2 = np.random.randn(N//2) + R_outer\n theta = 2*np.pi*np.random.random(N//2)\n X_outer = np.concatenate([[R2 * np.cos(theta)], [R2 * np.sin(theta)]]).T\n\n X = np.concatenate([ X_inner, X_outer ])\n Y = np.array([0]*(N//2) + [1]*(N//2))\n return X, Y\n\nget_data()"
] | [
[
"numpy.random.shuffle",
"numpy.zeros",
"pandas.read_csv",
"numpy.random.randn",
"numpy.cos",
"numpy.random.random",
"numpy.array",
"numpy.sin",
"numpy.concatenate"
]
] |
bem4solvation/pbj | [
"4fa9c111596359192539787ae241a79d4316b15b"
] | [
"pbj/electrostatics/pb_formulation/formulations/direct_external.py"
] | [
"import numpy as np\nimport bempp.api\nimport os\nfrom bempp.api.operators.boundary import sparse, laplace, modified_helmholtz\nfrom .common import calculate_potential_one_surface\n\ninvert_potential = True\n\n\ndef verify_parameters(self):\n return True\n\n\ndef lhs(self):\n dirichl_space = self.dirichl_space\n neumann_space = self.neumann_space\n ep_in = self.ep_in\n ep_out = self.ep_ex\n kappa = self.kappa\n operator_assembler = self.operator_assembler\n\n identity = sparse.identity(dirichl_space, dirichl_space, dirichl_space)\n slp_in = laplace.single_layer(\n neumann_space, dirichl_space, dirichl_space, assembler=operator_assembler\n )\n dlp_in = laplace.double_layer(\n dirichl_space, dirichl_space, dirichl_space, assembler=operator_assembler\n )\n slp_out = modified_helmholtz.single_layer(\n neumann_space, dirichl_space, dirichl_space, kappa, assembler=operator_assembler\n )\n dlp_out = modified_helmholtz.double_layer(\n dirichl_space, dirichl_space, dirichl_space, kappa, assembler=operator_assembler\n )\n\n A = bempp.api.BlockedOperator(2, 2)\n\n A[0, 0] = 0.5 * identity - dlp_out\n A[0, 1] = slp_out\n A[1, 0] = 0.5 * identity + dlp_in\n A[1, 1] = -(ep_out / ep_in) * slp_in\n\n self.matrices[\"A\"] = A\n\n\ndef rhs(self):\n dirichl_space = self.dirichl_space\n neumann_space = self.neumann_space\n q = self.q\n x_q = self.x_q\n ep_in = self.ep_in\n rhs_constructor = self.rhs_constructor\n\n if rhs_constructor == \"fmm\":\n\n @bempp.api.callable(vectorized=True)\n def fmm_green_func(x, n, domain_index, result):\n import exafmm.laplace as _laplace\n\n sources = _laplace.init_sources(x_q, q)\n targets = _laplace.init_targets(x.T)\n fmm = _laplace.LaplaceFmm(p=10, ncrit=500, filename=\".rhs.tmp\")\n tree = _laplace.setup(sources, targets, fmm)\n values = _laplace.evaluate(tree, fmm)\n os.remove(\".rhs.tmp\")\n result[:] = values[:, 0] / ep_in\n\n @bempp.api.real_callable\n def zero(x, n, domain_index, result):\n result[0] = 0\n\n rhs_1 = bempp.api.GridFunction(neumann_space, fun=zero)\n rhs_2 = bempp.api.GridFunction(dirichl_space, fun=fmm_green_func)\n\n else:\n\n @bempp.api.real_callable\n def charges_fun(x, n, domain_index, result):\n nrm = np.sqrt(\n (x[0] - x_q[:, 0]) ** 2\n + (x[1] - x_q[:, 1]) ** 2\n + (x[2] - x_q[:, 2]) ** 2\n )\n aux = np.sum(q / nrm)\n result[0] = aux / (4 * np.pi * ep_in)\n\n @bempp.api.real_callable\n def zero(x, n, domain_index, result):\n result[0] = 0\n\n rhs_1 = bempp.api.GridFunction(neumann_space, fun=zero)\n rhs_2 = bempp.api.GridFunction(dirichl_space, fun=charges_fun)\n\n self.rhs[\"rhs_1\"], self.rhs[\"rhs_2\"] = rhs_1, rhs_2\n\ndef calculate_potential(self, rerun_all):\n calculate_potential_one_surface(self, rerun_all)\n\n"
] | [
[
"numpy.sqrt",
"numpy.sum"
]
] |
luxinglong/ViZDoom-SL | [
"fbc54c401b1ca320e9e804f2c97fdedc5d0c534d"
] | [
"doom/test.py"
] | [
"import sys\r\nimport argparse\r\nimport numpy as np\r\n\r\nfrom actions import ActionBuilder\r\nfrom game import Game\r\n\r\n# use_continuous speed action_combinations crouch freelook\r\n\r\nFALSY_STRINGS = {'off', 'false', '0'}\r\nTRUTHY_STRINGS = {'on', 'true', '1'}\r\n\r\ndef bool_flag(string):\r\n \"\"\"\r\n Parse boolean arguments from the command line.\r\n \"\"\"\r\n if string.lower() in FALSY_STRINGS:\r\n return False\r\n elif string.lower() in TRUTHY_STRINGS:\r\n return True\r\n else:\r\n raise argparse.ArgumentTypeError(\"invalid value for a boolean flag. \"\r\n \"use 0 or 1\")\r\n\r\ndef main():\r\n parser = argparse.ArgumentParser(description='LUBAN runner')\r\n parser.add_argument(\"--use_continuous\", type=bool_flag, default=False,\r\n help=\"weather use continuous actions\")\r\n # Available actions\r\n # combination of actions the agent is allowed to do.\r\n # this is for non-continuous mode only, and is ignored in continuous mode\r\n parser.add_argument(\"--action_combinations\", type=str,\r\n default='move_fb+turn_lr+move_lr+attack',\r\n help=\"Allowed combinations of actions\")\r\n # freelook: allow the agent to look up and down\r\n parser.add_argument(\"--freelook\", type=bool_flag, default=False,\r\n help=\"Enable freelook (look up / look down)\")\r\n parser.add_argument(\"--human_player\", type=bool_flag, default=False,\r\n help=\"DoomGame mode\")\r\n\r\n # speed and crouch buttons: in non-continuous mode, the network can not\r\n # have control on these buttons, and they must be set to always 'on' or\r\n # 'off'. In continuous mode, the network can manually control crouch and\r\n # speed.\r\n parser.add_argument(\"--speed\", type=str, default='off',\r\n help=\"Crouch: on / off / manual\")\r\n parser.add_argument(\"--crouch\", type=str, default='off',\r\n help=\"Crouch: on / off / manual\")\r\n\r\n # for process_buffers\r\n parser.add_argument(\"--height\", type=int, default=60,\r\n help=\"Image height\")\r\n parser.add_argument(\"--width\", type=int, default=108,\r\n help=\"Image width\")\r\n parser.add_argument(\"--gray\", type=bool_flag, default=False,\r\n help=\"Use grayscale\")\r\n parser.add_argument(\"--use_screen_buffer\", type=bool_flag, default=True,\r\n help=\"Use the screen buffer\")\r\n parser.add_argument(\"--use_depth_buffer\", type=bool_flag, default=False,\r\n help=\"Use the depth buffer\")\r\n parser.add_argument(\"--labels_mapping\", type=str, default='',\r\n help=\"Map labels to different feature maps\")\r\n parser.add_argument(\"--dump_freq\", type=int, default=0,\r\n help=\"Dump every X iterations (0 to disable)\")\r\n # for observe_state\r\n parser.add_argument(\"--hist_size\", type=int, default=4,\r\n help=\"History size\")\r\n\r\n params, unparsed = parser.parse_known_args(sys.argv)\r\n print(sys.argv)\r\n params.game_variables = [('health', 101), ('sel_ammo', 301)]\r\n print(params)\r\n\r\n action_builder = ActionBuilder(params)\r\n print(action_builder.n_actions)\r\n print(action_builder.available_actions)\r\n\r\n game = Game(\r\n scenario='full_deathmatch',\r\n action_builder=action_builder,\r\n score_variable='USER2',\r\n freedoom=True,\r\n screen_resolution='RES_800X450',\r\n use_screen_buffer=True,\r\n use_depth_buffer=True,\r\n labels_mapping=\"\",\r\n game_features=\"target,enemy\",\r\n mode=('SPECTATOR' if params.human_player else 'PLAYER'),\r\n render_hud=True,\r\n render_crosshair=True,\r\n render_weapon=True,\r\n freelook=params.freelook,\r\n visible=0,\r\n n_bots=10,\r\n use_scripted_marines=True\r\n )\r\n\r\n game.start(map_id = 
2)\r\n\r\n game.init_bots_health(100)\r\n\r\n episodes = 100000\r\n\r\n last_states = []\r\n\r\n for _ in range(episodes):\r\n if game.is_player_dead():\r\n game.respawn_player()\r\n game.observe_state(params, last_states)\r\n action = np.random.randint(0, 29)\r\n game.make_action(action, frame_skip=1, sleep=None)\r\n game.close()\r\n\t\r\nif __name__ == '__main__':\r\n main()\r\n"
] | [
[
"numpy.random.randint"
]
] |
Mohamed-Abdulaty/UDACITY-CarND-P2-Advanced-Lane-Lines | [
"e5d5fdff45c523a4f17635897b9de4b2e50d273d"
] | [
"src/Calibration.py"
] | [
"import os\nimport cv2\nimport numpy as np\n\n\nclass Calibration:\n def __init__(\n self,\n source_images_directory,\n destination_image_sub_directory,\n chessboard_shape,\n logger\n ):\n self.source_images_directory = source_images_directory\n self.destination_image_sub_directory= destination_image_sub_directory\n self.cornered_output_images = str(self.destination_image_sub_directory+'/Cornered')\n self.undistorted_output_images = str(self.destination_image_sub_directory+'/Undistorted')\n self.chessboard_x, self.chessboard_y= chessboard_shape\n self.logger = logger\n self.name_list_of_boards = os.listdir(self.source_images_directory)\n self.number_of_boards = len(self.name_list_of_boards)\n self.image_size = None\n self.object_points = []\n self.image_points = []\n self.camera_matrix, self.distortion_coefficient = \\\n self.__calculate_calibration_parameters()\n\n \n\n def get_calibration_parameters(self):\n return self.camera_matrix, self.distortion_coefficient\n\n def __calculate_calibration_parameters(self):\n object_points = np.zeros((self.chessboard_x*self.chessboard_y, 3), np.float32)\n object_points[:, :2] = np.mgrid[0:self.chessboard_x, 0:self.chessboard_y].T.reshape(-1, 2)\n \n for img_name in self.name_list_of_boards:\n # Read the image\n image_path = '{}/{}'.format(str(self.source_images_directory), str(img_name))\n image_obj = cv2.imread(image_path)\n # Gray it\n gray_image = cv2.cvtColor(image_obj, cv2.COLOR_BGR2GRAY)\n self.image_size = gray_image.shape[::-1]\n\n # Find its corners\n ret, corners = cv2.findChessboardCorners(gray_image, (self.chessboard_x, self.chessboard_y), None)\n\n if ret:\n self.object_points.append(object_points)\n self.image_points.append(corners)\n\n # save image with corners\n image = cv2.drawChessboardCorners(\\\n image_obj, \\\n (self.chessboard_y, self.chessboard_x), \\\n corners, \\\n ret)\n # Saved image with corners\n self.logger.save_image(str(self.cornered_output_images), img_name, image)\n else:\n self.logger.log_error('Can not find all needed corners in {}'.format(str(img_name)))\n \n # Calibrate the camera\n calibration_parameters = \\\n cv2.calibrateCamera(self.object_points, \\\n self.image_points, \\\n self.image_size, \\\n None, None)\n\n # save corrected images\n self.__save_undistorted_images(calibration_parameters[1], calibration_parameters[2])\n\n # return onlt camera_matrix, and dis_coef\n return calibration_parameters[1], calibration_parameters[2]\n \n\n def __save_undistorted_images(self, camera_matrix, distortion_coef):\n cornered_images_list = os.listdir(str('./results/'+self.cornered_output_images))\n \n for cornered_img in cornered_images_list:\n image_path = '{}/{}'.format(str('./results/'+self.cornered_output_images), str(cornered_img))\n image_obj = cv2.imread(image_path)\n\n self.logger.save_image( \\\n str(self.undistorted_output_images), \\\n cornered_img, \n cv2.undistort(image_obj, camera_matrix, distortion_coef, None, camera_matrix))"
] | [
[
"numpy.zeros"
]
] |
barslmn/dove | [
"df6344286633422219c0e93e15d4327f9d082041"
] | [
"dove/utils/bed.py"
] | [
"# -*- coding: utf-8 -*-\n__author__ = 'bars'\n\nfrom io import StringIO\nimport pandas as pd\nfrom collections import defaultdict\n\n\nclass Bed:\n \"\"\"description\"\"\"\n\n def __init__(self, bed_file, mode='file'):\n self.bed_file = bed_file\n self.mode = mode\n\n def get_header(self):\n lines_to_skip = 0\n header = defaultdict(list)\n if self.mode == 'str':\n for line in self.bed_file.split('\\n'):\n if line.startswith('track'):\n header['track'].append(line)\n lines_to_skip += 1\n elif line.startswith('browser'):\n header['browser'].append(line)\n lines_to_skip += 1\n else:\n break\n else:\n with open(self.bed_file) as f:\n lines = f.read().splitlines()\n for line in lines:\n if line.startswith('track'):\n header['track'].append(line)\n lines_to_skip += 1\n elif line.startswith('browser'):\n header['browser'].append(line)\n lines_to_skip += 1\n else:\n break\n return lines_to_skip, header\n\n def from_file(self):\n lines_to_skip, header = self.get_header()\n df_bed = pd.read_csv(\n self.bed_file,\n sep='\\t',\n usecols=[0, 1, 2],\n names=['CHR', 'START', 'END'],\n dtype={'START': int, 'END': int},\n skiprows=lines_to_skip\n )\n return df_bed\n\n def from_string(self):\n lines_to_skip, header = self.get_header()\n df_bed = pd.read_csv(\n StringIO(self.bed_file),\n sep='\\t',\n usecols=[0, 1, 2],\n names=['CHR', 'START', 'END'],\n dtype={'START': int, 'END': int},\n skiprows=lines_to_skip\n )\n return df_bed\n"
] | [
[
"pandas.read_csv"
]
] |
junzhezhang/cmr | [
"f0b2ded813535493f124852ce64b26efa761a35c"
] | [
"nnutils/dibr_kaolin.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n\nimport numpy as np\nimport scipy.misc\nimport tqdm\nimport cv2\n\nimport torch\n\nfrom nnutils import geom_utils\n\n# from kaolin.graphics.dib_renderer.rasterizer import linear_rasterizer\n# from kaolin.graphics.dib_renderer.utils import datanormalize\n# from kaolin.graphics.dib_renderer.renderer.phongrender import PhongRender\nfrom kaolin.graphics.dib_renderer.renderer.texrender import TexRender\nfrom kaolin.graphics.dib_renderer.utils.perspective import lookatnp, perspectiveprojectionnp\n\nfrom kaolin.graphics.dib_renderer.utils.mesh import loadobj, face2pfmtx, loadobjtex, savemesh\n\n\ndef quaternion_to_matrix(quaternions):\n \"\"\"\n Convert rotations given as quaternions to rotation matrices.\n Args:\n quaternions: quaternions with real part first,\n as tensor of shape (..., 4).\n Returns:\n Rotation matrices as tensor of shape (..., 3, 3).\n \"\"\"\n r, i, j, k = torch.unbind(quaternions, -1)\n two_s = 2.0 / (quaternions * quaternions).sum(-1)\n\n o = torch.stack(\n (\n 1 - two_s * (j * j + k * k),\n two_s * (i * j - k * r),\n two_s * (i * k + j * r),\n two_s * (i * j + k * r),\n 1 - two_s * (i * i + k * k),\n two_s * (j * k - i * r),\n two_s * (i * k - j * r),\n two_s * (j * k + i * r),\n 1 - two_s * (i * i + j * j),\n ),\n -1,\n )\n return o.reshape(quaternions.shape[:-1] + (3, 3))\n\nclass NeuralRenderer(torch.nn.Module):\n \"\"\"\n replace NeuralRenderer from nmr.py with the kaolin's\n \"\"\"\n # 512 --> 256 TODO\n def __init__(self, img_size=256,uv_sampler=None):\n self.img_size = img_size\n super(NeuralRenderer, self).__init__()\n self.renderer = TexRender(height=img_size,width=img_size)\n # self.renderer = NeuralMeshRenderer(image_size=img_size, camera_mode='look_at',perspective=False,viewing_angle=30,light_intensity_ambient=0.8)\n self.offset_z = 5.\n self.proj_fn = geom_utils.orthographic_proj_withz\n if uv_sampler is not None:\n self.uv_sampler = uv_sampler.clone()\n else:\n print('no uv sampler')\n print('DIB-R...')\n \n def ambient_light_only(self):\n # Make light only ambient.\n # self.renderer.light_intensity_ambient = 1\n # self.renderer.light_intensity_directional = 0\n print(\"TODO: ambient_light_only\")\n pass\n \n def set_bgcolor(self, color):\n # self.renderer.background_color = color\n print(\"TODO: set_bgcolor\")\n pass\n\n def project_points(self, verts, cams):\n proj = self.proj_fn(verts, cams)\n return proj[:, :, :2]\n \n def forward(self, vertices, faces, cams, textures=None):\n ### TODO save mesh\n if textures is not None:\n v_np = vertices[0].detach().cpu().numpy()\n f_np = faces[0].detach().cpu().numpy()\n file_name = 'vis/bird.obj'\n try:\n savemesh(v_np, f_np, file_name)\n except:\n import pdb; pdb.set_trace()\n # ours = False\n ours = True\n if ours:\n translation = cams[:,:3]\n quant = cams[:,-4:]\n tfcamviewmtx_bx3x3 = quaternion_to_matrix(quant)\n tfcamshift_bx3 = - translation\n\n # camfovy = 45 / 180.0 * np.pi\n camfovy = 90 / 180.0 * np.pi\n camprojmtx = perspectiveprojectionnp(camfovy, 1.0 * 1.0 / 1.0)\n tfcamproj_3x1 = torch.from_numpy(camprojmtx).cuda()\n\n tfcameras = [tfcamviewmtx_bx3x3,\n tfcamshift_bx3,\n tfcamproj_3x1]\n else:\n tfcameras = self.get_sample_cams(bs=vertices.shape[0])\n # import pdb; pdb.set_trace()\n print('1:',tfcameras[0].shape)\n print('2:',tfcameras[1].shape)\n print('3:',tfcameras[2].shape)\n \n \n if textures is None:\n tex_flag = False\n # shape = [vertices.shape[0], 1280, 6,6,6,3]\n # textures = 
torch.ones(vertices.shape[0], 1280, 6,6,6,3).cuda()*256\n textures = torch.ones(vertices.shape[0],3,self.img_size,self.img_size).cuda()\n else:\n tex_flag = True\n \n # # TODO try with convmesh output\n imfile = '/mnt/lustre/zhangjunzhe/tm/convmesh/output/pretrained_cub_512x512_class/mesh_0.png'\n # textures_np = cv2.imread(imfile)[:, :, ::-1].astype(np.float32) / 255.0\n textures_np = cv2.imread(imfile)[:, :, ::-1].astype(np.float32) \n dim = (self.img_size, self.img_size)\n resized = cv2.resize(textures_np, dim, interpolation = cv2.INTER_AREA)\n textures = torch.from_numpy(resized).cuda().unsqueeze(0)\n textures = textures.permute([0, 3, 1, 2])\n # print('tex shape:', textures.shape)\n # # import pdb; pdb.set_trace()\n # textures = torch.ones(vertices.shape[0],3,self.img_size,self.img_size).cuda()\n\n # print(texture)\n # renderer.set_smooth(pfmtx) # TODO for phong renderer\n tfp_bxpx3 = vertices\n tff_fx3 = faces[0] # TODO to verify if fixed topology within a batch\n # tff_fx3 = tff_fx3.type(int64)\n tff_fx3 = tff_fx3.type(torch.long)\n points = [tfp_bxpx3, tff_fx3]\n uvs = self.uv_sampler\n # TODO texture to clone?\n # TODOL ft_fx3\n # ft_fx3??? TODO\n #only keep rgb, no alpha and depth\n print('uv shape:',uvs.shape)\n imgs = self.renderer(points=points,\n cameras=tfcameras,\n uv_bxpx2 = uvs,\n texture_bx3xthxtw=textures,\n ft_fx3=None)[0]\n if tex_flag:\n for i, img in enumerate(imgs):\n img = img.detach().cpu().numpy()\n\n cv2.imwrite('./vis/lam'+str(i)+'.jpg',img*255)\n print('saved img')\n print('!!!imgs:',imgs.shape)\n \n imgs = imgs.permute([0,3,1,2])\n print('new shape:',imgs.shape)\n # print(' cam:',cams) \n return imgs\n\n def get_sample_cams(self,bs):\n ##########################################################\n # campos = np.array([0, 0, 1.5], dtype=np.float32) # where camera it is\n # campos = np.array([0, 0, 4], dtype=np.float32)\n # campos = np.array([0, 4, 0], dtype=np.float32)\n campos = np.array([4, 0, 0], dtype=np.float32)\n \n camcenter = np.array([0, 0, 0], dtype=np.float32) # where camra is looking at\n \n # camup = np.array([-1, 1, 0], dtype=np.float32) # y axis of camera view\n # camup = np.array([-1, 0, 1], dtype=np.float32)\n # camup = np.array([0, -1, 1], dtype=np.float32)\n # camup = np.array([0, 1, -1], dtype=np.float32)\n # camup = np.array([1, -1, 0], dtype=np.float32)\n # camup = np.array([1, 0, -1], dtype=np.float32)\n # camup = np.array([1, 1, 0], dtype=np.float32)\n # camup = np.array([-1, 0, -1], dtype=np.float32)\n camup = np.array([1, 0, 1], dtype=np.float32)\n \n camviewmtx, camviewshift = lookatnp(campos.reshape(3, 1), camcenter.reshape(3, 1), camup.reshape(3, 1))\n camviewshift = -np.dot(camviewmtx.transpose(), camviewshift)\n\n camfovy = 45 / 180.0 * np.pi\n camprojmtx = perspectiveprojectionnp(camfovy, 1.0 * 1.0 / 1.0)\n\n #####################################################\n # tfp_px3 = torch.from_numpy(p)\n # tfp_px3.requires_grad = True\n\n # tff_fx3 = torch.from_numpy(f)\n\n # tfuv_tx2 = torch.from_numpy(uv)\n # tfuv_tx2.requires_grad = True\n # tfft_fx3 = torch.from_numpy(ft)\n\n # tftex_thxtwx3 = torch.from_numpy(np.ascontiguousarray(texturenp))\n # tftex_thxtwx3.requires_grad = True\n\n tfcamviewmtx = torch.from_numpy(camviewmtx)\n tfcamshift = torch.from_numpy(camviewshift)\n tfcamproj = torch.from_numpy(camprojmtx)\n\n ##########################################################\n # tfp_1xpx3 = torch.unsqueeze(tfp_px3, dim=0)\n # tfuv_1xtx2 = torch.unsqueeze(tfuv_tx2, dim=0)\n # tftex_1xthxtwx3 = torch.unsqueeze(tftex_thxtwx3, 
dim=0)\n\n tfcamviewmtx_1x3x3 = torch.unsqueeze(tfcamviewmtx, dim=0)\n tfcamshift_1x3 = tfcamshift.view(-1, 3)\n tfcamproj_3x1 = tfcamproj\n\n # bs = 4\n # tfp_bxpx3 = tfp_1xpx3.repeat([bs, 1, 1])\n # tfuv_bxtx2 = tfuv_1xtx2.repeat([bs, 1, 1])\n # tftex_bxthxtwx3 = tftex_1xthxtwx3.repeat([bs, 1, 1, 1])\n\n tfcamviewmtx_bx3x3 = tfcamviewmtx_1x3x3.repeat([bs, 1, 1])\n tfcamshift_bx3 = tfcamshift_1x3.repeat([bs, 1]) \n\n tfcameras = [tfcamviewmtx_bx3x3.cuda(),\n tfcamshift_bx3.cuda(),\n tfcamproj_3x1.cuda()]\n return tfcameras\n\n # def compute_uvsampler(self,verts_t, faces_t, tex_size=2):\n # \"\"\"\n # NOTE: copied from utils/mesh.py\n # tex_size texture resolution per face default = 6\n # TODO : merge with backbone\n\n # For this mesh, pre-computes the UV coordinates for\n # F x T x T points.\n # Returns F x T x T x 2\n # \"\"\"\n # verts = verts_t[0].clone().detach().cpu().numpy()\n # faces = faces_t[0].clone().detach().cpu().numpy()\n # # import pdb; pdb.set_trace()\n # alpha = np.arange(tex_size, dtype=np.float) / (tex_size-1)\n # beta = np.arange(tex_size, dtype=np.float) / (tex_size-1)\n # import itertools\n # # Barycentric coordinate values\n # coords = np.stack([p for p in itertools.product(*[alpha, beta])])\n # vs = verts[faces]\n # # Compute alpha, beta (this is the same order as NMR)\n # v2 = vs[:, 2]\n # v0v2 = vs[:, 0] - vs[:, 2]\n # v1v2 = vs[:, 1] - vs[:, 2] \n # # F x 3 x T*2\n # samples = np.dstack([v0v2, v1v2]).dot(coords.T) + v2.reshape(-1, 3, 1) \n # # F x T*2 x 3 points on the sphere \n # samples = np.transpose(samples, (0, 2, 1))\n\n # # Now convert these to uv.\n # uv = get_spherical_coords(samples.reshape(-1, 3))\n # # uv = uv.reshape(-1, len(coords), 2)\n\n # uv = uv.reshape(-1, tex_size, tex_size, 2)\n # return uv"
] | [
[
"torch.unsqueeze",
"torch.unbind",
"torch.ones",
"torch.stack",
"torch.from_numpy",
"numpy.array"
]
] |
horizon-blue/beanmachine-1 | [
"b13e4e3e28ffb860947eb8046863b0cabb581222"
] | [
"src/beanmachine/ppl/inference/proposer/nmc/single_site_half_space_nmc_proposer.py"
] | [
"# Copyright (c) Meta Platforms, Inc. and affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport logging\nfrom typing import Tuple\n\nimport torch\nimport torch.distributions as dist\nfrom beanmachine.ppl.inference.proposer.single_site_ancestral_proposer import (\n SingleSiteAncestralProposer,\n)\nfrom beanmachine.ppl.legacy.inference.proposer.newtonian_monte_carlo_utils import (\n is_valid,\n hessian_of_log_prob,\n)\nfrom beanmachine.ppl.model.rv_identifier import RVIdentifier\nfrom beanmachine.ppl.utils import tensorops\nfrom beanmachine.ppl.world import World\n\n\nLOGGER = logging.getLogger(\"beanmachine\")\n\n\nclass SingleSiteHalfSpaceNMCProposer(SingleSiteAncestralProposer):\n \"\"\"\n Single-Site Half Space Newtonian Monte Carlo Proposers.\n See sec. 3.2 of [1]\n\n [1] Arora, Nim, et al. `Newtonian Monte Carlo: single-site MCMC meets second-order gradient methods`\n \"\"\"\n\n def __init__(self, node: RVIdentifier):\n super().__init__(node)\n self._proposal_distribution = None\n\n def compute_alpha_beta(\n self, world: World\n ) -> Tuple[bool, torch.Tensor, torch.Tensor]:\n \"\"\"\n Computes alpha and beta of the Gamma proposal given the node.\n alpha = 1 - hessian_diag * x^2\n beta = -1 * x * hessian_diag - first_grad\n \"\"\"\n node_val = world[self.node]\n first_gradient, hessian_diag = hessian_of_log_prob(\n world, self.node, node_val, tensorops.halfspace_gradients\n )\n if not is_valid(first_gradient) or not is_valid(hessian_diag):\n LOGGER.warning(\n \"Gradient or Hessian is invalid at node {n}.\\n\".format(n=str(self.node))\n )\n return False, torch.tensor(0.0), torch.tensor(0.0)\n\n node_val_reshaped = node_val.reshape(-1)\n predicted_alpha = (\n 1 - hessian_diag * (node_val_reshaped * node_val_reshaped)\n ).t()\n predicted_beta = -1 * node_val_reshaped * hessian_diag - first_gradient\n condition = (predicted_alpha > 0) & (predicted_beta > 0)\n predicted_alpha = torch.where(\n condition, predicted_alpha, torch.tensor(1.0).to(dtype=predicted_beta.dtype)\n )\n node_var = world.get_variable(self.node)\n mean = (\n node_var.distribution.mean.reshape(-1)\n if is_valid(node_var.distribution.mean)\n else torch.ones_like(predicted_beta)\n )\n predicted_beta = torch.where(condition, predicted_beta, mean)\n predicted_alpha = predicted_alpha.reshape(node_val.shape)\n predicted_beta = predicted_beta.reshape(node_val.shape)\n return True, predicted_alpha, predicted_beta\n\n def get_proposal_distribution(self, world: World) -> dist.Distribution:\n \"\"\"\n Returns the proposal distribution of the node.\n\n Args:\n world: the world in which we're proposing a new value for node.\n Returns:\n The proposal distribution.\n \"\"\"\n # if the number of variables in the world is 1 and proposal distribution\n # has already been computed, we can use the old proposal distribution\n # and skip re-computing the gradient, since there are no other variable\n # in the world that may change the gradient and the old one is still\n # correct.\n if self._proposal_distribution is not None and len(world.latent_nodes) == 1:\n return self._proposal_distribution\n\n is_valid, alpha, beta = self.compute_alpha_beta(world)\n if not is_valid:\n LOGGER.warning(\n \"Node {n} has invalid proposal solution. 
\".format(n=self.node)\n + \"Proposer falls back to SingleSiteAncestralProposer.\\n\"\n )\n return super().get_proposal_distribution(world)\n\n self._proposal_distribution = dist.Gamma(alpha, beta)\n return self._proposal_distribution\n"
] | [
[
"torch.ones_like",
"torch.distributions.Gamma",
"torch.where",
"torch.tensor"
]
] |
dhimmel/pandas | [
"776fed3ab63d74ddef6e5af1a702b10c2a30bbb6"
] | [
"pandas/tests/frame/test_analytics.py"
] | [
"# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\n\nimport warnings\nfrom datetime import timedelta\nimport operator\nimport pytest\n\nfrom string import ascii_lowercase\nfrom numpy import nan\nfrom numpy.random import randn\nimport numpy as np\n\nfrom pandas.compat import lrange, PY35\nfrom pandas import (compat, isna, notna, DataFrame, Series,\n MultiIndex, date_range, Timestamp, Categorical,\n _np_version_under1p12,\n to_datetime, to_timedelta)\nimport pandas as pd\nimport pandas.core.nanops as nanops\nimport pandas.core.algorithms as algorithms\n\nimport pandas.util.testing as tm\nimport pandas.util._test_decorators as td\nfrom pandas.tests.frame.common import TestData\n\n\nclass TestDataFrameAnalytics(TestData):\n\n # ---------------------------------------------------------------------=\n # Correlation and covariance\n\n @td.skip_if_no_scipy\n def test_corr_pearson(self):\n self.frame['A'][:5] = nan\n self.frame['B'][5:10] = nan\n\n self._check_method('pearson')\n\n @td.skip_if_no_scipy\n def test_corr_kendall(self):\n self.frame['A'][:5] = nan\n self.frame['B'][5:10] = nan\n\n self._check_method('kendall')\n\n @td.skip_if_no_scipy\n def test_corr_spearman(self):\n self.frame['A'][:5] = nan\n self.frame['B'][5:10] = nan\n\n self._check_method('spearman')\n\n def _check_method(self, method='pearson', check_minp=False):\n if not check_minp:\n correls = self.frame.corr(method=method)\n exp = self.frame['A'].corr(self.frame['C'], method=method)\n tm.assert_almost_equal(correls['A']['C'], exp)\n else:\n result = self.frame.corr(min_periods=len(self.frame) - 8)\n expected = self.frame.corr()\n expected.loc['A', 'B'] = expected.loc['B', 'A'] = nan\n tm.assert_frame_equal(result, expected)\n\n @td.skip_if_no_scipy\n def test_corr_non_numeric(self):\n self.frame['A'][:5] = nan\n self.frame['B'][5:10] = nan\n\n # exclude non-numeric types\n result = self.mixed_frame.corr()\n expected = self.mixed_frame.loc[:, ['A', 'B', 'C', 'D']].corr()\n tm.assert_frame_equal(result, expected)\n\n @td.skip_if_no_scipy\n @pytest.mark.parametrize('meth', ['pearson', 'kendall', 'spearman'])\n def test_corr_nooverlap(self, meth):\n # nothing in common\n df = DataFrame({'A': [1, 1.5, 1, np.nan, np.nan, np.nan],\n 'B': [np.nan, np.nan, np.nan, 1, 1.5, 1],\n 'C': [np.nan, np.nan, np.nan, np.nan,\n np.nan, np.nan]})\n rs = df.corr(meth)\n assert isna(rs.loc['A', 'B'])\n assert isna(rs.loc['B', 'A'])\n assert rs.loc['A', 'A'] == 1\n assert rs.loc['B', 'B'] == 1\n assert isna(rs.loc['C', 'C'])\n\n @td.skip_if_no_scipy\n @pytest.mark.parametrize('meth', ['pearson', 'spearman'])\n def test_corr_constant(self, meth):\n # constant --> all NA\n\n df = DataFrame({'A': [1, 1, 1, np.nan, np.nan, np.nan],\n 'B': [np.nan, np.nan, np.nan, 1, 1, 1]})\n rs = df.corr(meth)\n assert isna(rs.values).all()\n\n def test_corr_int(self):\n # dtypes other than float64 #1761\n df3 = DataFrame({\"a\": [1, 2, 3, 4], \"b\": [1, 2, 3, 4]})\n\n df3.cov()\n df3.corr()\n\n @td.skip_if_no_scipy\n def test_corr_int_and_boolean(self):\n # when dtypes of pandas series are different\n # then ndarray will have dtype=object,\n # so it need to be properly handled\n df = DataFrame({\"a\": [True, False], \"b\": [1, 0]})\n\n expected = DataFrame(np.ones((2, 2)), index=[\n 'a', 'b'], columns=['a', 'b'])\n for meth in ['pearson', 'kendall', 'spearman']:\n\n # RuntimeWarning\n with warnings.catch_warnings(record=True):\n result = df.corr(meth)\n tm.assert_frame_equal(result, expected)\n\n def test_corr_cov_independent_index_column(self):\n # 
GH 14617\n df = pd.DataFrame(np.random.randn(4 * 10).reshape(10, 4),\n columns=list(\"abcd\"))\n for method in ['cov', 'corr']:\n result = getattr(df, method)()\n assert result.index is not result.columns\n assert result.index.equals(result.columns)\n\n def test_cov(self):\n # min_periods no NAs (corner case)\n expected = self.frame.cov()\n result = self.frame.cov(min_periods=len(self.frame))\n\n tm.assert_frame_equal(expected, result)\n\n result = self.frame.cov(min_periods=len(self.frame) + 1)\n assert isna(result.values).all()\n\n # with NAs\n frame = self.frame.copy()\n frame['A'][:5] = nan\n frame['B'][5:10] = nan\n result = self.frame.cov(min_periods=len(self.frame) - 8)\n expected = self.frame.cov()\n expected.loc['A', 'B'] = np.nan\n expected.loc['B', 'A'] = np.nan\n\n # regular\n self.frame['A'][:5] = nan\n self.frame['B'][:10] = nan\n cov = self.frame.cov()\n\n tm.assert_almost_equal(cov['A']['C'],\n self.frame['A'].cov(self.frame['C']))\n\n # exclude non-numeric types\n result = self.mixed_frame.cov()\n expected = self.mixed_frame.loc[:, ['A', 'B', 'C', 'D']].cov()\n tm.assert_frame_equal(result, expected)\n\n # Single column frame\n df = DataFrame(np.linspace(0.0, 1.0, 10))\n result = df.cov()\n expected = DataFrame(np.cov(df.values.T).reshape((1, 1)),\n index=df.columns, columns=df.columns)\n tm.assert_frame_equal(result, expected)\n df.loc[0] = np.nan\n result = df.cov()\n expected = DataFrame(np.cov(df.values[1:].T).reshape((1, 1)),\n index=df.columns, columns=df.columns)\n tm.assert_frame_equal(result, expected)\n\n def test_corrwith(self):\n a = self.tsframe\n noise = Series(randn(len(a)), index=a.index)\n\n b = self.tsframe.add(noise, axis=0)\n\n # make sure order does not matter\n b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])\n del b['B']\n\n colcorr = a.corrwith(b, axis=0)\n tm.assert_almost_equal(colcorr['A'], a['A'].corr(b['A']))\n\n rowcorr = a.corrwith(b, axis=1)\n tm.assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0))\n\n dropped = a.corrwith(b, axis=0, drop=True)\n tm.assert_almost_equal(dropped['A'], a['A'].corr(b['A']))\n assert 'B' not in dropped\n\n dropped = a.corrwith(b, axis=1, drop=True)\n assert a.index[-1] not in dropped.index\n\n # non time-series data\n index = ['a', 'b', 'c', 'd', 'e']\n columns = ['one', 'two', 'three', 'four']\n df1 = DataFrame(randn(5, 4), index=index, columns=columns)\n df2 = DataFrame(randn(4, 4), index=index[:4], columns=columns)\n correls = df1.corrwith(df2, axis=1)\n for row in index[:4]:\n tm.assert_almost_equal(correls[row],\n df1.loc[row].corr(df2.loc[row]))\n\n def test_corrwith_with_objects(self):\n df1 = tm.makeTimeDataFrame()\n df2 = tm.makeTimeDataFrame()\n cols = ['A', 'B', 'C', 'D']\n\n df1['obj'] = 'foo'\n df2['obj'] = 'bar'\n\n result = df1.corrwith(df2)\n expected = df1.loc[:, cols].corrwith(df2.loc[:, cols])\n tm.assert_series_equal(result, expected)\n\n result = df1.corrwith(df2, axis=1)\n expected = df1.loc[:, cols].corrwith(df2.loc[:, cols], axis=1)\n tm.assert_series_equal(result, expected)\n\n def test_corrwith_series(self):\n result = self.tsframe.corrwith(self.tsframe['A'])\n expected = self.tsframe.apply(self.tsframe['A'].corr)\n\n tm.assert_series_equal(result, expected)\n\n def test_corrwith_matches_corrcoef(self):\n df1 = DataFrame(np.arange(10000), columns=['a'])\n df2 = DataFrame(np.arange(10000) ** 2, columns=['a'])\n c1 = df1.corrwith(df2)['a']\n c2 = np.corrcoef(df1['a'], df2['a'])[0][1]\n\n tm.assert_almost_equal(c1, c2)\n assert c1 < 1\n\n def 
test_corrwith_mixed_dtypes(self):\n # GH 18570\n df = pd.DataFrame({'a': [1, 4, 3, 2], 'b': [4, 6, 7, 3],\n 'c': ['a', 'b', 'c', 'd']})\n s = pd.Series([0, 6, 7, 3])\n result = df.corrwith(s)\n corrs = [df['a'].corr(s), df['b'].corr(s)]\n expected = pd.Series(data=corrs, index=['a', 'b'])\n tm.assert_series_equal(result, expected)\n\n def test_bool_describe_in_mixed_frame(self):\n df = DataFrame({\n 'string_data': ['a', 'b', 'c', 'd', 'e'],\n 'bool_data': [True, True, False, False, False],\n 'int_data': [10, 20, 30, 40, 50],\n })\n\n # Integer data are included in .describe() output,\n # Boolean and string data are not.\n result = df.describe()\n expected = DataFrame({'int_data': [5, 30, df.int_data.std(),\n 10, 20, 30, 40, 50]},\n index=['count', 'mean', 'std', 'min', '25%',\n '50%', '75%', 'max'])\n tm.assert_frame_equal(result, expected)\n\n # Top value is a boolean value that is False\n result = df.describe(include=['bool'])\n\n expected = DataFrame({'bool_data': [5, 2, False, 3]},\n index=['count', 'unique', 'top', 'freq'])\n tm.assert_frame_equal(result, expected)\n\n def test_describe_bool_frame(self):\n # GH 13891\n df = pd.DataFrame({\n 'bool_data_1': [False, False, True, True],\n 'bool_data_2': [False, True, True, True]\n })\n result = df.describe()\n expected = DataFrame({'bool_data_1': [4, 2, True, 2],\n 'bool_data_2': [4, 2, True, 3]},\n index=['count', 'unique', 'top', 'freq'])\n tm.assert_frame_equal(result, expected)\n\n df = pd.DataFrame({\n 'bool_data': [False, False, True, True, False],\n 'int_data': [0, 1, 2, 3, 4]\n })\n result = df.describe()\n expected = DataFrame({'int_data': [5, 2, df.int_data.std(), 0, 1,\n 2, 3, 4]},\n index=['count', 'mean', 'std', 'min', '25%',\n '50%', '75%', 'max'])\n tm.assert_frame_equal(result, expected)\n\n df = pd.DataFrame({\n 'bool_data': [False, False, True, True],\n 'str_data': ['a', 'b', 'c', 'a']\n })\n result = df.describe()\n expected = DataFrame({'bool_data': [4, 2, True, 2],\n 'str_data': [4, 3, 'a', 2]},\n index=['count', 'unique', 'top', 'freq'])\n tm.assert_frame_equal(result, expected)\n\n def test_describe_categorical(self):\n df = DataFrame({'value': np.random.randint(0, 10000, 100)})\n labels = [\"{0} - {1}\".format(i, i + 499) for i in range(0, 10000, 500)]\n cat_labels = Categorical(labels, labels)\n\n df = df.sort_values(by=['value'], ascending=True)\n df['value_group'] = pd.cut(df.value, range(0, 10500, 500),\n right=False, labels=cat_labels)\n cat = df\n\n # Categoricals should not show up together with numerical columns\n result = cat.describe()\n assert len(result.columns) == 1\n\n # In a frame, describe() for the cat should be the same as for string\n # arrays (count, unique, top, freq)\n\n cat = Categorical([\"a\", \"b\", \"b\", \"b\"], categories=['a', 'b', 'c'],\n ordered=True)\n s = Series(cat)\n result = s.describe()\n expected = Series([4, 2, \"b\", 3],\n index=['count', 'unique', 'top', 'freq'])\n tm.assert_series_equal(result, expected)\n\n cat = Series(Categorical([\"a\", \"b\", \"c\", \"c\"]))\n df3 = DataFrame({\"cat\": cat, \"s\": [\"a\", \"b\", \"c\", \"c\"]})\n res = df3.describe()\n tm.assert_numpy_array_equal(res[\"cat\"].values, res[\"s\"].values)\n\n def test_describe_categorical_columns(self):\n # GH 11558\n columns = pd.CategoricalIndex(['int1', 'int2', 'obj'],\n ordered=True, name='XXX')\n df = DataFrame({'int1': [10, 20, 30, 40, 50],\n 'int2': [10, 20, 30, 40, 50],\n 'obj': ['A', 0, None, 'X', 1]},\n columns=columns)\n result = df.describe()\n\n exp_columns = pd.CategoricalIndex(['int1', 
'int2'],\n categories=['int1', 'int2', 'obj'],\n ordered=True, name='XXX')\n expected = DataFrame({'int1': [5, 30, df.int1.std(),\n 10, 20, 30, 40, 50],\n 'int2': [5, 30, df.int2.std(),\n 10, 20, 30, 40, 50]},\n index=['count', 'mean', 'std', 'min', '25%',\n '50%', '75%', 'max'],\n columns=exp_columns)\n tm.assert_frame_equal(result, expected)\n tm.assert_categorical_equal(result.columns.values,\n expected.columns.values)\n\n def test_describe_datetime_columns(self):\n columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'],\n freq='MS', tz='US/Eastern', name='XXX')\n df = DataFrame({0: [10, 20, 30, 40, 50],\n 1: [10, 20, 30, 40, 50],\n 2: ['A', 0, None, 'X', 1]})\n df.columns = columns\n result = df.describe()\n\n exp_columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01'],\n freq='MS', tz='US/Eastern', name='XXX')\n expected = DataFrame({0: [5, 30, df.iloc[:, 0].std(),\n 10, 20, 30, 40, 50],\n 1: [5, 30, df.iloc[:, 1].std(),\n 10, 20, 30, 40, 50]},\n index=['count', 'mean', 'std', 'min', '25%',\n '50%', '75%', 'max'])\n expected.columns = exp_columns\n tm.assert_frame_equal(result, expected)\n assert result.columns.freq == 'MS'\n assert result.columns.tz == expected.columns.tz\n\n def test_describe_timedelta_values(self):\n # GH 6145\n t1 = pd.timedelta_range('1 days', freq='D', periods=5)\n t2 = pd.timedelta_range('1 hours', freq='H', periods=5)\n df = pd.DataFrame({'t1': t1, 't2': t2})\n\n expected = DataFrame({'t1': [5, pd.Timedelta('3 days'),\n df.iloc[:, 0].std(),\n pd.Timedelta('1 days'),\n pd.Timedelta('2 days'),\n pd.Timedelta('3 days'),\n pd.Timedelta('4 days'),\n pd.Timedelta('5 days')],\n 't2': [5, pd.Timedelta('3 hours'),\n df.iloc[:, 1].std(),\n pd.Timedelta('1 hours'),\n pd.Timedelta('2 hours'),\n pd.Timedelta('3 hours'),\n pd.Timedelta('4 hours'),\n pd.Timedelta('5 hours')]},\n index=['count', 'mean', 'std', 'min', '25%',\n '50%', '75%', 'max'])\n\n res = df.describe()\n tm.assert_frame_equal(res, expected)\n\n exp_repr = (\" t1 t2\\n\"\n \"count 5 5\\n\"\n \"mean 3 days 00:00:00 0 days 03:00:00\\n\"\n \"std 1 days 13:56:50.394919 0 days 01:34:52.099788\\n\"\n \"min 1 days 00:00:00 0 days 01:00:00\\n\"\n \"25% 2 days 00:00:00 0 days 02:00:00\\n\"\n \"50% 3 days 00:00:00 0 days 03:00:00\\n\"\n \"75% 4 days 00:00:00 0 days 04:00:00\\n\"\n \"max 5 days 00:00:00 0 days 05:00:00\")\n assert repr(res) == exp_repr\n\n def test_describe_tz_values(self, tz_naive_fixture):\n # GH 21332\n tz = tz_naive_fixture\n s1 = Series(range(5))\n start = Timestamp(2018, 1, 1)\n end = Timestamp(2018, 1, 5)\n s2 = Series(date_range(start, end, tz=tz))\n df = pd.DataFrame({'s1': s1, 's2': s2})\n\n expected = DataFrame({'s1': [5, np.nan, np.nan, np.nan, np.nan, np.nan,\n 2, 1.581139, 0, 1, 2, 3, 4],\n 's2': [5, 5, s2.value_counts().index[0], 1,\n start.tz_localize(tz),\n end.tz_localize(tz), np.nan, np.nan,\n np.nan, np.nan, np.nan, np.nan, np.nan]},\n index=['count', 'unique', 'top', 'freq', 'first',\n 'last', 'mean', 'std', 'min', '25%', '50%',\n '75%', 'max']\n )\n res = df.describe(include='all')\n tm.assert_frame_equal(res, expected)\n\n def test_reduce_mixed_frame(self):\n # GH 6806\n df = DataFrame({\n 'bool_data': [True, True, False, False, False],\n 'int_data': [10, 20, 30, 40, 50],\n 'string_data': ['a', 'b', 'c', 'd', 'e'],\n })\n df.reindex(columns=['bool_data', 'int_data', 'string_data'])\n test = df.sum(axis=0)\n tm.assert_numpy_array_equal(test.values,\n np.array([2, 150, 'abcde'], dtype=object))\n tm.assert_series_equal(test, df.T.sum(axis=1))\n\n def 
test_count(self):\n f = lambda s: notna(s).sum()\n self._check_stat_op('count', f,\n has_skipna=False,\n has_numeric_only=True,\n check_dtype=False,\n check_dates=True)\n\n # corner case\n frame = DataFrame()\n ct1 = frame.count(1)\n assert isinstance(ct1, Series)\n\n ct2 = frame.count(0)\n assert isinstance(ct2, Series)\n\n # GH #423\n df = DataFrame(index=lrange(10))\n result = df.count(1)\n expected = Series(0, index=df.index)\n tm.assert_series_equal(result, expected)\n\n df = DataFrame(columns=lrange(10))\n result = df.count(0)\n expected = Series(0, index=df.columns)\n tm.assert_series_equal(result, expected)\n\n df = DataFrame()\n result = df.count()\n expected = Series(0, index=[])\n tm.assert_series_equal(result, expected)\n\n def test_nunique(self):\n f = lambda s: len(algorithms.unique1d(s.dropna()))\n self._check_stat_op('nunique', f, has_skipna=False,\n check_dtype=False, check_dates=True)\n\n df = DataFrame({'A': [1, 1, 1],\n 'B': [1, 2, 3],\n 'C': [1, np.nan, 3]})\n tm.assert_series_equal(df.nunique(), Series({'A': 1, 'B': 3, 'C': 2}))\n tm.assert_series_equal(df.nunique(dropna=False),\n Series({'A': 1, 'B': 3, 'C': 3}))\n tm.assert_series_equal(df.nunique(axis=1), Series({0: 1, 1: 2, 2: 2}))\n tm.assert_series_equal(df.nunique(axis=1, dropna=False),\n Series({0: 1, 1: 3, 2: 2}))\n\n def test_sum(self):\n self._check_stat_op('sum', np.sum, has_numeric_only=True,\n skipna_alternative=np.nansum)\n\n # mixed types (with upcasting happening)\n self._check_stat_op('sum', np.sum,\n frame=self.mixed_float.astype('float32'),\n has_numeric_only=True, check_dtype=False,\n check_less_precise=True)\n\n @pytest.mark.parametrize(\n \"method\", ['sum', 'mean', 'prod', 'var',\n 'std', 'skew', 'min', 'max'])\n def test_stat_operators_attempt_obj_array(self, method):\n # GH #676\n data = {\n 'a': [-0.00049987540199591344, -0.0016467257772919831,\n 0.00067695870775883013],\n 'b': [-0, -0, 0.0],\n 'c': [0.00031111847529610595, 0.0014902627951905339,\n -0.00094099200035979691]\n }\n df1 = DataFrame(data, index=['foo', 'bar', 'baz'],\n dtype='O')\n\n df2 = DataFrame({0: [np.nan, 2], 1: [np.nan, 3],\n 2: [np.nan, 4]}, dtype=object)\n\n for df in [df1, df2]:\n assert df.values.dtype == np.object_\n result = getattr(df, method)(1)\n expected = getattr(df.astype('f8'), method)(1)\n\n if method in ['sum', 'prod']:\n tm.assert_series_equal(result, expected)\n\n def test_mean(self):\n self._check_stat_op('mean', np.mean, check_dates=True)\n\n def test_product(self):\n self._check_stat_op('product', np.prod)\n\n def test_median(self):\n def wrapper(x):\n if isna(x).any():\n return np.nan\n return np.median(x)\n\n self._check_stat_op('median', wrapper, check_dates=True)\n\n def test_min(self):\n with warnings.catch_warnings(record=True):\n self._check_stat_op('min', np.min, check_dates=True)\n self._check_stat_op('min', np.min, frame=self.intframe)\n\n def test_cummin(self):\n self.tsframe.loc[5:10, 0] = nan\n self.tsframe.loc[10:15, 1] = nan\n self.tsframe.loc[15:, 2] = nan\n\n # axis = 0\n cummin = self.tsframe.cummin()\n expected = self.tsframe.apply(Series.cummin)\n tm.assert_frame_equal(cummin, expected)\n\n # axis = 1\n cummin = self.tsframe.cummin(axis=1)\n expected = self.tsframe.apply(Series.cummin, axis=1)\n tm.assert_frame_equal(cummin, expected)\n\n # it works\n df = DataFrame({'A': np.arange(20)}, index=np.arange(20))\n result = df.cummin() # noqa\n\n # fix issue\n cummin_xs = self.tsframe.cummin(axis=1)\n assert np.shape(cummin_xs) == np.shape(self.tsframe)\n\n def test_cummax(self):\n 
self.tsframe.loc[5:10, 0] = nan\n self.tsframe.loc[10:15, 1] = nan\n self.tsframe.loc[15:, 2] = nan\n\n # axis = 0\n cummax = self.tsframe.cummax()\n expected = self.tsframe.apply(Series.cummax)\n tm.assert_frame_equal(cummax, expected)\n\n # axis = 1\n cummax = self.tsframe.cummax(axis=1)\n expected = self.tsframe.apply(Series.cummax, axis=1)\n tm.assert_frame_equal(cummax, expected)\n\n # it works\n df = DataFrame({'A': np.arange(20)}, index=np.arange(20))\n result = df.cummax() # noqa\n\n # fix issue\n cummax_xs = self.tsframe.cummax(axis=1)\n assert np.shape(cummax_xs) == np.shape(self.tsframe)\n\n def test_max(self):\n with warnings.catch_warnings(record=True):\n self._check_stat_op('max', np.max, check_dates=True)\n self._check_stat_op('max', np.max, frame=self.intframe)\n\n def test_mad(self):\n f = lambda x: np.abs(x - x.mean()).mean()\n self._check_stat_op('mad', f)\n\n def test_var_std(self):\n alt = lambda x: np.var(x, ddof=1)\n self._check_stat_op('var', alt)\n\n alt = lambda x: np.std(x, ddof=1)\n self._check_stat_op('std', alt)\n\n result = self.tsframe.std(ddof=4)\n expected = self.tsframe.apply(lambda x: x.std(ddof=4))\n tm.assert_almost_equal(result, expected)\n\n result = self.tsframe.var(ddof=4)\n expected = self.tsframe.apply(lambda x: x.var(ddof=4))\n tm.assert_almost_equal(result, expected)\n\n arr = np.repeat(np.random.random((1, 1000)), 1000, 0)\n result = nanops.nanvar(arr, axis=0)\n assert not (result < 0).any()\n\n with pd.option_context('use_bottleneck', False):\n result = nanops.nanvar(arr, axis=0)\n assert not (result < 0).any()\n\n @pytest.mark.parametrize(\n \"meth\", ['sem', 'var', 'std'])\n def test_numeric_only_flag(self, meth):\n # GH #9201\n df1 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])\n # set one entry to a number in str format\n df1.loc[0, 'foo'] = '100'\n\n df2 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])\n # set one entry to a non-number str\n df2.loc[0, 'foo'] = 'a'\n\n result = getattr(df1, meth)(axis=1, numeric_only=True)\n expected = getattr(df1[['bar', 'baz']], meth)(axis=1)\n tm.assert_series_equal(expected, result)\n\n result = getattr(df2, meth)(axis=1, numeric_only=True)\n expected = getattr(df2[['bar', 'baz']], meth)(axis=1)\n tm.assert_series_equal(expected, result)\n\n # df1 has all numbers, df2 has a letter inside\n pytest.raises(TypeError, lambda: getattr(df1, meth)(\n axis=1, numeric_only=False))\n pytest.raises(TypeError, lambda: getattr(df2, meth)(\n axis=1, numeric_only=False))\n\n @pytest.mark.parametrize('op', ['mean', 'std', 'var',\n 'skew', 'kurt', 'sem'])\n def test_mixed_ops(self, op):\n # GH 16116\n df = DataFrame({'int': [1, 2, 3, 4],\n 'float': [1., 2., 3., 4.],\n 'str': ['a', 'b', 'c', 'd']})\n\n result = getattr(df, op)()\n assert len(result) == 2\n\n with pd.option_context('use_bottleneck', False):\n result = getattr(df, op)()\n assert len(result) == 2\n\n def test_cumsum(self):\n self.tsframe.loc[5:10, 0] = nan\n self.tsframe.loc[10:15, 1] = nan\n self.tsframe.loc[15:, 2] = nan\n\n # axis = 0\n cumsum = self.tsframe.cumsum()\n expected = self.tsframe.apply(Series.cumsum)\n tm.assert_frame_equal(cumsum, expected)\n\n # axis = 1\n cumsum = self.tsframe.cumsum(axis=1)\n expected = self.tsframe.apply(Series.cumsum, axis=1)\n tm.assert_frame_equal(cumsum, expected)\n\n # works\n df = DataFrame({'A': np.arange(20)}, index=np.arange(20))\n result = df.cumsum() # noqa\n\n # fix issue\n cumsum_xs = self.tsframe.cumsum(axis=1)\n assert np.shape(cumsum_xs) == 
np.shape(self.tsframe)\n\n def test_cumprod(self):\n self.tsframe.loc[5:10, 0] = nan\n self.tsframe.loc[10:15, 1] = nan\n self.tsframe.loc[15:, 2] = nan\n\n # axis = 0\n cumprod = self.tsframe.cumprod()\n expected = self.tsframe.apply(Series.cumprod)\n tm.assert_frame_equal(cumprod, expected)\n\n # axis = 1\n cumprod = self.tsframe.cumprod(axis=1)\n expected = self.tsframe.apply(Series.cumprod, axis=1)\n tm.assert_frame_equal(cumprod, expected)\n\n # fix issue\n cumprod_xs = self.tsframe.cumprod(axis=1)\n assert np.shape(cumprod_xs) == np.shape(self.tsframe)\n\n # ints\n df = self.tsframe.fillna(0).astype(int)\n df.cumprod(0)\n df.cumprod(1)\n\n # ints32\n df = self.tsframe.fillna(0).astype(np.int32)\n df.cumprod(0)\n df.cumprod(1)\n\n def test_sem(self):\n alt = lambda x: np.std(x, ddof=1) / np.sqrt(len(x))\n self._check_stat_op('sem', alt)\n\n result = self.tsframe.sem(ddof=4)\n expected = self.tsframe.apply(\n lambda x: x.std(ddof=4) / np.sqrt(len(x)))\n tm.assert_almost_equal(result, expected)\n\n arr = np.repeat(np.random.random((1, 1000)), 1000, 0)\n result = nanops.nansem(arr, axis=0)\n assert not (result < 0).any()\n\n with pd.option_context('use_bottleneck', False):\n result = nanops.nansem(arr, axis=0)\n assert not (result < 0).any()\n\n @td.skip_if_no_scipy\n def test_skew(self):\n from scipy.stats import skew\n\n def alt(x):\n if len(x) < 3:\n return np.nan\n return skew(x, bias=False)\n\n self._check_stat_op('skew', alt)\n\n @td.skip_if_no_scipy\n def test_kurt(self):\n from scipy.stats import kurtosis\n\n def alt(x):\n if len(x) < 4:\n return np.nan\n return kurtosis(x, bias=False)\n\n self._check_stat_op('kurt', alt)\n\n index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],\n labels=[[0, 0, 0, 0, 0, 0],\n [0, 1, 2, 0, 1, 2],\n [0, 1, 0, 1, 0, 1]])\n df = DataFrame(np.random.randn(6, 3), index=index)\n\n kurt = df.kurt()\n kurt2 = df.kurt(level=0).xs('bar')\n tm.assert_series_equal(kurt, kurt2, check_names=False)\n assert kurt.name is None\n assert kurt2.name == 'bar'\n\n def _check_stat_op(self, name, alternative, frame=None, has_skipna=True,\n has_numeric_only=False, check_dtype=True,\n check_dates=False, check_less_precise=False,\n skipna_alternative=None):\n if frame is None:\n frame = self.frame\n # set some NAs\n frame.loc[5:10] = np.nan\n frame.loc[15:20, -2:] = np.nan\n\n f = getattr(frame, name)\n\n if check_dates:\n df = DataFrame({'b': date_range('1/1/2001', periods=2)})\n _f = getattr(df, name)\n result = _f()\n assert isinstance(result, Series)\n\n df['a'] = lrange(len(df))\n result = getattr(df, name)()\n assert isinstance(result, Series)\n assert len(result)\n\n if has_skipna:\n def wrapper(x):\n return alternative(x.values)\n\n skipna_wrapper = tm._make_skipna_wrapper(alternative,\n skipna_alternative)\n result0 = f(axis=0, skipna=False)\n result1 = f(axis=1, skipna=False)\n tm.assert_series_equal(result0, frame.apply(wrapper),\n check_dtype=check_dtype,\n check_less_precise=check_less_precise)\n # HACK: win32\n tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),\n check_dtype=False,\n check_less_precise=check_less_precise)\n else:\n skipna_wrapper = alternative\n wrapper = alternative\n\n result0 = f(axis=0)\n result1 = f(axis=1)\n tm.assert_series_equal(result0, frame.apply(skipna_wrapper),\n check_dtype=check_dtype,\n check_less_precise=check_less_precise)\n if name in ['sum', 'prod']:\n exp = frame.apply(skipna_wrapper, axis=1)\n tm.assert_series_equal(result1, exp, check_dtype=False,\n check_less_precise=check_less_precise)\n\n # 
check dtypes\n if check_dtype:\n lcd_dtype = frame.values.dtype\n assert lcd_dtype == result0.dtype\n assert lcd_dtype == result1.dtype\n\n # result = f(axis=1)\n # comp = frame.apply(alternative, axis=1).reindex(result.index)\n # assert_series_equal(result, comp)\n\n # bad axis\n tm.assert_raises_regex(ValueError, 'No axis named 2', f, axis=2)\n # make sure works on mixed-type frame\n getattr(self.mixed_frame, name)(axis=0)\n getattr(self.mixed_frame, name)(axis=1)\n\n if has_numeric_only:\n getattr(self.mixed_frame, name)(axis=0, numeric_only=True)\n getattr(self.mixed_frame, name)(axis=1, numeric_only=True)\n getattr(self.frame, name)(axis=0, numeric_only=False)\n getattr(self.frame, name)(axis=1, numeric_only=False)\n\n # all NA case\n if has_skipna:\n all_na = self.frame * np.NaN\n r0 = getattr(all_na, name)(axis=0)\n r1 = getattr(all_na, name)(axis=1)\n if name in ['sum', 'prod']:\n unit = int(name == 'prod')\n expected = pd.Series(unit, index=r0.index, dtype=r0.dtype)\n tm.assert_series_equal(r0, expected)\n expected = pd.Series(unit, index=r1.index, dtype=r1.dtype)\n tm.assert_series_equal(r1, expected)\n\n @pytest.mark.parametrize(\"dropna, expected\", [\n (True, {'A': [12],\n 'B': [10.0],\n 'C': [1.0],\n 'D': ['a'],\n 'E': Categorical(['a'], categories=['a']),\n 'F': to_datetime(['2000-1-2']),\n 'G': to_timedelta(['1 days'])}),\n (False, {'A': [12],\n 'B': [10.0],\n 'C': [np.nan],\n 'D': np.array([np.nan], dtype=object),\n 'E': Categorical([np.nan], categories=['a']),\n 'F': [pd.NaT],\n 'G': to_timedelta([pd.NaT])}),\n (True, {'H': [8, 9, np.nan, np.nan],\n 'I': [8, 9, np.nan, np.nan],\n 'J': [1, np.nan, np.nan, np.nan],\n 'K': Categorical(['a', np.nan, np.nan, np.nan],\n categories=['a']),\n 'L': to_datetime(['2000-1-2', 'NaT', 'NaT', 'NaT']),\n 'M': to_timedelta(['1 days', 'nan', 'nan', 'nan']),\n 'N': [0, 1, 2, 3]}),\n (False, {'H': [8, 9, np.nan, np.nan],\n 'I': [8, 9, np.nan, np.nan],\n 'J': [1, np.nan, np.nan, np.nan],\n 'K': Categorical([np.nan, 'a', np.nan, np.nan],\n categories=['a']),\n 'L': to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),\n 'M': to_timedelta(['nan', '1 days', 'nan', 'nan']),\n 'N': [0, 1, 2, 3]})\n ])\n def test_mode_dropna(self, dropna, expected):\n\n df = DataFrame({\"A\": [12, 12, 19, 11],\n \"B\": [10, 10, np.nan, 3],\n \"C\": [1, np.nan, np.nan, np.nan],\n \"D\": [np.nan, np.nan, 'a', np.nan],\n \"E\": Categorical([np.nan, np.nan, 'a', np.nan]),\n \"F\": to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),\n \"G\": to_timedelta(['1 days', 'nan', 'nan', 'nan']),\n \"H\": [8, 8, 9, 9],\n \"I\": [9, 9, 8, 8],\n \"J\": [1, 1, np.nan, np.nan],\n \"K\": Categorical(['a', np.nan, 'a', np.nan]),\n \"L\": to_datetime(['2000-1-2', '2000-1-2',\n 'NaT', 'NaT']),\n \"M\": to_timedelta(['1 days', 'nan',\n '1 days', 'nan']),\n \"N\": np.arange(4, dtype='int64')})\n\n result = df[sorted(list(expected.keys()))].mode(dropna=dropna)\n expected = DataFrame(expected)\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.skipif(not compat.PY3, reason=\"only PY3\")\n def test_mode_sortwarning(self):\n # Check for the warning that is raised when the mode\n # results cannot be sorted\n\n df = DataFrame({\"A\": [np.nan, np.nan, 'a', 'a']})\n expected = DataFrame({'A': ['a', np.nan]})\n\n with tm.assert_produces_warning(UserWarning, check_stacklevel=False):\n result = df.mode(dropna=False)\n result = result.sort_values(by='A').reset_index(drop=True)\n\n tm.assert_frame_equal(result, expected)\n\n def test_operators_timedelta64(self):\n from datetime import timedelta\n df = 
DataFrame(dict(A=date_range('2012-1-1', periods=3, freq='D'),\n B=date_range('2012-1-2', periods=3, freq='D'),\n C=Timestamp('20120101') -\n timedelta(minutes=5, seconds=5)))\n\n diffs = DataFrame(dict(A=df['A'] - df['C'],\n B=df['A'] - df['B']))\n\n # min\n result = diffs.min()\n assert result[0] == diffs.loc[0, 'A']\n assert result[1] == diffs.loc[0, 'B']\n\n result = diffs.min(axis=1)\n assert (result == diffs.loc[0, 'B']).all()\n\n # max\n result = diffs.max()\n assert result[0] == diffs.loc[2, 'A']\n assert result[1] == diffs.loc[2, 'B']\n\n result = diffs.max(axis=1)\n assert (result == diffs['A']).all()\n\n # abs\n result = diffs.abs()\n result2 = abs(diffs)\n expected = DataFrame(dict(A=df['A'] - df['C'],\n B=df['B'] - df['A']))\n tm.assert_frame_equal(result, expected)\n tm.assert_frame_equal(result2, expected)\n\n # mixed frame\n mixed = diffs.copy()\n mixed['C'] = 'foo'\n mixed['D'] = 1\n mixed['E'] = 1.\n mixed['F'] = Timestamp('20130101')\n\n # results in an object array\n from pandas.core.tools.timedeltas import (\n _coerce_scalar_to_timedelta_type as _coerce)\n\n result = mixed.min()\n expected = Series([_coerce(timedelta(seconds=5 * 60 + 5)),\n _coerce(timedelta(days=-1)),\n 'foo', 1, 1.0,\n Timestamp('20130101')],\n index=mixed.columns)\n tm.assert_series_equal(result, expected)\n\n # excludes numeric\n result = mixed.min(axis=1)\n expected = Series([1, 1, 1.], index=[0, 1, 2])\n tm.assert_series_equal(result, expected)\n\n # works when only those columns are selected\n result = mixed[['A', 'B']].min(1)\n expected = Series([timedelta(days=-1)] * 3)\n tm.assert_series_equal(result, expected)\n\n result = mixed[['A', 'B']].min()\n expected = Series([timedelta(seconds=5 * 60 + 5),\n timedelta(days=-1)], index=['A', 'B'])\n tm.assert_series_equal(result, expected)\n\n # GH 3106\n df = DataFrame({'time': date_range('20130102', periods=5),\n 'time2': date_range('20130105', periods=5)})\n df['off1'] = df['time2'] - df['time']\n assert df['off1'].dtype == 'timedelta64[ns]'\n\n df['off2'] = df['time'] - df['time2']\n df._consolidate_inplace()\n assert df['off1'].dtype == 'timedelta64[ns]'\n assert df['off2'].dtype == 'timedelta64[ns]'\n\n def test_sum_corner(self):\n axis0 = self.empty.sum(0)\n axis1 = self.empty.sum(1)\n assert isinstance(axis0, Series)\n assert isinstance(axis1, Series)\n assert len(axis0) == 0\n assert len(axis1) == 0\n\n @pytest.mark.parametrize('method, unit', [\n ('sum', 0),\n ('prod', 1),\n ])\n def test_sum_prod_nanops(self, method, unit):\n idx = ['a', 'b', 'c']\n df = pd.DataFrame({\"a\": [unit, unit],\n \"b\": [unit, np.nan],\n \"c\": [np.nan, np.nan]})\n # The default\n result = getattr(df, method)\n expected = pd.Series([unit, unit, unit], index=idx, dtype='float64')\n\n # min_count=1\n result = getattr(df, method)(min_count=1)\n expected = pd.Series([unit, unit, np.nan], index=idx)\n tm.assert_series_equal(result, expected)\n\n # min_count=0\n result = getattr(df, method)(min_count=0)\n expected = pd.Series([unit, unit, unit], index=idx, dtype='float64')\n tm.assert_series_equal(result, expected)\n\n result = getattr(df.iloc[1:], method)(min_count=1)\n expected = pd.Series([unit, np.nan, np.nan], index=idx)\n tm.assert_series_equal(result, expected)\n\n # min_count > 1\n df = pd.DataFrame({\"A\": [unit] * 10, \"B\": [unit] * 5 + [np.nan] * 5})\n result = getattr(df, method)(min_count=5)\n expected = pd.Series(result, index=['A', 'B'])\n tm.assert_series_equal(result, expected)\n\n result = getattr(df, method)(min_count=6)\n expected = 
pd.Series(result, index=['A', 'B'])\n tm.assert_series_equal(result, expected)\n\n def test_sum_nanops_timedelta(self):\n # prod isn't defined on timedeltas\n idx = ['a', 'b', 'c']\n df = pd.DataFrame({\"a\": [0, 0],\n \"b\": [0, np.nan],\n \"c\": [np.nan, np.nan]})\n\n df2 = df.apply(pd.to_timedelta)\n\n # 0 by default\n result = df2.sum()\n expected = pd.Series([0, 0, 0], dtype='m8[ns]', index=idx)\n tm.assert_series_equal(result, expected)\n\n # min_count=0\n result = df2.sum(min_count=0)\n tm.assert_series_equal(result, expected)\n\n # min_count=1\n result = df2.sum(min_count=1)\n expected = pd.Series([0, 0, np.nan], dtype='m8[ns]', index=idx)\n tm.assert_series_equal(result, expected)\n\n def test_sum_object(self):\n values = self.frame.values.astype(int)\n frame = DataFrame(values, index=self.frame.index,\n columns=self.frame.columns)\n deltas = frame * timedelta(1)\n deltas.sum()\n\n def test_sum_bool(self):\n # ensure this works, bug report\n bools = np.isnan(self.frame)\n bools.sum(1)\n bools.sum(0)\n\n def test_mean_corner(self):\n # unit test when have object data\n the_mean = self.mixed_frame.mean(axis=0)\n the_sum = self.mixed_frame.sum(axis=0, numeric_only=True)\n tm.assert_index_equal(the_sum.index, the_mean.index)\n assert len(the_mean.index) < len(self.mixed_frame.columns)\n\n # xs sum mixed type, just want to know it works...\n the_mean = self.mixed_frame.mean(axis=1)\n the_sum = self.mixed_frame.sum(axis=1, numeric_only=True)\n tm.assert_index_equal(the_sum.index, the_mean.index)\n\n # take mean of boolean column\n self.frame['bool'] = self.frame['A'] > 0\n means = self.frame.mean(0)\n assert means['bool'] == self.frame['bool'].values.mean()\n\n def test_stats_mixed_type(self):\n # don't blow up\n self.mixed_frame.std(1)\n self.mixed_frame.var(1)\n self.mixed_frame.mean(1)\n self.mixed_frame.skew(1)\n\n def test_median_corner(self):\n def wrapper(x):\n if isna(x).any():\n return np.nan\n return np.median(x)\n\n self._check_stat_op('median', wrapper, frame=self.intframe,\n check_dtype=False, check_dates=True)\n\n # Miscellanea\n\n def test_count_objects(self):\n dm = DataFrame(self.mixed_frame._series)\n df = DataFrame(self.mixed_frame._series)\n\n tm.assert_series_equal(dm.count(), df.count())\n tm.assert_series_equal(dm.count(1), df.count(1))\n\n def test_cumsum_corner(self):\n dm = DataFrame(np.arange(20).reshape(4, 5),\n index=lrange(4), columns=lrange(5))\n # ?(wesm)\n result = dm.cumsum() # noqa\n\n def test_sum_bools(self):\n df = DataFrame(index=lrange(1), columns=lrange(10))\n bools = isna(df)\n assert bools.sum(axis=1)[0] == 10\n\n # Index of max / min\n\n def test_idxmin(self):\n frame = self.frame\n frame.loc[5:10] = np.nan\n frame.loc[15:20, -2:] = np.nan\n for skipna in [True, False]:\n for axis in [0, 1]:\n for df in [frame, self.intframe]:\n result = df.idxmin(axis=axis, skipna=skipna)\n expected = df.apply(Series.idxmin, axis=axis,\n skipna=skipna)\n tm.assert_series_equal(result, expected)\n\n pytest.raises(ValueError, frame.idxmin, axis=2)\n\n def test_idxmax(self):\n frame = self.frame\n frame.loc[5:10] = np.nan\n frame.loc[15:20, -2:] = np.nan\n for skipna in [True, False]:\n for axis in [0, 1]:\n for df in [frame, self.intframe]:\n result = df.idxmax(axis=axis, skipna=skipna)\n expected = df.apply(Series.idxmax, axis=axis,\n skipna=skipna)\n tm.assert_series_equal(result, expected)\n\n pytest.raises(ValueError, frame.idxmax, axis=2)\n\n # ----------------------------------------------------------------------\n # Logical reductions\n\n def 
test_any_all(self):\n self._check_bool_op('any', np.any, has_skipna=True, has_bool_only=True)\n self._check_bool_op('all', np.all, has_skipna=True, has_bool_only=True)\n\n def test_any_all_extra(self):\n df = DataFrame({\n 'A': [True, False, False],\n 'B': [True, True, False],\n 'C': [True, True, True],\n }, index=['a', 'b', 'c'])\n result = df[['A', 'B']].any(1)\n expected = Series([True, True, False], index=['a', 'b', 'c'])\n tm.assert_series_equal(result, expected)\n\n result = df[['A', 'B']].any(1, bool_only=True)\n tm.assert_series_equal(result, expected)\n\n result = df.all(1)\n expected = Series([True, False, False], index=['a', 'b', 'c'])\n tm.assert_series_equal(result, expected)\n\n result = df.all(1, bool_only=True)\n tm.assert_series_equal(result, expected)\n\n # Axis is None\n result = df.all(axis=None).item()\n assert result is False\n\n result = df.any(axis=None).item()\n assert result is True\n\n result = df[['C']].all(axis=None).item()\n assert result is True\n\n # skip pathological failure cases\n # class CantNonzero(object):\n\n # def __nonzero__(self):\n # raise ValueError\n\n # df[4] = CantNonzero()\n\n # it works!\n # df.any(1)\n # df.all(1)\n # df.any(1, bool_only=True)\n # df.all(1, bool_only=True)\n\n # df[4][4] = np.nan\n # df.any(1)\n # df.all(1)\n # df.any(1, bool_only=True)\n # df.all(1, bool_only=True)\n\n @pytest.mark.parametrize('func, data, expected', [\n (np.any, {}, False),\n (np.all, {}, True),\n (np.any, {'A': []}, False),\n (np.all, {'A': []}, True),\n (np.any, {'A': [False, False]}, False),\n (np.all, {'A': [False, False]}, False),\n (np.any, {'A': [True, False]}, True),\n (np.all, {'A': [True, False]}, False),\n (np.any, {'A': [True, True]}, True),\n (np.all, {'A': [True, True]}, True),\n\n (np.any, {'A': [False], 'B': [False]}, False),\n (np.all, {'A': [False], 'B': [False]}, False),\n\n (np.any, {'A': [False, False], 'B': [False, True]}, True),\n (np.all, {'A': [False, False], 'B': [False, True]}, False),\n\n # other types\n (np.all, {'A': pd.Series([0.0, 1.0], dtype='float')}, False),\n (np.any, {'A': pd.Series([0.0, 1.0], dtype='float')}, True),\n (np.all, {'A': pd.Series([0, 1], dtype=int)}, False),\n (np.any, {'A': pd.Series([0, 1], dtype=int)}, True),\n pytest.param(np.all, {'A': pd.Series([0, 1], dtype='M8[ns]')}, False,\n marks=[td.skip_if_np_lt_115]),\n pytest.param(np.any, {'A': pd.Series([0, 1], dtype='M8[ns]')}, True,\n marks=[td.skip_if_np_lt_115]),\n pytest.param(np.all, {'A': pd.Series([1, 2], dtype='M8[ns]')}, True,\n marks=[td.skip_if_np_lt_115]),\n pytest.param(np.any, {'A': pd.Series([1, 2], dtype='M8[ns]')}, True,\n marks=[td.skip_if_np_lt_115]),\n pytest.param(np.all, {'A': pd.Series([0, 1], dtype='m8[ns]')}, False,\n marks=[td.skip_if_np_lt_115]),\n pytest.param(np.any, {'A': pd.Series([0, 1], dtype='m8[ns]')}, True,\n marks=[td.skip_if_np_lt_115]),\n pytest.param(np.all, {'A': pd.Series([1, 2], dtype='m8[ns]')}, True,\n marks=[td.skip_if_np_lt_115]),\n pytest.param(np.any, {'A': pd.Series([1, 2], dtype='m8[ns]')}, True,\n marks=[td.skip_if_np_lt_115]),\n (np.all, {'A': pd.Series([0, 1], dtype='category')}, False),\n (np.any, {'A': pd.Series([0, 1], dtype='category')}, True),\n (np.all, {'A': pd.Series([1, 2], dtype='category')}, True),\n (np.any, {'A': pd.Series([1, 2], dtype='category')}, True),\n\n # # Mix\n # GH-21484\n # (np.all, {'A': pd.Series([10, 20], dtype='M8[ns]'),\n # 'B': pd.Series([10, 20], dtype='m8[ns]')}, True),\n ])\n def test_any_all_np_func(self, func, data, expected):\n # 
https://github.com/pandas-dev/pandas/issues/19976\n data = DataFrame(data)\n result = func(data)\n assert isinstance(result, np.bool_)\n assert result.item() is expected\n\n # method version\n result = getattr(DataFrame(data), func.__name__)(axis=None)\n assert isinstance(result, np.bool_)\n assert result.item() is expected\n\n def test_any_all_object(self):\n # https://github.com/pandas-dev/pandas/issues/19976\n result = np.all(DataFrame(columns=['a', 'b'])).item()\n assert result is True\n\n result = np.any(DataFrame(columns=['a', 'b'])).item()\n assert result is False\n\n @pytest.mark.parametrize('method', ['any', 'all'])\n def test_any_all_level_axis_none_raises(self, method):\n df = DataFrame(\n {\"A\": 1},\n index=MultiIndex.from_product([['A', 'B'], ['a', 'b']],\n names=['out', 'in'])\n )\n xpr = \"Must specify 'axis' when aggregating by level.\"\n with tm.assert_raises_regex(ValueError, xpr):\n getattr(df, method)(axis=None, level='out')\n\n def _check_bool_op(self, name, alternative, frame=None, has_skipna=True,\n has_bool_only=False):\n if frame is None:\n frame = self.frame > 0\n # set some NAs\n frame = DataFrame(frame.values.astype(object), frame.index,\n frame.columns)\n frame.loc[5:10] = np.nan\n frame.loc[15:20, -2:] = np.nan\n\n f = getattr(frame, name)\n\n if has_skipna:\n def skipna_wrapper(x):\n nona = x.dropna().values\n return alternative(nona)\n\n def wrapper(x):\n return alternative(x.values)\n\n result0 = f(axis=0, skipna=False)\n result1 = f(axis=1, skipna=False)\n tm.assert_series_equal(result0, frame.apply(wrapper))\n tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),\n check_dtype=False) # HACK: win32\n else:\n skipna_wrapper = alternative\n wrapper = alternative\n\n result0 = f(axis=0)\n result1 = f(axis=1)\n tm.assert_series_equal(result0, frame.apply(skipna_wrapper))\n tm.assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),\n check_dtype=False)\n\n # result = f(axis=1)\n # comp = frame.apply(alternative, axis=1).reindex(result.index)\n # assert_series_equal(result, comp)\n\n # bad axis\n pytest.raises(ValueError, f, axis=2)\n\n # make sure works on mixed-type frame\n mixed = self.mixed_frame\n mixed['_bool_'] = np.random.randn(len(mixed)) > 0\n getattr(mixed, name)(axis=0)\n getattr(mixed, name)(axis=1)\n\n class NonzeroFail(object):\n\n def __nonzero__(self):\n raise ValueError\n\n mixed['_nonzero_fail_'] = NonzeroFail()\n\n if has_bool_only:\n getattr(mixed, name)(axis=0, bool_only=True)\n getattr(mixed, name)(axis=1, bool_only=True)\n getattr(frame, name)(axis=0, bool_only=False)\n getattr(frame, name)(axis=1, bool_only=False)\n\n # all NA case\n if has_skipna:\n all_na = frame * np.NaN\n r0 = getattr(all_na, name)(axis=0)\n r1 = getattr(all_na, name)(axis=1)\n if name == 'any':\n assert not r0.any()\n assert not r1.any()\n else:\n assert r0.all()\n assert r1.all()\n\n # ----------------------------------------------------------------------\n # Isin\n\n def test_isin(self):\n # GH #4211\n df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],\n 'ids2': ['a', 'n', 'c', 'n']},\n index=['foo', 'bar', 'baz', 'qux'])\n other = ['a', 'b', 'c']\n\n result = df.isin(other)\n expected = DataFrame([df.loc[s].isin(other) for s in df.index])\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\"empty\", [[], Series(), np.array([])])\n def test_isin_empty(self, empty):\n # see gh-16991\n df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})\n expected = DataFrame(False, df.index, df.columns)\n\n result = 
df.isin(empty)\n tm.assert_frame_equal(result, expected)\n\n def test_isin_dict(self):\n df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})\n d = {'A': ['a']}\n\n expected = DataFrame(False, df.index, df.columns)\n expected.loc[0, 'A'] = True\n\n result = df.isin(d)\n tm.assert_frame_equal(result, expected)\n\n # non unique columns\n df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})\n df.columns = ['A', 'A']\n expected = DataFrame(False, df.index, df.columns)\n expected.loc[0, 'A'] = True\n result = df.isin(d)\n tm.assert_frame_equal(result, expected)\n\n def test_isin_with_string_scalar(self):\n # GH4763\n df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],\n 'ids2': ['a', 'n', 'c', 'n']},\n index=['foo', 'bar', 'baz', 'qux'])\n with pytest.raises(TypeError):\n df.isin('a')\n\n with pytest.raises(TypeError):\n df.isin('aaa')\n\n def test_isin_df(self):\n df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})\n df2 = DataFrame({'A': [0, 2, 12, 4], 'B': [2, np.nan, 4, 5]})\n expected = DataFrame(False, df1.index, df1.columns)\n result = df1.isin(df2)\n expected['A'].loc[[1, 3]] = True\n expected['B'].loc[[0, 2]] = True\n tm.assert_frame_equal(result, expected)\n\n # partial overlapping columns\n df2.columns = ['A', 'C']\n result = df1.isin(df2)\n expected['B'] = False\n tm.assert_frame_equal(result, expected)\n\n def test_isin_tuples(self):\n # GH16394\n df = pd.DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']})\n df['C'] = list(zip(df['A'], df['B']))\n result = df['C'].isin([(1, 'a')])\n tm.assert_series_equal(result,\n Series([True, False, False], name=\"C\"))\n\n def test_isin_df_dupe_values(self):\n df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})\n # just cols duped\n df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]],\n columns=['B', 'B'])\n with pytest.raises(ValueError):\n df1.isin(df2)\n\n # just index duped\n df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]],\n columns=['A', 'B'], index=[0, 0, 1, 1])\n with pytest.raises(ValueError):\n df1.isin(df2)\n\n # cols and index:\n df2.columns = ['B', 'B']\n with pytest.raises(ValueError):\n df1.isin(df2)\n\n def test_isin_dupe_self(self):\n other = DataFrame({'A': [1, 0, 1, 0], 'B': [1, 1, 0, 0]})\n df = DataFrame([[1, 1], [1, 0], [0, 0]], columns=['A', 'A'])\n result = df.isin(other)\n expected = DataFrame(False, index=df.index, columns=df.columns)\n expected.loc[0] = True\n expected.iloc[1, 1] = True\n tm.assert_frame_equal(result, expected)\n\n def test_isin_against_series(self):\n df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]},\n index=['a', 'b', 'c', 'd'])\n s = pd.Series([1, 3, 11, 4], index=['a', 'b', 'c', 'd'])\n expected = DataFrame(False, index=df.index, columns=df.columns)\n expected['A'].loc['a'] = True\n expected.loc['d'] = True\n result = df.isin(s)\n tm.assert_frame_equal(result, expected)\n\n def test_isin_multiIndex(self):\n idx = MultiIndex.from_tuples([(0, 'a', 'foo'), (0, 'a', 'bar'),\n (0, 'b', 'bar'), (0, 'b', 'baz'),\n (2, 'a', 'foo'), (2, 'a', 'bar'),\n (2, 'c', 'bar'), (2, 'c', 'baz'),\n (1, 'b', 'foo'), (1, 'b', 'bar'),\n (1, 'c', 'bar'), (1, 'c', 'baz')])\n df1 = DataFrame({'A': np.ones(12),\n 'B': np.zeros(12)}, index=idx)\n df2 = DataFrame({'A': [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],\n 'B': [1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1]})\n # against regular index\n expected = DataFrame(False, index=df1.index, columns=df1.columns)\n result = df1.isin(df2)\n tm.assert_frame_equal(result, expected)\n\n df2.index = idx\n expected = 
df2.values.astype(np.bool)\n expected[:, 1] = ~expected[:, 1]\n expected = DataFrame(expected, columns=['A', 'B'], index=idx)\n\n result = df1.isin(df2)\n tm.assert_frame_equal(result, expected)\n\n def test_isin_empty_datetimelike(self):\n # GH 15473\n df1_ts = DataFrame({'date':\n pd.to_datetime(['2014-01-01', '2014-01-02'])})\n df1_td = DataFrame({'date':\n [pd.Timedelta(1, 's'), pd.Timedelta(2, 's')]})\n df2 = DataFrame({'date': []})\n df3 = DataFrame()\n\n expected = DataFrame({'date': [False, False]})\n\n result = df1_ts.isin(df2)\n tm.assert_frame_equal(result, expected)\n result = df1_ts.isin(df3)\n tm.assert_frame_equal(result, expected)\n\n result = df1_td.isin(df2)\n tm.assert_frame_equal(result, expected)\n result = df1_td.isin(df3)\n tm.assert_frame_equal(result, expected)\n\n # Rounding\n def test_round(self):\n # GH 2665\n\n # Test that rounding an empty DataFrame does nothing\n df = DataFrame()\n tm.assert_frame_equal(df, df.round())\n\n # Here's the test frame we'll be working with\n df = DataFrame({'col1': [1.123, 2.123, 3.123],\n 'col2': [1.234, 2.234, 3.234]})\n\n # Default round to integer (i.e. decimals=0)\n expected_rounded = DataFrame(\n {'col1': [1., 2., 3.], 'col2': [1., 2., 3.]})\n tm.assert_frame_equal(df.round(), expected_rounded)\n\n # Round with an integer\n decimals = 2\n expected_rounded = DataFrame({'col1': [1.12, 2.12, 3.12],\n 'col2': [1.23, 2.23, 3.23]})\n tm.assert_frame_equal(df.round(decimals), expected_rounded)\n\n # This should also work with np.round (since np.round dispatches to\n # df.round)\n tm.assert_frame_equal(np.round(df, decimals), expected_rounded)\n\n # Round with a list\n round_list = [1, 2]\n with pytest.raises(TypeError):\n df.round(round_list)\n\n # Round with a dictionary\n expected_rounded = DataFrame(\n {'col1': [1.1, 2.1, 3.1], 'col2': [1.23, 2.23, 3.23]})\n round_dict = {'col1': 1, 'col2': 2}\n tm.assert_frame_equal(df.round(round_dict), expected_rounded)\n\n # Incomplete dict\n expected_partially_rounded = DataFrame(\n {'col1': [1.123, 2.123, 3.123], 'col2': [1.2, 2.2, 3.2]})\n partial_round_dict = {'col2': 1}\n tm.assert_frame_equal(df.round(partial_round_dict),\n expected_partially_rounded)\n\n # Dict with unknown elements\n wrong_round_dict = {'col3': 2, 'col2': 1}\n tm.assert_frame_equal(df.round(wrong_round_dict),\n expected_partially_rounded)\n\n # float input to `decimals`\n non_int_round_dict = {'col1': 1, 'col2': 0.5}\n with pytest.raises(TypeError):\n df.round(non_int_round_dict)\n\n # String input\n non_int_round_dict = {'col1': 1, 'col2': 'foo'}\n with pytest.raises(TypeError):\n df.round(non_int_round_dict)\n\n non_int_round_Series = Series(non_int_round_dict)\n with pytest.raises(TypeError):\n df.round(non_int_round_Series)\n\n # List input\n non_int_round_dict = {'col1': 1, 'col2': [1, 2]}\n with pytest.raises(TypeError):\n df.round(non_int_round_dict)\n\n non_int_round_Series = Series(non_int_round_dict)\n with pytest.raises(TypeError):\n df.round(non_int_round_Series)\n\n # Non integer Series inputs\n non_int_round_Series = Series(non_int_round_dict)\n with pytest.raises(TypeError):\n df.round(non_int_round_Series)\n\n non_int_round_Series = Series(non_int_round_dict)\n with pytest.raises(TypeError):\n df.round(non_int_round_Series)\n\n # Negative numbers\n negative_round_dict = {'col1': -1, 'col2': -2}\n big_df = df * 100\n expected_neg_rounded = DataFrame(\n {'col1': [110., 210, 310], 'col2': [100., 200, 300]})\n tm.assert_frame_equal(big_df.round(negative_round_dict),\n expected_neg_rounded)\n\n # nan in 
Series round\n nan_round_Series = Series({'col1': nan, 'col2': 1})\n\n # TODO(wesm): unused?\n expected_nan_round = DataFrame({ # noqa\n 'col1': [1.123, 2.123, 3.123],\n 'col2': [1.2, 2.2, 3.2]})\n\n with pytest.raises(TypeError):\n df.round(nan_round_Series)\n\n # Make sure this doesn't break existing Series.round\n tm.assert_series_equal(df['col1'].round(1), expected_rounded['col1'])\n\n # named columns\n # GH 11986\n decimals = 2\n expected_rounded = DataFrame(\n {'col1': [1.12, 2.12, 3.12], 'col2': [1.23, 2.23, 3.23]})\n df.columns.name = \"cols\"\n expected_rounded.columns.name = \"cols\"\n tm.assert_frame_equal(df.round(decimals), expected_rounded)\n\n # interaction of named columns & series\n tm.assert_series_equal(df['col1'].round(decimals),\n expected_rounded['col1'])\n tm.assert_series_equal(df.round(decimals)['col1'],\n expected_rounded['col1'])\n\n def test_numpy_round(self):\n # See gh-12600\n df = DataFrame([[1.53, 1.36], [0.06, 7.01]])\n out = np.round(df, decimals=0)\n expected = DataFrame([[2., 1.], [0., 7.]])\n tm.assert_frame_equal(out, expected)\n\n msg = \"the 'out' parameter is not supported\"\n with tm.assert_raises_regex(ValueError, msg):\n np.round(df, decimals=0, out=df)\n\n def test_round_mixed_type(self):\n # GH11885\n df = DataFrame({'col1': [1.1, 2.2, 3.3, 4.4],\n 'col2': ['1', 'a', 'c', 'f'],\n 'col3': date_range('20111111', periods=4)})\n round_0 = DataFrame({'col1': [1., 2., 3., 4.],\n 'col2': ['1', 'a', 'c', 'f'],\n 'col3': date_range('20111111', periods=4)})\n tm.assert_frame_equal(df.round(), round_0)\n tm.assert_frame_equal(df.round(1), df)\n tm.assert_frame_equal(df.round({'col1': 1}), df)\n tm.assert_frame_equal(df.round({'col1': 0}), round_0)\n tm.assert_frame_equal(df.round({'col1': 0, 'col2': 1}), round_0)\n tm.assert_frame_equal(df.round({'col3': 1}), df)\n\n def test_round_issue(self):\n # GH11611\n\n df = pd.DataFrame(np.random.random([3, 3]), columns=['A', 'B', 'C'],\n index=['first', 'second', 'third'])\n\n dfs = pd.concat((df, df), axis=1)\n rounded = dfs.round()\n tm.assert_index_equal(rounded.index, dfs.index)\n\n decimals = pd.Series([1, 0, 2], index=['A', 'B', 'A'])\n pytest.raises(ValueError, df.round, decimals)\n\n def test_built_in_round(self):\n if not compat.PY3:\n pytest.skip(\"build in round cannot be overridden \"\n \"prior to Python 3\")\n\n # GH11763\n # Here's the test frame we'll be working with\n df = DataFrame(\n {'col1': [1.123, 2.123, 3.123], 'col2': [1.234, 2.234, 3.234]})\n\n # Default round to integer (i.e. 
decimals=0)\n expected_rounded = DataFrame(\n {'col1': [1., 2., 3.], 'col2': [1., 2., 3.]})\n tm.assert_frame_equal(round(df), expected_rounded)\n\n def test_pct_change(self):\n # GH 11150\n pnl = DataFrame([np.arange(0, 40, 10), np.arange(0, 40, 10), np.arange(\n 0, 40, 10)]).astype(np.float64)\n pnl.iat[1, 0] = np.nan\n pnl.iat[1, 1] = np.nan\n pnl.iat[2, 3] = 60\n\n for axis in range(2):\n expected = pnl.ffill(axis=axis) / pnl.ffill(axis=axis).shift(\n axis=axis) - 1\n result = pnl.pct_change(axis=axis, fill_method='pad')\n\n tm.assert_frame_equal(result, expected)\n\n # Clip\n\n def test_clip(self):\n median = self.frame.median().median()\n original = self.frame.copy()\n\n capped = self.frame.clip_upper(median)\n assert not (capped.values > median).any()\n\n floored = self.frame.clip_lower(median)\n assert not (floored.values < median).any()\n\n double = self.frame.clip(upper=median, lower=median)\n assert not (double.values != median).any()\n\n # Verify that self.frame was not changed inplace\n assert (self.frame.values == original.values).all()\n\n def test_inplace_clip(self):\n # GH #15388\n median = self.frame.median().median()\n frame_copy = self.frame.copy()\n\n frame_copy.clip_upper(median, inplace=True)\n assert not (frame_copy.values > median).any()\n frame_copy = self.frame.copy()\n\n frame_copy.clip_lower(median, inplace=True)\n assert not (frame_copy.values < median).any()\n frame_copy = self.frame.copy()\n\n frame_copy.clip(upper=median, lower=median, inplace=True)\n assert not (frame_copy.values != median).any()\n\n def test_dataframe_clip(self):\n # GH #2747\n df = DataFrame(np.random.randn(1000, 2))\n\n for lb, ub in [(-1, 1), (1, -1)]:\n clipped_df = df.clip(lb, ub)\n\n lb, ub = min(lb, ub), max(ub, lb)\n lb_mask = df.values <= lb\n ub_mask = df.values >= ub\n mask = ~lb_mask & ~ub_mask\n assert (clipped_df.values[lb_mask] == lb).all()\n assert (clipped_df.values[ub_mask] == ub).all()\n assert (clipped_df.values[mask] == df.values[mask]).all()\n\n def test_clip_mixed_numeric(self):\n # TODO(jreback)\n # clip on mixed integer or floats\n # with integer clippers coerces to float\n df = DataFrame({'A': [1, 2, 3],\n 'B': [1., np.nan, 3.]})\n result = df.clip(1, 2)\n expected = DataFrame({'A': [1, 2, 2.],\n 'B': [1., np.nan, 2.]})\n tm.assert_frame_equal(result, expected, check_like=True)\n\n @pytest.mark.parametrize(\"inplace\", [True, False])\n def test_clip_against_series(self, inplace):\n # GH #6966\n\n df = DataFrame(np.random.randn(1000, 2))\n lb = Series(np.random.randn(1000))\n ub = lb + 1\n\n original = df.copy()\n clipped_df = df.clip(lb, ub, axis=0, inplace=inplace)\n\n if inplace:\n clipped_df = df\n\n for i in range(2):\n lb_mask = original.iloc[:, i] <= lb\n ub_mask = original.iloc[:, i] >= ub\n mask = ~lb_mask & ~ub_mask\n\n result = clipped_df.loc[lb_mask, i]\n tm.assert_series_equal(result, lb[lb_mask], check_names=False)\n assert result.name == i\n\n result = clipped_df.loc[ub_mask, i]\n tm.assert_series_equal(result, ub[ub_mask], check_names=False)\n assert result.name == i\n\n tm.assert_series_equal(clipped_df.loc[mask, i], df.loc[mask, i])\n\n @pytest.mark.parametrize(\"inplace\", [True, False])\n @pytest.mark.parametrize(\"lower\", [[2, 3, 4], np.asarray([2, 3, 4])])\n @pytest.mark.parametrize(\"axis,res\", [\n (0, [[2., 2., 3.], [4., 5., 6.], [7., 7., 7.]]),\n (1, [[2., 3., 4.], [4., 5., 6.], [5., 6., 7.]])\n ])\n def test_clip_against_list_like(self, inplace, lower, axis, res):\n # GH #15390\n original = self.simple.copy(deep=True)\n\n result = 
original.clip(lower=lower, upper=[5, 6, 7],\n axis=axis, inplace=inplace)\n\n expected = pd.DataFrame(res,\n columns=original.columns,\n index=original.index)\n if inplace:\n result = original\n tm.assert_frame_equal(result, expected, check_exact=True)\n\n @pytest.mark.parametrize(\"axis\", [0, 1, None])\n def test_clip_against_frame(self, axis):\n df = DataFrame(np.random.randn(1000, 2))\n lb = DataFrame(np.random.randn(1000, 2))\n ub = lb + 1\n\n clipped_df = df.clip(lb, ub, axis=axis)\n\n lb_mask = df <= lb\n ub_mask = df >= ub\n mask = ~lb_mask & ~ub_mask\n\n tm.assert_frame_equal(clipped_df[lb_mask], lb[lb_mask])\n tm.assert_frame_equal(clipped_df[ub_mask], ub[ub_mask])\n tm.assert_frame_equal(clipped_df[mask], df[mask])\n\n def test_clip_with_na_args(self):\n \"\"\"Should process np.nan argument as None \"\"\"\n # GH # 17276\n tm.assert_frame_equal(self.frame.clip(np.nan), self.frame)\n tm.assert_frame_equal(self.frame.clip(upper=np.nan, lower=np.nan),\n self.frame)\n\n # GH #19992\n df = DataFrame({'col_0': [1, 2, 3], 'col_1': [4, 5, 6],\n 'col_2': [7, 8, 9]})\n\n result = df.clip(lower=[4, 5, np.nan], axis=0)\n expected = DataFrame({'col_0': [4, 5, np.nan], 'col_1': [4, 5, np.nan],\n 'col_2': [7, 8, np.nan]})\n tm.assert_frame_equal(result, expected)\n\n result = df.clip(lower=[4, 5, np.nan], axis=1)\n expected = DataFrame({'col_0': [4, 4, 4], 'col_1': [5, 5, 6],\n 'col_2': [np.nan, np.nan, np.nan]})\n tm.assert_frame_equal(result, expected)\n\n # Matrix-like\n def test_dot(self):\n a = DataFrame(np.random.randn(3, 4), index=['a', 'b', 'c'],\n columns=['p', 'q', 'r', 's'])\n b = DataFrame(np.random.randn(4, 2), index=['p', 'q', 'r', 's'],\n columns=['one', 'two'])\n\n result = a.dot(b)\n expected = DataFrame(np.dot(a.values, b.values),\n index=['a', 'b', 'c'],\n columns=['one', 'two'])\n # Check alignment\n b1 = b.reindex(index=reversed(b.index))\n result = a.dot(b)\n tm.assert_frame_equal(result, expected)\n\n # Check series argument\n result = a.dot(b['one'])\n tm.assert_series_equal(result, expected['one'], check_names=False)\n assert result.name is None\n\n result = a.dot(b1['one'])\n tm.assert_series_equal(result, expected['one'], check_names=False)\n assert result.name is None\n\n # can pass correct-length arrays\n row = a.iloc[0].values\n\n result = a.dot(row)\n exp = a.dot(a.iloc[0])\n tm.assert_series_equal(result, exp)\n\n with tm.assert_raises_regex(ValueError,\n 'Dot product shape mismatch'):\n a.dot(row[:-1])\n\n a = np.random.rand(1, 5)\n b = np.random.rand(5, 1)\n A = DataFrame(a)\n\n # TODO(wesm): unused\n B = DataFrame(b) # noqa\n\n # it works\n result = A.dot(b)\n\n # unaligned\n df = DataFrame(randn(3, 4), index=[1, 2, 3], columns=lrange(4))\n df2 = DataFrame(randn(5, 3), index=lrange(5), columns=[1, 2, 3])\n\n with tm.assert_raises_regex(ValueError, 'aligned'):\n df.dot(df2)\n\n @pytest.mark.skipif(not PY35,\n reason='matmul supported for Python>=3.5')\n @pytest.mark.xfail(\n _np_version_under1p12,\n reason=\"unpredictable return types under numpy < 1.12\")\n def test_matmul(self):\n # matmul test is for GH #10259\n a = DataFrame(np.random.randn(3, 4), index=['a', 'b', 'c'],\n columns=['p', 'q', 'r', 's'])\n b = DataFrame(np.random.randn(4, 2), index=['p', 'q', 'r', 's'],\n columns=['one', 'two'])\n\n # DataFrame @ DataFrame\n result = operator.matmul(a, b)\n expected = DataFrame(np.dot(a.values, b.values),\n index=['a', 'b', 'c'],\n columns=['one', 'two'])\n tm.assert_frame_equal(result, expected)\n\n # DataFrame @ Series\n result = operator.matmul(a, b.one)\n 
expected = Series(np.dot(a.values, b.one.values),\n index=['a', 'b', 'c'])\n tm.assert_series_equal(result, expected)\n\n # np.array @ DataFrame\n result = operator.matmul(a.values, b)\n expected = np.dot(a.values, b.values)\n tm.assert_almost_equal(result, expected)\n\n # nested list @ DataFrame (__rmatmul__)\n result = operator.matmul(a.values.tolist(), b)\n expected = DataFrame(np.dot(a.values, b.values),\n index=['a', 'b', 'c'],\n columns=['one', 'two'])\n tm.assert_almost_equal(result.values, expected.values)\n\n # mixed dtype DataFrame @ DataFrame\n a['q'] = a.q.round().astype(int)\n result = operator.matmul(a, b)\n expected = DataFrame(np.dot(a.values, b.values),\n index=['a', 'b', 'c'],\n columns=['one', 'two'])\n tm.assert_frame_equal(result, expected)\n\n # different dtypes DataFrame @ DataFrame\n a = a.astype(int)\n result = operator.matmul(a, b)\n expected = DataFrame(np.dot(a.values, b.values),\n index=['a', 'b', 'c'],\n columns=['one', 'two'])\n tm.assert_frame_equal(result, expected)\n\n # unaligned\n df = DataFrame(randn(3, 4), index=[1, 2, 3], columns=lrange(4))\n df2 = DataFrame(randn(5, 3), index=lrange(5), columns=[1, 2, 3])\n\n with tm.assert_raises_regex(ValueError, 'aligned'):\n operator.matmul(df, df2)\n\n\[email protected]\ndef df_duplicates():\n return pd.DataFrame({'a': [1, 2, 3, 4, 4],\n 'b': [1, 1, 1, 1, 1],\n 'c': [0, 1, 2, 5, 4]},\n index=[0, 0, 1, 1, 1])\n\n\[email protected]\ndef df_strings():\n return pd.DataFrame({'a': np.random.permutation(10),\n 'b': list(ascii_lowercase[:10]),\n 'c': np.random.permutation(10).astype('float64')})\n\n\[email protected]\ndef df_main_dtypes():\n return pd.DataFrame(\n {'group': [1, 1, 2],\n 'int': [1, 2, 3],\n 'float': [4., 5., 6.],\n 'string': list('abc'),\n 'category_string': pd.Series(list('abc')).astype('category'),\n 'category_int': [7, 8, 9],\n 'datetime': pd.date_range('20130101', periods=3),\n 'datetimetz': pd.date_range('20130101',\n periods=3,\n tz='US/Eastern'),\n 'timedelta': pd.timedelta_range('1 s', periods=3, freq='s')},\n columns=['group', 'int', 'float', 'string',\n 'category_string', 'category_int',\n 'datetime', 'datetimetz',\n 'timedelta'])\n\n\nclass TestNLargestNSmallest(object):\n\n dtype_error_msg_template = (\"Column {column!r} has dtype {dtype}, cannot \"\n \"use method {method!r} with this dtype\")\n\n # ----------------------------------------------------------------------\n # Top / bottom\n @pytest.mark.parametrize('order', [\n ['a'],\n ['c'],\n ['a', 'b'],\n ['a', 'c'],\n ['b', 'a'],\n ['b', 'c'],\n ['a', 'b', 'c'],\n ['c', 'a', 'b'],\n ['c', 'b', 'a'],\n ['b', 'c', 'a'],\n ['b', 'a', 'c'],\n\n # dups!\n ['b', 'c', 'c']])\n @pytest.mark.parametrize('n', range(1, 11))\n def test_n(self, df_strings, nselect_method, n, order):\n # GH10393\n df = df_strings\n if 'b' in order:\n\n error_msg = self.dtype_error_msg_template.format(\n column='b', method=nselect_method, dtype='object')\n with tm.assert_raises_regex(TypeError, error_msg):\n getattr(df, nselect_method)(n, order)\n else:\n ascending = nselect_method == 'nsmallest'\n result = getattr(df, nselect_method)(n, order)\n expected = df.sort_values(order, ascending=ascending).head(n)\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize('columns', [\n ('group', 'category_string'), ('group', 'string')])\n def test_n_error(self, df_main_dtypes, nselect_method, columns):\n df = df_main_dtypes\n col = columns[1]\n error_msg = self.dtype_error_msg_template.format(\n column=col, method=nselect_method, dtype=df[col].dtype)\n # escape 
some characters that may be in the repr\n error_msg = (error_msg.replace('(', '\\\\(').replace(\")\", \"\\\\)\")\n .replace(\"[\", \"\\\\[\").replace(\"]\", \"\\\\]\"))\n with tm.assert_raises_regex(TypeError, error_msg):\n getattr(df, nselect_method)(2, columns)\n\n def test_n_all_dtypes(self, df_main_dtypes):\n df = df_main_dtypes\n df.nsmallest(2, list(set(df) - {'category_string', 'string'}))\n df.nlargest(2, list(set(df) - {'category_string', 'string'}))\n\n def test_n_identical_values(self):\n # GH15297\n df = pd.DataFrame({'a': [1] * 5, 'b': [1, 2, 3, 4, 5]})\n\n result = df.nlargest(3, 'a')\n expected = pd.DataFrame(\n {'a': [1] * 3, 'b': [1, 2, 3]}, index=[0, 1, 2]\n )\n tm.assert_frame_equal(result, expected)\n\n result = df.nsmallest(3, 'a')\n expected = pd.DataFrame({'a': [1] * 3, 'b': [1, 2, 3]})\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize('order', [\n ['a', 'b', 'c'],\n ['c', 'b', 'a'],\n ['a'],\n ['b'],\n ['a', 'b'],\n ['c', 'b']])\n @pytest.mark.parametrize('n', range(1, 6))\n def test_n_duplicate_index(self, df_duplicates, n, order):\n # GH 13412\n\n df = df_duplicates\n result = df.nsmallest(n, order)\n expected = df.sort_values(order).head(n)\n tm.assert_frame_equal(result, expected)\n\n result = df.nlargest(n, order)\n expected = df.sort_values(order, ascending=False).head(n)\n tm.assert_frame_equal(result, expected)\n\n def test_duplicate_keep_all_ties(self):\n # see gh-16818\n df = pd.DataFrame({'a': [5, 4, 4, 2, 3, 3, 3, 3],\n 'b': [10, 9, 8, 7, 5, 50, 10, 20]})\n result = df.nlargest(4, 'a', keep='all')\n expected = pd.DataFrame({'a': {0: 5, 1: 4, 2: 4, 4: 3,\n 5: 3, 6: 3, 7: 3},\n 'b': {0: 10, 1: 9, 2: 8, 4: 5,\n 5: 50, 6: 10, 7: 20}})\n tm.assert_frame_equal(result, expected)\n\n result = df.nsmallest(2, 'a', keep='all')\n expected = pd.DataFrame({'a': {3: 2, 4: 3, 5: 3, 6: 3, 7: 3},\n 'b': {3: 7, 4: 5, 5: 50, 6: 10, 7: 20}})\n tm.assert_frame_equal(result, expected)\n\n def test_series_broadcasting(self):\n # smoke test for numpy warnings\n # GH 16378, GH 16306\n df = DataFrame([1.0, 1.0, 1.0])\n df_nan = DataFrame({'A': [np.nan, 2.0, np.nan]})\n s = Series([1, 1, 1])\n s_nan = Series([np.nan, np.nan, 1])\n\n with tm.assert_produces_warning(None):\n df_nan.clip_lower(s, axis=0)\n for op in ['lt', 'le', 'gt', 'ge', 'eq', 'ne']:\n getattr(df, op)(s_nan, axis=0)\n\n def test_series_nat_conversion(self):\n # GH 18521\n # Check rank does not mutate DataFrame\n df = DataFrame(np.random.randn(10, 3), dtype='float64')\n expected = df.copy()\n df.rank()\n result = df\n tm.assert_frame_equal(result, expected)\n"
] | [
[
"pandas.timedelta_range",
"pandas.core.nanops.nansem",
"numpy.ones",
"pandas.Series",
"pandas.CategoricalIndex",
"pandas.core.nanops.nanvar",
"numpy.var",
"numpy.asarray",
"pandas.Categorical",
"pandas.util.testing.assert_produces_warning",
"pandas.util.testing.assert_series_equal",
"pandas.util.testing.assert_almost_equal",
"pandas.util.testing.assert_categorical_equal",
"numpy.cov",
"pandas.util.testing.makeTimeDataFrame",
"scipy.stats.skew",
"pandas.to_datetime",
"numpy.random.rand",
"numpy.isnan",
"pandas.to_timedelta",
"pandas.Timestamp",
"numpy.round",
"numpy.linspace",
"numpy.corrcoef",
"numpy.random.randint",
"pandas.util.testing.assert_numpy_array_equal",
"pandas.date_range",
"numpy.zeros",
"scipy.stats.kurtosis",
"pandas.MultiIndex.from_product",
"numpy.median",
"pandas.Timedelta",
"numpy.arange",
"pandas.MultiIndex.from_tuples",
"pandas.util.testing.assert_index_equal",
"pandas.concat",
"numpy.std",
"numpy.array",
"pandas.DatetimeIndex",
"pandas.util.testing.assert_raises_regex",
"pandas.notna",
"numpy.random.permutation",
"pandas.DataFrame",
"numpy.random.randn",
"numpy.random.random",
"numpy.shape",
"pandas.util.testing._make_skipna_wrapper",
"pandas.compat.lrange",
"pandas.MultiIndex",
"numpy.dot",
"pandas.util.testing.assert_frame_equal",
"pandas.isna",
"pandas.option_context"
]
] |
MehariBZ/pydca | [
"034e0707a13e6e43da1343630047d47caeca896e"
] | [
"pydca/meanfield_dca/meanfield_dca.py"
] | [
"from __future__ import absolute_import, division\nfrom . import msa_numerics\nfrom pydca.fasta_reader import fasta_reader\nimport logging\nimport numpy as np\n\n\"\"\"This module implements Direc Coupling Analysis (DCA) of residue coevolution\nfor protein and RNA sequences using the mean-field algorithm. The final\ncoevolution score is computed from the direct probability. The general steps\ncarried out are outlined as follows\n\nFor a detailed information about Direct Coupling Analysis, one can refer to the\nfollowing articles:\n\n a) Identification of direct residue contacts in protein-protein interaction\n by message-passing\n Martin Weigt, Robert A White, Hendrik Szurmant, James A Hoch, Terence Hwa\n Journal: Proceedings of the National Academy of Sciences\n Volume: 106\n Issue: 1\n Pages: 67-72\n b) Direct-coupling analysis of residue coevolution captures native contacts\n across many protein families\n Faruck Morcos, Andrea Pagnani, Bryan Lunt, Arianna Bertolino,\n Debora S Marks, Chris Sander, Riccardo Zecchina, Jose N Onuchic,\n Terence Hwa, Martin Weigt\n Journal: Proceedings of the National Academy of Sciences\n Volume: 108\n Issue: 49\n Pages: E1293-E1301\n\nAuthor(s) Mehari B. Zerihun, Alexander Schug\n\"\"\"\n\nlogger = logging.getLogger(__name__)\n\nclass MeanFieldDCAException(Exception):\n \"\"\"\n \"\"\"\n\nclass MeanFieldDCA:\n \"\"\"MeanFieldDCA class. Instances of this class are used to carry out Direct\n Coupling Analysis (DCA) of residue coevolution using the mean-field DCA\n algorithm.\n \"\"\"\n def __init__(self, msa_file_name, biomolecule, pseudocount=None, seqid=None):\n \"\"\"MeanFieldDCA object class initializer\n Parameters\n ----------\n msa_file : str\n Name of the FASTA formatted file containing alignmnet\n biomolecule : str\n Type of biomolecule (must be protein or RNA, lower or\n upper case)\n pseudocount : float\n Parameter for regularizing data before DCA analysis.\n Default value is 0.5\n seqid : float\n This parameter's value measure the maximum\n similarity two or more sequences can have so that they can be\n considered distinct, or lumped together otherwise.\n Returns\n -------\n None : None\n \"\"\"\n\n self.__pseudocount = pseudocount if pseudocount is not None else 0.5\n self.__seqid = seqid if seqid is not None else 0.8\n #Validate the value of pseudo count incase user provide an invalid one\n if self.__pseudocount >= 1.0 or self.__pseudocount < 0:\n logger.error('\\n\\tValue of relative pseudo-count must be'\n ' between 0 and 1.0. Typical value is 0.5')\n raise ValueError\n #Validate the value of sequence identity\n if self.__seqid > 1.0 or self.__seqid <= 0.0:\n logger.error('\\n\\tValue of sequence-identity must'\n ' not exceed 1 nor less than 0. Typical values are 0.7, 0.8., 0.9')\n raise ValueError\n biomolecule = biomolecule.strip().upper()\n self.__msa_file_name = msa_file_name\n if biomolecule=='RNA':\n self.__num_site_states = 5\n elif biomolecule=='PROTEIN':\n self.__num_site_states = 21\n else:\n logger.error(\n '\\n\\tUnknown biomolecule ... 
must be protein (PROTEIN) or rna (RNA)',\n )\n raise ValueError\n \n self.__sequences = fasta_reader.get_alignment_int_form(\n self.__msa_file_name,\n biomolecule=biomolecule,\n )\n\n self.__num_sequences = len(self.__sequences)\n self.__sequences_len = len(self.__sequences[0])\n self.__biomolecule = biomolecule\n if self.__seqid < 1.0:\n self.__sequences_weight = self.compute_sequences_weight()\n else :\n # assign each sequence a weight of one\n self.__sequences_weight = np.ones((self.__num_sequences,), dtype = np.float64)\n self.__effective_num_sequences = np.sum(self.__sequences_weight)\n #sometimes users might enter the wrong biomolecule type\n #verify biomolecule type\n\n mf_dca_info = \"\"\"\\n\\tCreated a MeanFieldDCA object with the following attributes\n \\tbiomolecule: {}\n \\ttotal states at sites: {}\n \\tpseudocount: {}\n \\tsequence identity: {}\n \\talignment length: {}\n \\ttotal number of unique sequences (excluding redundant sequences with 100 percent similarity): {}\n \\teffective number of sequences (with sequence identity {}): {}\n \"\"\".format(\n biomolecule,\n self.__num_site_states,\n self.__pseudocount,\n self.__seqid,\n self.__sequences_len,\n self.__num_sequences,\n self.__seqid,\n self.__effective_num_sequences,\n )\n logger.info(mf_dca_info)\n return None\n\n\n def __str__(self):\n \"\"\"Describes the MeanFieldDCA object.\n\n Parameters\n ----------\n self: MeanFieldDCA\n Instance of MeanFieldDCA class\n\n Returns\n -------\n description : str\n A representation about objects created from\n the MeanFieldDCA class.\n \"\"\"\n description = '<instance of MeanFieldDCA>'\n return description\n\n\n def __call__(self, pseudocount = 0.5 , seqid = 0.8):\n \"\"\"Resets the value of pseudo count and sequence identity through\n the instance.\n\n Parameters\n ----------\n self : MeanFieldDCA\n MeanFieldDCA instance.\n pseudocount : float\n The value of the raltive pseudo count. It must be between\n 0 and 1. Default value is 0.5.\n seqid : float\n Threshold sequence similarity for computing sequences weight.\n This parameter must be between 0 and 1. Typical values are\n 0.7, 0.8, 0.9 or something in between these numbers.\n\n Returns\n -------\n None : None\n \"\"\"\n\n #warn the user that paramertes are being reset\n self.__pseudocount = pseudocount\n self.__seqid = seqid\n logger.warning('\\n\\tYou have changed one of the parameters (pseudo count or sequence identity)'\n '\\n\\tfrom their default values'\n '\\n\\tpseudocount: {} \\n\\tsequence_identity: {}'.format(\n self.__pseudocount, self.__seqid,\n )\n )\n return None\n\n\n @property\n def alignment(self):\n \"\"\"Alignment data getter.\n Parameters\n ----------\n self : MeanFieldDCA\n Instance of MeanFieldDCA class\n\n Returns\n --------\n self.__sequences : list\n A 2d list of alignment sequences in integer representation.\n \"\"\"\n\n return self.__sequences\n\n @property\n def biomolecule(self):\n \"\"\"Sequence type getter\n\n Parameters\n ----------\n Self : MeanFieldDCA\n Instance of MeanFieldDCA class\n Returns\n -------\n self.__biomolecule : str\n Biomolecule type (protein or RNA)\n \"\"\"\n return self.__biomolecule\n @property\n def sequences_len(self):\n \"\"\"Sequences length getter.\n\n Parameters\n ---------\n self : MeanFieldDCA\n Instance of MeanFieldDCA class\n\n Returns\n -------\n self.__sequences_len : int\n Sequences length in alignment data\n \"\"\"\n\n return self.__sequences_len\n\n\n @property\n def num_site_states(self):\n \"\"\"Get number of states for an MSA (eg. 
5 for RNAs and 21 for proteins)\n\n Parameters\n ----------\n self : MeanFieldDCA\n Instance of MeanFieldDCA class\n\n Returns\n -------\n self.__num_site_states : int\n Maximum number of states in a sequence site\n \"\"\"\n\n return self.__num_site_states\n\n @property\n def num_sequences(self):\n \"\"\"Getter for the number of sequences read from alignment file\n\n Parameters\n ----------\n self : MeanFieldDCA\n Instance of MeanFieldDCA class\n\n Returns\n -------\n self.__num_sequences : int\n The total number of sequences in alignment data\n \"\"\"\n\n return self.__num_sequences\n\n\n @property\n def sequence_identity(self):\n \"\"\"Getter for the value of sequence indentity.\n\n Parameters\n ----------\n self : MeanFieldDCA\n Instance of MeanFieldDCA class\n\n Returns\n -------\n self.__seqid : float\n Cut-off value for sequences similarity above which sequences are\n considered identical\n \"\"\"\n\n return self.__seqid\n\n\n @property\n def pseudocount(self):\n \"\"\"Getter for value of pseudo count\n\n Parameters\n ----------\n self : MeanFieldDCA\n Instance of MeanFieldDCA class\n\n Returns\n -------\n self.__pseudocount : float\n Value of pseudo count usef for regularization\n \"\"\"\n\n return self.__pseudocount\n\n\n @property\n def sequences_weight(self):\n \"\"\"Getter for the weight of each sequences in alignment data.\n\n Parameters\n ----------\n self : MeanFieldDCA\n Instance of MeanFieldDCA class\n\n Returns\n -------\n self.__sequences_weight : np.array(dtype=np.float64)\n A 1d numpy array containing the weight of each sequences in the\n alignment.\n \"\"\"\n\n return self.__sequences_weight\n\n\n @property\n def effective_num_sequences(self):\n \"\"\"Getter for the effective number of sequences.\n\n Parameters\n ----------\n self : MeanFieldDCA\n Instance of MeanFieldDCA class\n\n Returns\n -------\n np.sum(self.__sequences_weight) : float\n The sum of each sequence's weight.\n \"\"\"\n\n return np.sum(self.__sequences_weight)\n\n\n def compute_sequences_weight(self):\n \"\"\"Computes the weight of each sequences in the alignment. If the\n sequences identity is one, each sequences has equal weight and this is\n the maximum weight a sequence in the alignment data can have. Whenever\n the sequence identity is set a value less than one, sequences that have\n similarity beyond the sequence identity are lumped together. If there are\n m similar sequences, their corresponding weight is the reciprocal.\n\n Parameters\n ----------\n self : MeanFieldDCA\n The instance\n\n Returns\n -------\n weights : np.array\n A 1d numpy array of size self.__num_sequences containing the\n weight of each sequence.\n \"\"\"\n\n logger.info('\\n\\tComputing sequences weights')\n weights = msa_numerics.compute_sequences_weight(\n alignment_data= np.array(self.__sequences, dtype=np.int32),\n seqid = self.__seqid,\n )\n return weights\n\n\n def get_single_site_freqs(self):\n \"\"\"Computes single site frequency counts.\n\n Parameters\n ----------\n self : MeanFieldDCA\n The instance.\n\n Returns\n -------\n single_site_freqs : np.array\n A 2d numpy array of shape (L, q) containing the frequency\n count of residues at sequence sites. L is the length of\n sequences in the alignment, and q is the maximum possible\n states a site can accommodate. 
The last state (q) of each\n site represents a gap.\n \"\"\"\n\n logger.info('\\n\\tComputing single site frequencies')\n\n single_site_freqs = msa_numerics.compute_single_site_freqs(\n alignment_data = np.array(self.__sequences),\n num_site_states = self.__num_site_states,\n seqs_weight = self.__sequences_weight,\n )\n return single_site_freqs\n\n\n def get_reg_single_site_freqs(self):\n \"\"\"Regularizes single site frequencies.\n\n Parameters\n ----------\n self : MeanFieldDCA\n The instance\n\n Returns\n -------\n reg_single_site_freqs : np.array\n A 2d numpy array of shape (L, q) containing regularized single\n site frequencies. L and q are the sequences length and maximum\n number of site-states respectively.\n \"\"\"\n\n single_site_freqs = self.get_single_site_freqs()\n\n logger.info('\\n\\tRegularizing single site frequencies')\n\n reg_single_site_freqs = msa_numerics.get_reg_single_site_freqs(\n single_site_freqs = single_site_freqs,\n seqs_len = self.__sequences_len,\n num_site_states = self.__num_site_states,\n pseudocount = self.__pseudocount,\n )\n return reg_single_site_freqs\n\n\n def get_pair_site_freqs(self):\n \"\"\"Computes pair site frequencies\n\n Parameters\n ----------\n self : MeanFieldDCA\n The instance.\n\n Returns\n -------\n pair_site_freqs : np.array\n A 2d numpy array of pair site frequncies. It has a shape of\n (N, q-1, q-1) where N is the number of unique site pairs and q\n is the maximum number of states a site can accommodate. Note\n site pairig is performed in the following order: (0, 0), (0, 1),\n ..., (0, L-1), ...(L-1, L) where L is the sequences length. This\n ordering is critical that any computation involding pair site\n frequencies must be implemented in the righ order of pairs.\n \"\"\"\n\n logger.info('\\n\\tComputing pair site frequencies')\n pair_site_freqs = msa_numerics.compute_pair_site_freqs(\n alignment_data = np.array(self.__sequences),\n num_site_states = self.__num_site_states,\n seqs_weight = self.__sequences_weight,\n )\n return pair_site_freqs\n\n\n def get_reg_pair_site_freqs(self):\n \"\"\"Regularizes pair site frequencies\n\n Parameters\n ----------\n self : MeanFieldDCA\n The instance.\n\n Returns\n -------\n reg_pair_site_freqs : np.array\n A 3d numpy array of shape (N, q-1, q-1) containing regularized\n pair site frequencies. N is the number of unique site pairs and\n q is the maximum number of states in a sequence site. 
The\n ordering of pairs follows numbering like (unregularized) pair\n site frequencies.\n \"\"\"\n\n pair_site_freqs = self.get_pair_site_freqs()\n logger.info('\\n\\tRegularizing pair site frequencies')\n reg_pair_site_freqs = msa_numerics.get_reg_pair_site_freqs(\n pair_site_freqs = pair_site_freqs,\n seqs_len = self.__sequences_len,\n num_site_states = self.__num_site_states,\n pseudocount = self.__pseudocount,\n )\n return reg_pair_site_freqs\n\n\n def construct_corr_mat(self, reg_fi, reg_fij):\n \"\"\"Constructs the correlation matrix from regularized frequencies.\n\n Parameters\n ----------\n self : MeanFieldDCA\n The instance.\n reg_fi : np.array\n Regularized single site frequencies.\n reg_fij : np.array\n Regularized pair site frequncies.\n\n Returns\n -------\n corr_mat : np.array\n A 2d numpy array of (N, N) where N = L*(q-1) where L and q are\n the length of sequences and number of states in a site\n respectively.\n \"\"\"\n\n logger.info('\\n\\tConstructing the correlation matrix')\n corr_mat = msa_numerics.construct_corr_mat(\n reg_fi = reg_fi,\n reg_fij = reg_fij,\n seqs_len = self.__sequences_len,\n num_site_states = self.__num_site_states,\n )\n return corr_mat\n\n\n def compute_couplings(self, corr_mat):\n \"\"\"Computing couplings by inverting the matrix of correlations. Note that\n the couplings are the negative of the inverse of the correlation matrix.\n\n Parameters\n ----------\n self : MeanFieldDCA\n The instance.\n corr_mat : np.array\n The correlation matrix formed from regularized pair site and\n single site frequencies.\n\n Returns\n -------\n couplings : np.array\n A 2d numpy array of the same shape as the correlation matrix.\n \"\"\"\n\n logger.info('\\n\\tComputing couplings')\n try:\n couplings = msa_numerics.compute_couplings(corr_mat = corr_mat)\n except Exception as e:\n logger.error('\\n\\tCorrelation {}\\n\\tYou set the pseudocount {}.'\n ' You might need to increase it.'.format(e, self.__pseudocount)\n )\n raise\n # capture couplings to avoid recomputing\n self.__couplings = couplings \n logger.info('\\n\\tMaximum and minimum couplings: {}, {}'.format(\n np.max(couplings), np.min(couplings)))\n return couplings\n\n\n def compute_two_site_model_fields(self, couplings, reg_fi):\n \"\"\"Computes two site model fields by fitting the marginal probabilities\n of the direct probability with the empirical data obtained from the\n alignment\n\n Parameters\n ----------\n self : MeanFieldDCA\n The instance.\n couplings : np.array\n A 2d numpy array of couplings computed from the correlation matrix.\n reg_fi : np.array\n A 3d numpy array of regularized single site frequencies.\n Returns\n -------\n two_site_model_fields : np.array\n A 3d numpy array of shape (N, q, q) where N is the total number\n of unique site pairs and q is the maximum number of states a site\n can accommodate. The ordering of site pairs is the same as those\n in pair site frequencies.\n \"\"\"\n\n logger.info('\\n\\tComputing two site model fields')\n two_site_model_fields = msa_numerics.compute_two_site_model_fields(\n couplings = couplings,\n reg_fi = reg_fi,\n seqs_len = self.__sequences_len,\n num_site_states = self.__num_site_states,\n )\n return two_site_model_fields\n\n\n def compute_fields(self, couplings=None):\n \"\"\"Computes the local fields of the global probability of sequence space.\n\n Parameters\n ----------\n self : MeanFieldDCA\n An instance of MeanFieldDCA class\n\n couplings : np.array\n A 2d numpy array of the couplings. 
If not give, will be computed.\n\n Returns\n -------\n fields : dict \n A dictionary of fields whose keys are sites in MSA and whose values\n are arrays of fields per site.\n \"\"\"\n\n if couplings is None:\n reg_fi = self.get_reg_single_site_freqs()\n reg_fij = self.get_reg_pair_site_freqs()\n corr_mat = self.construct_corr_mat(reg_fi, reg_fij)\n couplings = self.compute_couplings(corr_mat)\n else:\n reg_fi = self.get_reg_single_site_freqs()\n q = self.__num_site_states\n fields = dict()\n logger.info('\\n\\tComputing local fields of the global probability function')\n for i in range(self.__sequences_len):\n pi = reg_fi[i]\n piq = pi[-1]\n sum = np.zeros((q-1, 1))\n row_start = i * (q - 1)\n row_end = row_start + (q - 1)\n for j in range(self.__sequences_len):\n if j != i:\n pj = reg_fi[j]\n col_start = j * (q - 1)\n col_end = col_start + (q - 1)\n couplings_ij = couplings[row_start:row_end, col_start:col_end]\n pj_col_vec = np.reshape(pj[:-1], (q-1, 1))\n sum += np.dot(couplings_ij, pj_col_vec)\n\n fields_i = np.log(pi[:-1]/piq) - np.reshape(sum, (q-1, ))\n fields[i] = fields_i\n return fields\n \n \n def shift_couplings(self, couplings_ij):\n \"\"\"Shifts the couplings value.\n\n Parameters\n ----------\n self : MeanFieldDCA \n An instance of MeanFieldDCA class\n couplings_ij : np.array\n 1d array of couplings for site pair (i, j)\n Returns\n -------\n shifted_couplings_ij : np.array\n A 2d array of the couplings for site pair (i, j)\n \"\"\"\n qm1 = self.__num_site_states - 1\n couplings_ij = np.reshape(couplings_ij, (qm1,qm1))\n avx = np.mean(couplings_ij, axis=1)\n avx = np.reshape(avx, (qm1, 1))\n avy = np.mean(couplings_ij, axis=0)\n avy = np.reshape(avy, (1, qm1))\n av = np.mean(couplings_ij)\n couplings_ij = couplings_ij - avx - avy + av\n return couplings_ij \n\n \n def compute_params(self, seqbackmapper=None, ranked_by=None, linear_dist=None, num_site_pairs=None):\n \"\"\"Computes fields and couplings with the couplings ranked by DCA score.\n\n Parameters\n ----------\n self : MeanFieldDCA\n An instanc of MeanFieldDCA class\n seqbackmapper : SequenceBackmapper\n An instance of SequenceBackmapper class\n ranked_by : str\n DCA score type usef to rank the couplings by their site pairs.\n By default they are ranked by the Frobenius Norm of couplings with\n average product correction.\n linear_dist : int\n Minimum separation beteween site pairs (i, j).\n num_site_pairs : int \n Number of site pairs whose couplings are to be otained. \n \n Returns\n -------\n fields, couplings : tuple \n A tuple of lists of fields and couplings. 
\n \"\"\"\n if ranked_by is None: ranked_by = 'fn_apc'\n if linear_dist is None: linear_dist = 4\n\n RANKING_METHODS = ('FN', 'FN_APC', 'DI', 'DI_APC')\n ranked_by = ranked_by.strip().upper()\n if ranked_by not in RANKING_METHODS:\n logger.error('\\n\\tInvalid ranking criterion {}.\\nChoose from {}'.format(ranked_by, RANKING_METHODS))\n raise MeanFieldDCAException\n if ranked_by == 'FN': dca_scores = self.compute_sorted_FN(seqbackmapper=seqbackmapper)\n if ranked_by == 'FN_APC': dca_scores = self.compute_sorted_FN_APC(seqbackmapper=seqbackmapper)\n if ranked_by == 'DI': dca_scores = self.compute_sorted_DI(seqbackmapper=seqbackmapper)\n if ranked_by == 'DI_APC': dca_scores = self.compute_sorted_DI_APC(seqbackmapper=seqbackmapper)\n\n fields = self.compute_fields(couplings=self.__couplings)\n\n qm1 = self.__num_site_states - 1 \n\n if seqbackmapper is not None:\n # mapping_dict has keys from MSA sites and values from refseq sites\n # we need to reverse this mapping as the fields and couplings are from MSA sites\n mapping_dict = {\n value : key for key, value in self.__refseq_mapping_dict.items()\n }\n else:\n mapping_dict = {\n i : i for i in range(self.__sequences_len)\n }\n # set default number of site pairs whose couplings are to be extracted\n if num_site_pairs is None :\n num_site_pairs = len(seqbackmapper.ref_sequence) if seqbackmapper is not None else len(mapping_dict.keys()) \n # we need only the fields corresponding to mapped sites \n fields_mapped = list()\n logger.info('\\n\\tExtracting fields')\n for i in mapping_dict.keys():\n site_in_msa = mapping_dict[i]\n fields_im = fields[site_in_msa]\n site_fields = i, fields_im\n fields_mapped.append(site_fields)\n # extract couplings\n logger.info('\\n\\tExtracting couplings for top {} site pairs (i, j) with |i - j| > {} and ranked by {}'.format(\n num_site_pairs, linear_dist, ranked_by)\n )\n couplings_ranked_by_dca_score = list()\n count_pairs = 0\n for pair, score in dca_scores:\n site_1_in_refseq, site_2_in_refseq = pair[0], pair[1]\n if abs(site_1_in_refseq - site_2_in_refseq) > linear_dist:\n count_pairs += 1\n if count_pairs > num_site_pairs: break \n i, j = mapping_dict[site_1_in_refseq], mapping_dict[site_2_in_refseq]\n if(i > j): \n logger.error('\\n\\tInvalid site pair. Site pair (i, j) should be ordered in i < j')\n raise MeanFieldDCAException\n row_start = i * qm1 \n row_end = row_start + qm1 \n column_start = j * qm1 \n column_end = column_start + qm1 \n couplings_ij = self.__couplings[row_start:row_end, column_start:column_end]\n couplings_ij = self.shift_couplings(couplings_ij) # now couplings_ij is a 2d numpy array\n couplings_ij = np.reshape(couplings_ij, (qm1*qm1,))\n pair_couplings_ij = pair, couplings_ij \n couplings_ranked_by_dca_score.append(pair_couplings_ij)\n if count_pairs < num_site_pairs:\n logger.warning('\\n\\tObtained couplings for only {} ranked site pairs.' \n '\\n\\tThis is the maximum number of site paris we can obtain under ' \n 'the given criteria'.format(count_pairs)\n )\n \n return tuple(fields_mapped), tuple(couplings_ranked_by_dca_score) \n\n\n def get_mapped_site_pairs_dca_scores(self, sorted_dca_scores, seqbackmapper):\n \"\"\"Filters mapped site pairs with a reference sequence. 
\n\n Parameters\n -----------\n self : MeanFieldDCA\n An instance of MeanFieldDCA class\n sorted_dca_scores : tuple of tuples\n A tuple of tuples of site-pair and DCA score sorted by DCA scores \n in reverse order.\n seqbackmapper : SequenceBackmapper \n An instance of SequenceBackmapper class\n \n Returns\n -------\n sorted_scores_mapped : tuple\n A tuple of tuples of site pairs and dca score\n \"\"\"\n mapping_dict = seqbackmapper.map_to_reference_sequence()\n # Add attribute __reseq_mapping_dict\n self.__refseq_mapping_dict = mapping_dict \n sorted_scores_mapped = list()\n num_mapped_pairs = 0\n for pair, score in sorted_dca_scores:\n try:\n mapped_pair = mapping_dict[pair[0]], mapping_dict[pair[1]]\n except KeyError:\n pass \n else:\n current_pair_score = mapped_pair, score \n sorted_scores_mapped.append(current_pair_score)\n num_mapped_pairs += 1\n # sort mapped pairs in case they were not\n sorted_scores_mapped = sorted(sorted_scores_mapped, key = lambda k : k[1], reverse=True)\n logger.info('\\n\\tTotal number of mapped sites: {}'.format(num_mapped_pairs))\n return tuple(sorted_scores_mapped)\n\n \n def get_site_pair_di_score(self):\n \"\"\"Obtains computed direct information (DI) scores from backend and\n puts them a list of tuples of in (site-pair, score) form.\n\n Parameters\n ----------\n self : MeanFieldDCA\n The instance.\n\n Returns\n -------\n site_pair_di_score : list\n A list of tuples containing site pairs and DCA score, i.e., the\n list [((i, j), score), ...] for all unique ite pairs (i, j) \n such that j > i.\n \"\"\"\n reg_fi = self.get_reg_single_site_freqs()\n reg_fij = self.get_reg_pair_site_freqs()\n corr_mat = self.construct_corr_mat(reg_fi, reg_fij)\n couplings = self.compute_couplings(corr_mat)\n fields_ij = self.compute_two_site_model_fields(couplings, reg_fi)\n logger.info('\\n\\tComputing direct information')\n unsorted_DI = msa_numerics.compute_direct_info(\n couplings = couplings,\n fields_ij = fields_ij,\n reg_fi = reg_fi,\n seqs_len = self.__sequences_len,\n num_site_states = self.__num_site_states,\n )\n\n site_pair_di_score= dict()\n pair_counter = 0\n for i in range(self.__sequences_len - 1):\n for j in range(i + 1, self.__sequences_len):\n site_pair = (i , j)\n site_pair_di_score[site_pair] = unsorted_DI[pair_counter]\n pair_counter += 1\n return site_pair_di_score\n\n def compute_sorted_DI(self, seqbackmapper=None):\n \"\"\"Computes direct informations for each pair of sites and sorts them in\n descending order of DCA score.\n\n Parameters\n ----------\n self : MeanFieldDCA\n The instance.\n seqbackmapper : SequenceBackmapper\n An instance of SequenceBackmapper class.\n\n Returns\n -------\n sorted_DI : list\n A list of tuples containing site pairs and DCA score, i.e., the\n contents of sorted_DI are [((i, j), score), ...] 
for all unique\n site pairs (i, j) such that j > i.\n \"\"\"\n unsorted_DI = self.get_site_pair_di_score()\n sorted_DI = sorted(unsorted_DI.items(), key = lambda k : k[1], reverse=True)\n if seqbackmapper is not None:\n sorted_DI = self.get_mapped_site_pairs_dca_scores(sorted_DI, seqbackmapper)\n return sorted_DI\n\n\n def compute_sorted_DI_APC(self, seqbackmapper=None):\n \"\"\"Computes the average DI score for every site.\n\n Parameters\n ----------\n self : MeanFieldDCA\n An instance of MeanFieldDCA class\n seqbackmapper : SequenceBackmapper\n An instance of SequenceBackmapper class.\n Returns\n -------\n sorted_DI_APC : list\n A list of tuples containing site pairs and DCA score, i.e., the\n contents of sorted_DI are [((i, j), score), ...] for all unique\n site pairs (i, j) such that j > i. These DI scores are average \n product corrected.\n \"\"\"\n\n sorted_DI = self.compute_sorted_DI() # we must not supply seqbackmapper at this point. \n # the backmapping is done at the end of APC step\n logger.info('\\n\\tPerforming average product correction (APC) of DI scores')\n # compute the average score of each site\n av_score_sites = list()\n N = self.__sequences_len\n for i in range(N):\n i_scores = [score for pair, score in sorted_DI if i in pair]\n assert len(i_scores) == N - 1\n i_scores_sum = sum(i_scores)\n i_scores_ave = i_scores_sum/float(N - 1)\n av_score_sites.append(i_scores_ave)\n # compute average product corrected DI\n av_all_scores = sum(av_score_sites)/float(N)\n sorted_DI_APC = list()\n for pair, score in sorted_DI:\n i, j = pair\n score_apc = score - av_score_sites[i] * (av_score_sites[j]/av_all_scores)\n sorted_DI_APC.append((pair, score_apc))\n # sort the scores as doing APC may have disrupted the ordering\n sorted_DI_APC = sorted(sorted_DI_APC, key = lambda k : k[1], reverse=True)\n # Now we must do backmapping if seqbackmapper is provided.\n if seqbackmapper is not None:\n sorted_DI_APC = self.get_mapped_site_pairs_dca_scores(sorted_DI_APC, seqbackmapper)\n return sorted_DI_APC\n\n\n def compute_sorted_FN(self, seqbackmapper=None):\n \"\"\"Computes the Frobenius norm of couplings.\n Parameters\n ----------\n self : MeanFieldDCA\n An instance of MeanFieldDCA class.\n seqbackmapper : SequenceBackmapper\n An instance of SequenceBackmapper class.\n\n Returns\n -------\n fn_sorted : list\n A list of tuples containing site pairs and DCA score, i.e., the\n list [((i, j), score), ...] 
for all unique\n site pairs (i, j) such that j > i.\n \"\"\"\n reg_fi = self.get_reg_single_site_freqs()\n reg_fij = self.get_reg_pair_site_freqs()\n corr_mat = self.construct_corr_mat(reg_fi, reg_fij)\n couplings = self.compute_couplings(corr_mat)\n logger.info('\\n\\tComputing Frobenius norm of couplings')\n num_sites = self.__sequences_len\n q = self.__num_site_states\n frobenius_norm = list()\n for i in range(num_sites):\n row_start = i * (q - 1)\n row_end = row_start + (q - 1)\n for j in range(i + 1, num_sites):\n site_pair = (i, j)\n col_start = j * (q - 1)\n col_end = col_start + (q - 1)\n cij = couplings[row_start:row_end, col_start:col_end]\n cij_mean_1 = np.reshape(np.mean(cij, axis=0), (1, q-1))\n cij_mean_2 = np.reshape(np.mean(cij, axis=1), (q-1, 1))\n cij_mean = np.mean(cij)\n cij_new = cij - cij_mean_1 - cij_mean_2 + cij_mean\n fn_ij = np.sqrt(np.sum(cij_new * cij_new))\n frobenius_norm.append((site_pair, fn_ij))\n fn_sorted = sorted(frobenius_norm, key = lambda x : x[1], reverse=True)\n if seqbackmapper is not None:\n fn_sorted = self.get_mapped_site_pairs_dca_scores(fn_sorted, seqbackmapper)\n return fn_sorted\n\n\n def compute_sorted_FN_APC(self, seqbackmapper = None):\n \"\"\"Performs average product correction (APC) on DCA scores\n\n Parameters\n ----------\n self : MeanFieldDCA\n An instance of MeanFieldDCA class.\n seqbackmapper : SequenceBackmapper\n An instance of SequenceBackmapper class.\n\n Returns\n -------\n sorted_FN_APC : list\n A list of tuples containing site pairs and DCA score, i.e., the\n list [((i, j), score), ...] for all unique site pairs (i, j) \n such that j > i. The DCA scores are average product corrected.\n \"\"\"\n raw_FN = self.compute_sorted_FN() # Must not supply seqbackmapper at this stage.\n logger.info('\\n\\tPerforming average product correction (APC) to Frobenius'\n ' norm of couplings.'\n )\n\n # compute the average score of each site\n av_score_sites = list()\n N = self.__sequences_len\n for i in range(N):\n i_scores = [score for pair, score in raw_FN if i in pair]\n assert len(i_scores) == N - 1\n i_scores_sum = sum(i_scores)\n i_scores_ave = i_scores_sum/float(N - 1)\n av_score_sites.append(i_scores_ave)\n # compute average product corrected DI\n av_all_scores = sum(av_score_sites)/float(N)\n sorted_FN_APC = list()\n for pair, score in raw_FN:\n i, j = pair\n score_apc = score - av_score_sites[i] * (av_score_sites[j]/av_all_scores)\n sorted_FN_APC.append((pair, score_apc))\n sorted_FN_APC = sorted(sorted_FN_APC, key=lambda x : x[1], reverse=True)\n # Must do backmapping is sebackmapper is not None\n if seqbackmapper is not None:\n sorted_FN_APC = self.get_mapped_site_pairs_dca_scores(sorted_FN_APC, seqbackmapper) \n return sorted_FN_APC\n\n\nif __name__ == '__main__':\n \"\"\"\n \"\"\"\n"
] | [
[
"numpy.sum",
"numpy.ones",
"numpy.zeros",
"numpy.reshape",
"numpy.max",
"numpy.log",
"numpy.min",
"numpy.array",
"numpy.dot",
"numpy.mean"
]
] |
wakafengfan/CPM-1-Finetune | [
"b2c30bd94df31bcd6ee75ba90c347113563d4075"
] | [
"arguments.py"
] | [
"# coding=utf-8\n# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"argparser configuration\"\"\"\n\nimport argparse\nimport os\nimport torch\nimport deepspeed\n\n\ndef add_model_config_args(parser):\n \"\"\"Model arguments\"\"\"\n\n group = parser.add_argument_group('model', 'model configuration')\n\n group.add_argument('--pretrained-bert', action='store_true',\n help='use a pretrained bert-large-uncased model instead'\n 'of initializing from scratch. See '\n '--tokenizer-model-type to specify which pretrained '\n 'BERT model to use')\n group.add_argument('--attention-dropout', type=float, default=0.1,\n help='dropout probability for attention weights')\n group.add_argument('--num-attention-heads', type=int, default=16,\n help='num of transformer attention heads')\n group.add_argument('--hidden-size', type=int, default=1024,\n help='tansformer hidden size')\n group.add_argument('--intermediate-size', type=int, default=None,\n help='transformer embedding dimension for FFN'\n 'set to 4*`--hidden-size` if it is None')\n group.add_argument('--num-layers', type=int, default=24,\n help='num decoder layers')\n group.add_argument('--layernorm-epsilon', type=float, default=1e-5,\n help='layer norm epsilon')\n group.add_argument('--hidden-dropout', type=float, default=0.1,\n help='dropout probability for hidden state transformer')\n group.add_argument('--max-position-embeddings', type=int, default=512,\n help='maximum number of position embeddings to use')\n group.add_argument('--vocab-size', type=int, default=30522,\n help='vocab size to use for non-character-level '\n 'tokenization. This value will only be used when '\n 'creating a tokenizer')\n group.add_argument('--deep-init', action='store_true',\n help='initialize bert model similar to gpt2 model.'\n 'scales initialization of projection layers by a '\n 'factor of 1/sqrt(2N). 
Necessary to train bert '\n 'models larger than BERT-Large.')\n group.add_argument('--make-vocab-size-divisible-by', type=int, default=128,\n help='Pad the vocab size to be divisible by this value.'\n 'This is added for computational efficieny reasons.')\n group.add_argument('--cpu-optimizer', action='store_true',\n help='Run optimizer on CPU')\n group.add_argument('--cpu_torch_adam', action='store_true',\n help='Use Torch Adam as optimizer on CPU.')\n\n return parser\n\n\ndef add_fp16_config_args(parser):\n \"\"\"Mixed precision arguments.\"\"\"\n\n group = parser.add_argument_group('fp16', 'fp16 configurations')\n\n group.add_argument('--fp16', action='store_true',\n help='Run model in fp16 mode')\n group.add_argument('--fp32-embedding', action='store_true',\n help='embedding in fp32')\n group.add_argument('--fp32-layernorm', action='store_true',\n help='layer norm in fp32')\n group.add_argument('--fp32-tokentypes', action='store_true',\n help='embedding token types in fp32')\n group.add_argument('--fp32-allreduce', action='store_true',\n help='all-reduce in fp32')\n group.add_argument('--hysteresis', type=int, default=2,\n help='hysteresis for dynamic loss scaling')\n group.add_argument('--loss-scale', type=float, default=None,\n help='Static loss scaling, positive power of 2 '\n 'values can improve fp16 convergence. If None, dynamic'\n 'loss scaling is used.')\n group.add_argument('--loss-scale-window', type=float, default=1000,\n help='Window over which to raise/lower dynamic scale')\n group.add_argument('--min-scale', type=float, default=1,\n help='Minimum loss scale for dynamic loss scale')\n\n return parser\n\n\ndef add_training_args(parser):\n \"\"\"Training arguments.\"\"\"\n\n group = parser.add_argument_group('train', 'training configurations')\n\n group.add_argument('--do_train', action='store_true',\n help=\"Do training\")\n group.add_argument('--do_eval', action='store_true',\n help=\"Do evaluation\")\n group.add_argument('--zero_shot', action=\"store_true\",\n help=\"do zero-shot\")\n group.add_argument('--batch-size', type=int, default=4,\n help='Data Loader batch size')\n group.add_argument('--weight-decay', type=float, default=0.01,\n help='weight decay coefficient for L2 regularization')\n group.add_argument('--checkpoint-activations', action='store_true',\n help='checkpoint activation to allow for training '\n 'with larger models and sequences')\n group.add_argument('--checkpoint-num-layers', type=int, default=1,\n help='chunk size (number of layers) for checkpointing')\n group.add_argument('--deepspeed-activation-checkpointing', action='store_true',\n help='uses activation checkpointing from deepspeed')\n group.add_argument('--clip-grad', type=float, default=1.0,\n help='gradient clipping')\n group.add_argument('--epoch', type=int, default=10,\n help='total number of iterations to train over all training runs')\n group.add_argument('--log-interval', type=int, default=100,\n help='report interval')\n group.add_argument('--exit-interval', type=int, default=None,\n help='Exit the program after this many new iterations.')\n\n group.add_argument('--seed', type=int, default=1234,\n help='random seed')\n # Batch prodecuer arguments\n group.add_argument('--reset-position-ids', action='store_true',\n help='Reset posistion ids after end-of-document token.')\n group.add_argument('--reset-attention-mask', action='store_true',\n help='Reset self attention maske after '\n 'end-of-document token.')\n \n # Learning rate.\n group.add_argument('--lr-decay-iters', type=int, 
default=None,\n help='number of iterations to decay LR over,'\n ' If None defaults to `--train-iters`*`--epochs`')\n group.add_argument('--lr-decay-style', type=str, default='linear',\n choices=['constant', 'linear', 'cosine', 'exponential'],\n help='learning rate decay function')\n group.add_argument('--lr', type=float, default=1.0e-4,\n help='initial learning rate')\n group.add_argument('--warmup', type=float, default=0.01,\n help='percentage of data to warmup on (.01 = 1% of all '\n 'training iters). Default 0.01')\n # model checkpointing\n group.add_argument('--save', type=str, default=None,\n help='Output directory to save checkpoints to.')\n group.add_argument('--save-interval', type=int, default=5000,\n help='number of iterations between saves')\n group.add_argument('--no-save-optim', action='store_true',\n help='Do not save current optimizer.')\n group.add_argument('--no-save-rng', action='store_true',\n help='Do not save current rng state.')\n group.add_argument('--load', type=str, default=None,\n help='Path to a directory containing a model checkpoint.')\n group.add_argument('--no-load-optim', action='store_true',\n help='Do not load optimizer when loading checkpoint.')\n group.add_argument('--no-load-rng', action='store_true',\n help='Do not load rng state when loading checkpoint.')\n group.add_argument('--finetune', action='store_true',\n help='Load model for finetuning. Do not load optimizer '\n 'or rng state from checkpoint and set iteration to 0. '\n 'Assumed when loading a release checkpoint.')\n # distributed training args\n group.add_argument('--distributed-backend', default='nccl',\n help='which backend to use for distributed '\n 'training. One of [gloo, nccl]')\n\n group.add_argument('--local_rank', type=int, default=None,\n help='local rank passed from distributed launcher.')\n\n group.add_argument('--results_dir', type=str, default=None,\n help='The dir to save the model.')\n group.add_argument('--model_name', type=str, default=\"test\",\n help=\"The name you give to the model.\")\n\n # eval\n group.add_argument('--eval_ckpt_path', type=str, default=None,\n help='The checkpoint path used for evaluation')\n\n return parser\n\n\ndef add_evaluation_args(parser):\n \"\"\"Evaluation arguments.\"\"\"\n\n group = parser.add_argument_group('validation', 'validation configurations')\n\n group.add_argument('--eval-batch-size', type=int, default=None,\n help='Data Loader batch size for evaluation datasets.'\n 'Defaults to `--batch-size`')\n group.add_argument('--eval-iters', type=int, default=100,\n help='number of iterations to run for evaluation'\n 'validation/test for')\n group.add_argument('--eval-interval', type=int, default=1000,\n help='interval between running evaluation on validation set')\n group.add_argument('--eval-seq-length', type=int, default=None,\n help='Maximum sequence length to process for '\n 'evaluation. Defaults to `--seq-length`')\n group.add_argument('--eval-max-preds-per-seq', type=int, default=None,\n help='Maximum number of predictions to use for '\n 'evaluation. 
Defaults to '\n 'math.ceil(`--eval-seq-length`*.15/10)*10')\n group.add_argument('--overlapping-eval', type=int, default=32,\n help='sliding window for overlapping eval ')\n group.add_argument('--cloze-eval', action='store_true',\n help='Evaluation dataset from `--valid-data` is a cloze task')\n group.add_argument('--eval-hf', action='store_true',\n help='perform evaluation with huggingface openai model.'\n 'use `--load` to specify weights path to be loaded')\n group.add_argument('--load-openai', action='store_true',\n help='load openai weights into our model. Use `--load` '\n 'to specify weights path to be loaded')\n\n return parser\n\ndef add_text_generate_args(parser):\n \"\"\"Text generate arguments.\"\"\"\n\n group = parser.add_argument_group('Text generation', 'configurations')\n group.add_argument(\"--temperature\", type=float, default=1.0)\n group.add_argument(\"--top_p\", type=float, default=0.0)\n group.add_argument(\"--top_k\", type=int, default=0)\n group.add_argument(\"--out-seq-length\", type=int, default=256)\n return parser\n\n\ndef add_data_args(parser):\n \"\"\"Train/valid/test data arguments.\"\"\"\n\n group = parser.add_argument_group('data', 'data configurations')\n group.add_argument('--data_dir', type=str, required=True,\n help=\"Training data dir\")\n group.add_argument('--mmap-warmup', action='store_true',\n help='Warm up mmap files.')\n group.add_argument('--model-parallel-size', type=int, default=1,\n help='size of the model parallel.')\n group.add_argument('--shuffle', action='store_true',\n help='Shuffle data. Shuffling is deterministic '\n 'based on seed and current epoch.')\n group.add_argument('--use-npy-data-loader', action='store_true',\n help='Use the numpy data loader. If set, then'\n 'train-data-path, val-data-path, and test-data-path'\n 'should also be provided.')\n group.add_argument('--num-workers', type=int, default=2,\n help=\"\"\"Number of workers to use for dataloading\"\"\")\n group.add_argument('--tokenizer-model-type', type=str,\n default='bert-large-uncased',\n help=\"Model type to use for sentencepiece tokenization \\\n (one of ['bpe', 'char', 'unigram', 'word']) or \\\n bert vocab to use for BertWordPieceTokenizer (one of \\\n ['bert-large-uncased', 'bert-large-cased', etc.])\")\n group.add_argument('--tokenizer-path', type=str, default='tokenizer.model',\n help='path used to save/load sentencepiece tokenization '\n 'models')\n group.add_argument('--tokenizer-type', type=str,\n default='BertWordPieceTokenizer',\n choices=['CharacterLevelTokenizer',\n 'SentencePieceTokenizer',\n 'BertWordPieceTokenizer',\n 'GPT2BPETokenizer'],\n help='what type of tokenizer to use')\n group.add_argument(\"--cache-dir\", default=None, type=str,\n help=\"Where to store pre-trained BERT downloads\")\n group.add_argument('--use-tfrecords', action='store_true',\n help='load `--train-data`, `--valid-data`, '\n '`--test-data` from BERT tf records instead of '\n 'normal data pipeline')\n group.add_argument('--seq-length', type=int, default=512,\n help=\"Maximum sequence length to process\")\n group.add_argument('--max-preds-per-seq', type=int, default=None,\n help='Maximum number of predictions to use per sequence.'\n 'Defaults to math.ceil(`--seq-length`*.15/10)*10.'\n 'MUST BE SPECIFIED IF `--use-tfrecords` is True.')\n\n return parser\n\ndef get_args():\n \"\"\"Parse all the args.\"\"\"\n\n parser = argparse.ArgumentParser(description='PyTorch BERT Model')\n parser = add_model_config_args(parser)\n parser = add_fp16_config_args(parser)\n parser = 
add_training_args(parser)\n parser = add_evaluation_args(parser)\n parser = add_text_generate_args(parser)\n parser = add_data_args(parser)\n\n # Include DeepSpeed configuration arguments\n parser = deepspeed.add_config_arguments(parser)\n\n args = parser.parse_args()\n\n if not args.data_dir:\n print('WARNING: No data specified')\n\n args.cuda = torch.cuda.is_available()\n\n args.rank = int(os.getenv('RANK', '0'))\n args.world_size = int(os.getenv(\"WORLD_SIZE\", '1'))\n\n if os.getenv('OMPI_COMM_WORLD_LOCAL_RANK'):\n # We are using (OpenMPI) mpirun for launching distributed data parallel processes\n local_rank = int(os.getenv('OMPI_COMM_WORLD_LOCAL_RANK'))\n local_size = int(os.getenv('OMPI_COMM_WORLD_LOCAL_SIZE'))\n\n # Possibly running with Slurm\n num_nodes = int(os.getenv('SLURM_JOB_NUM_NODES', '1'))\n nodeid = int(os.getenv('SLURM_NODEID', '0'))\n\n args.local_rank = local_rank\n args.rank = nodeid*local_size + local_rank\n args.world_size = num_nodes*local_size\n\n args.model_parallel_size = min(args.model_parallel_size, args.world_size)\n if args.rank == 0:\n print('using world size: {} and model-parallel size: {} '.format(\n args.world_size, args.model_parallel_size))\n\n args.dynamic_loss_scale = False\n if args.loss_scale is None:\n args.dynamic_loss_scale = True\n if args.rank == 0:\n print(' > using dynamic loss scaling')\n\n # The args fp32_* or fp16_* meant to be active when the\n # args fp16 is set. So the default behaviour should all\n # be false.\n if not args.fp16:\n args.fp32_embedding = False\n args.fp32_tokentypes = False\n args.fp32_layernorm = False\n\n return args\n"
] | [
[
"torch.cuda.is_available"
]
] |