repo_name | hexsha | file_path | code | apis
---|---|---|---|---|
SimonsRoad/UnDeepVO | [
"956598958e0dba4729a8af70ee7a4cdcc10f09ec"
] | [
"demo_odometry.py"
] | [
"\n\"\"\"Example of pykitti.odometry usage.\"\"\"\nimport itertools\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\n\nimport pykitti\n\n__author__ = \"Lee Clement\"\n__email__ = \"[email protected]\"\n\n# Change this to the directory where you store KITTI data\nbasedir = './data/dataset'\n\n# Specify the dataset to load\nsequence = '01'\n\n# Load the data. Optionally, specify the frame range to load.\n# Passing imformat='cv2' will convert images to uint8 and BGR for\n# easy use with OpenCV.\n# dataset = pykitti.odometry(basedir, sequence)\ndataset = pykitti.odometry(basedir, sequence, frames=range(0, 20, 5))\n\n# dataset.calib: Calibration data are accessible as a named tuple\n# dataset.timestamps: Timestamps are parsed into a list of timedelta objects\n# dataset.poses: Generator to load ground truth poses T_w_cam0\n# dataset.camN: Generator to load individual images from camera N\n# dataset.gray: Generator to load monochrome stereo pairs (cam0, cam1)\n# dataset.rgb: Generator to load RGB stereo pairs (cam2, cam3)\n# dataset.velo: Generator to load velodyne scans as [x,y,z,reflectance]\n\n# Grab some data\nsecond_pose = next(iter(itertools.islice(dataset.poses, 1, None)))\nfirst_gray = next(iter(dataset.gray))\nfirst_cam1 = next(iter(dataset.cam1))\nfirst_rgb = next(iter(dataset.rgb))\nfirst_cam2 = next(iter(dataset.cam2))\nthird_velo = next(iter(itertools.islice(dataset.velo, 2, None)))\n\n# Display some of the data\nnp.set_printoptions(precision=4, suppress=True)\nprint('\\nSequence: ' + str(dataset.sequence))\nprint('\\nFrame range: ' + str(dataset.frames))\n\n# print('\\nGray stereo pair baseline [m]: ' + str(dataset.calib.b_gray))\nprint('\\nRGB stereo pair baseline [m]: ' + str(dataset.calib.b_rgb))\n\nprint('\\nFirst timestamp: ' + str(dataset.timestamps[0]))\nprint('\\nSecond ground truth pose:\\n' + str(second_pose))\n\nf, ax = plt.subplots(2, 2, figsize=(15, 5))\nax[0, 0].imshow(first_gray[0], cmap='gray')\nax[0, 0].set_title('Left Gray Image (cam0)')\n\nax[0, 1].imshow(first_cam1, cmap='gray')\nax[0, 1].set_title('Right Gray Image (cam1)')\n\nax[1, 0].imshow(first_cam2)\nax[1, 0].set_title('Left RGB Image (cam2)')\n\nax[1, 1].imshow(first_rgb[1])\nax[1, 1].set_title('Right RGB Image (cam3)')\n\nf2 = plt.figure()\nax2 = f2.add_subplot(111, projection='3d')\n# Plot every 100th point so things don't get too bogged down\nvelo_range = range(0, third_velo.shape[0], 10)\nax2.scatter(third_velo[velo_range, 0],\n third_velo[velo_range, 1],\n third_velo[velo_range, 2],\n c=third_velo[velo_range, 3],\n cmap='gray',\n s=0.1)\nax2.axis('equal')\nax2.set_title('Third Velodyne scan (subsampled)')\n\nplt.show()\n\n\n\n"
] | [
[
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show",
"numpy.set_printoptions",
"matplotlib.pyplot.subplots"
]
] |
thouska/SALib | [
"5deeaf316ef58ea0a26295c8ad2ca57cdc739d45"
] | [
"tests/test_test_functions.py"
] | [
"from nose.tools import assert_almost_equal, assert_equal, raises\n\nfrom numpy.testing import assert_allclose\nimport numpy as np\n\nfrom SALib.test_functions.Sobol_G import evaluate, total_variance, \\\n partial_first_order_variance, \\\n sensitivity_index, \\\n total_sensitivity_index\n\ndef test_Sobol_G():\n '''\n '''\n parameter_values = np.zeros((1, 8))\n actual = evaluate(parameter_values)\n expected = np.array([4.0583])\n assert_allclose(actual, expected, atol=1e-4, rtol=1e-4)\n\n\n@raises(ValueError)\ndef test_Sobol_G_raises_error_if_values_wrong_size():\n \"\"\"\n Tests that a value error is raised if the Sobol G function is called with\n the wrong number of variables\n \"\"\"\n a = [1, 2, 3, 4, 5, 6, 7, 8]\n evaluate(np.array([1, 2, 3, 4, 5, 6, 7]), a)\n\n\n@raises(ValueError)\ndef test_Sobol_G_raises_error_if_values_gt_one():\n \"\"\"\n Tests that a value error is raised if the Sobol G function is called with\n values greater than one\n \"\"\"\n evaluate(np.array([0, 1, .02, 0.23, 1.234, 0.02848848, 0, 0.78]))\n\n\n@raises(ValueError)\ndef test_Sobol_G_raises_error_if_values_lt_zero():\n \"\"\"\n Tests that a value error is raised if the Sobol G function is called with\n values less than zero.\n \"\"\"\n evaluate(np.array([0, -1, -.02, 1, 1, -0.1, -0, -12]))\n\n\n@raises(TypeError)\ndef test_Sobol_G_raises_error_if_values_not_numpy_array():\n \"\"\"\n Tests that a type error is raised if the Sobol G function is called with\n values argument not as a numpy array.\n \"\"\"\n fixture = [list(range(8)), str(12345678)]\n for x in fixture:\n evaluate(x)\n\n\ndef test_total_variance():\n\n a = np.array([78, 12, 0.5, 2, 97, 33])\n actual = total_variance(a)\n expected = 0.19347\n\n assert_allclose(actual, expected, rtol=1e-4)\n\n\ndef test_partial_first_order_variance():\n\n a = np.array([78, 12, 0.5, 2, 97, 33])\n actual = partial_first_order_variance(a)\n expected = (len(a),)\n\n assert_equal(a.shape, expected)\n\n expected = np.array([0.000053, 0.001972, 0.148148, 0.037037, 0.000035, 0.000288])\n\n assert_allclose(actual, expected, atol=1e-4, rtol=1e-4)\n\n\ndef test_sensitivity_index():\n a = np.array([78, 12, 0.5, 2, 97, 33])\n actual = sensitivity_index(a)\n expected = np.array([0.000276, 0.010195, 0.765743,\n 0.191436, 0.000179, 0.001490])\n assert_allclose(actual, expected, atol=1e-2, rtol=1e-6)\n\n\ndef test_total_sensitivity_index():\n a = np.array([78, 12, 0.5, 2, 97, 33])\n\n actual = total_sensitivity_index(a)\n\n expected = np.array([0.030956547, 0.040875287, 0.796423551,\n 0.222116249, 0.030859879, 0.032170899])\n\n assert_allclose(actual, expected, atol=1e-2, rtol=1e-6)\n"
] | [
[
"numpy.array",
"numpy.zeros",
"numpy.testing.assert_allclose"
]
] |
dineshpinto/nft_analytics | [
"99fd4adbfe786f4de6fa2a6a0c5e8a58eaaf338a"
] | [
"src/nft_analytics.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nMIT License\n\nCopyright (c) 2021 Dinesh Pinto\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\nimport json\nimport logging\nimport os\nimport sys\nfrom json import JSONDecodeError\n\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\n\nfrom .infura_api import InfuraAPI\nfrom .opensea_api import OpenSeaAPI\n\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n stream=sys.stdout, level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\nclass NFTAnalytics(OpenSeaAPI):\n def __init__(self, asset_contract_address: str):\n super().__init__(asset_contract_address)\n self.eth_api = InfuraAPI()\n\n @staticmethod\n def make_directories(folder_name: str):\n \"\"\" Set up directories for data and results if they don't exist. \"\"\"\n data_folder = os.path.join(\"data\", folder_name)\n result_folder = os.path.join(\"results\", folder_name)\n\n if not os.path.isdir(data_folder):\n logger.info(f\"Making directoy {data_folder}\")\n os.makedirs(data_folder)\n\n if not os.path.isdir(result_folder):\n logger.info(f\"Making directoy {result_folder}\")\n os.makedirs(result_folder)\n\n return data_folder, result_folder\n\n def fetch_data(self, max_offset: int = 10000, collection: str = None) -> list:\n \"\"\"\n Query OpenSea API for collection data, offset is shifted until max\n offset is reached (i.e. number of items in a collection).\n \"\"\"\n local_assets = []\n\n pbar = tqdm(range(0, max_offset + 1, 50))\n for offset in pbar:\n pbar.set_description(f\"{offset}\")\n try:\n asset_data = self.get_asset_data(offset=offset, limit=50, collection=collection)\n except JSONDecodeError:\n logger.error(f\"Only fetched data till offset={offset - 1}. \"\n f\"Warning={self.get_asset_data(offset=offset, limit=50)}\")\n return local_assets\n\n if \"assets\" not in asset_data:\n logger.error(f\"Only fetched data till offset={offset - 1}. Warning={asset_data}\")\n return local_assets\n\n for asset in asset_data[\"assets\"]:\n local_assets.append(asset)\n\n return local_assets\n\n def fetch_events(self, max_offset: int = 10000) -> list:\n \"\"\"\n Query OpenSea API for event data, offset is shifted until max\n offset is reached (i.e. 
number of items in a collection).\n \"\"\"\n local_events = []\n\n pbar = tqdm(range(0, max_offset + 1, 300))\n for offset in pbar:\n pbar.set_description(f\"{offset}\")\n try:\n event_data = self.get_event_data(offset=offset, limit=300)\n except JSONDecodeError:\n logger.error(f\"Only fetched data till offset={offset - 1}. \"\n f\"Warning={self.get_asset_data(offset=offset, limit=50)}\")\n return local_events\n\n if \"asset_events\" not in event_data:\n logger.error(f\"Only fetched data till offset={offset - 1}. Warning={event_data}\")\n return local_events\n\n for event in event_data[\"asset_events\"]:\n local_events.append(event)\n\n return local_events\n\n @staticmethod\n def save_json(asset_data: list, filename: str = \"data.json\"):\n with open(filename, 'w', encoding='utf-8') as f:\n json.dump(asset_data, f, ensure_ascii=False, indent=4)\n logger.info(f\"Saved asset data to {filename}\")\n\n @staticmethod\n def load_json(filename: str = \"data.json\") -> list:\n with open(filename) as f:\n asset_data = json.load(f)\n\n return asset_data\n\n @staticmethod\n def get_trait_values_for_type(asset_data: list, trait_type: str) -> list:\n \"\"\" Get all possible values of traits for a specific type of trait. \"\"\"\n trait_values = []\n for asset in asset_data:\n for traits in asset[\"traits\"]:\n if traits[\"trait_type\"] == trait_type and traits[\"value\"] not in trait_values:\n trait_values.append(traits[\"value\"])\n\n return trait_values\n\n def get_trait_type_median_price(self, asset_data: list, trait_type: str) -> dict:\n \"\"\" Get the median price of a specific trait type. \"\"\"\n trait_value_prices = {}\n for value in self.get_trait_values_for_type(asset_data, trait_type):\n listing_prices_trait = []\n\n for asset in asset_data:\n if asset[\"sell_orders\"]:\n for traits in asset[\"traits\"]:\n if traits[\"trait_type\"] == trait_type and traits[\"value\"] == value:\n listing_prices_trait.append(float(asset[\"sell_orders\"][0][\"base_price\"]) / 1e18)\n\n trait_value_prices[value] = np.median(np.array(listing_prices_trait))\n\n return dict(sorted(trait_value_prices.items(), key=lambda item: item[1], reverse=True))\n\n def get_median_prices(self, asset_data: list, traits_dict: dict) -> np.ndarray:\n \"\"\" Get median prices of all trait types. \"\"\"\n median_prices = []\n for trait_type, trait_value in traits_dict.items():\n median_prices.append(self.get_trait_type_median_price(asset_data, trait_type)[trait_value])\n\n return np.array(median_prices)\n\n def get_traits_with_median_prices(self, asset_data: list, asset: dict) -> dict:\n \"\"\" Get median prices of trait types for specific asset. \"\"\"\n traits = {}\n for trait in asset[\"traits\"]:\n traits[trait[\"trait_type\"]] = trait[\"value\"]\n\n trait_prices = {}\n\n for trait_type, trait_value in traits.items():\n price = self.get_trait_type_median_price(asset_data, trait_type)[trait_value]\n trait_prices[trait_value + \" \" + trait_type] = price\n\n return trait_prices\n\n def get_nft_holdings(self, asset_data: list, asset_name: str, eth_balances: bool = True) \\\n -> pd.DataFrame:\n \"\"\" Query the number of NFTs held and/or the ETH balances of addresses in a collection. 
\"\"\"\n nfts_held = {}\n\n for asset in asset_data:\n nfts_held[asset[\"owner\"][\"address\"]] = 0\n\n for asset in asset_data:\n nfts_held[asset[\"owner\"][\"address\"]] += 1\n\n logger.info(f\"Total NFTs in collection = {sum(nfts_held.values())}\")\n\n if eth_balances:\n logger.info(f\"Getting NFT holdings and ETH balances...\")\n df = pd.DataFrame(columns=[\"Address\", asset_name, \"ETH_balance\"])\n\n pbar = tqdm(nfts_held.items())\n\n for idx, (address, num_nfts) in enumerate(pbar):\n pbar.set_description(f\"{idx}\")\n df.loc[idx] = [address, num_nfts, self.eth_api.get_eth_balance(address)]\n else:\n logger.info(f\"Getting NFT holdings...\")\n df = pd.DataFrame(columns=[\"Address\", asset_name])\n\n pbar = tqdm(nfts_held.items())\n\n for idx, (address, num_nfts) in enumerate(pbar):\n pbar.set_description(f\"{idx}\")\n df.loc[idx] = [address, num_nfts]\n\n etherscan_links = []\n for address in df[\"Address\"]:\n etherscan_links.append(f\"https://etherscan.io/address/{address}\")\n df[\"Etherscan_link\"] = etherscan_links\n\n opensea_links = []\n for address in df[\"Address\"]:\n opensea_links.append(f\"https://opensea.io/{address}\")\n df[\"OpenSea_link\"] = opensea_links\n\n return df\n\n @staticmethod\n def calculate_rarity_df(asset_data: list, items_in_collection: int) -> pd.DataFrame:\n \"\"\"\n Calculate rarity of a particular trait.\n\n Uses the formula from rarity tools, full article at:\n raritytools.medium.com/ranking-rarity-understanding-rarity-calculation-methods-86ceaeb9b98c\n\n Formula:\n [Rarity Score for a Trait Value] =\n 1 / ([Number of Items with that Trait Value] / [Total Number of Items in Collection])\n\n The total Rarity Score for an NFT is the sum of the Rarity Score of all of its trait values.\n \"\"\"\n df = pd.DataFrame(columns=[\"Name\", \"Price\", \"Rarity\", \"RarityPriceRatio\"])\n\n for idx, asset in enumerate(asset_data):\n if asset[\"sell_orders\"]:\n if asset[\"sell_orders\"][0][\"payment_token_contract\"][\"symbol\"] == \"ETH\":\n price = float(asset[\"sell_orders\"][0][\"current_price\"]) / 1e18\n if price != 0:\n rarity = 0\n for trait in asset[\"traits\"]:\n trait_count = int(trait[\"trait_count\"])\n if trait_count != 0:\n rarity += 1 / (trait_count / items_in_collection)\n name = asset[\"name\"]\n df.loc[idx] = [name, price, rarity, rarity / price]\n\n return df\n"
] | [
[
"numpy.array",
"pandas.DataFrame"
]
] |
smsaladi/fixthejet | [
"b3089e6ee8cf2afbf24251de47702e0b1446eb73"
] | [
"colors_from_mpl.py"
] | [
"\"\"\"\nWrites out hex colors from color scales provided in matplotlib\ninto JS file\n\npython colors_from_mpl.py >> js/colorscales.js\n\"\"\"\n\nimport itertools\nimport json\n\nimport numpy as np\nimport matplotlib.colors\nimport matplotlib.cm\n\n# Have colormaps separated into categories:\n# http://matplotlib.org/examples/color/colormaps_reference.html\ncmap_names = [\n ('Perceptually Uniform Sequential', [\n 'viridis', 'plasma', 'inferno', 'magma']),\n ('Sequential', [\n 'Greys', 'Purples', 'Blues', 'Greens', 'Oranges', 'Reds',\n 'YlOrBr', 'YlOrRd', 'OrRd', 'PuRd', 'RdPu', 'BuPu',\n 'GnBu', 'PuBu', 'YlGnBu', 'PuBuGn', 'BuGn', 'YlGn']),\n ('Sequential (2)', [\n 'binary', 'gist_yarg', 'gist_gray', 'gray', 'bone', 'pink',\n 'spring', 'summer', 'autumn', 'winter', 'cool', 'Wistia',\n 'hot', 'afmhot', 'gist_heat', 'copper']),\n ('Diverging', [\n 'PiYG', 'PRGn', 'BrBG', 'PuOr', 'RdGy', 'RdBu',\n 'RdYlBu', 'RdYlGn', 'Spectral', 'coolwarm', 'bwr', 'seismic']),\n ('Qualitative', [\n 'Pastel1', 'Pastel2', 'Paired', 'Accent',\n 'Dark2', 'Set1', 'Set2', 'Set3',\n 'tab10', 'tab20', 'tab20b', 'tab20c']),\n ('Miscellaneous', [\n 'flag', 'prism', 'ocean', 'gist_earth', 'terrain', 'gist_stern',\n 'gnuplot', 'gnuplot2', 'CMRmap', 'cubehelix', 'brg', 'hsv',\n 'gist_rainbow', 'rainbow', 'jet', 'nipy_spectral', 'gist_ncar'])\n ]\n\ncm_names = [cat[1] for cat in cmap_names]\n\nprint(\"var mpl_scales = {\")\n\nfor name in itertools.chain.from_iterable(cm_names):\n cmap = matplotlib.cm.get_cmap(name)\n values = np.linspace(0, 1, cmap.N)\n rgba = cmap(values)\n hex = np.apply_along_axis(matplotlib.colors.rgb2hex, axis=1, arr=rgba)\n print(' \"{}\": {},\\n'.format(name, json.dumps(hex.tolist())))\n\nprint(\"};\")\n"
] | [
[
"numpy.linspace",
"numpy.apply_along_axis"
]
] |
Wesley-Tse/Road-Detection | [
"c3b444287d9b41ccc4234e737e4421b5d1b3c3da"
] | [
"train.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# @author: Wesley\n# @time: 2020-12-11 10:47\n\nimport os\nimport time\nimport torch\nfrom torch import nn\nfrom models.dinknet34 import DinkNet34\nfrom loss import dice_bce_loss\nfrom models.unet import UNet\nfrom dataset import MyDataset\nfrom torch.utils.data import DataLoader\n\nimg_path = r'E:\\PyCharmProject\\datasets\\5k\\train_set\\JPEGImages'\nmask_path = r'E:\\PyCharmProject\\datasets\\5k\\train_set\\SegmentationClass'\nval_img_path = r'E:\\PyCharmProject\\datasets\\5k\\validate_set\\JPEGImages'\nval_mask_path = r'E:\\PyCharmProject\\datasets\\5k\\validate_set\\SegmentationClass'\nlog = './dinknet.txt'\n\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nbatch_size_per = 16\nbatch_size = batch_size_per * torch.cuda.device_count()\nepoch_limit = 10\nnet = DinkNet34().to(device)\nnet = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))\n\nweight = r'E:\\PyCharmProject\\Road-Detection\\weights\\dinknet34.pt'\n# if os.path.exists(weight):\n # net.load_state_dict(torch.load(weight))\n\ntrain_dataset = MyDataset(img_path, mask_path)\nval_dataset = MyDataset(val_img_path, val_mask_path)\ntrain_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)\nval_dataloader = DataLoader(val_dataset, batch_size=batch_size)\n\nadam = torch.optim.Adam(net.parameters(), lr=2e-4)\nsgd = torch.optim.SGD(net.parameters(), lr=0.01, momentum=0.9)\n\nloss_fun = dice_bce_loss()\n\nif __name__ == '__main__':\n\n epoch = 1\n log = open(log, 'w', encoding='utf-8')\n log.write('epoch' + '\\t' + 'loss' + '\\t' + 'pa' + '\\t' + 'iou' + '\\t' + 'precision' + '\\n')\n log.flush()\n while epoch < 300:\n s_time = time.time()\n print('epoch - {} - training'.format(epoch))\n net.train()\n TP = FP = TN = FN = 0\n pa = 0\n iou = 0\n stop = 0\n flag = 0\n train_loss = 0\n batch = len(train_dataloader)\n for i, (img, mask) in enumerate(train_dataloader):\n img = img.to(device)\n mask = mask.to(device)\n out = net(img)\n loss = loss_fun(mask, out)\n\n adam.zero_grad()\n loss.backward()\n adam.step()\n\n if i % 10 == 0:\n print('{}: {}/{} - loss: {}'.format(epoch, i, batch, loss.item()))\n # torch.save(net.state_dict(), weight)\n # print('save success')\n train_loss += loss.item()\n epoch_loss = train_loss / len(train_dataloader)\n\n e_time = time.time()\n print('epoch - {} - epoch_loss: {}'.format(epoch, epoch_loss))\n print('total-time: ', e_time - s_time)\n print('epoch - {} - evaluating'.format(epoch))\n\n net.eval()\n for img, mask in val_dataloader:\n img = img.to(device)\n mask = mask.to(device)\n with torch.no_grad():\n pred = net(img)\n pred[pred >= 0.5] = 1\n pred[pred < 0.5] = 0\n\n TP += ((pred == 1) & (mask == 1)).cpu().sum().item()\n TN += ((pred == 0) & (mask == 0)).cpu().sum().item()\n FN += ((pred == 0) & (mask == 1)).cpu().sum().item()\n FP += ((pred == 1) & (mask == 0)).cpu().sum().item()\n\n pa = (TP + TN) / (TP + TN + FP + FN)\n precision = TP / (TP + FN)\n iou = TP / (TP + FP + FN)\n\n print('pa: ', pa)\n print('iou: ', iou)\n print('precision', precision)\n log.write(\n str(epoch) + '\\t' + str(epoch_loss) + '\\t' + str(pa) + '\\t' + str(iou) + '\\t' + str(precision) + '\\n')\n log.flush()\n\n if iou > stop:\n stop = iou\n torch.save(net.state_dict(), weight)\n print(\"save success,iou updated to: {}\".format(iou))\n flag = 0\n else:\n flag += 1\n print(\"pa为{},没有提升,参数未更新,iou为{},第{}次未更新\".format(iou, stop, flag))\n if flag >= epoch_limit:\n print(\"early stop at epoch {}, finally iou: 
{}\".format(epoch, stop))\n break\n epoch += 1\n log.close()\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.cuda.is_available",
"torch.no_grad",
"torch.cuda.device_count"
]
] |
p5a0u9l/clamm | [
"a41ce2526e9792ce08263bf27eb9c417608d1f5d"
] | [
"clamm/streams/plot_big_stft.py"
] | [
"\"\"\"\nConvert a large audio wav file (album length, i.e. > 30 minutes typically)\ninto a series of videos consisting of the audio synchronized with images of the\nspectrogram.\n\"\"\"\nimport os\nimport sys\nimport multiprocessing as mp\nimport subprocess\n\nimport tqdm\nimport numpy as np\nimport librosa.core\nimport librosa.display\nimport librosa.feature\nimport matplotlib.pyplot as plt\nplt.switch_backend(\"agg\")\n\nSAMPLERATE = 44.1e3 # samples/sec\nWAVPATH = sys.argv[1]\nBASENAME = os.path.basename(WAVPATH).replace(\".wav\", \"\")\nROOT = \"/mnt/nfs-share/music/data\"\nFRAMEROOT = ROOT + \"/frames/\" + BASENAME\nDURATION = 20 #\nNUMPROC = 8\nFFTFREQ = librosa.fft_frequencies(sr=SAMPLERATE)\nF_MAX = np.max(FFTFREQ)\nN_FFT = 2048\nN_HOP = int(1.0 / 4 * N_FFT)\nFILETIME = librosa.core.get_duration(filename=WAVPATH)\nNFRAME = int(FILETIME) / DURATION # allow truncation\nDUMPFILE = \"data.npy\"\nFPS = 5\n\n\ndef single_image(argtuple):\n y, i_frame, i_second = argtuple\n fractional_second = float(i_second) / FPS\n abs_index = i_frame * DURATION * FPS + i_second\n time = DURATION*i_frame + fractional_second\n titlestr = \"%s - file time %0.2f seconds\" % (BASENAME, time)\n\n # display the spectrogram\n plt.figure(figsize=(18, 8))\n librosa.display.specshow(\n y, x_axis='time', y_axis='mel', sr=SAMPLERATE, hop_length=N_HOP)\n\n plt.vlines(\n fractional_second, 0, F_MAX,\n linestyles='dashed', colors='w', alpha=0.6)\n\n plt.title(titlestr)\n plt.savefig(FRAMEROOT + \"/%05d.png\" % (abs_index))\n plt.tight_layout()\n plt.close()\n\n\ndef main():\n \"\"\" main\n \"\"\"\n\n pbar = tqdm.tqdm(total=NFRAME)\n pool = mp.Pool(NUMPROC)\n init = False\n if not os.path.exists(FRAMEROOT):\n os.makedirs(FRAMEROOT)\n\n for i_frame in range(10, NFRAME):\n # load the audio\n x, sr = librosa.core.load(\n WAVPATH, sr=SAMPLERATE,\n offset=DURATION * i_frame, duration=DURATION)\n\n # compute the spectrogram\n x = librosa.power_to_db(\n librosa.feature.melspectrogram(\n y=x, hop_length=N_HOP, n_fft=N_FFT, sr=SAMPLERATE),\n ref=np.max)\n\n if not init:\n f_mean = np.sum(x, axis=1)\n init = True\n else:\n f_mean += np.sum(x, axis=1)\n\n # loop updates\n pbar.update(1)\n pool.map(\n single_image,\n [(x, i_frame, i_second) for i_second in range(FPS*DURATION)])\n\n np.save(BASENAME + 'f_mean.npy', f_mean)\n pbar.close()\n\n subprocess.call([\n \"ffmpeg\", '-r', '5', '-i', FRAMEROOT + '%05d.png', '-i', WAVPATH,\n '-shortest', '-c:v', 'libx264', '-c:a', 'aac', '-strict', '-2',\n '-pix_fmt', 'yuv420p', '-crf', '23', '-r', '5', '-y',\n ROOT + \"/videos/\" + BASENAME + '.mp4'])\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.save",
"numpy.sum",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.switch_backend",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.title",
"matplotlib.pyplot.vlines",
"numpy.max",
"matplotlib.pyplot.close"
]
] |
mbooali/introduction-to-machine | [
"3f75f9897f1f63f07bb6eace312fa35e16786623"
] | [
"mglearn/plot_animal_tree.py"
] | [
"from imageio import imread\nimport matplotlib.pyplot as plt\n\n\ndef plot_animal_tree(ax=None):\n import graphviz\n if ax is None:\n ax = plt.gca()\n mygraph = graphviz.Digraph(node_attr={'shape': 'box'},\n edge_attr={'labeldistance': \"10.5\"},\n format=\"png\")\n mygraph.node(\"0\", \"Has feathers?\")\n mygraph.node(\"1\", \"Can fly?\")\n mygraph.node(\"2\", \"Has fins?\")\n mygraph.node(\"3\", \"Hawk\")\n mygraph.node(\"4\", \"Penguin\")\n mygraph.node(\"5\", \"Dolphin\")\n mygraph.node(\"6\", \"Bear\")\n mygraph.edge(\"0\", \"1\", label=\"True\")\n mygraph.edge(\"0\", \"2\", label=\"False\")\n mygraph.edge(\"1\", \"3\", label=\"True\")\n mygraph.edge(\"1\", \"4\", label=\"False\")\n mygraph.edge(\"2\", \"5\", label=\"True\")\n mygraph.edge(\"2\", \"6\", label=\"False\")\n mygraph.render(\"tmp\")\n ax.imshow(imread(\"tmp.png\"))\n ax.set_axis_off()\n"
] | [
[
"matplotlib.pyplot.gca"
]
] |
Shihao-Feng-98/RRP_Hopper_simulation | [
"444dbcce90d5ffb6bf577ed03adc9717183e21ae"
] | [
"controller.py"
] | [
"'''\nonly for RRP Hopper\nShihao Feng\n2021.10.28\n'''\n\nimport numpy as np\nimport pybullet as p\nfrom leg_kinematics import LegKinematicsRRP\nimport pinocchio as pin\n\nclass JointPDController(object):\n def __init__ (self):\n self.kp = np.array([70, 70, 1500])\n self.kd = np.array([2, 2, 10])\n\n def solve(self, q_d, dq_d, q_state, dq_state):\n q = q_state[7:10]\n dq = dq_state[6:9]\n ddq_d = np.zeros(3) # 期望加速度计算量大,简单地设为0\n tau_d = ddq_d + self.kd*(dq_d - dq) + self.kp*(q_d - q) # (3,)\n return tau_d \n\n \nclass SLIPController(object):\n def __init__(self):\n self.q_d = np.array([0., 0., 0.]) # q_d[2] always 0\n self.dq_d = np.array([0., 0., 0.]) # always 0\n # 关节增益 \n self.kp = np.array([70., 70., 3000.]) \n self.kd = np.array([2., 2., 10.]) # 阻尼模拟能量损失, 同时防止腿抖动\n # 身体姿态增益\n self.kp_pose = 5. * np.ones(2) \n self.kd_pose = 1. * np.ones(2)\n # 水平速度增益\n self.kp_vel = 0.1 * np.ones(2) \n \n self.leg_length_normal = 0.55\n self.RRP = LegKinematicsRRP(L=self.leg_length_normal)\n\n # private methods\n def __w_to_drpy(self, rpy, w):\n '''\n rpy -> (3,), w -> (3,),drpy -> (3,)\n '''\n H = np.array([[np.cos(rpy[2])/np.cos(rpy[1]), np.sin(rpy[2])/np.cos(rpy[1]), 0.],\n [-np.sin(rpy[2]), np.cos(rpy[2]), 0.],\n [np.cos(rpy[2])*np.tan(rpy[1]), np.sin(rpy[2])*np.tan(rpy[1]), 0.]])\n drpy = (H @ w.reshape(-1,1)).ravel()\n return drpy\n\n def solve(self, q_state, dq_state, robot_state_machine, T_s, vel, dir, F_thrust):\n tau_d = np.zeros(3) # 初始化\n orn_body = q_state[3:7] # 身体姿态 四元数\n rpy = np.array(p.getEulerFromQuaternion(orn_body))\n w_body = dq_state[3:6] # 身体角速度 w\n drpy = self.__w_to_drpy(rpy, w_body)\n q = q_state[7:10] # 关节位置\n dq = dq_state[6:9] # 关节速度\n\n # 控制虚拟弹簧力\n tau_d[2] = self.kd[2]*(self.dq_d[2] - dq[2]) \\\n + self.kp[2]*(self.q_d[2] - q[2]) \n\n # 弹簧伸长时,施加推力抵消能量损耗\n if robot_state_machine == 'THRUST':\n tau_d[2] += F_thrust\n\n # 触地或者离地时,关节扭矩为0\n if (robot_state_machine == 'LOADING' or robot_state_machine == 'UNLOADING'):\n tau_d[0:2] = np.zeros(2)\n\n # 弹簧压缩或者伸长时,施加关节扭矩控制身体姿态 \n if (robot_state_machine == 'COMPRESSION' or robot_state_machine == 'THRUST'): \n # 姿态线性伺服控制\n tau_d[0:2] = - (self.kd_pose*(np.zeros(2) - drpy[0:2]) \\\n + self.kp_pose*(np.zeros(2) - rpy[0:2])) # (2,) \n\n # 飞行时,控制足端移动到落地点 \n if robot_state_machine == 'FLIGHT':\n vel_xy_d = np.array([vel*np.cos(dir), vel*np.sin(dir)])\n v_body = dq_state[0:2] # 当前水平速度\n # 相对于H系:坐标系原点与身体坐标系重合,方向与世界坐标系平行\n xy_d = v_body*T_s/2 - self.kp_vel*(vel_xy_d - v_body) # 计算落脚点\n r = q[2] + self.leg_length_normal \n z_d = - (r**2 - xy_d[0]**2 - xy_d[1]**2)**0.5\n # 转换到B系:身体坐标系\n R_HB = pin.rpy.rpyToMatrix(rpy)\n R_BH = R_HB.T \n p_H = np.array([xy_d[0], xy_d[1], z_d])\n p_B = (R_BH @ p_H.reshape(-1,1)).ravel() # (3,)\n q_d = self.RRP.IK(p_B)\n self.q_d[0:2] = q_d[0:2]\n # 关节PD控制\n tau_d[0:2] = self.kd[0:2]*(self.dq_d[0:2] - dq[0:2]) \\\n + self.kp[0:2]*(self.q_d[0:2] - q[0:2]) # (2,)\n \n print('tau_d: ', tau_d)\n return tau_d\n"
] | [
[
"numpy.ones",
"numpy.zeros",
"numpy.cos",
"numpy.tan",
"numpy.array",
"numpy.sin"
]
] |
XinlongSBU/pynucastro | [
"4f1547e99208ad03d8f79d748601219591a157b5"
] | [
"pynucastro/networks/rate_collection.py"
] | [
"\"\"\"A collection of classes and methods to deal with collections of\nrates that together make up a network.\"\"\"\n\n# Common Imports\nfrom __future__ import print_function\n\nimport functools\nimport math\nfrom operator import mul\nimport os\nfrom collections import OrderedDict\n\nfrom ipywidgets import interact\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n#from mpl_toolkits.axes_grid1 import make_axes_locatable\nimport networkx as nx\n\n# Import Rate\nfrom pynucastro.rates import Rate, Nucleus, Library\n\nmpl.rcParams['figure.dpi'] = 100\n\nclass Composition(object):\n \"\"\"a composition holds the mass fractions of the nuclei in a network\n -- useful for evaluating the rates\n\n \"\"\"\n def __init__(self, nuclei, small=1.e-16):\n \"\"\"nuclei is an iterable of the nuclei (Nucleus objects) in the network\"\"\"\n if not isinstance(nuclei[0], Nucleus):\n raise ValueError(\"must supply an iterable of Nucleus objects\")\n else:\n self.X = {k: small for k in nuclei}\n\n def set_solar_like(self, Z=0.02):\n \"\"\" approximate a solar abundance, setting p to 0.7, He4 to 0.3 - Z and\n the remainder evenly distributed with Z \"\"\"\n num = len(self.X)\n rem = Z/(num-2)\n for k in self.X:\n if k == Nucleus(\"p\"):\n self.X[k] = 0.7\n elif k.raw == \"he4\":\n self.X[k] = 0.3 - Z\n else:\n self.X[k] = rem\n\n self.normalize()\n\n def set_all(self, xval):\n \"\"\" set all species to a particular value \"\"\"\n for k in self.X:\n self.X[k] = xval\n\n def set_nuc(self, name, xval):\n \"\"\" set nuclei name to the mass fraction xval \"\"\"\n for k in self.X:\n if k.raw == name:\n self.X[k] = xval\n break\n\n def normalize(self):\n \"\"\" normalize the mass fractions to sum to 1 \"\"\"\n X_sum = sum([self.X[k] for k in self.X])\n\n for k in self.X:\n self.X[k] /= X_sum\n\n def get_molar(self):\n \"\"\" return a dictionary of molar fractions\"\"\"\n molar_frac = {k: v/k.A for k, v in self.X.items()}\n return molar_frac\n\n def __str__(self):\n ostr = \"\"\n for k in self.X:\n ostr += \" X({}) : {}\\n\".format(k, self.X[k])\n return ostr\n\nclass RateCollection(object):\n \"\"\" a collection of rates that together define a network \"\"\"\n\n pynucastro_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\n\n def __init__(self, rate_files=None, libraries=None, rates=None):\n \"\"\"\n rate_files are the files that together define the network. 
This\n can be any iterable or single string.\n\n This can include Reaclib library files storing multiple rates.\n\n If libraries is supplied, initialize a RateCollection using the rates\n in the Library object(s) in list 'libraries'.\n\n If rates is supplied, initialize a RateCollection using the\n Rate objects in the list 'rates'.\n\n Any combination of these options may be combined.\n \"\"\"\n\n self.files = []\n self.rates = []\n self.library = None\n\n if rate_files:\n if isinstance(rate_files, str):\n rate_files = [rate_files]\n self._read_rate_files(rate_files)\n\n if rates:\n if isinstance(rates, Rate):\n rates = [rates]\n try:\n for r in rates:\n assert(isinstance(r, Rate))\n except:\n print('Expected Rate object or list of Rate objects passed as the rates argument.')\n raise\n else:\n rlib = Library(rates=rates)\n if not self.library:\n self.library = rlib\n else:\n self.library = self.library + rlib\n\n if libraries:\n if isinstance(libraries, Library):\n libraries = [libraries]\n try:\n for lib in libraries:\n assert(isinstance(lib, Library))\n except:\n print('Expected Library object or list of Library objects passed as the libraries argument.')\n raise\n else:\n if not self.library:\n self.library = libraries.pop(0)\n for lib in libraries:\n self.library = self.library + lib\n\n if self.library:\n self.rates = self.rates + self.library.get_rates()\n\n # get the unique nuclei\n u = []\n for r in self.rates:\n t = set(r.reactants + r.products)\n u = set(list(u) + list(t))\n\n self.unique_nuclei = sorted(u)\n\n # now make a list of each rate that touches each nucleus\n # we'll store this in a dictionary keyed on the nucleus\n self.nuclei_consumed = OrderedDict()\n self.nuclei_produced = OrderedDict()\n\n for n in self.unique_nuclei:\n self.nuclei_consumed[n] = [r for r in self.rates if n in r.reactants]\n self.nuclei_produced[n] = [r for r in self.rates if n in r.products]\n\n # Re-order self.rates so Reaclib rates come first,\n # followed by Tabular rates. 
This is needed if\n # reaclib coefficients are targets of a pointer array\n # in the Fortran network.\n # It is desired to avoid wasting array size\n # storing meaningless Tabular coefficient pointers.\n self.rates = sorted(self.rates,\n key=lambda r: r.chapter == 't')\n\n self.tabular_rates = []\n self.reaclib_rates = []\n for n, r in enumerate(self.rates):\n if r.chapter == 't':\n self.tabular_rates.append(n)\n elif isinstance(r.chapter, int):\n self.reaclib_rates.append(n)\n else:\n print('ERROR: Chapter type unknown for rate chapter {}'.format(\n str(r.chapter)))\n exit()\n\n def _read_rate_files(self, rate_files):\n # get the rates\n self.files = rate_files\n for rf in self.files:\n try:\n rflib = Library(rf)\n except:\n print(\"Error reading library from file: {}\".format(rf))\n raise\n else:\n if not self.library:\n self.library = rflib\n else:\n self.library = self.library + rflib\n\n def get_nuclei(self):\n \"\"\" get all the nuclei that are part of the network \"\"\"\n return self.unique_nuclei\n\n def evaluate_rates(self, rho, T, composition):\n \"\"\"evaluate the rates for a specific density, temperature, and\n composition\"\"\"\n rvals = OrderedDict()\n ys = composition.get_molar()\n\n for r in self.rates:\n val = r.prefactor * rho**r.dens_exp * r.eval(T)\n yfac = functools.reduce(mul, [ys[q] for q in r.reactants])\n rvals[r] = yfac * val\n\n return rvals\n\n def network_overview(self):\n \"\"\" return a verbose network overview \"\"\"\n ostr = \"\"\n for n in self.unique_nuclei:\n ostr += \"{}\\n\".format(n)\n ostr += \" consumed by:\\n\"\n for r in self.nuclei_consumed[n]:\n ostr += \" {}\\n\".format(r.string)\n\n ostr += \" produced by:\\n\"\n for r in self.nuclei_produced[n]:\n ostr += \" {}\\n\".format(r.string)\n\n ostr += \"\\n\"\n return ostr\n\n def write_network(self, *args, **kwargs):\n \"\"\"Before writing the network, check to make sure the rates\n are distinguishable by name.\"\"\"\n assert self._distinguishable_rates(), \"ERROR: Rates not uniquely identified by Rate.fname\"\n self._write_network(*args, **kwargs)\n\n def _distinguishable_rates(self):\n \"\"\"Every Rate in this RateCollection should have a unique Rate.fname,\n as the network writers distinguish the rates on this basis.\"\"\"\n names = [r.fname for r in self.rates]\n return len(set(names)) == len(self.rates)\n\n def _write_network(self, *args, **kwargs):\n \"\"\"A stub for function to output the network -- this is implementation\n dependent.\"\"\"\n print('To create network integration source code, use a class that implements a specific network type.')\n return\n\n def plot(self, outfile=None, rho=None, T=None, comp=None, size=(800, 600), dpi=100):\n \"\"\"Make a plot of the network structure showing the links between nuclei\"\"\"\n\n G = nx.MultiDiGraph()\n G.position = {}\n G.labels = {}\n\n fig, ax = plt.subplots()\n #divider = make_axes_locatable(ax)\n #cax = divider.append_axes('right', size='15%', pad=0.05)\n\n ax.plot([0, 0], [8, 8], 'b-')\n\n # nodes -- the node nuclei will be all of the heavies, but not\n # p, n, alpha, unless we have p + p, 3-a, etc.\n node_nuclei = []\n for n in self.unique_nuclei:\n if n.raw not in [\"p\", \"n\", \"he4\"]:\n node_nuclei.append(n)\n else:\n for r in self.rates:\n if r.reactants.count(n) > 1:\n node_nuclei.append(n)\n break\n\n for n in node_nuclei:\n G.add_node(n)\n G.position[n] = (n.N, n.Z)\n G.labels[n] = r\"${}$\".format(n.pretty)\n\n if rho is not None and T is not None and comp is not None:\n ydots = self.evaluate_rates(rho, T, comp)\n else:\n 
ydots = None\n\n #for rr in ydots:\n # print(\"{}: {}\".format(rr, ydots[rr]))\n\n # edges\n for n in node_nuclei:\n for r in self.nuclei_consumed[n]:\n for p in r.products:\n if p in node_nuclei:\n # networkx doesn't seem to keep the edges in\n # any particular order, so we associate data\n # to the edges here directly, in this case,\n # the reaction rate, which will be used to\n # color it\n if ydots is None:\n G.add_edges_from([(n, p)], weight=0.5)\n else:\n try:\n rate_weight = math.log10(ydots[r])\n except ValueError:\n # if ydots[r] is zero, then set the weight\n # to roughly the minimum exponent possible\n # for python floats\n rate_weight = -308\n except:\n raise\n G.add_edges_from([(n, p)], weight=rate_weight)\n\n nx.draw_networkx_nodes(G, G.position,\n node_color=\"#A0CBE2\", alpha=1.0,\n node_shape=\"o\", node_size=1000, linewidth=2.0, zorder=10, ax=ax)\n\n nx.draw_networkx_labels(G, G.position, G.labels,\n font_size=13, font_color=\"w\", zorder=100, ax=ax)\n\n # get the edges and weights coupled in the same order\n edges, weights = zip(*nx.get_edge_attributes(G, 'weight').items())\n\n edges_lc = nx.draw_networkx_edges(G, G.position, width=3,\n edgelist=edges, edge_color=weights,\n node_size=1000,\n edge_cmap=plt.cm.viridis, zorder=1, ax=ax)\n\n # for networkx <= 2.0 draw_networkx_edges returns a\n # LineCollection matplotlib type which we can use for the\n # colorbar directly. For networkx >= 2.1, it is a collection\n # of FancyArrowPatch-s, which we need to run through a\n # PatchCollection. See: \n # https://stackoverflow.com/questions/18658047/adding-a-matplotlib-colorbar-from-a-patchcollection\n\n if ydots is not None:\n pc = mpl.collections.PatchCollection(edges_lc, cmap=plt.cm.viridis)\n pc.set_array(weights)\n plt.colorbar(pc, label=\"log10(rate)\")\n\n Ns = [n.N for n in node_nuclei]\n Zs = [n.Z for n in node_nuclei]\n\n plt.xlim(min(Ns)-1, max(Ns)+1)\n #plt.ylim(min(Zs)-1, max(Zs)+1)\n plt.xlabel(r\"$N$\", fontsize=\"large\")\n plt.ylabel(r\"$Z$\", fontsize=\"large\")\n\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n\n ax.set_aspect(\"equal\", \"datalim\")\n\n fig.set_size_inches(size[0]/dpi, size[1]/dpi)\n\n if outfile is None:\n plt.show()\n else:\n plt.tight_layout()\n plt.savefig(outfile, dpi=dpi)\n\n def __repr__(self):\n string = \"\"\n for r in self.rates:\n string += \"{}\\n\".format(r.string)\n return string\n\n\nclass Explorer(object):\n \"\"\" interactively explore a rate collection \"\"\"\n def __init__(self, rc, comp, size=(800, 600)):\n \"\"\" take a RateCollection and a composition \"\"\"\n self.rc = rc\n self.comp = comp\n self.size = size\n\n def _make_plot(self, logrho, logT):\n self.rc.plot(rho=10.0**logrho, T=10.0**logT, comp=self.comp, size=self.size)\n\n def explore(self, logrho=(2, 6, 0.1), logT=(7, 9, 0.1)):\n \"\"\"Perform interactive exploration of the network structure.\"\"\"\n interact(self._make_plot, logrho=logrho, logT=logT)\n"
] | [
[
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.collections.PatchCollection",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.xlabel"
]
] |
MGIMM/dynamic_balancing | [
"74482a970996ec75f5fb3f433b8285420787ccd7"
] | [
"notebooks/static_simulation.py"
] | [
"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport torch\nimport numpy as np\nimport matplotlib.pyplot as plt\n# from MMDBalancing import MMDBalancing as MMDB\n# from OptimalTransportBalancing import OptimalTransportBalancing as OTB\n# from NeuralAdversarialBalancing import NeuralAdversarialBalancing as NAB\n#get_ipython().run_line_magic('matplotlib', 'inline')\nimport pandas as pd\n\n\n# utils\nfrom utils_balancing import *\n\n\n# In[2]:\n\n\ndef static_simulation():\n n = 5000\n m = 5000\n d = 1\n r = lambda x:(x-3).square() + (x>-2)*(x+3).square() +x.abs()\n #r = lambda x:x.square()\n def get_data(n = 500,m = 500, r = r, d = d):\n def pi(x):\n return torch.sin(x)+ 2*torch.rand(x.shape)-1\n def pi_ring(x):\n return torch.sin(x)+ 1*torch.rand(x.shape)-0.5\n \n \n xi = torch.normal(mean = -1, std = 2, size = (n,d))\n xi_ring = torch.zeros(size = (m,d))\n for i in range(m):\n if torch.rand(1).item()>0.3:\n xi_ring[i,0] = torch.normal(mean = -4, std = 2, size = (1,)).item()\n else:\n xi_ring[i,0] = torch.normal(mean = 3, std = 0.2, size = (1,)).item()\n w = torch.ones(n)\n w_ring = torch.ones(m)\n \n \n \n \n xi_natural = torch.cat((xi, pi(xi)),axis = 1)\n xi_ring_natural = torch.cat((xi_ring, pi_ring(xi_ring)), axis = 1)\n Z =xi_natural[:,0]+xi_natural[:,1] + torch.rand((n,)) \n Z_ring =xi_ring_natural[:,0]+xi_ring_natural[:,1]+torch.rand((m,))\n R = r(Z)\n return xi_natural,xi_ring_natural,R,Z,Z_ring\n \n # ## Reference value\n \n # In[7]:\n \n \n xi_natural, xi_ring_natural,R,Z,Z_ring = get_data(n = 50000, m = 50000)\n ref = r(Z_ring).mean()\n \n \n # ### Re-generate data set with $n=m=500$.\n \n # In[8]:\n \n \n n = 500\n m = 500\n xi_natural, xi_ring_natural,R,Z,Z_ring = get_data(n = n, m = m, r = r)\n \n \n # # GIPWE: DE and DRE\n # \n # 1. Data splitting (K-folds with K = 3)\n \n # In[9]:\n \n \n def get_split_ind(n,K = 3):\n I_n = torch.arange(n, dtype = float)\n \n rand_ind_n = torch.multinomial(I_n,len(I_n),replacement = False)\n num_folds_n = int(n/K)\n Ind = []\n for i in range(K):\n if (i+1)*num_folds_n <= n:\n Ind.append(list(rand_ind_n[i*num_folds_n:(i+1)*num_folds_n].detach().numpy()))\n else:\n Ind.append(list(rand_ind_n[i*num_folds_n:].detach().numpy()))\n \n Ind_split = []\n for i in range(K):\n list_n = []\n for j in range(n):\n if j >= i*num_folds_n and j < (i+1)*num_folds_n:\n pass\n else:\n list_n.append(rand_ind_n[j].item())\n \n Ind_split.append(list_n)\n return Ind_split,Ind\n \n \n # In[10]:\n \n \n K = 3\n Ind_out, Ind_in = get_split_ind(n,K)\n \n \n # 2. 
Get GIPW weights\n \n # In[11]:\n \n \n from sklearn.ensemble import RandomForestRegressor\n import xgboost as xgb\n from sklearn.linear_model import LogisticRegression\n \n \n # In[12]:\n \n \n XGB = xgb.XGBRegressor(gamma = 5e0)\n RF = RandomForestRegressor(n_estimators = 20, min_samples_split = 20)\n LR = LogisticRegression()\n def get_GIPW_weights(model):\n eta = np.zeros(n)\n for k in range(K):\n SGIPW = Shallow_GIPW(xi_natural[Ind_out[k],:], xi_ring_natural)\n \n SGIPW.train(model,xi = np.array(xi_natural[Ind_in[k],:]),log=False)\n eta[Ind_in[k]] = SGIPW.weights*(SGIPW.weights>0)\n return eta\n \n eta_XGB = get_GIPW_weights(XGB)\n eta_RF = get_GIPW_weights(RF)\n eta_LR = get_GIPW_weights(LR)\n \n \n # In[13]:\n \n # OT\n OTB = OptimalTransportBalancing()\n eta_OT = OTB.get_weights(xi_natural,xi_ring_natural)\n eta_OT = eta_OT.detach().numpy()\n \n \n # In[17]:\n \n \n # MMD weights\n lambda_RKHS = 1e2\n lambda_l2 = 1e-3\n MMDB = MMDBalancing(xi_natural,xi_ring_natural,sigma = 5e-1,D = 2000)\n eta_MMD = MMDB.get_weights(lambda_RKHS = lambda_RKHS, lambda_l2 = lambda_l2)\n eta_MMD = eta_MMD.to(\"cpu\").detach().numpy()\n \n \n # In[18]:\n \n \n \n \n \n # In[20]:\n \n \n # Neural Adversarial Balancing\n class NeuralNetwork(nn.Module):\n def __init__(self,input_dim = 1, num_nodes = 32):\n super(NeuralNetwork, self).__init__()\n self.flatten = nn.Flatten()\n self.linear_relu_stack = nn.Sequential(\n nn.Linear(input_dim, num_nodes),\n nn.ReLU(),\n #nn.Dropout(0.3),\n #nn.BatchNorm1d(num_nodes), \n \n nn.Linear(num_nodes, num_nodes),\n nn.ReLU(),\n nn.Linear(num_nodes, num_nodes),\n nn.ReLU(),\n #nn.Dropout(0.3),\n #nn.BatchNorm1d(num_nodes), \n \n #nn.Linear(num_nodes, num_nodes),\n #nn.ReLU(),\n # # #nn.Dropout(0.3),\n # # nn.BatchNorm1d(num_nodes), \n \n nn.Linear(num_nodes, 1),\n )\n \n def forward(self, x):\n x = self.flatten(x)\n target = self.linear_relu_stack(x)\n return target\n \n \n # In[21]:\n \n \n AB = Adversarial_Balancing(xi_natural,xi_ring_natural)\n num_nodes_IPM = 24\n model_IPM = NeuralNetwork(input_dim = d*2,num_nodes = 2*num_nodes_IPM).to(AB.dev)\n model_reweighting = NeuralNetwork(input_dim = d*2, num_nodes = num_nodes_IPM).to(AB.dev)\n learning_rate = 1e-3\n optimizer_IPM = torch.optim.Adam(model_IPM.parameters(), lr=learning_rate, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=True)\n optimizer_reweighting = torch.optim.Adam(model_reweighting.parameters(), lr=learning_rate, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=True)\n \n \n # In[22]:\n \n \n epochs = 50\n loss_trace = []\n for t in range(epochs):\n #print(f\"Epoch {t+1}\\n-------------------------------\")\n current_test_loss = AB.train_loop(model_IPM = model_IPM,\n model_reweighting = model_reweighting,\n optimizer_IPM = optimizer_IPM,\n optimizer_reweighting = optimizer_reweighting,\n IPM_steps = 3,\n reweight_steps = 3,\n lambda_l2_weight = 5e-2,\n lambda_l2_IPM = 1e-2,\n lambda_l1_IPM = 1e-2,\n )\n loss_trace.append(current_test_loss.to(\"cpu\").detach().numpy())\n \n \n \n \n weights = model_reweighting(xi_natural.to(\"cuda:0\"))\n #weights /=weights.mean()\n eta_NAB = weights.to(\"cpu\").detach().numpy()\n \n \n \n \n \n # 4. 
Get $r^{\\natural}$ estimation with the same K-fold splitting\n \n # In[26]:\n \n \n from sklearn.linear_model import LinearRegression\n RF_R = RandomForestRegressor(n_estimators = 20, min_samples_split = 5)\n #model_r = RF_R\n model_r = LinearRegression()\n \n \n # In[27]:\n \n \n def get_r_estimation(model, K = 3):\n r_hat = np.zeros(n)\n r_hat_ring = np.zeros(m)\n for k in range(K):\n SGIPW = Shallow_GIPW(xi_natural[Ind_out[k],:], xi_ring_natural)\n model_k = model\n model_k.fit(xi_natural[Ind_out[k],:].detach().numpy(), R[Ind_out[k]].detach().numpy())\n \n r_hat[Ind_in[k]] = model_k.predict(xi_natural[Ind_in[k]].detach().numpy())\n r_hat_ring += model_k.predict(xi_ring_natural.detach().numpy())\n r_hat_ring /= K\n \n return r_hat, r_hat_ring\n \n \n # In[28]:\n \n \n r_hat,r_hat_ring = get_r_estimation(model_r)\n \n \n # In[29]:\n \n \n \n \n # ## Estimators\n \n # In[30]:\n \n \n def get_DE(eta, R = R, ref= ref):\n try:\n eta = torch.from_numpy(eta)\n except:\n pass\n pred = (eta*R).mean().item()\n error = torch.abs(pred - ref).item()\n return pred, error \n def get_DRE(eta,r_hat, r_hat_ring, R = R, ref = ref):\n try:\n eta = torch.from_numpy(eta)\n r_hat = torch.from_numpy(r_hat)\n except:\n pass\n pred = (eta*(R -r_hat)).mean() + r_hat_ring.mean()\n error = torch.abs(pred - ref).item()\n return pred.item(), error \n \n \n \n \n \n \n # In[31]:\n \n \n #pd.set_option(\"display.precision\", 2)\n #pd.set_option('display.float_format', lambda x: '%.2f' % x)\n table_bad_reg = pd.DataFrame([[get_DE(eta_OT)[1],get_DRE(eta_OT,r_hat,r_hat_ring)[1]],[get_DE(eta_MMD)[1],get_DRE(eta_MMD,r_hat,r_hat_ring)[1]], [get_DE(eta_NAB)[1],get_DRE(eta_NAB,r_hat,r_hat_ring)[1]], [get_DE(eta_RF)[1],get_DRE(eta_RF,r_hat,r_hat_ring)[1]],[get_DE(eta_XGB)[1],get_DRE(eta_XGB,r_hat,r_hat_ring)[1]], [get_DE(eta_LR)[1],get_DRE(eta_LR,r_hat,r_hat_ring)[1]],[None, torch.abs(r_hat_ring.mean()-ref).item()]], columns = (\"DE\",\"DRE\"), index = (\"OT\", \"MMD\",\"NAB\", \"GIPW-RF\",\"GIPW-XGB\",\"GIPW-LR\",\"G-computation\"))\n \n \n # ## Bad regression model: Linear regression\n \n # In[32]:\n \n \n \n \n # In[ ]:\n \n \n \n \n \n # ## Good regression model: XGBoosting\n \n # In[33]:\n \n \n XGB_R = xgb.XGBRegressor(n_estimators = 20, gamma = 1e-0)\n model_r = XGB_R\n r_hat,r_hat_ring = get_r_estimation(model_r)\n \n \n # In[34]:\n \n \n pd.set_option(\"display.precision\", 2)\n table_good_reg = pd.DataFrame([[get_DE(eta_OT)[1],get_DRE(eta_OT,r_hat,r_hat_ring)[1]],[get_DE(eta_MMD)[1],get_DRE(eta_MMD,r_hat,r_hat_ring)[1]], [get_DE(eta_NAB)[1],get_DRE(eta_NAB,r_hat,r_hat_ring)[1]], [get_DE(eta_RF)[1],get_DRE(eta_RF,r_hat,r_hat_ring)[1]],[get_DE(eta_XGB)[1],get_DRE(eta_XGB,r_hat,r_hat_ring)[1]], [get_DE(eta_LR)[1],get_DRE(eta_LR,r_hat,r_hat_ring)[1]],[None, torch.abs(r_hat_ring.mean()-ref).item()]], columns = (\"DE\",\"DRE\"), index = (\"OT\", \"MMD\",\"NAB\", \"GIPW-RF\",\"GIPW-XGB\",\"GIPW-LR\",\"G-computation\"))\n \n \n # In[35]:\n \n \n return table_bad_reg, table_good_reg\n\n\n"
] | [
[
"torch.ones",
"numpy.zeros",
"torch.rand",
"sklearn.linear_model.LinearRegression",
"torch.normal",
"pandas.set_option",
"sklearn.ensemble.RandomForestRegressor",
"torch.arange",
"torch.from_numpy",
"torch.abs",
"sklearn.linear_model.LogisticRegression",
"torch.sin",
"torch.zeros",
"numpy.array"
]
] |
Xuerui-Yang/xuerui-stat | [
"08b9dfedac810cbad5ee5969ca554212eb989db0"
] | [
"xuerui_stat/analysis/random_forest/plot_tree.py"
] | [
"import matplotlib.pyplot as plt\r\nimport seaborn as sns\r\n\r\n\r\n\r\nclass PlotTree():\r\n def __init__(self,tree_class):\r\n self._tree_class=tree_class\r\n self._decision_node = dict(boxstyle=\"sawtooth\", fc=\"0.8\")\r\n self._leaf_node = dict(boxstyle=\"round4\", fc=\"0.8\")\r\n self._arrow_args = dict(arrowstyle=\"<-\")\r\n\r\n def __get_tree_depth(self,tree):\r\n \"\"\"获取树的深度\"\"\"\r\n depth = 0\r\n # 定义的dict中首位储存的是节点信息,不计入计数\r\n for key in ('Left', 'Right'):\r\n # 记录各子节点的深度\r\n sub_tree = tree[key]\r\n if type(sub_tree).__name__ == \"dict\":\r\n # 如果该节点有分支,迭代计算该节点的深度\r\n thisdepth = self.__get_tree_depth(sub_tree)\r\n else:\r\n # 否则深度为一\r\n thisdepth = 1\r\n # 比较各分支深度,保留最深记录\r\n if thisdepth > depth:\r\n depth = thisdepth\r\n # 分支深度加一即为当前节点深度\r\n return depth + 1\r\n\r\n\r\n def __plot_node(self,node_txt, cntr_pt, prnt_pt, node_type):\r\n self._ax1.annotate(node_txt, xy=prnt_pt, xycoords='axes fraction',\r\n xytext=cntr_pt, textcoords='axes fraction',\r\n va=\"center\", ha=\"center\", bbox=node_type, arrowprops=self._arrow_args)\r\n\r\n\r\n def __plot_mid_text(self,cntr_pt, prnt_pt, txt_string):\r\n xMid = (prnt_pt[0] - cntr_pt[0]) / 2.0 + cntr_pt[0]\r\n yMid = (prnt_pt[1] - cntr_pt[1]) / 2.0 + cntr_pt[1]\r\n self._ax1.text(xMid, yMid, txt_string, va=\"center\",\r\n ha=\"center\", rotation=30)\r\n\r\n def __plot_tree(self,tree, prnt_pt, node_txt, branch=None):\r\n self._layer += 1\r\n diff = 1 / 2**(self._layer)\r\n keys = list(tree.keys())\r\n text = tree[keys[0]]\r\n if branch == 'Left':\r\n self._xOff -= diff\r\n elif branch == 'Right':\r\n self._xOff += diff\r\n else:\r\n pass\r\n cntr_pt = (self._xOff, self._yOff)\r\n self.__plot_mid_text(cntr_pt, prnt_pt, node_txt)\r\n self.__plot_node(text, cntr_pt, prnt_pt, self._decision_node)\r\n self._yOff = self._yOff - 1.0 / self._totalD\r\n for key in keys[1:]:\r\n sub_tree = tree[key]\r\n if type(sub_tree).__name__ == 'dict':\r\n self.__plot_tree(sub_tree, cntr_pt, str(key), key)\r\n else:\r\n if key == 'Left':\r\n x = self._xOff - diff / 2\r\n elif key == 'Right':\r\n x = self._xOff + diff / 2\r\n else:\r\n pass\r\n self.__plot_node(sub_tree, (x, self._yOff), cntr_pt, self._leaf_node)\r\n self.__plot_mid_text((x, self._yOff), cntr_pt, str(key))\r\n if branch == 'Left':\r\n self._xOff += diff\r\n elif branch == 'Right':\r\n self._xOff -= diff\r\n else:\r\n pass\r\n self._layer -= 1\r\n self._yOff = self._yOff + 1.0 / self._totalD\r\n\r\n def tree_structure_plot(self):\r\n fig = plt.figure(1, facecolor='white')\r\n fig.clf()\r\n axprops = dict(xticks=[], yticks=[])\r\n self._ax1 = plt.subplot(111, frameon=False, **axprops)\r\n self._totalD = float(self.__get_tree_depth(self._tree_class.tree))\r\n self._xOff = 0.5\r\n self._yOff = 1.0\r\n self._layer = 0\r\n self.__plot_tree(self._tree_class.tree, (0.5, 1.0), '')\r\n plt.show()\r\n\r\n def confusion_matrix_plot(self):\r\n mat=self._tree_class.confusion_matrix\r\n if mat is None:\r\n print(\"The confusion matrix is not computed. Please use 'test()' in 'DecisionTree' class to get it.\")\r\n else:\r\n fig, ax = plt.subplots(figsize=(6, 6))\r\n sns.heatmap(mat,xticklabels=mat.columns,yticklabels=mat.index,\r\n cbar_kws={\"shrink\": .5}, ax=ax)\r\n plt.tight_layout()\r\n plt.show()\r\n"
] | [
[
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
]
] |
cvisb/cvisb_data | [
"81ebf22782f2c44f8aa8ab9437cc4fb54248c3ed"
] | [
"sample-viewer-api/src/static/data/exploratory_scripts/2019-06-03_bonnie_plasmasamples.py"
] | [
"# Goal: get ebola/Lassa for Bonnie's plasma samples.\n# Simple clean and merge\n\nimport pandas as pd\n\nimport os\nos.chdir(\"/Users/laurahughes/GitHub/cvisb_data/sample-viewer-api/src/static/data/\")\n\nimport helpers\n\ndf = pd.read_excel(\"/Users/laurahughes/GitHub/cvisb_data/sample-viewer-api/src/static/data/input_data/sample_rosters/one_offs/CViSB Plasma Samples_Bonnie_2019-06-03.xlsx\")\n\ndf.shape\n\n\ndf['privatePatientID'] = df[\"Sample ID\"].apply(helpers.interpretID)\n\n# id dictionary\nids = pd.read_json(\"/Users/laurahughes/GitHub/cvisb_data/sample-viewer-api/src/static/data/output_data/patients/patients_2019-06-03_PRIVATE_dict.json\")\nids.reset_index(inplace=True)\nids.head()\nmerged = pd.merge(df, ids, how=\"left\", left_on=\"privatePatientID\", right_on=\"index\", indicator=True)\n\nmerged._merge.value_counts()\n\nmerged[merged._merge == \"left_only\"]\nmerged = merged[['Sample ID', \"Date of collection\", \"Sample type\", \"cohort\", \"elisa\", \"sID\", \"gID\", \"patientID\"]]\nmerged.to_csv(\"/Users/laurahughes/GitHub/cvisb_data/sample-viewer-api/src/static/data/input_data/sample_rosters/one_offs/2019-06-03_CViSBplasma_Bonnie.csv\", index = False)\n"
] | [
[
"pandas.read_excel",
"pandas.merge",
"pandas.read_json"
]
] |
plutasnyy/mgr | [
"4ca5686ba7d62d0e2b8c172f17eb90bd822fdc21"
] | [
"src/models/conv_block.py"
] | [
"from torch import nn\n\n\nclass ConvolutionalBlock(nn.Module):\n\n def __init__(self, in_channels=128, out_channels=256, kernel_size=3, padding=1, stride=1, padding_mode='zeros'):\n super().__init__()\n self.conv1 = nn.Conv1d(in_channels, out_channels, kernel_size=kernel_size, padding=padding, stride=stride,\n padding_mode=padding_mode)\n self.bn1 = nn.BatchNorm1d(out_channels)\n self.relu1 = nn.ReLU()\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu1(out)\n return out\n"
] | [
[
"torch.nn.Conv1d",
"torch.nn.ReLU",
"torch.nn.BatchNorm1d"
]
] |
peterchanw/utils | [
"26133c52ba5b0407d38371100b7b56fe2cf68149"
] | [
"LinReg/utilsLinReg.py"
] | [
"import sklearn.metrics as metrics\nimport pandas as pd\nimport numpy as np\n\ndef repair_chrdata(df, tCol):\n ### Parameters:\n # df: input dataframe\n # tCol: targeted column label with NaN\n ### Output\n # df: repaired dataframe\n # word: string of related dataframe column with some records have NaN in targeted column\n # count: number of records fixed in the targeted column with NaN\n\n # work out number of NaN records need to fix\n dFrm = df[df[tCol].isnull()]\n count = len(dFrm)\n # work out the fill up string (most appearance) at targeted column for NULL\n tword = df[tCol].unique().tolist()\n # print(tword)\n wordLT = df[tCol].value_counts(dropna=False)\n word = ''\n wordCnt = 0\n for index, value in wordLT.items():\n print(f'[COUNT] Index: {index}, Value: {value}')\n if wordCnt < value:\n word = index\n wordCnt = value\n # print(word)\n # print(wordLT)\n # update the targeted NaN with the most frequent string\n mask = df[tCol].isnull()\n df.loc[mask, tCol] = word\n print(f'[REPAIR] \"{tCol}\" with string: {word}, Count: {count}')\n return df, word, count\n\n# Repair a single number data column contained NaN with median value\ndef repair_numdata(df, tCol):\n ### Parameters:\n # df: input dataframe\n # tCol: targeted column label with NaN\n ### Output\n # df: repaired dataframe\n # medianVal: median value of related dataframe column with some records have NaN in targeted column\n # count: number of records fixed in the targeted column with NaN\n\n # work out number of NaN records need to fix\n dFrm = df[df[tCol].isnull()]\n count = len(dFrm)\n # work out the median value of the records from targeted column\n medianVal = df[tCol].median()\n # update the targeted NaN with the median value\n mask = df[tCol].isnull()\n df.loc[mask, tCol] = medianVal\n print(f'[REPAIR] \"{tCol}\" Median: {medianVal}, Count: {count}')\n return df, medianVal, count\n\n### Work out the educated guess targets to repair dataframe with NaN in 'repair_rdata' function\ndef repair_target(df, tCol, rCol):\n ### Parameters:\n # df: input dataframe\n # tCol: targeted column label with NaN\n # rCol: related column label without NaN for educated guess\n ### Output\n # target: column value of related column that have NaN in targeted column\n repair = df[df[tCol].isnull()]\n # print(repair[[rCol, tCol]])\n target = sorted(repair[rCol].unique().tolist())\n print(f'[TARGET] {tCol} NaN target: {target}')\n return target\n\n### Educated guess to repair dataframe column contained NaN with mean value of related\n### dataframe column\ndef repair_rcdata(df, tCol, rCol, target):\n ### Parameters:\n # df: input dataframe\n # tCol: targeted column label with NaN\n # rCol: related column label without NaN for educated guess\n # target: column value of related column that have NaN in targeted column\n ### Output\n # df: repaired dataframe\n # meanVal: mean value of related dataframe column with some records have NaN in targeted column\n # count: number of records fixed in the targeted column with NaN\n\n ### Main coding\n # work out number of NaN records need to fix\n dFrm = df[df[tCol].isnull()]\n dFrm = dFrm[dFrm[rCol] == target]\n count = len(dFrm)\n # work out the mean value of the records from related column\n repair = df.loc[df[rCol] == target]\n meanVal = round(repair[tCol].mean(), 3)\n if np.isnan(meanVal):\n meanVal = np.float64(0)\n # update the targeted NaN with the calculated mean value of related records\n df[tCol] = df.apply(\n lambda row: meanVal if np.isnan(row[tCol]) & (row[rCol] == target)\n else row[tCol], axis=1\n 
)\n print(f'[REPAIR] {tCol}({target}) Mean: {meanVal}, Count: {count}')\n return df, meanVal, count\n\ndef regression_results(y_true, y_pred):\n\n # Regression metrics\n explained_variance=metrics.explained_variance_score(y_true, y_pred)\n mean_absolute_error=metrics.mean_absolute_error(y_true, y_pred)\n mse=metrics.mean_squared_error(y_true, y_pred)\n # mean_squared_log_error=metrics.mean_squared_log_error(y_true, y_pred)\n # median_absolute_error=metrics.median_absolute_error(y_true, y_pred)\n r2=metrics.r2_score(y_true, y_pred)\n\n print('explained_variance: ', round(explained_variance,4))\n # print('mean_squared_log_error: ', round(mean_squared_log_error,4))\n print('r-squared (r2): ', round(r2,4))\n print('mean_absolute_error (MAE): ', round(mean_absolute_error,4))\n print('mean_squared_error (MSE): ', round(mse,4))\n print('root_mean_squared_error (RMSE): ', round(np.sqrt(mse),4))\n"
] | [
[
"sklearn.metrics.mean_squared_error",
"sklearn.metrics.mean_absolute_error",
"sklearn.metrics.explained_variance_score",
"numpy.isnan",
"numpy.sqrt",
"sklearn.metrics.r2_score",
"numpy.float64"
]
] |
hansonmcoombs/flopy | [
"49398983c36d381992621d5bf698ea7f78fc0014"
] | [
"autotest/t024_test.py"
] | [
"import os\n\nimport numpy as np\nimport pytest\nfrom ci_framework import FlopyTestSetup, base_test_dir\n\nimport flopy\n\nbase_dir = base_test_dir(__file__, rel_path=\"temp\", verbose=True)\n\nex_pth = os.path.join(\"..\", \"examples\", \"data\", \"mf2005_test\")\ntestmodels = [\n os.path.join(ex_pth, f) for f in os.listdir(ex_pth) if f.endswith(\".nam\")\n]\n\n\[email protected](\n \"namfile\",\n testmodels,\n)\ndef test_checker_on_load(namfile):\n # load all of the models in the mf2005_test folder\n # model level checks are performed by default on load()\n checker_on_load(namfile)\n\n\ndef checker_on_load(mfnam):\n f = os.path.basename(mfnam)\n d = os.path.dirname(mfnam)\n m = flopy.modflow.Modflow.load(f, model_ws=d)\n assert isinstance(\n m, flopy.modflow.Modflow\n ), \"Not a flopy.modflow.Modflow instance\"\n\n\ndef test_bcs_check():\n model_ws = f\"{base_dir}_test_bcs_check\"\n test_setup = FlopyTestSetup(verbose=True, test_dirs=model_ws)\n\n mf = flopy.modflow.Modflow(version=\"mf2005\", model_ws=model_ws)\n\n # test check for isolated cells\n dis = flopy.modflow.ModflowDis(\n mf, nlay=2, nrow=3, ncol=3, top=100, botm=95\n )\n bas = flopy.modflow.ModflowBas(mf, ibound=np.ones((2, 3, 3), dtype=int))\n chk = bas.check()\n\n dis = flopy.modflow.ModflowDis(\n mf, nlay=3, nrow=5, ncol=5, top=100, botm=95\n )\n ibound = np.zeros((3, 5, 5), dtype=int)\n ibound[1, 1, 1] = 1 # fully isolated cell\n ibound[0:2, 4, 4] = 1 # cell connected vertically to one other cell\n bas = flopy.modflow.ModflowBas(mf, ibound=ibound)\n mf._mg_resync = True\n chk = bas.check()\n assert chk.summary_array[\"desc\"][0] == \"isolated cells in ibound array\"\n assert (\n chk.summary_array.i[0] == 1\n and chk.summary_array.i[0] == 1\n and chk.summary_array.j[0] == 1\n )\n assert len(chk.summary_array) == 1\n\n ghb = flopy.modflow.ModflowGhb(\n mf, stress_period_data={0: [0, 0, 0, 100, 1]}\n )\n riv = flopy.modflow.ModflowRiv(\n mf,\n stress_period_data={\n 0: [[0, 0, 0, 101, 10, 100], [0, 0, 1, 80, 10, 90]]\n },\n )\n chk = ghb.check()\n assert chk.summary_array[\"desc\"][0] == \"BC in inactive cell\"\n chk = riv.check()\n assert chk.summary_array[\"desc\"][4] == \"RIV stage below rbots\"\n assert np.array_equal(chk.summary_array[\"j\"], np.array([0, 1, 1, 1, 1]))\n\n\ndef test_properties_check():\n # test that storage values ignored for steady state\n model_ws = f\"{base_dir}_test_properties_check\"\n test_setup = FlopyTestSetup(verbose=True, test_dirs=model_ws)\n\n mf = flopy.modflow.Modflow(\n version=\"mf2005\",\n model_ws=model_ws,\n )\n dis = flopy.modflow.ModflowDis(\n mf,\n nrow=2,\n ncol=2,\n top=np.array([[100, np.nan], [100, 100]]),\n nper=3,\n steady=True,\n )\n chk = dis.check()\n assert len(chk.summary_array) == 1\n kij = (\n chk.summary_array[\"k\"][0],\n chk.summary_array[\"i\"][0],\n chk.summary_array[\"j\"][0],\n )\n assert kij == (0, 0, 1)\n lpf = flopy.modflow.ModflowLpf(mf, sy=np.ones((2, 2)), ss=np.ones((2, 2)))\n chk = lpf.check()\n assert len(chk.summary_array) == 0\n\n # test k values check\n lpf = flopy.modflow.ModflowLpf(\n mf,\n hk=np.array([[1, 1e10], [1, -1]]),\n hani=np.array([[1, 1], [1, -1]]),\n vka=np.array([[1e10, 0], [1, 1e-20]]),\n )\n chk = lpf.check()\n ind1 = np.array(\n [\n True if list(inds) == [0, 1, 1] else False\n for inds in chk.view_summary_array_fields([\"k\", \"i\", \"j\"])\n ]\n )\n ind1_errors = chk.summary_array[ind1][\"desc\"]\n ind2 = np.array(\n [\n True if list(inds) == [0, 0, 1] else False\n for inds in chk.view_summary_array_fields([\"k\", \"i\", \"j\"])\n 
]\n )\n ind2_errors = chk.summary_array[ind2][\"desc\"]\n ind3 = np.array(\n [\n True if list(inds) == [0, 0, 0] else False\n for inds in chk.view_summary_array_fields([\"k\", \"i\", \"j\"])\n ]\n )\n ind3_errors = chk.summary_array[ind3][\"desc\"]\n\n assert (\n \"zero or negative horizontal hydraulic conductivity values\"\n in ind1_errors\n )\n assert (\n \"horizontal hydraulic conductivity values below checker threshold of 1e-11\"\n in ind1_errors\n )\n assert \"negative horizontal anisotropy values\" in ind1_errors\n assert (\n \"vertical hydraulic conductivity values below checker threshold of 1e-11\"\n in ind1_errors\n )\n assert (\n \"horizontal hydraulic conductivity values above checker threshold of 100000.0\"\n in ind2_errors\n )\n assert (\n \"zero or negative vertical hydraulic conductivity values\"\n in ind2_errors\n )\n assert (\n \"vertical hydraulic conductivity values above checker threshold of 100000.0\"\n in ind3_errors\n )\n\n\ndef test_oc_check():\n m = flopy.modflow.Modflow()\n oc = flopy.modflow.mfoc.ModflowOc(m)\n chk = oc.check()\n assert len(chk.summary_array) == 1, len(chk.summary_array)\n assert \"DIS package not available\" in chk.summary_array[0][\"desc\"]\n\n flopy.modflow.ModflowDis(m)\n oc.stress_period_data = {(0, 0): [\"save head\", \"save budget\"]}\n chk = oc.check() # check passsed\n assert len(chk.summary_array) == 0, len(chk.summary_array)\n\n oc.stress_period_data = {(0, 0): [\"save\"]}\n chk = oc.check()\n assert len(chk.summary_array) == 1, len(chk.summary_array)\n assert \"too few words\" in chk.summary_array[0][\"desc\"]\n\n oc.stress_period_data = {(0, 0): [\"save it\"]}\n chk = oc.check()\n assert len(chk.summary_array) == 1, len(chk.summary_array)\n assert \"action 'save it' ignored\" in chk.summary_array[0][\"desc\"]\n\n oc.stress_period_data = {(1, 1): [\"save head\", \"save budget\"]}\n chk = oc.check()\n assert len(chk.summary_array) == 1, len(chk.summary_array)\n assert \"OC stress_period_data ignored\" in chk.summary_array[0][\"desc\"]\n\n\nif __name__ == \"__main__\":\n print(f\"numpy version: {np.__version__}\")\n for mfnam in testmodels:\n checker_on_load(mfnam)\n test_bcs_check()\n test_properties_check()\n test_oc_check()\n"
] | [
[
"numpy.array",
"numpy.ones",
"numpy.zeros"
]
] |
ggzhang0071/nni | [
"f4145e62d89c3ca383cf00f2de5dfd2d1025ad92",
"eaad98528c7aa714c9848800d607d6aa3bdd531d"
] | [
"nni/retiarii/nn/pytorch/api.py",
"nni/compression/pytorch/utils/mask_conflict.py"
] | [
"# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\nimport math\nimport operator\nimport warnings\nfrom typing import Any, List, Union, Dict, Optional, Callable, Iterable, NoReturn, TypeVar\n\nimport torch\nimport torch.nn as nn\n\nfrom nni.common.serializer import Translatable\nfrom nni.retiarii.serializer import basic_unit\nfrom nni.retiarii.utils import STATE_DICT_PY_MAPPING_PARTIAL\nfrom .utils import Mutable, generate_new_label, get_fixed_value\n\n\n__all__ = ['LayerChoice', 'InputChoice', 'ValueChoice', 'Placeholder', 'ChosenInputs']\n\n\nclass LayerChoice(Mutable):\n \"\"\"\n Layer choice selects one of the ``candidates``, then apply it on inputs and return results.\n\n Layer choice does not allow itself to be nested.\n\n Parameters\n ----------\n candidates : list of nn.Module or OrderedDict\n A module list to be selected from.\n prior : list of float\n Prior distribution used in random sampling.\n label : str\n Identifier of the layer choice.\n\n Attributes\n ----------\n length : int\n Deprecated. Number of ops to choose from. ``len(layer_choice)`` is recommended.\n names : list of str\n Names of candidates.\n choices : list of Module\n Deprecated. A list of all candidate modules in the layer choice module.\n ``list(layer_choice)`` is recommended, which will serve the same purpose.\n\n Notes\n -----\n ``candidates`` can be a list of modules or a ordered dict of named modules, for example,\n\n .. code-block:: python\n\n self.op_choice = LayerChoice(OrderedDict([\n (\"conv3x3\", nn.Conv2d(3, 16, 128)),\n (\"conv5x5\", nn.Conv2d(5, 16, 128)),\n (\"conv7x7\", nn.Conv2d(7, 16, 128))\n ]))\n\n Elements in layer choice can be modified or deleted. Use ``del self.op_choice[\"conv5x5\"]`` or\n ``self.op_choice[1] = nn.Conv3d(...)``. Adding more choices is not supported yet.\n \"\"\"\n\n # FIXME: prior is designed but not supported yet\n\n @classmethod\n def create_fixed_module(cls, candidates: Union[Dict[str, nn.Module], List[nn.Module]], *,\n label: Optional[str] = None, **kwargs):\n chosen = get_fixed_value(label)\n if isinstance(candidates, list):\n result = candidates[int(chosen)]\n else:\n result = candidates[chosen]\n\n # map the named hierarchies to support weight inheritance for python engine\n if hasattr(result, STATE_DICT_PY_MAPPING_PARTIAL):\n # handle cases where layer choices are nested\n # already has a mapping, will merge with it\n prev_mapping = getattr(result, STATE_DICT_PY_MAPPING_PARTIAL)\n setattr(result, STATE_DICT_PY_MAPPING_PARTIAL, {k: f'{chosen}.{v}' for k, v in prev_mapping.items()})\n else:\n # \"result\" needs to know where to map itself.\n # Ideally, we should put a _mapping_ in the module where \"result\" is located,\n # but it's impossible to put mapping into parent module here.\n setattr(result, STATE_DICT_PY_MAPPING_PARTIAL, {'__self__': str(chosen)})\n return result\n\n def __init__(self, candidates: Union[Dict[str, nn.Module], List[nn.Module]], *,\n prior: Optional[List[float]] = None, label: Optional[str] = None, **kwargs):\n super(LayerChoice, self).__init__()\n if 'key' in kwargs:\n warnings.warn(f'\"key\" is deprecated. Assuming label.')\n label = kwargs['key']\n if 'return_mask' in kwargs:\n warnings.warn(f'\"return_mask\" is deprecated. Ignoring...')\n if 'reduction' in kwargs:\n warnings.warn(f'\"reduction\" is deprecated. 
Ignoring...')\n self.candidates = candidates\n self.prior = prior or [1 / len(candidates) for _ in range(len(candidates))]\n assert abs(sum(self.prior) - 1) < 1e-5, 'Sum of prior distribution is not 1.'\n self._label = generate_new_label(label)\n\n self.names = []\n if isinstance(candidates, dict):\n for name, module in candidates.items():\n assert name not in [\"length\", \"reduction\", \"return_mask\", \"_key\", \"key\", \"names\"], \\\n \"Please don't use a reserved name '{}' for your module.\".format(name)\n self.add_module(name, module)\n self.names.append(name)\n elif isinstance(candidates, list):\n for i, module in enumerate(candidates):\n self.add_module(str(i), module)\n self.names.append(str(i))\n else:\n raise TypeError(\"Unsupported candidates type: {}\".format(type(candidates)))\n self._first_module = self._modules[self.names[0]] # to make the dummy forward meaningful\n\n @property\n def key(self):\n return self._key()\n\n @torch.jit.ignore\n def _key(self):\n warnings.warn('Using key to access the identifier of LayerChoice is deprecated. Please use label instead.',\n category=DeprecationWarning)\n return self._label\n\n @property\n def label(self):\n return self._label\n\n def __getitem__(self, idx):\n if isinstance(idx, str):\n return self._modules[idx]\n return list(self)[idx]\n\n def __setitem__(self, idx, module):\n key = idx if isinstance(idx, str) else self.names[idx]\n return setattr(self, key, module)\n\n def __delitem__(self, idx):\n if isinstance(idx, slice):\n for key in self.names[idx]:\n delattr(self, key)\n else:\n if isinstance(idx, str):\n key, idx = idx, self.names.index(idx)\n else:\n key = self.names[idx]\n delattr(self, key)\n del self.names[idx]\n\n def __len__(self):\n return len(self.names)\n\n def __iter__(self):\n return map(lambda name: self._modules[name], self.names)\n\n @property\n def choices(self):\n return self._choices()\n\n @torch.jit.ignore\n def _choices(self):\n warnings.warn(\"layer_choice.choices is deprecated. Use `list(layer_choice)` instead.\", category=DeprecationWarning)\n return list(self)\n\n def forward(self, x):\n warnings.warn('You should not run forward of this module directly.')\n return self._first_module(x)\n\n def __repr__(self):\n return f'LayerChoice({self.candidates}, label={repr(self.label)})'\n\n\ntry:\n from typing import Literal\nexcept ImportError:\n from typing_extensions import Literal\n\nReductionType = Literal['mean', 'concat', 'sum', 'none']\n\n\nclass InputChoice(Mutable):\n \"\"\"\n Input choice selects ``n_chosen`` inputs from ``choose_from`` (contains ``n_candidates`` keys).\n Use ``reduction`` to specify how chosen inputs are reduced into one output. A few options are:\n\n * ``none``: do nothing and return the list directly.\n * ``sum``: summing all the chosen inputs.\n * ``mean``: taking the average of all chosen inputs.\n * ``concat``: concatenate all chosen inputs at dimension 1.\n\n We don't support customizing reduction yet.\n\n Parameters\n ----------\n n_candidates : int\n Number of inputs to choose from. It is required.\n n_chosen : int\n Recommended inputs to choose. 
If None, mutator is instructed to select any.\n reduction : str\n ``mean``, ``concat``, ``sum`` or ``none``.\n prior : list of float\n Prior distribution used in random sampling.\n label : str\n Identifier of the input choice.\n \"\"\"\n\n @classmethod\n def create_fixed_module(cls, n_candidates: int, n_chosen: Optional[int] = 1,\n reduction: ReductionType = 'sum', *,\n prior: Optional[List[float]] = None, label: Optional[str] = None, **kwargs):\n return ChosenInputs(get_fixed_value(label), reduction=reduction)\n\n def __init__(self, n_candidates: int, n_chosen: Optional[int] = 1,\n reduction: str = 'sum', *,\n prior: Optional[List[float]] = None, label: Optional[str] = None, **kwargs):\n super(InputChoice, self).__init__()\n if 'key' in kwargs:\n warnings.warn(f'\"key\" is deprecated. Assuming label.')\n label = kwargs['key']\n if 'return_mask' in kwargs:\n warnings.warn(f'\"return_mask\" is deprecated. Ignoring...')\n if 'choose_from' in kwargs:\n warnings.warn(f'\"reduction\" is deprecated. Ignoring...')\n self.n_candidates = n_candidates\n self.n_chosen = n_chosen\n self.reduction = reduction\n self.prior = prior or [1 / n_candidates for _ in range(n_candidates)]\n assert self.reduction in ['mean', 'concat', 'sum', 'none']\n self._label = generate_new_label(label)\n\n @property\n def key(self):\n return self._key()\n\n @torch.jit.ignore\n def _key(self):\n warnings.warn('Using key to access the identifier of InputChoice is deprecated. Please use label instead.',\n category=DeprecationWarning)\n return self._label\n\n @property\n def label(self):\n return self._label\n\n def forward(self, candidate_inputs: List[torch.Tensor]) -> torch.Tensor:\n warnings.warn('You should not run forward of this module directly.')\n return candidate_inputs[0]\n\n def __repr__(self):\n return f'InputChoice(n_candidates={self.n_candidates}, n_chosen={self.n_chosen}, ' \\\n f'reduction={repr(self.reduction)}, label={repr(self.label)})'\n\n\nclass ChosenInputs(nn.Module):\n \"\"\"\n A module that chooses from a tensor list and outputs a reduced tensor.\n The already-chosen version of InputChoice.\n\n When forward, ``chosen`` will be used to select inputs from ``candidate_inputs``,\n and ``reduction`` will be used to choose from those inputs to form a tensor.\n\n Attributes\n ----------\n chosen : list of int\n Indices of chosen inputs.\n reduction : ``mean`` | ``concat`` | ``sum`` | ``none``\n How to reduce the inputs when multiple are selected.\n \"\"\"\n\n def __init__(self, chosen: Union[List[int], int], reduction: ReductionType):\n super().__init__()\n self.chosen = chosen if isinstance(chosen, list) else [chosen]\n self.reduction = reduction\n\n def forward(self, candidate_inputs):\n return self._tensor_reduction(self.reduction, [candidate_inputs[i] for i in self.chosen])\n\n def _tensor_reduction(self, reduction_type, tensor_list):\n if reduction_type == 'none':\n return tensor_list\n if not tensor_list:\n return None # empty. 
return None for now\n if len(tensor_list) == 1:\n return tensor_list[0]\n if reduction_type == 'sum':\n return sum(tensor_list)\n if reduction_type == 'mean':\n return sum(tensor_list) / len(tensor_list)\n if reduction_type == 'concat':\n return torch.cat(tensor_list, dim=1)\n raise ValueError(f'Unrecognized reduction policy: \"{reduction_type}\"')\n\n\n# the code in ValueChoice can be generated with this codegen\n# this is not done online because I want to have type-hint supports\n# $ python -c \"from nni.retiarii.nn.pytorch.api import _valuechoice_codegen; _valuechoice_codegen(_internal=True)\"\ndef _valuechoice_codegen(*, _internal: bool = False):\n if not _internal:\n raise RuntimeError(\"This method is set to be internal. Please don't use it directly.\")\n MAPPING = {\n # unary\n 'neg': '-', 'pos': '+', 'invert': '~',\n # binary\n 'add': '+', 'sub': '-', 'mul': '*', 'matmul': '@',\n 'truediv': '//', 'floordiv': '/', 'mod': '%',\n 'lshift': '<<', 'rshift': '>>',\n 'and': '&', 'xor': '^', 'or': '|',\n # no reflection\n 'lt': '<', 'le': '<=', 'eq': '==',\n 'ne': '!=', 'ge': '>=', 'gt': '>',\n # NOTE\n # Currently we don't support operators like __contains__ (b in a),\n # Might support them in future when we actually need them.\n }\n\n binary_template = \"\"\" def __{op}__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.{opt}, '{{}} {sym} {{}}', [self, other])\"\"\"\n\n binary_r_template = \"\"\" def __r{op}__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.{opt}, '{{}} {sym} {{}}', [other, self])\"\"\"\n\n unary_template = \"\"\" def __{op}__(self) -> 'ValueChoiceX':\n return ValueChoiceX(operator.{op}, '{sym}{{}}', [self])\"\"\"\n\n for op, sym in MAPPING.items():\n if op in ['neg', 'pos', 'invert']:\n print(unary_template.format(op=op, sym=sym) + '\\n')\n else:\n opt = op + '_' if op in ['and', 'or'] else op\n print(binary_template.format(op=op, opt=opt, sym=sym) + '\\n')\n if op not in ['lt', 'le', 'eq', 'ne', 'ge', 'gt']:\n print(binary_r_template.format(op=op, opt=opt, sym=sym) + '\\n')\n\n\ndef _valuechoice_staticmethod_helper(orig_func):\n orig_func.__doc__ += \"\"\"\n Notes\n -----\n This function performs lazy evaluation.\n Only the expression will be recorded when the function is called.\n The real evaluation happens when the inner value choice has determined its final decision.\n If no value choice is contained in the parameter list, the evaluation will be intermediate.\"\"\"\n return orig_func\n\n\nclass ValueChoiceX(Translatable):\n \"\"\"Internal API. Implementation note:\n\n The transformed (X) version of value choice.\n It can be the result of composition (transformation) of one or several value choices. For example,\n\n .. code-block:: python\n\n nn.ValueChoice([1, 2]) + nn.ValueChoice([3, 4]) + 5\n\n The instance of base class cannot be created directly. Instead, they should be only the result of transformation of value choice.\n Therefore, there is no need to implement ``create_fixed_module`` in this class, because,\n 1. For python-engine, value choice itself has create fixed module. Consequently, the transformation is born to be fixed.\n 2. For graph-engine, it uses evaluate to calculate the result.\n\n Potentially, we have to implement the evaluation logic in oneshot algorithms. 
I believe we can postpone the discussion till then.\n \"\"\"\n\n def __init__(self, function: Callable[..., Any], repr_template: str, arguments: List[Any], dry_run: bool = True):\n super().__init__()\n\n if function is None:\n # this case is a hack for ValueChoice subclass\n # it will reach here only because ``__init__`` in ``nn.Module`` is useful.\n return\n\n self.function = function\n self.repr_template = repr_template\n self.arguments = arguments\n\n assert any(isinstance(arg, ValueChoiceX) for arg in self.arguments)\n\n if dry_run:\n # for sanity check\n self.dry_run()\n\n def inner_choices(self) -> Iterable['ValueChoice']:\n \"\"\"\n Return an iterable of all leaf value choices.\n Useful for composition of value choices.\n No deduplication on labels. Mutators should take care.\n \"\"\"\n for arg in self.arguments:\n if isinstance(arg, ValueChoiceX):\n yield from arg.inner_choices()\n\n def dry_run(self) -> Any:\n \"\"\"\n Dry run the value choice to get one of its possible evaluation results.\n \"\"\"\n # values are not used\n return self._evaluate(iter([]), True)\n\n def evaluate(self, values: Iterable[Any]) -> Any:\n \"\"\"\n Evaluate the result of this group.\n ``values`` should in the same order of ``inner_choices()``.\n \"\"\"\n return self._evaluate(iter(values), False)\n\n def _evaluate(self, values: Iterable[Any], dry_run: bool = False) -> Any:\n # \"values\" iterates in the recursion\n eval_args = []\n for arg in self.arguments:\n if isinstance(arg, ValueChoiceX):\n # recursive evaluation\n eval_args.append(arg._evaluate(values, dry_run))\n # the recursion will stop when it hits a leaf node (value choice)\n # the implementation is in `ValueChoice`\n else:\n # constant value\n eval_args.append(arg)\n return self.function(*eval_args)\n\n def _translate(self):\n \"\"\"\n Try to behave like one of its candidates when used in ``basic_unit``.\n \"\"\"\n return self.dry_run()\n\n def __repr__(self):\n reprs = []\n for arg in self.arguments:\n if isinstance(arg, ValueChoiceX) and not isinstance(arg, ValueChoice):\n reprs.append('(' + repr(arg) + ')') # add parenthesis for operator priority\n else:\n reprs.append(repr(arg))\n return self.repr_template.format(*reprs)\n\n # the following are a series of methods to create \"ValueChoiceX\"\n # which is a transformed version of value choice\n # https://docs.python.org/3/reference/datamodel.html#special-method-names\n\n # Special operators that can be useful in place of built-in conditional operators.\n @staticmethod\n @_valuechoice_staticmethod_helper\n def to_int(obj: 'ValueChoiceOrAny') -> Union['ValueChoiceX', int]:\n \"\"\"\n Convert a ``ValueChoice`` to an integer.\n \"\"\"\n if isinstance(obj, ValueChoiceX):\n return ValueChoiceX(int, 'int({})', [obj])\n return int(obj)\n\n @staticmethod\n @_valuechoice_staticmethod_helper\n def to_float(obj: 'ValueChoiceOrAny') -> Union['ValueChoiceX', float]:\n \"\"\"\n Convert a ``ValueChoice`` to a float.\n \"\"\"\n if isinstance(obj, ValueChoiceX):\n return ValueChoiceX(float, 'float({})', [obj])\n return float(obj)\n\n @staticmethod\n @_valuechoice_staticmethod_helper\n def condition(pred: 'ValueChoiceOrAny',\n true: 'ValueChoiceOrAny',\n false: 'ValueChoiceOrAny') -> 'ValueChoiceOrAny':\n \"\"\"\n Return ``true`` if the predicate ``pred`` is true else ``false``.\n\n Examples\n --------\n >>> ValueChoice.condition(ValueChoice([1, 2]) > ValueChoice([0, 3]), 2, 1)\n \"\"\"\n if any(isinstance(obj, ValueChoiceX) for obj in [pred, true, false]):\n return ValueChoiceX(lambda t, c, f: t if c else f, 
'{} if {} else {}', [true, pred, false])\n return true if pred else false\n\n @staticmethod\n @_valuechoice_staticmethod_helper\n def max(arg0: Union[Iterable['ValueChoiceOrAny'], 'ValueChoiceOrAny'],\n *args: List['ValueChoiceOrAny']) -> 'ValueChoiceOrAny':\n \"\"\"\n Returns the maximum value from a list of value choices.\n The usage should be similar to Python's built-in value choices,\n where the parameters could be an iterable, or at least two arguments.\n \"\"\"\n if not args:\n return ValueChoiceX.max(*list(arg0))\n lst = [arg0] + list(args)\n if any(isinstance(obj, ValueChoiceX) for obj in lst):\n return ValueChoiceX(max, 'max({})', lst)\n return max(lst)\n\n @staticmethod\n @_valuechoice_staticmethod_helper\n def min(arg0: Union[Iterable['ValueChoiceOrAny'], 'ValueChoiceOrAny'],\n *args: List['ValueChoiceOrAny']) -> 'ValueChoiceOrAny':\n \"\"\"\n Returns the minunum value from a list of value choices.\n The usage should be similar to Python's built-in value choices,\n where the parameters could be an iterable, or at least two arguments.\n \"\"\"\n if not args:\n return ValueChoiceX.min(*list(arg0))\n lst = [arg0] + list(args)\n if any(isinstance(obj, ValueChoiceX) for obj in lst):\n return ValueChoiceX(min, 'min({})', lst)\n return min(lst)\n\n def __hash__(self):\n # this is required because we have implemented ``__eq__``\n return id(self)\n\n # NOTE:\n # Write operations are not supported. Reasons follow:\n # - Semantics are not clear. It can be applied to \"all\" the inner candidates, or only the chosen one.\n # - Implementation effort is too huge.\n # As a result, inplace operators like +=, *=, magic methods like `__getattr__` are not included in this list.\n\n def __getitem__(self, key: Any) -> 'ValueChoiceX':\n return ValueChoiceX(lambda x, y: x[y], '{}[{}]', [self, key])\n\n # region implement int, float, round, trunc, floor, ceil\n # because I believe sometimes we need them to calculate #channels\n # `__int__` and `__float__` are not supported because `__int__` is required to return int.\n def __round__(self, ndigits: Optional[Any] = None) -> 'ValueChoiceX':\n if ndigits is not None:\n return ValueChoiceX(round, 'round({}, {})', [self, ndigits])\n return ValueChoiceX(round, 'round({})', [self])\n\n def __trunc__(self) -> 'ValueChoiceX':\n raise RuntimeError(\"Try to use `ValueChoice.to_int()` instead of `math.trunc()` on value choices.\")\n\n def __floor__(self) -> 'ValueChoiceX':\n return ValueChoiceX(math.floor, 'math.floor({})', [self])\n\n def __ceil__(self) -> 'ValueChoiceX':\n return ValueChoiceX(math.ceil, 'math.ceil({})', [self])\n\n def __index__(self) -> NoReturn:\n # https://docs.python.org/3/reference/datamodel.html#object.__index__\n raise RuntimeError(\"`__index__` is not allowed on ValueChoice, which means you can't \"\n \"use int(), float(), complex(), range() on a ValueChoice.\")\n\n def __bool__(self) -> NoReturn:\n raise RuntimeError('Cannot use bool() on ValueChoice. That means, using ValueChoice in a if-clause is illegal. 
'\n 'Please try methods like `ValueChoice.max(a, b)` to see whether that meets your needs.')\n # endregion\n\n # region the following code is generated with codegen (see above)\n # Annotated with \"region\" because I want to collapse them in vscode\n def __neg__(self) -> 'ValueChoiceX':\n return ValueChoiceX(operator.neg, '-{}', [self])\n\n def __pos__(self) -> 'ValueChoiceX':\n return ValueChoiceX(operator.pos, '+{}', [self])\n\n def __invert__(self) -> 'ValueChoiceX':\n return ValueChoiceX(operator.invert, '~{}', [self])\n\n def __add__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.add, '{} + {}', [self, other])\n\n def __radd__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.add, '{} + {}', [other, self])\n\n def __sub__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.sub, '{} - {}', [self, other])\n\n def __rsub__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.sub, '{} - {}', [other, self])\n\n def __mul__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.mul, '{} * {}', [self, other])\n\n def __rmul__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.mul, '{} * {}', [other, self])\n\n def __matmul__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.matmul, '{} @ {}', [self, other])\n\n def __rmatmul__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.matmul, '{} @ {}', [other, self])\n\n def __truediv__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.truediv, '{} // {}', [self, other])\n\n def __rtruediv__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.truediv, '{} // {}', [other, self])\n\n def __floordiv__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.floordiv, '{} / {}', [self, other])\n\n def __rfloordiv__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.floordiv, '{} / {}', [other, self])\n\n def __mod__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.mod, '{} % {}', [self, other])\n\n def __rmod__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.mod, '{} % {}', [other, self])\n\n def __lshift__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.lshift, '{} << {}', [self, other])\n\n def __rlshift__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.lshift, '{} << {}', [other, self])\n\n def __rshift__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.rshift, '{} >> {}', [self, other])\n\n def __rrshift__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.rshift, '{} >> {}', [other, self])\n\n def __and__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.and_, '{} & {}', [self, other])\n\n def __rand__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.and_, '{} & {}', [other, self])\n\n def __xor__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.xor, '{} ^ {}', [self, other])\n\n def __rxor__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.xor, '{} ^ {}', [other, self])\n\n def __or__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.or_, '{} | {}', [self, other])\n\n def __ror__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.or_, '{} | {}', [other, self])\n\n def __lt__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.lt, '{} < {}', [self, other])\n\n def __le__(self, other: Any) -> 'ValueChoiceX':\n return 
ValueChoiceX(operator.le, '{} <= {}', [self, other])\n\n def __eq__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.eq, '{} == {}', [self, other])\n\n def __ne__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.ne, '{} != {}', [self, other])\n\n def __ge__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.ge, '{} >= {}', [self, other])\n\n def __gt__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.gt, '{} > {}', [self, other])\n # endregion\n\n # __pow__, __divmod__, __abs__ are special ones.\n # Not easy to cover those cases with codegen.\n def __pow__(self, other: Any, modulo: Optional[Any] = None) -> 'ValueChoiceX':\n if modulo is not None:\n return ValueChoiceX(pow, 'pow({}, {}, {})', [self, other, modulo])\n return ValueChoiceX(lambda a, b: a ** b, '{} ** {}', [self, other])\n\n def __rpow__(self, other: Any, modulo: Optional[Any] = None) -> 'ValueChoiceX':\n if modulo is not None:\n return ValueChoiceX(pow, 'pow({}, {}, {})', [other, self, modulo])\n return ValueChoiceX(lambda a, b: a ** b, '{} ** {}', [other, self])\n\n def __divmod__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(divmod, 'divmod({}, {})', [self, other])\n\n def __rdivmod__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(divmod, 'divmod({}, {})', [other, self])\n\n def __abs__(self) -> 'ValueChoiceX':\n return ValueChoiceX(abs, 'abs({})', [self])\n\n\nValueChoiceOrAny = TypeVar('ValueChoiceOrAny', ValueChoiceX, Any)\n\n\nclass ValueChoice(ValueChoiceX, Mutable):\n \"\"\"\n ValueChoice is to choose one from ``candidates``.\n\n In most use scenarios, ValueChoice should be passed to the init parameters of a serializable module. For example,\n\n .. code-block:: python\n\n class Net(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv = nn.Conv2d(3, nn.ValueChoice([32, 64]), kernel_size=nn.ValueChoice([3, 5, 7]))\n\n def forward(self, x):\n return self.conv(x)\n\n In case, you want to search a parameter that is used repeatedly, this is also possible by sharing the same value choice instance.\n (Sharing the label should have the same effect.) For example,\n\n .. code-block:: python\n\n class Net(nn.Module):\n def __init__(self):\n super().__init__()\n hidden_dim = nn.ValueChoice([128, 512])\n self.fc = nn.Sequential(\n nn.Linear(64, hidden_dim),\n nn.Linear(hidden_dim, 10)\n )\n\n # the following code has the same effect.\n # self.fc = nn.Sequential(\n # nn.Linear(64, nn.ValueChoice([128, 512], label='dim')),\n # nn.Linear(nn.ValueChoice([128, 512], label='dim'), 10)\n # )\n\n def forward(self, x):\n return self.fc(x)\n\n Note that ValueChoice should be used directly. Transformations like ``nn.Linear(32, nn.ValueChoice([64, 128]) * 2)``\n are not supported.\n\n Another common use case is to initialize the values to choose from in init and call the module in forward to get the chosen value.\n Usually, this is used to pass a mutable value to a functional API like ``torch.xxx`` or ``nn.functional.xxx```.\n For example,\n\n .. 
code-block:: python\n\n class Net(nn.Module):\n def __init__(self):\n super().__init__()\n self.dropout_rate = nn.ValueChoice([0., 1.])\n\n def forward(self, x):\n return F.dropout(x, self.dropout_rate())\n\n Parameters\n ----------\n candidates : list\n List of values to choose from.\n prior : list of float\n Prior distribution to sample from.\n label : str\n Identifier of the value choice.\n \"\"\"\n\n # FIXME: prior is designed but not supported yet\n\n @classmethod\n def create_fixed_module(cls, candidates: List[Any], *, label: Optional[str] = None, **kwargs):\n value = get_fixed_value(label)\n if value not in candidates:\n raise ValueError(f'Value {value} does not belong to the candidates: {candidates}.')\n return value\n\n def __init__(self, candidates: List[Any], *, prior: Optional[List[float]] = None, label: Optional[str] = None):\n super().__init__(None, None, None)\n self.candidates = candidates\n self.prior = prior or [1 / len(candidates) for _ in range(len(candidates))]\n assert abs(sum(self.prior) - 1) < 1e-5, 'Sum of prior distribution is not 1.'\n self._label = generate_new_label(label)\n self._accessor = []\n\n @property\n def label(self):\n return self._label\n\n def forward(self):\n warnings.warn('You should not run forward of this module directly.')\n return self.candidates[0]\n\n def inner_choices(self) -> Iterable['ValueChoice']:\n # yield self because self is the only value choice here\n yield self\n\n def dry_run(self) -> Any:\n return self.candidates[0]\n\n def _evaluate(self, values: Iterable[Any], dry_run: bool = False) -> Any:\n if dry_run:\n return self.candidates[0]\n try:\n value = next(values)\n except StopIteration:\n raise ValueError(f'Value list {values} is exhausted when trying to get a chosen value of {self}.')\n if value not in self.candidates:\n raise ValueError(f'Value {value} does not belong to the candidates of {self}.')\n return value\n\n def __repr__(self):\n return f'ValueChoice({self.candidates}, label={repr(self.label)})'\n\n\n@basic_unit\nclass Placeholder(nn.Module):\n \"\"\"\n The API that creates an empty module for later mutations.\n For advanced usages only.\n \"\"\"\n\n def __init__(self, label, **related_info):\n self.label = label\n self.related_info = related_info\n super().__init__()\n\n def forward(self, x):\n return x\n",
"# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\nimport os\nimport logging\nimport torch\nimport numpy as np\nfrom .shape_dependency import ChannelDependency, GroupDependency, InputChannelDependency\nfrom .utils import get_module_by_name\n# logging.basicConfig(level = logging.DEBUG)\n_logger = logging.getLogger('FixMaskConflict')\n\n\ndef fix_mask_conflict(masks, model, dummy_input, traced=None):\n \"\"\"\n MaskConflict fix the mask conflict for the channel dependencies\n and group dependency.\n\n Parameters\n ----------\n masks : dict/str\n A dict object that stores the masks or the path of the mask file\n model : torch.nn.Module\n model to fix the mask conflict\n dummy_input : torch.Tensor/list of tensors/dict of tensors\n input example to trace the model\n traced : torch._C.torch.jit.TopLevelTracedModule\n the traced model of the target model, is this parameter is not None,\n we donnot use the model and dummpy_input to get the trace graph.\n \"\"\"\n if isinstance(masks, str):\n # if the input is the path of the mask_file\n assert os.path.exists(masks)\n masks = torch.load(masks)\n assert len(masks) > 0, 'Mask tensor cannot be empty'\n # if the user uses the model and dummy_input to trace the model, we\n # should get the traced model handly, so that, we only trace the\n # model once, GroupMaskConflict and ChannelMaskConflict will reuse\n # this traced model.\n if traced is None:\n assert model is not None and dummy_input is not None\n training = model.training\n # We need to trace the model in eval mode\n model.eval()\n kw_args = {}\n if torch.__version__ >= '1.6.0':\n # only pytorch with version greater than 1.6.0 has the strict option\n kw_args['strict'] = False\n traced = torch.jit.trace(model, dummy_input, **kw_args)\n model.train(training)\n\n fix_group_mask = GroupMaskConflict(masks, model, dummy_input, traced)\n masks = fix_group_mask.fix_mask()\n fix_channel_mask = ChannelMaskConflict(masks, model, dummy_input, traced)\n masks = fix_channel_mask.fix_mask()\n return masks\n\n\nclass MaskFix:\n def __init__(self, masks, model=None, dummy_input=None, traced=None):\n # check if the parameters are valid\n parameter_valid = False\n if traced is not None:\n parameter_valid = True\n elif (model is not None) and (dummy_input is not None):\n parameter_valid = True\n if not parameter_valid:\n raise Exception('The input parameters is invalid!')\n self.model = model\n self.dummy_input = dummy_input\n self.traced = traced\n self.masks = masks\n\n def fix_mask(self):\n raise NotImplementedError\n\n def export(self, path):\n \"\"\"\n Export the masks after fixing the conflict to file.\n \"\"\"\n torch.save(self.masks, path)\n\n\nclass GroupMaskConflict(MaskFix):\n def __init__(self, masks, model, dummy_input, traced=None):\n \"\"\"\n GroupMaskConflict fix the mask conflict between the layers that\n has group dependecy with each other.\n\n Parameters\n ----------\n masks : dict\n a dict object that stores the masks\n model : torch.nn.Module\n model to fix the mask conflict\n dummy_input : torch.Tensor\n input example to trace the model\n traced : torch._C.torch.jit.TopLevelTracedModule\n the traced model of the target model, is this parameter is not None,\n we donnot use the model and dummpy_input to get the trace graph.\n \"\"\"\n super(GroupMaskConflict, self).__init__(\n masks, model, dummy_input, traced)\n\n def fix_mask(self):\n \"\"\"\n Fix the mask conflict before the mask inference for the layers that\n has group dependencies. 
This function should be called before the\n mask inference of the 'speedup' module.\n \"\"\"\n group_depen = GroupDependency(\n self.model, self.dummy_input, self.traced)\n depens = group_depen.dependency\n min_groups = group_depen.min_groups\n _logger.info(depens)\n for layername in depens:\n group_max = depens[layername]\n group_min = min_groups[layername]\n if layername not in self.masks:\n # this layer not pruned\n continue\n w_mask = self.masks[layername]['weight']\n shape = w_mask.size()\n count = np.prod(shape[1:])\n all_ones = (w_mask.flatten(1).sum(-1) == count).nonzero().squeeze(1).tolist()\n all_zeros = (w_mask.flatten(1).sum(-1) == 0).nonzero().squeeze(1).tolist()\n if len(all_ones) + len(all_zeros) < w_mask.size(0):\n # In fine-grained pruning, skip this layer\n _logger.info('Layers %s using fine-grained pruning', layername)\n continue\n assert shape[0] % group_max == 0\n # Find the number of masked filter for each group (mini_masked).\n # Because we have to keep the pruned filter can still\n # be divided into the same number of groups, so we only can\n # prune mini_masked filters for each group.\n step = shape[0] / group_max\n group_masked = []\n for i in range(group_max):\n _start = step * i\n _end = step * (i + 1)\n _tmp_list = list(\n filter(lambda x: _start <= x and x < _end, all_zeros))\n group_masked.append(_tmp_list)\n mini_masked = min([len(x) for x in group_masked])\n need_unmask = set()\n for gm in group_masked:\n for i in range(mini_masked, len(gm)):\n # To keep the output channel number still being divisible to\n # groups, we set the masks of following filters to be zero.\n pos = gm[i]\n need_unmask.add(pos)\n step = shape[0] / group_min\n for i in range(group_min):\n _start = step * i\n _end = step * (i+1)\n _tmp_list = list(\n filter(lambda x: _start <= x and x < _end, all_zeros))\n if len(_tmp_list) == step:\n # if the whole group is removed, then we don't have to unmask for\n # the filters in this group\n for pos in _tmp_list:\n if pos in need_unmask:\n need_unmask.remove(pos)\n for pos in need_unmask:\n self.masks[layername]['weight'][pos] = torch.ones(shape[1:])\n if hasattr(self.masks[layername], 'bias'):\n self.masks[layername]['bias'][pos] = 1\n return self.masks\n\n\nclass ChannelMaskConflict(MaskFix):\n def __init__(self, masks, model, dummy_input, traced=None):\n \"\"\"\n ChannelMaskConflict fix the mask conflict between the layers that\n has channel dependecy with each other.\n\n Parameters\n ----------\n masks : dict\n a dict object that stores the masks\n model : torch.nn.Module\n model to fix the mask conflict\n dummy_input : torch.Tensor\n input example to trace the model\n graph : torch._C.torch.jit.TopLevelTracedModule\n the traced graph of the target model, is this parameter is not None,\n we donnot use the model and dummpy_input to get the trace graph.\n \"\"\"\n super(ChannelMaskConflict, self).__init__(\n masks, model, dummy_input, traced)\n self.conv_prune_dim = detect_mask_prune_dim(masks, model)\n self.channel_prune_type = detect_channel_prune_type(masks, model)\n _logger.info('Dectected conv prune dim\" %d', self.conv_prune_dim)\n\n def fix_mask(self):\n \"\"\"\n Fix the mask conflict before the mask inference for the layers that\n has shape dependencies. This function should be called before the\n mask inference of the 'speedup' module. 
Only structured pruning masks\n are supported.\n \"\"\"\n if self.conv_prune_dim == 0:\n channel_depen = ChannelDependency(\n self.model, self.dummy_input, self.traced, self.channel_prune_type)\n\n else:\n channel_depen = InputChannelDependency(\n self.model, self.dummy_input, self.traced)\n depen_sets = channel_depen.dependency_sets\n sum_idx = (1, 2, 3) if self.conv_prune_dim == 0 else (0, 2, 3)\n\n (_tmp_name, _tmp_tensor) = list(self.masks.items())[0]\n device = _tmp_tensor['weight'].device\n\n for dset in depen_sets:\n if len(dset) <= 1:\n continue\n # channel_masks is a list, each element is None or a vector, for example:\n # [[0, 1, 1, 0, 0], [0, 0, 1, 1, 0], None], None means no channel\n # is pruned.\n channel_masks = []\n fine_grained = False\n for name in dset:\n if name in self.masks:\n _, m = get_module_by_name(self.model, name)\n assert m is not None\n mask = self.masks[name]['weight']\n if type(m).__name__ == 'Conv2d':\n channel_mask = (mask.abs().sum(sum_idx) != 0).int()\n channel_masks.append(channel_mask)\n if (channel_mask.sum() * (mask.numel() / mask.shape[self.conv_prune_dim])).item() != (mask > 0).sum().item():\n fine_grained = True\n elif type(m).__name__ == 'Linear':\n if self.conv_prune_dim == 1:\n channel_masks.append(\n (mask.abs().sum(0) != 0).int())\n else:\n channel_masks.append(\n (mask.abs().sum(1) != 0).int())\n elif type(m).__name__ == 'BatchNorm2d':\n channel_masks.append(mask.int())\n elif type(m).__name__ == 'ConvTranspose2d':\n # convtranspose have difference memory layout, so that we need create\n # a tmp_sum_idx for conv_transpose\n tmp_sum_idx = (\n 0, 2, 3) if self.conv_prune_dim == 0 else (1, 2, 3)\n channel_mask = (mask.abs().sum(tmp_sum_idx) != 0).int()\n channel_masks.append(channel_mask)\n if (channel_mask.sum() * (mask.numel() / mask.shape[1 - self.conv_prune_dim])).item() != (mask > 0).sum().item():\n fine_grained = True\n else:\n raise RuntimeError(\n f'unsupported module type: {type(m).__name__}')\n else:\n # no mask means not pruned, equivlent to full masks\n channel_masks.append(None)\n if fine_grained:\n _logger.info(\"Fine-grianed mask detected\")\n if all(x is None for x in channel_masks):\n continue\n num_channels_list = [len(x)\n for x in channel_masks if x is not None]\n # number of channels in same set should be identical\n assert len(set(num_channels_list)) == 1\n num_channels = num_channels_list[0]\n\n for i, dim_mask in enumerate(channel_masks):\n if dim_mask is None:\n channel_masks[i] = torch.ones(\n num_channels).int().to(device)\n\n # merge masks with 'or'\n merged_channel_mask = channel_masks[0].clone()\n for i in range(1, len(channel_masks)):\n merged_channel_mask = (\n (merged_channel_mask + channel_masks[i]) != 0).int()\n\n merged_index = torch.nonzero(merged_channel_mask, as_tuple=True)[0]\n\n for name in dset:\n if name not in self.masks:\n assert all(merged_channel_mask)\n continue\n orig_mask = self.masks[name]['weight']\n _, m = get_module_by_name(self.model, name)\n new_mask = torch.zeros_like(orig_mask)\n if type(m).__name__ == 'Conv2d':\n if self.conv_prune_dim == 0:\n new_mask[merged_index, :, :, :] = 1.\n else:\n new_mask[:, merged_index, :, :] = 1.\n elif type(m).__name__ == 'Linear':\n if self.conv_prune_dim == 0:\n new_mask[merged_index, :] = 1\n elif self.conv_prune_dim == 1:\n new_mask[:, merged_index] = 1.\n elif type(m).__name__ == 'BatchNorm2d':\n new_mask = merged_channel_mask.type_as(orig_mask)\n else:\n raise RuntimeError(\n f'unsupported module type: {type(m).__name__}')\n 
self.masks[name]['weight'] = new_mask\n if 'bias' in self.masks[name] and self.masks[name]['bias'] is not None:\n if type(m).__name__ == 'Conv2d':\n assert self.conv_prune_dim == 0\n if self.conv_prune_dim == 0:\n self.masks[name]['bias'] = merged_channel_mask.type_as(\n self.masks[name]['bias'])\n\n return self.masks\n\ndef detect_channel_prune_type(masks, model):\n \"\"\"\n User can prune a channel through two ways: 1) prune\n the corresponding filter of the conv layer(all the\n filter related pruner), 2) prune the BN layers that\n followed after a conv(Slim pruner). This function find\n the pruning type of the masks.\n\n Parameters\n ----------\n masks: dict\n A dict object that stores the masks.\n model: nn.Module\n Model object which the mask can be applied on.\n\n Returns:\n -------\n prune_type: str\n Could be Filter or Batchnorm\n \"\"\"\n prune_type = 'Filter'\n all_batch_norm = True\n for layer_name in masks:\n _, m = get_module_by_name(model, layer_name)\n if m is None or (not isinstance(m, torch.nn.BatchNorm2d)):\n all_batch_norm = False\n break\n if all_batch_norm:\n # if all masks are for batchnorm layers, then the prune_type is BatchNorm\n # Note, actually we currently do not support pruning both Conv and BatchNorm\n # at the same time.\n prune_type = 'Batchnorm'\n return prune_type\n\ndef detect_mask_prune_dim(masks, model):\n \"\"\"\n Detect how the masks of convolutional layers are pruned.\n\n Parameters\n ----------\n masks: dict\n A dict object that stores the masks.\n model: nn.Module\n Model object which the mask can be applied on.\n Returns:\n -------\n How the masks of convolutional layers are pruned, this depends on pruning algorithms, it should\n return 1 for masks generated by AMCPruner, and returns 0 for masks generated by the rest\n NNI builtin pruners.\n 0: filter pruning, prune filters of weights which causes channels of output feature maps are pruned.\n 1: channel pruning, prune kernels corresponding to each input channels which causes channels of\n input feature maps are pruned.\n \"\"\"\n dim0_preserved, dim1_preserved = 0., 0.\n dim0_num, dim1_num = 0., 0.\n for module_name in masks:\n _, m = get_module_by_name(model, module_name)\n if m is None or type(m).__name__ != 'Conv2d':\n continue\n\n mask = masks[module_name]['weight'].clone()\n assert (mask >= 0).sum() == mask.numel(), \\\n \"mask values should be greater than or equal to 0.\"\n mask = (mask > 0).int()\n mask = mask.view(mask.shape[0], mask.shape[1], -1)\n dim0_mask = (mask.sum((1, 2)) > 0).int()\n dim1_mask = (mask.sum((0, 2)) > 0).int()\n dim0_preserved += dim0_mask.sum().item()\n dim1_preserved += dim1_mask.sum().item()\n dim0_num += len(dim0_mask)\n dim1_num += len(dim1_mask)\n\n if dim0_num == 0 or dim1_num == 0:\n _logger.warning('no multi-dimension masks found.')\n return 0\n\n dim0_sparsity, dim1_sparsity = 1. - dim0_preserved / \\\n dim0_num, 1. - dim1_preserved / dim1_num\n _logger.info('dim0 sparsity: %f', dim0_sparsity)\n _logger.info('dim1 sparsity: %f', dim1_sparsity)\n\n if dim0_sparsity == dim1_sparsity == 0.:\n _logger.warning('nothing masked.')\n\n if dim0_sparsity > 0 and dim1_sparsity > 0:\n _logger.warning('both dim0 and dim1 masks found.')\n\n return 0 if dim0_sparsity >= dim1_sparsity else 1\n"
] | [
[
"torch.cat"
],
[
"torch.ones",
"torch.nonzero",
"torch.load",
"torch.zeros_like",
"torch.save",
"numpy.prod",
"torch.jit.trace"
]
] |
neochristou/tensorflow | [
"1fb338b1c42930c0eef4d0b4d8d5fdf24a678654"
] | [
"tensorflow/python/distribute/collective_all_reduce_strategy.py"
] | [
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Class CollectiveAllReduceStrategy implementing DistributionStrategy.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport copy\nimport threading\nimport time\nimport weakref\n\nfrom tensorflow.core.protobuf import rewriter_config_pb2\nfrom tensorflow.core.protobuf import tensorflow_server_pb2\nfrom tensorflow.python.distribute import collective_util\nfrom tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib\nfrom tensorflow.python.distribute import cross_device_utils\nfrom tensorflow.python.distribute import device_util\nfrom tensorflow.python.distribute import distribute_lib\nfrom tensorflow.python.distribute import distribute_utils\nfrom tensorflow.python.distribute import distribution_strategy_context as ds_context\nfrom tensorflow.python.distribute import input_lib\nfrom tensorflow.python.distribute import mirrored_strategy\nfrom tensorflow.python.distribute import multi_worker_util\nfrom tensorflow.python.distribute import numpy_dataset\nfrom tensorflow.python.distribute import reduce_util\nfrom tensorflow.python.distribute import values\nfrom tensorflow.python.distribute.cluster_resolver import ClusterResolver\nfrom tensorflow.python.distribute.cluster_resolver import SimpleClusterResolver\nfrom tensorflow.python.distribute.cluster_resolver import TFConfigClusterResolver\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import collective_ops\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.training.tracking import base\nfrom tensorflow.python.util import deprecation\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n# pylint: disable=line-too-long\n@tf_export(\"distribute.MultiWorkerMirroredStrategy\", v1=[])\nclass CollectiveAllReduceStrategy(distribute_lib.Strategy):\n \"\"\"A distribution strategy for synchronous training on multiple workers.\n\n This strategy implements synchronous distributed training across multiple\n workers, each with potentially multiple GPUs. Similar to\n `tf.distribute.MirroredStrategy`, it replicates all variables and computations\n to each local device. The difference is that it uses a distributed collective\n implementation (e.g. all-reduce), so that multiple workers can work together.\n\n You need to launch your program on each worker and configure\n `cluster_resolver` correctly. For example, if you are using\n `tf.distribute.cluster_resolver.TFConfigClusterResolver`, each worker needs to\n have its corresponding `task_type` and `task_id` set in the `TF_CONFIG`\n environment variable. 
An example TF_CONFIG on worker-0 of a two worker cluster\n is:\n\n ```\n TF_CONFIG = '{\"cluster\": {\"worker\": [\"localhost:12345\", \"localhost:23456\"]}, \"task\": {\"type\": \"worker\", \"index\": 0} }'\n ```\n\n Your program runs on each worker as-is. Note that collectives require each\n worker to participate. All `tf.distribute` and non `tf.distribute` API may use\n collectives internally, e.g. checkpointing and saving since reading a\n `tf.Variable` with `tf.VariableSynchronization.ON_READ` all-reduces the value.\n Therefore it's recommended to run exactly the same program on each worker.\n Dispatching based on `task_type` or `task_id` of the worker is error-prone.\n\n `cluster_resolver.num_accelerators()` determines the number of GPUs the\n strategy uses. If it's zero, the strategy uses the CPU. All workers need to\n use the same number of devices, otherwise the behavior is undefined.\n\n This strategy is not intended for TPU. Use `tf.distribute.TPUStrategy`\n instead.\n\n After setting up TF_CONFIG, using this strategy is similar to using\n `tf.distribute.MirroredStrategy` and `tf.distribute.TPUStrategy`.\n\n ```\n strategy = tf.distribute.MultiWorkerMirroredStrategy()\n\n with strategy.scope():\n model = tf.keras.Sequential([\n tf.keras.layers.Dense(2, input_shape=(5,)),\n ])\n optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)\n\n def dataset_fn(ctx):\n x = np.random.random((2, 5)).astype(np.float32)\n y = np.random.randint(2, size=(2, 1))\n dataset = tf.data.Dataset.from_tensor_slices((x, y))\n return dataset.repeat().batch(1, drop_remainder=True)\n dist_dataset = strategy.distribute_datasets_from_function(dataset_fn)\n\n model.compile()\n model.fit(dist_dataset)\n ```\n\n You can also write your own training loop:\n\n ```\n @tf.function\n def train_step(iterator):\n\n def step_fn(inputs):\n features, labels = inputs\n with tf.GradientTape() as tape:\n logits = model(features, training=True)\n loss = tf.keras.losses.sparse_categorical_crossentropy(\n labels, logits)\n\n grads = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(zip(grads, model.trainable_variables))\n\n strategy.run(step_fn, args=(next(iterator),))\n\n for _ in range(NUM_STEP):\n train_step(iterator)\n ```\n\n See\n [Multi-worker training with Keras](https://www.tensorflow.org/tutorials/distribute/multi_worker_with_keras)\n for a detailed tutorial.\n\n __Saving__\n\n You need to save and checkpoint on all workers instead of just one. This is\n because variables whose synchronization=ON_READ triggers aggregation during\n saving. It's recommended to save to a different path on each worker to avoid\n race conditions. Each worker saves the same thing. See\n [Multi-worker training with Keras](https://www.tensorflow.org/tutorials/distribute/multi_worker_with_keras#model_saving_and_loading)\n tutorial for examples.\n\n __Known Issues__\n\n * `tf.distribute.cluster_resolver.TFConfigClusterResolver` does not return the\n correct number of accelerators. The strategy uses all available GPUs if\n `cluster_resolver` is `tf.distribute.cluster_resolver.TFConfigClusterResolver`\n or `None`.\n * In eager mode, the strategy needs to be created before calling any other\n Tensorflow API.\n\n \"\"\"\n # pylint: enable=line-too-long\n\n # TODO(anjalisridhar): Update our guides with examples showing how we can use\n # the cluster_resolver argument.\n\n # The starting number for collective keys. 
This should only be set in tests.\n _collective_key_base = 0\n\n def __init__(self,\n cluster_resolver=None,\n communication_options=None):\n \"\"\"Creates the strategy.\n\n Args:\n cluster_resolver: optional\n `tf.distribute.cluster_resolver.ClusterResolver`. If `None`,\n `tf.distribute.cluster_resolver.TFConfigClusterResolver` is used.\n communication_options: optional\n `tf.distribute.experimental.CommunicationOptions`. This configures the\n default options for cross device communications. It can be overridden by\n options provided to the communication APIs like\n `tf.distribute.ReplicaContext.all_reduce`. See\n `tf.distribute.experimental.CommunicationOptions` for details.\n \"\"\"\n if communication_options is None:\n communication_options = collective_util.Options()\n super(CollectiveAllReduceStrategy, self).__init__(\n CollectiveAllReduceExtended(\n self,\n cluster_resolver=cluster_resolver,\n communication_options=communication_options))\n\n distribute_lib.distribution_strategy_gauge.get_cell(\"V2\").set(\n \"MultiWorkerMirroredStrategy\")\n # pylint: disable=protected-access\n distribute_lib.distribution_strategy_replica_gauge.get_cell(\n \"num_workers\").set(self.extended._num_workers)\n distribute_lib.distribution_strategy_replica_gauge.get_cell(\n \"num_replicas_per_worker\").set(self.extended._num_gpus_per_worker)\n\n @classmethod\n def _from_local_devices(cls, devices, communication_options=None):\n \"\"\"A convenience method to create an object with a list of devices.\"\"\"\n obj = cls(communication_options=communication_options)\n obj.extended._initialize_local(TFConfigClusterResolver(), devices=devices) # pylint: disable=protected-access\n return obj\n\n @property\n def cluster_resolver(self):\n \"\"\"Returns the cluster resolver associated with this strategy.\n\n As a multi-worker strategy, `tf.distribute.MultiWorkerMirroredStrategy`\n provides the associated `tf.distribute.cluster_resolver.ClusterResolver`. If\n the user provides one in `__init__`, that instance is returned; if the user\n does not, a default `TFConfigClusterResolver` is provided.\n \"\"\"\n return self.extended._cluster_resolver # pylint: disable=protected-access\n\n\nclass _CollectiveAllReduceStrategyExperimentalMeta(type):\n\n @classmethod\n def __instancecheck__(cls, instance):\n # This is to make isinstance(tf.distribute.MultiWorkerMirroredStrategy(),\n # tf.distribute.experimental.MultiWorkerMirroredStrategy). Some libraries is\n # performing such check.\n return isinstance(instance, CollectiveAllReduceStrategy)\n\n\n@tf_export(\"distribute.experimental.MultiWorkerMirroredStrategy\", v1=[])\nclass _CollectiveAllReduceStrategyExperimental(\n CollectiveAllReduceStrategy,\n metaclass=_CollectiveAllReduceStrategyExperimentalMeta):\n\n __doc__ = CollectiveAllReduceStrategy.__doc__\n\n @deprecation.deprecated(\n None, \"use distribute.MultiWorkerMirroredStrategy instead\")\n def __init__(self,\n communication=collective_util.CommunicationImplementation.AUTO,\n cluster_resolver=None):\n \"\"\"Creates the strategy.\n\n Args:\n communication: optional\n `tf.distribute.experimental.CommunicationImplementation`. This is a hint\n on the preferred collective communication implementation. Possible\n values include `AUTO`, `RING`, and `NCCL`.\n cluster_resolver: optional\n `tf.distribute.cluster_resolver.ClusterResolver`. 
If `None`,\n `tf.distribute.cluster_resolver.TFConfigClusterResolver` is used.\n \"\"\"\n communication_options = collective_util.Options(\n implementation=communication)\n super(_CollectiveAllReduceStrategyExperimental,\n self).__init__(cluster_resolver, communication_options)\n\n @classmethod\n def _from_local_devices(\n cls,\n devices,\n communication=collective_util.CommunicationImplementation.AUTO):\n \"\"\"A convenience method to create an object with a list of devices.\"\"\"\n obj = cls(communication)\n obj.extended._initialize_local(TFConfigClusterResolver(), devices=devices) # pylint: disable=protected-access\n return obj\n\n\n_CollectiveAllReduceStrategyExperimental.__name__ = CollectiveAllReduceStrategy.__name__\n\n\n@tf_export(v1=[\"distribute.experimental.MultiWorkerMirroredStrategy\"]) # pylint: disable=missing-docstring\nclass CollectiveAllReduceStrategyV1(distribute_lib.StrategyV1):\n\n __doc__ = CollectiveAllReduceStrategy.__doc__\n\n # The starting number for collective keys. This should only be set in tests.\n _collective_key_base = 0\n\n def __init__(self,\n communication=collective_util.CommunicationImplementation.AUTO,\n cluster_resolver=None):\n \"\"\"Initializes the object.\"\"\"\n communication_options = collective_util.Options(\n implementation=communication)\n super(CollectiveAllReduceStrategyV1, self).__init__(\n CollectiveAllReduceExtended(\n self,\n cluster_resolver=cluster_resolver,\n communication_options=communication_options))\n distribute_lib.distribution_strategy_gauge.get_cell(\"V1\").set(\n \"MultiWorkerMirroredStrategy\")\n # pylint: disable=protected-access\n distribute_lib.distribution_strategy_replica_gauge.get_cell(\n \"num_workers\").set(self.extended._num_workers)\n distribute_lib.distribution_strategy_replica_gauge.get_cell(\n \"num_gpu_per_worker\").set(self.extended._num_gpus_per_worker)\n\n\nclass CollectiveAllReduceExtended(mirrored_strategy.MirroredExtended):\n \"\"\"Implementation of CollectiveAllReduceStrategy.\"\"\"\n\n # Whether to perdically check the health of the cluster. If any worker is not\n # reachable, collectives are aborted and the user program should get a\n # tf.errors.UnavailableError. It's required to restart in order to recover.\n _enable_check_health = True\n # Check health interval in seconds.\n _check_health_interval = 30\n # Timeout in seconds for the first check health. 
The first check health needs\n # to wait for cluster, which may make a longer time.\n _check_health_initial_timeout = 0\n # Times to retry before considering the peer is down.\n _check_health_retry_limit = 3\n # Timeout in seconds the each check health.\n _check_health_timeout = 10\n\n def __init__(self, container_strategy, cluster_resolver,\n communication_options):\n if not isinstance(communication_options, collective_util.Options):\n raise ValueError(\"communication_options must be an instance of \"\n \"tf.distribute.experimental.CommunicationOptions\")\n self._cluster_resolver = cluster_resolver or TFConfigClusterResolver()\n if not isinstance(self._cluster_resolver, ClusterResolver):\n raise ValueError(\"cluster_resolver must be an instance of \"\n \"tf.distribute.cluster_resolver.ClusterResolver\")\n distribute_lib.StrategyExtendedV1.__init__(self, container_strategy)\n self._communication_options = communication_options\n self._collective_key_base = container_strategy._collective_key_base # pylint: disable=protected-access\n self._initialize_strategy(self._cluster_resolver)\n self._cfer_fn_cache = weakref.WeakKeyDictionary()\n self.experimental_enable_get_next_as_optional = True\n assert isinstance(self._cross_device_ops,\n cross_device_ops_lib.CollectiveAllReduce)\n\n def _use_merge_call(self):\n \"\"\"XLA is not supported for multi-worker strategy.\"\"\"\n return True\n\n def _initialize_strategy(self, cluster_resolver):\n if cluster_resolver.cluster_spec().as_dict():\n self._initialize_multi_worker(cluster_resolver)\n else:\n self._initialize_local(cluster_resolver)\n\n def _initialize_local(self, cluster_resolver, devices=None):\n \"\"\"Initializes the object for local training.\"\"\"\n self._is_chief = True\n self._num_workers = 1\n\n if ops.executing_eagerly_outside_functions():\n try:\n context.context().configure_collective_ops(\n scoped_allocator_enabled_ops=(\"CollectiveReduce\",))\n except RuntimeError:\n logging.warning(\"Collective ops is not configured at program startup. \"\n \"Some performance features may not be enabled.\")\n self._collective_ops_configured = True\n\n # TODO(b/126786766): TFConfigClusterResolver returns wrong number of GPUs in\n # some cases.\n if isinstance(cluster_resolver, TFConfigClusterResolver):\n num_gpus = context.num_gpus()\n else:\n num_gpus = cluster_resolver.num_accelerators().get(\"GPU\", 0)\n\n if devices:\n local_devices = devices\n else:\n if num_gpus:\n local_devices = tuple(\"/device:GPU:%d\" % i for i in range(num_gpus))\n else:\n local_devices = (\"/device:CPU:0\",)\n\n self._worker_device = device_util.canonicalize(\"/device:CPU:0\")\n self._host_input_device = numpy_dataset.SingleDevice(self._worker_device)\n\n self._collective_keys = cross_device_utils.CollectiveKeys(\n group_key_start=1 + self._collective_key_base)\n self._cross_device_ops = cross_device_ops_lib.CollectiveAllReduce(\n devices=local_devices,\n group_size=len(local_devices),\n collective_keys=self._collective_keys)\n # CrossDeviceOps for per host tensors.\n self._host_cross_device_ops = cross_device_ops_lib.CollectiveAllReduce(\n devices=[self._worker_device],\n group_size=self._num_workers,\n collective_keys=self._collective_keys)\n super(CollectiveAllReduceExtended, self)._initialize_single_worker(\n local_devices)\n\n self._cluster_spec = None\n self._task_type = None\n self._task_id = None\n self._id_in_cluster = 0\n\n # This is a mark to tell whether we are running with standalone client or\n # independent worker. 
Right now with standalone client, strategy object is\n # created as local strategy and then turn into multi-worker strategy via\n # configure call.\n self._local_or_standalone_client_mode = True\n\n # Save the num_gpus_per_worker and rpc_layer for configure method.\n self._num_gpus_per_worker = num_gpus\n self._rpc_layer = cluster_resolver.rpc_layer\n self._warn_nccl_no_gpu()\n\n logging.info(\n \"Single-worker MultiWorkerMirroredStrategy with local_devices \"\n \"= %r, communication = %s\", local_devices,\n self._communication_options.implementation)\n\n def _initialize_multi_worker(self, cluster_resolver):\n \"\"\"Initializes the object for multi-worker training.\"\"\"\n cluster_spec = multi_worker_util.normalize_cluster_spec(\n cluster_resolver.cluster_spec())\n task_type = cluster_resolver.task_type\n task_id = cluster_resolver.task_id\n if task_type is None or task_id is None:\n raise ValueError(\"When `cluster_spec` is given, you must also specify \"\n \"`task_type` and `task_id`.\")\n self._cluster_spec = cluster_spec\n self._task_type = task_type\n self._task_id = task_id\n self._id_in_cluster = multi_worker_util.id_in_cluster(\n self._cluster_spec, self._task_type, self._task_id)\n\n self._num_workers = multi_worker_util.worker_count(cluster_spec, task_type)\n if not self._num_workers:\n raise ValueError(\"No `worker`, `chief` or `evaluator` tasks can be found \"\n \"in `cluster_spec`.\")\n\n self._is_chief = multi_worker_util.is_chief(cluster_spec, task_type,\n task_id)\n\n self._worker_device = \"/job:%s/task:%d\" % (task_type, task_id)\n self._host_input_device = numpy_dataset.SingleDevice(self._worker_device)\n\n if (ops.executing_eagerly_outside_functions() and\n not getattr(self, \"_local_or_standalone_client_mode\", False)):\n context.context().configure_collective_ops(\n collective_leader=multi_worker_util.collective_leader(\n cluster_spec, task_type, task_id),\n scoped_allocator_enabled_ops=(\"CollectiveReduce\",),\n device_filters=(\"/job:%s/task:%d\" % (task_type, task_id),))\n self._collective_ops_configured = True\n\n # Starting a std server in eager mode and in independent worker mode.\n if (context.executing_eagerly() and\n not getattr(self, \"_std_server_started\", False) and\n not getattr(self, \"_local_or_standalone_client_mode\", False)):\n # Checking _local_or_standalone_client_mode as well because we should not\n # create the std server in standalone client mode.\n config_proto = copy.deepcopy(context.context().config)\n config_proto = self._update_config_proto(config_proto)\n\n # If coordination service is enabled, use its internal heartbeat to detect\n # peer failures instead of the Python-level health check.\n if config_proto.experimental.coordination_service:\n self._enable_check_health = False\n\n if hasattr(cluster_resolver, \"port\"):\n port = cluster_resolver.port\n else:\n port = 0\n server_def = tensorflow_server_pb2.ServerDef(\n cluster=cluster_spec.as_cluster_def(),\n default_session_config=config_proto,\n job_name=task_type,\n task_index=task_id,\n protocol=cluster_resolver.rpc_layer or \"grpc\",\n port=port)\n context.context().enable_collective_ops(server_def)\n self._std_server_started = True\n # The `ensure_initialized` is needed before calling\n # `context.context().devices()`.\n context.context().ensure_initialized()\n logging.info(\n \"Enabled multi-worker collective ops with available devices: %r\",\n context.context().devices())\n\n # TODO(yuefengz): The `num_gpus` is only for this particular task. 
It\n # assumes all workers have the same number of GPUs. We should remove this\n # assumption by querying all tasks for their numbers of GPUs.\n # TODO(b/126786766): TFConfigClusterResolver returns wrong number of GPUs in\n # some cases.\n if isinstance(cluster_resolver, TFConfigClusterResolver):\n num_gpus = context.num_gpus()\n else:\n num_gpus = cluster_resolver.num_accelerators().get(\"GPU\", 0)\n\n if num_gpus:\n local_devices = tuple(\"%s/device:GPU:%d\" % (self._worker_device, i)\n for i in range(num_gpus))\n else:\n local_devices = (self._worker_device,)\n\n self._collective_keys = cross_device_utils.CollectiveKeys(\n group_key_start=1 + self._collective_key_base)\n self._cross_device_ops = cross_device_ops_lib.CollectiveAllReduce(\n devices=local_devices,\n group_size=len(local_devices) * self._num_workers,\n collective_keys=self._collective_keys)\n # CrossDeviceOps for per host tensors.\n self._host_cross_device_ops = cross_device_ops_lib.CollectiveAllReduce(\n devices=[self._worker_device],\n group_size=self._num_workers,\n collective_keys=self._collective_keys)\n super(CollectiveAllReduceExtended, self)._initialize_single_worker(\n local_devices)\n\n # Add a default device so that ops without specified devices will not end up\n # on other workers.\n self._default_device = \"/job:%s/task:%d\" % (task_type, task_id)\n\n # Save the num_gpus_per_worker and rpc_layer for configure method.\n self._num_gpus_per_worker = num_gpus\n self._rpc_layer = cluster_resolver.rpc_layer\n self._warn_nccl_no_gpu()\n\n if self._enable_check_health and context.executing_eagerly():\n self._start_check_health_thread()\n else:\n logging.info(\"Check health not enabled.\")\n\n logging.info(\n \"MultiWorkerMirroredStrategy with cluster_spec = %r, task_type = %r, \"\n \"task_id = %r, num_workers = %r, local_devices = %r, \"\n \"communication = %s\", cluster_spec.as_dict(), task_type, task_id,\n self._num_workers, local_devices,\n self._communication_options.implementation)\n\n def __del__(self):\n self._stop_check_health_thread()\n\n def _input_workers_with_options(self, options=None):\n host_device = device_util.get_host_for_device(self._worker_device)\n if not options or options.experimental_fetch_to_device:\n return input_lib.InputWorkers([(host_device, self.worker_devices)])\n else:\n return input_lib.InputWorkers([(\n host_device,\n [device_util.get_host_for_device(worker) for worker in\n self.worker_devices])])\n\n @property\n def _input_workers(self):\n return self._input_workers_with_options()\n\n def _get_variable_creator_initial_value(self,\n replica_id,\n device,\n primary_var,\n **kwargs):\n if replica_id == 0: # First replica on each worker.\n assert device is not None\n assert primary_var is None\n\n def initial_value_fn(): # pylint: disable=g-missing-docstring\n # Only the first device participates in the broadcast of initial values.\n group_key = self._collective_keys.get_group_key([device])\n group_size = self._num_workers\n collective_instance_key = (\n self._collective_keys.get_instance_key(group_key, device))\n\n with ops.device(device):\n initial_value = kwargs[\"initial_value\"]\n if callable(initial_value):\n initial_value = initial_value()\n if isinstance(initial_value, base.CheckpointInitialValue):\n initial_value = initial_value.wrapped_value\n assert not callable(initial_value)\n initial_value = ops.convert_to_tensor(\n initial_value, dtype=kwargs.get(\"dtype\", None))\n\n if self._num_workers > 1:\n if self._is_chief:\n bcast_send = collective_ops.broadcast_send(\n 
initial_value, initial_value.shape, initial_value.dtype,\n group_size, group_key, collective_instance_key)\n with ops.control_dependencies([bcast_send]):\n return array_ops.identity(initial_value)\n else:\n return collective_ops.broadcast_recv(initial_value.shape,\n initial_value.dtype,\n group_size, group_key,\n collective_instance_key)\n return initial_value\n\n return initial_value_fn\n else:\n return super(CollectiveAllReduceExtended,\n self)._get_variable_creator_initial_value(\n replica_id=replica_id,\n device=device,\n primary_var=primary_var,\n **kwargs)\n\n def _make_input_context(self):\n input_context = distribute_lib.InputContext(\n num_input_pipelines=self._num_workers,\n input_pipeline_id=self._id_in_cluster,\n num_replicas_in_sync=self._num_replicas_in_sync)\n return input_context\n\n def _experimental_distribute_dataset(self, dataset, options):\n if (options and options.experimental_replication_mode ==\n distribute_lib.InputReplicationMode.PER_REPLICA):\n raise NotImplementedError(\n \"InputReplicationMode.PER_REPLICA \"\n \"is only supported in \"\n \"`distribute_datasets_from_function` \"\n \"of tf.distribute.MirroredStrategy\"\n )\n input_context = self._make_input_context()\n return input_lib.get_distributed_dataset(\n dataset,\n self._input_workers_with_options(options),\n self._container_strategy(),\n num_replicas_in_sync=self._num_replicas_in_sync,\n input_context=input_context,\n options=options)\n\n def _distribute_datasets_from_function(self, dataset_fn, options):\n if (options and options.experimental_replication_mode ==\n distribute_lib.InputReplicationMode.PER_REPLICA):\n raise NotImplementedError(\n \"InputReplicationMode.PER_REPLICA \"\n \"is only supported in \"\n \"`distribute_datasets_from_function` \"\n \"of tf.distribute.MirroredStrategy\")\n input_context = self._make_input_context()\n return input_lib.get_distributed_datasets_from_function(\n dataset_fn=dataset_fn,\n input_workers=self._input_workers_with_options(options),\n input_contexts=[input_context],\n strategy=self._container_strategy(),\n options=options)\n\n def _experimental_distribute_values_from_function(self, value_fn):\n per_replica_values = []\n num_local_replicas = len(self.worker_devices)\n for local_replica_id in range(num_local_replicas):\n replica_id = (self._id_in_cluster * num_local_replicas +\n local_replica_id)\n value_context = distribute_lib.ValueContext(\n replica_id, self._num_replicas_in_sync)\n per_replica_values.append(value_fn(value_context))\n return distribute_utils.regroup(per_replica_values, always_wrap=True)\n\n def _make_dataset_iterator(self, dataset):\n \"\"\"Distributes the dataset to each local GPU.\"\"\"\n input_context = self._make_input_context()\n return input_lib.DatasetIterator(\n dataset,\n self._input_workers,\n self._container_strategy(),\n num_replicas_in_sync=self._num_replicas_in_sync,\n input_context=input_context)\n\n def _make_input_fn_iterator(\n self,\n input_fn,\n replication_mode=distribute_lib.InputReplicationMode.PER_WORKER):\n \"\"\"Distributes the input function to each local GPU.\"\"\"\n input_context = self._make_input_context()\n return input_lib.InputFunctionIterator(input_fn, self._input_workers,\n [input_context],\n self._container_strategy())\n\n def _configure(self,\n session_config=None,\n cluster_spec=None,\n task_type=None,\n task_id=None):\n \"\"\"Configures the object.\n\n Args:\n session_config: a `tf.compat.v1.ConfigProto`\n cluster_spec: a dict, ClusterDef or ClusterSpec object specifying the\n cluster configurations.\n 
task_type: the current task type, such as \"worker\".\n task_id: the current task id.\n\n Raises:\n ValueError: if `task_type` is not in the `cluster_spec`.\n \"\"\"\n if cluster_spec:\n # Use the num_gpus_per_worker recorded in constructor since _configure\n # doesn't take num_gpus.\n cluster_resolver = SimpleClusterResolver(\n cluster_spec=multi_worker_util.normalize_cluster_spec(cluster_spec),\n task_type=task_type,\n task_id=task_id,\n num_accelerators={\"GPU\": self._num_gpus_per_worker},\n rpc_layer=self._rpc_layer)\n self._initialize_multi_worker(cluster_resolver)\n assert isinstance(self._cross_device_ops,\n cross_device_ops_lib.CollectiveAllReduce)\n\n if session_config:\n session_config.CopyFrom(self._update_config_proto(session_config))\n\n def _update_config_proto(self, config_proto):\n updated_config = copy.deepcopy(config_proto)\n # Enable the scoped allocator optimization for CollectiveOps. This\n # optimization converts many small all-reduces into fewer larger\n # all-reduces.\n rewrite_options = updated_config.graph_options.rewrite_options\n rewrite_options.scoped_allocator_optimization = (\n rewriter_config_pb2.RewriterConfig.ON)\n # We turn on ScopedAllocator only for CollectiveReduce op, i.e. enable_op =\n # [\"CollectiveReduce\"]. Since we can't assign to a repeated proto field, we\n # clear and then append.\n del rewrite_options.scoped_allocator_opts.enable_op[:]\n rewrite_options.scoped_allocator_opts.enable_op.append(\"CollectiveReduce\")\n\n if (not ops.executing_eagerly_outside_functions() and\n self._communication_options.implementation ==\n collective_util.CommunicationImplementation.NCCL):\n updated_config.experimental.collective_nccl = True\n\n if not self._cluster_spec:\n return updated_config\n\n assert self._task_type\n assert self._task_id is not None\n\n # Collective group leader is needed for collective ops to coordinate\n # workers.\n updated_config.experimental.collective_group_leader = (\n multi_worker_util.collective_leader(self._cluster_spec, self._task_type,\n self._task_id))\n\n # The device filters prevent communication between workers.\n del updated_config.device_filters[:]\n updated_config.device_filters.append(\n \"/job:%s/task:%d\" % (self._task_type, self._task_id))\n\n return updated_config\n\n def _get_cross_device_ops(self, value):\n # CollectiveAllReduce works on a predefined set of devices. In most cases\n # they should be the compute devices, but certain use cases may reduce host\n # tensors as well (e.g. early stopping). We infer the cross_device_ops to\n # use based on the number of devices, since inputs don't always have device\n # annotations. 
The compute devices one is preferred since we can potentially\n # leverage NCCL.\n if isinstance(value, values.DistributedValues):\n num_devices = len(value._values) # pylint: disable=protected-access\n else:\n num_devices = 1\n if num_devices == len(self.worker_devices):\n return self._cross_device_ops\n else:\n return self._host_cross_device_ops\n\n def _gather_to_implementation(self, value, destinations, axis, options):\n return self._get_cross_device_ops(value)._gather( # pylint: disable=protected-access\n value,\n destinations=destinations,\n axis=axis,\n options=options)\n\n def _reduce_to(self, reduce_op, value, destinations, options):\n if (isinstance(value, values.Mirrored) and\n reduce_op == reduce_util.ReduceOp.MEAN):\n return value\n assert not isinstance(value, values.Mirrored)\n\n if (isinstance(value, values.DistributedValues) and\n len(self.worker_devices) == 1):\n value = value.values[0]\n\n # When there are multiple workers, we need to reduce across workers using\n # collective ops.\n if (not isinstance(value, values.DistributedValues) and\n self._num_workers == 1):\n # This function handles reducing values that are not PerReplica or\n # Mirrored values. For example, the same value could be present on all\n # replicas in which case `value` would be a single value or value could\n # be 0.\n return cross_device_ops_lib.reduce_non_distributed_value(\n reduce_op, value, destinations, len(self.worker_devices))\n return self._get_cross_device_ops(value).reduce(\n reduce_op,\n value,\n destinations=destinations,\n options=self._communication_options.merge(options))\n\n def _replica_ctx_all_reduce(self, reduce_op, value, options=None):\n \"\"\"Implements `StrategyExtendedV2._replica_ctx_all_reduce`.\"\"\"\n # This implementation avoids using `merge_call` and just launches collective\n # ops in one replica.\n if options is None:\n options = collective_util.Options()\n\n if context.executing_eagerly():\n # In eager mode, falls back to the default implemenation that uses\n # `merge_call`. Replica functions are running sequentially in eager mode,\n # and due to the blocking nature of collective ops, execution will hang if\n # collective ops are to be launched sequentially.\n return super()._replica_ctx_all_reduce(reduce_op, value, options)\n\n replica_context = ds_context.get_replica_context()\n assert replica_context, (\n \"`StrategyExtended._replica_ctx_all_reduce` must be called in a \"\n \"replica context\")\n return self._cross_device_ops._all_reduce( # pylint: disable=protected-access\n reduce_op,\n value,\n replica_context._replica_id, # pylint: disable=protected-access\n options)\n\n def _check_health(self):\n while True:\n if self._check_health_thread_should_stop.is_set():\n return\n for job in self._cluster_spec.jobs:\n for task_id in range(self._cluster_spec.num_tasks(job)):\n peer = \"/job:{}/replica:0/task:{}\".format(job, task_id)\n attempts = 0\n while True:\n attempts += 1\n try:\n context.context().check_collective_ops_peer_health(\n peer, timeout_in_ms=self._check_health_timeout * 1000)\n # If check_collective_ops_peer_health doesn't raise an Exception,\n # the peer is healthy.\n break\n except (errors.UnavailableError, errors.FailedPreconditionError,\n errors.DeadlineExceededError) as e:\n # TODO(b/151232436): Always raise UnavailableError when a peer\n # fails. Now there could be many kinds of errors:\n # - Unavailable: when the peer is not reachable, e.g. 
it's down.\n # - FailedPrecondition: when the peer has restarted.\n if attempts < self._check_health_retry_limit:\n logging.warning(\"%s seems down, retrying %d/%d\", peer, attempts,\n self._check_health_retry_limit)\n continue\n logging.error(\n \"Cluster check alive failed, %s is down, \"\n \"aborting collectives: %s\", peer, e)\n context.context().abort_collective_ops(\n errors.UNAVAILABLE,\n \"cluster check alive failed, {} is down\".format(peer))\n return\n except Exception as e: # pylint: disable=broad-except\n logging.error(\"Unexpected exception in check alive: %s\", e)\n context.context().abort_collective_ops(\n errors.INTERNAL,\n \"unexecpted exception in check alive: %s\" % e)\n return\n time.sleep(self._check_health_interval)\n\n def _start_check_health_thread(self):\n # Use a dummy all-reduce as a barrier to wait for all workers to be up,\n # otherwise the check health may fail immediately.\n\n # Use array_ops.identity to create the dummy tensor so that we have a new\n # Tensor. If we use constant it may be a cached from on a /job:localhost\n # device, which will cause some code that relies on tensor.device to error.\n #\n # TODO(b/151232436): change to an explicit barrier if we have it.\n dummy_value = array_ops.identity([])\n logging.info(\"Waiting for the cluster, timeout = %s\",\n self._check_health_initial_timeout or \"inf\")\n try:\n self._host_cross_device_ops.reduce(\n reduce_util.ReduceOp.SUM,\n dummy_value,\n dummy_value,\n options=collective_util.Options(\n timeout_seconds=self._check_health_initial_timeout,\n implementation=collective_util.CommunicationImplementation.RING))\n if context.is_async():\n context.async_wait()\n except errors.DeadlineExceededError:\n raise RuntimeError(\n \"Timeout waiting for the cluster, timeout is %d seconds\" %\n self._check_health_initial_timeout)\n logging.info(\"Cluster is ready.\")\n self._check_health_thread_should_stop = threading.Event()\n # Start the thread as daemon to avoid it blocking the program from exiting.\n # We try best to shutdown the thread but __del__ is not guaranteed to be\n # called when program exists.\n self._check_health_thread = threading.Thread(\n target=self._check_health,\n daemon=True)\n self._check_health_thread.start()\n\n def _stop_check_health_thread(self):\n if getattr(self, \"_check_health_thread\", None):\n logging.info(\"stopping check health thread\")\n self._check_health_thread_should_stop.set()\n self._check_health_thread.join()\n self._check_health_thread = None\n logging.info(\"check health thread stopped\")\n\n def _warn_nccl_no_gpu(self):\n if ((self._communication_options.implementation ==\n collective_util.CommunicationImplementation.NCCL) and\n self._num_gpus_per_worker == 0):\n logging.warning(\"Enabled NCCL communication but no GPUs detected/\"\n \"specified.\")\n\n def _in_multi_worker_mode(self):\n \"\"\"Whether this strategy indicates working in multi-worker settings.\"\"\"\n return self._num_workers > 1\n\n @property\n def experimental_between_graph(self):\n return True\n\n @property\n def experimental_should_init(self):\n return True\n\n @property\n def should_checkpoint(self):\n return self._is_chief\n\n @property\n def should_save_summary(self):\n return self._is_chief\n\n @property\n def _num_replicas_in_sync(self):\n return len(self.worker_devices) * self._num_workers\n\n # TODO(priyag): Delete this once all strategies use global batch size.\n @property\n def _global_batch_size(self):\n \"\"\"`make_dataset_iterator` and `make_numpy_iterator` use global batch size.\n\n 
`make_input_fn_iterator` assumes per-replica batching.\n\n Returns:\n Boolean.\n \"\"\"\n return True\n\n def _get_replica_id_in_sync_group(self, replica_id):\n return self._id_in_cluster * len(self.worker_devices) + replica_id\n\n def _get_local_replica_id(self, replica_id_in_sync_group):\n return (replica_id_in_sync_group -\n self._id_in_cluster * len(self.worker_devices))\n\n def __deepcopy__(self, memo):\n # We check the check health thread instead of whether we are in eager mode\n # to limit the backward incompatibility.\n if hasattr(self, \"_check_health_thread\"):\n raise ValueError(\n \"MultiWorkerMirroredStrategy cannot be deep copied in eager mode. \"\n \"If you're using Estimator and see this error message, call \"\n \"tf.compat.v1.disable_eager_execution() at the beginning of your \"\n \"program\")\n # Otherwise, do a regular deepcopy.\n cls = self.__class__\n result = cls.__new__(cls)\n memo[id(self)] = result\n for k, v in self.__dict__.items():\n setattr(result, k, copy.deepcopy(v, memo))\n return result\n"
] | [
[
"tensorflow.python.util.deprecation.deprecated",
"tensorflow.python.distribute.multi_worker_util.collective_leader",
"tensorflow.python.distribute.multi_worker_util.worker_count",
"tensorflow.python.distribute.multi_worker_util.is_chief",
"tensorflow.python.distribute.multi_worker_util.id_in_cluster",
"tensorflow.python.distribute.distribute_lib.StrategyExtendedV1.__init__",
"tensorflow.python.distribute.cluster_resolver.TFConfigClusterResolver",
"tensorflow.python.distribute.input_lib.InputWorkers",
"tensorflow.python.distribute.distribute_lib.InputContext",
"tensorflow.python.ops.collective_ops.broadcast_recv",
"tensorflow.python.distribute.distribute_utils.regroup",
"tensorflow.python.eager.context.num_gpus",
"tensorflow.python.distribute.distribute_lib.distribution_strategy_replica_gauge.get_cell",
"tensorflow.python.distribute.cross_device_utils.CollectiveKeys",
"tensorflow.python.ops.collective_ops.broadcast_send",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.eager.context.async_wait",
"tensorflow.python.distribute.device_util.canonicalize",
"tensorflow.python.distribute.collective_util.Options",
"tensorflow.python.platform.tf_logging.warning",
"tensorflow.python.framework.ops.device",
"tensorflow.python.distribute.multi_worker_util.normalize_cluster_spec",
"tensorflow.python.platform.tf_logging.info",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.eager.context.context",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.distribute.numpy_dataset.SingleDevice",
"tensorflow.python.distribute.cross_device_ops.CollectiveAllReduce",
"tensorflow.python.distribute.distribute_lib.distribution_strategy_gauge.get_cell",
"tensorflow.python.platform.tf_logging.error",
"tensorflow.python.eager.context.is_async",
"tensorflow.python.distribute.device_util.get_host_for_device",
"tensorflow.python.distribute.distribute_lib.ValueContext",
"tensorflow.python.distribute.distribution_strategy_context.get_replica_context",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.framework.ops.executing_eagerly_outside_functions"
]
] |
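The docstring in the row above writes `TF_CONFIG` out as a hand-crafted JSON string. As a minimal sketch (not taken from the source), the same structure can be built per worker with the standard library; the two-worker layout and host:port pairs below are placeholders, and because the constructor falls back to `TFConfigClusterResolver`, the variable has to be in the environment before the strategy is created.

```
# Sketch only: build TF_CONFIG for worker `task_index` of a hypothetical
# two-worker cluster. Addresses and ports are placeholders.
import json
import os

def set_tf_config(task_index, workers=("localhost:12345", "localhost:23456")):
    os.environ["TF_CONFIG"] = json.dumps({
        "cluster": {"worker": list(workers)},
        "task": {"type": "worker", "index": task_index},
    })

set_tf_config(0)  # index 0 on worker-0, index 1 on worker-1, before creating the strategy
```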
SparkJiao/MERIt | [
"e887dd11bd2969345a5fb07c47d49bd0245e41e6"
] | [
"reclor_trainer_base_v2.py"
] | [
"# coding=utf-8\n#\n# Copyright 2020 Heinrich Heine University Duesseldorf\n#\n# Part of this code is based on the source code of BERT-DST\n# (arXiv:1907.03040)\n# Part of this code is based on the source code of Transformers\n# (arXiv:1910.03771)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport glob\nimport json\nimport logging\nimport os\nimport sys\nfrom typing import Dict, Union\n\nimport hydra\nimport numpy as np\nimport torch\nimport transformers\nfrom fairscale.nn.data_parallel.fully_sharded_data_parallel import FullyShardedDataParallel as FullyShardedDDP\nfrom fairscale.nn.wrap.auto_wrap import auto_wrap\nfrom fairscale.optim.grad_scaler import ShardedGradScaler\nfrom omegaconf import DictConfig, OmegaConf\nfrom torch import distributed as dist\nfrom torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset)\nfrom torch.utils.data.distributed import DistributedSampler\nfrom torch.utils.tensorboard import SummaryWriter\nfrom tqdm import tqdm, trange\nfrom transformers import (get_linear_schedule_with_warmup, AutoTokenizer, PreTrainedTokenizer)\n\nfrom general_util.logger import setting_logger\nfrom general_util.training_utils import batch_to_device, unwrap_model, set_seed, note_best_checkpoint, initialize_optimizer\n\nlogger: logging.Logger\n\n# transformers.logging.set_verbosity_error()\n\n\ndef save_model(model: Union[torch.nn.Module, FullyShardedDDP], cfg: DictConfig, output_dir: str, tokenizer: PreTrainedTokenizer = None):\n # Save model checkpoint.\n if cfg.local_rank != -1:\n state_dict = model.state_dict()\n if cfg.local_rank == 0:\n unwrap_model(model).save_pretrained(output_dir, state_dict=state_dict)\n else:\n model.save_pretrained(output_dir)\n\n # Save tokenizer and training args.\n if cfg.local_rank in [-1, 0]:\n if tokenizer is not None:\n tokenizer.save_pretrained(output_dir)\n OmegaConf.save(cfg, os.path.join(output_dir, \"training_config.yaml\"))\n logger.info(\"Saving model checkpoint to %s\", output_dir)\n\n\ndef forward_step(model, inputs: Dict[str, torch.Tensor], cfg, scaler):\n if cfg.fp16:\n with torch.cuda.amp.autocast():\n outputs = model(**inputs)\n loss = outputs[\"loss\"] # model outputs are always tuple in transformers (see doc)\n else:\n outputs = model(**inputs)\n loss = outputs[\"loss\"] # model outputs are always tuple in pytorch-transformers (see doc)\n\n if cfg.n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training\n if cfg.gradient_accumulation_steps > 1:\n loss = loss / cfg.gradient_accumulation_steps\n\n if cfg.fp16:\n scaler.scale(loss).backward()\n else:\n loss.backward()\n\n return loss.item()\n\n\ndef train(cfg, train_dataset, features, model, tokenizer, continue_from_global_step=0):\n \"\"\" Train the model \"\"\"\n if cfg.local_rank in [-1, 0]:\n _dir_splits = cfg.output_dir.split('/')\n _log_dir = '/'.join([_dir_splits[0], 'runs'] + _dir_splits[1:])\n tb_writer = SummaryWriter(log_dir=_log_dir)\n else:\n tb_writer = None\n\n cfg.train_batch_size = 
cfg.per_gpu_train_batch_size * max(1, cfg.n_gpu)\n train_sampler = RandomSampler(train_dataset) if cfg.local_rank == -1 else DistributedSampler(train_dataset)\n train_collator = hydra.utils.instantiate(cfg.collator) if \"collator\" in cfg and cfg.collator else None\n train_dataloader = DataLoader(dataset=train_dataset, sampler=train_sampler, batch_size=cfg.train_batch_size,\n collate_fn=train_collator, num_workers=cfg.num_workers, pin_memory=True,\n prefetch_factor=cfg.prefetch_factor)\n\n if \"extended_vocab\" in cfg and cfg.extended_vocab:\n logger.info(f\"Extended extra vocab size: {cfg.extended_vocab}\")\n model.resize_token_embeddings(model.config.vocab_size + cfg.extended_vocab)\n\n if cfg.max_steps > 0:\n t_total = cfg.max_steps\n cfg.num_train_epochs = cfg.max_steps // (len(train_dataloader) // cfg.gradient_accumulation_steps) + 1\n else:\n t_total = len(train_dataloader) // cfg.gradient_accumulation_steps * cfg.num_train_epochs\n\n num_warmup_steps = int(t_total * cfg.warmup_proportion) if cfg.warmup_proportion else cfg.warmup_steps\n\n optimizer = scheduler = None\n # Prepare optimizer and schedule (linear warmup and decay)\n if cfg.local_rank == -1:\n no_decay = ['bias', 'LayerNorm.weight', 'layer_norm.weight']\n optimizer_grouped_parameters = [\n {\n 'params': [p for n, p in model.named_parameters() if (not any(nd in n for nd in no_decay)) and p.requires_grad],\n 'weight_decay': cfg.weight_decay\n },\n {\n 'params': [p for n, p in model.named_parameters() if (any(nd in n for nd in no_decay)) and p.requires_grad],\n 'weight_decay': 0.0\n }\n ]\n optimizer = initialize_optimizer(cfg, optimizer_grouped_parameters)\n scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=t_total)\n\n if cfg.fp16:\n if cfg.local_rank != -1:\n scaler = ShardedGradScaler()\n else:\n from torch.cuda.amp.grad_scaler import GradScaler\n\n scaler = GradScaler()\n else:\n scaler = None\n\n # multi-gpu training (should be after apex fp16 initialization)\n model_single_gpu = model\n if cfg.n_gpu > 1:\n model = torch.nn.DataParallel(model_single_gpu)\n\n # Distributed training (should be after apex fp16 initialization)\n if cfg.local_rank != -1:\n model = auto_wrap(model)\n model = FullyShardedDDP(model,\n mixed_precision=cfg.fp16,\n flatten_parameters=getattr(cfg, \"flatten_parameters\", True),\n reshard_after_forward=cfg.reshard_after_forward,\n move_grads_to_cpu=cfg.move_grads_to_cpu,\n move_params_to_cpu=cfg.move_params_to_cpu)\n if not cfg.move_params_to_cpu:\n model = model.to(cfg.device)\n\n no_decay = ['bias', 'LayerNorm.weight', 'layer_norm.weight']\n optimizer_grouped_parameters = [\n {\n 'params': [p for n, p in model.named_parameters() if (not any(nd in n for nd in no_decay)) and p.requires_grad],\n 'weight_decay': cfg.weight_decay\n },\n {\n 'params': [p for n, p in model.named_parameters() if (any(nd in n for nd in no_decay)) and p.requires_grad],\n 'weight_decay': 0.0\n }\n ]\n optimizer = initialize_optimizer(cfg, optimizer_grouped_parameters)\n scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=t_total)\n\n logger.info(optimizer)\n\n # Train!\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_dataset))\n logger.info(\" Num Epochs = %d\", cfg.num_train_epochs)\n logger.info(\" Instantaneous batch size per GPU = %d\", cfg.per_gpu_train_batch_size)\n logger.info(\" Total train batch size (w. 
parallel, distributed & accumulation) = %d\",\n cfg.train_batch_size * cfg.gradient_accumulation_steps * (dist.get_world_size() if cfg.local_rank != -1 else 1))\n logger.info(\" Gradient Accumulation steps = %d\", cfg.gradient_accumulation_steps)\n logger.info(\" Total optimization steps = %d\", t_total)\n logger.info(\" Warmup steps = %d\", num_warmup_steps)\n\n if continue_from_global_step > 0:\n logger.info(\"Fast forwarding to global step %d to resume training from latest checkpoint...\", continue_from_global_step)\n\n global_step = 0\n tr_loss, logging_loss = 0.0, 0.0\n model.zero_grad()\n train_iterator = trange(int(cfg.num_train_epochs), desc=\"Epoch\", disable=cfg.local_rank not in [-1, 0])\n set_seed(cfg) # Added here for reproducibility (even between python 2 and 3)\n\n for epoch in train_iterator:\n epoch_iterator = tqdm(train_dataloader, desc=\"Iteration\", disable=cfg.local_rank not in [-1, 0], dynamic_ncols=True)\n if cfg.local_rank != -1:\n train_dataloader.sampler.set_epoch(epoch)\n\n for step, batch in enumerate(epoch_iterator):\n # If training is continued from a checkpoint, fast forward\n # to the state of that checkpoint.\n if global_step < continue_from_global_step:\n if (step + 1) % cfg.gradient_accumulation_steps == 0:\n scheduler.step() # Update learning rate schedule\n global_step += 1\n continue\n\n model.train()\n batch = batch_to_device(batch, cfg.device)\n\n if (step + 1) % cfg.gradient_accumulation_steps != 0 and cfg.local_rank != -1:\n # Avoid unnecessary DDP synchronization since there will be no backward pass on this example.\n with model.no_sync():\n loss = forward_step(model, batch, cfg, scaler)\n else:\n loss = forward_step(model, batch, cfg, scaler)\n\n tr_loss += loss\n if (step + 1) % cfg.gradient_accumulation_steps == 0:\n if cfg.fp16:\n scaler.unscale_(optimizer)\n\n if cfg.max_grad_norm:\n if hasattr(optimizer, \"clip_grad_norm\"):\n optimizer.clip_grad_norm(cfg.max_grad_norm)\n elif hasattr(model, \"clip_grad_norm_\"):\n model.clip_grad_norm_(cfg.max_grad_norm)\n else:\n torch.nn.utils.clip_grad_norm_(model.parameters(), cfg.max_grad_norm)\n\n if cfg.fp16:\n scaler.step(optimizer)\n scaler.update()\n else:\n optimizer.step()\n\n scheduler.step() # Update learning rate schedule\n model.zero_grad(set_to_none=True)\n global_step += 1\n\n # Log metrics\n if cfg.local_rank in [-1, 0] and cfg.logging_steps > 0 and global_step % cfg.logging_steps == 0:\n tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)\n tb_writer.add_scalar('loss', (tr_loss - logging_loss) / cfg.logging_steps, global_step)\n logging_loss = tr_loss\n\n # Save model checkpoint\n if cfg.save_steps > 0 and global_step % cfg.save_steps == 0:\n output_dir = os.path.join(cfg.output_dir, 'checkpoint-{}'.format(global_step))\n if cfg.local_rank in [-1, 0] and not os.path.exists(output_dir):\n os.makedirs(output_dir)\n save_model(model, cfg, output_dir, tokenizer)\n\n # Evaluation\n if cfg.evaluate_during_training and cfg.eval_steps > 0 and global_step % cfg.eval_steps == 0:\n state_dict = model.state_dict()\n\n if cfg.local_rank in [-1, 0]:\n results = evaluate(cfg, model, tokenizer, prefix=str(global_step), _split=\"dev\")\n for key, value in results.items():\n tb_writer.add_scalar(f\"eval/{key}\", value, global_step)\n\n sub_path = os.path.join(cfg.output_dir, 'checkpoint-{}'.format(global_step))\n flag = note_best_checkpoint(cfg, results, sub_path)\n if cfg.save_best and flag:\n if cfg.local_rank == 0:\n unwrap_model(model).save_pretrained(cfg.output_dir, 
state_dict=state_dict)\n else:\n model.save_pretrained(cfg.output_dir)\n\n tokenizer.save_pretrained(cfg.output_dir)\n OmegaConf.save(cfg, os.path.join(cfg.output_dir, \"training_config.yaml\"))\n logger.info(\"Saving best model checkpoint to %s\", cfg.output_dir)\n\n if 0 < cfg.max_steps < global_step:\n epoch_iterator.close()\n break\n\n if 0 < cfg.max_steps < global_step:\n train_iterator.close()\n break\n\n if cfg.local_rank in [-1, 0]:\n tb_writer.close()\n\n return global_step, tr_loss / global_step\n\n\ndef evaluate(cfg, model, tokenizer: PreTrainedTokenizer, prefix=\"\", _split=\"dev\"):\n dataset, features = load_and_cache_examples(cfg, tokenizer, _split=_split)\n\n if not os.path.exists(os.path.join(cfg.output_dir, prefix)):\n os.makedirs(os.path.join(cfg.output_dir, prefix))\n\n cfg.eval_batch_size = cfg.per_gpu_eval_batch_size\n eval_sampler = SequentialSampler(dataset) # Note that DistributedSampler samples randomly\n eval_collator = hydra.utils.instantiate(cfg.collator) if \"collator\" in cfg and cfg.collator else None\n eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=cfg.eval_batch_size,\n collate_fn=eval_collator)\n single_model_gpu = unwrap_model(model)\n single_model_gpu.get_eval_log(reset=True)\n # Eval!\n torch.cuda.empty_cache()\n logger.info(\"***** Running evaluation {}.{} *****\".format(_split, prefix))\n logger.info(\" Num examples = %d\", len(dataset))\n logger.info(\" Batch size = %d\", cfg.eval_batch_size)\n # Seems FSDP does not need to unwrap the model for evaluating.\n model.eval()\n pred_list = []\n prob_list = []\n for batch in tqdm(eval_dataloader, desc=\"Evaluating\", dynamic_ncols=True):\n batch = batch_to_device(batch, cfg.device)\n with torch.cuda.amp.autocast():\n with torch.no_grad():\n outputs = model(**batch)\n probs = outputs[\"logits\"].softmax(dim=-1).detach().float().cpu()\n prob, pred = probs.max(dim=-1)\n pred_list.extend(pred.tolist())\n prob_list.extend(prob.tolist())\n\n metric_log, results = single_model_gpu.get_eval_log(reset=True)\n logger.info(\"****** Evaluation Results ******\")\n logger.info(f\"Global Steps: {prefix}\")\n logger.info(metric_log)\n\n prediction_file = os.path.join(cfg.output_dir, prefix, \"eval_predictions.npy\")\n np.save(prediction_file, pred_list)\n json.dump(prob_list, open(os.path.join(cfg.output_dir, prefix, \"eval_probs.json\"), \"w\"))\n\n return results\n\n\ndef load_and_cache_examples(cfg, tokenizer: PreTrainedTokenizer, _split=\"train\"):\n if cfg.local_rank not in [-1, 0] and _split == \"train\":\n dist.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache\n\n if _split == \"train\":\n input_file = cfg.train_file\n elif _split == \"dev\":\n input_file = cfg.dev_file\n elif _split == \"test\":\n input_file = cfg.test_file\n else:\n raise RuntimeError(_split)\n\n examples, features, tensors = hydra.utils.call(cfg.read_tensor, file_path=input_file, tokenizer=tokenizer)\n\n if cfg.local_rank == 0 and _split == \"train\":\n dist.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache\n\n dataset = TensorDataset(*tensors)\n\n return dataset, features\n\n\[email protected](config_path=\"conf\", config_name=\"config\")\ndef main(cfg: DictConfig):\n if cfg.local_rank == -1 or cfg.no_cuda:\n device = str(torch.device(\"cuda\" if torch.cuda.is_available() and not cfg.no_cuda else \"cpu\"))\n cfg.n_gpu = torch.cuda.device_count()\n else: # Initializes the 
distributed backend which will take care of synchronizing nodes/GPUs\n torch.cuda.set_device(cfg.local_rank)\n device = str(torch.device(\"cuda\", cfg.local_rank))\n dist.init_process_group(backend='nccl')\n cfg.n_gpu = 1\n cfg.world_size = dist.get_world_size()\n cfg.device = device\n\n global logger\n logger = setting_logger(cfg.output_dir, local_rank=cfg.local_rank)\n logger.warning(\"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s\",\n cfg.local_rank, device, cfg.n_gpu, bool(cfg.local_rank != -1), cfg.fp16)\n\n # Set seed\n set_seed(cfg)\n\n # Load pre-trained model and tokenizer\n if cfg.local_rank not in [-1, 0]:\n dist.barrier() # Make sure only the first process in distributed training will download model & vocab\n\n if cfg.pretrain:\n pretrain_state_dict = torch.load(cfg.pretrain, map_location='cpu')\n else:\n pretrain_state_dict = None\n\n tokenizer = AutoTokenizer.from_pretrained(cfg.model_name_or_path)\n model = hydra.utils.call(cfg.model, cfg.model_name_or_path, state_dict=pretrain_state_dict)\n\n if cfg.local_rank == 0:\n dist.barrier() # Make sure only the first process in distributed training will download model & vocab\n\n if cfg.local_rank == -1: # For FullyShardedDDP, place the model on cpu first.\n model.to(cfg.device)\n\n # logger.info(\"Training/evaluation parameters %s\", OmegaConf.to_yaml(cfg))\n if cfg.local_rank in [-1, 0] and cfg.do_train:\n if not os.path.exists(cfg.output_dir):\n os.makedirs(cfg.output_dir)\n OmegaConf.save(cfg, os.path.join(cfg.output_dir, \"training_config.yaml\"))\n\n # Training\n if cfg.do_train:\n # TODO: Add option for continuously training from checkpoint.\n # The operation should be introduced in ``train`` method since both the state dict\n # of schedule and optimizer (and scaler, if any) should be loaded.\n # If output files already exists, assume to continue training from latest checkpoint (unless overwrite_output_dir is set)\n continue_from_global_step = 0 # If set to 0, start training from the beginning\n # if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:\n # checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/*/' + WEIGHTS_NAME, recursive=True)))\n # if len(checkpoints) > 0:\n # checkpoint = checkpoints[-1]\n # logger.info(\"Resuming training from the latest checkpoint: %s\", checkpoint)\n # continue_from_global_step = int(checkpoint.split('-')[-1])\n # model = model_class.from_pretrained(checkpoint)\n # model.to(args.device)\n\n train_dataset, features = load_and_cache_examples(cfg, tokenizer, _split=\"train\")\n global_step, tr_loss = train(cfg, train_dataset, features, model, tokenizer, continue_from_global_step)\n logger.info(\" global_step = %s, average loss = %s\", global_step, tr_loss)\n\n # Test\n results = {}\n if cfg.do_eval and cfg.local_rank in [-1, 0]:\n checkpoints = [cfg.output_dir]\n if cfg.save_best:\n logging.getLogger(\"transformers.modeling_utils\").setLevel(logging.WARN) # Reduce logging\n elif cfg.prediction_cfg.best_checkpoint and os.path.exists(cfg.prediction_cfg.best_checkpoint):\n checkpoints = [cfg.prediction_cfg.best_checkpoint]\n logging.getLogger(\"transformers.modeling_utils\").setLevel(logging.WARN) # Reduce logging\n elif cfg.eval_sub_path:\n checkpoints = list(\n os.path.dirname(c) for c in\n sorted(glob.glob(cfg.output_dir + f\"/{cfg.eval_sub_path}/\" + \"pytorch_model.bin\", recursive=True))\n )\n 
logging.getLogger(\"transformers.modeling_utils\").setLevel(logging.WARN) # Reduce logging\n logger.info(\" the following checkpoints: %s\", checkpoints)\n for checkpoint in checkpoints:\n global_step = checkpoint.split(\"-\")[-1] if len(checkpoints) > 1 else \"\"\n prefix = checkpoint.split(\"/\")[-1] if checkpoint.find(\"checkpoint\") != -1 else \"\"\n split = \"dev\"\n\n model = hydra.utils.call(cfg.model, checkpoint)\n model.to(device)\n\n if cfg.test_file:\n prefix = f'test' + (f'-{prefix}' if prefix != \"\" else \"\")\n split = \"test\"\n\n result = evaluate(cfg, model, tokenizer, prefix=prefix, _split=split)\n result = dict((k + \"_{}\".format(global_step), v) for k, v in result.items())\n results.update(result)\n\n return results\n\n\nif __name__ == \"__main__\":\n hydra_formatted_args = []\n # convert the cli params added by torch.distributed.launch into Hydra format\n for arg in sys.argv:\n if arg.startswith(\"--\"):\n hydra_formatted_args.append(arg[len(\"--\"):])\n else:\n hydra_formatted_args.append(arg)\n sys.argv = hydra_formatted_args\n\n main()\n"
] | [
[
"torch.utils.data.DataLoader",
"numpy.save",
"torch.no_grad",
"torch.cuda.is_available",
"torch.cuda.empty_cache",
"torch.distributed.init_process_group",
"torch.cuda.device_count",
"torch.nn.DataParallel",
"torch.utils.data.RandomSampler",
"torch.device",
"torch.cuda.set_device",
"torch.distributed.get_world_size",
"torch.load",
"torch.cuda.amp.grad_scaler.GradScaler",
"torch.utils.data.SequentialSampler",
"torch.distributed.barrier",
"torch.utils.data.TensorDataset",
"torch.utils.data.distributed.DistributedSampler",
"torch.cuda.amp.autocast",
"torch.utils.tensorboard.SummaryWriter"
]
] |
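The `forward_step` and `train` functions in the trainer above interleave mixed precision, gradient accumulation, and gradient clipping. The sketch below strips that pattern down to a single-GPU form under stated assumptions: `model` returns a dict with a "loss" entry (as in the trainer), `loader` yields dicts of tensors, and the FullyShardedDDP/ShardedGradScaler branch is omitted.

```
# Minimal single-GPU sketch of the accumulation + AMP update pattern used above.
import torch

def train_epoch(model, loader, optimizer, scheduler, device,
                accumulation_steps=2, max_grad_norm=1.0):
    scaler = torch.cuda.amp.GradScaler()
    model.train()
    for step, batch in enumerate(loader):
        batch = {k: v.to(device) for k, v in batch.items()}
        with torch.cuda.amp.autocast():
            loss = model(**batch)["loss"] / accumulation_steps
        scaler.scale(loss).backward()
        if (step + 1) % accumulation_steps == 0:
            scaler.unscale_(optimizer)  # so clipping sees unscaled gradients
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
            scaler.step(optimizer)
            scaler.update()
            scheduler.step()
            optimizer.zero_grad(set_to_none=True)
```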
ChandreyeeB/Blind-Image-Deconvolution-using-Deep-Generative-Priors | [
"4198bd2d325a32ffc4e714c486540e63440ab110"
] | [
"deblurring_celeba_algorithm_1.py"
] | [
"import tensorflow as tf\nimport keras.backend as K\nimport numpy as np\nfrom Utils import *\nfrom generators.MotionBlurGenerator import *\nfrom generators.CelebAGenerator import *\nK.set_learning_phase(0)\nfrom glob import glob\nimport os\n\n\n# paths\nOrig_Path = './results/CelebA/Original Images/*.png'\nRange_Path = './results/CelebA/Range Images/*.png'\nBlur_Path = './results/CelebA/Original Blurs/Test Blurs.npy'\n\n# constants\nREGULARIZORS = [0.01 , 0.01]\nRANDOM_RESTARTS = 10\nNOISE_STD = 0.01\nSTEPS = 10000\nIMAGE_RANGE = [-1,1]\n\ndef step_size(t):\n return 0.01 * np.exp( - t / 1000 )\n\nSAVE_PATH = './results/CelebA/deblurring - alg1 - ' +str(int(NOISE_STD*100)) + 'perc noise - ' +str(RANDOM_RESTARTS) + 'RR/deblurring_'\n# -----------------------------------------------------------------------\n\n# loading test blur images\nW = np.load(Blur_Path) \nBLUR_RES = W.shape[1]\n\n# loading test celeba images\nX_Orig = np.array([ imread(path) for path in glob(Orig_Path)])/255\nX_Range = np.array([ imread(path) for path in glob(Range_Path)])/255\nIMAGE_RES = X_Orig.shape[1]\nCHANNELS = X_Orig.shape[-1]\n\n# loading celeba generator\nCelebAGen = CelebAGenerator()\nCelebAGen.GenerateModel()\nCelebAGen.LoadWeights()\nCelebAGAN = CelebAGen.GetModels()\nceleba_latent_dim = CelebAGen.latent_dim\n\n# loading motion blur generator\nBLURGen = MotionBlur()\nBLURGen.GenerateModel()\nBLURGen.LoadWeights()\nblur_vae, blur_encoder, blur_decoder = BLURGen.GetModels()\nblur_latent_dim = BLURGen.latent_dim\n\n# check if save dir exists, if not create a new one\ntry:\n os.stat(SAVE_PATH[:-11])\nexcept:\n os.mkdir(SAVE_PATH[:-11])\n\n# generating blurry images from test\nY_np = []\nBlurry_Images = []\nfor i in tqdm(range(len(X_Orig)), ascii=True, desc ='Gen-Test-Blurry'):\n x_np = X_Orig[i]\n w_np = W[i]\n y_np, y_f = GenerateBlurry(x_np, w_np, noise_std = NOISE_STD )\n Y_np.append(y_np)\n for _ in range(RANDOM_RESTARTS):\n Blurry_Images.append(y_f)\n\nY_np = np.array(Y_np)\nBlurry_Images = np.array(Blurry_Images)\n\n# generating blurry images from range\nBlurry_Images_range = []\nY_np_range = []\nfor i in tqdm(range(len(X_Orig)), ascii=True, desc ='Gen-Range-Blurry'):\n y_np, y_f = GenerateBlurry(X_Range[i], W[i], noise_std = NOISE_STD )\n Y_np_range.append(y_np)\n for _ in range(RANDOM_RESTARTS):\n Blurry_Images_range.append(y_f)\n\nY_np_range = np.array(Y_np_range)\nBlurry_Images_range = np.array(Blurry_Images_range)\n\n\n# alternating gradient descent for test images\nimage_gradients, blur_gradients, get_loss = Generate_Gradient_Functions(rr = Blurry_Images.shape[0],\n reg = REGULARIZORS, image_range = IMAGE_RANGE,\n decoder = CelebAGAN, blur_decoder = blur_decoder,\n image_res = IMAGE_RES, blur_res = BLUR_RES,\n channels = CHANNELS)\nm_hat, h_hat, Loss = Optimize_Parallel(blurry_fourier = Blurry_Images, stepsize=step_size,steps = STEPS,\n image_grad = image_gradients , blur_grad = blur_gradients, \n getloss = get_loss, latent_image_dim = celeba_latent_dim , latent_blur_dim = blur_latent_dim)\nX_hat_test = []\nW_hat_test = []\nfor i in range(len(X_Orig)):\n m_hat_i = m_hat[i*RANDOM_RESTARTS:(i+1)*RANDOM_RESTARTS]\n h_hat_i = h_hat[i*RANDOM_RESTARTS:(i+1)*RANDOM_RESTARTS]\n Loss_i = Loss[i*RANDOM_RESTARTS:(i+1)*RANDOM_RESTARTS]\n x_hat_test, w_hat_test, loss_last_iter_test = Get_Min_Loss(Loss_i, m_hat_i, h_hat_i, decoder = CelebAGAN, blur_decoder = blur_decoder,\n latent_image_dim = celeba_latent_dim, latent_blur_dim = blur_latent_dim, print_grad=False) \n X_hat_test.append(x_hat_test)\n 
W_hat_test.append(w_hat_test)\n\nX_hat_test = np.array(X_hat_test)\nW_hat_test = np.array(W_hat_test)\n\n# alternating gradient descent for range images\nm_hat, h_hat, Loss = Optimize_Parallel(blurry_fourier = Blurry_Images_range, stepsize=step_size,steps = STEPS,\n image_grad = image_gradients , blur_grad = blur_gradients, \n getloss = get_loss, latent_image_dim = celeba_latent_dim , latent_blur_dim = blur_latent_dim)\nX_hat_range = []\nW_hat_range = []\nfor i in range(len(X_Orig)):\n m_hat_i = m_hat[i*RANDOM_RESTARTS:(i+1)*RANDOM_RESTARTS]\n h_hat_i = h_hat[i*RANDOM_RESTARTS:(i+1)*RANDOM_RESTARTS]\n Loss_i = Loss[i*RANDOM_RESTARTS:(i+1)*RANDOM_RESTARTS]\n x_hat_range, w_hat_range, loss_last_iter_range = Get_Min_Loss(Loss_i, m_hat_i, h_hat_i, decoder = CelebAGAN, blur_decoder = blur_decoder,\n latent_image_dim = celeba_latent_dim, latent_blur_dim = blur_latent_dim, print_grad=False) \n X_hat_range.append(x_hat_range)\n W_hat_range.append(w_hat_range)\n\nX_hat_range = np.array(X_hat_range)\nW_hat_range = np.array(W_hat_range)\n\nX_hat_test = (X_hat_test + 1)/2\nX_hat_range = (X_hat_range + 1)/2\nMax = 10**len(str(len(X_Orig)-1))\n\n# saving results\nfor i in range(len(X_Orig)):\n Save_Results(path = SAVE_PATH + str(i+Max)[1:], \n x_np = None, \n w_np = None,\n y_np = Y_np[i], \n y_np_range = Y_np_range[i] , \n x_hat_test = X_hat_test[i], \n w_hat_test = W_hat_test[i], \n x_range = None, \n x_hat_range = X_hat_range[i], \n w_hat_range = W_hat_range[i], clip=True)"
] | [
[
"numpy.array",
"numpy.load",
"numpy.exp"
]
] |
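The deblurring script above runs `RANDOM_RESTARTS` optimizations per image in one flat batch and then slices the results back out per image to keep the lowest-loss restart. The toy snippet below mirrors only that bookkeeping; the loss values are synthetic placeholders, not outputs of the script.

```
# Synthetic illustration of the per-image restart slicing used above.
import numpy as np

RANDOM_RESTARTS = 10
n_images = 3
losses = np.random.rand(n_images * RANDOM_RESTARTS)  # one final loss per restart

for i in range(n_images):
    loss_i = losses[i * RANDOM_RESTARTS:(i + 1) * RANDOM_RESTARTS]
    best = int(np.argmin(loss_i))  # the restart the script would keep
    print(f"image {i}: best restart {best}, loss {loss_i[best]:.4f}")
```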
dmontemayor/CRPM | [
"e896831fad7bed42d17574b137e600fc5adbf6b0"
] | [
"crpm/pvalue.py"
] | [
"\"\"\" Calculate p-values, ROC, AUC, and proportion of significant observations for\na set of observations given the null hypothesis distribution\n\n Args:\n variable: array of observed values\n hypothesis: optional null hypothesis distribution (beta distribution by default)\n alpha: optional significance parameter (.05 by default)\n Returns:\n pvalues: for every observation in variable\n ROC: on a grid of 1000 points\n AUC: integral of ROC\n proportion of significant observations\n\"\"\"\n\nimport numpy as np\n\ndef pvalue(variable=None, hypothesis=None, alpha=.05):\n \"\"\" calculate pvalues, AUC and fraction of significant observations\n \"\"\"\n #set model\n if variable is None:\n variable = np.random.beta(a=3, b=5, size=5000)\n\n else:\n variable = np.array(variable)\n\n #set null-hypothesis\n if hypothesis is None:\n hypothesis = np.random.beta(a=5, b=5, size=1000)\n else:\n hypothesis = np.array(hypothesis)\n\n #calculate prob of left-tail event p(H<=x|H) for every instance of X\n prob = []\n for var in variable:\n prob.append((hypothesis <= var).sum())\n #normalize p\n prob = np.divide(prob, hypothesis.size)\n\n #scan alpha from 0 to 1 and find prob(p<=alpha)\n scanprob = []\n alphagrid = np.linspace(0, 1, num=1000)\n for val in alphagrid:\n #calculate prob p<=alpha\n scanprob.append((prob <= val).sum() / variable.size)\n\n return prob, scanprob, np.sum(prob) / alphagrid.size, (prob <= alpha).sum() /variable.size\n\ndef lefttailpvalue(variable=None, hypothesis=None):\n \"\"\" calculate left-tail pvalues\n \"\"\"\n #set model\n if variable is None:\n variable = np.random.beta(a=3, b=5, size=5000)\n\n else:\n variable = np.array(variable)\n\n #set null-hypothesis\n if hypothesis is None:\n hypothesis = np.random.beta(a=5, b=5, size=1000)\n else:\n hypothesis = np.array(hypothesis)\n\n #calculate prob of left-tail event p(H<=x|H) for every instance of X\n prob = []\n for var in variable:\n prob.append((hypothesis <= var).sum())\n #normalize p\n prob = np.divide(prob, hypothesis.size)\n\n return prob\n\ndef righttailpvalue(variable=None, hypothesis=None):\n \"\"\" calculate right-tail pvalues\n \"\"\"\n #set model\n if variable is None:\n variable = np.random.beta(a=3, b=5, size=5000)\n\n else:\n variable = np.array(variable)\n\n #set null-hypothesis\n if hypothesis is None:\n hypothesis = np.random.beta(a=5, b=5, size=1000)\n else:\n hypothesis = np.array(hypothesis)\n\n #calculate prob of right-tail event p(H>=x|H) for every instance of X\n prob = []\n for var in variable:\n prob.append((hypothesis >= var).sum())\n #normalize p\n prob = np.divide(prob, hypothesis.size)\n\n return prob\n"
] | [
[
"numpy.sum",
"numpy.divide",
"numpy.random.beta",
"numpy.array",
"numpy.linspace"
]
] |
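A short usage sketch for the empirical p-value helpers above. It assumes `crpm` is importable as a package (matching the `crpm/pvalue.py` path in this listing) and reuses the same beta-distributed defaults the functions fall back on.

```
# Usage sketch, assuming `crpm` is an installed/importable package.
import numpy as np
from crpm.pvalue import pvalue, lefttailpvalue

np.random.seed(0)
observed = np.random.beta(a=3, b=5, size=5000)     # sample shifted relative to the null
null_sample = np.random.beta(a=5, b=5, size=1000)  # draws from the null hypothesis

probs, roc, auc, frac_significant = pvalue(observed, null_sample, alpha=0.05)
print(f"fraction significant at alpha=0.05: {frac_significant:.3f}")

left_p = lefttailpvalue(observed, null_sample)     # left-tail p-values only
print(f"median left-tail p-value: {np.median(left_p):.3f}")
```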
tywang89/pyprobml | [
"82cfdcb8daea653cda8f77e8737e585418476ca7"
] | [
"book/linreg_poly_vs_degree.py"
] | [
"# Plot polynomial regression on 1d problem\n# Based on https://github.com/probml/pmtk3/blob/master/demos/linregPolyVsDegree.m\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom pyprobml_utils import save_fig\n\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.preprocessing import MinMaxScaler \nimport sklearn.metrics \nfrom sklearn.metrics import mean_squared_error as mse\n\ndef make_1dregression_data(n=21):\n np.random.seed(0)\n xtrain = np.linspace(0.0, 20, n)\n xtest = np.arange(0.0, 20, 0.1)\n sigma2 = 4\n w = np.array([-1.5, 1/9.])\n fun = lambda x: w[0]*x + w[1]*np.square(x)\n ytrain = fun(xtrain) + np.random.normal(0, 1, xtrain.shape) * \\\n np.sqrt(sigma2)\n ytest= fun(xtest) + np.random.normal(0, 1, xtest.shape) * \\\n np.sqrt(sigma2)\n return xtrain, ytrain, xtest, ytest\n\nxtrain, ytrain, xtest, ytest = make_1dregression_data(n=21)\n\n#Rescaling data\nscaler = MinMaxScaler(feature_range=(-1, 1))\nXtrain = scaler.fit_transform(xtrain.reshape(-1, 1))\nXtest = scaler.transform(xtest.reshape(-1, 1))\n\n\ndegs = np.arange(1, 21, 1)\nndegs = np.max(degs)\nmse_train = np.empty(ndegs)\nmse_test = np.empty(ndegs)\nytest_pred_stored = np.empty(ndegs, dtype=np.ndarray)\nytrain_pred_stored = np.empty(ndegs, dtype=np.ndarray)\nfor deg in degs:\n model = LinearRegression()\n poly_features = PolynomialFeatures(degree=deg, include_bias=False)\n Xtrain_poly = poly_features.fit_transform(Xtrain)\n model.fit(Xtrain_poly, ytrain)\n ytrain_pred = model.predict(Xtrain_poly)\n ytrain_pred_stored[deg-1] = ytrain_pred\n Xtest_poly = poly_features.transform(Xtest)\n ytest_pred = model.predict(Xtest_poly)\n mse_train[deg-1] = mse(ytrain_pred, ytrain) \n mse_test[deg-1] = mse(ytest_pred, ytest)\n ytest_pred_stored[deg-1] = ytest_pred\n \n# Plot MSE vs degree\nfig, ax = plt.subplots()\nmask = degs <= 15\nax.plot(degs[mask], mse_test[mask], color = 'r', marker = 'x',label='test')\nax.plot(degs[mask], mse_train[mask], color='b', marker = 's', label='train')\nax.legend(loc='upper right', shadow=True)\nplt.xlabel('degree')\nplt.ylabel('mse')\nsave_fig('polyfitVsDegree.pdf')\nplt.show()\n\n# Plot fitted functions\nchosen_degs = [1, 2, 14, 20]\nfor deg in chosen_degs:\n fig, ax = plt.subplots()\n ax.scatter(xtrain, ytrain)\n ax.plot(xtest, ytest_pred_stored[deg-1])\n ax.set_ylim((-10, 15))\n plt.title('degree {}'.format(deg))\n save_fig('polyfitDegree{}.pdf'.format(deg))\n plt.show()\n \n# Plot residuals\n#https://blog.minitab.com/blog/adventures-in-statistics-2/why-you-need-to-check-your-residual-plots-for-regression-analysis\nchosen_degs = [1, 2, 14, 20]\nfor deg in chosen_degs:\n fig, ax = plt.subplots()\n ypred = ytrain_pred_stored[deg-1]\n residuals = ytrain - ypred\n ax.plot(ypred, residuals, 'o')\n ax.set_xlabel('predicted y')\n ax.set_ylabel('residual')\n plt.title('degree {}. 
Predictions on the training set'.format(deg))\n save_fig('polyfitDegree{}Residuals.pdf'.format(deg))\n plt.show()\n\n\n# Plot fit vs actual\n# https://blog.minitab.com/blog/adventures-in-statistics-2/regression-analysis-how-do-i-interpret-r-squared-and-assess-the-goodness-of-fit \nchosen_degs = [1, 2, 14, 20]\nfor deg in chosen_degs:\n for train in [True, False]:\n if train:\n ytrue = ytrain\n ypred = ytrain_pred_stored[deg-1]\n dataset = 'Train'\n else:\n ytrue = ytest\n ypred = ytest_pred_stored[deg-1]\n dataset = 'Test'\n fig, ax = plt.subplots()\n ax.scatter(ytrue, ypred)\n ax.plot(ax.get_xlim(), ax.get_ylim(), ls=\"--\", c=\".3\")\n ax.set_xlabel('true y')\n ax.set_ylabel('predicted y')\n r2 = sklearn.metrics.r2_score(ytrue, ypred)\n plt.title('degree {}. R2 on {} = {:0.3f}'.format(deg, dataset, r2))\n save_fig('polyfitDegree{}FitVsActual{}.pdf'.format(deg, dataset))\n plt.show()"
] | [
[
"numpy.sqrt",
"numpy.empty",
"sklearn.preprocessing.PolynomialFeatures",
"sklearn.metrics.mean_squared_error",
"sklearn.preprocessing.MinMaxScaler",
"sklearn.linear_model.LinearRegression",
"numpy.random.seed",
"numpy.random.normal",
"matplotlib.pyplot.subplots",
"numpy.arange",
"numpy.max",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.show",
"numpy.array",
"numpy.square",
"numpy.linspace",
"matplotlib.pyplot.xlabel"
]
] |
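The pyprobml entry above sweeps polynomial degree with PolynomialFeatures plus LinearRegression and compares train/test MSE. A compact version of the same sweep using a scikit-learn pipeline on synthetic data and a subset of degrees:

```python
import numpy as np
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error

rng = np.random.default_rng(0)
x = np.linspace(0.0, 20.0, 21).reshape(-1, 1)
y = -1.5 * x.ravel() + x.ravel() ** 2 / 9.0 + rng.normal(0, 2, size=21)

for deg in (1, 2, 14):
    model = make_pipeline(PolynomialFeatures(degree=deg, include_bias=False),
                          LinearRegression())
    model.fit(x, y)
    print(deg, mean_squared_error(y, model.predict(x)))
```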
haymrpig/Pytorch_template | [
"9a0eda43b2da27807461b305ed42e1bd7c1341dd"
] | [
"baseline/utils/mainFunctions.py"
] | [
"import numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.nn import functional as F\nfrom tqdm import tqdm\n\nclass _BaseWrapper():\n def __init__(self, model):\n super().__init__()\n self.model = model\n self.handlers = []\n\n def forward(self, images):\n self.image_shape = images.shape[2:]\n print(self.image_shape)\n self.logits = self.model(images)\n self.probs = F.softmax(self.logits, dim=1)\n return self.probs.sort(dim=1, descending=True)\n\n def backward(self, ids):\n one_hot = F.one_hot(ids, self.logits.shape[-1])\n one_hot = one_hot.squeeze()\n self.model.zero_grad()\n self.logits.backward(gradient=one_hot, retain_graph=True)\n # gradient는 해당 index에 대해서만 미분을 통한 backpropagation을 하겠다는 의미이다. \n # 즉, 내가 확인하고 싶은 class에 대해서 featuremap이 얼마나 영향을 미쳤는지 확인할 수 있다. \n\n def generate(self):\n raise NotImplementedError\n\n\nclass GradCAM(_BaseWrapper):\n def __init__(self, model, layers=None):\n super().__init__(model)\n self.feature_map = {}\n self.grad_map = {}\n self.layers = layers\n\n def save_fmaps(key):\n def forward_hook(module, input, output):\n self.feature_map[key]=output.detach()\n\n return forward_hook\n\n def save_grads(key):\n def backward_hook(modeul, grad_in, grad_out):\n self.grad_map[key] = grad_out[0].detach()\n\n return backward_hook\n\n for name, module in self.model.named_modules():\n if self.layers is None or name in self.layers:\n self.handlers.append(module.register_forward_hook(save_fmaps(name)))\n self.handlers.append(module.register_backward_hook(save_grads(name)))\n\n def findLayers(self, layers, target_layer):\n if target_layer in layers.keys():\n return layers[target_layer]\n else:\n raise ValueError(f\"{target_layer} not exists\")\n\n def generate(self, target_layer):\n feature_maps = self.findLayers(self.feature_map, target_layer)\n grad_maps = self.findLayers(self.grad_map, target_layer)\n weights = F.adaptive_avg_pool2d(grad_maps, 1)\n grad_cam = torch.mul(feature_maps, weights).sum(dim=1, keepdim=True)\n grad_cam = F.relu(grad_cam)\n grad_cam = F.interpolate(grad_cam, self.image_shape, mode=\"bilinear\", align_corners=False)\n B, C, H, W = grad_cam.shape\n # C는 1인듯?\n\n grad_cam = grad_cam.view(B, -1)\n grad_cam -= grad_cam.min(dim=1, keepdim=True)[0]\n # 양수 만들어주려고 하는듯\n grad_cam /= grad_cam.max(dim=1, keepdim=True)[0]\n grad_cam = grad_cam.view(B, C, H, W)\n\n return grad_cam\n\n"
] | [
[
"torch.nn.functional.softmax",
"torch.nn.functional.relu",
"torch.mul",
"torch.nn.functional.one_hot",
"torch.nn.functional.adaptive_avg_pool2d",
"torch.nn.functional.interpolate"
]
] |
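The GradCAM class above pools the hooked gradients channel-wise, weights the feature maps, applies ReLU, upsamples to the input size, and min-max normalises. The tensor algebra of that generate step, sketched on toy tensors standing in for real hook outputs:

```python
import torch
import torch.nn.functional as F

# Toy stand-ins for a hooked feature map and its gradient (B, C, H, W).
fmap = torch.randn(2, 64, 7, 7)
grad = torch.randn(2, 64, 7, 7)

weights = F.adaptive_avg_pool2d(grad, 1)                    # channel importance
cam = F.relu((fmap * weights).sum(dim=1, keepdim=True))     # weighted sum + ReLU
cam = F.interpolate(cam, size=(224, 224), mode="bilinear", align_corners=False)

flat = cam.view(cam.shape[0], -1)                           # min-max normalise per map
flat = flat - flat.min(dim=1, keepdim=True)[0]
flat = flat / flat.max(dim=1, keepdim=True)[0]
cam = flat.view(cam.shape[0], 1, 224, 224)
```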
JamesBrofos/Thresholds-in-Hamiltonian-Monte-Carlo | [
"7ee1b530db0eb536666dbc872fbf8200e53dd49b",
"7ee1b530db0eb536666dbc872fbf8200e53dd49b"
] | [
"hmc/tests/test_cox_poisson.py",
"hmc/applications/cox_poisson/prior.py"
] | [
"import unittest\n\nimport numpy as np\n\nfrom hmc.applications.cox_poisson import forward_transform, inverse_transform, generate_data, gaussian_posterior_factory, hyperparameter_posterior_factory\nfrom hmc.applications.cox_poisson.prior import log_prior, grad_log_prior, hess_log_prior, grad_hess_log_prior\n\n\nclass TestCoxPoisson(unittest.TestCase):\n def test_prior(self):\n def transformed_log_prior(qt):\n return log_prior(*inverse_transform(qt)[0])\n\n transformed_grad_log_prior = lambda qt: grad_log_prior(*qt)\n transformed_hess_log_prior = lambda qt: hess_log_prior(*qt)\n transformed_grad_hess_log_prior = lambda qt: grad_hess_log_prior(*qt)\n\n q = np.random.uniform(size=(2, ))\n qt, _ = forward_transform(q)\n\n delta = 1e-5\n\n u = np.random.normal(size=qt.shape)\n fd = (transformed_log_prior(qt + 0.5*delta*u) - transformed_log_prior(qt - 0.5*delta*u)) / delta\n dd = transformed_grad_log_prior(qt)@u\n self.assertTrue(np.allclose(fd, dd))\n\n fd = (transformed_grad_log_prior(qt + 0.5*delta*u) - transformed_grad_log_prior(qt - 0.5*delta*u)) / delta\n dd = transformed_hess_log_prior(qt)@u\n self.assertTrue(np.allclose(fd, dd))\n\n fd = (transformed_hess_log_prior(qt + 0.5*delta*u) - transformed_hess_log_prior(qt - 0.5*delta*u)) / delta\n dd = transformed_grad_hess_log_prior(qt)@u\n self.assertTrue(np.allclose(fd, dd))\n\n def test_gaussian_posterior(self):\n sigmasq, beta = np.random.uniform(size=(2, ))\n mu = np.log(126.0) - sigmasq / 2.0\n dist, x, y = generate_data(10, mu, beta, sigmasq)\n\n euclidean_auxiliaries, metric = gaussian_posterior_factory(dist, mu, sigmasq, beta, y)\n log_posterior = lambda x: euclidean_auxiliaries(x)[0]\n grad_log_posterior = lambda x: euclidean_auxiliaries(x)[1]\n delta = 1e-6\n\n u = np.random.normal(size=x.shape)\n fd = (log_posterior(x + 0.5*delta*u) - log_posterior(x - 0.5*delta*u)) / delta\n dd = grad_log_posterior(x)@u\n self.assertTrue(np.allclose(fd, dd))\n\n def test_hyperparameter_posterior(self):\n sigmasq, beta = np.random.uniform(size=(2, ))\n mu = np.log(126.0) - sigmasq / 2.0\n dist, x, y = generate_data(16, mu, beta, sigmasq)\n\n log_posterior, metric, _, euclidean_auxiliaries, riemannian_auxiliaries = hyperparameter_posterior_factory(dist, mu, x, y)\n\n grad_log_posterior = lambda qt: euclidean_auxiliaries(qt)[1]\n grad_metric = lambda qt: riemannian_auxiliaries(qt)[3]\n\n q = np.array([sigmasq, beta])\n qt, _ = forward_transform(q)\n\n delta = 1e-4\n u = np.random.normal(size=(2, ))\n fd = (log_posterior(qt + 0.5*delta*u) - log_posterior(qt - 0.5*delta*u)) / delta\n dd = grad_log_posterior(qt)@u\n self.assertTrue(np.allclose(fd, dd))\n\n fd = (metric(qt + 0.5*delta*u) - metric(qt - 0.5*delta*u)) / delta\n dd = grad_metric(qt)@u\n self.assertTrue(np.allclose(fd, dd))\n",
"from typing import Tuple\n\nimport numpy as np\n\n\ndef gamma_logpdf(x: float, k: float, theta: float) -> float:\n \"\"\"Log-density of the Gamma distribution up to a constant factor.\n\n Args:\n x: Positive number at which to evaluate the Gamma distribution.\n k: Shape parameter of the Gamma distribution.\n theta: Scale parameter of the Gamma distribution.\n\n Returns:\n out: The log-density of the Gamma distribution.\n\n \"\"\"\n return (k - 1.0)*np.log(x) - x / theta\n\ndef grad_gamma_logpdf(x: float, k: float, theta: float) -> float:\n \"\"\"Gradient of the log-density of the Gamma distribution.\n\n Args:\n x: Positive number at which to evaluate the Gamma distribution.\n k: Shape parameter of the Gamma distribution.\n theta: Scale parameter of the Gamma distribution.\n\n Returns:\n out: The gradient of the log-density of the Gamma distribution.\n\n \"\"\"\n return (k - 1.0) / x - np.reciprocal(theta)\n\ndef hess_gamma_logpdf(x: float, k: float, theta: float) -> float:\n \"\"\"Hessian of the log-density of the Gamma distribution.\n\n Args:\n x: Positive number at which to evaluate the Gamma distribution.\n k: Shape parameter of the Gamma distribution.\n theta: Scale parameter of the Gamma distribution.\n\n Returns:\n out: The Hessian of the log-density of the Gamma distribution.\n\n \"\"\"\n return -(k - 1.0) / np.square(x)\n\ndef grad_hess_gamma_logpdf(x: float, k: float, theta: float) -> float:\n \"\"\"Third-order derivatives of the log-density of the Gamma distribution.\n\n Args:\n x: Positive number at which to evaluate the Gamma distribution.\n k: Shape parameter of the Gamma distribution.\n theta: Scale parameter of the Gamma distribution.\n\n Returns:\n out: The third-order derivative of the log-density of the Gamma\n distribution.\n\n \"\"\"\n return 2.0*(k - 1.0) / np.power(x, 3.0)\n\ndef log_prior(sigmasq: float, beta: float) -> float:\n \"\"\"The log-prior of the log-Gaussian Cox-Poisson model.\n\n Args:\n sigmasq: Amplitude of the Gaussian process kernel.\n beta: Length scale of the Gaussian process kernel.\n\n Returns:\n lp: The log-density of the prior distribution.\n\n \"\"\"\n lp = gamma_logpdf(beta, 2.0, 0.5)\n lp += gamma_logpdf(sigmasq, 2.0, 0.5)\n return lp\n\ndef grad_log_prior(phis: float, phib: float) -> Tuple[float]:\n \"\"\"Gradient of the log-prior with respect to the reparameterized model\n parameters that are unconstrained.\n\n Args:\n phis: Reparameterized aplitude of the Gaussian process kernel.\n phib: Reparameterized length scale of the Gaussian process kernel.\n\n Returns:\n out: The gradient of the log-prior with respect to the reparameterized\n model parameters.\n\n \"\"\"\n sigmasq = np.exp(phis)\n beta = np.exp(phib)\n dphis = grad_gamma_logpdf(sigmasq, 2.0, 0.5) * sigmasq\n dphib = grad_gamma_logpdf(beta, 2.0, 0.5) * beta\n return np.array((dphis, dphib))\n\ndef hess_log_prior(phis: float, phib: float) -> np.ndarray:\n \"\"\"Compute the hessian of the log-prior with respect to the reparameterized\n model parameters.\n\n Args:\n phis: Reparameterized aplitude of the Gaussian process kernel.\n phib: Reparameterized length scale of the Gaussian process kernel.\n\n Returns:\n H: The Hessian of the log-prior with respect to the reparameterized model\n parameters.\n\n \"\"\"\n sigmasq = np.exp(phis)\n beta = np.exp(phib)\n H = np.array([\n [grad_gamma_logpdf(sigmasq, 2.0, 0.5)*sigmasq + np.square(sigmasq)*hess_gamma_logpdf(sigmasq, 2.0, 0.5), 0.0],\n [0.0, grad_gamma_logpdf(beta, 2.0, 0.5)*beta + np.square(beta)*hess_gamma_logpdf(beta, 2.0, 0.5)]\n ])\n 
return H\n\ndef grad_hess_log_prior(phis: float, phib: float) -> np.ndarray:\n \"\"\"Compute the third-order derivatives of the log-prior with respect to the\n reparameterized model parameters.\n\n Args:\n phis: Reparameterized aplitude of the Gaussian process kernel.\n phib: Reparameterized length scale of the Gaussian process kernel.\n\n Returns:\n dH: The third-order derivatives of the log-prior.\n\n \"\"\"\n sigmasq = np.exp(phis)\n beta = np.exp(phib)\n dH = np.zeros((2, 2, 2))\n a = sigmasq*grad_gamma_logpdf(sigmasq, 2.0, 0.5)\n a += np.square(sigmasq)*hess_gamma_logpdf(sigmasq, 2.0, 0.5)\n a += 2.0*sigmasq*hess_gamma_logpdf(sigmasq, 2.0, 0.5)\n a += np.square(sigmasq)*grad_hess_gamma_logpdf(sigmasq, 2.0, 0.5)\n b = beta*grad_gamma_logpdf(beta, 2.0, 0.5)\n b += np.square(beta)*hess_gamma_logpdf(beta, 2.0, 0.5)\n b += 2.0*beta*hess_gamma_logpdf(beta, 2.0, 0.5)\n b += np.square(beta)*grad_hess_gamma_logpdf(beta, 2.0, 0.5)\n dH = np.array([\n [[a, 0.0], [0.0, 0.0]],\n [[0.0, 0.0], [0.0, b]]\n ])\n return dH\n"
] | [
[
"numpy.random.uniform",
"numpy.allclose",
"numpy.log",
"numpy.random.normal",
"numpy.array"
],
[
"numpy.zeros",
"numpy.exp",
"numpy.reciprocal",
"numpy.power",
"numpy.log",
"numpy.array",
"numpy.square"
]
] |
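The test file above validates analytic gradients with a central finite difference along a random direction. The same check, sketched with a stand-in log-density so it runs on its own:

```python
import numpy as np

log_density = lambda q: np.sum(np.sin(q))      # stand-in target, not the Cox-Poisson prior
grad_log_density = lambda q: np.cos(q)

q = np.random.uniform(size=3)
u = np.random.normal(size=3)
delta = 1e-5
fd = (log_density(q + 0.5 * delta * u) - log_density(q - 0.5 * delta * u)) / delta
print(np.allclose(fd, grad_log_density(q) @ u))
```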
UF-f1tenth/F1tenth-UFL | [
"93b0a822c67b2b425664642955342138e65974f4"
] | [
"Object detection and depth estimation/catkin_ws/src/f110-fall2018-skeltons/labs/wall_following/scripts/utils/other.py"
] | [
"\"\"\"\nCreated on Fri Oct 29 18:54:18 2021\n\n@author: Krishna Nuthalapati\n\"\"\"\n\nimport numpy as np\n\ndef iou(boxA, boxB):\n\t# determine the (x, y)-coordinates of the intersection rectangle\n\txA = max(boxA[0], boxB[0])\n\tyA = max(boxA[1], boxB[1])\n\txB = min(boxA[2], boxB[2])\n\tyB = min(boxA[3], boxB[3])\n\t# compute the area of intersection rectangle\n\tinterArea = max(0, xB - xA + 1) * max(0, yB - yA + 1)\n\t# compute the area of both the prediction and ground-truth\n\t# rectangles\n\tboxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)\n\tboxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)\n\t# compute the intersection over union by taking the intersection\n\t# area and dividing it by the sum of prediction + ground-truth\n\t# areas - the interesection area\n\tiou_score = interArea / float(boxAArea + boxBArea - interArea)\n\t# return the intersection over union value\n\treturn iou_score\n\ndef nms(boxes, scores, thresh):\n num_boxes = boxes.shape[0]\n indices = np.zeros((num_boxes), dtype=int)\n # print(\"PRINTING : \", num_boxes)\n for i in range(num_boxes):\n if indices[i] == -1:\n continue\n for j in range(i+1, num_boxes):\n if indices[j] == -1:\n continue\n \n base_box = boxes[i]\n curr_box = boxes[j]\n iou_score = iou(base_box, curr_box)\n \n if iou_score >= thresh:\n if scores[i]>scores[j]:\n indices[i] = 1\n indices[j] = -1\n continue\n indices[j] = 1\n indices[i] = -1\n \n idxs = np.where(indices == 1)[0]\n \n return idxs\n"
] | [
[
"numpy.where",
"numpy.zeros"
]
] |
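The F1tenth entry above scores box overlap with IoU in the (x1, y1, x2, y2) convention before suppressing duplicates. A worked example of that IoU computation on two hypothetical boxes:

```python
import numpy as np

boxA = np.array([10, 10, 50, 50])
boxB = np.array([30, 30, 70, 70])

xA, yA = max(boxA[0], boxB[0]), max(boxA[1], boxB[1])
xB, yB = min(boxA[2], boxB[2]), min(boxA[3], boxB[3])
inter = max(0, xB - xA + 1) * max(0, yB - yA + 1)            # intersection area
areaA = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
areaB = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)
print("IoU:", inter / float(areaA + areaB - inter))          # ~0.15 for these boxes
```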
alstonlo/fgh-gnn | [
"099aee925a3c5077070803d31b6e45793972239c"
] | [
"fgh_gnn/data/graph_builder.py"
] | [
"import itertools\n\nimport dgl\nimport torch\nfrom rdkit import Chem\nfrom scipy.sparse import csr_matrix\nfrom scipy.sparse.csgraph import minimum_spanning_tree\n\nfrom fgh_gnn.utils import FGROUP_MOLS, get_ring_fragments, ogb_graph_to_mol\n\n\nclass FGroupHetGraphBuilder:\n\n def __init__(self, vocab):\n self.vocab = vocab\n\n self.fgroup_vocab = vocab.loc[vocab['type'] == 'fgroup']\n\n self.ring_vocab = vocab.loc[vocab['type'] == 'ring']\n self.ring_smiles_set = set(self.ring_vocab['name'].unique())\n self.misc_ring_idx = len(vocab) - 1\n\n def build_fgroup_heterograph(self, raw_graph):\n\n atom_feats = torch.from_numpy(raw_graph['node_feat'])\n bond_feats = torch.from_numpy(raw_graph['edge_feat'])\n a2a_edges = torch.from_numpy(raw_graph['edge_index'])\n\n # build tree\n mol = ogb_graph_to_mol(raw_graph)\n clusters = self._make_clusters(mol)\n cluster_feats = torch.tensor([c.features for c in clusters],\n dtype=torch.long)\n\n c2atom_edges, atom2c_edges = self._make_inter_edges(clusters)\n c2c_edges, overlap_feats = \\\n self._make_intracluster_edges(raw_graph, clusters)\n\n data_dict = {\n ('atom', 'bond', 'atom'): (a2a_edges[0], a2a_edges[1]),\n ('cluster', 'refine', 'atom'): (c2atom_edges[0], c2atom_edges[1]),\n ('atom', 'pool', 'cluster'): (atom2c_edges[0], atom2c_edges[1]),\n ('cluster', 'overlap', 'cluster'): (c2c_edges[0], c2c_edges[1])\n }\n num_nodes_dict = {\n 'atom': raw_graph['num_nodes'],\n 'cluster': len(clusters)\n }\n\n g = dgl.heterograph(data_dict=data_dict, num_nodes_dict=num_nodes_dict)\n\n g.nodes['atom'].data['x'] = atom_feats\n g.nodes['cluster'].data['x'] = cluster_feats\n\n g.edges['bond'].data['x'] = bond_feats\n g.edges['overlap'].data['x'] = overlap_feats\n\n return g\n\n def _make_clusters(self, mol):\n\n clusters = []\n\n # add all functional groups\n for row in self.fgroup_vocab.itertuples():\n\n row_idx = row.Index\n\n fgroup_query = FGROUP_MOLS[row.name]\n matches = mol.GetSubstructMatches(fgroup_query)\n\n for match_idxs in matches:\n clusters.append(Cluster(row_idx, 'fgroup', match_idxs))\n\n # add all rings\n for ring_idxs in get_ring_fragments(mol):\n\n ring_smiles = Chem.MolFragmentToSmiles(mol, list(ring_idxs),\n isomericSmiles=False,\n kekuleSmiles=True)\n\n if ring_smiles in self.ring_smiles_set:\n row_idx = self.ring_vocab.index[self.ring_vocab['name']\n == ring_smiles]\n row_idx = int(row_idx[0])\n else:\n row_idx = self.misc_ring_idx\n\n clusters.append(Cluster(row_idx, 'ring', ring_idxs))\n\n # add all remaining singular atoms\n leftover_atoms = set(range(mol.GetNumAtoms()))\n for cluster in clusters:\n leftover_atoms.difference_update(cluster.atom_idxs)\n\n for atom_idx in leftover_atoms:\n atomic_num = mol.GetAtomWithIdx(atom_idx).GetAtomicNum()\n clusters.append(Cluster(atomic_num, 'atom', (atom_idx,)))\n\n return clusters\n\n def _make_inter_edges(self, clusters):\n\n c2atom_edges = [[], []]\n atom2c_edges = [[], []]\n\n for cluster_idx, cluster in enumerate(clusters):\n for atom_idx in cluster.atom_idxs:\n c2atom_edges[0].append(cluster_idx)\n c2atom_edges[1].append(atom_idx)\n\n atom2c_edges[0].append(atom_idx)\n atom2c_edges[1].append(cluster_idx)\n\n c2atom_edges = torch.tensor(c2atom_edges, dtype=torch.long)\n atom2c_edges = torch.tensor(atom2c_edges, dtype=torch.long)\n\n return c2atom_edges, atom2c_edges\n\n def _make_intracluster_edges(self, raw_graph, clusters):\n\n edge_index = raw_graph['edge_index']\n\n edge_dict = {i: set() for i in range(raw_graph['num_nodes'])}\n for i, j in zip(edge_index[0], edge_index[1]):\n 
edge_dict[i].add(j)\n\n num_clusters = len(clusters)\n adj_matrix = [[0] * num_clusters for _ in range(num_clusters)]\n\n cluster_neighbours = []\n for cluster in clusters:\n neighbours = set()\n for atom_idx in cluster.atom_idxs:\n neighbours.add(atom_idx)\n neighbours.update(edge_dict[atom_idx])\n cluster_neighbours.append(neighbours)\n\n for i, j in itertools.combinations(range(num_clusters), r=2):\n ci, cj = clusters[i], clusters[j]\n\n if ci.atom_idxs & cj.atom_idxs:\n edge_weight = len(ci.atom_idxs & cj.atom_idxs) + 1\n elif cluster_neighbours[i] & cluster_neighbours[j]:\n edge_weight = 1\n else:\n continue\n\n adj_matrix[i][j] = edge_weight\n adj_matrix[j][i] = edge_weight\n\n # build spanning tree\n adj_matrix = csr_matrix(adj_matrix)\n span_tree = minimum_spanning_tree(adj_matrix, overwrite=True)\n adj_matrix = torch.from_numpy(span_tree.toarray()).long()\n adj_matrix = to_bidirectional(adj_matrix)\n\n # represent as sparse matrix\n adj_matrix = adj_matrix.to_sparse().coalesce()\n edge_index = adj_matrix.indices()\n edge_feats = adj_matrix.values()\n\n return edge_index, edge_feats\n\n\nclass Cluster:\n\n def __init__(self, vocab_id, cluster_type, atom_idxs):\n\n # for sanity\n if not isinstance(vocab_id, int):\n raise ValueError()\n\n self.vocab_id = vocab_id\n self.cluster_type_idx = ('fgroup', 'ring', 'atom').index(cluster_type)\n self.atom_idxs = frozenset(atom_idxs)\n\n self.features = [self.vocab_id, self.cluster_type_idx]\n\n\n# Helper Method\n\ndef to_bidirectional(X):\n X_T = X.t()\n sym_sum = X + X_T\n X_min = torch.min(X, X_T)\n\n return torch.where(X_min > 0, X_min, sym_sum)\n"
] | [
[
"torch.min",
"scipy.sparse.csgraph.minimum_spanning_tree",
"scipy.sparse.csr_matrix",
"torch.tensor",
"torch.where",
"torch.from_numpy"
]
] |
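The graph builder above weights cluster-to-cluster overlaps, keeps a minimum spanning tree of that adjacency, and mirrors it into bidirectional edges. A small sketch of that tree-plus-mirroring step on a toy 4-cluster adjacency matrix:

```python
import numpy as np
import torch
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import minimum_spanning_tree

adj = np.array([[0, 2, 1, 0],      # toy symmetric cluster adjacency
                [2, 0, 3, 1],
                [1, 3, 0, 0],
                [0, 1, 0, 0]])

span = torch.from_numpy(minimum_spanning_tree(csr_matrix(adj)).toarray()).long()
X_min = torch.min(span, span.t())
bidir = torch.where(X_min > 0, X_min, span + span.t())   # keep each edge in both directions
edges = bidir.to_sparse().coalesce()
print(edges.indices())
print(edges.values())
```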
wangjiangtao-NJPI/MachineLearning | [
"78124b56a26ec68efb3c517a4a2420860b6e4a75"
] | [
"g_CNN/Optimizers.py"
] | [
"import os\nimport sys\nroot_path = os.path.abspath(\"../\")\nif root_path not in sys.path:\n sys.path.append(root_path)\n\nimport tensorflow as tf\n\n\nclass Optimizer:\n def __init__(self, lr=1e-3):\n self._lr = lr\n self._opt = None\n\n @property\n def name(self):\n return str(self)\n\n def minimize(self, x, *args, **kwargs):\n return self._opt.minimize(x, *args, **kwargs)\n\n def __str__(self):\n return self.__class__.__name__\n\n def __repr__(self):\n return str(self)\n\n\nclass MBGD(Optimizer):\n def __init__(self, lr=1e-3):\n Optimizer.__init__(self, lr)\n self._opt = tf.train.GradientDescentOptimizer(self._lr)\n\n\nclass Momentum(Optimizer):\n def __init__(self, lr=1e-3, momentum=0.8):\n Optimizer.__init__(self, lr)\n self._opt = tf.train.MomentumOptimizer(self._lr, momentum)\n\n\nclass NAG(Optimizer):\n def __init__(self, lr=1e-3, momentum=0.8):\n Optimizer.__init__(self, lr)\n self._opt = tf.train.MomentumOptimizer(self._lr, momentum, use_nesterov=True)\n\n\nclass AdaDelta(Optimizer):\n def __init__(self, lr=1e-3, rho=0.95, eps=1e-8):\n Optimizer.__init__(self, lr)\n self._opt = tf.train.AdadeltaOptimizer(self._lr, rho, eps)\n\n\nclass AdaGrad(Optimizer):\n def __init__(self, lr=1e-3, init=0.1):\n Optimizer.__init__(self, lr)\n self._opt = tf.train.AdagradOptimizer(self._lr, init)\n\n\nclass Adam(Optimizer):\n def __init__(self, lr=1e-3, beta1=0.9, beta2=0.999, eps=1e-8):\n Optimizer.__init__(self, lr)\n self._opt = tf.train.AdamOptimizer(self._lr, beta1, beta2, eps)\n\n\nclass RMSProp(Optimizer):\n def __init__(self, lr=1e-3, decay=0.9, momentum=0.0, eps=1e-10):\n Optimizer.__init__(self, lr)\n self._opt = tf.train.RMSPropOptimizer(self._lr, decay, momentum, eps)\n\n\n# Factory\n\nclass OptFactory:\n\n available_optimizers = {\n \"MBGD\": MBGD, \"Momentum\": Momentum, \"NAG\": NAG,\n \"AdaDelta\": AdaDelta, \"AdaGrad\": AdaGrad,\n \"Adam\": Adam, \"RMSProp\": RMSProp\n }\n\n def get_optimizer_by_name(self, name, lr, *args, **kwargs):\n try:\n optimizer = self.available_optimizers[name](lr, *args, **kwargs)\n return optimizer\n except KeyError:\n raise NotImplementedError(\"Undefined Optimizer '{}' found\".format(name))\n"
] | [
[
"tensorflow.train.AdadeltaOptimizer",
"tensorflow.train.MomentumOptimizer",
"tensorflow.train.RMSPropOptimizer",
"tensorflow.train.AdamOptimizer",
"tensorflow.train.AdagradOptimizer",
"tensorflow.train.GradientDescentOptimizer"
]
] |
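The Optimizers entry above hides TF1 tf.train optimizers behind a name-keyed factory; the dispatch itself is framework-independent. A minimal sketch of the same lookup pattern with placeholder optimizer classes (not the TensorFlow API):

```python
class MBGD:
    def __init__(self, lr=1e-3): self.lr = lr

class Adam:
    def __init__(self, lr=1e-3, beta1=0.9, beta2=0.999): self.lr = lr

AVAILABLE = {"MBGD": MBGD, "Adam": Adam}

def get_optimizer_by_name(name, lr, *args, **kwargs):
    try:
        return AVAILABLE[name](lr, *args, **kwargs)
    except KeyError:
        raise NotImplementedError("Undefined Optimizer '{}' found".format(name))

opt = get_optimizer_by_name("Adam", 1e-3)
print(type(opt).__name__, opt.lr)
```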
fshp971/mcmc-unlearning | [
"3113dedca6de33bcaf316b804cb9c1e636db7fd5"
] | [
"BNN/forget.py"
] | [
"from datetime import datetime\nimport os\nimport pickle\nimport argparse\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\n\nfrom mcmc_unlearner import sgmcmcUnlearner\nimport utils\nimport models\n\n\nclass myUnlearner(sgmcmcUnlearner):\n def _apply_sample(self, z):\n x, y = z\n if not self.cpu: x, y = x.cuda(), y.cuda()\n self.model.train()\n lo = -self.model.log_prior() + F.cross_entropy(self.model(x), y) * self.model.n\n self.optimizer.zero_grad()\n lo.backward()\n self.optimizer.step()\n\n def _fun(self, z):\n x, y = z\n if not self.cpu: x, y = x.cuda(), y.cuda()\n self.model.train()\n return -self.model.log_prior() + F.cross_entropy(self.model(x), y) * self.model.n\n\n def _z_fun(self, z):\n x, y = z\n if not self.cpu: x, y = x.cuda(), y.cuda()\n self.model.train()\n return F.cross_entropy(self.model(x), y, reduction='sum')\n\n\ndef get_args():\n parser = argparse.ArgumentParser()\n utils.add_shared_args(parser)\n\n parser.add_argument('--rm-idx-path', type=str, default=None)\n parser.add_argument('--save-freq', type=int, default=-1)\n\n return parser.parse_args()\n\n\ndef get_forget_idx(dataset, kill_num):\n kill_val = 0\n\n if 'targets' in vars(dataset).keys():\n labels = np.array(dataset.targets)\n elif 'labels' in vars(dataset).keys():\n labels = np.array(dataset.labels)\n else:\n raise NotImplementedError\n\n randidx = np.random.permutation( np.where(labels==kill_val)[0] )\n return randidx[:kill_num]\n\n\ndef evaluate(model, loader, cpu):\n ''' average log predictive probability '''\n loss = utils.AverageMeter()\n acc = utils.AverageMeter()\n\n n = len(loader.sampler.indices)\n\n model.eval()\n for x, y in loader:\n if not cpu: x, y = x.cuda(), y.cuda()\n\n with torch.no_grad():\n _y = model(x)\n lo = - model.log_prior() + F.cross_entropy(_y,y) * n\n lo = lo.item()\n ac = (_y.argmax(dim=1) == y).sum().item() / len(y)\n\n loss.update(lo, len(y))\n acc.update(ac, len(y))\n\n return loss.average(), acc.average()\n\n\ndef forget_eval_one_time(model, train_loader, forgetted_train_loader, test_loader, log):\n remain_train_loss, remain_train_acc = evaluate(model, train_loader, args.cpu)\n forgetted_train_loss, forgetted_train_acc = evaluate(model, forgetted_train_loader, args.cpu)\n test_loss, test_acc = evaluate(model, test_loader, args.cpu)\n\n utils.add_log(log, 'remain_train_loss', remain_train_loss)\n utils.add_log(log, 'remain_train_acc', remain_train_acc)\n utils.add_log(log,'forgetted_train_loss', forgetted_train_loss)\n utils.add_log(log,'forgetted_train_acc', forgetted_train_acc)\n utils.add_log(log, 'test_loss', test_loss)\n utils.add_log(log, 'test_acc', test_acc)\n\n logger.info('remaining train loss {:.2e} \\t train acc {:.2%}'\n .format(remain_train_loss, remain_train_acc))\n logger.info('forgetted train loss {:.2e} \\t train acc {:.2%}'\n .format(forgetted_train_loss, forgetted_train_acc))\n logger.info('test loss {:.2e} \\t test acc {:.2%}'\n .format(test_loss, test_acc))\n logger.info('')\n\n\ndef save_checkpoint(save_dir, save_name, log, model, optimizer):\n with open('{}/{}-log.pkl'.format(save_dir, save_name), 'wb') as f:\n pickle.dump(log, f)\n torch.save({\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n }, '{}/{}-model.pkl'.format(save_dir, save_name))\n\n\ndef main(args, logger):\n ''' retrieve lots of data '''\n trainset, testset = utils.get_dataset(args.dataset)\n\n if args.rm_idx_path is not None:\n with open(args.rm_idx_path, 'rb') as f:\n forgetted_idx = pickle.load(f)\n else:\n forgetted_idx 
= get_forget_idx(trainset, args.ifs_kill_num)\n\n forgetted_idx_loader = utils.IndexBatchSampler(\n batch_size=args.ifs_rm_bs, indices=forgetted_idx)\n\n train_sampler = utils.DataSampler(trainset, args.batch_size)\n\n train_loader = utils.DataLoader(trainset, args.batch_size)\n train_loader.remove(forgetted_idx)\n\n forgetted_train_loader = utils.DataLoader(trainset, args.batch_size)\n forgetted_train_loader.set_sampler_indices(forgetted_idx)\n\n test_loader = utils.DataLoader(testset, args.batch_size)\n ''' end of retrieving data '''\n\n model = utils.get_mcmc_bnn_arch(args.arch, args.dataset, args.prior_sig)\n\n if not args.cpu:\n model.cuda()\n\n args.lr /= len(trainset)\n optimizer = utils.get_optim(model.parameters(), args.optim,\n lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay, sghmc_alpha=args.sghmc_alpha)\n\n model.n = len(train_sampler)\n\n ''' restore model / sampler '''\n state_dict = torch.load(args.resume_path)\n model.load_state_dict(state_dict['model_state_dict'])\n optimizer.load_state_dict(state_dict['optimizer_state_dict'])\n\n ''' for backward compatibility '''\n for group in optimizer.param_groups:\n if 'lr_decay' in group:\n group['lr'] *= group['lr_decay']\n group.pop('lr_decay')\n\n del state_dict\n\n unlearner = myUnlearner(\n model = model,\n optimizer = optimizer,\n params = model.parameters(),\n cpu = args.cpu,\n iter_T = args.ifs_iter_T,\n scaling = args.ifs_scaling,\n samp_T = args.ifs_samp_T,)\n\n log = dict()\n log['user_time'] = 0\n utils.add_log(log, 'forgetted_idx', forgetted_idx)\n\n forget_eval_one_time(model, train_loader, forgetted_train_loader, test_loader, log)\n\n removed_nums = 0\n freq_counter = 0\n\n for ii in forgetted_idx_loader:\n ''' create forget-batch '''\n xx, yy = [], []\n for i in ii:\n x, y = trainset[i]\n if len(x.shape) == 3: x = x.reshape(1, *x.shape)\n xx.append(x)\n yy.append(y)\n xx, yy = torch.cat(xx), torch.tensor(yy)\n ''' end '''\n\n scaling = args.ifs_scaling / len(train_sampler)\n unlearner.param_dict['scaling'] = scaling\n\n ''' start calculation of time '''\n start_time = datetime.now()\n\n unlearner.remove([xx,yy], train_sampler)\n\n torch.cuda.synchronize()\n end_time = datetime.now()\n user_time = (end_time - start_time).total_seconds()\n ''' end calculation of time '''\n\n log['user_time'] += user_time\n\n train_sampler.remove(ii)\n ''' after removal, update the number of remaining datums '''\n unlearner.model.n = len(train_sampler)\n\n removed_nums += len(ii)\n freq_counter += len(ii)\n\n ''' update mcmc sampler '''\n for group in unlearner.optimizer.param_groups:\n group['lr'] *= (len(train_sampler) + len(ii)) / len(train_sampler)\n\n logger.info('remaining trainset size {}'.format(len(train_sampler)))\n logger.info('user time {:.3f} sec \\t'\n 'cumulated user time {:.3f} mins'\n .format(user_time, log['user_time']/60) )\n\n if (args.save_freq > 0) and (freq_counter >= args.save_freq):\n freq_counter = 0\n save_checkpoint(args.save_dir, '{}-ckpt-{}'.format(args.save_name, removed_nums), log, model, optimizer)\n\n forget_eval_one_time(model, train_loader, forgetted_train_loader, test_loader, log)\n\n save_checkpoint(args.save_dir, args.save_name, log, model, optimizer)\n\n return\n\n\nif __name__ == '__main__':\n args = get_args()\n logger = utils.generic_init(args)\n\n try:\n main(args, logger)\n except Exception as e:\n logger.exception('Unexpected exception! %s', e)\n"
] | [
[
"torch.load",
"torch.no_grad",
"torch.cuda.synchronize",
"torch.tensor",
"torch.nn.functional.cross_entropy",
"numpy.array",
"numpy.where",
"torch.cat"
]
] |
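get_forget_idx in the row above draws a random subset of one class's indices as the data to unlearn. A self-contained sketch of that selection on synthetic labels:

```python
import numpy as np

labels = np.random.randint(0, 10, size=1000)   # synthetic class labels
kill_val, kill_num = 0, 25                     # class to forget, how many samples
candidates = np.random.permutation(np.where(labels == kill_val)[0])
forget_idx = candidates[:kill_num]
print(len(forget_idx), "indices selected for removal")
```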
Nina-pinheiro/Data-Science-Python | [
"b6b2bc28f2f8f925e1b43408330641bd72388232"
] | [
"files/regressao_linear/regressaolinear1.py"
] | [
"# Importar as bibliotecas necessárias\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_error\nimport seaborn as sns\nfrom sklearn.linear_model import LinearRegression\n\n# Leitura do dataset\n\ndf = pd.read_csv(\"dataset/consumo.csv\") \n\n# Converter uma coluna para numerica\n\ndf['Temperatura Maxima (C)'] = df['Temperatura Maxima (C)'].str.replace(',','.').astype(float)\ndf['Temperatura Minima (C)'] = df['Temperatura Minima (C)'].str.replace(',','.').astype(float)\ndf['Precipitacao (mm)'] = df['Precipitacao (mm)'].str.replace(',','.').astype(float)\ndf['Temperatura Media (C)'] = df['Temperatura Media (C)'].str.replace(',','.').astype(float)\n\n# Análise descritiva\n\ndf.describe()\ndf.head()\ndf.dtypes\ndf.info()\ndf.tail()\ndf.shape\n\n# Verificar quais são os valores faltantes\n\ndf.isnull().sum()\n\n# Remover todos os valores faltantes\ndf.dropna(how = \"all\", inplace = True)\n\n# Copiando um data frame em uma nova variável \n\ndf_feature = df.copy()\n\n# Criação de uma nova feature\n\ndf_feature['variacao'] = (df_feature['Temperatura Maxima (C)']) - (df_feature['Temperatura Minima (C)'])\ndf_feature\n\n# Plotando o gráfico da nova feature\ndf_feature.plot(x='variacao', y = 'Consumo de cerveja (litros)')\nplt.xlabel('variacao', fontsize = 15)\nplt.ylabel('Consumo de cerveja (litros)',fontsize = 15)\nplt.grid()\n\n# Excluindo a coluna data\ndf_feature = df_feature.drop(columns = 'Data')\n\n# Realizar a matriz de correlação\n\ndf_feature.corr().round(3)\n\n# Gráficos\n\nplt.figure()\nsns.pairplot(df_feature,x_vars=['Temperatura Minima (C)','Temperatura Media (C)','Temperatura Maxima (C)','Precipitacao (mm)','variacao'],\n y_vars=['Consumo de cerveja (litros)'],hue='Final de Semana',diag_kind=None)\n\n# Realizar o gráfico de final de semana e consumo de cerveja\nplt.figure(2)\nsns.swarmplot(x='Final de Semana',y='Consumo de cerveja (litros)',data= df_feature)\nplt.grid()\nplt.xlabel('Final de semana')\nplt.ylabel('Consumo de cerveja [L]')\n\n# Realizar o gráfico de final de semana e variacao(nova feature criada)\n\nplt.figure(3)\nsns.swarmplot(x = 'Final de Semana', y = 'variacao', data = df_feature)\nplt.grid()\nplt.xlabel('Final de semana')\nplt.ylabel('variacao')\n\n\n# Utilizando o modelo de regressão linear\nmodelo = LinearRegression()\n\n# Colocando a variável target\ny = df_feature['Consumo de cerveja (litros)'].values #target\n\n# colocando as variaveis independentes neste exemplo pega todos menos consumo de cerveja\nx = df_feature.drop(columns='Consumo de cerveja (litros)').values #fetures\nxColunas = df_feature.drop(columns='Consumo de cerveja (litros)').columns\n\n# Realizando o treinamento \n\nxTrain,xTest,yTrain,yTest = train_test_split(x,y, test_size = 0.3, random_state = 54564541)\n\n# Fitando o modelo\n\nmodelo.fit(xTrain,yTrain)\nyPred = modelo.predict(xTest)\n\n# Calcular os resíduos\n\nres = yPred - yTest\n\n# Testes\n\nprint('Valor de R2: {}'.format(modelo.score(xTest,yTest)))\nprint('Valor MSE: {}' .format(mean_squared_error(yTest,yPred)))\nprint('Coeficientes da regressão: {}'.format(modelo.coef_))\nprint('Intercept da regressão: {} \\n'.format(modelo.intercept_))\n\n"
] | [
[
"sklearn.metrics.mean_squared_error",
"pandas.read_csv",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.figure",
"sklearn.linear_model.LinearRegression",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
]
] |
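The regression script above calls train_test_split without importing it from sklearn.model_selection (the import is also absent from its API list). A small, runnable version of the split/fit/score step with the import in place, on synthetic data:

```python
import numpy as np
from sklearn.model_selection import train_test_split   # missing in the row above
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 3))
y = X @ np.array([1.5, -2.0, 0.5]) + rng.normal(scale=0.1, size=200)

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
model = LinearRegression().fit(X_train, y_train)
print("R2:", model.score(X_test, y_test))
print("MSE:", mean_squared_error(y_test, model.predict(X_test)))
```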
alancsouza/chip_clas | [
"e6df8713ae7dd70a5719af83b3b6cb5686f87e29"
] | [
"Experimental setup/Window size test/data6.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\ndata6 = Breast cancer\n\n\"\"\"\nfrom chip_clas_new import chip_clas_new\nimport statistics\nfrom functions import remove_noise\nfrom sklearn.model_selection import train_test_split, KFold\nfrom sklearn.preprocessing import MinMaxScaler\nimport numpy as np\nimport pandas as pd\n\ndata_name = \"Breast cancer\"\nprint(data_name)\n\nurl = 'https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data'\ndata1 = pd.read_csv(url, sep=',', header=None, skiprows=1)\n\ndata = data1.iloc[:,1:].copy() # the first is the id\n\n# converting object data into category dtype\ndata.iloc[:,5] = data.iloc[:,5].astype('category') \n# encoding labels\ndata.iloc[:,5] = data.iloc[:,5].cat.codes\n\nX = data.iloc[:,:-1]\nmin_max_scaler = MinMaxScaler(feature_range=(-1, 1)) # Normalizing data between -1 and 1\nX = pd.DataFrame(min_max_scaler.fit_transform(X))\n\ny = data.iloc[:,-1].copy() # Class: (2 for benign, 4 for malignant cancer)\ny[y == 2] = 1\ny[y == 4] = -1\n\n# Filtering data:\nX_new, y_new = remove_noise(X, y)\n\nX_train, X_test, y_train, y_test = train_test_split(X_new, y_new, test_size=0.2, random_state=42)\n\nf = open(\"results_window_size.txt\", \"a+\")\nf.write(\"\\n\\nDatabase: %s \\n\" % data_name)\nf.write(\"Size before filter: %d \\n\" % X.shape[0])\nf.write(\"Dimension: %d \\n\" % X.shape[1])\n\nf.write(\"Size after filter: %d \\n\" % X_new.shape[0])\nf.write(\"Train Size: %d \\n\" % X_train.shape[0])\n\nwindow_size = [50, 30, 20, 10, 5, 1]\n\nfor split in window_size:\n\n y_hat, y_test, result, runtime, final_split_size, arestas_suporte_size = chip_clas_new(X_train, X_test, y_train, y_test, method = \"parallel\", split_size = split)\n\n\n f.write(\"\\nSplit: %d \\n\" % split)\n f.write(\"AUC: %f \\n\" % result)\n f.write(\"Runtime: %d \\n\" % runtime)\n f.write(\"Final_split_size: %d \\n\" % final_split_size)\n f.write(\"arestas_suporte_size: %d \\n\" % arestas_suporte_size)\n \nf.write(\"#######################################################################\") \nf.close()"
] | [
[
"pandas.read_csv",
"sklearn.preprocessing.MinMaxScaler",
"sklearn.model_selection.train_test_split"
]
] |
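The chip_clas entry above rescales features to [-1, 1] with MinMaxScaler and remaps the Wisconsin labels 2/4 to +1/-1. The same two preprocessing steps on a toy frame:

```python
import pandas as pd
from sklearn.preprocessing import MinMaxScaler

df = pd.DataFrame({"f1": [1.0, 5.0, 9.0], "f2": [2.0, 3.0, 4.0], "Class": [2, 4, 2]})
scaler = MinMaxScaler(feature_range=(-1, 1))
X = pd.DataFrame(scaler.fit_transform(df[["f1", "f2"]]))
y = df["Class"].replace({2: 1, 4: -1})        # 2 = benign -> +1, 4 = malignant -> -1
print(X.values)
print(y.values)
```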
lhcezx/Deteciton_3D | [
"e98b9bb0dd96dfa112e196ec93129caf1ffef39e"
] | [
"sfa/data_process/transformation.py"
] | [
"import os\nimport sys\nimport math\n\nimport numpy as np\nimport torch\n\nsrc_dir = os.path.dirname(os.path.realpath(__file__))\nwhile not src_dir.endswith(\"sfa\"):\n src_dir = os.path.dirname(src_dir)\nif src_dir not in sys.path:\n sys.path.append(src_dir)\n\nfrom config import kitti_config as cnf\n\n\ndef angle_in_limit(angle):\n # To limit the angle in -pi/2 - pi/2\n limit_degree = 5\n while angle >= np.pi / 2:\n angle -= np.pi\n while angle < -np.pi / 2:\n angle += np.pi\n if abs(angle + np.pi / 2) < limit_degree / 180 * np.pi:\n angle = np.pi / 2\n return angle\n\n# 相机坐标系转雷达坐标系\ndef camera_to_lidar(x, y, z, V2C=None, R0=None, P2=None):\n p = np.array([x, y, z, 1]) # \n if V2C is None or R0 is None:\n p = np.matmul(cnf.R0_inv, p)\n p = np.matmul(cnf.Tr_velo_to_cam_inv, p)\n else:\n # 建立坐标变化矩阵\n R0_i = np.zeros((4, 4))\n R0_i[:3, :3] = R0\n R0_i[3, 3] = 1\n p = np.matmul(np.linalg.inv(R0_i), p) # np.linalg.inv() 求逆矩阵\n p = np.matmul(inverse_rigid_trans(V2C), p)\n p = p[0:3]\n return tuple(p)\n\n# 雷达坐标系转图像坐标系\ndef lidar_to_camera(x, y, z, V2C=None, R0=None, P2=None):\n p = np.array([x, y, z, 1]) # 先将点(x,y,z)变为齐次坐标系\n if V2C is None or R0 is None:\n p = np.matmul(cnf.Tr_velo_to_cam, p) # 将坐标系从雷达坐标坐标系转为相机坐标系\n p = np.matmul(cnf.R0, p) # 将Velodyne坐标中的点x投影到编号为0的相机中点进行修正\n else:\n p = np.matmul(V2C, p)\n p = np.matmul(R0, p)\n p = p[0:3]\n return tuple(p)\n\n\ndef camera_to_lidar_point(points):\n # (N, 3) -> (N, 3)\n N = points.shape[0]\n points = np.hstack([points, np.ones((N, 1))]).T # (N,4) -> (4,N)\n\n points = np.matmul(cnf.R0_inv, points)\n points = np.matmul(cnf.Tr_velo_to_cam_inv, points).T # (4, N) -> (N, 4)\n points = points[:, 0:3]\n return points.reshape(-1, 3)\n\n# \ndef lidar_to_camera_point(points, V2C=None, R0=None):\n # (N, 3) -> (N, 3)\n N = points.shape[0]\n points = np.hstack([points, np.ones((N, 1))]).T # 在水平方向上拼接一个(N,1)的单位向量并转置\n\n if V2C is None or R0 is None:\n points = np.matmul(cnf.Tr_velo_to_cam, points)\n points = np.matmul(cnf.R0, points).T\n else:\n points = np.matmul(V2C, points)\n points = np.matmul(R0, points).T\n points = points[:, 0:3]\n return points.reshape(-1, 3)\n\n# 将相机坐标系下的x,y,z转到雷达坐标系下,同时输出对应的bbox所有信息(x, y, z, h, w, l, rz/y)\ndef camera_to_lidar_box(boxes, V2C=None, R0=None, P2=None):\n # (N, 7) -> (N, 7) x,y,z,h,w,l,r\n ret = []\n for box in boxes:\n x, y, z, h, w, l, ry = box\n # 把相机坐标系x,y,z转换为雷达坐标系x,y,z,并通过ry计算出rz\n (x, y, z), h, w, l, rz = camera_to_lidar(x, y, z, V2C=V2C, R0=R0, P2=P2), h, w, l, -ry - np.pi / 2\n # rz = angle_in_limit(rz)\n ret.append([x, y, z, h, w, l, rz])\n return np.array(ret).reshape(-1, 7)\n\n# 将雷达坐标系下的x,y,z转到相机坐标系下,同时输出对应的bbox所有信息(x, y, z, h, w, l, ry)\ndef lidar_to_camera_box(boxes, V2C=None, R0=None, P2=None):\n # (N, 7) -> (N, 7) x,y,z,h,w,l,r\n # Test模式下读取的prediction结果里面还多一个score\n ret = []\n for box in boxes:\n # x, y, z, h, w, l, rz, score = box\n x, y, z, h, w, l, rz = box\n # 把雷达坐标系下的x,y,z转换为相机坐标系x,y,z\n # (x, y, z), h, w, l, ry, score = lidar_to_camera(x, y, z, V2C=V2C, R0=R0, P2=P2), h, w, l, -rz - np.pi / 2, score\n (x, y, z), h, w, l, ry = lidar_to_camera(x, y, z, V2C=V2C, R0=R0, P2=P2), h, w, l, -rz - np.pi / 2\n # ry = angle_in_limit(ry)\n # ret.append([x, y, z, h, w, l, ry, score])\n ret.append([x, y, z, h, w, l, ry])\n # return np.array(ret).reshape(-1, 8)\n return np.array(ret).reshape(-1, 7)\n\n\ndef center_to_corner_box2d(boxes_center, coordinate='lidar'):\n # (N, 5) -> (N, 4, 2)\n N = boxes_center.shape[0]\n boxes3d_center = np.zeros((N, 7))\n boxes3d_center[:, [0, 1, 4, 5, 6]] = boxes_center\n 
boxes3d_corner = center_to_corner_box3d(boxes3d_center, coordinate=coordinate)\n\n return boxes3d_corner[:, 0:4, 0:2]\n\n# 将中心点坐标表示法变成八个角点坐标表示3dbbox\ndef center_to_corner_box3d(boxes_center, coordinate='lidar'):\n # (N, 7) -> (N, 8, 3)\n N = boxes_center.shape[0]\n ret = np.zeros((N, 8, 3), dtype=np.float32) # 保存每一个样本的3Dbbox的八个角点坐标\n\n if coordinate == 'camera': \n boxes_center = camera_to_lidar_box(boxes_center) # 如果是相机坐标系,则需要转变到雷达坐标系下并输出3dbbox的信息\n # 样本循环\n for i in range(N):\n box = boxes_center[i] \n translation = box[0:3] # x,y,z\n size = box[3:6] # h,w,l\n rotation = [0, 0, box[-1]] # [0, 0, rz]\n\n h, w, l = size[0], size[1], size[2]\n # 3D bbox的八个点\n trackletBox = np.array([ # in velodyne coordinates around zero point and without orientation yet\n [-l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2], \\\n [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2], \\\n [0, 0, 0, 0, h, h, h, h]])\n\n # re-create 3D bounding box in velodyne coordinate system\n yaw = rotation[2] # 绕z轴的偏航角\n rotMat = np.array([\n [np.cos(yaw), -np.sin(yaw), 0.0],\n [np.sin(yaw), np.cos(yaw), 0.0],\n [0.0, 0.0, 1.0]])\n # 根据航向角调整bbox的方向rotation,然后对八个角都加上(x,y,z)中心点坐标,最终获得通过偏航角rz旋转后的3dbbox的八个点坐标\n cornerPosInVelo = np.dot(rotMat, trackletBox) + np.tile(translation, (8, 1)).T # 沿着Y轴复制8个同样的向量,沿着X轴保持不变,最后转置。\n box3d = cornerPosInVelo.transpose()\n ret[i] = box3d\n\n if coordinate == 'camera': # 如果是相机坐标系则需要从雷达坐标系变回相机坐标系\n for idx in range(len(ret)):\n ret[idx] = lidar_to_camera_point(ret[idx])\n\n return ret\n\n\nCORNER2CENTER_AVG = True\n\n# 3dbbox的八个角点表示法变成以3dbbox中心点坐标来表示\ndef corner_to_center_box3d(boxes_corner, coordinate='camera'):\n # (N, 8, 3) -> (N, 7) x,y,z,h,w,l,ry/z\n if coordinate == 'lidar': # 如果是雷达坐标系则需要先变为相机坐标系\n for idx in range(len(boxes_corner)):\n boxes_corner[idx] = lidar_to_camera_point(boxes_corner[idx]) \n\n ret = []\n for roi in boxes_corner:\n if CORNER2CENTER_AVG: # average version\n roi = np.array(roi) # roi = ()\n # 相机坐标系下y轴代表高度\n h = abs(np.sum(roi[:4, 1] - roi[4:, 1]) / 4) # 前四个角点的y轴接近0,后四个角点y轴接近h,对他们四个取平均\n # 前后相邻的两个角点的欧式距离 w = sqrt(x^2+y^2),对四条边求平均值\n # [0, 2]表示x,y坐标\n w = np.sum(\n np.sqrt(np.sum((roi[0, [0, 2]] - roi[3, [0, 2]]) ** 2)) +\n np.sqrt(np.sum((roi[1, [0, 2]] - roi[2, [0, 2]]) ** 2)) +\n np.sqrt(np.sum((roi[4, [0, 2]] - roi[7, [0, 2]]) ** 2)) +\n np.sqrt(np.sum((roi[5, [0, 2]] - roi[6, [0, 2]]) ** 2))\n ) / 4\n # 左右相邻的两个角点的欧式距离 l = sqrt(x^2+y^2),对四条边求平均值\n l = np.sum(\n np.sqrt(np.sum((roi[0, [0, 2]] - roi[1, [0, 2]]) ** 2)) +\n np.sqrt(np.sum((roi[2, [0, 2]] - roi[3, [0, 2]]) ** 2)) +\n np.sqrt(np.sum((roi[4, [0, 2]] - roi[5, [0, 2]]) ** 2)) +\n np.sqrt(np.sum((roi[6, [0, 2]] - roi[7, [0, 2]]) ** 2))\n ) / 4\n x = np.sum(roi[:, 0], axis=0) / 8 # 对八个角点的x坐标求平均值\n y = np.sum(roi[0:4, 1], axis=0) / 4 # 对四个角点的y坐标求平均值\n z = np.sum(roi[:, 2], axis=0) / 8 # 对八个角点的z坐标求平均值\n # 对航向角求平均值\n ry = np.sum(\n math.atan2(roi[2, 0] - roi[1, 0], roi[2, 2] - roi[1, 2]) +\n math.atan2(roi[6, 0] - roi[5, 0], roi[6, 2] - roi[5, 2]) +\n math.atan2(roi[3, 0] - roi[0, 0], roi[3, 2] - roi[0, 2]) +\n math.atan2(roi[7, 0] - roi[4, 0], roi[7, 2] - roi[4, 2]) +\n math.atan2(roi[0, 2] - roi[1, 2], roi[1, 0] - roi[0, 0]) +\n math.atan2(roi[4, 2] - roi[5, 2], roi[5, 0] - roi[4, 0]) +\n math.atan2(roi[3, 2] - roi[2, 2], roi[2, 0] - roi[3, 0]) +\n math.atan2(roi[7, 2] - roi[6, 2], roi[6, 0] - roi[7, 0])\n ) / 8\n if w > l:\n w, l = l, w\n ry = ry - np.pi / 2\n elif l > w:\n l, w = w, l\n ry = ry - np.pi / 2\n ret.append([x, y, z, h, w, l, ry])\n\n else: # max version\n h = max(abs(roi[:4, 
1] - roi[4:, 1])) # 前四个角点的z轴接近0,后四个角点z轴接近h,对他们四个取最大\n w = np.max(\n np.sqrt(np.sum((roi[0, [0, 2]] - roi[3, [0, 2]]) ** 2)) +\n np.sqrt(np.sum((roi[1, [0, 2]] - roi[2, [0, 2]]) ** 2)) +\n np.sqrt(np.sum((roi[4, [0, 2]] - roi[7, [0, 2]]) ** 2)) +\n np.sqrt(np.sum((roi[5, [0, 2]] - roi[6, [0, 2]]) ** 2))\n )\n l = np.max(\n np.sqrt(np.sum((roi[0, [0, 2]] - roi[1, [0, 2]]) ** 2)) +\n np.sqrt(np.sum((roi[2, [0, 2]] - roi[3, [0, 2]]) ** 2)) +\n np.sqrt(np.sum((roi[4, [0, 2]] - roi[5, [0, 2]]) ** 2)) +\n np.sqrt(np.sum((roi[6, [0, 2]] - roi[7, [0, 2]]) ** 2))\n )\n x = np.sum(roi[:, 0], axis=0) / 8\n y = np.sum(roi[0:4, 1], axis=0) / 4\n z = np.sum(roi[:, 2], axis=0) / 8\n ry = np.sum(\n math.atan2(roi[2, 0] - roi[1, 0], roi[2, 2] - roi[1, 2]) +\n math.atan2(roi[6, 0] - roi[5, 0], roi[6, 2] - roi[5, 2]) +\n math.atan2(roi[3, 0] - roi[0, 0], roi[3, 2] - roi[0, 2]) +\n math.atan2(roi[7, 0] - roi[4, 0], roi[7, 2] - roi[4, 2]) +\n math.atan2(roi[0, 2] - roi[1, 2], roi[1, 0] - roi[0, 0]) +\n math.atan2(roi[4, 2] - roi[5, 2], roi[5, 0] - roi[4, 0]) +\n math.atan2(roi[3, 2] - roi[2, 2], roi[2, 0] - roi[3, 0]) +\n math.atan2(roi[7, 2] - roi[6, 2], roi[6, 0] - roi[7, 0])\n ) / 8\n if w > l:\n w, l = l, w\n ry = angle_in_limit(ry + np.pi / 2)\n ret.append([x, y, z, h, w, l, ry])\n\n if coordinate == 'lidar':\n ret = camera_to_lidar_box(np.array(ret))\n\n return np.array(ret)\n\n\ndef point_transform(points, tx, ty, tz, rx=0, ry=0, rz=0):\n # Input:\n # points: (N, 3)\n # rx/y/z: in radians\n # Output:\n # points: (N, 3)\n N = points.shape[0]\n points = np.hstack([points, np.ones((N, 1))])\n\n # 点云数据平移\n mat1 = np.eye(4)\n mat1[3, 0:3] = tx, ty, tz\n points = np.matmul(points, mat1)\n \n # 点云数据旋转\n # 4x4围绕x轴旋转的矩阵\n if rx != 0:\n mat = np.zeros((4, 4))\n mat[0, 0] = 1\n mat[3, 3] = 1\n mat[1, 1] = np.cos(rx)\n mat[1, 2] = -np.sin(rx)\n mat[2, 1] = np.sin(rx)\n mat[2, 2] = np.cos(rx)\n points = np.matmul(points, mat)\n\n # 4x4围绕y轴旋转的矩阵\n if ry != 0:\n mat = np.zeros((4, 4))\n mat[1, 1] = 1\n mat[3, 3] = 1\n mat[0, 0] = np.cos(ry)\n mat[0, 2] = np.sin(ry)\n mat[2, 0] = -np.sin(ry)\n mat[2, 2] = np.cos(ry)\n points = np.matmul(points, mat)\n\n # 4x4围绕z轴旋转的矩阵\n if rz != 0:\n mat = np.zeros((4, 4))\n mat[2, 2] = 1\n mat[3, 3] = 1\n mat[0, 0] = np.cos(rz)\n mat[0, 1] = -np.sin(rz)\n mat[1, 0] = np.sin(rz)\n mat[1, 1] = np.cos(rz)\n points = np.matmul(points, mat)\n\n return points[:, 0:3]\n\n\n# 返回旋转过后的label标签,如果雷达坐标系下则返回雷达label,反之camera_label\ndef box_transform(boxes, tx, ty, tz, r=0, coordinate='lidar'):\n # Input:\n # boxes: (N, 7) x y z h w l rz/y\n # Output:\n # boxes: (N, 7) x y z h w l rz/y\n # 将每个样本的label中心点坐标根据长宽高变为其3dbbox八个角点的坐标(这个过程需要在雷达坐标系下进行),如果input_label是雷达坐标系则返回雷达坐标,如果是camera坐标系则需要把雷达坐标变回camera坐标\n boxes_corner = center_to_corner_box3d(boxes, coordinate=coordinate) # (N, 8, 3) \n for idx in range(len(boxes_corner)):\n if coordinate == 'lidar':\n boxes_corner[idx] = point_transform(boxes_corner[idx], tx, ty, tz, rz=r) # 如果是lidar坐标系的话偏向角是沿z轴旋转\n else:\n boxes_corner[idx] = point_transform(boxes_corner[idx], tx, ty, tz, ry=r) # 如果是camera坐标系的话偏向角是沿y轴旋转\n\n return corner_to_center_box3d(boxes_corner, coordinate=coordinate) \n\n# 刚体的坐标变换\ndef inverse_rigid_trans(Tr):\n ''' Inverse a rigid body transform matrix (3x4 as [R|t])\n [R'|-R't; 0|1]\n '''\n inv_Tr = np.zeros_like(Tr) # 3x4\n inv_Tr[0:3, 0:3] = np.transpose(Tr[0:3, 0:3])\n inv_Tr[0:3, 3] = np.dot(-np.transpose(Tr[0:3, 0:3]), Tr[0:3, 3])\n return inv_Tr\n\n# 选择多个方法结合进行数据增强\nclass Compose(object):\n def __init__(self, transforms, p=1.0):\n 
self.transforms = transforms\n self.p = p\n\n def __call__(self, lidar, labels):\n if np.random.random() <= self.p:\n for t in self.transforms:\n lidar, labels = t(lidar, labels)\n return lidar, labels\n\n# 选择一个方法进行数据增强\nclass OneOf(object):\n def __init__(self, transforms, p=1.0):\n self.transforms = transforms\n self.p = p\n\n def __call__(self, lidar, labels):\n if np.random.random() <= self.p:\n choice = np.random.randint(low=0, high=len(self.transforms))\n lidar, labels = self.transforms[choice](lidar, labels)\n\n return lidar, labels\n\n\nclass Random_Rotation(object):\n def __init__(self, limit_angle=np.pi / 4, p=0.5):\n self.limit_angle = limit_angle\n self.p = p\n\n def __call__(self, lidar, labels):\n \"\"\"\n :param labels: # (N', 7) x, y, z, h, w, l, r\n :return:\n \"\"\"\n if np.random.random() <= self.p:\n # 随机取一个角度在-limit_angle到limit_angle之间\n angle = np.random.uniform(-self.limit_angle, self.limit_angle)\n # 点云数据绕Z轴旋转\n lidar[:, 0:3] = point_transform(lidar[:, 0:3], 0, 0, 0, rz=angle)\n # 把数据对应的label也旋转\n labels = box_transform(labels, 0, 0, 0, r=angle, coordinate='lidar')\n\n return lidar, labels\n\n\nclass Random_Scaling(object):\n def __init__(self, scaling_range=(0.95, 1.05), p=0.5):\n self.scaling_range = scaling_range\n self.p = p\n\n def __call__(self, lidar, labels):\n \"\"\"\n :param labels: # (N', 7) x, y, z, h, w, l, r\n :return:\n \"\"\"\n if np.random.random() <= self.p:\n # 数据缩放因子\n factor = np.random.uniform(self.scaling_range[0], self.scaling_range[0])\n # lidar和label数据缩放\n lidar[:, 0:3] = lidar[:, 0:3] * factor\n labels[:, 0:6] = labels[:, 0:6] * factor\n\n return lidar, labels\n\n\nclass Cutout(object):\n \"\"\"Randomly mask out one or more patches from an image.\n Args:\n n_holes (int): Number of patches to cut out of each image.\n length (int): The length (in pixels) of each square patch.\n Refer from: https://github.com/uoguelph-mlrg/Cutout/blob/master/util/cutout.py\n \"\"\"\n\n def __init__(self, n_holes, ratio, fill_value=0., p=1.0):\n self.n_holes = n_holes\n self.ratio = ratio\n assert 0. <= fill_value <= 1., \"the fill value is in a range of 0 to 1\"\n self.fill_value = fill_value\n self.p = p\n\n def __call__(self, img, targets):\n \"\"\"\n Args:\n img (Tensor): Tensor image of size (C, H, W).\n Returns:\n Tensor: Image with n_holes of dimension length x length cut out of it.\n \"\"\"\n if np.random.random() <= self.p:\n h = img.size(1)\n w = img.size(2)\n\n h_cutout = int(self.ratio * h)\n w_cutout = int(self.ratio * w)\n\n for n in range(self.n_holes):\n y = np.random.randint(h)\n x = np.random.randint(w)\n\n y1 = np.clip(y - h_cutout // 2, 0, h)\n y2 = np.clip(y + h_cutout // 2, 0, h)\n x1 = np.clip(x - w_cutout // 2, 0, w)\n x2 = np.clip(x + w_cutout // 2, 0, w)\n\n img[:, y1: y2, x1: x2] = self.fill_value # Zero out the selected area\n # Remove targets that are in the selected area\n keep_target = []\n for target_idx, target in enumerate(targets):\n _, _, target_x, target_y, target_w, target_l, _, _ = target\n if (x1 <= target_x * w <= x2) and (y1 <= target_y * h <= y2):\n continue\n keep_target.append(target_idx)\n targets = targets[keep_target]\n\n return img, targets\n"
] | [
[
"numpy.random.uniform",
"numpy.eye",
"numpy.matmul",
"numpy.zeros_like",
"numpy.transpose",
"numpy.ones",
"numpy.zeros",
"numpy.tile",
"numpy.linalg.inv",
"numpy.sum",
"numpy.cos",
"numpy.random.random",
"numpy.clip",
"numpy.array",
"numpy.sin",
"numpy.dot",
"numpy.random.randint"
]
] |
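point_transform in the row above rotates lidar points with per-axis rotation matrices. A short sketch of the yaw-only (z-axis) case:

```python
import numpy as np

points = np.random.rand(5, 3)                 # toy lidar points (N, 3)
yaw = np.pi / 6
rot_z = np.array([[np.cos(yaw), -np.sin(yaw), 0.0],
                  [np.sin(yaw),  np.cos(yaw), 0.0],
                  [0.0,          0.0,         1.0]])
rotated = points @ rot_z.T                    # rotate every point about z
print(rotated.shape)
```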
art-vish/neuro-comma | [
"148ff7150e92d734d926a576c50bcabf1ae0ec0a"
] | [
"src/neuro_comma/dataset.py"
] | [
"from typing import Dict, List, Optional, Tuple, Union\nfrom typing_extensions import TypedDict\n\nimport numpy as np\nimport torch\nfrom torch import Tensor\nfrom tqdm import tqdm\nfrom transformers import PreTrainedTokenizer\n\nfrom neuro_comma.augmentation import AUGMENTATIONS\nfrom neuro_comma.pretrained import TOKEN_IDX\n\n\nclass BaseDataset(torch.utils.data.Dataset):\n def __init__(self,\n files: Union[str, List[str]],\n tokenizer: PreTrainedTokenizer,\n targets: Dict[str, int],\n sequence_len: int,\n token_style: str,\n *args,\n **kwargs) -> None:\n\n self.tokenizer = tokenizer\n self.targets = targets\n self.seq_len = sequence_len\n self.token_style = token_style\n\n if isinstance(files, list):\n self.data = []\n for file in files:\n self.data += self._parse_data(file, *args, **kwargs)\n else:\n self.data = self._parse_data(files, *args, **kwargs)\n\n def _parse_data(self, file_path: str, *args, **kwargs) -> List[List[List[int]]]:\n \"\"\"Parse file to train data\n\n Args:\n file_path (`str`): text file path that contains tokens and punctuations separated by tab in lines\n Returns:\n list[Batch]: each having sequence_len punctuation_mask is used to ignore special indices like padding and intermediate sub-word token during evaluation\n \"\"\"\n with open(file_path, 'r', encoding='utf-8') as file:\n x, y = [], []\n for i, line in enumerate(file):\n if (line.strip()):\n line = line.strip()\n token = line.rsplit('\\t', 1)\n if len(token) == 2:\n x.append(token[0])\n target = self.targets[token[1]]\n y.append(target)\n else:\n continue\n\n data = self.parse_tokens(x, self.tokenizer, self.seq_len, self.token_style, y, *args, **kwargs)\n return data\n\n @classmethod\n def parse_tokens(cls,\n tokens: Union[List[str], Tuple[str]],\n tokenizer: PreTrainedTokenizer,\n seq_len: int,\n token_style: str,\n targets: Optional[List[int]] = None,\n *args,\n **kwargs) -> List[List[List[int]]]:\n \"\"\"\n Convert tokenized data for model prediction\n\n Args:\n tokens (`Union[list[str], tuple[str]]`): splited tokens\n tokenizer (`PreTrainedTokenizer`): tokenizer which split tokens to subtokens\n seq_len (`int`): sequence length\n token_style (`str`): token_style from pretrained.TOKEN_IDX\n\n Returns:\n (`list[BatchWithoutTarget]`): list of bathces\n\n ```txt\n tokens : [token token ##token PAD ]\n x : [321 1233 23121 101 ]\n y : [tar 0 tar 0 ]\n y_mask : [1 0 1 0 ]\n attn_mask : [1 1 1 0 ]\n ```\n\n \"\"\"\n data_items = []\n # loop until end of the entire text\n idx = 0\n\n debug = kwargs.get('debug')\n if debug:\n pbar = tqdm(total=len(tokens))\n\n while idx < len(tokens):\n x = [TOKEN_IDX[token_style]['START_SEQ']]\n w_id = [-1] # word indexes\n y = [0]\n y_mask = [1] if targets else [0]\n\n # loop until we have required sequence length\n # -1 because we will have a special end of sequence token at the end\n while len(x) < seq_len - 1 and idx < len(tokens):\n word_pieces = tokenizer.tokenize(tokens[idx])\n\n # if taking these tokens exceeds sequence length we finish\n # current sequence with padding\n # then start next sequence from this token\n if len(word_pieces) + len(x) >= seq_len:\n break\n for i in range(len(word_pieces) - 1):\n x.append(tokenizer.convert_tokens_to_ids(word_pieces[i]))\n w_id.append(idx)\n y.append(0)\n y_mask.append(0)\n if len(word_pieces) > 0:\n x.append(tokenizer.convert_tokens_to_ids(word_pieces[-1]))\n else:\n x.append(TOKEN_IDX[token_style]['UNK'])\n\n w_id.append(idx)\n\n if targets:\n y.append(targets[idx])\n else:\n y.append(0)\n\n y_mask.append(1)\n\n idx += 1\n if 
debug:\n pbar.update(1)\n\n x.append(TOKEN_IDX[token_style]['END_SEQ'])\n w_id.append(-1)\n y.append(0)\n if targets:\n y_mask.append(1)\n else:\n y_mask.append(0)\n\n # Fill with pad tokens\n if len(x) < seq_len:\n x = x + [TOKEN_IDX[token_style]['PAD'] for _ in range(seq_len - len(x))]\n w_id = w_id + [-100 for _ in range(seq_len - len(w_id))]\n y = y + [0 for _ in range(seq_len - len(y))]\n y_mask = y_mask + [0 for _ in range(seq_len - len(y_mask))]\n\n attn_mask = [1 if token != TOKEN_IDX[token_style]['PAD'] else 0 for token in x]\n\n data_items.append([x, w_id, attn_mask, y, y_mask])\n\n if debug:\n pbar.close()\n\n return data_items\n\n def __len__(self) -> int:\n return len(self.data)\n\n def __getitem__(self, index: int) -> Tuple[Tensor, Tensor, Tensor, Tensor]:\n x = self.data[index][0]\n attn_mask = self.data[index][2]\n y = self.data[index][3]\n y_mask = self.data[index][4]\n\n x = torch.tensor(x) # type: ignore\n attn_mask = torch.tensor(attn_mask) # type: ignore\n y = torch.tensor(y) # type: ignore\n y_mask = torch.tensor(y_mask) # type: ignore\n\n return x, y, attn_mask, y_mask # type: ignore\n\n\nclass RepunctDataset(BaseDataset):\n def __init__(self,\n files: Union[str, List[str]],\n tokenizer: PreTrainedTokenizer,\n targets: Dict[str, int],\n sequence_len: int,\n token_style: str,\n is_train=False,\n augment_rate=0.,\n augment_type='substitute',\n *args,\n **kwargs) -> None:\n \"\"\"Preprocess data for restore punctuation\n\n Args:\n files (`Union[str, list[str]]`): single file or list of text files containing tokens and punctuations separated by tab in lines\n tokenizer (`PreTrainedTokenizer`): tokenizer that will be used to further tokenize word for BERT like models\n targets (`dict[str, int]`): dict with targets\n sequence_len (`int`): length of each sequence\n token_style (`str`): For getting index of special tokens in pretrained.TOKEN_IDX\n is_train (`bool, optional`): if false do not apply augmentation. Defaults to False.\n augment_rate (`float, optional`): percent of data which should be augmented. Defaults to 0.0.\n augment_type (`str, optional`): augmentation type. 
Defaults to 'substitute'.\n \"\"\"\n super().__init__(files, tokenizer, targets, sequence_len, token_style, *args, **kwargs)\n\n self.is_train = is_train\n self.augment_type = augment_type\n self.augment_rate = augment_rate\n\n def _augment(self, x, y, y_mask):\n x_aug = []\n y_aug = []\n y_mask_aug = []\n for i in range(len(x)):\n r = np.random.rand()\n if r < self.augment_rate:\n AUGMENTATIONS[self.augment_type](x, y, y_mask, x_aug, y_aug, y_mask_aug, i, self.token_style)\n else:\n x_aug.append(x[i])\n y_aug.append(y[i])\n y_mask_aug.append(y_mask[i])\n\n if len(x_aug) > self.seq_len:\n # len increased due to insert\n x_aug = x_aug[:self.seq_len]\n y_aug = y_aug[:self.seq_len]\n y_mask_aug = y_mask_aug[:self.seq_len]\n elif len(x_aug) < self.seq_len:\n # len decreased due to delete\n x_aug = x_aug + [TOKEN_IDX[self.token_style]['PAD'] for _ in range(self.seq_len - len(x_aug))]\n y_aug = y_aug + [0 for _ in range(self.seq_len - len(y_aug))]\n y_mask_aug = y_mask_aug + [0 for _ in range(self.seq_len - len(y_mask_aug))]\n\n attn_mask = [1 if token != TOKEN_IDX[self.token_style]['PAD'] else 0 for token in x]\n return x_aug, y_aug, attn_mask, y_mask_aug\n\n def __getitem__(self, index: int) -> Tuple[Tensor, Tensor, Tensor, Tensor]:\n x = self.data[index][0]\n attn_mask = self.data[index][2]\n y = self.data[index][3]\n y_mask = self.data[index][4]\n\n if self.is_train and self.augment_rate > 0:\n x, y, attn_mask, y_mask = self._augment(x, y, y_mask)\n\n x = torch.tensor(x) # type: ignore\n attn_mask = torch.tensor(attn_mask) # type: ignore\n y = torch.tensor(y) # type: ignore\n y_mask = torch.tensor(y_mask) # type: ignore\n\n return x, y, attn_mask, y_mask # type: ignore\n"
] | [
[
"torch.tensor",
"numpy.random.rand"
]
] |
ajayiagbebaku/NFL-Model | [
"afcc67a85ca7138c58c3334d45988ada2da158ed",
"afcc67a85ca7138c58c3334d45988ada2da158ed"
] | [
"venv/Lib/site-packages/streamlit/caching/hashing.py",
"venv/Lib/site-packages/pandas/tests/test_optional_dependency.py"
] | [
"# Copyright 2018-2021 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Hashing for st.memo and st.singleton.\"\"\"\nimport collections\nimport functools\nimport hashlib\nimport inspect\nimport io\nimport os\nimport pickle\nimport sys\nimport tempfile\nimport threading\nimport unittest.mock\nimport weakref\nfrom typing import Any, Pattern, Optional, Dict, List\n\nfrom streamlit import type_util\nfrom streamlit import util\nfrom streamlit.logger import get_logger\nfrom streamlit.uploaded_file_manager import UploadedFile\nfrom .cache_errors import (\n CacheType,\n UnhashableTypeError,\n)\n\n_LOGGER = get_logger(__name__)\n\n\n# If a dataframe has more than this many rows, we consider it large and hash a sample.\n_PANDAS_ROWS_LARGE = 100000\n_PANDAS_SAMPLE_SIZE = 10000\n\n\n# Similar to dataframes, we also sample large numpy arrays.\n_NP_SIZE_LARGE = 1000000\n_NP_SAMPLE_SIZE = 100000\n\n\n# Arbitrary item to denote where we found a cycle in a hashed object.\n# This allows us to hash self-referencing lists, dictionaries, etc.\n_CYCLE_PLACEHOLDER = b\"streamlit-57R34ML17-hesamagicalponyflyingthroughthesky-CYCLE\"\n\n\ndef update_hash(val: Any, hasher, cache_type: CacheType) -> None:\n \"\"\"Updates a hashlib hasher with the hash of val.\n\n This is the main entrypoint to hashing.py.\n \"\"\"\n ch = _CacheFuncHasher(cache_type)\n ch.update(hasher, val)\n\n\nclass _HashStack:\n \"\"\"Stack of what has been hashed, for debug and circular reference detection.\n\n This internally keeps 1 stack per thread.\n\n Internally, this stores the ID of pushed objects rather than the objects\n themselves because otherwise the \"in\" operator inside __contains__ would\n fail for objects that don't return a boolean for \"==\" operator. 
For\n example, arr == 10 where arr is a NumPy array returns another NumPy array.\n This causes the \"in\" to crash since it expects a boolean.\n \"\"\"\n\n def __init__(self):\n self._stack: collections.OrderedDict[int, List[Any]] = collections.OrderedDict()\n\n def __repr__(self) -> str:\n return util.repr_(self)\n\n def push(self, val: Any):\n self._stack[id(val)] = val\n\n def pop(self):\n self._stack.popitem()\n\n def __contains__(self, val: Any):\n return id(val) in self._stack\n\n\nclass _HashStacks:\n \"\"\"Stacks of what has been hashed, with at most 1 stack per thread.\"\"\"\n\n def __init__(self):\n self._stacks: weakref.WeakKeyDictionary[\n threading.Thread, _HashStack\n ] = weakref.WeakKeyDictionary()\n\n def __repr__(self) -> str:\n return util.repr_(self)\n\n @property\n def current(self) -> _HashStack:\n current_thread = threading.current_thread()\n\n stack = self._stacks.get(current_thread, None)\n\n if stack is None:\n stack = _HashStack()\n self._stacks[current_thread] = stack\n\n return stack\n\n\nhash_stacks = _HashStacks()\n\n\ndef _int_to_bytes(i: int) -> bytes:\n num_bytes = (i.bit_length() + 8) // 8\n return i.to_bytes(num_bytes, \"little\", signed=True)\n\n\ndef _key(obj: Optional[Any]) -> Any:\n \"\"\"Return key for memoization.\"\"\"\n\n if obj is None:\n return None\n\n def is_simple(obj):\n return (\n isinstance(obj, bytes)\n or isinstance(obj, bytearray)\n or isinstance(obj, str)\n or isinstance(obj, float)\n or isinstance(obj, int)\n or isinstance(obj, bool)\n or obj is None\n )\n\n if is_simple(obj):\n return obj\n\n if isinstance(obj, tuple):\n if all(map(is_simple, obj)):\n return obj\n\n if isinstance(obj, list):\n if all(map(is_simple, obj)):\n return (\"__l\", tuple(obj))\n\n if (\n type_util.is_type(obj, \"pandas.core.frame.DataFrame\")\n or type_util.is_type(obj, \"numpy.ndarray\")\n or inspect.isbuiltin(obj)\n or inspect.isroutine(obj)\n or inspect.iscode(obj)\n ):\n return id(obj)\n\n return NoResult\n\n\nclass _CacheFuncHasher:\n \"\"\"A hasher that can hash objects with cycles.\"\"\"\n\n def __init__(self, cache_type: CacheType):\n self._hashes: Dict[Any, bytes] = {}\n\n # The number of the bytes in the hash.\n self.size = 0\n\n self.cache_type = cache_type\n\n def __repr__(self) -> str:\n return util.repr_(self)\n\n def to_bytes(self, obj: Any) -> bytes:\n \"\"\"Add memoization to _to_bytes and protect against cycles in data structures.\"\"\"\n tname = type(obj).__qualname__.encode()\n key = (tname, _key(obj))\n\n # Memoize if possible.\n if key[1] is not NoResult:\n if key in self._hashes:\n return self._hashes[key]\n\n # Break recursive cycles.\n if obj in hash_stacks.current:\n return _CYCLE_PLACEHOLDER\n\n hash_stacks.current.push(obj)\n\n try:\n # Hash the input\n b = b\"%s:%s\" % (tname, self._to_bytes(obj))\n\n # Hmmm... It's possible that the size calculation is wrong. 
When we\n # call to_bytes inside _to_bytes things get double-counted.\n self.size += sys.getsizeof(b)\n\n if key[1] is not NoResult:\n self._hashes[key] = b\n\n finally:\n # In case an UnhashableTypeError (or other) error is thrown, clean up the\n # stack so we don't get false positives in future hashing calls\n hash_stacks.current.pop()\n\n return b\n\n def update(self, hasher, obj: Any) -> None:\n \"\"\"Update the provided hasher with the hash of an object.\"\"\"\n b = self.to_bytes(obj)\n hasher.update(b)\n\n def _to_bytes(self, obj: Any) -> bytes:\n \"\"\"Hash objects to bytes, including code with dependencies.\n\n Python's built in `hash` does not produce consistent results across\n runs.\n \"\"\"\n\n if isinstance(obj, unittest.mock.Mock):\n # Mock objects can appear to be infinitely\n # deep, so we don't try to hash them at all.\n return self.to_bytes(id(obj))\n\n elif isinstance(obj, bytes) or isinstance(obj, bytearray):\n return obj\n\n elif isinstance(obj, str):\n return obj.encode()\n\n elif isinstance(obj, float):\n return self.to_bytes(hash(obj))\n\n elif isinstance(obj, int):\n return _int_to_bytes(obj)\n\n elif isinstance(obj, (list, tuple)):\n h = hashlib.new(\"md5\")\n for item in obj:\n self.update(h, item)\n return h.digest()\n\n elif isinstance(obj, dict):\n h = hashlib.new(\"md5\")\n for item in obj.items():\n self.update(h, item)\n return h.digest()\n\n elif obj is None:\n return b\"0\"\n\n elif obj is True:\n return b\"1\"\n\n elif obj is False:\n return b\"0\"\n\n elif type_util.is_type(obj, \"pandas.core.frame.DataFrame\") or type_util.is_type(\n obj, \"pandas.core.series.Series\"\n ):\n import pandas as pd\n\n if len(obj) >= _PANDAS_ROWS_LARGE:\n obj = obj.sample(n=_PANDAS_SAMPLE_SIZE, random_state=0)\n try:\n return b\"%s\" % pd.util.hash_pandas_object(obj).sum()\n except TypeError:\n # Use pickle if pandas cannot hash the object for example if\n # it contains unhashable objects.\n return b\"%s\" % pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)\n\n elif type_util.is_type(obj, \"numpy.ndarray\"):\n h = hashlib.new(\"md5\")\n self.update(h, obj.shape)\n\n if obj.size >= _NP_SIZE_LARGE:\n import numpy as np\n\n state = np.random.RandomState(0)\n obj = state.choice(obj.flat, size=_NP_SAMPLE_SIZE)\n\n self.update(h, obj.tobytes())\n return h.digest()\n\n elif inspect.isbuiltin(obj):\n return bytes(obj.__name__.encode())\n\n elif type_util.is_type(obj, \"builtins.mappingproxy\") or type_util.is_type(\n obj, \"builtins.dict_items\"\n ):\n return self.to_bytes(dict(obj))\n\n elif type_util.is_type(obj, \"builtins.getset_descriptor\"):\n return bytes(obj.__qualname__.encode())\n\n elif isinstance(obj, UploadedFile):\n # UploadedFile is a BytesIO (thus IOBase) but has a name.\n # It does not have a timestamp so this must come before\n # temproary files\n h = hashlib.new(\"md5\")\n self.update(h, obj.name)\n self.update(h, obj.tell())\n self.update(h, obj.getvalue())\n return h.digest()\n\n elif hasattr(obj, \"name\") and (\n isinstance(obj, io.IOBase)\n # Handle temporary files used during testing\n or isinstance(obj, tempfile._TemporaryFileWrapper)\n ):\n # Hash files as name + last modification date + offset.\n # NB: we're using hasattr(\"name\") to differentiate between\n # on-disk and in-memory StringIO/BytesIO file representations.\n # That means that this condition must come *before* the next\n # condition, which just checks for StringIO/BytesIO.\n h = hashlib.new(\"md5\")\n obj_name = getattr(obj, \"name\", \"wonthappen\") # Just to appease MyPy.\n self.update(h, 
obj_name)\n self.update(h, os.path.getmtime(obj_name))\n self.update(h, obj.tell())\n return h.digest()\n\n elif isinstance(obj, Pattern):\n return self.to_bytes([obj.pattern, obj.flags])\n\n elif isinstance(obj, io.StringIO) or isinstance(obj, io.BytesIO):\n # Hash in-memory StringIO/BytesIO by their full contents\n # and seek position.\n h = hashlib.new(\"md5\")\n self.update(h, obj.tell())\n self.update(h, obj.getvalue())\n return h.digest()\n\n elif type_util.is_type(obj, \"numpy.ufunc\"):\n # For numpy.remainder, this returns remainder.\n return bytes(obj.__name__.encode())\n\n elif inspect.ismodule(obj):\n # TODO: Figure out how to best show this kind of warning to the\n # user. In the meantime, show nothing. This scenario is too common,\n # so the current warning is quite annoying...\n # st.warning(('Streamlit does not support hashing modules. '\n # 'We did not hash `%s`.') % obj.__name__)\n # TODO: Hash more than just the name for internal modules.\n return self.to_bytes(obj.__name__)\n\n elif inspect.isclass(obj):\n # TODO: Figure out how to best show this kind of warning to the\n # user. In the meantime, show nothing. This scenario is too common,\n # (e.g. in every \"except\" statement) so the current warning is\n # quite annoying...\n # st.warning(('Streamlit does not support hashing classes. '\n # 'We did not hash `%s`.') % obj.__name__)\n # TODO: Hash more than just the name of classes.\n return self.to_bytes(obj.__name__)\n\n elif isinstance(obj, functools.partial):\n # The return value of functools.partial is not a plain function:\n # it's a callable object that remembers the original function plus\n # the values you pickled into it. So here we need to special-case it.\n h = hashlib.new(\"md5\")\n self.update(h, obj.args)\n self.update(h, obj.func)\n self.update(h, obj.keywords)\n return h.digest()\n\n else:\n # As a last resort, hash the output of the object's __reduce__ method\n h = hashlib.new(\"md5\")\n try:\n reduce_data = obj.__reduce__()\n except BaseException as e:\n raise UnhashableTypeError() from e\n\n for item in reduce_data:\n self.update(h, item)\n return h.digest()\n\n\nclass NoResult:\n \"\"\"Placeholder class for return values when None is meaningful.\"\"\"\n\n pass\n",
"import sys\nimport types\n\nimport pytest\n\nfrom pandas.compat._optional import (\n VERSIONS,\n import_optional_dependency,\n)\n\nimport pandas._testing as tm\n\n\ndef test_import_optional():\n match = \"Missing .*notapackage.* pip .* conda .* notapackage\"\n with pytest.raises(ImportError, match=match):\n import_optional_dependency(\"notapackage\")\n\n result = import_optional_dependency(\"notapackage\", errors=\"ignore\")\n assert result is None\n\n\ndef test_xlrd_version_fallback():\n pytest.importorskip(\"xlrd\")\n import_optional_dependency(\"xlrd\")\n\n\ndef test_bad_version(monkeypatch):\n name = \"fakemodule\"\n module = types.ModuleType(name)\n module.__version__ = \"0.9.0\"\n sys.modules[name] = module\n monkeypatch.setitem(VERSIONS, name, \"1.0.0\")\n\n match = \"Pandas requires .*1.0.0.* of .fakemodule.*'0.9.0'\"\n with pytest.raises(ImportError, match=match):\n import_optional_dependency(\"fakemodule\")\n\n # Test min_version parameter\n result = import_optional_dependency(\"fakemodule\", min_version=\"0.8\")\n assert result is module\n\n with tm.assert_produces_warning(UserWarning):\n result = import_optional_dependency(\"fakemodule\", errors=\"warn\")\n assert result is None\n\n module.__version__ = \"1.0.0\" # exact match is OK\n result = import_optional_dependency(\"fakemodule\")\n assert result is module\n\n\ndef test_submodule(monkeypatch):\n # Create a fake module with a submodule\n name = \"fakemodule\"\n module = types.ModuleType(name)\n module.__version__ = \"0.9.0\"\n sys.modules[name] = module\n sub_name = \"submodule\"\n submodule = types.ModuleType(sub_name)\n setattr(module, sub_name, submodule)\n sys.modules[f\"{name}.{sub_name}\"] = submodule\n monkeypatch.setitem(VERSIONS, name, \"1.0.0\")\n\n match = \"Pandas requires .*1.0.0.* of .fakemodule.*'0.9.0'\"\n with pytest.raises(ImportError, match=match):\n import_optional_dependency(\"fakemodule.submodule\")\n\n with tm.assert_produces_warning(UserWarning):\n result = import_optional_dependency(\"fakemodule.submodule\", errors=\"warn\")\n assert result is None\n\n module.__version__ = \"1.0.0\" # exact match is OK\n result = import_optional_dependency(\"fakemodule.submodule\")\n assert result is submodule\n\n\ndef test_no_version_raises(monkeypatch):\n name = \"fakemodule\"\n module = types.ModuleType(name)\n sys.modules[name] = module\n monkeypatch.setitem(VERSIONS, name, \"1.0.0\")\n\n with pytest.raises(ImportError, match=\"Can't determine .* fakemodule\"):\n import_optional_dependency(name)\n"
] | [
[
"numpy.random.RandomState",
"pandas.util.hash_pandas_object"
],
[
"pandas.compat._optional.import_optional_dependency",
"pandas._testing.assert_produces_warning"
]
] |
DeVriesMatt/pointMLP-pytorch | [
"e9c09a2038551e83b072353f3fd7e3294463e892"
] | [
"classification_ModelNet40/test.py"
] | [
"\"\"\"\npython test.py --model pointMLP --msg 20220209053148-404\n\"\"\"\nimport argparse\nimport os\nimport datetime\nimport torch\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim\nimport torch.utils.data\nimport torch.utils.data.distributed\nfrom torch.utils.data import DataLoader\nimport models as models\nfrom utils import progress_bar, IOStream\nfrom data import ModelNet40\nimport sklearn.metrics as metrics\nfrom helper import cal_loss\nimport numpy as np\nimport torch.nn.functional as F\n\nmodel_names = sorted(\n name for name in models.__dict__ if callable(models.__dict__[name])\n)\n\n\ndef parse_args():\n \"\"\"Parameters\"\"\"\n parser = argparse.ArgumentParser(\"training\")\n parser.add_argument(\n \"-c\",\n \"--checkpoint\",\n type=str,\n metavar=\"PATH\",\n help=\"path to save checkpoint (default: checkpoint)\",\n )\n parser.add_argument(\"--msg\", type=str, help=\"message after checkpoint\")\n parser.add_argument(\n \"--batch_size\", type=int, default=16, help=\"batch size in training\"\n )\n parser.add_argument(\n \"--model\", default=\"pointMLP\", help=\"model name [default: pointnet_cls]\"\n )\n parser.add_argument(\n \"--num_classes\",\n default=40,\n type=int,\n choices=[10, 40],\n help=\"training on ModelNet10/40\",\n )\n parser.add_argument(\"--num_points\", type=int, default=1024, help=\"Point Number\")\n return parser.parse_args()\n\n\ndef main():\n args = parse_args()\n print(f\"args: {args}\")\n os.environ[\"HDF5_USE_FILE_LOCKING\"] = \"FALSE\"\n\n if torch.cuda.is_available():\n device = \"cuda\"\n else:\n device = \"cpu\"\n print(f\"==> Using device: {device}\")\n if args.msg is None:\n message = str(datetime.datetime.now().strftime(\"-%Y%m%d%H%M%S\"))\n else:\n message = \"-\" + args.msg\n args.checkpoint = \"checkpoints/\" + args.model + message\n\n print(\"==> Preparing data..\")\n test_loader = DataLoader(\n ModelNet40(partition=\"test\", num_points=args.num_points),\n num_workers=4,\n batch_size=args.batch_size,\n shuffle=False,\n drop_last=False,\n )\n # Model\n print(\"==> Building model..\")\n net = models.__dict__[args.model]()\n criterion = cal_loss\n net = net.to(device)\n checkpoint_path = os.path.join(args.checkpoint, \"best_checkpoint.pth\")\n checkpoint = torch.load(checkpoint_path, map_location=torch.device(\"cpu\"))\n # criterion = criterion.to(device)\n if device == \"cuda\":\n net = torch.nn.DataParallel(net)\n cudnn.benchmark = True\n net.load_state_dict(checkpoint[\"net\"])\n\n test_out = validate(net, test_loader, criterion, device)\n print(f\"Vanilla out: {test_out}\")\n\n\ndef validate(net, testloader, criterion, device):\n net.eval()\n test_loss = 0\n correct = 0\n total = 0\n test_true = []\n test_pred = []\n time_cost = datetime.datetime.now()\n with torch.no_grad():\n for batch_idx, (data, label) in enumerate(testloader):\n data, label = data.to(device), label.to(device).squeeze()\n data = data.permute(0, 2, 1)\n logits = net(data)\n loss = criterion(logits, label)\n test_loss += loss.item()\n preds = logits.max(dim=1)[1]\n test_true.append(label.cpu().numpy())\n test_pred.append(preds.detach().cpu().numpy())\n total += label.size(0)\n correct += preds.eq(label).sum().item()\n progress_bar(\n batch_idx,\n len(testloader),\n \"Loss: %.3f | Acc: %.3f%% (%d/%d)\"\n % (\n test_loss / (batch_idx + 1),\n 100.0 * correct / total,\n correct,\n total,\n ),\n )\n\n time_cost = int((datetime.datetime.now() - time_cost).total_seconds())\n test_true = np.concatenate(test_true)\n test_pred = 
np.concatenate(test_pred)\n return {\n \"loss\": float(\"%.3f\" % (test_loss / (batch_idx + 1))),\n \"acc\": float(\"%.3f\" % (100.0 * metrics.accuracy_score(test_true, test_pred))),\n \"acc_avg\": float(\n \"%.3f\" % (100.0 * metrics.balanced_accuracy_score(test_true, test_pred))\n ),\n \"time\": time_cost,\n }\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"sklearn.metrics.balanced_accuracy_score",
"torch.no_grad",
"torch.nn.DataParallel",
"sklearn.metrics.accuracy_score",
"torch.cuda.is_available",
"numpy.concatenate",
"torch.device"
]
] |
roy881020/VSGNet | [
"a9ba741871d1d7ff401cecf23659f0b75576e7c3"
] | [
"scripts_hico/HICO_eval/bbox_utils.py"
] | [
"import numpy as np\n#import skimage.draw as skdraw\n\n\ndef add_bbox(img,bbox,color=[0,0,0],fill=False,alpha=1):\n x1,y1,x2,y2 = bbox\n \n # Clockwise starting from top left\n r = [y1,y1,y2,y2]\n c = [x1,x2,x2,x1]\n \n if fill:\n coords = skdraw.polygon(r,c,shape=img.shape[0:2])\n skdraw.set_color(img,coords,color,alpha=alpha)\n return\n\n peri_coords = skdraw.polygon_perimeter(r,c,shape=img.shape[0:2])\n skdraw.set_color(img,peri_coords,color,alpha=alpha)\n\n\ndef compute_area(bbox,invalid=None):\n x1,y1,x2,y2 = bbox\n\n if (x2 <= x1) or (y2 <= y1):\n area = invalid\n else:\n area = (x2 - x1 + 1) * (y2 - y1 + 1)\n\n return area\n\n\ndef compute_iou(bbox1,bbox2,verbose=False):\n x1,y1,x2,y2 = bbox1\n x1_,y1_,x2_,y2_ = bbox2\n \n x1_in = max(x1,x1_)\n y1_in = max(y1,y1_)\n x2_in = min(x2,x2_)\n y2_in = min(y2,y2_)\n\n intersection = compute_area(bbox=[x1_in,y1_in,x2_in,y2_in],invalid=0.0)\n area1 = compute_area(bbox1)\n area2 = compute_area(bbox2)\n union = area1 + area2 - intersection\n iou = intersection / (union + 1e-6)\n\n if verbose:\n return iou, intersection, union\n\n return iou \n\n\ndef compute_area_batch(bbox):\n x1,y1,x2,y2 = [bbox[:,i] for i in range(4)]\n area = np.zeros(x1.shape[0])\n valid_mask = np.logical_and(x2 > x1, y2 > y1)\n area_ = (x2 - x1 + 1) * (y2 - y1 + 1)\n area[valid_mask] = area_[valid_mask]\n return area\n\n\ndef compute_iou_batch(bbox1,bbox2,verbose=False):\n x1,y1,x2,y2 = [bbox1[:,i] for i in range(4)]\n x1_,y1_,x2_,y2_ = [bbox2[:,i] for i in range(4)]\n \n x1_in = np.maximum(x1,x1_)\n y1_in = np.maximum(y1,y1_)\n x2_in = np.minimum(x2,x2_)\n y2_in = np.minimum(y2,y2_)\n \n intersection_bbox = np.stack((x1_in,y1_in,x2_in,y2_in),1)\n intersection = compute_area_batch(bbox=intersection_bbox)\n \n area1 = compute_area_batch(bbox1)\n area2 = compute_area_batch(bbox2)\n union = area1 + area2 - intersection\n iou = intersection / (union + 1e-6)\n \n if verbose:\n return iou, intersection, union\n\n return iou \n \n\ndef vis_bbox(bbox,img,color=(0,0,0),modify=False):\n im_h,im_w = img.shape[0:2]\n x1,y1,x2,y2 = bbox\n x1 = max(0,min(x1,im_w-1))\n x2 = max(x1,min(x2,im_w-1))\n y1 = max(0,min(y1,im_h-1))\n y2 = max(y1,min(y2,im_h-1))\n r = [y1,y1,y2,y2]\n c = [x1,x2,x2,x1]\n\n if modify:\n img_ = img\n else:\n img_ = np.copy(img)\n\n rr,cc = skdraw.polygon(r,c,img.shape[:2])\n skdraw.set_color(img_,(rr,cc),color,alpha=0.2)\n\n rr,cc = skdraw.polygon_perimeter(r,c,img.shape[:2])\n for k in range(3):\n img_[rr,cc,k] = color[k]\n\n return img_\n\n\ndef vis_bboxes(bboxes,img,color=(0,0,0),modify=False):\n if modify:\n img_ = img\n else:\n img_ = np.copy(img)\n\n for bbox in bboxes:\n img_ = vis_bbox(bbox,img_,color,True)\n\n return img_\n\n\ndef join_bboxes_by_line(bbox1,bbox2,img,color=(255,0,255),modify=False):\n im_h,im_w = img.shape[0:2]\n x1,y1,x2,y2 = bbox1\n x1_,y1_,x2_,y2_ = bbox2\n\n c0 = 0.5*(x1+x2)\n r0 = 0.5*(y1+y2)\n c1 = 0.5*(x1_+x2_)\n r1 = 0.5*(y1_+y2_)\n r0,c0,r1,c1 = [int(x) for x in [r0,c0,r1,c1]]\n c0 = max(0,min(c0,im_w-1))\n c1 = max(0,min(c1,im_w-1))\n r0 = max(0,min(r0,im_h-1))\n r1 = max(0,min(r1,im_h-1))\n rr,cc,val = skdraw.draw.line_aa(r0,c0,r1,c1)\n \n if modify:\n img_ = img\n else:\n img_ = np.copy(img)\n\n for k in range(3):\n img_[rr,cc,k] = val*color[k]\n\n rr,cc = skdraw.circle(r0,c0,4,img_.shape[:2])\n for k in range(3):\n img_[rr,cc,k] = color[k]\n\n rr,cc = skdraw.circle(r1,c1,4,img_.shape[:2])\n for k in range(3):\n img_[rr,cc,k] = color[k]\n\n return img_\n\n\ndef vis_sub_obj_bboxes(\n sub_bboxes,\n obj_bboxes,\n img,\n 
sub_color=(0,0,255),\n obj_color=(255,0,0),\n modify=False):\n\n img_ = vis_bboxes(sub_bboxes,img,sub_color,modify)\n img_ = vis_bboxes(obj_bboxes,img_,obj_color,modify=True)\n \n for sub_bbox,obj_bbox in zip(sub_bboxes,obj_bboxes):\n img_ = join_bboxes_by_line(sub_bbox,obj_bbox,img_,modify=True)\n\n return img_\n\n\ndef vis_human_keypts(\n img,\n keypts,\n radius=2,\n pt_color=(0,255,255),\n line_color=(0,255,255),\n modify=False):\n LINKS = [\n (0,1),\n (1,2),\n (2,3),\n (3,4),\n (1,5),\n (5,6),\n (6,7),\n (0,15),\n (15,17),\n (0,14),\n (14,16),\n (1,8),\n (8,9),\n (9,10),\n (1,11),\n (11,12),\n (12,13),\n (8,11)\n ]\n\n if modify:\n img_ = img\n else:\n img_ = np.copy(img)\n\n h,w = img.shape[:2]\n\n for i,j in LINKS:\n c0,r0,conf0 = keypts[i]\n c1,r1,conf1 = keypts[j]\n r0,r1 = [max(0,min(h-1,int(v))) for v in [r0,r1]]\n c0,c1 = [max(0,min(w-1,int(v))) for v in [c0,c1]]\n if conf0 > 0 and conf1 > 0:\n rr,cc,val = skdraw.draw.line_aa(r0,c0,r1,c1)\n for k in range(3):\n img_[rr,cc,k] = val*line_color[k]\n\n num_keypts = keypts.shape[0]\n for i in range(num_keypts):\n c,r,conf = keypts[i]\n if conf==0.0:\n continue\n \n rr,cc = skdraw.circle(r,c,radius,img_.shape[:2])\n for k in range(3):\n img_[rr,cc,k] = pt_color[k]\n\n return img_\n\n\n"
] | [
[
"numpy.zeros",
"numpy.stack",
"numpy.logical_and",
"numpy.copy",
"numpy.maximum",
"numpy.minimum"
]
] |
PacktPublishing/Python-Machine-Learning-Solutions-V- | [
"130c9881757fa90bbb124d48ddd0c6c1136fa20c"
] | [
"Section_07_code/speech_recognizer.py"
] | [
"import os\nimport argparse\nimport warnings\nimport numpy as np\nfrom scipy.io import wavfile\nfrom hmmlearn import hmm\nfrom python_speech_features import mfcc\n\n# Function to parse input arguments\ndef build_arg_parser():\n parser = argparse.ArgumentParser(description='Trains the HMM classifier')\n parser.add_argument(\"--input-folder\", dest=\"input_folder\", required=True,\n help=\"Input folder containing the audio files in subfolders\")\n return parser\n\n\n# Class to handle all HMM related processing\nclass HMMTrainer(object):\n def __init__(self, model_name='GaussianHMM', n_components=4, cov_type='diag', n_iter=1000):\n self.model_name = model_name\n self.n_components = n_components\n self.cov_type = cov_type\n self.n_iter = n_iter\n self.models = []\n\n if self.model_name == 'GaussianHMM':\n self.model = hmm.GaussianHMM(n_components=self.n_components,\n covariance_type=self.cov_type, n_iter=self.n_iter)\n else:\n raise TypeError('Invalid model type')\n\n # X is a 2D numpy array where each row is 13D\n def train(self, X):\n np.seterr(all='ignore')\n self.models.append(self.model.fit(X))\n\n # Run the model on input data\n def get_score(self, input_data):\n return self.model.score(input_data)\n\nif __name__=='__main__':\n args = build_arg_parser().parse_args()\n input_folder = args.input_folder\n\n hmm_models = []\n\n # Parse the input directory\n for dirname in os.listdir(input_folder):\n # Get the name of the subfolder\n subfolder = os.path.join(input_folder, dirname)\n\n if not os.path.isdir(subfolder):\n continue\n\n # Extract the label\n label = subfolder[subfolder.rfind('/') + 1:]\n\n # Initialize variables\n X = np.array([])\n y_words = []\n warnings.filterwarnings(\"ignore\")\n # Iterate through the audio files (leaving 1 file for testing in each class)\n for filename in [x for x in os.listdir(subfolder) if x.endswith('.wav')][:-1]:\n # Read the input file\n filepath = os.path.join(subfolder, filename)\n sampling_freq, audio = wavfile.read(filepath)\n\n # Extract MFCC features\n mfcc_features = mfcc(audio, sampling_freq)\n\n # Append to the variable X\n if len(X) == 0:\n X = mfcc_features\n else:\n X = np.append(X, mfcc_features, axis=0)\n\n # Append the label\n y_words.append(label)\n\n #print('X.shape =', X.shape)\n # Train and save HMM model\n hmm_trainer = HMMTrainer()\n hmm_trainer.train(X)\n hmm_models.append((hmm_trainer, label))\n hmm_trainer = None\n\n # Test files\n input_files = [\n 'data/pineapple/pineapple15.wav',\n 'data/orange/orange15.wav',\n 'data/apple/apple15.wav',\n 'data/kiwi/kiwi15.wav'\n ]\n\n # Classify input data\n for input_file in input_files:\n # Read input file\n sampling_freq, audio = wavfile.read(input_file)\n\n # Extract MFCC features\n mfcc_features = mfcc(audio, sampling_freq)\n\n # Define variables\n max_score = [float(\"-inf\")]\n output_label = [float(\"-inf\")]\n\n # Iterate through all HMM models and pick\n # the one with the highest score\n for item in hmm_models:\n hmm_model, label = item\n score = hmm_model.get_score(mfcc_features)\n if score > max_score:\n max_score = score\n output_label = label\n\n # Print the output\n print( \"\\nTrue:\", input_file[input_file.find('/')+1:input_file.rfind('/')])\n print(\"Predicted:\", output_label)\n warnings.filterwarnings(\"ignore\")\n"
] | [
[
"numpy.seterr",
"numpy.append",
"numpy.array",
"scipy.io.wavfile.read"
]
] |
baklanovp/pystella | [
"f6f44ed12d9648585a52a09e15d494daa4c70c59"
] | [
"tests/test_reader_table.py"
] | [
"# coding=utf-8\nimport numpy as np\nimport unittest\n\nimport pystella as ps\n# from pystella.rf import band\n# from pystella.rf.lc import LightCurve\n# from pystella.util.reader_table import read_table_header_float, table2curves, read_obs_table_header, curves2table\n\n__author__ = 'bakl'\n\n\ndef lc_create(b, m=-19, dt=0.):\n n = 10\n time = np.linspace(0. + dt, 200. + dt, n)\n mags = m * np.ones(n)\n return ps.LightCurve(b, time, mags)\n\n\nclass TestReaderTable(unittest.TestCase):\n def test_read_table_header_float(self):\n fname = 'data/stella/cat_R500_M15_Ni006_E12.gri'\n data = ps.util.read_table_header_float(fname)\n cols = len(data.dtype.names)\n self.assertTrue(cols == 15,\n msg=\"The number of colums in the data should be 15, but it's : %d.\" % cols)\n\n def test_read_table_header_float_skiprows(self):\n fname = 'data/stella/rednova_R3.2_M6_Ni0_E0.25.tt'\n data = ps.util.read_table_header_float(fname, skip=87)\n cols = len(data.dtype.names)\n self.assertTrue(cols == 14,\n msg=\"The number of colums in [%s] should be 14, but it's : %d.\" % (fname, cols))\n\n def test_table2curves_no_bands(self):\n ps.Band.load_settings()\n fname = 'data/stella/rednova_R3.2_M6_Ni0_E0.25.tt'\n data = ps.util.read_table_header_float(fname, skip=87)\n data.dtype.names = [col.replace('M', '') for col in data.dtype.names]\n curves = ps.table2curves('test', data)\n for bname in curves.BandNames:\n self.assertTrue(bname in data.dtype.names,\n msg=\"No band %s in [%s] after table2curves.\" % (bname, ''.join(data.dtype.names)))\n\n def test_curves2table(self):\n ps.Band.load_settings()\n fname = 'data/stella/rednova_R3.2_M6_Ni0_E0.25.tt'\n data = ps.util.read_table_header_float(fname, skip=87)\n data.dtype.names = [col.replace('M', '') for col in data.dtype.names]\n curves = ps.table2curves('test', data, is_filter_zero=False)\n tbl = ps.curves2table(curves)\n self.assertCountEqual(curves.Length, len(tbl.names))\n\n def test_read_obs_table_header(self):\n fname = 'data/obs/1999em-uphHamuy.dat'\n tbl, cols_data = ps.util.read_obs_table_header(fname, is_out=True)\n for c in ('JD', 'V'):\n self.assertTrue(c in tbl.dtype.names,\n msg=\"No band %s in [%s] after read_obs_table_header.\" % (c, ','.join(tbl.dtype.names)))\n"
] | [
[
"numpy.ones",
"numpy.linspace"
]
] |
jreback/ibis | [
"fdcca59b085416b1311eb268be3886abad1db230"
] | [
"ibis/backends/clickhouse/tests/test_functions.py"
] | [
"import math\nimport operator\nfrom datetime import date, datetime\nfrom operator import methodcaller\n\nimport pandas as pd\nimport pandas.testing as tm\nimport pytest\nfrom pytest import param\n\nimport ibis\nimport ibis.expr.datatypes as dt\nimport ibis.expr.types as ir\nfrom ibis import literal as L\n\nclickhouse_driver = pytest.importorskip('clickhouse_driver')\npytestmark = pytest.mark.clickhouse\n\n\[email protected](\n ('to_type', 'expected'),\n [\n ('int8', 'CAST(`double_col` AS Int8)'),\n ('int16', 'CAST(`double_col` AS Int16)'),\n ('float', 'CAST(`double_col` AS Float32)'),\n # alltypes.double_col is non-nullable\n (dt.Double(nullable=False), '`double_col`'),\n ],\n)\ndef test_cast_double_col(alltypes, translate, to_type, expected):\n expr = alltypes.double_col.cast(to_type)\n assert translate(expr) == expected\n\n\[email protected](\n ('to_type', 'expected'),\n [\n ('int8', 'CAST(`string_col` AS Int8)'),\n ('int16', 'CAST(`string_col` AS Int16)'),\n (dt.String(nullable=False), '`string_col`'),\n ('timestamp', 'CAST(`string_col` AS DateTime)'),\n ('date', 'CAST(`string_col` AS Date)'),\n ],\n)\ndef test_cast_string_col(alltypes, translate, to_type, expected):\n expr = alltypes.string_col.cast(to_type)\n assert translate(expr) == expected\n\n\[email protected](\n raises=AssertionError, reason='Clickhouse doesn\\'t have decimal type'\n)\ndef test_decimal_cast():\n assert False\n\n\[email protected](\n 'column',\n [\n 'index',\n 'Unnamed: 0',\n 'id',\n 'bool_col',\n 'tinyint_col',\n 'smallint_col',\n 'int_col',\n 'bigint_col',\n 'float_col',\n 'double_col',\n 'date_string_col',\n 'string_col',\n 'timestamp_col',\n 'year',\n 'month',\n ],\n)\ndef test_noop_cast(alltypes, translate, column):\n col = alltypes[column]\n result = col.cast(col.type())\n assert result.equals(col)\n assert translate(result) == '`{}`'.format(column)\n\n\ndef test_timestamp_cast_noop(alltypes, translate):\n target = dt.Timestamp(nullable=False)\n result1 = alltypes.timestamp_col.cast(target)\n result2 = alltypes.int_col.cast(target)\n\n assert isinstance(result1, ir.TimestampColumn)\n assert isinstance(result2, ir.TimestampColumn)\n\n assert translate(result1) == '`timestamp_col`'\n assert translate(result2) == 'CAST(`int_col` AS DateTime)'\n\n\ndef test_timestamp_now(con, translate):\n expr = ibis.now()\n # now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n assert translate(expr) == 'now()'\n # assert con.execute(expr) == now\n\n\[email protected](\n ('unit', 'expected'),\n [\n ('y', '2009-01-01'),\n param('m', '2009-05-01', marks=pytest.mark.xfail),\n ('d', '2009-05-17'),\n ('w', '2009-05-11'),\n ('h', '2009-05-17 12:00:00'),\n ('minute', '2009-05-17 12:34:00'),\n ],\n)\ndef test_timestamp_truncate(con, translate, unit, expected):\n stamp = ibis.timestamp('2009-05-17 12:34:56')\n expr = stamp.truncate(unit)\n assert con.execute(expr) == pd.Timestamp(expected)\n\n\[email protected](\n ('func', 'expected'),\n [\n (methodcaller('year'), 2015),\n (methodcaller('month'), 9),\n (methodcaller('day'), 1),\n (methodcaller('hour'), 14),\n (methodcaller('minute'), 48),\n (methodcaller('second'), 5),\n ],\n)\ndef test_simple_datetime_operations(con, func, expected):\n value = ibis.timestamp('2015-09-01 14:48:05.359')\n with pytest.raises(ValueError):\n con.execute(func(value))\n\n value = ibis.timestamp('2015-09-01 14:48:05')\n con.execute(func(value)) == expected\n\n\[email protected](('value', 'expected'), [(0, None), (5.5, 5.5)])\ndef test_nullifzero(con, value, expected):\n result = 
con.execute(L(value).nullifzero())\n if expected is None:\n assert pd.isnull(result)\n else:\n assert result == expected\n\n\[email protected](\n ('expr', 'expected'),\n [\n (L(None).isnull(), True),\n (L(1).isnull(), False),\n (L(None).notnull(), False),\n (L(1).notnull(), True),\n ],\n)\ndef test_isnull_notnull(con, expr, expected):\n assert con.execute(expr) == expected\n\n\[email protected](\n ('expr', 'expected'),\n [\n (ibis.coalesce(5, None, 4), 5),\n (ibis.coalesce(ibis.NA, 4, ibis.NA), 4),\n (ibis.coalesce(ibis.NA, ibis.NA, 3.14), 3.14),\n ],\n)\ndef test_coalesce(con, expr, expected):\n assert con.execute(expr) == expected\n\n\[email protected](\n ('expr', 'expected'),\n [\n (ibis.NA.fillna(5), 5),\n (L(5).fillna(10), 5),\n (L(5).nullif(5), None),\n (L(10).nullif(5), 10),\n ],\n)\ndef test_fillna_nullif(con, expr, expected):\n result = con.execute(expr)\n if expected is None:\n assert pd.isnull(result)\n else:\n assert result == expected\n\n\[email protected](\n ('value', 'expected'),\n [\n (L('foo_bar'), 'String'),\n (L(5), 'UInt8'),\n (L(1.2345), 'Float64'),\n (L(datetime(2015, 9, 1, hour=14, minute=48, second=5)), 'DateTime'),\n (L(date(2015, 9, 1)), 'Date'),\n param(\n ibis.NA,\n 'Null',\n marks=pytest.mark.xfail(\n raises=AssertionError,\n reason=(\n 'Client/server version mismatch not handled in the '\n 'clickhouse driver'\n ),\n ),\n ),\n ],\n)\ndef test_typeof(con, value, expected):\n assert con.execute(value.typeof()) == expected\n\n\[email protected](('value', 'expected'), [('foo_bar', 7), ('', 0)])\ndef test_string_length(con, value, expected):\n assert con.execute(L(value).length()) == expected\n\n\[email protected](\n ('op', 'expected'),\n [\n (methodcaller('substr', 0, 3), 'foo'),\n (methodcaller('substr', 4, 3), 'bar'),\n (methodcaller('substr', 1), 'oo_bar'),\n ],\n)\ndef test_string_substring(con, op, expected):\n value = L('foo_bar')\n assert con.execute(op(value)) == expected\n\n\ndef test_string_column_substring(con, alltypes, translate):\n expr = alltypes.string_col.substr(2)\n assert translate(expr) == 'substring(`string_col`, 2 + 1)'\n assert len(con.execute(expr))\n\n expr = alltypes.string_col.substr(0, 3)\n assert translate(expr) == 'substring(`string_col`, 0 + 1, 3)'\n assert len(con.execute(expr))\n\n\ndef test_string_reverse(con):\n assert con.execute(L('foo').reverse()) == 'oof'\n\n\ndef test_string_upper(con):\n assert con.execute(L('foo').upper()) == 'FOO'\n\n\ndef test_string_lower(con):\n assert con.execute(L('FOO').lower()) == 'foo'\n\n\ndef test_string_lenght(con):\n assert con.execute(L('FOO').length()) == 3\n\n\[email protected](\n ('value', 'op', 'expected'),\n [\n (L('foobar'), methodcaller('contains', 'bar'), True),\n (L('foobar'), methodcaller('contains', 'foo'), True),\n (L('foobar'), methodcaller('contains', 'baz'), False),\n (L('100%'), methodcaller('contains', '%'), True),\n (L('a_b_c'), methodcaller('contains', '_'), True),\n ],\n)\ndef test_string_contains(con, op, value, expected):\n assert con.execute(op(value)) == expected\n\n\n# TODO: clickhouse-driver escaping bug\ndef test_re_replace(con, translate):\n expr1 = L('Hello, World!').re_replace('.', '\\\\\\\\0\\\\\\\\0')\n expr2 = L('Hello, World!').re_replace('^', 'here: ')\n\n assert con.execute(expr1) == 'HHeelllloo,, WWoorrlldd!!'\n assert con.execute(expr2) == 'here: Hello, World!'\n\n\[email protected](\n ('value', 'expected'),\n [(L('a'), 0), (L('b'), 1), (L('d'), -1)], # TODO: what's the expected?\n)\ndef test_find_in_set(con, value, expected, translate):\n vals = 
list('abc')\n expr = value.find_in_set(vals)\n assert con.execute(expr) == expected\n\n\ndef test_string_column_find_in_set(con, alltypes, translate):\n s = alltypes.string_col\n vals = list('abc')\n\n expr = s.find_in_set(vals)\n assert translate(expr) == \"indexOf(['a','b','c'], `string_col`) - 1\"\n assert len(con.execute(expr))\n\n\[email protected](\n ('url', 'extract', 'expected'),\n [\n (L('https://www.cloudera.com'), 'HOST', 'www.cloudera.com'),\n (L('https://www.cloudera.com'), 'PROTOCOL', 'https'),\n (\n L('https://www.youtube.com/watch?v=kEuEcWfewf8&t=10'),\n 'PATH',\n '/watch',\n ),\n (\n L('https://www.youtube.com/watch?v=kEuEcWfewf8&t=10'),\n 'QUERY',\n 'v=kEuEcWfewf8&t=10',\n ),\n ],\n)\ndef test_parse_url(con, translate, url, extract, expected):\n expr = url.parse_url(extract)\n assert con.execute(expr) == expected\n\n\ndef test_parse_url_query_parameter(con, translate):\n url = L('https://www.youtube.com/watch?v=kEuEcWfewf8&t=10')\n expr = url.parse_url('QUERY', 't')\n assert con.execute(expr) == '10'\n\n expr = url.parse_url('QUERY', 'v')\n assert con.execute(expr) == 'kEuEcWfewf8'\n\n\[email protected](\n ('expr', 'expected'),\n [\n (L('foobar').find('bar'), 3),\n (L('foobar').find('baz'), -1),\n (L('foobar').like('%bar'), True),\n (L('foobar').like('foo%'), True),\n (L('foobar').like('%baz%'), False),\n (L('foobar').like(['%bar']), True),\n (L('foobar').like(['foo%']), True),\n (L('foobar').like(['%baz%']), False),\n (L('foobar').like(['%bar', 'foo%']), True),\n (L('foobarfoo').replace('foo', 'H'), 'HbarH'),\n ],\n)\ndef test_string_find_like(con, expr, expected):\n assert con.execute(expr) == expected\n\n\ndef test_string_column_like(con, alltypes, translate):\n expr = alltypes.string_col.like('foo%')\n assert translate(expr) == \"`string_col` LIKE 'foo%'\"\n assert len(con.execute(expr))\n\n expr = alltypes.string_col.like(['foo%', '%bar'])\n expected = \"`string_col` LIKE 'foo%' OR `string_col` LIKE '%bar'\"\n assert translate(expr) == expected\n assert len(con.execute(expr))\n\n\ndef test_string_column_find(con, alltypes, translate):\n s = alltypes.string_col\n\n expr = s.find('a')\n assert translate(expr) == \"position(`string_col`, 'a') - 1\"\n assert len(con.execute(expr))\n\n expr = s.find(s)\n assert translate(expr) == \"position(`string_col`, `string_col`) - 1\"\n assert len(con.execute(expr))\n\n\[email protected](\n ('call', 'expected'),\n [\n (methodcaller('log'), 'log(`double_col`)'),\n (methodcaller('log2'), 'log2(`double_col`)'),\n (methodcaller('log10'), 'log10(`double_col`)'),\n (methodcaller('round'), 'round(`double_col`)'),\n (methodcaller('round', 0), 'round(`double_col`, 0)'),\n (methodcaller('round', 2), 'round(`double_col`, 2)'),\n (methodcaller('exp'), 'exp(`double_col`)'),\n (methodcaller('abs'), 'abs(`double_col`)'),\n (methodcaller('ceil'), 'ceil(`double_col`)'),\n (methodcaller('floor'), 'floor(`double_col`)'),\n (methodcaller('sqrt'), 'sqrt(`double_col`)'),\n (\n methodcaller('sign'),\n 'intDivOrZero(`double_col`, abs(`double_col`))',\n ),\n ],\n)\ndef test_translate_math_functions(con, alltypes, translate, call, expected):\n expr = call(alltypes.double_col)\n assert translate(expr) == expected\n assert len(con.execute(expr))\n\n\[email protected](\n ('expr', 'expected'),\n [\n (L(-5).abs(), 5),\n (L(5).abs(), 5),\n (L(5.5).round(), 6.0),\n (L(5.556).round(2), 5.56),\n (L(5.556).ceil(), 6.0),\n (L(5.556).floor(), 5.0),\n (L(5.556).exp(), math.exp(5.556)),\n (L(5.556).sign(), 1),\n (L(-5.556).sign(), -1),\n (L(0).sign(), 0),\n 
(L(5.556).sqrt(), math.sqrt(5.556)),\n (L(5.556).log(2), math.log(5.556, 2)),\n (L(5.556).ln(), math.log(5.556)),\n (L(5.556).log2(), math.log(5.556, 2)),\n (L(5.556).log10(), math.log10(5.556)),\n ],\n)\ndef test_math_functions(con, expr, expected, translate):\n assert con.execute(expr) == expected\n\n\ndef test_greatest(con, alltypes, translate):\n expr = ibis.greatest(alltypes.int_col, 10)\n\n assert translate(expr) == \"greatest(`int_col`, 10)\"\n assert len(con.execute(expr))\n\n expr = ibis.greatest(alltypes.int_col, alltypes.bigint_col)\n assert translate(expr) == \"greatest(`int_col`, `bigint_col`)\"\n assert len(con.execute(expr))\n\n\ndef test_least(con, alltypes, translate):\n expr = ibis.least(alltypes.int_col, 10)\n assert translate(expr) == \"least(`int_col`, 10)\"\n assert len(con.execute(expr))\n\n expr = ibis.least(alltypes.int_col, alltypes.bigint_col)\n assert translate(expr) == \"least(`int_col`, `bigint_col`)\"\n assert len(con.execute(expr))\n\n\n# TODO: clickhouse-driver escaping bug\[email protected](\n ('expr', 'expected'),\n [\n (L('abcd').re_search('[a-z]'), True),\n (L('abcd').re_search(r'[\\\\d]+'), False),\n (L('1222').re_search(r'[\\\\d]+'), True),\n ],\n)\ndef test_regexp(con, expr, expected):\n assert con.execute(expr) == expected\n\n\[email protected](\n ('expr', 'expected'),\n [\n (L('abcd').re_extract('([a-z]+)', 0), 'abcd'),\n # (L('abcd').re_extract('(ab)(cd)', 1), 'cd'),\n # valid group number but no match => empty string\n (L('abcd').re_extract(r'(\\\\d)', 0), ''),\n # match but not a valid group number => NULL\n # (L('abcd').re_extract('abcd', 3), None),\n ],\n)\ndef test_regexp_extract(con, expr, expected, translate):\n assert con.execute(expr) == expected\n\n\ndef test_column_regexp_extract(con, alltypes, translate):\n expected = r\"extractAll(`string_col`, '[\\d]+')[3 + 1]\"\n\n expr = alltypes.string_col.re_extract(r'[\\d]+', 3)\n assert translate(expr) == expected\n assert len(con.execute(expr))\n\n\ndef test_column_regexp_replace(con, alltypes, translate):\n expected = r\"replaceRegexpAll(`string_col`, '[\\d]+', 'aaa')\"\n\n expr = alltypes.string_col.re_replace(r'[\\d]+', 'aaa')\n assert translate(expr) == expected\n assert len(con.execute(expr))\n\n\ndef test_numeric_builtins_work(con, alltypes, df, translate):\n expr = alltypes.double_col\n result = expr.execute()\n expected = df.double_col.fillna(0)\n tm.assert_series_equal(result, expected)\n\n\ndef test_null_column(alltypes, translate):\n t = alltypes\n nrows = t.count().execute()\n expr = t.mutate(na_column=ibis.NA).na_column\n result = expr.execute()\n expected = pd.Series([None] * nrows, name='na_column')\n tm.assert_series_equal(result, expected)\n\n\[email protected](\n ('attr', 'expected'),\n [\n (operator.methodcaller('year'), {2009, 2010}),\n (operator.methodcaller('month'), set(range(1, 13))),\n (operator.methodcaller('day'), set(range(1, 32))),\n ],\n)\ndef test_date_extract_field(db, alltypes, attr, expected):\n t = alltypes\n expr = attr(t.timestamp_col.cast('date')).distinct()\n result = expr.execute().astype(int)\n assert set(result) == expected\n\n\ndef test_timestamp_from_integer(con, alltypes, translate):\n # timestamp_col has datetime type\n expr = alltypes.int_col.to_timestamp()\n assert translate(expr) == 'toDateTime(`int_col`)'\n assert len(con.execute(expr))\n\n\ndef test_count_distinct_with_filter(alltypes):\n expr = alltypes.string_col.nunique(\n where=alltypes.string_col.cast('int64') > 1\n )\n result = expr.execute()\n expected = alltypes.string_col.execute()\n 
expected = expected[expected.astype('int64') > 1].nunique()\n assert result == expected\n"
] | [
[
"pandas.testing.assert_series_equal",
"pandas.Series",
"pandas.Timestamp",
"pandas.isnull"
]
] |
HugoPfister/Pyrats | [
"fc2cab0d1e14b8dd19b3eba361d47f053187ab47"
] | [
"pyrats/halos.py"
] | [
"#!/usr/bin/env python\n\n\"\"\"Module to deal with halos, to be used with HaloMaker.\n\nThis module is heavily inspired by the set of IDL routines originally\nfound in the Ramses Analysis ToolSuite (RATS).\n\nTODO: Some more documentation\n\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport yt\nfrom yt.utilities.logger import ytLogger as mylog\nimport yt.utilities.fortran_utils as fpu\nfrom yt.funcs import get_pbar\nimport os\nimport pandas as pd\n\n\nclass HaloList(object):\n def __init__(self, ds, folder='.', contam=False):\n \"\"\"\n PandaList with halos and their properties\n \"\"\"\n\n self.folder = folder\n self.iout = int(str(ds).split('_')[1])\n if os.path.exists(\n '{s.folder}/Halos/{s.iout}/tree_bricks{s.iout:03d}.hdf'.format(\n s=self)):\n self.halos = pd.read_hdf(\n '{s.folder}/Halos/{s.iout}/tree_bricks{s.iout:03d}.hdf'.format(\n s=self))\n else:\n self.halos = self._read_halos(data_set=ds, with_contam_option=contam)\n if self.halos.index.size > 0:\n self.halos.to_hdf(\n '{s.folder}/Halos/{s.iout}/tree_bricks{s.iout:03d}.hdf'.format(\n s=self), 'hdf')\n self.ds = ds\n\n self.halos['bhid'] = -1 ; self.halos['galID'] = -1\n self.halos['mgal'] = 0 ; self.halos['msink'] = 0\n # read purity of halos\n self.halos['pollution'] = 0\n contam_file_path = '{s.folder}/Halos/{s.iout}/contam_halos{s.iout:03d}'.format(\n s=self)\n if os.path.exists(contam_file_path):\n p = np.loadtxt(contam_file_path)\n if len(p) > 0:\n p = p.T\n self.halos.loc[p[0], 'pollution'] = p[1]/p[2]\n\n def get_halo(self, hid, fname=None):\n\n halo = self.halos.loc[hid]\n scale_mpc = float(self.ds.length_unit.in_units('Mpc'))\n\n halostr = (\"Halo {hid:.0f} (level {h.level:.0f}):\\n\"\n \"\\tContains {h.nbpart:.0f} particles and {h.nbsub:.0f} subhalo(s)\\n\"\n \"\\tCenter:\\t\\t ({h.x}, {h.y}, {h.z}) box units\\n\"\n \"\\tVelocity:\\t ({h.vx}, {h.vy}, {h.vz}) km/s\\n\"\n \"\\tL:\\t\\t ({h.Lx}, {h.Ly}, {h.Lz}) ToCheck\\n\"\n \"\\tMass:\\t\\t {h.m:.3e} Msun\\n\"\n \"\\tMvir:\\t\\t {h.mvir:.3e} Msun\\n\"\n \"\\tRadius:\\t\\t {h.r:.3e} Mpc ({rcodeunits:.3e} box units)\\n\"\n \"\\tRvir:\\t\\t {h.rvir:.3e} Mpc ({rvcodeunits:.3e} box units)\\n\"\n \"\\tTvir:\\t\\t {h.tvir:.3e} K\".format(hid=hid,\n h=halo,\n rcodeunits=halo.r / scale_mpc,\n rvcodeunits=halo.rvir / scale_mpc))\n\n if fname is not None:\n with open(fname, 'w') as f:\n f.write(halostr)\n\n return halostr\n\n def get_halo_sphere(self, hid, rvir_factor=5):\n halo_spheres = getattr(self, '_halo_spheres', {})\n if (hid, rvir_factor) in halo_spheres:\n return halo_spheres[hid, rvir_factor]\n\n tmp = self.halos.loc[hid, ['x', 'y', 'z', 'rvir', 'vx', 'vy', 'vz']]\\\n .values\n center = self.ds.arr(tmp[:3], 'code_length')\n radius = self.ds.arr(tmp[3] * rvir_factor, 'Mpc')\n vel = self.ds.arr(tmp[4:7], 'km/s')\n\n # Get a sphere centered on the halo\n sphere = self.ds.sphere(center, radius)\n sphere.set_field_parameter('bulk_velocity', vel)\n\n halo_spheres[(hid, rvir_factor)] = sphere\n self._halo_spheres = halo_spheres\n\n return sphere\n\n def plot_halo(self, hid, rvir_factor=5, field=('deposit', 'all_density'), folder='./',\n weight_field=('index', 'ones'), cmap='viridis', slice=False,\n axis='z', **kwargs):\n '''Plot a given halo.\n\n Parameters\n ----------\n * hid, integer\n The halo id to plot\n * rvir_factor, float, default=5\n Size of the region to plot in unit of Rvir\n\n * field, tuple\n The yt field to plot\n * folder, string\n The folder where to save the data\n * weight_field, tuple\n The field to weight the projection by.\n * cmap, string\n 
The colormap to use\n * slice, boolean\n If true, do a slice plot instead of a projection plot\n * axis, 'x', 'y' or 'z'\n The axis to project onto\n '''\n for k, v in kwargs.items():\n print('%s: %s not supported' % (k, v))\n\n if hid not in self.halos.index:\n mylog.error('%s not found.' % hid)\n return\n\n # Get position\n tmp = np.array(self.halos.loc[hid, ['x', 'y', 'z', 'rvir']])\n center = self.ds.arr(tmp[:3], 'code_length')\n radius = self.ds.arr(tmp[3] * rvir_factor, 'Mpc')\n\n # Get a sphere centered on the halo\n sphere = self.ds.sphere(center, radius)\n\n # Make a projection plot\n p = yt.ProjectionPlot(self.ds, axis, field, data_source=sphere,\n weight_field=weight_field)\n\n p.set_cmap(field=field, cmap=cmap)\n p.annotate_timestamp(corner='upper_left', time=True, redshift=True)\n p.annotate_scale(corner='upper_right')\n\n # TODO: annotate halos\n # TODO: better name\n p.save(folder)\n\n # Accessors\n def __getitem__(self, item):\n if str(item) in self.halos:\n return self.halos[item]\n else:\n return self.halos.ix[item]\n\n # def __getattr__(self, name):\n # return self.halos.__getattr__(name) # self.halos[name]\n\n def __len__(self):\n return len(self.halos)\n\n def __iter__(self):\n return self.halos.iterrows()\n\n # Printing functions\n def __str__(self):\n return self.halos.__str__()\n\n # Convenience functions\n def _read_halos(self, data_set, with_contam_option=False):\n halo_keys = ('ID', 'nbpart', 'level', 'min_part_id',\n 'host', 'hostsub', 'nbsub', 'nextsub',\n 'x', 'y', 'z', 'vx', 'vy', 'vz', 'Lx', 'Ly', 'Lz',\n 'a', 'b', 'c', 'ek', 'ep', 'et', 'rho0', 'r_c',\n 'spin', 'm', 'r', 'mvir', 'rvir', 'tvir', 'cvel')\n filename = '{s.folder}/Halos/{s.iout}/tree_bricks{s.iout:03d}'.format(\n s=self)\n\n data = np.empty(shape=(0, len(halo_keys)), dtype=object)\n yt.funcs.mylog.debug('Reading halo catalog %s (ds=%s)' % (filename, data_set))\n offsets = {}\n if os.path.exists(filename):\n with open(filename, 'rb') as f:\n [npart] = fpu.read_vector(f, 'i')\n [massp] = fpu.read_vector(f, 'f')\n [aexp] = fpu.read_vector(f, 'f')\n [omega_t] = fpu.read_vector(f, 'f')\n [age] = fpu.read_vector(f, 'f')\n [nhalos, nsubs] = fpu.read_vector(f, 'i')\n\n # Save the age/aexp, the mass of the particle,\n # as well as the number of (sub)halos\n self.nhalos = nhalos\n self.nsubs = nsubs\n self.aexp = aexp\n self.age = age\n self.massp = massp\n data = np.empty(shape=(nhalos + nsubs, len(halo_keys)), dtype=object)\n\n mylog.info('Brick: halos : %s' % nhalos)\n mylog.info('Brick: sub halos : %s' % nsubs)\n mylog.info('Brick: aexp : %s' % aexp)\n\n #pbar = get_pbar('', nhalos+nsubs)\n\n for ihalo in range(nhalos + nsubs):\n pos = f.tell()\n [nbpart] = fpu.read_vector(f, 'i') # Number of particles\n listp = fpu.read_vector(f, 'i') # List of the particles IDs\n [ID] = fpu.read_vector(f, 'i') # Halo ID\n fpu.skip(f, 1) # Skip timestep\n [level, host, hostsub, nbsub, nextsub] = fpu.read_vector(f, 'i')\n [m] = fpu.read_vector(f, 'f') # Total mass\n [x, y, z] = fpu.read_vector(f, 'f') # Center\n [vx, vy, vz] = fpu.read_vector(f, 'f') # Velocity\n [Lx, Ly, Lz] = fpu.read_vector(f, 'f') # Angular momentum\n [r, a, b, c] = fpu.read_vector(f, 'f') # Shape (ellipticity)\n [ek, ep, et] = fpu.read_vector(f, 'f') # Energetics\n [spin] = fpu.read_vector(f, 'f') # Total angular momentum\n [rvir, mvir, tvir, cvel] = fpu.read_vector(f, 'f') # Virial parameters\n [rho0, r_c] = fpu.read_vector(f, 'f') # NFW params\n\n if with_contam_option:\n [contam] = fpu.read_vector(f, 'i') # Contamination\n\n # Add the halo to 
the list\n # halos.loc[ihalo] = [ID, nbpart, level, listp.min(),\n # host, hostsub, nbsub, nextsub,\n # x, y, z, vx, vy, vz, Lx, Ly, Lz,\n # a, b, c, ek, ep, et, rho0, r_c,\n # spin, m, r, mvir, rvir, tvir, cvel]\n data[ihalo] = [ID, nbpart, level, listp.min(),\n host, hostsub, nbsub, nextsub,\n x, y, z, vx, vy, vz, Lx, Ly, Lz,\n a, b, c, ek, ep, et, rho0, r_c,\n spin, m, r, mvir, rvir, tvir, cvel]\n #pbar.update()\n offsets[ID] = pos\n\n print('')\n types = {}\n for k in ('ID', 'nbpart', 'level', 'min_part_id',\n 'host', 'hostsub', 'nbsub', 'nextsub'):\n types[k] = np.int64\n for k in ('x', 'y', 'z', 'vx', 'vy', 'vz', 'Lx', 'Ly', 'Lz',\n 'a', 'b', 'c', 'ek', 'ep', 'et', 'rho0', 'r_c',\n 'spin', 'm', 'r', 'mvir', 'rvir', 'tvir', 'cvel'):\n types[k] = np.float64\n dd = {k: data[:, i].astype(types[k])\n for i, k in enumerate(halo_keys)}\n\n halos = pd.DataFrame(dd)\n\n # Get properties in the right units\n # Masses\n halos.m *= 1e11\n halos.mvir *= 1e11\n # Positions and distances\n scale_mpc = float(data_set.length_unit.in_units('cm') / 3.08e24)\n halos.x = halos.x / scale_mpc + .5\n halos.y = halos.y / scale_mpc + .5\n halos.z = halos.z / scale_mpc + .5\n\n self.offsets = offsets\n\n\n return halos.set_index('ID')\n\n def get_halo_parts(self, hid):\n filename = '{s.folder}/Halos/{s.iout}/tree_bricks{s.iout:03d}'.format(\n s=self)\n with open(filename, 'br') as fd:\n fd.seek(self.offsets[hid])\n fpu.skip(fd, 1)\n listp = fpu.read_vector(fd, 'i')\n\n return listp\n"
] | [
[
"numpy.array",
"pandas.DataFrame",
"numpy.loadtxt"
]
] |
gr33n-made/catalyst | [
"bd413abc908ef7cbdeab42b0e805277a791e3ddb"
] | [
"tests/pipelines/test_distillation.py"
] | [
"# flake8: noqa\n\nimport os\nfrom tempfile import TemporaryDirectory\n\nfrom pytest import mark\nimport torch\nfrom torch import nn, optim\nfrom torch.nn import functional as F\nfrom torch.utils.data import DataLoader\n\nfrom catalyst import dl\nfrom catalyst.contrib.datasets import MNIST\nfrom catalyst.data import ToTensor\nfrom catalyst.settings import IS_CUDA_AVAILABLE, NUM_CUDA_DEVICES, SETTINGS\n\n\nclass DistilRunner(dl.Runner):\n def handle_batch(self, batch):\n x, y = batch\n\n self.model[\"teacher\"].eval() # let's manually set teacher model to eval mode\n with torch.no_grad():\n t_logits = self.model[\"teacher\"](x)\n\n s_logits = self.model[\"student\"](x)\n self.batch = {\n \"t_logits\": t_logits,\n \"s_logits\": s_logits,\n \"targets\": y,\n \"s_logprobs\": F.log_softmax(s_logits, dim=-1),\n \"t_probs\": F.softmax(t_logits, dim=-1),\n }\n\n\ndef train_experiment(device, engine=None):\n with TemporaryDirectory() as logdir:\n teacher = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))\n student = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))\n model = {\"teacher\": teacher, \"student\": student}\n criterion = {\"cls\": nn.CrossEntropyLoss(), \"kl\": nn.KLDivLoss(reduction=\"batchmean\")}\n optimizer = optim.Adam(student.parameters(), lr=0.02)\n\n loaders = {\n \"train\": DataLoader(\n MNIST(os.getcwd(), train=True, download=True, transform=ToTensor()), batch_size=32\n ),\n \"valid\": DataLoader(\n MNIST(os.getcwd(), train=False, download=True, transform=ToTensor()), batch_size=32\n ),\n }\n\n runner = DistilRunner()\n # model training\n runner.train(\n engine=engine or dl.DeviceEngine(device),\n model=model,\n criterion=criterion,\n optimizer=optimizer,\n loaders=loaders,\n num_epochs=1,\n logdir=logdir,\n verbose=False,\n callbacks=[\n dl.AccuracyCallback(\n input_key=\"t_logits\", target_key=\"targets\", num_classes=2, prefix=\"teacher_\"\n ),\n dl.AccuracyCallback(\n input_key=\"s_logits\", target_key=\"targets\", num_classes=2, prefix=\"student_\"\n ),\n dl.CriterionCallback(\n input_key=\"s_logits\",\n target_key=\"targets\",\n metric_key=\"cls_loss\",\n criterion_key=\"cls\",\n ),\n dl.CriterionCallback(\n input_key=\"s_logprobs\",\n target_key=\"t_probs\",\n metric_key=\"kl_div_loss\",\n criterion_key=\"kl\",\n ),\n dl.MetricAggregationCallback(\n metric_key=\"loss\", metrics=[\"kl_div_loss\", \"cls_loss\"], mode=\"mean\"\n ),\n dl.OptimizerCallback(metric_key=\"loss\", model_key=\"student\"),\n dl.CheckpointCallback(\n logdir=logdir,\n loader_key=\"valid\",\n metric_key=\"loss\",\n minimize=True,\n save_n_best=3,\n ),\n ],\n )\n\n\n# Torch\ndef test_distillation_on_cpu():\n train_experiment(\"cpu\")\n\n\[email protected](not IS_CUDA_AVAILABLE, reason=\"CUDA device is not available\")\ndef test_distillation_on_torch_cuda0():\n train_experiment(\"cuda:0\")\n\n\[email protected](\n not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2), reason=\"No CUDA>=2 found\",\n)\ndef test_distillation_on_torch_cuda1():\n train_experiment(\"cuda:1\")\n\n\[email protected](\n not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2), reason=\"No CUDA>=2 found\",\n)\ndef test_distillation_on_torch_dp():\n train_experiment(None, dl.DataParallelEngine())\n\n\[email protected](\n not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2), reason=\"No CUDA>=2 found\",\n)\ndef test_distillation_on_torch_ddp():\n train_experiment(None, dl.DistributedDataParallelEngine())\n\n\n# AMP\[email protected](\n not (IS_CUDA_AVAILABLE and SETTINGS.amp_required), reason=\"No CUDA or AMP found\",\n)\ndef 
test_distillation_on_amp():\n train_experiment(None, dl.AMPEngine())\n\n\[email protected](\n not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2 and SETTINGS.amp_required),\n reason=\"No CUDA>=2 or AMP found\",\n)\ndef test_distillation_on_amp_dp():\n train_experiment(None, dl.DataParallelAMPEngine())\n\n\[email protected](\n not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2 and SETTINGS.amp_required),\n reason=\"No CUDA>=2 or AMP found\",\n)\ndef test_distillation_on_amp_ddp():\n train_experiment(None, dl.DistributedDataParallelAMPEngine())\n\n\n# APEX\[email protected](\n not (IS_CUDA_AVAILABLE and SETTINGS.apex_required), reason=\"No CUDA or Apex found\",\n)\ndef test_distillation_on_apex():\n train_experiment(None, dl.APEXEngine())\n\n\[email protected](\n not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2 and SETTINGS.apex_required),\n reason=\"No CUDA>=2 or Apex found\",\n)\ndef test_distillation_on_apex_dp():\n train_experiment(None, dl.DataParallelAPEXEngine())\n\n\n# @mark.skipif(\n# not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2 and SETTINGS.apex_required),\n# reason=\"No CUDA>=2 or Apex found\",\n# )\n# def test_distillation_on_apex_ddp():\n# train_experiment(None, dl.DistributedDataParallelApexEngine())\n"
] | [
[
"torch.nn.functional.log_softmax",
"torch.nn.Linear",
"torch.nn.Flatten",
"torch.nn.functional.softmax",
"torch.no_grad",
"torch.nn.CrossEntropyLoss",
"torch.nn.KLDivLoss"
]
] |
mcasanova1445/models | [
"7214e17eb425963ec3d0295be215d5d26deaeb32",
"7214e17eb425963ec3d0295be215d5d26deaeb32",
"7214e17eb425963ec3d0295be215d5d26deaeb32"
] | [
"official/nlp/modeling/networks/albert_encoder_test.py",
"official/projects/edgetpu/vision/serving/tflite_imagenet_evaluator_run.py",
"official/nlp/tasks/masked_lm.py"
] | [
"# Copyright 2022 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for ALBERT transformer-based text encoder network.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import\nfrom official.nlp.modeling.networks import albert_encoder\n\n\n# This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It\n# guarantees forward compatibility of this code for the V2 switchover.\n@keras_parameterized.run_all_keras_modes\nclass AlbertEncoderTest(keras_parameterized.TestCase):\n\n def tearDown(self):\n super(AlbertEncoderTest, self).tearDown()\n tf.keras.mixed_precision.set_global_policy(\"float32\")\n\n @parameterized.named_parameters(\n dict(testcase_name=\"default\", expected_dtype=tf.float32),\n dict(testcase_name=\"with_float16_dtype\", expected_dtype=tf.float16),\n )\n def test_network_creation(self, expected_dtype):\n hidden_size = 32\n sequence_length = 21\n\n kwargs = dict(\n vocab_size=100,\n hidden_size=hidden_size,\n num_attention_heads=2,\n num_layers=3)\n if expected_dtype == tf.float16:\n tf.keras.mixed_precision.set_global_policy(\"mixed_float16\")\n\n # Create a small TransformerEncoder for testing.\n test_network = albert_encoder.AlbertEncoder(**kwargs)\n\n # Create the inputs (note that the first dimension is implicit).\n word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\n mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\n type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\n data, pooled = test_network([word_ids, mask, type_ids])\n\n expected_data_shape = [None, sequence_length, hidden_size]\n expected_pooled_shape = [None, hidden_size]\n self.assertAllEqual(expected_data_shape, data.shape.as_list())\n self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())\n\n # If float_dtype is set to float16, the data output is float32 (from a layer\n # norm) and pool output should be float16.\n self.assertEqual(tf.float32, data.dtype)\n self.assertEqual(expected_dtype, pooled.dtype)\n\n # ALBERT has additonal 'embedding_hidden_mapping_in' weights and\n # it shares transformer weights.\n self.assertNotEmpty(\n [x for x in test_network.weights if \"embedding_projection/\" in x.name])\n self.assertNotEmpty(\n [x for x in test_network.weights if \"transformer/\" in x.name])\n self.assertEmpty(\n [x for x in test_network.weights if \"transformer/layer\" in x.name])\n\n def test_network_invocation(self):\n hidden_size = 32\n sequence_length = 21\n vocab_size = 57\n num_types = 7\n num_layers = 3\n # Create a small TransformerEncoder for testing.\n test_network = albert_encoder.AlbertEncoder(\n vocab_size=vocab_size,\n embedding_width=8,\n hidden_size=hidden_size,\n num_attention_heads=2,\n num_layers=num_layers,\n 
type_vocab_size=num_types)\n # Create the inputs (note that the first dimension is implicit).\n word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\n mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\n type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\n data, pooled = test_network([word_ids, mask, type_ids])\n\n # Create a model based off of this network:\n model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])\n\n # Invoke the model. We can't validate the output data here (the model is too\n # complex) but this will catch structural runtime errors.\n batch_size = 3\n word_id_data = np.random.randint(\n vocab_size, size=(batch_size, sequence_length))\n mask_data = np.random.randint(2, size=(batch_size, sequence_length))\n type_id_data = np.random.randint(\n num_types, size=(batch_size, sequence_length))\n list_outputs = model.predict([word_id_data, mask_data, type_id_data])\n\n # Creates a TransformerEncoder with max_sequence_length != sequence_length\n max_sequence_length = 128\n test_network = albert_encoder.AlbertEncoder(\n vocab_size=vocab_size,\n embedding_width=8,\n hidden_size=hidden_size,\n max_sequence_length=max_sequence_length,\n num_attention_heads=2,\n num_layers=num_layers,\n type_vocab_size=num_types)\n model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])\n _ = model.predict([word_id_data, mask_data, type_id_data])\n\n # Tests dictionary outputs.\n test_network_dict = albert_encoder.AlbertEncoder(\n vocab_size=vocab_size,\n embedding_width=8,\n hidden_size=hidden_size,\n max_sequence_length=max_sequence_length,\n num_attention_heads=2,\n num_layers=num_layers,\n type_vocab_size=num_types,\n dict_outputs=True)\n _ = test_network_dict([word_ids, mask, type_ids])\n test_network_dict.set_weights(test_network.get_weights())\n list_outputs = test_network([word_id_data, mask_data, type_id_data])\n dict_outputs = test_network_dict(\n dict(\n input_word_ids=word_id_data,\n input_mask=mask_data,\n input_type_ids=type_id_data))\n self.assertAllEqual(list_outputs[0], dict_outputs[\"sequence_output\"])\n self.assertAllEqual(list_outputs[1], dict_outputs[\"pooled_output\"])\n self.assertLen(dict_outputs[\"pooled_output\"], num_layers)\n\n def test_serialize_deserialize(self):\n tf.keras.mixed_precision.set_global_policy(\"mixed_float16\")\n # Create a network object that sets all of its config options.\n kwargs = dict(\n vocab_size=100,\n embedding_width=8,\n hidden_size=32,\n num_layers=3,\n num_attention_heads=2,\n max_sequence_length=21,\n type_vocab_size=12,\n intermediate_size=1223,\n activation=\"relu\",\n dropout_rate=0.05,\n attention_dropout_rate=0.22,\n initializer=\"glorot_uniform\")\n network = albert_encoder.AlbertEncoder(**kwargs)\n\n expected_config = dict(kwargs)\n expected_config[\"activation\"] = tf.keras.activations.serialize(\n tf.keras.activations.get(expected_config[\"activation\"]))\n expected_config[\"initializer\"] = tf.keras.initializers.serialize(\n tf.keras.initializers.get(expected_config[\"initializer\"]))\n self.assertEqual(network.get_config(), expected_config)\n\n # Create another network object from the first object's config.\n new_network = (\n albert_encoder.AlbertEncoder.from_config(\n network.get_config()))\n\n # Validate that the config can be forced to JSON.\n _ = new_network.to_json()\n\n # If the serialization was successful, the new config should match the old.\n self.assertAllEqual(network.get_config(), new_network.get_config())\n\n\nif __name__ == \"__main__\":\n 
tf.test.main()\n",
"# Copyright 2022 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"Evaluates image classification accuracy using tflite_imagenet_evaluator.\n\nUsage:\ntflite_imagenet_evaluator_run --tflite_model_path=/PATH/TO/MODEL.tflite\n\"\"\"\n\nfrom typing import Sequence\nfrom absl import app\nfrom absl import flags\nimport tensorflow as tf\n\nfrom official.core import exp_factory\nfrom official.projects.edgetpu.vision.serving import tflite_imagenet_evaluator\nfrom official.projects.edgetpu.vision.tasks import image_classification\n\n\nflags.DEFINE_string('tflite_model_path', None,\n 'Path to the tflite file to be evaluated.')\nflags.DEFINE_integer('num_threads', 16, 'Number of local threads.')\nflags.DEFINE_integer('batch_size', 256, 'Batch size per thread.')\nflags.DEFINE_string(\n 'model_name', 'mobilenet_edgetpu_v2_xs',\n 'Model name to identify a registered data pipeline setup and use as the '\n 'validation dataset.')\n\nFLAGS = flags.FLAGS\n\n\ndef main(argv: Sequence[str]):\n if len(argv) > 1:\n raise app.UsageError('Too many command-line arguments.')\n\n with tf.io.gfile.GFile(FLAGS.tflite_model_path, 'rb') as f:\n model_content = f.read()\n\n config = exp_factory.get_exp_config(FLAGS.model_name)\n global_batch_size = FLAGS.num_threads * FLAGS.batch_size\n config.task.validation_data.global_batch_size = global_batch_size\n config.task.validation_data.dtype = 'float32'\n\n task = image_classification.EdgeTPUTask(config.task)\n dataset = task.build_inputs(config.task.validation_data)\n\n evaluator = tflite_imagenet_evaluator.AccuracyEvaluator(\n model_content=model_content,\n dataset=dataset,\n num_threads=FLAGS.num_threads)\n\n evals, corrects = evaluator.evaluate_all()\n accuracy = 100.0 * corrects / evals if evals > 0 else 0\n print('Final accuracy: {}, Evaluated: {}, Correct: {} '.format(\n accuracy, evals, corrects))\n\n\nif __name__ == '__main__':\n flags.mark_flag_as_required('tflite_model_path')\n app.run(main)\n",
"# Copyright 2022 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Masked language task.\"\"\"\n\nimport dataclasses\nimport tensorflow as tf\n\nfrom official.core import base_task\nfrom official.core import config_definitions as cfg\nfrom official.core import task_factory\nfrom official.modeling import tf_utils\nfrom official.nlp.configs import bert\nfrom official.nlp.configs import encoders\nfrom official.nlp.data import data_loader_factory\nfrom official.nlp.modeling import layers\nfrom official.nlp.modeling import models\n\n\[email protected]\nclass MaskedLMConfig(cfg.TaskConfig):\n \"\"\"The model config.\"\"\"\n model: bert.PretrainerConfig = bert.PretrainerConfig(cls_heads=[\n bert.ClsHeadConfig(\n inner_dim=768, num_classes=2, dropout_rate=0.1, name='next_sentence')\n ])\n # TODO(b/154564893): Mathematically, scale_loss should be True.\n # However, it works better with scale_loss being False.\n scale_loss: bool = False\n train_data: cfg.DataConfig = cfg.DataConfig()\n validation_data: cfg.DataConfig = cfg.DataConfig()\n\n\n@task_factory.register_task_cls(MaskedLMConfig)\nclass MaskedLMTask(base_task.Task):\n \"\"\"Task object for Mask language modeling.\"\"\"\n\n def _build_encoder(self, encoder_cfg):\n return encoders.build_encoder(encoder_cfg)\n\n def build_model(self, params=None):\n config = params or self.task_config.model\n encoder_cfg = config.encoder\n encoder_network = self._build_encoder(encoder_cfg)\n cls_heads = [\n layers.ClassificationHead(**cfg.as_dict()) for cfg in config.cls_heads\n ] if config.cls_heads else []\n return models.BertPretrainerV2(\n mlm_activation=tf_utils.get_activation(config.mlm_activation),\n mlm_initializer=tf.keras.initializers.TruncatedNormal(\n stddev=config.mlm_initializer_range),\n encoder_network=encoder_network,\n classification_heads=cls_heads)\n\n def build_losses(self,\n labels,\n model_outputs,\n metrics,\n aux_losses=None) -> tf.Tensor:\n with tf.name_scope('MaskedLMTask/losses'):\n metrics = dict([(metric.name, metric) for metric in metrics])\n lm_prediction_losses = tf.keras.losses.sparse_categorical_crossentropy(\n labels['masked_lm_ids'],\n tf.cast(model_outputs['mlm_logits'], tf.float32),\n from_logits=True)\n lm_label_weights = labels['masked_lm_weights']\n lm_numerator_loss = tf.reduce_sum(lm_prediction_losses *\n lm_label_weights)\n lm_denominator_loss = tf.reduce_sum(lm_label_weights)\n mlm_loss = tf.math.divide_no_nan(lm_numerator_loss, lm_denominator_loss)\n metrics['lm_example_loss'].update_state(mlm_loss)\n if 'next_sentence_labels' in labels:\n sentence_labels = labels['next_sentence_labels']\n sentence_outputs = tf.cast(\n model_outputs['next_sentence'], dtype=tf.float32)\n sentence_loss = tf.reduce_mean(\n tf.keras.losses.sparse_categorical_crossentropy(\n sentence_labels, sentence_outputs, from_logits=True))\n metrics['next_sentence_loss'].update_state(sentence_loss)\n total_loss = mlm_loss + sentence_loss\n else:\n total_loss = mlm_loss\n\n if aux_losses:\n 
total_loss += tf.add_n(aux_losses)\n return total_loss\n\n def build_inputs(self, params, input_context=None):\n \"\"\"Returns tf.data.Dataset for pretraining.\"\"\"\n if params.input_path == 'dummy':\n\n def dummy_data(_):\n dummy_ids = tf.zeros((1, params.seq_length), dtype=tf.int32)\n dummy_lm = tf.zeros((1, params.max_predictions_per_seq), dtype=tf.int32)\n return dict(\n input_word_ids=dummy_ids,\n input_mask=dummy_ids,\n input_type_ids=dummy_ids,\n masked_lm_positions=dummy_lm,\n masked_lm_ids=dummy_lm,\n masked_lm_weights=tf.cast(dummy_lm, dtype=tf.float32),\n next_sentence_labels=tf.zeros((1, 1), dtype=tf.int32))\n\n dataset = tf.data.Dataset.range(1)\n dataset = dataset.repeat()\n dataset = dataset.map(\n dummy_data, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n return dataset\n\n return data_loader_factory.get_data_loader(params).load(input_context)\n\n def build_metrics(self, training=None):\n del training\n metrics = [\n tf.keras.metrics.SparseCategoricalAccuracy(name='masked_lm_accuracy'),\n tf.keras.metrics.Mean(name='lm_example_loss')\n ]\n # TODO(hongkuny): rethink how to manage metrics creation with heads.\n if self.task_config.train_data.use_next_sentence_label:\n metrics.append(\n tf.keras.metrics.SparseCategoricalAccuracy(\n name='next_sentence_accuracy'))\n metrics.append(tf.keras.metrics.Mean(name='next_sentence_loss'))\n return metrics\n\n def process_metrics(self, metrics, labels, model_outputs):\n with tf.name_scope('MaskedLMTask/process_metrics'):\n metrics = dict([(metric.name, metric) for metric in metrics])\n if 'masked_lm_accuracy' in metrics:\n metrics['masked_lm_accuracy'].update_state(\n labels['masked_lm_ids'], model_outputs['mlm_logits'],\n labels['masked_lm_weights'])\n if 'next_sentence_accuracy' in metrics:\n metrics['next_sentence_accuracy'].update_state(\n labels['next_sentence_labels'], model_outputs['next_sentence'])\n\n def train_step(self, inputs, model: tf.keras.Model,\n optimizer: tf.keras.optimizers.Optimizer, metrics):\n \"\"\"Does forward and backward.\n\n Args:\n inputs: a dictionary of input tensors.\n model: the model, forward pass definition.\n optimizer: the optimizer for this training step.\n metrics: a nested structure of metrics objects.\n\n Returns:\n A dictionary of logs.\n \"\"\"\n with tf.GradientTape() as tape:\n outputs = model(inputs, training=True)\n # Computes per-replica loss.\n loss = self.build_losses(\n labels=inputs,\n model_outputs=outputs,\n metrics=metrics,\n aux_losses=model.losses)\n if self.task_config.scale_loss:\n # Scales loss as the default gradients allreduce performs sum inside the\n # optimizer.\n scaled_loss = loss / tf.distribute.get_strategy().num_replicas_in_sync\n tvars = model.trainable_variables\n if self.task_config.scale_loss:\n grads = tape.gradient(scaled_loss, tvars)\n else:\n grads = tape.gradient(loss, tvars)\n optimizer.apply_gradients(list(zip(grads, tvars)))\n self.process_metrics(metrics, inputs, outputs)\n return {self.loss: loss}\n\n def validation_step(self, inputs, model: tf.keras.Model, metrics):\n \"\"\"Validatation step.\n\n Args:\n inputs: a dictionary of input tensors.\n model: the keras.Model.\n metrics: a nested structure of metrics objects.\n\n Returns:\n A dictionary of logs.\n \"\"\"\n outputs = self.inference_step(inputs, model)\n loss = self.build_losses(\n labels=inputs,\n model_outputs=outputs,\n metrics=metrics,\n aux_losses=model.losses)\n self.process_metrics(metrics, inputs, outputs)\n return {self.loss: loss}\n"
] | [
[
"tensorflow.keras.mixed_precision.set_global_policy",
"tensorflow.keras.initializers.get",
"tensorflow.keras.Model",
"numpy.random.randint",
"tensorflow.keras.activations.get",
"tensorflow.test.main",
"tensorflow.keras.Input"
],
[
"tensorflow.io.gfile.GFile"
],
[
"tensorflow.zeros",
"tensorflow.keras.metrics.SparseCategoricalAccuracy",
"tensorflow.add_n",
"tensorflow.distribute.get_strategy",
"tensorflow.keras.initializers.TruncatedNormal",
"tensorflow.keras.metrics.Mean",
"tensorflow.cast",
"tensorflow.name_scope",
"tensorflow.GradientTape",
"tensorflow.keras.losses.sparse_categorical_crossentropy",
"tensorflow.data.Dataset.range",
"tensorflow.math.divide_no_nan",
"tensorflow.reduce_sum"
]
] |
kouroshHakha/circuit-fewshot-code | [
"32007e119da30632736868a3f643027624bf08d2"
] | [
"configs/opamp/biased_pmos_gain/15-layer-ft-all-pool-0.5/config.py"
] | [
"import time\n\nimport hashlib\nimport torch\nfrom torch_geometric.data import DataLoader\n\nfrom cgl.utils.params import ParamDict\nfrom cgl.data.graph_data import CircuitInMemDataset, CircuitGraphDataset\n\n# from cgl.models.gnn import DeepGENNet\n\ns = time.time()\nprint('Loading the dataset ...')\nroot = '/store/nosnap/results/ngspice_biased_pmos_gain/two_stage_biased_pmos'\ncir_dset = CircuitGraphDataset(root=root, mode='train', circuit_type='opamp_biased_pmos')\nnode_output_idx = next(iter(cir_dset.graph_nodes.values()))['V_net6']\nvout_idx = torch.where((torch.where(cir_dset[0].output_node_mask)[0] == node_output_idx))[0].item()\n\n# gain mean and variance\ngmean, gstd = -1.1057, 0.6559\n\ndef transform_fn(data):\n data.gain = (data.vac_mag[vout_idx, 0].float() - gmean) / gstd\n return data\n\ndset = CircuitInMemDataset(root=root, mode='train', transform=transform_fn)\nprint(f'Dataset was loaded in {time.time() - s:.6f} seconds.')\n\nsample_data = dset[0]\n\nfract = 0.05\nsplits = dset.splits\ntrain_idx = int(fract * len(splits['train']))\ntrain_dset = dset[splits['train'][:train_idx]]\nvalid_dset = dset[splits['valid']]\ntest_dset = dset[splits['test']]\n\nbackbone_config = 'configs/opamp/dc/deep_gen_net/15-layer/config.py'\nbb_id = hashlib.sha256(backbone_config.encode('utf-8')).hexdigest()[:6]\n\n\nlr = 1e-3\nactivation = 'relu'\nhidden_channels = 128\nnum_layers = 15\ntrain_batch_size = min(256, len(train_dset))\nvalid_batch_size = min(256, len(valid_dset)) \ntest_batch_size = min(256, len(test_dset)) \n\nexp_name = f'GAIN_PMOS_FT_Pool_{fract*10:.1f}_DeepGEN_h{hidden_channels}_nl{num_layers}_bs{train_batch_size}_lr{lr:.0e}_{activation}'\n\nmdl_config = ParamDict(\n exp_name=exp_name,\n num_nodes=sample_data.vdc.shape[0],\n in_channels=sample_data.x.shape[-1] + sample_data.type_tens.shape[-1],\n hidden_channels=hidden_channels,\n num_layers=num_layers,\n dropout=0,\n activation=activation,\n bins=50,\n lr=lr,\n freeze_backbone=False,\n use_pooling=True,\n output_label='gain',\n output_sigmoid=False,\n lr_warmup={'peak_lr': lr, 'weight_decay': 0, \n 'warmup_updates': 50, 'tot_updates': 20000, 'end_lr': 5e-5},\n)\n\ntrain_dloader = DataLoader(train_dset, batch_size=train_batch_size, shuffle=True, num_workers=0)\nvalid_dloader = DataLoader(valid_dset, batch_size=valid_batch_size, num_workers=0)\ntest_dloader = DataLoader(test_dset, batch_size=test_batch_size, num_workers=0)\n\n# .to converts the weight dtype to match input\n# model = DeepGENNet(mdl_config).to(sample_data.x.dtype)\n\n"
] | [
[
"torch.where"
]
] |
Zamwell/pandapower | [
"ce51946342109e969b87b60c8883d7eec02d3060"
] | [
"pandapower/plotting/plotly/traces.py"
] | [
"# -*- coding: utf-8 -*-\n\n# Copyright (c) 2016-2019 by University of Kassel and Fraunhofer Institute for Energy Economics\n# and Energy System Technology (IEE), Kassel. All rights reserved.\n\n\nimport math\n\nimport numpy as np\nimport pandas as pd\nfrom packaging import version\nfrom collections.abc import Iterable\n\nfrom pandapower.plotting.plotly.get_colors import get_plotly_color, get_plotly_cmap\nfrom pandapower.plotting.plotly.mapbox_plot import _on_map_test, _get_mapbox_token, MapboxTokenMissing\n\ntry:\n import pplog as logging\nexcept ImportError:\n import logging\nlogger = logging.getLogger(__name__)\n\ntry:\n from plotly import __version__ as plotly_version\n from plotly.graph_objs.scatter.marker import ColorBar\n from plotly.graph_objs import Figure, Layout\n from plotly.graph_objs.layout import XAxis, YAxis\n from plotly.graph_objs.scatter import Line, Marker\n from plotly.graph_objs.scattermapbox import Line as scmLine\n from plotly.graph_objs.scattermapbox import Marker as scmMarker\nexcept ImportError:\n logger.info(\"Failed to import plotly - interactive plotting will not be available\")\n\n\ndef version_check():\n if version.parse(plotly_version) < version.parse(\"3.1.1\"):\n raise UserWarning(\"Your plotly version {} is no longer supported.\\r\\n\"\n \"Please upgrade your python-plotly installation, \"\n \"e.g., via pip install --upgrade plotly\".format(__version__))\n\n\ndef _in_ipynb():\n \"\"\"\n an auxiliary function which checks if plot is called from a jupyter-notebook or not\n \"\"\"\n import __main__ as main\n return not hasattr(main, '__file__')\n\n\ndef sum_line_length(pts):\n pt_diff = lambda p: (p[0][0] - p[1][0], p[0][1] - p[1][1])\n diffs = map(pt_diff, zip(pts[:-1], pts[1:]))\n line_length = sum(math.hypot(d1, d2) for d1, d2 in diffs)\n return line_length\n\n\ndef get_line_neutral(coord):\n if len(coord) == 1:\n return coord[0]\n half_length = sum_line_length(coord) / 2.0\n length = 0.0\n ind = 0\n while length < half_length:\n ind += 1\n length = sum_line_length(coord[:ind])\n\n start_coord = coord[ind - 2]\n end_coord = coord[ind - 1]\n mid = [(a1 + a2) / 2.0 for a1, a2 in zip(start_coord, end_coord)]\n\n return mid\n\n\ndef create_edge_center_trace(line_trace, size=1, patch_type=\"circle\", color=\"white\", infofunc=None,\n trace_name='edge_center', use_line_geodata=False):\n \"\"\"\n Creates a plotly trace of pandapower buses.\n\n INPUT:\n **line traces** (from pandapowerNet) - The already generated line traces with center geodata\n\n OPTIONAL:\n\n **size** (int, 5) - patch size\n\n **patch_type** (str, \"circle\") - patch type, can be\n\n - \"circle\" for a circle\n - \"square\" for a rectangle\n - \"diamond\" for a diamond\n - much more pathc types at https://plot.ly/python/reference/#scatter-marker\n\n **infofunc** (pd.Series, None) - hoverinfo for each trace element. 
Indices should correspond to the pandapower element indices\n\n **trace_name** (String, \"buses\") - name of the trace which will appear in the legend\n\n **color** (String, \"blue\") - color of buses in the trace\n\n \"\"\"\n # color = get_plotly_color(color)\n\n center_trace = dict(type='scatter', text=[], mode='markers', hoverinfo='text', name=trace_name,\n marker=dict(color=color, size=size, symbol=patch_type))\n\n if not use_line_geodata:\n center_trace['x'], center_trace['y'] = (line_trace[0][\"x\"][1::4], line_trace[0][\"y\"][1::4])\n else:\n x, y = [], []\n for trace in line_trace:\n coord = list(zip(trace[\"x\"], trace[\"y\"]))\n mid_coord = get_line_neutral(coord)\n x.append(mid_coord[0])\n y.append(mid_coord[1])\n\n center_trace['x'], center_trace['y'] = (x, y)\n\n center_trace['text'] = infofunc\n\n return center_trace\n\n\ndef create_bus_trace(net, buses=None, size=5, patch_type=\"circle\", color=\"blue\", infofunc=None,\n trace_name='buses', legendgroup=None, cmap=None, cmap_vals=None,\n cbar_title=None, cmin=None, cmax=None, cpos=1.0, colormap_column=\"vm_pu\"):\n \"\"\"\n Creates a plotly trace of pandapower buses.\n\n INPUT:\n **net** (pandapowerNet) - The pandapower network\n\n OPTIONAL:\n **buses** (list, None) - The buses for which the collections are created.\n If None, all buses in the network are considered.\n\n **size** (int, 5) - patch size\n\n **patch_type** (str, \"circle\") - patch type, can be\n\n - \"circle\" for a circle\n - \"square\" for a rectangle\n - \"diamond\" for a diamond\n - much more pathc types at https://plot.ly/python/reference/#scatter-marker\n\n **infofunc** (pd.Series, None) - hoverinfo for bus elements. Indices should correspond to the pandapower element indices\n\n **trace_name** (String, \"buses\") - name of the trace which will appear in the legend\n\n **color** (String, \"blue\") - color of buses in the trace\n\n **cmap** (String, None) - name of a colormap which exists within plotly (Greys, YlGnBu, Greens, YlOrRd,\n Bluered, RdBu, Reds, Blues, Picnic, Rainbow, Portland, Jet, Hot, Blackbody, Earth, Electric, Viridis)\n alternatively a custom discrete colormap can be used\n\n **cmap_vals** (list, None) - values used for coloring using colormap\n\n **cbar_title** (String, None) - title for the colorbar\n\n **cmin** (float, None) - colorbar range minimum\n\n **cmax** (float, None) - colorbar range maximum\n\n **cpos** (float, 1.1) - position of the colorbar\n\n **colormap_column** (str, \"vm_pu\") - set color of bus according to this variable\n\n \"\"\"\n color = get_plotly_color(color)\n\n bus_trace = dict(type='scatter', text=[], mode='markers', hoverinfo='text', name=trace_name,\n marker=dict(color=color, size=size, symbol=patch_type))\n\n buses = net.bus.index.tolist() if buses is None else list(buses)\n bus_plot_index = [b for b in buses if b in list(set(buses) & set(net.bus_geodata.index))]\n\n bus_trace['x'], bus_trace['y'] = (net.bus_geodata.loc[bus_plot_index, 'x'].tolist(),\n net.bus_geodata.loc[bus_plot_index, 'y'].tolist())\n\n if not isinstance(infofunc, pd.Series) and isinstance(infofunc, Iterable) and len(infofunc) == len(buses):\n infofunc = pd.Series(index=buses, data=infofunc)\n\n bus_trace['text'] = net.bus.loc[bus_plot_index, 'name'] if infofunc is None else infofunc.loc[buses]\n\n if legendgroup:\n bus_trace['legendgroup'] = legendgroup\n\n # if color map is set\n if cmap is not None:\n # TODO introduce discrete colormaps (see contour plots in plotly)\n # if cmap_vals are not given\n\n cmap = 'Jet' if cmap is True else 
cmap\n\n if cmap_vals is not None:\n cmap_vals = cmap_vals\n else:\n if net.res_line.shape[0] == 0:\n logger.error(\"There are no power flow results for buses voltage magnitudes which are default for bus \"\n \"colormap coloring...\"\n \"set cmap_vals input argument if you want colormap according to some specific values...\")\n cmap_vals = net.res_bus.loc[bus_plot_index, colormap_column].values\n\n cmap_vals = net.res_bus.loc[bus_plot_index, colormap_column] if cmap_vals is None else cmap_vals\n\n cmin = cmin if cmin else cmap_vals.min()\n cmax = cmax if cmax else cmap_vals.max()\n\n bus_trace['marker'] = Marker(size=size,\n color=cmap_vals, cmin=cmin, cmax=cmax,\n colorscale=cmap,\n colorbar=ColorBar(thickness=10,\n x=cpos),\n symbol=patch_type\n )\n\n if cbar_title:\n bus_trace['marker']['colorbar']['title'] = cbar_title\n\n bus_trace['marker']['colorbar']['title']['side'] = 'right'\n\n return [bus_trace]\n\n\ndef _get_line_geodata_plotly(net, lines, use_line_geodata):\n xs = []\n ys = []\n if use_line_geodata:\n for line_ind, _ in lines.iterrows():\n line_coords = net.line_geodata.loc[line_ind, 'coords']\n linex, liney = list(zip(*line_coords))\n xs += linex\n xs += [None]\n ys += liney\n ys += [None]\n else:\n # getting x and y values from bus_geodata for from and to side of each line\n\n from_bus = net.bus_geodata.loc[lines.from_bus, 'x'].tolist()\n to_bus = net.bus_geodata.loc[lines.to_bus, 'x'].tolist()\n # center point added because of the hovertool\n center = (np.array(from_bus) + np.array(to_bus)) / 2\n none_list = [None] * len(from_bus)\n xs = np.array([from_bus, center, to_bus, none_list]).T.flatten().tolist()\n\n from_bus = net.bus_geodata.loc[lines.from_bus, 'y'].tolist()\n to_bus = net.bus_geodata.loc[lines.to_bus, 'y'].tolist()\n # center point added because of the hovertool\n center = (np.array(from_bus) + np.array(to_bus)) / 2\n none_list = [None] * len(from_bus)\n ys = np.array([from_bus, center, to_bus, none_list]).T.flatten().tolist()\n\n # [:-1] is because the trace will not appear on maps if None is at the end\n return xs[:-1], ys[:-1]\n\n\ndef create_line_trace(net, lines=None, use_line_geodata=True, respect_switches=False, width=1.0,\n color='grey', infofunc=None, trace_name='lines', legendgroup=None,\n cmap=None, cbar_title=None, show_colorbar=True, cmap_vals=None, cmin=None,\n cmax=None, cpos=1.1):\n \"\"\"\n Creates a plotly trace of pandapower lines.\n\n INPUT:\n **net** (pandapowerNet) - The pandapower network\n\n OPTIONAL:\n **lines** (list, None) - The lines for which the collections are created.\n If None, all lines in the network are considered.\n\n **width** (int, 1) - line width\n\n **respect_switches** (bool, False) - flag for consideration of disconnected lines\n\n **infofunc** (pd.Series, None) - hoverinfo for line elements. Indices should correspond to the pandapower element indices\n\n **trace_name** (String, \"lines\") - name of the trace which will appear in the legend\n\n **color** (String, \"grey\") - color of lines in the trace\n\n **legendgroup** (String, None) - defines groups of layers that will be displayed in a legend\n e.g. 
groups according to voltage level (as used in `vlevel_plotly`)\n\n **cmap** (String, None) - name of a colormap which exists within plotly if set to True default `Jet`\n colormap is used, alternative colormaps : Greys, YlGnBu, Greens, YlOrRd,\n Bluered, RdBu, Reds, Blues, Picnic, Rainbow, Portland, Jet, Hot, Blackbody, Earth, Electric, Viridis\n\n **cmap_vals** (list, None) - values used for coloring using colormap\n\n **show_colorbar** (bool, False) - flag for showing or not corresponding colorbar\n\n **cbar_title** (String, None) - title for the colorbar\n\n **cmin** (float, None) - colorbar range minimum\n\n **cmax** (float, None) - colorbar range maximum\n\n **cpos** (float, 1.1) - position of the colorbar\n\n \"\"\"\n\n color = get_plotly_color(color)\n\n # defining lines to be plot\n lines = net.line.index.tolist() if lines is None else list(lines)\n if len(lines) == 0:\n return []\n\n if infofunc is not None:\n if not isinstance(infofunc, pd.Series) and isinstance(infofunc, Iterable) and len(infofunc) == len(lines):\n infofunc = pd.Series(index=lines, data=infofunc)\n if len(infofunc) != len(lines) and len(infofunc) != len(net.line):\n raise UserWarning(\"Different amount of hover info than lines to plot\")\n assert isinstance(infofunc, pd.Series), \\\n \"infofunc should be a pandas series with the net.line.index to the infofunc contents\"\n\n no_go_lines = set()\n if respect_switches:\n no_go_lines = set(lines) & set(net.switch.element[(net.switch.et == \"l\") & (net.switch.closed == 0)])\n\n lines_to_plot = net.line.loc[set(net.line.index) & (set(lines) - no_go_lines)]\n no_go_lines_to_plot = None\n use_line_geodata = use_line_geodata if net.line_geodata.shape[0] > 0 else False\n\n if use_line_geodata:\n lines_to_plot = lines_to_plot.loc[set(lines_to_plot.index) & set(net.line_geodata.index)]\n else:\n lines_with_geodata = lines_to_plot.from_bus.isin(net.bus_geodata.index) & \\\n lines_to_plot.to_bus.isin(net.bus_geodata.index)\n lines_to_plot = lines_to_plot.loc[lines_with_geodata]\n\n cmap_lines = None\n if cmap is not None:\n # workaround: if colormap plot is used, each line need to be separate scatter object because\n # plotly still doesn't support appropriately colormap for line objects\n # TODO correct this when plotly solves existing github issue about Line colorbar\n\n cmap = 'jet' if cmap is True else cmap\n\n if cmap_vals is not None:\n if not isinstance(cmap_vals, np.ndarray):\n cmap_vals = np.asarray(cmap_vals)\n else:\n if net.res_line.shape[0] == 0:\n logger.error(\"There are no power flow results for lines which are default for line colormap coloring...\"\n \"set cmap_vals input argument if you want colormap according to some specific values...\")\n cmap_vals = net.res_line.loc[lines_to_plot.index, 'loading_percent'].values\n\n cmap_lines = get_plotly_cmap(cmap_vals, cmap_name=cmap, cmin=cmin, cmax=cmax)\n if len(cmap_lines) == len(net.line):\n # some lines are not plotted although cmap_value were provided for all lines\n line_idx_map = dict(zip(net.line.loc[lines].index.tolist(), range(len(lines))))\n cmap_lines = [cmap_lines[line_idx_map[idx]] for idx in lines_to_plot.index]\n else:\n assert len(cmap_lines) == len(lines_to_plot), \\\n \"Different amounts of cmap values and lines to plot were supplied\"\n\n line_traces = []\n for col_i, (idx, line) in enumerate(lines_to_plot.iterrows()):\n line_color = color\n line_info = line['name']\n if cmap is not None:\n try:\n line_color = cmap_lines[col_i]\n line_info = line['name'] if infofunc is None else 
infofunc.loc[idx]\n except IndexError:\n logger.warning(\"No color and info for line {:d} (name: {}) available\".format(idx, line['name']))\n\n line_trace = dict(type='scatter', text=[], hoverinfo='text', mode='lines', name=trace_name,\n line=Line(width=width, color=color))\n\n line_trace['x'], line_trace['y'] = _get_line_geodata_plotly(net, lines_to_plot.loc[idx:idx], use_line_geodata)\n\n line_trace['line']['color'] = line_color\n\n line_trace['text'] = line_info\n\n line_traces.append(line_trace)\n\n if show_colorbar and cmap is not None:\n\n cmin = cmin if cmin else cmap_vals.min()\n cmax = cmax if cmax else cmap_vals.max()\n try:\n # TODO for custom colormaps\n cbar_cmap_name = 'Jet' if cmap is 'jet' else cmap\n # workaround to get colorbar for lines (an unvisible node is added)\n lines_cbar = dict(type='scatter', x=[net.bus_geodata.x[0]], y=[net.bus_geodata.y[0]], mode='markers',\n marker=Marker(size=0, cmin=cmin, cmax=cmax,\n color='rgb(255,255,255)',\n colorscale=cbar_cmap_name,\n colorbar=ColorBar(thickness=10,\n x=cpos),\n ))\n if cbar_title:\n lines_cbar['marker']['colorbar']['title'] = cbar_title\n\n lines_cbar['marker']['colorbar']['title']['side'] = 'right'\n\n line_traces.append(lines_cbar)\n except:\n pass\n\n if len(no_go_lines) > 0:\n no_go_lines_to_plot = net.line.loc[no_go_lines]\n for idx, line in no_go_lines_to_plot.iterrows():\n line_color = color\n line_trace = dict(type='scatter',\n text=[], hoverinfo='text', mode='lines', name='disconnected lines',\n line=Line(width=width / 2, color='grey', dash='dot'))\n\n line_trace['x'], line_trace['y'] = _get_line_geodata_plotly(net, no_go_lines_to_plot.loc[idx:idx], use_line_geodata)\n\n line_trace['line']['color'] = line_color\n try:\n line_trace['text'] = infofunc.loc[idx]\n except (KeyError, IndexError):\n line_trace[\"text\"] = line['name']\n\n line_traces.append(line_trace)\n\n if legendgroup:\n line_trace['legendgroup'] = legendgroup\n\n # sort infofunc so that it is the correct order lines_to_plot + no_go_lines_to_plot\n if infofunc is not None:\n if not isinstance(infofunc, pd.Series) and isinstance(infofunc, Iterable) and len(infofunc) == len(net.line):\n infofunc = pd.Series(index=net.line.index, data=infofunc)\n assert isinstance(infofunc, pd.Series), \\\n \"infofunc should be a pandas series with the net.line.index to the infofunc contents\"\n sorted_idx = lines_to_plot.index.tolist()\n if no_go_lines_to_plot is not None:\n sorted_idx += no_go_lines_to_plot.index.tolist()\n infofunc = infofunc.loc[sorted_idx]\n\n center_trace = create_edge_center_trace(line_traces, color=color, infofunc=infofunc,\n use_line_geodata=use_line_geodata)\n line_traces.append(center_trace)\n return line_traces\n\n\ndef create_trafo_trace(net, trafos=None, color='green', width=5, infofunc=None, cmap=None,\n trace_name='trafos', cmin=None, cmax=None, cmap_vals=None, use_line_geodata=None):\n \"\"\"\n Creates a plotly trace of pandapower trafos.\n\n INPUT:\n **net** (pandapowerNet) - The pandapower network\n\n OPTIONAL:\n **trafos** (list, None) - The trafos for which the collections are created.\n If None, all trafos in the network are considered.\n\n **width** (int, 5) - line width\n\n **infofunc** (pd.Series, None) - hoverinfo for trafo elements. 
Indices should correspond to the pandapower element indices\n\n **trace_name** (String, \"lines\") - name of the trace which will appear in the legend\n\n **color** (String, \"green\") - color of lines in the trace\n\n **cmap** (bool, False) - name of a colormap which exists within plotly (Greys, YlGnBu, Greens, YlOrRd,\n Bluered, RdBu, Reds, Blues, Picnic, Rainbow, Portland, Jet, Hot, Blackbody, Earth, Electric, Viridis)\n\n **cmap_vals** (list, None) - values used for coloring using colormap\n\n **cbar_title** (String, None) - title for the colorbar\n\n **cmin** (float, None) - colorbar range minimum\n\n **cmax** (float, None) - colorbar range maximum\n\n\n \"\"\"\n color = get_plotly_color(color)\n\n # defining lines to be plot\n trafos = net.trafo.index.tolist() if trafos is None else list(trafos)\n if len(trafos) == 0:\n return []\n\n trafo_buses_with_geodata = net.trafo.hv_bus.isin(net.bus_geodata.index) & \\\n net.trafo.lv_bus.isin(net.bus_geodata.index)\n\n trafos_mask = net.trafo.index.isin(trafos)\n trafos_to_plot = net.trafo[trafo_buses_with_geodata & trafos_mask]\n\n if infofunc is not None:\n if not isinstance(infofunc, pd.Series) and isinstance(infofunc, Iterable) and len(infofunc) == len(trafos):\n infofunc = pd.Series(index=trafos, data=infofunc)\n assert isinstance(infofunc, pd.Series), \\\n \"infofunc should be a pandas series with the net.trafo.index to the infofunc contents\"\n infofunc = infofunc.loc[trafos_to_plot.index]\n\n cmap_colors = []\n if cmap is not None:\n cmap = 'jet' if cmap is None else cmap\n\n cmin = 0 if cmin is None else cmin\n cmax = 100 if cmin is None else cmax\n\n if cmap_vals is not None:\n cmap_vals = cmap_vals\n else:\n if net.res_trafo.shape[0] == 0:\n logger.error(\"There are no power flow results for lines which are default for line colormap coloring...\"\n \"set cmap_vals input argument if you want colormap according to some specific values...\")\n cmap_vals = net.res_trafo.loc[trafos_to_plot.index, 'loading_percent'].values\n\n cmap_colors = get_plotly_cmap(cmap_vals, cmap_name=cmap, cmin=cmin, cmax=cmax)\n\n trafo_traces = []\n for col_i, (idx, trafo) in enumerate(trafos_to_plot.iterrows()):\n if cmap is not None:\n color = cmap_colors[col_i]\n\n trafo_trace = dict(type='scatter', text=[], line=Line(width=width, color=color),\n hoverinfo='text', mode='lines', name=trace_name)\n\n trafo_trace['text'] = trafo['name'] if infofunc is None else infofunc.loc[idx]\n\n from_bus = net.bus_geodata.loc[trafo.hv_bus, 'x']\n to_bus = net.bus_geodata.loc[trafo.lv_bus, 'x']\n trafo_trace['x'] = [from_bus, (from_bus + to_bus) / 2, to_bus]\n\n from_bus = net.bus_geodata.loc[trafo.hv_bus, 'y']\n to_bus = net.bus_geodata.loc[trafo.lv_bus, 'y']\n trafo_trace['y'] = [from_bus, (from_bus + to_bus) / 2, to_bus]\n\n trafo_traces.append(trafo_trace)\n\n center_trace = create_edge_center_trace(trafo_traces, color=color, infofunc=infofunc,\n use_line_geodata=use_line_geodata)\n trafo_traces.append(center_trace)\n return trafo_traces\n\n\ndef draw_traces(traces, on_map=False, map_style='basic', showlegend=True, figsize=1,\n aspectratio='auto', filename=\"temp-plot.html\"):\n \"\"\"\n plots all the traces (which can be created using :func:`create_bus_trace`, :func:`create_line_trace`,\n :func:`create_trafo_trace`)\n to PLOTLY (see https://plot.ly/python/)\n\n INPUT:\n **traces** - list of dicts which correspond to plotly traces\n generated using: `create_bus_trace`, `create_line_trace`, `create_trafo_trace`\n\n OPTIONAL:\n **on_map** (bool, False) - enables using 
mapbox plot in plotly\n\n **map_style** (str, 'basic') - enables using mapbox plot in plotly\n\n - 'streets'\n - 'bright'\n - 'light'\n - 'dark'\n - 'satellite'\n\n **showlegend** (bool, 'True') - enables legend display\n\n **figsize** (float, 1) - aspectratio is multiplied by it in order to get final image size\n\n **aspectratio** (tuple, 'auto') - when 'auto' it preserves original aspect ratio of the network geodata\n any custom aspectration can be given as a tuple, e.g. (1.2, 1)\n\n **filename** (str, \"temp-plot.html\") - plots to a html file called filename\n\n \"\"\"\n\n if on_map:\n try:\n on_map = _on_map_test(traces[0]['x'][0], traces[0]['y'][0])\n except:\n logger.warning(\"Test if geo-data are in lat/long cannot be performed using geopy -> \"\n \"eventual plot errors are possible.\")\n\n if on_map is False:\n logger.warning(\"Existing geodata are not real lat/lon geographical coordinates. -> \"\n \"plot on maps is not possible.\\n\"\n \"Use geo_data_to_latlong(net, projection) to transform geodata from specific projection.\")\n\n if on_map:\n # change traces for mapbox\n # change trace_type to scattermapbox and rename x to lat and y to lon\n for trace in traces:\n trace['lat'] = trace.pop('x')\n trace['lon'] = trace.pop('y')\n trace['type'] = 'scattermapbox'\n if \"line\" in trace and isinstance(trace[\"line\"], Line):\n # scattermapboxplot lines do not support dash for some reason, make it a red line instead\n if \"dash\" in trace[\"line\"]._props:\n _prps = dict(trace[\"line\"]._props)\n _prps.pop(\"dash\", None)\n _prps[\"color\"] = \"red\"\n trace[\"line\"] = scmLine(_prps)\n else:\n trace[\"line\"] = scmLine(dict(trace[\"line\"]._props))\n elif \"marker\" in trace and isinstance(trace[\"marker\"], Marker):\n trace[\"marker\"] = scmMarker(trace[\"marker\"]._props)\n\n # setting Figure object\n fig = Figure(data=traces, # edge_trace\n layout=Layout(\n titlefont=dict(size=16),\n showlegend=showlegend,\n autosize=True if aspectratio is 'auto' else False,\n hovermode='closest',\n margin=dict(b=5, l=5, r=5, t=5),\n # annotations=[dict(\n # text=\"\",\n # showarrow=False,\n # xref=\"paper\", yref=\"paper\",\n # x=0.005, y=-0.002)],\n xaxis=XAxis(showgrid=False, zeroline=False, showticklabels=False),\n yaxis=YAxis(showgrid=False, zeroline=False, showticklabels=False),\n # legend=dict(x=0, y=1.0)\n ), )\n\n # check if geodata are real geographycal lat/lon coordinates using geopy\n\n if on_map:\n try:\n mapbox_access_token = _get_mapbox_token()\n except Exception:\n logger.exception('mapbox token required for map plots. 
'\n 'Get Mapbox token by signing in to https://www.mapbox.com/.\\n'\n 'After getting a token, set it to pandapower using:\\n'\n 'pandapower.plotting.plotly.mapbox_plot.set_mapbox_token(\\'<token>\\')')\n raise MapboxTokenMissing\n\n fig['layout']['mapbox'] = dict(accesstoken=mapbox_access_token,\n bearing=0,\n center=dict(lat=pd.Series(traces[0]['lat']).dropna().mean(),\n lon=pd.Series(traces[0]['lon']).dropna().mean()),\n style=map_style,\n pitch=0,\n zoom=11)\n\n # default aspectratio: if on_map use auto, else use 'original'\n aspectratio = 'original' if not on_map and aspectratio is 'auto' else aspectratio\n\n if aspectratio is not 'auto':\n if aspectratio is 'original':\n # TODO improve this workaround for getting original aspectratio\n xs = []\n ys = []\n for trace in traces:\n xs += trace['x']\n ys += trace['y']\n x_dropna = pd.Series(xs).dropna()\n y_dropna = pd.Series(ys).dropna()\n xrange = x_dropna.max() - x_dropna.min()\n yrange = y_dropna.max() - y_dropna.min()\n ratio = xrange / yrange\n if ratio < 1:\n aspectratio = (ratio, 1.)\n else:\n aspectratio = (1., 1 / ratio)\n\n aspectratio = np.array(aspectratio) / max(aspectratio)\n fig['layout']['width'], fig['layout']['height'] = ([ar * figsize * 700 for ar in aspectratio])\n\n # check if called from ipynb or not in order to consider appropriate plot function\n if _in_ipynb():\n from plotly.offline import init_notebook_mode, iplot as plot\n init_notebook_mode()\n else:\n from plotly.offline import plot as plot\n\n plot(fig, filename=filename)\n"
] | [
[
"numpy.array",
"pandas.Series",
"numpy.asarray"
]
] |
stefan-de/lifelines | [
"519bd3abe6051bd9fb5da0dfffce24ab86171f3f"
] | [
"lifelines/tests/utils/test_utils.py"
] | [
"# -*- coding: utf-8 -*-\n\n\nimport pytest\nimport os\nimport numpy as np\nimport pandas as pd\nfrom pandas.testing import assert_frame_equal, assert_series_equal\nimport numpy.testing as npt\nfrom numpy.linalg import norm, lstsq\nfrom numpy.random import randn\nfrom flaky import flaky\n\nfrom lifelines import CoxPHFitter, WeibullAFTFitter, KaplanMeierFitter, ExponentialFitter\nfrom lifelines.datasets import load_regression_dataset, load_larynx, load_waltons, load_rossi\nfrom lifelines import utils\nfrom lifelines import exceptions\nfrom lifelines.utils.sklearn_adapter import sklearn_adapter\nfrom lifelines.utils.safe_exp import safe_exp\n\n\ndef test_format_p_values():\n assert utils.format_p_value(2)(0.004) == \"<0.005\"\n assert utils.format_p_value(3)(0.004) == \"0.004\"\n\n assert utils.format_p_value(3)(0.000) == \"<0.0005\"\n assert utils.format_p_value(3)(0.005) == \"0.005\"\n assert utils.format_p_value(3)(0.2111) == \"0.211\"\n assert utils.format_p_value(3)(0.2119) == \"0.212\"\n\n\ndef test_ridge_regression_with_penalty_is_less_than_without_penalty():\n X = randn(2, 2)\n Y = randn(2)\n assert norm(utils.ridge_regression(X, Y, c1=2.0)[0]) <= norm(utils.ridge_regression(X, Y)[0])\n assert norm(utils.ridge_regression(X, Y, c1=1.0, c2=1.0)[0]) <= norm(utils.ridge_regression(X, Y)[0])\n\n\ndef test_ridge_regression_with_extreme_c1_penalty_equals_close_to_zero_vector():\n c1 = 10e8\n c2 = 0.0\n offset = np.ones(2)\n X = randn(2, 2)\n Y = randn(2)\n assert norm(utils.ridge_regression(X, Y, c1, c2, offset)[0]) < 10e-4\n\n\ndef test_ridge_regression_with_extreme_c2_penalty_equals_close_to_offset():\n c1 = 0.0\n c2 = 10e8\n offset = np.ones(2)\n X = randn(2, 2)\n Y = randn(2)\n assert norm(utils.ridge_regression(X, Y, c1, c2, offset)[0] - offset) < 10e-4\n\n\ndef test_lstsq_returns_similar_values_to_ridge_regression():\n X = randn(2, 2)\n Y = randn(2)\n expected = lstsq(X, Y, rcond=None)[0]\n assert norm(utils.ridge_regression(X, Y)[0] - expected) < 10e-4\n\n\ndef test_lstsq_returns_correct_values():\n X = np.array([[-1.0, -1.0], [-1.0, 0], [-0.8, -1.0], [1.0, 1.0], [1.0, 0.0]])\n y = [1, 1, 1, -1, -1]\n beta, V = utils.ridge_regression(X, y)\n expected_beta = [-0.98684211, -0.07894737]\n expected_v = [\n [-0.03289474, -0.49342105, 0.06578947, 0.03289474, 0.49342105],\n [-0.30263158, 0.46052632, -0.39473684, 0.30263158, -0.46052632],\n ]\n assert norm(beta - expected_beta) < 10e-4\n for V_row, e_v_row in zip(V, expected_v):\n assert norm(V_row - e_v_row) < 1e-4\n\n\ndef test_unnormalize():\n df = load_larynx()\n m = df.mean(0)\n s = df.std(0)\n\n ndf = utils.normalize(df)\n\n npt.assert_almost_equal(df.values, utils.unnormalize(ndf, m, s).values)\n\n\ndef test_normalize():\n df = load_larynx()\n n, d = df.shape\n npt.assert_almost_equal(utils.normalize(df).mean(0).values, np.zeros(d))\n npt.assert_almost_equal(utils.normalize(df).std(0).values, np.ones(d))\n\n\ndef test_median():\n sv = pd.DataFrame(1 - np.linspace(0, 1, 1000))\n assert utils.median_survival_times(sv) == 500\n\n\ndef test_median_accepts_series():\n sv = pd.Series(1 - np.linspace(0, 1, 1000))\n assert utils.median_survival_times(sv) == 500\n\n\ndef test_qth_survival_times_with_varying_datatype_inputs():\n sf_list = [1.0, 0.75, 0.5, 0.25, 0.0]\n sf_array = np.array([1.0, 0.75, 0.5, 0.25, 0.0])\n sf_df_no_index = pd.DataFrame([1.0, 0.75, 0.5, 0.25, 0.0])\n sf_df_index = pd.DataFrame([1.0, 0.75, 0.5, 0.25, 0.0], index=[10, 20, 30, 40, 50])\n sf_series_index = pd.Series([1.0, 0.75, 0.5, 0.25, 0.0], index=[10, 20, 30, 40, 
50])\n sf_series_no_index = pd.Series([1.0, 0.75, 0.5, 0.25, 0.0])\n\n q = 0.5\n\n assert utils.qth_survival_times(q, sf_list) == 2\n assert utils.qth_survival_times(q, sf_array) == 2\n assert utils.qth_survival_times(q, sf_df_no_index) == 2\n assert utils.qth_survival_times(q, sf_df_index) == 30\n assert utils.qth_survival_times(q, sf_series_index) == 30\n assert utils.qth_survival_times(q, sf_series_no_index) == 2\n\n\ndef test_qth_survival_times_multi_dim_input():\n sf = np.linspace(1, 0, 50)\n sf_multi_df = pd.DataFrame({\"sf\": sf, \"sf**2\": sf ** 2})\n medians = utils.qth_survival_times(0.5, sf_multi_df)\n assert medians[\"sf\"].loc[0.5] == 25\n assert medians[\"sf**2\"].loc[0.5] == 15\n\n\ndef test_qth_survival_time_returns_inf():\n sf = pd.Series([1.0, 0.7, 0.6])\n assert utils.qth_survival_time(0.5, sf) == np.inf\n\n\ndef test_qth_survival_time_accepts_a_model():\n kmf = KaplanMeierFitter().fit([1.0, 0.7, 0.6])\n assert utils.qth_survival_time(0.8, kmf) > 0\n\n\ndef test_qth_survival_time_with_dataframe():\n sf_df_no_index = pd.DataFrame([1.0, 0.75, 0.5, 0.25, 0.0])\n sf_df_index = pd.DataFrame([1.0, 0.75, 0.5, 0.25, 0.0], index=[10, 20, 30, 40, 50])\n sf_df_too_many_columns = pd.DataFrame([[1, 2], [3, 4]])\n\n assert utils.qth_survival_time(0.5, sf_df_no_index) == 2\n assert utils.qth_survival_time(0.5, sf_df_index) == 30\n\n with pytest.raises(ValueError):\n utils.qth_survival_time(0.5, sf_df_too_many_columns)\n\n\ndef test_qth_survival_times_with_multivariate_q():\n sf = np.linspace(1, 0, 50)\n sf_multi_df = pd.DataFrame({\"sf\": sf, \"sf**2\": sf ** 2})\n\n assert_frame_equal(\n utils.qth_survival_times([0.2, 0.5], sf_multi_df),\n pd.DataFrame([[40, 28], [25, 15]], index=[0.2, 0.5], columns=[\"sf\", \"sf**2\"]),\n )\n assert_frame_equal(\n utils.qth_survival_times([0.2, 0.5], sf_multi_df[\"sf\"]), pd.DataFrame([40, 25], index=[0.2, 0.5], columns=[\"sf\"])\n )\n assert_frame_equal(utils.qth_survival_times(0.5, sf_multi_df), pd.DataFrame([[25, 15]], index=[0.5], columns=[\"sf\", \"sf**2\"]))\n assert utils.qth_survival_times(0.5, sf_multi_df[\"sf\"]) == 25\n\n\ndef test_qth_survival_times_with_duplicate_q_returns_valid_index_and_shape():\n sf = pd.DataFrame(np.linspace(1, 0, 50))\n\n q = pd.Series([0.5, 0.5, 0.2, 0.0, 0.0])\n actual = utils.qth_survival_times(q, sf)\n assert actual.shape[0] == len(q)\n assert actual.index[0] == actual.index[1]\n assert_series_equal(actual.iloc[0], actual.iloc[1])\n\n npt.assert_almost_equal(actual.index.values, q.values)\n\n\ndef test_datetimes_to_durations_with_different_frequencies():\n # days\n start_date = [\"2013-10-10 0:00:00\", \"2013-10-09\", \"2012-10-10\"]\n end_date = [\"2013-10-13\", \"2013-10-10 0:00:00\", \"2013-10-15\"]\n T, C = utils.datetimes_to_durations(start_date, end_date)\n npt.assert_almost_equal(T, np.array([3, 1, 5 + 365]))\n npt.assert_almost_equal(C, np.array([1, 1, 1], dtype=bool))\n\n # years\n start_date = [\"2013-10-10\", \"2013-10-09\", \"2012-10-10\"]\n end_date = [\"2013-10-13\", \"2013-10-10\", \"2013-10-15\"]\n T, C = utils.datetimes_to_durations(start_date, end_date, freq=\"Y\")\n npt.assert_almost_equal(T, np.array([0, 0, 1]))\n npt.assert_almost_equal(C, np.array([1, 1, 1], dtype=bool))\n\n # hours\n start_date = [\"2013-10-10 17:00:00\", \"2013-10-09 0:00:00\", \"2013-10-10 23:00:00\"]\n end_date = [\"2013-10-10 18:00:00\", \"2013-10-10 0:00:00\", \"2013-10-11 2:00:00\"]\n T, C = utils.datetimes_to_durations(start_date, end_date, freq=\"h\")\n npt.assert_almost_equal(T, np.array([1, 24, 3]))\n 
npt.assert_almost_equal(C, np.array([1, 1, 1], dtype=bool))\n\n\ndef test_datetimes_to_durations_will_handle_dates_above_fill_date():\n start_date = [\"2013-10-08\", \"2013-10-09\", \"2013-10-10\"]\n end_date = [\"2013-10-10\", \"2013-10-12\", \"2013-10-15\"]\n T, C = utils.datetimes_to_durations(start_date, end_date, freq=\"Y\", fill_date=\"2013-10-12\")\n npt.assert_almost_equal(C, np.array([1, 1, 0], dtype=bool))\n\n\ndef test_datetimes_to_durations_will_handle_dates_above_multi_fill_date():\n start_date = [\"2013-10-08\", \"2013-10-09\", \"2013-10-10\"]\n end_date = [\"2013-10-10\", None, None]\n last_observation = [\"2013-10-10\", \"2013-10-12\", \"2013-10-14\"]\n T, E = utils.datetimes_to_durations(start_date, end_date, freq=\"D\", fill_date=last_observation)\n npt.assert_almost_equal(E, np.array([1, 0, 0], dtype=bool))\n npt.assert_almost_equal(T, np.array([2, 3, 4]))\n\n\ndef test_datetimes_to_durations_will_handle_dates_above_multi_fill_date():\n start_date = [\"2013-10-08\", \"2013-10-09\", \"2013-10-10\"]\n end_date = [\"2013-10-10\", None, None]\n last_observation = [\"2013-10-10\", \"2013-10-12\", \"2013-10-14\"]\n T, E = utils.datetimes_to_durations(start_date, end_date, freq=\"D\", fill_date=last_observation)\n npt.assert_almost_equal(E, np.array([1, 0, 0], dtype=bool))\n npt.assert_almost_equal(T, np.array([2, 3, 4]))\n\n\ndef test_datetimes_to_durations_censor():\n start_date = [\"2013-10-10\", \"2013-10-09\", \"2012-10-10\"]\n end_date = [\"2013-10-13\", None, \"\"]\n T, C = utils.datetimes_to_durations(start_date, end_date, freq=\"Y\")\n npt.assert_almost_equal(C, np.array([1, 0, 0], dtype=bool))\n\n\ndef test_datetimes_to_durations_custom_censor():\n start_date = [\"2013-10-10\", \"2013-10-09\", \"2012-10-10\"]\n end_date = [\"2013-10-13\", \"NaT\", \"\"]\n T, C = utils.datetimes_to_durations(start_date, end_date, freq=\"Y\", na_values=[\"NaT\", \"\"])\n npt.assert_almost_equal(C, np.array([1, 0, 0], dtype=bool))\n\n\ndef test_survival_events_from_table_no_ties():\n T, C = np.array([1, 2, 3, 4, 4, 5]), np.array([1, 0, 1, 1, 0, 1])\n d = utils.survival_table_from_events(T, C)\n T_, C_, W_ = utils.survival_events_from_table(d[[\"censored\", \"observed\"]])\n npt.assert_array_equal(T, T_)\n npt.assert_array_equal(C, C_)\n npt.assert_array_equal(W_, np.ones_like(T))\n\n\ndef test_survival_events_from_table_with_ties():\n T, C = np.array([1, 2, 3, 4, 4, 5]), np.array([1, 0, 1, 1, 1, 1])\n d = utils.survival_table_from_events(T, C)\n T_, C_, W_ = utils.survival_events_from_table(d[[\"censored\", \"observed\"]])\n npt.assert_array_equal([1, 2, 3, 4, 5], T_)\n npt.assert_array_equal([1, 0, 1, 1, 1], C_)\n npt.assert_array_equal([1, 1, 1, 2, 1], W_)\n\n\ndef test_survival_table_from_events_with_non_trivial_censorship_column():\n T = np.random.exponential(5, size=50)\n malformed_C = np.random.binomial(2, p=0.8) # set to 2 on purpose!\n proper_C = malformed_C > 0 # (proper \"boolean\" array)\n table1 = utils.survival_table_from_events(T, malformed_C, np.zeros_like(T))\n table2 = utils.survival_table_from_events(T, proper_C, np.zeros_like(T))\n\n assert_frame_equal(table1, table2)\n\n\ndef test_group_survival_table_from_events_on_waltons_data():\n df = load_waltons()\n first_obs = np.zeros(df.shape[0])\n g, removed, observed, censored = utils.group_survival_table_from_events(df[\"group\"], df[\"T\"], df[\"E\"], first_obs)\n assert len(g) == 2\n assert all(removed.columns == [\"removed:miR-137\", \"removed:control\"])\n assert all(removed.index == observed.index)\n assert 
all(removed.index == censored.index)\n\n\ndef test_survival_table_from_events_binned_with_empty_bin():\n df = load_waltons()\n ix = df[\"group\"] == \"miR-137\"\n event_table = utils.survival_table_from_events(df.loc[ix][\"T\"], df.loc[ix][\"E\"], intervals=[0, 10, 20, 30, 40, 50])\n assert not pd.isnull(event_table).any().any()\n\n\ndef test_survival_table_from_events_at_risk_column():\n df = load_waltons()\n # from R\n expected = [\n 163.0,\n 162.0,\n 160.0,\n 157.0,\n 154.0,\n 152.0,\n 151.0,\n 148.0,\n 144.0,\n 139.0,\n 134.0,\n 133.0,\n 130.0,\n 128.0,\n 126.0,\n 119.0,\n 118.0,\n 108.0,\n 107.0,\n 99.0,\n 96.0,\n 89.0,\n 87.0,\n 69.0,\n 65.0,\n 49.0,\n 38.0,\n 36.0,\n 27.0,\n 24.0,\n 14.0,\n 1.0,\n ]\n df = utils.survival_table_from_events(df[\"T\"], df[\"E\"])\n assert list(df[\"at_risk\"][1:]) == expected # skip the first event as that is the birth time, 0.\n\n\ndef test_survival_table_to_events_casts_to_float():\n T, C = (np.array([1, 2, 3, 4, 4, 5]), np.array([True, False, True, True, True, True]))\n d = utils.survival_table_from_events(T, C, np.zeros_like(T))\n npt.assert_array_equal(d[\"censored\"].values, np.array([0.0, 0.0, 1.0, 0.0, 0.0, 0.0]))\n npt.assert_array_equal(d[\"removed\"].values, np.array([0.0, 1.0, 1.0, 1.0, 2.0, 1.0]))\n\n\ndef test_group_survival_table_from_events_works_with_series():\n df = pd.DataFrame([[1, True, 3], [1, True, 3], [4, False, 2]], columns=[\"duration\", \"E\", \"G\"])\n ug, _, _, _ = utils.group_survival_table_from_events(df.G, df.duration, df.E, np.array([[0, 0, 0]]))\n npt.assert_array_equal(ug, np.array([3, 2]))\n\n\ndef test_survival_table_from_events_will_collapse_if_asked():\n T, C = np.array([1, 3, 4, 5]), np.array([True, True, True, True])\n table = utils.survival_table_from_events(T, C, collapse=True)\n assert table.index.tolist() == [\n pd.Interval(-0.001, 3.5089999999999999, closed=\"right\"),\n pd.Interval(3.5089999999999999, 7.0179999999999998, closed=\"right\"),\n ]\n\n\ndef test_survival_table_from_events_will_collapse_to_desired_bins():\n T, C = np.array([1, 3, 4, 5]), np.array([True, True, True, True])\n table = utils.survival_table_from_events(T, C, collapse=True, intervals=[0, 4, 8])\n assert table.index.tolist() == [pd.Interval(-0.001, 4, closed=\"right\"), pd.Interval(4, 8, closed=\"right\")]\n\n\ndef test_cross_validator_returns_k_results():\n cf = CoxPHFitter()\n results = utils.k_fold_cross_validation(cf, load_regression_dataset(), duration_col=\"T\", event_col=\"E\", k=3)\n assert len(results) == 3\n\n results = utils.k_fold_cross_validation(cf, load_regression_dataset(), duration_col=\"T\", event_col=\"E\", k=5)\n assert len(results) == 5\n\n\ndef test_cross_validator_returns_fitters_k_results():\n cf = CoxPHFitter()\n fitters = [cf, cf]\n results = utils.k_fold_cross_validation(fitters, load_regression_dataset(), duration_col=\"T\", event_col=\"E\", k=3)\n assert len(results) == 2\n assert len(results[0]) == len(results[1]) == 3\n\n results = utils.k_fold_cross_validation(fitters, load_regression_dataset(), duration_col=\"T\", event_col=\"E\", k=5)\n assert len(results) == 2\n assert len(results[0]) == len(results[1]) == 5\n\n\ndef test_cross_validator_with_predictor():\n cf = CoxPHFitter()\n results = utils.k_fold_cross_validation(cf, load_regression_dataset(), duration_col=\"T\", event_col=\"E\", k=3)\n assert len(results) == 3\n\n\ndef test_cross_validator_with_stratified_cox_model():\n cf = CoxPHFitter(strata=[\"race\"])\n utils.k_fold_cross_validation(cf, load_rossi(), duration_col=\"week\", 
event_col=\"arrest\")\n\n\ndef test_cross_validator_with_specific_loss_function():\n cf = CoxPHFitter()\n results_sq = utils.k_fold_cross_validation(\n cf, load_regression_dataset(), scoring_method=\"concordance_index\", duration_col=\"T\", event_col=\"E\"\n )\n\n\ndef test_concordance_index():\n size = 1000\n T = np.random.normal(size=size)\n P = np.random.normal(size=size)\n C = np.random.choice([0, 1], size=size)\n Z = np.zeros_like(T)\n\n # Zeros is exactly random\n assert utils.concordance_index(T, Z) == 0.5\n assert utils.concordance_index(T, Z, C) == 0.5\n\n # Itself is 1\n assert utils.concordance_index(T, T) == 1.0\n assert utils.concordance_index(T, T, C) == 1.0\n\n # Random is close to 0.5\n assert abs(utils.concordance_index(T, P) - 0.5) < 0.05\n assert abs(utils.concordance_index(T, P, C) - 0.5) < 0.05\n\n\ndef test_survival_table_from_events_with_non_negative_T_and_no_lagged_births():\n n = 10\n T = np.arange(n)\n C = [True] * n\n min_obs = [0] * n\n df = utils.survival_table_from_events(T, C, min_obs)\n assert df.iloc[0][\"entrance\"] == n\n assert df.index[0] == T.min()\n assert df.index[-1] == T.max()\n\n\ndef test_survival_table_from_events_with_negative_T_and_no_lagged_births():\n n = 10\n T = np.arange(-n / 2, n / 2)\n C = [True] * n\n min_obs = None\n df = utils.survival_table_from_events(T, C, min_obs)\n assert df.iloc[0][\"entrance\"] == n\n assert df.index[0] == T.min()\n assert df.index[-1] == T.max()\n\n\ndef test_survival_table_from_events_with_non_negative_T_and_lagged_births():\n n = 10\n T = np.arange(n)\n C = [True] * n\n min_obs = np.linspace(0, 2, n)\n df = utils.survival_table_from_events(T, C, min_obs)\n assert df.iloc[0][\"entrance\"] == 1\n assert df.index[0] == T.min()\n assert df.index[-1] == T.max()\n\n\ndef test_survival_table_from_events_with_negative_T_and_lagged_births():\n n = 10\n T = np.arange(-n / 2, n / 2)\n C = [True] * n\n min_obs = np.linspace(-n / 2, 2, n)\n df = utils.survival_table_from_events(T, C, min_obs)\n assert df.iloc[0][\"entrance\"] == 1\n assert df.index[0] == T.min()\n assert df.index[-1] == T.max()\n\n\ndef test_survival_table_from_events_raises_value_error_if_too_early_births():\n n = 10\n T = np.arange(0, n)\n C = [True] * n\n min_obs = T.copy()\n min_obs[1] = min_obs[1] + 10\n with pytest.raises(ValueError):\n utils.survival_table_from_events(T, C, min_obs)\n\n\nclass TestLongDataFrameUtils(object):\n @pytest.fixture\n def seed_df(self):\n df = pd.DataFrame.from_records([{\"id\": 1, \"var1\": 0.1, \"T\": 10, \"E\": 1}, {\"id\": 2, \"var1\": 0.5, \"T\": 12, \"E\": 0}])\n return utils.to_long_format(df, \"T\")\n\n @pytest.fixture\n def cv1(self):\n return pd.DataFrame.from_records(\n [\n {\"id\": 1, \"t\": 0, \"var2\": 1.4},\n {\"id\": 1, \"t\": 4, \"var2\": 1.2},\n {\"id\": 1, \"t\": 8, \"var2\": 1.5},\n {\"id\": 2, \"t\": 0, \"var2\": 1.6},\n ]\n )\n\n @pytest.fixture\n def cv2(self):\n return pd.DataFrame.from_records(\n [{\"id\": 1, \"t\": 0, \"var3\": 0}, {\"id\": 1, \"t\": 6, \"var3\": 1}, {\"id\": 2, \"t\": 0, \"var3\": 0}]\n )\n\n def test_order_of_adding_covariates_doesnt_matter(self, seed_df, cv1, cv2):\n df12 = seed_df.pipe(utils.add_covariate_to_timeline, cv1, \"id\", \"t\", \"E\").pipe(\n utils.add_covariate_to_timeline, cv2, \"id\", \"t\", \"E\"\n )\n\n df21 = seed_df.pipe(utils.add_covariate_to_timeline, cv2, \"id\", \"t\", \"E\").pipe(\n utils.add_covariate_to_timeline, cv1, \"id\", \"t\", \"E\"\n )\n\n assert_frame_equal(df21, df12, check_like=True)\n\n def 
test_order_of_adding_covariates_doesnt_matter_in_cumulative_sum(self, seed_df, cv1, cv2):\n df12 = seed_df.pipe(utils.add_covariate_to_timeline, cv1, \"id\", \"t\", \"E\", cumulative_sum=True).pipe(\n utils.add_covariate_to_timeline, cv2, \"id\", \"t\", \"E\", cumulative_sum=True\n )\n\n df21 = seed_df.pipe(utils.add_covariate_to_timeline, cv2, \"id\", \"t\", \"E\", cumulative_sum=True).pipe(\n utils.add_covariate_to_timeline, cv1, \"id\", \"t\", \"E\", cumulative_sum=True\n )\n\n assert_frame_equal(df21, df12, check_like=True)\n\n def test_adding_cvs_with_the_same_column_name_will_insert_appropriately(self, seed_df):\n seed_df = seed_df[seed_df[\"id\"] == 1]\n\n cv = pd.DataFrame.from_records([{\"id\": 1, \"t\": 1, \"var1\": 1.0}, {\"id\": 1, \"t\": 2, \"var1\": 2.0}])\n df = seed_df.pipe(utils.add_covariate_to_timeline, cv, \"id\", \"t\", \"E\")\n expected = pd.DataFrame.from_records(\n [\n {\"E\": False, \"id\": 1, \"stop\": 1.0, \"start\": 0, \"var1\": 0.1},\n {\"E\": False, \"id\": 1, \"stop\": 2.0, \"start\": 1, \"var1\": 1.0},\n {\"E\": True, \"id\": 1, \"stop\": 10.0, \"start\": 2, \"var1\": 2.0},\n ]\n )\n assert_frame_equal(df, expected, check_like=True)\n\n def test_adding_cvs_with_the_same_column_name_will_sum_update_appropriately(self, seed_df):\n seed_df = seed_df[seed_df[\"id\"] == 1]\n\n new_value_at_time_0 = 1.0\n old_value_at_time_0 = seed_df[\"var1\"].iloc[0]\n cv = pd.DataFrame.from_records([{\"id\": 1, \"t\": 0, \"var1\": new_value_at_time_0, \"var2\": 2.0}])\n\n df = seed_df.pipe(utils.add_covariate_to_timeline, cv, \"id\", \"t\", \"E\", overwrite=False)\n\n expected = pd.DataFrame.from_records(\n [{\"E\": True, \"id\": 1, \"stop\": 10.0, \"start\": 0, \"var1\": new_value_at_time_0 + old_value_at_time_0, \"var2\": 2.0}]\n )\n assert_frame_equal(df, expected, check_like=True)\n\n def test_adding_cvs_with_the_same_column_name_will_overwrite_update_appropriately(self, seed_df):\n seed_df = seed_df[seed_df[\"id\"] == 1]\n\n new_value_at_time_0 = 1.0\n cv = pd.DataFrame.from_records([{\"id\": 1, \"t\": 0, \"var1\": new_value_at_time_0}])\n\n df = seed_df.pipe(utils.add_covariate_to_timeline, cv, \"id\", \"t\", \"E\", overwrite=True)\n\n expected = pd.DataFrame.from_records([{\"E\": True, \"id\": 1, \"stop\": 10.0, \"start\": 0, \"var1\": new_value_at_time_0}])\n assert_frame_equal(df, expected, check_like=True)\n\n def test_enum_flag(self, seed_df, cv1, cv2):\n df = seed_df.pipe(utils.add_covariate_to_timeline, cv1, \"id\", \"t\", \"E\", add_enum=True).pipe(\n utils.add_covariate_to_timeline, cv2, \"id\", \"t\", \"E\", add_enum=True\n )\n\n idx = df[\"id\"] == 1\n n = idx.sum()\n try:\n assert_series_equal(df[\"enum\"].loc[idx], pd.Series(np.arange(1, n + 1)), check_names=False)\n except AssertionError as e:\n # Windows Numpy and Pandas sometimes have int32 or int64 as default dtype\n if os.name == \"nt\" and \"int32\" in str(e) and \"int64\" in str(e):\n assert_series_equal(\n df[\"enum\"].loc[idx], pd.Series(np.arange(1, n + 1), dtype=df[\"enum\"].loc[idx].dtypes), check_names=False\n )\n else:\n raise e\n\n def test_event_col_is_properly_inserted(self, seed_df, cv2):\n df = seed_df.pipe(utils.add_covariate_to_timeline, cv2, \"id\", \"t\", \"E\")\n assert df.groupby(\"id\").last()[\"E\"].tolist() == [1, 0]\n\n def test_redundant_cv_columns_are_dropped(self, seed_df):\n seed_df = seed_df[seed_df[\"id\"] == 1]\n cv = pd.DataFrame.from_records(\n [\n {\"id\": 1, \"t\": 0, \"var3\": 0, \"var4\": 1},\n {\"id\": 1, \"t\": 1, \"var3\": 0, \"var4\": 1}, # redundant, as nothing 
changed during the interval\n {\"id\": 1, \"t\": 3, \"var3\": 0, \"var4\": 1}, # redundant, as nothing changed during the interval\n {\"id\": 1, \"t\": 6, \"var3\": 1, \"var4\": 1},\n {\"id\": 1, \"t\": 9, \"var3\": 1, \"var4\": 1}, # redundant, as nothing changed during the interval\n ]\n )\n\n df = seed_df.pipe(utils.add_covariate_to_timeline, cv, \"id\", \"t\", \"E\")\n assert df.shape[0] == 2\n\n def test_will_convert_event_column_to_bools(self, seed_df, cv1):\n seed_df[\"E\"] = seed_df[\"E\"].astype(int)\n\n df = seed_df.pipe(utils.add_covariate_to_timeline, cv1, \"id\", \"t\", \"E\")\n assert df.dtypes[\"E\"] == bool\n\n def test_if_cvs_include_a_start_time_after_the_final_time_it_is_excluded(self, seed_df):\n max_T = seed_df[\"stop\"].max()\n cv = pd.DataFrame.from_records(\n [\n {\"id\": 1, \"t\": 0, \"var3\": 0},\n {\"id\": 1, \"t\": max_T + 10, \"var3\": 1}, # will be excluded\n {\"id\": 2, \"t\": 0, \"var3\": 0},\n ]\n )\n\n df = seed_df.pipe(utils.add_covariate_to_timeline, cv, \"id\", \"t\", \"E\")\n assert df.shape[0] == 2\n\n def test_if_cvs_include_a_start_time_before_it_is_included(self, seed_df):\n min_T = seed_df[\"start\"].min()\n cv = pd.DataFrame.from_records(\n [{\"id\": 1, \"t\": 0, \"var3\": 0}, {\"id\": 1, \"t\": min_T - 1, \"var3\": 1}, {\"id\": 2, \"t\": 0, \"var3\": 0}]\n )\n\n df = seed_df.pipe(utils.add_covariate_to_timeline, cv, \"id\", \"t\", \"E\")\n assert df.shape[0] == 3\n\n def test_cvs_with_null_values_are_dropped(self, seed_df):\n seed_df = seed_df[seed_df[\"id\"] == 1]\n cv = pd.DataFrame.from_records(\n [{\"id\": None, \"t\": 0, \"var3\": 0}, {\"id\": 1, \"t\": None, \"var3\": 1}, {\"id\": 2, \"t\": 0, \"var3\": None}]\n )\n\n df = seed_df.pipe(utils.add_covariate_to_timeline, cv, \"id\", \"t\", \"E\")\n assert df.shape[0] == 1\n\n def test_a_new_row_is_not_created_if_start_times_are_the_same(self, seed_df):\n seed_df = seed_df[seed_df[\"id\"] == 1]\n cv1 = pd.DataFrame.from_records([{\"id\": 1, \"t\": 0, \"var3\": 0}, {\"id\": 1, \"t\": 5, \"var3\": 1}])\n\n cv2 = pd.DataFrame.from_records(\n [{\"id\": 1, \"t\": 0, \"var4\": 0}, {\"id\": 1, \"t\": 5, \"var4\": 1.5}, {\"id\": 1, \"t\": 6, \"var4\": 1.7}]\n )\n\n df = seed_df.pipe(utils.add_covariate_to_timeline, cv1, \"id\", \"t\", \"E\").pipe(\n utils.add_covariate_to_timeline, cv2, \"id\", \"t\", \"E\"\n )\n assert df.shape[0] == 3\n\n def test_error_is_raised_if_columns_are_missing_in_seed_df(self, seed_df, cv1):\n del seed_df[\"start\"]\n with pytest.raises(IndexError):\n utils.add_covariate_to_timeline(seed_df, cv1, \"id\", \"t\", \"E\")\n\n def test_cumulative_sum(self):\n seed_df = pd.DataFrame.from_records([{\"id\": 1, \"start\": 0, \"stop\": 5, \"E\": 1}])\n cv = pd.DataFrame.from_records([{\"id\": 1, \"t\": 0, \"var4\": 1}, {\"id\": 1, \"t\": 1, \"var4\": 1}, {\"id\": 1, \"t\": 3, \"var4\": 1}])\n\n df = seed_df.pipe(utils.add_covariate_to_timeline, cv, \"id\", \"t\", \"E\", cumulative_sum=True)\n expected = pd.DataFrame.from_records(\n [\n {\"id\": 1, \"start\": 0, \"stop\": 1.0, \"cumsum_var4\": 1, \"E\": False},\n {\"id\": 1, \"start\": 1, \"stop\": 3.0, \"cumsum_var4\": 2, \"E\": False},\n {\"id\": 1, \"start\": 3, \"stop\": 5.0, \"cumsum_var4\": 3, \"E\": True},\n ]\n )\n assert_frame_equal(expected, df, check_like=True)\n\n def test_delay(self, cv2):\n seed_df = pd.DataFrame.from_records([{\"id\": 1, \"start\": 0, \"stop\": 50, \"E\": 1}])\n\n cv3 = pd.DataFrame.from_records(\n [{\"id\": 1, \"t\": 0, \"varA\": 2}, {\"id\": 1, \"t\": 10, \"varA\": 4}, {\"id\": 1, \"t\": 20, \"varA\": 
6}]\n )\n\n df = seed_df.pipe(utils.add_covariate_to_timeline, cv3, \"id\", \"t\", \"E\", delay=2).fillna(0)\n\n expected = pd.DataFrame.from_records(\n [\n {\"start\": 0, \"stop\": 2.0, \"varA\": 0.0, \"id\": 1, \"E\": False},\n {\"start\": 2, \"stop\": 12.0, \"varA\": 2.0, \"id\": 1, \"E\": False},\n {\"start\": 12, \"stop\": 22.0, \"varA\": 4.0, \"id\": 1, \"E\": False},\n {\"start\": 22, \"stop\": 50.0, \"varA\": 6.0, \"id\": 1, \"E\": True},\n ]\n )\n assert_frame_equal(expected, df, check_like=True)\n\n def test_covariates_from_event_matrix_with_simple_addition(self):\n\n base_df = pd.DataFrame([[1, 0, 5, 1], [2, 0, 4, 1], [3, 0, 8, 1], [4, 0, 4, 1]], columns=[\"id\", \"start\", \"stop\", \"e\"])\n\n event_df = pd.DataFrame([[1, 1], [2, 2], [3, 3], [4, None]], columns=[\"id\", \"poison\"])\n cv = utils.covariates_from_event_matrix(event_df, \"id\")\n ldf = utils.add_covariate_to_timeline(base_df, cv, \"id\", \"duration\", \"e\", cumulative_sum=True)\n assert pd.notnull(ldf).all().all()\n\n expected = pd.DataFrame(\n [\n (0.0, 0.0, 1.0, 1, False),\n (1.0, 1.0, 5.0, 1, True),\n (0.0, 0.0, 2.0, 2, False),\n (2.0, 1.0, 4.0, 2, True),\n (0.0, 0.0, 3.0, 3, False),\n (3.0, 1.0, 8.0, 3, True),\n (0.0, 0.0, 4.0, 4, True),\n ],\n columns=[\"start\", \"cumsum_poison\", \"stop\", \"id\", \"e\"],\n )\n assert_frame_equal(expected, ldf, check_dtype=False, check_like=True)\n\n def test_covariates_from_event_matrix(self):\n\n base_df = pd.DataFrame([[1, 0, 5, 1], [2, 0, 4, 1], [3, 0, 8, 1], [4, 0, 4, 1]], columns=[\"id\", \"start\", \"stop\", \"e\"])\n\n event_df = pd.DataFrame(\n [[1, 1, None, 2], [2, None, 5, None], [3, 3, 3, 7]], columns=[\"id\", \"promotion\", \"movement\", \"raise\"]\n )\n\n cv = utils.covariates_from_event_matrix(event_df, \"id\")\n ldf = utils.add_covariate_to_timeline(base_df, cv, \"id\", \"duration\", \"e\", cumulative_sum=True)\n expected = pd.DataFrame.from_records(\n [\n {\n \"cumsum_movement\": 0.0,\n \"cumsum_promotion\": 0.0,\n \"cumsum_raise\": 0.0,\n \"e\": 0.0,\n \"id\": 1.0,\n \"start\": 0.0,\n \"stop\": 1.0,\n },\n {\n \"cumsum_movement\": 0.0,\n \"cumsum_promotion\": 1.0,\n \"cumsum_raise\": 0.0,\n \"e\": 0.0,\n \"id\": 1.0,\n \"start\": 1.0,\n \"stop\": 2.0,\n },\n {\n \"cumsum_movement\": 0.0,\n \"cumsum_promotion\": 1.0,\n \"cumsum_raise\": 1.0,\n \"e\": 1.0,\n \"id\": 1.0,\n \"start\": 2.0,\n \"stop\": 5.0,\n },\n {\n \"cumsum_movement\": 0.0,\n \"cumsum_promotion\": 0.0,\n \"cumsum_raise\": 0.0,\n \"e\": 1.0,\n \"id\": 2.0,\n \"start\": 0.0,\n \"stop\": 4.0,\n },\n {\n \"cumsum_movement\": 0.0,\n \"cumsum_promotion\": 0.0,\n \"cumsum_raise\": 0.0,\n \"e\": 0.0,\n \"id\": 3.0,\n \"start\": 0.0,\n \"stop\": 3.0,\n },\n {\n \"cumsum_movement\": 1.0,\n \"cumsum_promotion\": 1.0,\n \"cumsum_raise\": 0.0,\n \"e\": 0.0,\n \"id\": 3.0,\n \"start\": 3.0,\n \"stop\": 7.0,\n },\n {\n \"cumsum_movement\": 1.0,\n \"cumsum_promotion\": 1.0,\n \"cumsum_raise\": 1.0,\n \"e\": 1.0,\n \"id\": 3.0,\n \"start\": 7.0,\n \"stop\": 8.0,\n },\n {\n \"cumsum_movement\": None,\n \"cumsum_promotion\": None,\n \"cumsum_raise\": None,\n \"e\": 1.0,\n \"id\": 4.0,\n \"start\": 0.0,\n \"stop\": 4.0,\n },\n ]\n )\n\n assert_frame_equal(expected, ldf, check_dtype=False, check_like=True)\n\n def test_to_episodic_format_with_long_time_gap_is_identical(self):\n rossi = load_rossi()\n rossi[\"id\"] = np.arange(rossi.shape[0])\n\n long_rossi = utils.to_episodic_format(rossi, duration_col=\"week\", event_col=\"arrest\", id_col=\"id\", time_gaps=1000.0)\n\n # using astype(int) would fail on 
Windows because int32 and int64 are used as dtype\n long_rossi[\"week\"] = long_rossi[\"stop\"].astype(rossi[\"week\"].dtype)\n del long_rossi[\"start\"]\n del long_rossi[\"stop\"]\n\n assert_frame_equal(long_rossi, rossi, check_like=True)\n\n def test_to_episodic_format_preserves_outcome(self):\n E = [1, 1, 0, 0]\n df = pd.DataFrame({\"T\": [1, 3, 1, 3], \"E\": E, \"id\": [1, 2, 3, 4]})\n long_df = utils.to_episodic_format(df, \"T\", \"E\", id_col=\"id\").sort_values([\"id\", \"stop\"])\n assert long_df.shape[0] == 1 + 3 + 1 + 3\n\n assert long_df.groupby(\"id\").last()[\"E\"].tolist() == E\n\n def test_to_episodic_format_handles_floating_durations(self):\n df = pd.DataFrame({\"T\": [0.1, 3.5], \"E\": [1, 1], \"id\": [1, 2]})\n long_df = utils.to_episodic_format(df, \"T\", \"E\", id_col=\"id\").sort_values([\"id\", \"stop\"])\n assert long_df.shape[0] == 1 + 4\n assert long_df[\"stop\"].tolist() == [0.1, 1, 2, 3, 3.5]\n\n def test_to_episodic_format_handles_floating_durations_with_time_gaps(self):\n df = pd.DataFrame({\"T\": [0.1, 3.5], \"E\": [1, 1], \"id\": [1, 2]})\n long_df = utils.to_episodic_format(df, \"T\", \"E\", id_col=\"id\", time_gaps=2.0).sort_values([\"id\", \"stop\"])\n assert long_df[\"stop\"].tolist() == [0.1, 2, 3.5]\n\n def test_to_episodic_format_handles_floating_durations_and_preserves_events(self):\n df = pd.DataFrame({\"T\": [0.1, 3.5], \"E\": [1, 0], \"id\": [1, 2]})\n long_df = utils.to_episodic_format(df, \"T\", \"E\", id_col=\"id\", time_gaps=2.0).sort_values([\"id\", \"stop\"])\n assert long_df.groupby(\"id\").last()[\"E\"].tolist() == [1, 0]\n\n def test_to_episodic_format_handles_floating_durations_and_preserves_events(self):\n df = pd.DataFrame({\"T\": [0.1, 3.5], \"E\": [1, 0], \"id\": [1, 2]})\n long_df = utils.to_episodic_format(df, \"T\", \"E\", id_col=\"id\", time_gaps=2.0).sort_values([\"id\", \"stop\"])\n assert long_df.groupby(\"id\").last()[\"E\"].tolist() == [1, 0]\n\n def test_to_episodic_format_adds_id_col(self):\n df = pd.DataFrame({\"T\": [1, 3], \"E\": [1, 0]})\n long_df = utils.to_episodic_format(df, \"T\", \"E\")\n assert \"id\" in long_df.columns\n\n def test_to_episodic_format_uses_custom_index_as_id(self):\n df = pd.DataFrame({\"T\": [1, 3], \"E\": [1, 0]}, index=[\"A\", \"B\"])\n long_df = utils.to_episodic_format(df, \"T\", \"E\")\n assert long_df[\"id\"].tolist() == [\"A\", \"B\", \"B\", \"B\"]\n\n\nclass TestStepSizer:\n def test_StepSizer_step_will_decrease_if_unstable(self):\n start = 0.95\n ss = utils.StepSizer(start)\n assert ss.next() == start\n ss.update(1.0)\n ss.update(2.0)\n ss.update(1.0)\n ss.update(2.0)\n\n assert ss.next() < start\n\n def test_StepSizer_step_will_increase_if_stable(self):\n start = 0.5\n ss = utils.StepSizer(start)\n assert ss.next() == start\n ss.update(1.0)\n ss.update(0.5)\n ss.update(0.4)\n ss.update(0.1)\n\n assert ss.next() > start\n\n def test_StepSizer_step_will_decrease_if_explodes(self):\n start = 0.5\n ss = utils.StepSizer(start)\n assert ss.next() == start\n ss.update(20.0)\n assert ss.next() < start\n\n\nclass TestSklearnAdapter:\n @pytest.fixture\n def X(self):\n return load_regression_dataset().drop(\"T\", axis=1)\n\n @pytest.fixture\n def Y(self):\n return load_regression_dataset().pop(\"T\")\n\n def test_model_has_correct_api(self, X, Y):\n base_model = sklearn_adapter(CoxPHFitter, event_col=\"E\")\n cph = base_model()\n assert hasattr(cph, \"fit\")\n cph.fit(X, Y)\n assert hasattr(cph, \"predict\")\n cph.predict(X)\n assert hasattr(cph, \"score\")\n cph.score(X, Y)\n\n def 
test_sklearn_cross_val_score_accept_model(self, X, Y):\n from sklearn.model_selection import cross_val_score\n from sklearn.model_selection import GridSearchCV\n\n base_model = sklearn_adapter(WeibullAFTFitter, event_col=\"E\")\n wf = base_model(penalizer=1.0)\n assert len(cross_val_score(wf, X, Y, cv=3)) == 3\n\n def test_sklearn_GridSearchCV_accept_model(self, X, Y):\n from sklearn.model_selection import cross_val_score\n from sklearn.model_selection import GridSearchCV\n\n base_model = sklearn_adapter(WeibullAFTFitter, event_col=\"E\")\n\n grid_params = {\"penalizer\": 10.0 ** np.arange(-2, 3), \"model_ancillary\": [True, False]}\n clf = GridSearchCV(base_model(), grid_params, cv=4)\n clf.fit(X, Y)\n\n assert clf.best_params_ == {\"model_ancillary\": True, \"penalizer\": 100.0}\n assert clf.predict(X).shape[0] == X.shape[0]\n\n def test_model_can_accept_things_like_strata(self, X, Y):\n X[\"strata\"] = np.random.randint(0, 2, size=X.shape[0])\n base_model = sklearn_adapter(CoxPHFitter, event_col=\"E\")\n cph = base_model(strata=\"strata\")\n cph.fit(X, Y)\n\n def test_we_can_user_other_prediction_methods(self, X, Y):\n\n base_model = sklearn_adapter(WeibullAFTFitter, event_col=\"E\", predict_method=\"predict_median\")\n wf = base_model(strata=\"strata\")\n wf.fit(X, Y)\n assert wf.predict(X).shape[0] == X.shape[0]\n\n @pytest.mark.xfail\n def test_dill(self, X, Y):\n import dill\n\n base_model = sklearn_adapter(CoxPHFitter, event_col=\"E\")\n cph = base_model()\n cph.fit(X, Y)\n\n s = dill.dumps(cph)\n s = dill.loads(s)\n assert cph.predict(X).shape[0] == X.shape[0]\n\n @pytest.mark.xfail\n def test_pickle(self, X, Y):\n import pickle\n\n base_model = sklearn_adapter(CoxPHFitter, event_col=\"E\")\n cph = base_model()\n cph.fit(X, Y)\n\n s = pickle.dumps(cph, protocol=-1)\n s = pickle.loads(s)\n assert cph.predict(X).shape[0] == X.shape[0]\n\n def test_isinstance(self):\n from sklearn.base import BaseEstimator, RegressorMixin, MetaEstimatorMixin, MultiOutputMixin\n\n base_model = sklearn_adapter(CoxPHFitter, event_col=\"E\")\n assert isinstance(base_model(), BaseEstimator)\n assert isinstance(base_model(), RegressorMixin)\n assert isinstance(base_model(), MetaEstimatorMixin)\n\n @pytest.mark.xfail\n def test_sklearn_GridSearchCV_accept_model_with_parallelization(self, X, Y):\n from sklearn.model_selection import cross_val_score\n from sklearn.model_selection import GridSearchCV\n\n base_model = sklearn_adapter(WeibullAFTFitter, event_col=\"E\")\n\n grid_params = {\"penalizer\": 10.0 ** np.arange(-2, 3), \"l1_ratio\": [0.05, 0.5, 0.95], \"model_ancillary\": [True, False]}\n # note the n_jobs\n clf = GridSearchCV(base_model(), grid_params, cv=4, n_jobs=-1)\n clf.fit(X, Y)\n\n assert clf.best_params_ == {\"l1_ratio\": 0.5, \"model_ancillary\": False, \"penalizer\": 0.01}\n assert clf.predict(X).shape[0] == X.shape[0]\n\n @pytest.mark.xfail\n def test_joblib(self, X, Y):\n from joblib import dump, load\n\n base_model = sklearn_adapter(WeibullAFTFitter, event_col=\"E\")\n\n clf = base_model()\n clf.fit(X, Y)\n dump(clf, \"filename.joblib\")\n clf = load(\"filename.joblib\")\n\n @pytest.mark.xfail\n def test_sklearn_check():\n from sklearn.utils.estimator_checks import check_estimator\n\n base_model = sklearn_adapter(WeibullAFTFitter, event_col=\"E\")\n check_estimator(base_model())\n\n\ndef test_rmst_works_at_kaplan_meier_edge_case():\n\n T = [1, 2, 3, 4, 10]\n kmf = KaplanMeierFitter().fit(T)\n\n # when S(t)=0, doesn't matter about extending past\n assert 
utils.restricted_mean_survival_time(kmf, t=10) == utils.restricted_mean_survival_time(kmf, t=10.001)\n\n assert utils.restricted_mean_survival_time(kmf, t=9.9) <= utils.restricted_mean_survival_time(kmf, t=10.0)\n\n assert abs((utils.restricted_mean_survival_time(kmf, t=4) - (1.0 + 0.8 + 0.6 + 0.4))) < 0.0001\n assert abs((utils.restricted_mean_survival_time(kmf, t=4 + 0.1) - (1.0 + 0.8 + 0.6 + 0.4 + 0.2 * 0.1))) < 0.0001\n\n\ndef test_rmst_exactely_with_known_solution():\n T = np.random.exponential(2, 100)\n exp = ExponentialFitter().fit(T)\n lambda_ = exp.lambda_\n\n assert abs(utils.restricted_mean_survival_time(exp) - lambda_) < 0.001\n assert abs(utils.restricted_mean_survival_time(exp, t=lambda_) - lambda_ * (np.e - 1) / np.e) < 0.001\n\n\n@flaky\ndef test_rmst_approximate_solution():\n T = np.random.exponential(2, 4000)\n exp = ExponentialFitter().fit(T, timeline=np.linspace(0, T.max(), 10000))\n lambda_ = exp.lambda_\n\n with pytest.warns(exceptions.ApproximationWarning) as w:\n\n assert (\n abs(\n utils.restricted_mean_survival_time(exp, t=lambda_)\n - utils.restricted_mean_survival_time(exp.survival_function_, t=lambda_)\n )\n < 0.001\n )\n\n\ndef test_rmst_variance():\n\n T = np.random.exponential(2, 1000)\n expf = ExponentialFitter().fit(T)\n hazard = 1 / expf.lambda_\n t = 1\n\n sq = 2 / hazard ** 2 * (1 - np.exp(-hazard * t) * (1 + hazard * t))\n actual_mean = 1 / hazard * (1 - np.exp(-hazard * t))\n actual_var = sq - actual_mean ** 2\n\n assert abs(utils.restricted_mean_survival_time(expf, t=t, return_variance=True)[0] - actual_mean) < 0.001\n assert abs(utils.restricted_mean_survival_time(expf, t=t, return_variance=True)[1] - actual_var) < 0.001\n\n\ndef test_find_best_parametric_model():\n T = np.random.exponential(2, 1000)\n E = np.ones_like(T)\n\n model, score = utils.find_best_parametric_model(T, E)\n assert True\n\n\ndef test_find_best_parametric_model_can_accept_other_models():\n T = np.random.exponential(2, 1000)\n model, score = utils.find_best_parametric_model(T, additional_models=[ExponentialFitter(), ExponentialFitter()])\n assert True\n\n\ndef test_find_best_parametric_model_with_BIC():\n T = np.random.exponential(2, 1000)\n model, score = utils.find_best_parametric_model(T, scoring_method=\"BIC\")\n assert True\n\n\ndef test_find_best_parametric_model_works_for_left_censoring():\n T = np.random.exponential(2, 100)\n model, score = utils.find_best_parametric_model(T, censoring_type=\"left\", show_progress=True)\n assert True\n\n\ndef test_find_best_parametric_model_works_for_interval_censoring():\n T_1 = np.random.exponential(2, 100)\n T_2 = T_1 + 1\n model, score = utils.find_best_parametric_model((T_1, T_2), censoring_type=\"interval\", show_progress=True)\n assert True\n\n\ndef test_find_best_parametric_model_works_with_weights_and_entry():\n T = np.random.exponential(5, 100)\n W = np.random.randint(1, 5, size=100)\n entry = np.random.exponential(0.01, 100)\n model, score = utils.find_best_parametric_model(T, weights=W, entry=entry, show_progress=True)\n assert True\n\n\ndef test_safe_exp():\n from lifelines.utils.safe_exp import MAX\n\n assert safe_exp(4.0) == np.exp(4.0)\n assert safe_exp(MAX) == np.exp(MAX)\n assert safe_exp(MAX + 1) == np.exp(MAX)\n\n from autograd import grad\n\n assert grad(safe_exp)(4.0) == np.exp(4.0)\n assert grad(safe_exp)(MAX) == np.exp(MAX)\n assert grad(safe_exp)(MAX + 1) == np.exp(MAX)\n"
] | [
[
"numpy.ones",
"pandas.Series",
"numpy.ones_like",
"numpy.testing.assert_almost_equal",
"pandas.notnull",
"numpy.testing.assert_array_equal",
"numpy.random.choice",
"pandas.isnull",
"pandas.testing.assert_frame_equal",
"numpy.linspace",
"sklearn.model_selection.cross_val_score",
"numpy.zeros",
"numpy.arange",
"pandas.testing.assert_series_equal",
"numpy.array",
"numpy.linalg.norm",
"numpy.random.binomial",
"numpy.zeros_like",
"pandas.DataFrame",
"numpy.random.randn",
"numpy.exp",
"pandas.DataFrame.from_records",
"numpy.linalg.lstsq",
"numpy.random.exponential",
"numpy.random.normal",
"numpy.random.randint",
"pandas.Interval"
]
] |
roozhou/botty | [
"a67a87845687cdf6900af10a13dc7170684faa9a"
] | [
"src/char/trapsin.py"
] | [
"import keyboard\nfrom utils.custom_mouse import mouse\nfrom char import IChar\nfrom pather import Pather\nfrom logger import Logger\nfrom screen import convert_abs_to_monitor, convert_screen_to_abs, grab\nfrom config import Config\nfrom utils.misc import wait, rotate_vec, unit_vector\nimport random\nfrom pather import Location, Pather\nimport numpy as np\n\n\nclass Trapsin(IChar):\n def __init__(self, skill_hotkeys: dict, pather: Pather):\n Logger.info(\"Setting up Trapsin\")\n super().__init__(skill_hotkeys)\n self._pather = pather\n\n def pre_buff(self):\n if Config().char[\"cta_available\"]:\n self._pre_buff_cta()\n if self._skill_hotkeys[\"fade\"]:\n keyboard.send(self._skill_hotkeys[\"fade\"])\n wait(0.1, 0.13)\n mouse.click(button=\"right\")\n wait(self._cast_duration)\n if self._skill_hotkeys[\"shadow_warrior\"]:\n keyboard.send(self._skill_hotkeys[\"shadow_warrior\"])\n wait(0.1, 0.13)\n mouse.click(button=\"right\")\n wait(self._cast_duration)\n if self._skill_hotkeys[\"burst_of_speed\"]:\n keyboard.send(self._skill_hotkeys[\"burst_of_speed\"])\n wait(0.1, 0.13)\n mouse.click(button=\"right\")\n wait(self._cast_duration)\n\n def _left_attack(self, cast_pos_abs: tuple[float, float], spray: int = 10):\n keyboard.send(Config().char[\"stand_still\"], do_release=False)\n if self._skill_hotkeys[\"skill_left\"]:\n keyboard.send(self._skill_hotkeys[\"skill_left\"])\n for _ in range(4):\n x = cast_pos_abs[0] + (random.random() * 2*spray - spray)\n y = cast_pos_abs[1] + (random.random() * 2*spray - spray)\n cast_pos_monitor = convert_abs_to_monitor((x, y))\n mouse.move(*cast_pos_monitor)\n mouse.press(button=\"left\")\n wait(0.2, 0.3)\n mouse.release(button=\"left\")\n keyboard.send(Config().char[\"stand_still\"], do_press=False)\n\n\n def _right_attack(self, cast_pos_abs: tuple[float, float], spray: float = 10):\n keyboard.send(self._skill_hotkeys[\"lightning_sentry\"])\n x = cast_pos_abs[0] + (random.random() * 2 * spray - spray)\n y = cast_pos_abs[1] + (random.random() * 2 * spray - spray)\n cast_pos_monitor = convert_abs_to_monitor((x, y))\n mouse.move(*cast_pos_monitor)\n def atk(num: int):\n for _ in range(num):\n mouse.press(button=\"right\")\n wait(0.20)\n mouse.release(button=\"right\")\n wait(0.15)\n atk(4)\n keyboard.send(self._skill_hotkeys[\"death_sentry\"])\n atk(1)\n\n def kill_pindle(self) -> bool:\n atk_len = max(1, int(Config().char[\"atk_len_pindle\"] / 2))\n pindle_pos_abs = convert_screen_to_abs(Config().path[\"pindle_end\"][0])\n cast_pos_abs = [pindle_pos_abs[0] * 0.9, pindle_pos_abs[1] * 0.9]\n for _ in range(atk_len):\n self._right_attack(cast_pos_abs, 11)\n self._left_attack(cast_pos_abs, 11)\n # Move to items\n wait(self._cast_duration, self._cast_duration + 0.2)\n if self.capabilities.can_teleport_natively:\n self._pather.traverse_nodes_fixed(\"pindle_end\", self)\n else:\n self._pather.traverse_nodes((Location.A5_PINDLE_SAFE_DIST, Location.A5_PINDLE_END), self, force_tp=True)\n return True\n\n def kill_eldritch(self) -> bool:\n atk_len = max(1, int(Config().char[\"atk_len_eldritch\"] / 2))\n eld_pos_abs = convert_screen_to_abs(Config().path[\"eldritch_end\"][0])\n cast_pos_abs = [eld_pos_abs[0] * 0.9, eld_pos_abs[1] * 0.9]\n for _ in range(atk_len):\n self._right_attack(cast_pos_abs, 90)\n self._left_attack(cast_pos_abs, 90)\n # Move to items\n wait(self._cast_duration, self._cast_duration + 0.2)\n if self.capabilities.can_teleport_natively:\n self._pather.traverse_nodes_fixed(\"eldritch_end\", self)\n else:\n 
self._pather.traverse_nodes((Location.A5_ELDRITCH_SAFE_DIST, Location.A5_ELDRITCH_END), self, timeout=0.6, force_tp=True)\n return True\n\n def kill_shenk(self) -> bool:\n atk_len = max(1, int(Config().char[\"atk_len_shenk\"] / 2))\n shenk_pos_abs = self._pather.find_abs_node_pos(149, grab())\n if shenk_pos_abs is None:\n shenk_pos_abs = convert_screen_to_abs(Config().path[\"shenk_end\"][0])\n cast_pos_abs = [shenk_pos_abs[0] * 0.9, shenk_pos_abs[1] * 0.9]\n for _ in range(atk_len):\n self._right_attack(cast_pos_abs, 90)\n self._left_attack(cast_pos_abs, 90)\n # Move to items\n wait(self._cast_duration, self._cast_duration + 0.2)\n self._pather.traverse_nodes((Location.A5_SHENK_SAFE_DIST, Location.A5_SHENK_END), self, timeout=1.4, force_tp=True)\n return True\n\n def kill_nihlathak(self, end_nodes: list[int]) -> bool:\n # Find nilhlatak position\n atk_len = max(1, int(Config().char[\"atk_len_nihlathak\"] / 2))\n for i in range(atk_len):\n nihlathak_pos_abs = self._pather.find_abs_node_pos(end_nodes[-1], grab())\n if nihlathak_pos_abs is None:\n return False\n cast_pos_abs = np.array([nihlathak_pos_abs[0] * 0.9, nihlathak_pos_abs[1] * 0.9])\n self._left_attack(cast_pos_abs, 90)\n self._right_attack(cast_pos_abs, 90)\n # Do some tele \"dancing\" after each sequence\n if i < atk_len - 1:\n rot_deg = random.randint(-10, 10) if i % 2 == 0 else random.randint(170, 190)\n tele_pos_abs = unit_vector(rotate_vec(cast_pos_abs, rot_deg)) * 100\n pos_m = convert_abs_to_monitor(tele_pos_abs)\n self.pre_move()\n self.move(pos_m)\n # Move to items\n wait(self._cast_duration, self._cast_duration + 0.2)\n self._pather.traverse_nodes(end_nodes, self, timeout=0.8)\n return True\n\n\nif __name__ == \"__main__\":\n import os\n import keyboard\n keyboard.add_hotkey('f12', lambda: Logger.info('Force Exit (f12)') or os._exit(1))\n keyboard.wait(\"f11\")\n from config import Config\n from char import Trapsin\n pather = Pather()\n char = Trapsin(Config().trapsin, Config().char, pather)"
] | [
[
"numpy.array"
]
] |
TD21forever/QoS-Predcition-Algorithm-library | [
"f4503462887d719a39c9ccddd6cc55546e783fd5"
] | [
"models/IPCC/model.py"
] | [
"import copy\nimport math\nimport numpy as np\nfrom tqdm import tqdm\nfrom utils.model_util import triad_to_matrix, nonzero_user_mean, nonzero_item_mean\n\n# 相似度计算库\nfrom scipy.stats import pearsonr\nfrom sklearn.metrics.pairwise import cosine_similarity\n\n\nclass IPCCModel(object):\n def __init__(self) -> None:\n super().__init__()\n self.matrix = None # QoS矩阵\n self.u_mean = None # 每个用户的评分均值(用于计算修正的余弦相似度)\n self.i_mean = None # 每个项目的评分均值\n self.similarity_matrix = None # 项目相似度矩阵\n self._nan_symbol = -1 # 缺失项标记(数据集中使用-1表示缺失项)\n\n def _get_similarity_matrix(self, matrix, metric):\n \"\"\"获取项目相似度矩阵\n\n Args:\n matrix (): QoS矩阵\n metric (): 相似度计算方法, 可选参数: PCC(皮尔逊相关系数), COS(余弦相似度), ACOS(修正的余弦相似度)\n\n \"\"\"\n _m = copy.deepcopy(matrix)\n _m[_m == self._nan_symbol] = 0 # 将缺失项用0代替,以便之后计算\n n_items = matrix.shape[1]\n similarity_matrix = np.zeros((n_items, n_items))\n\n # 计算相似度矩阵\n for i in tqdm(range(n_items), desc=\"生成相似度矩阵\"):\n for j in range(i + 1, n_items):\n col_i = _m[:, i]\n col_j = _m[:, j]\n nonzero_i = np.nonzero(col_i)[0] # 非0元素对应的下标\n nonzero_j = np.nonzero(col_j)[0]\n intersect = np.intersect1d(nonzero_i,\n nonzero_j) # 对项目i,j同时有评分的用户集合\n\n if len(intersect) == 0:\n sim = 0\n else:\n # 依据指定的相似度计算方法计算项目i,j的相似度\n try:\n if metric == 'PCC':\n # 如果一个项目的评分向量中所有值都相等,则无法计算皮尔逊相关系数\n if len(set(col_i[intersect])) == 1 or len(\n set(col_j[intersect])) == 1:\n sim = 0\n else:\n sim = pearsonr(col_i[intersect],\n col_j[intersect])[0]\n elif metric == 'COS':\n sim = cosine_similarity(col_i[intersect],\n col_j[intersect])\n elif metric == 'ACOS':\n sim = adjusted_cosine_similarity(\n col_i, col_j, intersect, self.u_mean)\n except Exception as e:\n sim = 0\n\n similarity_matrix[i][j] = similarity_matrix[j][i] = sim\n\n return similarity_matrix\n\n def _get_similarity_items(self, iid, topk=-1):\n \"\"\"获取相似用户\n\n Args:\n iid (): 当前项目\n topk (): 相似项目数量, -1表示不限制数量\n\n Returns:\n 依照相似度从大到小排序, 与当前项目最为相似的前topk个相似项目\n\n \"\"\"\n assert isinstance(topk, int)\n ordered_sim_iid = (\n -self.similarity_matrix[iid]).argsort() # 按相似度从大到小排序后, 相似用户对应的索引\n if topk == -1:\n return ordered_sim_iid\n else:\n assert topk > 0\n return ordered_sim_iid[:topk]\n\n def get_similarity(self, iid_a, iid_b):\n \"\"\"传入两个uid,获取这两个用户的相似度\n \"\"\"\n if iid_a == iid_b:\n return float(1)\n if iid_a + 1 > self.matrix.shape[1] or iid_b + 1 > self.matrix.shape[1]:\n return 0\n if self.similarity_matrix is None:\n assert self.matrix is not None, \"Please fit first e.g. model.fit()\"\n self._get_similarity_matrix(self.matrix)\n\n return self.similarity_matrix[iid_a][iid_b]\n\n def fit(self, triad, metric='PCC'):\n \"\"\"训练模型\n\n Args:\n triad (): 数据三元组: (uid, iid, rating)\n metric (): 相似度计算方法, 可选参数: PCC(皮尔逊相关系数), COS(余弦相似度), ACOS(修正的余弦相似度)\n \"\"\"\n self.matrix = triad_to_matrix(triad, self._nan_symbol) # 数据三元组转QoS矩阵\n self.u_mean = nonzero_user_mean(self.matrix,\n self._nan_symbol) # 根据QoS矩阵计算每个用户的评分均值\n # FIXME 考虑i_mean为0的情况\n self.i_mean = nonzero_item_mean(self.matrix,\n self._nan_symbol) # 根据QoS矩阵计算每个项目的评分均值\n self.similarity_matrix = self._get_similarity_matrix(\n self.matrix, metric) # 根据QoS矩阵获取项目相似矩阵\n\n def predict(self, triad, topK=-1):\n y_list = [] # 真实评分\n y_pred_list = [] # 预测评分\n cold_boot_cnt = 0 # 冷启动统计\n\n for row in tqdm(triad, desc=\"Predict... 
\"):\n uid, iid, rate = int(row[0]), int(row[1]), float(row[2])\n # 冷启动: 新用户因为没有计算过相似用户, 因此无法预测评分\n if iid + 1 > self.matrix.shape[1]:\n cold_boot_cnt += 1\n continue\n i_mean = self.i_mean[iid]\n similarity_items = self._get_similarity_items(iid, topK)\n up = 0 # 分子\n down = 0 # 分母\n # 对于当前项目的每一个相似项目\n for sim_iid in similarity_items:\n sim_item_rate = self.matrix[uid][sim_iid] # 当前用户对相似项目的评分\n similarity = self.get_similarity(iid, sim_iid)\n # 如果当前用户对相似项目没有评分,则不进行计算\n if sim_item_rate == self._nan_symbol:\n continue\n up += similarity * (sim_item_rate - self.i_mean[sim_iid]\n ) # 相似度 * (相似项目评分 - 相似项目评分均值)\n down += similarity # 相似度的绝对值\n\n if down != 0:\n y_pred = i_mean + up / down\n else:\n y_pred = 0\n\n y_pred_list.append(y_pred)\n y_list.append(rate)\n\n print(f\"cold boot :{cold_boot_cnt / len(triad) * 100:4f}%\")\n return y_list, y_pred_list\n\n\ndef adjusted_cosine_similarity(x, y, intersect, u_mean):\n \"\"\"修正的余弦相似度\n\n Returns:\n\n \"\"\"\n n = len(x)\n if n != len(y):\n raise ValueError('x and y must have the same length.')\n if n < 2:\n raise ValueError('x and y must have length at least 2.')\n if len(intersect) < 2:\n raise ValueError('there must be at least two non-zero entries')\n\n x = np.asarray(x)\n y = np.asarray(y)\n\n multiply_sum = sum(\n (x[i] - u_mean[i]) * (y[i] - u_mean[i]) for i in intersect)\n pow_sum_x = sum(math.pow(x[i] - u_mean[i], 2) for i in intersect)\n pow_sum_y = sum(math.pow(y[i] - u_mean[i], 2) for i in intersect)\n\n return multiply_sum / math.sqrt(pow_sum_x * pow_sum_y)\n\n\nif __name__ == \"__main__\":\n triad = np.array([\n [0, 0, 0],\n [0, 1, 0],\n [1, 0, 1],\n [1, 1, 3],\n [1, 2, 4],\n [2, 0, 2],\n [2, 1, 3],\n [2, 2, 5],\n ])\n\n test = np.array([[0, 2, 3]])\n\n ipcc = IPCCModel()\n ipcc.fit(triad)\n ipcc.predict(test, 20)\n"
] | [
[
"scipy.stats.pearsonr",
"numpy.zeros",
"numpy.intersect1d",
"numpy.asarray",
"numpy.array",
"sklearn.metrics.pairwise.cosine_similarity",
"numpy.nonzero"
]
] |
XiaoSong9905/tvm | [
"48940f697e15d5b50fa1f032003e6c700ae1e423",
"48940f697e15d5b50fa1f032003e6c700ae1e423",
"48940f697e15d5b50fa1f032003e6c700ae1e423"
] | [
"tests/python/relay/test_op_qnn_subtract.py",
"python/tvm/autotvm/feature.py",
"gallery/tutorial/autotvm_relay_x86.py"
] | [
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport tvm\nimport numpy as np\nfrom tvm import relay\n\n\ndef qnn_subtract_driver(x_datas, y_datas, golden_outputs, scale_and_zp, data_dtype=\"uint8\"):\n # all x, y and golden outputs should be of the same length\n assert len(x_datas) == len(y_datas)\n assert len(y_datas) == len(golden_outputs)\n\n x = relay.var(\"x\", shape=(1, 4), dtype=data_dtype)\n y = relay.var(\"y\", shape=(1, 4), dtype=data_dtype)\n lhs_scale = relay.const(scale_and_zp[\"lhs_scale\"], \"float32\")\n lhs_zp = relay.const(scale_and_zp[\"lhs_zp\"], \"int32\")\n rhs_scale = relay.const(scale_and_zp[\"rhs_scale\"], \"float32\")\n rhs_zp = relay.const(scale_and_zp[\"rhs_zp\"], \"int32\")\n output_scale = relay.const(scale_and_zp[\"output_scale\"], \"float32\")\n output_zp = relay.const(scale_and_zp[\"output_zp\"], \"int32\")\n z = relay.qnn.op.subtract(\n lhs=x,\n rhs=y,\n lhs_scale=lhs_scale,\n lhs_zero_point=lhs_zp,\n rhs_scale=rhs_scale,\n rhs_zero_point=rhs_zp,\n output_scale=output_scale,\n output_zero_point=output_zp,\n )\n func = relay.Function([x, y], z)\n mod = tvm.IRModule.from_expr(func)\n mod = relay.transform.InferType()(mod)\n mod = relay.qnn.transform.CanonicalizeOps()(mod)\n func = mod[\"main\"]\n for i in range(0, len(x_datas)):\n x_data = x_datas[i]\n y_data = y_datas[i]\n golden_output = golden_outputs[i]\n op_res = relay.create_executor(\"graph\", device=tvm.cpu(0), target=\"llvm\").evaluate(func)(\n x_data, y_data\n )\n np.testing.assert_equal(op_res.numpy(), golden_output)\n\n\ndef test_tflite_same_io_qnn_params():\n scale_and_zp = {\n \"lhs_scale\": 0.00784314,\n \"lhs_zp\": 127,\n \"rhs_scale\": 0.00784314,\n \"rhs_zp\": 127,\n \"output_scale\": 0.00784314,\n \"output_zp\": 127,\n }\n x_datas = [\n np.array((140, 153, 165, 178)).reshape((1, 4)),\n np.array((25, 153, 178, 216)).reshape((1, 4)),\n np.array((25, 153, 216, 165)).reshape((1, 4)),\n ]\n y_datas = [\n np.array((204, 178, 165, 140)).reshape((1, 4)),\n np.array((204, 178, 191, 25)).reshape((1, 4)),\n np.array((204, 178, 25, 191)).reshape((1, 4)),\n ]\n golden_outputs = [\n np.array((63, 102, 127, 165)).reshape((1, 4)),\n np.array((0, 102, 114, 255)).reshape((1, 4)),\n np.array((0, 102, 255, 101)).reshape((1, 4)),\n ]\n qnn_subtract_driver(x_datas, y_datas, golden_outputs, scale_and_zp)\n\n\ndef test_tflite_different_io_qnn_params():\n scale_and_zp = {\n \"lhs_scale\": 0.0156863,\n \"lhs_zp\": 127,\n \"rhs_scale\": 0.0117647,\n \"rhs_zp\": 85,\n \"output_scale\": 0.0235294,\n \"output_zp\": 128,\n }\n x_datas = [\n np.array((76, 140, 153, 172)).reshape((1, 4)),\n np.array((133, 140, 146, 153)).reshape((1, 4)),\n np.array((76, 140, 172, 146)).reshape((1, 4)),\n ]\n y_datas = [\n np.array((136, 119, 128, 17)).reshape((1, 4)),\n np.array((136, 119, 111, 
94)).reshape((1, 4)),\n np.array((136, 119, 17, 128)).reshape((1, 4)),\n ]\n golden_outputs = [\n np.array((68, 120, 123, 192)).reshape((1, 4)),\n np.array((106, 120, 128, 140)).reshape((1, 4)),\n np.array((68, 120, 192, 119)).reshape((1, 4)),\n ]\n qnn_subtract_driver(x_datas, y_datas, golden_outputs, scale_and_zp)\n\n\ndef test_saturation():\n # Same params\n scale_and_zp = {\n \"lhs_scale\": 0.125,\n \"lhs_zp\": 0,\n \"rhs_scale\": 0.125,\n \"rhs_zp\": 0,\n \"output_scale\": 0.125,\n \"output_zp\": 0,\n }\n x_data = [np.array((255, 1, 1, 0)).reshape((1, 4))]\n y_data = [np.array((255, 255, 128, 0)).reshape((1, 4))]\n golden_output = [np.array((0, 0, 0, 0)).reshape((1, 4))]\n qnn_subtract_driver(x_data, y_data, golden_output, scale_and_zp)\n\n # Same params, different scale\n scale_and_zp = {\n \"lhs_scale\": 0.125,\n \"lhs_zp\": 0,\n \"rhs_scale\": 0.125,\n \"rhs_zp\": 0,\n \"output_scale\": 0.25,\n \"output_zp\": 0,\n }\n x_data = [np.array((255, 1, 200, 0)).reshape((1, 4))]\n y_data = [np.array((255, 255, 127, 0)).reshape((1, 4))]\n golden_output = [np.array((0, 0, 36, 0)).reshape((1, 4))]\n qnn_subtract_driver(x_data, y_data, golden_output, scale_and_zp)\n\n # All params different\n scale_and_zp = {\n \"lhs_scale\": 0.5,\n \"lhs_zp\": 0,\n \"rhs_scale\": 0.25,\n \"rhs_zp\": 0,\n \"output_scale\": 0.125,\n \"output_zp\": 0,\n }\n x_data = [np.array((255, 0, 1, 0)).reshape((1, 4))]\n y_data = [np.array((0, 128, 64, 0)).reshape((1, 4))]\n golden_output = [np.array((255, 0, 0, 0)).reshape((1, 4))]\n qnn_subtract_driver(x_data, y_data, golden_output, scale_and_zp)\n\n\nif __name__ == \"__main__\":\n test_tflite_same_io_qnn_params()\n test_tflite_different_io_qnn_params()\n test_saturation()\n",
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=invalid-name,\n\"\"\"Extract feature of iter vars\n\nThere are two types of feature\n1) Itervar feature\n This feature is extracted based on loop variables.\n Different loop structures will result in different shapes of feature\n2) Curve sample feature (relation feature)\n This feature is extracted by sampling relation curve.\n This feature is invariant of loop structure.\n\"\"\"\n\nimport struct\nimport numpy as np\nimport tvm._ffi\n\nfrom tvm.target import Target\nfrom tvm.driver import build_module\n\n\ndef ana_lower(sch, args, binds=None, simple_mode=True):\n \"\"\"Do lower while keeping all axes in IR\n i.e. Do not eliminate loop with extent of 1, do not vectorize, unroll or inject virtual threads\n \"\"\"\n sch = sch.normalize()\n # Phase 0\n context = tvm.transform.PassContext(config={\"tir.debug_keep_trivial_loop\": True})\n with context:\n mod = build_module.schedule_to_module(sch, args, binds=binds)\n\n mod = tvm.tir.transform.StorageFlatten(64)(mod._move())\n mod = tvm.tir.transform.Simplify()(mod._move())\n assert simple_mode\n return mod[\"main\"].body\n\n\ntry:\n _get_buffer_curve_sample_flatten = tvm._ffi.get_global_func(\n \"autotvm.feature.GetCurveSampleFeatureFlatten\"\n )\n _get_itervar_feature = tvm._ffi.get_global_func(\"autotvm.feature.GetItervarFeature\")\n _get_itervar_feature_flatten = tvm._ffi.get_global_func(\n \"autotvm.feature.GetItervarFeatureFlatten\"\n )\nexcept ValueError as e:\n\n def raise_error(*args, **kwargs): # pylint: disable=unused-argument\n raise RuntimeError(\"Cannot load autotvm c++ API\")\n\n _get_buffer_curve_sample_flatten = (\n _get_itervar_feature\n ) = _get_itervar_feature_flatten = raise_error\n\n\ndef get_itervar_feature(sch, args, take_log=False):\n \"\"\"get features of iter vars\n\n Parameters\n ----------\n sch: tvm.te.schedule.Schedule\n args: Array of te.tensor.Tensor\n the buffer args for lower\n take_log: bool\n whether take log of numerical statics\n\n Returns\n -------\n features of every axis in the IR, see doc/features.md for detail\n \"\"\"\n stmt = ana_lower(sch, args, simple_mode=True)\n feas = _get_itervar_feature(stmt, take_log)\n\n # convert tvm node to python type\n ret = []\n for row in feas:\n tmp = []\n tmp.append([row[0][0].value, row[0][1]])\n for item in row[1:]:\n tmp.append([item[0].value] + [x.value for x in item[1:]])\n ret.append(tmp)\n return ret\n\n\ndef flatten_itervar_feature(fea):\n \"\"\"flatten features into one-dimensional feature vectors\n\n Parameters\n ----------\n fea: list\n return value of get_itervar_feature\n\n Returns\n -------\n flatten_feature: np.ndarray\n one-dimensional vector\n \"\"\"\n flatten = []\n for axis in fea:\n for pair in axis[1:]:\n flatten.append(pair[1:])\n return 
np.concatenate(flatten)\n\n\ndef get_itervar_feature_flatten(sch, args, take_log=True):\n \"\"\"get flatten features of iter vars\n this is equivalent to get_itervar_feature + flatten_itervar_feature, but much faster.\n\n Parameters\n ----------\n sch: tvm.te.schedule.Schedule\n args: Array of te.tensor.Tensor\n the buffer args for lower\n take_log: bool\n whether take log of numerical statics\n\n Returns\n -------\n flatten_feature: np.ndarray\n one-dimensional vector\n \"\"\"\n stmt = ana_lower(sch, args, simple_mode=True)\n feas = _get_itervar_feature_flatten(stmt, take_log)\n feas = struct.unpack(\"%df\" % (len(feas) // 4), feas)\n return feas\n\n\ndef get_flatten_name(fea):\n \"\"\"Get names of feature after flatten.\n\n Parameters\n ----------\n fea: list or str\n return value of get_itervar_feature or a line of logfile\n\n Returns\n -------\n feature_names: Array of str\n \"\"\"\n\n feature_name = {\n \"_attr_\": [\"length\", \"nest_level\", \"topdown\", \"bottomup\"]\n + [\"ann_%d\" % i for i in range(20)],\n \"_arith_\": [\"add\", \"mul\", \"div\"],\n \"buf_touch\": [\"stride\", \"mod\", \"count\", \"reuse\", \"T_count\", \"T_reuse\"],\n }\n\n if isinstance(fea, str):\n # pylint: disable=import-outside-toplevel\n from .record import decode\n\n # flatten line to feature\n line = fea\n ret = decode(line)\n if ret is None:\n raise ValueError(\"Unsupported AutoTVM log format\")\n inp, _ = ret\n target = Target(inp.target)\n with target:\n s, args = inp.template.instantiate(inp.config)\n fea = get_itervar_feature(s, args)\n\n names = []\n ct = 0\n for row in fea:\n var_name = str(row[0][1])\n for pair in row[1:]:\n key = pair[0]\n if key in feature_name:\n name_list = feature_name[key]\n else:\n name_list = feature_name[\"buf_touch\"]\n\n for i in range(len((pair[1:]))):\n names.append(\".\".join([\"f%d\" % ct, var_name, key, name_list[i]]))\n ct += 1\n return names\n\n\ndef get_buffer_curve_sample_flatten(sch, args, sample_n=30):\n \"\"\"\n Get flatten curve sample feature (relation feature)\n\n Parameters\n ----------\n sch: tvm.te.schedule.Schedule\n args: Array of te.tensor.Tensor\n the buffer args for lower\n sample_n: int\n number of sample points along one dimension\n\n Returns\n -------\n flatten_feature: np.ndarray\n one-dimensional vector\n \"\"\"\n stmt = ana_lower(sch, args, simple_mode=True)\n feas = _get_buffer_curve_sample_flatten(stmt, sample_n, False)\n feas = struct.unpack(\"%df\" % (len(feas) // 4), feas)\n return feas\n",
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"\nCompiling and Optimizing a Model with the Python Interface (AutoTVM)\n====================================================================\n**Author**:\n`Chris Hoge <https://github.com/hogepodge>`_\n\nIn the `TVMC Tutorial <tvmc_command_line_driver>`_, we covered how to compile, run, and tune a\npre-trained vision model, ResNet-50 v2 using the command line interface for\nTVM, TVMC. TVM is more that just a command-line tool though, it is an\noptimizing framework with APIs available for a number of different languages\nthat gives you tremendous flexibility in working with machine learning models.\n\nIn this tutorial we will cover the same ground we did with TVMC, but show how\nit is done with the Python API. Upon completion of this section, we will have\nused the Python API for TVM to accomplish the following tasks:\n\n* Compile a pre-trained ResNet-50 v2 model for the TVM runtime.\n* Run a real image through the compiled model, and interpret the output and model\n performance.\n* Tune the model that model on a CPU using TVM.\n* Re-compile an optimized model using the tuning data collected by TVM.\n* Run the image through the optimized model, and compare the output and model\n performance.\n\nThe goal of this section is to give you an overview of TVM's capabilites and\nhow to use them through the Python API.\n\"\"\"\n\n################################################################################\n# TVM is a deep learning compiler framework, with a number of different modules\n# available for working with deep learning models and operators. In this\n# tutorial we will work through how to load, compile, and optimize a model\n# using the Python API.\n#\n# We begin by importing a number of dependencies, including ``onnx`` for\n# loading and converting the model, helper utilities for downloading test data,\n# the Python Image Library for working with the image data, ``numpy`` for pre\n# and post-processing of the image data, the TVM Relay framework, and the TVM\n# Graph Executor.\n\nimport onnx\nfrom tvm.contrib.download import download_testdata\nfrom PIL import Image\nimport numpy as np\nimport tvm.relay as relay\nimport tvm\nfrom tvm.contrib import graph_executor\n\n################################################################################\n# Downloading and Loading the ONNX Model\n# --------------------------------------\n#\n# For this tutorial, we will be working with ResNet-50 v2. ResNet-50 is a\n# convolutional neural network that is 50 layers deep and designed to classify\n# images. The model we will be using has been pre-trained on more than a\n# million images with 1000 different classifications. The network has an input\n# image size of 224x224. 
If you are interested exploring more of how the\n# ResNet-50 model is structured, we recommend downloading\n# `Netron <https://netron.app>`_, a freely available ML model viewer.\n#\n# TVM provides a helper library to download pre-trained models. By providing a\n# model URL, file name, and model type through the module, TVM will download\n# the model and save it to disk. For the instance of an ONNX model, you can\n# then load it into memory using the ONNX runtime.\n#\n# .. admonition:: Working with Other Model Formats\n#\n# TVM supports many popular model formats. A list can be found in the\n# :ref:`Compile Deep Learning Models <tutorial-frontend>` section of the TVM\n# Documentation.\n\nmodel_url = (\n \"https://github.com/onnx/models/raw/main/\"\n \"vision/classification/resnet/model/\"\n \"resnet50-v2-7.onnx\"\n)\n\nmodel_path = download_testdata(model_url, \"resnet50-v2-7.onnx\", module=\"onnx\")\nonnx_model = onnx.load(model_path)\n\n################################################################################\n# Downloading, Preprocessing, and Loading the Test Image\n# ------------------------------------------------------\n#\n# Each model is particular when it comes to expected tensor shapes, formats and\n# data types. For this reason, most models require some pre and\n# post-processing, to ensure the input is valid and to interpret the output.\n# TVMC has adopted NumPy's ``.npz`` format for both input and output data.\n#\n# As input for this tutorial, we will use the image of a cat, but you can feel\n# free to substitute this image for any of your choosing.\n#\n# .. image:: https://s3.amazonaws.com/model-server/inputs/kitten.jpg\n# :height: 224px\n# :width: 224px\n# :align: center\n#\n# Download the image data, then convert it to a numpy array to use as an input to the model.\n\nimg_url = \"https://s3.amazonaws.com/model-server/inputs/kitten.jpg\"\nimg_path = download_testdata(img_url, \"imagenet_cat.png\", module=\"data\")\n\n# Resize it to 224x224\nresized_image = Image.open(img_path).resize((224, 224))\nimg_data = np.asarray(resized_image).astype(\"float32\")\n\n# Our input image is in HWC layout while ONNX expects CHW input, so convert the array\nimg_data = np.transpose(img_data, (2, 0, 1))\n\n# Normalize according to the ImageNet input specification\nimagenet_mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))\nimagenet_stddev = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))\nnorm_img_data = (img_data / 255 - imagenet_mean) / imagenet_stddev\n\n# Add the batch dimension, as we are expecting 4-dimensional input: NCHW.\nimg_data = np.expand_dims(norm_img_data, axis=0)\n\n###############################################################################\n# Compile the Model With Relay\n# ----------------------------\n#\n# The next step is to compile the ResNet model. We begin by importing the model\n# to relay using the `from_onnx` importer. We then build the model, with\n# standard optimizations, into a TVM library. Finally, we create a TVM graph\n# runtime module from the library.\n\ntarget = \"llvm\"\n\n######################################################################\n# .. admonition:: Defining the Correct Target\n#\n# Specifying the correct target can have a huge impact on the performance of\n# the compiled module, as it can take advantage of hardware features\n# available on the target. 
For more information, please refer to\n# :ref:`Auto-tuning a convolutional network for x86 CPU <tune_relay_x86>`.\n# We recommend identifying which CPU you are running, along with optional\n# features, and set the target appropriately. For example, for some\n# processors ``target = \"llvm -mcpu=skylake\"``, or ``target = \"llvm\n# -mcpu=skylake-avx512\"`` for processors with the AVX-512 vector instruction\n# set.\n#\n\n# The input name may vary across model types. You can use a tool\n# like Netron to check input names\ninput_name = \"data\"\nshape_dict = {input_name: img_data.shape}\n\nmod, params = relay.frontend.from_onnx(onnx_model, shape_dict)\n\nwith tvm.transform.PassContext(opt_level=3):\n lib = relay.build(mod, target=target, params=params)\n\ndev = tvm.device(str(target), 0)\nmodule = graph_executor.GraphModule(lib[\"default\"](dev))\n\n######################################################################\n# Execute on the TVM Runtime\n# --------------------------\n# Now that we've compiled the model, we can use the TVM runtime to make\n# predictions with it. To use TVM to run the model and make predictions, we\n# need two things:\n#\n# - The compiled model, which we just produced.\n# - Valid input to the model to make predictions on.\n\ndtype = \"float32\"\nmodule.set_input(input_name, img_data)\nmodule.run()\noutput_shape = (1, 1000)\ntvm_output = module.get_output(0, tvm.nd.empty(output_shape)).numpy()\n\n################################################################################\n# Collect Basic Performance Data\n# ------------------------------\n# We want to collect some basic performance data associated with this\n# unoptimized model and compare it to a tuned model later. To help account for\n# CPU noise, we run the computation in multiple batches in multiple\n# repetitions, then gather some basis statistics on the mean, median, and\n# standard deviation.\nimport timeit\n\ntiming_number = 10\ntiming_repeat = 10\nunoptimized = (\n np.array(timeit.Timer(lambda: module.run()).repeat(repeat=timing_repeat, number=timing_number))\n * 1000\n / timing_number\n)\nunoptimized = {\n \"mean\": np.mean(unoptimized),\n \"median\": np.median(unoptimized),\n \"std\": np.std(unoptimized),\n}\n\nprint(unoptimized)\n\n################################################################################\n# Postprocess the output\n# ----------------------\n#\n# As previously mentioned, each model will have its own particular way of\n# providing output tensors.\n#\n# In our case, we need to run some post-processing to render the outputs from\n# ResNet-50 v2 into a more human-readable form, using the lookup-table provided\n# for the model.\n\nfrom scipy.special import softmax\n\n# Download a list of labels\nlabels_url = \"https://s3.amazonaws.com/onnx-model-zoo/synset.txt\"\nlabels_path = download_testdata(labels_url, \"synset.txt\", module=\"data\")\n\nwith open(labels_path, \"r\") as f:\n labels = [l.rstrip() for l in f]\n\n# Open the output and read the output tensor\nscores = softmax(tvm_output)\nscores = np.squeeze(scores)\nranks = np.argsort(scores)[::-1]\nfor rank in ranks[0:5]:\n print(\"class='%s' with probability=%f\" % (labels[rank], scores[rank]))\n\n################################################################################\n# This should produce the following output:\n#\n# .. 
code-block:: bash\n#\n# # class='n02123045 tabby, tabby cat' with probability=0.610553\n# # class='n02123159 tiger cat' with probability=0.367179\n# # class='n02124075 Egyptian cat' with probability=0.019365\n# # class='n02129604 tiger, Panthera tigris' with probability=0.001273\n# # class='n04040759 radiator' with probability=0.000261\n\n################################################################################\n# Tune the model\n# --------------\n# The previous model was compiled to work on the TVM runtime, but did not\n# include any platform specific optimization. In this section, we will show you\n# how to build an optimized model using TVM to target your working platform.\n#\n# In some cases, we might not get the expected performance when running\n# inferences using our compiled module. In cases like this, we can make use of\n# the auto-tuner, to find a better configuration for our model and get a boost\n# in performance. Tuning in TVM refers to the process by which a model is\n# optimized to run faster on a given target. This differs from training or\n# fine-tuning in that it does not affect the accuracy of the model, but only\n# the runtime performance. As part of the tuning process, TVM will try running\n# many different operator implementation variants to see which perform best.\n# The results of these runs are stored in a tuning records file.\n#\n# In the simplest form, tuning requires you to provide three things:\n#\n# - the target specification of the device you intend to run this model on\n# - the path to an output file in which the tuning records will be stored\n# - a path to the model to be tuned.\n#\n\nimport tvm.auto_scheduler as auto_scheduler\nfrom tvm.autotvm.tuner import XGBTuner\nfrom tvm import autotvm\n\n################################################################################\n# Set up some basic parameters for the runner. The runner takes compiled code\n# that is generated with a specific set of parameters and measures the\n# performance of it. ``number`` specifies the number of different\n# configurations that we will test, while ``repeat`` specifies how many\n# measurements we will take of each configuration. ``min_repeat_ms`` is a value\n# that specifies how long need to run configuration test. If the number of\n# repeats falls under this time, it will be increased. This option is necessary\n# for accurate tuning on GPUs, and is not required for CPU tuning. Setting this\n# value to 0 disables it. The ``timeout`` places an upper limit on how long to\n# run training code for each tested configuration.\n\nnumber = 10\nrepeat = 1\nmin_repeat_ms = 0 # since we're tuning on a CPU, can be set to 0\ntimeout = 10 # in seconds\n\n# create a TVM runner\nrunner = autotvm.LocalRunner(\n number=number,\n repeat=repeat,\n timeout=timeout,\n min_repeat_ms=min_repeat_ms,\n enable_cpu_cache_flush=True,\n)\n\n################################################################################\n# Create a simple structure for holding tuning options. We use an XGBoost\n# algorithim for guiding the search. For a production job, you will want to set\n# the number of trials to be larger than the value of 10 used here. For CPU we\n# recommend 1500, for GPU 3000-4000. The number of trials required can depend\n# on the particular model and processor, so it's worth spending some time\n# evaluating performance across a range of values to find the best balance\n# between tuning time and model optimization. 
Because running tuning is time\n# intensive we set number of trials to 10, but do not recommend a value this\n# small. The ``early_stopping`` parameter is the minimum number of trails to\n# run before a condition that stops the search early can be applied. The\n# measure option indicates where trial code will be built, and where it will be\n# run. In this case, we're using the ``LocalRunner`` we just created and a\n# ``LocalBuilder``. The ``tuning_records`` option specifies a file to write\n# the tuning data to.\n\ntuning_option = {\n \"tuner\": \"xgb\",\n \"trials\": 10,\n \"early_stopping\": 100,\n \"measure_option\": autotvm.measure_option(\n builder=autotvm.LocalBuilder(build_func=\"default\"), runner=runner\n ),\n \"tuning_records\": \"resnet-50-v2-autotuning.json\",\n}\n\n################################################################################\n# .. admonition:: Defining the Tuning Search Algorithm\n#\n# By default this search is guided using an `XGBoost Grid` algorithm.\n# Depending on your model complexity and amount of time available, you might\n# want to choose a different algorithm.\n\n\n################################################################################\n# .. admonition:: Setting Tuning Parameters\n#\n# In this example, in the interest of time, we set the number of trials and\n# early stopping to 10. You will likely see more performance improvements if\n# you set these values to be higher but this comes at the expense of time\n# spent tuning. The number of trials required for convergence will vary\n# depending on the specifics of the model and the target platform.\n\n# begin by extracting the tasks from the onnx model\ntasks = autotvm.task.extract_from_program(mod[\"main\"], target=target, params=params)\n\n# Tune the extracted tasks sequentially.\nfor i, task in enumerate(tasks):\n prefix = \"[Task %2d/%2d] \" % (i + 1, len(tasks))\n tuner_obj = XGBTuner(task, loss_type=\"rank\")\n tuner_obj.tune(\n n_trial=min(tuning_option[\"trials\"], len(task.config_space)),\n early_stopping=tuning_option[\"early_stopping\"],\n measure_option=tuning_option[\"measure_option\"],\n callbacks=[\n autotvm.callback.progress_bar(tuning_option[\"trials\"], prefix=prefix),\n autotvm.callback.log_to_file(tuning_option[\"tuning_records\"]),\n ],\n )\n\n################################################################################\n# The output from this tuning process will look something like this:\n#\n# .. 
code-block:: bash\n#\n# # [Task 1/24] Current/Best: 10.71/ 21.08 GFLOPS | Progress: (60/1000) | 111.77 s Done.\n# # [Task 1/24] Current/Best: 9.32/ 24.18 GFLOPS | Progress: (192/1000) | 365.02 s Done.\n# # [Task 2/24] Current/Best: 22.39/ 177.59 GFLOPS | Progress: (960/1000) | 976.17 s Done.\n# # [Task 3/24] Current/Best: 32.03/ 153.34 GFLOPS | Progress: (800/1000) | 776.84 s Done.\n# # [Task 4/24] Current/Best: 11.96/ 156.49 GFLOPS | Progress: (960/1000) | 632.26 s Done.\n# # [Task 5/24] Current/Best: 23.75/ 130.78 GFLOPS | Progress: (800/1000) | 739.29 s Done.\n# # [Task 6/24] Current/Best: 38.29/ 198.31 GFLOPS | Progress: (1000/1000) | 624.51 s Done.\n# # [Task 7/24] Current/Best: 4.31/ 210.78 GFLOPS | Progress: (1000/1000) | 701.03 s Done.\n# # [Task 8/24] Current/Best: 50.25/ 185.35 GFLOPS | Progress: (972/1000) | 538.55 s Done.\n# # [Task 9/24] Current/Best: 50.19/ 194.42 GFLOPS | Progress: (1000/1000) | 487.30 s Done.\n# # [Task 10/24] Current/Best: 12.90/ 172.60 GFLOPS | Progress: (972/1000) | 607.32 s Done.\n# # [Task 11/24] Current/Best: 62.71/ 203.46 GFLOPS | Progress: (1000/1000) | 581.92 s Done.\n# # [Task 12/24] Current/Best: 36.79/ 224.71 GFLOPS | Progress: (1000/1000) | 675.13 s Done.\n# # [Task 13/24] Current/Best: 7.76/ 219.72 GFLOPS | Progress: (1000/1000) | 519.06 s Done.\n# # [Task 14/24] Current/Best: 12.26/ 202.42 GFLOPS | Progress: (1000/1000) | 514.30 s Done.\n# # [Task 15/24] Current/Best: 31.59/ 197.61 GFLOPS | Progress: (1000/1000) | 558.54 s Done.\n# # [Task 16/24] Current/Best: 31.63/ 206.08 GFLOPS | Progress: (1000/1000) | 708.36 s Done.\n# # [Task 17/24] Current/Best: 41.18/ 204.45 GFLOPS | Progress: (1000/1000) | 736.08 s Done.\n# # [Task 18/24] Current/Best: 15.85/ 222.38 GFLOPS | Progress: (980/1000) | 516.73 s Done.\n# # [Task 19/24] Current/Best: 15.78/ 203.41 GFLOPS | Progress: (1000/1000) | 587.13 s Done.\n# # [Task 20/24] Current/Best: 30.47/ 205.92 GFLOPS | Progress: (980/1000) | 471.00 s Done.\n# # [Task 21/24] Current/Best: 46.91/ 227.99 GFLOPS | Progress: (308/1000) | 219.18 s Done.\n# # [Task 22/24] Current/Best: 13.33/ 207.66 GFLOPS | Progress: (1000/1000) | 761.74 s Done.\n# # [Task 23/24] Current/Best: 53.29/ 192.98 GFLOPS | Progress: (1000/1000) | 799.90 s Done.\n# # [Task 24/24] Current/Best: 25.03/ 146.14 GFLOPS | Progress: (1000/1000) | 1112.55 s Done.\n\n################################################################################\n# Compiling an Optimized Model with Tuning Data\n# ----------------------------------------------\n#\n# As an output of the tuning process above, we obtained the tuning records\n# stored in ``resnet-50-v2-autotuning.json``. 
The compiler will use the results to\n# generate high performance code for the model on your specified target.\n#\n# Now that tuning data for the model has been collected, we can re-compile the\n# model using optimized operators to speed up our computations.\n\nwith autotvm.apply_history_best(tuning_option[\"tuning_records\"]):\n with tvm.transform.PassContext(opt_level=3, config={}):\n lib = relay.build(mod, target=target, params=params)\n\ndev = tvm.device(str(target), 0)\nmodule = graph_executor.GraphModule(lib[\"default\"](dev))\n\n################################################################################\n# Verify that the optimized model runs and produces the same results:\n\ndtype = \"float32\"\nmodule.set_input(input_name, img_data)\nmodule.run()\noutput_shape = (1, 1000)\ntvm_output = module.get_output(0, tvm.nd.empty(output_shape)).numpy()\n\nscores = softmax(tvm_output)\nscores = np.squeeze(scores)\nranks = np.argsort(scores)[::-1]\nfor rank in ranks[0:5]:\n print(\"class='%s' with probability=%f\" % (labels[rank], scores[rank]))\n\n################################################################################\n# Verifying that the predictions are the same:\n#\n# .. code-block:: bash\n#\n# # class='n02123045 tabby, tabby cat' with probability=0.610550\n# # class='n02123159 tiger cat' with probability=0.367181\n# # class='n02124075 Egyptian cat' with probability=0.019365\n# # class='n02129604 tiger, Panthera tigris' with probability=0.001273\n# # class='n04040759 radiator' with probability=0.000261\n\n################################################################################\n# Comparing the Tuned and Untuned Models\n# --------------------------------------\n# We want to collect some basic performance data associated with this optimized\n# model to compare it to the unoptimized model. Depending on your underlying\n# hardware, number of iterations, and other factors, you should see a performance\n# improvement in comparing the optimized model to the unoptimized model.\n\nimport timeit\n\ntiming_number = 10\ntiming_repeat = 10\noptimized = (\n np.array(timeit.Timer(lambda: module.run()).repeat(repeat=timing_repeat, number=timing_number))\n * 1000\n / timing_number\n)\noptimized = {\"mean\": np.mean(optimized), \"median\": np.median(optimized), \"std\": np.std(optimized)}\n\n\nprint(\"optimized: %s\" % (optimized))\nprint(\"unoptimized: %s\" % (unoptimized))\n\n################################################################################\n# Final Remarks\n# -------------\n#\n# In this tutorial, we gave a short example of how to use the TVM Python API\n# to compile, run, and tune a model. We also discussed the need for pre and\n# post-processing of inputs and outputs. After the tuning process, we\n# demonstrated how to compare the performance of the unoptimized and optimize\n# models.\n#\n# Here we presented a simple example using ResNet-50 v2 locally. However, TVM\n# supports many more features including cross-compilation, remote execution and\n# profiling/benchmarking.\n"
] | [
[
"numpy.array"
],
[
"numpy.concatenate"
],
[
"numpy.transpose",
"numpy.squeeze",
"scipy.special.softmax",
"numpy.argsort",
"numpy.median",
"numpy.asarray",
"numpy.expand_dims",
"numpy.array",
"numpy.std",
"numpy.mean"
]
] |
astokely/seekr2 | [
"2fd8496dc885339437678a729b1f97a4b0bf9cfd"
] | [
"seekr2/tests/test_analyze.py"
] | [
"\"\"\"\ntest_analyze.py\n\nTesting analyze.py\n\"\"\"\n\nimport os\nfrom collections import defaultdict\n\nimport numpy as np\n\nimport seekr2.modules.common_analyze as common_analyze\nimport seekr2.modules.mmvt_analyze as mmvt_analyze\nimport seekr2.analyze as analyze\nimport seekr2.modules.common_base as base\nimport seekr2.modules.mmvt_base as mmvt_base\nimport seekr2.tests.smoluchowski_system as smoluchowski\n\nthis_dir = os.path.dirname(os.path.realpath(__file__))\n\ntest_output_filename = os.path.join(this_dir, \"test_analyze_outputfile.txt\")\ntest_statistics_filename = os.path.join(this_dir, \"test_analyze_statistics.txt\")\n\ndef test_read_output_file():\n N_i_j_alpha, R_i_alpha_list, R_i_alpha_average, \\\n R_i_alpha_std_dev, R_i_alpha_total, N_alpha_beta, \\\n T_alpha_list, T_alpha_average, T_alpha_std_dev, \\\n T_alpha_total, existing_lines \\\n = mmvt_analyze.openmm_read_output_file_list(\n [test_output_filename])\n \n N_i_j_alpha_dict1 = N_i_j_alpha\n R_i_alpha_dict1 = R_i_alpha_total\n N_alpha_beta_dict1 = N_alpha_beta\n T_alpha1 = T_alpha_total\n #N_i_j_alpha_dict1, R_i_alpha_dict1, N_alpha_beta_dict1, T_alpha1 = \\\n # analyze.openmm_read_output_file_list([test_output_filename])\n \n N_i_j_alpha_dict2 = {(1, 2): 52, (2, 1): 52}\n R_i_alpha_dict2 = {1: 1658.696, 2: 198.912}\n N_alpha_beta_dict2 = {1: 2423, 2: 98}\n T_alpha2 = 1954.760\n \n for key in N_i_j_alpha_dict1:\n assert key in N_i_j_alpha_dict2\n assert np.isclose(N_i_j_alpha_dict1[key], N_i_j_alpha_dict2[key])\n \n for key in R_i_alpha_dict1:\n assert key in R_i_alpha_dict2\n assert np.isclose(R_i_alpha_dict1[key], R_i_alpha_dict2[key])\n \n for key in N_alpha_beta_dict1:\n assert key in N_alpha_beta_dict2\n assert np.isclose(N_alpha_beta_dict1[key], N_alpha_beta_dict2[key])\n \n assert np.isclose(T_alpha1, T_alpha2)\n \n N_i_j_alpha, R_i_alpha_list, R_i_alpha_average, \\\n R_i_alpha_std_dev, R_i_alpha_total, N_alpha_beta, \\\n T_alpha_list, T_alpha_average, T_alpha_std_dev, \\\n T_alpha_total, existing_lines \\\n = mmvt_analyze.openmm_read_output_file_list([test_output_filename, \n test_output_filename], \n skip_restart_check=True)\n \n N_i_j_alpha_dict1 = N_i_j_alpha\n R_i_alpha_dict1 = R_i_alpha_total\n N_alpha_beta_dict1 = N_alpha_beta\n T_alpha1 = T_alpha_total\n #N_i_j_alpha_dict1, R_i_alpha_dict1, N_alpha_beta_dict1, T_alpha = \\\n # analyze.openmm_read_output_file_list([test_output_filename, \n # test_output_filename])\n \n for key in N_i_j_alpha_dict1:\n assert key in N_i_j_alpha_dict2\n assert np.isclose(N_i_j_alpha_dict1[key], 2*N_i_j_alpha_dict2[key], \n rtol=0.01)\n \n for key in N_alpha_beta_dict1:\n assert key in N_alpha_beta_dict2\n assert np.isclose(N_alpha_beta_dict1[key], 2*N_alpha_beta_dict2[key], \n rtol=0.01)\n \n return\n\ndef test_minor2d():\n A = np.array([[1,2,3],[4,5,6],[7,8,9]])\n B = np.array([[1,3],[7,9]])\n C = np.array([[1,2],[4,5]])\n D = np.array([[2,8],[3,9]])\n assert common_analyze.minor2d(A, 1, 1).all() == B.all()\n assert common_analyze.minor2d(A, 2, 2).all() == C.all()\n assert common_analyze.minor2d(A, 1, 0).all() == D.all()\n return\n \ndef test_minor1d():\n A = np.array([1,2,3])\n B = np.array([1,3])\n C = np.array([2,3])\n D = np.array([1,2])\n assert common_analyze.minor1d(A, 1).all() == B.all()\n assert common_analyze.minor1d(A, 0).all() == C.all()\n assert common_analyze.minor1d(A, 2).all() == D.all()\n return\n\ndef test_pretty_string_value_error():\n mystr = common_analyze.pretty_string_value_error(\n 5.6e-2, 2.0e-3, error_digits=1, use_unicode=False)\n expectedstr 
= \"5.6 +/- 0.2 * 10^-02\"\n assert(mystr == expectedstr)\n mystr = common_analyze.pretty_string_value_error(\n 5.6e-2, 2.0e-1, error_digits=1, use_unicode=False)\n expectedstr = \"5.6 +/- 20.0 * 10^-02\"\n assert(mystr == expectedstr)\n mystr = common_analyze.pretty_string_value_error(\n 1.23456789e8, 4.5678e5, error_digits=2, use_unicode=False)\n expectedstr = \"1.2346 +/- 0.0046 * 10^+08\"\n assert(mystr == expectedstr)\n\ndef make_fake_output_file_osc(anchor, tmp_path, timestep=1.0):\n num_steps = 50\n \n mmvt_output_filename = os.path.join(\n tmp_path, anchor.name, \"prod\", \n \"%s%d.%s\" % (mmvt_base.OPENMMVT_BASENAME, 1, \n mmvt_base.OPENMMVT_EXTENSION))\n with open(mmvt_output_filename, \"w\") as f:\n if anchor.index == 0:\n for i in range(num_steps+1):\n line = \"%d,%d,%f\\n\" % (1, i, i*timestep)\n f.write(line)\n \n else:\n for i in range(num_steps+1):\n if (i % 2) == 0:\n line = \"%d,%d,%f\\n\" % (2, i, i*timestep)\n f.write(line)\n else:\n line = \"%d,%d,%f\\n\" % (1, i, i*timestep)\n f.write(line)\n return\n\ndef make_fake_output_file2(anchor, tmp_path, ups=1, downs=9, timestep=1.0):\n num_steps = 50\n total = ups + downs\n \n mmvt_output_filename = os.path.join(\n tmp_path, anchor.name, \"prod\", \n \"%s%d.%s\" % (mmvt_base.OPENMMVT_BASENAME, 1, \n mmvt_base.OPENMMVT_EXTENSION))\n with open(mmvt_output_filename, \"w\") as f:\n if anchor.index == 0:\n for i in range(num_steps+1):\n line = \"%d,%d,%f\\n\" % (1, i, i*timestep)\n f.write(line)\n \n else:\n for i in range(num_steps+1):\n if (i % total) < ups:\n line = \"%d,%d,%f\\n\" % (2, i, i*timestep)\n f.write(line)\n else:\n line = \"%d,%d,%f\\n\" % (1, i, i*timestep)\n f.write(line)\n return\n\ndef test_solve_rate_matrix():\n Q = np.array(\n [[-0.5, 0.5, 0.0, 0.0],\n [0.1, -0.3, 0.2, 0.0],\n [0.0, 0.15, -0.3, 0.15],\n [0.0, 0.0, 0.3, -0.4]])\n \n K = np.zeros(Q.shape, dtype=np.longdouble)\n for i in range(Q.shape[0]):\n for j in range(Q.shape[0]):\n if i == j:\n K[i,j] = 0.0\n else:\n K[i,j] = -Q[i,j] / Q[i,i]\n \n for i in range(K.shape[0]-1):\n my_sum = sum(K[i,:])\n for j in range(K.shape[0]):\n K[i,j] = K[i,j] / my_sum\n \n test_times_1 = common_analyze.solve_rate_matrix(Q)\n \n one_vector = np.ones((Q.shape[0]))\n test_times_2 = np.linalg.solve(Q, -one_vector)\n \n error = np.linalg.norm(test_times_2 - test_times_1)\n assert error < 1e-8\n return\n\n\"\"\"\ndef make_smol_calculation(tmp_path, func=None):\n num_anchors = 10\n D = 0.01\n interval = 1.0\n n = 101\n \n intervals = []\n for i in range(num_anchors):\n intervals.append(interval)\n \n if func is None:\n func = smoluchowski.expW_constant\n \n q_s = np.zeros(num_anchors)\n mymodel = smoluchowski.make_smol_model(tmp_path, num_anchors, intervals)\n my_analysis = analyze.Analysis(mymodel)\n elberN_ij = defaultdict(float)\n elberR_i = defaultdict(float)\n smols = []\n for i, anchor in enumerate(mymodel.anchors[:-1]):\n a = interval*i\n b = interval*(i+1)\n smol = smoluchowski.Smoluchowski(a, b, func, n=n, D=D)\n q_s[i] = smol.expWq\n if i == 0:\n smol.reflect_lower = True\n k_backwards, k_forwards, T_alpha, N_backwards, N_forwards, \\\n R_i_backwards, R_i_forwards, N_ij_backwards, N_ij_forwards \\\n = smol.compute_MMVT_kinetics_quantities()\n \n N_i_j_alpha_dict = defaultdict(int)\n R_i_alpha_dict = defaultdict(float)\n N_alpha_beta_dict = defaultdict(int)\n new_time_factor = (R_i_forwards + R_i_backwards) / T_alpha\n new_T_alpha = new_time_factor * T_alpha\n if i == 0:\n N_alpha_beta_dict[1] = new_time_factor\n R_i_alpha_dict[1] = new_T_alpha\n else:\n 
N_i_j_alpha_dict[(1, 2)] = N_ij_forwards\n N_i_j_alpha_dict[(2, 1)] = N_ij_backwards\n R_i_alpha_dict[1] = R_i_forwards\n R_i_alpha_dict[2] = R_i_backwards\n N_alpha_beta_dict[1] = N_backwards * new_time_factor\n N_alpha_beta_dict[2] = N_forwards * new_time_factor\n \n anchor_stats = mmvt_analyze.MMVT_anchor_statistics(alpha=i)\n anchor_stats.N_i_j_alpha = N_i_j_alpha_dict\n anchor_stats.R_i_alpha_total = R_i_alpha_dict\n anchor_stats.R_i_alpha_std_dev = R_i_alpha_dict\n anchor_stats.R_i_alpha_list = {}\n for key in anchor_stats.R_i_alpha_total:\n anchor_stats.R_i_alpha_list[key] = []\n anchor_stats.N_alpha_beta = N_alpha_beta_dict\n anchor_stats.T_alpha_total = new_T_alpha\n anchor_stats.T_alpha_std_dev = new_T_alpha\n for key in N_alpha_beta_dict:\n anchor_stats.k_alpha_beta[key] = N_alpha_beta_dict[key] \\\n / new_T_alpha\n\n # N_i_j_alpha_dict, R_i_alpha_dict, N_alpha_beta_dict, new_T_alpha, \n # alpha=i)\n # FIll out values here...\n my_analysis.anchor_stats_list.append(anchor_stats)\n smols.append(smol)\n \n for i, anchor in enumerate(mymodel.anchors[:-1]):\n smol1 = smols[i]\n if i == 0:\n smol2 = smols[i+1]\n elberN_ij[(0,1)] = 1.0\n # need to make sure that u and exp(-beta*W) match up\n # on the edge.\n smol1_edge_value = smol1.expWfunc(smol1.b, q=smol1.expWq)\n elberR_i[0] = (smol2.u_q_forward + (1.0/smol1_edge_value)) / (smol2.J_forward)\n elif i == mymodel.num_milestones-1:\n elberN_ij[(mymodel.num_milestones-1,mymodel.num_milestones-2)] = 1.0\n elberR_i[mymodel.num_milestones-1] = (smol1.u_q_backward) / (smol1.J_backward)\n else:\n smol2 = smols[i+1]\n elberN_ij[(i,i+1)] = smol2.J_forward / (smol2.J_forward + smol1.J_backward)\n elberN_ij[(i,i-1)] = smol1.J_backward / (smol2.J_forward + smol1.J_backward)\n elberR_i[i] = (smol2.u_q_forward + smol1.u_q_backward) / (smol2.J_forward + smol1.J_backward)\n \n my_analysis.mmvt_check_anchor_stats()\n \n #my_analyze._calculate_equilibrium_probability()\n #my_analyze._calculate_overall_statistics()\n #my_analysis.extract_data()\n my_analysis.fill_out_data_samples()\n my_analysis.main_data_sample.pi_alpha = np.zeros(mymodel.num_anchors)\n for i, anchor in enumerate(mymodel.anchors[:-1]):\n my_analysis.main_data_sample.pi_alpha[i] = q_s[i] / np.sum(q_s)\n my_analysis.fill_out_data_samples()\n my_analysis.process_data_samples()\n my_analysis.main_data_sample.Q = np.zeros((mymodel.num_milestones, \n mymodel.num_milestones), dtype=np.longdouble)\n elberQ = np.zeros((mymodel.num_milestones, \n mymodel.num_milestones), dtype=np.longdouble)\n for i in range(mymodel.num_milestones):\n for j in range(mymodel.num_milestones):\n if my_analysis.main_data_sample.R_i[i] == 0.0:\n my_analysis.main_data_sample.Q[i,j] = 0.0\n else:\n my_analysis.main_data_sample.Q[i,j] \\\n = my_analysis.main_data_sample.N_ij[i,j] \\\n / my_analysis.main_data_sample.R_i[i]\n if elberR_i[i] > 0.0:\n elberQ[i,j] = elberN_ij[i,j] / elberR_i[i]\n \n for i in range(mymodel.num_milestones):\n my_analysis.main_data_sample.Q[i][i] = \\\n -np.sum(my_analysis.main_data_sample.Q[i])\n elberQ[i][i] = -np.sum(elberQ[i])\n \n #my_analyze._rate_mat_to_prob_mat()\n #print(\"my_analyze.Q:\", my_analyze.Q)\n #print(\"elberQ:\", elberQ)\n #print(\"my_analyze.K:\", my_analyze.K)\n #my_analyze.calculate_kinetics()\n my_analysis.main_data_sample.calculate_kinetics()\n mmvt_time = my_analysis.main_data_sample.MFPTs[(0,\"bulk\")]\n #print(\"mmvt_time:\", mmvt_time)\n my_analysis.main_data_sample.Q = elberQ\n my_analysis.main_data_sample.calculate_kinetics()\n elber_time = 
my_analysis.main_data_sample.MFPTs[(0,\"bulk\")]\n #print(\"elber_time:\", elber_time)\n \n a1 = 0.0\n b1 = interval\n a2 = interval\n b2 = interval*num_anchors\n smol1 = smoluchowski.Smoluchowski(a1, b1, func, n=n, D=D)\n smol2 = smoluchowski.Smoluchowski(a2, b2, func, n=n, D=D)\n q1 = smol1.expWq\n q2 = smol2.expWq\n k_backwards, k_forwards, T_alpha, N_backwards, N_forwards, R_i_backwards, \\\n R_i_forwards, N_ij_backwards, N_ij_forwards \\\n = smol2.compute_MMVT_kinetics_quantities()\n \n J2 = q2 / (R_i_forwards + R_i_backwards)\n correct_time = R_i_forwards + q1/J2\n #print(\"correct_time:\", correct_time)\n print(\"Time predicted by Elber:\", elber_time, \"Time predicted by MMVT:\", \n mmvt_time, \"Exact time:\", correct_time)\n \n \"\"\n x_s = np.arange(0.0, num_anchors, interval)\n func_vals1 = np.zeros(num_anchors)\n func_vals2 = np.zeros(num_anchors)\n print(\"q_s:\", q_s)\n for i, x in enumerate(x_s):\n print(\"i:\", i, \"my_analyze.pi_alpha[i]:\", my_analyze.pi_alpha[i], \"q_s[i]:\", q_s[i] / np.sum(q_s))\n func_vals1[i] = my_analyze.pi_alpha[i]\n func_vals2[i] = q_s[i] / np.sum(q_s)\n \n plt.plot(x_s, func_vals1, \"g\", x_s, func_vals2, \"r\")\n plt.show()\n \"\"\n return mmvt_time, elber_time, correct_time\n \n\ndef test_smoluchowski_solution_flat_1(tmp_path):\n print(\"Constant PMF:\")\n mmvt_time, elber_time, true_time = make_smol_calculation(tmp_path)\n assert np.isclose(mmvt_time, true_time, rtol=0.001)\n assert np.isclose(elber_time, true_time, rtol=0.001)\n \n print(\"linear PMF:\")\n func = smoluchowski.expW_linear\n mmvt_time, elber_time, true_time = make_smol_calculation(tmp_path, func)\n assert np.isclose(mmvt_time, true_time, rtol=0.001)\n assert np.isclose(elber_time, true_time, rtol=0.001)\n \n print(\"quadratic PMF:\")\n func = smoluchowski.expW_quadratic\n mmvt_time, elber_time, true_time = make_smol_calculation(tmp_path, func)\n assert np.isclose(mmvt_time, true_time, rtol=0.001)\n assert np.isclose(elber_time, true_time, rtol=0.001)\n\"\"\""
] | [
[
"numpy.ones",
"numpy.linalg.solve",
"numpy.zeros",
"numpy.isclose",
"numpy.array",
"numpy.linalg.norm"
]
] |
marlene09/skan | [
"97a217d36ec1393b380d4a797b5b7ceb68e824ec"
] | [
"skan/pipe.py"
] | [
"import os\nfrom . import pre, csr\nimport imageio\nfrom tqdm import tqdm\nimport numpy as np\nfrom skimage import morphology\nimport pandas as pd\nfrom .image_stats import image_summary\nfrom skimage.feature import shape_index\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nimport multiprocessing as mp\n\n\nCPU_COUNT = int(os.environ.get('CPU_COUNT', mp.cpu_count()))\n\ndef _get_scale(image, md_path_or_scale):\n \"\"\"Get a valid scale from an image and a metadata path or scale.\n\n Parameters\n ----------\n image : np.ndarray\n The input image.\n md_path_or_scale : float or image filename\n The path to the file containing the metadata, or the scale.\n\n Returns\n -------\n scale : float\n \"\"\"\n scale = None\n try:\n scale = float(md_path_or_scale)\n except ValueError:\n pass\n if md_path_or_scale is not None and scale is None:\n md_path = md_path_or_scale.split(sep='/')\n meta = image.meta\n for key in md_path:\n meta = meta[key]\n scale = float(meta)\n else:\n if scale is None:\n scale = 1 # measurements will be in pixel units\n return scale\n\n\ndef process_single_image(filename, image_format, scale_metadata_path,\n threshold_radius, smooth_radius,\n brightness_offset, crop_radius, smooth_method):\n image = imageio.imread(filename, format=image_format)\n scale = _get_scale(image, scale_metadata_path)\n if crop_radius > 0:\n c = crop_radius\n image = image[c:-c, c:-c]\n pixel_threshold_radius = int(np.ceil(threshold_radius / scale))\n\n pixel_smoothing_radius = smooth_radius * pixel_threshold_radius\n thresholded = pre.threshold(image, sigma=pixel_smoothing_radius,\n radius=pixel_threshold_radius,\n offset=brightness_offset,\n smooth_method=smooth_method)\n quality = shape_index(image, sigma=pixel_smoothing_radius,\n mode='reflect')\n skeleton = morphology.skeletonize(thresholded) * quality\n framedata = csr.summarise(skeleton, spacing=scale)\n framedata['squiggle'] = np.log2(framedata['branch-distance'] /\n framedata['euclidean-distance'])\n framedata['scale'] = scale\n framedata.rename(columns={'mean pixel value': 'mean shape index'},\n inplace=True)\n framedata['filename'] = filename\n return image, thresholded, skeleton, framedata\n\n\ndef process_images(filenames, image_format, threshold_radius,\n smooth_radius, brightness_offset, scale_metadata_path,\n crop_radius=0, smooth_method='Gaussian',\n num_threads=CPU_COUNT):\n \"\"\"Full pipeline from images to skeleton stats with local median threshold.\n\n Parameters\n ----------\n filenames : list of string\n The list of input filenames.\n image_format : string\n The format of the files. 'auto' is automatically determined by the\n imageio library. See imageio documentation for valid image formats.\n threshold_radius : float\n The radius for median thresholding,\n smooth_radius : float in [0, 1]\n The value of sigma with which to Gaussian-smooth the image,\n **relative to `threshold_radius`**.\n brightness_offset : float\n The standard brightness value with which to threshold is the local\n median, `m(x, y)`. Use this value to offset from there: the threshold\n used will be `m(x, y) + brightness_offset`.\n scale_metadata_path : string\n The path in the image dictionary to find the metadata on pixel scale,\n separated by forward slashes ('/').\n crop_radius : int, optional\n Crop `crop_radius` pixels from each margin of the image before\n processing.\n smooth_method : {'Gaussian', 'TV', 'NL'}, optional\n Which method to use for smoothing.\n num_threads : int, optional\n How many threads to use for computation. 
This should generally be\n set to the number of CPU cores available to you.\n\n Returns\n -------\n results : generator\n The pipeline yields individual image results in the form of a tuple\n of ``(filename, image, thresholded_image, skeleton, data_frame)``.\n Finally, after all the images have been processed, the pipeline yields\n a DataFrame containing all the collated branch-level results.\n \"\"\"\n image_format = None if image_format == 'auto' else image_format\n results = []\n image_results = []\n with ThreadPoolExecutor(max_workers=num_threads) as ex:\n future_data = {ex.submit(process_single_image, filename,\n image_format, scale_metadata_path,\n threshold_radius, smooth_radius,\n brightness_offset, crop_radius,\n smooth_method): filename\n for filename in filenames}\n for completed_data in tqdm(as_completed(future_data)):\n image, thresholded, skeleton, framedata = completed_data.result()\n filename = future_data[completed_data]\n results.append(framedata)\n image_stats = image_summary(skeleton,\n spacing=framedata['scale'][0])\n image_stats['filename'] = filename\n image_stats['branch density'] = (framedata.shape[0] /\n image_stats['area'])\n j2j = framedata[framedata['branch-type'] == 2]\n image_stats['mean J2J branch distance'] = (\n j2j['branch-distance'].mean())\n image_results.append(image_stats)\n yield filename, image, thresholded, skeleton, framedata\n yield pd.concat(results), pd.concat(image_results)\n"
] | [
[
"numpy.log2",
"pandas.concat",
"numpy.ceil"
]
] |
kohjingyu/prob-stats | [
"df396686b641079b5db93118b1b2373d79de7d7a"
] | [
"linear_regression.py"
] | [
"import matplotlib\nimport matplotlib.pyplot as plt\n\nx = [20, 23, 29, 27, 30, 34, 35, 37, 40, 43]\ny = [1.32, 1.67, 2.17, 2.70, 2.75, 2.87, 3.65, 2.86, 3.61, 4.25]\nn = len(x)\nassert(n == len(y))\n\n# Means\nbar_x = sum(x) / n\nbar_y = sum(y) / n\n\n# Sum of squares\nsxy = sum([(x[i] - bar_x) * (y[i] - bar_y) for i in range(n)])\nsxx = sum([(x[i] - bar_x)**2 for i in range(n)]) \nsyy = sum([(y[i] - bar_y)**2 for i in range(n)]) \n\nprint(\"S_xy = {0:5f}, S_xx = {1:5f}, S_yy = {2:5f}\".format(sxy ,sxx, syy))\n\n# Point estimates for \\beta_0 and \\beta_1\nb1 = sxy / sxx\nb0 = bar_y - b1 * bar_x\n\nprint(\"n = {0}\".format(n))\nprint(\"\\\\bar{{x}} = {0:5f}\".format(bar_x))\nprint(\"\\\\bar{{y}} = {0:5f}\".format(bar_y))\n\nprint(\"Estimated regression line: y = {0:5f} + {1:5f} x\".format(b0, b1))\n\n# Plot x and y and save it\nfig = plt.figure()\nax = plt.subplot(111)\nax.plot(x, y)\nx_values = range(min(x), max(x))\nax.plot(x_values, [b0 + b1 * xi for xi in x_values])\nfig.savefig(\"plot.png\")\n\n# error sum of squares\nsse = sum([(y[i] - (b0 + b1 * x[i]))**2 for i in range(n)])\n# total sum of squares\nsst = sum([y[i]**2 for i in range(n)]) - sum(y)**2 / n \nsigma_square = sse / (n - 2)\n\nprint(\"SSE: {0:5f}\".format(sse))\nprint(\"SST: {0:5f}\".format(sst))\nprint(\"\\sigma^2 = {0:5f}\".format(sigma_square))\nprint(\"\\sigma = {0:5f}\".format(sigma_square ** 0.5))\nprint(\"r^2 = {0:5f}\".format(1 - sse / sst))\n"
] | [
[
"matplotlib.pyplot.figure",
"matplotlib.pyplot.subplot"
]
] |
JamesSample/icpw | [
"47562f601fc8fe23720267d083dabc540889565e"
] | [
"toc_trends_analysis.py"
] | [
"#------------------------------------------------------------------------------\n# Name: toc_trends_analysis.py\n# Purpose: Analyse RESA2 data for trends.\n#\n# Author: James Sample\n#\n# Created: Fri Jul 15 11:35:12 2016\n# Copyright: (c) James Sample and NIVA\n# Licence: \n#------------------------------------------------------------------------------\n\"\"\" Tore has previously written code to perform trend analyses on the data in\n RESA2. I haven't been able to find the code, but it appears to shift data \n between RESA2, Excel and Access, which seems a bit messy.\n \n In the notebook updated_toc_trends_analysis.ipynb, I tested some code which\n refactors all the analysis into Python, interfacing directly with the \n database and returning results as dataframes. This seems to have worked \n well.\n \n The code below takes the main functions from this notebook and tidies them\n up a bit. This file can then be imported into new notebooks, which should\n make it easy to re-run trend analyses on different datasets in the future.\n\"\"\"\n\ndef mk_test(x, stn_id, par, alpha=0.05):\n \"\"\" Adapted from http://pydoc.net/Python/ambhas/0.4.0/ambhas.stats/\n by Sat Kumar Tomer.\n \n Perform the MK test for monotonic trends. Uses the \"normal\n approximation\" to determine significance and therefore should \n only be used if the number of values is >= 10.\n \n Args:\n x: 1D array of data\n name: Name for data series (string)\n alpha: Significance level\n \n Returns:\n var_s: Variance of test statistic\n s: M-K test statistic\n z: Normalised test statistic \n p: p-value of the significance test\n trend: Whether to reject the null hypothesis (no trend) at\n the specified significance level. One of: \n 'increasing', 'decreasing' or 'no trend'\n \"\"\"\n import numpy as np\n from scipy.stats import norm\n \n n = len(x)\n \n if n < 10:\n print (' Data series for %s at site %s has fewer than 10 non-null values. '\n 'Significance estimates may be unreliable.' % (par, int(stn_id)))\n \n # calculate S \n s = 0\n for k in range(n-1):\n for j in range(k+1,n):\n s += np.sign(x[j] - x[k])\n \n # calculate the unique data\n unique_x = np.unique(x)\n g = len(unique_x)\n \n # calculate the var(s)\n if n == g: # there is no tie\n var_s = (n*(n-1)*(2*n+5))/18. \n else: # there are some ties in data\n tp = np.zeros(unique_x.shape)\n for i in range(len(unique_x)):\n tp[i] = sum(unique_x[i] == x)\n # Sat Kumar's code has \"+ np.sum\", which is incorrect\n var_s = (n*(n-1)*(2*n+5) - np.sum(tp*(tp-1)*(2*tp+5)))/18.\n \n if s>0:\n z = (s - 1)/np.sqrt(var_s)\n elif s == 0:\n z = 0\n elif s<0:\n z = (s + 1)/np.sqrt(var_s)\n else:\n z = np.nan\n \n # calculate the p_value\n p = 2*(1-norm.cdf(abs(z))) # two tail test\n h = abs(z) > norm.ppf(1-alpha/2.) \n\n if (z<0) and h:\n trend = 'decreasing'\n elif (z>0) and h:\n trend = 'increasing'\n elif np.isnan(z):\n trend = np.nan\n else:\n trend = 'no trend'\n \n return var_s, s, z, p, trend\n\ndef wc_stats(raw_df, st_yr=None, end_yr=None, plot=False, fold=None):\n \"\"\" Calculate key statistics for the TOC trends analysis:\n \n 'station_id'\n 'par_id'\n 'non_missing'\n 'median'\n 'mean'\n 'std_dev'\n 'period'\n 'mk_std_dev'\n 'mk_stat'\n 'norm_mk_stat'\n 'mk_p_val'\n 'trend'\n 'sen_slp'\n \n Args:\n raw_df: Dataframe with annual data for a single station. Columns must \n be: [station_id, year, par1, par2, ... parn]\n st_yr: First year to include in analysis. Pass None to start\n at the beginning of the series\n end_year: Last year to include in analysis. 
Pass None to start\n at the beginning of the series\n plot: Whether to generate a PNG plot of the Sen's slope \n regression\n fold: Folder in which to save PNGs if plot=True\n \n Returns:\n df of key statistics.\n \"\"\"\n import numpy as np, pandas as pd\n import seaborn as sn, matplotlib.pyplot as plt, os\n from scipy.stats import theilslopes\n sn.set_context('poster')\n \n # Checking\n df = raw_df.copy()\n assert list(df.columns[:2]) == ['STATION_ID', 'YEAR'], 'Columns must be: [STATION_ID, YEAR, par1, par2, ... parn]'\n assert len(df['STATION_ID'].unique()) == 1, 'You can only process data for one site at a time'\n \n # Get just the period of interest\n if st_yr:\n df = df.query('YEAR >= @st_yr')\n if end_yr:\n df = df.query('YEAR <= @end_yr')\n\n # Only continue if data\n if len(df) > 0:\n # Get stn_id\n stn_id = df['STATION_ID'].iloc[0]\n \n # Tidy up df\n df.index = df['YEAR']\n df.sort_index(inplace=True)\n del df['STATION_ID'], df['YEAR']\n \n # Container for results\n data_dict = {'station_id':[],\n 'par_id':[],\n 'non_missing':[],\n 'n_start':[],\n 'n_end':[],\n 'median':[],\n 'mean':[],\n 'std_dev':[],\n 'period':[],\n 'mk_std_dev':[],\n 'mk_stat':[],\n 'norm_mk_stat':[],\n 'mk_p_val':[],\n 'trend':[],\n 'sen_slp':[]}\n \n # Loop over pars\n for col in df.columns:\n # 1. Station ID\n data_dict['station_id'].append(stn_id)\n \n # 2. Par ID\n data_dict['par_id'].append(col)\n \n # 3. Non-missing\n data_dict['non_missing'].append(pd.notnull(df[col]).sum())\n \n # 4. Number of non nulls at start\n if st_yr:\n # Record the number of non-nulls within 5 years of start year\n data_dict['n_start'].append(pd.notnull(df[df.index<(st_yr+5)][col]).sum())\n else:\n # Record the number of non-nulls in first 5 years of record\n data_dict['n_start'].append(pd.notnull(df[col].head(5)).sum())\n \n # 5. Number of non nulls at end\n if end_yr:\n # Record the number of non-nulls within 5 years of end year\n data_dict['n_end'].append(pd.notnull(df[df.index>(end_yr-5)][col]).sum())\n else:\n # Record the number of non-nulls in last 5 years of record\n data_dict['n_end'].append(pd.notnull(df[col].tail(5)).sum())\n \n # 6. Median\n data_dict['median'].append(df[col].median())\n \n # 7. Mean\n data_dict['mean'].append(df[col].mean())\n \n # 8. Std dev\n data_dict['std_dev'].append(df[col].std())\n \n # 9. Period\n st_yr = df.index.min()\n end_yr = df.index.max()\n per = '%s-%s' % (int(st_yr), int(end_yr))\n data_dict['period'].append(per)\n \n # 10. M-K test\n # Drop missing values\n mk_df = df[[col]].dropna(how='any')\n \n # Only run stats if more than 1 valid value\n if len(mk_df) > 1:\n var_s, s, z, p, trend = mk_test(mk_df[col].values, stn_id, col)\n data_dict['mk_std_dev'].append(np.sqrt(var_s)) \n data_dict['mk_stat'].append(s)\n data_dict['norm_mk_stat'].append(z)\n data_dict['mk_p_val'].append(p)\n data_dict['trend'].append(trend) \n \n # 11. Sen's slope. Returns:\n # Median slope, median intercept, 95% CI lower bound, \n # 95% CI upper bound\n sslp, icpt, lb, ub = theilslopes(mk_df[col].values, \n mk_df.index, 0.95)\n data_dict['sen_slp'].append(sslp)\n \n # 12. 
Plot if desired\n if plot:\n fig = plt.figure()\n plt.plot(mk_df.index, mk_df[col].values, 'bo-')\n plt.plot(mk_df.index, mk_df.index*sslp + icpt, 'k-')\n if col in ('Al', 'TOC'):\n plt.ylabel('%s (mg/l)' % col, fontsize=24)\n else:\n plt.ylabel('%s (ueq/l)' % col, fontsize=24)\n plt.title('%s at station %s' % (col, int(stn_id)),\n fontsize=32)\n plt.tight_layout()\n \n # Save fig\n out_path = os.path.join(fold,\n '%s_%s_%s-%s.png' % (int(stn_id), col, \n st_yr, end_yr))\n plt.savefig(out_path, dpi=150)\n plt.close()\n \n # Otherwise all NaN\n else:\n for par in ['mk_std_dev', 'mk_stat', 'norm_mk_stat', \n 'mk_p_val', 'trend', 'sen_slp']:\n data_dict[par].append(np.nan)\n \n # Build to df\n res_df = pd.DataFrame(data_dict)\n res_df = res_df[['station_id', 'par_id', 'period', 'non_missing', 'n_start',\n 'n_end', 'mean', 'median', 'std_dev', 'mk_stat', \n 'norm_mk_stat', 'mk_p_val', 'mk_std_dev', 'trend', 'sen_slp']] \n \n return res_df\n \ndef read_resa2(proj_list, engine):\n \"\"\" Reads raw data for the specified projects from RESA2. Extracts only\n the parameters required for the trends analysis and calculates \n aggregated annual values by taking medians.\n \n Args:\n proj_list: List of RESA2 project names for which to extract data\n engine: SQLAlchemy 'engine' object already connected to RESA2\n \n Returns: \n [stn_df, wc_df, dup_df]. Dataframe of stations; Dataframe of annual \n water chemistry values; dataframe of duplicates to check\n \"\"\"\n import pandas as pd \n\n # Get par IDs etc. for pars of interest\n par_list = ['SO4', 'Cl', 'Ca', 'Mg', 'NO3-N', 'TOC', \n 'Al', 'K', 'Na', 'NH4-N', 'pH']\n \n sql = ('SELECT * FROM resa2.parameter_definitions '\n 'WHERE name in %s' % str(tuple(par_list)))\n \n par_df = pd.read_sql_query(sql, engine)\n \n # Get stations for a specified list of projects\n if len(proj_list) == 1:\n sql = (\"SELECT station_id, station_code \"\n \"FROM resa2.stations \"\n \"WHERE station_id IN (SELECT UNIQUE(station_id) \"\n \"FROM resa2.projects_stations \"\n \"WHERE project_id IN (SELECT project_id \"\n \"FROM resa2.projects \"\n \"WHERE project_name = '%s'))\"\n % proj_list[0])\n else:\n sql = ('SELECT station_id, station_code '\n 'FROM resa2.stations '\n 'WHERE station_id IN (SELECT UNIQUE(station_id) '\n 'FROM resa2.projects_stations '\n 'WHERE project_id IN (SELECT project_id '\n 'FROM resa2.projects '\n 'WHERE project_name IN %s))'\n % str(tuple(proj_list))) \n stn_df = pd.read_sql(sql, engine)\n\n # Get results for ALL pars for these sites\n if len(stn_df)==1:\n sql = (\"SELECT * FROM resa2.water_chemistry_values2 \"\n \"WHERE sample_id IN (SELECT water_sample_id FROM resa2.water_samples \"\n \"WHERE station_id = %s)\"\n % stn_df['station_id'].iloc[0]) \n else:\n sql = (\"SELECT * FROM resa2.water_chemistry_values2 \"\n \"WHERE sample_id IN (SELECT water_sample_id FROM resa2.water_samples \"\n \"WHERE station_id IN %s)\"\n % str(tuple(stn_df['station_id'].values)))\n \n wc_df = pd.read_sql_query(sql, engine)\n\n # Get all sample dates for sites\n if len(stn_df)==1:\n sql = (\"SELECT water_sample_id, station_id, sample_date, depth1, depth2 \"\n \"FROM resa2.water_samples \"\n \"WHERE station_id = %s \"\n % stn_df['station_id'].iloc[0]) \n else:\n sql = (\"SELECT water_sample_id, station_id, sample_date, depth1, depth2 \"\n \"FROM resa2.water_samples \"\n \"WHERE station_id IN %s \"\n % str(tuple(stn_df['station_id'].values)))\n \n samp_df = pd.read_sql_query(sql, engine)\n \n # Join in par IDs based on method IDs\n sql = ('SELECT * FROM 
resa2.wc_parameters_methods')\n meth_par_df = pd.read_sql_query(sql, engine)\n \n wc_df = pd.merge(wc_df, meth_par_df, how='left',\n left_on='method_id', right_on='wc_method_id')\n \n # Get just the parameters of interest\n wc_df = wc_df.query('wc_parameter_id in %s' \n % str(tuple(par_df['parameter_id'].values)))\n \n # Join in sample dates\n wc_df = pd.merge(wc_df, samp_df, how='left',\n left_on='sample_id', right_on='water_sample_id')\n\n # Get just the near-surface samples\n wc_df = wc_df.query('(depth1 <= 1) and (depth2 <= 1)')\n \n # Join in parameter units\n sql = ('SELECT * FROM resa2.parameter_definitions')\n all_par_df = pd.read_sql_query(sql, engine)\n \n wc_df = pd.merge(wc_df, all_par_df, how='left',\n left_on='wc_parameter_id', right_on='parameter_id')\n \n # Join in station codes\n wc_df = pd.merge(wc_df, stn_df, how='left',\n left_on='station_id', right_on='station_id')\n \n # Convert units\n wc_df['value'] = wc_df['value'] * wc_df['conversion_factor']\n \n # Extract columns of interest\n wc_df = wc_df[['station_id', 'sample_date', 'name', \n 'value', 'entered_date_x']]\n \n # Check for duplicates\n dup_df = wc_df[wc_df.duplicated(subset=['station_id',\n 'sample_date',\n 'name'], \n keep=False)].sort_values(by=['station_id', \n 'sample_date', \n 'name'])\n\n if len(dup_df) > 0:\n print (' The database contains duplicate values for some station-'\n 'date-parameter combinations.\\n Only the most recent values '\n 'will be used, but you should check the repeated values are not '\n 'errors.\\n The duplicated entries are returned in a separate '\n 'dataframe.\\n')\n \n # Choose most recent record for each duplicate\n wc_df.sort_values(by='entered_date_x', inplace=True, ascending=True)\n\n # Drop duplicates\n wc_df.drop_duplicates(subset=['station_id', 'sample_date', 'name'],\n keep='last', inplace=True)\n \n # Sort\n wc_df.sort_values(by=['station_id', 'sample_date', 'name'],\n inplace=True)\n \n # Tidy\n del wc_df['entered_date_x'] \n wc_df.reset_index(inplace=True, drop=True)\n\n # Unstack\n wc_df.set_index(['station_id', 'sample_date', 'name'], inplace=True)\n wc_df = wc_df.unstack(level='name')\n wc_df.columns = wc_df.columns.droplevel()\n wc_df.reset_index(inplace=True)\n wc_df.columns.name = None\n\n # Extract year from date column\n wc_df['year'] = wc_df['sample_date'].map(lambda x: x.year)\n del wc_df['sample_date']\n \n # Groupby station_id and year\n grpd = wc_df.groupby(['station_id', 'year'])\n \n # Calculate median\n wc_df = grpd.agg('median')\n\n return stn_df, wc_df, dup_df\n\ndef conv_units_and_correct(wc_df):\n \"\"\" Take a dataframe of aggregated annual values in the units specified by\n RESA2.PARAMETERS and performs unit conversions to ueq/l. Also applies\n sea-salt correction where necessary.\n \n Args:\n wc_df: Dataframe in original units\n \n Returns:\n Dataframe in converted units\n \"\"\"\n import pandas as pd\n \n # Tabulate chemical properties\n chem_dict = {'molar_mass':[96, 35, 40, 24, 14, 39, 23, 14],\n 'valency':[2, 1, 2, 2, 1, 1, 1, 1],\n 'resa2_ref_ratio':[0.103, 1., 0.037, 0.196, \n 'N/A', 0.018, 0.859, 'N/A']}\n \n chem_df = pd.DataFrame(chem_dict, index=['SO4', 'Cl', 'Ca', 'Mg', \n 'NO3-N', 'K', 'Na', 'NH4-N'])\n chem_df = chem_df[['molar_mass', 'valency', 'resa2_ref_ratio']]\n\n # Fill NoData for ANC calculation. Assume that NH4 can be ignored if not \n # present.\n # If have data for NH4, fill data gaps with 0\n if 'NH4-N' in wc_df.columns:\n wc_df['NH4-N'].fillna(value=0, inplace=True)\n else: # Just assume 0\n wc_df['NH4-N'] = 0 \n \n # 1. 
Convert to ueq/l\n # 1.1. pH to H+\n wc_df['EH'] = 1E6 * 10**(-wc_df['pH'])\n \n # 1.2. Other pars\n for par in ['SO4', 'Cl', 'Mg', 'Ca', 'NO3-N', 'K', 'Na', 'NH4-N']:\n val = chem_df.at[par, 'valency']\n mm = chem_df.at[par, 'molar_mass']\n \n if par == 'NO3-N':\n wc_df['ENO3'] = wc_df[par] * val / mm\n elif par == 'NH4-N':\n wc_df['ENH4'] = wc_df[par] * val / mm\n else:\n wc_df['E%s' % par] = wc_df[par] * val * 1000. / mm\n \n # 2. Apply sea-salt correction\n for par in ['ESO4', 'EMg', 'ECa']:\n ref = chem_df.at[par[1:], 'resa2_ref_ratio']\n wc_df['%sX' % par] = wc_df[par] - (ref*wc_df['ECl'])\n \n # 3. Calculate combinations\n # 3.1. ESO4 + ECl\n wc_df['ESO4_ECl'] = wc_df['ESO4'] + wc_df['ECl']\n \n # 3.2. ECa + EMg\n wc_df['ECa_EMg'] = wc_df['ECa'] + wc_df['EMg']\n \n # 3.3. ECaX + EMgX\n wc_df['ECaX_EMgX'] = wc_df['ECaX'] + wc_df['EMgX']\n \n # 3.4. ANC = (ECa+EMg+EK+ENa+ENH4) - (ECl+ESO4+ENO3)\n wc_df['ANC'] = ((wc_df['ECa'] + wc_df['EMg'] + wc_df['EK'] + \n wc_df['ENa'] + wc_df['ENH4']) - \n (wc_df['ECl'] + wc_df['ESO4'] + wc_df['ENO3']))\n\n # 3.5. ANCX = (ECaX+EMgX+EK+ENa+ENH4) - (ECl+ESO4X+ENO3)\n wc_df['ANCX'] = ((wc_df['ECaX'] + wc_df['EMgX'] + wc_df['EK'] + \n wc_df['ENa'] + wc_df['ENH4']) - \n (wc_df['ECl'] + wc_df['ESO4X'] + wc_df['ENO3']))\n \n # 4. Delete unnecessary columns and tidy\n for col in ['SO4', 'Cl', 'Mg', 'Ca', 'NO3-N', 'K', 'Na', 'NH4-N', 'pH',\n 'EMg', 'ECa', 'EK', 'ENa', 'ENH4', 'EMgX', 'ECaX']:\n del wc_df[col]\n \n wc_df.reset_index(inplace=True)\n \n return wc_df\n\ndef run_trend_analysis(proj_list, engine, st_yr=None, end_yr=None,\n plot=False, fold=None):\n \"\"\" Run the trend analysis for the specified projects and time period.\n \n Args:\n proj_list: List of RESA2 project names for which to extract data\n engine: SQLAlchemy 'engine' object already connected to RESA2\n st_yr: First year to include in analysis. Pass None to start\n at the beginning of the series\n end_year: Last year to include in analysis. Pass None to start\n at the beginning of the series\n plot: Whether to generate a PNG plot of the Sen's slope \n regression\n fold: Folder in which to save PNGs if plot=True\n \n Returns: \n [res_df, dup_df, no_data_df]. Dataframe of statistics; dataframe of \n duplicated water chemistry values for investigation; dataframe of \n stations with no relevant data in the period of interest\n \"\"\"\n import pandas as pd, os\n \n # Check paths valid\n if plot:\n assert os.path.isdir(fold), 'The specified folder does not exist.'\n \n # Get raw data from db\n print ('Extracting data from RESA2...')\n stn_df, wc_df, dup_df = read_resa2(proj_list, engine)\n \n # Identify stations with no relevant records\n stns_no_data = (set(stn_df['station_id'].values) - \n set(wc_df.index.get_level_values('station_id')))\n \n if len(stns_no_data) > 0:\n print (' Some stations have no relevant data in the period '\n 'specified. 
Their IDs are returned in a separate dataframe.\\n')\n no_data_df = pd.DataFrame({'station_id':list(stns_no_data)})\n else:\n no_data_df = None\n \n print (' Done.')\n \n # Convert units and apply sea-salt correction\n print ('\\nConverting units and applying sea-salt correction...')\n wc_df = conv_units_and_correct(wc_df)\n print (' Done.')\n \n # Calculate stats \n # Container for output\n df_list = []\n\n # Loop over sites\n print ('\\nCalculating statistics...')\n for stn_id in wc_df['station_id'].unique():\n # Extract data for this site\n df = wc_df.query('station_id == @stn_id')\n\n # Modify col names\n names = list(df.columns)\n names[:2] = ['STATION_ID', 'YEAR']\n df.columns = names\n\n # Heleen wants the annual time series for each site for further analysis\n # Write df to output\n #out_ann_fold = (r'../../../Thematic_Trends_Report_2019/results/annual_chemistry_series')\n #out_ann_path = os.path.join(out_ann_fold, 'stn_%s.csv' % stn_id)\n #df_trunc = df.query('(YEAR>=1990) & (YEAR<=2016)') # Truncate to 1990 to 2016\n #df_trunc.to_csv(out_ann_path) \n \n # Run analysis\n df_list.append(wc_stats(df, st_yr=st_yr, end_yr=end_yr,\n plot=plot, fold=fold))\n \n res_df = pd.concat(df_list, axis=0)\n\n # Convert station_id cols to ints\n res_df['station_id'] = res_df['station_id'].map(int)\n dup_df['station_id'] = dup_df['station_id'].map(int)\n if no_data_df is not None:\n no_data_df['station_id'] = no_data_df['station_id'].map(int)\n \n print (' Done.') \n print ('\\nFinished.')\n \n return res_df, dup_df, no_data_df"
] | [
[
"numpy.sum",
"scipy.stats.norm.ppf",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.plot",
"pandas.read_sql_query",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"pandas.notnull",
"numpy.isnan",
"numpy.unique",
"numpy.zeros",
"scipy.stats.theilslopes",
"pandas.merge",
"pandas.concat",
"matplotlib.pyplot.close",
"pandas.read_sql",
"numpy.sign",
"pandas.DataFrame",
"numpy.sqrt"
]
] |
zfisher/trax | [
"c8187944fc036418a5c3b0491fc53c223e73faa6"
] | [
"trax/layers/normalization_test.py"
] | [
"# coding=utf-8\n# Copyright 2020 The Trax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Tests for normalization layers.\"\"\"\n\nfrom absl.testing import absltest\nimport numpy as onp\n\nfrom trax.layers import base\nfrom trax.layers import normalization\nfrom trax.math import numpy as np\nfrom trax.shapes import ShapeDtype\n\n\nclass NormalizationLayerTest(absltest.TestCase):\n\n def test_batch_norm_shape(self):\n input_signature = ShapeDtype((29, 5, 7, 20))\n result_shape = base.check_shape_agreement(normalization.BatchNorm(),\n input_signature)\n self.assertEqual(result_shape, input_signature.shape)\n\n def test_batch_norm(self):\n input_shape = (2, 3, 4)\n input_dtype = np.float32\n input_signature = ShapeDtype(input_shape, input_dtype)\n eps = 1e-5\n inp1 = np.reshape(np.arange(np.prod(input_shape), dtype=input_dtype),\n input_shape)\n m1 = 11.5 # Mean of this random input.\n v1 = 47.9167 # Variance of this random input.\n layer = normalization.BatchNorm(axis=(0, 1, 2))\n _, _ = layer.init(input_signature)\n state = layer.state\n onp.testing.assert_allclose(state[0], 0)\n onp.testing.assert_allclose(state[1], 1)\n self.assertEqual(state[2], 0)\n out = layer(inp1)\n state = layer.state\n onp.testing.assert_allclose(state[0], m1 * 0.001)\n onp.testing.assert_allclose(state[1], 0.999 + v1 * 0.001, rtol=1e-6)\n self.assertEqual(state[2], 1)\n onp.testing.assert_allclose(out, (inp1 - m1) / np.sqrt(v1 + eps),\n rtol=1e-6)\n\n def test_layer_norm_shape(self):\n input_signature = ShapeDtype((29, 5, 7, 20))\n result_shape = base.check_shape_agreement(\n normalization.LayerNorm(), input_signature)\n self.assertEqual(result_shape, input_signature.shape)\n\n def test_frn_shape(self):\n B, H, W, C = 64, 5, 7, 3 # pylint: disable=invalid-name\n input_signature = ShapeDtype((B, H, W, C))\n result_shape = base.check_shape_agreement(\n normalization.FilterResponseNorm(), input_signature)\n self.assertEqual(result_shape, input_signature.shape)\n\n result_shape = base.check_shape_agreement(\n normalization.FilterResponseNorm(learn_epsilon=False),\n input_signature)\n self.assertEqual(result_shape, input_signature.shape)\n\n\nif __name__ == '__main__':\n absltest.main()\n"
] | [
[
"numpy.testing.assert_allclose"
]
] |
watanka/CRAFTS-implementation | [
"bc514638755fe798a0d5eb583d6d477e8eb55bff"
] | [
"file_utils.py"
] | [
"# -*- coding: utf-8 -*-\nimport os\nimport numpy as np\nimport cv2\nimport imgproc\nfrom PIL import Image, ImageDraw\n\n\n\n# borrowed from https://github.com/lengstrom/fast-style-transfer/blob/master/src/utils.py\ndef get_files(img_dir):\n imgs, masks, xmls = list_files(img_dir)\n return imgs, masks, xmls\n\ndef list_files(in_path):\n img_files = []\n mask_files = []\n gt_files = []\n for (dirpath, dirnames, filenames) in os.walk(in_path):\n for file in filenames:\n filename, ext = os.path.splitext(file)\n ext = str.lower(ext)\n if ext == '.jpg' or ext == '.jpeg' or ext == '.gif' or ext == '.png' or ext == '.pgm':\n img_files.append(os.path.join(dirpath, file))\n elif ext == '.bmp':\n mask_files.append(os.path.join(dirpath, file))\n elif ext == '.xml' or ext == '.gt' or ext == '.txt':\n gt_files.append(os.path.join(dirpath, file))\n elif ext == '.zip':\n continue\n # img_files.sort()\n # mask_files.sort()\n # gt_files.sort()\n return img_files, mask_files, gt_files\n\ndef saveResult(img_file, img, boxes, font,dirname='./result/', verticals=None, texts=None):\n \"\"\" save text detection result one by one\n Args:\n img_file (str): image file name\n img (array): raw image context\n boxes (array): array of result file\n Shape: [num_detections, 4] for BB output / [num_detections, 4] for QUAD output\n Return:\n None\n \"\"\"\n img = np.array(img)\n img_pil = Image.fromarray(img)\n imgdraw = ImageDraw.Draw(img_pil)\n # make result file list\n filename, file_ext = os.path.splitext(os.path.basename(img_file))\n\n # result directory\n res_file = dirname + \"res_\" + filename + '.txt'\n res_img_file = dirname + \"res_\" + filename + '.jpg'\n\n if not os.path.isdir(dirname):\n os.mkdir(dirname)\n\n with open(res_file, 'w') as f:\n \n if texts is not None :\n for i, (box, text) in enumerate(zip(boxes, texts)):\n poly = np.array(box).astype(np.int32).reshape((-1))\n strResult = ','.join([str(p) for p in poly]) +','+text +'\\r\\n'\n # poly = np.array(box).astype(np.int32)\n # min_x = np.min(poly[:,0])\n # max_x = np.max(poly[:,0])\n # min_y = np.min(poly[:,1])\n # max_y = np.max(poly[:,1])\n # strResult = ','.join([str(min_x), str(min_y), str(max_x), str(max_y)]) + '\\r\\n'\n f.write(strResult)\n\n poly = poly.reshape(-1, 2)\n# cv2.polylines(img, [poly.reshape((-1, 1, 2))], True, color=(0, 0, 255), thickness=2)\n# cv2.putText(img, text, tuple(poly[1]), cv2.FONT_HERSHEY_SIMPLEX, fontScale = 0.1, color = (0,0,255), thickness= 1)\n imgdraw.polygon(poly.flatten().tolist(), fill = None, outline = (0,0,255))\n imgdraw.text(tuple(poly[1]), text,font = font, fill = (0,0,255))\n \n ptColor = (0, 255, 255)\n if verticals is not None:\n if verticals[i]:\n ptColor = (255, 0, 0)\n \n else : \n \n for i, box in enumerate(boxes):\n poly = np.array(box).astype(np.int32).reshape((-1))\n strResult = ','.join([str(p) for p in poly]) + '\\r\\n'\n # poly = np.array(box).astype(np.int32)\n # min_x = np.min(poly[:,0])\n # max_x = np.max(poly[:,0])\n # min_y = np.min(poly[:,1])\n # max_y = np.max(poly[:,1])\n # strResult = ','.join([str(min_x), str(min_y), str(max_x), str(max_y)]) + '\\r\\n'\n f.write(strResult)\n\n poly = poly.reshape(-1, 2)\n# cv2.polylines(img, [poly.reshape((-1, 1, 2))], True, color=(0, 0, 255), thickness=2)\n \n imgdraw.polygon([poly.reshape((-1,1,2))], fill = None, outline =(0,0,255))\n\n ptColor = (0, 255, 255)\n if verticals is not None:\n if verticals[i]:\n ptColor = (255, 0, 0)\n #\n # if texts is not None:\n # font = cv2.FONT_HERSHEY_SIMPLEX\n # font_scale = 0.5\n # cv2.putText(img, 
\"{}\".format(texts[i]), (poly[0][0]+1, poly[0][1]+1), font, font_scale, (0, 0, 0), thickness=1)\n # cv2.putText(img, \"{}\".format(texts[i]), tuple(poly[0]), font, font_scale, (0, 255, 255), thickness=1)\n #\n # #Save result image\n cv2.imwrite(res_img_file, np.array(img_pil))\n\ndef load_txt(file, delimiter = ',') :\n ## character bbox는 \\n\\n으로 box별 구분\n coords_ls = []\n with open(file, 'r', encoding = 'utf-8-sig') as f :\n boxes_list = f.read().split('\\n\\n')\n for boxes in boxes_list :\n if boxes.strip() == '' :\n continue\n char_boxes = boxes.split('\\n')\n # char_txt는 라벨이 따로 없다\n charbox_ls = []\n for charbox in char_boxes :\n if len(char_boxes) == 0 :\n continue\n coords = charbox.split(delimiter)\n coords = [float(c) for c in coords if c != '']\n if len(coords) == 0 :\n continue\n coords = np.array(coords).reshape(-1,2)\n \n charbox_ls.append(coords)\n if len(charbox_ls) != 0 :\n coords_ls.append(np.array(charbox_ls))\n \n \n return coords_ls\n "
] | [
[
"numpy.array"
]
] |
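
The `saveResult`/`load_txt` utilities in the record above write detection polygons as comma-joined coordinate lines and draw them with PIL's `ImageDraw` (character boxes in `load_txt` are separated by blank lines). Below is a minimal, self-contained sketch of that drawing pattern; the image size, box coordinates, label text and output file name are invented for illustration and are not taken from the dataset row.

```python
import numpy as np
from PIL import Image, ImageDraw

# Illustrative sketch: draw one quadrilateral detection box and its label on a
# blank image, mirroring the PIL-based drawing used in saveResult above.
# The box coordinates and label below are made-up example values.
img = Image.new("RGB", (200, 100), color=(255, 255, 255))
draw = ImageDraw.Draw(img)

box = np.array([10, 10, 150, 12, 148, 60, 12, 58], dtype=np.int32)  # x1,y1,...,x4,y4
poly = box.reshape(-1, 2)

# ImageDraw.polygon takes a flat [x1, y1, x2, y2, ...] sequence.
draw.polygon(poly.flatten().tolist(), outline=(0, 0, 255))
# Put the text label near the second corner, as the original code does.
draw.text(tuple(poly[1]), "word", fill=(0, 0, 255))

img.save("res_example.jpg")

# The corresponding result-file line is the comma-joined coordinates plus the text:
line = ",".join(str(p) for p in box) + ",word"
print(line)  # 10,10,150,12,148,60,12,58,word
```
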
SaraR-1/model-patching | [
"97b30bad4bb4575a5f3a4cc23fbd333b10a057a8"
] | [
"augmentation/methods/cyclegan/utils.py"
] | [
"import datetime\n\nimport tensorflow as tf\nimport random\nimport wandb\nfrom tensorflow_examples.models.pix2pix import pix2pix\n\nfrom augmentation.dataflows.utils import create_paired_direct_dataflow, \\\n create_paired_parallel_dataflow_via_numpy\nfrom augmentation.methods.cyclegan.models import mnist_unet_generator, mnist_discriminator, unet_generator\nfrom augmentation.utilities.optim import build_lr_scheduler\nfrom augmentation.utilities.visualize import gallery\n\n\n# Other places to look for training GANs\n# https://github.com/eriklindernoren/Keras-GAN\n\ndef gradient_penalty(f, real, fake, mode, scale=10.0):\n # https://github.com/LynnHo/CycleGAN-Tensorflow-2/blob/master/tf2gan/loss.py\n def _gradient_penalty(f, real, fake=None):\n def _interpolate(a, b=None):\n if b is None: # interpolation in DRAGAN\n beta = tf.random.uniform(shape=tf.shape(a), minval=0., maxval=1.)\n b = a + 0.5 * tf.math.reduce_std(a) * beta\n shape = [tf.shape(a)[0]] + [1] * (a.shape.ndims - 1)\n alpha = tf.random.uniform(shape=shape, minval=0., maxval=1.)\n inter = a + alpha * (b - a)\n inter.set_shape(a.shape)\n return inter\n\n x = _interpolate(real, fake)\n with tf.GradientTape() as t:\n t.watch(x)\n pred = tf.reduce_mean(tf.reshape(f(x), [tf.shape(real)[0], -1]), axis=1)\n grad = t.gradient(pred, x)\n norm = tf.norm(tf.reshape(grad, [tf.shape(grad)[0], -1]), axis=1)\n gp = tf.reduce_mean((norm - 1.) ** 2)\n\n return gp\n\n if mode == 'none':\n gp = tf.constant(0, dtype=real.dtype)\n elif mode == 'dragan':\n gp = _gradient_penalty(f, real)\n elif mode == 'wgan-gp':\n gp = _gradient_penalty(f, real, fake)\n else:\n raise NotImplementedError\n\n return gp * scale\n\n\nclass ReplayBuffer(object):\n \"\"\"\n Adapted from https://github.com/tensorflow/models/blob/master/research/pcl_rl/replay_buffer.py\n \"\"\"\n\n def __init__(self, max_size):\n self.max_size = max_size\n self.cur_size = 0\n self.buffer = {}\n self.oldest_idx = 0\n self.init_length = 0\n\n def __len__(self):\n return self.cur_size\n\n def add(self, images):\n idx = 0\n while self.cur_size < self.max_size and idx < len(images):\n self.buffer[self.cur_size] = images[idx]\n self.cur_size += 1\n idx += 1\n\n if idx < len(images):\n remove_idxs = self.remove_n(len(images) - idx)\n for remove_idx in remove_idxs:\n self.buffer[remove_idx] = images[idx]\n idx += 1\n\n assert len(self.buffer) == self.cur_size\n\n def remove_n(self, n):\n return random.sample(range(self.init_length, self.cur_size), n)\n\n def get_batch(self, n):\n idxs = random.sample(range(self.cur_size), n)\n return [self.buffer[idx] for idx in idxs]\n\n def get_tf_batch(self, n):\n idxs = random.sample(range(self.cur_size), n)\n return tf.convert_to_tensor([self.buffer[idx] for idx in idxs])\n\n\ndef wgan_loss(targets, predictions):\n return tf.reduce_mean((-2 * targets + 1.) 
* predictions)\n\n\ndef build_gan_loss_fn(loss_name):\n if loss_name == 'bce':\n return tf.keras.losses.BinaryCrossentropy(from_logits=True)\n elif loss_name == 'lsgan':\n return tf.keras.losses.MeanSquaredError()\n elif loss_name == 'wgan':\n return wgan_loss\n else:\n raise NotImplementedError\n\n\ndef discriminator_loss(real, generated, loss_fn):\n # Classification loss for the discriminator, maximize log-prob of the real example\n real_loss = loss_fn(tf.ones_like(real), real)\n generated_loss = loss_fn(tf.zeros_like(generated), generated)\n total_disc_loss = real_loss + generated_loss\n return total_disc_loss * 0.5\n\n\ndef generator_loss(generated, loss_fn):\n # The discriminator's probability (generated) for realness is maximized\n return loss_fn(tf.ones_like(generated), generated)\n\n\ndef cycle_loss(real_image, cycled_image, scale):\n # Cycle-consistency using an L! loss\n return scale * tf.reduce_mean(tf.abs(real_image - cycled_image))\n\n\ndef identity_loss(real_image, same_image, scale):\n # Map the image to itself and compute the L1 loss\n return scale * 0.5 * tf.reduce_mean(tf.abs(real_image - same_image))\n\n\ndef build_cyclegan_models(n_channels, norm_type):\n assert norm_type in ['instancenorm', 'batchnorm']\n generator_g = pix2pix.unet_generator(n_channels, norm_type=norm_type)\n generator_f = pix2pix.unet_generator(n_channels, norm_type=norm_type)\n\n discriminator_x = pix2pix.discriminator(norm_type=norm_type, target=False)\n discriminator_y = pix2pix.discriminator(norm_type=norm_type, target=False)\n\n return generator_g, generator_f, discriminator_x, discriminator_y\n\n\ndef build_mnist_cyclegan_models(norm_type):\n assert norm_type in ['instancenorm', 'batchnorm']\n generator_g = mnist_unet_generator(norm_type=norm_type)\n generator_f = mnist_unet_generator(norm_type=norm_type)\n\n discriminator_x = mnist_discriminator(norm_type=norm_type, target=False)\n discriminator_y = mnist_discriminator(norm_type=norm_type, target=False)\n\n return generator_g, generator_f, discriminator_x, discriminator_y\n\n\ndef get_models_from_input_shape(input_shape, norm_type, output_init=0.02, residual_output=False):\n if input_shape == (28, 28, 1):\n # MNIST-like data\n return mnist_unet_generator(norm_type=norm_type), \\\n mnist_discriminator(norm_type=norm_type, target=False)\n elif input_shape == (256, 256, 3):\n # TODO: just use our unet_generator fn\n if residual_output is True or output_init != 0.02:\n raise NotImplementedError\n return pix2pix.unet_generator(output_channels=3, norm_type=norm_type), \\\n pix2pix.discriminator(norm_type=norm_type, target=False)\n else:\n return unet_generator(output_channels=3, input_shape=input_shape, norm_type=norm_type,\n output_init=output_init, residual_output=residual_output), \\\n pix2pix.discriminator(norm_type=norm_type, target=False)\n\n\ndef build_models(source_input_shape, target_input_shape, norm_type, output_init=0.02, residual_output=False):\n assert norm_type in ['instancenorm', 'batchnorm']\n generator_s_to_t, discriminator_s = get_models_from_input_shape(source_input_shape, norm_type, output_init, residual_output)\n generator_t_to_s, discriminator_t = get_models_from_input_shape(target_input_shape, norm_type, output_init, residual_output)\n\n return generator_s_to_t, generator_t_to_s, discriminator_s, discriminator_t\n\n\ndef build_optimizers(lr_gen=2e-4, lr_disc=2e-4,\n beta_1_gen=0.5, beta_1_disc=0.5,\n lr_scheduler='constant', lr_decay_steps=None):\n generator_g_optimizer = 
tf.keras.optimizers.Adam(build_lr_scheduler(lr_scheduler, 0, 0, lr_gen,\n lr_decay_steps=lr_decay_steps),\n beta_1=beta_1_gen)\n generator_f_optimizer = tf.keras.optimizers.Adam(build_lr_scheduler(lr_scheduler, 0, 0, lr_gen,\n lr_decay_steps=lr_decay_steps),\n beta_1=beta_1_gen)\n\n discriminator_x_optimizer = tf.keras.optimizers.Adam(build_lr_scheduler(lr_scheduler, 0, 0, lr_disc,\n lr_decay_steps=lr_decay_steps),\n beta_1=beta_1_disc)\n discriminator_y_optimizer = tf.keras.optimizers.Adam(build_lr_scheduler(lr_scheduler, 0, 0, lr_disc,\n lr_decay_steps=lr_decay_steps),\n beta_1=beta_1_disc)\n\n return generator_g_optimizer, generator_f_optimizer, discriminator_x_optimizer, discriminator_y_optimizer\n\n\ndef create_cyclegan_data_generator(source_dataset, target_dataset, batch_size, augmentations,\n dataflow, cache_dir):\n if dataflow == 'disk_cached':\n cache_dir = cache_dir + datetime.datetime.now().strftime('%d_%m_%y__%H_%M_%S')\n # Shuffle hangs sometimes (e.g. for horse2zebra)\n return create_paired_direct_dataflow(source_dataset, target_dataset, batch_size,\n augmentations, x_only=True,\n cache_dir1=cache_dir + '1',\n cache_dir2=cache_dir + '2',\n shuffle=True)\n elif dataflow == 'in_memory':\n return create_paired_parallel_dataflow_via_numpy(source_dataset, target_dataset,\n batch_size, augmentations, x_only=True)\n else:\n raise NotImplementedError\n\n\ndef generate_and_log_one_image_batch(data_generator,\n generator_g,\n generator_f,\n step):\n # Grab a batch from the dataset\n for real_x, real_y in data_generator:\n # Convert to tensors\n real_x, real_y = tf.convert_to_tensor(real_x), tf.convert_to_tensor(real_y)\n\n # Compute the fake examples\n fake_y = generator_g(real_x, training=True)\n fake_x = generator_f(real_y, training=True)\n\n # Cycle the fake examples\n cycled_x = generator_f(fake_y, training=True)\n cycled_y = generator_g(fake_x, training=True)\n\n # Compute the identity examples\n same_x = generator_f(real_x, training=True)\n same_y = generator_g(real_y, training=True)\n\n # Log everything to Weights and Biases\n wandb.log({'test/real_x': wandb.Image(gallery(real_x.numpy() * 0.5 + 0.5)),\n 'test/fake_x': wandb.Image(gallery(fake_x.numpy() * 0.5 + 0.5)),\n 'test/cycled_x': wandb.Image(gallery(cycled_x.numpy() * 0.5 + 0.5)),\n 'test/same_x': wandb.Image(gallery(same_x.numpy() * 0.5 + 0.5)),\n 'test/real_y': wandb.Image(gallery(real_y.numpy() * 0.5 + 0.5)),\n 'test/fake_y': wandb.Image(gallery(fake_y.numpy() * 0.5 + 0.5)),\n 'test/cycled_y': wandb.Image(gallery(cycled_y.numpy() * 0.5 + 0.5)),\n 'test/same_y': wandb.Image(gallery(same_y.numpy() * 0.5 + 0.5))}, step=step)\n\n # Break after a single batch: note, this will not run if you remove the break due to wandb reasons (ask Karan)\n break\n\n\nif __name__ == '__main__':\n buffer = ReplayBuffer(1)\n buffer.add([1])\n buffer.add([2])\n buffer.add([3])\n print(buffer.get_batch(1))\n print(buffer.get_batch(1))\n print(buffer.get_batch(1))\n buffer.add([4])\n print(buffer.get_batch(1))\n print(buffer.buffer)\n\n buffer = ReplayBuffer(1)\n buffer.add(tf.convert_to_tensor([1]))\n buffer.add(tf.convert_to_tensor([2]))\n buffer.add(tf.convert_to_tensor([3]))\n print(tf.convert_to_tensor(buffer.get_batch(1)))\n print(buffer.get_batch(1))\n print(buffer.get_batch(1))\n buffer.add(tf.convert_to_tensor([4]))\n print(buffer.get_batch(1))\n print(buffer.buffer)\n"
] | [
[
"tensorflow.keras.losses.MeanSquaredError",
"tensorflow.math.reduce_std",
"tensorflow.shape",
"tensorflow.ones_like",
"tensorflow.reduce_mean",
"tensorflow.zeros_like",
"tensorflow.GradientTape",
"tensorflow.keras.losses.BinaryCrossentropy",
"tensorflow.abs",
"tensorflow.convert_to_tensor",
"tensorflow.random.uniform",
"tensorflow.constant"
]
] |
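
The CycleGAN utilities above build their GAN objective from `build_gan_loss_fn` plus `discriminator_loss`/`generator_loss`. The sketch below reproduces only the LSGAN (`'lsgan'`) branch on dummy discriminator outputs to show how the `tf.ones_like`/`tf.zeros_like` targets enter the losses; the logit values are placeholders, not outputs of the repository's models.

```python
import tensorflow as tf

# Minimal sketch of the LSGAN-style losses used above: the discriminator is
# pushed toward 1 on real outputs and 0 on generated outputs, the generator
# toward 1 on generated outputs. The values below are dummy numbers.
loss_fn = tf.keras.losses.MeanSquaredError()  # the 'lsgan' branch of build_gan_loss_fn

disc_on_real = tf.constant([[0.9], [0.8]])  # discriminator output on real images
disc_on_fake = tf.constant([[0.2], [0.4]])  # discriminator output on generated images

real_loss = loss_fn(tf.ones_like(disc_on_real), disc_on_real)
generated_loss = loss_fn(tf.zeros_like(disc_on_fake), disc_on_fake)
disc_loss = 0.5 * (real_loss + generated_loss)  # same form as discriminator_loss above

gen_loss = loss_fn(tf.ones_like(disc_on_fake), disc_on_fake)  # same form as generator_loss

print(float(disc_loss), float(gen_loss))
```
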
globusgenomics/galaxy | [
"7caf74d9700057587b3e3434c64e82c5b16540f1"
] | [
"tools/intogen/runtime/pyenv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/lsmr.py"
] | [
"\"\"\"\nCopyright (C) 2010 David Fong and Michael Saunders\n\nLSMR uses an iterative method.\n\n07 Jun 2010: Documentation updated\n03 Jun 2010: First release version in Python\n\nDavid Chin-lung Fong [email protected]\nInstitute for Computational and Mathematical Engineering\nStanford University\n\nMichael Saunders [email protected]\nSystems Optimization Laboratory\nDept of MS&E, Stanford University.\n\n\"\"\"\n\nfrom __future__ import division, print_function, absolute_import\n\n__all__ = ['lsmr']\n\nfrom numpy import zeros, infty\nfrom numpy.linalg import norm\nfrom math import sqrt\nfrom scipy.sparse.linalg.interface import aslinearoperator\n\nfrom .lsqr import _sym_ortho\n\ndef lsmr(A, b, damp=0.0, atol=1e-6, btol=1e-6, conlim=1e8,\n maxiter=None, show=False):\n \"\"\"Iterative solver for least-squares problems.\n\n lsmr solves the system of linear equations ``Ax = b``. If the system\n is inconsistent, it solves the least-squares problem ``min ||b - Ax||_2``.\n A is a rectangular matrix of dimension m-by-n, where all cases are\n allowed: m = n, m > n, or m < n. B is a vector of length m.\n The matrix A may be dense or sparse (usually sparse).\n\n .. versionadded:: 0.11.0\n\n Parameters\n ----------\n A : {matrix, sparse matrix, ndarray, LinearOperator}\n Matrix A in the linear system.\n b : (m,) ndarray\n Vector b in the linear system.\n damp : float\n Damping factor for regularized least-squares. `lsmr` solves\n the regularized least-squares problem::\n\n min ||(b) - ( A )x||\n ||(0) (damp*I) ||_2\n\n where damp is a scalar. If damp is None or 0, the system\n is solved without regularization.\n atol, btol : float\n Stopping tolerances. `lsmr` continues iterations until a\n certain backward error estimate is smaller than some quantity\n depending on atol and btol. Let ``r = b - Ax`` be the\n residual vector for the current approximate solution ``x``.\n If ``Ax = b`` seems to be consistent, ``lsmr`` terminates\n when ``norm(r) <= atol * norm(A) * norm(x) + btol * norm(b)``.\n Otherwise, lsmr terminates when ``norm(A^{T} r) <=\n atol * norm(A) * norm(r)``. If both tolerances are 1.0e-6 (say),\n the final ``norm(r)`` should be accurate to about 6\n digits. (The final x will usually have fewer correct digits,\n depending on ``cond(A)`` and the size of LAMBDA.) If `atol`\n or `btol` is None, a default value of 1.0e-6 will be used.\n Ideally, they should be estimates of the relative error in the\n entries of A and B respectively. For example, if the entries\n of `A` have 7 correct digits, set atol = 1e-7. This prevents\n the algorithm from doing unnecessary work beyond the\n uncertainty of the input data.\n conlim : float\n `lsmr` terminates if an estimate of ``cond(A)`` exceeds\n `conlim`. For compatible systems ``Ax = b``, conlim could be\n as large as 1.0e+12 (say). For least-squares problems,\n `conlim` should be less than 1.0e+8. If `conlim` is None, the\n default value is 1e+8. Maximum precision can be obtained by\n setting ``atol = btol = conlim = 0``, but the number of\n iterations may then be excessive.\n maxiter : int\n `lsmr` terminates if the number of iterations reaches\n `maxiter`. The default is ``maxiter = min(m, n)``. 
For\n ill-conditioned systems, a larger value of `maxiter` may be\n needed.\n show : bool\n Print iterations logs if ``show=True``.\n\n Returns\n -------\n x : ndarray of float\n Least-square solution returned.\n istop : int\n istop gives the reason for stopping::\n\n istop = 0 means x=0 is a solution.\n = 1 means x is an approximate solution to A*x = B,\n according to atol and btol.\n = 2 means x approximately solves the least-squares problem\n according to atol.\n = 3 means COND(A) seems to be greater than CONLIM.\n = 4 is the same as 1 with atol = btol = eps (machine\n precision)\n = 5 is the same as 2 with atol = eps.\n = 6 is the same as 3 with CONLIM = 1/eps.\n = 7 means ITN reached maxiter before the other stopping\n conditions were satisfied.\n\n itn : int\n Number of iterations used.\n normr : float\n ``norm(b-Ax)``\n normar : float\n ``norm(A^T (b - Ax))``\n norma : float\n ``norm(A)``\n conda : float\n Condition number of A.\n normx : float\n ``norm(x)``\n\n References\n ----------\n .. [1] D. C.-L. Fong and M. A. Saunders,\n \"LSMR: An iterative algorithm for sparse least-squares problems\",\n SIAM J. Sci. Comput., vol. 33, pp. 2950-2971, 2011.\n http://arxiv.org/abs/1006.0758\n .. [2] LSMR Software, http://www.stanford.edu/~clfong/lsmr.html\n\n \"\"\"\n\n A = aslinearoperator(A)\n b = b.squeeze()\n\n msg=('The exact solution is x = 0 ',\n 'Ax - b is small enough, given atol, btol ',\n 'The least-squares solution is good enough, given atol ',\n 'The estimate of cond(Abar) has exceeded conlim ',\n 'Ax - b is small enough for this machine ',\n 'The least-squares solution is good enough for this machine',\n 'Cond(Abar) seems to be too large for this machine ',\n 'The iteration limit has been reached ')\n\n hdg1 = ' itn x(1) norm r norm A''r'\n hdg2 = ' compatible LS norm A cond A'\n pfreq = 20 # print frequency (for repeating the heading)\n pcount = 0 # print counter\n\n m, n = A.shape\n\n # stores the num of singular values\n minDim = min([m, n])\n\n if maxiter is None:\n maxiter = minDim\n\n if show:\n print(' ')\n print('LSMR Least-squares solution of Ax = b\\n')\n print('The matrix A has %8g rows and %8g cols' % (m, n))\n print('damp = %20.14e\\n' % (damp))\n print('atol = %8.2e conlim = %8.2e\\n' % (atol, conlim))\n print('btol = %8.2e maxiter = %8g\\n' % (btol, maxiter))\n\n u = b\n beta = norm(u)\n\n v = zeros(n)\n alpha = 0\n\n if beta > 0:\n u = (1 / beta) * u\n v = A.rmatvec(u)\n alpha = norm(v)\n\n if alpha > 0:\n v = (1 / alpha) * v\n\n\n # Initialize variables for 1st iteration.\n\n itn = 0\n zetabar = alpha * beta\n alphabar = alpha\n rho = 1\n rhobar = 1\n cbar = 1\n sbar = 0\n\n h = v.copy()\n hbar = zeros(n)\n x = zeros(n)\n\n # Initialize variables for estimation of ||r||.\n\n betadd = beta\n betad = 0\n rhodold = 1\n tautildeold = 0\n thetatilde = 0\n zeta = 0\n d = 0\n\n # Initialize variables for estimation of ||A|| and cond(A)\n\n normA2 = alpha * alpha\n maxrbar = 0\n minrbar = 1e+100\n normA = sqrt(normA2)\n condA = 1\n normx = 0\n\n # Items for use in stopping rules.\n normb = beta\n istop = 0\n ctol = 0\n if conlim > 0:\n ctol = 1 / conlim\n normr = beta\n\n # Reverse the order here from the original matlab code because\n # there was an error on return when arnorm==0\n normar = alpha * beta\n if normar == 0:\n if show:\n print(msg[0])\n return x, istop, itn, normr, normar, normA, condA, normx\n\n if show:\n print(' ')\n print(hdg1, hdg2)\n test1 = 1\n test2 = alpha / beta\n str1 = '%6g %12.5e' % (itn, x[0])\n str2 = ' %10.3e %10.3e' % (normr, normar)\n 
str3 = ' %8.1e %8.1e' % (test1, test2)\n print(''.join([str1, str2, str3]))\n\n # Main iteration loop.\n while itn < maxiter:\n itn = itn + 1\n\n # Perform the next step of the bidiagonalization to obtain the\n # next beta, u, alpha, v. These satisfy the relations\n # beta*u = a*v - alpha*u,\n # alpha*v = A'*u - beta*v.\n\n u = A.matvec(v) - alpha * u\n beta = norm(u)\n\n if beta > 0:\n u = (1 / beta) * u\n v = A.rmatvec(u) - beta * v\n alpha = norm(v)\n if alpha > 0:\n v = (1 / alpha) * v\n\n # At this point, beta = beta_{k+1}, alpha = alpha_{k+1}.\n\n # Construct rotation Qhat_{k,2k+1}.\n\n chat, shat, alphahat = _sym_ortho(alphabar, damp)\n\n # Use a plane rotation (Q_i) to turn B_i to R_i\n\n rhoold = rho\n c, s, rho = _sym_ortho(alphahat, beta)\n thetanew = s*alpha\n alphabar = c*alpha\n\n # Use a plane rotation (Qbar_i) to turn R_i^T to R_i^bar\n\n rhobarold = rhobar\n zetaold = zeta\n thetabar = sbar * rho\n rhotemp = cbar * rho\n cbar, sbar, rhobar = _sym_ortho(cbar * rho, thetanew)\n zeta = cbar * zetabar\n zetabar = - sbar * zetabar\n\n # Update h, h_hat, x.\n\n hbar = h - (thetabar * rho / (rhoold * rhobarold)) * hbar\n x = x + (zeta / (rho * rhobar)) * hbar\n h = v - (thetanew / rho) * h\n\n # Estimate of ||r||.\n\n # Apply rotation Qhat_{k,2k+1}.\n betaacute = chat * betadd\n betacheck = -shat * betadd\n\n # Apply rotation Q_{k,k+1}.\n betahat = c * betaacute\n betadd = -s * betaacute\n\n # Apply rotation Qtilde_{k-1}.\n # betad = betad_{k-1} here.\n\n thetatildeold = thetatilde\n ctildeold, stildeold, rhotildeold = _sym_ortho(rhodold, thetabar)\n thetatilde = stildeold* rhobar\n rhodold = ctildeold * rhobar\n betad = - stildeold * betad + ctildeold * betahat\n\n # betad = betad_k here.\n # rhodold = rhod_k here.\n\n tautildeold = (zetaold - thetatildeold * tautildeold) / rhotildeold\n taud = (zeta - thetatilde * tautildeold) / rhodold\n d = d + betacheck * betacheck\n normr = sqrt(d + (betad - taud)**2 + betadd * betadd)\n\n # Estimate ||A||.\n normA2 = normA2 + beta * beta\n normA = sqrt(normA2)\n normA2 = normA2 + alpha * alpha\n\n # Estimate cond(A).\n maxrbar = max(maxrbar, rhobarold)\n if itn > 1:\n minrbar= min(minrbar, rhobarold)\n condA = max(maxrbar, rhotemp) / min(minrbar, rhotemp)\n\n # Test for convergence.\n\n # Compute norms for convergence testing.\n normar = abs(zetabar)\n normx = norm(x)\n\n # Now use these norms to estimate certain other quantities,\n # some of which will be small near a solution.\n\n test1 = normr / normb\n if (normA * normr) != 0:\n test2 = normar / (normA * normr)\n else:\n test2 = infty\n test3 = 1 / condA\n t1 = test1 / (1 + normA * normx / normb)\n rtol = btol + atol * normA * normx / normb\n\n # The following tests guard against extremely small values of\n # atol, btol or ctol. 
(The user may have set any or all of\n # the parameters atol, btol, conlim to 0.)\n # The effect is equivalent to the normAl tests using\n # atol = eps, btol = eps, conlim = 1/eps.\n\n if itn >= maxiter:\n istop = 7\n if 1 + test3 <= 1:\n istop = 6\n if 1 + test2 <= 1:\n istop = 5\n if 1 + t1 <= 1:\n istop = 4\n\n # Allow for tolerances set by the user.\n\n if test3 <= ctol:\n istop = 3\n if test2 <= atol:\n istop = 2\n if test1 <= rtol:\n istop = 1\n\n # See if it is time to print something.\n\n if show:\n if (n <= 40) or (itn <= 10) or (itn >= maxiter - 10) or \\\n (itn % 10 == 0) or (test3 <= 1.1 * ctol) or \\\n (test2 <= 1.1 * atol) or (test1 <= 1.1 * rtol) or \\\n (istop != 0):\n\n if pcount >= pfreq:\n pcount = 0\n print(' ')\n print(hdg1, hdg2)\n pcount = pcount + 1\n str1 = '%6g %12.5e' % (itn, x[0])\n str2 = ' %10.3e %10.3e' % (normr, normar)\n str3 = ' %8.1e %8.1e' % (test1, test2)\n str4 = ' %8.1e %8.1e' % (normA, condA)\n print(''.join([str1, str2, str3, str4]))\n\n if istop > 0:\n break\n\n # Print the stopping condition.\n\n if show:\n print(' ')\n print('LSMR finished')\n print(msg[istop])\n print('istop =%8g normr =%8.1e' % (istop, normr))\n print(' normA =%8.1e normAr =%8.1e' % (normA, normar))\n print('itn =%8g condA =%8.1e' % (itn, condA))\n print(' normx =%8.1e' % (normx))\n print(str1, str2)\n print(str3, str4)\n\n return x, istop, itn, normr, normar, normA, condA, normx\n"
] | [
[
"scipy.sparse.linalg.interface.aslinearoperator",
"numpy.linalg.norm",
"numpy.zeros"
]
] |
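
The record above is SciPy's `lsmr` implementation, whose docstring already spells out the return tuple and the stopping codes. A short usage sketch on a tiny overdetermined system (the matrix and right-hand side are arbitrary example values):

```python
import numpy as np
from scipy.sparse.linalg import lsmr

# Small overdetermined least-squares problem min ||b - Ax||_2 solved with lsmr.
# The data below are arbitrary example values.
A = np.array([[1.0, 0.0],
              [1.0, 1.0],
              [1.0, 2.0]])
b = np.array([1.0, 2.0, 2.9])

x, istop, itn, normr, normar, norma, conda, normx = lsmr(A, b, atol=1e-8, btol=1e-8)

print("solution:", x)         # close to the normal-equations solution
print("stop reason:", istop)  # interpreted via the istop codes documented above
print("iterations:", itn)
```
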
sisl/mechamodlearn | [
"ed514b5d1193ce546b0221ba9222b0228d6c319a"
] | [
"mechamodlearn/rigidbody.py"
] | [
"# File: rigidbody.py\n\nimport abc\nimport torch\n\nfrom mechamodlearn import nn, utils\nfrom mechamodlearn.models import CholeskyMMNet, PotentialNet, GeneralizedForceNet\n\n\nclass AbstractRigidBody:\n\n @property\n @abc.abstractmethod\n def thetamask(self):\n \"\"\"Returns theta mask for configuration q.\n These should use utils.diffangles to compute differences\n \"\"\"\n\n @abc.abstractmethod\n def mass_matrix(self, q):\n \"\"\"Return mass matrix for configuration q\"\"\"\n\n @abc.abstractmethod\n def potential(self, q):\n \"\"\"Return potential for configuration q\"\"\"\n\n @abc.abstractmethod\n def generalized_force(self, q, v, u):\n \"\"\"Return generalized force for configuration q, velocity v, external torque u\"\"\"\n\n def kinetic_energy(self, q, v):\n mass_matrix = self.mass_matrix(q)\n # TODO(jkg): Check if this works correctly for batched\n kenergy = 0.5 * (v.unsqueeze(1) @ (mass_matrix @ v.unsqueeze(2))).squeeze(2)\n return kenergy\n\n def lagrangian(self, q, v):\n \"\"\" Returns the Lagrangian of a mechanical system\n \"\"\"\n kenergy = self.kinetic_energy(q, v)\n pot = self.potential(q)\n lag = kenergy - pot\n return lag\n\n def hamiltonian(self, q, v):\n \"\"\" Returns the Hamiltonian of a mechanical system\n \"\"\"\n kenergy = self.kinetic_energy(q, v)\n pot = self.potential(q)\n ham = kenergy + pot\n return ham\n\n def corriolisforce(self, q, v, mass_matrix=None):\n \"\"\" Computes the corriolis matrix times v\n \"\"\"\n with torch.enable_grad():\n if mass_matrix is None:\n mass_matrix = self.mass_matrix(q)\n\n Mv = mass_matrix @ v.unsqueeze(2)\n\n KE = 0.5 * v.unsqueeze(1) @ Mv\n\n Cv_KE = torch.autograd.grad(KE.sum(), q, retain_graph=True, create_graph=True)[0]\n\n gMv = torch.stack([\n torch.autograd.grad(Mv[:, i].sum(), q, retain_graph=True, create_graph=True)[0]\n for i in range(q.size(1))\n ], dim=1)\n\n Cv = gMv @ v.unsqueeze(2) - Cv_KE.unsqueeze(2)\n\n return Cv\n\n def corriolis(self, q, v, mass_matrix=None):\n \"\"\" Computes the corriolis matrix\n \"\"\"\n with torch.enable_grad():\n if mass_matrix is None:\n mass_matrix = self.mass_matrix(q)\n\n qdim = q.size(1)\n B = mass_matrix.size(0)\n\n mass_matrix = mass_matrix.reshape(-1, qdim, qdim)\n\n # TODO vectorize\n rows = []\n\n for i in range(qdim):\n cols = []\n for j in range(qdim):\n qgrad = torch.autograd.grad(\n torch.sum(mass_matrix[:, i, j]), q, retain_graph=True, create_graph=True)[0]\n cols.append(qgrad)\n\n rows.append(torch.stack(cols, dim=1))\n\n dMijk = torch.stack(rows, dim=1)\n\n corriolis = 0.5 * ((dMijk + dMijk.transpose(2, 3) - dMijk.transpose(1, 3)\n ) @ v.reshape(B, 1, qdim, 1)).squeeze(3)\n return corriolis\n\n def gradpotential(self, q):\n \"\"\" Returns the conservative forces acting on the system\n \"\"\"\n with torch.enable_grad():\n pot = self.potential(q)\n gvec = torch.autograd.grad(torch.sum(pot), q, retain_graph=True, create_graph=True)[0]\n return gvec\n\n def solve_euler_lagrange(self, q, v, u=None):\n \"\"\" Computes `qddot` (generalized acceleration) by solving\n the Euler-Lagrange equation (Eq 7 in the paper)\n \\qddot = M^-1 (F - Cv - G)\n \"\"\"\n with torch.enable_grad():\n with utils.temp_require_grad((q, v)):\n M = self.mass_matrix(q)\n Cv = self.corriolisforce(q, v, M)\n G = self.gradpotential(q)\n\n F = torch.zeros_like(Cv)\n\n if u is not None:\n F = self.generalized_force(q, v, u)\n\n # Solve M \\qddot = F - Cv - G\n qddot = torch.gesv(F - Cv - G.unsqueeze(2), M)[0].squeeze(2)\n return qddot\n\n\nclass LearnedRigidBody(AbstractRigidBody, torch.nn.Module):\n\n def 
__init__(self, qdim: int, udim: int, thetamask: torch.tensor, mass_matrix=None,\n potential=None, generalized_force=None, hidden_sizes=None):\n \"\"\"\n\n Arguments:\n - `qdim`:\n - `udim`: [int]\n - `thetamask`: [torch.Tensor (1, qdim)] 1 if angle, 0 otherwise\n - `mass_matrix`: [torch.nn.Module]\n - `potential`: [torch.nn.Module]\n - `generalized_force`: [torch.nn.Module]\n - hidden_sizes: [list]\n \"\"\"\n self._qdim = qdim\n self._udim = udim\n\n self._thetamask = thetamask\n\n super().__init__()\n\n if mass_matrix is None:\n mass_matrix = CholeskyMMNet(qdim, hidden_sizes=hidden_sizes)\n\n self._mass_matrix = mass_matrix\n\n if potential is None:\n potential = PotentialNet(qdim, hidden_sizes=hidden_sizes)\n\n self._potential = potential\n\n if generalized_force is None:\n generalized_force = GeneralizedForceNet(qdim, udim, hidden_sizes)\n\n self._generalized_force = generalized_force\n\n def mass_matrix(self, q):\n return self._mass_matrix(q)\n\n def potential(self, q):\n return self._potential(q)\n\n def generalized_force(self, q, v, u):\n return self._generalized_force(q, v, u)\n\n @property\n def thetamask(self):\n return self._thetamask\n\n def forward(self, q, v, u=None):\n return self.solve_euler_lagrange(q, v, u)\n"
] | [
[
"torch.sum",
"torch.zeros_like",
"torch.stack",
"torch.enable_grad"
]
] |
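
`AbstractRigidBody` above obtains conservative forces as the autograd gradient of the potential and then solves `M qddot = F - Cv - G` with `torch.gesv`, which has since been removed from PyTorch. The sketch below shows the same gradient-of-potential pattern with a toy quadratic potential and uses `torch.linalg.solve` as the usual modern replacement; the potential and mass matrices are stand-ins, not the repository's learned networks.

```python
import torch

# Toy stand-in for the learned potential: V(q) = 0.5 * ||q||^2, batched over q.
def potential(q):
    return 0.5 * (q ** 2).sum(dim=1, keepdim=True)

q = torch.tensor([[0.3, -1.2], [2.0, 0.5]], requires_grad=True)

# Conservative force term G = dV/dq, the same pattern as gradpotential() above.
V = potential(q)
G = torch.autograd.grad(V.sum(), q, create_graph=True)[0]

# The record solves M @ qddot = F - Cv - G with torch.gesv (removed in recent
# PyTorch); torch.linalg.solve is the usual replacement, batched here. F and Cv
# are taken as zero purely to keep the example small.
M = torch.eye(2).repeat(2, 1, 1)   # placeholder unit mass matrices, shape (B, 2, 2)
rhs = (-G).unsqueeze(2)            # shape (B, 2, 1)
qddot = torch.linalg.solve(M, rhs).squeeze(2)

print(G)      # equals q for this quadratic potential
print(qddot)  # equals -q here
```
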
ucx-code/ucXception | [
"6b1f4fe4aa53a28e87584d07f540095c20ee50e9"
] | [
"framework/parsers/ucXception_fi_parser.py"
] | [
"import numpy as np\n\ndef map_reg_to_text(reg_code):\n\treg_dict = (\"rip\", \"rsp\", \"rax\", \"rbx\", \"rcx\", \"rdx\", \"cs\", \"ss\", \"eflags\", \"rbp\", \"r8\", \"r9\",\n\t\t\t\t\"r10\", \"r11\", \"r12\", \"r13\", \"r14\", \"r15\", \"rsi\", \"rdi\", \"orig_rax\", \"fs_base\", \"gs_base\",\n\t\t\t\t\"ds\", \"es\", \"fs\", \"gs\")\n\n\treturn reg_dict[reg_code]\n\n\nclass ucXception_fi_parser:\n\t\n\tdef parse(self, inj_time, reg, bit, chosen_thread, stdout, stderr):\n\t\trow = {}\n\n\t\trow[\"inj_time\"] = inj_time\n\t\trow[\"reg\"] = map_reg_to_text(reg)\n\t\trow[\"reg_d\"] = np.int32(reg)\n\t\trow[\"bit\"] = np.int32(bit)\n\t\trow[\"pid\"] = np.int32(chosen_thread)\n\n\t\t# Get the values of old and new registers\n\t\tprefix = \"none\"\n\t\tfor line in stdout.split(\"\\n\")[:-1]:\n\t\t\tif \"Old register values\" in line:\n\t\t\t\tprefix=\"old_\"\n\t\t\telif \"New register values\" in line:\n\t\t\t\tprefix=\"new_\"\n\t\t\telse:\t\n\t\t\t\t(reg_name, reg_val) = line.split(\": \")\n\t\t\t\treg_name = reg_name.rstrip().lower()\n\t\t\t\treg_val = \"0x%s\" % reg_val.rstrip()\n\t\t\t\t#print reg_name, reg_val, type(reg_val)\n\t\t\t\trow[prefix + reg_name] = reg_val\n\t\t\t\t# We also add the register value in decimal\n\t\t\t\trow[prefix + reg_name + \"_d\"] = np.int(reg_val, 16) # np.int64 gives a strange exception--- (numpy bug?)\n\t\treturn row"
] | [
[
"numpy.int32",
"numpy.int"
]
] |
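
`ucXception_fi_parser` above converts register dump lines from hex with `np.int(reg_val, 16)`; `np.int` was only an alias for the builtin `int` and has been removed from recent NumPy releases. A minimal sketch of the same line parsing with the builtin (the register line is made up):

```python
import numpy as np

# Parse one "REG: HEXVALUE" line the way ucXception_fi_parser does, but with the
# builtin int(): np.int was an alias for int and is gone in NumPy >= 1.24.
line = "RIP: 00007f3a12345678"  # made-up register dump line
reg_name, reg_val = line.split(": ")
reg_name = reg_name.rstrip().lower()
reg_val = "0x%s" % reg_val.rstrip()

row = {
    "old_" + reg_name: reg_val,                  # hex string, e.g. "0x00007f3a12345678"
    "old_" + reg_name + "_d": int(reg_val, 16),  # decimal value
    "bit": np.int32(17),                         # np.int32 still exists and is fine
}
print(row)
```
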
lyuyangh/Cross-Attention-VizWiz-VQA | [
"853bfe480dac5bd1363f60c6b17e25134acdc2fa"
] | [
"demo/predict.py"
] | [
"import datetime\nimport json\nimport os\nimport sys\nimport time\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nfrom utils.flags import FLAGS\n\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nfrom model.vqa_model import ModelParams, VQAModel\nimport demo.demo_dataset as dataset\nimport demo.visualize as visualize\n\n\nclass Inference:\n def __init__(self):\n self.model = self._load_model()\n self.demo_data = dataset.VQAFeatureDataset()\n\n def _get_answer(self, p, dataloader):\n _m, idx = p.max(1)\n return dataloader.dataset.label2ans[idx.item()]\n\n def _load_model(self):\n data_params = json.load(open(FLAGS.data_params_path))\n model_params = ModelParams(\n add_self_attention=FLAGS.add_self_attention,\n fusion_method=FLAGS.fusion_method,\n question_sequence_length=dataset.MAX_QUES_SEQ_LEN,\n number_of_objects=dataset.NO_OBJECTS,\n word_embedding_dimension=data_params[\"word_feat_dimension\"],\n object_embedding_dimension=data_params[\"image_feat_dimension\"],\n vocabulary_size=data_params[\"vocabulary_size\"],\n num_ans_candidates=data_params[\"number_of_answer_candidiates\"],\n )\n model = VQAModel(\n glove_path=FLAGS.glove_path,\n model_params=model_params,\n hidden_dimension=FLAGS.hidden_dimension,\n ).cuda()\n FLAGS.snapshot_path = (\n \"/home/rachana/Documents/vizwiz/save_folder/self_cross_3/final\"\n )\n model_path = FLAGS.snapshot_path\n print(\"loading %s\" % model_path)\n model_data = torch.load(model_path)\n\n model = nn.DataParallel(model).cuda()\n model.load_state_dict(model_data.get(\"model_state\", model_data))\n model.train(False)\n return model\n\n def get_prediction(self, image_id, question, batch_size=1):\n self.demo_data.set_input(image_id, question)\n demo_data_loader = DataLoader(\n self.demo_data,\n batch_size,\n shuffle=False,\n num_workers=1,\n )\n visual_feature, bboxes, question = iter(demo_data_loader).next()\n visual_feature = Variable(visual_feature).cuda()\n bboxes = Variable(bboxes).cuda()\n question = Variable(question).cuda()\n pred, i_att, q_att = self.model(visual_feature, question)\n answer = self._get_answer(pred.data, demo_data_loader)\n\n return (\n answer,\n i_att,\n q_att,\n bboxes,\n )\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.nn.DataParallel",
"torch.autograd.Variable",
"torch.load"
]
] |
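
The inference helper above grabs a single batch with `iter(demo_data_loader).next()` and wraps tensors in `torch.autograd.Variable`, both of which are legacy idioms. A small sketch with a dummy dataset showing the current equivalents (`next(iter(loader))`, plain tensors); all shapes below are illustrative, not the repository's real feature dimensions.

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

# Dummy stand-ins for the demo dataset: 2 samples of (visual_feature, bboxes, question).
visual = torch.randn(2, 36, 2048)        # illustrative shapes only
bboxes = torch.randn(2, 36, 4)
questions = torch.randint(0, 100, (2, 14))

loader = DataLoader(TensorDataset(visual, bboxes, questions), batch_size=1, shuffle=False)

# Python 3 way of grabbing a single batch (iter(loader).next() no longer works):
visual_feature, boxes, question = next(iter(loader))

# Tensors are autograd-aware directly; wrapping in torch.autograd.Variable is a no-op
# in modern PyTorch, and .cuda() is only needed when a GPU is actually available.
if torch.cuda.is_available():
    visual_feature, boxes, question = visual_feature.cuda(), boxes.cuda(), question.cuda()

print(visual_feature.shape, boxes.shape, question.shape)
```
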
Janetteeeeeeee/nnUNet | [
"db654c445aa5ced436dbf842d432dbbcdc01f4b5"
] | [
"nnunet/experiment_planning/experiment_planner_baseline_3DUNet.py"
] | [
"# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport shutil\nfrom collections import OrderedDict\nfrom copy import deepcopy\n\nimport nnunet\nimport numpy as np\nfrom batchgenerators.utilities.file_and_folder_operations import *\nfrom nnunet.configuration import default_num_threads\nfrom nnunet.experiment_planning.DatasetAnalyzer import DatasetAnalyzer\nfrom nnunet.experiment_planning.common_utils import get_pool_and_conv_props_poolLateV2\nfrom nnunet.experiment_planning.utils import create_lists_from_splitted_dataset\nfrom nnunet.network_architecture.generic_UNet import Generic_UNet\nfrom nnunet.paths import *\nfrom nnunet.preprocessing.cropping import get_case_identifier_from_npz\nfrom nnunet.training.model_restore import recursive_find_python_class\n\n\nclass ExperimentPlanner(object):\n def __init__(self, folder_with_cropped_data, preprocessed_output_folder):\n self.folder_with_cropped_data = folder_with_cropped_data\n self.preprocessed_output_folder = preprocessed_output_folder\n self.list_of_cropped_npz_files = subfiles(self.folder_with_cropped_data, True, None, \".npz\", True)\n\n self.preprocessor_name = \"GenericPreprocessor\"\n\n assert isfile(join(self.folder_with_cropped_data, \"dataset_properties.pkl\")), \\\n \"folder_with_cropped_data must contain dataset_properties.pkl\"\n self.dataset_properties = load_pickle(join(self.folder_with_cropped_data, \"dataset_properties.pkl\"))\n\n self.plans_per_stage = OrderedDict()\n self.plans = OrderedDict()\n self.plans_fname = join(self.preprocessed_output_folder, \"nnUNetPlans\" + \"fixed_plans_3D.pkl\")\n self.data_identifier = default_data_identifier\n\n self.transpose_forward = [0, 1, 2]\n self.transpose_backward = [0, 1, 2]\n\n self.unet_base_num_features = Generic_UNet.BASE_NUM_FEATURES_3D\n self.unet_max_num_filters = 320\n self.unet_max_numpool = 999\n self.unet_min_batch_size = 2\n self.unet_featuremap_min_edge_length = 4\n\n self.target_spacing_percentile = 50\n self.anisotropy_threshold = 3\n self.how_much_of_a_patient_must_the_network_see_at_stage0 = 4 # 1/4 of a patient\n self.batch_size_covers_max_percent_of_dataset = 0.05 # all samples in the batch together cannot cover more\n # than 5% of the entire dataset\n\n self.conv_per_stage = 2\n\n def get_target_spacing(self):\n spacings = self.dataset_properties['all_spacings']\n\n # target = np.median(np.vstack(spacings), 0)\n # if target spacing is very anisotropic we may want to not downsample the axis with the worst spacing\n # uncomment after mystery task submission\n \"\"\"worst_spacing_axis = np.argmax(target)\n if max(target) > (2.5 * min(target)):\n spacings_of_that_axis = np.vstack(spacings)[:, worst_spacing_axis]\n target_spacing_of_that_axis = np.percentile(spacings_of_that_axis, 5)\n target[worst_spacing_axis] = target_spacing_of_that_axis\"\"\"\n\n target = np.percentile(np.vstack(spacings), self.target_spacing_percentile, 0)\n return target\n\n def 
save_my_plans(self):\n with open(self.plans_fname, 'wb') as f:\n pickle.dump(self.plans, f)\n\n def load_my_plans(self):\n self.plans = load_pickle(self.plans_fname)\n\n self.plans_per_stage = self.plans['plans_per_stage']\n self.dataset_properties = self.plans['dataset_properties']\n\n self.transpose_forward = self.plans['transpose_forward']\n self.transpose_backward = self.plans['transpose_backward']\n\n def determine_postprocessing(self):\n pass\n \"\"\"\n Spoiler: This is unused, postprocessing was removed. Ignore it.\n :return:\n print(\"determining postprocessing...\")\n\n props_per_patient = self.dataset_properties['segmentation_props_per_patient']\n\n all_region_keys = [i for k in props_per_patient.keys() for i in props_per_patient[k]['only_one_region'].keys()]\n all_region_keys = list(set(all_region_keys))\n\n only_keep_largest_connected_component = OrderedDict()\n\n for r in all_region_keys:\n all_results = [props_per_patient[k]['only_one_region'][r] for k in props_per_patient.keys()]\n only_keep_largest_connected_component[tuple(r)] = all(all_results)\n\n print(\"Postprocessing: only_keep_largest_connected_component\", only_keep_largest_connected_component)\n\n all_classes = self.dataset_properties['all_classes']\n classes = [i for i in all_classes if i > 0]\n\n props_per_patient = self.dataset_properties['segmentation_props_per_patient']\n\n min_size_per_class = OrderedDict()\n for c in classes:\n all_num_voxels = []\n for k in props_per_patient.keys():\n all_num_voxels.append(props_per_patient[k]['volume_per_class'][c])\n if len(all_num_voxels) > 0:\n min_size_per_class[c] = np.percentile(all_num_voxels, 1) * MIN_SIZE_PER_CLASS_FACTOR\n else:\n min_size_per_class[c] = np.inf\n\n min_region_size_per_class = OrderedDict()\n for c in classes:\n region_sizes = [l for k in props_per_patient for l in props_per_patient[k]['region_volume_per_class'][c]]\n if len(region_sizes) > 0:\n min_region_size_per_class[c] = min(region_sizes)\n # we don't need that line but better safe than sorry, right?\n min_region_size_per_class[c] = min(min_region_size_per_class[c], min_size_per_class[c])\n else:\n min_region_size_per_class[c] = 0\n\n print(\"Postprocessing: min_size_per_class\", min_size_per_class)\n print(\"Postprocessing: min_region_size_per_class\", min_region_size_per_class)\n return only_keep_largest_connected_component, min_size_per_class, min_region_size_per_class\n \"\"\"\n\n def get_properties_for_stage(self, current_spacing, original_spacing, original_shape, num_cases,\n num_modalities, num_classes):\n \"\"\"\n Computation of input patch size starts out with the new median shape (in voxels) of a dataset. This is\n opposed to prior experiments where I based it on the median size in mm. The rationale behind this is that\n for some organ of interest the acquisition method will most likely be chosen such that the field of view and\n voxel resolution go hand in hand to show the doctor what they need to see. This assumption may be violated\n for some modalities with anisotropy (cine MRI) but we will have t live with that. 
In future experiments I\n will try to 1) base input patch size match aspect ratio of input size in mm (instead of voxels) and 2) to\n try to enforce that we see the same 'distance' in all directions (try to maintain equal size in mm of patch)\n\n The patches created here attempt keep the aspect ratio of the new_median_shape\n\n :param current_spacing:\n :param original_spacing:\n :param original_shape:\n :param num_cases:\n :return:\n \"\"\"\n new_median_shape = np.round(original_spacing / current_spacing * original_shape).astype(int)\n dataset_num_voxels = np.prod(new_median_shape) * num_cases\n\n # the next line is what we had before as a default. The patch size had the same aspect ratio as the median shape of a patient. We swapped t\n # input_patch_size = new_median_shape\n\n # compute how many voxels are one mm\n input_patch_size = 1 / np.array(current_spacing)\n\n # normalize voxels per mm\n input_patch_size /= input_patch_size.mean()\n\n # create an isotropic patch of size 512x512x512mm\n input_patch_size *= 1 / min(input_patch_size) * 512 # to get a starting value\n input_patch_size = np.round(input_patch_size).astype(int)\n\n # clip it to the median shape of the dataset because patches larger then that make not much sense\n input_patch_size = [min(i, j) for i, j in zip(input_patch_size, new_median_shape)]\n\n network_num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, new_shp, \\\n shape_must_be_divisible_by = get_pool_and_conv_props_poolLateV2(input_patch_size,\n self.unet_featuremap_min_edge_length,\n self.unet_max_numpool,\n current_spacing)\n\n ref = Generic_UNet.use_this_for_batch_size_computation_3D\n here = Generic_UNet.compute_approx_vram_consumption(new_shp, network_num_pool_per_axis,\n self.unet_base_num_features,\n self.unet_max_num_filters, num_modalities,\n num_classes,\n pool_op_kernel_sizes, conv_per_stage=self.conv_per_stage)\n while here > ref:\n axis_to_be_reduced = np.argsort(new_shp / new_median_shape)[-1]\n\n tmp = deepcopy(new_shp)\n tmp[axis_to_be_reduced] -= shape_must_be_divisible_by[axis_to_be_reduced]\n _, _, _, _, shape_must_be_divisible_by_new = \\\n get_pool_and_conv_props_poolLateV2(tmp,\n self.unet_featuremap_min_edge_length,\n self.unet_max_numpool,\n current_spacing)\n new_shp[axis_to_be_reduced] -= shape_must_be_divisible_by_new[axis_to_be_reduced]\n\n # we have to recompute numpool now:\n network_num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, new_shp, \\\n shape_must_be_divisible_by = get_pool_and_conv_props_poolLateV2(new_shp,\n self.unet_featuremap_min_edge_length,\n self.unet_max_numpool,\n current_spacing)\n\n here = Generic_UNet.compute_approx_vram_consumption(new_shp, network_num_pool_per_axis,\n self.unet_base_num_features,\n self.unet_max_num_filters, num_modalities,\n num_classes, pool_op_kernel_sizes,\n conv_per_stage=self.conv_per_stage)\n # print(new_shp)\n\n input_patch_size = new_shp\n\n batch_size = Generic_UNet.DEFAULT_BATCH_SIZE_3D # This is what works with 128**3\n batch_size = int(np.floor(max(ref / here, 1) * batch_size))\n\n # check if batch size is too large\n max_batch_size = np.round(self.batch_size_covers_max_percent_of_dataset * dataset_num_voxels /\n np.prod(input_patch_size, dtype=np.int64)).astype(int)\n max_batch_size = max(max_batch_size, self.unet_min_batch_size)\n batch_size = max(1, min(batch_size, max_batch_size))\n\n do_dummy_2D_data_aug = (max(input_patch_size) / input_patch_size[\n 0]) > self.anisotropy_threshold\n\n plan = {\n 'batch_size': batch_size,\n 'num_pool_per_axis': 
network_num_pool_per_axis,\n 'patch_size': input_patch_size,\n 'median_patient_size_in_voxels': new_median_shape,\n 'current_spacing': current_spacing,\n 'original_spacing': original_spacing,\n 'do_dummy_2D_data_aug': do_dummy_2D_data_aug,\n 'pool_op_kernel_sizes': pool_op_kernel_sizes,\n 'conv_kernel_sizes': conv_kernel_sizes,\n }\n return plan\n\n def plan_experiment(self):\n use_nonzero_mask_for_normalization = self.determine_whether_to_use_mask_for_norm()\n print(\"Are we using the nonzero mask for normalizaion?\", use_nonzero_mask_for_normalization)\n spacings = self.dataset_properties['all_spacings']\n sizes = self.dataset_properties['all_sizes']\n\n all_classes = self.dataset_properties['all_classes']\n modalities = self.dataset_properties['modalities']\n num_modalities = len(list(modalities.keys()))\n\n target_spacing = self.get_target_spacing()\n new_shapes = [np.array(i) / target_spacing * np.array(j) for i, j in zip(spacings, sizes)]\n\n max_spacing_axis = np.argmax(target_spacing)\n remaining_axes = [i for i in list(range(3)) if i != max_spacing_axis]\n self.transpose_forward = [max_spacing_axis] + remaining_axes\n self.transpose_backward = [np.argwhere(np.array(self.transpose_forward) == i)[0][0] for i in range(3)]\n\n # we base our calculations on the median shape of the datasets\n median_shape = np.median(np.vstack(new_shapes), 0)\n print(\"the median shape of the dataset is \", median_shape)\n\n max_shape = np.max(np.vstack(new_shapes), 0)\n print(\"the max shape in the dataset is \", max_shape)\n min_shape = np.min(np.vstack(new_shapes), 0)\n print(\"the min shape in the dataset is \", min_shape)\n\n print(\"we don't want feature maps smaller than \", self.unet_featuremap_min_edge_length, \" in the bottleneck\")\n\n # how many stages will the image pyramid have?\n self.plans_per_stage = list()\n\n target_spacing_transposed = np.array(target_spacing)[self.transpose_forward]\n median_shape_transposed = np.array(median_shape)[self.transpose_forward]\n print(\"the transposed median shape of the dataset is \", median_shape_transposed)\n\n print(\"generating configuration for 3d_fullres\")\n self.plans_per_stage.append(self.get_properties_for_stage(target_spacing_transposed, target_spacing_transposed,\n median_shape_transposed,\n len(self.list_of_cropped_npz_files),\n num_modalities, len(all_classes) + 1))\n\n # thanks Zakiyi (https://github.com/MIC-DKFZ/nnUNet/issues/61) for spotting this bug :-)\n # if np.prod(self.plans_per_stage[-1]['median_patient_size_in_voxels'], dtype=np.int64) / \\\n # architecture_input_voxels < HOW_MUCH_OF_A_PATIENT_MUST_THE_NETWORK_SEE_AT_STAGE0:\n architecture_input_voxels_here = np.prod(self.plans_per_stage[-1]['patch_size'], dtype=np.int64)\n if np.prod(median_shape) / architecture_input_voxels_here < \\\n self.how_much_of_a_patient_must_the_network_see_at_stage0:\n more = False\n else:\n more = True\n\n if more:\n print(\"generating configuration for 3d_lowres\")\n # if we are doing more than one stage then we want the lowest stage to have exactly\n # HOW_MUCH_OF_A_PATIENT_MUST_THE_NETWORK_SEE_AT_STAGE0 (this is 4 by default so the number of voxels in the\n # median shape of the lowest stage must be 4 times as much as the network can process at once (128x128x128 by\n # default). Problem is that we are downsampling higher resolution axes before we start downsampling the\n # out-of-plane axis. 
We could probably/maybe do this analytically but I am lazy, so here\n # we do it the dumb way\n\n lowres_stage_spacing = deepcopy(target_spacing)\n num_voxels = np.prod(median_shape, dtype=np.float64)\n while num_voxels > self.how_much_of_a_patient_must_the_network_see_at_stage0 * architecture_input_voxels_here:\n max_spacing = max(lowres_stage_spacing)\n if np.any((max_spacing / lowres_stage_spacing) > 2):\n lowres_stage_spacing[(max_spacing / lowres_stage_spacing) > 2] \\\n *= 1.01\n else:\n lowres_stage_spacing *= 1.01\n num_voxels = np.prod(target_spacing / lowres_stage_spacing * median_shape, dtype=np.float64)\n\n lowres_stage_spacing_transposed = np.array(lowres_stage_spacing)[self.transpose_forward]\n new = self.get_properties_for_stage(lowres_stage_spacing_transposed, target_spacing_transposed,\n median_shape_transposed,\n len(self.list_of_cropped_npz_files),\n num_modalities, len(all_classes) + 1)\n architecture_input_voxels_here = np.prod(new['patch_size'], dtype=np.int64)\n if 2 * np.prod(new['median_patient_size_in_voxels'], dtype=np.int64) < np.prod(\n self.plans_per_stage[0]['median_patient_size_in_voxels'], dtype=np.int64):\n self.plans_per_stage.append(new)\n\n self.plans_per_stage = self.plans_per_stage[::-1]\n self.plans_per_stage = {i: self.plans_per_stage[i] for i in range(len(self.plans_per_stage))} # convert to dict\n\n print(self.plans_per_stage)\n print(\"transpose forward\", self.transpose_forward)\n print(\"transpose backward\", self.transpose_backward)\n\n normalization_schemes = self.determine_normalization_scheme()\n only_keep_largest_connected_component, min_size_per_class, min_region_size_per_class = None, None, None\n # removed training data based postprocessing. This is deprecated\n\n # these are independent of the stage\n plans = {'num_stages': len(list(self.plans_per_stage.keys())), 'num_modalities': num_modalities,\n 'modalities': modalities, 'normalization_schemes': normalization_schemes,\n 'dataset_properties': self.dataset_properties, 'list_of_npz_files': self.list_of_cropped_npz_files,\n 'original_spacings': spacings, 'original_sizes': sizes,\n 'preprocessed_data_folder': self.preprocessed_output_folder, 'num_classes': len(all_classes),\n 'all_classes': all_classes, 'base_num_features': self.unet_base_num_features,\n 'use_mask_for_norm': use_nonzero_mask_for_normalization,\n 'keep_only_largest_region': only_keep_largest_connected_component,\n 'min_region_size_per_class': min_region_size_per_class, 'min_size_per_class': min_size_per_class,\n 'transpose_forward': self.transpose_forward, 'transpose_backward': self.transpose_backward,\n 'data_identifier': self.data_identifier, 'plans_per_stage': self.plans_per_stage,\n 'preprocessor_name': self.preprocessor_name,\n 'conv_per_stage': self.conv_per_stage,\n }\n\n self.plans = plans\n self.save_my_plans()\n\n def determine_normalization_scheme(self):\n schemes = OrderedDict()\n modalities = self.dataset_properties['modalities']\n num_modalities = len(list(modalities.keys()))\n\n for i in range(num_modalities):\n if modalities[i] == \"CT\" or modalities[i] == 'ct':\n schemes[i] = \"CT\"\n elif modalities[i] == 'noNorm':\n schemes[i] = \"noNorm\"\n else:\n schemes[i] = \"nonCT\"\n return schemes\n\n def save_properties_of_cropped(self, case_identifier, properties):\n with open(join(self.folder_with_cropped_data, \"%s.pkl\" % case_identifier), 'wb') as f:\n pickle.dump(properties, f)\n\n def load_properties_of_cropped(self, case_identifier):\n with open(join(self.folder_with_cropped_data, \"%s.pkl\" % 
case_identifier), 'rb') as f:\n properties = pickle.load(f)\n return properties\n\n def determine_whether_to_use_mask_for_norm(self):\n # only use the nonzero mask for normalization of the cropping based on it resulted in a decrease in\n # image size (this is an indication that the data is something like brats/isles and then we want to\n # normalize in the brain region only)\n modalities = self.dataset_properties['modalities']\n num_modalities = len(list(modalities.keys()))\n use_nonzero_mask_for_norm = OrderedDict()\n\n for i in range(num_modalities):\n if \"CT\" in modalities[i]:\n use_nonzero_mask_for_norm[i] = False\n else:\n all_size_reductions = []\n for k in self.dataset_properties['size_reductions'].keys():\n all_size_reductions.append(self.dataset_properties['size_reductions'][k])\n\n if np.median(all_size_reductions) < 3 / 4.:\n print(\"using nonzero mask for normalization\")\n use_nonzero_mask_for_norm[i] = True\n else:\n print(\"not using nonzero mask for normalization\")\n use_nonzero_mask_for_norm[i] = False\n\n for c in self.list_of_cropped_npz_files:\n case_identifier = get_case_identifier_from_npz(c)\n properties = self.load_properties_of_cropped(case_identifier)\n properties['use_nonzero_mask_for_norm'] = use_nonzero_mask_for_norm\n self.save_properties_of_cropped(case_identifier, properties)\n use_nonzero_mask_for_normalization = use_nonzero_mask_for_norm\n return use_nonzero_mask_for_normalization\n\n def write_normalization_scheme_to_patients(self):\n \"\"\"\n This is used for test set preprocessing\n :return: \n \"\"\"\n for c in self.list_of_cropped_npz_files:\n case_identifier = get_case_identifier_from_npz(c)\n properties = self.load_properties_of_cropped(case_identifier)\n properties['use_nonzero_mask_for_norm'] = self.plans['use_mask_for_norm']\n self.save_properties_of_cropped(case_identifier, properties)\n\n def run_preprocessing(self, num_threads):\n if os.path.isdir(join(self.preprocessed_output_folder, \"gt_segmentations\")):\n shutil.rmtree(join(self.preprocessed_output_folder, \"gt_segmentations\"))\n shutil.copytree(join(self.folder_with_cropped_data, \"gt_segmentations\"),\n join(self.preprocessed_output_folder, \"gt_segmentations\"))\n normalization_schemes = self.plans['normalization_schemes']\n use_nonzero_mask_for_normalization = self.plans['use_mask_for_norm']\n intensityproperties = self.plans['dataset_properties']['intensityproperties']\n preprocessor_class = recursive_find_python_class([join(nnunet.__path__[0], \"preprocessing\")],\n self.preprocessor_name, current_module=\"nnunet.preprocessing\")\n assert preprocessor_class is not None\n preprocessor = preprocessor_class(normalization_schemes, use_nonzero_mask_for_normalization,\n self.transpose_forward,\n intensityproperties)\n target_spacings = [i[\"current_spacing\"] for i in self.plans_per_stage.values()]\n if self.plans['num_stages'] > 1 and not isinstance(num_threads, (list, tuple)):\n num_threads = (default_num_threads, num_threads)\n elif self.plans['num_stages'] == 1 and isinstance(num_threads, (list, tuple)):\n num_threads = num_threads[-1]\n preprocessor.run(target_spacings, self.folder_with_cropped_data, self.preprocessed_output_folder,\n self.plans['data_identifier'], num_threads)\n\ndef maybe_mkdir_p(directory):\n directory = os.path.abspath(directory)\n splits = directory.split(\"\\\\\")[1:]\n base = directory.split('\\\\')[0]\n for i in range(0, len(splits)):\n if not os.path.isdir(join(base, join(\"\\\\\", *splits[:i+1]))):\n try:\n os.mkdir(join(base, join(\"\\\\\", 
*splits[:i+1])))\n except FileExistsError:\n # this can sometimes happen when two jobs try to create the same directory at the same time,\n # especially on network drives.\n print(\"WARNING: Folder %s already existed and does not need to be created\" % directory)\n\nif __name__ == \"__main__\":\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-t\", \"--task_ids\", nargs=\"+\", help=\"list of int\")\n parser.add_argument(\"-p\", action=\"store_true\", help=\"set this if you actually want to run the preprocessing. If \"\n \"this is not set then this script will only create the plans file\")\n parser.add_argument(\"-tl\", type=int, required=False, default=8, help=\"num_threads_lowres\")\n parser.add_argument(\"-tf\", type=int, required=False, default=8, help=\"num_threads_fullres\")\n\n args = parser.parse_args()\n task_ids = args.task_ids\n run_preprocessing = args.p\n tl = args.tl\n tf = args.tf\n\n tasks = []\n for i in task_ids:\n i = int(i)\n candidates = subdirs(nnUNet_cropped_data, prefix=\"Task%03.0d\" % i, join=False)\n assert len(candidates) == 1\n tasks.append(candidates[0])\n\n for t in tasks:\n try:\n print(\"\\n\\n\\n\", t)\n cropped_out_dir = os.path.join(nnUNet_cropped_data, t)\n preprocessing_output_dir_this_task = os.path.join(preprocessing_output_dir, t)\n splitted_4d_output_dir_task = os.path.join(nnUNet_raw_data, t)\n lists, modalities = create_lists_from_splitted_dataset(splitted_4d_output_dir_task)\n\n dataset_analyzer = DatasetAnalyzer(cropped_out_dir, overwrite=False)\n _ = dataset_analyzer.analyze_dataset() # this will write output files that will be used by the ExperimentPlanner\n\n maybe_mkdir_p(preprocessing_output_dir_this_task)\n shutil.copy(join(cropped_out_dir, \"dataset_properties.pkl\"), preprocessing_output_dir_this_task)\n shutil.copy(join(nnUNet_raw_data, t, \"dataset.json\"), preprocessing_output_dir_this_task)\n\n threads = (tl, tf)\n\n print(\"number of threads: \", threads, \"\\n\")\n\n exp_planner = ExperimentPlanner(cropped_out_dir, preprocessing_output_dir_this_task)\n exp_planner.plan_experiment()\n if run_preprocessing:\n exp_planner.run_preprocessing(threads)\n except Exception as e:\n print(e)\n"
] | [
[
"numpy.vstack",
"numpy.any",
"numpy.argsort",
"numpy.median",
"numpy.argmax",
"numpy.prod",
"numpy.round",
"numpy.array"
]
] |
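
`ExperimentPlanner` above derives the target spacing as a percentile over all case spacings and ships a hand-rolled, Windows-specific `maybe_mkdir_p`. The sketch below reproduces the spacing/transpose computation on invented spacing values and uses `os.makedirs(..., exist_ok=True)` as the portable standard-library alternative; the directory name is hypothetical.

```python
import os
import numpy as np

# Target spacing as the 50th percentile over per-case spacings, as in
# ExperimentPlanner.get_target_spacing(). The spacings below are invented.
all_spacings = [
    [3.0, 0.78, 0.78],
    [2.5, 0.70, 0.70],
    [3.0, 0.80, 0.80],
    [5.0, 0.90, 0.90],
]
target_spacing = np.percentile(np.vstack(all_spacings), 50, 0)
print(target_spacing)  # [3.   0.79 0.79] for these values

# The axis with the largest spacing is moved to the front, as in plan_experiment().
max_spacing_axis = int(np.argmax(target_spacing))
transpose_forward = [max_spacing_axis] + [i for i in range(3) if i != max_spacing_axis]
print(transpose_forward)  # [0, 1, 2] here

# Portable replacement for the hand-rolled maybe_mkdir_p in the record
# (the path is a hypothetical example):
os.makedirs("preprocessed/Task000_example", exist_ok=True)
```
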
echasnovski/randomvars | [
"15417b0e3ecd27f185b70471102c158f60d51c28"
] | [
"randomvars/tests/test__continuous.py"
] | [
"# pylint: disable=missing-function-docstring\n\"\"\"Tests for '_continuous.py' file\"\"\"\nimport numpy as np\nfrom numpy.testing import assert_array_equal, assert_array_almost_equal\nimport scipy.stats.distributions as distrs\nfrom scipy.stats.kde import gaussian_kde\nfrom scipy.integrate import quad\nimport pytest\n\nfrom randomvars._continuous import Cont\nfrom randomvars.tests.commontests import (\n DECIMAL,\n _test_equal_rand,\n _test_equal_seq,\n _test_from_rv_rand,\n _test_from_sample_rand,\n _test_input_coercion,\n _test_log_fun,\n _test_one_value_input,\n _test_rvs_method,\n declass,\n h,\n)\nfrom randomvars.options import config\n\n\nDISTRIBUTIONS_COMMON = {\n \"beta\": distrs.beta(a=10, b=20),\n \"chi_sq\": distrs.chi2(df=10),\n \"expon\": distrs.expon(),\n \"f\": distrs.f(dfn=20, dfd=20),\n \"gamma\": distrs.gamma(a=10),\n \"laplace\": distrs.laplace(),\n \"lognorm\": distrs.lognorm(s=0.5),\n \"norm\": distrs.norm(),\n \"norm2\": distrs.norm(loc=10),\n \"norm3\": distrs.norm(scale=0.1),\n \"norm4\": distrs.norm(scale=10),\n \"norm5\": distrs.norm(loc=10, scale=0.1),\n \"t\": distrs.t(df=10),\n \"uniform\": distrs.uniform(),\n \"uniform2\": distrs.uniform(loc=10, scale=0.1),\n \"weibull_max\": distrs.weibull_max(c=2),\n \"weibull_min\": distrs.weibull_min(c=2),\n}\n\nDISTRIBUTIONS_INF_DENSITY = {\n \"inf_beta_both\": distrs.beta(a=0.4, b=0.6),\n \"inf_beta_left\": distrs.beta(a=0.5, b=2),\n \"inf_beta_right\": distrs.beta(a=2, b=0.5),\n \"inf_chi_sq\": distrs.chi2(df=1),\n \"inf_weibull_max\": distrs.weibull_max(c=0.5),\n \"inf_weibull_min\": distrs.weibull_min(c=0.5),\n}\n\nDISTRIBUTIONS_HEAVY_TAILS = {\n \"heavy_cauchy\": distrs.cauchy(),\n \"heavy_lognorm\": distrs.lognorm(s=1),\n \"heavy_t\": distrs.t(df=2),\n}\n\nDISTRIBUTIONS = {\n **DISTRIBUTIONS_COMMON,\n **DISTRIBUTIONS_HEAVY_TAILS,\n **DISTRIBUTIONS_INF_DENSITY,\n}\n\n\ndef augment_grid(x, n_inner_points):\n test_arr = [\n np.linspace(x[i], x[i + 1], n_inner_points + 1, endpoint=False)\n for i in np.arange(len(x) - 1)\n ]\n test_arr.append([x[-1]])\n return np.concatenate(test_arr)\n\n\ndef from_sample_cdf_max_error(x):\n rv = Cont.from_sample(x)\n density = config.estimator_cont(x)\n\n x_grid = augment_grid(rv.x, 10)\n\n # Efficient way of computing `quad(density, -np.inf, x_grid)`\n x_grid_ext = np.concatenate([[-np.inf], x_grid])\n cdf_intervals = np.array(\n [\n quad(density, x_l, x_r)[0]\n for x_l, x_r in zip(x_grid_ext[:-1], x_grid_ext[1:])\n ]\n )\n cdf_grid = np.cumsum(cdf_intervals)\n\n err = cdf_grid - rv.cdf(x_grid)\n return np.max(np.abs(err))\n\n\ndef circle_fun(x, low, high):\n x = np.array(x)\n center = 0.5 * (high + low)\n radius = 0.5 * (high - low)\n\n res = np.zeros_like(x)\n\n center_dist = np.abs(x - center)\n is_in = center_dist <= radius\n res[is_in] = np.sqrt(radius ** 2 - center_dist[is_in] ** 2)\n\n return res\n\n\ndef make_circ_density(intervals):\n \"\"\"Construct circular density\n\n Density looks like half-circles with diameters lying in elements of\n `intervals`. 
Total integral is equal to 1.\n\n Parameters\n ----------\n intervals : iterable with elements being 2-element iterables\n Iterable of intervals with non-zero density.\n\n Returns\n -------\n density : callable\n Function which returns density values.\n \"\"\"\n\n def density(x):\n res = np.zeros_like(x)\n tot_integral = 0\n for low, high in intervals:\n res += circle_fun(x, low, high)\n # There is only half of circle\n tot_integral += np.pi * (high - low) ** 2 / 8\n\n return res / tot_integral\n\n return density\n\n\nclass TestCont:\n \"\"\"Regression tests for `Cont` class\"\"\"\n\n def test_init_errors(self):\n def check_one_input(def_args, var):\n with pytest.raises(TypeError, match=f\"`{var}`.*numpy array\"):\n def_args[var] = {\"a\": None}\n Cont(**def_args)\n with pytest.raises(TypeError, match=f\"`{var}`.*float\"):\n def_args[var] = [\"a\", \"a\"]\n Cont(**def_args)\n with pytest.raises(TypeError, match=f\"`{var}`.*finite values\"):\n def_args[var] = [0, np.nan]\n Cont(**def_args)\n with pytest.raises(TypeError, match=f\"`{var}`.*finite values\"):\n def_args[var] = [0, np.inf]\n Cont(**def_args)\n with pytest.raises(ValueError, match=f\"`{var}`.*1d array\"):\n def_args[var] = [[0, 1]]\n Cont(**def_args)\n\n check_one_input({\"y\": [1, 1]}, \"x\")\n check_one_input({\"x\": [0, 1]}, \"y\")\n\n with pytest.raises(ValueError, match=\"[Ll]engths.*match\"):\n Cont([0, 1], [1, 1, 1])\n\n with pytest.raises(ValueError, match=\"two\"):\n Cont([1], [1])\n\n with pytest.warns(UserWarning, match=\"`x`.*not sorted.*`x` and `y`\"):\n rv = Cont([1, 0], [0, 2])\n rv_ref = Cont([0, 1], [2, 0])\n _test_equal_rand(rv, rv_ref)\n\n with pytest.raises(ValueError, match=\"`y`.*negative\"):\n Cont([0, 1], [1, -1])\n\n with pytest.raises(ValueError, match=\"`y`.*no positive\"):\n Cont([0, 1], [0, 0])\n\n def test_init(self):\n x_ref = np.array([0, 1, 2])\n y_ref = np.array([0, 1, 0])\n rv_ref = Cont(x_ref, y_ref)\n\n # Simple case with non-numpy input\n rv_1 = Cont(x=x_ref.tolist(), y=y_ref.tolist())\n _test_equal_rand(rv_1, rv_ref)\n\n # Check if `y` is normalized\n rv_2 = Cont(x=x_ref, y=10 * y_ref)\n _test_equal_rand(rv_2, rv_ref)\n\n # Check if `x` and `y` are rearranged if not sorted\n with pytest.warns(UserWarning, match=\"`x`.*not sorted\"):\n rv_3 = Cont(x=x_ref[[1, 0, 2]], y=y_ref[[1, 0, 2]])\n _test_equal_rand(rv_3, rv_ref)\n\n # Check if duplicated values are removed from `x`\n with pytest.warns(UserWarning, match=\"duplicated\"):\n # First pair of xy-grid is taken among duplicates\n rv_4 = Cont(x=x_ref[[0, 1, 1, 2]], y=y_ref[[0, 1, 2, 2]])\n _test_equal_rand(rv_4, rv_ref)\n\n def test_str(self):\n rv = Cont([0, 2, 4], [0, 1, 0])\n assert str(rv) == \"Continuous RV with 2 intervals (support: [0.0, 4.0])\"\n\n # Uses singular noun with one interval\n rv = Cont([0, 1], [1, 1])\n assert str(rv) == \"Continuous RV with 1 interval (support: [0.0, 1.0])\"\n\n def test_properties(self):\n x = np.arange(11)\n y = np.repeat(0.1, 11)\n rv = Cont(x, y)\n\n assert list(rv.params.keys()) == [\"x\", \"y\"]\n assert_array_equal(rv.params[\"x\"], x)\n assert_array_equal(rv.params[\"y\"], y)\n\n assert_array_equal(rv.x, x)\n assert_array_equal(rv.y, y)\n assert rv.a == 0.0\n assert rv.b == 10.0\n\n def test_support(self):\n rv = Cont([0.5, 1.5, 4.5], [0, 0.5, 0])\n assert rv.support() == (0.5, 4.5)\n\n def test_compress(self):\n # Zero tails\n ## Left tail\n _test_equal_rand(\n Cont([0, 1, 2, 3], [0, 0, 0, 2]).compress(), Cont([2, 3], [0, 2])\n )\n _test_equal_rand(\n Cont([0, 1, 2, 3], [0, 0, 1, 0]).compress(), 
Cont([1, 2, 3], [0, 1, 0])\n )\n\n ## Right tail\n _test_equal_rand(\n Cont([0, 1, 2, 3], [2, 0, 0, 0]).compress(), Cont([0, 1], [2, 0])\n )\n _test_equal_rand(\n Cont([0, 1, 2, 3], [0, 1, 0, 0]).compress(), Cont([0, 1, 2], [0, 1, 0])\n )\n\n ## Both tails\n _test_equal_rand(\n Cont([0, 1, 2, 3, 4], [0, 0, 1, 0, 0]).compress(),\n Cont([1, 2, 3], [0, 1, 0]),\n )\n\n # Extra linearity\n ## Non-zero slope\n _test_equal_rand(\n Cont([0, 1, 2, 3, 4], [0.5, 0.25, 0, 0.25, 0.5]).compress(),\n Cont([0, 2, 4], [0.5, 0, 0.5]),\n )\n\n ## Zero slope, non-zero y\n _test_equal_rand(\n Cont([0, 1, 2], [0.5, 0.5, 0.5]).compress(), Cont([0, 2], [0.5, 0.5])\n )\n\n ## Zero slope, zero y, outside of tails\n _test_equal_rand(\n Cont([0, 1, 2, 3, 4], [1, 0, 0, 0, 1]).compress(),\n Cont([0, 1, 3, 4], [1, 0, 0, 1]),\n )\n\n # All features\n _test_equal_rand(\n Cont(np.arange(14), [0, 0, 0, 1, 2, 2, 2, 1, 0, 0, 0, 1, 0, 0]).compress(),\n Cont([2, 4, 6, 8, 10, 11, 12], [0, 2, 2, 0, 0, 1, 0]),\n )\n\n # If nothing to compress, self should be returned\n rv = Cont([0, 1], [1, 1])\n assert rv.compress() is rv\n\n def test_ground(self):\n w = config.small_width\n\n # Basic usage\n rv = Cont([0, 1], [1, 1])\n _test_equal_rand(\n rv.ground(), Cont([-w, 0, w, 1 - w, 1, 1 + w], [0, 0.5, 1, 1, 0.5, 0])\n )\n\n # Argument `direction`\n _test_equal_rand(\n rv.ground(direction=\"both\"),\n Cont([-w, 0, w, 1 - w, 1, 1 + w], [0, 0.5, 1, 1, 0.5, 0]),\n )\n _test_equal_rand(\n rv.ground(direction=\"left\"), Cont([-w, 0, w, 1], [0, 0.5, 1, 1])\n )\n _test_equal_rand(\n rv.ground(direction=\"right\"), Cont([0, 1 - w, 1, 1 + w], [1, 1, 0.5, 0])\n )\n _test_equal_rand(rv.ground(direction=\"none\"), rv)\n\n # Argument `w`\n w2 = 0.1\n _test_equal_rand(\n rv.ground(w=w2, direction=\"both\"),\n Cont([-w2, 0, w2, 1 - w2, 1, 1 + w2], [0, 0.5, 1, 1, 0.5, 0]),\n )\n\n # Close neighbors\n rv2 = Cont([0, 0.25 * w, 0.5, 1 - 0.1 * w, 1], [1, 1, 1, 1, 1])\n rv2_grounded = rv2.ground(direction=\"both\")\n ## Check that only outer points were added\n assert_array_equal(rv2_grounded.x[1:-1], rv2.x)\n ## Check that grounded actually happend\n assert_array_equal(rv2_grounded.y[[0, -1]], 0.0)\n ## Check that non-edge x-values havae same y-values\n assert_array_equal(rv2_grounded.pdf(rv2.x[1:-1]), rv2.pdf(rv2.x[1:-1]))\n\n def test_ground_options(self):\n rv = Cont([0, 1], [1, 1])\n with config.context({\"small_width\": 0.1}):\n w = config.small_width\n _test_equal_rand(\n rv.ground(), Cont([-w, 0, w, 1 - w, 1, 1 + w], [0, 0.5, 1, 1, 0.5, 0])\n )\n\n def test_ground_errors(self):\n rv = Cont([0, 1], [1, 1])\n with pytest.raises(ValueError, match=\"one of\"):\n rv.ground(direction=\"aaa\")\n\n def test__coeffs_by_ind(self):\n # All coefficients are returned if no `ind` is specified\n rv = Cont([0, 1, 2], [0, 1, 0])\n inter, slope = rv._coeffs_by_ind()\n assert_array_equal(inter, [0, 2])\n assert_array_equal(slope, [1, -1])\n\n def test__grid_by_ind(self):\n # All grid elements are returned if no `ind` is specified\n rv = Cont([0, 1, 2], [0, 1, 0])\n x_out, y_out, p_out = rv._grid_by_ind()\n x_ref, y_ref = rv.x, rv.y\n assert_array_equal(x_out, x_ref)\n assert_array_equal(y_out, y_ref)\n\n def test_pdf_coeffs(self):\n rv = Cont([0, 1, 2], [0, 1, 0])\n x = np.array([-1, 0, 0.5, 1, 1.5, 2, 2.5])\n\n with pytest.raises(ValueError, match=\"one of\"):\n rv.pdf_coeffs(x, side=\"a\")\n\n _test_equal_seq(\n rv.pdf_coeffs(x),\n (np.array([0, 0, 0, 2, 2, 2, 0]), np.array([0, 1, 1, -1, -1, -1, 0])),\n )\n _test_equal_seq(\n rv.pdf_coeffs(x, side=\"left\"),\n 
(np.array([0, 0, 0, 0, 2, 2, 0]), np.array([0, 1, 1, 1, -1, -1, 0])),\n )\n _test_equal_seq(\n rv.pdf_coeffs(np.array([-np.inf, np.nan, np.inf])),\n (np.array([0, np.nan, 0]), np.array([0, np.nan, 0])),\n )\n\n def test_from_rv_basic(self):\n uniform = distrs.uniform\n norm = distrs.norm\n\n # Basic usage\n rv_unif = Cont.from_rv(uniform)\n rv_unif_test = Cont(x=[0, 1], y=[1, 1])\n _test_equal_rand(rv_unif, rv_unif_test, decimal=DECIMAL)\n\n # Objects of `Rand` class should be `convert()`ed\n _test_from_rv_rand(cls=Cont, to_class=\"Cont\")\n\n # Forced support edges\n rv_right = Cont.from_rv(uniform, supp=(0.5, None))\n rv_right_test = Cont([0.5, 1], [2, 2])\n _test_equal_rand(rv_right, rv_right_test, decimal=DECIMAL)\n\n rv_left = Cont.from_rv(uniform, supp=(None, 0.5))\n rv_left_test = Cont([0, 0.5], [2, 2])\n _test_equal_rand(rv_left, rv_left_test, decimal=DECIMAL)\n\n rv_mid = Cont.from_rv(uniform, supp=(0.25, 0.75))\n rv_mid_test = Cont([0.25, 0.75], [2, 2])\n _test_equal_rand(rv_mid, rv_mid_test, decimal=DECIMAL)\n\n def test_from_rv_errors(self):\n # Absence of either `cdf` or `ppf` method should result intro error\n class Tmp:\n pass\n\n tmp1 = Tmp()\n tmp1.ppf = lambda x: np.where((0 <= x) & (x <= 1), 1, 0)\n with pytest.raises(ValueError, match=\"cdf\"):\n Cont.from_rv(tmp1)\n\n tmp2 = Tmp()\n tmp2.cdf = lambda x: np.where((0 <= x) & (x <= 1), 1, 0)\n with pytest.raises(ValueError, match=\"ppf\"):\n Cont.from_rv(tmp2)\n\n def test_from_rv_options(self):\n norm = distrs.norm\n\n # Finite support detection and usage of `small_prob` option\n with config.context({\"small_prob\": 1e-6}):\n rv_norm = Cont.from_rv(norm)\n assert_array_almost_equal(\n rv_norm.support(), norm.ppf([1e-6, 1 - 1e-6]), decimal=DECIMAL\n )\n\n with config.context({\"small_prob\": 1e-6}):\n rv_norm_right = Cont.from_rv(norm, supp=(-1, None))\n assert_array_almost_equal(\n rv_norm_right.support(), [-1, norm.ppf(1 - 1e-6)], decimal=DECIMAL\n )\n\n with config.context({\"small_prob\": 1e-6}):\n rv_norm_left = Cont.from_rv(norm, supp=(None, 1))\n assert_array_almost_equal(\n rv_norm_left.support(), [norm.ppf(1e-6), 1], decimal=DECIMAL\n )\n\n # Usage of `n_grid` option\n with config.context({\"n_grid\": 11}):\n rv_norm_small = Cont.from_rv(norm)\n assert len(rv_norm_small.x) <= 20\n\n # Usage of `cdf_tolerance` option\n with config.context({\"cdf_tolerance\": 1e-4}):\n rv_norm_1 = Cont.from_rv(norm)\n with config.context({\"cdf_tolerance\": 1e-1}):\n rv_norm_2 = Cont.from_rv(norm)\n ## Increasing CDF tolerance should lead to decrease of density grid\n assert len(rv_norm_1.x) > len(rv_norm_2.x)\n\n def test_from_sample_basic(self):\n norm = distrs.norm()\n\n rng = np.random.default_rng(101)\n x = norm.rvs(100, random_state=rng)\n rv = Cont.from_sample(x)\n assert isinstance(rv, Cont)\n\n def test_from_sample_errors(self):\n with pytest.raises(TypeError, match=\"numpy array with float\"):\n Cont.from_sample([\"a\"])\n\n with pytest.raises(ValueError, match=\"1d\"):\n Cont.from_sample([[1], [2]])\n\n def test_from_sample_options(self):\n norm = distrs.norm()\n\n rng = np.random.default_rng(101)\n x = norm.rvs(100, random_state=rng)\n\n # \"estimator_cont\"\n def uniform_estimator(x):\n x_min, x_max = x.min(), x.max()\n\n def res(x):\n return np.where((x >= x_min) & (x <= x_max), 1 / (x_max - x_min), 0)\n\n return res\n\n with config.context({\"estimator_cont\": uniform_estimator}):\n rv = Cont.from_sample(x)\n assert len(rv.y) == 2\n assert np.allclose(rv.y, rv.y[0], atol=1e-13)\n\n # \"estimator_cont\" which returns 
allowed classes\n ## `Rand` class should be forwarded to `from_rv()` method\n _test_from_sample_rand(\n cls=Cont,\n sample=x,\n estimator_option=\"estimator_cont\",\n )\n\n ## \"Scipy\" distribution should be forwarded to `Cont.from_rv()`\n rv_norm = distrs.norm()\n with config.context({\"estimator_cont\": lambda x: rv_norm}):\n rv = Cont.from_sample(np.asarray([0, 1, 2]))\n rv_ref = Cont.from_rv(rv_norm)\n _test_equal_rand(rv, rv_ref)\n\n # \"density_mincoverage\"\n with config.context({\"density_mincoverage\": 0.0}):\n rv = Cont.from_sample(x)\n ## With minimal density mincoverage output range should be equal to\n ## sample range\n assert_array_equal(rv.x[[0, -1]], [x.min(), x.max()])\n\n # \"n_grid\"\n with config.context({\"n_grid\": 11}):\n rv = Cont.from_sample(x)\n assert len(rv.x) <= 22\n\n # \"cdf_tolerance\"\n with config.context({\"cdf_tolerance\": 2.0}):\n rv = Cont.from_sample(x)\n ## With very high CDF tolerance downgridding should result into grid\n ## with three elements. That is because CDF is approximated with\n ## simplest quadratic spline with single segment. That requires three\n ## knots.\n assert len(rv.x) == 3\n\n @pytest.mark.slow\n def test_from_sample_single_value(self):\n \"\"\"How well `from_sample()` handles single unique value in sample\n\n Main problem here is how density range is initialized during estimation.\n \"\"\"\n\n zero_vec = np.zeros(10)\n\n # Default density estimator can't handle situation with single unique\n # sample value (gives `LinAlgError: singular matrix`).\n\n # Case when sample width is zero but density is not zero\n density_centered_interval = make_circ_density([(-1, 1)])\n with config.context({\"estimator_cont\": lambda x: density_centered_interval}):\n assert from_sample_cdf_max_error(zero_vec) <= 1e-4\n\n # Case when both sample width and density are zero\n density_shifted_interval = make_circ_density([(10, 20)])\n with config.context({\"estimator_cont\": lambda x: density_shifted_interval}):\n # Here currently the problem is that support is estimated way to\n # wide with very small (~1e-9) non-zero density outside of [10,\n # 20]. 
However, CDFs are still close.\n assert from_sample_cdf_max_error(zero_vec) <= 2e-4\n\n def test_pdf(self):\n rv = Cont([0, 1, 3], [0.5, 0.5, 0])\n\n # Regular checks\n x = np.array([-1, 0, 0.5, 1, 2, 3, 4])\n assert_array_equal(rv.pdf(x), np.array([0, 0.5, 0.5, 0.5, 0.25, 0, 0]))\n\n # Coercion of not ndarray input\n _test_input_coercion(rv.pdf, x)\n\n # Input around edges\n x = np.array([0 - 1e-10, 0 + 1e-10, 3 - 1e-10, 3 + 1e-10])\n assert_array_almost_equal(\n rv.pdf(x), np.array([0, 0.5, 0.25e-10, 0]), decimal=DECIMAL\n )\n\n # Bad input\n x = np.array([-np.inf, np.nan, np.inf])\n assert_array_equal(rv.pdf(x), np.array([0, np.nan, 0]))\n\n # Dirac-like random variable\n rv_dirac = Cont([10 - h, 10, 10 + h], [0, 1, 0])\n x = np.array([10 - h, 10 - 0.5e-8, 10, 10 + 0.5e-8, 10 + h])\n ## Accuracy is of order of 10 due to extreme magnitudes of values\n assert_array_almost_equal(\n rv_dirac.pdf(x), np.array([0, 0.5e8, 1e8, 0.5e8, 0]), decimal=-1\n )\n\n # Broadcasting\n x = np.array([[-1, 0.5], [2, 4]])\n assert_array_equal(rv.pdf(x), np.array([[0.0, 0.5], [0.25, 0.0]]))\n\n # One value input\n _test_one_value_input(rv.pdf, 0.5)\n _test_one_value_input(rv.pdf, -1)\n _test_one_value_input(rv.pdf, np.nan)\n\n def test_logpdf(self):\n rv = Cont([0, 1, 3], [0.5, 0.5, 0])\n _test_log_fun(rv.logpdf, rv.pdf, x_ref=[-1, 0.1, 3, np.inf, np.nan])\n\n def test_pmf(self):\n rv = Cont([0, 1, 3], [0.5, 0.5, 0])\n with pytest.raises(AttributeError, match=r\"Use `pdf\\(\\)`\"):\n rv.pmf(0)\n\n def test_logpmf(self):\n rv = Cont([0, 1, 3], [0.5, 0.5, 0])\n with pytest.raises(AttributeError, match=r\"Use `logpdf\\(\\)`\"):\n rv.logpmf(0)\n\n def test_cdf(self):\n rv_1 = Cont([0, 1, 2], [0, 1, 0])\n\n # Regular checks\n x = np.array([-1, 0, 0.5, 1, 1.5, 2, 3])\n assert_array_equal(rv_1.cdf(x), np.array([0, 0, 0.125, 0.5, 0.875, 1, 1]))\n\n # Coercion of not ndarray input\n _test_input_coercion(rv_1.cdf, x)\n\n # Bad input\n x = np.array([-np.inf, np.nan, np.inf])\n assert_array_equal(rv_1.cdf(x), np.array([0, np.nan, 1]))\n\n # Dirac-like random variable\n rv_dirac = Cont([10 - h, 10, 10 + h], [0, 1, 0])\n x = np.array([10 - h, 10 - 0.5e-8, 10, 10 + 0.5e-8, 10 + h])\n assert_array_almost_equal(\n rv_dirac.cdf(x), np.array([0, 0.125, 0.5, 0.875, 1]), decimal=DECIMAL\n )\n\n # Broadcasting\n x = np.array([[-1, 0.5], [2, 4]])\n assert_array_equal(rv_1.cdf(x), np.array([[0.0, 0.125], [1.0, 1.0]]))\n\n # One value input\n _test_one_value_input(rv_1.cdf, 0.5)\n _test_one_value_input(rv_1.cdf, -1)\n _test_one_value_input(rv_1.cdf, np.nan)\n\n def test_logcdf(self):\n rv = Cont([0, 1, 3], [0.5, 0.5, 0])\n _test_log_fun(rv.logcdf, rv.cdf, x_ref=[-1, 0.1, 3, np.inf, np.nan])\n\n def test_sf(self):\n rv = Cont([0, 1, 3], [0.5, 0.5, 0])\n x_ref = [-1, 0.1, 3, np.inf, np.nan]\n assert_array_equal(rv.sf(x_ref), 1 - rv.cdf(x_ref))\n\n def test_logsf(self):\n rv = Cont([0, 1, 3], [0.5, 0.5, 0])\n _test_log_fun(rv.logsf, rv.sf, x_ref=[-1, 0.1, 3, np.inf, np.nan])\n\n def test_ppf(self):\n # `ppf()` method should be inverse to `cdf()` for every sensible input\n rv_1 = Cont([0, 1, 2], [0, 1, 0])\n\n # Regular checks\n q = np.array([0, 0.125, 0.5, 0.875, 1])\n assert_array_equal(rv_1.ppf(q), np.array([0, 0.5, 1, 1.5, 2]))\n\n # Coercion of not ndarray input\n _test_input_coercion(rv_1.ppf, q)\n\n # Bad input\n q = np.array([-np.inf, -h, np.nan, 1 + h, np.inf])\n assert_array_equal(\n rv_1.ppf(q), np.array([np.nan, np.nan, np.nan, np.nan, np.nan])\n )\n\n # Dirac-like random variable\n rv_dirac = Cont([10 - h, 10, 10 + h], [0, 
1, 0])\n q = np.array([0, 0.125, 0.5, 0.875, 1])\n assert_array_almost_equal(\n rv_dirac.ppf(q),\n np.array([10 - h, 10 - 0.5e-8, 10, 10 + 0.5e-8, 10 + h]),\n decimal=DECIMAL,\n )\n\n # Broadcasting\n q = np.array([[0, 0.5], [0.0, 1.0]])\n assert_array_equal(rv_1.ppf(q), np.array([[0.0, 1.0], [0.0, 2.0]]))\n\n # One value input\n _test_one_value_input(rv_1.ppf, 0.25)\n _test_one_value_input(rv_1.ppf, -1)\n _test_one_value_input(rv_1.ppf, np.nan)\n\n # Should return the smallest x-value in case of zero-density interval(s)\n rv_zero_density = Cont([0, 1, 2, 3, 4, 5, 6], [0, 0.5, 0, 0, 0, 0.5, 0])\n assert rv_zero_density.ppf(0.5) == 2\n\n def test_isf(self):\n rv = Cont([0, 1, 2], [0, 1, 0])\n\n # Regular checks\n q_ref = np.array([0, 0.125, 0.5, 0.875, 1])\n assert_array_equal(rv.sf(rv.isf(q_ref)), q_ref)\n\n def test_rvs(self):\n rv_1 = Cont([0, 1, 2], [0, 1, 0])\n\n _test_rvs_method(rv_1)\n\n def test__cdf_spline(self):\n rv = Cont([0, 1, 2], [0, 1, 0])\n x = [-10, 0, 0.5, 1, 1.5, 2, 10]\n assert_array_equal(rv._cdf_spline(x), rv.cdf(x))\n\n def test_integrate_cdf(self):\n rv = Cont([0, 1, 2], [0, 1, 0])\n assert np.allclose(rv.integrate_cdf(-10, 10), quad(rv.cdf, -10, 10)[0])\n\n def test_convert(self):\n import randomvars._boolean as bool\n import randomvars._discrete as disc\n import randomvars._mixture as mixt\n\n rv = Cont([0, 1, 2], [0, 1, 0])\n\n # By default and supplying `None` should return self\n assert rv.convert() is rv\n assert rv.convert(None) is rv\n\n # Converting to Bool should result into boolean with probability of\n # `False` being 0 (because probability of continuous RV being exactly\n # zero is 0).\n out_bool = rv.convert(\"Bool\")\n assert isinstance(out_bool, bool.Bool)\n assert out_bool.prob_true == 1.0\n\n # Converting to own class should return self\n out_cont = rv.convert(\"Cont\")\n assert out_cont is rv\n\n # Converting to Disc should result into discrete RV with the same `x`\n # values as in input's xy-grid\n out_disc = rv.convert(\"Disc\")\n assert isinstance(out_disc, disc.Disc)\n assert_array_equal(out_disc.x, rv.x)\n\n # Converting to Mixt should result into degenerate mixture with only\n # continuous component\n out_mixt = rv.convert(\"Mixt\")\n assert isinstance(out_mixt, mixt.Mixt)\n assert out_mixt.cont is rv\n assert out_mixt.weight_cont == 1.0\n\n # Any other target class should result into error\n with pytest.raises(ValueError, match=\"one of\"):\n rv.convert(\"aaa\")\n\n\nclass TestFromRVAccuracy:\n \"\"\"Accuracy of `Cont.from_rv()`\"\"\"\n\n # Output of `from_rv()` should have CDF that differs from original CDF by\n # no more than `thres`\n @pytest.mark.slow\n @pytest.mark.parametrize(\n \"distr_dict,thres\",\n [\n (DISTRIBUTIONS_COMMON, 1e-4),\n (DISTRIBUTIONS_INF_DENSITY, 1e-3),\n (DISTRIBUTIONS_HEAVY_TAILS, 5e-3),\n ],\n )\n def test_cdf_maxerror(self, distr_dict, thres):\n test_passed = {\n name: TestFromRVAccuracy.from_rv_cdf_maxerror(distr) <= thres\n for name, distr in distr_dict.items()\n }\n\n assert all(test_passed.values())\n\n def test_detected_support(self):\n \"\"\"Test correct trimming of zero tails\"\"\"\n rv_ref = Cont([0, 1, 2, 3, 4], [0, 0, 1, 0, 0])\n rv_out = Cont.from_rv(declass(rv_ref))\n _test_equal_rand(rv_out, rv_ref.compress(), decimal=4)\n\n @staticmethod\n def from_rv_cdf_maxerror(rv_base, n_inner_points=10, **kwargs):\n rv_test = Cont.from_rv(rv_base, **kwargs)\n x_grid = augment_grid(rv_test.x, n_inner_points)\n err = rv_base.cdf(x_grid) - rv_test.cdf(x_grid)\n return np.max(np.abs(err))\n\n\nclass 
TestFromSampleAccuracy:\n \"\"\"Accuracy of `Cont.from_sample()`\"\"\"\n\n # Output of `from_sample()` should differ from original density estimate by\n # no more than `thres` (with default density estimator)\n @pytest.mark.slow\n @pytest.mark.parametrize(\n \"distr_dict,thres\",\n [\n (DISTRIBUTIONS_COMMON, 1e-4),\n (DISTRIBUTIONS_INF_DENSITY, 1.5e-4),\n (DISTRIBUTIONS_HEAVY_TAILS, 1e-4),\n ],\n )\n def test_close_cdf(self, distr_dict, thres):\n rng = np.random.default_rng(101)\n test_passed = {\n name: TestFromSampleAccuracy.simulated_cdf_error(distr, rng) <= thres\n for name, distr in distr_dict.items()\n }\n\n assert all(test_passed.values())\n\n @pytest.mark.slow\n def test_density_range(self):\n density_mincoverage = config.density_mincoverage\n estimator_cont = config.estimator_cont\n rng = np.random.default_rng(101)\n\n def generate_density_coverage(distr):\n x = distr.rvs(size=100, random_state=rng)\n density = estimator_cont(x)\n rv = Cont.from_sample(x)\n return quad(density, rv.x[0], rv.x[-1])[0]\n\n test_passed = {\n distr_name: generate_density_coverage(distr) >= density_mincoverage\n for distr_name, distr in DISTRIBUTIONS.items()\n }\n\n assert all(test_passed.values())\n\n @staticmethod\n def simulated_cdf_error(distr, rng):\n x = distr.rvs(size=100, random_state=rng)\n\n # Testing with `gaussian_kde` as the most used density estimator. This\n # also enables to use rather fast way of computing CDF of estimated\n # density via `integrate_box_1d` method.\n with config.context({\"estimator_cont\": gaussian_kde}):\n rv = Cont.from_sample(x)\n density = config.estimator_cont(x)\n\n x_grid = augment_grid(rv.x, 10)\n\n # Interestingly enough, direct computation with `-np.inf` as left\n # integration limit is both accurate and more efficient than computing\n # integrals for each segment and then use `np.cumsum()`. Probably this\n # is because integration of gaussian curves with infinite left limit is\n # done directly through gaussian CDF.\n cdf_grid = np.array(\n [density.integrate_box_1d(-np.inf, cur_x) for cur_x in x_grid]\n )\n\n err = cdf_grid - rv.cdf(x_grid)\n return np.max(np.abs(err))\n\n\ndef test__extend_range():\n def extra_estimator(x):\n x_min, x_max = x.min(), x.max()\n prob_height = 1 / (x_max - x_min + 1)\n\n def res(x):\n return np.where(\n ((x_min < x) & (x < x_max)) | ((x_max + 1 < x) & (x < x_max + 2)),\n prob_height,\n 0,\n )\n\n return res\n\n norm = distrs.norm()\n rng = np.random.default_rng(101)\n x = norm.rvs(100, random_state=rng)\n\n with config.context({\"estimator_cont\": extra_estimator}):\n rv = Cont.from_sample(x)\n\n assert (rv.x[0] <= x.min()) and (rv.x[-1] >= x.max())\n"
] | [
[
"numpy.asarray",
"scipy.stats.distributions.expon",
"scipy.stats.distributions.uniform",
"numpy.allclose",
"scipy.stats.distributions.beta",
"numpy.abs",
"numpy.testing.assert_array_equal",
"numpy.where",
"scipy.stats.distributions.chi2",
"scipy.stats.distributions.f",
"numpy.linspace",
"scipy.stats.distributions.weibull_max",
"numpy.zeros",
"numpy.repeat",
"numpy.arange",
"scipy.stats.distributions.norm",
"numpy.array",
"numpy.zeros_like",
"scipy.integrate.quad",
"numpy.cumsum",
"numpy.random.default_rng",
"scipy.stats.distributions.laplace",
"scipy.stats.distributions.gamma",
"scipy.stats.distributions.weibull_min",
"scipy.stats.distributions.lognorm",
"scipy.stats.distributions.t",
"numpy.sqrt",
"scipy.stats.distributions.cauchy",
"numpy.concatenate"
]
] |
ChenShawn/Grad-Paper-Experiments | [
"00fe1142dae4077b197e99253cc5a4ab759db2ff"
] | [
"TD3/artest.py"
] | [
"import gym\nimport pybullet_envs\nfrom PIL import Image\nimport argparse\nimport numpy as np\nimport torch\nimport copy\nimport os\nfrom sklearn.preprocessing import normalize as Normalize\n\nfrom models import TD3, TD3_adv2\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser(\"TESTING\")\n parser.add_argument('-p', \"--policy\", type=str, default='td3', help=\"td3/adv\")\n parser.add_argument('-e', \"--env\", type=str, default=\"LunarLanderContinuous-v2\", help=\"env name\")\n parser.add_argument('-n', \"--n-episodes\", type=int, default=10, help=\"number of episodes\")\n parser.add_argument(\"--mode\", type=str, default='nr', help=\"nr (default) / pr\")\n parser.add_argument(\"--train-seed\", type=int, default=1, help=\"random seed for training\")\n parser.add_argument(\"--test-seed\", type=int, default=1, help=\"random seed for testing\")\n parser.add_argument(\"--nr-delta\", type=float, default=0.0, help=\"delta for NR-MDP\") \n parser.add_argument(\"--pr-prob\", type=float, default=0.0, help=\"prob of PR-MDP\")\n parser.add_argument(\"--render\", action=\"store_true\", default=False)\n return parser.parse_args()\n\n\n\ndef get_policy(arglist, kwargs, max_action):\n\t# Initialize policy\n\tif arglist.policy == \"td3\":\n\t\t# Target policy smoothing is scaled wrt the action scale\n\t\tkwargs[\"policy_noise\"] = 0.0\n\t\tkwargs[\"noise_clip\"] = 0.0\n\t\tkwargs[\"policy_freq\"] = 2\n\t\tpolicy = TD3.TD3(**kwargs)\n\telif arglist.policy == \"OurDDPG\":\n\t\tpolicy = OurDDPG.DDPG(**kwargs)\n\telif arglist.policy == \"DDPG\":\n\t\tpolicy = DDPG.DDPG(**kwargs)\n\telif arglist.policy == 'adv':\n\t\tkwargs['alpha'] = 0.01\n\t\tkwargs['adv_epsilon'] = 0.01\n\t\tkwargs['logdir'] = f'./tensorboard/{arglist.policy}_{arglist.env}_{arglist.train_seed}/'\n\t\tpolicy = TD3_adv2.TD3(**kwargs)\n\telse:\n\t\traise NotImplementedError\n\treturn policy\n\n\ndef test(arglist):\n env_name = arglist.env\n random_seed = arglist.test_seed\n n_episodes = arglist.n_episodes\n lr = 0.002\n max_timesteps = 3000\n render = arglist.render\n \n filename = \"{}_{}_{}\".format(arglist.policy, env_name, arglist.train_seed)\n directory = \"./train/{}\".format(env_name)\n \n env = gym.make(env_name)\n state_dim = env.observation_space.shape[0]\n action_dim = env.action_space.shape[0]\n max_action = float(env.action_space.high[0])\n\n # Set random seed\n env.seed(random_seed)\n torch.manual_seed(random_seed)\n np.random.seed(random_seed)\n\n kwargs = {\n\t\t\"state_dim\": state_dim,\n\t\t\"action_dim\": action_dim,\n\t\t\"max_action\": max_action,\n\t\t\"discount\": 0.99,\n\t\t\"tau\": 0.005,\n \"policy_noise\": 0.001,\n \"noise_clip\": 1.0,\n \"policy_freq\": 2\n\t}\n policy = get_policy(arglist, kwargs, max_action)\n policy.load(os.path.join(directory, filename))\n \n total_reward_list = []\n for ep in range(1, n_episodes+1):\n ep_reward = 0.0\n state = env.reset()\n for t in range(max_timesteps):\n\n action = policy.select_action(state)\n if arglist.mode == 'nr':\n # use truncated gaussian noise for both nr-mdp and pr-mdp settings\n noise = np.random.normal(0.0, max_action, size=action.shape)\n noise = np.clip(noise, -max_action, max_action)\n adv_action = (1.0 - arglist.nr_delta) * action + arglist.nr_delta * noise\n elif arglist.mode == 'pr':\n adv_action = action\n if np.random.rand() < arglist.pr_prob:\n adv_action = np.random.normal(0.0, action_dim, size=action.shape)\n adv_action = np.clip(adv_action, -max_action, max_action)\n else:\n raise NotImplementedError('invalid mode')\n\n state, reward, done, 
_ = env.step(adv_action)\n ep_reward += reward\n if render:\n env.render()\n if done:\n break\n \n print('Episode: {}\\tReward: {}'.format(ep, int(ep_reward)))\n total_reward_list.append(ep_reward)\n ep_reward = 0.0\n env.close()\n return total_reward_list\n\n\n\nif __name__ == '__main__':\n args = parse_arguments()\n\n reward_list = test(args)\n\n reward_array = np.array(reward_list, dtype=np.float32)\n reward_mean = reward_array.mean()\n reward_half_std = reward_array.std() / 2.0\n loginfo = 'policy={} env={} load_seed={} random_seed={} mode={} pr-prob={} nr-delta={} result={}±{}'\n print(loginfo.format(args.policy, args.env, args.train_seed, args.test_seed, args.mode, args.pr_prob, args.nr_delta, reward_mean, reward_half_std))\n\n"
] | [
[
"torch.manual_seed",
"numpy.random.seed",
"numpy.random.normal",
"numpy.clip",
"numpy.random.rand",
"numpy.array"
]
] |
cyanide1x/covid19-dashboard | [
"7da01c2477c0691caf869d7401587dc85cacef29"
] | [
"_notebooks/canada_voc.py"
] | [
"import pandas as pd\nimport plotly.express as px\n\nurl = 'https://health-infobase.canada.ca/src/data/covidLive/covid19-epiSummary-voc.csv' \n\nprov_dict = {\n\t\"AB\" : \"Alberta\",\n\t\"BC\" : \"British Columbia\",\n\t\"CA\" : \"Canada\",\n\t\"MB\" : \"Manitoba\",\t\n\t\"NB\" : \"New Brunswick\",\n\t\"NL\" : \"Newfoundland and Labrador\",\n\t\"NS\" : \"Nova Scotia\",\n\t\"NT\" : \"Northwest Territories\",\n\t\"NU\" : \"Nunavut\",\n\t\"ON\" : \"Ontario\",\n\t\"PE\" : \"Prince Edward Island\",\n\t\"QC\" : \"Quebec\",\n\t\"SK\" : \"Saskatchewan\",\n\t\"YK\" : \"Yukon\",\n\t\"YT\" : \"Yukon\"\n}\n\ncolours = [\"#012169\", \"#E03C31\", \"green\", \"lightgray\"]\n\ndef get_province(prov):\n\ttry:\n\t\treturn prov_dict[prov]\n\texcept:\n\t\treturn prov\n\ndef get_area(prov):\n\tif prov == 'YK':\n\t\treturn 'YT'\n\telse:\n\t\treturn prov\n\n\ndf = pd.read_csv(url).fillna(0)\ndfclean = df[ (df[\"report_date\"] > \"2021\") & (df[\"report_date\"] < \"2023\") & (df[\"b117\"] >= 0) & (df[\"b1351\"] >= 0) & (df[\"p1\"] >= 0) ]\ndfclean[\"Province\"] = dfclean.apply(lambda r: get_province(r[\"prov\"]), axis=1)\ndfclean[\"Area\"] = dfclean.apply(lambda r: get_area(r[\"prov\"]), axis=1)\n\ndfAlpha = dfclean.copy()\ndfAlpha[\"Variant\"] = \"B.1.1.7 (Alpha)\"\ndfAlpha[\"Count\"] = dfAlpha[\"b117\"]\n\ndfBeta = dfclean.copy()\ndfBeta[\"Variant\"] = \"B.1.351 (Beta)\"\ndfBeta[\"Count\"] = dfBeta[\"b1351\"]\n\ndfGamma = dfclean.copy()\ndfGamma[\"Variant\"] = \"P.1 (Gamma)\"\ndfGamma[\"Count\"] = dfGamma[\"p1\"]\n\ndfvoc = dfAlpha.append(dfBeta).append(dfGamma)\n\ndfvocmax = dfvoc.groupby([\"Province\", \"Variant\"]).max().reset_index() \\\n[[\"Province\", \"Variant\", \"Count\"]] \\\n.rename(columns={\"Count\" : \"MaxVocCount\"}) \n\ndfvoc = pd.merge(dfvoc, dfvocmax, how=\"left\", left_on=[\"Province\", \"Variant\"], right_on=[\"Province\", \"Variant\"])\ndfvoc = dfvoc.sort_values(by=[\"Variant\", \"MaxVocCount\", \"Province\", \"report_date\"], ascending=[True, False, True, True])\n\ndfvoc[\"New\"] = dfvoc.groupby([\"Province\", \"Variant\"])[\"Count\"].diff()\n\ndfprov = dfvoc[dfvoc[\"Province\"] != \"Canada\"]\n\nfiglineprov = px.line(dfprov, \n x=\"report_date\", y=\"Count\", color=\"Variant\", facet_col=\"Province\", facet_col_wrap=1,\n labels={\"report_date\" : \"Reported date\", \"Count\" : \"Cumulative cases\", \"Province\" : \"Province/Territory\"},\n title=\"Cumulative cases with a variant of concern<br>by reported date by province/territory by variant\",\n height=5000, template=\"plotly_white\", color_discrete_sequence=colours, facet_row_spacing=0.025\n )\n\nfigbarprovd = px.bar(dfprov, x=\"report_date\", y=\"New\", color=\"Variant\", facet_col=\"Province\", facet_col_wrap=1,\n labels={\"report_date\" : \"Reported date\", \"New\" : \"New cases\", \"Province\" : \"Province/Territory\", \"Variant\" : \"Variant of concern\"},\n hover_name=\"Variant\",\n title=\"New cases with a variant of concern by reported date<br>by province/territory\",\n height=5000, template=\"plotly_white\", color_discrete_sequence=colours, facet_row_spacing=0.025\n )\n \ndfcan = dfvoc[dfvoc[\"Province\"] == \"Canada\"]\n\nfiglinecan_c = px.line(dfcan, \n x=\"report_date\", y=\"Count\", color=\"Variant\", \n labels={\"report_date\" : \"Reported date\", \"Count\" : \"Cumulative cases\"},\n title=\"Cumulative cases in Canada with a variant of concern<br>by reported date by variant\",\n template=\"plotly_white\", color_discrete_sequence=colours\n )\n \n\nfigbarcan_d = px.bar(dfcan, x=\"report_date\", y=\"New\", 
color=\"Variant\",\n labels={\"report_date\" : \"Reported date\", \"New\" : \"New cases\", \"Variant\" : \"Variant of concern\"},\n hover_name=\"Variant\",\n title=\"New cases in Canada with a variant of concern by reported date\",\n template=\"plotly_white\", color_discrete_sequence=colours\n )\n\n# Accessibility\n\ndate_name = \"Date\" \n\n\ndef join(df, area, variant):\n\tdfarea = dfclean[dfclean[\"Area\"] == area][[\"report_date\", variant]].rename(columns={\"report_date\" : date_name, variant : area}) \n\treturn pd.merge(df, dfarea, how=\"left\", left_on=[date_name], right_on=[date_name])\n\ndef create_table(variant):\n\tdate_max = dfclean.max()[\"report_date\"]\n\tdf_max = dfclean[(dfclean[\"Area\"]!=\"CA\") & (dfclean[\"report_date\"] == date_max)][[\"Area\", variant]].sort_values(by=[variant, \"Area\"], ascending=[False, True])\n\tareas = df_max[\"Area\"].tolist()\n\n\tdf_variant = pd.DataFrame()\n\tdf_variant[date_name] = dfclean[dfclean[\"Area\"]==\"CA\"][\"report_date\"]\n\n\tfor area in areas:\n\t df_variant = join(df_variant, area, variant)\n\t \n\tdf_variant = join(df_variant, \"CA\", variant)\n\treturn df_variant.set_index(date_name).sort_values(by=[date_name], ascending=[False]).round().astype(int)\n\t\ndf_Alpha = create_table(\"b117\")\ndf_Beta = create_table(\"b1351\")\ndf_Gamma = create_table(\"p1\")\n\n"
] | [
[
"pandas.read_csv",
"pandas.DataFrame",
"pandas.merge"
]
] |
yufei1900/cs231n-homework | [
"b7f5a03d5a2b650603074a7c43f203b465b74333"
] | [
"assignment1/cs231n/classifiers/neural_net.py"
] | [
"from __future__ import print_function\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nclass TwoLayerNet(object):\n \"\"\"\n A two-layer fully-connected neural network. The net has an input dimension of\n N, a hidden layer dimension of H, and performs classification over C classes.\n We train the network with a softmax loss function and L2 regularization on the\n weight matrices. The network uses a ReLU nonlinearity after the first fully\n connected layer.\n\n In other words, the network has the following architecture:\n\n input - fully connected layer - ReLU - fully connected layer - softmax\n\n The outputs of the second fully-connected layer are the scores for each class.\n \"\"\"\n\n def __init__(self, input_size, hidden_size, output_size, std=1e-4):\n \"\"\"\n Initialize the model. Weights are initialized to small random values and\n biases are initialized to zero. Weights and biases are stored in the\n variable self.params, which is a dictionary with the following keys:\n\n W1: First layer weights; has shape (D, H)\n b1: First layer biases; has shape (H,)\n W2: Second layer weights; has shape (H, C)\n b2: Second layer biases; has shape (C,)\n\n Inputs:\n - input_size: The dimension D of the input data.\n - hidden_size: The number of neurons H in the hidden layer.\n - output_size: The number of classes C.\n \"\"\"\n self.params = {}\n self.params['W1'] = std * np.random.randn(input_size, hidden_size)\n self.params['b1'] = np.zeros(hidden_size)\n self.params['W2'] = std * np.random.randn(hidden_size, output_size)\n self.params['b2'] = np.zeros(output_size)\n\n def loss(self, X, y=None, reg=0.0):\n \"\"\"\n Compute the loss and gradients for a two layer fully connected neural\n network.\n\n Inputs:\n - X: Input data of shape (N, D). Each X[i] is a training sample.\n - y: Vector of training labels. y[i] is the label for X[i], and each y[i] is\n an integer in the range 0 <= y[i] < C. This parameter is optional; if it\n is not passed then we only return scores, and if it is passed then we\n instead return the loss and gradients.\n - reg: Regularization strength.\n\n Returns:\n If y is None, return a matrix scores of shape (N, C) where scores[i, c] is\n the score for class c on input X[i].\n\n If y is not None, instead return a tuple of:\n - loss: Loss (data loss and regularization loss) for this batch of training\n samples.\n - grads: Dictionary mapping parameter names to gradients of those parameters\n with respect to the loss function; has the same keys as self.params.\n \"\"\"\n # Unpack variables from the params dictionary\n W1, b1 = self.params['W1'], self.params['b1']\n W2, b2 = self.params['W2'], self.params['b2']\n N, D = X.shape\n\n # Compute the forward pass\n scores = None\n #############################################################################\n # TODO: Perform the forward pass, computing the class scores for the input. #\n # Store the result in the scores variable, which should be an array of #\n # shape (N, C). 
#\n #############################################################################\n out1 = np.maximum(0, X.dot(W1) + b1) # relu, (N, H)\n scores = out1.dot(W2) + b2 # (N, C)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n \n # If the targets are not given then jump out, we're done\n if y is None:\n return scores\n\n # Compute the loss\n loss = None\n #############################################################################\n # TODO: Finish the forward pass, and compute the loss. This should include #\n # both the data loss and L2 regularization for W1 and W2. Store the result #\n # in the variable loss, which should be a scalar. Use the Softmax #\n # classifier loss. #\n #############################################################################\n correct_class_score = scores[np.arange(N), y].reshape(N, 1)\n exp_sum = np.sum(np.exp(scores), axis=1).reshape(N, 1)\n loss = np.sum(np.log(exp_sum) - correct_class_score)\n loss /= N\n loss += 0.5 * reg * np.sum(W1 * W1)+ 0.5 * reg * np.sum(W2 * W2)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n # Backward pass: compute gradients\n grads = {}\n #############################################################################\n # TODO: Compute the backward pass, computing the derivatives of the weights #\n # and biases. Store the results in the grads dictionary. For example, #\n # grads['W1'] should store the gradient on W1, and be a matrix of same size #\n #############################################################################\n margin = np.exp(scores) / exp_sum\n margin[np.arange(N), y] += -1\n margin /= N #(N, C)\n dW2 = out1.T.dot(margin) #(H ,C)\n dW2 += reg * W2 \n grads['W2'] = dW2\n grads['b2'] = np.sum(margin, axis = 0)\n \n margin1 = margin.dot(W2.T) #(N, H)\n margin1[out1 <= 0] = 0\n dW1 = X.T.dot(margin1) #(D, H)\n dW1 += reg * W1 \n grads['W1'] = dW1\n grads['b1'] = np.sum(margin1, axis = 0)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, grads\n\n def train(self, X, y, X_val, y_val,\n learning_rate=1e-3, learning_rate_decay=0.95,\n reg=5e-6, num_iters=100,\n batch_size=200, verbose=False):\n \"\"\"\n Train this neural network using stochastic gradient descent.\n\n Inputs:\n - X: A numpy array of shape (N, D) giving training data.\n - y: A numpy array f shape (N,) giving training labels; y[i] = c means that\n X[i] has label c, where 0 <= c < C.\n - X_val: A numpy array of shape (N_val, D) giving validation data.\n - y_val: A numpy array of shape (N_val,) giving validation labels.\n - learning_rate: Scalar giving learning rate for optimization.\n - learning_rate_decay: Scalar giving factor used to decay the learning rate\n after each epoch.\n - reg: Scalar giving regularization strength.\n - num_iters: Number of steps to take when optimizing.\n - batch_size: Number of training examples to use per step.\n - verbose: boolean; if true print progress during optimization.\n \"\"\"\n num_train = X.shape[0]\n iterations_per_epoch = max(num_train / batch_size, 1)\n\n # Use SGD to optimize the parameters in self.model\n loss_history = []\n train_acc_history = []\n val_acc_history = []\n\n for it in 
range(num_iters):\n X_batch = None\n y_batch = None\n\n #########################################################################\n # TODO: Create a random minibatch of training data and labels, storing #\n # them in X_batch and y_batch respectively. #\n #########################################################################\n mask = np.random.choice(num_train, batch_size, replace=True)\n X_batch = X[mask]\n y_batch = y[mask]\n #########################################################################\n # END OF YOUR CODE #\n #########################################################################\n\n # Compute loss and gradients using the current minibatch\n loss, grads = self.loss(X_batch, y=y_batch, reg=reg)\n loss_history.append(loss)\n\n #########################################################################\n # TODO: Use the gradients in the grads dictionary to update the #\n # parameters of the network (stored in the dictionary self.params) #\n # using stochastic gradient descent. You'll need to use the gradients #\n # stored in the grads dictionary defined above. #\n #########################################################################\n self.params['W1'] -= learning_rate * grads['W1']\n self.params['W2'] -= learning_rate * grads['W2']\n self.params['b1'] -= learning_rate * grads['b1']\n self.params['b2'] -= learning_rate * grads['b2']\n #########################################################################\n # END OF YOUR CODE #\n #########################################################################\n\n if verbose and it % 100 == 0:\n print('iteration %d / %d: loss %f' % (it, num_iters, loss))\n\n # Every epoch, check train and val accuracy and decay learning rate.\n if it % iterations_per_epoch == 0:\n # Check accuracy\n train_acc = (self.predict(X_batch) == y_batch).mean()\n val_acc = (self.predict(X_val) == y_val).mean()\n train_acc_history.append(train_acc)\n val_acc_history.append(val_acc)\n\n # Decay learning rate\n learning_rate *= learning_rate_decay\n\n return {\n 'loss_history': loss_history,\n 'train_acc_history': train_acc_history,\n 'val_acc_history': val_acc_history,\n }\n\n def predict(self, X):\n \"\"\"\n Use the trained weights of this two-layer network to predict labels for\n data points. For each data point we predict scores for each of the C\n classes, and assign each data point to the class with the highest score.\n\n Inputs:\n - X: A numpy array of shape (N, D) giving N D-dimensional data points to\n classify.\n\n Returns:\n - y_pred: A numpy array of shape (N,) giving predicted labels for each of\n the elements of X. For all i, y_pred[i] = c means that X[i] is predicted\n to have class c, where 0 <= c < C.\n \"\"\"\n y_pred = None\n\n ###########################################################################\n # TODO: Implement this function; it should be VERY simple! #\n ###########################################################################\n out1 = np.maximum(0, X.dot(self.params['W1']) + self.params['b1']) # relu, (N, H)\n y_pred = np.argmax(out1.dot(self.params['W2']) + self.params['b2'],axis = 1) # (N, C)\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n\n return y_pred\n\n\n"
] | [
[
"numpy.sum",
"numpy.zeros",
"numpy.random.randn",
"numpy.random.choice",
"numpy.exp",
"numpy.arange",
"numpy.log"
]
] |
MarcoFavorito/breakout-env | [
"b41f9ed1da693874d7d34f83e7200fd51a59c97e"
] | [
"breakout_env/wrappers/wrappers.py"
] | [
"import copy\nfrom gym import Wrapper\nfrom pythogic.base.Symbol import Symbol\nfrom pythogic.base.Alphabet import Alphabet\nfrom pythogic.base.Formula import AtomicFormula, PathExpressionEventually, PathExpressionSequence, And, Not, \\\n LogicalTrue, PathExpressionStar\nfrom pythogic.base.utils import _to_pythomata_dfa\nfrom pythogic.ldlf_empty_traces.LDLf_EmptyTraces import LDLf_EmptyTraces\nimport numpy as np\nfrom pythomata.base.Simulator import Simulator\nfrom pythomata.base.utils import Sink\n\n\nclass BreakoutRABUWrapper(Wrapper):\n \"\"\"Env wrapper for bottom-up rows deletion\"\"\"\n def __init__(self, env):\n super().__init__(env)\n self.row_symbols = [Symbol(r) for r in [\"r0\", \"r1\", \"r2\"]]\n self.dfa = self._build_automata()\n self.goal_reward = 1000\n self.transition_reward = 100\n self.simulator = Simulator(self.dfa)\n self.last_status = None\n\n\n def reset(self):\n self.env.reset()\n self.simulator.reset()\n\n def step(self, action):\n obs, reward, done, _ = self.env.step(action)\n if done:\n # when we lose a life\n return obs, reward, done, _\n\n # overwrite old reward\n # reward = 0\n\n f = self.state2propositional_formula()\n\n old_state = self.simulator.cur_state\n self.simulator.make_transition(f)\n new_state = self.simulator.cur_state\n if new_state==Sink():\n done = True\n reward = -1000\n elif new_state in self.dfa.accepting_states:\n reward = 1000\n elif old_state!=new_state:\n reward = self.transition_reward\n\n return obs, reward, done or self.env.unwrapped.state.terminal, _\n\n\n\n def state2propositional_formula(self):\n e = self.unwrapped\n matrix = e.state.bricks.bricks_status_matrix\n row_status = np.all(matrix==0.0, axis=1)\n result = set()\n for rs, sym in zip(row_status, reversed(self.row_symbols)):\n if rs:\n result.add(sym)\n\n return frozenset(result)\n\n\n def _build_automata(self):\n rows = self.row_symbols\n atoms = [AtomicFormula(r) for r in rows]\n alphabet = Alphabet(set(rows))\n ldlf = LDLf_EmptyTraces(alphabet)\n f = PathExpressionEventually(\n PathExpressionSequence.chain([\n PathExpressionStar(And.chain([Not(atoms[0]), Not(atoms[1]), Not(atoms[2])])),\n PathExpressionStar(And.chain([atoms[0], Not(atoms[1]), Not(atoms[2])])),\n # Not(atoms[3]), Not(atoms[4]), Not(atoms[5])]),\n PathExpressionStar(And.chain([atoms[0], atoms[1], Not(atoms[2])])),\n # Not(atoms[3]), Not(atoms[4]), Not(atoms[5])]),\n # And.chain([atoms[0], atoms[1], atoms[2]]), # Not(atoms[3]), Not(atoms[4]), Not(atoms[5])]),\n # And.chain([atoms[0], atoms[1], atoms[2], atoms[3], Not(atoms[4]), Not(atoms[5])]),\n # And.chain([atoms[0], atoms[1], atoms[2], atoms[3], atoms[4], Not(atoms[5])]),\n # And.chain([atoms[0], atoms[1], atoms[2], atoms[3], atoms[4], atoms[5] ])\n ]),\n And.chain([atoms[0], atoms[1], atoms[2]])\n )\n nfa = ldlf.to_nfa(f)\n dfa = _to_pythomata_dfa(nfa)\n\n return dfa\n\n\n\n\n\n"
] | [
[
"numpy.all"
]
] |
salomonw/mixed-traffic-amod-route-rebalance | [
"7f1edeb195a7bfab835e596ad84deead2957943e"
] | [
"experiments/plots_journal_CARS.py"
] | [
"import src.tnet as tnet\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport pandas as pd\nimport math \n\nplt.style.use(['science','ieee', 'high-vis'])\n\n\ndef txt2list(fname):\n\treturn [line for line in open(fname)]\n\ndef read_result(fname):\n\tdf = pd.read_csv(fname)\n\tresults = df.T.values.tolist()\n\treturn results\n\ndef read_parameters(fname):\n\tdic = {}\n\tfor line in open(fname, 'r').readlines():\n\t\tp,v = line.split()\n\t\tdic[p] = v\n\treturn dic\n\n\ndef plot_topology(netname):\n netFile, gFile, fcoeffs, tstamp, dir_out = tnet.get_network_parameters(net_name=netname,\n\n experiment_name=netname + 'topo_plot')\n tNet = tnet.tNet(netFile=netFile, gFile=gFile, fcoeffs=fcoeffs)\n tNet.read_node_coordinates('data/pos/'+netname+'.txt')\n fig, ax = tnet.plot_network(tNet.G, width=0.3)\n return fig, ax\n\n\ndef plot_convergance(fname_sys, fname_usr):\n\treturn 1\n\n\ndef plot_costPenRate(fname, ax, parameters, k):\n\tj, cavsCost, noncavsCost, totCost, cavsFlow, nonCavsFlow, pedestrianFlow, rebalancingFlow, bikeFlow, subwayFlow = read_result(fname)\n\tif k == 'A':\n\t\tfor i in range(len(cavsCost)):\n\t\t\tcavsCost[i] = max(noncavsCost[i], cavsCost[i])\n\t\t\ttotCost[i] = max(noncavsCost[i], totCost[i])\n\tj = [round(.1 * i, 1) for i in range(11)]\n\tlstyle = ['-', '--', ':']\n\ti = 0\n\talg = 'CARS'+parameters['n:']\n\tax.plot(j, noncavsCost, label='Private', linestyle=lstyle[i], linewidth=2, marker='x')\n\tax.plot(j, cavsCost, label='AMoDs', linestyle=lstyle[i], linewidth=2, marker=\"^\")\n\tax.plot(j, totCost, label='Total', linestyle=lstyle[i], linewidth=2, marker='o')\n\tax.legend()\n\tax.set_xlabel('Penetration Rate')\n\tax.set_ylabel('Avg. Travel Time (min)')\n\tax.set_xlim((0, 1))\n\tax.legend(framealpha=0.8, fontsize='small', frameon=True, facecolor='w', fancybox='False')\n\t#ax.legend.get_frame().set_linewidth(0.2)\n\treturn ax\n\n\ndef plot_flowPenRate(fname, ax, parameters):\n\tn, cavsCost, noncavsCost, totCost, cavsFlow, nonCavsFlow, pedestrianFlow, rebalancingFlow, bikeFlow, subwayFlow = read_result(fname)\n\twidth = 0.9\n\tx_name = [round(.1 * i, 1) for i in range(11)]\n\tx = list(range(len(x_name)))\n\tp1 = ax.bar(x, nonCavsFlow, width, label='Private')\n\tp2 = ax.bar(x, cavsFlow, width,\n\t bottom=nonCavsFlow, label='AMoD')\n\tp3 = ax.bar(x, rebalancingFlow, width,\n\t bottom=[cavsFlow[i] + nonCavsFlow[i] for i in range(len(cavsFlow))], label='Rebalancing')\n\tif sum(subwayFlow)>10:\n\t\tp6 = ax.bar(x, subwayFlow, width,\n\t bottom=[cavsFlow[i] + nonCavsFlow[i] + rebalancingFlow[i] + pedestrianFlow[i] + bikeFlow[i] for i in\n\t range(len(cavsFlow))], label='Subway')\n\tif sum(pedestrianFlow)>10:\n\t\tp4 = ax.bar(x, pedestrianFlow, width,\n\t bottom=[cavsFlow[i] + nonCavsFlow[i] + rebalancingFlow[i] for i in range(len(cavsFlow))], label='Pedestrian')\n\tif sum(bikeFlow)>10:\n\t\tp5 = ax.bar(x, bikeFlow, width,\n\t bottom=[cavsFlow[i] + nonCavsFlow[i] + rebalancingFlow[i] + pedestrianFlow[i] for i in\n\t range(len(cavsFlow))], label='Biking')\n\n\n\tax.set_ylabel('Miles per mode of transport')\n\tax.set_xlabel('Penetration rate')\n\tax.set_xticks(x)\n\tax.set_xticklabels(x_name)\n\tax.legend(framealpha=0.8, fontsize='small', frameon=True, loc=3, facecolor='w', fancybox='False')\n\t#ax.legend.get_frame().set_linewidth(0.2)\n\treturn ax\n\n'''\ndire = '2021-01-08_11:51:44_penRate_NYC_1.5ASB_Reb_True'\nfname = 'results/' + dire + '/results.csv' \nparameters = read_parameters('results/' + dire + '/parameters.txt' )\n#print(read_result(fname))\n\nfig, ax = 
plt.subplots(1 ,figsize=(2.5,2))\nplot_costPenRate(fname, ax, parameters)\nplt.savefig('a.pdf')\n\nfig, ax = plt.subplots(1 ,figsize=(3.6,2))\nplot_flowPenRate(fname, ax, parameters)\nplt.savefig('b.pdf')\n'''\n\n# comparison\n\ndef plot_comparison(fnames, out):\n\tfig, ax = plt.subplots(ncols=2, \n\t\t\t\t\t\t\tnrows=len(fnames), \n\t\t\t\t\t\t#\twidth_ratios=[1,2], \n\t\t\t\t\t\t\tgridspec_kw={'width_ratios':[1,2]},\n\t\t\t\t\t\t\tfigsize=(3.6*1.7, 1.7*len(fnames)),\n\t\t\t\t\t\t\t#sharex=True, \n\t\t\t\t\t\t\tsharey=False)\n\tj = 0\n\tfor f in fnames:\n\t\tfname = 'results/' + f + '/results.csv'\n\t\tparameters = read_parameters('results/' + f + '/parameters.txt' )\n\t\tif out =='1c':\n\t\t\tplot_costPenRate(fname, ax[j,0], parameters, 'A')\n\t\telse:\n\t\t\tplot_costPenRate(fname, ax[j,0], parameters, 'B')\n\t\tplot_flowPenRate(fname, ax[j,1], parameters)\n\t\tj +=1\n\t#plt.legend(frameon=True, fancybox=False)\n\tplt.tight_layout()\n\tplt.savefig(out+'.pdf')\n\t#plt.show()\n\none = '2021-01-08_11/50/19_penRate_NYC_1.0A_Reb_True'.replace('/', ':')\ntwo = '2021-01-08_11/50/08_penRate_NYC_1.5A_Reb_True'.replace('/', ':')\nthree = '2021-01-08_11/51/44_penRate_NYC_2.0A_Reb_True'.replace('/', ':')\nfour = '2021-01-08_11/51/44_penRate_NYC_4.0A_Reb_True'.replace('/', ':')\nfnames = [one, two, three, four]\n\nplot_comparison(fnames,'1c')\n\n\none = '2021-01-08_11/50/19_penRate_NYC_1.0AS_Reb_True'.replace('/', ':')\ntwo = '2021-01-08_11/50/08_penRate_NYC_1.5AS_Reb_True'.replace('/', ':')\nthree = '2021-01-08_11/51/44_penRate_NYC_2.0AS_Reb_True'.replace('/', ':')\nfour = '2021-01-08_11/51/43_penRate_NYC_4.0AS_Reb_True'.replace('/', ':')\nfnames = [one, two, three, four]\n\nplot_comparison(fnames,'1_5c')\n\n\n\n\none = '2021-01-08_11/50/08_penRate_NYC_1.0ASP_Reb_True'.replace('/', ':')\ntwo = '2021-01-08_11/51/48_penRate_NYC_1.5ASP_Reb_True'.replace('/', ':')\t\nthree = '2021-01-08_11/51/44_penRate_NYC_2.0ASP_Reb_True'.replace('/', ':')\nfour = '2021-01-08_11/52/40_penRate_NYC_4.0ASP_Reb_True'.replace('/', ':')\nfnames = [one, two, three, four]\n\nplot_comparison(fnames,'2c')\n\n\n\none = '2021-01-08_11/50/08_penRate_NYC_1.0ASPB_Reb_True'.replace('/', ':')\ntwo = '2021-01-08_11/51/44_penRate_NYC_1.5ASPB_Reb_True'.replace('/', ':')\nthree = '2021-01-12_00:58:41_penRate_NYC_2.0ASPB_Reb_True'.replace('/', ':')\nfour = '2021-01-14_02:00:28_penRate_NYC_4.0ASPB_Reb_True'.replace('/', ':')\nfnames = [one, two, three, four]\n\nplot_comparison(fnames,'4c')\n\none = '2021-01-08_11/51/44_penRate_NYC_2.0A_Reb_True'.replace('/', ':')\ntwo = '2021-01-08_11/51/44_penRate_NYC_2.0AS_Reb_True'.replace('/', ':')\nthree = '2021-01-08_11/51/44_penRate_NYC_2.0ASP_Reb_True'.replace('/', ':')\nfour = '2021-01-12_00:58:41_penRate_NYC_2.0ASPB_Reb_True'.replace('/', ':')\nfnames = [one, two, three, four]\n\nplot_comparison(fnames,'4c')\n\n\n\n\n"
] | [
[
"pandas.read_csv",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.tight_layout"
]
] |
FedePeralta/ASVs_Deep_Reinforcement_Learning_with_CNNs | [
"23b9b181499a4b06f2ca2951c002359c1959e727"
] | [
"utils/Paralell_Experience_Generator.py"
] | [
"import random\nimport torch\nimport sys\nfrom contextlib import closing\nfrom torch.multiprocessing import Pool\nfrom random import randint\nfrom exploration_strategies.OUNoise import OrnsteinUhlenbeckActionNoise\n\n\nclass Parallel_Experience_Generator(object):\n \"\"\" Plays n episode in parallel using a fixed agent. \"\"\"\n\n def __init__(self, environment, policy, seed, hyperparameters, action_size, use_GPU=False, action_choice_output_columns=None):\n self.use_GPU = use_GPU\n self.environment = environment\n self.policy = policy\n self.action_choice_output_columns = action_choice_output_columns\n self.hyperparameters = hyperparameters\n self.noise = OrnsteinUhlenbeckActionNoise(mu=[0 for _ in range(self.environment.action_shape[1])],\n sigma=0.15,\n theta=.01,\n dt=1e-2,\n seed=seed)\n\n def play_n_episodes(self, n):\n \"\"\"Plays n episodes in parallel using the fixed policy and returns the data\"\"\"\n\n with closing(Pool(processes=n)) as pool:\n results = pool.map(self, range(n))\n pool.terminate()\n\n states_for_all_episodes = [episode[0] for episode in results]\n actions_for_all_episodes = [episode[1] for episode in results]\n rewards_for_all_episodes = [episode[2] for episode in results]\n\n return states_for_all_episodes, actions_for_all_episodes, rewards_for_all_episodes\n\n def play_1_episode(self, epsilon_exploration):\n \"\"\"Plays 1 episode using the fixed policy and returns the data\"\"\"\n\n state = self.reset_game()\n done = False\n episode_states = []\n episode_actions = []\n episode_rewards = []\n while not done:\n action = self.pick_action(self.policy, state)\n next_state, reward, done, _ = self.environment.step(action)\n episode_states.append(state)\n episode_actions.append(action)\n episode_rewards.append(reward)\n state = next_state\n return episode_states, episode_actions, episode_rewards\n\n def reset_game(self):\n \"\"\"Resets the game environment so it is ready to play a new episode\"\"\"\n seed = randint(0, sys.maxsize)\n torch.manual_seed(seed) # Need to do this otherwise each worker generates same experience\n state = self.environment.reset()\n return state\n\n def pick_action(self, policy, state):\n\n state = torch.from_numpy(state).float().unsqueeze(0)\n actor_output = policy(state)\n\n if self.action_choice_output_columns is not None:\n actor_output = actor_output[:, self.action_choice_output_columns]\n\n action_distribution = self.create_distributions(policy, self.environment.action_size)\n action = action_distribution.sample().cpu()\n\n action += torch.Tensor(self.noise())\n\n return action.detach().numpy()\n\n @staticmethod\n def create_distributions(policy_output, number_of_actions):\n\n means = policy_output[:, :number_of_actions].squeeze(0)\n stds = policy_output[:, number_of_actions:].squeeze(0)\n\n action_distribution = torch.distributions.normal.Normal(means.squeeze(0), torch.abs(stds))\n\n return action_distribution"
] | [
[
"torch.manual_seed",
"torch.from_numpy",
"torch.abs",
"torch.multiprocessing.Pool"
]
] |
AbdulHoffmann/carla_carissma | [
"9444dce96954c546333d5aecc92a06c3bfd19aa5",
"9444dce96954c546333d5aecc92a06c3bfd19aa5"
] | [
"PythonAPI/carissma_project/lib/python3.5/site-packages/mpl_toolkits/axes_grid/__init__.py",
"PythonAPI/carissma_project/lib/python3.5/site-packages/matplotlib/tri/tripcolor.py"
] | [
"from . import axes_size as Size\nfrom .axes_divider import Divider, SubplotDivider, LocatableAxes, \\\n make_axes_locatable\nfrom .axes_grid import Grid, ImageGrid, AxesGrid\n#from axes_divider import make_axes_locatable\nfrom matplotlib.cbook import warn_deprecated\nwarn_deprecated(since='2.1',\n name='mpl_toolkits.axes_grid',\n alternative='mpl_toolkits.axes_grid1 and'\n ' mpl_toolkits.axisartist, which provide'\n ' the same functionality',\n obj_type='module')\n",
"import numpy as np\n\nfrom matplotlib.collections import PolyCollection, TriMesh\nfrom matplotlib.colors import Normalize\nfrom matplotlib.tri.triangulation import Triangulation\n\n\ndef tripcolor(ax, *args, alpha=1.0, norm=None, cmap=None, vmin=None,\n vmax=None, shading='flat', facecolors=None, **kwargs):\n \"\"\"\n Create a pseudocolor plot of an unstructured triangular grid.\n\n The triangulation can be specified in one of two ways; either::\n\n tripcolor(triangulation, ...)\n\n where triangulation is a :class:`matplotlib.tri.Triangulation`\n object, or\n\n ::\n\n tripcolor(x, y, ...)\n tripcolor(x, y, triangles, ...)\n tripcolor(x, y, triangles=triangles, ...)\n tripcolor(x, y, mask=mask, ...)\n tripcolor(x, y, triangles, mask=mask, ...)\n\n in which case a Triangulation object will be created. See\n :class:`~matplotlib.tri.Triangulation` for a explanation of these\n possibilities.\n\n The next argument must be *C*, the array of color values, either\n one per point in the triangulation if color values are defined at\n points, or one per triangle in the triangulation if color values\n are defined at triangles. If there are the same number of points\n and triangles in the triangulation it is assumed that color\n values are defined at points; to force the use of color values at\n triangles use the kwarg ``facecolors=C`` instead of just ``C``.\n\n *shading* may be 'flat' (the default) or 'gouraud'. If *shading*\n is 'flat' and C values are defined at points, the color values\n used for each triangle are from the mean C of the triangle's\n three points. If *shading* is 'gouraud' then color values must be\n defined at points.\n\n The remaining kwargs are the same as for\n :meth:`~matplotlib.axes.Axes.pcolor`.\n \"\"\"\n if shading not in ['flat', 'gouraud']:\n raise ValueError(\"shading must be one of ['flat', 'gouraud'] \"\n \"not {0}\".format(shading))\n\n tri, args, kwargs = Triangulation.get_from_args_and_kwargs(*args, **kwargs)\n\n # C is the colors array defined at either points or faces (i.e. 
triangles).\n # If facecolors is None, C are defined at points.\n # If facecolors is not None, C are defined at faces.\n if facecolors is not None:\n C = facecolors\n else:\n C = np.asarray(args[0])\n\n # If there are a different number of points and triangles in the\n # triangulation, can omit facecolors kwarg as it is obvious from\n # length of C whether it refers to points or faces.\n # Do not do this for gouraud shading.\n if (facecolors is None and len(C) == len(tri.triangles) and\n len(C) != len(tri.x) and shading != 'gouraud'):\n facecolors = C\n\n # Check length of C is OK.\n if ((facecolors is None and len(C) != len(tri.x)) or\n (facecolors is not None and len(C) != len(tri.triangles))):\n raise ValueError('Length of color values array must be the same '\n 'as either the number of triangulation points '\n 'or triangles')\n\n # Handling of linewidths, shading, edgecolors and antialiased as\n # in Axes.pcolor\n linewidths = (0.25,)\n if 'linewidth' in kwargs:\n kwargs['linewidths'] = kwargs.pop('linewidth')\n kwargs.setdefault('linewidths', linewidths)\n\n edgecolors = 'none'\n if 'edgecolor' in kwargs:\n kwargs['edgecolors'] = kwargs.pop('edgecolor')\n ec = kwargs.setdefault('edgecolors', edgecolors)\n\n if 'antialiased' in kwargs:\n kwargs['antialiaseds'] = kwargs.pop('antialiased')\n if 'antialiaseds' not in kwargs and ec.lower() == \"none\":\n kwargs['antialiaseds'] = False\n\n if shading == 'gouraud':\n if facecolors is not None:\n raise ValueError('Gouraud shading does not support the use '\n 'of facecolors kwarg')\n if len(C) != len(tri.x):\n raise ValueError('For gouraud shading, the length of color '\n 'values array must be the same as the '\n 'number of triangulation points')\n collection = TriMesh(tri, **kwargs)\n else:\n # Vertices of triangles.\n maskedTris = tri.get_masked_triangles()\n verts = np.stack((tri.x[maskedTris], tri.y[maskedTris]), axis=-1)\n\n # Color values.\n if facecolors is None:\n # One color per triangle, the mean of the 3 vertex color values.\n C = C[maskedTris].mean(axis=1)\n elif tri.mask is not None:\n # Remove color values of masked triangles.\n C = C.compress(1-tri.mask)\n\n collection = PolyCollection(verts, **kwargs)\n\n collection.set_alpha(alpha)\n collection.set_array(C)\n if norm is not None and not isinstance(norm, Normalize):\n raise ValueError(\"'norm' must be an instance of 'Normalize'\")\n collection.set_cmap(cmap)\n collection.set_norm(norm)\n if vmin is not None or vmax is not None:\n collection.set_clim(vmin, vmax)\n else:\n collection.autoscale_None()\n ax.grid(False)\n\n minx = tri.x.min()\n maxx = tri.x.max()\n miny = tri.y.min()\n maxy = tri.y.max()\n corners = (minx, miny), (maxx, maxy)\n ax.update_datalim(corners)\n ax.autoscale_view()\n ax.add_collection(collection)\n return collection\n"
] | [
[
"matplotlib.cbook.warn_deprecated"
],
[
"matplotlib.collections.TriMesh",
"matplotlib.tri.triangulation.Triangulation.get_from_args_and_kwargs",
"matplotlib.collections.PolyCollection",
"numpy.asarray",
"numpy.stack"
]
] |
DexiongYung/robustnav_AE | [
"f2b1b5bb8780e4e6ae5f81c127b7589cfc949801"
] | [
"domain_adaptation/corruptions/corruptions.py"
] | [
"# -*- coding: utf-8 -*-\n\nimport numpy as np\nfrom PIL import Image\n\n# /////////////// Corruption Helpers ///////////////\n\nimport skimage as sk\nfrom torchvision import transforms\nimport torchvision.transforms.functional as F\nfrom skimage.filters import gaussian\nfrom io import BytesIO\nfrom wand.image import Image as WandImage\nfrom wand.api import library as wandlibrary\nimport wand.color as WandColor\nimport ctypes\nfrom PIL import Image as PILImage\nimport cv2\nfrom scipy.ndimage import zoom as scizoom\nfrom scipy.ndimage.interpolation import map_coordinates\nimport warnings\nimport os\nfrom pkg_resources import resource_filename\n\nwarnings.simplefilter(\"ignore\", UserWarning)\n\n\ndef disk(radius, alias_blur=0.1, dtype=np.float32):\n # 17 x 17 kernel causes seg fault in opencv\n # if radius <= 8:\n # L = np.arange(-8, 8 + 1)\n # ksize = (3, 3) \n if radius <= 5:\n L = np.arange(-5, 5 + 1)\n ksize = (3, 3)\n else:\n L = np.arange(-radius, radius + 1)\n ksize = (5, 5)\n X, Y = np.meshgrid(L, L)\n aliased_disk = np.array((X ** 2 + Y ** 2) <= radius ** 2, dtype=dtype)\n aliased_disk /= np.sum(aliased_disk)\n # supersample disk to antialias\n return cv2.GaussianBlur(aliased_disk, ksize=ksize, sigmaX=alias_blur)\n\n\n# Tell Python about the C method\nwandlibrary.MagickMotionBlurImage.argtypes = (ctypes.c_void_p, # wand\n ctypes.c_double, # radius\n ctypes.c_double, # sigma\n ctypes.c_double) # angle\n\n\n# Extend wand.image.Image class to include method signature\nclass MotionImage(WandImage):\n def motion_blur(self, radius=0.0, sigma=0.0, angle=0.0):\n wandlibrary.MagickMotionBlurImage(self.wand, radius, sigma, angle)\n\n\n# modification of https://github.com/FLHerne/mapgen/blob/master/diamondsquare.py\ndef plasma_fractal(mapsize=512, wibbledecay=3):\n \"\"\"\n Generate a heightmap using diamond-square algorithm.\n Return square 2d array, side length 'mapsize', of floats in range 0-255.\n 'mapsize' must be a power of two.\n \"\"\"\n assert (mapsize & (mapsize - 1) == 0)\n maparray = np.empty((mapsize, mapsize), dtype=np.float_)\n maparray[0, 0] = 0\n stepsize = mapsize\n wibble = 100\n\n def wibbledmean(array):\n return array / 4 + wibble * np.random.uniform(-wibble, wibble, array.shape)\n\n def fillsquares():\n \"\"\"For each square of points stepsize apart,\n calculate middle value as mean of points + wibble\"\"\"\n cornerref = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]\n squareaccum = cornerref + np.roll(cornerref, shift=-1, axis=0)\n squareaccum += np.roll(squareaccum, shift=-1, axis=1)\n maparray[stepsize // 2:mapsize:stepsize,\n stepsize // 2:mapsize:stepsize] = wibbledmean(squareaccum)\n\n def filldiamonds():\n \"\"\"For each diamond of points stepsize apart,\n calculate middle value as mean of points + wibble\"\"\"\n mapsize = maparray.shape[0]\n drgrid = maparray[stepsize // 2:mapsize:stepsize, stepsize // 2:mapsize:stepsize]\n ulgrid = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]\n ldrsum = drgrid + np.roll(drgrid, 1, axis=0)\n lulsum = ulgrid + np.roll(ulgrid, -1, axis=1)\n ltsum = ldrsum + lulsum\n maparray[0:mapsize:stepsize, stepsize // 2:mapsize:stepsize] = wibbledmean(ltsum)\n tdrsum = drgrid + np.roll(drgrid, 1, axis=1)\n tulsum = ulgrid + np.roll(ulgrid, -1, axis=0)\n ttsum = tdrsum + tulsum\n maparray[stepsize // 2:mapsize:stepsize, 0:mapsize:stepsize] = wibbledmean(ttsum)\n\n while stepsize >= 2:\n fillsquares()\n filldiamonds()\n stepsize //= 2\n wibble /= wibbledecay\n\n maparray -= maparray.min()\n return maparray / maparray.max()\n\n\ndef 
clipped_zoom(img, zoom_factor):\n h = img.shape[0]\n # ceil crop height(= crop width)\n ch = int(np.ceil(h / float(zoom_factor)))\n\n top = (h - ch) // 2\n img = scizoom(img[top:top + ch, top:top + ch], (zoom_factor, zoom_factor, 1), order=1)\n # trim off any extra pixels\n trim_top = (img.shape[0] - h) // 2\n\n return img[trim_top:trim_top + h, trim_top:trim_top + h]\n\n\n# /////////////// End Corruption Helpers ///////////////\n\n\n# /////////////// Corruptions ///////////////\n\ndef gaussian_noise(x, severity=1):\n # c = [.08, .12, 0.18, 0.26, 0.38][severity - 1]\n c = [0.04, 0.06, .08, .09, .10][severity - 1]\n\n x = np.array(x) / 255.\n return np.clip(x + np.random.normal(size=x.shape, scale=c), 0, 1) * 255\n\n\ndef shot_noise(x, severity=1):\n # c = [60, 25, 12, 5, 3][severity - 1]\n c = [500, 250, 100, 75, 50][severity - 1]\n\n x = np.array(x) / 255.\n return np.clip(np.random.poisson(x * c) / float(c), 0, 1) * 255\n\n\ndef impulse_noise(x, severity=1):\n # c = [.03, .06, .09, 0.17, 0.27][severity - 1]\n c = [.01, .02, .03, .05, .07][severity - 1]\n\n x = sk.util.random_noise(np.array(x) / 255., mode='s&p', amount=c)\n return np.clip(x, 0, 1) * 255\n\n\ndef speckle_noise(x, severity=1):\n # c = [.15, .2, 0.35, 0.45, 0.6][severity - 1]\n c = [.06, .1, .12, .16, .2][severity - 1]\n\n x = np.array(x) / 255.\n return np.clip(x + x * np.random.normal(size=x.shape, scale=c), 0, 1) * 255\n\n\ndef gaussian_blur(x, severity=1):\n # c = [1, 2, 3, 4, 6][severity - 1]\n c = [.4, .6, 0.7, .8, 1][severity - 1]\n\n x = gaussian(np.array(x) / 255., sigma=c, multichannel=True)\n return np.clip(x, 0, 1) * 255\n\n\ndef glass_blur(x, severity=1):\n # sigma, max_delta, iterations\n # c = [(0.7, 1, 2), (0.9, 2, 1), (1, 2, 3), (1.1, 3, 2), (1.5, 4, 2)][severity - 1]\n c = [(0.05,1,1), (0.25,1,1), (0.4,1,1), (0.25,1,2), (0.4,1,2)][severity - 1]\n\n x = np.uint8(gaussian(np.array(x) / 255., sigma=c[0], multichannel=True) * 255)\n size = x.shape[0]\n # locally shuffle pixels\n for i in range(c[2]):\n for h in range(size - c[1], c[1], -1):\n for w in range(size - c[1], c[1], -1):\n dx, dy = np.random.randint(-c[1], c[1], size=(2,))\n h_prime, w_prime = h + dy, w + dx\n # swap\n x[h, w], x[h_prime, w_prime] = x[h_prime, w_prime], x[h, w]\n\n return np.clip(gaussian(x / 255., sigma=c[0], multichannel=True), 0, 1) * 255\n\n\ndef defocus_blur(x, severity=1):\n # c = [(3, 0.1), (4, 0.5), (6, 0.5), (8, 0.5), (10, 0.5)][severity - 1]\n c = [(0.3, 0.4), (0.4, 0.5), (0.5, 0.6), (1, 0.2), (1.5, 0.1)][severity - 1]\n\n x = np.array(x) / 255.\n kernel = disk(radius=c[0], alias_blur=c[1])\n\n channels = []\n for d in range(3):\n channels.append(cv2.filter2D(x[:, :, d].astype(np.float32), -1, kernel))\n channels = np.array(channels).transpose((1, 2, 0)) # 3x32x32 -> 32x32x3\n\n return np.clip(channels, 0, 1) * 255\n\ndef motion_blur(x, severity=1):\n # c = [(10, 3), (15, 5), (15, 8), (15, 12), (20, 15)][severity - 1]\n c = [(6,1), (6,1.5), (6,2), (8,2), (9,2.5)][severity - 1]\n\n output = BytesIO()\n Image.fromarray(x).save(output, format='PNG')\n x = MotionImage(blob=output.getvalue())\n\n x.motion_blur(radius=c[0], sigma=c[1], angle=np.random.uniform(-45, 45))\n\n x = cv2.imdecode(np.fromstring(x.make_blob(), np.uint8),\n cv2.IMREAD_UNCHANGED)\n\n if x.shape != (512, 512):\n return np.clip(x[..., [2, 1, 0]], 0, 255) # BGR to RGB\n else: # greyscale to RGB\n return np.clip(np.array([x, x, x]).transpose((1, 2, 0)), 0, 255)\n\n\ndef zoom_blur(x, severity=1):\n # c = [np.arange(1, 1.11, 0.01),\n # np.arange(1, 1.16, 0.01),\n 
# np.arange(1, 1.21, 0.02),\n # np.arange(1, 1.26, 0.02),\n # np.arange(1, 1.31, 0.03)][severity - 1]\n c = [np.arange(1, 1.06, 0.01), np.arange(1, 1.11, 0.01), np.arange(1, 1.16, 0.01),\n np.arange(1, 1.21, 0.01), np.arange(1, 1.26, 0.01)][severity - 1]\n\n x = (np.array(x) / 255.).astype(np.float32)\n out = np.zeros_like(x)\n for zoom_factor in c:\n out += clipped_zoom(x, zoom_factor)\n\n x = (x + out) / (len(c) + 1)\n return np.clip(x, 0, 1) * 255\n\n\ndef fog(x, severity=1):\n # c = [(1.5, 2), (2., 2), (2.5, 1.7), (2.5, 1.5), (3., 1.4)][severity - 1]\n c = [(.2,3), (.5,3), (0.75,2.5), (1,2), (1.5,1.75)][severity - 1]\n size = x.shape[0]\n x = np.array(x) / 255.\n max_val = x.max()\n x += c[0] * plasma_fractal(wibbledecay=c[1])[:size, :size][..., np.newaxis]\n return np.clip(x * max_val / (max_val + c[0]), 0, 1) # *255\n\n\ndef frost(x, severity=1):\n size = x.shape[0]\n # c = [(1, 0.4),\n # (0.8, 0.6),\n # (0.7, 0.7),\n # (0.65, 0.7),\n # (0.6, 0.75)][severity - 1]\n c = [(1, 0.2), (1, 0.3), (0.9, 0.4), (0.85, 0.4), (0.75, 0.45)][severity - 1]\n idx = np.random.randint(5)\n filename = [resource_filename(__name__, 'frost/frost1.png'),\n resource_filename(__name__, 'frost/frost2.png'),\n resource_filename(__name__, 'frost/frost3.png'),\n resource_filename(__name__, 'frost/frost4.jpg'),\n resource_filename(__name__, 'frost/frost5.jpg'),\n resource_filename(__name__, 'frost/frost6.jpg')][idx]\n frost = cv2.imread(filename)\n # randomly crop and convert to rgb\n\n x_start, y_start = np.random.randint(0, frost.shape[0] - size), np.random.randint(0, frost.shape[1] - size)\n\n frost = frost[x_start:x_start + size, y_start:y_start + size][..., [2, 1, 0]]\n\n return np.clip(c[0] * np.array(x) + c[1] * frost, 0, 255) / 255\n\n\ndef snow(x, severity=1):\n # c = [(0.1, 0.3, 3, 0.5, 10, 4, 0.8),\n # (0.2, 0.3, 2, 0.5, 12, 4, 0.7),\n # (0.55, 0.3, 4, 0.9, 12, 8, 0.7),\n # (0.55, 0.3, 4.5, 0.85, 12, 8, 0.65),\n # (0.55, 0.3, 2.5, 0.85, 12, 12, 0.55)][severity - 1]\n\n c = [(0.1,0.2,1,0.6,8,3,0.95),\n (0.1,0.2,1,0.5,10,4,0.9),\n (0.15,0.3,1.75,0.55,10,4,0.9),\n (0.25,0.3,2.25,0.6,12,6,0.85),\n (0.3,0.3,1.25,0.65,14,12,0.8)][severity - 1]\n\n size = x.shape[0]\n x = np.array(x, dtype=np.float32) / 255.\n snow_layer = np.random.normal(size=x.shape[:2], loc=c[0], scale=c[1]) # [:2] for monochrome\n\n snow_layer = clipped_zoom(snow_layer[..., np.newaxis], c[2])\n snow_layer[snow_layer < c[3]] = 0\n\n snow_layer = PILImage.fromarray((np.clip(snow_layer.squeeze(), 0, 1) * 255).astype(np.uint8), mode='L')\n output = BytesIO()\n snow_layer.save(output, format='PNG')\n snow_layer = MotionImage(blob=output.getvalue())\n\n snow_layer.motion_blur(radius=c[4], sigma=c[5], angle=np.random.uniform(-135, -45))\n\n snow_layer = cv2.imdecode(np.fromstring(snow_layer.make_blob(), np.uint8),\n cv2.IMREAD_UNCHANGED) / 255.\n snow_layer = snow_layer[..., np.newaxis]\n\n x = c[6] * x + (1 - c[6]) * np.maximum(x, cv2.cvtColor(x, code = cv2.COLOR_RGB2GRAY).reshape(size, size, 1) * 1.5 + 0.5)\n return np.clip(x + snow_layer + np.rot90(snow_layer, k=2), 0, 1) * 255\n\n\ndef spatter(x, severity=1):\n # c = [(0.65, 0.3, 4, 0.69, 0.6, 0),\n # (0.65, 0.3, 3, 0.68, 0.6, 0),\n # (0.65, 0.3, 2, 0.68, 0.5, 0),\n # (0.65, 0.3, 1, 0.65, 1.5, 1),\n # (0.67, 0.4, 1, 0.65, 1.5, 1)][severity - 1]\n c = [(0.62,0.1,0.7,0.7,0.5,0),\n (0.65,0.1,0.8,0.7,0.5,0),\n (0.65,0.3,1,0.69,0.5,0),\n (0.65,0.1,0.7,0.69,0.6,1),\n (0.65,0.1,0.5,0.68,0.6,1)][severity - 1]\n\n x = np.array(x, dtype=np.float32) / 255.\n\n liquid_layer = 
np.random.normal(size=x.shape[:2], loc=c[0], scale=c[1])\n\n liquid_layer = gaussian(liquid_layer, sigma=c[2])\n liquid_layer[liquid_layer < c[3]] = 0\n if c[5] == 0:\n liquid_layer = (liquid_layer * 255).astype(np.uint8)\n dist = 255 - cv2.Canny(liquid_layer, 50, 150)\n dist = cv2.distanceTransform(dist, cv2.DIST_L2, 5)\n _, dist = cv2.threshold(dist, 20, 20, cv2.THRESH_TRUNC)\n dist = cv2.blur(dist, (3, 3)).astype(np.uint8)\n dist = cv2.equalizeHist(dist)\n ker = np.array([[-2, -1, 0], [-1, 1, 1], [0, 1, 2]])\n dist = cv2.filter2D(dist, cv2.CV_8U, ker)\n dist = cv2.blur(dist, (3, 3)).astype(np.float32)\n\n m = cv2.cvtColor(liquid_layer * dist, cv2.COLOR_GRAY2BGRA)\n m /= np.max(m, axis=(0, 1))\n m *= c[4]\n\n # water is pale turqouise\n color = np.concatenate((175 / 255. * np.ones_like(m[..., :1]),\n 238 / 255. * np.ones_like(m[..., :1]),\n 238 / 255. * np.ones_like(m[..., :1])), axis=2)\n\n color = cv2.cvtColor(color, cv2.COLOR_BGR2BGRA)\n x = cv2.cvtColor(x, cv2.COLOR_BGR2BGRA)\n\n return cv2.cvtColor(np.clip(x + m * color, 0, 1), cv2.COLOR_BGRA2BGR) * 255\n else:\n m = np.where(liquid_layer > c[3], 1, 0)\n m = gaussian(m.astype(np.float32), sigma=c[4])\n m[m < 0.8] = 0\n\n # mud brown\n color = np.concatenate((63 / 255. * np.ones_like(x[..., :1]),\n 42 / 255. * np.ones_like(x[..., :1]),\n 20 / 255. * np.ones_like(x[..., :1])), axis=2)\n\n color *= m[..., np.newaxis]\n x *= (1 - m[..., np.newaxis])\n\n return np.clip(x + color, 0, 1) * 255\n\n\ndef contrast(x, severity=1):\n # c = [0.4, .3, .2, .1, .05][severity - 1]\n c = [.75, .5, .4, .3, 0.15][severity - 1]\n\n x = np.array(x) / 255.\n means = np.mean(x, axis=(0, 1), keepdims=True)\n return np.clip((x - means) * c + means, 0, 1) * 255\n\n\ndef generate_random_lines(imshape,slant,drop_length,rain_type):\n drops=[]\n area=imshape[0]*imshape[1]\n no_of_drops=area//600\n\n if rain_type.lower()=='drizzle':\n no_of_drops=area//770\n drop_length=10\n elif rain_type.lower()=='heavy':\n drop_length=30\n elif rain_type.lower()=='torrential':\n no_of_drops=area//500\n drop_length=60\n\n for i in range(no_of_drops): ## If You want heavy rain, try increasing this\n if slant<0:\n x= np.random.randint(slant,imshape[1])\n else:\n x= np.random.randint(0,imshape[1]-slant)\n y= np.random.randint(0,imshape[0]-drop_length)\n drops.append((x,y))\n return drops,drop_length\n\n\ndef rain_process(image,slant,drop_length,drop_color,drop_width,rain_drops):\n imshape = image.shape \n image_t = image.copy()\n for rain_drop in rain_drops:\n cv2.line(image_t,(rain_drop[0],rain_drop[1]),(rain_drop[0]+slant,rain_drop[1]+drop_length),drop_color,drop_width)\n image= cv2.blur(image_t,(7,7)) ## rainy view are blurry\n brightness_coefficient = 0.7 ## rainy days are usually shady \n image_HLS = hls(image) ## Conversion to HLS\n image_HLS[:,:,1] = image_HLS[:,:,1]*brightness_coefficient ## scale pixel values down for channel 1(Lightness)\n image_RGB= rgb(image_HLS,'hls') ## Conversion to RGB\n return image_RGB\n\n\ndef hls(image,src='RGB'):\n image_HLS = eval('cv2.cvtColor(image,cv2.COLOR_'+src.upper()+'2HLS)')\n return image_HLS\n\n\ndef rgb(image, src='BGR'):\n image_RGB= eval('cv2.cvtColor(image,cv2.COLOR_'+src.upper()+'2RGB)')\n return image_RGB\n\n\ndef rain(image, slant=-1,drop_length=20,drop_width=1,drop_color=(200,200,200),rain_type='torrential'): ## (200,200,200) a shade of gray\n # verify_image(image)\n slant_extreme=slant\n # if not(is_numeric(slant_extreme) and (slant_extreme>=-20 and slant_extreme<=20)or slant_extreme==-1):\n # raise 
Exception(err_rain_slant)\n # if not(is_numeric(drop_width) and drop_width>=1 and drop_width<=5):\n # raise Exception(err_rain_width)\n # if not(is_numeric(drop_length) and drop_length>=0 and drop_length<=100):\n # raise Exception(err_rain_length)\n\n imshape = image.shape\n if slant_extreme==-1:\n slant= np.random.randint(-10,10) ##generate random slant if no slant value is given\n rain_drops, drop_length= generate_random_lines(imshape,slant,drop_length,rain_type)\n output = rain_process(image,slant_extreme,drop_length,drop_color,drop_width,rain_drops)\n return output\n\n\ndef brightness(x, severity=1):\n # c = [.1, .2, .3, .4, .5][severity - 1]\n c = [.05, .1, .15, .2, .3][severity - 1]\n\n x = np.array(x) / 255.\n x = sk.color.rgb2hsv(x)\n x[:, :, 2] = np.clip(x[:, :, 2] + c, 0, 1)\n x = sk.color.hsv2rgb(x)\n\n return np.clip(x, 0, 1) * 255\n\n\ndef saturate(x, severity=1):\n # c = [(0.3, 0), (0.1, 0), (2, 0), (5, 0.1), (20, 0.2)][severity - 1]\n c = [(0.3, 0), (0.1, 0), (1.5, 0), (2, 0.1), (2.5, 0.2)][severity - 1]\n x = np.array(x) / 255.\n x = sk.color.rgb2hsv(x)\n x[:, :, 1] = np.clip(x[:, :, 1] * c[0] + c[1], 0, 1)\n x = sk.color.hsv2rgb(x)\n\n return np.clip(x, 0, 1) * 255\n\n\ndef jpeg_compression(x, severity=1):\n # c = [25, 18, 15, 10, 7][severity - 1]\n c = [80, 65, 58, 50, 40][severity - 1]\n\n output = BytesIO()\n\n Image.fromarray(x).save(output, 'JPEG', quality=c)\n x = np.array(PILImage.open(output))\n\n return x\n\n\ndef pixelate(x, severity=1):\n size = x.shape[0]\n # c = [0.6, 0.5, 0.4, 0.3, 0.25][severity - 1]\n c = [0.95, 0.9, 0.85, 0.75, 0.65][severity - 1]\n\n x = Image.fromarray(x)\n x = x.resize((int(size * c), int(size * c)),resample=Image.BILINEAR)\n x = x.resize((size, size),Image.NEAREST)\n\n return np.array(x)\n\n\n# mod of https://gist.github.com/erniejunior/601cdf56d2b424757de5\ndef elastic_transform(image, severity=1):\n c = [(244 * 2, 244 * 0.7, 244 * 0.1), # 244 should have been 512, but ultimately nothing is incorrect\n (244 * 2, 244 * 0.08, 244 * 0.2),\n (244 * 0.05, 244 * 0.01, 244 * 0.02),\n (244 * 0.07, 244 * 0.01, 244 * 0.02),\n (244 * 0.12, 244 * 0.01, 244 * 0.02)][severity - 1]\n\n image = np.array(image, dtype=np.float32) / 255.\n shape = image.shape\n shape_size = shape[:2]\n\n # random affine\n center_square = np.float32(shape_size) // 2\n square_size = min(shape_size) // 3\n pts1 = np.float32([center_square + square_size,\n [center_square[0] + square_size, center_square[1] - square_size],\n center_square - square_size])\n pts2 = pts1 + np.random.uniform(-c[2], c[2], size=pts1.shape).astype(np.float32)\n M = cv2.getAffineTransform(pts1, pts2)\n image = cv2.warpAffine(image, M, shape_size[::-1], borderMode=cv2.BORDER_REFLECT_101)\n\n dx = (gaussian(np.random.uniform(-1, 1, size=shape[:2]),\n c[1], mode='reflect', truncate=3) * c[0]).astype(np.float32)\n dy = (gaussian(np.random.uniform(-1, 1, size=shape[:2]),\n c[1], mode='reflect', truncate=3) * c[0]).astype(np.float32)\n dx, dy = dx[..., np.newaxis], dy[..., np.newaxis]\n\n x, y, z = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]), np.arange(shape[2]))\n indices = np.reshape(y + dy, (-1, 1)), np.reshape(x + dx, (-1, 1)), np.reshape(z, (-1, 1))\n return np.clip(map_coordinates(image, indices, order=1, mode='reflect').reshape(shape), 0, 1) * 255\n\n\ndef blackoutNoise(image, severity=1):\n image = np.zeros(image.shape, dtype=np.uint8)\n m = (severity, severity, severity)\n s = (severity, severity, severity)\n\n image = np.clip(cv2.randn(image, m, s), 0, 255)\n\n return image\n\n\ndef 
additiveGaussianNoise(image, severity=1):\n m = (severity, severity, severity)\n s = (severity, severity, severity)\n corr = cv2.randn(np.zeros(image.shape, dtype=np.uint8), m, s)\n\n image = np.clip(image.copy() + corr, 0, 255)\n return image\n\n\ndef occlusion(image, severity=1):\n mask = np.ones(image.shape, dtype=np.uint8)\n\n x = int(image.shape[0] * np.random.rand())\n y = int(image.shape[1] * np.random.rand())\n r = int((min(image.shape[:2]) / 4) * np.random.rand() + (min(image.shape[:2]) / 4))\n\n cv2.circle(mask, (x, y), r, 0, -1)\n\n image = np.clip(image.copy() * mask, 0, 255)\n return image\n"
] | [
[
"numpy.sum",
"numpy.ones",
"scipy.ndimage.zoom",
"numpy.ones_like",
"numpy.random.poisson",
"numpy.meshgrid",
"numpy.reshape",
"numpy.random.rand",
"numpy.where",
"scipy.ndimage.interpolation.map_coordinates",
"numpy.mean",
"numpy.random.uniform",
"numpy.zeros",
"numpy.float32",
"numpy.arange",
"numpy.max",
"numpy.array",
"numpy.zeros_like",
"numpy.roll",
"numpy.empty",
"numpy.clip",
"numpy.rot90",
"numpy.random.normal",
"numpy.random.randint"
]
] |
simondlevy/TinyNEF | [
"2e42754cf22996c86f1e35780d77591ec2bbb658"
] | [
"gym/pendulum_test.py"
] | [
"#!/usr/bin/env python3\n'''\nUse the Neural Engineering framework to solve Pendulum via an elitist GA\n\nCopyright (C) 2020 Simon D. Levy\n\nMIT License\n'''\n\nfrom lib import NefGym\nfrom sys import argv\nimport pickle\nimport numpy as np\n\nfrom sueap.algorithms.elitist import Elitist\n\nclass NefPendulum(NefGym):\n\n def __init__(self, neurons=20, seed=None):\n\n NefGym.__init__(self, 'Pendulum-v0', neurons, seed)\n\n def activate(self, x):\n\n return np.clip(x, -2, +2)\n\nif __name__ == '__main__':\n\n if len(argv) < 2:\n print('Usage: python3 %s FILE' % argv[0])\n exit(0)\n \n problem = NefPendulum()\n net = pickle.load(open(argv[1], 'rb'))\n print('Got reward %.3f in %d steps' % problem.test(net))\n"
] | [
[
"numpy.clip"
]
] |
astrax/astro2019 | [
"c1f5309415c80fbd986d6760bcb8bc095898beda"
] | [
"docs/.src/programs/skyplot_proj/skyplotv1.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\n\n\nimport astropy\nfrom scipy.spatial import cKDTree\n\nimport numpy as np\nimport matplotlib.pyplot as plt\ndata=np.genfromtxt('ybs.degbv',names=True)\nmessier=np.genfromtxt('Messierdec.txt',names=True)\n\nvlim=4.5\nmagscale=10\nstarsize=magscale*(vlim-data['v'])\n#norm = ((-data['v'])-( (-data['v'])).min())/(data['v'].max()-data['v'].min())\n#starsize=vlim+norm*starsize\n\nimport astropy\n\nfrom astropy import units as u\nfrom astropy.time import Time\nfrom astropy.coordinates import SkyCoord, EarthLocation, AltAz\n\nstarcoords=SkyCoord(ra=data['ra']*u.degree,dec=data['dec']*u.degree)\nmcoords=SkyCoord(ra=messier['Mra']*15.*u.degree,dec=messier['Mdec']*u.degree)\n\nCT=EarthLocation(lat=-30.159*u.deg,lon=-70.809*u.deg,height=2207.*u.m)\nKP=EarthLocation(lat=31.98*u.deg,lon=-111.60*u.deg,height=2097.*u.m)\nRM=EarthLocation(lat=28.7569*u.deg,lon=-17.8925*u.deg,height=2267.*u.m)\nsitecodes=['CT','KP','RM']\nsitenames=['Cerro Tololo','Kitt Peak', 'La Palma']\n\n\nfor site in range(0,2):\n if site==0:\n obsloc=CT\n if site==1:\n obsloc=KP\n utcoffset=-5.0*u.hour\n showtime = Time('2015-7-21 22:00:00') - utcoffset\n showtime=Time.now()\n print(showtime.iso)\n staraltaz=starcoords.transform_to(AltAz(obstime=showtime,location=obsloc))\n az2plot=np.pi/2.+np.array((3.1415926/180.)*u.degree*staraltaz.az)\n zd2plot=np.array(90.*u.degree-staraltaz.alt)\n #pos4kd=np.array([[az2plot],[zd2plot]])\n upind=(zd2plot < 90.).nonzero()\n plt.clf()\n plt.figure(site+1)\n ax=plt.subplot(111,polar=True)\n ax.grid(False)\n ax.set_xticklabels(['W', '', 'N', '', 'E', '', 'S', ''])\n \n #plt.fill_between([0,90],[0,0],[360,360],facecolor='0')\n plt.scatter(az2plot[upind],zd2plot[upind],s=starsize[upind],c=data['bv'][upind],cmap='rainbow',linewidth=0,vmax=1.2,vmin=-0.5)\n plt.ylim([0.,90.])\n cb=plt.colorbar(pad=0.10)\n cb.set_label('Star color, B-V')\n #plt.tick_params(axis='x',labelbottom='off')\n plt.tick_params(axis='y',labelleft='off')\n ax.set_xticklabels(['W', '', 'N', '', 'E', '', 'S', ''])\n # add parallels of declination every 30 degrees\n for jdec in range(5):\n pardeg=60.-30.*jdec\n parra=np.array(range(361))\n skpar=SkyCoord(ra=parra*u.degree,dec=pardeg*u.degree)\n paraltaz=skpar.transform_to(AltAz(obstime=showtime,location=obsloc))\n paraz2plot=np.pi/2.+np.array((3.14159265/180.)*u.degree*paraltaz.az)\n parzd2plot=np.array(90.*u.degree-paraltaz.alt)\n plt.plot(paraz2plot,parzd2plot,linewidth=1,color='gray',linestyle=':')\n \n # plot Messier objects\n maltaz=mcoords.transform_to(AltAz(obstime=showtime,location=obsloc))\n maz2plot=np.pi/2.+np.array((3.1415926/180.)*u.degree*maltaz.az)\n mzd2plot=np.array(90.*u.degree-maltaz.alt)\n upm=(mzd2plot < 90.).nonzero()\n \n #plt.scatter(maz2plot[upm],mzd2plot[upm],s=100,c=messier['Mclass'][upm],cmap='rainbow',alpha=0.4,linewidth=0)\n plt.title(str(sitenames[site])+' '+showtime.iso+' UT\\n')\n labelcolors=np.array(['blue','blue','green','orange','red'])\n mlabels=np.array(['{0}'.format(i+1) for i in range(110)])\n for j in range(110):\n plt.annotate(mlabels[j],xy=(maz2plot[j],mzd2plot[j]),xytext=(0,0),textcoords='offset points',color=labelcolors[messier['Mclass'][j]],size='small')\n #add Magellanic clouds\n sklmc=SkyCoord(ra=15.0*5.25*u.degree,dec=-68.7*u.degree)\n sksmc=SkyCoord(ra=0.77*15.0*u.degree,dec=-73.0*u.degree)\n lmcaltaz=sklmc.transform_to(AltAz(obstime=showtime,location=obsloc))\n smcaltaz=sksmc.transform_to(AltAz(obstime=showtime,location=obsloc))\n 
plt.scatter(np.pi/2.+np.array((3.1415926/180.)*u.degree*lmcaltaz.az),90.*u.degree-lmcaltaz.alt,s=250,c='green',alpha=0.3)\n plt.scatter(np.pi/2.+np.array((3.1415926/180.)*u.degree*smcaltaz.az),90.*u.degree-smcaltaz.alt,s=120,c='green',alpha=0.3)\n \n #add constellation lines\n conlines=np.genfromtxt('constellations.txt',names=\"star1, star2\")\n nstar1=np.array(conlines['star1'])\n nstar2=np.array(conlines['star2'])\n nstars=nstar1.size\n starnumbers=np.array(data['starnum'])\n for jstar in range(nstars):\n indexstar1=np.where(starnumbers==nstar1[jstar])[0]\n indexstar2=np.where(data['starnum']==nstar2[jstar])[0]\n plotx=np.array((az2plot[indexstar1],az2plot[indexstar2]))\n ploty=np.array((zd2plot[indexstar1],zd2plot[indexstar2]))\n plt.plot(plotx,ploty,linewidth=1,color='black',zorder=0)\n \n plt.annotate('Messier Objects:',xy=(0.04,0.18),xycoords='figure fraction')\n plt.annotate('Nebula',xy=(0.05,0.145),xycoords='figure fraction',color='blue')\n plt.annotate('Galaxy',xy=(0.05,0.11),xycoords='figure fraction',color='green')\n plt.annotate('Open cluster',xy=(0.05,0.075),xycoords='figure fraction',color='orange')\n plt.annotate('Globular cluster',xy=(0.05,0.04),xycoords='figure fraction',color='red')\n plt.show()\n if site==0:\n plt.savefig('SkyplotCTIO.png')\n if site==1:\n plt.savefig('SkyplotKPNO.png')\n\n\n \n \n"
] | [
[
"matplotlib.pyplot.plot",
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.annotate",
"matplotlib.pyplot.clf",
"numpy.where",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylim",
"numpy.array",
"matplotlib.pyplot.colorbar",
"numpy.genfromtxt",
"matplotlib.pyplot.scatter"
]
] |
ClementRolinat/stable-baselines | [
"333c59379f23e1f5c5c9e8bf93cbfa56ac52d13b"
] | [
"stable_baselines/a2c/a2c.py"
] | [
"import time\nfrom collections import deque\n\nimport gym\nimport numpy as np\nimport tensorflow as tf\n\nfrom stable_baselines import logger\nfrom stable_baselines.common import explained_variance, tf_util, ActorCriticRLModel, SetVerbosity, TensorboardWriter\nfrom stable_baselines.common.policies import ActorCriticPolicy, RecurrentActorCriticPolicy\nfrom stable_baselines.common.runners import AbstractEnvRunner\nfrom stable_baselines.a2c.utils import discount_with_dones, Scheduler, find_trainable_variables, mse, \\\n total_episode_reward_logger\nfrom stable_baselines.ppo2.ppo2 import safe_mean\n\nclass A2C(ActorCriticRLModel):\n \"\"\"\n The A2C (Advantage Actor Critic) model class, https://arxiv.org/abs/1602.01783\n\n :param policy: (ActorCriticPolicy or str) The policy model to use (MlpPolicy, CnnPolicy, CnnLstmPolicy, ...)\n :param env: (Gym environment or str) The environment to learn from (if registered in Gym, can be str)\n :param gamma: (float) Discount factor\n :param n_steps: (int) The number of steps to run for each environment per update\n (i.e. batch size is n_steps * n_env where n_env is number of environment copies running in parallel)\n :param vf_coef: (float) Value function coefficient for the loss calculation\n :param ent_coef: (float) Entropy coefficient for the loss caculation\n :param max_grad_norm: (float) The maximum value for the gradient clipping\n :param learning_rate: (float) The learning rate\n :param alpha: (float) RMSProp decay parameter (default: 0.99)\n :param epsilon: (float) RMSProp epsilon (stabilizes square root computation in denominator of RMSProp update)\n (default: 1e-5)\n :param lr_schedule: (str) The type of scheduler for the learning rate update ('linear', 'constant',\n 'double_linear_con', 'middle_drop' or 'double_middle_drop')\n :param verbose: (int) the verbosity level: 0 none, 1 training information, 2 tensorflow debug\n :param tensorboard_log: (str) the log location for tensorboard (if None, no logging)\n :param _init_setup_model: (bool) Whether or not to build the network at the creation of the instance\n (used only for loading)\n :param policy_kwargs: (dict) additional arguments to be passed to the policy on creation\n :param full_tensorboard_log: (bool) enable additional logging when using tensorboard\n WARNING: this logging can take a lot of space quickly\n \"\"\"\n\n def __init__(self, policy, env, gamma=0.99, n_steps=5, vf_coef=0.25, ent_coef=0.01, max_grad_norm=0.5,\n learning_rate=7e-4, alpha=0.99, epsilon=1e-5, lr_schedule='constant', verbose=0, tensorboard_log=None,\n _init_setup_model=True, policy_kwargs=None, full_tensorboard_log=False):\n\n super(A2C, self).__init__(policy=policy, env=env, verbose=verbose, requires_vec_env=True,\n _init_setup_model=_init_setup_model, policy_kwargs=policy_kwargs)\n\n self.n_steps = n_steps\n self.gamma = gamma\n self.vf_coef = vf_coef\n self.ent_coef = ent_coef\n self.max_grad_norm = max_grad_norm\n self.alpha = alpha\n self.epsilon = epsilon\n self.lr_schedule = lr_schedule\n self.learning_rate = learning_rate\n self.tensorboard_log = tensorboard_log\n self.full_tensorboard_log = full_tensorboard_log\n\n self.graph = None\n self.sess = None\n self.learning_rate_ph = None\n self.n_batch = None\n self.actions_ph = None\n self.advs_ph = None\n self.rewards_ph = None\n self.pg_loss = None\n self.vf_loss = None\n self.entropy = None\n self.params = None\n self.apply_backprop = None\n self.train_model = None\n self.step_model = None\n self.step = None\n self.proba_step = None\n self.value = None\n 
self.initial_state = None\n self.learning_rate_schedule = None\n self.summary = None\n self.episode_reward = None\n\n # if we are loading, it is possible the environment is not known, however the obs and action space are known\n if _init_setup_model:\n self.setup_model()\n\n def _get_pretrain_placeholders(self):\n policy = self.train_model\n if isinstance(self.action_space, gym.spaces.Discrete):\n return policy.obs_ph, self.actions_ph, policy.policy\n return policy.obs_ph, self.actions_ph, policy.deterministic_action\n\n def setup_model(self):\n with SetVerbosity(self.verbose):\n\n assert issubclass(self.policy, ActorCriticPolicy), \"Error: the input policy for the A2C model must be an \" \\\n \"instance of common.policies.ActorCriticPolicy.\"\n\n self.graph = tf.Graph()\n with self.graph.as_default():\n self.sess = tf_util.make_session(graph=self.graph)\n\n self.n_batch = self.n_envs * self.n_steps\n\n n_batch_step = None\n n_batch_train = None\n if issubclass(self.policy, RecurrentActorCriticPolicy):\n n_batch_step = self.n_envs\n n_batch_train = self.n_envs * self.n_steps\n\n step_model = self.policy(self.sess, self.observation_space, self.action_space, self.n_envs, 1,\n n_batch_step, reuse=False, **self.policy_kwargs)\n\n with tf.variable_scope(\"train_model\", reuse=True,\n custom_getter=tf_util.outer_scope_getter(\"train_model\")):\n train_model = self.policy(self.sess, self.observation_space, self.action_space, self.n_envs,\n self.n_steps, n_batch_train, reuse=True, **self.policy_kwargs)\n\n with tf.variable_scope(\"loss\", reuse=False):\n self.actions_ph = train_model.pdtype.sample_placeholder([None], name=\"action_ph\")\n self.advs_ph = tf.placeholder(tf.float32, [None], name=\"advs_ph\")\n self.rewards_ph = tf.placeholder(tf.float32, [None], name=\"rewards_ph\")\n self.learning_rate_ph = tf.placeholder(tf.float32, [], name=\"learning_rate_ph\")\n\n neglogpac = train_model.proba_distribution.neglogp(self.actions_ph)\n self.entropy = tf.reduce_mean(train_model.proba_distribution.entropy())\n self.pg_loss = tf.reduce_mean(self.advs_ph * neglogpac)\n self.vf_loss = mse(tf.squeeze(train_model.value_flat), self.rewards_ph)\n # https://arxiv.org/pdf/1708.04782.pdf#page=9, https://arxiv.org/pdf/1602.01783.pdf#page=4\n # and https://github.com/dennybritz/reinforcement-learning/issues/34\n # suggest to add an entropy component in order to improve exploration.\n loss = self.pg_loss - self.entropy * self.ent_coef + self.vf_loss * self.vf_coef\n\n tf.summary.scalar('entropy_loss', self.entropy)\n tf.summary.scalar('policy_gradient_loss', self.pg_loss)\n tf.summary.scalar('value_function_loss', self.vf_loss)\n tf.summary.scalar('loss', loss)\n\n self.params = find_trainable_variables(\"model\")\n grads = tf.gradients(loss, self.params)\n if self.max_grad_norm is not None:\n grads, _ = tf.clip_by_global_norm(grads, self.max_grad_norm)\n grads = list(zip(grads, self.params))\n\n with tf.variable_scope(\"input_info\", reuse=False):\n tf.summary.scalar('discounted_rewards', tf.reduce_mean(self.rewards_ph))\n tf.summary.scalar('learning_rate', tf.reduce_mean(self.learning_rate))\n tf.summary.scalar('advantage', tf.reduce_mean(self.advs_ph))\n if self.full_tensorboard_log:\n tf.summary.histogram('discounted_rewards', self.rewards_ph)\n tf.summary.histogram('learning_rate', self.learning_rate)\n tf.summary.histogram('advantage', self.advs_ph)\n if tf_util.is_image(self.observation_space):\n tf.summary.image('observation', train_model.obs_ph)\n else:\n tf.summary.histogram('observation', 
train_model.obs_ph)\n\n trainer = tf.train.RMSPropOptimizer(learning_rate=self.learning_rate_ph, decay=self.alpha,\n epsilon=self.epsilon)\n self.apply_backprop = trainer.apply_gradients(grads)\n\n self.train_model = train_model\n self.step_model = step_model\n self.step = step_model.step\n self.proba_step = step_model.proba_step\n self.value = step_model.value\n self.initial_state = step_model.initial_state\n tf.global_variables_initializer().run(session=self.sess)\n\n self.summary = tf.summary.merge_all()\n\n def _train_step(self, obs, states, rewards, masks, actions, values, update, writer=None):\n \"\"\"\n applies a training step to the model\n\n :param obs: ([float]) The input observations\n :param states: ([float]) The states (used for recurrent policies)\n :param rewards: ([float]) The rewards from the environment\n :param masks: ([bool]) Whether or not the episode is over (used for recurrent policies)\n :param actions: ([float]) The actions taken\n :param values: ([float]) The logits values\n :param update: (int) the current step iteration\n :param writer: (TensorFlow Summary.writer) the writer for tensorboard\n :return: (float, float, float) policy loss, value loss, policy entropy\n \"\"\"\n advs = rewards - values\n cur_lr = None\n for _ in range(len(obs)):\n cur_lr = self.learning_rate_schedule.value()\n assert cur_lr is not None, \"Error: the observation input array cannon be empty\"\n\n td_map = {self.train_model.obs_ph: obs, self.actions_ph: actions, self.advs_ph: advs,\n self.rewards_ph: rewards, self.learning_rate_ph: cur_lr}\n if states is not None:\n td_map[self.train_model.states_ph] = states\n td_map[self.train_model.dones_ph] = masks\n\n if writer is not None:\n # run loss backprop with summary, but once every 10 runs save the metadata (memory, compute time, ...)\n if self.full_tensorboard_log and (1 + update) % 10 == 0:\n run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)\n run_metadata = tf.RunMetadata()\n summary, policy_loss, value_loss, policy_entropy, _ = self.sess.run(\n [self.summary, self.pg_loss, self.vf_loss, self.entropy, self.apply_backprop],\n td_map, options=run_options, run_metadata=run_metadata)\n writer.add_run_metadata(run_metadata, 'step%d' % (update * (self.n_batch + 1)))\n else:\n summary, policy_loss, value_loss, policy_entropy, _ = self.sess.run(\n [self.summary, self.pg_loss, self.vf_loss, self.entropy, self.apply_backprop], td_map)\n writer.add_summary(summary, update * (self.n_batch + 1))\n\n else:\n policy_loss, value_loss, policy_entropy, _ = self.sess.run(\n [self.pg_loss, self.vf_loss, self.entropy, self.apply_backprop], td_map)\n\n return policy_loss, value_loss, policy_entropy\n\n def learn(self, total_timesteps, callback=None, seed=None, log_interval=100, tb_log_name=\"A2C\",\n reset_num_timesteps=True):\n\n new_tb_log = self._init_num_timesteps(reset_num_timesteps)\n\n with SetVerbosity(self.verbose), TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name, new_tb_log) \\\n as writer:\n self._setup_learn(seed)\n\n self.learning_rate_schedule = Scheduler(initial_value=self.learning_rate, n_values=total_timesteps,\n schedule=self.lr_schedule)\n\n runner = A2CRunner(self.env, self, n_steps=self.n_steps, gamma=self.gamma)\n self.episode_reward = np.zeros((self.n_envs,))\n # Training stats (when using Monitor wrapper)\n ep_info_buf = deque(maxlen=100)\n\n t_start = time.time()\n for update in range(1, total_timesteps // self.n_batch + 1):\n # true_reward is the reward without discount\n obs, states, rewards, masks, 
actions, values, ep_infos, true_reward = runner.run()\n ep_info_buf.extend(ep_infos)\n _, value_loss, policy_entropy = self._train_step(obs, states, rewards, masks, actions, values,\n self.num_timesteps // (self.n_batch + 1), writer)\n n_seconds = time.time() - t_start\n fps = int((update * self.n_batch) / n_seconds)\n\n if writer is not None:\n self.episode_reward = total_episode_reward_logger(self.episode_reward,\n true_reward.reshape((self.n_envs, self.n_steps)),\n masks.reshape((self.n_envs, self.n_steps)),\n writer, self.num_timesteps)\n\n self.num_timesteps += self.n_batch + 1\n\n if callback is not None:\n # Only stop training if return value is False, not when it is None. This is for backwards\n # compatibility with callbacks that have no return statement.\n if callback(locals(), globals()) is False:\n break\n\n if self.verbose >= 1 and (update % log_interval == 0 or update == 1):\n explained_var = explained_variance(values, rewards)\n logger.record_tabular(\"nupdates\", update)\n logger.record_tabular(\"total_timesteps\", self.num_timesteps)\n logger.record_tabular(\"fps\", fps)\n logger.record_tabular(\"policy_entropy\", float(policy_entropy))\n logger.record_tabular(\"value_loss\", float(value_loss))\n logger.record_tabular(\"explained_variance\", float(explained_var))\n if len(ep_info_buf) > 0 and len(ep_info_buf[0]) > 0:\n logger.logkv('ep_reward_mean', safe_mean([ep_info['r'] for ep_info in ep_info_buf]))\n logger.logkv('ep_len_mean', safe_mean([ep_info['l'] for ep_info in ep_info_buf]))\n logger.dump_tabular()\n\n return self\n\n def save(self, save_path):\n data = {\n \"gamma\": self.gamma,\n \"n_steps\": self.n_steps,\n \"vf_coef\": self.vf_coef,\n \"ent_coef\": self.ent_coef,\n \"max_grad_norm\": self.max_grad_norm,\n \"learning_rate\": self.learning_rate,\n \"alpha\": self.alpha,\n \"epsilon\": self.epsilon,\n \"lr_schedule\": self.lr_schedule,\n \"verbose\": self.verbose,\n \"policy\": self.policy,\n \"observation_space\": self.observation_space,\n \"action_space\": self.action_space,\n \"n_envs\": self.n_envs,\n \"_vectorize_action\": self._vectorize_action,\n \"policy_kwargs\": self.policy_kwargs\n }\n\n params = self.sess.run(self.params)\n\n self._save_to_file(save_path, data=data, params=params)\n\n\nclass A2CRunner(AbstractEnvRunner):\n def __init__(self, env, model, n_steps=5, gamma=0.99):\n \"\"\"\n A runner to learn the policy of an environment for an a2c model\n\n :param env: (Gym environment) The environment to learn from\n :param model: (Model) The model to learn\n :param n_steps: (int) The number of steps to run for each environment\n :param gamma: (float) Discount factor\n \"\"\"\n super(A2CRunner, self).__init__(env=env, model=model, n_steps=n_steps)\n self.gamma = gamma\n\n def run(self):\n \"\"\"\n Run a learning step of the model\n\n :return: ([float], [float], [float], [bool], [float], [float])\n observations, states, rewards, masks, actions, values\n \"\"\"\n mb_obs, mb_rewards, mb_actions, mb_values, mb_dones = [], [], [], [], []\n mb_states = self.states\n ep_infos = []\n for _ in range(self.n_steps):\n actions, values, states, _ = self.model.step(self.obs, self.states, self.dones)\n mb_obs.append(np.copy(self.obs))\n mb_actions.append(actions)\n mb_values.append(values)\n mb_dones.append(self.dones)\n clipped_actions = actions\n # Clip the actions to avoid out of bound error\n if isinstance(self.env.action_space, gym.spaces.Box):\n clipped_actions = np.clip(actions, self.env.action_space.low, self.env.action_space.high)\n obs, rewards, dones, 
infos = self.env.step(clipped_actions)\n for info in infos:\n maybe_ep_info = info.get('episode')\n if maybe_ep_info is not None:\n ep_infos.append(maybe_ep_info)\n\n self.states = states\n self.dones = dones\n self.obs = obs\n mb_rewards.append(rewards)\n mb_dones.append(self.dones)\n # batch of steps to batch of rollouts\n mb_obs = np.asarray(mb_obs, dtype=self.obs.dtype).swapaxes(1, 0).reshape(self.batch_ob_shape)\n mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(0, 1)\n mb_actions = np.asarray(mb_actions, dtype=self.env.action_space.dtype).swapaxes(0, 1)\n mb_values = np.asarray(mb_values, dtype=np.float32).swapaxes(0, 1)\n mb_dones = np.asarray(mb_dones, dtype=np.bool).swapaxes(0, 1)\n mb_masks = mb_dones[:, :-1]\n mb_dones = mb_dones[:, 1:]\n true_rewards = np.copy(mb_rewards)\n last_values = self.model.value(self.obs, self.states, self.dones).tolist()\n # discount/bootstrap off value fn\n for n, (rewards, dones, value) in enumerate(zip(mb_rewards, mb_dones, last_values)):\n rewards = rewards.tolist()\n dones = dones.tolist()\n if dones[-1] == 0:\n rewards = discount_with_dones(rewards + [value], dones + [0], self.gamma)[:-1]\n else:\n rewards = discount_with_dones(rewards, dones, self.gamma)\n mb_rewards[n] = rewards\n\n # convert from [n_env, n_steps, ...] to [n_steps * n_env, ...]\n mb_rewards = mb_rewards.reshape(-1, *mb_rewards.shape[2:])\n mb_actions = mb_actions.reshape(-1, *mb_actions.shape[2:])\n mb_values = mb_values.reshape(-1, *mb_values.shape[2:])\n mb_masks = mb_masks.reshape(-1, *mb_masks.shape[2:])\n true_rewards = true_rewards.reshape(-1, *true_rewards.shape[2:])\n return mb_obs, mb_states, mb_rewards, mb_masks, mb_actions, mb_values, ep_infos, true_rewards\n"
] | [
[
"tensorflow.summary.scalar",
"tensorflow.summary.image",
"numpy.asarray",
"numpy.copy",
"tensorflow.variable_scope",
"tensorflow.squeeze",
"tensorflow.summary.histogram",
"tensorflow.global_variables_initializer",
"tensorflow.clip_by_global_norm",
"tensorflow.Graph",
"numpy.zeros",
"tensorflow.train.RMSPropOptimizer",
"tensorflow.gradients",
"tensorflow.RunOptions",
"tensorflow.placeholder",
"tensorflow.summary.merge_all",
"tensorflow.reduce_mean",
"tensorflow.RunMetadata",
"numpy.clip"
]
] |
lsst-sqre/qa-dashboard | [
"57d40a33f1d6fdc04fb8f5e6e7e4fcfaee25340c"
] | [
"squash/dashboard/viz/api_helper.py"
] | [
"import os\nimport pandas as pd\nimport requests\nfrom datetime import datetime\nfrom furl import furl\n\nSQUASH_API_URL = os.environ.get('SQUASH_API_URL',\n 'http://localhost:8000/dashboard/api/')\n\n\ndef get_endpoint_urls():\n \"\"\"\n Lookup API endpoint URLs\n \"\"\"\n\n r = requests.get(SQUASH_API_URL)\n r.raise_for_status()\n\n return r.json()\n\n\ndef get_data(endpoint, params=None):\n \"\"\"Return data as a dict from\n an API endpoint \"\"\"\n\n api = get_endpoint_urls()\n\n # e.g. http://localhost:8000/AMx?ci_id=1&ci_dataset=cfht&metric=AM1\n r = requests.get(api[endpoint],\n params=params)\n r.raise_for_status()\n\n return r.json()\n\n\ndef get_data_as_pandas_df(endpoint, params=None):\n \"\"\"\n Return data as a pandas dataframe from\n an API endpoint\n \"\"\"\n\n result = get_data(endpoint, params)\n\n data = pd.DataFrame.from_dict(result, orient='index').transpose()\n\n return data\n\n\ndef get_datasets(default=None):\n \"\"\"Get a list of datasets from the API\n and a default value\n Returns\n -------\n datasets : list\n list of dataset names\n default : str\n if a valid default value is provided, overwrite\n the default value obtained from the API\n \"\"\"\n\n datasets = get_data('datasets')\n default_dataset = get_data('defaults')['ci_dataset']\n\n if default:\n if default in datasets:\n default_dataset = default\n\n return {'datasets': datasets, 'default': default_dataset}\n\n\ndef get_metrics(default=None):\n \"\"\"Get the list of metrics from the API\n and a default value\n Returns\n -------\n metrics : list\n list of metric names\n default : str\n if a valid default value is provided, overwrite\n the default value returned from the API\n \"\"\"\n\n r = get_data('metrics')\n metrics = [m['metric'] for m in r['results']]\n\n default_metric = get_data('defaults')['metric']\n\n if default:\n if default in metrics:\n default_metric = default\n\n return {'metrics': metrics, 'default': default_metric}\n\n\ndef get_value(specs, name):\n \"\"\" Helper function to unpack metric specification\n values\n Parameters\n ----------\n specs: dict\n a dict with keys value and name\n name: str\n the spec name\n Return\n ------\n value: float or None\n value of the spec if exists, None otherwise\n \"\"\"\n\n value = None\n\n for s in specs:\n if s['name'] == name:\n value = s['value']\n break\n\n return value\n\n\ndef get_specs(name):\n \"\"\"Get metric specifications thresholds\n from its name\n Parameters\n ----------\n name: str\n a valid metric name\n Returns\n -------\n unit: str\n metric unit\n description:\n metric description\n minimum: float\n metric minimum specification\n design: float\n metric design specification\n stretch: float\n metric stretch goal\n \"\"\"\n\n r = get_data('metrics')\n\n unit = str()\n description = str()\n specs = []\n\n minimum = None\n design = None\n stretch = None\n\n for m in r['results']:\n if m['metric'] == name:\n unit = m['unit']\n description = m['description']\n specs = eval(str(m['specs']))\n break\n\n if specs:\n minimum = get_value(specs, 'minimum')\n design = get_value(specs, 'design')\n stretch = get_value(specs, 'stretch')\n\n return {'unit': unit, 'description': description,\n 'minimum': minimum, 'design': design, 'stretch': stretch}\n\n\ndef get_url_args(doc, defaults=None):\n \"\"\"Return url args recovered from django_full_path cookie in\n the bokeh request header.\n\n If defaults values are provided, overwrite the default values\n obtained from the API\n \"\"\"\n\n args = get_data('defaults')\n\n # overwrite api default values\n if 
defaults:\n for key in defaults:\n args[key] = defaults[key]\n\n r = doc().session_context.request\n if r:\n if 'django_full_path' in r.cookies:\n django_full_path = r.cookies['django_full_path'].value\n tmp = furl(django_full_path).args\n for key in tmp:\n # overwrite default values with those passed\n # as url args, make sure the url arg (key) is valid\n if key in args:\n args[key] = tmp[key]\n\n # the bokeh app name is the second segment of the url path\n args['bokeh_app'] = furl(django_full_path).path.segments[1]\n\n return args\n\n\n# TODO: these functions are used by the monitor app and need refactoring\ndef get_initial_page(page_size, num_pages, window):\n\n # Page size in hours assuming CI_TIME_INTERVAL\n\n CI_TIME_INTERVAL = 8\n\n page_window = page_size * CI_TIME_INTERVAL\n\n if window == 'weeks':\n initial_page = num_pages - int((24*7)/page_window)\n elif window == 'months':\n # maximum window of 3 months\n initial_page = num_pages - int((24*30*3)/page_window)\n elif window == 'years':\n # maximum window of 1 year\n initial_page = num_pages - int((24*365)/page_window)\n else:\n # everything\n initial_page = 1\n\n # Make sure we have enough pages for the input time window\n if initial_page < 1:\n initial_page = 1\n\n return initial_page\n\n\ndef get_meas_by_dataset_and_metric(selected_dataset, selected_metric, window):\n \"\"\" Get measurements for a given dataset and metric from the measurements\n api endpoint\n\n Parameters\n ----------\n selected_dataset : str\n the current selected dataset\n selected_metric : str\n the current selected metric\n\n Returns\n -------\n ci_id : list\n list of job ids from the CI system\n dates : list\n list of datetimes for each job measurement\n measurements : list\n flat list of dicts where the key is the metric and the value\n is its measurement\n ci_url : list\n list of URLs for the jobs in the CI system\n \"\"\"\n api = get_endpoint_urls()\n\n # http://localhost:8000/dashboard/api/measurements/?job__ci_dataset=cfht&metric=AM1\n\n r = requests.get(api['measurements'],\n params={'job__ci_dataset': selected_dataset,\n 'metric': selected_metric})\n r.raise_for_status()\n\n results = r.json()\n\n # results are paginated, walk through each page\n\n # TODO: figure out how to retrieve the number of pages in DRF\n count = results['count']\n page_size = len(results['results'])\n\n measurements = []\n if page_size > 0:\n # ceiling integer\n num_pages = int(count/page_size) + (count % page_size > 0)\n\n initial_page = get_initial_page(page_size, num_pages, window)\n\n for page in range(initial_page, num_pages + 1):\n r = requests.get(\n api['measurements'],\n params={'job__ci_dataset': selected_dataset,\n 'metric': selected_metric,\n 'page': page})\n r.raise_for_status()\n measurements.extend(r.json()['results'])\n\n ci_ids = [int(m['ci_id']) for m in measurements]\n\n # 2016-08-10T05:22:37.700146Z\n # after DM-7517 jobs return is sorted by date and the same is done for\n # the measurements\n dates = [datetime.strptime(m['date'], '%Y-%m-%dT%H:%M:%S.%fZ')\n for m in measurements]\n\n values = [m['value'] for m in measurements]\n\n ci_urls = [m['ci_url'] for m in measurements]\n\n packages = [m['changed_packages'] for m in measurements]\n\n # list of package names, name is the first element in the tuple\n names = []\n for i, sublist in enumerate(packages):\n names.append([])\n for package in sublist:\n names[i].append(package[0])\n\n # list of git urls, git package commit sha and base url are the second and\n # third elements in the tuple\n git_urls = 
[]\n for i, sublist in enumerate(packages):\n git_urls.append([])\n for package in sublist:\n git_urls[i].append(\"{}/commit/{}\".format(package[2].strip('.git'),\n package[1]))\n\n return {'ci_ids': ci_ids, 'dates': dates, 'values': values,\n 'ci_urls': ci_urls, 'names': names, 'git_urls': git_urls}\n"
] | [
[
"pandas.DataFrame.from_dict"
]
] |
srubenacker/DeepDog | [
"ce6613e01c04a14f62a2d6f6cd1c60f97efa790a"
] | [
"src/ddog.py"
] | [
"import util\nimport json\nimport numpy as np\nimport random\nimport tensorflow as tf\n\nclass DeepDog:\n \"\"\"\n The DeepDog class loads the training and test set images from\n disk into RAM, and provides functions to get the test set\n and mini batches of the training set. \n \"\"\"\n\n def __init__(self, imageWidth, imageHeight, trainingInRAM=False, classStratify=False,\n randomMirroring=False, randomCropping=None, normalizeImage=False):\n \"\"\"\n The constructor loads the one hot encodings and the entire test set into RAM.\n The training examples are stored on disk, and read into memory when needed\n for each batch. \n\n input:\n imageWidth: int, width of each image\n\n imageHeight: int, height of each image\n\n trainingInRAM: bool, whether or not to load the entire training set\n into RAM on initialization. This would be beneficial for smaller\n image sizes and decreases the time to fetch each batch.\n\n classStratify: bool, whether or not each batch should be equally \n represented by each breed class i.e. in a batch size of 120,\n each breed would show up once in the batch\n (not implemented yet)\n\n randomMirroring: bool, whether or not to randomly mirror individual \n training images returned by getNextMiniBatch()\n\n randomCropping: tuple, (cropWidth, cropHeight), cropWidth and cropHeight\n are the dimensions of the cropped image returned by\n getNextMiniBatch()\n\n normalizeImage: bool, whether or not to scale the images returned\n by getNextMiniBatch() and getTestImagesAndLabesl() to \n have 0 mean and unit standard deviation\n \"\"\"\n self.MIRROR_PROBABILITY = 0.5\n self.randomMirroring = randomMirroring\n self.randomCropping = randomCropping\n if self.randomCropping is not None:\n self.cropWidth = self.randomCropping[0]\n self.cropHeight = self.randomCropping[1]\n self.normalizeImage = normalizeImage\n\n self.image_width = imageWidth\n self.image_height = imageHeight\n self.training_in_RAM = trainingInRAM\n\n # load the one hot encodings from file\n self.one_hot_encodings = {}\n self.loadOneHotEncodings()\n self.numberBreeds = float(len(self.one_hot_encodings.keys()))\n\n # load the test set from file\n self.test_set_images, self.test_set_labels = [], []\n self.loadTestSet()\n\n # load the training annotations from file and randomize the \n # order of the training examples\n # self.training_examples is a list of 2-tuples\n # (breed, index in breed list of training_annotations)\n # self.training_set_images is a dictionary which is created\n # if trainingInRAM is set to True on construction\n # it is of the form {breed: [list of images in rgb form]}\n self.training_annotations = {}\n self.training_set_images = {}\n self.training_examples = []\n self.training_set_size = 0\n self.loadTrainingSet()\n\n # keep track of our place in the training examples list\n # so we can get the next mini batch\n self.current_index = 0\n\n\n ####################################################\n ################ Private Methods ###################\n ####################################################\n\n\n def loadOneHotEncodings(self):\n \"\"\"\n loadOneHotEncodings reads the one hot encodings for each\n breed and saves them to a member dictionary.\n\n input: none\n\n output: (doesn't return, saves to member variable)\n self.one_hot_encodings: dictionary, {'breed': [1, 0, 0]}\n \"\"\"\n with open('one_hot_encodings.json', 'r') as data_file:\n self.one_hot_encodings = json.load(data_file)\n\n\n def loadTrainingSet(self):\n \"\"\"\n loadTrainingSet reads the training_annotations.json\n into a 
member dictionary, and initializes the random\n order of the training_examples member list.\n\n input: none\n\n output: (doesn't return, saves to member variables)\n self.training_annotations: dictionary, {'breed': [list of annotations]}\n\n self.training_examples: list of 2-tuples\n [(breed, index into list of self.training_annotations), ...]\n \"\"\"\n print(\"Initializing training set order...\\n\")\n\n # load the training_annotations\n with open('training_annotations.json', 'r') as data_file:\n self.training_annotations = json.load(data_file)\n\n # create the list of 2-tuples of training examples (breed, index)\n for j, breed in enumerate(self.training_annotations.keys()):\n if self.training_in_RAM:\n print(str(round(j / self.numberBreeds * 100, 2)) + \"%: Loading training images for \" + breed)\n for i, annotation in enumerate(self.training_annotations[breed]):\n self.training_examples.append((breed, i))\n # if training_in_RAM is True, load the image from disk\n if self.training_in_RAM:\n currentImage = util.getResizedImageData(annotation, self.image_width, self.image_height)\n if breed not in self.training_set_images:\n self.training_set_images[breed] = [currentImage]\n else:\n self.training_set_images[breed].append(currentImage)\n\n self.training_set_size = len(self.training_examples)\n\n # randomize the order of the training examples\n random.shuffle(self.training_examples)\n\n print(\"Finished initializing training set order...\\n\")\n\n\n def loadTestSet(self):\n \"\"\"\n loadTestSet reads the test set images and labels from file\n and saves them into two lists in RAM. \n\n input: none\n\n output: (saves to member lists, doesn't return)\n testImages: numpy array [testSetSize x [imageWidth x imageHeight x 3]]\n\n testLabels: numpy array [testSetSize x [numImageClasses]] \n \"\"\"\n print(\"Loading test set...\\n\")\n\n testing_breeds = {}\n with open('testing_annotations.json', 'r') as data_file:\n testing_breeds = json.load(data_file)\n\n for i, breed in enumerate(testing_breeds.keys()):\n print(str(round(i / self.numberBreeds * 100, 2)) + \"%: Loading test images for \" + breed)\n \n for annotation in testing_breeds[breed]:\n # append the image data to testImages\n if self.randomCropping is None:\n self.test_set_images.append(util.getResizedImageData(annotation, \n self.image_width, self.image_height))\n else:\n self.test_set_images.append(util.getResizedImageData(annotation, \n self.cropWidth, self.cropHeight))\n\n # append the image label's one hot encoding to testLabels\n self.test_set_labels.append(self.one_hot_encodings[annotation['breed']])\n\n # convert python lists to numpy arrays\n self.test_set_images = np.array(self.test_set_images)\n if self.normalizeImage:\n print(\"Normalizing test images...\")\n self.test_set_images = tf.map_fn(tf.image.per_image_standardization, self.test_set_images)\n self.test_set_labels = np.array(self.test_set_labels)\n\n print(\"Finished loading test set.....\\n\")\n\n\n ####################################################\n ################ Public Interface ##################\n ####################################################\n\n\n def getNextMiniBatch(self, batchSize):\n \"\"\"\n getNextMiniBatch returns a 2-tuple of (batchImages, batchLabels).\n batchImages and batchLabels are both arrays, where the image\n at index i in batchImages corresponds to the label at index \n i in batchLabels. 
The batch images and labels are from\n the training set.\n\n input: \n batchSize: int, number of images and labels to include\n in the mini batch returned by getNextMiniBatch\n\n output:\n batchImages: numpy array [batchSize x [imageWidth x imageHeight x 3]]\n\n batchLabels: numpy array [batchSize x [numImageClasses]]\n \"\"\"\n batchImages = []\n batchLabels = []\n\n # if we have reached the end of the training examples, \n # reshuffle the training examples and start from the \n # beginning of the list\n # in the event that the number of training examples\n # is not evenly divisable by the batchSize,\n # some training examples will be skipped during this reshuffling\n # i trade this off for decreased code complexity\n if self.current_index + batchSize > self.training_set_size:\n self.current_index = 0\n random.shuffle(self.training_examples)\n\n # for each training example annotation, load the resized image and\n # get the one hot encoding of the label\n for breed, index in self.training_examples[self.current_index:self.current_index+batchSize]:\n # placeholder image variable\n imageToAppend = None\n\n # if the training data is already in RAM, read it from self.training_set_images\n # otherwise, fetch the image from disk\n if self.training_in_RAM:\n imageToAppend = self.training_set_images[breed][index]\n else:\n annotation = self.training_annotations[breed][index]\n\n # get the image data for the training example\n imageToAppend = util.getResizedImageData(annotation, \n self.image_width, self.image_height)\n\n # mirror the image if the random number is less than the probability\n if self.randomMirroring and random.random() < self.MIRROR_PROBABILITY:\n imageToAppend = np.fliplr(imageToAppend)\n\n # randomly crop the image\n if self.randomCropping is not None:\n widthDiff = self.image_width - self.cropWidth\n heightDiff = self.image_height - self.cropHeight\n\n widthOffset = int(random.random() * widthDiff)\n heightOffset = int(random.random() * heightDiff)\n\n imageToAppend = imageToAppend[widthOffset:widthOffset+self.cropWidth, \n heightOffset:heightOffset+self.cropHeight, \n :]\n\n # # normalize the image to 0 mean and unit standard deviation\n # if self.normalizeImage:\n # imageToAppend = tf.image.per_image_standardization(imageToAppend)\n\n # finally append the image\n batchImages.append(imageToAppend)\n # get the one hot encoding of the label\n batchLabels.append(self.one_hot_encodings[breed])\n\n self.current_index += batchSize\n\n if self.normalizeImage:\n batchImages = tf.map_fn(tf.image.per_image_standardization, batchImages)\n return batchImages, np.array(batchLabels)\n return np.array(batchImages), np.array(batchLabels)\n\n\n def getTestImagesAndLabels(self):\n \"\"\"\n getTestImagesAndLabels returns a 2-tuple of (testImages, testLabels).\n testImages and testLabels are both numpy arrays, where the image \n at index i in testImages corresponds to the label at index i in \n testLabels. \n\n input: None\n\n output:\n testImages: numpy array [testSetSize x [imageWidth x imageHeight x 3]]\n\n testLabels: numpy array [testSetSize x [numImageClasses]] \n \"\"\"\n return self.test_set_images, self.test_set_labels\n\n\n def getTrainingSetSize(self):\n \"\"\"\n getTraininSetSize returns the size of the training set. 
This\n function is useful when computing the progress inside an epoch.\n\n input: none\n\n output:\n trainingSetSize: int, number of examples in the training set\n \"\"\"\n return self.training_set_size\n\n\ndef main():\n dd = DeepDog(64, 64)\n im, la = dd.getNextMiniBatch(100)\n print(im.shape, la.shape)\n print(im)\n print(la)\n\n\nif __name__ == \"__main__\":\n main()"
] | [
[
"numpy.array",
"numpy.fliplr",
"tensorflow.map_fn"
]
] |
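The APIs catalogued for this entry (`numpy.array`, `numpy.fliplr`, `tensorflow.map_fn`) cover the mini-batch augmentation and normalization steps in the loader above. A minimal standalone sketch of those two steps on a synthetic batch; the shapes, seed, and mirror probability are illustrative and not taken from the entry:

```python
import numpy as np
import tensorflow as tf

rng = np.random.default_rng(0)
batch = rng.random((4, 64, 64, 3)).astype(np.float32)  # synthetic "images"

# mirror each image left-right with some probability, as in getNextMiniBatch
MIRROR_PROBABILITY = 0.5
augmented = [np.fliplr(img) if rng.random() < MIRROR_PROBABILITY else img
             for img in batch]

# standardize every image to zero mean / unit variance, one image at a time
standardized = tf.map_fn(tf.image.per_image_standardization,
                         tf.constant(np.array(augmented)))
print(standardized.shape)  # (4, 64, 64, 3)
```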
decisionforce/HACO | [
"ebd1dc49598e6ae2704e58c053cc35f2d9e28429"
] | [
"haco/DIDrive_core/demo/cilrs/cilrs_collect_data.py"
] | [
"import os\nfrom functools import partial\n\nimport PIL\nimport lmdb\nimport numpy as np\nfrom ding.envs import SyncSubprocessEnvManager\nfrom ding.utils.default_helper import deep_merge_dicts\nfrom easydict import EasyDict\nfrom tqdm import tqdm\n\nfrom haco.DIDrive_core.data import CarlaBenchmarkCollector, BenchmarkDatasetSaver\nfrom haco.DIDrive_core.envs import SimpleCarlaEnv, CarlaEnvWrapper\nfrom haco.DIDrive_core.policy import AutoPIDPolicy\nfrom haco.DIDrive_core.utils.others.tcp_helper import parse_carla_tcp\n\nconfig = dict(\n env=dict(\n env_num=5,\n simulator=dict(\n disable_two_wheels=True,\n planner=dict(\n type='behavior',\n resolution=1,\n ),\n obs=(\n dict(\n name='rgb',\n type='rgb',\n size=[400, 300],\n position=[1.3, 0.0, 2.3],\n fov=100,\n ),\n ),\n verbose=True,\n ),\n col_is_failure=True,\n stuck_is_failure=True,\n ran_light_is_failure=True,\n manager=dict(\n auto_reset=False,\n shared_memory=False,\n context='spawn',\n max_retry=1,\n ),\n wrapper=dict(\n speed_factor=25.,\n scale=1,\n crop=256,\n ),\n ),\n server=[\n dict(carla_host='localhost', carla_ports=[9000, 9010, 2]),\n ],\n policy=dict(\n target_speed=25,\n tl_threshold=13,\n noise=True,\n noise_kwargs=dict(),\n collect=dict(\n n_episode=100,\n dir_path='./datasets_train/cilrs_datasets_train',\n preloads_name='cilrs_datasets_train.npy',\n collector=dict(\n suite='FullTown01-v1',\n nocrash=True,\n ),\n )\n ),\n)\n\nmain_config = EasyDict(config)\n\n\ndef cilrs_postprocess(observasion, scale=1, crop=256):\n rgb = observasion['rgb'].copy()\n im = PIL.Image.fromarray(rgb)\n (width, height) = (int(im.width // scale), int(im.height // scale))\n rgb = im.resize((width, height))\n rgb = np.asarray(rgb)\n start_x = height // 2 - crop // 2\n start_y = width // 2 - crop // 2\n rgb = rgb[start_x:start_x + crop, start_y:start_y + crop]\n sensor_data = {'rgb': rgb}\n others = {}\n return sensor_data, others\n\n\ndef wrapped_env(env_cfg, wrapper_cfg, host, port, tm_port=None):\n return CarlaEnvWrapper(SimpleCarlaEnv(env_cfg, host, port, tm_port), wrapper_cfg)\n\n\ndef post_process(config):\n epi_folder = [x for x in os.listdir(config.policy.collect.dir_path) if x.startswith('epi')]\n\n all_img_list = []\n all_mea_list = []\n\n for item in tqdm(epi_folder):\n lmdb_file = lmdb.open(os.path.join(config.policy.collect.dir_path, item, 'measurements.lmdb')).begin(write=False)\n png_files = [\n x for x in os.listdir(os.path.join(config.policy.collect.dir_path, item)) if (x.endswith('png') and x.startswith('rgb'))\n ]\n png_files.sort()\n for png_file in png_files:\n index = png_file.split('_')[1].split('.')[0]\n measurements = np.frombuffer(lmdb_file.get(('measurements_%05d' % int(index)).encode()), np.float32)\n data = {}\n data['control'] = np.array([measurements[15], measurements[16], measurements[17]]).astype(np.float32)\n data['speed'] = measurements[10] / config.env.wrapper.speed_factor\n data['command'] = float(measurements[11])\n new_dict = {}\n new_dict['brake'] = data['control'][2]\n new_dict['steer'] = (data['control'][0] + 1) / 2\n new_dict['throttle'] = data['control'][1]\n new_dict['speed'] = data['speed']\n new_dict['command'] = data['command']\n all_img_list.append(os.path.join(item, png_file))\n all_mea_list.append(new_dict)\n if not os.path.exists('_preloads'):\n os.mkdir('_preloads')\n np.save('_preloads/{}'.format(config.policy.collect.preloads_name), [all_img_list, all_mea_list])\n\n\ndef main(cfg, seed=0):\n cfg.env.manager = deep_merge_dicts(SyncSubprocessEnvManager.default_config(), cfg.env.manager)\n\n 
tcp_list = parse_carla_tcp(cfg.server)\n env_num = cfg.env.env_num\n assert len(tcp_list) >= env_num, \\\n \"Carla server not enough! Need {} servers but only found {}.\".format(env_num, len(tcp_list))\n\n collector_env = SyncSubprocessEnvManager(\n env_fn=[partial(wrapped_env, cfg.env, cfg.env.wrapper, *tcp_list[i]) for i in range(env_num)],\n cfg=cfg.env.manager,\n )\n\n policy = AutoPIDPolicy(cfg.policy)\n\n collector = CarlaBenchmarkCollector(cfg.policy.collect.collector, collector_env, policy.collect_mode)\n\n if not os.path.exists(cfg.policy.collect.dir_path):\n os.makedirs(cfg.policy.collect.dir_path)\n\n collected_episodes = 0\n data_postprocess = lambda x: cilrs_postprocess(x, scale=cfg.env.wrapper.scale, crop=cfg.env.wrapper.crop)\n saver = BenchmarkDatasetSaver(cfg.policy.collect.dir_path, cfg.env.simulator.obs, data_postprocess)\n print('[MAIN] Start collecting data')\n saver.make_dataset_path(cfg.policy.collect)\n while collected_episodes < cfg.policy.collect.n_episode:\n # Sampling data from environments\n n_episode = min(cfg.policy.collect.n_episode - collected_episodes, env_num * 2)\n new_data = collector.collect(n_episode=n_episode)\n saver.save_episodes_data(new_data, start_episode=collected_episodes)\n collected_episodes += n_episode\n print('[MAIN] Current collected: ', collected_episodes, '/', cfg.policy.collect.n_episode)\n\n collector_env.close()\n saver.make_index()\n print('[MAIN] Making preloads')\n post_process(cfg)\n\n\nif __name__ == '__main__':\n main(main_config)\n"
] | [
[
"numpy.array",
"numpy.asarray"
]
] |
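The `cilrs_postprocess` resize-and-center-crop step in this entry only needs PIL plus the two NumPy calls listed (`numpy.array` is used elsewhere in the file). A self-contained sketch on a synthetic 400x300 RGB frame; the scale and crop values mirror the config above, everything else is made up:

```python
import numpy as np
import PIL.Image

rgb = (np.random.rand(300, 400, 3) * 255).astype(np.uint8)  # synthetic H x W x 3 frame

scale, crop = 1, 256
im = PIL.Image.fromarray(rgb)
width, height = int(im.width // scale), int(im.height // scale)
resized = np.asarray(im.resize((width, height)))

start_x = height // 2 - crop // 2
start_y = width // 2 - crop // 2
patch = resized[start_x:start_x + crop, start_y:start_y + crop]
print(patch.shape)  # (256, 256, 3)
```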
yuguiyang/python_demo | [
"1be2406bfc920e22a0f92bf10d9a3665984067ba"
] | [
"old_code/pandas_order.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 28 13:42:30 2017\n\n@author: hexo\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\n\n#读取第一个sheet页\ndf = pd.read_excel('D:\\Tableau_data\\示例 - 超市.xls',sheetname=0)\n\nprint(type(df))\n\n#每一列的数据类型\nprint(df.dtypes)\n#每种类型的数量\nprint(df.get_dtype_counts())\n\n#还不知道这个ftype到底是干嘛的,sparse|dense,稀疏|密集,表示什么呢?\nprint(df.ftypes)\nprint(df.get_ftype_counts())\n\ntop_10_data=df.head(10)\n\n#print(top_10_data)\n\nprint('----------------------------')\n#axis=0表示纵轴,axis=1表示横轴\n#这是每一列,每一列的均值\nprint(top_10_data.mean(axis=0))\nprint('----------------------------')\n#这是每一行,每一行的均值\nprint(top_10_data.mean(axis=1))\n\nprint('----------------------------')\n#sort_index\n#坑啊,这个axis到底是个什么鬼(ok)\n#但是这个level是干嘛的依然没有搞懂\n#按第1列降序排列\n#print(top_10_data.sort_index(axis=0,level=0,ascending=True))\n#print(top_10_data.sort_index(axis=0,level=1,ascending=True))\n\nprint(top_10_data)\nprint('----------------------------')\n#终于成功按照订单日期降序排列了!!!\n#这里按多了排序的话,貌似只可以执行一个排序方式,都是降序\nprint(top_10_data.sort_values(by=['订单日期','行 ID'] , ascending=False).head(2))\n\n\n\n\n\n"
] | [
[
"pandas.read_excel"
]
] |
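The comment in this script wonders whether `sort_values` over several columns can mix sort directions; it can, by passing `ascending` a list aligned with `by`. A small illustration with a synthetic frame (the English column names stand in for the Chinese originals, and no Excel file is needed):

```python
import pandas as pd

df = pd.DataFrame({
    "order_date": pd.to_datetime(["2017-07-01", "2017-07-01", "2017-06-30"]),
    "row_id": [3, 1, 2],
})

# order_date descending, row_id ascending within the same date
print(df.sort_values(by=["order_date", "row_id"], ascending=[False, True]))
```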
tuanho27/torchstat | [
"46d653795a1262f0e58a2069276a69d6bd43078c"
] | [
"torchstat/compute_flops.py"
] | [
"import torch.nn as nn\nimport torch\nimport numpy as np\n\n\ndef compute_flops(module, inp, out):\n if isinstance(module, nn.Conv2d):\n return compute_Conv2d_flops(module, inp, out)\n elif isinstance(module, nn.BatchNorm2d):\n return compute_BatchNorm2d_flops(module, inp, out)\n elif isinstance(module, (nn.AvgPool2d, nn.MaxPool2d)):\n return compute_Pool2d_flops(module, inp, out)\n elif isinstance(module, (nn.ReLU, nn.ReLU6, nn.PReLU, nn.ELU, nn.LeakyReLU)):\n return compute_ReLU_flops(module, inp, out)\n elif isinstance(module, nn.Upsample):\n return compute_Upsample_flops(module, inp, out)\n elif isinstance(module, nn.Linear):\n return compute_Linear_flops(module, inp, out)\n # elif \"loss\" in module:\n # pass\n else:\n print(f\"[Flops]: {type(module).__name__} is not supported!\")\n return 0\n pass\n\n\ndef compute_Conv2d_flops(module, inp, out):\n # Can have multiple inputs, getting the first one\n assert isinstance(module, nn.Conv2d)\n assert len(inp.size()) == 4 and len(inp.size()) == len(out.size())\n\n batch_size = inp.size()[0]\n in_c = inp.size()[1]\n k_h, k_w = module.kernel_size\n out_c, out_h, out_w = out.size()[1:]\n groups = module.groups\n\n filters_per_channel = out_c // groups\n conv_per_position_flops = k_h * k_w * in_c * filters_per_channel\n active_elements_count = batch_size * out_h * out_w\n\n total_conv_flops = conv_per_position_flops * active_elements_count\n\n bias_flops = 0\n if module.bias is not None:\n bias_flops = out_c * active_elements_count\n\n total_flops = total_conv_flops + bias_flops\n return total_flops\n\n\ndef compute_BatchNorm2d_flops(module, inp, out):\n assert isinstance(module, nn.BatchNorm2d)\n assert len(inp.size()) == 4 and len(inp.size()) == len(out.size())\n in_c, in_h, in_w = inp.size()[1:]\n batch_flops = np.prod(inp.shape)\n if module.affine:\n batch_flops *= 2\n return batch_flops\n\n\ndef compute_ReLU_flops(module, inp, out):\n assert isinstance(module, (nn.ReLU, nn.ReLU6, nn.PReLU, nn.ELU, nn.LeakyReLU))\n batch_size = inp.size()[0]\n active_elements_count = batch_size\n\n for s in inp.size()[1:]:\n active_elements_count *= s\n\n return active_elements_count\n\n\ndef compute_Pool2d_flops(module, inp, out):\n assert isinstance(module, nn.MaxPool2d) or isinstance(module, nn.AvgPool2d)\n assert len(inp.size()) == 4 and len(inp.size()) == len(out.size())\n return np.prod(inp.shape)\n\n\ndef compute_Linear_flops(module, inp, out):\n assert isinstance(module, nn.Linear)\n assert len(inp.size()) == 2 and len(out.size()) == 2\n batch_size = inp.size()[0]\n return batch_size * inp.size()[1] * out.size()[1]\n\ndef compute_Upsample_flops(module, inp, out):\n assert isinstance(module, nn.Upsample)\n output_size = out[0]\n batch_size = inp.size()[0]\n output_elements_count = batch_size\n for s in output_size.shape[1:]:\n output_elements_count *= s\n\n return output_elements_count\n"
] | [
[
"numpy.prod"
]
] |
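The Conv2d branch above reduces to a closed-form count: kernel area x input channels x filters per group x output positions, plus one multiply-add per output element for the bias. A quick numeric check of that formula against a concrete layer (the layer sizes are arbitrary and the snippet is mine, not part of torchstat):

```python
import torch
import torch.nn as nn

conv = nn.Conv2d(in_channels=3, out_channels=16, kernel_size=3, padding=1, bias=True)
inp = torch.randn(1, 3, 32, 32)
out = conv(inp)

k_h, k_w = conv.kernel_size
out_c, out_h, out_w = out.shape[1:]
active = inp.shape[0] * out_h * out_w                      # batch x output positions
conv_flops = k_h * k_w * inp.shape[1] * (out_c // conv.groups) * active
bias_flops = out_c * active if conv.bias is not None else 0
print(conv_flops + bias_flops)                             # 442368 + 16384 = 458752
```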
pshustov/DateTimeTools | [
"e542fd3f0e3c5290faad09b7cf8a2751132d4dd3"
] | [
"build/lib/DateTimeTools/WithinTimeRange.py"
] | [
"import numpy as np\nfrom ._CFunctions import _CWithinTimeRange\nfrom ._CTConv import _CTConv\n\ndef WithinTimeRange(Timet,Time0,Time1,BoolOut=False):\n\t'''\n\tPerforms a simple check on a test time (Timet) to see if it exists\n\tbetween Time0 and time1.\n\t\n\tInputs\n\t======\n\tTimet : tuple | float \n\t\tTest time - either a single floating point (array or \n\t\tscalar) to denote hours of the day, or a tuple containing \n\t\t(Date,Time).\n\tTime0 :\ttuple | float\n\t\tStart time, same format as above.\n\tTime1 : tuple | float\n\t\tEnd time, same format as above.\n\tBoolOut : boolean\n\t\tTrue by default, returns a boolean array with the same size as \n\t\tTimet, where eath element in the range Time0 to Time1 is true.\n\t\tWhen False, returns a list of indices within the time range.\n\t\t\n\tOutput\n\t======\n\tout : bool | int\n\t\tIf BoolOut == True boolean (array or scalar), True if within \n\t\ttime range.\n\t\tWhen BoolOut == False, an integer array of indices is returned.\n\t'''\n\tsh = np.shape(Timet)\n\ts0 = np.size(Time0)\n\ts1 = np.size(Time1)\n\t\n\tif s0 == 2:\n\t\tD0 = Time0[0]\n\t\tT0 = Time0[1]\n\telse:\n\t\tT0 = Time0\n\t\tD0 = 20000101\n\t\t\n\tif s1 == 2:\n\t\tD1 = Time1[0]\n\t\tT1 = Time1[1]\n\telse:\n\t\tT1 = Time1\n\t\tD1 = 20000101\t\n\t\n\t\n\tif sh[0] == 2 and np.size(sh) == 2:\n\t\t#hopefully this is a list of date and time\n\t\tD = np.array([Timet[0]]).flatten()\n\t\tT = np.array([Timet[1]]).flatten()\n\telse: \n\t\tT = np.array(Timet)\n\t\tD = np.zeros(T.size,dtype='int32') + 20000101\n\t\t\n\t#convert the dtypes for compatibility with the C++ code\n\t_n = _CTConv(np.size(D),'c_int')\n\t_Date = _CTConv(D,'c_int_ptr')\n\t_ut = _CTConv(T,'c_float_ptr')\n\t_Date0 = _CTConv(D0,'c_int')\n\t_ut0 = _CTConv(T0,'c_float')\n\t_Date1 = _CTConv(D1,'c_int')\n\t_ut1 = _CTConv(T1,'c_float')\n\t_ni = np.zeros(1,dtype='int32')\n\t_ind = np.zeros(_n,dtype='int32')\n\t\t\n\t\t\n\t#call the C++ code\n\t_CWithinTimeRange(_n,_Date,_ut,_Date0,_ut0,_Date1,_ut1,_ni,_ind)\n\t\n\t#reduce the side of the index array\n\t_ind = _ind[:_ni[0]]\n\n\t#either return the indices or the boolean array\n\tif BoolOut:\n\t\tout = np.zeros(_n,dtype='bool8')\n\t\tout[_ind] = True\n\t\treturn out\n\telse:\n\t\treturn _ind\n"
] | [
[
"numpy.array",
"numpy.shape",
"numpy.zeros",
"numpy.size"
]
] |
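The index selection in `WithinTimeRange` is delegated to a compiled routine (`_CWithinTimeRange`); for intuition, a pure-NumPy equivalent of the simple hours-of-the-day case (no dates) might look like the sketch below. This is my approximation, not part of the package:

```python
import numpy as np

def within_time_range_hours(times, t0, t1, bool_out=False):
    """Return indices (or a boolean mask) of `times` falling in [t0, t1]."""
    times = np.asarray(times, dtype=float)
    mask = (times >= t0) & (times <= t1)
    return mask if bool_out else np.flatnonzero(mask)

print(within_time_range_hours([1.5, 6.0, 12.25, 23.9], 6.0, 13.0))        # [1 2]
print(within_time_range_hours([1.5, 6.0, 12.25, 23.9], 6.0, 13.0, True))  # [False True True False]
```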
liute62/NumCpp | [
"d6922b2b5e1f575021b0577aea1445e041ec7180"
] | [
"unitTests/testScripts/TestIntegrate.py"
] | [
"import numpy as np\nimport scipy.special as sp\nfrom termcolor import colored\nimport sys\nif sys.platform == 'linux':\n sys.path.append(r'../lib')\nelse:\n sys.path.append(r'../build/x64/Release')\nimport NumCpp\n\n\n####################################################################################\nNUM_DECIMALS_ROUND = 1\n\n\n####################################################################################\ndef doTest():\n print(colored('Testing Integration Module', 'magenta'))\n\n print(colored('Testing gauss_legendre', 'cyan'))\n numCoefficients = np.random.randint(2, 5, [1, ]).item()\n coefficients = np.random.randint(-20, 20, [numCoefficients, ])\n coefficientsC = NumCpp.NdArray(1, numCoefficients)\n coefficientsC.setArray(coefficients)\n poly = np.poly1d(np.flipud(coefficients), False)\n polyIntegral = poly.integ()\n polyC = NumCpp.Poly1d(coefficientsC, False)\n a, b = np.sort(np.random.rand(2) * 100 - 50)\n area = np.round(polyIntegral(b) - polyIntegral(a), NUM_DECIMALS_ROUND)\n areaC = np.round(NumCpp.integrate_gauss_legendre(polyC, a, b), NUM_DECIMALS_ROUND)\n if area == areaC:\n print(colored('\\tPASS', 'green'))\n else:\n print(area)\n print(areaC)\n print(colored('\\tFAIL', 'red'))\n\n print(colored('Testing romberg', 'cyan'))\n PERCENT_LEEWAY = 0.1\n numCoefficients = np.random.randint(2, 5, [1, ]).item()\n coefficients = np.random.randint(-20, 20, [numCoefficients, ])\n coefficientsC = NumCpp.NdArray(1, numCoefficients)\n coefficientsC.setArray(coefficients)\n poly = np.poly1d(np.flipud(coefficients), False)\n polyIntegral = poly.integ()\n polyC = NumCpp.Poly1d(coefficientsC, False)\n a, b = np.sort(np.random.rand(2) * 100 - 50)\n area = np.round(polyIntegral(b) - polyIntegral(a), NUM_DECIMALS_ROUND)\n areaC = np.round(NumCpp.integrate_romberg(polyC, a, b), NUM_DECIMALS_ROUND)\n # romberg is much less acurate so let's give it some leeway\n areaLow, areaHigh = np.sort([area * (1 - PERCENT_LEEWAY), area * (1 + PERCENT_LEEWAY)])\n if areaLow < areaC < areaHigh:\n print(colored('\\tPASS', 'green'))\n else:\n print(area)\n print(areaC)\n print(colored('\\tFAIL', 'red'))\n\n print(colored('Testing simpson', 'cyan'))\n numCoefficients = np.random.randint(2, 5, [1, ]).item()\n coefficients = np.random.randint(-20, 20, [numCoefficients, ])\n coefficientsC = NumCpp.NdArray(1, numCoefficients)\n coefficientsC.setArray(coefficients)\n poly = np.poly1d(np.flipud(coefficients), False)\n polyIntegral = poly.integ()\n polyC = NumCpp.Poly1d(coefficientsC, False)\n a, b = np.sort(np.random.rand(2) * 100 - 50)\n area = np.round(polyIntegral(b) - polyIntegral(a), NUM_DECIMALS_ROUND)\n areaC = np.round(NumCpp.integrate_simpson(polyC, a, b), NUM_DECIMALS_ROUND)\n if area == areaC:\n print(colored('\\tPASS', 'green'))\n else:\n print(area)\n print(areaC)\n print(colored('\\tFAIL', 'red'))\n\n print(colored('Testing trapazoidal', 'cyan'))\n numCoefficients = np.random.randint(2, 5, [1, ]).item()\n coefficients = np.random.randint(-20, 20, [numCoefficients, ])\n coefficientsC = NumCpp.NdArray(1, numCoefficients)\n coefficientsC.setArray(coefficients)\n poly = np.poly1d(np.flipud(coefficients), False)\n polyIntegral = poly.integ()\n polyC = NumCpp.Poly1d(coefficientsC, False)\n a, b = np.sort(np.random.rand(2) * 100 - 50)\n area = np.round(polyIntegral(b) - polyIntegral(a), NUM_DECIMALS_ROUND)\n areaC = np.round(NumCpp.integrate_trapazoidal(polyC, a, b), NUM_DECIMALS_ROUND)\n if area == areaC:\n print(colored('\\tPASS', 'green'))\n else:\n print(area)\n print(areaC)\n print(colored('\\tFAIL', 
'red'))\n\n\n####################################################################################\nif __name__ == '__main__':\n doTest()\n"
] | [
[
"numpy.sort",
"numpy.random.rand",
"numpy.random.randint",
"numpy.flipud"
]
] |
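Each test above draws random coefficients, flips them with `np.flipud` before building `np.poly1d` (which expects the highest-order coefficient first), and compares NumCpp's integrators against the analytic integral `polyIntegral(b) - polyIntegral(a)`. The NumPy side of that check can be reproduced without NumCpp; the trapezoidal cross-check at the end is my addition:

```python
import numpy as np

coefficients = np.random.randint(-20, 20, [np.random.randint(2, 5), ])
poly = np.poly1d(np.flipud(coefficients))      # lowest-order first -> highest-order first
poly_integral = poly.integ()

a, b = np.sort(np.random.rand(2) * 100 - 50)   # random interval with a < b
area = poly_integral(b) - poly_integral(a)

# numerical cross-check with the trapezoidal rule
x = np.linspace(a, b, 100001)
print(area, np.trapz(poly(x), x))
```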
vsegurar/DeepMSPeptide | [
"ab73f125b2297a7be01da3fa19a1c0b35c29d493"
] | [
"DeepMSPeptide/DeepMSPeptide.py"
] | [
"import warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\nimport argparse\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\n\n\nparser = argparse.ArgumentParser(description='''Predicts the detectability of input peptides using a single dimension\n Convolutionar Neural Network, based on Tensorflow 1.13.1\n Requierements: Tensorflow 1.13.1''')\nparser.add_argument('infile', metavar='F', type=str, nargs='+',\n help='File containing the peptides to be predicted, one per line (max length= 81)')\nargs = parser.parse_args()\n\n\ndef load_pep_and_codify(file, max_len):\n aa_dict={'A':1,'R':2,'N':3,'D':4,'C':5,'Q':6,'E':7,'G':8,'H':9,'I':10,'L':11,'K':12,'M':13,'F':14,\n 'P':15,'O':16,'S':17,'U':18,'T':19,'W':20,'Y':21,'V':22}\n with open(file, 'r') as inf:\n lines = inf.read().splitlines()\n pep_codes=[]\n long_pep_counter = 0\n newLines = []\n for pep in lines:\n if not len(pep) > max_len:\n current_pep=[]\n for aa in pep:\n current_pep.append(aa_dict[aa])\n pep_codes.append(current_pep)\n newLines.extend([pep])\n else:\n long_pep_counter += 1\n predict_data = keras.preprocessing.sequence.pad_sequences(pep_codes, value=0, padding='post', maxlen=max_len)\n return predict_data, long_pep_counter, newLines\n\n\nprint('Loading model...')\nmodel_2_1D = keras.models.load_model('model_2_1D.h5')\n\nprint('Loading input peptides')\npredict_data, skipped, lines = load_pep_and_codify(args.infile[0], 81)\nprint('Succesfully loaded {0} peptides and skipped {1}'.format(len(lines), str(skipped)))\n\nprint('Making predictions')\nmodel_2_1D_pred = model_2_1D.predict(predict_data)\nmodel_2_1D_pred = np.hstack((np.array(lines).reshape(len(lines), 1),model_2_1D_pred)).tolist()\n\nPred_output = []\nfor pred in model_2_1D_pred:\n if float(pred[1]) > 0.5:\n # pred.extend('0')\n Pred_output.append([pred[0], str(1-float(pred[1])), '0'])\n else:\n Pred_output.append([pred[0], str(1-float(pred[1])), '1'])\n # pred.extend('1')\n\noutFile = '{0}_Predictions.txt'.format(args.infile[0].split('.')[0])\nprint('Saving predictions to file {}'.format(outFile))\nwith open(outFile, 'w') as outf:\n outf.write('Peptide\\tProb\\tDetectability\\n')\n outf.writelines('\\t'.join(i) + '\\n' for i in Pred_output)\n"
] | [
[
"numpy.array",
"tensorflow.keras.preprocessing.sequence.pad_sequences",
"tensorflow.keras.models.load_model"
]
] |
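The encoding step in `load_pep_and_codify` maps each residue to an integer and right-pads to length 81 before prediction. A minimal reproduction of just that step, using the amino-acid dictionary from the script but invented peptides and no model file:

```python
import numpy as np
from tensorflow import keras

aa_dict = {'A': 1, 'R': 2, 'N': 3, 'D': 4, 'C': 5, 'Q': 6, 'E': 7, 'G': 8, 'H': 9, 'I': 10,
           'L': 11, 'K': 12, 'M': 13, 'F': 14, 'P': 15, 'O': 16, 'S': 17, 'U': 18, 'T': 19,
           'W': 20, 'Y': 21, 'V': 22}

peptides = ["PEPTIDE", "ACDEFGHIK"]                       # invented examples
codes = [[aa_dict[aa] for aa in pep] for pep in peptides]
predict_data = keras.preprocessing.sequence.pad_sequences(
    codes, value=0, padding='post', maxlen=81)
print(np.array(predict_data).shape)                       # (2, 81)
```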
markhliu99/CAI | [
"b97f0831ee8ea6e2352fe4f25032fdd1019c1ba8"
] | [
"PythonControllerTemplates/src/py-ctrl.py"
] | [
"\"\"\"\npy-ctrl script\n1. generate problem PD file\n 1.1 save PD file in /inputfiles\n2. solve convex hull\n 2.1 save hull information in /output\n 2.2 show figure for 10 sec\n 2.3 save figure in /output\n\"\"\"\n\nimport os\nimport subprocess\nimport argparse\nimport matplotlib.pyplot as plt\n\n\nparser = argparse.ArgumentParser(description = 'Plot tradeoff')\n\n# parser.add_argument('-S', '--Solver', type = int, choices = [0, 1], default = 0, help = \"0: Cplex\\n1: Gurobi\")\n\nparser.add_argument('-P', '--Problem', type = int, choices = [1, 2, 3], default = 1, help=\" 1: Coded Caching\\n 2: Private Information Retrieval\\n 3: Symmetric Private Information Retrieval\")\n\nparser.add_argument('-N1', '--N1', type = int, choices = range(2, 10), default = 2, help = \"number of files in coded caching\")\nparser.add_argument('-K1', '--K1', type = int, choices = range(2, 10), default = 3, help = \"number of users in coded caching\")\n\nparser.add_argument('-N2', '--N2', type = int, choices = range(1, 10), default = 2, help = \"number of servers in private information retrieval\")\nparser.add_argument('-K2', '--K2', type = int, choices = range(1, 10), default = 2, help = \"number of files in private information retrieval\")\n\nparser.add_argument('-N3', '--N3', type = int, choices = range(1, 10), default = 2, help = \"number of servers in symmetric private information retrieval\")\nparser.add_argument('-K3', '--K3', type = int, choices = range(1, 10), default = 2, help = \"number of files in symmetric private information retrieval\")\n\n\nparser.add_argument('-IP', '--InPt', type = str, help = \"list of achievable points, e.g. \\\"(1,1);(1.25,0.85)\\\"\", default=None)\n\n\nif __name__ == \"__main__\":\n # directory of CAI repository\n cai = os.path.dirname(os.path.abspath(__file__)) + \"/../../\"\n\n #### HERE\n # You might need to change these lines:\n # 1. directory of solver\n SOLVER = cai + \"CplexCompute/cplexcompute.out\" \n # 2. 
duration of the convex hull figure pausing in sec\n PAUSE = 10\n\n # read args\n args = parser.parse_args()\n\n # generate PD file\n print(\"Genearte PD file\")\n if args.Problem == 1:\n from gen_pd_cache import gen_pd_cache\n fn = gen_pd_cache(args.N1, args.K1)\n title = \"Coded Caching with {} files and {} users\".format(args.N1, args.K1)\n xlabel = \"Storage\"\n ylabel = \"Download\"\n name = \"cache{}x{}\".format(args.N1, args.K1)\n \n elif args.Problem == 2:\n from gen_pd_pir import gen_pd_pir\n fn = gen_pd_pir(args.N2, args.K2)\n xlabel = \"Storage\"\n ylabel = \"Download\"\n title = \"Private Information Retrieval with {} servers and {} files\".format(args.N2, args.K2)\n name = \"PIR{}x{}\".format(args.N2, args.K2)\n\n elif args.Problem == 3:\n from gen_pd_spir import gen_pd_spir\n fn = gen_pd_spir(args.N3, args.K3)\n xlabel = \"Storage\"\n ylabel = \"Download\"\n title = \"Symmetric Private Information Retrieval with {} servers and {} files\".format(args.N3, args.K3)\n name = \"SPIR{}x{}\".format(args.N3, args.K3)\n \n # Solve PD\n print()\n print(\"Solve the convex hull\")\n if not os.path.exists(cai + 'PlotTradeoff/output'):\n os.makedirs(cai + 'PlotTradeoff/output')\n print('Open ' + cai + 'PlotTradeoff/output/Hull_' + name + '.txt for details')\n if os.path.exists(cai + 'PlotTradeoff/output/Hull_' + name + '.txt') and os.path.exists(cai + 'PlotTradeoff/output/Fig_' + name + '.eps'):\n print(\"file \" + cai + \"PlotTradeoff/output/Hull_\" + name + \".txt already exists\")\n print(\"Overwrite[y/n]:\", end=\"\")\n if input() == \"y\":\n pass\n else:\n with open(cai + 'PlotTradeoff/output/Hull_' + name + '.txt', 'w') as fout:\n subprocess.run([SOLVER, fn, \"hull\"], stdout=fout, text=True)\n \"\"\"\n if args.Solver == 0:\n subprocess.run([cai + \"CplexCompute/cplexcompute.out\", fn, \"hull\"], stdout=fout, text=True)\n else:\n subprocess.run([cai + \"GurobiCompute/gurobicompute.out\", fn, \"hull\"], stdout=fout, text=True)\n \"\"\"\n with open(cai + 'PlotTradeoff/output/Hull_' + name + '.txt', 'r') as fout:\n res = fout.read()\n\n # capture the points on the hull\n res = res[res.find(\"List of found points on the hull:\\n\"):-1].split(\"\\n\")[1: -1]\n points = []\n for p in res:\n points.append(tuple(map(float, p[1: -2].split(', '))))\n \n # plot region\n points = sorted(points, key=lambda x: x[0])\n width = points[0][1] - points[-1][1]\n plt.plot(*zip(*points), label = \"Outer Bounds\")\n if args.InPt != None:\n InPt = []\n for p in args.InPt.split(\";\"):\n InPt.append(tuple(map(float, p[1: -1].split(','))))\n plt.plot(*zip(*InPt), 'o', label = \"Achievable Points\")\n plt.ylim(points[-1][1]- 0.01 * width, points[0][1] + 0.01 * width)\n plt.ylabel(ylabel)\n plt.xlabel(xlabel)\n plt.title(title)\n plt.savefig(cai + 'PlotTradeoff/output/Fig_' + name + '.eps', format='eps')\n plt.legend()\n plt.show(block=False)\n plt.pause(PAUSE)\n plt.close()\n\n print(\"Figure \" + cai + \"PlotTradeoff/output/Fig_\" + name + '.eps')"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.close",
"matplotlib.pyplot.xlabel"
]
] |
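The plotting tail of the controller — sort the hull points, draw the outer bound, overlay achievable points, save, show briefly, close — can be exercised with made-up points and without running the solver. All values and file names below are illustrative only:

```python
import matplotlib.pyplot as plt

hull_points = [(1.0, 2.0), (1.5, 1.4), (2.0, 1.0)]   # made-up outer-bound points
achievable = [(1.25, 1.9), (1.75, 1.3)]              # made-up achievable points

points = sorted(hull_points, key=lambda p: p[0])
plt.plot(*zip(*points), label="Outer Bounds")
plt.plot(*zip(*achievable), 'o', label="Achievable Points")
plt.xlabel("Storage")
plt.ylabel("Download")
plt.title("Example tradeoff")
plt.legend()
plt.savefig("Fig_example.eps", format="eps")
plt.show(block=False)
plt.pause(2)
plt.close()
```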
tcavazos2/hw2-Clustering | [
"60b536729ba03dcb28384be99e1575c3c3c0fe7e"
] | [
"hw2skeleton/cluster.py"
] | [
"from .utils import Atom, Residue, ActiveSite\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom .helpers import *\nfrom Bio import pairwise2\nimport rmsd\nfrom sklearn.decomposition import PCA\nimport networkx as nx\nfrom networkx.drawing.nx_agraph import graphviz_layout\n\ndef compute_similarity(site_a, site_b):\n \"\"\"\n Compute the similarity between two given ActiveSite instances.\n\n Input: two ActiveSite instances\n Output: the similarity between them (a floating point number)\n \"\"\"\n # Get strings of single letter aa residues\n s_a = output_aa_string(site_a.residues)\n s_b = output_aa_string(site_b.residues)\n \n # Align strings using local alignment algorithm which relies\n # on dynamic programming to compute all possible alignments and\n # returns the highest scoring alignment. \n \n # Local alignment aims to find the max alignment for substrings\n # of two larger strings.\n # Matches = +1\n # Mismatches, gaps = +0\n \n alignments = pairwise2.align.localxx(s_a, s_b) # perform alignment\n if len(alignments) == 0: return float(\"inf\") # return INF if no alignment found\n align_a, align_b, s = alignments[0][:3] # extract first alignment\n \n # Output indices where nucleotides in alignment match\n inds_a, inds_b = match(align_a, align_b)\n \n if len(inds_a) < 2: return float(\"inf\")\n \n # Create matrix of coordinates for atom CA\n V = create_coord_matrix(site_a, inds_a)\n W = create_coord_matrix(site_b, inds_b)\n \n # Center and rotate Ca matrices then calculate Root-Mean-Square-Deviation (RMSD)\n # It measures the average distance between backbone atoms of two\n # superimposed proteins.\n\n # The greater the RMSD, the less similar the proteins are.\n # A RMSD equal to 0 represents identical proteins.\n\n # Each protein is a matrix containing x, y, and z coordinates for each CA atom\n # The rows of the two matrices are matching residues obtained from the alignment\n\n # To minimize RMSD you must first center the coordinates on the origin so the\n # two vectors can be near each other.\n V -= rmsd.centroid(V)\n W -= rmsd.centroid(W)\n\n # Then find the optimal rotation for matrix W that aligns it best with V\n # This is the Kabasch algorithm which works by calculating a covariance matrix\n # and then finding the singular value decomposition (SVD) of the cov. matrix\n # Last, find the optimal rotation matrix which is the dot product of V and W\n # optimized by lowest RMSD\n return rmsd.kabsch_rmsd(V,W)\n\ndef output_similarity_matrix(active_sites):\n \"\"\"\n Calculate RMSD for all pairwise active sites. 
This distance measure\n is converted into a similarity metric by dividing by the max element and\n subtracting 1\n\n Input: list of active sites from PDB files\n Output: similarity matrix for active sites\n \"\"\"\n # Create empty pairwise matrix \n mat = np.empty([len(active_sites), len(active_sites)])\n # For every pair calculate the RMSD\n for (x,y), value in np.ndenumerate(mat):\n mat[x][y] = compute_similarity(active_sites[x], active_sites[y])\n # Infinite values means proteins had less than 3 similar amino acids, set to none\n mat[np.isinf(mat)] = None\n # Find max value in array for normalization\n max_val = np.nanmax(mat)\n # Make none values max value\n mat[np.isnan(mat)] = max_val\n # Get normalized dissimilarity matrix\n norm_mat = mat/max_val\n # Convert dissimilarity matrix to similarity by subtracting 1\n norm_mat_sim = 1 - norm_mat\n return norm_mat_sim\n\ndef cluster_by_partitioning(active_sites,k):\n \"\"\"\n Cluster a given set of ActiveSite instances using a partitioning method.\n\n Input: a list of ActiveSite instances\n Output: a clustering of ActiveSite instances\n (this is really a list of clusters, each of which is list of\n ActiveSite instances)\n \"\"\"\n cost_max = float(\"-inf\")\n mat = output_similarity_matrix(active_sites)\n \n # randomly choose k medoids\n centers = initialize_k_mediods(mat, k)\n # assign elements to cluster medoid with max similarity\n clusters = assign_k_clusters(mat, centers)\n # calculate cost of clustering (sum of similarity of points to cluster)\n cost = calculate_cost(mat, centers, clusters)\n # iterate until cost does not increase\n while cost_max < cost:\n cost_max = cost\n # Loop through medoids and all elements not in medoids\n for i in range(0, len(centers)):\n m = centers[i]\n for o in range(len(active_sites)):\n if o != m:\n # replace medoid with element and re-calculate clusters\n # and cost\n centers[i] = o\n clusters_temp = assign_k_clusters(mat, centers)\n cost_swap = calculate_cost(mat, centers, clusters_temp)\n # if cost increases then replace clusters\n if cost_swap > cost: \n cost = cost_swap\n clusters = clusters_temp\n # if cost decreases or stays the same leave center\n else: centers[i] = m\n return output_cluster_list(active_sites, clusters)\n\ndef cluster_hierarchically(active_sites,k):\n \"\"\"\n Cluster the given set of ActiveSite instances using a hierarchical algorithm. 
#\n\n Input: a list of ActiveSite instances\n Output: a list of clusterings\n (each clustering is a list of lists of Sequence objects)\n \"\"\"\n # Create similarity matrix\n mat_original = output_similarity_matrix(active_sites)\n mat = output_similarity_matrix(active_sites)\n # Fill diagonals with -infinity \n np.fill_diagonal(mat, float(\"-inf\"))\n \n # Create cluster array to keep track of number of clusters\n vals = [np.array([v]) for v in range(len(active_sites))]\n keys = np.arange(0,len(active_sites))\n clusters = dict(zip(keys, vals))\n all_clusters = []\n\n all_clusters.append(output_cluster_list(active_sites, clusters.values()))\n # Group the most similar elements until you only have one more cluster\n while len(clusters) > k:\n # Get most similar clusters\n i,j = np.unravel_index(mat.argmax(), mat.shape)\n # Get two clusters\n c_i = clusters.get(i)\n c_j = clusters.get(j)\n # Add new combined cluster\n c_new = list(clusters.keys())[-1]+1\n clusters[c_new] = np.append(c_i, c_j)\n \n # Add new row/column to similarity matrix\n new_dist = dist_HC(active_sites, clusters,c_new, mat_original)\n new_col = np.append(new_dist, float(\"-inf\"))\n mat = np.vstack([mat, new_dist])\n mat = np.column_stack([mat, new_col])\n # Replace row/column with negative infinitys that correspond to \n # most similar elements\n mat[i], mat[j] = float(\"-inf\"), float(\"-inf\")\n mat[:,j], mat[:,i] = float(\"-inf\"), float(\"-inf\")\n # Drop most similar elements from cluster\n clusters.pop(i)\n clusters.pop(j)\n all_clusters.append(output_cluster_list(active_sites, clusters.values()))\n return all_clusters\n"
] | [
[
"numpy.vstack",
"numpy.append",
"numpy.nanmax",
"numpy.isinf",
"numpy.column_stack",
"numpy.ndenumerate",
"numpy.isnan",
"numpy.array"
]
] |
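The RMSD-to-similarity conversion in `output_similarity_matrix` — mark pairs with too few matching residues, normalize by the largest finite distance, subtract from one — can be seen on a toy matrix. The matrix below is invented, and `np.nan` is used where the original assigns `None` (NumPy coerces it to NaN either way):

```python
import numpy as np

mat = np.array([[0.0, 2.0, np.inf],
                [2.0, 0.0, 4.0],
                [np.inf, 4.0, 0.0]])   # invented pairwise RMSD values

mat[np.isinf(mat)] = np.nan            # pairs with too few matching residues
max_val = np.nanmax(mat)               # largest finite distance
mat[np.isnan(mat)] = max_val           # treat missing pairs as maximally distant
similarity = 1 - mat / max_val
print(similarity)
```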
ferenctorok/potential_field_planner | [
"7a9f7ae70a91523cc6d42029f869f9020cc1ea35"
] | [
"gradplanner/controller/low_level_controller.py"
] | [
"import numpy as np\n\n\nclass LowLevelController:\n \"\"\"Low level controller of a point mass robot with dynamics:\n\n x_{k+1} = x_k + v_k * Ts * cos(psi_k)\n y_{k+1} = y_k + v_k * Ts * sin(psi_k)\n v_{k+1} = v_k + Ts * a_k\n psi_{k+1} = psi_k + Ts * omega_k\n omega_{k+1} = omega_k + Ts * epsilon_k\n\n Where a_k and epsilon_k are the inputs and are the translational and rotational\n accelerations respectively.\n\n For now we assume, that it is a perfect controller which is able to produce\n the exact commanded outputs if they are reachable with the provided\n input constraints.\n \"\"\"\n\n def __init__(self,\n params):\n \"\"\"Initializes a LowLevelController.\"\"\"\n\n self._init_from_params(params)\n\n \n def get_inputs(self, state, cmd_vel):\n \"\"\"produces control inputs based on the actual state and the commanded\n velocities in cmd_vel = np.array([v_des, omega_des])\"\"\"\n\n v_des = cmd_vel[0]\n omega_des = cmd_vel[1]\n v_k = state[2]\n omega_k = state[4]\n\n # translational acceleration:\n a_k = (v_des - v_k) / self._Ts\n if a_k > self._acc_max:\n a_k = self._acc_max\n elif a_k < self._acc_min:\n a_k = self._acc_min\n\n # angular acceleration:\n epsilon_k = (omega_des - omega_k) / self._Ts\n if epsilon_k > self._epsilon_max:\n a_epsilon_kk = self._epsilon_max\n elif epsilon_k < self._epsilon_min:\n epsilon_k = self._epsilon_min\n\n return np.array([a_k, epsilon_k])\n\n\n def _init_from_params(self, params):\n \"\"\"Initializes some variables from the params.\"\"\"\n\n self._Ts = params[\"general\"][\"Ts\"]\n self._acc_min = params[\"LowLevelController\"][\"acc_min\"]\n self._acc_max = params[\"LowLevelController\"][\"acc_max\"]\n self._epsilon_min = params[\"LowLevelController\"][\"epsilon_min\"]\n self._epsilon_max = params[\"LowLevelController\"][\"epsilon_max\"]\n"
] | [
[
"numpy.array"
]
] |
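The saturation logic in `get_inputs` can be written more compactly with `np.clip`; as an aside, the epsilon branch in the file assigns to `a_epsilon_kk` rather than `epsilon_k` when the commanded rate exceeds the upper bound. A clipped variant of the same step, with invented parameter values rather than the params dictionary:

```python
import numpy as np

Ts = 0.05
acc_min, acc_max = -2.0, 2.0
eps_min, eps_max = -1.0, 1.0

def get_inputs(state, cmd_vel):
    """state = [x, y, v, psi, omega]; cmd_vel = [v_des, omega_des]."""
    v_k, omega_k = state[2], state[4]
    a_k = np.clip((cmd_vel[0] - v_k) / Ts, acc_min, acc_max)
    epsilon_k = np.clip((cmd_vel[1] - omega_k) / Ts, eps_min, eps_max)
    return np.array([a_k, epsilon_k])

print(get_inputs(np.array([0.0, 0.0, 1.0, 0.0, 0.1]), np.array([1.5, 0.0])))  # [ 2. -1.]
```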
DPaletti/mida_acv | [
"9b492adaf75ce24c94dfa6993c5757e1bb96c700"
] | [
"src/mida_acv/utilities.py"
] | [
"from pathlib import Path\nfrom typing import Tuple, List, Dict\n\nimport pandas as pd\nimport numpy as np\nfrom tsfresh.utilities.dataframe_functions import roll_time_series\n\n\ndef get_path(df: pd.DataFrame) -> np.array:\n out = []\n for index, row in df.iterrows():\n out.append((row[\"Latitude\"], row[\"Longitude\"]))\n return np.array(out)\n\n\ndef write_to_csv(path: str, data: Dict[str, List[pd.DataFrame]]) -> None:\n full_path: Path\n for k, v in data.items():\n full_path = Path(path).joinpath(k[: k.find(\"-\")], k[k.find(\"-\") + 1 :])\n full_path.mkdir(parents=True, exist_ok=True)\n for index, df in enumerate(v):\n df.to_csv(full_path.joinpath(\"timeseries-\" + str(index) + \".csv\").open(\"w\"))\n\n\ndef to_tsfresh(data_path: str) -> Tuple[pd.DataFrame, pd.Series, pd.Series]:\n df = pd.DataFrame()\n weight_series = pd.Series()\n drivers_series = pd.Series()\n temp_df: pd.DataFrame\n # ident: str = \"\"\n i: int = 0\n for placement in {\"deck\", \"stem\"}:\n for driver_number in {\"single\", \"double\"}:\n for ds in Path(data_path).joinpath(placement, driver_number).iterdir():\n temp_df = pd.read_csv(str(ds))\n weight = temp_df[\"Weight\"][0]\n # ident = placement + \"_\" + driver_number + \"_\" + temp_df[\"Driver\"][0]\n temp_df = temp_df.assign(id=i)\n temp_df = temp_df.drop(\n [\"Unnamed: 0\", \"Driver\", \"Weight\", \"Placement\"], axis=1\n )\n df = df.append(temp_df)\n weight_series.loc[i] = weight\n drivers_series.loc[i] = 0 if driver_number == \"single\" else 1\n i += 1\n return df.fillna(0), weight_series, drivers_series\n\n\ndef window_df(df: pd.DataFrame):\n return roll_time_series(\n df, column_id=\"id\", column_sort=\"Timestamp\", column_kind=None\n )\n\n\ndef align(signal_1: np.array, signal_2: np.array):\n # Standardization\n signal_1 = (signal_1 - np.mean(signal_1)) / np.std(signal_1)\n signal_2 = (signal_2 - np.mean(signal_2)) / np.std(signal_2)\n\n # Cross-Correlation\n correlation = np.correlate(signal_1, signal_2, \"full\")\n center = len(correlation) - min(len(signal_1), len(signal_1))\n max_position = correlation.argmax()\n phase = np.abs(center - max_position)\n if phase == 0:\n reversed_correlation_signal = correlation[::-1]\n max_position_reversed = reversed_correlation_signal.argmax()\n phase_reversed = np.abs(center - max_position_reversed)\n phase = np.max([phase, phase_reversed])\n return signal_1, signal_2[phase:]\n"
] | [
[
"pandas.Series",
"pandas.DataFrame",
"numpy.abs",
"numpy.correlate",
"numpy.max",
"numpy.array",
"numpy.std",
"numpy.mean"
]
] |
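The `align` helper standardizes two signals and estimates their lag from the peak of the full cross-correlation. A tiny standalone run with a known phase shift (the signals are synthetic; the lag printed is in samples):

```python
import numpy as np

t = np.linspace(0, 4 * np.pi, 200)
signal_1 = np.sin(t)
signal_2 = np.sin(t + 0.5)             # same signal, shifted in phase

# standardize both signals
signal_1 = (signal_1 - np.mean(signal_1)) / np.std(signal_1)
signal_2 = (signal_2 - np.mean(signal_2)) / np.std(signal_2)

correlation = np.correlate(signal_1, signal_2, "full")
center = len(correlation) - min(len(signal_1), len(signal_2))
phase = np.abs(center - correlation.argmax())
print(phase)                            # estimated lag between the two signals
```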
by-liu/calibration-framework | [
"7b306e4bbe6361d411b209759b7ba3d016bd0d17"
] | [
"netcal/scaling/LogisticCalibration.py"
] | [
"# Copyright (C) 2019-2021 Ruhr West University of Applied Sciences, Bottrop, Germany\n# AND Elektronische Fahrwerksysteme GmbH, Gaimersheim Germany\n#\n# This Source Code Form is subject to the terms of the Apache License 2.0\n# If a copy of the APL2 was not distributed with this\n# file, You can obtain one at https://www.apache.org/licenses/LICENSE-2.0.txt.\n\nfrom collections import OrderedDict\nfrom typing import Union\n\nimport numpy as np\nimport torch\nimport torch.distributions.constraints as constraints\nimport pyro\nimport pyro.distributions as dist\n\nfrom netcal.scaling import AbstractLogisticRegression\n\n\nclass LogisticCalibration(AbstractLogisticRegression):\n \"\"\"\n On classification, apply the logistic calibration method aka Platt scaling to obtain a\n calibration mapping. This method is originally proposed by [1]_.\n For the multiclass case, we use the Vector scaling proposed in [2]_.\n On detection mode, this calibration method uses multiple independent normal distributions to obtain a\n calibration mapping by means of the confidence as well as additional features [3]_. This calibration scheme\n assumes independence between all variables.\n\n On detection, it is necessary to provide all data in input parameter ``X`` as an NumPy array\n of shape ``(n_samples, n_features)``,\n whereas the confidence must be the first feature given in the input array. The ground-truth samples ``y``\n must be an array of shape ``(n_samples,)`` consisting of binary labels :math:`y \\\\in \\\\{0, 1\\\\}`. Those\n labels indicate if the according sample has matched a ground truth box :math:`\\\\text{m}=1` or is a false\n prediction :math:`\\\\text{m}=0`.\n\n **Mathematical background:** For confidence calibration in classification tasks, a\n confidence mapping :math:`g` is applied on top of a miscalibrated scoring classifier :math:`\\\\hat{p} = h(x)` to\n deliver a calibrated confidence score :math:`\\\\hat{q} = g(h(x))`.\n\n For detection calibration, we can also use the additional box regression output which we denote as\n :math:`\\\\hat{r} \\\\in [0, 1]^J` with :math:`J` as the number of dimensions used for the box encoding (e.g.\n :math:`J=4` for x position, y position, width and height).\n Therefore, the calibration map is not only a function of the confidence score, but also of :math:`\\\\hat{r}`.\n To define a general calibration map for binary problems, we use the logistic function and the combined\n input :math:`s = (\\\\hat{p}, \\\\hat{r})` of size K by\n\n .. math::\n\n g(s) = \\\\frac{1}{1 + \\\\exp(-z(s))} ,\n\n According to [1]_, we can interpret the logit :math:`z` as the logarithm of the posterior odds\n\n .. math::\n\n z(s) = \\\\log \\\\frac{f(\\\\text{m}=1 | s)}{f(\\\\text{m}=0 | s)} \\\\approx\n \\\\log \\\\frac{f(s | \\\\text{m}=1)}{f(s | \\\\text{m}=1)} = \\\\ell r(s)\n\n If we assume independence of all variables given in :math:`s`, we can use multiple univariate probability\n density distributions with the same variance to obtain a calibration mapping. Using this formulation, we can\n simply extend the scaling factor (from classification logistic calibration) to a scaling\n vector :math:`w \\\\in \\\\mathbb{R}^K`.\n However, instead of using the uncalibrated confidence estimate :math:`\\\\hat{p}`, we use the logit of the\n network as part of :math:`s` to be conform with the original formulation in [1]_ and [2]_. Thus,\n the log-likelihood ratio can be expressed as\n\n .. 
math::\n \\\\ell r(s) = s^T w + c,\n\n with bias :math:`c \\\\in \\\\mathbb{R}`.\n We utilize standard optimization methods to determine the calibration mapping :math:`g(s)`.\n\n Parameters\n ----------\n temperature_only : bool, default: False\n If True, use Temperature Scaling instead of Platt/Vector Scaling.\n method : str, default: \"mle\"\n Method that is used to obtain a calibration mapping:\n - 'mle': Maximum likelihood estimate without uncertainty using a convex optimizer.\n - 'momentum': MLE estimate using Momentum optimizer for non-convex optimization.\n - 'variational': Variational Inference with uncertainty.\n - 'mcmc': Markov-Chain Monte-Carlo sampling with uncertainty.\n momentum_epochs : int, optional, default: 1000\n Number of epochs used by momentum optimizer.\n mcmc_steps : int, optional, default: 20\n Number of weight samples obtained by MCMC sampling.\n mcmc_chains : int, optional, default: 1\n Number of Markov-chains used in parallel for MCMC sampling (this will result\n in mcmc_steps * mcmc_chains samples).\n mcmc_warmup_steps : int, optional, default: 100\n Warmup steps used for MCMC sampling.\n vi_epochs : int, optional, default: 1000\n Number of epochs used for ELBO optimization.\n detection : bool, default: False\n If False, the input array 'X' is treated as multi-class confidence input (softmax)\n with shape (n_samples, [n_classes]).\n If True, the input array 'X' is treated as a box predictions with several box features (at least\n box confidence must be present) with shape (n_samples, [n_box_features]).\n independent_probabilities : bool, optional, default: False\n Boolean for multi class probabilities.\n If set to True, the probability estimates for each\n class are treated as independent of each other (sigmoid).\n use_cuda : str or bool, optional, default: False\n Specify if CUDA should be used. If str, you can also specify the device\n number like 'cuda:0', etc.\n\n References\n ----------\n .. [1] Platt, John:\n \"Probabilistic outputs for support vector machines and comparisons to regularized likelihood methods.\"\n Advances in large margin classifiers 10.3: 61-74, 1999\n `Get source online <https://www.researchgate.net/profile/John_Platt/publication/2594015_Probabilistic_Outputs_for_Support_Vector_Machines_and_Comparisons_to_Regularized_Likelihood_Methods/links/004635154cff5262d6000000.pdf>`_\n\n .. [2] Chuan Guo, Geoff Pleiss, Yu Sun and Kilian Q. Weinberger:\n \"On Calibration of Modern Neural Networks.\"\n Proceedings of the 34th International Conference on Machine Learning-Volume 70. JMLR. org, 2017.\n `Get source online <https://arxiv.org/abs/1706.04599>`_\n\n .. [3] Fabian Küppers, Jan Kronenberger, Amirhossein Shantia and Anselm Haselhoff:\n \"Multivariate Confidence Calibration for Object Detection.\"\n The IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops.\n\n .. [4] Fabian Küppers, Jan Kronenberger, Jonas Schneider and Anselm Haselhoff:\n \"Bayesian Confidence Calibration for Epistemic Uncertainty Modelling.\"\n 2021 IEEE Intelligent Vehicles Symposium (IV), 2021\n \"\"\"\n\n def __init__(self, *args, temperature_only: bool = False, **kwargs):\n \"\"\" Create an instance of `LogisticCalibration`. Detailed parameter description given in class docs. \"\"\"\n\n super().__init__(*args, **kwargs)\n self.temperature_only = temperature_only\n\n # -------------------------------------------------\n\n @property\n def intercept(self) -> Union[np.ndarray, float]:\n \"\"\" Getter for intercept of logistic calibration. 
\"\"\"\n if self._sites is None:\n raise ValueError(\"Intercept is None. You have to call the method 'fit' first.\")\n\n if self.temperature_only:\n raise ValueError(\"There is no intercept for temperature scaling.\")\n\n return self._sites['bias']['values']\n\n @property\n def weights(self) -> Union[np.ndarray, float]:\n \"\"\" Getter for weights of logistic calibration. \"\"\"\n if self._sites is None:\n raise ValueError(\"Weights is None. You have to call the method 'fit' first.\")\n\n return self._sites['weights']['values']\n\n # -------------------------------------------------\n\n def prepare(self, X: np.ndarray) -> torch.Tensor:\n \"\"\"\n Preprocessing of input data before called at the beginning of the fit-function.\n\n Parameters\n ----------\n X : np.ndarray, shape=(n_samples, [n_classes]) or (n_samples, [n_box_features])\n NumPy array with confidence values for each prediction on classification with shapes\n 1-D for binary classification, 2-D for multi class (softmax).\n On detection, this array must have 2 dimensions with number of additional box features in last dim.\n\n Returns\n -------\n torch.Tensor\n Prepared data vector X as torch tensor.\n \"\"\"\n\n if len(X.shape) == 1:\n X = np.reshape(X, (-1, 1))\n\n # on detection mode, convert confidence to sigmoid and append the remaining features\n if self.detection:\n data_input = np.concatenate((self._inverse_sigmoid(X[:, 0]).reshape(-1, 1), X[:, 1:]), axis=1)\n\n # on binary classification, simply convert the confidences to logits\n elif self._is_binary_classification():\n data_input = self._inverse_sigmoid(X)\n\n # on multiclass classification, use inverse softmax instead\n else:\n data_input = self._inverse_softmax(X)\n\n return torch.Tensor(data_input)\n\n def prior(self):\n \"\"\"\n Prior definition of the weights used for log regression. 
This function has to set the\n variables 'self.weight_prior_dist', 'self.weight_mean_init' and 'self.weight_stddev_init'.\n \"\"\"\n\n self._sites = OrderedDict()\n\n # on temperature scaling, we only have one single weight for all classes\n if self.temperature_only:\n self._sites['weights'] = {\n 'values': None,\n 'constraint': constraints.real,\n 'init': {\n 'mean': torch.ones(1),\n 'scale': torch.ones(1)\n },\n 'prior': dist.Normal(torch.ones(1), 10 * torch.ones(1), validate_args=True)\n }\n\n else:\n\n # on detection mode or binary classification, we have a weight for each given feature (one for binary\n # classification) and bias\n if self.detection or self._is_binary_classification():\n num_bias = 1\n num_weights = self.num_features\n\n # on multiclass classification, we have one weight and one bias for each class separately\n else:\n num_bias = self.num_classes\n num_weights = self.num_classes\n\n # set properties for \"weights\"\n self._sites['weights'] = {\n 'values': None,\n 'constraint': constraints.real,\n 'init': {\n 'mean': torch.ones(num_weights),\n 'scale': torch.ones(num_weights)\n },\n 'prior': dist.Normal(torch.ones(num_weights), 10 * torch.ones(num_weights), validate_args=True),\n }\n\n # set properties for \"bias\"\n self._sites['bias'] = {\n 'values': None,\n 'constraint': constraints.real,\n 'init': {\n 'mean': torch.zeros(num_bias),\n 'scale': torch.ones(num_bias)\n },\n 'prior': dist.Normal(torch.zeros(num_bias), 10 * torch.ones(num_bias), validate_args=True),\n }\n\n def model(self, X: torch.Tensor = None, y: torch.Tensor = None) -> torch.Tensor:\n \"\"\"\n Definition of the log regression model.\n\n Parameters\n ----------\n X : torch.Tensor, shape=(n_samples, n_log_regression_features)\n Input data that has been prepared by \"self.prepare\" function call.\n y : torch.Tensor, shape=(n_samples, [n_classes])\n Torch tensor with ground truth labels.\n Either as label vector (1-D) or as one-hot encoded ground truth array (2-D) (for multiclass MLE only).\n\n Returns\n -------\n torch.Tensor, shape=(n_samples, [n_classes])\n Logit of the log regression model.\n \"\"\"\n\n # sample from prior - on MLE, this weight will be set as conditional\n weights = pyro.sample(\"weights\", self._sites[\"weights\"][\"prior\"])\n\n if self.temperature_only:\n bias = 0.\n else:\n bias = pyro.sample(\"bias\", self._sites[\"bias\"][\"prior\"])\n\n # on detection or binary classification, use dot product to sum up all given features to one logit\n if self.detection or self._is_binary_classification():\n\n # we need squeeze to remove last (unnecessary) dim to avoid site-effects\n # temperature scaling: sinlge scalar\n if self.temperature_only:\n def logit_op(x, w, b): return torch.squeeze(torch.sum(torch.mul(x, w), dim=1))\n\n # platt scaling: one weight for each feature given\n else:\n weights = torch.reshape(weights, (-1, 1))\n def logit_op(x, w, b): return torch.squeeze(torch.matmul(x, w) + b)\n\n # define as probabilistic output the sigmoid and a bernoulli distribution\n prob_op = torch.sigmoid\n dist_op = dist.Bernoulli\n\n else:\n\n # the op for calculating the logit is an element-wise multiplication\n # for vector scaling and to keep multinomial output\n def logit_op(x, w, b): return torch.mul(x, w) + b\n\n # define as probabilistic output the softmax and a categorical distribution\n def prob_op(logit): return torch.softmax(logit, dim=1)\n dist_op = dist.Categorical\n\n # the first dimension of the given input data is the \"independent\" sample dimension\n with pyro.plate(\"data\", 
X.shape[0]):\n\n # calculate logit\n logit = logit_op(X, weights, bias)\n\n # if MLE, (slow) sampling is not necessary. However, this is needed for 'variational' and 'mcmc'\n if self.method in ['variational', 'mcmc']:\n probs = prob_op(logit)\n pyro.sample(\"obs\", dist_op(probs=probs, validate_args=True), obs=y)\n\n return logit\n"
] | [
[
"torch.ones",
"numpy.reshape",
"torch.mul",
"torch.reshape",
"torch.matmul",
"torch.zeros",
"torch.softmax",
"torch.Tensor"
]
] |
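The `temperature_only` path of this class collapses to a single scalar acting on the logits before the softmax (the `weights` site plays the role of an inverse temperature). A bare-bones sketch of the effect with a fixed temperature and synthetic logits; the class fits this value rather than hard-coding it:

```python
import torch

logits = torch.tensor([[2.0, 0.5, -1.0],
                       [0.1, 0.2, 0.3]])   # synthetic multiclass logits
temperature = 1.5                           # would normally be fitted (e.g. by MLE)

uncalibrated = torch.softmax(logits, dim=1)
calibrated = torch.softmax(logits / temperature, dim=1)
print(uncalibrated)
print(calibrated)                           # flatter, less overconfident for T > 1
```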
manzt/bioimage-latency-benchmark | [
"134a368f90cdf38532723f621e1766f31e2d3214"
] | [
"notebooks/chunks.py"
] | [
"#!/usr/bin/env python\nimport argparse\nimport math\n\nimport matplotlib.pyplot as plt\n\n\ndef file_count(shape, chunkXY, chunkZ=1, chunkT=1, chunkC=1):\n t, c, z, y, x = shape\n return (\n math.ceil(x / chunkXY)\n * math.ceil(y / chunkXY)\n * math.ceil(z / chunkZ)\n * math.ceil(t / chunkT)\n * math.ceil(c / chunkC)\n )\n\n\ndef plot(ax, twoD=True, font=16):\n if twoD:\n shape = (1, 8, 1, 2 ** 16, 2 ** 16)\n chunkSizesXY = [32, 1024]\n chunkSizesOther = (1, 2, 4, 8)\n else:\n shape = (100, 1, 1024, 1024, 1024)\n chunkSizesXY = (16, 32, 64, 128)\n chunkSizesOther = (1, 10, 100)\n\n ax.set_ylabel(\"Number of chunks\")\n ax.set_yscale(\"log\")\n ax.set_xscale(\"log\")\n ax.set(xlim=(10, 2 * 10 ** 3), ylim=(10, 10 ** 8))\n\n if twoD:\n ax.set_xlabel(\"Chunk size (X and Y)\")\n ax.set_title(\"XYZCT: (64k, 64k, 1, 8, 1)\")\n chunkDim = \"C\"\n annTitle = \"Chosen chunk size:\\n(256, 256, 1, 1, 1)\"\n xy = ((256), file_count(shape, 256))\n else:\n ax.set_xlabel(\"Chunk size (XYZ)\")\n ax.set_title(\"XYZCT: (1k, 1k, 1k, 1, 100)\")\n chunkDim = \"T\"\n annTitle = \"Chosen chunk size:\\n(32, 32, 32, 1, 1)\"\n xy = ((32), file_count(shape, 32, chunkZ=32))\n\n for item in (\n [ax.title, ax.xaxis.label, ax.yaxis.label]\n + ax.get_xticklabels()\n + ax.get_yticklabels()\n ):\n item.set_fontsize(font)\n\n styles = [\"solid\", \"dashed\", \"dashdot\", \"dotted\"]\n for whichChunk, chunkOther in enumerate(chunkSizesOther):\n numFiles = []\n fileSize = []\n for i in chunkSizesXY:\n if twoD:\n count = file_count(shape, i, **{f\"chunk{chunkDim}\": chunkOther})\n else:\n # Could be simpler\n count = file_count(\n shape, i, chunkZ=i, **{f\"chunk{chunkDim}\": chunkOther}\n )\n numFiles.append(count)\n fileSize.append(i)\n ax.plot(\n fileSize,\n numFiles,\n linewidth=0.5,\n label=f\"{chunkOther}\",\n linestyle=styles.pop(0),\n )\n\n ax.annotate(\n annTitle,\n xy=xy,\n xycoords=\"data\",\n xytext=(0, 40),\n textcoords=\"offset points\",\n arrowprops=dict(facecolor=\"black\", shrink=0.05),\n horizontalalignment=\"left\",\n verticalalignment=\"center\",\n fontsize=font - 4,\n )\n leg = ax.legend(\n loc=\"lower left\",\n title=f\"Chunk size ({chunkDim})\",\n frameon=False,\n prop={\"size\": font},\n )\n for legobj in leg.legendHandles:\n legobj.set_linewidth(0.5)\n\n for axis in [\"top\", \"bottom\", \"left\", \"right\"]:\n ax.spines[axis].set_linewidth(0.5)\n\n return fig\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"filename\")\n ns = parser.parse_args()\n # fig = plt.figure()\n # ax2D = fig.add_subplot(2, 1, 1)\n # ax3D = fig.add_subplot(2, 1, 2)\n\n fig, ax = plt.subplots(1, 2, figsize=(12, 5))\n plot(ax[1], False)\n plot(ax[0], True)\n\n plt.savefig(ns.filename)\n"
] | [
[
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.subplots"
]
] |
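The `file_count` helper in this entry is just a product of per-axis ceilings, so the chunk counts highlighted in the figure annotations can be checked without plotting. Re-implemented here for the two annotated operating points:

```python
import math

def file_count(shape, chunkXY, chunkZ=1, chunkT=1, chunkC=1):
    t, c, z, y, x = shape
    return (math.ceil(x / chunkXY) * math.ceil(y / chunkXY)
            * math.ceil(z / chunkZ) * math.ceil(t / chunkT) * math.ceil(c / chunkC))

# 2D case: XYZCT = (64k, 64k, 1, 8, 1) with 256x256 chunks
print(file_count((1, 8, 1, 2 ** 16, 2 ** 16), 256))           # 524288
# 3D case: XYZCT = (1k, 1k, 1k, 1, 100) with 32x32x32 chunks
print(file_count((100, 1, 1024, 1024, 1024), 32, chunkZ=32))  # 3276800
```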
H43RO/examples | [
"54acd5f38d6368a29208b231e5028f16d18c954b"
] | [
"tensorflow_examples/lite/model_maker/core/task/model_spec/audio_spec.py"
] | [
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Audio model specification.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\nimport collections\nimport csv\nimport io\nimport os\nimport tempfile\n\nimport tensorflow as tf\nfrom tensorflow_examples.lite.model_maker.core.api.api_util import mm_export\nfrom tensorflow_examples.lite.model_maker.core.task import model_util\nimport tensorflow_hub as hub\n\ntry:\n from tflite_support.metadata_writers import audio_classifier as md_writer # pylint: disable=g-import-not-at-top\n from tflite_support.metadata_writers import metadata_info as md_info # pylint: disable=g-import-not-at-top\n from tflite_support.metadata_writers import writer_utils # pylint: disable=g-import-not-at-top\n ENABLE_METADATA = True\nexcept ImportError:\n ENABLE_METADATA = False\n\n\nclass MetadataWriter:\n \"\"\"Helper class to populate Audio Metadata, to be used in `with` statement.\n\n Simple usage for model with two classification heads.\n\n with MetadataWriter(tflite_path) as writer:\n writer.add_input(sample_rate=16000, channels=1)\n writer.add_output(name='animal_sound', labels=['dog', 'cat'])\n writer.add_output(name='speech_command', labels=['yes', 'no'])\n writer.save(tflite_path, json_filepath)\n\n `add_output` can also take an ordered dict for multiple locales, example:\n\n writer.add_output(name='animal_sound', labels=collections.OrderedDict([\n ('en', ['bird', 'cat']),\n ('fr', ['oiseau', 'chat'])\n ]))\n \"\"\"\n\n def __init__(self, tflite_filepath, **kwargs):\n self._model = writer_utils.load_file(tflite_filepath)\n self._general_md = md_info.GeneralMd(**kwargs)\n self._inputs = []\n self._outputs = []\n\n def __enter__(self):\n self._temp_folder = tempfile.TemporaryDirectory()\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self._temp_folder.cleanup()\n # Delete the attribute so that it errors out if not in `with` statement.\n delattr(self, '_temp_folder')\n\n def add_input(self, **kwargs):\n \"\"\"Add metadta for the input tensor.\"\"\"\n self._inputs.append(md_info.InputAudioTensorMd(**kwargs))\n\n def add_output(self, name, labels, **kwargs):\n \"\"\"Add metadata for output tensor in order.\"\"\"\n if isinstance(labels, list):\n default_locale = None\n labels = collections.OrderedDict([(default_locale, labels)])\n return self.add_output(name, labels, **kwargs)\n\n label_files = []\n if isinstance(labels, collections.OrderedDict):\n for locale, label_list in labels.items():\n full_path = os.path.join(\n self._temp_folder.name,\n '{}_labels_{}.txt'.format(name, locale or 'default'))\n model_util.export_labels(full_path, label_list)\n label_files.append(\n md_info.LabelFileMd(file_path=full_path, locale=locale))\n else:\n raise ValueError(\n '`labels` should be either a list of labels or an ordered dict mapping `locale` -> list of labels. 
got: {}'\n .format(labels))\n\n idx = len(self._outputs)\n self._outputs.append(\n md_info.ClassificationTensorMd(\n name=name,\n label_files=label_files,\n tensor_type=writer_utils.get_output_tensor_types(self._model)[idx],\n **kwargs))\n\n def save(self, tflite_filepath=None, json_filepath=None):\n \"\"\"Persist model with metadata.\"\"\"\n if len(self._inputs) > 1:\n raise ValueError('Only supports single input, got {}'.format(\n len(self._inputs)))\n input_md = self._inputs[0]\n\n writer = md_writer.MetadataWriter.create_from_metadata_info_for_multihead(\n model_buffer=self._model,\n general_md=self._general_md,\n input_md=input_md,\n output_md_list=self._outputs)\n if tflite_filepath:\n writer_utils.save_file(writer.populate(), tflite_filepath, mode='wb')\n if json_filepath:\n writer_utils.save_file(\n writer.get_metadata_json(), json_filepath, mode='wt')\n\n\ndef _ensure_tf25(version):\n if version < '2.5':\n raise RuntimeError(\n 'Audio Tasks requires TF2.5 or later. For example, you can run the '\n 'following command to install TF2.5.0rc2:\\n\\n'\n 'pip3 install tensorflow==2.5.0rc2\\n\\n')\n\n\ndef _get_tf_version():\n return tf.__version__\n\n\nclass BaseSpec(abc.ABC):\n \"\"\"Base model spec for audio classification.\"\"\"\n\n def __init__(self, model_dir=None, strategy=None):\n _ensure_tf25(_get_tf_version())\n self.model_dir = model_dir\n if not model_dir:\n self.model_dir = tempfile.mkdtemp()\n tf.compat.v1.logging.info('Checkpoints are stored in %s', self.model_dir)\n self.strategy = strategy or tf.distribute.get_strategy()\n\n @abc.abstractproperty\n def target_sample_rate(self):\n pass\n\n @abc.abstractmethod\n def create_model(self, num_classes, train_whole_model=False):\n pass\n\n @abc.abstractmethod\n def run_classifier(self, model, epochs, train_ds, validation_ds, **kwargs):\n pass\n\n def preprocess_ds(self, ds, is_training=False, cache_fn=None):\n \"\"\"Returns a preprocessed dataset.\"\"\"\n _ = is_training\n _ = cache_fn\n return ds\n\n def get_default_quantization_config(self):\n \"\"\"Gets the default quantization configuration.\"\"\"\n return None\n\n\ndef _remove_suffix_if_possible(text, suffix):\n return text.rsplit(suffix, 1)[0]\n\n\nTFJS_MODEL_ROOT = 'https://storage.googleapis.com/tfjs-models/tfjs'\n\n\ndef _load_browser_fft_preprocess_model():\n \"\"\"Load a model replicating WebAudio's AnalyzerNode.getFloatFrequencyData.\"\"\"\n model_name = 'sc_preproc_model'\n file_extension = '.tar.gz'\n filename = model_name + file_extension\n # Load the preprocessing model, which transforms audio waveform into\n # spectrograms (2D image-like representation of sound).\n # This model replicates WebAudio's AnalyzerNode.getFloatFrequencyData\n # (https://developer.mozilla.org/en-US/docs/Web/API/AnalyserNode/getFloatFrequencyData).\n # It performs short-time Fourier transform (STFT) using a length-2048 Blackman\n # window. 
It opeartes on mono audio at the 44100-Hz sample rate.\n filepath = tf.keras.utils.get_file(\n filename,\n f'{TFJS_MODEL_ROOT}/speech-commands/conversion/{filename}',\n cache_subdir='model_maker',\n extract=True)\n model_path = _remove_suffix_if_possible(filepath, file_extension)\n return tf.keras.models.load_model(model_path)\n\n\ndef _load_tfjs_speech_command_model():\n \"\"\"Download TFJS speech command model for fine-tune.\"\"\"\n origin_root = f'{TFJS_MODEL_ROOT}/speech-commands/v0.3/browser_fft/18w'\n files_to_download = [\n 'metadata.json', 'model.json', 'group1-shard1of2', 'group1-shard2of2'\n ]\n for filename in files_to_download:\n filepath = tf.keras.utils.get_file(\n filename,\n f'{origin_root}/{filename}',\n cache_subdir='model_maker/tfjs-sc-model')\n model_path = os.path.join(os.path.dirname(filepath), 'model.json')\n return model_util.load_tfjs_keras_model(model_path)\n\n\n@mm_export('audio_classifier.BrowserFftSpec')\nclass BrowserFFTSpec(BaseSpec):\n \"\"\"Model good at detecting speech commands, using Browser FFT spectrum.\"\"\"\n\n EXPECTED_WAVEFORM_LENGTH = 44032\n\n # Information used to populate TFLite metadata.\n _MODEL_NAME = 'AudioClassifier'\n _MODEL_DESCRIPTION = ('Identify the most prominent type in the audio clip '\n 'from a known set of categories.')\n\n _MODEL_VERSION = 'v1'\n _MODEL_AUTHOR = 'TensorFlow Lite Model Maker'\n _MODEL_LICENSES = ('Apache License. Version 2.0 '\n 'http://www.apache.org/licenses/LICENSE-2.0.')\n\n _SAMPLE_RATE = 44100\n _CHANNELS = 1\n\n _INPUT_NAME = 'audio_clip'\n _INPUT_DESCRIPTION = 'Input audio clip to be classified.'\n\n _OUTPUT_NAME = 'probability'\n _OUTPUT_DESCRIPTION = 'Scores of the labels respectively.'\n\n def __init__(self, model_dir=None, strategy=None):\n \"\"\"Initialize a new instance for BrowserFFT spec.\n\n Args:\n model_dir: The location to save the model checkpoint files.\n strategy: An instance of TF distribute strategy. 
If none, it will use the\n default strategy (either SingleDeviceStrategy or the current scoped\n strategy.\n \"\"\"\n super(BrowserFFTSpec, self).__init__(model_dir, strategy)\n self._preprocess_model = _load_browser_fft_preprocess_model()\n self._tfjs_sc_model = _load_tfjs_speech_command_model()\n\n @property\n def target_sample_rate(self):\n return 44100\n\n @tf.function(input_signature=[\n tf.TensorSpec(shape=[None], dtype=tf.float32),\n tf.TensorSpec([], dtype=tf.int32)\n ])\n def _ensure_length(self, wav, unused_label):\n return len(wav) >= self.EXPECTED_WAVEFORM_LENGTH\n\n @tf.function(input_signature=[\n tf.TensorSpec(shape=[None], dtype=tf.float32),\n tf.TensorSpec([], dtype=tf.int32)\n ])\n def _split(self, wav, label):\n \"\"\"Split the long audio samples into multiple trunks.\"\"\"\n # wav shape: (audio_samples, )\n chunks = tf.math.floordiv(len(wav), self.EXPECTED_WAVEFORM_LENGTH)\n unused = tf.math.floormod(len(wav), self.EXPECTED_WAVEFORM_LENGTH)\n # Drop unused data\n wav = wav[:len(wav) - unused]\n # Split the audio sample into multiple chunks\n wav = tf.reshape(wav, (chunks, 1, self.EXPECTED_WAVEFORM_LENGTH))\n\n return wav, tf.repeat(tf.expand_dims(label, 0), len(wav))\n\n @tf.function(input_signature=[\n tf.TensorSpec(shape=[1, EXPECTED_WAVEFORM_LENGTH], dtype=tf.float32),\n tf.TensorSpec([], dtype=tf.int32)\n ])\n def _preprocess(self, x, label):\n \"\"\"Preprocess the dataset to extract the spectrum.\"\"\"\n # x has shape (1, EXPECTED_WAVEFORM_LENGTH)\n spectrum = self._preprocess_model(x)\n # y has shape (1, embedding_len)\n spectrum = tf.squeeze(spectrum, axis=0)\n # y has shape (embedding_len,)\n return spectrum, label\n\n def preprocess_ds(self, ds, is_training=False, cache_fn=None):\n del is_training\n\n autotune = tf.data.AUTOTUNE\n ds = ds.filter(self._ensure_length)\n ds = ds.map(self._split, num_parallel_calls=autotune).unbatch()\n ds = ds.map(self._preprocess, num_parallel_calls=autotune)\n if cache_fn:\n ds = cache_fn(ds)\n return ds\n\n def create_model(self, num_classes, train_whole_model=False):\n if num_classes <= 1:\n raise ValueError(\n 'AudioClassifier expects `num_classes` to be greater than 1')\n model = tf.keras.Sequential()\n for layer in self._tfjs_sc_model.layers[:-1]:\n model.add(layer)\n model.add(\n tf.keras.layers.Dense(\n name='classification_head', units=num_classes,\n activation='softmax'))\n if not train_whole_model:\n # Freeze all but the last layer of the model. 
The last layer will be\n # fine-tuned during transfer learning.\n for layer in model.layers[:-1]:\n layer.trainable = False\n return model\n\n def run_classifier(self, model, epochs, train_ds, validation_ds, **kwargs):\n model.compile(\n optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])\n\n hist = model.fit(\n train_ds, validation_data=validation_ds, epochs=epochs, **kwargs)\n return hist\n\n def create_serving_model(self, training_model):\n \"\"\"Create a model for serving.\"\"\"\n combined = tf.keras.Sequential()\n combined.add(self._preprocess_model)\n combined.add(training_model)\n # Build the model.\n combined.build([None, self.EXPECTED_WAVEFORM_LENGTH])\n return combined\n\n def _export_metadata(self, tflite_filepath, index_to_label,\n export_metadata_json_file):\n \"\"\"Export TFLite metadata.\"\"\"\n with MetadataWriter(\n tflite_filepath,\n name=self._MODEL_NAME,\n description=self._MODEL_DESCRIPTION,\n version=self._MODEL_VERSION,\n author=self._MODEL_AUTHOR,\n licenses=self._MODEL_LICENSES) as writer:\n writer.add_input(\n name=self._INPUT_NAME,\n description=self._INPUT_DESCRIPTION,\n sample_rate=self._SAMPLE_RATE,\n channels=self._CHANNELS)\n\n writer.add_output(\n labels=index_to_label,\n name=self._OUTPUT_NAME,\n description=self._OUTPUT_DESCRIPTION)\n\n json_filepath = (os.path.splitext(tflite_filepath)[0] +\n '.json') if export_metadata_json_file else None\n writer.save(tflite_filepath, json_filepath)\n\n def export_tflite(self,\n model,\n tflite_filepath,\n with_metadata=True,\n export_metadata_json_file=True,\n index_to_label=None):\n \"\"\"Converts the retrained model to tflite format and saves it.\n\n This method overrides the default `CustomModel._export_tflite` method, and\n include the pre-processing in the exported TFLite library since support\n library can't handle audio tasks yet.\n\n Args:\n model: An instance of the keras classification model to be exported.\n tflite_filepath: File path to save tflite model.\n with_metadata: Whether the output tflite model contains metadata.\n export_metadata_json_file: Whether to export metadata in json file. If\n True, export the metadata in the same directory as tflite model.Used\n only if `with_metadata` is True.\n index_to_label: A list that map from index to label class name.\n \"\"\"\n combined = self.create_serving_model(model)\n\n # Sets batch size from None to 1 when converting to tflite.\n model_util.set_batch_size(model, batch_size=1)\n\n model_util.export_tflite(\n combined, tflite_filepath, quantization_config=None)\n\n # Sets batch size back to None to support retraining later.\n model_util.set_batch_size(model, batch_size=None)\n\n if with_metadata:\n if not ENABLE_METADATA:\n print('Writing Metadata is not support in the installed tflite-support '\n 'version. Please use tflite-support >= 0.2.*')\n else:\n self._export_metadata(tflite_filepath, index_to_label,\n export_metadata_json_file)\n\n\n@mm_export('audio_classifier.YamNetSpec')\nclass YAMNetSpec(BaseSpec):\n \"\"\"Model good at detecting environmental sounds, using YAMNet embedding.\"\"\"\n\n EXPECTED_WAVEFORM_LENGTH = 15600 # effectively 0.975s\n EMBEDDING_SIZE = 1024\n\n # Information used to populate TFLite metadata.\n _MODEL_NAME = 'yamnet/classification'\n _MODEL_DESCRIPTION = 'Recognizes sound events'\n _MODEL_VERSION = 'v1'\n _MODEL_AUTHOR = 'TensorFlow Lite Model Maker'\n _MODEL_LICENSES = ('Apache License. 
Version 2.0 '\n 'http://www.apache.org/licenses/LICENSE-2.0.')\n\n _SAMPLE_RATE = 16000\n _CHANNELS = 1\n\n _INPUT_NAME = 'audio_clip'\n _INPUT_DESCRIPTION = 'Input audio clip to be classified.'\n\n _YAMNET_OUTPUT_NAME = 'yamnet'\n _YAMNET_OUTPUT_DESCRIPTION = ('Scores in range 0..1.0 for each of the 521 '\n 'output classes.')\n\n _CUSTOM_OUTPUT_NAME = 'custom'\n _CUSTOM_OUTPUT_DESCRIPTION = (\n 'Scores in range 0..1.0 for each output classes.')\n\n def __init__(\n self,\n model_dir: None = None,\n strategy: None = None,\n yamnet_model_handle='https://tfhub.dev/google/yamnet/1',\n frame_length=EXPECTED_WAVEFORM_LENGTH, # Window size 0.975 s\n frame_step=EXPECTED_WAVEFORM_LENGTH // 2, # Hop of 0.975 /2 s\n keep_yamnet_and_custom_heads=True):\n \"\"\"Initialize a new instance for YAMNet spec.\n\n Args:\n model_dir: The location to save the model checkpoint files.\n strategy: An instance of TF distribute strategy. If none, it will use the\n default strategy (either SingleDeviceStrategy or the current scoped\n strategy.\n yamnet_model_handle: Path of the TFHub model for retrining.\n frame_length: The number of samples in each audio frame. If the audio file\n is shorter than `frame_length`, then the audio file will be ignored.\n frame_step: The number of samples between two audio frames. This value\n should be bigger than `frame_length`.\n keep_yamnet_and_custom_heads: Boolean, decides if the final TFLite model\n contains both YAMNet and custom trained classification heads. When set\n to False, only the trained custom head will be preserved.\n \"\"\"\n super(YAMNetSpec, self).__init__(model_dir, strategy)\n self._yamnet_model_handle = yamnet_model_handle\n self._yamnet_model = hub.load(yamnet_model_handle)\n self._frame_length = frame_length\n self._frame_step = frame_step\n self._keep_yamnet_and_custom_heads = keep_yamnet_and_custom_heads\n\n @property\n def target_sample_rate(self):\n return self._SAMPLE_RATE\n\n def create_model(self, num_classes, train_whole_model=False):\n model = tf.keras.Sequential([\n tf.keras.layers.InputLayer(\n input_shape=(YAMNetSpec.EMBEDDING_SIZE),\n dtype=tf.float32,\n name='embedding'),\n tf.keras.layers.Dense(\n num_classes, name='classification_head', activation='softmax')\n ])\n return model\n\n def run_classifier(self, model, epochs, train_ds, validation_ds, **kwargs):\n model.compile(\n optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])\n\n hist = model.fit(\n train_ds, validation_data=validation_ds, epochs=epochs, **kwargs)\n return hist\n\n # Annotate the TF function with input_signature to avoid re-tracing. 
Otherwise\n # the TF function gets retraced everytime the input shape is changed.\n # Check https://www.tensorflow.org/api_docs/python/tf/function#args_1 for more\n # information.\n @tf.function(input_signature=[\n tf.TensorSpec(shape=[None], dtype=tf.float32),\n tf.TensorSpec([], dtype=tf.int32)\n ])\n def _frame(self, wav, label):\n clips = tf.signal.frame(\n wav, frame_length=self._frame_length, frame_step=self._frame_step)\n batch_labels = tf.repeat(tf.expand_dims(label, 0), len(clips))\n\n return clips, batch_labels\n\n @tf.function(input_signature=[\n tf.TensorSpec(shape=[None], dtype=tf.float32),\n tf.TensorSpec([], dtype=tf.int32)\n ])\n def _extract_embedding(self, wav, label):\n _, embeddings, _ = self._yamnet_model(wav) # (chunks, EMBEDDING_SIZE)\n embedding = tf.reduce_mean(embeddings, axis=0)\n return embedding, label\n\n @tf.function(input_signature=[\n tf.TensorSpec(shape=[EMBEDDING_SIZE], dtype=tf.float32),\n tf.TensorSpec([], dtype=tf.int32)\n ])\n def _add_noise(self, embedding, label):\n noise = tf.random.normal(\n embedding.shape, mean=0.0, stddev=.2, dtype=tf.dtypes.float32)\n return noise + embedding, label\n\n def preprocess_ds(self, ds, is_training=False, cache_fn=None):\n autotune = tf.data.AUTOTUNE\n ds = ds.map(self._frame, num_parallel_calls=autotune).unbatch()\n ds = ds.map(self._extract_embedding, num_parallel_calls=autotune)\n\n # Cache intermediate results right before data augmentation.\n if cache_fn:\n ds = cache_fn(ds)\n\n if is_training:\n ds = ds.map(self._add_noise, num_parallel_calls=autotune)\n return ds\n\n def _yamnet_labels(self):\n class_map_path = self._yamnet_model.class_map_path().numpy()\n class_map_csv_text = tf.io.read_file(class_map_path).numpy().decode('utf-8')\n class_map_csv = io.StringIO(class_map_csv_text)\n class_names = [\n display_name for (class_index, mid,\n display_name) in csv.reader(class_map_csv)\n ]\n class_names = class_names[1:] # Skip CSV header\n return class_names\n\n def _export_metadata(self, tflite_filepath, index_to_label,\n export_metadata_json_file):\n \"\"\"Export TFLite metadata.\"\"\"\n with MetadataWriter(\n tflite_filepath,\n name=self._MODEL_NAME,\n description=self._MODEL_DESCRIPTION,\n version=self._MODEL_VERSION,\n author=self._MODEL_AUTHOR,\n licenses=self._MODEL_LICENSES) as writer:\n writer.add_input(\n name=self._INPUT_NAME,\n description=self._INPUT_DESCRIPTION,\n sample_rate=self._SAMPLE_RATE,\n channels=self._CHANNELS)\n\n if self._keep_yamnet_and_custom_heads:\n writer.add_output(\n labels=self._yamnet_labels(),\n name=self._YAMNET_OUTPUT_NAME,\n description=self._YAMNET_OUTPUT_DESCRIPTION)\n\n writer.add_output(\n labels=index_to_label,\n name=self._CUSTOM_OUTPUT_NAME,\n description=self._CUSTOM_OUTPUT_DESCRIPTION)\n\n json_filepath = (os.path.splitext(tflite_filepath)[0] +\n '.json') if export_metadata_json_file else None\n writer.save(tflite_filepath, json_filepath)\n\n def create_serving_model(self, training_model):\n \"\"\"Create a model for serving.\"\"\"\n embedding_extraction_layer = hub.KerasLayer(\n self._yamnet_model_handle, trainable=False)\n keras_input = tf.keras.Input(\n shape=(YAMNetSpec.EXPECTED_WAVEFORM_LENGTH,),\n dtype=tf.float32,\n name='audio') # (1, wav)\n reshaped_input = tf.reshape(keras_input,\n (YAMNetSpec.EXPECTED_WAVEFORM_LENGTH,)) # (wav)\n\n scores, embeddings, _ = embedding_extraction_layer(reshaped_input)\n serving_outputs = training_model(embeddings)\n\n if self._keep_yamnet_and_custom_heads:\n serving_model = tf.keras.Model(keras_input, [scores, 
serving_outputs])\n else:\n serving_model = tf.keras.Model(keras_input, serving_outputs)\n\n return serving_model\n\n def export_tflite(self,\n model,\n tflite_filepath,\n with_metadata=True,\n export_metadata_json_file=True,\n index_to_label=None):\n \"\"\"Converts the retrained model to tflite format and saves it.\n\n This method overrides the default `CustomModel._export_tflite` method, and\n include the spectrom extraction in the model.\n\n The exported model has input shape (1, number of wav samples)\n\n Args:\n model: An instance of the keras classification model to be exported.\n tflite_filepath: File path to save tflite model.\n with_metadata: Whether the output tflite model contains metadata.\n export_metadata_json_file: Whether to export metadata in json file. If\n True, export the metadata in the same directory as tflite model. Used\n only if `with_metadata` is True.\n index_to_label: A list that map from index to label class name.\n \"\"\"\n serving_model = self.create_serving_model(model)\n\n # TODO(b/164229433): Remove SELECT_TF_OPS once changes in the bug are\n # released.\n model_util.export_tflite(\n serving_model, tflite_filepath, quantization_config=None)\n\n if with_metadata:\n if not ENABLE_METADATA:\n print('Writing Metadata is not support in the current tflite-support '\n 'version. Please use tflite-support >= 0.2.*')\n else:\n self._export_metadata(tflite_filepath, index_to_label,\n export_metadata_json_file)\n"
] | [
[
"tensorflow.keras.Input",
"tensorflow.reshape",
"tensorflow.keras.models.load_model",
"tensorflow.keras.utils.get_file",
"tensorflow.keras.Sequential",
"tensorflow.reduce_mean",
"tensorflow.expand_dims",
"tensorflow.keras.Model",
"tensorflow.squeeze",
"tensorflow.keras.layers.InputLayer",
"tensorflow.random.normal",
"tensorflow.keras.layers.Dense",
"tensorflow.signal.frame",
"tensorflow.TensorSpec",
"tensorflow.distribute.get_strategy",
"tensorflow.io.read_file",
"tensorflow.compat.v1.logging.info"
]
] |
Jmion/SwisscomMIP | [
"d29b0de222be44f85a84bc7dc3f4521741fdeda1"
] | [
"dataFetcher.py"
] | [
"#!/usr/bin/env python\n# coding: utf-8\n\n# # Loading data\n\n\n\nimport pandas as pd\nimport plotly.express as px\nfrom tqdm import tqdm\nimport functools\nimport numpy as np\nfrom difflib import SequenceMatcher\nfrom oauthlib.oauth2 import BackendApplicationClient\nfrom requests_oauthlib import OAuth2Session\nfrom datetime import datetime, timedelta\nimport pprint\nimport requests\nimport os\nimport getpass\nimport json\n\nfrom queue import Queue\nfrom threading import Thread\nfrom time import time\nimport logging\nimport os\n\n\n\n#cashing in case of multiple calls.\[email protected]_cache(maxsize=128)\ndef get_tiles(municipalityId: int) -> pd.DataFrame:\n \"\"\"Fetches tile information for a municipality id.\n \n Args:\n municipalityId: id of the municipality as defined in by the federal office of statistics,\n https://www.bfs.admin.ch/bfs/fr/home/bases-statistiques/repertoire-officiel-communes-suisse.assetdetail.11467406.html\n \n Return:\n A dataframe containing the following columns:\n [tileId, ll_lon, ll_lat, urL-lon, ur_lat]\n \n tileID: corresponds to a unique ID as defined in the Swisscom FAQ page.\n ll_lon: longitude coordinate of the lower left corner of the tile.\n ll_lat: latitude coordinate of the lower left corner of the tile.\n ur_lon: longitude coordinate of the upper right corner of the tile.\n ur_lat: latitude coordinate of the upper right corner of the tile.\n \n If municipalityId is invalid will print an error message and return an empty DataFrame\n \"\"\"\n api_request = (\n BASE_URL\n + f'/grids/municipalities/{municipalityId}'\n )\n\n data = oauth.get(api_request, headers=headers).json()\n if(data.get('status') == None):\n tileID = [t['tileId'] for t in data['tiles']]\n ll_lon = [t['ll']['x'] for t in data['tiles']]\n ll_lat= [t['ll']['y'] for t in data['tiles']]\n ur_lon = [t['ur']['x'] for t in data['tiles']]\n ur_lat = [t['ur']['y'] for t in data['tiles']]\n else:\n print(f'get_tiles: failed with status code {data.get(\"status\")}. 
{data.get(\"message\")}')\n return pd.DataFrame(data={'tileID': [], 'll_lat': [], 'll_lon': [], 'ur_lat': [], 'ur_lon': []})\n \n return pd.DataFrame(data={'tileID': tileID, 'll_lat': ll_lat, 'll_lon': ll_lon, 'ur_lat': ur_lat, 'ur_lon': ur_lon})\n\n\n\ndef get_municipalityID(name: str) -> np.array(int):\n \"\"\"Converts a municipality name to ID\n \n Args:\n name of municipality.\n \n Returns:\n An array containing all the municipality ID's corresponding to the name.\n \n If the name invalid will return an empty array.\n \"\"\"\n return commune.loc[commune.GDENAME == name].GDENR.to_numpy()\n\n\n\ndef visualize_coordinates(df: pd.DataFrame, latitude: str, longitude: str) -> None :\n \"\"\"Visualizes coordinates in dataframe on map\n \n Retrieves columns with name latitude and logitude and visualizes it on a map.\n \n Args:\n df: A dataframe containing the coordinates.\n latitude: String key of the column in the dataframe containing the latitude.\n longitude: String key of the column in the dataframe containing the longitude.\n \"\"\"\n fig = px.scatter_mapbox(df, lat=latitude, lon=longitude,\n color_continuous_scale=px.colors.cyclical.IceFire, size_max=15, zoom=10,\n mapbox_style=\"carto-positron\")\n fig.show()\n\n\n\n\ndef get_all_tiles_switzerland() -> pd.DataFrame:\n \"\"\"Fetches the tile information for all the tiles in Switzerland.\n \n Returns:\n A Dataframe containg the tile information for every tile in switzerland.\n \n The format of the DataFrame is the same as the return of get_tiles()\n \n \"\"\"\n tiles = get_tiles(commune.GDENR.unique()[0])\n for c in tqdm(commune.GDENR.unique().tolist()):\n tiles = tiles.append(get_tiles(c))\n return tiles\n\n\n\n\n\ndef get_daily_demographics(tiles, day=datetime(year=2020, month=1, day=27, hour=0, minute=0) ):\n \"\"\"Fetches daily demographics\n \n Fetches the daily demographics, age distribution, of the tiles.\n \n Args:\n tiles: Array of tile id's, what will be used to querry demographic data.\n day: date of the data to be fetched.\n \n Returns:\n A dataframe containing as a key the tileID and as columns ageDistribution and the maleProportion\n \n +----------+-----------------------+---------------------+\n | | ageDistribution | maleProportion |\n +----------+-----------------------+---------------------+\n | 44554639 | NaN | 0.49828359484672546 |\n +----------+-----------------------+---------------------+\n | 44271906 | [0.21413850784301758, | 0.493218 |\n | | 0.27691012620925903, | |\n | | 0.37422287464141846, | |\n | | 0.13472850620746613] | |\n +----------+-----------------------+---------------------+\n In the example above tile 44554639 does not have any age distribution data.\n \n The data is k-anonymized. Therefor is some tiles are missing data it\n means that the data is not available. 
To find out more about demographics visit the Heatmap FAQ.\n \"\"\"\n dates = [(day + timedelta(hours=delta)) for delta in range(24)]\n date2score = dict()\n for tiles_subset in [tiles[i:i + MAX_NB_TILES_REQUEST] for i in range(0, len(tiles), MAX_NB_TILES_REQUEST)]:\n api_request = (\n BASE_URL\n + f'/heatmaps/dwell-demographics/daily/{day.isoformat().split(\"T\")[0]}'\n + \"?tiles=\"\n + \"&tiles=\".join(map(str, tiles_subset))\n )\n data = oauth.get(api_request, headers=headers).json()\n for t in data.get(\"tiles\", []):\n if date2score.get(t['tileId']) == None:\n date2score[t['tileId']] = dict()\n date2score[t['tileId']] = {\"ageDistribution\": t.get(\"ageDistribution\"),\"maleProportion\": t.get(\"maleProportion\")}\n \n \n return pd.DataFrame.from_dict(date2score).transpose()\n\n\n\n\n\ndef get_hourly_demographics_dataframe(tiles, day=datetime(year=2020, month=1, day=27, hour=0, minute=0)):\n \"\"\"Fetches hourly demographics of age categories for 24 hours\n \n Fetches the hourly demographics, age distribution, of the tiles.\n \n Age categories are the following 0 - 19, 20 - 39, 40 - 64, >64\n \n Args:\n tiles: Array of tile id's, what will be used to querry demographic data.\n day: date of the data to be fetched.\n \n Returns:\n DataFrame containing the demographics. The name\n of the collumns are:\n [age_cat, age_distribution, male_proportion]\n \n +----------+---------------------+---------+------------------+-----------------+\n | | | age_cat | age_distribution | male_proportion |\n +----------+---------------------+---------+------------------+-----------------+\n | tileID | time | | | |\n +----------+---------------------+---------+------------------+-----------------+\n | 44394309 | 2020-01-27T00:00:00 | NaN | NaN | 0.474876 |\n +----------+---------------------+---------+------------------+-----------------+\n | | 2020-01-27T01:00:00 | NaN | NaN | 0.483166 |\n +----------+---------------------+---------+------------------+-----------------+\n | | ... | | | |\n +----------+---------------------+---------+------------------+-----------------+\n | 44290729 | 2020-01-27T06:00:00 | 0.0 | 0.192352 | 0.497038 |\n +----------+---------------------+---------+------------------+-----------------+\n | | 2020-01-27T06:00:00 | 1.0 | 0.269984 | 0.497038 |\n +----------+---------------------+---------+------------------+-----------------+\n | | 2020-01-27T06:00:00 | 2.0 | 0.363481 | 0.497038 |\n +----------+---------------------+---------+------------------+-----------------+\n | | 2020-01-27T06:00:00 | 3.0 | 0.174183 | 0.497038 |\n +----------+---------------------+---------+------------------+-----------------+\n \n The data is k-anonymized. Therefor is some tiles are not present in the output dataframe it \n means that the data is not available. 
To find out more about demographics visit the Heatmap FAQ.\n \"\"\"\n def get_hourly_demographics(tiles, day=datetime(year=2020, month=1, day=27, hour=0, minute=0) ):\n \"\"\"Fetches hourly male proportion and age categories for 24 hours\n\n Args:\n tiles: Array of tile id's, what will be used to querry demographic data.\n day: date of the data to be fetched.\n\n Returns:\n Returns a dictionary with as a key the tileID, and as a value an object that is as follows:\n\n {tileID: {dateTime:{ \"ageDistribution\": [0-19, 20-39, 40-64, 64+], \"maleProportion\": value},\n {dateTime2: ...}}}\n\n\n\n 26994514: {'2020-01-27T00:00:00': {'ageDistribution': [0.1925136297941208,\n 0.2758632302284241,\n 0.362215131521225,\n 0.16940800845623016],\n 'maleProportion': 0.4727686941623688},\n '2020-01-27T01:00:00': {'ageDistribution': None,\n 'maleProportion': 0.4896690547466278},\n '2020-01-27T02:00:00': {'ageDistribution': None,\n 'maleProportion': 0.48882684111595154},\n\n The data is k-anonymized. Therefor is some values are None it means that no data was available \n To find out more about demographics visit the Heatmap FAQ.\n \"\"\"\n dates = [(day + timedelta(hours=delta)) for delta in range(24)]\n date2score = dict()\n for dt in tqdm(dates, desc=\"get_hourly_demographics: hours\", leave=True):\n for tiles_subset in [tiles[i:i + 100] for i in range(0, len(tiles), 100)]:\n api_request = (\n BASE_URL\n + f'/heatmaps/dwell-demographics/hourly/{dt.isoformat()}'\n + \"?tiles=\"\n + \"&tiles=\".join(map(str, tiles_subset))\n )\n data = oauth.get(api_request, headers=headers).json()\n for t in data.get(\"tiles\", []):\n if date2score.get(t['tileId']) == None:\n date2score[t['tileId']] = dict()\n date2score.get(t['tileId'])[dt.isoformat()] = {\"ageDistribution\": t.get(\"ageDistribution\"),\"maleProportion\": t.get(\"maleProportion\")}\n return date2score\n \n \n \n data = get_hourly_demographics(tiles, day)\n tile_id = []\n time_data = []\n age_distribution = []\n age_cat = []\n male_proportion = []\n for i in data:\n for time in data[i]:\n if data[i][time].get(\"ageDistribution\") != None:\n for (idx,a) in enumerate(data[i][time].get(\"ageDistribution\", [])):\n age_cat.append(idx)\n age_distribution.append(a)\n tile_id.append(i)\n time_data.append(time)\n male_proportion.append(data[i][time].get(\"maleProportion\"))\n else:\n tile_id.append(i)\n time_data.append(time)\n age_distribution.append(None)\n male_proportion.append(data[i][time].get(\"maleProportion\"))\n age_cat.append(None)\n return pd.DataFrame(data={'tileID': tile_id, \"age_cat\": age_cat, 'age_distribution':age_distribution, \"male_proportion\": male_proportion, 'time': time_data}).set_index(['tileID', 'time'])\n\n\n\n\ndef get_daily_density(tiles: np.array(int), day=datetime(year=2020, month=1, day=27)) -> pd.DataFrame:\n \"\"\"Fetches the daily density of tiles.\n \n Fetches the daily density of the tiles and creates a dataframe of the fetched data.\n \n Args:\n tiles: Array of tile id's that daily density data needs to be fetched.\n day: Day to fetch the density data for.\n \n Returns:\n DataFrame containg the tileId and the score. 
The name of the collumns are:\n [score]\n \n The identifier of the row is bassed on the tileID\n \n +----------+-------+\n | | score |\n +----------+-------+\n | tileID | |\n +----------+-------+\n | 44394309 | 1351 |\n +----------+-------+\n | 44394315 | 1103 |\n +----------+-------+\n | 44460297 | 875 |\n +----------+-------+\n | 44488589 | 1387 |\n +----------+-------+\n | 44498028 | 678 |\n +----------+-------+\n \n Tile with k-anonymized dwell density score. If tile not present Swisscom is\n unable to provide a value due to k-anonymization. To find out more on density\n scores read the Heatmap FAQ. \n \"\"\"\n tileID = []\n score = []\n for tiles_subset in [tiles[i:i + MAX_NB_TILES_REQUEST] for i in range(0, len(tiles), MAX_NB_TILES_REQUEST)]:\n api_request = (\n BASE_URL\n + f'/heatmaps/dwell-density/daily/{day.isoformat().split(\"T\")[0]}'\n + \"?tiles=\"\n + \"&tiles=\".join(map(str, tiles_subset))\n )\n data = oauth.get(api_request, headers=headers).json()\n if data.get(\"tiles\") != None:\n for t in data[\"tiles\"]:\n tileID.append(t['tileId'])\n score.append(t[\"score\"])\n return pd.DataFrame(data={'tileID': tileID, 'score':score}).set_index(\"tileID\")\n\n\n\n\ndef get_hourly_density_dataframe(tiles, day=datetime(year=2020, month=1, day=27, hour=0, minute=0)):\n \"\"\"Fetches the hourly density of tiles for 24 hours.\n\n Fetches the hourly density of the tiles and creates a dataframe of the fetched data.\n\n Args:\n tiles: Array of tile id's that daily density data needs to be fetched.\n day: Day to fetch the density data for.\n\n Returns:\n DataFrame containg the tileId and the score. The name of the collumns are:\n [score]\n The identifier of the row is bassed on the [tileID, time]\n \n +----------+---------------------+-------+\n | | | score |\n +----------+---------------------+-------+\n | tileID | time | |\n +----------+---------------------+-------+\n | 44394309 | 2020-01-27T00:00:00 | 52 |\n | +---------------------+-------+\n | | 2020-01-27T01:00:00 | 68 |\n | +---------------------+-------+\n | | 2020-01-27T02:00:00 | 69 |\n | +---------------------+-------+\n | | 2020-01-27T03:00:00 | 69 |\n | +---------------------+-------+\n | | 2020-01-27T04:00:00 | 69 |\n +----------+---------------------+-------+\n\n Tile with k-anonymized dwell density score. If tile not present Swisscom is\n unable to provide a value due to k-anonymization. To find out more on density\n scores read the Heatmap FAQ. 
\n \"\"\"\n \n def get_hourly_density(tiles, day=datetime(year=2020, month=1, day=27, hour=0, minute=0)):\n dates = [(day + timedelta(hours=delta)) for delta in range(24)]\n date2score = dict()\n print(\"getHourlyDensity\")\n for dt in tqdm(dates, desc=\"get_hourly_density: hours\", leave=True):\n for tiles_subset in [tiles[i:i + 100] for i in range(0, len(tiles), 100)]:\n api_request = (\n BASE_URL\n + f'/heatmaps/dwell-density/hourly/{dt.isoformat()}'\n + \"?tiles=\"\n + \"&tiles=\".join(map(str, tiles_subset))\n )\n for t in oauth.get(api_request, headers=headers).json().get(\"tiles\",[]):\n if date2score.get(t['tileId']) == None:\n date2score[t['tileId']] = dict()\n date2score.get(t['tileId'])[dt.isoformat()] = t['score']\n\n return date2score\n \n \n tiles_data = []\n time_data = []\n score = []\n data = get_hourly_density(tiles, day)\n for t in data:\n for time in data[t]:\n time_data.append(time)\n tiles_data.append(t)\n score.append(data[t][time])\n return pd.DataFrame(data={'tileID': tiles_data, 'score':score, 'time': time_data}).set_index(['tileID', 'time'])\n\n\n\n\ndef fetch_data_city(city: str) -> None:\n \"\"\"Fetches the data for a city if the data is not yet cashed on the computer.\n \"\"\"\n compression = \".xz\"\n folder = os.path.join(\".\",\"data\")\n def file_path(file_name: str) -> str:\n return os.path.join(folder, file_name)\n\n if not(os.path.exists(folder)):\n os.mkdir(folder)\n \n \n tiles_path = file_path(f'{city}Tiles.pkl{compression}')\n hourly_dem_path = file_path(f'{city}HourlyDemographics.pkl{compression}')\n hourly_density_path = file_path(f'{city}HourlyDensity.pkl{compression}')\n daily_density_path = file_path(f'{city}DensityDaily.pkl{compression}')\n daily_demographics_path = file_path(f'{city}DemographicsDaily.pkl{compression}')\n\n\n if not(os.path.isfile(tiles_path)):\n tiles = get_tiles(get_municipalityID(city)[0])\n tiles.to_pickle(tiles_path)\n else:\n tiles = pd.read_pickle(tiles_path)\n if not(os.path.isfile(hourly_dem_path)):\n hourly_dem = get_hourly_demographics_dataframe(tiles['tileID'].to_numpy())\n hourly_dem.to_pickle(hourly_dem_path)\n if not(os.path.isfile(hourly_density_path)):\n hourly_dens = get_hourly_density_dataframe(tiles['tileID'].to_numpy())\n hourly_dens.to_pickle(hourly_density_path)\n if not(os.path.isfile(daily_density_path)):\n get_daily_density(tiles['tileID'].to_numpy()).to_pickle(daily_density_path)\n if not(os.path.isfile(daily_demographics_path)):\n get_daily_demographics(tiles['tileID'].to_numpy()).to_pickle(daily_demographics_path)\n\n\ndef clean_cities_list(cities: [str]) -> [str]:\n \"\"\"Cleans the list of cities by removing all the cities that are not found in the \n official list of cities provided by the Federal Statisitics Office.\n \n Args:\n List of cities to check and clean.\n \n Return:\n List containing a subset of the input list such that all elements are valid.\n \"\"\"\n invalid_cities = []\n #validation that the cities names are valid\n for c in cities:\n if len(commune.loc[commune.GDENAME == c].GDENR.to_numpy()) == 0:\n city = []\n sim_value = []\n for f in commune.GDENAME:\n r = SequenceMatcher(None, c, f).ratio()\n if r > 0.5:\n city.append(f)\n sim_value.append(r)\n\n d = pd.DataFrame(data={\"city\": city, \"value\": sim_value})\n \n potential_cities = d.sort_values(\"value\", ascending=False).head(5).city.to_numpy()\n print(f\"City nammed: {c} cannot be found in official records. Did you mean: {potential_cities} ? 
{c} will be ignored.\")\n invalid_cities.append(c)\n return [c for c in cities if not(c in invalid_cities)]\n\n\n# Multithread fetch implementation\n\nclass DownloadWorker(Thread):\n\n def __init__(self, queue):\n Thread.__init__(self)\n self.queue = queue\n\n def run(self):\n while True:\n # Get the work from the queue and expand the tuple\n city = self.queue.get()\n if city == -1:\n self.queue.put(-1)\n break\n try:\n fetch_data_city(city)\n finally:\n self.queue.task_done()\n\n\ndef download_commune_excel() -> None:\n '''\n Downloads the excel spreadsheet from the Swiss Federal Statistical Office that maps the town name to unique ID\n '''\n \n print('Beginning commune file download with requests')\n\n folder = os.path.join(\".\",\"data\")\n if not(os.path.exists(folder)):\n os.mkdir(folder)\n \n url = 'https://www.bfs.admin.ch/bfsstatic/dam/assets/11467406/master'\n r = requests.get(url)\n\n with open(os.path.join(\".\", \"data\", 'commune.xlsx'), 'wb') as f:\n f.write(r.content)\n print(\"End of commune file download\")\n \n\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\nBASE_URL = \"https://api.swisscom.com/layer/heatmaps/demo\"\nTOKEN_URL = \"https://consent.swisscom.com/o/oauth2/token\"\nMAX_NB_TILES_REQUEST = 100\nheaders = {\"scs-version\": \"2\"}\nclient_id = \"\" # customer key in the Swisscom digital market place\nclient_secret = \"\" # customer secret in the Swisscom digital market place\n\nif client_id == \"\":\n client_id = os.environ.get(\"CLIENT_ID\", \"\")\n if client_id == \"\":\n client_id = input(\"Enter MIP Client ID: \")\n os.environ[\"CLIENT_ID\"] = client_id\nif client_secret == \"\":\n client_secret = os.environ.get(\"CLIENT_SECRET\", \"\")\n if client_secret == \"\":\n client_secret = getpass.getpass('Enter MIP client secret:')\n os.environ[\"CLIENT_SECRET\"] = client_secret\n\n# Fetch an access token\nclient = BackendApplicationClient(client_id=client_id)\noauth = OAuth2Session(client=client)\noauth.fetch_token(token_url=TOKEN_URL, client_id=client_id,\n client_secret=client_secret)\n\n\ndef main():\n ts = time()\n\n if not(os.path.exists(os.path.join(\".\", \"data\", 'commune.xlsx'))):\n download_commune_excel()\n global commune\n commune = pd.read_excel(os.path.join(\".\", \"data\", 'commune.xlsx'), sheet_name='GDE')\n \n cities = [\"Saas-Fee\", \"Arosa\", \"Bulle\", \"Laax\",\"Belp\" ,\"Saanen\",\"Adelboden\", \"Andermatt\", \"Davos\", \"Bulle\", \"Bern\", \"Genève\", \"Lausanne\", \"Zürich\", \"Neuchâtel\", \"Sion\", \"St. 
Gallen\", \"Appenzell\", \"Solothurn\", \"Zug\", \"Fribourg\", \"Luzern\", \"Ecublens (VD)\", \"Kloten\", \"Le Grand-Saconnex\", \"Nyon\", \"Zermatt\", \"Lugano\"]\n cities = clean_cities_list(cities)\n queue = Queue()\n for x in range(2):\n worker = DownloadWorker(queue)\n worker.deamen = True\n worker.start()\n for c in cities:\n logger.info('Queueing {}'.format(c))\n queue.put(c)\n queue.join()\n\n queue.put(-1)\n logger.info('Took %s', time() - ts)\n\n\n list_of_cities_path = os.path.join(\".\", \"data\",\"CityList.json\")\n cityList=[]\n if os.path.isfile(list_of_cities_path):\n with open(list_of_cities_path, \"r\") as filehandle:\n cityList = json.load(filehandle)\n with open(list_of_cities_path, \"w\") as filehandle:\n for city in cities:\n if not(city in cityList):\n cityList.append(city)\n json.dump(cityList, filehandle)\n \n \nif __name__ == \"__main__\":\n main()\n\n\n \n# Other functions not currently used\n\ndef get_daily_demographics_male(tiles: np.array(int), day=datetime(year=2020, month=1, day=27)) -> pd.DataFrame:\n \"\"\"Fetches Daily demographics.\n \n Fetches the daily male proportion of the tiles and creates a dataframe of the fetched data.\n \n Args:\n tiles: Array of tile id's, what will be used to querry demographic data.\n day: date of the data to be fetched.\n \n Returns:\n DataFrame containing the tileId and the proportion of male. The name of the collumns are:\n [tileID, maleProportion]\n The data is k-anonymized. Therefor is some tiles are not present in the output dataframe it \n means that the data is not available. To find out more about demographics visit the Heatmap FAQ.\n \"\"\"\n \n tileID = []\n maleProportion = []\n\n for tiles_subset in [tiles[i:i + MAX_NB_TILES_REQUEST] for i in range(0, len(tiles), MAX_NB_TILES_REQUEST)]:\n api_request = (\n BASE_URL\n + f'/heatmaps/dwell-demographics/daily/{day.isoformat().split(\"T\")[0]}'\n + \"?tiles=\"\n + \"&tiles=\".join(map(str, tiles_subset))\n )\n data = oauth.get(api_request, headers=headers).json()\n if data.get(\"tiles\") != None:\n for t in data[\"tiles\"]:\n if t.get(\"maleProportion\") != None:\n tileID.append(t['tileId'])\n maleProportion.append(t[\"maleProportion\"])\n return pd.DataFrame(data={'tileID': tileID, 'maleProportion':maleProportion})\n\n\n\ndef get_daily_demographics_age(tiles: np.array(int), day=datetime(year=2020, month=1, day=27)) -> pd.DataFrame:\n \"\"\"Fetches daily demographics of age categories\n \n Fetches the daily demographics, age distribution, of the tiles and creates a dataframe of the fetched data.\n \n Args:\n tiles: Array of tile id's, what will be used to querry demographic data.\n day: date of the data to be fetched.\n \n Returns:\n DataFrame containing the tileId and a array of values corresponding to the age distribution. The name\n of the collumns are:\n [tileID, ageDistribution]\n The data is k-anonymized. Therefor is some tiles are not present in the output dataframe it \n means that the data is not available. 
To find out more about demographics visit the Heatmap FAQ.\n \"\"\"\n tileID = []\n ageDistribution = []\n \n for tiles_subset in [tiles[i:i + MAX_NB_TILES_REQUEST] for i in range(0, len(tiles), MAX_NB_TILES_REQUEST)]:\n api_request = (\n BASE_URL\n + f'/heatmaps/dwell-demographics/daily/{day.isoformat().split(\"T\")[0]}'\n + \"?tiles=\"\n + \"&tiles=\".join(map(str, tiles_subset))\n )\n data = oauth.get(api_request, headers=headers).json()\n for t in data.get(\"tiles\", []):\n if t.get(\"ageDistribution\") != None:\n tileID.append(t['tileId'])\n ageDistribution.append(t[\"ageDistribution\"])\n return pd.DataFrame(data={'tileID': tileID, 'ageDistribution':ageDistribution})\n\n"
] | [
[
"numpy.array",
"pandas.read_pickle",
"pandas.DataFrame",
"pandas.DataFrame.from_dict"
]
] |
acmlia/ann_training | [
"8cb39123203445cf79c4bd65350fa4063705a518",
"8cb39123203445cf79c4bd65350fa4063705a518"
] | [
"security/training_ann_3.py",
"src/training_ann_6.py"
] | [
"from __future__ import absolute_import, division, print_function\n\nimport os\nimport time\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nfrom collections import Counter\nimport matplotlib.pyplot as plt\nfrom sklearn.externals import joblib\nfrom sklearn.preprocessing import Normalizer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import QuantileTransformer\nfrom sklearn.decomposition import PCA\nfrom src.meteoro_skills import CategoricalScores\nfrom src.meteoro_skills import ContinuousScores\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom keras import backend\nfrom tensorflow.keras import layers\nfrom keras.layers import GaussianNoise\nfrom keras.layers import GaussianDropout\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.wrappers.scikit_learn import KerasClassifier\n#from keras.models import model_from_yaml\nfrom keras.models import load_model\n\nprint('TF version '+tf.__version__)\n\n# ------------------------------------------------------------------------------\n\ndef tic():\n global _start_time\n _start_time = time.time()\n\n\ndef tac():\n t_sec = round(time.time() - _start_time)\n (t_min, t_sec) = divmod(t_sec, 60)\n (t_hour, t_min) = divmod(t_min, 60)\n print('Time passed: {}hour:{}min:{}sec'.format(t_hour, t_min, t_sec))\n\ndef mean_squared_error(y_test, y_pred):\n return K.mean(K.square(y_pred - y_test), axis=-1)\n# ------------------------------------------------------------------------------\n\n\nclass Training:\n \"\"\"\n This module is intended to automate the TensorFlow Neural Network training.\n \"\"\"\n PCA = PCA()\n seed = 0\n run_prefix = ''\n version = ''\n vernick = ''\n file = ''\n path = ''\n fig_title = ''\n path_fig = ''\n mod_out_pth = ''\n mod_out_name = ''\n\n def __init__(self, random_seed=0,\n run_prefix='',\n version='',\n version_nickname='',\n csv_entry='',\n csv_path='',\n figure_path='',\n model_out_path='',\n model_out_name=''):\n\n self.run_prefix = run_prefix\n self.seed = random_seed\n self.ver = version\n self.vernick = version_nickname\n self.file = csv_entry\n self.path = csv_path\n self.path_fig = figure_path\n self.fig_title = run_prefix + version + version_nickname\n self.mod_out_pth = model_out_path\n self.mod_out_name = model_out_name\n # -------------------------------------------------------------------------\n # DROP DATA OUTSIDE INTERVAL\n # -------------------------------------------------------------------------\n \n @staticmethod\n def keep_interval(keepfrom: 0.0, keepto: 1.0, dataframe, target_col: str):\n keepinterval = np.where((dataframe[target_col] >= keepfrom) &\n (dataframe[target_col] <= keepto))\n result = dataframe.iloc[keepinterval]\n return result\n\n # -------------------------------------------------------------------------\n # BUILD MODELS DEFINITIONS : CLAS = CLASSIFICATION and REG = REGRESSION\n # -------------------------------------------------------------------------\n\n @staticmethod\n def build_class_model():\n '''\n Fucntion to create the instance and configuration of the keras\n model(Sequential and Dense).\n '''\n # Create the Keras model:\n model = Sequential()\n model.add(Dense(8, input_dim=4, kernel_initializer='uniform', activation='relu'))\n model.add(Dense(2, kernel_initializer='uniform', activation='relu'))\n model.add(Dense(1, kernel_initializer='uniform', activation='sigmoid'))\n # Compile model\n model.compile(loss='binary_crossentropy', 
optimizer='SGD', metrics=['accuracy'],)\n return model\n\n @staticmethod\n def build_reg_model(input_size):\n '''\n Fucntion to create the instance and configuration of the keras\n model(Sequential and Dense).\n '''\n model = Sequential()\n model.add(GaussianNoise(0.01, input_shape=(input_size,)))\n model.add(Dense(33, activation='relu'))\n model.add(Dense(12, activation='relu'))\n model.add(Dense(1))\n model.compile(loss='mean_squared_error',\n optimizer='adam',\n metrics=['mean_absolute_error', 'mean_squared_error'])\n return model\n\n # -------------------------------------------------------------------------\n # EXECUTION OF READING INPUT ATTRIBUTES, SCALING, PCA, SPLIT AND RUN MODEL!\n # -------------------------------------------------------------------------\n\n def autoExecClass(self):\n\n # Fix random seed for reproducibility:\n np.random.seed(self.seed)\n\n # Load dataset:\n df = pd.read_csv(os.path.join(self.path, self.file), sep=',', decimal='.')\n x, y= df.loc[:,['36V', '89V', '166V', '190V']], df.loc[:,['TagRain']]\n \n x_arr = np.asanyarray(x)\n y_arr = np.asanyarray(y)\n y_arr = np.ravel(y_arr)\n\n # Scaling the input paramaters:\n# scaler_min_max = MinMaxScaler()\n norm_sc = Normalizer()\n x_normalized= norm_sc.fit_transform(x_arr)\n\n # Split the dataset in test and train samples:\n x_train, x_test, y_train, y_test = train_test_split(x_normalized,\n y_arr, test_size=0.10,\n random_state=101)\n\n # Create the instance for KerasRegressor:\n model=self.build_class_model()\n tic()\n#------------------------------------------------------------------------------\n # Display training progress by printing a single dot for each completed epoch\n\n class PrintDot(keras.callbacks.Callback):\n def on_epoch_end(self, epoch, logs):\n if epoch % 100 == 0: print('')\n print('.', end='')\n\n EPOCHS = 1000\n\n history = model.fit(x_train, y_train,\n epochs=EPOCHS, validation_split=0.2, batch_size=10,\n verbose=0, callbacks=[PrintDot()])\n print(history.history.keys())\n\n# ------------------------------------------------------------------------------\n # Visualize the model's training progress using the stats\n # stored in the history object.\n\n hist = pd.DataFrame(history.history)\n hist['epoch'] = history.epoch\n hist.tail()\n# ------------------------------------------------------------------------------\n # Saving model to YAML:\n\n# model_yaml = model.to_yaml()\n# with open(self.mod_out_pth + self.mod_out_name + '.yaml', 'w') as yaml_file:\n# yaml_file.write(model_yaml)\n#\n# # serialize weights to HDF5\n# model.save_weights(self.mod_out_pth + self.mod_out_name + '.h5')\n# print(\"Saved model to disk\")\n# tac()\n\n # Saving the complete model in HDF5:\n model.save(self.mod_out_pth + self.mod_out_name + '.h5')\n\n # ------------------------------------------------------------------------------\n #\n # ------------------------------------------------------------------------------\n\n def autoExecReg(self):\n\n # Fix random seed for reproducibility:\n np.random.seed(self.seed)\n# ------------------------------------------------------------------------------\n\n df_orig = pd.read_csv(os.path.join(self.path, self.file), sep=',', decimal='.')\n\n df_input = df_orig.loc[:, ['10V', '10H', '18V', '18H', '36V', '36H', '89V', '89H',\n '166V', '166H', '183VH', 'sfccode', 'T2m', 'tcwv', 'PCT36', 'PCT89', '89VH',\n 'lat']]\n\n colunas = ['10V', '10H', '18V', '18H', '36V', '36H', '89V', '89H',\n '166V', '166H', '183VH', 'sfccode', 'T2m', 'tcwv', 'PCT36', 'PCT89', '89VH',\n 'lat']\n\n scaler = 
StandardScaler()\n\n normed_input = scaler.fit_transform(df_input)\n df_normed_input = pd.DataFrame(normed_input[:],\n columns=colunas)\n ancillary = df_normed_input.loc[:, ['183VH', 'sfccode', 'T2m', 'tcwv', 'PCT36', 'PCT89', '89VH',\n 'lat']]\n # regions=df_orig.loc[:,['R1','R2','R3','R4','R5']]\n # ------------------------------------------------------------------------------\n # Choosing the number of components:\n\n TB1 = df_normed_input.loc[:, ['10V', '10H', '18V', '18H']]\n TB2 = df_normed_input.loc[:, ['36V', '36H', '89V', '89H', '166V', '166H']]\n\n # ------------------------------------------------------------------------------\n # Verifying the number of components that most contribute:\n pca = self.PCA\n pca1 = pca.fit(TB1)\n plt.plot(np.cumsum(pca1.explained_variance_ratio_))\n plt.xlabel('Number of components for TB1')\n plt.ylabel('Cumulative explained variance');\n plt.savefig(self.path_fig + self.version + 'PCA_TB1.png')\n # ---\n pca_trans1 = PCA(n_components=2)\n pca1 = pca_trans1.fit(TB1)\n TB1_transformed = pca_trans1.transform(TB1)\n print(\"original shape: \", TB1.shape)\n print(\"transformed shape:\", TB1_transformed.shape)\n # ------------------------------------------------------------------------------\n pca = PCA()\n pca2 = pca.fit(TB2)\n plt.plot(np.cumsum(pca2.explained_variance_ratio_))\n plt.xlabel('Number of components for TB2')\n plt.ylabel('Cumulative explained variance');\n plt.savefig(self.path_fig + self.version + 'PCA_TB2.png')\n # ---\n pca_trans2 = PCA(n_components=2)\n pca2 = pca_trans2.fit(TB2)\n TB2_transformed = pca_trans2.transform(TB2)\n print(\"original shape: \", TB2.shape)\n print(\"transformed shape:\", TB2_transformed.shape)\n # ------------------------------------------------------------------------------\n # JOIN THE TREATED VARIABLES IN ONE SINGLE DATASET AGAIN:\n\n PCA1 = pd.DataFrame(TB1_transformed[:],\n columns=['pca1_1', 'pca_2'])\n PCA2 = pd.DataFrame(TB2_transformed[:],\n columns=['pca2_1', 'pca2_2'])\n\n dataset = PCA1.join(PCA2, how='right')\n dataset = dataset.join(ancillary, how='right')\n dataset = dataset.join(df_orig.loc[:, ['sfcprcp']], how='right')\n # ------------------------------------------------------------------------------\n\n dataset = self.keep_interval(0.2, 110.0, dataset, 'sfcprcp')\n\n # ----------------------------------------\n # SUBSET BY SPECIFIC CLASS (UNDERSAMPLING)\n# n = 0.98\n# to_remove = np.random.choice(\n# dataset.index,\n# size=int(dataset.shape[0] * n),\n# replace=False)\n# dataset = dataset.drop(to_remove)\n\n # ------------------------------------------------------------------------------\n # Split the data into train and test\n # Now split the dataset into a training set and a test set.\n # We will use the test set in the final evaluation of our model.\n\n train_dataset = dataset.sample(frac=0.8, random_state=0)\n test_dataset = dataset.drop(train_dataset.index)\n\n # ------------------------------------------------------------------------------\n # Inspect the data:\n # Have a quick look at the joint distribution of a few pairs of columns from the training set.\n\n colunas = list(dataset.columns.values)\n\n # ------------------------------------------------------------------------------\n # Also look at the overall statistics:\n train_stats = train_dataset.describe()\n train_stats.pop(\"sfcprcp\")\n train_stats = train_stats.transpose()\n\n # ------------------------------------------------------------------------------\n # Split features from labels:\n # Separate the target value, or 
\"label\", from the features.\n # This label is the value that you will train the model to predict.\n\n y_train = train_dataset.pop('sfcprcp')\n y_test = test_dataset.pop('sfcprcp')\n\n # ------------------------------------------------------------------------------\n # Normalize the data:\n\n scaler = StandardScaler()\n normed_train_data = scaler.fit_transform(train_dataset)\n normed_test_data = scaler.fit_transform(test_dataset)\n\n # ------------------------------------------------------------------------------\n # Build the model:\n\n model = self.build_reg_model(len(train_dataset.keys()))\n # ------------------------------------------------------------------------------\n # Inspect the model:\n # Use the .summary method to print a simple description of the model\n\n model.summary()\n\n # ------------------------------------------------------------------------------\n # It seems to be working, and it produces a result\n # of the expected shape and type.\n\n # Train the model:\n # Train the model for 1000 epochs, and record the training\n # and validation accuracy in the history object.\n\n # ------------------------------------------------------------------------------\n # Display training progress by printing a single dot for each completed epoch\n\n class PrintDot(keras.callbacks.Callback):\n def on_epoch_end(self, epoch, logs):\n if epoch % 100 == 0: print('')\n print('.', end='')\n\n EPOCHS = 1000\n\n history = model.fit(\n normed_train_data, y_train,\n epochs=EPOCHS, validation_split=0.2, verbose=0,\n callbacks=[PrintDot()])\n print(history.history.keys())\n\n # ------------------------------------------------------------------------------\n # Visualize the model's training progress using the stats\n # stored in the history object.\n\n hist = pd.DataFrame(history.history)\n hist['epoch'] = history.epoch\n hist.tail()\n\n self.plot_history(history)\n # ------------------------------------------------------------------------------\n\n model = self.build_reg_model(len(train_dataset.keys()))\n\n # The patience parameter is the amount of epochs to check for improvement\n early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)\n\n history = model.fit(normed_train_data, y_train, epochs=EPOCHS,\n validation_split=0.2, verbose=0, callbacks=[early_stop, PrintDot()])\n\n # ------------------------------------------------------------------------------\n # Ploting again, but with the EarlyStopping apllied:\n\n self.plot_history_EarlyStopping(history)\n\n # The graph shows that on the validation set, the average error\n # is usually around +/- 2 MPG. 
Is this good?\n # We'll leave that decision up to you.\n # ------------------------------------------------------------------------------\n # Let's see how well the model generalizes by using\n # the test set, which we did not use when training the model.\n # This tells us how well we can expect the model to predict\n # when we use it in the real world.\n\n loss, mae, mse = model.evaluate(normed_test_data, y_test, verbose=0)\n\n print(\"Testing set Mean Abs Error: {:5.2f} sfcprcp\".format(mae))\n #------------------------------------------------------------------------------\n # -----------------------------------------------------------------------------\n # Make predictions\n # Finally, predict SFCPRCP values using data in the testing set:\n\n test_predictions = model.predict(normed_test_data).flatten()\n\n # Appplying meteorological skills to verify the performance of the TRAIN/TESTE model, in this case, continous scores:\n\n skills = ContinuousScores()\n val_y_pred_mean, val_y_test_mean, val_mae, val_rmse, val_std, val_fseperc, val_fse, val_corr, val_num_pixels = skills.metrics(y_test, test_predictions)\n \n #converting to text file\n print(\"converting arrays to text files\")\n my_scores = {'val_y_pred_mean': val_y_pred_mean,\n 'val_y_test_mean': val_y_test_mean,\n 'val_mae': val_mae,\n 'val_rmse': val_rmse,\n 'val_std': val_std,\n 'val_fseperc': val_fseperc,\n 'val_fse': val_fse,\n 'val_corr': val_corr,\n 'val_num_pixels': val_num_pixels}\n\n with open(self.path_fig+'continuous_scores_TEST_TRAIN_'+self.version+'.txt', 'w') as myfile:\n myfile.write(str(my_scores))\n print(\"Text file saved!\")\n\n plt.figure()\n plt.scatter(y_test, test_predictions)\n plt.xlabel('True Values [sfcprcp]')\n plt.ylabel('Predictions [sfcprcp]')\n plt.axis('equal')\n plt.axis('square')\n plt.xlim([0, plt.xlim()[1]])\n plt.ylim([0, plt.ylim()[1]])\n plt.plot([-100, 100], [-100, 100])\n fig_name = self.fig_title + \"_plot_scatter_y_test_vs_y_pred.png\"\n plt.savefig(self.path_fig + fig_name)\n plt.clf()\n\n #------------------------------------------------------------------------------\n ax = plt.gca()\n ax.plot(y_test,test_predictions, 'o', c='blue', alpha=0.07, markeredgecolor='none')\n ax.set_yscale('log')\n ax.set_xscale('log')\n ax.set_xlabel('True Values [sfcprcp]')\n ax.set_ylabel('Predictions [sfcprcp]')\n plt.plot([-100, 100], [-100, 100])\n fig_name = self.fig_title + \"_plot_scatter_LOG_y_test_vs_y_pred.png\"\n plt.savefig(self.path_fig+fig_name)\n plt.clf()\n #------------------------------------------------------------------------------\n # ------------------------------------------------------------------------------\n # It looks like our model predicts reasonably well.\n # Let's take a look at the error distribution.\n\n error = test_predictions - y_test\n plt.hist(error, bins=25)\n plt.xlabel(\"Prediction Error [sfcprcp]\")\n plt.ylabel(\"Count\")\n fig_name = self.fig_title + \"_prediction_error.png\"\n plt.savefig(self.path_fig + fig_name)\n plt.clf()\n \n # ------------------------------------------------------------------------------\n # HISTROGRAM 2D\n\n plt.hist2d(y_test, test_predictions, cmin=1, bins=(50, 50), cmap=plt.cm.jet, range=np.array([(0.2, 110), (0.2, 110)]))\n plt.axis('equal')\n plt.axis('square')\n plt.plot([0, 100], [0, 100], ls=\"--\", c=\".3\")\n plt.xlim([0, max(y_test)])\n plt.ylim([0, max(y_test)])\n plt.colorbar()\n plt.xlabel(\"Observed rain rate (mm/h) - Training\")\n plt.ylabel(\"Predicted rain rate (mm/h) - Training\")\n fig_name = self.fig_title + 
\"_hist2D.png\"\n plt.savefig(self.path_fig + fig_name)\n plt.clf()\n\n # ------------------------------------------------------------------------------\n # Saving model to YAML:\n\n model_yaml = model.to_yaml()\n with open(self.mod_out_pth + self.mod_out_name + '.yaml', 'w') as yaml_file:\n yaml_file.write(model_yaml)\n\n # serialize weights to HDF5\n model.save_weights(self.mod_out_pth + self.mod_out_name + '.h5')\n print(\"Saved model to disk\")\n\n # Saving the complete model in HDF5:\n model.save(self.mod_out_pth + self.mod_out_name + '_tf.h5')\n\n # -------------------------------------------------------------------------\n # FUNCTIONS TO MAKE PLOTS ABOUT TRAINING:\n # -------------------------------------------------------------------------\n def plot_history(self, history):\n hist = pd.DataFrame(history.history)\n hist['epoch'] = history.epoch\n\n plt.xlabel('Epoch')\n plt.ylabel('Mean Abs Error [sfcprcp]')\n plt.plot(hist['epoch'], hist['mean_absolute_error'],\n label='Train Error')\n plt.plot(hist['epoch'], hist['val_mean_absolute_error'],\n label='Val Error')\n ylim_max = hist.val_mean_absolute_error.max() + 10\n plt.ylim([0, ylim_max])\n plt.legend()\n\n plt.figure()\n plt.xlabel('Epoch')\n plt.ylabel('Mean Square Error [$scfprcp^2$]')\n plt.plot(hist['epoch'], hist['mean_squared_error'],\n label='Train Error')\n plt.plot(hist['epoch'], hist['val_mean_squared_error'],\n label='Val Error')\n ylim_max = hist.val_mean_squared_error.max() + 10\n plt.ylim([0, ylim_max])\n plt.legend()\n # plt.show()\n fig_name = self.fig_title + \"_error_per_epochs_history.png\"\n plt.savefig(self.path_fig + fig_name)\n\n def plot_history_EarlyStopping(self, history):\n hist = pd.DataFrame(history.history)\n hist['epoch'] = history.epoch\n\n plt.figure()\n plt.xlabel('Epoch')\n plt.ylabel('Mean Abs Error [sfcprcp]')\n plt.plot(hist['epoch'], hist['mean_absolute_error'],\n label='Train Error')\n plt.plot(hist['epoch'], hist['val_mean_absolute_error'],\n label='Val Error')\n ylim_max = hist.val_mean_absolute_error.max() + 10\n plt.ylim([0, ylim_max])\n\n plt.legend()\n\n plt.figure()\n plt.xlabel('Epoch')\n plt.ylabel('Mean Square Error [$sfcprcp^2$]')\n plt.plot(hist['epoch'], hist['mean_squared_error'],\n label='Train Error')\n plt.plot(hist['epoch'], hist['val_mean_squared_error'],\n label='Val Error')\n ylim_max = hist.val_mean_squared_error.max() + 10\n plt.ylim([0, ylim_max])\n\n plt.legend()\n\n fig_name = self.fig_title + \"_error_per_epochs_EarlyStopping.png\"\n plt.savefig(self.path_fig + fig_name)\n",
"from __future__ import absolute_import, division, print_function\n\nimport os\nimport time\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nfrom collections import Counter\nimport matplotlib.pyplot as plt\nfrom sklearn.externals import joblib\nfrom sklearn.preprocessing import Normalizer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import QuantileTransformer\nfrom sklearn.decomposition import PCA\nfrom src.meteoro_skills import CategoricalScores\nfrom src.meteoro_skills import ContinuousScores\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom keras import backend\nfrom tensorflow.keras import layers\nfrom keras.layers import GaussianNoise\nfrom keras.layers import GaussianDropout\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.wrappers.scikit_learn import KerasClassifier\n#from keras.models import model_from_yaml\nfrom keras.models import load_model\n\nprint('TF version '+tf.__version__)\n\n# ------------------------------------------------------------------------------\n\ndef tic():\n global _start_time\n _start_time = time.time()\n\n\ndef tac():\n t_sec = round(time.time() - _start_time)\n (t_min, t_sec) = divmod(t_sec, 60)\n (t_hour, t_min) = divmod(t_min, 60)\n print('Time passed: {}hour:{}min:{}sec'.format(t_hour, t_min, t_sec))\n\ndef mean_squared_error(y_test, y_pred):\n return K.mean(K.square(y_pred - y_test), axis=-1)\n# ------------------------------------------------------------------------------\n\n\nclass Training:\n \"\"\"\n This module is intended to automate the TensorFlow Neural Network training.\n \"\"\"\n PCA = PCA()\n seed = 0\n run_prefix = ''\n version = ''\n vernick = ''\n file = ''\n path = ''\n fig_title = ''\n path_fig = ''\n mod_out_pth = ''\n mod_out_name = ''\n\n def __init__(self, random_seed=0,\n run_prefix='',\n version='',\n version_nickname='',\n csv_entry='',\n csv_path='',\n figure_path='',\n model_out_path='',\n model_out_name=''):\n\n self.run_prefix = run_prefix\n self.seed = random_seed\n self.version = version\n self.vernick = version_nickname\n self.file = csv_entry\n self.path = csv_path\n self.path_fig = figure_path\n self.fig_title = run_prefix + version + version_nickname\n self.mod_out_pth = model_out_path\n self.mod_out_name = model_out_name\n # -------------------------------------------------------------------------\n # DROP DATA OUTSIDE INTERVAL\n # -------------------------------------------------------------------------\n \n @staticmethod\n def keep_interval(keepfrom: 0.0, keepto: 1.0, dataframe, target_col: str):\n keepinterval = np.where((dataframe[target_col] >= keepfrom) &\n (dataframe[target_col] <= keepto))\n result = dataframe.iloc[keepinterval]\n return result\n\n # -------------------------------------------------------------------------\n # BUILD MODELS DEFINITIONS : CLAS = CLASSIFICATION and REG = REGRESSION\n # -------------------------------------------------------------------------\n\n @staticmethod\n def build_class_model():\n '''\n Fucntion to create the instance and configuration of the keras\n model(Sequential and Dense).\n '''\n # Create the Keras model:\n model = Sequential()\n model.add(Dense(8, input_dim=4, kernel_initializer='uniform', activation='relu'))\n model.add(Dense(2, kernel_initializer='uniform', activation='relu'))\n model.add(Dense(1, kernel_initializer='uniform', activation='sigmoid'))\n # Compile model\n model.compile(loss='binary_crossentropy', 
optimizer='SGD', metrics=['accuracy'],)\n return model\n\n @staticmethod\n def build_reg_model(input_size):\n '''\n Fucntion to create the instance and configuration of the keras\n model(Sequential and Dense).\n '''\n model = Sequential()\n model.add(GaussianNoise(0.01, input_shape=(input_size,)))\n model.add(Dense(33, activation='relu'))\n model.add(Dense(12, activation='relu'))\n model.add(Dense(1))\n model.compile(loss='mean_squared_error',\n optimizer='adam',\n metrics=['mean_absolute_error', 'mean_squared_error'])\n return model\n\n # -------------------------------------------------------------------------\n # EXECUTION OF READING INPUT ATTRIBUTES, SCALING, PCA, SPLIT AND RUN MODEL!\n # -------------------------------------------------------------------------\n\n def autoExecClass(self):\n\n # Fix random seed for reproducibility:\n np.random.seed(self.seed)\n\n # Load dataset:\n df = pd.read_csv(os.path.join(self.path, self.file), sep=',', decimal='.')\n x, y= df.loc[:,['36V', '89V', '166V', '190V']], df.loc[:,['TagRain']]\n \n x_arr = np.asanyarray(x)\n y_arr = np.asanyarray(y)\n y_arr = np.ravel(y_arr)\n\n # Scaling the input paramaters:\n# scaler_min_max = MinMaxScaler()\n norm_sc = Normalizer()\n x_normalized= norm_sc.fit_transform(x_arr)\n\n # Split the dataset in test and train samples:\n x_train, x_test, y_train, y_test = train_test_split(x_normalized,\n y_arr, test_size=0.10,\n random_state=101)\n\n # Create the instance for KerasRegressor:\n model=self.build_class_model()\n tic()\n#------------------------------------------------------------------------------\n # Display training progress by printing a single dot for each completed epoch\n\n class PrintDot(keras.callbacks.Callback):\n def on_epoch_end(self, epoch, logs):\n if epoch % 100 == 0: print('')\n print('.', end='')\n\n EPOCHS = 1000\n\n history = model.fit(x_train, y_train,\n epochs=EPOCHS, validation_split=0.2, batch_size=10,\n verbose=0, callbacks=[PrintDot()])\n print(history.history.keys())\n\n# ------------------------------------------------------------------------------\n # Visualize the model's training progress using the stats\n # stored in the history object.\n\n hist = pd.DataFrame(history.history)\n hist['epoch'] = history.epoch\n hist.tail()\n# ------------------------------------------------------------------------------\n # Saving model to YAML:\n\n# model_yaml = model.to_yaml()\n# with open(self.mod_out_pth + self.mod_out_name + '.yaml', 'w') as yaml_file:\n# yaml_file.write(model_yaml)\n#\n# # serialize weights to HDF5\n# model.save_weights(self.mod_out_pth + self.mod_out_name + '.h5')\n# print(\"Saved model to disk\")\n# tac()\n\n # Saving the complete model in HDF5:\n model.save(self.mod_out_pth + self.mod_out_name + '.h5')\n\n # ------------------------------------------------------------------------------\n #\n # ------------------------------------------------------------------------------\n\n def autoExecReg(self):\n\n # Fix random seed for reproducibility:\n np.random.seed(self.seed)\n# ------------------------------------------------------------------------------\n\n df_orig = pd.read_csv(os.path.join(self.path, self.file), sep=',', decimal='.')\n\n df_input = df_orig.loc[:, ['10V', '10H', '18V', '18H', '36V', '36H', '89V', '89H',\n '166V', '166H', '183VH', 'sfccode', 'T2m', 'tcwv', 'PCT36', 'PCT89', '89VH',\n 'lat']]\n\n colunas = ['10V', '10H', '18V', '18H', '36V', '36H', '89V', '89H',\n '166V', '166H', '183VH', 'sfccode', 'T2m', 'tcwv', 'PCT36', 'PCT89', '89VH',\n 'lat']\n\n scaler = 
StandardScaler()\n\n normed_input = scaler.fit_transform(df_input)\n df_normed_input = pd.DataFrame(normed_input[:],\n columns=colunas)\n ancillary = df_normed_input.loc[:, ['183VH', 'sfccode', 'T2m', 'tcwv', 'PCT36', 'PCT89', '89VH',\n 'lat']]\n # regions=df_orig.loc[:,['R1','R2','R3','R4','R5']]\n # ------------------------------------------------------------------------------\n # Choosing the number of components:\n\n TB1 = df_normed_input.loc[:, ['10V', '10H', '18V', '18H']]\n TB2 = df_normed_input.loc[:, ['36V', '36H', '89V', '89H', '166V', '166H']]\n\n # ------------------------------------------------------------------------------\n # Verifying the number of components that most contribute:\n pca = self.PCA\n pca1 = pca.fit(TB1)\n plt.plot(np.cumsum(pca1.explained_variance_ratio_))\n plt.xlabel('Number of components for TB1')\n plt.ylabel('Cumulative explained variance');\n plt.savefig(self.path_fig + self.version + 'PCA_TB1.png')\n # ---\n pca_trans1 = PCA(n_components=2)\n pca1 = pca_trans1.fit(TB1)\n TB1_transformed = pca_trans1.transform(TB1)\n print(\"original shape: \", TB1.shape)\n print(\"transformed shape:\", TB1_transformed.shape)\n # ------------------------------------------------------------------------------\n pca = PCA()\n pca2 = pca.fit(TB2)\n plt.plot(np.cumsum(pca2.explained_variance_ratio_))\n plt.xlabel('Number of components for TB2')\n plt.ylabel('Cumulative explained variance');\n plt.savefig(self.path_fig + self.version + 'PCA_TB2.png')\n # ---\n pca_trans2 = PCA(n_components=2)\n pca2 = pca_trans2.fit(TB2)\n TB2_transformed = pca_trans2.transform(TB2)\n print(\"original shape: \", TB2.shape)\n print(\"transformed shape:\", TB2_transformed.shape)\n # ------------------------------------------------------------------------------\n # JOIN THE TREATED VARIABLES IN ONE SINGLE DATASET AGAIN:\n\n PCA1 = pd.DataFrame(TB1_transformed[:],\n columns=['pca1_1', 'pca_2'])\n PCA2 = pd.DataFrame(TB2_transformed[:],\n columns=['pca2_1', 'pca2_2'])\n\n dataset = PCA1.join(PCA2, how='right')\n dataset = dataset.join(ancillary, how='right')\n dataset = dataset.join(df_orig.loc[:, ['sfcprcp']], how='right')\n # ------------------------------------------------------------------------------\n\n dataset = self.keep_interval(0.2, 75, dataset, 'sfcprcp')\n\n # ----------------------------------------\n# SUBSET BY SPECIFIC CLASS (UNDERSAMPLING)\n# n = 0.98\n# to_remove = np.random.choice(\n# dataset.index,\n# size=int(dataset.shape[0] * n),\n# replace=False)\n# dataset = dataset.drop(to_remove)\n\n # ------------------------------------------------------------------------------\n # Split the data into train and test\n # Now split the dataset into a training set and a test set.\n # We will use the test set in the final evaluation of our model.\n\n train_dataset = dataset.sample(frac=0.8, random_state=0)\n test_dataset = dataset.drop(train_dataset.index)\n\n # ------------------------------------------------------------------------------\n # Inspect the data:\n # Have a quick look at the joint distribution of a few pairs of columns from the training set.\n\n colunas = list(dataset.columns.values)\n\n # ------------------------------------------------------------------------------\n # Also look at the overall statistics:\n train_stats = train_dataset.describe()\n train_stats.pop(\"sfcprcp\")\n train_stats = train_stats.transpose()\n\n # ------------------------------------------------------------------------------\n # Split features from labels:\n # Separate the target value, or 
\"label\", from the features.\n # This label is the value that you will train the model to predict.\n\n y_train = train_dataset.pop('sfcprcp')\n y_test = test_dataset.pop('sfcprcp')\n\n # ------------------------------------------------------------------------------\n # Normalize the data:\n\n scaler = StandardScaler()\n normed_train_data = scaler.fit_transform(train_dataset)\n normed_test_data = scaler.fit_transform(test_dataset)\n\n # ------------------------------------------------------------------------------\n # Build the model:\n\n model = self.build_reg_model(len(train_dataset.keys()))\n # ------------------------------------------------------------------------------\n # Inspect the model:\n # Use the .summary method to print a simple description of the model\n\n model.summary()\n\n # ------------------------------------------------------------------------------\n # It seems to be working, and it produces a result\n # of the expected shape and type.\n\n # Train the model:\n # Train the model for 1000 epochs, and record the training\n # and validation accuracy in the history object.\n\n # ------------------------------------------------------------------------------\n # Display training progress by printing a single dot for each completed epoch\n\n class PrintDot(keras.callbacks.Callback):\n def on_epoch_end(self, epoch, logs):\n if epoch % 100 == 0: print('')\n print('.', end='')\n\n EPOCHS = 1000\n\n history = model.fit(\n normed_train_data, y_train,\n epochs=EPOCHS, validation_split=0.2, verbose=0,\n callbacks=[PrintDot()])\n print(history.history.keys())\n\n # ------------------------------------------------------------------------------\n # Visualize the model's training progress using the stats\n # stored in the history object.\n\n hist = pd.DataFrame(history.history)\n hist['epoch'] = history.epoch\n hist.tail()\n\n self.plot_history(history)\n # ------------------------------------------------------------------------------\n\n model = self.build_reg_model(len(train_dataset.keys()))\n\n # The patience parameter is the amount of epochs to check for improvement\n early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)\n\n history = model.fit(normed_train_data, y_train, epochs=EPOCHS,\n validation_split=0.2, verbose=0, callbacks=[early_stop, PrintDot()])\n\n # ------------------------------------------------------------------------------\n # Ploting again, but with the EarlyStopping apllied:\n\n self.plot_history_EarlyStopping(history)\n\n # The graph shows that on the validation set, the average error\n # is usually around +/- 2 MPG. 
Is this good?\n # We'll leave that decision up to you.\n # ------------------------------------------------------------------------------\n # Let's see how well the model generalizes by using\n # the test set, which we did not use when training the model.\n # This tells us how well we can expect the model to predict\n # when we use it in the real world.\n\n loss, mae, mse = model.evaluate(normed_test_data, y_test, verbose=0)\n\n print(\"Testing set Mean Abs Error: {:5.2f} sfcprcp\".format(mae))\n #------------------------------------------------------------------------------\n # -----------------------------------------------------------------------------\n # Make predictions\n # Finally, predict SFCPRCP values using data in the testing set:\n\n test_predictions = model.predict(normed_test_data).flatten()\n\n # Appplying meteorological skills to verify the performance of the TRAIN/TESTE model, in this case, continous scores:\n\n skills = ContinuousScores()\n val_y_pred_mean, val_y_test_mean, val_mae, val_rmse, val_std, val_fseperc, val_fse, val_corr, val_num_pixels = skills.metrics(y_test, test_predictions)\n \n #converting to text file\n print(\"converting arrays to text files\")\n my_scores = {'val_y_pred_mean': val_y_pred_mean,\n 'val_y_test_mean': val_y_test_mean,\n 'val_mae': val_mae,\n 'val_rmse': val_rmse,\n 'val_std': val_std,\n 'val_fseperc': val_fseperc,\n 'val_fse': val_fse,\n 'val_corr': val_corr,\n 'val_num_pixels': val_num_pixels}\n\n with open(self.path_fig+'continuous_scores_TEST_TRAIN_'+self.version+'.txt', 'w') as myfile:\n myfile.write(str(my_scores))\n print(\"Text file saved!\")\n\n plt.figure()\n plt.scatter(y_test, test_predictions)\n plt.xlabel('True Values [sfcprcp]')\n plt.ylabel('Predictions [sfcprcp]')\n plt.axis('equal')\n plt.axis('square')\n plt.xlim([0, plt.xlim()[1]])\n plt.ylim([0, plt.ylim()[1]])\n plt.plot([-100, 100], [-100, 100])\n fig_name = self.fig_title + \"_plot_scatter_y_test_vs_y_pred.png\"\n plt.savefig(self.path_fig + fig_name)\n plt.clf()\n\n #------------------------------------------------------------------------------\n ax = plt.gca()\n ax.plot(y_test,test_predictions, 'o', c='blue', alpha=0.07, markeredgecolor='none')\n ax.set_yscale('log')\n ax.set_xscale('log')\n ax.set_xlabel('True Values [sfcprcp]')\n ax.set_ylabel('Predictions [sfcprcp]')\n plt.plot([-100, 100], [-100, 100])\n fig_name = self.fig_title + \"_plot_scatter_LOG_y_test_vs_y_pred.png\"\n plt.savefig(self.path_fig+fig_name)\n plt.clf()\n #------------------------------------------------------------------------------\n # ------------------------------------------------------------------------------\n # It looks like our model predicts reasonably well.\n # Let's take a look at the error distribution.\n\n error = test_predictions - y_test\n plt.hist(error, bins=25)\n plt.xlabel(\"Prediction Error [sfcprcp]\")\n plt.ylabel(\"Count\")\n fig_name = self.fig_title + \"_prediction_error.png\"\n plt.savefig(self.path_fig + fig_name)\n plt.clf()\n \n # ------------------------------------------------------------------------------\n # HISTROGRAM 2D\n\n plt.hist2d(y_test, test_predictions, cmin=1, bins=(50, 50), cmap=plt.cm.jet, range=np.array([(0.2, 60), (0.2, 60)]))\n plt.axis('equal')\n plt.axis('square')\n plt.plot([0, 100], [0, 100], ls=\"--\", c=\".3\")\n plt.xlim([0, max(y_test)])\n plt.ylim([0, max(y_test)])\n plt.colorbar()\n plt.xlabel(\"Observed rain rate (mm/h) - Training\")\n plt.ylabel(\"Predicted rain rate (mm/h) - Training\")\n fig_name = self.fig_title + 
\"_hist2D.png\"\n plt.savefig(self.path_fig + fig_name)\n plt.clf()\n # ------------------------------------------------------------------------------\n # Saving model to YAML:\n\n model_yaml = model.to_yaml()\n with open(self.mod_out_pth + self.mod_out_name + '.yaml', 'w') as yaml_file:\n yaml_file.write(model_yaml)\n\n # serialize weights to HDF5\n model.save_weights(self.mod_out_pth + self.mod_out_name + '.h5')\n print(\"Saved model to disk\")\n\n # Saving the complete model in HDF5:\n model.save(self.mod_out_pth + self.mod_out_name + '_tf.h5')\n\n # -------------------------------------------------------------------------\n # FUNCTIONS TO MAKE PLOTS ABOUT TRAINING:\n # -------------------------------------------------------------------------\n def plot_history(self, history):\n hist = pd.DataFrame(history.history)\n hist['epoch'] = history.epoch\n\n plt.xlabel('Epoch')\n plt.ylabel('Mean Abs Error [sfcprcp]')\n plt.plot(hist['epoch'], hist['mean_absolute_error'],\n label='Train Error')\n plt.plot(hist['epoch'], hist['val_mean_absolute_error'],\n label='Val Error')\n ylim_max = hist.val_mean_absolute_error.max() + 10\n plt.ylim([0, ylim_max])\n plt.legend()\n\n plt.figure()\n plt.xlabel('Epoch')\n plt.ylabel('Mean Square Error [$scfprcp^2$]')\n plt.plot(hist['epoch'], hist['mean_squared_error'],\n label='Train Error')\n plt.plot(hist['epoch'], hist['val_mean_squared_error'],\n label='Val Error')\n ylim_max = hist.val_mean_squared_error.max() + 10\n plt.ylim([0, ylim_max])\n plt.legend()\n # plt.show()\n fig_name = self.fig_title + \"_error_per_epochs_history.png\"\n plt.savefig(self.path_fig + fig_name)\n\n def plot_history_EarlyStopping(self, history):\n hist = pd.DataFrame(history.history)\n hist['epoch'] = history.epoch\n\n plt.figure()\n plt.xlabel('Epoch')\n plt.ylabel('Mean Abs Error [sfcprcp]')\n plt.plot(hist['epoch'], hist['mean_absolute_error'],\n label='Train Error')\n plt.plot(hist['epoch'], hist['val_mean_absolute_error'],\n label='Val Error')\n ylim_max = hist.val_mean_absolute_error.max() + 10\n plt.ylim([0, ylim_max])\n\n plt.legend()\n\n plt.figure()\n plt.xlabel('Epoch')\n plt.ylabel('Mean Square Error [$sfcprcp^2$]')\n plt.plot(hist['epoch'], hist['mean_squared_error'],\n label='Train Error')\n plt.plot(hist['epoch'], hist['val_mean_squared_error'],\n label='Val Error')\n ylim_max = hist.val_mean_squared_error.max() + 10\n plt.ylim([0, ylim_max])\n\n plt.legend()\n\n fig_name = self.fig_title + \"_error_per_epochs_EarlyStopping.png\"\n plt.savefig(self.path_fig + fig_name)\n"
] | [
[
"numpy.random.seed",
"matplotlib.pyplot.ylabel",
"tensorflow.keras.callbacks.EarlyStopping",
"matplotlib.pyplot.plot",
"sklearn.preprocessing.Normalizer",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.hist",
"numpy.where",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.axis",
"numpy.asanyarray",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.ylim",
"sklearn.preprocessing.StandardScaler",
"matplotlib.pyplot.colorbar",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.legend",
"numpy.cumsum",
"pandas.DataFrame",
"numpy.ravel",
"numpy.array",
"matplotlib.pyplot.xlabel"
],
[
"numpy.random.seed",
"matplotlib.pyplot.ylabel",
"tensorflow.keras.callbacks.EarlyStopping",
"matplotlib.pyplot.plot",
"sklearn.preprocessing.Normalizer",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.hist",
"numpy.where",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.axis",
"numpy.asanyarray",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.ylim",
"sklearn.preprocessing.StandardScaler",
"matplotlib.pyplot.colorbar",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.legend",
"numpy.cumsum",
"pandas.DataFrame",
"numpy.ravel",
"numpy.array",
"matplotlib.pyplot.xlabel"
]
] |
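The rain-rate training entry above follows a standard pattern: standardize the inputs, compress correlated channels with PCA, split into train/test sets, and fit a small Keras MLP with Gaussian-noise input regularization. Below is a minimal, self-contained sketch of that pattern on synthetic data; the array sizes, layer widths, and column semantics are illustrative assumptions, not values taken from the dataset.

```python
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras import layers

# Synthetic stand-in for the brightness-temperature channels and the rain-rate target.
rng = np.random.default_rng(0)
X = rng.normal(size=(1000, 10))
y = X[:, :2].sum(axis=1) + rng.normal(scale=0.1, size=1000)

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

# Fit the scaler and the PCA on the training split only, then reuse them on the test split.
scaler = StandardScaler().fit(X_train)
pca = PCA(n_components=4).fit(scaler.transform(X_train))
X_train_p = pca.transform(scaler.transform(X_train))
X_test_p = pca.transform(scaler.transform(X_test))

# Small MLP regressor with Gaussian-noise input regularization, echoing the entry above.
model = keras.Sequential([
    layers.GaussianNoise(0.01, input_shape=(X_train_p.shape[1],)),
    layers.Dense(32, activation="relu"),
    layers.Dense(12, activation="relu"),
    layers.Dense(1),
])
model.compile(loss="mean_squared_error", optimizer="adam",
              metrics=["mean_absolute_error"])
model.fit(X_train_p, y_train, epochs=5, validation_split=0.2, verbose=0)
print(model.evaluate(X_test_p, y_test, verbose=0))
```

One design point worth noting: the entry above calls `fit_transform` on both the training and the test split, whereas this sketch fits the scaler and PCA on the training data only and merely transforms the test data, which avoids leaking test statistics into the preprocessing.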
vballoli/flower | [
"e8c58c09a8fd4d29186b2f590b0cbb44bb022e9a"
] | [
"src/py/flwr_experimental/baseline/tf_fashion_mnist/gen_plots.py"
] | [
"# Copyright 2020 Adap GmbH. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Generate plots for Fashion-MNIST results.\"\"\"\n\n\nfrom typing import List, Tuple\n\nimport numpy as np\n\nfrom flwr_experimental.baseline.plot import bar_chart, line_chart\n\nRESULTS = {\n \"fedavg-t10\": [\n (0, 0.03759999945759773),\n (1, 0.03759999945759773),\n (2, 0.03759999945759773),\n (3, 0.03759999945759773),\n (4, 0.03759999945759773),\n (5, 0.03759999945759773),\n (6, 0.03759999945759773),\n (7, 0.03759999945759773),\n (8, 0.03759999945759773),\n (9, 0.03759999945759773),\n (10, 0.03759999945759773),\n (11, 0.03759999945759773),\n (12, 0.03759999945759773),\n (13, 0.03759999945759773),\n (14, 0.03759999945759773),\n (15, 0.03759999945759773),\n (16, 0.03759999945759773),\n (17, 0.03759999945759773),\n (18, 0.03759999945759773),\n (19, 0.03759999945759773),\n (20, 0.03759999945759773),\n ],\n \"fedavg-t12\": [\n (0, 0.03759999945759773),\n (1, 0.03759999945759773),\n (2, 0.03759999945759773),\n (3, 0.03759999945759773),\n (4, 0.03759999945759773),\n (5, 0.03759999945759773),\n (6, 0.03759999945759773),\n (7, 0.03759999945759773),\n (8, 0.03759999945759773),\n (9, 0.03759999945759773),\n (10, 0.03759999945759773),\n (11, 0.03759999945759773),\n (12, 0.03759999945759773),\n (13, 0.03759999945759773),\n (14, 0.03759999945759773),\n (15, 0.03759999945759773),\n (16, 0.03759999945759773),\n (17, 0.03759999945759773),\n (18, 0.03759999945759773),\n (19, 0.03759999945759773),\n (20, 0.03759999945759773),\n ],\n \"fedavg-t14\": [\n (0, 0.03759999945759773),\n (1, 0.03759999945759773),\n (2, 0.6743999719619751),\n (3, 0.6802999973297119),\n (4, 0.6802999973297119),\n (5, 0.6802999973297119),\n (6, 0.6802999973297119),\n (7, 0.7853999733924866),\n (8, 0.7853999733924866),\n (9, 0.7876999974250793),\n (10, 0.7642999887466431),\n (11, 0.8054999709129333),\n (12, 0.8181999921798706),\n (13, 0.8108999729156494),\n (14, 0.7907000184059143),\n (15, 0.763700008392334),\n (16, 0.8091999888420105),\n (17, 0.8296999931335449),\n (18, 0.8123999834060669),\n (19, 0.8123999834060669),\n (20, 0.8101999759674072),\n ],\n \"fedavg-t16\": [\n (0, 0.03759999945759773),\n (1, 0.7197999954223633),\n (2, 0.7720999717712402),\n (3, 0.7900999784469604),\n (4, 0.7811999917030334),\n (5, 0.7724000215530396),\n (6, 0.8023999929428101),\n (7, 0.8043000102043152),\n (8, 0.8230999708175659),\n (9, 0.8327999711036682),\n (10, 0.8299000263214111),\n (11, 0.8402000069618225),\n (12, 0.853600025177002),\n (13, 0.8370000123977661),\n (14, 0.83160001039505),\n (15, 0.8424000144004822),\n (16, 0.830299973487854),\n (17, 0.8476999998092651),\n (18, 0.8632000088691711),\n (19, 0.8636999726295471),\n (20, 0.8657000064849854),\n ],\n \"fedfs-t10\": [\n (0, 0.03759999945759773),\n (1, 0.7343000173568726),\n (2, 0.7664999961853027),\n (3, 0.7900000214576721),\n (4, 0.805899977684021),\n (5, 0.8237000107765198),\n (6, 0.8406999707221985),\n (7, 
0.8263000249862671),\n (8, 0.8442999720573425),\n (9, 0.8564000129699707),\n (10, 0.8651999831199646),\n (11, 0.8375999927520752),\n (12, 0.8646000027656555),\n (13, 0.8669999837875366),\n (14, 0.861299991607666),\n (15, 0.8773999810218811),\n (16, 0.800599992275238),\n (17, 0.8676999807357788),\n (18, 0.8763999938964844),\n (19, 0.8695999979972839),\n (20, 0.873199999332428),\n ],\n \"fedfs-t12\": [\n (0, 0.03759999945759773),\n (1, 0.7153000235557556),\n (2, 0.7835999727249146),\n (3, 0.8083999752998352),\n (4, 0.816100001335144),\n (5, 0.8215000033378601),\n (6, 0.8429999947547913),\n (7, 0.8464000225067139),\n (8, 0.8603000044822693),\n (9, 0.8482999801635742),\n (10, 0.8450000286102295),\n (11, 0.866599977016449),\n (12, 0.863099992275238),\n (13, 0.8709999918937683),\n (14, 0.873199999332428),\n (15, 0.8701000213623047),\n (16, 0.8600000143051147),\n (17, 0.8766999840736389),\n (18, 0.8697999715805054),\n (19, 0.8795999884605408),\n (20, 0.8830999732017517),\n ],\n \"fedfs-t14\": [\n (0, 0.03759999945759773),\n (1, 0.7245000004768372),\n (2, 0.7972000241279602),\n (3, 0.8059999942779541),\n (4, 0.8252999782562256),\n (5, 0.8334000110626221),\n (6, 0.8560000061988831),\n (7, 0.8510000109672546),\n (8, 0.8650000095367432),\n (9, 0.8621000051498413),\n (10, 0.866599977016449),\n (11, 0.8615999817848206),\n (12, 0.8636999726295471),\n (13, 0.8740000128746033),\n (14, 0.866100013256073),\n (15, 0.867900013923645),\n (16, 0.83160001039505),\n (17, 0.8741999864578247),\n (18, 0.8736000061035156),\n (19, 0.8810999989509583),\n (20, 0.8762000203132629),\n ],\n \"fedfs-t16\": [\n (0, 0.03759999945759773),\n (1, 0.7476999759674072),\n (2, 0.7982000112533569),\n (3, 0.8276000022888184),\n (4, 0.8256999850273132),\n (5, 0.8312000036239624),\n (6, 0.8536999821662903),\n (7, 0.8483999967575073),\n (8, 0.85589998960495),\n (9, 0.8687000274658203),\n (10, 0.8664000034332275),\n (11, 0.8586999773979187),\n (12, 0.8662999868392944),\n (13, 0.8754000067710876),\n (14, 0.878600001335144),\n (15, 0.8763999938964844),\n (16, 0.748199999332428),\n (17, 0.8806999921798706),\n (18, 0.8794000148773193),\n (19, 0.8813999891281128),\n (20, 0.8708000183105469),\n ],\n}\n\nRESULTS_WALL_CLOCK_TIME = {\n \"fedavg-14\": 218.49,\n \"fedfs-14\": 61.16,\n \"fedavg-16\": 153.56,\n \"fedfs-16\": 66.84,\n}\n\n\ndef accuracy_t10() -> None:\n \"\"\"Generate plots.\"\"\"\n lines = [\n (\"FedAvg, t=10\", RESULTS[\"fedavg-t10\"]),\n (\"FedFS, t=10\", RESULTS[\"fedfs-t10\"]),\n ]\n plot(lines, \"fmnist-progress-t10\")\n\n\ndef accuracy_t12() -> None:\n \"\"\"Generate plots.\"\"\"\n lines = [\n (\"FedAvg, t=12\", RESULTS[\"fedavg-t12\"]),\n (\"FedFS, t=12\", RESULTS[\"fedfs-t12\"]),\n ]\n plot(lines, \"fmnist-progress-t12\")\n\n\ndef accuracy_t14() -> None:\n \"\"\"Generate plots.\"\"\"\n lines = [\n (\"FedAvg, t=14\", RESULTS[\"fedavg-t14\"]),\n (\"FedFS, t=14\", RESULTS[\"fedfs-t14\"]),\n ]\n plot(lines, \"fmnist-progress-t14\")\n\n\ndef accuracy_t16() -> None:\n \"\"\"Generate plots.\"\"\"\n lines = [\n (\"FedAvg, t=16\", RESULTS[\"fedavg-t16\"]),\n (\"FedFS, t=16\", RESULTS[\"fedfs-t16\"]),\n ]\n plot(lines, \"fmnist-progress-t16\")\n\n\ndef accuracy_fedavg_vs_fedfs() -> None:\n \"\"\"Comparision of FedAvg vs FedFS.\"\"\"\n fedavg = [\n RESULTS[\"fedavg-t10\"][-1][1],\n RESULTS[\"fedavg-t12\"][-1][1],\n RESULTS[\"fedavg-t14\"][-1][1],\n RESULTS[\"fedavg-t16\"][-1][1],\n ]\n fedfs = [\n RESULTS[\"fedfs-t10\"][-1][1],\n RESULTS[\"fedfs-t12\"][-1][1],\n RESULTS[\"fedfs-t14\"][-1][1],\n RESULTS[\"fedfs-t16\"][-1][1],\n ]\n 
bar_chart(\n y_values=[\n np.array([x * 100 for x in fedavg]),\n np.array([x * 100 for x in fedfs]),\n ],\n bar_labels=[\"FedAvg\", \"FedFS\"],\n x_label=\"Timeout\",\n x_tick_labels=[\"T=10\", \"T=12\", \"T=14\", \"T=16\"],\n y_label=\"Accuracy\",\n filename=\"fmnist-accuracy_fedavg_vs_fedfs\",\n )\n\n\ndef wall_clock_time_fedavg_vs_fedfs() -> None:\n \"\"\"Comparison of FedAvg vs FedFS.\"\"\"\n\n bar_chart(\n y_values=[\n np.array(\n [\n RESULTS_WALL_CLOCK_TIME[\"fedavg-14\"],\n RESULTS_WALL_CLOCK_TIME[\"fedavg-16\"],\n ]\n ),\n np.array(\n [\n RESULTS_WALL_CLOCK_TIME[\"fedfs-14\"],\n RESULTS_WALL_CLOCK_TIME[\"fedfs-16\"],\n ]\n ),\n ],\n bar_labels=[\"FedAvg\", \"FedFS\"],\n x_label=\"Timeout\",\n x_tick_labels=[\"T=14\", \"T=16\"],\n y_label=\"Completion time\",\n filename=\"fmnist-time_fedavg_vs_fedfs\",\n )\n\n\ndef plot(lines: List[Tuple[str, List[Tuple[int, float]]]], filename: str) -> None:\n \"\"\"Plot a single line chart.\"\"\"\n values = [np.array([x * 100 for _, x in val]) for _, val in lines]\n labels = [label for label, _ in lines]\n line_chart(\n values, labels, \"Round\", \"Accuracy\", filename=filename, y_floor=0, y_ceil=100,\n )\n\n\ndef main() -> None:\n \"\"\"Call all plot functions.\"\"\"\n accuracy_t10()\n accuracy_t12()\n accuracy_t14()\n accuracy_t16()\n accuracy_fedavg_vs_fedfs()\n wall_clock_time_fedavg_vs_fedfs()\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.array"
]
] |
strawlab/flyvr | [
"335892cae740e53e82e07b526e1ba53fbd34b0ce"
] | [
"src/freemovr_engine/cvnumpy.py"
] | [
"import numpy as np\nimport cv2\n\ndef rodrigues2matrix_cv(params):\n rvec = np.array(params,dtype=np.float64)\n rvec.shape = (1,3)\n Rmat, jacobian = cv2.Rodrigues(rvec)\n return Rmat\n\ndef rodrigues2matrix(params):\n # Written after the docs at\n # http://opencv.itseez.com/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html#rodrigues\n\n try:\n rvec = np.array(params,dtype=np.float)\n rvec.shape = (1,3)\n except:\n print('bad rvec',rvec)\n raise\n\n theta = np.sqrt(np.sum(rvec**2))\n if theta==0:\n rvec = rvec\n else:\n rvec = rvec/theta\n r = rvec[0] # drop dim\n\n s = np.sin(theta)\n c = np.cos(theta)\n R = c*np.eye(3) + (1-c)*rvec*rvec.T + s*np.array([[0, -r[2], r[1]],\n [r[2], 0, -r[0]],\n [-r[1], r[0], 0]])\n\n # -R.T might also be considered a valid rotation matrix, but it\n # -does not have an eigenvector of 1.\n\n return R\n\ndef matrix2rodrigues(R):\n Rmat = np.array(R,dtype=np.float64)\n assert Rmat.shape == (3,3)\n rvec, jacobian = cv2.Rodrigues(Rmat)\n return rvec\n\ndef rodrigues2angle_axis(params):\n rvec = np.array(params)\n rvec.shape = (1,3)\n\n theta = np.sqrt(np.sum(rvec**2))\n if theta==0:\n rvec = rvec\n else:\n rvec = rvec/theta\n r = rvec[0] # drop dim\n return theta, r\n"
] | [
[
"numpy.eye",
"numpy.sum",
"numpy.cos",
"numpy.array",
"numpy.sin"
]
] |
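The `rodrigues2matrix` helper in the entry above hand-codes the Rodrigues formula R = cos(theta)*I + (1 - cos(theta))*r*r^T + sin(theta)*[r]_x. A short way to sanity-check such a conversion is to compare it against OpenCV's reference implementation; the sketch below assumes only NumPy and `cv2` are available, and the test vector is an arbitrary choice.

```python
import numpy as np
import cv2

rvec = np.array([0.1, 0.3, -0.4], dtype=np.float64)  # arbitrary axis-angle vector

# Reference conversion from OpenCV.
R_cv, _ = cv2.Rodrigues(rvec.reshape(3, 1))

# Hand-rolled Rodrigues formula, mirroring rodrigues2matrix above.
theta = np.linalg.norm(rvec)
r = rvec / theta
K = np.array([[0.0, -r[2], r[1]],
              [r[2], 0.0, -r[0]],
              [-r[1], r[0], 0.0]])   # cross-product matrix [r]_x
R = np.cos(theta) * np.eye(3) + (1 - np.cos(theta)) * np.outer(r, r) + np.sin(theta) * K

assert np.allclose(R, R_cv)             # both conversions agree
assert np.allclose(R @ R.T, np.eye(3))  # R is orthonormal
assert np.isclose(np.linalg.det(R), 1.0)
```

The eigenvector of R with eigenvalue 1 recovers the rotation axis r, which is the property the comment in the entry alludes to.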
kjordahl/pandas | [
"e660c058a662426afc4d8855aabf4677f01b4a4c"
] | [
"pandas/tests/test_common.py"
] | [
"# -*- coding: utf-8 -*-\nimport collections\nfrom datetime import datetime\nimport re\n\nimport nose\nfrom nose.tools import assert_equal\nimport numpy as np\nfrom pandas.tslib import iNaT, NaT\nfrom pandas import Series, DataFrame, date_range, DatetimeIndex, Timestamp, Float64Index\nfrom pandas import compat\nfrom pandas.compat import range, long, lrange, lmap, u\nfrom pandas.core.common import notnull, isnull, array_equivalent\nimport pandas.core.common as com\nimport pandas.util.testing as tm\nimport pandas.core.config as cf\n\n_multiprocess_can_split_ = True\n\n\ndef test_mut_exclusive():\n msg = \"mutually exclusive arguments: '[ab]' and '[ab]'\"\n with tm.assertRaisesRegexp(TypeError, msg):\n com._mut_exclusive(a=1, b=2)\n assert com._mut_exclusive(a=1, b=None) == 1\n assert com._mut_exclusive(major=None, major_axis=None) is None\n\n\ndef test_is_sequence():\n is_seq = com.is_sequence\n assert(is_seq((1, 2)))\n assert(is_seq([1, 2]))\n assert(not is_seq(\"abcd\"))\n assert(not is_seq(u(\"abcd\")))\n assert(not is_seq(np.int64))\n\n class A(object):\n def __getitem__(self):\n return 1\n\n assert(not is_seq(A()))\n\ndef test_get_callable_name():\n from functools import partial\n getname = com._get_callable_name\n\n def fn(x):\n return x\n lambda_ = lambda x: x\n part1 = partial(fn)\n part2 = partial(part1)\n class somecall(object):\n def __call__(self):\n return x\n\n assert getname(fn) == 'fn'\n assert getname(lambda_)\n assert getname(part1) == 'fn'\n assert getname(part2) == 'fn'\n assert getname(somecall()) == 'somecall'\n assert getname(1) is None\n\n\ndef test_notnull():\n assert notnull(1.)\n assert not notnull(None)\n assert not notnull(np.NaN)\n\n with cf.option_context(\"mode.use_inf_as_null\", False):\n assert notnull(np.inf)\n assert notnull(-np.inf)\n\n arr = np.array([1.5, np.inf, 3.5, -np.inf])\n result = notnull(arr)\n assert result.all()\n\n with cf.option_context(\"mode.use_inf_as_null\", True):\n assert not notnull(np.inf)\n assert not notnull(-np.inf)\n\n arr = np.array([1.5, np.inf, 3.5, -np.inf])\n result = notnull(arr)\n assert result.sum() == 2\n\n with cf.option_context(\"mode.use_inf_as_null\", False):\n for s in [tm.makeFloatSeries(),tm.makeStringSeries(),\n tm.makeObjectSeries(),tm.makeTimeSeries(),tm.makePeriodSeries()]:\n assert(isinstance(isnull(s), Series))\n\ndef test_isnull():\n assert not isnull(1.)\n assert isnull(None)\n assert isnull(np.NaN)\n assert not isnull(np.inf)\n assert not isnull(-np.inf)\n\n # series\n for s in [tm.makeFloatSeries(),tm.makeStringSeries(),\n tm.makeObjectSeries(),tm.makeTimeSeries(),tm.makePeriodSeries()]:\n assert(isinstance(isnull(s), Series))\n\n # frame\n for df in [tm.makeTimeDataFrame(),tm.makePeriodFrame(),tm.makeMixedDataFrame()]:\n result = isnull(df)\n expected = df.apply(isnull)\n tm.assert_frame_equal(result, expected)\n\n # panel\n for p in [ tm.makePanel(), tm.makePeriodPanel(), tm.add_nans(tm.makePanel()) ]:\n result = isnull(p)\n expected = p.apply(isnull)\n tm.assert_panel_equal(result, expected)\n\n # panel 4d\n for p in [ tm.makePanel4D(), tm.add_nans_panel4d(tm.makePanel4D()) ]:\n result = isnull(p)\n expected = p.apply(isnull)\n tm.assert_panel4d_equal(result, expected)\n\ndef test_isnull_lists():\n result = isnull([[False]])\n exp = np.array([[False]])\n assert(np.array_equal(result, exp))\n\n result = isnull([[1], [2]])\n exp = np.array([[False], [False]])\n assert(np.array_equal(result, exp))\n\n # list of strings / unicode\n result = isnull(['foo', 'bar'])\n assert(not result.any())\n\n result = 
isnull([u('foo'), u('bar')])\n assert(not result.any())\n\ndef test_isnull_nat():\n result = isnull([NaT])\n exp = np.array([True])\n assert(np.array_equal(result, exp))\n\n result = isnull(np.array([NaT], dtype=object))\n exp = np.array([True])\n assert(np.array_equal(result, exp))\n\ndef test_isnull_datetime():\n assert (not isnull(datetime.now()))\n assert notnull(datetime.now())\n\n idx = date_range('1/1/1990', periods=20)\n assert(notnull(idx).all())\n\n idx = np.asarray(idx)\n idx[0] = iNaT\n idx = DatetimeIndex(idx)\n mask = isnull(idx)\n assert(mask[0])\n assert(not mask[1:].any())\n\n # GH 9129\n pidx = idx.to_period(freq='M')\n mask = isnull(pidx)\n assert(mask[0])\n assert(not mask[1:].any())\n\n mask = isnull(pidx[1:])\n assert(not mask.any())\n\n\nclass TestIsNull(tm.TestCase):\n def test_0d_array(self):\n self.assertTrue(isnull(np.array(np.nan)))\n self.assertFalse(isnull(np.array(0.0)))\n self.assertFalse(isnull(np.array(0)))\n # test object dtype\n self.assertTrue(isnull(np.array(np.nan, dtype=object)))\n self.assertFalse(isnull(np.array(0.0, dtype=object)))\n self.assertFalse(isnull(np.array(0, dtype=object)))\n\n\ndef test_downcast_conv():\n # test downcasting\n\n arr = np.array([8.5, 8.6, 8.7, 8.8, 8.9999999999995])\n result = com._possibly_downcast_to_dtype(arr, 'infer')\n assert (np.array_equal(result, arr))\n\n arr = np.array([8., 8., 8., 8., 8.9999999999995])\n result = com._possibly_downcast_to_dtype(arr, 'infer')\n expected = np.array([8, 8, 8, 8, 9])\n assert (np.array_equal(result, expected))\n\n arr = np.array([8., 8., 8., 8., 9.0000000000005])\n result = com._possibly_downcast_to_dtype(arr, 'infer')\n expected = np.array([8, 8, 8, 8, 9])\n assert (np.array_equal(result, expected))\n\n # conversions\n\n expected = np.array([1,2])\n for dtype in [np.float64,object,np.int64]:\n arr = np.array([1.0,2.0],dtype=dtype)\n result = com._possibly_downcast_to_dtype(arr,'infer')\n tm.assert_almost_equal(result, expected)\n\n expected = np.array([1.0,2.0,np.nan])\n for dtype in [np.float64,object]:\n arr = np.array([1.0,2.0,np.nan],dtype=dtype)\n result = com._possibly_downcast_to_dtype(arr,'infer')\n tm.assert_almost_equal(result, expected)\n\n # empties\n for dtype in [np.int32,np.float64,np.float32,np.bool_,np.int64,object]:\n arr = np.array([],dtype=dtype)\n result = com._possibly_downcast_to_dtype(arr,'int64')\n tm.assert_almost_equal(result, np.array([],dtype=np.int64))\n assert result.dtype == np.int64\n\ndef test_array_equivalent():\n assert array_equivalent(np.array([np.nan, np.nan]),\n np.array([np.nan, np.nan]))\n assert array_equivalent(np.array([np.nan, 1, np.nan]),\n np.array([np.nan, 1, np.nan]))\n assert array_equivalent(np.array([np.nan, None], dtype='object'),\n np.array([np.nan, None], dtype='object'))\n assert array_equivalent(np.array([np.nan, 1+1j], dtype='complex'),\n np.array([np.nan, 1+1j], dtype='complex'))\n assert not array_equivalent(np.array([np.nan, 1+1j], dtype='complex'),\n np.array([np.nan, 1+2j], dtype='complex'))\n assert not array_equivalent(np.array([np.nan, 1, np.nan]),\n np.array([np.nan, 2, np.nan]))\n assert not array_equivalent(np.array(['a', 'b', 'c', 'd']), np.array(['e', 'e']))\n assert array_equivalent(Float64Index([0, np.nan]), Float64Index([0, np.nan]))\n assert not array_equivalent(Float64Index([0, np.nan]), Float64Index([1, np.nan]))\n assert array_equivalent(DatetimeIndex([0, np.nan]), DatetimeIndex([0, np.nan]))\n assert not array_equivalent(DatetimeIndex([0, np.nan]), DatetimeIndex([1, np.nan]))\n\ndef 
test_datetimeindex_from_empty_datetime64_array():\n for unit in [ 'ms', 'us', 'ns' ]:\n idx = DatetimeIndex(np.array([], dtype='datetime64[%s]' % unit))\n assert(len(idx) == 0)\n\n\ndef test_nan_to_nat_conversions():\n\n df = DataFrame(dict({\n 'A' : np.asarray(lrange(10),dtype='float64'),\n 'B' : Timestamp('20010101') }))\n df.iloc[3:6,:] = np.nan\n result = df.loc[4,'B'].value\n assert(result == iNaT)\n\n s = df['B'].copy()\n s._data = s._data.setitem(indexer=tuple([slice(8,9)]),value=np.nan)\n assert(isnull(s[8]))\n\n # numpy < 1.7.0 is wrong\n from distutils.version import LooseVersion\n if LooseVersion(np.__version__) >= '1.7.0':\n assert(s[8].value == np.datetime64('NaT').astype(np.int64))\n\n\ndef test_any_none():\n assert(com._any_none(1, 2, 3, None))\n assert(not com._any_none(1, 2, 3, 4))\n\n\ndef test_all_not_none():\n assert(com._all_not_none(1, 2, 3, 4))\n assert(not com._all_not_none(1, 2, 3, None))\n assert(not com._all_not_none(None, None, None, None))\n\n\ndef test_repr_binary_type():\n import string\n letters = string.ascii_letters\n btype = compat.binary_type\n try:\n raw = btype(letters, encoding=cf.get_option('display.encoding'))\n except TypeError:\n raw = btype(letters)\n b = compat.text_type(compat.bytes_to_str(raw))\n res = com.pprint_thing(b, quote_strings=True)\n assert_equal(res, repr(b))\n res = com.pprint_thing(b, quote_strings=False)\n assert_equal(res, b)\n\n\ndef test_adjoin():\n data = [['a', 'b', 'c'],\n ['dd', 'ee', 'ff'],\n ['ggg', 'hhh', 'iii']]\n expected = 'a dd ggg\\nb ee hhh\\nc ff iii'\n\n adjoined = com.adjoin(2, *data)\n\n assert(adjoined == expected)\n\n\ndef test_iterpairs():\n data = [1, 2, 3, 4]\n expected = [(1, 2),\n (2, 3),\n (3, 4)]\n\n result = list(com.iterpairs(data))\n\n assert(result == expected)\n\n\ndef test_split_ranges():\n def _bin(x, width):\n \"return int(x) as a base2 string of given width\"\n return ''.join(str((x >> i) & 1) for i in range(width - 1, -1, -1))\n\n def test_locs(mask):\n nfalse = sum(np.array(mask) == 0)\n\n remaining = 0\n for s, e in com.split_ranges(mask):\n remaining += e - s\n\n assert 0 not in mask[s:e]\n\n # make sure the total items covered by the ranges are a complete cover\n assert remaining + nfalse == len(mask)\n\n # exhaustively test all possible mask sequences of length 8\n ncols = 8\n for i in range(2 ** ncols):\n cols = lmap(int, list(_bin(i, ncols))) # count up in base2\n mask = [cols[i] == 1 for i in range(len(cols))]\n test_locs(mask)\n\n # base cases\n test_locs([])\n test_locs([0])\n test_locs([1])\n\n\ndef test_indent():\n s = 'a b c\\nd e f'\n result = com.indent(s, spaces=6)\n\n assert(result == ' a b c\\n d e f')\n\n\ndef test_banner():\n ban = com.banner('hi')\n assert(ban == ('%s\\nhi\\n%s' % ('=' * 80, '=' * 80)))\n\n\ndef test_map_indices_py():\n data = [4, 3, 2, 1]\n expected = {4: 0, 3: 1, 2: 2, 1: 3}\n\n result = com.map_indices_py(data)\n\n assert(result == expected)\n\n\ndef test_union():\n a = [1, 2, 3]\n b = [4, 5, 6]\n\n union = sorted(com.union(a, b))\n\n assert((a + b) == union)\n\n\ndef test_difference():\n a = [1, 2, 3]\n b = [1, 2, 3, 4, 5, 6]\n\n inter = sorted(com.difference(b, a))\n\n assert([4, 5, 6] == inter)\n\n\ndef test_intersection():\n a = [1, 2, 3]\n b = [1, 2, 3, 4, 5, 6]\n\n inter = sorted(com.intersection(a, b))\n\n assert(a == inter)\n\n\ndef test_groupby():\n values = ['foo', 'bar', 'baz', 'baz2', 'qux', 'foo3']\n expected = {'f': ['foo', 'foo3'],\n 'b': ['bar', 'baz', 'baz2'],\n 'q': ['qux']}\n\n grouped = com.groupby(values, lambda x: x[0])\n\n for 
k, v in grouped:\n assert v == expected[k]\n\n\ndef test_is_list_like():\n passes = ([], [1], (1,), (1, 2), {'a': 1}, set([1, 'a']), Series([1]),\n Series([]), Series(['a']).str)\n fails = (1, '2', object())\n\n for p in passes:\n assert com.is_list_like(p)\n\n for f in fails:\n assert not com.is_list_like(f)\n\n\ndef test_is_hashable():\n\n # all new-style classes are hashable by default\n class HashableClass(object):\n pass\n\n class UnhashableClass1(object):\n __hash__ = None\n\n class UnhashableClass2(object):\n def __hash__(self):\n raise TypeError(\"Not hashable\")\n\n hashable = (\n 1, 3.14, np.float64(3.14), 'a', tuple(), (1,), HashableClass(),\n )\n not_hashable = (\n [], UnhashableClass1(),\n )\n abc_hashable_not_really_hashable = (\n ([],), UnhashableClass2(),\n )\n\n for i in hashable:\n assert com.is_hashable(i)\n for i in not_hashable:\n assert not com.is_hashable(i)\n for i in abc_hashable_not_really_hashable:\n assert not com.is_hashable(i)\n\n # numpy.array is no longer collections.Hashable as of\n # https://github.com/numpy/numpy/pull/5326, just test\n # pandas.common.is_hashable()\n assert not com.is_hashable(np.array([]))\n\n # old-style classes in Python 2 don't appear hashable to\n # collections.Hashable but also seem to support hash() by default\n if compat.PY2:\n class OldStyleClass():\n pass\n c = OldStyleClass()\n assert not isinstance(c, collections.Hashable)\n assert com.is_hashable(c)\n hash(c) # this will not raise\n\n\ndef test_ensure_int32():\n values = np.arange(10, dtype=np.int32)\n result = com._ensure_int32(values)\n assert(result.dtype == np.int32)\n\n values = np.arange(10, dtype=np.int64)\n result = com._ensure_int32(values)\n assert(result.dtype == np.int32)\n\n\ndef test_ensure_platform_int():\n\n # verify that when we create certain types of indices\n # they remain the correct type under platform conversions\n from pandas.core.index import Int64Index\n\n # int64\n x = Int64Index([1, 2, 3], dtype='int64')\n assert(x.dtype == np.int64)\n\n pi = com._ensure_platform_int(x)\n assert(pi.dtype == np.int_)\n\n # int32\n x = Int64Index([1, 2, 3], dtype='int32')\n assert(x.dtype == np.int32)\n\n pi = com._ensure_platform_int(x)\n assert(pi.dtype == np.int_)\n\n# TODO: fix this broken test\n\n# def test_console_encode():\n# \"\"\"\n# On Python 2, if sys.stdin.encoding is None (IPython with zmq frontend)\n# common.console_encode should encode things as utf-8.\n# \"\"\"\n# if compat.PY3:\n# raise nose.SkipTest\n\n# with tm.stdin_encoding(encoding=None):\n# result = com.console_encode(u\"\\u05d0\")\n# expected = u\"\\u05d0\".encode('utf-8')\n# assert (result == expected)\n\n\ndef test_is_re():\n passes = re.compile('ad'),\n fails = 'x', 2, 3, object()\n\n for p in passes:\n assert com.is_re(p)\n\n for f in fails:\n assert not com.is_re(f)\n\n\ndef test_is_recompilable():\n passes = (r'a', u('x'), r'asdf', re.compile('adsf'),\n u(r'\\u2233\\s*'), re.compile(r''))\n fails = 1, [], object()\n\n for p in passes:\n assert com.is_re_compilable(p)\n\n for f in fails:\n assert not com.is_re_compilable(f)\n\ndef test_random_state():\n import numpy.random as npr\n # Check with seed\n state = com._random_state(5)\n assert_equal(state.uniform(), npr.RandomState(5).uniform())\n\n # Check with random state object\n state2 = npr.RandomState(10)\n assert_equal(com._random_state(state2).uniform(), npr.RandomState(10).uniform())\n\n # check with no arg random state\n assert isinstance(com._random_state(), npr.RandomState)\n\n # Error for floats or strings\n with 
tm.assertRaises(ValueError):\n com._random_state('test')\n\n with tm.assertRaises(ValueError):\n com._random_state(5.5)\n\n\ndef test_maybe_match_name():\n\n matched = com._maybe_match_name(Series([1], name='x'), Series([2], name='x'))\n assert(matched == 'x')\n\n matched = com._maybe_match_name(Series([1], name='x'), Series([2], name='y'))\n assert(matched is None)\n\n matched = com._maybe_match_name(Series([1]), Series([2], name='x'))\n assert(matched is None)\n\n matched = com._maybe_match_name(Series([1], name='x'), Series([2]))\n assert(matched is None)\n\n matched = com._maybe_match_name(Series([1], name='x'), [2])\n assert(matched == 'x')\n\n matched = com._maybe_match_name([1], Series([2], name='y'))\n assert(matched == 'y')\n\n\nclass TestTake(tm.TestCase):\n # standard incompatible fill error\n fill_error = re.compile(\"Incompatible type for fill_value\")\n\n _multiprocess_can_split_ = True\n\n def test_1d_with_out(self):\n def _test_dtype(dtype, can_hold_na):\n data = np.random.randint(0, 2, 4).astype(dtype)\n\n indexer = [2, 1, 0, 1]\n out = np.empty(4, dtype=dtype)\n com.take_1d(data, indexer, out=out)\n expected = data.take(indexer)\n tm.assert_almost_equal(out, expected)\n\n indexer = [2, 1, 0, -1]\n out = np.empty(4, dtype=dtype)\n if can_hold_na:\n com.take_1d(data, indexer, out=out)\n expected = data.take(indexer)\n expected[3] = np.nan\n tm.assert_almost_equal(out, expected)\n else:\n with tm.assertRaisesRegexp(TypeError, self.fill_error):\n com.take_1d(data, indexer, out=out)\n # no exception o/w\n data.take(indexer, out=out)\n\n _test_dtype(np.float64, True)\n _test_dtype(np.float32, True)\n _test_dtype(np.uint64, False)\n _test_dtype(np.uint32, False)\n _test_dtype(np.uint16, False)\n _test_dtype(np.uint8, False)\n _test_dtype(np.int64, False)\n _test_dtype(np.int32, False)\n _test_dtype(np.int16, False)\n _test_dtype(np.int8, False)\n _test_dtype(np.object_, True)\n _test_dtype(np.bool, False)\n\n def test_1d_fill_nonna(self):\n def _test_dtype(dtype, fill_value, out_dtype):\n data = np.random.randint(0, 2, 4).astype(dtype)\n\n indexer = [2, 1, 0, -1]\n\n result = com.take_1d(data, indexer, fill_value=fill_value)\n assert((result[[0, 1, 2]] == data[[2, 1, 0]]).all())\n assert(result[3] == fill_value)\n assert(result.dtype == out_dtype)\n\n indexer = [2, 1, 0, 1]\n\n result = com.take_1d(data, indexer, fill_value=fill_value)\n assert((result[[0, 1, 2, 3]] == data[indexer]).all())\n assert(result.dtype == dtype)\n\n _test_dtype(np.int8, np.int16(127), np.int8)\n _test_dtype(np.int8, np.int16(128), np.int16)\n _test_dtype(np.int32, 1, np.int32)\n _test_dtype(np.int32, 2.0, np.float64)\n _test_dtype(np.int32, 3.0 + 4.0j, np.complex128)\n _test_dtype(np.int32, True, np.object_)\n _test_dtype(np.int32, '', np.object_)\n _test_dtype(np.float64, 1, np.float64)\n _test_dtype(np.float64, 2.0, np.float64)\n _test_dtype(np.float64, 3.0 + 4.0j, np.complex128)\n _test_dtype(np.float64, True, np.object_)\n _test_dtype(np.float64, '', np.object_)\n _test_dtype(np.complex128, 1, np.complex128)\n _test_dtype(np.complex128, 2.0, np.complex128)\n _test_dtype(np.complex128, 3.0 + 4.0j, np.complex128)\n _test_dtype(np.complex128, True, np.object_)\n _test_dtype(np.complex128, '', np.object_)\n _test_dtype(np.bool_, 1, np.object_)\n _test_dtype(np.bool_, 2.0, np.object_)\n _test_dtype(np.bool_, 3.0 + 4.0j, np.object_)\n _test_dtype(np.bool_, True, np.bool_)\n _test_dtype(np.bool_, '', np.object_)\n\n def test_2d_with_out(self):\n def _test_dtype(dtype, can_hold_na, writeable=True):\n data 
= np.random.randint(0, 2, (5, 3)).astype(dtype)\n data.flags.writeable = writeable\n\n indexer = [2, 1, 0, 1]\n out0 = np.empty((4, 3), dtype=dtype)\n out1 = np.empty((5, 4), dtype=dtype)\n com.take_nd(data, indexer, out=out0, axis=0)\n com.take_nd(data, indexer, out=out1, axis=1)\n expected0 = data.take(indexer, axis=0)\n expected1 = data.take(indexer, axis=1)\n tm.assert_almost_equal(out0, expected0)\n tm.assert_almost_equal(out1, expected1)\n\n indexer = [2, 1, 0, -1]\n out0 = np.empty((4, 3), dtype=dtype)\n out1 = np.empty((5, 4), dtype=dtype)\n if can_hold_na:\n com.take_nd(data, indexer, out=out0, axis=0)\n com.take_nd(data, indexer, out=out1, axis=1)\n expected0 = data.take(indexer, axis=0)\n expected1 = data.take(indexer, axis=1)\n expected0[3, :] = np.nan\n expected1[:, 3] = np.nan\n tm.assert_almost_equal(out0, expected0)\n tm.assert_almost_equal(out1, expected1)\n else:\n for i, out in enumerate([out0, out1]):\n with tm.assertRaisesRegexp(TypeError, self.fill_error):\n com.take_nd(data, indexer, out=out, axis=i)\n # no exception o/w\n data.take(indexer, out=out, axis=i)\n\n for writeable in [True, False]:\n # Check that take_nd works both with writeable arrays (in which\n # case fast typed memoryviews implementation) and read-only\n # arrays alike.\n _test_dtype(np.float64, True, writeable=writeable)\n _test_dtype(np.float32, True, writeable=writeable)\n _test_dtype(np.uint64, False, writeable=writeable)\n _test_dtype(np.uint32, False, writeable=writeable)\n _test_dtype(np.uint16, False, writeable=writeable)\n _test_dtype(np.uint8, False, writeable=writeable)\n _test_dtype(np.int64, False, writeable=writeable)\n _test_dtype(np.int32, False, writeable=writeable)\n _test_dtype(np.int16, False, writeable=writeable)\n _test_dtype(np.int8, False, writeable=writeable)\n _test_dtype(np.object_, True, writeable=writeable)\n _test_dtype(np.bool, False, writeable=writeable)\n\n def test_2d_fill_nonna(self):\n def _test_dtype(dtype, fill_value, out_dtype):\n data = np.random.randint(0, 2, (5, 3)).astype(dtype)\n\n indexer = [2, 1, 0, -1]\n\n result = com.take_nd(data, indexer, axis=0, fill_value=fill_value)\n assert((result[[0, 1, 2], :] == data[[2, 1, 0], :]).all())\n assert((result[3, :] == fill_value).all())\n assert(result.dtype == out_dtype)\n\n result = com.take_nd(data, indexer, axis=1, fill_value=fill_value)\n assert((result[:, [0, 1, 2]] == data[:, [2, 1, 0]]).all())\n assert((result[:, 3] == fill_value).all())\n assert(result.dtype == out_dtype)\n\n indexer = [2, 1, 0, 1]\n\n result = com.take_nd(data, indexer, axis=0, fill_value=fill_value)\n assert((result[[0, 1, 2, 3], :] == data[indexer, :]).all())\n assert(result.dtype == dtype)\n\n result = com.take_nd(data, indexer, axis=1, fill_value=fill_value)\n assert((result[:, [0, 1, 2, 3]] == data[:, indexer]).all())\n assert(result.dtype == dtype)\n\n _test_dtype(np.int8, np.int16(127), np.int8)\n _test_dtype(np.int8, np.int16(128), np.int16)\n _test_dtype(np.int32, 1, np.int32)\n _test_dtype(np.int32, 2.0, np.float64)\n _test_dtype(np.int32, 3.0 + 4.0j, np.complex128)\n _test_dtype(np.int32, True, np.object_)\n _test_dtype(np.int32, '', np.object_)\n _test_dtype(np.float64, 1, np.float64)\n _test_dtype(np.float64, 2.0, np.float64)\n _test_dtype(np.float64, 3.0 + 4.0j, np.complex128)\n _test_dtype(np.float64, True, np.object_)\n _test_dtype(np.float64, '', np.object_)\n _test_dtype(np.complex128, 1, np.complex128)\n _test_dtype(np.complex128, 2.0, np.complex128)\n _test_dtype(np.complex128, 3.0 + 4.0j, np.complex128)\n 
_test_dtype(np.complex128, True, np.object_)\n _test_dtype(np.complex128, '', np.object_)\n _test_dtype(np.bool_, 1, np.object_)\n _test_dtype(np.bool_, 2.0, np.object_)\n _test_dtype(np.bool_, 3.0 + 4.0j, np.object_)\n _test_dtype(np.bool_, True, np.bool_)\n _test_dtype(np.bool_, '', np.object_)\n\n def test_3d_with_out(self):\n def _test_dtype(dtype, can_hold_na):\n data = np.random.randint(0, 2, (5, 4, 3)).astype(dtype)\n\n indexer = [2, 1, 0, 1]\n out0 = np.empty((4, 4, 3), dtype=dtype)\n out1 = np.empty((5, 4, 3), dtype=dtype)\n out2 = np.empty((5, 4, 4), dtype=dtype)\n com.take_nd(data, indexer, out=out0, axis=0)\n com.take_nd(data, indexer, out=out1, axis=1)\n com.take_nd(data, indexer, out=out2, axis=2)\n expected0 = data.take(indexer, axis=0)\n expected1 = data.take(indexer, axis=1)\n expected2 = data.take(indexer, axis=2)\n tm.assert_almost_equal(out0, expected0)\n tm.assert_almost_equal(out1, expected1)\n tm.assert_almost_equal(out2, expected2)\n\n indexer = [2, 1, 0, -1]\n out0 = np.empty((4, 4, 3), dtype=dtype)\n out1 = np.empty((5, 4, 3), dtype=dtype)\n out2 = np.empty((5, 4, 4), dtype=dtype)\n if can_hold_na:\n com.take_nd(data, indexer, out=out0, axis=0)\n com.take_nd(data, indexer, out=out1, axis=1)\n com.take_nd(data, indexer, out=out2, axis=2)\n expected0 = data.take(indexer, axis=0)\n expected1 = data.take(indexer, axis=1)\n expected2 = data.take(indexer, axis=2)\n expected0[3, :, :] = np.nan\n expected1[:, 3, :] = np.nan\n expected2[:, :, 3] = np.nan\n tm.assert_almost_equal(out0, expected0)\n tm.assert_almost_equal(out1, expected1)\n tm.assert_almost_equal(out2, expected2)\n else:\n for i, out in enumerate([out0, out1, out2]):\n with tm.assertRaisesRegexp(TypeError, self.fill_error):\n com.take_nd(data, indexer, out=out, axis=i)\n # no exception o/w\n data.take(indexer, out=out, axis=i)\n\n _test_dtype(np.float64, True)\n _test_dtype(np.float32, True)\n _test_dtype(np.uint64, False)\n _test_dtype(np.uint32, False)\n _test_dtype(np.uint16, False)\n _test_dtype(np.uint8, False)\n _test_dtype(np.int64, False)\n _test_dtype(np.int32, False)\n _test_dtype(np.int16, False)\n _test_dtype(np.int8, False)\n _test_dtype(np.object_, True)\n _test_dtype(np.bool, False)\n\n def test_3d_fill_nonna(self):\n def _test_dtype(dtype, fill_value, out_dtype):\n data = np.random.randint(0, 2, (5, 4, 3)).astype(dtype)\n\n indexer = [2, 1, 0, -1]\n\n result = com.take_nd(data, indexer, axis=0, fill_value=fill_value)\n assert((result[[0, 1, 2], :, :] == data[[2, 1, 0], :, :]).all())\n assert((result[3, :, :] == fill_value).all())\n assert(result.dtype == out_dtype)\n\n result = com.take_nd(data, indexer, axis=1, fill_value=fill_value)\n assert((result[:, [0, 1, 2], :] == data[:, [2, 1, 0], :]).all())\n assert((result[:, 3, :] == fill_value).all())\n assert(result.dtype == out_dtype)\n\n result = com.take_nd(data, indexer, axis=2, fill_value=fill_value)\n assert((result[:, :, [0, 1, 2]] == data[:, :, [2, 1, 0]]).all())\n assert((result[:, :, 3] == fill_value).all())\n assert(result.dtype == out_dtype)\n\n indexer = [2, 1, 0, 1]\n\n result = com.take_nd(data, indexer, axis=0, fill_value=fill_value)\n assert((result[[0, 1, 2, 3], :, :] == data[indexer, :, :]).all())\n assert(result.dtype == dtype)\n\n result = com.take_nd(data, indexer, axis=1, fill_value=fill_value)\n assert((result[:, [0, 1, 2, 3], :] == data[:, indexer, :]).all())\n assert(result.dtype == dtype)\n\n result = com.take_nd(data, indexer, axis=2, fill_value=fill_value)\n assert((result[:, :, [0, 1, 2, 3]] == data[:, :, 
indexer]).all())\n assert(result.dtype == dtype)\n\n _test_dtype(np.int8, np.int16(127), np.int8)\n _test_dtype(np.int8, np.int16(128), np.int16)\n _test_dtype(np.int32, 1, np.int32)\n _test_dtype(np.int32, 2.0, np.float64)\n _test_dtype(np.int32, 3.0 + 4.0j, np.complex128)\n _test_dtype(np.int32, True, np.object_)\n _test_dtype(np.int32, '', np.object_)\n _test_dtype(np.float64, 1, np.float64)\n _test_dtype(np.float64, 2.0, np.float64)\n _test_dtype(np.float64, 3.0 + 4.0j, np.complex128)\n _test_dtype(np.float64, True, np.object_)\n _test_dtype(np.float64, '', np.object_)\n _test_dtype(np.complex128, 1, np.complex128)\n _test_dtype(np.complex128, 2.0, np.complex128)\n _test_dtype(np.complex128, 3.0 + 4.0j, np.complex128)\n _test_dtype(np.complex128, True, np.object_)\n _test_dtype(np.complex128, '', np.object_)\n _test_dtype(np.bool_, 1, np.object_)\n _test_dtype(np.bool_, 2.0, np.object_)\n _test_dtype(np.bool_, 3.0 + 4.0j, np.object_)\n _test_dtype(np.bool_, True, np.bool_)\n _test_dtype(np.bool_, '', np.object_)\n\n def test_1d_other_dtypes(self):\n arr = np.random.randn(10).astype(np.float32)\n\n indexer = [1, 2, 3, -1]\n result = com.take_1d(arr, indexer)\n expected = arr.take(indexer)\n expected[-1] = np.nan\n tm.assert_almost_equal(result, expected)\n\n def test_2d_other_dtypes(self):\n arr = np.random.randn(10, 5).astype(np.float32)\n\n indexer = [1, 2, 3, -1]\n\n # axis=0\n result = com.take_nd(arr, indexer, axis=0)\n expected = arr.take(indexer, axis=0)\n expected[-1] = np.nan\n tm.assert_almost_equal(result, expected)\n\n # axis=1\n result = com.take_nd(arr, indexer, axis=1)\n expected = arr.take(indexer, axis=1)\n expected[:, -1] = np.nan\n tm.assert_almost_equal(result, expected)\n\n def test_1d_bool(self):\n arr = np.array([0, 1, 0], dtype=bool)\n\n result = com.take_1d(arr, [0, 2, 2, 1])\n expected = arr.take([0, 2, 2, 1])\n self.assert_numpy_array_equal(result, expected)\n\n result = com.take_1d(arr, [0, 2, -1])\n self.assertEqual(result.dtype, np.object_)\n\n def test_2d_bool(self):\n arr = np.array([[0, 1, 0],\n [1, 0, 1],\n [0, 1, 1]], dtype=bool)\n\n result = com.take_nd(arr, [0, 2, 2, 1])\n expected = arr.take([0, 2, 2, 1], axis=0)\n self.assert_numpy_array_equal(result, expected)\n\n result = com.take_nd(arr, [0, 2, 2, 1], axis=1)\n expected = arr.take([0, 2, 2, 1], axis=1)\n self.assert_numpy_array_equal(result, expected)\n\n result = com.take_nd(arr, [0, 2, -1])\n self.assertEqual(result.dtype, np.object_)\n\n def test_2d_float32(self):\n arr = np.random.randn(4, 3).astype(np.float32)\n indexer = [0, 2, -1, 1, -1]\n\n # axis=0\n result = com.take_nd(arr, indexer, axis=0)\n result2 = np.empty_like(result)\n com.take_nd(arr, indexer, axis=0, out=result2)\n tm.assert_almost_equal(result, result2)\n\n expected = arr.take(indexer, axis=0)\n expected[[2, 4], :] = np.nan\n tm.assert_almost_equal(result, expected)\n\n #### this now accepts a float32! 
# test with float64 out buffer\n out = np.empty((len(indexer), arr.shape[1]), dtype='float32')\n com.take_nd(arr, indexer, out=out) # it works!\n\n # axis=1\n result = com.take_nd(arr, indexer, axis=1)\n result2 = np.empty_like(result)\n com.take_nd(arr, indexer, axis=1, out=result2)\n tm.assert_almost_equal(result, result2)\n\n expected = arr.take(indexer, axis=1)\n expected[:, [2, 4]] = np.nan\n tm.assert_almost_equal(result, expected)\n\n def test_2d_datetime64(self):\n # 2005/01/01 - 2006/01/01\n arr = np.random.randint(long(11045376), long(11360736), (5,3))*100000000000\n arr = arr.view(dtype='datetime64[ns]')\n indexer = [0, 2, -1, 1, -1]\n\n # axis=0\n result = com.take_nd(arr, indexer, axis=0)\n result2 = np.empty_like(result)\n com.take_nd(arr, indexer, axis=0, out=result2)\n tm.assert_almost_equal(result, result2)\n\n expected = arr.take(indexer, axis=0)\n expected.view(np.int64)[[2, 4], :] = iNaT\n tm.assert_almost_equal(result, expected)\n\n result = com.take_nd(arr, indexer, axis=0,\n fill_value=datetime(2007, 1, 1))\n result2 = np.empty_like(result)\n com.take_nd(arr, indexer, out=result2, axis=0,\n fill_value=datetime(2007, 1, 1))\n tm.assert_almost_equal(result, result2)\n\n expected = arr.take(indexer, axis=0)\n expected[[2, 4], :] = datetime(2007, 1, 1)\n tm.assert_almost_equal(result, expected)\n\n # axis=1\n result = com.take_nd(arr, indexer, axis=1)\n result2 = np.empty_like(result)\n com.take_nd(arr, indexer, axis=1, out=result2)\n tm.assert_almost_equal(result, result2)\n\n expected = arr.take(indexer, axis=1)\n expected.view(np.int64)[:, [2, 4]] = iNaT\n tm.assert_almost_equal(result, expected)\n\n result = com.take_nd(arr, indexer, axis=1,\n fill_value=datetime(2007, 1, 1))\n result2 = np.empty_like(result)\n com.take_nd(arr, indexer, out=result2, axis=1,\n fill_value=datetime(2007, 1, 1))\n tm.assert_almost_equal(result, result2)\n\n expected = arr.take(indexer, axis=1)\n expected[:, [2, 4]] = datetime(2007, 1, 1)\n tm.assert_almost_equal(result, expected)\n\n\nclass TestMaybe(tm.TestCase):\n\n def test_maybe_convert_string_to_array(self):\n result = com._maybe_convert_string_to_object('x')\n tm.assert_numpy_array_equal(result, np.array(['x'], dtype=object))\n self.assertTrue(result.dtype == object)\n\n result = com._maybe_convert_string_to_object(1)\n self.assertEqual(result, 1)\n\n arr = np.array(['x', 'y'], dtype=str)\n result = com._maybe_convert_string_to_object(arr)\n tm.assert_numpy_array_equal(result, np.array(['x', 'y'], dtype=object))\n self.assertTrue(result.dtype == object)\n\n # unicode\n arr = np.array(['x', 'y']).astype('U')\n result = com._maybe_convert_string_to_object(arr)\n tm.assert_numpy_array_equal(result, np.array(['x', 'y'], dtype=object))\n self.assertTrue(result.dtype == object)\n\n # object\n arr = np.array(['x', 2], dtype=object)\n result = com._maybe_convert_string_to_object(arr)\n tm.assert_numpy_array_equal(result, np.array(['x', 2], dtype=object))\n self.assertTrue(result.dtype == object)\n\n\ndef test_dict_compat():\n data_datetime64 = {np.datetime64('1990-03-15'): 1,\n np.datetime64('2015-03-15'): 2}\n data_unchanged = {1: 2, 3: 4, 5: 6}\n expected = {Timestamp('1990-3-15'): 1, Timestamp('2015-03-15'): 2}\n assert(com._dict_compat(data_datetime64) == expected)\n assert(com._dict_compat(expected) == expected)\n assert(com._dict_compat(data_unchanged) == data_unchanged)\n\n\nif __name__ == '__main__':\n nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],\n exit=False)\n"
] | [
[
"pandas.util.testing.makeTimeSeries",
"pandas.util.testing.makePeriodPanel",
"pandas.core.common.isnull",
"pandas.Series",
"pandas.compat.bytes_to_str",
"pandas.core.common.banner",
"pandas.core.common.is_hashable",
"pandas.core.common.intersection",
"pandas.compat.long",
"numpy.asarray",
"pandas.Float64Index",
"pandas.core.common.take_1d",
"numpy.random.RandomState",
"pandas.core.common.is_re_compilable",
"pandas.core.common._ensure_int32",
"pandas.util.testing.makePeriodFrame",
"pandas.core.common._maybe_convert_string_to_object",
"pandas.util.testing.assert_almost_equal",
"numpy.datetime64",
"pandas.core.common._all_not_none",
"numpy.float64",
"pandas.core.common.union",
"pandas.core.index.Int64Index",
"pandas.core.config.get_option",
"pandas.util.testing.makeTimeDataFrame",
"pandas.core.common.is_re",
"pandas.util.testing.assert_panel_equal",
"numpy.empty_like",
"pandas.util.testing.assert_panel4d_equal",
"pandas.core.common._ensure_platform_int",
"pandas.core.common._dict_compat",
"pandas.core.common.is_list_like",
"pandas.core.common.map_indices_py",
"pandas.util.testing.makeFloatSeries",
"pandas.core.common.take_nd",
"pandas.util.testing.makeObjectSeries",
"pandas.util.testing.makePeriodSeries",
"pandas.Timestamp",
"numpy.random.randint",
"pandas.core.common._random_state",
"pandas.core.common.indent",
"pandas.date_range",
"numpy.arange",
"pandas.core.common.iterpairs",
"pandas.util.testing.assertRaisesRegexp",
"pandas.core.common._possibly_downcast_to_dtype",
"pandas.core.common._any_none",
"pandas.core.common.split_ranges",
"pandas.core.common.notnull",
"pandas.util.testing.makeMixedDataFrame",
"pandas.compat.u",
"pandas.core.common.adjoin",
"numpy.int16",
"pandas.DatetimeIndex",
"numpy.empty",
"pandas.core.common.groupby",
"pandas.core.common._mut_exclusive",
"pandas.compat.lrange",
"numpy.random.randn",
"pandas.util.testing.makeStringSeries",
"pandas.util.testing.makePanel4D",
"pandas.compat.range",
"pandas.core.config.option_context",
"pandas.util.testing.makePanel",
"numpy.array_equal",
"numpy.array",
"pandas.core.common.difference",
"pandas.util.testing.assert_frame_equal",
"pandas.util.testing.assertRaises",
"pandas.core.common.pprint_thing"
]
] |
ieliz/openvino | [
"403339f8f470c90dee6f6d94ed58644b2787f66b"
] | [
"tools/mo/openvino/tools/mo/ops/Reverse.py"
] | [
"# Copyright (C) 2018-2022 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\nimport numpy as np\n\nfrom openvino.tools.mo.front.common.partial_infer.utils import int64_array\nfrom openvino.tools.mo.graph.graph import Graph\nfrom openvino.tools.mo.ops.op import Op\n\n\nclass Reverse(Op):\n op = 'Reverse'\n\n def __init__(self, graph: Graph, attrs: dict):\n mandatory_props = {\n 'type': None,\n 'axis': None,\n 'op': self.op,\n 'in_ports_count': 2,\n 'out_ports_count': 1,\n 'infer': self.infer,\n }\n super().__init__(graph, mandatory_props, attrs)\n\n @staticmethod\n def infer(node):\n input_shape = node.in_port(0).data.get_shape()\n input_value = node.in_port(0).data.get_value()\n assert input_shape is not None\n if not node.has_valid('axis'):\n assert 1 in node.in_nodes()\n assert node.in_node(1).has_valid('value')\n assert node.in_node(1).value.size == 1\n\n node['axis'] = node.in_node(1).value.item()\n node.in_port(1).disconnect()\n\n assert node.has_valid('axis')\n\n assert len(node.out_nodes()) == 1\n if input_value is not None:\n node.out_port(0).data.set_value(np.flip(input_value, node.axis))\n else:\n node.out_port(0).data.set_shape(input_shape)\n"
] | [
[
"numpy.flip"
]
] |
intel/neural-compressor | [
"16a4a12045fcb468da4d33769aff2c1a5e2ba6ba"
] | [
"examples/baremetal/nlp/sst2/bert_mini/bert_mini_export.py"
] | [
"import argparse\n\nimport torch\nfrom transformers import BertForSequenceClassification\n\ndef export_onnx_model(args, model, onnx_model_path):\n with torch.no_grad():\n inputs = {'input_ids': torch.ones(1,args.max_len, dtype=torch.int32),\n 'attention_mask': torch.ones(1,args.max_len, dtype=torch.int32),\n 'token_type_ids': torch.ones(1,args.max_len, dtype=torch.int32)}\n outputs = model(**inputs)\n\n symbolic_names = {0: 'batch_size', 1: 'max_seq_len'}\n torch.onnx.export(model, # model being run\n (inputs['input_ids'], \n inputs['attention_mask'],\n inputs['token_type_ids']), # model input (or a tuple for\n # multiple inputs)\n onnx_model_path, # where to save the model (can be a file\n # or file-like object)\n opset_version=11, # the ONNX version to export the model\n do_constant_folding=True, # whether to execute constant folding\n input_names=['input_ids', # the model's input names\n 'input_mask',\n 'segment_ids'],\n output_names=['output'], # the model's output names\n dynamic_axes={'input_ids': symbolic_names, # variable length axes\n 'input_mask' : symbolic_names,\n 'segment_ids' : symbolic_names})\n print(\"ONNX Model exported to {0}\".format(onnx_model_path))\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description='Export bert onnx model',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\n '--input_dir',\n type=str,\n help='input_dir of bert model, must contain config.json')\n parser.add_argument(\n '--task_name',\n type=str,\n choices=[\"MRPC\", \"MNLI\", \"SST-2\"],\n help='tasks names of bert model')\n parser.add_argument(\n '--max_len',\n type=int,\n default=128,\n help='Maximum length of the sentence pairs')\n parser.add_argument(\n '--do_lower_case',\n type=bool,\n default=True,\n help='whether lower the tokenizer')\n parser.add_argument(\n '--output_model',\n type=str,\n default='bert_mini_sst2.onnx',\n help='path to exported model file')\n args = parser.parse_args()\n\n model = BertForSequenceClassification.from_pretrained(args.input_dir)\n export_onnx_model(args, model, args.output_model)"
] | [
[
"torch.ones",
"torch.no_grad",
"torch.onnx.export"
]
] |
knightvishal/tensorflow | [
"5d3dd19b7146d954fc1b4e9e44e9881e75d363c1",
"5d3dd19b7146d954fc1b4e9e44e9881e75d363c1",
"5d3dd19b7146d954fc1b4e9e44e9881e75d363c1",
"5d3dd19b7146d954fc1b4e9e44e9881e75d363c1"
] | [
"tensorflow/python/estimator/canned/dnn_linear_combined.py",
"tensorflow/python/kernel_tests/manip_ops_test.py",
"tensorflow/python/data/experimental/kernel_tests/prefetch_to_device_test.py",
"tensorflow/python/training/proximal_gradient_descent_test.py"
] | [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"TensorFlow estimators for Linear and DNN joined training models.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\n\nimport six\n\nfrom tensorflow.python.estimator import estimator\nfrom tensorflow.python.estimator.canned import dnn\nfrom tensorflow.python.estimator.canned import head as head_lib\nfrom tensorflow.python.estimator.canned import linear\nfrom tensorflow.python.estimator.canned import optimizers\nfrom tensorflow.python.feature_column import feature_column_v2\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import nn\nfrom tensorflow.python.ops import partitioned_variables\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops.losses import losses\nfrom tensorflow.python.summary import summary\nfrom tensorflow.python.training import sync_replicas_optimizer\nfrom tensorflow.python.training import training_util\nfrom tensorflow.python.util.tf_export import estimator_export\n\n# The default learning rates are a historical artifact of the initial\n# implementation.\n_DNN_LEARNING_RATE = 0.001\n_LINEAR_LEARNING_RATE = 0.005\n\n\ndef _check_no_sync_replicas_optimizer(optimizer):\n if isinstance(optimizer, sync_replicas_optimizer.SyncReplicasOptimizer):\n raise ValueError(\n 'SyncReplicasOptimizer does not support multi optimizers case. '\n 'Therefore, it is not supported in DNNLinearCombined model. '\n 'If you want to use this optimizer, please use either DNN or Linear '\n 'model.')\n\n\ndef _linear_learning_rate(num_linear_feature_columns):\n \"\"\"Returns the default learning rate of the linear model.\n\n The calculation is a historical artifact of this initial implementation, but\n has proven a reasonable choice.\n\n Args:\n num_linear_feature_columns: The number of feature columns of the linear\n model.\n\n Returns:\n A float.\n \"\"\"\n default_learning_rate = 1. 
/ math.sqrt(num_linear_feature_columns)\n return min(_LINEAR_LEARNING_RATE, default_learning_rate)\n\n\ndef _add_layer_summary(value, tag):\n summary.scalar('%s/fraction_of_zero_values' % tag, nn.zero_fraction(value))\n summary.histogram('%s/activation' % tag, value)\n\n\ndef _dnn_linear_combined_model_fn(features,\n labels,\n mode,\n head,\n linear_feature_columns=None,\n linear_optimizer='Ftrl',\n dnn_feature_columns=None,\n dnn_optimizer='Adagrad',\n dnn_hidden_units=None,\n dnn_activation_fn=nn.relu,\n dnn_dropout=None,\n input_layer_partitioner=None,\n config=None,\n batch_norm=False,\n linear_sparse_combiner='sum'):\n \"\"\"Deep Neural Net and Linear combined model_fn.\n\n Args:\n features: dict of `Tensor`.\n labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of dtype\n `int32` or `int64` in the range `[0, n_classes)`.\n mode: Defines whether this is training, evaluation or prediction.\n See `ModeKeys`.\n head: A `Head` instance.\n linear_feature_columns: An iterable containing all the feature columns used\n by the Linear model.\n linear_optimizer: string, `Optimizer` object, or callable that defines the\n optimizer to use for training the Linear model. Defaults to the Ftrl\n optimizer.\n dnn_feature_columns: An iterable containing all the feature columns used by\n the DNN model.\n dnn_optimizer: string, `Optimizer` object, or callable that defines the\n optimizer to use for training the DNN model. Defaults to the Adagrad\n optimizer.\n dnn_hidden_units: List of hidden units per DNN layer.\n dnn_activation_fn: Activation function applied to each DNN layer. If `None`,\n will use `tf.nn.relu`.\n dnn_dropout: When not `None`, the probability we will drop out a given DNN\n coordinate.\n input_layer_partitioner: Partitioner for input layer.\n config: `RunConfig` object to configure the runtime settings.\n batch_norm: Whether to use batch normalization after each hidden layer.\n linear_sparse_combiner: A string specifying how to reduce the linear model\n if a categorical column is multivalent. One of \"mean\", \"sqrtn\", and\n \"sum\".\n Returns:\n An `EstimatorSpec` instance.\n\n Raises:\n ValueError: If both `linear_feature_columns` and `dnn_features_columns`\n are empty at the same time, or `input_layer_partitioner` is missing,\n or features has the wrong type.\n \"\"\"\n if not isinstance(features, dict):\n raise ValueError('features should be a dictionary of `Tensor`s. 
'\n 'Given type: {}'.format(type(features)))\n if not linear_feature_columns and not dnn_feature_columns:\n raise ValueError(\n 'Either linear_feature_columns or dnn_feature_columns must be defined.')\n\n num_ps_replicas = config.num_ps_replicas if config else 0\n input_layer_partitioner = input_layer_partitioner or (\n partitioned_variables.min_max_variable_partitioner(\n max_partitions=num_ps_replicas,\n min_slice_size=64 << 20))\n\n shared_state_manager = feature_column_v2.maybe_create_shared_state_manager(\n list(linear_feature_columns) + list(dnn_feature_columns))\n\n # Build DNN Logits.\n dnn_parent_scope = 'dnn'\n\n if not dnn_feature_columns:\n dnn_logits = None\n else:\n dnn_optimizer = optimizers.get_optimizer_instance(\n dnn_optimizer, learning_rate=_DNN_LEARNING_RATE)\n _check_no_sync_replicas_optimizer(dnn_optimizer)\n if not dnn_hidden_units:\n raise ValueError(\n 'dnn_hidden_units must be defined when dnn_feature_columns is '\n 'specified.')\n dnn_partitioner = (\n partitioned_variables.min_max_variable_partitioner(\n max_partitions=num_ps_replicas))\n with variable_scope.variable_scope(\n dnn_parent_scope,\n values=tuple(six.itervalues(features)),\n partitioner=dnn_partitioner) as scope:\n dnn_absolute_scope = scope.name\n dnn_logit_fn = dnn._dnn_logit_fn_builder( # pylint: disable=protected-access\n units=head.logits_dimension,\n hidden_units=dnn_hidden_units,\n feature_columns=dnn_feature_columns,\n activation_fn=dnn_activation_fn,\n dropout=dnn_dropout,\n batch_norm=batch_norm,\n input_layer_partitioner=input_layer_partitioner,\n shared_state_manager=shared_state_manager)\n dnn_logits = dnn_logit_fn(features=features, mode=mode)\n\n linear_parent_scope = 'linear'\n\n if not linear_feature_columns:\n linear_logits = None\n else:\n linear_optimizer = optimizers.get_optimizer_instance(\n linear_optimizer,\n learning_rate=_linear_learning_rate(len(linear_feature_columns)))\n _check_no_sync_replicas_optimizer(linear_optimizer)\n with variable_scope.variable_scope(\n linear_parent_scope,\n values=tuple(six.itervalues(features)),\n partitioner=input_layer_partitioner) as scope:\n linear_absolute_scope = scope.name\n logit_fn = linear._linear_logit_fn_builder( # pylint: disable=protected-access\n units=head.logits_dimension,\n feature_columns=linear_feature_columns,\n sparse_combiner=linear_sparse_combiner)\n linear_logits = logit_fn(features=features)\n _add_layer_summary(linear_logits, scope.name)\n\n # Combine logits and build full model.\n if dnn_logits is not None and linear_logits is not None:\n logits = dnn_logits + linear_logits\n elif dnn_logits is not None:\n logits = dnn_logits\n else:\n logits = linear_logits\n\n def _train_op_fn(loss):\n \"\"\"Returns the op to optimize the loss.\"\"\"\n train_ops = []\n global_step = training_util.get_global_step()\n if dnn_logits is not None:\n train_ops.append(\n dnn_optimizer.minimize(\n loss,\n var_list=ops.get_collection(\n ops.GraphKeys.TRAINABLE_VARIABLES,\n scope=dnn_absolute_scope)))\n if linear_logits is not None:\n train_ops.append(\n linear_optimizer.minimize(\n loss,\n var_list=ops.get_collection(\n ops.GraphKeys.TRAINABLE_VARIABLES,\n scope=linear_absolute_scope)))\n\n train_op = control_flow_ops.group(*train_ops)\n with ops.control_dependencies([train_op]):\n return state_ops.assign_add(global_step, 1).op\n\n return head.create_estimator_spec(\n features=features,\n mode=mode,\n labels=labels,\n train_op_fn=_train_op_fn,\n logits=logits)\n\n\n@estimator_export('estimator.DNNLinearCombinedClassifier')\nclass 
DNNLinearCombinedClassifier(estimator.Estimator):\n \"\"\"An estimator for TensorFlow Linear and DNN joined classification models.\n\n Note: This estimator is also known as wide-n-deep.\n\n Example:\n\n ```python\n numeric_feature = numeric_column(...)\n categorical_column_a = categorical_column_with_hash_bucket(...)\n categorical_column_b = categorical_column_with_hash_bucket(...)\n\n categorical_feature_a_x_categorical_feature_b = crossed_column(...)\n categorical_feature_a_emb = embedding_column(\n categorical_column=categorical_feature_a, ...)\n categorical_feature_b_emb = embedding_column(\n categorical_id_column=categorical_feature_b, ...)\n\n estimator = DNNLinearCombinedClassifier(\n # wide settings\n linear_feature_columns=[categorical_feature_a_x_categorical_feature_b],\n linear_optimizer=tf.train.FtrlOptimizer(...),\n # deep settings\n dnn_feature_columns=[\n categorical_feature_a_emb, categorical_feature_b_emb,\n numeric_feature],\n dnn_hidden_units=[1000, 500, 100],\n dnn_optimizer=tf.train.ProximalAdagradOptimizer(...),\n # warm-start settings\n warm_start_from=\"/path/to/checkpoint/dir\")\n\n # To apply L1 and L2 regularization, you can set dnn_optimizer to:\n tf.train.ProximalAdagradOptimizer(\n learning_rate=0.1,\n l1_regularization_strength=0.001,\n l2_regularization_strength=0.001)\n # To apply learning rate decay, you can set dnn_optimizer to a callable:\n lambda: tf.AdamOptimizer(\n learning_rate=tf.exponential_decay(\n learning_rate=0.1,\n global_step=tf.get_global_step(),\n decay_steps=10000,\n decay_rate=0.96)\n # It is the same for linear_optimizer.\n\n # Input builders\n def input_fn_train: # returns x, y\n pass\n estimator.train(input_fn=input_fn_train, steps=100)\n\n def input_fn_eval: # returns x, y\n pass\n metrics = estimator.evaluate(input_fn=input_fn_eval, steps=10)\n def input_fn_predict: # returns x, None\n pass\n predictions = estimator.predict(input_fn=input_fn_predict)\n ```\n\n Input of `train` and `evaluate` should have following features,\n otherwise there will be a `KeyError`:\n\n * for each `column` in `dnn_feature_columns` + `linear_feature_columns`:\n - if `column` is a `_CategoricalColumn`, a feature with `key=column.name`\n whose `value` is a `SparseTensor`.\n - if `column` is a `_WeightedCategoricalColumn`, two features: the first\n with `key` the id column name, the second with `key` the weight column\n name. Both features' `value` must be a `SparseTensor`.\n - if `column` is a `_DenseColumn`, a feature with `key=column.name`\n whose `value` is a `Tensor`.\n\n Loss is calculated by using softmax cross entropy.\n\n @compatibility(eager)\n Estimators can be used while eager execution is enabled. Note that `input_fn`\n and all hooks are executed inside a graph context, so they have to be written\n to be compatible with graph mode. Note that `input_fn` code using `tf.data`\n generally works in both graph and eager modes.\n @end_compatibility\n \"\"\"\n\n def __init__(self,\n model_dir=None,\n linear_feature_columns=None,\n linear_optimizer='Ftrl',\n dnn_feature_columns=None,\n dnn_optimizer='Adagrad',\n dnn_hidden_units=None,\n dnn_activation_fn=nn.relu,\n dnn_dropout=None,\n n_classes=2,\n weight_column=None,\n label_vocabulary=None,\n input_layer_partitioner=None,\n config=None,\n warm_start_from=None,\n loss_reduction=losses.Reduction.SUM,\n batch_norm=False,\n linear_sparse_combiner='sum'):\n \"\"\"Initializes a DNNLinearCombinedClassifier instance.\n\n Args:\n model_dir: Directory to save model parameters, graph and etc. 
This can\n also be used to load checkpoints from the directory into a estimator\n to continue training a previously saved model.\n linear_feature_columns: An iterable containing all the feature columns\n used by linear part of the model. All items in the set must be\n instances of classes derived from `FeatureColumn`.\n linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to\n the linear part of the model. Can also be a string (one of 'Adagrad',\n 'Adam', 'Ftrl', 'RMSProp', 'SGD'), or callable. Defaults to FTRL\n optimizer.\n dnn_feature_columns: An iterable containing all the feature columns used\n by deep part of the model. All items in the set must be instances of\n classes derived from `FeatureColumn`.\n dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to\n the deep part of the model. Can also be a string (one of 'Adagrad',\n 'Adam', 'Ftrl', 'RMSProp', 'SGD'), or callable. Defaults to Adagrad\n optimizer.\n dnn_hidden_units: List of hidden units per layer. All layers are fully\n connected.\n dnn_activation_fn: Activation function applied to each layer. If None,\n will use `tf.nn.relu`.\n dnn_dropout: When not None, the probability we will drop out\n a given coordinate.\n n_classes: Number of label classes. Defaults to 2, namely binary\n classification. Must be > 1.\n weight_column: A string or a `_NumericColumn` created by\n `tf.feature_column.numeric_column` defining feature column representing\n weights. It is used to down weight or boost examples during training. It\n will be multiplied by the loss of the example. If it is a string, it is\n used as a key to fetch weight tensor from the `features`. If it is a\n `_NumericColumn`, raw tensor is fetched by key `weight_column.key`,\n then weight_column.normalizer_fn is applied on it to get weight tensor.\n label_vocabulary: A list of strings represents possible label values. If\n given, labels must be string type and have any value in\n `label_vocabulary`. If it is not given, that means labels are\n already encoded as integer or float within [0, 1] for `n_classes=2` and\n encoded as integer values in {0, 1,..., n_classes-1} for `n_classes`>2 .\n Also there will be errors if vocabulary is not provided and labels are\n string.\n input_layer_partitioner: Partitioner for input layer. Defaults to\n `min_max_variable_partitioner` with `min_slice_size` 64 << 20.\n config: RunConfig object to configure the runtime settings.\n warm_start_from: A string filepath to a checkpoint to warm-start from, or\n a `WarmStartSettings` object to fully configure warm-starting. If the\n string filepath is provided instead of a `WarmStartSettings`, then all\n weights are warm-started, and it is assumed that vocabularies and Tensor\n names are unchanged.\n loss_reduction: One of `tf.losses.Reduction` except `NONE`. Describes how\n to reduce training loss over batch. Defaults to `SUM`.\n batch_norm: Whether to use batch normalization after each hidden layer.\n linear_sparse_combiner: A string specifying how to reduce the linear model\n if a categorical column is multivalent. One of \"mean\", \"sqrtn\", and\n \"sum\" -- these are effectively different ways to do example-level\n normalization, which can be useful for bag-of-words features. 
For more\n details, see `tf.feature_column.linear_model`.\n\n Raises:\n ValueError: If both linear_feature_columns and dnn_features_columns are\n empty at the same time.\n \"\"\"\n linear_feature_columns = linear_feature_columns or []\n dnn_feature_columns = dnn_feature_columns or []\n self._feature_columns = (\n list(linear_feature_columns) + list(dnn_feature_columns))\n if not self._feature_columns:\n raise ValueError('Either linear_feature_columns or dnn_feature_columns '\n 'must be defined.')\n if n_classes == 2:\n head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss( # pylint: disable=protected-access\n weight_column=weight_column,\n label_vocabulary=label_vocabulary,\n loss_reduction=loss_reduction)\n else:\n head = head_lib._multi_class_head_with_softmax_cross_entropy_loss( # pylint: disable=protected-access\n n_classes,\n weight_column=weight_column,\n label_vocabulary=label_vocabulary,\n loss_reduction=loss_reduction)\n\n def _model_fn(features, labels, mode, config):\n \"\"\"Call the _dnn_linear_combined_model_fn.\"\"\"\n return _dnn_linear_combined_model_fn(\n features=features,\n labels=labels,\n mode=mode,\n head=head,\n linear_feature_columns=linear_feature_columns,\n linear_optimizer=linear_optimizer,\n dnn_feature_columns=dnn_feature_columns,\n dnn_optimizer=dnn_optimizer,\n dnn_hidden_units=dnn_hidden_units,\n dnn_activation_fn=dnn_activation_fn,\n dnn_dropout=dnn_dropout,\n input_layer_partitioner=input_layer_partitioner,\n config=config,\n batch_norm=batch_norm,\n linear_sparse_combiner=linear_sparse_combiner)\n\n super(DNNLinearCombinedClassifier, self).__init__(\n model_fn=_model_fn, model_dir=model_dir, config=config,\n warm_start_from=warm_start_from)\n\n\n@estimator_export('estimator.DNNLinearCombinedRegressor')\nclass DNNLinearCombinedRegressor(estimator.Estimator):\n \"\"\"An estimator for TensorFlow Linear and DNN joined models for regression.\n\n Note: This estimator is also known as wide-n-deep.\n\n Example:\n\n ```python\n numeric_feature = numeric_column(...)\n categorical_column_a = categorical_column_with_hash_bucket(...)\n categorical_column_b = categorical_column_with_hash_bucket(...)\n\n categorical_feature_a_x_categorical_feature_b = crossed_column(...)\n categorical_feature_a_emb = embedding_column(\n categorical_column=categorical_feature_a, ...)\n categorical_feature_b_emb = embedding_column(\n categorical_column=categorical_feature_b, ...)\n\n estimator = DNNLinearCombinedRegressor(\n # wide settings\n linear_feature_columns=[categorical_feature_a_x_categorical_feature_b],\n linear_optimizer=tf.train.FtrlOptimizer(...),\n # deep settings\n dnn_feature_columns=[\n categorical_feature_a_emb, categorical_feature_b_emb,\n numeric_feature],\n dnn_hidden_units=[1000, 500, 100],\n dnn_optimizer=tf.train.ProximalAdagradOptimizer(...),\n # warm-start settings\n warm_start_from=\"/path/to/checkpoint/dir\")\n\n # To apply L1 and L2 regularization, you can set dnn_optimizer to:\n tf.train.ProximalAdagradOptimizer(\n learning_rate=0.1,\n l1_regularization_strength=0.001,\n l2_regularization_strength=0.001)\n # To apply learning rate decay, you can set dnn_optimizer to a callable:\n lambda: tf.AdamOptimizer(\n learning_rate=tf.exponential_decay(\n learning_rate=0.1,\n global_step=tf.get_global_step(),\n decay_steps=10000,\n decay_rate=0.96)\n # It is the same for linear_optimizer.\n\n # Input builders\n def input_fn_train: # returns x, y\n pass\n estimator.train(input_fn=input_fn_train, steps=100)\n\n def input_fn_eval: # returns x, y\n pass\n 
metrics = estimator.evaluate(input_fn=input_fn_eval, steps=10)\n def input_fn_predict: # returns x, None\n pass\n predictions = estimator.predict(input_fn=input_fn_predict)\n ```\n\n Input of `train` and `evaluate` should have following features,\n otherwise there will be a `KeyError`:\n\n * for each `column` in `dnn_feature_columns` + `linear_feature_columns`:\n - if `column` is a `_CategoricalColumn`, a feature with `key=column.name`\n whose `value` is a `SparseTensor`.\n - if `column` is a `_WeightedCategoricalColumn`, two features: the first\n with `key` the id column name, the second with `key` the weight column\n name. Both features' `value` must be a `SparseTensor`.\n - if `column` is a `_DenseColumn`, a feature with `key=column.name`\n whose `value` is a `Tensor`.\n\n Loss is calculated by using mean squared error.\n\n @compatibility(eager)\n Estimators can be used while eager execution is enabled. Note that `input_fn`\n and all hooks are executed inside a graph context, so they have to be written\n to be compatible with graph mode. Note that `input_fn` code using `tf.data`\n generally works in both graph and eager modes.\n @end_compatibility\n \"\"\"\n\n def __init__(self,\n model_dir=None,\n linear_feature_columns=None,\n linear_optimizer='Ftrl',\n dnn_feature_columns=None,\n dnn_optimizer='Adagrad',\n dnn_hidden_units=None,\n dnn_activation_fn=nn.relu,\n dnn_dropout=None,\n label_dimension=1,\n weight_column=None,\n input_layer_partitioner=None,\n config=None,\n warm_start_from=None,\n loss_reduction=losses.Reduction.SUM,\n batch_norm=False,\n linear_sparse_combiner='sum'):\n \"\"\"Initializes a DNNLinearCombinedRegressor instance.\n\n Args:\n model_dir: Directory to save model parameters, graph and etc. This can\n also be used to load checkpoints from the directory into a estimator\n to continue training a previously saved model.\n linear_feature_columns: An iterable containing all the feature columns\n used by linear part of the model. All items in the set must be\n instances of classes derived from `FeatureColumn`.\n linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to\n the linear part of the model. Can also be a string (one of 'Adagrad',\n 'Adam', 'Ftrl', 'RMSProp', 'SGD'), or callable. Defaults to FTRL\n optimizer.\n dnn_feature_columns: An iterable containing all the feature columns used\n by deep part of the model. All items in the set must be instances of\n classes derived from `FeatureColumn`.\n dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to\n the deep part of the model. Can also be a string (one of 'Adagrad',\n 'Adam', 'Ftrl', 'RMSProp', 'SGD'), or callable. Defaults to Adagrad\n optimizer.\n dnn_hidden_units: List of hidden units per layer. All layers are fully\n connected.\n dnn_activation_fn: Activation function applied to each layer. If None,\n will use `tf.nn.relu`.\n dnn_dropout: When not None, the probability we will drop out\n a given coordinate.\n label_dimension: Number of regression targets per example. This is the\n size of the last dimension of the labels and logits `Tensor` objects\n (typically, these have shape `[batch_size, label_dimension]`).\n weight_column: A string or a `_NumericColumn` created by\n `tf.feature_column.numeric_column` defining feature column representing\n weights. It is used to down weight or boost examples during training. It\n will be multiplied by the loss of the example. If it is a string, it is\n used as a key to fetch weight tensor from the `features`. 
If it is a\n `_NumericColumn`, raw tensor is fetched by key `weight_column.key`,\n then weight_column.normalizer_fn is applied on it to get weight tensor.\n input_layer_partitioner: Partitioner for input layer. Defaults to\n `min_max_variable_partitioner` with `min_slice_size` 64 << 20.\n config: RunConfig object to configure the runtime settings.\n warm_start_from: A string filepath to a checkpoint to warm-start from, or\n a `WarmStartSettings` object to fully configure warm-starting. If the\n string filepath is provided instead of a `WarmStartSettings`, then all\n weights are warm-started, and it is assumed that vocabularies and Tensor\n names are unchanged.\n loss_reduction: One of `tf.losses.Reduction` except `NONE`. Describes how\n to reduce training loss over batch. Defaults to `SUM`.\n batch_norm: Whether to use batch normalization after each hidden layer.\n linear_sparse_combiner: A string specifying how to reduce the linear model\n if a categorical column is multivalent. One of \"mean\", \"sqrtn\", and\n \"sum\" -- these are effectively different ways to do example-level\n normalization, which can be useful for bag-of-words features. For more\n details, see `tf.feature_column.linear_model`.\n\n Raises:\n ValueError: If both linear_feature_columns and dnn_features_columns are\n empty at the same time.\n \"\"\"\n linear_feature_columns = linear_feature_columns or []\n dnn_feature_columns = dnn_feature_columns or []\n self._feature_columns = (\n list(linear_feature_columns) + list(dnn_feature_columns))\n if not self._feature_columns:\n raise ValueError('Either linear_feature_columns or dnn_feature_columns '\n 'must be defined.')\n\n def _model_fn(features, labels, mode, config):\n \"\"\"Call the _dnn_linear_combined_model_fn.\"\"\"\n return _dnn_linear_combined_model_fn(\n features=features,\n labels=labels,\n mode=mode,\n head=head_lib._regression_head( # pylint: disable=protected-access\n label_dimension=label_dimension, weight_column=weight_column,\n loss_reduction=loss_reduction),\n linear_feature_columns=linear_feature_columns,\n linear_optimizer=linear_optimizer,\n dnn_feature_columns=dnn_feature_columns,\n dnn_optimizer=dnn_optimizer,\n dnn_hidden_units=dnn_hidden_units,\n dnn_activation_fn=dnn_activation_fn,\n dnn_dropout=dnn_dropout,\n input_layer_partitioner=input_layer_partitioner,\n config=config,\n batch_norm=batch_norm,\n linear_sparse_combiner=linear_sparse_combiner)\n\n super(DNNLinearCombinedRegressor, self).__init__(\n model_fn=_model_fn, model_dir=model_dir, config=config,\n warm_start_from=warm_start_from)\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for manip_ops.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors_impl\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gradient_checker\nfrom tensorflow.python.ops import manip_ops\nfrom tensorflow.python.platform import test as test_lib\n\n# pylint: disable=g-import-not-at-top\ntry:\n from distutils.version import StrictVersion as Version\n # numpy.roll for multiple shifts was introduced in numpy version 1.12.0\n NP_ROLL_CAN_MULTISHIFT = Version(np.version.version) >= Version(\"1.12.0\")\nexcept ImportError:\n NP_ROLL_CAN_MULTISHIFT = False\n# pylint: enable=g-import-not-at-top\n\n\nclass RollTest(test_util.TensorFlowTestCase):\n\n def _testRoll(self, np_input, shift, axis):\n expected_roll = np.roll(np_input, shift, axis)\n with self.cached_session():\n roll = manip_ops.roll(np_input, shift, axis)\n self.assertAllEqual(roll.eval(), expected_roll)\n\n def _testGradient(self, np_input, shift, axis):\n with self.cached_session():\n inx = constant_op.constant(np_input.tolist())\n xs = list(np_input.shape)\n y = manip_ops.roll(inx, shift, axis)\n # Expected y's shape to be the same\n ys = xs\n jacob_t, jacob_n = gradient_checker.compute_gradient(\n inx, xs, y, ys, x_init_value=np_input)\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)\n\n def _testAll(self, np_input, shift, axis):\n self._testRoll(np_input, shift, axis)\n if np_input.dtype == np.float32:\n self._testGradient(np_input, shift, axis)\n\n def testIntTypes(self):\n for t in [np.int32, np.int64]:\n self._testAll(np.random.randint(-100, 100, (5)).astype(t), 3, 0)\n if NP_ROLL_CAN_MULTISHIFT:\n self._testAll(\n np.random.randint(-100, 100, (4, 4, 3)).astype(t), [1, -2, 3],\n [0, 1, 2])\n self._testAll(\n np.random.randint(-100, 100, (4, 2, 1, 3)).astype(t), [0, 1, -2],\n [1, 2, 3])\n\n def testFloatTypes(self):\n for t in [np.float32, np.float64]:\n self._testAll(np.random.rand(5).astype(t), 2, 0)\n if NP_ROLL_CAN_MULTISHIFT:\n self._testAll(np.random.rand(3, 4).astype(t), [1, 2], [1, 0])\n self._testAll(np.random.rand(1, 3, 4).astype(t), [1, 0, -3], [0, 1, 2])\n\n def testComplexTypes(self):\n for t in [np.complex64, np.complex128]:\n x = np.random.rand(4, 4).astype(t)\n self._testAll(x + 1j * x, 2, 0)\n if NP_ROLL_CAN_MULTISHIFT:\n x = np.random.rand(2, 5).astype(t)\n self._testAll(x + 1j * x, [1, 2], [1, 0])\n x = np.random.rand(3, 2, 1, 1).astype(t)\n self._testAll(x + 1j * x, [2, 1, 1, 0], [0, 3, 1, 2])\n\n def testNegativeAxis(self):\n self._testAll(np.random.randint(-100, 100, 
(5)).astype(np.int32), 3, -1)\n self._testAll(np.random.randint(-100, 100, (4, 4)).astype(np.int32), 3, -2)\n # Make sure negative axis should be 0 <= axis + dims < dims\n with self.cached_session():\n with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,\n \"is out of range\"):\n manip_ops.roll(np.random.randint(-100, 100, (4, 4)).astype(np.int32),\n 3, -10).eval()\n\n def testInvalidInputShape(self):\n # The input should be 1-D or higher, checked in shape function.\n with self.assertRaisesRegexp(\n ValueError, \"Shape must be at least rank 1 but is rank 0\"):\n manip_ops.roll(7, 1, 0)\n\n def testRollInputMustVectorHigherRaises(self):\n # The input should be 1-D or higher, checked in kernel.\n tensor = array_ops.placeholder(dtype=dtypes.int32)\n shift = 1\n axis = 0\n with self.cached_session():\n with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,\n \"input must be 1-D or higher\"):\n manip_ops.roll(tensor, shift, axis).eval(feed_dict={tensor: 7})\n\n def testInvalidAxisShape(self):\n # The axis should be a scalar or 1-D, checked in shape function.\n with self.assertRaisesRegexp(\n ValueError, \"Shape must be at most rank 1 but is rank 2\"):\n manip_ops.roll([[1, 2], [3, 4]], 1, [[0, 1]])\n\n def testRollAxisMustBeScalarOrVectorRaises(self):\n # The axis should be a scalar or 1-D, checked in kernel.\n tensor = [[1, 2], [3, 4]]\n shift = 1\n axis = array_ops.placeholder(dtype=dtypes.int32)\n with self.cached_session():\n with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,\n \"axis must be a scalar or a 1-D vector\"):\n manip_ops.roll(tensor, shift, axis).eval(feed_dict={axis: [[0, 1]]})\n\n def testInvalidShiftShape(self):\n # The shift should be a scalar or 1-D, checked in shape function.\n with self.assertRaisesRegexp(\n ValueError, \"Shape must be at most rank 1 but is rank 2\"):\n manip_ops.roll([[1, 2], [3, 4]], [[0, 1]], 1)\n\n def testRollShiftMustBeScalarOrVectorRaises(self):\n # The shift should be a scalar or 1-D, checked in kernel.\n tensor = [[1, 2], [3, 4]]\n shift = array_ops.placeholder(dtype=dtypes.int32)\n axis = 1\n with self.cached_session():\n with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,\n \"shift must be a scalar or a 1-D vector\"):\n manip_ops.roll(tensor, shift, axis).eval(feed_dict={shift: [[0, 1]]})\n\n def testInvalidShiftAndAxisNotEqualShape(self):\n # The shift and axis must be same size, checked in shape function.\n with self.assertRaisesRegexp(ValueError, \"both shapes must be equal\"):\n manip_ops.roll([[1, 2], [3, 4]], [1], [0, 1])\n\n def testRollShiftAndAxisMustBeSameSizeRaises(self):\n # The shift and axis must be same size, checked in kernel.\n tensor = [[1, 2], [3, 4]]\n shift = array_ops.placeholder(dtype=dtypes.int32)\n axis = [0, 1]\n with self.cached_session():\n with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,\n \"shift and axis must have the same size\"):\n manip_ops.roll(tensor, shift, axis).eval(feed_dict={shift: [1]})\n\n def testRollAxisOutOfRangeRaises(self):\n tensor = [1, 2]\n shift = 1\n axis = 1\n with self.cached_session():\n with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,\n \"is out of range\"):\n manip_ops.roll(tensor, shift, axis).eval()\n\n\nif __name__ == \"__main__\":\n test_lib.main()\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for `tf.data.experimental.prefetch_to_device()`.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.python.data.experimental.ops import prefetching_ops\nfrom tensorflow.python.data.kernel_tests import test_base\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.platform import test\n\n\nclass PrefetchToDeviceTest(test_base.DatasetTestBase):\n\n def testPrefetchToDevice(self):\n host_dataset = dataset_ops.Dataset.range(10)\n device_dataset = host_dataset.apply(\n prefetching_ops.prefetch_to_device(\"/cpu:1\"))\n\n # NOTE(mrry): This device block creates the \"host\" dataset and iterator on\n # /cpu:0, and ensures that the prefetching is across devices. In typical use\n # this would not be necessary, because the GPU device would not support any\n # of the dataset-related ops.\n with ops.device(\"/cpu:0\"):\n iterator = device_dataset.make_one_shot_iterator()\n\n self.assertEqual(host_dataset.output_types, device_dataset.output_types)\n self.assertEqual(host_dataset.output_types, iterator.output_types)\n self.assertEqual(host_dataset.output_shapes, device_dataset.output_shapes)\n self.assertEqual(host_dataset.output_shapes, iterator.output_shapes)\n self.assertEqual(host_dataset.output_classes, device_dataset.output_classes)\n self.assertEqual(host_dataset.output_classes, iterator.output_classes)\n\n next_element = iterator.get_next()\n self.assertEqual(dtypes.int64, next_element.dtype)\n self.assertEqual([], next_element.shape)\n\n worker_config = config_pb2.ConfigProto(device_count={\"CPU\": 2})\n with self.test_session(config=worker_config) as sess:\n for i in range(10):\n self.assertEqual(i, sess.run(next_element))\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(next_element)\n\n def testPrefetchToSameDevice(self):\n host_dataset = dataset_ops.Dataset.range(10)\n device_dataset = host_dataset.apply(\n prefetching_ops.prefetch_to_device(\n \"/job:localhost/replica:0/task:0/device:CPU:0\"))\n\n # NOTE(mrry): This device block creates the \"host\" dataset and iterator on\n # /cpu:0, and ensures that the prefetching is across devices. 
In typical use\n # this would not be necessary, because the GPU device would not support any\n # of the dataset-related ops.\n with ops.device(\"/cpu:0\"):\n iterator = device_dataset.make_one_shot_iterator()\n\n self.assertEqual(host_dataset.output_types, device_dataset.output_types)\n self.assertEqual(host_dataset.output_types, iterator.output_types)\n self.assertEqual(host_dataset.output_shapes, device_dataset.output_shapes)\n self.assertEqual(host_dataset.output_shapes, iterator.output_shapes)\n self.assertEqual(host_dataset.output_classes, device_dataset.output_classes)\n self.assertEqual(host_dataset.output_classes, iterator.output_classes)\n\n next_element = iterator.get_next()\n self.assertEqual(dtypes.int64, next_element.dtype)\n self.assertEqual([], next_element.shape)\n\n with self.cached_session() as sess:\n for i in range(10):\n self.assertEqual(i, sess.run(next_element))\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(next_element)\n\n def testPrefetchDictToDevice(self):\n host_dataset = dataset_ops.Dataset.range(10).map(lambda x: {\"a\": x})\n device_dataset = host_dataset.apply(\n prefetching_ops.prefetch_to_device(\"/cpu:1\"))\n\n # NOTE(mrry): This device block creates the \"host\" dataset and iterator on\n # /cpu:0, and ensures that the prefetching is across devices. In typical use\n # this would not be necessary, because the GPU device would not support any\n # of the dataset-related ops.\n with ops.device(\"/cpu:0\"):\n iterator = device_dataset.make_one_shot_iterator()\n\n self.assertEqual(host_dataset.output_types, device_dataset.output_types)\n self.assertEqual(host_dataset.output_types, iterator.output_types)\n self.assertEqual(host_dataset.output_shapes, device_dataset.output_shapes)\n self.assertEqual(host_dataset.output_shapes, iterator.output_shapes)\n self.assertEqual(host_dataset.output_classes, device_dataset.output_classes)\n self.assertEqual(host_dataset.output_classes, iterator.output_classes)\n\n next_element = iterator.get_next()\n self.assertEqual(dtypes.int64, next_element[\"a\"].dtype)\n self.assertEqual([], next_element[\"a\"].shape)\n\n worker_config = config_pb2.ConfigProto(device_count={\"CPU\": 2})\n with self.test_session(config=worker_config) as sess:\n for i in range(10):\n self.assertEqual({\"a\": i}, sess.run(next_element))\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(next_element)\n\n def testPrefetchSparseTensorsToDevice(self):\n def make_tensor(i):\n return sparse_tensor.SparseTensorValue(\n indices=[[0, 0]], values=(i*[1]), dense_shape=[2, 2])\n host_dataset = dataset_ops.Dataset.range(10).map(make_tensor)\n\n device_dataset = host_dataset.apply(\n prefetching_ops.prefetch_to_device(\"/cpu:1\"))\n\n # NOTE(mrry): This device block creates the \"host\" dataset and iterator on\n # /cpu:0, and ensures that the prefetching is across devices. 
In typical use\n # this would not be necessary, because the GPU device would not support any\n # of the dataset-related ops.\n with ops.device(\"/cpu:0\"):\n iterator = device_dataset.make_one_shot_iterator()\n\n self.assertEqual(host_dataset.output_types, device_dataset.output_types)\n self.assertEqual(host_dataset.output_types, iterator.output_types)\n self.assertEqual(host_dataset.output_shapes, device_dataset.output_shapes)\n self.assertEqual(host_dataset.output_shapes, iterator.output_shapes)\n self.assertEqual(host_dataset.output_classes, device_dataset.output_classes)\n self.assertEqual(host_dataset.output_classes, iterator.output_classes)\n\n next_element = iterator.get_next()\n self.assertEqual(dtypes.int64, next_element.dtype)\n\n worker_config = config_pb2.ConfigProto(device_count={\"CPU\": 2})\n with self.test_session(config=worker_config) as sess:\n for i in range(10):\n actual = sess.run(next_element)\n self.assertAllEqual([i], actual.values)\n self.assertAllEqual([[0, 0]], actual.indices)\n self.assertAllEqual([2, 2], actual.dense_shape)\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(next_element)\n\n def testPrefetchToDeviceGpu(self):\n if not test_util.is_gpu_available():\n self.skipTest(\"No GPU available\")\n\n host_dataset = dataset_ops.Dataset.range(10)\n device_dataset = host_dataset.apply(\n prefetching_ops.prefetch_to_device(\"/gpu:0\"))\n\n iterator = device_dataset.make_one_shot_iterator()\n next_element = iterator.get_next()\n\n with self.cached_session() as sess:\n for i in range(10):\n self.assertEqual(i, sess.run(next_element))\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(next_element)\n\n def testPrefetchToDeviceWithReInit(self):\n host_dataset = dataset_ops.Dataset.range(10)\n device_dataset = host_dataset.apply(\n prefetching_ops.prefetch_to_device(\"/cpu:1\"))\n\n # NOTE(mrry): This device block creates the \"host\" dataset and iterator on\n # /cpu:0, and ensures that the prefetching is across devices. 
In typical use\n # this would not be necessary, because the GPU device would not support any\n # of the dataset-related ops.\n with ops.device(\"/cpu:0\"):\n iterator = device_dataset.make_initializable_iterator()\n\n self.assertEqual(host_dataset.output_types, device_dataset.output_types)\n self.assertEqual(host_dataset.output_types, iterator.output_types)\n self.assertEqual(host_dataset.output_shapes, device_dataset.output_shapes)\n self.assertEqual(host_dataset.output_shapes, iterator.output_shapes)\n self.assertEqual(host_dataset.output_classes, device_dataset.output_classes)\n self.assertEqual(host_dataset.output_classes, iterator.output_classes)\n\n next_element = iterator.get_next()\n self.assertEqual(dtypes.int64, next_element.dtype)\n self.assertEqual([], next_element.shape)\n\n worker_config = config_pb2.ConfigProto(device_count={\"CPU\": 2})\n with self.test_session(config=worker_config) as sess:\n sess.run(iterator.initializer)\n for i in range(5):\n self.assertEqual(i, sess.run(next_element))\n sess.run(iterator.initializer)\n for i in range(10):\n self.assertEqual(i, sess.run(next_element))\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(next_element)\n\n def testPrefetchToDeviceGpuWithReInit(self):\n if not test_util.is_gpu_available():\n self.skipTest(\"No GPU available\")\n\n host_dataset = dataset_ops.Dataset.range(10)\n device_dataset = host_dataset.apply(\n prefetching_ops.prefetch_to_device(\"/gpu:0\"))\n\n iterator = device_dataset.make_initializable_iterator()\n next_element = iterator.get_next()\n\n with self.cached_session() as sess:\n sess.run(iterator.initializer)\n for i in range(5):\n self.assertEqual(i, sess.run(next_element))\n sess.run(iterator.initializer)\n for i in range(10):\n self.assertEqual(i, sess.run(next_element))\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(next_element)\n\n\nif __name__ == \"__main__\":\n test.main()\n",
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Functional tests for Proximal Gradient Descent operations.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import embedding_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.training import gradient_descent\nfrom tensorflow.python.training import proximal_gradient_descent\n\n\nclass ProximalGradientDescentOptimizerTest(test.TestCase):\n\n def doTestProximalGradientDescentwithoutRegularization(\n self, use_resource=False):\n with self.cached_session() as sess:\n if use_resource:\n var0 = resource_variable_ops.ResourceVariable([0.0, 0.0])\n var1 = resource_variable_ops.ResourceVariable([0.0, 0.0])\n else:\n var0 = variables.Variable([0.0, 0.0])\n var1 = variables.Variable([0.0, 0.0])\n grads0 = constant_op.constant([0.1, 0.2])\n grads1 = constant_op.constant([0.01, 0.02])\n opt = proximal_gradient_descent.ProximalGradientDescentOptimizer(\n 3.0, l1_regularization_strength=0.0, l2_regularization_strength=0.0)\n update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))\n variables.global_variables_initializer().run()\n\n v0_val, v1_val = sess.run([var0, var1])\n self.assertAllClose([0.0, 0.0], v0_val)\n self.assertAllClose([0.0, 0.0], v1_val)\n\n # Run 3 steps Proximal Gradient Descent.\n for _ in range(3):\n update.run()\n\n v0_val, v1_val = sess.run([var0, var1])\n self.assertAllClose(np.array([-0.9, -1.8]), v0_val)\n self.assertAllClose(np.array([-0.09, -0.18]), v1_val)\n\n def testProximalGradientDescentwithoutRegularization(self):\n self.doTestProximalGradientDescentwithoutRegularization(use_resource=False)\n\n def testResourceProximalGradientDescentwithoutRegularization(self):\n self.doTestProximalGradientDescentwithoutRegularization(use_resource=True)\n\n def testProximalGradientDescentwithoutRegularization2(self):\n with self.cached_session() as sess:\n var0 = variables.Variable([1.0, 2.0])\n var1 = variables.Variable([4.0, 3.0])\n grads0 = constant_op.constant([0.1, 0.2])\n grads1 = constant_op.constant([0.01, 0.02])\n\n opt = proximal_gradient_descent.ProximalGradientDescentOptimizer(\n 3.0, l1_regularization_strength=0.0, l2_regularization_strength=0.0)\n update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))\n variables.global_variables_initializer().run()\n\n v0_val, v1_val = sess.run([var0, var1])\n self.assertAllClose([1.0, 2.0], v0_val)\n self.assertAllClose([4.0, 3.0], v1_val)\n\n # Run 3 steps Proximal Gradient Descent\n for _ in 
range(3):\n update.run()\n\n v0_val, v1_val = sess.run([var0, var1])\n self.assertAllClose(np.array([0.1, 0.2]), v0_val)\n self.assertAllClose(np.array([3.91, 2.82]), v1_val)\n\n def testMinimizeSparseResourceVariable(self):\n for dtype in [dtypes.float32, dtypes.float64]:\n with self.cached_session():\n var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)\n x = constant_op.constant([[4.0], [5.0]], dtype=dtype)\n pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)\n loss = pred * pred\n sgd_op = proximal_gradient_descent.ProximalGradientDescentOptimizer(\n 1.0).minimize(loss)\n variables.global_variables_initializer().run()\n # Fetch params to validate initial values\n self.assertAllCloseAccordingToType([[1.0, 2.0]], var0.eval())\n # Run 1 step of sgd\n sgd_op.run()\n # Validate updated params\n self.assertAllCloseAccordingToType(\n [[-111, -138]], var0.eval(), atol=0.01)\n\n def testProximalGradientDescentWithL1_L2(self):\n with self.cached_session() as sess:\n var0 = variables.Variable([1.0, 2.0])\n var1 = variables.Variable([4.0, 3.0])\n grads0 = constant_op.constant([0.1, 0.2])\n grads1 = constant_op.constant([0.01, 0.02])\n\n opt = proximal_gradient_descent.ProximalGradientDescentOptimizer(\n 3.0, l1_regularization_strength=0.001, l2_regularization_strength=2.0)\n update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))\n variables.global_variables_initializer().run()\n\n v0_val, v1_val = sess.run([var0, var1])\n self.assertAllClose([1.0, 2.0], v0_val)\n self.assertAllClose([4.0, 3.0], v1_val)\n\n # Run 10 steps Proximal Gradient Descent\n for _ in range(10):\n update.run()\n\n v0_val, v1_val = sess.run([var0, var1])\n self.assertAllClose(np.array([-0.0495, -0.0995]), v0_val)\n self.assertAllClose(np.array([-0.0045, -0.0095]), v1_val)\n\n def applyOptimizer(self, opt, steps=5, is_sparse=False):\n if is_sparse:\n var0 = variables.Variable([[1.0], [2.0]])\n var1 = variables.Variable([[3.0], [4.0]])\n grads0 = ops.IndexedSlices(\n constant_op.constant(\n [0.1], shape=[1, 1]),\n constant_op.constant([0]),\n constant_op.constant([2, 1]))\n grads1 = ops.IndexedSlices(\n constant_op.constant(\n [0.02], shape=[1, 1]),\n constant_op.constant([1]),\n constant_op.constant([2, 1]))\n else:\n var0 = variables.Variable([1.0, 2.0])\n var1 = variables.Variable([3.0, 4.0])\n grads0 = constant_op.constant([0.1, 0.2])\n grads1 = constant_op.constant([0.01, 0.02])\n\n update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))\n variables.global_variables_initializer().run()\n\n sess = ops.get_default_session()\n v0_val, v1_val = sess.run([var0, var1])\n if is_sparse:\n self.assertAllClose([[1.0], [2.0]], v0_val)\n self.assertAllClose([[3.0], [4.0]], v1_val)\n else:\n self.assertAllClose([1.0, 2.0], v0_val)\n self.assertAllClose([3.0, 4.0], v1_val)\n\n # Run ProximalAdagrad for a few steps\n for _ in range(steps):\n update.run()\n\n v0_val, v1_val = sess.run([var0, var1])\n return v0_val, v1_val\n\n def testEquivSparseGradientDescentwithoutRegularization(self):\n with self.cached_session():\n val0, val1 = self.applyOptimizer(\n proximal_gradient_descent.ProximalGradientDescentOptimizer(\n 3.0,\n l1_regularization_strength=0.0,\n l2_regularization_strength=0.0),\n is_sparse=True)\n\n with self.cached_session():\n val2, val3 = self.applyOptimizer(\n gradient_descent.GradientDescentOptimizer(3.0), is_sparse=True)\n\n self.assertAllClose(val0, val2)\n self.assertAllClose(val1, val3)\n\n def testEquivGradientDescentwithoutRegularization(self):\n with 
self.cached_session():\n val0, val1 = self.applyOptimizer(\n proximal_gradient_descent.ProximalGradientDescentOptimizer(\n 3.0,\n l1_regularization_strength=0.0,\n l2_regularization_strength=0.0))\n\n with self.cached_session():\n val2, val3 = self.applyOptimizer(\n gradient_descent.GradientDescentOptimizer(3.0))\n\n self.assertAllClose(val0, val2)\n self.assertAllClose(val1, val3)\n\n\nif __name__ == \"__main__\":\n test.main()\n"
] | [
[
"tensorflow.python.estimator.canned.head._multi_class_head_with_softmax_cross_entropy_loss",
"tensorflow.python.estimator.canned.head._regression_head",
"tensorflow.python.estimator.canned.optimizers.get_optimizer_instance",
"tensorflow.python.util.tf_export.estimator_export",
"tensorflow.python.training.training_util.get_global_step",
"tensorflow.python.estimator.canned.linear._linear_logit_fn_builder",
"tensorflow.python.estimator.canned.head._binary_logistic_head_with_sigmoid_cross_entropy_loss",
"tensorflow.python.framework.ops.get_collection",
"tensorflow.python.estimator.canned.dnn._dnn_logit_fn_builder",
"tensorflow.python.summary.summary.histogram",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.ops.state_ops.assign_add",
"tensorflow.python.ops.nn.zero_fraction",
"tensorflow.python.ops.partitioned_variables.min_max_variable_partitioner",
"tensorflow.python.ops.control_flow_ops.group"
],
[
"numpy.roll",
"tensorflow.python.ops.manip_ops.roll",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.platform.test.main",
"numpy.random.rand",
"tensorflow.python.ops.gradient_checker.compute_gradient",
"numpy.random.randint"
],
[
"tensorflow.python.data.experimental.ops.prefetching_ops.prefetch_to_device",
"tensorflow.core.protobuf.config_pb2.ConfigProto",
"tensorflow.python.data.ops.dataset_ops.Dataset.range",
"tensorflow.python.platform.test.main",
"tensorflow.python.framework.sparse_tensor.SparseTensorValue",
"tensorflow.python.framework.test_util.is_gpu_available",
"tensorflow.python.framework.ops.device"
],
[
"tensorflow.python.framework.ops.get_default_session",
"tensorflow.python.training.gradient_descent.GradientDescentOptimizer",
"tensorflow.python.training.proximal_gradient_descent.ProximalGradientDescentOptimizer",
"tensorflow.python.ops.variables.global_variables_initializer",
"tensorflow.python.ops.embedding_ops.embedding_lookup",
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.resource_variable_ops.ResourceVariable",
"numpy.array",
"tensorflow.python.ops.variables.Variable",
"tensorflow.python.framework.constant_op.constant"
]
] |
imperial-genomics-facility/data-management-python | [
"7b867d8d4562a49173d0b823bdc4bf374a3688f0"
] | [
"igf_data/igfdb/useradaptor.py"
] | [
"import pandas as pd\r\nimport json, hashlib, os, codecs, base64\r\nfrom igf_data.igfdb.baseadaptor import BaseAdaptor\r\nfrom igf_data.igfdb.igfTables import User\r\n\r\nclass UserAdaptor(BaseAdaptor):\r\n '''\r\n An adaptor class for table User\r\n '''\r\n def _email_check(self, email):\r\n '''\r\n An internal function to check if email_id has '@' or not\r\n \r\n :param email: a string containing the email id\r\n '''\r\n if '@' not in email:\r\n raise ValueError('Email id {0} is not correctly formatted'.format(email))\r\n\r\n\r\n def _encrypt_password(self, series, password_column='password', \r\n salt_column='encryption_salt', \r\n ht_pass_column='ht_password'):\r\n '''\r\n An internal function for encrypting password\r\n\r\n :param series: A pandas data series\r\n :param password_column: Name of the password column, default password\r\n :param salt_column: Name of the salt column, default encryption_salt\r\n :param ht_pass_column: Name of the ht_password column, default ht_password\r\n :returns: A pandas series\r\n '''\r\n try:\r\n if not isinstance(series, pd.Series):\r\n series=pd.DataFrame(series)\r\n\r\n if password_column in series.index and \\\r\n not pd.isnull(series[password_column]): # password is optional\r\n salt=codecs.encode(os.urandom(32),\"hex\").decode(\"utf-8\") # calculate salt value\r\n password=series[password_column] # fetch password\r\n if not isinstance(password, str):\r\n password=str(series.password_column).encode('utf-8') # encode password if its not a string\r\n\r\n if password: # always encrypt password\r\n ht_pass=\\\r\n '{0}{1}'.format(\\\r\n '{SHA}',\r\n base64.b64encode(\\\r\n hashlib.sha1(password.encode('utf-8')).\\\r\n digest()).decode()) # calculate sha1 for htaccess password\r\n series[ht_pass_column]=ht_pass # set htaccess password\r\n key=salt+password # construct key using salt and password\r\n password=hashlib.sha512(str(key).encode('utf-8')).hexdigest() # create password hash\r\n series[password_column]=password # set hash to data series\r\n series[salt_column]=salt # set salt to data series\r\n return series\r\n except:\r\n raise\r\n\r\n\r\n def _map_missing_user_status(self,data_series,categoty_column,hpc_user_column,\r\n hpc_user,non_hpc_user):\r\n '''\r\n An internal function for assigning user status\r\n\r\n :param data_series: A pandas data series\r\n :param categoty_column: Name of the category column ## FIX TYPO\r\n :param hpc_user_column: Name of the hpc username column\r\n :param hpc_user: HPC user tag\r\n :param non_hpc_user: Non HPC user tag\r\n :returns: A pandas data series\r\n '''\r\n try:\r\n if not isinstance(data_series, pd.Series):\r\n data_series=pd.DataFrame(data_series)\r\n\r\n if categoty_column not in data_series or \\\r\n pd.isnull(data_series[categoty_column]):\r\n if hpc_user_column in data_series and \\\r\n not pd.isnull(data_series[hpc_user_column]) and \\\r\n data_series[hpc_user_column]!='':\r\n data_series[categoty_column]=hpc_user # assign hpc user\r\n else:\r\n data_series[categoty_column]=non_hpc_user # non hpc user\r\n\r\n return data_series\r\n except:\r\n raise\r\n\r\n\r\n def _preprocess_data(self,data, password_column='password', categoty_column='category',\r\n email_column='email_id', hpc_user_column='hpc_username',\r\n hpc_user='HPC_USER', non_hpc_user='NON_HPC_USER',\r\n user_igf_id_column='user_igf_id', username_column='username',\r\n salt_column='encryption_salt'):\r\n '''\r\n An internal function for preprocess data before loading\r\n\r\n :param data: A pamdas dataframe or a list of dictionaries\r\n 
:param password_column: Name of the password column, default password\r\n :param categoty_column: Name of the user category column, default category\r\n :param email_column: Name of the email id column, default email_id\r\n :param hpc_user_column: Name of the hpc username column, default hpc_username\r\n :param hpc_user: Tag name for HPC user, default HPC_USER\r\n :param non_hpc_user: Tag name for non HPC user, default NON_HPC_USER\r\n :param user_igf_id_column: Name of the user id column, default user_igf_id\r\n :param username_column: Name of the igf username column, default username\r\n :param salt_column: Name of the salt column, default encryption_salt\r\n :returns: A pandas dataframe\r\n '''\r\n try:\r\n if not isinstance(data, pd.DataFrame):\r\n data=pd.DataFrame(data)\r\n\r\n new_data=data.apply(lambda x: self._encrypt_password(series=x),1) # encrypt password\r\n new_data[email_column].map(lambda x: self._email_check(email=x)) # check email id, it should contail '@'\r\n new_data=new_data.fillna('')\r\n if categoty_column not in new_data.columns:\r\n new_data[categoty_column]=None # add category column if it doesn't exists\r\n\r\n new_data.apply(\\\r\n lambda x: self._map_missing_user_status(\\\r\n data_series=x,\r\n categoty_column=categoty_column,\r\n hpc_user_column=hpc_user_column,\r\n hpc_user=hpc_user,\r\n non_hpc_user=non_hpc_user),\r\n axis=1) # assign categoty, if user has hpc_username, then its 'HPC_USER'\r\n return new_data\r\n except:\r\n raise\r\n\r\n\r\n def store_user_data(self, data, autosave=True):\r\n '''\r\n Load data to user table\r\n\r\n :param data: A pandas dataframe\r\n :param autosave: A toggle for autocommit, default True\r\n :returns: None\r\n '''\r\n try:\r\n if not isinstance(data, pd.DataFrame):\r\n data=pd.DataFrame(data)\r\n\r\n data=self._preprocess_data(data=data)\r\n self.store_records(table=User, data=data, mode='serial' )\r\n if autosave:\r\n self.commit_session()\r\n except:\r\n if autosave:\r\n self.rollback_session()\r\n raise\r\n\r\n\r\n def fetch_user_records_igf_id(self, user_igf_id):\r\n '''\r\n A method for fetching data for User table\r\n \r\n :param user_igf_id: an igf id\r\n :returns: user object\r\n '''\r\n try:\r\n user=\\\r\n self.fetch_records_by_column(\\\r\n table=User,\r\n column_name=User.user_igf_id,\r\n column_id=user_igf_id,\r\n output_mode='one' )\r\n return user\r\n except:\r\n raise\r\n\r\n\r\n def fetch_user_records_email_id(self, user_email_id):\r\n '''\r\n A method for fetching data for User table\r\n \r\n :param user_email_id: an email id\r\n :returns: user object\r\n '''\r\n try:\r\n user=\\\r\n self.fetch_records_by_column(\\\r\n table=User,\r\n column_name=User.email_id,\r\n column_id=user_email_id,\r\n output_mode='one' )\r\n return user\r\n except:\r\n raise\r\n\r\n\r\n def check_user_records_email_id(self,email_id):\r\n '''\r\n A method for checking existing user data in db\r\n \r\n :param email_id: An email id\r\n :returns: True if the file is present in db or False if its not\r\n '''\r\n try:\r\n user_check=False\r\n user_obj=\\\r\n self.fetch_records_by_column(\\\r\n table=User,\r\n column_name=User.email_id,\r\n column_id=email_id,\r\n output_mode='one_or_none' )\r\n if user_obj is not None:\r\n user_check=True\r\n return user_check\r\n except:\r\n raise\r\n"
] | [
[
"pandas.isnull",
"pandas.DataFrame"
]
] |
TheGupta2012/qiskit-terra | [
"5ea6e9557655b144228c29d7099375f5d2c91120"
] | [
"qiskit/pulse/library/continuous.py"
] | [
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n# pylint: disable=missing-return-doc, invalid-unary-operand-type\n\n\"\"\"Module for builtin continuous pulse functions.\"\"\"\n\nimport functools\nfrom typing import Union, Tuple, Optional\n\nimport numpy as np\nfrom qiskit.pulse.exceptions import PulseError\n\n\ndef constant(times: np.ndarray, amp: complex) -> np.ndarray:\n \"\"\"Continuous constant pulse.\n\n Args:\n times: Times to output pulse for.\n amp: Complex pulse amplitude.\n \"\"\"\n return np.full(len(times), amp, dtype=np.complex_)\n\n\ndef zero(times: np.ndarray) -> np.ndarray:\n \"\"\"Continuous zero pulse.\n\n Args:\n times: Times to output pulse for.\n \"\"\"\n return constant(times, 0)\n\n\ndef square(times: np.ndarray, amp: complex, freq: float, phase: float = 0) -> np.ndarray:\n \"\"\"Continuous square wave.\n\n Args:\n times: Times to output wave for.\n amp: Pulse amplitude. Wave range is [-amp, amp].\n freq: Pulse frequency. units of 1/dt.\n phase: Pulse phase.\n \"\"\"\n x = times * freq + phase / np.pi\n return amp * (2 * (2 * np.floor(x) - np.floor(2 * x)) + 1).astype(np.complex_)\n\n\ndef sawtooth(times: np.ndarray, amp: complex, freq: float, phase: float = 0) -> np.ndarray:\n \"\"\"Continuous sawtooth wave.\n\n Args:\n times: Times to output wave for.\n amp: Pulse amplitude. Wave range is [-amp, amp].\n freq: Pulse frequency. units of 1/dt.\n phase: Pulse phase.\n \"\"\"\n x = times * freq + phase / np.pi\n return amp * 2 * (x - np.floor(1 / 2 + x)).astype(np.complex_)\n\n\ndef triangle(times: np.ndarray, amp: complex, freq: float, phase: float = 0) -> np.ndarray:\n \"\"\"Continuous triangle wave.\n\n Args:\n times: Times to output wave for.\n amp: Pulse amplitude. Wave range is [-amp, amp].\n freq: Pulse frequency. 
units of 1/dt.\n phase: Pulse phase.\n \"\"\"\n return amp * (-2 * np.abs(sawtooth(times, 1, freq, phase=(phase - np.pi / 2) / 2)) + 1).astype(\n np.complex_\n )\n\n\ndef cos(times: np.ndarray, amp: complex, freq: float, phase: float = 0) -> np.ndarray:\n \"\"\"Continuous cosine wave.\n\n Args:\n times: Times to output wave for.\n amp: Pulse amplitude.\n freq: Pulse frequency, units of 1/dt.\n phase: Pulse phase.\n \"\"\"\n return amp * np.cos(2 * np.pi * freq * times + phase).astype(np.complex_)\n\n\ndef sin(times: np.ndarray, amp: complex, freq: float, phase: float = 0) -> np.ndarray:\n \"\"\"Continuous cosine wave.\n\n Args:\n times: Times to output wave for.\n amp: Pulse amplitude.\n freq: Pulse frequency, units of 1/dt.\n phase: Pulse phase.\n \"\"\"\n return amp * np.sin(2 * np.pi * freq * times + phase).astype(np.complex_)\n\n\ndef _fix_gaussian_width(\n gaussian_samples,\n amp: float,\n center: float,\n sigma: float,\n zeroed_width: Optional[float] = None,\n rescale_amp: bool = False,\n ret_scale_factor: bool = False,\n) -> np.ndarray:\n r\"\"\"Enforce that the supplied gaussian pulse is zeroed at a specific width.\n\n This is achieved by subtracting $\\Omega_g(center \\pm zeroed_width/2)$ from all samples.\n\n amp: Pulse amplitude at `center`.\n center: Center (mean) of pulse.\n sigma: Standard deviation of pulse.\n zeroed_width: Subtract baseline from gaussian pulses to make sure\n $\\Omega_g(center \\pm zeroed_width/2)=0$ is satisfied. This is used to avoid\n large discontinuities at the start of a gaussian pulse. If unsupplied,\n defaults to $2*(center + 1)$ such that $\\Omega_g(-1)=0$ and $\\Omega_g(2*(center + 1))=0$.\n rescale_amp: If True the pulse will be rescaled so that $\\Omega_g(center)=amp$.\n ret_scale_factor: Return amplitude scale factor.\n \"\"\"\n if zeroed_width is None:\n zeroed_width = 2 * (center + 1)\n\n zero_offset = gaussian(np.array([zeroed_width / 2]), amp, 0, sigma)\n gaussian_samples -= zero_offset\n amp_scale_factor = 1.0\n if rescale_amp:\n amp_scale_factor = amp / (amp - zero_offset) if amp - zero_offset != 0 else 1.0\n gaussian_samples *= amp_scale_factor\n\n if ret_scale_factor:\n return gaussian_samples, amp_scale_factor\n return gaussian_samples\n\n\ndef gaussian(\n times: np.ndarray,\n amp: complex,\n center: float,\n sigma: float,\n zeroed_width: Optional[float] = None,\n rescale_amp: bool = False,\n ret_x: bool = False,\n) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:\n r\"\"\"Continuous unnormalized gaussian pulse.\n\n Integrated area under curve is $\\Omega_g(amp, sigma) = amp \\times np.sqrt(2\\pi \\sigma^2)$\n\n Args:\n times: Times to output pulse for.\n amp: Pulse amplitude at `center`. If `zeroed_width` is set pulse amplitude at center\n will be $amp-\\Omega_g(center \\pm zeroed_width/2)$ unless `rescale_amp` is set,\n in which case all samples will be rescaled such that the center\n amplitude will be `amp`.\n center: Center (mean) of pulse.\n sigma: Width (standard deviation) of pulse.\n zeroed_width: Subtract baseline from gaussian pulses to make sure\n $\\Omega_g(center \\pm zeroed_width/2)=0$ is satisfied. 
This is used to avoid\n large discontinuities at the start of a gaussian pulse.\n rescale_amp: If `zeroed_width` is not `None` and `rescale_amp=True` the pulse will\n be rescaled so that $\\Omega_g(center)=amp$.\n ret_x: Return centered and standard deviation normalized pulse location.\n $x=(times-center)/sigma.\n \"\"\"\n times = np.asarray(times, dtype=np.complex_)\n x = (times - center) / sigma\n gauss = amp * np.exp(-(x ** 2) / 2).astype(np.complex_)\n\n if zeroed_width is not None:\n gauss = _fix_gaussian_width(\n gauss,\n amp=amp,\n center=center,\n sigma=sigma,\n zeroed_width=zeroed_width,\n rescale_amp=rescale_amp,\n )\n\n if ret_x:\n return gauss, x\n return gauss\n\n\ndef gaussian_deriv(\n times: np.ndarray,\n amp: complex,\n center: float,\n sigma: float,\n ret_gaussian: bool = False,\n zeroed_width: Optional[float] = None,\n rescale_amp: bool = False,\n) -> np.ndarray:\n r\"\"\"Continuous unnormalized gaussian derivative pulse.\n\n Args:\n times: Times to output pulse for.\n amp: Pulse amplitude at `center`.\n center: Center (mean) of pulse.\n sigma: Width (standard deviation) of pulse.\n ret_gaussian: Return gaussian with which derivative was taken with.\n zeroed_width: Subtract baseline of pulse to make sure\n $\\Omega_g(center \\pm zeroed_width/2)=0$ is satisfied. This is used to avoid\n large discontinuities at the start of a pulse.\n rescale_amp: If `zeroed_width` is not `None` and `rescale_amp=True` the pulse will\n be rescaled so that $\\Omega_g(center)=amp$.\n \"\"\"\n gauss, x = gaussian(\n times,\n amp=amp,\n center=center,\n sigma=sigma,\n zeroed_width=zeroed_width,\n rescale_amp=rescale_amp,\n ret_x=True,\n )\n gauss_deriv = -x / sigma * gauss\n if ret_gaussian:\n return gauss_deriv, gauss\n return gauss_deriv\n\n\ndef _fix_sech_width(\n sech_samples,\n amp: float,\n center: float,\n sigma: float,\n zeroed_width: Optional[float] = None,\n rescale_amp: bool = False,\n ret_scale_factor: bool = False,\n) -> np.ndarray:\n r\"\"\"Enforce that the supplied sech pulse is zeroed at a specific width.\n\n This is achieved by subtracting $\\Omega_g(center \\pm zeroed_width/2)$ from all samples.\n\n amp: Pulse amplitude at `center`.\n center: Center (mean) of pulse.\n sigma: Standard deviation of pulse.\n zeroed_width: Subtract baseline from sech pulses to make sure\n $\\Omega_g(center \\pm zeroed_width/2)=0$ is satisfied. This is used to avoid\n large discontinuities at the start of a sech pulse. 
If unsupplied,\n defaults to $2*(center + 1)$ such that $\\Omega_g(-1)=0$ and $\\Omega_g(2*(center + 1))=0$.\n rescale_amp: If True the pulse will be rescaled so that $\\Omega_g(center)=amp$.\n ret_scale_factor: Return amplitude scale factor.\n \"\"\"\n if zeroed_width is None:\n zeroed_width = 2 * (center + 1)\n\n zero_offset = sech(np.array([zeroed_width / 2]), amp, 0, sigma)\n sech_samples -= zero_offset\n amp_scale_factor = 1.0\n if rescale_amp:\n amp_scale_factor = amp / (amp - zero_offset) if amp - zero_offset != 0 else 1.0\n sech_samples *= amp_scale_factor\n\n if ret_scale_factor:\n return sech_samples, amp_scale_factor\n return sech_samples\n\n\ndef sech_fn(x, *args, **kwargs):\n r\"\"\"Hyperbolic secant function\"\"\"\n return 1.0 / np.cosh(x, *args, **kwargs)\n\n\ndef sech(\n times: np.ndarray,\n amp: complex,\n center: float,\n sigma: float,\n zeroed_width: Optional[float] = None,\n rescale_amp: bool = False,\n ret_x: bool = False,\n) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:\n r\"\"\"Continuous unnormalized sech pulse.\n\n Args:\n times: Times to output pulse for.\n amp: Pulse amplitude at `center`.\n center: Center (mean) of pulse.\n sigma: Width (standard deviation) of pulse.\n zeroed_width: Subtract baseline from pulse to make sure\n $\\Omega_g(center \\pm zeroed_width/2)=0$ is satisfied. This is used to avoid\n large discontinuities at the start and end of the pulse.\n rescale_amp: If `zeroed_width` is not `None` and `rescale_amp=True` the pulse will\n be rescaled so that $\\Omega_g(center)=amp$.\n ret_x: Return centered and standard deviation normalized pulse location.\n $x=(times-center)/sigma$.\n \"\"\"\n times = np.asarray(times, dtype=np.complex_)\n x = (times - center) / sigma\n sech_out = amp * sech_fn(x).astype(np.complex_)\n\n if zeroed_width is not None:\n sech_out = _fix_sech_width(\n sech_out,\n amp=amp,\n center=center,\n sigma=sigma,\n zeroed_width=zeroed_width,\n rescale_amp=rescale_amp,\n )\n\n if ret_x:\n return sech_out, x\n return sech_out\n\n\ndef sech_deriv(\n times: np.ndarray, amp: complex, center: float, sigma: float, ret_sech: bool = False\n) -> np.ndarray:\n \"\"\"Continuous unnormalized sech derivative pulse.\n\n Args:\n times: Times to output pulse for.\n amp: Pulse amplitude at `center`.\n center: Center (mean) of pulse.\n sigma: Width (standard deviation) of pulse.\n ret_sech: Return sech with which derivative was taken with.\n \"\"\"\n sech_out, x = sech(times, amp=amp, center=center, sigma=sigma, ret_x=True)\n sech_out_deriv = -sech_out * np.tanh(x) / sigma\n if ret_sech:\n return sech_out_deriv, sech_out\n return sech_out_deriv\n\n\ndef gaussian_square(\n times: np.ndarray,\n amp: complex,\n center: float,\n square_width: float,\n sigma: float,\n zeroed_width: Optional[float] = None,\n) -> np.ndarray:\n r\"\"\"Continuous gaussian square pulse.\n\n Args:\n times: Times to output pulse for.\n amp: Pulse amplitude.\n center: Center of the square pulse component.\n square_width: Width of the square pulse component.\n sigma: Standard deviation of Gaussian rise/fall portion of the pulse.\n zeroed_width: Subtract baseline of gaussian square pulse\n to enforce $\\OmegaSquare(center \\pm zeroed_width/2)=0$.\n\n Raises:\n PulseError: if zeroed_width is not compatible with square_width.\n \"\"\"\n square_start = center - square_width / 2\n square_stop = center + square_width / 2\n if zeroed_width:\n if zeroed_width < square_width:\n raise PulseError(\"zeroed_width cannot be smaller than square_width.\")\n gaussian_zeroed_width = 
zeroed_width - square_width\n else:\n gaussian_zeroed_width = None\n\n funclist = [\n functools.partial(\n gaussian,\n amp=amp,\n center=square_start,\n sigma=sigma,\n zeroed_width=gaussian_zeroed_width,\n rescale_amp=True,\n ),\n functools.partial(\n gaussian,\n amp=amp,\n center=square_stop,\n sigma=sigma,\n zeroed_width=gaussian_zeroed_width,\n rescale_amp=True,\n ),\n functools.partial(constant, amp=amp),\n ]\n condlist = [times <= square_start, times >= square_stop]\n return np.piecewise(times.astype(np.complex_), condlist, funclist)\n\n\ndef drag(\n times: np.ndarray,\n amp: complex,\n center: float,\n sigma: float,\n beta: float,\n zeroed_width: Optional[float] = None,\n rescale_amp: bool = False,\n) -> np.ndarray:\n r\"\"\"Continuous Y-only correction DRAG pulse for standard nonlinear oscillator (SNO) [1].\n\n [1] Gambetta, J. M., Motzoi, F., Merkel, S. T. & Wilhelm, F. K.\n Analytic control methods for high-fidelity unitary operations\n in a weakly nonlinear oscillator. Phys. Rev. A 83, 012308 (2011).\n\n Args:\n times: Times to output pulse for.\n amp: Pulse amplitude at `center`.\n center: Center (mean) of pulse.\n sigma: Width (standard deviation) of pulse.\n beta: Y correction amplitude. For the SNO this is $\\beta=-\\frac{\\lambda_1^2}{4\\Delta_2}$.\n Where $\\lambds_1$ is the relative coupling strength between the first excited and second\n excited states and $\\Delta_2$ is the detuning between the respective excited states.\n zeroed_width: Subtract baseline of drag pulse to make sure\n $\\Omega_g(center \\pm zeroed_width/2)=0$ is satisfied. This is used to avoid\n large discontinuities at the start of a drag pulse.\n rescale_amp: If `zeroed_width` is not `None` and `rescale_amp=True` the pulse will\n be rescaled so that $\\Omega_g(center)=amp$.\n\n \"\"\"\n gauss_deriv, gauss = gaussian_deriv(\n times,\n amp=amp,\n center=center,\n sigma=sigma,\n ret_gaussian=True,\n zeroed_width=zeroed_width,\n rescale_amp=rescale_amp,\n )\n\n return gauss + 1j * beta * gauss_deriv\n"
] | [
[
"numpy.cosh",
"numpy.floor",
"numpy.asarray",
"numpy.cos",
"numpy.exp",
"numpy.array",
"numpy.sin",
"numpy.tanh"
]
] |
JunjieChen-2020/ColossalAI | [
"0e121a256ac4f628f5d26a16dc553cd0024ca2d5"
] | [
"colossalai/context/moe_context.py"
] | [
"import torch\r\nimport torch.distributed as dist\r\nfrom .parallel_mode import ParallelMode\r\nfrom typing import Tuple\r\n\r\n\r\ndef _check_sanity():\r\n from colossalai.core import global_context as gpc\r\n if gpc.tensor_parallel_size > 1 or gpc.pipeline_parallel_size > 1:\r\n raise NotImplementedError(\"Moe is not compatible with tensor or \"\r\n \"pipeline parallel at present.\")\r\n\r\n\r\nclass MoeParallelInfo:\r\n \"\"\"Moe parallelism information, storing parallel sizes and groups.\r\n \"\"\"\r\n\r\n def __init__(self, ep_size: int, dp_size: int):\r\n _check_sanity()\r\n self.ep_size = ep_size\r\n self.dp_size = dp_size\r\n self.ep_group = None\r\n # data parallel group for experts, since ep_group is different\r\n # we may have different dp_group from get_group(ParallelMode.DATA)\r\n self.dp_group = None\r\n\r\n # Here we assume tensor parallel size = 1\r\n # Otherwise, MoE can't be used\r\n # Since TENSOR parallel group and DATA parallel group\r\n # have been created, we can use them directly.\r\n if ep_size == 1:\r\n from colossalai.core import global_context as gpc\r\n self.ep_group = gpc.get_group(ParallelMode.TENSOR)\r\n self.dp_group = gpc.get_group(ParallelMode.DATA)\r\n return\r\n\r\n if dp_size == 1:\r\n from colossalai.core import global_context as gpc\r\n self.ep_group = gpc.get_group(ParallelMode.DATA)\r\n self.dp_group = gpc.get_group(ParallelMode.TENSOR)\r\n return\r\n\r\n rank = dist.get_rank()\r\n # Create expert parallel group\r\n for i in range(dp_size):\r\n ranks = [i * ep_size + j for j in range(ep_size)]\r\n group = dist.new_group(ranks)\r\n if rank in ranks:\r\n self.ep_group = group\r\n\r\n # Create data parallel group\r\n for j in range(ep_size):\r\n ranks = [i * ep_size + j for i in range(dp_size)]\r\n group = dist.new_group(ranks)\r\n if rank in ranks:\r\n self.dp_group = group\r\n\r\n\r\nclass MoeContext:\r\n \"\"\"MoE parallel context manager. 
This class manages different\r\n parallel groups in MoE context and MoE loss in training.\r\n \"\"\"\r\n __instance = None\r\n\r\n @staticmethod\r\n def get_instance():\r\n if MoeContext.__instance is None:\r\n MoeContext.__instance = MoeContext()\r\n return MoeContext.__instance\r\n\r\n def __init__(self):\r\n self.world_size = 1\r\n # Users may want to set maximum expert parallel size smaller than the world size\r\n # since very low bandwidth across nodes may constrain the performance of MoE\r\n # When we have a maximum expert parallel size, we have a minimum data parallel size naturally\r\n self.max_ep_size = 1\r\n self.min_dp_size = 1\r\n self.aux_loss = None\r\n self.use_kernel_optim = True\r\n\r\n self.has_setup = False\r\n self._parallel_info_dict = dict()\r\n\r\n @property\r\n def parallel_info_dict(self):\r\n return self._parallel_info_dict\r\n\r\n @property\r\n def is_initialized(self):\r\n return self.has_setup\r\n\r\n def setup(self, seed: int, use_kernel_optim: bool = True):\r\n\r\n assert not self.is_initialized, \"MoE distributed context shouldn't be set up again\"\r\n _check_sanity()\r\n assert torch.cuda.is_available(), \"MoE requires to enable CUDA first\"\r\n\r\n self.world_size = dist.get_world_size()\r\n\r\n from colossalai.core import global_context as gpc\r\n self.max_ep_size = gpc.config.get('max_ep_size', self.world_size)\r\n assert self.world_size % self.max_ep_size == 0, \\\r\n \"Maximum epxert parallel size must be a factor of the number of GPUs\"\r\n self.min_dp_size = self.world_size // self.max_ep_size\r\n\r\n # Enabling kernel optimization may raise error in some cases\r\n # Users can close kernel optimization manually\r\n self.use_kernel_optim = use_kernel_optim\r\n\r\n from .random import moe_set_seed\r\n moe_set_seed(seed)\r\n self.has_setup = True\r\n\r\n def get_info(self, num_experts: int) -> Tuple[int, MoeParallelInfo]:\r\n \"\"\"Calculate the Data Parallel Group and Expert Parallel Group.\r\n\r\n Parameters\r\n ----------\r\n num_experts : int\r\n The number experts\r\n\r\n Returns\r\n -------\r\n int, MoeParallelInfo\r\n number of local experts, the MoeParallelInfo of the current ep_size\r\n \"\"\"\r\n\r\n gt_flag = num_experts % self.max_ep_size == 0 # check whether num_experts is greater\r\n lt_flag = self.max_ep_size % num_experts == 0 # check whether num_experts is less\r\n\r\n assert gt_flag or lt_flag, \"Automatic experts placement dose not not support expert number\"\\\r\n \" is not a multiple of ep size or vice versa.\"\r\n\r\n # If the number of experts is greater than maximum expert parallel size. 
a.k.a ep_size,\r\n # there are multiple experts in each GPU and each GPU has different experts\r\n # So it's data parallel size is 1\r\n # Otherwise, there is only one expert in each GPU\r\n # The data parallel size should be calculated\r\n dp_size = 1 if gt_flag else self.max_ep_size // num_experts\r\n ep_size = self.max_ep_size // dp_size\r\n\r\n # Calculate the number of experts for each GPU\r\n num_local_experts = 1 if lt_flag else num_experts // self.max_ep_size\r\n\r\n # Don't forget to multiply minimum data parallel size\r\n dp_size *= self.min_dp_size\r\n if not (ep_size in self.parallel_info_dict):\r\n self.parallel_info_dict[ep_size] = MoeParallelInfo(ep_size, dp_size)\r\n\r\n return num_local_experts, self.parallel_info_dict[ep_size]\r\n\r\n def set_kernel_not_use(self):\r\n self.use_kernel_optim = False\r\n\r\n def reset_loss(self):\r\n self.aux_loss = 0\r\n\r\n def add_loss(self, loss):\r\n self.aux_loss += loss\r\n\r\n def get_loss(self):\r\n return self.aux_loss\r\n"
] | [
[
"torch.distributed.new_group",
"torch.cuda.is_available",
"torch.distributed.get_world_size",
"torch.distributed.get_rank"
]
] |
dprada/molsysmt | [
"83f150bfe3cfa7603566a0ed4aed79d9b0c97f5d"
] | [
"molsysmt/structure/get_sasa.py"
] | [
"from molsysmt import puw\nfrom molsysmt.basic import convert, select, get\nfrom molsysmt._private_tools._digestion import digest_engine, digest_target\nimport numpy as np\n\ndef get_sasa (molecular_system, target='atom', selection='all', frame_indices='all', syntaxis='MolSysMT',\n engine='MDTraj'):\n\n engine = digest_engine(engine)\n target = digest_target(target)\n\n if engine == 'MDTraj':\n\n from mdtraj import shrake_rupley\n\n tmp_item = convert(molecular_system, frame_indices=frame_indices, to_form='mdtraj.Trajectory')\n\n sasa_array = shrake_rupley(tmp_item, mode='atom') # tiene probe_radius y n_sphere_points\n\n if target=='atom':\n\n if selection is not 'all':\n\n atom_indices = select(molecular_system, selection=selection, syntaxis=syntaxis)\n sasa_array = sasa_array[:,atom_indices]\n\n else:\n\n sets_atoms = get(molecular_system, target=target, selection=selection, syntaxis=syntaxis, atom_index=True)\n\n n_sets = len(sets_atoms)\n n_frames = sasa_array.shape[0]\n\n new_sasa_array = np.empty([n_frames, n_sets], dtype='float')\n for ii in range(n_sets):\n new_sasa_array[:,ii] = sasa_array[:,sets_atoms[ii].astype(int)].sum(axis=1)\n sasa_array = new_sasa_array\n\n sasa_array = puw.quantity(sasa_array, 'nm**2')\n sasa_array = puw.standardize(sasa_array)\n\n else:\n\n raise NotImplementedError(\"Engine not implemented yet\")\n\n return sasa_array\n\n"
] | [
[
"numpy.empty"
]
] |
agrinh/nih_prediction | [
"e94ae81935452e7928cda6b101ef58163525d81c"
] | [
"nih_prediction/data.py"
] | [
"\"\"\"Produce metadata and datasets of NIH Chest Xray images\n\"\"\"\nimport os\n\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\n\n\ndef get_metadata(path):\n \"\"\"Produce metadata with relevant columns from NIH Chest Xray images\n\n Args:\n path: Path to NIH dataset\n\n Returns:\n metadata Dataframe with image and label\n \"\"\"\n raw_meta = pd.read_csv(os.path.join(path, 'Data_Entry_2017.csv'))\n meta = raw_meta[['Image Index', 'Finding Labels']].copy()\n meta.columns = ['image', 'label']\n meta.image = os.path.join(path, 'images/') + meta.image\n return meta\n\n\ndef build_dataset(meta, mean=None, std=None, num_parallel_calls=32):\n \"\"\"Produce tf Dataset from metadata\n\n If mean and std are provided those values will be used to normalise the\n image intensities to zero mean and unit variance.\n\n Args:\n meta: Dataframe with paths to images under column name image\n mean:\n std: If both provided will be used to normalize images\n num_parallel_calls: Number of threads for loading images\n \"\"\"\n encoded_labels = meta.label.str.get_dummies(sep='|').sort_index(axis=1)\n ds = tf.data.Dataset.from_tensor_slices({\n 'index': meta.index,\n 'path': meta['image'].values,\n 'label': encoded_labels.values.astype(np.float32)\n })\n if None in (mean, std):\n mean = 0\n std = 1\n return ds.map(\n lambda item: normalize_image(decode_image(read_file(item)), mean, std),\n num_parallel_calls=num_parallel_calls\n )\n\n\ndef read_file(item):\n \"\"\"Read file in key path into key image\n \"\"\"\n item['image'] = tf.read_file(item['path'])\n return item\n\n\ndef decode_image(item):\n \"\"\"Decode raw image file into float32 image tensor with key image\n \"\"\"\n decoded = tf.image.decode_image(item['image'])\n item['image'] = tf.image.convert_image_dtype(decoded, tf.float32)\n # All images are B&W, but some seem to have the channel replicated,\n # to avoid issues we simply select the first channel\n item['image'] = tf.expand_dims(item['image'][:, :, 0], axis=-1)\n item['image'].set_shape([None, None, 1])\n return item\n\n\ndef normalize_image(item, mean, std):\n \"\"\"Normalize image with key image to zero mean and unit variance\n \"\"\"\n item['image'] = (item['image'] - mean) / std\n return item\n"
] | [
[
"tensorflow.image.convert_image_dtype",
"tensorflow.expand_dims",
"tensorflow.image.decode_image",
"tensorflow.read_file"
]
] |
Matrix-King-Studio/MaskDetection | [
"5fed65833a8c08380299d606f66e14df814b022f"
] | [
"yolov5/detect.py"
] | [
"import argparse\r\nimport time\r\nfrom pathlib import Path\r\n\r\nimport cv2\r\nimport torch\r\nimport torch.backends.cudnn as cudnn\r\nfrom numpy import random\r\n\r\nfrom models.experimental import attempt_load\r\nfrom utils.datasets import LoadStreams, LoadImages\r\nfrom utils.general import check_img_size, check_requirements, non_max_suppression, apply_classifier, scale_coords\r\nfrom utils.general import xyxy2xywh, strip_optimizer, set_logging, increment_path\r\nfrom utils.plots import plot_one_box\r\nfrom utils.torch_utils import select_device, load_classifier, time_synchronized\r\n\r\nfrom utils.draw_name import draw_name\r\n\r\n\r\ndef detect(save_img=False):\r\n source, weights, view_img, save_txt, imgsz = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size\r\n webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith(\r\n ('rtsp://', 'rtmp://', 'http://'))\r\n\r\n # Directories\r\n save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run\r\n (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir\r\n\r\n # Initialize\r\n set_logging()\r\n device = select_device(opt.device)\r\n half = device.type != 'cpu' # half precision only supported on CUDA\r\n\r\n # Load model\r\n model = attempt_load(weights, map_location=device) # load FP32 model\r\n imgsz = check_img_size(imgsz, s=model.stride.max()) # check img_size\r\n if half:\r\n model.half() # to FP16\r\n\r\n # Second-stage classifier\r\n classify = False\r\n if classify:\r\n modelc = load_classifier(name='resnet101', n=2) # initialize\r\n modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model']).to(device).eval()\r\n\r\n # Set Dataloader\r\n vid_path, vid_writer = None, None\r\n if webcam:\r\n view_img = True\r\n cudnn.benchmark = True # set True to speed up constant image size inference\r\n dataset = LoadStreams(source, img_size=imgsz)\r\n else:\r\n save_img = True\r\n dataset = LoadImages(source, img_size=imgsz)\r\n\r\n # Get names and colors\r\n names = model.module.names if hasattr(model, 'module') else model.names\r\n colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]\r\n\r\n # Run inference\r\n t0 = time.time()\r\n img = torch.zeros((1, 3, imgsz, imgsz), device=device) # init img\r\n _ = model(img.half() if half else img) if device.type != 'cpu' else None # run once\r\n for path, img, im0s, vid_cap in dataset:\r\n img = torch.from_numpy(img).to(device)\r\n img = img.half() if half else img.float() # uint8 to fp16/32\r\n img /= 255.0 # 0 - 255 to 0.0 - 1.0\r\n if img.ndimension() == 3:\r\n img = img.unsqueeze(0)\r\n\r\n # Inference\r\n t1 = time_synchronized()\r\n pred = model(img, augment=opt.augment)[0]\r\n\r\n # Apply NMS\r\n pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms)\r\n t2 = time_synchronized()\r\n\r\n # Apply Classifier\r\n if classify:\r\n pred = apply_classifier(pred, modelc, img, im0s)\r\n\r\n # Process detections\r\n for i, det in enumerate(pred): # detections per image\r\n if webcam: # batch_size >= 1\r\n p, s, im0, frame = path[i], '%g: ' % i, im0s[i].copy(), dataset.count\r\n else:\r\n p, s, im0, frame = path, '', im0s, getattr(dataset, 'frame', 0)\r\n\r\n p = Path(p) # to Path\r\n save_path = str(save_dir / p.name) # img.jpg\r\n txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # img.txt\r\n s += '%gx%g ' % img.shape[2:] # print 
string\r\n gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh\r\n if len(det):\r\n # Rescale boxes from img_size to im0 size\r\n det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()\r\n\r\n # Print results\r\n for c in det[:, -1].unique():\r\n n = (det[:, -1] == c).sum() # detections per class\r\n s += f'{n} {names[int(c)]}s, ' # add to string\r\n\r\n # Write results\r\n for *xyxy, conf, cls in reversed(det):\r\n if save_txt: # Write to file\r\n xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh\r\n line = (cls, *xywh, conf) if opt.save_conf else (cls, *xywh) # label format\r\n with open(txt_path + '.txt', 'a') as f:\r\n f.write(('%g ' * len(line)).rstrip() % line + '\\n')\r\n\r\n if save_img or view_img: # Add bbox to image\r\n cv2.imwrite(\"img.jpg\", im0)\r\n im0 = draw_name(im0, colors[int(cls)]) # 填上人名\r\n label = f'{names[int(cls)]} {conf:.2f}'\r\n plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=3)\r\n\r\n # Print time (inference + NMS)\r\n print(f'{s}Done. ({t2 - t1:.3f}s)')\r\n\r\n # Stream results\r\n if view_img:\r\n cv2.imshow('Masks detect', im0)\r\n\r\n # Save results (image with detections)\r\n if save_img:\r\n if dataset.mode == 'image':\r\n cv2.imwrite(save_path, im0)\r\n else: # 'video'\r\n if vid_path != save_path: # new video\r\n vid_path = save_path\r\n if isinstance(vid_writer, cv2.VideoWriter):\r\n vid_writer.release() # release previous video writer\r\n\r\n fourcc = 'mp4v' # output video codec\r\n fps = vid_cap.get(cv2.CAP_PROP_FPS)\r\n w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))\r\n h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\r\n vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*fourcc), fps, (w, h))\r\n vid_writer.write(im0)\r\n\r\n if save_txt or save_img:\r\n s = f\"\\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}\" if save_txt else ''\r\n print(f\"Results saved to {save_dir}{s}\")\r\n\r\n print(f'Done. ({time.time() - t0:.3f}s)')\r\n\r\n\r\ndef main():\r\n parser = argparse.ArgumentParser()\r\n\r\n parser.add_argument('--weights', nargs='+', type=str, default='runs/train/exp/weights/best.pt',\r\n help='model.pt path(s)')\r\n parser.add_argument('--source', type=str, default='0', help='source') # file/folder, 0 for webcam\r\n parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')\r\n parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold')\r\n parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')\r\n parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu')\r\n parser.add_argument('--view-img', action='store_true', help='display results')\r\n parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')\r\n parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')\r\n parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')\r\n parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')\r\n parser.add_argument('--augment', action='store_true', help='augmented inference')\r\n parser.add_argument('--update', action='store_true', help='update all models')\r\n parser.add_argument('--project', default='runs/detect', help='save results to project/name')\r\n parser.add_argument('--name', default='exp', help='save results to project/name')\r\n parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')\r\n global opt\r\n opt = parser.parse_args()\r\n print(opt)\r\n check_requirements()\r\n\r\n with torch.no_grad():\r\n if opt.update: # update all models (to fix SourceChangeWarning)\r\n for opt.weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']:\r\n detect()\r\n strip_optimizer(opt.weights)\r\n else:\r\n detect()\r\n"
] | [
[
"torch.load",
"torch.no_grad",
"torch.tensor",
"torch.from_numpy",
"torch.zeros",
"numpy.random.randint"
]
] |
rolandbernard/adventofcode-2021 | [
"9249815af62d0fcf79b71357330a1456ea3be1ed"
] | [
"20.trench-map/py/part1.py"
] | [
"\nimport sys\nimport numpy as np\n\nrawalgo, rawimg = sys.stdin.read().strip().split('\\n\\n')\n\nalgo = np.array([1 if c == '#' else 0 for c in rawalgo], dtype=np.int8)\nimg = np.array([[1 if c == '#' else 0 for c in line] for line in rawimg.split('\\n')], dtype=np.int8)\n\ndef enhance(img, algo):\n img = np.pad(img, 2, 'edge')\n new = np.copy(img)\n for i in range(1, img.shape[0] - 1):\n for j in range(1, img.shape[1] - 1):\n values = img[i-1:i+2,j-1:j+2].flatten()\n index = (values * 2**np.arange(9)[::-1]).sum()\n new[i,j] = algo[index]\n return new[1:-1,1:-1]\n\nimg = np.pad(img, 1)\nfor _ in range(2):\n img = enhance(img, algo)\n\nprint(\"Result:\", img.sum())\n\n"
] | [
[
"numpy.array",
"numpy.pad",
"numpy.arange",
"numpy.copy"
]
] |