Dataset schema (one record per repository; fields appear in this order):

- repo_name: string (length 6 to 130)
- hexsha: sequence of commit SHAs
- file_path: sequence of file paths
- code: sequence of file contents
- apis: sequence of API-call lists
- possible_versions: list of candidate library versions per file
LeftThink/pytorch-lighthead
[ "5f4bf1c87b9be77bf7242ad89900239a9d66914c" ]
[ "lib/datasets/adas.py" ]
[ "# coding: utf-8\n# --------------------------------------------------------\n# Fast R-CNN\n# Copyright (c) 2015 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ross Girshick\n# --------------------------------------------------------\nfrom __future__ import print_function\n\nimport xml.dom.minidom as minidom\n\nimport os\n# import PIL\nimport numpy as np\nimport scipy.sparse\nimport subprocess\n\ntry:\n import cPickle\nexcept ImportError:\n import pickle as cPickle\n\nimport math\nimport glob\nimport uuid\nimport scipy.io as sio\nimport xml.etree.ElementTree as ET\n\nfrom .imdb import imdb\nfrom .imdb import ROOT_DIR\nfrom . import ds_utils\nfrom .adas_eval import adas_eval\n\n# TODO: make fast_rcnn irrelevant\n# >>>> obsolete, because it depends on sth outside of this project\nfrom model.utils.config import cfg\n\n\n# <<<< obsolete\n\n\nclass adas(imdb):\n def __init__(self, image_set, year, devkit_path=None, sub_type='car'):\n imdb.__init__(self, 'adas_' + year + '_' + image_set)\n self._year = year\n self._image_set = image_set\n self._devkit_path = self._get_default_path() if devkit_path is None \\\n else devkit_path\n self._data_path = os.path.join(self._devkit_path, 'ADAS' + self._year)\n\n if sub_type == 'car':\n self._classes = ('__background__', #always index 0\n 'car',)\n elif sub_type == 'tired':\n self._classes = ('__background__', #always index 0\n 'o','s','w')\n\n self._class_to_ind = dict(zip(self.classes, range(self.num_classes)))\n\n self._image_ext = '.jpg'\n self._image_index = self._load_image_set_index()\n # Default to roidb handler\n # self._roidb_handler = self.selective_search_roidb\n self._roidb_handler = self.gt_roidb\n self._salt = str(uuid.uuid4())\n self._comp_id = 'comp4'\n\n # PASCAL specific config options\n self.config = {'cleanup': True,\n 'use_salt': True,\n 'use_diff': False,\n 'matlab_eval': False,\n 'rpn_file': None,\n 'min_size': 2}\n\n assert os.path.exists(self._devkit_path), \\\n 'ADASdevkit path does not exist: {}'.format(self._devkit_path)\n assert os.path.exists(self._data_path), \\\n 'Path does not exist: {}'.format(self._data_path)\n\n def image_path_at(self, i):\n \"\"\"\n Return the absolute path to image i in the image sequence.\n \"\"\"\n return self.image_path_from_index(self._image_index[i])\n\n def image_id_at(self, i):\n \"\"\"\n Return the absolute path to image i in the image sequence.\n \"\"\"\n return i\n\n def image_path_from_index(self, index):\n \"\"\"\n Construct an image path from the image's \"index\" identifier.\n \"\"\"\n image_path = os.path.join(self._data_path, 'JPEGImages',\n index + self._image_ext)\n assert os.path.exists(image_path), \\\n 'Path does not exist: {}'.format(image_path)\n return image_path\n\n def _load_image_set_index(self):\n \"\"\"\n Load the indexes listed in this dataset's image set file.\n \"\"\"\n # Example path to image set file:\n # self._devkit_path + /ADASdevkit2007/ADAS2007/ImageSets/Main/val.txt\n image_set_file = os.path.join(self._data_path, 'ImageSets', 'Main',\n self._image_set + '.txt')\n assert os.path.exists(image_set_file), \\\n 'Path does not exist: {}'.format(image_set_file)\n with open(image_set_file) as f:\n image_index = [x.strip() for x in f.readlines()]\n return image_index\n\n def _get_default_path(self):\n \"\"\"\n Return the default path where PASCAL ADAS is expected to be installed.\n \"\"\"\n return os.path.join(cfg.DATA_DIR, 'ADASdevkit' + self._year)\n\n def gt_roidb(self):\n \"\"\"\n Return the database of ground-truth regions of 
interest.\n\n This function loads/saves from/to a cache file to speed up future calls.\n \"\"\"\n cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')\n if os.path.exists(cache_file):\n print(cache_file)\n with open(cache_file, 'rb') as fid:\n roidb = cPickle.load(fid)\n print('{} gt roidb loaded from {}'.format(self.name, cache_file))\n return roidb\n\n gt_roidb = [self._load_pascal_annotation(index)\n for index in self.image_index]\n with open(cache_file, 'wb') as fid:\n cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)\n print('wrote gt roidb to {}'.format(cache_file))\n\n return gt_roidb\n\n def selective_search_roidb(self):\n \"\"\"\n Return the database of selective search regions of interest.\n Ground-truth ROIs are also included.\n\n This function loads/saves from/to a cache file to speed up future calls.\n \"\"\"\n cache_file = os.path.join(self.cache_path,\n self.name + '_selective_search_roidb.pkl')\n\n if os.path.exists(cache_file):\n with open(cache_file, 'rb') as fid:\n roidb = cPickle.load(fid)\n print('{} ss roidb loaded from {}'.format(self.name, cache_file))\n return roidb\n\n if int(self._year) == 2007 or self._image_set != 'test':\n gt_roidb = self.gt_roidb()\n ss_roidb = self._load_selective_search_roidb(gt_roidb)\n roidb = imdb.merge_roidbs(gt_roidb, ss_roidb)\n else:\n roidb = self._load_selective_search_roidb(None)\n with open(cache_file, 'wb') as fid:\n cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)\n print('wrote ss roidb to {}'.format(cache_file))\n\n return roidb\n\n def rpn_roidb(self):\n if int(self._year) == 2007 or self._image_set != 'test':\n gt_roidb = self.gt_roidb()\n rpn_roidb = self._load_rpn_roidb(gt_roidb)\n roidb = imdb.merge_roidbs(gt_roidb, rpn_roidb)\n else:\n roidb = self._load_rpn_roidb(None)\n\n return roidb\n\n def _load_rpn_roidb(self, gt_roidb):\n filename = self.config['rpn_file']\n print('loading {}'.format(filename))\n assert os.path.exists(filename), \\\n 'rpn data not found at: {}'.format(filename)\n with open(filename, 'rb') as f:\n box_list = cPickle.load(f)\n return self.create_roidb_from_box_list(box_list, gt_roidb)\n\n def _load_selective_search_roidb(self, gt_roidb):\n filename = os.path.abspath(os.path.join(cfg.DATA_DIR,\n 'selective_search_data',\n self.name + '.mat'))\n assert os.path.exists(filename), \\\n 'Selective search data not found at: {}'.format(filename)\n raw_data = sio.loadmat(filename)['boxes'].ravel()\n\n box_list = []\n for i in range(raw_data.shape[0]):\n boxes = raw_data[i][:, (1, 0, 3, 2)] - 1\n keep = ds_utils.unique_boxes(boxes)\n boxes = boxes[keep, :]\n keep = ds_utils.filter_small_boxes(boxes, self.config['min_size'])\n boxes = boxes[keep, :]\n box_list.append(boxes)\n\n return self.create_roidb_from_box_list(box_list, gt_roidb)\n\n def _load_pascal_annotation(self, index):\n \"\"\"\n Load image and bounding boxes info from XML file in the PASCAL ADAS\n format.\n \"\"\"\n filename = os.path.join(self._data_path, 'Annotations', index + '.xml')\n tree = ET.parse(filename)\n objs = tree.findall('object')\n # if not self.config['use_diff']:\n # # Exclude the samples labeled as difficult\n # non_diff_objs = [\n # obj for obj in objs if int(obj.find('difficult').text) == 0]\n # # if len(non_diff_objs) != len(objs):\n # # print 'Removed {} difficult objects'.format(\n # # len(objs) - len(non_diff_objs))\n # objs = non_diff_objs\n num_objs = len(objs)\n\n boxes = np.zeros((num_objs, 4), dtype=np.uint16)\n gt_classes = np.zeros((num_objs), dtype=np.int32)\n overlaps = 
np.zeros((num_objs, self.num_classes), dtype=np.float32)\n # \"Seg\" area for pascal is just the box area\n seg_areas = np.zeros((num_objs), dtype=np.float32)\n ishards = np.zeros((num_objs), dtype=np.int32)\n\n # Load object bounding boxes into a data frame.\n for ix, obj in enumerate(objs):\n bbox = obj.find('bndbox')\n # Make pixel indexes 0-based\n x1 = float(bbox.find('xmin').text) - 1\n y1 = float(bbox.find('ymin').text) - 1\n x2 = float(bbox.find('xmax').text) - 1\n y2 = float(bbox.find('ymax').text) - 1\n \n diffc = obj.find('difficult')\n difficult = 0 if diffc == None else int(diffc.text)\n ishards[ix] = difficult\n\n cls = self._class_to_ind[obj.find('name').text.lower().strip()]\n boxes[ix, :] = [x1, y1, x2, y2]\n gt_classes[ix] = cls\n overlaps[ix, cls] = 1.0\n seg_areas[ix] = (x2 - x1 + 1) * (y2 - y1 + 1)\n\n overlaps = scipy.sparse.csr_matrix(overlaps)\n \n return {'boxes': boxes,\n 'gt_classes': gt_classes,\n 'gt_ishard': ishards,\n 'gt_overlaps': overlaps,\n 'flipped': False,\n 'seg_areas': seg_areas}\n\n def _get_comp_id(self):\n comp_id = (self._comp_id + '_' + self._salt if self.config['use_salt']\n else self._comp_id)\n return comp_id\n\n def _get_adas_results_file_template(self):\n # ADASdevkit/results/ADAS2007/Main/<comp_id>_det_test_aeroplane.txt\n filename = self._get_comp_id() + '_det_' + self._image_set + '_{:s}.txt'\n filedir = os.path.join(self._devkit_path, 'results', 'ADAS' + self._year, 'Main')\n if not os.path.exists(filedir):\n os.makedirs(filedir)\n path = os.path.join(filedir, filename)\n return path\n\n def _write_adas_results_file(self, all_boxes):\n for cls_ind, cls in enumerate(self.classes):\n if cls == '__background__':\n continue\n print('Writing {} ADAS results file'.format(cls))\n filename = self._get_adas_results_file_template().format(cls)\n with open(filename, 'wt') as f:\n for im_ind, index in enumerate(self.image_index):\n dets = all_boxes[cls_ind][im_ind]\n if dets == []:\n continue\n # the ADASdevkit expects 1-based indices\n for k in range(dets.shape[0]):\n f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\\n'.\n format(index, dets[k, -1],\n dets[k, 0] + 1, dets[k, 1] + 1,\n dets[k, 2] + 1, dets[k, 3] + 1))\n\n def _do_python_eval(self, output_dir='output'):\n annopath = os.path.join(\n self._devkit_path,\n 'ADAS' + self._year,\n 'Annotations',\n '{:s}.xml')\n imagesetfile = os.path.join(\n self._devkit_path,\n 'ADAS' + self._year,\n 'ImageSets',\n 'Main',\n self._image_set + '.txt')\n cachedir = os.path.join(self._devkit_path, 'annotations_cache')\n aps = []\n\n if not os.path.isdir(output_dir):\n os.mkdir(output_dir)\n for i, cls in enumerate(self._classes):\n if cls == '__background__':\n continue\n filename = self._get_adas_results_file_template().format(cls)\n rec, prec, ap = adas_eval(\n filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5)\n aps += [ap]\n print('AP for {} = {:.4f}'.format(cls, ap))\n with open(os.path.join(output_dir, cls + '_pr.pkl'), 'w') as f:\n cPickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)\n print('Mean AP = {:.4f}'.format(np.mean(aps)))\n print('~~~~~~~~')\n print('Results:')\n for ap in aps:\n print('{:.3f}'.format(ap))\n print('{:.3f}'.format(np.mean(aps)))\n print('~~~~~~~~')\n print('')\n print('--------------------------------------------------------------')\n print('Results computed with the **unofficial** Python eval code.')\n print('Results should be very close to the official MATLAB eval code.')\n print('Recompute with `./tools/reval.py --matlab ...` for your paper.')\n print('-- 
Thanks, The Management')\n print('--------------------------------------------------------------')\n\n def _do_matlab_eval(self, output_dir='output'):\n print('-----------------------------------------------------')\n print('Computing results with the official MATLAB eval code.')\n print('-----------------------------------------------------')\n path = os.path.join(cfg.ROOT_DIR, 'lib', 'datasets',\n 'ADASdevkit-matlab-wrapper')\n cmd = 'cd {} && '.format(path)\n cmd += '{:s} -nodisplay -nodesktop '.format(cfg.MATLAB)\n cmd += '-r \"dbstop if error; '\n cmd += 'adas_eval(\\'{:s}\\',\\'{:s}\\',\\'{:s}\\',\\'{:s}\\'); quit;\"' \\\n .format(self._devkit_path, self._get_comp_id(),\n self._image_set, output_dir)\n print('Running:\\n{}'.format(cmd))\n status = subprocess.call(cmd, shell=True)\n\n def evaluate_detections(self, all_boxes, output_dir):\n self._write_adas_results_file(all_boxes)\n self._do_python_eval(output_dir)\n if self.config['matlab_eval']:\n self._do_matlab_eval(output_dir)\n if self.config['cleanup']:\n for cls in self._classes:\n if cls == '__background__':\n continue\n filename = self._get_adas_results_file_template().format(cls)\n os.remove(filename)\n\n def competition_mode(self, on):\n if on:\n self.config['use_salt'] = False\n self.config['cleanup'] = False\n else:\n self.config['use_salt'] = True\n self.config['cleanup'] = True\n\n\nif __name__ == '__main__':\n d = adas('trainval', '2017')\n res = d.roidb\n from IPython import embed;\n\n embed()\n" ]
[ [ "scipy.io.loadmat", "numpy.zeros", "numpy.mean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
mehulfollytobevice/MachineLearning
[ "7d442907df4e8560bf5067d8bac660a3cb303393" ]
[ "K-NN Classification/KNN Classification from scratch/knn_from_scratch.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Apr 9 21:03:57 2020\r\n\r\n@author: Mehul\r\n\"\"\"\r\n\r\n#importing the libraries\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport random\r\nimport warnings\r\nfrom matplotlib import style\r\nfrom collections import Counter\r\nfrom math import sqrt\r\nstyle.use('fivethirtyeight')\r\n\r\n#defining knn function\r\ndef k_nearest_neighbors(data,predict,k=3):\r\n\tdistances=[]\r\n\tif(len(data)>=k):\r\n\t\t#this is not an error it is just a warning , the algorithm still works \r\n\t\twarnings.warn('The value of k is less than the number of voting groups.')\r\n \r\n\tfor group in data:\r\n\t\t#data is a dictionary of lists with different groups of classes \r\n\t\tfor features in data[group]:\r\n\t\t\t#features represent the points in the dataset\r\n\t\t\t\r\n\t\t\t#original way\r\n\t\t\t#euclidean_distance=sqrt((features[0]-predict[0])**2+(features[1]-predict[1])**2)\r\n\t\t\t\r\n\t\t\t#faster way\r\n\t\t\teuclidean_distance=np.linalg.norm(np.array(features)-np.array(predict))\r\n\t\t\tdistances.append([euclidean_distance,group])\r\n\t\r\n\t#once we have the distances we dont care about them\r\n\t#we populate the list of votes which has the top k neighbors to the prediction point \r\n\tvotes=[i[1] for i in sorted(distances)[:k] ]\r\n\t#using counter we calculate the most common out of the nearest neighbors\r\n\tvote_result=Counter(votes).most_common(1)[0][0]\r\n\t\r\n\t#we can also give our confidence,confidence is the probability of your prediction being right\r\n\t#confidence=Counter(votes).most_common(1)[0][1]/k\r\n\t\r\n\treturn vote_result\r\n\r\ndef accuracy_of_result(train_set,test_set):\r\n\t#intialising \r\n\tcorrect=0\r\n\ttotal=0\r\n\t\r\n\t#testing and finding accuracy\r\n\tfor group in test_set:\r\n\t\tfor data in test_set[group]:\r\n\t\t\t#iterating through all the data in a class \r\n\t\t\tresult=k_nearest_neighbors(train_set,data,k=5)\r\n\t\t\tif (group==result):\r\n\t\t\t\tcorrect=correct+1\r\n\t\t\ttotal=total+1\r\n\taccuracy=correct/total\r\n\treturn accuracy\r\n\r\n''''\r\n#trial data\r\n#our data is in form of dictionary of lists\r\ndataset={'k':[[1,2],[2,3,],[3,1]],'r':[[6,5],[7,7],[8,6]]}\r\nnew_features=[5,7]\r\n\r\n#plotting the data\r\nplt.scatter(new_features[0],new_features[1],s=50)\r\nfor i in dataset:\r\n\tfor j in dataset[i]:\r\n\t\tprint(j)\r\n\t\tplt.scatter(j[0],j[1],s=100,color=i)\r\n\r\n#applying knn model\r\nresult=k_nearest_neighbors(dataset,new_features,k=3)#result represents the class the prediction point belongs to \r\n\r\n#plotting the prediction\r\nplt.scatter(new_features[0],new_features[1],s=50,color=result)\r\nfor i in dataset:\r\n\tfor j in dataset[i]:\r\n\t\tprint(j)\r\n\t\tplt.scatter(j[0],j[1],s=100,color=i)\r\n'''\r\n\r\n#Implmenting the model on the test dataset\r\n\r\n#importing the dataset\r\ndataset=pd.read_csv('breast-cancer-wisconsin.data.txt')\r\n\r\n#replacing missing instances with large numbers \r\ndataset.replace('?',-99999,inplace=True)\r\ndataset.drop(['id'],1,inplace=True)\r\ndataset=dataset.astype(float).values.tolist()\r\n\r\n#shuffling to data to include some randomness\r\n#this does not change the raltionship between the data\r\n#this is what can be used for cross-validation \r\nrandom.shuffle(dataset)\r\n\r\n#splitting the dataset into test set and train set\r\ntest_size=0.2\r\n\r\n#the train set and the test set are dictionary of lists\r\ntrain_set={2:[],4:[]}\r\ntest_set={2:[],4:[]}\r\n\r\n#slicing the data into train_data and 
test_data\r\ntrain_data=dataset[:-int(test_size*len(dataset))] #all the data upto the last 20%\r\ntest_data=dataset[-int(test_size*len(dataset)):] #the last 20%\r\n\r\n#populating the dictionary\r\n#here we take the data from the train_data and the test_data and use it to populate our dictionaries\r\n\r\nfor i in train_data:\r\n\ttrain_set[i[-1]].append(i[:-1])# i[-1] represents the class of the particular row\r\n\r\nfor i in test_data:\r\n\ttest_set[i[-1]].append(i[:-1])# i[-1] represents the class of the particular row\r\n\r\n#getting the accuracy of our knn model on the dataset\r\nprint('Accuracy of the result:',accuracy_of_result(train_set,test_set))" ]
[ [ "numpy.array", "pandas.read_csv", "matplotlib.style.use" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
WonMian/coach
[ "67978248927f24ee09df6f1df842a14103aaf11b" ]
[ "rl_coach/agents/actor_critic_agent.py" ]
[ "#\n# Copyright (c) 2017 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom typing import Union\n\nimport numpy as np\nimport scipy.signal\n\nfrom rl_coach.agents.policy_optimization_agent import PolicyOptimizationAgent, PolicyGradientRescaler\nfrom rl_coach.architectures.tensorflow_components.heads.policy_head import PolicyHeadParameters\nfrom rl_coach.architectures.tensorflow_components.heads.v_head import VHeadParameters\nfrom rl_coach.architectures.tensorflow_components.middlewares.fc_middleware import FCMiddlewareParameters\nfrom rl_coach.base_parameters import AlgorithmParameters, NetworkParameters, \\\n AgentParameters\nfrom rl_coach.logger import screen\nfrom rl_coach.memories.episodic.single_episode_buffer import SingleEpisodeBufferParameters\nfrom rl_coach.spaces import DiscreteActionSpace\nfrom rl_coach.utils import last_sample\nfrom rl_coach.architectures.tensorflow_components.embedders.embedder import InputEmbedderParameters\n\n\nclass ActorCriticAlgorithmParameters(AlgorithmParameters):\n def __init__(self):\n super().__init__()\n self.policy_gradient_rescaler = PolicyGradientRescaler.A_VALUE\n self.apply_gradients_every_x_episodes = 5\n self.beta_entropy = 0\n self.num_steps_between_gradient_updates = 5000 # this is called t_max in all the papers\n self.gae_lambda = 0.96\n self.estimate_state_value_using_gae = False\n\n\nclass ActorCriticNetworkParameters(NetworkParameters):\n def __init__(self):\n super().__init__()\n self.input_embedders_parameters = {'observation': InputEmbedderParameters()}\n self.middleware_parameters = FCMiddlewareParameters()\n self.heads_parameters = [VHeadParameters(), PolicyHeadParameters()]\n self.loss_weights = [0.5, 1.0]\n self.rescale_gradient_from_head_by_factor = [1, 1]\n self.optimizer_type = 'Adam'\n self.clip_gradients = 40.0\n self.async_training = True\n\n\nclass ActorCriticAgentParameters(AgentParameters):\n def __init__(self):\n super().__init__(algorithm=ActorCriticAlgorithmParameters(),\n exploration=None, #TODO this should be different for continuous (ContinuousEntropyExploration)\n # and discrete (CategoricalExploration) action spaces.\n memory=SingleEpisodeBufferParameters(),\n networks={\"main\": ActorCriticNetworkParameters()})\n\n @property\n def path(self):\n return 'rl_coach.agents.actor_critic_agent:ActorCriticAgent'\n\n\n# Actor Critic - https://arxiv.org/abs/1602.01783\nclass ActorCriticAgent(PolicyOptimizationAgent):\n def __init__(self, agent_parameters, parent: Union['LevelManager', 'CompositeAgent']=None):\n super().__init__(agent_parameters, parent)\n self.last_gradient_update_step_idx = 0\n self.action_advantages = self.register_signal('Advantages')\n self.state_values = self.register_signal('Values')\n self.value_loss = self.register_signal('Value Loss')\n self.policy_loss = self.register_signal('Policy Loss')\n\n # Discounting function used to calculate discounted returns.\n def discount(self, x, gamma):\n return scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1]\n\n def 
get_general_advantage_estimation_values(self, rewards, values):\n # values contain n+1 elements (t ... t+n+1), rewards contain n elements (t ... t + n)\n bootstrap_extended_rewards = np.array(rewards.tolist() + [values[-1]])\n\n # Approximation based calculation of GAE (mathematically correct only when Tmax = inf,\n # although in practice works even in much smaller Tmax values, e.g. 20)\n deltas = rewards + self.ap.algorithm.discount * values[1:] - values[:-1]\n gae = self.discount(deltas, self.ap.algorithm.discount * self.ap.algorithm.gae_lambda)\n\n if self.ap.algorithm.estimate_state_value_using_gae:\n discounted_returns = np.expand_dims(gae + values[:-1], -1)\n else:\n discounted_returns = np.expand_dims(np.array(self.discount(bootstrap_extended_rewards,\n self.ap.algorithm.discount)), 1)[:-1]\n return gae, discounted_returns\n\n def learn_from_batch(self, batch):\n # batch contains a list of episodes to learn from\n network_keys = self.ap.network_wrappers['main'].input_embedders_parameters.keys()\n\n # get the values for the current states\n\n result = self.networks['main'].online_network.predict(batch.states(network_keys))\n current_state_values = result[0]\n\n self.state_values.add_sample(current_state_values)\n\n # the targets for the state value estimator\n num_transitions = batch.size\n state_value_head_targets = np.zeros((num_transitions, 1))\n\n # estimate the advantage function\n action_advantages = np.zeros((num_transitions, 1))\n\n if self.policy_gradient_rescaler == PolicyGradientRescaler.A_VALUE:\n if batch.game_overs()[-1]:\n R = 0\n else:\n R = self.networks['main'].online_network.predict(last_sample(batch.next_states(network_keys)))[0]\n\n for i in reversed(range(num_transitions)):\n R = batch.rewards()[i] + self.ap.algorithm.discount * R\n state_value_head_targets[i] = R\n action_advantages[i] = R - current_state_values[i]\n\n elif self.policy_gradient_rescaler == PolicyGradientRescaler.GAE:\n # get bootstraps\n bootstrapped_value = self.networks['main'].online_network.predict(last_sample(batch.next_states(network_keys)))[0]\n values = np.append(current_state_values, bootstrapped_value)\n if batch.game_overs()[-1]:\n values[-1] = 0\n\n # get general discounted returns table\n gae_values, state_value_head_targets = self.get_general_advantage_estimation_values(batch.rewards(), values)\n action_advantages = np.vstack(gae_values)\n else:\n screen.warning(\"WARNING: The requested policy gradient rescaler is not available\")\n\n action_advantages = action_advantages.squeeze(axis=-1)\n actions = batch.actions()\n if not isinstance(self.spaces.action, DiscreteActionSpace) and len(actions.shape) < 2:\n actions = np.expand_dims(actions, -1)\n\n # train\n result = self.networks['main'].online_network.accumulate_gradients({**batch.states(network_keys),\n 'output_1_0': actions},\n [state_value_head_targets, action_advantages])\n\n # logging\n total_loss, losses, unclipped_grads = result[:3]\n self.action_advantages.add_sample(action_advantages)\n self.unclipped_grads.add_sample(unclipped_grads)\n self.value_loss.add_sample(losses[0])\n self.policy_loss.add_sample(losses[1])\n\n return total_loss, losses, unclipped_grads\n\n def get_prediction(self, states):\n tf_input_state = self.prepare_batch_for_inference(states, \"main\")\n return self.networks['main'].online_network.predict(tf_input_state)[1:] # index 0 is the state value\n" ]
[ [ "numpy.append", "numpy.expand_dims", "numpy.zeros", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kuantan/pandas
[ "e18921eb0cc86f71c84a4aa0bd6d0c1b7de89def", "e18921eb0cc86f71c84a4aa0bd6d0c1b7de89def" ]
[ "pandas/io/parquet.py", "pandas/tests/io/test_common.py" ]
[ "\"\"\" parquet compat \"\"\"\nfrom __future__ import annotations\n\nimport io\nimport os\nfrom typing import Any\nfrom warnings import catch_warnings\n\nfrom pandas._typing import (\n FilePath,\n ReadBuffer,\n StorageOptions,\n WriteBuffer,\n)\nfrom pandas.compat._optional import import_optional_dependency\nfrom pandas.errors import AbstractMethodError\nfrom pandas.util._decorators import doc\n\nfrom pandas import (\n DataFrame,\n MultiIndex,\n get_option,\n)\nfrom pandas.core.shared_docs import _shared_docs\nfrom pandas.util.version import Version\n\nfrom pandas.io.common import (\n IOHandles,\n get_handle,\n is_fsspec_url,\n is_url,\n stringify_path,\n)\n\n\ndef get_engine(engine: str) -> BaseImpl:\n \"\"\"return our implementation\"\"\"\n if engine == \"auto\":\n engine = get_option(\"io.parquet.engine\")\n\n if engine == \"auto\":\n # try engines in this order\n engine_classes = [PyArrowImpl, FastParquetImpl]\n\n error_msgs = \"\"\n for engine_class in engine_classes:\n try:\n return engine_class()\n except ImportError as err:\n error_msgs += \"\\n - \" + str(err)\n\n raise ImportError(\n \"Unable to find a usable engine; \"\n \"tried using: 'pyarrow', 'fastparquet'.\\n\"\n \"A suitable version of \"\n \"pyarrow or fastparquet is required for parquet \"\n \"support.\\n\"\n \"Trying to import the above resulted in these errors:\"\n f\"{error_msgs}\"\n )\n\n if engine == \"pyarrow\":\n return PyArrowImpl()\n elif engine == \"fastparquet\":\n return FastParquetImpl()\n\n raise ValueError(\"engine must be one of 'pyarrow', 'fastparquet'\")\n\n\ndef _get_path_or_handle(\n path: FilePath | ReadBuffer[bytes] | WriteBuffer[bytes],\n fs: Any,\n storage_options: StorageOptions = None,\n mode: str = \"rb\",\n is_dir: bool = False,\n) -> tuple[\n FilePath | ReadBuffer[bytes] | WriteBuffer[bytes], IOHandles[bytes] | None, Any\n]:\n \"\"\"File handling for PyArrow.\"\"\"\n path_or_handle = stringify_path(path)\n if is_fsspec_url(path_or_handle) and fs is None:\n fsspec = import_optional_dependency(\"fsspec\")\n\n fs, path_or_handle = fsspec.core.url_to_fs(\n path_or_handle, **(storage_options or {})\n )\n elif storage_options and (not is_url(path_or_handle) or mode != \"rb\"):\n # can't write to a remote url\n # without making use of fsspec at the moment\n raise ValueError(\"storage_options passed with buffer, or non-supported URL\")\n\n handles = None\n if (\n not fs\n and not is_dir\n and isinstance(path_or_handle, str)\n and not os.path.isdir(path_or_handle)\n ):\n # use get_handle only when we are very certain that it is not a directory\n # fsspec resources can also point to directories\n # this branch is used for example when reading from non-fsspec URLs\n handles = get_handle(\n path_or_handle, mode, is_text=False, storage_options=storage_options\n )\n fs = None\n path_or_handle = handles.handle\n return path_or_handle, handles, fs\n\n\nclass BaseImpl:\n @staticmethod\n def validate_dataframe(df: DataFrame):\n\n if not isinstance(df, DataFrame):\n raise ValueError(\"to_parquet only supports IO with DataFrames\")\n\n # must have value column names for all index levels (strings only)\n if isinstance(df.columns, MultiIndex):\n if not all(\n x.inferred_type in {\"string\", \"empty\"} for x in df.columns.levels\n ):\n raise ValueError(\n \"\"\"\n parquet must have string column names for all values in\n each level of the MultiIndex\n \"\"\"\n )\n else:\n if df.columns.inferred_type not in {\"string\", \"empty\"}:\n raise ValueError(\"parquet must have string column names\")\n\n # index level 
names must be strings\n valid_names = all(\n isinstance(name, str) for name in df.index.names if name is not None\n )\n if not valid_names:\n raise ValueError(\"Index level names must be strings\")\n\n def write(self, df: DataFrame, path, compression, **kwargs):\n raise AbstractMethodError(self)\n\n def read(self, path, columns=None, **kwargs):\n raise AbstractMethodError(self)\n\n\nclass PyArrowImpl(BaseImpl):\n def __init__(self):\n import_optional_dependency(\n \"pyarrow\", extra=\"pyarrow is required for parquet support.\"\n )\n import pyarrow.parquet\n\n # import utils to register the pyarrow extension types\n import pandas.core.arrays._arrow_utils # noqa:F401\n\n self.api = pyarrow\n\n def write(\n self,\n df: DataFrame,\n path: FilePath | WriteBuffer[bytes],\n compression: str | None = \"snappy\",\n index: bool | None = None,\n storage_options: StorageOptions = None,\n partition_cols: list[str] | None = None,\n **kwargs,\n ):\n self.validate_dataframe(df)\n\n from_pandas_kwargs: dict[str, Any] = {\"schema\": kwargs.pop(\"schema\", None)}\n if index is not None:\n from_pandas_kwargs[\"preserve_index\"] = index\n\n table = self.api.Table.from_pandas(df, **from_pandas_kwargs)\n\n path_or_handle, handles, kwargs[\"filesystem\"] = _get_path_or_handle(\n path,\n kwargs.pop(\"filesystem\", None),\n storage_options=storage_options,\n mode=\"wb\",\n is_dir=partition_cols is not None,\n )\n try:\n if partition_cols is not None:\n # writes to multiple files under the given path\n self.api.parquet.write_to_dataset(\n table,\n path_or_handle,\n compression=compression,\n partition_cols=partition_cols,\n **kwargs,\n )\n else:\n # write to single output file\n self.api.parquet.write_table(\n table, path_or_handle, compression=compression, **kwargs\n )\n finally:\n if handles is not None:\n handles.close()\n\n def read(\n self,\n path,\n columns=None,\n use_nullable_dtypes=False,\n storage_options: StorageOptions = None,\n **kwargs,\n ):\n kwargs[\"use_pandas_metadata\"] = True\n\n to_pandas_kwargs = {}\n if use_nullable_dtypes:\n import pandas as pd\n\n mapping = {\n self.api.int8(): pd.Int8Dtype(),\n self.api.int16(): pd.Int16Dtype(),\n self.api.int32(): pd.Int32Dtype(),\n self.api.int64(): pd.Int64Dtype(),\n self.api.uint8(): pd.UInt8Dtype(),\n self.api.uint16(): pd.UInt16Dtype(),\n self.api.uint32(): pd.UInt32Dtype(),\n self.api.uint64(): pd.UInt64Dtype(),\n self.api.bool_(): pd.BooleanDtype(),\n self.api.string(): pd.StringDtype(),\n }\n to_pandas_kwargs[\"types_mapper\"] = mapping.get\n manager = get_option(\"mode.data_manager\")\n if manager == \"array\":\n to_pandas_kwargs[\"split_blocks\"] = True # type: ignore[assignment]\n\n path_or_handle, handles, kwargs[\"filesystem\"] = _get_path_or_handle(\n path,\n kwargs.pop(\"filesystem\", None),\n storage_options=storage_options,\n mode=\"rb\",\n )\n try:\n result = self.api.parquet.read_table(\n path_or_handle, columns=columns, **kwargs\n ).to_pandas(**to_pandas_kwargs)\n if manager == \"array\":\n result = result._as_manager(\"array\", copy=False)\n return result\n finally:\n if handles is not None:\n handles.close()\n\n\nclass FastParquetImpl(BaseImpl):\n def __init__(self):\n # since pandas is a dependency of fastparquet\n # we need to import on first use\n fastparquet = import_optional_dependency(\n \"fastparquet\", extra=\"fastparquet is required for parquet support.\"\n )\n self.api = fastparquet\n\n def write(\n self,\n df: DataFrame,\n path,\n compression=\"snappy\",\n index=None,\n partition_cols=None,\n storage_options: StorageOptions 
= None,\n **kwargs,\n ):\n self.validate_dataframe(df)\n # thriftpy/protocol/compact.py:339:\n # DeprecationWarning: tostring() is deprecated.\n # Use tobytes() instead.\n\n if \"partition_on\" in kwargs and partition_cols is not None:\n raise ValueError(\n \"Cannot use both partition_on and \"\n \"partition_cols. Use partition_cols for partitioning data\"\n )\n elif \"partition_on\" in kwargs:\n partition_cols = kwargs.pop(\"partition_on\")\n\n if partition_cols is not None:\n kwargs[\"file_scheme\"] = \"hive\"\n\n # cannot use get_handle as write() does not accept file buffers\n path = stringify_path(path)\n if is_fsspec_url(path):\n fsspec = import_optional_dependency(\"fsspec\")\n\n # if filesystem is provided by fsspec, file must be opened in 'wb' mode.\n kwargs[\"open_with\"] = lambda path, _: fsspec.open(\n path, \"wb\", **(storage_options or {})\n ).open()\n elif storage_options:\n raise ValueError(\n \"storage_options passed with file object or non-fsspec file path\"\n )\n\n with catch_warnings(record=True):\n self.api.write(\n path,\n df,\n compression=compression,\n write_index=index,\n partition_on=partition_cols,\n **kwargs,\n )\n\n def read(\n self, path, columns=None, storage_options: StorageOptions = None, **kwargs\n ):\n parquet_kwargs: dict[str, Any] = {}\n use_nullable_dtypes = kwargs.pop(\"use_nullable_dtypes\", False)\n if Version(self.api.__version__) >= Version(\"0.7.1\"):\n # We are disabling nullable dtypes for fastparquet pending discussion\n parquet_kwargs[\"pandas_nulls\"] = False\n if use_nullable_dtypes:\n raise ValueError(\n \"The 'use_nullable_dtypes' argument is not supported for the \"\n \"fastparquet engine\"\n )\n path = stringify_path(path)\n handles = None\n if is_fsspec_url(path):\n fsspec = import_optional_dependency(\"fsspec\")\n\n if Version(self.api.__version__) > Version(\"0.6.1\"):\n parquet_kwargs[\"fs\"] = fsspec.open(\n path, \"rb\", **(storage_options or {})\n ).fs\n else:\n parquet_kwargs[\"open_with\"] = lambda path, _: fsspec.open(\n path, \"rb\", **(storage_options or {})\n ).open()\n elif isinstance(path, str) and not os.path.isdir(path):\n # use get_handle only when we are very certain that it is not a directory\n # fsspec resources can also point to directories\n # this branch is used for example when reading from non-fsspec URLs\n handles = get_handle(\n path, \"rb\", is_text=False, storage_options=storage_options\n )\n path = handles.handle\n\n parquet_file = self.api.ParquetFile(path, **parquet_kwargs)\n\n result = parquet_file.to_pandas(columns=columns, **kwargs)\n\n if handles is not None:\n handles.close()\n return result\n\n\n@doc(storage_options=_shared_docs[\"storage_options\"])\ndef to_parquet(\n df: DataFrame,\n path: FilePath | WriteBuffer[bytes] | None = None,\n engine: str = \"auto\",\n compression: str | None = \"snappy\",\n index: bool | None = None,\n storage_options: StorageOptions = None,\n partition_cols: list[str] | None = None,\n **kwargs,\n) -> bytes | None:\n \"\"\"\n Write a DataFrame to the parquet format.\n\n Parameters\n ----------\n df : DataFrame\n path : str, path object, file-like object, or None, default None\n String, path object (implementing ``os.PathLike[str]``), or file-like\n object implementing a binary ``write()`` function. If None, the result is\n returned as bytes. If a string, it will be used as Root Directory path\n when writing a partitioned dataset. The engine fastparquet does not\n accept file-like objects.\n\n .. 
versionchanged:: 1.2.0\n\n engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto'\n Parquet library to use. If 'auto', then the option\n ``io.parquet.engine`` is used. The default ``io.parquet.engine``\n behavior is to try 'pyarrow', falling back to 'fastparquet' if\n 'pyarrow' is unavailable.\n compression : {{'snappy', 'gzip', 'brotli', 'lz4', 'zstd', None}},\n default 'snappy'. Name of the compression to use. Use ``None``\n for no compression. The supported compression methods actually\n depend on which engine is used. For 'pyarrow', 'snappy', 'gzip',\n 'brotli', 'lz4', 'zstd' are all supported. For 'fastparquet',\n only 'gzip' and 'snappy' are supported.\n index : bool, default None\n If ``True``, include the dataframe's index(es) in the file output. If\n ``False``, they will not be written to the file.\n If ``None``, similar to ``True`` the dataframe's index(es)\n will be saved. However, instead of being saved as values,\n the RangeIndex will be stored as a range in the metadata so it\n doesn't require much space and is faster. Other indexes will\n be included as columns in the file output.\n partition_cols : str or list, optional, default None\n Column names by which to partition the dataset.\n Columns are partitioned in the order they are given.\n Must be None if path is not a string.\n {storage_options}\n\n .. versionadded:: 1.2.0\n\n kwargs\n Additional keyword arguments passed to the engine\n\n Returns\n -------\n bytes if no path argument is provided else None\n \"\"\"\n if isinstance(partition_cols, str):\n partition_cols = [partition_cols]\n impl = get_engine(engine)\n\n path_or_buf: FilePath | WriteBuffer[bytes] = io.BytesIO() if path is None else path\n\n impl.write(\n df,\n path_or_buf,\n compression=compression,\n index=index,\n partition_cols=partition_cols,\n storage_options=storage_options,\n **kwargs,\n )\n\n if path is None:\n assert isinstance(path_or_buf, io.BytesIO)\n return path_or_buf.getvalue()\n else:\n return None\n\n\n@doc(storage_options=_shared_docs[\"storage_options\"])\ndef read_parquet(\n path,\n engine: str = \"auto\",\n columns=None,\n storage_options: StorageOptions = None,\n use_nullable_dtypes: bool = False,\n **kwargs,\n):\n \"\"\"\n Load a parquet object from the file path, returning a DataFrame.\n\n Parameters\n ----------\n path : str, path object or file-like object\n String, path object (implementing ``os.PathLike[str]``), or file-like\n object implementing a binary ``read()`` function.\n The string could be a URL. Valid URL schemes include http, ftp, s3,\n gs, and file. For file URLs, a host is expected. A local file could be:\n ``file://localhost/path/to/table.parquet``.\n A file URL can also be a path to a directory that contains multiple\n partitioned parquet files. Both pyarrow and fastparquet support\n paths to directories as well as file URLs. A directory path could be:\n ``file://localhost/path/to/tables`` or ``s3://bucket/partition_dir``.\n engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto'\n Parquet library to use. If 'auto', then the option\n ``io.parquet.engine`` is used. The default ``io.parquet.engine``\n behavior is to try 'pyarrow', falling back to 'fastparquet' if\n 'pyarrow' is unavailable.\n columns : list, default=None\n If not None, only these columns will be read from the file.\n\n {storage_options}\n\n .. versionadded:: 1.3.0\n\n use_nullable_dtypes : bool, default False\n If True, use dtypes that use ``pd.NA`` as missing value indicator\n for the resulting DataFrame. 
(only applicable for the ``pyarrow``\n engine)\n As new dtypes are added that support ``pd.NA`` in the future, the\n output with this option will change to use those dtypes.\n Note: this is an experimental option, and behaviour (e.g. additional\n support dtypes) may change without notice.\n\n .. versionadded:: 1.2.0\n\n **kwargs\n Any additional kwargs are passed to the engine.\n\n Returns\n -------\n DataFrame\n \"\"\"\n impl = get_engine(engine)\n\n return impl.read(\n path,\n columns=columns,\n storage_options=storage_options,\n use_nullable_dtypes=use_nullable_dtypes,\n **kwargs,\n )\n", "\"\"\"\nTests for the pandas.io.common functionalities\n\"\"\"\nimport codecs\nimport errno\nfrom functools import partial\nfrom io import (\n BytesIO,\n StringIO,\n UnsupportedOperation,\n)\nimport mmap\nimport os\nfrom pathlib import Path\nimport tempfile\n\nimport pytest\n\nfrom pandas.compat import is_platform_windows\nimport pandas.util._test_decorators as td\n\nimport pandas as pd\nimport pandas._testing as tm\n\nimport pandas.io.common as icom\n\n\nclass CustomFSPath:\n \"\"\"For testing fspath on unknown objects\"\"\"\n\n def __init__(self, path):\n self.path = path\n\n def __fspath__(self):\n return self.path\n\n\n# Functions that consume a string path and return a string or path-like object\npath_types = [str, CustomFSPath, Path]\n\ntry:\n from py.path import local as LocalPath\n\n path_types.append(LocalPath)\nexcept ImportError:\n pass\n\nHERE = os.path.abspath(os.path.dirname(__file__))\n\n\n# https://github.com/cython/cython/issues/1720\[email protected](\"ignore:can't resolve package:ImportWarning\")\nclass TestCommonIOCapabilities:\n data1 = \"\"\"index,A,B,C,D\nfoo,2,3,4,5\nbar,7,8,9,10\nbaz,12,13,14,15\nqux,12,13,14,15\nfoo2,12,13,14,15\nbar2,12,13,14,15\n\"\"\"\n\n def test_expand_user(self):\n filename = \"~/sometest\"\n expanded_name = icom._expand_user(filename)\n\n assert expanded_name != filename\n assert os.path.isabs(expanded_name)\n assert os.path.expanduser(filename) == expanded_name\n\n def test_expand_user_normal_path(self):\n filename = \"/somefolder/sometest\"\n expanded_name = icom._expand_user(filename)\n\n assert expanded_name == filename\n assert os.path.expanduser(filename) == expanded_name\n\n def test_stringify_path_pathlib(self):\n rel_path = icom.stringify_path(Path(\".\"))\n assert rel_path == \".\"\n redundant_path = icom.stringify_path(Path(\"foo//bar\"))\n assert redundant_path == os.path.join(\"foo\", \"bar\")\n\n @td.skip_if_no(\"py.path\")\n def test_stringify_path_localpath(self):\n path = os.path.join(\"foo\", \"bar\")\n abs_path = os.path.abspath(path)\n lpath = LocalPath(path)\n assert icom.stringify_path(lpath) == abs_path\n\n def test_stringify_path_fspath(self):\n p = CustomFSPath(\"foo/bar.csv\")\n result = icom.stringify_path(p)\n assert result == \"foo/bar.csv\"\n\n def test_stringify_file_and_path_like(self):\n # GH 38125: do not stringify file objects that are also path-like\n fsspec = pytest.importorskip(\"fsspec\")\n with tm.ensure_clean() as path:\n with fsspec.open(f\"file://{path}\", mode=\"wb\") as fsspec_obj:\n assert fsspec_obj == icom.stringify_path(fsspec_obj)\n\n @pytest.mark.parametrize(\"path_type\", path_types)\n def test_infer_compression_from_path(self, compression_format, path_type):\n extension, expected = compression_format\n path = path_type(\"foo/bar.csv\" + extension)\n compression = icom.infer_compression(path, compression=\"infer\")\n assert compression == expected\n\n @pytest.mark.parametrize(\"path_type\", [str, 
CustomFSPath, Path])\n def test_get_handle_with_path(self, path_type):\n # ignore LocalPath: it creates strange paths: /absolute/~/sometest\n with tempfile.TemporaryDirectory(dir=Path.home()) as tmp:\n filename = path_type(\"~/\" + Path(tmp).name + \"/sometest\")\n with icom.get_handle(filename, \"w\") as handles:\n assert Path(handles.handle.name).is_absolute()\n assert os.path.expanduser(filename) == handles.handle.name\n\n def test_get_handle_with_buffer(self):\n input_buffer = StringIO()\n with icom.get_handle(input_buffer, \"r\") as handles:\n assert handles.handle == input_buffer\n assert not input_buffer.closed\n input_buffer.close()\n\n # Test that BytesIOWrapper(get_handle) returns correct amount of bytes every time\n def test_bytesiowrapper_returns_correct_bytes(self):\n # Test latin1, ucs-2, and ucs-4 chars\n data = \"\"\"a,b,c\n1,2,3\n©,®,®\nLook,a snake,🐍\"\"\"\n with icom.get_handle(StringIO(data), \"rb\", is_text=False) as handles:\n result = b\"\"\n chunksize = 5\n while True:\n chunk = handles.handle.read(chunksize)\n # Make sure each chunk is correct amount of bytes\n assert len(chunk) <= chunksize\n if len(chunk) < chunksize:\n # Can be less amount of bytes, but only at EOF\n # which happens when read returns empty\n assert len(handles.handle.read()) == 0\n result += chunk\n break\n result += chunk\n assert result == data.encode(\"utf-8\")\n\n # Test that pyarrow can handle a file opened with get_handle\n @td.skip_if_no(\"pyarrow\", min_version=\"0.15.0\")\n def test_get_handle_pyarrow_compat(self):\n from pyarrow import csv\n\n # Test latin1, ucs-2, and ucs-4 chars\n data = \"\"\"a,b,c\n1,2,3\n©,®,®\nLook,a snake,🐍\"\"\"\n expected = pd.DataFrame(\n {\"a\": [\"1\", \"©\", \"Look\"], \"b\": [\"2\", \"®\", \"a snake\"], \"c\": [\"3\", \"®\", \"🐍\"]}\n )\n s = StringIO(data)\n with icom.get_handle(s, \"rb\", is_text=False) as handles:\n df = csv.read_csv(handles.handle).to_pandas()\n tm.assert_frame_equal(df, expected)\n assert not s.closed\n\n def test_iterator(self):\n with pd.read_csv(StringIO(self.data1), chunksize=1) as reader:\n result = pd.concat(reader, ignore_index=True)\n expected = pd.read_csv(StringIO(self.data1))\n tm.assert_frame_equal(result, expected)\n\n # GH12153\n with pd.read_csv(StringIO(self.data1), chunksize=1) as it:\n first = next(it)\n tm.assert_frame_equal(first, expected.iloc[[0]])\n tm.assert_frame_equal(pd.concat(it), expected.iloc[1:])\n\n @pytest.mark.parametrize(\n \"reader, module, error_class, fn_ext\",\n [\n (pd.read_csv, \"os\", FileNotFoundError, \"csv\"),\n (pd.read_fwf, \"os\", FileNotFoundError, \"txt\"),\n (pd.read_excel, \"xlrd\", FileNotFoundError, \"xlsx\"),\n (pd.read_feather, \"pyarrow\", OSError, \"feather\"),\n (pd.read_hdf, \"tables\", FileNotFoundError, \"h5\"),\n (pd.read_stata, \"os\", FileNotFoundError, \"dta\"),\n (pd.read_sas, \"os\", FileNotFoundError, \"sas7bdat\"),\n (pd.read_json, \"os\", ValueError, \"json\"),\n (pd.read_pickle, \"os\", FileNotFoundError, \"pickle\"),\n ],\n )\n def test_read_non_existent(self, reader, module, error_class, fn_ext):\n pytest.importorskip(module)\n\n path = os.path.join(HERE, \"data\", \"does_not_exist.\" + fn_ext)\n msg1 = fr\"File (b')?.+does_not_exist\\.{fn_ext}'? 
does not exist\"\n msg2 = fr\"\\[Errno 2\\] No such file or directory: '.+does_not_exist\\.{fn_ext}'\"\n msg3 = \"Expected object or value\"\n msg4 = \"path_or_buf needs to be a string file path or file-like\"\n msg5 = (\n fr\"\\[Errno 2\\] File .+does_not_exist\\.{fn_ext} does not exist: \"\n fr\"'.+does_not_exist\\.{fn_ext}'\"\n )\n msg6 = fr\"\\[Errno 2\\] 没有那个文件或目录: '.+does_not_exist\\.{fn_ext}'\"\n msg7 = (\n fr\"\\[Errno 2\\] File o directory non esistente: '.+does_not_exist\\.{fn_ext}'\"\n )\n msg8 = fr\"Failed to open local file.+does_not_exist\\.{fn_ext}\"\n\n with pytest.raises(\n error_class,\n match=fr\"({msg1}|{msg2}|{msg3}|{msg4}|{msg5}|{msg6}|{msg7}|{msg8})\",\n ):\n reader(path)\n\n @pytest.mark.parametrize(\n \"method, module, error_class, fn_ext\",\n [\n (pd.DataFrame.to_csv, \"os\", OSError, \"csv\"),\n (pd.DataFrame.to_html, \"os\", OSError, \"html\"),\n (pd.DataFrame.to_excel, \"xlrd\", OSError, \"xlsx\"),\n (pd.DataFrame.to_feather, \"pyarrow\", OSError, \"feather\"),\n (pd.DataFrame.to_parquet, \"pyarrow\", OSError, \"parquet\"),\n (pd.DataFrame.to_stata, \"os\", OSError, \"dta\"),\n (pd.DataFrame.to_json, \"os\", OSError, \"json\"),\n (pd.DataFrame.to_pickle, \"os\", OSError, \"pickle\"),\n ],\n )\n # NOTE: Missing parent directory for pd.DataFrame.to_hdf is handled by PyTables\n def test_write_missing_parent_directory(self, method, module, error_class, fn_ext):\n pytest.importorskip(module)\n\n dummy_frame = pd.DataFrame({\"a\": [1, 2, 3], \"b\": [2, 3, 4], \"c\": [3, 4, 5]})\n\n path = os.path.join(HERE, \"data\", \"missing_folder\", \"does_not_exist.\" + fn_ext)\n\n with pytest.raises(\n error_class,\n match=r\"Cannot save file into a non-existent directory: .*missing_folder\",\n ):\n method(dummy_frame, path)\n\n @pytest.mark.parametrize(\n \"reader, module, error_class, fn_ext\",\n [\n (pd.read_csv, \"os\", FileNotFoundError, \"csv\"),\n (pd.read_table, \"os\", FileNotFoundError, \"csv\"),\n (pd.read_fwf, \"os\", FileNotFoundError, \"txt\"),\n (pd.read_excel, \"xlrd\", FileNotFoundError, \"xlsx\"),\n (pd.read_feather, \"pyarrow\", OSError, \"feather\"),\n (pd.read_hdf, \"tables\", FileNotFoundError, \"h5\"),\n (pd.read_stata, \"os\", FileNotFoundError, \"dta\"),\n (pd.read_sas, \"os\", FileNotFoundError, \"sas7bdat\"),\n (pd.read_json, \"os\", ValueError, \"json\"),\n (pd.read_pickle, \"os\", FileNotFoundError, \"pickle\"),\n ],\n )\n def test_read_expands_user_home_dir(\n self, reader, module, error_class, fn_ext, monkeypatch\n ):\n pytest.importorskip(module)\n\n path = os.path.join(\"~\", \"does_not_exist.\" + fn_ext)\n monkeypatch.setattr(icom, \"_expand_user\", lambda x: os.path.join(\"foo\", x))\n\n msg1 = fr\"File (b')?.+does_not_exist\\.{fn_ext}'? 
does not exist\"\n msg2 = fr\"\\[Errno 2\\] No such file or directory: '.+does_not_exist\\.{fn_ext}'\"\n msg3 = \"Unexpected character found when decoding 'false'\"\n msg4 = \"path_or_buf needs to be a string file path or file-like\"\n msg5 = (\n fr\"\\[Errno 2\\] File .+does_not_exist\\.{fn_ext} does not exist: \"\n fr\"'.+does_not_exist\\.{fn_ext}'\"\n )\n msg6 = fr\"\\[Errno 2\\] 没有那个文件或目录: '.+does_not_exist\\.{fn_ext}'\"\n msg7 = (\n fr\"\\[Errno 2\\] File o directory non esistente: '.+does_not_exist\\.{fn_ext}'\"\n )\n msg8 = fr\"Failed to open local file.+does_not_exist\\.{fn_ext}\"\n\n with pytest.raises(\n error_class,\n match=fr\"({msg1}|{msg2}|{msg3}|{msg4}|{msg5}|{msg6}|{msg7}|{msg8})\",\n ):\n reader(path)\n\n @pytest.mark.parametrize(\n \"reader, module, path\",\n [\n (pd.read_csv, \"os\", (\"io\", \"data\", \"csv\", \"iris.csv\")),\n (pd.read_table, \"os\", (\"io\", \"data\", \"csv\", \"iris.csv\")),\n (\n pd.read_fwf,\n \"os\",\n (\"io\", \"data\", \"fixed_width\", \"fixed_width_format.txt\"),\n ),\n (pd.read_excel, \"xlrd\", (\"io\", \"data\", \"excel\", \"test1.xlsx\")),\n (\n pd.read_feather,\n \"pyarrow\",\n (\"io\", \"data\", \"feather\", \"feather-0_3_1.feather\"),\n ),\n (\n pd.read_hdf,\n \"tables\",\n (\"io\", \"data\", \"legacy_hdf\", \"datetimetz_object.h5\"),\n ),\n (pd.read_stata, \"os\", (\"io\", \"data\", \"stata\", \"stata10_115.dta\")),\n (pd.read_sas, \"os\", (\"io\", \"sas\", \"data\", \"test1.sas7bdat\")),\n (pd.read_json, \"os\", (\"io\", \"json\", \"data\", \"tsframe_v012.json\")),\n (\n pd.read_pickle,\n \"os\",\n (\"io\", \"data\", \"pickle\", \"categorical.0.25.0.pickle\"),\n ),\n ],\n )\n @pytest.mark.filterwarnings(\n \"ignore:CategoricalBlock is deprecated:DeprecationWarning\"\n )\n @pytest.mark.filterwarnings( # pytables np.object usage\n \"ignore:`np.object` is a deprecated alias:DeprecationWarning\"\n )\n def test_read_fspath_all(self, reader, module, path, datapath):\n pytest.importorskip(module)\n path = datapath(*path)\n\n mypath = CustomFSPath(path)\n result = reader(mypath)\n expected = reader(path)\n\n if path.endswith(\".pickle\"):\n # categorical\n tm.assert_categorical_equal(result, expected)\n else:\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.filterwarnings(\"ignore:In future versions `DataFrame.to_latex`\")\n @pytest.mark.parametrize(\n \"writer_name, writer_kwargs, module\",\n [\n (\"to_csv\", {}, \"os\"),\n (\"to_excel\", {\"engine\": \"xlwt\"}, \"xlwt\"),\n (\"to_feather\", {}, \"pyarrow\"),\n (\"to_html\", {}, \"os\"),\n (\"to_json\", {}, \"os\"),\n (\"to_latex\", {}, \"os\"),\n (\"to_pickle\", {}, \"os\"),\n (\"to_stata\", {\"time_stamp\": pd.to_datetime(\"2019-01-01 00:00\")}, \"os\"),\n ],\n )\n def test_write_fspath_all(self, writer_name, writer_kwargs, module):\n p1 = tm.ensure_clean(\"string\")\n p2 = tm.ensure_clean(\"fspath\")\n df = pd.DataFrame({\"A\": [1, 2]})\n\n with p1 as string, p2 as fspath:\n pytest.importorskip(module)\n mypath = CustomFSPath(fspath)\n writer = getattr(df, writer_name)\n\n writer(string, **writer_kwargs)\n with open(string, \"rb\") as f:\n expected = f.read()\n\n writer(mypath, **writer_kwargs)\n with open(fspath, \"rb\") as f:\n result = f.read()\n\n assert result == expected\n\n @pytest.mark.filterwarnings( # pytables np.object usage\n \"ignore:`np.object` is a deprecated alias:DeprecationWarning\"\n )\n def test_write_fspath_hdf5(self):\n # Same test as write_fspath_all, except HDF5 files aren't\n # necessarily byte-for-byte identical for a given dataframe, so we'll\n # have to 
read and compare equality\n pytest.importorskip(\"tables\")\n\n df = pd.DataFrame({\"A\": [1, 2]})\n p1 = tm.ensure_clean(\"string\")\n p2 = tm.ensure_clean(\"fspath\")\n\n with p1 as string, p2 as fspath:\n mypath = CustomFSPath(fspath)\n df.to_hdf(mypath, key=\"bar\")\n df.to_hdf(string, key=\"bar\")\n\n result = pd.read_hdf(fspath, key=\"bar\")\n expected = pd.read_hdf(string, key=\"bar\")\n\n tm.assert_frame_equal(result, expected)\n\n\[email protected]\ndef mmap_file(datapath):\n return datapath(\"io\", \"data\", \"csv\", \"test_mmap.csv\")\n\n\nclass TestMMapWrapper:\n def test_constructor_bad_file(self, mmap_file):\n non_file = StringIO(\"I am not a file\")\n non_file.fileno = lambda: -1\n\n # the error raised is different on Windows\n if is_platform_windows():\n msg = \"The parameter is incorrect\"\n err = OSError\n else:\n msg = \"[Errno 22]\"\n err = mmap.error\n\n with pytest.raises(err, match=msg):\n icom._MMapWrapper(non_file)\n\n target = open(mmap_file)\n target.close()\n\n msg = \"I/O operation on closed file\"\n with pytest.raises(ValueError, match=msg):\n icom._MMapWrapper(target)\n\n def test_get_attr(self, mmap_file):\n with open(mmap_file) as target:\n wrapper = icom._MMapWrapper(target)\n\n attrs = dir(wrapper.mmap)\n attrs = [attr for attr in attrs if not attr.startswith(\"__\")]\n attrs.append(\"__next__\")\n\n for attr in attrs:\n assert hasattr(wrapper, attr)\n\n assert not hasattr(wrapper, \"foo\")\n\n def test_next(self, mmap_file):\n with open(mmap_file) as target:\n wrapper = icom._MMapWrapper(target)\n lines = target.readlines()\n\n for line in lines:\n next_line = next(wrapper)\n assert next_line.strip() == line.strip()\n\n with pytest.raises(StopIteration, match=r\"^$\"):\n next(wrapper)\n\n def test_unknown_engine(self):\n with tm.ensure_clean() as path:\n df = tm.makeDataFrame()\n df.to_csv(path)\n with pytest.raises(ValueError, match=\"Unknown engine\"):\n pd.read_csv(path, engine=\"pyt\")\n\n def test_binary_mode(self):\n \"\"\"\n 'encoding' shouldn't be passed to 'open' in binary mode.\n\n GH 35058\n \"\"\"\n with tm.ensure_clean() as path:\n df = tm.makeDataFrame()\n df.to_csv(path, mode=\"w+b\")\n tm.assert_frame_equal(df, pd.read_csv(path, index_col=0))\n\n @pytest.mark.parametrize(\"encoding\", [\"utf-16\", \"utf-32\"])\n @pytest.mark.parametrize(\"compression_\", [\"bz2\", \"xz\"])\n def test_warning_missing_utf_bom(self, encoding, compression_):\n \"\"\"\n bz2 and xz do not write the byte order mark (BOM) for utf-16/32.\n\n https://stackoverflow.com/questions/55171439\n\n GH 35681\n \"\"\"\n df = tm.makeDataFrame()\n with tm.ensure_clean() as path:\n with tm.assert_produces_warning(UnicodeWarning):\n df.to_csv(path, compression=compression_, encoding=encoding)\n\n # reading should fail (otherwise we wouldn't need the warning)\n msg = r\"UTF-\\d+ stream does not start with BOM\"\n with pytest.raises(UnicodeError, match=msg):\n pd.read_csv(path, compression=compression_, encoding=encoding)\n\n\ndef test_is_fsspec_url():\n assert icom.is_fsspec_url(\"gcs://pandas/somethingelse.com\")\n assert icom.is_fsspec_url(\"gs://pandas/somethingelse.com\")\n # the following is the only remote URL that is handled without fsspec\n assert not icom.is_fsspec_url(\"http://pandas/somethingelse.com\")\n assert not icom.is_fsspec_url(\"random:pandas/somethingelse.com\")\n assert not icom.is_fsspec_url(\"/local/path\")\n assert not icom.is_fsspec_url(\"relative/local/path\")\n\n\[email protected](\"encoding\", [None, \"utf-8\"])\[email protected](\"format\", [\"csv\", 
\"json\"])\ndef test_codecs_encoding(encoding, format):\n # GH39247\n expected = tm.makeDataFrame()\n with tm.ensure_clean() as path:\n with codecs.open(path, mode=\"w\", encoding=encoding) as handle:\n getattr(expected, f\"to_{format}\")(handle)\n with codecs.open(path, mode=\"r\", encoding=encoding) as handle:\n if format == \"csv\":\n df = pd.read_csv(handle, index_col=0)\n else:\n df = pd.read_json(handle)\n tm.assert_frame_equal(expected, df)\n\n\ndef test_codecs_get_writer_reader():\n # GH39247\n expected = tm.makeDataFrame()\n with tm.ensure_clean() as path:\n with open(path, \"wb\") as handle:\n with codecs.getwriter(\"utf-8\")(handle) as encoded:\n expected.to_csv(encoded)\n with open(path, \"rb\") as handle:\n with codecs.getreader(\"utf-8\")(handle) as encoded:\n df = pd.read_csv(encoded, index_col=0)\n tm.assert_frame_equal(expected, df)\n\n\[email protected](\n \"io_class,mode,msg\",\n [\n (BytesIO, \"t\", \"a bytes-like object is required, not 'str'\"),\n (StringIO, \"b\", \"string argument expected, got 'bytes'\"),\n ],\n)\ndef test_explicit_encoding(io_class, mode, msg):\n # GH39247; this test makes sure that if a user provides mode=\"*t\" or \"*b\",\n # it is used. In the case of this test it leads to an error as intentionally the\n # wrong mode is requested\n expected = tm.makeDataFrame()\n with io_class() as buffer:\n with pytest.raises(TypeError, match=msg):\n expected.to_csv(buffer, mode=f\"w{mode}\")\n\n\[email protected](\"encoding_errors\", [None, \"strict\", \"replace\"])\[email protected](\"format\", [\"csv\", \"json\"])\ndef test_encoding_errors(encoding_errors, format):\n # GH39450\n msg = \"'utf-8' codec can't decode byte\"\n bad_encoding = b\"\\xe4\"\n\n if format == \"csv\":\n return\n content = bad_encoding + b\"\\n\" + bad_encoding\n reader = pd.read_csv\n else:\n content = (\n b'{\"'\n + bad_encoding * 2\n + b'\": {\"'\n + bad_encoding\n + b'\":\"'\n + bad_encoding\n + b'\"}}'\n )\n reader = partial(pd.read_json, orient=\"index\")\n with tm.ensure_clean() as path:\n file = Path(path)\n file.write_bytes(content)\n\n if encoding_errors != \"replace\":\n with pytest.raises(UnicodeDecodeError, match=msg):\n reader(path, encoding_errors=encoding_errors)\n else:\n df = reader(path, encoding_errors=encoding_errors)\n decoded = bad_encoding.decode(errors=encoding_errors)\n expected = pd.DataFrame({decoded: [decoded]}, index=[decoded * 2])\n tm.assert_frame_equal(df, expected)\n\n\ndef test_bad_encdoing_errors():\n # GH 39777\n with tm.ensure_clean() as path:\n with pytest.raises(LookupError, match=\"unknown error handler name\"):\n icom.get_handle(path, \"w\", errors=\"bad\")\n\n\ndef test_errno_attribute():\n # GH 13872\n with pytest.raises(FileNotFoundError, match=\"\\\\[Errno 2\\\\]\") as err:\n pd.read_csv(\"doesnt_exist\")\n assert err.errno == errno.ENOENT\n\n\ndef test_fail_mmap():\n with pytest.raises(UnsupportedOperation, match=\"fileno\"):\n with BytesIO() as buffer:\n icom.get_handle(buffer, \"rb\", memory_map=True)\n" ]
[ [ "pandas.Int16Dtype", "pandas.Int32Dtype", "pandas.get_option", "pandas.io.common.get_handle", "pandas.errors.AbstractMethodError", "pandas.io.common.is_url", "pandas.StringDtype", "pandas.compat._optional.import_optional_dependency", "pandas.util.version.Version", "pandas.UInt16Dtype", "pandas.io.common.is_fsspec_url", "pandas.io.common.stringify_path", "pandas.UInt64Dtype", "pandas.Int8Dtype", "pandas.UInt32Dtype", "pandas.BooleanDtype", "pandas.UInt8Dtype", "pandas.Int64Dtype", "pandas.util._decorators.doc" ], [ "pandas.to_datetime", "pandas.DataFrame", "pandas.io.common._MMapWrapper", "pandas.io.common.infer_compression", "pandas._testing.assert_frame_equal", "pandas.read_csv", "pandas.io.common.get_handle", "pandas._testing.makeDataFrame", "pandas.concat", "pandas.read_hdf", "pandas._testing.assert_produces_warning", "pandas.io.common.is_fsspec_url", "pandas.io.common.stringify_path", "pandas.compat.is_platform_windows", "pandas.read_json", "pandas.io.common._expand_user", "pandas._testing.ensure_clean", "pandas._testing.assert_categorical_equal", "pandas.util._test_decorators.skip_if_no" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
AlumiK/bagel-tensorflow
[ "791a89a54f15aeed0c4e1ea43afb9300f18b60cd" ]
[ "bagel/testing.py" ]
[ "import bagel\nimport numpy as np\n\nfrom sklearn.metrics import precision_recall_curve\nfrom typing import Sequence, Tuple, Dict, Optional\n\n\ndef _adjust_scores(labels: np.ndarray,\n scores: np.ndarray,\n delay: Optional[int] = None,\n inplace: bool = False) -> np.ndarray:\n if np.shape(scores) != np.shape(labels):\n raise ValueError('`labels` and `scores` must have same shape')\n if delay is None:\n delay = len(scores)\n splits = np.where(labels[1:] != labels[:-1])[0] + 1\n is_anomaly = labels[0] == 1\n adjusted_scores = np.copy(scores) if not inplace else scores\n pos = 0\n for part in splits:\n if is_anomaly:\n ptr = min(pos + delay + 1, part)\n adjusted_scores[pos: ptr] = np.max(adjusted_scores[pos: ptr])\n adjusted_scores[ptr: part] = np.maximum(adjusted_scores[ptr: part], adjusted_scores[pos])\n is_anomaly = not is_anomaly\n pos = part\n part = len(labels)\n if is_anomaly:\n ptr = min(pos + delay + 1, part)\n adjusted_scores[pos: part] = np.max(adjusted_scores[pos: ptr])\n return adjusted_scores\n\n\ndef _ignore_missing(series_list: Sequence, missing: np.ndarray) -> Tuple[np.ndarray, ...]:\n ret = []\n for series in series_list:\n series = np.copy(series)\n ret.append(series[missing != 1])\n return tuple(ret)\n\n\ndef _best_f1score(labels: np.ndarray, scores: np.ndarray) -> Tuple[float, float, float, float]:\n precision, recall, thresholds = precision_recall_curve(y_true=labels, probas_pred=scores)\n f1score = 2 * precision * recall / np.clip(precision + recall, a_min=1e-8, a_max=None)\n\n best_threshold = thresholds[np.argmax(f1score)]\n best_precision = precision[np.argmax(f1score)]\n best_recall = recall[np.argmax(f1score)]\n\n return best_threshold, best_precision, best_recall, np.max(f1score)\n\n\ndef get_test_results(labels: np.ndarray,\n scores: np.ndarray,\n missing: np.ndarray,\n window_size: int,\n delay: Optional[int] = None) -> Dict:\n labels = labels[window_size - 1:]\n scores = scores[window_size - 1:]\n missing = missing[window_size - 1:]\n adjusted_scores = _adjust_scores(labels=labels, scores=scores, delay=delay)\n adjusted_labels, adjusted_scores = _ignore_missing([labels, adjusted_scores], missing=missing)\n threshold, precision, recall, f1score = _best_f1score(labels=adjusted_labels, scores=adjusted_scores)\n return {'threshold': threshold,\n 'precision': precision,\n 'recall': recall,\n 'f1score': f1score}\n\n\nclass KPIStats:\n\n def __init__(self, kpi: bagel.data.KPI):\n self.num_points = len(kpi.values)\n self.num_missing = len(kpi.missing[kpi.missing == 1])\n self.num_anomaly = len(kpi.labels[kpi.labels == 1])\n self.missing_rate = self.num_missing / self.num_points\n self.anomaly_rate = self.num_anomaly / self.num_points\n\n\ndef get_kpi_stats(*kpis: bagel.data.KPI) -> Tuple[KPIStats, ...]:\n ret = []\n for kpi in kpis:\n ret.append(KPIStats(kpi))\n return tuple(ret)\n" ]
[ [ "numpy.maximum", "numpy.clip", "sklearn.metrics.precision_recall_curve", "numpy.max", "numpy.copy", "numpy.argmax", "numpy.shape", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dreamflake/GADA
[ "9891ce06e15e53abc72ce57b144e288799967d8c" ]
[ "_3DDFA_V2/TDDFA.py" ]
[ "# coding: utf-8\n\n__author__ = 'cleardusk'\n\nimport os.path as osp\nimport time\nimport numpy as np\nimport cv2\nimport torch\nfrom torchvision.transforms import Compose\nimport torch.backends.cudnn as cudnn\n\nimport _3DDFA_V2.models as models\nfrom _3DDFA_V2.bfm import BFMModel\nfrom _3DDFA_V2.utils.io import _load\nfrom _3DDFA_V2.utils.functions import (\n crop_img, parse_roi_box_from_bbox, parse_roi_box_from_landmark,\n)\nfrom _3DDFA_V2.utils.tddfa_util import (\n load_model, _parse_param, similar_transform,\n ToTensorGjz, NormalizeGjz\n)\n\nmake_abs_path = lambda fn: osp.join(osp.dirname(osp.realpath(__file__)), fn)\n\n\nclass TDDFA(object):\n \"\"\"TDDFA: named Three-D Dense Face Alignment (TDDFA)\"\"\"\n\n def __init__(self, **kvs):\n torch.set_grad_enabled(False)\n print(make_abs_path('configs/bfm_noneck_v3.pkl'))\n # load BFM\n self.bfm = BFMModel(\n bfm_fp=kvs.get('bfm_fp', make_abs_path('configs/bfm_noneck_v3.pkl')),\n shape_dim=kvs.get('shape_dim', 40),\n exp_dim=kvs.get('exp_dim', 10)\n )\n self.tri = self.bfm.tri\n\n # config\n self.gpu_mode = kvs.get('gpu_mode', False)\n self.gpu_id = kvs.get('gpu_id', 0)\n self.size = kvs.get('size', 120)\n\n param_mean_std_fp = kvs.get(\n 'param_mean_std_fp', make_abs_path(f'configs/param_mean_std_62d_{self.size}x{self.size}.pkl')\n )\n\n # load model, default output is dimension with length 62 = 12(pose) + 40(shape) +10(expression)\n model = getattr(models, kvs.get('arch'))(\n num_classes=kvs.get('num_params', 62),\n widen_factor=kvs.get('widen_factor', 1),\n size=self.size,\n mode=kvs.get('mode', 'small')\n )\n model = load_model(model, kvs.get('checkpoint_fp'))\n\n if self.gpu_mode:\n cudnn.benchmark = True\n model = model.cuda(device=self.gpu_id)\n\n self.model = model\n self.model.eval() # eval mode, fix BN\n\n # data normalization\n transform_normalize = NormalizeGjz(mean=127.5, std=128)\n transform_to_tensor = ToTensorGjz()\n transform = Compose([transform_to_tensor, transform_normalize])\n self.transform = transform\n\n # params normalization config\n r = _load(param_mean_std_fp)\n self.param_mean = r.get('mean')\n self.param_std = r.get('std')\n\n # print('param_mean and param_srd', self.param_mean, self.param_std)\n\n def __call__(self, img_ori, objs, **kvs):\n \"\"\"The main call of TDDFA, given image and box / landmark, return 3DMM params and roi_box\n :param img_ori: the input image\n :param objs: the list of box or landmarks\n :param kvs: options\n :return: param list and roi_box list\n \"\"\"\n # Crop image, forward to get the param\n param_lst = []\n roi_box_lst = []\n\n crop_policy = kvs.get('crop_policy', 'box')\n for obj in objs:\n if crop_policy == 'box':\n # by face box\n roi_box = parse_roi_box_from_bbox(obj)\n elif crop_policy == 'landmark':\n # by landmarks\n roi_box = parse_roi_box_from_landmark(obj)\n else:\n raise ValueError(f'Unknown crop policy {crop_policy}')\n\n roi_box_lst.append(roi_box)\n img = crop_img(img_ori, roi_box)\n img = cv2.resize(img, dsize=(self.size, self.size), interpolation=cv2.INTER_LINEAR)\n inp = self.transform(img).unsqueeze(0)\n\n if self.gpu_mode:\n inp = inp.cuda(device=self.gpu_id)\n\n if kvs.get('timer_flag', False):\n end = time.time()\n param = self.model(inp)\n elapse = f'Inference: {(time.time() - end) * 1000:.1f}ms'\n print(elapse)\n else:\n param = self.model(inp)\n\n param = param.squeeze().cpu().numpy().flatten().astype(np.float32)\n param = param * self.param_std + self.param_mean # re-scale\n # print('output', param)\n param_lst.append(param)\n\n return param_lst, 
roi_box_lst\n\n def recon_vers(self, param_lst, roi_box_lst, **kvs):\n dense_flag = kvs.get('dense_flag', False)\n size = self.size\n\n ver_lst = []\n for param, roi_box in zip(param_lst, roi_box_lst):\n if dense_flag:\n R, offset, alpha_shp, alpha_exp = _parse_param(param)\n pts3d = R @ (self.bfm.u + self.bfm.w_shp @ alpha_shp + self.bfm.w_exp @ alpha_exp). \\\n reshape(3, -1, order='F') + offset\n pts3d = similar_transform(pts3d, roi_box, size)\n else:\n R, offset, alpha_shp, alpha_exp = _parse_param(param)\n pts3d = R @ (self.bfm.u_base + self.bfm.w_shp_base @ alpha_shp + self.bfm.w_exp_base @ alpha_exp). \\\n reshape(3, -1, order='F') + offset\n pts3d = similar_transform(pts3d, roi_box, size)\n\n ver_lst.append(pts3d)\n\n return ver_lst\n" ]
[ [ "torch.set_grad_enabled" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
enikon/MACP
[ "2de004d4eaf09f3b02dde3b7041ce6d693d0c25c", "2de004d4eaf09f3b02dde3b7041ce6d693d0c25c", "2de004d4eaf09f3b02dde3b7041ce6d693d0c25c" ]
[ "experiments/experiments/Test6.py", "multiagent/scenarios/simple_push.py", "multiagent/scenarios/simple_reference.py" ]
[ "from experiments.experiments.PubIntegBackground import PubIntegBackground\nimport numpy as np\n\nif __name__ == \"__main__\":\n for i in np.arange(0.0, 10.0, 0.1):\n PubIntegBackground(correlation=False, listing=True, pub='None', intensity=i)\n", "import numpy as np\nfrom multiagent.core import World, Agent, Landmark\nfrom multiagent import BaseScenario\n\nclass Scenario(BaseScenario):\n def make_world(self):\n world = World()\n # set any world properties first\n world.dim_c = 2\n num_agents = 2\n num_adversaries = 1\n num_landmarks = 2\n # add agents\n world.agents = [Agent() for i in range(num_agents)]\n for i, agent in enumerate(world.agents):\n agent.name = 'agent %d' % i\n agent.collide = True\n agent.silent = True\n if i < num_adversaries:\n agent.adversary = True\n else:\n agent.adversary = False\n # add landmarks\n world.landmarks = [Landmark() for i in range(num_landmarks)]\n for i, landmark in enumerate(world.landmarks):\n landmark.name = 'landmark %d' % i\n landmark.collide = False\n landmark.movable = False\n # make initial conditions\n self.reset_world(world)\n return world\n\n def reset_world(self, world):\n # random properties for landmarks\n for i, landmark in enumerate(world.landmarks):\n landmark.color = np.array([0.1, 0.1, 0.1])\n landmark.color[i + 1] += 0.8\n landmark.index = i\n # set goal landmark\n goal = np.random.choice(world.landmarks)\n for i, agent in enumerate(world.agents):\n agent.goal_a = goal\n agent.color = np.array([0.25, 0.25, 0.25])\n if agent.adversary:\n agent.color = np.array([0.75, 0.25, 0.25])\n else:\n j = goal.index\n agent.color[j + 1] += 0.5\n # set random initial states\n for agent in world.agents:\n agent.state.p_pos = np.random.uniform(-1, +1, world.dim_p)\n agent.state.p_vel = np.zeros(world.dim_p)\n agent.state.c = np.zeros(world.dim_c)\n for i, landmark in enumerate(world.landmarks):\n landmark.state.p_pos = np.random.uniform(-1, +1, world.dim_p)\n landmark.state.p_vel = np.zeros(world.dim_p)\n\n def reward(self, agent, world):\n # Agents are rewarded based on minimum agent distance to each landmark\n return self.adversary_reward(agent, world) if agent.adversary else self.agent_reward(agent, world)\n\n def agent_reward(self, agent, world):\n # the distance to the goal\n return -np.sqrt(np.sum(np.square(agent.state.p_pos - agent.goal_a.state.p_pos)))\n\n def adversary_reward(self, agent, world):\n # keep the nearest good agents away from the goal\n agent_dist = [np.sqrt(np.sum(np.square(a.state.p_pos - a.goal_a.state.p_pos))) for a in world.agents if not a.adversary]\n pos_rew = min(agent_dist)\n #nearest_agent = world.good_agents[np.argmin(agent_dist)]\n #neg_rew = np.sqrt(np.sum(np.square(nearest_agent.state.p_pos - agent.state.p_pos)))\n neg_rew = np.sqrt(np.sum(np.square(agent.goal_a.state.p_pos - agent.state.p_pos)))\n #neg_rew = sum([np.sqrt(np.sum(np.square(a.state.p_pos - agent.state.p_pos))) for a in world.good_agents])\n return pos_rew - neg_rew\n \n def observation(self, agent, world):\n # get positions of all entities in this agent's reference frame\n entity_pos = []\n for entity in world.landmarks: # world.entities:\n entity_pos.append(entity.state.p_pos - agent.state.p_pos)\n # entity colors\n entity_color = []\n for entity in world.landmarks: # world.entities:\n entity_color.append(entity.color)\n # communication of all other agents\n comm = []\n other_pos = []\n for other in world.agents:\n if other is agent: continue\n comm.append(other.state.c)\n other_pos.append(other.state.p_pos - agent.state.p_pos)\n if not 
agent.adversary:\n return np.concatenate([agent.state.p_vel] + [agent.goal_a.state.p_pos - agent.state.p_pos] + [agent.color] + entity_pos + entity_color + other_pos)\n else:\n #other_pos = list(reversed(other_pos)) if random.uniform(0,1) > 0.5 else other_pos # randomize position of other agents in adversary network\n return np.concatenate([agent.state.p_vel] + entity_pos + other_pos)\n", "import numpy as np\nfrom multiagent.core import World, Agent, Landmark\nfrom multiagent.environment import BaseScenario\n\n\nclass Scenario(BaseScenario):\n def make_world(self):\n world = World()\n # set any world properties first\n world.dim_c = 10\n world.collaborative = True # whether agents share rewards\n # add agents\n world.agents = [Agent() for i in range(2)]\n for i, agent in enumerate(world.agents):\n agent.name = 'agent %d' % i\n agent.collide = False\n # add landmarks\n world.landmarks = [Landmark() for i in range(3)]\n for i, landmark in enumerate(world.landmarks):\n landmark.name = 'landmark %d' % i\n landmark.collide = False\n landmark.movable = False\n # make initial conditions\n self.reset_world(world)\n return world\n\n def reset_world(self, world):\n # assign goals to agents\n for agent in world.agents:\n agent.goal_a = None\n agent.goal_b = None\n # want other agent to go to the goal landmark\n world.agents[0].goal_a = world.agents[1]\n world.agents[0].goal_b = np.random.choice(world.landmarks)\n world.agents[1].goal_a = world.agents[0]\n world.agents[1].goal_b = np.random.choice(world.landmarks)\n # random properties for agents\n for i, agent in enumerate(world.agents):\n agent.color = np.array([0.25,0.25,0.25]) \n # random properties for landmarks\n world.landmarks[0].color = np.array([0.75,0.25,0.25]) \n world.landmarks[1].color = np.array([0.25,0.75,0.25]) \n world.landmarks[2].color = np.array([0.25,0.25,0.75]) \n # special colors for goals\n world.agents[0].goal_a.color = world.agents[0].goal_b.color \n world.agents[1].goal_a.color = world.agents[1].goal_b.color \n # set random initial states\n for agent in world.agents:\n agent.state.p_pos = np.random.uniform(-1,+1, world.dim_p)\n agent.state.p_vel = np.zeros(world.dim_p)\n agent.state.c = np.zeros(world.dim_c)\n for i, landmark in enumerate(world.landmarks):\n landmark.state.p_pos = np.random.uniform(-1,+1, world.dim_p)\n landmark.state.p_vel = np.zeros(world.dim_p)\n\n def reward(self, agent, world):\n if agent.goal_a is None or agent.goal_b is None:\n return 0.0\n dist2 = np.sum(np.square(agent.goal_a.state.p_pos - agent.goal_b.state.p_pos))\n return -dist2\n\n def observation(self, agent, world):\n # goal color\n goal_color = [np.zeros(world.dim_color), np.zeros(world.dim_color)]\n if agent.goal_b is not None:\n goal_color[1] = agent.goal_b.color \n\n # get positions of all entities in this agent's reference frame\n entity_pos = []\n for entity in world.landmarks:\n entity_pos.append(entity.state.p_pos - agent.state.p_pos)\n # entity colors\n entity_color = []\n for entity in world.landmarks:\n entity_color.append(entity.color)\n # communication of all other agents\n comm = []\n for other in world.agents:\n if other is agent: continue\n comm.append(other.state.c)\n return np.concatenate([agent.state.p_vel] + entity_pos + [goal_color[1]] + comm)\n" ]
[ [ "numpy.arange" ], [ "numpy.square", "numpy.random.choice", "numpy.concatenate", "numpy.random.uniform", "numpy.array", "numpy.zeros" ], [ "numpy.square", "numpy.random.choice", "numpy.concatenate", "numpy.random.uniform", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JoshuaAnickat/mlflow
[ "6dee5cb250460e8dc7accb487e54df8c95921e0e" ]
[ "mlflow/pytorch/__init__.py" ]
[ "\"\"\"\nThe ``mlflow.pytorch`` module provides an API for logging and loading PyTorch models. This module\nexports PyTorch models with the following flavors:\n\nPyTorch (native) format\n This is the main flavor that can be loaded back into PyTorch.\n:py:mod:`mlflow.pyfunc`\n Produced for use by generic pyfunc-based deployment tools and batch inference.\n\"\"\"\nimport importlib\nimport logging\nimport os\nimport yaml\n\nimport cloudpickle\nimport numpy as np\nimport pandas as pd\nfrom distutils.version import LooseVersion\nimport posixpath\n\nimport mlflow\nimport shutil\nimport mlflow.pyfunc.utils as pyfunc_utils\nfrom mlflow import pyfunc\nfrom mlflow.exceptions import MlflowException\nfrom mlflow.models import Model, ModelSignature\nfrom mlflow.models.model import MLMODEL_FILE_NAME\nfrom mlflow.models.utils import ModelInputExample, _save_example\nfrom mlflow.protos.databricks_pb2 import RESOURCE_DOES_NOT_EXIST\nfrom mlflow.pytorch import pickle_module as mlflow_pytorch_pickle_module\nfrom mlflow.tracking.artifact_utils import _download_artifact_from_uri\nfrom mlflow.utils.annotations import experimental\nfrom mlflow.utils.environment import _mlflow_conda_env\nfrom mlflow.utils.file_utils import _copy_file_or_tree, TempDir\nfrom mlflow.utils.model_utils import _get_flavor_configuration\nfrom mlflow.tracking._model_registry import DEFAULT_AWAIT_MAX_SLEEP_SECONDS\nfrom mlflow.utils.autologging_utils import autologging_integration, safe_patch\n\nFLAVOR_NAME = \"pytorch\"\n\n_SERIALIZED_TORCH_MODEL_FILE_NAME = \"model.pth\"\n_PICKLE_MODULE_INFO_FILE_NAME = \"pickle_module_info.txt\"\n_EXTRA_FILES_KEY = \"extra_files\"\n_REQUIREMENTS_FILE_KEY = \"requirements_file\"\n\n_logger = logging.getLogger(__name__)\n\n\ndef get_default_conda_env():\n \"\"\"\n :return: The default Conda environment as a dictionary for MLflow Models produced by calls to\n :func:`save_model()` and :func:`log_model()`.\n\n .. code-block:: python\n :caption: Example\n\n import mlflow.pytorch\n\n # Log PyTorch model\n with mlflow.start_run() as run:\n mlflow.pytorch.log_model(model, \"model\")\n\n # Fetch the associated conda environment\n env = mlflow.pytorch.get_default_conda_env()\n print(\"conda env: {}\".format(env))\n\n .. code-block:: text\n :caption: Output\n\n conda env {'name': 'mlflow-env',\n 'channels': ['defaults', 'conda-forge', 'pytorch'],\n 'dependencies': ['python=3.7.5', 'pytorch=1.5.1',\n 'torchvision=0.6.1',\n 'pip', {'pip': ['mlflow', 'cloudpickle==1.6.0']}]}\n \"\"\"\n import torch\n import torchvision\n\n return _mlflow_conda_env(\n additional_conda_deps=[\n \"pytorch={}\".format(torch.__version__),\n \"torchvision={}\".format(torchvision.__version__),\n ],\n additional_pip_deps=[\n # We include CloudPickle in the default environment because\n # it's required by the default pickle module used by `save_model()`\n # and `log_model()`: `mlflow.pytorch.pickle_module`.\n \"cloudpickle=={}\".format(cloudpickle.__version__)\n ],\n additional_conda_channels=[\"pytorch\"],\n )\n\n\ndef log_model(\n pytorch_model,\n artifact_path,\n conda_env=None,\n code_paths=None,\n pickle_module=None,\n registered_model_name=None,\n signature: ModelSignature = None,\n input_example: ModelInputExample = None,\n await_registration_for=DEFAULT_AWAIT_MAX_SLEEP_SECONDS,\n requirements_file=None,\n extra_files=None,\n **kwargs\n):\n \"\"\"\n Log a PyTorch model as an MLflow artifact for the current run.\n\n :param pytorch_model: PyTorch model to be saved. 
Can be either an eager model (subclass of\n                          ``torch.nn.Module``) or scripted model prepared via ``torch.jit.script``\n                          or ``torch.jit.trace``.\n\n                          The model accepts a single ``torch.FloatTensor`` as\n                          input and produces a single output tensor.\n\n                          If saving an eager model, any code dependencies of the\n                          model's class, including the class definition itself, should be\n                          included in one of the following locations:\n\n                          - The package(s) listed in the model's Conda environment, specified\n                            by the ``conda_env`` parameter.\n                          - One or more of the files specified by the ``code_paths`` parameter.\n\n    :param artifact_path: Run-relative artifact path.\n    :param conda_env: Path to a Conda environment file. If provided, this describes the environment\n                      this model should be run in. At minimum, it should specify the dependencies\n                      contained in :func:`get_default_conda_env()`. If ``None``, the default\n                      :func:`get_default_conda_env()` environment is added to the model. The\n                      following is an *example* dictionary representation of a Conda environment::\n\n                        {\n                            'name': 'mlflow-env',\n                            'channels': ['defaults'],\n                            'dependencies': [\n                                'python=3.7.0',\n                                'pytorch=0.4.1',\n                                'torchvision=0.2.1'\n                            ]\n                        }\n\n    :param code_paths: A list of local filesystem paths to Python file dependencies (or directories\n                       containing file dependencies). These files are *prepended* to the system\n                       path when the model is loaded.\n    :param pickle_module: The module that PyTorch should use to serialize (\"pickle\") the specified\n                          ``pytorch_model``. This is passed as the ``pickle_module`` parameter\n                          to ``torch.save()``. By default, this module is also used to\n                          deserialize (\"unpickle\") the PyTorch model at load time.\n    :param registered_model_name: (Experimental) If given, create a model version under\n                                  ``registered_model_name``, also creating a registered model if one\n                                  with the given name does not exist.\n\n    :param signature: (Experimental) :py:class:`ModelSignature <mlflow.models.ModelSignature>`\n                      describes model input and output :py:class:`Schema <mlflow.types.Schema>`.\n                      The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`\n                      from datasets with valid model input (e.g. the training dataset with target\n                      column omitted) and valid model output (e.g. model predictions generated on\n                      the training dataset), for example:\n\n                      .. code-block:: python\n\n                        from mlflow.models.signature import infer_signature\n                        train = df.drop_column(\"target_label\")\n                        predictions = ... # compute model predictions\n                        signature = infer_signature(train, predictions)\n    :param input_example: (Experimental) Input example provides one or several instances of valid\n                          model input. The example can be used as a hint of what data to feed the\n                          model. The given example will be converted to a Pandas DataFrame and then\n                          serialized to json using the Pandas split-oriented format. Bytes are\n                          base64-encoded.\n\n    :param await_registration_for: Number of seconds to wait for the model version to finish\n                                   being created and is in ``READY`` status. By default, the function\n                                   waits for five minutes. Specify 0 or None to skip waiting.\n\n    :param requirements_file: A string containing the path to a requirements file. 
Remote URIs\n are resolved to absolute filesystem paths.\n For example, consider the following ``requirements_file`` string -\n\n requirements_file = \"s3://my-bucket/path/to/my_file\"\n\n In this case, the ``\"my_file\"`` requirements file is downloaded from S3.\n\n If ``None``, no requirements file is added to the model.\n\n :param extra_files: A list containing the paths to corresponding extra files. Remote URIs\n are resolved to absolute filesystem paths.\n For example, consider the following ``extra_files`` list -\n\n extra_files = [\"s3://my-bucket/path/to/my_file1\",\n \"s3://my-bucket/path/to/my_file2\"]\n\n In this case, the ``\"my_file1 & my_file2\"`` extra file is downloaded from S3.\n\n If ``None``, no extra files are added to the model.\n\n :param kwargs: kwargs to pass to ``torch.save`` method.\n\n .. code-block:: python\n :caption: Example\n\n import numpy as np\n import torch\n import mlflow.pytorch\n\n class LinearNNModel(torch.nn.Module):\n def __init__(self):\n super(LinearNNModel, self).__init__()\n self.linear = torch.nn.Linear(1, 1) # One in and one out\n\n def forward(self, x):\n y_pred = self.linear(x)\n return y_pred\n\n def gen_data():\n # Example linear model modified to use y = 2x\n # from https://github.com/hunkim/PyTorchZeroToAll\n # X training data, y labels\n X = torch.arange(1.0, 25.0).view(-1, 1)\n y = torch.from_numpy(np.array([x * 2 for x in X])).view(-1, 1)\n return X, y\n\n # Define model, loss, and optimizer\n model = LinearNNModel()\n criterion = torch.nn.MSELoss()\n optimizer = torch.optim.SGD(model.parameters(), lr=0.001)\n\n # Training loop\n epochs = 250\n X, y = gen_data()\n for epoch in range(epochs):\n # Forward pass: Compute predicted y by passing X to the model\n y_pred = model(X)\n\n # Compute the loss\n loss = criterion(y_pred, y)\n\n # Zero gradients, perform a backward pass, and update the weights.\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # Log the model\n with mlflow.start_run() as run:\n mlflow.pytorch.log_model(model, \"model\")\n\n # convert to scripted model and log the model\n scripted_pytorch_model = torch.jit.script(model)\n mlflow.pytorch.log_model(scripted_pytorch_model, \"scripted_model\")\n\n # Fetch the logged model artifacts\n print(\"run_id: {}\".format(run.info.run_id))\n for artifact_path in [\"model/data\", \"scripted_model/data\"]:\n artifacts = [f.path for f in MlflowClient().list_artifacts(run.info.run_id,\n artifact_path)]\n print(\"artifacts: {}\".format(artifacts))\n\n .. code-block:: text\n :caption: Output\n\n run_id: 1a1ec9e413ce48e9abf9aec20efd6f71\n artifacts: ['model/data/model.pth',\n 'model/data/pickle_module_info.txt']\n artifacts: ['scripted_model/data/model.pth',\n 'scripted_model/data/pickle_module_info.txt']\n\n .. 
figure:: ../_static/images/pytorch_logged_models.png\n\n        PyTorch logged models\n    \"\"\"\n    pickle_module = pickle_module or mlflow_pytorch_pickle_module\n    Model.log(\n        artifact_path=artifact_path,\n        flavor=mlflow.pytorch,\n        pytorch_model=pytorch_model,\n        conda_env=conda_env,\n        code_paths=code_paths,\n        pickle_module=pickle_module,\n        registered_model_name=registered_model_name,\n        signature=signature,\n        input_example=input_example,\n        await_registration_for=await_registration_for,\n        requirements_file=requirements_file,\n        extra_files=extra_files,\n        **kwargs,\n    )\n\n\ndef save_model(\n    pytorch_model,\n    path,\n    conda_env=None,\n    mlflow_model=None,\n    code_paths=None,\n    pickle_module=None,\n    signature: ModelSignature = None,\n    input_example: ModelInputExample = None,\n    requirements_file=None,\n    extra_files=None,\n    **kwargs\n):\n    \"\"\"\n    Save a PyTorch model to a path on the local file system.\n\n    :param pytorch_model: PyTorch model to be saved. Can be either an eager model (subclass of\n                          ``torch.nn.Module``) or scripted model prepared via ``torch.jit.script``\n                          or ``torch.jit.trace``.\n\n                          The model accepts a single ``torch.FloatTensor`` as\n                          input and produces a single output tensor.\n\n                          If saving an eager model, any code dependencies of the\n                          model's class, including the class definition itself, should be\n                          included in one of the following locations:\n\n                          - The package(s) listed in the model's Conda environment, specified\n                            by the ``conda_env`` parameter.\n                          - One or more of the files specified by the ``code_paths`` parameter.\n\n    :param path: Local path where the model is to be saved.\n    :param conda_env: Either a dictionary representation of a Conda environment or the path to a\n                      Conda environment yaml file. If provided, this describes the environment\n                      this model should be run in. At minimum, it should specify the dependencies\n                      contained in :func:`get_default_conda_env()`. If ``None``, the default\n                      :func:`get_default_conda_env()` environment is added to the model. The\n                      following is an *example* dictionary representation of a Conda environment::\n\n                        {\n                            'name': 'mlflow-env',\n                            'channels': ['defaults'],\n                            'dependencies': [\n                                'python=3.7.0',\n                                'pytorch=0.4.1',\n                                'torchvision=0.2.1'\n                            ]\n                        }\n\n    :param mlflow_model: :py:mod:`mlflow.models.Model` this flavor is being added to.\n    :param code_paths: A list of local filesystem paths to Python file dependencies (or directories\n                       containing file dependencies). These files are *prepended* to the system\n                       path when the model is loaded.\n    :param pickle_module: The module that PyTorch should use to serialize (\"pickle\") the specified\n                          ``pytorch_model``. This is passed as the ``pickle_module`` parameter\n                          to ``torch.save()``. By default, this module is also used to\n                          deserialize (\"unpickle\") the PyTorch model at load time.\n\n    :param signature: (Experimental) :py:class:`ModelSignature <mlflow.models.ModelSignature>`\n                      describes model input and output :py:class:`Schema <mlflow.types.Schema>`.\n                      The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`\n                      from datasets with valid model input (e.g. the training dataset with target\n                      column omitted) and valid model output (e.g. model predictions generated on\n                      the training dataset), for example:\n\n                      .. code-block:: python\n\n                        from mlflow.models.signature import infer_signature\n                        train = df.drop_column(\"target_label\")\n                        predictions = ... 
# compute model predictions\n signature = infer_signature(train, predictions)\n :param input_example: (Experimental) Input example provides one or several instances of valid\n model input. The example can be used as a hint of what data to feed the\n model. The given example will be converted to a Pandas DataFrame and then\n serialized to json using the Pandas split-oriented format. Bytes are\n base64-encoded.\n\n :param requirements_file: A string containing the path to requirements file. Remote URIs\n are resolved to absolute filesystem paths.\n For example, consider the following ``requirements_file`` string -\n\n requirements_file = \"s3://my-bucket/path/to/my_file\"\n\n In this case, the ``\"my_file\"`` requirements file is downloaded from S3.\n\n If ``None``, no requirements file is added to the model.\n\n :param extra_files: A list containing the paths to corresponding extra files. Remote URIs\n are resolved to absolute filesystem paths.\n For example, consider the following ``extra_files`` list -\n\n extra_files = [\"s3://my-bucket/path/to/my_file1\",\n \"s3://my-bucket/path/to/my_file2\"]\n\n In this case, the ``\"my_file1 & my_file2\"`` extra file is downloaded from S3.\n\n If ``None``, no extra files are added to the model.\n\n :param kwargs: kwargs to pass to ``torch.save`` method.\n\n .. code-block:: python\n :caption: Example\n\n import os\n\n import torch\n import mlflow.pytorch\n\n # Class defined here\n class LinearNNModel(torch.nn.Module):\n ...\n\n # Initialize our model, criterion and optimizer\n ...\n\n # Training loop\n ...\n\n # Save PyTorch models to current working directory\n with mlflow.start_run() as run:\n mlflow.pytorch.save_model(model, \"model\")\n\n # Convert to a scripted model and save it\n scripted_pytorch_model = torch.jit.script(model)\n mlflow.pytorch.save_model(scripted_pytorch_model, \"scripted_model\")\n\n # Load each saved model for inference\n for model_path in [\"model\", \"scripted_model\"]:\n model_uri = \"{}/{}\".format(os.getcwd(), model_path)\n loaded_model = mlflow.pytorch.load_model(model_uri)\n print(\"Loaded {}:\".format(model_path))\n for x in [6.0, 8.0, 12.0, 30.0]:\n X = torch.Tensor([[x]])\n y_pred = loaded_model(X)\n print(\"predict X: {}, y_pred: {:.2f}\".format(x, y_pred.data.item()))\n print(\"--\")\n\n .. 
code-block:: text\n :caption: Output\n\n Loaded model:\n predict X: 6.0, y_pred: 11.90\n predict X: 8.0, y_pred: 15.92\n predict X: 12.0, y_pred: 23.96\n predict X: 30.0, y_pred: 60.13\n --\n Loaded scripted_model:\n predict X: 6.0, y_pred: 11.90\n predict X: 8.0, y_pred: 15.92\n predict X: 12.0, y_pred: 23.96\n predict X: 30.0, y_pred: 60.13\n \"\"\"\n import torch\n\n pickle_module = pickle_module or mlflow_pytorch_pickle_module\n\n if not isinstance(pytorch_model, torch.nn.Module):\n raise TypeError(\"Argument 'pytorch_model' should be a torch.nn.Module\")\n if code_paths is not None:\n if not isinstance(code_paths, list):\n raise TypeError(\"Argument code_paths should be a list, not {}\".format(type(code_paths)))\n path = os.path.abspath(path)\n if os.path.exists(path):\n raise RuntimeError(\"Path '{}' already exists\".format(path))\n\n if mlflow_model is None:\n mlflow_model = Model()\n\n os.makedirs(path)\n if signature is not None:\n mlflow_model.signature = signature\n if input_example is not None:\n _save_example(mlflow_model, input_example, path)\n\n model_data_subpath = \"data\"\n model_data_path = os.path.join(path, model_data_subpath)\n os.makedirs(model_data_path)\n # Persist the pickle module name as a file in the model's `data` directory. This is necessary\n # because the `data` directory is the only available parameter to `_load_pyfunc`, and it\n # does not contain the MLmodel configuration; therefore, it is not sufficient to place\n # the module name in the MLmodel\n #\n # TODO: Stop persisting this information to the filesystem once we have a mechanism for\n # supplying the MLmodel configuration to `mlflow.pytorch._load_pyfunc`\n pickle_module_path = os.path.join(model_data_path, _PICKLE_MODULE_INFO_FILE_NAME)\n with open(pickle_module_path, \"w\") as f:\n f.write(pickle_module.__name__)\n # Save pytorch model\n model_path = os.path.join(model_data_path, _SERIALIZED_TORCH_MODEL_FILE_NAME)\n if isinstance(pytorch_model, torch.jit.ScriptModule):\n torch.jit.ScriptModule.save(pytorch_model, model_path)\n else:\n torch.save(pytorch_model, model_path, pickle_module=pickle_module, **kwargs)\n\n torchserve_artifacts_config = {}\n\n if requirements_file:\n if not isinstance(requirements_file, str):\n raise TypeError(\"Path to requirements file should be a string\")\n\n with TempDir() as tmp_requirements_dir:\n _download_artifact_from_uri(\n artifact_uri=requirements_file, output_path=tmp_requirements_dir.path()\n )\n rel_path = os.path.basename(requirements_file)\n torchserve_artifacts_config[_REQUIREMENTS_FILE_KEY] = {\"path\": rel_path}\n shutil.move(tmp_requirements_dir.path(rel_path), path)\n\n if extra_files:\n torchserve_artifacts_config[_EXTRA_FILES_KEY] = []\n if not isinstance(extra_files, list):\n raise TypeError(\"Extra files argument should be a list\")\n\n with TempDir() as tmp_extra_files_dir:\n for extra_file in extra_files:\n _download_artifact_from_uri(\n artifact_uri=extra_file, output_path=tmp_extra_files_dir.path()\n )\n rel_path = posixpath.join(_EXTRA_FILES_KEY, os.path.basename(extra_file),)\n torchserve_artifacts_config[_EXTRA_FILES_KEY].append({\"path\": rel_path})\n shutil.move(\n tmp_extra_files_dir.path(), posixpath.join(path, _EXTRA_FILES_KEY),\n )\n\n conda_env_subpath = \"conda.yaml\"\n if conda_env is None:\n conda_env = get_default_conda_env()\n elif not isinstance(conda_env, dict):\n with open(conda_env, \"r\") as f:\n conda_env = yaml.safe_load(f)\n with open(os.path.join(path, conda_env_subpath), \"w\") as f:\n yaml.safe_dump(conda_env, 
stream=f, default_flow_style=False)\n\n if code_paths is not None:\n code_dir_subpath = \"code\"\n for code_path in code_paths:\n _copy_file_or_tree(src=code_path, dst=path, dst_dir=code_dir_subpath)\n else:\n code_dir_subpath = None\n\n mlflow_model.add_flavor(\n FLAVOR_NAME,\n model_data=model_data_subpath,\n pytorch_version=torch.__version__,\n **torchserve_artifacts_config,\n )\n pyfunc.add_to_model(\n mlflow_model,\n loader_module=\"mlflow.pytorch\",\n data=model_data_subpath,\n pickle_module_name=pickle_module.__name__,\n code=code_dir_subpath,\n env=conda_env_subpath,\n )\n mlflow_model.save(os.path.join(path, MLMODEL_FILE_NAME))\n\n\ndef _load_model(path, **kwargs):\n \"\"\"\n :param path: The path to a serialized PyTorch model.\n :param kwargs: Additional kwargs to pass to the PyTorch ``torch.load`` function.\n \"\"\"\n import torch\n\n if os.path.isdir(path):\n # `path` is a directory containing a serialized PyTorch model and a text file containing\n # information about the pickle module that should be used by PyTorch to load it\n model_path = os.path.join(path, \"model.pth\")\n pickle_module_path = os.path.join(path, _PICKLE_MODULE_INFO_FILE_NAME)\n with open(pickle_module_path, \"r\") as f:\n pickle_module_name = f.read()\n if \"pickle_module\" in kwargs and kwargs[\"pickle_module\"].__name__ != pickle_module_name:\n _logger.warning(\n \"Attempting to load the PyTorch model with a pickle module, '%s', that does not\"\n \" match the pickle module that was used to save the model: '%s'.\",\n kwargs[\"pickle_module\"].__name__,\n pickle_module_name,\n )\n else:\n try:\n kwargs[\"pickle_module\"] = importlib.import_module(pickle_module_name)\n except ImportError as exc:\n raise MlflowException(\n message=(\n \"Failed to import the pickle module that was used to save the PyTorch\"\n \" model. Pickle module name: `{pickle_module_name}`\".format(\n pickle_module_name=pickle_module_name\n )\n ),\n error_code=RESOURCE_DOES_NOT_EXIST,\n ) from exc\n\n else:\n model_path = path\n\n if LooseVersion(torch.__version__) >= LooseVersion(\"1.5.0\"):\n return torch.load(model_path, **kwargs)\n else:\n try:\n # load the model as an eager model.\n return torch.load(model_path, **kwargs)\n except Exception: # pylint: disable=broad-except\n # If fails, assume the model as a scripted model\n return torch.jit.load(model_path)\n\n\ndef load_model(model_uri, **kwargs):\n \"\"\"\n Load a PyTorch model from a local file or a run.\n\n :param model_uri: The location, in URI format, of the MLflow model, for example:\n\n - ``/Users/me/path/to/local/model``\n - ``relative/path/to/local/model``\n - ``s3://my_bucket/path/to/model``\n - ``runs:/<mlflow_run_id>/run-relative/path/to/model``\n - ``models:/<model_name>/<model_version>``\n - ``models:/<model_name>/<stage>``\n\n For more information about supported URI schemes, see\n `Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#\n artifact-locations>`_.\n\n :param kwargs: kwargs to pass to ``torch.load`` method.\n :return: A PyTorch model.\n\n .. 
code-block:: python\n :caption: Example\n\n import torch\n import mlflow.pytorch\n\n # Class defined here\n class LinearNNModel(torch.nn.Module):\n ...\n\n # Initialize our model, criterion and optimizer\n ...\n\n # Training loop\n ...\n\n # Log the model\n with mlflow.start_run() as run:\n mlflow.pytorch.log_model(model, \"model\")\n\n # Inference after loading the logged model\n model_uri = \"runs:/{}/model\".format(run.info.run_id)\n loaded_model = mlflow.pytorch.load_model(model_uri)\n for x in [4.0, 6.0, 30.0]:\n X = torch.Tensor([[x]])\n y_pred = loaded_model(X)\n print(\"predict X: {}, y_pred: {:.2f}\".format(x, y_pred.data.item()))\n\n .. code-block:: text\n :caption: Output\n\n predict X: 4.0, y_pred: 7.57\n predict X: 6.0, y_pred: 11.64\n predict X: 30.0, y_pred: 60.48\n \"\"\"\n import torch\n\n local_model_path = _download_artifact_from_uri(artifact_uri=model_uri)\n try:\n pyfunc_conf = _get_flavor_configuration(\n model_path=local_model_path, flavor_name=pyfunc.FLAVOR_NAME\n )\n except MlflowException:\n pyfunc_conf = {}\n code_subpath = pyfunc_conf.get(pyfunc.CODE)\n if code_subpath is not None:\n pyfunc_utils._add_code_to_system_path(\n code_path=os.path.join(local_model_path, code_subpath)\n )\n\n pytorch_conf = _get_flavor_configuration(model_path=local_model_path, flavor_name=FLAVOR_NAME)\n if torch.__version__ != pytorch_conf[\"pytorch_version\"]:\n _logger.warning(\n \"Stored model version '%s' does not match installed PyTorch version '%s'\",\n pytorch_conf[\"pytorch_version\"],\n torch.__version__,\n )\n torch_model_artifacts_path = os.path.join(local_model_path, pytorch_conf[\"model_data\"])\n return _load_model(path=torch_model_artifacts_path, **kwargs)\n\n\ndef _load_pyfunc(path, **kwargs):\n \"\"\"\n Load PyFunc implementation. 
Called by ``pyfunc.load_pyfunc``.\n\n    :param path: Local filesystem path to the MLflow Model with the ``pytorch`` flavor.\n    \"\"\"\n    return _PyTorchWrapper(_load_model(path, **kwargs))\n\n\nclass _PyTorchWrapper(object):\n    \"\"\"\n    Wrapper class that creates a predict function such that\n    predict(data: pd.DataFrame) -> model's output as pd.DataFrame (pandas DataFrame)\n    \"\"\"\n\n    def __init__(self, pytorch_model):\n        self.pytorch_model = pytorch_model\n\n    def predict(self, data, device=\"cpu\"):\n        import torch\n\n        if not isinstance(data, pd.DataFrame):\n            raise TypeError(\"Input data should be pandas.DataFrame\")\n        self.pytorch_model.to(device)\n        self.pytorch_model.eval()\n        with torch.no_grad():\n            input_tensor = torch.from_numpy(data.values.astype(np.float32)).to(device)\n            preds = self.pytorch_model(input_tensor)\n            if not isinstance(preds, torch.Tensor):\n                raise TypeError(\n                    \"Expected PyTorch model to output a single output tensor, \"\n                    \"but got output of type '{}'\".format(type(preds))\n                )\n            predicted = pd.DataFrame(preds.numpy())\n            predicted.index = data.index\n            return predicted\n\n\n@experimental\n@autologging_integration(FLAVOR_NAME)\ndef autolog(log_every_n_epoch=1, log_models=True, disable=False):  # pylint: disable=unused-argument\n    \"\"\"\n    Enables (or disables) and configures autologging from `PyTorch Lightning\n    <https://pytorch-lightning.readthedocs.io/en/latest>`_ to MLflow.\n\n    Autologging is performed when you call the `fit` method of\n    `pytorch_lightning.Trainer() \\\n    <https://pytorch-lightning.readthedocs.io/en/latest/trainer.html#>`_.\n\n    Explore the complete `PyTorch MNIST \\\n    <https://github.com/mlflow/mlflow/tree/master/examples/pytorch/MNIST/example1>`_ for\n    an expansive example with implementation of additional Lightning steps.\n\n    **Note**: Autologging is only supported for PyTorch Lightning models,\n    i.e., models that subclass\n    `pytorch_lightning.LightningModule \\\n    <https://pytorch-lightning.readthedocs.io/en/latest/lightning_module.html>`_.\n    In particular, autologging support for vanilla PyTorch models that only subclass\n    `torch.nn.Module <https://pytorch.org/docs/stable/generated/torch.nn.Module.html>`_\n    is not yet available.\n\n    :param log_every_n_epoch: If specified, logs metrics once every `n` epochs. By default, metrics\n                              are logged after every epoch.\n    :param log_models: If ``True``, trained models are logged as MLflow model artifacts.\n                       If ``False``, trained models are not logged.\n    :param disable: If ``True``, disables all supported autologging integrations. If ``False``,\n                    enables all supported autologging integrations.\n\n    .. code-block:: python\n        :caption: Example\n\n        import os\n\n        import pytorch_lightning as pl\n        import torch\n        from torch.nn import functional as F\n        from torch.utils.data import DataLoader\n        from torchvision import transforms\n        from torchvision.datasets import MNIST\n        from pytorch_lightning.metrics.functional import accuracy\n\n        import mlflow.pytorch\n        from mlflow.tracking import MlflowClient\n\n        # For brevity, here is the simplest, most minimal example with just a training\n        # loop step (no validation, no testing).
 It illustrates how you can use MLflow\n        # to auto log parameters, metrics, and models.\n\n        class MNISTModel(pl.LightningModule):\n            def __init__(self):\n                super(MNISTModel, self).__init__()\n                self.l1 = torch.nn.Linear(28 * 28, 10)\n\n            def forward(self, x):\n                return torch.relu(self.l1(x.view(x.size(0), -1)))\n\n            def training_step(self, batch, batch_nb):\n                x, y = batch\n                logits = self(x)\n                loss = F.cross_entropy(logits, y)\n                acc = accuracy(logits.argmax(dim=1), y)\n\n                # Use the current PyTorch Lightning logger\n                self.log(\"train_loss\", loss, on_epoch=True)\n                self.log(\"acc\", acc, on_epoch=True)\n                return loss\n\n            def configure_optimizers(self):\n                return torch.optim.Adam(self.parameters(), lr=0.02)\n\n        def print_auto_logged_info(r):\n            tags = {k: v for k, v in r.data.tags.items() if not k.startswith(\"mlflow.\")}\n            artifacts = [f.path for f in MlflowClient().list_artifacts(r.info.run_id, \"model\")]\n            print(\"run_id: {}\".format(r.info.run_id))\n            print(\"artifacts: {}\".format(artifacts))\n            print(\"params: {}\".format(r.data.params))\n            print(\"metrics: {}\".format(r.data.metrics))\n            print(\"tags: {}\".format(tags))\n\n        # Initialize our model\n        mnist_model = MNISTModel()\n\n        # Initialize DataLoader from MNIST Dataset\n        train_ds = MNIST(os.getcwd(), train=True,\n            download=True, transform=transforms.ToTensor())\n        train_loader = DataLoader(train_ds, batch_size=32)\n\n        # Initialize a trainer\n        trainer = pl.Trainer(max_epochs=20, progress_bar_refresh_rate=20)\n\n        # Auto log all MLflow entities\n        mlflow.pytorch.autolog()\n\n        # Train the model\n        with mlflow.start_run() as run:\n            trainer.fit(mnist_model, train_loader)\n\n        # fetch the auto logged parameters and metrics\n        print_auto_logged_info(mlflow.get_run(run_id=run.info.run_id))\n\n    .. code-block:: text\n        :caption: Output\n\n        run_id: 42caa17b60cb489c8083900fb52506a7\n        artifacts: ['model/MLmodel', 'model/conda.yaml', 'model/data']\n        params: {'betas': '(0.9, 0.999)',\n                 'weight_decay': '0',\n                 'epochs': '20',\n                 'eps': '1e-08',\n                 'lr': '0.02',\n                 'optimizer_name': 'Adam',\n                 'amsgrad': 'False'}\n        metrics: {'acc_step': 0.0,\n                  'train_loss_epoch': 1.0917967557907104,\n                  'train_loss_step': 1.0794280767440796,\n                  'train_loss': 1.0794280767440796,\n                  'acc_epoch': 0.0033333334140479565,\n                  'acc': 0.0}\n        tags: {'Mode': 'training'}\n\n    .. figure:: ../_static/images/pytorch_lightening_autolog.png\n\n        PyTorch autologged MLflow entities\n    \"\"\"\n    import pytorch_lightning as pl\n    from mlflow.pytorch._pytorch_autolog import _create_patch_fit\n\n    fit = _create_patch_fit(log_every_n_epoch=log_every_n_epoch, log_models=log_models)\n    safe_patch(FLAVOR_NAME, pl.Trainer, \"fit\", fit, manage_run=True)\n" ]
[ [ "torch.jit.load", "torch.load", "torch.no_grad", "torch.jit.ScriptModule.save", "torch.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dhruvramani/CodeFunDo-2017
[ "e102202ef0219c249a1666daa3dd6426ab899800" ]
[ "src/random/weights.py" ]
[ "import os\nimport cv2\nimport imutils\nimport numpy as np\nfrom imutils import contours\nfrom imutils import perspective\nfrom scipy.spatial import distance as dist\n\n\ndef detect_shape(filepath, min_width=15, debug=False):\n image = cv2.imread(filepath, 0)\n\n resized = imutils.resize(image, width=300)\n ratio = image.shape[0] / float(resized.shape[0])\n '''\n blurred = cv2.GaussianBlur(resized, (5, 5), 0)\n thresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]\n '''\n gray = cv2.bilateralFilter(resized, 1, 10, 120 )\n edges = cv2.Canny( gray, 10, 250 )\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (7, 7))\n closed = cv2.morphologyEx( edges, cv2.MORPH_CLOSE, kernel )\n '''\n cnts = cv2.findContours( closed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE )\n gray = cv2.GaussianBlur(resized, (7, 7), 0)\n edged = cv2.Canny(gray, 10, 250)\n edged = cv2.dilate(edged, None, iterations=1)\n edged = cv2.erode(edged, None, iterations=1)\n '''\n cnts = cv2.findContours(closed.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n cnts = cnts[0] if imutils.is_cv2() else cnts[1]\n \n shapes = dict()\n print(len(cnts))\n for idx, c in enumerate(cnts):\n try :\n perimeter = cv2.arcLength(c, True)\n approx = cv2.approxPolyDP(c, 0.1 * perimeter, True)\n if len(approx) == 4:\n (x, y, w, h) = cv2.boundingRect(approx)\n shapes[\"rect_{}\".format(idx)] = (x, y, w, h)\n if(debug == True):\n M = cv2.moments(c)\n cX = int((M[\"m10\"] / M[\"m00\"]) * ratio)\n cY = int((M[\"m01\"] / M[\"m00\"]) * ratio)\n c = c.astype(\"float\")\n c *= ratio\n c = c.astype(\"int\")\n cv2.drawContours(image, [c], -1, (0, 255, 0), 2)\n cv2.putText(image, \"square\", (cX, cY), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)\n cv2.namedWindow('image', cv2.WINDOW_NORMAL)\n cv2.resizeWindow('image', 300,300)\n cv2.imshow(\"image\", image)\n cv2.waitKey(0)\n except :\n pass\n\n return shapes\n\ndef midpoint(ptA, ptB):\n return ((ptA[0] + ptB[0]) * 0.5, (ptA[1] + ptB[1]) * 0.5)\n\ndef min_dif(list1, list2): \n min_d, ind = 1000000, -1\n for i in range(0, len(list1)):\n for j in range(0, len(list2)):\n if(list1[i]-list2[j] < min_d):\n ind = j\n min_d = list1[i]-list2[j]\n return ind\n\ndef object_size(filepath, left_width=15):\n image = cv2.imread(filepath, 0)\n #gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n gray = cv2.GaussianBlur(image, (7, 7), 0)\n \n edged = cv2.Canny(gray, 50, 100)\n edged = cv2.dilate(edged, None, iterations=1)\n edged = cv2.erode(edged, None, iterations=1)\n\n # NOTE : Contour - Outlines\n cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n cnts = cnts[0] if imutils.is_cv2() else cnts[1]\n (cnts, _) = contours.sort_contours(cnts)\n pixelsPerMetric = None\n\n dimensions = list()\n for c in cnts:\n if cv2.contourArea(c) < 100:\n continue\n orig = image.copy()\n box = cv2.minAreaRect(c)\n box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box)\n box = np.array(box, dtype=\"int\")\n box = perspective.order_points(box)\n\n (tl, tr, br, bl) = box\n (tltrX, tltrY) = midpoint(tl, tr)\n (blbrX, blbrY) = midpoint(bl, br)\n (tlblX, tlblY) = midpoint(tl, bl)\n (trbrX, trbrY) = midpoint(tr, br)\n\n cv2.circle(orig, (int(tltrX), int(tltrY)), 5, (255, 0, 0), -1)\n cv2.circle(orig, (int(blbrX), int(blbrY)), 5, (255, 0, 0), -1)\n cv2.circle(orig, (int(tlblX), int(tlblY)), 5, (255, 0, 0), -1)\n cv2.circle(orig, (int(trbrX), int(trbrY)), 5, (255, 0, 0), -1)\n \n # draw lines between the midpoints\n cv2.line(orig, (int(tltrX), int(tltrY)), (int(blbrX), int(blbrY)), 
(255, 0, 255), 2)\n cv2.line(orig, (int(tlblX), int(tlblY)), (int(trbrX), int(trbrY)), (255, 0, 255), 2)\n\n dA = dist.euclidean((tltrX, tltrY), (blbrX, blbrY))\n dB = dist.euclidean((tlblX, tlblY), (trbrX, trbrY))\n\n if pixelsPerMetric is None:\n pixelsPerMetric = dB / left_width\n\n dimA = dA / pixelsPerMetric\n dimB = dB / pixelsPerMetric\n\n cv2.putText(orig, \"{:.1f}in\".format(dimA), (int(tltrX - 15), int(tltrY - 10)), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (255, 255, 255), 2)\n cv2.putText(orig, \"{:.1f}in\".format(dimB), (int(trbrX + 10), int(trbrY)), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (255, 255, 255), 2)\n cv2.namedWindow('image', cv2.WINDOW_NORMAL)\n cv2.resizeWindow('image', 300,300)\n cv2.imshow(\"image\", orig)\n cv2.waitKey(0)\n\n dimensions.append((dimA, dimB))\n\n max_dim = [-1, -1]\n for dims in dimensions:\n if(dims[0] * dims[1] > max_dim[0] * max_dim[1] and left_width not in dims):\n max_dim[0] = dims[0]\n max_dim[1] = dims[1]\n return max_dim\n\ndef weight(file1, file2, left_width=21, const_div=6000.0): # left_width = A4 Size\n size1 = object_size(file1, left_width)\n size2 = object_size(file2, left_width)\n rem_ind = min_dif(size1, size2)\n weight = (size1[0] * size1[1] * size2[1-rem_ind]) / const_div\n return weight\n\nif __name__ == '__main__':\n print(detect_shape(\"img.jpg\", debug=True))\n" ]
[ [ "numpy.array", "scipy.spatial.distance.euclidean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
spitzc32/CropMe
[ "6f3c0c9512cbf56d64b40c5c05a33627d6eaf51d" ]
[ "utils/data_operations.py" ]
[ "import numpy as np\n\n\ndef euclidean_distance(p1,p2):\n\t\"\"\"\n\treturns euclidean distance between matrices\t\n\t@params:\n\t\tp1, p2: np.ndarray\n\t\t\tmatrices to perform operation to.\n\t\"\"\"\n\treturn np.sqrt(np.sum((p1-p2)**2, axis=1))\n\n\ndef entropy(p):\n\t\t\"\"\"\n\t\tWill be our measurement for uncertainty in our construction \n\t\tof descision tree\n\t\t@params:\n\t\t\tp: float\n\n\t\t\"\"\"\n\t\tif p == 0:\n\t\t\treturn 0\n\t\telif p == 1:\n\t\t\treturn 0\n\t\telse:\n\t\t\treturn -(p * np.log2(p) + (1 - p) * np.log2(1 - p))\n\n\ndef information_gain(left_child, right_child):\n\t\t\"\"\"\n\t\tmeasurement of how much info we gained when splitting a node\n\t\tusing our entropy method.\n\t\t@def:\n\t\t\ttakes in a list of classes from left and right child to return\n\t\t\tthe information gain of our curr split\n\t\t@params:\n\t\t\tleft_child: np.ndarray\n\t\t\t\tcurr left child arr\n\t\t\tright_child: np.ndarray\n\t\t\t\tcurr left child arr\n\t\t\"\"\"\n\t\tparent = left_child + right_child\n\t\tp_par = parent.count(1) / len(parent) if len(parent) > 0 else 0\n\t\tp_left = left_child.count(1) / len(left_child) if len(left_child) \\\n\t\t> 0 else 0\n\t\tp_right = right_child.count(1) / len(right_child) if len(right_child) \\\n\t\t> 0 else 0\n\n\t\tinfogain_p = self.entropy(p_par)\n\t\tinfogain_l = self.entropy(p_left)\n\t\tinfogain_r = self.entropy(p_right)\n\n\t\treturn infogain_p - len(left_child) / len(parent) * infogain_l - \\\n\t\tlen(right_child) / len(parent) * infogain_r\n" ]
[ [ "numpy.log2", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
linamnt/PySyft
[ "4b60a86c003acbe1967d6c3d611df3d5f2d377ee", "4b60a86c003acbe1967d6c3d611df3d5f2d377ee" ]
[ "test/generic/test_object_storage.py", "test/torch/test_functions.py" ]
[ "import torch\n\nfrom syft.generic import object_storage\n\n\ndef test_clear_objects():\n obj_storage = object_storage.ObjectStorage()\n\n x = torch.tensor(1)\n obj_storage.set_obj(x)\n\n objs = obj_storage.current_objects()\n\n assert len(objs) == 1\n assert objs[x.id] == x\n\n ret_val = obj_storage.clear_objects()\n\n objs = obj_storage.current_objects()\n assert len(objs) == 0\n assert ret_val == obj_storage\n\n\ndef test_clear_objects_return_None():\n obj_storage = object_storage.ObjectStorage()\n\n x = torch.tensor(1)\n obj_storage.set_obj(x)\n\n objs = obj_storage.current_objects()\n\n assert len(objs) == 1\n assert objs[x.id] == x\n\n ret_val = obj_storage.clear_objects(return_self=False)\n\n objs = obj_storage.current_objects()\n assert len(objs) == 0\n assert ret_val is None\n", "import pytest\nimport torch as th\nimport syft as sy\n\nfrom syft.frameworks.torch.tensors.decorators.logging import LoggingTensor\n\n\ndef test_combine_pointers(workers):\n \"\"\"\n Ensure that the sy.combine_pointers works as expected\n \"\"\"\n\n bob = workers[\"bob\"]\n alice = workers[\"alice\"]\n\n x = th.tensor([1, 2, 3, 4, 5]).send(bob)\n y = th.tensor([1, 2, 3, 4, 5]).send(alice)\n\n a = sy.combine_pointers(*[x, y])\n b = a + a\n\n c = b.get(sum_results=True)\n assert (c == th.tensor([4, 8, 12, 16, 20])).all()\n\n b = a + a\n c = b.get(sum_results=False)\n assert len(c) == 2\n assert (c[0] == th.tensor([2, 4, 6, 8, 10])).all\n" ]
[ [ "torch.tensor" ], [ "torch.tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
xuyuandong/sequence_behavior_ctr_model
[ "e1bb71b4579456b1c6fbf3b432a84a3cb52611b7" ]
[ "script/utils.py" ]
[ "import tensorflow as tf\n#from tensorflow.python.ops.rnn_cell import *\n#from tensorflow.python.ops.rnn_cell_impl import _Linear\nfrom tensorflow.contrib.rnn.python.ops.core_rnn_cell import *\n#from tensorflow import keras\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import variable_scope as vs\n#from keras import backend as K\n\ndef din_attention(query, facts, attention_size, mask=None, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False):\n if isinstance(facts, tuple):\n # In case of Bi-RNN, concatenate the forward and the backward RNN outputs.\n facts = tf.concat(facts, 2)\n print (\"query_size mismatch\")\n query = tf.concat(values = [\n query,\n query,\n ], axis=1)\n\n if time_major:\n # (T,B,D) => (B,T,D)\n facts = tf.array_ops.transpose(facts, [1, 0, 2])\n facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer\n querry_size = query.get_shape().as_list()[-1]\n queries = tf.tile(query, [1, tf.shape(facts)[1]])\n queries = tf.reshape(queries, tf.shape(facts))\n din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)\n d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att' + stag)\n d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag)\n d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag)\n d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]])\n scores = d_layer_3_all\n\n if mask is not None:\n mask = tf.equal(mask, tf.ones_like(mask))\n key_masks = tf.expand_dims(mask, 1) # [B, 1, T]\n paddings = tf.ones_like(scores) * (-2 ** 32 + 1)\n scores = tf.where(key_masks, scores, paddings) # [B, 1, T]\n\n # Activation\n if softmax_stag:\n scores = tf.nn.softmax(scores) # [B, 1, T]\n\n # Weighted sum\n if mode == 'SUM':\n output = tf.matmul(scores, facts) # [B, 1, H]\n # output = tf.reshape(output, [-1, tf.shape(facts)[-1]])\n else:\n scores = tf.reshape(scores, [-1, tf.shape(facts)[1]])\n output = facts * tf.expand_dims(scores, -1)\n output = tf.reshape(output, tf.shape(facts))\n \n if return_alphas:\n return output, scores\n \n return output\n\n\nclass VecAttGRUCell(RNNCell):\n \"\"\"Gated Recurrent Unit cell (cf. http://arxiv.org/abs/1406.1078).\n Args:\n num_units: int, The number of units in the GRU cell.\n activation: Nonlinearity to use. Default: `tanh`.\n reuse: (optional) Python boolean describing whether to reuse variables\n in an existing scope. 
If not `True`, and the existing scope already has\n the given variables, an error is raised.\n kernel_initializer: (optional) The initializer to use for the weight and\n projection matrices.\n bias_initializer: (optional) The initializer to use for the bias.\n \"\"\"\n\n def __init__(self,\n num_units,\n activation=None,\n reuse=None,\n kernel_initializer=None,\n bias_initializer=None):\n super(VecAttGRUCell, self).__init__(_reuse=reuse)\n self._num_units = num_units\n self._activation = activation or math_ops.tanh\n self._kernel_initializer = kernel_initializer\n self._bias_initializer = bias_initializer\n self._gate_linear = None\n self._candidate_linear = None\n\n @property\n def state_size(self):\n return self._num_units\n\n @property\n def output_size(self):\n return self._num_units\n def __call__(self, inputs, state, att_score):\n return self.call(inputs, state, att_score)\n def call(self, inputs, state, att_score=None):\n \"\"\"Gated recurrent unit (GRU) with nunits cells.\"\"\"\n if self._gate_linear is None:\n bias_ones = self._bias_initializer\n if self._bias_initializer is None:\n bias_ones = init_ops.constant_initializer(1.0, dtype=inputs.dtype)\n with vs.variable_scope(\"gates\"): # Reset gate and update gate.\n self._gate_linear = _Linear(\n [inputs, state],\n 2 * self._num_units,\n True,\n bias_initializer=bias_ones,\n kernel_initializer=self._kernel_initializer)\n\n value = math_ops.sigmoid(self._gate_linear([inputs, state]))\n r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)\n\n r_state = r * state\n if self._candidate_linear is None:\n with vs.variable_scope(\"candidate\"):\n self._candidate_linear = _Linear(\n [inputs, r_state],\n self._num_units,\n True,\n bias_initializer=self._bias_initializer,\n kernel_initializer=self._kernel_initializer)\n c = self._activation(self._candidate_linear([inputs, r_state]))\n u = (1.0 - att_score) * u\n new_h = u * state + (1 - u) * c\n return new_h, new_h\n\ndef prelu(_x, scope=''):\n \"\"\"parametric ReLU activation\"\"\"\n with tf.variable_scope(name_or_scope=scope, default_name=\"prelu\"):\n _alpha = tf.get_variable(\"prelu_\"+scope, shape=_x.get_shape()[-1],\n dtype=_x.dtype, initializer=tf.constant_initializer(0.1))\n return tf.maximum(0.0, _x) + _alpha * tf.minimum(0.0, _x)\n\ndef calc_auc(raw_arr):\n \"\"\"Summary\n\n Args:\n raw_arr (TYPE): Description\n\n Returns:\n TYPE: Description\n \"\"\"\n\n arr = sorted(raw_arr, key=lambda d:d[0], reverse=True)\n pos, neg = 0., 0.\n for record in arr:\n if record[1] == 1.:\n pos += 1\n else:\n neg += 1\n\n fp, tp = 0., 0.\n xy_arr = []\n for record in arr:\n if record[1] == 1.:\n tp += 1\n else:\n fp += 1\n xy_arr.append([fp/neg, tp/pos])\n\n auc = 0.\n prev_x = 0.\n prev_y = 0.\n for x, y in xy_arr:\n if x != prev_x:\n auc += ((x - prev_x) * (y + prev_y) / 2.)\n prev_x = x\n prev_y = y\n\n return auc\n\ndef calc_gauc(raw_arr, nick_index):\n \"\"\"Summary\n\n Args:\n raw_arr (TYPE): Description\n\n Returns:\n TYPE: Description\n \"\"\"\n last_index = 0\n gauc = 0.\n pv_sum = 0\n for idx in range(len(nick_index)):\n if nick_index[idx] != nick_index[last_index]:\n input_arr = raw_arr[last_index:idx]\n auc_val=calc_auc(input_arr)\n if auc_val >= 0.0:\n gauc += auc_val * len(input_arr)\n pv_sum += len(input_arr)\n else:\n pv_sum += len(input_arr) \n last_index = idx\n return gauc / pv_sum\n \n\n\n\ndef attention(query, facts, attention_size, mask, stag='null', mode='LIST', softmax_stag=1, time_major=False, return_alphas=False):\n if isinstance(facts, tuple):\n # In case of 
Bi-RNN, concatenate the forward and the backward RNN outputs.\n facts = tf.concat(facts, 2)\n\n if time_major:\n # (T,B,D) => (B,T,D)\n facts = tf.array_ops.transpose(facts, [1, 0, 2])\n\n mask = tf.equal(mask, tf.ones_like(mask))\n hidden_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer\n input_size = query.get_shape().as_list()[-1]\n\n # Trainable parameters\n w1 = tf.Variable(tf.random_normal([hidden_size, attention_size], stddev=0.1))\n w2 = tf.Variable(tf.random_normal([input_size, attention_size], stddev=0.1))\n b = tf.Variable(tf.random_normal([attention_size], stddev=0.1))\n v = tf.Variable(tf.random_normal([attention_size], stddev=0.1))\n\n with tf.name_scope('v'):\n # Applying fully connected layer with non-linear activation to each of the B*T timestamps;\n # the shape of `tmp` is (B,T,D)*(D,A)=(B,T,A), where A=attention_size\n tmp1 = tf.tensordot(facts, w1, axes=1)\n tmp2 = tf.tensordot(query, w2, axes=1)\n tmp2 = tf.reshape(tmp2, [-1, 1, tf.shape(tmp2)[-1]])\n tmp = tf.tanh((tmp1 + tmp2) + b)\n\n # For each of the timestamps its vector of size A from `tmp` is reduced with `v` vector\n v_dot_tmp = tf.tensordot(tmp, v, axes=1, name='v_dot_tmp') # (B,T) shape\n key_masks = mask # [B, 1, T]\n # key_masks = tf.expand_dims(mask, 1) # [B, 1, T]\n paddings = tf.ones_like(v_dot_tmp) * (-2 ** 32 + 1)\n v_dot_tmp = tf.where(key_masks, v_dot_tmp, paddings) # [B, 1, T]\n alphas = tf.nn.softmax(v_dot_tmp, name='alphas') # (B,T) shape\n\n # Output of (Bi-)RNN is reduced with attention vector; the result has (B,D) shape\n #output = tf.reduce_sum(facts * tf.expand_dims(alphas, -1), 1)\n output = facts * tf.expand_dims(alphas, -1)\n output = tf.reshape(output, tf.shape(facts))\n # output = output / (facts.get_shape().as_list()[-1] ** 0.5)\n if not return_alphas:\n return output\n else:\n return output, alphas\n\n\ndef din_fcn_attention(query, facts, attention_size, mask, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False, forCnn=False):\n if isinstance(facts, tuple):\n # In case of Bi-RNN, concatenate the forward and the backward RNN outputs.\n facts = tf.concat(facts, 2)\n if len(facts.get_shape().as_list()) == 2:\n facts = tf.expand_dims(facts, 1)\n\n if time_major:\n # (T,B,D) => (B,T,D)\n facts = tf.array_ops.transpose(facts, [1, 0, 2])\n # Trainable parameters\n facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer\n querry_size = query.get_shape().as_list()[-1]\n query = tf.layers.dense(query, facts_size, activation=None, name='f1' + stag)\n query = prelu(query)\n queries = tf.tile(query, [1, tf.shape(facts)[1]])\n queries = tf.reshape(queries, tf.shape(facts))\n din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)\n d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att' + stag)\n d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag)\n d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag)\n d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]])\n scores = d_layer_3_all\n # Mask\n if mask is not None:\n # key_masks = tf.sequence_mask(facts_length, tf.shape(facts)[1]) # [B, T]\n key_masks = tf.expand_dims(mask, 1) # [B, 1, T]\n paddings = tf.ones_like(scores) * (-2 ** 32 + 1)\n if not forCnn:\n scores = tf.where(key_masks, scores, paddings) # [B, 1, T]\n\n # Scale\n # scores = scores / (facts.get_shape().as_list()[-1] ** 0.5)\n\n # Activation\n if 
softmax_stag:\n scores = tf.nn.softmax(scores) # [B, 1, T]\n\n # Weighted sum\n if mode == 'SUM':\n output = tf.matmul(scores, facts) # [B, 1, H]\n # output = tf.reshape(output, [-1, tf.shape(facts)[-1]])\n else:\n scores = tf.reshape(scores, [-1, tf.shape(facts)[1]])\n output = facts * tf.expand_dims(scores, -1)\n output = tf.reshape(output, tf.shape(facts))\n if return_alphas:\n return output, scores\n return output\n\ndef self_attention(facts, ATTENTION_SIZE, mask, stag='null'):\n if len(facts.get_shape().as_list()) == 2:\n facts = tf.expand_dims(facts, 1)\n\n def cond(batch, output, i):\n return tf.less(i, tf.shape(batch)[1])\n\n def body(batch, output, i):\n self_attention_tmp = din_fcn_attention(batch[:, i, :], batch[:, 0:i+1, :],\n ATTENTION_SIZE, mask[:, 0:i+1], softmax_stag=1, stag=stag,\n mode='LIST')\n self_attention_tmp = tf.reduce_sum(self_attention_tmp, 1)\n output = output.write(i, self_attention_tmp)\n return batch, output, i + 1\n\n output_ta = tf.TensorArray(dtype=tf.float32,\n size=0,\n dynamic_size=True,\n element_shape=(facts[:, 0, :].get_shape()))\n _, output_op, _ = tf.while_loop(cond, body, [facts, output_ta, 0])\n self_attention = output_op.stack()\n self_attention = tf.transpose(self_attention, perm = [1, 0, 2])\n return self_attention\n\ndef self_all_attention(facts, ATTENTION_SIZE, mask, stag='null'):\n if len(facts.get_shape().as_list()) == 2:\n facts = tf.expand_dims(facts, 1)\n\n def cond(batch, output, i):\n return tf.less(i, tf.shape(batch)[1])\n\n def body(batch, output, i):\n self_attention_tmp = din_fcn_attention(batch[:, i, :], batch,\n ATTENTION_SIZE, mask, softmax_stag=1, stag=stag,\n mode='LIST')\n self_attention_tmp = tf.reduce_sum(self_attention_tmp, 1)\n output = output.write(i, self_attention_tmp)\n return batch, output, i + 1\n\n output_ta = tf.TensorArray(dtype=tf.float32,\n size=0,\n dynamic_size=True,\n element_shape=(facts[:, 0, :].get_shape()))\n _, output_op, _ = tf.while_loop(cond, body, [facts, output_ta, 0])\n self_attention = output_op.stack()\n self_attention = tf.transpose(self_attention, perm = [1, 0, 2])\n return self_attention\n\ndef din_fcn_shine(query, facts, attention_size, mask, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False):\n if isinstance(facts, tuple):\n # In case of Bi-RNN, concatenate the forward and the backward RNN outputs.\n facts = tf.concat(facts, 2)\n\n if time_major:\n # (T,B,D) => (B,T,D)\n facts = tf.array_ops.transpose(facts, [1, 0, 2])\n # Trainable parameters\n mask = tf.equal(mask, tf.ones_like(mask))\n facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer\n querry_size = query.get_shape().as_list()[-1]\n query = tf.layers.dense(query, facts_size, activation=None, name='f1_trans_shine' + stag)\n query = prelu(query)\n queries = tf.tile(query, [1, tf.shape(facts)[1]])\n queries = tf.reshape(queries, tf.shape(facts))\n din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)\n d_layer_1_all = tf.layers.dense(din_all, facts_size, activation=tf.nn.sigmoid, name='f1_shine_att' + stag)\n d_layer_2_all = tf.layers.dense(d_layer_1_all, facts_size, activation=tf.nn.sigmoid, name='f2_shine_att' + stag)\n d_layer_2_all = tf.reshape(d_layer_2_all, tf.shape(facts))\n output = d_layer_2_all\n return output\n\n" ]
[ [ "tensorflow.concat", "tensorflow.python.ops.array_ops.split", "tensorflow.reduce_sum", "tensorflow.minimum", "tensorflow.tanh", "tensorflow.where", "tensorflow.python.ops.init_ops.constant_initializer", "tensorflow.while_loop", "tensorflow.layers.dense", "tensorflow.name_scope", "tensorflow.python.ops.variable_scope.variable_scope", "tensorflow.tensordot", "tensorflow.matmul", "tensorflow.shape", "tensorflow.nn.softmax", "tensorflow.transpose", "tensorflow.maximum", "tensorflow.ones_like", "tensorflow.expand_dims", "tensorflow.constant_initializer", "tensorflow.variable_scope", "tensorflow.array_ops.transpose", "tensorflow.random_normal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
boutproject/VECMA-hackathon
[ "07632a267fcaff582bf410eba13f7bc81d8ea6eb" ]
[ "workflows/sc_adaptive_restartable/example_restartable_sc_adaptive.py" ]
[ "#!/usr/bin/env python3\n\nimport argparse\nimport boutvecma\nimport easyvvuq as uq\nimport chaospy\nimport os\nimport numpy as np\nimport time\nimport matplotlib.pyplot as plt\n\n\nCAMPAIGN_NAME = \"Conduction.\"\n\n\ndef refine_sampling_plan(campaign, analysis, number_of_refinements):\n \"\"\"\n Refine the sampling plan.\n\n Parameters\n ----------\n number_of_refinements (int)\n The number of refinement iterations that must be performed.\n\n Returns\n -------\n None. The new accepted indices are stored in analysis.l_norm and the admissible indices\n in sampler.admissible_idx.\n \"\"\"\n\n sampler = campaign.get_active_sampler()\n\n for _ in range(number_of_refinements):\n # compute the admissible indices\n sampler.look_ahead(analysis.l_norm)\n\n print(f\"Code will be evaluated {sampler.n_new_points[-1]} times\")\n # run the ensemble\n campaign.execute().collate(progress_bar=True)\n\n # accept one of the multi indices of the new admissible set\n data_frame = campaign.get_collation_result()\n analysis.adapt_dimension(\"T\", data_frame)\n analysis.save_state(f\"{campaign.campaign_dir}/analysis.state\")\n\n\ndef plot_grid_2D(campaign, analysis, i, filename=\"out.pdf\"):\n fig = plt.figure(figsize=[12, 4])\n ax1 = fig.add_subplot(121)\n ax2 = fig.add_subplot(122)\n accepted_grid = campaign.get_active_sampler().generate_grid(analysis.l_norm)\n ax1.plot(accepted_grid[:, 0], accepted_grid[:, 1], \"o\")\n ax2.plot(accepted_grid[:, 2], accepted_grid[:, 3], \"o\")\n ax1.set_title(f\"iteration {i}\")\n\n fig.tight_layout()\n fig.savefig(filename)\n\n\ndef custom_moments_plot(results, filename, i):\n fig, ax = plt.subplots()\n xvalues = np.arange(len(results.describe(\"T\", \"mean\")))\n ax.fill_between(\n xvalues,\n results.describe(\"T\", \"mean\") - results.describe(\"T\", \"std\"),\n results.describe(\"T\", \"mean\") + results.describe(\"T\", \"std\"),\n label=\"std\",\n alpha=0.2,\n )\n ax.plot(xvalues, results.describe(\"T\", \"mean\"), label=\"mean\")\n try:\n ax.plot(xvalues, results.describe(\"T\", \"1%\"), \"--\", label=\"1%\", color=\"black\")\n ax.plot(xvalues, results.describe(\"T\", \"99%\"), \"--\", label=\"99%\", color=\"black\")\n except RuntimeError:\n pass\n ax.grid(True)\n ax.set_ylabel(\"T\")\n ax.set_xlabel(r\"$\\rho$\")\n ax.set_title(\"iteration \" + str(i))\n ax.legend()\n fig.savefig(filename)\n\n\ndef first_time_setup():\n encoder = boutvecma.BOUTEncoder(\n template_input=\"../../models/conduction/data/BOUT.inp\"\n )\n # decoder = boutvecma.LogDataBOUTDecoder(variables=[\"T\"])\n decoder = boutvecma.SimpleBOUTDecoder(variables=[\"T\"])\n params = {\n \"conduction:chi\": {\"type\": \"float\", \"min\": 0.0, \"max\": 1e3, \"default\": 1.0},\n \"T:scale\": {\"type\": \"float\", \"min\": 0.0, \"max\": 1e3, \"default\": 1.0},\n \"T:gauss_width\": {\"type\": \"float\", \"min\": 0.0, \"max\": 1e3, \"default\": 0.2},\n \"T:gauss_centre\": {\n \"type\": \"float\",\n \"min\": 0.0,\n \"max\": 2 * np.pi,\n \"default\": np.pi,\n },\n }\n actions = uq.actions.local_execute(\n encoder,\n os.path.abspath(\n \"../../build/models/conduction/conduction -q -q -q -q -d . 
|& tee run.log\"\n ),\n decoder,\n root=\".\",\n )\n campaign = uq.Campaign(name=CAMPAIGN_NAME, actions=actions, params=params)\n\n vary = {\n \"conduction:chi\": chaospy.Uniform(0.2, 4.0),\n \"T:scale\": chaospy.Uniform(0.5, 1.5),\n \"T:gauss_width\": chaospy.Uniform(0.5, 1.5),\n \"T:gauss_centre\": chaospy.Uniform(0.5 * np.pi, 1.5 * np.pi),\n }\n\n sampler = uq.sampling.SCSampler(\n vary=vary,\n polynomial_order=1,\n quadrature_rule=\"C\",\n sparse=True,\n growth=True,\n midpoint_level1=True,\n dimension_adaptive=True,\n )\n campaign.set_sampler(sampler)\n\n print(f\"Output will be in {campaign.campaign_dir}\")\n\n sampler = campaign.get_active_sampler()\n\n print(f\"Computing {sampler.n_samples} samples\")\n\n time_start = time.time()\n campaign.execute().collate(progress_bar=True)\n\n # Create an analysis class and run the analysis.\n analysis = create_analysis(campaign)\n campaign.apply_analysis(analysis)\n analysis.save_state(f\"{campaign.campaign_dir}/analysis.state\")\n plot_grid_2D(campaign, analysis, 0, f\"{campaign.campaign_dir}/grid0.png\")\n\n for i in np.arange(1, 10):\n refine_once(campaign, analysis, i)\n time_end = time.time()\n\n print(f\"Finished, took {time_end - time_start}\")\n\n return campaign\n\n\ndef create_analysis(campaign):\n return uq.analysis.SCAnalysis(sampler=campaign.get_active_sampler(), qoi_cols=[\"T\"])\n\n\ndef refine_once(campaign, analysis, iteration):\n refine_sampling_plan(campaign, analysis, 1)\n campaign.apply_analysis(analysis)\n analysis.save_state(f\"{campaign.campaign_dir}/analysis.state\")\n\n results = campaign.last_analysis\n plot_grid_2D(\n campaign,\n analysis,\n iteration,\n f\"{campaign.campaign_dir}/grid{iteration:02}.png\",\n )\n moment_plot_filename = os.path.join(\n f\"{campaign.campaign_dir}\", f\"moments{iteration:02}.png\"\n )\n sobols_plot_filename = os.path.join(\n f\"{campaign.campaign_dir}\", f\"sobols_first{iteration:02}.png\"\n )\n results.plot_sobols_first(\n \"T\",\n ylabel=f\"iteration{iteration}\",\n xlabel=r\"$\\rho$\",\n filename=sobols_plot_filename,\n )\n plt.ylim(0, 1)\n plt.savefig(f\"{campaign.campaign_dir}/sobols{iteration:02}.png\")\n\n custom_moments_plot(results, moment_plot_filename, iteration)\n\n with open(f\"{campaign.campaign_dir}/last_iteration\", \"w\") as f:\n f.write(f\"{iteration}\")\n\n\ndef plot_results(campaign, moment_plot_filename, sobols_plot_filename):\n results = campaign.get_last_analysis()\n\n results.plot_sobols_first(\"T\", xlabel=r\"$\\rho$\", filename=sobols_plot_filename)\n\n fig, ax = plt.subplots()\n xvalues = np.arange(len(results.describe(\"T\", \"mean\")))\n ax.fill_between(\n xvalues,\n results.describe(\"T\", \"mean\") - results.describe(\"T\", \"std\"),\n results.describe(\"T\", \"mean\") + results.describe(\"T\", \"std\"),\n label=\"std\",\n alpha=0.2,\n )\n ax.plot(xvalues, results.describe(\"T\", \"mean\"), label=\"mean\")\n try:\n ax.plot(xvalues, results.describe(\"T\", \"1%\"), \"--\", label=\"1%\", color=\"black\")\n ax.plot(xvalues, results.describe(\"T\", \"99%\"), \"--\", label=\"99%\", color=\"black\")\n except RuntimeError:\n pass\n ax.grid(True)\n ax.set_ylabel(\"T\")\n ax.set_xlabel(r\"$\\rho$\")\n ax.legend()\n fig.savefig(moment_plot_filename)\n\n print(f\"Results are in:\\n\\t{moment_plot_filename}\\n\\t{sobols_plot_filename}\")\n\n\ndef reload_campaign(directory):\n \"\"\"Reload a campaign from a directory\n\n Returns the campaign, analysis, and last iteration number\n \"\"\"\n\n campaign = uq.Campaign(\n name=CAMPAIGN_NAME,\n 
db_location=f\"sqlite:///{os.path.abspath(directory)}/campaign.db\",\n )\n analysis = create_analysis(campaign)\n analysis.load_state(f\"{campaign.campaign_dir}/analysis.state\")\n\n with open(f\"{campaign.campaign_dir}/last_iteration\", \"r\") as f:\n iteration = int(f.read())\n\n return campaign, analysis, iteration\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(\n \"conduction_sc\",\n description=\"Adaptive dimension refinement for 1D conduction model\",\n )\n parser.add_argument(\n \"--restart\", type=str, help=\"Restart previous campaign\", default=None\n )\n parser.add_argument(\n \"-n\", \"--refinement-num\", type=int, default=1, help=\"Number of refinements\"\n )\n\n args = parser.parse_args()\n\n if args.restart is None:\n first_time_setup()\n else:\n campaign, analysis, last_iteration = reload_campaign(args.restart)\n for iteration in range(\n last_iteration + 1, last_iteration + args.refinement_num + 1\n ):\n refine_once(campaign, analysis, iteration)\n" ]
[ [ "matplotlib.pyplot.ylim", "numpy.arange", "matplotlib.pyplot.subplots", "matplotlib.pyplot.savefig", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mchelem/cref2
[ "3324c34892dfaba2c99a0a564ede9f0c40ad65a5" ]
[ "cref/structure/plot.py" ]
[ "import os\nfrom collections import OrderedDict\n\nimport matplotlib.pyplot as plt\nimport pandas\n\n\n_ramachandran_densities = pandas.read_csv(\n 'data/rama500-general.data',\n skiprows=6,\n delimiter=' ',\n names=['phi', 'psi', 'value']\n)\n\n\"\"\"\nDSSP output:\n H = α-helix\n B = residue in isolated β-bridge\n E = extended strand, participates in β ladder\n G = 3-helix (310 helix)\n I = 5 helix (π-helix)\n T = hydrogen bonded turn\n S = bend\n\nColors extracted from rcsb.org.\n\"\"\"\n\nDSSP_to_color = {\n 'H': '#ED6161',\n 'B': '#CCA200',\n 'E': '#FFFB00',\n 'G': '#FFC2C2',\n 'I': '#900000',\n 'T': '#990099',\n 'S': '#0000FF',\n '-': 'black',\n}\n\n\ndef ramachandran_surface():\n \"\"\"\n Plot density surface for generic ramachandran\n \"\"\"\n fontsize = 18\n ticks = [-180, -90, 0, 90, 180]\n plt.contourf(\n list(OrderedDict.fromkeys(_ramachandran_densities['phi'])),\n list(OrderedDict.fromkeys(_ramachandran_densities['psi'])),\n _ramachandran_densities['value'].values.reshape(180, 180).T,\n levels=[0, 0.0005, 0.02, 1],\n colors=['#FFFFFF', '#B3E8FF', '#7FD9FF']\n )\n plt.xlabel('$\\phi$', fontsize=fontsize)\n plt.ylabel('$\\psi$', fontsize=fontsize)\n plt.xticks(ticks)\n plt.yticks(ticks)\n plt.tick_params(direction=\"out\")\n plt.margins(0.05)\n ax = plt.axes()\n ax.spines['right'].set_color('none')\n ax.spines['top'].set_color('none')\n ax.spines['left'].set_smart_bounds(True)\n ax.spines['bottom'].set_smart_bounds(True)\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n\n\ndef ramachandran(torsion_angles, fragment, target_pdb=None,\n output_writer=None, output_dir=None):\n \"\"\"\n Plot ramachandran of a set of torsion angles for a given fragment\n\n :param torsion_angles: Dictionary with torsion angles phi and psi\n :param fragment: Fragment identifier, used for displaying purposes\n \"\"\"\n target_pdb = None\n plt.figure()\n ramachandran_surface()\n plt.title('Ramachandran plot for ' + fragment)\n plt.scatter(\n x=torsion_angles['phi'],\n y=torsion_angles['psi'],\n s=[1.05 ** x for x in torsion_angles['identity']],\n c=[DSSP_to_color[ss] for ss in torsion_angles['central_ss']],\n marker='o',\n alpha=0.5,\n )\n if target_pdb and (target_pdb in list(torsion_angles['pdb'])):\n i = list(torsion_angles['pdb']).index(target_pdb)\n plt.scatter(\n x=torsion_angles['phi'][i],\n y=torsion_angles['psi'][i],\n marker='D',\n c='red',\n s=50\n )\n if output_writer:\n output_writer.savefig(dpi=150)\n if output_dir:\n plt.savefig(\n os.path.join(output_dir, 'ramachandran', fragment + '.svg'),\n format='svg', dpi=300\n )\n plt.close()\n" ]
[ [ "matplotlib.pyplot.yticks", "pandas.read_csv", "matplotlib.pyplot.title", "matplotlib.pyplot.scatter", "matplotlib.pyplot.figure", "matplotlib.pyplot.margins", "matplotlib.pyplot.axes", "matplotlib.pyplot.close", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.xticks", "matplotlib.pyplot.tick_params", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
buctlab/NIO
[ "094e688dd1cd3def7f31cd16ff927d4324651422", "094e688dd1cd3def7f31cd16ff927d4324651422" ]
[ "visualizer/plot_mf_param_opt/plot_time_cost_bar.py", "applications/parameter_optimization/optimized_pso.py" ]
[ "import matplotlib.pyplot as plt\nimport pandas as pd\nfrom numpy import arange, array\nimport os\nimport logging\n\nlogging.basicConfig()\nlogger = logging.getLogger('PlotTimeCost')\nlogger.setLevel('INFO')\n\n\nclass PlotTimeCostBar:\n\n def __init__(self, data, path, show=False):\n self.data = data\n self.path = path\n self.show_flag = show\n (filepath, tempfilename) = os.path.split(path)\n if not os.path.exists(filepath):\n os.makedirs(filepath)\n (filename, extension) = os.path.splitext(tempfilename)\n self.format = extension[1:]\n\n def plot(self):\n data = array([0, 0, 0])\n data[1:] = self.data['Time Cost'].values\n\n fig = plt.figure(figsize=(6, 6))\n ax = fig.add_subplot(111)\n width = 0.5\n\n xticks = self.data.index\n n = data.shape[0]\n ind = arange(n)\n data = data / 3600\n colors = ['black', 'tab:blue', 'tab:green', 'tab:red', 'tab:purple', 'tab:brown']\n plt.bar(x=ind, height=data, width=width, color=colors)\n\n ax.set_xticks(ind[1:])\n ax.set_xticklabels(xticks)\n\n # ax.set_xlabel('Multi-fidelity control strategy', fontsize=16)\n ax.tick_params(labelsize=12)\n ax.set_ylabel('Time Cost (h)', fontsize=16)\n\n if self.show_flag:\n plt.show()\n fig.savefig(self.path, format=self.format, dpi=80, bbox_inches='tight')\n", "from applications.parameter_optimization.optimized_nio_base import OptimizedNIOBase\nfrom algorithms import ParticleSwarmOptimization\nfrom numpy import array\nimport logging\n\nlogging.basicConfig()\nlogger = logging.getLogger('OptimizedPSOFunc')\nlogger.setLevel('INFO')\n\n\nclass OptimizedPSOFunc(OptimizedNIOBase):\n\n def __init__(self, lower=(0, 0, 0, 0), upper=(1, 10, 10, 20), dimension=4, benchmark=None):\n super(OptimizedPSOFunc, self).__init__(lower, upper, dimension, benchmark)\n\n def get_optimum(self):\n return array([[0.7, 2.0, 2.0, 4.0]]), self.benchmark.get_optimum()[-1]\n\n def eval(self, params):\n pso = ParticleSwarmOptimization(w=params[0], c1=params[1], c2=params[2], v_max=params[3], func=self.benchmark, iterations=200)\n best = pso.run_return_best_val()\n self.eval_count += pso.eval_count\n return best\n" ]
[ [ "numpy.arange", "matplotlib.pyplot.bar", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ], [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
bernardolemos/Automatic_Face_Blurt
[ "7f9127763b391dacc0f89b62a05fe149f02a065b" ]
[ "blur_faces.py" ]
[ "import os\nimport cv2\nimport time\nimport argparse\nimport numpy as np\nfrom mtcnn import detect_face\nimport tensorflow as tf\nfrom PIL import Image, ImageDraw\n\n## MTCNN face localizer\ndef mtcnn_localize_faces(image, pnet, rnet, onet, minsize=20, threshold=[0.7, 0.8, 0.85], factor=0.75):\n \"\"\"\n Localize faces & its landmarks in image using MTCNN\n \n Params\n :image\n :minsize - min. face size\n :threshold - a list/array with 3 values. The thresholds for pnet, rnet & onet, respectively \n :factor - sclaing factor for image octave\n\n Return\n :bbs - list of bounding boxes\n :lds - list of face landmarks\n \"\"\"\n \n\n image = image[:, :, 0:3]\n bounding_boxes, landmarks = detect_face.detect_face(image, minsize, pnet, rnet, onet, threshold, factor)\n nrof_faces = bounding_boxes.shape[0]\n\n bbs = list()\n lds = list()\n if nrof_faces > 0:\n det = bounding_boxes[:, 0:4]\n \n bb = np.zeros((nrof_faces,4), dtype=np.int32)\n lands = np.zeros((nrof_faces,10), dtype=np.int32)\n landmarks = np.reshape(landmarks, (nrof_faces, 10))\n for i in range(nrof_faces):\n ## Convert to int32\n lands[i] = np.ravel(landmarks[i])\n bb[i] = np.ravel(det[i])\n # inner exception\n if bb[i][0] <= 0 or bb[i][1] <= 0 or bb[i][2] >= len(image[0]) or bb[i][3] >= len(image):\n print('face is inner of range!')\n continue\n else:\n ## get as top, right, bottom, left\n bbs.append((bb[i][1], bb[i][2], bb[i][3], bb[i][0]))\n lds.append(lands[i])\n \n return bbs, lds\n\n\ndef load_images(images_path):\n \"\"\"\n Read images from directory\n\n Params\n :images_path - path to images\n\n Return\n :image_l - list of images as arrays\n : images_name - list of images' file names\n \"\"\"\n # list of images, as arrays\n images_l = []\n # get images\n images_name = os.listdir(images_path)\n # read images\n for i in images_name:\n image = cv2.imread(os.path.join(images_path, i))\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n # if image.endswith(\".png\"):\n # images_l.append(image)\n images_l.append(image)\n \n return images_l, images_name\n\ndef main(args):\n st = time.time()\n #check if input directory exists\n if not os.path.exists(args.input_directory):\n print(\"Error! No input direcotory\", args.input_directory)\n return -1\n\n # read images\n images_l, images_paths = load_images(args.input_directory)\n\n #create tensorflow session\n # init. 
tensorflow session\n with tf.Graph().as_default():\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.75)\n sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))\n with sess.as_default():\n pnet, rnet, onet = detect_face.create_mtcnn(sess, './mtcnn')\n #localize and blur faces, iterate over images\n for image, image_path in zip(images_l, images_paths):\n print(\"Processing\", image_path + \"...\")\n\n bbs, lds = mtcnn_localize_faces(image, pnet, rnet, onet, minsize=20, threshold=[0.7, 0.8, 0.85], factor=0.75)\n\n # skip iteration if there's no face\n if len(bbs) == 0:\n print(\"Couldn't find faces!\")\n continue\n\n #get faces\n for bb, ld in zip(bbs, lds):\n #get bounding box\n #top, right, bottom, left\n top = bb[0]\n right = bb[1]\n bottom = bb[2]\n left = bb[3]\n # build landmarks' x, y pairs\n points = []\n for x, y in zip(ld[:5], ld[5:]):\n points.append(x)\n points.append(y)\n\n #get face thumbnail\n face_image = image[top:bottom, left:right]\n #blur face thumbnail\n if args.blur > 0:\n face_image = cv2.GaussianBlur(face_image, (105, 105), args.blur)\n #black\n else:\n face_image = np.zeros(face_image.shape)\n \n #write blurred face to image\n image[top:bottom, left:right] = face_image\n\n #PIL image \n # pil_image = Image.fromarray(image)\n # pil_image_face = Image.fromarray(face_image)\n\n #eyes' landmarks: first two pairs\n # get larger rectangle\n # points[0] = points[0] * 0.9\n # points[1] = points[1] * 0.9\n # points[2] = points[2] * 1.1\n # points[3] = points[3] * 1.1\n # draw = ImageDraw.Draw(pil_image)\n #cover eyes with rectangle\n # draw.rectangle(points[:4], fill=\"black\")\n\n #create output directory if it doesn't exist\n if not os.path.exists(args.output_directory):\n os.makedirs(args.output_directory)\n\n #save image\n pil_image = Image.fromarray(image)\n pil_image.save(os.path.join(args.output_directory, image_path))\n\n print(\"Total running time:\", time.time() - st, \"sec.\")\n \n return 0\n\nif __name__==\"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('-id', '--input_directory', type=str, nargs='?', default=\"./images\")\n parser.add_argument('-od', '--output_directory', type=str, nargs='?', default=\"./blurs\")\n parser.add_argument('-b', '--blur', type=int, nargs='?', default=46)\n args = parser.parse_args()\n\n main(args)" ]
[ [ "tensorflow.Graph", "numpy.reshape", "tensorflow.ConfigProto", "tensorflow.GPUOptions", "numpy.ravel", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mcuntz/pyjams
[ "1393c68a9e21a1e7b88291229120641fdaddc998" ]
[ "tests/test_gridcellarea.py" ]
[ "#!/usr/bin/env python\n\"\"\"\nThis is the unittest for gridcellarea module.\n\npython -m unittest -v tests/test_gridcellarea.py\npython -m pytest --cov=pyjams --cov-report term-missing -v tests/test_gridcellarea.py\n\n\"\"\"\nimport unittest\n\n\ndef _flatten(itr):\n import numpy as np\n fitr = np.array(itr).flatten()\n if len(fitr) == 0:\n return list(fitr)\n else:\n if isinstance(fitr[0], str):\n return [ i for i in fitr ]\n else:\n return [ i if np.isfinite(i) else np.finfo(float).max\n for i in fitr ]\n\n\nclass TestGridcellarea(unittest.TestCase):\n \"\"\"\n Tests for gridcellarea.py\n \"\"\"\n\n def test_gridcellarea(self):\n import numpy as np\n from pyjams import gridcellarea\n\n lat = [0., 2.5, 5.0]\n lon = [0., 3.75, 7.5]\n\n rearth = 6371009.\n fsoll = [[1.15906555e+11, 1.15906555e+11, 1.15906555e+11],\n [1.15796237e+11, 1.15796237e+11, 1.15796237e+11],\n [1.15465495e+11, 1.15465495e+11, 1.15465495e+11]]\n\n rearth1 = 6371000.\n fsoll1 = [[1.15906227e+11, 1.15906227e+11, 1.15906227e+11],\n [1.15795910e+11, 1.15795910e+11, 1.15795910e+11],\n [1.15465169e+11, 1.15465169e+11, 1.15465169e+11]]\n\n # descending latitudes\n dlat = [0., -2.5, -5.0]\n\n # meridian within longitudes\n lon360 = [360., 3.75, 7.5]\n # dateline within longitudes\n lon180 = [180., -180.+3.75, -180.+7.5]\n\n # list\n fout = gridcellarea(lat, lon)\n assert isinstance(fout, np.ndarray)\n self.assertEqual(_flatten(np.around(fout, -3)), _flatten(fsoll))\n\n # tuple, list\n fout = gridcellarea(tuple(lat), lon)\n assert isinstance(fout, np.ndarray)\n self.assertEqual(_flatten(np.around(fout, -3)), _flatten(fsoll))\n\n # 2 tuple\n fout = gridcellarea(tuple(lat), tuple(lon))\n assert isinstance(fout, np.ndarray)\n self.assertEqual(_flatten(np.around(fout, -3)), _flatten(fsoll))\n\n # array, list\n fout = gridcellarea(np.array(lat), lon)\n assert isinstance(fout, np.ndarray)\n self.assertEqual(_flatten(np.around(fout, -3)), _flatten(fsoll))\n\n # 2 array\n fout = gridcellarea(np.array(lat), np.array(lon))\n assert isinstance(fout, np.ndarray)\n self.assertEqual(_flatten(np.around(fout, -3)), _flatten(fsoll))\n\n # rearth\n fout = gridcellarea(lat, lon, rearth=rearth)\n assert isinstance(fout, np.ndarray)\n self.assertEqual(_flatten(np.around(fout, -3)), _flatten(fsoll))\n\n # rearth classic\n fout = gridcellarea(lat, lon, rearth=rearth1)\n assert isinstance(fout, np.ndarray)\n self.assertEqual(_flatten(np.around(fout, -3)), _flatten(fsoll1))\n\n # globe\n fout = gridcellarea(lat, lon, globe=True)\n fsoll2 = [[3.79774834e+12, 3.79774834e+12, 3.79774834e+12],\n [1.15796240e+11, 1.15796240e+11, 1.15796240e+11],\n [3.61823239e+12, 3.61823239e+12, 3.61823239e+12]]\n assert isinstance(fout, np.ndarray)\n self.assertEqual(_flatten(np.around(fout, -4)), _flatten(fsoll2))\n\n # descending lats\n fout = gridcellarea(dlat, lon, globe=True)\n assert isinstance(fout, np.ndarray)\n self.assertEqual(_flatten(np.around(fout, -4)), _flatten(fsoll2))\n\n # meridian in lon\n fout = gridcellarea(lat, lon360)\n assert isinstance(fout, np.ndarray)\n self.assertEqual(_flatten(np.around(fout, -3)), _flatten(fsoll))\n\n # date line in lon\n fout = gridcellarea(lat, lon180)\n assert isinstance(fout, np.ndarray)\n self.assertEqual(_flatten(np.around(fout, -3)), _flatten(fsoll))\n\n # errors\n # lat > 90\n lat1 = [0., 2.5, 95.0]\n self.assertRaises(AssertionError, gridcellarea, lat1, lon)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "numpy.around", "numpy.array", "numpy.isfinite", "numpy.finfo" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sudheernaidu53/other_utils
[ "8e7f32ff0a3ded3910a957d821d6f4eb15bae3d8" ]
[ "loan_estimator/loan_estimator.py" ]
[ "# This file is to get a rough estimation of how much you need to pay or how many months you need to pay for a loan\n\nimport pandas as pd\nimport numpy as np\nfrom IPython.display import display\n\ndef group(number):\n \"\"\"show money in laks and crores (indian way of presenting money)\"\"\"\n s = '%d' % number\n groups = []\n groups.append(s[-3:])\n s = s[:-3]\n while s and s[-1].isdigit():\n groups.append(s[-2:])\n s = s[:-2]\n return s + ','.join(reversed(groups))\n\n\nclass loan:\n def __init__(self, R=8.1, principal=30, years=5):\n \"\"\"R is yearly interest\n principal is principal amount in lakhs\n years = number of years\n \"\"\"\n self.R = R * 0.01\n self.r = R * 0.01 * (1 / 12)\n self.principal = principal * 100000\n self.years = years\n self.num_months = self.years * 12\n self.months = {\"Jan\": 31, \"Feb\": 28, \"Mar\": 31, \"Apr\": 30, \"May\": 31, \"June\": 30, \"Jul\": 31, \"Aug\": 31,\n \"Sep\": 30, \"Oct\": 31, \"Nov\": 30, \"Dec\": 31}\n\n def find_monthly_emi_flat(self, print_=True):\n \"\"\" find how much emi need to be paid given some principal, interest, and number of months when the interest scheme is flat\"\"\"\n\n total = self.principal * (1 + self.R * (self.num_months / 12))\n if print_:\n print(\"------------- flat interest -------------------\")\n print(\"total amount you are paying over full period:\", total)\n print(\"monthly installment/emi : {}\".format(total / self.num_months))\n return total, total / self.num_months\n\n def num_months_emi_diminishing(self, emi, principal=0, interest=0, print_=True):\n \"\"\"find the number of months you need to pay for, if you are paying emi every month\"\"\"\n \"\"\"emi is in rupees, principal is in lakhs, interest is yearly interest\"\"\"\n \"\"\"n = np.log((E/r)/(E/r -P))/np.log(1+r) \"\"\"\n\n if not principal:\n principal = self.principal\n if not interest:\n interest = self.r\n num_months = np.log((emi / interest) / (emi / interest - principal)) / np.log(1 + interest)\n if print_:\n print(\"------------- diminishing interest -------------------\")\n print(\"you need to pay {} monthly, for {} months\".format(emi, num_months))\n return num_months\n\n def find_monthly_emi_diminishing(self, num_months=0, principal=0, print_=True):\n \"\"\" find how much emi need to be paid given some principal, interest, and number of months when the interest scheme is flat\"\"\"\n \"\"\"P*r*(1 + 1/(np.power(1+r,60)-1))\"\"\"\n\n if not num_months:\n num_months = self.num_months\n if not principal:\n principal = self.principal\n else:\n principal *= 100000\n monthly_emi = principal * self.r * (1 + 1 / (np.power(1 + self.r, num_months) - 1))\n if print_:\n print(\"------------- diminishing interest -------------------\")\n print(\" you need to pay {} monthly, for {} months\".format(monthly_emi, num_months))\n print(\"total amount you will pay over full period is roughly {}\".format(monthly_emi * num_months))\n return monthly_emi\n\n def confirm_diminishing(self, emi, print_=False):\n \"\"\" function to confirm if the interest scheme is dimishing\"\"\"\n principal = self.principal\n i = 1\n while principal > 0:\n principal += ((self.r) * principal - emi)\n if print_:\n print(i, principal)\n i += 1\n if abs(principal / self.principal) < 0.001:\n print(\"final net amount is {} after {} months\".format(principal, i - 1))\n return principal, i\n\n\n## Usage\nR = 10.5 #10.5 % monthly interest rate\nprincipal = 30 # principal is 30 lakhs\nyears = 4.5 # loan term period is 4.5 years\nloan1 = loan(R,principal,years) # initialize a loan 
instance\n\nloan1.find_monthly_emi_flat()\nloan1.num_months_emi_diminishing(35000)\nloan1.find_monthly_emi_diminishing()\n\n#-----------output-----------------------\n# ------------- flat interest -------------------\n# total amount you are paying over full period: 4417500.0\n# monthly installment/emi : 81805.55555555556\n# ------------- diminishing interest -------------------\n# you need to pay 35000 monthly, for 159.1257820098328 months\n# ------------- diminishing interest -------------------\n# you need to pay 69948.58010333449 monthly, for 54.0 months\n# total amount you will pay over full period is roughly 3777223.3255800623\n\ndef get_df():\n # make a table to find how much emi to be paid for different principals over different tenure/periods\n\n loan1 = loan(10.5,principal = 30, years =5)\n # print(loan1.find_monthly_emi_diminishing())\n\n years = [2,3,4,5]\n amounts = [15,20,25]\n yearss = [str(x)+'y' for x in years]\n df = pd.DataFrame(columns=yearss)\n total = pd.DataFrame(columns = yearss)\n for amount in amounts:\n arr=[]\n arr1 = []\n for year in years:\n temp = loan1.find_monthly_emi_diminishing(num_months=year*12, principal=amount,print_ = False)\n arr.append(group(round(int(temp),-2))) # rounding to closest hundred\n arr1.append(group(round(int(temp*year*12),-2)))\n df.loc[str(amount)+'Lks']=arr\n total.loc[str(amount)+'Lks']=arr1\n\n print(\"--------------------- emi ------------------\")\n display(df)\n\n print(\"---------------------- total ---------------------\")\n display(total)\n\n# get_df()" ]
[ [ "numpy.log", "pandas.DataFrame", "numpy.power" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
hmhuy2000/Reinforcement-Learning-SuttonBartoI
[ "97ca9dc11c4cb4fda74b144e658c3eac756131ff" ]
[ "chap 5/5_5.py" ]
[ "import numpy as np \nimport matplotlib.pyplot as plt\nfrom tqdm import trange\nimport seaborn as sns\nimport random\n\n# ========================== CFG =======================\n\nclass CFG:\n HIT = 1\n STOP = 0\n actions = [STOP, HIT]\n WIN = 1\n DRAW = 0\n LOSE = -1\n\n\n# ======================== function ======================\n\ndef random_card():\n card = np.random.randint(13) + 1\n card = min(card, 10)\n return card\n\ndef value_card(card):\n if (card == 1):\n return 11\n else:\n return card\n\ndef random_play(policy_player, policy_dealer, init_state = None, debug = False):\n player_ace = 0\n player_ace_1 = 0\n dealer_ace = 0\n dealer_ace_1 = 0\n player_sum = 0\n dealer_sum = 0\n dealer_show = 0\n his = []\n if (init_state):\n (player_ace, dealer_show, player_sum, action) = init_state\n if (debug):\n print(f'player init {player_sum} dealer show {dealer_show} action {action}')\n\n if (dealer_show == 1):\n dealer_ace += 1\n dealer_sum += value_card(dealer_show)\n\n card = random_card()\n if (card == 1):\n dealer_ace += 1\n dealer_sum += value_card(card)\n if (dealer_sum > 21):\n dealer_sum -= 10\n dealer_ace_1 += 1\n\n his.append((player_ace > player_ace_1, player_sum, dealer_show, action))\n if (action == CFG.HIT):\n card = random_card()\n if (debug):\n print(f'player {player_sum} {card}')\n if (card == 1):\n player_ace += 1\n player_sum += value_card(card)\n if (player_sum > 21 and player_ace > player_ace_1):\n player_sum -= 10\n player_ace_1 += 1\n \n\n else:\n while(player_sum <12):\n card = random_card()\n if (card == 1):\n player_ace += 1\n player_sum += value_card(card)\n if (player_sum > 21):\n player_sum -= 10\n player_ace_1 += 1\n \n if (True):\n card = random_card()\n dealer_show = card\n if (card == 1):\n dealer_ace += 1\n dealer_sum += value_card(card)\n\n card = random_card()\n if (card == 1):\n dealer_ace += 1\n dealer_sum += value_card(card)\n if (dealer_sum > 21):\n dealer_sum -= 10\n dealer_ace_1 += 1\n\n while(True):\n if (player_sum > 21):\n if (debug):\n print(f'quát {player_sum}')\n return his, -1\n action = policy_player[int(player_ace > player_ace_1), player_sum, dealer_show]\n his.append((player_ace > player_ace_1, player_sum, dealer_show, action))\n if (action == CFG.STOP):\n break\n card = random_card()\n if (debug):\n print(f'player {player_sum} {card}')\n if (card == 1):\n player_ace += 1\n player_sum += value_card(card)\n if (player_sum > 21 and player_ace > player_ace_1):\n player_sum -= 10\n player_ace_1 += 1\n \n while(True):\n if (dealer_sum == 21):\n if(debug):\n print(f'player {player_sum} dealer {dealer_sum}')\n if (player_sum == 21):\n return his, 0\n else:\n return his, -1\n if (dealer_sum > 21):\n return his, 1\n action = policy_dealer[dealer_sum]\n if (action == CFG.STOP):\n break\n card = random_card()\n if(debug):\n print(f'dealer {dealer_sum} {card}')\n if (card == 1):\n dealer_ace += 1\n dealer_sum += value_card(card)\n if(dealer_sum > 21 and dealer_ace > dealer_ace_1):\n dealer_sum -= 10\n dealer_ace_1 += 1\n \n if(debug):\n print(f'player sum {player_sum} dealer sum {dealer_sum}')\n if (player_sum < dealer_sum):\n return his, -1\n if (player_sum == dealer_sum):\n return his, 0\n if (player_sum > dealer_sum):\n return his, 1\n \n\ndef MonteCarloPrediction(Num_iter, debug = False):\n\n # ========================== init =======================\n\n policy_dealer = np.zeros((22))\n policy_dealer[:17] = CFG.HIT\n policy_dealer[17:] = CFG.STOP\n\n policy_player = np.zeros((2, 22, 11), dtype = int)\n for i in range(2):\n for j in range(22):\n 
for k in range(11):\n policy_player[i,j,k] = random.choice(CFG.actions)\n\n \n\n value_action = np.zeros((2, 10, 10, 2))\n cnt = np.ones((2, 10, 10, 2))\n\n for iter in trange(Num_iter):\n if (debug):\n print(f'---------------- {iter} -------------------------')\n check = set()\n init_usable = random.choice(range(2))\n init_show = random_card()\n init_player_sum = random.choice(range(12,22))\n init_action = random.choice(CFG.actions)\n\n his, reward = random_play(policy_player, policy_dealer,\n (init_usable, init_show, init_player_sum, init_action), debug)\n if (debug):\n print(his, reward)\n for (usable, player_sum, dealer_show, action) in his:\n if ((usable, player_sum, dealer_show, action) in check):\n continue\n check.add((usable, player_sum, dealer_show, action))\n\n value_action[int(usable), player_sum - 12, dealer_show - 1, action] += reward\n cnt[int(usable), player_sum - 12, dealer_show - 1, action] += 1\n Q = np.zeros((2))\n Q[0] = value_action[int(usable), player_sum - 12, dealer_show - 1, 0]/cnt[int(usable), player_sum - 12, dealer_show - 1, 0]\n Q[1] = value_action[int(usable), player_sum - 12, dealer_show - 1, 1]/cnt[int(usable), player_sum - 12, dealer_show - 1, 1]\n policy_player[int(usable), player_sum, dealer_show] = np.argmax(Q)\n arr = value_action/cnt\n return policy_player[0, 12:,1:], policy_player[1, 12:,1:], arr\n\n# ======================== main ==========================\n\nNoUsable500k, Usable500k, arr = MonteCarloPrediction(10000000)\n\nvalue = np.zeros((2,10,10))\n\nfor i in range(2):\n for j in range(10):\n for k in range(10):\n value[i,j,k] = np.max(arr[i,j,k,:])\n\n\nax = sns.heatmap(value[0,...], cmap=\"YlGnBu\", xticklabels=range(1, 11)\n ,yticklabels=list(range(12, 22)))\nplt.savefig('figure_5_5_value_NoUsable.png')\nplt.close()\n\nax = sns.heatmap(value[1,...], cmap=\"YlGnBu\", xticklabels=range(1, 11)\n ,yticklabels=list(range(12, 22)))\nplt.savefig('figure_5_5_value_Usable.png')\nplt.close()\n\nax = sns.heatmap(NoUsable500k, cmap=\"YlGnBu\", xticklabels=range(1, 11)\n ,yticklabels=list(range(12, 22)))\nplt.savefig('figure_5_5_policy_NoUsable.png')\nplt.close()\n\n\nax = sns.heatmap(Usable500k, cmap=\"YlGnBu\", xticklabels=range(1, 11)\n ,yticklabels=list(range(12, 22)))\nplt.savefig('figure_5_5_policy_Usable.png')\nplt.close()" ]
[ [ "matplotlib.pyplot.savefig", "numpy.ones", "numpy.max", "numpy.argmax", "matplotlib.pyplot.close", "numpy.zeros", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
SebastianMM-96/regex-wordToken
[ "1e707f03638ebe9365974bcced8ab8b0d42c1295" ]
[ "fake-news/training-testing-classification-model/fakeNewsModel-CountVectorizer.py" ]
[ "# Import the necessary modules\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn import metrics\n\n# Instantiate a Multinomial Naive Bayes classifier: nb_classifier\nnb_classifier = MultinomialNB()\n\n# Fit the classifier to the training data\nnb_classifier.fit(count_train, y_train)\n\n# Create the predicted tags: pred\npred = nb_classifier.predict(count_test)\n\n# Calculate the accuracy score: score\nscore = metrics.accuracy_score(y_test, pred)\nprint(score)\n\n# Calculate the confusion matrix: cm\ncm = metrics.confusion_matrix(y_test, pred, labels=['FAKE', 'REAL'])\nprint(cm)" ]
[ [ "sklearn.naive_bayes.MultinomialNB", "sklearn.metrics.confusion_matrix", "sklearn.metrics.accuracy_score" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
MagiaSN/pytorch
[ "7513455c743d3d644b45a804902c1a0d14b69f45", "7513455c743d3d644b45a804902c1a0d14b69f45", "7513455c743d3d644b45a804902c1a0d14b69f45", "7513455c743d3d644b45a804902c1a0d14b69f45" ]
[ "torch/nn/quantized/modules/__init__.py", "torch/nn/modules/lazy.py", "torch/utils/tensorboard/_pytorch_graph.py", "benchmarks/operator_benchmark/c2/batch_gather_test.py" ]
[ "import torch\nfrom torch.nn.modules.pooling import MaxPool2d\n\nfrom .activation import ReLU6, Hardswish, ELU, LeakyReLU, Sigmoid\nfrom .batchnorm import BatchNorm2d, BatchNorm3d\nfrom .normalization import LayerNorm, GroupNorm, InstanceNorm1d, \\\n InstanceNorm2d, InstanceNorm3d\nfrom .conv import _ConvNd, Conv1d, Conv2d, Conv3d\nfrom .conv import ConvTranspose1d, ConvTranspose2d, ConvTranspose3d\nfrom .linear import Linear\nfrom .embedding_ops import Embedding, EmbeddingBag\n\nfrom .functional_modules import FloatFunctional, FXFloatFunctional, QFunctional\n\n\nclass Quantize(torch.nn.Module):\n r\"\"\"Quantizes an incoming tensor\n\n Args:\n `scale`: scale of the output Quantized Tensor\n `zero_point`: zero_point of output Quantized Tensor\n `dtype`: data type of output Quantized Tensor\n\n Attributes:\n `scale`, `zero_point`, `dtype`\n\n Examples::\n >>> t = torch.tensor([[1., -1.], [1., -1.]])\n >>> scale, zero_point, dtype = 1.0, 2, torch.qint8\n >>> qm = Quantize(scale, zero_point, dtype)\n >>> qt = qm(t)\n >>> print(qt)\n tensor([[ 1., -1.],\n [ 1., -1.]], size=(2, 2), dtype=torch.qint8, scale=1.0, zero_point=2)\n \"\"\"\n\n scale: torch.Tensor\n zero_point: torch.Tensor\n\n def __init__(self, scale, zero_point, dtype):\n super(Quantize, self).__init__()\n self.register_buffer('scale', torch.tensor([scale]))\n self.register_buffer('zero_point', torch.tensor([zero_point], dtype=torch.long))\n self.dtype = dtype\n\n def forward(self, X):\n return torch.quantize_per_tensor(X, float(self.scale),\n int(self.zero_point), self.dtype)\n\n @staticmethod\n def from_float(mod):\n assert hasattr(mod, 'activation_post_process')\n scale, zero_point = mod.activation_post_process.calculate_qparams()\n return Quantize(scale.float().item(), zero_point.long().item(), mod.activation_post_process.dtype)\n\n def extra_repr(self):\n return 'scale={}, zero_point={}, dtype={}'.format(self.scale, self.zero_point, self.dtype)\n\n\nclass DeQuantize(torch.nn.Module):\n r\"\"\"Dequantizes an incoming tensor\n\n Examples::\n >>> input = torch.tensor([[1., -1.], [1., -1.]])\n >>> scale, zero_point, dtype = 1.0, 2, torch.qint8\n >>> qm = Quantize(scale, zero_point, dtype)\n >>> quantized_input = qm(input)\n >>> dqm = DeQuantize()\n >>> dequantized = dqm(quantized_input)\n >>> print(dequantized)\n tensor([[ 1., -1.],\n [ 1., -1.]], dtype=torch.float32)\n \"\"\"\n\n def __init__(self):\n super(DeQuantize, self).__init__()\n\n def forward(self, Xq):\n return Xq.dequantize()\n\n @staticmethod\n def from_float(mod):\n return DeQuantize()\n\n__all__ = [\n 'BatchNorm2d',\n 'BatchNorm3d',\n '_ConvNd',\n 'Conv1d',\n 'Conv2d',\n 'Conv3d',\n 'ConvTranspose1d',\n 'ConvTranspose2d',\n 'ConvTranspose3d',\n 'DeQuantize',\n 'ELU',\n 'Embedding',\n 'EmbeddingBag',\n 'GroupNorm',\n 'Hardswish',\n 'InstanceNorm1d',\n 'InstanceNorm2d',\n 'InstanceNorm3d',\n 'LayerNorm',\n 'LeakyReLU',\n 'Linear',\n 'MaxPool2d',\n 'Quantize',\n 'ReLU6',\n 'Sigmoid',\n # Wrapper modules\n 'FloatFunctional',\n 'FXFloatFunctional',\n 'QFunctional',\n]\n", "import itertools\nfrom typing_extensions import Protocol\nimport warnings\n\nimport torch\nfrom ..parameter import is_lazy\n\n\nclass _LazyProtocol(Protocol):\n \"\"\"This is to avoid errors with mypy checks for\n The attributes in a mixin:\n https://mypy.readthedocs.io/en/latest/more_types.html#mixin-classes\n \"\"\"\n def _register_load_state_dict_pre_hook(self, hook):\n ...\n\n def register_forward_pre_hook(self, hook):\n ...\n\n def _lazy_load_hook(\n self, state_dict, prefix, local_metadata, 
strict,\n missing_keys, unexpected_keys, error_msgs):\n ...\n\n def _get_name(self):\n ...\n\n def _infer_parameters(self, module, input):\n ...\n\n @property\n def _parameters(self):\n ...\n\n @property\n def _buffers(self):\n ...\n\n @property\n def _non_persistent_buffers_set(self):\n ...\n\n @property\n def _load_hook(self):\n ...\n\n @property\n def _initialize_hook(self):\n ...\n\n\nclass LazyModuleMixin:\n r\"\"\"A mixin for modules that lazily initialize parameters, also known as \"lazy modules.\"\n\n .. warning:\n Lazy modules are an experimental new feature under active development,\n and their API is likely to change.\n\n Modules that lazily initialize parameters, or \"lazy modules\",\n derive the shapes of their parameters from the first input(s)\n to their forward method. Until that first forward they contain\n :class:`torch.nn.UninitializedParameter` s that should not be accessed\n or used, and afterward they contain regular :class:`torch.nn.Parameter` s.\n Lazy modules are convenient since they don't require computing some\n module arguments, like the :attr:`in_features` argument of a\n typical :class:`torch.nn.Linear`.\n\n After construction, networks with lazy modules should first\n be converted to the desired dtype and placed on the expected device.\n This is because lazy modules only perform shape inference so the usual dtype\n and device placement behavior applies.\n The lazy modules should then perform \"dry runs\" to initialize all the components in the module.\n These \"dry runs\" send inputs of the correct size, dtype, and device through\n the network and to each one of its lazy modules. After this the network can be used as usual.\n\n >>> class LazyMLP(torch.nn.Module):\n ... def __init__(self):\n ... super().__init__()\n ... self.fc1 = torch.nn.LazyLinear(10)\n ... self.relu1 = torch.nn.ReLU()\n ... self.fc2 = torch.nn.LazyLinear(1)\n ... self.relu2 = torch.nn.ReLU()\n ...\n ... def forward(self, input):\n ... x = self.relu1(self.fc1(input))\n ... y = self.relu2(self.fc2(x))\n ... 
return y\n >>> # constructs a network with lazy modules\n >>> lazy_mlp = LazyMLP()\n >>> # transforms the network's device and dtype\n >>> # NOTE: these transforms can and should be applied after construction and before any 'dry runs'\n >>> lazy_mlp = lazy_mlp.cuda().double()\n >>> lazy_mlp\n LazyMLP( (fc1): LazyLinear(in_features=0, out_features=10, bias=True)\n (relu1): ReLU()\n (fc2): LazyLinear(in_features=0, out_features=1, bias=True)\n (relu2): ReLU()\n )\n >>> # performs a dry run to initialize the network's lazy modules\n >>> lazy_mlp(torch.ones(10,10).cuda())\n >>> # after initialization, LazyLinear modules become regular Linear modules\n >>> lazy_mlp\n LazyMLP(\n (fc1): Linear(in_features=10, out_features=10, bias=True)\n (relu1): ReLU()\n (fc2): Linear(in_features=10, out_features=1, bias=True)\n (relu2): ReLU()\n )\n >>> # attaches an optimizer, since parameters can now be used as usual\n >>> optim = torch.optim.SGD(lazy_mlp.parameters(), lr=0.01)\n\n A final caveat when using lazy modules is that the order of initialization of a network's\n parameters may change, since the lazy modules are always initialized after other modules.\n For example, if the LazyMLP class defined above had a :class:`torch.nn.LazyLinear` module\n first and then a regular :class:`torch.nn.Linear` second, the second module would be\n initialized on construction and the first module would be initialized during the first dry run.\n This can cause the parameters of a network using lazy modules to be initialized differently\n than the parameters of a network without lazy modules as the order of parameter initializations,\n which often depends on a stateful random number generator, is different.\n Check :doc:`/notes/randomness` for more details.\n\n Lazy modules can be serialized with a state dict like other modules. For example:\n\n >>> lazy_mlp = LazyMLP()\n >>> # The state dict shows the uninitialized parameters\n >>> lazy_mlp.state_dict()\n OrderedDict([('fc1.weight', Uninitialized parameter),\n ('fc1.bias',\n tensor([-1.8832e+25, 4.5636e-41, -1.8832e+25, 4.5636e-41, -6.1598e-30,\n 4.5637e-41, -1.8788e+22, 4.5636e-41, -2.0042e-31, 4.5637e-41])),\n ('fc2.weight', Uninitialized parameter),\n ('fc2.bias', tensor([0.0019]))])\n\n\n Lazy modules can load regular :class:`torch.nn.Parameter` s (i.e. you can serialize/deserialize\n initialized LazyModules and they will remain initialized)\n\n\n >>> full_mlp = LazyMLP()\n >>> # Dry run to initialize another module\n >>> full_mlp.forward(torch.ones(10, 1))\n >>> # Load an initialized state into a lazy module\n >>> lazy_mlp.load_state_dict(full_mlp.state_dict())\n >>> # The state dict now holds valid values\n >>> lazy_mlp.state_dict()\n OrderedDict([('fc1.weight',\n tensor([[-0.3837],\n [ 0.0907],\n [ 0.6708],\n [-0.5223],\n [-0.9028],\n [ 0.2851],\n [-0.4537],\n [ 0.6813],\n [ 0.5766],\n [-0.8678]])),\n ('fc1.bias',\n tensor([-1.8832e+25, 4.5636e-41, -1.8832e+25, 4.5636e-41, -6.1598e-30,\n 4.5637e-41, -1.8788e+22, 4.5636e-41, -2.0042e-31, 4.5637e-41])),\n ('fc2.weight',\n tensor([[ 0.1320, 0.2938, 0.0679, 0.2793, 0.1088, -0.1795, -0.2301, 0.2807,\n 0.2479, 0.1091]])),\n ('fc2.bias', tensor([0.0019]))])\n\n Note, however, that the loaded parameters will not be replaced when doing a \"dry run\" if they are initialized\n when the state is loaded. 
This prevents using initialized modules in different contexts.\n \"\"\"\n\n # modules inheriting from this will change their __class__ to the specified\n # one after they are fully initialized\n cls_to_become = None\n\n def __init__(self: _LazyProtocol, *args, **kwargs):\n # Mypy doesnt like this super call in a mixin\n super().__init__(*args, **kwargs) # type: ignore\n self._load_hook = self._register_load_state_dict_pre_hook(self._lazy_load_hook)\n self._initialize_hook = self.register_forward_pre_hook(self._infer_parameters)\n warnings.warn('Lazy modules are a new feature under heavy development '\n 'so changes to the API or functionality can happen at any moment.')\n\n def _save_to_state_dict(self: _LazyProtocol, destination, prefix, keep_vars):\n # This should be ideally implemented as a hook,\n # but we should override `detach` in the UninitializedParameter to return itself\n # which is not clean\n for name, param in self._parameters.items():\n if param is not None:\n if not (is_lazy(param) or keep_vars):\n param = param.detach()\n destination[prefix + name] = param\n for name, buf in self._buffers.items():\n if buf is not None and name not in self._non_persistent_buffers_set:\n if not (is_lazy(buf) or keep_vars):\n buf = buf.detach()\n destination[prefix + name] = buf\n\n def _lazy_load_hook(\n self: _LazyProtocol, state_dict, prefix, local_metadata, strict,\n missing_keys, unexpected_keys, error_msgs):\n \"\"\"load_state_dict pre-hook function for lazy buffers and parameters.\n\n The purpose of this hook is to adjust the current state and/or\n ``state_dict`` being loaded so that a module instance serialized in\n both un/initialized state can be deserialized onto both un/initialized\n module instance.\n See comment in ``torch.nn.Module._register_load_state_dict_pre_hook``\n for the details of the hook specification.\n \"\"\"\n for name, param in itertools.chain(self._parameters.items(), self._buffers.items()):\n key = prefix + name\n if key in state_dict and param is not None:\n input_param = state_dict[key]\n if is_lazy(param):\n # The current parameter is not initialized but the one being loaded one is\n # create a new parameter based on the uninitialized one\n if not is_lazy(input_param):\n with torch.no_grad():\n param.materialize(input_param.shape)\n\n def initialize_parameters(self: _LazyProtocol, *args, **kwargs):\n r\"\"\"Initialize parameters according to the input batch properties.\n This adds an interface to isolate parameter initialization from the\n forward pass when doing parameter shape inference.\n \"\"\"\n raise NotImplementedError('initialize_parameters is not implemented for {}'.format(self.__class__.__name__))\n\n def has_uninitialized_params(self: _LazyProtocol):\n r\"\"\"Check if a module has parameters that are not initialized\n \"\"\"\n # This is to avoid the JIT to track this parameter and force\n # custom modules __setstate__ to add it\n params = self._parameters.values()\n buffers = self._buffers.values()\n for param in itertools.chain(params, buffers):\n if is_lazy(param):\n return True\n return False\n\n def _infer_parameters(self: _LazyProtocol, module, input):\n r\"\"\"Infers the size and initializes the parameters according to the\n provided input batch.\n Given a module that contains parameters that were declared inferrable\n using :class:`torch.nn.parameter.ParameterMode.Infer`, runs a forward pass\n in the complete module using the provided input to initialize all the parameters\n as needed.\n The module is set into evaluation mode before running 
the forward pass in order\n to avoid saving statistics or calculating gradients\n \"\"\"\n module.initialize_parameters(*input)\n if module.has_uninitialized_params():\n raise RuntimeError('module {} has not been fully initialized'.format(self._get_name()))\n module._initialize_hook.remove()\n module._load_hook.remove()\n delattr(module, '_initialize_hook')\n delattr(module, '_load_hook')\n if module.cls_to_become is not None:\n module.__class__ = module.cls_to_become\n\n\n def _replicate_for_data_parallel(self: _LazyProtocol):\n raise RuntimeError('Modules with uninitialized parameters can\\'t be used with `DataParallel`. '\n 'Run a dummy forward pass to correctly initialize the modules')\n", "from collections import OrderedDict\nfrom typing import Dict, Any\n\nfrom tensorboard.compat.proto.config_pb2 import RunMetadata\nfrom tensorboard.compat.proto.graph_pb2 import GraphDef\nfrom tensorboard.compat.proto.step_stats_pb2 import StepStats, DeviceStepStats\nfrom tensorboard.compat.proto.versions_pb2 import VersionDef\n\nimport torch\nfrom ._proto_graph import node_proto\n\nmethods_OP = ['attributeNames', 'hasMultipleOutputs', 'hasUses', 'inputs',\n 'kind', 'outputs', 'outputsSize', 'scopeName']\n# Some additional methods to explore for methods_IO are\n#\n# 'unique' (type int)\n# 'type' (type <Tensor<class 'torch._C.Type'>>)\n#\n# But the below are sufficient for now.\nmethods_IO = ['node', 'offset', 'debugName']\n\nGETATTR_KIND = 'prim::GetAttr'\nCLASSTYPE_KIND = 'ClassType'\n\nclass NodeBase(object):\n def __init__(self, debugName=None, inputs=None, scope=None, tensor_size=None, op_type='UnSpecified', attributes=''):\n # TODO: Specify a __slots__ for this class or potentially\n # use a namedtuple instead\n self.debugName = debugName\n self.inputs = inputs\n self.tensor_size = tensor_size\n self.kind = op_type\n self.attributes = attributes\n self.scope = scope\n\n def __repr__(self):\n repr = []\n repr.append(str(type(self)))\n for m in dir(self):\n if '__' not in m:\n repr.append(m + ': ' + str(getattr(self, m)) + str(type(getattr(self, m))))\n return '\\n'.join(repr) + '\\n\\n'\n\n\nclass NodePy(NodeBase):\n def __init__(self, node_cpp, valid_methods):\n super(NodePy, self).__init__(node_cpp)\n valid_methods = valid_methods[:]\n self.inputs = []\n\n for m in valid_methods:\n if m == 'inputs' or m == 'outputs':\n list_of_node = list(getattr(node_cpp, m)())\n io_unique_names = []\n io_tensor_sizes = []\n for n in list_of_node:\n io_unique_names.append(n.debugName())\n if n.isCompleteTensor():\n io_tensor_sizes.append(n.type().sizes())\n else:\n io_tensor_sizes.append(None)\n\n setattr(self, m, io_unique_names)\n setattr(self, m + 'tensor_size', io_tensor_sizes)\n\n else:\n setattr(self, m, getattr(node_cpp, m)())\n\n\nclass NodePyIO(NodePy):\n def __init__(self, node_cpp, input_or_output=None):\n super(NodePyIO, self).__init__(node_cpp, methods_IO)\n try:\n tensor_size = node_cpp.type().sizes()\n except RuntimeError:\n tensor_size = [1, ] # fail when constant model is used.\n self.tensor_size = tensor_size\n # Kind attribute string is purely descriptive and will be shown\n # in detailed information for the node in TensorBoard's graph plugin.\n #\n # NodePyOP nodes get this from their kind() method.\n self.kind = 'Parameter'\n if input_or_output:\n self.input_or_output = input_or_output\n self.kind = 'IO Node'\n\n\nclass NodePyOP(NodePy):\n def __init__(self, node_cpp):\n super(NodePyOP, self).__init__(node_cpp, methods_OP)\n # Replace single quote which causes strange behavior in 
TensorBoard\n # TODO: See if we can remove this in the future\n self.attributes = str({k: node_cpp[k] for k in node_cpp.attributeNames()}).replace(\"'\", ' ')\n self.kind = node_cpp.kind()\n\n\nclass GraphPy(object):\n \"\"\"Helper class to convert torch.nn.Module to GraphDef proto and visualization\n with TensorBoard.\n\n GraphDef generation operates in two passes:\n\n In the first pass, all nodes are read and saved to two lists.\n One list is for input/output nodes (nodes_io), which only have inbound\n or outbound connections, but not both. Another list is for internal\n operator nodes (nodes_op). The first pass also saves all scope name\n appeared in the nodes in scope_name_appeared list for later processing.\n\n In the second pass, scope names are fully applied to all nodes.\n debugNameToScopedName is a mapping from a node's ID to its fully qualified\n scope name. e.g. Net1/Linear[0]/1. Unfortunately torch.jit doesn't have\n totally correct scope output, so this is nontrivial. The function\n populate_namespace_from_OP_to_IO and find_common_root are used to\n assign scope name to a node based on the connection between nodes\n in a heuristic kind of way. Bookkeeping is done with shallowest_scope_name\n and scope_name_appeared.\n \"\"\"\n def __init__(self):\n self.nodes_op = []\n self.nodes_io = OrderedDict()\n self.unique_name_to_scoped_name = {}\n self.shallowest_scope_name = 'default'\n self.scope_name_appeared = []\n\n def append(self, x):\n if isinstance(x, NodePyIO):\n self.nodes_io[x.debugName] = x\n if isinstance(x, NodePyOP):\n self.nodes_op.append(x)\n\n def printall(self):\n print('all nodes')\n for node in self.nodes_op:\n print(node)\n for key in self.nodes_io:\n print(self.nodes_io[key])\n\n def find_common_root(self):\n for fullscope in self.scope_name_appeared:\n if fullscope:\n self.shallowest_scope_name = fullscope.split('/')[0]\n\n def populate_namespace_from_OP_to_IO(self):\n for node in self.nodes_op:\n for node_output, outputSize in zip(node.outputs, node.outputstensor_size):\n self.scope_name_appeared.append(node.scopeName)\n self.nodes_io[node_output] = NodeBase(node_output,\n node.inputs,\n node.scopeName,\n outputSize,\n op_type=node.kind,\n attributes=node.attributes)\n\n self.find_common_root()\n\n for node in self.nodes_op:\n for input_node_id in node.inputs:\n self.unique_name_to_scoped_name[input_node_id] = node.scopeName + '/' + input_node_id\n\n for key, node in self.nodes_io.items():\n if type(node) == NodeBase:\n self.unique_name_to_scoped_name[key] = node.scope + '/' + node.debugName\n if hasattr(node, 'input_or_output'):\n self.unique_name_to_scoped_name[key] = node.input_or_output + '/' + node.debugName\n\n if hasattr(node, 'scope') and node.scope is not None:\n self.unique_name_to_scoped_name[key] = node.scope + '/' + node.debugName\n if node.scope == '' and self.shallowest_scope_name:\n self.unique_name_to_scoped_name[node.debugName] = self.shallowest_scope_name + '/' + node.debugName\n\n # replace name\n for key, node in self.nodes_io.items():\n self.nodes_io[key].inputs = [self.unique_name_to_scoped_name[node_input_id] for node_input_id in node.inputs]\n if node.debugName in self.unique_name_to_scoped_name:\n self.nodes_io[key].debugName = self.unique_name_to_scoped_name[node.debugName]\n\n def to_proto(self):\n \"\"\"\n Converts graph representation of GraphPy object to TensorBoard\n required format.\n \"\"\"\n # TODO: compute correct memory usage and CPU time once\n # PyTorch supports it\n nodes = []\n for v in self.nodes_io.values():\n 
nodes.append(node_proto(v.debugName,\n input=v.inputs,\n outputsize=v.tensor_size,\n op=v.kind,\n attributes=v.attributes))\n return nodes\n\n\ndef parse(graph, trace, args=None, omit_useless_nodes=True):\n \"\"\"This method parses an optimized PyTorch model graph and produces\n a list of nodes and node stats for eventual conversion to TensorBoard\n protobuf format.\n\n Args:\n graph (PyTorch module): The model graph to be parsed.\n trace (PyTorch JIT TracedModule): The model trace to be parsed.\n args (tuple): input tensor[s] for the model.\n omit_useless_nodes (boolean): Whether to remove nodes from the graph.\n \"\"\"\n n_inputs = len(args)\n\n scope = {}\n nodes_py = GraphPy()\n for node in graph.inputs():\n if omit_useless_nodes:\n if len(node.uses()) == 0: # number of user of the node (= number of outputs/ fanout)\n continue\n\n if node.type().kind() != CLASSTYPE_KIND:\n nodes_py.append(NodePyIO(node, 'input'))\n\n attr_to_scope: Dict[Any, str] = dict()\n for node in graph.nodes():\n if node.kind() == GETATTR_KIND:\n attr_name = node.s('name')\n parent = node.input().node()\n if parent.kind() == GETATTR_KIND: # If the parent node is not the top-level \"self\" node\n parent_attr_name = parent.s('name')\n parent_scope = attr_to_scope[parent_attr_name]\n attr_scope = parent_scope.split('/')[-1]\n attr_to_scope[attr_name] = '{}/{}.{}'.format(parent_scope, attr_scope, attr_name)\n else:\n attr_to_scope[attr_name] = '__module.{}'.format(attr_name)\n # We don't need classtype nodes; scope will provide this information\n if node.output().type().kind() != CLASSTYPE_KIND:\n node_py = NodePyOP(node)\n node_py.scopeName = attr_to_scope[attr_name] # type: ignore\n nodes_py.append(node_py)\n else:\n nodes_py.append(NodePyOP(node))\n\n for i, node in enumerate(graph.outputs()): # Create sink nodes for output ops\n node_pyio = NodePyIO(node, 'output')\n node_pyio.debugName = \"output.{}\".format(i + 1)\n node_pyio.inputs = [node.debugName()]\n nodes_py.append(node_pyio)\n\n def parse_traced_name(module):\n if isinstance(module, torch.jit.TracedModule):\n module_name = module._name\n else:\n module_name = getattr(module, 'original_name', \"Module\")\n return module_name\n\n alias_to_name = dict()\n base_name = parse_traced_name(trace)\n for name, module in trace.named_modules(prefix='__module'):\n mod_name = parse_traced_name(module)\n attr_name = name.split('.')[-1]\n alias_to_name[name] = '{}[{}]'.format(mod_name, attr_name)\n\n for node in nodes_py.nodes_op:\n module_aliases = node.scopeName.split('/')\n replacements = [\n alias_to_name[alias]\n if alias in alias_to_name\n else alias.split('.')[-1]\n for alias in module_aliases\n ]\n node.scopeName = base_name\n if any(replacements):\n node.scopeName += '/' + '/'.join(replacements)\n\n nodes_py.populate_namespace_from_OP_to_IO()\n return nodes_py.to_proto()\n\n\ndef graph(model, args, verbose=False):\n \"\"\"\n This method processes a PyTorch model and produces a `GraphDef` proto\n that can be logged to TensorBoard.\n\n Args:\n model (PyTorch module): The model to be parsed.\n args (tuple): input tensor[s] for the model.\n verbose (bool): Whether to print out verbose information while\n processing.\n \"\"\"\n with torch.onnx.select_model_mode_for_export(model, torch.onnx.TrainingMode.EVAL): # TODO: move outside of torch.onnx?\n try:\n trace = torch.jit.trace(model, args)\n graph = trace.graph\n torch._C._jit_pass_inline(graph)\n except RuntimeError as e:\n print(e)\n print('Error occurs, No graph saved')\n raise e\n\n if verbose:\n print(graph)\n 
list_of_nodes = parse(graph, trace, args)\n # We are hardcoding that this was run on CPU even though it might have actually\n # run on GPU. Note this is what is shown in TensorBoard and has no bearing\n # on actual execution.\n # TODO: See if we can extract GPU vs CPU information from the PyTorch model\n # and pass it correctly to TensorBoard.\n #\n # Definition of StepStats and DeviceStepStats can be found at\n # https://github.com/tensorflow/tensorboard/blob/master/tensorboard/plugins/graph/tf_graph_common/test/graph-test.ts\n # and\n # https://github.com/tensorflow/tensorboard/blob/master/tensorboard/compat/proto/step_stats.proto\n stepstats = RunMetadata(step_stats=StepStats(dev_stats=[DeviceStepStats(device=\"/device:CPU:0\")]))\n return GraphDef(node=list_of_nodes, versions=VersionDef(producer=22)), stepstats\n # The producer version has been reverse engineered from standard\n # TensorBoard logged data.\n", "import benchmark_caffe2 as op_bench_c2\nimport operator_benchmark as op_bench\nfrom benchmark_caffe2 import Caffe2BenchmarkBase # noqa\nfrom caffe2.python import core\nimport numpy\n\n\n\"\"\"Microbenchmarks for element-wise BatchGather operator.\"\"\"\n\n# Configs for C2 BatchGather operator\nbatch_gather_configs_short = op_bench.config_list(\n attr_names=[\"M\", \"N\", \"K\"],\n attrs=[\n [8, 8, 1],\n [256, 512, 1],\n [512, 512, 1],\n [8, 8, 2],\n [256, 512, 2],\n [512, 512, 2],\n ],\n cross_product_configs={\n 'device': ['cpu', 'cuda'],\n },\n tags=[\"short\"]\n)\n\nbatch_gather_configs_long = op_bench.cross_product_configs(\n M=[128, 1024],\n N=[128, 1024],\n K=[1, 2],\n device=['cpu', 'cuda'],\n tags=[\"long\"]\n)\n\nclass BatchGatherBenchmark(op_bench_c2.Caffe2BenchmarkBase):\n def init(self, M, N, K, device):\n self.input_one = self.tensor([M, N, K], device=device)\n max_val = N\n numpy.random.seed((1 << 32) - 1)\n index_dim = numpy.random.randint(0, N)\n self.index = self.feed_tensor(numpy.random.randint(0, max_val, index_dim), device=device)\n self.output = self.tensor([M, index_dim, K], device=device)\n self.set_module_name(\"batch_gather\")\n\n def forward(self):\n op = core.CreateOperator(\"BatchGather\", [self.input_one, self.index], self.output)\n return op\n\n\nop_bench_c2.generate_c2_test(\n batch_gather_configs_long + batch_gather_configs_short, BatchGatherBenchmark\n)\n\n\nif __name__ == \"__main__\":\n op_bench.benchmark_runner.main()\n" ]
[ [ "torch.tensor" ], [ "torch.no_grad" ], [ "torch.onnx.select_model_mode_for_export", "torch.jit.trace", "torch._C._jit_pass_inline" ], [ "numpy.random.seed", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
hirano1412/bdpy
[ "cee6f36dcdf4f4d29fc3a6980777e1c3d7c66cbb", "cee6f36dcdf4f4d29fc3a6980777e1c3d7c66cbb", "cee6f36dcdf4f4d29fc3a6980777e1c3d7c66cbb" ]
[ "test/test_preproc.py", "test/test_stats.py", "bdpy/dl/caffe.py" ]
[ "'''Tests for bdpy.preprocessor'''\n\n\nfrom unittest import TestCase, TestLoader, TextTestRunner\n\nimport numpy as np\nfrom scipy.signal import detrend\n\nfrom bdpy import preproc\n\n\nclass TestPreprocessor(TestCase):\n '''Tests of 'preprocessor' module'''\n\n @classmethod\n def test_average_sample(cls):\n '''Test for average_sample'''\n\n x = np.random.rand(10, 100)\n group = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2, 2])\n\n exp_output_x = np.vstack((np.average(x[0:5, :], axis=0),\n np.average(x[5:10, :], axis=0)))\n exp_output_ind = np.array([0, 5])\n\n test_output_x, test_output_ind = preproc.average_sample(x, group,\n verbose=True)\n\n np.testing.assert_array_equal(test_output_x, exp_output_x)\n np.testing.assert_array_equal(test_output_ind, exp_output_ind)\n\n @classmethod\n def test_detrend_sample_default(cls):\n '''Test for detrend_sample (default)'''\n\n x = np.random.rand(20, 10)\n group = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 2, 2, 2, 2, 2, 2, 2, 2, 2, 2])\n\n exp_output = np.vstack((detrend(x[0:10, :], axis=0, type='linear')\n + np.mean(x[0:10, :], axis=0),\n detrend(x[10:20, :], axis=0, type='linear')\n + np.mean(x[10:20, :], axis=0)))\n\n test_output = preproc.detrend_sample(x, group, verbose=True)\n\n np.testing.assert_array_equal(test_output, exp_output)\n\n @classmethod\n def test_detrend_sample_nokeepmean(cls):\n '''Test for detrend_sample (keep_mean=False)'''\n\n x = np.random.rand(20, 10)\n group = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 2, 2, 2, 2, 2, 2, 2, 2, 2, 2])\n\n exp_output = np.vstack((detrend(x[0:10, :], axis=0, type='linear'),\n detrend(x[10:20, :], axis=0, type='linear')))\n\n test_output = preproc.detrend_sample(x, group, keep_mean=False,\n verbose=True)\n\n np.testing.assert_array_equal(test_output, exp_output)\n\n @classmethod\n def test_normalize_sample(cls):\n '''Test for normalize_sample (default)'''\n\n x = np.random.rand(20, 10)\n group = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 2, 2, 2, 2, 2, 2, 2, 2, 2, 2])\n\n mean_a = np.mean(x[0:10, :], axis=0)\n mean_b = np.mean(x[10:20, :], axis=0)\n\n exp_output = np.vstack((100 * (x[0:10, :] - mean_a) / mean_a,\n 100 * (x[10:20, :] - mean_b) / mean_b))\n\n test_output = preproc.normalize_sample(x, group, verbose=True)\n\n np.testing.assert_array_equal(test_output, exp_output)\n\n @classmethod\n def test_shift_sample_singlegroup(cls):\n '''Test for shift_sample (single group, shift_size=1)'''\n\n x = np.array([[1, 2, 3],\n [11, 12, 13],\n [21, 22, 23],\n [31, 32, 33],\n [41, 42, 43]])\n grp = np.array([1, 1, 1, 1, 1])\n\n exp_output_data = np.array([[11, 12, 13],\n [21, 22, 23],\n [31, 32, 33],\n [41, 42, 43]])\n exp_output_ind = [0, 1, 2, 3]\n\n # Default shift_size = 1\n test_output_data, test_output_ind = preproc.shift_sample(x, grp,\n verbose=True)\n\n np.testing.assert_array_equal(test_output_data, exp_output_data)\n np.testing.assert_array_equal(test_output_ind, exp_output_ind)\n\n @classmethod\n def test_shift_sample_twogroup(cls):\n '''Test for shift_sample (two groups, shift_size=1)'''\n\n x = np.array([[1, 2, 3],\n [11, 12, 13],\n [21, 22, 23],\n [31, 32, 33],\n [41, 42, 43],\n [51, 52, 53]])\n grp = np.array([1, 1, 1, 2, 2, 2])\n\n exp_output_data = np.array([[11, 12, 13],\n [21, 22, 23],\n [41, 42, 43],\n [51, 52, 53]])\n exp_output_ind = [0, 1, 3, 4]\n\n # Default shift_size=1\n test_output_data, test_output_ind = preproc.shift_sample(x, grp,\n verbose=True)\n\n np.testing.assert_array_equal(test_output_data, exp_output_data)\n np.testing.assert_array_equal(test_output_ind, 
exp_output_ind)\n\n @classmethod\n def test_select_top_default(cls):\n '''Test for select_top (default, axis=0)'''\n\n test_data = np.array([[1, 2, 3, 4, 5],\n [11, 12, 13, 14, 15],\n [21, 22, 23, 24, 25],\n [31, 32, 33, 34, 35],\n [41, 42, 43, 44, 45]])\n test_value = np.array([15, 3, 6, 20, 0])\n test_num = 3\n\n exp_output_data = np.array([[1, 2, 3, 4, 5],\n [21, 22, 23, 24, 25],\n [31, 32, 33, 34, 35]])\n exp_output_index = np.array([0, 2, 3])\n\n test_output_data, test_output_index = preproc.select_top(test_data,\n test_value,\n test_num)\n\n np.testing.assert_array_equal(test_output_data, exp_output_data)\n np.testing.assert_array_equal(test_output_index, exp_output_index)\n\n @classmethod\n def test_select_top_axisone(cls):\n '''Test for select_top (axis=1)'''\n\n test_data = np.array([[1, 2, 3, 4, 5],\n [11, 12, 13, 14, 15],\n [21, 22, 23, 24, 25],\n [31, 32, 33, 34, 35],\n [41, 42, 43, 44, 45]])\n test_value = np.array([15, 3, 6, 20, 0])\n test_num = 3\n\n exp_output_data = np.array([[1, 3, 4],\n [11, 13, 14],\n [21, 23, 24],\n [31, 33, 34],\n [41, 43, 44]])\n exp_output_index = np.array([0, 2, 3])\n\n test_output_data, test_output_index = preproc.select_top(test_data,\n test_value,\n test_num,\n axis=1)\n\n np.testing.assert_array_equal(test_output_data, exp_output_data)\n np.testing.assert_array_equal(test_output_index, exp_output_index)\n\n\nif __name__ == '__main__':\n test_suite = TestLoader().loadTestsFromTestCase(TestPreprocessor)\n TextTestRunner(verbosity=2).run(test_suite)\n", "'''Tests for bdpy.stats'''\n\n\nfrom unittest import TestCase, TestLoader, TextTestRunner\n\nimport numpy as np\n\nimport bdpy.stats as bdst\n\n\nclass TestStats(TestCase):\n '''Tests for bdpy.stats'''\n\n def test_corrcoef_matrix_matrix_default(self):\n '''Test for corrcoef (matrix and matrix, default, var=row)'''\n\n x = np.random.rand(100, 10)\n y = np.random.rand(100, 10)\n\n exp_output = np.diag(np.corrcoef(x, y)[:x.shape[0], x.shape[0]:])\n\n test_output = bdst.corrcoef(x, y)\n\n np.testing.assert_array_equal(test_output, exp_output)\n\n def test_corrcoef_matrix_matrix_varcol(self):\n '''Test for corrcoef (matrix and matrix, var=col)'''\n\n x = np.random.rand(100, 10)\n y = np.random.rand(100, 10)\n\n exp_output = np.diag(np.corrcoef(x, y, rowvar=0)[:x.shape[1],\n x.shape[1]:])\n\n test_output = bdst.corrcoef(x, y, var='col')\n\n np.testing.assert_array_equal(test_output, exp_output)\n\n def test_corrcoef_vector_vector(self):\n '''Test for corrcoef (vector and vector)'''\n\n x = np.random.rand(100)\n y = np.random.rand(100)\n\n exp_output = np.corrcoef(x, y)[0, 1]\n\n test_output = bdst.corrcoef(x, y)\n\n np.testing.assert_array_equal(test_output, exp_output)\n\n def test_corrcoef_hvector_hvector(self):\n '''Test for corrcoef (horizontal vector and horizontal vector)'''\n\n x = np.random.rand(1, 100)\n y = np.random.rand(1, 100)\n\n exp_output = np.corrcoef(x, y)[0, 1]\n\n test_output = bdst.corrcoef(x, y)\n\n np.testing.assert_array_equal(test_output, exp_output)\n\n def test_corrcoef_vvector_vvector(self):\n '''Test for corrcoef (vertical vector and vertical vector)'''\n\n x = np.random.rand(100, 1)\n y = np.random.rand(100, 1)\n\n exp_output = np.corrcoef(x.T, y.T)[0, 1]\n\n test_output = bdst.corrcoef(x, y)\n\n np.testing.assert_array_equal(test_output, exp_output)\n\n def test_corrcoef_matrix_vector_varrow(self):\n '''Test for corrcoef (matrix and vector, var=row)'''\n\n x = np.random.rand(100, 10)\n y = np.random.rand(10)\n\n exp_output = np.corrcoef(y, x)[0, 1:]\n\n test_output = 
bdst.corrcoef(x, y)\n\n np.testing.assert_array_almost_equal(test_output, exp_output)\n\n def test_corrcoef_matrix_vector_varcol(self):\n '''Test for corrcoef (matrix and vector, var=col)'''\n\n x = np.random.rand(100, 10)\n y = np.random.rand(100)\n\n exp_output = np.corrcoef(y, x, rowvar=0)[0, 1:]\n\n test_output = bdst.corrcoef(x, y, var='col')\n\n np.testing.assert_array_almost_equal(test_output, exp_output)\n\n def test_corrcoef_vector_matrix_varrow(self):\n '''Test for corrcoef (vector and matrix, var=row)'''\n\n x = np.random.rand(10)\n y = np.random.rand(100, 10)\n\n exp_output = np.corrcoef(x, y)[0, 1:]\n\n test_output = bdst.corrcoef(x, y)\n\n np.testing.assert_array_almost_equal(test_output, exp_output)\n\n def test_corrcoef_vector_matrix_varcol(self):\n '''Test for corrcoef (vector and matrix, var=col)'''\n\n x = np.random.rand(100)\n y = np.random.rand(100, 10)\n\n exp_output = np.corrcoef(x, y, rowvar=0)[0, 1:]\n\n test_output = bdst.corrcoef(x, y, var='col')\n\n np.testing.assert_array_almost_equal(test_output, exp_output)\n\n def test_corrmat_default(self):\n '''Test for corrmat (default, var=row)'''\n\n x = np.random.rand(100, 10)\n y = np.random.rand(100, 10)\n\n exp_output = np.corrcoef(x, y)[:x.shape[0], x.shape[0]:]\n\n test_output = bdst.corrmat(x, y)\n\n np.testing.assert_array_almost_equal(test_output, exp_output)\n\n def test_corrmat_varcol(self):\n '''Test for corrmat (var=col)'''\n\n x = np.random.rand(100, 10)\n y = np.random.rand(100, 10)\n\n exp_output = np.corrcoef(x, y, rowvar=0)[:x.shape[1], x.shape[1]:]\n\n test_output = bdst.corrmat(x, y, var='col')\n\n np.testing.assert_array_almost_equal(test_output, exp_output)\n\n\nif __name__ == '__main__':\n suite = TestLoader().loadTestsFromTestCase(TestStats)\n TextTestRunner(verbosity=2).run(suite)\n", "'''Caffe module.'''\n\n\nimport os\n\nimport PIL\nimport caffe\nimport numpy as np\nfrom bdpy.dataform import save_array\nfrom tqdm import tqdm\n\n\ndef extract_image_features(image_file, net, layers=[], crop_center=False, image_preproc=[], save_dir=None, verbose=False, progbar=False, return_features=True):\n '''\n Extract DNN features of a given image.\n\n Parameters\n ----------\n image_file : str or list\n (List of) path to the input image file(s).\n net : Caffe network instance\n layers : list\n List of DNN layers of which features are returned.\n crop_center : bool (default: False)\n Crop the center of an image or not.\n image_preproc : list (default: [])\n List of additional preprocessing functions. The function input/output\n should be a PIL.Image instance. 
The preprocessing functions are applied\n after RGB conversion, center-cropping, and resizing of the input image.\n save_dir : None or str (default: None)\n Save the features in the specified directory if not None.\n verbose : bool (default: False)\n Output verbose messages or not.\n return_features: bool (default: True)\n Return the extracted features or not.\n\n Returns\n -------\n dict\n Dictionary in which keys are DNN layers and values are features.\n '''\n\n if isinstance(image_file, str):\n image_file = [image_file]\n\n features_dict = {}\n\n if progbar:\n image_file = tqdm(image_file)\n\n for imgf in image_file:\n if verbose:\n print('Image: %s' % imgf)\n\n image_size = net.blobs['data'].data.shape[-2:]\n mean_img = net.transformer.mean['data']\n\n # Open the image\n img = PIL.Image.open(imgf)\n\n # Convert non-RGB to RGB\n if img.mode == 'CMYK':\n img = img.convert('RGB')\n\n if img.mode == 'RGBA':\n bg = PIL.Image.new('RGB', img.size, (255, 255, 255))\n bg.paste(img, mask=img.split()[3])\n img = bg\n\n # Convert monochrome to RGB\n if img.mode == 'L':\n img = img.convert('RGB')\n\n # Center cropping\n if crop_center:\n w, h = img.size\n img = img.crop(((w - min(img.size)) // 2,\n (h - min(img.size)) // 2,\n (w + min(img.size)) // 2,\n (h + min(img.size)) // 2))\n\n # Resize\n img = img.resize(image_size, PIL.Image.BICUBIC)\n\n for p in image_preproc:\n img = p(img)\n\n img_array = np.array(img)\n\n try:\n img_array = np.float32(np.transpose(img_array, (2, 0, 1))[::-1]) - np.reshape(mean_img, (3, 1, 1))\n except:\n import pdb; pdb.set_trace()\n\n # Forwarding\n net.blobs['data'].reshape(1, 3, img_array.shape[1], img_array.shape[2])\n net.blobs['data'].data[0] = img_array\n net.forward()\n\n # Get features\n for lay in layers:\n feat = net.blobs[lay].data.copy()\n\n if return_features:\n if lay in features_dict:\n features_dict.update({\n lay: np.vstack([features_dict[lay], feat])\n })\n else:\n features_dict.update({lay: feat})\n\n if not save_dir is None:\n # Save the features\n save_dir_lay = os.path.join(save_dir, lay.replace('/', ':'))\n save_file = os.path.join(save_dir_lay,\n os.path.splitext(os.path.basename(imgf))[0] + '.mat')\n if not os.path.exists(save_dir_lay):\n os.makedirs(save_dir_lay)\n if os.path.exists(save_file):\n if verbose:\n print('%s already exists. Skipped.' % save_file)\n continue\n save_array(save_file, feat, key='feat', dtype=np.float32, sparse=False)\n if verbose:\n print('%s saved.' % save_file)\n\n if return_features:\n return features_dict\n else:\n return None\n" ]
[ [ "numpy.testing.assert_array_equal", "numpy.mean", "numpy.random.rand", "numpy.average", "numpy.array", "numpy.vstack", "scipy.signal.detrend" ], [ "numpy.testing.assert_array_equal", "numpy.corrcoef", "numpy.random.rand", "numpy.testing.assert_array_almost_equal" ], [ "numpy.reshape", "numpy.array", "numpy.vstack", "numpy.transpose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ogarokpeter/gene_network_sirius_2019
[ "419cc430dbde4332acf5cd6eb5cfa669270c53af" ]
[ "RankAggregation/SimpleRankAggregation.py" ]
[ "# RUN WITH /usr/bin/python3 minet.py (python 3.6)\n\nimport sys\nimport numpy as np\nfrom sklearn.metrics import roc_curve, auc\nimport pandas as pd\n\n\ndef compute_aggregated_matrix(matrixfiles_num, matrixfiles, savematrixfile, saveresultfile, coeffs=[1, 1, 1, 1]):\n # matrixfiles_num = int(sys.argv[1])\n # matrixfiles = [sys.argv[i] for i in range(2, matrixfiles_num + 2)]\n # savematrixfile = sys.argv[matrixfiles_num + 2]\n # saveresultfile = sys.argv[matrixfiles_num + 3]\n matrices = [pd.read_csv(f, index_col=0, sep='\\t') for f in matrixfiles]\n genes = matrices[0].index\n # print(genes)\n\n # print(matrices)\n sz = len(matrices[0])\n for matrix in matrices:\n assert len(matrix) == sz\n\n for matrix in matrices:\n for column in matrix:\n temp = matrix[column].argsort()\n ranks = np.empty_like(temp)\n ranks[temp] = np.arange(len(matrix[column]))\n matrix[column] = ranks\n \n res = np.zeros(shape=(sz, sz))\n for s in range(sz):\n for i, matrix in enumerate(matrices):\n res[s] += matrix.iloc[:, s].values * coeffs[i]\n res[s] /= len(matrices)\n\n for row in res:\n row /= row.sum()\n\n result_df = pd.DataFrame(res, columns=genes, index=genes)\n \n result_df.to_csv(saveresultfile, index=True, header=True, sep='\\t')\n # print(result_df)\n return result_df\n\n\nmatricesdirname = \"/home/user/Sirius/gene_network_sirius_2019/Matrices_1\"\nsavematricesdirname = \"/home/user/Sirius/gene_network_sirius_2019/Matrices_6\"\npredictedfilename = matricesdirname + \"/{1}_{0}_predicted.txt\"\ntruefilename = matricesdirname + \"/{1}_{0}_true.txt\"\nsavematricesfilename = savematricesdirname + \"/{0}_predicted.txt\"\n# datalist = ['exps_10', 'exps_10_2', 'exps_10_bgr', 'exps_50', 'exps_50_2', 'exps_50_bgr', 'exps_100', 'exps_100_2', 'exps_100_bgr', 'genes_200_exps_10_bgr', 'genes_400_exps_10_bgr', 'genes_600_exps_10_bgr', 'genes_700_exps_10_bgr', 'genes_1000_exps_10_bgr']\ndatalist = ['genes_200_exps_10_bgr', 'genes_200_exps_20_bgr', 'genes_200_exps_40_bgr', 'genes_400_exps_10_bgr', 'genes_400_exps_40_bgr', 'genes_400_exps_80_bgr', 'genes_500_exps_10_bgr', 'genes_500_exps_50_bgr', 'genes_500_exps_100_bgr']\nalgolist = ['aracne', 'mrnet', 'mrnetb']\nsaveresultsfile = \"/home/user/Sirius/gene_network_sirius_2019/RankAggregation/res_arrgeg_on_petr_big_data_many_exps.txt\"\ntmpfile = \"/home/user/Sirius/gene_network_sirius_2019/RankAggregation/data/tmp5.txt\"\n\n\nif __name__ == \"__main__\":\n results = np.zeros(shape=(len(datalist)))\n\n for i, dataname in enumerate(datalist):\n\n true_df = pd.read_csv(truefilename.format(dataname, algolist[1]), index_col=0, sep='\\t')\n predicted_df = compute_aggregated_matrix(len(algolist), [predictedfilename.format(dataname, algo) for algo in algolist], tmpfile, savematricesfilename.format(dataname))\n true_df.to_csv(savematricesdirname + \"/{0}_true.txt\".format(dataname), index=True, header=True, sep='\\t')\n # print(true_df)\n\n true_array = true_df.values[np.triu_indices(true_df.values.shape[0], k=1)]\n predicted_array = predicted_df.values[np.triu_indices(predicted_df.values.shape[0], k=1)]\n \n roc_auc = 0\n # try:\n # fpr, tpr, thresholds = roc_curve(true_array, predicted_array)\n # roc_auc = auc(fpr, tpr)\n # except:\n # print(\"error\", dataname, algo)\n fpr, tpr, thresholds = roc_curve(true_array, predicted_array)\n roc_auc = auc(fpr, tpr)\n results[i] = roc_auc\n\n with open(savematricesdirname + \"/{0}_auc.txt\".format(dataname), 'w') as f:\n f.write(str(roc_auc) + '\\n')\n print(\"done\", dataname, results[i])\n with open(saveresultsfile, \"a\") as f:\n 
f.write(\"done \" + dataname + str(results[i]))\n \n # print(\"done\", dataname, algo)\n\n print(results)\n\n" ]
[ [ "pandas.read_csv", "numpy.triu_indices", "numpy.empty_like", "sklearn.metrics.roc_curve", "pandas.DataFrame", "sklearn.metrics.auc", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
Yoshi-0921/MAEXP
[ "cc03fdd46db9b1838df8f7782b4bd1b2bb3f11d5" ]
[ "core/agents/models/customs/da3.py" ]
[ "\"\"\"Source code for distributed attentional actor architecture (DA3) model.\n\nAuthor: Yoshinari Motokawa <[email protected]>\n\"\"\"\nfrom typing import List\n\nimport torch\nfrom core.utils.logging import initialize_logging\nfrom omegaconf import DictConfig\nfrom torch import nn\n\nfrom ..hard_shrink_attention import HardShrinkBlock\nfrom ..vit import Block, PatchEmbed\n\nlogger = initialize_logging(__name__)\n\n\nclass DA3(nn.Module):\n def __init__(self, config: DictConfig, input_shape: List[int], output_size: int):\n super().__init__()\n patched_size_x = input_shape[1] // config.model.patch_size\n patched_size_y = input_shape[2] // config.model.patch_size\n self.view_method = config.observation_area_mask\n\n self.patch_embed = PatchEmbed(\n patch_size=config.model.patch_size,\n in_chans=input_shape[0],\n embed_dim=config.model.embed_dim,\n )\n\n self.saliency_vector = nn.Parameter(torch.zeros(1, 1, config.model.embed_dim))\n self.pos_embed = nn.Parameter(\n torch.zeros(1, patched_size_x * patched_size_y + 1, config.model.embed_dim)\n )\n\n block = HardShrinkBlock if config.model.attention == \"hard\" else Block\n self.blocks = nn.ModuleList(\n [\n block(\n dim=config.model.embed_dim,\n num_heads=config.model.num_heads,\n mlp_ratio=config.model.mlp_ratio,\n **{\"af_lambd\": config.model.af_lambd}\n )\n for _ in range(config.model.block_loop)\n ]\n )\n\n self.norm = nn.LayerNorm(config.model.embed_dim)\n self.head = nn.Linear(config.model.embed_dim, output_size)\n\n def forward(self, state):\n x = self.state_encoder(state)\n\n out = self.patch_embed(x)\n saliency_vector = self.saliency_vector.expand(out.shape[0], -1, -1)\n out = torch.cat((saliency_vector, out), dim=1)\n out = out + self.pos_embed\n\n for blk in self.blocks:\n out = blk(out)\n\n out = self.norm(out)\n out = out[:, 0]\n\n out = self.head(out)\n\n return out\n\n def forward_attn(self, state):\n x = self.state_encoder(state)\n\n out = self.patch_embed(x)\n saliency_vector = self.saliency_vector.expand(out.shape[0], -1, -1)\n out = torch.cat((saliency_vector, out), dim=1)\n out = out + self.pos_embed\n\n attns = list()\n for blk in self.blocks:\n out, attn = blk.forward_attn(out)\n attns.append(attn.detach())\n\n out = self.norm(out)\n out = out[:, 0]\n\n out = self.head(out)\n\n return out, [attns]\n\n def state_encoder(self, state):\n\n return state[self.view_method]\n" ]
[ [ "torch.nn.Linear", "torch.zeros", "torch.cat", "torch.nn.LayerNorm" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
joeferg425/ws281x_lightberries
[ "c6a5a3ffeeb3642b34e3e6e3b759af9e4725efce" ]
[ "LightBerries/LightStrings.py" ]
[ "\"\"\"Defines basic light string data and functions.\"\"\"\nimport os\nimport sys\nimport atexit\nimport inspect\nimport time\nimport logging\nfrom typing import Any, Optional, Sequence, Union, overload\nfrom nptyping import NDArray\nimport numpy as np\nfrom LightBerries.LightBerryExceptions import LightStringException\nfrom LightBerries.RpiWS281xPatch import rpi_ws281x\nfrom LightBerries.LightPixels import Pixel, PixelColors\n\nLOGGER = logging.getLogger(\"LightBerries\")\n\n\nclass LightString(Sequence[np.int_]):\n \"\"\"Defines basic LED array data and functions.\"\"\"\n\n def __init__(\n self,\n ledCount: Optional[int] = None,\n pixelStrip: rpi_ws281x.PixelStrip = None,\n simulate: bool = False,\n ) -> None:\n \"\"\"Creates a pixel array using the rpipixelStrip library and Pixels.\n\n Args:\n ledCount: the number of LEDs desired in the LightString\n pixelStrip: the ws281x object that actually controls the LED signaling\n simulate: dont use GPIO\n\n Raises:\n Warning: if something unexpected could happen\n SystemExit: if exiting\n KeyboardInterrupt: if user quits\n LightStringException: if something bad happens\n \"\"\"\n # cant run GPIO stuff without root, tell the user if they forgot\n # linux check is just for debugging with fake GPIO on windows\n if sys.platform == \"linux\" and not os.getuid() == 0: # pylint: disable = no-member\n raise LightStringException(\n \"GPIO functionality requires root privilege. Please run command again as root\"\n )\n\n # catch error cases first\n if ledCount is None and pixelStrip is None and simulate is False:\n raise LightStringException(\n \"Cannot create LightString object without ledCount or \" + \"pixelStrip object being specified\"\n )\n # catch error cases first\n # if ledCount is not None and pixelStrip is not None:\n # raise Warning(\n # \"ledCount is overridden when pixelStrip is and ledcount \"\n # + \"are both passed to LightString constructor\"\n # )\n\n try:\n self.simulate = simulate\n # use passed led count if it is valid\n if ledCount is not None:\n self._ledCount = ledCount\n\n # used passed pixel strip if it is not none\n if pixelStrip is not None:\n self.pixelStrip = pixelStrip\n self.pixelStrip.begin()\n self._ledCount = self.pixelStrip.numPixels()\n LOGGER.debug(\n \"%s.%s Created WS281X object\",\n self.__class__.__name__,\n inspect.stack()[0][3],\n )\n except SystemExit: # pylint:disable=try-except-raise\n raise\n except KeyboardInterrupt: # pylint:disable=try-except-raise\n raise\n except Exception as ex:\n LOGGER.exception(\n \"%s.%s Exception: %s\",\n self.__class__.__name__,\n inspect.stack()[0][3],\n ex,\n )\n raise LightStringException(str(ex)).with_traceback(ex.__traceback__)\n\n try:\n # validate led count\n if not isinstance(self._ledCount, int):\n raise LightStringException(\n f'Cannot create LightString object with LED count \"{self._ledCount}\"',\n )\n # if led count is good, create our pixel sequence\n self.rgbArray: NDArray[(3, Any), np.int32] = np.zeros((self._ledCount, 3))\n self.rgbArray[:] = np.array([Pixel().array for i in range(self._ledCount)])\n LOGGER.debug(\n \"%s.%s Created Numpy Light array\",\n self.__class__.__name__,\n inspect.stack()[0][3],\n )\n except SystemExit: # pylint:disable=try-except-raise\n raise\n except KeyboardInterrupt: # pylint:disable=try-except-raise\n raise\n except Exception as ex:\n LOGGER.exception(\n \"%s.%s Exception: %s\",\n self.__class__.__name__,\n inspect.stack()[0][3],\n ex,\n )\n raise LightStringException(str(ex)).with_traceback(ex.__traceback__)\n\n # try to force 
cleanup of underlying c objects when user exits\n atexit.register(self.__del__)\n\n def __del__(\n self,\n ) -> None:\n \"\"\"Properly disposes of the rpipixelStrip object.\n\n Prevents memory leaks (hopefully) that were happening in the rpi.PixelStrip module.\n\n Raises:\n SystemExit: if exiting\n KeyboardInterrupt: if user quits\n LightStringException: if something bad happens\n \"\"\"\n # check if pixel strip has been created\n if isinstance(self.pixelStrip, rpi_ws281x.PixelStrip):\n # turn off leds\n self.off()\n # cleanup c memory usage\n try:\n self.pixelStrip._cleanup()\n except SystemExit: # pylint:disable=try-except-raise\n raise\n except KeyboardInterrupt: # pylint:disable=try-except-raise\n raise\n except Exception as ex:\n LOGGER.exception(\"Failed to clean up WS281X object: %s\", str(ex))\n raise LightStringException(str(ex)).with_traceback(ex.__traceback__)\n\n def __len__(\n self,\n ) -> int:\n \"\"\"Return length of the light string (the number of LEDs).\n\n Returns:\n the number of LEDs in the array\n \"\"\"\n if self.rgbArray is not None:\n return len(self.rgbArray)\n else:\n return 0\n\n @overload\n def __getitem__( # noqa D105\n self,\n idx: int,\n ) -> NDArray[(3,), np.int32]:\n ... # pylint: disable=pointless-statement\n\n @overload\n def __getitem__( # noqa D105 # pylint: disable=function-redefined\n self,\n s: slice,\n ) -> NDArray[(3, Any), np.int32]:\n ... # pylint: disable=pointless-statement\n\n def __getitem__( # pylint: disable=function-redefined\n self, key: Union[int, slice]\n ) -> Union[NDArray[(3,), np.int32], NDArray[(3, Any), np.int32]]:\n \"\"\"Return a LED index or slice from LED array.\n\n Args:\n key: an index of a single LED, or a slice specifying a range of LEDs\n\n Returns:\n the LED value or values as requested\n\n Raises:\n SystemExit: if exiting\n KeyboardInterrupt: if user quits\n LightStringException: if something bad happens\n \"\"\"\n try:\n if isinstance(self.rgbArray, np.ndarray):\n return self.rgbArray[key].array\n else:\n raise LightStringException(\"Cannot index into uninitialized LightString object\")\n except SystemExit: # pylint:disable=try-except-raise\n raise\n except KeyboardInterrupt: # pylint:disable=try-except-raise\n raise\n except Exception as ex:\n LOGGER.exception('Failed to get key \"%s\" from %s: %s', key, self.rgbArray, ex)\n raise LightStringException(str(ex)).with_traceback(ex.__traceback__)\n\n def __setitem__(\n self,\n key: Union[int, slice],\n value: Union[NDArray[(3,), np.int32], NDArray[(3, Any), np.int32]],\n ) -> None:\n \"\"\"Set LED value(s) in the array.\n\n Args:\n key: the index or slice specifying one or more LED indices\n value: the RGB value or values to assign to the given LED indices\n\n Raises:\n SystemExit: if exiting\n KeyboardInterrupt: if user quits\n LightStringException: if something bad happens\n \"\"\"\n try:\n if isinstance(self.rgbArray, np.ndarray):\n if isinstance(key, slice):\n if isinstance(value, np.ndarray):\n self.rgbArray.__setitem__(key, value)\n elif isinstance(value, Sequence):\n self.rgbArray.__setitem__(key, [Pixel(v).array for v in value])\n else:\n raise LightStringException(\n \"Cannot assign multiple indices of LightString using a single value\"\n )\n else:\n if isinstance(value, np.ndarray):\n self.rgbArray.__setitem__(key, value)\n elif isinstance(value, Pixel):\n self.rgbArray.__setitem__(key, Pixel(value).array)\n else:\n raise LightStringException(\n \"Cannot assign single index of LightString using multiple values\"\n )\n else:\n raise LightStringException(\"Cannot 
index into uninitialized LightString object\")\n except SystemExit: # pylint:disable=try-except-raise\n raise\n except KeyboardInterrupt: # pylint:disable=try-except-raise\n raise\n except Exception as ex:\n LOGGER.exception(\"Failed to set light %s to value %s: %s\", key, value, ex)\n raise LightStringException(str(ex)).with_traceback(ex.__traceback__)\n\n def __enter__(\n self,\n ) -> \"LightString\":\n \"\"\"Get an instance of this object object.\n\n Returns:\n an instance of LightString\n \"\"\"\n return self\n\n def __exit__(\n self,\n *args,\n ) -> None:\n \"\"\"Cleanup the instance of this object.\n\n Args:\n args: ignored\n \"\"\"\n self.__del__()\n\n def off(\n self,\n ) -> None:\n \"\"\"Turn all of the LEDs in the LightString off.\n\n Raises:\n SystemExit: if exiting\n KeyboardInterrupt: if user quits\n LightStringException: if something bad happens\n \"\"\"\n for index in range(len(self.rgbArray)):\n try:\n self[index] = PixelColors.OFF.array\n except SystemExit: # pylint:disable=try-except-raise\n raise\n except KeyboardInterrupt: # pylint:disable=try-except-raise\n raise\n except Exception as ex:\n LOGGER.exception(\n \"Failed to set pixel %s in WS281X to value %s: %s\",\n index,\n LightString(0),\n ex,\n )\n raise LightStringException(str(ex)).with_traceback(ex.__traceback__)\n self.refresh()\n\n def refresh(\n self,\n ) -> None:\n \"\"\"Update the ws281x signal using the numpy array.\n\n Raises:\n SystemExit: if exiting\n KeyboardInterrupt: if user quits\n LightStringException: if something bad happens\n \"\"\"\n try:\n # define callback for map method (fast iterator)\n if self.simulate is False:\n\n def SetPixel(irgb):\n try:\n i = irgb[0]\n rgb = irgb[1]\n value = (int(rgb[0]) << 16) + (int(rgb[1]) << 8) + int(rgb[2])\n self.pixelStrip.setPixelColor(i, value)\n except SystemExit: # pylint:disable=try-except-raise\n raise\n except KeyboardInterrupt: # pylint:disable=try-except-raise\n raise\n except Exception as ex:\n LOGGER.exception(\n \"Failed to set pixel %d in WS281X to value %d: %s\",\n i,\n value,\n str(ex),\n )\n raise LightStringException(str(ex)).with_traceback(ex.__traceback__)\n\n # copy this class's array into the ws281x array\n if self.simulate is False:\n list(\n map(\n SetPixel,\n enumerate(self.rgbArray),\n )\n )\n # send the signal out\n self.pixelStrip.show()\n except SystemExit: # pylint:disable=try-except-raise\n raise\n except KeyboardInterrupt: # pylint:disable=try-except-raise\n raise\n except Exception as ex:\n LOGGER.exception('Function call \"show\" in WS281X object failed: %s', str(ex))\n raise LightStringException(str(ex)).with_traceback(ex.__traceback__)\n\n\nif __name__ == \"__main__\":\n LOGGER.info(\"Running LightString\")\n # the number of pixels in the light string\n PIXEL_COUNT = 100\n # GPIO pin to use for PWM signal\n GPIO_PWM_PIN = 18\n # DMA channel\n DMA_CHANNEL = 5\n # frequency to run the PWM signal at\n PWM_FREQUENCY = 800000\n GAMMA = None\n LED_STRIP_TYPE = None\n INVERT = False\n PWM_CHANNEL = 0\n with LightString(\n pixelStrip=rpi_ws281x.PixelStrip(\n num=PIXEL_COUNT,\n pin=GPIO_PWM_PIN,\n dma=DMA_CHANNEL,\n freq_hz=PWM_FREQUENCY,\n channel=PWM_CHANNEL,\n invert=INVERT,\n gamma=GAMMA,\n strip_type=LED_STRIP_TYPE,\n ),\n ) as liteStr:\n liteStr.refresh()\n p = Pixel((255, 0, 0))\n liteStr[4] = PixelColors.RED\n liteStr.refresh()\n time.sleep(1)\n" ]
[ [ "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
xadupre/mlprodict
[ "f82c8a26a60104948c67849b1c4af95ca812c153", "f82c8a26a60104948c67849b1c4af95ca812c153", "f82c8a26a60104948c67849b1c4af95ca812c153", "f82c8a26a60104948c67849b1c4af95ca812c153" ]
[ "mlprodict/onnxrt/ops_cpu/op_solve.py", "mlprodict/asv_benchmark/template/skl_model_multi_classifier.py", "mlprodict/onnxrt/model_checker.py", "_unittests/ut_grammar_sklearn/test_grammar_sklearn_linear.py" ]
[ "# -*- encoding: utf-8 -*-\n# pylint: disable=E0203,E1101,C0111\n\"\"\"\n@file\n@brief Runtime operator.\n\"\"\"\nfrom scipy.linalg import solve\nfrom ._op import OpRunBinaryNum\nfrom ._new_ops import OperatorSchema\n\n\nclass Solve(OpRunBinaryNum):\n\n atts = {'lower': False,\n 'transposed': False}\n\n def __init__(self, onnx_node, desc=None, **options):\n OpRunBinaryNum.__init__(self, onnx_node, desc=desc,\n expected_attributes=Solve.atts,\n **options)\n\n def _find_custom_operator_schema(self, op_name):\n if op_name == \"Solve\":\n return SolveSchema()\n raise RuntimeError( # pragma: no cover\n \"Unable to find a schema for operator '{}'.\".format(op_name))\n\n def _run(self, a, b): # pylint: disable=W0221\n if self.inplaces.get(1, False):\n return (solve(a, b, overwrite_b=True, lower=self.lower,\n transposed=self.transposed), )\n return (solve(a, b, lower=self.lower, transposed=self.transposed), )\n\n def _infer_shapes(self, a, b): # pylint: disable=W0221\n \"\"\"\n Returns the shapes.\n \"\"\"\n return (b, )\n\n def to_python(self, inputs):\n return ('from scipy.linalg import solve',\n \"return solve({}, {}, lower={}, transposed={})\".format(\n inputs[0], inputs[1], self.lower, self.transposed))\n\n\nclass SolveSchema(OperatorSchema):\n \"\"\"\n Defines a schema for operators added in this package\n such as @see cl TreeEnsembleClassifierDouble.\n \"\"\"\n\n def __init__(self):\n OperatorSchema.__init__(self, 'Solve')\n self.attributes = Solve.atts\n", "\"\"\"\nA template to benchmark a model\nwith :epkg:`asv`. The benchmark can be run through\nfile :epkg:`run_asv.sh` on Linux or :epkg:`run_asv.bat` on\nWindows.\n\n.. warning::\n On Windows, you should avoid cloning the repository\n on a folder with a long full name. Visual Studio tends to\n abide by the rule of the maximum path length even though\n the system is told otherwise.\n\"\"\"\nimport numpy # pylint: disable=W0611\nfrom mlprodict.tools.asv_options_helper import get_opset_number_from_onnx\n# Import specific to this model.\nfrom sklearn.tree import DecisionTreeClassifier # pylint: disable=C0411\n\nfrom mlprodict.asv_benchmark import _CommonAsvSklBenchmarkMultiClassifier # pylint: disable=C0412\nfrom mlprodict.onnx_conv import to_onnx # pylint: disable=W0611, C0412\nfrom mlprodict.onnxrt import OnnxInference # pylint: disable=W0611, C0412\n\n\nclass TemplateBenchmarkMultiClassifier(_CommonAsvSklBenchmarkMultiClassifier):\n \"\"\"\n :epkg:`asv` example for a classifier,\n Full template can be found in\n `common_asv_skl.py <https://github.com/sdpython/mlprodict/\n blob/master/mlprodict/asv_benchmark/common_asv_skl.py>`_.\n \"\"\"\n params = [\n ['skl', 'pyrtc', 'ort'], # values for runtime\n [1, 10, 100, 1000, 10000, 100000], # values for N\n [4, 20], # values for nf\n [get_opset_number_from_onnx()], # values for opset\n ['float', 'double'], # values for dtype\n [None], # values for optim\n ]\n\n # additional parameters\n\n def setup_cache(self): # pylint: disable=W0235\n super().setup_cache()\n\n def _create_model(self):\n return DecisionTreeClassifier()\n", "\"\"\"\n@file\n@brief Investigate issues happening with float32.\n\"\"\"\nimport numpy\nfrom numpy.random import randint\n\n\ndef astype_range(arr, dtype=numpy.float32, force=1):\n \"\"\"\n Computes ranges for every number in an array\n once converted into *float32*. 
The function returns\n two matrices which produces two numbers\n *a* et *b*, the number rounded to float32\n is in interval :math:`[a, b]`.\n\n @param arr array\n @param dtype type to convert to\n @param force does something like *[i] +/- force |i - [i]|*\n @return minimum, maximum\n \"\"\"\n conv = arr.astype(dtype)\n delta = numpy.abs(arr - conv)\n delta = numpy.maximum(numpy.abs(arr) * 1e-7, delta)\n maxa = (conv + delta * force).astype(dtype)\n mina = (conv - delta * force).astype(dtype)\n return mina, maxa\n\n\ndef enumerate_random_inputs(inputs, n=100, dtype=numpy.float32, force=1):\n \"\"\"\n Enumerates random matrices.\n\n @param inputs inputs (dictionary)\n @param n number of iterations\n @param dtype type to convert to\n @param force does something like *[i] +/- force |i - [i]|*\n \"\"\"\n keys = list(inputs)\n ranges = {k: astype_range(v, dtype=dtype, force=force)\n for k, v in inputs.items()}\n for _ in range(n):\n new_inputs = {}\n for k in keys:\n rnd = randint(0, 2, inputs[k].size).reshape( # pylint: disable=E1101\n inputs[k].shape) # pylint: disable=E1101\n if rnd.min() == rnd.max() or rnd.max() != 1:\n raise RuntimeError(\"Minimum and maximum are equal or maximum is not 1. \"\n \"Randomness failed.\")\n rnd = rnd.astype(dtype)\n ma1 = ranges[k][0] * rnd\n ma2 = ranges[k][1] * (-(rnd - 1))\n inp = (ma1 + ma2)\n new_inputs[k] = inp\n yield new_inputs\n\n\ndef onnx_shaker(oinf, inputs, output_fct, n=100, dtype=numpy.float32, force=1):\n \"\"\"\n Shakes a model :epkg:`ONNX`.\n Explores the ranges for every prediction.\n Uses @see fn astype_range\n\n @param oinf object of type @see cl OnnxInference\n @param inputs inputs\n @param output_fct output function which extracts\n a single array from the output\n @param dtype type to convert to\n @param force does something like *[i] +/- force |i - [i]|*\n @return ranges for each predictions\n\n See notebook :ref:`onnxshakerrst` for an example of use.\n \"\"\"\n results = None\n for i, new_inputs in enumerate(enumerate_random_inputs(\n inputs, n=n, dtype=dtype, force=force)):\n res_ = oinf.run(new_inputs)\n res = output_fct(res_)\n sq = numpy.squeeze(res)\n if len(sq.shape) != 1:\n raise ValueError(\n \"The function only works with shape={}\".format(sq.shape))\n if results is None:\n results = numpy.empty((sq.shape[0], n), dtype=sq.dtype)\n results[:, i] = sq\n\n results.sort(axis=1)\n return results\n", "\"\"\"\n@brief test log(time=2s)\n\"\"\"\nimport unittest\nimport platform\nimport numpy\nfrom pyquickhelper.pycode import ExtTestCase\nfrom mlprodict.testing import iris_data, check_model_representation\nfrom mlprodict.grammar_sklearn import sklearn2graph, identify_interpreter\nfrom mlprodict.cc import compile_c_function\n\n\nclass TestGrammarSklearnLinear(ExtTestCase):\n\n def test_sklearn_lr(self):\n from sklearn.linear_model import LogisticRegression\n lr = LogisticRegression()\n gr = identify_interpreter(lr)\n self.assertCallable(gr)\n\n def test_sklearn_train_lr(self):\n from sklearn.linear_model import LogisticRegression\n from sklearn.datasets import load_iris\n iris = load_iris()\n X = iris.data[:, :2]\n y = iris.target\n y[y == 2] = 1\n lr = LogisticRegression()\n lr.fit(X, y)\n gr = sklearn2graph(lr, output_names=['Prediction', 'Score'])\n\n X = numpy.array([[numpy.float32(1), numpy.float32(2)]])\n e1 = lr.predict(X)\n p1 = lr.decision_function(X)\n e2 = gr.execute(Features=X[0, :])\n self.assertEqual(e1[0], e2[0])\n self.assertEqualFloat(p1, e2[1])\n\n ser = gr.export(lang=\"json\", hook={'array': lambda v: v.tolist()})\n 
self.maxDiff = None\n self.assertEqual(6, len(ser))\n # import json\n # print(json.dumps(ser, sort_keys=True, indent=2))\n # self.assertEqual(ser, exp) # training not always the same\n\n @unittest.skipIf(platform.system().lower() == \"darwin\",\n reason=\"compilation issue with CFFI\")\n def test_sklearn_train_lr_into_c(self):\n from sklearn.linear_model import LogisticRegression\n from sklearn.datasets import load_iris\n iris = load_iris()\n X = iris.data[:, :2]\n y = iris.target\n y[y == 2] = 1\n lr = LogisticRegression()\n lr.fit(X, y)\n gr = sklearn2graph(lr, output_names=['Prediction', 'Score'])\n\n code_c = gr.export(lang=\"c\")['code']\n if code_c is None:\n raise ValueError(\"cannot be None\")\n\n X = numpy.array([[numpy.float32(1), numpy.float32(2)]])\n fct = compile_c_function(code_c, 2)\n\n e2 = fct(X[0, :])\n e1 = lr.predict(X)\n p1 = lr.decision_function(X)\n self.assertEqual(e1[0], e2[0])\n self.assertEqualFloat(p1, e2[1])\n\n @unittest.skipIf(platform.system().lower() == \"darwin\", reason=\"compilation issue with CFFI\")\n def test_sklearn_linear_regression_verbose(self):\n from sklearn.linear_model import LinearRegression\n X, y = iris_data()\n rows = []\n\n def myprint(*args, **kwargs):\n rows.append(' '.join(map(str, args)))\n\n check_model_representation(\n LinearRegression, X, y, verbose=True, fLOG=myprint)\n self.assertGreater(len(rows), 2)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "scipy.linalg.solve" ], [ "sklearn.tree.DecisionTreeClassifier" ], [ "numpy.squeeze", "numpy.abs", "numpy.empty", "numpy.random.randint" ], [ "sklearn.datasets.load_iris", "sklearn.linear_model.LogisticRegression", "numpy.float32" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.12", "0.14", "0.15" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
smellslikeml/rikai
[ "179526dfe98b21059371d83f7540e3d43aa1200f" ]
[ "python/rikai/types/vision.py" ]
[ "#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Vision Related User-defined Types:\n\n- :py:class:`Image`\n\"\"\"\n\nfrom __future__ import annotations\n\nfrom io import IOBase\nfrom pathlib import Path\nfrom tempfile import NamedTemporaryFile\nfrom typing import Union\nfrom urllib.parse import urlparse\n\n# Third-party libraries\nimport numpy as np\nfrom PIL import Image as PILImage\n\n# Rikai\nfrom rikai.internal.uri_utils import normalize_uri\nfrom rikai.io import copy\nfrom rikai.mixin import Asset, Displayable, ToNumpy, ToPIL\nfrom rikai.spark.types import ImageType\n\n__all__ = [\"Image\"]\n\n\nclass Image(ToNumpy, ToPIL, Asset, Displayable):\n \"\"\"An external Image Asset.\n\n It contains a reference URI to an image stored on the remote system.\n\n Parameters\n ----------\n image : bytes, file-like object, str or :py:class:`~pathlib.Path`\n It can be the content of image, or a URI / Path of an image.\n \"\"\"\n\n __UDT__ = ImageType()\n\n def __init__(\n self,\n image: Union[bytes, bytearray, IOBase, str, Path],\n ):\n data, uri = None, None\n if isinstance(image, IOBase):\n data = image.read()\n elif isinstance(image, (bytes, bytearray)):\n data = image\n else:\n uri = image\n super().__init__(data=data, uri=uri)\n\n @classmethod\n def from_array(\n cls,\n array: np.ndarray,\n uri: Union[str, Path],\n mode: str = None,\n format: str = None,\n **kwargs,\n ) -> Image:\n \"\"\"Create an image in memory from numpy array.\n\n Parameters\n ----------\n array : np.ndarray\n Array data\n uri : str or Path\n The external URI to store the data.\n mode : str, optional\n The mode which PIL used to create image. See supported\n `modes on PIL document <https://pillow.readthedocs.io/en/stable/handbook/concepts.html#concept-modes>`_.\n format : str, optional\n The image format to save as. See\n `supported formats <https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.save>`_ for details.\n kwargs : dict, optional\n Optional arguments to pass to `PIL.Image.save <https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.save>`_.\n\n See Also\n --------\n :py:class:`PIL.Image.fromarray`\n :py:func:`~rikai.spark.functions.vision.numpy_to_image`\n\n \"\"\" # noqa: E501\n\n assert array is not None\n img = PILImage.fromarray(array, mode=mode)\n return cls.from_pil(img, uri, format=format, **kwargs)\n\n @staticmethod\n def from_pil(\n img: PILImage, uri: Union[str, Path], format: str = None, **kwargs\n ) -> Image:\n \"\"\"Create an image in memory from a :py:class:`PIL.Image`.\n\n Parameters\n ----------\n img : :py:class:`PIL.Image`\n An PIL Image instance\n uri : str or Path\n The URI to store the image externally.\n format : str, optional\n The image format to save as. 
See\n `supported formats <https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.save>`_ for details.\n kwargs : dict, optional\n Optional arguments to pass to `PIL.Image.save <https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.save>`_.\n \"\"\" # noqa: E501\n parsed = urlparse(normalize_uri(uri))\n if parsed.scheme == \"file\":\n img.save(uri, format=format, **kwargs)\n else:\n with NamedTemporaryFile() as fobj:\n img.save(fobj, format=format, **kwargs)\n fobj.flush()\n copy(fobj.name, uri)\n return Image(uri)\n\n def display(self, **kwargs):\n \"\"\"\n Custom visualizer for this image in jupyter notebook\n\n Parameters\n ----------\n kwargs: dict\n Optional display arguments\n\n Returns\n -------\n img: IPython.display.Image\n \"\"\"\n from IPython.display import Image\n\n with self.open() as fobj:\n return Image(fobj.read(), **kwargs)\n\n def __repr__(self) -> str:\n return f\"Image(uri={self.uri})\"\n\n def _repr_html_(self):\n \"\"\"Default visualizer for remote ref (or local ref under cwd)\"\"\"\n return self.display()._repr_html_()\n\n def _repr_mimebundle_(self, include=None, exclude=None):\n \"\"\"default visualizer for embedded mime bundle\"\"\"\n return self.display()._repr_mimebundle_(\n include=include, exclude=exclude\n )\n\n def _repr_jpeg_(self):\n \"\"\"default visualizer for embedded jpeg\"\"\"\n return self.display()._repr_jpeg_()\n\n def _repr_png_(self):\n \"\"\"default visualizer for embedded png\"\"\"\n return self.display()._repr_png_()\n\n def __eq__(self, other) -> bool:\n return isinstance(other, Image) and super().__eq__(other)\n\n def to_pil(self) -> PILImage:\n \"\"\"Return an PIL image.\n\n Note\n ----\n The caller should close the image.\n https://pillow.readthedocs.io/en/stable/reference/open_files.html#image-lifecycle\n \"\"\"\n return PILImage.open(self.open())\n\n def to_numpy(self) -> np.ndarray:\n \"\"\"Convert this image into an :py:class:`numpy.ndarray`.\"\"\"\n with self.to_pil() as pil_img:\n return np.asarray(pil_img)\n" ]
[ [ "numpy.asarray" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JamesTheZ/BladeDISC
[ "e6c76ee557ebfccd560d44f6b6276bbc4e0a8a34", "e6c76ee557ebfccd560d44f6b6276bbc4e0a8a34", "e6c76ee557ebfccd560d44f6b6276bbc4e0a8a34" ]
[ "pytorch_blade/tests/tensorrt/test_support_info.py", "pytorch_blade/tests/disc/test_is_mlir_mhlo_supported.py", "tao/tao_bridge/test/tao_ut_common.py" ]
[ "# Copyright 2022 The BladeDISC Authors. All rights reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport collections\nimport unittest\nimport torch\nfrom torch.nn import functional as F\nfrom torch_blade import tensorrt\nfrom torch_blade import utils\nfrom torch_blade import tools\nfrom torch_blade import Config\nfrom torch_blade.logging import logger\nfrom torch_blade.testing.common_utils import Feedforward, TestCase\nfrom tests.tensorrt import skipIfNoTensorRT\nfrom torch_blade.onnx_backends.backend_testbed import OnnxBackendChecker\n\n\n@skipIfNoTensorRT()\nclass TestTensorRTSupportInfo(TestCase):\n def test_support_info(self):\n input = torch.ones([10, 10]).cuda()\n net = Feedforward(10, 10)\n net.eval().cuda()\n module = torch.jit.trace(net, input)\n module = tools.freeze_module(module._c, disableShapePeephole=False)\n graph = module.forward.graph\n\n unsupported = tensorrt.get_unsupported_nodes(graph)\n self.assertEqual(len(unsupported), 0)\n\n def test_empty_onnx_export(self):\n class Model(torch.nn.Module):\n def __init__(self):\n super(Model, self).__init__()\n self.linear = torch.nn.Linear(3, 4)\n self.dropout = torch.nn.Dropout(p=0.8)\n\n def forward(self, x):\n x = self.linear(x)\n x = self.dropout(x)\n return x.contiguous().detach()\n\n model = Model().cuda().eval()\n module = torch.jit.trace(model, torch.ones([2, 3]).cuda())\n module = tools.freeze_module(module._c, disableShapePeephole=False)\n graph = module.forward.graph\n\n unsupported = tensorrt.get_unsupported_nodes(graph)\n self.assertEqual(len(unsupported), 0)\n\n def test_inplace_safety(self):\n class BasicBlock(torch.nn.Module):\n def __init__(self):\n super(BasicBlock, self).__init__()\n self.conv1 = torch.nn.Conv2d(3, 10, kernel_size=3, padding=1)\n self.conv2 = torch.nn.Conv2d(10, 3, kernel_size=3, padding=1)\n self.conv3 = torch.nn.Conv2d(3, 3, kernel_size=3, padding=1)\n self.bnorm = torch.nn.BatchNorm2d(3)\n\n def forward_inplace(self, x):\n out = self.conv1(x)\n # this inplace bias is supported\n out += 1\n # this inplace relu_ is supported\n out = F.relu_(out)\n out = self.conv2(out)\n # this inplace relu_ is supported\n out = F.relu_(out)\n shortcut = out\n # this inplace add_ is supported\n out += shortcut\n shortcut = out\n out = self.conv3(out)\n out = self.bnorm(out)\n # this inplace add_ is supported\n out += shortcut\n out1 = out[:, :1, :, :]\n out2 = out[:, 1:, :, :]\n out1 = F.relu_(out1)\n out2 = F.relu_(out2)\n out[:, :1, :, :] = out1\n out[:, 1:, :, :] = out2\n return out\n\n def forward_no_inplace(self, x):\n out = self.conv1(x)\n out = out + 1\n out = F.relu(out)\n out = self.conv2(out)\n out = F.relu(out)\n shortcut = out\n out = out + shortcut\n shortcut = out\n out = self.conv3(out)\n out = self.bnorm(out)\n out = out + shortcut\n out = F.relu(out)\n return out\n\n class Model(torch.nn.Module):\n def __init__(self):\n super(Model, self).__init__()\n self.block1 = BasicBlock()\n self.block2 = BasicBlock()\n\n def forward(self, x):\n out1 = self.block1.forward_inplace(x)\n out1 = 
self.block2.forward_inplace(out1)\n out2 = self.block1.forward_no_inplace(x)\n out2 = self.block2.forward_no_inplace(out2)\n return out1, out2\n\n model = Model()\n model.eval()\n model.cuda()\n\n batch = torch.ones([1, 3, 224, 224])\n batch = batch.cuda()\n out1, out2 = model(batch)\n self.assertEqual(out1, out2)\n traced_model = torch.jit.trace(model, batch)\n frozen_module = tools.freeze_module(traced_model._c, disableShapePeephole=False)\n graph = frozen_module.forward.graph\n ops_counter = utils.list_ops_count(graph)\n unspt_counter = collections.Counter()\n unsupported = tensorrt.get_unsupported_nodes(graph)\n for node in unsupported:\n unspt_counter[node.kind()] += 1\n self.assertEqual(ops_counter[\"aten::slice\"], unspt_counter[\"aten::slice\"])\n self.assertEqual(ops_counter[\"aten::view\"], unspt_counter[\"aten::view\"])\n self.assertEqual(ops_counter[\"aten::copy_\"], unspt_counter[\"aten::copy_\"])\n self.assertEqual(ops_counter[\"aten::expand\"], unspt_counter[\"aten::expand\"])\n self.assertEqual(unspt_counter[\"aten::relu_\"], 4)\n logger.info(ops_counter)\n logger.info(unspt_counter)\n self.assertEqual(unspt_counter[\"aten::add_\"], 0)\n\n def test_inplace_safety_another(self):\n def op(x):\n return x + 1\n\n def op_(x):\n x -= 1\n return x\n\n def _count_unsupported(unspt):\n unspt_counter = collections.Counter()\n for node in unspt:\n unspt_counter[node.kind()] += 1\n return unspt_counter\n\n def _count_graph(graph):\n unsupported = tensorrt.get_unsupported_nodes(graph, ignore_device=True)\n return _count_unsupported(unsupported)\n\n def _count_model(model):\n model.eval().cuda()\n input = torch.zeros([4]).cuda()\n output = model(input)\n traced_module = torch.jit.trace(model, (input,))\n graph = traced_module.graph\n return _count_graph(graph)\n\n class Model1(torch.nn.Module):\n \"\"\"\n Within this model, torch.jit.trace will produce graph like:\n %2 : Float = aten::add(%1, some_constant)\n %3 : Float = aten::sub_(%2, some_constant)\n %4 : Float = aten::add(%3, some_constant)\n\n The input of the third node is %3 instead of %2 which is not consistent with the definition of the\n corresponding nn.Module. So the inplace node aten::sub_ is the last consumer of its inputs which make it\n inplace-safe, and therefore all the nodes in this graph is inplace-safe.\n\n The same phenomenon occurs in model2. So we manually add two graphs that have 'correct' topology structures\n with corresponding nn.Module (i.e. 
Model1 and Model2) and use them as UTs.\n \"\"\"\n\n def forward(self, x):\n x1 = op(x)\n x2 = op_(x1)\n x3 = op(x1)\n return x3\n\n class Model2(torch.nn.Module):\n def forward(self, x):\n x1 = op(x)\n x2 = op_(x1) # support\n x3 = op_(x2) # support\n x4 = op(x3)\n x5 = op_(x3) # not support\n x6 = op_(x5) # not support\n x7 = op(x3)\n return x7\n\n unspt_counter = _count_model(Model1())\n self.assertEqual(unspt_counter[\"aten::sub_\"], 0)\n unspt_counter = _count_model(Model2())\n self.assertEqual(unspt_counter[\"aten::sub_\"], 0)\n\n if utils.torch_version_number() >= utils.parse_version(\"1.8.1\"):\n graph1 = torch.parse_ir(\n \"\"\"\n graph( %x.1 : Float(4)):\n %1 : int = prim::Constant[value=1]()\n %2 : Float(4) = aten::add(%x.1, %1, %1)\n %3 : int = prim::Constant[value=1]()\n %4 : Float(4) = aten::sub_(%2, %3, %3)\n %5 : int = prim::Constant[value=1]()\n %6 : Float(4) = aten::add(%2, %5, %5)\n return (%6)\n \"\"\"\n )\n\n graph2 = torch.parse_ir(\n \"\"\"\n graph( %x.1 : Float(4)):\n %1 : int = prim::Constant[value=1]()\n %2 : Float(4) = aten::add(%x.1, %1, %1)\n %3 : int = prim::Constant[value=1]()\n %4 : Float(4) = aten::sub_(%2, %3, %3)\n %5 : int = prim::Constant[value=1]()\n %6 : Float(4) = aten::sub_(%4, %5, %5)\n %7 : int = prim::Constant[value=1]()\n %8 : Float(4) = aten::add(%6, %7, %7)\n %9 : int = prim::Constant[value=1]()\n %10 : Float(4) = aten::sub_(%6, %9, %9)\n %11 : int = prim::Constant[value=1]()\n %12 : Float(4) = aten::sub_(%10, %11, %11)\n %13 : int = prim::Constant[value=1]()\n %14 : Float(4) = aten::add(%6, %13, %13)\n return (%14)\n \"\"\"\n )\n else:\n graph1 = torch.parse_ir(\n \"\"\"\n graph( %x.1 : Float(4:1)):\n %1 : int = prim::Constant[value=1]()\n %2 : Float(4:1) = aten::add(%x.1, %1, %1)\n %3 : int = prim::Constant[value=1]()\n %4 : Float(4:1) = aten::sub_(%2, %3, %3)\n %5 : int = prim::Constant[value=1]()\n %6 : Float(4:1) = aten::add(%2, %5, %5)\n return (%6)\n \"\"\"\n )\n\n graph2 = torch.parse_ir(\n \"\"\"\n graph( %x.1 : Float(4:1)):\n %1 : int = prim::Constant[value=1]()\n %2 : Float(4:1) = aten::add(%x.1, %1, %1)\n %3 : int = prim::Constant[value=1]()\n %4 : Float(4:1) = aten::sub_(%2, %3, %3)\n %5 : int = prim::Constant[value=1]()\n %6 : Float(4:1) = aten::sub_(%4, %5, %5)\n %7 : int = prim::Constant[value=1]()\n %8 : Float(4:1) = aten::add(%6, %7, %7)\n %9 : int = prim::Constant[value=1]()\n %10 : Float(4:1) = aten::sub_(%6, %9, %9)\n %11 : int = prim::Constant[value=1]()\n %12 : Float(4:1) = aten::sub_(%10, %11, %11)\n %13 : int = prim::Constant[value=1]()\n %14 : Float(4:1) = aten::add(%6, %13, %13)\n return (%14)\n \"\"\"\n )\n unspt_counter = _count_graph(graph1)\n self.assertEqual(unspt_counter[\"aten::sub_\"], 1)\n unspt_counter = _count_graph(graph2)\n self.assertEqual(unspt_counter[\"aten::sub_\"], 2)\n\n def test_graph_input_inplace_safe(self):\n class Model(torch.nn.Module):\n def forward(self, x):\n return F.relu_(x)\n\n batch = torch.Tensor([1, -1, 1, -1])\n batch = batch.cuda()\n model = Model().eval().cuda()\n traced_model = torch.jit.trace(model, batch)\n self.assertEqual(batch, torch.Tensor([1, 0, 1, 0]))\n\n frozen_module = torch._C._freeze_module(traced_model._c)\n graph = frozen_module.forward.graph\n unspt_counter = collections.Counter()\n unsupported = tensorrt.get_unsupported_nodes(graph)\n for node in unsupported:\n unspt_counter[node.kind()] += 1\n self.assertEqual(unspt_counter[\"aten::relu_\"], 1)\n\n def test_view_kinds_0(self):\n if utils.torch_version_number() >= utils.parse_version(\"1.8.1\"):\n graph = 
torch.parse_ir(\n \"\"\"\n graph( %x.1 : Float(1, 1, 1)):\n %1 : int = prim::Constant[value=0]()\n %2 : int = prim::Constant[value=1]()\n %3 : Float(1, 1) = aten::select(%x.1, %1, %2)\n %4 : int = prim::Constant[value=0]()\n %5 : int = prim::Constant[value=1]()\n %6 : Float(1) = aten::select(%3, %4, %5)\n %7 : int = prim::Constant[value=1]()\n %8 : int = prim::Constant[value=1]()\n %9 : Float(1) = aten::add(%6, %7, %8)\n return (%9)\n \"\"\"\n )\n else:\n graph = torch.parse_ir(\n \"\"\"\n graph( %x.1 : Float(1:1, 1:1, 1:1)):\n %1 : int = prim::Constant[value=0]()\n %2 : int = prim::Constant[value=1]()\n %3 : Float(1:1, 1:1) = aten::select(%x.1, %1, %2)\n %4 : int = prim::Constant[value=0]()\n %5 : int = prim::Constant[value=1]()\n %6 : Float(1:1) = aten::select(%3, %4, %5)\n %7 : int = prim::Constant[value=1]()\n %8 : int = prim::Constant[value=1]()\n %9 : Float(1:1) = aten::add(%6, %7, %8)\n return (%9)\n \"\"\"\n )\n unsupported = tensorrt.get_unsupported_nodes(graph, True)\n self.assertEqual(len(unsupported), 0)\n\n def test_view_kinds_1(self):\n if utils.torch_version_number() >= utils.parse_version(\"1.8.1\"):\n graph = torch.parse_ir(\n \"\"\"\n graph( %x.1 : Float(1, 1, 1)):\n %1 : int = prim::Constant[value=0]()\n %2 : int = prim::Constant[value=1]()\n %3 : Float(1, 1) = aten::select(%x.1, %1, %2)\n %4 : int = prim::Constant[value=0]()\n %5 : int = prim::Constant[value=1]()\n %6 : Float(1) = aten::select(%3, %4, %5)\n %7 : int = prim::Constant[value=1]()\n %8 : int = prim::Constant[value=1]()\n %9 : Float(1) = aten::add_(%6, %7, %8)\n return (%9)\n \"\"\"\n )\n else:\n graph = torch.parse_ir(\n \"\"\"\n graph( %x.1 : Float(1:1, 1:1, 1:1)):\n %1 : int = prim::Constant[value=0]()\n %2 : int = prim::Constant[value=1]()\n %3 : Float(1:1, 1:1) = aten::select(%x.1, %1, %2)\n %4 : int = prim::Constant[value=0]()\n %5 : int = prim::Constant[value=1]()\n %6 : Float(1:1) = aten::select(%3, %4, %5)\n %7 : int = prim::Constant[value=1]()\n %8 : int = prim::Constant[value=1]()\n %9 : Float(1:1) = aten::add_(%6, %7, %8)\n return (%9)\n \"\"\"\n )\n unsupported = tensorrt.get_unsupported_nodes(graph, True)\n self.assertEqual(len(unsupported), 3)\n\n def test_view_kinds_2(self):\n if utils.torch_version_number() >= utils.parse_version(\"1.8.1\"):\n graph = torch.parse_ir(\n \"\"\"\n graph( %x.1 : Float(1, 1, 1)):\n %1 : int = prim::Constant[value=0]()\n %2 : int = prim::Constant[value=1]()\n %3 : Float(1, 1) = aten::select(%x.1, %1, %2)\n %4 : int = prim::Constant[value=0]()\n %5 : int = prim::Constant[value=1]()\n %6 : Float(1, 1) = aten::add_(%3, %4, %5)\n %7 : int = prim::Constant[value=1]()\n %8 : int = prim::Constant[value=1]()\n %9 : Float(1) = aten::select(%3, %7, %8)\n return (%9)\n \"\"\"\n )\n else:\n graph = torch.parse_ir(\n \"\"\"\n graph( %x.1 : Float(1:1, 1:1, 1:1)):\n %1 : int = prim::Constant[value=0]()\n %2 : int = prim::Constant[value=1]()\n %3 : Float(1:1, 1:1) = aten::select(%x.1, %1, %2)\n %4 : int = prim::Constant[value=0]()\n %5 : int = prim::Constant[value=1]()\n %6 : Float(1:1, 1:1) = aten::add_(%3, %4, %5)\n %7 : int = prim::Constant[value=1]()\n %8 : int = prim::Constant[value=1]()\n %9 : Float(1:1) = aten::select(%3, %7, %8)\n return (%9)\n \"\"\"\n )\n unsupported = tensorrt.get_unsupported_nodes(graph, True)\n self.assertEqual(len(unsupported), 3)\n\n # NOTE: this unsupported set length should be 3 (two aten::select and one aten::add_)\n # However, due to a flaw of the inplace safety check algorithm, aten::add_ is excluded\n # in the set.\n # todo: fix this error.\n 
# graph = torch.parse_ir(\n # '''\n # graph( %x.1 : Float(1:1, 1:1, 1:1)):\n # %1 : int = prim::Constant[value=0]()\n # %2 : int = prim::Constant[value=1]()\n # %3 : Float(1:1, 1:1, 1:1) = aten::add(%x.1, %1, %2)\n # %4 : int = prim::Constant[value=0]()\n # %5 : int = prim::Constant[value=1]()\n # %6 : Float(1:1, 1:1) = aten::select(%3, %4, %5)\n # %7 : Float(1:1, 1:1) = aten::add_(%3, %4, %5)\n # %8 : int = prim::Constant[value=1]()\n # %9 : int = prim::Constant[value=1]()\n # %10 : Float(1:1) = aten::select(%6, %8, %9)\n # return (%9)\n # '''\n # )\n # unsupported = tensorrt.get_unsupported_nodes(graph, True)\n # self.assertEqual(len(unsupported), 2)\n\n@skipIfNoTensorRT()\nclass TestManRules(TestCase):\n def _make_check(self, graph, target):\n checker = OnnxBackendChecker(graph, tensorrt.is_onnx2trt_supported, \"TensorRT\")\n is_supported = checker()\n self.assertEqual(is_supported, target)\n\n def test_aten_mul(self):\n graph = torch.parse_ir(\n \"\"\"\n graph(%0 : int[]):\n %1 : int = prim::Constant[value=1]()\n %3 : int = aten::mul(%0, %1)\n return (%3)\n \"\"\"\n )\n self._make_check(graph, False)\n\n def test_aten_add(self):\n graph = torch.parse_ir(\n \"\"\"\n graph(%0 : int[], %1 : int[]):\n %2 : int[] = aten::add(%0, %1)\n return (%2)\n \"\"\"\n )\n self._make_check(graph, False)\n\n def test_aten_eq(self):\n graph = torch.parse_ir(\n \"\"\"\n graph(%0 : int[]):\n %1 : int = prim::Constant[value=1]()\n %2 : int[] = prim::ListConstruct(%1)\n %3 : bool = aten::eq(%0, %2)\n return (%3)\n \"\"\"\n )\n self._make_check(graph, False)\n\n def test_const_fold_before_export(self):\n if utils.torch_version_number() >= utils.parse_version(\"1.8.1\"):\n graph = torch.parse_ir(\n \"\"\"\n graph(%input0.2 : Float(1, 512, 18, 18, requires_grad=0, device=cuda:0)):\n %1 : None = prim::Constant() # :0:0\n %2 : bool = prim::Constant[value=1]()\n %3 : float[] = prim::Constant[value=[2., 2.]]()\n %x1.3 : Float(1, 512, 36, 36, requires_grad=0, device=cuda:0) = aten::upsample_bilinear2d(%input0.2, %1, %2, %3)\n return (%x1.3)\n \"\"\"\n )\n else:\n graph = torch.parse_ir(\n \"\"\"\n graph(%input0.2 : Float(1:165888, 512:324, 18:18, 18:1, requires_grad=0, device=cuda:0)):\n %1 : None = prim::Constant() # :0:0\n %2 : bool = prim::Constant[value=1]()\n %3 : float[] = prim::Constant[value=[2., 2.]]()\n %x1.3 : Float(1:663552, 512:1296, 36:36, 36:1, requires_grad=0, device=cuda:0) = aten::upsample_bilinear2d(%input0.2, %1, %2, %3)\n return (%x1.3)\n \"\"\"\n )\n cfg = Config.get_current_context_or_new().clone()\n cfg.customize_onnx_opset_version = 11\n with cfg:\n self._make_check(graph, True)\n\n def test_scalar_input_on_graph(self):\n if utils.torch_version_number() >= utils.parse_version(\"1.8.1\"):\n graph = torch.parse_ir(\n \"\"\"\n graph(%x.3 : Float(1, 64, 1, 1, requires_grad=0, device=cuda:0),\n %1 : int):\n %2 : int = prim::Constant[value=-1]()\n %3 : int[] = prim::ListConstruct(%1, %2)\n %input.14 : Float(1, 64, requires_grad=0, device=cuda:0) = aten::view(%x.3, %3)\n return (%input.14)\n \"\"\"\n )\n else:\n graph = torch.parse_ir(\n \"\"\"\n graph(%x.3 : Float(1:64, 64:1, 1:1, 1:1, requires_grad=0, device=cuda:0),\n %1 : int):\n %2 : int = prim::Constant[value=-1]()\n %3 : int[] = prim::ListConstruct(%1, %2)\n %input.14 : Float(1:64, 64:1, requires_grad=0, device=cuda:0) = aten::view(%x.3, %3)\n return (%input.14)\n \"\"\"\n )\n self._make_check(graph, True)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "# Copyright 2021 The BladeDISC Authors. 
All rights reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\nimport unittest\n\nfrom torch_blade import mlir\n\nfrom tests.disc.testing_base import DiscTestCase\n\n\nclass TestDiscTools(DiscTestCase):\n\n def test_const_tensor(self):\n gstr = \"\"\"\n graph(%x.1 : Tensor,\n %y.1 : Tensor):\n %4 : Tensor = aten::mul(%x.1, %y.1)\n return (%4)\"\"\"\n\n graph = torch._C.parse_ir(gstr)\n all_unspt = all(not mlir.is_mlir_mhlo_supported(n) for n in graph.nodes())\n self.assertTrue(all_unspt)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "#!/usr/bin/env python\n# Copyright 2021 The BladeDISC Authors. All rights reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom __future__ import print_function\n\nimport datetime as dt\nimport os\nimport shutil\nimport sys\nimport tempfile\nimport unittest\n\nimport numpy as np\n\n\nclass TaoTestCase(unittest.TestCase):\n \"\"\"The base class for all tao tests. 
It helps to setup and tear down test.\"\"\"\n DUMP_PATH = \"tmp/graph\"\n LIB_TAO_OPS = None\n\n @staticmethod\n def _locate_tao_compiler():\n \"\"\"\n Try to find tao_compiler binary under bazel build path if not specified by\n user or found already.\n \"\"\"\n if 'TAO_COMPILER_PATH' not in os.environ:\n file_dir = os.path.abspath(os.path.dirname(__file__))\n compiler = os.path.join(file_dir, os.pardir, os.pardir, os.pardir, \"tf_community\",\n \"bazel-bin\", \"tensorflow\", \"compiler\", \"decoupling\",\n \"tao_compiler_main\")\n compiler = os.path.abspath(compiler)\n assert os.path.exists(compiler), \\\n \"Tao compiler not found at: \" + compiler\n assert os.path.isfile(compiler), \\\n \"Tao compiler is not a regular file: \" + compiler\n assert os.access(compiler, os.X_OK), \\\n \"Tao compiler is not executable: \" + compiler\n os.environ['TAO_COMPILER_PATH'] = compiler\n\n @staticmethod\n def _locate_lib_tao_ops():\n \"\"\"Try to find libtao_ops.so under tao build path.\"\"\"\n import tensorflow as tf\n file_dir = os.path.abspath(os.path.dirname(__file__))\n if TaoTestCase.LIB_TAO_OPS is None:\n tao_lib = os.path.join(\n file_dir, os.pardir, os.pardir, \"bazel-bin\", \"libtao_ops.so\")\n tao_lib = os.path.abspath(tao_lib)\n assert os.path.exists(tao_lib), \\\n \"libtao_ops.so not found at: \" + tao_lib\n assert os.path.isfile(tao_lib), \\\n \"libtao_ops.so is not a regular file: \" + tao_lib\n TaoTestCase.LIB_TAO_OPS = tf.load_op_library(tao_lib)\n\n @staticmethod\n def _setup_tf_logging():\n if 'TF_CPP_MIN_LOG_LEVEL' not in os.environ:\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # ERROR log\n if 'TF_CPP_MIN_VLOG_LEVEL' not in os.environ:\n os.environ['TF_CPP_MIN_VLOG_LEVEL'] = '0'\n\n @staticmethod\n def _setup_dump_graph(testcase=''):\n os.environ['TAO_DUMP_PASS_OUTPUT'] = 'true'\n os.environ['TAO_DUMP_WITH_UNIQUE_ID'] = 'false'\n if 'TAO_GRAPH_DUMP_PATH' not in os.environ:\n file_dir = os.path.abspath(os.path.dirname(__file__))\n TaoTestCase.DUMP_PATH = tempfile.mkdtemp(\n dir=file_dir,\n prefix=\"test_dump_{}_{}_\".format(\n testcase, dt.datetime.now().strftime('%m%d%H%M%S')))\n os.environ['TAO_GRAPH_DUMP_PATH'] = TaoTestCase.DUMP_PATH\n os.environ['TEST_TMPDIR'] = TaoTestCase.DUMP_PATH\n else:\n TaoTestCase.DUMP_PATH = os.environ['TAO_GRAPH_DUMP_PATH']\n\n if os.path.exists(TaoTestCase.DUMP_PATH):\n shutil.rmtree(TaoTestCase.DUMP_PATH)\n os.makedirs(TaoTestCase.DUMP_PATH)\n\n @staticmethod\n def dumped_file(name):\n \"\"\"Get full path of dumped file\".\"\"\"\n return os.path.join(TaoTestCase.DUMP_PATH, name)\n\n @staticmethod\n def setUpWithoutTaoOpLib(testcase=''):\n os.environ['BRIDGE_ENABLE_TAO'] = 'true'\n TaoTestCase._setup_dump_graph(testcase)\n TaoTestCase._setup_tf_logging()\n TaoTestCase._locate_tao_compiler()\n np.random.seed(1)\n\n def setUp(self):\n TaoTestCase.setUpWithoutTaoOpLib()\n TaoTestCase._locate_lib_tao_ops()\n\n def tearDown(self):\n if os.path.exists(TaoTestCase.DUMP_PATH) and os.getenv(\"KEEP_DUMP\", \"false\") != \"true\":\n shutil.rmtree(TaoTestCase.DUMP_PATH)\n sys.stdout.flush()\n sys.stderr.flush()\n\n def new_sess(self, allow_growth=True,\n allocator_type='BFC',\n log_device_placement=False,\n allow_soft_placement=True):\n try:\n import tensorflow.compat.v1 as tf\n tf.disable_v2_behavior()\n except:\n import tensorflow as tf\n gpu_options = tf.GPUOptions(\n allow_growth=True, allocator_type='BFC')\n config = tf.ConfigProto(log_device_placement=False,\n allow_soft_placement=True,\n gpu_options=gpu_options)\n return 
tf.Session(config=config)\n\n\nclass TaoCpuTestCase(TaoTestCase):\n \"\"\" overriding TaoTestCase setUp method to enable cpu xla jit\"\"\"\n\n def setUp(self):\n super(TaoCpuTestCase, self).setUp()\n os.environ['TF_XLA_FLAGS'] = \"--tf_xla_cpu_global_jit=true\"\n" ]
[ [ "torch.nn.Dropout", "torch.jit.trace", "torch.ones", "torch.Tensor", "torch.zeros", "torch._C._freeze_module", "torch.nn.Conv2d", "torch.parse_ir", "torch.nn.Linear", "torch.nn.functional.relu", "torch.nn.functional.relu_", "torch.nn.BatchNorm2d" ], [ "torch._C.parse_ir" ], [ "numpy.random.seed", "tensorflow.ConfigProto", "tensorflow.GPUOptions", "tensorflow.Session", "tensorflow.load_op_library", "tensorflow.disable_v2_behavior" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.13" ] } ]
Gattocrucco/sipmfilter
[ "74215d6c53b998808fc6c677b46030234d996bdf" ]
[ "figthesis/figlaserpos.py" ]
[ "from matplotlib import pyplot as plt\n\nimport figlatex\nimport afterpulse_tile21\nimport textbox\nimport colormap\n\nvov = 5.5\n\n################\n\nap21 = afterpulse_tile21.AfterPulseTile21(vov)\n\nfig = plt.figure(num='figlaserpos-0', clear=True, figsize=[4.5, 3])\n\nap21.sim.hist('mainpos-offset', 'mainnpe==1', fig=fig, selection=False)\nax, = fig.get_axes()\ntextbox.textbox(ax, f'{vov} VoV', fontsize='medium', loc='lower center')\nax.set_xlabel('Laser peak position [ns]')\n\nfigs = [fig]\n\nfig = plt.figure(num='figlaserpos-1', clear=True, figsize=[4.5, 3])\n\nap21.sim.hist2d('mainpos-offset', 'mainampl', '(mainnpe==1)&(length==128)', fig=fig, cmap=colormap.uniform(), selection=False)\nax, _ = fig.get_axes()\ntextbox.textbox(ax, f'{vov} VoV', fontsize='medium', loc='lower center')\nax.set_xlabel('Laser peak position [ns]')\nax.set_ylabel('Peak height')\n\nfigs.append(fig)\n\nfor fig in figs:\n fig.tight_layout()\n fig.show()\n\nfiglatex.save([figs])\n" ]
[ [ "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
egpbos/amuse
[ "64b3bc5b7fef9496012b023578c4d71cecef92b7", "64b3bc5b7fef9496012b023578c4d71cecef92b7", "64b3bc5b7fef9496012b023578c4d71cecef92b7" ]
[ "examples/simple/salpeter.py", "examples/simple/unstable_binary.py", "test/core_tests/test_grids.py" ]
[ "\"\"\"\nGenerates a cluster using a plummer model with a salpeter Initial Mass Function.\nCompares the generated IMF against the expected line.\n\"\"\"\n\nimport numpy \nfrom matplotlib import pyplot\nfrom amuse.units import units\nfrom amuse.units import nbody_system\nfrom amuse.ic.plummer import new_plummer_model\nfrom amuse.ic.salpeter import new_salpeter_mass_distribution\n\ndef new_cluster(number_of_stars = 1000):\n masses = new_salpeter_mass_distribution(\n number_of_stars, \n mass_min = 0.1 | units.MSun,\n mass_max = 125.0 | units.MSun, \n alpha = -2.35\n )\n nbody_converter = nbody_system.nbody_to_si(masses.sum(), 1 | units.parsec)\n particles = new_plummer_model(number_of_stars, nbody_converter)\n particles.mass = masses\n particles.move_to_center()\n return particles\n\ndef plot_particles_and_mass_distribution(particles):\n figure = pyplot.figure(figsize= (12,6))\n \n subplot = figure.add_subplot(1, 2, 1)\n \n subplot.scatter(\n particles.x.value_in(units.parsec),\n particles.y.value_in(units.parsec),\n s = particles.mass.value_in(units.MSun),# * len(particles),\n edgecolors = 'red',\n facecolors = 'red'\n )\n \n subplot.set_xlim(-4,4)\n subplot.set_ylim(-4,4)\n subplot.set_xlabel('x (parsec)')\n subplot.set_ylabel('y (parsec)')\n \n subplot = figure.add_subplot(1, 2, 2)\n \n masses = particles.mass.value_in(units.MSun)\n \n bins = 10**numpy.linspace(-1, 2, 100)\n number_of_particles, bin_edges= numpy.histogram(masses, bins = bins)\n \n bin_sizes = bin_edges[1:] - bin_edges[:-1]\n \n y = number_of_particles / bin_sizes\n x = (bin_edges[1:] + bin_edges[:-1]) / 2.0\n \n y = y[number_of_particles > 10.0] \n x = x[number_of_particles > 10.0]\n subplot.scatter(x, y)\n \n c = ((0.1**-1.35) - (125.0**-1.35)) / 1.35\n subplot.plot(x, len(particles)/ c * (x**-2.35))\n \n subplot.set_xscale('log')\n subplot.set_yscale('log')\n \n subplot.set_xlabel(u'M [M\\u2299]')\n subplot.set_ylabel('N')\n \n pyplot.show()\n \nif __name__ == \"__main__\":\n particles = new_cluster(20000)\n plot_particles_and_mass_distribution(particles)\n", "\"\"\"\nEvolves two stars dynamically (hermit, nbody code) each star will \nlose mass during the evolution (evtwin, stellar evolution code)\n\nWe start with two stars, one 10.0 and one 1.0 solar mass star. These\nstars start orbiting with a stable kepler orbit. 
\nAfter 2 orbital periods the stars will begin to lose mass and the binary\nwill become unstable.\n\"\"\"\n\nfrom amuse.plot import scatter, xlabel, ylabel, plot\nfrom matplotlib import pyplot\nfrom math import pi\nfrom amuse.units.optparse import OptionParser\n\nfrom amuse.units import units\nfrom amuse.units import constants\nfrom amuse.units.nbody_system import nbody_to_si\nfrom amuse.community.evtwin.interface import EVtwin\nfrom amuse.community.sse.interface import SSE\nfrom amuse.community.hermite0.interface import Hermite\n\nfrom amuse.datamodel import Particles\n\n\ndef set_up_initial_conditions(orbital_period, kinetic_to_potential_ratio):\n print(\"Setting up initial conditions\")\n stars = Particles(2)\n stars.mass = [10.0, 1.0] | units.MSun\n stars.radius = 0 | units.RSun\n stars.position = [0.0, 0.0, 0.0] | units.AU\n stars.velocity = [0.0, 0.0, 0.0] | units.km / units.s\n \n print(\"Binary with masses: \"+str(stars.mass)+\", and orbital period: \", orbital_period)\n semimajor_axis = ((constants.G * stars.total_mass() * (orbital_period / (2 * pi))**2.0)**(1.0/3.0))\n separation = 2 * semimajor_axis * (1 - kinetic_to_potential_ratio)\n print(\"Initial separation:\", separation.as_quantity_in(units.AU))\n relative_velocity = ( (kinetic_to_potential_ratio / (1.0 - kinetic_to_potential_ratio)) * \n constants.G * stars.total_mass() / semimajor_axis).sqrt()\n print(\"Initial relative velocity:\", relative_velocity.as_quantity_in(units.km / units.s))\n \n stars[0].x = separation\n stars[0].vy = relative_velocity\n stars.move_to_center()\n return stars\n\ndef set_up_stellar_evolution_code(stars):\n stellar_evolution = EVtwin()\n stellar_evolution.initialize_code()\n # if you run with mesa, you can play with the wind efficiency\n # stellar_evolution.parameters.RGB_wind_scheme = 1\n # stellar_evolution.parameters.reimers_wind_efficiency = 1.0e6 # ridiculous, but instructive\n stellar_evolution.particles.add_particles(stars)\n return stellar_evolution\n \ndef set_up_gravitational_dynamics_code(stars):\n convert_nbody = nbody_to_si(11.0 | units.MSun, 10.0 | units.AU)\n gravitational_dynamics = Hermite(convert_nbody)\n gravitational_dynamics.parameters.epsilon_squared = 0.0 | units.AU ** 2\n view_on_the_primary = gravitational_dynamics.particles.add_particle(stars[0])\n gravitational_dynamics.particles.add_particle(stars[1])\n return gravitational_dynamics, view_on_the_primary\n \n\ndef simulate_binary_evolution(binary, orbital_period, t_offset_stars, t_end):\n distance = [] | units.AU\n mass = [] | units.MSun\n time = [] | units.yr\n \n stellar_evolution = set_up_stellar_evolution_code(binary)\n gravitational_dynamics, primary = set_up_gravitational_dynamics_code(binary)\n from_se_to_gd = stellar_evolution.particles.new_channel_to(gravitational_dynamics.particles)\n \n current_time = 0.0 * t_end\n \n \n print(\"Evolving with stellar wind\")\n while current_time < t_end:\n current_time += orbital_period / 10\n gravitational_dynamics.evolve_model(current_time)\n stellar_evolution.evolve_model(current_time + t_offset_stars)\n from_se_to_gd.copy_attributes(['mass'])\n separation = (gravitational_dynamics.particles[0].position - gravitational_dynamics.particles[1].position).length()\n distance.append(separation)\n mass.append(primary.mass)\n time.append(current_time)\n print(\"System evolved to time: \", current_time, \", primary mass:\", primary.mass.as_quantity_in(units.MSun), \", separation:\", separation.as_quantity_in(units.AU))\n \n print(\"Evolution done\")\n return distance, mass, 
time\n\ndef orbit_plot(distance, mass, time):\n figure = pyplot.figure(figsize = (6, 10), dpi = 100)\n subplot = figure.add_subplot(2, 1, 1)\n plot(time, distance)\n xlabel('t')\n ylabel('separation')\n pyplot.margins(0.05)\n subplot = figure.add_subplot(2, 1, 2)\n plot(time, ((mass - mass[0]) / mass[0]) * 100.0)\n xlabel('t')\n ylabel('mass')\n pyplot.margins(0.05)\n pyplot.show()\n\ndef main(\n orbital_period = 1000.0 | units.yr, \n kinetic_to_potential_ratio = 0.8, \n periods = 10,\n age = 10 | units.Myr\n ):\n \n t_offset_stars = age\n t_end = periods * orbital_period\n \n binary = set_up_initial_conditions(orbital_period, kinetic_to_potential_ratio)\n distance, mass, time = simulate_binary_evolution(binary, orbital_period, t_offset_stars, t_end)\n orbit_plot(distance, mass, time)\n \ndef new_option_parser():\n result = OptionParser()\n result.add_option(\n \"-o\", \"--orbitalperiod\", \n default = 1000 | units.yr,\n dest=\"orbital_period\",\n help=\"initial orbital period of the binary (in years)\",\n type=\"float\",\n unit=units.yr\n )\n \n result.add_option(\n \"-k\", \"--kpratio\", \n default = 0.8,\n dest=\"kinetic_to_potential_ratio\",\n help=\"kinetec to potential energy ratio, values less than 1.0 correspond to bound systems\",\n type=\"float\"\n )\n result.add_option(\n \"--periods\", \n default = 10,\n dest=\"periods\",\n help=\"number of orbital periods to evolve the binary\",\n type=\"int\"\n )\n result.add_option(\n \"--age\", \n default = 10 | units.Myr,\n dest=\"age\",\n help=\"initial age of the stars to start the simulation with\",\n type=\"float\",\n unit=units.Myr\n )\n \n \n return result\n \n \nif __name__ == \"__plot__\":\n main(1000 | units.yr, 0.8, 10, 10 | units.Myr)\n \nif __name__ == \"__main__\":\n options, args = new_option_parser().parse_args()\n main(**options.__dict__)\n", "from amuse.test import amusetest\n\nfrom amuse.support.interface import InCodeComponentImplementation\n\nfrom amuse.datamodel.indexing import *\n\nfrom amuse.datamodel.grids import *\n\nimport numpy\nimport inspect\nimport collections\nfrom amuse.units import units\nfrom amuse.units import constants\nfrom amuse.units import nbody_system\nfrom amuse import datamodel\n\nclass TestGrids(amusetest.TestCase):\n \n def test1(self):\n grid = datamodel.Grid(5,4,3)\n grid.mass = 2.0 | units.kg\n self.assertEquals(grid.mass[0][1][2], 2.0 | units.kg)\n self.assertEquals(grid[0][1][2].mass, 2.0 | units.kg)\n self.assertEquals(len(grid.mass), 5)\n \n def test2(self):\n grid = datamodel.Grid(5,4,3)\n grid.mass = units.kg.new_quantity(numpy.arange(5*4*3).reshape(5,4,3))\n self.assertEquals(grid.number_of_dimensions(), 3)\n subgrid = grid[1]\n self.assertEquals(subgrid.number_of_dimensions(), 2)\n self.assertEquals(subgrid.mass.number.shape, (4,3))\n \n def test3(self):\n grid = datamodel.Grid(5,4,3)\n values = numpy.arange(5*4*3).reshape(5,4,3)\n grid.mass = units.kg.new_quantity(values)\n self.assertEquals(grid.number_of_dimensions(), 3)\n \n subgrid = grid[1][2]\n self.assertEquals(subgrid.number_of_dimensions(), 1)\n self.assertEquals(subgrid.mass.number.shape, (3,))\n self.assertTrue(numpy.all(values[1][2] == subgrid.mass.value_in(units.kg)))\n \n def test4(self):\n grid = datamodel.Grid(5,4,3)\n values = numpy.arange(5*4*3).reshape(5,4,3)\n grid.mass = units.kg.new_quantity(values)\n self.assertEquals(grid.number_of_dimensions(), 3)\n \n gridpoint = grid[1][2][1]\n self.assertEquals(gridpoint.mass, 19.0 | units.kg)\n gridpoint = grid[1][2][2]\n self.assertEquals(gridpoint.mass, 20.0 | 
units.kg)\n \n def test5(self):\n grid = datamodel.Grid(5,4,3)\n grid.add_calculated_attribute(\"squared_mass\", lambda m : m * m, [\"mass\",])\n values = numpy.arange(5*4*3).reshape(5,4,3)\n grid.mass = units.kg.new_quantity(values)\n gridpoint = grid[1][2][1]\n self.assertEquals(gridpoint.mass, 19.0 | units.kg)\n self.assertEquals(gridpoint.squared_mass, (19.0 | units.kg) ** 2)\n subgrid = grid[1][2]\n self.assertTrue(numpy.all(subgrid.squared_mass == ([18.0, 19.0, 20.0] | units.kg) ** 2))\n \n def test6(self):\n grid = datamodel.Grid(5,4,3)\n grid.add_function_attribute(\"sum_mass\", lambda grid, x : grid.mass.sum() + x, lambda grid, gridpoint, x : gridpoint.mass + x)\n values = numpy.arange(5*4*3).reshape(5,4,3)\n grid.mass = units.kg.new_quantity(values)\n gridpoint = grid[1][2][1]\n self.assertEquals(gridpoint.mass, 19.0 | units.kg)\n self.assertEquals(gridpoint.sum_mass(2.0 | units.kg), (21.0 | units.kg) )\n subgrid = grid[1][2]\n self.assertTrue(numpy.all(subgrid.sum_mass(2 | units.kg) == (18.0 + 19.0 + 20.0 + 2.0 | units.kg)))\n \n def test7(self):\n grid = datamodel.Grid(5,4,3)\n grid.add_vector_attribute(\"position\", [\"x\",\"y\",\"z\"])\n x = numpy.arange(5*4*3).reshape(5,4,3)\n y = x + 100.0\n z = x + 200.0\n grid.x = units.m.new_quantity(x)\n grid.y = units.m.new_quantity(y)\n grid.z = units.m.new_quantity(z)\n gridpoint = grid[1][2][1]\n self.assertEquals(gridpoint.position[0], 19 | units.m)\n self.assertEquals(gridpoint.position[1], 119 | units.m)\n self.assertEquals(gridpoint.position[2], 219 | units.m)\n subgrid = grid[1][2]\n self.assertEquals(subgrid.position[1][0], 19 | units.m)\n self.assertEquals(subgrid.position[1][1], 119 | units.m)\n self.assertEquals(subgrid.position[1][2], 219 | units.m)\n \n \n def test8(self):\n grid0 = datamodel.Grid(5,4,3)\n x = numpy.arange(5*4*3).reshape(5,4,3)\n y = x + 100.0\n grid0.x = units.m.new_quantity(x)\n grid0.y = units.m.new_quantity(y)\n \n grid1 = datamodel.Grid(5,4,3)\n x = numpy.arange(5*4*3).reshape(5,4,3)\n x = x + 200.0\n y = x + 200.0\n grid1.x = units.m.new_quantity(x)\n grid1.y = units.m.new_quantity(y)\n \n self.assertTrue(numpy.all(grid0[1][2].x != grid1[1][2].x))\n \n channel = grid0.new_channel_to(grid1)\n channel.copy_attributes([\"x\",])\n \n self.assertTrue(numpy.all(grid0[1][2].x == grid1[1][2].x))\n self.assertTrue(numpy.all(grid0[1][2].y != grid1[1][2].y))\n \n\n \n\n def test9(self):\n grid = datamodel.new_regular_grid((5,4,2), [1.0, 1.0, 1.0] | units.m)\n self.assertEquals(grid[0][0][0].x, 0.1 | units.m)\n self.assertEquals(grid[0][0][0].y, 0.125 | units.m)\n self.assertEquals(grid[0][0][0].z, 0.25 | units.m)\n self.assertEquals(grid[...,0,0].x, [0.1,0.3,0.5,0.7,0.9] | units.m)\n self.assertEquals(grid[0,0,...].z, [0.25, 0.75] | units.m)\n \n cellsize = grid.cellsize()\n self.assertAlmostRelativeEquals(cellsize[0], 0.2 | units.m)\n self.assertAlmostRelativeEquals(cellsize[1], 0.25 | units.m)\n self.assertAlmostRelativeEquals(cellsize[2], 0.5 | units.m)\n \n \n\n def test11(self):\n grid = datamodel.new_regular_grid((5,4,2), [1.0, 1.0, 1.0] | units.m)\n iarray,jarray,karray = grid.indices()\n for i in range(5):\n for j in range(4):\n for k in range(2):\n self.assertEquals(iarray[i][j][k], i)\n self.assertEquals(jarray[i][j][k], j)\n self.assertEquals(karray[i][j][k], k)\n iarray,jarray,karray = grid.indices()\n i = 0\n for j in range(4):\n for k in range(2):\n self.assertEquals(iarray[i][j][k], i)\n self.assertEquals(jarray[i][j][k], j)\n self.assertEquals(karray[i][j][k], k)\n \n iarray,jarray,karray = 
grid[...,0,0].indices()\n j = 0\n k = 0\n for i in range(5):\n self.assertEquals(iarray[i], i)\n self.assertEquals(jarray[i], j)\n self.assertEquals(karray[i], k)\n iarray,jarray,karray = grid[3,2,...].indices()\n i = 3\n j = 2\n for k in range(2):\n self.assertEquals(iarray[k], i)\n self.assertEquals(jarray[k], j)\n self.assertEquals(karray[k], k)\n iarray,jarray,karray = grid[2,...,1].indices()\n i = 2\n k = 1\n for j in range(4):\n self.assertEquals(iarray[j], i)\n self.assertEquals(jarray[j], j)\n self.assertEquals(karray[j], k)\n \n \n def test12(self):\n grid = datamodel.new_regular_grid((5,4,2), [1.0, 1.0, 1.0] | units.m)\n self.assertEquals(grid[grid.x > 0.4| units.m].x.shape, (24,))\n self.assertEquals(grid[grid.x>0.4| units.m].x, grid.x[grid.x>0.4|units.m])\n iarray,jarray,karray = grid.indices()\n self.assertEquals(grid[grid.x>0.4| units.m].indices()[0], iarray[grid.x>0.4| units.m])\n self.assertEquals(grid[grid.x>0.4| units.m].indices()[1], jarray[grid.x>0.4| units.m])\n self.assertEquals(grid[grid.x>0.4| units.m].indices()[2], karray[grid.x>0.4| units.m]) \n \n def test13(self):\n grid = datamodel.new_regular_grid((5,4,2), [1.0, 1.0, 1.0] | units.m)\n self.assertEquals(grid[0].shape, (4,2))\n self.assertEquals(grid[0][0].shape, (2,))\n self.assertEquals(grid[...,2,1].shape, (5,))\n \n def test14(self):\n grid = datamodel.new_regular_grid((5,4,2), [1.0, 1.0, 1.0] | units.m)\n self.assertEquals(grid[0].x, grid.x[0])\n self.assertEquals(grid[0][1].x, grid.x[0][1])\n self.assertEquals(grid[1][2][1].x, grid.x[1][2][1])\n self.assertEquals(grid[...,2,1].x, grid.x[...,2,1])\n self.assertEquals(grid[1,...,1].x, grid.x[1,...,1])\n self.assertEquals(grid[1,2,...].x, grid.x[1,2,...])\n self.assertEquals(grid[...,1].x, grid.x[...,1])\n self.assertEquals(grid[2,...].x, grid.x[2,...])\n self.assertEquals(grid[:,3,:].x, grid.x[:,3,:])\n self.assertEquals(grid[:,3,:].y, grid.y[:,3,:])\n self.assertEquals(grid[:,3,:].z, grid.z[:,3,:])\n \n def test15(self):\n \n grid = datamodel.new_regular_grid((5,4,2), [1.0, 1.0, 1.0] | units.m)\n nk = nj = ni =0\n for plane1 in grid:\n nk += 1\n for plane2 in plane1:\n nj += 1\n for plane3 in plane2:\n ni += 1\n self.assertEquals(nk, 5)\n self.assertEquals(nj, 4 * 5)\n self.assertEquals(ni, 2 * 4 * 5)\n \n def test16(self):\n grid = datamodel.new_regular_grid((5,4,2,4), [1.0 | units.m, 1.0 | units.m, 1.0 | units.m, 1.0 | units.s], ('x', 'y', 'z', 't') )\n self.assertEquals(grid.shape, (5,4,2,4))\n self.assertEquals(grid.x.shape, (5,4,2,4))\n self.assertAlmostRelativeEquals( grid[1][2][1].x, ([0.3] * 4) | units.m)\n self.assertAlmostRelativeEquals( grid[1][2][1].t, [0.125, 0.375, 0.625, 0.875] | units.s)\n self.assertEquals(grid[0].x, grid.x[0])\n self.assertEquals(grid[0][1].x, grid.x[0][1])\n self.assertEquals(grid[1][2][1].x, grid.x[1][2][1])\n self.assertEquals(grid[1][2][1][2].x, grid.x[1][2][1][2])\n\n def test17(self):\n grid = datamodel.new_regular_grid((4,2), [1.0 | units.m, 1.0 | units.m])\n self.assertEquals(grid.shape, (4,2) )\n self.assertEquals(grid.x.shape, (4,2))\n self.assertAlmostRelativeEquals( grid[1].x, ([0.375] * 2) | units.m)\n self.assertAlmostRelativeEquals( grid[1][1].y, 0.75 | units.m)\n self.assertEquals(grid[0].x, grid.x[0])\n self.assertEquals(grid[0][1].x, grid.x[0][1])\n \n def test18(self):\n grid = datamodel.new_regular_grid((5,4,2), [1.0, 1.0, 1.0] | units.m)\n self.assertEquals(grid.shape, (5,4,2) )\n self.assertEquals(grid[1:2,...].x.shape, (1,4,2 ))\n self.assertEquals(grid[1:2,...].shape, (1,4,2) )\n 
self.assertEquals(grid[1:2,...].x, grid.x[1:2,...])\n self.assertEquals(grid[1:3,...].x.shape, (2,4,2) )\n self.assertEquals(grid[1:3,...].shape, (2,4,2) )\n self.assertEquals(grid[1:3,...].x, grid.x[1:3,...])\n\n def test19(self):\n grid = datamodel.new_regular_grid((5,4,2), [1.0, 1.0, 1.0] | units.m)\n self.assertEquals(grid[1:3,...].x, grid.x[1:3,...])\n self.assertEquals(grid[1:3,2:3,...].x, grid.x[1:3,2:3,...])\n self.assertEquals(grid[1:3,2:3,0:1].x, grid.x[1:3,2:3,0:1])\n self.assertEquals(grid[1:3,...,0:1].x, grid.x[1:3,...,0:1])\n self.assertEquals(grid[...,0:1].x, grid.x[...,0:1])\n self.assertEquals(grid[...,2:3,0:1].x, grid.x[...,2:3,0:1])\n \n def test20(self):\n grid = datamodel.new_regular_grid((5,4,2), [1.0, 1.0, 1.0] | units.m)\n self.assertEquals(grid[1:3,:,:].x, grid.x[1:3,:,:])\n self.assertEquals(grid[1:3,2:3,:].x, grid.x[1:3,2:3,:])\n self.assertEquals(grid[1:3,2:3,0:1].x, grid.x[1:3,2:3,0:1])\n self.assertEquals(grid[1:3,:,0:1].x, grid.x[1:3,:,0:1])\n self.assertEquals(grid[:,:,0:1].x, grid.x[:,:,0:1])\n self.assertEquals(grid[:,2:3,0:1].x, grid.x[:,2:3,0:1])\n \n \n def test21(self):\n grid = datamodel.new_regular_grid((5,4,2), [1.0, 1.0, 1.0] | units.m)\n self.assertEquals(grid[1:3,:,:].copy().x, grid.x[1:3,:,:])\n self.assertEquals(grid[1:3,:,:].copy().shape, (2,4,2))\n self.assertEquals(grid[1:3,2:3,:].copy().x, grid.x[1:3,2:3,:])\n self.assertEquals(grid[1:3,2:3,:].copy().shape, (2,1,2))\n self.assertEquals(grid[1:3,2:3,0:1].copy().x, grid.x[1:3,2:3,0:1])\n self.assertEquals(grid[1:3,2:3,0:1].copy().shape, (2,1,1))\n self.assertEquals(grid[1:3,:,0:1].copy().x, grid.x[1:3,:,0:1])\n self.assertEquals(grid[1:3,:,0:1].copy().shape, (2,4,1))\n self.assertEquals(grid[:,:,0:1].copy().x, grid.x[:,:,0:1])\n self.assertEquals(grid[:,:,0:1].copy().shape, (5,4,1))\n self.assertEquals(grid[:,2:3,0:1].copy().x, grid.x[:,2:3,0:1])\n self.assertEquals(grid[:,2:3,0:1].copy().shape, (5,1,1))\n \n \n def test22(self):\n grid1 = datamodel.new_regular_grid((5,4,2), [1.0, 1.0, 1.0] | units.m)\n grid2 = datamodel.new_regular_grid((5,4,2), [1.0, 1.0, 1.0] | units.m)\n slice1 = grid1[1:3,:,:].copy()\n slice2 = grid2[1:3,:,:]\n slice1.x = -10 | units.m\n channel = slice1.new_channel_to(slice2)\n channel.copy()\n self.assertEquals(grid2.x[1:3], -10 | units.m)\n self.assertEquals(grid2.x[4],grid1.x[4])\n \n \n def test23(self):\n grid1 = datamodel.new_regular_grid((5,4,2), [1.0, 1.0, 1.0] | units.m)\n grid2 = datamodel.new_regular_grid((5,4,2), [1.0, 1.0, 1.0] | units.m)\n self.assertEqual(grid1[1:3,...].shape,(2,4,2))\n slice1 = grid1[1:3,...].copy()\n slice2 = grid2[1:3,...]\n slice1.x = -10 | units.m\n channel = slice1.new_channel_to(slice2)\n channel.copy()\n self.assertEquals(grid2.x[1:3], -10 | units.m)\n self.assertEquals(grid2.x[4],grid1.x[4])\n \n \n def test24(self):\n particle = datamodel.Particle()\n particle.mass = 10 | units.kg\n \n grid = datamodel.Grid(5,4,3)\n grid.mass = 2.0 | units.kg\n grid.nounit = 10\n self.assertEquals(grid.nounit[0][1][2], 10)\n self.assertEquals(grid[0][1][2].nounit, 10)\n self.assertEquals(len(grid.nounit), 5)\n #grid[0][1][0].particle = particle\n #self.assertEquals(grid.mass[0][1][2], 2.0 | units.kg)\n #self.assertEquals(grid[0][1][0].particle, particle)\n #self.assertEquals(grid[0][1][1].particle, None)\n\n\n def test25(self):\n grid = datamodel.Grid(5,4,3)\n grid.mass = 2.0 | units.kg\n for cell in grid.iter_cells():\n self.assertEquals(cell.mass, 2.0 | units.kg)\n \n def test26(self):\n \n grid = datamodel.new_regular_grid((5,4,2), [1.0, 1.0, 
1.0] | units.m)\n \n xv, yv, zv = numpy.mgrid[0:5,0:4,0:2]\n xv = xv.flatten()\n yv = yv.flatten()\n zv = zv.flatten()\n i = 0\n for cell in grid.iter_cells():\n expected_position = grid[xv[i], yv[i], zv[i]].position\n self.assertEquals(cell.position, expected_position)\n i += 1\n\n def test27(self):\n grid = datamodel.new_regular_grid((3,3), [1.0, 1.0] | units.m)\n subgrid1=grid[0:1,0:2]\n subgrid2=grid[0:1,0:2]\n subgrid3=grid[0:2,0:3][0:1,0:2]\n subgrid4=grid[0:1,0:3]\n \n self.assertTrue(subgrid1==subgrid2)\n self.assertTrue(subgrid1==subgrid3)\n self.assertTrue(subgrid2==subgrid3)\n self.assertFalse(subgrid1==subgrid4)\n self.assertFalse(subgrid2==subgrid4)\n self.assertFalse(subgrid3==subgrid4)\n\n def test28(self):\n grid = datamodel.Grid(200,400)\n subgrid1=grid[1:-1,1:-1]\n subgrid2=subgrid1[3:5,1:399]\n self.assertEqual(subgrid2.shape,(2,397))\n\n def test29(self):\n grid = datamodel.Grid(200)\n subgrid1=grid[1:-1]\n subgrid2=subgrid1[3:5]\n self.assertEqual(subgrid2.shape,(2,))\n\n def test30(self):\n grid = datamodel.Grid(200)\n subgrid1=grid[1:199]\n subgrid2=subgrid1[3:5]\n self.assertEqual(subgrid2.shape,(2,))\n\n def test31(self):\n grid = datamodel.Grid(20,20,20)\n a=numpy.zeros((20,20,20))\n self.assertEqual(a[1].shape,grid[1].shape) \n self.assertEqual(a[1,2].shape,grid[1,2].shape) \n self.assertEqual(a[1:5].shape,grid[1:5].shape) \n self.assertEqual(a[2,1:5].shape,grid[2,1:5].shape)\n self.assertEqual(a[2,...].shape,grid[2,...].shape) \n self.assertEqual(a[...,3,:].shape,grid[...,3,:].shape) \n self.assertEqual(a[...,3:5,:].shape,grid[...,3:5,:].shape) \n self.assertEqual(a[...,3:5,:].shape,grid[...,3:5,:].shape)\n self.assertEqual(a[::,::2].shape,grid[::,::2].shape) \n\n def test32(self):\n grid = datamodel.Grid(200)\n grid.mass=numpy.arange(200)\n\n self.assertEquals(grid[1].mass,grid.mass[1]) \n self.assertEquals(grid[1:-1].mass,grid.mass[1:-1])\n self.assertEquals(grid[-1:1:-1].mass,grid.mass[-1:1:-1])\n self.assertEquals(grid[-1:1:-1][1:-1].mass,grid.mass[-1:1:-1][1:-1])\n self.assertEquals(grid[-1:1:-1][-1:1].mass,grid.mass[-1:1:-1][-1:1])\n self.assertEquals(grid[-1:1:-1][-1:1:-3].mass,grid.mass[-1:1:-1][-1:1:-3])\n self.assertEquals(grid[300:1:-2][-1:5:-3].mass,grid.mass[300:1:-2][-1:5:-3])\n self.assertEquals(grid[100::-2][::3].mass,grid.mass[100::-2][::3])\n self.assertEquals(grid[100::-2][::-3].mass,grid.mass[100::-2][::-3])\n\n\n def test32b(self):\n grid = datamodel.Grid(200)\n grid.mass=numpy.arange(200)\n\n self.assertEquals(grid[::-1].mass,grid.mass[::-1])\n self.assertEquals(grid[10::-1].mass,grid.mass[10::-1])\n self.assertEquals(grid[:100:2].mass,grid.mass[:100:2]) \n self.assertEquals(grid[-1::-1].mass,grid.mass[-1::-1])\n self.assertEquals(grid[-1:-300:-1].mass,grid.mass[-1:-300:-1])\n self.assertEquals(grid[300:-300:-1].mass,grid.mass[300:-300:-1]) \n self.assertEquals(grid[300:-300:-7].mass,grid.mass[300:-300:-7]) \n \n \n \n def test33(self):\n grid = datamodel.Grid(20)\n grid.mass=numpy.zeros((20,5))\n self.assertEquals(grid[1].mass,numpy.zeros(5))\n grid.mass=numpy.arange(5)\n self.assertEquals(grid[-1].mass,numpy.arange(5))\n subgrid=grid[::2]\n self.assertEquals(subgrid[-1].mass,numpy.arange(5))\n subgrid[1].mass=5-numpy.arange(5)\n self.assertEquals(subgrid[1].mass,5-numpy.arange(5))\n self.assertEquals(grid[2].mass,5-numpy.arange(5))\n\n def test34(self):\n grid = datamodel.Grid(20)\n grid.mass=numpy.zeros((20,5)) | units.kg\n self.assertEquals(grid[1].mass,numpy.zeros(5) | units.kg)\n grid.mass=numpy.arange(5) | units.kg\n 
self.assertEquals(grid[-1].mass,numpy.arange(5)| units.kg)\n subgrid=grid[::2]\n self.assertEquals(subgrid[-1].mass,numpy.arange(5)| units.kg)\n subgrid[1].mass=(5-numpy.arange(5))| units.kg\n self.assertEquals(subgrid[1].mass,(5-numpy.arange(5))| units.kg)\n self.assertEquals(grid[2].mass,(5-numpy.arange(5))| units.kg)\n\n def test35(self):\n grid=datamodel.Grid(10,5)\n grid.mass=numpy.zeros((10,5,3))\n self.assertEquals(grid[2,2].mass,numpy.zeros(3))\n grid[::3,::2].mass=numpy.arange(3)\n self.assertEquals(grid[3,2].mass,numpy.arange(3))\n\n def test36(self):\n grid=datamodel.Grid(10)\n grid.mass=numpy.zeros((10,5,3))\n self.assertEquals(grid[2].mass,numpy.zeros((5,3)))\n grid[::3].mass=numpy.ones((5,3))\n self.assertEquals(grid[3].mass,numpy.ones((5,3)))\n\n def test37(self):\n grid = datamodel.Grid(20)\n grid.mass=numpy.zeros((20,5))\n grid.mass=numpy.arange(5)\n self.assertEquals(grid[-1].mass,numpy.arange(5))\n self.assertEquals(grid.mass.shape,(20,5))\n subgrid=grid[::2]\n self.assertEquals(subgrid[-1].mass,numpy.arange(5))\n subgrid[1].mass=5-numpy.arange(5)\n self.assertEquals(subgrid[1].mass,5-numpy.arange(5))\n self.assertEquals(grid[2].mass,5-numpy.arange(5))\n cp=subgrid.copy()\n self.assertEquals(cp[1].mass,5-numpy.arange(5))\n self.assertEquals(cp.mass.shape,(10,5)) \n cp=grid.copy()\n self.assertEquals(cp.mass.shape,(20,5))\n self.assertEquals(cp[2].mass,5-numpy.arange(5))\n self.assertEquals(cp[-1].mass,numpy.arange(5))\n\n def test38(self):\n grid=datamodel.new_cartesian_grid((10,),1)\n sub=grid[::2]\n self.assertEqual(sub[0].x,0.5)\n self.assertEqual(sub[(0,)].x,0.5)\n\n grid=datamodel.new_cartesian_grid((10,10),1)\n sub=grid[::2,::]\n self.assertEqual(sub[0,0].x,0.5)\n self.assertEqual(sub[(0,1)].y,1.5)\n\n def test39(self):\n grid=datamodel.new_cartesian_grid((10,10),1)\n sub=grid[3:6,5:8]\n self.assertEqual(sub[0:-1,0:-1].x,sub.x[0:-1,0:-1])\n self.assertEqual(sub[0:-1,-1].x,sub.x[0:-1,-1])\n self.assertEqual(sub[-1,-1].x,sub.x[-1,-1])\n self.assertEqual(sub[-1,-2].x,sub.x[-1,-2])\n\n def test40(self):\n grid1 = datamodel.new_regular_grid((5,4,2), [1.0, 1.0, 1.0] | units.m)\n grid2 = datamodel.new_regular_grid((5,4,2), [1.0, 1.0, 1.0] | units.m)\n grid1.m1 = 1\n grid1.m2 = 2\n channel = grid1.new_channel_to(grid2)\n channel.transform([\"m3\"],lambda x,y: (x,),[\"m1\",\"m2\"])\n self.assertEquals(grid2.m3, 1)\n channel.transform([\"m3\"],lambda x,y: (y,),[\"m1\",\"m2\"])\n self.assertEquals(grid2.m3, 2)\n channel.transform([\"m3\"],lambda x,y: (x+y,),[\"m1\",\"m2\"])\n self.assertEquals(grid2.m3, 3)\n channel.transform([\"m3\",\"m4\"],lambda x,y: (x+y,2*x-y),[\"m1\",\"m2\"])\n self.assertEquals(grid2.m3, 3)\n self.assertEquals(grid2.m4, 0)\n\n def test40b(self):\n grid = datamodel.new_regular_grid((50,), [1.0] | units.m)\n for index in [ [0], [0,3,4], [1,2,2],[[2,3]],[[0,1],[2,3]],range(50) ]:\n i=numpy.array(index)\n self.assertEquals(grid[i].x, grid.x[ i ])\n self.assertEquals(grid[i][1:].x, grid.x[ i ][1:])\n self.assertEquals(grid[i][1::2].x, grid.x[ i ][1::2])\n\n def test41(self):\n grid = datamodel.new_regular_grid((10,10), [1.0,2.0] | units.m)\n for _i,_j in [ ([0,1],[2,3]) ]:\n i=numpy.array(_i)\n j=numpy.array(_j)\n self.assertEquals(grid[i,j].x, grid.x[ i,j ])\n self.assertEquals(grid[i,j][1:].x, grid.x[ i,j ][1:])\n self.assertEquals(grid[i,j][1::2].x, grid.x[ i,j ][1::2])\n\n def test42(self):\n grid = datamodel.new_regular_grid((3,4,5,6), [1.0,2.0,3.0,4.0],axes_names=\"abcd\")\n for _i,_j in [ ([0],[3]),([0,1],[2,3]) ]:\n i=numpy.array(_i)\n 
j=numpy.array(_j)\n self.assertEquals(grid[i,j].a, grid.a[ i,j ])\n self.assertEquals(grid[0,1,i,j].a, grid.a[0,1, i,j ])\n self.assertEquals(grid[i,j,0,0].a, grid.a[ i,j,0,0 ])\n self.assertEquals(grid[i,1,2,j].a, grid.a[ i,1,2,j])\n\n def test43(self):\n grid = datamodel.new_regular_grid((3,4,5,6), [1.0,2.0,3.0,4.0],axes_names=\"abcd\")\n for _i,_j in [ ([0,1],[2,3]) ]:\n i=numpy.array(_i)\n j=numpy.array(_j)\n subgrid=grid[:,0,:,:]\n self.assertEquals(subgrid[i,j].a, subgrid.a[ i,j ])\n subgrid=grid[:,0,2,:]\n self.assertEquals(subgrid[i,j].a, subgrid.a[ i,j ])\n subgrid=grid[:,0,-2,:]\n self.assertEquals(subgrid[i,j].a, subgrid.a[ i,j ])\n\n def test44(self):\n grid = datamodel.new_regular_grid((3,4,5,6), [1.0,2.0,3.0,4.0],axes_names=\"abcd\")\n for _i,_j in [ ([-2],[-4]),([0,-1],[-2,3]) ]:\n i=numpy.array(_i)\n j=numpy.array(_j)\n subgrid=grid[:,0,:,:]\n self.assertEquals(subgrid[i,j].a, subgrid.a[ i,j ])\n subgrid=grid[:,0,2,:]\n self.assertEquals(subgrid[i,j].a, subgrid.a[ i,j ])\n subgrid=grid[:,0,-2,:]\n self.assertEquals(subgrid[i,j].a, subgrid.a[ i,j ])\n\n def test45(self):\n grid = datamodel.new_regular_grid((3,4,5,6), [1.0,2.0,3.0,4.0],axes_names=\"abcd\")\n for _i,_j in [ ([-1],[-4]) ]:\n i=numpy.array(_i)\n j=numpy.array(_j)\n subgrid=grid[::2,0,:,:]\n self.assertEquals(subgrid[i,j].a, subgrid.a[ i,j ])\n\n def test46(self):\n grid = datamodel.new_regular_grid((6,), [1.0],axes_names=\"abcd\")\n subgrid=grid[::2]\n self.assertEquals(subgrid[-2].a, subgrid.a[ -2 ])\n\n grid = datamodel.new_regular_grid((7,), [1.0],axes_names=\"abcd\")\n subgrid=grid[::2]\n self.assertEquals(subgrid[-2].a, subgrid.a[ -2 ])\n\n grid = datamodel.new_regular_grid((7,), [1.0],axes_names=\"abcd\")\n subgrid=grid[6:0:-2]\n self.assertEquals(subgrid[-2].a, subgrid.a[ -2 ])\n\n def test47(self):\n grid = datamodel.Grid()\n grid.mass=12.\n self.assertEquals(grid[...].mass,12.)\n self.assertEquals(grid.mass,12.)\n\n\n\nclass TestGridFactories(amusetest.TestCase):\n def test1(self):\n grid1 = datamodel.new_cartesian_grid( (4,5), 1.0 | units.m)\n grid2 = datamodel.new_regular_grid( (4,5), [4.0,5.0] | units.m)\n grid3 = datamodel.new_rectilinear_grid( (4,5), [numpy.arange(5.) | units.m,numpy.arange(6.) 
| units.m])\n \n self.assertEqual(grid1.position,grid2.position)\n self.assertEqual(grid2.position,grid3.position)\n\n def test2(self):\n grid=datamodel.new_rectilinear_grid((10,),(1.*numpy.arange(11),))\n self.assertEqual(grid._axes_cell_boundaries,1.*numpy.arange(11))\n grid=datamodel.new_regular_grid((10,),[10.])\n self.assertEqual(grid._lengths,[10.])\n grid=datamodel.new_cartesian_grid((10,),1.)\n self.assertEqual(grid._cellsize,1.)\n grid=datamodel.new_regular_grid((10,20,),[10.,15.])\n self.assertEquals(grid._lengths,[10.,15.])\n\n def test3(self):\n N=10\n x,y=numpy.indices((N+1,N+1))\n grid=datamodel.new_structured_grid((N,N),[x,y])\n self.assertEqual(grid.shape,(N,N))\n x,y=numpy.indices((N,N))\n x=x+0.5\n y=y+0.5\n self.assertEqual(grid.x,x)\n self.assertEqual(grid.y,y)\n \n def test4(self):\n N=2\n x,y,z=numpy.indices((N+1,N+1,2*N+1))\n grid=datamodel.new_structured_grid((N,N,2*N),[x,y,z])\n self.assertEqual(grid.shape,(N,N,2*N))\n x,y,z=numpy.indices((N,N,2*N))\n x=x+0.5\n y=y+0.5\n z=z+0.5\n self.assertEqual(grid.x,x)\n self.assertEqual(grid.y,y)\n self.assertEqual(grid.z,z)\n\n\n\nclass TestGridAttributes(amusetest.TestCase):\n \n def test1(self):\n grid = datamodel.new_regular_grid((5,4,2), [1.0, 1.0, 1.0] | units.m)\n self.assertAlmostRelativeEquals(grid.get_minimum_position(), ([0.0, 0.0, 0.0] | units.m) )\n self.assertAlmostRelativeEquals(grid.get_maximum_position(), [1.0, 1.0, 1.0] | units.m)\n self.assertAlmostRelativeEquals(grid.get_volume(), 1.0 | units.m ** 3)\n self.assertTrue(grid.contains([0.5,0.5,0.5] | units.m))\n \n def test2(self):\n grid = datamodel.new_regular_grid((5,4,2), [1.0, 1.0, 1.0] | units.m)\n self.assertTrue(numpy.all(grid.contains([[0.5,0.5,0.5] , [0.1,0.1,0.1]]| units.m)))\n self.assertFalse(numpy.all(grid.contains([[1.1,0.5,0.5] , [0.1,1.1,0.1]]| units.m)))\n \n def test3(self):\n grid = datamodel.new_regular_grid((5,4,2), [1.0, 1.0, 1.0] | units.m)\n points = grid.points()\n self.assertEquals(points.shape, (6,5,3, 3))\n self.assertAlmostRelativeEquals(points[0][0][0], ([0.0,0.0, 0.0] | units.m) )\n self.assertAlmostRelativeEquals(points[1][0][0] , ([0.2,0.0, 0.0] | units.m) )\n self.assertAlmostRelativeEquals(points[1][1][1], [0.2,0.25, 0.5] | units.m )\n self.assertAlmostRelativeEquals(points[0][-1][-1] , ([0.0, 1.0, 1.0] | units.m) )\n self.assertAlmostRelativeEquals(points[-1][0][-1] , ([1.0, 0.0, 1.0] | units.m) )\n self.assertAlmostRelativeEquals(points[-1][-1][0] , ([1.0, 1.0, 0.0] | units.m) )\n self.assertAlmostRelativeEquals(points[-1][-1][-1] , ([1.0,1.0, 1.0] | units.m) )\n \n \n def test4(self):\n grid = datamodel.new_regular_grid((5,4,2), [1.0, 1.0, 1.0] | units.m)\n points = grid.points().reshape([6*5*3,3])\n connectivity = grid.connectivity()\n \n self.assertEquals(connectivity.shape, (5,4,2, 8))\n first_cell = connectivity[0][0][0]\n self.assertAlmostRelativeEquals(points[first_cell[0]], [0,0,0 ] | units.m)\n self.assertAlmostRelativeEquals(points[first_cell[1]], [0.2,0,0] | units.m)\n self.assertAlmostRelativeEquals(points[first_cell[2]] , ([0,0.25,0] | units.m))\n self.assertAlmostRelativeEquals(points[first_cell[3]] , ([0.2,0.25,0] | units.m))\n self.assertAlmostRelativeEquals(points[first_cell[4]] , ([0.0,0.0,0.5] | units.m))\n self.assertAlmostRelativeEquals(points[first_cell[5]] , ([0.2,0.0,0.5] | units.m))\n self.assertAlmostRelativeEquals(points[first_cell[6]] , ([0.0,0.25,0.5] | units.m))\n self.assertAlmostRelativeEquals(points[first_cell[7]] , ([0.2,0.25,0.5] | units.m))\n \n self.assertEquals(connectivity[0][0][0], 
[ 0,15, 3, 18, 1, 16, 4, 19])\n \n \n \n def test5(self):\n grid = datamodel.new_regular_grid((5,4,2), [1.0, 1.0, 1.0] | units.m)\n points = grid.points().reshape([6*5*3,3])\n connectivity = grid.connectivity()\n \n self.assertEquals(connectivity.shape, (5,4,2, 8))\n cell = connectivity[0][0][1]\n self.assertAlmostRelativeEquals(points[cell[0]] , ([0.0,0.0,0.5 ] | units.m))\n self.assertAlmostRelativeEquals(points[cell[1]] , ([0.2,0,0.5] | units.m))\n self.assertAlmostRelativeEquals(points[cell[2]] , ([0,0.25,0.5] | units.m))\n self.assertAlmostRelativeEquals(points[cell[3]] , ([0.2,0.25,0.5] | units.m))\n self.assertAlmostRelativeEquals(points[cell[4]] , ([0.0,0.0,1.0] | units.m))\n self.assertAlmostRelativeEquals(points[cell[5]] , ([0.2,0.0,1.0] | units.m))\n self.assertAlmostRelativeEquals(points[cell[6]] , ([0.0,0.25,1.0] | units.m))\n self.assertAlmostRelativeEquals(points[cell[7]] , ([0.2,0.25,1.0] | units.m))\n \n self.assertEquals(connectivity[0][0][0], [ 0,15, 3, 18, 1, 16, 4, 19])\n \n \n \n def test6(self):\n grid = datamodel.new_regular_grid((5,4,2), [1.0, 1.0, 1.0] | units.m)\n points = grid.points().reshape([6*5*3,3])\n connectivity = grid.connectivity()\n \n self.assertEquals(connectivity.shape, (5,4,2, 8))\n cell = connectivity[1][1][1]\n self.assertAlmostRelativeEquals(points[cell[0]], ([0.2, 0.25, 0.5]|units.m) + ([0.0,0.0,0.0 ] | units.m))\n self.assertAlmostRelativeEquals(points[cell[1]], ([0.2, 0.25, 0.5]|units.m) + ([0.2,0,0.0] | units.m))\n self.assertAlmostRelativeEquals(points[cell[2]], ([0.2, 0.25, 0.5]|units.m) + ([0,0.25,0.0] | units.m))\n self.assertAlmostRelativeEquals(points[cell[3]], ([0.2, 0.25, 0.5]|units.m) + ([0.2,0.25,0.0] | units.m))\n self.assertAlmostRelativeEquals(points[cell[4]], ([0.2, 0.25, 0.5]|units.m) + ([0.0,0.0,0.5] | units.m))\n self.assertAlmostRelativeEquals(points[cell[5]], ([0.2, 0.25, 0.5]|units.m) + ([0.2,0.0,0.5] | units.m))\n self.assertAlmostRelativeEquals(points[cell[6]], ([0.2, 0.25, 0.5]|units.m) + ([0.0,0.25,0.5] | units.m))\n self.assertAlmostRelativeEquals(points[cell[7]], ([0.2, 0.25, 0.5]|units.m) + ([0.2,0.25,0.5] | units.m))\n \n self.assertEquals(connectivity[0][0][0], [ 0,15, 3, 18, 1, 16, 4, 19])\n \n def test7(self):\n grid = datamodel.new_regular_grid((5,5,5), [10.0, 10.0, 10.0] | units.m)\n \n self.assertAlmostRelativeEquals(grid[1][2][3].position, [3,5,7] |units.m)\n \n grid[1][2][3].position = [7,5,3] |units.m\n self.assertAlmostRelativeEquals(grid[1][2][3].position, [7,5,3] |units.m)\n \n \n grid[1][2][3].position += [1,2,3] |units.m\n self.assertAlmostRelativeEquals(grid[1][2][3].position, [8,7,6] |units.m)\n\n def test8(self):\n grid = datamodel.new_regular_grid((5,4), [1.0, 1.0] | units.m)\n self.assertAlmostRelativeEquals(grid.get_minimum_position(), ([0.0, 0.0] | units.m) )\n self.assertAlmostRelativeEquals(grid.get_maximum_position(), [1.0, 1.0] | units.m)\n self.assertAlmostRelativeEquals(grid.get_volume(), 1.0 | units.m ** 2)\n self.assertTrue(grid.contains([0.5,0.5] | units.m))\n\n def test9(self):\n grid = datamodel.new_regular_grid((5,4,2), [1.0, 1.0, 1.0] | units.m)\n self.assertEquals((0,0,0),grid.get_minimum_index())\n self.assertEquals((4,3,1),grid.get_maximum_index())\n\n def test10(self):\n grid1 = datamodel.new_regular_grid((5,4), [1.0, 1.0] | units.m)\n grid2 = datamodel.new_regular_grid((5,4), [.1, .1] | units.m)\n grid3 = datamodel.new_regular_grid((5,4), [.1, .1] | units.m,offset=[0.5,0.6] | units.m)\n self.assertTrue(grid1.overlaps(grid2))\n self.assertTrue(grid1.overlaps(grid3))\n 
self.assertFalse(grid2.overlaps(grid3))\n self.assertTrue(grid2.overlaps(grid1))\n self.assertTrue(grid3.overlaps(grid1))\n self.assertFalse(grid3.overlaps(grid2))\n\n def test11(self):\n grid1 = datamodel.new_regular_grid((4,4), [1.0, 1.0] | units.m)\n grid2 = datamodel.new_regular_grid((4,4), [1.0, 1.0] | units.m,offset=[-0.5,-0.5] | units.m)\n self.assertTrue(grid1.overlaps(grid2))\n overlap=grid1.get_overlap_with(grid2)\n self.assertEquals(overlap.position,grid1[0:3,0:3].position)\n\n def test12(self):\n grid1 = datamodel.new_regular_grid((4,4), [1.0, 1.0] | units.m)\n grid2 = datamodel.new_regular_grid((4,4), [1.0, 1.0] | units.m,offset=[-0.5,-0.5] | units.m)\n self.assertTrue(grid1.overlaps(grid2))\n overlap=grid1.get_overlap_with(grid2,eps=grid2.cellsize()[0]*1.e-6)\n self.assertEquals(overlap.position,grid1[0:2,0:2].position)\n\n\nclass TestGridSampling(amusetest.TestCase):\n \n def test1(self):\n grid = datamodel.new_regular_grid((5,5,5), [10.0, 10.0, 10.0] | units.m)\n grid.mass = grid.x.value_in(units.m) | units.kg\n sample = grid.samplePoint([3.0,3.0,3.0]| units.m)\n self.assertEquals(sample.index , [1,1,1])\n sample = grid.samplePoint([2.5,2.5,2.5]| units.m)\n self.assertEquals(sample.index , [1,1,1])\n sample = grid.samplePoint([3.5,3.5,3.5]| units.m)\n self.assertEquals(sample.index , [1,1,1])\n \n for x in range(0,200):\n sample = grid.samplePoint([0.0 + (x/100.0),4.0+(x/100.0),6.0+(x/100.0)]| units.m)\n self.assertEquals(sample.index , [0,2,3])\n \n for x in range(200,400):\n sample = grid.samplePoint([0.0 + (x/100.0),4.0+(x/100.0),6.0+(x/100.0)]| units.m)\n self.assertEquals(sample.index , [1,3,4])\n\n def test2(self):\n grid = datamodel.new_regular_grid((5,5,5), [10.0, 10.0, 10.0] | units.m)\n grid.mass = grid.x.value_in(units.m) | units.kg\n sample = grid.samplePoint([3.0,3.0,3.0]| units.m)\n self.assertEquals(sample.index_for_000_cell , [1,1,1])\n sample = grid.samplePoint([2.5,2.5,2.5]| units.m)\n self.assertEquals(sample.index_for_000_cell , [0,0,0])\n sample = grid.samplePoint([3.5,3.5,3.5]| units.m)\n self.assertEquals(sample.index_for_000_cell , [1,1,1])\n sample = grid.samplePoint([4.5,4.5,4.5]| units.m)\n self.assertEquals(sample.index_for_000_cell , [1,1,1])\n self.assertEquals(sample.index , [2,2,2])\n \n for x in range(0,100):\n \n sample = grid.samplePoint([0.0 + (x/100.0),4.0+(x/100.0),6.0+(x/100.0)]| units.m)\n self.assertEquals(sample.index_for_000_cell , [-1,1,2])\n for x in range(100,300):\n \n sample = grid.samplePoint([0.0 + (x/100.0),4.0+(x/100.0),6.0+(x/100.0)]| units.m)\n self.assertEquals(sample.index_for_000_cell , [0,2,3])\n \n for x in range(300,400):\n sample = grid.samplePoint([0.0 + (x/100.0),4.0+(x/100.0),6.0+(x/100.0)]| units.m)\n self.assertEquals(sample.index_for_000_cell , [1,3,4])\n \n \n def test3(self):\n grid = datamodel.new_regular_grid((5,5,5), [10.0, 10.0, 10.0] | units.m)\n grid.mass = grid.x.value_in(units.m) | units.kg\n sample = grid.samplePoint([3.0,3.0,3.0]| units.m)\n self.assertEquals(sample.index_for_000_cell , [1,1,1])\n self.assertEquals(sample.surrounding_cell_indices , [\n [1,1,1],\n [2,1,1],\n [1,2,1],\n [1,1,2],\n [2,1,2],\n [1,2,2],\n [2,2,1],\n [2,2,2],\n ])\n \n def test4(self):\n grid = datamodel.new_regular_grid((5,5,5), [10.0, 10.0, 10.0] | units.m)\n grid.mass = grid.x.value_in(units.m) | units.kg\n sample = grid.samplePoint([3.0,3.0,3.0]| units.m)\n self.assertEquals(sample.surrounding_cells[0].position , [3.0,3.0,3.0] | units.m ) \n self.assertEquals(sample.surrounding_cells[1].position , [5.0,3.0,3.0] | 
units.m ) \n self.assertEquals(sample.surrounding_cells[-1].position , [5.0,5.0,5.0] | units.m ) \n\n \n def test5(self):\n grid = datamodel.new_regular_grid((5,5,5), [10.0, 10.0, 10.0] | units.m)\n grid.mass = grid.x.value_in(units.m) | units.kg\n sample = grid.samplePoint([3.0,3.0,3.0]| units.m)\n masses = sample.get_values_of_attribute(\"mass\")\n self.assertEquals(masses[0] , 3.0 | units.kg ) \n self.assertEquals(masses[1] , 5.0 | units.kg ) \n self.assertEquals(masses[2] , 3.0 | units.kg ) \n self.assertEquals(masses[3] , 3.0 | units.kg ) \n self.assertEquals(masses[4] , 5.0 | units.kg ) \n self.assertEquals(masses[5] , 3.0 | units.kg ) \n self.assertEquals(masses[6] , 5.0 | units.kg ) \n self.assertEquals(masses[7] , 5.0 | units.kg ) \n factors = sample.weighing_factors\n self.assertEquals(factors[0] , 1.0 | units.none ) \n self.assertEquals(factors[1] , 0.0 | units.none ) \n self.assertEquals(factors[2] , 0.0 | units.none ) \n self.assertEquals(factors[3] , 0.0 | units.none ) \n self.assertEquals(factors[4] , 0.0 | units.none ) \n self.assertEquals(factors[5] , 0.0 | units.none ) \n self.assertEquals(factors[6] , 0.0 | units.none ) \n self.assertEquals(factors[7] , 0.0 | units.none ) \n \n self.assertAlmostRelativeEquals(sample.mass , 3.0 | units.kg ) \n \n def test6(self):\n grid = datamodel.new_regular_grid((5,5,5), [10.0, 10.0, 10.0] | units.m)\n grid.mass = grid.x.value_in(units.m) | units.kg\n for xpos in numpy.arange(3.0,5.0,0.1):\n sample = grid.samplePoint([xpos,3.0,3.0]| units.m)\n self.assertAlmostRelativeEquals(sample.mass , (3.0 | units.kg) + ((2.0 * (xpos - 3.0) / 2.0) | units.kg) ) \n \n sample = grid.samplePoint([xpos,3.0,3.0]| units.m)\n self.assertAlmostRelativeEquals(sample.mass , (3.0 | units.kg) + ((2.0 * (xpos - 3.0) / 2.0) | units.kg) ) \n \n sample = grid.samplePoint([xpos,5.0,3.0]| units.m)\n self.assertAlmostRelativeEquals(sample.mass , (3.0 | units.kg) + ((2.0 * (xpos - 3.0) / 2.0) | units.kg) ) \n \n sample = grid.samplePoint([xpos,3.0,5.0]| units.m)\n self.assertAlmostRelativeEquals(sample.mass , (3.0 | units.kg) + ((2.0 * (xpos - 3.0) / 2.0) | units.kg) ) \n \n\n sample = grid.samplePoint([4.0,4.0,4.0]| units.m)\n self.assertAlmostRelativeEquals(sample.mass , (4.0 | units.kg)) \n\n def test7(self):\n grid = datamodel.new_regular_grid((5,5,5), [10.0, 10.0, 10.0] | units.m)\n grid.mass = grid.x.value_in(units.m) | units.kg\n sample = grid.samplePoint([3.0,3.0,3.0]| units.m)\n self.assertTrue(sample.isvalid)\n sample = grid.samplePoint([11.0,3.0,3.0]| units.m)\n self.assertFalse(sample.isvalid)\n sample = grid.samplePoint([3.0,-1.0,3.0]| units.m)\n self.assertFalse(sample.isvalid)\n \n \n def test8(self):\n grid = datamodel.new_regular_grid((5,5,5), [10.0, 10.0, 10.0] | units.m)\n grid.mass = grid.x.value_in(units.m) | units.kg\n sample = grid.samplePoint([3.0,3.0,3.0]| units.m, must_return_values_on_cell_center = True) \n self.assertEquals(sample.position, [3.0,3.0,3.0]| units.m) \n self.assertEquals(sample.mass, 3.0 | units.kg)\n sample = grid.samplePoint([3.5,3.0,3.0]| units.m, must_return_values_on_cell_center = True) \n self.assertEquals(sample.position, [3.0,3.0,3.0]| units.m) \n self.assertEquals(sample.mass, 3.0 | units.kg)\n \n def test9(self):\n grid = datamodel.new_regular_grid((5,5,5), [10.0, 10.0, 10.0] | units.m)\n grid.mass = grid.x.value_in(units.m) | units.kg\n sample = grid.samplePoint([3.0,3.0,3.0]| units.m, must_return_values_on_cell_center = False) \n self.assertEquals(sample.position, [3.0,3.0,3.0]| units.m) \n 
self.assertEquals(sample.mass, 3.0 | units.kg)\n sample = grid.samplePoint([3.5,3.0,3.0]| units.m, must_return_values_on_cell_center = False) \n self.assertEquals(sample.position, [3.5,3.0,3.0]| units.m) \n self.assertEquals(sample.mass, 3.5 | units.kg)\n \n \nclass TestGridSamplingMultiplePoints(amusetest.TestCase):\n \n def test1(self):\n grid = datamodel.new_regular_grid((5,5,5), [10.0, 10.0, 10.0] | units.m)\n grid.mass = grid.x.value_in(units.m) | units.kg\n samples = grid.samplePoints([[3.0,3.0,3.0], [4.0,3.0,3.0]]| units.m)\n self.assertEquals(len(samples), 2)\n self.assertEquals(samples.position[0] , [3.0,3.0,3.0]| units.m)\n self.assertEquals(samples.position[0] , samples[0].position)\n self.assertEquals(samples.position[1] , samples[1].position)\n self.assertEquals(samples.mass , [3.0, 4.0] | units.kg)\n \n def test2(self):\n grid = datamodel.new_regular_grid((5,5,5), [10.0, 10.0, 10.0] | units.m)\n grid.mass = grid.x.value_in(units.m) | units.kg\n samples = grid.samplePoints([[3.5,3.0,3.0], [4.5,3.0,3.0]]| units.m)\n self.assertEquals(len(samples), 2)\n self.assertEquals(samples.mass , [3.5, 4.5] | units.kg)\n \n def test3(self):\n grid1 = datamodel.new_regular_grid((5,5,5), [10.0, 10.0, 10.0] | units.m)\n grid1.mass = grid1.x.value_in(units.m) | units.kg\n grid2 = datamodel.new_regular_grid((5,5,5), [10.0, 10.0, 10.0] | units.m)\n grid2.position += (10.0,0,0) | units.m\n grid2.mass = grid2.x.value_in(units.m) | units.kg\n samples = SamplePointsOnMultipleGrids((grid1, grid2), [[3.0,3.0,3.0], [4.0,3.0,3.0], [13,3,3]]| units.m)\n self.assertEquals(len(samples), 3)\n self.assertEquals(samples.mass , [3.0, 4.0, 13.0] | units.kg)\n" ]
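The connectivity assertions in TestGridAttributes above fix the corner ordering exactly: for a (5,4,2) grid the corner points form a (6,5,3) lattice, a corner (i,j,k) flattens to i*5*3 + j*3 + k, and each cell lists its eight corners with dx varying fastest, then dy, then dz. A small sketch reproducing the [0, 15, 3, 18, 1, 16, 4, 19] pattern from test4; `cell_corners` is illustrative only, not the AMUSE implementation:

```python
# reproduces the corner ordering asserted in TestGridAttributes.test4
shape = (5, 4, 2)                      # cells
pshape = tuple(n + 1 for n in shape)   # corner points: (6, 5, 3)

def cell_corners(i, j, k):
    # eight corners of cell (i, j, k): dx varies fastest, then dy, then dz
    order = [(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0),
             (0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1)]
    return [(i + dx) * pshape[1] * pshape[2] + (j + dy) * pshape[2] + (k + dz)
            for dx, dy, dz in order]

print(cell_corners(0, 0, 0))  # -> [0, 15, 3, 18, 1, 16, 4, 19]
print(cell_corners(0, 0, 1))  # the cell inspected in test5
```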
[ [ "matplotlib.pyplot.show", "numpy.histogram", "numpy.linspace", "matplotlib.pyplot.figure" ], [ "matplotlib.pyplot.margins", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ], [ "numpy.arange", "numpy.indices", "numpy.ones", "numpy.all", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
suyukun666/UFO
[ "ba481b39b80d78c98e11cc22444d69de9e010439", "ba481b39b80d78c98e11cc22444d69de9e010439" ]
[ "Intra_MLP.py", "demo_matching.py" ]
[ "import torch\nimport numpy\n\n# codes of this function are borrowed from https://github.com/yanx27/Pointnet_Pointnet2_pytorch/blob/master/models/pointnet2_utils.py\ndef index_points(device, points, idx):\n \"\"\"\n\n Input:\n points: input points data, [B, N, C]\n idx: sample index data, [B, S]\n Return:\n new_points:, indexed points data, [B, S, C]\n \"\"\"\n B = points.shape[0]\n view_shape = list(idx.shape)\n view_shape[1:] = [1] * (len(view_shape) - 1)\n repeat_shape = list(idx.shape)\n repeat_shape[0] = 1\n # batch_indices = torch.arange(B, dtype=torch.long).to(device).view(view_shape).repeat(repeat_shape)\n batch_indices = torch.arange(B, dtype=torch.long).cuda().view(view_shape).repeat(repeat_shape)\n new_points = points[batch_indices, idx, :]\n return new_points\n\ndef knn_l2(device, net, k, u):\n '''\n Input:\n k: int32, number of k in k-nn search\n net: (batch_size, npoint, c) float32 array, points\n u: int32, block size\n Output:\n idx: (batch_size, npoint, k) int32 array, indices to input points\n '''\n INF = 1e8\n batch_size = net.size(0)\n npoint = net.size(1)\n n_channel = net.size(2)\n\n square = torch.pow(torch.norm(net, dim=2,keepdim=True),2)\n\n def u_block(batch_size, npoint, u):\n block = numpy.zeros([batch_size, npoint, npoint])\n n = npoint // u\n for i in range(n):\n block[:, (i*u):(i*u+u), (i*u):(i*u+u)] = numpy.ones([batch_size, u, u]) * (-INF)\n return block\n\n # minus_distance = 2 * torch.matmul(net, net.transpose(2,1)) - square - square.transpose(2,1) + torch.Tensor(u_block(batch_size, npoint, u)).to(device)\n minus_distance = 2 * torch.matmul(net, net.transpose(2,1)) - square - square.transpose(2,1) + torch.Tensor(u_block(batch_size, npoint, u)).cuda()\n _, indices = torch.topk(minus_distance, k, largest=True, sorted=False)\n \n return indices\n\n", "import os\nimport torch\nimport argparse\nimport queue\nimport threading\nfrom model_image import build_model, weights_init\nfrom torchvision import transforms\nimport cv2\nfrom PIL import Image\nimport numpy as np\nimport time\nimport datetime\n\nimport torch.nn.functional as F\ntorch.backends.cudnn.benchmark = True\n\n\ndef Idx(cur,ii,jj):\n return ii*14+jj+cur*14*14\n\ndef pix_idx(ii,jj):\n return (ii*16+8,jj*16+8)\n\ndef main(net, datapath, device, group_size=5, img_size=224, img_dir_name='image', gt_dir_name='groundtruth',\n img_ext=['.jpg', '.jpg', '.jpg', '.jpg'], gt_ext=['.png', '.bmp', '.jpg', '.png'],output_dir='./matching_result'):\n img_transform = transforms.Compose([transforms.Resize((img_size, img_size)), transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])\n gt_transform = transforms.Compose([transforms.Resize((img_size, img_size)), transforms.ToTensor()])\n img_transform_gray = transforms.Compose([transforms.Resize((img_size, img_size)), transforms.ToTensor(),\n transforms.Normalize(mean=[0.449], std=[0.226])])\n res_transform = transforms.Compose([transforms.Resize((img_size, img_size)), transforms.ToTensor()])\n net.eval()\n net = net.module.to(device)\n \n col_tab=[(101,67,254),(154,157,252),(173,205,249),(169,200,200),(155,175,131)]\n with torch.no_grad():\n ave_p, ave_j = [], []\n for p in range(len(datapath)):\n \n all_p, all_j = [], []\n all_class = [os.path.split(datapath[p])[-1]]\n \n datapath[p]=os.path.split(os.path.split(datapath[p])[0])[0]\n cur_idx=0\n image_list, gt_list = list(), list()\n for s in range(len(all_class)):\n image_path = sorted(os.listdir(os.path.join(datapath[p], img_dir_name, all_class[s])))\n 
image_list.append(list(map(lambda x: os.path.join(datapath[p], img_dir_name, all_class[s], x), image_path)))\n gt_list.append(list(map(lambda x: os.path.join(datapath[p], gt_dir_name, all_class[s], x.replace(img_ext[p], gt_ext[p])), image_path)))\n for i in range(len(image_list)):\n cur_class_all_image = sorted(image_list[i])\n cur_class_all_gt = gt_list[i]\n\n cur_class_gt = torch.zeros(len(cur_class_all_gt), img_size, img_size)\n \n cur_class_rgb = torch.zeros(len(cur_class_all_image), 3, img_size, img_size)\n real_img=[]\n \n idx=0\n idx_i,idx_j=3,4\n for m in range(len(cur_class_all_image)):\n rgb_ = Image.open(cur_class_all_image[m])\n if rgb_.mode == 'RGB':\n rgb_ = img_transform(rgb_)\n ans_ori=cv2.cvtColor((res_transform(Image.open(cur_class_all_image[m]).convert('RGB'))*255).permute(1,2,0).numpy().astype(np.uint8),cv2.COLOR_BGR2RGB)\n real_img.append(ans_ori*1)\n \n else:\n rgb_ = img_transform_gray(rgb_)\n cur_class_rgb[m, :, :, :] = rgb_\n\n cur_class_mask = torch.zeros(len(cur_class_all_image), img_size, img_size)\n divided = len(cur_class_all_image) // group_size\n rested = len(cur_class_all_image) % group_size\n if divided != 0:\n for k in range(divided):\n group_rgb = cur_class_rgb[(k * group_size): ((k + 1) * group_size)]\n # group_rgb = group_rgb.to(device)\n group_rgb = group_rgb.cuda()\n _, pred_mask,feat,feat2 = net(group_rgb)\n feat_list=[]\n #print(feat.shape)\n first=None\n for j in range(group_size):\n x_visualize = feat[j].unsqueeze(0).cpu() \n \n x_visualize = -np.mean(x_visualize.numpy(),axis=1).reshape(x_visualize.shape[-2],x_visualize.shape[-1])\n \n x_visualize=(x_visualize-x_visualize.min())/(x_visualize.max()-x_visualize.min())\n feat_list.append(x_visualize)\n CAM = cv2.applyColorMap((x_visualize*255).astype(np.uint8), cv2.COLORMAP_JET)\n CAM=F.interpolate(torch.from_numpy(CAM).permute(2,0,1).float().view(1,3,CAM.shape[0],CAM.shape[1]),size=[224,224],mode='bilinear').squeeze().permute(1,2,0).numpy().astype(np.uint8)\n cv2.imwrite(os.path.join(output_dir,str(j)+'_CAM.png'),CAM)\n if first is None:\n first=x_visualize\n #break\n th=0.9\n cat_img=np.concatenate([real_img[0],real_img[1]],axis=1)\n cur_patch=0\n for ii in range(14):\n for jj in range(14):\n if first[ii][jj]>=th:\n \n Max=-10\n Max_pp=0\n Max_qq=0\n for pp in range(14):\n for qq in range(14):\n if feat2[Idx(0,ii,jj)][Idx(1,pp,qq)]>Max:\n Max_pp=pp\n Max_qq=qq\n Max=feat2[Idx(0,ii,jj)][Idx(1,pp,qq)]\n cur_patch+=1\n \n cv2.line(cat_img,pix_idx(jj,ii),pix_idx(Max_qq+14,Max_pp),col_tab[cur_patch%len(col_tab)],1)\n cv2.circle(cat_img,pix_idx(jj,ii),1,col_tab[cur_patch%len(col_tab)],1)\n cv2.circle(cat_img,pix_idx(Max_qq+14,Max_pp),1,col_tab[cur_patch%len(col_tab)],1)\n \n cur_class_mask[(k * group_size): ((k + 1) * group_size)] = pred_mask\n cv2.imwrite('./matching_result/matching_result.jpg',cat_img)\n if rested != 0:\n group_rgb_tmp_l = cur_class_rgb[-rested:]\n group_rgb_tmp_r = cur_class_rgb[:group_size-rested]\n group_rgb = torch.cat((group_rgb_tmp_l, group_rgb_tmp_r), dim=0)\n group_rgb = group_rgb.cuda()\n _, pred_mask,feat = net(group_rgb)\n for j in range(group_size):\n x_visualize = feat[j].unsqueeze(0).cpu().numpy()\n x_visualize = np.mean(x_visualize,axis=1).reshape(x_visualize.shape[-2],x_visualize.shape[-1])\n x_visualize = (((x_visualize - np.min(x_visualize))/(np.max(x_visualize)-np.min(x_visualize)))*255).astype(np.uint8)\n savedir = './visual_of_transformer/' \n \n x_visualize = cv2.applyColorMap(x_visualize, cv2.COLORMAP_JET) \n ans=torch.zeros(img_size,img_size,3).numpy()\n for ii 
in range(img_size):\n for jj in range(img_size):\n ans[ii][jj]=x_visualize[ii//((img_size+group_size-1)//feat_size)][jj//((img_size+group_size-1)//feat_size)]\n cur_idx+=1 \n cv2.imwrite(savedir+str(cur_idx)+'.jpg',ans)\n cur_class_mask[(divided * group_size): ] = pred_mask[:rested]\n\nif __name__ == '__main__':\n # train_val_config\n parser = argparse.ArgumentParser()\n parser.add_argument('--model', default='./models/image_best.pth',help=\"restore checkpoint\")\n parser.add_argument('--data_path',default='./matching_data/image/camel', help=\"dataset for evaluation\")\n parser.add_argument('--output_dir',default='./matching_result', help=\"dataset for evaluation\")\n args = parser.parse_args()\n \n val_datapath = [args.data_path]\n \n # project config\n project_name = 'UFO'\n device = torch.device('cuda:0')\n img_size = 224\n lr = 1e-5\n lr_de = 20000\n epochs = 100000\n batch_size = 4\n group_size = 5\n log_interval = 100\n val_interval = 1000\n\n model_path = args.model\n gpu_id='cuda:0'\n device = torch.device(gpu_id)\n net = build_model(device,demo_mode=True).to(device)\n net=torch.nn.DataParallel(net)\n net.load_state_dict(torch.load(model_path, map_location=gpu_id))\n \n net.eval()\n with torch.no_grad(): \n main(net, val_datapath, device, group_size=5, img_size=224, img_dir_name='image', gt_dir_name='groundtruth',\n img_ext=['.jpg', '.jpg', '.jpg', '.jpg'], gt_ext=['.png', '.bmp', '.jpg', '.png'],output_dir=args.output_dir)" ]
[ [ "torch.norm", "numpy.ones", "torch.arange", "torch.topk", "numpy.zeros" ], [ "torch.cat", "torch.load", "torch.zeros", "numpy.min", "torch.from_numpy", "numpy.concatenate", "numpy.max", "torch.no_grad", "numpy.mean", "torch.device", "torch.nn.DataParallel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
triplet02/KoSpeech
[ "74d267b76ec72cf8bc916982af9a58df2dc1ee4e" ]
[ "kospeech/data/audio/parser.py" ]
[ "import numpy as np\nfrom torch import Tensor, FloatTensor\nfrom kospeech.data.audio.core import load_audio\nfrom kospeech.data.audio.augment import NoiseInjector, SpecAugment\nfrom kospeech.data.audio.feature import MelSpectrogram, MFCC, Spectrogram, FilterBank\n\n\nclass AudioParser(object):\n \"\"\"\n Provides inteface of audio parser.\n\n Note:\n Do not use this class directly, use one of the sub classes.\n\n Method:\n - **parse_audio()**: abstract method. you have to override this method.\n - **parse_transcript()**: abstract method. you have to override this method.\n \"\"\"\n def __init__(self, dataset_path, noiseset_size, sample_rate=16000, noise_level=0.7, noise_augment=False):\n if noise_augment:\n self.noise_injector = NoiseInjector(dataset_path, noiseset_size, sample_rate, noise_level)\n\n def parse_audio(self, *args, **kwargs):\n raise NotImplementedError\n\n def parse_transcript(self, *args, **kwargs):\n raise NotImplementedError\n\n\nclass SpectrogramParser(AudioParser):\n \"\"\"\n Parses audio file into (spectrogram / mel spectrogram / mfcc) with various options.\n\n Args:\n transform_method (str): which feature to use (default: mel)\n sample_rate (int): Sample rate of audio signal. (Default: 16000)\n n_mels (int): Number of mfc coefficients to retain. (Default: 40)\n frame_length (int): frame length for spectrogram (ms) (Default : 20)\n frame_shift (int): Length of hop between STFT windows. (ms) (Default: 10)\n feature_extract_by (str): which library to use for feature extraction(default: librosa)\n del_silence (bool): flag indication whether to delete silence or not (default: True)\n input_reverse (bool): flag indication whether to reverse input or not (default: True)\n normalize (bool): flag indication whether to normalize spectrum or not (default:True)\n time_mask_para (int): Hyper Parameter for Time Masking to limit time masking length\n freq_mask_para (int): Hyper Parameter for Freq Masking to limit freq masking length\n time_mask_num (int): how many time-masked area to make\n freq_mask_num (int): how many freq-masked area to make\n sos_id (int): start of sentence token`s identification\n eos_id (int): end of sentence token`s identification\n target_dict (dict): dictionary of filename and labels\n \"\"\"\n VANILLA = 0 # Not apply augmentation\n SPEC_AUGMENT = 1 # SpecAugment\n NOISE_INJECTION = 2 # Noise Injection\n HYBRID_AUGMENT = 3 # Noise Injection & SpecAugment\n\n def __init__(self, feature_extract_by: str = 'librosa', sample_rate: int = 16000,\n n_mels: int = 80, frame_length: int = 20, frame_shift: int = 10,\n del_silence: bool = False, input_reverse: bool = True,\n normalize: bool = False, transform_method: str = 'mel',\n time_mask_para: int = 70, freq_mask_para: int = 12, time_mask_num: int = 2, freq_mask_num: int = 2,\n sos_id: int = 1, eos_id: int = 2, target_dict: dict = None, noise_augment: bool = False,\n dataset_path: str = None, noiseset_size: int = 0, noise_level: float = 0.7) -> None:\n super(SpectrogramParser, self).__init__(dataset_path, noiseset_size, sample_rate, noise_level, noise_augment)\n self.del_silence = del_silence\n self.input_reverse = input_reverse\n self.normalize = normalize\n self.sos_id = sos_id\n self.eos_id = eos_id\n self.target_dict = target_dict\n self.spec_augment = SpecAugment(time_mask_para, freq_mask_para, time_mask_num, freq_mask_num)\n\n if transform_method.lower() == 'mel':\n self.transforms = MelSpectrogram(sample_rate, n_mels, frame_length, frame_shift, feature_extract_by)\n\n elif transform_method.lower() == 
'mfcc':\n self.transforms = MFCC(sample_rate, n_mels, frame_length, frame_shift, feature_extract_by)\n\n elif transform_method.lower() == 'spect':\n self.transforms = Spectrogram(sample_rate, frame_length, frame_shift, feature_extract_by)\n\n elif transform_method.lower() == 'fbank':\n self.transforms = FilterBank(sample_rate, n_mels, frame_length, frame_shift)\n\n else:\n raise ValueError(\"Unsupported feature : {0}\".format(transform_method))\n\n def parse_audio(self, audio_path: str, augment_method: int) -> Tensor:\n \"\"\"\n Parses audio.\n\n Args:\n audio_path (str): path of audio file\n augment_method (int): flag indication which augmentation method to use.\n\n Returns: feature_vector\n - **feature_vector** (torch.FloatTensor): feature from audio file.\n \"\"\"\n signal = load_audio(audio_path, self.del_silence)\n\n if signal is None:\n return None\n\n if augment_method == SpectrogramParser.NOISE_INJECTION or augment_method == SpectrogramParser.HYBRID_AUGMENT:\n signal = self.noise_injector(signal)\n\n feature_vector = self.transforms(signal)\n\n if self.normalize:\n feature_vector -= feature_vector.mean()\n\n if self.input_reverse: # Refer to \"Sequence to Sequence Learning with Neural Network\" paper\n feature_vector = feature_vector[:, ::-1]\n feature_vector = FloatTensor(np.ascontiguousarray(np.swapaxes(feature_vector, 0, 1)))\n else:\n feature_vector = FloatTensor(feature_vector).transpose(0, 1)\n\n if augment_method == SpectrogramParser.SPEC_AUGMENT or augment_method == SpectrogramParser.HYBRID_AUGMENT:\n feature_vector = self.spec_augment(feature_vector)\n\n return feature_vector\n\n def parse_transcript(self, *args, **kwargs):\n raise NotImplementedError\n" ]
[ [ "numpy.swapaxes", "torch.FloatTensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ZhangJianAI-CV/Awesome-project
[ "b07c8c270bd511246133541c4aee28c2472c633f" ]
[ "PaddleDetection/deploy/pptracking/python/mot/tracker/jde_tracker.py" ]
[ "# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThis code is based on https://github.com/Zhongdao/Towards-Realtime-MOT/blob/master/tracker/multitracker.py\n\"\"\"\n\nimport numpy as np\nfrom collections import defaultdict\n\nfrom ..matching import jde_matching as matching\nfrom ..motion import KalmanFilter\nfrom .base_jde_tracker import TrackState, STrack\nfrom .base_jde_tracker import joint_stracks, sub_stracks, remove_duplicate_stracks\n\n__all__ = ['JDETracker']\n\n\nclass JDETracker(object):\n __shared__ = ['num_classes']\n \"\"\"\n JDE tracker, support single class and multi classes\n\n Args:\n num_classes (int): the number of classes\n det_thresh (float): threshold of detection score\n track_buffer (int): buffer for tracker\n min_box_area (int): min box area to filter out low quality boxes\n vertical_ratio (float): w/h, the vertical ratio of the bbox to filter\n bad results. If set <0 means no need to filter bboxes,usually set\n 1.6 for pedestrian tracking.\n tracked_thresh (float): linear assignment threshold of tracked \n stracks and detections\n r_tracked_thresh (float): linear assignment threshold of \n tracked stracks and unmatched detections\n unconfirmed_thresh (float): linear assignment threshold of \n unconfirmed stracks and unmatched detections\n motion (str): motion model, KalmanFilter as default\n conf_thres (float): confidence threshold for tracking\n metric_type (str): either \"euclidean\" or \"cosine\", the distance metric \n used for measurement to track association.\n \"\"\"\n\n def __init__(self,\n use_byte=False,\n num_classes=1,\n det_thresh=0.3,\n track_buffer=30,\n min_box_area=200,\n vertical_ratio=1.6,\n tracked_thresh=0.7,\n r_tracked_thresh=0.5,\n unconfirmed_thresh=0.7,\n conf_thres=0,\n match_thres=0.8,\n low_conf_thres=0.2,\n motion='KalmanFilter',\n metric_type='euclidean'):\n self.use_byte = use_byte\n self.num_classes = num_classes\n self.det_thresh = det_thresh if not use_byte else conf_thres + 0.1\n self.track_buffer = track_buffer\n self.min_box_area = min_box_area\n self.vertical_ratio = vertical_ratio\n\n self.tracked_thresh = tracked_thresh\n self.r_tracked_thresh = r_tracked_thresh\n self.unconfirmed_thresh = unconfirmed_thresh\n self.conf_thres = conf_thres\n self.match_thres = match_thres\n self.low_conf_thres = low_conf_thres\n\n if motion == 'KalmanFilter':\n self.motion = KalmanFilter()\n self.metric_type = metric_type\n\n self.frame_id = 0\n self.tracked_tracks_dict = defaultdict(list) # dict(list[STrack])\n self.lost_tracks_dict = defaultdict(list) # dict(list[STrack])\n self.removed_tracks_dict = defaultdict(list) # dict(list[STrack])\n\n self.max_time_lost = 0\n # max_time_lost will be calculated: int(frame_rate / 30.0 * track_buffer)\n\n def update(self, pred_dets, pred_embs=None):\n \"\"\"\n Processes the image frame and finds bounding box(detections).\n Associates the detection with corresponding tracklets and also handles\n lost, removed, refound and active 
tracklets.\n\n Args:\n pred_dets (np.array): Detection results of the image, the shape is\n [N, 6], means 'cls_id, score, x0, y0, x1, y1'.\n pred_embs (np.array): Embedding results of the image, the shape is\n [N, 128] or [N, 512].\n\n Return:\n output_stracks_dict (dict(list)): The list contains information\n regarding the online_tracklets for the recieved image tensor.\n \"\"\"\n self.frame_id += 1\n if self.frame_id == 1:\n STrack.init_count(self.num_classes)\n activated_tracks_dict = defaultdict(list)\n refined_tracks_dict = defaultdict(list)\n lost_tracks_dict = defaultdict(list)\n removed_tracks_dict = defaultdict(list)\n output_tracks_dict = defaultdict(list)\n\n pred_dets_dict = defaultdict(list)\n pred_embs_dict = defaultdict(list)\n\n # unify single and multi classes detection and embedding results\n for cls_id in range(self.num_classes):\n cls_idx = (pred_dets[:, 0:1] == cls_id).squeeze(-1)\n pred_dets_dict[cls_id] = pred_dets[cls_idx]\n if pred_embs is not None:\n pred_embs_dict[cls_id] = pred_embs[cls_idx]\n else:\n pred_embs_dict[cls_id] = None\n\n for cls_id in range(self.num_classes):\n \"\"\" Step 1: Get detections by class\"\"\"\n pred_dets_cls = pred_dets_dict[cls_id]\n pred_embs_cls = pred_embs_dict[cls_id]\n remain_inds = (pred_dets_cls[:, 1:2] > self.conf_thres).squeeze(-1)\n if remain_inds.sum() > 0:\n pred_dets_cls = pred_dets_cls[remain_inds]\n if self.use_byte:\n detections = [\n STrack(\n STrack.tlbr_to_tlwh(tlbrs[2:6]),\n tlbrs[1],\n cls_id,\n 30,\n temp_feat=None) for tlbrs in pred_dets_cls\n ]\n else:\n pred_embs_cls = pred_embs_cls[remain_inds]\n detections = [\n STrack(\n STrack.tlbr_to_tlwh(tlbrs[2:6]), tlbrs[1], cls_id,\n 30, temp_feat)\n for (tlbrs, temp_feat\n ) in zip(pred_dets_cls, pred_embs_cls)\n ]\n else:\n detections = []\n ''' Add newly detected tracklets to tracked_stracks'''\n unconfirmed_dict = defaultdict(list)\n tracked_tracks_dict = defaultdict(list)\n for track in self.tracked_tracks_dict[cls_id]:\n if not track.is_activated:\n # previous tracks which are not active in the current frame are added in unconfirmed list\n unconfirmed_dict[cls_id].append(track)\n else:\n # Active tracks are added to the local list 'tracked_stracks'\n tracked_tracks_dict[cls_id].append(track)\n \"\"\" Step 2: First association, with embedding\"\"\"\n # building tracking pool for the current frame\n track_pool_dict = defaultdict(list)\n track_pool_dict[cls_id] = joint_stracks(\n tracked_tracks_dict[cls_id], self.lost_tracks_dict[cls_id])\n\n # Predict the current location with KalmanFilter\n STrack.multi_predict(track_pool_dict[cls_id], self.motion)\n\n if self.use_byte:\n dists = matching.iou_distance(track_pool_dict[cls_id],\n detections)\n matches, u_track, u_detection = matching.linear_assignment(\n dists, thresh=self.match_thres) # not self.tracked_thresh\n else:\n dists = matching.embedding_distance(\n track_pool_dict[cls_id],\n detections,\n metric=self.metric_type)\n dists = matching.fuse_motion(\n self.motion, dists, track_pool_dict[cls_id], detections)\n matches, u_track, u_detection = matching.linear_assignment(\n dists, thresh=self.tracked_thresh)\n\n for i_tracked, idet in matches:\n # i_tracked is the id of the track and idet is the detection\n track = track_pool_dict[cls_id][i_tracked]\n det = detections[idet]\n if track.state == TrackState.Tracked:\n # If the track is active, add the detection to the track\n track.update(detections[idet], self.frame_id)\n activated_tracks_dict[cls_id].append(track)\n else:\n # We have obtained a detection from a 
track which is not active,\n # hence put the track in refind_stracks list\n track.re_activate(det, self.frame_id, new_id=False)\n refined_tracks_dict[cls_id].append(track)\n\n # None of the steps below happen if there are no undetected tracks.\n \"\"\" Step 3: Second association, with IOU\"\"\"\n if self.use_byte:\n inds_low = pred_dets_dict[cls_id][:, 1:2] > self.low_conf_thres\n inds_high = pred_dets_dict[cls_id][:, 1:2] < self.conf_thres\n inds_second = np.logical_and(inds_low, inds_high).squeeze(-1)\n pred_dets_cls_second = pred_dets_dict[cls_id][inds_second]\n\n # association the untrack to the low score detections\n if len(pred_dets_cls_second) > 0:\n detections_second = [\n STrack(\n STrack.tlbr_to_tlwh(tlbrs[:4]),\n tlbrs[4],\n cls_id,\n 30,\n temp_feat=None)\n for tlbrs in pred_dets_cls_second[:, :5]\n ]\n else:\n detections_second = []\n r_tracked_stracks = [\n track_pool_dict[cls_id][i] for i in u_track\n if track_pool_dict[cls_id][i].state == TrackState.Tracked\n ]\n dists = matching.iou_distance(r_tracked_stracks,\n detections_second)\n matches, u_track, u_detection_second = matching.linear_assignment(\n dists, thresh=0.4) # not r_tracked_thresh\n else:\n detections = [detections[i] for i in u_detection]\n r_tracked_stracks = []\n for i in u_track:\n if track_pool_dict[cls_id][i].state == TrackState.Tracked:\n r_tracked_stracks.append(track_pool_dict[cls_id][i])\n dists = matching.iou_distance(r_tracked_stracks, detections)\n\n matches, u_track, u_detection = matching.linear_assignment(\n dists, thresh=self.r_tracked_thresh)\n\n for i_tracked, idet in matches:\n track = r_tracked_stracks[i_tracked]\n det = detections[\n idet] if not self.use_byte else detections_second[idet]\n if track.state == TrackState.Tracked:\n track.update(det, self.frame_id)\n activated_tracks_dict[cls_id].append(track)\n else:\n track.re_activate(det, self.frame_id, new_id=False)\n refined_tracks_dict[cls_id].append(track)\n\n for it in u_track:\n track = r_tracked_stracks[it]\n if not track.state == TrackState.Lost:\n track.mark_lost()\n lost_tracks_dict[cls_id].append(track)\n '''Deal with unconfirmed tracks, usually tracks with only one beginning frame'''\n detections = [detections[i] for i in u_detection]\n dists = matching.iou_distance(unconfirmed_dict[cls_id], detections)\n matches, u_unconfirmed, u_detection = matching.linear_assignment(\n dists, thresh=self.unconfirmed_thresh)\n for i_tracked, idet in matches:\n unconfirmed_dict[cls_id][i_tracked].update(detections[idet],\n self.frame_id)\n activated_tracks_dict[cls_id].append(unconfirmed_dict[cls_id][\n i_tracked])\n for it in u_unconfirmed:\n track = unconfirmed_dict[cls_id][it]\n track.mark_removed()\n removed_tracks_dict[cls_id].append(track)\n \"\"\" Step 4: Init new stracks\"\"\"\n for inew in u_detection:\n track = detections[inew]\n if track.score < self.det_thresh:\n continue\n track.activate(self.motion, self.frame_id)\n activated_tracks_dict[cls_id].append(track)\n \"\"\" Step 5: Update state\"\"\"\n for track in self.lost_tracks_dict[cls_id]:\n if self.frame_id - track.end_frame > self.max_time_lost:\n track.mark_removed()\n removed_tracks_dict[cls_id].append(track)\n\n self.tracked_tracks_dict[cls_id] = [\n t for t in self.tracked_tracks_dict[cls_id]\n if t.state == TrackState.Tracked\n ]\n self.tracked_tracks_dict[cls_id] = joint_stracks(\n self.tracked_tracks_dict[cls_id], activated_tracks_dict[cls_id])\n self.tracked_tracks_dict[cls_id] = joint_stracks(\n self.tracked_tracks_dict[cls_id], refined_tracks_dict[cls_id])\n 
self.lost_tracks_dict[cls_id] = sub_stracks(\n self.lost_tracks_dict[cls_id], self.tracked_tracks_dict[cls_id])\n self.lost_tracks_dict[cls_id].extend(lost_tracks_dict[cls_id])\n self.lost_tracks_dict[cls_id] = sub_stracks(\n self.lost_tracks_dict[cls_id], self.removed_tracks_dict[cls_id])\n self.removed_tracks_dict[cls_id].extend(removed_tracks_dict[cls_id])\n self.tracked_tracks_dict[cls_id], self.lost_tracks_dict[\n cls_id] = remove_duplicate_stracks(\n self.tracked_tracks_dict[cls_id],\n self.lost_tracks_dict[cls_id])\n\n # get scores of lost tracks\n output_tracks_dict[cls_id] = [\n track for track in self.tracked_tracks_dict[cls_id]\n if track.is_activated\n ]\n\n return output_tracks_dict\n" ]
[ [ "numpy.logical_and" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tkhe/simple-mtcnn
[ "f39b66ec958efc745e1af8a4e0c65a63e0d4a6d8" ]
[ "tools/train_net.py" ]
[ "import argparse\nimport pprint\nimport sys\n\nimport torch\nimport torchvision.transforms as transforms\nfrom torch.utils.data import DataLoader\n\nfrom mtcnn.config import cfg\nfrom mtcnn.datasets.iteration_based_batch_sampler import build_batch_sampler\nfrom mtcnn.datasets.roidb import get_roidb\nfrom mtcnn.engine.trainer import do_train\nfrom mtcnn.modeling.model_builder import build_model\nfrom mtcnn.utils.logger import setup_logging\nfrom mtcnn.utils.lr_scheduler import make_optimizer\nfrom mtcnn.utils.lr_scheduler import make_scheduler\n\nlogger = setup_logging(__name__)\n\n\ndef train():\n model = build_model(cfg.MODEL.TYPE)\n device = torch.device(cfg.MODEL.DEVICE)\n model.to(device)\n\n optimizer = make_optimizer(cfg, model)\n scheduler = make_scheduler(cfg, optimizer)\n transform = transforms.ToTensor()\n\n roidb = get_roidb(transform=transform)\n batch_sampler = build_batch_sampler(\n roidb,\n cfg.TRAIN.BATCH_SIZE,\n shuffle=True\n )\n data_loader = DataLoader(roidb, batch_sampler=batch_sampler)\n\n do_train(model, data_loader, optimizer, scheduler, device)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--cfg',\n dest='cfg_file',\n default=None,\n type=str\n )\n if len(sys.argv) == 1:\n parser.print_help()\n sys.exit(1)\n\n return parser.parse_args()\n\n\ndef main():\n args = parse_args()\n logger.info('Called with args:')\n logger.info(pprint.pformat(args))\n if args.cfg_file:\n cfg.merge_from_file(args.cfg_file)\n logger.info('Using configs:')\n logger.info(pprint.pformat(cfg))\n\n train()\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.device", "torch.utils.data.DataLoader" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
NegriLuca/pigasus
[ "d5057b771f81cfa05bb08ea4b0fd99088150cd7a", "d5057b771f81cfa05bb08ea4b0fd99088150cd7a", "d5057b771f81cfa05bb08ea4b0fd99088150cd7a", "d5057b771f81cfa05bb08ea4b0fd99088150cd7a", "d5057b771f81cfa05bb08ea4b0fd99088150cd7a" ]
[ "python/fem/norm.py", "python/plugin/figa_circ.py", "python/plugin/grad_shafranov.py", "python/fem/quadratures.py", "tests/test_nonlin_ex1_newton.py" ]
[ "# -*- coding: UTF-8 -*-\n#! /usr/bin/python\n\n# To change this template, choose Tools | Templates\n# and open the template in the editor.\n\n__author__=\"ARA\"\n__all__ = ['norm']\n__date__ =\"$Feb 14, 2012 11:40:06 AM$\"\n\nfrom . import common_obj as _com\nfrom . import constants as _cst\nimport numpy as _np\nfrom .pigasusObject import *\n\nclass norm(pigasusObject):\n def __init__ ( self, field = None, type = None, func = None, paramevalfunc = False, exact = None ):\n pigasusObject.__init__(self)\n\n self.id = self.com.nnorms\n self.nparam = 0\n self.paramevalfunc = paramevalfunc\n\n if field is not None:\n self.field = field\n self.space = field.space\n self.loc_id = self.space.grids.add_norm_id(self)\n else:\n raise(\"You must give a field for the current norm\")\n\n if type is not None:\n self.type = type\n else:\n self.type = _cst.NORM_L2\n\n self._set_nparam()\n\n from .utils import function\n if func is not None:\n self.func = function(func, space=self.space)\n else:\n self.defaultFuncParam()\n\n if exact is not None:\n self.exact = function(exact, space=self.space)\n else:\n self.defaultFuncExact()\n\n # this must be the last thing to do\n self.com.nnorms += 1\n self.com.norms.append(self)\n\n def setInfoData(self):\n \"\"\"\n prints informations about the current norm\n \"\"\"\n self.infoData['id'] = str(self.id)\n self.infoData['field'] = str(self.field.id)\n self.infoData['space'] = str(self.space.id)\n self.infoData['loc_id'] = str(self.loc_id)\n self.infoData['nparam'] = str(self.nparam)\n self.infoData['paramevalfunc'] = str(self.paramevalfunc)\n self.infoData['type'] = str(self.type)\n\n def _getGlobalNorm(self):\n return self.com.pyfem.getglobalnorm ( self.id )\n\n def _getPatchNorm(self):\n li_npatchs = self.space.grids.npatchs\n return self.com.pyfem._getPatchNorm ( self.id, li_npatchs )\n\n def _getElementNorm(self, ai_patch):\n\n li_nel = self.space.grids.list_grid[ai_patch].nel\n return self.com.pyfem._getElementNorm ( self.id, ai_patch, li_nel)\n\n def get(self, type=0, ai_patch=None):\n \"\"\"\n returns values for a given type of norm\n type = 0 : for a global computation\n type = 1 : for a patch computation\n type = 2 : for an element computation\n \"\"\"\n if (type == 0) :\n return self._getGlobalNorm()\n if (type == 1) :\n return self._getPatchNorm()\n if (type == 2) and (ai_patch is not None):\n return self._getElementNorm(ai_patch)\n\n def setEvalNorm(self, ai_patch=0, fields=[], funcs=[]):\n \"\"\"\n fields is a list of fields\n funcs is a list of functions\n \"\"\"\n lpr_pts = self.space.get_points(ai_patch)\n list_pts = []\n for i in range(0, self.space.dim):\n list_pts.append(lpr_pts[i,0,:])\n lpr_pts = list_pts\n\n li_dim = self.space.dim\n if li_dim not in [2]:\n print(\"setEvalNorm: Not yet implemetend for the desired dimension\")\n\n lpi_shape = lpr_pts.shape[0:-1]\n lpr_val = _np.zeros((1,lpi_shape[0],lpi_shape[1]))\n for F in fields:\n lpr_f = F.eval(ai_patch, elts)[ai_patch,:,:]\n lpr_val[0,:,:] += lpr_f[:,:]\n for func in funcs:\n lpr_f = _np.zeros(lpr_pts.shape[0:-1])\n for (i,list_p) in enumerate(lpr_pts):\n for (j,p) in enumerate(list_p):\n lpr_f[i,j] =func (p[0], p[1])[0]\n lpr_val[0,:,:] += lpr_f[:,:]\n self.com.pyfem.set_field_on_grids(self.field.id, ai_patch, lpr_val)\n\n def _set_nparam(self):\n\n if ( self.type in [ _cst.NORM_L2 ] ):\n self.nparam = 1\n return\n if ( self.type in [ _cst.NORM_H1 ] ):\n li_dim = self.space.dim\n self.nparam = li_dim**2\n return\n else :\n print(\"NORM-_set_nparam : type not implemented yet\")\n import 
sys; sys.exit(1)\n\n def evalfunc(self, ai_patch, apr_points, elts=None, type=\"param\"):\n \"\"\"\n Evaluation of the param-function over a given list of points\n \"\"\"\n if not self.paramevalfunc :\n lpr_val = self._evalfunc_std(ai_patch, apr_points, elts, type)\n else:\n lpr_parampts = self.space.get_parametricPoints(ai_patch_id=ai_patch)\n lpr_val = self._evalfunc_std(ai_patch, lpr_parampts, elts, type)\n return lpr_val\n\n def _evalfunc_std(self, ai_patch, apr_points, elts, type):\n \"\"\"\n sequential version of the evaluation\n \"\"\"\n if type == \"param\":\n# print \"==== param evaluation\"\n return self.func(apr_points)\n if type == \"exact\":\n# print \"==== exact evaluation\"\n return self.exact(apr_points)\n\n def defaultFuncParam(self):\n li_dim = self.space.dim\n\n if ( self.type in [ _cst.NORM_L2 ] ):\n if li_dim == 1:\n func = lambda x : [1.0]\n if li_dim == 2:\n func = lambda x,y : [1.0]\n if li_dim == 3:\n func = lambda x,y,z : [1.0]\n elif ( self.type in [ _cst.NORM_H1 ] ):\n if li_dim == 1:\n func = lambda x : [1.0]\n if li_dim == 2:\n func = lambda x,y : [1.0, 0.0, 0.0, 1.0]\n if li_dim == 3:\n func = lambda x,y,z : [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]\n else :\n print(\"NORM-defaultFuncParam : type not implemented yet\")\n import sys; sys.exit(1)\n\n from .utils import function\n self.func = function(func, space=self.space)\n\n def defaultFuncExact(self):\n li_dim = self.space.dim\n\n if li_dim == 1:\n func = lambda x : [0.0] * self.field.ndof\n elif li_dim == 2:\n func = lambda x,y : [0.0] * self.field.ndof\n elif li_dim == 3:\n func = lambda x,y,z : [0.0] * self.field.ndof\n else :\n raise(\"type not implemented yet\")\n\n from .utils import function\n self.exact = function(exact, space=self.space)\n\n\n def set_func(self, exact):\n \"\"\"\n this sets the param-function of the current field\n \"\"\"\n from .utils import function\n self.exact = function(exact, space=self.space)\n", "# -*- coding: UTF-8 -*-\n\"\"\"\nThis module is intend to solve the matrix equation\n\nsum_i=1^r Ax_i X Ay_i = F\n\nWhere Ay_i are circulant matrices\n Ax_i is in general are band matrices\n\"\"\"\n\nimport numpy as np\nfrom scipy.linalg import circulant, inv\nfrom scipy.sparse import csr_matrix, diags\nfrom scipy.sparse.linalg import gmres, splu\nfrom scipy.sparse import kron\nfrom scipy.io import mmwrite, mmread\nfrom scipy.optimize import minimize\nfrom scipy.sparse.linalg import LinearOperator\n\n\n# -----------------------\npi = np.pi\ncos = np.cos\nsin = np.sin\n# -----------------------\n\n# ...\ndef CMPLX(x,y):\n return x + y * 1j\n# ...\n\n# ...\ndef genTestMatrices(r, nx, ny, p, EXPORT=False, IMPORT=False):\n list_Ax = [] ; list_Ay = []\n\n # ... Define non singular diagonal matrices for the x-direction\n shift = 0\n for i in range(0,r):\n if IMPORT:\n Ax = mmread(\"figa/Ax\"+str(i)+\".mtx\")\n else:\n# a = np.random.random(nx)\n a = np.ones(nx)\n Ax = diags(a,shift)\n Ax = csr_matrix(Ax)\n if EXPORT:\n mmwrite(\"figa/Ax\"+str(i)+\".mtx\", Ax)\n\n list_Ax.append(Ax)\n # ...\n\n # ... 
Define circulant matrices for the x-direction\n for i in range(0,r):\n if IMPORT:\n Ay = mmread(\"figa/Ay\"+str(i)+\".mtx\")\n else:\n ay = np.zeros(ny)\n ay[:2*p+1] = np.random.random(2*p+1)\n Ay = circulant(ay)\n Ay = csr_matrix(Ay)\n if EXPORT:\n mmwrite(\"figa/Ay\"+str(i)+\".mtx\", Ay)\n\n list_Ay.append(Ay)\n # ...\n\n return list_Ax, list_Ay\n# ...\n\n# ...\ndef computeEigenValues(list_Ay, cmplx=True):\n # ...\n def computeEigenVal(A):\n dtype = np.double\n if cmplx:\n dtype = np.complex\n n,m = A.shape\n a = np.zeros(n)\n for i in range(0,n):\n a[i] = A[0,i]\n eigenA = np.zeros(n, dtype=dtype)\n for k in range(0,n):\n ck = 2*pi*k/n\n for j in range(0,n):\n if cmplx:\n eigenA[k] += a[j] * CMPLX( cos(ck*j) , sin(ck*j) )\n else:\n eigenA[k] += a[j] * cos(ck*j)\n return eigenA\n # ...\n list_eigenAy = []\n for Ay in list_Ay:\n eigenAy = computeEigenVal(Ay)\n list_eigenAy.append(eigenAy)\n return list_eigenAy\n# ...\n\n# ...\ndef AssembleColumnMatrix(j, nx, ny, list_Ax, list_eigenAy):\n \"\"\"\n j must be in range(0,ny)\n \"\"\"\n Sp = np.zeros((nx,nx))\n for Ax,eigenAy in zip(list_Ax,list_eigenAy):\n Sp = Sp + eigenAy[j] * Ax.todense()\n return csr_matrix(Sp)\n# ...\n\n# ...\ndef solveSp(Sp, b):\n x = gmres(Sp,b)[0]\n return x\n# ...\n\n# ...\ndef rsolve(list_Ax, list_eigenAy, F):\n fft = np.fft.rfft\n ifft = np.fft.irfft\n\n # ...\n nx,ny = F.shape\n n = nx ; m = ny\n mmax = m/2 -1\n x = F.transpose()\n _F = np.zeros((m, n))\n U = np.zeros_like(_F)\n # ...\n\n # ...\n y = np.zeros((m/2 + 1, n), dtype=np.complex)\n for j in range(0, n):\n x1d = x[:,j]\n y1d = fft(x1d)\n y[:,j] = y1d\n # ...\n\n # ... if ny is even\n for j in range(0,n):\n _F[0,j] = y[0,j].real\n for i in range(1,mmax+1):\n z = y[i,j]\n _F[2*i-1,j] = z.real\n _F[2*i ,j] = z.imag\n _F[m-1,j] = y[m/2,j].real\n # ...\n\n # ...\n\n # ... treatment of the 0-mode\n f1d = _F[0, :]\n Sp = AssembleColumnMatrix(0, nx, ny, list_Ax, list_eigenAy)\n u1d = solveSp(Sp, f1d)\n U[0, :] = u1d\n\n for j in range(1, mmax+1):\n Sp = AssembleColumnMatrix(j, nx, ny, list_Ax, list_eigenAy)\n\n # ... treatment of the mode 2j-1\n f1d = _F[2*j-1, :]\n u1d = solveSp(Sp, f1d)\n U[2*j-1, :] = u1d\n\n # ... treatment of the mode 2j\n f1d = _F[2*j, :]\n u1d = solveSp(Sp, f1d)\n U[2*j, :] = u1d\n\n # ... treatment of the last mode\n f1d = _F[m-1, :]\n Sp = AssembleColumnMatrix(mmax+1, nx, ny, list_Ax, list_eigenAy)\n u1d = solveSp(Sp, f1d)\n U[m-1, :] = u1d\n # ...\n\n # ... if ny is even\n y = np.zeros_like(y)\n for j in range(0,n):\n y[0, j] = CMPLX(U[0, j], 0.0)\n\n for i in range(1, mmax+1):\n y[i, j] = CMPLX ( U[2*i - 1, j] , U[2*i, j] )\n\n y[m/2, j] = CMPLX ( U[m-1, j] , 0.0 )\n # ...\n\n # ...\n x = np.zeros_like(x)\n for j in range(0, n):\n y1d = y[:,j]\n x1d = ifft(y1d)\n x[:,j] = x1d\n # ...\n\n # ...\n X = x.transpose()\n #print X\n # ...\n\n return X\n# ...\n\n# ...\ndef csolve(list_Ax, list_eigenAy, F, EXPORT=False, list_opSj=None):\n fft = np.fft.fft\n ifft = np.fft.ifft\n\n X = np.zeros_like(F)\n Yp = np.zeros_like(F, dtype=np.complex)\n Xp = np.zeros_like(F, dtype=np.complex)\n\n # ...\n for i in range(0, nx):\n # ... extract the i^th line as a vector\n y = F[i,:]\n # ... move to the commun basis using FFT\n yp = fft(y)\n Yp[i,:] = yp\n # ...\n\n # ...\n for j in range(0, ny):\n if list_opSj is None:\n # ... assemble the 1D matrix\n Sj = AssembleColumnMatrix(j, nx, ny, list_Ax, list_eigenAy)\n if EXPORT:\n mmwrite(\"figa/S\"+str(j)+\".mtx\", Sj)\n # ... extract the j^th column as a vector\n yp = Yp[:,j]\n # ... 
solve the 1D linear system in the commun basis\n if list_opSj is None:\n xp = gmres(Sj,yp)[0]\n else:\n opSj = list_opSj[j]\n xp = opSj.solve(yp)\n Xp[:,j] = xp\n # ...\n\n # ...\n for i in range(0, nx):\n xp = Xp[i,:]\n # ... come back to the real space\n x = ifft(xp)\n # ... ... make sur that it is real\n x = x.real\n # ... update the global matrix\n X[i,:] = x\n # ...\n\n return X\n# ...\n\n\n# ...\ndef verification(list_Ax, list_Ay, X, F):\n _F = np.zeros_like(X)\n for Ax,Ay in zip(list_Ax, list_Ay):\n _F += Ax * X * Ay.transpose()\n# print \"F \", F\n# print \"_F \", _F\n print((np.allclose(F, _F)))\n# assert(np.allclose(F, _F))\n# ...\n\n# ...\ndef constructGlobalSystem(list_Ax, list_Ay):\n # ...\n list_eigenAy = computeEigenValues(list_Ay)\n # ...\n\n # ...\n Ax0 = list_Ax[0]\n Ay0 = list_Ay[0]\n S = kron(Ay0, Ax0)\n r = len(list_Ax)\n for i in range(1, r):\n Ax = list_Ax[i]\n Ay = list_Ay[i]\n S = S + kron(Ay, Ax)\n return S\n # ...\n# ...\n\n# ...\nclass nearestCirculant(object):\n \"\"\"\n this class constructs a list of circulant matrices that approche a given\n list of matrices A by minimizing the Frobenius norm\n \"\"\"\n def __init__(self, list_A, cost=0):\n self.list_A = list_A\n self.method = method\n\n norm = lambda M: np.linalg.norm(M, 'fro')\n\n # ...\n def cost0(M, c):\n C = circulant(c)\n nr = norm(M-C)\n return nr\n # ...\n\n # ...\n def cost1(M, c):\n n,m = M.shape\n C = circulant(c)\n invC = inv(C)\n I = np.eye(n)\n nr = norm(I-invC*M)\n return nr\n # ...\n\n # ...\n def cost2(M, c):\n diag = M.diagonal()\n shift = 0\n D = diags(diag,shift)\n Z = M-D\n C = circulant(c)\n nr = norm(Z-C)\n return nr\n # ...\n\n self.cost0 = cost0\n self.cost1 = cost1\n self.cost2 = cost2\n self.cost = getattr(self, 'cost%d' % cost)\n\n def construct(self, method='BFGS', tol = 1.e-7):\n list_C = []\n for A in self.list_A:\n # ...\n if method is None:\n n,m = A.shape\n MD = A.todense()\n c = np.zeros(n)\n for k in range(0,n):\n c1 =0.; c2=0.\n for i in range(0,n-k):\n c1 += MD[i,k+i]\n for i in range(n-k,n):\n c2 += MD[i,k+i-n]\n c[k] = ( c1 + c2 ) / n\n else:\n cost = lambda c: self.cost(A,c)\n\n n,m = A.shape\n x0 = np.zeros(n)\n x0[0] = 1.\n res = minimize( cost, x0 \\\n , method=method \\\n , options={'gtol': tol, 'disp': verbose})\n c = res.x\n # ...\n\n C = circulant(c)\n C = csr_matrix(C)\n list_C.append(C)\n\n return list_C\n# ...\n\n# ...\nclass circulantPrecond(object):\n def __init__(self, list_Ax, list_Ay \\\n , cost=0, method='BFGS' \\\n , tol = 1.e-7, verbose=False):\n\n # ... construct the nearest circulant matrices for list_Ay\n nearCirc = nearestCirculant(list_Ay, cost=cost)\n list_C = nearCirc.construct(method=method, tol=tol)\n # ...\n\n self.list_C = list_C\n\n # ...\n self.list_eigenC = computeEigenValues(list_C)\n # ...\n\n # ...\n n,m = list_Ax[0].shape ; nx = n\n n,m = list_Ay[0].shape ; ny = n\n self.n = [nx,ny]\n # ...\n\n # ...\n r = len(list_Ax)\n Ax0 = list_Ax[0]\n C0 = list_C[0]\n P = kron(C0, Ax0)\n for i in range(1, r):\n Ax = list_Ax[i]\n C = list_C[i]\n P = P + kron(C, Ax)\n self.P = P\n # ...\n\n # ...\n list_opSj = []\n for j in range(0, ny):\n # ... assemble the 1D matrix\n Sj = AssembleColumnMatrix(j, nx, ny, list_Ax, self.list_eigenC)\n opSj = splu(Sj.tocsc())\n list_opSj.append(opSj)\n self.list_opSj = list_opSj\n # ...\n\n def aspreconditioner(self):\n \"\"\"Create a preconditioner\n\n Returns\n -------\n precond : LinearOperator\n Preconditioner suitable for the iterative solvers in defined in\n the scipy.sparse.linalg module (e.g. 
cg, gmres) and any other\n solver that uses the LinearOperator interface. Refer to the\n LinearOperator documentation in scipy.sparse.linalg\n\n See Also\n --------\n scipy.sparse.linalg.LinearOperator\n\n Examples\n --------\n >>>\n\n \"\"\"\n shape = self.P.shape\n dtype = self.P.dtype\n\n nx, ny = self.n\n\n self.i = 0\n def matvec(b):\n F = b.reshape((ny,nx))\n F = F.transpose()\n X = csolve(self.list_C, self.list_eigenC, F, list_opSj=self.list_opSj)\n x = X.transpose().reshape(nx*ny)\n# print \">> iteration \", self.i\n self.i += 1\n return x\n\n return LinearOperator(shape, matvec, dtype=dtype)\n# ...\n\n# ...\ndef testcase(r, nx, ny, p, EXPORT=False, IMPORT=False):\n # ...\n if IMPORT:\n F = np.genfromtxt(\"figa/F.txt\")\n try:\n nx,ny = F.shape\n except:\n nx = 1\n ny, = F.shape\n _F = F\n F = np.zeros((nx,ny))\n F[0,:] = _F\n else:\n F = np.random.random((nx,ny))\n np.savetxt(\"figa/F.txt\", F)\n # ...\n\n # ...\n list_Ax, list_Ay = genTestMatrices(r, nx, ny, p \\\n , EXPORT=EXPORT \\\n , IMPORT=IMPORT)\n # ...\n\n return list_Ax, list_Ay, F\n# ...\n\n# ...\ndef testcase_poisson(scale=False):\n Mx = mmread(\"figa/Mx.mtx\") ; Mx = Mx.tocsr()\n Sx = mmread(\"figa/Sx.mtx\") ; Sx = Sx.tocsr()\n Kx = mmread(\"figa/Kx.mtx\") ; Kx = Kx.tocsr()\n KTx = Kx.transpose().tocsr()\n\n My = mmread(\"figa/My.mtx\") ; My = My.tocsr()\n Sy = mmread(\"figa/Sy.mtx\") ; Sy = Sy.tocsr()\n Ky = mmread(\"figa/Ky.mtx\") ; Ky = Ky.tocsr()\n KTy = Ky.transpose().tocsr()\n\n# # ...\n# list_Ax = [Mx, Sx, Kx, KTx]\n# list_A = [Sy, My, KTy, Ky]\n# # ...\n\n# # ...\n# Kmx = np.sqrt(2) * (Kx+KTx)\n# Kjx = - np.sqrt(2) * (Kx-KTx)\n#\n# Kmy = np.sqrt(2) * (Ky+KTy)\n# Kjy = np.sqrt(2) * (Ky-KTy)\n#\n# list_Ax = [Mx, Sx, Kmx, Kjx]\n# list_A = [Sy, My, Kmy, Kjy]\n# # ...\n\n# # ...\n# list_Ax = [ Kx, KTx, Sx]\n# list_A = [KTy, Ky, My]\n# # ...\n\n # ...\n list_Ax = [Mx, Sx]\n list_A = [Sy, My]\n # ...\n\n if scale:\n print(\"MUST IMPROVED: WE HAVE TO MULTIPLY BY ONE MATRIX FOR ALL MATRICES\")\n shift = 0\n list_Ay = []\n for A in list_A:\n diag = 1./A.diagonal()\n D = diags(diag, shift).tocsr()\n Ay = A * D\n Ay.tocsr()\n list_Ay.append(Ay)\n else:\n list_Ay = list_A\n\n n,m = Mx.shape ; nx = n\n n,m = My.shape ; ny = n\n# F = np.random.random((nx,ny))\n F = np.ones((nx,ny))\n\n return list_Ax, list_Ay, F\n# ...\n\n# ---------------------------------------------------------------\nif __name__==\"__main__\":\n from time import time\n # -------------------------\n# nx = 512 ; ny = 512\n# nx = 256 ; ny = 256\n# nx = 128 ; ny = 128\n# nx = 64 ; ny = 64\n nx = 32 ; ny = 32\n# nx = 16 ; ny = 16\n\n r = 4\n p = 3\n\n# EXPORT = True\n EXPORT = False\n\n IMPORT = False\n# IMPORT = True\n\n method = None\n cost = 0\n# method = 'BFGS'\n tol = 1.e-7\n# verbose = True\n verbose = False\n\n# scale = True\n scale = False\n\n# CIRCULANT = True\n CIRCULANT = False\n # -------------------------\n\n # ...\n if CIRCULANT:\n list_Ax, list_Ay, F = testcase(r, nx, ny, p, EXPORT=False, IMPORT=False)\n else:\n list_Ax, list_Ay, F = testcase_poisson(scale=scale)\n# n,m = list_Ax[0].shape\n# r = len(list_Ax)\n# list_Ax = []\n# for i in range(0,r):\n## diag = np.random.random(n)\n# diag = np.ones(n)\n# shift = 0\n# A = diags(diag, shift)\n# list_Ax.append(A)\n\n# _list_Ax = list_Ax[:3]\n# _list_Ay = list_Ay[:3]\n\n _list_Ax = list_Ax[:2]\n _list_Ay = list_Ay[:2]\n PrecConstruct = circulantPrecond(_list_Ax, _list_Ay \\\n , cost=cost, method=method \\\n , tol=tol, verbose=verbose)\n mmwrite('figa/P.mtx', PrecConstruct.P)\n\n# 
mmwrite('figa/C_Sy.mtx', PrecConstruct.list_C[0])\n# mmwrite('figa/C_My.mtx', PrecConstruct.list_C[1])\n# mmwrite('figa/C_Kmy.mtx', PrecConstruct.list_C[2])\n# mmwrite('figa/Kmy.mtx', list_Ay[2])\n\n# mmwrite('figa/C_KTy.mtx', PrecConstruct.list_C[2])\n# mmwrite('figa/C_Ky.mtx' , PrecConstruct.list_C[3])\n\n# mmwrite('figa/C_KTy.mtx', PrecConstruct.list_C[0])\n# mmwrite('figa/C_Ky.mtx' , PrecConstruct.list_C[1])\n# mmwrite('figa/C_My.mtx' , PrecConstruct.list_C[2])\n\n mmwrite('figa/C_Sy.mtx', PrecConstruct.list_C[0])\n mmwrite('figa/C_My.mtx', PrecConstruct.list_C[1])\n\n Precond = PrecConstruct.aspreconditioner()\n # ...\n\n # ...\n n,m = list_Ax[0].shape ; nx = n\n n,m = list_Ay[0].shape ; ny = n\n # ...\n\n # ...\n S = constructGlobalSystem(list_Ax, list_Ay)\n mmwrite('figa/S.mtx', S)\n # ...\n\n # ...\n print(\"=============================\")\n print(\" nx, ny \", nx, ny)\n print(\" size \", S.shape)\n print(\" nnz \", S.nnz)\n print(\"=============================\")\n # ...\n\n# import sys ; sys.exit(0)\n\n # ...\n print(\"=============================\")\n print(\">>> using the global system\")\n y = F.transpose().reshape(nx*ny)\n tb = time()\n Xg,it = gmres(S, y)\n Xg = Xg.reshape((ny,nx))\n Xg = Xg.transpose()\n te = time()\n print(\"Elapsed time \", te-tb)\n # ...\n\n # ...\n if CIRCULANT:\n print(\"=============================\")\n print(\">>> using circulant fast solver\")\n list_eigenAy = computeEigenValues(list_Ay)\n tb = time()\n X = csolve(list_Ax, list_eigenAy, F)\n te = time()\n print(\"Elapsed time \", te-tb)\n print(\"Internal verification \")\n verification(list_Ax, list_Ay, X, F)\n else:\n print(\"=============================\")\n print(\">>> using circulant preconditioner solver\")\n tb = time()\n y = F.transpose().reshape(nx*ny)\n x,it = gmres(S, y, M=Precond)\n X = x.reshape((ny,nx))\n X = X.transpose()\n te = time()\n print(\"Elapsed time \", te-tb)\n # ...\n\n # ...\n print(\"=============================\")\n print(\"Is everything OK?\")\n print(np.allclose(Xg,X, rtol=1e-07) \\\n , \" with error \", np.linalg.norm(Xg-X)/np.linalg.norm(X))\n\n", "# -*- coding: UTF-8 -*-\n#!/usr/bin/env python\n\nimport numpy as np\nfrom sympy import *\nfrom sympy.matrices import *\nfrom scipy.optimize import root\nfrom matplotlib import pyplot as plt\nfrom scipy.integrate import quad\n\n#from poisson_nonlin import poisson_picard as PDE_picard\n\n#__all__ = ['genPoints', 'genFigure', 'genDomain', 'picard', 'picardTwoGrids', 'testcase']\n#__all__ = ['genPoints', 'genFigure', 'genDomain', 'picard', 'picardTwoGrids', 'testcase']\n\nsqrt = np.sqrt\nabs = np.abs; sin = np.sin ; cos = np.cos ; exp = np.exp ; sqrt = np.sqrt\npi = np.pi; atan = np.arctan2 ; cosh = np.cosh\nsech = lambda x: 1./cosh(x)\n\n# ------------------------------------------------------\n# ... generate the boundary in the clock sens\ndef genPoints(R0=1., eps=0.32, k=1.7, d=0.33, mbnd=1500, m=500 \\\n # needed for the newton algo to converge\n , rmin=0.67, rmax=1.33, delta=0.05 \\\n , dd = 1.e-3 \\\n , PLOT=False \\\n ):\n print (\"eps, k, d = \", eps, k, d)\n\n if (R0==1.) 
and (eps==0.32):\n d1, d2, d3 = [0.0753850296600659, -0.206294962187880, -0.0314337072805334]\n else:\n d1, d2, d3 = compute_ds(eps, k, d)\n\n print (\"d1, d2, d3 = \", d1, d2, d3)\n\n psi = lambda r,z: r**4/8 + d1 + d2 * r**2 + d3 * (r**4 - 4 * (r*z)**2)\n psidr = lambda r,z: 2*d2*r + d3*(4*r**3 - 8*r*z**2) + r**3/2\n psidz = lambda r,z: -8*d3*r**2*z\n\n # .....................................\n rgrid = list(np.linspace(rmin, rmin+delta, mbnd)[:-1])\n rgrid += list(np.linspace(rmin+delta, rmax-delta, m))\n rgrid += list(np.linspace(rmax-delta, rmax, mbnd)[1:])\n rgrid = np.array(rgrid)\n\n zgrid = np.zeros_like(rgrid)\n\n # ...\n from pigasus.utils.impeqpy import impeqpy\n import pigasus.utils.impeqpy as impe\n# print \"============================\"\n# print impe.__file__\n# print \"============================\"\n level = 0.0\n imp=impeqpy(tol=1.e-9, maxniter = 300, verbose=False)\n imp.solve2Dx(psi,psidz,level,rgrid,zgrid)\n list_r = [R0-eps] ; list_z = [0.]\n for (r,z) in zip(rgrid, zgrid):\n if (not np.isnan(r)) and (not np.isnan(z)):\n list_r.append(r) ; list_z.append(z)\n list_r.append(R0+eps) ; list_z.append(0.)\n # ...\n # .....................................\n\n # .....................................\n# def Z2(R):\n# v = 0.\n# v += d1/(4.*d3) * 1./R**2\n# v += d2/(4.*d3)\n# v += (1./8 + d3) / (4.*d3) * R**2\n# return v\n#\n# def Z_plus(R):\n# return np.sqrt(Z2(R))\n#\n# def Z_minus(R):\n# return - np.sqrt(Z2(R))\n#\n# sol = root(Z2, rmin, jac=False)\n# Rmin = sol.x\n#\n# sol = root(Z2, rmax, jac=False)\n# Rmax = sol.x\n#\n# def dZdR(R):\n# gamma = (1./8 + d3) / (4.*d3)\n# alpha = d1/(4.*d3)\n# Z = Z_plus(R)\n# v = gamma * R - alpha / R**3\n# v /= Z\n# return v\n#\n# def measure(R):\n# meas = dZdR(R)**2\n# # meas += 1.\n# return meas\n#\n# def density(ti,tj):\n# return quad(measure, ti, tj)\n#\n# def adaptive_mesh(n, xmin, xmax, amin, amax):\n# R = np.linspace(xmin, xmax, n)\n# D = []\n# for a,b in zip(R[:-1], R[1:]):\n# D.append(density(a,b)[0])\n#\n# D = np.asarray(D)\n# m_total = D.sum()\n#\n# M = np.zeros(n-1)\n# for i in range(0,n-1):\n# v = D[0:i]\n# M[i] = v.sum() / m_total\n#\n# Rnew = (amax-amin)*M+amin\n# return Rnew\n#\n# R = []\n# R += list(adaptive_mesh(mbnd, Rmin+dd, Rmin*(1.+delta), Rmin, Rmin*(1.+delta)))\n# R += list(adaptive_mesh(m, Rmin*(1.+delta), Rmax*(1.-delta), Rmin*(1.+delta), Rmax*(1.-delta)))\n# R += list(adaptive_mesh(mbnd, Rmax*(1.-delta), Rmax-dd, Rmax*(1.-delta), Rmax))\n# R = np.array(R)\n#\n# Z = Z_plus(R)\n# R = np.array([Rmin] + list(R) + [Rmax])\n# Z = np.array([ 0.] + list(Z) + [ 0.])\n#\n# list_r = R\n# list_z = Z\n # .....................................\n\n # ... 
y < 0 part\n rgrid = np.array(list_r); zgrid = np.array(list_z)\n\n _rgrid = rgrid[::-1]\n _zgrid = -zgrid[::-1]\n # ...\n\n # ...\n if PLOT:\n import matplotlib.pyplot as plt\n plt.plot(rgrid, zgrid, '.b')\n plt.plot(_rgrid, _zgrid, '.k')\n\n tx = np.linspace(0.6,1.4, 300)\n ty = np.linspace(-0.6,0.6, 300)\n x,y = np.meshgrid(tx,ty)\n u = psi(x,y)\n levels = np.linspace(-0.04, 0.0, 100)\n CS = plt.contourf(x,y,u, levels)\n plt.colorbar()\n\n plt.show()\n\n genFigure(d=[d1,d2,d3], origin=\"upper\")\n # ...\n\n r = list(rgrid) + list(_rgrid)\n z = list(zgrid) + list(_zgrid)\n\n return r,z\n# ...\n# ------------------------------------------------------\n\n\n# ------------------------------------------------------\ndef genFigure(d=None, origin=\"upper\"):\n #origin = 'lower'\n\n if d is None:\n d1, d2, d3 = [ 0.07538503, -0.20629496, -0.03143371]\n else:\n d1,d2,d3=d[0:3]\n\n import matplotlib.pyplot as plt\n\n # ITER and ASDEX-Upgrade\n tx = np.linspace(0.6,1.4, 300)\n ty = np.linspace(-0.6,0.6, 300)\n levels = np.linspace(-0.04, 0.0, 100)\n\n# # JET\n# levels = np.linspace(-0.045, 0., 100)\n# tx = np.linspace(0.6,1.4, 300)\n# ty = np.linspace(-0.65,0.65, 300)\n\n\n x,y = np.meshgrid(tx,ty)\n\n psi = lambda r,z: r**4/8 + d1 + d2 * r**2 + d3 * (r**4 - 4 * (r*z)**2)\n u = psi(x,y)\n #plt.contourf(x,y,u) ; plt.colorbar(); plt.show()\n\n\n\n CS = plt.contourf(x,y,u, levels\n # , colors = ('r', 'g', 'b') \\\n # , origin=origin \\\n # , extend='both' \\\n )\n plt.colorbar()\n\n CS2 = plt.contour(CS, levels=CS.levels[::10] \\\n , colors = 'k' \\\n , origin=origin \\\n , hold='on' \\\n , linewidths = (1,) \\\n )\n\n plt.show()\n# plt.pcolor(x,y,u, vmin=-0.04, vmax=0.01) ; plt.colorbar() ; plt.show()\n# ------------------------------------------------------\n\n# ------------------------------------------------------\nclass genDomain(object):\n def __init__(self, R0=1, eps=0.32, mbnd=1500, m=500 \\\n # needed for the newton algo to converge\n , rmin=0.67, rmax=1.33, delta=0.05 \\\n , PLOT=False):\n\n r,z = genPoints(R0=R0, eps=eps, mbnd=mbnd, m=m \\\n , rmin=rmin, rmax=rmax, delta=delta \\\n , PLOT=PLOT\\\n )\n self.boundary = [r,z]\n# ------------------------------------------------------\ndef compute_ds(epsilon, kappa, delta):\n # ... 
OLD VERSION\n#    from scipy import matrix\n#    from scipy.linalg import inv\n#    M = np.zeros((3,3))\n#    M[:,0] = 1.\n#    M[0,1] = (1+eps)**2\n#    M[1,1] = (1-eps)**2\n#    M[2,1] = (1-d*eps)**2\n#    M[0,2] = (1+eps)**4\n#    M[1,2] = (1-eps)**4\n#    M[2,2] = (1-d*eps)**4 - 4 * ( (1-d*eps)*k*eps )**2\n#    Y = np.zeros(3)\n#    Y[0] = -(1./8) * (1+eps)**4\n#    Y[1] = -(1./8) * (1-eps)**4\n#    Y[2] = -(1./8) * (1-d*eps)**4\n#    A = matrix(M)\n#    invA = inv(A)\n#    X = invA.dot(Y)\n    # ...\n    def compute_M():\n        e = Symbol('e')\n        k = Symbol('k')\n        d = Symbol('d')\n\n        A = Matrix([ [1, (1+e)**2, (1+e)**4] \\\n                   , [1, (1-e)**2, (1-e)**4] \\\n                   , [1,(1-d*e)**2,(1-d*e)**4 - 4.*((1.-d*e)*k*e)**2] \\\n                   ])\n\n        Ainv = A.inv()\n        M = lambdify((e,k,d), Ainv)\n        return M\n\n    M = compute_M()\n\n    Y = np.zeros(3)\n    Y[0] = -(1./8) * (1+epsilon)**4\n    Y[1] = -(1./8) * (1-epsilon)**4\n    Y[2] = -(1./8) * (1-delta * epsilon)**4\n\n    D = M(epsilon, kappa, delta).dot(Y)\n    d1 = D[0,0]\n    d2 = D[0,1]\n    d3 = D[0,2]\n    return d1, d2, d3\n\nclass testcase(object):\n    def __init__(self, TEST):\n        initTEST = getattr(self, 'initTEST%d' % TEST)\n        initTEST()\n\n    def initTEST1(self):\n        \"\"\"\n        ITER relevant parameters\n        d1, d2, d3 = [ 0.0753850296600659 -0.206294962187880 -0.0314337072805334]\n        \"\"\"\n        d1, d2, d3 = compute_ds(epsilon=0.32, kappa=1.7, delta=0.33)\n\n        # ...\n        F = lambda psi,x,y : x**2\n        psi = lambda r,z: r**4/8 + d1 + d2 * r**2 + d3 * (r**4 - 4 * (r*z)**2)\n        # ...\n\n        self.F = F\n\n#if __name__ == '__main__':\n#    from caid.cad_geometry import square\n#    from matplotlib import pylab as plt\n\n", "# -*- coding: UTF-8 -*-\n#! /usr/bin/python\n\n\"\"\"\nThis module contains some routines to generate quadrature points in 1D.\nIt also has a routine, uniform, which generates uniform points\nwith weights equal to 1.\n\"\"\"\n\n__author__=\"ARA\"\n__all__ = ['quadratures']\n__date__ =\"$Jan 13, 2012 11:55:18 AM$\"\n\nimport numpy as np\nclass quadratures:\n    def __init__(self):\n        pass\n\n    def uniform(self,k):\n        xg = np.linspace(-1., 1., k+1)\n        w = np.ones(k+1)\n\n        return xg, w\n\n    def gauss_lobatto(self,k):\n        beta = .5 / np.sqrt(1-(2 * np.arange(1., k + 1)) ** (-2)) #3-term recurrence coeffs\n        beta[-1] = np.sqrt((k / (2 * k-1.)))\n        T = np.diag(beta, 1) + np.diag(beta, -1) # jacobi matrix\n        D, V = np.linalg.eig(T) # eigenvalue decomposition\n        xg = np.real(D); i = xg.argsort(); xg.sort() # nodes (= Legendre points)\n        w = 2 * (V[0, :]) ** 2; # weights\n\n        return xg, w[i]\n\n    def gauss_legendre(self,ordergl,tol=10e-9):\n        ''' x,A = gaussNodes(m,tol=10e-9)\n            Returns nodal abscissas {x} and weights {A} of\n            Gauss-Legendre m-point quadrature.\n        '''\n        m = ordergl + 1\n        from math import cos,pi\n        from numpy import zeros\n\n        def legendre(t,m):\n            p0 = 1.0; p1 = t\n            for k in range(1,m):\n                p = ((2.0*k + 1.0)*t*p1 - k*p0)/(1.0 + k )\n                p0 = p1; p1 = p\n            dp = m*(p0 - t*p1)/(1.0 - t**2)\n            return p1,dp\n\n        A = zeros(m)\n        x = zeros(m)\n        nRoots = (m + 1)//2 # Number of non-neg. roots\n        for i in range(nRoots):\n            t = cos(pi*(i + 0.75)/(m + 0.5)) # Approx. 
root\n for j in range(30):\n p,dp = legendre(t,m) # Newton-Raphson\n dt = -p/dp; t = t + dt # method\n if abs(dt) < tol:\n x[i] = t; x[m-i-1] = -t\n A[i] = 2.0/(1.0 - t**2)/(dp**2) # Eq.(6.25)\n A[m-i-1] = A[i]\n break\n return x,A\n\n def generate(self,apr_a, k,as_type=\"lobatto\"):\n# def generate(self,a, b, N, k,as_type=\"lobatto\"):\n \"\"\"\n this routine generates a quad pts on the grid linspace(a,b,N)\n \"\"\"\n if k == 1:\n x = np.asarray([-1., 1.])\n w = np.asarray([0.5, 0.5])\n grid = apr_a\n N = len(apr_a)\n xgl = np.zeros((N-1, k + 1))\n wgl = np.zeros((N-1, k + 1))\n for i in range (0, N-1):\n xmin = grid[i];xmax = grid[i + 1];dx = 0.5 * (xmax-xmin)\n tab = dx * x + dx + xmin\n xgl[i, :] = tab[::-1]\n wgl[i, :] = 0.5 * ( xmax - xmin ) * w\n\n return xgl,wgl\n\n if as_type.split('-')[0] == \"radau\":\n # this will generates radau qd points at the left and right of the interval\n # second rule is needed for the inside\n grid = apr_a\n N = len(apr_a)\n xgl = np.zeros((N-1, k + 1))\n wgl = np.zeros((N-1, k + 1))\n\n # ...\n # left\n # ...\n from . import qd_radau as qr\n x, w = qr.radau_left(k)\n x = x[::-1] ; w = w[::-1]\n i = 0\n xmin = grid[i];xmax = grid[i + 1];dx = 0.5 * (xmax-xmin)\n tab = dx * x + dx + xmin\n xgl[i, :] = tab[::-1]\n wgl[i, :] = 0.5 * ( xmax - xmin ) * w\n # ...\n\n # ...\n # inside\n # ...\n if as_type.split('-')[1] == \"uniform\":\n x, w = self.uniform(k)\n if as_type.split('-')[1] == \"lobatto\":\n x, w = self.gauss_lobatto(k)\n if as_type.split('-')[1] == \"legendre\":\n x, w = self.gauss_legendre(k)\n for i in range (1, N-2):\n xmin = grid[i];xmax = grid[i + 1];dx = 0.5 * (xmax-xmin)\n tab = dx * x + dx + xmin\n xgl[i, :] = tab[::-1]\n wgl[i, :] = 0.5 * ( xmax - xmin ) * w\n # ...\n\n # ...\n # right\n # ...\n from . import qd_radau as qr\n x, w = qr.radau_right(k)\n x = x[::-1] ; w = w[::-1]\n i = N-2\n xmin = grid[i];xmax = grid[i + 1];dx = 0.5 * (xmax-xmin)\n tab = dx * x + dx + xmin\n xgl[i, :] = tab[::-1]\n wgl[i, :] = 0.5 * ( xmax - xmin ) * w\n # ...\n\n return xgl,wgl\n if as_type == \"uniform\":\n x, w = self.uniform(k)\n if as_type == \"lobatto\":\n x, w = self.gauss_lobatto(k)\n if as_type == \"legendre\":\n x, w = self.gauss_legendre(k)\n if as_type == \"radau_left\":\n from . import qd_radau as qr\n x, w = qr.radau_left(k)\n x = x[::-1] ; w = w[::-1]\n if as_type == \"radau_right\":\n from . import qd_radau as qr\n x, w = qr.radau_right(k)\n x = x[::-1] ; w = w[::-1]\n# grid = np.linspace(a, b, N)\n grid = apr_a\n N = len(apr_a)\n xgl = np.zeros((N-1, k + 1))\n wgl = np.zeros((N-1, k + 1))\n for i in range (0, N-1):\n xmin = grid[i];xmax = grid[i + 1];dx = 0.5 * (xmax-xmin)\n tab = dx * x + dx + xmin\n xgl[i, :] = tab[::-1]\n wgl[i, :] = 0.5 * ( xmax - xmin ) * w\n\n return xgl,wgl\n", "# -*- coding: UTF-8 -*-\n#! 
/usr/bin/python\nfrom pigasus.utils.manager import context\n\n# ...\ntry:\n from matplotlib import pyplot as plt\n PLOT=True\nexcept ImportError:\n PLOT=False\n# ...\nimport numpy as np\nfrom caid.cad_geometry import circle\nfrom pigasus.gallery.poisson_nonlin import poisson_newton\nimport sys\nimport inspect\nfilename = inspect.getfile(inspect.currentframe()) # script filename (usually with path)\nsys.stdout = open(filename.split('.py')[0]+'.txt', 'w')\n\nexp = np.exp ; log = np.log ; sqrt = np.sqrt\n\n#-----------------------------------\nAllDirichlet = True\n\ntry:\n nx = int(sys.argv[1])\nexcept:\n nx = 31\n\ntry:\n ny = int(sys.argv[2])\nexcept:\n ny = 31\n\ntry:\n px = int(sys.argv[3])\nexcept:\n px = 2\n\ntry:\n py = int(sys.argv[4])\nexcept:\n py = 2\n\ngeo = circle (radius = 1. / sqrt (2), n =[nx, ny], p =[px, py])\n#-----------------------------------\n\n# ...\nu_exact = lambda x,y : [- 2.0 * log ( x**2 + y**2 + 0.5 )]\n\ndef F(U,x,y):\n _U = U.evaluate()\n return [4. * exp(_U)]\n\ndef dF (U,x, y):\n _U = U.evaluate()\n return[-4 * exp(_U)]\n# ...\n\nwith context():\n\n PDE_newton = poisson_newton( geometry=geo \\\n , AllDirichlet=AllDirichlet )\n\n print(\">>> Solving using Newton <<<\")\n # ...\n PDE = PDE_newton\n if PDE.Dirichlet:\n U = PDE.unknown_dirichlet\n else:\n U = PDE.unknown\n # ...\n\n # ...\n list_L2, list_H1 = PDE_newton.solve(F, dF, u0=None, maxiter=100, rtol=1.e-6, verbose=True)\n\n print(\"norm using Newton \", PDE_newton.norm(exact=u_exact))\n\n # ...\n if PLOT:\n fig = plt.figure()\n\n plt.subplot(121, aspect='equal')\n U.fast_plot() ; plt.colorbar(orientation='horizontal') ; plt.title('$u_h$')\n\n # plot error evolution\n plt.subplot(122)\n plt.plot(list_L2, '-vb', label='$L^2$ norm')\n plt.plot(list_H1, '-xr', label='$H^1$ norm')\n plt.xlabel('N')\n plt.semilogy()\n plt.title('Norm evolution of $u^{n+1} - u^n$')\n plt.legend()\n # ...\n\n plt.savefig(filename.split('.py')[0]+'.png', format='png')\n plt.clf()\n # ...\n\n PDE.free()\n" ]
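Editor's note on the circulant-preconditioner file in the record above: with method=None, nearestCirculant uses a closed-form Frobenius-optimal circulant (each entry of the generating vector is the mean of one wrapped diagonal), and csolve exploits the fact that circulant matrices are diagonalized by the DFT. A minimal standalone sketch of both ideas, assuming only numpy and scipy; the function names here are illustrative, not from the file, and the diagonal averaging is written for scipy.linalg.circulant's first-column convention:

import numpy as np
from scipy.linalg import circulant

def nearest_circulant_vector(M):
    # Frobenius-optimal generating vector: c[k] is the mean of the wrapped
    # diagonal {M[i, (i - k) % n]}, i.e. the per-diagonal average, which is
    # the same averaging idea as the method=None branch above.
    n = M.shape[0]
    i = np.arange(n)
    return np.array([M[i, (i - k) % n].mean() for k in range(n)])

def circulant_solve(c, b):
    # C = circulant(c) is diagonalized by the DFT, so C x = b is solved
    # with two FFTs: x = ifft(fft(b) / fft(c)).
    return np.real(np.fft.ifft(np.fft.fft(b) / np.fft.fft(c)))

rng = np.random.default_rng(0)
A = rng.random((8, 8))
c = nearest_circulant_vector(A)
b = rng.random(8)
x = circulant_solve(c, b)
print(np.allclose(circulant(c) @ x, b))   # True: the FFT solve matches the dense system

This is the same structure csolve and aspreconditioner exploit per column: an FFT in one direction reduces the 2D Kronecker-sum system to independent 1D systems, one per transformed column.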
[ [ "numpy.zeros" ], [ "numpy.zeros_like", "numpy.allclose", "numpy.eye", "scipy.sparse.diags", "scipy.sparse.linalg.gmres", "scipy.linalg.inv", "numpy.zeros", "scipy.io.mmread", "scipy.sparse.csr_matrix", "scipy.io.mmwrite", "numpy.genfromtxt", "scipy.optimize.minimize", "scipy.sparse.linalg.LinearOperator", "numpy.savetxt", "scipy.sparse.kron", "numpy.random.random", "numpy.linalg.norm", "numpy.ones", "scipy.linalg.circulant" ], [ "matplotlib.pyplot.contourf", "numpy.linspace", "numpy.meshgrid", "numpy.isnan", "matplotlib.pyplot.plot", "matplotlib.pyplot.colorbar", "numpy.zeros_like", "matplotlib.pyplot.contour", "numpy.array", "numpy.zeros", "matplotlib.pyplot.show" ], [ "numpy.diag", "numpy.sqrt", "numpy.linspace", "numpy.asarray", "numpy.linalg.eig", "numpy.arange", "numpy.ones", "numpy.real", "numpy.zeros" ], [ "matplotlib.pyplot.semilogy", "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "matplotlib.pyplot.plot", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.subplot", "matplotlib.pyplot.clf", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.12", "0.14", "0.15" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
iacolippo/octconv-pytorch
[ "032641413f1e8ece2893118e13cd1815d71ce0a9" ]
[ "octconv.py" ]
[ "import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass OctConv(nn.Module):\n def __init__(self, ch_in, ch_out, kernel_size, stride=1, alphas=(0.5, 0.5)):\n super(OctConv, self).__init__()\n self.alpha_in, self.alpha_out = alphas\n assert 0 <= self.alpha_in <= 1 and 0 <= self.alpha_in <= 1, \"Alphas must be in interval [0, 1]\"\n\n # CH IN\n self.ch_in_hf = int((1 - self.alpha_in) * ch_in)\n self.ch_in_lf = ch_in - self.ch_in_hf\n\n # CH OUT\n self.ch_out_hf = int((1 - self.alpha_out) * ch_out)\n self.ch_out_lf = ch_out - self.ch_out_hf\n\n # FILTERS\n self.wHtoH = nn.Parameter(torch.randn(self.ch_out_hf, self.ch_in_hf, kernel_size, kernel_size))\n self.wHtoL = nn.Parameter(torch.randn(self.ch_out_lf, self.ch_in_hf, kernel_size, kernel_size))\n self.wLtoH = nn.Parameter(torch.randn(self.ch_out_hf, self.ch_in_lf, kernel_size, kernel_size))\n self.wLtoL = nn.Parameter(torch.randn(self.ch_out_lf, self.ch_in_lf, kernel_size, kernel_size))\n\n # PADDING: (H - F + 2P)/S + 1 = 2 * [(0.5 H - F + 2P)/S +1] -> P = (F-S)/2\n self.padding = (kernel_size - stride) // 2\n\n def forward(self, input):\n # logic to handle input tensors:\n # if alpha_in = 0., we assume to be at the first layer, with only high freq repr\n if self.alpha_in == 0:\n hf_input = input\n lf_input = torch.Tensor([]).reshape(1, 0)\n else:\n fmap_size = input.shape[-1]\n hf_input = input[:, :self.ch_in_hf * 4, ...].reshape(-1, self.ch_in_hf, fmap_size * 2, fmap_size * 2)\n lf_input = input[:, self.ch_in_hf * 4:, ...]\n\n HtoH = HtoL = LtoL = LtoH = 0.\n if self.alpha_in < 1:\n # if alpha < 1 there is high freq component\n if self.ch_out_hf > 0:\n HtoH = F.conv2d(hf_input, self.wHtoH, padding=self.padding)\n if self.ch_out_lf > 0:\n HtoL = F.conv2d(F.avg_pool2d(hf_input, 2), self.wHtoL, padding=self.padding)\n if self.alpha_in > 0:\n # if alpha > 0 there is low freq component\n if self.ch_out_hf > 0:\n LtoH = F.interpolate(F.conv2d(lf_input, self.wLtoH, padding=self.padding),\n scale_factor=2, mode='nearest')\n if self.ch_out_lf > 0:\n LtoL = F.conv2d(lf_input, self.wLtoL, padding=self.padding)\n\n hf_output = HtoH + LtoH\n lf_output = LtoL + HtoL\n if 0 < self.alpha_out < 1:\n # if alpha in (0, 1)\n fmap_size = hf_output.shape[-1] // 2\n hf_output = hf_output.reshape(-1, 4 * self.ch_out_hf, fmap_size, fmap_size)\n output = torch.cat([hf_output, lf_output], dim=1) # cat over channel dim\n elif np.isclose(self.alpha_out, 1., atol=1e-8):\n # if only low req (alpha_out = 1.)\n output = lf_output\n elif np.isclose(self.alpha_out, 0., atol=1e-8):\n # if only high freq (alpha_out = 0.)\n output = hf_output\n return output\n\n\noc = OctConv(ch_in=3, ch_out=3, kernel_size=3, alphas=(0., 0.5))\noc1 = OctConv(ch_in=3, ch_out=10, kernel_size=7, alphas=(0.5, 0.8))\noc2 = OctConv(ch_in=10, ch_out=1, kernel_size=3, alphas=(0.8, 0.))\nout = oc2(oc1(oc(torch.randn(2, 3, 32, 32))))\nprint(out.shape)\n" ]
[ [ "torch.Tensor", "torch.cat", "torch.randn", "torch.nn.functional.conv2d", "torch.nn.functional.avg_pool2d", "numpy.isclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
you74674/pytorch
[ "06838ce8b16b2cc2f9e903f3ebdd46659a0e66bb", "06838ce8b16b2cc2f9e903f3ebdd46659a0e66bb", "06838ce8b16b2cc2f9e903f3ebdd46659a0e66bb", "06838ce8b16b2cc2f9e903f3ebdd46659a0e66bb" ]
[ "test/fx2trt/converters/acc_op/test_reshape.py", "test/fx_acc/test_acc_tracer.py", "test/fx2trt/passes/test_fuse_permute_linear_trt.py", "test/fx2trt/converters/acc_op/test_unsqueeze.py" ]
[ "# Owner(s): [\"oncall: fx\"]\n\nimport torch\nimport torch.fx.experimental.fx_acc.acc_ops as acc_ops\nfrom torch.testing._internal.common_fx2trt import AccTestCase, InputTensorSpec\nfrom parameterized import parameterized\nfrom torch.testing._internal.common_utils import run_tests\n\n\nclass TestReshapeConverter(AccTestCase):\n @parameterized.expand(\n [\n ((1, 20),),\n ((1, 10, -1),),\n ]\n )\n def test_reshape(self, target_shape):\n class TestModule(torch.nn.Module):\n def __init__(self, target_shape):\n super().__init__()\n self.target_shape = target_shape\n\n def forward(self, x):\n return torch.reshape(x, self.target_shape)\n\n inputs = [torch.randn(1, 2, 10)]\n self.run_test(TestModule(target_shape), inputs, expected_ops={acc_ops.reshape})\n\n @parameterized.expand(\n [\n ((-1, 2),),\n ((1, 2, -1),),\n ]\n )\n def test_reshape_with_dynamic_shape(self, target_shape):\n class TestModule(torch.nn.Module):\n def __init__(self, target_shape):\n super().__init__()\n self.target_shape = target_shape\n\n def forward(self, x):\n return torch.reshape(x, self.target_shape)\n\n input_specs = [\n InputTensorSpec(\n shape=(-1, -1, -1),\n dtype=torch.float32,\n shape_ranges=[((1, 1, 1), (1, 2, 3), (3, 3, 3))],\n ),\n ]\n self.run_test_with_dynamic_shape(\n TestModule(target_shape), input_specs, expected_ops={acc_ops.reshape}\n )\n\nif __name__ == '__main__':\n run_tests()\n", "# Owner(s): [\"oncall: fx\"]\n\nimport unittest\nfrom typing import Callable, List\n\nimport numpy as np\nimport torch\nimport torch.fx.experimental.fx_acc.acc_normalizer as acc_normalizer\nimport torch.fx.experimental.fx_acc.acc_ops as acc_ops\nimport torch.fx.experimental.fx_acc.acc_tracer as acc_tracer\nimport torch.fx.experimental.fx_acc.acc_utils as acc_utils\nimport torch.nn as nn\nimport torchvision\nfrom parameterized import parameterized, param\n\ntorch.manual_seed(0)\n\n\nclass AccTracerTest(unittest.TestCase):\n def _make_model_unit_test(\n self,\n model,\n *args,\n input_shape=None,\n enable_allclose=False,\n **kwargs,\n ):\n \"\"\"\n Test that the model can be traced correctly and is producing correct\n result.\n \"\"\"\n if input_shape is None:\n input_shape = [1, 3, 224, 224]\n input = torch.randn(input_shape)\n traced = acc_tracer.trace(model, [input])\n if enable_allclose:\n torch.testing.assert_allclose(model(input), traced(input))\n else:\n self.assertTrue(torch.equal(model(input), traced(input)))\n\n def _make_acc_op_function_test(\n self,\n acc_op: Callable,\n torch_op,\n *args,\n input_shape=(2, 3),\n validate_same_kwargs=True,\n enable_allclose=False,\n **kwargs,\n ):\n \"\"\"\n Test that acc_op is traced somewhat.\n \"\"\"\n\n class TestModule(torch.nn.Module):\n def __init__(self, torch_op, args, kwargs):\n super().__init__()\n self._torch_op = torch_op\n self._args = args\n self._kwargs = kwargs\n\n def forward(self, a: torch.Tensor) -> torch.Tensor:\n return self._torch_op(a, *self._args, **self._kwargs)\n\n m = TestModule(torch_op, args, kwargs)\n\n a = torch.randn(*input_shape)\n traced = acc_tracer.trace(m, [a])\n ph_a = acc_op_node = None\n for node in traced.graph.nodes:\n if node.op == \"placeholder\":\n if str(node.target) == \"a\":\n ph_a = node\n elif node.op == \"call_function\":\n self.assertEqual(node.target, acc_op)\n self.assertEqual(node.kwargs[\"input\"], ph_a)\n if validate_same_kwargs:\n for key, value in kwargs.items():\n self.assertEqual(node.kwargs[key], value)\n acc_op_node = node\n elif node.op == \"output\":\n if acc_op is None:\n # If we expect no new acc_op after graph 
building\n # and found we have only output in traced graph\n continue\n self.assertEqual(acc_op_node, node.args[0])\n else:\n self.fail(f\"Unexpected node: {node.format_node()}\")\n\n ref_outputs = m(a)\n outputs = traced(a)\n if isinstance(ref_outputs, torch.Tensor):\n ref_outputs = [ref_outputs]\n outputs = [outputs]\n\n for ref_output, output in zip(ref_outputs, outputs):\n if enable_allclose:\n torch.testing.assert_allclose(\n torch.nan_to_num(ref_output), torch.nan_to_num(output)\n )\n else:\n self.assertTrue(\n torch.equal(torch.nan_to_num(ref_output), torch.nan_to_num(output))\n )\n\n def test_sum(self):\n self._make_acc_op_function_test(acc_ops.sum, torch.sum)\n self._make_acc_op_function_test(acc_ops.sum, torch.sum, dim=(1,), keepdim=True)\n\n def test_mean(self):\n self._make_acc_op_function_test(acc_ops.mean, torch.mean)\n self._make_acc_op_function_test(acc_ops.mean, torch.mean, dim=(1,), keepdim=True)\n\n def test_pad(self):\n self._make_acc_op_function_test(acc_ops.pad, torch.nn.functional.pad, pad=(2, 0))\n\n def test_max(self):\n def torch_max(x, *args, **kwargs):\n return x.max(*args, **kwargs)\n\n self._make_acc_op_function_test(acc_ops.max_full_reduce, torch_max)\n self._make_acc_op_function_test(\n acc_ops.max_dim_reduce, torch_max, dim=1, keepdim=True\n )\n self._make_acc_op_function_test(\n acc_ops.max_dim_reduce, torch_max, input_shape=(1, 4), dim=1, keepdim=True\n )\n self._make_acc_op_function_test(\n acc_ops.max_dim_reduce, torch_max, input_shape=(3, 4, 3), dim=2\n )\n\n @parameterized.expand(\n [\n param(\"max_maximum\", orig_op=torch.max, expected_op=acc_ops.maximum),\n param(\n \"maximum_maximum\", orig_op=torch.maximum, expected_op=acc_ops.maximum\n ),\n param(\"min_minimum\", orig_op=torch.min, expected_op=acc_ops.minimum),\n param(\n \"minimum_minimum\", orig_op=torch.minimum, expected_op=acc_ops.minimum\n ),\n ]\n )\n def test_maximum_minimum(self, _: str, orig_op, expected_op):\n class TestModule(torch.nn.Module):\n def __init__(self, orig_op):\n super().__init__()\n self.orig_op = orig_op\n\n def forward(self, input: torch.Tensor, other: torch.Tensor) -> torch.Tensor:\n return self.orig_op(input, other)\n\n m = TestModule(orig_op)\n input, other = torch.randn(2, 2), torch.randn(2, 2)\n traced = acc_tracer.trace(m, [input, other])\n\n ph_in = ph_oth = mxm = None\n for node in traced.graph.nodes:\n if node.op == \"placeholder\":\n if str(node.target) == \"other\":\n ph_oth = node\n else:\n self.assertTrue(str(node.target) == \"input\")\n ph_in = node\n elif node.op == \"call_function\":\n if node.target == expected_op:\n self.assertEqual(node.kwargs[\"input\"], ph_in)\n self.assertEqual(node.kwargs[\"other\"], ph_oth)\n mxm = node\n elif node.op == \"output\":\n self.assertEqual(mxm, node.args[0])\n else:\n self.fail(f\"Unexpected node: {node.format_node()}\")\n\n self.assertTrue(torch.equal(m(input, other), traced(input, other)))\n\n def test_conv(self):\n \"\"\"\n Test that a conv is traced as expected.\n \"\"\"\n\n class TestModule(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv = nn.Conv2d(8, 7, 3, stride=2)\n\n def forward(self, a: torch.Tensor) -> torch.Tensor:\n return self.conv(a)\n\n m = TestModule()\n input = torch.randn(3, 8, 10, 10)\n traced = acc_tracer.trace(m, [input])\n\n ph = weight_attr = bias_attr = conv = None\n for node in traced.graph.nodes:\n if node.op == \"placeholder\":\n self.assertEqual(str(node.target), \"a\")\n ph = node\n elif node.op == \"get_attr\" and node.target == \"conv.weight\":\n weight_attr = node\n 
elif node.op == \"get_attr\" and node.target == \"conv.bias\":\n bias_attr = node\n elif node.op == \"call_function\":\n self.assertEqual(node.target, acc_ops.conv2d)\n self.assertEqual(node.kwargs[\"input\"], ph)\n self.assertEqual(node.kwargs[\"weight\"], weight_attr)\n self.assertEqual(node.kwargs[\"bias\"], bias_attr)\n self.assertEqual(node.kwargs[\"stride\"], (2, 2))\n self.assertEqual(node.kwargs[\"padding\"], (0, 0))\n self.assertEqual(node.kwargs[\"dilation\"], (1, 1))\n self.assertEqual(node.kwargs[\"groups\"], 1)\n conv = node\n elif node.op == \"output\":\n self.assertEqual(conv, node.args[0])\n else:\n self.fail(f\"Unexpected node: {node.format_node()}\")\n\n self.assertTrue(torch.equal(m(input), traced(input)))\n\n def test_quantized_conv2d(self):\n class TestModule(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv = nn.quantized.Conv2d(3, 3, 1)\n\n def forward(self, a: torch.Tensor) -> torch.Tensor:\n return self.conv(a)\n\n m = TestModule()\n input = torch.quantize_per_tensor(\n torch.randn(1, 3, 1, 1), scale=0.01, zero_point=3, dtype=torch.quint8\n )\n traced = acc_tracer.trace(m, [input])\n print(traced.graph)\n ph = weight_attr = bias_attr = conv = None\n for node in traced.graph.nodes:\n if node.op == \"placeholder\":\n self.assertEqual(str(node.target), \"a\")\n ph = node\n elif node.op == \"get_attr\" and node.target == \"conv_weight\":\n weight_attr = node\n elif node.op == \"get_attr\" and node.target == \"conv_bias\":\n bias_attr = node\n elif node.op == \"call_function\":\n self.assertEqual(node.target, acc_ops.quantized_conv2d)\n self.assertEqual(node.kwargs[\"input\"], ph)\n self.assertEqual(node.kwargs[\"weight\"], weight_attr)\n self.assertEqual(node.kwargs[\"bias\"], bias_attr)\n conv = node\n elif node.op == \"output\":\n self.assertEqual(conv, node.args[0])\n else:\n self.fail(f\"Unexpected node: {node.format_node()}\")\n\n self.assertTrue(torch.equal(m(input), traced(input)))\n\n def test_quantized_convrelu2d(self):\n class TestModule(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv = nn.intrinsic.quantized.ConvReLU2d(3, 3, 1)\n\n def forward(self, a: torch.Tensor) -> torch.Tensor:\n return self.conv(a)\n\n m = TestModule()\n input = torch.quantize_per_tensor(\n torch.randn(1, 3, 1, 1), scale=0.01, zero_point=3, dtype=torch.quint8\n )\n traced = acc_tracer.trace(m, [input])\n ph = weight_attr = bias_attr = conv = relu = None\n for node in traced.graph.nodes:\n if node.op == \"placeholder\":\n self.assertEqual(str(node.target), \"a\")\n ph = node\n elif node.op == \"get_attr\" and node.target == \"conv_weight\":\n weight_attr = node\n elif node.op == \"get_attr\" and node.target == \"conv_bias\":\n bias_attr = node\n elif node.op == \"call_function\" and node.target == acc_ops.quantized_conv2d:\n self.assertEqual(node.target, acc_ops.quantized_conv2d)\n self.assertEqual(node.kwargs[\"input\"], ph)\n self.assertEqual(node.kwargs[\"weight\"], weight_attr)\n self.assertEqual(node.kwargs[\"bias\"], bias_attr)\n conv = node\n elif node.op == \"call_function\" and node.target == acc_ops.relu:\n self.assertEqual(node.target, acc_ops.relu)\n self.assertEqual(node.kwargs[\"input\"], conv)\n relu = node\n elif node.op == \"output\":\n self.assertEqual(relu, node.args[0])\n else:\n self.fail(f\"Unexpected node: {node.format_node()}\")\n\n self.assertTrue(torch.equal(m(input), traced(input)))\n\n def test_embedding_bag(self):\n \"\"\"\n Test that an embedding_bag is traced as expected.\n \"\"\"\n\n class TestModule(nn.Module):\n def 
__init__(self):\n super().__init__()\n self.eb = nn.EmbeddingBag(10, 3, mode=\"sum\", include_last_offset=True)\n\n def forward(self, inp: torch.Tensor, offsets: torch.Tensor) -> torch.Tensor:\n return self.eb(inp, offsets)\n\n m = TestModule()\n inp = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9])\n offsets = torch.LongTensor([0, 4])\n traced = acc_tracer.trace(m, [inp, offsets])\n\n inp_node = offsets_node = weight_attr = eb_node = None\n for node in traced.graph.nodes:\n if node.op == \"placeholder\":\n if str(node.target) == \"inp\":\n inp_node = node\n elif str(node.target) == \"offsets\":\n offsets_node = node\n else:\n self.fail(f\"Unexpected placeholder {node.target}.\")\n continue\n elif node.op == \"get_attr\" and node.target == \"eb.weight\":\n weight_attr = node\n elif node.op == \"call_function\":\n self.assertEqual(node.target, acc_ops.embedding_bag)\n # Note: Normalization called from acc_tracer means we use all kwargs.\n self.assertEqual(node.kwargs[\"input\"], inp_node)\n self.assertEqual(node.kwargs[\"offsets\"], offsets_node)\n self.assertEqual(node.kwargs[\"weight\"], weight_attr)\n self.assertEqual(node.kwargs[\"mode\"], \"sum\")\n self.assertEqual(node.kwargs[\"include_last_offset\"], True)\n # The rest of these were unspecified, so verify they fell back\n # to their respective default values thanks to normalization.\n self.assertEqual(node.kwargs[\"max_norm\"], None)\n self.assertEqual(node.kwargs[\"norm_type\"], 2.0)\n self.assertEqual(node.kwargs[\"scale_grad_by_freq\"], False)\n self.assertEqual(node.kwargs[\"sparse\"], False)\n self.assertEqual(node.kwargs[\"per_sample_weights\"], None)\n eb_node = node\n elif node.op == \"output\":\n self.assertEqual(eb_node, node.args[0])\n\n self.assertTrue(torch.equal(m(inp, offsets), traced(inp, offsets)))\n\n def test_embedding_bag_byte_and_4bit_rowwise_offsets(self):\n \"\"\"\n Test that 4 bit quantized embedding_bag is traced as expected.\n \"\"\"\n\n class TestModule(nn.Module):\n def __init__(\n self,\n op,\n q_weights,\n per_index_weights,\n ):\n super().__init__()\n self.emb = op\n self.q_weights = q_weights\n self.per_index_weights = per_index_weights\n\n def forward(\n self,\n indices,\n offsets,\n ):\n return self.emb(\n self.q_weights,\n indices,\n offsets,\n mode=0,\n per_sample_weights=self.per_index_weights,\n include_last_offset=True,\n )\n\n def run_embedding_bag_test(is_4bit, use_weights):\n # generate random indices, offsets, and weights.\n num_embeddings = 16\n embedding_dim = 32\n num_lengths = 10\n\n weights = torch.from_numpy(\n (np.random.random_sample((num_embeddings, embedding_dim)) + 1).astype(\n np.float32\n )\n )\n q_weights = (\n torch.ops.quantized.embedding_bag_4bit_prepack(weights)\n if is_4bit\n else torch.ops.quantized.embedding_bag_byte_prepack(weights)\n )\n np_lengths = np.random.randint(0, num_lengths, size=10).astype(np.int32)\n\n num_lengths = np.sum(np_lengths)\n indices = torch.from_numpy(\n np.random.randint(low=0, high=num_embeddings, size=num_lengths)\n ).int()\n\n lengths = torch.from_numpy(np_lengths)\n offsets = torch.cat([torch.zeros([1]), torch.cumsum(lengths, 0)]).int()\n\n weights = torch.randint(low=0, high=4, size=indices.size())\n per_sample_weights = weights.to(torch.float32)\n\n indices = indices.to(torch.int32)\n offsets = offsets.to(torch.int32)\n inputs = [\n indices,\n offsets,\n ]\n\n op = (\n torch.ops.quantized.embedding_bag_4bit_rowwise_offsets\n if is_4bit\n else torch.ops.quantized.embedding_bag_byte_rowwise_offsets\n )\n\n m = TestModule(\n op,\n q_weights,\n 
per_sample_weights,\n )\n\n traced = acc_tracer.trace(m, inputs)\n print(traced.graph)\n\n expected_target = (\n acc_ops.embedding_bag_4bit_rowwise_offsets\n if is_4bit\n else acc_ops.embedding_bag_byte_rowwise_offsets\n )\n\n for node in traced.graph.nodes:\n if node.op == \"placeholder\":\n if str(node.target) == \"indices\":\n inp_node = node\n elif str(node.target) == \"offsets\":\n offsets_node = node\n else:\n self.fail(f\"Unexpected placeholder {node.target}.\")\n continue\n elif node.op == \"get_attr\" and node.target == \"q_weights\":\n weight_attr = node\n elif node.op == \"call_function\":\n self.assertEqual(node.target, expected_target)\n # Note: Normalization called from acc_tracer means we use all kwargs.\n self.assertEqual(node.kwargs[\"indices\"], inp_node)\n self.assertEqual(node.kwargs[\"offsets\"], offsets_node)\n self.assertEqual(node.kwargs[\"weight\"], weight_attr)\n self.assertEqual(node.kwargs[\"mode\"], 0)\n self.assertEqual(node.kwargs[\"include_last_offset\"], True)\n # The rest of these were unspecified, so verify they fell back\n # to their respective default values thanks to normalization.\n eb_node = node\n elif node.op == \"output\":\n self.assertEqual(eb_node, node.args[0])\n self.assertTrue(torch.equal(m(indices, offsets), traced(indices, offsets)))\n\n # test 8-bit\n run_embedding_bag_test(is_4bit=False, use_weights=True)\n # test 4-bit\n run_embedding_bag_test(is_4bit=True, use_weights=True)\n\n def test_quantized_batch_norm2d(self):\n class TestModule(nn.Module):\n def __init__(self):\n super().__init__()\n self.bn = nn.quantized.BatchNorm2d(3)\n\n def forward(self, a: torch.Tensor) -> torch.Tensor:\n return self.bn(a)\n\n m = TestModule()\n m.eval()\n input = torch.quantize_per_tensor(\n torch.randn(1, 3, 1, 1), scale=0.01, zero_point=3, dtype=torch.quint8\n )\n traced = acc_tracer.trace(m, [input])\n ph = weight_attr = bias_attr = bn_mean = bn_var = bn = None\n for node in traced.graph.nodes:\n if node.op == \"placeholder\":\n self.assertEqual(str(node.target), \"a\")\n ph = node\n elif node.op == \"get_attr\" and node.target == \"bn.weight\":\n weight_attr = node\n elif node.op == \"get_attr\" and node.target == \"bn.bias\":\n bias_attr = node\n elif node.op == \"get_attr\" and node.target == \"bn.running_mean\":\n bn_mean = node\n elif node.op == \"get_attr\" and node.target == \"bn.running_var\":\n bn_var = node\n elif node.op == \"get_attr\" and node.target == \"bn.scale\":\n bn_scale = node\n elif node.op == \"get_attr\" and node.target == \"bn.zero_point\":\n bn_zero_point = node\n elif node.op == \"call_function\":\n self.assertEqual(node.target, acc_ops.quantized_batch_norm2d)\n self.assertEqual(node.kwargs[\"input\"], ph)\n self.assertEqual(node.kwargs[\"weight\"], weight_attr)\n self.assertEqual(node.kwargs[\"bias\"], bias_attr)\n self.assertEqual(node.kwargs[\"running_mean\"], bn_mean)\n self.assertEqual(node.kwargs[\"running_var\"], bn_var)\n self.assertEqual(node.kwargs[\"acc_out_ty\"][6][\"scale\"], bn_scale)\n self.assertEqual(node.kwargs[\"acc_out_ty\"][6][\"zero_point\"], bn_zero_point)\n bn = node\n elif node.op == \"output\":\n self.assertEqual(bn, node.args[0])\n else:\n self.fail(f\"Unexpected node: {node.format_node()}\")\n\n self.assertTrue(torch.equal(m(input), traced(input)))\n\n def test_linear(self):\n \"\"\"\n Test that a linear is traced as expected, i.e. to the functional level and with\n kwarg normalization. 
Also verify that symbolic shape inference worked as part of\n the acc_tracer.\n \"\"\"\n\n class TestModule(nn.Module):\n def __init__(self):\n super().__init__()\n self.linear = nn.Linear(3, 5, bias=True)\n\n def forward(self, a: torch.Tensor) -> torch.Tensor:\n return self.linear(a)\n\n m = TestModule()\n test_input = torch.randn(1, 3)\n traced = acc_tracer.trace(m, test_input)\n ph = weight_attr = bias_attr = linear = None\n for node in traced.graph.nodes:\n if node.op == \"placeholder\":\n self.assertEqual(str(node.target), \"a\")\n ph = node\n elif node.op == \"get_attr\" and node.target == \"linear.weight\":\n weight_attr = node\n elif node.op == \"get_attr\" and node.target == \"linear.bias\":\n bias_attr = node\n elif node.op == \"call_function\":\n self.assertEqual(node.target, acc_ops.linear)\n self.assertEqual(node.kwargs[\"input\"], ph)\n self.assertEqual(node.kwargs[\"weight\"], weight_attr)\n self.assertEqual(node.kwargs[\"bias\"], bias_attr)\n linear = node\n elif node.op == \"output\":\n self.assertEqual(linear, node.args[0])\n else:\n self.fail(f\"Unexpected node: {node.format_node()}\")\n self.assertTrue(torch.equal(m(test_input), traced(test_input)))\n\n def test_quantized_linear(self):\n class TestModule(nn.Module):\n def __init__(self):\n super().__init__()\n self.linear = nn.quantized.Linear(3, 5)\n\n def forward(self, a: torch.Tensor) -> torch.Tensor:\n return self.linear(a)\n\n m = TestModule()\n input = torch.quantize_per_tensor(\n torch.randn(2, 3), scale=0.01, zero_point=3, dtype=torch.quint8\n )\n traced = acc_tracer.trace(m, [input])\n ph = weight_attr = bias_attr = linear = None\n for node in traced.graph.nodes:\n if node.op == \"placeholder\":\n self.assertEqual(str(node.target), \"a\")\n ph = node\n elif node.op == \"get_attr\" and node.target == \"linear_weight\":\n weight_attr = node\n elif node.op == \"get_attr\" and node.target == \"linear_bias\":\n bias_attr = node\n elif node.op == \"call_function\":\n self.assertEqual(node.target, acc_ops.quantized_linear)\n self.assertEqual(node.kwargs[\"input\"], ph)\n self.assertEqual(node.kwargs[\"weight\"], weight_attr)\n self.assertEqual(node.kwargs[\"bias\"], bias_attr)\n linear = node\n elif node.op == \"output\":\n self.assertEqual(linear, node.args[0])\n else:\n self.fail(f\"Unexpected node: {node.format_node()}\")\n\n self.assertTrue(torch.equal(m(input), traced(input)))\n\n @parameterized.expand(\n [\n param(\"remove_exceptions_false\", remove_exceptions=False),\n param(\"remove_exceptions_true\", remove_exceptions=True),\n ]\n )\n def test_batch_norm(self, _, remove_exceptions):\n \"\"\"\n Test that a batch norm is traced as expected, i.e. to the functional level\n and with kwarg normalization. 
Note that we also expect to see a\n ConditionalExceptionWrapper in the graph that the AST rewriter converted\n from `if x: raise y`.\n\n \"\"\"\n\n class TestModule(nn.Module):\n def __init__(self):\n super().__init__()\n self.bn = torch.nn.BatchNorm2d(2)\n\n def forward(self, a: torch.Tensor) -> torch.Tensor:\n return self.bn(a)\n\n m = TestModule()\n input = torch.randn(2, 2, 1, 1)\n # Note: Explicitly not removing exceptions so that we can check they\n # were found and exist below.\n traced = acc_tracer.trace(\n m,\n [input],\n remove_exceptions=remove_exceptions,\n )\n\n ph = exception_wrapper = weight = bias = mean = var = bn = None\n for node in traced.graph.nodes:\n if node.op == \"placeholder\":\n self.assertEqual(str(node.target), \"a\")\n ph = node\n elif node.op == \"get_attr\" and node.target == \"bn.weight\":\n weight = node\n elif node.op == \"get_attr\" and node.target == \"bn.bias\":\n bias = node\n elif node.op == \"get_attr\" and node.target == \"bn.running_mean\":\n mean = node\n elif node.op == \"get_attr\" and node.target == \"bn.running_var\":\n var = node\n elif node.op == \"call_function\" and node.target == acc_ops.batch_norm:\n # Note: Normalization called from acc_tracer means we use\n # all kwargs.\n self.assertEqual(node.kwargs[\"input\"], ph)\n self.assertEqual(node.kwargs[\"weight\"], weight)\n self.assertEqual(node.kwargs[\"bias\"], bias)\n self.assertEqual(node.kwargs[\"running_mean\"], mean)\n self.assertEqual(node.kwargs[\"running_var\"], var)\n bn = node\n elif (\n node.op == \"call_module\"\n and node.target == \"bn._conditional_exception_wrapper_ValueError\"\n ):\n exception_wrapper = node\n elif node.op == \"output\":\n self.assertEqual(bn, node.args[0])\n\n self.assertTrue(remove_exceptions or exception_wrapper is not None)\n\n self.assertTrue(torch.equal(m(input), traced(input)))\n\n def test_remove_asserts(self):\n \"\"\"\n Test that a Module with asserts has the asserts automatically removed, as\n well as calls to a class method that should be dead.\n \"\"\"\n\n class TestModule(nn.Module):\n def __init__(self):\n super().__init__()\n\n def _test_method(self, a):\n return a\n\n def forward(self, a: torch.Tensor) -> torch.Tensor:\n assert torch.equal(self._test_method(a), a)\n return a\n\n m = TestModule()\n input = torch.randn(10)\n traced = acc_tracer.trace(m, [input], ast_rewriter_allow_list={TestModule})\n # Check we have no call_functions. 
If remove asserts didn't work\n # correctly we would see a call to torch._assert, _test_method, and\n # torch.equal.\n for node in traced.graph.nodes:\n self.assertFalse(node.op == \"call_function\")\n\n self.assertTrue(torch.equal(m(input), traced(input)))\n\n def test_sequential(self):\n \"\"\"\n Test that the tracer works for torch.nn.Sequential.\n \"\"\"\n\n class TestModule(nn.Module):\n def __init__(self):\n super().__init__()\n self.model = nn.Sequential(nn.Sigmoid(), nn.ReLU())\n\n def forward(self, a: torch.Tensor) -> torch.Tensor:\n return self.model(a)\n\n m = TestModule()\n input = torch.randn(10)\n traced = acc_tracer.trace(m, [input])\n\n for node in traced.graph.nodes:\n if node.op == \"call_function\":\n is_sigmoid = node.target == acc_ops.sigmoid\n is_relu = node.target == acc_ops.relu\n self.assertTrue(is_sigmoid or is_relu)\n else:\n self.assertTrue(node.op == \"placeholder\" or node.op == \"output\")\n\n self.assertTrue(torch.equal(m(input), traced(input)))\n\n def test_unsqueeze(self):\n \"\"\"\n Test that torch.unsqueeze is traced correctly.\n \"\"\"\n self._make_acc_op_function_test(\n acc_ops.unsqueeze,\n torch.unsqueeze,\n validate_same_kwargs=False,\n dim=1,\n )\n\n def test_stack(self):\n \"\"\"\n Test that torch.stack is traced correctly.\n \"\"\"\n\n class TestModule(torch.nn.Module):\n def forward(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:\n return torch.stack((a, b), dim=1)\n\n a, b = torch.randn(4, 5, 6), torch.randn(4, 5, 6)\n mod = TestModule()\n traced = acc_tracer.trace(mod, [a, b])\n self.assertTrue(torch.equal(mod(a, b), traced(a, b)))\n\n ph_a = ph_b = unsqueeze_a = unsqueeze_b = cat_node = None\n for node in traced.graph.nodes:\n if node.op == \"placeholder\":\n if str(node.target) == \"a\":\n ph_a = node\n else:\n self.assertTrue(str(node.target) == \"b\")\n ph_b = node\n elif node.op == \"call_function\":\n if node.target == acc_ops.unsqueeze:\n if node.kwargs[\"input\"] is ph_a:\n unsqueeze_a = node\n else:\n self.assertEqual(node.kwargs[\"input\"], ph_b)\n unsqueeze_b = node\n else:\n self.assertEqual(node.target, acc_ops.cat)\n self.assertEqual(node.kwargs[\"tensors\"], [unsqueeze_a, unsqueeze_b])\n cat_node = node\n elif node.op == \"output\":\n self.assertEqual(cat_node, node.args[0])\n else:\n self.fail(f\"Unexpected node: {node.format_node()}\")\n\n def test_no_raise(self):\n \"\"\"\n self that we can trace `if x: raise y(msg)` when the raise isn't executed.\n \"\"\"\n\n class TestModule(nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, a, b):\n if torch.equal(a, b):\n raise AssertionError(\"a equaled b!\")\n return a\n\n m = TestModule()\n in_a, in_b = torch.randn(5), torch.randn(5)\n traced = acc_tracer.trace(\n m,\n [in_a, in_b],\n remove_exceptions=False,\n use_acc_normalization=False,\n ast_rewriter_allow_list={TestModule},\n )\n\n # Verify the structure of the graph, including the existence of the\n # exception_wrapper.\n ph_a = exception_wrapper = None\n for node in traced.graph.nodes:\n if node.op == \"placeholder\":\n if str(node.target) == \"a\":\n ph_a = node\n else:\n self.assertTrue(str(node.target) == \"b\")\n elif node.op == \"call_module\":\n self.assertEqual(\n node.target, \"_conditional_exception_wrapper_AssertionError\"\n )\n exception_wrapper = node\n elif node.op == \"output\":\n self.assertEqual(ph_a, node.args[0])\n\n self.assertTrue(exception_wrapper is not None)\n\n self.assertTrue(torch.equal(m(in_a, in_b), traced(in_a, in_b)))\n\n def test_yes_raise(self):\n \"\"\"\n Test 
that we can trace `if x: raise y(msg)` when the raise is executed.\n \"\"\"\n err_str = \"a equaled b!\"\n\n class TestModule(nn.Module):\n def __init__(self):\n super().__init__()\n self.err_str = err_str\n\n def forward(self, a, b):\n if torch.equal(a, b):\n raise RuntimeError(self.err_str)\n return a\n\n m = TestModule()\n # Note: We must use different inputs here in order for shape_prop to work, as\n # otherwise the exception is thrown (as expected/checked below).\n in_a, in_b = torch.randn(5), torch.randn(5)\n traced = acc_tracer.trace(\n m,\n [in_a, in_b],\n remove_exceptions=False,\n ast_rewriter_allow_list={TestModule},\n )\n\n # Verify the structure of the graph, including the existence of the\n # exception_wrapper.\n ph_a = exception_wrapper = None\n for node in traced.graph.nodes:\n if node.op == \"placeholder\":\n if str(node.target) == \"a\":\n ph_a = node\n else:\n self.assertTrue(str(node.target) == \"b\")\n elif node.op == \"call_module\":\n self.assertEqual(\n node.target, \"_conditional_exception_wrapper_RuntimeError\"\n )\n exception_wrapper = node\n elif node.op == \"output\":\n self.assertEqual(ph_a, node.args[0])\n\n self.assertTrue(exception_wrapper is not None)\n\n def test(mod):\n try:\n # Note: Use the same input here to ensure the exception is thrown.\n mod(in_a, in_a)\n self.fail(\"Shouldn't get here because exception should be thrown.\")\n except RuntimeError as e:\n self.assertEqual(err_str, str(e))\n\n test(m)\n test(traced)\n\n def test_remove_raise(self):\n \"\"\"\n Test that we can trace `if x: raise y(msg)` and then remove the exception_wrapper.\n \"\"\"\n\n class TestModule(nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, a, b):\n if torch.equal(a, b):\n raise AssertionError(\"a equaled b!\")\n return a\n\n m = TestModule()\n in_a, in_b = torch.randn(5), torch.randn(5)\n traced = acc_tracer.trace(\n m,\n [in_a, in_b],\n remove_exceptions=True,\n ast_rewriter_allow_list={TestModule},\n )\n\n # Verify the structure of the graph, including the existence of the\n # exception_wrapper.\n ph_a = None\n for node in traced.graph.nodes:\n if node.op == \"placeholder\":\n if str(node.target) == \"a\":\n ph_a = node\n else:\n self.assertTrue(str(node.target) == \"b\")\n elif node.op == \"output\":\n self.assertEqual(ph_a, node.args[0])\n else:\n # Should not encounter any call_modules, e.g. 
to the\n # exception_wrapper.\n self.assertFalse(node.op == \"call_module\")\n\n # Note: Using input in_a twice for the tracer version, which would\n # trigger the raise if it was still there.\n self.assertTrue(torch.equal(m(in_a, in_b), traced(in_a, in_a)))\n\n def test_raise_no_message(self):\n \"\"\"\n Test that we can trace `if x: raise y` when `y` has no message.\n \"\"\"\n\n class TestModule(nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, a, b):\n if torch.equal(a, b):\n raise AssertionError\n return a\n\n m = TestModule()\n in_a, in_b = torch.randn(5), torch.randn(5)\n traced = acc_tracer.trace(\n m,\n [in_a, in_b],\n remove_exceptions=False,\n use_acc_normalization=False,\n ast_rewriter_allow_list={TestModule},\n )\n\n # Verify the structure of the graph, including the existence of the\n # exception_wrapper.\n ph_a = exception_wrapper = None\n for node in traced.graph.nodes:\n if node.op == \"placeholder\":\n if str(node.target) == \"a\":\n ph_a = node\n else:\n self.assertTrue(str(node.target) == \"b\")\n elif node.op == \"call_module\":\n self.assertEqual(\n node.target, \"_conditional_exception_wrapper_AssertionError\"\n )\n exception_wrapper = node\n elif node.op == \"output\":\n self.assertEqual(ph_a, node.args[0])\n\n self.assertTrue(exception_wrapper is not None)\n self.assertTrue(torch.equal(m(in_a, in_b), traced(in_a, in_b)))\n\n def test_quantized_add(self):\n \"\"\"\n Test that a quantized_add and acc_ops.quantize_per_tensor are traced as expected,\n verifying the acc_out_tys are set as expected.\n \"\"\"\n\n class TestModule(nn.Module):\n def __init__(self):\n super().__init__()\n self.q_input = torch.nn.quantized.Quantize(\n scale=1.0 / 128, zero_point=5, dtype=torch.quint8\n )\n self.q_other = torch.nn.quantized.Quantize(\n scale=1.0 / 128, zero_point=10, dtype=torch.quint8\n )\n\n def forward(self, input: torch.Tensor, other: torch.Tensor) -> torch.Tensor:\n return torch.ops.quantized.add(\n self.q_input(input),\n self.q_other(other),\n scale=0.05,\n zero_point=1,\n )\n\n m = TestModule()\n input, other = torch.randn(2, 3, 4), torch.randn(2, 3, 4)\n traced = acc_tracer.trace(m, [input, other])\n\n input_ph = other_ph = q_input = q_other = q_add = None\n for node in traced.graph.nodes:\n if node.op == \"placeholder\":\n if str(node.target) == \"input\":\n input_ph = node\n else:\n self.assertTrue(str(node.target) == \"other\")\n other_ph = node\n elif (\n node.op == \"call_function\"\n and node.target == acc_ops.quantize_per_tensor\n ):\n qparams = {\n \"scale\": 1.0 / 128,\n \"zero_point\": 5,\n }\n expected_md = acc_utils.build_raw_tensor_meta(\n dtype=torch.quint8,\n qparams=qparams,\n )\n if node.kwargs[\"input\"] == input_ph:\n q_input = node\n else:\n self.assertTrue(node.kwargs[\"input\"] == other_ph)\n q_other = node\n qparams_copy = qparams.copy()\n qparams_copy[\"zero_point\"] = 10\n expected_md = expected_md._replace(qparams=qparams_copy)\n self.assertEqual(node.kwargs[\"acc_out_ty\"], expected_md)\n elif node.op == \"call_function\" and node.target == acc_ops.quantized_add:\n self.assertEqual(node.kwargs[\"input\"], q_input)\n self.assertEqual(node.kwargs[\"other\"], q_other)\n qparams = {\n \"scale\": 0.05,\n \"zero_point\": 1,\n }\n expected_md = acc_utils.build_raw_tensor_meta(qparams=qparams)\n self.assertEqual(node.kwargs[\"acc_out_ty\"], expected_md)\n q_add = node\n elif node.op == \"output\":\n self.assertEqual(q_add, node.args[0])\n else:\n self.fail(f\"Unexpected node: {node.format_node()}\")\n\n 
self.assertTrue(torch.equal(m(input, other), traced(input, other)))\n\n def test_quantized_mul(self):\n \"\"\"\n Test that a quantized_mul and acc_ops.quantize_per_tensor are traced as expected,\n verifying the acc_out_tys are set as expected.\n \"\"\"\n\n class TestModule(nn.Module):\n def __init__(self):\n super().__init__()\n self.q_input = torch.nn.quantized.Quantize(\n scale=1.0 / 128, zero_point=5, dtype=torch.quint8\n )\n self.q_other = torch.nn.quantized.Quantize(\n scale=1.0 / 128, zero_point=10, dtype=torch.quint8\n )\n\n def forward(self, input: torch.Tensor, other: torch.Tensor) -> torch.Tensor:\n return torch.ops.quantized.mul(\n self.q_input(input),\n self.q_other(other),\n scale=0.05,\n zero_point=1,\n )\n\n m = TestModule()\n input, other = torch.randn(2, 3, 4), torch.randn(2, 3, 4)\n traced = acc_tracer.trace(m, [input, other])\n\n input_ph = other_ph = q_input = q_other = q_add = None\n for node in traced.graph.nodes:\n if node.op == \"placeholder\":\n if str(node.target) == \"input\":\n input_ph = node\n else:\n self.assertTrue(str(node.target) == \"other\")\n other_ph = node\n elif (\n node.op == \"call_function\"\n and node.target == acc_ops.quantize_per_tensor\n ):\n qparams = {\n \"scale\": 1.0 / 128,\n \"zero_point\": 5,\n }\n expected_md = acc_utils.build_raw_tensor_meta(\n dtype=torch.quint8,\n qparams=qparams,\n )\n if node.kwargs[\"input\"] == input_ph:\n q_input = node\n else:\n self.assertTrue(node.kwargs[\"input\"] == other_ph)\n q_other = node\n qparams_copy = qparams.copy()\n qparams_copy[\"zero_point\"] = 10\n expected_md = expected_md._replace(qparams=qparams_copy)\n self.assertEqual(node.kwargs[\"acc_out_ty\"], expected_md)\n elif node.op == \"call_function\" and node.target == acc_ops.quantized_mul:\n self.assertEqual(node.kwargs[\"input\"], q_input)\n self.assertEqual(node.kwargs[\"other\"], q_other)\n qparams = {\n \"scale\": 0.05,\n \"zero_point\": 1,\n }\n expected_md = acc_utils.build_raw_tensor_meta(qparams=qparams)\n self.assertEqual(node.kwargs[\"acc_out_ty\"], expected_md)\n q_add = node\n elif node.op == \"output\":\n self.assertEqual(q_add, node.args[0])\n else:\n self.fail(f\"Unexpected node: {node.format_node()}\")\n\n self.assertTrue(torch.equal(m(input, other), traced(input, other)))\n\n def test_cat(self):\n \"\"\"\n Test that torch.cat is traced correctly.\n \"\"\"\n\n class TestModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:\n return torch.cat([a, a, b], 0)\n\n m = TestModule()\n a, b = torch.randn(2, 2), torch.randn(2, 2)\n traced = acc_tracer.trace(m, (a, b))\n\n ph_a = ph_b = cat = None\n for node in traced.graph.nodes:\n if node.op == \"placeholder\":\n if str(node.target) == \"a\":\n ph_a = node\n else:\n self.assertTrue(str(node.target) == \"b\")\n ph_b = node\n elif node.op == \"call_function\":\n self.assertEqual(node.target, acc_ops.cat)\n self.assertEqual(node.kwargs[\"tensors\"][0], ph_a)\n self.assertEqual(node.kwargs[\"tensors\"][1], ph_a)\n self.assertEqual(node.kwargs[\"tensors\"][2], ph_b)\n self.assertEqual(node.kwargs[\"dim\"], 0)\n cat = node\n elif node.op == \"output\":\n self.assertEqual(cat, node.args[0])\n else:\n self.fail(f\"Unexpected node: {node.format_node()}\")\n\n self.assertTrue(torch.equal(m(a, b), traced(a, b)))\n\n def test_square(self):\n \"\"\"\n Test that torch.square is traced correctly.\n \"\"\"\n self._make_acc_op_function_test(acc_ops.mul, torch.square)\n\n def test_reshape(self):\n \"\"\"\n Test that 
torch.reshape is traced correctly.\n \"\"\"\n self._make_acc_op_function_test(acc_ops.reshape, torch.reshape, (1, -1))\n # arg = (1, -1)\n self._make_acc_op_function_test(acc_ops.reshape, lambda x: x.reshape(1, -1))\n # arg = ((1, -1))\n self._make_acc_op_function_test(acc_ops.reshape, lambda x: x.reshape((1, -1)))\n\n def test_transpose(self):\n \"\"\"\n Test that torch.transpose is traced correctly.\n \"\"\"\n self._make_acc_op_function_test(\n acc_ops.permute, lambda x: torch.transpose(x, 1, 0)\n )\n\n def test_permute(self):\n \"\"\"\n Test that torch.permute is traced correctly.\n \"\"\"\n\n def torch_permute(a, *dim):\n return a.permute(*dim)\n\n self._make_acc_op_function_test(acc_ops.permute, torch_permute, 1, 0)\n\n def test_min_full_reduce(self):\n \"\"\"\n Test that test_min_full_reduce is traced correctly.\n \"\"\"\n self._make_acc_op_function_test(acc_ops.min_full_reduce, torch.min)\n\n def test_matmul(self):\n \"\"\"\n Test that torch.matmul is traced correctly.\n \"\"\"\n\n class TestModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:\n return torch.matmul(a, b)\n\n m = TestModule()\n a, b = torch.randn(2, 2), torch.randn(2, 2)\n traced = acc_tracer.trace(m, [a, b])\n\n ph_a = ph_b = matmul = None\n for node in traced.graph.nodes:\n if node.op == \"placeholder\":\n if str(node.target) == \"a\":\n ph_a = node\n else:\n self.assertTrue(str(node.target) == \"b\")\n ph_b = node\n elif node.op == \"call_function\":\n self.assertEqual(node.target, acc_ops.matmul)\n self.assertEqual(node.kwargs[\"input\"], ph_a)\n self.assertEqual(node.kwargs[\"other\"], ph_b)\n matmul = node\n elif node.op == \"output\":\n self.assertEqual(matmul, node.args[0])\n else:\n self.fail(f\"Unexpected node: {node.format_node()}\")\n\n self.assertTrue(torch.equal(m(a, b), traced(a, b)))\n\n def test_bmm(self):\n self._make_acc_op_function_test(\n acc_ops.matmul, lambda x: torch.bmm(x, x), input_shape=(2, 4, 4)\n )\n\n def test_tile(self):\n return self._make_acc_op_function_test(\n acc_ops.tile, lambda x: torch.tile(x, (2, 1, 2)), input_shape=(1, 2)\n )\n\n def test_dropout(self):\n self._make_acc_op_function_test(\n None,\n lambda x: nn.functional.dropout(x, training=False),\n input_shape=(1, 2, 3),\n )\n\n def test_hardsigmoid(self):\n self._make_acc_op_function_test(\n acc_ops.hardsigmoid,\n lambda x: nn.functional.hardsigmoid(x),\n input_shape=(3, 4, 5),\n )\n\n def test_hardtanh(self):\n self._make_acc_op_function_test(\n acc_ops.hardtanh,\n lambda x: nn.functional.hardtanh(x),\n input_shape=(3, 4, 5),\n )\n\n def test_hardswish(self):\n class TestModule(nn.Module):\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n y = nn.functional.hardswish(x)\n return y\n\n m = TestModule()\n x = torch.randn(3, 4, 5)\n traced = acc_tracer.trace(m, x)\n ph_x = hardsigmoid_y = res_y = None\n for node in traced.graph.nodes:\n if node.op == \"placeholder\":\n ph_x = node\n elif node.op == \"call_function\" and node.target == acc_ops.hardsigmoid:\n hardsigmoid_y = node\n self.assertEqual(node.kwargs[\"input\"], ph_x)\n elif node.op == \"call_function\" and node.target == acc_ops.mul:\n res_y = node\n self.assertEqual(node.kwargs[\"input\"], hardsigmoid_y)\n self.assertEqual(node.kwargs[\"other\"], ph_x)\n elif node.op == \"output\":\n self.assertEqual(node.args[0], res_y)\n else:\n self.fail(f\"Unexpected node: {node.format_node()}\")\n\n ref = m(x)\n res = traced(x)\n torch.testing.assert_allclose(ref, res)\n\n def 
test_add_with_alpha(self):\n \"\"\"\n Test that normalization works for torch add with alpha, which requires special\n normalization handling.\n \"\"\"\n\n class TestModule(nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:\n a1 = torch.add(a, b)\n a2 = torch.add(a, b, alpha=1.0)\n a3 = torch.add(a, b, alpha=0.5)\n return a1, a2, a3\n\n m = TestModule()\n input_a = torch.randn(2, 3)\n input_b = torch.randn(2, 3)\n traced = acc_tracer.trace(m, [input_a, input_b])\n\n ph_a = ph_b = add_1 = add_2 = add_3 = mul = None\n for node in traced.graph.nodes:\n if node.op == \"placeholder\":\n if str(node.target) == \"a\":\n ph_a = node\n elif str(node.target) == \"b\":\n ph_b = node\n else:\n self.fail(f\"Unexpected placeholder {node.target}.\")\n elif node.op == \"call_function\" and node.target == acc_ops.mul:\n mul = node\n self.assertEqual(node.kwargs[\"input\"], ph_b)\n self.assertEqual(node.kwargs[\"other\"], 0.5)\n elif node.op == \"call_function\" and node.target == acc_ops.add:\n if add_1 is None:\n add_1 = node\n self.assertEqual(node.kwargs[\"input\"], ph_a)\n self.assertEqual(node.kwargs[\"other\"], ph_b)\n elif add_2 is None:\n add_2 = node\n self.assertEqual(node.kwargs[\"input\"], ph_a)\n self.assertEqual(node.kwargs[\"other\"], ph_b)\n elif add_3 is None:\n add_3 = node\n self.assertEqual(node.kwargs[\"input\"], ph_a)\n self.assertEqual(node.kwargs[\"other\"], mul)\n else:\n self.fail(f\"Unexpected add: {node.format_node()}\")\n elif node.op == \"output\":\n self.assertEqual(node.args[0][0], add_1)\n self.assertEqual(node.args[0][1], add_2)\n self.assertEqual(node.args[0][2], add_3)\n else:\n self.fail(f\"Unexpected node: {node.format_node()}\")\n\n ref = m(input_a, input_b)\n res = traced(input_a, input_b)\n self.assertTrue(torch.equal(ref[0], res[0]))\n self.assertTrue(torch.equal(ref[1], res[1]))\n self.assertTrue(torch.equal(ref[2], res[2]))\n\n def test_leaf_module_list(self):\n \"\"\"\n Test leaf_module_list is working properly.\n \"\"\"\n\n class LeafModule(nn.Module):\n def forward(self, x):\n return x\n\n class TestModule(nn.Module):\n def __init__(self):\n super().__init__()\n self.mod = LeafModule()\n\n def forward(self, x):\n return self.mod(x)\n\n x = torch.randn(1, 1)\n mod = TestModule()\n acc_mod = acc_tracer.trace(\n mod,\n [x],\n leaf_module_list={LeafModule},\n )\n ph = leaf_module = None\n for node in acc_mod.graph.nodes:\n if node.op == \"placeholder\":\n ph = node\n elif node.op == \"call_module\":\n leaf_module = node\n self.assertEqual(leaf_module.target, \"mod\")\n self.assertEqual(leaf_module.args[0], ph)\n elif node.op == \"output\":\n self.assertEqual(node.args[0], leaf_module)\n else:\n self.fail(f\"Unexpected node: {node.format_node()}\")\n self.assertTrue(torch.equal(mod(x), acc_mod(x)))\n\n def test_sign(self):\n self._make_acc_op_function_test(acc_ops.sign, torch.sign)\n\n def test_relu(self):\n self._make_acc_op_function_test(acc_ops.relu, torch.relu)\n\n def test_leaky_relu(self):\n self._make_acc_op_function_test(acc_ops.leaky_relu, torch.nn.functional.leaky_relu)\n\n def test_elu(self):\n self._make_acc_op_function_test(acc_ops.elu, torch.nn.functional.elu)\n\n def test_selu(self):\n self._make_acc_op_function_test(acc_ops.selu, torch.nn.functional.selu)\n\n def test_softsign(self):\n self._make_acc_op_function_test(acc_ops.softsign, torch.nn.functional.softsign)\n\n def test_sigmoid(self):\n self._make_acc_op_function_test(acc_ops.sigmoid, torch.sigmoid)\n\n def 
test_sin(self):\n self._make_acc_op_function_test(acc_ops.sin, torch.sin)\n\n def test_cos(self):\n self._make_acc_op_function_test(acc_ops.cos, torch.cos)\n\n def test_tan(self):\n self._make_acc_op_function_test(acc_ops.tan, torch.tan)\n\n def test_sinh(self):\n self._make_acc_op_function_test(acc_ops.sinh, torch.sinh)\n\n def test_cosh(self):\n self._make_acc_op_function_test(acc_ops.cosh, torch.cosh)\n\n def test_tanh(self):\n self._make_acc_op_function_test(acc_ops.tanh, torch.tanh)\n\n def test_asin(self):\n self._make_acc_op_function_test(acc_ops.asin, torch.asin)\n\n def test_acos(self):\n self._make_acc_op_function_test(acc_ops.acos, torch.acos)\n\n def test_atan(self):\n self._make_acc_op_function_test(acc_ops.atan, torch.atan)\n\n def test_exp(self):\n self._make_acc_op_function_test(acc_ops.exp, torch.exp)\n\n def test_log(self):\n self._make_acc_op_function_test(acc_ops.log, torch.log)\n\n def test_sqrt(self):\n self._make_acc_op_function_test(acc_ops.sqrt, torch.sqrt)\n\n def test_reciprocal(self):\n self._make_acc_op_function_test(acc_ops.reciprocal, torch.reciprocal)\n\n def test_abs(self):\n self._make_acc_op_function_test(acc_ops.abs, torch.abs)\n\n def test_neg(self):\n self._make_acc_op_function_test(acc_ops.neg, torch.neg)\n\n def test_floor(self):\n self._make_acc_op_function_test(acc_ops.floor, torch.floor)\n\n def test_ceil(self):\n self._make_acc_op_function_test(acc_ops.ceil, torch.ceil)\n\n def test_softmax(self):\n self._make_acc_op_function_test(acc_ops.softmax, torch.nn.functional.softmax)\n\n def test_tensor_squeeze(self):\n self._make_acc_op_function_test(acc_ops.squeeze, lambda x: x.squeeze())\n\n def test_torch_squeeze(self):\n self._make_acc_op_function_test(acc_ops.squeeze, lambda x: torch.squeeze(x))\n\n def test_operator_mul(self):\n self._make_acc_op_function_test(acc_ops.mul, lambda x: x * 7)\n\n def test_torch_mul(self):\n self._make_acc_op_function_test(acc_ops.mul, lambda x: torch.mul(x, 7))\n\n def test_div(self):\n self._make_acc_op_function_test(acc_ops.div, lambda x: torch.div(x, 2))\n self._make_acc_op_function_test(acc_ops.div, lambda x: x / 2)\n\n def test_floor_div(self):\n self._make_acc_op_function_test(acc_ops.floor_div, lambda x: torch.div(x, 2, rounding_mode=\"floor\"))\n\n def test_trunc_div(self):\n self._make_acc_op_function_test(acc_ops.trunc_div, lambda x: torch.div(x, 2, rounding_mode=\"trunc\"))\n self._make_acc_op_function_test(acc_ops.trunc_div, lambda x: torch.floor_divide(x, 2))\n\n def test_view(self):\n \"\"\"\n Test that Tensor.view is traced correctly.\n \"\"\"\n\n self._make_acc_op_function_test(acc_ops.reshape, lambda x: x.view(1, -1))\n\n def test_narrow(self):\n \"\"\"\n Test that torch.narrow is traced correctly.\n \"\"\"\n return self._make_acc_op_function_test(\n acc_ops.slice_tensor,\n torch.narrow,\n validate_same_kwargs=False,\n dim=1,\n start=1,\n length=2,\n )\n\n def test_pow(self):\n self._make_acc_op_function_test(acc_ops.pow, torch.pow, exponent=2)\n\n def test_size(self):\n class TestModule(nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, a):\n idx = a.size(1)\n return a.shape[idx]\n\n m = TestModule()\n a = torch.randn(2, 1, 4)\n traced = acc_tracer.trace(m, [a])\n\n ph_a = size_1 = size_2 = getitem_1 = getitem_2 = None\n for node in traced.graph.nodes:\n if node.op == \"placeholder\":\n self.assertTrue(node.target == \"a\")\n ph_a = node\n elif node.op == \"call_function\" and node.target == acc_ops.size:\n if size_1:\n size_2 = node\n 
self.assertTrue(size_2.kwargs[\"input\"] is ph_a)\n else:\n size_1 = node\n self.assertTrue(size_1.kwargs[\"input\"] is ph_a)\n elif node.op == \"call_function\" and node.target == acc_ops.getitem:\n if getitem_1:\n getitem_2 = node\n self.assertTrue(getitem_2.kwargs[\"idx\"] == getitem_1)\n self.assertTrue(getitem_2.kwargs[\"input\"] == size_2)\n else:\n getitem_1 = node\n self.assertTrue(getitem_1.kwargs[\"idx\"] == 1)\n self.assertTrue(getitem_1.kwargs[\"input\"] == size_1)\n elif node.op == \"output\":\n self.assertEqual(node.args[0], getitem_2)\n else:\n self.fail(f\"Unexpected node: {node.format_node()}\")\n\n ref = m(a)\n res = traced(a)\n self.assertEqual(ref, res)\n\n def test_flatten(self):\n \"\"\"\n Test that torch.flatten is traced correctly.\n \"\"\"\n self._make_acc_op_function_test(\n acc_ops.flatten, torch.flatten, start_dim=1, end_dim=1\n )\n self._make_acc_op_function_test(acc_ops.flatten, lambda x: x.flatten())\n\n def test_topk_multi_output(self):\n \"\"\"\n Test that torch.topk multi outputs work.\n \"\"\"\n\n class TestModule(nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, a: torch.Tensor) -> torch.Tensor:\n return torch.topk(a, 3)[1]\n\n m = TestModule()\n input_a = torch.randn(10)\n traced = acc_tracer.trace(m, [input_a])\n\n ph_a = topk = getitem = None\n for node in traced.graph.nodes:\n if node.op == \"placeholder\" and str(node.target) == \"a\":\n ph_a = node\n elif node.op == \"call_function\" and node.target == acc_ops.topk:\n topk = node\n self.assertEqual(node.kwargs[\"input\"], ph_a)\n self.assertEqual(node.kwargs[\"k\"], 3)\n elif node.op == \"call_function\" and node.target == acc_ops.getitem:\n getitem = node\n self.assertEqual(node.kwargs[\"input\"], topk)\n self.assertEqual(node.kwargs[\"idx\"], 1)\n elif node.op == \"output\":\n self.assertEqual(node.args[0], getitem)\n else:\n self.fail(f\"Unexpected node: {node.format_node()}\")\n\n self.assertTrue(torch.equal(m(input_a), traced(input_a)))\n\n def test_addmm_with_alpha_beta(self):\n class TestModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(\n self, input: torch.Tensor, a: torch.Tensor, b: torch.Tensor\n ) -> torch.Tensor:\n return torch.addmm(input, a, b, alpha=1.2, beta=1.1)\n\n m = TestModule()\n input, a, b = torch.randn(2, 2), torch.randn(2, 2), torch.randn(2, 2)\n traced = acc_tracer.trace(m, [input, a, b])\n\n ph_in = ph_a = ph_b = mm = add = mm_mul = add_mul = None\n for node in traced.graph.nodes:\n if node.op == \"placeholder\":\n if str(node.target) == \"a\":\n ph_a = node\n elif str(node.target) == \"b\":\n ph_b = node\n else:\n self.assertTrue(str(node.target) == \"input\")\n ph_in = node\n elif node.op == \"call_function\":\n if node.target == acc_ops.matmul:\n self.assertEqual(node.kwargs[\"input\"], ph_a)\n self.assertEqual(node.kwargs[\"other\"], ph_b)\n mm = node\n elif node.target == acc_ops.add:\n self.assertEqual(node.kwargs[\"input\"], mm_mul)\n self.assertEqual(node.kwargs[\"other\"], add_mul)\n add = node\n elif mm_mul:\n self.assertEqual(node.kwargs[\"input\"], ph_in)\n self.assertEqual(node.kwargs[\"other\"], 1.1)\n add_mul = node\n else:\n self.assertEqual(node.kwargs[\"input\"], mm)\n self.assertEqual(node.kwargs[\"other\"], 1.2)\n mm_mul = node\n elif node.op == \"output\":\n self.assertEqual(add, node.args[0])\n else:\n self.fail(f\"Unexpected node: {node.format_node()}\")\n\n torch.testing.assert_allclose(m(input, a, b), traced(input, a, b))\n\n def test_log1p(self):\n class TestModule(torch.nn.Module):\n 
def forward(self, input: torch.Tensor) -> torch.Tensor:\n return torch.log1p(input)\n\n m = TestModule().eval()\n input = torch.tensor([[1.2, 0.3, -0.4]])\n traced = acc_tracer.trace(m, [input])\n\n ph_in = add = log = None\n for node in traced.graph.nodes:\n if node.op == \"placeholder\":\n self.assertTrue(str(node.target) == \"input\")\n ph_in = node\n elif node.op == \"call_function\":\n if node.target == acc_ops.add:\n self.assertEqual(node.kwargs[\"input\"], ph_in)\n self.assertEqual(node.kwargs[\"other\"], 1)\n add = node\n else:\n self.assertEqual(node.target, acc_ops.log)\n self.assertEqual(node.kwargs[\"input\"], add)\n log = node\n elif node.op == \"output\":\n self.assertEqual(log, node.args[0])\n else:\n self.fail(f\"Unexpected node: {node.format_node()}\")\n\n torch.testing.assert_allclose(m(input), traced(input))\n\n def test_addmm(self):\n class TestModule(torch.nn.Module):\n def forward(\n self, input: torch.Tensor, a: torch.Tensor, b: torch.Tensor\n ) -> torch.Tensor:\n return torch.addmm(input, a, b)\n\n m = TestModule()\n input, a, b = torch.randn(2, 2), torch.randn(2, 2), torch.randn(2, 2)\n traced = acc_tracer.trace(m, [input, a, b])\n\n ph_in = ph_a = ph_b = mm = add = None\n for node in traced.graph.nodes:\n if node.op == \"placeholder\":\n if str(node.target) == \"a\":\n ph_a = node\n elif str(node.target) == \"b\":\n ph_b = node\n else:\n self.assertTrue(str(node.target) == \"input\")\n ph_in = node\n elif node.op == \"call_function\":\n if node.target == acc_ops.matmul:\n self.assertEqual(node.kwargs[\"input\"], ph_a)\n self.assertEqual(node.kwargs[\"other\"], ph_b)\n mm = node\n else:\n self.assertEqual(node.target, acc_ops.add)\n self.assertEqual(node.kwargs[\"input\"], mm)\n self.assertEqual(node.kwargs[\"other\"], ph_in)\n add = node\n elif node.op == \"output\":\n self.assertEqual(add, node.args[0])\n else:\n self.fail(f\"Unexpected node: {node.format_node()}\")\n\n self.assertTrue(torch.equal(m(input, a, b), traced(input, a, b)))\n\n def test_gelu(self):\n return self._make_acc_op_function_test(acc_ops.gelu, torch.nn.functional.gelu)\n\n @parameterized.expand(\n [\n (1, True),\n (1, False),\n (None, False),\n ]\n )\n def test_argmin(self, dim, keepdim):\n class TestModule(torch.nn.Module):\n def __init__(self, dim, keepdim):\n super().__init__()\n self.dim = dim\n self.keepdim = keepdim\n\n def forward(self, input: torch.Tensor) -> torch.Tensor:\n return torch.argmin(input, dim=self.dim, keepdim=self.keepdim)\n\n m = TestModule(dim, keepdim)\n input = torch.randn(2, 2)\n traced = acc_tracer.trace(m, [input])\n\n ph_in = flatten = topk = getitem = squeeze = None\n for node in traced.graph.nodes:\n if node.op == \"placeholder\":\n self.assertTrue(str(node.target) == \"input\")\n ph_in = node\n elif node.op == \"call_function\":\n if node.target == acc_ops.flatten:\n self.assertEqual(node.kwargs[\"input\"], ph_in)\n flatten = node\n elif node.target == acc_ops.topk:\n self.assertEqual(\n node.kwargs[\"input\"], flatten if flatten else ph_in\n )\n topk = node\n elif node.target == acc_ops.getitem:\n self.assertEqual(node.kwargs[\"input\"], topk)\n getitem = node\n elif node.target == acc_ops.squeeze:\n self.assertEqual(node.kwargs[\"input\"], getitem)\n squeeze = node\n elif node.op == \"output\":\n self.assertEqual(squeeze if squeeze else getitem, node.args[0])\n else:\n self.fail(f\"Unexpected node: {node.format_node()}\")\n if dim is None:\n self.assertTrue(flatten is not None)\n if not keepdim:\n self.assertTrue(squeeze is not None)\n 
self.assertTrue(torch.equal(m(input), traced(input)))\n\n def test_t(self):\n \"\"\"\n Test Tensor.t() is traced correctly.\n \"\"\"\n self._make_acc_op_function_test(acc_ops.permute, lambda x: x.t())\n self._make_acc_op_function_test(\n acc_ops.permute, lambda x: x.t(), input_shape=(3,)\n )\n\n def test_split_size(self):\n self._make_acc_op_function_test(\n acc_ops.split,\n torch.split,\n validate_same_kwargs=False,\n split_size_or_sections=2,\n dim=1,\n )\n\n def test_split_sections(self):\n class TestModule(torch.nn.Module):\n def forward(self, input: torch.Tensor) -> torch.Tensor:\n return torch.split(input, [2, 5, 3], 1)\n\n m = TestModule()\n input = torch.randn(1, 10)\n traced = acc_tracer.trace(m, [input])\n\n ph_in = slice_node_0 = slice_node_1 = slice_node_2 = None\n tuple_construct_node = None\n for node in traced.graph.nodes:\n if node.op == \"placeholder\":\n self.assertTrue(str(node.target) == \"input\")\n ph_in = node\n elif node.op == \"call_function\":\n if node.target == acc_ops.slice_tensor:\n self.assertEqual(node.kwargs[\"input\"], ph_in)\n if slice_node_0:\n if slice_node_1:\n slice_node_2 = node\n else:\n slice_node_1 = node\n else:\n slice_node_0 = node\n else:\n self.assertEqual(node.target, acc_ops.tuple_construct)\n self.assertEqual(\n node.kwargs[\"tensors\"],\n (slice_node_0, slice_node_1, slice_node_2),\n )\n tuple_construct_node = node\n elif node.op == \"output\":\n self.assertEqual(tuple_construct_node, node.args[0])\n else:\n self.fail(f\"Unexpected node: {node.format_node()}\")\n\n ref_output = m(input)\n output = traced(input)\n for i, j in zip(ref_output, output):\n self.assertTrue(torch.equal(i, j))\n\n def test_list_input(self):\n \"\"\"\n Test that list inputs are traced correctly.\n \"\"\"\n\n class TestModule(nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, a: List[torch.Tensor]) -> torch.Tensor:\n return a[0] + a[1]\n\n m = TestModule()\n input = [torch.randn(2, 3), torch.randn(2, 3)]\n traced = acc_tracer.trace(m, [input])\n\n ph = getitem_0 = getitem_1 = add = None\n for node in traced.graph.nodes:\n if node.op == \"placeholder\":\n self.assertEqual(str(node.target), \"a\")\n ph = node\n elif node.op == \"call_function\" and node.target == acc_ops.getitem:\n self.assertTrue(node.kwargs[\"idx\"] == 0 or node.kwargs[\"idx\"] == 1)\n if node.kwargs[\"idx\"] == 0:\n getitem_0 = node\n else:\n getitem_1 = node\n elif node.op == \"call_function\":\n self.assertEqual(node.target, acc_ops.add)\n self.assertEqual(node.kwargs[\"input\"], getitem_0)\n self.assertEqual(node.kwargs[\"other\"], getitem_1)\n add = node\n elif node.op == \"output\":\n self.assertEqual(add, node.args[0])\n else:\n self.fail(f\"Unexpected node: {node.format_node()}\")\n\n # Check the tensor metadatas are correct given the input is a list.\n self.assertTrue(isinstance(ph.meta[\"tensor_meta\"], list))\n self.assertEqual(len(ph.meta[\"tensor_meta\"]), 2)\n self.assertEqual(getitem_0.meta[\"tensor_meta\"], ph.meta[\"tensor_meta\"][0])\n self.assertEqual(getitem_1.meta[\"tensor_meta\"], ph.meta[\"tensor_meta\"][1])\n\n self.assertTrue(torch.equal(m(input), traced(input)))\n\n def test_mobilenet_v3(self):\n \"\"\"\n Test that we can trace mobilenet v3 small and run/compare against the untraced version.\n \"\"\"\n m = torchvision.models.mobilenet_v3_small(pretrained=True)\n self._make_model_unit_test(m, enable_allclose=True)\n\n def test_mobilenet_v2(self):\n \"\"\"\n Test that we can trace mobilenet v2 small and run/compare against the untraced version.\n 
\"\"\"\n m = torchvision.models.mobilenet_v2(pretrained=True)\n self._make_model_unit_test(m)\n\n def test_vgg16(self):\n \"\"\"\n Test that we can trace vgg16 and run/compare against the untraced version.\n \"\"\"\n m = torchvision.models.vgg16(pretrained=True)\n self._make_model_unit_test(m)\n\n def test_resnet18(self):\n \"\"\"\n Test that we can trace resnet18 and run/compare against the untraced version.\n \"\"\"\n m = torchvision.models.resnet18(pretrained=True)\n self._make_model_unit_test(m)\n\n def test_resnext50_32x4d(self):\n \"\"\"\n Test that we can trace resnext and run/compare against the untraced version.\n \"\"\"\n m = torchvision.models.resnext50_32x4d(pretrained=True)\n self._make_model_unit_test(m)\n\n def test_cumsum(self):\n self._make_acc_op_function_test(acc_ops.cumsum, torch.cumsum, dim=1)\n self._make_acc_op_function_test(\n acc_ops.cumsum, torch.cumsum, dim=1, dtype=torch.float\n )\n\n def test_chunk(self):\n self._make_acc_op_function_test(acc_ops.chunk, torch.chunk, chunks=2, dim=0)\n\n def test_all_acc_ops_registered(self):\n self.assertEqual(\n acc_normalizer._acc_ops,\n {\n acc_ops.linear,\n acc_ops.max_pool2d,\n acc_ops.flatten,\n acc_ops.adaptive_avg_pool2d,\n acc_ops.avg_pool2d,\n acc_ops.add,\n acc_ops.min_full_reduce,\n acc_ops.min_dim_reduce,\n acc_ops.minimum,\n acc_ops.cat,\n acc_ops.softmax,\n acc_ops.sign,\n acc_ops.permute,\n acc_ops.matmul,\n acc_ops.quantize_per_tensor,\n acc_ops.quantize_per_channel,\n acc_ops.quantized_add,\n acc_ops.quantized_mul,\n acc_ops.dequantize,\n acc_ops.sub,\n acc_ops.mul,\n acc_ops.div,\n acc_ops.floor_div,\n acc_ops.trunc_div,\n acc_ops.pow,\n acc_ops.relu,\n acc_ops.leaky_relu,\n acc_ops.elu,\n acc_ops.selu,\n acc_ops.softsign,\n acc_ops.tuple_construct,\n acc_ops.unsqueeze,\n acc_ops.sigmoid,\n acc_ops.sum,\n acc_ops.max_full_reduce,\n acc_ops.max_dim_reduce,\n acc_ops.maximum,\n acc_ops.sinh,\n acc_ops.cosh,\n acc_ops.tanh,\n acc_ops.asin,\n acc_ops.acos,\n acc_ops.atan,\n acc_ops.exp,\n acc_ops.log,\n acc_ops.sqrt,\n acc_ops.reciprocal,\n acc_ops.abs,\n acc_ops.neg,\n acc_ops.floor,\n acc_ops.ceil,\n acc_ops.size,\n acc_ops.split,\n acc_ops.conv2d,\n acc_ops.batch_norm,\n acc_ops.embedding_bag,\n acc_ops.embedding_bag_byte_rowwise_offsets,\n acc_ops.embedding_bag_4bit_rowwise_offsets,\n acc_ops.contiguous,\n acc_ops.pad,\n acc_ops.sin,\n acc_ops.cos,\n acc_ops.tan,\n acc_ops.topk,\n acc_ops.getitem,\n acc_ops.squeeze,\n acc_ops.tile,\n acc_ops.reshape,\n acc_ops.quantized_linear,\n acc_ops.quantized_conv2d,\n acc_ops.quantized_batch_norm2d,\n acc_ops.to_dtype,\n acc_ops.clamp,\n acc_ops.layer_norm,\n acc_ops.linalg_norm,\n acc_ops.slice_tensor,\n acc_ops.hardsigmoid,\n acc_ops.mean,\n acc_ops.hardtanh,\n acc_ops.gelu,\n acc_ops.cumsum,\n acc_ops.chunk,\n acc_ops.rescale_quantize_per_tensor,\n acc_ops.rescale_quantize_per_channel,\n },\n )\n", "import torch\nimport torch.fx.experimental.fx_acc.acc_ops as acc_ops\nfrom torch.testing._internal.common_fx2trt import AccTestCase\nfrom torch.fx.experimental.fx2trt.passes.fuse_pass import (\n fuse_permute_linear,\n trt_transposed_linear,\n)\nfrom torch.testing._internal.common_utils import run_tests\n\n\nclass TestFusePermuteLinear(AccTestCase):\n def test_fuse_permute_linear(self):\n class TestModule(torch.nn.Module):\n def __init__(self, in_features, out_features):\n super().__init__()\n self.linear = torch.nn.Linear(in_features, out_features)\n\n def forward(self, x):\n return self.linear(x.permute(0, 2, 1))\n\n inputs = [torch.randn(6, 10, 20)]\n a = TestModule(10, 
30)\n self.run_test(\n TestModule(10, 30),\n inputs,\n {trt_transposed_linear},\n apply_passes=[fuse_permute_linear],\n )\n\n def test_fuse_permute_linear_keep_permute(self):\n \"\"\"\n Fusion while keep permute node since permute has more than one consumers\n \"\"\"\n\n class TestModule(torch.nn.Module):\n def __init__(self, in_features, out_features):\n super().__init__()\n self.linear = torch.nn.Linear(in_features, out_features)\n\n def forward(self, x):\n y = x.permute(0, 2, 1)\n return self.linear(y), y\n\n inputs = [torch.randn(6, 10, 20)]\n a = TestModule(10, 30)\n self.run_test(\n TestModule(10, 30),\n inputs,\n {acc_ops.permute, trt_transposed_linear},\n apply_passes=[fuse_permute_linear],\n )\n\n def test_multi_fuse_permute_linear(self):\n \"\"\"\n Fusion when permute output is shared by multiple linears\n \"\"\"\n\n class TestModule(torch.nn.Module):\n def __init__(self, in_features, out_features):\n super().__init__()\n self.linear1 = torch.nn.Linear(in_features, out_features)\n self.linear2 = torch.nn.Linear(in_features, out_features)\n\n def forward(self, x):\n y = x.permute(0, 2, 1)\n return self.linear1(y) + self.linear2(y)\n\n inputs = [torch.randn(8, 10, 20)]\n a = TestModule(10, 30)\n self.run_test(\n TestModule(10, 30),\n inputs,\n {trt_transposed_linear},\n apply_passes=[fuse_permute_linear],\n )\n\nif __name__ == '__main__':\n run_tests()\n", "# Owner(s): [\"oncall: fx\"]\n\nimport torch\nimport torch.fx\nimport torch.fx.experimental.fx_acc.acc_ops as acc_ops\nimport torch.nn as nn\nfrom torch.testing._internal.common_fx2trt import AccTestCase, InputTensorSpec\nfrom parameterized import parameterized\nfrom torch.testing._internal.common_utils import run_tests\n\n\nclass TestUnsqueeze(AccTestCase):\n @parameterized.expand(\n [\n (\"negative_dim\", -2),\n (\"positive_dim\", 2),\n ]\n )\n def test_unsqueeze(self, _, dim):\n class Unsqueeze(nn.Module):\n def __init__(self, dim):\n super().__init__()\n self.dim = dim\n\n def forward(self, x):\n return torch.unsqueeze(x, self.dim)\n\n inputs = [torch.randn(1, 2, 3)]\n self.run_test(Unsqueeze(dim), inputs, expected_ops={acc_ops.unsqueeze})\n\n @parameterized.expand(\n [\n (\"negative_dim_dynamic\", -4),\n (\"positive_dim_dynamic\", 1),\n ]\n )\n def test_unsqueeze_with_dynamic_shape(self, _, dim):\n class Unsqueeze(nn.Module):\n def __init__(self, dim):\n super().__init__()\n self.dim = dim\n\n def forward(self, x):\n return torch.unsqueeze(x, self.dim)\n\n input_specs = [\n InputTensorSpec(\n shape=(-1, 2, 3),\n dtype=torch.float32,\n shape_ranges=[((1, 2, 3), (2, 2, 3), (3, 2, 3))],\n ),\n ]\n self.run_test_with_dynamic_shape(\n Unsqueeze(dim), input_specs, expected_ops={acc_ops.unsqueeze}\n )\n\nif __name__ == '__main__':\n run_tests()\n" ]
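Every test in the tracer suite above follows the same pattern: trace the module, then walk traced.graph.nodes and classify each node by its op ("placeholder", "call_function", "call_module", "output") and its target. A minimal sketch of that pattern, using the public torch.fx.symbolic_trace entry point rather than the internal acc_tracer (whose import path varies across PyTorch versions):

import torch
import torch.fx


class AddModule(torch.nn.Module):
    def forward(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
        return a + b


traced = torch.fx.symbolic_trace(AddModule())

# Walk the graph the way the tests do: placeholders first, then the
# call_function node(s), then the single output node.
for node in traced.graph.nodes:
    if node.op == "placeholder":
        print("input:", node.target)
    elif node.op == "call_function":
        print("call:", node.target, "args:", node.args)
    elif node.op == "output":
        print("returns:", node.args[0])

# The traced module must stay numerically identical to the original,
# which is what the assertTrue(torch.equal(...)) checks in the tests assert.
a, b = torch.randn(2, 3), torch.randn(2, 3)
assert torch.equal(AddModule()(a, b), traced(a, b))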
[ [ "torch.testing._internal.common_fx2trt.InputTensorSpec", "torch.randn", "torch.reshape", "torch.testing._internal.common_utils.run_tests" ], [ "torch.transpose", "torch.cat", "torch.nn.functional.dropout", "torch.zeros", "numpy.random.random_sample", "torch.split", "torch.topk", "torch.nn.quantized.Quantize", "torch.addmm", "torch.nn.EmbeddingBag", "numpy.random.randint", "torch.testing.assert_allclose", "torch.add", "torch.randn", "torch.tile", "torch.argmin", "torch.from_numpy", "torch.equal", "torch.tensor", "torch.nn.Sigmoid", "torch.mul", "torch.bmm", "torch.nn.functional.hardswish", "torch.ops.quantized.embedding_bag_4bit_prepack", "torch.nn.intrinsic.quantized.ConvReLU2d", "torch.squeeze", "torch.div", "torch.LongTensor", "torch.nn.functional.hardsigmoid", "torch.nn.Conv2d", "torch.floor_divide", "torch.nn.Linear", "torch.log1p", "torch.nn.quantized.BatchNorm2d", "torch.nn.functional.hardtanh", "torch.nn.BatchNorm2d", "torch.stack", "torch.ops.quantized.embedding_bag_byte_prepack", "numpy.sum", "torch.fx.experimental.fx_acc.acc_utils.build_raw_tensor_meta", "torch.nn.quantized.Conv2d", "torch.fx.experimental.fx_acc.acc_tracer.trace", "torch.manual_seed", "torch.nan_to_num", "torch.matmul", "torch.nn.ReLU", "torch.cumsum", "torch.nn.quantized.Linear" ], [ "torch.nn.Linear", "torch.randn", "torch.testing._internal.common_utils.run_tests" ], [ "torch.testing._internal.common_fx2trt.InputTensorSpec", "torch.randn", "torch.testing._internal.common_utils.run_tests", "torch.unsqueeze" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
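Each record in this dump pairs repository metadata (repo_name, hexsha, file_path) with the raw file contents (code), the detected third-party call sites per file (apis), and per-file library-version candidates (possible_versions). A sketch of consuming one record, assuming the records are serialized as JSON objects under exactly the field names from the schema header (the dump itself prints values without keys, so those key names are an assumption):

import json

# Hypothetical serialized record; key names follow the schema header
# (repo_name, hexsha, file_path, code, apis, possible_versions).
record_json = '''
{
  "repo_name": "joybanerjee08/imgaug",
  "hexsha": ["e9d3515b52f2205cee1d3c9a913fcc638d15993b"],
  "file_path": ["test/augmenters/test_blur.py"],
  "code": ["import numpy as np\\n"],
  "apis": [["numpy.zeros", "numpy.array_equal"]],
  "possible_versions": [{"numpy": []}]
}
'''

record = json.loads(record_json)
for path, calls in zip(record["file_path"], record["apis"]):
    print("%s -> %d detected API call sites" % (path, len(calls)))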
joybanerjee08/imgaug
[ "e9d3515b52f2205cee1d3c9a913fcc638d15993b" ]
[ "test/augmenters/test_blur.py" ]
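The file below is imgaug's test suite for its blur augmenters. For orientation, the augmenters it exercises are driven like this (a minimal usage sketch; the sigma and k values are arbitrary choices, not taken from the tests):

import numpy as np
from imgaug import augmenters as iaa

image = np.random.randint(0, 255, size=(64, 64, 3), dtype=np.uint8)

# A fixed value applies the same blur to every image; a (low, high)
# tuple samples a new value per image, as the randomized tests verify.
blurred_g = iaa.GaussianBlur(sigma=(0, 1.0)).augment_image(image)
blurred_a = iaa.AverageBlur(k=3).augment_image(image)
blurred_m = iaa.MedianBlur(k=5).augment_image(image)  # k must be odd
print(blurred_g.shape, blurred_a.shape, blurred_m.shape)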
[ "from __future__ import print_function, division, absolute_import\n\nimport time\n\nimport matplotlib\nmatplotlib.use('Agg') # fix execution of tests involving matplotlib on travis\nimport numpy as np\nimport six.moves as sm\nimport cv2\nfrom scipy import ndimage\n\nimport imgaug as ia\nfrom imgaug import augmenters as iaa\nfrom imgaug import parameters as iap\nfrom imgaug.testutils import keypoints_equal, reseed\nfrom imgaug.augmenters import meta\n\n\ndef main():\n time_start = time.time()\n\n test_GaussianBlur()\n test_AverageBlur()\n test_MedianBlur()\n # TODO BilateralBlur\n\n time_end = time.time()\n print(\"<%s> Finished without errors in %.4fs.\" % (__file__, time_end - time_start,))\n\n\ndef test_GaussianBlur():\n reseed()\n\n base_img = np.array([[0, 0, 0],\n [0, 255, 0],\n [0, 0, 0]], dtype=np.uint8)\n base_img = base_img[:, :, np.newaxis]\n\n images = np.array([base_img])\n images_list = [base_img]\n outer_pixels = ([], [])\n for i in sm.xrange(base_img.shape[0]):\n for j in sm.xrange(base_img.shape[1]):\n if i != j:\n outer_pixels[0].append(i)\n outer_pixels[1].append(j)\n\n keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),\n ia.Keypoint(x=2, y=2)], shape=base_img.shape)]\n\n # no blur, shouldnt change anything\n aug = iaa.GaussianBlur(sigma=0)\n\n observed = aug.augment_images(images)\n expected = images\n assert np.array_equal(observed, expected)\n\n # weak blur of center pixel\n aug = iaa.GaussianBlur(sigma=0.5)\n aug_det = aug.to_deterministic()\n\n # images as numpy array\n observed = aug.augment_images(images)\n assert 100 < observed[0][1, 1] < 255\n assert (observed[0][outer_pixels[0], outer_pixels[1]] > 0).all()\n assert (observed[0][outer_pixels[0], outer_pixels[1]] < 50).all()\n\n observed = aug_det.augment_images(images)\n assert 100 < observed[0][1, 1] < 255\n assert (observed[0][outer_pixels[0], outer_pixels[1]] > 0).all()\n assert (observed[0][outer_pixels[0], outer_pixels[1]] < 50).all()\n\n # images as list\n observed = aug.augment_images(images_list)\n assert 100 < observed[0][1, 1] < 255\n assert (observed[0][outer_pixels[0], outer_pixels[1]] > 0).all()\n assert (observed[0][outer_pixels[0], outer_pixels[1]] < 50).all()\n\n observed = aug_det.augment_images(images_list)\n assert 100 < observed[0][1, 1] < 255\n assert (observed[0][outer_pixels[0], outer_pixels[1]] > 0).all()\n assert (observed[0][outer_pixels[0], outer_pixels[1]] < 50).all()\n\n # keypoints shouldnt be changed\n observed = aug.augment_keypoints(keypoints)\n expected = keypoints\n assert keypoints_equal(observed, expected)\n\n observed = aug_det.augment_keypoints(keypoints)\n expected = keypoints\n assert keypoints_equal(observed, expected)\n\n # varying blur sigmas\n aug = iaa.GaussianBlur(sigma=(0, 1))\n aug_det = aug.to_deterministic()\n\n last_aug = None\n last_aug_det = None\n nb_changed_aug = 0\n nb_changed_aug_det = 0\n nb_iterations = 1000\n for i in sm.xrange(nb_iterations):\n observed_aug = aug.augment_images(images)\n observed_aug_det = aug_det.augment_images(images)\n if i == 0:\n last_aug = observed_aug\n last_aug_det = observed_aug_det\n else:\n if not np.array_equal(observed_aug, last_aug):\n nb_changed_aug += 1\n if not np.array_equal(observed_aug_det, last_aug_det):\n nb_changed_aug_det += 1\n last_aug = observed_aug\n last_aug_det = observed_aug_det\n assert nb_changed_aug >= int(nb_iterations * 0.8)\n assert nb_changed_aug_det == 0\n\n #############################\n # test other dtypes below\n # ndimage.gaussian_filter() rejects: float16\n # 
float64 implementation in gaussian_filter() was too inaccurate\n #############################\n\n # --\n # blur of various dtypes at sigma=0\n # --\n aug = iaa.GaussianBlur(sigma=0)\n\n # bool\n image = np.zeros((3, 3), dtype=bool)\n image[1, 1] = True\n image_aug = aug.augment_image(image)\n assert image_aug.dtype.type == np.bool_\n assert np.all(image_aug == image)\n\n # uint, int\n for dtype in [np.uint8, np.uint16, np.uint32, np.int8, np.int16, np.int32]:\n _min_value, center_value, _max_value = meta.get_value_range_of_dtype(dtype)\n image = np.zeros((3, 3), dtype=dtype)\n image[1, 1] = int(center_value)\n image_aug = aug.augment_image(image)\n assert image_aug.dtype.type == dtype\n assert np.all(image_aug == image)\n\n # float\n for dtype in [np.float16, np.float32, np.float64]:\n _min_value, center_value, _max_value = meta.get_value_range_of_dtype(dtype)\n image = np.zeros((3, 3), dtype=dtype)\n image[1, 1] = center_value\n image_aug = aug.augment_image(image)\n assert image_aug.dtype.type == dtype\n assert np.allclose(image_aug, image)\n\n # --\n # blur of various dtypes at sigma=1.0\n # and using an example value of 100 for int/uint/float and True for bool\n # --\n aug = iaa.GaussianBlur(sigma=1.0)\n\n # prototype kernel, generated via:\n # mask = np.zeros((3, 3), dtype=np.float64)\n # mask[1, 1] = 1.0\n # mask = ndimage.gaussian_filter(mask, 1.0)\n kernel = np.float64([\n [0.08767308, 0.12075024, 0.08767308],\n [0.12075024, 0.16630671, 0.12075024],\n [0.08767308, 0.12075024, 0.08767308]\n ])\n\n # bool\n image = np.zeros((3, 3), dtype=bool)\n image[1, 1] = True\n image_aug = aug.augment_image(image)\n expected = kernel > 0.5\n assert image_aug.dtype.type == np.bool_\n assert np.all(image_aug == expected)\n\n # uint, int\n for dtype in [np.uint8, np.uint16, np.uint32, np.int8, np.int16, np.int32]:\n image = np.zeros((3, 3), dtype=dtype)\n image[1, 1] = 100\n image_aug = aug.augment_image(image)\n expected = (kernel * 100).astype(dtype)\n diff = np.abs(image_aug.astype(np.int64) - expected.astype(np.int64))\n assert image_aug.dtype.type == dtype\n assert np.max(diff) <= 2\n\n # float\n for dtype in [np.float16, np.float32, np.float64]:\n image = np.zeros((3, 3), dtype=dtype)\n image[1, 1] = 100.0\n image_aug = aug.augment_image(image)\n expected = (kernel * 100.0).astype(dtype)\n diff = np.abs(image_aug.astype(np.float128) - expected.astype(np.float128))\n assert image_aug.dtype.type == dtype\n assert np.max(diff) < 1.0\n\n # --\n # blur of various dtypes at sigma=0.4\n # and using an example value of 100 for int/uint/float and True for bool\n # --\n aug = iaa.GaussianBlur(sigma=0.4)\n\n # prototype kernel, generated via:\n # mask = np.zeros((3, 3), dtype=np.float64)\n # mask[1, 1] = 1.0\n # kernel = ndimage.gaussian_filter(mask, 0.4)\n kernel = np.float64([\n [0.00163144, 0.03712817, 0.00163144],\n [0.03712817, 0.84496158, 0.03712817],\n [0.00163144, 0.03712817, 0.00163144]\n ])\n\n # bool\n image = np.zeros((3, 3), dtype=bool)\n image[1, 1] = True\n image_aug = aug.augment_image(image)\n expected = kernel > 0.5\n assert image_aug.dtype.type == np.bool_\n assert np.all(image_aug == expected)\n\n # uint, int\n for dtype in [np.uint8, np.uint16, np.uint32, np.int8, np.int16, np.int32]:\n image = np.zeros((3, 3), dtype=dtype)\n image[1, 1] = 100\n image_aug = aug.augment_image(image)\n expected = (kernel * 100).astype(dtype)\n diff = np.abs(image_aug.astype(np.int64) - expected.astype(np.int64))\n assert image_aug.dtype.type == dtype\n assert np.max(diff) <= 2\n\n # float\n for dtype 
in [np.float16, np.float32, np.float64]:\n image = np.zeros((3, 3), dtype=dtype)\n image[1, 1] = 100.0\n image_aug = aug.augment_image(image)\n expected = (kernel * 100.0).astype(dtype)\n diff = np.abs(image_aug.astype(np.float128) - expected.astype(np.float128))\n assert image_aug.dtype.type == dtype\n assert np.max(diff) < 1.0\n\n # --\n # blur of various dtypes at sigma=0.75\n # and values being half-way between center and maximum for each dtype (bool is skipped as it doesnt make any\n # sense here)\n # The goal of this test is to verify that no major loss of resolution happens for large dtypes.\n # Such inaccuracies appear for float64 if used.\n # --\n aug = iaa.GaussianBlur(sigma=0.75)\n\n # prototype kernel, generated via:\n # mask = np.zeros((3, 3), dtype=np.float64)\n # mask[1, 1] = 1.0\n # kernel = ndimage.gaussian_filter(mask, 0.75)\n kernel = np.float64([\n [0.05469418, 0.12447951, 0.05469418],\n [0.12447951, 0.28330525, 0.12447951],\n [0.05469418, 0.12447951, 0.05469418]\n ])\n\n # uint, int\n for dtype in [np.uint8, np.uint16, np.uint32, np.int8, np.int16, np.int32]:\n _min_value, center_value, max_value = meta.get_value_range_of_dtype(dtype)\n value = int(center_value + 0.4 * max_value)\n image = np.zeros((3, 3), dtype=dtype)\n image[1, 1] = value\n image_aug = aug.augment_image(image)\n expected = (kernel * value).astype(dtype)\n diff = np.abs(image_aug.astype(np.int64) - expected.astype(np.int64))\n assert image_aug.dtype.type == dtype\n # accepts difference of 4, 8, 16 (at 1, 2, 4 bytes, i.e. 8, 16, 32 bit)\n assert np.max(diff) <= 2**(1 + np.dtype(dtype).itemsize)\n\n # float\n for dtype, value in zip([np.float16, np.float32, np.float64], [5000, 1000*1000, 1000*1000*1000]):\n image = np.zeros((3, 3), dtype=dtype)\n image[1, 1] = value\n image_aug = aug.augment_image(image)\n expected = (kernel * value).astype(dtype)\n diff = np.abs(image_aug.astype(np.float128) - expected.astype(np.float128))\n assert image_aug.dtype.type == dtype\n # accepts difference of 2.0, 4.0, 8.0, 16.0 (at 1, 2, 4, 8 bytes, i.e. 
8, 16, 32, 64 bit)\n assert np.max(diff) < 2**(1 + np.dtype(dtype).itemsize)\n\n # assert failure on invalid dtypes\n aug = iaa.GaussianBlur(sigma=1.0)\n for dt in [np.uint64, np.int64, np.float128]:\n got_exception = False\n try:\n _ = aug.augment_image(np.zeros((1, 1), dtype=dt))\n except Exception as exc:\n assert \"forbidden dtype\" in str(exc)\n got_exception = True\n assert got_exception\n\n\ndef test_AverageBlur():\n reseed()\n\n base_img = np.zeros((11, 11, 1), dtype=np.uint8)\n base_img[5, 5, 0] = 200\n base_img[4, 5, 0] = 100\n base_img[6, 5, 0] = 100\n base_img[5, 4, 0] = 100\n base_img[5, 6, 0] = 100\n\n blur3x3 = [\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 11, 11, 11, 0, 0, 0, 0],\n [0, 0, 0, 11, 44, 56, 44, 11, 0, 0, 0],\n [0, 0, 0, 11, 56, 67, 56, 11, 0, 0, 0],\n [0, 0, 0, 11, 44, 56, 44, 11, 0, 0, 0],\n [0, 0, 0, 0, 11, 11, 11, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n ]\n blur3x3 = np.array(blur3x3, dtype=np.uint8)[..., np.newaxis]\n\n blur4x4 = [\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 6, 6, 6, 6, 0, 0, 0],\n [0, 0, 0, 6, 25, 31, 31, 25, 6, 0, 0],\n [0, 0, 0, 6, 31, 38, 38, 31, 6, 0, 0],\n [0, 0, 0, 6, 31, 38, 38, 31, 6, 0, 0],\n [0, 0, 0, 6, 25, 31, 31, 25, 6, 0, 0],\n [0, 0, 0, 0, 6, 6, 6, 6, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n ]\n blur4x4 = np.array(blur4x4, dtype=np.uint8)[..., np.newaxis]\n\n blur5x5 = [\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 4, 4, 4, 4, 4, 0, 0, 0],\n [0, 0, 4, 16, 20, 20, 20, 16, 4, 0, 0],\n [0, 0, 4, 20, 24, 24, 24, 20, 4, 0, 0],\n [0, 0, 4, 20, 24, 24, 24, 20, 4, 0, 0],\n [0, 0, 4, 20, 24, 24, 24, 20, 4, 0, 0],\n [0, 0, 4, 16, 20, 20, 20, 16, 4, 0, 0],\n [0, 0, 0, 4, 4, 4, 4, 4, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n ]\n blur5x5 = np.array(blur5x5, dtype=np.uint8)[..., np.newaxis]\n\n keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),\n ia.Keypoint(x=2, y=2)], shape=base_img.shape)]\n\n # no blur, shouldnt change anything\n aug = iaa.AverageBlur(k=0)\n observed = aug.augment_image(base_img)\n assert np.array_equal(observed, base_img)\n\n # k=3\n aug = iaa.AverageBlur(k=3)\n observed = aug.augment_image(base_img)\n assert np.array_equal(observed, blur3x3)\n\n # k=5\n aug = iaa.AverageBlur(k=5)\n observed = aug.augment_image(base_img)\n assert np.array_equal(observed, blur5x5)\n\n # k as (3, 4)\n aug = iaa.AverageBlur(k=(3, 4))\n nb_iterations = 100\n nb_seen = [0, 0]\n for i in sm.xrange(nb_iterations):\n observed = aug.augment_image(base_img)\n if np.array_equal(observed, blur3x3):\n nb_seen[0] += 1\n elif np.array_equal(observed, blur4x4):\n nb_seen[1] += 1\n else:\n raise Exception(\"Unexpected result in AverageBlur@1\")\n p_seen = [v/nb_iterations for v in nb_seen]\n assert 0.4 <= p_seen[0] <= 0.6\n assert 0.4 <= p_seen[1] <= 0.6\n\n # k as (3, 5)\n aug = iaa.AverageBlur(k=(3, 5))\n nb_iterations = 100\n nb_seen = [0, 0, 0]\n for i in sm.xrange(nb_iterations):\n observed = aug.augment_image(base_img)\n if np.array_equal(observed, blur3x3):\n nb_seen[0] += 1\n elif np.array_equal(observed, blur4x4):\n nb_seen[1] += 1\n elif np.array_equal(observed, blur5x5):\n nb_seen[2] += 1\n else:\n raise Exception(\"Unexpected result in 
AverageBlur@2\")\n p_seen = [v/nb_iterations for v in nb_seen]\n assert 0.23 <= p_seen[0] <= 0.43\n assert 0.23 <= p_seen[1] <= 0.43\n assert 0.23 <= p_seen[2] <= 0.43\n\n # k as stochastic parameter\n aug = iaa.AverageBlur(k=iap.Choice([3, 5]))\n nb_iterations = 100\n nb_seen = [0, 0]\n for i in sm.xrange(nb_iterations):\n observed = aug.augment_image(base_img)\n if np.array_equal(observed, blur3x3):\n nb_seen[0] += 1\n elif np.array_equal(observed, blur5x5):\n nb_seen[1] += 1\n else:\n raise Exception(\"Unexpected result in AverageBlur@3\")\n p_seen = [v/nb_iterations for v in nb_seen]\n assert 0.4 <= p_seen[0] <= 0.6\n assert 0.4 <= p_seen[1] <= 0.6\n\n # k as ((3, 5), (3, 5))\n aug = iaa.AverageBlur(k=((3, 5), (3, 5)))\n\n possible = dict()\n for kh in [3, 4, 5]:\n for kw in [3, 4, 5]:\n key = (kh, kw)\n if kh == 0 or kw == 0:\n possible[key] = np.copy(base_img)\n else:\n possible[key] = cv2.blur(base_img, (kh, kw))[..., np.newaxis]\n\n nb_iterations = 250\n nb_seen = dict([(key, 0) for key, val in possible.items()])\n for i in sm.xrange(nb_iterations):\n observed = aug.augment_image(base_img)\n for key, img_aug in possible.items():\n if np.array_equal(observed, img_aug):\n nb_seen[key] += 1\n # dont check sum here, because 0xX and Xx0 are all the same, i.e. much\n # higher sum than nb_iterations\n assert all([v > 0 for v in nb_seen.values()])\n\n # keypoints shouldnt be changed\n aug = iaa.AverageBlur(k=3)\n aug_det = aug.to_deterministic()\n observed = aug.augment_keypoints(keypoints)\n expected = keypoints\n assert keypoints_equal(observed, expected)\n\n observed = aug_det.augment_keypoints(keypoints)\n expected = keypoints\n assert keypoints_equal(observed, expected)\n\n #############################\n # test other dtypes below\n #############################\n\n # --\n # blur of various dtypes at k=0\n # --\n aug = iaa.AverageBlur(k=0)\n\n # bool\n image = np.zeros((3, 3), dtype=bool)\n image[1, 1] = True\n image[2, 2] = True\n image_aug = aug.augment_image(image)\n assert image_aug.dtype.type == np.bool_\n assert np.all(image_aug == image)\n\n # uint, int\n for dtype in [np.uint8, np.uint16, np.int8, np.int16]:\n _min_value, center_value, max_value = meta.get_value_range_of_dtype(dtype)\n image = np.zeros((3, 3), dtype=dtype)\n image[1, 1] = int(center_value + 0.4 * max_value)\n image[2, 2] = int(center_value + 0.4 * max_value)\n image_aug = aug.augment_image(image)\n assert image_aug.dtype.type == dtype\n assert np.all(image_aug == image)\n\n # float\n for dtype, value in zip([np.float16, np.float32, np.float64], [5000, 1000*1000, 1000*1000*1000]):\n image = np.zeros((3, 3), dtype=dtype)\n image[1, 1] = value\n image[2, 2] = value\n image_aug = aug.augment_image(image)\n assert image_aug.dtype.type == dtype\n assert np.allclose(image_aug, image)\n\n # --\n # blur of various dtypes at k=3\n # and using an example value of 100 for int/uint/float and True for bool\n # --\n aug = iaa.AverageBlur(k=3)\n\n # prototype mask\n # we place values in a 3x3 grid at positions (row=1, col=1) and (row=2, col=2) (beginning with 0)\n # AverageBlur uses cv2.blur(), which uses BORDER_REFLECT_101 as its default padding mode,\n # see https://docs.opencv.org/3.1.0/d2/de8/group__core__array.html\n # the matrix below shows the 3x3 grid and the padded row/col values around it\n # [1, 0, 1, 0, 1]\n # [0, 0, 0, 0, 0]\n # [1, 0, 1, 0, 1]\n # [0, 0, 0, 1, 0]\n # [1, 0, 1, 0, 1]\n mask = np.float64([\n [4/9, 2/9, 4/9],\n [2/9, 2/9, 3/9],\n [4/9, 3/9, 5/9]\n ])\n\n # bool\n image = np.zeros((3, 3), 
dtype=bool)\n image[1, 1] = True\n image[2, 2] = True\n image_aug = aug.augment_image(image)\n expected = mask > 0.5\n assert image_aug.dtype.type == np.bool_\n assert np.all(image_aug == expected)\n\n # uint, int\n for dtype in [np.uint8, np.uint16, np.int8, np.int16]:\n image = np.zeros((3, 3), dtype=dtype)\n image[1, 1] = 100\n image[2, 2] = 100\n image_aug = aug.augment_image(image)\n expected = np.round(mask * 100).astype(dtype) # cv2.blur() applies rounding for int/uint dtypes\n diff = np.abs(image_aug.astype(np.int64) - expected.astype(np.int64))\n assert image_aug.dtype.type == dtype\n assert np.max(diff) <= 2\n\n # float\n for dtype in [np.float16, np.float32, np.float64]:\n image = np.zeros((3, 3), dtype=dtype)\n image[1, 1] = 100.0\n image[2, 2] = 100.0\n image_aug = aug.augment_image(image)\n expected = (mask * 100.0).astype(dtype)\n diff = np.abs(image_aug.astype(np.float128) - expected.astype(np.float128))\n assert image_aug.dtype.type == dtype\n assert np.max(diff) < 1.0\n\n # --\n # blur of various dtypes at k=3\n # and values being half-way between center and maximum for each dtype (bool is skipped as it doesnt make any\n # sense here)\n # The goal of this test is to verify that no major loss of resolution happens for large dtypes.\n # --\n aug = iaa.AverageBlur(k=3)\n\n # prototype mask (see above)\n mask = np.float64([\n [4/9, 2/9, 4/9],\n [2/9, 2/9, 3/9],\n [4/9, 3/9, 5/9]\n ])\n\n # uint, int\n for dtype in [np.uint8, np.uint16, np.int8, np.int16]:\n _min_value, center_value, max_value = meta.get_value_range_of_dtype(dtype)\n value = int(center_value + 0.4 * max_value)\n image = np.zeros((3, 3), dtype=dtype)\n image[1, 1] = value\n image[2, 2] = value\n image_aug = aug.augment_image(image)\n expected = (mask * value).astype(dtype)\n diff = np.abs(image_aug.astype(np.int64) - expected.astype(np.int64))\n assert image_aug.dtype.type == dtype\n # accepts difference of 4, 8, 16 (at 1, 2, 4 bytes, i.e. 8, 16, 32 bit)\n assert np.max(diff) <= 2**(1 + np.dtype(dtype).itemsize)\n\n # float\n for dtype, value in zip([np.float16, np.float32, np.float64], [5000, 1000*1000, 1000*1000*1000]):\n image = np.zeros((3, 3), dtype=dtype)\n image[1, 1] = value\n image[2, 2] = value\n image_aug = aug.augment_image(image)\n expected = (mask * value).astype(dtype)\n diff = np.abs(image_aug.astype(np.float128) - expected.astype(np.float128))\n assert image_aug.dtype.type == dtype\n # accepts difference of 2.0, 4.0, 8.0, 16.0 (at 1, 2, 4, 8 bytes, i.e. 
8, 16, 32, 64 bit)\n assert np.max(diff) < 2**(1 + np.dtype(dtype).itemsize)\n\n # assert failure on invalid dtypes\n aug = iaa.AverageBlur(k=3)\n for dt in [np.uint32, np.uint64, np.int32, np.int64]:\n got_exception = False\n try:\n _ = aug.augment_image(np.zeros((1, 1), dtype=dt))\n except Exception as exc:\n assert \"forbidden dtype\" in str(exc)\n got_exception = True\n assert got_exception\n\n\ndef test_MedianBlur():\n reseed()\n\n base_img = np.zeros((11, 11, 1), dtype=np.uint8)\n base_img[3:8, 3:8, 0] = 1\n base_img[4:7, 4:7, 0] = 2\n base_img[5:6, 5:6, 0] = 3\n\n blur3x3 = np.zeros_like(base_img)\n blur3x3[3:8, 3:8, 0] = 1\n blur3x3[4:7, 4:7, 0] = 2\n blur3x3[4, 4, 0] = 1\n blur3x3[4, 6, 0] = 1\n blur3x3[6, 4, 0] = 1\n blur3x3[6, 6, 0] = 1\n blur3x3[3, 3, 0] = 0\n blur3x3[3, 7, 0] = 0\n blur3x3[7, 3, 0] = 0\n blur3x3[7, 7, 0] = 0\n\n blur5x5 = np.copy(blur3x3)\n blur5x5[4, 3, 0] = 0\n blur5x5[3, 4, 0] = 0\n blur5x5[6, 3, 0] = 0\n blur5x5[7, 4, 0] = 0\n blur5x5[4, 7, 0] = 0\n blur5x5[3, 6, 0] = 0\n blur5x5[6, 7, 0] = 0\n blur5x5[7, 6, 0] = 0\n blur5x5[blur5x5 > 1] = 1\n\n keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),\n ia.Keypoint(x=2, y=2)], shape=base_img.shape)]\n\n # no blur, shouldnt change anything\n aug = iaa.MedianBlur(k=1)\n observed = aug.augment_image(base_img)\n assert np.array_equal(observed, base_img)\n\n # k=3\n aug = iaa.MedianBlur(k=3)\n observed = aug.augment_image(base_img)\n assert np.array_equal(observed, blur3x3)\n\n # k=5\n aug = iaa.MedianBlur(k=5)\n observed = aug.augment_image(base_img)\n assert np.array_equal(observed, blur5x5)\n\n # k as (3, 5)\n aug = iaa.MedianBlur(k=(3, 5))\n seen = [False, False]\n for i in sm.xrange(100):\n observed = aug.augment_image(base_img)\n if np.array_equal(observed, blur3x3):\n seen[0] = True\n elif np.array_equal(observed, blur5x5):\n seen[1] = True\n else:\n raise Exception(\"Unexpected result in MedianBlur@1\")\n if all(seen):\n break\n assert all(seen)\n\n # k as stochastic parameter\n aug = iaa.MedianBlur(k=iap.Choice([3, 5]))\n seen = [False, False]\n for i in sm.xrange(100):\n observed = aug.augment_image(base_img)\n if np.array_equal(observed, blur3x3):\n seen[0] += True\n elif np.array_equal(observed, blur5x5):\n seen[1] += True\n else:\n raise Exception(\"Unexpected result in MedianBlur@2\")\n if all(seen):\n break\n assert all(seen)\n\n # keypoints shouldnt be changed\n aug = iaa.MedianBlur(k=3)\n aug_det = aug.to_deterministic()\n observed = aug.augment_keypoints(keypoints)\n expected = keypoints\n assert keypoints_equal(observed, expected)\n\n observed = aug_det.augment_keypoints(keypoints)\n expected = keypoints\n assert keypoints_equal(observed, expected)\n\n\ndef test_MotionBlur():\n reseed()\n\n # simple scenario\n aug = iaa.MotionBlur(k=3, angle=0, direction=0.0)\n matrix_func = aug.matrix\n matrices = [matrix_func(np.zeros((128, 128, 3), dtype=np.uint8), 3, ia.new_random_state(i)) for i in range(10)]\n expected = np.float32([\n [0, 1.0/3, 0],\n [0, 1.0/3, 0],\n [0, 1.0/3, 0]\n ])\n for matrices_image in matrices:\n for matrix_channel in matrices_image:\n assert np.allclose(matrix_channel, expected)\n\n # 90deg angle\n aug = iaa.MotionBlur(k=3, angle=90, direction=0.0)\n matrix_func = aug.matrix\n matrices = [matrix_func(np.zeros((128, 128, 3), dtype=np.uint8), 3, ia.new_random_state(i)) for i in range(10)]\n expected = np.float32([\n [0, 0, 0],\n [1.0/3, 1.0/3, 1.0/3],\n [0, 0, 0]\n ])\n for matrices_image in matrices:\n for matrix_channel in matrices_image:\n assert 
np.allclose(matrix_channel, expected)\n\n # 45deg angle\n aug = iaa.MotionBlur(k=3, angle=45, direction=0.0, order=0)\n matrix_func = aug.matrix\n matrices = [matrix_func(np.zeros((128, 128, 3), dtype=np.uint8), 3, ia.new_random_state(i)) for i in range(10)]\n expected = np.float32([\n [0, 0, 1.0/3],\n [0, 1.0/3, 0],\n [1.0/3, 0, 0]\n ])\n for matrices_image in matrices:\n for matrix_channel in matrices_image:\n assert np.allclose(matrix_channel, expected)\n\n # random angle\n aug = iaa.MotionBlur(k=3, angle=[0, 90], direction=0.0)\n matrix_func = aug.matrix\n matrices = [matrix_func(np.zeros((128, 128, 3), dtype=np.uint8), 3, ia.new_random_state(i)) for i in range(50)]\n expected1 = np.float32([\n [0, 1.0/3, 0],\n [0, 1.0/3, 0],\n [0, 1.0/3, 0]\n ])\n expected2 = np.float32([\n [0, 0, 0],\n [1.0/3, 1.0/3, 1.0/3],\n [0, 0, 0],\n ])\n nb_seen = [0, 0]\n for matrices_image in matrices:\n assert np.allclose(matrices_image[0], matrices_image[1])\n assert np.allclose(matrices_image[1], matrices_image[2])\n for matrix_channel in matrices_image:\n if np.allclose(matrix_channel, expected1):\n nb_seen[0] += 1\n elif np.allclose(matrix_channel, expected2):\n nb_seen[1] += 1\n assert nb_seen[0] > 0\n assert nb_seen[1] > 0\n\n # 5x5\n aug = iaa.MotionBlur(k=5, angle=90, direction=0.0)\n matrix_func = aug.matrix\n matrices = [matrix_func(np.zeros((128, 128, 3), dtype=np.uint8), 3, ia.new_random_state(i)) for i in range(10)]\n expected = np.float32([\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [1.0/5, 1.0/5, 1.0/5, 1.0/5, 1.0/5],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n ])\n for matrices_image in matrices:\n for matrix_channel in matrices_image:\n assert np.allclose(matrix_channel, expected)\n\n # random k\n aug = iaa.MotionBlur(k=[3, 5], angle=90, direction=0.0)\n matrix_func = aug.matrix\n matrices = [matrix_func(np.zeros((128, 128, 3), dtype=np.uint8), 3, ia.new_random_state(i)) for i in range(50)]\n expected1 = np.float32([\n [0, 0, 0],\n [1.0/3, 1.0/3, 1.0/3],\n [0, 0, 0],\n ])\n expected2 = np.float32([\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [1.0/5, 1.0/5, 1.0/5, 1.0/5, 1.0/5],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n ])\n nb_seen = [0, 0]\n for matrices_image in matrices:\n assert np.allclose(matrices_image[0], matrices_image[1])\n assert np.allclose(matrices_image[1], matrices_image[2])\n for matrix_channel in matrices_image:\n if matrix_channel.shape == expected1.shape and np.allclose(matrix_channel, expected1):\n nb_seen[0] += 1\n elif matrix_channel.shape == expected2.shape and np.allclose(matrix_channel, expected2):\n nb_seen[1] += 1\n assert nb_seen[0] > 0\n assert nb_seen[1] > 0\n\n # k with choice [a, b, c, ...] 
must error in case of non-discrete values\n got_exception = False\n try:\n _ = iaa.MotionBlur(k=[3, 3.5, 4])\n except Exception as exc:\n assert \"to only contain integer\" in str(exc)\n got_exception = True\n assert got_exception\n\n # no error in case of (a, b), checks for #215\n aug = iaa.MotionBlur(k=(3, 7))\n for _ in range(10):\n _ = aug.augment_image(np.zeros((11, 11, 3), dtype=np.uint8))\n\n # direction 1.0\n aug = iaa.MotionBlur(k=3, angle=0, direction=1.0)\n matrix_func = aug.matrix\n matrices = [matrix_func(np.zeros((128, 128, 3), dtype=np.uint8), 3, ia.new_random_state(i)) for i in range(10)]\n expected = np.float32([\n [0, 1.0/1.5, 0],\n [0, 0.5/1.5, 0],\n [0, 0.0/1.5, 0]\n ])\n for matrices_image in matrices:\n for matrix_channel in matrices_image:\n assert np.allclose(matrix_channel, expected, rtol=0, atol=1e-2)\n\n # direction -1.0\n aug = iaa.MotionBlur(k=3, angle=0, direction=-1.0)\n matrix_func = aug.matrix\n matrices = [matrix_func(np.zeros((128, 128, 3), dtype=np.uint8), 3, ia.new_random_state(i)) for i in range(10)]\n expected = np.float32([\n [0, 0.0/1.5, 0],\n [0, 0.5/1.5, 0],\n [0, 1.0/1.5, 0]\n ])\n for matrices_image in matrices:\n for matrix_channel in matrices_image:\n assert np.allclose(matrix_channel, expected, rtol=0, atol=1e-2)\n\n # random direction\n aug = iaa.MotionBlur(k=3, angle=[0, 90], direction=[-1.0, 1.0])\n matrix_func = aug.matrix\n matrices = [matrix_func(np.zeros((128, 128, 3), dtype=np.uint8), 3, ia.new_random_state(i)) for i in range(50)]\n expected1 = np.float32([\n [0, 1.0/1.5, 0],\n [0, 0.5/1.5, 0],\n [0, 0.0/1.5, 0]\n ])\n expected2 = np.float32([\n [0, 0.0/1.5, 0],\n [0, 0.5/1.5, 0],\n [0, 1.0/1.5, 0]\n ])\n nb_seen = [0, 0]\n for matrices_image in matrices:\n assert np.allclose(matrices_image[0], matrices_image[1])\n assert np.allclose(matrices_image[1], matrices_image[2])\n for matrix_channel in matrices_image:\n if np.allclose(matrix_channel, expected1, rtol=0, atol=1e-2):\n nb_seen[0] += 1\n elif np.allclose(matrix_channel, expected2, rtol=0, atol=1e-2):\n nb_seen[1] += 1\n assert nb_seen[0] > 0\n assert nb_seen[1] > 0\n\n # test of actual augmenter\n img = np.zeros((7, 7, 3), dtype=np.uint8)\n img[3-1:3+2, 3-1:3+2, :] = 255\n aug = iaa.MotionBlur(k=3, angle=90, direction=0.0)\n img_aug = aug.augment_image(img)\n v1 = (255*(1/3))\n v2 = (255*(1/3)) * 2\n v3 = (255*(1/3)) * 3\n expected = np.float32([\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, v1, v2, v3, v2, v1, 0],\n [0, v1, v2, v3, v2, v1, 0],\n [0, v1, v2, v3, v2, v1, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0]\n ]).astype(np.uint8)\n expected = np.tile(expected[..., np.newaxis], (1, 1, 3))\n assert np.allclose(img_aug, expected)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
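The hard-coded "prototype kernel" arrays in test_GaussianBlur are produced exactly as their inline comments state: place a unit impulse in a 3x3 image and run scipy's Gaussian filter over it. A short sketch reproducing the sigma=1.0 case:

import numpy as np
from scipy import ndimage

# Unit impulse at the center of a 3x3 grid; filtering it recovers the
# Gaussian kernel restricted to that window, matching the hard-coded
# prototype values such as 0.16630671 at the center for sigma=1.0.
mask = np.zeros((3, 3), dtype=np.float64)
mask[1, 1] = 1.0
kernel = ndimage.gaussian_filter(mask, sigma=1.0)
print(np.round(kernel, 8))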
[ [ "numpy.allclose", "numpy.array_equal", "matplotlib.use", "numpy.tile", "numpy.dtype", "numpy.all", "numpy.max", "numpy.copy", "numpy.round", "numpy.zeros_like", "numpy.float64", "numpy.float32", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
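test_AverageBlur's expected masks rely on cv2.blur's default padding, BORDER_REFLECT_101, which mirrors rows and columns without duplicating the edge pixel; that is where fractions like mask[0, 0] == 4/9 in the prototype mask come from. A minimal check on the same toy image the test uses:

import numpy as np
import cv2

# Same setup as the test: ones at (1, 1) and (2, 2) of a 3x3 image.
img = np.zeros((3, 3), dtype=np.float64)
img[1, 1] = 1.0
img[2, 2] = 1.0

# cv2.blur averages over a 3x3 window with BORDER_REFLECT_101 padding
# by default; multiplying by 9 turns each output back into the count
# of ones inside its (padded) window, e.g. 4 in the top-left corner.
counts = cv2.blur(img, (3, 3)) * 9
print(np.round(counts))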
mdodici/trojan-WD-pollution
[ "ec79a96f0d9517a53df4c82ca1be0d5d38f3346b", "ec79a96f0d9517a53df4c82ca1be0d5d38f3346b" ]
[ "3-Trojan_Results/Scripts/1kB_Evals.py", "3-Trojan_Results/Scripts/10kB_Evals.py" ]
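The two analysis scripts below repeatedly normalize the asteroid-minus-planet phase difference into the interval (-pi, pi] with explicit per-element if-statements. A vectorized equivalent does the same in one line (wrap_to_pi is a hypothetical helper, not defined in the scripts):

import numpy as np

def wrap_to_pi(phi):
    # Map arbitrary angles (radians) into [-pi, pi).
    return np.mod(phi + np.pi, 2.0 * np.pi) - np.pi

print(wrap_to_pi(np.array([3.5, -3.5, 0.0])))  # approx [-2.783, 2.783, 0.]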
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\ntarget = '1kB'\nradeg = np.pi/180\n\ndef cart_to_pol(x,y):\n r = np.sqrt(x**2 + y**2)\n phi = np.arctan2(y,x)\n return r, phi\n\ndef pol_to_cart(r,phi):\n x = r*np.cos(phi)\n y = r*np.sin(phi)\n return x, y\n\ndef L45(msun,mjup):\n u2 = mjup/(msun+mjup)\n \n x_L4 = 0.5 - u2\n x_L5 = x_L4\n \n y_L4 = np.sqrt(3)/2\n y_L5 = -y_L4\n \n return np.array([x_L4,x_L5]), np.array([y_L4,y_L5])\n\ndef L45_nonnorm(xjup,yjup,xsun,ysun):\n phi_jup = np.arctan2(yjup,xjup)\n \n phi_L4 = phi_jup + np.pi/3\n phi_L5 = phi_jup - np.pi/3\n \n xsep = (xsun - xjup)\n ysep = (ysun - yjup)\n \n r_jupsol = np.sqrt(xsep**2 + ysep**2)\n \n x_L4 = r_jupsol*np.cos(phi_L4)\n x_L5 = r_jupsol*np.cos(phi_L5)\n y_L4 = r_jupsol*np.sin(phi_L4)\n y_L5 = r_jupsol*np.sin(phi_L5)\n \n return np.array([x_L4,x_L5]), np.array([y_L4,y_L5])\n\ndef hill(a,e,m,M):\n return a*(1-e)*np.power(m/(3*M),1/3)\n\n\ndef r_pol(r,psi,M1,M2,a):\n q = M2/M1\n z = np.zeros((len(psi),len(r)))\n for i, phi in enumerate(psi):\n x_ = r*np.cos(phi)\n y_ = r*np.sin(phi)\n x = x_/a\n y = y_/a\n s1 = np.sqrt(x**2 + y**2)\n s2 = np.sqrt((x-1)**2 + y**2)\n \n term1 = 2/(s1*(1+q))\n term2 = 2*q/(s2*(1+q))\n term3 = (x - q/(1+q))**2\n term4 = y**2\n z[i] = term1 + term2 + term3 + term4\n return z\n\nast_d = np.load('{0}_Trojandata.npy'.format(target))\nnum_asts = len(ast_d[0,:,0])\nprint(ast_d.shape)\n\njup_d = np.load('{0}_Planetdata.npy'.format(target))\nsol_d = np.load('{0}_Stardata.npy'.format(target))\ntimes = np.load('{0}_Timesteps.npy'.format(target))\n\nast_a = ast_d[0]; ast_e = ast_d[1]; ast_i = ast_d[2] \nast_o = ast_d[3]; ast_p = ast_d[4]; ast_l = ast_d[5]\nast_x = ast_d[6]; ast_y = ast_d[7]; ast_z = ast_d[8]\nast_meda = np.median(ast_a,axis=0)\n\njup_a = jup_d[0]; jup_e = jup_d[1]; jup_i = jup_d[2]; jup_p = jup_d[3]\njup_l = jup_d[4]; jup_x = jup_d[5]; jup_y = jup_d[6]; jup_z = jup_d[7]\nsol_m = sol_d[0]; sol_l = sol_d[1]; sol_x = sol_d[2]; sol_y = sol_d[3]; sol_z = sol_d[4]\njhill = hill(jup_a,jup_e,9.546e-4,sol_m)\ndst_jall = np.sqrt((ast_x - jup_x)**2 + (ast_y - jup_y)**2)\n\nL45x, L45y = L45_nonnorm(jup_x,jup_y,sol_x,sol_y)\nL4_xs = L45x[0]; L4_ys = L45y[0]\nL5_xs = L45x[1]; L5_ys = L45y[1]\n\ni_dif = np.zeros_like(ast_i)\ni_int = ast_i[:,0]\nfor i in range(len(ast_a[0,:])):\n i_dif[:,i] = ast_i[:,i] - i_int\n \nphi_vals = np.linspace(-np.pi,np.pi,500)\nZ = r_pol(jup_a,phi_vals,sol_m,9.546e-4,jup_a)\nPot = np.flip(Z,1)\n\nast_r, ast_h = cart_to_pol(ast_x,ast_y)\njup_r, jup_h = cart_to_pol(jup_x,jup_y)\nphdif = np.zeros_like(ast_h)\nfor i in range(len(jup_h)):\n phdif[:,i] = ast_h[:,i] - jup_h[i]\n \nid4 = []\nid5 = []\nfor i in range(num_asts):\n for it in range(len(jup_h)):\n if phdif[i,it] < -np.pi:\n phdif[i,it] = phdif[i,it] + 2*np.pi\n if phdif[i,it] > np.pi:\n phdif[i,it] = phdif[i,it] - 2*np.pi\n if phdif[i,0] > 0:\n id4.append(i)\n if phdif[i,0] < 0:\n id5.append(i)\n \nprint('Percentage at L4: %2.1f' %(len(id4)*100/num_asts))\n\nliba = np.zeros((num_asts,200))\nlibp = np.zeros((num_asts,200))\nfor i in range(num_asts):\n for n in range(200):\n high = int(500*(n+1))\n loww = int(500*n)\n pmax = np.amax(phdif[i,loww:high])\n pmin = np.amin(phdif[i,loww:high])\n amax = np.amax(ast_a[i,loww:high])\n amin = np.amin(ast_a[i,loww:high])\n amid = np.median(jup_a[loww:high])\n \n if pmax > 0:\n mid = np.pi/3\n if pmax < 0:\n mid = -np.pi/3\n \n lip = ((pmax - mid) + (pmin - mid)) / 2\n 
lia = ((amax - amid)+(amin - amid)) / 2\n libp[i,n] = abs(lip)\n liba[i,n] = abs(lia)\n \nindices = []\nhillers = []\nfor i in range(num_asts):\n it = 0\n while it < len(ast_meda):\n a_focus = ast_a[i,it]\n a_media = ast_meda[it]\n if a_focus > a_media + 2:\n indices.append(i)\n break\n elif a_focus < a_media - 2:\n indices.append(i)\n break\n else:\n it += 1\n it = 0\n while it < len(jhill):\n d = dst_jall[i,it]\n h = jhill[it]\n if d <= h + 0.1:\n hillers.append(i)\n break\n else:\n it += 1\n\nidx = np.array(indices)\nhdx = np.array(hillers)\n\nhill_not_sma = np.array(list(set(hillers) - set(indices)))\nndx = np.array(list(set(range(num_asts)) - set(indices)))\n\nprint(\"Number of escapers: \", len(indices))\nprint(\"Number of hill crossers: \", len(hillers))\npct = len(indices)/num_asts\nprint('Pct escaped / Total Asts: %0.2f' %pct)\n\nnrm_a = ast_a[ndx]; nrm_e = ast_e[ndx]; nrm_i = ast_i[ndx]; ndifi = i_dif[ndx]; nrmla = liba[ndx]\nnrm_p = ast_p[ndx]; nrm_l = ast_l[ndx]; nrm_x = ast_x[ndx]; nrm_y = ast_y[ndx]; nrmlp = libp[ndx]\n\n\nodd_a = ast_a[idx]; odd_e = ast_e[idx]; odd_i = ast_i[idx]; odifi = i_dif[idx]; oddla = liba[idx]\nodd_p = ast_p[idx]; odd_l = ast_l[idx]; odd_x = ast_x[idx]; odd_y = ast_y[idx]; oddlp = libp[idx]\n\nnrm_r, nrmph = cart_to_pol(nrm_x,nrm_y); odd_r, oddph = cart_to_pol(odd_x,odd_y)\njup_r, jupph = cart_to_pol(jup_x,jup_y); sol_r, solph = cart_to_pol(sol_x,sol_y)\nL4_rs, L4phs = cart_to_pol(L4_xs,L4_ys); L5_rs, L5phs = cart_to_pol(L5_xs,L5_ys)\n\ndistj = np.sqrt((odd_x - jup_x)**2 + (odd_y - jup_y)**2)\ndisth = np.sqrt((ast_x[hdx] - jup_x)**2 + (ast_y[hdx] - jup_y)**2)\ndists = np.sqrt((odd_x - sol_x)**2 + (odd_y - sol_y)**2)\njdist = np.sqrt((jup_x - sol_x)**2 + (jup_y - sol_y)**2)\n\nearlies = []\nlaties = []\nhill_cross = np.zeros(len(hdx))\n\nfor i in range(len(odd_a)):\n it = 0\n while it < 100000:\n a_focus = odd_a[i,it]\n a_media = ast_meda[it]\n if a_focus > a_media + 2:\n if it < 33333:\n earlies.append(i)\n break\n elif it > 70000:\n laties.append(i)\n break\n else:\n break\n elif a_focus < a_media - 2:\n if it < 33333:\n earlies.append(i)\n break\n elif it > 70000:\n laties.append(i)\n break\n else:\n break\n else:\n it += 1\n \nfor i in range(len(hdx)):\n it = 0\n while it < 100000:\n d = disth[i,it]\n h = jhill[it]\n if d <= h:\n hill_cross[i] = it\n break\n else:\n it += 1\n \nhorses = []\nfor number,n in enumerate(idx):\n i = 0\n while i < 5000:\n val = phdif[n,i]\n if 170*radeg <= val:\n horses.append(n)\n break\n elif val <= -170*radeg:\n horses.append(n)\n break\n elif -5*radeg <= val <= 5*radeg:\n horses.append(n)\n break\n i += 1\n \nhrs = np.array(horses)\ntrs = np.array( list( set(idx) - set(horses) ) )\n \nedx = np.array(earlies)\nldx = np.array(laties)\n\nprint(\"Number of early escapees: \", len(earlies), \" (escaped before .67 Myr)\")\nprint(\"Number of late escapees: \", len(laties), \" (escaped after %1.2f Myr)\" %(times[70000]/1e6))\npct_e = len(earlies)/len(indices)\npct_l = len(laties)/len(indices)\nprint('Number early / Total escapees: %0.2f' %pct_e)\nprint('Number late / Total escapees: %0.2f' %pct_l)\npcT_e = len(earlies)/num_asts\npcT_l = len(laties)/num_asts\nprint('Number early / Total Asts.: %0.2f' %pcT_e)\nprint('Number late / Total Asts.: %0.2f' %pcT_l)\n\n\nx_axis = np.linspace(0,times[33333]/1e6)\nx_axi2 = np.linspace(times[70000]/1e6,times[-1]/1e6)\n\nfig, ax = plt.subplots(3,figsize=(14,13),sharex=True,gridspec_kw={'height_ratios': [3, 1, 
.75]})\nplt.subplots_adjust(hspace=0)\n\nax[0].plot(times/1e6,ast_meda,'k',lw=3)\nax[0].vlines([times[33333]/1e6,times[70000]/1e6],5,9.5,'b',alpha=0.8,zorder=0)\n\nax[0].fill_between(x_axis,5*np.ones_like(x_axis),9.5*np.ones_like(x_axis),facecolor='b',alpha=0.2,zorder=0)\nax[0].fill_between(x_axi2,5*np.ones_like(x_axis),9.5*np.ones_like(x_axis),facecolor='b',alpha=0.2,zorder=0)\nax[0].plot(times/1e6,jup_a,'gold',lw=3)\nax[0].legend(['Median Ast.','Planet'],fontsize=16,frameon=False,loc='upper left')\nax[0].set_ylabel('Semimajor Axis / AU',fontsize=16)\nax[0].set_ylim(5,9.5)\nax[0].set_xlim(0,2)\nax[0].text(0.18,7.25,\"%1.i escaped\" %len(earlies),fontsize=25)\nax[0].text(0.8,7.25,\"%2.i escaped\" %(len(indices) - len(earlies) - len(laties)),fontsize=25)\nax[0].text(1.48,7.25,\"%2.i escaped\" %len(laties),fontsize=25)\n\nax[1].plot(times/1e6,sol_l,'orange',lw=3,zorder=10)\nax[1].plot(times/1e6,sol_m,'g',ls=':',lw=3,zorder=10)\nax[1].vlines([times[33333]/1e6,times[70000]/1e6],0,4,'b',alpha=0.8,zorder=0)\nax[1].legend([\"log Stellar Luminosity\", \"Stellar Mass\"],fontsize=16,loc='center left',frameon=False)\nax[1].set_ylabel(\"Solar Units\",fontsize=16)\nax[1].set_ylim(0,4)\nax[1].fill_between(x_axis,0*np.ones_like(x_axis),4*np.ones_like(x_axis),facecolor='b',alpha=0.2,zorder=0)\nax[1].fill_between(x_axi2,0*np.ones_like(x_axis),4*np.ones_like(x_axis),facecolor='b',alpha=0.2,zorder=0)\nax[1].set_xlabel('Time / Myr',fontsize=16)\nax[1].set_yticks([0,1,2,3])\n\nax[2].hist(hill_cross*20/1e6,edgecolor='k',facecolor='k',alpha=0.5,range=[0,2],bins=20)\nax[2].set_ylabel(\"Escapes\",fontsize=16)\nax[2].set_xlabel(\"Time / Myr\",fontsize=16)\nax[2].set_ylim(0,35)\nax[2].set_yticks([0,10,20,30])\nfig.savefig('{0}_Timeseries.pdf'.format(target),dpi=300)\n\n############\n\nhist, axh = plt.subplots(1,4,figsize=(20,5))\n\naxh[0].hist(nrm_a[:,0],edgecolor='k',histtype='step',range=[4.95,5.45])\naxh[0].hist(odd_a[:,0],facecolor='r',alpha=0.7,range=[4.95,5.45])\naxh[0].set_xlabel(\"SMA (AU)\",fontsize=16)\naxh[0].set_xlim(4.95,5.45)\n\naxh[1].hist(nrm_e[:,0],edgecolor='k',histtype='step',range=[0,.25])\naxh[1].hist(odd_e[:,0],facecolor='r',alpha=0.7,range=[0,.25])\naxh[1].set_xlabel(\"Eccentricity\",fontsize=16)\naxh[1].set_xlim(0,0.25)\n\naxh[2].hist(abs(nrmla[:,0]),edgecolor='k',histtype='step',range=[0,0.02],bins=20)\naxh[2].hist(abs(liba[trs,0]),facecolor='r',alpha=0.7,range=[0,0.02],bins=20)\naxh[2].set_xlabel(\"SMA Libration Amp. 
(AU)\",fontsize=16)\naxh[2].set_xlim(0,.02)\naxh[2].set_xticks([0,0.005,0.01,0.015,0.02])\n\nradeg = np.pi/180\naxh[3].hist(abs(nrmlp[:,0])/radeg,edgecolor='k',histtype='step',range=[0,35])\naxh[3].hist(abs(libp[trs,0])/radeg,facecolor='r',alpha=0.7,range=[0,35])\naxh[3].set_xlabel(r\"$\\lambda$ Libration Amplitude (Deg.)\",fontsize=16)\naxh[3].set_xlim(0,35)\naxh[3].legend(labels=['Stable','Escaped'],fontsize=14,frameon=False,loc='upper right')\n\nhist.suptitle('Initial conditions',fontsize=18)\nhist.savefig('{0}_Histograms.pdf'.format(target),dpi=300)\n\n#############\n\norf, ora = plt.subplots(1,2,figsize=(15,5),gridspec_kw={'width_ratios': [2, 1]})\nfor i in range(len(ndx)):\n ora[0].plot(phdif[ndx[i],:500],ast_a[ndx[i],:500]/5.2,'k',alpha=0.01,zorder=5)\nfor i,tr in enumerate(trs):\n ora[0].plot(phdif[tr,:500],ast_a[tr,:500]/5.2,'r',alpha=0.05,zorder=10)\nora[0].set_xlim(-np.pi,np.pi)\nora[0].set_ylim(.9,1.1)\nora[0].set_xlabel(r\"$\\phi - \\phi_{jup}$\",fontsize=16)\nora[0].set_ylabel(r\"SMA / $a_{jup}$\",fontsize=16)\nora[0].vlines([-np.pi/3,np.pi/3],0.9,1.1,ls='--',zorder=0)\nora[0].set_xticks([-np.pi,-np.pi/2,-np.pi/3,0,np.pi/3,np.pi/2,np.pi])\nora[0].set_xticklabels([r\"-$\\pi$\",r\"-$\\pi$/2\",r\"$L_5$\",'0',r\"$L_4$\",r\"$\\pi$/2\",r\"$\\pi$\"])\n\nsns.kdeplot(abs(nrmlp[:,0])/radeg,nrmla[:,0],shade=True,shade_lowest=None,cmap='Greys',levels=5,alpha=0.5)\nsns.kdeplot(abs(libp[trs,0])/radeg,liba[trs,0],shade=True,shade_lowest=None,cmap='Reds',levels=5,alpha=0.5)\nora[1].set_ylabel(\"Init. SMA Libration (AU)\",fontsize=16)\nora[1].set_xlabel(r\"Init. $\\lambda$ Libration (Deg.)\",fontsize=16)\nora[1].set_xlim(0,35)\norf.tight_layout()\norf.savefig('{0}_Orbits.pdf'.format(target),dpi=300)\n\n#############\n\nnorm = mpl.colors.Normalize(vmin = np.min(.005), vmax = np.max(.015), clip = False)\n\ntim, tax = plt.subplots(figsize=(7,6))\nscatter = tax.scatter(abs(libp[hdx,0])/radeg,hill_cross*20/1e6,c=abs(liba[hdx,0]),cmap='Reds',norm=norm)\ntax.set_xlim(0,35)\ntax.set_xlabel(r\"Initial $\\lambda$ Libration (Deg.)\",fontsize=16)\ntax.set_ylabel('Time of Encounter (Myr)',fontsize=16)\ntim.colorbar(scatter, label='Initial SMA Libration (AU)')\ntax.set_ylim(0,2)\ntim.savefig('{0}_Eject_Perts.pdf'.format(target),dpi=300)\n\n######################\n\nhill_data = np.array((hdx,hill_cross))\nnp.save('{0}_Ejects.npy'.format(target), idx)\nnp.save('{0}_Hillcr.npy'.format(target), hill_data)", "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\ntarget = '10kB'\nradeg = np.pi/180\n\ndef cart_to_pol(x,y):\n r = np.sqrt(x**2 + y**2)\n phi = np.arctan2(y,x)\n return r, phi\n\ndef pol_to_cart(r,phi):\n x = r*np.cos(phi)\n y = r*np.sin(phi)\n return x, y\n\ndef L45(msun,mjup):\n u2 = mjup/(msun+mjup)\n \n x_L4 = 0.5 - u2\n x_L5 = x_L4\n \n y_L4 = np.sqrt(3)/2\n y_L5 = -y_L4\n \n return np.array([x_L4,x_L5]), np.array([y_L4,y_L5])\n\ndef L45_nonnorm(xjup,yjup,xsun,ysun):\n phi_jup = np.arctan2(yjup,xjup)\n \n phi_L4 = phi_jup + np.pi/3\n phi_L5 = phi_jup - np.pi/3\n \n xsep = (xsun - xjup)\n ysep = (ysun - yjup)\n \n r_jupsol = np.sqrt(xsep**2 + ysep**2)\n \n x_L4 = r_jupsol*np.cos(phi_L4)\n x_L5 = r_jupsol*np.cos(phi_L5)\n y_L4 = r_jupsol*np.sin(phi_L4)\n y_L5 = r_jupsol*np.sin(phi_L5)\n \n return np.array([x_L4,x_L5]), np.array([y_L4,y_L5])\n\ndef hill(a,e,m,M):\n return a*(1-e)*np.power(m/(3*M),1/3)\n\n\ndef r_pol(r,psi,M1,M2,a):\n q = M2/M1\n z = np.zeros((len(psi),len(r)))\n for i, phi in 
enumerate(psi):\n x_ = r*np.cos(phi)\n y_ = r*np.sin(phi)\n x = x_/a\n y = y_/a\n s1 = np.sqrt(x**2 + y**2)\n s2 = np.sqrt((x-1)**2 + y**2)\n \n term1 = 2/(s1*(1+q))\n term2 = 2*q/(s2*(1+q))\n term3 = (x - q/(1+q))**2\n term4 = y**2\n z[i] = term1 + term2 + term3 + term4\n return z\n\nast_d = np.load('{0}_Trojandata.npy'.format(target))\nnum_asts = len(ast_d[0,:,0])\nprint(ast_d.shape)\n\njup_d = np.load('{0}_Planetdata.npy'.format(target))\nsol_d = np.load('{0}_Stardata.npy'.format(target))\ntimes = np.load('{0}_Timesteps.npy'.format(target))\n\nast_a = ast_d[0]; ast_e = ast_d[1]; ast_i = ast_d[2] \nast_o = ast_d[3]; ast_p = ast_d[4]; ast_l = ast_d[5]\nast_x = ast_d[6]; ast_y = ast_d[7]; ast_z = ast_d[8]\nast_meda = np.median(ast_a,axis=0)\n\njup_a = jup_d[0]; jup_e = jup_d[1]; jup_i = jup_d[2]; jup_p = jup_d[3]\njup_l = jup_d[4]; jup_x = jup_d[5]; jup_y = jup_d[6]; jup_z = jup_d[7]\nsol_m = sol_d[0]; sol_l = sol_d[1]; sol_x = sol_d[2]; sol_y = sol_d[3]; sol_z = sol_d[4]\njhill = hill(jup_a,jup_e,9.546e-4,sol_m)\ndst_jall = np.sqrt((ast_x - jup_x)**2 + (ast_y - jup_y)**2)\n\nL45x, L45y = L45_nonnorm(jup_x,jup_y,sol_x,sol_y)\nL4_xs = L45x[0]; L4_ys = L45y[0]\nL5_xs = L45x[1]; L5_ys = L45y[1]\n\ni_dif = np.zeros_like(ast_i)\ni_int = ast_i[:,0]\nfor i in range(len(ast_a[0,:])):\n i_dif[:,i] = ast_i[:,i] - i_int\n \nphi_vals = np.linspace(-np.pi,np.pi,500)\nZ = r_pol(jup_a,phi_vals,sol_m,9.546e-4,jup_a)\nPot = np.flip(Z,1)\n\nast_r, ast_h = cart_to_pol(ast_x,ast_y)\njup_r, jup_h = cart_to_pol(jup_x,jup_y)\nphdif = np.zeros_like(ast_h)\nfor i in range(len(jup_h)):\n phdif[:,i] = ast_h[:,i] - jup_h[i]\n \nid4 = []\nid5 = []\nfor i in range(num_asts):\n for it in range(len(jup_h)):\n if phdif[i,it] < -np.pi:\n phdif[i,it] = phdif[i,it] + 2*np.pi\n if phdif[i,it] > np.pi:\n phdif[i,it] = phdif[i,it] - 2*np.pi\n if phdif[i,0] > 0:\n id4.append(i)\n if phdif[i,0] < 0:\n id5.append(i)\n \nprint('Percentage at L4: %2.1f' %(len(id4)*100/num_asts))\n\nliba = np.zeros((num_asts,200))\nlibp = np.zeros((num_asts,200))\nfor i in range(num_asts):\n for n in range(200):\n high = int(500*(n+1))\n loww = int(500*n)\n pmax = np.amax(phdif[i,loww:high])\n pmin = np.amin(phdif[i,loww:high])\n amax = np.amax(ast_a[i,loww:high])\n amin = np.amin(ast_a[i,loww:high])\n amid = np.median(jup_a[loww:high])\n \n if pmax > 0:\n mid = np.pi/3\n if pmax < 0:\n mid = -np.pi/3\n \n lip = ((pmax - mid) + (pmin - mid)) / 2\n lia = ((amax - amid)+(amin - amid)) / 2\n libp[i,n] = abs(lip)\n liba[i,n] = abs(lia)\n \nindices = []\nhillers = []\nfor i in range(num_asts):\n it = 0\n while it < len(ast_meda):\n a_focus = ast_a[i,it]\n a_media = ast_meda[it]\n if a_focus > a_media + 2:\n indices.append(i)\n break\n elif a_focus < a_media - 2:\n indices.append(i)\n break\n else:\n it += 1\n it = 0\n while it < len(jhill):\n d = dst_jall[i,it]\n h = jhill[it]\n if d <= h + 0.1:\n hillers.append(i)\n break\n else:\n it += 1\n\nidx = np.array(indices)\nhdx = np.array(hillers)\n\nhill_not_sma = np.array(list(set(hillers) - set(indices)))\nndx = np.array(list(set(range(num_asts)) - set(indices)))\n\nprint(\"Number of escapers: \", len(indices))\nprint(\"Number of hill crossers: \", len(hillers))\npct = len(indices)/num_asts\nprint('Pct escaped / Total Asts: %0.2f' %pct)\n\nnrm_a = ast_a[ndx]; nrm_e = ast_e[ndx]; nrm_i = ast_i[ndx]; ndifi = i_dif[ndx]; nrmla = liba[ndx]\nnrm_p = ast_p[ndx]; nrm_l = ast_l[ndx]; nrm_x = ast_x[ndx]; nrm_y = ast_y[ndx]; nrmlp = libp[ndx]\n\n\nodd_a = ast_a[idx]; odd_e = ast_e[idx]; odd_i = ast_i[idx]; odifi = 
i_dif[idx]; oddla = liba[idx]\nodd_p = ast_p[idx]; odd_l = ast_l[idx]; odd_x = ast_x[idx]; odd_y = ast_y[idx]; oddlp = libp[idx]\n\nnrm_r, nrmph = cart_to_pol(nrm_x,nrm_y); odd_r, oddph = cart_to_pol(odd_x,odd_y)\njup_r, jupph = cart_to_pol(jup_x,jup_y); sol_r, solph = cart_to_pol(sol_x,sol_y)\nL4_rs, L4phs = cart_to_pol(L4_xs,L4_ys); L5_rs, L5phs = cart_to_pol(L5_xs,L5_ys)\n\ndistj = np.sqrt((odd_x - jup_x)**2 + (odd_y - jup_y)**2)\ndisth = np.sqrt((ast_x[hdx] - jup_x)**2 + (ast_y[hdx] - jup_y)**2)\ndists = np.sqrt((odd_x - sol_x)**2 + (odd_y - sol_y)**2)\njdist = np.sqrt((jup_x - sol_x)**2 + (jup_y - sol_y)**2)\n\nearlies = []\nlaties = []\nhill_cross = np.zeros(len(hdx))\n\nfor i in range(len(odd_a)):\n it = 0\n while it < 100000:\n a_focus = odd_a[i,it]\n a_media = ast_meda[it]\n if a_focus > a_media + 2:\n if it < 33333:\n earlies.append(i)\n break\n elif it > 70000:\n laties.append(i)\n break\n else:\n break\n elif a_focus < a_media - 2:\n if it < 33333:\n earlies.append(i)\n break\n elif it > 70000:\n laties.append(i)\n break\n else:\n break\n else:\n it += 1\n \nfor i in range(len(hdx)):\n it = 0\n while it < 100000:\n d = disth[i,it]\n h = jhill[it]\n if d <= h:\n hill_cross[i] = it\n break\n else:\n it += 1\n \nhorses = []\nfor number,n in enumerate(idx):\n i = 0\n while i < 5000:\n val = phdif[n,i]\n if 170*radeg <= val:\n horses.append(n)\n break\n elif val <= -170*radeg:\n horses.append(n)\n break\n elif -5*radeg <= val <= 5*radeg:\n horses.append(n)\n break\n i += 1\n \nhrs = np.array(horses)\ntrs = np.array( list( set(idx) - set(horses) ) )\n \nedx = np.array(earlies)\nldx = np.array(laties)\n\nprint(\"Number of early escapees: \", len(earlies), \" (escaped before .67 Myr)\")\nprint(\"Number of late escapees: \", len(laties), \" (escaped after %1.2f Myr)\" %(times[70000]/1e6))\npct_e = len(earlies)/len(indices)\npct_l = len(laties)/len(indices)\nprint('Number early / Total escapees: %0.2f' %pct_e)\nprint('Number late / Total escapees: %0.2f' %pct_l)\npcT_e = len(earlies)/num_asts\npcT_l = len(laties)/num_asts\nprint('Number early / Total Asts.: %0.2f' %pcT_e)\nprint('Number late / Total Asts.: %0.2f' %pcT_l)\n\n\nx_axis = np.linspace(0,times[33333]/1e6)\nx_axi2 = np.linspace(times[70000]/1e6,times[-1]/1e6)\n\nfig, ax = plt.subplots(3,figsize=(14,13),sharex=True,gridspec_kw={'height_ratios': [3, 1, .75]})\nplt.subplots_adjust(hspace=0)\n\nax[0].plot(times/1e6,ast_meda,'k',lw=3)\nax[0].vlines([times[33333]/1e6,times[70000]/1e6],5,9.5,'b',alpha=0.8,zorder=0)\n\nax[0].fill_between(x_axis,5*np.ones_like(x_axis),9.5*np.ones_like(x_axis),facecolor='b',alpha=0.2,zorder=0)\nax[0].fill_between(x_axi2,5*np.ones_like(x_axis),9.5*np.ones_like(x_axis),facecolor='b',alpha=0.2,zorder=0)\nax[0].plot(times/1e6,jup_a,'gold',lw=3)\nax[0].legend(['Median Ast.','Planet'],fontsize=16,frameon=False,loc='upper left')\nax[0].set_ylabel('Semimajor Axis / AU',fontsize=16)\nax[0].set_ylim(5,9.5)\nax[0].set_xlim(0,2)\nax[0].text(0.18,7.25,\"%1.i escaped\" %len(earlies),fontsize=25)\nax[0].text(0.8,7.25,\"%2.i escaped\" %(len(indices) - len(earlies) - len(laties)),fontsize=25)\nax[0].text(1.48,7.25,\"%2.i escaped\" %len(laties),fontsize=25)\n\nax[1].plot(times/1e6,sol_l,'orange',lw=3,zorder=10)\nax[1].plot(times/1e6,sol_m,'g',ls=':',lw=3,zorder=10)\nax[1].vlines([times[33333]/1e6,times[70000]/1e6],0,4,'b',alpha=0.8,zorder=0)\nax[1].legend([\"log Stellar Luminosity\", \"Stellar Mass\"],fontsize=16,loc='center left',frameon=False)\nax[1].set_ylabel(\"Solar 
Units\",fontsize=16)\nax[1].set_ylim(0,4)\nax[1].fill_between(x_axis,0*np.ones_like(x_axis),4*np.ones_like(x_axis),facecolor='b',alpha=0.2,zorder=0)\nax[1].fill_between(x_axi2,0*np.ones_like(x_axis),4*np.ones_like(x_axis),facecolor='b',alpha=0.2,zorder=0)\nax[1].set_xlabel('Time / Myr',fontsize=16)\nax[1].set_yticks([0,1,2,3])\n\nax[2].hist(hill_cross*20/1e6,edgecolor='k',facecolor='k',alpha=0.5,range=[0,2],bins=20)\nax[2].set_ylabel(\"Escapes\",fontsize=16)\nax[2].set_xlabel(\"Time / Myr\",fontsize=16)\nax[2].set_ylim(0,35)\nax[2].set_yticks([0,10,20,30])\nfig.savefig('{0}_Timeseries.pdf'.format(target),dpi=300)\n\n############\n\nhist, axh = plt.subplots(1,4,figsize=(20,5))\n\naxh[0].hist(nrm_a[:,0],edgecolor='k',histtype='step',range=[4.95,5.45])\naxh[0].hist(odd_a[:,0],facecolor='r',alpha=0.7,range=[4.95,5.45])\naxh[0].set_xlabel(\"SMA (AU)\",fontsize=16)\naxh[0].set_xlim(4.95,5.45)\n\naxh[1].hist(nrm_e[:,0],edgecolor='k',histtype='step',range=[0,.25])\naxh[1].hist(odd_e[:,0],facecolor='r',alpha=0.7,range=[0,.25])\naxh[1].set_xlabel(\"Eccentricity\",fontsize=16)\naxh[1].set_xlim(0,0.25)\n\naxh[2].hist(abs(nrmla[:,0]),edgecolor='k',histtype='step',range=[0,0.02],bins=20)\naxh[2].hist(abs(liba[trs,0]),facecolor='r',alpha=0.7,range=[0,0.02],bins=20)\naxh[2].set_xlabel(\"SMA Libration Amp. (AU)\",fontsize=16)\naxh[2].set_xlim(0,.02)\naxh[2].set_xticks([0,0.005,0.01,0.015,0.02])\n\nradeg = np.pi/180\naxh[3].hist(abs(nrmlp[:,0])/radeg,edgecolor='k',histtype='step',range=[0,35])\naxh[3].hist(abs(libp[trs,0])/radeg,facecolor='r',alpha=0.7,range=[0,35])\naxh[3].set_xlabel(r\"$\\lambda$ Libration Amplitude (Deg.)\",fontsize=16)\naxh[3].set_xlim(0,35)\naxh[3].legend(labels=['Stable','Escaped'],fontsize=14,frameon=False,loc='upper right')\n\nhist.suptitle('Initial conditions',fontsize=18)\nhist.savefig('{0}_Histograms.pdf'.format(target),dpi=300)\n\n#############\n\norf, ora = plt.subplots(1,2,figsize=(15,5),gridspec_kw={'width_ratios': [2, 1]})\nfor i in range(len(ndx)):\n ora[0].plot(phdif[ndx[i],:500],ast_a[ndx[i],:500]/5.2,'k',alpha=0.01,zorder=5)\nfor i,tr in enumerate(trs):\n ora[0].plot(phdif[tr,:500],ast_a[tr,:500]/5.2,'r',alpha=0.05,zorder=10)\nora[0].set_xlim(-np.pi,np.pi)\nora[0].set_ylim(.9,1.1)\nora[0].set_xlabel(r\"$\\phi - \\phi_{jup}$\",fontsize=16)\nora[0].set_ylabel(r\"SMA / $a_{jup}$\",fontsize=16)\nora[0].vlines([-np.pi/3,np.pi/3],0.9,1.1,ls='--',zorder=0)\nora[0].set_xticks([-np.pi,-np.pi/2,-np.pi/3,0,np.pi/3,np.pi/2,np.pi])\nora[0].set_xticklabels([r\"-$\\pi$\",r\"-$\\pi$/2\",r\"$L_5$\",'0',r\"$L_4$\",r\"$\\pi$/2\",r\"$\\pi$\"])\n\nsns.kdeplot(abs(nrmlp[:,0])/radeg,nrmla[:,0],shade=True,shade_lowest=None,cmap='Greys',levels=5,alpha=0.5)\nsns.kdeplot(abs(libp[trs,0])/radeg,liba[trs,0],shade=True,shade_lowest=None,cmap='Reds',levels=5,alpha=0.5)\nora[1].set_ylabel(\"Init. SMA Libration (AU)\",fontsize=16)\nora[1].set_xlabel(r\"Init. 
$\\lambda$ Libration (Deg.)\",fontsize=16)\nora[1].set_xlim(0,35)\norf.tight_layout()\norf.savefig('{0}_Orbits.pdf'.format(target),dpi=300)\n\n#############\n\nnorm = mpl.colors.Normalize(vmin = np.min(.005), vmax = np.max(.015), clip = False)\n\ntim, tax = plt.subplots(figsize=(7,6))\nscatter = tax.scatter(abs(libp[hdx,0])/radeg,hill_cross*20/1e6,c=abs(liba[hdx,0]),cmap='Reds',norm=norm)\ntax.set_xlim(0,35)\ntax.set_xlabel(r\"Initial $\\lambda$ Libration (Deg.)\",fontsize=16)\ntax.set_ylabel('Time of Encounter (Myr)',fontsize=16)\ntim.colorbar(scatter, label='Initial SMA Libration (AU)')\ntax.set_ylim(0,2)\ntim.savefig('{0}_Eject_Perts.pdf'.format(target),dpi=300)\n\n######################\n\nhill_data = np.array((hdx,hill_cross))\nnp.save('{0}_Ejects.npy'.format(target), idx)\nnp.save('{0}_Hillcr.npy'.format(target), hill_data)" ]
[ [ "numpy.amax", "numpy.ones_like", "numpy.sqrt", "numpy.linspace", "numpy.power", "numpy.amin", "numpy.min", "numpy.median", "matplotlib.pyplot.subplots", "numpy.cos", "numpy.sin", "numpy.arctan2", "numpy.max", "numpy.zeros_like", "matplotlib.pyplot.subplots_adjust", "numpy.array", "numpy.flip", "numpy.zeros" ], [ "numpy.amax", "numpy.ones_like", "numpy.sqrt", "numpy.linspace", "numpy.power", "numpy.amin", "numpy.min", "numpy.median", "matplotlib.pyplot.subplots", "numpy.cos", "numpy.sin", "numpy.arctan2", "numpy.max", "numpy.zeros_like", "matplotlib.pyplot.subplots_adjust", "numpy.array", "numpy.flip", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
PowerOlive/mindspore
[ "665ec683d4af85c71b2a1f0d6829356f2bc0e1ff", "665ec683d4af85c71b2a1f0d6829356f2bc0e1ff", "665ec683d4af85c71b2a1f0d6829356f2bc0e1ff", "665ec683d4af85c71b2a1f0d6829356f2bc0e1ff", "665ec683d4af85c71b2a1f0d6829356f2bc0e1ff", "665ec683d4af85c71b2a1f0d6829356f2bc0e1ff", "665ec683d4af85c71b2a1f0d6829356f2bc0e1ff", "665ec683d4af85c71b2a1f0d6829356f2bc0e1ff", "bda20724a94113cedd12c3ed9083141012da1f15", "665ec683d4af85c71b2a1f0d6829356f2bc0e1ff", "665ec683d4af85c71b2a1f0d6829356f2bc0e1ff", "665ec683d4af85c71b2a1f0d6829356f2bc0e1ff", "bda20724a94113cedd12c3ed9083141012da1f15", "665ec683d4af85c71b2a1f0d6829356f2bc0e1ff", "665ec683d4af85c71b2a1f0d6829356f2bc0e1ff", "665ec683d4af85c71b2a1f0d6829356f2bc0e1ff", "665ec683d4af85c71b2a1f0d6829356f2bc0e1ff", "665ec683d4af85c71b2a1f0d6829356f2bc0e1ff", "665ec683d4af85c71b2a1f0d6829356f2bc0e1ff", "665ec683d4af85c71b2a1f0d6829356f2bc0e1ff", "665ec683d4af85c71b2a1f0d6829356f2bc0e1ff", "665ec683d4af85c71b2a1f0d6829356f2bc0e1ff", "bda20724a94113cedd12c3ed9083141012da1f15", "665ec683d4af85c71b2a1f0d6829356f2bc0e1ff", "665ec683d4af85c71b2a1f0d6829356f2bc0e1ff", "665ec683d4af85c71b2a1f0d6829356f2bc0e1ff", "665ec683d4af85c71b2a1f0d6829356f2bc0e1ff", "665ec683d4af85c71b2a1f0d6829356f2bc0e1ff", "665ec683d4af85c71b2a1f0d6829356f2bc0e1ff", "bda20724a94113cedd12c3ed9083141012da1f15", "665ec683d4af85c71b2a1f0d6829356f2bc0e1ff", "665ec683d4af85c71b2a1f0d6829356f2bc0e1ff", "665ec683d4af85c71b2a1f0d6829356f2bc0e1ff", "665ec683d4af85c71b2a1f0d6829356f2bc0e1ff" ]
[ "tests/st/pynative/data_parallel/test_pynative_hccl_allreduce.py", "tests/st/auto_monad/test_auto_monad_layer.py", "tests/st/ops/cpu/test_broadcast_to_op.py", "tests/st/scipy_st/test_utils.py", "tests/st/auto_monad/test_effect_random.py", "tests/ut/python/parallel/test_auto_parallel_shard_propagation2.py", "tests/ut/python/dataset/test_datasets_generator.py", "tests/ut/python/parallel/test_auto_parallel_two_partial_matmul.py", "mindspore/python/mindspore/ops/operations/array_ops.py", "tests/st/pynative/test_pynative_heterogeneous.py", "tests/st/ops/cpu/test_l2loss_op.py", "tests/st/dump/test_async_a_plus_m_dump.py", "tests/st/fallback/test_graph_fallback_print.py", "tests/st/ops/ascend/test_aicpu_ops/test_stack.py", "tests/st/ops/cpu/test_bias_add_grad.py", "tests/st/ops/gpu/test_batchnorm_op.py", "tests/ut/python/dataset/test_bandreject_biquad.py", "tests/st/control/inner/test_031_for_in_while.py", "tests/ut/python/parallel/test_batch_matmul.py", "tests/st/ops/graph_kernel/test_assign_add.py", "tests/ut/python/parallel/test_batchmm.py", "tests/st/ops/cpu/test_random_choice_with_mask_op.py", "tests/st/ops/ascend/test_dynamic_ops.py", "tests/st/ops/gpu/test_adagrad_op.py", "tests/st/nccl/test_nccl_neighbor_exchange_op.py", "mindspore/python/mindspore/mindrecord/tools/cifar10.py", "tests/st/ops/graph_kernel/test_identity.py", "tests/ut/python/nn/test_cell.py", "tests/ut/python/utils/test_callback.py", "mindspore/lite/examples/export_models/models/vgg_train_export.py", "tests/st/control/test_recrusive_fun.py", "tests/st/ops/cpu/test_greater_op.py", "tests/st/ops/gpu/test_layer_norm_grad_grad_op.py", "tests/st/ops/cpu/test_less_op.py" ]
[ "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\n\"\"\"test hccl allreduce performance with 8p\"\"\"\n\nimport os\nfrom multiprocessing import Process, Queue\nimport pytest\nimport numpy as np\nimport mindspore.nn as nn\nfrom mindspore import Tensor\nfrom mindspore import dtype as mstype\nfrom mindspore.ops import operations as P\nimport mindspore.communication.management as D\nfrom mindspore import context\nfrom mindspore.context import ParallelMode\n\nMINDSPORE_HCCL_CONFIG_PATH = \"/home/workspace/mindspore_config/hccl/rank_table_8p.json\"\n\nnp.random.seed(1)\nos.environ['GLOG_v'] = str(2)\n\nclass AllReduceNet(nn.Cell):\n def __init__(self):\n super(AllReduceNet, self).__init__()\n self.mul = P.Mul()\n self.all_reduce = P.AllReduce()\n self.add = P.Add()\n\n def construct(self, x):\n x = self.mul(x, 2)\n y1 = Tensor(np.array([[2, 2, 2, 2], [2, 2, 2, 2], [2, 2, 2, 2]])).astype(np.float32)\n z = self.add(x, y1)\n z = self.all_reduce(z)\n y2 = Tensor(np.array([[-16, -16, -16, -16], [-16, -16, -16, -16], [-16, -16, -16, -16]])).astype(np.float32)\n out = self.add(z, y2)\n out = self.all_reduce(out)\n out = self.mul(out, 2)\n return out\n\ndef train_allreduce_8p(q, device_id, device_num):\n os.system(\"mkdir \" + str(device_id))\n os.chdir(str(device_id))\n context.set_context(mode=context.PYNATIVE_MODE, device_target=\"Ascend\", device_id=device_id)\n os.environ['MINDSPORE_HCCL_CONFIG_PATH'] = MINDSPORE_HCCL_CONFIG_PATH\n os.environ['RANK_ID'] = str(device_id)\n os.environ['RANK_SIZE'] = str(device_num)\n D.init()\n context.reset_auto_parallel_context()\n context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=False,\n device_num=device_num)\n\n net = AllReduceNet()\n input_x = np.ones([3, 4]).astype(np.float32)\n output = net(Tensor(input_x, mstype.float32))\n q.put(output)\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_single\ndef test_pynative_hccl_allreduce_8p():\n device_num = 8\n process = []\n q = Queue()\n for i in range(device_num):\n device_id = i\n process.append(Process(target=train_allreduce_8p, args=(q, device_id, device_num)))\n\n for i in range(device_num):\n process[i].start()\n\n print(\"Waiting for all subprocesses done...\")\n\n for i in range(device_num):\n process[i].join()\n\n # check result\n for i in range(device_num):\n expect_output = [[256, 256, 256, 256], [256, 256, 256, 256], [256, 256, 256, 256]]\n assert not q.empty()\n output = Tensor(q.get())\n assert np.allclose(output.asnumpy(), expect_output)\n\n for i in range(device_num):\n os.system(\"rm -rf \" + str(i))\n\n print(\"End training...\")\n", "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# 
http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nfrom tqdm import tqdm\nimport numpy as np\nimport mindspore as ms\nimport mindspore.nn as nn\nfrom mindspore.dataset import NumpySlicesDataset\nfrom mindspore import context, Tensor\n\ncontext.set_context(mode=context.GRAPH_MODE, device_target=\"Ascend\")\n\nclass AutoEncoderTrainNetwork(nn.Cell):\n def __init__(self):\n super(AutoEncoderTrainNetwork, self).__init__()\n self.loss_fun = nn.MSELoss()\n self.net = nn.CellList([nn.Dense(2, 32), nn.Dense(32, 2)])\n self.relu = nn.ReLU()\n\n def reconstruct_sample(self, x: Tensor):\n for _, layer in enumerate(self.net):\n x = layer(x)\n x = self.relu(x)\n return x\n\n def construct(self, x: Tensor):\n recon_x = self.reconstruct_sample(x)\n return self.loss_fun(recon_x, x)\n\n def sample_2d_data(self, n_normals=2000, n_outliers=400):\n z = np.random.randn(n_normals, 2)\n outliers = np.random.uniform(low=-6, high=6, size=(n_outliers, 2))\n centers = np.array([(2., 0), (-2., 0)])\n sigma = 0.3\n normal_points = sigma * z + centers[np.random.randint(len(centers), size=(n_normals,))]\n return np.vstack((normal_points, outliers))\n\n def create_synthetic_dataset(self):\n transformed_dataset = self.sample_2d_data()\n for dim in range(transformed_dataset.shape[1]):\n min_val = transformed_dataset[:, dim].min()\n max_val = transformed_dataset[:, dim].max()\n if min_val != max_val:\n transformed_dataset[:, dim] = (transformed_dataset[:, dim] - min_val) / (max_val - min_val)\n elif min_val != 1:\n transformed_dataset[:, dim] = transformed_dataset[:, dim] / min_val\n transformed_dataset = transformed_dataset.astype(np.float32)\n return transformed_dataset\n\n\ndef test_auto_monad_layer():\n ae_with_loss = AutoEncoderTrainNetwork()\n transformed_dataset = ae_with_loss.create_synthetic_dataset()\n dataloader = NumpySlicesDataset(data=(transformed_dataset,), shuffle=True)\n dataloader = dataloader.batch(batch_size=16)\n optim = nn.RMSProp(params=ae_with_loss.trainable_params(), learning_rate=0.002,)\n train_net = nn.TrainOneStepCell(ae_with_loss, optim)\n train_net.set_train()\n gen_samples = dict()\n num_epoch = 21\n for epoch in tqdm(range(num_epoch)):\n loss = []\n for _, (batch,) in enumerate(dataloader):\n batch = Tensor(batch, dtype=ms.float32)\n loss_ = train_net(batch)\n loss.append(loss_.asnumpy())\n avg_loss = np.array(loss).mean()\n if epoch % 10 == 0:\n gen_samples[epoch] = ae_with_loss.reconstruct_sample(Tensor(transformed_dataset)).asnumpy()\n print(f\"epoch: {epoch}/{num_epoch}, avg loss: {avg_loss}\")\n", "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# 
============================================================================\n\nimport numpy as np\nimport pytest\n\nimport mindspore.context as context\nfrom mindspore.common.tensor import Tensor\nfrom mindspore.ops import operations as P\n\n\n@pytest.mark.level0\n@pytest.mark.platform_x86_cpu\n@pytest.mark.env_onecard\ndef test_broadcast():\n context.set_context(mode=context.GRAPH_MODE, device_target='CPU')\n\n shape = (4, 5, 2, 3, 4, 5, 6)\n x_np = np.random.rand(2, 3, 1, 5, 1).astype(np.float32)\n output = P.BroadcastTo(shape)(Tensor(x_np))\n expect = np.broadcast_to(x_np, shape)\n assert np.allclose(output.asnumpy(), expect)\n\n shape = (3, 5, 7, 4, 5, 6)\n x_np = np.arange(20).reshape((4, 5, 1)).astype(np.int32)\n output = P.BroadcastTo(shape)(Tensor(x_np))\n expect = np.broadcast_to(x_np, shape)\n assert np.allclose(output.asnumpy(), expect)\n\n shape = (8, 5, 7, 4, 5, 6)\n x_np = np.arange(24).reshape((1, 4, 1, 6)).astype(np.bool)\n output = P.BroadcastTo(shape)(Tensor(x_np))\n expect = np.broadcast_to(x_np, shape)\n assert np.allclose(output.asnumpy(), expect)\n\n shape = (3, 4, 5, 2, 3, 4, 5, 7)\n x_np = np.random.rand(2, 3, 1, 5, 1).astype(np.float16)\n output = P.BroadcastTo(shape)(Tensor(x_np))\n expect = np.broadcast_to(x_np, shape)\n assert np.allclose(output.asnumpy(), expect)\n\n shape = (3, 4, 5, 6)\n x_np = np.random.rand(3, 1, 5, 1).astype(np.float32)\n output = P.BroadcastTo(shape)(Tensor(x_np))\n expect = np.broadcast_to(x_np, shape)\n assert np.allclose(output.asnumpy(), expect)\n\n x1_np = np.random.rand(3, 1, 5, 1).astype(np.float16)\n output = P.BroadcastTo(shape)(Tensor(x1_np))\n expect = np.broadcast_to(x1_np, shape)\n assert np.allclose(output.asnumpy(), expect)\n\n shape = (2, 3, 4, 5)\n x1_np = np.random.rand(4, 5).astype(np.float32)\n output = P.BroadcastTo(shape)(Tensor(x1_np))\n expect = np.broadcast_to(x1_np, shape)\n assert np.allclose(output.asnumpy(), expect)\n\n shape = (4, 5)\n x1_np = np.ones((1,)).astype(np.bool_)\n output = P.BroadcastTo(shape)(Tensor(x1_np))\n expect = np.broadcast_to(x1_np, shape)\n assert np.allclose(output.asnumpy(), expect)\n\n\n@pytest.mark.level0\n@pytest.mark.platform_x86_cpu\n@pytest.mark.env_onecard\ndef test_broadcast_dyn_init():\n \"\"\"\n Test running the op with -1's in the init shape to support varied inputs.\n \"\"\"\n context.set_context(mode=context.GRAPH_MODE, device_target='CPU')\n\n ms_shape = (-1, 4, 5, 6)\n np_shape = (3, 4, 5, 6)\n x_np = np.random.rand(3, 1, 5, 1).astype(np.float32)\n output = P.BroadcastTo(ms_shape)(Tensor(x_np))\n expect = np.broadcast_to(x_np, np_shape)\n assert np.allclose(output.asnumpy(), expect)\n\n x1_np = np.random.rand(3, 1, 5, 1).astype(np.float16)\n output = P.BroadcastTo(ms_shape)(Tensor(x1_np))\n expect = np.broadcast_to(x1_np, np_shape)\n assert np.allclose(output.asnumpy(), expect)\n\n ms_shape = (2, 3, -1, 5)\n np_shape = (2, 3, 4, 5)\n x1_np = np.random.rand(4, 5).astype(np.float32)\n output = P.BroadcastTo(ms_shape)(Tensor(x1_np))\n expect = np.broadcast_to(x1_np, np_shape)\n assert np.allclose(output.asnumpy(), expect)\n\n\n@pytest.mark.level0\n@pytest.mark.platform_x86_cpu\n@pytest.mark.env_onecard\ndef test_broadcast_dyn_invalid_init():\n \"\"\"\n Test running the op with -1's in the init shape in incorrect positions.\n Expected to fail.\n \"\"\"\n context.set_context(mode=context.GRAPH_MODE, device_target='CPU')\n ms_shape = (2, -1, 4, 5)\n x_np = np.random.rand(4, 5).astype(np.float32)\n with pytest.raises(ValueError):\n P.BroadcastTo(ms_shape)(Tensor(x_np))\n", "# Copyright 2021 Huawei 
Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"st for scipy.utils\"\"\"\n\nimport pytest\nimport numpy as onp\nfrom mindspore import context, Tensor\nfrom mindspore.scipy.utils import _safe_normalize\n\n\n@pytest.mark.level0\n@pytest.mark.platform_x86_gpu_training\n@pytest.mark.platform_x86_cpu\n@pytest.mark.env_onecard\n@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])\n@pytest.mark.parametrize('shape', [(10,), (10, 1)])\n@pytest.mark.parametrize('dtype', [onp.float32, onp.float64])\ndef test_safe_normalize(mode, shape, dtype):\n \"\"\"\n Feature: ALL TO ALL\n Description: test cases for _safe_normalize\n Expectation: the result match scipy\n \"\"\"\n context.set_context(mode=mode)\n x = onp.random.random(shape).astype(dtype)\n normalized_x, x_norm = _safe_normalize(Tensor(x))\n\n normalized_x = normalized_x.asnumpy()\n x_norm = x_norm.asnumpy()\n assert onp.allclose(onp.sum(normalized_x ** 2), 1)\n assert onp.allclose(x / x_norm, normalized_x)\n", "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport pytest\nimport numpy as np\nimport mindspore.nn as nn\nimport mindspore.ops.operations as P\nimport mindspore.nn.probability.distribution as msd\nfrom mindspore import context, Tensor\nfrom mindspore.ops import composite as C\nfrom mindspore.common import dtype as mstype\nfrom mindspore import dtype\n\ncontext.set_context(mode=context.GRAPH_MODE, device_target=\"Ascend\")\n\n\nclass Sampling(nn.Cell):\n \"\"\"\n Test class: sample of Normal distribution.\n \"\"\"\n\n def __init__(self, shape, seed=0):\n super(Sampling, self).__init__()\n self.n1 = msd.Normal(0, 1, seed=seed, dtype=dtype.float32)\n self.shape = shape\n\n def construct(self, mean=None, sd=None):\n s1 = self.n1.sample(self.shape, mean, sd)\n s2 = self.n1.sample(self.shape, mean, sd)\n s3 = self.n1.sample(self.shape, mean, sd)\n return s1, s2, s3\n\n\n@pytest.mark.level0\n@pytest.mark.platform_arm_ascend_training\n@pytest.mark.platform_x86_ascend_training\n@pytest.mark.env_onecard\ndef test_sample_graph():\n shape = (2, 3)\n seed = 0\n samp = Sampling(shape, seed=seed)\n sample1, sample2, sample3 = samp()\n assert ((sample1 != sample2).any() and (sample1 != sample3).any() and (sample2 != sample3).any()), \\\n \"The results should be different!\"\n\n\nclass CompositeNormalNet(nn.Cell):\n def __init__(self, 
shape=None, seed=0):\n super(CompositeNormalNet, self).__init__()\n self.shape = shape\n self.seed = seed\n\n def construct(self, mean, stddev):\n s1 = C.normal(self.shape, mean, stddev, self.seed)\n s2 = C.normal(self.shape, mean, stddev, self.seed)\n s3 = C.normal(self.shape, mean, stddev, self.seed)\n return s1, s2, s3\n\n\n@pytest.mark.level0\n@pytest.mark.platform_arm_ascend_training\n@pytest.mark.platform_x86_ascend_training\n@pytest.mark.env_onecard\ndef test_composite_normal():\n shape = (3, 2, 4)\n mean = Tensor(0.0, mstype.float32)\n stddev = Tensor(1.0, mstype.float32)\n net = CompositeNormalNet(shape)\n s1, s2, s3 = net(mean, stddev)\n assert ((s1 != s2).any() and (s1 != s3).any() and (s2 != s3).any()), \\\n \"The results should be different!\"\n\n\nclass CompositeLaplaceNet(nn.Cell):\n def __init__(self, shape=None, seed=0):\n super(CompositeLaplaceNet, self).__init__()\n self.shape = shape\n self.seed = seed\n\n def construct(self, mean, lambda_param):\n s1 = C.laplace(self.shape, mean, lambda_param, self.seed)\n s2 = C.laplace(self.shape, mean, lambda_param, self.seed)\n s3 = C.laplace(self.shape, mean, lambda_param, self.seed)\n return s1, s2, s3\n\n\n@pytest.mark.level0\n@pytest.mark.platform_arm_ascend_training\n@pytest.mark.platform_x86_ascend_training\n@pytest.mark.env_onecard\ndef test_composite_laplace():\n shape = (3, 2, 4)\n mean = Tensor(1.0, mstype.float32)\n lambda_param = Tensor(1.0, mstype.float32)\n net = CompositeLaplaceNet(shape)\n s1, s2, s3 = net(mean, lambda_param)\n assert ((s1 != s2).any() and (s1 != s3).any() and (s2 != s3).any()), \\\n \"The results should be different!\"\n\n\nclass CompositeGammaNet(nn.Cell):\n def __init__(self, shape=None, seed=0):\n super(CompositeGammaNet, self).__init__()\n self.shape = shape\n self.seed = seed\n\n def construct(self, alpha, beta):\n s1 = C.gamma(self.shape, alpha, beta, self.seed)\n s2 = C.gamma(self.shape, alpha, beta, self.seed)\n s3 = C.gamma(self.shape, alpha, beta, self.seed)\n return s1, s2, s3\n\n\n@pytest.mark.level0\n@pytest.mark.platform_arm_ascend_training\n@pytest.mark.platform_x86_ascend_training\n@pytest.mark.env_onecard\ndef test_composite_gamma():\n shape = (3, 2, 4)\n alpha = Tensor(1.0, mstype.float32)\n beta = Tensor(1.0, mstype.float32)\n net = CompositeGammaNet(shape)\n s1, s2, s3 = net(alpha, beta)\n assert ((s1 != s2).any() and (s1 != s3).any() and (s2 != s3).any()), \\\n \"The results should be different!\"\n\n\nclass CompositePoissonNet(nn.Cell):\n def __init__(self, shape=None, seed=0):\n super(CompositePoissonNet, self).__init__()\n self.shape = shape\n self.seed = seed\n\n def construct(self, mean):\n s1 = C.poisson(self.shape, mean, self.seed)\n s2 = C.poisson(self.shape, mean, self.seed)\n s3 = C.poisson(self.shape, mean, self.seed)\n return s1, s2, s3\n\n\n@pytest.mark.level0\n@pytest.mark.platform_arm_ascend_training\n@pytest.mark.platform_x86_ascend_training\n@pytest.mark.env_onecard\ndef test_composite_poisson():\n shape = (3, 2, 4)\n mean = Tensor(2.0, mstype.float32)\n net = CompositePoissonNet(shape)\n s1, s2, s3 = net(mean)\n assert ((s1 != s2).any() and (s1 != s3).any() and (s2 != s3).any()), \\\n \"The results should be different!\"\n\n\nclass CompositeUniformNet(nn.Cell):\n def __init__(self, shape=None, seed=0):\n super(CompositeUniformNet, self).__init__()\n self.shape = shape\n self.seed = seed\n\n def construct(self, a, b):\n s1 = C.uniform(self.shape, a, b, self.seed)\n s2 = C.uniform(self.shape, a, b, self.seed)\n s3 = C.uniform(self.shape, a, b, self.seed)\n return s1, s2, s3\n\n\n@pytest.mark.level0\n
@pytest.mark.platform_arm_ascend_training\n@pytest.mark.platform_x86_ascend_training\n@pytest.mark.env_onecard\ndef test_composite_uniform():\n shape = (3, 2, 4)\n a = Tensor(0.0, mstype.float32)\n b = Tensor(1.0, mstype.float32)\n net = CompositeUniformNet(shape)\n s1, s2, s3 = net(a, b)\n assert ((s1 != s2).any() and (s1 != s3).any() and (s2 != s3).any()), \\\n \"The results should be different!\"\n\n\nclass StandardNormalNet(nn.Cell):\n def __init__(self, shape, seed=0, seed2=0):\n super(StandardNormalNet, self).__init__()\n self.shape = shape\n self.seed = seed\n self.seed2 = seed2\n self.standard_normal = P.StandardNormal(seed, seed2)\n\n def construct(self):\n s1 = self.standard_normal(self.shape)\n s2 = self.standard_normal(self.shape)\n s3 = self.standard_normal(self.shape)\n return s1, s2, s3\n\n\n@pytest.mark.level0\n@pytest.mark.platform_arm_ascend_training\n@pytest.mark.platform_x86_ascend_training\n@pytest.mark.env_onecard\ndef test_standard_normal():\n shape = (4, 16)\n net = StandardNormalNet(shape)\n s1, s2, s3 = net()\n assert ((s1 != s2).any() and (s1 != s3).any() and (s2 != s3).any()), \\\n \"The results should be different!\"\n\n\nclass StandardLaplaceNet(nn.Cell):\n def __init__(self, shape, seed=0, seed2=0):\n super(StandardLaplaceNet, self).__init__()\n self.shape = shape\n self.seed = seed\n self.seed2 = seed2\n self.standard_laplace = P.StandardLaplace(seed, seed2)\n\n def construct(self):\n s1 = self.standard_laplace(self.shape)\n s2 = self.standard_laplace(self.shape)\n s3 = self.standard_laplace(self.shape)\n return s1, s2, s3\n\n\n@pytest.mark.level0\n@pytest.mark.platform_arm_ascend_training\n@pytest.mark.platform_x86_ascend_training\n@pytest.mark.env_onecard\ndef test_standard_laplace():\n shape = (4, 16)\n net = StandardLaplaceNet(shape)\n s1, s2, s3 = net()\n assert ((s1 != s2).any() and (s1 != s3).any() and (s2 != s3).any()), \\\n \"The results should be different!\"\n\n\nclass GammaNet(nn.Cell):\n def __init__(self, shape, alpha, beta, seed=0, seed2=0):\n super(GammaNet, self).__init__()\n self.shape = shape\n self.alpha = alpha\n self.beta = beta\n self.seed = seed\n self.seed2 = seed2\n self.gamma = P.Gamma(seed, seed2)\n\n def construct(self):\n s1 = self.gamma(self.shape, self.alpha, self.beta)\n s2 = self.gamma(self.shape, self.alpha, self.beta)\n s3 = self.gamma(self.shape, self.alpha, self.beta)\n return s1, s2, s3\n\n\n@pytest.mark.level0\n@pytest.mark.platform_arm_ascend_training\n@pytest.mark.platform_x86_ascend_training\n@pytest.mark.env_onecard\ndef test_gamma():\n shape = (4, 16)\n alpha = Tensor(1.0, mstype.float32)\n beta = Tensor(1.0, mstype.float32)\n net = GammaNet(shape, alpha, beta)\n s1, s2, s3 = net()\n assert ((s1 != s2).any() and (s1 != s3).any() and (s2 != s3).any()), \\\n \"The results should be different!\"\n\n\nclass PoissonNet(nn.Cell):\n def __init__(self, shape, seed=0, seed2=0):\n super(PoissonNet, self).__init__()\n self.shape = shape\n self.seed = seed\n self.seed2 = seed2\n self.poisson = P.Poisson(seed, seed2)\n\n def construct(self, mean):\n s1 = self.poisson(self.shape, mean)\n s2 = self.poisson(self.shape, mean)\n s3 = self.poisson(self.shape, mean)\n return s1, s2, s3\n\n\n@pytest.mark.level0\n@pytest.mark.platform_arm_ascend_training\n@pytest.mark.platform_x86_ascend_training\n@pytest.mark.env_onecard\ndef test_poisson():\n shape = (4, 16)\n mean = Tensor(5.0, mstype.float32)\n net = PoissonNet(shape=shape)\n s1, s2, s3 = net(mean)\n assert ((s1 != s2).any() and (s1 != s3).any() and (s2 != s3).any()), \\\n \"The results should be different!\"\n\n\nclass UniformIntNet(nn.Cell):\n 
def __init__(self, shape, seed=0, seed2=0):\n super(UniformIntNet, self).__init__()\n self.shape = shape\n self.seed = seed\n self.seed2 = seed2\n self.uniform_int = P.UniformInt(seed, seed2)\n\n def construct(self, minval, maxval):\n s1 = self.uniform_int(self.shape, minval, maxval)\n s2 = self.uniform_int(self.shape, minval, maxval)\n s3 = self.uniform_int(self.shape, minval, maxval)\n return s1, s2, s3\n\n\n@pytest.mark.level0\n@pytest.mark.platform_arm_ascend_training\n@pytest.mark.platform_x86_ascend_training\n@pytest.mark.env_onecard\ndef test_uniform_int():\n shape = (4, 16)\n minval = Tensor(1, mstype.int32)\n maxval = Tensor(5, mstype.int32)\n net = UniformIntNet(shape)\n s1, s2, s3 = net(minval, maxval)\n assert ((s1 != s2).any() and (s1 != s3).any() and (s2 != s3).any()), \\\n \"The results should be different!\"\n\n\nclass UniformRealNet(nn.Cell):\n def __init__(self, shape, seed=0, seed2=0):\n super(UniformRealNet, self).__init__()\n self.shape = shape\n self.seed = seed\n self.seed2 = seed2\n self.uniform_real = P.UniformReal(seed, seed2)\n\n def construct(self):\n s1 = self.uniform_real(self.shape)\n s2 = self.uniform_real(self.shape)\n s3 = self.uniform_real(self.shape)\n return s1, s2, s3\n\n\n@pytest.mark.level0\n@pytest.mark.platform_arm_ascend_training\n@pytest.mark.platform_x86_ascend_training\n@pytest.mark.env_onecard\ndef test_uniform_real():\n shape = (4, 16)\n net = UniformRealNet(shape)\n s1, s2, s3 = net()\n assert ((s1 != s2).any() and (s1 != s3).any() and (s2 != s3).any()), \\\n \"The results should be different!\"\n\n\nclass DropoutGenMaskNet(nn.Cell):\n def __init__(self, shape):\n super(DropoutGenMaskNet, self).__init__()\n self.shape = shape\n self.dropout_gen_mask = P.DropoutGenMask(Seed0=0, Seed1=0)\n\n def construct(self, keep_prob):\n s1 = self.dropout_gen_mask(self.shape, keep_prob)\n s2 = self.dropout_gen_mask(self.shape, keep_prob)\n s3 = self.dropout_gen_mask(self.shape, keep_prob)\n return s1, s2, s3\n\n\n@pytest.mark.level0\n@pytest.mark.platform_arm_ascend_training\n@pytest.mark.platform_x86_ascend_training\n@pytest.mark.env_onecard\ndef test_dropout_gen_mask():\n shape = (2, 4, 5)\n keep_prob = Tensor(0.5, mstype.float32)\n net = DropoutGenMaskNet(shape)\n s1, s2, s3 = net(keep_prob)\n assert ((s1 != s2).any() and (s1 != s3).any() and (s2 != s3).any()), \\\n \"The results should be different!\"\n\n\nclass RandomChoiceWithMaskNet(nn.Cell):\n def __init__(self):\n super(RandomChoiceWithMaskNet, self).__init__()\n self.rnd_choice_mask = P.RandomChoiceWithMask(count=4, seed=0)\n\n def construct(self, x):\n index1, _ = self.rnd_choice_mask(x)\n index2, _ = self.rnd_choice_mask(x)\n index3, _ = self.rnd_choice_mask(x)\n return index1, index2, index3\n\n\n@pytest.mark.level0\n@pytest.mark.platform_arm_ascend_training\n@pytest.mark.platform_x86_ascend_training\n@pytest.mark.env_onecard\ndef test_random_choice_with_mask():\n mode = context.get_context('mode')\n assert (mode == context.GRAPH_MODE), 'GRAPH_MODE required but got ' + str(mode)\n net = RandomChoiceWithMaskNet()\n x = Tensor(np.array([[1, 0, 1, 0], [0, 0, 0, 1], [1, 1, 1, 1], [0, 0, 0, 1]]).astype(np.bool))\n index1, index2, index3 = net(x)\n assert ((index1 != index2).any() and (index1 != index3).any() and (index2 != index3).any()), \\\n \"The results should be different!\"\n\n\nclass RandomCategoricalNet(nn.Cell):\n def __init__(self, num_sample):\n super(RandomCategoricalNet, self).__init__()\n self.random_categorical = P.RandomCategorical(mstype.int64)\n self.num_sample = num_sample\n\n def construct(self, logits, seed=0):\n s1 = 
self.random_categorical(logits, self.num_sample, seed)\n s2 = self.random_categorical(logits, self.num_sample, seed)\n s3 = self.random_categorical(logits, self.num_sample, seed)\n return s1, s2, s3\n\n\n@pytest.mark.level0\n@pytest.mark.platform_arm_ascend_training\n@pytest.mark.platform_x86_ascend_training\n@pytest.mark.env_onecard\ndef test_random_categorical():\n num_sample = 8\n net = RandomCategoricalNet(num_sample)\n x = Tensor(np.random.random((10, 5)).astype(np.float32))\n # Outputs may be the same, only basic functions are verified here.\n net(x)\n", "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport mindspore as ms\nimport mindspore.common.dtype as mstype\nfrom mindspore import context, Tensor, Parameter\nfrom mindspore.common.api import _cell_graph_executor\nfrom mindspore.nn import Cell, TrainOneStepCell, Momentum\nfrom mindspore.ops import operations as P\n\n\nclass Net(Cell):\n def __init__(self, mul_weight, strategy1=None, strategy2=None, strategy3=None):\n super().__init__()\n self.mul = P.Mul().shard(strategy1)\n self.cast = P.Cast().shard(strategy2)\n self.sigmoid = P.Sigmoid().shard(strategy3)\n self.mul_weight = Parameter(mul_weight, \"w1\")\n\n def construct(self, x, b):\n out = self.mul(x, self.mul_weight)\n out = self.cast(out, mstype.float16)\n out = self.sigmoid(out)\n return out\n\n\n_x = Tensor(np.ones([64, 32]), dtype=ms.float32)\n_w1 = Tensor(np.ones([64, 32]), dtype=ms.float32)\n_b = Tensor(np.ones([64, 32]), dtype=ms.float32)\n\n\ndef compile_net(net):\n optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)\n train_net = TrainOneStepCell(net, optimizer)\n train_net.set_auto_parallel()\n train_net.set_train()\n _cell_graph_executor.compile(train_net, _x, _b)\n context.reset_auto_parallel_context()\n\n\ndef test_auto_parallel_activation4():\n context.set_auto_parallel_context(parallel_mode=\"auto_parallel\", device_num=16, global_rank=0,\n search_mode=\"sharding_propagation\")\n strategy1 = ((4, 4), (4, 4))\n strategy2 = None\n strategy3 = ((8, 2),)\n net = Net(_w1, strategy1, strategy2, strategy3)\n compile_net(net)\n", "# Copyright 2019 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport copy\nimport numpy as np\nimport pytest\n\nimport mindspore\nimport mindspore.common.dtype as mstype\nimport mindspore.dataset as ds\nimport mindspore.dataset.engine.iterators 
as it\nfrom mindspore import log as logger\nfrom mindspore import Tensor\nimport mindspore.ops as ops\n\n\n# Generate 1d int numpy array from 0 - 63\ndef generator_1d():\n for i in range(64):\n yield (np.array([i]),)\n\n\nclass DatasetGenerator:\n def __init__(self):\n pass\n\n def __getitem__(self, item):\n return (np.array([item]),)\n\n def __len__(self):\n return 10\n\n\nclass DatasetGeneratorLarge:\n def __init__(self):\n self.data = np.array(range(4000))\n\n def __getitem__(self, item):\n return (self.data + item, self.data *10)\n\n def __len__(self):\n return 10\n\n\nclass DatasetGeneratorMixed:\n def __init__(self):\n pass\n\n def __getitem__(self, item):\n flatten = ops.Flatten()\n x = Tensor(np.ones(shape=[2, 3]), mindspore.float32)\n output = flatten(x)\n return (output.asnumpy(),)\n\n def __len__(self):\n return 10\n\n\ndef test_generator_0():\n \"\"\"\n Test 1D Generator\n \"\"\"\n logger.info(\"Test 1D Generator : 0 - 63\")\n\n # apply dataset operations\n data1 = ds.GeneratorDataset(generator_1d, [\"data\"])\n\n i = 0\n for item in data1.create_dict_iterator(num_epochs=1, output_numpy=True): # each data is a dictionary\n golden = np.array([i])\n np.testing.assert_array_equal(item[\"data\"], golden)\n i = i + 1\n\n\n# Generate md int numpy array from [[0, 1], [2, 3]] to [[63, 64], [65, 66]]\ndef generator_md():\n for i in range(64):\n yield (np.array([[i, i + 1], [i + 2, i + 3]]),)\n\n\ndef test_generator_1():\n \"\"\"\n Test MD Generator\n \"\"\"\n logger.info(\"Test MD Generator : 0 - 63, with shape [2, 2]\")\n\n # apply dataset operations\n data1 = ds.GeneratorDataset(generator_md, [\"data\"])\n\n i = 0\n for item in data1.create_dict_iterator(num_epochs=1, output_numpy=True): # each data is a dictionary\n golden = np.array([[i, i + 1], [i + 2, i + 3]])\n np.testing.assert_array_equal(item[\"data\"], golden)\n i = i + 1\n\n\n# Generate two columns, the first column is from Generator1D, the second column is from GeneratorMD\ndef generator_mc(maxid=64):\n for i in range(maxid):\n yield (np.array([i]), np.array([[i, i + 1], [i + 2, i + 3]]))\n\n\ndef test_generator_2():\n \"\"\"\n Test multi column generator\n \"\"\"\n logger.info(\"Test multi column generator\")\n\n # apply dataset operations\n data1 = ds.GeneratorDataset(generator_mc, [\"col0\", \"col1\"])\n\n i = 0\n for item in data1.create_dict_iterator(num_epochs=1, output_numpy=True): # each data is a dictionary\n golden = np.array([i])\n np.testing.assert_array_equal(item[\"col0\"], golden)\n golden = np.array([[i, i + 1], [i + 2, i + 3]])\n np.testing.assert_array_equal(item[\"col1\"], golden)\n i = i + 1\n\n\ndef test_generator_3():\n \"\"\"\n Test 1D Generator + repeat(4)\n \"\"\"\n logger.info(\"Test 1D Generator : 0 - 63 + Repeat(4)\")\n\n # apply dataset operations\n data1 = ds.GeneratorDataset(generator_1d, [\"data\"])\n\n data1 = data1.repeat(4)\n\n i = 0\n for item in data1.create_dict_iterator(num_epochs=1, output_numpy=True): # each data is a dictionary\n golden = np.array([i])\n np.testing.assert_array_equal(item[\"data\"], golden)\n i = i + 1\n if i == 64:\n i = 0\n\n\ndef test_generator_4():\n \"\"\"\n Test fixed size 1D Generator + batch\n \"\"\"\n logger.info(\"Test 1D Generator : 0 - 63 + batch(4)\")\n\n # apply dataset operations\n data1 = ds.GeneratorDataset(generator_1d, [\"data\"])\n\n data1 = data1.batch(4)\n\n i = 0\n for item in data1.create_dict_iterator(num_epochs=1, output_numpy=True): # each data is a dictionary\n golden = np.array([[i], [i + 1], [i + 2], [i + 3]])\n 
np.testing.assert_array_equal(item[\"data\"], golden)\n i = i + 4\n\n\ndef generator_with_type(t):\n for i in range(64):\n yield (np.array([i], dtype=t),)\n\n\ndef type_tester(t):\n logger.info(\"Test with Type {}\".format(t.__name__))\n\n # apply dataset operations\n data1 = ds.GeneratorDataset((lambda: generator_with_type(t)), [\"data\"])\n\n data1 = data1.batch(4)\n\n i = 0\n for item in data1.create_dict_iterator(num_epochs=1, output_numpy=True): # each data is a dictionary\n golden = np.array([[i], [i + 1], [i + 2], [i + 3]], dtype=t)\n np.testing.assert_array_equal(item[\"data\"], golden)\n i = i + 4\n\n\ndef test_generator_5():\n \"\"\"\n Test 1D Generator on different data type\n \"\"\"\n logger.info(\"Test 1D Generator on all data types\")\n\n types = [np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64, np.float32, np.float64]\n\n for t in types:\n type_tester(t)\n\n\ndef type_tester_with_type_check(t, c):\n logger.info(\"Test with Type {}\".format(t.__name__))\n\n # apply dataset operations\n data1 = ds.GeneratorDataset((lambda: generator_with_type(t)), [\"data\"], column_types=[c])\n\n data1 = data1.batch(4)\n\n i = 0\n for item in data1.create_dict_iterator(num_epochs=1, output_numpy=True): # each data is a dictionary\n golden = np.array([[i], [i + 1], [i + 2], [i + 3]], dtype=t)\n np.testing.assert_array_equal(item[\"data\"], golden)\n i = i + 4\n\n\ndef test_generator_6():\n \"\"\"\n Test 1D Generator on different data type with type check\n \"\"\"\n logger.info(\"Test 1D Generator on all data types with type check\")\n\n np_types = [np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64, np.float32,\n np.float64]\n de_types = [mstype.int8, mstype.int16, mstype.int32, mstype.int64, mstype.uint8, mstype.uint16, mstype.uint32,\n mstype.uint64, mstype.float32, mstype.float64]\n\n for i, _ in enumerate(np_types):\n type_tester_with_type_check(np_types[i], de_types[i])\n\n\ndef generator_with_type_2c(t):\n for i in range(64):\n yield (np.array([i], dtype=t), np.array([i], dtype=t))\n\n\ndef type_tester_with_type_check_2c(t, c):\n logger.info(\"Test with Type {}\".format(t.__name__))\n\n # apply dataset operations\n data1 = ds.GeneratorDataset((lambda: generator_with_type_2c(t)), [\"data0\", \"data1\"], column_types=c)\n\n data1 = data1.batch(4)\n\n i = 0\n for item in data1.create_dict_iterator(num_epochs=1, output_numpy=True): # each data is a dictionary\n golden = np.array([[i], [i + 1], [i + 2], [i + 3]], dtype=t)\n np.testing.assert_array_equal(item[\"data0\"], golden)\n i = i + 4\n\n\ndef test_generator_7():\n \"\"\"\n Test 2 column Generator on different data type with type check\n \"\"\"\n logger.info(\"Test 2 column Generator on all data types with type check\")\n\n np_types = [np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64, np.float32,\n np.float64]\n de_types = [mstype.int8, mstype.int16, mstype.int32, mstype.int64, mstype.uint8, mstype.uint16, mstype.uint32,\n mstype.uint64, mstype.float32, mstype.float64]\n\n for i, _ in enumerate(np_types):\n type_tester_with_type_check_2c(np_types[i], [None, de_types[i]])\n\n\ndef test_generator_8():\n \"\"\"\n Test multi column generator with few mapops\n \"\"\"\n logger.info(\"Test multi column generator with mapops to check the order too\")\n\n # apply dataset operations\n data1 = ds.GeneratorDataset(generator_mc(2048), [\"col0\", \"col1\"])\n data1 = data1.map(operations=(lambda x: x * 3), input_columns=\"col0\", output_columns=\"out0\",\n 
num_parallel_workers=2)\n data1 = data1.map(operations=(lambda x: (x * 7, x)), input_columns=\"col1\", output_columns=[\"out1\", \"out2\"],\n num_parallel_workers=2, column_order=[\"out0\", \"out1\", \"out2\"])\n data1 = data1.map(operations=(lambda x: x + 1), input_columns=\"out2\", output_columns=\"out2\",\n num_parallel_workers=2)\n\n i = 0\n for item in data1.create_dict_iterator(num_epochs=1, output_numpy=True): # each data is a dictionary\n golden = np.array([i * 3])\n np.testing.assert_array_equal(item[\"out0\"], golden)\n golden = np.array([[i * 7, (i + 1) * 7], [(i + 2) * 7, (i + 3) * 7]])\n np.testing.assert_array_equal(item[\"out1\"], golden)\n golden = np.array([[i + 1, i + 2], [i + 3, i + 4]])\n np.testing.assert_array_equal(item[\"out2\"], golden)\n i = i + 1\n\n\ndef test_generator_9():\n \"\"\"\n Test map column order when len(input_columns) == len(output_columns).\n \"\"\"\n logger.info(\"Test map column order when len(input_columns) == len(output_columns).\")\n\n # apply dataset operations\n data1 = ds.GeneratorDataset(generator_mc(2048), [\"image\", \"label\"])\n data2 = ds.GeneratorDataset(generator_mc(2048), [\"label\", \"image\"])\n data1 = data1.map(operations=(lambda x: x * 3), input_columns=\"label\",\n num_parallel_workers=4)\n data2 = data2.map(operations=(lambda x: x * 3), input_columns=\"label\",\n num_parallel_workers=4)\n\n # Expected column order is not changed.\n # data1 = data[0] is \"image\" and data[1] is \"label\"\n # data2 = data[0] is \"label\" and data[1] is \"image\"\n i = 0\n for data1, data2 in zip(data1, data2): # each data is a dictionary\n golden = np.array([i])\n np.testing.assert_array_equal(data1[0].asnumpy(), golden)\n golden = np.array([[i * 3, (i + 1) * 3], [(i + 2) * 3, (i + 3) * 3]])\n np.testing.assert_array_equal(data1[1].asnumpy(), golden)\n\n golden = np.array([i * 3])\n np.testing.assert_array_equal(data2[0].asnumpy(), golden)\n golden = np.array([[i, i + 1], [i + 2, i + 3]])\n np.testing.assert_array_equal(data2[1].asnumpy(), golden)\n i = i + 1\n\n\ndef test_generator_10():\n \"\"\"\n Test map column order when len(input_columns) != len(output_columns).\n \"\"\"\n logger.info(\"Test map column order when len(input_columns) != len(output_columns).\")\n\n # apply dataset operations\n data1 = ds.GeneratorDataset(generator_mc(2048), [\"col0\", \"col1\"])\n data1 = data1.map(operations=(lambda x: (x, x * 5)), input_columns=\"col1\", output_columns=[\"out1\", \"out2\"],\n column_order=['col0', 'out1', 'out2'], num_parallel_workers=2)\n\n # Expected column order is |col0|out1|out2|\n i = 0\n for item in data1.create_tuple_iterator(num_epochs=1, output_numpy=True):\n golden = np.array([i])\n np.testing.assert_array_equal(item[0], golden)\n golden = np.array([[i, i + 1], [i + 2, i + 3]])\n np.testing.assert_array_equal(item[1], golden)\n golden = np.array([[i * 5, (i + 1) * 5], [(i + 2) * 5, (i + 3) * 5]])\n np.testing.assert_array_equal(item[2], golden)\n i = i + 1\n\n\ndef test_generator_11():\n \"\"\"\n Test map column order when len(input_columns) != len(output_columns).\n \"\"\"\n logger.info(\"Test map column order when len(input_columns) != len(output_columns), \"\n \"and column_order drops some columns.\")\n\n # apply dataset operations\n data1 = ds.GeneratorDataset(generator_mc(2048), [\"col0\", \"col1\"])\n data1 = data1.map(operations=(lambda x: (x, x * 5)), input_columns=\"col1\", output_columns=[\"out1\", \"out2\"],\n column_order=['out1', 'out2'], num_parallel_workers=2)\n\n # Expected column order is |out1|out2|\n i = 0\n 
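# added note: col0 should disappear entirely here, because column_order above lists only the two mapped columns\n 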
for item in data1.create_tuple_iterator(num_epochs=1, output_numpy=True):\n # len should be 2 because col0 is dropped (not included in column_order)\n assert len(item) == 2\n golden = np.array([[i, i + 1], [i + 2, i + 3]])\n np.testing.assert_array_equal(item[0], golden)\n golden = np.array([[i * 5, (i + 1) * 5], [(i + 2) * 5, (i + 3) * 5]])\n np.testing.assert_array_equal(item[1], golden)\n i = i + 1\n\n\ndef test_generator_12():\n \"\"\"\n Test map column order when input_columns and output_columns are None.\n \"\"\"\n logger.info(\"Test map column order when input_columns and output_columns are None.\")\n\n # apply dataset operations\n data1 = ds.GeneratorDataset(generator_mc(2048), [\"col0\", \"col1\"])\n data1 = data1.map(operations=(lambda x: (x * 5)), num_parallel_workers=2)\n\n # Expected column order is |col0|col1|\n i = 0\n for item in data1.create_tuple_iterator(num_epochs=1, output_numpy=True):\n assert len(item) == 2\n golden = np.array([i * 5])\n np.testing.assert_array_equal(item[0], golden)\n golden = np.array([[i, i + 1], [i + 2, i + 3]])\n np.testing.assert_array_equal(item[1], golden)\n i = i + 1\n\n data1 = ds.GeneratorDataset(generator_mc(2048), [\"col0\", \"col1\"])\n data1 = data1.map(operations=(lambda x: (x * 5)), column_order=[\"col1\", \"col0\"], num_parallel_workers=2)\n\n # Expected column order is |col1|col0| (column_order reverses the two columns)\n i = 0\n for item in data1.create_tuple_iterator(num_epochs=1, output_numpy=True):\n assert len(item) == 2\n golden = np.array([i * 5])\n np.testing.assert_array_equal(item[1], golden)\n golden = np.array([[i, i + 1], [i + 2, i + 3]])\n np.testing.assert_array_equal(item[0], golden)\n i = i + 1\n\n\ndef test_generator_13():\n \"\"\"\n Test map column order when input_columns is None.\n \"\"\"\n logger.info(\"Test map column order when input_columns is None.\")\n\n # apply dataset operations\n data1 = ds.GeneratorDataset(generator_mc(2048), [\"col0\", \"col1\"])\n data1 = data1.map(operations=(lambda x: (x * 5)), output_columns=[\"out0\"], num_parallel_workers=2)\n\n # Expected column order is |out0|col1|\n i = 0\n for item in data1.create_tuple_iterator(num_epochs=1, output_numpy=True):\n assert len(item) == 2\n golden = np.array([i * 5])\n np.testing.assert_array_equal(item[0], golden)\n golden = np.array([[i, i + 1], [i + 2, i + 3]])\n np.testing.assert_array_equal(item[1], golden)\n i = i + 1\n\n for item in data1.create_dict_iterator(num_epochs=1, output_numpy=True): # each data is a dictionary\n # len should be 2: the map op replaced col0 with out0, and col1 is untouched\n assert len(item) == 2\n golden = np.array([i * 5])\n np.testing.assert_array_equal(item[\"out0\"], golden)\n golden = np.array([[i, i + 1], [i + 2, i + 3]])\n np.testing.assert_array_equal(item[\"col1\"], golden)\n i = i + 1\n\n\ndef test_generator_14():\n \"\"\"\n Test 1D Generator MP + CPP sampler\n \"\"\"\n logger.info(\"Test 1D Generator MP : 0 - 63\")\n # Sometimes there are some ITERATORS left in ITERATORS_LIST when all UTs are run together,\n # which causes core dumps and blocking in this UT. 
Add cleanup() here to fix it.\n it._cleanup() # pylint: disable=W0212\n\n # Reduce memory needed by reducing queue size\n prefetch_original = ds.config.get_prefetch_size()\n ds.config.set_prefetch_size(1)\n\n source = [(np.array([x]),) for x in range(256)]\n ds1 = ds.GeneratorDataset(source, [\"data\"], sampler=ds.SequentialSampler(),\n num_parallel_workers=4, max_rowsize=1).repeat(2)\n i = 0\n for data in ds1.create_dict_iterator(num_epochs=1, output_numpy=True): # each data is a dictionary\n golden = np.array([i])\n np.testing.assert_array_equal(data[\"data\"], golden)\n i = i + 1\n if i == 256:\n i = 0\n\n ds.config.set_prefetch_size(prefetch_original)\n\ndef test_generator_15():\n \"\"\"\n Test 1D Generator MP + Python sampler\n \"\"\"\n logger.info(\"Test 1D Generator MP : 0 - 63\")\n\n ## Reduce memory needed by reducing queue size\n prefetch_original = ds.config.get_prefetch_size()\n ds.config.set_prefetch_size(1)\n\n sampler = [x for x in range(256)]\n source = [(np.array([x]),) for x in range(256)]\n ds1 = ds.GeneratorDataset(source, [\"data\"], sampler=sampler,\n num_parallel_workers=4, max_rowsize=1).repeat(1)\n i = 0\n for data in ds1.create_dict_iterator(num_epochs=1, output_numpy=True): # each data is a dictionary\n golden = np.array([i])\n np.testing.assert_array_equal(data[\"data\"], golden)\n i = i + 1\n if i == 256:\n i = 0\n\n ds.config.set_prefetch_size(prefetch_original)\n\ndef test_generator_16():\n \"\"\"\n Test multi column generator Mp + CPP sampler\n \"\"\"\n logger.info(\"Test multi column generator\")\n\n source = [(np.array([x]), np.array([x + 1])) for x in range(256)]\n # apply dataset operations\n data1 = ds.GeneratorDataset(source, [\"col0\", \"col1\"], sampler=ds.SequentialSampler())\n\n i = 0\n for item in data1.create_dict_iterator(num_epochs=1, output_numpy=True): # each data is a dictionary\n golden = np.array([i])\n np.testing.assert_array_equal(item[\"col0\"], golden)\n golden = np.array([i + 1])\n np.testing.assert_array_equal(item[\"col1\"], golden)\n i = i + 1\n\n\ndef test_generator_17():\n \"\"\"\n Test multi column generator Mp + Python sampler\n \"\"\"\n logger.info(\"Test multi column generator\")\n\n sampler = [x for x in range(256)]\n source = [(np.array([x]), np.array([x + 1])) for x in range(256)]\n # apply dataset operations\n data1 = ds.GeneratorDataset(source, [\"col0\", \"col1\"], sampler=sampler)\n\n i = 0\n for item in data1.create_dict_iterator(num_epochs=1, output_numpy=True): # each data is a dictionary\n golden = np.array([i])\n np.testing.assert_array_equal(item[\"col0\"], golden)\n golden = np.array([i + 1])\n np.testing.assert_array_equal(item[\"col1\"], golden)\n i = i + 1\n\n\ndef test_generator_18():\n \"\"\"\n Test multiprocessing flag (same as test 13 with python_multiprocessing=True flag)\n \"\"\"\n logger.info(\"Test map column order when input_columns is None.\")\n\n # Reduce shm usage by disabling this optimization\n mem_original = ds.config.get_enable_shared_mem()\n ds.config.set_enable_shared_mem(False)\n\n # apply dataset operations\n data1 = ds.GeneratorDataset(generator_mc(2048), [\"col0\", \"col1\"], python_multiprocessing=True)\n data1 = data1.map(operations=(lambda x: (x * 5)), output_columns=[\"out0\"], num_parallel_workers=2,\n python_multiprocessing=True)\n\n # Expected column order is |out0|col1|\n i = 0\n for item in data1.create_tuple_iterator(num_epochs=1, output_numpy=True):\n assert len(item) == 2\n golden = np.array([i * 5])\n np.testing.assert_array_equal(item[0], golden)\n golden = np.array([[i, i 
+ 1], [i + 2, i + 3]])\n np.testing.assert_array_equal(item[1], golden)\n i = i + 1\n\n for item in data1.create_dict_iterator(num_epochs=1, output_numpy=True): # each data is a dictionary\n # len should be 2: the map op replaced col0 with out0, and col1 is untouched\n assert len(item) == 2\n golden = np.array([i * 5])\n np.testing.assert_array_equal(item[\"out0\"], golden)\n\n ds.config.set_enable_shared_mem(mem_original)\n\n\ndef test_generator_19():\n \"\"\"\n Test multiprocessing flag with 2 different large columns\n \"\"\"\n logger.info(\"Test multiprocessing flag with 2 different large columns.\")\n\n # apply dataset operations\n data1 = ds.GeneratorDataset(DatasetGeneratorLarge(), [\"col0\", \"col1\"], python_multiprocessing=True, shuffle=False)\n\n # Expected column order is |col0|col1|\n i = 0\n for item in data1.create_tuple_iterator(num_epochs=1, output_numpy=True):\n assert len(item) == 2\n golden = np.array(range(4000)) + i\n np.testing.assert_array_equal(item[0], golden)\n golden = np.array(range(4000)) * 10\n np.testing.assert_array_equal(item[1], golden)\n i = i + 1\n\n\nclass RandomAccessDataset:\n def __init__(self):\n self.__data = np.random.sample((5, 1))\n\n def __getitem__(self, item):\n return self.__data[item]\n\n def __len__(self):\n return 5\n\n\nclass RandomAccessDatasetWithoutLen:\n def __init__(self):\n self.__data = np.random.sample((5, 1))\n\n def __getitem__(self, item):\n return self.__data[item]\n\n\nclass IterableDataset:\n def __init__(self):\n self.count = 0\n self.max = 10\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.count >= self.max:\n raise StopIteration\n self.count += 1\n return (np.array(self.count),)\n\n\ndef test_generator_20():\n \"\"\"\n Test mappable and unmappable dataset as source for GeneratorDataset.\n \"\"\"\n logger.info(\"Test mappable and unmappable dataset as source for GeneratorDataset.\")\n\n # Mappable dataset\n data1 = ds.GeneratorDataset(RandomAccessDataset(), [\"col0\"])\n dataset_size1 = data1.get_dataset_size()\n assert dataset_size1 == 5\n\n # Mappable dataset without __len__\n data2 = ds.GeneratorDataset(RandomAccessDatasetWithoutLen(), [\"col0\"])\n try:\n data2.get_dataset_size()\n except RuntimeError as e:\n assert \"'__len__' method is required\" in str(e)\n\n # Unmappable dataset\n data3 = ds.GeneratorDataset(IterableDataset(), [\"col0\"])\n dataset_size3 = data3.get_dataset_size()\n assert dataset_size3 == 10\n\n\ndef test_generator_error_1():\n def generator_np():\n for i in range(64):\n yield (np.array([{i}]),)\n\n with pytest.raises(RuntimeError) as info:\n data1 = ds.GeneratorDataset(generator_np, [\"data\"])\n for _ in data1:\n pass\n assert \"Invalid data type\" in str(info.value)\n\n\ndef test_generator_error_2():\n def generator_np():\n for i in range(64):\n yield ({i},)\n\n with pytest.raises(RuntimeError) as info:\n data1 = ds.GeneratorDataset(generator_np, [\"data\"])\n for _ in data1:\n pass\n print(\"========\", str(info.value))\n assert \"'GeneratorDataset' should return a tuple of NumPy arrays\" in str(info.value)\n\n\ndef test_generator_error_3():\n with pytest.raises(ValueError) as info:\n # apply dataset operations\n data1 = ds.GeneratorDataset(generator_mc(2048), [\"label\", \"image\"])\n data1 = data1.map(operations=(lambda x: (x, x * 5)), input_columns=[\"label\"], output_columns=[\"out1\", \"out2\"],\n num_parallel_workers=2)\n\n for _ in data1:\n pass\n assert \"When length of input_columns and output_columns are not equal, column_order must be specified.\" in \\\n 
str(info.value)\n\n\ndef test_generator_error_4():\n with pytest.raises(RuntimeError) as info:\n # apply dataset operations\n data1 = ds.GeneratorDataset(generator_mc(2048), [\"label\", \"image\"])\n data1 = data1.map(operations=(lambda x: (x, x * 5)), input_columns=[\"label\"],\n num_parallel_workers=2)\n\n for _ in data1:\n pass\n assert \"the number of columns returned in 'map' operations should match the number of 'output_columns'\"\\\n in str(info.value)\n\n\ndef test_generator_sequential_sampler():\n source = [(np.array([x]),) for x in range(64)]\n ds1 = ds.GeneratorDataset(source, [\"data\"], sampler=ds.SequentialSampler())\n i = 0\n for data in ds1.create_dict_iterator(num_epochs=1, output_numpy=True): # each data is a dictionary\n golden = np.array([i])\n np.testing.assert_array_equal(data[\"data\"], golden)\n i = i + 1\n\n\ndef test_generator_random_sampler():\n source = [(np.array([x]),) for x in range(64)]\n ds1 = ds.GeneratorDataset(source, [\"data\"], shuffle=True)\n for _ in ds1.create_dict_iterator(num_epochs=1): # each data is a dictionary\n pass\n\n\ndef test_generator_distributed_sampler():\n source = [(np.array([x]),) for x in range(64)]\n for sid in range(8):\n ds1 = ds.GeneratorDataset(source, [\"data\"], shuffle=False, num_shards=8, shard_id=sid)\n i = sid\n for data in ds1.create_dict_iterator(num_epochs=1, output_numpy=True): # each data is a dictionary\n golden = np.array([i])\n np.testing.assert_array_equal(data[\"data\"], golden)\n i = i + 8\n\n\ndef test_generator_num_samples():\n source = [(np.array([x]),) for x in range(64)]\n num_samples = 32\n ds1 = ds.GeneratorDataset(source, [\"data\"], sampler=ds.SequentialSampler(num_samples=num_samples))\n ds2 = ds.GeneratorDataset(source, [\"data\"], sampler=[i for i in range(32)], num_samples=num_samples)\n ds3 = ds.GeneratorDataset(generator_1d, [\"data\"], num_samples=num_samples)\n\n count = 0\n for _ in ds1.create_dict_iterator(num_epochs=1):\n count = count + 1\n assert count == num_samples\n\n count = 0\n for _ in ds2.create_dict_iterator(num_epochs=1):\n count = count + 1\n assert count == num_samples\n\n count = 0\n for _ in ds3.create_dict_iterator(num_epochs=1):\n count = count + 1\n assert count == num_samples\n\n\ndef test_generator_num_samples_underflow():\n source = [(np.array([x]),) for x in range(64)]\n num_samples = 256\n ds2 = ds.GeneratorDataset(source, [\"data\"], sampler=[i for i in range(64)], num_samples=num_samples)\n ds3 = ds.GeneratorDataset(generator_1d, [\"data\"], num_samples=num_samples)\n\n count = 0\n for _ in ds2.create_dict_iterator(num_epochs=1):\n count = count + 1\n assert count == 64\n\n count = 0\n for _ in ds3.create_dict_iterator(num_epochs=1):\n count = count + 1\n assert count == 64\n\n\ndef type_tester_with_type_check_2c_schema(t, c):\n logger.info(\"Test with Type {}\".format(t.__name__))\n\n schema = ds.Schema()\n schema.add_column(\"data0\", c[0])\n schema.add_column(\"data1\", c[1])\n\n # apply dataset operations\n data1 = ds.GeneratorDataset((lambda: generator_with_type_2c(t)), schema=schema)\n\n data1 = data1.batch(4)\n\n i = 0\n for item in data1.create_dict_iterator(num_epochs=1, output_numpy=True): # each data is a dictionary\n golden = np.array([[i], [i + 1], [i + 2], [i + 3]], dtype=t)\n np.testing.assert_array_equal(item[\"data0\"], golden)\n i = i + 4\n\n\ndef test_generator_schema():\n \"\"\"\n Test 2 column Generator on different data type with type check with schema input\n \"\"\"\n logger.info(\"Test 2 column Generator on all data types with type 
check\")\n\n np_types = [np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64, np.float32,\n np.float64]\n de_types = [mstype.int8, mstype.int16, mstype.int32, mstype.int64, mstype.uint8, mstype.uint16, mstype.uint32,\n mstype.uint64, mstype.float32, mstype.float64]\n\n for i, _ in enumerate(np_types):\n type_tester_with_type_check_2c_schema(np_types[i], [de_types[i], de_types[i]])\n\n\ndef test_generator_dataset_size_0():\n \"\"\"\n Test GeneratorDataset get_dataset_size by iterator method.\n \"\"\"\n logger.info(\"Test 1D Generator : 0 - 63 get_dataset_size\")\n\n data1 = ds.GeneratorDataset(generator_1d, [\"data\"])\n data_size = data1.get_dataset_size()\n\n num_rows = 0\n for _ in data1.create_dict_iterator(num_epochs=1, output_numpy=True): # each data is a dictionary\n num_rows = num_rows + 1\n assert data_size == num_rows\n\n\ndef test_generator_dataset_size_1():\n \"\"\"\n Test GeneratorDataset get_dataset_size by __len__ method.\n \"\"\"\n logger.info(\"Test DatasetGenerator get_dataset_size\")\n\n dataset_generator = DatasetGenerator()\n data1 = ds.GeneratorDataset(dataset_generator, [\"data\"])\n\n data_size = data1.get_dataset_size()\n\n num_rows = 0\n for _ in data1.create_dict_iterator(num_epochs=1):\n num_rows = num_rows + 1\n assert data_size == num_rows\n\n\ndef test_generator_dataset_size_2():\n \"\"\"\n Test GeneratorDataset + repeat get_dataset_size\n \"\"\"\n logger.info(\"Test 1D Generator + repeat get_dataset_size\")\n\n data1 = ds.GeneratorDataset(generator_1d, [\"data\"])\n data1 = data1.repeat(2)\n\n data_size = data1.get_dataset_size()\n\n num_rows = 0\n for _ in data1.create_dict_iterator(num_epochs=1):\n num_rows = num_rows + 1\n assert data_size == num_rows\n\n\ndef test_generator_dataset_size_3():\n \"\"\"\n Test GeneratorDataset + batch get_dataset_size\n \"\"\"\n logger.info(\"Test 1D Generator + batch get_dataset_size\")\n\n data1 = ds.GeneratorDataset(generator_1d, [\"data\"])\n data1 = data1.batch(4)\n\n data_size = data1.get_dataset_size()\n\n num_rows = 0\n for _ in data1.create_dict_iterator(num_epochs=1):\n num_rows += 1\n assert data_size == num_rows\n\n\ndef test_generator_dataset_size_4():\n \"\"\"\n Test GeneratorDataset + num_shards\n \"\"\"\n logger.info(\"Test 1D Generator : 0 - 63 + num_shards get_dataset_size\")\n\n dataset_generator = DatasetGenerator()\n data1 = ds.GeneratorDataset(dataset_generator, [\"data\"], num_shards=3, shard_id=0)\n data_size = data1.get_dataset_size()\n\n num_rows = 0\n for _ in data1.create_dict_iterator(num_epochs=1): # each data is a dictionary\n num_rows = num_rows + 1\n assert data_size == num_rows\n\n\ndef test_generator_dataset_size_5():\n \"\"\"\n Test get_dataset_size after create_dict_iterator\n \"\"\"\n logger.info(\"Test get_dataset_size after create_dict_iterator\")\n\n dataset_generator = DatasetGenerator()\n data1 = ds.GeneratorDataset(dataset_generator, [\"data\"], num_shards=3, shard_id=0)\n\n num_rows = 0\n for _ in data1.create_dict_iterator(num_epochs=1): # each data is a dictionary\n num_rows = num_rows + 1\n data_size = data1.get_dataset_size()\n assert data_size == num_rows\n\n\ndef manual_test_generator_keyboard_interrupt():\n \"\"\"\n Test keyboard_interrupt\n \"\"\"\n logger.info(\"Test 1D Generator MP : 0 - 63\")\n\n class MyDS():\n def __getitem__(self, item):\n while True:\n pass\n\n def __len__(self):\n return 1024\n\n ds1 = ds.GeneratorDataset(MyDS(), [\"data\"], num_parallel_workers=4).repeat(2)\n for _ in ds1.create_dict_iterator(num_epochs=1): # each data 
is a dictionary\n pass\n\n\ndef test_explicit_deepcopy():\n \"\"\"\n Test explicit_deepcopy\n \"\"\"\n logger.info(\"Test explicit_deepcopy\")\n\n ds1 = ds.NumpySlicesDataset([1, 2], shuffle=False)\n ds2 = copy.deepcopy(ds1)\n for d1, d2 in zip(ds1, ds2):\n assert d1 == d2\n\n\ndef test_func_generator_dataset_005():\n \"\"\"\n generator: a class whose __getitem__ returns Tensors\n \"\"\"\n result = [np.random.randn(242, 242, 242), np.random.randn(42, 24, 442)]\n\n class MyData():\n def __init__(self, input_para):\n self.data = input_para\n\n def __getitem__(self, item):\n return (Tensor(self.data[0]), Tensor(self.data[1]))\n\n def __len__(self):\n return 2\n\n column_names = [\"col1\", \"col2\"]\n dataset = ds.GeneratorDataset(MyData(result), column_names)\n i = 0\n for data in dataset.create_dict_iterator(num_epochs=1, output_numpy=True):\n assert \"col1\" in str(data.keys())\n assert (data[\"col1\"] == result[0]).all()\n assert (data[\"col2\"] == result[1]).all()\n i += 1\n assert i == 2\n\n\ndef test_func_generator_dataset_with_zip_source():\n \"\"\"\n Feature: verify the source is zip\n Description: the source input is zip\n Expectation: success\n \"\"\"\n def synthetic_data(w, b, num_examples):\n \"\"\"Generate y = Xw + b + noise.\"\"\"\n X = np.random.normal(0, 1, (num_examples, len(w)))\n y = np.matmul(X, w) + b\n y += np.random.normal(0, 0.01, y.shape)\n return X.astype(np.float32), y.reshape((-1, 1)).astype(np.float32)\n\n true_w = np.array([2, -3.4])\n true_b = 4.2\n features, labels = synthetic_data(true_w, true_b, 10)\n\n def load_array(data_arrays, column_names, batch_size, is_train=True):\n \"\"\"Construct a MindSpore data iterator.\"\"\"\n dataset = ds.GeneratorDataset(data_arrays, column_names, shuffle=is_train)\n dataset = dataset.batch(batch_size)\n return dataset\n\n batch_size = 2\n dataset = load_array(zip(features, labels), ['features', 'labels'], batch_size)\n\n count = 0\n epochs = 10\n dataset_iter = dataset.create_dict_iterator(num_epochs=epochs, output_numpy=True)\n for _ in range(epochs):\n for _ in dataset_iter:\n count += 1\n assert count == 50\n\n\ndef test_generator_mixed_operator():\n \"\"\"\n Feature: Test adding computing operator into user defined dataset\n Description: num_parallel_workers will be reduced to 1\n Expectation: success\n \"\"\"\n logger.info(\"Test adding computing operator into user defined dataset.\")\n\n # create dataset\n data1 = ds.GeneratorDataset(DatasetGeneratorMixed(), [\"col0\"], shuffle=False, python_multiprocessing=False)\n assert data1.num_parallel_workers == 1\n\n for _ in data1.create_tuple_iterator(num_epochs=1):\n pass\n\n\nif __name__ == \"__main__\":\n test_generator_0()\n test_generator_1()\n test_generator_2()\n test_generator_3()\n test_generator_4()\n test_generator_5()\n test_generator_6()\n test_generator_7()\n test_generator_8()\n test_generator_9()\n test_generator_10()\n test_generator_11()\n test_generator_12()\n test_generator_13()\n test_generator_14()\n test_generator_15()\n test_generator_16()\n test_generator_17()\n test_generator_18()\n test_generator_19()\n test_generator_error_1()\n test_generator_error_2()\n test_generator_error_3()\n test_generator_error_4()\n test_generator_sequential_sampler()\n test_generator_distributed_sampler()\n test_generator_random_sampler()\n test_generator_num_samples()\n test_generator_num_samples_underflow()\n test_generator_schema()\n test_generator_dataset_size_0()\n test_generator_dataset_size_1()\n test_generator_dataset_size_2()\n test_generator_dataset_size_3()\n test_generator_dataset_size_4()\n 
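# note (added): manual_test_generator_keyboard_interrupt is intentionally left out of this list; its __getitem__ loops forever and it is meant to be run by hand\n 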
test_generator_dataset_size_5()\n test_explicit_deepcopy()\n test_func_generator_dataset_005()\n test_func_generator_dataset_with_zip_source()\n test_generator_mixed_operator()\n", "# Copyright 2019 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\n\nimport mindspore as ms\nimport mindspore.nn as nn\nfrom mindspore import Tensor, Parameter\nfrom mindspore import context\nfrom mindspore.common.api import _cell_graph_executor\nfrom mindspore.ops import composite as C\nfrom mindspore.ops import operations as P\nfrom tests.ut.python.ops.test_math_ops import VirtualLoss\n\n\ngrad_all = C.GradOperation(get_all=True)\n\n\nclass NetWithLoss(nn.Cell):\n def __init__(self, network):\n super(NetWithLoss, self).__init__()\n self.loss = VirtualLoss()\n self.network = network\n\n def construct(self, x, y):\n predict = self.network(x, y)\n return self.loss(predict)\n\n\nclass GradWrap(nn.Cell):\n def __init__(self, network):\n super(GradWrap, self).__init__()\n self.network = network\n\n def construct(self, x, y):\n return grad_all(self.network)(x, y)\n\n # model_parallel test\n\n\ndef test_four_matmul_linear():\n class Net(nn.Cell):\n def __init__(self, strategy1):\n super().__init__()\n self.matmul1 = P.MatMul().shard(strategy1)\n self.weight = Parameter(Tensor(np.ones([512, 256]).astype(np.float32) * 0.01), \"w\", requires_grad=True)\n self.matmul2 = P.MatMul()\n\n def construct(self, x, y):\n out = self.matmul1(x, y)\n out = self.matmul2(out, self.weight)\n return out\n\n size = 8\n context.set_auto_parallel_context(device_num=size, global_rank=0)\n strategy1 = ((8, 1), (1, 1))\n x = Tensor(np.ones([8, 16]), dtype=ms.float32)\n y = Tensor(np.ones([16, 512]), dtype=ms.float32)\n\n net = GradWrap(NetWithLoss(Net(strategy1)))\n context.set_auto_parallel_context(parallel_mode=\"auto_parallel\")\n net.set_auto_parallel()\n net.set_train()\n _cell_graph_executor.compile(net, x, y)\n", "# coding: utf-8\n\n# Copyright 2020-2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n\n# limitations under the License.\n# ============================================================================\n\n\"\"\"Operators for array.\"\"\"\nimport copy\nimport functools\nimport itertools\nimport numbers\nfrom collections import Counter\n\nimport numpy as np\n\nfrom mindspore import log as logger\nfrom mindspore.common.initializer import Zero\nfrom .. 
import signature as sig\nfrom .._utils import get_broadcast_shape, is_shape_unknown\nfrom .._utils import get_concat_offset\nfrom ..operations.math_ops import _infer_shape_reduce\nfrom ..primitive import Primitive, PrimitiveWithInfer, PrimitiveWithCheck, prim_attr_register, _run_op\nfrom ..._checkparam import Rel\nfrom ..._checkparam import Validator as validator\nfrom ..._checkparam import _check_3d_int_or_tuple\nfrom ...common import dtype as mstype\nfrom ...common._decorator import deprecated\nfrom ...common.parameter import Parameter\nfrom ...common.tensor import Tensor\nfrom ..._c_expression import Tensor as Tensor_\n\n\nclass _ScatterOp(PrimitiveWithInfer):\n \"\"\"\n Defines Scatter operators\n \"\"\"\n __mindspore_signature__ = (\n sig.make_sig('x', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),\n sig.make_sig('indices', dtype=sig.sig_dtype.T1),\n sig.make_sig('updates', dtype=sig.sig_dtype.T)\n )\n\n def _check_scatter_shape(self, x_shape, indices_shape, updates_shape, prim_name):\n if indices_shape != [-1] and updates_shape and updates_shape != indices_shape + x_shape[1:]:\n raise ValueError(f\"For '{prim_name}', \"\n f\"updates_shape = indices_shape + x_shape[1:], but got x_shape: {x_shape}, \"\n f\"indices_shape: {indices_shape}, updates_shape: {updates_shape}.\")\n\n @prim_attr_register\n def __init__(self, use_locking=False):\n \"\"\"Initialize _ScatterOp\"\"\"\n validator.check_value_type('use_locking', use_locking, [bool], self.name)\n self.init_prim_io_names(inputs=['x', 'indices', 'updates'], outputs=['y'])\n self.add_prim_attr('side_effect_mem', True)\n\n def infer_shape(self, x_shape, indices_shape, updates_shape):\n self._check_scatter_shape(x_shape, indices_shape, updates_shape, self.name)\n return x_shape\n\n def infer_dtype(self, x_dtype, indices_dtype, updates_dtype):\n validator.check_tensor_dtype_valid('indices', indices_dtype, [mstype.int32], self.name)\n args = {\"x\": x_dtype, \"updates\": updates_dtype}\n validator.check_tensors_dtypes_same_and_valid(args, mstype.number_type, self.name)\n return x_dtype\n\n\nclass _ScatterOpDynamic(PrimitiveWithCheck):\n \"\"\"\n Defines Scatter operators with dynamic shape\n \"\"\"\n __mindspore_signature__ = (\n sig.make_sig('x', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),\n sig.make_sig('indices', dtype=sig.sig_dtype.T1),\n sig.make_sig('updates', dtype=sig.sig_dtype.T)\n )\n\n def _check_scatter_shape(self, x_shape, indices_shape, updates_shape, prim_name):\n # x_shape cannot be dynamic\n if np.any(np.array(x_shape) == -1):\n raise ValueError(f\"For '{prim_name}', the 'input_x' does not support dynamic shape, \"\n f\"but got the shape of 'input_x' is {x_shape}.\")\n # support indices and updates dynamic\n if np.any(np.array(indices_shape) == -1) or np.any(np.array(updates_shape) == -1):\n pass\n elif indices_shape != [-1] and updates_shape and updates_shape != indices_shape + x_shape[1:]:\n raise ValueError(f\"For '{prim_name}', \"\n f\"updates_shape = indices_shape + x_shape[1:], but got x_shape: {x_shape}, \"\n f\"indices_shape: {indices_shape}, updates_shape: {updates_shape}.\")\n\n @prim_attr_register\n def __init__(self, use_locking=False):\n \"\"\"Initialize _ScatterOpDynamic\"\"\"\n validator.check_value_type('use_locking', use_locking, [bool], self.name)\n self.init_prim_io_names(inputs=['x', 'indices', 'updates'], outputs=['y'])\n self.add_prim_attr('side_effect_mem', True)\n\n def check_shape(self, x_shape, indices_shape, updates_shape):\n self._check_scatter_shape(x_shape, indices_shape, updates_shape, 
self.name)\n\n def check_dtype(self, x_dtype, indices_dtype, updates_dtype):\n validator.check_tensor_dtype_valid('indices', indices_dtype, [mstype.int32], self.name)\n args = {\"x\": x_dtype, \"updates\": updates_dtype}\n validator.check_tensors_dtypes_same_and_valid(args, mstype.number_type, self.name)\n\n\nclass _ScatterNdOp(_ScatterOp):\n \"\"\"\n Defines _ScatterNd operators\n \"\"\"\n\n def _check_scatter_shape(self, x_shape, indices_shape, updates_shape, prim_name):\n validator.check('the dimension of x', len(x_shape),\n 'the dimension of indices', indices_shape[-1], Rel.GE)\n if indices_shape[:-1] + x_shape[indices_shape[-1]:] != updates_shape:\n raise ValueError(f\"For '{prim_name}', updates_shape = \"\n f\"indices_shape[:-1] + x_shape[indices_shape[-1]:], but got x_shape: {x_shape}, \"\n f\"indices_shape: {indices_shape}, updates_shape: {updates_shape}.\")\n\n\ndef _check_infer_attr_reduce(axis, keep_dims, prim_name):\n validator.check_value_type('keep_dims', keep_dims, [bool], prim_name)\n validator.check_value_type('axis', axis, [int, tuple], prim_name)\n if isinstance(axis, tuple):\n for index, value in enumerate(axis):\n validator.check_value_type('axis[%d]' % index, value, [int], prim_name)\n\n\nclass ExpandDims(PrimitiveWithInfer):\n \"\"\"\n Adds an additional dimension to `input_x` at the given axis.\n\n Note:\n If the specified axis is a negative number, the index is counted\n backward from the end and starts at 1.\n\n Inputs:\n - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.\n - **axis** (int) - Specifies the dimension index at which to expand\n the shape of `input_x`. The value of axis must be in the range\n `[-input_x.ndim-1, input_x.ndim]`. Only constant value is allowed.\n\n Outputs:\n Tensor, the shape of tensor is :math:`(1, x_1, x_2, ..., x_R)` if the\n value of `axis` is 0. It has the same data type as `input_x`.\n\n Raises:\n ValueError: If `axis` is not an int or not in the valid range.\n\n Supported Platforms:\n ``Ascend`` ``GPU`` ``CPU``\n\n Examples:\n >>> input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)\n >>> expand_dims = ops.ExpandDims()\n >>> output = expand_dims(input_tensor, 0)\n >>> print(output)\n [[[2. 2.]\n [2. 
2.]]]\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n \"\"\"Initialize ExpandDims\"\"\"\n self.init_prim_io_names(inputs=['x', 'axis'], outputs=['output'])\n\n def __infer__(self, x, axis):\n validator.check_subclass(\"x\", x['dtype'], mstype.tensor, self.name)\n x_shape = list(x['shape'])\n axis_v = axis['value']\n rank = len(x_shape)\n validator.check_int_range(axis_v, -rank - 1, rank, Rel.INC_BOTH, 'axis', self.name)\n value = None\n if x['value'] is not None:\n value = x['value'].asnumpy()\n value = np.expand_dims(value, axis_v)\n value = Tensor(value)\n if axis_v < 0:\n axis_v = rank + 1 + axis_v\n x_shape.insert(axis_v, 1)\n out = {'shape': x_shape,\n 'dtype': x['dtype'],\n 'value': value}\n if 'min_shape' in x and 'max_shape' in x:\n out['min_shape'] = x['min_shape']\n out['min_shape'].insert(axis_v, 1)\n out['max_shape'] = x['max_shape']\n out['max_shape'].insert(axis_v, 1)\n return out\n\n\nclass DType(Primitive):\n \"\"\"\n Returns the data type of the input tensor as mindspore.dtype.\n\n Inputs:\n - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.\n\n Outputs:\n mindspore.dtype, the data type of a tensor.\n\n Raises:\n TypeError: If `input_x` is not a Tensor.\n\n Supported Platforms:\n ``Ascend`` ``GPU`` ``CPU``\n\n Examples:\n >>> input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)\n >>> output = ops.DType()(input_tensor)\n >>> print(output)\n Float32\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n \"\"\"Initialize DType\"\"\"\n\n\nclass SameTypeShape(PrimitiveWithInfer):\n \"\"\"\n Checks whether the data type and shape of two tensors are the same.\n\n Inputs:\n - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.\n - **input_y** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_S)`.\n\n Outputs:\n Tensor, the shape of tensor is :math:`(x_1, x_2, ..., x_R)`,\n if data type and shape of `input_x` and `input_y` are the same.\n\n Raises:\n TypeError: If the data types of `input_x` and `input_y` are not the same.\n ValueError: If the shapes of `input_x` and `input_y` are not the same.\n\n Supported Platforms:\n ``Ascend`` ``GPU`` ``CPU``\n\n Examples:\n >>> input_x = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)\n >>> input_y = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)\n >>> output = ops.SameTypeShape()(input_x, input_y)\n >>> print(output)\n [[2. 2.]\n [2. 2.]]\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n \"\"\"Initialize Same\"\"\"\n\n def __call__(self, x, y):\n \"\"\"run in PyNative mode\"\"\"\n validator.check_value_type('x', x, Tensor, self.name)\n validator.check_value_type('y', y, Tensor, self.name)\n validator.check('x dtype', x.dtype, 'y dtype', y.dtype, Rel.EQ, self.name, TypeError)\n validator.check('x shape', x.shape, 'y shape', y.shape, Rel.EQ, self.name)\n return x\n\n def __infer__(self, x, y):\n validator.check_subclass('x', x['dtype'], mstype.tensor, self.name)\n validator.check_subclass('y', y['dtype'], mstype.tensor, self.name)\n validator.check('x dtype', x['dtype'], 'y dtype', y['dtype'], Rel.EQ, self.name, TypeError)\n validator.check('x shape', x['shape'], 'y shape', y['shape'], Rel.EQ, self.name)\n return x\n\n\nclass Cast(PrimitiveWithInfer):\n \"\"\"\n Returns a tensor with the new specified data type.\n\n Inputs:\n - **input_x** (Union[Tensor, Number]) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.\n The tensor to be cast.\n - **type** (dtype.Number) - The valid data type of the output tensor. 
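For instance, passing mindspore.int32 here (as in the example below) casts a float32 tensor to int32. 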
Only constant value is allowed.\n\n Outputs:\n Tensor, the shape of tensor is the same as `input_x`, :math:`(x_1, x_2, ..., x_R)`.\n\n Raises:\n TypeError: If `input_x` is neither Tensor nor Number.\n TypeError: If `type` is not a Number.\n\n Supported Platforms:\n ``Ascend`` ``GPU`` ``CPU``\n\n Examples:\n >>> input_np = np.random.randn(2, 3, 4, 5).astype(np.float32)\n >>> input_x = Tensor(input_np)\n >>> type_dst = mindspore.int32\n >>> cast = ops.Cast()\n >>> output = cast(input_x, type_dst)\n >>> print(output.dtype)\n Int32\n >>> print(output.shape)\n (2, 3, 4, 5)\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n # if primitive need setattr in __infer__ need add this flag\n \"\"\"Initialize Cast\"\"\"\n self.init_prim_io_names(inputs=['x', 'dst_type'], outputs=['output'])\n\n def check_elim(self, x, dtype):\n if isinstance(x, (Tensor, numbers.Number, Parameter)):\n if isinstance(x, Parameter):\n data = x.data\n if data.dtype == dtype:\n return (True, x)\n if isinstance(x, Tensor) and x.dtype == dtype:\n x = Tensor(x)\n x.set_cast_dtype()\n return (True, x)\n if isinstance(x, numbers.Number):\n return (True, Tensor(x, dtype=dtype))\n return (False, None)\n\n def __infer__(self, x, t):\n src_type = x['dtype']\n dst_type = t['value']\n\n validator.check_subclass(\"input_x\", src_type, [mstype.tensor, mstype.number], self.name)\n validator.check_subclass(\"type\", dst_type, mstype.number, self.name)\n\n if isinstance(src_type, type(mstype.tensor)):\n src_type = x['dtype'].element_type()\n if isinstance(dst_type, type(mstype.tensor)):\n dst_type = dst_type.element_type()\n self.add_prim_attr('DstT', dst_type)\n self.add_prim_attr('SrcT', src_type)\n self.add_prim_attr('dst_type', dst_type)\n\n value = None\n if x['value'] is not None:\n np_dst_type = mstype.dtype_to_nptype(dst_type)\n if isinstance(x['value'], (int, float)):\n value = Tensor(np.array(x['value']).astype(np_dst_type))\n else:\n value = Tensor(x['value'].asnumpy().astype(np_dst_type))\n\n out = {'shape': x['shape'],\n 'dtype': mstype.tensor_type(t['value']),\n 'value': value}\n if 'min_shape' in x and 'max_shape' in x:\n out['min_shape'] = x['min_shape']\n out['max_shape'] = x['max_shape']\n return out\n\n\nclass IsSubClass(PrimitiveWithInfer):\n \"\"\"\n Checks whether this type is a sub-class of another type.\n\n Inputs:\n - **sub_type** (mindspore.dtype) - The type to be checked. Only constant value is allowed.\n - **type_** (mindspore.dtype) - The target type. Only constant value is allowed.\n\n Outputs:\n bool, the check result.\n\n Raises:\n TypeError: If `sub_type` or `type_` is not a Type.\n\n Supported Platforms:\n ``Ascend`` ``GPU`` ``CPU``\n\n Examples:\n >>> output = ops.IsSubClass()(mindspore.int32, mindspore.intc)\n >>> print(output)\n True\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n pass\n\n def __infer__(self, sub_type, type_):\n sub_type_t = sub_type['value']\n type_v = type_['value']\n\n validator.check_value_type(\"sub_type\", sub_type_t, [mstype.Type], self.name)\n validator.check_value_type(\"type_\", type_v, [mstype.Type], self.name)\n\n value = mstype.issubclass_(sub_type_t, type_v)\n\n out = {'shape': (),\n 'dtype': mstype.type_type,\n 'value': value}\n return out\n\n\nclass IsInstance(PrimitiveWithInfer):\n \"\"\"\n Checks whether an object is an instance of a target type.\n\n Inputs:\n - **inst** (Any Object) - The instance to be checked. Only constant value is allowed.\n - **type_** (mindspore.dtype) - The target type. 
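For example, checking the Python number 1 against mindspore.int32 returns False, as the example below shows. 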
Only constant value is allowed.\n\n Outputs:\n bool, the check result.\n\n Raises:\n TypeError: If `type_` is not a Type.\n\n Supported Platforms:\n ``Ascend`` ``GPU`` ``CPU``\n\n Examples:\n >>> inst = 1\n >>> output = ops.IsInstance()(inst, mindspore.int32)\n >>> print(output)\n False\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n pass\n\n def __infer__(self, inst, type_):\n sub_type_t = inst['dtype']\n type_v = type_['value']\n\n validator.check_value_type(\"type_\", type_v, [mstype.Type], self.name)\n\n if type_v == mstype.list_:\n value = isinstance(sub_type_t, list)\n elif type_v == mstype.tuple_:\n value = isinstance(sub_type_t, tuple)\n else:\n value = mstype.issubclass_(sub_type_t, type_v)\n\n out = {'shape': (),\n 'dtype': mstype.type_type,\n 'value': value}\n return out\n\n\nclass Reshape(PrimitiveWithInfer):\n \"\"\"\n Reshapes the input tensor with the same values based on a given shape tuple.\n\n The 'input_shape' can contain at most one -1, in which case it is inferred from the remaining dimensions and\n the number of elements in the input.\n\n Inputs:\n - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.\n - **input_shape** (tuple[int]) - The input tuple is constructed by multiple\n integers, i.e., :math:`(y_1, y_2, ..., y_S)`. Only constant value is allowed.\n\n Outputs:\n Tensor, the shape of tensor is :math:`(y_1, y_2, ..., y_S)`.\n\n Raises:\n ValueError: If the given shape tuple contains more than one -1, if the product of its\n elements is less than or equal to 0, or if the resulting shape does not match the\n total number of elements of `input_x`.\n\n Supported Platforms:\n ``Ascend`` ``GPU`` ``CPU``\n\n Examples:\n >>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)\n >>> reshape = ops.Reshape()\n >>> output = reshape(input_x, (3, 2))\n >>> print(output)\n [[-0.1 0.3]\n [ 3.6 0.4]\n [ 0.5 -3.2]]\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n \"\"\"Initialize Reshape\"\"\"\n self.init_prim_io_names(inputs=['tensor', 'shape'], outputs=['output'])\n\n def _get_shape_and_range(self, x, shape):\n \"\"\"Get min and max shape when output shape is dynamic.\"\"\"\n min_shape = None\n max_shape = None\n x_shp = x['shape']\n if is_shape_unknown(shape['shape']):\n out_shape = [-2]\n return out_shape, min_shape, max_shape\n\n shape_rank = shape['shape'][0]\n if not x_shp:\n # x is a scalar, output shape fixed\n out_shape = [1] * shape_rank\n return out_shape, min_shape, max_shape\n\n out_shape = [-1] * shape_rank\n if \"max_value\" in shape and \"min_value\" in shape:\n min_shape = shape[\"min_value\"]\n max_shape = shape[\"max_value\"]\n if len(min_shape) != shape_rank or len(max_shape) != shape_rank:\n raise RuntimeError(\"The primitive[Reshape]'s input[shape] min or max value does not match the shape rank.\")\n for i in range(shape_rank):\n if min_shape[i] == max_shape[i]:\n out_shape[i] = min_shape[i]\n elif is_shape_unknown(x_shp) and \"max_shape\" in x:\n # when dynamic memory allocation is supported, max_shape can be left out\n min_shape = [1] * shape_rank\n max_shape = [int(np.prod(x[\"max_shape\"]))] * shape_rank\n return out_shape, min_shape, max_shape\n\n def __infer__(self, x, shape):\n shape_v = shape['value']\n x_shp = x['shape']\n validator.check_subclass(\"x\", x['dtype'], mstype.tensor, self.name)\n # shape is not a constant\n if shape_v is None:\n out_shape, min_shape, max_shape = self._get_shape_and_range(x, shape)\n if is_shape_unknown(out_shape):\n # 
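added note: fall back to rank-length placeholder bounds here, since\n # 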
`min_shape` and `max_shape` can't be None before dynamic memory allocation is supported\n shape_shp = shape['shape']\n shape_rank = 1 if is_shape_unknown(shape_shp) else shape_shp[0]\n min_shape = [1] * shape_rank if min_shape is None else min_shape\n max_shape = [1] * shape_rank if max_shape is None else max_shape\n return {\n 'shape': out_shape,\n 'dtype': x['dtype'],\n 'value': None,\n 'max_shape': max_shape,\n 'min_shape': min_shape\n }\n\n if isinstance(shape_v, Tensor_):\n validator.check_tensor_dtype_valid(\"shape\", shape['dtype'], [mstype.int64], self.name)\n shape_v = shape_v.asnumpy().tolist()\n else:\n validator.check_value_type(\"shape\", shape_v, [tuple], self.name)\n shape_v = list(shape_v)\n\n neg_index = -1\n dim_prod = 1\n for i, shp_i in enumerate(shape_v):\n validator.check_value_type(\"shape[%d]\" % i, shp_i, [int], self.name)\n if shp_i == -1:\n if neg_index != -1:\n raise ValueError(f\"For '{self.name}', there can be at most one '-1' in 'input_shape', \"\n f\"but got {shape_v}.\")\n neg_index = i\n else:\n dim_prod *= shp_i\n\n if is_shape_unknown(x_shp):\n if 'max_shape' in x:\n x_max_shape = x['max_shape']\n else:\n x_max_shape = x['shape']\n if 'min_shape' in x:\n x_min_shape = x['min_shape']\n else:\n x_min_shape = x['shape']\n max_arr_prod = np.prod(x_max_shape)\n min_arr_prod = np.prod(x_min_shape)\n max_shape = list(shape_v)\n min_shape = list(shape_v)\n if neg_index != -1:\n max_shape[neg_index] = int(max_arr_prod / dim_prod)\n min_shape[neg_index] = int(min_arr_prod / dim_prod)\n\n out = {'shape': shape_v,\n 'dtype': x['dtype'],\n 'value': None,\n 'max_shape': tuple(max_shape),\n 'min_shape': tuple(min_shape)}\n else:\n arr_prod = np.prod(x_shp)\n if dim_prod <= 0:\n raise ValueError(f\"For '{self.name}', the shape of 'input_x' is {x_shp}, \"\n f\"the value of 'input_shape' is {shape_v}. \"\n f\"The product of 'input_shape' should be > 0, but got {dim_prod}.\")\n if neg_index != -1:\n shape_v[neg_index] = int(arr_prod / dim_prod)\n dim_prod *= shape_v[neg_index]\n if dim_prod != arr_prod:\n raise ValueError(f\"For '{self.name}', the shape of 'input_x' is {x_shp}, \"\n f\"the value of 'input_shape' is {shape_v}. \"\n f\"The product of the shape of 'input_x' should be equal to the product of 'input_shape', \"\n f\"but the product of the shape of 'input_x' is {arr_prod} and \"\n f\"the product of 'input_shape' is {dim_prod}.\")\n value = None\n if x['value'] is not None:\n value = Tensor(x['value'].asnumpy().reshape(shape_v))\n\n out = {'shape': tuple(shape_v),\n 'dtype': x['dtype'],\n 'value': value}\n return out\n\n\nclass Shape(Primitive):\n \"\"\"\n Returns the shape of the input tensor. The result is a static shape.\n\n static shape: A shape that can be obtained without running the graph. It is an inherent property of the tensor and\n may be unknown. 
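For example, a Tensor created from np.ones(shape=[3, 2, 1]) has the fully known static shape (3, 2, 1), as the example below shows. 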
Missing static shape information can be filled in manually.\n The static shape is not affected by whatever input the graph actually receives.\n\n Inputs:\n - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.\n\n Outputs:\n tuple[int], the output tuple is constructed by multiple integers,\n :math:`(x_1, x_2, ..., x_R)`.\n\n Raises:\n TypeError: If `input_x` is not a Tensor.\n\n Supported Platforms:\n ``Ascend`` ``GPU`` ``CPU``\n\n Examples:\n >>> input_x = Tensor(np.ones(shape=[3, 2, 1]), mindspore.float32)\n >>> shape = ops.Shape()\n >>> output = shape(input_x)\n >>> print(output)\n (3, 2, 1)\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n \"\"\"Initialize Shape\"\"\"\n\n\nclass DynamicShape(Primitive):\n \"\"\"\n Returns the shape of the input tensor. The result is a dynamic shape, evaluated at run time.\n\n Note:\n Dynamic shape: While the graph is running, as the tensor flows through the graph, the specific shape of the tensor\n on each node of the graph can be inferred according to the structure of the graph.\n This shape is called a dynamic shape. As the input shape of the graph differs,\n the dynamic shape of the tensor in the graph will change.\n\n Inputs:\n - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.\n\n Outputs:\n Tensor[int], 1-dim Tensor of type int32\n\n Raises:\n TypeError: If `input_x` is not a Tensor.\n\n Supported Platforms:\n ``Ascend`` ``GPU`` ``CPU``\n\n Examples:\n >>> input_x = Tensor(np.ones(shape=[3, 2, 1]), mindspore.float32)\n >>> shape = ops.DynamicShape()\n >>> output = shape(input_x)\n >>> print(output)\n [3 2 1]\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n \"\"\"Initialize DynamicShape\"\"\"\n self.init_prim_io_names(inputs=['tensor'], outputs=['output'])\n self.add_prim_attr('is_dynamic_shape', True)\n\n\nclass Squeeze(PrimitiveWithInfer):\n \"\"\"\n Returns a tensor with the same data type but dimensions of 1 are removed based on `axis`.\n\n If `axis` is specified, it will remove the dimensions of size 1 in the given `axis`.\n If `axis` is None, it will remove all the dimensions of size 1.\n For example, if input is of shape: (A×1×B×C×1×D), then the out tensor will be of shape: (A×B×C×D);\n When `axis` is given, a squeeze operation is done only in the given dimension.\n If input is of shape: (A×1×B), squeeze(input, 0) leaves the tensor unchanged,\n but squeeze(input, 1) will squeeze the tensor to the shape (A×B).\n\n Please note that in dynamic graph mode, the output Tensor will share data with the input Tensor,\n and there is no Tensor data copy process.\n\n Note:\n The dimension index starts at 0 and must be in the range `[-input.ndim, input.ndim]`.\n\n Args:\n axis (Union[int, tuple(int)]): Specifies the dimension indexes of shape to be removed, which will remove\n all the dimensions that are equal to 1. If specified, it must be int32 or int64.\n Default: (), an empty tuple.\n\n Inputs:\n - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.\n\n Outputs:\n Tensor, the shape of tensor is :math:`(x_1, x_2, ..., x_S)`.\n\n Raises:\n TypeError: If `axis` is neither an int nor tuple.\n TypeError: If `axis` is a tuple whose elements are not all int.\n ValueError: If the corresponding dimension of the specified axis isn't equal to 1.\n\n Supported Platforms:\n ``Ascend`` ``GPU`` ``CPU``\n\n Examples:\n >>> input_x = Tensor(np.ones(shape=[3, 2, 1]), mindspore.float32)\n >>> squeeze = ops.Squeeze(2)\n >>> output = squeeze(input_x)\n >>> print(output)\n [[1. 1.]\n [1. 1.]\n [1. 
1.]]\n \"\"\"\n\n @prim_attr_register\n def __init__(self, axis=()):\n \"\"\"Initialize Squeeze\"\"\"\n self.init_prim_io_names(inputs=['x'], outputs=['output'])\n validator.check_value_type('axis', axis, [int, tuple], self.name)\n if isinstance(axis, tuple):\n for idx, item in enumerate(axis):\n validator.check_value_type(\"axis[%d]\" % idx, item, [int], self.name)\n else:\n self.axis = (axis,)\n self.add_prim_attr(\"axis\", (axis,))\n\n\nclass Transpose(Primitive):\n \"\"\"\n Permutes the dimensions of the input tensor according to input permutation.\n\n For a 1-D array this has no effect, as a transposed vector is simply the same vector.\n To convert a 1-D array into a 2-D column vector, please refer to the class mindspore.ops.ExpandDims.\n For a 2-D array, this is a standard matrix transpose. For an n-D array, if axes are given,\n their order indicates how the axes are permuted (see Examples).\n If axes are not provided and a.shape = (i[0], i[1], ... i[n-2], i[n-1]),\n then a.transpose().shape = (i[n-1], i[n-2], ... i[1], i[0]).\n\n Inputs:\n - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.\n - **input_perm** (tuple[int]) - The permutation to be applied. The elements in `input_perm` are composed of\n the indexes of each dimension of `input_x`. The length of `input_perm` and the shape of `input_x` must be\n the same. Only constant value is allowed. Must be in the range [0, rank(input_x)).\n\n Outputs:\n Tensor, the type of output tensor is the same as `input_x` and the shape of output tensor is decided by the\n shape of `input_x` and the value of `input_perm`.\n\n Raises:\n TypeError: If `input_perm` is not a tuple.\n ValueError: If length of shape of `input_x` is not equal to length of shape of `input_perm`.\n ValueError: If the same element exists in `input_perm`.\n\n Supported Platforms:\n ``Ascend`` ``GPU`` ``CPU``\n\n Examples:\n >>> input_x = Tensor(np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]), mindspore.float32)\n >>> input_perm = (0, 2, 1)\n >>> transpose = ops.Transpose()\n >>> output = transpose(input_x, input_perm)\n >>> print(output)\n [[[ 1. 4.]\n [ 2. 5.]\n [ 3. 6.]]\n [[ 7. 10.]\n [ 8. 11.]\n [ 9. 12.]]]\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n \"\"\"Initialize Transpose\"\"\"\n self.init_prim_io_names(inputs=['x', 'perm'], outputs=['output'])\n\n\nclass Unique(Primitive):\n \"\"\"\n Returns the unique elements of the input tensor and also returns a tensor containing the index of each value of the\n input tensor corresponding to the output unique tensor.\n\n The output contains Tensor `y` and Tensor `idx`, returned in the format (`y`, `idx`).\n The shape of Tensor `y` and Tensor `idx` is different in most cases, because Tensor `y` will be deduplicated,\n and the shape of Tensor `idx` is consistent with the input.\n\n To get the same shape between `idx` and `y`, please refer to the 'UniqueWithPad' operator.\n\n Inputs:\n - **input_x** (Tensor) - The input tensor.\n The shape is :math:`(N,*)` where :math:`*` means any number of additional dimensions.\n\n Outputs:\n Tuple, containing Tensor objects (`y`, `idx`), `y` is a tensor with the\n same type as `input_x`, and contains the unique elements in `x`, sorted in\n ascending order. 
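For input [1, 2, 5, 2] (see the examples below), `y` is [1, 2, 5]. 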
`idx` is a tensor containing indices of elements in\n the input corresponding to the output tensor.\n\n Raises:\n TypeError: If `input_x` is not a Tensor.\n\n Supported Platforms:\n ``Ascend`` ``GPU`` ``CPU``\n\n Examples:\n >>> input_x = Tensor(np.array([1, 2, 5, 2]), mindspore.int32)\n >>> output = ops.Unique()(input_x)\n >>> print(output)\n (Tensor(shape=[3], dtype=Int32, value= [1, 2, 5]), Tensor(shape=[4], dtype=Int32, value= [0, 1, 2, 1]))\n >>> y = output[0]\n >>> print(y)\n [1 2 5]\n >>> idx = output[1]\n >>> print(idx)\n [0 1 2 1]\n >>> # As can be seen from the above, y and idx have different shapes\n >>> # note that for GPU, this operator must be wrapped inside a model, and executed in graph mode.\n >>> class UniqueNet(nn.Cell):\n ... def __init__(self):\n ... super(UniqueNet, self).__init__()\n ... self.unique_op = ops.Unique()\n ...\n ... def construct(self, x):\n ... output, indices = self.unique_op(x)\n ... return output, indices\n ...\n >>> input_x = Tensor(np.array([1, 2, 5, 2]), mindspore.int32)\n >>> net = UniqueNet()\n >>> output = net(input_x)\n >>> print(output)\n (Tensor(shape=[3], dtype=Int32, value= [1, 2, 5]), Tensor(shape=[4], dtype=Int32, value= [0, 1, 2, 1]))\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n self.init_prim_io_names(inputs=['x'], outputs=['output'])\n\n\nclass Gather(Primitive):\n r\"\"\"\n Returns a slice of the input tensor based on the specified indices and axis.\n\n Slices the input tensor based on the indices at the specified axis. See the following examples for details.\n\n Inputs:\n - **input_params** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.\n The original Tensor.\n - **input_indices** (Tensor) - The shape of tensor is :math:`(y_1, y_2, ..., y_S)`.\n Specifies the indices of elements of the original Tensor. Must be in the range\n `[0, input_param.shape[axis])`, which is only validated on CPU. The data type can be int32 or int64.\n - **axis** (int) - Specifies the dimension index to gather indices.\n\n Outputs:\n Tensor, the shape of tensor is\n :math:`input\_params.shape[:axis] + input\_indices.shape + input\_params.shape[axis + 1:]`.\n\n Raises:\n TypeError: If `axis` is not an int.\n TypeError: If `input_indices` is not an int.\n\n Supported Platforms:\n ``Ascend`` ``GPU`` ``CPU``\n\n Examples:\n >>> input_params = Tensor(np.array([[1, 2, 7, 42], [3, 4, 54, 22], [2, 2, 55, 3]]), mindspore.float32)\n >>> input_indices = Tensor(np.array([1, 2]), mindspore.int32)\n >>> axis = 1\n >>> output = ops.Gather()(input_params, input_indices, axis)\n >>> print(output)\n [[ 2. 7.]\n [ 4. 54.]\n [ 2. 55.]]\n >>> axis = 0\n >>> output = ops.Gather()(input_params, input_indices, axis)\n >>> print(output)\n [[3. 4. 54. 22.]\n [2. 2. 55. 3.]]\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n \"\"\"Initialize Gather\"\"\"\n self.init_prim_io_names(inputs=['params', 'indices', 'axis'], outputs=['output'])\n\n\nclass GatherV2(PrimitiveWithCheck):\n \"\"\"\n Same as operator Gather. 
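It is kept only for backward compatibility. 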
GatherV2 will be deprecated in the future.\n Please use Gather instead.\n \"\"\"\n\n @deprecated(\"1.1\", \"Gather\", True)\n @prim_attr_register\n def __init__(self):\n \"\"\"Initialize GatherV2\"\"\"\n self.init_prim_io_names(inputs=['params', 'indices', 'axis'], outputs=['output'])\n\n def __check__(self, params, indices, axis):\n validator.check_subclass(\"params\", params['dtype'], mstype.tensor, self.name)\n validator.check_tensor_dtype_valid(\"indices\", indices['dtype'], mstype.int_type, self.name)\n validator.check_subclass(\"axis\", axis['dtype'], [mstype.number], self.name)\n axis_v = axis['value']\n validator.check_value_type('axis', axis_v, [int], self.name)\n rank = len(params['shape'])\n validator.check_int_range(axis_v, -rank, rank, Rel.INC_LEFT, \"axis\", self.name)\n\n\nclass SparseGatherV2(PrimitiveWithCheck):\n \"\"\"\n Returns a slice of the input tensor based on the specified indices and axis.\n\n Inputs:\n - **input_params** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.\n - **input_indices** (Tensor) - The shape of tensor is :math:`(y_1, y_2, ..., y_S)`.\n Specifies the indices of elements of the original Tensor, must be in the range\n `[0, input_param.shape[axis])`.\n - **axis** (int) - Specifies the dimension index to gather indices.\n\n Outputs:\n Tensor, the shape of tensor is :math:`(z_1, z_2, ..., z_N)`.\n\n Raises:\n TypeError: If `axis` is not an int.\n\n Supported Platforms:\n ``Ascend`` ``GPU``\n\n Examples:\n >>> input_params = Tensor(np.array([[1, 2, 7, 42], [3, 4, 54, 22], [2, 2, 55, 3]]), mindspore.float32)\n >>> input_indices = Tensor(np.array([1, 2]), mindspore.int32)\n >>> axis = 1\n >>> out = ops.SparseGatherV2()(input_params, input_indices, axis)\n >>> print(out)\n [[2. 7.]\n [4. 54.]\n [2. 55.]]\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n \"\"\"Initialize SparseGatherV2\"\"\"\n self.init_prim_io_names(inputs=['params', 'indices', 'axis'], outputs=['output'])\n\n def __check__(self, params, indices, axis):\n validator.check_subclass(\"params\", params['dtype'], mstype.tensor, self.name)\n validator.check_tensor_dtype_valid(\"indices\", indices['dtype'], mstype.int_type, self.name)\n validator.check_subclass(\"axis\", axis['dtype'], [mstype.number], self.name)\n axis_v = axis['value']\n validator.check_value_type('axis', axis_v, [int], self.name)\n rank = len(params['shape'])\n validator.check_int_range(axis_v, -rank, rank, Rel.INC_LEFT, \"axis\", self.name)\n\n\nclass Padding(PrimitiveWithInfer):\n \"\"\"\n Extends the last dimension of the input tensor from 1 to `pad_dim_size` by filling with 0.\n\n Args:\n pad_dim_size (int): The value of the last dimension of `x` to be extended, which must be positive. Default: 8.\n\n Inputs:\n - **x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`. The rank of `x` must be at least 2.\n The last dimension of `x` must be 1. The data type is Number.\n\n Outputs:\n Tensor, the shape of tensor is :math:`(z_1, z_2, ..., z_N)`.\n\n Raises:\n TypeError: If `pad_dim_size` is not an int.\n ValueError: If `pad_dim_size` is less than 1.\n ValueError: If the last dim of `x` is not equal to 1.\n\n Supported Platforms:\n ``Ascend``\n\n Examples:\n >>> x = Tensor(np.array([[8], [10]]), mindspore.float32)\n >>> pad_dim_size = 4\n >>> output = ops.Padding(pad_dim_size)(x)\n >>> print(output)\n [[ 8. 0. 0. 0.]\n [10. 0. 0. 
0.]]\n \"\"\"\n\n @prim_attr_register\n def __init__(self, pad_dim_size=8):\n \"\"\"Initialize padding\"\"\"\n validator.check_value_type(\"pad_dim_size\", pad_dim_size, [int], self.name)\n validator.check_positive_int(pad_dim_size, \"pad_dim_size\", self.name)\n self.pad_dim_size = pad_dim_size\n\n def __infer__(self, x):\n validator.check_subclass(\"x\", x['dtype'], mstype.tensor, self.name)\n x_shape = list(x['shape'])\n validator.check_int(len(x_shape), 1, Rel.GT, \"rank of x\", self.name)\n validator.check_int(x_shape[-1], 1, Rel.EQ, \"last dim of x\", self.name)\n out_shape = x_shape\n out_shape[-1] = self.pad_dim_size\n out = {'shape': out_shape,\n 'dtype': x['dtype'],\n 'value': None}\n return out\n\n\nclass UniqueWithPad(PrimitiveWithInfer):\n \"\"\"\n Returns unique elements and relative indexes in 1-D tensor, filled with padding num.\n\n The basic function is the same as the Unique operator, but the UniqueWithPad operator adds a Pad function.\n The returned tuple(`y`, `idx`) after the input Tensor `x` is processed by the unique operator,\n in which the shapes of `y` and `idx` are mostly not equal. Therefore, in order to solve the above situation,\n the UniqueWithPad operator will fill the `y` Tensor with the `pad_num` specified by the user\n to make it have the same shape as the Tensor `idx`.\n\n Inputs:\n - **x** (Tensor) - The tensor need to be unique. Must be 1-D vector with types: int32, int64.\n - **pad_num** (int) - Pad num. The data type is an int.\n\n Outputs:\n tuple(Tensor), tuple of 2 tensors, `y` and `idx`.\n - y (Tensor) - The unique elements filled with pad_num, the shape and data type same as `x`.\n - idx (Tensor) - The index of each value of `x` in the unique output `y`, the shape and data type same as `x`.\n\n Raises:\n TypeError: If dtype of `x` is neither int32 nor int64.\n ValueError: If length of shape of `x` is not equal to 1.\n\n Supported Platforms:\n ``Ascend`` ``CPU``\n\n Examples:\n >>> x = Tensor(np.array([1, 1, 5, 5, 4, 4, 3, 3, 2, 2,]), mindspore.int32)\n >>> pad_num = 8\n >>> output = ops.UniqueWithPad()(x, pad_num)\n >>> print(output)\n (Tensor(shape=[10], dtype=Int32, value= [1, 5, 4, 3, 2, 8, 8, 8, 8, 8]),\n Tensor(shape=[10], dtype=Int32, value= [0, 0, 1, 1, 2, 2, 3, 3, 4, 4]))\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n \"\"\"init UniqueWithPad\"\"\"\n\n def __infer__(self, x, pad_num):\n validator.check_tensor_dtype_valid(\"x\", x['dtype'], [mstype.int32, mstype.int64], self.name)\n validator.check_subclass(\"pad_num\", pad_num['dtype'], [mstype.int32, mstype.int64], self.name)\n x_shape = list(x['shape'])\n validator.check(\"rank of x\", len(x_shape), \"expected\", 1, Rel.EQ, self.name)\n out_shape = x_shape\n out = {'shape': (out_shape, out_shape),\n 'dtype': (x['dtype'], x['dtype']),\n 'value': None}\n return out\n\n\nclass Split(PrimitiveWithCheck):\n \"\"\"\n Splits the input tensor into output_num of tensors along the given axis and output numbers.\n\n The `input_x` tensor will be split into equally sized sub-tensors.\n This requires that `input_x.shape(axis)` is divisible by `output_num`.\n\n Args:\n axis (int): Index of the split position. Default: 0.\n output_num (int): The number of output tensors. Must be positive int. Default: 1.\n\n Inputs:\n - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.\n\n Outputs:\n tuple[Tensor], the shape of each output tensor is the same, which is\n :math:`(y_1, y_2, ..., y_S)`. 
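Concretely, :math:`y_{axis} = x_{axis} / N`, where :math:`N` is `output_num`, and every other dimension is unchanged. 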
And the data type is the same with `input_x`.\n\n Raises:\n TypeError: If `axis` or `output_num` is not an int.\n ValueError: If `axis` is out of the range [-len(`input_x.shape`), len(`input_x.shape`)),\n or if the `output_num` is less than or equal to 0.\n\n Supported Platforms:\n ``Ascend`` ``GPU`` ``CPU``\n\n Examples:\n >>> split = ops.Split(1, 2)\n >>> x = Tensor(np.array([[1, 1, 1, 1], [2, 2, 2, 2]]), mindspore.int32)\n >>> print(x)\n [[1 1 1 1]\n [2 2 2 2]]\n >>> output = split(x)\n >>> print(output)\n (Tensor(shape=[2, 2], dtype=Int32, value=\n [[1, 1],\n [2, 2]]), Tensor(shape=[2, 2], dtype=Int32, value=\n [[1, 1],\n [2, 2]]))\n >>> split = ops.Split(1, 4)\n >>> output = split(x)\n >>> print(output)\n (Tensor(shape=[2, 1], dtype=Int32, value=\n [[1],\n [2]]), Tensor(shape=[2, 1], dtype=Int32, value=\n [[1],\n [2]]), Tensor(shape=[2, 1], dtype=Int32, value=\n [[1],\n [2]]), Tensor(shape=[2, 1], dtype=Int32, value=\n [[1],\n [2]]))\n \"\"\"\n\n @prim_attr_register\n def __init__(self, axis=0, output_num=1):\n \"\"\"Initialize Split\"\"\"\n validator.check_value_type(\"axis\", axis, [int], self.name)\n validator.check_value_type(\"output_num\", output_num, [int], self.name)\n validator.check_positive_int(output_num, \"output_num\", self.name)\n self.axis = axis\n self.output_num = output_num\n\n def __check__(self, x):\n validator.check_subclass(\"x\", x['dtype'], mstype.tensor, self.name)\n x_shape = list(x['shape'])\n dim = len(x_shape)\n validator.check_int_range(self.axis, -dim, dim, Rel.INC_LEFT, 'axis value', self.name)\n if -1 not in x_shape:\n # only validate when shape fully known\n output_valid_check = x_shape[self.axis] % self.output_num\n if output_valid_check != 0:\n raise ValueError(f\"For '{self.name}', the specified axis of 'input_x' should be divided exactly by \"\n f\"'output_num', but got the shape of 'input_x' in 'axis' {self.axis} is \"\n f\"{x_shape[self.axis]}, 'output_num': {self.output_num}.\")\n size_splits = [x_shape[self.axis] // self.output_num] * self.output_num\n self.add_prim_attr('size_splits', size_splits)\n\n\nclass Rank(PrimitiveWithInfer):\n \"\"\"\n Returns the rank of a tensor.\n\n Returns a 0-D int32 Tensor representing the rank of input; the rank of a tensor\n is the number of indices required to uniquely select each element of the tensor.\n\n Inputs:\n - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`. The data type is Number.\n\n Outputs:\n Tensor. 0-D int32 Tensor representing the rank of input, i.e., :math:`R`. The data type is an int.\n\n Raises:\n TypeError: If `input_x` is not a Tensor.\n\n Supported Platforms:\n ``Ascend`` ``GPU`` ``CPU``\n\n Examples:\n >>> input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)\n >>> rank = ops.Rank()\n >>> output = rank(input_tensor)\n >>> print(output)\n 2\n >>> print(type(output))\n <class 'int'>\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n \"\"\"Initialize Rank\"\"\"\n\n def __infer__(self, x):\n validator.check_subclass(\"x\", x['dtype'], mstype.tensor, self.name)\n out = {'shape': None,\n 'dtype': None,\n 'value': len(x['shape'])}\n return out\n\n\nclass TruncatedNormal(PrimitiveWithInfer):\n \"\"\"\n Returns a tensor of the specified shape filled with truncated normal values.\n\n The generated values follow a normal distribution.\n\n Args:\n seed (int): A integer number used to create random seed. Default: 0.\n dtype (:class:`mindspore.dtype`): Data type. 
Default: mindspore.float32.\n\n Inputs:\n - **shape** (tuple[int]) - The shape of the output tensor, is a tuple of positive integer.\n\n Outputs:\n Tensor, the data type of output tensor is the same as attribute `dtype`.\n\n Examples:\n >>> shape = (1, 2, 3)\n >>> truncated_normal = ops.TruncatedNormal()\n >>> output = truncated_normal(shape)\n \"\"\"\n\n @prim_attr_register\n def __init__(self, seed=0, dtype=mstype.float32):\n \"\"\"Initialize TruncatedNormal\"\"\"\n validator.check_value_type('seed', seed, [int], self.name)\n validator.check_types_same_and_valid({'dtype': dtype}, mstype.number_type, self.name)\n\n def __infer__(self, shape):\n shape_value = shape['value']\n validator.check_value_type(\"shape\", shape_value, [tuple], self.name)\n for i, value in enumerate(shape_value):\n validator.check_positive_int(value, f'{i}th value of shape', self.name)\n out = {'shape': shape_value,\n 'dtype': mstype.tensor_type(self.dtype),\n 'value': None}\n return out\n\n\nclass Size(PrimitiveWithInfer):\n r\"\"\"\n Returns the size of a Tensor.\n\n Returns an int scalar representing the elements' size of input, the total number of elements in the tensor.\n\n Inputs:\n - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`. The data type is Number.\n\n Outputs:\n int. A scalar representing the elements' size of `input_x`, tensor is the number of elements\n in a tensor, :math:`size=x_1*x_2*...x_R`. The data type is an int.\n\n Raises:\n TypeError: If `input_x` is not a Tensor.\n\n Supported Platforms:\n ``Ascend`` ``GPU`` ``CPU``\n\n Examples:\n >>> input_x = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)\n >>> size = ops.Size()\n >>> output = size(input_x)\n >>> print(output)\n 4\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n \"\"\"Initialize Size\"\"\"\n\n def __infer__(self, x):\n size = 1\n validator.check_subclass(\"x\", x['dtype'], mstype.tensor, self.name)\n shp = x['shape']\n if not shp:\n size = 0\n else:\n size = functools.reduce(lambda x, y: x * y, x['shape'])\n out = {'shape': None,\n 'dtype': mstype.int64,\n 'value': size}\n return out\n\n\nclass Fill(PrimitiveWithInfer):\n \"\"\"\n Creates a tensor filled with a scalar value.\n\n Creates a tensor with shape described by the first argument and fills it with values in the second argument.\n\n Inputs:\n - **type** (mindspore.dtype) - The specified type of output tensor. Only constant value is allowed.\n - **shape** (tuple) - The specified shape of output tensor. Only constant value is allowed.\n - **value** (scalar) - Value to fill the returned tensor. Only constant value is allowed.\n\n Outputs:\n Tensor, has the same type and shape as input value.\n\n Raises:\n TypeError: If `shape` is not a tuple.\n\n Supported Platforms:\n ``Ascend`` ``GPU`` ``CPU``\n\n Examples:\n >>> fill = ops.Fill()\n >>> output = fill(mindspore.float32, (2, 2), 1)\n >>> print(output)\n [[1. 1.]\n [1. 1.]]\n >>> output = fill(mindspore.float32, (3, 3), 0)\n >>> print(output)\n [[0. 0. 0.]\n [0. 0. 0.]\n [0. 0. 
0.]]\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n \"\"\"Initialize Fill\"\"\"\n\n def __infer__(self, dtype, dims, x):\n validator.check_value_type(\"shape\", dims['value'], [tuple], self.name)\n validator.check_value_type(\"value\", x['value'], [numbers.Number, bool], self.name)\n for i, item in enumerate(dims['value']):\n validator.check_positive_int(item, f'dims[{i}]', self.name)\n valid_dtypes = [mstype.bool_, mstype.int8, mstype.int16, mstype.int32, mstype.int64,\n mstype.uint8, mstype.uint16, mstype.uint32, mstype.uint64,\n mstype.float16, mstype.float32, mstype.float64, mstype.complex64,\n mstype.complex128]\n validator.check_types_same_and_valid({\"value\": dtype['value']}, valid_dtypes, self.name)\n x_nptype = mstype.dtype_to_nptype(dtype['value'])\n ret = np.full(dims['value'], x['value'], x_nptype)\n out = {\n 'value': Tensor(ret),\n 'shape': dims['value'],\n 'dtype': x['dtype'],\n }\n return out\n\n\nclass Ones(Primitive):\n r\"\"\"\n Creates a tensor filled with value ones.\n\n Creates a tensor with shape described by the first argument and\n fills it with value ones in type of the second argument.\n\n Inputs:\n - **shape** (Union[tuple[int], int]) - The specified shape of output tensor.\n Only constant positive int is allowed.\n - **type** (mindspore.dtype) - The specified type of output tensor. Only constant value is allowed.\n\n Outputs:\n Tensor, has the same type and shape as input shape value.\n\n Raises:\n TypeError: If `shape` is neither tuple nor int.\n\n Supported Platforms:\n ``Ascend`` ``GPU`` ``CPU``\n\n Examples:\n >>> ones = ops.Ones()\n >>> output = ones((2, 2), mindspore.float32)\n >>> print(output)\n [[1. 1.]\n [1. 1.]]\n >>> output = ones((3, 3), mindspore.float32)\n >>> print(output)\n [[1. 1. 1.]\n [1. 1. 1.]\n [1. 1. 1.]]\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n \"\"\"Initialize Ones\"\"\"\n\n\nclass Zeros(Primitive):\n r\"\"\"\n Creates a tensor filled with value zeros.\n\n Creates a tensor with shape described by the first argument and\n fills it with value zeros in type of the second argument.\n\n Inputs:\n - **shape** (Union[tuple[int], int]) - The specified shape of output tensor.\n Only constant positive int is allowed.\n - **type** (mindspore.dtype) - The specified type of output tensor. Only constant value is allowed.\n\n Outputs:\n Tensor, has the same type and shape as input shape value.\n\n Raises:\n TypeError: If `shape` is neither int nor tuple.\n TypeError: If `shape` is a tuple whose elements are not all int.\n\n Supported Platforms:\n ``Ascend`` ``GPU`` ``CPU``\n\n Examples:\n >>> zeros = ops.Zeros()\n >>> output = zeros((2, 2), mindspore.float32)\n >>> print(output)\n [[0. 0.]\n [0. 0.]]\n\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n \"\"\"Initialize Zeros\"\"\"\n\n\nclass OnesLike(Primitive):\n \"\"\"\n Creates a new tensor. 
The values of all elements are 1.\n\n Returns a tensor of ones with the same shape and type as the input.\n\n Inputs:\n - **input_x** (Tensor) - Input tensor.\n The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.\n\n Outputs:\n Tensor, has the same shape and type as `input_x` but filled with ones.\n\n Raises:\n TypeError: If `input_x` is not a Tensor.\n\n Supported Platforms:\n ``Ascend`` ``GPU`` ``CPU``\n\n Examples:\n >>> oneslike = ops.OnesLike()\n >>> input_x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32))\n >>> output = oneslike(input_x)\n >>> print(output)\n [[1 1]\n [1 1]]\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n \"\"\"Initialize OnesLike\"\"\"\n\n\nclass ZerosLike(Primitive):\n \"\"\"\n Creates a new tensor. All elements value are 0.\n\n Returns a tensor of zeros with the same shape and data type as the input tensor.\n\n Inputs:\n - **input_x** (Tensor) - Input tensor. The data type is int32, int64, float16 or float32.\n The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.\n\n Outputs:\n Tensor, has the same shape and data type as `input_x` but filled with zeros.\n\n Raises:\n TypeError: If `input_x` is not a Tensor.\n\n Supported Platforms:\n ``Ascend`` ``GPU`` ``CPU``\n\n Examples:\n >>> zeroslike = ops.ZerosLike()\n >>> input_x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))\n >>> output = zeroslike(input_x)\n >>> print(output)\n [[0. 0.]\n [0. 0.]]\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n \"\"\"Initialize ZerosLike\"\"\"\n self.init_prim_io_names(inputs=['x'], outputs=['y'])\n\n\nclass TupleToArray(PrimitiveWithInfer):\n \"\"\"\n Converts a tuple to a tensor.\n\n If the type of the first number in the tuple is integer, the data type of the output tensor is int.\n Otherwise, the data type of the output tensor is float.\n\n Inputs:\n - **input_x** (tuple) - A tuple of numbers. These numbers have the same type. 
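Mixed types such as ``(1, 2.0)`` raise a TypeError. 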
Only constant value is allowed.\n The shape is :math:`(N,*)` where :math:`*` means,any number of additional dimensions.\n\n Outputs:\n Tensor, if the input tuple contains `N` numbers, then the shape of the output tensor is (N,).\n\n Raises:\n TypeError: If `input_x` is not a tuple.\n ValueError: If length of `input_x` is less than or equal to 0.\n\n Supported Platforms:\n ``Ascend`` ``GPU`` ``CPU``\n\n Examples:\n >>> input_x = (1,2,3)\n >>> print(type(input_x))\n <class 'tuple'>\n >>> output = ops.TupleToArray()(input_x)\n >>> print(type(output))\n <class 'mindspore.common.tensor.Tensor'>\n >>> print(output)\n [1 2 3]\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n \"\"\"Initialize TupleToArray\"\"\"\n\n def infer_value(self, x):\n validator.check_value_type(\"x\", x, [tuple], self.name)\n validator.check(\"size of x\", len(x), '', 0, Rel.GT, self.name)\n dtype = type(x[0])\n for i, item in enumerate(x):\n validator.check_value_type(f\"x[{i}]\", item, [numbers.Number], self.name)\n if not all(isinstance(item, dtype) for item in x):\n raise TypeError(f\"For \\'{self.name}\\', all elements of 'input_x' must be have same type.\")\n if isinstance(x[0], int):\n ret = np.array(x, np.int32)\n else:\n ret = np.array(x, np.float32)\n return Tensor(ret)\n\n def __call__(self, x):\n args = list()\n if isinstance(x, range):\n args.append(tuple(x))\n else:\n args.append(x)\n return _run_op(self, self.name, args)\n\n\nclass ScalarToArray(PrimitiveWithInfer):\n \"\"\"\n Converts a scalar to a `Tensor`.\n\n Inputs:\n - **input_x** (Union[int, float]) - The input is a scalar. Only constant value is allowed.\n\n Outputs:\n Tensor. 0-D Tensor and the content is the input.\n\n Raises:\n TypeError: If `input_x` is neither int nor float.\n\n Supported Platforms:\n ``Ascend`` ``GPU`` ``CPU``\n\n Examples:\n >>> op = ops.ScalarToArray()\n >>> input_x = 1.0\n >>> print(type(input_x))\n <class 'float'>\n >>> output = op(input_x)\n >>> print(type(output))\n <class 'mindspore.common.tensor.Tensor'>\n >>> print(output)\n 1.0\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n pass\n\n def infer_value(self, x):\n validator.check_value_type(\"x\", x, [int, float], self.name)\n if isinstance(x, int):\n ret = np.array(x, np.int32)\n else:\n ret = np.array(x, np.float32)\n return Tensor(ret)\n\n\nclass ScalarToTensor(PrimitiveWithInfer):\n \"\"\"\n Converts a scalar to a `Tensor`, and converts the data type to the specified type.\n\n Inputs:\n - **input_x** (Union[int, float]) - The input is a scalar. Only constant value is allowed.\n - **dtype** (mindspore.dtype) - The target data type. Default: mindspore.float32. Only\n constant value is allowed.\n\n Outputs:\n Tensor. 
0-D Tensor and the content is the input.\n\n Raises:\n TypeError: If `input_x` is neither int nor float.\n\n Supported Platforms:\n ``Ascend`` ``GPU`` ``CPU``\n\n Examples:\n >>> op = ops.ScalarToTensor()\n >>> data = 1\n >>> output = op(data, mindspore.float32)\n >>> print(output)\n 1.0\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n pass\n\n def infer_value(self, x, dtype=mstype.float32):\n validator.check_value_type(\"x\", x, [int, float], self.name)\n validator.check_subclass(\"dtype\", dtype, mstype.number, self.name)\n data_type = mstype.dtype_to_nptype(dtype)\n return Tensor(np.array(x, data_type))\n\n\nclass InvertPermutation(PrimitiveWithInfer):\n r\"\"\"\n Computes the inverse of an index permutation.\n\n This operator is mainly used to calculate the inverse of index permutation.\n It requires a 1-dimensional integer tensor x, which represents the index of a zero-based array,\n and exchanges each value with its index position. In other words, For output tensor y and input tensor x,\n this operation calculates the following values:\n\n :math:`y[x[i]] = i, \\quad i \\in [0, 1, \\ldots, \\text{len}(x)-1]`.\n\n Note:\n These values must include 0. There must be no duplicate values and the\n values can not be negative.\n\n Inputs:\n - **input_x** (Union(tuple[int], list[int])) - The input is constructed by multiple\n integers, i.e., :math:`(y_1, y_2, ..., y_S)` representing the indices.\n The values must include 0. There can be no duplicate values or negative values.\n Only constant value is allowed. The maximum value must be equal to length of input_x.\n\n Outputs:\n tuple[int]. It has the same length as the input.\n\n Raises:\n TypeError: If `input_x` is neither tuple nor list.\n TypeError: If element of `input_x` is not an int.\n\n Supported Platforms:\n ``Ascend`` ``GPU`` ``CPU``\n\n Examples:\n >>> invert = ops.InvertPermutation()\n >>> input_data = (3, 4, 0, 2, 1)\n >>> output = invert(input_data)\n >>> print(output)\n (2, 4, 3, 0, 1)\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n \"\"\"Initialize InvertPermutation\"\"\"\n self.set_const_prim(True)\n\n def __infer__(self, x):\n x_shp = x['shape']\n x_value = x['value']\n if mstype.issubclass_(x['dtype'], mstype.tensor):\n raise ValueError(f\"For \\'{self.name}\\', the value of 'input_x' must be non-Tensor, but got {x['dtype']}\")\n if x_value is None:\n raise ValueError(f\"For '{self.name}', the value of 'input_x' can not be None, but got {x_value}.\")\n validator.check_value_type(\"shape\", x_shp, [tuple, list], self.name)\n for shp in x_shp:\n if shp:\n x_rank = len(np.array(x_value, np.int64).shape)\n raise ValueError(f\"For \\'{self.name}\\', the dimension of 'input_x' must be 1, but got {x_rank}.\")\n for i, value in enumerate(x_value):\n validator.check_value_type(\"input[%d]\" % i, value, [int], self.name)\n z = [x_value[i] for i in range(len(x_value))]\n z.sort()\n\n for i in range(1, len(z)):\n if z[i - 1] == z[i]:\n raise ValueError(f\"For '{self.name}', the 'input_x' can not contain duplicate values, \"\n f\"but got duplicated {z[i]} in the 'input_x'.\")\n validator.check(f'value min', min(x_value), '', 0, Rel.EQ, self.name)\n validator.check(f'value max', max(x_value), '', len(x_value) - 1, Rel.EQ, self.name)\n\n y = [None] * len(x_value)\n for i, value in enumerate(x_value):\n validator.check_value_type(\"input[%d]\" % i, value, [int], self.name)\n validator.check(f'value', z[i], f'index', i, Rel.EQ, self.name)\n y[value] = i\n z.append(value)\n return {'shape': x_shp,\n 'dtype': x['dtype'],\n 
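# the 'value' entry below is the inverted permutation computed above (y[x[i]] = i)\n 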
'value': tuple(y)}\n\n\nclass Argmax(PrimitiveWithInfer):\n \"\"\"\n Returns the indices of the maximum value of a tensor across the axis.\n\n If the shape of input tensor is :math:`(x_1, ..., x_N)`, the shape of the output tensor will be\n :math:`(x_1, ..., x_{axis-1}, x_{axis+1}, ..., x_N)`.\n\n Args:\n axis (int): Axis where the Argmax operation applies to. Default: -1.\n output_type (:class:`mindspore.dtype`): An optional data type of `mindspore.dtype.int32`.\n Default: `mindspore.dtype.int32`.\n\n Inputs:\n - **input_x** (Tensor) - Input tensor. :math:`(N,*)` where :math:`*` means, any number of additional dimensions.\n Support data type list as follows:\n\n - Ascend: Float16, Float32.\n - GPU: Float16, Float32.\n - CPU: Float16, Float32, Float64.\n\n Outputs:\n Tensor, indices of the max value of input tensor across the axis.\n\n Raises:\n TypeError: If `axis` is not an int.\n TypeError: If `output_type` is neither int32 nor int64.\n\n Supported Platforms:\n ``Ascend`` ``GPU`` ``CPU``\n\n Examples:\n >>> input_x = Tensor(np.array([[1, 20, 5], [67, 8, 9], [130, 24, 15]]).astype(np.float32))\n >>> output = ops.Argmax(output_type=mindspore.int32)(input_x)\n >>> print(output)\n [1 0 0]\n \"\"\"\n\n @prim_attr_register\n def __init__(self, axis=-1, output_type=mstype.int32):\n \"\"\"Initialize Argmax\"\"\"\n self.init_prim_io_names(inputs=['x'], outputs=['output'])\n validator.check_value_type(\"axis\", axis, [int], self.name)\n validator.check_types_same_and_valid({'output': output_type}, [mstype.int32], self.name)\n self.axis = axis\n self.add_prim_attr('output_type', output_type)\n\n def infer_shape(self, x_shape):\n axis = self.axis\n if axis is None:\n axis = 0\n x_rank = len(x_shape)\n validator.check_int_range(axis, -x_rank, x_rank, Rel.INC_LEFT, \"axis\", self.name)\n axis = axis + x_rank if axis < 0 else axis\n ouput_shape = [x_shape[i] for i in range(x_rank) if i != axis]\n return ouput_shape\n\n def infer_dtype(self, x_dtype):\n validator.check_tensor_dtype_valid(\"input_x\", x_dtype, [mstype.float16, mstype.float32, mstype.float64],\n self.name)\n return mstype.tensor_type(self.output_type)\n\n\nclass Argmin(PrimitiveWithInfer):\n \"\"\"\n Returns the indices of the minimum value of a tensor across the axis.\n\n If the shape of input tensor is :math:`(x_1, ..., x_N)`, the shape of the output tensor is\n :math:`(x_1, ..., x_{axis-1}, x_{axis+1}, ..., x_N)`.\n\n Args:\n axis (int): Axis where the Argmin operation applies to. 
Default: -1.\n output_type (:class:`mindspore.dtype`): An optional data type of `mindspore.dtype.int32`.\n Default: `mindspore.dtype.int32`.\n\n Inputs:\n - **input_x** (Tensor) - Input tensor.\n The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.\n\n Outputs:\n Tensor, indices of the min value of input tensor across the axis.\n\n Raises:\n TypeError: If `axis` is not an int.\n TypeError: If `output_type` is neither int32 nor int64.\n\n Supported Platforms:\n ``Ascend``\n\n Examples:\n >>> input_x = Tensor(np.array([2.0, 3.1, 1.2]), mindspore.float32)\n >>> index = ops.Argmin()(input_x)\n >>> print(index)\n 2\n \"\"\"\n\n @prim_attr_register\n def __init__(self, axis=-1, output_type=mstype.int32):\n \"\"\"Initialize Argmin\"\"\"\n self.init_prim_io_names(inputs=['x'], outputs=['output'])\n validator.check_value_type(\"axis\", axis, [int], self.name)\n validator.check_type_name(\"output_type\", output_type, [mstype.int32, mstype.int64], self.name)\n self.axis = axis\n self.add_prim_attr('output_type', output_type)\n\n def infer_shape(self, x_shape):\n axis = self.axis\n if axis is None:\n axis = 0\n x_rank = len(x_shape)\n validator.check_int_range(axis, -x_rank, x_rank, Rel.INC_LEFT, \"axis\", self.name)\n axis = axis + x_rank if axis < 0 else axis\n ouput_shape = [x_shape[i] for i in range(x_rank) if i != axis]\n return ouput_shape\n\n def infer_dtype(self, x_dtype):\n validator.check_subclass(\"input_x\", x_dtype, mstype.tensor, self.name)\n return mstype.tensor_type(self.output_type)\n\n\nclass ArgMaxWithValue(PrimitiveWithInfer):\n \"\"\"\n Calculates the maximum value with the corresponding index.\n\n Calculates the maximum value along with the given axis for the input tensor. It returns the maximum values and\n indices.\n\n Note:\n In auto_parallel and semi_auto_parallel mode, the first output index can not be used.\n\n .. warning::\n - If there are multiple maximum values, the index of the first maximum value is used.\n - The value range of \"axis\" is [-dims, dims - 1]. \"dims\" is the dimension length of \"input_x\".\n\n Args:\n axis (int): The dimension to reduce. Default: 0.\n keep_dims (bool): Whether to reduce dimension, if true, the output will keep same dimension with the input,\n the output will reduce dimension if false. Default: False.\n\n Inputs:\n - **input_x** (Tensor) - The input tensor, can be any dimension. Set the shape of input tensor as\n :math:`(x_1, x_2, ..., x_N)`. And the data type only support mindspore.float16 or float32.\n\n Outputs:\n tuple (Tensor), tuple of 2 tensors, containing the corresponding index and the maximum value of the input\n tensor.\n - index (Tensor) - The index for the maximum value of the input tensor. If `keep_dims` is true, the shape of\n output tensors is :math:`(x_1, x_2, ..., x_{axis-1}, 1, x_{axis+1}, ..., x_N)`. 
Otherwise, the shape is\n :math:`(x_1, x_2, ..., x_{axis-1}, x_{axis+1}, ..., x_N)`.\n - output_x (Tensor) - The maximum value of input tensor, with the same shape as index.\n\n Raises:\n TypeError: If `keep_dims` is not a bool.\n TypeError: If `axis` is not an int.\n\n Supported Platforms:\n ``Ascend`` ``GPU`` ``CPU``\n\n Examples:\n >>> input_x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)\n >>> index, output = ops.ArgMaxWithValue()(input_x)\n >>> print(index, output)\n 3 0.7\n >>> index, output = ops.ArgMaxWithValue(keep_dims=True)(input_x)\n >>> print(index, output)\n [3] [0.7]\n \"\"\"\n\n @prim_attr_register\n def __init__(self, axis=0, keep_dims=False):\n \"\"\"Initialize ArgMaxWithValue\"\"\"\n self.axis = axis\n self.keep_dims = keep_dims\n validator.check_value_type('keep_dims', keep_dims, [bool], self.name)\n validator.check_value_type('axis', axis, [int], self.name)\n\n def infer_shape(self, x_shape):\n axis = self.axis\n x_rank = len(x_shape)\n validator.check_int_range(axis, -x_rank, x_rank, Rel.INC_LEFT, \"axis\", self.name)\n ouput_shape = _infer_shape_reduce(x_shape, self.axis, self.keep_dims, self.name)\n return ouput_shape, ouput_shape\n\n def infer_dtype(self, x_dtype):\n validator.check_subclass(\"input_x\", x_dtype, mstype.tensor, self.name)\n return mstype.tensor_type(mstype.int32), x_dtype\n\n\nclass ArgMinWithValue(PrimitiveWithInfer):\n \"\"\"\n Calculates the minimum value with corresponding index, and returns indices and values.\n\n Calculates the minimum value along with the given axis for the input tensor. It returns the minimum values and\n indices.\n\n Note:\n In auto_parallel and semi_auto_parallel mode, the first output index can not be used.\n\n .. warning::\n - If there are multiple minimum values, the index of the first minimum value is used.\n - The value range of \"axis\" is [-dims, dims - 1]. \"dims\" is the dimension length of \"input_x\".\n\n Args:\n axis (int): The dimension to reduce. Default: 0.\n keep_dims (bool): Whether to reduce dimension, if true the output will keep the same dimension as the input,\n the output will reduce dimension if false. Default: False.\n\n Inputs:\n - **input_x** (Tensor) - The input tensor, can be any dimension. Set the shape of input tensor as\n :math:`(x_1, x_2, ..., x_N)`.\n\n Outputs:\n tuple (Tensor), tuple of 2 tensors, containing the corresponding index and the minimum value of the input\n tensor.\n - index (Tensor) - The index for the minimum value of the input tensor. If `keep_dims` is true, the shape of\n output tensors is :math:`(x_1, x_2, ..., x_{axis-1}, 1, x_{axis+1}, ..., x_N)`. 
Otherwise, the shape is\n :math:`(x_1, x_2, ..., x_{axis-1}, x_{axis+1}, ..., x_N)`.\n - output_x (Tensor) - The minimum value of input tensor, with the same shape as index.\n\n Raises:\n TypeError: If `keep_dims` is not a bool.\n TypeError: If `axis` is not an int.\n\n Supported Platforms:\n ``Ascend`` ``GPU`` ``CPU``\n\n Examples:\n >>> input_x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)\n >>> output = ops.ArgMinWithValue()(input_x)\n >>> print(output)\n (Tensor(shape=[], dtype=Int32, value= 0), Tensor(shape=[], dtype=Float32, value= 0))\n >>> output = ops.ArgMinWithValue(keep_dims=True)(input_x)\n >>> print(output)\n (Tensor(shape=[1], dtype=Int32, value= [0]), Tensor(shape=[1], dtype=Float32, value= [ 0.00000000e+00]))\n \"\"\"\n\n @prim_attr_register\n def __init__(self, axis=0, keep_dims=False):\n \"\"\"Initialize ArgMinWithValue\"\"\"\n self.axis = axis\n self.keep_dims = keep_dims\n validator.check_value_type('keep_dims', keep_dims, [bool], self.name)\n validator.check_value_type('axis', axis, [int], self.name)\n\n def infer_shape(self, x_shape):\n axis = self.axis\n x_rank = len(x_shape)\n validator.check_int_range(axis, -x_rank, x_rank, Rel.INC_LEFT, \"axis\", self.name)\n ouput_shape = _infer_shape_reduce(x_shape, self.axis, self.keep_dims, self.name)\n return ouput_shape, ouput_shape\n\n def infer_dtype(self, x_dtype):\n validator.check_subclass(\"input_x\", x_dtype, mstype.tensor, self.name)\n return mstype.tensor_type(mstype.int32), x_dtype\n\n\nclass Tile(PrimitiveWithInfer):\n r\"\"\"\n Replicates a tensor with given multiples times.\n\n Creates a new tensor by replicating `input_x` `multiples` times. The i'th dimension of\n output tensor has `input_x.shape(i) * multiples[i]` elements, and the values of `input_x`\n are replicated `multiples[i]` times along the i'th dimension.\n\n Note:\n The length of `multiples` must be greater or equal to the length of dimension in `input_x`.\n\n Inputs:\n - **input_x** (Tensor) - 1-D or higher Tensor. Set the shape of input tensor as\n :math:`(x_1, x_2, ..., x_S)`.\n\n - **multiples** (tuple[int]) - The input tuple is constructed by multiple\n integers, i.e., :math:`(y_1, y_2, ..., y_S)`. The length of `multiples`\n cannot be smaller than the length of the shape of `input_x`.\n Only constant value is allowed.\n\n Outputs:\n Tensor, has the same data type as the `input_x`.\n\n - If the length of `multiples` is the same as the length of shape of `input_x`,\n then the shape of their corresponding positions can be multiplied, and\n the shape of Outputs is :math:`(x_1*y_1, x_2*y_2, ..., x_S*y_R)`.\n - If the length of `multiples` is larger than the length of shape of `input_x`,\n fill in multiple 1 in the length of the shape of `input_x` until their lengths are consistent.\n Such as set the shape of `input_x` as :math:`(1, ..., x_1, x_2, ..., x_S)`,\n then the shape of their corresponding positions can be multiplied, and\n the shape of Outputs is :math:`(1*y_1, ..., x_S*y_R)`.\n\n Raises:\n TypeError: If `multiples` is not a tuple or its elements are not all int.\n ValueError: If the elements of `multiples` are not all greater than 0.\n ValueError: If the length of `multiples` are smaller than the length of dimension in `input_x`.\n\n Supported Platforms:\n ``Ascend`` ``GPU`` ``CPU``\n\n Examples:\n >>> tile = ops.Tile()\n >>> input_x = Tensor(np.array([[1, 2], [3, 4]]), mindspore.float32)\n >>> multiples = (2, 3)\n >>> output = tile(input_x, multiples)\n >>> print(output)\n [[1. 2. 1. 2. 1. 2.]\n [3. 4. 3. 4. 3. 
4.]\n [1. 2. 1. 2. 1. 2.]\n [3. 4. 3. 4. 3. 4.]]\n >>> multiples = (2, 3, 2)\n >>> output = tile(input_x, multiples)\n >>> print(output)\n [[[1. 2. 1. 2.]\n [3. 4. 3. 4.]\n [1. 2. 1. 2.]\n [3. 4. 3. 4.]\n [1. 2. 1. 2.]\n [3. 4. 3. 4.]]\n [[1. 2. 1. 2.]\n [3. 4. 3. 4.]\n [1. 2. 1. 2.]\n [3. 4. 3. 4.]\n [1. 2. 1. 2.]\n [3. 4. 3. 4.]]]\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n \"\"\"Initialize Tile\"\"\"\n self.init_prim_io_names(inputs=['x', 'multiples'], outputs=['output'])\n\n def check_elim(self, base_tensor, multiplier):\n if not isinstance(base_tensor, Tensor):\n raise TypeError(f\"For '{self.name}', the type of 'input_x' should be Tensor, \"\n f\"but got {type(base_tensor).__name__}.\")\n if all(v == 1 for v in multiplier) and len(base_tensor.shape) >= len(multiplier):\n return (True, base_tensor)\n return (False, None)\n\n def __infer__(self, x, multiples):\n multiples_v = multiples['value']\n if multiples_v is None:\n if len(multiples['shape']) != 1:\n raise ValueError(f'For \\'{self.name}\\' the dim of multiples must be 1.')\n rank = max(len(x['shape']), multiples['shape'][0])\n out_shape = [-1] * rank\n # tile can't infer min/max shape if multiples_v is None\n return {'shape': out_shape,\n 'dtype': x['dtype'],\n 'value': None,\n 'min_shape': [1] * rank,\n 'max_shape': [1] * rank\n }\n\n x_shp = x['shape']\n validator.check_value_type(\n \"multiples\", multiples_v, [tuple], self.name)\n for i, multiple in enumerate(multiples_v):\n validator.check_positive_int(\n multiple, \"multiples[%d]\" % i, self.name)\n validator.check_value_type(\n \"x[\\'dtype\\']\", x[\"dtype\"], mstype.tensor_type, self.name)\n len_sub = len(multiples_v) - len(x_shp)\n multiples_w = None\n if len_sub == 0:\n multiples_w = multiples_v\n if len_sub > 0:\n for i in range(0, len_sub):\n x_shp.insert(0, 1)\n multiples_w = multiples_v\n elif len_sub < 0:\n raise ValueError(f\"For '{self.name}', the length of 'multiples' can not be smaller than \"\n f\"the dimension of 'input_x', but got length of 'multiples': {len(multiples_v)} \"\n f\"and dimension of 'input_x': {len(x_shp)}.\")\n for i, a in enumerate(multiples_w):\n x_shp[i] *= a\n value = None\n if x['value'] is not None:\n value = Tensor(np.tile(x['value'].asnumpy(), multiples_w))\n return {'shape': x_shp,\n 'dtype': x['dtype'],\n 'value': value}\n\n\nclass UnsortedSegmentSum(PrimitiveWithInfer):\n r\"\"\"\n Computes the sum of a tensor along segments.\n\n Calculates a tensor such that :math:`\\text{output}[i] = \\sum_{segment\\_ids[j] == i} \\text{data}[j, \\ldots]`, where\n :math:`j` is a tuple describing the index of element in data. `segment_ids` selects which elements in data to sum\n up. Segment_ids does not need to be sorted, and it does not need to cover all values in the entire valid value\n range.\n\n The following figure shows the calculation process of UnsortedSegmentSum:\n\n .. image:: api_img/UnsortedSegmentSum.png\n\n Note:\n - If the segment_id i is absent in the segment_ids, then output[i] will be filled with 0.\n - On Ascend, if the value of segment_id is less than 0 or greater than the length of the input data shape, an\n execution error will occur.\n\n If the sum of the given segment_ids :math:`i` is empty, then :math:`\\text{output}[i] = 0`. If the given segment_ids\n is negative, the value will be ignored. 
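For example (a hedged illustration, not an official example), with data :math:`[1, 2, 3]` and segment_ids :math:`[0, -1, 1]`, index 1 is dropped and the result is :math:`[1, 3]`. 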
'num_segments' must be equal to the number of different segment_ids.\n\n Inputs:\n - **input_x** (Tensor) - The shape is :math:`(x_1, x_2, ..., x_R)`.\n - **segment_ids** (Tensor) - Set the shape as :math:`(x_1, x_2, ..., x_N)`, where 0 < N <= R.\n - **num_segments** (int) - Set :math:`z` as num_segments.\n\n Outputs:\n Tensor, the shape is :math:`(z, x_{N+1}, ..., x_R)`.\n\n Raises:\n TypeError: If `num_segments` is not an int.\n ValueError: If length of shape of `segment_ids` is less than 1.\n\n Supported Platforms:\n ``Ascend`` ``GPU`` ``CPU``\n\n Examples:\n >>> input_x = Tensor([1, 2, 3, 4], mindspore.float32)\n >>> segment_ids = Tensor([0, 0, 1, 2], mindspore.int32)\n >>> num_segments = 4\n >>> output = ops.UnsortedSegmentSum()(input_x, segment_ids, num_segments)\n >>> print(output)\n [3. 3. 4. 0.]\n >>> input_x = Tensor([1, 2, 3, 4, 2, 5], mindspore.float32)\n >>> segment_ids = Tensor([0, 0, 1, 2, 3, 4], mindspore.int32)\n >>> num_segments = 6\n >>> output = ops.UnsortedSegmentSum()(input_x, segment_ids, num_segments)\n >>> print(output)\n [3. 3. 4. 2. 5. 0.]\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n \"\"\"Initialize UnsortedSegmentSum\"\"\"\n self.init_prim_io_names(inputs=['x', 'segment_ids', 'num_segments'], outputs=['y'])\n\n def __infer__(self, x, segment_ids, num_segments):\n x_type = x['dtype']\n x_shp = x['shape']\n validator.check_subclass(\"input_x\", x_type, mstype.tensor, self.name)\n validator.check_value_type(\"x_shape\", x_shp, [list], self.name)\n x_shp_len = len(x_shp)\n validator.check_positive_int(x_shp_len, \"rank of input_x\", self.name)\n segment_ids_shp = segment_ids['shape']\n segment_ids_type = segment_ids['dtype']\n validator.check_subclass(\"segment_ids\", segment_ids_type, mstype.tensor, self.name)\n validator.check_value_type(\"segment_ids\", segment_ids_shp, [list], self.name)\n segment_ids_shp_len = len(segment_ids_shp)\n validator.check_positive_int(segment_ids_shp_len, \"rank of segment_ids\", self.name)\n validator.check(f'rank of input_x', len(x_shp),\n 'rank of segments_id', len(segment_ids_shp), Rel.GE, self.name)\n if -1 not in x_shp and -1 not in segment_ids_shp:\n # only validate when both shapes fully known\n for i, value in enumerate(segment_ids_shp):\n validator.check(\"ids[%d]\" % i, value, 'input[%d]' % i, x_shp[i], Rel.EQ, self.name)\n num_segments_v = num_segments['value']\n num_segments_type = num_segments['dtype']\n validator.check_subclass(\"num_segments\", num_segments_type, [mstype.tensor, mstype.number], self.name)\n if isinstance(num_segments_type, type(mstype.tensor)):\n validator.check_tensor_dtype_valid(\"num_segments\", num_segments_type, [mstype.int32, mstype.int64],\n self.name)\n shp = [-1]\n else:\n validator.check_value_type('num_segments', num_segments_v, [int], self.name)\n validator.check_positive_int(num_segments_v, \"num_segments\", self.name)\n shp = [num_segments_v]\n\n shp += x_shp[segment_ids_shp_len:]\n if \"max_value\" in num_segments and \"min_value\" in num_segments:\n output_max_shape = list(num_segments['max_value'])\n output_min_shape = list(num_segments['min_value'])\n else:\n if isinstance(num_segments_type, type(mstype.tensor)):\n raise ValueError(f\"For '{self.name}', the dtype of 'num_segments' only support int type \"\n f\"when it is not a dynamic value, but got type of 'num_segments': \"\n f\"{num_segments_type}.\")\n output_max_shape = [num_segments_v]\n output_min_shape = [num_segments_v]\n if 'max_shape' in x and 'min_shape' in x:\n max_output_incoming = x['max_shape']\n 
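# together with the matching min bound below, propagate the input's dynamic-shape limits to the output\n 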
min_output_incoming = x['min_shape']\n else:\n max_output_incoming = x_shp\n min_output_incoming = x_shp\n output_max_shape += max_output_incoming[segment_ids_shp_len:]\n output_min_shape += min_output_incoming[segment_ids_shp_len:]\n return {'shape': shp,\n 'max_shape': output_max_shape,\n 'min_shape': output_min_shape,\n 'dtype': mstype.tensor_type(x_type.element_type()),\n 'value': None}\n\n\nclass UnsortedSegmentMin(PrimitiveWithCheck):\n r\"\"\"\n Computes the minimum of a tensor along segments.\n\n The following figure shows the calculation process of UnsortedSegmentMin:\n\n .. image:: api_img/UnsortedSegmentMin.png\n\n .. math::\n\n \\text { output }_i=\\text{min}_{j \\ldots} \\text { data }[j \\ldots]\n\n where :math:`min` over tuples :math:`j...` such that :math:`segment_ids[j...] == i`.\n\n Note:\n If the segment_id i is absent in the segment_ids, then output[i] will be filled with\n the maximum value of the input_x's type.\n The `segment_ids` must be non-negative tensor.\n\n Inputs:\n - **input_x** (Tensor) - The shape is :math:`(x_1, x_2, ..., x_R)`.\n The data type must be float16, float32 or int32.\n - **segment_ids** (Tensor) - A `1-D` tensor whose shape is :math:`(x_1)`, the value must be non-negative tensor.\n The data type must be int32.\n - **num_segments** (int) - The value specifies the number of distinct `segment_ids`.\n\n Outputs:\n Tensor, set the number of `num_segments` as `N`, the shape is :math:`(N, x_2, ..., x_R)`.\n\n Raises:\n TypeError: If `num_segments` is not an int.\n ValueError: If length of shape of `segment_ids` is not equal to 1.\n\n Supported Platforms:\n ``Ascend`` ``GPU``\n\n Examples:\n >>> input_x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [4, 2, 1]]).astype(np.float32))\n >>> segment_ids = Tensor(np.array([0, 1, 1]).astype(np.int32))\n >>> num_segments = 2\n >>> unsorted_segment_min = ops.UnsortedSegmentMin()\n >>> output = unsorted_segment_min(input_x, segment_ids, num_segments)\n >>> print(output)\n [[1. 2. 3.]\n [4. 2. 1.]]\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n \"\"\"Initialize UnsortedSegmentMin\"\"\"\n self.init_prim_io_names(inputs=['x', 'segment_ids', 'num_segments'], outputs=['y'])\n\n def __check__(self, x, segment_ids, num_segments):\n x_shape = x['shape']\n segment_ids_shape = segment_ids['shape']\n valid_type = [mstype.float16, mstype.float32, mstype.int32]\n validator.check_tensor_dtype_valid(\"x\", x['dtype'], valid_type, self.name)\n validator.check_tensor_dtype_valid(\"segment_ids\", segment_ids['dtype'], [mstype.int32], self.name)\n validator.check_equal_int(len(segment_ids_shape), 1, \"rank of segment_ids_shape\", self.name)\n num_segments_type = num_segments['dtype']\n validator.check_subclass(\"num_segments\", num_segments_type, [mstype.number], self.name)\n if -1 not in x_shape and -1 not in segment_ids_shape:\n # only validate when both shapes fully known\n validator.check(f'first shape of input_x', x_shape[0],\n 'length of segments_id', segment_ids_shape[0], Rel.EQ, self.name)\n num_segments_v = num_segments['value']\n validator.check_value_type('num_segments', num_segments_v, [int], self.name)\n validator.check_positive_int(num_segments_v, \"num_segments\", self.name)\n\n\nclass UnsortedSegmentMax(PrimitiveWithCheck):\n r\"\"\"\n Computes the maximum along segments of a tensor.\n\n The following figure shows the calculation process of UnsortedSegmentMax:\n\n .. image:: api_img/UnsortedSegmentMax.png\n\n .. 
math::\n\n \\text { output }_i=\\text{max}_{j \\ldots} \\text { data }[j \\ldots]\n\n where :math:`max` over tuples :math:`j...` such that :math:`segment\\_ids[j...] == i`.\n\n Note:\n If the segment_id i is absent in the segment_ids, then output[i] will be filled with\n the minimum value of the input_x's type.\n\n Inputs:\n - **input_x** (Tensor) - The shape is :math:`(x_1, x_2, ..., x_R)`.\n The data type must be float16, float32 or int32.\n - **segment_ids** (Tensor) - A `1-D` tensor whose shape is :math:`(x_1)`, the value must be non-negative tensor.\n The data type must be int32.\n - **num_segments** (int) - The value specifies the number of distinct `segment_ids`.\n\n Outputs:\n Tensor, set the number of `num_segments` as `N`, the shape is :math:`(N, x_2, ..., x_R)`.\n\n Raises:\n TypeError: If `num_segments` is not an int.\n ValueError: If length of shape of `segment_ids` is not equal to 1.\n\n Supported Platforms:\n ``Ascend`` ``GPU``\n\n Examples:\n >>> # case 1: Only have two num_segments, where is 0 and 1, and segment_ids=[0, 1, 1]\n >>> # num_segments = 2 indicates that there are two types of segment_id,\n >>> # the first number '0' in [0, 1, 1] indicates input_x[0],\n >>> # the second number '1' in [0, 1, 1] indicates input_x[1],\n >>> # the third number '1' in [0, 1, 1] indicates input_x[2],\n >>> # input_x[0], which is [1, 2, 3] will not be compared to other segment_id.\n >>> # Only the same segment_id will be compared.\n >>> from mindspore import Tensor\n >>> from mindspore import ops\n >>> import numpy as np\n >>> input_x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [4, 2, 1]]).astype(np.float32))\n >>> segment_ids = Tensor(np.array([0, 1, 1]).astype(np.int32))\n >>> num_segments = 2\n >>> unsorted_segment_max = ops.UnsortedSegmentMax()\n >>> output = unsorted_segment_max(input_x, segment_ids, num_segments)\n >>> print(output)\n [[1. 2. 3.]\n [4. 5. 6.]]\n >>>\n >>> # case 2: The segment_ids=[0, 0, 1, 1].\n >>> # [1, 2, 3] will compare with [4, 2, 0],\n >>> # and [4, 5, 6] will compare with [4, 2, 1].\n >>> input_x = Tensor(np.array([[1, 2, 3], [4, 2, 0], [4, 5, 6], [4, 2, 1]]).astype(np.float32))\n >>> segment_ids = Tensor(np.array([0, 0, 1, 1]).astype(np.int32))\n >>> num_segments = 2\n >>> unsorted_segment_max = ops.UnsortedSegmentMax()\n >>> output = unsorted_segment_max(input_x, segment_ids, num_segments)\n >>> print(input_x.shape)\n (4, 3)\n >>> print(output)\n [[4. 2. 3.]\n [4. 5. 6.]]\n >>> # case 3: If the input_x have three dimensions even more, what will happen?\n >>> # The shape of input_x is (2, 4, 3),\n >>> # and the length of segment_ids should be the same as the first dimension of input_x.\n >>> # Because the segment_ids are different, input_x[0] will not be compared to input_x[1].\n >>> input_x = Tensor(np.array([[[1, 2, 3], [4, 2, 0], [4, 5, 6], [4, 2, 1]],\n ... [[1, 2, 3], [4, 2, 0], [4, 5, 6], [4, 2, 1]]]).astype(np.float32))\n >>> segment_ids = Tensor(np.array([0, 1]).astype(np.int32))\n >>> num_segments = 2\n >>> unsorted_segment_max = ops.UnsortedSegmentMax()\n >>> output = unsorted_segment_max(input_x, segment_ids, num_segments)\n >>> print(input_x.shape)\n (2, 4, 3)\n >>> print(output)\n [[[1. 2. 3.]\n [4. 2. 0.]\n [4. 5. 6.]\n [4. 2. 1.]]\n [[1. 2. 3.]\n [4. 2. 0.]\n [4. 5. 6.]\n [4. 2. 
1.]]]\n >>> # case 4: It has the same input with the 3rd case.\n >>> # Because num_segments is equal to 2, there are two segment_ids, but currently only one 0 is used.\n >>> # the segment_id i is absent in the segment_ids, then output[i] will be filled with\n >>> # the smallest possible value of the input_x's type.\n >>> segment_ids = Tensor(np.array([0, 0]).astype(np.int32))\n >>> output = unsorted_segment_max(input_x, segment_ids, num_segments)\n >>> print(output)\n [[[ 1.0000000e+00 2.0000000e+00 3.0000000e+00]\n [ 4.0000000e+00 2.0000000e+00 0.0000000e+00]\n [ 4.0000000e+00 5.0000000e+00 6.0000000e+00]\n [ 4.0000000e+00 2.0000000e+00 1.0000000e+00]]\n [[-3.4028235e+38 -3.4028235e+38 -3.4028235e+38]\n [-3.4028235e+38 -3.4028235e+38 -3.4028235e+38]\n [-3.4028235e+38 -3.4028235e+38 -3.4028235e+38]\n [-3.4028235e+38 -3.4028235e+38 -3.4028235e+38]]]\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n \"\"\"Initialize UnsortedSegmentMax\"\"\"\n self.init_prim_io_names(inputs=['x', 'segment_ids', 'num_segments'], outputs=['y'])\n\n def __check__(self, x, segment_ids, num_segments):\n x_shape = x['shape']\n segment_ids_shape = segment_ids['shape']\n valid_type = [mstype.float16, mstype.float32, mstype.int32]\n validator.check_tensor_dtype_valid(\"x\", x['dtype'], valid_type, self.name)\n validator.check_tensors_dtypes_same_and_valid({\"segment_ids\": segment_ids['dtype']},\n [mstype.int32, mstype.int64], self.name)\n validator.check_equal_int(len(segment_ids_shape), 1, \"rank of segment_ids_shape\", self.name)\n num_segments_type = num_segments['dtype']\n validator.check_subclass(\"num_segments\", num_segments_type, [mstype.number], self.name)\n if -1 not in x_shape and -1 not in segment_ids_shape:\n # only validate when both shapes fully known\n validator.check(f'first shape of input_x', x_shape[0],\n 'length of segments_id', segment_ids_shape[0], Rel.EQ, self.name)\n num_segments_v = num_segments['value']\n validator.check_value_type('num_segments', num_segments_v, [int], self.name)\n validator.check_positive_int(num_segments_v, \"num_segments\", self.name)\n\n\nclass UnsortedSegmentProd(PrimitiveWithInfer):\n \"\"\"\n Computes the product of a tensor along segments.\n\n The following figure shows the calculation process of UnsortedSegmentProd:\n\n .. image:: api_img/UnsortedSegmentProd.png\n\n Inputs:\n - **input_x** (Tensor) - The shape is :math:`(x_1, x_2, ..., x_R)`.\n With float16, float32 or int32 data type.\n - **segment_ids** (Tensor) - A `1-D` tensor whose shape is :math:`(x_1)`, the value must be non-negative tensor.\n Data type must be int32.\n - **num_segments** (int) - The value specifies the number of distinct `segment_ids`,\n must be greater than 0.\n\n Outputs:\n Tensor, set the number of `num_segments` as `N`, the shape is :math:`(N, x_2, ..., x_R)`.\n\n Raises:\n TypeError: If `num_segments` is not an int.\n ValueError: If length of shape of `segment_ids` is not equal to 1.\n\n Supported Platforms:\n ``Ascend``\n\n Examples:\n >>> input_x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [4, 2, 1]]).astype(np.float32))\n >>> segment_ids = Tensor(np.array([0, 1, 0]).astype(np.int32))\n >>> num_segments = 2\n >>> unsorted_segment_prod = ops.UnsortedSegmentProd()\n >>> output = unsorted_segment_prod(input_x, segment_ids, num_segments)\n >>> print(output)\n [[4. 4. 3.]\n [4. 5. 
6.]]\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n \"\"\"Initialize UnsortedSegmentProd\"\"\"\n self.init_prim_io_names(inputs=['x', 'segment_ids', 'num_segments'], outputs=['y'])\n\n def __infer__(self, x, segment_ids, num_segments):\n x_type = x['dtype']\n x_shape = x['shape']\n segment_ids_shape = segment_ids['shape']\n validator.check_subclass(\"input_x\", x_type, mstype.tensor, self.name)\n validator.check_value_type(\"x_shape\", x_shape, [list], self.name)\n valid_type = [mstype.float16, mstype.float32, mstype.int32]\n validator.check_tensor_dtype_valid(\"x\", x['dtype'], valid_type, self.name)\n validator.check_tensor_dtype_valid(\"segment_ids\", segment_ids['dtype'], [mstype.int32], self.name)\n validator.check_equal_int(len(segment_ids_shape), 1, \"rank of segment_ids_shape\", self.name)\n validator.check(f'first shape of input_x', x_shape[0],\n 'length of segments_id', segment_ids_shape[0], Rel.EQ, self.name)\n num_segments_v = num_segments['value']\n validator.check_value_type('num_segments', num_segments_v, [int], self.name)\n validator.check_positive_int(num_segments_v, \"num_segments\", self.name)\n segment_ids_shape_len = len(segment_ids_shape)\n out_shape = [num_segments_v]\n out_shape += x_shape[segment_ids_shape_len:]\n out = {'shape': out_shape,\n 'dtype': mstype.tensor_type(x_type.element_type()),\n 'value': None}\n return out\n\n\nclass Concat(PrimitiveWithInfer):\n r\"\"\"\n Connect tensor in the specified axis.\n\n Connect input tensors along with the given axis.\n\n The input data is a tuple of tensors. These tensors have the same rank `R`. Set the given axis as `m`, and\n :math:`0 \\le m < R`. Set the number of input tensors as `N`. For the :math:`i`-th tensor :math:`t_i`, it has\n the shape of :math:`(x_1, x_2, ..., x_{mi}, ..., x_R)`. :math:`x_{mi}` is the :math:`m`-th dimension of the\n :math:`i`-th tensor. Then, the shape of the output tensor is\n\n .. math::\n\n (x_1, x_2, ..., \\sum_{i=1}^Nx_{mi}, ..., x_R)\n\n .. warning::\n The value range of \"axis\" is [-dims, dims - 1]. \"dims\" is the dimension length of \"input_x\".\n\n Args:\n axis (int): The specified axis. Default: 0.\n\n Inputs:\n - **input_x** (tuple, list) - A tuple or a list of input tensors.\n Suppose there are two tensors in this tuple or list, namely x1 and x2.\n To perform `Concat` in the axis 0 direction, except for the 0th axis, all other axes should be equal,\n that is, :math:`x1.shape[1] == x2.shape[1], x1.shape[2] == x2.shape[2], ..., x1.shape[R] == x2.shape[R]`,\n where the :math:`R` indicates the last axis.\n\n Outputs:\n - Tensor, the shape is :math:`(x_1, x_2, ..., \\sum_{i=1}^Nx_{mi}, ..., x_R)`.\n The data type is the same with `input_x`.\n\n Raises:\n TypeError: If `axis` is not an int.\n\n Supported Platforms:\n ``Ascend`` ``GPU`` ``CPU``\n\n Examples:\n >>> input_x1 = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))\n >>> input_x2 = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))\n >>> op = ops.Concat()\n >>> output = op((input_x1, input_x2))\n >>> print(output)\n [[0. 1.]\n [2. 1.]\n [0. 1.]\n [2. 1.]]\n >>> op = ops.Concat(1)\n >>> output = op((input_x1, input_x2))\n >>> print(output)\n [[0. 1. 0. 1.]\n [2. 1. 2. 
1.]]\n \"\"\"\n\n @prim_attr_register\n def __init__(self, axis=0):\n \"\"\"Initialize Concat\"\"\"\n validator.check_value_type(\"axis\", axis, [int], self.name)\n\n def __infer__(self, input_x):\n axis = self.axis\n x_shp = input_x['shape']\n x_type = input_x['dtype']\n _, all_shp, _ = get_concat_offset(x_shp, x_type, axis, self.name)\n self.add_prim_attr('inputNums', len(x_shp))\n ret_shp = x_shp[0].copy()\n value = None\n if input_x['value'] is not None:\n value = Tensor(np.concatenate([x.asnumpy() for x in input_x['value']], axis=axis))\n ret_shp[axis] = all_shp\n out = {'shape': ret_shp,\n 'dtype': x_type[0],\n 'value': value}\n if -1 in x_shp[0]:\n x_min_shp = input_x['min_shape']\n ret_min_shp = x_min_shp[0].copy()\n ret_min_shp[axis] = 0\n for all_min_shp in x_min_shp:\n ret_min_shp[axis] += all_min_shp[axis]\n out['min_shape'] = ret_min_shp\n x_max_shp = input_x['max_shape']\n ret_max_shp = x_max_shp[0].copy()\n ret_max_shp[axis] = 0\n for all_max_shp in x_max_shp:\n ret_max_shp[axis] += all_max_shp[axis]\n out['max_shape'] = ret_max_shp\n return out\n\n\nclass ParallelConcat(PrimitiveWithInfer):\n r\"\"\"\n Concats tensor in the first dimension.\n\n Concats input tensors along with the first dimension.\n\n The difference between Concat and ParallelConcat is that Concat requires all of the inputs be computed\n before the operation will begin but doesn't require that the input shapes be known during graph construction.\n Parallel concat will copy pieces of the input into the output as they become available, in some situations\n this can provide a performance benefit.\n\n Note:\n The input tensors are all required to have size 1 in the first dimension.\n\n Inputs:\n - **values** (tuple, list) - A tuple or a list of input tensors. The data type and shape of these\n tensors must be the same. 
The data type is Number except float64.\n\n Outputs:\n Tensor, data type is the same as `values`.\n\n Raises:\n ValueError: If length of shape of `values` is less than 1.\n ValueError: The data type and shape of these tensors are not the same.\n\n Supported Platforms:\n ``Ascend``\n\n Examples:\n >>> data1 = Tensor(np.array([[0, 1]]).astype(np.int32))\n >>> data2 = Tensor(np.array([[2, 1]]).astype(np.int32))\n >>> op = ops.ParallelConcat()\n >>> output = op((data1, data2))\n >>> print(output)\n [[0 1]\n [2 1]]\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n \"\"\"Initialize ParallelConcat\"\"\"\n\n def __infer__(self, values):\n x_shp = values['shape']\n x_type = values['dtype']\n\n validator.check_int(len(x_shp), 1, Rel.GE, f'x_shp length', self.name)\n\n args = {f\"x_type[{i}]\": elem for i, elem in enumerate(x_type)}\n validator.check_tensors_dtypes_same_and_valid(args, mstype.number_type + (mstype.bool_,), self.name)\n\n first_elem = x_shp[0]\n for i, elem in enumerate(x_shp[1:]):\n j = i + 1\n validator.check_equal_int(elem[0], 1, f'x_shp[{j}][0]', self.name)\n validator.check(f\"x_shp[0] shape\", first_elem, f\"x_shp[{j}] shape\", elem, Rel.EQ, self.name)\n\n ret_shp = x_shp[0].copy()\n ret_shp[0] = len(x_shp)\n self.add_prim_attr('shape', ret_shp)\n self.add_prim_attr('N', len(x_shp))\n\n out = {'shape': ret_shp,\n 'dtype': x_type[0],\n 'value': None}\n return out\n\n\ndef _get_stack_shape(x_shape, x_type, axis, prim_name):\n \"\"\"for stack output shape\"\"\"\n validator.check_value_type(\"shape\", x_shape, [tuple, list], prim_name)\n validator.check_int(len(x_shape), 1, Rel.GE, \"len of input_x\", prim_name)\n validator.check_subclass(\"input_x[0]\", x_type[0], mstype.tensor, prim_name)\n rank_base = len(x_shape[0])\n n = len(x_shape)\n out_shape = x_shape[0]\n validator.check_int_range(axis, -rank_base - 1, rank_base, Rel.INC_BOTH, 'axis', prim_name)\n if axis < 0:\n axis = axis + rank_base + 1\n for i in range(1, n):\n validator.check('x_type[%d]' % i, x_type[i], 'base', x_type[0], Rel.EQ, prim_name, TypeError)\n if x_shape[i] != x_shape[0]:\n raise ValueError(f\"For \\'{prim_name}\\' element {i} shape in input can not pack with first element\")\n out_shape.insert(axis, n)\n return out_shape\n\n\nclass Pack(PrimitiveWithInfer):\n \"\"\"\n Same as operator Stack. Pack will be deprecated in the future.\n Please use Stack instead.\n \"\"\"\n\n @deprecated(\"1.1\", \"Stack\", True)\n @prim_attr_register\n def __init__(self, axis=0):\n \"\"\"Initialize Pack\"\"\"\n validator.check_value_type(\"axis\", axis, [int], self.name)\n self.axis = axis\n\n def __infer__(self, value):\n x_shape = value['shape']\n x_type = value['dtype']\n self.add_prim_attr('num', len(x_shape))\n all_shape = _get_stack_shape(x_shape, x_type, self.axis, self.name)\n out = {'shape': all_shape,\n 'dtype': x_type[0],\n 'value': None}\n return out\n\n\nclass Stack(PrimitiveWithInfer):\n r\"\"\"\n Stacks a list of tensors in specified axis.\n\n Stacks the list of input tensors with the same rank `R`, output is a tensor of rank `(R+1)`.\n\n Given input tensors of shape :math:`(x_1, x_2, ..., x_R)`. Set the number of input tensors as `N`.\n If :math:`0 \\le axis`, the shape of the output tensor is\n :math:`(x_1, x_2, ..., x_{axis}, N, x_{axis+1}, ..., x_R)`.\n\n Args:\n axis (int): Dimension to stack. Default: 0.\n Negative values wrap around. The range is [-(R+1), R+1).\n\n Inputs:\n - **input_x** (Union[tuple, list]) - A Tuple or list of Tensor objects with the same shape and type.\n\n Outputs:\n Tensor. 


class Stack(PrimitiveWithInfer):
    r"""
    Stacks a list of tensors along the specified axis.

    Stacks the list of input tensors with the same rank `R`, output is a tensor of rank `(R+1)`.

    Given input tensors of shape :math:`(x_1, x_2, ..., x_R)`, let the number of input tensors be `N`.
    If :math:`0 \le axis`, the shape of the output tensor is
    :math:`(x_1, x_2, ..., x_{axis}, N, x_{axis+1}, ..., x_R)`.

    Args:
        axis (int): Dimension to stack. Default: 0.
            Negative values wrap around. The range is [-(R+1), R+1).

    Inputs:
        - **input_x** (Union[tuple, list]) - A Tuple or list of Tensor objects with the same shape and type.

    Outputs:
        Tensor. A stacked Tensor with the same type as `input_x`.

    Raises:
        TypeError: If the data types of elements in `input_x` are not the same.
        ValueError: If the length of `input_x` is not greater than 1;
            or if axis is out of the range [-(R+1), R+1);
            or if the shapes of elements in input_x are not the same.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> data1 = Tensor(np.array([0, 1]).astype(np.float32))
        >>> data2 = Tensor(np.array([2, 3]).astype(np.float32))
        >>> stack = ops.Stack()
        >>> output = stack([data1, data2])
        >>> print(output)
        [[0. 1.]
         [2. 3.]]
    """

    @prim_attr_register
    def __init__(self, axis=0):
        """Initialize Stack"""
        validator.check_value_type("axis", axis, [int], self.name)
        self.axis = axis

    def __infer__(self, value):
        x_shape = value['shape']
        x_type = value['dtype']
        self.add_prim_attr('num', len(x_shape))
        all_shape = _get_stack_shape(x_shape, x_type, self.axis, self.name)
        tuple_value = value['value']
        input_array = []
        inferred_value = None
        if tuple_value is not None:
            for item in tuple_value:
                npy_item = item.asnumpy()
                input_array.append(npy_item)
            inferred_value = Tensor(np.stack(input_array, axis=self.axis))
        out = {'shape': all_shape,
               'dtype': x_type[0],
               'value': inferred_value}
        return out


class Unpack(PrimitiveWithInfer):
    """
    Same as operator Unstack. Unpack will be deprecated in the future.
    Please use Unstack instead.
    """

    @deprecated("1.1", "Unstack", True)
    @prim_attr_register
    def __init__(self, axis=0):
        """Initialize Unpack"""
        validator.check_value_type("axis", axis, [int], self.name)
        self.axis = axis

    def __infer__(self, x):
        validator.check_subclass("x", x['dtype'], mstype.tensor, self.name)
        x_shape = list(x['shape'])
        dim = len(x_shape)
        validator.check_int_range(self.axis, -dim, dim, Rel.INC_LEFT, 'axis value', self.name)
        if self.axis < 0:
            self.axis = self.axis + dim
        output_num = x_shape[self.axis]
        validator.check_value_type("num", output_num, [int], self.name)
        validator.check_positive_int(output_num, "output_num", self.name)
        self.add_prim_attr('num', output_num)
        output_valid_check = x_shape[self.axis] - output_num
        validator.check_int(output_valid_check, 0, Rel.EQ,
                            "The size of the unstacked dimension minus output_num", self.name)
        out_shapes = []
        out_dtypes = []
        out_shape = x_shape[:self.axis] + x_shape[self.axis + 1:]
        for _ in range(output_num):
            out_shapes.append(tuple(out_shape))
            out_dtypes.append(x['dtype'])
        out_shapes = tuple(out_shapes)
        out_dtypes = tuple(out_dtypes)
        out = {'shape': out_shapes,
               'dtype': out_dtypes,
               'value': None}
        return out


class Unstack(PrimitiveWithInfer):
    r"""
    Unstacks a tensor along the specified axis.

    Unstacks a tensor of rank `R` along the axis dimension; the output tensors will have rank `(R-1)`.

    Given a tensor of shape :math:`(x_1, x_2, ..., x_R)`, if :math:`0 \le axis`,
    the shape of each tensor in the output is :math:`(x_1, x_2, ..., x_{axis}, x_{axis+2}, ..., x_R)`.

    This is the opposite of Stack.

    Args:
        axis (int): Dimension along which to unstack. Default: 0.
            Negative values wrap around. 
The range is [-R, R).

    Inputs:
        - **input_x** (Tensor) - The shape is :math:`(x_1, x_2, ..., x_R)`.
          A tensor to be unstacked and the rank of the tensor must be greater than 0.

    Outputs:
        A tuple of tensors, each with the same shape.

    Raises:
        ValueError: If axis is out of the range [-len(input_x.shape), len(input_x.shape)).

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> unstack = ops.Unstack()
        >>> input_x = Tensor(np.array([[1, 1, 1, 1], [2, 2, 2, 2]]))
        >>> output = unstack(input_x)
        >>> print(output)
        (Tensor(shape=[4], dtype=Int64, value= [1, 1, 1, 1]), Tensor(shape=[4], dtype=Int64, value= [2, 2, 2, 2]))
    """

    @prim_attr_register
    def __init__(self, axis=0):
        """Initialize Unstack"""
        validator.check_value_type("axis", axis, [int], self.name)
        self.axis = axis

    def __infer__(self, x):
        validator.check_subclass("x", x['dtype'], mstype.tensor, self.name)
        x_shape = list(x['shape'])
        dim = len(x_shape)
        validator.check_int_range(self.axis, -dim, dim, Rel.INC_LEFT, 'axis value', self.name)
        if self.axis < 0:
            self.axis = self.axis + dim
        output_num = x_shape[self.axis]
        validator.check_value_type("num", output_num, [int], self.name)
        validator.check_positive_int(output_num, "output_num", self.name)
        self.add_prim_attr('num', output_num)
        output_valid_check = x_shape[self.axis] - output_num
        validator.check_int(output_valid_check, 0, Rel.EQ,
                            "The size of the unstacked dimension minus output_num", self.name)
        out_shapes = []
        out_dtypes = []
        out_shape = x_shape[:self.axis] + x_shape[self.axis + 1:]
        for _ in range(output_num):
            out_shapes.append(tuple(out_shape))
            out_dtypes.append(x['dtype'])
        out_shapes = tuple(out_shapes)
        out_dtypes = tuple(out_dtypes)
        out = {'shape': out_shapes,
               'dtype': out_dtypes,
               'value': None}
        return out
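

# Sketch of Unstack's output contract as inferred above: a rank-R array of
# shape S unstacked along `axis` yields S[axis] outputs, each of shape S with
# that axis removed. This numpy helper is illustrative only and is not part
# of the operator API.
def _sketch_unstack(array, axis=0):
    """Split `array` into a tuple of slices along `axis`, mirroring Unstack."""
    moved = np.moveaxis(array, axis, 0)
    return tuple(moved[i] for i in range(moved.shape[0]))
    # _sketch_unstack(np.array([[1, 1, 1, 1], [2, 2, 2, 2]]))[0] -> [1 1 1 1]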


class Slice(PrimitiveWithInfer):
    """
    Slices a tensor in the specified shape.

    Slices the tensor `input_x` with shape `size`, starting at the location specified by `begin`.
    The slice `begin` represents the offset in each dimension of `input_x`,
    and the slice `size` represents the size of the output tensor.

    Note that `begin` is zero-based and `size` is one-based.

    If `size[i]` is -1, all remaining elements in dimension i are included in the slice.
    This is equivalent to setting :math:`size[i] = input_x.shape(i) - begin[i]`.

    Inputs:
        - **input_x** (Tensor): The target tensor.
          The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
        - **begin** (Union[tuple, list]): The beginning of the slice. Only constant value(>=0) is allowed.
        - **size** (Union[tuple, list]): The size of the slice. Only constant value is allowed.

    Outputs:
        Tensor, the shape is the input `size`, the data type is the same as `input_x`.

    Raises:
        TypeError: If `begin` or `size` is neither tuple nor list.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> data = Tensor(np.array([[[1, 1, 1], [2, 2, 2]],
        ...                         [[3, 3, 3], [4, 4, 4]],
        ...                         [[5, 5, 5], [6, 6, 6]]]).astype(np.int32))
        >>> slice_op = ops.Slice()
        >>> output = slice_op(data, (1, 0, 0), (1, 1, 3))
        >>> print(output)
        [[[3 3 3]]]
        >>> output = slice_op(data, (1, 0, 0), (1, 1, 2))
        >>> print(output)
        [[[3 3]]]
        >>> output = slice_op(data, (1, 0, 0), (1, 1, 1))
        >>> print(output)
        [[[3]]]
        >>> output = slice_op(data, (1, 1, 0), (1, 1, 3))
        >>> print(output)
        [[[4 4 4]]]
        >>> output = slice_op(data, (1, 0, 1), (1, 1, 2))
        >>> print(output)
        [[[3 3]]]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize slice"""
        self.init_prim_io_names(inputs=['x', 'begin', 'size'], outputs=['output'])

    def __infer__(self, x, begin, size):
        x_shape = x['shape']
        x_shp_len = len(x_shape)
        begin_v, size_v = begin['value'], size['value']
        if begin_v is None or size_v is None:
            # if size_v is not None and begin_v is None, it should also be a dynamic output shape.
            if size_v is None:
                if size['shape'][0] < 0:
                    raise ValueError(f"For '{self.name}', a dynamic shape of 'size' is not supported yet.")
                out_shape = [-1] * size['shape'][0]
            else:
                out_shape = [-1] * len(size_v)
            if 'max_shape' in x:
                max_shape = x['max_shape']
                min_shape = x['min_shape']
            else:
                min_shape = x['shape']
                max_shape = x['shape']
            return {'shape': out_shape,
                    'dtype': x['dtype'],
                    'value': None,
                    'min_shape': min_shape,
                    'max_shape': max_shape}
        validator.check_valid_input('begin', begin['value'], self.name)
        validator.check_valid_input('size', size['value'], self.name)
        validator.check_value_type("input begin", begin_v, [tuple, list], self.name)
        validator.check_value_type("input size", size_v, [tuple, list], self.name)
        for key, value in zip(('begin', 'size'), (begin_v, size_v)):
            validator.check(f'len of {key}', len(value),
                            'len x\'s dim', x_shp_len)
        size_v = list(size_v)
        if -1 not in x_shape:
            for i in range(x_shp_len):
                if size_v[i] == -1:
                    size_v[i] = x_shape[i] - begin_v[i]
                validator.check_positive_int(size_v[i], f'input size[{i}]')
                validator.check_non_negative_int(begin_v[i], f'input begin[{i}]')
                if x_shape[i] < begin_v[i] + size_v[i]:
                    y = begin_v[i] + size_v[i]
                    raise ValueError(f"For '{self.name}', the sliced shape cannot be greater than the original "
                                     f"shape, but got begin + size = {y} in dimension {i}, while the original "
                                     f"shape is {x_shape}.")
        return {'shape': size_v,
                'dtype': x['dtype'],
                'value': None}


class ReverseV2(PrimitiveWithInfer):
    """
    Reverses specific dimensions of a tensor.

    .. warning::
        The value range of "axis" is [-dims, dims - 1]. "dims" is the dimension length of "input_x".

    Args:
        axis (Union[tuple(int), list(int)]): The indices of the dimensions to reverse.

    Inputs:
        - **input_x** (Tensor) - The target tensor. 
The data type is Number except float64.
          The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.

    Outputs:
        Tensor, has the same shape and type as `input_x`.

    Raises:
        TypeError: If `axis` is neither list nor tuple.
        TypeError: If element of `axis` is not an int.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> input_x = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8]]), mindspore.int32)
        >>> op = ops.ReverseV2(axis=[1])
        >>> output = op(input_x)
        >>> print(output)
        [[4 3 2 1]
         [8 7 6 5]]
        >>> op = ops.ReverseV2(axis=[1, 0])
        >>> output = op(input_x)
        >>> print(output)
        [[8 7 6 5]
         [4 3 2 1]]
    """

    @prim_attr_register
    def __init__(self, axis):
        """Initialize ReverseV2."""
        validator.check_value_type('axis', axis, [list, tuple], self.name)
        for i, each in enumerate(axis):
            validator.check_value_type(f'axis[{i}]', each, [int], self.name)
        self.axis = axis
        self.init_prim_io_names(inputs=['x'], outputs=['output'])

    def infer_shape(self, x_shape):
        dim = len(x_shape)
        for i, each in enumerate(self.axis):
            validator.check_int_range(each, -dim, dim, Rel.INC_LEFT, f'axis[{i}]', self.name)
        normalized_axis = []
        for i, v in enumerate(self.axis):
            if v < 0:
                normalized_axis.append(v + dim)
            else:
                normalized_axis.append(v)

        if len(normalized_axis) != len(set(normalized_axis)):
            duplicated = [item for item, count in Counter(normalized_axis).items() if count > 1]
            raise ValueError(f"For '{self.name}', the 'axis' cannot contain duplicate dimensions,"
                             f" but got duplicated elements {duplicated}.")

        return x_shape

    def infer_dtype(self, x_dtype):
        validator.check_tensor_dtype_valid('x', x_dtype, (mstype.bool_,) + mstype.number_type, self.name)
        return x_dtype


class Rint(Primitive):
    """
    Returns an integer that is closest to x element-wise.

    Inputs:
        - **input_x** (Tensor) - The target tensor, which must be one of the following types:
          float16, float32, float64. The shape is :math:`(N,*)` where :math:`*` means, any number of
          additional dimensions.

    Outputs:
        Tensor, has the same shape and type as `input_x`.

    Raises:
        TypeError: If dtype of `input_x` is not in [float16, float32, float64].

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> input_x = Tensor(np.array([-1.6, -0.1, 1.5, 2.0]), mindspore.float32)
        >>> op = ops.Rint()
        >>> output = op(input_x)
        >>> print(output)
        [-2. 0. 2. 2.]
        >>> input_x = Tensor(np.array([[-2.0, -1.9, -1.8, -1.7, -1.6],
        ...                            [-2.0, -1.9, -1.8, -1.7, -1.6]]), mindspore.float32)
        >>> output = op(input_x)
        >>> print(output)
        [[-2. -2. -2. -2. -2.]
         [-2. -2. -2. -2. -2.]]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize Rint."""
        self.init_prim_io_names(inputs=['x'], outputs=['output'])
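

# The Rint example above is consistent with numpy's rint, which rounds ties
# to the nearest even value; the exact tie behavior on a given backend should
# be verified. A minimal numpy sketch of that rounding (illustrative only,
# not part of the operator API):
def _sketch_rint():
    """Mirror nearest-integer rounding with numpy's tie-to-even rint."""
    return np.rint(np.array([-1.6, -0.5, 0.5, 1.5, 2.0], np.float32))
    # -> [-2. -0.  0.  2.  2.]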


class Select(Primitive):
    r"""

    Returns the selected elements, either from input :math:`x` or input :math:`y`, depending on the `condition`.

    The `condition` tensor acts as a mask that determines whether the corresponding element / row in the
    output is selected from :math:`x` (if true) or :math:`y` (if false) based on the value of each
    element.

    It can be defined as:

    .. math::
        out_i = \begin{cases}
        x_i, & \text{if } condition_i \\
        y_i, & \text{otherwise}
        \end{cases}

    If `condition` is a vector and :math:`x` and :math:`y` are higher-dimensional tensors, each entry of
    `condition` selects the corresponding row (outermost dimension) to copy from :math:`x` or :math:`y`.
    If `condition` has the same shape as :math:`x` and :math:`y`, the selection is made element-wise.

    Inputs:
        - **input_cond** (Tensor[bool]) - The shape is :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
          The condition tensor, decides which element is chosen.
        - **input_x** (Tensor) - The shape is :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
          The first input tensor.
        - **input_y** (Tensor) - The shape is :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
          The second input tensor.

    Outputs:
        Tensor, has the same shape as `input_x`. The shape is :math:`(x_1, x_2, ..., x_N, ..., x_R)`.

    Raises:
        TypeError: If `input_x` or `input_y` is not a Tensor.
        ValueError: If shape of `input_x` is not equal to shape of `input_y` or shape of `input_cond`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> select = ops.Select()
        >>> input_cond = Tensor([True, False])
        >>> input_x = Tensor([2,3], mindspore.float32)
        >>> input_y = Tensor([1,2], mindspore.float32)
        >>> output = select(input_cond, input_x, input_y)
        >>> print(output)
        [2. 2.]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize Select."""
        self.init_prim_io_names(inputs=['condition', 'x', 'y'], outputs=['output'])


def _compute_slicing_length(begin, end, stride, x_shape, i):
    """Computes the length of the slicing."""
    if i >= len(x_shape):
        raise ValueError(f"For 'StridedSlice', the index must be less than "
                         f"the dimension of 'input_x', but got the dimension of 'input_x': {len(x_shape)} "
                         f"and the index: {i}.")
    x_dim = x_shape[i]
    if stride > 0:
        # When slicing forward, convert begin and end to positive numbers.
        if begin >= x_dim or end < -x_dim:
            # When slicing forward, if begin >= x_dim or end < -x_dim, the length of the slicing is 0.
            slicing_length = 0
        else:
            if -x_dim <= begin < 0:
                begin += x_dim
            if begin < -x_dim:
                # When slicing forward, if begin < -x_dim, set begin = 0, which means start from the 0th element.
                begin = 0
            if -x_dim <= end < 0:
                end += x_dim
            if end > x_dim:
                # When slicing forward, if end > x_dim, set end = x_dim, which means slice to the last element.
                end = x_dim
            if begin >= end:
                # When slicing forward, if begin >= end, the length of the slicing is 0.
                slicing_length = 0
            else:
                slicing_length = 1 + (end - 1 - begin) // stride
    else:
        # When slicing backward, convert begin and end to negative numbers.
        if begin < -x_dim or end >= x_dim:
            # When slicing backward, if begin < -x_dim or end >= x_dim, the length of the slicing is 0.
            slicing_length = 0
        else:
            if 0 <= begin < x_dim:
                begin += -x_dim
            if begin >= x_dim:
                # When slicing backward, if begin >= x_dim, start from the last element.
                begin = -1
            if 0 <= end < x_dim:
                end += -x_dim
            if end < -x_dim - 1:
                # Slicing to the 0th element.
                end = -x_dim - 1
            if begin <= end:
                # When slicing backward, if begin <= end, the length of the slicing is 0.
                slicing_length = 0
            else:
                slicing_length = 1 + (end + 1 - begin) // stride
    return slicing_length
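

# Worked checks of _compute_slicing_length above (illustrative only, not
# part of this module's API): for a dimension of size 5, the forward slice
# [1:5:2] touches indices 1 and 3, and the backward slice [4:-6:-2] touches
# indices 4, 2 and 0.
def _sketch_slicing_length_examples():
    """Spot-check _compute_slicing_length against hand-computed slices."""
    assert _compute_slicing_length(1, 5, 2, [5], 0) == 2    # indices 1, 3
    assert _compute_slicing_length(4, -6, -2, [5], 0) == 3  # indices 4, 2, 0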


class StridedSlice(PrimitiveWithInfer):
    r"""

    Extracts a strided slice of a tensor.

    This operation extracts a fragment of size (end-begin)/stride from the given 'input_tensor'.
    Starting from the beginning position, the fragment continues adding stride to the index until
    all dimensions are not less than the ending position.

    Given an `input_x[m1, m2, ..., mn]`, `begin`, `end` and `strides` will be vectors of length n.

    In each mask field (`begin_mask`, `end_mask`, `ellipsis_mask`, `new_axis_mask`, `shrink_axis_mask`)
    the ith bit will correspond to the ith m.

    If the ith bit of `begin_mask` is set, `begin[i]` is ignored and the fullest possible range in that dimension
    is used instead. `end_mask` is analogous, except with the end range.

    As for a 5*6*7 tensor, `x[2:,:3,:]` is equivalent to `x[2:5,0:3,0:7]`.

    If the ith bit of `ellipsis_mask` is set, as many unspecified dimensions as needed will be inserted between
    other dimensions. Only one non-zero bit is allowed in `ellipsis_mask`.

    As for a 5*6*7*8 tensor, `x[2:,...,:6]` is equivalent to `x[2:5,:,:,0:6]`.
    `x[2:,...]` is equivalent to `x[2:5,:,:,:]`.

    If the ith bit of `new_axis_mask` is set, `begin`, `end` and `strides` are ignored and a new length 1
    dimension is added at the specified position in the output tensor.

    As for a 5*6*7 tensor, `x[:2, newaxis, :6]` will produce a tensor with shape (2, 1, 6, 7).

    If the ith bit of `shrink_axis_mask` is set, the ith dimension is removed from the output, taking the value
    at index `begin[i]`; `end[i]` and `strides[i]` are ignored.

    As for a 5*6*7 tensor, `x[:, 5, :]` will result in `shrink_axis_mask` equal to 2.

    Note:
        The stride may be negative value, which causes reverse slicing.
        The shape of `begin`, `end` and `strides` must be the same.
        `begin` and `end` are zero-indexed. The element of `strides` must be non-zero.

    Args:
        begin_mask (int): An int mask; if the ith bit is set, `begin[i]` is ignored. Default: 0.
        end_mask (int): An int mask; if the ith bit is set, `end[i]` is ignored. Default: 0.
        ellipsis_mask (int): An int mask. Default: 0.
        new_axis_mask (int): An int mask. Default: 0.
        shrink_axis_mask (int): An int mask. Default: 0.

    Inputs:
        - **input_x** (Tensor) - The input Tensor.
        - **begin** (tuple[int]) - A tuple which represents the location where to start. Only
          constant value is allowed.
        - **end** (tuple[int]) - A tuple which represents the maximum location where to end.
          Only constant value is allowed.
        - **strides** (tuple[int]) - A tuple which represents the stride that is continuously added
          before reaching the maximum location. 
Only constant value is allowed.

    Outputs:
        Tensor. The output is explained by the following example.

        In the 0th dimension, begin is 1, end is 2, and strides is 1,
        because :math:`1+1=2\geq2`, the interval is :math:`[1,2)`.
        Thus, return the element with :math:`index = 1` in 0th dimension, i.e., [[3, 3, 3], [4, 4, 4]].

        In the 1st dimension, similarly, the interval is :math:`[0,1)`.
        Based on the return value of the 0th dimension, return the element with :math:`index = 0`,
        i.e., [3, 3, 3].

        In the 2nd dimension, similarly, the interval is :math:`[0,3)`.
        Based on the return value of the 1st dimension, return the element with :math:`index = 0,1,2`,
        i.e., [3, 3, 3].

        Finally, the output is [3, 3, 3].

    Raises:
        TypeError: If `begin_mask`, `end_mask`, `ellipsis_mask`, `new_axis_mask` or `shrink_axis_mask` is not an int.
        TypeError: If `begin`, `end` or `strides` is not a tuple.
        ValueError: If `begin_mask`, `end_mask`, `ellipsis_mask`, `new_axis_mask` or `shrink_axis_mask` is less than 0.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> input_x = Tensor([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]],
        ...                   [[5, 5, 5], [6, 6, 6]]], mindspore.float32)
        >>> # [[[1. 1. 1.]
        >>> #   [2. 2. 2.]]
        >>> #
        >>> #  [[3. 3. 3.]
        >>> #   [4. 4. 4.]]
        >>> #
        >>> #  [[5. 5. 5.]
        >>> #   [6. 6. 6.]]]
        >>> # In order to visually view the multi-dimensional array, write the above as follows:
        >>> # [
        >>> #     [
        >>> #         [1,1,1]
        >>> #         [2,2,2]
        >>> #     ]
        >>> #     [
        >>> #         [3,3,3]
        >>> #         [4,4,4]
        >>> #     ]
        >>> #     [
        >>> #         [5,5,5]
        >>> #         [6,6,6]
        >>> #     ]
        >>> # ]
        >>> strided_slice = ops.StridedSlice()
        >>> output = strided_slice(input_x, (1, 0, 2), (3, 1, 3), (1, 1, 1))
        >>> # Take this " output = strided_slice(input_x, (1, 0, 2), (3, 1, 3), (1, 1, 1)) " as an example,
        >>> # start = [1, 0, 2] , end = [3, 1, 3], stride = [1, 1, 1], Find a segment of (start, end),
        >>> # note that end is an open interval
        >>> # To facilitate understanding, this operator can be divided into three steps:
        >>> # Step 1: Calculation of the first dimension:
        >>> # start = 1, end = 3, stride = 1, so rows 1 and 2 can be taken, which gives the output at this time.
        >>> # output_1st =
        >>> # [
        >>> #     [
        >>> #         [3,3,3]
        >>> #         [4,4,4]
        >>> #     ]
        >>> #     [
        >>> #         [5,5,5]
        >>> #         [6,6,6]
        >>> #     ]
        >>> # ]
        >>> # Step 2: Calculation of the second dimension
        >>> # 2nd dimension, start = 0, end = 1, stride = 1. So only row 0 can be taken, which gives the output
        >>> # at this time.
        >>> # output_2nd =
        >>> # [
        >>> #     [
        >>> #         [3,3,3]
        >>> #     ]
        >>> #     [
        >>> #         [5,5,5]
        >>> #     ]
        >>> # ]
        >>> # Step 3: Calculation of the third dimension
        >>> # 3rd dimension, start = 2, end = 3, stride = 1, so the 2nd column can be taken,
        >>> # which gives the final output at this time.
        >>> # output_3rd =
        >>> # [
        >>> #     [
        >>> #         [3]
        >>> #     ]
        >>> #     [
        >>> #         [5]
        >>> #     ]
        >>> # ]
        >>> # The final output after finishing is:
        >>> print(output)
        [[[3.]]
         [[5.]]]
        >>> # Another example:
        >>> output = strided_slice(input_x, (1, 0, 0), (2, 1, 3), (1, 1, 1))
        >>> print(output)
        [[[3. 3. 
3.]]]
    """

    @prim_attr_register
    def __init__(self,
                 begin_mask=0,
                 end_mask=0,
                 ellipsis_mask=0,
                 new_axis_mask=0,
                 shrink_axis_mask=0):
        """Initialize StridedSlice"""
        self.init_prim_io_names(inputs=['x', 'begin', 'end', 'strides'], outputs=['output'])
        validator.check_non_negative_int(begin_mask, 'begin_mask', self.name)
        validator.check_non_negative_int(end_mask, 'end_mask', self.name)
        validator.check_non_negative_int(ellipsis_mask, 'ellipsis_mask', self.name)
        if len(tuple(filter(lambda x: x == '1', bin(ellipsis_mask)[-1:1:-1]))) > 1:
            raise ValueError(f"For '{self.name}', only one ellipsis is supported in the index, "
                             f"but got 'ellipsis_mask': {ellipsis_mask}.")
        validator.check_non_negative_int(new_axis_mask, 'new_axis_mask', self.name)
        validator.check_non_negative_int(shrink_axis_mask, 'shrink_axis_mask', self.name)

    def _check_and_get_value(self, slice_input, name):
        """Check begin, end, strides. Get its length and value."""
        slice_value = slice_input['value']
        if slice_value is None:
            validator.check_tensor_dtype_valid(name, slice_input['dtype'], [mstype.int64], self.name)
            slice_shape = slice_input['shape']
            if len(slice_shape) != 1:
                raise ValueError(f"For '{self.name}', 'begin', 'end' and 'strides' must all be 1-D, "
                                 f"but got '{name}' shape: {slice_shape}.")
            # not support scalar
            return slice_value, slice_shape[0]

        if isinstance(slice_value, Tensor_):
            validator.check_tensor_dtype_valid(name, slice_input['dtype'], [mstype.int64], self.name)
            slice_value = slice_value.asnumpy().tolist()
        elif not isinstance(slice_value, tuple):
            raise TypeError(f"For '{self.name}', 'begin', 'end' and 'strides' must each be a tuple or Tensor, "
                            f"but got '{name}': {slice_value}.")

        if tuple(filter(lambda x: not isinstance(x, int), slice_value)):
            raise TypeError(f"For '{self.name}', the elements of 'begin', 'end' and 'strides' must be int, "
                            f"but got {name}: {slice_value}.")
        return slice_value, len(slice_value)

    def __infer__(self, x, begin, end, strides):
        x_shape = x['shape']
        if -1 in x_shape:
            raise ValueError(f"For '{self.name}', a dynamic shape of input x is currently not supported.")
        begin_v, begin_len = self._check_and_get_value(begin, 'begin')
        end_v, end_len = self._check_and_get_value(end, 'end')
        strides_v, strides_len = self._check_and_get_value(strides, 'strides')

        if strides_v is not None and tuple(filter(lambda x: x == 0, strides_v)):
            raise ValueError(f"For '{self.name}', the 'strides' cannot contain 0, but got 'strides': {strides_v}.")

        if begin_len != strides_len or end_len != strides_len:
            raise ValueError(f"For '{self.name}', 'begin', 'end' and 'strides' must be the same length, but got "
                             f"'begin' length: {begin_len}, 'end' length: {end_len}, 'strides' length: {strides_len}.")

        if None in (strides_v, begin_v, end_v):
            ret_shape = self._compute_dynamic_slicing_shape(x_shape, begin_len)
            ret_min_shp = [1] * len(x_shape)
            ret_max_shp = x_shape
            for i, val in enumerate(ret_shape):
                if val > 0:
                    ret_min_shp[i] = val
                    ret_max_shp[i] = val
            return {'shape': ret_shape,
                    'dtype': x['dtype'],
                    'value': None,
                    'max_shape': ret_max_shp,
                    'min_shape': ret_min_shp}

        ret_shape = self._compute_slicing_shape(x_shape, begin_v, end_v, strides_v)
        if all(ret_shape):
            value = None
        else:
            init_func = Zero()
            init_func.__enable_zero_dim__ = True
            value = Tensor(dtype=x['dtype'].element_type(), shape=ret_shape, init=init_func)

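        # If constant range metadata ("min_value"/"max_value") rides along
        # with x, slice those bounds with the same begin/end/strides so that
        # downstream inference keeps tight per-element bounds.
        if "max_value" in x and "min_value" in 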
x:\n validator.check_value_type(\"min_value\", x[\"min_value\"], [tuple, list], self.name)\n validator.check_value_type(\"max_value\", x[\"max_value\"], [tuple, list], self.name)\n max_value_np = np.array(x[\"max_value\"])\n min_value_np = np.array(x[\"min_value\"])\n slice_index = []\n for begin_i, end_i, strides_i in zip(begin_v, end_v, strides_v):\n s = slice(begin_i, end_i, strides_i)\n slice_index.append(s)\n slice_index = tuple(slice_index)\n max_value_slice = max_value_np[slice_index]\n min_value_slice = min_value_np[slice_index]\n max_value_slice = tuple(max_value_slice.tolist())\n min_value_slice = tuple(min_value_slice.tolist())\n return {'shape': ret_shape,\n 'dtype': x['dtype'],\n 'value': value,\n 'max_value': max_value_slice,\n 'min_value': min_value_slice}\n\n return {'shape': ret_shape,\n 'dtype': x['dtype'],\n 'value': value}\n\n def _compute_slicing_shape(self, x_shape, begin_v, end_v, strides_v):\n \"\"\"Computes the shape of the slicing.\"\"\"\n x_rank = len(x_shape)\n slice_len = len(begin_v)\n\n # After the integer is converted to binary, it is a str and the first two chars are the flag char '0b'.\n begin_pos = bin(self.begin_mask)[-1:1:-1]\n end_pos = bin(self.end_mask)[-1:1:-1]\n ellipsis_pos = bin(self.ellipsis_mask)[-1:1:-1]\n new_axis_pos = bin(self.new_axis_mask)[-1:1:-1]\n shrink_axis_pos = bin(self.shrink_axis_mask)[-1:1:-1]\n\n ret_shape = []\n i, j = 0, 0\n has_ellipsis = False\n while i < x_rank or j < slice_len:\n if j < slice_len:\n begin, end, stride = begin_v[j], end_v[j], strides_v[j]\n\n if j < len(ellipsis_pos) and ellipsis_pos[j] == '1':\n # When there is ellipsis, the latter part of the ellipsis will be processed separately.\n has_ellipsis = True\n break\n if j < len(begin_pos) and begin_pos[j] == '1':\n begin = -1 if strides_v[j] < 0 else 0\n if j < len(end_pos) and end_pos[j] == '1':\n end = -(x_shape[i] + 1) if strides_v[j] < 0 else x_shape[i]\n if j < len(new_axis_pos) and new_axis_pos[j] == '1':\n ret_shape.append(1)\n j += 1\n continue\n if j < len(shrink_axis_pos) and shrink_axis_pos[j] == '1':\n if (not -x_shape[i] <= begin < x_shape[i]) or stride < 0:\n raise IndexError(f\"For '{self.name}', the 'strides[{i}]' cannot be negative number and \"\n f\"'begin[{i}]' should be in [-{x_shape[i]}, {x_shape[i]}) \"\n f\"when 'shrink_axis_mask' is greater than 0, \"\n f\"but got 'shrink_axis_mask': {self.shrink_axis_mask}, \"\n f\"'strides[{i}]': {stride}, 'begin[{i}]': {begin}.\")\n j += 1\n i += 1\n continue\n else:\n begin, end, stride = 0, x_shape[i], 1\n\n slicing_length = _compute_slicing_length(begin, end, stride, x_shape, i)\n ret_shape.append(slicing_length)\n i += 1\n j += 1\n if has_ellipsis:\n # When there is ellipsis, handle the second half of the ellipsis split.\n ellipsis_occupied_dims = x_rank - i - (slice_len - (j + 1)) + \\\n len(tuple(filter(lambda x: x == '1', new_axis_pos[j + 1:slice_len])))\n ret_shape.extend(x_shape[i:i + ellipsis_occupied_dims])\n j += 1\n i += ellipsis_occupied_dims\n\n while i < x_rank or j < slice_len:\n begin, end, stride = begin_v[j], end_v[j], strides_v[j]\n\n if j < len(begin_pos) and begin_pos[j] == '1':\n begin = -1 if strides_v[j] < 0 else 0\n if j < len(end_pos) and end_pos[j] == '1':\n end = -(x_shape[i] + 1) if strides_v[j] < 0 else x_shape[i]\n if j < len(new_axis_pos) and new_axis_pos[j] == '1':\n ret_shape.append(1)\n j += 1\n continue\n if j < len(shrink_axis_pos) and shrink_axis_pos[j] == '1':\n if (not -x_shape[i] <= begin < x_shape[i]) or stride < 0:\n raise IndexError(f\"For '{self.name}', 
the 'strides[{i}]' cannot be negative number and "
                                     f"'begin[{i}]' should be in [-{x_shape[i]}, {x_shape[i]}) "
                                     f"when 'shrink_axis_mask' is greater than 0, "
                                     f"but got 'shrink_axis_mask': {self.shrink_axis_mask}, "
                                     f"'strides[{i}]': {stride}, 'begin[{i}]': {begin}.")
                j += 1
                i += 1
                continue

            slicing_length = _compute_slicing_length(begin, end, stride, x_shape, i)
            ret_shape.append(slicing_length)
            i += 1
            j += 1
        return ret_shape

    def _compute_dynamic_slicing_shape(self, x_shape, slice_len):
        """Computes the shape of the slicing for dynamic shape, mask is currently not supported."""
        x_rank = len(x_shape)
        if self.begin_mask != 0 or self.end_mask != 0 or self.ellipsis_mask or self.new_axis_mask != 0 \
                or self.shrink_axis_mask != 0:
            raise ValueError("Mask is currently not supported if 'begin', 'end' or 'strides' is not a constant.")
        ret_shape = []
        i, j = 0, 0
        while i < x_rank or j < slice_len:
            slicing_length = -1
            if j >= slice_len:
                if i >= len(x_shape):
                    raise ValueError(f"For 'StridedSlice', the index must be less than "
                                     f"the dimension of 'input_x', but got the dimension of 'input_x': {len(x_shape)} "
                                     f"and the index: {i}.")
                begin, end, stride = 0, x_shape[i], 1
                if end > 0:
                    slicing_length = _compute_slicing_length(begin, end, stride, x_shape, i)
            ret_shape.append(slicing_length)
            i += 1
            j += 1
        return ret_shape


class Diag(PrimitiveWithInfer):
    r"""

    Constructs a diagonal tensor with given diagonal values.

    Assume `input_x` has dimensions :math:`[D_1,... D_k]`, the output is a tensor of
    rank 2k with dimensions :math:`[D_1,..., D_k, D_1,..., D_k]` where:
    :math:`output[i_1,..., i_k, i_1,..., i_k] = input_x[i_1,..., i_k]` and 0 everywhere else.

    Inputs:
        - **input_x** (Tensor) - The input tensor. 
The rank of `input_x` must be at least 1 and less than 5.

    Outputs:
        Tensor, has the same dtype as the `input_x`.

    Raises:
        TypeError: If `input_x` is not a Tensor.
        ValueError: If rank of `input_x` is less than 1.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> input_x = Tensor([1, 2, 3, 4])
        >>> diag = ops.Diag()
        >>> output = diag(input_x)
        >>> print(output)
        [[1 0 0 0]
         [0 2 0 0]
         [0 0 3 0]
         [0 0 0 4]]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize Diag"""

    def infer_dtype(self, x_type):
        validator.check_subclass('input_x', x_type, mstype.tensor, self.name)
        return x_type

    def infer_shape(self, x_shape):
        validator.check("x rank", len(x_shape), "", 1, Rel.GE)
        ret_shape = copy.deepcopy(x_shape)
        ret_shape = ret_shape + ret_shape
        return ret_shape

    def infer_value(self, x):
        if x is None:
            return None
        # do constant-folding only when x rank is 1
        if len(x.shape) != 1:
            return None
        ret = np.diag(x.asnumpy())
        return Tensor(ret)


class DiagPart(PrimitiveWithInfer):
    r"""

    Extracts the diagonal part from given tensor.

    Assume input has dimensions :math:`[D_1,..., D_k, D_1,..., D_k]`, the output is a tensor
    of rank k with dimensions :math:`[D_1,..., D_k]` where:
    :math:`output[i_1,..., i_k] = input[i_1,..., i_k, i_1,..., i_k]`.

    Inputs:
        - **input_x** (Tensor) - The input tensor of rank 2k, k is not zero.

    Outputs:
        Tensor, the extracted diagonal has the same dtype as the `input_x`.

    Raises:
        TypeError: If `input_x` is not a Tensor.
        ValueError: If rank of `input_x` is odd or zero.
        ValueError: If input_shape[i] is not equal to input_shape[i + len(input_shape)/2].

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> input_x = Tensor([[1, 0, 0, 0],
        ...                   [0, 2, 0, 0],
        ...                   [0, 0, 3, 0],
        ...                   [0, 0, 0, 4]])
        >>> diag_part = ops.DiagPart()
        >>> output = diag_part(input_x)
        >>> print(output)
        [1 2 3 4]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize DiagPart"""

    def infer_dtype(self, x_type):
        validator.check_subclass('input_x', x_type, mstype.tensor, self.name)
        return x_type

    def infer_shape(self, x_shape):
        if len(x_shape) % 2 != 0 or \
                not x_shape:
            raise ValueError(f"For '{self.name}', the dimension of 'input_x' must be non-zero and even, "
                             f"but got dimension {len(x_shape)}, with shapes {x_shape}.")
        length = len(x_shape) // 2
        for i in range(length):
            validator.check('input_shape[i + len(input_shape)/2]', x_shape[i + length],
                            'input_shape[i]', x_shape[i], Rel.EQ, self.name)
        ret_shape = x_shape[0:length]
        return ret_shape

    def infer_value(self, x):
        if x is None:
            return None
        # do constant-folding only when x rank is 2
        if len(x.shape) != 2:
            return None
        ret = np.diag(x.asnumpy())
        return Tensor(ret)
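

# Diag and DiagPart invert each other on rank-1 input; both constant-folding
# paths above delegate to np.diag, which builds a diagonal matrix from a
# vector and extracts the diagonal from a matrix. A minimal round-trip sketch
# (illustrative only, not part of the operator API):
def _sketch_diag_round_trip():
    """Check that extracting the diagonal of np.diag(v) recovers v."""
    v = np.array([1, 2, 3, 4])
    full = np.diag(v)          # rank 2: v on the main diagonal, zeros elsewhere
    recovered = np.diag(full)  # rank 1: the main diagonal of `full`
    return np.array_equal(recovered, v)  # True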


class Eye(PrimitiveWithInfer):
    """

    Creates a tensor with ones on the diagonal and zeros in the rest.

    Inputs:
        - **n** (int) - The number of rows of returned tensor. Constant value only.
        - **m** (int) - The number of columns of returned tensor. Constant value only.
        - **t** (mindspore.dtype) - MindSpore's dtype, the data type of the returned tensor.
          The data type can be Number.

    Outputs:
        Tensor, a tensor with ones on the diagonal and the rest of elements are zero. The shape of `output`
        depends on the inputs `n` and `m`, and its data type depends on the input `t`.

    Raises:
        TypeError: If `m` or `n` is not an int.
        ValueError: If `m` or `n` is less than 1.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> eye = ops.Eye()
        >>> output = eye(2, 2, mindspore.int32)
        >>> print(output)
        [[1 0]
         [0 1]]
        >>> print(output.dtype)
        Int32
        >>> output = eye(1, 2, mindspore.float64)
        >>> print(output)
        [[1. 0.]]
        >>> print(output.dtype)
        Float64
        >>> # to get an anti-diagonal matrix:
        >>> anti_diagonal_input = eye(2, 2, mindspore.int32)
        >>> # Note that ReverseV2 only supports "Ascend" and "GPU" at this time
        >>> reverse = ops.ReverseV2([1])
        >>> anti_diagonal_output = reverse(anti_diagonal_input)
        >>> print(anti_diagonal_output)
        [[0 1]
         [1 0]]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize Eye"""

    def infer_value(self, n, m, t):
        validator.check_positive_int(n, "n", self.name)
        validator.check_positive_int(m, "m", self.name)
        args = {"dtype": t}
        validator.check_types_same_and_valid(args, mstype.number_type + (mstype.bool_,), self.name)
        np_type = mstype.dtype_to_nptype(t)
        ret = np.eye(n, m, dtype=np_type)
        return Tensor(ret)


class ScatterNd(PrimitiveWithInfer):
    r"""
    Scatters a tensor into a new tensor depending on the specified indices.

    Creates an empty tensor with the given `shape`, and sets values by scattering the update tensor
    according to `indices`.

    The empty tensor has rank P and `indices` has rank Q where `Q >= 2`.

    `indices` has shape :math:`(i_0, i_1, ..., i_{Q-2}, N)` where `N <= P`.

    The last dimension of `indices` (with length `N` ) indicates slices along the `N` th dimension of the empty tensor.

    `updates` is a tensor of rank `Q-1+P-N`. Its shape is: :math:`(i_0, i_1, ..., i_{Q-2}, shape_N, ..., shape_{P-1})`.

    The following figure shows the calculation process of inserting two slices in the first dimension of a rank-3
    tensor with two matrices of new values:

    .. image:: api_img/ScatterNd.png

    Inputs:
        - **indices** (Tensor) - The index of scattering in the new tensor with int32 or int64 data type.
          The rank of indices must be at least 2 and `indices_shape[-1] <= len(shape)`.
        - **updates** (Tensor) - The source Tensor to be scattered.
          It has shape `indices_shape[:-1] + shape[indices_shape[-1]:]`.
        - **shape** (tuple[int]) - Define the shape of the output tensor, has the same data type as indices.
          The shape of `shape` is :math:`(x_1, x_2, ..., x_R)`, and the length of 'shape' is greater than or
          equal to 2. In other words, the shape of `shape` is at least :math:`(x_1, x_2)`.
          And the value of any element in `shape` must be greater than or equal to 1.
          In other words, :math:`x_1` >= 1, :math:`x_2` >= 1.

    Outputs:
        Tensor, the new tensor, has the same type as `updates` and the same shape as `shape`.

    Raises:
        TypeError: If `shape` is not a tuple.
        ValueError: If any element of `shape` is less than 1.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> op = ops.ScatterNd()
        >>> indices = Tensor(np.array([[0], [2]]), mindspore.int32)
        >>> updates = Tensor(np.array([[[1, 1, 1, 1], [2, 2, 2, 2],
        ...                             [3, 3, 3, 3], [4, 4, 4, 4]],
        ...                            [[1, 1, 1, 1], [2, 2, 2, 2],
        ...                             [3, 3, 3, 3], [4, 4, 4, 4]]]), mindspore.float32)
        >>> shape = (4, 4, 4)
        >>> output = op(indices, updates, shape)
        >>> print(output)
        [[[1. 1. 1. 1.]
          [2. 2. 2. 2.]
          [3. 3. 3. 3.]
          [4. 4. 4. 4.]]
         [[0. 0. 0. 0.]
          [0. 0. 0. 0.]
          [0. 0. 0. 0.]
          [0. 0. 0. 0.]]
         [[1. 1. 1. 1.]
          [2. 2. 
2. 2.]
          [3. 3. 3. 3.]
          [4. 4. 4. 4.]]
         [[0. 0. 0. 0.]
          [0. 0. 0. 0.]
          [0. 0. 0. 0.]
          [0. 0. 0. 0.]]]
        >>> indices = Tensor(np.array([[0, 1], [1, 1]]), mindspore.int32)
        >>> updates = Tensor(np.array([3.2, 1.1]), mindspore.float32)
        >>> shape = (3, 3)
        >>> output = op(indices, updates, shape)
        >>> # In order to facilitate understanding, explain the operator pseudo-operation process step by step:
        >>> # Step 1: Generate an empty Tensor of the specified shape according to the shape
        >>> # [
        >>> #     [0. 0. 0.]
        >>> #     [0. 0. 0.]
        >>> #     [0. 0. 0.]
        >>> # ]
        >>> # Step 2: Modify the data at the specified locations according to the indices
        >>> # 0th row of indices is [0, 1], 0th row of updates is 3.2.
        >>> # means that position [0, 1] of the empty tensor is set to 3.2
        >>> # [
        >>> #     [0. 3.2 0.]
        >>> #     [0. 0. 0.]
        >>> #     [0. 0. 0.]
        >>> # ]
        >>> # 1st row of indices is [1, 1], 1st row of updates is 1.1.
        >>> # means that position [1, 1] of the empty tensor is set to 1.1
        >>> # [
        >>> #     [0. 3.2 0.]
        >>> #     [0. 1.1 0.]
        >>> #     [0. 0. 0.]
        >>> # ]
        >>> # The final result is as follows:
        >>> print(output)
        [[0. 3.2 0.]
         [0. 1.1 0.]
         [0. 0. 0.]]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize ScatterNd"""
        self.init_prim_io_names(inputs=['indices', 'update', 'shape'], outputs=['output'])

    def __infer__(self, indices, update, shape):
        shp = shape['value']
        validator.check_subclass("update_dtype", update['dtype'], mstype.tensor, self.name)
        validator.check_tensor_dtype_valid("indices", indices['dtype'], [mstype.int32, mstype.int64], self.name)
        validator.check_value_type("shape", shp, [tuple], self.name)
        for i, x in enumerate(shp):
            validator.check_positive_int(x, f'shape[{i}]', self.name)

        indices_shape, update_shape = indices["shape"], update["shape"]
        if indices_shape[0] != update_shape[0]:
            raise ValueError(f"For '{self.name}', the first shape of 'indices' must be the same as the first shape "
                             f"of 'updates', but got the first shape of 'indices': {indices_shape[0]}, "
                             f"the first shape of 'updates': {update_shape[0]}.")

        return {'shape': shp,
                'dtype': update['dtype'],
                'value': None}
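

# A compact numpy model of the ScatterNd walkthrough above (illustrative
# only, not part of the operator API): start from zeros of the requested
# shape and write each update at its index vector. Duplicate index vectors
# are not modeled here.
def _sketch_scatter_nd(indices, updates, shape):
    """Return zeros of `shape` with `updates[k]` written at `indices[k]`."""
    out = np.zeros(shape, dtype=updates.dtype)
    for idx, upd in zip(indices, updates):
        out[tuple(idx)] = upd
    return out
    # _sketch_scatter_nd(np.array([[0, 1], [1, 1]]),
    #                    np.array([3.2, 1.1], np.float32), (3, 3))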


class ResizeNearestNeighbor(Primitive):
    r"""
    Resizes the input tensor by using the nearest neighbor algorithm.

    Resizes the input tensor to a given size by using the nearest neighbor algorithm. The nearest
    neighbor algorithm selects the value of the nearest point and does not consider the
    values of neighboring points at all, yielding a piecewise-constant interpolant.

    Args:
        size (Union[tuple, list]): The target size. The dimension of size must be 2.
        align_corners (bool): Whether the centers of the 4 corner pixels of the input
            and output tensors are aligned. Default: False.

    Inputs:
        - **input_x** (Tensor) - The input tensor. The shape of the tensor is :math:`(N, C, H, W)`.

    Outputs:
        Tensor, the shape of the output tensor is :math:`(N, C, NEW\_H, NEW\_W)`.
        The data type is the same as the `input_x`.

    Raises:
        TypeError: If `size` is neither tuple nor list.
        TypeError: If `align_corners` is not a bool.
        ValueError: If length of `size` is not equal to 2.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> input_tensor = Tensor(np.array([[[[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]]]), mindspore.float32)
        >>> resize = ops.ResizeNearestNeighbor((2, 2))
        >>> output = resize(input_tensor)
        >>> print(output)
        [[[[-0.1 0.3]
           [ 0.4 0.5]]]]
    """

    @prim_attr_register
    def __init__(self, size, align_corners=False):
        """Initialize ResizeNearestNeighbor"""
        validator.check_value_type("size", size, [tuple, list], self.name)
        validator.check_value_type("align_corners", align_corners, [bool], self.name)
        validator.check_equal_int(len(size), 2, "length of size", self.name)
        for i, value in enumerate(size):
            validator.check_non_negative_int(value, f'{i}th value of size', self.name)
        self.init_prim_io_names(inputs=['image_in'], outputs=['image_out'])


class GatherNd(Primitive):
    r"""
    Gathers slices from a tensor by indices.

    Using given indices to gather slices from a tensor with a specified shape.

    `indices` is a K-dimensional integer tensor. Suppose it is a (K-1)-dimensional tensor, each element of which
    defines a slice of `input_x`:

    .. math::
        output[(i_0, ..., i_{K-2})] = input\_x[indices[(i_0, ..., i_{K-2})]]

    The last dimension of `indices` cannot exceed the rank of `input_x`:
    :math:`indices.shape[-1] <= input\_x.rank`.

    Inputs:
        - **input_x** (Tensor) - The target tensor to gather values.
          The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
        - **indices** (Tensor) - The index tensor, with int32 or int64 data type.

    Outputs:
        Tensor, has the same type as `input_x` and the shape is indices_shape[:-1] + x_shape[indices_shape[-1]:].

    Raises:
        ValueError: If length of shape of `input_x` is less than the last dimension of `indices`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> op = ops.GatherNd()
        >>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
        >>> indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32)
        >>> output = op(input_x, indices)
        >>> print(output)
        [-0.1 0.5]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize GatherNd"""
        self.init_prim_io_names(inputs=['input_x', 'indices'], outputs=['y'])


class TensorScatterUpdate(PrimitiveWithInfer):
    """
    Creates a new tensor by updating the positions in `input_x` indicated by
    `indices`, with values from `update`. This operation is almost equivalent to using
    ScatterNd, except that the updates are applied on `input_x` instead of a zero tensor.

    `indices` must have rank at least 2; the last axis is the depth of each index
    vector. For each index vector, there must be a corresponding value in `update`. If
    the depth of each index tensor matches the rank of `input_x`, then each index
    vector corresponds to a scalar in `input_x` and each `update` updates a scalar. 
If
    the depth of each index tensor is less than the rank of `input_x`, then each index
    vector corresponds to a slice in `input_x`, and each `update` updates a slice.

    The order in which updates are applied is nondeterministic, meaning that if there
    are multiple index vectors in `indices` that correspond to the same position, the
    value of that position in the output will be nondeterministic.

    Inputs:
        - **input_x** (Tensor) - The target tensor. The dimension of input_x must be no less than indices.shape[-1].
          The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
          The data type is Number.
        - **indices** (Tensor) - The index of input tensor whose data type is int32 or int64.
          The rank must be at least 2.
        - **update** (Tensor) - The tensor to update the input tensor, has the same type as input, and

          :math:`update.shape = indices.shape[:-1]+input_x.shape[indices.shape[-1]:]`

    Outputs:
        Tensor, has the same shape and type as `input_x`.

    Raises:
        TypeError: If dtype of `indices` is neither int32 nor int64.
        ValueError: If length of shape of `input_x` is less than the last dimension of shape of `indices`.
        ValueError: If the shape of `update` does not match the shapes of `input_x` and `indices`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
        >>> indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32)
        >>> update = Tensor(np.array([1.0, 2.2]), mindspore.float32)
        >>> op = ops.TensorScatterUpdate()
        >>> output = op(input_x, indices, update)
        >>> print(output)
        [[ 1. 0.3 3.6]
         [ 0.4 2.2 -3.2]]
    """

    @prim_attr_register
    def __init__(self):
        self.init_prim_io_names(inputs=['input_x', 'indices', 'updates'], outputs=['y'])

    def infer_shape(self, input_x_shape, indices_shape, updates_shape):
        if len(indices_shape) < 2:
            raise ValueError(f"For '{self.name}', the dimension of 'indices' cannot be less than 2,"
                             f" but got {len(indices_shape)}.")

        if indices_shape[-1] > len(input_x_shape):
            raise ValueError(f"For '{self.name}', the last dimension of 'indices' must be less than or equal to "
                             f"the dimension of 'input_x', but got the "
                             f"last dimension of 'indices': {indices_shape[-1]} and the dimension of 'input_x': "
                             f"{len(input_x_shape)}.")

        updates_shape_check = indices_shape[:-1] + input_x_shape[indices_shape[-1]:]
        if updates_shape_check != updates_shape:
            raise ValueError(f"For '{self.name}', the shape of 'update' must be equal to updates_shape_check, "
                             f"where updates_shape_check = indices_shape[:-1] + input_x_shape[indices_shape[-1]:] "
                             f"but got the shape of 'update': {updates_shape}, "
                             f"updates_shape_check: {updates_shape_check}, indices_shape: {indices_shape} and "
                             f"input_x_shape: {input_x_shape}. Please check input_x_shape and indices_shape.")

        return input_x_shape

    def infer_dtype(self, input_x_dtype, indices_dtype, updates_dtype):
        validator.check_tensor_dtype_valid('indices', indices_dtype, [mstype.int32, mstype.int64], self.name)
        args = {"input_x": input_x_dtype, "updates": updates_dtype}
        validator.check_tensors_dtypes_same_and_valid(args, (mstype.bool_,) + mstype.number_type, self.name)
        return input_x_dtype
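

# A compact numpy model of TensorScatterUpdate's semantics (illustrative
# only, not part of the operator API): copy the input and overwrite each
# indexed position with its update value.
def _sketch_tensor_scatter_update(input_x, indices, update):
    """Return a copy of `input_x` with `update[k]` written at `indices[k]`."""
    out = input_x.copy()
    for idx, upd in zip(indices, update):
        out[tuple(idx)] = upd
    return out


class TensorScatterAdd(PrimitiveWithInfer):
    """
    Creates a new tensor by adding the values from the positions in `input_x` indicated by
    `indices`, with values from `updates`. 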
When multiple values are given for the same\n index, the updated result will be the sum of all values. This operation is almost\n equivalent to using ScatterNdAdd, except that the updates are applied on `Tensor`\n instead of `Parameter`.\n\n The last axis of `indices` is the depth of each index vectors. For each index vector,\n there must be a corresponding value in `updates`. The shape of `updates` should be\n equal to the shape of `input_x[indices]`. For more details, see use cases.\n\n Note:\n If some values of the `indices` are out of bound, instead of raising an index error,\n the corresponding `updates` will not be updated to `input_x`.\n\n Inputs:\n - **input_x** (Tensor) - The target tensor. The dimension of input_x must be no less than indices.shape[-1].\n - **indices** (Tensor) - The index of input tensor whose data type is int32 or int64.\n The rank must be at least 2.\n - **updates** (Tensor) - The tensor to update the input tensor, has the same type as input,\n and updates.shape should be equal to indices.shape[:-1] + input_x.shape[indices.shape[-1]:].\n\n Outputs:\n Tensor, has the same shape and type as `input_x`.\n\n Raises:\n TypeError: If dtype of `indices` is neither int32 nor int64.\n ValueError: If length of shape of `input_x` is less than the last dimension of shape of `indices`.\n\n Supported Platforms:\n ``GPU``\n\n Examples:\n >>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)\n >>> indices = Tensor(np.array([[0, 0], [0, 0]]), mindspore.int32)\n >>> updates = Tensor(np.array([1.0, 2.2]), mindspore.float32)\n >>> # Next, demonstrate the approximate operation process of this operator:\n >>> # 1, indices[0] = [0, 0], indices[1] = [0, 0]\n >>> # 2, And input_x[0, 0] = -0.1\n >>> # 3, So input_x[indices] = [-0.1, -0.1]\n >>> # 4, Satisfy the above formula: input_x[indices].shape=(2) == updates.shape=(2)\n >>> op = ops.TensorScatterAdd()\n >>> # 5, Perform the addition operation for the first time:\n >>> # first_input_x = input_x[0][0] + updates[0] = [[0.9, 0.3, 3.6], [0.4, 0.5, -3.2]]\n >>> # 6, Perform the addition operation for the second time:\n >>> # second_input_x = input_x[0][0] + updates[1] = [[3.1, 0.3, 3.6], [0.4, 0.5, -3.2]]\n >>> output = op(input_x, indices, updates)\n >>> print(output)\n [[ 3.1 0.3 3.6]\n [ 0.4 0.5 -3.2]]\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n self.init_prim_io_names(inputs=['input_x', 'indices', 'updates'], outputs=['y'])\n\n def infer_shape(self, input_x_shape, indices_shape, updates_shape):\n if len(indices_shape) < 2:\n raise ValueError(f\"For '{self.name}', the dimension of 'indices' cannot be less than 2,\"\n f\" but got {len(indices_shape)}.\")\n\n if indices_shape[-1] > len(input_x_shape):\n raise ValueError(f\"For '{self.name}', the last dimension of 'indices' must be less than or equal to \"\n f\"the dimension of 'input_x', but got the \"\n f\"last dimension of 'indices': {indices_shape[-1]} and the dimension of 'input_x': \"\n f\"{len(input_x_shape)}.\")\n\n updates_shape_check = indices_shape[:-1] + input_x_shape[indices_shape[-1]:]\n if updates_shape_check != updates_shape:\n raise ValueError(f\"For '{self.name}', the shape of 'update' must be equal to updates_shape_check, \"\n f\"where updates_shape_check = indices_shape[:-1] + input_x_shape[indices_shape[-1]:] \"\n f\"but got the shape of 'update': {updates_shape}, \"\n f\"updates_shape_check: {updates_shape_check}, indices_shape: {indices_shape} and \"\n f\"input_x_shape: {input_x_shape}. 
Please check input_x_shape and indices_shape.")

        return input_x_shape

    def infer_dtype(self, input_x_dtype, indices_dtype, updates_dtype):
        validator.check_tensor_dtype_valid('indices', indices_dtype, [mstype.int32, mstype.int64], self.name)
        args = {"input_x": input_x_dtype, "updates": updates_dtype}
        validator.check_tensors_dtypes_same_and_valid(args, (mstype.bool_,) + mstype.number_type, self.name)
        return input_x_dtype


class ScatterUpdate(_ScatterOpDynamic):
    r"""
    Updates tensor values by using input indices and values.

    Using given values to update tensor values, along with the input indices.

    For each `i, ..., j` in `indices.shape`:

    .. math::

        \text{input_x}[\text{indices}[i, ..., j], :] = \text{updates}[i, ..., j, :]

    Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types
    consistent. If they have different data types, the lower priority data type will be converted to
    the relatively highest priority data type.

    Args:
        use_locking (bool): Whether to protect the assignment by a lock. Default: True.

    Inputs:
        - **input_x** (Parameter) - The target tensor, with data type of Parameter.
          The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
        - **indices** (Tensor) - The index of input tensor. With int32 data type.
          If there are duplicates in indices, the order for updating is undefined.
        - **updates** (Tensor) - The tensor to update the input tensor, has the same type as input,
          and updates.shape = indices.shape + input_x.shape[1:].

    Outputs:
        Tensor, has the same shape and type as `input_x`.

    Raises:
        TypeError: If `use_locking` is not a bool.
        TypeError: If `indices` is not an int32.
        RuntimeError: If the data type of `input_x` and `updates` conversion of Parameter
            is required when data type conversion of Parameter is not supported.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> np_x = np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]])
        >>> input_x = mindspore.Parameter(Tensor(np_x, mindspore.float32), name="x")
        >>> indices = Tensor(np.array([0, 1]), mindspore.int32)
        >>> np_updates = np.array([[2.0, 1.2, 1.0], [3.0, 1.2, 1.0]])
        >>> updates = Tensor(np_updates, mindspore.float32)
        >>> op = ops.ScatterUpdate()
        >>> output = op(input_x, indices, updates)
        >>> print(output)
        [[2. 1.2 1.]
         [3. 1.2 1.]]
    """

    @prim_attr_register
    def __init__(self, use_locking=True):
        """Initialize ScatterUpdate"""
        validator.check_value_type('use_locking', use_locking, [bool], self.name)
        self.init_prim_io_names(inputs=['x', 'indices', 'updates'], outputs=['y'])
        self.add_prim_attr('side_effect_mem', True)
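

# A compact numpy model of ScatterUpdate's row semantics (illustrative only,
# not part of the operator API): row `indices[k]` of the input is replaced
# by `updates[k]`, mirroring the docstring example above.
def _sketch_scatter_update(input_x, indices, updates):
    """Return a copy of `input_x` with rows at `indices` replaced by `updates`."""
    out = input_x.copy()
    for k, idx in enumerate(indices):
        out[idx] = updates[k]
    return out
    # With the example above, rows 0 and 1 are overwritten, giving
    # [[2., 1.2, 1.], [3., 1.2, 1.]].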


class ScatterNdUpdate(Primitive):
    r"""
    Updates tensor values by using input indices and values.

    Using given values to update tensor values, along with the input indices.

    `input_x` has rank P and `indices` has rank Q where `Q >= 2`.

    `indices` has shape :math:`(i_0, i_1, ..., i_{Q-2}, N)` where `N <= P`.

    The last dimension of `indices` (with length `N` ) indicates slices along the `N` th dimension of `input_x`.

    `updates` is a tensor of rank `Q-1+P-N`. Its shape is:
    :math:`(i_0, i_1, ..., i_{Q-2}, x\_shape_N, ..., x\_shape_{P-1})`.

    Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types
    consistent. If they have different data types, the lower priority data type will be converted to
    the relatively highest priority data type.

    Args:
        use_locking (bool): Whether to protect the assignment by a lock. Default: True.

    Inputs:
        - **input_x** (Parameter) - The target tensor, with data type of Parameter.
          The shape is :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
        - **indices** (Tensor) - The index of input tensor, with int32 data type.
        - **updates** (Tensor) - The tensor to be updated to the input tensor, has the same type as input.
          The shape is `indices_shape[:-1] + x_shape[indices_shape[-1]:]`.

    Outputs:
        Tensor, has the same shape and type as `input_x`.

    Raises:
        TypeError: If `use_locking` is not a bool.
        TypeError: If `indices` is not an int32.
        RuntimeError: If the data type of `input_x` and `updates` conversion of Parameter
            is required when data type conversion of Parameter is not supported.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> np_x = np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]])
        >>> input_x = mindspore.Parameter(Tensor(np_x, mindspore.float32), name="x")
        >>> indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32)
        >>> updates = Tensor(np.array([1.0, 2.2]), mindspore.float32)
        >>> op = ops.ScatterNdUpdate()
        >>> output = op(input_x, indices, updates)
        >>> print(output)
        [[1. 0.3 3.6]
         [0.4 2.2 -3.2]]
    """

    __mindspore_signature__ = (
        sig.make_sig('input_x', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('indices', dtype=sig.sig_dtype.T1),
        sig.make_sig('updates', dtype=sig.sig_dtype.T)
    )

    @prim_attr_register
    def __init__(self, use_locking=True):
        """Initialize ScatterNdUpdate"""
        validator.check_value_type('use_locking', use_locking, [bool], self.name)
        self.init_prim_io_names(inputs=['input_x', 'indices', 'value'], outputs=['y'])
        self.add_prim_attr('side_effect_mem', True)


class ScatterMax(_ScatterOp):
    r"""
    Updates the value of the input tensor through the maximum operation.

    Using given values to update tensor value through the max operation, along with the input indices.
    This operation outputs the `input_x` after the update is done, which makes it convenient to use the
    updated value.

    For each `i, ..., j` in `indices.shape`:

    .. math::

        \text{input_x}[\text{indices}[i, ..., j], :]
        = max(\text{input_x}[\text{indices}[i, ..., j], :], \text{updates}[i, ..., j, :])

    Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types
    consistent. If they have different data types, the lower priority data type will be converted to
    the relatively highest priority data type.

    Args:
        use_locking (bool): Whether to protect the assignment by a lock. 
Default: True.\n\n Inputs:\n - **input_x** (Parameter) - The target tensor, with data type of Parameter.\n The shape is :math:`(N,*)` where :math:`*` means,any number of additional dimensions.\n - **indices** (Tensor) - The index to do max operation whose data type must be mindspore.int32.\n - **updates** (Tensor) - The tensor that performs the maximum operation with `input_x`,\n the data type is the same as `input_x`, the shape is `indices_shape + x_shape[1:]`.\n\n Outputs:\n Tensor, the updated `input_x`, has the same shape and type as `input_x`.\n\n Raises:\n TypeError: If `use_locking` is not a bool.\n TypeError: If `indices` is not an int32.\n ValueError: If the shape of `updates` is not equal to `indices_shape + x_shape[1:]`.\n RuntimeError: If the data type of `input_x` and `updates` conversion of Parameter\n is required when data type conversion of Parameter is not supported.\n\n Supported Platforms:\n ``Ascend`` ``CPU``\n\n Examples:\n >>> input_x = Parameter(Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), mindspore.float32),\n ... name=\"input_x\")\n >>> indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32)\n >>> updates = Tensor(np.ones([2, 2, 3]) * 88, mindspore.float32)\n >>> scatter_max = ops.ScatterMax()\n >>> output = scatter_max(input_x, indices, updates)\n >>> print(output)\n [[88. 88. 88.]\n [88. 88. 88.]]\n \"\"\"\n\n\nclass ScatterMin(_ScatterOp):\n r\"\"\"\n Updates the value of the input tensor through the minimum operation.\n\n Using given values to update tensor value through the min operation, along with the input indices.\n This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.\n\n for each `i, ..., j` in `indices.shape`:\n\n .. math::\n\n \\text{input_x}[\\text{indices}[i, ..., j], :]\n = min(\\text{input_x}[\\text{indices}[i, ..., j], :], \\text{updates}[i, ..., j, :])\n\n Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent.\n If they have different data types, the lower priority data type will be converted to\n the relatively highest priority data type.\n\n Args:\n use_locking (bool): Whether protect the assignment by a lock. Default: False.\n\n Inputs:\n - **input_x** (Parameter) - The target tensor, with data type of Parameter.\n The shape is :math:`(N,*)` where :math:`*` means,any number of additional dimensions.\n - **indices** (Tensor) - The index to do min operation whose data type must be mindspore.int32.\n - **updates** (Tensor) - The tensor doing the min operation with `input_x`,\n the data type is same as `input_x`, the shape is `indices_shape + x_shape[1:]`.\n\n Outputs:\n Tensor, the updated `input_x`, has the same shape and type as `input_x`.\n\n Raises:\n TypeError: If `use_locking` is not a bool.\n TypeError: If `indices` is not an int32.\n ValueError: If the shape of `updates` is not equal to `indices_shape + x_shape[1:]`.\n RuntimeError: If the data type of `input_x` and `updates` conversion of Parameter\n is required when data type conversion of Parameter is not supported.\n\n Supported Platforms:\n ``Ascend`` ``CPU``\n\n Examples:\n >>> input_x = Parameter(Tensor(np.array([[0.0, 1.0, 2.0], [0.0, 0.0, 0.0]]), mindspore.float32),\n ... name=\"input_x\")\n >>> indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32)\n >>> update = Tensor(np.ones([2, 2, 3]), mindspore.float32)\n >>> scatter_min = ops.ScatterMin()\n >>> output = scatter_min(input_x, indices, update)\n >>> print(output)\n [[0. 1. 1.]\n [0. 0. 
0.]]\n        \"\"\"\n\n\nclass ScatterAdd(_ScatterOpDynamic):\n    r\"\"\"\n    Updates the value of the input tensor through the addition operation.\n\n    Using given values to update tensor value through the add operation, along with the input indices.\n    This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.\n\n    For each `i, ..., j` in `indices.shape`:\n\n    .. math::\n\n        \text{input_x}[\text{indices}[i, ..., j], :] \mathrel{+}= \text{updates}[i, ..., j, :]\n\n    Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent.\n    If they have different data types, the lower priority data type will be converted to\n    the relatively highest priority data type.\n\n    Note:\n        This is an in-place update operator. Therefore, the `input_x` will be updated after the operation is completed.\n\n    Args:\n        use_locking (bool): Whether to protect the assignment by a lock. Default: False.\n\n    Inputs:\n        - **input_x** (Parameter) - The target tensor, with data type of Parameter.\n          The shape is :math:`(N,*)` where :math:`*` means any number of additional dimensions.\n        - **indices** (Tensor) - The index to do add operation whose data type must be mindspore.int32.\n        - **updates** (Tensor) - The tensor doing the add operation with `input_x`,\n          the data type is the same as `input_x`, the shape is `indices_shape + x_shape[1:]`.\n\n    Outputs:\n        Tensor, the updated `input_x`, has the same shape and type as `input_x`.\n\n    Raises:\n        TypeError: If `use_locking` is not a bool.\n        TypeError: If `indices` is not an int32.\n        ValueError: If the shape of `updates` is not equal to `indices_shape + x_shape[1:]`.\n        RuntimeError: If the data type of `input_x` and `updates` conversion of Parameter\n            is required when data type conversion of Parameter is not supported.\n\n    Supported Platforms:\n        ``Ascend`` ``GPU`` ``CPU``\n\n    Examples:\n        >>> input_x = Parameter(Tensor(np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]), mindspore.float32), name=\"x\")\n        >>> indices = Tensor(np.array([[0, 1], [1, 1]]), mindspore.int32)\n        >>> updates = Tensor(np.ones([2, 2, 3]), mindspore.float32)\n        >>> scatter_add = ops.ScatterAdd()\n        >>> output = scatter_add(input_x, indices, updates)\n        >>> print(output)\n        [[1. 1. 1.]\n         [3. 3. 3.]]
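\n        >>> # An illustrative NumPy cross-check of the accumulation semantics above (not part\n        >>> # of the operator): np.add.at applies updates unbuffered, so rows selected by\n        >>> # duplicate indices accumulate every one of their updates.\n        >>> ref = np.zeros((2, 3), dtype=np.float32)\n        >>> np.add.at(ref, np.array([[0, 1], [1, 1]]), np.ones([2, 2, 3], np.float32))\n        >>> print(ref)\n        [[1. 1. 1.]\n         [3. 3. 3.]]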
\n        >>> # input_x will be updated after the operation is completed. input_x needs to be re-initialized.\n        >>> input_x = Parameter(Tensor(np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]), mindspore.float32), name=\"x\")\n        >>> # for indices = [[0, 1], [1, 1]]\n        >>> # step 1: [0, 1]\n        >>> # input_x[0] = [0.0, 0.0, 0.0] + [1.0, 1.0, 1.0] = [1.0, 1.0, 1.0]\n        >>> # input_x[1] = [0.0, 0.0, 0.0] + [3.0, 3.0, 3.0] = [3.0, 3.0, 3.0]\n        >>> # step 2: [1, 1]\n        >>> # input_x[1] = [3.0, 3.0, 3.0] + [7.0, 7.0, 7.0] = [10.0, 10.0, 10.0]\n        >>> # input_x[1] = [10.0, 10.0, 10.0] + [9.0, 9.0, 9.0] = [19.0, 19.0, 19.0]\n        >>> indices = Tensor(np.array([[0, 1], [1, 1]]), mindspore.int32)\n        >>> updates = Tensor(np.array([[[1.0, 1.0, 1.0], [3.0, 3.0, 3.0]],\n        ...                            [[7.0, 7.0, 7.0], [9.0, 9.0, 9.0]]]), mindspore.float32)\n        >>> scatter_add = ops.ScatterAdd()\n        >>> output = scatter_add(input_x, indices, updates)\n        >>> print(output)\n        [[ 1.  1.  1.]\n         [19. 19. 19.]]\n        >>> # input_x will be updated after the operation is completed. input_x needs to be re-initialized.\n        >>> input_x = Parameter(Tensor(np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]), mindspore.float32), name=\"x\")\n        >>> # for indices = [[1, 0], [1, 1]]\n        >>> # step 1: [1, 0]\n        >>> # input_x[0] = [0.0, 0.0, 0.0] + [3.0, 3.0, 3.0] = [3.0, 3.0, 3.0]\n        >>> # input_x[1] = [0.0, 0.0, 0.0] + [1.0, 1.0, 1.0] = [1.0, 1.0, 1.0]\n        >>> # step 2: [1, 1]\n        >>> # input_x[1] = [1.0, 1.0, 1.0] + [7.0, 7.0, 7.0] = [8.0, 8.0, 8.0]\n        >>> # input_x[1] = [8.0, 8.0, 8.0] + [9.0, 9.0, 9.0] = [17.0, 17.0, 17.0]\n        >>> indices = Tensor(np.array([[1, 0], [1, 1]]), mindspore.int32)\n        >>> updates = Tensor(np.array([[[1.0, 1.0, 1.0], [3.0, 3.0, 3.0]],\n        ...                            [[7.0, 7.0, 7.0], [9.0, 9.0, 9.0]]]), mindspore.float32)\n        >>> scatter_add = ops.ScatterAdd()\n        >>> output = scatter_add(input_x, indices, updates)\n        >>> print(output)\n        [[ 3.  3.  3.]\n         [17. 17. 17.]]\n        >>> # input_x will be updated after the operation is completed. input_x needs to be re-initialized.\n        >>> input_x = Parameter(Tensor(np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]), mindspore.float32), name=\"x\")\n        >>> # for indices = [[0, 1], [0, 1]]\n        >>> # step 1: [0, 1]\n        >>> # input_x[0] = [0.0, 0.0, 0.0] + [1.0, 1.0, 1.0] = [1.0, 1.0, 1.0]\n        >>> # input_x[1] = [0.0, 0.0, 0.0] + [3.0, 3.0, 3.0] = [3.0, 3.0, 3.0]\n        >>> # step 2: [0, 1]\n        >>> # input_x[0] = [1.0, 1.0, 1.0] + [7.0, 7.0, 7.0] = [8.0, 8.0, 8.0]\n        >>> # input_x[1] = [3.0, 3.0, 3.0] + [9.0, 9.0, 9.0] = [12.0, 12.0, 12.0]\n        >>> indices = Tensor(np.array([[0, 1], [0, 1]]), mindspore.int32)\n        >>> updates = Tensor(np.array([[[1.0, 1.0, 1.0], [3.0, 3.0, 3.0]],\n        ...                            [[7.0, 7.0, 7.0], [9.0, 9.0, 9.0]]]), mindspore.float32)\n        >>> scatter_add = ops.ScatterAdd()\n        >>> output = scatter_add(input_x, indices, updates)\n        >>> print(output)\n        [[ 8.  8.  8.]\n         [12. 12. 12.]]\n    \"\"\"\n\n    @prim_attr_register\n    def __init__(self, use_locking=False):\n        \"\"\"Initialize ScatterAdd\"\"\"\n        validator.check_value_type('use_locking', use_locking, [bool], self.name)\n        self.init_prim_io_names(inputs=['x', 'indices', 'updates'], outputs=['y'])\n        self.add_prim_attr('side_effect_mem', True)\n\n\nclass ScatterSub(_ScatterOpDynamic):\n    r\"\"\"\n    Updates the value of the input tensor through the subtraction operation.\n\n    Using given values to update tensor value through the subtraction operation, along with the input indices.\n    This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.\n\n    For each `i, ..., j` in `indices.shape`:\n\n    .. math::\n\n        \text{input_x}[\text{indices}[i, ..., j], :] \mathrel{-}= \text{updates}[i, ..., j, :]\n\n    Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent.\n    If they have different data types, the lower priority data type will be converted to\n    the relatively highest priority data type.\n\n    Args:\n        use_locking (bool): Whether to protect the assignment by a lock. 
Default: False.\n\n    Inputs:\n        - **input_x** (Parameter) - The target tensor, with data type of Parameter.\n          The shape is :math:`(N,*)` where :math:`*` means any number of additional dimensions.\n        - **indices** (Tensor) - The index to do subtraction operation whose data type must be mindspore.int32.\n        - **updates** (Tensor) - The tensor doing the subtraction operation with `input_x`,\n          the data type is the same as `input_x`, the shape is `indices_shape + x_shape[1:]`.\n\n    Outputs:\n        Tensor, the updated `input_x`, has the same shape and type as `input_x`.\n\n    Raises:\n        TypeError: If `use_locking` is not a bool.\n        TypeError: If `indices` is not an int32.\n        ValueError: If the shape of `updates` is not equal to `indices_shape + x_shape[1:]`.\n        RuntimeError: If the data type of `input_x` and `updates` conversion of Parameter\n            is required when data type conversion of Parameter is not supported.\n\n    Supported Platforms:\n        ``Ascend`` ``CPU`` ``GPU``\n\n    Examples:\n        >>> input_x = Parameter(Tensor(np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]]), mindspore.float32), name=\"x\")\n        >>> indices = Tensor(np.array([[0, 1]]), mindspore.int32)\n        >>> updates = Tensor(np.array([[[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]]]), mindspore.float32)\n        >>> scatter_sub = ops.ScatterSub()\n        >>> output = scatter_sub(input_x, indices, updates)\n        >>> print(output)\n        [[-1. -1. -1.]\n         [-1. -1. -1.]]\n        >>> # input_x will be updated after the operation is completed. input_x needs to be re-initialized.\n        >>> input_x = Parameter(Tensor(np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]), mindspore.float32), name=\"x\")\n        >>> # for indices = [[0, 1], [1, 1]]\n        >>> # step 1: [0, 1]\n        >>> # input_x[0] = [0.0, 0.0, 0.0] - [1.0, 1.0, 1.0] = [-1.0, -1.0, -1.0]\n        >>> # input_x[1] = [0.0, 0.0, 0.0] - [3.0, 3.0, 3.0] = [-3.0, -3.0, -3.0]\n        >>> # step 2: [1, 1]\n        >>> # input_x[1] = [-3.0, -3.0, -3.0] - [7.0, 7.0, 7.0] = [-10.0, -10.0, -10.0]\n        >>> # input_x[1] = [-10.0, -10.0, -10.0] - [9.0, 9.0, 9.0] = [-19.0, -19.0, -19.0]\n        >>> indices = Tensor(np.array([[0, 1], [1, 1]]), mindspore.int32)\n        >>> updates = Tensor(np.array([[[1.0, 1.0, 1.0], [3.0, 3.0, 3.0]],\n        ...                            [[7.0, 7.0, 7.0], [9.0, 9.0, 9.0]]]), mindspore.float32)\n        >>> scatter_sub = ops.ScatterSub()\n        >>> output = scatter_sub(input_x, indices, updates)\n        >>> print(output)\n        [[ -1.  -1.  -1.]\n         [-19. -19. -19.]]\n        >>> # input_x will be updated after the operation is completed. input_x needs to be re-initialized.\n        >>> input_x = Parameter(Tensor(np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]), mindspore.float32), name=\"x\")\n        >>> # for indices = [[1, 0], [1, 1]]\n        >>> # step 1: [1, 0]\n        >>> # input_x[0] = [0.0, 0.0, 0.0] - [3.0, 3.0, 3.0] = [-3.0, -3.0, -3.0]\n        >>> # input_x[1] = [0.0, 0.0, 0.0] - [1.0, 1.0, 1.0] = [-1.0, -1.0, -1.0]\n        >>> # step 2: [1, 1]\n        >>> # input_x[1] = [-1.0, -1.0, -1.0] - [7.0, 7.0, 7.0] = [-8.0, -8.0, -8.0]\n        >>> # input_x[1] = [-8.0, -8.0, -8.0] - [9.0, 9.0, 9.0] = [-17.0, -17.0, -17.0]\n        >>> indices = Tensor(np.array([[1, 0], [1, 1]]), mindspore.int32)\n        >>> updates = Tensor(np.array([[[1.0, 1.0, 1.0], [3.0, 3.0, 3.0]],\n        ...                            [[7.0, 7.0, 7.0], [9.0, 9.0, 9.0]]]), mindspore.float32)\n        >>> scatter_sub = ops.ScatterSub()\n        >>> output = scatter_sub(input_x, indices, updates)\n        >>> print(output)\n        [[ -3.  -3.  -3.]\n         [-17. -17. -17.]]
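\n        >>> # An illustrative NumPy cross-check (not part of the operator): np.subtract.at\n        >>> # mirrors the unbuffered accumulation shown in the example above.\n        >>> ref = np.zeros((2, 3), dtype=np.float32)\n        >>> np.subtract.at(ref, np.array([[1, 0], [1, 1]]),\n        ...                np.array([[[1.0, 1.0, 1.0], [3.0, 3.0, 3.0]],\n        ...                          [[7.0, 7.0, 7.0], [9.0, 9.0, 9.0]]], np.float32))\n        >>> print(ref)\n        [[ -3.  -3.  -3.]\n         [-17. -17. -17.]]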
\n        >>> # input_x will be updated after the operation is completed. input_x needs to be re-initialized.\n        >>> input_x = Parameter(Tensor(np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]), mindspore.float32), name=\"x\")\n        >>> # for indices = [[0, 1], [0, 1]]\n        >>> # step 1: [0, 1]\n        >>> # input_x[0] = [0.0, 0.0, 0.0] - [1.0, 1.0, 1.0] = [-1.0, -1.0, -1.0]\n        >>> # input_x[1] = [0.0, 0.0, 0.0] - [3.0, 3.0, 3.0] = [-3.0, -3.0, -3.0]\n        >>> # step 2: [0, 1]\n        >>> # input_x[0] = [-1.0, -1.0, -1.0] - [7.0, 7.0, 7.0] = [-8.0, -8.0, -8.0]\n        >>> # input_x[1] = [-3.0, -3.0, -3.0] - [9.0, 9.0, 9.0] = [-12.0, -12.0, -12.0]\n        >>> indices = Tensor(np.array([[0, 1], [0, 1]]), mindspore.int32)\n        >>> updates = Tensor(np.array([[[1.0, 1.0, 1.0], [3.0, 3.0, 3.0]],\n        ...                            [[7.0, 7.0, 7.0], [9.0, 9.0, 9.0]]]), mindspore.float32)\n        >>> scatter_sub = ops.ScatterSub()\n        >>> output = scatter_sub(input_x, indices, updates)\n        >>> print(output)\n        [[ -8.  -8.  -8.]\n         [-12. -12. -12.]]\n    \"\"\"\n\n    @prim_attr_register\n    def __init__(self, use_locking=False):\n        \"\"\"Initialize ScatterSub\"\"\"\n        validator.check_value_type('use_locking', use_locking, [bool], self.name)\n        self.init_prim_io_names(inputs=['x', 'indices', 'updates'], outputs=['y'])\n        self.add_prim_attr('side_effect_mem', True)\n\n\nclass ScatterMul(_ScatterOp):\n    r\"\"\"\n    Updates the value of the input tensor through the multiply operation.\n\n    Using given values to update tensor value through the mul operation, along with the input indices.\n    This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.\n\n    For each `i, ..., j` in `indices.shape`:\n\n    .. math::\n\n        \text{input_x}[\text{indices}[i, ..., j], :] \mathrel{*}= \text{updates}[i, ..., j, :]\n\n    Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent.\n    If they have different data types, the lower priority data type will be converted to\n    the relatively highest priority data type.\n\n    Args:\n        use_locking (bool): Whether to protect the assignment by a lock. Default: False.\n\n    Inputs:\n        - **input_x** (Parameter) - The target tensor, with data type of Parameter.\n          The shape is :math:`(N,*)` where :math:`*` means any number of additional dimensions.\n        - **indices** (Tensor) - The index to do multiplication operation whose data type must be mindspore.int32.\n        - **updates** (Tensor) - The tensor doing the multiplication operation with `input_x`,\n          the data type is the same as `input_x`, the shape is `indices_shape + x_shape[1:]`.\n\n    Outputs:\n        Tensor, the updated `input_x`, has the same shape and type as `input_x`.\n\n    Raises:\n        TypeError: If `use_locking` is not a bool.\n        TypeError: If `indices` is not an int32.\n        ValueError: If the shape of `updates` is not equal to `indices_shape + x_shape[1:]`.\n        RuntimeError: If the data type of `input_x` and `updates` conversion of Parameter\n            is required when data type conversion of Parameter is not supported.\n\n    Supported Platforms:\n        ``Ascend`` ``CPU``\n\n    Examples:\n        >>> input_x = Parameter(Tensor(np.array([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]]), mindspore.float32), name=\"x\")\n        >>> indices = Tensor(np.array([0, 1]), mindspore.int32)\n        >>> updates = Tensor(np.array([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]]), mindspore.float32)\n        >>> scatter_mul = ops.ScatterMul()\n        >>> output = scatter_mul(input_x, indices, updates)\n        >>> print(output)\n        [[2. 2. 2.]\n         [4. 4. 4.]]\n        >>> # for input_x will be updated after the operation is completed. 
input_x need to be re-initialized.\n >>> input_x = Parameter(Tensor(np.array([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]]), mindspore.float32), name=\"x\")\n >>> # for indices = [[0, 1], [1, 1]]\n >>> # step 1: [0, 1]\n >>> # input_x[0] = [1.0, 1.0, 1.0] * [1.0, 1.0, 1.0] = [1.0, 1.0, 1.0]\n >>> # input_x[1] = [2.0, 2.0, 2.0] * [3.0, 3.0, 3.0] = [6.0, 6.0, 6.0]\n >>> # step 2: [1, 1]\n >>> # input_x[1] = [6.0, 6.0, 6.0] * [7.0, 7.0, 7.0] = [42.0, 42.0, 42.0]\n >>> # input_x[1] = [42.0, 42.0, 42.0] * [9.0, 9.0, 9.0] = [378.0, 378.0, 378.0]\n >>> indices = Tensor(np.array([[0, 1], [1, 1]]), mindspore.int32)\n >>> updates = Tensor(np.array([[[1.0, 1.0, 1.0], [3.0, 3.0, 3.0]],\n ... [[7.0, 7.0, 7.0], [9.0, 9.0, 9.0]]]), mindspore.float32)\n >>> scatter_mul = ops.ScatterMul()\n >>> output = scatter_mul(input_x, indices, updates)\n >>> print(output)\n [[ 1. 1. 1.]\n [378. 378. 378.]]\n >>> # for input_x will be updated after the operation is completed. input_x need to be re-initialized.\n >>> input_x = Parameter(Tensor(np.array([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]]), mindspore.float32), name=\"x\")\n >>> # for indices = [[1, 0], [1, 1]]\n >>> # step 1: [1, 0]\n >>> # input_x[0] = [1.0, 1.0, 1.0] * [3.0, 3.0, 3.0] = [3.0, 3.0, 3.0]\n >>> # input_x[1] = [2.0, 2.0, 2.0] * [1.0, 1.0, 1.0] = [2.0, 2.0, 2.0]\n >>> # step 2: [1, 1]\n >>> # input_x[1] = [2.0, 2.0, 2.0] * [7.0, 7.0, 7.0] = [14.0, 14.0, 14.0]\n >>> # input_x[1] = [14.0, 14.0, 14.0] * [9.0, 9.0, 9.0] = [126.0, 126.0, 126.0]\n >>> indices = Tensor(np.array([[1, 0], [1, 1]]), mindspore.int32)\n >>> updates = Tensor(np.array([[[1.0, 1.0, 1.0], [3.0, 3.0, 3.0]],\n ... [[7.0, 7.0, 7.0], [9.0, 9.0, 9.0]]]), mindspore.float32)\n >>> scatter_mul = ops.ScatterMul()\n >>> output = scatter_mul(input_x, indices, updates)\n >>> print(output)\n [[ 3. 3. 3.]\n [126. 126. 126.]]\n >>> # for input_x will be updated after the operation is completed. input_x need to be re-initialized.\n >>> input_x = Parameter(Tensor(np.array([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]]), mindspore.float32), name=\"x\")\n >>> # for indices = [[0, 1], [0, 1]]\n >>> # step 1: [0, 1]\n >>> # input_x[0] = [1.0, 1.0, 1.0] * [1.0, 1.0, 1.0] = [1.0, 1.0, 1.0]\n >>> # input_x[1] = [2.0, 2.0, 2.0] * [3.0, 3.0, 3.0] = [6.0, 6.0, 6.0]\n >>> # step 2: [0, 1]\n >>> # input_x[0] = [1.0, 1.0, 1.0] * [7.0, 7.0, 7.0] = [7.0, 7.0, 7.0]\n >>> # input_x[1] = [6.0, 6.0, 6.0] * [9.0, 9.0, 9.0] = [54.0, 54.0, 54.0]\n >>> indices = Tensor(np.array([[0, 1], [0, 1]]), mindspore.int32)\n >>> updates = Tensor(np.array([[[1.0, 1.0, 1.0], [3.0, 3.0, 3.0]],\n ... [[7.0, 7.0, 7.0], [9.0, 9.0, 9.0]]]), mindspore.float32)\n >>> scatter_mul = ops.ScatterMul()\n >>> output = scatter_mul(input_x, indices, updates)\n >>> print(output)\n [[ 7. 7. 7.]\n [54. 54. 54.]]\n \"\"\"\n\n\nclass ScatterDiv(_ScatterOp):\n r\"\"\"\n Updates the value of the input tensor through the divide operation.\n\n Using given values to update tensor value through the div operation, along with the input indices.\n This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.\n\n for each `i, ..., j` in `indices.shape`:\n\n .. 
math::\n\n        \text{input_x}[\text{indices}[i, ..., j], :] \mathrel{/}= \text{updates}[i, ..., j, :]\n\n    Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent.\n    If they have different data types, the lower priority data type will be converted to\n    the relatively highest priority data type.\n\n    Args:\n        use_locking (bool): Whether to protect the assignment by a lock. Default: False.\n\n    Inputs:\n        - **input_x** (Parameter) - The target tensor, with data type of Parameter.\n          The shape is :math:`(N,*)` where :math:`*` means any number of additional dimensions.\n        - **indices** (Tensor) - The index to do division operation whose data type must be mindspore.int32.\n        - **updates** (Tensor) - The tensor doing the division operation with `input_x`,\n          the data type is the same as `input_x`, the shape is `indices_shape + x_shape[1:]`.\n\n    Outputs:\n        Tensor, the updated `input_x`, has the same shape and type as `input_x`.\n\n    Raises:\n        TypeError: If `use_locking` is not a bool.\n        TypeError: If `indices` is not an int32.\n        ValueError: If the shape of `updates` is not equal to `indices_shape + x_shape[1:]`.\n        RuntimeError: If the data type of `input_x` and `updates` conversion of Parameter\n            is required when data type conversion of Parameter is not supported.\n\n    Supported Platforms:\n        ``Ascend`` ``CPU``\n\n    Examples:\n        >>> input_x = Parameter(Tensor(np.array([[6.0, 6.0, 6.0], [2.0, 2.0, 2.0]]), mindspore.float32), name=\"x\")\n        >>> indices = Tensor(np.array([0, 1]), mindspore.int32)\n        >>> updates = Tensor(np.array([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]]), mindspore.float32)\n        >>> scatter_div = ops.ScatterDiv()\n        >>> output = scatter_div(input_x, indices, updates)\n        >>> print(output)\n        [[3. 3. 3.]\n         [1. 1. 1.]]\n        >>> # input_x will be updated after the operation is completed. input_x needs to be re-initialized.\n        >>> input_x = Parameter(Tensor(np.array([[105.0, 105.0, 105.0],\n        ...                                      [315.0, 315.0, 315.0]]), mindspore.float32), name=\"x\")\n        >>> # for indices = [[0, 1], [1, 1]]\n        >>> # step 1: [0, 1]\n        >>> # input_x[0] = [105.0, 105.0, 105.0] / [1.0, 1.0, 1.0] = [105.0, 105.0, 105.0]\n        >>> # input_x[1] = [315.0, 315.0, 315.0] / [3.0, 3.0, 3.0] = [105.0, 105.0, 105.0]\n        >>> # step 2: [1, 1]\n        >>> # input_x[1] = [105.0, 105.0, 105.0] / [5.0, 5.0, 5.0] = [21.0, 21.0, 21.0]\n        >>> # input_x[1] = [21.0, 21.0, 21.0] / [7.0, 7.0, 7.0] = [3.0, 3.0, 3.0]\n        >>> indices = Tensor(np.array([[0, 1], [1, 1]]), mindspore.int32)\n        >>> updates = Tensor(np.array([[[1.0, 1.0, 1.0], [3.0, 3.0, 3.0]],\n        ...                            [[5.0, 5.0, 5.0], [7.0, 7.0, 7.0]]]), mindspore.float32)\n        >>> scatter_div = ops.ScatterDiv()\n        >>> output = scatter_div(input_x, indices, updates)\n        >>> print(output)\n        [[105. 105. 105.]\n         [  3.   3.   3.]]\n        >>> # input_x will be updated after the operation is completed. input_x needs to be re-initialized.\n        >>> input_x = Parameter(Tensor(np.array([[105.0, 105.0, 105.0],\n        ...                                      [315.0, 315.0, 315.0]]), mindspore.float32), name=\"x\")\n        >>> # for indices = [[1, 0], [1, 1]]\n        >>> # step 1: [1, 0]\n        >>> # input_x[0] = [105.0, 105.0, 105.0] / [3.0, 3.0, 3.0] = [35.0, 35.0, 35.0]\n        >>> # input_x[1] = [315.0, 315.0, 315.0] / [1.0, 1.0, 1.0] = [315.0, 315.0, 315.0]\n        >>> # step 2: [1, 1]\n        >>> # input_x[1] = [315.0, 315.0, 315.0] / [5.0, 5.0, 5.0] = [63.0, 63.0, 63.0]\n        >>> # input_x[1] = [63.0, 63.0, 63.0] / [7.0, 7.0, 7.0] = [9.0, 9.0, 9.0]\n        >>> indices = Tensor(np.array([[1, 0], [1, 1]]), mindspore.int32)\n
        >>> updates = Tensor(np.array([[[1.0, 1.0, 1.0], [3.0, 3.0, 3.0]],\n        ...                            [[5.0, 5.0, 5.0], [7.0, 7.0, 7.0]]]), mindspore.float32)\n        >>> scatter_div = ops.ScatterDiv()\n        >>> output = scatter_div(input_x, indices, updates)\n        >>> print(output)\n        [[35. 35. 35.]\n         [ 9.  9.  9.]]\n        >>> # input_x will be updated after the operation is completed. input_x needs to be re-initialized.\n        >>> input_x = Parameter(Tensor(np.array([[105.0, 105.0, 105.0],\n        ...                                      [315.0, 315.0, 315.0]]), mindspore.float32), name=\"x\")\n        >>> # for indices = [[0, 1], [0, 1]]\n        >>> # step 1: [0, 1]\n        >>> # input_x[0] = [105.0, 105.0, 105.0] / [1.0, 1.0, 1.0] = [105.0, 105.0, 105.0]\n        >>> # input_x[1] = [315.0, 315.0, 315.0] / [3.0, 3.0, 3.0] = [105.0, 105.0, 105.0]\n        >>> # step 2: [0, 1]\n        >>> # input_x[0] = [105.0, 105.0, 105.0] / [5.0, 5.0, 5.0] = [21.0, 21.0, 21.0]\n        >>> # input_x[1] = [105.0, 105.0, 105.0] / [7.0, 7.0, 7.0] = [15.0, 15.0, 15.0]\n        >>> indices = Tensor(np.array([[0, 1], [0, 1]]), mindspore.int32)\n        >>> updates = Tensor(np.array([[[1.0, 1.0, 1.0], [3.0, 3.0, 3.0]],\n        ...                            [[5.0, 5.0, 5.0], [7.0, 7.0, 7.0]]]), mindspore.float32)\n        >>> scatter_div = ops.ScatterDiv()\n        >>> output = scatter_div(input_x, indices, updates)\n        >>> print(output)\n        [[21. 21. 21.]\n         [15. 15. 15.]]\n    \"\"\"\n\n\nclass ScatterNdAdd(Primitive):\n    r\"\"\"\n    Applies sparse addition to individual values or slices in a tensor.\n\n    Using given values to update tensor value through the add operation, along with the input indices.\n    This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.\n\n    `input_x` has rank P and `indices` has rank Q where `Q >= 2`.\n\n    `indices` has shape :math:`(i_0, i_1, ..., i_{Q-2}, N)` where `N <= P`.\n\n    The last dimension of `indices` (with length `N` ) indicates slices along the `N` th dimension of `input_x`.\n\n    `updates` is a tensor of rank `Q-1+P-N`. Its shape is:\n    :math:`(i_0, i_1, ..., i_{Q-2}, x\_shape_N, ..., x\_shape_{P-1})`.
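\n\n    For example, if `input_x` has shape :math:`(4, 4, 4)` (so :math:`P = 3`) and `indices` has shape\n    :math:`(2, 1)` (so :math:`Q = 2` and :math:`N = 1`), then `updates` must have rank\n    :math:`Q - 1 + P - N = 3` and shape :math:`(2, 4, 4)`, which is exactly the setup of the second\n    example below.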
\n\n    Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent.\n    If they have different data types, the lower priority data type will be converted to\n    the relatively highest priority data type.\n\n    Args:\n        use_locking (bool): Whether to protect the assignment by a lock. Default: False.\n\n    Inputs:\n        - **input_x** (Parameter) - The target tensor, with data type of Parameter.\n          The shape is :math:`(N,*)` where :math:`*` means any number of additional dimensions.\n        - **indices** (Tensor) - The index to do add operation whose data type must be mindspore.int32.\n          The rank of indices must be at least 2 and `indices_shape[-1] <= len(shape)`.\n        - **updates** (Tensor) - The tensor doing the add operation with `input_x`,\n          the data type is the same as `input_x`, the shape is `indices_shape[:-1] + x_shape[indices_shape[-1]:]`.\n\n    Outputs:\n        Tensor, the updated `input_x`, has the same shape and type as `input_x`.\n\n    Raises:\n        TypeError: If `use_locking` is not a bool.\n        TypeError: If `indices` is not an int32.\n        ValueError: If the shape of `updates` is not equal to `indices_shape[:-1] + x_shape[indices_shape[-1]:]`.\n        RuntimeError: If the data type of `input_x` and `updates` conversion of Parameter\n            is required when data type conversion of Parameter is not supported.\n\n    Supported Platforms:\n        ``Ascend`` ``GPU``\n\n    Examples:\n        >>> input_x = Parameter(Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8]), mindspore.float32), name=\"x\")\n        >>> indices = Tensor(np.array([[2], [4], [1], [7]]), mindspore.int32)\n        >>> updates = Tensor(np.array([6, 7, 8, 9]), mindspore.float32)\n        >>> scatter_nd_add = ops.ScatterNdAdd()\n        >>> output = scatter_nd_add(input_x, indices, updates)\n        >>> print(output)\n        [ 1. 10.  9.  4. 12.  6.  7. 17.]\n        >>> input_x = Parameter(Tensor(np.zeros((4, 4, 4)), mindspore.int32))\n        >>> indices = Tensor(np.array([[0], [2]]), mindspore.int32)\n        >>> updates = Tensor(np.array([[[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],\n        ...                            [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]]]), mindspore.int32)\n        >>> scatter_nd_add = ops.ScatterNdAdd()\n        >>> output = scatter_nd_add(input_x, indices, updates)\n        >>> print(output)\n        [[[1 1 1 1]\n          [2 2 2 2]\n          [3 3 3 3]\n          [4 4 4 4]]\n         [[0 0 0 0]\n          [0 0 0 0]\n          [0 0 0 0]\n          [0 0 0 0]]\n         [[5 5 5 5]\n          [6 6 6 6]\n          [7 7 7 7]\n          [8 8 8 8]]\n         [[0 0 0 0]\n          [0 0 0 0]\n          [0 0 0 0]\n          [0 0 0 0]]]\n    \"\"\"\n\n    __mindspore_signature__ = (\n        sig.make_sig('input_x', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),\n        sig.make_sig('indices', dtype=sig.sig_dtype.T1),\n        sig.make_sig('updates', dtype=sig.sig_dtype.T)\n    )\n\n    @prim_attr_register\n    def __init__(self, use_locking=False):\n        \"\"\"Initialize ScatterNdAdd\"\"\"\n        validator.check_value_type('use_locking', use_locking, [bool], self.name)\n        self.init_prim_io_names(inputs=['input_x', 'indices', 'updates'], outputs=['y'])\n        self.add_prim_attr('side_effect_mem', True)\n\n\nclass ScatterNdSub(_ScatterNdOp):\n    r\"\"\"\n    Applies sparse subtraction to individual values or slices in a tensor.\n\n    Using given values to update tensor value through the subtraction operation, along with the input indices.\n    This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.\n\n    `input_x` has rank P and `indices` has rank Q where `Q >= 2`.\n\n    `indices` has shape :math:`(i_0, i_1, ..., i_{Q-2}, N)` where `N <= P`.\n\n    The last dimension of `indices` (with length `N` ) indicates slices along the `N` th dimension of `input_x`.\n\n    `updates` is a tensor of rank `Q-1+P-N`. 
Its shape is:\n :math:`(i_0, i_1, ..., i_{Q-2}, x\\_shape_N, ..., x\\_shape_{P-1})`.\n\n Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent.\n If they have different data types, the lower priority data type will be converted to\n relatively highest priority data type.\n\n Args:\n use_locking (bool): Whether protect the assignment by a lock. Default: False.\n\n Inputs:\n - **input_x** (Parameter) - The target tensor, with data type of Parameter.\n The shape is :math:`(N,*)` where :math:`*` means,any number of additional dimensions.\n - **indices** (Tensor) - The index of input tensor, with int32 data type.\n The rank of indices must be at least 2 and `indices_shape[-1] <= len(shape)`.\n - **updates** (Tensor) - The tensor to be updated to the input tensor, has the same type as input.\n The shape is `indices_shape[:-1] + x_shape[indices_shape[-1]:]`.\n\n Outputs:\n Tensor, has the same shape and type as `input_x`.\n\n Raises:\n TypeError: If `use_locking` is not a bool.\n TypeError: If `indices` is not an int32.\n ValueError: If the shape of `updates` is not equal to `indices_shape[:-1] + x_shape[indices_shape[-1]:]`.\n RuntimeError: If the data type of `input_x` and `updates` conversion of Parameter\n is required when data type conversion of Parameter is not supported.\n\n Supported Platforms:\n ``Ascend`` ``GPU``\n\n Examples:\n >>> input_x = Parameter(Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8]), mindspore.float32), name=\"x\")\n >>> indices = Tensor(np.array([[2], [4], [1], [7]]), mindspore.int32)\n >>> updates = Tensor(np.array([6, 7, 8, 9]), mindspore.float32)\n >>> scatter_nd_sub = ops.ScatterNdSub()\n >>> output = scatter_nd_sub(input_x, indices, updates)\n >>> print(output)\n [ 1. -6. -3. 4. -2. 6. 7. -1.]\n >>> input_x = Parameter(Tensor(np.zeros((4, 4, 4)), mindspore.int32))\n >>> indices = Tensor(np.array([[0], [2]]), mindspore.int32)\n >>> updates = Tensor(np.array([[[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],\n ... [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]]]), mindspore.int32)\n >>> scatter_nd_sub = ops.ScatterNdSub()\n >>> output = scatter_nd_sub(input_x, indices, updates)\n >>> print(output)\n [[[-1 -1 -1 -1]\n [-2 -2 -2 -2]\n [-3 -3 -3 -3]\n [-4 -4 -4 -4]]\n [[ 0 0 0 0]\n [ 0 0 0 0]\n [ 0 0 0 0]\n [ 0 0 0 0]]\n [[-5 -5 -5 -5]\n [-6 -6 -6 -6]\n [-7 -7 -7 -7]\n [-8 -8 -8 -8]]\n [[ 0 0 0 0]\n [ 0 0 0 0]\n [ 0 0 0 0]\n [ 0 0 0 0]]]\n \"\"\"\n\n\nclass ScatterNonAliasingAdd(Primitive):\n \"\"\"\n Applies sparse addition to the input using individual values or slices.\n\n Using given values to update tensor value through the add operation, along with the input indices.\n This operation outputs the `input_x` after the update is done, which makes it convenient to use the updated value.\n\n Inputs of `input_x` and `updates` comply with the implicit type conversion rules to make the data types consistent.\n If they have different data types, the lower priority data type will be converted to\n the relatively highest priority data type.\n\n Inputs:\n - **input_x** (Parameter) - The target parameter. 
The data type must be float16, float32 or int32.\n        - **indices** (Tensor) - The index to perform the addition operation whose data type must be mindspore.int32.\n        - **updates** (Tensor) - The tensor that performs the addition operation with `input_x`,\n          the data type is the same as `input_x`, the shape is `indices_shape[:-1] + x_shape[indices_shape[-1]:]`.\n\n    Outputs:\n        Parameter, the updated `input_x`.\n\n    Raises:\n        TypeError: If dtype of `indices` is not int32.\n        TypeError: If dtype of `input_x` is not one of float16, float32, int32.\n        ValueError: If the shape of `updates` is not equal to `indices_shape[:-1] + x_shape[indices_shape[-1]:]`.\n        RuntimeError: If the data type of `input_x` and `updates` conversion of Parameter\n            is required when data type conversion of Parameter is not supported.\n\n    Supported Platforms:\n        ``Ascend``\n\n    Examples:\n        >>> input_x = Parameter(Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8]), mindspore.float32), name=\"x\")\n        >>> indices = Tensor(np.array([[2], [4], [1], [7]]), mindspore.int32)\n        >>> updates = Tensor(np.array([6, 7, 8, 9]), mindspore.float32)\n        >>> scatter_non_aliasing_add = ops.ScatterNonAliasingAdd()\n        >>> output = scatter_non_aliasing_add(input_x, indices, updates)\n        >>> print(output)\n        [ 1. 10.  9.  4. 12.  6.  7. 17.]\n    \"\"\"\n\n    __mindspore_signature__ = (\n        sig.make_sig('input_x', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),\n        sig.make_sig('indices', dtype=sig.sig_dtype.T1),\n        sig.make_sig('updates', dtype=sig.sig_dtype.T)\n    )\n\n    @prim_attr_register\n    def __init__(self):\n        \"\"\"Initialize ScatterNonAliasingAdd\"\"\"\n        self.init_prim_io_names(inputs=['input_x', 'indices', 'updates'], outputs=['y'])\n        self.add_prim_attr('side_effect_mem', True)\n\n\nclass SpaceToDepth(PrimitiveWithInfer):\n    r\"\"\"\n    Rearrange blocks of spatial data into depth.\n\n    The output tensor's `height` dimension is :math:`height / block\_size`.\n\n    The output tensor's `width` dimension is :math:`width / block\_size`.\n\n    The depth of output tensor is :math:`block\_size * block\_size * input\_depth`.\n\n    The input tensor's height and width must be divisible by `block_size`.\n    The data format is \"NCHW\".
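\n\n    For example, with `block_size = 2` an input of shape :math:`(1, 3, 2, 2)` is rearranged into an output\n    of shape :math:`(1, 12, 1, 1)`, as in the example below.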
\n\n    Args:\n        block_size (int): The block size used to divide spatial data. It must be >= 2.\n\n    Inputs:\n        - **x** (Tensor) - The target tensor. The data type is Number. It must be a 4-D tensor.\n\n    Outputs:\n        Tensor, the same data type as `x`. It is a 4-D tensor of shape\n        :math:`(N, C_{in} * \text{block_size} ^ 2, H_{in} / \text{block_size}, W_{in} / \text{block_size})`.\n\n    Raises:\n        TypeError: If `block_size` is not an int.\n        ValueError: If `block_size` is less than 2.\n        ValueError: If length of shape of `x` is not equal to 4.\n\n    Supported Platforms:\n        ``Ascend`` ``GPU`` ``CPU``\n\n    Examples:\n        >>> x = Tensor(np.random.rand(1,3,2,2), mindspore.float32)\n        >>> block_size = 2\n        >>> space_to_depth = ops.SpaceToDepth(block_size)\n        >>> output = space_to_depth(x)\n        >>> print(output.shape)\n        (1, 12, 1, 1)\n    \"\"\"\n\n    @prim_attr_register\n    def __init__(self, block_size):\n        \"\"\"Initialize SpaceToDepth\"\"\"\n        self.init_prim_io_names(inputs=['x'], outputs=['y'])\n        validator.check_value_type('block_size', block_size, [int], self.name)\n        validator.check('block_size', block_size, '', 2, Rel.GE)\n        self.block_size = block_size\n        self.add_prim_attr(\"data_format\", \"NCHW\")\n\n    def infer_shape(self, x_shape):\n        validator.check('x dimension', len(x_shape), '', 4, Rel.EQ)\n        out_shape = copy.deepcopy(x_shape)\n        for i in range(2):\n            if out_shape[i + 2] % self.block_size != 0:\n                msg_prefix = \"2nd\" if i + 2 == 2 else \"3rd\"\n                raise ValueError(f\"For '{self.name}', the shape of output with index {i + 2} must be divisible \"\n                                 f\"by 'block_size', but got the {msg_prefix} dimension \"\n                                 f\"of output: {out_shape[i + 2]} and \"\n                                 f\"'block_size': {self.block_size}.\")\n            out_shape[i + 2] //= self.block_size\n\n        out_shape[1] *= self.block_size * self.block_size\n        return out_shape\n\n    def infer_dtype(self, x_dtype):\n        validator.check_subclass(\"x_dtype\", x_dtype, mstype.tensor, self.name)\n        return x_dtype\n\n\nclass DepthToSpace(PrimitiveWithInfer):\n    r\"\"\"\n    Rearrange blocks of depth data into spatial dimensions.\n\n    This is the reverse operation of SpaceToDepth.\n\n    The depth of output tensor is :math:`input\_depth / (block\_size * block\_size)`.\n\n    The output tensor's `height` dimension is :math:`height * block\_size`.\n\n    The output tensor's `width` dimension is :math:`width * block\_size`.\n\n    The input tensor's depth must be divisible by `block_size * block_size`.\n    The data format is \"NCHW\".
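\n\n    For example, with `block_size = 2` an input of shape :math:`(1, 12, 1, 1)` is rearranged back into an\n    output of shape :math:`(1, 3, 2, 2)`, inverting the SpaceToDepth example above.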
\n\n    Args:\n        block_size (int): The block size used to divide depth data. It must be >= 2.\n\n    Inputs:\n        - **x** (Tensor) - The target tensor. It must be a 4-D tensor with shape :math:`(N, C_{in}, H_{in}, W_{in})`.\n          The data type is Number.\n\n    Outputs:\n        Tensor of shape :math:`(N, C_{in} / \text{block_size} ^ 2, H_{in} * \text{block_size},\n        W_{in} * \text{block_size})`.\n\n    Raises:\n        TypeError: If `block_size` is not an int.\n        ValueError: If `block_size` is less than 2.\n        ValueError: If length of shape of `x` is not equal to 4.\n\n    Supported Platforms:\n        ``Ascend`` ``GPU`` ``CPU``\n\n    Examples:\n        >>> x = Tensor(np.random.rand(1, 12, 1, 1), mindspore.float32)\n        >>> block_size = 2\n        >>> depth_to_space = ops.DepthToSpace(block_size)\n        >>> output = depth_to_space(x)\n        >>> print(output.shape)\n        (1, 3, 2, 2)\n    \"\"\"\n\n    @prim_attr_register\n    def __init__(self, block_size):\n        \"\"\"Initialize DepthToSpace\"\"\"\n        self.init_prim_io_names(inputs=['x'], outputs=['y'])\n        validator.check_value_type('block_size', block_size, [int], self.name)\n        validator.check('block_size', block_size, '', 2, Rel.GE, self.name)\n        self.block_size = block_size\n        self.add_prim_attr(\"data_format\", \"NCHW\")\n\n    def infer_shape(self, x_shape):\n        validator.check('x dimension', len(x_shape), '', 4, Rel.EQ)\n        out_shape = copy.deepcopy(x_shape)\n        for i in range(2):\n            out_shape[i + 2] *= self.block_size\n\n        validator.check_int(x_shape[1] % (self.block_size * self.block_size),\n                            0, Rel.EQ, 'x_shape[1] % (block_size*block_size)', self.name)\n        out_shape[1] //= self.block_size * self.block_size\n        return out_shape\n\n    def infer_dtype(self, x_dtype):\n        validator.check_subclass(\"x_dtype\", x_dtype, mstype.tensor, self.name)\n        return x_dtype\n\n\nclass SpaceToBatch(PrimitiveWithInfer):\n    r\"\"\"\n    SpaceToBatch is deprecated. Please use :class:`mindspore.ops.SpaceToBatchND` instead.\n    Divides spatial dimensions into blocks and combines the block size with the original batch.\n\n    This operation will divide spatial dimensions (H, W) into blocks with `block_size`, the output tensor's H and W\n    dimension is the corresponding number of blocks after division. The output tensor's batch dimension is the\n    product of the original batch and the square of block_size. Before division, the spatial dimensions\n    of the input are zero padded according to paddings if necessary.\n\n    Args:\n        block_size (int): The block size of dividing blocks with value greater than or equal to 2.\n        paddings (Union[tuple, list]): The padding values for H and W dimension, containing two lists.\n            Each list contains two integer values. All values must be greater than or equal to 0.\n            paddings[i] specifies the paddings for the spatial dimension i, which corresponds to the\n            input dimension i+2. It is required that input_shape[i+2]+paddings[i][0]+paddings[i][1]\n            is divisible by block_size.\n\n    Inputs:\n        - **input_x** (Tensor) - The input tensor. It must be a 4-D tensor. The data type is Number.\n\n    Outputs:\n        Tensor, the output tensor with the same data type as input. Assume input shape is :math:`(n, c, h, w)` with\n        :math:`block\_size` and :math:`paddings`. 
The shape of the output tensor will be :math:`(n', c', h', w')`,\n        where\n\n        :math:`n' = n*(block\_size*block\_size)`\n\n        :math:`c' = c`\n\n        :math:`h' = (h+paddings[0][0]+paddings[0][1])//block\_size`\n\n        :math:`w' = (w+paddings[1][0]+paddings[1][1])//block\_size`\n\n    Raises:\n        TypeError: If `block_size` is not an int.\n        ValueError: If `block_size` is less than 2.\n\n    Supported Platforms:\n        Deprecated\n\n    Examples:\n        >>> block_size = 2\n        >>> paddings = [[0, 0], [0, 0]]\n        >>> space_to_batch = ops.SpaceToBatch(block_size, paddings)\n        >>> input_x = Tensor(np.array([[[[1, 2], [3, 4]]]]), mindspore.float32)\n        >>> output = space_to_batch(input_x)\n        >>> print(output)\n        [[[[1.]]]\n         [[[2.]]]\n         [[[3.]]]\n         [[[4.]]]]\n    \"\"\"\n\n    @prim_attr_register\n    def __init__(self, block_size, paddings):\n        \"\"\"Initialize SpaceToBatch\"\"\"\n        logger.warning(\"WARN_DEPRECATED: The usage of SpaceToBatch is deprecated.\"\n                       \" Please use SpaceToBatchND.\")\n        validator.check_value_type('block_size', block_size, [int], self.name)\n        validator.check('block_size', block_size, '', 2, Rel.GE, self.name)\n        self.block_size = block_size\n        validator.check('paddings shape', np.array(paddings).shape, '', (2, 2), Rel.EQ, self.name)\n        for elem in itertools.chain(*paddings):\n            validator.check_non_negative_int(elem, 'paddings element', self.name)\n            validator.check_value_type('paddings element', elem, [int], self.name)\n        self.paddings = paddings\n\n    def infer_dtype(self, x_dtype):\n        validator.check_tensor_dtype_valid('input_x', x_dtype, mstype.number_type, self.name)\n        return x_dtype\n\n    def infer_shape(self, x_shape):\n        validator.check_equal_int(len(x_shape), 4, 'rank of input_x', self.name)\n        out_shape = copy.deepcopy(x_shape)\n        for i in range(2):\n            padded = out_shape[i + 2] + self.paddings[i][0] + self.paddings[i][1]\n            if padded % self.block_size != 0:\n                msg_ndim = \"2nd\" if i + 2 == 2 else \"3rd\"\n                raise ValueError(f\"For '{self.name}', the padded size of the input must be \"\n                                 f\"divisible by 'block_size', but got the {msg_ndim} dimension of output: {padded} and \"\n                                 f\"'block_size': {self.block_size}. Please check the official homepage \"\n                                 f\"for more information about the output tensor.\")\n            out_shape[i + 2] = padded // self.block_size\n        out_shape[0] *= self.block_size * self.block_size\n        return out_shape\n\n\nclass BatchToSpace(PrimitiveWithInfer):\n    r\"\"\"\n    Divides batch dimension with blocks and interleaves these blocks back into spatial dimensions.\n\n    This operation will divide batch dimension N into blocks with block_size, the output tensor's N dimension\n    is the corresponding number of blocks after division. The output tensor's H, W dimension is product of\n    original H, W dimension and block_size with given amount to crop from dimension, respectively.\n\n    Args:\n        block_size (int): The block size of division, with a value greater than or equal to 2.\n        crops (Union[list(int), tuple(int)]): The crop values for H and W dimension, containing two lists.\n            Each list contains two integers.\n            All values must be greater than or equal to 0. crops[i] specifies the crop values for the spatial\n            dimension i, which corresponds to the input dimension i+2. It is required that\n\n            :math:`input\_shape[i+2]*block\_size >= crops[i][0]+crops[i][1]`\n\n    Inputs:\n        - **input_x** (Tensor) - The input tensor. It must be a 4-D tensor, dimension 0 must be divisible by\n          the square of `block_size`. The data type is float16 or float32.\n\n    Outputs:\n        Tensor, the output tensor with the same type as input. 
Assume input shape is (n, c, h, w) with block_size\n        and crops. The output shape will be (n', c', h', w'), where\n\n        :math:`n' = n//(block\_size*block\_size)`\n\n        :math:`c' = c`\n\n        :math:`h' = h*block\_size-crops[0][0]-crops[0][1]`\n\n        :math:`w' = w*block\_size-crops[1][0]-crops[1][1]`\n\n    Raises:\n        TypeError: If `block_size` or element of `crops` is not an int.\n        TypeError: If `crops` is neither list nor tuple.\n        ValueError: If `block_size` is less than 2.\n\n    Supported Platforms:\n        ``Ascend`` ``GPU``\n\n    Examples:\n        >>> block_size = 2\n        >>> crops = [[0, 0], [0, 0]]\n        >>> batch_to_space = ops.BatchToSpace(block_size, crops)\n        >>> input_x = Tensor(np.array([[[[1]]], [[[2]]], [[[3]]], [[[4]]]]), mindspore.float32)\n        >>> output = batch_to_space(input_x)\n        >>> print(output)\n        [[[[1. 2.]\n           [3. 4.]]]]\n\n    \"\"\"\n\n    @prim_attr_register\n    def __init__(self, block_size, crops):\n        \"\"\"Initialize BatchToSpace\"\"\"\n        logger.warning(\"WARN_DEPRECATED: The usage of BatchToSpace is deprecated.\"\n                       \" Please use BatchToSpaceND.\")\n        validator.check_value_type('block_size', block_size, [int], self.name)\n        validator.check('block_size', block_size, '', 2, Rel.GE, self.name)\n        self.block_size = block_size\n        validator.check_value_type('crops type', crops, [list, tuple], self.name)\n        validator.check('crops shape', np.array(crops).shape, '', (2, 2))\n        for elem in itertools.chain(*crops):\n            validator.check_non_negative_int(elem, 'crops element', self.name)\n            validator.check_value_type('crops element', elem, [int], self.name)\n        self.crops = crops\n\n    def infer_dtype(self, x_dtype):\n        validator.check_tensor_dtype_valid('input_x', x_dtype, mstype.number_type, self.name)\n        return x_dtype\n\n    def infer_shape(self, x_shape):\n        validator.check('rank of input_x', len(x_shape), '', 4)\n        out_shape = copy.deepcopy(x_shape)\n        for i in range(2):\n            x_block_prod = out_shape[i + 2] * self.block_size\n            crops_sum = self.crops[i][0] + self.crops[i][1]\n            validator.check(\"x block shape prod\", x_block_prod, 'crops sum', crops_sum, Rel.GT, self.name)\n            out_shape[i + 2] = x_block_prod - crops_sum\n        block_size_prod = self.block_size * self.block_size\n        if out_shape[0] % block_size_prod != 0:\n            raise ValueError(f\"For '{self.name}', the shape of output with index 0 must be divided exactly \"\n                             f\"by block_size_prod, but got the shape of output: {out_shape} and \"\n                             f\"block_size_prod: {block_size_prod}.\")\n        out_shape[0] = out_shape[0] // block_size_prod\n        return out_shape\n\n\nclass SpaceToBatchND(PrimitiveWithInfer):\n    r\"\"\"\n    Divides spatial dimensions into blocks and combines the block size with the original batch.\n\n    This operation will divide spatial dimensions (H, W) into blocks with block_shape, the output tensor's H and W\n    dimension is the corresponding number of blocks after division. The output tensor's batch dimension is the\n    product of the original batch and the product of `block_shape`. Before division,\n    the spatial dimensions of the input are zero padded according to paddings if necessary.\n\n    Args:\n        block_shape (Union[list(int), tuple(int), int]): The block shape of dividing block with all value greater\n            than 1. If `block_shape` is a tuple or list, the length of `block_shape` is M corresponding to the\n            number of spatial dimensions. If `block_shape` is an int, the block size of M dimensions are the same,\n            equal to `block_shape`. M must be 2.\n        paddings (Union[tuple, list]): The padding values for H and W dimension, containing two lists.\n            Each list contains two integer values. All values must be greater than or equal to 0.\n            `paddings[i]` specifies the paddings for the spatial dimension i,\n            which corresponds to the input dimension i+2.\n            It is required that input_shape[i+2]+paddings[i][0]+paddings[i][1] is divisible by block_shape[i].
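\n\n    For example, with `block_shape = [2, 2]` and `paddings = [[0, 0], [0, 0]]`, an input of shape\n    :math:`(1, 1, 2, 2)` produces an output of shape :math:`(4, 1, 1, 1)`, as in the example below.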
\n\n    Inputs:\n        - **input_x** (Tensor) - The input tensor. It must be a 4-D tensor.\n\n    Outputs:\n        Tensor, the output tensor with the same data type as input. Assume input shape is :math:`(n, c, h, w)` with\n        :math:`block\_shape` and :math:`paddings`. The shape of the output tensor will be :math:`(n', c', h', w')`,\n        where\n\n        :math:`n' = n*(block\_shape[0]*block\_shape[1])`\n\n        :math:`c' = c`\n\n        :math:`h' = (h+paddings[0][0]+paddings[0][1])//block\_shape[0]`\n\n        :math:`w' = (w+paddings[1][0]+paddings[1][1])//block\_shape[1]`\n\n    Raises:\n        TypeError: If `block_shape` is not one of list, tuple, int.\n        TypeError: If `paddings` is neither list nor tuple.\n        ValueError: If length of shape of `block_shape` is not equal to 1.\n        ValueError: If length of `block_shape` or `paddings` is not equal to 2.\n\n    Supported Platforms:\n        ``Ascend``\n\n    Examples:\n        >>> block_shape = [2, 2]\n        >>> paddings = [[0, 0], [0, 0]]\n        >>> space_to_batch_nd = ops.SpaceToBatchND(block_shape, paddings)\n        >>> input_x = Tensor(np.array([[[[1, 2], [3, 4]]]]), mindspore.float32)\n        >>> output = space_to_batch_nd(input_x)\n        >>> print(output)\n        [[[[1.]]]\n         [[[2.]]]\n         [[[3.]]]\n         [[[4.]]]]\n    \"\"\"\n\n    @prim_attr_register\n    def __init__(self, block_shape, paddings):\n        \"\"\"Initialize SpaceToBatchND\"\"\"\n        if isinstance(block_shape, int):\n            block_shape = (block_shape,) * 2\n        self.add_prim_attr(\"block_shape\", block_shape)\n        validator.check_value_type('block_shape type', block_shape, [list, tuple], self.name)\n        validator.check('block_shape shape', len(np.array(block_shape).shape), '', 1, Rel.EQ, self.name)\n        block_rank = len(block_shape)\n        validator.check('block_shape length', block_rank, '', 2, Rel.EQ, self.name)\n        for elem in block_shape:\n            validator.check('block_shape element', elem, '', 1, Rel.GE, self.name)\n            validator.check_value_type('block_shape element', elem, [int], self.name)\n        self.block_shape = block_shape\n\n        validator.check_value_type('paddings type', paddings, [list, tuple], self.name)\n        validator.check('paddings length', len(paddings), '', 2, Rel.EQ, self.name)\n        validator.check('paddings shape', np.array(paddings).shape, '', (block_rank, 2), Rel.EQ, self.name)\n        for elem in itertools.chain(*paddings):\n            validator.check_non_negative_int(elem, 'paddings element', self.name)\n            validator.check_value_type('paddings element', elem, [int], self.name)\n        self.paddings = paddings\n\n    def infer_dtype(self, x_dtype):\n        validator.check_tensor_dtype_valid('input_x', x_dtype, mstype.number_type, self.name)\n        return x_dtype\n\n    def infer_shape(self, x_shape):\n        x_rank = len(x_shape)\n        validator.check_equal_int(x_rank, 4, 'x_shape rank', self.name)\n        out_shape = copy.deepcopy(x_shape)\n\n        block_shape_prod = 1\n        offset = 2\n        for i in range(len(self.block_shape)):\n            padded = out_shape[i + offset] + self.paddings[i][0] + \\n                     self.paddings[i][1]\n            if padded % self.block_shape[i] != 0:\n                raise ValueError(f\"For '{self.name}', the padded value must be divisible by 'block_shape', \"\n                                 f\"where padded = input_x_shape[i + 2] + paddings[i][0] + paddings[i][1], \"\n                                 f\"but got input_x_shape[{i + 2}]: {out_shape[i + offset]}, \"\n                                 f\"paddings[{i}][0]: {self.paddings[i][0]} and paddings[{i}][1]: {self.paddings[i][1]}.\"\n                                 f\" Please check the official api documents for \"\n                                 f\"more information 
about the output tensor.\")\n out_shape[i + offset] = padded // self.block_shape[i]\n block_shape_prod = block_shape_prod * self.block_shape[i]\n out_shape[0] *= block_shape_prod\n return out_shape\n\n\nclass BatchToSpaceND(Primitive):\n r\"\"\"\n Divides batch dimension with blocks and interleaves these blocks back into spatial dimensions.\n\n This operation will divide batch dimension N into blocks with block_shape, the output tensor's N dimension\n is the corresponding number of blocks after division. The output tensor's H, W dimension is the product of\n original H, W dimension and block_shape with given amount to crop from dimension, respectively.\n\n Args:\n block_shape (Union[list(int), tuple(int), int]): The block shape of dividing block with all value greater\n than 1. If `block_shape` is a tuple or list, the length of `block_shape` is M corresponding to the\n number of spatial dimensions. If `block_shape` is an int, the block size of M dimensions are the same,\n equal to `block_shape`. M must be 2.\n crops (Union[list(int), tuple(int)]): The crop value for H and W dimension, containing 2 subtraction list,\n each containing 2 int value.\n All values must be >= 0. crops[i] specifies the crop values for spatial dimension i, which corresponds to\n input dimension i+2. It is required that\n\n :math:`input\\_shape[i+2]*block\\_shape[i] > crops[i][0]+crops[i][1]`\n\n Inputs:\n - **input_x** (Tensor) - The input tensor. It must be a 4-D tensor, dimension 0 must be divisible by\n product of `block_shape`. The data type is float16 or float32.\n\n Outputs:\n Tensor, the output tensor with the same type as input. Assume input shape is (n, c, h, w) with block_shape\n and crops. The output shape will be (n', c', h', w'), where\n\n :math:`n' = n//(block\\_shape[0]*block\\_shape[1])`\n\n :math:`c' = c`\n\n :math:`h' = h*block\\_shape[0]-crops[0][0]-crops[0][1]`\n\n :math:`w' = w*block\\_shape[1]-crops[1][0]-crops[1][1]`\n\n Raises:\n TypeError: If `block_shape` is not one of list, tuple, int.\n TypeError: If `crops` is neither list nor tuple.\n ValueError: If length of `block_shape` or `crops` is not equal to 2.\n\n Supported Platforms:\n ``Ascend``\n\n Examples:\n >>> block_shape = [2, 2]\n >>> crops = [[0, 0], [0, 0]]\n >>> batch_to_space_nd = ops.BatchToSpaceND(block_shape, crops)\n >>> input_x = Tensor(np.array([[[[1]]], [[[2]]], [[[3]]], [[[4]]]]), mindspore.float32)\n >>> output = batch_to_space_nd(input_x)\n >>> print(output)\n [[[[1. 2.]\n [3. 
4.]]]]\n\n \"\"\"\n\n @prim_attr_register\n def __init__(self, block_shape, crops):\n \"\"\"Initialize BatchToSpaceND\"\"\"\n if isinstance(block_shape, int):\n block_shape = (block_shape,) * 2\n self.add_prim_attr(\"block_shape\", block_shape)\n validator.check_value_type('block_shape type', block_shape, [list, tuple], self.name)\n validator.check('block_shape shape', len(np.array(block_shape).shape), '', 1, Rel.EQ, self.name)\n block_rank = len(block_shape)\n validator.check('block_shape length', block_rank, '', 2, Rel.EQ, self.name)\n for elem in block_shape:\n validator.check('block_shape element', elem, '', 1, Rel.GE, self.name)\n validator.check_value_type('block_shape element', elem, [int], self.name)\n self.block_shape = block_shape\n\n validator.check_value_type('crops type', crops, [list, tuple], self.name)\n validator.check('crops length', len(crops), '', 2, Rel.EQ, self.name)\n validator.check('crops shape', np.array(crops).shape, '', (block_rank, 2), Rel.EQ, self.name)\n for elem in itertools.chain(*crops):\n validator.check_non_negative_int(elem, 'crops element', self.name)\n validator.check_value_type('crops element', elem, [int], self.name)\n self.crops = crops\n\n\nclass BroadcastTo(Primitive):\n \"\"\"\n Broadcasts input tensor to a given shape.\n\n Input shape can be broadcast to target shape if for each dimension pair they are either equal or input is one or\n the target dimension is -1. In case of -1 in target shape, it will be replaced by the input shape's value\n in that dimension.\n\n When input shape is broadcast to target shape, it starts with the trailing\n dimensions. If there is a -1 in the target shape, the -1 cannot be in a leading,\n non-existing dimension.\n\n Args:\n shape (tuple): The target shape to broadcast. Can be fully specified, or have -1 in one position\n where it will be substituted by the input tensor's shape in that position, see example.\n\n Inputs:\n - **input_x** (Tensor) - The input tensor. The data type should be one of the following types:\n float16, float32, int32, int8, uint8.\n The shape is :math:`(N,*)` where :math:`*` means,any number of additional dimensions.\n\n Outputs:\n Tensor, with the given `shape` and the same data type as `input_x`.\n\n Raises:\n TypeError: If `shape` is not a tuple.\n ValueError: if the target and input shapes are incompatible, or if a - 1 in the target shape is in an invalid\n location.\n\n Supported Platforms:\n ``Ascend`` ``GPU`` ``CPU``\n\n Examples:\n >>> shape = (2, 3)\n >>> input_x = Tensor(np.array([1, 2, 3]).astype(np.float32))\n >>> broadcast_to = ops.BroadcastTo(shape)\n >>> output = broadcast_to(input_x)\n >>> print(output)\n [[1. 2. 3.]\n [1. 2. 3.]]\n\n >>> shape = (-1, 2)\n >>> input_x = Tensor(np.array([[1], [2]]).astype(np.float32))\n >>> broadcast_to = ops.BroadcastTo(shape)\n >>> output = broadcast_to(input_x)\n >>> print(output)\n [[1. 1.]\n [2. 
2.]]\n \"\"\"\n\n @prim_attr_register\n def __init__(self, shape):\n \"\"\"Initialize BroadcastTo\"\"\"\n validator.check_value_type(\"shape\", shape, (tuple), self.name)\n validator.check(\"shape length\", len(shape), \"\", 0, Rel.GT, self.name)\n for ix, i in enumerate(shape):\n validator.check_value_type('target shape index -> ' + str(ix), i, [int], self.name)\n validator.check(\"shape element\", i, \"shape element min limit\", -1, Rel.GE, self.name)\n self.shape = shape\n\n\nclass Meshgrid(PrimitiveWithInfer):\n \"\"\"\n Generates coordinate matrices from given coordinate tensors.\n\n Given N one-dimensional coordinate tensors, returns a tuple outputs of N N-D\n coordinate tensors for evaluating expressions on an N-D grid.\n\n Args:\n indexing ('xy', 'ij', optional): Cartesian ('xy', default) or\n matrix ('ij') indexing of output. In the 2-D case with\n inputs of length `M` and `N`, the outputs are of shape `(N, M)`\n for 'xy' indexing and `(M, N)` for 'ij' indexing. In the 3-D\n case with inputs of length `M`, `N` and `P`, outputs are of shape\n `(N, M, P)` for 'xy' indexing and `(M, N, P)` for 'ij' indexing.\n\n Inputs:\n - **input** (Union[tuple]) - A Tuple of N 1-D Tensor objects.\n The length of input should be greater than 1. The data type is Number.\n\n Outputs:\n Tensors, A Tuple of N N-D Tensor objects. The data type is the same with the Inputs.\n\n Raises:\n TypeError: If `indexing` is not a str or `input` is not a tuple.\n ValueError: If `indexing` is neither 'xy' nor 'ij'.\n\n Supported Platforms:\n ``Ascend`` ``GPU``\n\n Examples:\n >>> x = Tensor(np.array([1, 2, 3, 4]).astype(np.int32))\n >>> y = Tensor(np.array([5, 6, 7]).astype(np.int32))\n >>> z = Tensor(np.array([8, 9, 0, 1, 2]).astype(np.int32))\n >>> inputs = (x, y, z)\n >>> meshgrid = ops.Meshgrid(indexing=\"xy\")\n >>> output = meshgrid(inputs)\n >>> print(output)\n (Tensor(shape=[3, 4, 5], dtype=Int32, value=\n [[[1, 1, 1, 1, 1],\n [2, 2, 2, 2, 2],\n [3, 3, 3, 3, 3],\n [4, 4, 4, 4, 4]],\n [[1, 1, 1, 1, 1],\n [2, 2, 2, 2, 2],\n [3, 3, 3, 3, 3],\n [4, 4, 4, 4, 4]],\n [[1, 1, 1, 1, 1],\n [2, 2, 2, 2, 2],\n [3, 3, 3, 3, 3],\n [4, 4, 4, 4, 4]]]),\n Tensor(shape=[3, 4, 5], dtype=Int32, value=\n [[[5, 5, 5, 5, 5],\n [5, 5, 5, 5, 5],\n [5, 5, 5, 5, 5],\n [5, 5, 5, 5, 5]],\n [[6, 6, 6, 6, 6],\n [6, 6, 6, 6, 6],\n [6, 6, 6, 6, 6],\n [6, 6, 6, 6, 6]],\n [[7, 7, 7, 7, 7],\n [7, 7, 7, 7, 7],\n [7, 7, 7, 7, 7],\n [7, 7, 7, 7, 7]]]),\n Tensor(shape=[3, 4, 5], dtype=Int32, value=\n [[[8, 9, 0, 1, 2],\n [8, 9, 0, 1, 2],\n [8, 9, 0, 1, 2],\n [8, 9, 0, 1, 2]],\n [[8, 9, 0, 1, 2],\n [8, 9, 0, 1, 2],\n [8, 9, 0, 1, 2],\n [8, 9, 0, 1, 2]],\n [[8, 9, 0, 1, 2],\n [8, 9, 0, 1, 2],\n [8, 9, 0, 1, 2],\n [8, 9, 0, 1, 2]]]))\n \"\"\"\n\n @prim_attr_register\n def __init__(self, indexing=\"xy\"):\n \"\"\"Initialize Meshgrid.\"\"\"\n validator.check_value_type(\"indexing\", indexing, (str), self.name)\n validator.check_string(indexing.lower(), [\"xy\", \"ij\"], \"indexing\", self.name)\n self.indexing = indexing\n\n def infer_shape(self, x_shape):\n validator.check_value_type(\"shape\", x_shape, [tuple], self.name)\n validator.check_int(len(x_shape), 2, Rel.GE, \"len of input\", self.name)\n n = len(x_shape)\n shape_0 = []\n for s in x_shape:\n validator.check_int(len(s), 1, Rel.EQ, 'each input rank', self.name)\n shape_0.append(s[0])\n if self.indexing == \"xy\":\n shape_0[0], shape_0[1] = shape_0[1], shape_0[0]\n out_shape = tuple(tuple(shape_0) for _ in range(n))\n return out_shape\n\n def infer_dtype(self, x_type):\n 
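# Every positional input must be a tensor, and all inputs must share the dtype of the first one.\n        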
validator.check_subclass(\"input[0]\", x_type[0], mstype.tensor, self.name)\n n = len(x_type)\n for i in range(1, n):\n validator.check('x_type[%d]' % i, x_type[i], 'base', x_type[0], Rel.EQ, self.name, TypeError)\n return x_type\n\n\nclass InplaceUpdate(PrimitiveWithInfer):\n r\"\"\"\n Updates specified rows with values in `v`.\n\n Args:\n indices (Union[int, tuple]): Indices into the left-most dimension of `x`, and determines which rows of x\n to update with v. It is an int or tuple, whose value is in [0, the first dimension size of x).\n\n Inputs:\n - **x** (Tensor) - A tensor which to be inplace updated. It can be one of the following data types:\n float32, float16 and int32.\n - **v** (Tensor) - A tensor with the same type as `x` and the same dimension size as `x` except\n the first dimension, which must be the same as the size of `indices`.\n\n Outputs:\n Tensor, with the same type and shape as the input `x`.\n\n Raises:\n TypeError: If `indices` is neither int nor tuple.\n TypeError: If `indices` is a tuple and its element is not an int.\n\n Supported Platforms:\n ``Ascend``\n\n Examples:\n >>> indices = (0, 1)\n >>> x = Tensor(np.array([[1, 2], [3, 4], [5, 6]]), mindspore.float32)\n >>> v = Tensor(np.array([[0.5, 1.0], [1.0, 1.5]]), mindspore.float32)\n >>> inplace_update = ops.InplaceUpdate(indices)\n >>> output = inplace_update(x, v)\n >>> print(output)\n [[0.5 1. ]\n [1. 1.5]\n [5. 6. ]]\n \"\"\"\n\n @prim_attr_register\n def __init__(self, indices):\n \"\"\"Initialize InplaceUpdate\"\"\"\n self.init_prim_io_names(inputs=['x', 'v'], outputs=['y'])\n self.indices = indices\n validator.check_value_type(\"indices\", indices, [int, tuple], self.name)\n if isinstance(indices, int):\n self.indices = (indices,)\n for item in self.indices:\n validator.check_value_type(\"item of indices\", item, [int], self.name)\n\n def infer_dtype(self, x_dtype, v_dtype):\n args = {'x': x_dtype, 'v': v_dtype}\n valid_type = [mstype.int32, mstype.float16, mstype.float32]\n validator.check_tensors_dtypes_same_and_valid(args, valid_type, self.name)\n return x_dtype\n\n def infer_shape(self, x_shape, v_shape):\n validator.check(\"x\", len(x_shape), \"v\", len(v_shape), Rel.EQ, self.name)\n validator.check(\"size of indices\", len(self.indices), \"v's first dimension\", v_shape[0],\n Rel.EQ, self.name)\n for i in self.indices:\n if i < 0 or i >= x_shape[0]:\n raise ValueError(f\"For '{self.name}', the value of indices must be in [0, {x_shape[0]}), \"\n f\"but got {i}.\")\n x_rank = len(x_shape)\n for idx in range(x_rank)[1:]:\n validator.check('v dim %d' % idx, v_shape[idx], \"x dim %d\" % idx, x_shape[idx], Rel.EQ, self.name)\n return x_shape\n\n\nclass ReverseSequence(PrimitiveWithInfer):\n \"\"\"\n Reverses variable length slices.\n\n Args:\n seq_dim (int): The dimension where reversal is performed. Required.\n batch_dim (int): The input is sliced in this dimension. 
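# --- Illustrative sketch (not part of the operator source above) ---
# A minimal NumPy model of the InplaceUpdate semantics documented above: the rows
# of `x` named in `indices` are replaced by the corresponding rows of `v`.
# `numpy_inplace_update` is a hypothetical helper used only for illustration.
import numpy as np

def numpy_inplace_update(x, indices, v):
    out = x.copy()
    for idx, row in zip(indices, v):
        out[idx] = row  # overwrite row `idx` of x with the matching row of v
    return out

# Mirrors the InplaceUpdate docstring example: indices (0, 1) update the first two rows.
x = np.array([[1, 2], [3, 4], [5, 6]], np.float32)
v = np.array([[0.5, 1.0], [1.0, 1.5]], np.float32)
print(numpy_inplace_update(x, (0, 1), v))  # [[0.5 1. ] [1.  1.5] [5.  6. ]]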
Default: 0.\n\n Inputs:\n - **x** (Tensor) - The input to reverse, supporting all number types including bool.\n - **seq_lengths** (Tensor) - Must be a 1-D vector with int32 or int64 types.\n\n Outputs:\n Reversed tensor with the same shape and data type as input.\n\n Raises:\n TypeError: If `seq_dim` or `batch_dim` is not an int.\n\n Supported Platforms:\n ``Ascend`` ``GPU``\n\n Examples:\n >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.float32)\n >>> seq_lengths = Tensor(np.array([1, 2, 3]))\n >>> reverse_sequence = ops.ReverseSequence(seq_dim=1)\n >>> output = reverse_sequence(x, seq_lengths)\n >>> print(output)\n [[1. 2. 3.]\n [5. 4. 6.]\n [9. 8. 7.]]\n >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.float32)\n >>> seq_lengths = Tensor(np.array([1, 2, 3]))\n >>> reverse_sequence = ops.ReverseSequence(seq_dim=0, batch_dim=1)\n >>> output = reverse_sequence(x, seq_lengths)\n >>> print(output)\n [[1. 5. 9.]\n [4. 2. 6.]\n [7. 8. 3.]]\n >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.float32)\n >>> seq_lengths = Tensor(np.array([2, 2, 3]))\n >>> reverse_sequence = ops.ReverseSequence(seq_dim=1)\n >>> output = reverse_sequence(x, seq_lengths)\n >>> print(output)\n [[2. 1. 3.]\n [5. 4. 6.]\n [9. 8. 7.]]\n >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.float32)\n >>> seq_lengths = Tensor(np.array([3, 2, 3]))\n >>> reverse_sequence = ops.ReverseSequence(seq_dim=1)\n >>> output = reverse_sequence(x, seq_lengths)\n >>> print(output)\n [[3. 2. 1.]\n [5. 4. 6.]\n [9. 8. 7.]]\n >>> x = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8]]), mindspore.float32)\n >>> seq_lengths = Tensor(np.array([4, 4]))\n >>> reverse_sequence = ops.ReverseSequence(seq_dim=1)\n >>> output = reverse_sequence(x, seq_lengths)\n >>> print(output)\n [[4. 3. 2. 1.]\n [8. 7. 6. 5.]]\n \"\"\"\n\n @prim_attr_register\n def __init__(self, seq_dim, batch_dim=0):\n \"\"\"Initialize ReverseSequence\"\"\"\n self.init_prim_io_names(inputs=['x', 'seq_lengths'], outputs=['y'])\n validator.check_value_type(\"seq_dim\", seq_dim, [int], self.name)\n self.seq_dim_ = seq_dim\n validator.check_value_type(\"batch_dim\", batch_dim, [int], self.name)\n self.batch_dim_ = batch_dim\n\n def infer_shape(self, x, seq_lengths):\n validator.check(\"seq_dim\", self.seq_dim_, \"x rank\", len(x), Rel.LE, self.name)\n validator.check(\"batch_dim\", self.batch_dim_, \"x rank\", len(x), Rel.LE, self.name)\n validator.check(\"batch_dim\", self.batch_dim_, \"seq_dim\", self.seq_dim_, Rel.NE, self.name)\n validator.check(\"seq_lengths rank\", len(seq_lengths), \"expected\", 1, Rel.EQ, self.name)\n validator.check(\"seq_lengths vector size\", seq_lengths[0],\n \"input size along batch_dim\", x[self.batch_dim_], Rel.EQ, self.name)\n return x\n\n def infer_dtype(self, x, seq_lengths):\n validator.check_tensor_dtype_valid(\"x_dtype\", x, mstype.number_type + (mstype.bool_,), self.name)\n validator.check_tensor_dtype_valid(\"seq_lengths_dtype\", seq_lengths, [mstype.int32, mstype.int64], self.name)\n return x\n\n\nclass EditDistance(PrimitiveWithInfer):\n r\"\"\"\n Computes the Levenshtein Edit Distance. It is used to measure the similarity of two sequences. The inputs are\n variable-length sequences provided by SparseTensors (hypothesis_indices, hypothesis_values, hypothesis_shape)\n and (truth_indices, truth_values, truth_shape).\n\n .. 
math::\n\n \\operatorname{lev}_{a, b}(i, j)=\\left\\{\\begin{array}{ll}\n \\max (i, j) \\qquad \\qquad \\qquad \\qquad \\qquad \\quad \\ \\text { if } \\min (i, j)=0 \\\\\n \\min \\left\\{\\begin{array}{ll}\n \\operatorname{lev}_{a, b}(i-1, j)+1 & \\\\\n \\operatorname{lev}_{a, b}(i, j-1)+1 & \\text { otherwise. } \\\\\n \\operatorname{lev}_{a, b}(i-1, j-1)+1_{\\left(a_{i} \\neq b_{j}\\right)}\n \\end{array}\\right. &\n \\end{array}\\right.\n\n Where :math:`a` indicates the hypothesis and :math:`b` indicates the truth. For ease of understanding,\n :math:`i` and :math:`j` here may be regarded as the lengths of :math:`a` and :math:`b`.\n\n Args:\n normalize (bool): If True, edit distances are normalized by length of truth. Default: True.\n\n Inputs:\n - **hypothesis_indices** (Tensor) - The indices of the hypothesis list SparseTensor. With int64 data type.\n The shape of tensor is :math:`(N, R)`.\n - **hypothesis_values** (Tensor) - The values of the hypothesis list SparseTensor. With float32 data type.\n Must be 1-D vector with length of N.\n - **hypothesis_shape** (Tensor) - The shape of the hypothesis list SparseTensor.\n Must be R-length vector with int64 data type. Only constant value is allowed.\n - **truth_indices** (Tensor) - The indices of the truth list SparseTensor. With int64 data type.\n The shape of tensor is :math:`(M, R)`.\n - **truth_values** (Tensor) - The values of the truth list SparseTensor. Must be 1-D vector with length of M.\n With float32 data type.\n - **truth_shape** (Tensor) - The shape of the truth list SparseTensor.\n Must be R-length vector with int64 data type. Only constant value is allowed.\n\n Outputs:\n Tensor, a dense tensor with rank `R-1` and float32 data type.\n\n Raises:\n TypeError: If `normalize` is not a bool.\n\n Supported Platforms:\n ``Ascend``\n\n Examples:\n >>> import numpy as np\n >>> from mindspore import context\n >>> from mindspore import Tensor\n >>> import mindspore.nn as nn\n >>> import mindspore.ops as ops\n >>> class EditDistance(nn.Cell):\n ... def __init__(self, hypothesis_shape, truth_shape, normalize=True):\n ... super(EditDistance, self).__init__()\n ... self.edit_distance = ops.EditDistance(normalize)\n ... self.hypothesis_shape = hypothesis_shape\n ... self.truth_shape = truth_shape\n ...\n ... def construct(self, hypothesis_indices, hypothesis_values, truth_indices, truth_values):\n ... return self.edit_distance(hypothesis_indices, hypothesis_values, self.hypothesis_shape,\n ... truth_indices, truth_values, self.truth_shape)\n ...\n >>> hypothesis_indices = Tensor(np.array([[0, 0, 0], [1, 0, 1], [1, 1, 1]]).astype(np.int64))\n >>> hypothesis_values = Tensor(np.array([1, 2, 3]).astype(np.float32))\n >>> hypothesis_shape = Tensor(np.array([1, 1, 2]).astype(np.int64))\n >>> truth_indices = Tensor(np.array([[0, 1, 0], [0, 0, 1], [1, 1, 0], [1, 0, 1]]).astype(np.int64))\n >>> truth_values = Tensor(np.array([1, 3, 2, 1]).astype(np.float32))\n >>> truth_shape = Tensor(np.array([2, 2, 2]).astype(np.int64))\n >>> edit_distance = EditDistance(hypothesis_shape, truth_shape)\n >>> output = edit_distance(hypothesis_indices, hypothesis_values, truth_indices, truth_values)\n >>> print(output)\n [[1. 1.]\n [1. 
1.]]\n \"\"\"\n\n @prim_attr_register\n def __init__(self, normalize=True):\n \"\"\"Initialize EditDistance\"\"\"\n self.normalize = validator.check_value_type(\"normalize\", normalize, [bool], self.name)\n self.set_const_input_indexes([2, 5])\n\n def __infer__(self, h_indices, h_values, h_shape, truth_indices, truth_values, truth_shape):\n validator.check_valid_input('hypothesis_shape', h_shape['value'], self.name)\n validator.check_valid_input('truth_shape', truth_shape['value'], self.name)\n args_int = {\"hypothesis_indices\": h_indices['dtype'], \"hypothesis_shape\": h_shape['dtype'],\n \"truth_indices\": truth_indices['dtype'], \"truth_shape\": truth_shape['dtype']}\n validator.check_tensors_dtypes_same_and_valid(args_int, [mstype.int64], self.name)\n args = {\"hypothesis_values\": h_values['dtype'], \"truth_values\": truth_values['dtype']}\n validator.check_tensors_dtypes_same_and_valid(args, mstype.number_type, self.name)\n\n hypothesis_indices_shp, truth_indices_shp = h_indices['shape'], truth_indices['shape']\n validator.check(\"hypothesis_indices rank\", len(hypothesis_indices_shp), \"expected\", 2, Rel.EQ, self.name)\n validator.check(\"truth_indices rank\", len(truth_indices_shp), \"expected\", 2, Rel.EQ, self.name)\n validator.check(\"hypothesis_values rank\", len(h_values['shape']), \"expected\", 1, Rel.EQ, self.name)\n validator.check(\"hypothesis_shape rank\", len(h_shape['shape']), \"expected\", 1, Rel.EQ, self.name)\n validator.check(\"truth_values rank\", len(truth_values['shape']), \"expected\", 1, Rel.EQ, self.name)\n validator.check(\"truth_shape rank\", len(truth_shape['shape']), \"expected\", 1, Rel.EQ, self.name)\n validator.check(\"hypothesis_values shape\", h_values['shape'][0],\n \"hypothesis_indices shape[0]\", hypothesis_indices_shp[0], Rel.EQ, self.name)\n validator.check(\"hypothesis_shape\", h_shape['shape'][0],\n \"hypothesis_indices shape[1]\", hypothesis_indices_shp[1], Rel.EQ, self.name)\n validator.check(\"truth_values shape\", truth_values['shape'][0],\n \"truth_indices shape[0]\", truth_indices_shp[0], Rel.EQ, self.name)\n validator.check(\"hypothesis_shape\", h_shape['shape'][0],\n \"truth_shape\", truth_shape['shape'][0], Rel.EQ, self.name)\n hypothesis_shape_v = h_shape['value'].asnumpy()\n truth_shape_v = truth_shape['value'].asnumpy()\n out_shape_rank = len(hypothesis_shape_v) - 1\n out_shape = []\n for i in range(out_shape_rank):\n out_shape.append(max(hypothesis_shape_v[i], truth_shape_v[i]))\n\n return {'shape': tuple(out_shape),\n 'dtype': mstype.tensor_type(mstype.float32),\n 'value': None}\n\n\nclass TransShape(PrimitiveWithInfer):\n \"\"\"\n Transforms the shape of input tensor to target shape.\n\n Inputs:\n - **input_x** (Tensor) - A input tensor.\n - **out_shape** (tuple[int]) - The shape of output data.\n\n Outputs:\n Tensor, a tensor whose data type is same as 'input_x', and the shape is the same as the `out_shape`.\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n \"\"\"Initialize TransShape.\"\"\"\n self.__setattr_flag__ = True\n\n def __infer__(self, x, shape):\n shp = shape['value']\n dtype = x['dtype']\n validator.check_tensor_dtype_valid('x', dtype, mstype.number_type + (mstype.bool_,), self.name)\n self.add_prim_attr('out_shape', tuple(shp))\n return {'shape': shp,\n 'dtype': dtype,\n 'value': None}\n\n\nclass Sort(Primitive):\n \"\"\"\n Sorts the elements of the input tensor along a given dimension in ascending order by value.\n\n Args:\n axis (int): The dimension to sort along. 
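# --- Illustrative sketch (not part of the operator source above) ---
# A plain-Python rendering of the lev_{a,b}(i, j) recurrence from the EditDistance
# docstring, written iteratively so the three branches (deletion, insertion,
# substitution) are visible. `levenshtein` is a hypothetical helper shown only to
# make the formula concrete; the operator itself consumes the SparseTensor triplets.
def levenshtein(a, b):
    rows, cols = len(a) + 1, len(b) + 1
    d = [[0] * cols for _ in range(rows)]
    for i in range(rows):
        d[i][0] = i  # min(i, j) == 0 branch: the distance is max(i, j)
    for j in range(cols):
        d[0][j] = j
    for i in range(1, rows):
        for j in range(1, cols):
            d[i][j] = min(d[i - 1][j] + 1,                            # deletion
                          d[i][j - 1] + 1,                            # insertion
                          d[i - 1][j - 1] + (a[i - 1] != b[j - 1]))   # substitution
    return d[-1][-1]

print(levenshtein("kitten", "sitting"))  # 3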
Default: -1.\n descending (bool): Controls the sorting order. If descending is True then the elements\n are sorted in descending order by value. Default: False.\n\n .. warning::\n Currently, only the data type of Float16 is supported. If use Float32, it may cause loss\n of accuracy.\n\n Inputs:\n - **x** (Tensor) - The input to sort, with float16 or float32 data type.\n The shape is :math:`(N,*)` where :math:`*` means,any number of additional dimensions.\n\n Outputs:\n - **y1** (Tensor) - A tensor whose values are the sorted values, with the same shape and data type as input.\n - **y2** (Tensor) - The indices of the elements in the original input tensor. Data type is int32.\n\n Raises:\n TypeError: If `axis` is not an int.\n TypeError: If `descending` is not a bool.\n TypeError: If dtype of `x` is neither float16 nor float32.\n\n Supported Platforms:\n ``Ascend`` ``GPU`` ``CPU``\n\n Examples:\n >>> x = Tensor(np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]]), mindspore.float16)\n >>> sort = ops.Sort()\n >>> output = sort(x)\n >>> print(output)\n (Tensor(shape=[3, 3], dtype=Float16, value=\n [[ 1.0000e+00, 2.0000e+00, 8.0000e+00],\n [ 3.0000e+00, 5.0000e+00, 9.0000e+00],\n [ 4.0000e+00, 6.0000e+00, 7.0000e+00]]), Tensor(shape=[3, 3], dtype=Int32, value=\n [[2, 1, 0],\n [2, 0, 1],\n [0, 1, 2]]))\n \"\"\"\n @prim_attr_register\n def __init__(self, axis=-1, descending=False):\n \"\"\"Initialize Sort\"\"\"\n self.axis = validator.check_value_type(\"axis\", axis, [int], self.name)\n self.descending = validator.check_value_type(\"descending\", descending, [bool], self.name)\n self.init_prim_io_names(inputs=['x'], outputs=['y1', 'y2'])\n\n\nclass EmbeddingLookup(PrimitiveWithCheck):\n \"\"\"\n Returns a slice of input tensor based on the specified indices.\n\n This Primitive has the similar functionality as GatherV2 operating on `axis = 0`, but has one more inputs:\n `offset`.\n\n Inputs:\n - **input_params** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.\n This represents a Tensor slice, instead of the entire Tensor. Currently, the dimension is restricted to be 2.\n - **input_indices** (Tensor) - The shape of tensor is :math:`(y_1, y_2, ..., y_S)`.\n Specifies the indices of elements of the original Tensor. Values can be out of range of `input_params`,\n and the exceeding part will be filled with 0 in the output. Values do not support negative and the result\n is undefined if values are negative. The data type should be int32 or int64.\n - **offset** (int) - Specifies the offset value of this `input_params` slice. Thus the real indices\n are equal to `input_indices` minus `offset`.\n\n Outputs:\n Tensor, the shape of tensor is :math:`(z_1, z_2, ..., z_N)`. The data type is the same with `input_params`.\n\n Raises:\n TypeError: If dtype of `input_indices` is not int.\n ValueError: If length of shape of `input_params` is greater than 2.\n\n Supported Platforms:\n ``Ascend`` ``CPU`` ``GPU``\n\n Examples:\n >>> input_params = Tensor(np.array([[8, 9], [10, 11], [12, 13], [14, 15]]), mindspore.float32)\n >>> input_indices = Tensor(np.array([[5, 2], [8, 5]]), mindspore.int32)\n >>> offset = 4\n >>> output = ops.EmbeddingLookup()(input_params, input_indices, offset)\n >>> print(output)\n [[[10. 11.]\n [ 0. 0.]]\n [[ 0. 0.]\n [10. 
11.]]]\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n \"\"\"Initialize EmbeddingLookup.\"\"\"\n self.__setattr_flag__ = True\n self.init_prim_io_names(inputs=['params', 'indices', 'offset'],\n outputs=['output'])\n\n def __check__(self, params, indices, offset):\n validator.check_subclass(\"params\", params['dtype'], mstype.tensor, self.name)\n validator.check_tensor_dtype_valid(\"indices\", indices['dtype'], mstype.int_type, self.name)\n validator.check_subclass(\"offset\", offset['dtype'], mstype.int_, self.name)\n indices_shp = indices['shape']\n if not indices_shp:\n raise ValueError(f\"For '{self.name}', the dimension of 'input_indices' should not \"\n f\"be zero, but got {len(indices_shp)}.\")\n params_shp = params['shape']\n if len(params_shp) > 2:\n raise ValueError(f\"For '{self.name}', the dimension of 'input_params' must <= 2, \"\n f\"but got {len(params_shp)}.\")\n\n\nclass GatherD(Primitive):\n \"\"\"\n Gathers values along an axis specified by dim.\n\n For a 3-D tensor, the output is:\n\n .. code-block::\n\n output[i][j][k] = x[index[i][j][k]][j][k] # if dim == 0\n\n output[i][j][k] = x[i][index[i][j][k]][k] # if dim == 1\n\n output[i][j][k] = x[i][j][index[i][j][k]] # if dim == 2\n\n If `x` is an n-D tensor with shape :math:`(z_0, z_1, ..., z_i, ..., z_{n-1})` and `dim` = i,\n the `index` must be an n-D tensor with shape :math:`(z_0, z_1, ..., y, ..., z_{n-1})`\n where `y`>=1 and the output will have the same shape as `index`.\n\n Inputs:\n - **x** (Tensor) - The source tensor.\n The shape is :math:`(N,*)` where :math:`*` means,any number of additional dimensions.\n - **dim** (int) - The axis along which to index. It must be int32 or int64. Only constant value is allowed.\n - **index** (Tensor) - The indices of elements to gather. It can be one of the following data types:\n int32, int64. The value range of each index element is [-x_rank[dim], x_rank[dim]).\n\n Outputs:\n Tensor, the shape of tensor is :math:`(z_1, z_2, ..., z_N)`, has the same data type with `x`.\n\n Raises:\n TypeError: If dtype of `dim` or `index` is neither int32 nor int64.\n ValueError: If length of shape of `x` is not equal to length of shape of `index`.\n\n Supported Platforms:\n ``Ascend`` ``GPU`` ``CPU``\n\n Examples:\n >>> x = Tensor(np.array([[1, 2], [3, 4]]), mindspore.int32)\n >>> index = Tensor(np.array([[0, 0], [1, 0]]), mindspore.int32)\n >>> dim = 1\n >>> output = ops.GatherD()(x, dim, index)\n >>> print(output)\n [[1 1]\n [4 3]]\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n \"\"\"Initialize GatherD\"\"\"\n self.init_prim_io_names(inputs=['x', 'dim', 'index'], outputs=['output'])\n\n\nclass Identity(PrimitiveWithInfer):\n \"\"\"\n Returns a Tensor with the same shape and contents as input.\n\n Inputs:\n - **x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`. 
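# --- Illustrative sketch (not part of the operator source above) ---
# A NumPy model of the EmbeddingLookup semantics documented above: the real row
# indices are `input_indices - offset`, and indices outside the row range of
# `input_params` yield zero-filled rows. `numpy_embedding_lookup` is a
# hypothetical helper used only for illustration.
import numpy as np

def numpy_embedding_lookup(params, indices, offset):
    real = indices - offset
    out = np.zeros(indices.shape + params.shape[1:], params.dtype)
    valid = (real >= 0) & (real < params.shape[0])
    out[valid] = params[real[valid]]  # out-of-range rows stay zero
    return out

params = np.array([[8, 9], [10, 11], [12, 13], [14, 15]], np.float32)
indices = np.array([[5, 2], [8, 5]])
print(numpy_embedding_lookup(params, indices, 4))
# [[[10. 11.] [ 0.  0.]] [[ 0.  0.] [10. 11.]]], as in the docstring example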
The data type is Number.\n\n Outputs:\n Tensor, the shape of tensor and the data type are the same as `x`, :math:`(x_1, x_2, ..., x_R)`.\n\n Raises:\n TypeError: If `x` is not a Tensor.\n\n Supported Platforms:\n ``Ascend`` ``CPU`` ``GPU``\n\n Examples:\n >>> x = Tensor(np.array([1, 2, 3, 4]), mindspore.int64)\n >>> output = ops.Identity()(x)\n >>> print(output)\n [1 2 3 4]\n \"\"\"\n\n # Side effect is identity with input.\n side_effect_propagate = 1\n\n @prim_attr_register\n def __init__(self):\n \"\"\"Initialize identity\"\"\"\n self.add_prim_attr('side_effect_propagate', 1)\n\n def __infer__(self, x):\n validator.check_subclass(\"x\", x['dtype'], mstype.tensor, self.name)\n validator.check_tensor_dtype_valid('x', x['dtype'], mstype.number_type + (mstype.bool_,), self.name)\n out = {'shape': x['shape'],\n 'dtype': x['dtype'],\n 'value': None}\n return out\n\n\nclass Range(PrimitiveWithCheck):\n r\"\"\"\n Creates a sequence of numbers that begins at `start` and extends by increments of\n `delta` up to but not including `limit`.\n\n The types of all 3 inputs must be the same. The type of the resulting tensor is\n the same as the type of the inputs.\n\n Args:\n maxlen (int): Memory that can fit `maxlen` many elements\n will be allocated for the output. Optional, must be positive, defaults to 1000000.\n If the output has more than `maxlen` elements, a runtime error\n will occur.\n\n Inputs:\n - **start** (Tensor) - A scalar Tensor. The first number in the sequence. Must have\n type: int32 or float32\n - **limit** (Tensor) - A scalar Tensor. Upper limit of the sequence, exclusive. Must\n have type: int32 or float32\n - **delta** (Tensor) - A scalar Tensor. Number that increments `start`. Must have\n type: int32 or float32\n\n Outputs:\n A 1-D Tensor, with the same type as the inputs.\n\n Supported Platforms:\n ``GPU``\n\n Examples:\n >>> start = Tensor(0, mstype.int32)\n >>> limit = Tensor(10, mstype.int32)\n >>> delta = Tensor(4, mstype.int32)\n >>> output = ops.Range()(start, limit, delta)\n >>> print(output)\n [0 4 8]\n \"\"\"\n\n @prim_attr_register\n def __init__(self, maxlen=1000000):\n self.init_prim_io_names(inputs=['start', 'limit', 'delta'], outputs=['output'])\n validator.check_value_type(\"maxlen\", maxlen, [int], self.name)\n validator.check_positive_int(maxlen, \"maxlen\", self.name)\n self.maxlen = maxlen\n self.add_prim_attr('maxlen', maxlen)\n\n def check_shape(self, start_shape, limit_shape, delta_shape):\n validator.check(\"start_shape\", len(start_shape), \"\", 0, Rel.EQ, self.name)\n validator.check(\"limit_shape\", len(limit_shape), \"\", 0, Rel.EQ, self.name)\n validator.check(\"delta_shape\", len(delta_shape), \"\", 0, Rel.EQ, self.name)\n\n def check_dtype(self, start_dtype, limit_dtype, delta_dtype):\n valid_dtypes = [mstype.int32, mstype.float32]\n inputs = {\"start\": start_dtype, \"limit\": limit_dtype, \"delta\": delta_dtype}\n validator.check_tensors_dtypes_same_and_valid(inputs, valid_dtypes, self.name)\n\n def infer_value(self, start_value, limit_value, delta_value):\n \"\"\"Infer the value of input for Range.\"\"\"\n if start_value is not None and limit_value is not None and delta_value is not None:\n start = start_value.asnumpy().item()\n limit = limit_value.asnumpy().item()\n delta = delta_value.asnumpy().item()\n return Tensor(np.arange(start, limit, delta), dtype=start_value.dtype)\n return None\n\n\nclass MaskedFill(Primitive):\n \"\"\"\n Fills elements of the input tensor with `value` where `mask` is True.\n\n The shapes of `input` and 
`mask` need to be the same or broadcast.\n\n Inputs:\n - **input** (Tensor) - The source tensor whose data type is one of float16, float32, int8, int32.\n - **mask** (Tensor[bool]) - The boolean mask.\n - **value** (Union[float, Tensor]) – The value to fill in with, which only supports\n a 0-dimensional tensor or a float number.\n\n Outputs:\n Tensor, has the same type and shape as `input`.\n\n Raises:\n TypeError: If `input` or `mask` is not a tensor.\n TypeError: If `value` is neither float number nor tensor.\n TypeError: If dtype of `input` or `value` is not one of float16, float32, int8, int32.\n TypeError: If dtype of `value` is different from that of `input`.\n TypeError: If dtype of `mask` is not bool.\n ValueError: If the shapes of `input` and `mask` could not be broadcast.\n\n Supported Platforms:\n ``Ascend``\n\n Examples:\n >>> input = Tensor(np.array([1., 2., 3., 4.]), mindspore.float32)\n >>> mask = Tensor(np.array([True, True, False, True]), mindspore.bool_)\n >>> output = ops.MaskedFill()(input, mask, 0.5)\n >>> print(output)\n [0.5 0.5 3. 0.5]\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n self.init_prim_io_names(inputs=['input', 'mask', 'value'], outputs=['output'])\n\n\nclass MaskedSelect(PrimitiveWithCheck):\n \"\"\"\n Returns a new 1-D Tensor which indexes the input tensor according to the boolean mask.\n The shapes of the mask tensor and the input tensor don't need to match, but they must be broadcastable.\n\n Inputs:\n - **x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.\n - **mask** (Tensor[bool]) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.\n\n Outputs:\n A 1-D Tensor, with the same type as x.\n\n Raises:\n TypeError: If `x` is not a Tensor.\n\n Supported Platforms:\n ``Ascend`` ``CPU``\n\n Examples:\n >>> x = Tensor(np.array([1, 2, 3, 4]), mindspore.int64)\n >>> mask = Tensor(np.array([1, 0, 1, 0]), mindspore.bool_)\n >>> output = ops.MaskedSelect()(x, mask)\n >>> print(output)\n [1 3]\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n self.init_prim_io_names(inputs=['x', 'mask'], outputs=['output'])\n\n def check_shape(self, x_shape, mask_shape):\n get_broadcast_shape(x_shape, mask_shape, self.name, arg_name1=\"x\", arg_name2=\"mask\")\n\n def check_dtype(self, x_dtype, mask_dtype):\n validator.check_tensor_dtype_valid('mask', mask_dtype, [mstype.bool_], self.name)\n validator.check_tensor_dtype_valid('x', x_dtype, mstype.number_type, self.name)\n\n\nclass SearchSorted(PrimitiveWithInfer):\n \"\"\"\n Find the indices from the innermost dimension of `sequence` such that the order of the innermost dimension\n within `sequence` would be preserved when the corresponding values in `values` were inserted before the indices.\n\n Args:\n out_int32 (bool): Output datatype. Optional. If True, the output datatype will be int32;\n if False, the output datatype will be int64. Default is False.\n right (bool): Search Strategy. Optional. If True, return the last suitable index found.\n If False, return the first such index. 
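# --- Illustrative sketch (not part of the operator source above) ---
# MaskedSelect broadcasts `x` and `mask` against each other and returns the
# selected elements as a 1-D tensor; NumPy boolean indexing after an explicit
# broadcast models the same behaviour.
import numpy as np

x = np.array([1, 2, 3, 4], np.int64)
mask = np.array([True, False, True, False])
xb, mb = np.broadcast_arrays(x, mask)
print(xb[mb])  # [1 3], matching the MaskedSelect docstring example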
Default is False.\n\n Inputs:\n - **sequence** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R-1, x_R)` or `(x_1)`.\n It must contain a monotonically increasing sequence along the innermost dimension.\n - **values** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R-1, x_S)`.\n\n Outputs:\n Tensor containing the indices from the innermost dimension of the input `sequence` such that,\n if the corresponding values in `values` were inserted at those positions, the order of the\n innermost dimension of `sequence` would be preserved.\n The shape of tensor is :math:`(x_1, x_2, ..., x_R-1, x_S)`,\n whose datatype is int32 if out_int32 is True, otherwise int64, and shape is the same as the shape of values.\n\n Raises:\n ValueError: If `sequence` and `values` do not have proper shapes.\n\n Supported Platforms:\n ``CPU``\n\n Examples:\n >>> sequence = Tensor(np.array([[0, 1, 3, 5, 7], [2, 4, 6, 8, 10]]), mindspore.float32)\n >>> values = Tensor(np.array([[3, 6, 9], [3, 6, 9]]), mindspore.float32)\n >>> output = ops.SearchSorted()(sequence, values)\n >>> print(output)\n [[2 4 5]\n [1 2 4]]\n \"\"\"\n\n @prim_attr_register\n def __init__(self, out_int32=False, right=False):\n \"\"\"Initialize SearchSorted\"\"\"\n self.out_int32 = validator.check_value_type(\"out_int32\", out_int32, [bool], self.name)\n self.right = validator.check_value_type(\"right\", right, [bool], self.name)\n self.init_prim_io_names(inputs=['sequence', 'values'], outputs=['positions'])\n\n def infer_shape(self, sequence_shape, values_shape):\n if len(sequence_shape) != 1 and sequence_shape[:-1] != values_shape[:-1]:\n raise ValueError(f\"For '{self.name}', the 'sequence' should be 1 dimensional or \"\n f\"all dimensions except the last dimension of 'sequence' \"\n f\"must be the same as all dimensions except the last dimension of 'values', \"\n f\"but got shape of 'sequence': {sequence_shape} \"\n f\"and shape of 'values': {values_shape}.\")\n return values_shape\n\n def infer_dtype(self, sequence_dtype, values_dtype):\n args = {\"sequence_dtype\": sequence_dtype, \"values_dtype\": values_dtype}\n validator.check_tensors_dtypes_same_and_valid(args, mstype.number_type, self.name)\n return mstype.tensor_type(mstype.int32) if self.out_int32 else mstype.tensor_type(mstype.int64)\n\n\nclass TensorScatterMax(PrimitiveWithInfer):\n \"\"\"\n Creates a new tensor by comparing, at each position given by `indices`, the value of `input_x`\n with the corresponding value in `updates`, and keeping the larger of the two.\n\n The last axis of the index is the depth of each index vector. For each index vector,\n there must be a corresponding value in `updates`. The shape of `updates` should be\n equal to the shape of input_x[indices].\n For more details, see use cases.\n\n Note:\n If some values of the `indices` are out of bound, instead of raising an index error,\n the corresponding `updates` will not be updated to `input_x`.\n\n Inputs:\n - **input_x** (Tensor) - The target tensor. 
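# --- Illustrative sketch (not part of the operator source above) ---
# SearchSorted is the batched analogue of NumPy's searchsorted: for each
# innermost row it finds insertion positions that keep the row sorted, and the
# `right` flag corresponds to NumPy's side='right'. This loop reproduces the
# docstring example.
import numpy as np

sequence = np.array([[0, 1, 3, 5, 7], [2, 4, 6, 8, 10]], np.float32)
values = np.array([[3, 6, 9], [3, 6, 9]], np.float32)
positions = np.stack([np.searchsorted(s, v, side='left')
                      for s, v in zip(sequence, values)])
print(positions)  # [[2 4 5] [1 2 4]]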
The dimension of input_x must be no less than indices.shape[-1].\n - **indices** (Tensor) - The index of input tensor whose data type is int32 or int64.\n The rank must be at least 2.\n - **updates** (Tensor) - The tensor to update the input tensor, has the same type as input,\n and updates.shape should be equal to indices.shape[:-1] + input_x.shape[indices.shape[-1]:].\n\n Outputs:\n Tensor, has the same shape and type as `input_x`.\n\n Raises:\n TypeError: If dtype of `indices` is neither int32 nor int64.\n ValueError: If length of shape of `input_x` is less than the last dimension of shape of `indices`.\n\n Supported Platforms:\n ``GPU``\n\n Examples:\n >>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)\n >>> indices = Tensor(np.array([[0, 0], [0, 0]]), mindspore.int32)\n >>> updates = Tensor(np.array([1.0, 2.2]), mindspore.float32)\n >>> # Next, demonstrate the approximate operation process of this operator:\n >>> # 1, indices[0] = [0, 0], indices[1] = [0, 0]\n >>> # 2, And input_x[0, 0] = -0.1\n >>> # 3, So input_x[indices] = [-0.1, -0.1]\n >>> # 4, Satisfy the above formula: input_x[indices].shape=(2) == updates.shape=(2)\n >>> op = ops.TensorScatterMax()\n >>> # 5, Perform the max operation for the first time:\n >>> # first_input_x = Max(input_x[0][0], updates[0]) = [[1.0, 0.3, 3.6], [0.4, 0.5, -3.2]]\n >>> # 6, Perform the max operation for the second time:\n >>> # second_input_x = Max(first_input_x[0][0], updates[1]) = [[2.2, 0.3, 3.6], [0.4, 0.5, -3.2]]\n >>> output = op(input_x, indices, updates)\n >>> print(output)\n [[ 2.2 0.3 3.6]\n [ 0.4 0.5 -3.2]]\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n self.init_prim_io_names(inputs=['input_x', 'indices', 'updates'], outputs=['y'])\n\n def infer_shape(self, input_x_shape, indices_shape, updates_shape):\n if len(indices_shape) < 2:\n raise ValueError(f\"For '{self.name}', the dimension of 'indices' cannot be less than 2,\"\n f\" but got {len(indices_shape)}.\")\n\n if indices_shape[-1] > len(input_x_shape):\n raise ValueError(f\"For '{self.name}', the last dimension of 'indices' must be less than or equal to \"\n f\"the dimension of 'input_x', but got the \"\n f\"last dimension of 'indices': {indices_shape[-1]} and the dimension of 'input_x': \"\n f\"{len(input_x_shape)}.\")\n\n updates_shape_check = indices_shape[:-1] + input_x_shape[indices_shape[-1]:]\n if updates_shape_check != updates_shape:\n raise ValueError(f\"For '{self.name}', the shape of 'update' must be equal to updates_shape_check, \"\n f\"where updates_shape_check = indices_shape[:-1] + input_x_shape[indices_shape[-1]:] \"\n f\"but got the shape of 'update': {updates_shape}, \"\n f\"updates_shape_check: {updates_shape_check}, indices_shape: {indices_shape} and \"\n f\"input_x_shape: {input_x_shape}. 
Please check input_x_shape and indices_shape.\")\n\n return input_x_shape\n\n def infer_dtype(self, input_x_dtype, indices_dtype, updates_dtype):\n validator.check_tensor_dtype_valid('indices', indices_dtype, [mstype.int32, mstype.int64], self.name)\n args = {\"input_x\": input_x_dtype, \"updates\": updates_dtype}\n valid_input_types = (mstype.float16, mstype.float32, mstype.int8, mstype.uint8, mstype.int32)\n validator.check_tensors_dtypes_same_and_valid(args, valid_input_types, self.name)\n return input_x_dtype\n\n\nclass TensorScatterMin(PrimitiveWithInfer):\n \"\"\"\n By comparing the value at the position indicated by the index in input_x with the value in the `updates`,\n the value at the index will eventually be equal to the smallest one to create a new tensor.\n\n The last axis of the index is the depth of each index vector. For each index vector,\n there must be a corresponding value in `updates`. The shape of `updates` should be\n equal to the shape of input_x[indices].\n For more details, see use cases.\n\n Note:\n If some values of the `indices` are out of bound, instead of raising an index error,\n the corresponding `updates` will not be updated to `input_x`.\n\n Inputs:\n - **input_x** (Tensor) - The target tensor. The dimension of input_x must be no less than indices.shape[-1].\n - **indices** (Tensor) - The index of input tensor whose data type is int32 or int64.\n The rank must be at least 2.\n - **updates** (Tensor) - The tensor to update the input tensor, has the same type as input,\n and updates.shape should be equal to indices.shape[:-1] + input_x.shape[indices.shape[-1]:].\n\n Outputs:\n Tensor, has the same shape and type as `input_x`.\n\n Raises:\n TypeError: If dtype of `indices` is neither int32 nor int64.\n ValueError: If length of shape of `input_x` is less than the last dimension of shape of `indices`.\n\n Supported Platforms:\n ``GPU``\n\n Examples:\n >>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)\n >>> indices = Tensor(np.array([[0, 0], [0, 0]]), mindspore.int32)\n >>> updates = Tensor(np.array([1.0, 2.2]), mindspore.float32)\n >>> # Next, demonstrate the approximate operation process of this operator:\n >>> # 1, indices[0] = [0, 0], indices[1] = [0, 0]\n >>> # 2, And input_x[0, 0] = -0.1\n >>> # 3, So input_x[indices] = [-0.1, -0.1]\n >>> # 4, Satisfy the above formula: input_x[indices].shape=(2) == updates.shape=(2)\n >>> op = ops.TensorScatterMin()\n >>> # 5, Perform the min operation for the first time:\n >>> # first_input_x = Min(input_x[0][0], updates[0]) = [[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]\n >>> # 6, Perform the min operation for the second time:\n >>> # second_input_x = Min(input_x[0][0], updates[1]) = [[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]\n >>> output = op(input_x, indices, updates)\n >>> print(output)\n [[ -0.1 0.3 3.6]\n [ 0.4 0.5 -3.2]]\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n self.init_prim_io_names(inputs=['input_x', 'indices', 'updates'], outputs=['y'])\n\n def infer_shape(self, input_x_shape, indices_shape, updates_shape):\n if len(indices_shape) < 2:\n raise ValueError(f\"For '{self.name}', the dimension of 'indices' cannot be less than 2,\"\n f\" but got {len(indices_shape)}.\")\n\n if indices_shape[-1] > len(input_x_shape):\n raise ValueError(f\"For '{self.name}', the last dimension of 'indices' must be less than or equal to \"\n f\"the dimension of 'input_x', but got the \"\n f\"last dimension of 'indices': {indices_shape[-1]} and the dimension of 'input_x': \"\n 
f\"{len(input_x_shape)}.\")\n\n updates_shape_check = indices_shape[:-1] + input_x_shape[indices_shape[-1]:]\n if updates_shape_check != updates_shape:\n raise ValueError(f\"For '{self.name}', the shape of 'update' must be equal to updates_shape_check, \"\n f\"where updates_shape_check = indices_shape[:-1] + input_x_shape[indices_shape[-1]:] \"\n f\"but got the shape of 'update': {updates_shape}, \"\n f\"updates_shape_check: {updates_shape_check}, indices_shape: {indices_shape} and \"\n f\"input_x_shape: {input_x_shape}. Please check input_x_shape and indices_shape.\")\n\n return input_x_shape\n\n def infer_dtype(self, input_x_dtype, indices_dtype, updates_dtype):\n validator.check_tensor_dtype_valid('indices', indices_dtype, [mstype.int32, mstype.int64], self.name)\n args = {\"input_x\": input_x_dtype, \"updates\": updates_dtype}\n valid_input_types = (mstype.float16, mstype.float32, mstype.int8, mstype.uint8, mstype.int32)\n validator.check_tensors_dtypes_same_and_valid(args, valid_input_types, self.name)\n return input_x_dtype\n\n\nclass TensorScatterSub(PrimitiveWithInfer):\n \"\"\"\n Creates a new tensor by subtracting the values from the positions in `input_x` indicated by\n `indices`, with values from `updates`. When multiple values are provided for the same\n index, the result of the update will be to subtract these values respectively. This operation is almost\n equivalent to using ScatterNdSub, except that the updates are applied on `Tensor` instead of `Parameter`.\n\n The last axis of `indices` is the depth of each index vectors. For each index vector,\n there must be a corresponding value in `updates`. The shape of `updates` should be\n equal to the shape of `input_x[indices]`. For more details, see use cases.\n\n Note:\n If some values of the `indices` are out of bound, instead of raising an index error,\n the corresponding `updates` will not be updated to `input_x`.\n\n Inputs:\n - **input_x** (Tensor) - The target tensor. 
The dimension of input_x must be no less than indices.shape[-1].\n - **indices** (Tensor) - The index of input tensor whose data type is int32 or int64.\n The rank must be at least 2.\n - **updates** (Tensor) - The tensor to update the input tensor, has the same type as input,\n and updates.shape should be equal to indices.shape[:-1] + input_x.shape[indices.shape[-1]:].\n\n Outputs:\n Tensor, has the same shape and type as `input_x`.\n\n Raises:\n TypeError: If dtype of `indices` is neither int32 nor int64.\n ValueError: If length of shape of `input_x` is less than the last dimension of shape of `indices`.\n\n Supported Platforms:\n ``GPU``\n\n Examples:\n >>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)\n >>> indices = Tensor(np.array([[0, 0], [0, 0]]), mindspore.int32)\n >>> updates = Tensor(np.array([1.0, 2.2]), mindspore.float32)\n >>> # Next, demonstrate the approximate operation process of this operator:\n >>> # 1, indices[0] = [0, 0], indices[1] = [0, 0]\n >>> # 2, And input_x[0, 0] = -0.1\n >>> # 3, So input_x[indices] = [-0.1, -0.1]\n >>> # 4, Satisfy the above formula: input_x[indices].shape=(2) == updates.shape=(2)\n >>> op = ops.TensorScatterSub()\n >>> # 5, Perform the subtract operation for the first time:\n >>> # first_input_x = input_x[0][0] - updates[0] = [[-1.1, 0.3, 3.6], [0.4, 0.5, -3.2]]\n >>> # 6, Perform the subtract operation for the second time:\n >>> # second_input_x = first_input_x[0][0] - updates[1] = [[-3.3, 0.3, 3.6], [0.4, 0.5, -3.2]]\n >>> output = op(input_x, indices, updates)\n >>> print(output)\n [[-3.3000002 0.3 3.6 ]\n [ 0.4 0.5 -3.2 ]]\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n self.init_prim_io_names(inputs=['input_x', 'indices', 'updates'], outputs=['y'])\n\n def infer_shape(self, input_x_shape, indices_shape, updates_shape):\n if len(indices_shape) < 2:\n raise ValueError(f\"For '{self.name}', the dimension of 'indices' cannot be less than 2,\"\n f\" but got {len(indices_shape)}.\")\n\n if indices_shape[-1] > len(input_x_shape):\n raise ValueError(f\"For '{self.name}', the last dimension of 'indices' must be less than or equal to \"\n f\"the dimension of 'input_x', but got the \"\n f\"last dimension of 'indices': {indices_shape[-1]} and the dimension of 'input_x': \"\n f\"{len(input_x_shape)}.\")\n\n updates_shape_check = indices_shape[:-1] + input_x_shape[indices_shape[-1]:]\n if updates_shape_check != updates_shape:\n raise ValueError(f\"For '{self.name}', the shape of 'update' must be equal to updates_shape_check, \"\n f\"where updates_shape_check = indices_shape[:-1] + input_x_shape[indices_shape[-1]:] \"\n f\"but got the shape of 'update': {updates_shape}, \"\n f\"updates_shape_check: {updates_shape_check}, indices_shape: {indices_shape} and \"\n f\"input_x_shape: {input_x_shape}. 
Please check input_x_shape and indices_shape.\")\n\n return input_x_shape\n\n def infer_dtype(self, input_x_dtype, indices_dtype, updates_dtype):\n validator.check_tensor_dtype_valid('indices', indices_dtype, [mstype.int32, mstype.int64], self.name)\n args = {\"input_x\": input_x_dtype, \"updates\": updates_dtype}\n valid_input_types = (mstype.float16, mstype.float32, mstype.int8, mstype.uint8, mstype.int32)\n validator.check_tensors_dtypes_same_and_valid(args, valid_input_types, self.name)\n return input_x_dtype\n\n\nclass SplitV(Primitive):\n r\"\"\"\n Splits the input tensor into num_split tensors along the given dimension.\n\n The `input_x` tensor will be split into sub-tensors with individual shapes given by `size_splits` along the split\n dimension. This requires that `input_x.shape(split_dim)` is equal to the sum of `size_splits`.\n\n The shape of `input_x` is :math:`(x_1, x_2, ..., x_M, ..., x_R)`. The rank of `input_x` is `R`. Set the given\n `split_dim` as M, and :math:`-R \\le M < R`. Set the given `num_split` as `N`, the given `size_splits` as\n :math:`(x_{m_1}, x_{m_2}, ..., x_{m_N})`, :math:`x_M=\\sum_{i=1}^Nx_{m_i}`. The output is a list of tensor objects,\n for the :math:`i`-th tensor, it has the shape of :math:`(x_1, x_2, ..., x_{m_i}, ..., x_R)`. :math:`x_{m_i}` is the\n :math:`M`-th dimension of the :math:`i`-th tensor. Then, the shape of the output tensor is\n\n .. math::\n\n ((x_1, x_2, ..., x_{m_1}, ..., x_R), (x_1, x_2, ..., x_{m_2}, ..., x_R), ...,\n (x_1, x_2, ..., x_{m_N}, ..., x_R))\n\n Args:\n size_splits (Union[tuple, list]): The list containing the sizes of each output tensor along the split\n dimension. Must sum to the dimension of value along `split_dim`.\n Can contain one -1 indicating that dimension is to be inferred.\n split_dim (int): The dimension along which to split. Must be in the range [-len(input_x.shape),\n len(input_x.shape)).\n num_split (int): The number of output tensors. 
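# --- Illustrative sketch (not part of the operator source above) ---
# SplitV allows one -1 entry in size_splits, inferred so that the sizes sum to
# the dimension being split; np.split over cumulative offsets then models the
# splitting itself. `resolve_size_splits` is a hypothetical helper.
import numpy as np

def resolve_size_splits(size_splits, dim_size):
    sizes = list(size_splits)
    if -1 in sizes:
        # the explicit sizes sum to sum(sizes) + 1, the remainder fills the -1 slot
        sizes[sizes.index(-1)] = dim_size - (sum(sizes) + 1)
    return sizes

x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
sizes = resolve_size_splits([1, -1], x.shape[1])        # -> [1, 2]
parts = np.split(x, np.cumsum(sizes)[:-1], axis=1)
print([p.tolist() for p in parts])  # [[[1], [4], [7]], [[2, 3], [5, 6], [8, 9]]]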
Must be positive int.\n\n Inputs:\n - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ...,x_M ..., x_R)`.\n\n Outputs:\n Tensor, a list of `num_split` Tensor objects with the shape :math:`((x_1, x_2, ..., x_{m_1}, ..., x_R),\n (x_1, x_2, ..., x_{m_2}, ..., x_R), ..., (x_1, x_2, ..., x_{m_N}, ..., x_R))`, :math:`x_M=\\sum_{i=1}^Nx_{m_i}`.\n The data type is the same with `input_x`.\n\n Raises:\n TypeError: If `input_x` is not a Tensor.\n TypeError: If `size_splits` is not a tuple or a list.\n TypeError: If element of `size_splits` is not an int.\n TypeError: If `split_dim` or `num_split` is not an int.\n ValueError: If rank of the `size_splits` is not equal to `num_split`.\n ValueError: If sum of the `size_splits` is not equal to the dimension of value along `split_dim`.\n ValueError: If `split_dim` is out of the range [-len(input_x.shape), len(input_x.shape)).\n ValueError: If the `num_split` is less than or equal to 0.\n\n Supported Platforms:\n ``Ascend``\n\n Examples:\n >>> input_x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.int32)\n >>> op = ops.SplitV(size_splits=[1, -1], split_dim=1, num_split=2)\n >>> output = op(input_x)\n >>> print(output)\n (Tensor(shape=[3, 1], dtype=Int32, value=\n [[1],\n [4],\n [7]]), Tensor(shape=[3, 2], dtype=Int32, value=\n [[2, 3],\n [5, 6],\n [8, 9]]))\n >>> input_x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.int32)\n >>> op = ops.SplitV(size_splits=[2, 1], split_dim=0, num_split=2)\n >>> output = op(input_x)\n >>> print(output)\n (Tensor(shape=[2, 3], dtype=Int32, value=\n [[1, 2, 3],\n [4, 5, 6]]), Tensor(shape=[1, 3], dtype=Int32, value=\n [[7, 8, 9]]))\n \"\"\"\n\n @prim_attr_register\n def __init__(self, size_splits, split_dim, num_split):\n \"\"\"Initialize SplitV\"\"\"\n validator.check_value_type(\"size_splits\", size_splits, [tuple, list], self.name)\n for elements_of_size_splits in size_splits:\n validator.check_value_type(\"elements of size_splits\", elements_of_size_splits, [int], self.name)\n if elements_of_size_splits != -1 and elements_of_size_splits < 1:\n raise ValueError(f\"For \\'{self.name}\\', all elements of size_splits must be positive (except at most \"\n f\"one default value -1), but got: {elements_of_size_splits}.\")\n validator.check_value_type(\"split_dim\", split_dim, [int], self.name)\n validator.check_value_type(\"num_split\", num_split, [int], self.name)\n validator.check_positive_int(num_split, \"num_split\", self.name)\n self.init_prim_io_names(inputs=['input_x'], outputs=['output'])\n\n\nclass ScatterElements(Primitive):\n \"\"\"\n ScatterElements takes three inputs data, updates, and indices of the same rank r >= 1\n and an optional attribute axis that identifies an axis of data (default is 0).\n The output of the operation is produced by creating a copy of the input data, and then updating its value to\n values specified by updates at specific index positions specified by indices.\n\n Args:\n axis (int): which axis to scatter, default is 0.\n\n Inputs:\n - **data** (Tensor) - The target tensor. 
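# --- Illustrative sketch (not part of the operator source above) ---
# A reference loop for the 2-D case of the ScatterElements semantics described
# here: along `axis` the index tensor redirects where each update lands, while
# the other coordinate is kept. `scatter_elements_2d` is a hypothetical helper.
import numpy as np

def scatter_elements_2d(data, indices, updates, axis=0):
    out = data.copy()
    for i in range(indices.shape[0]):
        for j in range(indices.shape[1]):
            if axis == 0:
                out[indices[i, j], j] = updates[i, j]
            else:
                out[i, indices[i, j]] = updates[i, j]
    return out

data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], np.float32)
indices = np.array([[1, 0, 2], [0, 2, 1]])
updates = np.zeros((2, 3), np.float32)
print(scatter_elements_2d(data, indices, updates))
# [[0. 0. 3.] [0. 5. 0.] [7. 0. 0.]], as in the first docstring example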
\n - **indices** (Tensor) - The index of input tensor whose data type is int32 or int64.\n - **updates** (Tensor) - The tensor to update the input tensor, has the same type as input,\n and updates.shape should be equal to indices.shape.\n\n Outputs:\n Tensor, has the same shape and type as `data`.\n\n Raises:\n TypeError: If dtype of `indices` is neither int32 nor int64.\n\n Supported Platforms:\n ``Ascend``\n\n Examples:\n >>> op = ops.ScatterElements(0)\n >>> data = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.float32)\n >>> indices = Tensor(np.array([[1, 0, 2], [0, 2, 1]]), mindspore.int32)\n >>> updates = Tensor(np.array([[0, 0, 0], [0, 0, 0]]), mindspore.float32)\n >>> output = op(data, indices, updates)\n >>> print(output)\n [[ 0.0 0.0 3.0]\n [ 0.0 5.0 0.0]\n [ 7.0 0.0 0.0]]\n >>> op = ops.ScatterElements(1)\n >>> data = Tensor(np.array([[1, 2, 3, 4, 5]]), mindspore.int32)\n >>> indices = Tensor(np.array([[2, 4]]), mindspore.int32)\n >>> updates = Tensor(np.array([[8, 8]]), mindspore.int32)\n >>> output = op(data, indices, updates)\n >>> print(output)\n [[ 1 2 8 4 8]]\n \"\"\"\n\n @prim_attr_register\n def __init__(self, axis=0):\n \"\"\"Initialize ScatterElements\"\"\"\n validator.check_value_type(\"axis\", axis, [int], self.name)\n self.init_prim_io_names(inputs=['data', 'indices', 'updates'], outputs=['y'])\n\n\nclass ExtractVolumePatches(Primitive):\n \"\"\"\n Extract patches from input and put them in the \"depth\" output dimension. 3D extension of extract_image_patches.\n\n Args:\n kernel_size (Union[int, tuple[int], list[int]]): A list of ints whose length is 3 or 5.\n The size of the sliding window for each dimension of input. Must be: [1, 1, k_d, k_h, k_w] or\n [k_d, k_h, k_w]. If k_d = k_h = k_w, you can enter an integer.\n strides (Union[int, tuple[int], list[int]]): A list of ints whose length is 3 or 5.\n How far the centers of two consecutive patches are in input. Must be: [1, 1, s_d, s_h, s_w] or\n [s_d, s_h, s_w]. If s_d = s_h = s_w, you can enter an integer.\n padding (str): A string from: \"SAME\", \"VALID\". The type of padding algorithm to use.\n\n Inputs:\n - **input_x** (Tensor) - A Tensor. 
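# --- Illustrative sketch (not part of the operator source above) ---
# The VALID/SAME output-shape rules stated in the ExtractVolumePatches docstring,
# written out as a small helper with integer arithmetic. `evp_output_shape` is a
# hypothetical name used only for illustration.
def evp_output_shape(x_shape, kernel_size, strides, padding):
    x_n, x_c, x_d, x_h, x_w = x_shape
    _, _, k_d, k_h, k_w = kernel_size
    _, _, s_d, s_h, s_w = strides
    if padding == "VALID":
        spatial = [1 + (x_d - k_d) // s_d, 1 + (x_h - k_h) // s_h, 1 + (x_w - k_w) // s_w]
    else:  # "SAME"
        spatial = [(x_d + s_d - 1) // s_d, (x_h + s_h - 1) // s_h, (x_w + s_w - 1) // s_w]
    return (x_n, k_d * k_h * k_w * x_c, *spatial)

# Matches the docstring example: a (1, 1, 3, 3, 3) input, a 2x2x2 kernel, unit
# strides and VALID padding give an output of shape (1, 8, 2, 2, 2).
print(evp_output_shape((1, 1, 3, 3, 3), (1, 1, 2, 2, 2), (1, 1, 1, 1, 1), "VALID"))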
Must be one of the following types: float16, float32.\n 5-D Tensor with shape :math:`(x_n, x_c, x_d, x_h, x_w)`.\n\n Outputs:\n Tensor, has the same type as input.\n If padding is VALID, the shape is :math:`(x_n, k_d * k_h * k_w * x_c, 1 + (x_d - k_d) / s_d,\n 1 + (x_h - k_h) / s_h, 1 + (x_w - k_w) / s_w)`; if padding is SAME, the shape is :math:`(\n x_n, k_d * k_h * k_w * x_c, (x_d + s_d - 1) / s_d, (x_h + s_h - 1) / s_h, (x_w + s_w - 1) / s_w)`.\n\n Raises:\n TypeError: If dtype of input_x is neither float16 nor float32.\n TypeError: If kernel_size or strides is not a list, a tuple or an int.\n TypeError: If input_x is not a tensor.\n TypeError: If padding is not str.\n ValueError: If the length of kernel_size is neither 3 nor 5 and kernel_size is not an integer.\n ValueError: If the length of strides is neither 3 nor 5 and strides is not an integer.\n ValueError: If padding is neither \"VALID\" nor \"SAME\".\n ValueError: If elements of kernel_size or strides are not positive integer.\n ValueError: If input_x is not a tensor in dimension 5.\n ValueError: If input_x's shape has zero.\n ValueError: If one of kernel_size or strides' first two numbers is not 1.\n ValueError: If padding = \"VALID\" and input - kernel_size is less than 0 in d, h or w dimension.\n ValueError: If padding = \"SAME\" and :math:`padding_needed = ((input_x + strides - 1) / strides - 1) *\n strides + kernel_size - input` is less than 0 in d, h or w dimension.\n ValueError: If x_h is not 1 or x_w is not 1 and x_w + padding_needed - k_w - s_w is less than 0.\n ValueError: If x_d * x_h * x_w is greater than 2048.\n\n Supported Platforms:\n ``Ascend``\n\n Example:\n >>> kernel_size = (1, 1, 2, 2, 2)\n >>> strides = (1, 1, 1, 1, 1)\n >>> padding = \"VALID\"\n >>> input_x = P.Reshape()(Tensor(np.arange(1, 28), mstype.float16), (1, 1, 3, 3, 3))\n >>> output_y = P.ExtractVolumePatches(kernel_size, strides, padding)(input_x)\n >>> print(output_y.shape)\n (1, 8, 2, 2, 2)\n \"\"\"\n\n @prim_attr_register\n def __init__(self, kernel_size, strides, padding):\n validator.check_value_type(\"kernel_size\", kernel_size, (int, list, tuple), self.name)\n validator.check_value_type(\"strides\", strides, (int, list, tuple), self.name)\n if isinstance(kernel_size, (list, tuple)):\n kernel_size = tuple(kernel_size)\n if len(kernel_size) == 5:\n validator.check_int(kernel_size[0], 1, Rel.EQ, \"kernel_size[0]\", self.name)\n validator.check_int(kernel_size[1], 1, Rel.EQ, \"kernel_size[1]\", self.name)\n if isinstance(strides, (list, tuple)):\n strides = tuple(strides)\n if len(strides) == 5:\n validator.check_int(strides[0], 1, Rel.EQ, \"strides[0]\", self.name)\n validator.check_int(strides[1], 1, Rel.EQ, \"strides[1]\", self.name)\n self.kernel_size = _check_3d_int_or_tuple(\"kernel_size\", kernel_size, self.name,\n allow_five=True, ret_five=True, greater_zero=True)\n self.strides = _check_3d_int_or_tuple(\"strides\", strides, self.name,\n allow_five=True, ret_five=True, greater_zero=True)\n self.add_prim_attr(\"kernel_size\", self.kernel_size)\n self.add_prim_attr(\"strides\", self.strides)\n validator.check_value_type(\"padding_dtype\", padding, (str), self.name)\n self.padding = validator.check_string(padding.upper(), ['VALID', 'SAME'], 'padding', self.name)\n self.add_prim_attr(\"padding\", self.padding)\n", "# Copyright 2021 Huawei Technologies Co., Ltd\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the 
License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ============================================================================\r\n\"\"\" test_pynative_heterogeneous \"\"\"\r\nimport numpy as np\r\nimport pytest\r\n\r\nfrom mindspore import context, Tensor\r\nfrom mindspore.nn import Cell\r\nimport mindspore.ops as ops\r\n\r\n\r\nclass MulRelu(Cell):\r\n def __init__(self):\r\n super(MulRelu, self).__init__()\r\n self.relu1 = ops.ReLU()\r\n self.relu2 = ops.ReLU()\r\n self.mul = ops.Mul()\r\n\r\n def construct(self, inp1, inp2):\r\n x1 = self.relu1(inp1)\r\n x2 = self.relu2(inp2)\r\n y = self.mul(x1, x2)\r\n return y\r\n\r\n\r\[email protected]\r\[email protected]_x86_ascend_training\r\[email protected]_onecard\r\ndef test_heterogeneous_default_ascend_prim_cpu():\r\n \"\"\"\r\n Feature: PyNative heterogeneous.\r\n Description: Default device target is Ascend, the relu1 set to CPU.\r\n Expectation: The output of device is equal to the output of heterogeneous.\r\n \"\"\"\r\n context.set_context(mode=context.PYNATIVE_MODE, device_target=\"Ascend\")\r\n net = MulRelu()\r\n inp1 = Tensor(np.random.randn(2, 2).astype(np.float32))\r\n inp2 = Tensor(np.random.randn(2, 2).astype(np.float32))\r\n output_device = net(inp1, inp2)\r\n net.relu1.add_prim_attr(\"primitive_target\", \"CPU\")\r\n output_heter = net(inp1, inp2)\r\n assert np.allclose(output_device.asnumpy(), output_heter.asnumpy(), 1e-6, 1e-6)\r\n\r\[email protected]\r\[email protected]_x86_ascend_training\r\[email protected]_onecard\r\ndef test_heterogeneous_default_cpu_prim_ascend():\r\n \"\"\"\r\n Feature: PyNative heterogeneous.\r\n Description: Default device target is CPU, the relu1 set to Ascend.\r\n Expectation: The output of device is equal to the output of heterogeneous.\r\n \"\"\"\r\n context.set_context(mode=context.PYNATIVE_MODE, device_target=\"CPU\")\r\n net = MulRelu()\r\n inp1 = Tensor(np.random.randn(2, 2).astype(np.float32))\r\n inp2 = Tensor(np.random.randn(2, 2).astype(np.float32))\r\n output_device = net(inp1, inp2)\r\n net.relu1.add_prim_attr(\"primitive_target\", \"Ascend\")\r\n output_heter = net(inp1, inp2)\r\n assert np.allclose(output_device.asnumpy(), output_heter.asnumpy(), 1e-6, 1e-6)\r\n\r\[email protected]\r\[email protected]_x86_gpu_training\r\[email protected]_onecard\r\ndef test_heterogeneous_default_gpu_prim_cpu():\r\n \"\"\"\r\n Feature: PyNative heterogeneous.\r\n Description: Default device target is GPU, the relu1 set to CPU.\r\n Expectation: The output of device is equal to the output of heterogeneous.\r\n \"\"\"\r\n context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\")\r\n net = MulRelu()\r\n inp1 = Tensor(np.random.randn(2, 2).astype(np.float32))\r\n inp2 = Tensor(np.random.randn(2, 2).astype(np.float32))\r\n output_device = net(inp1, inp2)\r\n net.relu1.add_prim_attr(\"primitive_target\", \"CPU\")\r\n output_heter = net(inp1, inp2)\r\n assert np.allclose(output_device.asnumpy(), output_heter.asnumpy(), 1e-6, 1e-6)\r\n\r\[email protected]\r\[email protected]_x86_gpu_training\r\[email protected]_onecard\r\ndef test_heterogeneous_default_cpu_prim_gpu():\r\n \"\"\"\r\n Feature: PyNative heterogeneous.\r\n Description: Default device 
target is CPU, the relu1 set to GPU.\r\n Expectation: The output of device is equal to the output of heterogeneous.\r\n \"\"\"\r\n context.set_context(mode=context.PYNATIVE_MODE, device_target=\"CPU\")\r\n net = MulRelu()\r\n inp1 = Tensor(np.random.randn(2, 2).astype(np.float32))\r\n inp2 = Tensor(np.random.randn(2, 2).astype(np.float32))\r\n output_device = net(inp1, inp2)\r\n net.relu1.add_prim_attr(\"primitive_target\", \"GPU\")\r\n output_heter = net(inp1, inp2)\r\n assert np.allclose(output_device.asnumpy(), output_heter.asnumpy(), 1e-6, 1e-6)\r\n", "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\nimport numpy as np\nimport pytest\n\nimport mindspore.context as context\nimport mindspore.nn as nn\nimport mindspore as ms\nfrom mindspore import Tensor\nfrom mindspore.ops import operations as P\nfrom mindspore.ops import composite as C\n\n\nclass L2LossNet(nn.Cell):\n def __init__(self):\n super(L2LossNet, self).__init__()\n self.l2_loss = P.L2Loss()\n\n def construct(self, x):\n return self.l2_loss(x)\n\[email protected]\[email protected]_x86_cpu\[email protected]_onecard\ndef test_l2loss_pynative_fp32_2x2():\n context.set_context(mode=context.PYNATIVE_MODE, device_target=\"CPU\")\n error = 1e-4\n x = Tensor(np.array([[1., 2.], [3., 4.]]), ms.float32)\n expect = np.array(15, np.float32)\n output = P.L2Loss()(x)\n diff = output.asnumpy() - expect\n assert np.all(diff < error)\n\[email protected]\[email protected]_x86_cpu\[email protected]_onecard\ndef test_l2loss_pynative_fp16_2x2():\n context.set_context(mode=context.PYNATIVE_MODE, device_target=\"CPU\")\n error = 1e-4\n x = Tensor(np.array([[1., 2.], [3., 4.]]), ms.float16)\n expect = np.array(15, np.float16)\n output = P.L2Loss()(x)\n diff = output.asnumpy() - expect\n assert np.all(diff < error)\n\[email protected]\[email protected]_x86_cpu\[email protected]_onecard\ndef test_l2loss_pynative_fp32_1x4():\n context.set_context(mode=context.PYNATIVE_MODE, device_target=\"CPU\")\n error = 1e-4\n x = Tensor(np.array([1., 2., 3., 4.]), ms.float32)\n expect = np.array(15, np.float32)\n output = P.L2Loss()(x)\n diff = output.asnumpy() - expect\n assert np.all(diff < error)\n\[email protected]\[email protected]_x86_cpu\[email protected]_onecard\ndef test_l2loss_pynative_fp16_1x4():\n context.set_context(mode=context.PYNATIVE_MODE, device_target=\"CPU\")\n error = 1e-4\n x = Tensor(np.array([1., 2., 3., 4.]), ms.float16)\n expect = np.array(15, np.float16)\n output = P.L2Loss()(x)\n diff = output.asnumpy() - expect\n assert np.all(diff < error)\n\[email protected]\[email protected]_x86_cpu\[email protected]_onecard\ndef test_l2loss_graph_fp32_1x4():\n context.set_context(mode=context.GRAPH_MODE, device_target=\"CPU\")\n error = 1e-4\n x = Tensor(np.array([1., 2., 3., 4.]), ms.float32)\n expect = np.array(15, np.float32)\n l2_loss = L2LossNet()\n output = l2_loss(x)\n diff = output.asnumpy() - expect\n assert 
np.all(diff < error)\n\[email protected]\[email protected]_x86_cpu\[email protected]_onecard\ndef test_l2loss_graph_fp16_1x4():\n context.set_context(mode=context.GRAPH_MODE, device_target=\"CPU\")\n error = 1e-4\n x = Tensor(np.array([1., 2., 3., 4.]), ms.float16)\n expect = np.array(15, np.float16)\n l2_loss = L2LossNet()\n output = l2_loss(x)\n diff = output.asnumpy() - expect\n assert np.all(diff < error)\n\nclass GradNet(nn.Cell):\n def __init__(self, net):\n super(GradNet, self).__init__()\n self.net = net\n self.grad_op = C.GradOperation(get_all=True)\n\n def construct(self, x):\n gradient_function = self.grad_op(self.net)\n return gradient_function(x)\n\[email protected]\[email protected]_x86_cpu\[email protected]_onecard\ndef test_l2loss_grad_fp32():\n context.set_context(mode=context.GRAPH_MODE, device_target=\"CPU\")\n x = Tensor(np.array([2.4, 3.2, 1.2, 5.9, 9.]).astype(np.float32))\n error = 1e-4\n net = L2LossNet()\n output = GradNet(net)(x)[0]\n expect = x\n diff = output.asnumpy() - expect\n assert np.all(diff < error)\n\[email protected]\[email protected]_x86_cpu\[email protected]_onecard\ndef test_l2loss_grad_fp16():\n context.set_context(mode=context.GRAPH_MODE, device_target=\"CPU\")\n x = Tensor(np.array([[2.4, 3.2, 4.8], [1.2, 5.9, 9.]]).astype(np.float16))\n error = 1e-4\n net = L2LossNet()\n output = GradNet(net)(x)[0]\n expect = x\n diff = output.asnumpy() - expect\n assert np.all(diff < error)\n", "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\nimport os\nimport sys\nimport tempfile\nimport time\nimport shutil\nimport glob\nimport json\nimport numpy as np\nimport pytest\nimport mindspore.context as context\nimport mindspore.nn as nn\nfrom mindspore import Tensor\nfrom mindspore.ops import operations as P\nfrom dump_test_utils import generate_dump_json, generate_dump_json_with_overflow, check_dump_structure\nfrom tests.security_utils import security_off_wrap\n\n\nclass Net(nn.Cell):\n def __init__(self):\n super(Net, self).__init__()\n self.add = P.Add()\n\n def construct(self, x_, y_):\n return self.add(x_, y_)\n\n\nx = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)\ny = np.array([[7, 8, 9], [10, 11, 12]]).astype(np.float32)\n\n\ndef run_async_dump(test_name):\n context.set_context(mode=context.GRAPH_MODE, device_target=\"Ascend\")\n with tempfile.TemporaryDirectory(dir='/tmp') as tmp_dir:\n dump_path = os.path.join(tmp_dir, 'async_dump')\n dump_config_path = os.path.join(tmp_dir, 'async_dump.json')\n generate_dump_json(dump_path, dump_config_path, test_name)\n os.environ['MINDSPORE_DUMP_CONFIG'] = dump_config_path\n dump_file_path = os.path.join(dump_path, 'rank_0', 'Net', '0', '0')\n if os.path.isdir(dump_path):\n shutil.rmtree(dump_path)\n add = Net()\n add(Tensor(x), Tensor(y))\n for _ in range(3):\n if not os.path.exists(dump_file_path):\n time.sleep(2)\n check_dump_structure(dump_path, dump_config_path, 1, 1, 1)\n assert 
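# The wildcards in "Add.Add-op*.*.*.*.output.0.ND.npy" stand for run-specific
# fields of the dump file name (op id and, typically, task id, stream id and a
# timestamp), so the checks glob for the single match and np.load it. A sketch
# of that lookup, assuming dump_file_path exists and holds one dump file:
#     import glob, os
#     import numpy as np
#     matches = glob.glob(os.path.join(dump_file_path, "Add.Add-op*.output.0.ND.npy"))
#     dumped = np.load(os.path.realpath(matches[0]))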
len(os.listdir(dump_file_path)) == 1\n    # check content of the generated dump data\n    if test_name == \"test_async_dump_npy\":\n        output_name = \"Add.Add-op*.*.*.*.output.0.ND.npy\"\n        output_path = glob.glob(os.path.join(dump_file_path, output_name))[0]\n        real_path = os.path.realpath(output_path)\n        output = np.load(real_path)\n        expect = np.array([[8, 10, 12], [14, 16, 18]], np.float32)\n        assert np.array_equal(output, expect)\n    del os.environ['MINDSPORE_DUMP_CONFIG']\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_onecard\n@security_off_wrap\ndef test_async_dump_npy():\n    \"\"\"\n    Feature: async dump on Ascend\n    Description: test async dump with file_format = \"npy\"\n    Expectation: dump data are generated as npy file format\n    \"\"\"\n    run_async_dump(\"test_async_dump_npy\")\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_onecard\n@security_off_wrap\ndef test_async_dump_bin():\n    \"\"\"\n    Feature: async dump on Ascend in bin format\n    Description: test async dump with file_format = \"bin\"\n    Expectation: dump data are generated as protobuf file format (suffix with timestamp)\n    \"\"\"\n    run_async_dump(\"test_async_dump_bin\")\n\n\ndef run_overflow_dump(test_name):\n    \"\"\"Run async dump and generate overflow\"\"\"\n    if sys.platform != 'linux':\n        return\n    overflow_x = np.array([60000, 60000]).astype(np.float16)\n    with tempfile.TemporaryDirectory(dir='/tmp') as tmp_dir:\n        dump_path = os.path.join(tmp_dir, 'overflow_dump')\n        dump_config_path = os.path.join(tmp_dir, 'overflow_dump.json')\n        generate_dump_json_with_overflow(dump_path, dump_config_path, test_name, 3)\n        os.environ['MINDSPORE_DUMP_CONFIG'] = dump_config_path\n        if os.path.isdir(dump_path):\n            shutil.rmtree(dump_path)\n        add = Net()\n        add(Tensor(overflow_x), Tensor(overflow_x))\n        exe_graph_path = os.path.join(dump_path, 'rank_0', 'Net', '0', '0')\n        for _ in range(5):\n            if not os.path.exists(exe_graph_path):\n                time.sleep(2)\n        check_dump_structure(dump_path, dump_config_path, 1, 1, 1)\n        # check that overflow dump generates exactly two files with the expected naming format\n        assert len(os.listdir(exe_graph_path)) == 2\n        output_path = glob.glob(os.path.join(exe_graph_path, \"Add.Add-op*.*.*.*.output.0.ND.npy\"))[0]\n        overflow_path = glob.glob(os.path.join(exe_graph_path, \"Opdebug.Node_OpDebug.*.*.*.output.0.json\"))[0]\n        assert output_path\n        assert overflow_path\n        # check content of the output tensor\n        real_path = os.path.realpath(output_path)\n        output = np.load(real_path)\n        expect = np.array([65504, 65504], np.float16)\n        assert np.array_equal(output, expect)\n        # check content of opdebug info json file\n        with open(overflow_path, 'rb') as json_file:\n            data = json.load(json_file)\n            assert data\n        del os.environ['MINDSPORE_DUMP_CONFIG']\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_onecard\n@security_off_wrap\ndef test_ascend_overflow_dump():\n    \"\"\"\n    Feature: Overflow Dump\n    Description: Test overflow dump\n    Expectation: Overflow occurs, and the overflow dump file is in the correct format\n    \"\"\"\n    context.set_context(mode=context.GRAPH_MODE, device_target='Ascend')\n    run_overflow_dump(\"test_async_dump_npy\")\n", "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# 
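# On the overflow case above: 60000 + 60000 exceeds the float16 maximum of
# 65504. Plain NumPy overflows to inf, while the dumped device tensor is
# expected to read 65504, i.e. the Ascend kernel saturates at the fp16 max.
# NumPy-only check of the arithmetic:
#     import numpy as np
#     assert np.isinf(np.float16(60000) + np.float16(60000))  # IEEE overflow
#     assert np.finfo(np.float16).max == 65504.0              # the dumped value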
http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\" test graph fallback \"\"\"\nimport pytest\nimport numpy as np\nimport mindspore.nn as nn\nfrom mindspore import Tensor, ms_function, context\n\ncontext.set_context(mode=context.GRAPH_MODE)\n\n\[email protected]\[email protected]_x86_gpu_training\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_onecard\ndef test_np_print_1():\n \"\"\"\n Feature: JIT Fallback\n Description: Support print.\n Expectation: No exception.\n \"\"\"\n @ms_function\n def np_print():\n x = np.array([1, 2, 3, 4, 5])\n print(\"x: \", x)\n return Tensor(x)\n assert np.all(np_print().asnumpy() == np.array([1, 2, 3, 4, 5]))\n\n\[email protected]\[email protected]_x86_gpu_training\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_onecard\ndef test_np_print_2():\n \"\"\"\n Feature: JIT Fallback\n Description: Support print.\n Expectation: No exception.\n \"\"\"\n class PrintNet(nn.Cell):\n def construct(self):\n x = np.array([1, 2, 3, 4, 5])\n print(\"x: \", x)\n return Tensor(x)\n\n net = PrintNet()\n res = net()\n print(\"res: \", res)\n\n\[email protected]\[email protected]_x86_gpu_training\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_onecard\ndef test_tensor_print_1():\n \"\"\"\n Feature: JIT Fallback\n Description: Support print.\n Expectation: No exception.\n \"\"\"\n @ms_function\n def np_print():\n x = np.array([1, 2, 3, 4, 5])\n print(\"Tensor(x): \", Tensor(x))\n return Tensor(x)\n assert np.all(np_print().asnumpy() == np.array([1, 2, 3, 4, 5]))\n\n\[email protected]\[email protected]_x86_gpu_training\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_onecard\ndef test_tensor_print_2():\n \"\"\"\n Feature: JIT Fallback\n Description: Support print.\n Expectation: No exception.\n \"\"\"\n class PrintNet(nn.Cell):\n def construct(self):\n x = np.array([1, 2, 3, 4, 5])\n print(\"Tensor(x): \", Tensor(x))\n return Tensor(x)\n\n net = PrintNet()\n res = net()\n print(\"res: \", res)\n\n\[email protected]\[email protected]_x86_gpu_training\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_onecard\ndef test_print_cnode_1():\n \"\"\"\n Feature: JIT Fallback\n Description: Support print.\n Expectation: No exception.\n \"\"\"\n @ms_function\n def print_func(x, y):\n res_sum = x + y\n print(\"res_sum: \", res_sum)\n return res_sum\n\n x = Tensor(np.array([1, 2, 3, 4, 5]))\n y = Tensor(np.array([1, 2, 3, 4, 5]))\n res = print_func(x, y)\n print(\"res: \", res)\n\n\[email protected]\[email protected]_x86_gpu_training\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_onecard\ndef test_print_cnode_2():\n \"\"\"\n Feature: JIT Fallback\n Description: Support print.\n Expectation: No exception.\n \"\"\"\n @ms_function\n def print_func():\n x = Tensor(np.array([1, 2, 3, 4, 5]))\n y = Tensor(np.array([1, 2, 3, 4, 5]))\n res_sum = x + y\n print(\"res_sum: \", res_sum)\n return res_sum\n\n res = print_func()\n print(\"res: 
\", res)\n\n\[email protected]\[email protected]_x86_gpu_training\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_onecard\ndef test_print_cnode_3():\n \"\"\"\n Feature: JIT Fallback\n Description: Support print.\n Expectation: No exception.\n \"\"\"\n @ms_function\n def print_func():\n x = np.array([1, 2, 3, 4, 5])\n y = np.array([1, 2, 3, 4, 5])\n res_sum = x + y\n print(\"res_sum: \", res_sum)\n return Tensor(res_sum)\n\n res = print_func()\n print(\"res: \", res)\n\n\[email protected]\[email protected]_x86_gpu_training\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_onecard\ndef test_print_validate_tuple():\n \"\"\"\n Feature: JIT Fallback\n Description: Support print.\n Expectation: No exception.\n \"\"\"\n @ms_function\n def print_func():\n x = Tensor(np.array([1, 2, 3, 4, 5]))\n y = Tensor(np.array([1, 2, 3, 4, 5]))\n tensor_sum = x + y\n print(\"tensor_sum: \", tensor_sum)\n np_x = np.array([1, 2, 3, 4, 5])\n np_y = np.array([1, 2, 3, 4, 5])\n np_sum = np_x + np_y\n print(\"np_sum: \", np_sum)\n return tensor_sum, np_sum\n\n with pytest.raises(RuntimeError) as err:\n res1, res2 = print_func()\n print(\"res1: \", res1)\n print(\"res2: \", res2)\n assert \"Should not use Python object in runtime\" in str(err.value)\n\n\[email protected]\[email protected]_x86_gpu_training\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_onecard\ndef test_print_validate():\n \"\"\"\n Feature: JIT Fallback\n Description: Support print.\n Expectation: No exception.\n \"\"\"\n @ms_function\n def print_func():\n np_x = np.array([1, 2, 3, 4, 5])\n np_y = np.array([1, 2, 3, 4, 5])\n np_sum = np_x + np_y\n print(\"np_sum: \", np_sum)\n return np_sum\n\n with pytest.raises(RuntimeError) as err:\n res = print_func()\n print(\"res: \", res)\n assert \"Should not use Python object in runtime\" in str(err.value)\n", "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\nimport numpy as np\nimport mindspore.nn as nn\nimport mindspore.context as context\nfrom mindspore import Tensor\nfrom mindspore.ops.operations import _inner_ops as P\n\ncontext.set_context(mode=context.PYNATIVE_MODE, device_target=\"Ascend\")\n\n\nclass Net(nn.Cell):\n def __init__(self, index=0, shapes_and_types=None):\n super(Net, self).__init__()\n shapes_and_types.reverse()\n self.init = P.StackInit(index)\n self.push = P.StackPush(index)\n self.pop = [P.StackPop(index, shape, dtype) for (shape, dtype) in shapes_and_types]\n self.destroy = P.StackDestroy(index)\n\n def construct(self, x1, x2, x3):\n self.init()\n self.push(x1)\n self.push(x2)\n self.push(x3)\n y1 = self.pop[0]()\n y2 = self.pop[1]()\n y3 = self.pop[2]()\n self.destroy()\n return y1, y2, y3\n\n\nclass NetTwoStack(nn.Cell):\n def __init__(self, index=0, shapes_and_types=None):\n super(NetTwoStack, self).__init__()\n 
self.init_0 = P.StackInit(index)\n        self.push_0 = P.StackPush(index)\n        self.pop_0 = [P.StackPop(index, shape, dtype) for (shape, dtype) in shapes_and_types]\n        self.destroy_0 = P.StackDestroy(index)\n\n        index += 1\n        self.init_1 = P.StackInit(index)\n        self.push_1 = P.StackPush(index)\n        self.pop_1 = [P.StackPop(index, shape, dtype) for (shape, dtype) in shapes_and_types]\n        self.destroy_1 = P.StackDestroy(index)\n\n    def construct(self, x1, x2, x3):\n        self.init_0()\n        self.init_1()\n\n        self.push_0(x1)\n        self.push_1(x3)\n        y1 = self.pop_0[0]()\n        z1 = self.pop_1[2]()\n        self.push_0(x2)\n        self.push_0(x3)\n        self.push_1(x1)\n        self.push_1(x2)\n        y2 = self.pop_0[2]()\n        z2 = self.pop_1[1]()\n        y3 = self.pop_0[1]()\n        z3 = self.pop_1[0]()\n\n        self.destroy_0()\n        self.destroy_1()\n        return y1, y2, y3, z1, z2, z3\n\n\ndef test_net():\n    x1 = Tensor(np.random.randn(4,).astype(np.float64))\n    x2 = Tensor(np.random.randn(4, 6).astype(np.float32))\n    x3 = Tensor(np.random.randint(100, size=(3, 4, 5)).astype(np.int32))\n\n    shapes_and_types = []\n    shapes_and_types.append((x1.shape, x1.dtype))\n    shapes_and_types.append((x2.shape, x2.dtype))\n    shapes_and_types.append((x3.shape, x3.dtype))\n\n    net = Net(2018, shapes_and_types)\n    y1, y2, y3 = net(x1, x2, x3)\n    print(x1)\n    print(x2)\n    print(x3)\n    print(y1)\n    print(y2)\n    print(y3)\n    assert np.array_equal(y1.asnumpy(), x3.asnumpy())\n    assert np.array_equal(y2.asnumpy(), x2.asnumpy())\n    assert np.array_equal(y3.asnumpy(), x1.asnumpy())\n\n\ndef test_net_two_stack():\n    x1 = Tensor(np.random.randn(4,).astype(np.float64))\n    x2 = Tensor(np.random.randn(4, 6).astype(np.float32))\n    x3 = Tensor(np.random.randint(100, size=(3, 4, 5)).astype(np.int32))\n\n    shapes_and_types = []\n    shapes_and_types.append((x1.shape, x1.dtype))\n    shapes_and_types.append((x2.shape, x2.dtype))\n    shapes_and_types.append((x3.shape, x3.dtype))\n\n    net = NetTwoStack(1998, shapes_and_types)\n    y1, y2, y3, z1, z2, z3 = net(x1, x2, x3)\n    print(x1)\n    print(x2)\n    print(x3)\n    print(y1)\n    print(y2)\n    print(y3)\n    assert np.array_equal(y1.asnumpy(), x1.asnumpy())\n    assert np.array_equal(y2.asnumpy(), x3.asnumpy())\n    assert np.array_equal(y3.asnumpy(), x2.asnumpy())\n\n    assert np.array_equal(z1.asnumpy(), x3.asnumpy())\n    assert np.array_equal(z2.asnumpy(), x2.asnumpy())\n    assert np.array_equal(z3.asnumpy(), x1.asnumpy())\n", "# Copyright 2019 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\nimport numpy as np\nimport pytest\n\nimport mindspore.context as context\nimport mindspore.nn as nn\nfrom mindspore import Tensor\nfrom mindspore.ops.operations import _grad_ops as G\n\ncontext.set_context(mode=context.GRAPH_MODE, device_target='CPU')\n\n\nclass Net(nn.Cell):\n    def __init__(self):\n        super(Net, self).__init__()\n        self.bias_add_grad = G.BiasAddGrad()\n\n    def construct(self, dout):\n        return self.bias_add_grad(dout)\n\n\[email protected]\[email protected]_x86_cpu\[email protected]_onecard\ndef test_bias_add_grad2d():\n    
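# BiasAddGrad reduces the incoming gradient over every axis except the
# channel axis, so an all-ones dout of shape [2, 3] yields [2., 2., 2.],
# [2, 3, 4, 4] yields 2 * 4 * 4 = 32 per channel, and [2, 3, 4, 4, 2]
# yields 2 * 4 * 4 * 2 = 64. NumPy equivalent of the 2-D (channels-last)
# case:
#     import numpy as np
#     assert (np.ones([2, 3]).sum(axis=0) == 2.0).all()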
dout = np.ones([2, 3]).astype(np.float32)\n bias_add_grad = Net()\n output = bias_add_grad(Tensor(dout))\n expect_output = np.array([2., 2., 2.]).astype(np.float32)\n print(output.asnumpy())\n assert np.all(output.asnumpy() == expect_output), \"bias_add_grad execute failed, please check current code commit\"\n\n\[email protected]\[email protected]_x86_cpu\[email protected]_onecard\ndef test_bias_add_grad4d():\n dout = np.ones([2, 3, 4, 4]).astype(np.float32)\n bias_add_grad = Net()\n output = bias_add_grad(Tensor(dout))\n expect_output = np.array([32., 32., 32.]).astype(np.float32)\n print(output.asnumpy())\n assert np.all(output.asnumpy() == expect_output), \"bias_add_grad execute failed, please check current code commit\"\n\[email protected]\[email protected]_x86_cpu\[email protected]_onecard\ndef test_bias_add_grad5d():\n dout = np.ones([2, 3, 4, 4, 2]).astype(np.float32)\n bias_add_grad = Net()\n output = bias_add_grad(Tensor(dout))\n expect_output = np.array([64., 64., 64.]).astype(np.float32)\n print(output.asnumpy())\n assert np.all(output.asnumpy() == expect_output), \"bias_add_grad execute failed, please check current code commit\"\n", "# Copyright 2019 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\nimport numpy as np\nimport pytest\n\nimport mindspore.context as context\nfrom mindspore.common.tensor import Tensor\nfrom mindspore.common.parameter import ParameterTuple\nfrom mindspore.nn import BatchNorm2d, BatchNorm1d, SGD\nfrom mindspore.nn import Cell\nfrom mindspore.ops import composite as C\n\n\nclass Batchnorm_Net(Cell):\n def __init__(self, c, weight, bias, moving_mean, moving_var_init, use_batch_statistics=None):\n super(Batchnorm_Net, self).__init__()\n self.bn = BatchNorm2d(c, eps=0.00001, momentum=0.1, beta_init=bias, gamma_init=weight,\n moving_mean_init=moving_mean, moving_var_init=moving_var_init,\n use_batch_statistics=use_batch_statistics)\n\n def construct(self, input_data):\n x = self.bn(input_data)\n return x\n\n\nclass Grad(Cell):\n def __init__(self, network):\n super(Grad, self).__init__()\n self.grad = C.GradOperation(get_all=True, sens_param=True)\n self.network = network\n\n def construct(self, input_data, sens):\n gout = self.grad(self.network)(input_data, sens)\n return gout\n\n\[email protected]\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_train_forward():\n x = np.array([[\n [[1, 3, 3, 5], [2, 4, 6, 8], [3, 6, 7, 7], [4, 3, 8, 2]],\n [[5, 7, 6, 3], [3, 5, 6, 7], [9, 4, 2, 5], [7, 5, 8, 1]]]]).astype(np.float32)\n expect_output = np.array([[[[-0.6059, 0.3118, 0.3118, 1.2294],\n [-0.1471, 0.7706, 1.6882, 2.6059],\n [0.3118, 1.6882, 2.1471, 2.1471],\n [0.7706, 0.3118, 2.6059, -0.1471]],\n\n [[0.9119, 1.8518, 1.3819, -0.0281],\n [-0.0281, 0.9119, 1.3819, 1.8518],\n [2.7918, 0.4419, -0.4981, 0.9119],\n [1.8518, 0.9119, 2.3218, -0.9680]]]]).astype(np.float32)\n\n weight = np.ones(2).astype(np.float32)\n bias = 
np.ones(2).astype(np.float32)\n moving_mean = np.ones(2).astype(np.float32)\n moving_var_init = np.ones(2).astype(np.float32)\n error = np.ones(shape=[1, 2, 4, 4]) * 1.0e-4\n\n context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\")\n bn_net = Batchnorm_Net(2, Tensor(weight), Tensor(bias),\n Tensor(moving_mean), Tensor(moving_var_init))\n bn_net.set_train()\n output = bn_net(Tensor(x))\n diff = output.asnumpy() - expect_output\n assert np.all(diff < error)\n assert np.all(-diff < error)\n\n context.set_context(mode=context.GRAPH_MODE, device_target=\"GPU\")\n bn_net = Batchnorm_Net(2, Tensor(weight), Tensor(bias),\n Tensor(moving_mean), Tensor(moving_var_init))\n bn_net.set_train()\n output = bn_net(Tensor(x))\n diff = output.asnumpy() - expect_output\n assert np.all(diff < error)\n assert np.all(-diff < error)\n\n context.set_context(mode=context.GRAPH_MODE, device_target=\"GPU\")\n bn_net = Batchnorm_Net(2, Tensor(weight), Tensor(bias),\n Tensor(moving_mean), Tensor(moving_var_init))\n bn_net.set_train(False)\n output = bn_net(Tensor(x))\n\n context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\")\n bn_net = Batchnorm_Net(2, Tensor(weight), Tensor(bias),\n Tensor(moving_mean), Tensor(moving_var_init))\n bn_net.set_train(False)\n output = bn_net(Tensor(x))\n\n\[email protected]\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_train_backward():\n x = np.array([[\n [[1, 3, 3, 5], [2, 4, 6, 8], [3, 6, 7, 7], [4, 3, 8, 2]],\n [[5, 7, 6, 3], [3, 5, 6, 7], [9, 4, 2, 5], [7, 5, 8, 1]]]]).astype(np.float32)\n grad = np.array([[\n [[1, 2, 7, 1], [4, 2, 1, 3], [1, 6, 5, 2], [2, 4, 3, 2]],\n [[9, 4, 3, 5], [1, 3, 7, 6], [5, 7, 9, 9], [1, 4, 6, 8]]]]).astype(np.float32)\n expect_output = np.array([[[[-0.69126546, -0.32903028, 1.9651246, -0.88445705],\n [0.6369296, -0.37732816, -0.93275493, -0.11168876],\n [-0.7878612, 1.3614, 0.8542711, -0.52222186],\n [-0.37732816, 0.5886317, -0.11168876, -0.28073236]],\n\n [[1.6447213, -0.38968924, -1.0174079, -0.55067265],\n [-2.4305856, -1.1751484, 0.86250514, 0.5502673],\n [0.39576983, 0.5470243, 1.1715001, 1.6447213],\n [-1.7996241, -0.7051701, 0.7080077, 0.5437813]]]]).astype(np.float32)\n\n weight = Tensor(np.ones(2).astype(np.float32))\n bias = Tensor(np.ones(2).astype(np.float32))\n moving_mean = Tensor(np.ones(2).astype(np.float32))\n moving_var_init = Tensor(np.ones(2).astype(np.float32))\n error = np.ones(shape=[1, 2, 4, 4]) * 1.0e-6\n\n context.set_context(mode=context.GRAPH_MODE, device_target=\"GPU\")\n bn_net = Batchnorm_Net(2, weight, bias, moving_mean, moving_var_init)\n bn_net.set_train()\n bn_grad = Grad(bn_net)\n output = bn_grad(Tensor(x), Tensor(grad))\n diff = output[0].asnumpy() - expect_output\n assert np.all(diff < error)\n assert np.all(-diff < error)\n\n\[email protected]\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_train_stats_false_forward():\n x = np.array([[\n [[1, 3, 3, 5], [2, 4, 6, 8], [3, 6, 7, 7], [4, 3, 8, 2]],\n [[5, 7, 6, 3], [3, 5, 6, 7], [9, 4, 2, 5], [7, 5, 8, 1]]]]).astype(np.float32)\n\n expect_output = np.array([[[[3.707105, 5.121315, 5.121315, 6.535525],\n [4.41421, 5.8284197, 7.24263, 8.656839],\n [5.121315, 7.24263, 7.9497347, 7.9497347],\n [5.8284197, 5.121315, 8.656839, 4.41421]],\n\n [[6.535525, 7.9497347, 7.24263, 5.121315],\n [5.121315, 6.535525, 7.24263, 7.9497347],\n [9.363945, 5.8284197, 4.41421, 6.535525],\n [7.9497347, 6.535525, 8.656839, 3.707105]]]]).astype(np.float32)\n\n weight = np.ones(2).astype(np.float32)\n bias = 
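# The stats-false expectations below follow the inference form of batch
# norm, y = gamma * (x - moving_mean) / sqrt(moving_var + eps) + beta;
# with gamma = 1, beta = 3, moving_mean = 0, moving_var = 2 and
# eps = 1e-5, the input value 1 maps to 1 / sqrt(2.00001) + 3 ~= 3.707105,
# matching the first entry of expect_output. One-line NumPy check:
#     import numpy as np
#     assert abs(1 / np.sqrt(2 + 1e-5) + 3 - 3.707105) < 1e-4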
np.ones(2).astype(np.float32) * 3\n moving_mean = np.zeros(2).astype(np.float32)\n moving_var_init = np.ones(2).astype(np.float32) * 2\n error = np.ones(shape=[1, 2, 4, 4]) * 1.0e-4\n use_batch_statistics = False\n\n context.set_context(mode=context.PYNATIVE_MODE, device_target=\"GPU\")\n bn_net = Batchnorm_Net(2, Tensor(weight), Tensor(bias), Tensor(moving_mean),\n Tensor(moving_var_init), use_batch_statistics)\n bn_net.set_train()\n output = bn_net(Tensor(x))\n diff = output.asnumpy() - expect_output\n assert np.all(diff < error)\n assert np.all(-diff < error)\n\n context.set_context(mode=context.GRAPH_MODE, device_target=\"GPU\")\n bn_net = Batchnorm_Net(2, Tensor(weight), Tensor(bias), Tensor(moving_mean),\n Tensor(moving_var_init), use_batch_statistics)\n bn_net.set_train()\n output = bn_net(Tensor(x))\n diff = output.asnumpy() - expect_output\n assert np.all(diff < error)\n assert np.all(-diff < error)\n\n\[email protected]\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_infer_backward():\n expect_output = np.array([[[[-0.3224156, -0.3840524], [1.1337637, -1.0998858]],\n [[-0.1724273, -0.877854], [0.0422135, 0.5828123]],\n [[-1.1006137, 1.1447179], [0.9015862, 0.5024918]]]]).astype(np.float32)\n np.random.seed(1)\n x_np = np.random.randn(1, 3, 2, 2).astype(np.float32)\n input_grad_np = np.random.randn(1, 3, 2, 2).astype(np.float32)\n ms_input = Tensor(x_np)\n weight = Tensor(np.ones(3).astype(np.float32))\n bias = Tensor(np.zeros(3).astype(np.float32))\n moving_mean = Tensor(np.zeros(3).astype(np.float32))\n moving_var_init = Tensor(np.ones(3).astype(np.float32))\n context.set_context(mode=context.GRAPH_MODE, device_target=\"GPU\")\n ms_net = Batchnorm_Net(3, weight, bias, moving_mean, moving_var_init)\n ms_net.set_train(False)\n ms_grad = Grad(ms_net)\n ms_out_grad_np = ms_grad(ms_input, Tensor(input_grad_np))\n assert np.allclose(ms_out_grad_np[0].asnumpy(), expect_output)\n\n\nclass BatchNorm1d_Net(Cell):\n def __init__(self, affine=True, gamma_init='ones', beta_init='zeros', moving_mean_init='zeros',\n moving_var_init='ones', use_batch_statistics=None):\n super(BatchNorm1d_Net, self).__init__()\n self.bn1 = BatchNorm1d(2, eps=0.00001, momentum=0.1, affine=affine, gamma_init=gamma_init, beta_init=beta_init,\n moving_mean_init=moving_mean_init, moving_var_init=moving_var_init,\n use_batch_statistics=use_batch_statistics)\n\n def construct(self, x):\n x = self.bn1(x)\n return x\n\nclass GradByListNet(Cell):\n def __init__(self, network):\n super(GradByListNet, self).__init__()\n self.grad = C.GradOperation(get_all=True, sens_param=True, get_by_list=True)\n self.network = network\n self.params = ParameterTuple(network.trainable_params())\n\n def construct(self, x, dy):\n grad_op = self.grad(self.network, self.params)\n output = grad_op(x, dy)\n return output\n\n\[email protected]\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_1d_train():\n context.set_context(mode=context.GRAPH_MODE, device_target=\"GPU\")\n bn_net = BatchNorm1d_Net(use_batch_statistics=None)\n grad_net = GradByListNet(bn_net)\n optimizer = SGD(bn_net.trainable_params(), learning_rate=0.01, momentum=0.9)\n bn_net.set_train(True)\n\n x1 = np.array([[1.6243454, -0.6117564],\n [-0.5281718, -1.0729686],\n [0.86540765, -2.3015387],\n [1.7448118, -0.7612069],\n [0.3190391, -0.24937038]]).astype(np.float32)\n dy1 = np.array([[1.4621079, -2.0601406],\n [-0.3224172, -0.38405436],\n [1.1337694, -1.0998913],\n [-0.1724282, -0.8778584],\n [0.04221375, 
0.58281523]]).astype(np.float32)\n x2 = np.array([[-0.19183555, -0.887629],\n [-0.7471583, 1.6924546],\n [0.05080776, -0.6369957],\n [0.19091548, 2.1002553],\n [0.12015896, 0.6172031]]).astype(np.float32)\n dy2 = np.array([[0.30017033, -0.35224986],\n [-1.1425182, -0.34934273],\n [-0.20889424, 0.5866232],\n [0.8389834, 0.9311021],\n [0.2855873, 0.8851412]]).astype(np.float32)\n x_train = [x1, x2]\n dy_train = [dy1, dy2]\n\n dx1 = np.array([[0.8120, -2.0371],\n [-0.2202, 0.5837],\n [0.8040, 0.1950],\n [-1.1823, -0.2786],\n [-0.2135, 1.5371]]).astype(np.float32)\n gamma1 = np.array([0.9821, 0.9873]).astype(np.float32)\n beta1 = np.array([-0.0214, 0.0384]).astype(np.float32)\n mean1 = np.array([0.7246, -0.8994]).astype(np.float32)\n variance1 = np.array([0.9036, 0.6559]).astype(np.float32)\n\n dx2 = np.array([[1.1955, -0.4247],\n [-0.2425, -0.6789],\n [-1.4563, 0.3237],\n [0.8752, 0.3351],\n [-0.3719, 0.4448]]).astype(np.float32)\n gamma2 = np.array([0.9370, 0.9687]).astype(np.float32)\n beta2 = np.array([-0.0415, 0.0559]).astype(np.float32)\n mean2 = np.array([-0.0314, 0.4294]).astype(np.float32)\n variance2 = np.array([0.2213, 1.6822]).astype(np.float32)\n\n exp_dx = [dx1, dx2]\n exp_gamma = [gamma1, gamma2]\n exp_beta = [beta1, beta2]\n exp_mean = [mean1, mean2]\n exp_variance = [variance1, variance2]\n\n for data in zip(x_train, dy_train, exp_dx, exp_gamma, exp_beta, exp_mean, exp_variance):\n output = grad_net(Tensor(data[0]), Tensor(data[1]))\n assert np.allclose(output[0][0].asnumpy(), data[2], atol=1.0e-4)\n optimizer(output[1])\n assert np.allclose(bn_net.bn1.gamma.asnumpy(), data[3], atol=1.0e-4)\n assert np.allclose(bn_net.bn1.beta.asnumpy(), data[4], atol=1.0e-4)\n assert np.allclose(bn_net.bn1.moving_mean.asnumpy(), data[5], atol=1.0e-4)\n assert np.allclose(bn_net.bn1.moving_variance.asnumpy(), data[6], atol=1.0e-4)\n\n\[email protected]\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_1d_eval():\n context.set_context(mode=context.GRAPH_MODE, device_target=\"GPU\")\n gamma_init = Tensor(np.array([0.93700373, 0.96870345]).astype(np.float32))\n beta_init = Tensor(np.array([-0.04145495, 0.05593072]).astype(np.float32))\n mean_init = Tensor(np.array([-0.03142229, 0.4294087]).astype(np.float32))\n variance_init = Tensor(np.array([0.2212921, 1.6822311]).astype(np.float32))\n bn_net = BatchNorm1d_Net(affine=False, gamma_init=gamma_init, beta_init=beta_init, moving_mean_init=mean_init,\n moving_var_init=variance_init, use_batch_statistics=None)\n bn_net.set_train(False)\n\n x1 = np.array([[-1.1006192, 1.1447237],\n [0.9015907, 0.50249434],\n [0.90085596, -0.68372786],\n [-0.12289023, -0.93576944],\n [-0.26788807, 0.53035545]]).astype(np.float32)\n x2 = np.array([[-0.7543979, 1.2528682],\n [0.5129298, -0.29809284],\n [0.48851815, -0.07557172],\n [1.1316293, 1.5198169],\n [2.1855755, -1.3964963]]).astype(np.float32)\n x_test = [x1, x2]\n\n y1 = np.array([[-2.1711, 0.5902],\n [1.8169, 0.1105],\n [1.8155, -0.7754],\n [-0.2236, -0.9637],\n [-0.5125, 0.1313]]).astype(np.float32)\n y2 = np.array([[-1.4815, 0.6710],\n [1.0428, -0.4874],\n [0.9942, -0.3212],\n [2.2751, 0.8703],\n [4.3744, -1.3078]]).astype(np.float32)\n y_test = [y1, y2]\n\n for x, y in zip(x_test, y_test):\n y_pred = bn_net(Tensor(x))\n assert np.allclose(y_pred.asnumpy(), y, atol=1.0e-4)\n", "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of 
the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport numpy as np\nimport pytest\nimport mindspore.dataset as ds\nimport mindspore.dataset.audio.transforms as audio\nfrom mindspore import log as logger\n\n\ndef count_unequal_element(data_expected, data_me, rtol, atol):\n assert data_expected.shape == data_me.shape\n total_count = len(data_expected.flatten())\n error = np.abs(data_expected - data_me)\n greater = np.greater(error, atol + np.abs(data_expected) * rtol)\n loss_count = np.count_nonzero(greater)\n assert (loss_count / total_count) < rtol, \"\\ndata_expected_std:{0}\\ndata_me_error:{1}\\nloss:{2}\".format(\n data_expected[greater], data_me[greater], error[greater])\n\n\ndef test_func_bandreject_biquad_eager():\n \"\"\" mindspore eager mode normal testcase:bandreject_biquad op\"\"\"\n\n # Original waveform\n waveform = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float64)\n # Expect waveform\n expect_waveform = np.array([[9.802485108375549316e-01, 1.000000000000000000e+00, 1.000000000000000000e+00],\n [1.000000000000000000e+00, 1.000000000000000000e+00, 1.000000000000000000e+00]],\n dtype=np.float64)\n bandreject_biquad_op = audio.BandrejectBiquad(44100, 200.0, 0.707)\n # Filtered waveform by bandrejectbiquad\n output = bandreject_biquad_op(waveform)\n count_unequal_element(expect_waveform, output, 0.0001, 0.0001)\n\n\ndef test_func_bandreject_biquad_pipeline():\n \"\"\" mindspore pipeline mode normal testcase:bandreject_biquad op\"\"\"\n\n # Original waveform\n waveform = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float64)\n # Expect waveform\n expect_waveform = np.array([[9.802485108375549316e-01, 1.000000000000000000e+00, 1.000000000000000000e+00],\n [1.000000000000000000e+00, 1.000000000000000000e+00, 1.000000000000000000e+00]],\n dtype=np.float64)\n label = np.random.sample((2, 1))\n data = (waveform, label)\n dataset = ds.NumpySlicesDataset(data, [\"channel\", \"sample\"], shuffle=False)\n bandreject_biquad_op = audio.BandrejectBiquad(44100, 200.0)\n # Filtered waveform by bandrejectbiquad\n dataset = dataset.map(\n input_columns=[\"channel\"], operations=bandreject_biquad_op, num_parallel_workers=8)\n i = 0\n for item in dataset.create_dict_iterator(num_epochs=1, output_numpy=True):\n count_unequal_element(expect_waveform[i, :],\n item['channel'], 0.0001, 0.0001)\n i += 1\n\n\ndef test_bandreject_biquad_invalid_input():\n def test_invalid_input(test_name, sample_rate, central_freq, Q, error, error_msg):\n logger.info(\n \"Test BandrejectBiquad with bad input: {0}\".format(test_name))\n with pytest.raises(error) as error_info:\n audio.BandrejectBiquad(sample_rate, central_freq, Q)\n assert error_msg in str(error_info.value)\n\n test_invalid_input(\"invalid sample_rate parameter type as a float\", 44100.5, 200, 0.707, TypeError,\n \"Argument sample_rate with value 44100.5 is not of type [<class 'int'>],\"\n \" but got <class 'float'>.\")\n test_invalid_input(\"invalid sample_rate parameter type as a String\", \"44100\", 200, 0.707, TypeError,\n \"Argument sample_rate with value 44100 is not of type [<class 'int'>], but got <class 'str'>.\")\n 
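# The first expected sample in the cases above can be reproduced from the
# standard RBJ band-reject biquad: with w0 = 2 * pi * central_freq /
# sample_rate and alpha = sin(w0) / (2 * Q), the first output is
# x[0] / (1 + alpha); later samples exceed 1 and come back as exactly 1.0,
# consistent with the op clamping its output to [-1, 1]. NumPy check:
#     import numpy as np
#     w0 = 2 * np.pi * 200.0 / 44100
#     alpha = np.sin(w0) / (2 * 0.707)
#     assert abs(1.0 / (1 + alpha) - 0.98024851) < 1e-6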
test_invalid_input(\"invalid contral_freq parameter type as a String\", 44100, \"200\", 0.707, TypeError,\n \"Argument central_freq with value 200 is not of type [<class 'float'>, <class 'int'>],\"\n \" but got <class 'str'>.\")\n test_invalid_input(\"invalid sample_rate parameter value\", 0, 200, 0.707, ValueError,\n \"Input sample_rate is not within the required interval of [-2147483648, 0) and (0, 2147483647].\")\n test_invalid_input(\"invalid contral_freq parameter value\", 44100, 32434324324234321, 0.707, ValueError,\n \"Input central_freq is not within the required interval of [-16777216, 16777216].\")\n test_invalid_input(\"invalid Q parameter type as a String\", 44100, 200, \"0.707\", TypeError,\n \"Argument Q with value 0.707 is not of type [<class 'float'>, <class 'int'>],\"\n \" but got <class 'str'>.\")\n test_invalid_input(\"invalid Q parameter value\", 44100, 200, 1.707, ValueError,\n \"Input Q is not within the required interval of (0, 1].\")\n test_invalid_input(\"invalid Q parameter value\", 44100, 200, 0, ValueError,\n \"Input Q is not within the required interval of (0, 1].\")\n test_invalid_input(\"invalid sample_rate parameter value\", 441324343243242342345300, 200, 0.707, ValueError,\n \"Input sample_rate is not within the required interval of [-2147483648, 0) and (0, 2147483647].\")\n test_invalid_input(\"invalid sample_rate parameter value\", None, 200, 0.707, TypeError,\n \"Argument sample_rate with value None is not of type [<class 'int'>],\"\n \" but got <class 'NoneType'>.\")\n test_invalid_input(\"invalid central_rate parameter value\", 44100, None, 0.707, TypeError,\n \"Argument central_freq with value None is not of type [<class 'float'>, <class 'int'>],\"\n \" but got <class 'NoneType'>.\")\n\n\nif __name__ == \"__main__\":\n test_func_bandreject_biquad_eager()\n test_func_bandreject_biquad_pipeline()\n test_bandreject_biquad_invalid_input()\n", "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\nimport numpy as np\nimport pytest\nfrom mindspore import context\nfrom mindspore import Tensor, nn\nfrom mindspore.common.parameter import Parameter\nfrom mindspore.ops import composite as C\nfrom mindspore.ops import operations as P\nfrom mindspore.common import dtype as mstype\nfrom tests.security_utils import security_off_wrap\n\ngrad_all = C.GradOperation(get_all=True)\n\n\[email protected](reason=\"not supported for in while\")\ndef test_for_in_while_01():\n class ForInWhileNet(nn.Cell):\n def __init__(self):\n super().__init__()\n self.mul = P.Mul()\n self.add = P.Add()\n self.sub = P.Sub()\n self.assign = P.Assign()\n param_a = np.full((1,), 5, dtype=np.float32)\n self.param_a = Parameter(Tensor(param_a), name='a')\n param_b = np.full((1,), 2, dtype=np.float32)\n self.param_b = Parameter(Tensor(param_b), name='b')\n\n def construct(self, x):\n self.assign(self.param_a, x + self.param_a)\n while self.param_a > self.param_b:\n x = 
self.mul(x, 2)\n for _ in range(0, 5):\n x = self.add(x, x)\n self.param_b = self.param_b + 1\n y = self.sub(x, self.param_b)\n self.assign(self.param_a, y)\n return x\n\n class GradNet(nn.Cell):\n def __init__(self, net):\n super(GradNet, self).__init__()\n self.net = net\n\n def construct(self, *inputs):\n return grad_all(self.net)(*inputs)\n\n x = Tensor([2], mstype.int32)\n\n # graph mode\n context.set_context(mode=context.GRAPH_MODE)\n for_in_while_net = ForInWhileNet()\n backward_net = GradNet(for_in_while_net)\n\n forward_net = ForInWhileNet()\n graph_forward_res = forward_net(x)\n graph_backward_res = backward_net(x)\n\n expect_forward_res = 0\n expect_backward_res = 0\n assert graph_forward_res == expect_forward_res\n assert graph_backward_res == expect_backward_res\n\n\[email protected](reason=\"not supported for in while\")\n@security_off_wrap\ndef test_for_in_while_02():\n class ForInWhileNet(nn.Cell):\n def __init__(self):\n super().__init__()\n self.mul = P.Mul()\n self.add = P.Add()\n self.sub = P.Sub()\n self.assign = P.Assign()\n self.param_a = Parameter(Tensor(5, mstype.int32), name='a')\n self.param_b = Parameter(Tensor(7, mstype.int32), name='b')\n\n def construct(self, x):\n self.assign(self.param_a, x + self.param_a)\n while self.param_a > self.param_b:\n for _ in range(0, 3):\n x = self.add(x, self.param_a + self.param_b)\n self.assign(self.param_b, self.param_b + 1)\n y = self.sub(x, self.param_b)\n self.assign(self.param_a, y)\n return x\n\n class GradNet(nn.Cell):\n def __init__(self, net):\n super(GradNet, self).__init__()\n self.net = net\n\n def construct(self, *inputs):\n return grad_all(self.net)(*inputs)\n\n x = Tensor([2], mstype.int32)\n\n # graph mode\n context.set_context(mode=context.GRAPH_MODE)\n context.set_context(save_graphs=True)\n for_in_while_net = ForInWhileNet()\n net = GradNet(for_in_while_net)\n graph_forward_res = for_in_while_net(x)\n graph_backward_res = net(x)\n\n expect_forward_res = 0\n expect_backward_res = 0\n assert graph_forward_res == expect_forward_res\n assert graph_backward_res == expect_backward_res\n", "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\n\nimport mindspore as ms\nfrom mindspore import context, Tensor, Parameter\nfrom mindspore.common.api import _cell_graph_executor\nfrom mindspore.nn import Cell, TrainOneStepCell, Momentum\nfrom mindspore.ops import operations as P\n\n\nclass Net(Cell):\n def __init__(self, mul_weight, batch_matmul_weight, transpose_b=False, strategy1=None, strategy2=None):\n super().__init__()\n self.mul = P.Mul().shard(strategy1)\n self.batch_matmul = P.BatchMatMul(transpose_b=transpose_b).shard(strategy2)\n self.mul_weight = Parameter(mul_weight, \"w1\")\n self.batch_matmul_weight = Parameter(batch_matmul_weight, \"w2\")\n\n def construct(self, x, b):\n out = self.mul(x, self.mul_weight)\n out = self.batch_matmul(out, self.batch_matmul_weight)\n return out\n\n\n_x = Tensor(np.ones([128, 64, 32]), dtype=ms.float32)\n_w1 = 
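# Reading the shard strategies used below: a strategy holds one tuple per
# operator input, giving the number of slices taken along each of that
# input's dimensions. ((16, 1, 1), (16, 1, 1)) splits only the leading
# batch dimension across 16 devices (pure data parallelism), while
# ((1, 1, 1), (1, 1, 16)) keeps the batch whole and slices the second
# operand's last dimension (model parallelism); a slice product smaller
# than device_num, as in the repeat_calc case, leaves the spare devices
# repeating the same computation.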
Tensor(np.ones([128, 64, 32]), dtype=ms.float32)\n_w2 = Tensor(np.ones([128, 32, 32]), dtype=ms.float32)\n_b = Tensor(np.ones([128, 64, 16]), dtype=ms.float32)\n\n\ndef compile_net(net):\n optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)\n train_net = TrainOneStepCell(net, optimizer)\n train_net.set_auto_parallel()\n train_net.set_train()\n _cell_graph_executor.compile(train_net, _x, _b)\n context.reset_auto_parallel_context()\n\n\ndef test_batch_matmul_data_parallel():\n \"\"\"\n Feature: distribute operator batch_matmul in auto parallel.\n Description: mul-batch_matmul net with data parallel strategy in semi auto parallel.\n Expectation: compile done without error.\n \"\"\"\n context.set_auto_parallel_context(parallel_mode=\"semi_auto_parallel\", device_num=16, global_rank=0)\n strategy1 = ((16, 1, 1), (16, 1, 1))\n strategy2 = ((16, 1, 1), (16, 1, 1))\n net = Net(_w1, _w2, False, strategy1, strategy2)\n compile_net(net)\n\n\ndef test_batch_matmul_model_parallel():\n \"\"\"\n Feature: distribute operator batch_matmul in auto parallel.\n Description: mul-batch_matmul net with model parallel strategy in semi auto parallel.\n Expectation: compile done without error.\n \"\"\"\n context.set_auto_parallel_context(parallel_mode=\"semi_auto_parallel\", device_num=16, global_rank=0)\n strategy1 = ((1, 1, 1), (1, 1, 1))\n strategy2 = ((1, 1, 1), (1, 1, 16))\n net = Net(_w1, _w2, False, strategy1, strategy2)\n compile_net(net)\n\n\ndef test_batch_matmul_hybrid_parallel():\n \"\"\"\n Feature: distribute operator batch_matmul in auto parallel.\n Description: mul-batch_matmul net with mixed strategy in semi auto parallel.\n Expectation: compile done without error.\n \"\"\"\n context.set_auto_parallel_context(parallel_mode=\"semi_auto_parallel\", device_num=16, global_rank=0)\n strategy1 = ((2, 2, 2), (2, 2, 2))\n strategy2 = ((2, 2, 2), (2, 2, 2))\n net = Net(_w1, _w2, False, strategy1, strategy2)\n compile_net(net)\n\n\ndef test_batch_matmul_auto_parallel():\n \"\"\"\n Feature: distribute operator batch_matmul in auto parallel.\n Description: mul-batch_matmul net in auto parallel.\n Expectation: compile done without error.\n \"\"\"\n context.set_auto_parallel_context(parallel_mode=\"auto_parallel\", device_num=16, global_rank=0)\n net = Net(_w1, _w2, False)\n compile_net(net)\n\n\ndef test_batch_matmul_repeat_calc():\n \"\"\"\n Feature: distribute operator batch_matmul in auto parallel.\n Description: mul-batch_matmul net with repeated strategy in semi auto parallel.\n Expectation: compile done without error.\n \"\"\"\n context.set_auto_parallel_context(parallel_mode=\"semi_auto_parallel\", device_num=16, global_rank=0)\n strategy1 = ((2, 2, 4), (2, 2, 4))\n strategy2 = ((1, 2, 2), (1, 2, 2))\n net = Net(_w1, _w2, False, strategy1, strategy2)\n compile_net(net)\n\n\ndef test_batch_matmul_transpose_b():\n \"\"\"\n Feature: distribute operator batch_matmul in auto parallel.\n Description: mul-batch_matmul net with strategy in semi auto parallel, transpose_b.\n Expectation: compile done without error.\n \"\"\"\n context.set_auto_parallel_context(parallel_mode=\"semi_auto_parallel\", device_num=16, global_rank=0)\n strategy1 = ((2, 2, 4), (2, 2, 4))\n strategy2 = ((1, 2, 2), (1, 2, 2))\n net = Net(_w1, _w2, True, strategy1, strategy2)\n compile_net(net)\n", "# Copyright 2021 Huawei Technologies Co., Ltd\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of 
the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ============================================================================\r\n\r\nimport numpy as np\r\nimport pytest\r\n\r\nimport mindspore.context as context\r\nimport mindspore.nn as nn\r\nfrom mindspore import Tensor, Parameter\r\nfrom mindspore.ops import operations as P\r\n\r\n\r\nclass AssignAdd(nn.Cell):\r\n def __init__(self, value):\r\n super(AssignAdd, self).__init__()\r\n self.var = Parameter(value, name=\"var\")\r\n self.add = P.AssignAdd()\r\n\r\n def construct(self, y):\r\n self.add(self.var, y)\r\n return self.var\r\n\r\ndef get_output(x2, y2, enable_graph_kernel=False):\r\n context.set_context(enable_graph_kernel=enable_graph_kernel)\r\n add = AssignAdd(x2)\r\n result_gk_on_1 = add(y2)\r\n add_2 = AssignAdd(result_gk_on_1)\r\n result_gk_on_2 = add_2(y2)\r\n output = [result_gk_on_1, result_gk_on_2]\r\n return output\r\n\r\ndef assign_add():\r\n x2 = Tensor(np.arange(1 * 3 * 3 * 3).reshape(1, 3, 3, 3).astype(np.float32))\r\n y2 = Tensor(np.arange(1 * 3 * 3 * 3).reshape(1, 3, 3, 3).astype(np.float32))\r\n\r\n expect = get_output(x2, y2, False)\r\n output = get_output(x2, y2, True)\r\n e1, e2 = list(expect)\r\n o1, o2 = list(output)\r\n\r\n assert np.allclose(o1.asnumpy(), e1.asnumpy())\r\n assert np.allclose(o2.asnumpy(), e2.asnumpy())\r\n\r\[email protected]\r\[email protected]_x86_gpu_training\r\[email protected]_onecard\r\ndef test_assign_add_gpu():\r\n context.set_context(mode=context.GRAPH_MODE, device_target=\"GPU\")\r\n assign_add()\r\n\r\[email protected]\r\[email protected]_arm_ascend_training\r\[email protected]_x86_ascend_training\r\[email protected]_onecard\r\ndef test_assign_add_ascend():\r\n context.set_context(mode=context.GRAPH_MODE, device_target=\"Ascend\")\r\n assign_add()\r\n", "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport mindspore as ms\nimport mindspore.context as context\nfrom mindspore import Tensor, Parameter\nimport mindspore.nn as nn\nfrom mindspore.common.api import _cell_graph_executor\nfrom mindspore.nn import TrainOneStepCell, Momentum\nfrom mindspore.ops import operations as P\n\nclass Net(nn.Cell):\n def __init__(self, wi, wo, stra1=None, stra2=None, stra3=None, stra4=None,\n stra5=None, stra6=None):\n super(Net, self).__init__()\n self.relu = P.ReLU().shard(stra1)\n self.transpose = P.Transpose().shard(stra2)\n self.wi = Parameter(wi, \"wi\")\n self.batch_mm = P.BatchMatMul().shard(stra3)\n self.wo = Parameter(wo, \"wo\")\n self.batch_mm2 = P.BatchMatMul().shard(stra4)\n self.transpose2 = P.Transpose().shard(stra5)\n self.relu2 = P.ReLU().shard(stra6)\n 
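# Dataflow of this cell for input [32, 16, 48, 128]: transpose to
# (2, 0, 3, 1) yields [48, 32, 128, 16]; the first reshape folds the two
# middle axes so the BatchMatMul calls run as [48, 32*128, 16] x [48, 16, 64]
# and [48, 32*128, 64] x [48, 64, 16]; reshape2 and transpose2 then restore
# the original layout. The stra1..stra6 tuples passed in shard these steps
# across devices, mostly along the leading dimension in the tests that follow.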
self.reshape = P.Reshape()\n self.reshape2 = P.Reshape()\n\n def construct(self, x):\n output = self.relu(x)\n trans_out = self.transpose(output, (2, 0, 3, 1))\n output = self.reshape(trans_out,\n (trans_out.shape[0], trans_out.shape[1]*trans_out.shape[2], trans_out.shape[3]))\n output = self.batch_mm(output, self.wi)\n output = self.batch_mm2(output, self.wo)\n output = self.reshape2(output, trans_out.shape)\n output = self.transpose2(output, (1, 3, 0, 2))\n output = self.relu2(output)\n return output\n\n_x = Tensor(np.ones([32, 16, 48, 128]), dtype=ms.float32)\n_wi = Tensor(np.ones([48, 16, 64]), dtype=ms.float32)\n_wo = Tensor(np.ones([48, 64, 16]), dtype=ms.float32)\n\n\ndef compile_net(net):\n context.set_context(mode=context.GRAPH_MODE)\n optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)\n train_net = TrainOneStepCell(net, optimizer)\n train_net.set_auto_parallel()\n train_net.set_train()\n _cell_graph_executor.compile(train_net, _x)\n context.reset_auto_parallel_context()\n\n\ndef test_batchmm():\n context.set_auto_parallel_context(parallel_mode=\"semi_auto_parallel\", device_num=8, enable_alltoall=True,\n global_rank=0)\n stra1 = ((8, 1, 1, 1),)\n stra2 = ((8, 1, 1, 1),)\n stra3 = ((8, 1, 1), (8, 1, 1))\n stra4 = ((8, 1, 1), (8, 1, 1))\n stra5 = ((8, 1, 1, 1),)\n stra6 = ((8, 1, 1, 1),)\n net = Net(_wi, _wo, stra1=stra1, stra2=stra2, stra3=stra3, stra4=stra4, stra5=stra5, stra6=stra6)\n compile_net(net)\n\n\ndef test_batchmm2():\n context.set_auto_parallel_context(parallel_mode=\"semi_auto_parallel\", enable_alltoall=True,\n device_num=32, global_rank=0)\n stra1 = ((4, 1, 1, 1),)\n stra2 = ((4, 1, 1, 1),)\n stra3 = ((4, 1, 1), (4, 1, 8))\n stra4 = ((4, 1, 8), (4, 8, 1))\n stra5 = ((4, 1, 1, 1),)\n stra6 = ((4, 1, 1, 1),)\n net = Net(_wi, _wo, stra1=stra1, stra2=stra2, stra3=stra3, stra4=stra4, stra5=stra5, stra6=stra6)\n compile_net(net)\n", "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\nimport platform\nimport numpy as np\nimport pytest\n\nimport mindspore.context as context\nimport mindspore.nn as nn\nfrom mindspore import Tensor\nfrom mindspore.ops import operations as P\n\n\nclass RCWM_count_in(nn.Cell):\n def __init__(self):\n super(RCWM_count_in, self).__init__()\n self.RCWM_count_in = P.RandomChoiceWithMask(count=4, seed=1)\n\n def construct(self, x):\n return self.RCWM_count_in(x)\n\n\nclass RCWM_count_out(nn.Cell):\n def __init__(self):\n super(RCWM_count_out, self).__init__()\n self.RCWM_count_out = P.RandomChoiceWithMask(count=10, seed=1)\n\n def construct(self, x):\n return self.RCWM_count_out(x)\n\n\nclass RCWM_3D(nn.Cell):\n def __init__(self):\n super(RCWM_3D, self).__init__()\n self.RCWM_3D = P.RandomChoiceWithMask(count=10, seed=1)\n\n def construct(self, x):\n return self.RCWM_3D(x)\n\n\nclass RCWM_1D(nn.Cell):\n def __init__(self):\n super(RCWM_1D, self).__init__()\n self.RCWM_1D = 
P.RandomChoiceWithMask(count=10, seed=9)\n\n    def construct(self, x):\n        return self.RCWM_1D(x)\n\n\[email protected]\[email protected]_x86_cpu\[email protected]_onecard\ndef test_RCWM_3D():\n    context.set_context(mode=context.GRAPH_MODE, device_target=\"CPU\")\n    input_tensor = Tensor(np.ones([3, 4, 5]).astype(bool))\n    expect1 = (10, 3)\n    expect2 = (10,)\n    rcwm = RCWM_3D()\n    output1, output2 = rcwm(input_tensor)\n    assert output1.shape == expect1\n    assert output2.shape == expect2\n\n\[email protected]\[email protected]_x86_cpu\[email protected]_onecard\ndef test_RCWM_count_out():\n    context.set_context(mode=context.GRAPH_MODE, device_target=\"CPU\")\n    input_tensor = Tensor(np.array([[1, 0, 1, 0], [0, 0, 0, 1], [1, 1, 1, 1],\n                                    [0, 0, 0, 1]]).astype(bool))\n    expect1 = (10, 2)\n    expect2 = (10,)\n    rcwm = RCWM_count_out()\n    output1, output2 = rcwm(input_tensor)\n    assert output1.shape == expect1\n    assert output2.shape == expect2\n\n\[email protected]\[email protected]_x86_cpu\[email protected]_onecard\ndef test_RCWM_count_in():\n    context.set_context(mode=context.GRAPH_MODE, device_target=\"CPU\")\n    input_tensor = Tensor(np.array([[1, 0, 1, 0], [0, 0, 0, 1], [1, 1, 1, 1],\n                                    [0, 0, 0, 1]]).astype(bool))\n    expect1 = (4, 2)\n    expect2 = (4,)\n    rcwm = RCWM_count_in()\n    output1, output2 = rcwm(input_tensor)\n    assert output1.shape == expect1\n    assert output2.shape == expect2\n\n\[email protected]\[email protected]_x86_cpu\[email protected]_onecard\ndef test_RCWM_1D():\n    context.set_context(mode=context.GRAPH_MODE, device_target=\"CPU\")\n    input_tensor = Tensor(\n        np.array([1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1]).astype(bool))\n    expect_index = np.array([[11], [0], [8], [2], [9], [7],\n                             [10], [15], [0], [0]]).astype(np.int32)\n    expect_index_mac = np.array([[11], [7], [9], [15], [2], [10],\n                                 [8], [0], [0], [0]]).astype(np.int32)\n    expect_mask = np.array(\n        [True, True, True, True, True, True, True, True, False, False])\n    rcwm = RCWM_1D()\n    output1, output2 = rcwm(input_tensor)\n    print(output1.asnumpy())\n    print(output2)\n    if platform.system().lower() == \"darwin\":\n        assert np.array_equal(output1.asnumpy(), expect_index_mac)\n    else:\n        assert np.array_equal(output1.asnumpy(), expect_index)\n    assert np.array_equal(output2.asnumpy(), expect_mask)\n", "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\nimport numpy as np\nimport pytest\n\nfrom mindspore import ops, nn, ParameterTuple, context, set_seed\nfrom mindspore.train import DatasetHelper, connect_network_with_dataset\nimport mindspore.dataset as ds\n\ncontext.set_context(mode=context.GRAPH_MODE, device_target=\"Ascend\")\nset_seed(2)\n\n\ndef _exec_preprocess(network, is_train, dataset, dataset_sink_mode, epoch_num, sink_size):\n    if dataset_sink_mode and not is_train:\n        dataset.__loop_size__ = 1\n\n    dataset_helper = DatasetHelper(dataset, dataset_sink_mode, sink_size, epoch_num)\n\n    if dataset_sink_mode:\n        network 
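# Shape rule checked above: RandomChoiceWithMask always returns `count`
# index rows plus a (count,) boolean mask. When the input holds fewer True
# elements than `count` (the count_out and 1D cases have 8 True values with
# count=10), the surplus index rows are zero-padded and the mask marks them
# False; the sampled order is platform-dependent, which is why the 1D test
# keeps separate expected orders for Linux and macOS.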
= connect_network_with_dataset(network, dataset_helper)\n\n return dataset_helper, network\n\n\ndef dynamic_shape_sink_process(network, dataset, is_train=True):\n # epoch_num=1 sink_size=1: exec one step\n dataset_sink_mode = True\n sink_size = 1\n epoch_num = 1\n dataset_helper, network = _exec_preprocess(network, is_train, dataset, dataset_sink_mode, epoch_num, sink_size)\n network.set_train(is_train)\n for inputs in dataset_helper:\n outputs = network(*inputs)\n return outputs\n\n\ndef fixed_shape_process(network, dataset, is_train=True):\n network.set_train(is_train)\n for inputs in dataset.create_tuple_iterator():\n outputs = network(*inputs)\n return outputs\n\n\ndef dataset_generator(data_list):\n for data in data_list:\n yield data\n\n\nclass GradNetWrtX(nn.Cell):\n def __init__(self, net):\n super(GradNetWrtX, self).__init__()\n self.net = net\n self.grad_op = ops.GradOperation(get_all=True, get_by_list=True, sens_param=True)\n self.params = ParameterTuple(net.trainable_params())\n\n def construct(self, *inputs):\n gradient_function = self.grad_op(self.net, self.params)\n return gradient_function(*inputs)\n\n\nclass LayerNormNet(nn.Cell):\n def __init__(self, last_dim):\n super(LayerNormNet, self).__init__()\n self.layernorm = nn.LayerNorm([last_dim])\n\n def construct(self, x):\n return self.layernorm(x)\n\n\nclass Conv2dNet(nn.Cell):\n def __init__(self):\n super(Conv2dNet, self).__init__()\n self.conv = nn.Conv2d(3, 10, 4, pad_mode=\"valid\", has_bias=False, weight_init='normal')\n\n def construct(self, x):\n return self.conv(x)\n\n\nclass DropoutNet(nn.Cell):\n def __init__(self):\n super(DropoutNet, self).__init__()\n self.drop = nn.Dropout(0.5)\n self.relu = ops.ReLU()\n\n def construct(self, x):\n x = self.relu(x)\n return self.relu(self.drop(x))\n\n\nclass ReduceSumNet(nn.Cell):\n def __init__(self, axis=()):\n super(ReduceSumNet, self).__init__()\n self.reduce = ops.ReduceSum()\n self.axis = axis\n\n def construct(self, x):\n return self.reduce(x, self.axis)\n\n\nclass AddNet(nn.Cell):\n def construct(self, x, y):\n return ops.add(x, y)\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_onecard\ndef test_dynamic_layernorm():\n \"\"\"\n Feature: Test LayerNorm and its backward. The input shape is dynamic.\n Description: The second dim of input is unknown.\n Expectation: Assert that results are consistent with fixed shape.\n \"\"\"\n last_dim = 32\n batch_size = 16\n data_list = []\n for i in range(20, 23):\n data_list.append((np.random.rand(batch_size, i, last_dim).astype(np.float32),\n np.random.rand(batch_size, i, last_dim).astype(np.float32)))\n\n dataset = ds.GeneratorDataset(data_list, [\"data1\", \"data2\"])\n dataset.set_dynamic_columns(columns={\"data1\": [batch_size, None, last_dim], \"data2\": [batch_size, None, last_dim]})\n\n net = GradNetWrtX(LayerNormNet(last_dim))\n\n gradients = dynamic_shape_sink_process(net, dataset)\n gradients_cmp = fixed_shape_process(net, dataset)\n assert np.allclose(gradients[0][0].asnumpy(), gradients_cmp[0][0].asnumpy(), rtol=1.0e-4, atol=1.0e-4)\n assert np.allclose(gradients[1][0].asnumpy(), gradients_cmp[1][0].asnumpy(), rtol=1.0e-4, atol=1.0e-4)\n assert np.allclose(gradients[1][1].asnumpy(), gradients_cmp[1][1].asnumpy(), rtol=1.0e-4, atol=1.0e-4)\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_onecard\ndef test_dynamic_conv2d():\n \"\"\"\n Feature: Test Conv2d and its backward. 
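# Size bookkeeping for the "valid" Conv2d exercised here: with kernel 4 and
# stride 1, out = in - 4 + 1, so W = 112 maps to 109 and H around 222 maps
# to around 219, which is where the dy shapes fed to the grad net come from:
#     assert (222 - 4) // 1 + 1 == 219 and (112 - 4) // 1 + 1 == 109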
 Feature: Test Conv2d and its backward. The input shape is dynamic.\n Description: Input dim of `H` or `W` is unknown. Conv2d's pad_mode attr is set to \"valid\".\n Expectation: Assert that results are consistent with fixed shape.\n \"\"\"\n batch_size = 16\n data_list = []\n for i in range(220, 224):\n data_list.append((np.random.rand(batch_size, 3, i, 112).astype(np.float32),\n np.random.rand(batch_size, 10, 219, 109).astype(np.float32)))\n\n dataset = ds.GeneratorDataset(data_list, [\"data1\", \"data2\"])\n dataset.set_dynamic_columns(columns={\"data1\": [batch_size, 3, None, 112], \"data2\": [batch_size, 10, None, 109]})\n net = GradNetWrtX(Conv2dNet())\n gradients = dynamic_shape_sink_process(net, dataset)\n gradients_cmp = fixed_shape_process(net, dataset)\n assert np.allclose(gradients[0][0].asnumpy(), gradients_cmp[0][0].asnumpy(), rtol=1.0e-4, atol=1.0e-4)\n assert np.allclose(gradients[1][0].asnumpy(), gradients_cmp[1][0].asnumpy(), rtol=1.0e-4, atol=1.0e-4)\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_onecard\ndef test_dynamic_dropout():\n \"\"\"\n Feature: Test Dropout and its backward.\n Description: The input shape is dynamic.\n Expectation: Dropout result is random, assert gradient shape.\n \"\"\"\n batch_size = 16\n data_list = []\n for i in range(48, 50):\n data_list.append((np.random.rand(batch_size, i, 256).astype(np.float32),\n np.random.rand(batch_size, i, 256).astype(np.float32)))\n\n dataset = ds.GeneratorDataset(data_list, [\"data1\", \"data2\"])\n dataset.set_dynamic_columns(columns={\"data1\": [batch_size, None, 256], \"data2\": [batch_size, None, 256]})\n net = GradNetWrtX(DropoutNet())\n net.set_train()\n gradients = dynamic_shape_sink_process(net, dataset)\n assert gradients[0][0].shape == (batch_size, 49, 256)\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_onecard\ndef test_dynamic_reducesum1():\n \"\"\"\n Feature: Test ReduceSum and its backward. The input shape is dynamic.\n Description: axis=(), result of reduce sum is a scalar, gradient shape is the same as input, values are all ones.\n Expectation: Assert gradient shape.\n \"\"\"\n batch_size = 16\n data_list = []\n for i in range(48, 50):\n data_list.append((np.random.rand(batch_size, i, i + 2).astype(np.float32),\n np.array(1).astype(np.float32)))\n\n dataset = ds.GeneratorDataset(data_list, [\"data1\", \"data2\"])\n dataset.set_dynamic_columns(columns={\"data1\": [batch_size, None, None], \"data2\": []})\n net = GradNetWrtX(ReduceSumNet())\n\n gradients = dynamic_shape_sink_process(net, dataset)\n assert gradients[0][0].shape == (batch_size, 49, 51)\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_onecard\ndef test_dynamic_reducesum2():\n \"\"\"\n Feature: Test ReduceSum and its backward. 
The input shape is dynamic.\n Description: axis is a scalar, not tuple.\n Expectation: Assert that results are consistent with fixed shape.\n \"\"\"\n batch_size = 16\n data_list = []\n for i in range(48, 50):\n data_list.append((np.random.rand(batch_size, i, i + 2).astype(np.float32),\n np.random.rand(batch_size, i + 2).astype(np.float32)))\n\n dataset = ds.GeneratorDataset(data_list, [\"data1\", \"data2\"])\n dataset.set_dynamic_columns(columns={\"data1\": [batch_size, None, None], \"data2\": [batch_size, None]})\n net = GradNetWrtX(ReduceSumNet(1))\n\n gradients = dynamic_shape_sink_process(net, dataset)\n gradients_cmp = fixed_shape_process(net, dataset)\n assert np.allclose(gradients[0][0].asnumpy(), gradients_cmp[0][0].asnumpy(), rtol=1.0e-4, atol=1.0e-4)\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_onecard\ndef test_dynamic_add1():\n \"\"\"\n Feature: Test Add and its backward. The input shape is dynamic.\n Description: Second input is a scalar. Shape of forward result is the same as first input.\n Expectation: Assert that results are consistent with fixed shape.\n \"\"\"\n batch_size = 16\n data_list = []\n for i in range(48, 50):\n data_list.append((np.random.rand(batch_size, i).astype(np.float32),\n np.array(1).astype(np.float32),\n np.random.rand(batch_size, i).astype(np.float32)))\n\n dataset = ds.GeneratorDataset(data_list, [\"data1\", \"data2\", \"data3\"])\n dataset.set_dynamic_columns(columns={\"data1\": [batch_size, None], \"data2\": [], \"data3\": [batch_size, None]})\n net = GradNetWrtX(AddNet())\n\n gradients = dynamic_shape_sink_process(net, dataset)\n gradients_cmp = fixed_shape_process(net, dataset)\n assert np.allclose(gradients[0][0].asnumpy(), gradients_cmp[0][0].asnumpy(), rtol=1.0e-4, atol=1.0e-4)\n assert np.allclose(gradients[0][1].asnumpy(), gradients_cmp[0][1].asnumpy(), rtol=1.0e-4, atol=1.0e-4)\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_onecard\ndef test_dynamic_add2():\n \"\"\"\n Feature: Test Add and its backward. The input shape is dynamic.\n Description: Shape of forward result is the same as first input. 
The axis of reduce_sum in add's bprop will be an\n empty Tensor.\n Expectation: Assert that results are consistent with fixed shape.\n \"\"\"\n batch_size = 16\n data_list = []\n for i in range(48, 50):\n data_list.append((np.random.rand(batch_size, 2, i).astype(np.float32),\n np.random.rand(2, i).astype(np.float32),\n np.random.rand(batch_size, 2, i).astype(np.float32)))\n\n dataset = ds.GeneratorDataset(data_list, [\"data1\", \"data2\", \"data3\"])\n dataset.set_dynamic_columns(columns=\n {\"data1\": [batch_size, 2, None], \"data2\": [2, None], \"data3\": [batch_size, 2, None]})\n net = GradNetWrtX(AddNet())\n\n gradients = dynamic_shape_sink_process(net, dataset)\n gradients_cmp = fixed_shape_process(net, dataset)\n assert np.allclose(gradients[0][0].asnumpy(), gradients_cmp[0][0].asnumpy(), rtol=1.0e-4, atol=1.0e-4)\n assert np.allclose(gradients[0][1].asnumpy(), gradients_cmp[0][1].asnumpy(), rtol=1.0e-4, atol=1.0e-4)\n", "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\nimport numpy as np\nimport pytest\n\nimport mindspore.context as context\nimport mindspore.nn as nn\nfrom mindspore import Tensor, Parameter\nfrom mindspore.ops import operations as P\nimport mindspore.common.dtype as mstype\n\ncontext.set_context(mode=context.GRAPH_MODE, device_target=\"GPU\")\n\nvar_np = np.random.rand(3, 3).astype(np.float32)\naccum_np = np.random.rand(3, 3).astype(np.float32)\n\n\nclass Net(nn.Cell):\n def __init__(self):\n super(Net, self).__init__()\n self.apply_adagrad = P.ApplyAdagrad()\n self.var = Parameter(Tensor(var_np), name=\"var\")\n self.accum = Parameter(Tensor(accum_np), name=\"accum\")\n\n def construct(self, lr, grad):\n z = self.apply_adagrad(self.var, self.accum, lr, grad)\n return z\n\n\[email protected]\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_apply_adagrad():\n # numpy reference of the Adagrad update\n gradient_np = np.random.rand(3, 3).astype(np.float32)\n expect_accum_np = accum_np + gradient_np * gradient_np\n expect_var_np = var_np - (0.001 * gradient_np * (1 / np.sqrt(expect_accum_np + 1e-6)))\n\n net = Net()\n lr = Tensor(0.001, mstype.float32)\n grad = Tensor(gradient_np)\n out = net(lr, grad)\n res_var_mindspore = out[0].asnumpy()\n res_accum_mindspore = out[1].asnumpy()\n eps = np.full((3, 3), 1e-6)\n\n # compare elementwise against the numpy reference; abs keeps the check two-sided\n assert np.all(np.abs(expect_var_np - res_var_mindspore) < eps)\n assert np.all(np.abs(expect_accum_np - res_accum_mindspore) < eps)\n", "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\nimport numpy as np\n\nimport mindspore.context as context\nimport mindspore.nn as nn\nfrom mindspore import Tensor\nfrom mindspore.communication.management import init, get_rank, get_group_size\nfrom mindspore.ops import operations as P\nimport mindspore as ms\n\ncontext.set_context(mode=context.GRAPH_MODE, device_target='GPU')\n\ninit()\nrank = get_rank()\nsize = get_group_size()\n\nx = np.asarray([1, 1, 1, 1, 1, 1, 1, 1]).astype(np.float32) * rank\n\n\n
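# Editor's note (comment added during review; not part of the original file): the op\n# below wires the 8 ranks into a ring. Each rank i sends its tensor to (i - 1) % 8 and\n# receives from (i + 1) % 8, so after one exchange rank i holds ones * ((i + 1) % 8),\n# which is exactly what test_neighborexchange asserts.\n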
class Net(nn.Cell):\n def __init__(self):\n super(Net, self).__init__()\n self.neighborexchange = P.comm_ops.NeighborExchange(\n send_rank_ids=[(rank - 1) % 8],\n recv_rank_ids=[(rank + 1) % 8],\n recv_shapes=tuple([[8]]),\n send_shapes=tuple([[8]]),\n recv_type=ms.float32,\n group=\"nccl_world_group\")\n\n def construct(self, inputs):\n return self.neighborexchange(inputs)\n\n\ndef test_neighborexchange():\n \"\"\"\n Feature: NeighborExchange operator on GPU\n Description: each device sends to the previous rank and receives from the next rank.\n example: rank 0 sends to rank 7 and receives from rank 1.\n Expectation: on rank i, result == [1, 1, 1, 1, 1, 1, 1, 1] * ((i + 1) % 8)\n \"\"\"\n neighborexchange = Net()\n expect0 = np.asarray([1, 1, 1, 1, 1, 1, 1, 1]).astype(\n np.float32) * ((rank + 1) % 8)\n inputs = []\n inputs.append(Tensor(x))\n inputs = tuple(inputs)\n output0 = neighborexchange(inputs)[0].asnumpy()\n diff0 = output0 - expect0\n error0 = np.ones(shape=expect0.shape) * 1.0e-5\n assert np.all(np.abs(diff0) < error0)\n assert output0.shape == expect0.shape\n", "# Copyright 2019 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"\nCifar10 reader class.\n\"\"\"\nimport builtins\nimport io\nimport pickle\nimport re\nimport os\nimport numpy as np\n\nfrom ..shardutils import check_filename\n\n__all__ = ['Cifar10']\n\nsafe_builtins = {\n 'range',\n 'complex',\n 'set',\n 'frozenset',\n 'slice',\n}\n\n\n
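# Editor's note (comment added during review; not part of the original file):\n# RestrictedUnpickler is a defensive unpickler. pickle.loads can invoke arbitrary\n# callables named in the stream, so find_class below whitelists only a handful of\n# builtins plus the numpy reconstruction hooks and rejects everything else.\n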
class RestrictedUnpickler(pickle.Unpickler):\n \"\"\"\n Unpickle allowing only a few safe classes from the builtins module or numpy\n\n Raises:\n pickle.UnpicklingError: If there is a problem unpickling an object\n \"\"\"\n def find_class(self, module, name):\n # Only allow safe classes from builtins and numpy\n if module == \"builtins\" and name in safe_builtins:\n return getattr(builtins, name)\n if module == \"numpy.core.multiarray\" and name == \"_reconstruct\":\n return getattr(np.core.multiarray, name)\n if module == \"numpy\":\n return getattr(np, name)\n # Forbid everything else.\n raise pickle.UnpicklingError(\"global '%s.%s' is forbidden\" % (module, name))\n\n\ndef restricted_loads(s):\n \"\"\"Helper function analogous to pickle.loads().\"\"\"\n if isinstance(s, str):\n raise TypeError(\"cannot load pickle from unicode string\")\n f = io.BytesIO(s)\n try:\n return RestrictedUnpickler(f, encoding='bytes').load()\n except pickle.UnpicklingError:\n raise RuntimeError(\"Not a valid Cifar10 Dataset.\")\n except UnicodeDecodeError:\n raise RuntimeError(\"Not a valid Cifar10 Dataset.\")\n except Exception:\n raise RuntimeError(\"Unexpected error while Unpickling Cifar10 Dataset.\")\n\n\nclass Cifar10:\n \"\"\"\n Class to convert cifar10 to MindRecord.\n\n Args:\n path (str): cifar10 directory which contains data_batch_* and test_batch.\n one_hot (bool): one_hot flag.\n \"\"\"\n class Test:\n pass\n\n def __init__(self, path, one_hot=True):\n check_filename(path)\n self.path = path\n if not isinstance(one_hot, bool):\n raise ValueError(\"The parameter one_hot must be bool\")\n self.one_hot = one_hot\n self.images = None\n self.labels = None\n\n def load_data(self):\n \"\"\"\n Returns a list containing train images & labels and test images & labels.\n\n Returns:\n list, train images, train labels and test images, test labels\n \"\"\"\n dic = {}\n # placeholder block; real batches are appended below and these 10000 rows\n # are dropped again via images[10000:]\n images = np.zeros([10000, 3, 32, 32])\n labels = []\n files = os.listdir(self.path)\n for file in files:\n if re.match(r\"data_batch_\\d+\", file): # load train data (data_batch_1 .. data_batch_5)\n real_file_path = os.path.realpath(self.path)\n with open(os.path.join(real_file_path, file), 'rb') as f:\n dic = restricted_loads(f.read())\n images = np.r_[images, dic[b\"data\"].reshape([-1, 3, 32, 32])]\n labels.append(dic[b\"labels\"])\n elif re.match(\"test_batch\", file): # load test data\n real_file_path = os.path.realpath(self.path)\n with open(os.path.join(real_file_path, file), 'rb') as f:\n dic = restricted_loads(f.read())\n test_images = np.array(dic[b\"data\"].reshape([-1, 3, 32, 32]))\n test_labels = np.array(dic[b\"labels\"])\n dic[\"train_images\"] = images[10000:].transpose(0, 2, 3, 1)\n dic[\"train_labels\"] = np.array(labels).reshape([-1, 1])\n dic[\"test_images\"] = test_images.transpose(0, 2, 3, 1)\n dic[\"test_labels\"] = test_labels.reshape([-1, 1])\n if self.one_hot:\n dic[\"train_labels\"] = self._one_hot(dic[\"train_labels\"], 10)\n dic[\"test_labels\"] = self._one_hot(dic[\"test_labels\"], 10)\n\n self.images, self.labels = dic[\"train_images\"], dic[\"train_labels\"]\n self.Test.images, self.Test.labels = dic[\"test_images\"], dic[\"test_labels\"]\n return [dic[\"train_images\"], dic[\"train_labels\"], dic[\"test_images\"], dic[\"test_labels\"]]\n\n def _one_hot(self, labels, num):\n \"\"\"\n Returns a one-hot encoding of the labels.\n\n Returns:\n Object, numpy array.\n \"\"\"\n size = labels.shape[0]\n label_one_hot = np.zeros([size, num])\n for i in range(size):\n label_one_hot[i, np.squeeze(labels[i])] = 1\n return label_one_hot\n", "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\nimport numpy as np\nimport pytest\nimport mindspore.context as context\nfrom mindspore import Tensor\nfrom mindspore.nn import Cell\nimport mindspore.ops.operations as P\n\nclass Net(Cell):\n def __init__(self):\n super(Net, self).__init__()\n self.identity = P.Identity()\n\n def construct(self, x):\n return self.identity(x)\n\n
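# Editor's note (comment added during review; not part of the original file): run_basic\n# builds a random array, pushes it through the Identity network with graph kernel\n# fusion enabled via context.set_context(enable_graph_kernel=...), and checks the\n# output still reproduces the input within tolerance.\n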
def get_output(x, enable_graph_kernel=False):\n context.set_context(enable_graph_kernel=enable_graph_kernel)\n net = Net()\n output = net(x)\n return output\n\ndef run_basic(dtype):\n # helper, deliberately not named test_*: a test function with a positional\n # argument would fail pytest collection with a missing-fixture error\n expect_np = np.random.normal(0, 10, (16, 32)).astype(dtype)\n x = Tensor(expect_np)\n output = get_output(x, True)\n output_np = output.asnumpy().copy()\n assert np.allclose(expect_np, output_np, 1.e-4, 1.e-7)\n\[email protected]\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_gpu_1():\n context.set_context(mode=context.GRAPH_MODE, device_target=\"GPU\")\n run_basic(np.float16)\n\[email protected]\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_gpu_2():\n context.set_context(mode=context.GRAPH_MODE, device_target=\"GPU\")\n run_basic(np.float32)\n", "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\" test cell \"\"\"\nimport copy\nimport numpy as np\nimport pytest\n\nimport mindspore as ms\nimport mindspore.nn as nn\nfrom mindspore import Tensor, Parameter\nfrom mindspore.ops import operations as P\nfrom mindspore.common.api import _cell_graph_executor\n\n\nclass ModA(nn.Cell):\n def __init__(self, tensor):\n super(ModA, self).__init__()\n self.weight = Parameter(tensor, name=\"weight\")\n\n def construct(self, *inputs):\n pass\n\n\nclass ModB(nn.Cell):\n def __init__(self, tensor):\n super(ModB, self).__init__()\n self.weight = Parameter(tensor, name=\"weight\")\n\n def construct(self, *inputs):\n pass\n\n\nclass ModC(nn.Cell):\n def __init__(self, ta, tb):\n super(ModC, self).__init__()\n self.mod1 = ModA(ta)\n self.mod2 = ModB(tb)\n\n def construct(self, *inputs):\n pass\n\n\nclass Net(nn.Cell):\n \"\"\" Net definition \"\"\"\n name_len = 4\n cells_num = 3\n\n def __init__(self, ta, tb):\n super(Net, self).__init__()\n self.mod1 = ModA(ta)\n self.mod2 = ModB(tb)\n self.mod3 = ModC(ta, tb)\n\n def construct(self, *inputs):\n pass\n\n\nclass Net2(nn.Cell):\n def __init__(self, ta, tb):\n super(Net2, self).__init__(auto_prefix=False)\n self.mod1 = ModA(ta)\n self.mod2 = ModB(tb)\n self.mod3 = ModC(ta, tb)\n\n def construct(self, *inputs):\n pass\n\n\nclass ConvNet(nn.Cell):\n \"\"\" ConvNet definition \"\"\"\n image_h = 224\n image_w = 224\n output_ch = 64\n\n def __init__(self, num_classes=10):\n super(ConvNet, self).__init__()\n self.conv1 = nn.Conv2d(3, ConvNet.output_ch, kernel_size=7, stride=2, pad_mode=\"pad\", padding=3)\n self.bn1 = nn.BatchNorm2d(ConvNet.output_ch)\n self.relu = nn.ReLU()\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode=\"same\")\n self.flatten = nn.Flatten()\n self.fc = nn.Dense(\n int(ConvNet.image_h * ConvNet.image_w * ConvNet.output_ch / (4 * 4)),\n num_classes)\n\n def construct(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n x = 
self.flatten(x)\n x = self.fc(x)\n return x\n\n\ndef test_basic():\n ta = Tensor(np.ones([2, 3]))\n tb = Tensor(np.ones([1, 4]))\n n = Net(ta, tb)\n names = list(n.parameters_dict().keys())\n assert len(names) == n.name_len\n assert names[0] == \"mod1.weight\"\n assert names[1] == \"mod2.weight\"\n assert names[2] == \"mod3.mod1.weight\"\n assert names[3] == \"mod3.mod2.weight\"\n\n\ndef test_parameter_name():\n \"\"\" test_parameter_name \"\"\"\n ta = Tensor(np.ones([2, 3]))\n tb = Tensor(np.ones([1, 4]))\n n = Net(ta, tb)\n names = []\n for m in n.parameters_and_names():\n if m[0]:\n names.append(m[0])\n assert names[0] == \"mod1.weight\"\n assert names[1] == \"mod2.weight\"\n assert names[2] == \"mod3.mod1.weight\"\n assert names[3] == \"mod3.mod2.weight\"\n\n\ndef test_cell_name():\n \"\"\" test_cell_name \"\"\"\n ta = Tensor(np.ones([2, 3]))\n tb = Tensor(np.ones([1, 4]))\n n = Net(ta, tb)\n n.insert_child_to_cell('modNone', None)\n names = []\n for m in n.cells_and_names():\n if m[0]:\n names.append(m[0])\n assert names[0] == \"mod1\"\n assert names[1] == \"mod2\"\n assert names[2] == \"mod3\"\n assert names[3] == \"mod3.mod1\"\n assert names[4] == \"mod3.mod2\"\n\n\ndef test_cells():\n ta = Tensor(np.ones([2, 3]))\n tb = Tensor(np.ones([1, 4]))\n n = Net(ta, tb)\n ch = list(n.cells())\n assert len(ch) == n.cells_num\n\n\ndef test_exceptions():\n \"\"\" test_exceptions \"\"\"\n t = Tensor(np.ones([2, 3]))\n\n class ModError(nn.Cell):\n def __init__(self, tensor):\n self.weight = Parameter(tensor, name=\"weight\")\n super(ModError, self).__init__()\n\n def construct(self, *inputs):\n pass\n\n with pytest.raises(AttributeError):\n ModError(t)\n\n class ModError1(nn.Cell):\n def __init__(self, tensor):\n super().__init__()\n self.weight = Parameter(tensor, name=\"weight\")\n self.weight = None\n self.weight = ModA(tensor)\n\n def construct(self, *inputs):\n pass\n\n with pytest.raises(TypeError):\n ModError1(t)\n\n class ModError2(nn.Cell):\n def __init__(self, tensor):\n super().__init__()\n self.mod = ModA(tensor)\n self.mod = None\n self.mod = tensor\n\n def construct(self, *inputs):\n pass\n\n with pytest.raises(TypeError):\n ModError2(t)\n\n m = nn.Cell()\n assert m.construct() is None\n\n\ndef test_cell_copy():\n net = ConvNet()\n copy.deepcopy(net)\n\n\ndef test_del():\n \"\"\" test_del \"\"\"\n ta = Tensor(np.ones([2, 3]))\n tb = Tensor(np.ones([1, 4]))\n n = Net(ta, tb)\n names = list(n.parameters_dict().keys())\n assert len(names) == n.name_len\n del n.mod1\n names = list(n.parameters_dict().keys())\n assert len(names) == n.name_len - 1\n with pytest.raises(AttributeError):\n del n.mod1.weight\n del n.mod2.weight\n names = list(n.parameters_dict().keys())\n assert len(names) == n.name_len - 2\n with pytest.raises(AttributeError):\n del n.mod\n\n\ndef test_add_attr():\n \"\"\" test_add_attr \"\"\"\n ta = Tensor(np.ones([2, 3]))\n tb = Tensor(np.ones([1, 4]))\n p = Parameter(ta, name=\"weight\")\n m = nn.Cell()\n m.insert_param_to_cell('weight', p)\n\n with pytest.raises(TypeError):\n m.insert_child_to_cell(\"network\", p)\n\n with pytest.raises(KeyError):\n m.insert_param_to_cell('', p)\n with pytest.raises(KeyError):\n m.insert_param_to_cell('a.b', p)\n m.insert_param_to_cell('weight', p)\n with pytest.raises(KeyError):\n m.insert_child_to_cell('', ModA(ta))\n with pytest.raises(KeyError):\n m.insert_child_to_cell('a.b', ModB(tb))\n\n with pytest.raises(TypeError):\n m.insert_child_to_cell('buffer', tb)\n with pytest.raises(TypeError):\n m.insert_param_to_cell('w', ta)\n with 
pytest.raises(TypeError):\n m.insert_child_to_cell('m', p)\n\n class ModAddCellError(nn.Cell):\n def __init__(self, tensor):\n self.mod = ModA(tensor)\n super().__init__()\n\n def construct(self, *inputs):\n pass\n\n with pytest.raises(AttributeError):\n ModAddCellError(ta)\n\n\ndef test_train_eval():\n m = nn.Cell()\n assert not m.training\n m.set_train()\n assert m.training\n m.set_train(False)\n assert not m.training\n\n\ndef test_stop_update_name():\n ta = Tensor(np.ones([2, 3]))\n tb = Tensor(np.ones([1, 4]))\n n = Net2(ta, tb)\n names = list(n.parameters_dict().keys())\n assert names[0] == \"weight\"\n assert names[1] == \"mod1.weight\"\n assert names[2] == \"mod2.weight\"\n\n\nclass ModelName(nn.Cell):\n def __init__(self, tensor):\n super(ModelName, self).__init__()\n self.w2 = Parameter(tensor, name=\"weight\")\n self.w1 = Parameter(tensor, name=\"weight\")\n self.w3 = Parameter(tensor, name=None)\n self.w4 = Parameter(tensor, name=None)\n\n def construct(self, *inputs):\n pass\n\n\ndef test_cell_names():\n ta = Tensor(np.ones([2, 3]))\n mn = ModelName(ta)\n with pytest.raises(ValueError):\n _cell_graph_executor.compile(mn)\n\n\nclass TestKwargsNet(nn.Cell):\n def __init__(self):\n super(TestKwargsNet, self).__init__()\n\n def construct(self, p1, p2, p3=False, p4=False):\n if p3:\n return p1\n if p4:\n return P.Add()(p1, p2)\n return p2\n\ndef test_kwargs_default_value1():\n \"\"\"\n Feature: Supports Cell kwargs inputs.\n Description: Pass kwargs.\n Expectation: No exception.\n \"\"\"\n x = Tensor([[1], [2], [3]], ms.float32)\n y = Tensor([[4], [5], [6]], ms.float32)\n net = TestKwargsNet()\n res = net(x, y, p4=True)\n print(res)\n\n\ndef test_kwargs_default_value2():\n \"\"\"\n Feature: Supports Cell kwargs inputs.\n Description: Pass kwargs.\n Expectation: No exception.\n \"\"\"\n # Tensor(np.array([1, 2, 3, 4]), ms.float32).reshape((1, 1, 2, 2))\n x = Tensor([[[[1.0, 2.0], [3.0, 4.0]]]], ms.float32)\n nn_op = nn.ResizeBilinear()\n res = nn_op(x, (4, 4), align_corners=True)\n print(res)\n", "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"test callback function.\"\"\"\nimport os\nimport platform\nimport stat\nimport secrets\nfrom unittest import mock\n\nimport numpy as np\nimport pytest\n\nimport mindspore.common.dtype as mstype\nimport mindspore.nn as nn\nfrom mindspore.common.api import ms_function\nfrom mindspore.common.tensor import Tensor\nfrom mindspore.nn import TrainOneStepCell, WithLossCell\nfrom mindspore.nn.optim import Momentum\nfrom mindspore.train.callback import ModelCheckpoint, RunContext, LossMonitor, _InternalCallbackParam, \\\n _CallbackManager, Callback, CheckpointConfig, _set_cur_net, _checkpoint_cb_for_save_op\nfrom mindspore.train.callback._checkpoint import _chg_ckpt_file_name_if_same_exist\n\n\nclass Net(nn.Cell):\n \"\"\"Net definition.\"\"\"\n\n def __init__(self):\n super(Net, self).__init__()\n self.conv = nn.Conv2d(3, 
64, 3, has_bias=False, weight_init='normal', pad_mode='valid')\n # 'valid' (no padding) shrinks the 224x224 input to 222x222 after the 3x3 conv,\n # matching the 64 * 222 * 222 fan-in of self.fc below\n self.bn = nn.BatchNorm2d(64)\n self.relu = nn.ReLU()\n self.flatten = nn.Flatten()\n self.fc = nn.Dense(64 * 222 * 222, 3)\n\n @ms_function\n def construct(self, x):\n x = self.conv(x)\n x = self.bn(x)\n x = self.relu(x)\n x = self.flatten(x)\n out = self.fc(x)\n return out\n\n\nclass LossNet(nn.Cell):\n \"\"\" LossNet definition \"\"\"\n\n def __init__(self):\n super(LossNet, self).__init__()\n self.conv = nn.Conv2d(3, 64, 3, has_bias=False, weight_init='normal', pad_mode='valid')\n self.bn = nn.BatchNorm2d(64)\n self.relu = nn.ReLU()\n self.flatten = nn.Flatten()\n self.fc = nn.Dense(64 * 222 * 222, 3) # 222 = 224 - 2 with pad_mode='valid'\n self.loss = nn.SoftmaxCrossEntropyWithLogits()\n\n @ms_function\n def construct(self, x, y):\n x = self.conv(x)\n x = self.bn(x)\n x = self.relu(x)\n x = self.flatten(x)\n x = self.fc(x)\n out = self.loss(x, y)\n return out\n\n\ndef test_model_checkpoint_prefix_invalid():\n \"\"\"Test ModelCheckpoint prefix invalid.\"\"\"\n with pytest.raises(ValueError):\n ModelCheckpoint(123)\n ModelCheckpoint(directory=\"./\")\n with pytest.raises(TypeError):\n ModelCheckpoint(config='type_error')\n ModelCheckpoint(config=CheckpointConfig())\n ModelCheckpoint(prefix=\"ckpt_2\", directory=\"./test_files\")\n\n\ndef test_loss_monitor_sink_mode():\n \"\"\"Test loss monitor sink mode.\"\"\"\n cb_params = _InternalCallbackParam()\n cb_params.cur_epoch_num = 4\n cb_params.epoch_num = 4\n cb_params.cur_step_num = 2\n cb_params.batch_num = 2\n cb_params.net_outputs = Tensor(2.0)\n run_context = RunContext(cb_params)\n loss_cb = LossMonitor(1)\n callbacks = [loss_cb]\n with _CallbackManager(callbacks) as callbacklist:\n callbacklist.begin(run_context)\n callbacklist.epoch_begin(run_context)\n callbacklist.step_begin(run_context)\n callbacklist.step_end(run_context)\n callbacklist.epoch_end(run_context)\n callbacklist.end(run_context)\n\n\ndef test_loss_monitor_normal_mode():\n \"\"\"Test loss monitor normal(non-sink) mode.\"\"\"\n cb_params = _InternalCallbackParam()\n run_context = RunContext(cb_params)\n loss_cb = LossMonitor(1)\n cb_params.cur_epoch_num = 4\n cb_params.epoch_num = 4\n cb_params.cur_step_num = 1\n cb_params.batch_num = 1\n cb_params.net_outputs = Tensor(2.0)\n loss_cb.begin(run_context)\n loss_cb.epoch_begin(run_context)\n loss_cb.step_begin(run_context)\n loss_cb.step_end(run_context)\n loss_cb.epoch_end(run_context)\n loss_cb.end(run_context)\n\n\ndef test_save_ckpt_and_test_chg_ckpt_file_name_if_same_exist():\n \"\"\"\n Feature: Save checkpoint and check if there is a file with the same name.\n Description: Save checkpoint and check if there is a file with the same name.\n Expectation: Checkpoint is saved and checking is successful.\n \"\"\"\n train_config = CheckpointConfig(\n save_checkpoint_steps=16,\n save_checkpoint_seconds=0,\n keep_checkpoint_max=5,\n keep_checkpoint_per_n_minutes=0)\n cb_params = _InternalCallbackParam()\n net = Net()\n loss = nn.SoftmaxCrossEntropyWithLogits()\n optim = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)\n network_ = WithLossCell(net, loss)\n _train_network = TrainOneStepCell(network_, optim)\n cb_params.train_network = _train_network\n cb_params.epoch_num = 10\n cb_params.cur_epoch_num = 5\n cb_params.cur_step_num = 0\n cb_params.batch_num = 32\n ckpoint_cb = ModelCheckpoint(prefix=\"test_ckpt\", directory='./test_files', config=train_config)\n run_context = RunContext(cb_params)\n ckpoint_cb.begin(run_context)\n ckpoint_cb.step_end(run_context)\n if 
os.path.exists('./test_files/test_ckpt-model.pkl'):\n os.chmod('./test_files/test_ckpt-model.pkl', stat.S_IWRITE)\n os.remove('./test_files/test_ckpt-model.pkl')\n _chg_ckpt_file_name_if_same_exist(directory=\"./test_files\", prefix=\"ckpt\")\n\n\ndef test_checkpoint_cb_for_save_op():\n \"\"\"Test checkpoint cb for save op.\"\"\"\n parameter_list = []\n one_param = {}\n one_param['name'] = \"conv1.weight\"\n one_param['data'] = Tensor(np.random.randint(0, 255, [1, 3, 224, 224]), dtype=mstype.float32)\n parameter_list.append(one_param)\n _checkpoint_cb_for_save_op(parameter_list)\n\n\ndef test_checkpoint_cb_for_save_op_update_net():\n \"\"\"Test checkpoint cb for save op.\"\"\"\n parameter_list = []\n one_param = {}\n one_param['name'] = \"conv.weight\"\n one_param['data'] = Tensor(np.ones(shape=(64, 3, 3, 3)), dtype=mstype.float32)\n parameter_list.append(one_param)\n net = Net()\n _set_cur_net(net)\n _checkpoint_cb_for_save_op(parameter_list)\n assert net.conv.weight.data.asnumpy()[0][0][0][0] == 1\n\n\ndef test_internal_callback_param():\n \"\"\"Test Internal CallbackParam.\"\"\"\n cb_params = _InternalCallbackParam()\n cb_params.member1 = 1\n cb_params.member2 = \"abc\"\n assert cb_params.member1 == 1\n assert cb_params.member2 == \"abc\"\n\n\ndef test_checkpoint_save_ckpt_steps():\n \"\"\"Test checkpoint save ckpt steps.\"\"\"\n train_config = CheckpointConfig(\n save_checkpoint_steps=16,\n save_checkpoint_seconds=0,\n keep_checkpoint_max=5,\n keep_checkpoint_per_n_minutes=0)\n ckpt_cb = ModelCheckpoint(config=train_config)\n cb_params = _InternalCallbackParam()\n net = Net()\n loss = nn.SoftmaxCrossEntropyWithLogits()\n optim = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)\n network_ = WithLossCell(net, loss)\n _train_network = TrainOneStepCell(network_, optim)\n cb_params.train_network = _train_network\n cb_params.epoch_num = 10\n cb_params.cur_epoch_num = 5\n cb_params.cur_step_num = 160\n cb_params.batch_num = 32\n run_context = RunContext(cb_params)\n ckpt_cb.begin(run_context)\n ckpt_cb.step_end(run_context)\n ckpt_cb2 = ModelCheckpoint(config=train_config)\n cb_params.cur_epoch_num = 1\n cb_params.cur_step_num = 15\n ckpt_cb2.begin(run_context)\n ckpt_cb2.step_end(run_context)\n\n\ndef test_checkpoint_save_ckpt_seconds():\n \"\"\"Test checkpoint save ckpt seconds.\"\"\"\n train_config = CheckpointConfig(\n save_checkpoint_steps=16,\n save_checkpoint_seconds=100,\n keep_checkpoint_max=0,\n keep_checkpoint_per_n_minutes=1)\n ckpt_cb = ModelCheckpoint(config=train_config)\n cb_params = _InternalCallbackParam()\n net = Net()\n loss = nn.SoftmaxCrossEntropyWithLogits()\n optim = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)\n network_ = WithLossCell(net, loss)\n _train_network = TrainOneStepCell(network_, optim)\n cb_params.train_network = _train_network\n cb_params.epoch_num = 10\n cb_params.cur_epoch_num = 4\n cb_params.cur_step_num = 128\n cb_params.batch_num = 32\n run_context = RunContext(cb_params)\n ckpt_cb.begin(run_context)\n ckpt_cb.step_end(run_context)\n ckpt_cb2 = ModelCheckpoint(config=train_config)\n cb_params.cur_epoch_num = 1\n cb_params.cur_step_num = 16\n ckpt_cb2.begin(run_context)\n ckpt_cb2.step_end(run_context)\n\n\ndef test_checkpoint_save_ckpt_with_encryption():\n \"\"\"Test checkpoint save ckpt with encryption.\"\"\"\n train_config = CheckpointConfig(\n save_checkpoint_steps=16,\n save_checkpoint_seconds=0,\n keep_checkpoint_max=5,\n keep_checkpoint_per_n_minutes=0,\n enc_key=secrets.token_bytes(16),\n 
enc_mode=\"AES-GCM\")\n ckpt_cb = ModelCheckpoint(config=train_config)\n cb_params = _InternalCallbackParam()\n net = Net()\n loss = nn.SoftmaxCrossEntropyWithLogits()\n optim = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)\n network_ = WithLossCell(net, loss)\n _train_network = TrainOneStepCell(network_, optim)\n cb_params.train_network = _train_network\n cb_params.epoch_num = 10\n cb_params.cur_epoch_num = 5\n cb_params.cur_step_num = 160\n cb_params.batch_num = 32\n run_context = RunContext(cb_params)\n ckpt_cb.begin(run_context)\n ckpt_cb.step_end(run_context)\n ckpt_cb2 = ModelCheckpoint(config=train_config)\n cb_params.cur_epoch_num = 1\n cb_params.cur_step_num = 15\n\n if platform.system().lower() == \"windows\":\n with pytest.raises(NotImplementedError):\n ckpt_cb2.begin(run_context)\n ckpt_cb2.step_end(run_context)\n else:\n ckpt_cb2.begin(run_context)\n ckpt_cb2.step_end(run_context)\n\n\ndef test_CallbackManager():\n \"\"\"TestCallbackManager.\"\"\"\n ck_obj = ModelCheckpoint()\n loss_cb_1 = LossMonitor(1)\n\n callbacks = [None]\n with pytest.raises(TypeError):\n _CallbackManager(callbacks)\n\n callbacks = ['Error']\n with pytest.raises(TypeError):\n _CallbackManager(callbacks)\n\n callbacks = [ck_obj, loss_cb_1, 'Error', None]\n with pytest.raises(TypeError):\n _CallbackManager(callbacks)\n\n\ndef test_CallbackManager_exit_called():\n with mock.patch.object(Callback, '__exit__', return_value=None) as mock_exit:\n cb1, cb2 = Callback(), Callback()\n with _CallbackManager([cb1, cb2]):\n pass\n for call_args in mock_exit.call_args_list:\n assert call_args == mock.call(mock.ANY, None, None, None)\n assert mock_exit.call_count == 2\n\n\ndef test_CallbackManager_exit_called_when_raises():\n with mock.patch.object(Callback, '__exit__', return_value=None) as mock_exit:\n cb1, cb2 = Callback(), Callback()\n with pytest.raises(ValueError):\n with _CallbackManager([cb1, cb2]):\n raise ValueError()\n for call_args in mock_exit.call_args_list:\n assert call_args == mock.call(*[mock.ANY] * 4)\n assert mock_exit.call_count == 2\n\n\ndef test_CallbackManager_begin_called():\n context = dict()\n with mock.patch.object(Callback, 'begin', return_value=None) as mock_begin:\n cb1, cb2 = Callback(), Callback()\n with _CallbackManager([cb1, cb2]) as cm:\n cm.begin(context)\n for call_args in mock_begin.call_args_list:\n assert call_args == mock.call(context)\n assert mock_begin.call_count == 2\n\n\ndef test_RunContext():\n \"\"\"Test RunContext.\"\"\"\n context_err = 666\n with pytest.raises(TypeError):\n RunContext(context_err)\n\n cb_params = _InternalCallbackParam()\n cb_params.member1 = 1\n cb_params.member2 = \"abc\"\n\n run_context = RunContext(cb_params)\n run_context.original_args()\n assert cb_params.member1 == 1\n assert cb_params.member2 == \"abc\"\n\n run_context.request_stop()\n should_stop = run_context.get_stop_requested()\n assert should_stop\n\n\ndef test_Checkpoint_Config():\n \"\"\"Test CheckpointConfig all None or 0.\"\"\"\n with pytest.raises(ValueError):\n CheckpointConfig(0, 0, 0, 0, True)\n\n with pytest.raises(ValueError):\n CheckpointConfig(0, None, 0, 0, True)\n\n\ndef test_step_end_save_graph():\n \"\"\"Test save checkpoint.\"\"\"\n train_config = CheckpointConfig(\n save_checkpoint_steps=16,\n save_checkpoint_seconds=0,\n keep_checkpoint_max=5,\n keep_checkpoint_per_n_minutes=0)\n cb_params = _InternalCallbackParam()\n net = LossNet()\n input_data = Tensor(np.random.randint(0, 255, [1, 3, 224, 224]).astype(np.float32))\n input_label = 
Tensor(np.random.randint(0, 3, [1, 3]).astype(np.float32))\n net(input_data, input_label)\n cb_params.train_network = net\n cb_params.epoch_num = 10\n cb_params.cur_epoch_num = 5\n cb_params.cur_step_num = 0\n cb_params.batch_num = 32\n ckpoint_cb = ModelCheckpoint(prefix=\"test\", directory='./test_files', config=train_config)\n run_context = RunContext(cb_params)\n ckpoint_cb.begin(run_context)\n ckpoint_cb.step_end(run_context)\n assert os.path.exists('./test_files/test-graph.meta')\n if os.path.exists('./test_files/test-graph.meta'):\n os.chmod('./test_files/test-graph.meta', stat.S_IWRITE)\n os.remove('./test_files/test-graph.meta')\n ckpoint_cb.step_end(run_context)\n assert not os.path.exists('./test_files/test-graph.meta')\n", "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"vgg_train_export.\"\"\"\n\nimport sys\nimport numpy as np\nfrom train_utils import save_inout, train_wrap\nfrom official.cv.vgg16.src.vgg import vgg16\nimport mindspore.common.dtype as mstype\nfrom mindspore import context, Tensor, nn\nfrom mindspore.train.serialization import export\n\ncontext.set_context(mode=context.GRAPH_MODE, device_target=\"GPU\", save_graphs=False)\n\nbatch = 2\n\nn = vgg16(num_classes=10)\nloss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=False)\noptimizer = nn.Momentum(n.trainable_params(), 0.01, 0.9, use_nesterov=False)\nnet = train_wrap(n, loss_fn, optimizer)\n\nx = Tensor(np.random.randn(batch, 3, 224, 224), mstype.float32)\nlabel = Tensor(np.zeros([batch, 10]).astype(np.float32))\nexport(net, x, label, file_name=\"mindir/vgg_train\", file_format='MINDIR')\n\nif len(sys.argv) > 1:\n save_inout(sys.argv[1] + \"vgg\", x, label, n, net)\n", "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\nimport mindspore.context as context\nfrom mindspore import Tensor, ms_function\nfrom mindspore.common import dtype as mstype\nfrom mindspore import ops\nimport mindspore.nn as nn\nimport numpy as np\n\nZERO = Tensor([0], mstype.int32)\nONE = Tensor([1], mstype.int32)\n\n\n@ms_function\ndef f(x):\n y = ZERO\n if x < 0:\n y = f(x - 3)\n elif x < 3:\n y = x * f(x - 1)\n elif x < 5:\n y = x * f(x - 2)\n else:\n y = f(x - 4)\n z = y + 1\n return z\n\n\n@ms_function\ndef fr(x):\n y = ZERO\n if x < 0:\n y = ONE\n elif x < 3:\n y = x * fr(x - 1)\n elif x 
< 5:\n y = x * fr(x - 2)\n else:\n y = fr(x - 4)\n z = y + 1\n return z\n\n\n@ms_function\ndef f_pythonerr(x):\n if x > 0:\n return f_pythonerr(x - 1)\n # NOT_DEF is deliberately undefined; test_python_error expects the NameError\n return NOT_DEF\n\n\ndef test_python_error():\n context.set_context(mode=context.GRAPH_MODE)\n x = Tensor([5], mstype.int32)\n try:\n f_pythonerr(x)\n except NameError as e:\n assert 'not defined' in str(e)\n\n\n@ms_function\ndef f_recursive_endless(x):\n if x > 0:\n return f_recursive_endless(x - 1)\n return f_recursive_endless(x + 1)\n\n\ndef test_recursive_endless():\n context.set_context(mode=context.GRAPH_MODE)\n x = Tensor([5], mstype.int32)\n try:\n f_recursive_endless(x)\n except RuntimeError as e:\n assert 'loop' in str(e)\n\n\ndef test_endless():\n context.set_context(mode=context.GRAPH_MODE)\n x = Tensor([5], mstype.int32)\n try:\n f(x)\n except RuntimeError as e:\n assert 'loop' in str(e)\n\n\n@ms_function\ndef f_ok(x):\n if x > 0:\n return f_ok(x - 1) + 1\n return ONE\n\n\ndef test_f_ok():\n context.set_context(mode=context.GRAPH_MODE)\n x = Tensor([3], mstype.int32)\n ret = f_ok(x)\n expect = Tensor([4], mstype.int32)\n assert ret == expect\n\n\ndef test_recursive_fun():\n context.set_context(mode=context.GRAPH_MODE)\n x = Tensor([5], mstype.int32)\n ret = fr(x)\n expect = Tensor([3], mstype.int32)\n assert ret == expect\n\ndef test_branch_value_compatible():\n \"\"\"\n Feature: control flow\n Description: test that each branch's value must be compatible with the other branch.\n Expectation: Join Failed\n \"\"\"\n class IfInWhileNet(nn.Cell):\n def __init__(self):\n super().__init__()\n self.expand_dims = ops.ExpandDims()\n\n def construct(self, x, y, i):\n out = x\n while i < 3:\n if x + i < y:\n out = out + x\n else:\n out = out + y\n out = out + 1\n out = self.expand_dims(out, -1)\n i = i + 1\n return out\n\n forward_net = IfInWhileNet()\n i = Tensor(np.array(0), dtype=mstype.int32)\n x = Tensor(np.array(0), dtype=mstype.int32)\n y = Tensor(np.array(1), dtype=mstype.int32)\n\n try:\n forward_net(x, y, i)\n except ValueError as e:\n assert 'Join Failed' in str(e)\n\nif __name__ == \"__main__\":\n test_endless()\n", "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\nimport numpy as np\nimport pytest\n\nimport mindspore.context as context\nimport mindspore.nn as nn\nfrom mindspore import Tensor\nfrom mindspore.ops import operations as P\n\ncontext.set_context(mode=context.GRAPH_MODE, device_target='CPU')\n\n\nclass OpNetWrapper(nn.Cell):\n def __init__(self, op):\n super(OpNetWrapper, self).__init__()\n self.op = op\n\n def construct(self, *inputs):\n return self.op(*inputs)\n\n\[email protected]\[email protected]_x86_cpu\[email protected]_onecard\ndef test_int32():\n op = P.Greater()\n op_wrapper = OpNetWrapper(op)\n\n input_x = Tensor(np.array([1, 2, 3]).astype(np.int32))\n input_y = Tensor(np.array([3, 2, 1]).astype(np.int32))\n outputs = op_wrapper(input_x, input_y)\n\n print(outputs)\n assert outputs.shape == (3,)\n assert np.allclose(outputs.asnumpy(), [False, False, True])\n\n\[email protected]\[email protected]_x86_cpu\[email protected]_onecard\ndef test_float32():\n op = P.Greater()\n op_wrapper = OpNetWrapper(op)\n\n input_x = Tensor(np.array([1, 2, -1]).astype(np.float32))\n input_y = Tensor(np.array([-3, 2, -1]).astype(np.float32))\n outputs = op_wrapper(input_x, input_y)\n\n print(outputs)\n assert outputs.shape == (3,)\n assert np.allclose(outputs.asnumpy(), [True, False, False])\n\n\[email protected]\[email protected]_x86_cpu\[email protected]_onecard\ndef test_float64():\n \"\"\"\n Feature: Greater op on CPU\n Description: test cases for Greater with float64 inputs\n Expectation: the result matches numpy\n \"\"\"\n op = P.Greater()\n op_wrapper = OpNetWrapper(op)\n\n input_x = Tensor(np.array([1, 2, -1]).astype(np.float64))\n input_y = Tensor(np.array([-3, 2, -1]).astype(np.float64))\n outputs = op_wrapper(input_x, input_y)\n\n print(outputs)\n assert outputs.shape == (3,)\n assert np.allclose(outputs.asnumpy(), [True, False, False])\n\nif __name__ == '__main__':\n test_int32()\n test_float32()\n test_float64()\n", "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\nimport numpy as np\nimport pytest\n\nimport mindspore.context as context\nimport mindspore.nn as nn\nfrom mindspore import Tensor\nfrom mindspore.ops.operations import _grad_ops as G\n\ncontext.set_context(mode=context.GRAPH_MODE, device_target=\"GPU\")\nnp.random.seed(0)\n\nclass LayerNormGradGradNet(nn.Cell):\n def __init__(self, begin_norm_axis, begin_params_axis):\n super(LayerNormGradGradNet, self).__init__()\n self.norm = G.LayerNormGradGrad(begin_norm_axis, begin_params_axis)\n\n def construct(self, x, dy, var, mean, gamma, grad_dx, grad_dg, grad_db):\n return self.norm(x, dy, var, mean, gamma, grad_dx, grad_dg, grad_db)\n\n\n
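# Editor's note (comments added during review; not part of the original file): with\n# mu and var the moments over norm_axis, inv_std = (var + eps)**-0.5 and\n# x_hat = (x - mu) * inv_std, the reference below is the standard LayerNorm backward:\n#   dg = sum_{param_axis}(dy * x_hat),  db = sum_{param_axis}(dy)\n#   dx = dy * gamma * inv_std              (direct term, dx1)\n#      + dL/dvar * 2 * (x - mu) / num      (through the variance, dx2)\n#      + dL/dmu / num                      (through the mean, dx3)\n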
def LayerNormGradReference(x, dy, gamma, epsilon, begin_norm_axis, begin_params_axis):\n begin_norm_axis = begin_norm_axis if begin_norm_axis >= 0 else begin_norm_axis + len(x.shape)\n begin_params_axis = begin_params_axis if begin_params_axis >= 0 else begin_params_axis + len(x.shape)\n\n norm_axis = [i for i in range(begin_norm_axis, len(x.shape))]\n param_axis = [i for i in range(0, begin_params_axis)]\n num = 1\n for i in range(begin_norm_axis, len(x.shape)):\n num *= x.shape[i]\n\n mean = np.mean(x, axis=tuple(norm_axis), keepdims=True)\n var = np.var(x, axis=tuple(norm_axis), keepdims=True)\n\n gamma = gamma.reshape((*((1,) * begin_params_axis), *x.shape[begin_params_axis:]))\n dg = np.sum(dy * np.power(var + epsilon, -0.5) * (x - mean), axis=tuple(param_axis), keepdims=True)\n db = np.sum(dy, axis=tuple(param_axis), keepdims=True)\n\n sum1 = np.sum((-0.5) * dy * gamma * (x - mean) * np.power(var + epsilon, -1.5), axis=tuple(norm_axis),\n keepdims=True)\n sum2 = np.sum(dy * gamma, axis=tuple(norm_axis), keepdims=True)\n sum3 = np.sum(-2.0 * (x - mean), axis=tuple(norm_axis), keepdims=True)\n\n dx1 = dy * 
gamma * np.power(var + epsilon, -0.5)\n dx2 = sum1 * 2.0 / num * (x - mean)\n dx3 = ((-1.0) * np.power(var + epsilon, -0.5) * sum2 + (1.0 / num) * sum1 * sum3) * (1.0 / num)\n dx = dx1 + dx2 + dx3\n return dx, dg, db, mean, var\n\n\ndef LayerNormGradGradReference(x, dy, gamma, epsilon, grad_dx_np, grad_dg_np, grad_db_np, begin_norm_axis,\n begin_params_axis):\n begin_norm_axis = begin_norm_axis if begin_norm_axis >= 0 else begin_norm_axis + len(x.shape)\n begin_params_axis = begin_params_axis if begin_params_axis >= 0 else begin_params_axis + len(x.shape)\n\n norm_axis = tuple([i for i in range(begin_norm_axis, len(x.shape))])\n param_axis = [i for i in range(0, begin_params_axis)]\n num = 1\n for i in range(begin_norm_axis, len(x.shape)):\n num *= x.shape[i]\n\n mean = np.mean(x, tuple(norm_axis), keepdims=True)\n var = np.mean(np.power((x - mean), 2), tuple(norm_axis), keepdims=True)\n inv_std = np.power(var + epsilon, -0.5)\n x_hat = (x - mean) * inv_std\n\n sum1 = np.mean(-inv_std * grad_dx_np, tuple(norm_axis), keepdims=True)\n sum2 = np.mean(-x_hat * inv_std * grad_dx_np, tuple(norm_axis), keepdims=True)\n sum3 = np.mean(dy * gamma, tuple(norm_axis), keepdims=True)\n sum4 = np.mean(dy * gamma * x_hat, tuple(norm_axis), keepdims=True)\n part_sum1 = dy * gamma - sum3 - x_hat * sum4\n part_sum2 = dy * gamma * sum2 - sum4 * grad_dx_np * inv_std + dy * grad_dg_np\n part1 = np.mean(grad_dx_np * part_sum1, tuple(norm_axis), keepdims=True)\n part2 = np.mean((x - mean) * part_sum2, tuple(norm_axis), keepdims=True)\n part3 = inv_std * part_sum2\n sum5 = np.mean(part1, tuple(norm_axis), keepdims=True)\n sum6 = np.mean(part2, tuple(norm_axis), keepdims=True)\n sum7 = np.mean(-part3, tuple(norm_axis), keepdims=True)\n part4 = -(x - mean) * np.power(var + epsilon, -1.5) * (sum5 + sum6)\n d_x = part3 + part4 + sum7\n\n part5 = gamma * grad_dx_np * inv_std\n part6 = gamma * sum1\n part7 = gamma * x_hat * sum2\n part8 = x_hat * grad_dg_np\n d_dy = part5 + part6 + part7 + part8 + grad_db_np\n\n part9 = np.sum(dy * x_hat * sum2, tuple(param_axis), keepdims=True)\n part10 = np.sum(dy * sum1, tuple(param_axis), keepdims=True)\n part11 = np.sum(dy * grad_dx_np * inv_std, tuple(param_axis), keepdims=True)\n d_gamma = part9 + part10 + part11\n\n return d_x, d_dy, d_gamma\n\n\[email protected]\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_layernormgradgrad0():\n begin_norm_axis = 1\n begin_params_axis = 1\n\n x_np = np.random.randn(4096, 3072).astype(np.float32)\n dy_np = np.random.randn(4096, 3072).astype(np.float32)\n gamma_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n\n grad_dx_np = np.random.randn(*x_np.shape).astype(np.float32)\n grad_dg_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n grad_db_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n\n epsilon = 1e-12\n _, _, _, mean_np, var_np = LayerNormGradReference(x_np, dy_np, gamma_np, epsilon, begin_norm_axis,\n begin_params_axis)\n\n d_x_np, d_dy_np, d_gamma_np = LayerNormGradGradReference(x_np, dy_np, gamma_np, epsilon, grad_dx_np, grad_dg_np,\n grad_db_np, begin_norm_axis, begin_params_axis)\n\n dy_ms = Tensor(dy_np.astype(np.float32))\n x_ms = Tensor(x_np.astype(np.float32))\n var_ms = Tensor(var_np.astype(np.float32))\n mean_ms = Tensor(mean_np.astype(np.float32))\n gamma_ms = Tensor(gamma_np.astype(np.float32))\n grad_dx_ms = Tensor(grad_dx_np.astype(np.float32))\n grad_dg_ms = Tensor(grad_dg_np.astype(np.float32))\n grad_db_ms = 
Tensor(grad_db_np.astype(np.float32))\n\n net = LayerNormGradGradNet(begin_norm_axis, begin_params_axis)\n d_x_ms, d_dy_ms, d_gamma_ms = net(x_ms, dy_ms, var_ms, mean_ms, gamma_ms, grad_dx_ms, grad_dg_ms, grad_db_ms)\n\n assert np.allclose(d_x_ms.asnumpy(), d_x_np, rtol=3e-3, atol=3e-3)\n assert np.allclose(d_dy_ms.asnumpy(), d_dy_np, rtol=3e-3, atol=3e-3)\n assert np.allclose(d_gamma_ms.asnumpy(), d_gamma_np, rtol=3e-3, atol=3e-3)\n\n\[email protected]\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_layernormgradgrad1():\n begin_norm_axis = 1\n begin_params_axis = 1\n x_np = np.random.randn(640, 768).astype(np.float32)\n dy_np = np.random.randn(640, 768).astype(np.float32)\n gamma_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n\n grad_dx_np = np.random.randn(*x_np.shape).astype(np.float32)\n grad_dg_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n grad_db_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n\n epsilon = 1e-12\n _, _, _, mean_np, var_np = LayerNormGradReference(x_np, dy_np, gamma_np, epsilon, begin_norm_axis,\n begin_params_axis)\n\n d_x_np, d_dy_np, d_gamma_np = LayerNormGradGradReference(x_np, dy_np, gamma_np, epsilon, grad_dx_np, grad_dg_np,\n grad_db_np, begin_norm_axis, begin_params_axis)\n\n dy_ms = Tensor(dy_np.astype(np.float32))\n x_ms = Tensor(x_np.astype(np.float32))\n var_ms = Tensor(var_np.astype(np.float32))\n mean_ms = Tensor(mean_np.astype(np.float32))\n gamma_ms = Tensor(gamma_np.astype(np.float32))\n grad_dx_ms = Tensor(grad_dx_np.astype(np.float32))\n grad_dg_ms = Tensor(grad_dg_np.astype(np.float32))\n grad_db_ms = Tensor(grad_db_np.astype(np.float32))\n\n net = LayerNormGradGradNet(begin_norm_axis, begin_params_axis)\n d_x_ms, d_dy_ms, d_gamma_ms = net(x_ms, dy_ms, var_ms, mean_ms, gamma_ms, grad_dx_ms, grad_dg_ms, grad_db_ms)\n\n assert np.allclose(d_x_ms.asnumpy(), d_x_np, rtol=3e-3, atol=3e-3)\n assert np.allclose(d_dy_ms.asnumpy(), d_dy_np, rtol=3e-3, atol=3e-3)\n assert np.allclose(d_gamma_ms.asnumpy(), d_gamma_np, rtol=3e-3, atol=3e-3)\n\n\[email protected]\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_layernormgradgrad2():\n begin_norm_axis = -1\n begin_params_axis = -1\n x_np = np.random.randn(32, 128, 768).astype(np.float32)\n dy_np = np.random.randn(32, 128, 768).astype(np.float32)\n gamma_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n\n grad_dx_np = np.random.randn(*x_np.shape).astype(np.float32)\n grad_dg_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n grad_db_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n\n epsilon = 1e-12\n _, _, _, mean_np, var_np = LayerNormGradReference(x_np, dy_np, gamma_np, epsilon, begin_norm_axis,\n begin_params_axis)\n\n d_x_np, d_dy_np, d_gamma_np = LayerNormGradGradReference(x_np, dy_np, gamma_np, epsilon, grad_dx_np, grad_dg_np,\n grad_db_np, begin_norm_axis, begin_params_axis)\n\n dy_ms = Tensor(dy_np.astype(np.float32))\n x_ms = Tensor(x_np.astype(np.float32))\n var_ms = Tensor(var_np.astype(np.float32))\n mean_ms = Tensor(mean_np.astype(np.float32))\n gamma_ms = Tensor(gamma_np.astype(np.float32))\n grad_dx_ms = Tensor(grad_dx_np.astype(np.float32))\n grad_dg_ms = Tensor(grad_dg_np.astype(np.float32))\n grad_db_ms = Tensor(grad_db_np.astype(np.float32))\n\n net = LayerNormGradGradNet(begin_norm_axis, begin_params_axis)\n d_x_ms, d_dy_ms, d_gamma_ms = net(x_ms, dy_ms, var_ms, mean_ms, gamma_ms, grad_dx_ms, 
grad_dg_ms, grad_db_ms)\n\n assert np.allclose(d_x_ms.asnumpy(), d_x_np, rtol=3e-3, atol=3e-3)\n assert np.allclose(d_dy_ms.asnumpy(), d_dy_np, rtol=3e-3, atol=3e-3)\n assert np.allclose(d_gamma_ms.asnumpy(), d_gamma_np, rtol=3e-3, atol=3e-3)\n\n\[email protected]\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_layernormgradgrad3():\n begin_norm_axis = -1\n begin_params_axis = -1\n x_np = np.random.randn(32, 64).astype(np.float32)\n dy_np = np.random.randn(32, 64).astype(np.float32)\n gamma_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n\n grad_dx_np = np.random.randn(*x_np.shape).astype(np.float32)\n grad_dg_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n grad_db_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n\n epsilon = 1e-12\n _, _, _, mean_np, var_np = LayerNormGradReference(x_np, dy_np, gamma_np, epsilon, begin_norm_axis,\n begin_params_axis)\n\n d_x_np, d_dy_np, d_gamma_np = LayerNormGradGradReference(x_np, dy_np, gamma_np, epsilon, grad_dx_np, grad_dg_np,\n grad_db_np, begin_norm_axis, begin_params_axis)\n\n dy_ms = Tensor(dy_np.astype(np.float32))\n x_ms = Tensor(x_np.astype(np.float32))\n var_ms = Tensor(var_np.astype(np.float32))\n mean_ms = Tensor(mean_np.astype(np.float32))\n gamma_ms = Tensor(gamma_np.astype(np.float32))\n grad_dx_ms = Tensor(grad_dx_np.astype(np.float32))\n grad_dg_ms = Tensor(grad_dg_np.astype(np.float32))\n grad_db_ms = Tensor(grad_db_np.astype(np.float32))\n\n net = LayerNormGradGradNet(begin_norm_axis, begin_params_axis)\n d_x_ms, d_dy_ms, d_gamma_ms = net(x_ms, dy_ms, var_ms, mean_ms, gamma_ms, grad_dx_ms, grad_dg_ms, grad_db_ms)\n\n assert np.allclose(d_x_ms.asnumpy(), d_x_np, rtol=3e-3, atol=3e-3)\n assert np.allclose(d_dy_ms.asnumpy(), d_dy_np, rtol=3e-3, atol=3e-3)\n assert np.allclose(d_gamma_ms.asnumpy(), d_gamma_np, rtol=3e-3, atol=3e-3)\n\n\[email protected]\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_layernormgradgrad4():\n begin_norm_axis = -1\n begin_params_axis = -1\n x_np = np.random.randn(32, 64).astype(np.float32)\n dy_np = np.random.randn(32, 64).astype(np.float32)\n gamma_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n\n grad_dx_np = np.random.randn(*x_np.shape).astype(np.float32)\n grad_dg_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n grad_db_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n\n epsilon = 1e-12\n _, _, _, mean_np, var_np = LayerNormGradReference(x_np, dy_np, gamma_np, epsilon, begin_norm_axis,\n begin_params_axis)\n\n d_x_np, d_dy_np, d_gamma_np = LayerNormGradGradReference(x_np, dy_np, gamma_np, epsilon, grad_dx_np, grad_dg_np,\n grad_db_np, begin_norm_axis, begin_params_axis)\n\n dy_ms = Tensor(dy_np.astype(np.float32))\n x_ms = Tensor(x_np.astype(np.float32))\n var_ms = Tensor(var_np.astype(np.float32))\n mean_ms = Tensor(mean_np.astype(np.float32))\n gamma_ms = Tensor(gamma_np.astype(np.float32))\n grad_dx_ms = Tensor(grad_dx_np.astype(np.float32))\n grad_dg_ms = Tensor(grad_dg_np.astype(np.float32))\n grad_db_ms = Tensor(grad_db_np.astype(np.float32))\n\n net = LayerNormGradGradNet(begin_norm_axis, begin_params_axis)\n d_x_ms, d_dy_ms, d_gamma_ms = net(x_ms, dy_ms, var_ms, mean_ms, gamma_ms, grad_dx_ms, grad_dg_ms, grad_db_ms)\n\n assert np.allclose(d_x_ms.asnumpy(), d_x_np, rtol=3e-3, atol=3e-3)\n assert np.allclose(d_dy_ms.asnumpy(), d_dy_np, rtol=3e-3, atol=3e-3)\n assert np.allclose(d_gamma_ms.asnumpy(), 
d_gamma_np, rtol=3e-3, atol=3e-3)\n\n\[email protected]\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_layernormgradgrad5():\n begin_norm_axis = 2\n begin_params_axis = 1\n x_np = np.random.randn(128, 2, 16, 32).astype(np.float32)\n dy_np = np.random.randn(128, 2, 16, 32).astype(np.float32)\n gamma_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n\n grad_dx_np = np.random.randn(*x_np.shape).astype(np.float32)\n grad_dg_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n grad_db_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n\n epsilon = 1e-12\n _, _, _, mean_np, var_np = LayerNormGradReference(x_np, dy_np, gamma_np, epsilon, begin_norm_axis,\n begin_params_axis)\n\n d_x_np, d_dy_np, d_gamma_np = LayerNormGradGradReference(x_np, dy_np, gamma_np, epsilon, grad_dx_np, grad_dg_np,\n grad_db_np, begin_norm_axis, begin_params_axis)\n\n dy_ms = Tensor(dy_np.astype(np.float32))\n x_ms = Tensor(x_np.astype(np.float32))\n var_ms = Tensor(var_np.astype(np.float32))\n mean_ms = Tensor(mean_np.astype(np.float32))\n gamma_ms = Tensor(gamma_np.astype(np.float32))\n grad_dx_ms = Tensor(grad_dx_np.astype(np.float32))\n grad_dg_ms = Tensor(grad_dg_np.astype(np.float32))\n grad_db_ms = Tensor(grad_db_np.astype(np.float32))\n\n net = LayerNormGradGradNet(begin_norm_axis, begin_params_axis)\n d_x_ms, d_dy_ms, d_gamma_ms = net(x_ms, dy_ms, var_ms, mean_ms, gamma_ms, grad_dx_ms, grad_dg_ms, grad_db_ms)\n\n assert np.allclose(d_x_ms.asnumpy(), d_x_np, rtol=3e-3, atol=3e-3)\n assert np.allclose(d_dy_ms.asnumpy(), d_dy_np, rtol=3e-3, atol=3e-3)\n assert np.allclose(d_gamma_ms.asnumpy(), d_gamma_np, rtol=3e-3, atol=3e-3)\n\n\ndef test_layernormgradgrad6():\n begin_norm_axis = 1\n begin_params_axis = 1\n\n x_np = np.random.randn(4096, 3072).astype(np.float32)\n dy_np = np.random.randn(4096, 3072).astype(np.float32)\n gamma_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n\n grad_dx_np = np.random.randn(*x_np.shape).astype(np.float32)\n grad_dg_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n grad_db_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n\n epsilon = 1e-7\n _, _, _, mean_np, var_np = LayerNormGradReference(x_np, dy_np, gamma_np, epsilon, begin_norm_axis,\n begin_params_axis)\n\n d_x_np, d_dy_np, d_gamma_np = LayerNormGradGradReference(x_np, dy_np, gamma_np, epsilon, grad_dx_np, grad_dg_np,\n grad_db_np, begin_norm_axis, begin_params_axis)\n\n dy_ms = Tensor(dy_np.astype(np.float16))\n x_ms = Tensor(x_np.astype(np.float16))\n var_ms = Tensor(var_np.astype(np.float16))\n mean_ms = Tensor(mean_np.astype(np.float16))\n gamma_ms = Tensor(gamma_np.astype(np.float16))\n grad_dx_ms = Tensor(grad_dx_np.astype(np.float16))\n grad_dg_ms = Tensor(grad_dg_np.astype(np.float16))\n grad_db_ms = Tensor(grad_db_np.astype(np.float16))\n\n net = LayerNormGradGradNet(begin_norm_axis, begin_params_axis)\n d_x_ms, d_dy_ms, d_gamma_ms = net(x_ms, dy_ms, var_ms, mean_ms, gamma_ms, grad_dx_ms, grad_dg_ms, grad_db_ms)\n\n assert np.allclose(d_x_ms.asnumpy(), d_x_np, rtol=5e-3, atol=5e-1)\n assert np.allclose(d_dy_ms.asnumpy(), d_dy_np, rtol=5e-3, atol=5e-1)\n assert np.allclose(d_gamma_ms.asnumpy(), d_gamma_np, rtol=5e-3, atol=5e-1)\n\n\[email protected]\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_layernormgradgrad7():\n begin_norm_axis = 1\n begin_params_axis = 1\n x_np = np.random.randn(640, 768).astype(np.float32)\n dy_np = 
np.random.randn(640, 768).astype(np.float32)\n gamma_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n\n grad_dx_np = np.random.randn(*x_np.shape).astype(np.float32)\n grad_dg_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n grad_db_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n\n epsilon = 1e-7\n _, _, _, mean_np, var_np = LayerNormGradReference(x_np, dy_np, gamma_np, epsilon, begin_norm_axis,\n begin_params_axis)\n\n d_x_np, d_dy_np, d_gamma_np = LayerNormGradGradReference(x_np, dy_np, gamma_np, epsilon, grad_dx_np, grad_dg_np,\n grad_db_np, begin_norm_axis, begin_params_axis)\n\n dy_ms = Tensor(dy_np.astype(np.float16))\n x_ms = Tensor(x_np.astype(np.float16))\n var_ms = Tensor(var_np.astype(np.float16))\n mean_ms = Tensor(mean_np.astype(np.float16))\n gamma_ms = Tensor(gamma_np.astype(np.float16))\n grad_dx_ms = Tensor(grad_dx_np.astype(np.float16))\n grad_dg_ms = Tensor(grad_dg_np.astype(np.float16))\n grad_db_ms = Tensor(grad_db_np.astype(np.float16))\n\n net = LayerNormGradGradNet(begin_norm_axis, begin_params_axis)\n d_x_ms, d_dy_ms, d_gamma_ms = net(x_ms, dy_ms, var_ms, mean_ms, gamma_ms, grad_dx_ms, grad_dg_ms, grad_db_ms)\n\n assert np.allclose(d_x_ms.asnumpy(), d_x_np, rtol=5e-3, atol=5e-1)\n assert np.allclose(d_dy_ms.asnumpy(), d_dy_np, rtol=5e-3, atol=5e-1)\n assert np.allclose(d_gamma_ms.asnumpy(), d_gamma_np, rtol=5e-3, atol=5e-1)\n\n\[email protected]\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_layernormgradgrad8():\n begin_norm_axis = -1\n begin_params_axis = -1\n x_np = np.random.randn(32, 128, 768).astype(np.float32)\n dy_np = np.random.randn(32, 128, 768).astype(np.float32)\n gamma_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n\n grad_dx_np = np.random.randn(*x_np.shape).astype(np.float32)\n grad_dg_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n grad_db_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n\n epsilon = 1e-7\n _, _, _, mean_np, var_np = LayerNormGradReference(x_np, dy_np, gamma_np, epsilon, begin_norm_axis,\n begin_params_axis)\n\n d_x_np, d_dy_np, d_gamma_np = LayerNormGradGradReference(x_np, dy_np, gamma_np, epsilon, grad_dx_np, grad_dg_np,\n grad_db_np, begin_norm_axis, begin_params_axis)\n\n dy_ms = Tensor(dy_np.astype(np.float16))\n x_ms = Tensor(x_np.astype(np.float16))\n var_ms = Tensor(var_np.astype(np.float16))\n mean_ms = Tensor(mean_np.astype(np.float16))\n gamma_ms = Tensor(gamma_np.astype(np.float16))\n grad_dx_ms = Tensor(grad_dx_np.astype(np.float16))\n grad_dg_ms = Tensor(grad_dg_np.astype(np.float16))\n grad_db_ms = Tensor(grad_db_np.astype(np.float16))\n\n net = LayerNormGradGradNet(begin_norm_axis, begin_params_axis)\n d_x_ms, d_dy_ms, d_gamma_ms = net(x_ms, dy_ms, var_ms, mean_ms, gamma_ms, grad_dx_ms, grad_dg_ms, grad_db_ms)\n\n assert np.allclose(d_x_ms.asnumpy(), d_x_np, rtol=5e-3, atol=5e-1)\n assert np.allclose(d_dy_ms.asnumpy(), d_dy_np, rtol=5e-3, atol=5e-1)\n assert np.allclose(d_gamma_ms.asnumpy(), d_gamma_np, rtol=5e-3, atol=5e-1)\n\[email protected]\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_layernormgradgrad9():\n begin_norm_axis = -1\n begin_params_axis = -1\n x_np = np.random.randn(32, 64).astype(np.float32)\n dy_np = np.random.randn(32, 64).astype(np.float32)\n gamma_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n\n grad_dx_np = np.random.randn(*x_np.shape).astype(np.float32)\n 
grad_dg_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n grad_db_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n\n epsilon = 1e-7\n _, _, _, mean_np, var_np = LayerNormGradReference(x_np, dy_np, gamma_np, epsilon, begin_norm_axis,\n begin_params_axis)\n\n d_x_np, d_dy_np, d_gamma_np = LayerNormGradGradReference(x_np, dy_np, gamma_np, epsilon, grad_dx_np, grad_dg_np,\n grad_db_np, begin_norm_axis, begin_params_axis)\n\n dy_ms = Tensor(dy_np.astype(np.float16))\n x_ms = Tensor(x_np.astype(np.float16))\n var_ms = Tensor(var_np.astype(np.float16))\n mean_ms = Tensor(mean_np.astype(np.float16))\n gamma_ms = Tensor(gamma_np.astype(np.float16))\n grad_dx_ms = Tensor(grad_dx_np.astype(np.float16))\n grad_dg_ms = Tensor(grad_dg_np.astype(np.float16))\n grad_db_ms = Tensor(grad_db_np.astype(np.float16))\n\n net = LayerNormGradGradNet(begin_norm_axis, begin_params_axis)\n d_x_ms, d_dy_ms, d_gamma_ms = net(x_ms, dy_ms, var_ms, mean_ms, gamma_ms, grad_dx_ms, grad_dg_ms, grad_db_ms)\n\n assert np.allclose(d_x_ms.asnumpy(), d_x_np, rtol=5e-3, atol=5e-1)\n assert np.allclose(d_dy_ms.asnumpy(), d_dy_np, rtol=5e-3, atol=5e-1)\n assert np.allclose(d_gamma_ms.asnumpy(), d_gamma_np, rtol=5e-3, atol=5e-1)\n\n\[email protected]\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_layernormgradgrad10():\n begin_norm_axis = -1\n begin_params_axis = -1\n x_np = np.random.randn(32, 64).astype(np.float32)\n dy_np = np.random.randn(32, 64).astype(np.float32)\n gamma_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n\n grad_dx_np = np.random.randn(*x_np.shape).astype(np.float32)\n grad_dg_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n grad_db_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n\n epsilon = 1e-7\n _, _, _, mean_np, var_np = LayerNormGradReference(x_np, dy_np, gamma_np, epsilon, begin_norm_axis,\n begin_params_axis)\n\n d_x_np, d_dy_np, d_gamma_np = LayerNormGradGradReference(x_np, dy_np, gamma_np, epsilon, grad_dx_np, grad_dg_np,\n grad_db_np, begin_norm_axis, begin_params_axis)\n\n dy_ms = Tensor(dy_np.astype(np.float16))\n x_ms = Tensor(x_np.astype(np.float16))\n var_ms = Tensor(var_np.astype(np.float16))\n mean_ms = Tensor(mean_np.astype(np.float16))\n gamma_ms = Tensor(gamma_np.astype(np.float16))\n grad_dx_ms = Tensor(grad_dx_np.astype(np.float16))\n grad_dg_ms = Tensor(grad_dg_np.astype(np.float16))\n grad_db_ms = Tensor(grad_db_np.astype(np.float16))\n\n net = LayerNormGradGradNet(begin_norm_axis, begin_params_axis)\n d_x_ms, d_dy_ms, d_gamma_ms = net(x_ms, dy_ms, var_ms, mean_ms, gamma_ms, grad_dx_ms, grad_dg_ms, grad_db_ms)\n\n assert np.allclose(d_x_ms.asnumpy(), d_x_np, rtol=5e-3, atol=5e-1)\n assert np.allclose(d_dy_ms.asnumpy(), d_dy_np, rtol=5e-3, atol=5e-1)\n assert np.allclose(d_gamma_ms.asnumpy(), d_gamma_np, rtol=5e-3, atol=5e-1)\n\n\[email protected]\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_layernormgradgrad11():\n begin_norm_axis = 2\n begin_params_axis = 1\n x_np = np.random.randn(128, 2, 16, 32).astype(np.float32)\n dy_np = np.random.randn(128, 2, 16, 32).astype(np.float32)\n gamma_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n\n grad_dx_np = np.random.randn(*x_np.shape).astype(np.float32)\n grad_dg_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n grad_db_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)\n\n epsilon = 1e-7\n _, _, _, 
mean_np, var_np = LayerNormGradReference(x_np, dy_np, gamma_np, epsilon, begin_norm_axis,\n begin_params_axis)\n\n d_x_np, d_dy_np, d_gamma_np = LayerNormGradGradReference(x_np, dy_np, gamma_np, epsilon, grad_dx_np, grad_dg_np,\n grad_db_np, begin_norm_axis, begin_params_axis)\n\n dy_ms = Tensor(dy_np.astype(np.float16))\n x_ms = Tensor(x_np.astype(np.float16))\n var_ms = Tensor(var_np.astype(np.float16))\n mean_ms = Tensor(mean_np.astype(np.float16))\n gamma_ms = Tensor(gamma_np.astype(np.float16))\n grad_dx_ms = Tensor(grad_dx_np.astype(np.float16))\n grad_dg_ms = Tensor(grad_dg_np.astype(np.float16))\n grad_db_ms = Tensor(grad_db_np.astype(np.float16))\n\n net = LayerNormGradGradNet(begin_norm_axis, begin_params_axis)\n d_x_ms, d_dy_ms, d_gamma_ms = net(x_ms, dy_ms, var_ms, mean_ms, gamma_ms, grad_dx_ms, grad_dg_ms, grad_db_ms)\n\n assert np.allclose(d_x_ms.asnumpy(), d_x_np, rtol=5e-3, atol=5e-1)\n assert np.allclose(d_dy_ms.asnumpy(), d_dy_np, rtol=5e-3, atol=5e-1)\n assert np.allclose(d_gamma_ms.asnumpy(), d_gamma_np, rtol=5e-3, atol=5e-1)\n", "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\nimport numpy as np\nimport pytest\nimport mindspore.context as context\nimport mindspore.nn as nn\nfrom mindspore import Tensor\nfrom mindspore.ops import operations as P\n\n\nclass Net(nn.Cell):\n def __init__(self):\n super(Net, self).__init__()\n self.ops = P.Less()\n\n def construct(self, x, y):\n return self.ops(x, y)\n\n\[email protected]\[email protected]_x86_cpu\[email protected]_onecard\[email protected]('dtype', [np.int32, np.int64, np.float32, np.float64])\ndef test_net(dtype):\n \"\"\"\n Feature: ALL To ALL\n Description: test cases for Less\n Expectation: the result match to numpy\n \"\"\"\n x0_np = np.random.randint(1, 5, (2, 3, 4, 4)).astype(dtype)\n y0_np = np.random.randint(1, 5, (2, 3, 4, 4)).astype(dtype)\n x1_np = np.random.randint(1, 5, (2, 3, 4, 4)).astype(dtype)\n y1_np = np.random.randint(1, 5, (2, 1, 4, 4)).astype(dtype)\n x2_np = np.random.randint(1, 5, (2, 1, 1, 4)).astype(dtype)\n y2_np = np.random.randint(1, 5, (2, 3, 4, 4)).astype(dtype)\n x3_np = np.random.randint(1, 5, 1).astype(dtype)\n y3_np = np.random.randint(1, 5, 1).astype(dtype)\n x4_np = np.array(768).astype(dtype)\n y4_np = np.array(3072.5).astype(dtype)\n\n x0 = Tensor(x0_np)\n y0 = Tensor(y0_np)\n x1 = Tensor(x1_np)\n y1 = Tensor(y1_np)\n x2 = Tensor(x2_np)\n y2 = Tensor(y2_np)\n x3 = Tensor(x3_np)\n y3 = Tensor(y3_np)\n x4 = Tensor(x4_np)\n y4 = Tensor(y4_np)\n\n context.set_context(mode=context.GRAPH_MODE, device_target='CPU')\n net = Net()\n out = net(x0, y0).asnumpy()\n expect = x0_np < y0_np\n assert np.all(out == expect)\n assert out.shape == expect.shape\n\n out = net(x1, y1).asnumpy()\n expect = x1_np < y1_np\n assert np.all(out == expect)\n assert out.shape == expect.shape\n\n out = net(x2, y2).asnumpy()\n expect = x2_np < y2_np\n assert np.all(out == expect)\n assert 
out.shape == expect.shape\n\n out = net(x3, y3).asnumpy()\n expect = x3_np < y3_np\n assert np.all(out == expect)\n assert out.shape == expect.shape\n\n out = net(x4, y4).asnumpy()\n expect = x4_np < y4_np\n assert np.all(out == expect)\n assert out.shape == expect.shape\n" ]
[ [ "numpy.array", "numpy.random.seed", "numpy.ones" ], [ "numpy.random.uniform", "numpy.array", "numpy.random.randn", "numpy.vstack" ], [ "numpy.random.rand", "numpy.arange", "numpy.broadcast_to", "numpy.ones" ], [ "numpy.random.random", "numpy.sum", "numpy.allclose" ], [ "numpy.array", "numpy.random.random" ], [ "numpy.ones" ], [ "numpy.matmul", "numpy.ones", "numpy.testing.assert_array_equal", "numpy.random.normal", "numpy.random.randn", "numpy.random.sample", "numpy.array" ], [ "numpy.ones" ], [ "numpy.expand_dims", "numpy.arange", "numpy.eye", "numpy.stack", "numpy.full", "numpy.prod", "numpy.array" ], [ "numpy.random.randn" ], [ "numpy.all", "numpy.array" ], [ "numpy.load", "numpy.array", "numpy.array_equal" ], [ "numpy.array" ], [ "numpy.random.randn", "numpy.random.randint" ], [ "numpy.array", "numpy.ones" ], [ "numpy.random.seed", "numpy.ones", "numpy.all", "numpy.random.randn", "numpy.array", "numpy.zeros" ], [ "numpy.array", "numpy.abs", "numpy.count_nonzero", "numpy.random.sample" ], [ "numpy.full" ], [ "numpy.ones" ], [ "numpy.arange" ], [ "numpy.ones" ], [ "numpy.array", "numpy.ones" ], [ "numpy.array", "numpy.random.rand" ], [ "numpy.all", "numpy.sqrt", "numpy.random.rand" ], [ "numpy.all", "numpy.asarray", "numpy.ones" ], [ "numpy.squeeze", "numpy.array", "numpy.zeros" ], [ "numpy.random.normal", "numpy.allclose" ], [ "numpy.ones" ], [ "numpy.ones", "numpy.random.randint" ], [ "numpy.random.randn", "numpy.zeros" ], [ "numpy.array" ], [ "numpy.array" ], [ "numpy.random.randn", "numpy.random.seed", "numpy.power" ], [ "numpy.all", "numpy.array", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
navin3011/Seminar-Energy-economy
[ "ddff1bf28f445d5a447fab119d7a6192f231d9c3" ]
[ "simbench/converter/voltLvl.py" ]
[ "# -*- coding: utf-8 -*-\n\n# Copyright (c) 2019 by University of Kassel, Tu Dortmund, RWTH Aachen University and Fraunhofer\n# Institute for Energy Economics and Energy System Technology (IEE) Kassel and individual\n# contributors (see AUTHORS file for details). All rights reserved.\n\nimport numpy as np\nfrom pandas import Series\nfrom pandapower import element_bus_tuples\n\n__author__ = \"smeinecke\"\n\n\ndef convert_voltlvl_to_int(voltage_level):\n \"\"\" Returns voltage level names as int. \"\"\"\n if voltage_level in [\"EHV\", \"ehv\", \"UHV\", \"uhv\"]:\n return 1\n elif voltage_level in [\"EHV-HV\", \"ehv-hv\", \"UHV-HV\", \"uhv-hv\", \"EHVHV\", \"ehvhv\", \"UHVHV\",\n \"uhvhv\"]:\n return 2\n elif voltage_level in [\"HV\", \"hv\"]:\n return 3\n elif voltage_level in [\"HV-MV\", \"hv-mv\", \"HVMV\", \"hvmv\"]:\n return 4\n elif voltage_level in [\"MV\", \"mv\"]:\n return 5\n elif voltage_level in [\"MV-LV\", \"mv-lv\", \"MVLV\", \"mvlv\"]:\n return 6\n elif voltage_level in [\"LV\", \"lv\"]:\n return 7\n else:\n return int(voltage_level)\n\n\ndef convert_voltlvl_to_str(voltage_level):\n \"\"\" Returns voltage level names as string. \"\"\"\n return [\"EHV\", \"EHV-HV\", \"HV\", \"HV-MV\", \"MV\", \"MV-LV\", \"LV\"][convert_voltlvl_to_int(\n voltage_level)-1]\n\n\ndef convert_voltlvl_names(voltage_levels, desired_format):\n \"\"\" Returns voltage level names in desired format.\n EXAMPLE:\n voltlvl_names = convert_voltlvl_names([1, 2, \"hv\", 4, 5, \"ehv\", 7], str)\n \"\"\"\n if desired_format == str:\n if isinstance(voltage_levels, str) | (not hasattr(voltage_levels, \"__iter__\")):\n return convert_voltlvl_to_str(voltage_levels)\n else:\n names = []\n for voltage_level in voltage_levels:\n for voltage_level in voltage_levels:\n names += [convert_voltlvl_to_str(voltage_level)]\n return names\n elif desired_format == int:\n if isinstance(voltage_levels, str) | (not hasattr(voltage_levels, \"__iter__\")):\n return convert_voltlvl_to_int(voltage_levels)\n else:\n names = []\n for voltage_level in voltage_levels:\n for voltage_level in voltage_levels:\n names += [convert_voltlvl_to_int(voltage_level)]\n return names\n else:\n raise ValueError(\"desired_format must be str or int\")\n\n\ndef _voltlvl_idx(net, element, voltage_level, branch_bus=None, vn_kv_limits=[145, 60, 1]):\n \"\"\" similar to voltlvl_idx, but for only one voltage_level \"\"\"\n vn_kv_limits = [np.inf] + vn_kv_limits + [-np.inf]\n voltage_level = convert_voltlvl_names(voltage_level, int)\n lim_max = [0, 0, 1, 1, 2, 2, 3][voltage_level-1]\n lim_min = [1, 2, 2, 3, 3, 4, 4][voltage_level-1]\n Idx_bus = net.bus.index[(net.bus.vn_kv <= vn_kv_limits[lim_max]) &\n (net.bus.vn_kv > vn_kv_limits[lim_min])]\n if element == \"bus\":\n return list(Idx_bus)\n\n if branch_bus is None and element not in [\"trafo\", \"trafo3w\"]:\n # for all other elements than trafos, take the first possibility\n for elm, bus_name in element_bus_tuples():\n if elm == element:\n branch_bus = bus_name\n break\n\n if element == \"measurement\":\n measurement_buses = Series(index=net.measurement.index)\n # bus\n bool_ = net.measurement.element_type == \"bus\"\n measurement_buses.loc[bool_] = net.measurement.element.loc[bool_]\n # line and trafo\n for branch, side in zip([\"line\", \"line\", \"trafo\", \"trafo\"], [\"from\", \"to\", \"hv\", \"lv\"]):\n bus = side + \"_bus\"\n bool1 = net.measurement.element_type == branch\n bool2 = net.measurement.side == side\n measurement_buses.loc[bool1 & bool2] = net[branch][bus].loc[net.measurement.element.loc[\n 
bool1 & bool2]].values\n measurement_buses = measurement_buses.astype(int)\n isin_Idx_bus = measurement_buses.isin(Idx_bus)\n\n elif branch_bus in net[element].columns: # all other elements than measurement and bus\n isin_Idx_bus = net[element][branch_bus].isin(Idx_bus)\n\n else:\n raise KeyError(\"For net[%s] there is no column '%s'. Please\" % (element, str(branch_bus)) +\n \" give 'branch_bus' an valid bus column name, e.g. 'hv_bus' or 'lv_bus'.\")\n\n return list(net[element].index[isin_Idx_bus])\n\n\ndef voltlvl_idx(net, element, voltage_levels, branch_bus=None, vn_kv_limits=[145, 60, 1]):\n \"\"\"\n Returns indices of elements with special voltage level.\n Even voltage_level numbers behave equally to both neighboring numbers, i.e. 4 == [3, 5] and\n \"EHV-HV\" == [\"EHV\", \"HV\"].\n\n EXAMPLE:\n hv_and_mv_buses = voltlvl_idx(net, \"bus\", 4) # 4 == [3, 5]\n hv_and_mv_buses = voltlvl_idx(net, \"bus\", [3, 5])\n mv_loads = voltlvl_idx(net, \"load\", \"MV\")\n hvmv_trafos = voltlvl_idx(net, \"trafo\", \"HV\", branch_bus=\"hv_bus\")\n hvmv_trafos = voltlvl_idx(net, \"trafo\", \"MV\", branch_bus=\"lv_bus\")\n ehvhv_and_hvmv_trafos = voltlvl_idx(net, \"trafo\", 2, branch_bus=\"hv_bus\")\n ehvhv_and_hvmv_trafos = voltlvl_idx(net, \"trafo\", [1, 3], branch_bus=\"hv_bus\")\n ehvhv_and_hvmv_trafos = voltlvl_idx(net, \"trafo\", 4, branch_bus=\"lv_bus\")\n ehvhv_and_hvmv_trafos = voltlvl_idx(net, \"trafo\", [3, 5], branch_bus=\"lv_bus\")\n ehvhv_trafos = voltlvl_idx(net, \"trafo\", 2, branch_bus=\"lv_bus\")\n ehv_measurements = voltlvl_idx(net, \"measurement\", \"EHV\")\n \"\"\"\n if not net[element].shape[0]:\n return []\n\n if isinstance(voltage_levels, str) | (not hasattr(voltage_levels, \"__iter__\")):\n return _voltlvl_idx(net, element, voltage_levels, branch_bus=branch_bus,\n vn_kv_limits=vn_kv_limits)\n else:\n Idx = []\n for voltage_level in voltage_levels:\n Idx += _voltlvl_idx(net, element, voltage_level, branch_bus=branch_bus,\n vn_kv_limits=vn_kv_limits)\n return Idx\n\n\ndef get_voltlvl(voltage_values, vn_kv_limits=[145, 60, 1]):\n \"\"\" Returns an array of voltage levels as integer. \"\"\"\n iter_ = hasattr(voltage_values, \"__iter__\")\n voltage_values = voltage_values if iter_ else [voltage_values]\n voltage_values = np.array(voltage_values)\n voltage_levels = np.ones(voltage_values.shape)\n for lim in vn_kv_limits:\n voltage_levels[voltage_values <= lim] += 2\n if iter_:\n return voltage_levels.astype(int)\n else:\n return int(voltage_levels[0])\n" ]
[ [ "numpy.array", "pandas.Series", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
freol35241/pysim
[ "36faf67d00ff644a593f20994c0f15053d600886" ]
[ "pysim/systems/python_systems.py" ]
[ "\"\"\"Example systems created in Python\n\"\"\"\nimport numpy as np\n\nfrom pysim.cythonsystem import Sys\n\nclass VanDerPol(Sys):\n \"\"\"Simple example of a class representing a VanDerPol oscillator.\n \"\"\"\n def __init__(self):\n self.add_state_scalar(\"x\", \"dx\")\n self.add_state_scalar(\"y\", \"dy\")\n self.add_input_scalar(\"a\")\n self.add_input_scalar(\"b\")\n self.inputs.a = 1.0\n self.inputs.b = 1.0\n self.states.x = 1.0\n self.states.y = 0.0\n\n def do_step(self,dummy):\n \"\"\"Perform a timestep by implmenting the VanDerPol equations\"\"\"\n \n a = self.inputs.a\n b = self.inputs.b\n x = self.states.x\n y = self.states.y\n\n self.ders.dx = a*x*(b-y*y)-y\n self.ders.dy = x\n\n\nclass MassSpringDamper(Sys):\n \"\"\"Simple class for testing the mass-spring-damper simulations with \n a cython system\"\"\"\n\n def __init__(self):\n \"\"\"Setup two states (one dimensional vectors for now). Initial \n conditions are simular to those in the build in c++ system\"\"\"\n self.add_state_scalar(\"x1\", \"dx1\")\n self.add_state_scalar(\"x2\", \"dx2\")\n self.states.x1 = 1\n self.states.x2 = 0\n\n def do_step(self,dummy):\n \"\"\"Perform a step using default constants, same as those in the \n cpp system\"\"\"\n\n m = 100.0\n b = 1.0\n k = 50.0\n f = 0.0\n x1 = self.states.x1\n x2 = self.states.x2\n self.ders.dx1 = x2\n self.ders.dx2 =-k/m*x1-b/m*x2+1/m*f\n\nclass InOutTestSystem(Sys):\n \"\"\"Python representation of the cpp InOutTestSystem\n\n Used for testing that the cpp system behaves as the python system\n with regards to the input output handling\n \"\"\"\n def __init__(self):\n self.add_input_scalar(\"input_scalar\")\n self.add_input_vector(\"input_vector\",3)\n self.add_input_matrix(\"input_matrix\",3,3)\n\n self.add_state_scalar(\"state_scalar\",\"der_scalar\")\n self.add_state_vector(\"state_vector\",\"der_vector\", 3)\n self.add_state_matrix(\"state_matrix\",\"der_matrix\", 3, 3)\n\n self.add_output_scalar(\"input_output_scalar\")\n self.add_output_vector(\"input_output_vector\",3)\n self.add_output_matrix(\"input_output_matrix\",3,3)\n self.add_output_scalar(\"state_output_scalar\")\n self.add_output_vector(\"state_output_vector\",3)\n self.add_output_matrix(\"state_output_matrix\",3,3)\n\n self.inputs.input_scalar = 0.0\n self.inputs.input_vector = [0.0, 0.0, 0.0]\n self.inputs.input_matrix = np.zeros((3,3))\n\n self.outputs.input_output_scalar = 0.0\n self.outputs.input_output_vector = [0.0, 0.0, 0.0]\n self.outputs.input_output_matrix = np.zeros((3,3))\n self.outputs.state_output_scalar = 0.0\n self.outputs.state_output_vector = [0.0, 0.0, 0.0]\n self.outputs.state_output_matrix = np.zeros((3,3))\n\n self.states.state_scalar = 1.23\n self.states.state_vector = np.ones(3)*4.56\n self.states.state_matrix = np.ones((3,3))*7.89\n self.ders.der_scalar = 0\n self.ders.der_vector = np.zeros(3)\n self.ders.der_matrix = np.zeros((3,3))\n\n def do_step(self,dummy):\n \"\"\"During a timestep we set the outputs to their respective inputs\"\"\"\n self.outputs.input_output_scalar = self.inputs.input_scalar\n self.outputs.input_output_vector = self.inputs.input_vector\n self.outputs.input_output_matrix = self.inputs.input_matrix\n self.outputs.state_output_scalar = self.states.state_scalar\n self.outputs.state_output_vector = self.states.state_vector\n self.outputs.state_output_matrix = self.states.state_matrix\n" ]
[ [ "numpy.zeros", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
GrapeBaBa/ibis
[ "507bb14efdcfd719a0487ee23fe1c85c177517f6", "507bb14efdcfd719a0487ee23fe1c85c177517f6", "507bb14efdcfd719a0487ee23fe1c85c177517f6" ]
[ "ibis/tests/benchmarks/test_benchmarks.py", "ibis/backends/tests/test_array.py", "ibis/backends/pandas/execution/window.py" ]
[ "import numpy as np\nimport pandas as pd\nimport pytest\n\nimport ibis\nimport ibis.expr.datatypes as dt\nfrom ibis.backends.pandas.udf import udf\n\n\ndef make_t():\n return ibis.table(\n [\n ('_timestamp', 'int32'),\n ('dim1', 'int32'),\n ('dim2', 'int32'),\n ('valid_seconds', 'int32'),\n ('meas1', 'int32'),\n ('meas2', 'int32'),\n ('year', 'int32'),\n ('month', 'int32'),\n ('day', 'int32'),\n ('hour', 'int32'),\n ('minute', 'int32'),\n ],\n name=\"t\",\n )\n\n\[email protected]\ndef t():\n return make_t()\n\n\ndef make_base(t):\n return (\n (t.year > 2016)\n | ((t.year == 2016) & (t.month > 6))\n | ((t.year == 2016) & (t.month == 6) & (t.day > 6))\n | ((t.year == 2016) & (t.month == 6) & (t.day == 6) & (t.hour > 6))\n | (\n (t.year == 2016)\n & (t.month == 6)\n & (t.day == 6)\n & (t.hour == 6)\n & (t.minute >= 5)\n )\n ) & (\n (t.year < 2016)\n | ((t.year == 2016) & (t.month < 6))\n | ((t.year == 2016) & (t.month == 6) & (t.day < 6))\n | ((t.year == 2016) & (t.month == 6) & (t.day == 6) & (t.hour < 6))\n | (\n (t.year == 2016)\n & (t.month == 6)\n & (t.day == 6)\n & (t.hour == 6)\n & (t.minute <= 5)\n )\n )\n\n\[email protected]\ndef base(t):\n return make_base(t)\n\n\ndef make_large_expr(t, base):\n src_table = t[base]\n src_table = src_table.mutate(\n _timestamp=(src_table['_timestamp'] - src_table['_timestamp'] % 3600)\n .cast('int32')\n .name('_timestamp'),\n valid_seconds=300,\n )\n\n aggs = []\n for meas in ['meas1', 'meas2']:\n aggs.append(src_table[meas].sum().cast('float').name(meas))\n src_table = src_table.aggregate(\n aggs, by=['_timestamp', 'dim1', 'dim2', 'valid_seconds']\n )\n\n part_keys = ['year', 'month', 'day', 'hour', 'minute']\n ts_col = src_table['_timestamp'].cast('timestamp')\n new_cols = {}\n for part_key in part_keys:\n part_col = getattr(ts_col, part_key)()\n new_cols[part_key] = part_col\n src_table = src_table.mutate(**new_cols)\n return src_table[\n [\n '_timestamp',\n 'dim1',\n 'dim2',\n 'meas1',\n 'meas2',\n 'year',\n 'month',\n 'day',\n 'hour',\n 'minute',\n ]\n ]\n\n\[email protected]\ndef large_expr(t, base):\n return make_large_expr(t, base)\n\n\[email protected](group=\"construction\")\[email protected](\n \"construction_fn\",\n [\n pytest.param(lambda *_: make_t(), id=\"small\"),\n pytest.param(lambda t, *_: make_base(t), id=\"medium\"),\n pytest.param(lambda t, base: make_large_expr(t, base), id=\"large\"),\n ],\n)\ndef test_construction(benchmark, construction_fn, t, base):\n benchmark(construction_fn, t, base)\n\n\[email protected](group=\"builtins\")\[email protected](\n \"expr_fn\",\n [\n pytest.param(lambda t, _base, _large_expr: t, id=\"small\"),\n pytest.param(lambda _t, base, _large_expr: base, id=\"medium\"),\n pytest.param(lambda _t, _base, large_expr: large_expr, id=\"large\"),\n ],\n)\[email protected](\"builtin\", [hash, str])\ndef test_builtins(benchmark, expr_fn, builtin, t, base, large_expr):\n expr = expr_fn(t, base, large_expr)\n benchmark(builtin, expr)\n\n\[email protected](group=\"compilation\")\[email protected](\"module\", [\"impala\", \"sqlite\"])\[email protected](\n \"expr_fn\",\n [\n pytest.param(lambda t, _base, _large_expr: t, id=\"small\"),\n pytest.param(lambda _t, base, _large_expr: base, id=\"medium\"),\n pytest.param(lambda _t, _base, large_expr: large_expr, id=\"large\"),\n ],\n)\ndef test_compile(benchmark, module, expr_fn, t, base, large_expr):\n try:\n mod = getattr(ibis, module)\n except AttributeError as e:\n pytest.skip(str(e))\n else:\n expr = expr_fn(t, base, large_expr)\n benchmark(mod.compile, 
expr)\n\n\[email protected]\ndef pt():\n n = 60_000\n data = pd.DataFrame(\n {\n 'key': np.random.choice(16000, size=n),\n 'low_card_key': np.random.choice(30, size=n),\n 'value': np.random.rand(n),\n 'timestamps': pd.date_range(\n start='now', periods=n, freq='s'\n ).values,\n 'timestamp_strings': pd.date_range(\n start='now', periods=n, freq='s'\n ).values.astype(str),\n 'repeated_timestamps': pd.date_range(\n start='2018-09-01', periods=30\n ).repeat(int(n / 30)),\n }\n )\n\n return ibis.pandas.connect(dict(df=data)).table('df')\n\n\ndef high_card_group_by(t):\n return t.groupby(t.key).aggregate(avg_value=t.value.mean())\n\n\ndef cast_to_dates(t):\n return t.timestamps.cast(dt.date)\n\n\ndef cast_to_dates_from_strings(t):\n return t.timestamp_strings.cast(dt.date)\n\n\ndef multikey_group_by_with_mutate(t):\n return (\n t.mutate(dates=t.timestamps.cast('date'))\n .groupby(['low_card_key', 'dates'])\n .aggregate(avg_value=lambda t: t.value.mean())\n )\n\n\ndef simple_sort(t):\n return t.sort_by([t.key])\n\n\ndef simple_sort_projection(t):\n return t[['key', 'value']].sort_by(['key'])\n\n\ndef multikey_sort(t):\n return t.sort_by(['low_card_key', 'key'])\n\n\ndef multikey_sort_projection(t):\n return t[['low_card_key', 'key', 'value']].sort_by(['low_card_key', 'key'])\n\n\ndef low_card_rolling_window(t):\n return ibis.trailing_range_window(\n ibis.interval(days=2),\n order_by=t.repeated_timestamps,\n group_by=t.low_card_key,\n )\n\n\ndef low_card_grouped_rolling(t):\n return t.value.mean().over(low_card_rolling_window(t))\n\n\ndef high_card_rolling_window(t):\n return ibis.trailing_range_window(\n ibis.interval(days=2),\n order_by=t.repeated_timestamps,\n group_by=t.key,\n )\n\n\ndef high_card_grouped_rolling(t):\n return t.value.mean().over(high_card_rolling_window(t))\n\n\[email protected](['double'], 'double')\ndef my_mean(series):\n return series.mean()\n\n\ndef low_card_grouped_rolling_udf_mean(t):\n return my_mean(t.value).over(low_card_rolling_window(t))\n\n\ndef high_card_grouped_rolling_udf_mean(t):\n return my_mean(t.value).over(high_card_rolling_window(t))\n\n\[email protected](['double'], 'double')\ndef my_zscore(series):\n return (series - series.mean()) / series.std()\n\n\ndef low_card_window(t):\n return ibis.window(group_by=t.low_card_key)\n\n\ndef high_card_window(t):\n return ibis.window(group_by=t.key)\n\n\ndef low_card_window_analytics_udf(t):\n return my_zscore(t.value).over(low_card_window(t))\n\n\ndef high_card_window_analytics_udf(t):\n return my_zscore(t.value).over(high_card_window(t))\n\n\[email protected](['double', 'double'], 'double')\ndef my_wm(v, w):\n return np.average(v, weights=w)\n\n\ndef low_card_grouped_rolling_udf_wm(t):\n return my_wm(t.value, t.value).over(low_card_rolling_window(t))\n\n\ndef high_card_grouped_rolling_udf_wm(t):\n return my_wm(t.value, t.value).over(low_card_rolling_window(t))\n\n\[email protected](group=\"execution\")\[email protected](\n \"expression_fn\",\n [\n pytest.param(high_card_group_by, id=\"high_card_group_by\"),\n pytest.param(cast_to_dates, id=\"cast_to_dates\"),\n pytest.param(\n cast_to_dates_from_strings, id=\"cast_to_dates_from_strings\"\n ),\n pytest.param(\n multikey_group_by_with_mutate, id=\"multikey_group_by_with_mutate\"\n ),\n pytest.param(simple_sort, id=\"simple_sort\"),\n pytest.param(simple_sort_projection, id=\"simple_sort_projection\"),\n pytest.param(multikey_sort, id=\"multikey_sort\"),\n pytest.param(multikey_sort_projection, id=\"multikey_sort_projection\"),\n pytest.param(low_card_grouped_rolling, 
id=\"low_card_grouped_rolling\"),\n pytest.param(\n high_card_grouped_rolling, id=\"high_card_grouped_rolling\"\n ),\n pytest.param(\n low_card_grouped_rolling_udf_mean,\n id=\"low_card_grouped_rolling_udf_mean\",\n ),\n pytest.param(\n high_card_grouped_rolling_udf_mean,\n id=\"high_card_grouped_rolling_udf_mean\",\n ),\n pytest.param(\n low_card_window_analytics_udf, id=\"low_card_window_analytics_udf\"\n ),\n pytest.param(\n high_card_window_analytics_udf, id=\"high_card_window_analytics_udf\"\n ),\n pytest.param(\n low_card_grouped_rolling_udf_wm,\n id=\"low_card_grouped_rolling_udf_wm\",\n ),\n pytest.param(\n high_card_grouped_rolling_udf_wm,\n id=\"high_card_grouped_rolling_udf_wm\",\n ),\n ],\n)\ndef test_execute(benchmark, expression_fn, pt):\n expr = expression_fn(pt)\n benchmark(expr.execute)\n", "import numpy as np\nimport pytest\n\nimport ibis\nimport ibis.expr.types as ir\n\n\[email protected]_unsupported\[email protected]_missing_feature(\n ['supports_arrays', 'supports_arrays_outside_of_select']\n)\ndef test_array_column(backend, alltypes, df):\n expr = ibis.array([alltypes['double_col'], alltypes['double_col']])\n assert isinstance(expr, ir.ArrayColumn)\n\n result = expr.execute()\n expected = df.apply(\n lambda row: np.array(\n [row['double_col'], row['double_col']], dtype=object\n ),\n axis=1,\n )\n backend.assert_series_equal(result, expected, check_names=False)\n\n\[email protected]_unsupported\[email protected]_missing_feature(\n ['supports_arrays', 'supports_arrays_outside_of_select']\n)\ndef test_array_scalar(backend, con, alltypes, df):\n expr = ibis.array([1.0, 2.0, 3.0])\n assert isinstance(expr, ir.ArrayScalar)\n\n result = con.execute(expr)\n expected = np.array([1.0, 2.0, 3.0])\n\n # This does not check whether `result` is an np.array or a list,\n # because it varies across backends and backend configurations\n assert np.array_equal(result, expected)\n\n\[email protected]_unsupported\[email protected]_missing_feature(\n ['supports_arrays', 'supports_arrays_outside_of_select']\n)\n# Issues #2370\[email protected]_backends(['bigquery'])\ndef test_array_concat(backend, con):\n left = ibis.literal([1, 2, 3])\n right = ibis.literal([2, 1])\n expr = left + right\n result = con.execute(expr)\n expected = np.array([1, 2, 3, 2, 1])\n\n # This does not check whether `result` is an np.array or a list,\n # because it varies across backends and backend configurations\n assert np.array_equal(result, expected)\n\n\[email protected]_unsupported\[email protected]_missing_feature(\n ['supports_arrays', 'supports_arrays_outside_of_select']\n)\ndef test_array_length(backend, con):\n expr = ibis.literal([1, 2, 3]).length()\n assert con.execute(expr) == 3\n\n\[email protected]_unsupported\[email protected]_missing_feature(\n ['supports_arrays', 'supports_arrays_outside_of_select']\n)\ndef test_list_literal(backend, con):\n arr = [1, 2, 3]\n expr = ibis.literal(arr)\n result = con.execute(expr)\n\n # This does not check whether `result` is an np.array or a list,\n # because it varies across backends and backend configurations\n assert np.array_equal(result, arr)\n\n\[email protected]_unsupported\[email protected]_missing_feature(\n ['supports_arrays', 'supports_arrays_outside_of_select']\n)\ndef test_np_array_literal(backend, con):\n arr = np.array([1, 2, 3])\n expr = ibis.literal(arr)\n result = con.execute(expr)\n\n # This does not check whether `result` is an np.array or a list,\n # because it varies across backends and backend configurations\n assert np.array_equal(result, 
arr)\n", "\"\"\"Code for computing window functions with ibis and pandas.\"\"\"\n\nimport operator\nimport re\nfrom typing import Any, Callable, List, NoReturn, Optional, Union\n\nimport pandas as pd\nimport toolz\nfrom multipledispatch import Dispatcher\nfrom pandas.core.groupby import SeriesGroupBy\n\nimport ibis.common.exceptions as com\nimport ibis.expr.operations as ops\nimport ibis.expr.window as win\nfrom ibis.expr.scope import Scope\nfrom ibis.expr.timecontext import (\n construct_time_context_aware_series,\n get_time_col,\n)\nfrom ibis.expr.typing import TimeContext\n\nfrom .. import aggcontext as agg_ctx\nfrom ..aggcontext import AggregationContext\nfrom ..core import (\n compute_time_context,\n date_types,\n execute,\n integer_types,\n simple_types,\n timedelta_types,\n timestamp_types,\n)\nfrom ..dispatch import execute_node, pre_execute\nfrom ..execution import util\n\n\ndef _post_process_empty(\n result: Any,\n parent: pd.DataFrame,\n order_by: List[str],\n group_by: List[str],\n timecontext: Optional[TimeContext],\n) -> pd.Series:\n # This is the post process of the no groupby nor orderby window\n # `result` could be a Series, DataFrame, or a scalar. generated\n # by `agg` method of class `Window`. For window without grouby or\n # orderby, `agg` calls pands method directly. So if timecontext is\n # present, we need to insert 'time' column into index for trimming the\n # result. For cases when grouby or orderby is present, `agg` calls\n # Ibis method `window_agg_built_in` and `window_agg_udf`, time\n # context is already inserted there.\n assert not order_by and not group_by\n if isinstance(result, (pd.Series, pd.DataFrame)):\n if timecontext:\n result = construct_time_context_aware_series(result, parent)\n return result\n else:\n # `result` is a scalar when a reduction operation is being\n # applied over the window, since reduction operations are N->1\n # in this case we do not need to trim result by timecontext,\n # just expand reduction result to be a Series with `index`.\n index = parent.index\n result = pd.Series([result]).repeat(len(index))\n result.index = index\n return result\n\n\ndef _post_process_group_by(\n series: pd.Series,\n parent: pd.DataFrame,\n order_by: List[str],\n group_by: List[str],\n timecontext: Optional[TimeContext],\n) -> pd.Series:\n assert not order_by and group_by\n return series\n\n\ndef _post_process_order_by(\n series,\n parent: pd.DataFrame,\n order_by: List[str],\n group_by: List[str],\n timecontext: Optional[TimeContext],\n) -> pd.Series:\n assert order_by and not group_by\n indexed_parent = parent.set_index(order_by)\n index = indexed_parent.index\n names = index.names\n if len(names) > 1:\n series = series.reorder_levels(names)\n series = series.iloc[index.argsort(kind='mergesort')]\n return series\n\n\ndef _post_process_group_by_order_by(\n series: pd.Series,\n parent: pd.DataFrame,\n order_by: List[str],\n group_by: List[str],\n timecontext: Optional[TimeContext],\n) -> pd.Series:\n indexed_parent = parent.set_index(group_by + order_by, append=True)\n index = indexed_parent.index\n\n # get the names of the levels that will be in the result\n series_index_names = frozenset(series.index.names)\n\n # get the levels common to series.index, in the order that they occur in\n # the parent's index\n reordered_levels = [\n name for name in index.names if name in series_index_names\n ]\n\n if len(reordered_levels) > 1:\n series = series.reorder_levels(reordered_levels)\n return series\n\n\nget_aggcontext = 
Dispatcher('get_aggcontext')\n\n\n@get_aggcontext.register(object)\ndef get_aggcontext_default(\n window,\n *,\n scope,\n operand,\n parent,\n group_by,\n order_by,\n **kwargs,\n) -> NoReturn:\n raise NotImplementedError(\n f\"get_aggcontext is not implemented for {type(window).__name__}\"\n )\n\n\n@get_aggcontext.register(win.Window)\ndef get_aggcontext_window(\n window,\n *,\n scope,\n operand,\n parent,\n group_by,\n order_by,\n **kwargs,\n) -> AggregationContext:\n # no order by or group by: default summarization aggcontext\n #\n # if we're reducing and we have an order by expression then we need to\n # expand or roll.\n #\n # otherwise we're transforming\n output_type = operand.type()\n\n aggcontext: agg_ctx.AggregationContext\n if not group_by and not order_by:\n aggcontext = agg_ctx.Summarize(parent=parent, output_type=output_type)\n elif (\n isinstance(\n operand.op(), (ops.Reduction, ops.CumulativeOp, ops.Any, ops.All)\n )\n and order_by\n ):\n # XXX(phillipc): What a horror show\n preceding = window.preceding\n if preceding is not None:\n max_lookback = window.max_lookback\n assert not isinstance(operand.op(), ops.CumulativeOp)\n aggcontext = agg_ctx.Moving(\n preceding,\n max_lookback,\n parent=parent,\n group_by=group_by,\n order_by=order_by,\n output_type=output_type,\n )\n else:\n # expanding window\n aggcontext = agg_ctx.Cumulative(\n parent=parent,\n group_by=group_by,\n order_by=order_by,\n output_type=output_type,\n )\n else:\n # groupby transform (window with a partition by clause in SQL parlance)\n aggcontext = agg_ctx.Transform(\n parent=parent,\n group_by=group_by,\n order_by=order_by,\n output_type=output_type,\n )\n\n return aggcontext\n\n\ndef trim_window_result(\n data: Union[pd.Series, pd.DataFrame], timecontext: Optional[TimeContext]\n):\n \"\"\"Trim data within time range defined by timecontext\n\n This is a util function used in ``execute_window_op``, where time\n context might be adjusted for calculation. 
Data must be trimmed\n    within the original time context before return.\n    `data` is a pd.Series with a MultiIndex in most cases; for a multi\n    column udf result, `data` could be a pd.DataFrame\n\n    Parameters\n    ----------\n    data: pd.Series or pd.DataFrame\n    timecontext: Optional[TimeContext]\n\n    Returns\n    -------\n    a trimmed pd.Series or pd.DataFrame with the same MultiIndex\n    as data's\n\n    \"\"\"\n    # noop if timecontext is None\n    if not timecontext:\n        return data\n    assert isinstance(\n        data, (pd.Series, pd.DataFrame)\n    ), 'window computed columns is not a pd.Series nor a pd.DataFrame'\n\n    # reset multiindex, convert Series into a DataFrame\n    df = data.reset_index()\n\n    # Filter the data; here we preserve the time index so that when the user\n    # is computing a single column, the computation and the relevant time\n    # indexes are returned.\n    time_col = get_time_col()\n    if time_col not in df:\n        return data\n\n    subset = df.loc[df[time_col].between(*timecontext)]\n\n    # Get columns to set for index\n    if isinstance(data, pd.Series):\n        # if the Series doesn't have a name, reset_index will assign\n        # 0 as the column name for the column of values\n        name = data.name if data.name else 0\n        index_columns = list(subset.columns.difference([name]))\n    else:\n        name = data.columns\n        index_columns = list(subset.columns.difference(name))\n\n    # set the correct index for the returned Series / DataFrame\n    indexed_subset = subset.set_index(index_columns)\n    return indexed_subset[name]\n\n\n
@execute_node.register(ops.WindowOp, pd.Series, win.Window)\ndef execute_window_op(\n    op,\n    data,\n    window,\n    scope: Scope = None,\n    timecontext: Optional[TimeContext] = None,\n    aggcontext=None,\n    clients=None,\n    **kwargs,\n):\n    operand = op.expr\n    # pre-execute \"manually\" here because otherwise we wouldn't pick up\n    # relevant scope changes from the child operand since we're managing\n    # execution of that by hand\n    operand_op = operand.op()\n\n    adjusted_timecontext = None\n    if timecontext:\n        arg_timecontexts = compute_time_context(\n            op, timecontext=timecontext, clients=clients, scope=scope\n        )\n        # timecontext is the original time context required by the parent\n        # node of this WindowOp, while adjusted_timecontext is the adjusted\n        # context of this Window; since we are doing a manual execution here,\n        # use adjusted_timecontext in later execution phases\n        adjusted_timecontext = arg_timecontexts[0]\n\n    pre_executed_scope = pre_execute(\n        operand_op,\n        *clients,\n        scope=scope,\n        timecontext=adjusted_timecontext,\n        aggcontext=aggcontext,\n        **kwargs,\n    )\n    if scope is None:\n        scope = pre_executed_scope\n    else:\n        scope = scope.merge_scope(pre_executed_scope)\n    (root,) = op.root_tables()\n    root_expr = root.to_expr()\n\n    data = execute(\n        root_expr,\n        scope=scope,\n        timecontext=adjusted_timecontext,\n        clients=clients,\n        aggcontext=aggcontext,\n        **kwargs,\n    )\n    following = window.following\n    order_by = window._order_by\n\n    if (\n        order_by\n        and following != 0\n        and not isinstance(operand_op, ops.ShiftBase)\n    ):\n        raise com.OperationNotDefinedError(\n            'Window functions affected by following with order_by are not '\n            'implemented'\n        )\n\n    group_by = window._group_by\n    grouping_keys = [\n        key_op.name\n        if isinstance(key_op, ops.TableColumn)\n        else execute(\n            key,\n            scope=scope,\n            clients=clients,\n            timecontext=adjusted_timecontext,\n            aggcontext=aggcontext,\n            **kwargs,\n        )\n        for key, key_op in zip(\n            group_by, map(operator.methodcaller('op'), group_by)\n        )\n    ]\n\n    order_by = window._order_by\n    if not order_by:\n        ordering_keys = []\n\n    post_process: Callable[\n        [Any, pd.DataFrame, List[str], List[str], Optional[TimeContext]],\n        pd.Series,\n    ]\n    if group_by:\n        if order_by:\n            (\n                sorted_df,\n                grouping_keys,\n                ordering_keys,\n            ) = util.compute_sorted_frame(\n                data,\n                order_by,\n                group_by=group_by,\n                timecontext=adjusted_timecontext,\n                **kwargs,\n            )\n            source = sorted_df.groupby(grouping_keys, sort=True)\n            post_process = _post_process_group_by_order_by\n        else:\n            source = data.groupby(grouping_keys, sort=False)\n            post_process = _post_process_group_by\n    else:\n        if order_by:\n            source, grouping_keys, ordering_keys = util.compute_sorted_frame(\n                data, order_by, timecontext=adjusted_timecontext, **kwargs\n            )\n            post_process = _post_process_order_by\n        else:\n            source = data\n            post_process = _post_process_empty\n\n    # Here the groupby object should be added to the corresponding node in\n    # scope for execution; data will be overwritten with a groupby object,\n    # so we force an update regardless of time context\n    new_scope = scope.merge_scopes(\n        [\n            Scope({t: source}, adjusted_timecontext)\n            for t in operand.op().root_tables()\n        ],\n        overwrite=True,\n    )\n\n    aggcontext = get_aggcontext(\n        window,\n        scope=scope,\n        operand=operand,\n        parent=source,\n        group_by=grouping_keys,\n        order_by=ordering_keys,\n        **kwargs,\n    )\n    result = execute(\n        operand,\n        scope=new_scope,\n        timecontext=adjusted_timecontext,\n        aggcontext=aggcontext,\n        clients=clients,\n        **kwargs,\n    )\n    result = post_process(\n        result,\n        data,\n        ordering_keys,\n        grouping_keys,\n        adjusted_timecontext,\n    )\n    assert len(data) == len(\n        result\n    ), 'input data source and computed column do not have the same length'\n\n    # trim data to original time context\n    result = trim_window_result(result, timecontext)\n    return result\n\n\n@execute_node.register(\n    (ops.CumulativeSum, ops.CumulativeMax, ops.CumulativeMin),\n    (pd.Series, SeriesGroupBy),\n)\ndef execute_series_cumulative_sum_min_max(op, data, **kwargs):\n    typename = type(op).__name__\n    method_name = (\n        re.match(r\"^Cumulative([A-Za-z_][A-Za-z0-9_]*)$\", typename)\n        .group(1)\n        .lower()\n    )\n    method = getattr(data, f\"cum{method_name}\")\n    return method()\n\n\n@execute_node.register(ops.CumulativeMean, (pd.Series, SeriesGroupBy))\ndef execute_series_cumulative_mean(op, data, **kwargs):\n    # TODO: Doesn't handle the case where we've grouped/sorted by. Handling\n    # this here would probably require a refactor.\n    return data.expanding().mean()\n\n\n
@execute_node.register(ops.CumulativeOp, (pd.Series, SeriesGroupBy))\ndef execute_series_cumulative_op(op, data, aggcontext=None, **kwargs):\n    assert aggcontext is not None, \"aggcontext is None in {} operation\".format(\n        type(op)\n    )\n    typename = type(op).__name__\n    match = re.match(r'^Cumulative([A-Za-z_][A-Za-z0-9_]*)$', typename)\n    if match is None:\n        raise ValueError(f'Unknown operation {typename}')\n\n    try:\n        (operation_name,) = match.groups()\n    except ValueError:\n        raise ValueError(\n            f'More than one operation name found in {typename} class'\n        )\n\n    dtype = op.to_expr().type().to_pandas()\n    # fixed: type() with no argument raises TypeError; report the actual type\n    assert isinstance(aggcontext, agg_ctx.Cumulative), f'Got {type(aggcontext)}'\n    result = aggcontext.agg(data, operation_name.lower())\n\n    # all expanding window operations are required to be int64 or float64, so\n    # we need to cast back to preserve the type of the operation\n    try:\n        return result.astype(dtype)\n    except TypeError:\n        return result\n\n\ndef post_lead_lag(result, default):\n    if not pd.isnull(default):\n        return result.fillna(default)\n    return result\n\n\n@execute_node.register(\n    (ops.Lead, ops.Lag),\n    (pd.Series, SeriesGroupBy),\n    integer_types + (type(None),),\n    simple_types + (type(None),),\n)\ndef execute_series_lead_lag(op, data, offset, default, **kwargs):\n    func = toolz.identity if isinstance(op, ops.Lag) else operator.neg\n    result = data.shift(func(1 if offset is None else offset))\n    return post_lead_lag(result, default)\n\n\n@execute_node.register(\n    (ops.Lead, ops.Lag),\n    (pd.Series, SeriesGroupBy),\n    timedelta_types,\n    date_types + timestamp_types + (str, type(None)),\n)\ndef execute_series_lead_lag_timedelta(\n    op, data, offset, default, aggcontext=None, **kwargs\n):\n    \"\"\"An implementation of shifting a column relative to another one that is\n    in units of time rather than rows.\n    \"\"\"\n    # lagging adds time (delayed), leading subtracts time (moved up)\n    func = operator.add if isinstance(op, ops.Lag) else operator.sub\n    group_by = aggcontext.group_by\n    order_by = aggcontext.order_by\n\n    # get the parent object from which `data` originated\n    parent = aggcontext.parent\n\n    # get the DataFrame from the parent object, handling the DataFrameGroupBy\n    # case\n    parent_df = getattr(parent, 'obj', parent)\n\n    # index our parent df by grouping and ordering keys\n    indexed_original_df = parent_df.set_index(group_by + order_by)\n\n    # perform the time shift\n    adjusted_parent_df = parent_df.assign(\n        **{k: func(parent_df[k], offset) for k in order_by}\n    )\n\n    # index the parent *after* adjustment\n    adjusted_indexed_parent = adjusted_parent_df.set_index(group_by + order_by)\n\n    # get the column we care about\n    result = adjusted_indexed_parent[getattr(data, 'obj', data).name]\n\n    # reindex the shifted data by the original frame's index\n    result = result.reindex(indexed_original_df.index)\n\n    # add a default if necessary\n    return post_lead_lag(result, default)\n\n\n@execute_node.register(ops.FirstValue, pd.Series)\ndef execute_series_first_value(op, data, **kwargs):\n    return data.values[0]\n\n\n@execute_node.register(ops.FirstValue, SeriesGroupBy)\ndef execute_series_group_by_first_value(op, data, aggcontext=None, **kwargs):\n    return aggcontext.agg(data, 'first')\n\n\n@execute_node.register(ops.LastValue, pd.Series)\ndef execute_series_last_value(op, data, **kwargs):\n    return data.values[-1]\n\n\n@execute_node.register(ops.LastValue, SeriesGroupBy)\ndef execute_series_group_by_last_value(op, data, 
aggcontext=None, **kwargs):\n return aggcontext.agg(data, 'last')\n\n\n@execute_node.register(ops.MinRank, (pd.Series, SeriesGroupBy))\ndef execute_series_min_rank(op, data, **kwargs):\n # TODO(phillipc): Handle ORDER BY\n return data.rank(method='min', ascending=True).astype('int64') - 1\n\n\n@execute_node.register(ops.DenseRank, (pd.Series, SeriesGroupBy))\ndef execute_series_dense_rank(op, data, **kwargs):\n # TODO(phillipc): Handle ORDER BY\n return data.rank(method='dense', ascending=True).astype('int64') - 1\n\n\n@execute_node.register(ops.PercentRank, (pd.Series, SeriesGroupBy))\ndef execute_series_percent_rank(op, data, **kwargs):\n # TODO(phillipc): Handle ORDER BY\n return data.rank(method='min', ascending=True, pct=True)\n" ]
[ [ "numpy.average", "numpy.random.rand", "pandas.date_range", "numpy.random.choice" ], [ "numpy.array", "numpy.array_equal" ], [ "pandas.Series", "pandas.isnull" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
aman-gupta-1995/Machine-Learning-Mindware
[ "8b3050720711730520683c89949e3dbdfb168961", "8b3050720711730520683c89949e3dbdfb168961", "8b3050720711730520683c89949e3dbdfb168961", "8b3050720711730520683c89949e3dbdfb168961" ]
[ "examples/cls_exp_user_defined_model.py", "test/exps/basics/evaluate_text2vector.py", "mindware/components/models/regression/extra_trees.py", "mindware/components/models/base_nn.py" ]
[ "import argparse\nimport os\nimport sys\nimport time\nimport numpy as np\n\nfrom ConfigSpace.configuration_space import ConfigurationSpace\nfrom ConfigSpace.hyperparameters import UniformFloatHyperparameter, \\\n UniformIntegerHyperparameter, CategoricalHyperparameter, \\\n UnParametrizedHyperparameter, Constant\nfrom sklearn.datasets import load_iris\nfrom sklearn.metrics import balanced_accuracy_score\nfrom sklearn.model_selection import train_test_split\n\nsys.path.append(os.getcwd())\nfrom mindware.utils.data_manager import DataManager\nfrom mindware.estimators import Classifier\nfrom mindware.components.models.base_model import BaseClassificationModel\nfrom mindware.components.models.classification import add_classifier\nfrom mindware.components.utils.configspace_utils import check_none\nfrom mindware.components.utils.constants import DENSE, SPARSE, UNSIGNED_DATA, PREDICTIONS\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--time_limit', type=int, default=1200)\nargs = parser.parse_args()\n\ntime_limit = args.time_limit\n\n\nclass UserDefinedDecisionTree(BaseClassificationModel):\n def __init__(self, criterion, max_features, max_depth_factor,\n min_samples_split, min_samples_leaf, min_weight_fraction_leaf,\n max_leaf_nodes, min_impurity_decrease, class_weight=None,\n random_state=None):\n self.criterion = criterion\n self.max_features = max_features\n self.max_depth_factor = max_depth_factor\n self.min_samples_split = min_samples_split\n self.min_samples_leaf = min_samples_leaf\n self.max_leaf_nodes = max_leaf_nodes\n self.min_weight_fraction_leaf = min_weight_fraction_leaf\n self.min_impurity_decrease = min_impurity_decrease\n self.random_state = random_state\n self.class_weight = class_weight\n self.estimator = None\n self.time_limit = None\n\n def fit(self, X, y, sample_weight=None):\n from sklearn.tree import DecisionTreeClassifier\n\n self.max_features = float(self.max_features)\n # Heuristic to set the tree depth\n if check_none(self.max_depth_factor):\n max_depth_factor = self.max_depth_factor = None\n else:\n num_features = X.shape[1]\n self.max_depth_factor = int(self.max_depth_factor)\n max_depth_factor = max(\n 1,\n int(np.round(self.max_depth_factor * num_features, 0)))\n self.min_samples_split = int(self.min_samples_split)\n self.min_samples_leaf = int(self.min_samples_leaf)\n if check_none(self.max_leaf_nodes):\n self.max_leaf_nodes = None\n else:\n self.max_leaf_nodes = int(self.max_leaf_nodes)\n self.min_weight_fraction_leaf = float(self.min_weight_fraction_leaf)\n self.min_impurity_decrease = float(self.min_impurity_decrease)\n\n self.estimator = DecisionTreeClassifier(\n criterion=self.criterion,\n max_depth=max_depth_factor,\n min_samples_split=self.min_samples_split,\n min_samples_leaf=self.min_samples_leaf,\n max_leaf_nodes=self.max_leaf_nodes,\n min_weight_fraction_leaf=self.min_weight_fraction_leaf,\n min_impurity_decrease=self.min_impurity_decrease,\n class_weight=self.class_weight,\n random_state=self.random_state)\n self.estimator.fit(X, y, sample_weight=sample_weight)\n return self\n\n def predict(self, X):\n if self.estimator is None:\n raise NotImplementedError\n return self.estimator.predict(X)\n\n def predict_proba(self, X):\n if self.estimator is None:\n raise NotImplementedError()\n probas = self.estimator.predict_proba(X)\n return probas\n\n @staticmethod\n def get_properties(dataset_properties=None):\n return {'shortname': 'DT',\n 'name': 'Decision Tree Classifier',\n 'handles_regression': False,\n 'handles_classification': True,\n 
'handles_multiclass': True,\n 'handles_multilabel': True,\n 'is_deterministic': True,\n 'input': (DENSE, SPARSE, UNSIGNED_DATA),\n 'output': (PREDICTIONS,)}\n\n @staticmethod\n def get_hyperparameter_search_space(dataset_properties=None, optimizer='smac'):\n if optimizer == 'smac':\n cs = ConfigurationSpace()\n criterion = CategoricalHyperparameter(\n \"criterion\", [\"gini\", \"entropy\"], default_value=\"gini\")\n max_depth_factor = UniformFloatHyperparameter(\n 'max_depth_factor', 0., 2., default_value=0.5)\n min_samples_split = UniformIntegerHyperparameter(\n \"min_samples_split\", 2, 20, default_value=2)\n min_samples_leaf = UniformIntegerHyperparameter(\n \"min_samples_leaf\", 1, 20, default_value=1)\n min_weight_fraction_leaf = Constant(\"min_weight_fraction_leaf\", 0.0)\n max_features = UnParametrizedHyperparameter('max_features', 1.0)\n max_leaf_nodes = UnParametrizedHyperparameter(\"max_leaf_nodes\", \"None\")\n min_impurity_decrease = UnParametrizedHyperparameter('min_impurity_decrease', 0.0)\n\n cs.add_hyperparameters([criterion, max_features, max_depth_factor,\n min_samples_split, min_samples_leaf,\n min_weight_fraction_leaf, max_leaf_nodes,\n min_impurity_decrease])\n return cs\n\n\nprint('==> Start to evaluate with Budget %d' % time_limit)\n\niris = load_iris()\nX, y = iris.data, iris.target\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=1)\ndm = DataManager(X_train, y_train)\ntrain_data = dm.get_data_node(X_train, y_train)\ntest_data = dm.get_data_node(X_test, y_test)\n\nsave_dir = './data/eval_exps/soln-ml'\nif not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\nadd_classifier(UserDefinedDecisionTree)\nclf = Classifier(time_limit=time_limit,\n output_dir=save_dir,\n include_algorithms=['UserDefinedDecisionTree'],\n random_state=1,\n metric='acc',\n n_jobs=1)\n_start_time = time.time()\n_iter_id = 0\n\nclf.fit(train_data)\npred = clf.predict(test_data)\n\nprint(balanced_accuracy_score(test_data.data[1], pred))\n", "import numpy as np\nimport os\nimport sys\n\nsys.path.append(os.getcwd())\n\nfrom mindware.components.feature_engineering.transformations.preprocessor.text2vector import \\\n Text2VectorTransformation\nfrom mindware.components.feature_engineering.transformation_graph import DataNode\nfrom mindware.components.utils.constants import *\nfrom mindware.estimators import Classifier\n\nx = np.array([[1, 'I am good', 'I am right', 3], [2, 'He is good', 'He is ok', 4],\n [2.5, 'Everyone is good', 'Everyone is ok', 7], [1.3333, 'well', 'what', 5]])\ny = np.array([0, 1, 0, 1])\n\nt2v = Text2VectorTransformation()\ndata = (x, y)\nfeature_type = [NUMERICAL, TEXT, TEXT, DISCRETE]\ndatanode = DataNode(data, feature_type)\n\nclf = Classifier(time_limit=20,\n enable_meta_algorithm_selection=False,\n include_algorithms=['random_forest'])\n\nclf.fit(datanode, opt_strategy='combined')\nprint(clf.predict(datanode))\n", "import time\n\nfrom ConfigSpace.configuration_space import ConfigurationSpace\nfrom ConfigSpace.hyperparameters import UniformFloatHyperparameter, \\\n UniformIntegerHyperparameter, CategoricalHyperparameter, \\\n UnParametrizedHyperparameter\n\nfrom mindware.components.models.base_model import BaseRegressionModel, IterativeComponentWithSampleWeight\nfrom mindware.components.utils.configspace_utils import check_none, check_for_bool\nfrom mindware.components.utils.constants import DENSE, SPARSE, UNSIGNED_DATA, PREDICTIONS\n\n\nclass ExtraTreesRegressor(IterativeComponentWithSampleWeight, BaseRegressionModel):\n\n def 
__init__(self, criterion, min_samples_leaf,\n min_samples_split, max_features, bootstrap, max_leaf_nodes,\n max_depth, min_weight_fraction_leaf, min_impurity_decrease,\n oob_score=False, n_jobs=1, random_state=None, verbose=0):\n self.n_estimators = self.get_max_iter()\n self.criterion = criterion\n\n if check_none(max_depth):\n self.max_depth = None\n else:\n self.max_depth = int(max_depth)\n if check_none(max_leaf_nodes):\n self.max_leaf_nodes = None\n else:\n self.max_leaf_nodes = int(max_leaf_nodes)\n\n self.min_samples_leaf = int(min_samples_leaf)\n self.min_samples_split = int(min_samples_split)\n self.max_features = float(max_features)\n self.bootstrap = check_for_bool(bootstrap)\n self.min_weight_fraction_leaf = float(min_weight_fraction_leaf)\n self.min_impurity_decrease = float(min_impurity_decrease)\n self.oob_score = oob_score\n self.n_jobs = int(n_jobs)\n self.random_state = random_state\n self.verbose = int(verbose)\n self.estimator = None\n\n @staticmethod\n def get_max_iter():\n return 512\n\n def get_current_iter(self):\n return self.estimator.n_estimators\n\n def iterative_fit(self, X, y, sample_weight=None, n_iter=1, refit=False):\n from sklearn.ensemble import ExtraTreesRegressor as ETR\n\n if refit:\n self.estimator = None\n\n if self.estimator is None:\n max_features = int(X.shape[1] ** float(self.max_features))\n self.estimator = ETR(n_estimators=n_iter,\n criterion=self.criterion,\n max_depth=self.max_depth,\n min_samples_split=self.min_samples_split,\n min_samples_leaf=self.min_samples_leaf,\n bootstrap=self.bootstrap,\n max_features=max_features,\n max_leaf_nodes=self.max_leaf_nodes,\n min_weight_fraction_leaf=self.min_weight_fraction_leaf,\n min_impurity_decrease=self.min_impurity_decrease,\n oob_score=self.oob_score,\n n_jobs=self.n_jobs,\n verbose=self.verbose,\n random_state=self.random_state,\n warm_start=True)\n\n else:\n self.estimator.n_estimators += n_iter\n self.estimator.n_estimators = min(self.estimator.n_estimators,\n self.n_estimators)\n\n self.estimator.fit(X, y, sample_weight=sample_weight)\n return self\n\n def configuration_fully_fitted(self):\n if self.estimator is None:\n return False\n return not len(self.estimator.estimators_) < self.n_estimators\n\n def predict(self, X):\n if self.estimator is None:\n raise NotImplementedError\n return self.estimator.predict(X)\n\n @staticmethod\n def get_properties(dataset_properties=None):\n return {'shortname': 'ET',\n 'name': 'Extra Trees Regressor',\n 'handles_regression': True,\n 'handles_classification': False,\n 'handles_multiclass': False,\n 'handles_multilabel': False,\n 'is_deterministic': True,\n 'input': (DENSE, SPARSE, UNSIGNED_DATA),\n 'output': (PREDICTIONS,)}\n\n @staticmethod\n def get_hyperparameter_search_space(dataset_properties=None, optimizer='smac'):\n if optimizer == 'smac':\n cs = ConfigurationSpace()\n criterion = CategoricalHyperparameter(\n \"criterion\", [\"mse\", \"mae\"], default_value=\"mse\")\n\n # The maximum number of features used in the forest is calculated as m^max_features, where\n # m is the total number of features, and max_features is the hyperparameter specified below.\n # The default is 0.5, which yields sqrt(m) features as max_features in the estimator. 
This\n # corresponds with Geurts' heuristic.\n max_features = UniformFloatHyperparameter(\n \"max_features\", 0., 1., default_value=0.5)\n\n max_depth = UnParametrizedHyperparameter(name=\"max_depth\", value=\"None\")\n\n min_samples_split = UniformIntegerHyperparameter(\n \"min_samples_split\", 2, 20, default_value=2)\n min_samples_leaf = UniformIntegerHyperparameter(\n \"min_samples_leaf\", 1, 20, default_value=1)\n min_weight_fraction_leaf = UnParametrizedHyperparameter('min_weight_fraction_leaf', 0.)\n max_leaf_nodes = UnParametrizedHyperparameter(\"max_leaf_nodes\", \"None\")\n min_impurity_decrease = UnParametrizedHyperparameter('min_impurity_decrease', 0.0)\n\n bootstrap = CategoricalHyperparameter(\n \"bootstrap\", [\"True\", \"False\"], default_value=\"False\")\n cs.add_hyperparameters([criterion, max_features,\n max_depth, min_samples_split, min_samples_leaf,\n min_weight_fraction_leaf, max_leaf_nodes,\n min_impurity_decrease, bootstrap])\n\n return cs\n elif optimizer == 'tpe':\n from hyperopt import hp\n space = {'criterion': hp.choice('et_criterion', [\"mse\", \"mae\"]),\n 'max_features': hp.uniform('et_max_features', 0, 1),\n 'min_samples_split': hp.randint('et_min_samples_split', 19) + 2,\n 'min_samples_leaf': hp.randint('et_min_samples_leaf', 20) + 1,\n 'bootstrap': hp.choice('et_bootstrap', [\"True\", \"False\"])}\n\n init_trial = {'criterion': \"mse\", 'max_features': 0.5,\n 'min_samples_split': 2, 'min_samples_leaf': 1, 'bootstrap': \"False\"}\n return space\n", "from __future__ import print_function, division, absolute_import\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom torch.utils.data import DataLoader, Dataset\nfrom torch.optim import Adam, SGD\nfrom torch.optim.lr_scheduler import MultiStepLR\nfrom ConfigSpace.configuration_space import ConfigurationSpace\nfrom ConfigSpace.conditions import EqualsCondition\nfrom ConfigSpace.hyperparameters import UniformFloatHyperparameter, \\\n UniformIntegerHyperparameter, CategoricalHyperparameter, UnParametrizedHyperparameter\n\nfrom mindware.datasets.base_dl_dataset import DLDataset\nfrom mindware.components.utils.dl_util import EarlyStop\nfrom mindware.components.utils.configspace_utils import check_for_bool\n\nNUM_WORKERS = 10\n\n\nclass BaseNeuralNetwork:\n def __init__(self):\n self.early_stop_flag = False\n\n @staticmethod\n def get_properties():\n \"\"\"\n Get the properties of the underlying algorithm.\n :return: algorithm_properties : dict, optional (default=None)\n \"\"\"\n raise NotImplementedError()\n\n def fit(self, dataset):\n \"\"\"\n The fit function calls the fit function of the underlying model and returns `self`.\n :param dataset: torch.utils.data.Dataset\n :return: self, an instance of self.\n \"\"\"\n raise NotImplementedError()\n\n def set_hyperparameters(self, params, init_params=None):\n \"\"\"\n The function sets the class members according to params\n :param params: dictionary, parameters\n :param init_params: dictionary\n :return:\n \"\"\"\n for param, value in params.items():\n if not hasattr(self, param):\n raise ValueError('Cannot set hyperparameter %s for %s because '\n 'the hyperparameter does not exist.' % (param, str(self)))\n setattr(self, param, value)\n\n if init_params is not None:\n for param, value in init_params.items():\n if not hasattr(self, param):\n raise ValueError('Cannot set init param %s for %s because '\n 'the init param does not exist.' 
%\n (param, str(self)))\n setattr(self, param, value)\n return self\n\n def set_empty_model(self, dataset):\n raise NotImplementedError\n\n @staticmethod\n def get_hyperparameter_search_space(dataset_properties=None, optimizer='smac'):\n cs = ConfigurationSpace()\n optimizer = CategoricalHyperparameter('optimizer', ['SGD'], default_value='SGD')\n sgd_learning_rate = CategoricalHyperparameter(\n \"sgd_learning_rate\", [1e-3, 3e-3, 7e-3, 1e-2, 3e-2, 7e-2, 1e-1],\n default_value=1e-1)\n sgd_momentum = UniformFloatHyperparameter(\n \"sgd_momentum\", lower=0.5, upper=0.99, default_value=0.9, log=False)\n nesterov = CategoricalHyperparameter('nesterov', ['True', 'False'], default_value='True')\n\n batch_size = CategoricalHyperparameter(\n \"batch_size\", [32, 64, 128], default_value=32)\n lr_decay = CategoricalHyperparameter(\"lr_decay\", [1e-2, 5e-2, 1e-1, 2e-1], default_value=1e-1)\n weight_decay = CategoricalHyperparameter(\"weight_decay\", [1e-5, 3e-5, 1e-4, 3e-4, 1e-3, 3e-3],\n default_value=1e-4)\n epoch_num = UnParametrizedHyperparameter(\"epoch_num\", 150)\n cs.add_hyperparameters(\n [optimizer, sgd_learning_rate, sgd_momentum, batch_size, epoch_num,\n lr_decay, weight_decay, nesterov])\n # optimizer = CategoricalHyperparameter('optimizer', ['SGD', 'Adam'], default_value='SGD')\n # adam_learning_rate = UniformFloatHyperparameter(\n # \"adam_learning_rate\", lower=1e-4, upper=1e-2, default_value=2e-3, log=True)\n # beta1 = UniformFloatHyperparameter(\n # \"beta1\", lower=0.5, upper=0.999, default_value=0.9, log=False)\n # batch_size = CategoricalHyperparameter(\n # \"batch_size\", [16, 32, 64, 128], default_value=32)\n # sgd_lr_depends_on_sgd = EqualsCondition(sgd_learning_rate, optimizer, \"SGD\")\n # adam_lr_depends_on_adam = EqualsCondition(adam_learning_rate, optimizer, \"Adam\")\n # beta_depends_on_adam = EqualsCondition(beta1, optimizer, \"Adam\")\n # sgd_momentum_depends_on_sgd = EqualsCondition(sgd_momentum, optimizer, \"SGD\")\n # nesterov_depends_on_sgd = EqualsCondition(nesterov, optimizer, 'SGD')\n # cs.add_conditions(\n # [sgd_lr_depends_on_sgd, sgd_momentum_depends_on_sgd,\n # nesterov_depends_on_sgd])\n return cs\n\n\nclass BaseImgClassificationNeuralNetwork(BaseNeuralNetwork):\n def __init__(self, optimizer, batch_size, epoch_num, lr_decay, weight_decay,\n sgd_learning_rate=None, sgd_momentum=None, nesterov=None,\n adam_learning_rate=None, beta1=None, random_state=None,\n grayscale=False, device='cpu', **kwargs):\n super(BaseImgClassificationNeuralNetwork, self).__init__()\n self.optimizer = optimizer\n self.batch_size = batch_size\n self.max_epoch = epoch_num\n self.epoch_num = epoch_num\n self.lr_decay = lr_decay\n self.weight_decay = weight_decay\n self.sgd_learning_rate = sgd_learning_rate\n self.sgd_momentum = sgd_momentum\n self.nesterov = check_for_bool(nesterov)\n self.adam_learning_rate = adam_learning_rate\n self.beta1 = beta1\n self.random_state = random_state\n self.grayscale = grayscale\n self.model = None\n self.device = torch.device(device)\n self.time_limit = None\n self.load_path = None\n\n self.optimizer_ = None\n self.scheduler = None\n self.early_stop = None\n self.cur_epoch_num = 0\n\n def fit(self, dataset: DLDataset, mode='fit', **kwargs):\n from sklearn.metrics import accuracy_score\n\n assert self.model is not None\n\n params = self.model.parameters()\n val_loader = None\n if 'refit' in mode:\n train_loader = DataLoader(dataset=dataset.train_dataset, batch_size=self.batch_size, shuffle=True,\n num_workers=NUM_WORKERS)\n if mode == 'refit_test':\n 
val_loader = DataLoader(dataset=dataset.test_dataset, batch_size=self.batch_size, shuffle=False,\n num_workers=NUM_WORKERS)\n else:\n if not dataset.subset_sampler_used:\n train_loader = DataLoader(dataset=dataset.train_dataset, batch_size=self.batch_size, shuffle=True,\n num_workers=NUM_WORKERS)\n val_loader = DataLoader(dataset=dataset.val_dataset, batch_size=self.batch_size, shuffle=False,\n num_workers=NUM_WORKERS)\n else:\n train_loader = DataLoader(dataset=dataset.train_dataset, batch_size=self.batch_size,\n sampler=dataset.train_sampler, num_workers=NUM_WORKERS)\n val_loader = DataLoader(dataset=dataset.train_for_val_dataset, batch_size=self.batch_size,\n sampler=dataset.val_sampler, num_workers=NUM_WORKERS)\n\n if self.optimizer == 'SGD':\n optimizer = SGD(params=params, lr=self.sgd_learning_rate, momentum=self.sgd_momentum,\n weight_decay=self.weight_decay, nesterov=self.nesterov)\n elif self.optimizer == 'Adam':\n optimizer = Adam(params=params, lr=self.adam_learning_rate, betas=(self.beta1, 0.999),\n weight_decay=self.weight_decay)\n else:\n raise ValueError(\"Optimizer %s not supported!\" % self.optimizer)\n\n scheduler = MultiStepLR(optimizer, milestones=[int(self.max_epoch * 0.5), int(self.max_epoch * 0.75)],\n gamma=self.lr_decay)\n loss_func = nn.CrossEntropyLoss()\n early_stop = EarlyStop(patience=100, mode='min')\n\n if self.load_path:\n checkpoint = torch.load(self.load_path)\n self.model.load_state_dict(checkpoint['model'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n scheduler.load_state_dict(checkpoint['scheduler'])\n self.cur_epoch_num = checkpoint['epoch_num']\n early_stop = checkpoint['early_stop']\n if early_stop.if_early_stop:\n print(\"Early stop!\")\n self.optimizer_ = optimizer\n self.epoch_num = int(self.epoch_num) + int(self.cur_epoch_num)\n self.scheduler = scheduler\n self.early_stop = early_stop\n return self\n\n profile_iter = kwargs.get('profile_iter', None)\n profile_epoch = kwargs.get('profile_epoch', None)\n assert not (profile_iter and profile_epoch)\n\n if profile_epoch or profile_iter: # Profile mode\n self.model.train()\n if profile_epoch:\n for epoch in range(int(profile_epoch)):\n for i, data in enumerate(train_loader):\n batch_x, batch_y = data[0], data[1]\n logits = self.model(batch_x.float().to(self.device))\n optimizer.zero_grad()\n loss = loss_func(logits, batch_y.to(self.device))\n loss.backward()\n optimizer.step()\n else:\n num_iter = 0\n stop_flag = False\n for epoch in range(int(self.epoch_num)):\n if stop_flag:\n break\n for i, data in enumerate(train_loader):\n batch_x, batch_y = data[0], data[1]\n logits = self.model(batch_x.float().to(self.device))\n optimizer.zero_grad()\n loss = loss_func(logits, batch_y.to(self.device))\n loss.backward()\n optimizer.step()\n num_iter += 1\n if num_iter > profile_iter:\n stop_flag = True\n break\n return self\n\n for epoch in range(int(self.cur_epoch_num), int(self.cur_epoch_num) + int(self.epoch_num)):\n self.model.train()\n # print('Current learning rate: %.5f' % optimizer.state_dict()['param_groups'][0]['lr'])\n epoch_avg_loss = 0\n epoch_avg_acc = 0\n val_avg_loss = 0\n val_avg_acc = 0\n num_train_samples = 0\n num_val_samples = 0\n for i, data in enumerate(train_loader):\n batch_x, batch_y = data[0], data[1]\n num_train_samples += len(batch_x)\n logits = self.model(batch_x.float().to(self.device))\n loss = loss_func(logits, batch_y.to(self.device))\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n epoch_avg_loss += loss.to('cpu').detach() * len(batch_x)\n 
prediction = np.argmax(logits.to('cpu').detach().numpy(), axis=-1)\n epoch_avg_acc += accuracy_score(prediction, batch_y.to('cpu').detach().numpy()) * len(batch_x)\n\n epoch_avg_loss /= num_train_samples\n epoch_avg_acc /= num_train_samples\n # TODO: logger\n print('Epoch %d: Train loss %.4f, train acc %.4f' % (epoch, epoch_avg_loss, epoch_avg_acc))\n\n if val_loader is not None:\n self.model.eval()\n with torch.no_grad():\n for i, data in enumerate(val_loader):\n batch_x, batch_y = data[0], data[1]\n logits = self.model(batch_x.float().to(self.device))\n val_loss = loss_func(logits, batch_y.to(self.device))\n num_val_samples += len(batch_x)\n val_avg_loss += val_loss.to('cpu').detach() * len(batch_x)\n\n prediction = np.argmax(logits.to('cpu').detach().numpy(), axis=-1)\n val_avg_acc += accuracy_score(prediction, batch_y.to('cpu').detach().numpy()) * len(batch_x)\n\n val_avg_loss /= num_val_samples\n val_avg_acc /= num_val_samples\n print('Epoch %d: Val loss %.4f, val acc %.4f' % (epoch, val_avg_loss, val_avg_acc))\n\n # Early stop\n if 'refit' not in mode:\n early_stop.update(val_avg_loss)\n if early_stop.if_early_stop:\n self.early_stop_flag = True\n print(\"Early stop!\")\n break\n\n scheduler.step()\n\n self.optimizer_ = optimizer\n self.epoch_num = int(self.epoch_num) + int(self.cur_epoch_num)\n self.scheduler = scheduler\n self.early_stop = early_stop\n\n return self\n\n def predict_proba(self, dataset: Dataset, sampler=None, batch_size=None):\n if not self.model:\n raise ValueError(\"Model not fitted!\")\n batch_size = self.batch_size if batch_size is None else batch_size\n loader = DataLoader(dataset=dataset, batch_size=batch_size, sampler=sampler, num_workers=NUM_WORKERS)\n self.model.to(self.device)\n self.model.eval()\n\n prediction = None\n with torch.no_grad():\n for i, data in enumerate(loader):\n batch_x, batch_y = data[0], data[1]\n logits = self.model(batch_x.float().to(self.device))\n pred = nn.functional.softmax(logits, dim=-1)\n if prediction is None:\n prediction = pred.to('cpu').detach().numpy()\n else:\n prediction = np.concatenate((prediction, pred.to('cpu').detach().numpy()), 0)\n\n return prediction\n\n def predict(self, dataset: Dataset, sampler=None, batch_size=None):\n if not self.model:\n raise ValueError(\"Model not fitted!\")\n batch_size = self.batch_size if batch_size is None else batch_size\n loader = DataLoader(dataset=dataset, batch_size=batch_size, sampler=sampler, num_workers=NUM_WORKERS)\n self.model.to(self.device)\n self.model.eval()\n\n prediction = None\n with torch.no_grad():\n for i, data in enumerate(loader):\n batch_x, batch_y = data[0], data[1]\n logits = self.model(batch_x.float().to(self.device))\n if prediction is None:\n prediction = logits.to('cpu').detach().numpy()\n else:\n prediction = np.concatenate((prediction, logits.to('cpu').detach().numpy()), 0)\n return np.argmax(prediction, axis=-1)\n\n def score(self, dataset, metric, batch_size=None):\n if not self.model:\n raise ValueError(\"Model not fitted!\")\n batch_size = self.batch_size if batch_size is None else batch_size\n if isinstance(dataset, Dataset):\n loader = DataLoader(dataset=dataset, batch_size=batch_size, num_workers=NUM_WORKERS)\n else:\n if not dataset.subset_sampler_used:\n loader = DataLoader(dataset=dataset.val_dataset, batch_size=batch_size, num_workers=NUM_WORKERS)\n else:\n loader = DataLoader(dataset=dataset.train_for_val_dataset, batch_size=batch_size,\n sampler=dataset.val_sampler, num_workers=NUM_WORKERS)\n\n self.model.to(self.device)\n self.model.eval()\n 
total_len = 0\n score = 0\n with torch.no_grad():\n for i, data in enumerate(loader):\n batch_x, batch_y = data[0], data[1]\n logits = self.model(batch_x.float().to(self.device)).to('cpu')\n prediction = np.argmax(logits.detach().numpy(), axis=-1)\n score += metric(prediction, batch_y.detach().numpy()) * len(prediction)\n total_len += len(prediction)\n score /= total_len\n return score\n\n\nclass BaseTextClassificationNeuralNetwork(BaseNeuralNetwork):\n def __init__(self, optimizer, batch_size, epoch_num, lr_decay, step_decay, weight_decay,\n sgd_learning_rate=None, sgd_momentum=None, nesterov=None, adam_learning_rate=None,\n beta1=None, random_state=None, device='cpu',\n config='./mindware/components/models/text_classification/nn_utils/bert-base-uncased'):\n super(BaseTextClassificationNeuralNetwork, self).__init__()\n self.optimizer = optimizer\n self.batch_size = batch_size\n self.max_epoch = epoch_num\n self.epoch_num = epoch_num\n self.lr_decay = lr_decay\n self.step_decay = step_decay\n self.sgd_learning_rate = sgd_learning_rate\n self.sgd_momentum = sgd_momentum\n self.adam_learning_rate = adam_learning_rate\n self.beta1 = beta1\n self.random_state = random_state\n self.model = None\n self.device = torch.device(device)\n self.time_limit = None\n self.config = config\n self.load_path = None\n\n self.optimizer_ = None\n self.scheduler = None\n self.early_stop = None\n self.cur_epoch_num = 0\n\n def fit(self, dataset, mode='fit', **kwargs):\n from sklearn.metrics import accuracy_score\n\n assert self.model is not None\n\n params = self.model.parameters()\n val_loader = None\n if 'refit' in mode:\n train_loader = DataLoader(dataset=dataset.train_dataset, batch_size=self.batch_size, shuffle=True,\n num_workers=NUM_WORKERS)\n if mode == 'refit_test':\n val_loader = DataLoader(dataset=dataset.test_dataset, batch_size=self.batch_size, shuffle=False,\n num_workers=NUM_WORKERS)\n else:\n if not dataset.subset_sampler_used:\n train_loader = DataLoader(dataset=dataset.train_dataset, batch_size=self.batch_size, shuffle=True,\n num_workers=NUM_WORKERS)\n val_loader = DataLoader(dataset=dataset.val_dataset, batch_size=self.batch_size, shuffle=False,\n num_workers=NUM_WORKERS)\n else:\n train_loader = DataLoader(dataset=dataset.train_dataset, batch_size=self.batch_size,\n sampler=dataset.train_sampler, num_workers=NUM_WORKERS)\n val_loader = DataLoader(dataset=dataset.train_for_val_dataset, batch_size=self.batch_size,\n sampler=dataset.val_sampler, num_workers=NUM_WORKERS)\n\n if self.optimizer == 'SGD':\n optimizer = SGD(params=params, lr=self.sgd_learning_rate, momentum=self.sgd_momentum)\n elif self.optimizer == 'Adam':\n optimizer = Adam(params=params, lr=self.adam_learning_rate, betas=(self.beta1, 0.999))\n else:\n raise ValueError(\"Optimizer %s not supported!\" % self.optimizer)\n\n scheduler = MultiStepLR(optimizer, milestones=[int(self.max_epoch * 0.5), int(self.max_epoch * 0.75)],\n gamma=self.lr_decay)\n loss_func = nn.CrossEntropyLoss()\n early_stop = EarlyStop(patience=5, mode='min')\n\n if self.load_path:\n checkpoint = torch.load(self.load_path)\n self.model.load_state_dict(checkpoint['model'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n scheduler.load_state_dict(checkpoint['scheduler'])\n self.cur_epoch_num = checkpoint['epoch_num']\n early_stop = checkpoint['early_stop']\n if early_stop.if_early_stop:\n print(\"Early stop!\")\n self.optimizer_ = optimizer\n self.epoch_num = int(self.epoch_num) + int(self.cur_epoch_num)\n self.scheduler = scheduler\n self.early_stop = 
early_stop\n return self\n\n profile_iter = kwargs.get('profile_iter', None)\n profile_epoch = kwargs.get('profile_epoch', None)\n assert not (profile_iter and profile_epoch)\n\n if profile_epoch or profile_iter: # Profile mode\n self.model.train()\n if profile_epoch:\n for epoch in range(int(profile_epoch)):\n for i, data in enumerate(train_loader):\n batch_x, batch_y = data[0], data[1]\n masks = torch.Tensor(np.array([[float(i != 0) for i in sample] for sample in batch_x]))\n logits = self.model(batch_x.long().to(self.device), masks.to(self.device))\n optimizer.zero_grad()\n loss = loss_func(logits, batch_y.to(self.device))\n loss.backward()\n optimizer.step()\n else:\n num_iter = 0\n stop_flag = False\n for epoch in range(int(self.epoch_num)):\n if stop_flag:\n break\n for i, data in enumerate(train_loader):\n batch_x, batch_y = data[0], data[1]\n masks = torch.Tensor(np.array([[float(i != 0) for i in sample] for sample in batch_x]))\n logits = self.model(batch_x.long().to(self.device), masks.to(self.device))\n optimizer.zero_grad()\n loss = loss_func(logits, batch_y.to(self.device))\n loss.backward()\n optimizer.step()\n num_iter += 1\n if num_iter > profile_iter:\n stop_flag = True\n break\n return self\n\n for epoch in range(int(self.cur_epoch_num), int(self.cur_epoch_num) + int(self.epoch_num)):\n self.model.train()\n # print('Current learning rate: %.5f' % optimizer.state_dict()['param_groups'][0]['lr'])\n epoch_avg_loss = 0\n epoch_avg_acc = 0\n val_avg_loss = 0\n val_avg_acc = 0\n num_train_samples = 0\n num_val_samples = 0\n for i, data in enumerate(train_loader):\n batch_x, batch_y = data[0], data[1]\n num_train_samples += len(batch_x)\n masks = torch.Tensor(np.array([[float(i != 0) for i in sample] for sample in batch_x]))\n logits = self.model(batch_x.long().to(self.device), masks.to(self.device))\n loss = loss_func(logits, batch_y.to(self.device))\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n epoch_avg_loss += loss.to('cpu').detach() * len(batch_x)\n prediction = np.argmax(logits.to('cpu').detach().numpy(), axis=-1)\n epoch_avg_acc += accuracy_score(prediction, batch_y.to('cpu').detach().numpy()) * len(batch_x)\n\n epoch_avg_loss /= num_train_samples\n epoch_avg_acc /= num_train_samples\n # TODO: logger\n print('Epoch %d: Train loss %.4f, train acc %.4f' % (epoch, epoch_avg_loss, epoch_avg_acc))\n\n if val_loader is not None:\n self.model.eval()\n with torch.no_grad():\n for i, data in enumerate(val_loader):\n batch_x, batch_y = data[0], data[1]\n masks = torch.Tensor(np.array([[float(i != 0) for i in sample] for sample in batch_x]))\n logits = self.model(batch_x.long().to(self.device), masks.to(self.device))\n val_loss = loss_func(logits, batch_y.to(self.device))\n num_val_samples += len(batch_x)\n val_avg_loss += val_loss.to('cpu').detach() * len(batch_x)\n\n prediction = np.argmax(logits.to('cpu').detach().numpy(), axis=-1)\n val_avg_acc += accuracy_score(prediction, batch_y.to('cpu').detach().numpy()) * len(batch_x)\n\n val_avg_loss /= num_val_samples\n val_avg_acc /= num_val_samples\n print('Epoch %d: Val loss %.4f, val acc %.4f' % (epoch, val_avg_loss, val_avg_acc))\n\n # Early stop\n if 'refit' not in mode:\n early_stop.update(val_avg_loss)\n if early_stop.if_early_stop:\n self.early_stop_flag = True\n print(\"Early stop!\")\n break\n\n scheduler.step()\n\n self.optimizer_ = optimizer\n self.epoch_num = int(self.epoch_num) + int(self.cur_epoch_num)\n self.scheduler = scheduler\n\n return self\n\n def predict_proba(self, dataset: Dataset, 
sampler=None, batch_size=None):\n if not self.model:\n raise ValueError(\"Model not fitted!\")\n batch_size = self.batch_size if batch_size is None else batch_size\n loader = DataLoader(dataset=dataset, batch_size=batch_size, sampler=sampler, num_workers=NUM_WORKERS)\n self.model.to(self.device)\n self.model.eval()\n\n prediction = None\n with torch.no_grad():\n for i, data in enumerate(loader):\n batch_x, batch_y = data[0], data[1]\n masks = torch.Tensor(np.array([[float(i != 0) for i in sample] for sample in batch_x]))\n logits = self.model(batch_x.long().to(self.device), masks.to(self.device))\n pred = nn.functional.softmax(logits, dim=-1)\n if prediction is None:\n prediction = pred.to('cpu').detach().numpy()\n else:\n prediction = np.concatenate((prediction, pred.to('cpu').detach().numpy()), 0)\n return prediction\n\n def predict(self, dataset: Dataset, sampler=None, batch_size=None):\n if not self.model:\n raise ValueError(\"Model not fitted!\")\n batch_size = self.batch_size if batch_size is None else batch_size\n assert sampler is None\n loader = DataLoader(dataset=dataset, batch_size=batch_size, sampler=None, num_workers=NUM_WORKERS)\n self.model.to(self.device)\n self.model.eval()\n\n prediction = None\n with torch.no_grad():\n for i, data in enumerate(loader):\n batch_x, batch_y = data[0], data[1]\n masks = torch.Tensor(np.array([[float(i != 0) for i in sample] for sample in batch_x]))\n logits = self.model(batch_x.long().to(self.device), masks.to(self.device))\n if prediction is None:\n prediction = logits.to('cpu').detach().numpy()\n else:\n prediction = np.concatenate((prediction, logits.to('cpu').detach().numpy()), 0)\n return np.argmax(prediction, axis=-1)\n\n def score(self, dataset, metric, batch_size=None):\n if not self.model:\n raise ValueError(\"Model not fitted!\")\n batch_size = self.batch_size if batch_size is None else batch_size\n if isinstance(dataset, Dataset):\n loader = DataLoader(dataset=dataset, batch_size=batch_size, num_workers=NUM_WORKERS)\n else:\n if not dataset.subset_sampler_used:\n loader = DataLoader(dataset=dataset.val_dataset, batch_size=batch_size, num_workers=NUM_WORKERS)\n else:\n loader = DataLoader(dataset=dataset.train_for_val_dataset, batch_size=batch_size,\n sampler=dataset.val_sampler, num_workers=NUM_WORKERS)\n self.model.eval()\n total_len = 0\n score = 0\n with torch.no_grad():\n for i, data in enumerate(loader):\n batch_x, batch_y = data[0], data[1]\n masks = torch.Tensor(np.array([[float(i != 0) for i in sample] for sample in batch_x]))\n logits = self.model(batch_x.long().to(self.device), masks.to(self.device)).to('cpu')\n prediction = np.argmax(logits.detach().numpy(), axis=-1)\n score += metric(prediction, batch_y.detach().numpy()) * len(prediction)\n total_len += len(prediction)\n score /= total_len\n return score\n\n\nclass BaseODClassificationNeuralNetwork(BaseNeuralNetwork):\n def __init__(self, optimizer, batch_size, epoch_num, lr_decay, weight_decay,\n sgd_learning_rate=None, sgd_momentum=None, nesterov=None,\n adam_learning_rate=None, beta1=None, random_state=None,\n grayscale=False, device='cpu', **kwargs):\n super(BaseODClassificationNeuralNetwork, self).__init__()\n self.optimizer = optimizer\n self.batch_size = batch_size\n self.max_epoch = epoch_num\n self.epoch_num = epoch_num\n self.lr_decay = lr_decay\n self.weight_decay = weight_decay\n self.sgd_learning_rate = sgd_learning_rate\n self.sgd_momentum = sgd_momentum\n self.nesterov = check_for_bool(nesterov)\n self.adam_learning_rate = adam_learning_rate\n self.beta1 
= beta1\n self.random_state = random_state\n self.grayscale = grayscale\n self.model = None\n self.device = torch.device(device)\n self.time_limit = None\n self.load_path = None\n\n self.optimizer_ = None\n self.scheduler = None\n self.early_stop = None\n self.cur_epoch_num = 0\n\n def fit(self, dataset: DLDataset, mode='fit', **kwargs):\n assert self.model is not None\n\n if self.load_path:\n self.model.load_state_dict(torch.load(self.load_path))\n\n params = self.model.parameters()\n\n val_loader = None\n if 'refit' in mode:\n train_loader = DataLoader(dataset=dataset.train_dataset, batch_size=self.batch_size, shuffle=True,\n num_workers=NUM_WORKERS, collate_fn=dataset.train_dataset.collate_fn)\n if mode == 'refit_test':\n val_loader = DataLoader(dataset=dataset.test_dataset, batch_size=self.batch_size, shuffle=False,\n num_workers=NUM_WORKERS, collate_fn=dataset.test_dataset.collate_fn)\n else:\n train_loader = DataLoader(dataset=dataset.train_dataset, batch_size=self.batch_size, shuffle=True,\n num_workers=NUM_WORKERS, collate_fn=dataset.train_dataset.collate_fn)\n val_loader = DataLoader(dataset=dataset.val_dataset, batch_size=self.batch_size, shuffle=False,\n num_workers=NUM_WORKERS, collate_fn=dataset.val_dataset.collate_fn)\n # else:\n # train_loader = DataLoader(dataset=dataset.train_dataset, batch_size=self.batch_size,\n # sampler=dataset.train_sampler, num_workers=4,\n # collate_fn=dataset.train_dataset.collate_fn)\n # val_loader = DataLoader(dataset=dataset.train_dataset, batch_size=self.batch_size,\n # sampler=dataset.val_sampler, num_workers=4,\n # collate_fn=dataset.train_dataset.collate_fn)\n\n if self.optimizer == 'SGD':\n optimizer = SGD(params=params, lr=self.sgd_learning_rate, momentum=self.sgd_momentum)\n elif self.optimizer == 'Adam':\n optimizer = Adam(params=params, lr=self.adam_learning_rate, betas=(self.beta1, 0.999))\n else:\n raise ValueError(\"Optimizer %s not supported!\" % self.optimizer)\n\n scheduler = MultiStepLR(optimizer, milestones=[int(self.max_epoch * 0.5), int(self.max_epoch * 0.75)],\n gamma=self.lr_decay)\n early_stop = EarlyStop(patience=5, mode='min')\n\n if self.load_path:\n checkpoint = torch.load(self.load_path)\n self.model.load_state_dict(checkpoint['model'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n scheduler.load_state_dict(checkpoint['scheduler'])\n self.cur_epoch_num = checkpoint['epoch_num']\n early_stop = checkpoint['early_stop']\n if early_stop.if_early_stop:\n print(\"Early stop!\")\n self.optimizer_ = optimizer\n self.epoch_num = int(self.epoch_num) + int(self.cur_epoch_num)\n self.scheduler = scheduler\n self.early_stop = early_stop\n return self\n\n profile_iter = kwargs.get('profile_iter', None)\n profile_epoch = kwargs.get('profile_epoch', None)\n assert not (profile_iter and profile_epoch)\n\n if profile_epoch or profile_iter: # Profile mode\n self.model.train()\n if profile_epoch:\n for epoch in range(int(profile_epoch)):\n for i, (_, batch_x, batch_y) in enumerate(train_loader):\n loss, outputs = self.model(batch_x.float().to(self.device), batch_y.float().to(self.device))\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n else:\n num_iter = 0\n stop_flag = False\n for epoch in range(int(self.epoch_num)):\n if stop_flag:\n break\n for i, (_, batch_x, batch_y) in enumerate(train_loader):\n loss, outputs = self.model(batch_x.float().to(self.device), batch_y.float().to(self.device))\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n num_iter += 1\n if num_iter > profile_iter:\n stop_flag = 
True\n break\n return self\n\n for epoch in range(int(self.cur_epoch_num), int(self.cur_epoch_num) + int(self.epoch_num)):\n self.model.train()\n # print('Current learning rate: %.5f' % optimizer.state_dict()['param_groups'][0]['lr'])\n epoch_avg_loss = 0\n val_avg_loss = 0\n num_train_samples = 0\n num_val_samples = 0\n for i, (_, batch_x, batch_y) in enumerate(train_loader):\n loss, outputs = self.model(batch_x.float().to(self.device), batch_y.float().to(self.device))\n optimizer.zero_grad()\n epoch_avg_loss += loss.to('cpu').detach() * len(batch_x)\n num_train_samples += len(batch_x)\n loss.backward()\n optimizer.step()\n epoch_avg_loss /= num_train_samples\n print('Epoch %d: Train loss %.4f' % (epoch, epoch_avg_loss))\n scheduler.step()\n\n if val_loader is not None:\n self.model.eval()\n with torch.no_grad():\n for i, (_, batch_x, batch_y) in enumerate(val_loader):\n loss, outputs = self.model(batch_x.float().to(self.device), batch_y.float().to(self.device))\n val_avg_loss += loss.to('cpu').detach() * len(batch_x)\n num_val_samples += len(batch_x)\n\n val_avg_loss /= num_val_samples\n print('Epoch %d: Val loss %.4f' % (epoch, val_avg_loss))\n\n # Early stop\n if 'refit' not in mode:\n early_stop.update(val_avg_loss)\n if early_stop.if_early_stop:\n self.early_stop_flag = True\n print(\"Early stop!\")\n break\n\n self.optimizer_ = optimizer\n self.epoch_num = int(self.epoch_num) + int(self.cur_epoch_num)\n self.scheduler = scheduler\n\n return self\n\n def predict(self, dataset: Dataset, sampler=None, batch_size=None):\n if not self.model:\n raise ValueError(\"Model not fitted!\")\n batch_size = self.batch_size if batch_size is None else batch_size\n loader = DataLoader(dataset=dataset, batch_size=batch_size, sampler=sampler,\n num_workers=NUM_WORKERS, collate_fn=dataset.collate_fn)\n self.model.to(self.device)\n self.model.eval()\n\n prediction = None\n with torch.no_grad():\n for i, data in enumerate(loader):\n batch_x, batch_y = data[0], data[1]\n logits = self.model(batch_x.float().to(self.device))\n if prediction is None:\n prediction = logits.to('cpu').detach().numpy()\n else:\n prediction = np.concatenate((prediction, logits.to('cpu').detach().numpy()), 0)\n return np.argmax(prediction, axis=-1)\n\n # TODO: UDF metric\n def score(self, dataset, metric, batch_size=None):\n raise NotImplementedError\n" ]
[ [ "sklearn.metrics.balanced_accuracy_score", "sklearn.datasets.load_iris", "sklearn.model_selection.train_test_split", "numpy.round", "sklearn.tree.DecisionTreeClassifier" ], [ "numpy.array" ], [ "sklearn.ensemble.ExtraTreesRegressor" ], [ "torch.optim.Adam", "torch.nn.CrossEntropyLoss", "torch.nn.functional.softmax", "torch.load", "torch.utils.data.DataLoader", "numpy.argmax", "torch.no_grad", "torch.optim.SGD", "torch.device" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
maybeLee/keras
[ "793620ae1bdda7e37edd485b034e8962fff57f3e", "793620ae1bdda7e37edd485b034e8962fff57f3e", "793620ae1bdda7e37edd485b034e8962fff57f3e" ]
[ "keras/preprocessing/image.py", "keras/optimizers/optimizer_v2/rmsprop_test.py", "keras/optimizers/optimizer_v2/ftrl.py" ]
[ "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# pylint: disable=invalid-name\n# pylint: disable=g-import-not-at-top\n# pylint: disable=g-classes-have-attributes\n# pylint: disable=g-direct-tensorflow-import\n\"\"\"Utilies for image preprocessing and augmentation.\n\nWarning: `tf.keras.preprocessing.image` APIs do not operate on tensors and are\nnot recommended for new code. Prefer loading data with\n`tf.keras.utils.image_dataset_from_directory`, and then transforming the output\n`tf.data.Dataset` with preprocessing layers. For more information, see the\ntutorials for [loading images](\nhttps://www.tensorflow.org/tutorials/load_data/images) and [augmenting images](\nhttps://www.tensorflow.org/tutorials/images/data_augmentation), as well as the\n[preprocessing layer guide](\nhttps://www.tensorflow.org/guide/keras/preprocessing_layers).\n\"\"\"\n\nimport collections\nimport io\nimport multiprocessing\nimport os\nimport pathlib\nimport threading\nimport warnings\n\nfrom keras import backend\nfrom keras.utils import data_utils\nimport numpy as np\nfrom tensorflow.python.util.tf_export import keras_export\n\ntry:\n import scipy\n from scipy import linalg # pylint: disable=unused-import\n from scipy import ndimage # pylint: disable=unused-import\nexcept ImportError:\n pass\ntry:\n from PIL import Image as pil_image\n from PIL import ImageEnhance\nexcept ImportError:\n pil_image = None\n ImageEnhance = None\n\n\nif pil_image is not None:\n _PIL_INTERPOLATION_METHODS = {\n 'nearest': pil_image.NEAREST,\n 'bilinear': pil_image.BILINEAR,\n 'bicubic': pil_image.BICUBIC,\n 'hamming': pil_image.HAMMING,\n 'box': pil_image.BOX,\n 'lanczos': pil_image.LANCZOS,\n }\n\n\n@keras_export('keras.utils.array_to_img',\n 'keras.preprocessing.image.array_to_img')\ndef array_to_img(x, data_format=None, scale=True, dtype=None):\n \"\"\"Converts a 3D Numpy array to a PIL Image instance.\n\n Usage:\n\n ```python\n from PIL import Image\n img = np.random.random(size=(100, 100, 3))\n pil_img = tf.keras.preprocessing.image.array_to_img(img)\n ```\n\n\n Args:\n x: Input data, in any form that can be converted to a Numpy array.\n data_format: Image data format, can be either \"channels_first\" or\n \"channels_last\". Defaults to `None`, in which case the global setting\n `tf.keras.backend.image_data_format()` is used (unless you changed it,\n it defaults to \"channels_last\").\n scale: Whether to rescale the image such that minimum and maximum values\n are 0 and 255 respectively. Defaults to `True`.\n dtype: Dtype to use. 
Default to `None`, in which case the global setting\n `tf.keras.backend.floatx()` is used (unless you changed it, it defaults\n to \"float32\")\n\n Returns:\n A PIL Image instance.\n\n Raises:\n ImportError: if PIL is not available.\n ValueError: if invalid `x` or `data_format` is passed.\n \"\"\"\n\n if data_format is None:\n data_format = backend.image_data_format()\n if dtype is None:\n dtype = backend.floatx()\n if pil_image is None:\n raise ImportError('Could not import PIL.Image. '\n 'The use of `array_to_img` requires PIL.')\n x = np.asarray(x, dtype=dtype)\n if x.ndim != 3:\n raise ValueError('Expected image array to have rank 3 (single image). '\n f'Got array with shape: {x.shape}')\n\n if data_format not in {'channels_first', 'channels_last'}:\n raise ValueError(f'Invalid data_format: {data_format}')\n\n # Original Numpy array x has format (height, width, channel)\n # or (channel, height, width)\n # but target PIL image has format (width, height, channel)\n if data_format == 'channels_first':\n x = x.transpose(1, 2, 0)\n if scale:\n x = x - np.min(x)\n x_max = np.max(x)\n if x_max != 0:\n x /= x_max\n x *= 255\n if x.shape[2] == 4:\n # RGBA\n return pil_image.fromarray(x.astype('uint8'), 'RGBA')\n elif x.shape[2] == 3:\n # RGB\n return pil_image.fromarray(x.astype('uint8'), 'RGB')\n elif x.shape[2] == 1:\n # grayscale\n if np.max(x) > 255:\n # 32-bit signed integer grayscale image. PIL mode \"I\"\n return pil_image.fromarray(x[:, :, 0].astype('int32'), 'I')\n return pil_image.fromarray(x[:, :, 0].astype('uint8'), 'L')\n else:\n raise ValueError(f'Unsupported channel number: {x.shape[2]}')\n\n\n@keras_export('keras.utils.img_to_array',\n 'keras.preprocessing.image.img_to_array')\ndef img_to_array(img, data_format=None, dtype=None):\n \"\"\"Converts a PIL Image instance to a Numpy array.\n\n Usage:\n\n ```python\n from PIL import Image\n img_data = np.random.random(size=(100, 100, 3))\n img = tf.keras.preprocessing.image.array_to_img(img_data)\n array = tf.keras.preprocessing.image.img_to_array(img)\n ```\n\n\n Args:\n img: Input PIL Image instance.\n data_format: Image data format, can be either \"channels_first\" or\n \"channels_last\". Defaults to `None`, in which case the global setting\n `tf.keras.backend.image_data_format()` is used (unless you changed it,\n it defaults to \"channels_last\").\n dtype: Dtype to use. 
Default to `None`, in which case the global setting\n `tf.keras.backend.floatx()` is used (unless you changed it, it defaults\n to \"float32\")\n\n Returns:\n A 3D Numpy array.\n\n Raises:\n ValueError: if invalid `img` or `data_format` is passed.\n \"\"\"\n\n if data_format is None:\n data_format = backend.image_data_format()\n if dtype is None:\n dtype = backend.floatx()\n if data_format not in {'channels_first', 'channels_last'}:\n raise ValueError(f'Unknown data_format: {data_format}')\n # Numpy array x has format (height, width, channel)\n # or (channel, height, width)\n # but original PIL image has format (width, height, channel)\n x = np.asarray(img, dtype=dtype)\n if len(x.shape) == 3:\n if data_format == 'channels_first':\n x = x.transpose(2, 0, 1)\n elif len(x.shape) == 2:\n if data_format == 'channels_first':\n x = x.reshape((1, x.shape[0], x.shape[1]))\n else:\n x = x.reshape((x.shape[0], x.shape[1], 1))\n else:\n raise ValueError(f'Unsupported image shape: {x.shape}')\n return x\n\n\n@keras_export('keras.utils.save_img', 'keras.preprocessing.image.save_img')\ndef save_img(path, x, data_format=None, file_format=None, scale=True, **kwargs):\n \"\"\"Saves an image stored as a Numpy array to a path or file object.\n\n Args:\n path: Path or file object.\n x: Numpy array.\n data_format: Image data format, either \"channels_first\" or\n \"channels_last\".\n file_format: Optional file format override. If omitted, the format to use\n is determined from the filename extension. If a file object was used\n instead of a filename, this parameter should always be used.\n scale: Whether to rescale image values to be within `[0, 255]`.\n **kwargs: Additional keyword arguments passed to `PIL.Image.save()`.\n \"\"\"\n if data_format is None:\n data_format = backend.image_data_format()\n img = array_to_img(x, data_format=data_format, scale=scale)\n if img.mode == 'RGBA' and (file_format == 'jpg' or file_format == 'jpeg'):\n warnings.warn('The JPG format does not support '\n 'RGBA images, converting to RGB.')\n img = img.convert('RGB')\n img.save(path, format=file_format, **kwargs)\n\n\n@keras_export('keras.utils.load_img', 'keras.preprocessing.image.load_img')\ndef load_img(path,\n grayscale=False,\n color_mode='rgb',\n target_size=None,\n interpolation='nearest',\n keep_aspect_ratio=False):\n \"\"\"Loads an image into PIL format.\n\n Usage:\n\n ```\n image = tf.keras.preprocessing.image.load_img(image_path)\n input_arr = tf.keras.preprocessing.image.img_to_array(image)\n input_arr = np.array([input_arr]) # Convert single image to a batch.\n predictions = model.predict(input_arr)\n ```\n\n Args:\n path: Path to image file.\n grayscale: DEPRECATED use `color_mode=\"grayscale\"`.\n color_mode: One of \"grayscale\", \"rgb\", \"rgba\". Default: \"rgb\". The desired\n image format.\n target_size: Either `None` (default to original size) or tuple of ints\n `(img_height, img_width)`.\n interpolation: Interpolation method used to resample the image if the\n target size is different from that of the loaded image. Supported\n methods are \"nearest\", \"bilinear\", and \"bicubic\". If PIL version 1.1.3\n or newer is installed, \"lanczos\" is also supported. If PIL version 3.4.0\n or newer is installed, \"box\" and \"hamming\" are also supported. By\n default, \"nearest\" is used.\n keep_aspect_ratio: Boolean, whether to resize images to a target\n size without aspect ratio distortion. 
The image is cropped in\n the center with target aspect ratio before resizing.\n\n Returns:\n A PIL Image instance.\n\n Raises:\n ImportError: if PIL is not available.\n ValueError: if interpolation method is not supported.\n \"\"\"\n if grayscale:\n warnings.warn('grayscale is deprecated. Please use '\n 'color_mode = \"grayscale\"')\n color_mode = 'grayscale'\n if pil_image is None:\n raise ImportError('Could not import PIL.Image. '\n 'The use of `load_img` requires PIL.')\n if isinstance(path, io.BytesIO):\n img = pil_image.open(path)\n elif isinstance(path, (pathlib.Path, bytes, str)):\n if isinstance(path, pathlib.Path):\n path = str(path.resolve())\n with open(path, 'rb') as f:\n img = pil_image.open(io.BytesIO(f.read()))\n else:\n raise TypeError('path should be path-like or io.BytesIO'\n ', not {}'.format(type(path)))\n\n if color_mode == 'grayscale':\n # if image is not already an 8-bit, 16-bit or 32-bit grayscale image\n # convert it to an 8-bit grayscale image.\n if img.mode not in ('L', 'I;16', 'I'):\n img = img.convert('L')\n elif color_mode == 'rgba':\n if img.mode != 'RGBA':\n img = img.convert('RGBA')\n elif color_mode == 'rgb':\n if img.mode != 'RGB':\n img = img.convert('RGB')\n else:\n raise ValueError('color_mode must be \"grayscale\", \"rgb\", or \"rgba\"')\n if target_size is not None:\n width_height_tuple = (target_size[1], target_size[0])\n if img.size != width_height_tuple:\n if interpolation not in _PIL_INTERPOLATION_METHODS:\n raise ValueError('Invalid interpolation method {} specified. Supported '\n 'methods are {}'.format(\n interpolation,\n ', '.join(_PIL_INTERPOLATION_METHODS.keys())))\n resample = _PIL_INTERPOLATION_METHODS[interpolation]\n\n if keep_aspect_ratio:\n width, height = img.size\n target_width, target_height = width_height_tuple\n\n crop_height = (width * target_height) // target_width\n crop_width = (height * target_width) // target_height\n\n # Set back to input height / width\n # if crop_height / crop_width is not smaller.\n crop_height = min(height, crop_height)\n crop_width = min(width, crop_width)\n\n crop_box_hstart = (height - crop_height) // 2\n crop_box_wstart = (width - crop_width) // 2\n crop_box_wend = crop_box_wstart + crop_width\n crop_box_hend = crop_box_hstart + crop_height\n crop_box = [\n crop_box_wstart, crop_box_hstart, crop_box_wend, crop_box_hend\n ]\n img = img.resize(width_height_tuple, resample, box=crop_box)\n else:\n img = img.resize(width_height_tuple, resample)\n return img\n\n\n@keras_export('keras.preprocessing.image.Iterator')\nclass Iterator(data_utils.Sequence):\n \"\"\"Base class for image data iterators.\n\n Warning: `tf.keras.preprocessing.image.Iterator` is not recommended for\n new code. Prefer loading images with\n `tf.keras.utils.image_dataset_from_directory` and transforming the output\n `tf.data.Dataset` with preprocessing layers. 
For more information, see the\n tutorials for [loading images](\n https://www.tensorflow.org/tutorials/load_data/images) and\n [augmenting images](\n https://www.tensorflow.org/tutorials/images/data_augmentation), as well as\n the [preprocessing layer guide](\n https://www.tensorflow.org/guide/keras/preprocessing_layers).\n\n Every `Iterator` must implement the `_get_batches_of_transformed_samples`\n method.\n\n Args:\n n: Integer, total number of samples in the dataset to loop over.\n batch_size: Integer, size of a batch.\n shuffle: Boolean, whether to shuffle the data between epochs.\n seed: Random seeding for data shuffling.\n \"\"\"\n white_list_formats = ('png', 'jpg', 'jpeg', 'bmp', 'ppm', 'tif', 'tiff')\n\n def __init__(self, n, batch_size, shuffle, seed):\n self.n = n\n self.batch_size = batch_size\n self.seed = seed\n self.shuffle = shuffle\n self.batch_index = 0\n self.total_batches_seen = 0\n self.lock = threading.Lock()\n self.index_array = None\n self.index_generator = self._flow_index()\n\n def _set_index_array(self):\n self.index_array = np.arange(self.n)\n if self.shuffle:\n self.index_array = np.random.permutation(self.n)\n\n def __getitem__(self, idx):\n if idx >= len(self):\n raise ValueError('Asked to retrieve element {idx}, '\n 'but the Sequence '\n 'has length {length}'.format(idx=idx, length=len(self)))\n if self.seed is not None:\n np.random.seed(self.seed + self.total_batches_seen)\n self.total_batches_seen += 1\n if self.index_array is None:\n self._set_index_array()\n index_array = self.index_array[self.batch_size * idx:self.batch_size *\n (idx + 1)]\n return self._get_batches_of_transformed_samples(index_array)\n\n def __len__(self):\n return (self.n + self.batch_size - 1) // self.batch_size # round up\n\n def on_epoch_end(self):\n self._set_index_array()\n\n def reset(self):\n self.batch_index = 0\n\n def _flow_index(self):\n # Ensure self.batch_index is 0.\n self.reset()\n while 1:\n if self.seed is not None:\n np.random.seed(self.seed + self.total_batches_seen)\n if self.batch_index == 0:\n self._set_index_array()\n\n if self.n == 0:\n # Avoiding modulo by zero error\n current_index = 0\n else:\n current_index = (self.batch_index * self.batch_size) % self.n\n if self.n > current_index + self.batch_size:\n self.batch_index += 1\n else:\n self.batch_index = 0\n self.total_batches_seen += 1\n yield self.index_array[current_index:current_index + self.batch_size]\n\n def __iter__(self):\n # Needed if we want to do something like:\n # for x, y in data_gen.flow(...):\n return self\n\n def __next__(self, *args, **kwargs):\n return self.next(*args, **kwargs)\n\n def next(self):\n \"\"\"For python 2.x.\n\n Returns:\n The next batch.\n \"\"\"\n with self.lock:\n index_array = next(self.index_generator)\n # The transformation of images is not under thread lock\n # so it can be done in parallel\n return self._get_batches_of_transformed_samples(index_array)\n\n def _get_batches_of_transformed_samples(self, index_array):\n \"\"\"Gets a batch of transformed samples.\n\n Args:\n index_array: Array of sample indices to include in batch.\n Returns:\n A batch of transformed samples.\n \"\"\"\n raise NotImplementedError\n\n\ndef _iter_valid_files(directory, white_list_formats, follow_links):\n \"\"\"Iterates on files with extension.\n\n Args:\n directory: Absolute path to the directory\n containing files to be counted\n white_list_formats: Set of strings containing allowed extensions for\n the files to be counted.\n follow_links: Boolean, follow symbolic links to 
subdirectories.\n Yields:\n Tuple of (root, filename) with extension in `white_list_formats`.\n \"\"\"\n\n def _recursive_list(subpath):\n return sorted(\n os.walk(subpath, followlinks=follow_links), key=lambda x: x[0])\n\n for root, _, files in _recursive_list(directory):\n for fname in sorted(files):\n if fname.lower().endswith('.tiff'):\n warnings.warn('Using \".tiff\" files with multiple bands '\n 'will cause distortion. Please verify your output.')\n if fname.lower().endswith(white_list_formats):\n yield root, fname\n\n\ndef _list_valid_filenames_in_directory(directory, white_list_formats, split,\n class_indices, follow_links):\n \"\"\"Lists paths of files in `subdir` with extensions in `white_list_formats`.\n\n Args:\n directory: absolute path to a directory containing the files to list.\n The directory name is used as class label\n and must be a key of `class_indices`.\n white_list_formats: set of strings containing allowed extensions for\n the files to be counted.\n split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into\n account a certain fraction of files in each directory.\n E.g.: `segment=(0.6, 1.0)` would only account for last 40 percent\n of images in each directory.\n class_indices: dictionary mapping a class name to its index.\n follow_links: boolean, follow symbolic links to subdirectories.\n\n Returns:\n classes: a list of class indices\n filenames: the path of valid files in `directory`, relative from\n `directory`'s parent (e.g., if `directory` is \"dataset/class1\",\n the filenames will be\n `[\"class1/file1.jpg\", \"class1/file2.jpg\", ...]`).\n \"\"\"\n dirname = os.path.basename(directory)\n if split:\n all_files = list(\n _iter_valid_files(directory, white_list_formats, follow_links))\n num_files = len(all_files)\n start, stop = int(split[0] * num_files), int(split[1] * num_files)\n valid_files = all_files[start:stop]\n else:\n valid_files = _iter_valid_files(directory, white_list_formats, follow_links)\n classes = []\n filenames = []\n for root, fname in valid_files:\n classes.append(class_indices[dirname])\n absolute_path = os.path.join(root, fname)\n relative_path = os.path.join(dirname,\n os.path.relpath(absolute_path, directory))\n filenames.append(relative_path)\n\n return classes, filenames\n\n\nclass BatchFromFilesMixin():\n \"\"\"Adds methods related to getting batches from filenames.\n\n It includes the logic to transform image files to batches.\n \"\"\"\n\n def set_processing_attrs(self, image_data_generator, target_size, color_mode,\n data_format, save_to_dir, save_prefix, save_format,\n subset, interpolation, keep_aspect_ratio):\n \"\"\"Sets attributes to use later for processing files into a batch.\n\n Args:\n image_data_generator: Instance of `ImageDataGenerator`\n to use for random transformations and normalization.\n target_size: tuple of integers, dimensions to resize input images\n to.\n color_mode: One of `\"rgb\"`, `\"rgba\"`, `\"grayscale\"`.\n Color mode to read images.\n data_format: String, one of `channels_first`, `channels_last`.\n save_to_dir: Optional directory where to save the pictures\n being yielded, in a viewable format. 
This is useful\n for visualizing the random transformations being\n applied, for debugging purposes.\n save_prefix: String prefix to use for saving sample\n images (if `save_to_dir` is set).\n save_format: Format to use for saving sample images\n (if `save_to_dir` is set).\n subset: Subset of data (`\"training\"` or `\"validation\"`) if\n validation_split is set in ImageDataGenerator.\n interpolation: Interpolation method used to resample the image if the\n target size is different from that of the loaded image.\n Supported methods are \"nearest\", \"bilinear\", and \"bicubic\".\n If PIL version 1.1.3 or newer is installed, \"lanczos\" is also\n supported. If PIL version 3.4.0 or newer is installed, \"box\" and\n \"hamming\" are also supported. By default, \"nearest\" is used.\n keep_aspect_ratio: Boolean, whether to resize images to a target size\n without aspect ratio distortion. The image is cropped in the center\n with target aspect ratio before resizing.\n \"\"\"\n self.image_data_generator = image_data_generator\n self.target_size = tuple(target_size)\n self.keep_aspect_ratio = keep_aspect_ratio\n if color_mode not in {'rgb', 'rgba', 'grayscale'}:\n raise ValueError('Invalid color mode:', color_mode,\n '; expected \"rgb\", \"rgba\", or \"grayscale\".')\n self.color_mode = color_mode\n self.data_format = data_format\n if self.color_mode == 'rgba':\n if self.data_format == 'channels_last':\n self.image_shape = self.target_size + (4,)\n else:\n self.image_shape = (4,) + self.target_size\n elif self.color_mode == 'rgb':\n if self.data_format == 'channels_last':\n self.image_shape = self.target_size + (3,)\n else:\n self.image_shape = (3,) + self.target_size\n else:\n if self.data_format == 'channels_last':\n self.image_shape = self.target_size + (1,)\n else:\n self.image_shape = (1,) + self.target_size\n self.save_to_dir = save_to_dir\n self.save_prefix = save_prefix\n self.save_format = save_format\n self.interpolation = interpolation\n if subset is not None:\n validation_split = self.image_data_generator._validation_split # pylint: disable=protected-access\n if subset == 'validation':\n split = (0, validation_split)\n elif subset == 'training':\n split = (validation_split, 1)\n else:\n raise ValueError('Invalid subset name: %s;'\n 'expected \"training\" or \"validation\"' % (subset,))\n else:\n split = None\n self.split = split\n self.subset = subset\n\n def _get_batches_of_transformed_samples(self, index_array):\n \"\"\"Gets a batch of transformed samples.\n\n Args:\n index_array: Array of sample indices to include in batch.\n Returns:\n A batch of transformed samples.\n \"\"\"\n batch_x = np.zeros((len(index_array),) + self.image_shape, dtype=self.dtype)\n # build batch of image data\n # self.filepaths is dynamic, is better to call it once outside the loop\n filepaths = self.filepaths\n for i, j in enumerate(index_array):\n img = load_img(\n filepaths[j],\n color_mode=self.color_mode,\n target_size=self.target_size,\n interpolation=self.interpolation,\n keep_aspect_ratio=self.keep_aspect_ratio)\n x = img_to_array(img, data_format=self.data_format)\n # Pillow images should be closed after `load_img`,\n # but not PIL images.\n if hasattr(img, 'close'):\n img.close()\n if self.image_data_generator:\n params = self.image_data_generator.get_random_transform(x.shape)\n x = self.image_data_generator.apply_transform(x, params)\n x = self.image_data_generator.standardize(x)\n batch_x[i] = x\n # optionally save augmented images to disk for debugging purposes\n if self.save_to_dir:\n for i, j 
in enumerate(index_array):\n img = array_to_img(batch_x[i], self.data_format, scale=True)\n fname = '{prefix}_{index}_{hash}.{format}'.format(\n prefix=self.save_prefix,\n index=j,\n hash=np.random.randint(1e7),\n format=self.save_format)\n img.save(os.path.join(self.save_to_dir, fname))\n # build batch of labels\n if self.class_mode == 'input':\n batch_y = batch_x.copy()\n elif self.class_mode in {'binary', 'sparse'}:\n batch_y = np.empty(len(batch_x), dtype=self.dtype)\n for i, n_observation in enumerate(index_array):\n batch_y[i] = self.classes[n_observation]\n elif self.class_mode == 'categorical':\n batch_y = np.zeros((len(batch_x), len(self.class_indices)),\n dtype=self.dtype)\n for i, n_observation in enumerate(index_array):\n batch_y[i, self.classes[n_observation]] = 1.\n elif self.class_mode == 'multi_output':\n batch_y = [output[index_array] for output in self.labels]\n elif self.class_mode == 'raw':\n batch_y = self.labels[index_array]\n else:\n return batch_x\n if self.sample_weight is None:\n return batch_x, batch_y\n else:\n return batch_x, batch_y, self.sample_weight[index_array]\n\n @property\n def filepaths(self):\n \"\"\"List of absolute paths to image files.\"\"\"\n raise NotImplementedError(\n '`filepaths` property method has not been implemented in {}.'.format(\n type(self).__name__))\n\n @property\n def labels(self):\n \"\"\"Class labels of every observation.\"\"\"\n raise NotImplementedError(\n '`labels` property method has not been implemented in {}.'.format(\n type(self).__name__))\n\n @property\n def sample_weight(self):\n raise NotImplementedError(\n '`sample_weight` property method has not been implemented in {}.'\n .format(type(self).__name__))\n\n\n@keras_export('keras.preprocessing.image.DirectoryIterator')\nclass DirectoryIterator(BatchFromFilesMixin, Iterator):\n \"\"\"Iterator capable of reading images from a directory on disk.\n\n Warning: `tf.keras.preprocessing.image.DirectoryIterator` is not recommended\n for new code. Prefer loading images with\n `tf.keras.utils.image_dataset_from_directory` and transforming the output\n `tf.data.Dataset` with preprocessing layers. For more information, see the\n tutorials for [loading images](\n https://www.tensorflow.org/tutorials/load_data/images) and\n [augmenting images](\n https://www.tensorflow.org/tutorials/images/data_augmentation), as well as\n the [preprocessing layer guide](\n https://www.tensorflow.org/guide/keras/preprocessing_layers).\n\n Args:\n directory: Path to the directory to read images from. Each subdirectory in\n this directory will be considered to contain images from one class, or\n alternatively you could specify class subdirectories via the `classes`\n argument.\n image_data_generator: Instance of `ImageDataGenerator` to use for random\n transformations and normalization.\n target_size: tuple of integers, dimensions to resize input images to.\n color_mode: One of `\"rgb\"`, `\"rgba\"`, `\"grayscale\"`. Color mode to read\n images.\n classes: Optional list of strings, names of subdirectories containing\n images from each class (e.g. `[\"dogs\", \"cats\"]`). 
It will be computed\n automatically if not set.\n class_mode: Mode for yielding the targets:\n - `\"binary\"`: binary targets (if there are only two classes),\n - `\"categorical\"`: categorical targets,\n - `\"sparse\"`: integer targets,\n - `\"input\"`: targets are images identical to input images (mainly used\n to work with autoencoders),\n - `None`: no targets get yielded (only input images are yielded).\n batch_size: Integer, size of a batch.\n shuffle: Boolean, whether to shuffle the data between epochs.\n seed: Random seed for data shuffling.\n data_format: String, one of `channels_first`, `channels_last`.\n save_to_dir: Optional directory where to save the pictures being yielded,\n in a viewable format. This is useful for visualizing the random\n transformations being applied, for debugging purposes.\n save_prefix: String prefix to use for saving sample images (if\n `save_to_dir` is set).\n save_format: Format to use for saving sample images (if `save_to_dir` is\n set).\n subset: Subset of data (`\"training\"` or `\"validation\"`) if\n validation_split is set in ImageDataGenerator.\n interpolation: Interpolation method used to resample the image if the\n target size is different from that of the loaded image. Supported\n methods are \"nearest\", \"bilinear\", and \"bicubic\". If PIL version 1.1.3\n or newer is installed, \"lanczos\" is also supported. If PIL version 3.4.0\n or newer is installed, \"box\" and \"hamming\" are also supported. By\n default, \"nearest\" is used.\n keep_aspect_ratio: Boolean, whether to resize images to a target size\n without aspect ratio distortion. The image is cropped in the center\n with target aspect ratio before resizing.\n dtype: Dtype to use for generated arrays.\n \"\"\"\n allowed_class_modes = {'categorical', 'binary', 'sparse', 'input', None}\n\n def __init__(self,\n directory,\n image_data_generator,\n target_size=(256, 256),\n color_mode='rgb',\n classes=None,\n class_mode='categorical',\n batch_size=32,\n shuffle=True,\n seed=None,\n data_format=None,\n save_to_dir=None,\n save_prefix='',\n save_format='png',\n follow_links=False,\n subset=None,\n interpolation='nearest',\n keep_aspect_ratio=False,\n dtype=None):\n if data_format is None:\n data_format = backend.image_data_format()\n if dtype is None:\n dtype = backend.floatx()\n super().set_processing_attrs(image_data_generator, target_size, color_mode,\n data_format, save_to_dir, save_prefix,\n save_format, subset, interpolation,\n keep_aspect_ratio)\n self.directory = directory\n self.classes = classes\n if class_mode not in self.allowed_class_modes:\n raise ValueError('Invalid class_mode: {}; expected one of: {}'\n .format(class_mode, self.allowed_class_modes))\n self.class_mode = class_mode\n self.dtype = dtype\n # First, count the number of samples and classes.\n self.samples = 0\n\n if not classes:\n classes = []\n for subdir in sorted(os.listdir(directory)):\n if os.path.isdir(os.path.join(directory, subdir)):\n classes.append(subdir)\n self.num_classes = len(classes)\n self.class_indices = dict(zip(classes, range(len(classes))))\n\n pool = multiprocessing.pool.ThreadPool()\n\n # Second, build an index of the images\n # in the different class subfolders.\n results = []\n self.filenames = []\n i = 0\n for dirpath in (os.path.join(directory, subdir) for subdir in classes):\n results.append(\n pool.apply_async(_list_valid_filenames_in_directory,\n (dirpath, self.white_list_formats, self.split,\n self.class_indices, follow_links)))\n classes_list = []\n for res in results:\n classes, 
filenames = res.get()\n      classes_list.append(classes)\n      self.filenames += filenames\n    self.samples = len(self.filenames)\n    self.classes = np.zeros((self.samples,), dtype='int32')\n    for classes in classes_list:\n      self.classes[i:i + len(classes)] = classes\n      i += len(classes)\n\n    print('Found %d images belonging to %d classes.' %\n          (self.samples, self.num_classes))\n    pool.close()\n    pool.join()\n    self._filepaths = [\n        os.path.join(self.directory, fname) for fname in self.filenames\n    ]\n    super().__init__(self.samples, batch_size, shuffle, seed)\n\n  @property\n  def filepaths(self):\n    return self._filepaths\n\n  @property\n  def labels(self):\n    return self.classes\n\n  @property  # mixin needs this property to work\n  def sample_weight(self):\n    # no sample weights will be returned\n    return None\n\n\n@keras_export('keras.preprocessing.image.NumpyArrayIterator')\nclass NumpyArrayIterator(Iterator):\n  \"\"\"Iterator yielding data from a Numpy array.\n\n  Warning: `tf.keras.preprocessing.image.NumpyArrayIterator` is not recommended\n  for new code. Prefer loading images with\n  `tf.keras.utils.image_dataset_from_directory` and transforming the output\n  `tf.data.Dataset` with preprocessing layers. For more information, see the\n  tutorials for [loading images](\n  https://www.tensorflow.org/tutorials/load_data/images) and\n  [augmenting images](\n  https://www.tensorflow.org/tutorials/images/data_augmentation), as well as\n  the [preprocessing layer guide](\n  https://www.tensorflow.org/guide/keras/preprocessing_layers).\n\n  Args:\n    x: Numpy array of input data or tuple. If tuple, the second element is\n      either another numpy array or a list of numpy arrays, each of which gets\n      passed through as an output without any modifications.\n    y: Numpy array of target data.\n    image_data_generator: Instance of `ImageDataGenerator` to use for random\n      transformations and normalization.\n    batch_size: Integer, size of a batch.\n    shuffle: Boolean, whether to shuffle the data between epochs.\n    sample_weight: Numpy array of sample weights.\n    seed: Random seed for data shuffling.\n    data_format: String, one of `channels_first`, `channels_last`.\n    save_to_dir: Optional directory where to save the pictures being yielded,\n      in a viewable format. This is useful for visualizing the random\n      transformations being applied, for debugging purposes.\n    save_prefix: String prefix to use for saving sample images (if\n      `save_to_dir` is set).\n    save_format: Format to use for saving sample images (if `save_to_dir` is\n      set).\n    subset: Subset of data (`\"training\"` or `\"validation\"`) if\n      validation_split is set in ImageDataGenerator.\n    ignore_class_split: Boolean (default: False), ignore difference\n      in number of classes in labels across train and validation\n      split (useful for non-classification tasks)\n    dtype: Dtype to use for the generated arrays.\n
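\n\n  Example (a minimal usage sketch; the array shapes are illustrative):\n\n  ```python\n  import numpy as np\n  x = np.random.rand(8, 32, 32, 3)  # 8 RGB images\n  y = np.arange(8)\n  datagen = ImageDataGenerator(horizontal_flip=True)\n  it = NumpyArrayIterator(x, y, datagen, batch_size=4, shuffle=True, seed=0)\n  batch_x, batch_y = next(it)  # batch_x.shape == (4, 32, 32, 3)\n  ```\n  \"\"\"\n\n  def __init__(self,\n               x,\n               y,\n               image_data_generator,\n               batch_size=32,\n               shuffle=False,\n               sample_weight=None,\n               seed=None,\n               data_format=None,\n               save_to_dir=None,\n               save_prefix='',\n               save_format='png',\n               subset=None,\n               ignore_class_split=False,\n               dtype=None):\n    if data_format is None:\n      data_format = backend.image_data_format()\n    if dtype is None:\n      dtype = backend.floatx()\n    self.dtype = dtype\n    if isinstance(x, tuple) or isinstance(x, list):\n      if not isinstance(x[1], list):\n        x_misc = [np.asarray(x[1])]\n      else:\n        x_misc = [np.asarray(xx) for xx in x[1]]\n      x = x[0]\n      for xx in x_misc:\n        if len(x) != len(xx):\n          raise ValueError('All of the arrays in `x` '\n                           'should have the same length. 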
'\n 'Found a pair with: len(x[0]) = %s, len(x[?]) = %s' %\n (len(x), len(xx)))\n else:\n x_misc = []\n\n if y is not None and len(x) != len(y):\n raise ValueError('`x` (images tensor) and `y` (labels) '\n 'should have the same length. '\n 'Found: x.shape = %s, y.shape = %s' %\n (np.asarray(x).shape, np.asarray(y).shape))\n if sample_weight is not None and len(x) != len(sample_weight):\n raise ValueError('`x` (images tensor) and `sample_weight` '\n 'should have the same length. '\n 'Found: x.shape = %s, sample_weight.shape = %s' %\n (np.asarray(x).shape, np.asarray(sample_weight).shape))\n if subset is not None:\n if subset not in {'training', 'validation'}:\n raise ValueError('Invalid subset name:', subset,\n '; expected \"training\" or \"validation\".')\n split_idx = int(len(x) * image_data_generator._validation_split)\n\n if (y is not None and not ignore_class_split and not np.array_equal(\n np.unique(y[:split_idx]), np.unique(y[split_idx:]))):\n raise ValueError('Training and validation subsets '\n 'have different number of classes after '\n 'the split. If your numpy arrays are '\n 'sorted by the label, you might want '\n 'to shuffle them.')\n\n if subset == 'validation':\n x = x[:split_idx]\n x_misc = [np.asarray(xx[:split_idx]) for xx in x_misc]\n if y is not None:\n y = y[:split_idx]\n else:\n x = x[split_idx:]\n x_misc = [np.asarray(xx[split_idx:]) for xx in x_misc]\n if y is not None:\n y = y[split_idx:]\n\n self.x = np.asarray(x, dtype=self.dtype)\n self.x_misc = x_misc\n if self.x.ndim != 4:\n raise ValueError(\n 'Input data in `NumpyArrayIterator` '\n 'should have rank 4. You passed an array '\n 'with shape', self.x.shape)\n channels_axis = 3 if data_format == 'channels_last' else 1\n if self.x.shape[channels_axis] not in {1, 3, 4}:\n warnings.warn('NumpyArrayIterator is set to use the '\n 'data format convention \"' + data_format + '\" '\n '(channels on axis ' + str(channels_axis) +\n '), i.e. expected either 1, 3, or 4 '\n 'channels on axis ' + str(channels_axis) + '. 
'\n 'However, it was passed an array with shape ' +\n str(self.x.shape) + ' (' +\n str(self.x.shape[channels_axis]) + ' channels).')\n if y is not None:\n self.y = np.asarray(y)\n else:\n self.y = None\n if sample_weight is not None:\n self.sample_weight = np.asarray(sample_weight)\n else:\n self.sample_weight = None\n self.image_data_generator = image_data_generator\n self.data_format = data_format\n self.save_to_dir = save_to_dir\n self.save_prefix = save_prefix\n self.save_format = save_format\n super().__init__(x.shape[0], batch_size, shuffle, seed)\n\n def _get_batches_of_transformed_samples(self, index_array):\n batch_x = np.zeros(\n tuple([len(index_array)] + list(self.x.shape)[1:]), dtype=self.dtype)\n for i, j in enumerate(index_array):\n x = self.x[j]\n params = self.image_data_generator.get_random_transform(x.shape)\n x = self.image_data_generator.apply_transform(\n x.astype(self.dtype), params)\n x = self.image_data_generator.standardize(x)\n batch_x[i] = x\n\n if self.save_to_dir:\n for i, j in enumerate(index_array):\n img = array_to_img(batch_x[i], self.data_format, scale=True)\n fname = '{prefix}_{index}_{hash}.{format}'.format(\n prefix=self.save_prefix,\n index=j,\n hash=np.random.randint(1e4),\n format=self.save_format)\n img.save(os.path.join(self.save_to_dir, fname))\n batch_x_miscs = [xx[index_array] for xx in self.x_misc]\n output = (batch_x if not batch_x_miscs else [batch_x] + batch_x_miscs,)\n if self.y is None:\n return output[0]\n output += (self.y[index_array],)\n if self.sample_weight is not None:\n output += (self.sample_weight[index_array],)\n return output\n\n\ndef validate_filename(filename, white_list_formats):\n \"\"\"Check if a filename refers to a valid file.\n\n Args:\n filename: String, absolute path to a file\n white_list_formats: Set, allowed file extensions\n Returns:\n A boolean value indicating if the filename is valid or not\n \"\"\"\n return (filename.lower().endswith(white_list_formats) and\n os.path.isfile(filename))\n\n\nclass DataFrameIterator(BatchFromFilesMixin, Iterator):\n \"\"\"Iterator capable of reading images from a directory on disk as a dataframe.\n\n Args:\n dataframe: Pandas dataframe containing the filepaths relative to\n `directory` (or absolute paths if `directory` is None) of the images in\n a string column. It should include other column/s depending on the\n `class_mode`: - if `class_mode` is `\"categorical\"` (default value) it\n must include the `y_col` column with the class/es of each image.\n Values in column can be string/list/tuple if a single class or\n list/tuple if multiple classes. - if `class_mode` is `\"binary\"` or\n `\"sparse\"` it must include the given `y_col` column with class values\n as strings. - if `class_mode` is `\"raw\"` or `\"multi_output\"` it should\n contain the columns specified in `y_col`. - if `class_mode` is\n `\"input\"` or `None` no extra column is needed.\n directory: string, path to the directory to read images from. If `None`,\n data in `x_col` column should be absolute paths.\n image_data_generator: Instance of `ImageDataGenerator` to use for random\n transformations and normalization. If None, no transformations and\n normalizations are made.\n x_col: string, column in `dataframe` that contains the filenames (or\n absolute paths if `directory` is `None`).\n y_col: string or list, column/s in `dataframe` that has the target data.\n weight_col: string, column in `dataframe` that contains the sample\n weights. 
Default: `None`.\n target_size: tuple of integers, dimensions to resize input images to.\n color_mode: One of `\"rgb\"`, `\"rgba\"`, `\"grayscale\"`. Color mode to read\n images.\n classes: Optional list of strings, classes to use (e.g. `[\"dogs\",\n \"cats\"]`). If None, all classes in `y_col` will be used.\n class_mode: one of \"binary\", \"categorical\", \"input\", \"multi_output\",\n \"raw\", \"sparse\" or None. Default: \"categorical\".\n Mode for yielding the targets:\n - `\"binary\"`: 1D numpy array of binary labels,\n - `\"categorical\"`: 2D numpy array of one-hot encoded labels. Supports\n multi-label output.\n - `\"input\"`: images identical to input images (mainly used to work\n with autoencoders),\n - `\"multi_output\"`: list with the values of the different columns,\n - `\"raw\"`: numpy array of values in `y_col` column(s),\n - `\"sparse\"`: 1D numpy array of integer labels, - `None`, no targets\n are returned (the generator will only yield batches of image data,\n which is useful to use in `model.predict()`).\n batch_size: Integer, size of a batch.\n shuffle: Boolean, whether to shuffle the data between epochs.\n seed: Random seed for data shuffling.\n data_format: String, one of `channels_first`, `channels_last`.\n save_to_dir: Optional directory where to save the pictures being yielded,\n in a viewable format. This is useful for visualizing the random\n transformations being applied, for debugging purposes.\n save_prefix: String prefix to use for saving sample images (if\n `save_to_dir` is set).\n save_format: Format to use for saving sample images (if `save_to_dir` is\n set).\n subset: Subset of data (`\"training\"` or `\"validation\"`) if\n validation_split is set in ImageDataGenerator.\n interpolation: Interpolation method used to resample the image if the\n target size is different from that of the loaded image. Supported\n methods are \"nearest\", \"bilinear\", and \"bicubic\". If PIL version 1.1.3\n or newer is installed, \"lanczos\" is also supported. If PIL version 3.4.0\n or newer is installed, \"box\" and \"hamming\" are also supported. By\n default, \"nearest\" is used.\n keep_aspect_ratio: Boolean, whether to resize images to a target size\n without aspect ratio distortion. The image is cropped in the center\n with target aspect ratio before resizing.\n dtype: Dtype to use for the generated arrays.\n validate_filenames: Boolean, whether to validate image filenames in\n `x_col`. If `True`, invalid images will be ignored. Disabling this\n option can lead to speed-up in the instantiation of this class. 
Default:\n `True`.\n \"\"\"\n allowed_class_modes = {\n 'binary', 'categorical', 'input', 'multi_output', 'raw', 'sparse', None\n }\n\n def __init__(self,\n dataframe,\n directory=None,\n image_data_generator=None,\n x_col='filename',\n y_col='class',\n weight_col=None,\n target_size=(256, 256),\n color_mode='rgb',\n classes=None,\n class_mode='categorical',\n batch_size=32,\n shuffle=True,\n seed=None,\n data_format='channels_last',\n save_to_dir=None,\n save_prefix='',\n save_format='png',\n subset=None,\n interpolation='nearest',\n keep_aspect_ratio=False,\n dtype='float32',\n validate_filenames=True):\n super().set_processing_attrs(image_data_generator, target_size, color_mode,\n data_format, save_to_dir, save_prefix,\n save_format, subset, interpolation,\n keep_aspect_ratio)\n df = dataframe.copy()\n self.directory = directory or ''\n self.class_mode = class_mode\n self.dtype = dtype\n # check that inputs match the required class_mode\n self._check_params(df, x_col, y_col, weight_col, classes)\n if validate_filenames: # check which image files are valid and keep them\n df = self._filter_valid_filepaths(df, x_col)\n if class_mode not in ['input', 'multi_output', 'raw', None]:\n df, classes = self._filter_classes(df, y_col, classes)\n num_classes = len(classes)\n # build an index of all the unique classes\n self.class_indices = dict(zip(classes, range(len(classes))))\n # retrieve only training or validation set\n if self.split:\n num_files = len(df)\n start = int(self.split[0] * num_files)\n stop = int(self.split[1] * num_files)\n df = df.iloc[start:stop, :]\n # get labels for each observation\n if class_mode not in ['input', 'multi_output', 'raw', None]:\n self.classes = self.get_classes(df, y_col)\n self.filenames = df[x_col].tolist()\n self._sample_weight = df[weight_col].values if weight_col else None\n\n if class_mode == 'multi_output':\n self._targets = [np.array(df[col].tolist()) for col in y_col]\n if class_mode == 'raw':\n self._targets = df[y_col].values\n self.samples = len(self.filenames)\n validated_string = 'validated' if validate_filenames else 'non-validated'\n if class_mode in ['input', 'multi_output', 'raw', None]:\n print(f'Found {self.samples} {validated_string} image filenames.')\n else:\n print(f'Found {self.samples} {validated_string} image filenames '\n f'belonging to {num_classes} classes.')\n self._filepaths = [\n os.path.join(self.directory, fname) for fname in self.filenames\n ]\n super().__init__(self.samples, batch_size, shuffle, seed)\n\n def _check_params(self, df, x_col, y_col, weight_col, classes):\n # check class mode is one of the currently supported\n if self.class_mode not in self.allowed_class_modes:\n raise ValueError('Invalid class_mode: {}; expected one of: {}'.format(\n self.class_mode, self.allowed_class_modes))\n # check that y_col has several column names if class_mode is multi_output\n if (self.class_mode == 'multi_output') and not isinstance(y_col, list):\n raise TypeError(\n 'If class_mode=\"{}\", y_col must be a list. 
Received {}.'.format(\n              self.class_mode,\n              type(y_col).__name__))\n    # check that filenames/filepaths column values are all strings\n    if not all(df[x_col].apply(lambda x: isinstance(x, str))):\n      raise TypeError(\n          'All values in column x_col={} must be strings.'.format(x_col))\n    # check labels are string if class_mode is binary or sparse\n    if self.class_mode in {'binary', 'sparse'}:\n      if not all(df[y_col].apply(lambda x: isinstance(x, str))):\n        raise TypeError('If class_mode=\"{}\", y_col=\"{}\" column '\n                        'values must be strings.'.format(\n                            self.class_mode, y_col))\n    # check that if binary there are only 2 different classes\n    if self.class_mode == 'binary':\n      if classes:\n        classes = set(classes)\n        if len(classes) != 2:\n          raise ValueError('If class_mode=\"binary\" there must be 2 '\n                           'classes. {} class/es were given.'.format(\n                               len(classes)))\n      elif df[y_col].nunique() != 2:\n        raise ValueError('If class_mode=\"binary\" there must be 2 classes. '\n                         'Found {} classes.'.format(df[y_col].nunique()))\n    # check values are string, list or tuple if class_mode is categorical\n    if self.class_mode == 'categorical':\n      types = (str, list, tuple)\n      if not all(df[y_col].apply(lambda x: isinstance(x, types))):\n        raise TypeError('If class_mode=\"{}\", y_col=\"{}\" column '\n                        'values must be type string, list or tuple.'.format(\n                            self.class_mode, y_col))\n    # raise warning if classes are given but will be unused\n    if classes and self.class_mode in {'input', 'multi_output', 'raw', None}:\n      warnings.warn(\n          '`classes` will be ignored given the class_mode=\"{}\"'.format(\n              self.class_mode))\n    # check that if a weight column is given, its values are numeric\n    if weight_col and not issubclass(df[weight_col].dtype.type, np.number):\n      raise TypeError(\n          'Column weight_col={} must be numeric.'.format(weight_col))\n\n  def get_classes(self, df, y_col):\n    labels = []\n    for label in df[y_col]:\n      if isinstance(label, (list, tuple)):\n        labels.append([self.class_indices[lbl] for lbl in label])\n      else:\n        labels.append(self.class_indices[label])\n    return labels\n\n  @staticmethod\n  def _filter_classes(df, y_col, classes):\n    df = df.copy()\n\n    def remove_classes(labels, classes):\n      if isinstance(labels, (list, tuple)):\n        labels = [cls for cls in labels if cls in classes]\n        return labels or None\n      elif isinstance(labels, str):\n        return labels if labels in classes else None\n      else:\n        raise TypeError(\n            'Expected string, list or tuple but found {} in {} column '.format(\n                type(labels), y_col))\n\n    if classes:\n      # prepare for membership lookup\n      classes = list(collections.OrderedDict.fromkeys(classes).keys())\n      df[y_col] = df[y_col].apply(lambda x: remove_classes(x, classes))\n    else:\n      classes = set()\n      for v in df[y_col]:\n        if isinstance(v, (list, tuple)):\n          classes.update(v)\n        else:\n          classes.add(v)\n      classes = sorted(classes)\n    return df.dropna(subset=[y_col]), classes\n\n  def _filter_valid_filepaths(self, df, x_col):\n    \"\"\"Keep only dataframe rows with valid filenames.\n\n    Args:\n      df: Pandas dataframe containing filenames in a column\n      x_col: string, column in `df` that contains the filenames or filepaths\n    Returns:\n      the input dataframe filtered to the rows whose filenames point to\n      valid image files\n    \"\"\"
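\n    # A worked illustration (hypothetical values): with `self.directory`\n    # equal to 'data' and `df[x_col]` equal to ['cats/0.jpg', 'missing.jpg'],\n    # `filepaths` becomes ['data/cats/0.jpg', 'data/missing.jpg']; if only\n    # the first file exists, `mask` is [True, False], the warning below\n    # reports one invalid filename and only row 0 is kept.\n    filepaths = df[x_col].map(lambda fname: os.path.join(self.directory, fname))\n    mask = filepaths.apply(validate_filename, args=(self.white_list_formats,))\n    n_invalid = (~mask).sum()\n    if n_invalid:\n      warnings.warn('Found {} invalid image filename(s) in x_col=\"{}\". 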
'\n 'These filename(s) will be ignored.'.format(\n n_invalid, x_col))\n return df[mask]\n\n @property\n def filepaths(self):\n return self._filepaths\n\n @property\n def labels(self):\n if self.class_mode in {'multi_output', 'raw'}:\n return self._targets\n else:\n return self.classes\n\n @property\n def sample_weight(self):\n return self._sample_weight\n\n\ndef flip_axis(x, axis):\n x = np.asarray(x).swapaxes(axis, 0)\n x = x[::-1, ...]\n x = x.swapaxes(0, axis)\n return x\n\n\n@keras_export('keras.preprocessing.image.ImageDataGenerator')\nclass ImageDataGenerator():\n \"\"\"Generate batches of tensor image data with real-time data augmentation.\n\n Warning: `tf.keras.preprocessing.image.ImageDataGenerator` is not recommended\n for new code. Prefer loading images with\n `tf.keras.utils.image_dataset_from_directory` and transforming the output\n `tf.data.Dataset` with preprocessing layers. For more information, see the\n tutorials for [loading images](\n https://www.tensorflow.org/tutorials/load_data/images) and\n [augmenting images](\n https://www.tensorflow.org/tutorials/images/data_augmentation), as well as\n the [preprocessing layer guide](\n https://www.tensorflow.org/guide/keras/preprocessing_layers).\n\n The data will be looped over (in batches).\n\n Args:\n featurewise_center: Boolean. Set input mean to 0 over the dataset,\n feature-wise.\n samplewise_center: Boolean. Set each sample mean to 0.\n featurewise_std_normalization: Boolean. Divide inputs by std of the\n dataset, feature-wise.\n samplewise_std_normalization: Boolean. Divide each input by its std.\n zca_epsilon: epsilon for ZCA whitening. Default is 1e-6.\n zca_whitening: Boolean. Apply ZCA whitening.\n rotation_range: Int. Degree range for random rotations.\n width_shift_range: Float, 1-D array-like or int\n - float: fraction of total width, if < 1, or pixels if >= 1.\n - 1-D array-like: random elements from the array.\n - int: integer number of pixels from interval `(-width_shift_range,\n +width_shift_range)` - With `width_shift_range=2` possible values\n are integers `[-1, 0, +1]`, same as with `width_shift_range=[-1, 0,\n +1]`, while with `width_shift_range=1.0` possible values are floats\n in the interval [-1.0, +1.0).\n height_shift_range: Float, 1-D array-like or int\n - float: fraction of total height, if < 1, or pixels if >= 1.\n - 1-D array-like: random elements from the array.\n - int: integer number of pixels from interval `(-height_shift_range,\n +height_shift_range)` - With `height_shift_range=2` possible values\n are integers `[-1, 0, +1]`, same as with `height_shift_range=[-1, 0,\n +1]`, while with `height_shift_range=1.0` possible values are floats\n in the interval [-1.0, +1.0).\n brightness_range: Tuple or list of two floats. Range for picking a\n brightness shift value from.\n shear_range: Float. Shear Intensity (Shear angle in counter-clockwise\n direction in degrees)\n zoom_range: Float or [lower, upper]. Range for random zoom. If a float,\n `[lower, upper] = [1-zoom_range, 1+zoom_range]`.\n channel_shift_range: Float. Range for random channel shifts.\n fill_mode: One of {\"constant\", \"nearest\", \"reflect\" or \"wrap\"}. Default is\n 'nearest'. Points outside the boundaries of the input are filled\n according to the given mode:\n - 'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)\n - 'nearest': aaaaaaaa|abcd|dddddddd\n - 'reflect': abcddcba|abcd|dcbaabcd\n - 'wrap': abcdabcd|abcd|abcdabcd\n cval: Float or Int. 
Value used for points outside the boundaries when\n `fill_mode = \"constant\"`.\n horizontal_flip: Boolean. Randomly flip inputs horizontally.\n vertical_flip: Boolean. Randomly flip inputs vertically.\n rescale: rescaling factor. Defaults to None. If None or 0, no rescaling is\n applied, otherwise we multiply the data by the value provided (after\n applying all other transformations).\n preprocessing_function: function that will be applied on each input. The\n function will run after the image is resized and augmented.\n The function should take one argument: one image (Numpy tensor with\n rank 3), and should output a Numpy tensor with the same shape.\n data_format: Image data format, either \"channels_first\" or\n \"channels_last\". \"channels_last\" mode means that the images should have\n shape `(samples, height, width, channels)`, \"channels_first\" mode means\n that the images should have shape `(samples, channels, height, width)`.\n It defaults to the `image_data_format` value found in your Keras config\n file at `~/.keras/keras.json`. If you never set it, then it will be\n \"channels_last\".\n validation_split: Float. Fraction of images reserved for validation\n (strictly between 0 and 1).\n dtype: Dtype to use for the generated arrays.\n\n Raises:\n ValueError: If the value of the argument, `data_format` is other than\n `\"channels_last\"` or `\"channels_first\"`.\n ValueError: If the value of the argument, `validation_split` > 1\n or `validation_split` < 0.\n\n Examples:\n\n Example of using `.flow(x, y)`:\n\n ```python\n (x_train, y_train), (x_test, y_test) = cifar10.load_data()\n y_train = utils.to_categorical(y_train, num_classes)\n y_test = utils.to_categorical(y_test, num_classes)\n datagen = ImageDataGenerator(\n featurewise_center=True,\n featurewise_std_normalization=True,\n rotation_range=20,\n width_shift_range=0.2,\n height_shift_range=0.2,\n horizontal_flip=True,\n validation_split=0.2)\n # compute quantities required for featurewise normalization\n # (std, mean, and principal components if ZCA whitening is applied)\n datagen.fit(x_train)\n # fits the model on batches with real-time data augmentation:\n model.fit(datagen.flow(x_train, y_train, batch_size=32,\n subset='training'),\n validation_data=datagen.flow(x_train, y_train,\n batch_size=8, subset='validation'),\n steps_per_epoch=len(x_train) / 32, epochs=epochs)\n # here's a more \"manual\" example\n for e in range(epochs):\n print('Epoch', e)\n batches = 0\n for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32):\n model.fit(x_batch, y_batch)\n batches += 1\n if batches >= len(x_train) / 32:\n # we need to break the loop by hand because\n # the generator loops indefinitely\n break\n ```\n\n Example of using `.flow_from_directory(directory)`:\n\n ```python\n train_datagen = ImageDataGenerator(\n rescale=1./255,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True)\n test_datagen = ImageDataGenerator(rescale=1./255)\n train_generator = train_datagen.flow_from_directory(\n 'data/train',\n target_size=(150, 150),\n batch_size=32,\n class_mode='binary')\n validation_generator = test_datagen.flow_from_directory(\n 'data/validation',\n target_size=(150, 150),\n batch_size=32,\n class_mode='binary')\n model.fit(\n train_generator,\n steps_per_epoch=2000,\n epochs=50,\n validation_data=validation_generator,\n validation_steps=800)\n ```\n\n Example of transforming images and masks together.\n\n ```python\n # we create two instances with the same arguments\n data_gen_args = 
dict(featurewise_center=True,\n featurewise_std_normalization=True,\n rotation_range=90,\n width_shift_range=0.1,\n height_shift_range=0.1,\n zoom_range=0.2)\n image_datagen = ImageDataGenerator(**data_gen_args)\n mask_datagen = ImageDataGenerator(**data_gen_args)\n # Provide the same seed and keyword arguments to the fit and flow methods\n seed = 1\n image_datagen.fit(images, augment=True, seed=seed)\n mask_datagen.fit(masks, augment=True, seed=seed)\n image_generator = image_datagen.flow_from_directory(\n 'data/images',\n class_mode=None,\n seed=seed)\n mask_generator = mask_datagen.flow_from_directory(\n 'data/masks',\n class_mode=None,\n seed=seed)\n # combine generators into one which yields image and masks\n train_generator = zip(image_generator, mask_generator)\n model.fit(\n train_generator,\n steps_per_epoch=2000,\n epochs=50)\n ```\n \"\"\"\n\n def __init__(self,\n featurewise_center=False,\n samplewise_center=False,\n featurewise_std_normalization=False,\n samplewise_std_normalization=False,\n zca_whitening=False,\n zca_epsilon=1e-6,\n rotation_range=0,\n width_shift_range=0.,\n height_shift_range=0.,\n brightness_range=None,\n shear_range=0.,\n zoom_range=0.,\n channel_shift_range=0.,\n fill_mode='nearest',\n cval=0.,\n horizontal_flip=False,\n vertical_flip=False,\n rescale=None,\n preprocessing_function=None,\n data_format=None,\n validation_split=0.0,\n interpolation_order=1,\n dtype=None):\n if data_format is None:\n data_format = backend.image_data_format()\n if dtype is None:\n dtype = backend.floatx()\n\n self.featurewise_center = featurewise_center\n self.samplewise_center = samplewise_center\n self.featurewise_std_normalization = featurewise_std_normalization\n self.samplewise_std_normalization = samplewise_std_normalization\n self.zca_whitening = zca_whitening\n self.zca_epsilon = zca_epsilon\n self.rotation_range = rotation_range\n self.width_shift_range = width_shift_range\n self.height_shift_range = height_shift_range\n self.shear_range = shear_range\n self.zoom_range = zoom_range\n self.channel_shift_range = channel_shift_range\n self.fill_mode = fill_mode\n self.cval = cval\n self.horizontal_flip = horizontal_flip\n self.vertical_flip = vertical_flip\n self.rescale = rescale\n self.preprocessing_function = preprocessing_function\n self.dtype = dtype\n self.interpolation_order = interpolation_order\n\n if data_format not in {'channels_last', 'channels_first'}:\n raise ValueError('`data_format` should be `\"channels_last\"` '\n '(channel after row and column) or '\n '`\"channels_first\"` (channel before row and column). '\n 'Received: %s' % data_format)\n self.data_format = data_format\n if data_format == 'channels_first':\n self.channel_axis = 1\n self.row_axis = 2\n self.col_axis = 3\n if data_format == 'channels_last':\n self.channel_axis = 3\n self.row_axis = 1\n self.col_axis = 2\n if validation_split and not 0 < validation_split < 1:\n raise ValueError('`validation_split` must be strictly between 0 and 1. '\n ' Received: %s' % validation_split)\n self._validation_split = validation_split\n\n self.mean = None\n self.std = None\n self.zca_whitening_matrix = None\n\n if isinstance(zoom_range, (float, int)):\n self.zoom_range = [1 - zoom_range, 1 + zoom_range]\n elif (len(zoom_range) == 2 and\n all(isinstance(val, (float, int)) for val in zoom_range)):\n self.zoom_range = [zoom_range[0], zoom_range[1]]\n else:\n raise ValueError('`zoom_range` should be a float or '\n 'a tuple or list of two floats. 
'\n                       'Received: %s' % (zoom_range,))\n    if zca_whitening:\n      if not featurewise_center:\n        self.featurewise_center = True\n        warnings.warn('This ImageDataGenerator specifies '\n                      '`zca_whitening`, which overrides '\n                      'setting of `featurewise_center`.')\n      if featurewise_std_normalization:\n        self.featurewise_std_normalization = False\n        warnings.warn('This ImageDataGenerator specifies '\n                      '`zca_whitening` '\n                      'which overrides setting of '\n                      '`featurewise_std_normalization`.')\n    if featurewise_std_normalization:\n      if not featurewise_center:\n        self.featurewise_center = True\n        warnings.warn('This ImageDataGenerator specifies '\n                      '`featurewise_std_normalization`, '\n                      'which overrides setting of '\n                      '`featurewise_center`.')\n    if samplewise_std_normalization:\n      if not samplewise_center:\n        self.samplewise_center = True\n        warnings.warn('This ImageDataGenerator specifies '\n                      '`samplewise_std_normalization`, '\n                      'which overrides setting of '\n                      '`samplewise_center`.')\n    if brightness_range is not None:\n      if (not isinstance(brightness_range, (tuple, list)) or\n          len(brightness_range) != 2):\n        raise ValueError(\n            '`brightness_range` should be tuple or list of two floats. '\n            'Received: %s' % (brightness_range,))\n      self.brightness_range = brightness_range\n\n  def flow(self,\n           x,\n           y=None,\n           batch_size=32,\n           shuffle=True,\n           sample_weight=None,\n           seed=None,\n           save_to_dir=None,\n           save_prefix='',\n           save_format='png',\n           ignore_class_split=False,\n           subset=None):\n    \"\"\"Takes data & label arrays, generates batches of augmented data.\n\n    Args:\n      x: Input data. Numpy array of rank 4 or a tuple. If tuple, the first\n        element should contain the images and the second element another numpy\n        array or a list of numpy arrays that gets passed to the output without\n        any modifications. Can be used to feed the model miscellaneous data\n        along with the images. In case of grayscale data, the channels axis of\n        the image array should have value 1, in case of RGB data, it should\n        have value 3, and in case of RGBA data, it should have value 4.\n      y: Labels.\n      batch_size: Int (default: 32).\n      shuffle: Boolean (default: True).\n      sample_weight: Sample weights.\n      seed: Int (default: None).\n      save_to_dir: None or str (default: None). This allows you to optionally\n        specify a directory to which to save the augmented pictures being\n        generated (useful for visualizing what you are doing).\n      save_prefix: Str (default: `''`). Prefix to use for filenames of saved\n        pictures (only relevant if `save_to_dir` is set).\n      save_format: one of \"png\", \"jpeg\", \"bmp\", \"pdf\", \"ppm\", \"gif\", \"tif\",\n        \"jpg\" (only relevant if `save_to_dir` is set). Default: \"png\".\n      ignore_class_split: Boolean (default: False), ignore difference\n        in number of classes in labels across train and validation\n        split (useful for non-classification tasks)\n      subset: Subset of data (`\"training\"` or `\"validation\"`) if\n        `validation_split` is set in `ImageDataGenerator`.\n\n    Returns:\n      An `Iterator` yielding tuples of `(x, y)`\n      where `x` is a numpy array of image data\n      (in the case of a single image input) or a list\n      of numpy arrays (in the case with\n      additional inputs) and `y` is a numpy array\n      of corresponding labels. If `sample_weight` is not None,\n      the yielded tuples are of the form `(x, y, sample_weight)`.\n      If `y` is None, only the numpy array `x` is returned.\n\n    Raises:\n      ValueError: If the value of the argument, `subset` is other than\n        \"training\" or \"validation\".\n
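\n    Example (a minimal sketch; the array shapes below are illustrative):\n\n    ```python\n    import numpy as np\n    datagen = ImageDataGenerator(rotation_range=10, validation_split=0.25)\n    x = np.random.rand(100, 64, 64, 3)\n    y = np.tile([0, 1], 50)\n    train_it = datagen.flow(x, y, batch_size=16, subset='training', seed=1)\n    val_it = datagen.flow(x, y, batch_size=16, subset='validation', seed=1)\n    batch_x, batch_y = next(train_it)  # batch_x.shape == (16, 64, 64, 3)\n    ```\n    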
\"\"\"\n    return NumpyArrayIterator(\n        x,\n        y,\n        self,\n        batch_size=batch_size,\n        shuffle=shuffle,\n        sample_weight=sample_weight,\n        seed=seed,\n        data_format=self.data_format,\n        save_to_dir=save_to_dir,\n        save_prefix=save_prefix,\n        save_format=save_format,\n        ignore_class_split=ignore_class_split,\n        subset=subset,\n        dtype=self.dtype)\n\n  def flow_from_directory(self,\n                          directory,\n                          target_size=(256, 256),\n                          color_mode='rgb',\n                          classes=None,\n                          class_mode='categorical',\n                          batch_size=32,\n                          shuffle=True,\n                          seed=None,\n                          save_to_dir=None,\n                          save_prefix='',\n                          save_format='png',\n                          follow_links=False,\n                          subset=None,\n                          interpolation='nearest',\n                          keep_aspect_ratio=False):\n    \"\"\"Takes the path to a directory & generates batches of augmented data.\n\n    Args:\n      directory: string, path to the target directory. It should contain one\n        subdirectory per class. Any PNG, JPG, BMP, PPM or TIF images inside\n        each of the subdirectories' directory trees will be included in the\n        generator. See [this script](\n        https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d)\n        for more details.\n      target_size: Tuple of integers `(height, width)`, defaults to `(256,\n        256)`. The dimensions to which all images found will be resized.\n      color_mode: One of \"grayscale\", \"rgb\", \"rgba\". Default: \"rgb\". Whether\n        the images will be converted to have 1, 3, or 4 channels.\n      classes: Optional list of class subdirectories\n        (e.g. `['dogs', 'cats']`). Default: None. If not provided, the list\n        of classes will be automatically inferred from the subdirectory\n        names/structure under `directory`, where each subdirectory will be\n        treated as a different class (and the order of the classes, which\n        will map to the label indices, will be alphanumeric). The\n        dictionary containing the mapping from class names to class\n        indices can be obtained via the attribute `class_indices`.\n      class_mode: One of \"categorical\", \"binary\", \"sparse\",\n        \"input\", or None. Default: \"categorical\".\n        Determines the type of label arrays that are returned:\n        - \"categorical\" will be 2D one-hot encoded labels,\n        - \"binary\" will be 1D binary labels,\n        - \"sparse\" will be 1D integer labels,\n        - \"input\" will be images identical\n          to input images (mainly used to work with autoencoders).\n        - If None, no labels are returned\n          (the generator will only yield batches of image data,\n          which is useful to use with `model.predict_generator()`).\n        Please note that in case of class_mode None,\n        the data still needs to reside in a subdirectory\n        of `directory` for it to work correctly.\n      batch_size: Size of the batches of data (default: 32).\n      shuffle: Whether to shuffle the data (default: True). If set to False,\n        sorts the data in alphanumeric order.\n      seed: Optional random seed for shuffling and transformations.\n      save_to_dir: None or str (default: None). This allows you to optionally\n        specify a directory to which to save the augmented pictures being\n        generated (useful for visualizing what you are doing).\n      save_prefix: Str. Prefix to use for filenames of saved pictures (only\n        relevant if `save_to_dir` is set).\n      save_format: one of \"png\", \"jpeg\", \"bmp\", \"pdf\", \"ppm\", \"gif\", \"tif\",\n        \"jpg\"\n        (only relevant if `save_to_dir` is set). 
Default: \"png\".\n follow_links: Whether to follow symlinks inside\n class subdirectories (default: False).\n subset: Subset of data (`\"training\"` or `\"validation\"`) if\n `validation_split` is set in `ImageDataGenerator`.\n interpolation: Interpolation method used to resample the image if the\n target size is different from that of the loaded image. Supported\n methods are `\"nearest\"`, `\"bilinear\"`, and `\"bicubic\"`. If PIL version\n 1.1.3 or newer is installed, `\"lanczos\"` is also supported. If PIL\n version 3.4.0 or newer is installed, `\"box\"` and `\"hamming\"` are also\n supported. By default, `\"nearest\"` is used.\n keep_aspect_ratio: Boolean, whether to resize images to a target\n size without aspect ratio distortion. The image is cropped in\n the center with target aspect ratio before resizing.\n\n Returns:\n A `DirectoryIterator` yielding tuples of `(x, y)`\n where `x` is a numpy array containing a batch\n of images with shape `(batch_size, *target_size, channels)`\n and `y` is a numpy array of corresponding labels.\n \"\"\"\n return DirectoryIterator(\n directory,\n self,\n target_size=target_size,\n color_mode=color_mode,\n keep_aspect_ratio=keep_aspect_ratio,\n classes=classes,\n class_mode=class_mode,\n data_format=self.data_format,\n batch_size=batch_size,\n shuffle=shuffle,\n seed=seed,\n save_to_dir=save_to_dir,\n save_prefix=save_prefix,\n save_format=save_format,\n follow_links=follow_links,\n subset=subset,\n interpolation=interpolation,\n dtype=self.dtype)\n\n def flow_from_dataframe(self,\n dataframe,\n directory=None,\n x_col='filename',\n y_col='class',\n weight_col=None,\n target_size=(256, 256),\n color_mode='rgb',\n classes=None,\n class_mode='categorical',\n batch_size=32,\n shuffle=True,\n seed=None,\n save_to_dir=None,\n save_prefix='',\n save_format='png',\n subset=None,\n interpolation='nearest',\n validate_filenames=True,\n **kwargs):\n \"\"\"Takes the dataframe and the path to a directory + generates batches.\n\n The generated batches contain augmented/normalized data.\n\n **A simple tutorial can be found **[here](\n http://bit.ly/keras_flow_from_dataframe).\n\n Args:\n dataframe: Pandas dataframe containing the filepaths relative to\n `directory` (or absolute paths if `directory` is None) of the\n images in a string column. It should include other column/s\n depending on the `class_mode`:\n - if `class_mode` is `\"categorical\"` (default value) it must\n include the `y_col` column with the class/es of each image.\n Values in column can be string/list/tuple if a single class\n or list/tuple if multiple classes.\n - if `class_mode` is `\"binary\"` or `\"sparse\"` it must include\n the given `y_col` column with class values as strings.\n - if `class_mode` is `\"raw\"` or `\"multi_output\"` it should contain\n the columns specified in `y_col`.\n - if `class_mode` is `\"input\"` or `None` no extra column is needed.\n directory: string, path to the directory to read images from. If `None`,\n data in `x_col` column should be absolute paths.\n x_col: string, column in `dataframe` that contains the filenames (or\n absolute paths if `directory` is `None`).\n y_col: string or list, column/s in `dataframe` that has the target data.\n weight_col: string, column in `dataframe` that contains the sample\n weights. Default: `None`.\n target_size: tuple of integers `(height, width)`, default: `(256, 256)`.\n The dimensions to which all images found will be resized.\n color_mode: one of \"grayscale\", \"rgb\", \"rgba\". Default: \"rgb\". 
Whether\n        the images will be converted to have 1, 3, or 4 color channels.\n      classes: optional list of classes (e.g. `['dogs', 'cats']`). Default is\n        None. If not provided, the list of classes will be automatically\n        inferred from the `y_col` (and the order of the classes, which will\n        map to the label indices, will be alphanumeric). The dictionary\n        containing the mapping from class names to class indices can be\n        obtained via the attribute `class_indices`.\n      class_mode: one of \"binary\", \"categorical\", \"input\", \"multi_output\",\n        \"raw\", \"sparse\" or None. Default: \"categorical\".\n        Mode for yielding the targets:\n        - `\"binary\"`: 1D numpy array of binary labels,\n        - `\"categorical\"`: 2D numpy array of one-hot encoded labels.\n          Supports multi-label output.\n        - `\"input\"`: images identical to input images (mainly used to work\n          with autoencoders),\n        - `\"multi_output\"`: list with the values of the different columns,\n        - `\"raw\"`: numpy array of values in `y_col` column(s),\n        - `\"sparse\"`: 1D numpy array of integer labels,\n        - `None`, no targets are returned (the generator will only yield\n          batches of image data, which is useful to use in `model.predict()`).\n      batch_size: size of the batches of data (default: 32).\n      shuffle: whether to shuffle the data (default: True).\n      seed: optional random seed for shuffling and transformations.\n      save_to_dir: None or str (default: None). This allows you to optionally\n        specify a directory to which to save the augmented pictures being\n        generated (useful for visualizing what you are doing).\n      save_prefix: str. Prefix to use for filenames of saved pictures (only\n        relevant if `save_to_dir` is set).\n      save_format: one of \"png\", \"jpeg\", \"bmp\", \"pdf\", \"ppm\", \"gif\", \"tif\",\n        \"jpg\" (only relevant if `save_to_dir` is set). Default: \"png\".\n      subset: Subset of data (`\"training\"` or `\"validation\"`) if\n        `validation_split` is set in `ImageDataGenerator`.\n      interpolation: Interpolation method used to resample the image if the\n        target size is different from that of the loaded image. Supported\n        methods are `\"nearest\"`, `\"bilinear\"`, and `\"bicubic\"`. If PIL version\n        1.1.3 or newer is installed, `\"lanczos\"` is also supported. If PIL\n        version 3.4.0 or newer is installed, `\"box\"` and `\"hamming\"` are also\n        supported. By default, `\"nearest\"` is used.\n      validate_filenames: Boolean, whether to validate image filenames in\n        `x_col`. If `True`, invalid images will be ignored. Disabling this\n        option can lead to speed-up in the execution of this function.\n        Defaults to `True`.\n      **kwargs: legacy arguments for raising deprecation warnings.\n\n    Returns:\n      A `DataFrameIterator` yielding tuples of `(x, y)`\n      where `x` is a numpy array containing a batch\n      of images with shape `(batch_size, *target_size, channels)`\n      and `y` is a numpy array of corresponding labels.\n
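\n    Example (a minimal sketch; the dataframe and paths are illustrative):\n\n    ```python\n    import pandas as pd\n    df = pd.DataFrame({'filename': ['cats/0.jpg', 'dogs/1.jpg'],\n                       'class': ['cat', 'dog']})\n    datagen = ImageDataGenerator(rescale=1./255)\n    it = datagen.flow_from_dataframe(df, directory='data',\n                                     x_col='filename', y_col='class',\n                                     target_size=(64, 64), batch_size=2)\n    batch_x, batch_y = next(it)  # batch_y is one-hot with shape (2, 2)\n    ```\n    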
\"\"\"\n    if 'has_ext' in kwargs:\n      warnings.warn(\n          'has_ext is deprecated, filenames in the dataframe have '\n          'to match the exact filenames on disk.', DeprecationWarning)\n    if 'sort' in kwargs:\n      warnings.warn(\n          'sort is deprecated, batches will be created in the '\n          'same order as the filenames provided if shuffle '\n          'is set to False.', DeprecationWarning)\n    if class_mode == 'other':\n      warnings.warn(\n          '`class_mode` \"other\" is deprecated, please use '\n          '`class_mode` \"raw\".', DeprecationWarning)\n      class_mode = 'raw'\n    if 'drop_duplicates' in kwargs:\n      warnings.warn(\n          'drop_duplicates is deprecated, you can drop duplicates '\n          'by using the pandas.DataFrame.drop_duplicates method.',\n          DeprecationWarning)\n\n    return DataFrameIterator(\n        dataframe,\n        directory,\n        self,\n        x_col=x_col,\n        y_col=y_col,\n        weight_col=weight_col,\n        target_size=target_size,\n        color_mode=color_mode,\n        classes=classes,\n        class_mode=class_mode,\n        data_format=self.data_format,\n        batch_size=batch_size,\n        shuffle=shuffle,\n        seed=seed,\n        save_to_dir=save_to_dir,\n        save_prefix=save_prefix,\n        save_format=save_format,\n        subset=subset,\n        interpolation=interpolation,\n        validate_filenames=validate_filenames,\n        dtype=self.dtype)\n\n  def standardize(self, x):\n    \"\"\"Applies the normalization configuration in-place to a batch of inputs.\n\n    `x` is changed in-place since the function is mainly used internally\n    to standardize images and feed them to your network. If a copy of `x`\n    were created instead, it would have a significant performance cost.\n    If you want to apply this method without changing the input in-place\n    you can call the method on a copy beforehand:\n\n      standardize(np.copy(x))\n\n    Args:\n      x: Batch of inputs to be normalized.\n\n    Returns:\n      The inputs, normalized.\n
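\n    Example (a minimal sketch of featurewise normalization; the shapes\n    are illustrative):\n\n    ```python\n    import numpy as np\n    datagen = ImageDataGenerator(featurewise_center=True,\n                                 featurewise_std_normalization=True)\n    x = np.random.rand(50, 32, 32, 3)\n    datagen.fit(x)  # computes the per-channel mean and std of `x`\n    x_std = datagen.standardize(np.copy(x))  # copy, since this is in-place\n    ```\n    \"\"\"\n    if self.preprocessing_function:\n      x = self.preprocessing_function(x)\n    if self.rescale:\n      x *= self.rescale\n    if self.samplewise_center:\n      x -= np.mean(x, keepdims=True)\n    if self.samplewise_std_normalization:\n      x /= (np.std(x, keepdims=True) + 1e-6)\n\n    if self.featurewise_center:\n      if self.mean is not None:\n        x -= self.mean\n      else:\n        warnings.warn('This ImageDataGenerator specifies '\n                      '`featurewise_center`, but it hasn\'t '\n                      'been fit on any training data. Fit it '\n                      'first by calling `.fit(numpy_data)`.')\n    if self.featurewise_std_normalization:\n      if self.std is not None:\n        x /= (self.std + 1e-6)\n      else:\n        warnings.warn('This ImageDataGenerator specifies '\n                      '`featurewise_std_normalization`, '\n                      'but it hasn\'t '\n                      'been fit on any training data. Fit it '\n                      'first by calling `.fit(numpy_data)`.')\n    if self.zca_whitening:\n      if self.zca_whitening_matrix is not None:\n        flat_x = x.reshape(-1, np.prod(x.shape[-3:]))\n        white_x = flat_x @ self.zca_whitening_matrix\n        x = np.reshape(white_x, x.shape)\n      else:\n        warnings.warn('This ImageDataGenerator specifies '\n                      '`zca_whitening`, but it hasn\'t '\n                      'been fit on any training data. 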
Fit it '\n 'first by calling `.fit(numpy_data)`.')\n return x\n\n def get_random_transform(self, img_shape, seed=None):\n \"\"\"Generates random parameters for a transformation.\n\n Args:\n img_shape: Tuple of integers.\n Shape of the image that is transformed.\n seed: Random seed.\n\n Returns:\n A dictionary containing randomly chosen parameters describing the\n transformation.\n \"\"\"\n img_row_axis = self.row_axis - 1\n img_col_axis = self.col_axis - 1\n\n if seed is not None:\n np.random.seed(seed)\n\n if self.rotation_range:\n theta = np.random.uniform(-self.rotation_range, self.rotation_range)\n else:\n theta = 0\n\n if self.height_shift_range:\n try: # 1-D array-like or int\n tx = np.random.choice(self.height_shift_range)\n tx *= np.random.choice([-1, 1])\n except ValueError: # floating point\n tx = np.random.uniform(-self.height_shift_range,\n self.height_shift_range)\n if np.max(self.height_shift_range) < 1:\n tx *= img_shape[img_row_axis]\n else:\n tx = 0\n\n if self.width_shift_range:\n try: # 1-D array-like or int\n ty = np.random.choice(self.width_shift_range)\n ty *= np.random.choice([-1, 1])\n except ValueError: # floating point\n ty = np.random.uniform(-self.width_shift_range, self.width_shift_range)\n if np.max(self.width_shift_range) < 1:\n ty *= img_shape[img_col_axis]\n else:\n ty = 0\n\n if self.shear_range:\n shear = np.random.uniform(-self.shear_range, self.shear_range)\n else:\n shear = 0\n\n if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:\n zx, zy = 1, 1\n else:\n zx, zy = np.random.uniform(self.zoom_range[0], self.zoom_range[1], 2)\n\n flip_horizontal = (np.random.random() < 0.5) * self.horizontal_flip\n flip_vertical = (np.random.random() < 0.5) * self.vertical_flip\n\n channel_shift_intensity = None\n if self.channel_shift_range != 0:\n channel_shift_intensity = np.random.uniform(-self.channel_shift_range,\n self.channel_shift_range)\n\n brightness = None\n if self.brightness_range is not None:\n brightness = np.random.uniform(self.brightness_range[0],\n self.brightness_range[1])\n\n transform_parameters = {\n 'theta': theta,\n 'tx': tx,\n 'ty': ty,\n 'shear': shear,\n 'zx': zx,\n 'zy': zy,\n 'flip_horizontal': flip_horizontal,\n 'flip_vertical': flip_vertical,\n 'channel_shift_intensity': channel_shift_intensity,\n 'brightness': brightness\n }\n\n return transform_parameters\n\n def apply_transform(self, x, transform_parameters):\n \"\"\"Applies a transformation to an image according to given parameters.\n\n Args:\n x: 3D tensor, single image.\n transform_parameters: Dictionary with string - parameter pairs\n describing the transformation.\n Currently, the following parameters\n from the dictionary are used:\n - `'theta'`: Float. Rotation angle in degrees.\n - `'tx'`: Float. Shift in the x direction.\n - `'ty'`: Float. Shift in the y direction.\n - `'shear'`: Float. Shear angle in degrees.\n - `'zx'`: Float. Zoom in the x direction.\n - `'zy'`: Float. Zoom in the y direction.\n - `'flip_horizontal'`: Boolean. Horizontal flip.\n - `'flip_vertical'`: Boolean. Vertical flip.\n - `'channel_shift_intensity'`: Float. Channel shift intensity.\n - `'brightness'`: Float. 
Brightness shift intensity.\n\n Returns:\n A transformed version of the input (same shape).\n \"\"\"\n # x is a single image, so it doesn't have image number at index 0\n img_row_axis = self.row_axis - 1\n img_col_axis = self.col_axis - 1\n img_channel_axis = self.channel_axis - 1\n\n x = apply_affine_transform(\n x,\n transform_parameters.get('theta', 0),\n transform_parameters.get('tx', 0),\n transform_parameters.get('ty', 0),\n transform_parameters.get('shear', 0),\n transform_parameters.get('zx', 1),\n transform_parameters.get('zy', 1),\n row_axis=img_row_axis,\n col_axis=img_col_axis,\n channel_axis=img_channel_axis,\n fill_mode=self.fill_mode,\n cval=self.cval,\n order=self.interpolation_order)\n\n if transform_parameters.get('channel_shift_intensity') is not None:\n x = apply_channel_shift(x,\n transform_parameters['channel_shift_intensity'],\n img_channel_axis)\n\n if transform_parameters.get('flip_horizontal', False):\n x = flip_axis(x, img_col_axis)\n\n if transform_parameters.get('flip_vertical', False):\n x = flip_axis(x, img_row_axis)\n\n if transform_parameters.get('brightness') is not None:\n x = apply_brightness_shift(x, transform_parameters['brightness'], False)\n\n return x\n\n def random_transform(self, x, seed=None):\n \"\"\"Applies a random transformation to an image.\n\n Args:\n x: 3D tensor, single image.\n seed: Random seed.\n\n Returns:\n A randomly transformed version of the input (same shape).\n \"\"\"\n params = self.get_random_transform(x.shape, seed)\n return self.apply_transform(x, params)\n\n def fit(self, x, augment=False, rounds=1, seed=None):\n \"\"\"Fits the data generator to some sample data.\n\n This computes the internal data stats related to the\n data-dependent transformations, based on an array of sample data.\n\n Only required if `featurewise_center` or\n `featurewise_std_normalization` or `zca_whitening` are set to True.\n\n When `rescale` is set to a value, rescaling is applied to\n sample data before computing the internal data stats.\n\n Args:\n x: Sample data. Should have rank 4.\n In case of grayscale data,\n the channels axis should have value 1, in case\n of RGB data, it should have value 3, and in case\n of RGBA data, it should have value 4.\n augment: Boolean (default: False).\n Whether to fit on randomly augmented samples.\n rounds: Int (default: 1).\n If using data augmentation (`augment=True`),\n this is how many augmentation passes over the data to use.\n seed: Int (default: None). Random seed.\n \"\"\"\n x = np.asarray(x, dtype=self.dtype)\n if x.ndim != 4:\n raise ValueError('Input to `.fit()` should have rank 4. '\n 'Got array with shape: ' + str(x.shape))\n if x.shape[self.channel_axis] not in {1, 3, 4}:\n warnings.warn('Expected input to be images (as Numpy array) '\n 'following the data format convention \"' +\n self.data_format + '\" (channels on axis ' +\n str(self.channel_axis) + '), i.e. expected '\n 'either 1, 3 or 4 channels on axis ' +\n str(self.channel_axis) + '. 
'\n 'However, it was passed an array with shape ' +\n str(x.shape) + ' (' + str(x.shape[self.channel_axis]) +\n ' channels).')\n\n if seed is not None:\n np.random.seed(seed)\n\n x = np.copy(x)\n if self.rescale:\n x *= self.rescale\n\n if augment:\n ax = np.zeros(\n tuple([rounds * x.shape[0]] + list(x.shape)[1:]), dtype=self.dtype)\n for r in range(rounds):\n for i in range(x.shape[0]):\n ax[i + r * x.shape[0]] = self.random_transform(x[i])\n x = ax\n\n if self.featurewise_center:\n self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))\n broadcast_shape = [1, 1, 1]\n broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]\n self.mean = np.reshape(self.mean, broadcast_shape)\n x -= self.mean\n\n if self.featurewise_std_normalization:\n self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))\n broadcast_shape = [1, 1, 1]\n broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]\n self.std = np.reshape(self.std, broadcast_shape)\n x /= (self.std + 1e-6)\n\n if self.zca_whitening:\n n = len(x)\n flat_x = np.reshape(x, (n, -1))\n\n u, s, _ = np.linalg.svd(flat_x.T, full_matrices=False)\n s_inv = np.sqrt(n) / (s + self.zca_epsilon)\n self.zca_whitening_matrix = (u * s_inv).dot(u.T)\n\n\n@keras_export('keras.preprocessing.image.random_rotation')\ndef random_rotation(x, rg, row_axis=1, col_axis=2, channel_axis=0,\n fill_mode='nearest', cval=0., interpolation_order=1):\n \"\"\"Performs a random rotation of a Numpy image tensor.\n\n Warning: `tf.keras.preprocessing.image.random_rotation` does not operate on\n tensors and is not recommended for new code. Prefer\n `tf.keras.layers.RandomRotation` which provides equivalent functionality as a\n preprocessing layer. For more information, see the tutorial for\n [augmenting images](\n https://www.tensorflow.org/tutorials/images/data_augmentation), as well as\n the [preprocessing layer guide](\n https://www.tensorflow.org/guide/keras/preprocessing_layers).\n\n Args:\n x: Input tensor. Must be 3D.\n rg: Rotation range, in degrees.\n row_axis: Index of axis for rows in the input tensor.\n col_axis: Index of axis for columns in the input tensor.\n channel_axis: Index of axis for channels in the input tensor.\n fill_mode: Points outside the boundaries of the input\n are filled according to the given mode\n (one of `{'constant', 'nearest', 'reflect', 'wrap'}`).\n cval: Value used for points outside the boundaries\n of the input if `mode='constant'`.\n interpolation_order: int, order of spline interpolation.\n see `ndimage.interpolation.affine_transform`\n\n Returns:\n Rotated Numpy image tensor.\n \"\"\"\n theta = np.random.uniform(-rg, rg)\n x = apply_affine_transform(x,\n theta=theta,\n row_axis=row_axis,\n col_axis=col_axis,\n channel_axis=channel_axis,\n fill_mode=fill_mode,\n cval=cval,\n order=interpolation_order)\n return x\n\n\n@keras_export('keras.preprocessing.image.random_shift')\ndef random_shift(x, wrg, hrg, row_axis=1, col_axis=2, channel_axis=0,\n fill_mode='nearest', cval=0., interpolation_order=1):\n \"\"\"Performs a random spatial shift of a Numpy image tensor.\n\n Warning: `tf.keras.preprocessing.image.random_shift` does not operate on\n tensors and is not recommended for new code. Prefer\n `tf.keras.layers.RandomTranslation` which provides equivalent functionality as\n a preprocessing layer. 
For more information, see the tutorial for\n [augmenting images](\n https://www.tensorflow.org/tutorials/images/data_augmentation), as well as\n the [preprocessing layer guide](\n https://www.tensorflow.org/guide/keras/preprocessing_layers).\n\n Args:\n x: Input tensor. Must be 3D.\n wrg: Width shift range, as a float fraction of the width.\n hrg: Height shift range, as a float fraction of the height.\n row_axis: Index of axis for rows in the input tensor.\n col_axis: Index of axis for columns in the input tensor.\n channel_axis: Index of axis for channels in the input tensor.\n fill_mode: Points outside the boundaries of the input\n are filled according to the given mode\n (one of `{'constant', 'nearest', 'reflect', 'wrap'}`).\n cval: Value used for points outside the boundaries\n of the input if `mode='constant'`.\n interpolation_order: int, order of spline interpolation.\n see `ndimage.interpolation.affine_transform`\n\n Returns:\n Shifted Numpy image tensor.\n \"\"\"\n h, w = x.shape[row_axis], x.shape[col_axis]\n tx = np.random.uniform(-hrg, hrg) * h\n ty = np.random.uniform(-wrg, wrg) * w\n x = apply_affine_transform(x,\n tx=tx,\n ty=ty,\n row_axis=row_axis,\n col_axis=col_axis,\n channel_axis=channel_axis,\n fill_mode=fill_mode,\n cval=cval,\n order=interpolation_order)\n return x\n\n\n@keras_export('keras.preprocessing.image.random_shear')\ndef random_shear(x, intensity, row_axis=1, col_axis=2, channel_axis=0,\n fill_mode='nearest', cval=0., interpolation_order=1):\n \"\"\"Performs a random spatial shear of a Numpy image tensor.\n\n Args:\n x: Input tensor. Must be 3D.\n intensity: Transformation intensity in degrees.\n row_axis: Index of axis for rows in the input tensor.\n col_axis: Index of axis for columns in the input tensor.\n channel_axis: Index of axis for channels in the input tensor.\n fill_mode: Points outside the boundaries of the input\n are filled according to the given mode\n (one of `{'constant', 'nearest', 'reflect', 'wrap'}`).\n cval: Value used for points outside the boundaries\n of the input if `mode='constant'`.\n interpolation_order: int, order of spline interpolation.\n see `ndimage.interpolation.affine_transform`\n\n Returns:\n Sheared Numpy image tensor.\n \"\"\"\n shear = np.random.uniform(-intensity, intensity)\n x = apply_affine_transform(\n x,\n shear=shear,\n row_axis=row_axis,\n col_axis=col_axis,\n channel_axis=channel_axis,\n fill_mode=fill_mode,\n cval=cval,\n order=interpolation_order)\n return x\n\n\n@keras_export('keras.preprocessing.image.random_zoom')\ndef random_zoom(x, zoom_range, row_axis=1, col_axis=2, channel_axis=0,\n fill_mode='nearest', cval=0., interpolation_order=1):\n \"\"\"Performs a random spatial zoom of a Numpy image tensor.\n\n Warning: `tf.keras.preprocessing.image.random_zoom` does not operate on\n tensors and is not recommended for new code. Prefer\n `tf.keras.layers.RandomZoom` which provides equivalent functionality as\n a preprocessing layer. For more information, see the tutorial for\n [augmenting images](\n https://www.tensorflow.org/tutorials/images/data_augmentation), as well as\n the [preprocessing layer guide](\n https://www.tensorflow.org/guide/keras/preprocessing_layers).\n\n Args:\n x: Input tensor. 
Must be 3D.\n zoom_range: Tuple of floats; zoom range for width and height.\n row_axis: Index of axis for rows in the input tensor.\n col_axis: Index of axis for columns in the input tensor.\n channel_axis: Index of axis for channels in the input tensor.\n fill_mode: Points outside the boundaries of the input\n are filled according to the given mode\n (one of `{'constant', 'nearest', 'reflect', 'wrap'}`).\n cval: Value used for points outside the boundaries\n of the input if `mode='constant'`.\n interpolation_order: int, order of spline interpolation.\n see `ndimage.interpolation.affine_transform`\n\n Returns:\n Zoomed Numpy image tensor.\n\n Raises:\n ValueError: if `zoom_range` isn't a tuple.\n \"\"\"\n if len(zoom_range) != 2:\n raise ValueError('`zoom_range` should be a tuple or list of two'\n ' floats. Received: %s' % (zoom_range,))\n\n if zoom_range[0] == 1 and zoom_range[1] == 1:\n zx, zy = 1, 1\n else:\n zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)\n x = apply_affine_transform(\n x,\n zx=zx,\n zy=zy,\n row_axis=row_axis,\n col_axis=col_axis,\n channel_axis=channel_axis,\n fill_mode=fill_mode,\n cval=cval,\n order=interpolation_order)\n return x\n\n\n@keras_export('keras.preprocessing.image.apply_channel_shift')\ndef apply_channel_shift(x, intensity, channel_axis=0):\n \"\"\"Performs a channel shift.\n\n Args:\n x: Input tensor. Must be 3D.\n intensity: Transformation intensity.\n channel_axis: Index of axis for channels in the input tensor.\n\n Returns:\n Numpy image tensor.\n \"\"\"\n x = np.rollaxis(x, channel_axis, 0)\n min_x, max_x = np.min(x), np.max(x)\n channel_images = [\n np.clip(x_channel + intensity, min_x, max_x) for x_channel in x]\n x = np.stack(channel_images, axis=0)\n x = np.rollaxis(x, 0, channel_axis + 1)\n return x\n\n\n@keras_export('keras.preprocessing.image.random_channel_shift')\ndef random_channel_shift(x, intensity_range, channel_axis=0):\n \"\"\"Performs a random channel shift.\n\n Args:\n x: Input tensor. Must be 3D.\n intensity_range: Transformation intensity.\n channel_axis: Index of axis for channels in the input tensor.\n\n Returns:\n Numpy image tensor.\n \"\"\"\n intensity = np.random.uniform(-intensity_range, intensity_range)\n return apply_channel_shift(x, intensity, channel_axis=channel_axis)\n\n\n@keras_export('keras.preprocessing.image.apply_brightness_shift')\ndef apply_brightness_shift(x, brightness, scale=True):\n \"\"\"Performs a brightness shift.\n\n Args:\n x: Input tensor. Must be 3D.\n brightness: Float. The new brightness value.\n scale: Whether to rescale the image such that minimum and maximum values\n are 0 and 255 respectively. Default: True.\n\n Returns:\n Numpy image tensor.\n\n Raises:\n ImportError: if PIL is not available.\n \"\"\"\n if ImageEnhance is None:\n raise ImportError('Using brightness shifts requires PIL. '\n 'Install PIL or Pillow.')\n x_min, x_max = np.min(x), np.max(x)\n local_scale = (x_min < 0) or (x_max > 255)\n x = array_to_img(x, scale=local_scale or scale)\n x = imgenhancer_Brightness = ImageEnhance.Brightness(x)\n x = imgenhancer_Brightness.enhance(brightness)\n x = img_to_array(x)\n if not scale and local_scale:\n x = x / 255 * (x_max - x_min) + x_min\n return x\n\n\n@keras_export('keras.preprocessing.image.random_brightness')\ndef random_brightness(x, brightness_range, scale=True):\n \"\"\"Performs a random brightness shift.\n\n Warning: `tf.keras.preprocessing.image.random_brightness` does not operate on\n tensors and is not recommended for new code. 
Prefer\n  `tf.keras.layers.RandomBrightness` which provides equivalent functionality as\n  a preprocessing layer. For more information, see the tutorial for\n  [augmenting images](\n  https://www.tensorflow.org/tutorials/images/data_augmentation), as well as\n  the [preprocessing layer guide](\n  https://www.tensorflow.org/guide/keras/preprocessing_layers).\n\n  Args:\n      x: Input tensor. Must be 3D.\n      brightness_range: Tuple of floats; brightness range.\n      scale: Whether to rescale the image such that minimum and maximum values\n          are 0 and 255 respectively. Default: True.\n\n  Returns:\n      Numpy image tensor.\n\n  Raises:\n      ValueError: if `brightness_range` isn't a tuple.\n  \"\"\"\n  if len(brightness_range) != 2:\n    raise ValueError(\n        '`brightness_range` should be a tuple or list of two floats. '\n        'Received: %s' % (brightness_range,))\n\n  u = np.random.uniform(brightness_range[0], brightness_range[1])\n  return apply_brightness_shift(x, u, scale)\n\n\ndef transform_matrix_offset_center(matrix, x, y):\n  o_x = float(x) / 2 - 0.5\n  o_y = float(y) / 2 - 0.5\n  offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])\n  reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])\n  transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)\n  return transform_matrix\n\n\n@keras_export('keras.preprocessing.image.apply_affine_transform')\ndef apply_affine_transform(x, theta=0, tx=0, ty=0, shear=0, zx=1, zy=1,\n                           row_axis=1, col_axis=2, channel_axis=0,\n                           fill_mode='nearest', cval=0., order=1):\n  \"\"\"Applies an affine transformation specified by the parameters given.\n\n  Args:\n      x: 3D numpy array - a 2D image with one or more channels.\n      theta: Rotation angle in degrees.\n      tx: Width shift.\n      ty: Height shift.\n      shear: Shear angle in degrees.\n      zx: Zoom in x direction.\n      zy: Zoom in y direction.\n      row_axis: Index of axis for rows (aka Y axis) in the input\n          image. Direction: top to bottom.\n      col_axis: Index of axis for columns (aka X axis) in the input\n          image. Direction: left to right.\n      channel_axis: Index of axis for channels in the input image.\n      fill_mode: Points outside the boundaries of the input\n          are filled according to the given mode\n          (one of `{'constant', 'nearest', 'reflect', 'wrap'}`).\n      cval: Value used for points outside the boundaries\n          of the input if `mode='constant'`.\n      order: int, order of interpolation.\n\n  Returns:\n      The transformed version of the input.\n\n  Raises:\n      ImportError: if SciPy is not available.\n  \"\"\"\n  if scipy is None:\n    raise ImportError('Image transformations require SciPy. '\n                      'Install SciPy.')\n\n  # Input sanity checks:\n  # 1. x must be a 2D image with one or more channels (i.e., a 3D tensor)\n  # 2. 
channels must be either the first or the last dimension\n  if np.unique([row_axis, col_axis, channel_axis]).size != 3:\n    raise ValueError(\"'row_axis', 'col_axis', and 'channel_axis'\"\n                     \" must be distinct\")\n\n  # shall we support negative indices?\n  valid_indices = set([0, 1, 2])\n  actual_indices = set([row_axis, col_axis, channel_axis])\n  if actual_indices != valid_indices:\n    raise ValueError(\n        f'Invalid axis indices: {actual_indices - valid_indices}')\n\n  if x.ndim != 3:\n    raise ValueError('Input arrays must be multi-channel 2D images.')\n  if channel_axis not in [0, 2]:\n    raise ValueError('Channels are only allowed at the first and last dimensions.')\n\n  transform_matrix = None\n  if theta != 0:\n    theta = np.deg2rad(theta)\n    rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],\n                                [np.sin(theta), np.cos(theta), 0],\n                                [0, 0, 1]])\n    transform_matrix = rotation_matrix\n\n  if tx != 0 or ty != 0:\n    shift_matrix = np.array([[1, 0, tx],\n                             [0, 1, ty],\n                             [0, 0, 1]])\n    if transform_matrix is None:\n      transform_matrix = shift_matrix\n    else:\n      transform_matrix = np.dot(transform_matrix, shift_matrix)\n\n  if shear != 0:\n    shear = np.deg2rad(shear)\n    shear_matrix = np.array([[1, -np.sin(shear), 0],\n                             [0, np.cos(shear), 0],\n                             [0, 0, 1]])\n    if transform_matrix is None:\n      transform_matrix = shear_matrix\n    else:\n      transform_matrix = np.dot(transform_matrix, shear_matrix)\n\n  if zx != 1 or zy != 1:\n    zoom_matrix = np.array([[zx, 0, 0],\n                            [0, zy, 0],\n                            [0, 0, 1]])\n    if transform_matrix is None:\n      transform_matrix = zoom_matrix\n    else:\n      transform_matrix = np.dot(transform_matrix, zoom_matrix)\n\n  if transform_matrix is not None:\n    h, w = x.shape[row_axis], x.shape[col_axis]\n    transform_matrix = transform_matrix_offset_center(\n        transform_matrix, h, w)\n    x = np.rollaxis(x, channel_axis, 0)\n\n    # Matrix construction assumes that coordinates are x, y (in that order).\n    # However, regular numpy arrays use y,x (aka i,j) indexing.\n    # Possible solution is:\n    # 1. Swap the x and y axes.\n    # 2. Apply transform.\n    # 3. Swap the x and y axes again to restore image-like data ordering.\n    # Mathematically, it is equivalent to the following transformation:\n    # M' = PMP, where P is the permutation matrix, M is the original\n    # transformation matrix.\n    if col_axis > row_axis:\n      transform_matrix[:, [0, 1]] = transform_matrix[:, [1, 0]]\n      transform_matrix[[0, 1]] = transform_matrix[[1, 0]]\n    final_affine_matrix = transform_matrix[:2, :2]\n    final_offset = transform_matrix[:2, 2]\n\n    channel_images = [ndimage.interpolation.affine_transform(  # pylint: disable=g-complex-comprehension\n        x_channel,\n        final_affine_matrix,\n        final_offset,\n        order=order,\n        mode=fill_mode,\n        cval=cval) for x_channel in x]\n    x = np.stack(channel_images, axis=0)\n    x = np.rollaxis(x, 0, channel_axis + 1)\n  return x\n", "# Copyright 2018 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for rmsprop.\"\"\"\n\nimport tensorflow.compat.v2 as tf\n\nimport copy\nimport itertools\nimport math\n\nfrom absl.testing import parameterized\nimport numpy as np\nfrom tensorflow.python.framework import test_util as tf_test_utils # pylint: disable=g-direct-tensorflow-import\nfrom keras.testing_infra import test_combinations\nfrom keras.testing_infra import test_utils\nfrom keras.optimizers.schedules import learning_rate_schedule\nfrom keras.optimizers.optimizer_v2 import rmsprop\n\n_DATA_TYPES = [\n tf.half, tf.float32, tf.float64, tf.complex64,\n tf.complex128\n]\n\n_TEST_PARAM_VALUES = [\n # learning_rate, rho, momentum, epsilon, centered\n [0.05, 0.9, 0.0, 1e-3, True],\n [0.05, 0.9, 0.0, 1e-3, False],\n [0.1, 0.9, 0.0, 1e-3, True],\n [0.01, 0.9, 0.0, 1e-5, True],\n [0.01, 0.9, 0.9, 1e-5, True],\n]\n\n_TESTPARAMS = [\n [data_type] + values\n for data_type, values in itertools.product(_DATA_TYPES, _TEST_PARAM_VALUES)\n]\n\n\nclass RMSpropOptimizerTest(tf.test.TestCase, parameterized.TestCase):\n\n def _rmsprop_update_numpy(self, var, g, mg, rms, mom, lr, rho, momentum,\n epsilon, centered):\n rms_t = rms * rho + (1 - rho) * g * g\n if centered:\n mg_t = mg * rho + (1 - rho) * g\n denom_t = rms_t - mg_t * mg_t\n else:\n mg_t = mg\n denom_t = rms_t\n if momentum > 0.:\n mom_t = momentum * mom + lr * g / (np.sqrt(denom_t + epsilon))\n var_t = var - mom_t\n else:\n mom_t = mom\n var_t = var - lr * g / (np.sqrt(denom_t) + epsilon)\n return var_t, mg_t, rms_t, mom_t\n\n def _sparse_rmsprop_update_numpy(self, var, gindexs, gvalues, mg, rms, mom,\n lr, rho, momentum, epsilon, centered):\n mg_t = copy.deepcopy(mg)\n rms_t = copy.deepcopy(rms)\n mom_t = copy.deepcopy(mom)\n var_t = copy.deepcopy(var)\n for i in range(len(gindexs)):\n gindex = gindexs[i]\n gvalue = gvalues[i]\n rms_t[gindex] = rms[gindex] * rho + (1 - rho) * gvalue * gvalue\n if centered:\n mg_t[gindex] = mg_t[gindex] * rho + (1 - rho) * gvalue\n denom_t = rms_t[gindex] - mg_t[gindex] * mg_t[gindex]\n else:\n denom_t = rms_t[gindex]\n if momentum > 0.:\n mom_t[gindex] = momentum * mom[gindex] + lr * gvalue / np.sqrt(denom_t +\n epsilon)\n var_t[gindex] = var[gindex] - mom_t[gindex]\n else:\n mom_t[gindex] = mom[gindex]\n var_t[gindex] = var[gindex] - lr * gvalue / (np.sqrt(denom_t) + epsilon)\n return var_t, mg_t, rms_t, mom_t\n\n def testDense(self):\n # TODO(tanzheny, omalleyt): Fix test in eager mode.\n for (dtype, learning_rate, rho, momentum, epsilon, centered) in _TESTPARAMS:\n with tf.compat.v1.get_default_graph().as_default(), test_utils.use_gpu():\n # Initialize variables for numpy implementation.\n var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)\n grads0_np = np.array([0.1, 0.2], dtype=dtype.as_numpy_dtype)\n var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)\n grads1_np = np.array([0.01, 0.2], dtype=dtype.as_numpy_dtype)\n\n var0 = 
tf.Variable(var0_np, dtype=dtype)\n var1 = tf.Variable(var1_np, dtype=dtype)\n grads0 = tf.constant(grads0_np, dtype=dtype)\n grads1 = tf.constant(grads1_np, dtype=dtype)\n opt = rmsprop.RMSprop(\n learning_rate=learning_rate,\n rho=rho,\n momentum=momentum,\n epsilon=epsilon,\n centered=centered)\n\n update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))\n self.evaluate(tf.compat.v1.global_variables_initializer())\n\n if centered:\n mg0 = opt.get_slot(var0, \"mg\")\n mg1 = opt.get_slot(var1, \"mg\")\n else:\n mg0 = None\n mg1 = None\n\n if momentum > 0.:\n mom0 = opt.get_slot(var0, \"momentum\")\n mom1 = opt.get_slot(var1, \"momentum\")\n else:\n mom0 = None\n mom1 = None\n\n rms0 = opt.get_slot(var0, \"rms\")\n self.assertIsNotNone(rms0)\n rms1 = opt.get_slot(var1, \"rms\")\n self.assertIsNotNone(rms1)\n\n mg0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)\n mg1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)\n rms0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)\n rms1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)\n mom0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)\n mom1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)\n\n # Fetch params to validate initial values\n self.assertAllClose([1.0, 2.0], self.evaluate(var0))\n self.assertAllClose([3.0, 4.0], self.evaluate(var1))\n\n # Run 3 steps of RMSprop\n for _ in range(1, 4):\n self.evaluate(update)\n\n var0_np, mg0_np, rms0_np, mom0_np = self._rmsprop_update_numpy(\n var0_np, grads0_np, mg0_np, rms0_np, mom0_np, learning_rate, rho,\n momentum, epsilon, centered)\n var1_np, mg1_np, rms1_np, mom1_np = self._rmsprop_update_numpy(\n var1_np, grads1_np, mg1_np, rms1_np, mom1_np, learning_rate, rho,\n momentum, epsilon, centered)\n\n # Validate updated params\n if centered:\n self.assertAllCloseAccordingToType(mg0_np, self.evaluate(mg0))\n self.assertAllCloseAccordingToType(mg1_np, self.evaluate(mg1))\n if momentum > 0.:\n self.assertAllCloseAccordingToType(mom0_np, self.evaluate(mom0))\n self.assertAllCloseAccordingToType(mom1_np, self.evaluate(mom1))\n self.assertAllCloseAccordingToType(rms0_np, self.evaluate(rms0))\n self.assertAllCloseAccordingToType(rms1_np, self.evaluate(rms1))\n self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))\n self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))\n\n def testDenseWithLearningRateDecay(self):\n # TODO(tanzheny, omalleyt): Fix test in eager mode.\n with tf.Graph().as_default():\n var0_np = np.array([1.0, 2.0])\n grads0_np = np.array([0.1, 0.2])\n var1_np = np.array([3.0, 4.0])\n grads1_np = np.array([0.01, 0.2])\n\n var0 = tf.Variable(var0_np)\n var1 = tf.Variable(var1_np)\n grads0 = tf.constant(grads0_np)\n grads1 = tf.constant(grads1_np)\n learning_rate = 0.01\n rho = 0.9\n momentum = 0.0\n epsilon = 1e-7\n centered = False\n decay = 0.5\n opt = rmsprop.RMSprop(\n learning_rate=learning_rate,\n rho=rho,\n momentum=momentum,\n epsilon=epsilon,\n centered=centered,\n decay=decay)\n\n update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))\n self.evaluate(tf.compat.v1.global_variables_initializer())\n\n rms0 = opt.get_slot(var0, \"rms\")\n self.assertIsNotNone(rms0)\n rms1 = opt.get_slot(var1, \"rms\")\n self.assertIsNotNone(rms1)\n if momentum > 0.:\n mom0 = opt.get_slot(var0, \"momentum\")\n mom1 = opt.get_slot(var1, \"momentum\")\n else:\n mom0 = None\n mom1 = None\n\n mg0_np = np.array([0.0, 0.0])\n mg1_np = np.array([0.0, 0.0])\n rms0_np = np.array([0.0, 0.0])\n rms1_np = np.array([0.0, 0.0])\n mom0_np = 
np.array([0.0, 0.0])\n mom1_np = np.array([0.0, 0.0])\n\n # Fetch params to validate initial values\n self.assertAllClose([1.0, 2.0], self.evaluate(var0))\n self.assertAllClose([3.0, 4.0], self.evaluate(var1))\n\n # Run 4 steps of RMSprop\n for t in range(2):\n self.evaluate(update)\n\n lr = learning_rate / (1 + decay * t)\n var0_np, mg0_np, rms0_np, mom0_np = self._rmsprop_update_numpy(\n var0_np, grads0_np, mg0_np, rms0_np, mom0_np, lr, rho, momentum,\n epsilon, centered)\n var1_np, mg1_np, rms1_np, mom1_np = self._rmsprop_update_numpy(\n var1_np, grads1_np, mg1_np, rms1_np, mom1_np, lr, rho, momentum,\n epsilon, centered)\n\n # Validate updated params\n self.assertAllCloseAccordingToType(rms0_np, self.evaluate(rms0))\n self.assertAllCloseAccordingToType(rms1_np, self.evaluate(rms1))\n if momentum > 0.:\n self.assertAllCloseAccordingToType(mom0_np, self.evaluate(mom0))\n self.assertAllCloseAccordingToType(mom1_np, self.evaluate(mom1))\n self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))\n self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))\n\n def testDenseWithLearningRateInverseTimeDecay(self):\n # TODO(tanzheny, omalleyt): Fix test in eager mode.\n with tf.Graph().as_default():\n var0_np = np.array([1.0, 2.0])\n grads0_np = np.array([0.1, 0.2])\n var1_np = np.array([3.0, 4.0])\n grads1_np = np.array([0.01, 0.2])\n\n var0 = tf.Variable(var0_np)\n var1 = tf.Variable(var1_np)\n grads0 = tf.constant(grads0_np)\n grads1 = tf.constant(grads1_np)\n learning_rate = 0.01\n rho = 0.9\n momentum = 0.0\n epsilon = 1e-7\n centered = False\n decay = 0.5\n lr_schedule = learning_rate_schedule.InverseTimeDecay(\n learning_rate, decay_steps=1.0, decay_rate=decay)\n opt = rmsprop.RMSprop(\n learning_rate=lr_schedule,\n rho=rho,\n momentum=momentum,\n epsilon=epsilon,\n centered=centered)\n\n update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))\n self.evaluate(tf.compat.v1.global_variables_initializer())\n\n rms0 = opt.get_slot(var0, \"rms\")\n self.assertIsNotNone(rms0)\n rms1 = opt.get_slot(var1, \"rms\")\n self.assertIsNotNone(rms1)\n if momentum > 0.:\n mom0 = opt.get_slot(var0, \"momentum\")\n mom1 = opt.get_slot(var1, \"momentum\")\n else:\n mom0 = None\n mom1 = None\n\n mg0_np = np.array([0.0, 0.0])\n mg1_np = np.array([0.0, 0.0])\n rms0_np = np.array([0.0, 0.0])\n rms1_np = np.array([0.0, 0.0])\n mom0_np = np.array([0.0, 0.0])\n mom1_np = np.array([0.0, 0.0])\n\n # Fetch params to validate initial values\n self.assertAllClose([1.0, 2.0], self.evaluate(var0))\n self.assertAllClose([3.0, 4.0], self.evaluate(var1))\n\n # Run 4 steps of RMSprop\n for t in range(2):\n self.evaluate(update)\n\n lr = learning_rate / (1 + decay * t)\n var0_np, mg0_np, rms0_np, mom0_np = self._rmsprop_update_numpy(\n var0_np, grads0_np, mg0_np, rms0_np, mom0_np, lr, rho, momentum,\n epsilon, centered)\n var1_np, mg1_np, rms1_np, mom1_np = self._rmsprop_update_numpy(\n var1_np, grads1_np, mg1_np, rms1_np, mom1_np, lr, rho, momentum,\n epsilon, centered)\n\n # Validate updated params\n self.assertAllCloseAccordingToType(rms0_np, self.evaluate(rms0))\n self.assertAllCloseAccordingToType(rms1_np, self.evaluate(rms1))\n if momentum > 0.:\n self.assertAllCloseAccordingToType(mom0_np, self.evaluate(mom0))\n self.assertAllCloseAccordingToType(mom1_np, self.evaluate(mom1))\n self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))\n self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))\n\n def testMinimizeSparseResourceVariable(self):\n # TODO(tanzheny, omalleyt): Fix test 
in eager mode.\n with tf.Graph().as_default():\n for dtype in _DATA_TYPES:\n var0 = tf.Variable([[1.0, 2.0]], dtype=dtype)\n x = tf.constant([[4.0], [5.0]], dtype=dtype)\n\n def loss():\n pred = tf.matmul(tf.compat.v1.nn.embedding_lookup([var0], [0]), x) # pylint: disable=cell-var-from-loop\n return pred * pred\n\n sgd_op = rmsprop.RMSprop(\n learning_rate=1.0, rho=0.0, momentum=0.0, epsilon=0.0,\n centered=False).minimize(\n loss, var_list=[var0])\n self.evaluate(tf.compat.v1.global_variables_initializer())\n # Fetch params to validate initial values\n self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0))\n # Run 1 step of sgd\n self.evaluate(sgd_op)\n # Validate updated params\n self.assertAllCloseAccordingToType([[0., 1.]],\n self.evaluate(var0),\n atol=0.01)\n\n def testMinimizeSparseResourceVariableCentered(self):\n # TODO(tanzheny, omalleyt): Fix test in eager mode.\n with tf.Graph().as_default():\n for dtype in _DATA_TYPES:\n var0 = tf.Variable([[1.0, 2.0]], dtype=dtype)\n x = tf.constant([[4.0], [5.0]], dtype=dtype)\n\n def loss():\n pred = tf.matmul(tf.compat.v1.nn.embedding_lookup([var0], [0]), x) # pylint: disable=cell-var-from-loop\n return pred * pred\n\n # loss = lambda: pred * pred # pylint: disable=cell-var-from-loop\n sgd_op = rmsprop.RMSprop(\n learning_rate=1.0, rho=0.0, momentum=0.0, epsilon=1.0,\n centered=True).minimize(\n loss, var_list=[var0])\n self.evaluate(tf.compat.v1.global_variables_initializer())\n # Fetch params to validate initial values\n self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0))\n # Run 1 step of sgd\n self.evaluate(sgd_op)\n # Validate updated params\n self.assertAllCloseAccordingToType([[-111, -138]],\n self.evaluate(var0),\n atol=0.01)\n\n def testSparse(self):\n # TODO(tanzheny, omalleyt): Fix test in eager mode.\n for (dtype, learning_rate, rho, momentum, epsilon, centered) in _TESTPARAMS:\n with tf.compat.v1.get_default_graph().as_default(), test_utils.use_gpu():\n # Initialize variables for numpy implementation.\n var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)\n grads0_np = np.array([0.1], dtype=dtype.as_numpy_dtype)\n var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)\n grads1_np = np.array([0.01], dtype=dtype.as_numpy_dtype)\n\n var0 = tf.Variable(var0_np)\n var1 = tf.Variable(var1_np)\n grads0_np_indices = np.array([0], dtype=np.int32)\n grads0 = tf.IndexedSlices(\n tf.constant(grads0_np),\n tf.constant(grads0_np_indices), tf.constant([1]))\n grads1_np_indices = np.array([1], dtype=np.int32)\n grads1 = tf.IndexedSlices(\n tf.constant(grads1_np),\n tf.constant(grads1_np_indices), tf.constant([1]))\n opt = rmsprop.RMSprop(\n learning_rate=learning_rate,\n rho=rho,\n momentum=momentum,\n epsilon=epsilon,\n centered=centered)\n update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))\n self.evaluate(tf.compat.v1.global_variables_initializer())\n\n if centered:\n mg0 = opt.get_slot(var0, \"mg\")\n self.assertEqual(mg0 is not None, centered)\n mg1 = opt.get_slot(var1, \"mg\")\n self.assertEqual(mg1 is not None, centered)\n else:\n mg0 = None\n mg1 = None\n rms0 = opt.get_slot(var0, \"rms\")\n self.assertIsNotNone(rms0)\n rms1 = opt.get_slot(var1, \"rms\")\n self.assertIsNotNone(rms1)\n if momentum > 0.:\n mom0 = opt.get_slot(var0, \"momentum\")\n mom1 = opt.get_slot(var1, \"momentum\")\n else:\n mom0 = None\n mom1 = None\n\n mg0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)\n mg1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)\n rms0_np = np.array([0.0, 0.0], 
dtype=dtype.as_numpy_dtype)\n rms1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)\n mom0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)\n mom1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)\n\n # Fetch params to validate initial values\n self.assertAllClose([1.0, 2.0], self.evaluate(var0))\n self.assertAllClose([3.0, 4.0], self.evaluate(var1))\n\n # Run 3 steps of RMSprop\n for _ in range(1, 4):\n self.evaluate(update)\n\n var0_np, mg0_np, rms0_np, mom0_np = self._sparse_rmsprop_update_numpy(\n var0_np, grads0_np_indices, grads0_np, mg0_np, rms0_np, mom0_np,\n learning_rate, rho, momentum, epsilon, centered)\n var1_np, mg1_np, rms1_np, mom1_np = self._sparse_rmsprop_update_numpy(\n var1_np, grads1_np_indices, grads1_np, mg1_np, rms1_np, mom1_np,\n learning_rate, rho, momentum, epsilon, centered)\n\n # Validate updated params\n if centered:\n self.assertAllCloseAccordingToType(mg0_np, self.evaluate(mg0))\n self.assertAllCloseAccordingToType(mg1_np, self.evaluate(mg1))\n self.assertAllCloseAccordingToType(rms0_np, self.evaluate(rms0))\n self.assertAllCloseAccordingToType(rms1_np, self.evaluate(rms1))\n if momentum > 0.:\n self.assertAllCloseAccordingToType(mom0_np, self.evaluate(mom0))\n self.assertAllCloseAccordingToType(mom1_np, self.evaluate(mom1))\n self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))\n self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))\n\n @test_combinations.generate(test_combinations.combine(mode=[\"eager\"]))\n def testCallableParams(self):\n for dtype in _DATA_TYPES:\n var0 = tf.Variable([1.0, 2.0], dtype=dtype)\n var1 = tf.Variable([3.0, 4.0], dtype=dtype)\n grads0 = tf.constant([0.1, 0.1], dtype=dtype)\n grads1 = tf.constant([0.01, 0.01], dtype=dtype)\n\n learning_rate = lambda: 2.0\n rho = lambda: 0.9\n momentum = lambda: 0.0\n epsilon = 1.0\n opt = rmsprop.RMSprop(learning_rate, rho, momentum, epsilon)\n\n # Fetch params to validate initial values\n self.assertAllClose([1.0, 2.0], self.evaluate(var0))\n self.assertAllClose([3.0, 4.0], self.evaluate(var1))\n # Step 1: the rms accumulators where 1. 
So we should see a normal\n # update: v -= grad * learning_rate\n opt.apply_gradients(zip([grads0, grads1], [var0, var1]))\n # Check the parameters.\n self.assertAllCloseAccordingToType(\n np.array([\n 1.0 - (0.1 * 2.0 / math.sqrt(0.001 + 1.0)),\n 2.0 - (0.1 * 2.0 / math.sqrt(0.001 + 1.0))\n ]), self.evaluate(var0))\n self.assertAllCloseAccordingToType(\n np.array([\n 3.0 - (0.01 * 2.0 / math.sqrt(0.00001 + 1.0)),\n 4.0 - (0.01 * 2.0 / math.sqrt(0.00001 + 1.0))\n ]), self.evaluate(var1))\n # Step 2: the root mean square accumulators contain the previous update.\n opt.apply_gradients(zip([grads0, grads1], [var0, var1]))\n # Check the parameters.\n self.assertAllCloseAccordingToType(\n np.array([\n 1.0 - (0.1 * 2.0 / math.sqrt(0.001 + 1.0)) -\n (0.1 * 2.0 / math.sqrt(0.001 * 0.9 + 0.001 + 1.0)),\n 2.0 - (0.1 * 2.0 / math.sqrt(0.001 + 1.0)) -\n (0.1 * 2.0 / math.sqrt(0.001 * 0.9 + 0.001 + 1.0))\n ]), self.evaluate(var0))\n self.assertAllCloseAccordingToType(\n np.array([\n 3.0 - (0.01 * 2.0 / math.sqrt(0.00001 + 1.0)) -\n (0.01 * 2.0 / math.sqrt(0.00001 * 0.9 + 1e-5 + 1.0)),\n 4.0 - (0.01 * 2.0 / math.sqrt(0.00001 + 1.0)) -\n (0.01 * 2.0 / math.sqrt(0.00001 * 0.9 + 1e-5 + 1.0))\n ]), self.evaluate(var1))\n\n def testConstructRMSpropWithLR(self):\n opt = rmsprop.RMSprop(lr=1.0)\n opt_2 = rmsprop.RMSprop(learning_rate=0.1, lr=1.0)\n opt_3 = rmsprop.RMSprop(learning_rate=0.1)\n self.assertIsInstance(opt.lr, tf.Variable)\n self.assertIsInstance(opt_2.lr, tf.Variable)\n self.assertIsInstance(opt_3.lr, tf.Variable)\n\n self.evaluate(tf.compat.v1.global_variables_initializer())\n self.assertAllClose(self.evaluate(opt.lr), (1.0))\n self.assertAllClose(self.evaluate(opt_2.lr), (1.0))\n self.assertAllClose(self.evaluate(opt_3.lr), (0.1))\n\n @test_combinations.generate(test_combinations.combine(mode=[\"eager\"]))\n def testSlotsUniqueEager(self):\n v1 = tf.Variable(1.)\n v2 = tf.Variable(1.)\n\n opt = rmsprop.RMSprop(1., momentum=0., centered=False)\n opt.minimize(lambda: v1 + v2, var_list=[v1, v2])\n # There should be iteration, and one unique slot variable for v1 and v2.\n self.assertLen(set({id(v) for v in opt.variables()}), 3)\n self.assertEqual(\n self.evaluate(opt.variables()[0]), self.evaluate(opt.iterations))\n\n opt = rmsprop.RMSprop(learning_rate=1., momentum=0.2, centered=False)\n opt.minimize(lambda: v1 + v2, var_list=[v1, v2])\n # There should be iteration, and two unique slot variables for v1 and v2.\n self.assertLen(set({id(v) for v in opt.variables()}), 5)\n self.assertEqual(\n self.evaluate(opt.variables()[0]), self.evaluate(opt.iterations))\n\n opt = rmsprop.RMSprop(learning_rate=1., momentum=0.2, centered=True)\n opt.minimize(lambda: v1 + v2, var_list=[v1, v2])\n # There should be iteration, and three unique slot variables for v1 and v2\n self.assertLen(set({id(v) for v in opt.variables()}), 7)\n self.assertEqual(\n self.evaluate(opt.variables()[0]), self.evaluate(opt.iterations))\n\n @test_combinations.generate(test_combinations.combine(mode=[\"eager\"]))\n def testMomentumProperValue(self):\n with self.assertRaisesRegex(ValueError,\n r\"`momentum` must be between \\[0, 1\\]. 
\"\n r\"Received: momentum=2.5 \\(of type <class \"\n r\"\\'float\\'>\\).\"):\n rmsprop.RMSprop(1., momentum=2.5, centered=False)\n\n\n@test_combinations.generate(test_combinations.combine(mode=[\"graph\", \"eager\"]))\nclass SlotColocationTest(tf.test.TestCase, parameterized.TestCase):\n\n @parameterized.parameters([True, False])\n @tf_test_utils.run_gpu_only\n def testRunMinimizeOnGPUForCPUVariables(self, use_resource):\n with tf.device(\"/device:CPU:0\"):\n if use_resource:\n var0 = tf.Variable([1.0, 2.0], dtype=tf.float32)\n var1 = tf.Variable([3.0, 4.0], dtype=tf.float32)\n else:\n var0 = tf.Variable([1.0, 2.0], dtype=tf.float32)\n var1 = tf.Variable([3.0, 4.0], dtype=tf.float32)\n\n def loss():\n return 5 * var0 + 3 * var1\n\n opt = rmsprop.RMSprop(\n learning_rate=1.0, decay=0.9, momentum=0.5, epsilon=1.0)\n\n # Fetch params to validate initial values\n self.evaluate(tf.compat.v1.global_variables_initializer())\n self.assertAllClose([1.0, 2.0], self.evaluate(var0))\n self.assertAllClose([3.0, 4.0], self.evaluate(var1))\n\n # Run 1 step through optimizer on GPU.\n # Slot variables are created the first time optimizer is used on some\n # variable. This tests that slot variables will be colocated with the base\n # variable.\n with tf.device(\"/device:GPU:0\"):\n # Note that for eager execution, minimize expects a function instead of a\n # Tensor.\n opt_op = opt.minimize(loss, [var0, var1])\n self.evaluate(tf.compat.v1.global_variables_initializer())\n self.evaluate(opt_op)\n\n # Validate updated params, All variables should have decreased.\n self.assertTrue(all(v < 0.0 for v in self.evaluate(var0)),\n msg=\"updated variables: %s\" % self.evaluate(var0))\n self.assertTrue(all(v < 2.0 for v in self.evaluate(var1)),\n msg=\"updated variables: %s\" % self.evaluate(var1))\n\nif __name__ == \"__main__\":\n tf.test.main()\n", "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Ftrl-proximal optimizer implementation.\"\"\"\n# pylint: disable=g-bad-import-order\n# pylint: disable=g-classes-have-attributes\n\nimport tensorflow.compat.v2 as tf\nfrom keras.optimizers.optimizer_v2 import optimizer_v2\nfrom tensorflow.python.util.tf_export import keras_export # pylint: disable=g-direct-tensorflow-import\n\n\n# pylint: disable=g-classes-have-attributes\n@keras_export('keras.optimizers.Ftrl')\nclass Ftrl(optimizer_v2.OptimizerV2):\n r\"\"\"Optimizer that implements the FTRL algorithm.\n\n \"Follow The Regularized Leader\" (FTRL) is an optimization algorithm developed\n at Google for click-through rate prediction in the early 2010s. 
It is most\n suitable for shallow models with large and sparse feature spaces.\n The algorithm is described by\n [McMahan et al., 2013](https://research.google.com/pubs/archive/41159.pdf).\n The Keras version has support for both online L2 regularization\n (the L2 regularization described in the paper\n above) and shrinkage-type L2 regularization\n (which is the addition of an L2 penalty to the loss function).\n\n Initialization:\n\n ```python\n n = 0\n sigma = 0\n z = 0\n ```\n\n Update rule for one variable `w`:\n\n ```python\n prev_n = n\n n = n + g ** 2\n sigma = (sqrt(n) - sqrt(prev_n)) / lr\n z = z + g - sigma * w\n if abs(z) < lambda_1:\n w = 0\n else:\n w = (sgn(z) * lambda_1 - z) / ((beta + sqrt(n)) / alpha + lambda_2)\n ```\n\n Notation:\n\n - `lr` is the learning rate\n - `g` is the gradient for the variable\n - `lambda_1` is the L1 regularization strength\n - `lambda_2` is the L2 regularization strength\n\n Check the documentation for the `l2_shrinkage_regularization_strength`\n parameter for more details when shrinkage is enabled, in which case gradient\n is replaced with a gradient with shrinkage.\n\n Args:\n learning_rate: A `Tensor`, floating point value, or a schedule that is a\n `tf.keras.optimizers.schedules.LearningRateSchedule`. The learning rate.\n learning_rate_power: A float value, must be less or equal to zero.\n Controls how the learning rate decreases during training. Use zero for\n a fixed learning rate.\n initial_accumulator_value: The starting value for accumulators.\n Only zero or positive values are allowed.\n l1_regularization_strength: A float value, must be greater than or\n equal to zero. Defaults to 0.0.\n l2_regularization_strength: A float value, must be greater than or\n equal to zero. Defaults to 0.0.\n name: Optional name prefix for the operations created when applying\n gradients. Defaults to `\"Ftrl\"`.\n l2_shrinkage_regularization_strength: A float value, must be greater than\n or equal to zero. This differs from L2 above in that the L2 above is a\n stabilization penalty, whereas this L2 shrinkage is a magnitude penalty.\n When input is sparse shrinkage will only happen on the active weights.\n beta: A float value, representing the beta value from the paper.\n Defaults to 0.0.\n **kwargs: keyword arguments. Allowed arguments are `clipvalue`,\n `clipnorm`, `global_clipnorm`.\n If `clipvalue` (float) is set, the gradient of each weight\n is clipped to be no higher than this value.\n If `clipnorm` (float) is set, the gradient of each weight\n is individually clipped so that its norm is no higher than this value.\n If `global_clipnorm` (float) is set the gradient of all weights is\n clipped so that their global norm is no higher than this value.\n\n Reference:\n - [McMahan et al., 2013](\n https://research.google.com/pubs/archive/41159.pdf)\n \"\"\"\n\n def __init__(self,\n learning_rate=0.001,\n learning_rate_power=-0.5,\n initial_accumulator_value=0.1,\n l1_regularization_strength=0.0,\n l2_regularization_strength=0.0,\n name='Ftrl',\n l2_shrinkage_regularization_strength=0.0,\n beta=0.0,\n **kwargs):\n super(Ftrl, self).__init__(name, **kwargs)\n\n if initial_accumulator_value < 0.0:\n raise ValueError(\n '`initial_accumulator_value` needs to be positive or zero. Received: '\n f'initial_accumulator_value={initial_accumulator_value}.')\n if learning_rate_power > 0.0:\n raise ValueError(\n '`learning_rate_power` needs to be negative or zero. 
Received: '\n f'learning_rate_power={learning_rate_power}.')\n if l1_regularization_strength < 0.0:\n raise ValueError(\n '`l1_regularization_strength` needs to be positive or zero. '\n f'Received: l1_regularization_strength={l1_regularization_strength}.')\n if l2_regularization_strength < 0.0:\n raise ValueError(\n '`l2_regularization_strength` needs to be positive or zero. '\n f'Received: l2_regularization_strength={l2_regularization_strength}.')\n if l2_shrinkage_regularization_strength < 0.0:\n raise ValueError(\n '`l2_shrinkage_regularization_strength` needs to be positive or '\n 'zero. Received: l2_shrinkage_regularization_strength'\n f'={l2_shrinkage_regularization_strength}.')\n\n self._set_hyper('learning_rate', learning_rate)\n self._set_hyper('decay', self._initial_decay)\n self._set_hyper('learning_rate_power', learning_rate_power)\n self._set_hyper('l1_regularization_strength', l1_regularization_strength)\n self._set_hyper('l2_regularization_strength', l2_regularization_strength)\n self._set_hyper('beta', beta)\n self._initial_accumulator_value = initial_accumulator_value\n self._l2_shrinkage_regularization_strength = (\n l2_shrinkage_regularization_strength)\n\n def _create_slots(self, var_list):\n # Create the \"accum\" and \"linear\" slots.\n for var in var_list:\n dtype = var.dtype.base_dtype\n init = tf.compat.v1.constant_initializer(\n self._initial_accumulator_value, dtype=dtype)\n self.add_slot(var, 'accumulator', init)\n self.add_slot(var, 'linear')\n\n def _prepare_local(self, var_device, var_dtype, apply_state):\n super(Ftrl, self)._prepare_local(var_device, var_dtype, apply_state)\n apply_state[(var_device, var_dtype)].update(\n dict(\n learning_rate_power=tf.identity(\n self._get_hyper('learning_rate_power', var_dtype)),\n l1_regularization_strength=tf.identity(\n self._get_hyper('l1_regularization_strength', var_dtype)),\n l2_regularization_strength=tf.identity(\n self._get_hyper('l2_regularization_strength', var_dtype)),\n beta=tf.identity(self._get_hyper('beta', var_dtype)),\n l2_shrinkage_regularization_strength=tf.cast(\n self._l2_shrinkage_regularization_strength, var_dtype)))\n\n def _resource_apply_dense(self, grad, var, apply_state=None):\n var_device, var_dtype = var.device, var.dtype.base_dtype\n coefficients = ((apply_state or {}).get((var_device, var_dtype))\n or self._fallback_apply_state(var_device, var_dtype))\n\n # Adjust L2 regularization strength to include beta to avoid the underlying\n # TensorFlow ops needing to include it.\n adjusted_l2_regularization_strength = (\n coefficients['l2_regularization_strength'] + coefficients['beta'] /\n (2. 
* coefficients['lr_t']))\n\n accum = self.get_slot(var, 'accumulator')\n linear = self.get_slot(var, 'linear')\n\n if self._l2_shrinkage_regularization_strength <= 0.0:\n return tf.raw_ops.ResourceApplyFtrl(\n var=var.handle,\n accum=accum.handle,\n linear=linear.handle,\n grad=grad,\n lr=coefficients['lr_t'],\n l1=coefficients['l1_regularization_strength'],\n l2=adjusted_l2_regularization_strength,\n lr_power=coefficients['learning_rate_power'],\n use_locking=self._use_locking)\n else:\n return tf.raw_ops.ResourceApplyFtrlV2(\n var=var.handle,\n accum=accum.handle,\n linear=linear.handle,\n grad=grad,\n lr=coefficients['lr_t'],\n l1=coefficients['l1_regularization_strength'],\n l2=adjusted_l2_regularization_strength,\n l2_shrinkage=coefficients['l2_shrinkage_regularization_strength'],\n lr_power=coefficients['learning_rate_power'],\n use_locking=self._use_locking)\n\n def _resource_apply_sparse(self, grad, var, indices, apply_state=None):\n var_device, var_dtype = var.device, var.dtype.base_dtype\n coefficients = ((apply_state or {}).get((var_device, var_dtype))\n or self._fallback_apply_state(var_device, var_dtype))\n\n # Adjust L2 regularization strength to include beta to avoid the underlying\n # TensorFlow ops needing to include it.\n adjusted_l2_regularization_strength = (\n coefficients['l2_regularization_strength'] + coefficients['beta'] /\n (2. * coefficients['lr_t']))\n\n accum = self.get_slot(var, 'accumulator')\n linear = self.get_slot(var, 'linear')\n\n if self._l2_shrinkage_regularization_strength <= 0.0:\n return tf.raw_ops.ResourceSparseApplyFtrl(\n var=var.handle,\n accum=accum.handle,\n linear=linear.handle,\n grad=grad,\n indices=indices,\n lr=coefficients['lr_t'],\n l1=coefficients['l1_regularization_strength'],\n l2=adjusted_l2_regularization_strength,\n lr_power=coefficients['learning_rate_power'],\n use_locking=self._use_locking)\n else:\n return tf.raw_ops.ResourceSparseApplyFtrlV2(\n var=var.handle,\n accum=accum.handle,\n linear=linear.handle,\n grad=grad,\n indices=indices,\n lr=coefficients['lr_t'],\n l1=coefficients['l1_regularization_strength'],\n l2=adjusted_l2_regularization_strength,\n l2_shrinkage=coefficients['l2_shrinkage_regularization_strength'],\n lr_power=coefficients['learning_rate_power'],\n use_locking=self._use_locking)\n\n def get_config(self):\n config = super(Ftrl, self).get_config()\n config.update({\n 'learning_rate':\n self._serialize_hyperparameter('learning_rate'),\n 'decay':\n self._initial_decay,\n 'initial_accumulator_value':\n self._initial_accumulator_value,\n 'learning_rate_power':\n self._serialize_hyperparameter('learning_rate_power'),\n 'l1_regularization_strength':\n self._serialize_hyperparameter('l1_regularization_strength'),\n 'l2_regularization_strength':\n self._serialize_hyperparameter('l2_regularization_strength'),\n 'beta':\n self._serialize_hyperparameter('beta'),\n 'l2_shrinkage_regularization_strength':\n self._l2_shrinkage_regularization_strength,\n })\n return config\n" ]
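The `Ftrl` docstring above gives the per-variable update rule only as pseudocode, and its notation list omits `alpha` and `beta`. As a reading aid, here is a minimal NumPy sketch of that rule for a scalar weight — assuming `alpha` is the learning rate and `beta` the docstring's beta parameter; this illustrates the pseudocode, not the fused TensorFlow op the optimizer actually dispatches to:

import numpy as np

def ftrl_step(w, g, n, z, alpha=0.001, beta=0.0, lambda_1=0.0, lambda_2=0.0):
    """One FTRL update for a scalar weight, mirroring the docstring's pseudocode."""
    prev_n = n
    n = n + g ** 2
    sigma = (np.sqrt(n) - np.sqrt(prev_n)) / alpha  # per-coordinate effective rate
    z = z + g - sigma * w
    if abs(z) < lambda_1:
        w = 0.0  # the L1 term zeroes out small coordinates
    else:
        w = (np.sign(z) * lambda_1 - z) / ((beta + np.sqrt(n)) / alpha + lambda_2)
    return w, n, z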
[ [ "numpy.rollaxis", "numpy.dot", "numpy.sqrt", "numpy.asarray", "numpy.max", "numpy.mean", "numpy.random.randint", "numpy.linalg.svd", "numpy.clip", "numpy.reshape", "numpy.arange", "tensorflow.python.util.tf_export.keras_export", "numpy.unique", "numpy.stack", "numpy.sin", "numpy.copy", "numpy.std", "numpy.zeros", "scipy.ndimage.interpolation.affine_transform", "numpy.min", "numpy.random.choice", "numpy.deg2rad", "numpy.array", "numpy.random.random", "numpy.random.seed", "numpy.cos", "numpy.random.permutation", "numpy.prod", "numpy.random.uniform" ], [ "tensorflow.compat.v2.Variable", "tensorflow.compat.v2.test.main", "tensorflow.compat.v2.constant", "numpy.sqrt", "tensorflow.compat.v2.device", "tensorflow.compat.v2.compat.v1.nn.embedding_lookup", "tensorflow.compat.v2.compat.v1.global_variables_initializer", "tensorflow.compat.v2.Graph", "tensorflow.compat.v2.compat.v1.get_default_graph", "numpy.array" ], [ "tensorflow.python.util.tf_export.keras_export", "tensorflow.compat.v2.cast", "tensorflow.compat.v2.raw_ops.ResourceApplyFtrl", "tensorflow.compat.v2.raw_ops.ResourceSparseApplyFtrlV2", "tensorflow.compat.v2.raw_ops.ResourceSparseApplyFtrl", "tensorflow.compat.v2.compat.v1.constant_initializer", "tensorflow.compat.v2.raw_ops.ResourceApplyFtrlV2" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "0.15", "1.4", "0.16", "1.0", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "0.10", "0.17", "1.3" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sunpy/xrayvision
[ "905042be8227688c4088800423dfa8db79e56566" ]
[ "xrayvision/tests/test_clean.py" ]
[ "import numpy as np\nimport astropy.units as u\nfrom astropy.convolution.kernels import Gaussian2DKernel\n\nfrom scipy import signal\n\nfrom ..clean import clean, ms_clean, component, radial_prolate_sphereoidal,\\\n vec_radial_prolate_sphereoidal\nfrom ..transform import dft_map, idft_map\n\n\ndef test_clean_ideal():\n n = m = 65\n pos1 = [15, 30]\n pos2 = [40, 32]\n\n clean_map = np.zeros((n, m))\n clean_map[pos1[0], pos1[1]] = 10.\n clean_map[pos2[0], pos2[1]] = 7.\n\n dirty_beam = np.zeros((n, m))\n dirty_beam[(n-1)//4:(n-1)//4 + (n-1)//2, (m-1)//2] = 0.75\n dirty_beam[(n-1)//2, (m-1)//4:(m-1)//4 + (m-1)//2, ] = 0.75\n dirty_beam[(n-1)//2, (m-1)//2] = 0.8\n dirty_beam = np.pad(dirty_beam, (65, 65), 'constant')\n\n dirty_map = signal.convolve(clean_map, dirty_beam, mode='same')\n\n # Disable convolution of model with gaussian for testing\n out_map = clean(dirty_map, dirty_beam, clean_beam_width=0.0)\n\n # Within threshold default threshold of 0.1\n assert np.allclose(clean_map, (out_map[0]+out_map[1]), out_map, atol=dirty_beam.max() * 0.1)\n\n\ndef test_component():\n comp = np.zeros((3, 3))\n comp[1, 1] = 1.0\n\n res = component(scale=0, shape=(3, 3))\n assert np.array_equal(res, comp)\n\n res = component(scale=1, shape=(3, 3))\n assert np.array_equal(res, comp)\n\n res = component(scale=2, shape=(6, 6))\n assert np.all(res[0, :] == 0.0)\n assert np.all(res[:, 0] == 0.0)\n assert np.all(res[2:4, 2:4] == res.max())\n\n res = component(scale=3, shape=(7, 7))\n assert np.all(res[0, :] == 0.0)\n assert np.all(res[:, 0] == 0.0)\n assert res[3, 3] == 1\n\n\ndef test_radial_prolate_spheroidal():\n amps = [radial_prolate_sphereoidal(r) for r in [-1.0, 0.0, 0.5, 1.0, 2.0]]\n assert amps[0] == 1.0\n assert amps[1] == 1.0\n assert amps[2] == 0.36106538453111797\n assert amps[3] == 0.0\n assert amps[4] == 0.0\n\n\ndef test_vec_radial_prolate_spheroidal():\n radii = np.linspace(-0.5, 1.5, 1000)\n amps1 = [radial_prolate_sphereoidal(r) for r in radii]\n amps2 = vec_radial_prolate_sphereoidal(radii)\n assert np.allclose(amps1, amps2)\n\n\ndef test_ms_clean_ideal():\n n = m = 65\n pos1 = [15, 30]\n pos2 = [40, 32]\n\n clean_map = np.zeros((n, m))\n clean_map[pos1[0], pos1[1]] = 10.\n clean_map[pos2[0], pos2[1]] = 7.\n\n dirty_beam = np.zeros((n, m))\n dirty_beam[(n-1)//4:(n-1)//4 + (n-1)//2, (m-1)//2] = 0.75\n dirty_beam[(n-1)//2, (m-1)//4:(m-1)//4 + (m-1)//2, ] = 0.75\n dirty_beam[(n-1)//2, (m-1)//2] = 1.0\n dirty_beam = np.pad(dirty_beam, (65, 65), 'constant')\n\n dirty_map = signal.convolve2d(clean_map, dirty_beam, mode='same')\n\n # Disable convolution of model with gaussian for testing\n model, res = ms_clean(dirty_map, dirty_beam, scales=[1], clean_beam_width=0.0)\n recovered = model + res\n\n # Within threshold default threshold\n assert np.allclose(clean_map, recovered, atol=dirty_beam.max() * 0.1)\n\n\ndef test_clean_sim():\n n = m = 32\n data = Gaussian2DKernel(stddev=3.0, x_size=n, y_size=m).array\n # data = np.zeros((n, m))\n # data[13,13] = 10.0\n # data[12:14,12:14] = 10.0/4.0\n\n half_log_space = np.logspace(np.log10(0.03030303), np.log10(0.48484848), 10)\n\n theta = np.linspace(0, 2*np.pi, 32)\n theta = theta[np.newaxis, :]\n theta = np.repeat(theta, 10, axis=0)\n\n r = half_log_space\n r = r[:, np.newaxis]\n r = np.repeat(r, 32, axis=1)\n\n x = r * np.sin(theta)\n y = r * np.cos(theta)\n\n sub_uv = np.vstack([x.flatten(), y.flatten()])\n sub_uv = np.hstack([sub_uv, np.zeros((2, 1))]) / u.arcsec\n\n # Factor of 9 is compensate for the factor of 3 * 3 increase in size\n dirty_beam = 
idft_map(np.ones(321)*9, (n*3, m*3), sub_uv)\n\n vis = dft_map(data, sub_uv)\n\n dirty_map = idft_map(vis, (n, m), sub_uv)\n\n clean_map, res = clean(dirty_map, dirty_beam, clean_beam_width=0)\n np.allclose(data, clean_map + res, atol=dirty_beam.max() * 0.1)\n\n" ]
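Judging only from the calls in the tests above, `clean` takes a dirty map and a dirty beam and returns a `(model, residual)` pair. A usage sketch under that assumption (the beam construction mirrors the tests; `clean` would come from the package's `clean` module):

import numpy as np
from scipy import signal
# from xrayvision.clean import clean  # as the tests' relative import suggests

true_map = np.zeros((65, 65))
true_map[15, 30] = 10.0                # a single point source
dirty_beam = np.zeros((65, 65))
dirty_beam[32, 16:48] = 0.75           # cross-shaped beam, as in the tests
dirty_beam[16:48, 32] = 0.75
dirty_beam[32, 32] = 1.0
dirty_map = signal.convolve(true_map, dirty_beam, mode='same')

model, residual = clean(dirty_map, dirty_beam, clean_beam_width=0.0)
assert np.allclose(true_map, model + residual, atol=dirty_beam.max() * 0.1)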
[ [ "numpy.pad", "numpy.array_equal", "numpy.linspace", "numpy.allclose", "numpy.cos", "scipy.signal.convolve2d", "numpy.sin", "numpy.all", "numpy.ones", "numpy.log10", "numpy.repeat", "numpy.zeros", "scipy.signal.convolve" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
DedeKite/wxPlotLab
[ "808d457aeb897ceb37535bcd11d15b65a0a14cd1" ]
[ "mplotlab/graphics/Navigation.py" ]
[ "# -*-coding:Utf-8 -*\r\n\r\nfrom mplotlab import App\r\nfrom matplotlib.backend_bases import NavigationToolbar2\r\n\r\nimport wx\r\n\r\nclass Cursors:\r\n # this class is only used as a simple namespace\r\n HAND, POINTER, SELECT_REGION, MOVE = list(range(4))\r\ncursors = Cursors()\r\n\r\ncursord = {\r\n cursors.MOVE : wx.CURSOR_HAND,\r\n cursors.HAND : wx.CURSOR_HAND,\r\n cursors.POINTER : wx.CURSOR_ARROW,\r\n cursors.SELECT_REGION : wx.CURSOR_CROSS,\r\n }\r\n\r\nclass Navigation(NavigationToolbar2):\r\n def __init__(self,*a,**k):\r\n NavigationToolbar2.__init__(self, *a,**k)\r\n \r\n def _init_toolbar(self,*args,**kwargs):\r\n pass\r\n \r\n def set_message(self,s): \r\n \"\"\" display in the status bar\r\n the mouseover data (x,y) \r\n \"\"\" \r\n try:\r\n App().mainWin.GetStatusBar().SetStatusText(s,0)\r\n except:\r\n pass\r\n\r\n def set_cursor(self, cursor):\r\n cursor =wx.StockCursor(cursord[cursor])\r\n self.canvas.SetCursor( cursor )\r\n\r\n def dynamic_update(self):\r\n d = self._idle\r\n self._idle = False\r\n if d:\r\n self.canvas.draw()\r\n self._idle = True\r\n\r\n def press(self, event):\r\n if self._active == 'ZOOM':\r\n self.wxoverlay = wx.Overlay()\r\n\r\n def release(self, event):\r\n if self._active == 'ZOOM':\r\n # When the mouse is released we reset the overlay and it\r\n # restores the former content to the window.\r\n self.wxoverlay.Reset()\r\n del self.wxoverlay\r\n\r\n def draw_rubberband(self, event, x0, y0, x1, y1):\r\n # Use an Overlay to draw a rubberband-like bounding box.\r\n\r\n dc = wx.ClientDC(self.canvas)\r\n odc = wx.DCOverlay(self.wxoverlay, dc)\r\n odc.Clear()\r\n\r\n # Mac's DC is already the same as a GCDC, and it causes\r\n # problems with the overlay if we try to use an actual\r\n # wx.GCDC so don't try it.\r\n if 'wxMac' not in wx.PlatformInfo:\r\n dc = wx.GCDC(dc)\r\n\r\n height = self.canvas.figure.bbox.height\r\n y1 = height - y1\r\n y0 = height - y0\r\n\r\n if y1<y0: y0, y1 = y1, y0\r\n if x1<y0: x0, x1 = x1, x0\r\n\r\n w = x1 - x0\r\n h = y1 - y0\r\n rect = wx.Rect(x0, y0, w, h)\r\n\r\n rubberBandColor = '#C0C0FF' # or load from config?\r\n\r\n # Set a pen for the border\r\n color = wx.NamedColour(rubberBandColor)\r\n dc.SetPen(wx.Pen(color, 1))\r\n\r\n # use the same color, plus alpha for the brush\r\n r, g, b = color.Get()\r\n color.Set(r,g,b, 0x60)\r\n dc.SetBrush(wx.Brush(color))\r\n dc.DrawRectangleRect(rect)\r\n " ]
[ [ "matplotlib.backend_bases.NavigationToolbar2.__init__" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Walon1998/dace
[ "95ddfd3e9a5c654f0f0d66d026e0b64ec0f028a0", "95ddfd3e9a5c654f0f0d66d026e0b64ec0f028a0", "95ddfd3e9a5c654f0f0d66d026e0b64ec0f028a0", "95ddfd3e9a5c654f0f0d66d026e0b64ec0f028a0", "95ddfd3e9a5c654f0f0d66d026e0b64ec0f028a0", "95ddfd3e9a5c654f0f0d66d026e0b64ec0f028a0", "95ddfd3e9a5c654f0f0d66d026e0b64ec0f028a0", "95ddfd3e9a5c654f0f0d66d026e0b64ec0f028a0", "95ddfd3e9a5c654f0f0d66d026e0b64ec0f028a0", "95ddfd3e9a5c654f0f0d66d026e0b64ec0f028a0", "95ddfd3e9a5c654f0f0d66d026e0b64ec0f028a0", "95ddfd3e9a5c654f0f0d66d026e0b64ec0f028a0", "95ddfd3e9a5c654f0f0d66d026e0b64ec0f028a0", "95ddfd3e9a5c654f0f0d66d026e0b64ec0f028a0", "95ddfd3e9a5c654f0f0d66d026e0b64ec0f028a0", "95ddfd3e9a5c654f0f0d66d026e0b64ec0f028a0", "95ddfd3e9a5c654f0f0d66d026e0b64ec0f028a0", "95ddfd3e9a5c654f0f0d66d026e0b64ec0f028a0" ]
[ "samples/polybench/lu.py", "tests/symbol_dependent_transients_test.py", "samples/tensorflow/dataset_reader.py", "tests/transformations/maptoforloop_test.py", "tests/fpga/vector_reduce_test.py", "tests/chained_nested_tasklet_test.py", "tests/blas/nodes/blas_nodes_test.py", "tests/wcr_cudatest.py", "tests/persistent_map_cudatest.py", "tests/symbol_in_tasklet_test.py", "tests/fpga/channel_mangling_test.py", "tests/map_dim_shuffle_test.py", "tests/transformations/subgraph_fusion/reduction_test.py", "tests/numpy/element_assignment_test.py", "tests/numpy/subarray_in_nested_call_test.py", "tests/codegen/control_flow_detection_test.py", "tests/library/linalg_inv_test.py", "tests/transformations/subgraph_fusion/smax_test.py" ]
[ "# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.\nimport math\nimport numpy as np\nimport dace\nimport polybench\n\nN = dace.symbol('N')\n\n#datatypes = [dace.float64, dace.int32, dace.float32]\ndatatype = dace.float64\n\n# Dataset sizes\nsizes = [{N: 40}, {N: 120}, {N: 400}, {N: 2000}, {N: 4000}]\n\nargs = [([N, N], datatype)]\n\n\ndef init_array(A):\n n = N.get()\n\n for i in range(0, n, 1):\n for j in range(0, i + 1, 1):\n # Python does modulo, while C does remainder ...\n A[i, j] = datatype(-(j % n)) / n + 1\n for j in range(i + 1, n, 1):\n A[i, j] = datatype(0)\n A[i, i] = datatype(1)\n\n A[:] = np.dot(A, np.transpose(A))\n\n\[email protected](datatype[N, N])\ndef lu(A):\n for i in range(0, N, 1):\n for j in range(0, i, 1):\n\n @dace.map\n def k_loop1(k: _[0:j]):\n i_in << A[i, k]\n j_in << A[k, j]\n out >> A(1, lambda x, y: x + y)[i, j]\n out = -i_in * j_in\n\n @dace.tasklet\n def div():\n ij_in << A[i, j]\n jj_in << A[j, j]\n out >> A[i, j]\n out = ij_in / jj_in\n\n for j in range(i, N, 1):\n\n @dace.map\n def k_loop2(k: _[0:i]):\n i_in << A[i, k]\n j_in << A[k, j]\n out >> A(1, lambda x, y: x + y)[i, j]\n out = -i_in * j_in\n\n\nif __name__ == '__main__':\n polybench.main(sizes, args, [(0, 'A')], init_array, lu)\n", "# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.\nimport dace\nimport numpy as np\nimport pytest\nfrom dace.libraries import standard\nfrom dace.transformation import interstate\n\n\ndef _make_sdfg(name, storage=dace.dtypes.StorageType.CPU_Heap, isview=False):\n\n N = dace.symbol('N', dtype=dace.int32, integer=True, positive=True)\n i = dace.symbol('i', dtype=dace.int32, integer=True)\n\n sdfg = dace.SDFG(name)\n _, A = sdfg.add_array('A', [N, N, N], dtype=dace.float64)\n _, B = sdfg.add_array('B', [N], dtype=dace.float64)\n if isview:\n _, tmp1 = sdfg.add_view('tmp1', [N - 4, N - 4, N - i], dtype=dace.float64, storage=storage, strides=A.strides)\n else:\n _, tmp1 = sdfg.add_transient('tmp1', [N - 4, N - 4, N - i], dtype=dace.float64, storage=storage)\n _, tmp2 = sdfg.add_transient('tmp2', [1], dtype=dace.float64, storage=storage)\n\n begin_state = sdfg.add_state(\"begin\", is_start_state=True)\n guard_state = sdfg.add_state(\"guard\")\n body1_state = sdfg.add_state(\"body1\")\n body2_state = sdfg.add_state(\"body2\")\n body3_state = sdfg.add_state(\"body3\")\n end_state = sdfg.add_state(\"end\")\n\n sdfg.add_edge(begin_state, guard_state, dace.InterstateEdge(assignments=dict(i='0')))\n sdfg.add_edge(guard_state, body1_state, dace.InterstateEdge(condition=f'i<{N}'))\n sdfg.add_edge(guard_state, end_state, dace.InterstateEdge(condition=f'i>={N}'))\n sdfg.add_edge(body1_state, body2_state, dace.InterstateEdge())\n sdfg.add_edge(body2_state, body3_state, dace.InterstateEdge())\n sdfg.add_edge(body3_state, guard_state, dace.InterstateEdge(assignments=dict(i='i+1')))\n\n if not isview:\n read_a = body1_state.add_read('A')\n write_tmp1 = body1_state.add_write('tmp1')\n body1_state.add_nedge(read_a, write_tmp1, dace.Memlet(f'A[2:{N}-2, 2:{N}-2, i:{N}]'))\n\n if isview:\n read_a = body2_state.add_read('A')\n read_tmp1 = body2_state.add_access('tmp1')\n body2_state.add_nedge(read_a, read_tmp1, dace.Memlet(f'A[2:{N}-2, 2:{N}-2, i:{N}]'))\n else:\n read_tmp1 = body2_state.add_read('tmp1')\n rednode = standard.Reduce(wcr='lambda a, b : a + b', identity=0)\n if storage == dace.dtypes.StorageType.GPU_Global:\n rednode.implementation = 'CUDA (device)'\n elif storage == dace.dtypes.StorageType.FPGA_Global:\n 
rednode.implementation = 'FPGAPartialReduction'\n    body2_state.add_node(rednode)\n    write_tmp2 = body2_state.add_write('tmp2')\n    body2_state.add_nedge(read_tmp1, rednode, dace.Memlet.from_array('tmp1', tmp1))\n    body2_state.add_nedge(rednode, write_tmp2, dace.Memlet('tmp2[0]'))\n\n    read_tmp2 = body3_state.add_read('tmp2')\n    write_b = body3_state.add_write('B')\n    body3_state.add_nedge(read_tmp2, write_b, dace.Memlet('B[i]'))\n\n    return sdfg\n\n\ndef test_symbol_dependent_heap_array():\n    A = np.random.randn(10, 10, 10)\n    B = np.ndarray(10, dtype=np.float64)\n    sdfg = _make_sdfg(\"symbol_dependent_heap_array\")\n    # Compile manually to avoid simplification\n    sdfg_exec = sdfg.compile()\n    sdfg_exec(A=A, B=B, N=10)\n    del sdfg_exec\n    B_ref = np.ndarray(10, dtype=np.float64)\n    for i in range(10):\n        tmp = A[2:-2, 2:-2, i:]\n        B_ref[i] = np.sum(tmp)\n    assert (np.allclose(B, B_ref))\n\n\ndef test_symbol_dependent_register_array():\n    A = np.random.randn(10, 10, 10)\n    B = np.ndarray(10, dtype=np.float64)\n    sdfg = _make_sdfg(\"symbol_dependent_register_array\", storage=dace.dtypes.StorageType.Register)\n    # Compile manually to avoid simplification\n    sdfg_exec = sdfg.compile()\n    sdfg_exec(A=A, B=B, N=10)\n    del sdfg_exec\n    B_ref = np.ndarray(10, dtype=np.float64)\n    for i in range(10):\n        tmp = A[2:-2, 2:-2, i:]\n        B_ref[i] = np.sum(tmp)\n    assert (np.allclose(B, B_ref))\n\n\ndef test_symbol_dependent_threadlocal_array():\n    A = np.random.randn(10, 10, 10)\n    B = np.ndarray(10, dtype=np.float64)\n    sdfg = _make_sdfg(\"symbol_dependent_threadlocal_array\", storage=dace.dtypes.StorageType.CPU_ThreadLocal)\n    # Compile manually to avoid simplification\n    sdfg_exec = sdfg.compile()\n    sdfg_exec(A=A, B=B, N=10)\n    del sdfg_exec\n    B_ref = np.ndarray(10, dtype=np.float64)\n    for i in range(10):\n        tmp = A[2:-2, 2:-2, i:]\n        B_ref[i] = np.sum(tmp)\n    assert (np.allclose(B, B_ref))\n\n\ndef test_symbol_dependent_cpu_view():\n    A = np.random.randn(10, 10, 10)\n    B = np.ndarray(10, dtype=np.float64)\n    sdfg = _make_sdfg(\"symbol_dependent_cpu_view\", isview=True)\n    # Compile manually to avoid simplification\n    sdfg_exec = sdfg.compile()\n    sdfg_exec(A=A, B=B, N=10)\n    del sdfg_exec\n    B_ref = np.ndarray(10, dtype=np.float64)\n    for i in range(10):\n        tmp = A[2:-2, 2:-2, i:]\n        B_ref[i] = np.sum(tmp)\n    assert (np.allclose(B, B_ref))\n\n\n@pytest.mark.gpu\ndef test_symbol_dependent_gpu_global_array():\n    A = np.random.randn(10, 10, 10)\n    B = np.ndarray(10, dtype=np.float64)\n    sdfg = _make_sdfg(\"symbol_dependent_gpu_global_array\", storage=dace.dtypes.StorageType.GPU_Global)\n    # Compile manually to avoid simplification\n    sdfg_exec = sdfg.compile()\n    sdfg_exec(A=A, B=B, N=10)\n    del sdfg_exec\n    B_ref = np.ndarray(10, dtype=np.float64)\n    for i in range(10):\n        tmp = A[2:-2, 2:-2, i:]\n        B_ref[i] = np.sum(tmp)\n    assert (np.allclose(B, B_ref))\n\n\n@pytest.mark.gpu\ndef test_symbol_dependent_pinned_array():\n    A = np.random.randn(10, 10, 10)\n    B = np.ndarray(10, dtype=np.float64)\n    sdfg = _make_sdfg(\"symbol_dependent_pinned_array\", storage=dace.dtypes.StorageType.CPU_Pinned)\n    # Compile manually to avoid simplification\n    sdfg_exec = sdfg.compile()\n    sdfg_exec(A=A, B=B, N=10)\n    del sdfg_exec\n    B_ref = np.ndarray(10, dtype=np.float64)\n    for i in range(10):\n        tmp = A[2:-2, 2:-2, i:]\n        B_ref[i] = np.sum(tmp)\n    assert (np.allclose(B, B_ref))\n\n\n@pytest.mark.skip  # @pytest.mark.gpu\ndef test_symbol_dependent_gpu_view():\n    # NOTE: This test cannot produce the correct result since the input\n    # data of the reduction are not contiguous and cub:reduce doesn't 
support\n    # such data.\n    A = np.random.randn(10, 10, 10)\n    B = np.ndarray(10, dtype=np.float64)\n    sdfg = _make_sdfg(\"symbol_dependent_gpu_view\", storage=dace.dtypes.StorageType.GPU_Global, isview=True)\n    # Compile manually to avoid simplification\n    sdfg_exec = sdfg.compile()\n    sdfg_exec(A=A, B=B, N=10)\n    del sdfg_exec\n    B_ref = np.ndarray(10, dtype=np.float64)\n    for i in range(10):\n        tmp = A[2:-2, 2:-2, i:]\n        B_ref[i] = np.sum(tmp)\n    assert (np.allclose(B, B_ref))\n\n\n@pytest.mark.fpga\ndef test_symbol_dependent_fpga_global_array():\n    A = np.random.randn(10, 10, 10)\n    B = np.ndarray(10, dtype=np.float64)\n    sdfg = _make_sdfg(\"symbol_dependent_fpga_global_array\", storage=dace.dtypes.StorageType.FPGA_Global)\n    # Compile manually to avoid simplification\n    sdfg_exec = sdfg.compile()\n    sdfg_exec(A=A, B=B, N=10)\n    del sdfg_exec\n    B_ref = np.ndarray(10, dtype=np.float64)\n    for i in range(10):\n        tmp = A[2:-2, 2:-2, i:]\n        B_ref[i] = np.sum(tmp)\n    assert (np.allclose(B, B_ref))\n\n\ndef test_symbol_dependent_array_in_map():\n    @dace.program\n    def symbol_dependent_array_in_map(A: dace.float32[10]):\n        out = np.ndarray(10, dtype=np.float32)\n        for i in dace.map[0:10]:\n            tmp = A[0:i + 1]\n            out[i] = np.sum(tmp)\n        return out\n\n    # Compile manually to avoid simplification\n    sdfg = symbol_dependent_array_in_map.to_sdfg(simplify=False)\n    sdfg.apply_transformations_repeated(interstate.StateFusion)\n    sdfg.apply_transformations_repeated(interstate.InlineSDFG)\n    # NOTE: Temporary fix for issue with symbols/free_symbols\n    if 'i' in sdfg.free_symbols:\n        sdfg.remove_symbol('i')\n    func = sdfg.compile()\n    A = np.random.randn(10).astype(np.float32)\n    val = func(A=A)\n    ref = np.cumsum(A)\n    assert (np.allclose(val, ref))\n\n\nif __name__ == '__main__':\n    test_symbol_dependent_heap_array()\n    test_symbol_dependent_register_array()\n    test_symbol_dependent_threadlocal_array()\n    test_symbol_dependent_cpu_view()\n    test_symbol_dependent_gpu_global_array()\n    test_symbol_dependent_pinned_array()\n    # test_symbol_dependent_gpu_view()\n    # test_symbol_dependent_fpga_global_array()\n    test_symbol_dependent_array_in_map()\n", "# Copyright 2019-2021 ETH Zurich and the DaCe authors. 
All rights reserved.\nimport tensorflow as tf\nimport numpy as np\nfrom dace.frontend.tensorflow import TFSession\nimport matplotlib.pyplot as plt\nimport sys\n\n\ndef data_input_fn(filenames, batch_size=2, shuffle=False):\n    def _parser(record):\n        features = {\n            \"label\": tf.FixedLenFeature([], tf.int64),\n            \"image_raw\": tf.FixedLenFeature([], tf.string),\n        }\n        parsed_record = tf.parse_single_example(record, features)\n        image = tf.decode_raw(parsed_record[\"image_raw\"], tf.float32)\n        image = tf.reshape(image, [28, 28])\n\n        label = tf.cast(parsed_record[\"label\"], tf.int32)\n        label = tf.one_hot(indices=label, depth=10, on_value=1, off_value=0)\n        # label is already one-hot encoded above\n        return image, label\n\n    def _input_fn():\n        dataset = tf.data.TFRecordDataset(filenames).map(_parser)\n        if shuffle:\n            dataset = dataset.shuffle(buffer_size=10000)\n\n        dataset = dataset.batch(batch_size, drop_remainder=True)\n\n        iterator = dataset.make_one_shot_iterator()\n        features, labels = iterator.get_next()\n\n        return features, labels\n\n    return _input_fn\n\n\nif __name__ == '__main__':\n    if len(sys.argv) < 2:\n        print('USAGE: dataset_reader.py <FILENAME> [FILENAMES...]')\n        exit(1)\n\n    filenames = list(sys.argv[1:])\n\n    with tf.Session() as sess:\n        output_tf = sess.run(data_input_fn(filenames)())[0]\n        for _out in output_tf:\n            _out = np.multiply(255.0, _out)\n            _out = _out.astype(np.uint8)\n            plt.imshow(_out)\n            plt.show()\n\n    with TFSession() as sess:\n        output_dace = sess.run(data_input_fn(filenames)())[0]\n        for _out in output_dace:\n            _out = np.multiply(255.0, _out)\n            _out = _out.astype(np.uint8)\n            plt.imshow(_out)\n            plt.show()\n", "# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.\n\"\"\" A test for the MapToForLoop transformation. \"\"\"\n\nimport dace\nimport numpy as np\nfrom dace.transformation.dataflow import MapExpansion, MapToForLoop\n\n\n@dace.program\ndef map2for(A: dace.float64[20, 20, 20]):\n    for k in range(1, 19):\n        for i, j in dace.map[0:20, 0:20]:\n            with dace.tasklet:\n                inp << A[i, j, k]\n                inp2 << A[i, j, k - 1]\n                out >> A[i, j, k + 1]\n                out = inp + inp2\n\n\ndef test_map2for_overlap():\n    A = np.random.rand(20, 20, 20)\n    expected = np.copy(A)\n    for k in range(1, 19):\n        expected[:, :, k + 1] = expected[:, :, k] + expected[:, :, k - 1]\n\n    sdfg = map2for.to_sdfg()\n    assert sdfg.apply_transformations([MapExpansion, MapToForLoop]) == 2\n    sdfg(A=A)\n    assert np.allclose(A, expected)\n\n\nif __name__ == '__main__':\n    test_map2for_overlap()", "# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.\n\"\"\" Sums all the elements of the vector with a reduce. 
\"\"\"\n\nimport dace\nimport numpy as np\nimport argparse\nfrom dace.fpga_testing import fpga_test\nfrom dace.transformation.interstate import FPGATransformSDFG\n\nN = dace.symbol('N')\n\n\[email protected]\ndef vector_reduce(x: dace.float32[N], s: dace.scalar(dace.float32)):\n #transient\n tmp = dace.define_local([N], dtype=x.dtype)\n\n @dace.map\n def sum(i: _[0:N]):\n in_x << x[i]\n out >> tmp[i]\n\n out = in_x\n\n dace.reduce(lambda a, b: a + b, tmp, s, axis=(0), identity=0)\n\n\n@fpga_test()\ndef test_vector_reduce():\n\n N.set(24)\n\n # Initialize arrays: X, Y and Z\n X = np.random.rand(N.get()).astype(dace.float32.type)\n s = dace.scalar(dace.float32)\n\n sdfg = vector_reduce.to_sdfg()\n sdfg.apply_transformations(FPGATransformSDFG)\n sdfg(x=X, s=s, N=N)\n\n # Compute expected result\n s_exp = 0.0\n for x in X:\n s_exp += x\n diff = np.linalg.norm(s_exp - s) / N.get()\n assert diff <= 1e-5\n\n return sdfg\n\n\nif __name__ == \"__main__\":\n test_vector_reduce(None)\n", "# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.\nimport numpy as np\n\nimport dace as dp\nfrom dace.sdfg import SDFG\nfrom dace.memlet import Memlet\nfrom dace.data import Scalar\n\n\n# Constructs an SDFG with two consecutive tasklets\ndef test_nested_map():\n print('SDFG consecutive tasklet (nested) test')\n # Externals (parameters, symbols)\n N = dp.symbol('N')\n N.set(20)\n input = dp.ndarray([N], dp.int32)\n output = dp.ndarray([N], dp.int32)\n input[:] = dp.int32(5)\n output[:] = dp.int32(0)\n\n # Construct SDFG\n mysdfg = SDFG('ctasklet_nested_map')\n state = mysdfg.add_state()\n A_ = state.add_array('A', [N], dp.int32)\n B_ = state.add_array('B', [N], dp.int32)\n\n omap_entry, omap_exit = state.add_map('omap', dict(k='0:2'))\n map_entry, map_exit = state.add_map('mymap', dict(i='0:N/2'))\n tasklet = state.add_tasklet('mytasklet', {'a'}, {'b'}, 'b = 5*a')\n state.add_edge(map_entry, None, tasklet, 'a', Memlet.simple(A_, 'k*N/2+i'))\n tasklet2 = state.add_tasklet('mytasklet2', {'c'}, {'d'}, 'd = 2*c')\n state.add_edge(tasklet, 'b', tasklet2, 'c', Memlet())\n state.add_edge(tasklet2, 'd', map_exit, None, Memlet.simple(B_, 'k*N/2+i'))\n\n # Add outer edges\n state.add_edge(A_, None, omap_entry, None, Memlet.simple(A_, '0:N'))\n state.add_edge(omap_entry, None, map_entry, None, Memlet.simple(A_, 'k*N/2:(k+1)*N/2'))\n state.add_edge(map_exit, None, omap_exit, None, Memlet.simple(B_, 'k*N/2:(k+1)*N/2'))\n state.add_edge(omap_exit, None, B_, None, Memlet.simple(B_, '0:N'))\n\n # Fill missing connectors\n mysdfg.fill_scope_connectors()\n mysdfg.validate()\n\n mysdfg(A=input, B=output, N=N)\n\n diff = np.linalg.norm(10 * input - output) / N.get()\n print(\"Difference:\", diff)\n assert diff <= 1e-5\n\n\ndef test_nested_sdfg():\n print('SDFG consecutive tasklet (nested SDFG) test')\n # Externals (parameters, symbols)\n N = dp.symbol('N')\n N.set(20)\n input = dp.ndarray([N], dp.int32)\n output = dp.ndarray([N], dp.int32)\n input[:] = dp.int32(5)\n output[:] = dp.int32(0)\n\n # Construct outer SDFG\n mysdfg = SDFG('ctasklet_nested_sdfg')\n state = mysdfg.add_state()\n A_ = state.add_array('A', [N], dp.int32)\n B_ = state.add_array('B', [N], dp.int32)\n\n # Construct inner SDFG\n nsdfg = dp.SDFG('ctasklet_nested_sdfg_inner')\n nstate = nsdfg.add_state()\n a = nstate.add_array('a', [N], dp.int32)\n b = nstate.add_array('b', [N], dp.int32)\n map_entry, map_exit = nstate.add_map('mymap', dict(i='0:N/2'))\n tasklet = nstate.add_tasklet('mytasklet', {'aa'}, {'bb'}, 'bb = 5*aa')\n 
nstate.add_memlet_path(a, map_entry, tasklet, dst_conn='aa', memlet=Memlet('a[k*N/2+i]'))\n    tasklet2 = nstate.add_tasklet('mytasklet2', {'cc'}, {'dd'}, 'dd = 2*cc')\n    nstate.add_edge(tasklet, 'bb', tasklet2, 'cc', Memlet())\n    nstate.add_memlet_path(tasklet2, map_exit, b, src_conn='dd', memlet=Memlet('b[k*N/2+i]'))\n\n    # Add outer edges\n    omap_entry, omap_exit = state.add_map('omap', dict(k='0:2'))\n    nsdfg_node = state.add_nested_sdfg(nsdfg, None, {'a'}, {'b'})\n    state.add_memlet_path(A_, omap_entry, nsdfg_node, dst_conn='a', memlet=Memlet('A[0:N]'))\n    state.add_memlet_path(nsdfg_node, omap_exit, B_, src_conn='b', memlet=Memlet('B[0:N]'))\n\n    mysdfg.validate()\n    mysdfg(A=input, B=output, N=N)\n\n    diff = np.linalg.norm(10 * input - output) / N.get()\n    print(\"Difference:\", diff)\n    assert diff <= 1e-5\n\n    mysdfg.simplify()\n\n    mysdfg(A=input, B=output, N=N)\n\n    diff = np.linalg.norm(10 * input - output) / N.get()\n    print(\"Difference:\", diff)\n    assert diff <= 1e-5\n\n\nif __name__ == '__main__':\n    test_nested_map()\n    test_nested_sdfg()\n", "# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.\nimport dace\nimport dace.libraries.blas as blas\nfrom dace.transformation.dataflow import RedundantSecondArray\nimport numpy as np\nimport pytest\n\nM = dace.symbol('M')\nN = dace.symbol('N')\n\n\n@pytest.mark.parametrize(('implementation', ), [('pure', ),\n                                                pytest.param('MKL', marks=pytest.mark.mkl), ('OpenBLAS', ),\n                                                pytest.param('cuBLAS', marks=pytest.mark.gpu)])\ndef test_gemv_strided(implementation):\n    @dace.program\n    def gemv(A: dace.float64[M, N], x: dace.float64[N, N]):\n        return A @ x[:, 1]\n\n    A = np.random.rand(20, 30)\n    x = np.random.rand(30, 30)\n    reference = A @ x[:, 1]\n    sdfg = gemv.to_sdfg()\n    sdfg.name = f'{sdfg.name}_{implementation}'\n    if implementation == 'cuBLAS':\n        sdfg.apply_gpu_transformations()\n        sdfg.apply_transformations_repeated(RedundantSecondArray)\n\n    blas.default_implementation = implementation\n    daceres = sdfg(A=A, x=x, M=20, N=30)\n\n    blas.default_implementation = None\n    assert np.allclose(daceres, reference)\n\n\ndef test_dot_subset():\n    @dace.program\n    def dot(x: dace.float64[N, N], y: dace.float64[N, N]):\n        return x[1, 1:N - 1] @ y[1:N - 1, 1]\n\n    x = np.random.rand(30, 30)\n    y = np.random.rand(30, 30)\n    reference = x[1, 1:29] @ y[1:29, 1]\n    sdfg = dot.to_sdfg()\n\n    # Enforce one-dimensional memlets from two-dimensional arrays\n    sdfg.apply_transformations_repeated(RedundantSecondArray)\n    blas.default_implementation = 'pure'\n    daceres = sdfg(x=x, y=y, N=30)\n\n    blas.default_implementation = None\n    assert np.allclose(daceres, reference)\n\n\n@pytest.mark.parametrize(('implementation', ), [('pure', ),\n                                                pytest.param('MKL', marks=pytest.mark.mkl), ('OpenBLAS', ),\n                                                pytest.param('cuBLAS', marks=pytest.mark.gpu)])\ndef test_dot_strided(implementation):\n    @dace.program\n    def dot(x: dace.float64[N, N], y: dace.float64[N, N]):\n        return x[1, :] @ y[:, 1]\n\n    x = np.random.rand(30, 30)\n    y = np.random.rand(30, 30)\n    reference = x[1, :] @ y[:, 1]\n    sdfg = dot.to_sdfg()\n    sdfg.name = f'{sdfg.name}_{implementation}'\n\n    # Enforce one-dimensional memlets from two-dimensional arrays\n    sdfg.apply_transformations_repeated(RedundantSecondArray)\n\n    if implementation == 'cuBLAS':\n        sdfg.apply_gpu_transformations()\n\n    blas.default_implementation = implementation\n    daceres = sdfg(x=x, y=y, N=30)\n\n    blas.default_implementation = None\n    assert np.allclose(daceres, reference)\n\n\nif __name__ == '__main__':\n    implementations = ['pure', 'MKL', 'cuBLAS']\n    for implementation in 
implementations:\n        test_gemv_strided(implementation)\n    test_dot_subset()\n    for implementation in implementations:\n        test_dot_strided(implementation)\n", "# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.\nimport numpy as np\nimport dace\nfrom dace.transformation.interstate import GPUTransformSDFG\n\nfrom typing import Dict, Tuple\nimport pytest\n\n\ndef create_zero_initialization(init_state: dace.SDFGState, array_name):\n    sdfg = init_state.parent\n    array_shape = sdfg.arrays[array_name].shape\n\n    array_access_node = init_state.add_write(array_name)\n\n    indices = [\"i\" + str(k) for k, _ in enumerate(array_shape)]\n\n    init_state.add_mapped_tasklet(output_nodes={array_name: array_access_node},\n                                  name=(array_name + \"_init_tasklet\"),\n                                  map_ranges={k: \"0:\" + str(v)\n                                              for k, v in zip(indices, array_shape)},\n                                  inputs={},\n                                  code='val = 0',\n                                  outputs=dict(val=dace.Memlet.simple(array_access_node.data, \",\".join(indices))),\n                                  external_edges=True)\n\n\ndef create_test_sdfg():\n    sdfg = dace.SDFG('test_sdfg')\n\n    sdfg.add_array('BETA', shape=[10], dtype=dace.float32)\n    sdfg.add_array('BETA_MAX', shape=[1], dtype=dace.float32)\n\n    init_state = sdfg.add_state(\"init\")\n    state = sdfg.add_state(\"compute\")\n\n    sdfg.add_edge(init_state, state, dace.InterstateEdge())\n\n    for arr in ['BETA_MAX']:\n        create_zero_initialization(init_state, arr)\n\n    BETA_MAX = state.add_access('BETA_MAX')\n    BETA = state.add_access('BETA')\n\n    beta_max_reduce = state.add_reduce(wcr=\"lambda a, b: max(a, b)\", axes=(0, ), identity=-999999)\n    beta_max_reduce.implementation = 'CUDA (device)'\n    state.add_edge(BETA, None, beta_max_reduce, None, dace.memlet.Memlet.simple(BETA.data, '0:10'))\n    state.add_edge(beta_max_reduce, None, BETA_MAX, None, dace.memlet.Memlet.simple(BETA_MAX.data, '0:1'))\n\n    return sdfg\n\n\n@pytest.mark.gpu\ndef test():\n    my_max_sdfg = create_test_sdfg()\n    my_max_sdfg.validate()\n    my_max_sdfg.apply_transformations(GPUTransformSDFG)\n\n    BETA = np.random.rand(10).astype(np.float32)\n    BETA_MAX = np.zeros(1).astype(np.float32)\n\n    my_max_sdfg(BETA=BETA, BETA_MAX=BETA_MAX)\n\n    assert (np.max(BETA) == BETA_MAX[0])\n\n\nif __name__ == \"__main__\":\n    test()\n", "# Copyright 2019-2021 ETH Zurich and the DaCe authors. 
All rights reserved.\n\nimport numpy as np\nimport scipy\nimport pytest\n\nimport dace\nfrom dace import nodes\nfrom dace.dtypes import ScheduleType\n\nW = dace.symbol('W')\nH = dace.symbol('H')\nnnz = dace.symbol('nnz')\n\n\n@dace.program(dace.uint32[H + 1], dace.uint32[nnz], dace.float32[nnz], dace.float32[W], dace.float32[H])\ndef spmv(A_row, A_col, A_val, x, b):\n    for ignore in dace.map[0]:\n        for i in dace.map[0:H]:\n\n            @dace.map(_[A_row[i]:A_row[i + 1]])\n            def compute(j):\n                a << A_val[j]\n                in_x << x[A_col[j]]\n                out >> b(1, lambda x, y: x + y)[i]\n\n                out = a * in_x\n\n\n@pytest.mark.gpu\ndef test_persistent_dynamic_map():\n\n    print('SPMV with dynamic map')\n\n    sdfg = spmv.to_sdfg()\n    sdfg.apply_gpu_transformations()\n\n    for state in sdfg:\n        for scope in state.nodes():\n            if not isinstance(scope, nodes.EntryNode):\n                continue\n            if state.entry_node(scope) is None:\n                scope.map.schedule = ScheduleType.GPU_Persistent\n            elif state.entry_node(state.entry_node(scope)) is None:\n                scope.map.schedule = ScheduleType.GPU_Device\n            else:\n                scope.map.schedule = ScheduleType.GPU_ThreadBlock_Dynamic\n\n    verify(sdfg)\n\n\n@pytest.mark.gpu\ndef test_persistent_default():\n\n    print('SPMV with default map')\n\n    sdfg = spmv.to_sdfg()\n    sdfg.apply_gpu_transformations()\n\n    for state in sdfg:\n        for scope in state.nodes():\n            if not isinstance(scope, nodes.EntryNode):\n                continue\n            if state.entry_node(scope) is None:\n                scope.map.schedule = ScheduleType.GPU_Persistent\n            else:\n                scope.map.schedule = ScheduleType.Default\n\n    verify(sdfg)\n\n\ndef verify(sdfg):\n    height = 1024\n    width = 1024\n\n    # Fill input data\n    # each row has up to (and including) 256 elements\n    A_row = np.random.randint(257, size=height + 1, dtype=dace.uint32.type)\n    A_row[0] = 0\n    A_row = np.cumsum(A_row, dtype=dace.uint32.type)\n\n    # Column data\n    A_col = dace.ndarray([A_row[height]], dtype=dace.uint32)\n    for i in range(height):\n        A_col[A_row[i]:A_row[i + 1]] = np.sort(np.random.choice(width, A_row[i + 1] - A_row[i], replace=False))\n\n    # values\n    A_val = np.random.rand(A_row[height]).astype(dace.float32.type)\n\n    A_sparse = scipy.sparse.csr_matrix((A_val, A_col, A_row), dtype=dace.float32.type, shape=(1024, 1024))\n\n    x = np.random.rand(width).astype(dace.float32.type)\n    b = np.zeros(height, dtype=dace.float32.type)\n\n    sdfg(A_row=A_row, A_col=A_col, A_val=A_val, x=x, b=b, H=A_sparse.shape[0], W=A_sparse.shape[1], nnz=A_sparse.nnz)\n\n    assert np.allclose(b, A_sparse.dot(x)), \"Result doesn't match!\"\n    print(\"Complete.\")\n\n\nif __name__ == '__main__':\n    test_persistent_dynamic_map()\n    test_persistent_default()\n", "# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.\nimport dace\nimport numpy as np\n\nvalue = dace.symbol('value', dtype=dace.float32)\n\n\n@dace.program\ndef symintasklet_numpy(out: dace.float32[1]):\n    out[0] = value\n\n\n@dace.program\ndef symintasklet_explicit(out: dace.float32[1]):\n    with dace.tasklet:\n        o = value\n        o >> out[0]\n\n\ndef test_numpy():\n    out = np.zeros(1).astype(np.float32)\n    symintasklet_numpy(out, value=np.float32(1.5))\n    assert out[0] == np.float32(1.5)\n\n\ndef test_explicit():\n    out = np.zeros(1).astype(np.float32)\n    symintasklet_explicit(out, value=np.float32(1.5))\n    assert out[0] == np.float32(1.5)\n\n\nif __name__ == '__main__':\n    test_numpy()\n    test_explicit()\n", "# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.\n\n# The scope of the test is to verify channel mangling. 
In the\n# SDFG we have two nested SDFGs that each increment some streaming data by one.\n# The NestedSDFGs are similar, but they work on different sizes.\n\n# Note: to generate the code for both NSDFGs, we set the DaCe config entry compiler->unique_functions to 'none'\n\nimport dace\nimport numpy as np\nimport argparse\nimport subprocess\nfrom dace.config import Config\n\nfrom dace.fpga_testing import intel_fpga_test\nfrom dace.memlet import Memlet\n\nN = dace.symbol(\"N\")\n\n\ndef make_increment_sdfg(sdfg_name: str, dtype=dace.float32):\n    inc_sdfg = dace.SDFG(sdfg_name)\n\n    # FPGA State\n\n    fpga_state = inc_sdfg.add_state(\"fpga_state\")\n\n    inc_sdfg.add_array(\"x\", shape=[N], dtype=dtype, storage=dace.dtypes.StorageType.FPGA_Global)\n    inc_sdfg.add_array(\"y\", shape=[N], dtype=dtype, storage=dace.dtypes.StorageType.FPGA_Global)\n    inc_sdfg.add_stream(\"what_a_nice_pipe\", dtype, transient=True, storage=dace.dtypes.StorageType.FPGA_Local)\n\n    data_in = fpga_state.add_read(\"x\")\n    data_out = fpga_state.add_write(\"y\")\n    pipe_write = fpga_state.add_write(\"what_a_nice_pipe\")\n    pipe_read = fpga_state.add_read(\"what_a_nice_pipe\")\n\n    # ---------- ----------\n    read_map_entry, read_map_exit = fpga_state.add_map('read_incr_map',\n                                                       dict(i='0:N'),\n                                                       schedule=dace.dtypes.ScheduleType.FPGA_Device)\n\n    incr_tasklet = fpga_state.add_tasklet('incr_task', ['in_con'], ['out_con'], 'out_con = in_con + 1')\n\n    # From memory to increment\n    fpga_state.add_memlet_path(data_in,\n                               read_map_entry,\n                               incr_tasklet,\n                               dst_conn='in_con',\n                               memlet=dace.Memlet(f\"{data_in.data}[i]\"))\n    # from increment to pipe\n    fpga_state.add_memlet_path(incr_tasklet,\n                               read_map_exit,\n                               pipe_write,\n                               src_conn='out_con',\n                               memlet=dace.Memlet(\"what_a_nice_pipe[0]\"))\n\n    # from pipe to memory\n    write_map_entry, write_map_exit = fpga_state.add_map('write_map',\n                                                         dict(i='0:N'),\n                                                         schedule=dace.dtypes.ScheduleType.FPGA_Device)\n\n    copy_tasklet = fpga_state.add_tasklet('copy_task', ['in_con'], ['out_con'], 'out_con = in_con ')\n\n    fpga_state.add_memlet_path(pipe_read,\n                               write_map_entry,\n                               copy_tasklet,\n                               dst_conn='in_con',\n                               memlet=dace.Memlet(\"what_a_nice_pipe[0]\"))\n    fpga_state.add_memlet_path(copy_tasklet, write_map_exit, data_out, src_conn='out_con', memlet=dace.Memlet(\"y[i]\"))\n\n    #########\n    # Validate\n    inc_sdfg.fill_scope_connectors()\n    inc_sdfg.validate()\n    return inc_sdfg\n\n\ndef make_nested_sdfg_fpga(dtype=dace.float32):\n    '''\n    Build an SDFG with two nested SDFGs, each one in a different state\n    '''\n\n    sdfg = dace.SDFG(\"channels_mangling\")\n\n    ###########################################################################\n    # Copy data to FPGA\n\n    copy_in_state = sdfg.add_state(\"copy_to_device\")\n\n    sdfg.add_array(\"X\", shape=[N], dtype=dtype)\n\n    in_host_x = copy_in_state.add_read(\"X\")\n\n    sdfg.add_array(\"device_X\", shape=[N], dtype=dtype, storage=dace.dtypes.StorageType.FPGA_Global, transient=True)\n    sdfg.add_array(\"device_tmp\", shape=[N], dtype=dtype, storage=dace.dtypes.StorageType.FPGA_Global, transient=True)\n\n    in_device_x = copy_in_state.add_write(\"device_X\")\n\n    copy_in_state.add_memlet_path(in_host_x, in_device_x, memlet=Memlet.simple(in_host_x, \"0:N\"))\n\n    ###########################################################################\n    # Copy data from FPGA\n\n    copy_out_state = sdfg.add_state(\"copy_to_host\")\n    sdfg.add_array(\"Y\", shape=[N], dtype=dtype)\n    sdfg.add_array(\"device_Y\", shape=[N], dtype=dtype, storage=dace.dtypes.StorageType.FPGA_Global, transient=True)\n\n    out_device = 
copy_out_state.add_read(\"device_Y\")\n out_host = copy_out_state.add_write(\"Y\")\n\n copy_out_state.add_memlet_path(out_device, out_host, memlet=Memlet.simple(out_host, \"0:N\"))\n\n ########################################################################\n # First state\n state = sdfg.add_state(\"state\")\n state.location[\"is_FPGA_kernel\"] = False\n\n to_nest = make_increment_sdfg(\"nest_1\", dtype)\n x = state.add_read(\"device_X\")\n tmp = state.add_write(\"device_tmp\")\n\n # add nested sdfg with symbol mapping\n nested_sdfg = state.add_nested_sdfg(to_nest, sdfg, {\"x\"}, {\"y\"})\n state.add_memlet_path(x, nested_sdfg, dst_conn=\"x\", memlet=Memlet(\"device_X[0:N]\"))\n state.add_memlet_path(nested_sdfg, tmp, src_conn=\"y\", memlet=Memlet(\"device_tmp[0:N]\"))\n\n ########################################################################\n # First state\n state2 = sdfg.add_state(\"state2\")\n state2.location[\"is_FPGA_kernel\"] = False\n\n to_nest = make_increment_sdfg(\"nest_2\", dtype)\n tmp_read = state2.add_read(\"device_tmp\")\n y = state2.add_write(\"device_Y\")\n\n # add nested sdfg with symbol mapping\n nested_sdfg = state2.add_nested_sdfg(to_nest, sdfg, {\"x\"}, {\"y\"})\n state2.add_memlet_path(tmp_read, nested_sdfg, dst_conn=\"x\", memlet=Memlet(\"device_tmp[0:N]\"))\n state2.add_memlet_path(nested_sdfg, y, src_conn=\"y\", memlet=Memlet(\"device_Y[0:N]\"))\n\n ######################################\n # Interstate edges\n sdfg.add_edge(state, state2, dace.sdfg.sdfg.InterstateEdge())\n\n # Interstate edges\n sdfg.add_edge(copy_in_state, state, dace.sdfg.sdfg.InterstateEdge())\n sdfg.add_edge(state2, copy_out_state, dace.sdfg.sdfg.InterstateEdge())\n sdfg.validate()\n\n return sdfg\n\n\n@intel_fpga_test()\ndef test_channel_mangling():\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"N\", type=int, nargs=\"?\", default=32)\n args = vars(parser.parse_args())\n\n size_n = args[\"N\"]\n\n from dace.config import Config\n # set unique function to false to generate both sdfgs\n Config.set(\"compiler\", \"unique_functions\", value=\"none\")\n sdfg = make_nested_sdfg_fpga()\n\n X = np.random.rand(size_n).astype(np.float32)\n Y = np.random.rand(size_n).astype(np.float32)\n sdfg(X=X, Y=Y, N=size_n)\n ref = X + 2\n diff = np.linalg.norm(ref - Y) / size_n\n assert diff <= 1e-5\n\n return sdfg\n\n\nif __name__ == \"__main__\":\n test_channel_mangling(None)\n", "# Copyright 2019-2021 ETH Zurich and the DaCe authors. 
All rights reserved.\nimport dace\nfrom dace.transformation.dataflow import MapDimShuffle\nimport unittest\nimport numpy as np\n\nI = dace.symbol(\"I\")\nJ = dace.symbol(\"J\")\n\n\n@dace.program\ndef copy(inp: dace.float32[I, J], out: dace.float32[I, J]):\n    @dace.map\n    def copy(y: _[0:I], x: _[0:J]):\n        i << inp[y, x]\n        o >> out[y, x]\n        o = i\n\n\nclass MapDimShuffleTest(unittest.TestCase):\n    def semantic_eq(self, params):\n        A = np.random.rand(16, 8).astype(np.float32)\n        B1 = np.zeros((16, 8), dtype=np.float32)\n        B2 = np.zeros((16, 8), dtype=np.float32)\n\n        sdfg = copy.to_sdfg()\n        sdfg(inp=A, out=B1, I=A.shape[0], J=A.shape[1])\n\n        count = sdfg.apply_transformations(MapDimShuffle, options={'parameters': params})\n        self.assertGreater(count, 0)\n        sdfg(inp=A, out=B2, I=A.shape[0], J=A.shape[1])\n\n        self.assertLess(np.linalg.norm(B1 - B2), 1e-8)\n\n    def test_semantic_eq(self):\n        self.semantic_eq(['x', 'y'])\n\n    def test_semantic_eq_trivial_trafo(self):\n        self.semantic_eq(['y', 'x'])\n\n\nif __name__ == '__main__':\n    unittest.main()\n", "# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.\nimport dace\nimport dace.transformation.subgraph.helpers as helpers\nfrom dace.transformation.subgraph import ReduceExpansion\nfrom dace.sdfg.graph import SubgraphView\nimport dace.sdfg.nodes as nodes\nimport numpy as np\nimport dace.libraries.standard as stdlib\n\nfrom typing import Union, List\nfrom util import expand_reduce, expand_maps, fusion\n\nimport pytest\n\nM = dace.symbol('M')\nN = dace.symbol('N')\nN.set(20)\nM.set(30)\n\n\n@dace.program\ndef reduction_test_1(A: dace.float64[M, N], B: dace.float64[M, N], C: dace.float64[N]):\n\n    tmp = np.ndarray(shape=[M, N], dtype=np.float64)\n    tmp[:] = 2 * A[:] + B[:]\n    C[:] = dace.reduce(lambda a, b: a + b, tmp, axis=0)\n\n\n@dace.program\ndef reduction_test_2(A: dace.float64[M, N], B: dace.float64[M, N], C: dace.float64[N]):\n\n    tmp = np.ndarray(shape=[M, N], dtype=np.float64)\n    C[:] = dace.reduce(lambda a, b: max(a, b), B, axis=0)\n    for i, j in dace.map[0:M, 0:N]:\n        with dace.tasklet:\n            in1 << C[j]\n            in2 << A[i, j]\n            out1 >> tmp[i, j]\n            out1 = in1 * in2\n    C[:] = dace.reduce(lambda a, b: a + b, tmp, axis=0)\n\n\nsettings = [[False, False], [True, False], [False, True]]\n\n\n@pytest.mark.parametrize([\"in_transient\", \"out_transient\"], settings)\ndef test_p1(in_transient, out_transient):\n    sdfg = reduction_test_1.to_sdfg()\n    sdfg.simplify()\n    state = sdfg.nodes()[0]\n    for node in state.nodes():\n        if isinstance(node, dace.libraries.standard.nodes.Reduce):\n            reduce_node = node\n\n    rexp = ReduceExpansion(sdfg, sdfg.sdfg_id, 0, {ReduceExpansion.reduce: state.node_id(reduce_node)}, 0)\n    assert rexp.can_be_applied(state, 0, sdfg) == True\n\n    A = np.random.rand(M.get(), N.get()).astype(np.float64)\n    B = np.random.rand(M.get(), N.get()).astype(np.float64)\n    C1 = np.zeros([N.get()], dtype=np.float64)\n    C2 = np.zeros([N.get()], dtype=np.float64)\n\n    csdfg = sdfg.compile()\n    csdfg(A=A, B=B, C=C1, N=N, M=M)\n    del csdfg\n\n    expand_reduce(sdfg, state, create_in_transient=in_transient, create_out_transient=out_transient)\n    csdfg = sdfg.compile()\n    csdfg(A=A, B=B, C=C2, N=N, M=M)\n    del csdfg\n\n    assert np.linalg.norm(C1) > 0.01\n    assert np.allclose(C1, C2)\n\n\nsettings = [[False, False], [True, False], [False, True]]\n\n\n@pytest.mark.parametrize([\"in_transient\", \"out_transient\"], settings)\ndef test_p2(in_transient, out_transient):\n    sdfg = reduction_test_2.to_sdfg()\n    sdfg.simplify()\n    state = sdfg.nodes()[0]\n    A = np.random.rand(M.get(), 
N.get()).astype(np.float64)\n    B = np.random.rand(M.get(), N.get()).astype(np.float64)\n    C1 = np.zeros([N.get()], dtype=np.float64)\n    C2 = np.zeros([N.get()], dtype=np.float64)\n\n    csdfg = sdfg.compile()\n    csdfg(A=A, B=B, C=C1, N=N, M=M)\n    del csdfg\n\n    expand_reduce(sdfg, state, create_in_transient=in_transient, create_out_transient=out_transient)\n    csdfg = sdfg.compile()\n    csdfg(A=A, B=B, C=C2, N=N, M=M)\n\n    assert np.linalg.norm(C1) > 0.01\n    assert np.allclose(C1, C2)\n\n\nif __name__ == \"__main__\":\n    test_p1(in_transient=False, out_transient=False)\n    test_p2(in_transient=False, out_transient=False)\n\n    test_p1(in_transient=True, out_transient=False)\n    test_p2(in_transient=True, out_transient=False)\n\n    test_p1(in_transient=True, out_transient=True)\n    test_p2(in_transient=True, out_transient=True)\n", "# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.\nimport numpy as np\nimport dace\n\n\n@dace.program\ndef foo123(a: dace.float32[2], b: dace.float32[2]):\n    b[0] = a[0]\n\n\ndef test_elem_assignment():\n    A = np.array([1, 2], dtype=np.float32)\n    B = np.array([3, 4], dtype=np.float32)\n\n    foo123(A, B)\n\n    assert A[0] == B[0]\n\n\n@dace.program\ndef optest(A: dace.float64[5, 5], B: dace.float64[5, 5], C: dace.float64[5, 5]):\n    tmp = (-A) * B\n    for i, j in dace.map[0:5, 0:5]:\n        with dace.tasklet:\n            t << tmp[i, j]\n            c >> C[i, j]\n            c = t\n\n\ndef test_elementwise():\n    A = np.random.rand(5, 5)\n    B = np.random.rand(5, 5)\n    C = np.random.rand(5, 5)\n\n    optest(A, B, C)\n    diff = np.linalg.norm(C - ((-A) * B))\n    print('Difference:', diff)\n    assert diff <= 1e-5\n\n\nif __name__ == '__main__':\n    test_elem_assignment()\n    test_elementwise()", "# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.\nimport numpy as np\nimport dace\n\nM = dace.symbol('M')\nK = dace.symbol('K')\n\n\n@dace.program\ndef sdfg_transpose(A: dace.float32[M, K], B: dace.float32[K, M]):\n    for i, j in dace.map[0:M, 0:K]:\n        B[j, i] = A[i, j]\n\n\n@dace.program\ndef transpose_test(C: dace.float32[20, 20], D: dace.float32[20, 20]):\n    sdfg_transpose(C[:], D[:])\n\n\ndef test():\n    c = np.random.rand(20, 20).astype(np.float32)\n    d = np.zeros((20, 20), dtype=np.float32)\n\n    transpose_test(c, d, K=20, M=20)\n\n    assert np.allclose(c.transpose(), d)\n\n\n@dace.program\ndef pb(a, i):\n    a[i] = a[20 - i]\n\n\n@dace.program\ndef pa(a):\n    for i in dace.map[0:5]:\n        pb(a, i)\n\n\ndef test_inout_connector():\n    a = np.random.rand(20)\n    ref = a.copy()\n    pa(a)\n    pa.f(ref)\n    assert (np.allclose(a, ref))\n\n\nif __name__ == '__main__':\n    test()\n    test_inout_connector()\n", "# Copyright 2019-2021 ETH Zurich and the DaCe authors. 
All rights reserved.\nfrom math import exp\nimport dace\nimport numpy as np\n\n\ndef test_for_loop_detection():\n N = dace.symbol('N')\n\n @dace.program\n def looptest(A: dace.float64[N]):\n for i in range(N):\n A[i] += 5\n\n sdfg: dace.SDFG = looptest.to_sdfg()\n if dace.Config.get_bool('optimizer', 'detect_control_flow'):\n assert 'for (' in sdfg.generate_code()[0].code\n\n A = np.random.rand(20)\n expected = A + 5\n sdfg(A=A, N=20)\n assert np.allclose(A, expected)\n\n\ndef test_invalid_for_loop_detection():\n sdfg = dace.SDFG('looptest')\n sdfg.add_array('A', [20], dace.float64)\n init = sdfg.add_state()\n guard = sdfg.add_state()\n loop = sdfg.add_state()\n end = sdfg.add_state()\n sdfg.add_edge(init, guard, dace.InterstateEdge(assignments=dict(i='0')))\n # Invalid: Edge between guard and loop state must not have assignments\n # This edge will be split in code generation\n sdfg.add_edge(guard, loop, dace.InterstateEdge(condition='i < 20', assignments=dict(j='i')))\n sdfg.add_edge(guard, end, dace.InterstateEdge(condition='i >= 20'))\n sdfg.add_edge(loop, guard, dace.InterstateEdge(assignments=dict(i='i + 1')))\n\n r = loop.add_read('A')\n t = loop.add_tasklet('add', {'a'}, {'out'}, 'out = a + 5')\n w = loop.add_write('A')\n loop.add_edge(r, None, t, 'a', dace.Memlet('A[j]'))\n loop.add_edge(t, 'out', w, None, dace.Memlet('A[j]'))\n\n # If edge was split successfully, a for loop will be generated\n if dace.Config.get_bool('optimizer', 'detect_control_flow'):\n assert 'for (' in sdfg.generate_code()[0].code\n A = np.random.rand(20)\n expected = A + 5\n sdfg(A=A)\n assert np.allclose(A, expected)\n\n\ndef test_edge_split_loop_detection():\n @dace.program\n def looptest():\n A = dace.ndarray([10], dtype=dace.int32)\n i = 0\n while (i < 10):\n A[i] = i\n i += 2\n return A\n\n sdfg: dace.SDFG = looptest.to_sdfg(simplify=True)\n if dace.Config.get_bool('optimizer', 'detect_control_flow'):\n assert 'for (' in sdfg.generate_code()[0].code\n\n A = looptest()\n A_ref = np.array([0, 0, 2, 0, 4, 0, 6, 0, 8, 0], dtype=np.int32)\n assert (np.array_equal(A[::2], A_ref[::2]))\n\n\nif __name__ == '__main__':\n test_for_loop_detection()\n test_invalid_for_loop_detection()\n test_edge_split_loop_detection()\n", "# Copyright 2019-2021 ETH Zurich and the DaCe authors. 
All rights reserved.\nimport dace\nfrom dace import Memlet\nfrom dace.codegen.exceptions import CompilerConfigurationError, CompilationError\nfrom dace.libraries.linalg import Inv\nimport numpy as np\nimport warnings\nimport pytest\n\nn = dace.symbol(\"n\", dace.int64)\nid = -1\n\n\ndef generate_matrix(size, dtype):\n    if dtype == np.float32:\n        tol = 1e-7\n    elif dtype == np.float64:\n        tol = 1e-14\n    else:\n        raise NotImplementedError\n    from numpy.random import default_rng\n    rng = default_rng(42)\n    while True:\n        A = rng.random((size, size), dtype=dtype)\n        B = A @ A.T\n        err = np.absolute(B @ np.linalg.inv(B) - np.eye(size))\n        if np.all(err < tol):\n            break\n    return A\n\n\ndef make_sdfg(implementation,\n              dtype,\n              id=0,\n              in_shape=[n, n],\n              out_shape=[n, n],\n              in_subset=\"0:n, 0:n\",\n              out_subset=\"0:n, 0:n\",\n              overwrite=False,\n              getri=True):\n\n    sdfg = dace.SDFG(\"linalg_inv_{}_{}_{}\".format(implementation, dtype.__name__, id))\n    sdfg.add_symbol(\"n\", dace.int64)\n    state = sdfg.add_state(\"dataflow\")\n\n    sdfg.add_array(\"xin\", in_shape, dtype)\n    if not overwrite:\n        sdfg.add_array(\"xout\", out_shape, dtype)\n\n    xin = state.add_read(\"xin\")\n    if overwrite:\n        xout = state.add_write(\"xin\")\n    else:\n        xout = state.add_write(\"xout\")\n\n    inv_node = Inv(\"inv\", overwrite_a=overwrite, use_getri=getri)\n    inv_node.implementation = implementation\n\n    state.add_memlet_path(xin, inv_node, dst_conn=\"_ain\", memlet=Memlet.simple(xin, in_subset, num_accesses=n * n))\n    state.add_memlet_path(inv_node, xout, src_conn=\"_aout\", memlet=Memlet.simple(xout, out_subset, num_accesses=n * n))\n\n    return sdfg\n\n\n@pytest.mark.parametrize(\"implementation, dtype, size, shape, overwrite, getri\", [\n    pytest.param(\n        'MKL', np.float32, 4, [[4, 4], [4, 4], [0, 0], [0, 0], [0, 1], [0, 1]], False, True, marks=pytest.mark.mkl),\n    pytest.param(\n        'MKL', np.float64, 4, [[4, 4], [4, 4], [0, 0], [0, 0], [0, 1], [0, 1]], False, True, marks=pytest.mark.mkl),\n    pytest.param('MKL',\n                 np.float32,\n                 4, [[5, 5, 5], [5, 5, 5], [1, 3, 0], [2, 0, 1], [0, 2], [1, 2]],\n                 False,\n                 True,\n                 marks=pytest.mark.mkl),\n    pytest.param('MKL',\n                 np.float64,\n                 4, [[5, 5, 5], [5, 5, 5], [1, 3, 0], [2, 0, 1], [0, 2], [1, 2]],\n                 False,\n                 True,\n                 marks=pytest.mark.mkl),\n    pytest.param('MKL',\n                 np.float32,\n                 4, [[5, 5, 5], [5, 5, 5], [1, 3, 0], [2, 0, 1], [0, 2], [1, 2]],\n                 True,\n                 True,\n                 marks=pytest.mark.mkl),\n    pytest.param('MKL',\n                 np.float64,\n                 4, [[5, 5, 5], [5, 5, 5], [1, 3, 0], [2, 0, 1], [0, 2], [1, 2]],\n                 True,\n                 True,\n                 marks=pytest.mark.mkl),\n    pytest.param(\n        'MKL', np.float32, 4, [[4, 4], [4, 4], [0, 0], [0, 0], [0, 1], [0, 1]], False, False, marks=pytest.mark.mkl),\n    pytest.param(\n        'MKL', np.float64, 4, [[4, 4], [4, 4], [0, 0], [0, 0], [0, 1], [0, 1]], False, False, marks=pytest.mark.mkl),\n    pytest.param('MKL',\n                 np.float32,\n                 4, [[5, 5, 5], [5, 5, 5], [1, 3, 0], [2, 0, 1], [0, 2], [1, 2]],\n                 False,\n                 False,\n                 marks=pytest.mark.mkl),\n    pytest.param('MKL',\n                 np.float64,\n                 4, [[5, 5, 5], [5, 5, 5], [1, 3, 0], [2, 0, 1], [0, 2], [1, 2]],\n                 False,\n                 False,\n                 marks=pytest.mark.mkl),\n    pytest.param('MKL',\n                 np.float32,\n                 4, [[5, 5, 5], [5, 5, 5], [1, 3, 0], [2, 0, 1], [0, 2], [1, 2]],\n                 True,\n                 False,\n                 marks=pytest.mark.mkl),\n    pytest.param('MKL',\n                 np.float64,\n                 4, [[5, 5, 5], [5, 5, 5], [1, 3, 0], [2, 0, 1], [0, 2], [1, 2]],\n                 True,\n                 False,\n                 marks=pytest.mark.mkl),\n    pytest.param('OpenBLAS',\n                 np.float32,\n                 4, [[4, 4], [4, 4], [0, 0], [0, 0], [0, 1], [0, 1]],\n                 False,\n                 True,\n                 marks=pytest.mark.lapack),\n    pytest.param('OpenBLAS',\n                 
np.float64,\n 4, [[4, 4], [4, 4], [0, 0], [0, 0], [0, 1], [0, 1]],\n False,\n True,\n marks=pytest.mark.lapack),\n pytest.param('OpenBLAS',\n np.float32,\n 4, [[5, 5, 5], [5, 5, 5], [1, 3, 0], [2, 0, 1], [0, 2], [1, 2]],\n False,\n True,\n marks=pytest.mark.lapack),\n pytest.param('OpenBLAS',\n np.float64,\n 4, [[5, 5, 5], [5, 5, 5], [1, 3, 0], [2, 0, 1], [0, 2], [1, 2]],\n False,\n True,\n marks=pytest.mark.lapack),\n pytest.param('OpenBLAS',\n np.float32,\n 4, [[5, 5, 5], [5, 5, 5], [1, 3, 0], [2, 0, 1], [0, 2], [1, 2]],\n True,\n True,\n marks=pytest.mark.lapack),\n pytest.param('OpenBLAS',\n np.float64,\n 4, [[5, 5, 5], [5, 5, 5], [1, 3, 0], [2, 0, 1], [0, 2], [1, 2]],\n True,\n True,\n marks=pytest.mark.lapack),\n pytest.param('OpenBLAS',\n np.float32,\n 4, [[4, 4], [4, 4], [0, 0], [0, 0], [0, 1], [0, 1]],\n False,\n False,\n marks=pytest.mark.lapack),\n pytest.param('OpenBLAS',\n np.float64,\n 4, [[4, 4], [4, 4], [0, 0], [0, 0], [0, 1], [0, 1]],\n False,\n False,\n marks=pytest.mark.lapack),\n pytest.param('OpenBLAS',\n np.float32,\n 4, [[5, 5, 5], [5, 5, 5], [1, 3, 0], [2, 0, 1], [0, 2], [1, 2]],\n False,\n False,\n marks=pytest.mark.lapack),\n pytest.param('OpenBLAS',\n np.float64,\n 4, [[5, 5, 5], [5, 5, 5], [1, 3, 0], [2, 0, 1], [0, 2], [1, 2]],\n False,\n False,\n marks=pytest.mark.lapack),\n pytest.param('OpenBLAS',\n np.float32,\n 4, [[5, 5, 5], [5, 5, 5], [1, 3, 0], [2, 0, 1], [0, 2], [1, 2]],\n True,\n False,\n marks=pytest.mark.lapack),\n pytest.param('OpenBLAS',\n np.float64,\n 4, [[5, 5, 5], [5, 5, 5], [1, 3, 0], [2, 0, 1], [0, 2], [1, 2]],\n True,\n False,\n marks=pytest.mark.lapack),\n pytest.param('cuSolverDn',\n np.float32,\n 4, [[4, 4], [4, 4], [0, 0], [0, 0], [0, 1], [0, 1]],\n False,\n False,\n marks=pytest.mark.gpu),\n pytest.param('cuSolverDn',\n np.float64,\n 4, [[4, 4], [4, 4], [0, 0], [0, 0], [0, 1], [0, 1]],\n False,\n False,\n marks=pytest.mark.gpu),\n pytest.param('cuSolverDn',\n np.float32,\n 4, [[5, 5, 5], [5, 5, 5], [1, 3, 0], [2, 0, 1], [0, 2], [1, 2]],\n False,\n False,\n marks=pytest.mark.gpu),\n pytest.param('cuSolverDn',\n np.float64,\n 4, [[5, 5, 5], [5, 5, 5], [1, 3, 0], [2, 0, 1], [0, 2], [1, 2]],\n False,\n False,\n marks=pytest.mark.gpu),\n pytest.param('cuSolverDn',\n np.float32,\n 4, [[5, 5, 5], [5, 5, 5], [1, 3, 0], [2, 0, 1], [0, 2], [1, 2]],\n True,\n False,\n marks=pytest.mark.gpu),\n pytest.param('cuSolverDn',\n np.float64,\n 4, [[5, 5, 5], [5, 5, 5], [1, 3, 0], [2, 0, 1], [0, 2], [1, 2]],\n True,\n False,\n marks=pytest.mark.gpu)\n])\ndef test_inv(implementation, dtype, size, shape, overwrite, getri):\n global id\n id += 1\n\n in_shape = shape[0]\n out_shape = shape[1]\n in_offset = shape[2]\n out_offset = shape[3]\n in_dims = shape[4]\n out_dims = shape[5]\n\n assert np.all(np.array(in_shape)[in_dims] >= size)\n assert np.all(np.array(out_shape)[out_dims] >= size)\n assert np.all(np.array(in_offset) < size)\n assert np.all(np.array(out_offset) < size)\n assert np.all(np.array(in_offset)[in_dims] + size <= np.array(in_shape)[in_dims])\n assert np.all(np.array(out_offset)[out_dims] + size <= np.array(out_shape)[out_dims])\n\n in_subset = tuple([slice(o, o + size) if i in in_dims else o for i, o in enumerate(in_offset)])\n if overwrite:\n out_subset = in_subset\n else:\n out_subset = tuple([slice(o, o + size) if i in out_dims else o for i, o in enumerate(out_offset)])\n\n in_subset_str = ','.join(\n [\"{b}:{e}\".format(b=o, e=o + size) if i in in_dims else str(o) for i, o in enumerate(in_offset)])\n if overwrite:\n out_subset_str = 
in_subset_str\n else:\n out_subset_str = ','.join(\n [\"{b}:{e}\".format(b=o, e=o + size) if i in out_dims else str(o) for i, o in enumerate(out_offset)])\n\n sdfg = make_sdfg(implementation, dtype, id, in_shape, out_shape, in_subset_str, out_subset_str, overwrite, getri)\n if implementation == 'cuSolverDn':\n sdfg.apply_gpu_transformations()\n sdfg.simplify()\n try:\n inv_sdfg = sdfg.compile()\n except (CompilerConfigurationError, CompilationError):\n warnings.warn('Configuration/compilation failed, library missing or '\n 'misconfigured, skipping test for {}.'.format(implementation))\n return\n\n A0 = np.zeros(in_shape, dtype=dtype)\n A0[in_subset] = generate_matrix(size, dtype)\n A1 = np.copy(A0)\n if overwrite:\n A2 = A1\n else:\n A2 = np.zeros(out_shape, dtype=dtype)\n A3 = np.linalg.inv(A0[in_subset])\n\n inv_sdfg(xin=A1, xout=A2, n=size)\n\n if dtype == np.float32:\n rtol = 1e-7\n atol = 1e-7\n elif dtype == np.float64:\n rtol = 1e-14\n atol = 1e-14\n else:\n raise NotImplementedError\n\n assert np.allclose(A2[out_subset], A3, rtol=rtol, atol=atol)\n if overwrite:\n assert not np.array_equal(A0, A1)\n\n\n###############################################################################\n\nif __name__ == \"__main__\":\n test_inv('MKL', np.float32, 4, [[4, 4], [4, 4], [0, 0], [0, 0], [0, 1], [0, 1]], False, True)\n test_inv('MKL', np.float64, 4, [[4, 4], [4, 4], [0, 0], [0, 0], [0, 1], [0, 1]], False, True)\n test_inv('MKL', np.float32, 4, [[5, 5, 5], [5, 5, 5], [1, 3, 0], [2, 0, 1], [0, 2], [1, 2]], False, True)\n test_inv('MKL', np.float64, 4, [[5, 5, 5], [5, 5, 5], [1, 3, 0], [2, 0, 1], [0, 2], [1, 2]], False, True)\n test_inv('MKL', np.float32, 4, [[5, 5, 5], [5, 5, 5], [1, 3, 0], [2, 0, 1], [0, 2], [1, 2]], True, True)\n test_inv('MKL', np.float64, 4, [[5, 5, 5], [5, 5, 5], [1, 3, 0], [2, 0, 1], [0, 2], [1, 2]], True, True)\n test_inv('MKL', np.float32, 4, [[4, 4], [4, 4], [0, 0], [0, 0], [0, 1], [0, 1]], False, False)\n test_inv('MKL', np.float64, 4, [[4, 4], [4, 4], [0, 0], [0, 0], [0, 1], [0, 1]], False, False)\n test_inv('MKL', np.float32, 4, [[5, 5, 5], [5, 5, 5], [1, 3, 0], [2, 0, 1], [0, 2], [1, 2]], False, False)\n test_inv('MKL', np.float64, 4, [[5, 5, 5], [5, 5, 5], [1, 3, 0], [2, 0, 1], [0, 2], [1, 2]], False, False)\n test_inv('MKL', np.float32, 4, [[5, 5, 5], [5, 5, 5], [1, 3, 0], [2, 0, 1], [0, 2], [1, 2]], True, False)\n test_inv('MKL', np.float64, 4, [[5, 5, 5], [5, 5, 5], [1, 3, 0], [2, 0, 1], [0, 2], [1, 2]], True, False)\n test_inv('cuSolverDn', np.float32, 4, [[4, 4], [4, 4], [0, 0], [0, 0], [0, 1], [0, 1]], False, False)\n test_inv('cuSolverDn', np.float64, 4, [[4, 4], [4, 4], [0, 0], [0, 0], [0, 1], [0, 1]], False, False)\n test_inv('cuSolverDn', np.float32, 4, [[5, 5, 5], [5, 5, 5], [1, 3, 0], [2, 0, 1], [0, 2], [1, 2]], False, False)\n test_inv('cuSolverDn', np.float64, 4, [[5, 5, 5], [5, 5, 5], [1, 3, 0], [2, 0, 1], [0, 2], [1, 2]], False, False)\n test_inv('cuSolverDn', np.float32, 4, [[5, 5, 5], [5, 5, 5], [1, 3, 0], [2, 0, 1], [0, 2], [1, 2]], True, False)\n test_inv('cuSolverDn', np.float64, 4, [[5, 5, 5], [5, 5, 5], [1, 3, 0], [2, 0, 1], [0, 2], [1, 2]], True, False)\n", "# Copyright 2019-2021 ETH Zurich and the DaCe authors. 
All rights reserved.\r\nimport dace\r\nimport numpy as np\r\nimport sys\r\n\r\nfrom dace.transformation.subgraph import ReduceExpansion, SubgraphFusion, MultiExpansion\r\nimport dace.transformation.subgraph.helpers as helpers\r\n\r\nimport dace.dtypes as dtypes\r\nfrom dace.sdfg.graph import SubgraphView\r\nimport dace.libraries.standard as stdlib\r\nimport dace.sdfg.nodes as nodes\r\nfrom typing import Union, List\r\nfrom util import expand_maps, expand_reduce, fusion\r\n\r\ndace_dtype = dace.float32\r\nH, B, SN, SM = (dace.symbol(s) for s in ('H', 'B', 'SN', 'SM'))\r\n\r\n\r\n@dace.program\r\ndef softmax(X_in: dace_dtype[H, B, SN, SM]):\r\n    tmp_max = dace.reduce(lambda a, b: max(a, b), X_in, axis=3, identity=0)\r\n\r\n    tmp_out = np.ndarray([H, B, SN, SM], dtype=dace_dtype)\r\n    out = np.ndarray([H, B, SN, SM], dtype=dace_dtype)\r\n\r\n    # No broadcasting rules\r\n    for i, j, k, l in dace.map[0:H, 0:B, 0:SN, 0:SM]:\r\n        with dace.tasklet:\r\n            inp << X_in[i, j, k, l]\r\n            mx << tmp_max[i, j, k]\r\n            o >> tmp_out[i, j, k, l]\r\n            o = math.exp(inp - mx)\r\n    #tmp_out = np.exp(X_in - tmp_max)\r\n\r\n    tmp_sum = dace.reduce(lambda a, b: a + b, tmp_out, identity=0, axis=3)\r\n    for i, j, k, l in dace.map[0:H, 0:B, 0:SN, 0:SM]:\r\n        with dace.tasklet:\r\n            inp << tmp_out[i, j, k, l]\r\n            sm << tmp_sum[i, j, k]\r\n            o >> out[i, j, k, l]\r\n            o = inp / sm\r\n\r\n    return out\r\n\r\n\r\nH.set(10)\r\nB.set(10)\r\nSN.set(20)\r\nSM.set(20)\r\n\r\n\r\ndef get_partition(sdfg, graph):\r\n    subgraph1 = SubgraphView(graph, [])\r\n    subgraph2 = SubgraphView(graph, [])\r\n\r\n    cnt1 = 0\r\n    for node in dace.sdfg.utils.dfs_topological_sort(graph):\r\n        if isinstance(node, stdlib.nodes.reduce.Reduce):\r\n            if cnt1 < 2:\r\n                subgraph1._subgraph_nodes.append(node)\r\n                cnt1 += 1\r\n            else:\r\n                subgraph2._subgraph_nodes.append(node)\r\n\r\n        if isinstance(node, nodes.MapEntry):\r\n            if cnt1 < 2:\r\n                subgraph1._subgraph_nodes.append(node)\r\n                cnt1 += 1\r\n            else:\r\n                subgraph2._subgraph_nodes.append(node)\r\n\r\n    return [subgraph1, subgraph2]\r\n\r\n\r\ndef test_2fuse():\r\n    sdfg = softmax.to_sdfg()\r\n    sdfg.name = 'softmax_2part'\r\n    sdfg.simplify()\r\n    X_in = np.random.rand(H.get(), B.get(), SN.get(), SM.get()).astype(np.float32)\r\n\r\n    csdfg = sdfg.compile()\r\n    res1 = csdfg(X_in=X_in, H=H, B=B, SN=SN, SM=SM)\r\n    del csdfg\r\n\r\n    subgraph = get_partition(sdfg, sdfg.nodes()[0])\r\n    expand_reduce(sdfg, sdfg.nodes()[0], subgraph)\r\n    expand_maps(sdfg, sdfg.nodes()[0], subgraph)\r\n    fusion(sdfg, sdfg.nodes()[0], subgraph)\r\n\r\n    csdfg = sdfg.compile()\r\n    res2 = csdfg(X_in=X_in, H=H, B=B, SN=SN, SM=SM)\r\n    del csdfg\r\n\r\n    assert np.allclose(res1, res2)\r\n    print(\"PASS\")\r\n    return\r\n\r\n\r\ndef test_1fuse():\r\n    sdfg = softmax.to_sdfg()\r\n    sdfg.name = 'softmax_fused'\r\n    sdfg.simplify()\r\n    X_in = np.random.rand(H.get(), B.get(), SN.get(), SM.get()).astype(np.float32)\r\n\r\n    csdfg = sdfg.compile()\r\n    res1 = csdfg(X_in=X_in, H=H, B=B, SN=SN, SM=SM)\r\n    del csdfg\r\n\r\n    expand_reduce(sdfg, sdfg.nodes()[0])\r\n    expand_maps(sdfg, sdfg.nodes()[0])\r\n    fusion(sdfg, sdfg.nodes()[0])\r\n\r\n    csdfg = sdfg.compile()\r\n    res2 = csdfg(X_in=X_in, H=H, B=B, SN=SN, SM=SM)\r\n    del csdfg\r\n\r\n    print(np.linalg.norm(res1))\r\n    print(np.linalg.norm(res2))\r\n    assert np.allclose(res1, res2)\r\n    print(\"PASS\")\r\n    return\r\n\r\n\r\ndef test_1fuse():\r\n    sdfg = softmax.to_sdfg()\r\n    sdfg.name = 'softmax_fused'\r\n    sdfg.simplify()\r\n    X_in = np.random.rand(H.get(), B.get(), SN.get(), SM.get()).astype(np.float32)\r\n\r\n    csdfg = sdfg.compile()\r\n    
res1 = csdfg(X_in=X_in, H=H, B=B, SN=SN, SM=SM)\r\n del csdfg\r\n\r\n expand_reduce(sdfg, sdfg.nodes()[0])\r\n expand_maps(sdfg, sdfg.nodes()[0])\r\n fusion(sdfg, sdfg.nodes()[0])\r\n\r\n #sdfg.specialize({'SM':SM})\r\n csdfg = sdfg.compile()\r\n res2 = csdfg(X_in=X_in, H=H, B=B, SN=SN, SM=SM)\r\n del csdfg\r\n\r\n print(np.linalg.norm(res1))\r\n print(np.linalg.norm(res2))\r\n assert np.allclose(res1, res2)\r\n print(\"PASS\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n test_2fuse()\r\n test_1fuse()\r\n" ]
[ [ "numpy.transpose" ], [ "numpy.allclose", "numpy.cumsum", "numpy.ndarray", "numpy.random.randn", "numpy.sum" ], [ "matplotlib.pyplot.imshow", "tensorflow.FixedLenFeature", "numpy.multiply", "tensorflow.data.TFRecordDataset", "tensorflow.decode_raw", "tensorflow.cast", "tensorflow.reshape", "tensorflow.one_hot", "tensorflow.Session", "tensorflow.parse_single_example", "matplotlib.pyplot.show" ], [ "numpy.copy", "numpy.random.rand", "numpy.allclose" ], [ "numpy.linalg.norm" ], [ "numpy.linalg.norm" ], [ "numpy.random.rand", "numpy.allclose" ], [ "numpy.max", "numpy.zeros", "numpy.random.rand" ], [ "numpy.random.choice", "numpy.cumsum", "scipy.sparse.csr_matrix", "numpy.random.rand", "numpy.zeros", "numpy.random.randint" ], [ "numpy.zeros", "numpy.float32" ], [ "numpy.random.rand", "numpy.linalg.norm" ], [ "numpy.random.rand", "numpy.zeros", "numpy.linalg.norm" ], [ "numpy.linalg.norm", "numpy.ndarray", "numpy.allclose" ], [ "numpy.array", "numpy.linalg.norm", "numpy.random.rand" ], [ "numpy.zeros", "numpy.random.rand", "numpy.allclose" ], [ "numpy.array", "numpy.random.rand", "numpy.array_equal", "numpy.allclose" ], [ "numpy.allclose", "numpy.array_equal", "numpy.linalg.inv", "numpy.eye", "numpy.all", "numpy.copy", "numpy.array", "numpy.zeros", "numpy.random.default_rng" ], [ "numpy.linalg.norm", "numpy.ndarray", "numpy.allclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.24", "1.13", "1.16", "1.9", "1.18", "1.23", "1.21", "1.22", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.24", "1.13", "1.16", "1.9", "1.18", "1.23", "1.21", "1.22", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.24", "1.13", "1.16", "1.9", "1.18", "1.23", "1.21", "1.22", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.24", "1.13", "1.16", "1.9", "1.18", "1.23", "1.21", "1.22", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Theomat/MPSEAS
[ "91f9c991e2061a7d230e491210d2c93005fd2236", "724b23a41fae2c24269805c3e87160b5c85ba85e" ]
[ "pseas/runnable/print_table_step1.py", "pseas/truncated_distributions/trunc_norm.py" ]
[ "import pandas as pd\nimport numpy as np\n\nCOLORS_QTY: int = 5\n# =============================================================================\n# Argument parsing.\n# =============================================================================\nimport argparse\n\nfrom scipy import integrate\nargument_parser: argparse.ArgumentParser = argparse.ArgumentParser(\n description=\"Plot figures based on run data.\")\n\nargument_default_values = {\n\t\"suffix\": 'kissat_ibm',\n \"folder\": \".\"\n}\n\nargument_parser.add_argument('-f', '--folder',\n type=str,\n action='store',\n default=argument_default_values['folder'],\n help=\"Folder in which to look for the file (default: '.')\"\n )\nargument_parser.add_argument('-s', '--suffix',\n type=str,\n action='store',\n default=argument_default_values['suffix'],\n help=\"File suffix used in produce_run_data (default: 'kissat_ibm')\"\n )\nparsed_parameters = argument_parser.parse_args()\n\nfolder: str = parsed_parameters.folder \nsuffix: str = parsed_parameters.suffix\n# =============================================================================\n# Finished parsing\n# =============================================================================\ndef __rename_strategies__(df: pd.DataFrame) -> pd.DataFrame:\n df[\"strategy\"] = df[\"strategy\"].str.replace(\n \".*-discrimination-based\", \"discrimination-based\", regex=True)\n df[\"strategy\"] = df[\"strategy\"].str.replace(\n \"Info. over Decision/Time\", \"information-based\", regex=False)\n df[\"strategy\"] = df[\"strategy\"].str.replace(\n \"Random\", \"random\", regex=False)\n\n # Rename discrimination component\n df[\"strategy\"] = df[\"strategy\"].str.replace(\" 10100%\", \"\", regex=False)\n df[\"strategy\"] = df[\"strategy\"].str.replace(\".00%\", \"%\", regex=False)\n df[\"strategy\"] = df[\"strategy\"].str.replace(\n \"Subset\", \"subset\", regex=False)\n\n df[\"selection\"] = df[\"strategy\"].str.extract(r'^([^+]*) \\+ .*')\n df[\"discrimination\"] = df[\"strategy\"].str.extract(r'^[^+]* \\+ (.*)')\n return df\n\ndef __filter_best_strategies__(df: pd.DataFrame) -> pd.DataFrame:\n # Remove all that don't have timeout correction\n df[\"baseline\"] = df[\"selection\"].str.contains(\n \"random\") | df[\"discrimination\"].str.contains(\"subset\")\n return df\n\n\ndico = {}\nfor i, configurations in enumerate(range(10, 60, 10)):\n for j, split in enumerate(range(10, 60, 10)):\n ratio = split / 100\n detailed_df = pd.read_csv(f\"{folder}/detailed_runs_{suffix}_{configurations}_{ratio}.csv\")\n detailed_df = detailed_df.drop(\"Unnamed: 0\", axis=1)\n detailed_df = __rename_strategies__(detailed_df)\n df = __filter_best_strategies__(detailed_df)\n # Remove subset\n df = df[~df[\"discrimination\"].str.contains(\"subset\")]\n # Take mean performance\n df = df.groupby([\"selection\", \"time\"]).mean().reset_index()\n df[\"prediction\"] *= 100\n\n for method in df[\"selection\"].unique():\n if method not in dico:\n dico[method] = np.zeros((5, 5))\n\n data = df[df[\"selection\"] == method]\n data = data[[\"prediction\", \"time\"]].to_numpy()\n auc = integrate.trapezoid(data[:, 0], dx=1, axis=0)\n dico[method][i, j] = auc / 10000 * 100\n\nCOLOR_NAMES = [f\"color{i+1}\" for i in range(COLORS_QTY)]\n\nfor method, values in dico.items():\n print(\"\\\\begin{table}\")\n print(\"\\t\\\\centering\")\n print(\"\\t\\\\caption{Percentage of total AUC Evolution for \" + method + \" on \" + suffix.replace(\"_\", \" \") + \"}\")\n print(\"\\t\\\\begin{tabular}{\"+ (\"c\" * 6) + \"}\")\n 
print(\"\\t\\t\\\\toprule\")\n print(\"\\t\\tConfigurations & 10 & 20 & 30 & 40 & 50 \\\\\\\\\")\n mini = np.min(values) \n maxi = np.max(values)\n scale = maxi - mini\n unit = scale / (len(COLOR_NAMES) - 1)\n for j, percent in enumerate(range(10, 60, 10)):\n line_values = [float(values[i, j])\n for i, _ in enumerate(range(10, 60, 10))]\n colors = [COLOR_NAMES[round((x - mini) / unit)] for x in line_values]\n print(f\"\\t\\t{percent}\\\\% & \" + \" & \".join(f\"\\\\colorbox{{{color}!30}}{{{val:.1f}}}\" for color, val in zip(colors, line_values)) + \"\\\\\\\\\")\n print(\"\\t\\t\\\\bottomrule\")\n print(\"\\t\\\\end{tabular}\")\n print(\"\\\\end{table}\")\n\n\n", "\"\"\"\nProvides cdf and pdf methods for a truncated normal distribution.\n\"\"\"\nfrom typing import Union\nimport numpy as np\n\nimport scipy.stats as st\n\nArrayLike = Union[np.ndarray, float]\n\n\ndef pdf(x: ArrayLike, loc: ArrayLike = 0, scale: ArrayLike = 1, a: ArrayLike = -np.inf, b: ArrayLike = np.inf) -> ArrayLike:\n a = (a - loc) / scale\n b = (b - loc) / scale\n return st.truncnorm.pdf(x, loc=loc, scale=scale, a=a, b=b)\n \n\n\ndef cdf(x: ArrayLike, loc: ArrayLike = 0, scale: ArrayLike = 1, a: ArrayLike = -np.inf, b: ArrayLike = np.inf) -> ArrayLike:\n a = (a - loc) / scale\n b = (b - loc) / scale\n return st.truncnorm.cdf(x, loc=loc, scale=scale, a=a, b=b)\n" ]
[ [ "pandas.read_csv", "numpy.min", "numpy.max", "numpy.zeros", "scipy.integrate.trapezoid" ], [ "scipy.stats.truncnorm.pdf", "scipy.stats.truncnorm.cdf" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [ "1.10" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
KoutaOhishi/burger_war_dev
[ "9a7e21d631dc7e82f5341450ddafdc8ed32d2ac1" ]
[ "burger_war_dev/scripts/waypoint.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport csv\nimport math\nimport numpy as np\n\nFIELD_SCORE_NUM_OFFSET=6\n\nclass Waypoints:\n\n def __init__(self, path, side):\n self.points = []\n self.number = 0\n self.Waypoints_Lap = 0\n self.next_target_idx = -1\n self.all_field_score = np.ones([18]) # field score state\n self._load_waypoints(path, side)\n print ('[waypoint]number of waypoints: '+str(len(self.points)))\n\n def _load_waypoints(self, path, side):\n with open(path) as f:\n lines = csv.reader(f)\n for l in lines:\n # x,y,radian,target_idx(refer main code)\n point = [float(n) for n in l]\n point[2] = point[2]*math.pi/180.0\n if side == 'r':\n point[3] = int(point[3])\n else:\n point[3] = int(point[4])\n print(\" \"+str(point))\n self.points.append(point[0:4])\n\n def get_next_waypoint(self):\n self.number = self.number+1\n if self.number == len(self.points):\n self.Waypoints_Lap = self.Waypoints_Lap+1\n print(\"[waypoint]next lap!!!!!!\")\n self.number = 0\n\n #print(\"[waypoint]search target !!!!!!\", self.all_field_score)\n for i in range(self.number, len(self.points))+range(self.number):\n score_num = self.points[i][3]\n #print(\"[waypoint]\"+str(score_num))\n\n # 得点と関係ないwaypoint\n if score_num == -1:\n # 1週目は得点と関係ないwaypointも辿る。\n if self.Waypoints_Lap == 0:\n return self.points[self.number][0:3]\n continue\n\n # 得点と関係あるwaypoint\n if self.all_field_score[score_num - FIELD_SCORE_NUM_OFFSET] == 0:\n # if already get score, skip search\n continue\n else:\n # if not get score, go to target\n print(\"[waypoint]\"+str(i)+\"/\"+str(len(self.points)))\n self.number = i\n return self.points[i][0:3]\n\n print(\"[waypoint]got all field score !!!\")\n return self.points[self.number][0:3]\n\n def get_current_waypoint(self):\n return self.points[self.number]\n\n def get_current_target_number(self):\n # target No.\n return self.points[self.number][3]\n\n def get_any_waypoint(self, n):\n return self.points[n]\n\n def set_number(self, n):\n self.number = n\n\n def set_field_score(self, n):\n self.all_field_score = n\n # print(self.all_field_score)\n\n def check_if_get_field_score(self, n):\n score_num = n\n if self.all_field_score[score_num - FIELD_SCORE_NUM_OFFSET] == 0:\n return True\n else:\n return False\n\n\n# if __name__ == \"__main__\":\n # Waypoints('waypoints.csv')\n" ]
[ [ "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kiss2u/google-research
[ "5b70d349a6af2f5ec1694bfd5341e6b3fb526947", "2cd66234656f9e2f4218ed90a2d8aa9cf3139093", "2c0043ecd507e75e2df9973a3015daf9253e1467", "2c0043ecd507e75e2df9973a3015daf9253e1467", "2c0043ecd507e75e2df9973a3015daf9253e1467", "2c0043ecd507e75e2df9973a3015daf9253e1467", "2c0043ecd507e75e2df9973a3015daf9253e1467", "2c0043ecd507e75e2df9973a3015daf9253e1467", "2c0043ecd507e75e2df9973a3015daf9253e1467", "2cd66234656f9e2f4218ed90a2d8aa9cf3139093", "2cd66234656f9e2f4218ed90a2d8aa9cf3139093", "9049acf9246c1b75170f0c6757e62a8f619a9db6", "2c0043ecd507e75e2df9973a3015daf9253e1467", "2c0043ecd507e75e2df9973a3015daf9253e1467", "2c0043ecd507e75e2df9973a3015daf9253e1467", "2c0043ecd507e75e2df9973a3015daf9253e1467", "2c0043ecd507e75e2df9973a3015daf9253e1467", "2c0043ecd507e75e2df9973a3015daf9253e1467", "2c0043ecd507e75e2df9973a3015daf9253e1467", "2c0043ecd507e75e2df9973a3015daf9253e1467", "2c0043ecd507e75e2df9973a3015daf9253e1467" ]
[ "saccader/visual_attention/saccader_classnet.py", "correct_batch_effects_wdn/transform_test.py", "meta_reward_learning/semantic_parsing/nsm/model_factory.py", "norml/config_maml.py", "concept_explanations/toy_helper.py", "video_structure/hyperparameters.py", "cnn_quantization/tf_cnn_benchmarks/models/densenet_model.py", "mol_dqn/chemgraph/target_sas_test.py", "model_pruning/examples/cifar10/cifar10_eval.py", "qanet/util/misc_util.py", "summae/pors_test.py", "uq_benchmark_2019/metrics_lib_test.py", "norml/tools/utility.py", "tcc/utils.py", "genomics_ood/images_ood/utils.py", "weak_disentangle/networks.py", "linear_dynamical_systems/arma.py", "dual_dice/gridworld/environments.py", "cnn_quantization/tf_cnn_benchmarks/models/alexnet_model.py", "neutra/ebm/ebm_test.py", "mol_dqn/chemgraph/optimize_qed_test.py" ]
[ "# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Saccader-Classification network model.\n\nSaccader model is an image classification model with a hard attention mechanism.\nThe model uses the saccader model for visual attention\nand uses a separate network for classification.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nfrom __future__ import print_function\n\nimport tensorflow.compat.v1 as tf\n\nfrom saccader import utils\nfrom saccader.visual_attention import saccader\nfrom tensorflow.contrib import slim as contrib_slim\nfrom tensorflow_models.slim.nets import nets_factory\nfrom tensorflow_models.slim.nets.nasnet import nasnet\n\n\nslim = contrib_slim\nSaccader = saccader.Saccader\n\n\nclass SaccaderClassNet(Saccader):\n \"\"\"Saccader-Classification Model.\n\n Network that performs classification on images by taking glimpses at\n different locations on an image.\n\n Attributes:\n num_classes: (Integer) Number of classification classes.\n variable_scope: (String) Name of model variable scope.\n attention_groups: (Integer) Number of groups in attention network.\n attention_layers_per_group: (Integer) Number of layers in each group in\n attention network.\n saccader_cell: Saccader Cell object.\n representation_network: Representation network object.\n glimpse_shape: 2-D tuple of integers indicating glimpse shape.\n glimpse_shape_classnet: 2-D tuple of integers indicating classification\n network glimpse shape.\n glimpse_shape_saccader: 2-D tuple of integers indicating saccader\n glimpse shape.\n var_list_representation_network: List of variables for the representation\n network.\n var_list_attention_network: List of variables for the attention network.\n var_list_saccader_cell: List of variables for the saccader cell.\n var_list_location: List of variables for the location network.\n var_list_classification: List of variables for the classification network.\n var_list_classnet: List of variables for the classification network.\n var_list: List of all model variables.\n init_op: Initialization operations for model variables.\n \"\"\"\n\n def __init__(self, config, variable_scope=\"saccader_classnet\"):\n Saccader.__init__(self, config, variable_scope=variable_scope+\"/saccader\")\n self.var_list_saccader = []\n self.var_list_classnet = []\n self.classnet_type = config.classnet_type\n self.num_classes = config.num_classes\n self.variable_scope_classnet = variable_scope+\"/\"+self.classnet_type\n self.glimpse_shape_saccader = (-1, -1)\n self.glimpse_shape_classnet = config.glimpse_shape\n\n def __call__(self,\n images_saccader,\n images_classnet,\n num_times,\n is_training_saccader=False,\n is_training_classnet=False,\n policy=\"learned\",\n stop_gradient_after_representation=False):\n\n logits, locations_t, best_locations_t, endpoints = Saccader.__call__(\n self,\n images_saccader,\n num_times,\n is_training=is_training_saccader,\n policy=policy,\n 
stop_gradient_after_representation=stop_gradient_after_representation)\n\n self.glimpse_shape_saccader = self.glimpse_shape\n image_size_saccader = images_saccader.shape.as_list()[1]\n image_size_classnet = images_classnet.shape.as_list()[1]\n if self.glimpse_shape_classnet[0] < 0:\n self.glimpse_shape_classnet = tuple([int(\n image_size_classnet / image_size_saccader *\n self.glimpse_shape[0])] * 2)\n self.glimpse_shape = self.glimpse_shape_classnet\n\n images_glimpse_t = []\n for locations in locations_t:\n images_glimpse = utils.extract_glimpse(\n images_classnet, size=self.glimpse_shape_classnet, offsets=locations)\n images_glimpse_t.append(images_glimpse)\n\n batch_size = images_classnet.shape.as_list()[0]\n images_glimpse_t = tf.concat(images_glimpse_t, axis=0)\n\n variables_before = set(tf.global_variables())\n reuse = True if self.var_list_classnet else False\n with tf.variable_scope(self.variable_scope_classnet, reuse=reuse):\n if self.classnet_type == \"nasnet\":\n classnet_config = nasnet.large_imagenet_config()\n classnet_config.use_aux_head = 0\n classnet_config.drop_path_keep_prob = 1.0\n with slim.arg_scope(nasnet.nasnet_large_arg_scope()):\n classnet_logits, endpoints_ = nasnet.build_nasnet_large(\n images_glimpse_t, self.num_classes,\n is_training=is_training_classnet,\n config=classnet_config)\n elif self.classnet_type == \"resnet_v2_50\":\n network = nets_factory.get_network_fn(\n \"resnet_v2_50\", self.num_classes, is_training=is_training_classnet)\n classnet_logits, endpoints_ = network(images_glimpse_t)\n\n endpoints[\"classnet\"] = endpoints_\n variables_after = set(tf.global_variables())\n logits_t = tf.reshape(classnet_logits, (num_times, batch_size, -1))\n logits = tf.reduce_mean(logits_t, axis=0)\n if not reuse:\n self.var_list_saccader = self.var_list_classification + self.var_list_location\n self.var_list_classnet = [\n v for v in list(variables_after-variables_before)\n if \"global_step\" not in v.op.name]\n self.var_list.extend(self.var_list_classnet)\n self.init_op = tf.variables_initializer(var_list=self.var_list)\n\n return logits, locations_t, best_locations_t, endpoints\n", "# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for Transform library.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport string\n\nimport numpy as np\nimport pandas as pd\nimport pandas.util.testing as pandas_testing\nfrom six.moves import range\nimport tensorflow.compat.v1 as tf\n\nfrom correct_batch_effects_wdn import metadata\nfrom correct_batch_effects_wdn import transform\n\n_ACTIVITY = \"ACTIVE\"\n_PLATE = \"plate1\"\n_SITE = 0\n_TIMEPOINT = \"0\"\n_SEQUENCE = \"AGCT\"\n_CELL_DENSITY = \"0\"\n_PASSAGE = \"0\"\n_CELL_LINE_ID = \"\"\n\n\nclass TransformTest(tf.test.TestCase):\n\n def setUp(self):\n super(TransformTest, self).setUp()\n wells_384, rows_384, cols_384 = [], [], []\n for row in string.ascii_uppercase[:16]:\n for 
col in range(24):\n wells_384.append(\"%s%02d\" % (row, col))\n rows_384.append(\"%s\" % row)\n cols_384.append(\"%02d\" % col)\n\n n_per_batch = 100\n n_each_control = 3 * n_per_batch\n n_other = 3 * n_per_batch\n np.random.seed(123)\n self.columns = [0, 1]\n\n neg_control_batches = []\n for i in range(0, n_each_control, n_per_batch):\n batch = \"week%d\" % (i % n_per_batch)\n control_tuples = []\n for j in range(n_per_batch):\n control_tuples.append(\n (\"NEGATIVE_CONTROL\", \"DMSO\", \"DMSO\", 1.0, _ACTIVITY, batch, _PLATE,\n wells_384[j], rows_384[j], cols_384[j], _SITE, _TIMEPOINT,\n _SEQUENCE, _CELL_DENSITY, _PASSAGE, _CELL_LINE_ID))\n neg_control_batches.append(\n pd.DataFrame(\n np.random.multivariate_normal(\n mean=np.array([2.0 + i, 4.0 + i]),\n cov=np.array([[3.0 + i, 1.0 + i], [1.0 + i, 2.0 + i]]),\n size=n_per_batch),\n columns=self.columns,\n index=pd.MultiIndex.from_tuples(\n control_tuples, names=metadata.METADATA_ORDER)))\n self.neg_controls = pd.concat(neg_control_batches)\n\n pos_control_batches = []\n for i in range(0, n_each_control, n_per_batch):\n batch = \"week%d\" % (i % n_per_batch)\n control_tuples = []\n for j in range(n_per_batch):\n control_tuples.append(\n (\"POSITIVE_CONTROL\", \"Taxol\", \"Taxol\", 1.0, _ACTIVITY, batch,\n _PLATE, wells_384[j], rows_384[j], cols_384[j], _SITE, _TIMEPOINT,\n _SEQUENCE, _CELL_DENSITY, _PASSAGE, _CELL_LINE_ID))\n pos_control_batches.append(\n pd.DataFrame(\n np.random.multivariate_normal(\n mean=np.array([5.0 + i, 7.0 + i]),\n cov=np.array([[6.0 + i, 4.0 + i], [4.0 + i, 5.0 + i]]),\n size=n_per_batch),\n columns=self.columns,\n index=pd.MultiIndex.from_tuples(\n control_tuples, names=metadata.METADATA_ORDER)))\n self.pos_controls = pd.concat(pos_control_batches)\n self.controls = pd.concat([self.neg_controls, self.pos_controls])\n\n experimental_batches = []\n for i in range(0, n_other, n_per_batch):\n batch = \"week%d\" % (i % n_per_batch)\n experimental_tuples = []\n for j in range(n_per_batch):\n experimental_tuples.append(\n (\"EXPERIMENTAL\", \"other\", \"2\", 1.0, _ACTIVITY, batch, _PLATE,\n wells_384[j], rows_384[j], cols_384[j], _SITE, _TIMEPOINT,\n _SEQUENCE, _CELL_DENSITY, _PASSAGE, _CELL_LINE_ID))\n experimental_batches.append(\n pd.DataFrame(\n np.random.multivariate_normal(\n mean=np.array([1.0 + i, 2.0 + i]),\n cov=np.array([[3.0 + i, 1.0 + i], [1.0 + i, 2.0 + i]]),\n size=n_per_batch),\n columns=self.columns,\n index=pd.MultiIndex.from_tuples(\n experimental_tuples, names=metadata.METADATA_ORDER)))\n self.experimental = pd.concat(experimental_batches)\n self.data = pd.concat([self.controls, self.experimental])\n\n def testGetNegativeControls(self):\n pandas_testing.assert_frame_equal(self.neg_controls,\n transform.get_negative_controls(\n self.data))\n\n def testEigSymmetric(self):\n q_expected = np.array([[1.0 / np.sqrt(2), -1.0 / np.sqrt(2)],\n [1.0 / np.sqrt(2), 1.0 / np.sqrt(2)]])\n # q should be orthonormal - make sure it really is\n pandas_testing.assert_almost_equal(\n q_expected.T.dot(q_expected), np.identity(2))\n lambda_expected = np.diag([3.0, 2.0])\n a = q_expected.dot(lambda_expected).dot(q_expected.T)\n lambda_computed, q_computed = transform.eig_symmetric(a)\n pandas_testing.assert_almost_equal(\n np.diag(lambda_expected), lambda_computed)\n # make sure q_computed is orthonormal\n pandas_testing.assert_almost_equal(\n np.identity(2), q_expected.T.dot(q_expected))\n for i in range(q_expected.shape[0]):\n ev_expected = q_expected[:, i]\n ev_computed = q_computed[:, i]\n # In this example, the 
eigenvalues are discrete, so the eigenvectors are\n # unique up to sign. Since the sign will depend on the particulars of\n # the algorithm used to generate the eigenvectors, just make sure that\n # the dot product with the expected eigenvectors is +/- 1\n pandas_testing.assert_almost_equal(1.0,\n np.abs(ev_expected.dot(ev_computed)))\n\n def testFactorAnalysisRun(self):\n transform.factor_analysis(self.data, 0.1, -1)\n\n def testGetBootstrapSampleRun(self):\n bootstrap_data = transform.get_bootstrap_sample(self.data)\n self.assertTupleEqual(self.data.shape, bootstrap_data.shape)\n\n def testTransformDf(self):\n df_small = pd.DataFrame(np.array([[1.0, 2.0], [3.0, 4.0]]))\n rotate_mat_np = np.array([[3.0, 4.0], [5.0, 6.0]])\n shift_vec_np = np.array([[-1.0], [-2.0]])\n expected = pd.DataFrame(np.array([[10.0, 15.0], [24.0, 37.0]]))\n df_trans = transform.transform_df(\n df_small, rotate_mat_np, shift_vec_np)\n pandas_testing.assert_frame_equal(df_trans, expected)\n\n def testSumOfSquare(self):\n a = tf.constant(np.array([1.0, 2.0]))\n expected = 5.0\n with self.session() as sess:\n a_sum_of_square = sess.run(transform.sum_of_square(a))\n self.assertEqual(a_sum_of_square, expected)\n\n def testDropUnevaluatedComp(self):\n pandas_testing.assert_frame_equal(\n pd.concat([self.pos_controls, self.experimental]),\n transform.drop_unevaluated_comp(self.data))\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n", "# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"A scikit-lean like interface around tensorflow graphs.\"\"\"\nimport abc\nimport time\nimport numpy as np\nimport six\nimport tensorflow.compat.v1 as tf\n\nfrom meta_reward_learning.semantic_parsing.nsm import data_utils\nfrom meta_reward_learning.semantic_parsing.nsm import tf_utils\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass SeqModel(object):\n \"\"\"Abstract class for a sequence model.\"\"\"\n\n @abc.abstractmethod\n def step(self, inputs, state, context, parameters):\n raise NotImplementedError\n\n @abc.abstractmethod\n def train(self, inputs, targets, context, parameters):\n raise NotImplementedError\n\n\nclass RNNSeqModel(SeqModel):\n \"\"\"A scikit-learn like interface to a RNN sequence model.\n\n The model handles the batching and padding for sequence data.\n\n B is the batch size, T is the time steps or sequence length.\n \"...\" means scalar, arrays, or tuples.\n\n Conceptually, the input should contain 4 parts:\n 1) inputs that is different at every timestep. shape: (B, T, ...)\n 2) initial states that is different for each example. shape: (B, ...)\n 3) context that is different at every example, but the same at different\n timestep. shape: (B, ...), may be different for training\n (input sequence for backprop to encoder) and inference\n (encoded sequence).\n 4) parameters that is the same for each example. For example,\n the dropout rate. Usually a scalar. 
shape: (...)\n\n The output usually contains 2 parts:\n 1) outputs at each step, shape: (B, T, ...).\n 2) final states. shape: (B, ...)\n\n In terms of implementation, we use list to represent\n variable length inputs.\n\n Assume:\n Atom = np.array or float or integer or tuple\n\n For normal inputs (handled by data_utils.BatchGenerator):\n inputs = [[Atom1, Atom2, ...]]\n size is (B, ...)\n\n For sequence inputs (handled by data_utils.SeqBatchGenerator):\n inputs = [[Atom_11, Atom_12, ...],\n [Atom_21, Atom_22, ...], ...]\n size is (B, T, ...)\n \"\"\"\n\n def __init__(self, graph, batch_size=32, en_maxlen=None, maxlen=None):\n \"\"\"Creates a RNN sequence model for a given Graph instance.\"\"\"\n self.graph = graph\n self.session = graph.session\n self.saver = graph.saver\n self.batch_size = batch_size\n self._outputs = graph.outputs\n self._final_state = graph.final_state\n self._n_examples = graph.n_examples\n self._predictions = graph.predictions\n self._probs = graph.prediction_probs\n self._samples = graph.samples\n self.en_maxlen = en_maxlen\n self.maxlen = maxlen\n self.meta_learn = graph.meta_learn\n\n self._loss = 'loss'\n self._train = 'train'\n self._meta_train = 'meta_train'\n self._count = 'n'\n self._policy_ent = 'ent_reg'\n\n self._step_bc = data_utils.BatchConverter(\n tuple_keys=['initial_state'], seq_keys=['inputs', 'encoded_context'])\n self._step_ba = data_utils.BatchAggregator(\n tuple_keys=[self._final_state], seq_keys=[self._outputs])\n\n self._train_bc = data_utils.BatchConverter(['initial_state'],\n 'inputs targets context'.split())\n self._train_ba = data_utils.BatchAggregator(\n num_keys=[self._loss, self._policy_ent, self._count])\n\n def set_lr(self, new_lr):\n \"\"\"Set the learning rate to a new value.\"\"\"\n self.graph.run(['update_lr'], feed_dict=dict(new_lr=new_lr))\n\n def get_global_step(self):\n global_step = self.graph.run(['global_step'], {})['global_step']\n return global_step\n\n def run_epoch(self,\n fetch_list,\n feed_dict,\n batch_converter,\n batch_aggregator,\n shuffle=False,\n parameters=None,\n val_feed_dict=None,\n verbose=1,\n writer=None):\n \"\"\"Run the TF graph for one pass through the data in feed_dict.\n\n Args:\n fetch_list: A list of the names of the nodes to be fetched.\n feed_dict: A dictionary with names of the nodes to be feed to as keys.\n Contains the fixed length data.\n prepare_feed_dict_fn: A function to prepare a batch of examples to a feed\n dict for TF graph.\n reduce_result_dict_fn: A reducer to collect results from each iteration.\n shuffle: whether to shuffle the data.\n parameters: A dictionary of parameters.\n writer: A TF Filewriter to write summaries.\n\n Returns:\n epoch_result_dict: A dictionary with keys from fetch_list and\n the outputs collected through the epoch.\n \"\"\"\n batch_iterator = data_utils.BatchIterator(\n feed_dict, shuffle=shuffle, batch_size=self.batch_size)\n batch_aggregator.reset()\n if val_feed_dict is not None:\n val_feed_dict = batch_converter.convert(val_feed_dict)\n for batch_data in batch_iterator:\n batch_feed_dict = batch_converter.convert(batch_data)\n if val_feed_dict is not None:\n batch_feed_dict.update(val_feed_dict)\n if parameters is not None:\n batch_feed_dict.update(parameters)\n result_dict = self.graph.run(fetch_list, batch_feed_dict, writer=writer)\n batch_aggregator.merge(result_dict)\n return batch_aggregator.result\n\n def step(self, inputs, state=None, parameters=None, context=None):\n \"\"\"Step the RNN with the given inputs and state.\"\"\"\n feed_dict = dict(\n 
initial_state=state, inputs=inputs, encoded_context=context)\n fetch_list = [self._outputs, self._final_state]\n result_dict = self.run_epoch(\n fetch_list,\n feed_dict,\n self._step_bc,\n self._step_ba,\n parameters=parameters)\n outputs = result_dict[self._outputs]\n final_state = result_dict[self._final_state]\n return outputs, final_state\n\n def train(self,\n inputs,\n targets,\n weights=None,\n baselines=None,\n context=None,\n initial_state=None,\n shuffle=True,\n update=True,\n n_epochs=1,\n parameters=None,\n writer=None,\n val_feed_dict=None,\n sim_features=None,\n env_indices=None):\n # TODO(rishabhagarwal): Create a MetaLearnRNNSeq2Seq model to pass the meta\n # learning specific arguments like sim_features and env_indices.\n if weights is None:\n weights = [1.0] * len(inputs)\n if baselines is None:\n baselines = [0.0] * len(inputs)\n\n feed_dict = dict(\n initial_state=initial_state,\n inputs=inputs,\n targets=targets,\n weights=weights,\n context=context,\n baselines=baselines)\n if env_indices is not None:\n feed_dict.update(env_indices=env_indices)\n if sim_features is not None:\n feed_dict.update(sim_features=sim_features)\n if val_feed_dict is not None:\n # Assumes that val_feed_dict is a python dict containing val_inputs,\n # val_targets, val_weights and val_context\n val_feed_dict['val_batch_size'] = len(val_feed_dict['val_inputs'])\n\n for _ in xrange(n_epochs):\n t1 = time.time()\n fetch_list = [self._loss, self._count, self._policy_ent]\n if update:\n if self.meta_learn:\n fetch_list += [self._meta_train, self._train]\n else:\n fetch_list += [self._train]\n result_dict = self.run_epoch(\n fetch_list,\n feed_dict,\n self._train_bc,\n self._train_ba,\n shuffle=shuffle,\n val_feed_dict=val_feed_dict,\n parameters=parameters,\n writer=writer)\n t2 = time.time()\n tf.logging.info('{} sec used in one epoch'.format(t2 - t1))\n total_loss = result_dict[self._loss]\n total_n = result_dict[self._count]\n avg_loss = total_loss / total_n\n wps = total_n / (t2 - t1)\n result = dict(loss=avg_loss, wps=wps)\n result['policy_entropy'] = -result_dict[self._policy_ent] / total_n\n return result\n\n def compute_probs(self,\n inputs,\n targets,\n context=None,\n initial_state=None,\n parameters=None):\n feed_dict = dict(\n initial_state=initial_state,\n inputs=inputs,\n targets=targets,\n context=context)\n ba = data_utils.BatchAggregator(tuple_keys=['sequence_probs'])\n fetch_list = ['sequence_probs']\n result_dict = self.run_epoch(\n fetch_list, feed_dict, self._train_bc, ba, parameters=parameters)\n probs = [l[0] for l in result_dict.get('sequence_probs', [])]\n return probs\n\n def compute_simple_scores(self, sim_features, env_indices=None):\n feed_dict = {}\n if sim_features is not None:\n feed_dict.update(sim_features=sim_features)\n if env_indices is not None:\n feed_dict.update(env_indices=env_indices)\n result_dict = self.graph.run(['scores'], feed_dict=feed_dict)\n return result_dict['scores']\n\n def compute_scores(self,\n inputs,\n targets,\n context=None,\n initial_state=None,\n parameters=None):\n \"\"\"Computes the scores for the attn based score function.\"\"\"\n feed_dict = dict(\n initial_state=initial_state,\n inputs=inputs,\n targets=targets,\n context=context)\n ba = data_utils.BatchAggregator(keep_keys=['scores'])\n fetch_list = ['scores']\n result_dict = self.run_epoch(\n fetch_list, feed_dict, self._train_bc, ba, parameters=parameters)\n scores = result_dict.get('scores', [])\n return scores\n\n def compute_step_logprobs(self,\n inputs,\n targets,\n 
context=None,\n initial_state=None,\n parameters=None):\n feed_dict = dict(\n initial_state=initial_state,\n inputs=inputs,\n targets=targets,\n context=context)\n ba = data_utils.BatchAggregator(seq_keys=['step_logprobs'])\n fetch_list = ['step_logprobs']\n result_dict = self.run_epoch(\n fetch_list, feed_dict, self._train_bc, ba, parameters=parameters)\n logprobs = result_dict.get('step_logprobs', [])\n return logprobs\n\n def evaluate(self,\n inputs,\n targets,\n weights=None,\n context=None,\n initial_state=None,\n writer=None):\n return self.train(\n inputs,\n targets,\n weights=weights,\n context=context,\n initial_state=initial_state,\n shuffle=False,\n update=False,\n n_epochs=1,\n writer=writer)\n\n def _predict(self, cell_outputs, predictions_node, temperature=1.0):\n fetch_list = [predictions_node]\n feed_dict = {self._outputs: cell_outputs}\n\n bc = data_utils.BatchConverter(seq_keys=[self._outputs], maxlen=self.maxlen)\n ba = data_utils.BatchAggregator(seq_keys=[predictions_node])\n\n result_dict = self.run_epoch(\n fetch_list, feed_dict, bc, ba, parameters=dict(temperature=temperature))\n outputs = result_dict[predictions_node]\n return outputs\n\n def predict(self, cell_outputs):\n outputs = self._predict(cell_outputs, predictions_node=self._predictions)\n return outputs\n\n def predict_prob(self, cell_outputs, temperature=1.0):\n return self._predict(\n cell_outputs, predictions_node=self._probs, temperature=temperature)\n\n def sampling(self, cell_outputs, temperature=1.0):\n return self._predict(\n cell_outputs, predictions_node=self._samples, temperature=temperature)\n\n\nclass RNNSeq2seqModel(RNNSeqModel):\n \"\"\"Basic seq2seq model.\"\"\"\n\n def __init__(self, graph, batch_size=32, en_maxlen=None, maxlen=None):\n \"\"\"Creates a RNN seq2seq model for a given Graph object.\"\"\"\n super(RNNSeq2seqModel, self).__init__(\n graph, batch_size=batch_size, en_maxlen=en_maxlen, maxlen=maxlen)\n self._en_outputs = graph.en_outputs\n self._initial_state = graph.initial_state\n self._en_initial_state = graph.en_initial_state\n self._encode_bc = data_utils.BatchConverter(\n tuple_keys=[self._en_initial_state], seq_keys=['context'])\n self._encode_ba = data_utils.BatchAggregator(\n tuple_keys=[self._initial_state], seq_keys=[self._en_outputs])\n\n def encode(self, en_inputs, en_initial_state=None, parameters=None):\n # The returned outputs and states can be directly used\n # in step as en_outputs (for attention) and initial\n # state (the attention context vector is already concatenated).\n feed_dict = {self._en_initial_state: en_initial_state, 'context': en_inputs}\n fetch_list = [self._en_outputs, self._initial_state]\n result_dict = self.run_epoch(\n fetch_list,\n feed_dict,\n self._encode_bc,\n self._encode_ba,\n parameters=parameters)\n outputs = result_dict[self._en_outputs]\n final_state = result_dict[self._initial_state]\n return outputs, final_state\n\n\nclass MemorySeq2seqModel(RNNSeq2seqModel):\n \"\"\"Seq2seq model with augmented with key-variable memory.\"\"\"\n\n def __init__(self, graph, batch_size=32, en_maxlen=75, maxlen=25):\n super(MemorySeq2seqModel, self).__init__(\n graph, batch_size=batch_size, en_maxlen=en_maxlen, maxlen=maxlen)\n self.max_n_valid_indices = graph.config['core_config'][\n 'max_n_valid_indices']\n self.n_mem = graph.config['core_config']['n_mem']\n self.hidden_size = graph.config['core_config']['hidden_size']\n self.value_embedding_size = graph.config['core_config'][\n 'value_embedding_size']\n preprocess_fn = lambda x: self._preprocess(x, 
maxlen=self.maxlen)\n self._encode_bc = data_utils.BatchConverter(\n seq_keys=['en_inputs', 'en_input_features'],\n tuple_keys=[\n 'en_initial_state', 'n_constants', 'constant_spans',\n 'constant_value_embeddings'\n ],\n preprocess_fn=preprocess_fn,\n maxlen=self.en_maxlen)\n self._step_bc = data_utils.BatchConverter(\n tuple_keys=['initial_state'],\n seq_keys=['encoded_context'],\n preprocess_fn=preprocess_fn,\n maxlen=self.en_maxlen)\n\n tuple_keys = ['n_constants', 'constant_spans', 'constant_value_embeddings']\n seq_keys = ['targets', 'en_inputs', 'en_input_features']\n out_keys = ['targets']\n if self.meta_learn:\n tuple_keys += ['val_{}'.format(k) for k in tuple_keys]\n seq_keys += ['val_{}'.format(k) for k in seq_keys]\n # Use decoder maxlen for padding the placeholder inputs for these keys\n out_keys += ['val_targets']\n self._train_bc = data_utils.BatchConverter(\n tuple_keys=tuple_keys,\n seq_keys=seq_keys,\n out_keys=out_keys,\n preprocess_fn=preprocess_fn,\n maxlen=self.en_maxlen,\n out_maxlen=self.maxlen)\n\n def init_pretrained_embeddings(self, pretrained_embeddings):\n self.graph.run(\n ['en_pretrained_embeddings_init'],\n feed_dict={'en_pretrained_embeddings': pretrained_embeddings})\n\n def init_score_fn(self, ctxt_scores):\n self.graph.run(['scores_init'],\n feed_dict={'scores_placeholder': ctxt_scores})\n\n def init_num_trajs(self, num_trajs):\n # TODO(rishabhagarwal): Create a common init_fn for scores and num_trajs.\n self.graph.run(['num_trajs_init'],\n feed_dict={'num_trajs_placeholder': num_trajs})\n\n def _preprocess(self, batch_dict, maxlen=None):\n prefixes = ['', 'val_'] if self.meta_learn else ['']\n for prefix in prefixes:\n context_key = prefix + 'context'\n if context_key in batch_dict:\n packed_context = batch_dict[context_key]\n del batch_dict[context_key]\n batch_dict[prefix + 'en_inputs'] = [x[0] for x in packed_context]\n constant_value_embeddings = [x[2] for x in packed_context]\n constant_value_embeddings = [\n _pad_list(cs, np.zeros(self.value_embedding_size), self.n_mem)\n for cs in constant_value_embeddings\n ]\n batch_dict[prefix + 'constant_value_embeddings'] = [\n np.array([x]) for x in constant_value_embeddings\n ]\n batch_dict[prefix + 'n_constants'] = [len(x[1]) for x in packed_context]\n constant_spans = [\n _pad_list(x[1], [-1, -1], self.n_mem) for x in packed_context\n ]\n batch_dict[prefix + 'constant_spans'] = [\n np.array([x]) for x in constant_spans\n ]\n batch_dict[prefix + 'en_input_features'] = [\n np.array(x[3]) for x in packed_context\n ]\n input_key = prefix + 'inputs'\n if input_key in batch_dict:\n processed_step_inputs = self._process_step_inputs(\n batch_dict[input_key], maxlen=maxlen)\n batch_dict[input_key] = processed_step_inputs[0]\n batch_dict[prefix + 'output_features'] = processed_step_inputs[1]\n\n def _process_step_inputs(self, inputs, maxlen=None):\n \"\"\"Turn a list of MemoryInputTuple into one MemoryInputTuple.\n\n Args:\n inputs: a list of MemoryInputTuple, like [MemTuple(1, 2, [1,2,3]),\n MemTuple(1, 2, [1,2,3])...].\n maxlen: Maximum length of a program.\n\n Returns:\n processed_inputs: a MemoryInputTuple like\n MemTuple(np.array([1, 1, ...]), np.array([2, 2, ...]),\n np.array([[1, 2, 3, -1, ...], [1, 2, 3, -1,...]))).\n \"\"\"\n read_ind = np.array([[x[0].read_ind for x in seq] for seq in inputs])\n write_ind = np.array([[x[0].write_ind for x in seq] for seq in inputs])\n valid_indices = np.array([[\n _pad_list(x[0].valid_indices, -1, self.max_n_valid_indices) for x in seq\n ] for seq in inputs])\n 
output_features = np.array(\n [[_pad_list(x[1], [0], self.max_n_valid_indices)\n for x in seq]\n for seq in inputs])\n\n read_ind_batch, sequence_length = data_utils.convert_seqs_to_batch(\n read_ind, maxlen)\n output_feature_batch, _ = data_utils.convert_seqs_to_batch(\n output_features, maxlen)\n write_ind_batch, _ = data_utils.convert_seqs_to_batch(write_ind, maxlen)\n valid_indices_batch, _ = data_utils.convert_seqs_to_batch(\n valid_indices, maxlen)\n processed_inputs = tf_utils.MemoryInputTuple(\n read_ind_batch, write_ind_batch, valid_indices_batch)\n return (processed_inputs, sequence_length), (output_feature_batch,\n sequence_length)\n\n\ndef _pad_list(lst, pad, length):\n return np.array(lst + (length - len(lst)) * [pad])\n", "# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Configurations for MAML training (Reinforcement Learning).\n\nSee maml_rl.py for usage examples.\nAn easy task to get started with is: RL_MINITAUR_POINT_CONFIG_CIRCLE.\n\"\"\"\n\n# b/128310658.\n# pytype: skip-file\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport random\nimport numpy as np\nimport tensorflow.compat.v1 as tf\n\nfrom norml import networks\nfrom norml import policies\nfrom norml.envs import cartpole_sensor_bias_env\nfrom norml.envs import halfcheetah_motor_env\nfrom norml.envs import move_point_env\n\n\ndef _early_termination_avg(rewards, num_steps, avg_reward):\n \"\"\"Early termination based on average reward.\"\"\"\n flat_reward = np.array(rewards).ravel()\n len_ok = len(flat_reward) >= num_steps\n val_ok = np.mean(flat_reward[-num_steps:]) >= avg_reward\n return len_ok and val_ok\n\nMOVE_POINT_ROTATE_MAML = dict(\n random_seed=random.randint(0, 1000000),\n num_outer_iterations=1000,\n task_generator=functools.partial(\n move_point_env.MovePointEnv,\n start_pos=(0, 0),\n end_pos=(1, 0),\n goal_reached_distance=-1,\n trial_length=10),\n task_env_modifiers=[{\n '_action_rotation': i\n } for i in np.linspace(-np.pi, np.pi, 5000)],\n network_generator=networks.FullyConnectedNetworkGenerator(\n dim_input=2,\n dim_output=2,\n layer_sizes=(\n 50,\n 50,\n ),\n activation_fn=tf.nn.tanh),\n input_dims=2,\n pol_log_std_init=-3.,\n output_dims=2,\n reward_disc=0.9,\n learn_offset=False,\n policy=policies.GaussianPolicy,\n tasks_batch_size=10,\n num_inner_rollouts=25,\n outer_optimizer_algo=tf.train.AdamOptimizer,\n advantage_function='returns-values',\n whiten_values=False,\n always_full_rollouts=False,\n inner_lr_init=0.02,\n outer_lr_init=7e-3,\n outer_lr_decay=True,\n first_order=False,\n learn_inner_lr=True,\n learn_inner_lr_tensor=True,\n fixed_tasks=False,\n ppo=True,\n ppo_clip_value=0.2,\n max_num_batch_env=1000,\n max_rollout_len=10,\n log_every=10,\n)\n\nMOVE_POINT_ROTATE_MAML_OFFSET = MOVE_POINT_ROTATE_MAML.copy()\nMOVE_POINT_ROTATE_MAML_OFFSET.update(\n learn_offset=True,\n inner_lr_init=0.1,\n outer_lr_init=3e-3,\n 
pol_log_std_init=-3.)\n\nMOVE_POINT_ROTATE_MAML_LAF = MOVE_POINT_ROTATE_MAML.copy()\nMOVE_POINT_ROTATE_MAML_LAF.update(\n learn_inner_lr=False,\n learn_inner_lr_tensor=False,\n learn_advantage_function_inner=True,\n advantage_generator=networks.FullyConnectedNetworkGenerator(\n dim_input=2 * 2 + 2,\n dim_output=1,\n layer_sizes=(\n 50,\n 50,\n ),\n activation_fn=tf.nn.tanh),\n inner_lr_init=0.7,\n outer_lr_init=6e-4,\n pol_log_std_init=-3.25)\n\nMOVE_POINT_ROTATE_NORML = MOVE_POINT_ROTATE_MAML_LAF.copy()\nMOVE_POINT_ROTATE_NORML.update(\n learn_offset=True,\n inner_lr_init=10.,\n outer_lr_init=6e-3,\n pol_log_std_init=-0.75)\n\nMOVE_POINT_ROTATE_SPARSE_MAML = MOVE_POINT_ROTATE_MAML.copy()\nMOVE_POINT_ROTATE_SPARSE_MAML.update(\n max_rollout_len=100,\n task_generator=functools.partial(\n move_point_env.MovePointEnv,\n start_pos=(0, 0),\n end_pos=(1, 0),\n goal_reached_distance=0.1,\n trial_length=100,\n sparse_reward=True),\n inner_lr_init=1e-4,\n outer_lr_init=2e-3,\n pol_log_std_init=-1.25)\n\nMOVE_POINT_ROTATE_SPARSE_MAML_OFFSET = MOVE_POINT_ROTATE_MAML_OFFSET.copy()\nMOVE_POINT_ROTATE_SPARSE_MAML_OFFSET.update(\n max_rollout_len=100,\n task_generator=functools.partial(\n move_point_env.MovePointEnv,\n start_pos=(0, 0),\n end_pos=(1, 0),\n goal_reached_distance=0.1,\n trial_length=100,\n sparse_reward=True),\n inner_lr_init=7.,\n outer_lr_init=2e-3,\n pol_log_std_init=-0.5)\n\nMOVE_POINT_ROTATE_SPARSE_MAML_LAF = MOVE_POINT_ROTATE_MAML_LAF.copy()\nMOVE_POINT_ROTATE_SPARSE_MAML_LAF.update(\n max_rollout_len=100,\n task_generator=functools.partial(\n move_point_env.MovePointEnv,\n start_pos=(0, 0),\n end_pos=(1, 0),\n goal_reached_distance=0.1,\n trial_length=100,\n sparse_reward=True),\n inner_lr_init=2e-5,\n outer_lr_init=1e-3,\n pol_log_std_init=-1.)\n\nMOVE_POINT_ROTATE_SPARSE_NORML = MOVE_POINT_ROTATE_NORML.copy()\nMOVE_POINT_ROTATE_SPARSE_NORML.update(\n max_rollout_len=100,\n task_generator=functools.partial(\n move_point_env.MovePointEnv,\n start_pos=(0, 0),\n end_pos=(1, 0),\n goal_reached_distance=0.1,\n trial_length=100,\n sparse_reward=True),\n inner_lr_init=9.,\n outer_lr_init=2.6e-3,\n pol_log_std_init=-0.6)\n\nCARTPOLE_SENSOR_DR = dict(\n random_seed=random.randint(0, 1000000),\n num_outer_iterations=1000,\n task_generator=functools.partial(\n cartpole_sensor_bias_env.CartpoleSensorBiasEnv),\n task_env_modifiers=[{\n '_angle_observation_bias': theta\n } for theta in np.linspace(-np.pi / 18, np.pi / 18, 5000)],\n network_generator=networks.FullyConnectedNetworkGenerator(\n dim_input=4,\n dim_output=1,\n layer_sizes=(\n 50,\n 50,\n ),\n activation_fn=tf.nn.tanh),\n input_dims=4,\n pol_log_std_init=-4.0,\n output_dims=1,\n reward_disc=0.97,\n learn_offset=False,\n policy=policies.GaussianPolicy,\n tasks_batch_size=10,\n num_inner_rollouts=25,\n outer_optimizer_algo=tf.train.AdamOptimizer,\n advantage_function='returns-values',\n whiten_values=False,\n always_full_rollouts=False,\n inner_lr_init=0.,\n outer_lr_init=2e-4,\n outer_lr_decay=True,\n first_order=False,\n learn_inner_lr=False,\n learn_inner_lr_tensor=False,\n fixed_tasks=False,\n ppo=True,\n ppo_clip_value=0.2,\n max_num_batch_env=1000,\n max_rollout_len=500,\n log_every=10,\n)\n\nCARTPOLE_SENSOR_MAML = CARTPOLE_SENSOR_DR.copy()\nCARTPOLE_SENSOR_MAML.update(\n learn_inner_lr=True,\n learn_inner_lr_tensor=True,\n inner_lr_init=1e-2,\n outer_lr_init=1e-2,\n pol_log_std_init=-0.5)\n\nCARTPOLE_SENSOR_MAML_OFFSET = CARTPOLE_SENSOR_MAML.copy()\nCARTPOLE_SENSOR_MAML_OFFSET.update(\n learn_offset=True,\n inner_lr_init=1e-1,\n 
outer_lr_init=4e-4,\n pol_log_std_init=-3.5)\n\nCARTPOLE_SENSOR_MAML_LAF = CARTPOLE_SENSOR_MAML.copy()\nCARTPOLE_SENSOR_MAML_LAF.update(\n learn_advantage_function_inner=True,\n advantage_generator=networks.FullyConnectedNetworkGenerator(\n dim_input=2 * 4 + 1,\n dim_output=1,\n layer_sizes=(\n 50,\n 50,\n ),\n activation_fn=tf.nn.tanh),\n inner_lr_init=1e-5,\n outer_lr_init=3e-3,\n pol_log_std_init=-0.5)\n\nCARTPOLE_SENSOR_NORML = CARTPOLE_SENSOR_MAML_LAF.copy()\nCARTPOLE_SENSOR_NORML.update(\n learn_offset=True,\n inner_lr_init=7e-4,\n outer_lr_init=3e-4,\n pol_log_std_init=-3.5)\n\nHALFCHEETAH_MOTOR_DR = dict(\n random_seed=random.randint(0, 1000000),\n num_outer_iterations=1000,\n task_generator=functools.partial(halfcheetah_motor_env.HalfcheetahMotorEnv),\n task_env_modifiers=[{\n '_swap_action': True\n }, {\n '_swap_action': False\n }] * 2,\n network_generator=networks.FullyConnectedNetworkGenerator(\n dim_input=14,\n dim_output=6,\n layer_sizes=(100,),\n activation_fn=tf.identity),\n pol_log_std_init=-1.61,\n input_dims=14,\n output_dims=6,\n reward_disc=0.99,\n learn_offset=False,\n policy=policies.GaussianPolicy,\n tasks_batch_size=4,\n num_inner_rollouts=50,\n outer_optimizer_algo=tf.train.AdamOptimizer,\n advantage_function='returns-values',\n whiten_values=True,\n always_full_rollouts=False, # also learn from failures?\n inner_lr_init=0.,\n outer_lr_init=0.0012,\n outer_lr_decay=True,\n first_order=False,\n learn_inner_lr=False,\n learn_inner_lr_tensor=False,\n fixed_tasks=False,\n log_every=10,\n ppo=True,\n ppo_clip_value=0.2,\n max_rollout_len=1000,\n max_num_batch_env=300,\n)\n\nHALFCHEETAH_MOTOR_MAML = HALFCHEETAH_MOTOR_DR.copy()\nHALFCHEETAH_MOTOR_MAML.update(\n learn_inner_lr=True,\n learn_inner_lr_tensor=True,\n learn_offset=False,\n inner_lr_init=4.5e-4,\n outer_lr_init=8.7e-5,\n pol_log_std_init=-1.17)\n\nHALFCHEETAH_MOTOR_MAML_OFFSET = HALFCHEETAH_MOTOR_MAML.copy()\nHALFCHEETAH_MOTOR_MAML_OFFSET.update(\n learn_offset=True,\n inner_lr_init=5e-5,\n outer_lr_init=1.5e-4,\n pol_log_std_init=-0.8)\n\nHALFCHEETAH_MOTOR_MAML_LAF = HALFCHEETAH_MOTOR_MAML.copy()\nHALFCHEETAH_MOTOR_MAML_LAF.update(\n learn_advantage_function_inner=True,\n advantage_generator=networks.FullyConnectedNetworkGenerator(\n dim_input=14 * 2 + 6,\n dim_output=1,\n layer_sizes=(50,),\n activation_fn=tf.nn.relu),\n learn_inner_lr=False,\n learn_inner_lr_tensor=False,\n inner_lr_init=3e-5,\n outer_lr_init=5e-4,\n pol_log_std_init=-1.5)\n\nHALFCHEETAH_MOTOR_NORML = HALFCHEETAH_MOTOR_MAML_LAF.copy()\nHALFCHEETAH_MOTOR_NORML.update(\n learn_offset=True,\n inner_lr_init=3e-5,\n outer_lr_init=5e-4,\n pol_log_std_init=-1.9)\n", "# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Helper file to run the discover concept algorithm in the toy dataset.\"\"\"\n# lint as: python3\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport pickle\nfrom absl import app\nimport keras.backend 
as K\nfrom keras.layers import Conv2D\nfrom keras.layers import Dense\nfrom keras.layers import Flatten\nfrom keras.layers import Input\nfrom keras.layers import MaxPooling2D\nfrom keras.models import Model\n\nfrom keras.optimizers import Adam\nfrom keras.optimizers import SGD\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\nfrom matplotlib.figure import Figure\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom numpy.random import seed\nfrom skimage.segmentation import felzenszwalb\nfrom sklearn.cluster import KMeans\nfrom sklearn.decomposition import PCA\nfrom sklearn.linear_model import LogisticRegression\nfrom tensorflow.compat.v1 import set_random_seed\n\nseed(0)\nset_random_seed(0)\nbatch_size = 128\n\n\ndef load_xyconcept(n, pretrain):\n \"\"\"Loads data and create label for toy dataset.\"\"\"\n concept = np.load('concept_data.npy')\n y = np.zeros((n, 15))\n y[:, 0] = ((1 - concept[:, 0] * concept[:, 2]) + concept[:, 3]) > 0\n y[:, 1] = concept[:, 1] + (concept[:, 2] * concept[:, 3])\n y[:, 2] = (concept[:, 3] * concept[:, 4]) + (concept[:, 1] * concept[:, 2])\n y[:, 3] = np.bitwise_xor(concept[:, 0], concept[:, 1])\n y[:, 4] = concept[:, 1] + concept[:, 4]\n y[:, 5] = (1 - (concept[:, 0] + concept[:, 3] + concept[:, 4])) > 0\n y[:, 6] = np.bitwise_xor(concept[:, 1] * concept[:, 2], concept[:, 4])\n y[:, 7] = concept[:, 0] * concept[:, 4] + concept[:, 1]\n y[:, 8] = concept[:, 2]\n y[:, 9] = np.bitwise_xor(concept[:, 0] + concept[:, 1], concept[:, 3])\n y[:, 10] = (1 - (concept[:, 2] + concept[:, 4])) > 0\n y[:, 11] = concept[:, 0] + concept[:, 3] + concept[:, 4]\n y[:, 12] = np.bitwise_xor(concept[:, 1], concept[:, 2])\n y[:, 13] = (1 - (concept[:, 0] * concept[:, 4] + concept[:, 3])) > 0\n y[:, 14] = np.bitwise_xor(concept[:, 4], concept[:, 3])\n if not pretrain:\n x = np.load('x_data.npy') / 255.0\n return x, y, concept\n return 0, y, concept\n\n\ndef target_category_loss(x, category_index, nb_classes):\n return x * K.one_hot([category_index], nb_classes)\n\n\ndef load_model(x_train, y_train, x_val, y_val, width=216, \\\n height=216, channel=3, pretrain=True):\n \"\"\"Loads pretrain model or train one.\"\"\"\n input1 = Input(\n shape=(\n width,\n height,\n channel,\n ), name='concat_input')\n conv1 = Conv2D(64, kernel_size=3, activation='relu')\n conv2 = Conv2D(64, kernel_size=3, activation='relu')\n conv3 = Conv2D(32, kernel_size=3, activation='relu')\n conv4 = Conv2D(32, kernel_size=3, activation='relu')\n conv5 = Conv2D(16, kernel_size=3, activation='relu')\n dense1 = Dense(200, activation='relu')\n dense2 = Dense(100, activation='relu')\n predict = Dense(15, activation='sigmoid')\n conv1 = conv1(input1)\n conv2 = conv2(conv1)\n conv3 = conv3(conv2)\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv3)\n conv4 = conv4(pool1)\n conv5 = conv5(conv4)\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv5)\n pool2f = Flatten()(pool2)\n fc1 = dense1(pool2f)\n fc2 = dense2(fc1)\n softmax1 = predict(fc2)\n\n mlp = Model(input1, softmax1)\n if pretrain:\n mlp.load_weights('conv_s13.h5')\n mlp.compile(\n loss='binary_crossentropy',\n optimizer=Adam(lr=0.0001),\n metrics=['binary_accuracy'])\n if not pretrain:\n _ = mlp.fit(\n x_train,\n y_train,\n batch_size=batch_size,\n epochs=5,\n verbose=1,\n validation_data=(x_val, y_val))\n mlp.save_weights('conv_s13.h5')\n for layer in mlp.layers:\n layer.trainable = False\n feature_dense_model = Model(input1, fc1)\n return dense2, predict, feature_dense_model\n\n\ndef get_ace_concept(concept_arraynew_active, dense2, 
predict, f_train,\n n_concept):\n \"\"\"Calculates ACE/TCAV concepts.\"\"\"\n concept_input = Input(shape=(200,), name='concept_input')\n fc2_tcav = dense2(concept_input)\n softmax_tcav = predict(fc2_tcav)\n tcav_model = Model(inputs=concept_input, outputs=softmax_tcav)\n tcav_model.layers[-1].activation = None\n tcav_model.layers[-1].trainable = False\n tcav_model.layers[-2].trainable = False\n tcav_model.compile(\n loss='mean_squared_error',\n optimizer=SGD(lr=0.0),\n metrics=['binary_accuracy'])\n tcav_model.summary()\n\n n_cluster = concept_arraynew_active.shape[0]\n n_percluster = concept_arraynew_active.shape[1]\n print(concept_arraynew_active.shape)\n weight_ace = np.zeros((200, n_cluster))\n tcav_list_rand = np.zeros((15, 200))\n tcav_list_ace = np.zeros((15, n_cluster))\n for i in range(n_cluster):\n y = np.zeros((n_cluster * n_percluster))\n y[i * n_percluster:(i + 1) * n_percluster] = 1\n clf = LogisticRegression(\n random_state=0,\n solver='lbfgs',\n max_iter=10000,\n C=10.0,\n multi_class='ovr').fit(concept_arraynew_active.reshape((-1, 200)), y)\n weight_ace[:, i] = clf.coef_\n\n weight_rand = np.zeros((200, 200))\n for i in range(200):\n y = np.random.randint(2, size=n_cluster * n_percluster)\n clf = LogisticRegression(\n random_state=0,\n solver='lbfgs',\n max_iter=10000,\n C=10.0,\n multi_class='ovr').fit(concept_arraynew_active.reshape((-1, 200)), y)\n weight_rand[:, i] = clf.coef_\n\n sig_list = np.zeros(n_cluster)\n\n for j in range(15):\n grads = (\n K.gradients(target_category_loss(softmax_tcav, j, 15),\n concept_input)[0])\n gradient_function = K.function([tcav_model.input], [grads])\n grads_val = gradient_function([f_train])[0]\n grad_rand = np.matmul(grads_val, weight_rand)\n grad_ace = np.matmul(grads_val, weight_ace)\n tcav_list_rand[j, :] = np.sum(grad_rand > 0.000, axis=(0))\n tcav_list_ace[j, :] = np.sum(grad_ace > 0.000, axis=(0))\n mean = np.mean(tcav_list_rand[j, :])\n std = np.std(tcav_list_rand[j, :])\n sig_list += (tcav_list_ace[j, :] > mean + std * 1.0).astype(int)\n top_k_index = np.array(sig_list).argsort()[-1 * n_concept:][::-1]\n print(sig_list)\n print(top_k_index)\n return weight_ace[:, top_k_index]\n\n\ndef get_pca_concept(f_train, n_concept):\n pca = PCA()\n pca.fit(f_train)\n weight_pca = np.zeros((200, n_concept))\n for count, pc in enumerate(pca.components_):\n if count >= n_concept:\n break\n weight_pca[:, count] = pc\n return weight_pca\n\n\ndef create_dataset(n_sample=60000):\n \"\"\"Creates toy dataset and save to disk.\"\"\"\n concept = np.reshape(np.random.randint(2, size=15 * n_sample),\n (-1, 15)).astype(np.bool_)\n concept[:15, :15] = np.eye(15)\n fig = Figure(figsize=(3, 3))\n canvas = FigureCanvas(fig)\n axes = fig.gca()\n axes.set_xlim([0, 10])\n axes.set_ylim([0, 10])\n axes.axis('off')\n width, height = fig.get_size_inches() * fig.get_dpi()\n width = int(width)\n height = int(height)\n location = [(1.3, 1.3), (3.3, 1.3), (5.3, 1.3), (7.3, 1.3), (1.3, 3.3),\n (3.3, 3.3), (5.3, 3.3), (7.3, 3.3), (1.3, 5.3), (3.3, 5.3),\n (5.3, 5.3), (7.3, 5.3), (1.3, 7.3), (3.3, 7.3), (5.3, 7.3)]\n location_bool = np.zeros(15)\n x = np.zeros((n_sample, width, height, 3))\n color_array = ['green', 'red', 'blue', 'black', 'orange', 'purple', 'yellow']\n\n for i in range(n_sample):\n if i % 1000 == 0:\n print('{} images are created'.format(i))\n if concept[i, 5] == 1:\n a = np.random.randint(15)\n while location_bool[a] == 1:\n a = np.random.randint(15)\n location_bool[a] = 1\n axes.plot(\n location[a][0],\n location[a][1],\n 'x',\n 
color=color_array[np.random.randint(100) % 7],\n markersize=20,\n mew=4,\n ms=8)\n if concept[i, 6] == 1:\n a = np.random.randint(15)\n while location_bool[a] == 1:\n a = np.random.randint(15)\n location_bool[a] = 1\n axes.plot(\n location[a][0],\n location[a][1],\n '3',\n color=color_array[np.random.randint(100) % 7],\n markersize=20,\n mew=4,\n ms=8)\n if concept[i, 7] == 1:\n a = np.random.randint(15)\n while location_bool[a] == 1:\n a = np.random.randint(15)\n location_bool[a] = 1\n axes.plot(\n location[a][0],\n location[a][1],\n 's',\n color=color_array[np.random.randint(100) % 7],\n markersize=20,\n mew=4,\n ms=8)\n if concept[i, 8] == 1:\n a = np.random.randint(15)\n while location_bool[a] == 1:\n a = np.random.randint(15)\n location_bool[a] = 1\n axes.plot(\n location[a][0],\n location[a][1],\n 'p',\n color=color_array[np.random.randint(100) % 7],\n markersize=20,\n mew=4,\n ms=8)\n if concept[i, 9] == 1:\n a = np.random.randint(15)\n while location_bool[a] == 1:\n a = np.random.randint(15)\n location_bool[a] = 1\n axes.plot(\n location[a][0],\n location[a][1],\n '_',\n color=color_array[np.random.randint(100) % 7],\n markersize=20,\n mew=4,\n ms=8)\n if concept[i, 10] == 1:\n a = np.random.randint(15)\n while location_bool[a] == 1:\n a = np.random.randint(15)\n location_bool[a] = 1\n axes.plot(\n location[a][0],\n location[a][1],\n 'd',\n color=color_array[np.random.randint(100) % 7],\n markersize=20,\n mew=4,\n ms=8)\n if concept[i, 11] == 1:\n a = np.random.randint(15)\n while location_bool[a] == 1:\n a = np.random.randint(15)\n location_bool[a] = 1\n axes.plot(\n location[a][0],\n location[a][1],\n 'd',\n color=color_array[np.random.randint(100) % 7],\n markersize=20,\n mew=4,\n ms=8)\n if concept[i, 12] == 1:\n a = np.random.randint(15)\n while location_bool[a] == 1:\n a = np.random.randint(15)\n location_bool[a] = 1\n axes.plot(\n location[a][0],\n location[a][1],\n 11,\n color=color_array[np.random.randint(100) % 7],\n markersize=20,\n mew=4,\n ms=8)\n if concept[i, 13] == 1:\n a = np.random.randint(15)\n while location_bool[a] == 1:\n a = np.random.randint(15)\n location_bool[a] = 1\n axes.plot(\n location[a][0],\n location[a][1],\n 'o',\n color=color_array[np.random.randint(100) % 7],\n markersize=20,\n mew=4,\n ms=8)\n if concept[i, 14] == 1:\n a = np.random.randint(15)\n while location_bool[a] == 1:\n a = np.random.randint(15)\n location_bool[a] = 1\n axes.plot(\n location[a][0],\n location[a][1],\n '.',\n color=color_array[np.random.randint(100) % 7],\n markersize=20,\n mew=4,\n ms=8)\n if concept[i, 0] == 1:\n a = np.random.randint(15)\n while location_bool[a] == 1:\n a = np.random.randint(15)\n location_bool[a] = 1\n axes.plot(\n location[a][0],\n location[a][1],\n '+',\n color=color_array[np.random.randint(100) % 7],\n markersize=20,\n mew=4,\n ms=8)\n if concept[i, 1] == 1:\n a = np.random.randint(15)\n while location_bool[a] == 1:\n a = np.random.randint(15)\n location_bool[a] = 1\n axes.plot(\n location[a][0],\n location[a][1],\n '1',\n color=color_array[np.random.randint(100) % 7],\n markersize=20,\n mew=4,\n ms=8)\n if concept[i, 2] == 1:\n a = np.random.randint(15)\n while location_bool[a] == 1:\n a = np.random.randint(15)\n location_bool[a] = 1\n axes.plot(\n location[a][0],\n location[a][1],\n '*',\n color=color_array[np.random.randint(100) % 7],\n markersize=30,\n mew=3,\n ms=5)\n if concept[i, 3] == 1:\n a = np.random.randint(15)\n while location_bool[a] == 1:\n a = np.random.randint(15)\n location_bool[a] = 1\n axes.plot(\n location[a][0],\n 
location[a][1],\n '<',\n color=color_array[np.random.randint(100) % 7],\n markersize=20,\n mew=4,\n ms=8)\n if concept[i, 4] == 1:\n a = np.random.randint(15)\n while location_bool[a] == 1:\n a = np.random.randint(15)\n location_bool[a] = 1\n axes.plot(\n location[a][0],\n location[a][1],\n 'h',\n color=color_array[np.random.randint(100) % 7],\n markersize=20,\n mew=4,\n ms=8)\n canvas.draw()\n image = np.frombuffer(\n canvas.tostring_rgb(), dtype='uint8').reshape(width, height, 3)\n x[i, :, :, :] = image\n # imgplot = plt.imshow(image)\n # plt.show()\n\n # Create labels from boolean functions of the concepts.\n y = np.zeros((n_sample, 15))\n y[:, 0] = ((1 - concept[:, 0] * concept[:, 2]) + concept[:, 3]) > 0\n y[:, 1] = concept[:, 1] + (concept[:, 2] * concept[:, 3])\n y[:, 2] = (concept[:, 3] * concept[:, 4]) + (concept[:, 1] * concept[:, 2])\n y[:, 3] = np.bitwise_xor(concept[:, 0], concept[:, 1])\n y[:, 4] = concept[:, 1] + concept[:, 4]\n y[:, 5] = (1 - (concept[:, 0] + concept[:, 3] + concept[:, 4])) > 0\n y[:, 6] = np.bitwise_xor(concept[:, 1] * concept[:, 2], concept[:, 4])\n y[:, 7] = concept[:, 0] * concept[:, 4] + concept[:, 1]\n y[:, 8] = concept[:, 2]\n y[:, 9] = np.bitwise_xor(concept[:, 0] + concept[:, 1], concept[:, 3])\n y[:, 10] = (1 - (concept[:, 2] + concept[:, 4])) > 0\n y[:, 11] = concept[:, 0] + concept[:, 3] + concept[:, 4]\n y[:, 12] = np.bitwise_xor(concept[:, 1], concept[:, 2])\n y[:, 13] = (1 - (concept[:, 0] * concept[:, 4] + concept[:, 3])) > 0\n y[:, 14] = np.bitwise_xor(concept[:, 4], concept[:, 3])\n\n np.save('x_data.npy', x)\n np.save('y_data.npy', y)\n np.save('concept_data.npy', concept)\n\n return width, height\n\n\ndef get_groupacc(finetuned_model_pr, concept_arraynew2, f_train, f_val, concept,\n n_concept, n_cluster, n0, verbose):\n \"\"\"Gets the group accuracy for discovered concepts.\"\"\"\n print(finetuned_model_pr.summary())\n min_weight = finetuned_model_pr.layers[-5].get_weights()[0]\n sim_array = np.zeros((n_cluster, n_concept))\n for j in range(n_cluster):\n sim_array[j, :] = np.mean(\n np.matmul(concept_arraynew2[j, :100, :], min_weight), axis=0)\n\n posneg = np.zeros(5)\n sim_array_0mean = sim_array - np.mean(sim_array, axis=0)\n max_cluster = np.argmax(np.abs(sim_array_0mean), axis=0)\n for count in range(5):\n posneg[count] = sim_array_0mean[max_cluster[count], count] > 0\n loss_table = np.zeros((5, 5))\n for count in range(5):\n for count2 in range(5):\n # count2 = max_cluster[count]\n mean0 = np.mean(\n np.matmul(f_train, min_weight[:, count])[concept[:n0,\n count2] == 0]) * 100\n mean1 = np.mean(\n np.matmul(f_train, min_weight[:, count])[concept[:n0,\n count2] == 1]) * 100\n\n if mean0 < mean1:\n pos = 1\n else:\n pos = -1\n best_err = 1e10\n best_bias = 0\n a = int((mean1 - mean0) / 20)\n if a == 0:\n a = pos\n for bias in range(int(mean0), int(mean1), a):\n if pos == 1:\n if np.sum(\n np.bitwise_xor(\n concept[:n0, count2],\n np.matmul(f_train, min_weight[:, count]) >\n bias / 100.)) < best_err:\n best_err = np.sum(\n np.bitwise_xor(\n concept[:n0, count2],\n np.matmul(f_train, min_weight[:, count]) > bias / 100.))\n best_bias = bias\n else:\n if np.sum(\n np.bitwise_xor(\n concept[:n0, count2],\n np.matmul(f_train, min_weight[:, count]) <\n bias / 100.)) < best_err:\n best_err = np.sum(\n np.bitwise_xor(\n concept[:n0, count2],\n np.matmul(f_train, min_weight[:, count]) < bias / 100.))\n best_bias = bias\n if pos == 1:\n loss_table[count, count2] = np.sum(\n np.bitwise_xor(\n concept[n0:, count2],\n np.matmul(f_val, min_weight[:, count]) >\n best_bias 
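The label construction above builds each class from OR (`+` followed by a `> 0` cast), AND (`*`), and XOR (`np.bitwise_xor`) of 0/1 concept columns. A tiny truth-table check of those identities on hypothetical two-concept data:

import numpy as np

c = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.bool_)
y_or = (c[:, 0] + c[:, 1]) > 0            # [F, T, T, T]
y_and = c[:, 0] * c[:, 1]                 # [F, F, F, T]
y_xor = np.bitwise_xor(c[:, 0], c[:, 1])  # [F, T, T, F]
print(y_or, y_and, y_xor)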
/ 100.)) / 12000\n if verbose:\n print(np.sum(\n np.bitwise_xor(\n concept[n0:, count2],\n np.matmul(f_val, min_weight[:, count]) > best_bias / 100.))\n / 12000)\n else:\n loss_table[count, count2] = np.sum(\n np.bitwise_xor(\n concept[n0:, count2],\n np.matmul(f_val, min_weight[:, count]) <\n best_bias / 100.)) / 12000\n if verbose:\n print(np.sum(\n np.bitwise_xor(\n concept[n0:, count2],\n np.matmul(f_val, min_weight[:, count]) < best_bias / 100.))\n / 12000)\n print(np.amin(loss_table, axis=0))\n acc = np.mean(np.amin(loss_table, axis=0))\n print(acc)\n return min_weight, acc\n\n\ndef create_feature(x, width, height, feature_dense_model):\n \"\"\"Saves embeddings to disk to avoid recomputation.\"\"\"\n feature_sp = np.zeros((40000 * 10, 200))\n end = 0\n group_array = []\n for i in range(48000):\n if i % 1000 == 0:\n print('{} embeddings are created'.format(i))\n img = x[i, :, :, :]\n _ = plt.imshow(img)\n # plt.show()\n segments_fz = felzenszwalb(img, scale=100, sigma=.2, min_size=50)\n segments = len(np.unique(segments_fz))\n temp_arr = np.ones((segments, width, height, 3))\n for j in range(segments):\n temp_arr[j, segments_fz == j, :] = img[segments_fz == j, :]\n # imgplot = plt.imshow(temp_arr[j,:,:,:])\n # plt.show()\n aa = feature_dense_model.predict(temp_arr)\n if i <= 40000:\n feature_sp[end:end + segments, :] = aa\n end += segments\n group_array.append(aa)\n feature_sp = feature_sp[:end, :]\n all_feature_dense = feature_dense_model.predict(x)\n with open('group_array.pickle', 'wb') as handle:\n pickle.dump(group_array, handle, protocol=pickle.HIGHEST_PROTOCOL)\n np.save('all_feature_dense.npy', all_feature_dense)\n np.save('feature_sp.npy', feature_sp)\n\n\ndef create_cluster(concept):\n \"\"\"Creates a self-discovered clustering.\"\"\"\n with open('group_array.pickle', 'rb') as handle:\n group_array = pickle.load(handle)\n feature_sp = np.load('feature_sp.npy')\n all_feature_dense = np.load('all_feature_dense.npy')\n kmeans = KMeans(n_clusters=20, random_state=0).fit(feature_sp[:100000])\n concept_new = np.zeros((10000, 20))\n for i in range(10000):\n temp_cluster = kmeans.predict(group_array[i])\n concept_new[i, temp_cluster] = 1\n\n concept_arraynew = np.zeros((20, 300, 200))\n # Returns concepts found in unsupervised way.\n for i in range(20):\n print(i)\n concept_arraynew[i, :] = all_feature_dense[:10000, :][concept_new[:, i] ==\n 1, :][:300, :]\n concept_arraynew2 = np.zeros((15, 300, 200))\n\n # Returns concepts found in supervised way.\n for i in range(15):\n concept_arraynew2[i, :] = all_feature_dense[:60000, :][concept[:, i] ==\n 1, :][:300, :]\n np.save('concept_arraynew.npy', concept_arraynew)\n np.save('concept_arraynew2.npy', concept_arraynew2)\n\n\ndef main(argv):\n if len(argv) > 1:\n raise app.UsageError('Too many command-line arguments.')\n\n\nif __name__ == '__main__':\n app.run(main)\n", "# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Hyperparameters of the structured 
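create_feature and create_cluster above implement a segment-embed-cluster loop: Felzenszwalb superpixels are cropped out, embedded, and k-means groups the embeddings into candidate concepts. A hedged sketch of the same pipeline follows; `embed` stands in for any crop-to-200-d model and is an assumption, not a function from the file.

import numpy as np
from skimage.segmentation import felzenszwalb
from sklearn.cluster import KMeans

def segment_embeddings(img, embed):
  # Oversegment the image, then embed each segment on a white background.
  seg = felzenszwalb(img, scale=100, sigma=0.2, min_size=50)
  crops = []
  for j in range(seg.max() + 1):
    crop = np.ones_like(img, dtype=np.float64)
    crop[seg == j] = img[seg == j]
    crops.append(crop)
  return embed(np.stack(crops))  # (num_segments, 200)

def cluster_segments(all_embeddings, n_clusters=20):
  # Each k-means cluster of segment embeddings is a candidate concept.
  return KMeans(n_clusters=n_clusters, random_state=0).fit(all_embeddings)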
video prediction models.\"\"\"\n\nimport tensorflow.compat.v1 as tf\n\n\nclass ConfigDict(dict):\n \"\"\"A dictionary whose keys can be accessed as attributes.\"\"\"\n\n def __getattr__(self, name):\n try:\n return self[name]\n except KeyError:\n raise AttributeError(name)\n\n def __setattr__(self, name, value):\n self[name] = value\n\n def get(self, key, default=None):\n \"\"\"Allows specifying defaults when accessing the config.\"\"\"\n if key not in self:\n return default\n return self[key]\n\n\ndef get_config():\n \"\"\"Default values for all hyperparameters.\"\"\"\n\n cfg = ConfigDict()\n\n # Directories:\n cfg.dataset = 'debug'\n cfg.data_dir = 'video_structure/testdata'\n cfg.train_dir = ''\n cfg.test_dir = ''\n\n # Architecture:\n cfg.layers_per_scale = 2\n cfg.conv_layer_kwargs = _conv_layer_kwargs()\n cfg.dense_layer_kwargs = _dense_layer_kwargs()\n\n # Optimization:\n cfg.batch_size = 32\n cfg.steps_per_epoch = 100\n cfg.num_epochs = 100\n cfg.learning_rate = 0.001\n cfg.clipnorm = 10\n\n # Image sequence parameters:\n cfg.observed_steps = 8\n cfg.predicted_steps = 8\n\n # Keypoint encoding settings:\n cfg.num_keypoints = 64\n cfg.heatmap_width = 16\n cfg.heatmap_regularization = 5.0\n cfg.keypoint_width = 1.5\n cfg.num_encoder_filters = 32\n cfg.separation_loss_scale = 10.0\n cfg.separation_loss_sigma = 0.1\n\n # Dynamics:\n cfg.num_rnn_units = 512\n cfg.prior_net_dim = 128\n cfg.posterior_net_dim = 128\n cfg.latent_code_size = 16\n cfg.kl_loss_scale = 0.0\n cfg.kl_annealing_steps = 1000\n cfg.use_deterministic_belief = False\n cfg.scheduled_sampling_ramp_steps = (\n cfg.steps_per_epoch * int(cfg.num_epochs * 0.8))\n cfg.scheduled_sampling_p_true_start_obs = 1.0\n cfg.scheduled_sampling_p_true_end_obs = 0.1\n cfg.scheduled_sampling_p_true_start_pred = 1.0\n cfg.scheduled_sampling_p_true_end_pred = 0.5\n cfg.num_samples_for_bom = 10\n\n return cfg\n\n\ndef _conv_layer_kwargs():\n \"\"\"Returns a ConfigDict with default conv layer hyperparameters.\"\"\"\n\n cfg = ConfigDict()\n\n cfg.kernel_size = 3\n cfg.padding = 'same'\n cfg.activation = tf.nn.leaky_relu\n cfg.kernel_regularizer = tf.keras.regularizers.l2(1e-4)\n\n # He-uniform initialization is suggested by this paper:\n # https://arxiv.org/abs/1803.01719\n # The paper only considers ReLU units and it might be different for leaky\n # ReLU, but it is a better guess than Glorot.\n cfg.kernel_initializer = 'he_uniform'\n\n return cfg\n\n\ndef _dense_layer_kwargs():\n \"\"\"Returns a ConfigDict with default dense layer hyperparameters.\"\"\"\n\n cfg = ConfigDict()\n cfg.activation = tf.nn.relu\n cfg.kernel_initializer = 'he_uniform'\n\n return cfg\n", "# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Densenet model configuration.\n\nReferences:\n \"Densely Connected Convolutional Networks\": https://arxiv.org/pdf/1608.06993\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy 
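Usage sketch for the ConfigDict above, assuming the module it lives in has been imported: keys read and write as attributes, and `get` supplies defaults for optional hyperparameters.

cfg = get_config()
cfg.batch_size = 16                 # attribute-style write
print(cfg.num_keypoints)            # attribute-style read -> 64
print(cfg.get('missing_key', 1.0))  # default for absent keys -> 1.0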
as np\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nimport tensorflow.compat.v1 as tf\nfrom cnn_quantization.tf_cnn_benchmarks.models import model as model_lib\n\n\nclass DensenetCifar10Model(model_lib.CNNModel):\n \"\"\"Densenet cnn network configuration.\"\"\"\n\n def __init__(self, model, layer_counts, growth_rate, params=None):\n self.growth_rate = growth_rate\n super(DensenetCifar10Model, self).__init__(\n model, 32, 64, 0.1, layer_counts=layer_counts, params=params)\n self.batch_norm_config = {'decay': 0.9, 'epsilon': 1e-5, 'scale': True}\n\n def dense_block(self, cnn, growth_rate):\n input_layer = cnn.top_layer\n c = cnn.batch_norm(input_layer, **self.batch_norm_config)\n c = tf.nn.relu(c)\n c = cnn.conv(growth_rate, 3, 3, 1, 1, stddev=np.sqrt(2.0/9/growth_rate),\n activation=None, input_layer=c)\n channel_index = 3 if cnn.channel_pos == 'channels_last' else 1\n cnn.top_layer = tf.concat([input_layer, c], channel_index)\n cnn.top_size += growth_rate\n\n def transition_layer(self, cnn):\n in_size = cnn.top_size\n cnn.batch_norm(**self.batch_norm_config)\n cnn.top_layer = tf.nn.relu(cnn.top_layer)\n cnn.conv(in_size, 1, 1, 1, 1, stddev=np.sqrt(2.0/9/in_size))\n cnn.apool(2, 2, 2, 2)\n\n def add_inference(self, cnn):\n if self.layer_counts is None:\n raise ValueError('Layer counts not specified for %s' % self.get_model())\n if self.growth_rate is None:\n raise ValueError('Growth rate not specified for %s' % self.get_model())\n\n cnn.conv(16, 3, 3, 1, 1, activation=None)\n # Block 1\n for _ in xrange(self.layer_counts[0]):\n self.dense_block(cnn, self.growth_rate)\n self.transition_layer(cnn)\n # Block 2\n for _ in xrange(self.layer_counts[1]):\n self.dense_block(cnn, self.growth_rate)\n self.transition_layer(cnn)\n # Block 3\n for _ in xrange(self.layer_counts[2]):\n self.dense_block(cnn, self.growth_rate)\n cnn.batch_norm(**self.batch_norm_config)\n cnn.top_layer = tf.nn.relu(cnn.top_layer)\n channel_index = 3 if cnn.channel_pos == 'channels_last' else 1\n cnn.top_size = cnn.top_layer.get_shape().as_list()[channel_index]\n cnn.spatial_mean()\n\n def get_learning_rate(self, global_step, batch_size):\n num_batches_per_epoch = 50000 // batch_size\n boundaries = num_batches_per_epoch * np.array([150, 225, 300],\n dtype=np.int64)\n boundaries = [x for x in boundaries]\n values = [0.1, 0.01, 0.001, 0.0001]\n return tf.train.piecewise_constant(global_step, boundaries, values)\n\n\ndef create_densenet40_k12_model():\n return DensenetCifar10Model('densenet40_k12', (12, 12, 12), 12)\n\n\ndef create_densenet100_k12_model():\n return DensenetCifar10Model('densenet100_k12', (32, 32, 32), 12)\n\n\ndef create_densenet100_k24_model():\n return DensenetCifar10Model('densenet100_k24', (32, 32, 32), 24)\n", "# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for target_sas.py.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport 
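get_learning_rate above drops the rate at epoch boundaries 150/225/300 via tf.train.piecewise_constant. A small numpy sketch of the same schedule, useful for checking the boundary arithmetic outside a graph; the helper name is illustrative.

import numpy as np

def piecewise_lr(global_step, batch_size=128):
  # Mirrors the boundaries/values pairing in get_learning_rate() above:
  # values[i] applies while global_step <= boundaries[i].
  num_batches_per_epoch = 50000 // batch_size
  boundaries = num_batches_per_epoch * np.array([150, 225, 300])
  values = [0.1, 0.01, 0.001, 0.0001]
  return values[np.searchsorted(boundaries, global_step, side='left')]

print(piecewise_lr(0), piecewise_lr(150 * 390), piecewise_lr(10**6))
# -> 0.1 0.1 0.0001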
os\nimport tempfile\n\nfrom absl import flags\nfrom absl.testing import flagsaver\n\nimport tensorflow.compat.v1 as tf\nfrom tensorflow.compat.v1 import gfile\nfrom mol_dqn.chemgraph import target_sas\nfrom mol_dqn.chemgraph.dqn import deep_q_networks\nfrom mol_dqn.chemgraph.dqn.tensorflow_core import core\n\n\nclass OptimizeQedTest(tf.test.TestCase):\n\n def setUp(self):\n super(OptimizeQedTest, self).setUp()\n self.mount_point = tempfile.mkdtemp(dir=flags.FLAGS.test_tmpdir)\n self.model_dir = os.path.join(self.mount_point, 'model_dir')\n gfile.MakeDirs(self.model_dir)\n\n def test_run(self):\n hparams = deep_q_networks.get_hparams(\n replay_buffer_size=100,\n num_episodes=10,\n batch_size=10,\n update_frequency=1,\n save_frequency=1,\n dense_layers=[32],\n fingerprint_length=128,\n fingerprint_radius=2,\n num_bootstrap_heads=12,\n prioritized=True,\n double_q=True)\n hparams_file = os.path.join(self.mount_point, 'config.json')\n core.write_hparams(hparams, hparams_file)\n\n with flagsaver.flagsaver(model_dir=self.model_dir, hparams=hparams_file):\n target_sas.main(None)\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Evaluation for CIFAR-10.\n\nAccuracy:\ncifar10_train.py achieves 83.0% accuracy after 100K steps (256 epochs\nof data) as judged by cifar10_eval.py.\n\nSpeed:\nOn a single Tesla K40, cifar10_train.py processes a single batch of 128 images\nin 0.25-0.35 sec (i.e. 350 - 600 images /sec). 
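The test above relies on absl's flagsaver to override flags only for the scope of one call. A minimal standalone illustration of that save-and-restore pattern; the flag name here is arbitrary.

from absl import flags
from absl.testing import flagsaver

flags.DEFINE_string('model_dir', '', 'Where to write checkpoints.')
FLAGS = flags.FLAGS
FLAGS.mark_as_parsed()  # allow flag access without parsing argv

with flagsaver.flagsaver(model_dir='/tmp/model'):
  assert FLAGS.model_dir == '/tmp/model'  # overridden inside the context
assert FLAGS.model_dir == ''              # restored afterwards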
The model reaches ~86%\naccuracy after 100K steps in 8 hours of training time.\n\nUsage:\nPlease see the tutorial and website for how to download the CIFAR-10\ndata set, compile the program and train the model.\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport datetime\nimport math\nimport sys\nimport time\n\nimport numpy as np\nimport tensorflow.compat.v1 as tf\n\nfrom model_pruning.examples.cifar10 import cifar10_pruning as cifar10\n\nFLAGS = None\n\n\ndef eval_once(saver, summary_writer, top_k_op, summary_op):\n \"\"\"Run Eval once.\n\n Args:\n saver: Saver.\n summary_writer: Summary writer.\n top_k_op: Top K op.\n summary_op: Summary op.\n \"\"\"\n with tf.Session() as sess:\n ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)\n if ckpt and ckpt.model_checkpoint_path:\n # Restores from checkpoint\n saver.restore(sess, ckpt.model_checkpoint_path)\n # Assuming model_checkpoint_path looks something like:\n # /my-favorite-path/cifar10_train/model.ckpt-0,\n # extract global_step from it.\n global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]\n else:\n print('No checkpoint file found')\n return\n\n # Start the queue runners.\n coord = tf.train.Coordinator()\n try:\n threads = []\n for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):\n threads.extend(qr.create_threads(sess, coord=coord, daemon=True,\n start=True))\n\n num_iter = int(math.ceil(FLAGS.num_examples / 128))\n true_count = 0 # Counts the number of correct predictions.\n total_sample_count = num_iter * 128\n step = 0\n while step < num_iter and not coord.should_stop():\n predictions = sess.run([top_k_op])\n true_count += np.sum(predictions)\n step += 1\n\n # Compute precision @ 1.\n precision = true_count / total_sample_count\n print('%s: precision @ 1 = %.3f' % (datetime.datetime.now(), precision))\n\n summary = tf.Summary()\n summary.ParseFromString(sess.run(summary_op))\n summary.value.add(tag='Precision @ 1', simple_value=precision)\n summary_writer.add_summary(summary, global_step)\n except Exception as e: # pylint: disable=broad-except\n coord.request_stop(e)\n\n coord.request_stop()\n coord.join(threads, stop_grace_period_secs=10)\n\n\ndef evaluate():\n \"\"\"Eval CIFAR-10 for a number of steps.\"\"\"\n with tf.Graph().as_default() as g:\n # Get images and labels for CIFAR-10.\n eval_data = FLAGS.eval_data == 'test'\n images, labels = cifar10.inputs(eval_data=eval_data)\n\n # Build a Graph that computes the logits predictions from the\n # inference model.\n logits = cifar10.inference(images)\n\n # Calculate predictions.\n top_k_op = tf.nn.in_top_k(logits, labels, 1)\n\n # Restore the moving average version of the learned variables for eval.\n variable_averages = tf.train.ExponentialMovingAverage(\n cifar10.MOVING_AVERAGE_DECAY)\n variables_to_restore = variable_averages.variables_to_restore()\n saver = tf.train.Saver(variables_to_restore)\n\n # Build the summary operation based on the TF collection of Summaries.\n summary_op = tf.summary.merge_all()\n\n summary_writer = tf.summary.FileWriter(FLAGS.eval_dir, g)\n\n while True:\n eval_once(saver, summary_writer, top_k_op, summary_op)\n if FLAGS.run_once:\n break\n time.sleep(FLAGS.eval_interval_secs)\n\n\ndef main(argv=None): # pylint: disable=unused-argument\n cifar10.maybe_download_and_extract()\n if tf.gfile.Exists(FLAGS.eval_dir):\n tf.gfile.DeleteRecursively(FLAGS.eval_dir)\n tf.gfile.MakeDirs(FLAGS.eval_dir)\n evaluate()\n\n\nif __name__ == 
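eval_once below accumulates tf.nn.in_top_k hits into true_count and divides by the total sample count. The same aggregation in plain numpy (equivalent for k=1 up to tie-breaking), with made-up logits:

import numpy as np

def precision_at_1(logits, labels):
  # Mirrors tf.nn.in_top_k(logits, labels, 1) followed by the
  # true_count / total_sample_count division in eval_once().
  correct = np.argmax(logits, axis=1) == labels
  return correct.sum() / float(correct.size)

logits = np.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])
print(precision_at_1(logits, np.array([1, 0, 0])))  # -> 2/3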
'__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--eval_dir',\n type=str,\n default='/tmp/cifar10_eval',\n help='Directory where to write event logs.')\n parser.add_argument(\n '--eval_data',\n type=str,\n default='test',\n help=\"\"\"Either 'test' or 'train_eval'.\"\"\")\n parser.add_argument(\n '--checkpoint_dir',\n type=str,\n default='/tmp/cifar10_train',\n help=\"\"\"Directory where to read model checkpoints.\"\"\")\n parser.add_argument(\n '--eval_interval_secs',\n type=int,\n default=60 * 5,\n help='How often to run the eval.')\n parser.add_argument(\n '--num_examples',\n type=int,\n default=10000,\n help='Number of examples to run.')\n parser.add_argument(\n '--run_once',\n type=bool,\n default=False,\n help='Whether to run eval only once.')\n\n FLAGS, unparsed = parser.parse_known_args()\n tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)\n", "# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Miscellaneous util functions.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport re\nimport sys\nimport time\n\nimport numpy as np\nimport tensorflow.compat.v1 as tf\n\n__all__ = [\"init_from_checkpoint\", \"print_out\",\n \"get_device_str\", \"debug_tensor\", \"get_trainable_vars\",\n \"print_vars\", \"log_scope\"]\n\n\ndef init_from_checkpoint(init_checkpoint, new_scope, pattern=\"Net\"):\n \"\"\"Initialize model from a checkpoint.\n\n Args:\n init_checkpoint: checkpoint to init from.\n new_scope: a value to change the top variable scope.\n pattern: to find the top variable scope and replace with new_scope.\n\n Raises:\n ValueError: if top variable scope doesn't contain the keyword \"Net\"\n \"\"\"\n # Get checkpoint\n if tf.gfile.IsDirectory(init_checkpoint):\n checkpoint = tf.train.latest_checkpoint(init_checkpoint)\n else:\n checkpoint = init_checkpoint\n tf.logging.info(\"# Initializing %s from checkpoint %s\" % (\n new_scope, checkpoint))\n\n # Find the existing top variable scope with pattern\n current_scope = None\n reader = tf.train.NewCheckpointReader(checkpoint)\n variable_map = reader.get_variable_to_shape_map()\n\n for var in variable_map:\n if pattern in var.split(\"/\")[0]:\n current_scope = var.split(\"/\")[0]\n tf.logging.info(\" found current_scope %s\" % current_scope)\n break\n if not current_scope:\n raise ValueError(\"Can\\'t find scope with pattern %s in %s\" % (\n pattern, checkpoint))\n\n # Build assignment map\n trainable_vars = {}\n for v in tf.trainable_variables():\n trainable_vars[v.name.split(\":\")[0]] = v.shape.as_list()\n assignment_map = {}\n scope_skip = {}\n ema_skip = 0\n skip_messages = []\n for var in variable_map:\n if not var.startswith(current_scope): # Not the same scope\n # count\n other_scope = var.split(\"/\")[0]\n if other_scope not in scope_skip:\n scope_skip[other_scope] = 0\n scope_skip[other_scope] += 1\n elif 
var.endswith(\"ExponentialMovingAverage\"): # EMA variables\n ema_skip += 1\n else:\n new_var = new_scope + var[len(current_scope):]\n if new_var not in trainable_vars: # Not trainable\n skip_messages.append(\" not in trainable, skip %s\" % new_var)\n elif trainable_vars[new_var] != variable_map[var]: # Shape mismatch\n skip_messages.append(\" shape mismatch %s vs %s, skip %s\" % (\n trainable_vars[new_var], variable_map[var], new_var))\n else: # This is good!\n assignment_map[var] = new_var\n tf.logging.info(\" load %s, %s\" % (new_var, str(variable_map[var])))\n\n # Logging of variables skipped\n for msg in skip_messages:\n tf.logging.info(\"%s\" % msg)\n tf.logging.info(\"# Scopes skipped %s\" % str(scope_skip))\n tf.logging.info(\"# EMA variables skipped %d\" % ema_skip)\n tf.logging.info(\"# Checkpoint has %d entries, map %d entries\" % (\n len(variable_map), len(assignment_map)))\n\n # Init\n tf.train.init_from_checkpoint(checkpoint, assignment_map)\n\n\ndef print_out(s, f=None, new_line=True):\n \"\"\"Similar to print but with support to flush and output to a file.\"\"\"\n if isinstance(s, bytes):\n s = s.decode(\"utf-8\")\n\n if f:\n f.write(s.encode(\"utf-8\"))\n if new_line:\n f.write(b\"\\n\")\n\n # stdout\n out_s = s.encode(\"utf-8\")\n if not isinstance(out_s, str):\n out_s = out_s.decode(\"utf-8\")\n print(out_s, end=\"\", file=sys.stdout)\n\n if new_line:\n sys.stdout.write(\"\\n\")\n sys.stdout.flush()\n\n\ndef get_device_str(device_id, num_gpus):\n \"\"\"Return a device string for multi-GPU setup.\"\"\"\n if num_gpus == 0:\n return \"/cpu:0\"\n device_str_output = \"/gpu:%d\" % (device_id % num_gpus)\n return device_str_output\n\n\ndef debug_tensor(s, msg=None, summarize=10, other_tensors=None):\n \"\"\"Print the shape and value of a tensor at test time. 
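The assignment map above renames checkpoint variables by swapping the leading scope of each name. A toy, strings-only illustration of that rule (no checkpoint involved; names are hypothetical):

current_scope, new_scope = 'OldNet', 'NewNet'
ckpt_var = 'OldNet/layer1/kernel'
new_var = new_scope + ckpt_var[len(current_scope):]
assert new_var == 'NewNet/layer1/kernel'
# tf.train.init_from_checkpoint(checkpoint, {ckpt_var: new_var}) then loads
# the old tensor into the renamed variable, as done above.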
Return a new tensor.\"\"\"\n if not msg:\n msg = s.name\n outputs = [tf.shape(s), s]\n\n # print info of other tensors\n if other_tensors:\n for tensor in other_tensors:\n outputs.extend([tf.shape(tensor), tensor])\n\n return tf.Print(s, outputs, msg + \" \", summarize=summarize)\n\n\ndef _count_total_params(all_vars):\n \"\"\"Count total number of variables.\"\"\"\n return np.sum([np.prod(v.get_shape().as_list()) for v in all_vars])\n\n\ndef print_vars(all_vars=None, label=\"Variables\"):\n \"\"\"Print info about a list of variables.\"\"\"\n if not all_vars:\n all_vars = tf.all_variables()\n num_params = _count_total_params(all_vars)\n tf.logging.info(\"# %s, num_params=%d\" % (label, num_params))\n tf.logging.info(\"Format: <name>, <shape>, <(soft) device placement>\")\n for var in all_vars:\n tf.logging.info(\" %s, %s, %s\" % (var.name, str(var.get_shape()),\n var.op.device))\n\n\ndef _build_regex(pattern):\n \"\"\"Compile a regex pattern, turning a comma-separated list into | alternation.\"\"\"\n compiled_pattern = None\n if pattern:\n # Strip ' \" at the beginning and end\n pattern = re.sub(\"(^\\\"|^\\'|\\\"$|\\'$)\", \"\", pattern)\n\n # Escape\n pattern = re.sub(\"/\", r\"\\/\", pattern)\n\n # Change \"a,b,c\" into \"(a|b|c)\"\n pattern = \"(\" + re.sub(\",\", \"|\", pattern) + \")\"\n\n # Compile\n tf.logging.info(\"regex pattern %s\" % pattern)\n compiled_pattern = re.compile(pattern)\n return compiled_pattern\n\n\ndef get_trainable_vars(all_vars=None, keep_pattern=None, exclude_pattern=None):\n \"\"\"Get trainable vars, frozen vars, check partition, count total params.\"\"\"\n if not all_vars:\n all_vars = tf.trainable_variables()\n\n # Split variables\n trainable_vars = []\n frozen_vars = []\n has_partition = False\n keep_regex = _build_regex(keep_pattern)\n exclude_regex = _build_regex(exclude_pattern)\n for var in all_vars:\n if keep_regex and keep_regex.search(var.name):\n tf.logging.info(\" keeping %s, %s, %s\" % (\n var.name, str(var.get_shape()), var.op.device))\n trainable_vars.append(var)\n elif exclude_regex and exclude_regex.search(var.name):\n tf.logging.info(\" excluding %s, %s, %s\" % (\n var.name, str(var.get_shape()), var.op.device))\n frozen_vars.append(var)\n else:\n trainable_vars.append(var)\n\n # Check for partition variables\n if \"/part_\" in var.name:\n has_partition = True\n\n # Print variables\n print_vars(trainable_vars, label=\"Trainable variables\")\n print_vars(frozen_vars, label=\"Frozen variables\")\n\n return trainable_vars, frozen_vars, has_partition\n\n\ndef log_scope(msg):\n \"\"\"Print log messages with current variable scope.\"\"\"\n current_scope_name = tf.get_variable_scope().name\n print(\"%s, %s\" % (current_scope_name, msg))\n\n\nclass FixedRuntimeHook(tf.train.SessionRunHook):\n \"\"\"Run model for a fixed time.\n\n Estimates runtime based on median of most recent step times. Must have at\n least `window_size` samples stored before it will stop a job, and it\n stores at most `window_size` step times.\n \"\"\"\n\n def __init__(self, seconds, tuner=None, window_size=50):\n \"\"\"Set internal state.\n\n Args:\n seconds: Number of seconds to run for\n tuner: Optional. Vizier tuner used to request that a trial stop\n window_size: Optional. 
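_build_regex above turns a comma-separated list into an alternation group. Its core transformation, reduced to plain `re` without the quote-stripping and TF logging:

import re

pattern = re.compile('(' + re.sub(',', '|', 'conv1,conv2') + ')')
assert pattern.search('ae_/conv1/kernel:0')
assert not pattern.search('ae_/dense/kernel:0')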
Num samples for model time estimate.\n \"\"\"\n tf.train.SessionRunHook.__init__(self)\n self._seconds = seconds\n self._window_size = window_size\n self._tuner = tuner\n\n def begin(self):\n self._global_step_tensor = tf.train.get_global_step()\n self._step_times = collections.deque()\n\n def before_run(self, run_context):\n self._step_start = time.time()\n return tf.train.SessionRunArgs(self._global_step_tensor)\n\n def after_run(self, run_context, run_values):\n global_step = run_values.results\n diff = time.time() - self._step_start\n self._step_times.append(diff)\n # NOTE: Could remove this to change to minimum, but that could end up taking\n # a long time on long running models.\n if len(self._step_times) > self._window_size:\n self._step_times.popleft()\n\n samples = len(self._step_times)\n if samples >= self._window_size:\n median = sorted(self._step_times)[samples // 2]\n if median * global_step > self._seconds:\n tf.logging.info(\n \"Model has trained for estimated %s seconds - stopping. \"\n \"Estimated median step time is %s.\", median * global_step, median)\n run_context.request_stop()\n if self._tuner is not None:\n self._tuner._hp_tuner.request_trial_stop() # pylint: disable=protected-access\n", "# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for pors.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nfrom absl import flags\nfrom absl.testing import parameterized\nfrom six.moves import range\nimport tensorflow as tf # tf\nfrom summae import pors\nfrom summae import util\n\n# pylint: disable=invalid-name\nFLAGS = flags.FLAGS\nflags.declare_key_flag('task')\nflags.declare_key_flag('use_tpu')\nflags.declare_key_flag('pretrain_as_autoencoder')\nflags.declare_key_flag('in_domain_pretrain_steps')\nflags.declare_key_flag('out_domain_pretrain_steps')\nflags.declare_key_flag('decode_reconstructions')\n\n\nclass PorsTest(tf.test.TestCase, parameterized.TestCase):\n\n def setUp(self):\n super(PorsTest, self).setUp()\n FLAGS.task = 'rocstories'\n FLAGS.use_tpu = False\n FLAGS.pretrain_as_autoencoder = True\n FLAGS.decode_reconstructions = False\n FLAGS.in_domain_pretrain_steps = 0\n FLAGS.out_domain_pretrain_steps = 0\n self.data_dir = os.path.join(os.path.dirname(__file__), 'testdata')\n tf.set_random_seed(1234)\n self.params = {\n 'decode_length': 5,\n 'encoder_type': 'rnn',\n 'decoder_type': 'rnn',\n 'latent_size': 4,\n 'decoding_method': 'argmax',\n 'decoding_beam_size': 2,\n 'decoding_alpha': 1.0,\n 'rnn_num_layers': 3,\n 'rnn_hidden_size': 4,\n 'rnn_pooling': 'last',\n 'rnn_bidirect_encode': False,\n 'trf_hidden_size': 10,\n 'trf_num_layers': 3,\n 'trf_num_heads': 2,\n 'trf_filter_size': 7,\n 'trf_postprocess_dropout': 0.1,\n 'trf_attention_dropout': 0.1,\n 'trf_relu_dropout': 0.1,\n 'trf_pooling': 'mean',\n 'pretrain_order': 'simultaneous',\n 'first_pretrain_steps': 0,\n 'nsp_pretrain': False,\n 
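FixedRuntimeHook above stops training once the median step time times the global step exceeds the time budget. The decision rule, reduced to plain Python with made-up numbers:

import collections

def should_stop(step_times, global_step, budget_seconds, window_size=50):
  # Keep a bounded window of recent step times, as the hook does.
  window = collections.deque(step_times, maxlen=window_size)
  if len(window) < window_size:
    return False  # not enough samples for a stable estimate yet
  median = sorted(window)[len(window) // 2]
  return median * global_step > budget_seconds

print(should_stop([0.2] * 50, global_step=1000, budget_seconds=100))  # True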
'nsp_pretrain_not_next_diff_paragraph_prob': 0.5,\n 'cpp_pretrain_scheme': '',\n 'lm_pretrain_dec': False,\n 'lambda_lm_pretrain_s': 1.0,\n 'lambda_lm_pretrain_p': 1.0,\n 'lambda_nsp_pretrain': 1.0,\n 'lambda_cpp_pretrain': 1.0,\n 'lambda_p': 1.0,\n 'lambda_s': 1.0,\n 'lambda_c': 1.0,\n 'lambda_c_avg': 1.0,\n 'embedding_size': 10,\n 'learning_rate': 0.01,\n 'clip_norm': 10.0,\n 'tie_sent_para_enc': False,\n 'tie_sent_para_dec': False,\n 'tie_embeddings': False,\n 'max_decode_steps': 10,\n 'vocab_size': 10,\n 'mask_prob_input': 0.0,\n 'mask_rate_input': 0.0,\n 'mask_type': 'both',\n 'mask_prob_summary': 0.0,\n 'mask_rate_summary': 0.0,\n 'gs_temp': 2.0,\n 'lambda_n': 0.0,\n 'd_hidden': 3,\n 'gd_step_ratio': 1,\n 'adv_weight': 0.5,\n 'add_critic': True,\n 'lambda_c_avg2': 0.1,\n 'noisy_paragraph_prob': 0.0,\n 'avg_first': False,\n 'train_phase_subset': 'all',\n 'noisy_paragraph_scheme': 'shuffle_sentences'\n }\n self.params.update(pors.flags_hypers())\n\n def test_pors_model_pretrain_noD(self):\n # Tests that the discriminator is not updated during pre-training.\n FLAGS.in_domain_pretrain_steps = 10\n tf.reset_default_graph()\n # Batch of 2 paragraphs each with 3 sentences.\n sids = tf.constant([\n [[3, 2, 1, 0, 0], [4, 2, 4, 1, 0], [2, 1, 0, 0, 0]],\n [[5, 2, 3, 1, 0], [4, 2, 5, 1, 0], [5, 1, 0, 0, 0]],\n ],\n dtype=tf.int64)\n\n pids = tf.constant([[3, 2, 4, 2, 4, 2, 1, 0], [5, 2, 3, 4, 2, 5, 5, 1]],\n dtype=tf.int64)\n features = {'sentences': sids, 'paragraphs': pids}\n ret_tensors = pors.pors_model(features, self.params, True)\n\n # Verify that ae_vars and d_vars are all vars.\n d_vars = tf.trainable_variables('disc_')\n\n with self.session() as ss:\n ss.run(tf.initializers.global_variables())\n ss.run(tf.initializers.local_variables())\n # 2 steps where we only train generator\n d_vars_np1 = ss.run(d_vars)\n _, _, _, _ = ss.run(ret_tensors[:-1])\n d_vars_np2 = ss.run(d_vars)\n for i in range(len(d_vars_np1)):\n self.assertAllClose(d_vars_np1[i], d_vars_np2[i])\n\n @parameterized.named_parameters(\n ('autoencoder_tpu',\n True, False, False, '', True, 'simultaneous'),\n ('autoencoder_notpu',\n True, False, False, '', False, 'simultaneous'),\n ('simultaneous_tpu',\n False, True, True, 'last_two', True, 'simultaneous'),\n ('simultaneous_notpu',\n False, True, True, 'last_two', False, 'simultaneous'),\n ('enc_first_tpu',\n False, True, True, 'last_two', True, 'encoder_first'),\n ('enc_first_notpu',\n False, True, True, 'last_two', False, 'encoder_first'),\n ('dec_first_tpu',\n False, True, True, 'last_two', True, 'decoder_first'),\n ('dec_first_notpu',\n False, True, True, 'last_two', False, 'decoder_first'),\n )\n def test_pors_model_pretrain_methods(self, pretrain_as_autoencoder,\n lm_pretrain_dec,\n nsp_pretrain,\n cpp_pretrain_scheme,\n use_tpu, pretrain_order):\n FLAGS.out_domain_pretrain_steps = 1\n FLAGS.in_domain_pretrain_steps = 1\n FLAGS.pretrain_as_autoencoder = pretrain_as_autoencoder\n self.params.update({\n 'add_critic': False,\n 'vocab_size': 11,\n 'pretrain_order': pretrain_order,\n 'first_pretrain_steps': 1,\n 'lm_pretrain_dec': lm_pretrain_dec,\n 'nsp_pretrain': nsp_pretrain,\n 'cpp_pretrain_scheme': cpp_pretrain_scheme,\n 'encoder_type': 'transformer'\n })\n FLAGS.use_tpu = use_tpu\n tf.reset_default_graph()\n # Batch of 4 paragraphs each with 5 sentences.\n sids = tf.constant([\n [[3, 2, 1, 0, 0], [4, 2, 4, 1, 0], [2, 1, 0, 0, 0], [4, 2, 4, 1, 0],\n [2, 1, 0, 0, 0]],\n [[5, 2, 3, 1, 0], [4, 2, 5, 1, 0], [5, 1, 0, 0, 0], [4, 2, 5, 1, 0],\n [5, 1, 0, 0, 0]],\n [[3, 2, 1, 0, 0], 
[4, 2, 4, 1, 0], [2, 1, 0, 0, 0], [4, 2, 4, 1, 0],\n [2, 1, 0, 0, 0]],\n [[5, 2, 3, 1, 0], [4, 2, 5, 1, 0], [5, 1, 0, 0, 0], [4, 2, 5, 1, 0],\n [5, 1, 0, 0, 0]]], dtype=tf.int64)\n\n pids = tf.constant([\n [3, 2, 4, 2, 4, 2, 4, 2, 4, 2, 1, 0],\n [5, 2, 3, 4, 2, 5, 5, 4, 2, 5, 5, 1],\n [3, 2, 4, 2, 4, 2, 4, 2, 4, 2, 1, 0],\n [5, 2, 3, 4, 2, 5, 5, 4, 2, 5, 5, 1]], dtype=tf.int64)\n features = {'sentences': sids, 'paragraphs': pids}\n ret_tensors = pors.pors_model(\n features, self.params, True, spid_dict={pors.MASK: 10})\n\n # Verify that ae_vars and d_vars is all vars.\n ae_vars = tf.trainable_variables('ae_')\n d_vars = tf.trainable_variables('disc_')\n self.assertEqual(set(tf.trainable_variables()), set(ae_vars + d_vars))\n\n with self.session() as ss:\n ss.run(tf.initializers.global_variables())\n ss.run(tf.initializers.local_variables())\n ss.run(tf.tables_initializer())\n # pre-train the first component on out-domain data for 1 step\n # If simultaneous or autoencoder then encoder and decoder are jointly\n # pre-trained.\n loss, _, _, _ = ss.run(ret_tensors[:-1])\n self.assertGreater(loss, 0)\n # pre-train the second component on in-domain data for 1 step\n loss, _, _, _ = ss.run(ret_tensors[:-1])\n self.assertGreater(loss, 0)\n # 1 regular training step\n loss, _, _, _ = ss.run(ret_tensors[:-1])\n self.assertGreater(loss, 0)\n\n @parameterized.named_parameters(\n ('use_tpu', True),\n ('no_tpu', False),\n )\n def test_pors_model_out_domain_pretrain(self, use_tpu):\n FLAGS.out_domain_pretrain_steps = 0\n FLAGS.in_domain_pretrain_steps = 0\n FLAGS.use_tpu = use_tpu\n # Batch of 2 paragraphs each with 3 sentences.\n sids = tf.constant([\n [[3, 2, 1, 0, 0], [4, 2, 4, 1, 0], [2, 1, 0, 0, 0]],\n [[5, 2, 3, 1, 0], [4, 2, 5, 1, 0], [5, 1, 0, 0, 0]],\n ],\n dtype=tf.int64)\n\n pids = tf.constant([[3, 2, 4, 2, 4, 2, 1, 0], [5, 2, 3, 4, 2, 5, 5, 1]],\n dtype=tf.int64)\n features = {'sentences': sids, 'paragraphs': pids}\n ret_tensors = pors.pors_model(features, self.params, True)\n\n # Verify that ae_vars and d_vars is all vars.\n ae_vars = tf.trainable_variables('ae_')\n d_vars = tf.trainable_variables('disc_')\n self.assertEqual(set(tf.trainable_variables()), set(ae_vars + d_vars))\n\n with self.session() as ss:\n ss.run(tf.initializers.global_variables())\n ss.run(tf.initializers.local_variables())\n d_vars_np1 = ss.run(d_vars)\n # 2 steps where we only train generator\n loss, _, _, _ = ss.run(ret_tensors[:-1])\n loss, _, _, _ = ss.run(ret_tensors[:-1])\n self.assertGreater(loss, 0)\n d_vars_np2 = ss.run(d_vars)\n for i in range(len(d_vars_np1)):\n # D-vars should not update\n self.assertAllClose(d_vars_np1[i], d_vars_np2[i])\n self.assertGreater(loss, 0)\n\n @parameterized.named_parameters(\n ('use_tpu_critic', True, True),\n ('use_tpu_nocritic', True, False),\n ('no_tpu_critic', False, True),\n ('no_tpu_nocritic', False, False),\n )\n def test_pors_model(self, use_tpu, add_critic):\n # TODO(peterjliu): Actually test on TPU. 
Setting this flag is not enough.\n FLAGS.use_tpu = use_tpu\n FLAGS.decode_reconstructions = True\n self.params.update({'add_critic': add_critic})\n tf.reset_default_graph()\n # Batch of 2 paragraphs each with 3 sentences.\n sids = tf.constant([\n [[3, 2, 1, 0, 0], [4, 2, 4, 1, 0], [2, 1, 0, 0, 0]],\n [[5, 2, 3, 1, 0], [4, 2, 5, 1, 0], [5, 1, 0, 0, 0]],\n ],\n dtype=tf.int64)\n\n pids = tf.constant([[3, 2, 4, 2, 4, 2, 1, 0], [5, 2, 3, 4, 2, 5, 5, 1]],\n dtype=tf.int64)\n features = {'sentences': sids, 'paragraphs': pids}\n ret_tensors = pors.pors_model(features, self.params, True)\n\n # Verify that ae_vars and d_vars is all vars.\n ae_vars = tf.trainable_variables('ae_')\n d_vars = tf.trainable_variables('disc_')\n self.assertEqual(set(tf.trainable_variables()), set(ae_vars + d_vars))\n\n with self.session() as ss:\n ss.run(tf.initializers.global_variables())\n ss.run(tf.initializers.local_variables())\n # 2 steps to train both discriminator/generator\n loss, _, _, pred_dict = ss.run(ret_tensors[:-1])\n if not use_tpu:\n self.assertIn('decoded_paragraph', pred_dict, msg=str(pred_dict))\n self.assertIn('decoded_sentences', pred_dict, msg=str(pred_dict))\n self.assertGreater(loss, 0)\n loss, _, _, _ = ss.run(ret_tensors[:-1])\n self.assertGreater(loss, 0)\n # This fails for some reason on TPU. TODO(peterjliu): Figure out why.\n # loss, _, _, _ = ss.run(ret_tensors[:-1])\n\n @parameterized.named_parameters(\n ('rnn_rnn_tpu', 'rnn', 'rnn', True),\n ('rnn_rnn_notpu', 'rnn', 'rnn', False),\n ('rnn_trf_tpu', 'rnn', 'transformer', True),\n ('rnn_trf_notpu', 'rnn', 'transformer', False),\n ('trf_rnn_tpu', 'transformer', 'rnn', True),\n ('trf_rnn_notpu', 'transformer', 'rnn', False),\n ('trf_trf_tpu', 'transformer', 'transformer', True),\n ('trf_trf_notpu', 'transformer', 'transformer', False),\n )\n def test_pors_model_encoder_decoder_type(self, encoder_type, decoder_type,\n use_tpu):\n FLAGS.use_tpu = use_tpu\n self.params.update({\n 'add_critic': False,\n 'embedding_size': 4,\n 'latent_size': 4,\n 'trf_hidden_size': 4,\n 'trf_num_heads': 2,\n 'encoder_type': encoder_type,\n 'decoder_type': decoder_type,\n })\n tf.reset_default_graph()\n # Batch of 2 paragraphs each with 3 sentences.\n sids = tf.constant([\n [[3, 2, 1, 0, 0], [4, 2, 4, 1, 0], [2, 1, 0, 0, 0]],\n [[5, 2, 3, 1, 0], [4, 2, 5, 1, 0], [5, 1, 0, 0, 0]]], dtype=tf.int64)\n\n pids = tf.constant([[3, 2, 4, 2, 4, 2, 1, 0],\n [5, 2, 3, 4, 2, 5, 5, 1]], dtype=tf.int64)\n features = {'sentences': sids, 'paragraphs': pids}\n ret_tensors = pors.pors_model(features, self.params, True)\n\n with self.session() as ss:\n ss.run(tf.initializers.global_variables())\n ss.run(tf.initializers.local_variables())\n loss, _, _, _ = ss.run(ret_tensors[:-1])\n self.assertGreater(loss, 0)\n\n def test_pors_model_noisy_paragraph(self):\n self.params.update({'noisy_paragraph_prob': 0.1})\n for use_tpu in [False]:\n FLAGS.use_tpu = use_tpu\n tf.reset_default_graph()\n # Batch of 2 paragraphs each with 3 sentences.\n sids = tf.constant([\n [[3, 2, 1, 0, 0], [4, 2, 4, 1, 0], [2, 1, 0, 0, 0]],\n [[5, 2, 3, 1, 0], [4, 2, 5, 1, 0], [5, 1, 0, 0, 0]],\n ],\n dtype=tf.int64)\n\n pids = tf.constant([[3, 2, 4, 2, 4, 2, 1, 0], [5, 2, 3, 4, 2, 5, 5, 1]],\n dtype=tf.int64)\n features = {'sentences': sids, 'paragraphs': pids,\n 'noisy_paragraphs': pids}\n ret_tensors = pors.pors_model(features, self.params, True)\n\n with self.session() as ss:\n ss.run(tf.initializers.global_variables())\n ss.run(tf.initializers.local_variables())\n # 2 steps to train both 
discriminator/generator\n loss, _, _, _ = ss.run(ret_tensors[:-1])\n self.assertGreater(loss, 0)\n loss, _, _, _ = ss.run(ret_tensors[:-1])\n self.assertGreater(loss, 0)\n\n @parameterized.named_parameters(\n ('use_tpu', True),\n ('no_tpu', False),\n )\n def test_pors_model_mask(self, use_tpu):\n FLAGS.use_tpu = use_tpu\n self.params.update({\n 'rnn_hidden_size': 6,\n 'latent_size': 4,\n 'embedding_size': 10,\n 'trf_hidden_size': 10,\n 'tie_embeddings': True,\n 'mask_rate_input': 0.1,\n 'mask_prob_input': 0.1,\n 'vocab_size': 11,\n 'encoder_type': 'transformer',\n })\n # Batch of 2 paragraphs each with 3 sentences.\n sids = tf.constant(\n [\n [[3, 2, 1, 0, 0], [4, 2, 4, 1, 0], [2, 1, 0, 0, 0]],\n [[5, 2, 3, 1, 0], [4, 2, 5, 1, 0], [5, 1, 0, 0, 0]],\n ], dtype=tf.int64)\n\n pids = tf.constant(\n [[3, 2, 4, 2, 4, 2, 1, 0],\n [5, 2, 3, 4, 2, 5, 5, 1]], dtype=tf.int64)\n features = {'sentences': sids,\n 'paragraphs': pids}\n ret_tensors = pors.pors_model(features, self.params, True,\n spid_dict={pors.MASK: 10})\n\n with self.session() as ss:\n ss.run(tf.initializers.global_variables())\n ss.run(tf.initializers.local_variables())\n loss, _, _, _ = ss.run(ret_tensors[:-1])\n self.assertGreater(loss, 0)\n\n def test_pors_model_tie_embeddings(self):\n tf.reset_default_graph()\n self.params.update({\n 'rnn_hidden_size': 6,\n 'latent_size': 4,\n 'embedding_size': 3,\n 'trf_hidden_size': 3,\n 'tie_embeddings': True,\n })\n # Batch of 2 paragraphs each with 3 sentences.\n sids = tf.constant(\n [\n [[3, 2, 1, 0, 0], [4, 2, 4, 1, 0], [2, 1, 0, 0, 0]],\n [[5, 2, 3, 1, 0], [4, 2, 5, 1, 0], [5, 1, 0, 0, 0]],\n ], dtype=tf.int64)\n\n pids = tf.constant(\n [[3, 2, 4, 2, 4, 2, 1, 0],\n [5, 2, 3, 4, 2, 5, 5, 1]], dtype=tf.int64)\n features = {'sentences': sids,\n 'paragraphs': pids}\n ret_tensors = pors.pors_model(features, self.params, True)\n\n with self.session() as ss:\n ss.run(tf.initializers.global_variables())\n ss.run(tf.initializers.local_variables())\n loss, _, _, _ = ss.run(ret_tensors[:-1])\n self.assertGreater(loss, 0)\n\n @parameterized.named_parameters(\n ('tie_emb', True),\n ('notie_emb', False),\n )\n def test_pors_different_latent(self, tie_emb):\n self.params.update({\n 'rnn_hidden_size': 5,\n 'latent_size': 4,\n 'embedding_size': 3,\n 'trf_hidden_size': 3,\n 'tie_embeddings': tie_emb,\n 'tie_sent_para_dec': False,\n })\n # Batch of 2 paragraphs each with 3 sentences.\n sids = tf.constant([\n [[3, 2, 1, 0, 0], [4, 2, 4, 1, 0], [2, 1, 0, 0, 0]],\n [[5, 2, 3, 1, 0], [4, 2, 5, 1, 0], [5, 1, 0, 0, 0]],\n ],\n dtype=tf.int64)\n\n pids = tf.constant([[3, 2, 4, 2, 4, 2, 1, 0], [5, 2, 3, 4, 2, 5, 5, 1]],\n dtype=tf.int64)\n features = {'sentences': sids, 'paragraphs': pids}\n ret_tensors = pors.pors_model(features, self.params, True)\n\n with self.session() as ss:\n ss.run(tf.initializers.global_variables())\n ss.run(tf.initializers.local_variables())\n loss, _, _, _ = ss.run(ret_tensors[:-1])\n self.assertGreater(loss, 0)\n\n def test_pors_model_tie_encs_decs(self):\n self.params.update({\n 'tie_sent_para_enc': True,\n 'tie_sent_para_dec': True,\n 'vocab_size': 12\n })\n # Batch of 2 paragraphs each with 3 sentences.\n sids = tf.constant(\n [\n [[3, 2, 1, 0, 0], [4, 2, 4, 1, 0], [2, 1, 0, 0, 0]],\n [[5, 2, 3, 1, 0], [4, 2, 5, 1, 0], [5, 1, 0, 0, 0]],\n ], dtype=tf.int64)\n\n pids = tf.constant(\n [[3, 2, 4, 2, 4, 2, 1, 0],\n [5, 2, 3, 4, 2, 5, 5, 1]], dtype=tf.int64)\n features = {'sentences': sids,\n 'paragraphs': pids}\n\n spid_dict = {pors.BOS: 10, pors.BOP: 11}\n ret_tensors = 
pors.pors_model(features, self.params, True, spid_dict)\n\n with self.session() as ss:\n ss.run(tf.initializers.global_variables())\n ss.run(tf.initializers.local_variables())\n loss, _, _, _ = ss.run(ret_tensors[:-1])\n self.assertGreater(loss, 0)\n\n @parameterized.named_parameters(\n ('use_tpu', True),\n ('no_tpu', False),\n )\n def test_input_fn(self, use_tpu):\n files = util.file_list(self.data_dir, 'valid')\n FLAGS.use_tpu = use_tpu\n input_fn = pors.get_input_fn(self.params, files, False, shuffle=False)\n\n dataset = input_fn({'batch_size': 2})\n it = dataset.make_one_shot_iterator()\n next_batch = it.get_next()\n with self.session() as ss:\n batch = ss.run(next_batch)\n self.assertEqual(2, batch[0]['sentences'].shape[0])\n\n @parameterized.named_parameters(\n ('use_tpu', True),\n ('no_tpu', False),\n )\n def test_input_fn_augmented(self, use_tpu):\n files = util.file_list(self.data_dir, 'valid')\n FLAGS.use_tpu = use_tpu\n input_fn = pors.get_input_fn(self.params, files, False, shuffle=False,\n roc_data_augment=True)\n\n dataset = input_fn({'batch_size': 2})\n it = dataset.make_one_shot_iterator()\n next_batch = it.get_next()\n with self.session() as ss:\n batch = ss.run(next_batch)\n self.assertEqual(2, batch[0]['sentences'].shape[0])\n self.assertEqual(2, batch[0]['noisy_paragraphs'].shape[0])\n\n @parameterized.named_parameters(\n # Rest of the tests have no critic.\n ('no_tpu_rnn', False, False, 'rnn', '', 'all'),\n ('use_tpu_rnn', True, False, 'rnn', '', 'all'),\n ('no_tpu_trf', False, False, 'transformer', '', 'all'),\n ('use_tpu_trf', True, False, 'transformer', '', 'all'),\n ('no_tpu_nsp', False, False, 'transformer', 'nsp', 'all'),\n ('use_tpu_nsp', True, False, 'transformer', 'nsp', 'all'),\n ('no_tpu_cpp', False, False, 'transformer', 'cpp', 'all'),\n ('no_tpu_train_decoder_only', False, False, 'transformer', '', 'decoder'),\n )\n def test_model_fn_smoke(self, use_tpu, add_critic, encoder_type,\n encoder_pretraining, train_phase_subset):\n # This test a train step, running both input_fn and model_fn code paths,\n # including backward pass.\n if encoder_pretraining:\n FLAGS.in_domain_pretrain_steps = 10\n else:\n FLAGS.in_domain_pretrain_steps = 0\n\n FLAGS.pretrain_as_autoencoder = encoder_pretraining == 'autoencoder'\n\n if encoder_pretraining == 'nsp':\n self.params.update({\n 'nsp_pretrain': True,\n 'lambda_nsp_pretrain': 1.0,\n })\n elif encoder_pretraining == 'cpp':\n self.params.update({\n 'cpp_pretrain_scheme': 'last_two',\n 'lambda_cpp_pretrain': 1.0,\n })\n\n self.params.update({'add_critic': add_critic,\n 'train_phase_subset': train_phase_subset})\n FLAGS.use_tpu = use_tpu\n tf.reset_default_graph()\n # Just test it doesn't crash\n sptokens = [pors.BOS, pors.BOP, pors.MASK]\n tk, spid_dict = util.get_tokenizer_with_special(\n os.path.join(self.data_dir, 'wikitext103_32768.subword_vocab'),\n sptokens)\n self.params.update({\n 'vocab_size': tk.vocab_size,\n 'embedding_size': 4,\n 'trf_hidden_size': 4,\n 'trf_num_heads': 2,\n 'max_decode_steps': 2,\n 'encoder_type': encoder_type,\n })\n run_config = tf.estimator.tpu.RunConfig(\n model_dir=self.create_tempdir().full_path, keep_checkpoint_max=10)\n\n pors_estimator = tf.estimator.tpu.TPUEstimator(\n use_tpu=use_tpu,\n config=run_config,\n model_fn=pors.get_model_fn(spid_dict),\n train_batch_size=4,\n eval_batch_size=4,\n predict_batch_size=4,\n params=self.params)\n\n files = util.file_list(self.data_dir, 'valid')\n pors_estimator.train(input_fn=pors.get_input_fn(self.params, files, True),\n max_steps=2)\n\n 
@parameterized.named_parameters(\n # No pretraining or critic.\n ('base', False),\n # Language model pretraining\n ('pretrain_lm_dec', True),\n )\n def test_pors_cpu_tpu_diff(self, lm_pretrain):\n self.params.update({\n 'add_critic': False,\n 'lm_pretrain_dec': lm_pretrain,\n 'lambda_lm_pretrain_p': 1.0,\n 'lambda_lm_pretrain_s': 1.0,\n 'vocab_size': 13,\n 'encoder_type': 'transformer',\n })\n if lm_pretrain:\n FLAGS.in_domain_pretrain_steps = 10\n FLAGS.pretrain_as_autoencoder = False\n else:\n FLAGS.in_domain_pretrain_steps = 0\n\n # This tests that the loss computed by cpu and tpu is the same.\n # Batch of 4 paragraphs each with 3 sentences.\n losses = {}\n for tpu in [True, False]:\n tf.reset_default_graph()\n FLAGS.use_tpu = tpu\n tf.random.set_random_seed(1234)\n sids = tf.constant([\n [[3, 2, 1, 0, 0], [4, 2, 4, 1, 0], [2, 1, 0, 0, 0]],\n [[5, 2, 3, 1, 0], [4, 2, 5, 1, 0], [5, 1, 0, 0, 0]],\n [[3, 2, 1, 0, 0], [4, 2, 4, 1, 0], [2, 1, 0, 0, 0]],\n [[5, 2, 3, 1, 0], [4, 2, 5, 1, 0], [5, 1, 0, 0, 0]]], dtype=tf.int64)\n pids = tf.constant([\n [3, 2, 4, 2, 4, 2, 1, 0], [5, 2, 3, 4, 2, 5, 5, 1],\n [3, 2, 4, 2, 4, 2, 1, 0], [5, 2, 3, 4, 2, 5, 5, 1]], dtype=tf.int64)\n features = {'sentences': sids, 'paragraphs': pids}\n\n ret_tensors = pors.pors_model(\n features, self.params, True,\n spid_dict={pors.MASK: 10, pors.BOS: 11, pors.BOP: 12})\n with self.session() as ss:\n ss.run(tf.initializers.global_variables())\n ss.run(tf.initializers.local_variables())\n # 2 steps to train both discriminator/generator\n loss, _, _, _ = ss.run(ret_tensors[:-1])\n losses[tpu] = loss\n self.assertAllClose(losses[True], losses[False])\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python2, python3\n\"\"\"Tests for metrics_lib.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\nimport numpy as np\nimport tensorflow.compat.v2 as tf\nimport tensorflow_probability as tfp\n\nfrom uq_benchmark_2019 import metrics_lib\n\n\nclass MetricsLibTest(tf.test.TestCase, parameterized.TestCase):\n\n def test_bin_predictions_and_accuracies(self):\n num_samples = int(1e5)\n num_bins = 7\n probabilities = np.linspace(0, 1, num_samples)\n labels = np.random.random(num_samples) < probabilities\n\n bin_edges, accuracies, counts = metrics_lib.bin_predictions_and_accuracies(\n probabilities, labels, num_bins)\n\n bin_centers = metrics_lib.bin_centers_of_mass(probabilities, bin_edges)\n self.assertTrue((bin_centers > bin_edges[:-1]).all())\n self.assertTrue((bin_centers < bin_edges[1:]).all())\n self.assertAllClose(accuracies, bin_centers, atol=0.05)\n self.assertAllClose(np.ones(num_bins), num_bins * counts / num_samples,\n atol=0.05)\n\n #\n # expected_calibration_error\n #\n\n def test_expected_calibration_error(self):\n np.random.seed(1)\n nsamples = 100\n probs = np.linspace(0, 1, 
nsamples)\n labels = np.random.rand(nsamples) < probs\n ece = metrics_lib.expected_calibration_error(probs, labels)\n bad_ece = metrics_lib.expected_calibration_error(probs / 2, labels)\n\n self.assertBetween(ece, 0, 1)\n self.assertBetween(bad_ece, 0, 1)\n self.assertLess(ece, bad_ece)\n\n bins = metrics_lib.get_quantile_bins(10, probs)\n quantile_ece = metrics_lib.expected_calibration_error(probs, labels, bins)\n bad_quantile_ece = metrics_lib.expected_calibration_error(\n probs / 2, labels, bins)\n\n self.assertBetween(quantile_ece, 0, 1)\n self.assertBetween(bad_quantile_ece, 0, 1)\n self.assertLess(quantile_ece, bad_quantile_ece)\n\n def test_expected_calibration_error_all_wrong(self):\n num_bins = 90\n ece = metrics_lib.expected_calibration_error(\n np.zeros(10), np.ones(10), bins=num_bins)\n self.assertAlmostEqual(ece, 1.)\n\n ece = metrics_lib.expected_calibration_error(\n np.ones(10), np.zeros(10), bins=num_bins)\n self.assertAlmostEqual(ece, 1.)\n\n def test_expected_calibration_error_all_right(self):\n num_bins = 90\n ece = metrics_lib.expected_calibration_error(\n np.ones(10), np.ones(10), bins=num_bins)\n self.assertAlmostEqual(ece, 0.)\n ece = metrics_lib.expected_calibration_error(\n np.zeros(10), np.zeros(10), bins=num_bins)\n self.assertAlmostEqual(ece, 0.)\n\n def test_expected_calibration_error_bad_input(self):\n with self.assertRaises(ValueError):\n metrics_lib.expected_calibration_error(np.ones(1), np.ones(1))\n with self.assertRaises(ValueError):\n metrics_lib.expected_calibration_error(np.ones(100), np.ones(1))\n with self.assertRaises(ValueError):\n metrics_lib.expected_calibration_error(np.ones(100), np.ones(100) * 0.5)\n\n #\n # Tests for multiclass functions.\n #\n\n def test_get_multiclass_predictions_and_correctness(self):\n multiclass_probs = np.array([[0.1, 0.2, 0.7], [0.5, 0.3, 0.2],\n [0.7, 0.2, 0.1], [0.3, 0.5, 0.2]])\n labels = np.array([2, 0, 1, 0])\n (argmax_probs,\n is_correct) = metrics_lib.get_multiclass_predictions_and_correctness(\n multiclass_probs, labels)\n self.assertAllEqual(argmax_probs, [0.7, 0.5, 0.7, 0.5])\n self.assertAllEqual(is_correct, [True, True, False, False])\n\n def test_get_multiclass_predictions_and_correctness_error_cases(self):\n multiclass_probs = np.array([[0.1, 0.2, 0.7], [0.5, 0.3, 0.2],\n [0.7, 0.2, 0.1], [0.3, 0.5, 0.2]])\n labels = np.array([2, 0, 1, 0])\n with self.assertRaises(ValueError):\n bad_multiclass_probs = multiclass_probs - 0.01\n metrics_lib.get_multiclass_predictions_and_correctness(\n bad_multiclass_probs, labels)\n with self.assertRaises(ValueError):\n metrics_lib.get_multiclass_predictions_and_correctness(\n bad_multiclass_probs[Ellipsis, None], labels)\n with self.assertRaises(ValueError):\n metrics_lib.get_multiclass_predictions_and_correctness(\n bad_multiclass_probs, labels[Ellipsis, None])\n\n def test_expected_calibration_error_multiclass(self):\n num_samples = int(1e4)\n num_classes = 5\n probabilities, labels = _make_perfectly_calibrated_multiclass(\n num_samples, num_classes)\n good_ece = metrics_lib.expected_calibration_error_multiclass(\n probabilities, labels)\n bad_ece = metrics_lib.expected_calibration_error_multiclass(\n np.fliplr(probabilities), labels)\n self.assertAllClose(good_ece, 0, atol=0.05)\n self.assertAllClose(bad_ece, 0.5, atol=0.05)\n\n good_ece_topk = metrics_lib.expected_calibration_error_multiclass(\n probabilities, labels, top_k=3)\n self.assertAllClose(good_ece_topk, 0, atol=0.05)\n\n @parameterized.parameters(1, 2, None)\n def 
test_expected_calibration_error_quantile_multiclass(self, top_k):\n bad_quantile_eces = {1: .5, 2: .25, None: .2}\n num_samples = int(1e4)\n num_classes = 5\n probabilities, labels = _make_perfectly_calibrated_multiclass(\n num_samples, num_classes)\n\n bins = metrics_lib.get_quantile_bins(10, probabilities, top_k=top_k)\n good_quantile_ece = metrics_lib.expected_calibration_error_multiclass(\n probabilities, labels, bins, top_k)\n bad_quantile_ece = metrics_lib.expected_calibration_error_multiclass(\n np.fliplr(probabilities), labels, bins, top_k)\n self.assertAllClose(good_quantile_ece, 0, atol=0.05)\n self.assertAllClose(bad_quantile_ece, bad_quantile_eces[top_k], atol=0.05)\n\n def test_accuracy_top_k(self):\n num_samples = 20\n num_classes = 10\n probs = np.random.rand(num_samples, num_classes)\n probs /= np.expand_dims(probs.sum(axis=1), axis=-1)\n probs = np.apply_along_axis(sorted, 1, probs)\n labels = np.tile(np.arange(num_classes), 2)\n top_2_accuracy = metrics_lib.accuracy_top_k(probs, labels, 2)\n top_5_accuracy = metrics_lib.accuracy_top_k(probs, labels, 5)\n self.assertEqual(top_2_accuracy, .2)\n self.assertEqual(top_5_accuracy, .5)\n\n #\n # Tests for Brier score, decomposition\n #\n\n def test_brier_scores(self):\n batch_shape = (2, 3)\n num_samples, num_classes = 99, 9\n logits = tf.random.uniform(batch_shape + (num_samples, num_classes))\n dist = tfp.distributions.Categorical(logits=logits)\n labels = dist.sample().numpy()\n probs = dist.probs_parameter().numpy()\n\n scores = metrics_lib.brier_scores(labels, probs=probs)\n # Check that computing from logits returns the same result.\n self.assertAllClose(scores, metrics_lib.brier_scores(labels, logits=logits))\n\n self.assertEqual(scores.shape, batch_shape + (num_samples,))\n\n def compute_brier(labels_, logits_):\n probs_ = tf.math.softmax(logits_, axis=1)\n _, nlabels = probs_.shape\n plabel = tf.reduce_sum(tf.one_hot(labels_, nlabels) * probs_, axis=1)\n brier = tf.reduce_sum(tf.square(probs_), axis=1) - 2.0 * plabel\n return tf.reduce_mean(brier)\n\n scores_avg = scores.mean(-1)\n for indices in np.ndindex(*batch_shape):\n score_i = compute_brier(labels[indices], logits[indices])\n self.assertAlmostEqual(score_i.numpy(), scores_avg[indices])\n\n def test_brier_decompositions(self):\n batch_shape = (2, 3)\n num_samples, num_classes = 99, 9\n logits = tf.random.uniform(batch_shape + (num_samples, num_classes))\n dist = tfp.distributions.Categorical(logits=logits)\n labels = dist.sample().numpy()\n probs = dist.probs_parameter().numpy()\n\n all_decomps = metrics_lib.brier_decompositions(labels, probs)\n self.assertEqual(all_decomps.shape, batch_shape + (3,))\n for indices in np.ndindex(*batch_shape):\n decomp_i = metrics_lib.brier_decomposition(labels[indices],\n logits[indices])\n decomp_i = tf.stack(decomp_i, axis=-1).numpy()\n self.assertAllClose(decomp_i, all_decomps[indices])\n\n\ndef _make_perfectly_calibrated_multiclass(num_samples, num_classes):\n argmax_probabilities = np.linspace(1/num_classes, 1, num_samples)\n # Probs have uniform probability among the non-selected classes.\n probabilities = (1 - argmax_probabilities) / (num_classes - 1)\n probabilities = np.tile(probabilities[:, None], [1, num_classes])\n probabilities[:, 0] = argmax_probabilities\n labels = np.stack([np.random.choice(num_classes, p=p) for p in probabilities])\n return probabilities, labels\n\nif __name__ == '__main__':\n tf.enable_v2_behavior()\n tf.test.main()\n", "# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under 
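The calibration tests above exercise expected calibration error: bin the confidences, compare each bin's accuracy to its mean confidence, and average the gaps weighted by bin mass. A compact numpy version of that definition (illustrative, not metrics_lib's exact implementation):

import numpy as np

def ece(probs, labels, num_bins=10):
  edges = np.linspace(0.0, 1.0, num_bins + 1)
  idx = np.clip(np.digitize(probs, edges) - 1, 0, num_bins - 1)
  total = 0.0
  for b in range(num_bins):
    mask = idx == b
    if mask.any():
      gap = abs(labels[mask].mean() - probs[mask].mean())
      total += mask.mean() * gap  # weight by fraction of samples in the bin
  return total

probs = np.linspace(0, 1, 10000)
labels = np.random.rand(10000) < probs  # perfectly calibrated by design
print(ece(probs, labels))               # close to 0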
\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utility for saving/loading training configs.\"\"\"\nimport os\nimport tensorflow.compat.v1 as tf\nimport yaml\n\n\ndef save_config(config, logdir=None):\n \"\"\"Save a new configuration by name.\n\n If a logging directory is specified, it will be created and the configuration\n will be stored there. Otherwise, a log message will be printed.\n\n Args:\n config: Configuration object.\n logdir: Location for writing summaries and checkpoints if specified.\n\n Returns:\n Configuration object.\n \"\"\"\n if logdir:\n # with config.unlocked:\n config.logdir = logdir\n message = 'Start a new run and write summaries and checkpoints to {}.'\n tf.logging.info(message.format(config.logdir))\n tf.gfile.MakeDirs(config.logdir)\n config_path = os.path.join(config.logdir, 'config.yaml')\n with tf.gfile.GFile(config_path, 'w') as file_:\n yaml.dump(config, file_, default_flow_style=False)\n else:\n message = (\n 'Start a new run without storing summaries and checkpoints since no '\n 'logging directory was specified.')\n tf.logging.info(message)\n return config\n\n\ndef load_config(logdir):\n \"\"\"Load a configuration from the log directory.\n\n Args:\n logdir: The logging directory containing the configuration file.\n\n Raises:\n IOError: The logging directory does not contain a configuration file.\n\n Returns:\n Configuration object.\n \"\"\"\n config_path = logdir and os.path.join(logdir, 'config.yaml')\n if not config_path or not tf.gfile.Exists(config_path):\n message = (\n 'Cannot resume an existing run since the logging directory does not '\n 'contain a configuration file.')\n raise IOError(message)\n with tf.gfile.GFile(config_path, 'r') as file_:\n config = yaml.load(file_, Loader=yaml.Loader)\n print('Resume run and write summaries and checkpoints to {}.'.format(\n config.logdir))\n return config\n
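\n\n# Example usage (editor's sketch, not part of the original file). The config\n# is assumed to be an attribute-style object that yaml can round-trip; the\n# paths below are illustrative only:\n#\n# config = save_config(my_config, logdir='/tmp/demo_run')\n# restored = load_config('/tmp/demo_run') # same settings, read back\n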
", "# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Util functions.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport io\nimport math\nimport os\nimport time\n\nfrom absl import flags\nfrom absl import logging\n\nfrom easydict import EasyDict\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top\nimport numpy as np\nimport tensorflow.compat.v2 as tf\nimport yaml\n\nfrom tcc.config import CONFIG\n\nFLAGS = flags.FLAGS\n\n\ndef visualize_batch(data, global_step, batch_size, num_steps):\n \"\"\"Visualizes a batch.\"\"\"\n frames = data['frames']\n frames_list = tf.unstack(frames, num=num_steps, axis=1)\n frames_summaries = tf.concat(frames_list, axis=2)\n batch_list = tf.split(frames_summaries, batch_size, axis=0)\n batch_summaries = tf.concat(batch_list, axis=1)\n tf.summary.image('train_batch', batch_summaries, step=global_step)\n\n\ndef visualize_nearest_neighbours(model, data, global_step, batch_size,\n num_steps, num_frames_per_step, split):\n \"\"\"Visualize nearest neighbours in embedding space.\"\"\"\n # Set learning_phase to False to use models in inference mode.\n tf.keras.backend.set_learning_phase(0)\n\n cnn = model['cnn']\n emb = model['emb']\n\n cnn_feats = get_cnn_feats(cnn, data, training=False)\n emb_feats = emb(cnn_feats, num_steps)\n emb_feats = tf.stack(tf.split(emb_feats, num_steps, axis=0), axis=1)\n\n query_feats = emb_feats[0]\n\n frames = data['frames']\n image_list = tf.unstack(frames, num=batch_size, axis=0)\n im_list = [image_list[0][num_frames_per_step-1::num_frames_per_step]]\n sim_matrix = np.zeros((batch_size-1, num_steps, num_steps), dtype=np.float32)\n\n for i in range(1, batch_size):\n candidate_feats = emb_feats[i]\n\n img_list = tf.unstack(image_list[i], num=num_steps * num_frames_per_step,\n axis=0)[num_frames_per_step-1::num_frames_per_step]\n nn_img_list = []\n\n for j in range(num_steps):\n curr_query_feats = tf.tile(query_feats[j:j+1], [num_steps, 1])\n mean_squared_distance = tf.reduce_mean(\n tf.math.squared_difference(curr_query_feats, candidate_feats), axis=1)\n sim_matrix[i-1, j] = softmax(-1.0 * mean_squared_distance)\n nn_img_list.append(img_list[tf.argmin(mean_squared_distance)])\n\n nn_img = tf.stack(nn_img_list, axis=0)\n im_list.append(nn_img)\n\n def vstack(im):\n return tf.concat(tf.unstack(im, num=num_steps), axis=1)\n\n summary_im = tf.expand_dims(tf.concat([vstack(im) for im in im_list],\n axis=0), axis=0)\n tf.summary.image('%s/nn' % split, summary_im, step=global_step)\n # Convert sim_matrix to float32 as summary_image doesn't take float64\n sim_matrix = sim_matrix.astype(np.float32)\n tf.summary.image('%s/similarity_matrix' % split,\n np.expand_dims(sim_matrix, axis=3), step=global_step)\n\n\ndef softmax(w, t=1.0):\n e = np.exp(np.array(w) / t)\n dist = e / np.sum(e)\n return dist\n\n\ndef random_choice_noreplace(m, n, axis=-1):\n # Generate m random permutations of range (0, n)\n # NumPy version: np.random.rand(m,n).argsort(axis=axis)\n return tf.cast(tf.argsort(tf.random.uniform((m, n)), axis=axis), tf.int64)\n\n\ndef gen_cycles(num_cycles, batch_size, cycle_len):\n \"\"\"Generate cycles for alignment.\"\"\"\n random_cycles = random_choice_noreplace(num_cycles, batch_size)[:, :cycle_len]\n return random_cycles\n\n\ndef get_warmup_lr(lr, global_step, lr_params):\n \"\"\"Returns learning rate during warm up phase.\"\"\"\n if lr_params.NUM_WARMUP_STEPS > 0:\n global_steps_int = tf.cast(global_step, tf.int32)\n warmup_steps_int = tf.constant(lr_params.NUM_WARMUP_STEPS, dtype=tf.int32)\n\n global_steps_float = tf.cast(global_steps_int, tf.float32)\n warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)\n\n warmup_percent_done = global_steps_float / warmup_steps_float\n warmup_lr = lr_params.INITIAL_LR * warmup_percent_done\n\n is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)\n lr = (1.0 - is_warmup) * lr + is_warmup * warmup_lr\n return lr\n\n\n# Minimally adapted from Tensorflow object_detection code.\ndef manual_stepping(global_step, boundaries, rates):\n boundaries = [0] + boundaries
\n num_boundaries = len(boundaries)\n rate_index = tf.reduce_max(\n tf.where(\n tf.greater_equal(global_step, boundaries),\n list(range(num_boundaries)), [0] * num_boundaries))\n return tf.reduce_sum(rates * tf.one_hot(rate_index, depth=num_boundaries))\n\n\ndef get_lr_fn(optimizer_config):\n \"\"\"Returns function that provides current learning rate based on config.\n\n NOTE: This returns a function as in Eager we need to call assign to update\n the learning rate.\n\n Args:\n optimizer_config: EasyDict, contains params required to initialize the\n learning rate and the learning rate decay function.\n Returns:\n lr_fn: function, this can be called to return the current learning rate\n based on the provided config.\n Raises:\n ValueError: in case invalid params have been passed in the config.\n \"\"\"\n lr_params = optimizer_config.LR\n # pylint: disable=g-long-lambda\n if lr_params.DECAY_TYPE == 'exp_decay':\n lr_fn = lambda lr, global_step: tf.train.exponential_decay(\n lr,\n global_step,\n lr_params.EXP_DECAY_STEPS,\n lr_params.EXP_DECAY_RATE,\n staircase=True)()\n elif lr_params.DECAY_TYPE == 'manual':\n lr_step_boundaries = [int(x) for x in lr_params.MANUAL_LR_STEP_BOUNDARIES]\n\n f = lr_params.MANUAL_LR_DECAY_RATE\n learning_rate_sequence = [(lr_params.INITIAL_LR) * f**p\n for p in range(len(lr_step_boundaries) + 1)]\n lr_fn = lambda lr, global_step: manual_stepping(\n global_step, lr_step_boundaries, learning_rate_sequence)\n elif lr_params.DECAY_TYPE == 'fixed':\n lr_fn = lambda lr, global_step: lr_params.INITIAL_LR\n elif lr_params.DECAY_TYPE == 'poly':\n lr_fn = lambda lr, global_step: tf.train.polynomial_decay(\n lr,\n global_step,\n CONFIG.TRAIN.MAX_ITERS,\n end_learning_rate=0.0,\n power=1.0,\n cycle=False)\n else:\n raise ValueError('Learning rate decay type %s not supported. Only support '\n 'the following decay types: fixed, exp_decay, manual, '\n 'and poly.' % lr_params.DECAY_TYPE)\n\n return (lambda lr, global_step: get_warmup_lr(lr_fn(lr, global_step),\n global_step, lr_params))\n
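\n\n# Example usage (editor's sketch, not part of the original file): the\n# returned function is meant to be re-evaluated every step in an eager\n# train loop, e.g.:\n#\n# lr_fn = get_lr_fn(CONFIG.OPTIMIZER)\n# learning_rate, optimizer, global_step = get_lr_opt_global_step()\n# learning_rate.assign(\n# lr_fn(learning_rate, tf.cast(global_step, dtype=tf.float32)))\n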
\n\ndef get_optimizer(optimizer_config, learning_rate):\n \"\"\"Returns optimizer based on config and learning rate.\"\"\"\n if optimizer_config.TYPE == 'AdamOptimizer':\n opt = tf.keras.optimizers.Adam(learning_rate=learning_rate)\n elif optimizer_config.TYPE == 'MomentumOptimizer':\n opt = tf.keras.optimizers.SGD(learning_rate=learning_rate, momentum=0.9)\n else:\n raise ValueError('Optimizer %s not supported. Only support the following '\n 'optimizers: AdamOptimizer, MomentumOptimizer.' %\n optimizer_config.TYPE)\n return opt\n\n\ndef get_lr_opt_global_step():\n \"\"\"Initializes learning rate, optimizer and global step.\"\"\"\n optimizer = get_optimizer(CONFIG.OPTIMIZER, CONFIG.OPTIMIZER.LR.INITIAL_LR)\n global_step = optimizer.iterations\n learning_rate = optimizer.learning_rate\n return learning_rate, optimizer, global_step\n\n\ndef restore_ckpt(logdir, **ckpt_objects):\n \"\"\"Create and restore checkpoint (if one exists on the path).\"\"\"\n # Instantiate checkpoint and restore from any pre-existing checkpoint.\n # Since model is a dict we can insert multiple modular networks in this dict.\n checkpoint = tf.train.Checkpoint(**ckpt_objects)\n ckpt_manager = tf.train.CheckpointManager(\n checkpoint,\n directory=logdir,\n max_to_keep=10,\n keep_checkpoint_every_n_hours=1)\n status = checkpoint.restore(ckpt_manager.latest_checkpoint)\n return ckpt_manager, status, checkpoint\n\n\ndef to_dict(config):\n if isinstance(config, list):\n return [to_dict(c) for c in config]\n elif isinstance(config, EasyDict):\n return dict([(k, to_dict(v)) for k, v in config.items()])\n else:\n return config\n\n\ndef setup_train_dir(logdir):\n \"\"\"Sets up the directory for training.\"\"\"\n tf.io.gfile.makedirs(logdir)\n config_path = os.path.join(logdir, 'config.yml')\n if not os.path.exists(config_path):\n logging.info(\n 'Using config from config.py as no config.yml file exists in '\n '%s', logdir)\n with tf.io.gfile.GFile(config_path, 'w') as config_file:\n config = dict([(k, to_dict(v)) for k, v in CONFIG.items()])\n yaml.safe_dump(config, config_file, default_flow_style=False)\n else:\n logging.info('Using config from config.yml that exists in %s.', logdir)\n with tf.io.gfile.GFile(config_path, 'r') as config_file:\n config_dict = yaml.safe_load(config_file)\n CONFIG.update(config_dict)\n\n train_logs_dir = os.path.join(logdir, 'train_logs')\n if os.path.exists(train_logs_dir) and not FLAGS.force_train:\n raise ValueError('You might be overwriting a directory that already '\n 'has train_logs. Please provide a new logdir name in '\n 'config or pass --force_train while launching script.')\n tf.io.gfile.makedirs(train_logs_dir)\n\n\ndef setup_eval_dir(logdir, config_timeout_seconds=1):\n \"\"\"Sets up the directory for evaluation.\"\"\"\n tf.io.gfile.makedirs(logdir)\n tf.io.gfile.makedirs(os.path.join(logdir, 'eval_logs'))\n config_path = os.path.join(logdir, 'config.yml')\n while not tf.io.gfile.exists(config_path):\n logging.info('Waiting for config to exist. Going to sleep '
\n 'for %s secs.', config_timeout_seconds)\n time.sleep(config_timeout_seconds)\n\n while True:\n with tf.io.gfile.GFile(config_path, 'r') as config_file:\n config_dict = yaml.safe_load(config_file)\n if config_dict is None:\n time.sleep(config_timeout_seconds)\n else:\n break\n CONFIG.update(config_dict)\n\n\ndef get_data(iterator):\n \"\"\"Return a data dict which contains all the requested sequences.\"\"\"\n data = iterator.get_next()\n return data, data['chosen_steps'], data['seq_lens']\n\n\ndef get_cnn_feats(cnn, data, training, num_steps=None):\n \"\"\"Passes data through base CNN.\"\"\"\n if num_steps is None:\n if training:\n num_steps = CONFIG.TRAIN.NUM_FRAMES * CONFIG.DATA.NUM_STEPS\n else:\n num_steps = CONFIG.EVAL.NUM_FRAMES * CONFIG.DATA.NUM_STEPS\n\n cnn.num_steps = num_steps\n cnn_feats = cnn(data['frames'])\n return cnn_feats\n\n\ndef get_context_steps(step):\n num_steps = CONFIG.DATA.NUM_STEPS\n stride = CONFIG.DATA.FRAME_STRIDE\n # We don't want to see the future.\n steps = np.arange(step - (num_steps - 1) * stride, step + stride, stride)\n return steps\n\n\ndef get_indices(curr_idx, num_steps, seq_len):\n steps = range(curr_idx, curr_idx + num_steps)\n single_steps = np.concatenate([get_context_steps(step) for step in steps])\n single_steps = np.maximum(0, single_steps)\n single_steps = np.minimum(seq_len, single_steps)\n return single_steps\n\n\n# TODO(debidatta): Modular and simpler function for embedding datasets\n# with different embedders.\ndef get_embeddings_dataset(model, iterator, frames_per_batch,\n keep_data=False, keep_labels=True,\n max_embs=None):\n \"\"\"Get embeddings from a one epoch iterator.\"\"\"\n keep_labels = keep_labels and CONFIG.DATA.FRAME_LABELS\n num_frames_per_step = CONFIG.DATA.NUM_STEPS\n cnn = model['cnn']\n emb = model['emb']\n embs_list = []\n labels_list = []\n steps_list = []\n seq_lens_list = []\n names_list = []\n seq_labels_list = []\n if keep_data:\n frames_list = []\n\n n = 0\n def cond(n):\n if max_embs is None:\n return True\n else:\n return n < max_embs\n\n # Make Recurrent Layers stateful, set batch size.\n # We do this as we are embedding the whole sequence and that can take\n # more than one batch to be passed and we don't want to automatically\n # reset hidden states after each batch.\n if CONFIG.MODEL.EMBEDDER_TYPE == 'convgru':\n for gru_layer in emb.gru_layers:\n gru_layer.stateful = True\n gru_layer.input_spec[0].shape = [1,]\n\n while cond(n):\n try:\n embs = []\n labels = []\n steps = []\n seq_lens = []\n names = []\n seq_labels = []\n if keep_data:\n frames = []\n\n # Reset GRU states for each video.\n if CONFIG.MODEL.EMBEDDER_TYPE == 'convgru':\n for gru_layer in emb.gru_layers:\n gru_layer.reset_states()\n\n data, chosen_steps, seq_len = get_data(iterator)\n seq_len = seq_len.numpy()[0]\n num_batches = int(math.ceil(float(seq_len)/frames_per_batch))\n for i in range(num_batches):\n if (i + 1) * frames_per_batch > seq_len:\n num_steps = seq_len - i * frames_per_batch\n else:\n num_steps = frames_per_batch\n curr_idx = i * frames_per_batch\n\n curr_data = {}\n for k, v in data.items():\n # Need to do this as some modalities might not exist.\n if len(v.shape) > 1 and v.shape[1] != 0:\n idxes = get_indices(curr_idx, num_steps, seq_len)\n curr_data[k] = tf.gather(v, idxes, axis=1)\n else:\n curr_data[k] = v\n\n cnn_feats = get_cnn_feats(cnn, curr_data,\n num_steps=num_frames_per_step * num_steps,\n training=False)\n\n emb_feats = emb(cnn_feats, num_steps)\n logging.info('On sequence number %d, frames 
embedded %d', n,\n curr_idx + num_steps)\n embs.append(emb_feats.numpy())\n\n steps.append(chosen_steps.numpy()[0])\n seq_lens.append(seq_len * [seq_len])\n all_labels = data['frame_labels'].numpy()[0]\n name = data['name'].numpy()[0]\n names.append(seq_len * [name])\n seq_label = data['seq_labels'].numpy()[0]\n seq_labels.append(seq_len * [seq_label])\n labels.append(all_labels)\n embs = np.concatenate(embs, axis=0)\n labels = np.concatenate(labels, axis=0)\n\n steps = np.concatenate(steps, axis=0)\n seq_lens = np.concatenate(seq_lens, axis=0)\n names = np.concatenate(names, axis=0)\n seq_labels = np.concatenate(seq_labels, axis=0)\n if keep_data:\n frames.append(data['frames'].numpy()[0])\n frames = np.concatenate(frames, axis=0)\n\n if keep_labels:\n labels = labels[~np.isnan(embs).any(axis=1)]\n assert len(embs) == len(labels)\n seq_labels = seq_labels[~np.isnan(embs).any(axis=1)]\n\n names = names[~np.isnan(embs).any(axis=1)]\n seq_lens = seq_lens[~np.isnan(embs).any(axis=1)]\n steps = steps[~np.isnan(embs).any(axis=1)]\n if keep_data:\n frames = frames[~np.isnan(embs).any(axis=1)]\n embs = embs[~np.isnan(embs).any(axis=1)]\n\n assert len(embs) == len(seq_lens)\n assert len(embs) == len(steps)\n assert len(names) == len(steps)\n\n embs_list.append(embs)\n if keep_labels:\n labels_list.append(labels)\n seq_labels_list.append(seq_labels)\n steps_list.append(steps)\n seq_lens_list.append(seq_lens)\n names_list.append(names)\n if keep_data:\n frames_list.append(frames)\n n += 1\n except tf.errors.OutOfRangeError:\n logging.info('Finished embedding the dataset.')\n break\n\n dataset = {'embs': embs_list,\n 'seq_lens': seq_lens_list,\n 'steps': steps_list,\n 'names': names_list,\n 'seq_labels': seq_labels_list}\n if keep_data:\n dataset['frames'] = frames_list\n if keep_labels:\n dataset['labels'] = labels_list\n\n # Reset statefulness to recurrent layers for other evaluation tasks.\n if CONFIG.MODEL.EMBEDDER_TYPE == 'convgru':\n for gru_layer in emb.gru_layers:\n gru_layer.stateful = False\n\n return dataset\n\n\ndef gen_plot(x, y):\n \"\"\"Create a pyplot, save to buffer and return TB compatible image.\"\"\"\n plt.figure()\n plt.plot(x, y)\n plt.title('Val Accuracy')\n plt.ylim(0, 1)\n plt.tight_layout()\n buf = io.BytesIO()\n plt.savefig(buf, format='png')\n buf.seek(0)\n # Convert PNG buffer to TF image\n image = tf.image.decode_png(buf.getvalue(), channels=4)\n # Add the batch dimension\n image = tf.expand_dims(image, 0)\n return image\n\n\nclass Stopwatch(object):\n \"\"\"Simple timer for measuring elapsed time.\"\"\"\n\n def __init__(self):\n self.reset()\n\n def elapsed(self):\n return time.time() - self.time\n\n def done(self, target_interval):\n return self.elapsed() >= target_interval\n\n def reset(self):\n self.time = time.time()\n\n\ndef set_learning_phase(f):\n \"\"\"Sets the correct learning phase before calling function f.\"\"\"\n def wrapper(*args, **kwargs):\n \"\"\"Calls the function f after setting proper learning phase.\"\"\"\n if 'training' not in kwargs:\n raise ValueError('Function called with set_learning_phase decorator which'\n ' does not have training argument.')\n training = kwargs['training']\n if training:\n # Set learning_phase to True to use models in training mode.\n tf.keras.backend.set_learning_phase(1)\n else:\n # Set learning_phase to False to use models in inference mode.\n tf.keras.backend.set_learning_phase(0)\n return f(*args, **kwargs)\n return wrapper\n", "# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the 
Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Utils for training pixel_cnn model for images.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport numpy as np\nfrom sklearn import metrics\nimport tensorflow.compat.v1 as tf\nimport yaml\n\n\ndef load_tfdata_from_np(np_file):\n with tf.compat.v1.gfile.Open(np_file, mode='rb') as f:\n images = np.load(f)\n labels = np.load(f)\n dataset = tf.compat.v1.data.Dataset.from_tensor_slices(\n (images, labels)).map(tensor_slices_preprocess)\n return dataset\n\n\ndef load_fmnist_datasets(data_dir):\n \"\"\"Load fashionMNIST and MNIST dataset from np array.\"\"\"\n tr_in = load_tfdata_from_np(os.path.join(data_dir, 'fashion_mnist_train.npy'))\n val_in = load_tfdata_from_np(os.path.join(data_dir, 'fashion_mnist_val.npy'))\n test_in = load_tfdata_from_np(\n os.path.join(data_dir, 'fashion_mnist_test.npy'))\n\n val_ood = load_tfdata_from_np(os.path.join(data_dir, 'notmnist.npy'))\n test_ood = load_tfdata_from_np(os.path.join(data_dir, 'mnist_test.npy'))\n\n return {\n 'tr_in': tr_in,\n 'val_in': val_in,\n 'test_in': test_in,\n 'val_ood': val_ood,\n 'test_ood': test_ood\n }\n\n\ndef load_cifar_datasets(data_dir):\n \"\"\"Load CIFAR10 and SVHN dataset from np array.\"\"\"\n tr_in = load_tfdata_from_np(os.path.join(data_dir, 'cifar10_train.npy'))\n val_in = load_tfdata_from_np(os.path.join(data_dir, 'cifar10_val.npy'))\n test_in = load_tfdata_from_np(os.path.join(data_dir, 'cifar10_test.npy'))\n\n test_ood = load_tfdata_from_np(\n os.path.join(data_dir, 'svhn_cropped_test.npy'))\n\n return {\n 'tr_in': tr_in,\n 'val_in': val_in,\n 'test_in': test_in,\n 'test_ood': test_ood # val_ood is val_in_grey\n }\n\n\ndef tensor_slices_preprocess(x, y):\n new = {}\n new['image'] = tf.cast(x, tf.float32)\n new['label'] = tf.cast(y, tf.int32)\n return new\n\n\ndef image_preprocess(x):\n x['image'] = tf.cast(x['image'], tf.float32)\n return x\n\n\ndef mutate_x(x, mutation_rate):\n \"\"\"Add mutations to input.\n\n Generate mutations for all positions. In order for a mutated value to\n differ from the original, the mutation has to be >= 1. Untargeted\n positions are muted by multiplying with a mask (1 for targeted), then the\n mutations are added to the original, mod 256 if necessary.\n\n Args:\n x: input image tensor of size width*height*channel\n mutation_rate: mutation rate\n\n Returns:\n mutated input\n \"\"\"\n w, h, c = x.get_shape().as_list()\n mask = tf.cast(\n tf.compat.v1.multinomial(\n tf.compat.v1.log([[1.0 - mutation_rate, mutation_rate]]), w * h * c),\n tf.int32)[0]\n mask = tf.reshape(mask, [w, h, c])\n possible_mutations = tf.compat.v1.random_uniform(\n [w * h * c],\n minval=0,\n maxval=256, # 256 values [0, 1, ..., 256) = [0, 1, ..., 255]\n dtype=tf.int32)\n possible_mutations = tf.reshape(possible_mutations, [w, h, c])\n x = tf.compat.v1.mod(tf.cast(x, tf.int32) + mask * possible_mutations, 256)\n x = tf.cast(x, tf.float32)\n return x\n
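\n\n# Editor's note (sketch, not part of the original file): with mutation_rate r,\n# each of the w * h * c positions is selected independently with probability r;\n# a selected position has a uniform draw from [0, 256) added to it, mod 256.\n# E.g. for a 32x32x3 image with mutation_rate=0.05, roughly\n# 0.05 * 32 * 32 * 3 ~= 154 positions are expected to be mutated.\n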
\"\"\"Image preprocess and add noise to image.\"\"\"\n x['image'] = tf.cast(x['image'], tf.float32)\n\n if mutation_rate > 0:\n x['image'] = mutate_x(x['image'], mutation_rate)\n\n return x # (input, output) of the model\n\n\ndef image_preprocess_grey(x): # used for generate CIFAR-grey\n x['image'] = tf.compat.v1.image.rgb_to_grayscale(x['image'])\n x['image'] = tf.tile(x['image'], [1, 1, 3])\n x['image'] = tf.cast(x['image'], tf.float32)\n return x\n\n\ndef compute_auc(neg, pos, pos_label=1):\n ys = np.concatenate((np.zeros(len(neg)), np.ones(len(pos))), axis=0)\n neg = np.array(neg)[np.logical_not(np.isnan(neg))]\n pos = np.array(pos)[np.logical_not(np.isnan(pos))]\n scores = np.concatenate((neg, pos), axis=0)\n auc = metrics.roc_auc_score(ys, scores)\n if pos_label == 1:\n return auc\n else:\n return 1 - auc\n\n\ndef get_ckpt_at_step(tr_model_dir, step):\n pattern = 'model_step{}.ckpt.index'.format(step)\n list_of_ckpt = tf.compat.v1.gfile.Glob(os.path.join(tr_model_dir, pattern))\n if list_of_ckpt:\n ckpt_file = list_of_ckpt[0].replace('.index', '')\n return ckpt_file\n else:\n tf.compat.v1.logging.fatal('Cannot find the ckpt file at step %s in dir %s',\n step, tr_model_dir)\n return None\n\n\ndef load_hparams(params_yaml_file):\n \"\"\"Create tf.HParams object based on params loaded from yaml file.\"\"\"\n with tf.compat.v1.gfile.Open(params_yaml_file, mode='rb') as f:\n params = yaml.safe_load(f)\n params['dropout_rate'] = 0.0 # turn off dropout for eval\n\n return params\n\n\ndef eval_on_data(data,\n preprocess_fn,\n params,\n dist,\n sess,\n return_per_pixel=False):\n \"\"\"predict for data and save log_prob to npy.\"\"\"\n\n data_ds = data.map(preprocess_fn).batch(\n params['batch_size']).make_one_shot_iterator()\n data_im = data_ds.get_next()\n\n log_prob_i_list = []\n label_i_list = []\n image_i_list = []\n\n log_prob = dist.log_prob(data_im['image'], return_per_pixel=return_per_pixel)\n # eval on dataset\n while True:\n try:\n log_prob_np, label_np, image_np = sess.run(\n [log_prob, data_im['label'], data_im['image']])\n log_prob_i_list.append(np.expand_dims(log_prob_np, axis=-1))\n label_i_list += list(label_np.reshape(-1))\n image_i_list.append(image_np)\n\n except tf.errors.OutOfRangeError:\n break\n\n log_prob_i_t_np = np.vstack(log_prob_i_list)\n log_prob_i_np = np.sum(\n log_prob_i_t_np.reshape(log_prob_i_t_np.shape[0], -1), axis=1)\n label_i_np = np.array(label_i_list)\n image_i_np = np.squeeze(np.vstack(image_i_list)).reshape(\n -1, params['n_dim'], params['n_dim'], params['n_channel'])\n out = {'log_probs': log_prob_i_np, 'labels': label_i_np, 'images': image_i_np}\n if return_per_pixel:\n out['log_probs_per_pixel'] = np.squeeze(log_prob_i_t_np)\n return out\n", "# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# python3\n\"\"\"Models.\"\"\"\n\n# pylint: disable=g-bad-import-order, unused-import, g-multiple-import\n# pylint: disable=line-too-long, missing-docstring, g-importing-member\n# pylint: 
disable=g-wrong-blank-lines, missing-super-argument\nimport gin\nimport tensorflow.compat.v1 as tf\nimport tensorflow_probability as tfp\nfrom functools import partial\nfrom collections import OrderedDict\nimport numpy as np\n\nfrom weak_disentangle import tensorsketch as ts\nfrom weak_disentangle import utils as ut\n\ntfd = tfp.distributions\ndense = gin.external_configurable(ts.Dense)\nconv = gin.external_configurable(ts.Conv2d)\ndeconv = gin.external_configurable(ts.ConvTranspose2d)\nadd_wn = gin.external_configurable(ts.WeightNorm.add)\nadd_bn = gin.external_configurable(ts.BatchNorm.add)\n\n\[email protected]\nclass Encoder(ts.Module):\n def __init__(self, x_shape, z_dim, width=1, spectral_norm=True):\n super().__init__()\n self.net = ts.Sequential(\n conv(32 * width, 4, 2, \"same\"), ts.LeakyReLU(),\n conv(32 * width, 4, 2, \"same\"), ts.LeakyReLU(),\n conv(64 * width, 4, 2, \"same\"), ts.LeakyReLU(),\n conv(64 * width, 4, 2, \"same\"), ts.LeakyReLU(),\n ts.Flatten(),\n dense(128 * width), ts.LeakyReLU(),\n dense(2 * z_dim)\n )\n\n if spectral_norm:\n self.net.apply(ts.SpectralNorm.add, targets=ts.Affine)\n\n ut.log(\"Building encoder...\")\n self.build([1] + x_shape)\n self.apply(ut.reset_parameters)\n\n def forward(self, x):\n h = self.net(x)\n a, b = tf.split(h, 2, axis=-1)\n return tfd.MultivariateNormalDiag(\n loc=a,\n scale_diag=tf.nn.softplus(b) + 1e-8)\n\n\[email protected]\nclass LabelDiscriminator(ts.Module):\n def __init__(self, x_shape, y_dim, width=1, share_dense=False,\n uncond_bias=False):\n super().__init__()\n self.y_dim = y_dim\n self.body = ts.Sequential(\n conv(32 * width, 4, 2, \"same\"), ts.LeakyReLU(),\n conv(32 * width, 4, 2, \"same\"), ts.LeakyReLU(),\n conv(64 * width, 4, 2, \"same\"), ts.LeakyReLU(),\n conv(64 * width, 4, 2, \"same\"), ts.LeakyReLU(),\n ts.Flatten(),\n )\n\n self.aux = ts.Sequential(\n dense(128 * width), ts.LeakyReLU(),\n )\n\n if share_dense:\n self.body.append(dense(128 * width), ts.LeakyReLU())\n self.aux.append(dense(128 * width), ts.LeakyReLU())\n\n self.head = ts.Sequential(\n dense(128 * width), ts.LeakyReLU(),\n dense(128 * width), ts.LeakyReLU(),\n dense(1, bias=uncond_bias)\n )\n\n for m in (self.body, self.aux, self.head):\n m.apply(ts.SpectralNorm.add, targets=ts.Affine)\n\n ut.log(\"Building label discriminator...\")\n x_shape, y_shape = [1] + x_shape, (1, y_dim)\n self.build(x_shape, y_shape)\n self.apply(ut.reset_parameters)\n\n def forward(self, x, y):\n hx = self.body(x)\n hy = self.aux(y)\n o = self.head(tf.concat((hx, hy), axis=-1))\n return o\n\n\[email protected]\nclass Discriminator(ts.Module):\n def __init__(self, x_shape, y_dim, width=1, share_dense=False,\n uncond_bias=False, cond_bias=False, mask_type=\"match\"):\n super().__init__()\n self.y_dim = y_dim\n self.mask_type = mask_type\n self.body = ts.Sequential(\n conv(32 * width, 4, 2, \"same\"), ts.LeakyReLU(),\n conv(32 * width, 4, 2, \"same\"), ts.LeakyReLU(),\n conv(64 * width, 4, 2, \"same\"), ts.LeakyReLU(),\n conv(64 * width, 4, 2, \"same\"), ts.LeakyReLU(),\n ts.Flatten(),\n )\n\n if share_dense:\n self.body.append(dense(128 * width), ts.LeakyReLU())\n\n if mask_type == \"match\":\n self.neck = ts.Sequential(\n dense(128 * width), ts.LeakyReLU(),\n dense(128 * width), ts.LeakyReLU(),\n )\n\n self.head_uncond = dense(1, bias=uncond_bias)\n self.head_cond = dense(128 * width, bias=cond_bias)\n\n for m in (self.body, self.neck, self.head_uncond):\n m.apply(ts.SpectralNorm.add, targets=ts.Affine)\n add_wn(self.head_cond)\n x_shape, y_shape = [1] + x_shape, 
((1,), tf.int32)\n\n elif mask_type == \"rank\":\n self.body.append(\n dense(128 * width), ts.LeakyReLU(),\n dense(128 * width), ts.LeakyReLU(),\n dense(1 + y_dim, bias=uncond_bias)\n )\n\n self.body.apply(ts.SpectralNorm.add, targets=ts.Affine)\n x_shape, y_shape = [1] + x_shape, (1, y_dim)\n\n ut.log(\"Building {} discriminator...\".format(mask_type))\n self.build(x_shape, x_shape, y_shape)\n self.apply(ut.reset_parameters)\n\n def forward(self, x1, x2, y):\n if self.mask_type == \"match\":\n h = self.body(tf.concat((x1, x2), axis=0))\n h1, h2 = tf.split(h, 2, axis=0)\n h = self.neck(tf.concat((h1, h2), axis=-1))\n o_uncond = self.head_uncond(h)\n\n w = self.head_cond(tf.one_hot(y, self.y_dim))\n o_cond = tf.reduce_sum(h * w, axis=-1, keepdims=True)\n return o_uncond + o_cond\n\n elif self.mask_type == \"rank\":\n h = self.body(tf.concat((x1, x2), axis=0))\n h1, h2 = tf.split(h, 2, axis=0)\n o1, z1 = tf.split(h1, (1, self.y_dim), axis=-1)\n o2, z2 = tf.split(h2, (1, self.y_dim), axis=-1)\n y_pm = y * 2 - 1 # convert from {0, 1} to {-1, 1}\n diff = (z1 - z2) * y_pm\n o_diff = tf.reduce_sum(diff, axis=-1, keepdims=True)\n return o1 + o2 + o_diff\n\n def expose_encoder(self, x):\n h = self.body(x)\n _, z = tf.split(h, (1, self.y_dim), axis=-1)\n return z\n\n\[email protected]\nclass Generator(ts.Module):\n def __init__(self, x_shape, z_dim, batch_norm=True):\n super().__init__()\n ch = x_shape[-1]\n self.net = ts.Sequential(\n dense(128), ts.ReLU(),\n dense(4 * 4 * 64), ts.ReLU(), ts.Reshape((-1, 4, 4, 64)),\n deconv(64, 4, 2, \"same\"), ts.LeakyReLU(),\n deconv(32, 4, 2, \"same\"), ts.LeakyReLU(),\n deconv(32, 4, 2, \"same\"), ts.LeakyReLU(),\n deconv(ch, 4, 2, \"same\"), ts.Sigmoid(),\n )\n\n # Add batchnorm post-activation (attach to activation out_hook)\n if batch_norm:\n self.net.apply(add_bn, targets=(ts.ReLU, ts.LeakyReLU))\n\n ut.log(\"Building generator...\")\n self.build((1, z_dim))\n self.apply(ut.reset_parameters)\n\n def forward(self, z):\n return self.net(z)\n", "# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utils related to AR and ARMA models.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport warnings\n\nimport numpy as np\nfrom statsmodels.regression.linear_model import OLS\nfrom statsmodels.tools import sm_exceptions\nfrom statsmodels.tools.tools import add_constant\nfrom statsmodels.tsa.ar_model import AR\nfrom statsmodels.tsa.arima_model import ARMA\nfrom statsmodels.tsa.tsatools import lagmat\n\n\ndef fit_ar(outputs, inputs, guessed_dim):\n \"\"\"Fits an AR model of order p = guessed_dim.\n\n Args:\n outputs: Array with the output values from the LDS.\n inputs: Array with exogenous inputs values.\n guessed_dim: Guessed hidden dimension.\n\n Returns:\n - Fitted AR coefficients.\n \"\"\"\n if outputs.shape[1] > 1:\n # If there are multiple output dimensions, fit autoregressive params on\n # each dimension separately and 
average.\n params_list = [\n fit_ar(outputs[:, j:j+1], inputs, guessed_dim) \\\n for j in range(outputs.shape[1])]\n return np.mean(\n np.concatenate([a.reshape(1, -1) for a in params_list]), axis=0)\n if inputs is None:\n model = AR(outputs).fit(ic='bic', trend='c', maxlag=guessed_dim, disp=0)\n arparams = np.zeros(guessed_dim)\n arparams[:model.k_ar] = model.params[model.k_trend:]\n return arparams\n else:\n model = ARMA(outputs, order=(guessed_dim, 0), exog=inputs)\n try:\n arma_model = model.fit(start_ar_lags=guessed_dim, trend='c', disp=0)\n return arma_model.arparams\n except (ValueError, np.linalg.LinAlgError) as e:\n warnings.warn(str(e), sm_exceptions.ConvergenceWarning)\n return np.zeros(guessed_dim)\n\n\ndef _fit_arma_iter(outputs, inputs, p, q, r, l2_reg=0.0):\n \"\"\"Iterative regression for estimating AR params in ARMAX(p, q, r) model.\n\n The iterative AR regression process provides consistent estimates for the\n AR parameters of an ARMAX(p, q, r) model after q iterative steps.\n\n It first fits an ARMAX(p, 0, r) model with least squares regression, then\n ARMAX(p, 1, r), and so on, ..., until ARMAX(p, q, r). At the i-th step, it\n fits an ARMAX(p, i, r) model, according to estimated error terms from the\n previous step.\n\n For description of the iterative regression method, see Section 2 of\n `Consistent Estimates of Autoregressive Parameters and Extended Sample\n Autocorrelation Function for Stationary and Nonstationary ARMA Models` at\n https://www.jstor.org/stable/2288340.\n\n The implementation here is a generalization of the method mentioned in the\n paper. We adapt the method for multidimensional outputs, exogenous inputs, nan\n handling, and also add regularization on the MA parameters.\n\n Args:\n outputs: Array with the output values from the LDS, nans allowed.\n inputs: Array with exogenous inputs values, nans allowed. Could be None.\n p: AR order, i.e. max lag of the autoregressive part.\n q: MA order, i.e. max lag of the error terms.\n r: Max lag of the exogenous inputs.\n l2_reg: L2 regularization coefficient, to be applied on MA coefficients.\n\n Returns:\n Fitted AR coefficients.\n \"\"\"\n if outputs.shape[1] > 1:\n # If there are multiple output dimensions, fit autoregressive params on\n # each dimension separately and average.\n params_list = [\n _fit_arma_iter(outputs[:, j:j+1], inputs, p, q, r, l2_reg=l2_reg) \\\n for j in range(outputs.shape[1])]\n return np.mean(\n np.concatenate([a.reshape(1, -1) for a in params_list]), axis=0)\n # We include a constant term in regression.\n k_const = 1\n # Input dim. If inputs is None, then in_dim = 0.\n in_dim = 0\n if inputs is not None:\n in_dim = inputs.shape[1]\n # Lag the inputs to obtain [?, r], column j means series x_{t-j}.\n # Use trim to drop rows with unknown values both at beginning and end.\n lagged_in = np.concatenate(\n [lagmat(inputs[:, i], maxlag=r, trim='both') for i in range(in_dim)],\n axis=1)\n # Since we trim in beginning, the offset is r.\n lagged_in_offset = r\n # Lag the series itself to p-th order.\n lagged_out = lagmat(outputs, maxlag=p, trim='both')\n lagged_out_offset = p\n y = outputs\n y_offset = 0\n # Estimated residuals, initialized to 0.\n res = np.zeros_like(outputs)\n for i in range(q + 1):\n # Lag the residuals to i-th order in i-th iteration.\n lagged_res = lagmat(res, maxlag=i, trim='both')\n lagged_res_offset = y_offset + i\n # Compute offset in regression, since lagged_in, lagged_out, and lagged_res\n # have different offsets. Align them.\n if inputs is None:\n y_offset = max(lagged_out_offset, lagged_res_offset)\n else:\n y_offset = max(lagged_out_offset, lagged_res_offset, lagged_in_offset)\n y = outputs[y_offset:, :]\n # Concatenate all variables in regression.\n x = np.concatenate([\n lagged_out[y_offset - lagged_out_offset:, :],\n lagged_res[y_offset - lagged_res_offset:, :]\n ],\n axis=1)\n if inputs is not None:\n x = np.concatenate([lagged_in[y_offset - lagged_in_offset:, :], x],\n axis=1)\n # Add constant term as the first variable.\n x = add_constant(x, prepend=True)\n if x.shape[1] < k_const + in_dim * r + p + i:\n raise ValueError('Insufficient sequence length for model fitting.')\n # Drop rows with nans.\n arr = np.concatenate([y, x], axis=1)\n arr = arr[~np.isnan(arr).any(axis=1)]\n y_dropped_na = arr[:, 0:1]\n x_dropped_na = arr[:, 1:]\n # Only regularize the MA part.\n alpha = np.concatenate(\n [np.zeros(k_const + in_dim * r + p), l2_reg * np.ones(i)], axis=0)\n # When L1_wt = 0, it's ridge regression.\n olsfit = OLS(y_dropped_na, x_dropped_na).fit_regularized(\n alpha=alpha, L1_wt=0.0)\n # Update estimated residuals.\n res = y - np.matmul(x, olsfit.params.reshape(-1, 1))\n if len(olsfit.params) != k_const + in_dim * r + p + q:\n raise ValueError('Expected param len %d, got %d.' %\n (k_const + in_dim * r + p + q, len(olsfit.params)))\n if q == 0:\n return olsfit.params[-p:]\n return olsfit.params[-(p + q):-q]\n\n\ndef fit_arma_iter(outputs, inputs, guessed_dim, l2_reg=0.0):\n \"\"\"Iterative regression for ARMAX(p, q, r), where p = guessed_dim and\n q = r = guessed_dim - 1.\n\n Args:\n outputs: Array with the output values from the LDS.\n inputs: Array with exogenous inputs values.\n guessed_dim: Guessed hidden dimension.\n l2_reg: L2 regularization coefficient.\n\n Returns:\n Fitted AR coefficients.\n \"\"\"\n return _fit_arma_iter(\n outputs,\n inputs,\n p=guessed_dim,\n q=guessed_dim - 1,\n r=guessed_dim - 1,\n l2_reg=l2_reg)\n
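\n\n# Example usage (editor's sketch, not part of the original file): recover\n# the AR coefficients of a synthetic AR(2) series with no exogenous inputs.\n#\n# rng = np.random.RandomState(0)\n# y = np.zeros((500, 1))\n# for t in range(2, 500):\n# y[t] = 0.6 * y[t - 1] - 0.2 * y[t - 2] + rng.randn()\n# arparams = fit_arma_iter(y, inputs=None, guessed_dim=2) # ~[0.6, -0.2]\n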
\n\ndef fit_arma_mle(outputs, inputs, guessed_dim, method='css-mle'):\n \"\"\"Fits an ARMA model of order (p=guessed_dim, q=guessed_dim).\n\n First try using the statsmodels ARMA default initialization of start params.\n\n If the start params are unstable (roots outside unit circle), try start params\n with only AR params but no MA.\n\n If the AR only start params result in SVD failure in optimizer, returns the AR\n only params as the fitted params and returns no model.\n\n Args:\n outputs: Array with the output values from the LDS.\n inputs: Array of exogenous inputs.\n guessed_dim: Guessed hidden dimension.\n method: 'css-mle' or 'css' or 'mle', fit method in the statsmodels package.\n\n Returns:\n - Fitted AR coefficients.\n \"\"\"\n p = guessed_dim\n q = guessed_dim\n model = ARMA(outputs, order=(p, q), exog=inputs)\n try:\n arma_model = model.fit(start_ar_lags=None, trend='c', method=method, disp=0)\n return arma_model.arparams\n except (ValueError, np.linalg.LinAlgError) as e:\n warnings.warn(str(e), sm_exceptions.ConvergenceWarning)\n return np.zeros(p)\n\n\ndef get_eig_from_arparams(arparams):\n eigs = np.roots(np.r_[1, -arparams])\n return eigs[np.argsort(eigs.real)[::-1]]\n", "# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on 
an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Simple grid-world environment.\n\nThe task here is to walk to the (max_x, max_y) position in a square grid.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom typing import Any, Dict, Tuple, Union\n\n\nclass GridWalk(object):\n \"\"\"Walk on grid to target location.\"\"\"\n\n def __init__(self, length, tabular_obs = True):\n \"\"\"Initializes the environment.\n\n Args:\n length: The length of the square gridworld.\n tabular_obs: Whether to use tabular observations. Otherwise observations\n are x, y coordinates.\n \"\"\"\n self._length = length\n self._tabular_obs = tabular_obs\n self._x = np.random.randint(length)\n self._y = np.random.randint(length)\n self._n_state = length ** 2\n self._n_action = 4\n self._target_x = length - 1\n self._target_y = length - 1\n\n def reset(self):\n \"\"\"Resets the agent to a random square.\"\"\"\n self._x = np.random.randint(self._length)\n self._y = np.random.randint(self._length)\n return self._get_obs()\n\n def _get_obs(self):\n \"\"\"Gets current observation.\"\"\"\n if self._tabular_obs:\n return self._x * self._length + self._y\n else:\n return np.array([self._x, self._y])\n\n def get_tabular_obs(self, xy_obs):\n \"\"\"Gets tabular observation given non-tabular (x,y) observation.\"\"\"\n return self._length * xy_obs[Ellipsis, 0] + xy_obs[Ellipsis, 1]\n\n def get_xy_obs(self, state):\n \"\"\"Gets (x,y) coordinates given tabular observation.\"\"\"\n x = state // self._length\n y = state % self._length\n return np.stack([x, y], axis=-1)\n\n def step(self, action):\n \"\"\"Perform a step in the environment.\n\n Args:\n action: A valid action (one of 0, 1, 2, 3).\n\n Returns:\n next_obs: Observation after action is applied.\n reward: Environment step reward.\n done: Whether the episode has terminated.\n info: A dictionary of additional environment information.\n\n Raises:\n ValueError: If the input action is invalid.\n \"\"\"\n if action == 0:\n if self._x < self._length - 1:\n self._x += 1\n elif action == 1:\n if self._y < self._length - 1:\n self._y += 1\n elif action == 2:\n if self._x > 0:\n self._x -= 1\n elif action == 3:\n if self._y > 0:\n self._y -= 1\n else:\n raise ValueError('Invalid action %s.' 
% action)\n taxi_distance = (np.abs(self._x - self._target_x) +\n np.abs(self._y - self._target_y))\n reward = np.exp(-2 * taxi_distance / self._length)\n done = False\n return self._get_obs(), reward, done, {}\n\n @property\n def num_states(self):\n return self._n_state\n\n @property\n def num_actions(self):\n return self._n_action\n\n @property\n def state_dim(self):\n return 1 if self._tabular_obs else 2\n\n @property\n def action_dim(self):\n return self._n_action\n", "# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Alexnet model configuration.\n\nReferences:\n Krizhevsky, Alex, Ilya Sutskever, and Geoffrey E. Hinton\n ImageNet Classification with Deep Convolutional Neural Networks\n Advances in Neural Information Processing Systems. 2012\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v1 as tf\nfrom cnn_quantization.tf_cnn_benchmarks.models import model\n\n\nclass AlexnetModel(model.CNNModel):\n \"\"\"Alexnet cnn model.\"\"\"\n\n def __init__(self, params=None):\n super(AlexnetModel, self).__init__(\n 'alexnet', 224 + 3, 512, 0.005, params=params)\n\n def add_inference(self, cnn):\n # Note: VALID requires padding the images by 3 in width and height\n cnn.conv(64, 11, 11, 4, 4, 'VALID')\n cnn.mpool(3, 3, 2, 2)\n cnn.conv(192, 5, 5)\n cnn.mpool(3, 3, 2, 2)\n cnn.conv(384, 3, 3)\n cnn.conv(384, 3, 3)\n cnn.conv(256, 3, 3)\n cnn.mpool(3, 3, 2, 2)\n cnn.reshape([-1, 256 * 6 * 6])\n cnn.affine(4096)\n cnn.dropout()\n cnn.affine(4096)\n cnn.dropout()\n\n\nclass AlexnetCifar10Model(model.CNNModel):\n \"\"\"Alexnet cnn model for cifar datasets.\n\n The model architecture follows the one defined in the tensorflow tutorial\n model.\n\n Reference model: tensorflow/models/tutorials/image/cifar10/cifar10.py\n Paper: http://www.cs.toronto.edu/~kriz/learning-features-2009-TR.pdf\n \"\"\"\n\n def __init__(self, params=None):\n super(AlexnetCifar10Model, self).__init__(\n 'alexnet', 32, 128, 0.1, params=params)\n\n def add_inference(self, cnn):\n cnn.conv(64, 5, 5, 1, 1, 'SAME', stddev=5e-2)\n cnn.mpool(3, 3, 2, 2, mode='SAME')\n cnn.lrn(depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)\n cnn.conv(64, 5, 5, 1, 1, 'SAME', bias=0.1, stddev=5e-2)\n cnn.lrn(depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)\n cnn.mpool(3, 3, 2, 2, mode='SAME')\n shape = cnn.top_layer.get_shape().as_list()\n flat_dim = shape[1] * shape[2] * shape[3]\n cnn.reshape([-1, flat_dim])\n cnn.affine(384, stddev=0.04, bias=0.1)\n cnn.affine(192, stddev=0.04, bias=0.1)\n\n def get_learning_rate(self, global_step, batch_size):\n num_examples_per_epoch = 50000\n num_epochs_per_decay = 100\n decay_steps = (\n num_epochs_per_decay * num_examples_per_epoch // batch_size)\n decay_factor = 0.1\n return tf.train.exponential_decay(\n self.learning_rate,\n global_step,\n decay_steps,\n decay_factor,\n staircase=True)\n", "# coding=utf-8\n# Copyright 2020 The 
Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# python3\n\"\"\"Tests EBM.\"\"\"\n\nimport numpy as np\nimport tensorflow.compat.v2 as tf\n\nfrom neutra.ebm import train_ebm\n\ntf.enable_v2_behavior()\n\nN_CH = 3 # Number of channels\nN_WH = 32 # Width of the images.\n\n\nclass EBMTest(tf.test.TestCase):\n\n def test_u_valid(self):\n \"\"\"Tests that we can initialize U without error.\"\"\"\n num_samples = 16\n x = tf.random.normal(\n [num_samples, N_WH, N_WH, N_CH], seed=12)\n u = train_ebm.EbmConv(anchor_size=1)\n energy_x = u(x)\n # This should have a higher energy due to the quadratic prior.\n energy_x_far = u(x + tf.ones_like(x))\n self.assertTrue(np.all(energy_x < energy_x_far))\n\n def test_train_q_fwd_kl(self):\n \"\"\"Verify that train_q_fwd_kl doesn't raise any exceptions.\"\"\"\n num_samples = 16\n x = tf.random.normal(\n [num_samples, N_WH, N_WH, N_CH], seed=12)\n q = train_ebm.MeanFieldGaussianQ()\n opt = tf.optimizers.Adam()\n x = tf.random.normal(\n [num_samples, N_WH, N_WH, N_CH], seed=13)\n loss = train_ebm.train_q_fwd_kl(q, x, opt)\n self.assertTrue(np.all(np.isfinite(loss)))\n\n def test_train_q_rev_kl(self):\n \"\"\"Verify that train_q_rev_kl doesn't raise any exceptions.\"\"\"\n q = train_ebm.MeanFieldGaussianQ()\n u = lambda x: tf.reduce_sum(tf.square(x), axis=[1, 2, 3])\n opt = tf.optimizers.Adam()\n loss, entropy = train_ebm.train_q_rev_kl(q, u, opt)\n self.assertTrue(np.all(np.isfinite(loss)))\n self.assertTrue(np.all(np.isfinite(entropy)))\n\n def test_train_q_rev_kl_mle(self):\n \"\"\"Verify that train_q_rev_kl_mle doesn't raise any exceptions.\"\"\"\n num_samples = 16\n x = tf.random.normal(\n [num_samples, N_WH, N_WH, N_CH], seed=12)\n q = train_ebm.MeanFieldGaussianQ()\n u = lambda x: tf.reduce_sum(tf.square(x), axis=[1, 2, 3])\n opt = tf.optimizers.Adam()\n x = tf.random.normal(\n [num_samples, N_WH, N_WH, N_CH], seed=12)\n (loss, entropy, neg_e_q, mle_loss, grads_ebm_norm,\n grads_mle_norm) = train_ebm.train_q_rev_kl_mle(q, u, x, 1., opt)\n self.assertTrue(np.all(np.isfinite(loss)))\n self.assertTrue(np.all(np.isfinite(entropy)))\n self.assertTrue(np.all(np.isfinite(neg_e_q)))\n self.assertTrue(np.all(np.isfinite(mle_loss)))\n self.assertTrue(np.all(np.isfinite(grads_ebm_norm)))\n self.assertTrue(np.all(np.isfinite(grads_mle_norm)))\n\n def test_train_q_mle(self):\n \"\"\"Verify that train_q_mle doesn't raise any exceptions.\"\"\"\n num_samples = 16\n x = tf.random.normal(\n [num_samples, N_WH, N_WH, N_CH], seed=12)\n q = train_ebm.MeanFieldGaussianQ()\n opt = tf.optimizers.Adam()\n x = tf.random.normal(\n [num_samples, N_WH, N_WH, N_CH], seed=13)\n loss = train_ebm.train_q_mle(q, x, opt)\n self.assertTrue(np.all(np.isfinite(loss)))\n\n def test_train_p(self):\n \"\"\"Verify that train_p doesn't raise any exceptions.\"\"\"\n num_samples = 16\n x = tf.random.normal(\n [num_samples, N_WH, N_WH, N_CH], seed=12)\n q = train_ebm.MeanFieldGaussianQ()\n u = train_ebm.EbmConv(anchor_size=1)\n opt = tf.optimizers.Adam()\n x = 
tf.random.normal(\n [num_samples, N_WH, N_WH, N_CH], seed=13)\n (x_neg_q, x_neg_p, p_accept, step_size, pos_e, pos_e_updated, neg_e_q,\n neg_e_p, neg_e_p_updated) = train_ebm.train_p(q, u, x, 0.1, opt)\n self.assertTrue(np.all(np.isfinite(x_neg_q)))\n self.assertTrue(np.all(np.isfinite(x_neg_p)))\n self.assertTrue(np.all(np.isfinite(p_accept)))\n self.assertTrue(np.all(np.isfinite(step_size)))\n self.assertTrue(np.all(np.isfinite(pos_e)))\n self.assertTrue(np.all(np.isfinite(pos_e_updated)))\n self.assertTrue(np.all(np.isfinite(neg_e_q)))\n self.assertTrue(np.all(np.isfinite(neg_e_p)))\n self.assertTrue(np.all(np.isfinite(neg_e_p_updated)))\n\n def test_train_p_mh(self):\n \"\"\"Verify that train_p_mh doesn't raise any exceptions.\"\"\"\n num_samples = 16\n x = tf.random.normal(\n [num_samples, N_WH, N_WH, N_CH], seed=12)\n q = train_ebm.MeanFieldGaussianQ()\n u = train_ebm.EbmConv(anchor_size=1)\n opt = tf.optimizers.Adam()\n x = tf.random.normal(\n [num_samples, N_WH, N_WH, N_CH], seed=13)\n (x_neg_q, x_neg_p, p_accept, step_size, pos_e, pos_e_updated, neg_e_q,\n neg_e_p, neg_e_p_updated) = train_ebm.train_p_mh(q, u, x, 0.1, opt)\n self.assertTrue(np.all(np.isfinite(x_neg_q)))\n self.assertTrue(np.all(np.isfinite(x_neg_p)))\n self.assertTrue(np.all(np.isfinite(p_accept)))\n self.assertTrue(np.all(np.isfinite(step_size)))\n self.assertTrue(np.all(np.isfinite(pos_e)))\n self.assertTrue(np.all(np.isfinite(pos_e_updated)))\n self.assertTrue(np.all(np.isfinite(neg_e_q)))\n self.assertTrue(np.all(np.isfinite(neg_e_p)))\n self.assertTrue(np.all(np.isfinite(neg_e_p_updated)))\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for optimizing QED of a molecule with DQN.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport tempfile\n\nfrom absl import flags\nfrom absl.testing import flagsaver\n\nimport tensorflow.compat.v1 as tf\nfrom tensorflow.compat.v1 import gfile\nfrom mol_dqn.chemgraph import optimize_qed\nfrom mol_dqn.chemgraph.dqn import deep_q_networks\nfrom mol_dqn.chemgraph.dqn.tensorflow_core import core\n\n\nclass OptimizeQedTest(tf.test.TestCase):\n\n def setUp(self):\n super(OptimizeQedTest, self).setUp()\n self.mount_point = tempfile.mkdtemp(dir=flags.FLAGS.test_tmpdir)\n self.model_dir = os.path.join(self.mount_point, 'model_dir')\n gfile.MakeDirs(self.model_dir)\n\n def test_run(self):\n hparams = deep_q_networks.get_hparams(\n replay_buffer_size=100,\n num_episodes=10,\n batch_size=10,\n update_frequency=1,\n save_frequency=1,\n dense_layers=[32],\n fingerprint_length=128,\n fingerprint_radius=2,\n num_bootstrap_heads=12,\n prioritized=True,\n double_q=True)\n hparams_file = os.path.join(self.mount_point, 'config.json')\n core.write_hparams(hparams, hparams_file)\n\n with flagsaver.flagsaver(model_dir=self.model_dir, hparams=hparams_file):\n 
optimize_qed.main(None)\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "tensorflow.compat.v1.concat", "tensorflow.compat.v1.reduce_mean", "tensorflow.compat.v1.global_variables", "tensorflow.compat.v1.variables_initializer", "tensorflow.compat.v1.reshape", "tensorflow.compat.v1.variable_scope" ], [ "numpy.diag", "pandas.concat", "numpy.sqrt", "numpy.random.seed", "tensorflow.compat.v1.test.main", "pandas.MultiIndex.from_tuples", "pandas.util.testing.assert_frame_equal", "numpy.identity", "numpy.array" ], [ "numpy.array", "numpy.zeros" ], [ "numpy.array", "numpy.mean", "numpy.linspace" ], [ "matplotlib.pyplot.imshow", "sklearn.cluster.KMeans", "numpy.bitwise_xor", "numpy.mean", "numpy.random.randint", "numpy.unique", "matplotlib.backends.backend_agg.FigureCanvasAgg", "numpy.eye", "numpy.matmul", "numpy.save", "numpy.std", "numpy.load", "tensorflow.compat.v1.set_random_seed", "numpy.zeros", "numpy.amin", "numpy.array", "numpy.sum", "sklearn.decomposition.PCA", "numpy.abs", "numpy.random.seed", "matplotlib.figure.Figure", "sklearn.linear_model.LogisticRegression", "numpy.ones" ], [ "tensorflow.compat.v1.keras.regularizers.l2" ], [ "numpy.sqrt", "tensorflow.compat.v1.concat", "tensorflow.compat.v1.train.piecewise_constant", "tensorflow.compat.v1.nn.relu", "numpy.array" ], [ "tensorflow.compat.v1.gfile.MakeDirs", "tensorflow.compat.v1.test.main" ], [ "tensorflow.compat.v1.summary.merge_all", "numpy.sum", "tensorflow.compat.v1.train.Coordinator", "tensorflow.compat.v1.train.get_checkpoint_state", "tensorflow.compat.v1.summary.FileWriter", "tensorflow.compat.v1.gfile.Exists", "tensorflow.compat.v1.Session", "tensorflow.compat.v1.train.ExponentialMovingAverage", "tensorflow.compat.v1.get_collection", "tensorflow.compat.v1.Graph", "tensorflow.compat.v1.gfile.MakeDirs", "tensorflow.compat.v1.Summary", "tensorflow.compat.v1.train.Saver", "tensorflow.compat.v1.nn.in_top_k", "tensorflow.compat.v1.gfile.DeleteRecursively", "tensorflow.compat.v1.app.run" ], [ "tensorflow.compat.v1.train.SessionRunArgs", "tensorflow.compat.v1.all_variables", "tensorflow.compat.v1.trainable_variables", "tensorflow.compat.v1.get_variable_scope", "tensorflow.compat.v1.shape", "tensorflow.compat.v1.logging.info", "tensorflow.compat.v1.gfile.IsDirectory", "tensorflow.compat.v1.Print", "tensorflow.compat.v1.train.get_global_step", "tensorflow.compat.v1.train.init_from_checkpoint", "tensorflow.compat.v1.train.NewCheckpointReader", "tensorflow.compat.v1.train.latest_checkpoint", "tensorflow.compat.v1.train.SessionRunHook.__init__" ], [ "tensorflow.initializers.local_variables", "tensorflow.constant", "tensorflow.test.main", "tensorflow.trainable_variables", "tensorflow.reset_default_graph", "tensorflow.set_random_seed", "tensorflow.tables_initializer", "tensorflow.random.set_random_seed", "tensorflow.initializers.global_variables" ], [ "numpy.linspace", "tensorflow.compat.v2.math.softmax", "numpy.fliplr", "numpy.arange", "tensorflow.compat.v2.enable_v2_behavior", "tensorflow.compat.v2.stack", "numpy.apply_along_axis", "tensorflow.compat.v2.random.uniform", "numpy.zeros", "tensorflow.compat.v2.test.main", "numpy.random.choice", "tensorflow.compat.v2.square", "tensorflow.compat.v2.reduce_mean", "tensorflow.compat.v2.one_hot", "numpy.random.rand", "numpy.array", "numpy.random.random", "numpy.random.seed", "numpy.tile", "numpy.ones", "numpy.ndindex" ], [ "tensorflow.compat.v1.gfile.MakeDirs", "tensorflow.compat.v1.gfile.Exists", "tensorflow.compat.v1.logging.info", "tensorflow.compat.v1.gfile.GFile" ], [ "numpy.expand_dims", "numpy.minimum", "tensorflow.compat.v2.train.CheckpointManager", 
"tensorflow.compat.v2.keras.optimizers.Adam", "matplotlib.pyplot.plot", "numpy.concatenate", "tensorflow.compat.v2.keras.backend.set_learning_phase", "tensorflow.compat.v2.argmin", "matplotlib.pyplot.tight_layout", "numpy.arange", "tensorflow.compat.v2.summary.image", "tensorflow.compat.v2.io.gfile.makedirs", "tensorflow.compat.v2.stack", "tensorflow.compat.v2.train.polynomial_decay", "tensorflow.compat.v2.greater_equal", "tensorflow.compat.v2.expand_dims", "tensorflow.compat.v2.math.squared_difference", "tensorflow.compat.v2.gather", "tensorflow.compat.v2.random.uniform", "tensorflow.compat.v2.train.Checkpoint", "numpy.zeros", "matplotlib.pyplot.figure", "tensorflow.compat.v2.unstack", "matplotlib.pyplot.title", "numpy.isnan", "matplotlib.pyplot.ylim", "matplotlib.pyplot.savefig", "tensorflow.compat.v2.one_hot", "tensorflow.compat.v2.keras.optimizers.SGD", "tensorflow.compat.v2.constant", "numpy.array", "numpy.sum", "numpy.maximum", "tensorflow.compat.v2.split", "matplotlib.use", "tensorflow.compat.v2.concat", "tensorflow.compat.v2.io.gfile.exists", "tensorflow.compat.v2.io.gfile.GFile", "tensorflow.compat.v2.cast", "tensorflow.compat.v2.train.exponential_decay", "tensorflow.compat.v2.tile" ], [ "sklearn.metrics.roc_auc_score", "numpy.expand_dims", "tensorflow.compat.v1.compat.v1.data.Dataset.from_tensor_slices", "tensorflow.compat.v1.compat.v1.image.rgb_to_grayscale", "numpy.isnan", "numpy.squeeze", "tensorflow.compat.v1.compat.v1.logging.fatal", "tensorflow.compat.v1.reshape", "tensorflow.compat.v1.compat.v1.log", "numpy.concatenate", "tensorflow.compat.v1.tile", "tensorflow.compat.v1.compat.v1.gfile.Open", "numpy.load", "tensorflow.compat.v1.cast", "numpy.array", "tensorflow.compat.v1.compat.v1.random_uniform", "numpy.vstack" ], [ "tensorflow.compat.v1.concat", "tensorflow.compat.v1.nn.softplus", "tensorflow.compat.v1.reduce_sum", "tensorflow.compat.v1.split", "tensorflow.compat.v1.one_hot" ], [ "numpy.isnan", "numpy.ones", "numpy.roots", "numpy.concatenate", "numpy.zeros_like", "numpy.argsort", "numpy.zeros" ], [ "numpy.abs", "numpy.stack", "numpy.exp", "numpy.array", "numpy.random.randint" ], [ "tensorflow.compat.v1.train.exponential_decay" ], [ "tensorflow.compat.v2.ones_like", "tensorflow.compat.v2.random.normal", "tensorflow.compat.v2.test.main", "numpy.isfinite", "tensorflow.compat.v2.enable_v2_behavior", "tensorflow.compat.v2.square", "numpy.all", "tensorflow.compat.v2.optimizers.Adam" ], [ "tensorflow.compat.v1.gfile.MakeDirs", "tensorflow.compat.v1.test.main" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
qzhong0605/tensorboardplugins
[ "92bfc7ca96b933cdbdf074a08f26f5c715d8421d" ]
[ "tensorboard/plugins/interactive_inference/witwidget/notebook/base.py" ]
[ "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport base64\nimport json\nimport googleapiclient.discovery\nimport os\nimport tensorflow as tf\nfrom IPython import display\nfrom google.protobuf import json_format\nfrom numbers import Number\nfrom six import ensure_str\nfrom tensorboard.plugins.interactive_inference.utils import inference_utils\n\n# Constants used in mutant inference generation.\nNUM_MUTANTS_TO_GENERATE = 10\nNUM_EXAMPLES_FOR_MUTANT_ANALYSIS = 50\n\n# Custom user agent for tracking number of calls to Cloud AI Platform.\nUSER_AGENT_FOR_CAIP_TRACKING = 'WhatIfTool'\n\nclass WitWidgetBase(object):\n \"\"\"WIT widget base class for common code between Jupyter and Colab.\"\"\"\n\n def __init__(self, config_builder):\n \"\"\"Constructor for WitWidgetBase.\n\n Args:\n config_builder: WitConfigBuilder object containing settings for WIT.\n \"\"\"\n tf.logging.set_verbosity(tf.logging.WARN)\n config = config_builder.build()\n copied_config = dict(config)\n self.estimator_and_spec = (\n dict(config.get('estimator_and_spec'))\n if 'estimator_and_spec' in config else {})\n self.compare_estimator_and_spec = (\n dict(config.get('compare_estimator_and_spec'))\n if 'compare_estimator_and_spec' in config else {})\n if 'estimator_and_spec' in copied_config:\n del copied_config['estimator_and_spec']\n if 'compare_estimator_and_spec' in copied_config:\n del copied_config['compare_estimator_and_spec']\n\n self.custom_predict_fn = (\n config.get('custom_predict_fn')\n if 'custom_predict_fn' in config else None)\n self.compare_custom_predict_fn = (\n config.get('compare_custom_predict_fn')\n if 'compare_custom_predict_fn' in config else None)\n self.adjust_prediction_fn = (\n config.get('adjust_prediction')\n if 'adjust_prediction' in config else None)\n self.compare_adjust_prediction_fn = (\n config.get('compare_adjust_prediction')\n if 'compare_adjust_prediction' in config else None)\n self.adjust_example_fn = (\n config.get('adjust_example')\n if 'adjust_example' in config else None)\n self.compare_adjust_example_fn = (\n config.get('compare_adjust_example')\n if 'compare_adjust_example' in config else None)\n if 'custom_predict_fn' in copied_config:\n del copied_config['custom_predict_fn']\n if 'compare_custom_predict_fn' in copied_config:\n del copied_config['compare_custom_predict_fn']\n if 'adjust_prediction' in copied_config:\n del copied_config['adjust_prediction']\n if 'compare_adjust_prediction' in copied_config:\n del copied_config['compare_adjust_prediction']\n if 'adjust_example' in copied_config:\n del copied_config['adjust_example']\n if 'compare_adjust_example' in copied_config:\n del copied_config['compare_adjust_example']\n\n self.set_examples(config['examples'])\n del copied_config['examples']\n\n self.config = copied_config\n\n # If using AI Platform for prediction, set the correct custom prediction\n # functions.\n if self.config.get('use_aip'):\n self.custom_predict_fn = self._predict_aip_model\n if 
self.config.get('compare_use_aip'):\n self.compare_custom_predict_fn = self._predict_aip_compare_model\n\n def _get_element_html(self):\n return \"\"\"\n <link rel=\"import\" href=\"/nbextensions/wit-widget/wit_jupyter.html\">\"\"\"\n\n def set_examples(self, examples):\n \"\"\"Sets the examples shown in WIT.\n\n The examples are initially set by the examples specified in the config\n builder during construction. This method can change which examples WIT\n displays.\n \"\"\"\n self.examples = [json_format.MessageToJson(ex) for ex in examples]\n self.updated_example_indices = set(range(len(examples)))\n\n def json_to_proto(self, json):\n ex = (tf.train.SequenceExample()\n if self.config.get('are_sequence_examples')\n else tf.train.Example())\n json_format.Parse(json, ex)\n return ex\n\n def infer_impl(self):\n \"\"\"Performs inference on examples that require inference.\"\"\"\n indices_to_infer = sorted(self.updated_example_indices)\n examples_to_infer = [\n self.json_to_proto(self.examples[index]) for index in indices_to_infer]\n infer_objs = []\n attribution_objs = []\n serving_bundle = inference_utils.ServingBundle(\n self.config.get('inference_address'),\n self.config.get('model_name'),\n self.config.get('model_type'),\n self.config.get('model_version'),\n self.config.get('model_signature'),\n self.config.get('uses_predict_api'),\n self.config.get('predict_input_tensor'),\n self.config.get('predict_output_tensor'),\n self.estimator_and_spec.get('estimator'),\n self.estimator_and_spec.get('feature_spec'),\n self.custom_predict_fn)\n (predictions, attributions) = (\n inference_utils.run_inference_for_inference_results(\n examples_to_infer, serving_bundle))\n infer_objs.append(predictions)\n attribution_objs.append(attributions)\n if ('inference_address_2' in self.config or\n self.compare_estimator_and_spec.get('estimator') or\n self.compare_custom_predict_fn):\n serving_bundle = inference_utils.ServingBundle(\n self.config.get('inference_address_2'),\n self.config.get('model_name_2'),\n self.config.get('model_type'),\n self.config.get('model_version_2'),\n self.config.get('model_signature_2'),\n self.config.get('uses_predict_api'),\n self.config.get('predict_input_tensor'),\n self.config.get('predict_output_tensor'),\n self.compare_estimator_and_spec.get('estimator'),\n self.compare_estimator_and_spec.get('feature_spec'),\n self.compare_custom_predict_fn)\n (predictions, attributions) = (\n inference_utils.run_inference_for_inference_results(\n examples_to_infer, serving_bundle))\n infer_objs.append(predictions)\n attribution_objs.append(attributions)\n self.updated_example_indices = set()\n return {\n 'inferences': {'indices': indices_to_infer, 'results': infer_objs},\n 'label_vocab': self.config.get('label_vocab'),\n 'attributions': attribution_objs}\n\n def infer_mutants_impl(self, info):\n \"\"\"Performs mutant inference on specified examples.\"\"\"\n example_index = int(info['example_index'])\n feature_name = info['feature_name']\n examples = (self.examples if example_index == -1\n else [self.examples[example_index]])\n examples = [self.json_to_proto(ex) for ex in examples]\n scan_examples = [self.json_to_proto(ex) for ex in self.examples[0:50]]\n serving_bundles = []\n serving_bundles.append(inference_utils.ServingBundle(\n self.config.get('inference_address'),\n self.config.get('model_name'),\n self.config.get('model_type'),\n self.config.get('model_version'),\n self.config.get('model_signature'),\n self.config.get('uses_predict_api'),\n self.config.get('predict_input_tensor'),\n 
self.config.get('predict_output_tensor'),\n self.estimator_and_spec.get('estimator'),\n self.estimator_and_spec.get('feature_spec'),\n self.custom_predict_fn))\n if ('inference_address_2' in self.config or\n self.compare_estimator_and_spec.get('estimator') or\n self.compare_custom_predict_fn):\n serving_bundles.append(inference_utils.ServingBundle(\n self.config.get('inference_address_2'),\n self.config.get('model_name_2'),\n self.config.get('model_type'),\n self.config.get('model_version_2'),\n self.config.get('model_signature_2'),\n self.config.get('uses_predict_api'),\n self.config.get('predict_input_tensor'),\n self.config.get('predict_output_tensor'),\n self.compare_estimator_and_spec.get('estimator'),\n self.compare_estimator_and_spec.get('feature_spec'),\n self.compare_custom_predict_fn))\n viz_params = inference_utils.VizParams(\n info['x_min'], info['x_max'],\n scan_examples, 10,\n info['feature_index_pattern'])\n return inference_utils.mutant_charts_for_feature(\n examples, feature_name, serving_bundles, viz_params)\n\n def get_eligible_features_impl(self):\n \"\"\"Returns information about features eligible for mutant inference.\"\"\"\n examples = [self.json_to_proto(ex) for ex in self.examples[\n 0:NUM_EXAMPLES_FOR_MUTANT_ANALYSIS]]\n return inference_utils.get_eligible_features(\n examples, NUM_MUTANTS_TO_GENERATE)\n\n def create_sprite(self):\n \"\"\"Returns an encoded image of thumbnails for image examples.\"\"\"\n # Generate a sprite image for the examples if the examples contain the\n # standard encoded image feature.\n if not self.examples:\n return None\n example_to_check = self.json_to_proto(self.examples[0])\n feature_list = (example_to_check.context.feature\n if self.config.get('are_sequence_examples')\n else example_to_check.features.feature)\n if 'image/encoded' in feature_list:\n example_strings = [\n self.json_to_proto(ex).SerializeToString()\n for ex in self.examples]\n encoded = ensure_str(base64.b64encode(\n inference_utils.create_sprite_image(example_strings)))\n return 'data:image/png;base64,{}'.format(encoded)\n else:\n return None\n\n def _json_from_tf_examples(self, tf_examples):\n json_exs = []\n feature_names = self.config.get('feature_names')\n for ex in tf_examples:\n # Create a JSON list or dict for each example depending on settings.\n # Strip out any explicitly-labeled target feature from the example.\n # This is needed because AI Platform models that accept JSON cannot handle\n # when non-input features are provided as part of the object to run\n # prediction on.\n if self.config.get('uses_json_list'):\n json_ex = []\n for feat in ex.features.feature:\n if feature_names and feat in feature_names:\n feat_idx = feature_names.index(feat)\n else:\n feat_idx = int(feat)\n if (feat == self.config.get('target_feature') or\n feat_idx == self.config.get('target_feature')):\n continue\n # Ensure the example value list is long enough to add the next feature\n # from the tf.Example.\n if feat_idx >= len(json_ex):\n json_ex.extend([None] * (feat_idx - len(json_ex) + 1))\n if ex.features.feature[feat].HasField('int64_list'):\n json_ex[feat_idx] = ex.features.feature[feat].int64_list.value[0]\n elif ex.features.feature[feat].HasField('float_list'):\n json_ex[feat_idx] = ex.features.feature[feat].float_list.value[0]\n else:\n json_ex[feat_idx] = ensure_str(\n ex.features.feature[feat].bytes_list.value[0])\n else:\n json_ex = {}\n for feat in ex.features.feature:\n if feat == self.config.get('target_feature'):\n continue\n if 
ex.features.feature[feat].HasField('int64_list'):\n json_ex[feat] = ex.features.feature[feat].int64_list.value[0]\n elif ex.features.feature[feat].HasField('float_list'):\n json_ex[feat] = ex.features.feature[feat].float_list.value[0]\n else:\n json_ex[feat] = ensure_str(\n ex.features.feature[feat].bytes_list.value[0])\n json_exs.append(json_ex)\n return json_exs\n\n def _predict_aip_model(self, examples):\n return self._predict_aip_impl(\n examples, self.config.get('inference_address'),\n self.config.get('model_name'), self.config.get('model_signature'),\n self.config.get('force_json_input'), self.adjust_example_fn,\n self.adjust_prediction_fn)\n\n def _predict_aip_compare_model(self, examples):\n return self._predict_aip_impl(\n examples, self.config.get('inference_address_2'),\n self.config.get('model_name_2'), self.config.get('model_signature_2'),\n self.config.get('compare_force_json_input'),\n self.compare_adjust_example_fn,\n self.compare_adjust_prediction_fn)\n\n def _predict_aip_impl(self, examples, project, model, version, force_json,\n adjust_example, adjust_prediction):\n \"\"\"Custom prediction function for running inference through AI Platform.\"\"\"\n\n # Set up environment for GCP call for specified project.\n os.environ['GOOGLE_CLOUD_PROJECT'] = project\n\n service = googleapiclient.discovery.build('ml', 'v1', cache_discovery=False)\n name = 'projects/{}/models/{}'.format(project, model)\n if version is not None:\n name += '/versions/{}'.format(version)\n\n # Properly package the examples to send for prediction.\n if self.config.get('uses_json_input') or force_json:\n examples_for_predict = self._json_from_tf_examples(examples)\n else:\n examples_for_predict = [{'b64': base64.b64encode(\n example.SerializeToString()).decode('utf-8') }\n for example in examples]\n\n # If there is a user-specified input example adjustment to make, make it.\n if adjust_example:\n examples_for_predict = [\n adjust_example(ex) for ex in examples_for_predict]\n\n # Send request, including custom user-agent for tracking.\n request_builder = service.projects().predict(\n name=name,\n body={'instances': examples_for_predict}\n )\n user_agent = request_builder.headers.get('user-agent')\n request_builder.headers['user-agent'] = (\n USER_AGENT_FOR_CAIP_TRACKING + ('-' + user_agent if user_agent else ''))\n response = request_builder.execute()\n\n if 'error' in response:\n raise RuntimeError(response['error'])\n\n # Get the key to extract the prediction results from.\n results_key = self.config.get('predict_output_tensor')\n if results_key is None:\n if self.config.get('model_type') == 'classification':\n results_key = 'probabilities'\n else:\n results_key = 'outputs'\n\n # Parse the results from the response and return them.\n results = []\n attributions = (response['attributions']\n if 'attributions' in response else None)\n for pred in response['predictions']:\n # If the prediction contains a key to fetch the prediction, use it.\n if isinstance(pred, dict):\n pred = pred[results_key]\n # If the model is regression and the response is a list, extract the\n # score by taking the first element.\n if (self.config.get('model_type') == 'regression' and\n isinstance(pred, list)):\n pred = pred[0]\n # If an prediction adjustment function was provided, use it to adjust\n # the prediction.\n if adjust_prediction:\n pred = adjust_prediction(pred)\n results.append(pred)\n return {'predictions': results, 'attributions': attributions}\n" ]
[ [ "tensorflow.train.SequenceExample", "tensorflow.logging.set_verbosity", "tensorflow.train.Example" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
J-Z-Z/akshare
[ "0a9ca71b381a272e2f56211e455ff2493dfed17a", "0a9ca71b381a272e2f56211e455ff2493dfed17a", "0a9ca71b381a272e2f56211e455ff2493dfed17a", "0a9ca71b381a272e2f56211e455ff2493dfed17a", "0a9ca71b381a272e2f56211e455ff2493dfed17a", "0a9ca71b381a272e2f56211e455ff2493dfed17a" ]
[ "akshare/futures_derivative/nh_index_price.py", "akshare/stock/stock_rank_forecast.py", "akshare/index/index_cflp.py", "akshare/stock_feature/stock_wencai.py", "akshare/economic/macro_australia.py", "akshare/movie/video_yien.py" ]
[ "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\"\"\"\nDate: 2021/12/20 14:52\nDesc: 南华期货-商品指数历史走势-价格指数-数值\nhttp://www.nanhua.net/nhzc/varietytrend.html\n1000 点开始, 用收益率累计\nhttp://www.nanhua.net/ianalysis/varietyindex/price/A.json?t=1574932974280\n\"\"\"\nimport time\n\nimport requests\nimport pandas as pd\n\n\ndef futures_nh_index_symbol_table() -> pd.DataFrame:\n \"\"\"\n 南华期货-南华指数所有品种一览表\n http://www.nanhua.net/ianalysis/varietyindex/price/A.json?t=1574932974280\n :return: 南华指数所有品种一览表\n :rtype: pandas.DataFrame\n \"\"\"\n url = \"http://www.nanhua.net/ianalysis/plate-variety.json\"\n r = requests.get(url)\n data_json = r.json()\n temp_df = pd.DataFrame(data_json)\n temp_df['firstday'] = pd.to_datetime(temp_df['firstday']).dt.date\n return temp_df\n\n\ndef futures_nh_price_index(symbol: str = \"A\") -> pd.DataFrame:\n \"\"\"\n 南华期货-南华指数单品种-价格-所有历史数据\n http://www.nanhua.net/ianalysis/varietyindex/price/A.json?t=1574932974280\n :param symbol: 通过 ak.futures_nh_index_symbol_table() 获取\n :type symbol: str\n :return: 南华期货-南华指数单品种-价格-所有历史数据\n :rtype: pandas.Series\n \"\"\"\n symbol_df = futures_nh_index_symbol_table()\n if symbol in symbol_df[\"code\"].tolist():\n t = time.time()\n url = f\"http://www.nanhua.net/ianalysis/varietyindex/price/{symbol}.json?t={int(round(t * 1000))}\"\n r = requests.get(url)\n data_json = r.json()\n temp_df = pd.DataFrame(data_json)\n temp_df.columns = [\"date\", \"value\"]\n temp_df['date'] = pd.to_datetime(temp_df[\"date\"], unit='ms').dt.date\n return temp_df\n\n\nif __name__ == \"__main__\":\n futures_nh_index_symbol_table_df = futures_nh_index_symbol_table()\n print(futures_nh_index_symbol_table_df)\n\n futures_nh_price_index_df = futures_nh_price_index(symbol=\"NHAI\")\n print(futures_nh_price_index_df)\n", "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\"\"\"\nDate: 2021/9/12 18:29\nDesc: 巨潮资讯-数据中心-评级预测-投资评级\nhttp://webapi.cninfo.com.cn/#/thematicStatistics?name=%E6%8A%95%E8%B5%84%E8%AF%84%E7%BA%A7\n\"\"\"\nimport time\nfrom py_mini_racer import py_mini_racer\nimport requests\nimport pandas as pd\n\njs_str = \"\"\"\n function mcode(input) { \n var keyStr = \"ABCDEFGHIJKLMNOP\" + \"QRSTUVWXYZabcdef\" + \"ghijklmnopqrstuv\" + \"wxyz0123456789+/\" + \"=\"; \n var output = \"\"; \n var chr1, chr2, chr3 = \"\"; \n var enc1, enc2, enc3, enc4 = \"\"; \n var i = 0; \n do { \n chr1 = input.charCodeAt(i++); \n chr2 = input.charCodeAt(i++); \n chr3 = input.charCodeAt(i++); \n enc1 = chr1 >> 2; \n enc2 = ((chr1 & 3) << 4) | (chr2 >> 4); \n enc3 = ((chr2 & 15) << 2) | (chr3 >> 6); \n enc4 = chr3 & 63; \n if (isNaN(chr2)) { \n enc3 = enc4 = 64; \n } else if (isNaN(chr3)) { \n enc4 = 64; \n } \n output = output + keyStr.charAt(enc1) + keyStr.charAt(enc2) \n + keyStr.charAt(enc3) + keyStr.charAt(enc4); \n chr1 = chr2 = chr3 = \"\"; \n enc1 = enc2 = enc3 = enc4 = \"\"; \n } while (i < input.length); \n \n return output; \n } \n\"\"\"\n\n\ndef stock_rank_forecast_cninfo(date: str = \"20210910\") -> pd.DataFrame:\n \"\"\"\n 巨潮资讯-数据中心-评级预测-投资评级\n http://webapi.cninfo.com.cn/#/thematicStatistics?name=%E6%8A%95%E8%B5%84%E8%AF%84%E7%BA%A7\n :param date: 查询日期\n :type date: str\n :return: 投资评级\n :rtype: pandas.DataFrame\n \"\"\"\n url = \"http://webapi.cninfo.com.cn/api/sysapi/p_sysapi1089\"\n params = {\"tdate\": \"-\".join([date[:4], date[4:6], date[6:]])}\n random_time_str = str(int(time.time()))\n js_code = py_mini_racer.MiniRacer()\n js_code.eval(js_str)\n mcode = js_code.call(\"mcode\", random_time_str)\n headers = {\n \"Accept\": \"*/*\",\n \"Accept-Encoding\": \"gzip, 
deflate\",\n \"Accept-Language\": \"zh-CN,zh;q=0.9,en;q=0.8\",\n \"Cache-Control\": \"no-cache\",\n \"Content-Length\": \"0\",\n \"Host\": \"webapi.cninfo.com.cn\",\n \"mcode\": mcode,\n \"Origin\": \"http://webapi.cninfo.com.cn\",\n \"Pragma\": \"no-cache\",\n \"Proxy-Connection\": \"keep-alive\",\n \"Referer\": \"http://webapi.cninfo.com.cn/\",\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36\",\n \"X-Requested-With\": \"XMLHttpRequest\",\n }\n r = requests.post(url, params=params, headers=headers)\n data_json = r.json()\n temp_df = pd.DataFrame(data_json[\"records\"])\n temp_df.columns = [\n \"证券简称\",\n \"发布日期\",\n \"前一次投资评级\",\n \"评级变化\",\n \"目标价格-上限\",\n \"是否首次评级\",\n \"投资评级\",\n \"研究员名称\",\n \"研究机构简称\",\n \"目标价格-下限\",\n \"证券代码\",\n ]\n temp_df = temp_df[[\n \"证券代码\",\n \"证券简称\",\n \"发布日期\",\n \"研究机构简称\",\n \"研究员名称\",\n \"投资评级\",\n \"是否首次评级\",\n \"评级变化\",\n \"前一次投资评级\",\n \"目标价格-下限\",\n \"目标价格-上限\",\n ]]\n temp_df[\"目标价格-上限\"] = pd.to_numeric(temp_df[\"目标价格-上限\"], errors=\"coerce\")\n temp_df[\"目标价格-下限\"] = pd.to_numeric(temp_df[\"目标价格-下限\"], errors=\"coerce\")\n return temp_df\n\n\nif __name__ == \"__main__\":\n stock_rank_forecast_cninfo_df = stock_rank_forecast_cninfo(date=\"20210907\")\n print(stock_rank_forecast_cninfo_df)\n", "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\"\"\"\nDate: 2021/12/27 15:47\nDesc: 中国公路物流运价、运量指数\nhttp://index.0256.cn/expx.htm\n\"\"\"\nimport pandas as pd\nimport requests\n\n\ndef index_cflp_price(symbol: str = \"周指数\") -> pd.DataFrame:\n \"\"\"\n 中国公路物流运价指数\n http://index.0256.cn/expx.htm\n :param symbol: choice of {\"周指数\", \"月指数\", \"季度指数\", \"年度指数\"}\n :type symbol: str\n :return: 中国公路物流运价指数\n :rtype: pandas.DataFrame\n \"\"\"\n symbol_map = {\n \"周指数\": \"2\",\n \"月指数\": \"3\",\n \"季度指数\": \"4\",\n \"年度指数\": \"5\",\n }\n url = \"http://index.0256.cn/expcenter_trend.action\"\n params = {\n \"marketId\": \"1\",\n \"attribute1\": \"5\",\n \"exponentTypeId\": symbol_map[symbol],\n \"cateId\": \"2\",\n \"attribute2\": \"华北\",\n \"city\": \"\",\n \"startLine\": \"\",\n \"endLine\": \"\",\n }\n headers = {\n \"Origin\": \"http://index.0256.cn\",\n \"Referer\": \"http://index.0256.cn/expx.htm\",\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36\",\n }\n r = requests.post(url, data=params, headers=headers)\n data_json = r.json()\n temp_df = pd.DataFrame(\n [\n data_json[\"chart1\"][\"xLebal\"],\n data_json[\"chart1\"][\"yLebal\"],\n data_json[\"chart2\"][\"yLebal\"],\n data_json[\"chart3\"][\"yLebal\"],\n ]\n ).T\n temp_df.columns = [\"日期\", \"定基指数\", \"环比指数\", \"同比指数\"]\n temp_df[\"日期\"] = pd.to_datetime(temp_df[\"日期\"]).dt.date\n temp_df[\"定基指数\"] = pd.to_numeric(temp_df[\"定基指数\"])\n temp_df[\"环比指数\"] = pd.to_numeric(temp_df[\"环比指数\"])\n temp_df[\"同比指数\"] = pd.to_numeric(temp_df[\"同比指数\"])\n return temp_df\n\n\ndef index_cflp_volume(symbol: str = \"月指数\") -> pd.DataFrame:\n \"\"\"\n 中国公路物流运量指数\n http://index.0256.cn/expx.htm\n :param symbol: choice of {\"月指数\", \"季度指数\", \"年度指数\"}\n :type symbol: str\n :return: 中国公路物流运量指数\n :rtype: pandas.DataFrame\n \"\"\"\n symbol_map = {\n \"月指数\": \"3\",\n \"季度指数\": \"4\",\n \"年度指数\": \"5\",\n }\n url = \"http://index.0256.cn/volume_query.action\"\n params = {\n \"type\": \"1\",\n \"marketId\": \"1\",\n \"expTypeId\": symbol_map[symbol],\n \"startDate1\": \"\",\n \"endDate1\": \"\",\n \"city\": \"\",\n \"startDate3\": \"\",\n \"endDate3\": \"\",\n 
}\n headers = {\n \"Origin\": \"http://index.0256.cn\",\n \"Referer\": \"http://index.0256.cn/expx.htm\",\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36\",\n }\n r = requests.post(url, data=params, headers=headers)\n data_json = r.json()\n temp_df = pd.DataFrame(\n [\n data_json[\"chart1\"][\"xLebal\"],\n data_json[\"chart1\"][\"yLebal\"],\n data_json[\"chart2\"][\"yLebal\"],\n data_json[\"chart3\"][\"yLebal\"],\n ]\n ).T\n temp_df.columns = [\"日期\", \"定基指数\", \"环比指数\", \"同比指数\"]\n temp_df[\"日期\"] = pd.to_datetime(temp_df[\"日期\"]).dt.date\n temp_df[\"定基指数\"] = pd.to_numeric(temp_df[\"定基指数\"])\n temp_df[\"环比指数\"] = pd.to_numeric(temp_df[\"环比指数\"])\n temp_df[\"同比指数\"] = pd.to_numeric(temp_df[\"同比指数\"])\n return temp_df\n\n\nif __name__ == \"__main__\":\n index_cflp_price_df = index_cflp_price(symbol=\"周指数\")\n print(index_cflp_price_df)\n\n index_cflp_price_df = index_cflp_price(symbol=\"月指数\")\n print(index_cflp_price_df)\n\n index_cflp_price_df = index_cflp_price(symbol=\"季度指数\")\n print(index_cflp_price_df)\n\n index_cflp_price_df = index_cflp_price(symbol=\"年度指数\")\n print(index_cflp_price_df)\n\n index_cflp_volume_df = index_cflp_volume(symbol=\"月指数\")\n print(index_cflp_volume_df)\n\n index_cflp_volume_df = index_cflp_volume(symbol=\"季度指数\")\n print(index_cflp_volume_df)\n\n index_cflp_volume_df = index_cflp_volume(symbol=\"年度指数\")\n print(index_cflp_volume_df)\n", "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\"\"\"\nDate: 2021/9/29 21:11\nDesc: 问财-热门股票排名\nhttp://www.iwencai.com/unifiedwap/home/index\n\"\"\"\nimport os\n\nimport pandas as pd\nimport requests\nfrom py_mini_racer import py_mini_racer\n\n\ndef _get_js_path_ths(name: str = None, module_file: str = None) -> str:\n \"\"\"\n 获取 JS 文件的路径(从模块所在目录查找)\n :param name: 文件名\n :type name: str\n :param module_file: 模块路径\n :type module_file: str\n :return: 路径\n :rtype: str\n \"\"\"\n module_folder = os.path.abspath(os.path.dirname(os.path.dirname(module_file)))\n module_json_path = os.path.join(module_folder, \"stock_feature\", name)\n return module_json_path\n\n\ndef _get_file_content_ths(file_name: str = \"ase.min.js\") -> str:\n \"\"\"\n 获取 JS 文件的内容\n :param file_name: JS 文件名\n :type file_name: str\n :return: 文件内容\n :rtype: str\n \"\"\"\n setting_file_name = file_name\n setting_file_path = _get_js_path_ths(setting_file_name, __file__)\n with open(setting_file_path) as f:\n file_data = f.read()\n return file_data\n\n\ndef stock_wc_hot_rank(date: str = \"20210430\") -> pd.DataFrame:\n \"\"\"\n 问财-热门股票排名\n http://www.iwencai.com/unifiedwap/result?w=%E7%83%AD%E9%97%A85000%E8%82%A1%E7%A5%A8&querytype=stock&issugs&sign=1620126514335\n :param date: 查询日期\n :type date: str\n :return: 热门股票排名\n :rtype: pandas.DataFrame\n \"\"\"\n url = \"http://www.iwencai.com/unifiedwap/unified-wap/v2/result/get-robot-data\"\n js_code = py_mini_racer.MiniRacer()\n js_content = _get_file_content_ths(\"ths.js\")\n js_code.eval(js_content)\n v_code = js_code.call(\"v\")\n headers = {\n \"hexin-v\": v_code,\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.93 Safari/537.36\",\n }\n params = {\n \"question\": f\"{date}热门5000股票\",\n \"perpage\": \"5000\",\n \"page\": \"1\",\n \"secondary_intent\": \"\",\n \"log_info\": '{\"input_type\":\"click\"}',\n \"source\": \"Ths_iwencai_Xuangu\",\n \"version\": \"2.0\",\n \"query_area\": \"\",\n \"block_list\": \"\",\n \"add_info\": 
'{\"urp\":{\"scene\":1,\"company\":1,\"business\":1},\"contentType\":\"json\"}',\n }\n r = requests.post(url, data=params, headers=headers)\n data_json = r.json()\n temp_df = pd.DataFrame(\n data_json[\"data\"][\"answer\"][0][\"txt\"][0][\"content\"][\"components\"][0][\"data\"][\n \"datas\"\n ]\n )\n temp_df.reset_index(inplace=True)\n temp_df[\"index\"] = range(1, len(temp_df) + 1)\n try:\n rank_date_str = temp_df.columns[1].split(\"[\")[1].strip(\"]\")\n except:\n try:\n rank_date_str = temp_df.columns[2].split(\"[\")[1].strip(\"]\")\n except:\n rank_date_str = date\n temp_df.rename(\n columns={\n \"index\": \"序号\",\n f\"个股热度排名[{rank_date_str}]\": \"个股热度排名\",\n f\"个股热度[{rank_date_str}]\": \"个股热度\",\n \"code\": \"股票代码\",\n \"market_code\": \"_\",\n \"最新涨跌幅\": \"涨跌幅\",\n \"最新价\": \"现价\",\n \"股票代码\": \"_\",\n },\n inplace=True,\n )\n temp_df = temp_df[\n [\n \"序号\",\n \"股票代码\",\n \"股票简称\",\n \"现价\",\n \"涨跌幅\",\n \"个股热度\",\n \"个股热度排名\",\n ]\n ]\n temp_df[\"涨跌幅\"] = round(temp_df[\"涨跌幅\"].astype(float), 2)\n temp_df[\"排名日期\"] = rank_date_str\n temp_df['现价'] = pd.to_numeric(temp_df['现价'])\n return temp_df\n\n\nif __name__ == \"__main__\":\n stock_wc_hot_rank_df = stock_wc_hot_rank(date=\"20210429\")\n print(stock_wc_hot_rank_df)\n", "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\"\"\"\nDate: 2021/12/15 15:26\nDesc: 东方财富-经济数据-澳大利亚\nhttp://data.eastmoney.com/cjsj/foreign_5_0.html\n\"\"\"\nimport pandas as pd\nimport requests\n\nfrom akshare.utils import demjson\n\n\n# 零售销售月率\ndef macro_australia_retail_rate_monthly() -> pd.DataFrame:\n \"\"\"\n 东方财富-经济数据-澳大利亚-零售销售月率\n http://data.eastmoney.com/cjsj/foreign_5_0.html\n :return: 零售销售月率\n :rtype: pandas.DataFrame\n \"\"\"\n url = \"http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx\"\n params = {\n \"type\": \"GJZB\",\n \"sty\": \"HKZB\",\n \"js\": \"({data:[(x)],pages:(pc)})\",\n \"p\": \"1\",\n \"ps\": \"2000\",\n \"mkt\": \"5\",\n \"stat\": \"0\",\n \"pageNo\": \"1\",\n \"pageNum\": \"1\",\n \"_\": \"1625474966006\",\n }\n r = requests.get(url, params=params)\n data_text = r.text\n data_json = demjson.decode(data_text[1:-1])\n temp_df = pd.DataFrame([item.split(\",\") for item in data_json[\"data\"]])\n temp_df.columns = [\n \"时间\",\n \"前值\",\n \"现值\",\n \"发布日期\",\n ]\n temp_df['时间'] = pd.to_datetime(temp_df['时间']).dt.date\n temp_df[\"前值\"] = pd.to_numeric(temp_df[\"前值\"])\n temp_df[\"现值\"] = pd.to_numeric(temp_df[\"现值\"])\n temp_df['发布日期'] = pd.to_datetime(temp_df['发布日期']).dt.date\n return temp_df\n\n\n# 贸易帐\ndef macro_australia_trade() -> pd.DataFrame:\n \"\"\"\n 东方财富-经济数据-澳大利亚-贸易帐\n http://data.eastmoney.com/cjsj/foreign_5_1.html\n :return: 贸易帐\n :rtype: pandas.DataFrame\n \"\"\"\n url = \"http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx\"\n params = {\n \"type\": \"GJZB\",\n \"sty\": \"HKZB\",\n \"js\": \"({data:[(x)],pages:(pc)})\",\n \"p\": \"1\",\n \"ps\": \"2000\",\n \"mkt\": \"5\",\n \"stat\": \"1\",\n \"pageNo\": \"1\",\n \"pageNum\": \"1\",\n \"_\": \"1625474966006\",\n }\n r = requests.get(url, params=params)\n data_text = r.text\n data_json = demjson.decode(data_text[1:-1])\n temp_df = pd.DataFrame([item.split(\",\") for item in data_json[\"data\"]])\n temp_df.columns = [\n \"时间\",\n \"前值\",\n \"现值\",\n \"发布日期\",\n ]\n temp_df['时间'] = pd.to_datetime(temp_df['时间']).dt.date\n temp_df[\"前值\"] = pd.to_numeric(temp_df[\"前值\"])\n temp_df[\"现值\"] = pd.to_numeric(temp_df[\"现值\"])\n temp_df['发布日期'] = pd.to_datetime(temp_df['发布日期']).dt.date\n return temp_df\n\n\n# 失业率\ndef macro_australia_unemployment_rate() -> pd.DataFrame:\n 
\"\"\"\n 东方财富-经济数据-澳大利亚-失业率\n http://data.eastmoney.com/cjsj/foreign_5_2.html\n :return: 失业率\n :rtype: pandas.DataFrame\n \"\"\"\n url = \"http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx\"\n params = {\n \"type\": \"GJZB\",\n \"sty\": \"HKZB\",\n \"js\": \"({data:[(x)],pages:(pc)})\",\n \"p\": \"1\",\n \"ps\": \"2000\",\n \"mkt\": \"5\",\n \"stat\": \"2\",\n \"_\": \"1625474966006\",\n }\n r = requests.get(url, params=params)\n data_text = r.text\n data_json = demjson.decode(data_text[1:-1])\n temp_df = pd.DataFrame([item.split(\",\") for item in data_json[\"data\"]])\n temp_df.columns = [\n \"时间\",\n \"前值\",\n \"现值\",\n \"发布日期\",\n ]\n temp_df['时间'] = pd.to_datetime(temp_df['时间']).dt.date\n temp_df[\"前值\"] = pd.to_numeric(temp_df[\"前值\"])\n temp_df[\"现值\"] = pd.to_numeric(temp_df[\"现值\"])\n temp_df['发布日期'] = pd.to_datetime(temp_df['发布日期']).dt.date\n return temp_df\n\n\n# 生产者物价指数季率\ndef macro_australia_ppi_quarterly() -> pd.DataFrame:\n \"\"\"\n 东方财富-经济数据-澳大利亚-生产者物价指数季率\n http://data.eastmoney.com/cjsj/foreign_5_3.html\n :return: 生产者物价指数季率\n :rtype: pandas.DataFrame\n \"\"\"\n url = \"http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx\"\n params = {\n \"type\": \"GJZB\",\n \"sty\": \"HKZB\",\n \"js\": \"({data:[(x)],pages:(pc)})\",\n \"p\": \"1\",\n \"ps\": \"2000\",\n \"mkt\": \"5\",\n \"stat\": \"3\",\n 'pageNo': '1',\n 'pageNum': '1',\n \"_\": \"1625474966006\",\n }\n r = requests.get(url, params=params)\n data_text = r.text\n data_json = demjson.decode(data_text[1:-1])\n temp_df = pd.DataFrame([item.split(\",\") for item in data_json[\"data\"]])\n temp_df.columns = [\n \"时间\",\n \"前值\",\n \"现值\",\n \"发布日期\",\n ]\n temp_df['时间'] = pd.to_datetime(temp_df['时间']).dt.date\n temp_df[\"前值\"] = pd.to_numeric(temp_df[\"前值\"])\n temp_df[\"现值\"] = pd.to_numeric(temp_df[\"现值\"])\n temp_df['发布日期'] = pd.to_datetime(temp_df['发布日期']).dt.date\n return temp_df\n\n\n# 消费者物价指数季率\ndef macro_australia_cpi_quarterly() -> pd.DataFrame:\n \"\"\"\n 东方财富-经济数据-澳大利亚-消费者物价指数季率\n http://data.eastmoney.com/cjsj/foreign_5_4.html\n :return: 消费者物价指数季率\n :rtype: pandas.DataFrame\n \"\"\"\n url = \"http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx\"\n params = {\n \"type\": \"GJZB\",\n \"sty\": \"HKZB\",\n \"js\": \"({data:[(x)],pages:(pc)})\",\n \"p\": \"1\",\n \"ps\": \"2000\",\n \"mkt\": \"5\",\n \"stat\": \"4\",\n 'pageNo': '1',\n 'pageNum': '1',\n \"_\": \"1625474966006\",\n }\n r = requests.get(url, params=params)\n data_text = r.text\n data_json = demjson.decode(data_text[1:-1])\n temp_df = pd.DataFrame([item.split(\",\") for item in data_json[\"data\"]])\n temp_df.columns = [\n \"时间\",\n \"前值\",\n \"现值\",\n \"发布日期\",\n ]\n temp_df['时间'] = pd.to_datetime(temp_df['时间']).dt.date\n temp_df[\"前值\"] = pd.to_numeric(temp_df[\"前值\"])\n temp_df[\"现值\"] = pd.to_numeric(temp_df[\"现值\"])\n temp_df['发布日期'] = pd.to_datetime(temp_df['发布日期']).dt.date\n return temp_df\n\n\n# 消费者物价指数年率\ndef macro_australia_cpi_yearly() -> pd.DataFrame:\n \"\"\"\n 东方财富-经济数据-澳大利亚-消费者物价指数年率\n http://data.eastmoney.com/cjsj/foreign_5_5.html\n :return: 消费者物价指数年率\n :rtype: pandas.DataFrame\n \"\"\"\n url = \"http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx\"\n params = {\n \"type\": \"GJZB\",\n \"sty\": \"HKZB\",\n \"js\": \"({data:[(x)],pages:(pc)})\",\n \"p\": \"1\",\n \"ps\": \"2000\",\n \"mkt\": \"5\",\n \"stat\": \"5\",\n 'pageNo': '1',\n 'pageNum': '1',\n \"_\": \"1625474966006\",\n }\n r = requests.get(url, params=params)\n data_text = r.text\n data_json = demjson.decode(data_text[1:-1])\n temp_df = 
pd.DataFrame([item.split(\",\") for item in data_json[\"data\"]])\n temp_df.columns = [\n \"时间\",\n \"前值\",\n \"现值\",\n \"发布日期\",\n ]\n temp_df['时间'] = pd.to_datetime(temp_df['时间']).dt.date\n temp_df[\"前值\"] = pd.to_numeric(temp_df[\"前值\"])\n temp_df[\"现值\"] = pd.to_numeric(temp_df[\"现值\"])\n temp_df['发布日期'] = pd.to_datetime(temp_df['发布日期']).dt.date\n return temp_df\n\n\n# 央行公布利率决议\ndef macro_australia_bank_rate() -> pd.DataFrame:\n \"\"\"\n 东方财富-经济数据-澳大利亚-央行公布利率决议\n http://data.eastmoney.com/cjsj/foreign_5_6.html\n :return: 央行公布利率决议\n :rtype: pandas.DataFrame\n \"\"\"\n url = \"http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx\"\n params = {\n \"type\": \"GJZB\",\n \"sty\": \"HKZB\",\n \"js\": \"({data:[(x)],pages:(pc)})\",\n \"p\": \"1\",\n \"ps\": \"2000\",\n \"mkt\": \"5\",\n \"stat\": \"6\",\n 'pageNo': '1',\n 'pageNum': '1',\n \"_\": \"1625474966006\",\n }\n r = requests.get(url, params=params)\n data_text = r.text\n data_json = demjson.decode(data_text[1:-1])\n temp_df = pd.DataFrame([item.split(\",\") for item in data_json[\"data\"]])\n temp_df.columns = [\n \"时间\",\n \"前值\",\n \"现值\",\n \"发布日期\",\n ]\n temp_df['时间'] = pd.to_datetime(temp_df['时间']).dt.date\n temp_df[\"前值\"] = pd.to_numeric(temp_df[\"前值\"])\n temp_df[\"现值\"] = pd.to_numeric(temp_df[\"现值\"])\n temp_df['发布日期'] = pd.to_datetime(temp_df['发布日期']).dt.date\n return temp_df\n\n\nif __name__ == '__main__':\n macro_australia_retail_rate_monthly_df = macro_australia_retail_rate_monthly()\n print(macro_australia_retail_rate_monthly_df)\n\n macro_australia_trade_df = macro_australia_trade()\n print(macro_australia_trade_df)\n\n macro_australia_unemployment_rate_df = macro_australia_unemployment_rate()\n print(macro_australia_unemployment_rate_df)\n\n macro_australia_ppi_quarterly_df = macro_australia_ppi_quarterly()\n print(macro_australia_ppi_quarterly_df)\n\n macro_australia_cpi_quarterly_df = macro_australia_cpi_quarterly()\n print(macro_australia_cpi_quarterly_df)\n\n macro_australia_cpi_yearly_df = macro_australia_cpi_yearly()\n print(macro_australia_cpi_yearly_df)\n\n macro_australia_bank_rate_df = macro_australia_bank_rate()\n print(macro_australia_bank_rate_df)\n", "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\"\"\"\nDate: 2021/11/15 20:40\nDesc: 艺恩\n视频放映\n电视剧集\n综艺节目\nhttps://www.endata.com.cn/Video/index.html\n\"\"\"\nimport json\nimport os\n\nimport pandas as pd # type: ignore\nimport requests\nfrom py_mini_racer import py_mini_racer # type: ignore\n\n\ndef _get_js_path(name: str = \"\", module_file: str = \"\") -> str:\n \"\"\"\n get JS file path\n :param name: file name\n :type name: str\n :param module_file: filename\n :type module_file: str\n :return: 路径\n :rtype: str\n \"\"\"\n module_folder = os.path.abspath(os.path.dirname(os.path.dirname(module_file)))\n module_json_path = os.path.join(module_folder, \"movie\", name)\n return module_json_path\n\n\ndef _get_file_content(file_name: str = \"jm.js\"):\n \"\"\"\n read the file content\n :param file_name: filename\n :type file_name: str\n :return: file content\n :rtype: str\n \"\"\"\n setting_file_name = file_name\n setting_file_path = _get_js_path(setting_file_name, __file__)\n with open(setting_file_path) as f:\n file_data = f.read()\n return file_data\n\n\ndef decrypt(origin_data: str = \"\") -> str:\n \"\"\"\n 解密艺恩的加密数据\n :param origin_data: 解密前的字符串\n :type origin_data: str\n :return: 解密后的字符串\n :rtype: str\n \"\"\"\n file_data = _get_file_content(file_name=\"jm.js\")\n ctx = py_mini_racer.MiniRacer()\n ctx.eval(file_data)\n data = 
ctx.call(\"webInstace.shell\", origin_data)\n return data\n\n\ndef video_tv() -> pd.DataFrame:\n \"\"\"\n 艺恩-视频放映-电视剧集\n https://www.endata.com.cn/Video/index.html\n :return: 电视剧集\n :rtype: pandas.DataFrame\n \"\"\"\n url = \"https://www.endata.com.cn/API/GetData.ashx\"\n payload = {\"tvType\": 2, \"MethodName\": \"BoxOffice_GetTvData_PlayIndexRank\"}\n r = requests.post(url, data=payload)\n r.encoding = \"utf8\"\n data_json = json.loads(decrypt(r.text))\n temp_df = pd.DataFrame(data_json[\"Data\"][\"Table\"])\n report_date = data_json[\"Data\"][\"Table1\"][0][\"MaxDate\"]\n temp_df.columns = [\"排序\", \"名称\", \"类型\", \"播映指数\", \"用户热度\", \"媒体热度\", \"观看度\", \"好评度\"]\n temp_df = temp_df[[\"排序\", \"名称\", \"类型\", \"播映指数\", \"媒体热度\", \"用户热度\", \"好评度\", \"观看度\"]]\n temp_df[\"统计日期\"] = report_date\n return temp_df\n\n\ndef video_variety_show() -> pd.DataFrame:\n \"\"\"\n 艺恩-视频放映-综艺节目\n https://www.endata.com.cn/Video/index.html\n :return: 综艺节目\n :rtype: pandas.DataFrame\n \"\"\"\n url = \"https://www.endata.com.cn/API/GetData.ashx\"\n payload = {\"tvType\": 8, \"MethodName\": \"BoxOffice_GetTvData_PlayIndexRank\"}\n r = requests.post(url, data=payload)\n r.encoding = \"utf8\"\n data_json = json.loads(decrypt(r.text))\n temp_df = pd.DataFrame(data_json[\"Data\"][\"Table\"])\n report_date = data_json[\"Data\"][\"Table1\"][0][\"MaxDate\"]\n temp_df.columns = [\"排序\", \"名称\", \"类型\", \"播映指数\", \"用户热度\", \"媒体热度\", \"观看度\", \"好评度\"]\n temp_df = temp_df[[\"排序\", \"名称\", \"类型\", \"播映指数\", \"媒体热度\", \"用户热度\", \"好评度\", \"观看度\"]]\n temp_df[\"统计日期\"] = report_date\n return temp_df\n\n\nif __name__ == \"__main__\":\n video_tv_df = video_tv()\n print(video_tv_df)\n\n video_variety_show_df = video_variety_show()\n print(video_variety_show_df)\n" ]
[ [ "pandas.to_datetime", "pandas.DataFrame" ], [ "pandas.to_numeric", "pandas.DataFrame" ], [ "pandas.to_datetime", "pandas.to_numeric", "pandas.DataFrame" ], [ "pandas.to_numeric", "pandas.DataFrame" ], [ "pandas.to_datetime", "pandas.to_numeric" ], [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
ZhangXiao96/RecommenderSystems4Python
[ "f125536436f83696e133e6b98c22430a47df287d" ]
[ "TraditionalRecommenderSystems/MatrixFactorization/MatrixFactorization.py" ]
[ "from lib.utils import top_k\nfrom TraditionalRecommenderSystems.MatrixFactorization.Models import BaseMF\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom torch import nn\nimport torch.utils.data as data\nfrom tqdm import tqdm\n\n\nclass MatrixFactorization(object):\n def __init__(self, user_item_pairs, user_list, item_list, nb_factor=40, drop_rate=0.5, batch_size=32, lr=1e-1,\n optimizer=torch.optim.Adam, loss_func=nn.MSELoss(reduction='mean'), sparse=False,\n weight_decay=0., device='cuda', pro_process=None):\n \"\"\"\n Matrix Factorization based on Pytorch.\n :param user_item_pairs: list. [(user, item, rating)].\n :param user_list: list. The list of all the users (with no repeat).\n :param item_list: list. The list of all the items (with no repeat).\n :param nb_factor: int. The number of factors.\n :param drop_rate: float 0~1. Drop rate of the dropout layer.\n :param batch_size: int. Batch size of training\n :param lr: float. Learning rate.\n :param optimizer: torch.optim. Optimizer utilized to train the model.\n :param loss_func: torch.nn.*Loss. Loss function of training.\n :param sparse: boolean. The gradient requires to be sparse or not.\n :param weight_decay: float. L2 regularization.\n :param device: 'cpu' or 'cuda'.\n :param pro_process: nn.Module.\n \"\"\"\n self.user_item_pairs = pd.DataFrame(user_item_pairs)\n\n # build index-user, index-item\n self.index_2_user = np.array(user_list)\n self.index_2_item = np.array(item_list)\n assert len(self.index_2_user) == len(set(self.index_2_user))\n assert len(self.index_2_item) == len(set(self.index_2_item))\n self.user_2_index = {self.index_2_user[i]: i for i in range(len(self.index_2_user))}\n self.item_2_index = {self.index_2_item[i]: i for i in range(len(self.index_2_item))}\n self.nb_user, self.nb_item = len(user_list), len(item_list)\n\n # prepare training loader\n train_user_indices = torch.from_numpy(self.users_to_indices(self.user_item_pairs[0].values)).long()\n train_item_indices = torch.from_numpy(self.items_to_indices(self.user_item_pairs[1].values)).long()\n train_ratings = torch.from_numpy(self.user_item_pairs[2].values.reshape(-1, 1)).float()\n self.train_data_loader = data.DataLoader(data.TensorDataset(train_user_indices, train_item_indices,\n train_ratings), batch_size=batch_size, shuffle=True)\n\n # build model\n self.nb_factor = nb_factor\n self.lr = lr\n self.batch_size = batch_size\n self.loss_func = loss_func\n self.weight_decay = weight_decay\n self.device = device\n self.sparse = sparse\n self.process = pro_process\n self.model = BaseMF(self.nb_user, self.nb_item, nb_factor, drop_rate, sparse, pro_process=self.process).to(device)\n self.optimizer = optimizer(self.model.parameters(), lr=lr, weight_decay=weight_decay)\n\n # build history rating matrix\n self.pred_rating_matrix = None\n self.history_rating_matrix = None\n self.update_history_rating_matrix()\n\n def train(self, epochs, test_data=None, test_epoch_step=1):\n \"\"\"\n Train the model.\n :param epochs: int. The epochs of training.\n :param test_data: [(user, item, rating)]. None if no validation is applied.\n :param test_epoch_step: int. 
The step of validation.\n :return: (list of training loss, list of test loss) if validation is applied, else only the list of training loss.\n \"\"\"\n hist_train_loss, hist_test_loss = [], []\n if test_data is not None:\n test_data = pd.DataFrame(test_data)\n for epoch in range(epochs):\n print('Epoch-{}/{}:'.format(epoch+1, epochs))\n self.model.train()\n train_loss = self.train_epoch()\n hist_train_loss.append(train_loss)\n if (test_data is not None) and (epoch % test_epoch_step == 0):\n self.model.eval()\n test_loss = self.eval(test_data.iloc[:, [0, 1]].values, ground_truth=test_data[2].values)\n hist_test_loss.append(test_loss)\n print('training loss = {}, test loss = {}'.format(train_loss, test_loss))\n else:\n print('training loss = {}'.format(train_loss))\n self.update_pred_rating_matrix()\n return hist_train_loss, hist_test_loss\n\n def train_epoch(self):\n \"\"\"\n :return: training loss.\n \"\"\"\n self.model.train()\n epoch_loss = 0.\n for id_user, id_item, id_rating in tqdm(self.train_data_loader):\n batch_loss = self.train_on_batch(id_user, id_item, id_rating)\n epoch_loss += batch_loss\n epoch_loss /= len(self.train_data_loader)\n return epoch_loss\n\n def train_on_batch(self, user_indices, item_indices, ratings):\n users, items, ratings = user_indices.to(self.device), item_indices.to(self.device), ratings.to(self.device)\n self.optimizer.zero_grad()\n outputs = self.model(users, items)\n loss = self.loss_func(outputs, ratings)\n loss.backward()\n self.optimizer.step()\n return loss.item()\n\n def eval(self, user_item_pairs, ground_truth, batch_size=100):\n \"\"\"\n Predict the ratings of the pairs of (user, item).\n :param user_item_pairs: list of (user, item).\n :param ground_truth: the ground truth rating.\n :param batch_size: batch_size of predicting.\n :return: ratings. size=[nb_pairs]\n \"\"\"\n self.model.eval()\n outputs = self.predict(user_item_pairs, batch_size=batch_size).ravel()\n loss = np.mean((outputs-ground_truth.ravel())**2)\n return loss\n\n def predict(self, user_item_pairs, batch_size=100):\n \"\"\"\n Predict the ratings of the pairs of (user, item).\n :param user_item_pairs: list of (user, item)\n :param batch_size: batch_size of predicting.\n :return: ratings. 
size=[nb_pairs]\n \"\"\"\n pairs = pd.DataFrame(user_item_pairs)\n user_indices = self.users_to_indices(pairs[0].values)\n item_indices = self.items_to_indices(pairs[1].values)\n self.model.eval()\n outputs = []\n with torch.no_grad():\n start_id = 0\n end_id = min(batch_size, len(pairs))\n while start_id < len(pairs):\n outputs.append(self.predict_on_batch(user_indices[start_id:end_id], item_indices[start_id:end_id]))\n start_id += batch_size\n end_id = min(start_id+batch_size, len(pairs))\n return np.concatenate(outputs, axis=0)\n\n def predict_on_batch(self, user_indices, item_indices):\n users = torch.from_numpy(user_indices).long().to(self.device)\n items = torch.from_numpy(item_indices).long().to(self.device)\n outputs = self.model(users, items)\n return outputs.data.cpu().numpy()\n\n def update_history_rating_matrix(self):\n \"\"\"\n Update history rating matrix.\n :return: self.\n \"\"\"\n self.history_rating_matrix = pd.DataFrame(index=self.index_2_user, columns=self.index_2_item)\n for i, j, k in self.user_item_pairs.values:\n if i and j and k:\n self.history_rating_matrix[j][i] = k\n return self\n\n def update_pred_rating_matrix(self):\n \"\"\"\n Update prediction rating matrix.\n :return: self.\n \"\"\"\n pred_matrix = self.model.get_rating_matrix().data.cpu().numpy()\n self.pred_rating_matrix = np.where(self.history_rating_matrix.isna(), pred_matrix, np.nan)\n return self\n\n # def get_single_rating(self, i, j):\n # return self.pred_rating_matrix[i][j] if not np.isnan(self.pred_rating_matrix[i][j])\\\n # else self.history_rating_matrix.values[i][j]\n #\n # def predict_ratings_with_matrix(self, user_item_pairs):\n # \"\"\"\n # Predict the ratings of the pairs of (user, item).\n # :param user_item_pairs: list of (user, item)\n # :return: ratings. size=[nb_pairs]\n # \"\"\"\n # pairs = pd.DataFrame(user_item_pairs)\n # users = self.users_to_indices(pairs[0])\n # items = self.items_to_indices(pairs[1])\n # return np.array([self.get_single_rating(users[i], items[i]) for i in range(len(user_item_pairs))])\n\n def predict_ratings(self, user_item_pairs):\n \"\"\"\n Predict the ratings of the pairs of (user, item).\n :param user_item_pairs: list of (user, item)\n :return: ratings. size=[nb_pairs]\n \"\"\"\n return self.predict(user_item_pairs).ravel()\n\n def recommend(self, users, nb_recommendation):\n \"\"\"\n return the recommendations and their corresponding ratings.\n :param users: array of users\n :param nb_recommendation: The number of items to be recommended.\n :return: Indices of recommended items and their corresponding scores.\n \"\"\"\n user_indices = self.users_to_indices(users)\n id_recommend, rating_recommend = top_k(np.where(np.isnan(self.pred_rating_matrix[user_indices, :]),\n -np.inf, self.pred_rating_matrix[user_indices, :]),\n k=nb_recommendation, axis=-1, reverse=True, sort=True)\n return id_recommend, rating_recommend\n\n def users_to_indices(self, users):\n return np.array([self.user_2_index[user] for user in users]).ravel()\n\n def indices_to_users(self, indices):\n return self.index_2_user[np.array(indices).ravel()]\n\n def items_to_indices(self, items):\n return np.array([self.item_2_index[item] for item in items]).ravel()\n\n def indices_to_items(self, indices):\n return self.index_2_item[np.array(indices).ravel()]\n" ]
[ [ "numpy.isnan", "torch.utils.data.TensorDataset", "torch.from_numpy", "pandas.DataFrame", "numpy.concatenate", "torch.no_grad", "numpy.array", "torch.nn.MSELoss" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
cww97/Jordan
[ "00234927d5c33e2dd301c5dae57eb89cd5e54c79" ]
[ "brain/mcts_alphaZero.py" ]
[ "import numpy as np\nimport copy \n\n\ndef softmax(x):\n probs = np.exp(x - np.max(x))\n probs /= np.sum(probs)\n return probs\n\nclass TreeNode(object):\n \"\"\"A node in the MCTS tree. Each node keeps track of its own value Q, prior probability P, and\n its visit-count-adjusted prior score u.\n \"\"\"\n\n def __init__(self, parent, prior_p):\n self._parent = parent\n self._children = {} # a map from action to TreeNode\n self._n_visits = 0\n self._Q = 0\n self._u = 0\n self._P = prior_p\n\n def expand(self, action_priors):\n \"\"\"Expand tree by creating new children.\n action_priors -- output from policy function - a list of tuples of actions\n and their prior probability according to the policy function.\n \"\"\"\n for action, prob in action_priors:\n if action not in self._children:\n self._children[action] = TreeNode(self, prob)\n\n def select(self, c_puct):\n \"\"\"Select action among children that gives maximum action value, Q plus bonus u(P).\n Returns:\n A tuple of (action, next_node)\n \"\"\"\n return max(self._children.items(), key=lambda act_node: act_node[1].get_value(c_puct))\n\n def update(self, leaf_value):\n \"\"\"Update node values from leaf evaluation.\n Arguments:\n leaf_value -- the value of subtree evaluation from the current player's perspective. \n \"\"\"\n # Count visit.\n self._n_visits += 1\n # Update Q, a running average of values for all visits.\n self._Q += 1.0*(leaf_value - self._Q) / self._n_visits\n\n def update_recursive(self, leaf_value):\n \"\"\"Like a call to update(), but applied recursively for all ancestors.\n \"\"\"\n # If it is not root, this node's parent should be updated first.\n if self._parent:\n self._parent.update_recursive(-leaf_value)\n self.update(leaf_value)\n\n def get_value(self, c_puct):\n \"\"\"Calculate and return the value for this node: a combination of leaf evaluations, Q, and\n this node's prior adjusted for its visit count, u\n c_puct -- a number in (0, inf) controlling the relative impact of values, Q, and\n prior probability, P, on this node's score.\n \"\"\"\n self._u = c_puct * self._P * np.sqrt(self._parent._n_visits) / (1 + self._n_visits)\n return self._Q + self._u\n\n def is_leaf(self):\n \"\"\"Check if leaf node (i.e. no nodes below this have been expanded).\n \"\"\"\n return self._children == {}\n\n def is_root(self):\n return self._parent is None\n\n\nclass MCTS(object):\n \"\"\"A simple implementation of Monte Carlo Tree Search.\n \"\"\"\n\n def __init__(self, policy_value_fn, c_puct=5, n_playout=10000):\n \"\"\"Arguments:\n policy_value_fn -- a function that takes in a board state and outputs a list of (action, probability)\n tuples and also a score in [-1, 1] (i.e. the expected value of the end game score from \n the current player's perspective) for the current player.\n c_puct -- a number in (0, inf) that controls how quickly exploration converges to the\n maximum-value policy, where a higher value means relying on the prior more\n \"\"\"\n self._root = TreeNode(None, 1.0)\n self._policy = policy_value_fn\n self._c_puct = c_puct\n self._n_playout = n_playout\n\n def _playout(self, state):\n \"\"\"Run a single playout from the root to the leaf, getting a value at the leaf and\n propagating it back through its parents. 
State is modified in-place, so a copy must be\n provided.\n Arguments:\n state -- a copy of the state.\n \"\"\"\n node = self._root\n while(1): \n if node.is_leaf():\n break \n # Greedily select next move.\n action, node = node.select(self._c_puct) \n state.do_move(action)\n\n # Evaluate the leaf using a network which outputs a list of (action, probability)\n # tuples p and also a score v in [-1, 1] for the current player.\n action_probs, leaf_value = self._policy(state)\n # Check for end of game.\n end, winner = state.game_end()\n if not end:\n node.expand(action_probs)\n else:\n # for end state,return the \"true\" leaf_value\n if winner == -1: # tie\n leaf_value = 0.0\n else:\n leaf_value = 1.0 if winner == state.get_current_player() else -1.0\n\n # Update value and visit count of nodes in this traversal.\n node.update_recursive(-leaf_value)\n\n def get_move_probs(self, state, temp=1e-3):\n \"\"\"Runs all playouts sequentially and returns the available actions and their corresponding probabilities \n Arguments:\n state -- the current state, including both game state and the current player.\n temp -- temperature parameter in (0, 1] that controls the level of exploration\n Returns:\n the available actions and the corresponding probabilities \n \"\"\" \n for n in range(self._n_playout):\n state_copy = copy.deepcopy(state)\n self._playout(state_copy)\n \n # calc the move probabilities based on the visit counts at the root node\n act_visits = [(act, node._n_visits) for act, node in self._root._children.items()]\n acts, visits = zip(*act_visits)\n act_probs = softmax(1.0/temp * np.log(np.array(visits) + 1e-10)) \n \n return acts, act_probs\n\n def update_with_move(self, last_move):\n \"\"\"Step forward in the tree, keeping everything we already know about the subtree.\n \"\"\"\n if last_move in self._root._children:\n self._root = self._root._children[last_move]\n self._root._parent = None\n else:\n self._root = TreeNode(None, 1.0)\n\n def __str__(self):\n return \"MCTS\"\n \n\nclass MCTSPlayer(object):\n \"\"\"AI player based on MCTS\"\"\"\n def __init__(self, policy_value_function, c_puct=5, n_playout=2000, is_selfplay=0):\n self.mcts = MCTS(policy_value_function, c_puct, n_playout)\n self._is_selfplay = is_selfplay\n \n def set_player_ind(self, p):\n self.player = p\n\n def reset_player(self):\n self.mcts.update_with_move(-1) \n\n def get_action(self, board, temp=1e-3, return_prob=0):\n sensible_moves = board.availables\n move_probs = np.zeros(board.width*board.height) # the pi vector returned by MCTS as in the alphaGo Zero paper\n if len(sensible_moves) > 0:\n acts, probs = self.mcts.get_move_probs(board, temp)\n move_probs[list(acts)] = probs \n if self._is_selfplay:\n # add Dirichlet Noise for exploration (needed for self-play training)\n move = np.random.choice(acts, p=0.75*probs + 0.25*np.random.dirichlet(0.3*np.ones(len(probs)))) \n self.mcts.update_with_move(move) # update the root node and reuse the search tree\n else:\n # with the default temp=1e-3, this is almost equivalent to choosing the move with the highest prob\n move = np.random.choice(acts, p=probs) \n # reset the root node\n self.mcts.update_with_move(-1) \n# location = board.move_to_location(move)\n# print(\"AI move: %d,%d\\n\" % (location[0], location[1]))\n \n if return_prob:\n return move, move_probs\n else:\n return move\n else: \n print(\"WARNING: the board is full\")\n\n def __str__(self):\n return \"MCTS {}\".format(self.player) " ]
[ [ "numpy.sqrt", "numpy.random.choice", "numpy.max", "numpy.array", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
zqma/IIC
[ "9d4e30b51535c6ca381389d9c22ce45be4d11883", "9d4e30b51535c6ca381389d9c22ce45be4d11883" ]
[ "proj/archs/segmentation/baselines/net10a_doersch.py", "proj/scripts/cluster/analysis/print_sub_heads_eval.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom proj.archs.cluster.vgg import VGGNet\nfrom proj.archs.segmentation.net10a import SegmentationNet10aTrunk, \\\n SegmentationNet10a\nfrom proj.utils.segmentation.baselines.general import get_patches\n\n__all__ = [\"SegmentationNet10aDoersch\"]\n\n\nclass DoerschHead(nn.Module):\n def __init__(self, config):\n super(DoerschHead, self).__init__()\n self.patch_side = config.doersch_patch_side\n\n self.siamese_branch = nn.Sequential(\n nn.Conv2d(in_channels=SegmentationNet10a.cfg[-1][0], out_channels=1024,\n kernel_size=3, stride=1, padding=1, bias=False),\n nn.BatchNorm2d(1024),\n nn.ReLU(inplace=True)\n )\n\n self.joint = nn.Sequential(\n nn.Linear(2 * 1024 * self.patch_side * self.patch_side, 1024),\n nn.ReLU(True),\n nn.Dropout(),\n nn.Linear(1024, 9) # 9 gt positions, N, NE... NW.\n )\n\n def forward(self, patches1, patches2):\n patches1 = self.siamese_branch(patches1)\n patches2 = self.siamese_branch(patches2)\n\n ni, k, h, w = patches1.size()\n ni2, k2, h2, w2 = patches1.size()\n\n if not ((ni == ni2) and (k == k2) and (h == h2) and (w == w2) and \\\n (h == self.patch_side) and (w == self.patch_side)):\n print(ni, k, h, w)\n print(ni2, k2, h2, w2)\n assert (False)\n\n # flatten all but first dim\n patches1 = patches1.contiguous() # otherwise view may behave funny\n patches2 = patches2.contiguous()\n\n patches1 = patches1.view(patches1.size(0), -1)\n patches2 = patches2.view(patches2.size(0), -1)\n concatenated = torch.cat((patches1, patches2), dim=1)\n\n ni3, nf = concatenated.size()\n if not ((ni3 == ni) and (nf == (2 * 1024 * self.patch_side *\n self.patch_side))):\n print(ni, k, h, w)\n print(ni2, k2, h2, w2)\n print(patches1.size())\n print(patches2.size())\n print(ni3, nf)\n assert (False)\n\n return self.joint(concatenated)\n\n\nclass SegmentationNet10aDoersch(VGGNet):\n def __init__(self, config):\n super(SegmentationNet10aDoersch, self).__init__()\n\n self.patch_side = config.doersch_patch_side\n self.input_sz = config.input_sz\n self.features_sz = SegmentationNet10a.cfg[-1][0]\n\n print(\"SegmentationNet10aDoersch: %d %d %d\" % (self.patch_side,\n self.input_sz,\n self.features_sz))\n\n self.features = SegmentationNet10aTrunk(config, cfg=SegmentationNet10a.cfg)\n self.doersch_head = DoerschHead(config)\n\n self._initialize_weights()\n\n def forward(self, x, centre=None, other=None, penultimate=False):\n x = self.features(x)\n x = F.interpolate(x, size=self.input_sz, mode=\"bilinear\")\n\n if not penultimate:\n assert ((centre is not None) and (other is not None))\n patches1, patches2 = \\\n get_patches(x, centre, other, self.patch_side)\n\n # predicted position distribution, no softmax - using\n # torch.CrossEntropyLoss\n # shape: bn, 9\n x = self.doersch_head(patches1, patches2)\n\n return x\n", "import argparse\nimport os\nimport pickle\nimport torch\n\nimport proj.archs as archs\nfrom proj.utils.cluster.cluster_eval import get_subhead_using_loss\nfrom proj.utils.cluster.data import cluster_twohead_create_dataloaders\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--model_inds\", type=int, nargs=\"+\",\n default=[570, 569, 640, 579, 685])\n parser.add_argument(\"--out_root\", type=str,\n default=\"/scratch/shared/slow/xuji/iid_private\")\n\n given_config = parser.parse_args()\n\n for model_ind in given_config.model_inds:\n print(\"\\n%d -------------------------------------------------\" % model_ind)\n\n given_config.out_dir = os.path.join(given_config.out_root, 
str(model_ind))\n reloaded_config_path = os.path.join(given_config.out_dir, \"config.pickle\")\n print(\"Loading restarting config from: %s\" % reloaded_config_path)\n with open(reloaded_config_path, \"rb\") as config_f:\n config = pickle.load(config_f)\n assert (config.model_ind == model_ind)\n\n if not hasattr(config, \"twohead\"):\n config.twohead = (\"TwoHead\" in config.arch)\n\n config.double_eval = False # no double eval, not training (or saving config)\n\n net = archs.__dict__[config.arch](config)\n model_path = os.path.join(config.out_dir, \"best_net.pytorch\")\n net.load_state_dict(\n torch.load(model_path, map_location=lambda storage, loc: storage))\n net.cuda()\n net = torch.nn.DataParallel(net)\n\n dataloaders_head_A, dataloaders_head_B, \\\n mapping_assignment_dataloader, mapping_test_dataloader = \\\n cluster_twohead_create_dataloaders(config)\n\n if \"MNIST\" in config.dataset:\n sobel = False\n lamb = config.lamb_B\n else:\n sobel = True\n lamb = config.lamb\n\n get_subhead_using_loss(config, dataloaders_head_B, net, sobel, lamb,\n compare=True)\n\n\nmain()\n" ]
[ [ "torch.nn.Dropout", "torch.cat", "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.functional.interpolate", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ], [ "torch.nn.DataParallel", "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sdc50/bokeh
[ "4f0a77c96f0045d380e5e9edb606a9f3c7832d9f" ]
[ "tests/unit/bokeh/core/test_properties.py" ]
[ "#-----------------------------------------------------------------------------\n# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.\n# All rights reserved.\n#\n# The full license is in the file LICENSE.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Boilerplate\n#-----------------------------------------------------------------------------\nfrom __future__ import annotations # isort:skip\n\nimport pytest ; pytest\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\n# External imports\nimport numpy as np\n\n# Bokeh imports\nfrom bokeh._testing.util.api import verify_all\nfrom bokeh.core.has_props import HasProps\nfrom bokeh.core.properties import (\n Alias,\n Dict,\n Enum,\n Float,\n Instance,\n Int,\n List,\n Nullable,\n NumberSpec,\n Override,\n String,\n)\nfrom bokeh.models import Plot\n\n# Module under test\nimport bokeh.core.properties as bcp # isort:skip\n\n#-----------------------------------------------------------------------------\n# Setup\n#-----------------------------------------------------------------------------\n\nALL = (\n 'Alias',\n 'Alpha',\n 'AlphaSpec',\n 'Angle',\n 'AngleSpec',\n 'Any',\n 'AnyRef',\n 'Array',\n 'Auto',\n 'Base64String',\n 'Bool',\n 'Byte',\n 'Color',\n 'ColorHex',\n 'ColorSpec',\n 'ColumnData',\n 'Complex',\n 'DashPattern',\n 'DataSpec',\n 'Date',\n 'Datetime',\n 'Dict',\n 'DistanceSpec',\n 'Either',\n 'Enum',\n 'Factor',\n 'FactorSeq',\n 'Float',\n 'FontSize',\n 'FontSizeSpec',\n 'HatchPatternSpec',\n 'HatchPatternType',\n 'Image',\n 'Include',\n 'Instance',\n 'Int',\n 'Interval',\n 'JSON',\n 'List',\n 'MarkerSpec',\n 'MarkerType',\n 'MathString',\n 'MinMaxBounds',\n 'NonNegativeInt',\n 'NonNullable',\n 'Null',\n 'NullStringSpec',\n 'Nullable',\n 'NumberSpec',\n 'Override',\n 'PandasDataFrame',\n 'PandasGroupBy',\n 'Percent',\n 'PositiveInt',\n 'RGB',\n 'Readonly',\n 'Regex',\n 'RelativeDelta',\n 'RestrictedDict',\n 'Seq',\n 'Size',\n 'SizeSpec',\n 'String',\n 'StringSpec',\n 'Struct',\n 'TimeDelta',\n 'TextLike',\n 'Tuple',\n 'UnitsSpec',\n 'expr',\n 'field',\n 'validate',\n 'value',\n 'without_property_validation'\n)\n\n#-----------------------------------------------------------------------------\n# General API\n#----------------------------------------------------------------------------\n\n# TODO (bev) These tests should be moved to better places\n\n\nclass TestBasic:\n def test_simple_class(self) -> None:\n class Foo(HasProps):\n x = Int(12)\n y = String(\"hello\")\n z = List(Int, [1, 2, 3])\n zz = Dict(String, Int)\n s = Nullable(String(None))\n\n f = Foo()\n assert f.x == 12\n assert f.y == \"hello\"\n assert np.array_equal(np.array([1, 2, 3]), f.z)\n assert f.s is None\n\n\n assert {\"x\", \"y\", \"z\", \"zz\", \"s\"} == f.properties()\n with_defaults = f.properties_with_values(include_defaults=True)\n assert dict(x=12, y=\"hello\", z=[1,2,3], zz={}, s=None) == with_defaults\n without_defaults = f.properties_with_values(include_defaults=False)\n assert dict() == without_defaults\n\n f.x = 18\n assert f.x == 18\n\n f.y = \"bar\"\n assert f.y == \"bar\"\n\n without_defaults = f.properties_with_values(include_defaults=False)\n assert dict(x=18, y=\"bar\") == without_defaults\n\n f.z[0] = 100\n\n without_defaults = 
f.properties_with_values(include_defaults=False)\n assert dict(x=18, y=\"bar\", z=[100,2,3]) == without_defaults\n\n f.zz = {'a': 10}\n\n without_defaults = f.properties_with_values(include_defaults=False)\n assert dict(x=18, y=\"bar\", z=[100,2,3], zz={'a': 10}) == without_defaults\n\n def test_enum(self) -> None:\n class Foo(HasProps):\n x = Enum(\"blue\", \"red\", \"green\") # the first item is the default\n y = Enum(\"small\", \"medium\", \"large\", default=\"large\")\n\n f = Foo()\n assert f.x == \"blue\"\n assert f.y == \"large\"\n\n f.x = \"red\"\n assert f.x == \"red\"\n\n with pytest.raises(ValueError):\n f.x = \"yellow\"\n\n f.y = \"small\"\n assert f.y == \"small\"\n\n with pytest.raises(ValueError):\n f.y = \"yellow\"\n\n def test_inheritance(self) -> None:\n class Base(HasProps):\n x = Int(12)\n y = String(\"hello\")\n\n class Child(Base):\n z = Float(3.14)\n\n c = Child()\n assert frozenset(['x', 'y', 'z']) == frozenset(c.properties())\n assert c.y == \"hello\"\n\n def test_set(self) -> None:\n class Foo(HasProps):\n x = Int(12)\n y = Enum(\"red\", \"blue\", \"green\")\n z = String(\"blah\")\n\n f = Foo()\n assert f.x == 12\n assert f.y == \"red\"\n assert f.z == \"blah\"\n f.update(**dict(x=20, y=\"green\", z=\"hello\"))\n assert f.x == 20\n assert f.y == \"green\"\n assert f.z == \"hello\"\n with pytest.raises(ValueError):\n f.update(y=\"orange\")\n\n def test_accurate_properties_sets(self) -> None:\n class Base(HasProps):\n num = Int(12)\n container = List(String)\n child = Instance(HasProps)\n\n class Mixin(HasProps):\n mixin_num = Int(12)\n mixin_container = List(String)\n mixin_child = Instance(HasProps)\n\n class Sub(Base, Mixin):\n sub_num = Int(12)\n sub_container = List(String)\n sub_child = Instance(HasProps)\n\n b = Base()\n assert {\"child\"} == set(b.properties_with_refs())\n assert {\"num\", \"container\", \"child\"} == b.properties()\n\n m = Mixin()\n assert set(m.properties_with_refs()) == {\"mixin_child\"}\n assert m.properties() == {\"mixin_num\", \"mixin_container\", \"mixin_child\"}\n\n s = Sub()\n assert set(s.properties_with_refs()) == {\"child\", \"sub_child\", \"mixin_child\"}\n assert s.properties() == {\"num\", \"container\", \"child\", \"mixin_num\", \"mixin_container\", \"mixin_child\", \"sub_num\", \"sub_container\", \"sub_child\"}\n\n # verify caching\n assert s.properties_with_refs() is s.properties_with_refs()\n assert s.properties() is s.properties()\n\n def test_accurate_dataspecs(self) -> None:\n class Base(HasProps):\n num = NumberSpec(12)\n not_a_dataspec = Float(10)\n\n class Mixin(HasProps):\n mixin_num = NumberSpec(14)\n\n class Sub(Base, Mixin):\n sub_num = NumberSpec(16)\n\n base = Base()\n mixin = Mixin()\n sub = Sub()\n\n assert {\"num\"} == set(base.dataspecs())\n assert {\"mixin_num\"} == set(mixin.dataspecs())\n assert {\"num\", \"mixin_num\", \"sub_num\"} == set(sub.dataspecs())\n\n def test_not_serialized(self) -> None:\n class NotSerialized(HasProps):\n x = Int(12, serialized=False)\n y = String(\"hello\")\n\n o = NotSerialized()\n assert o.x == 12\n assert o.y == 'hello'\n\n # non-serialized props are still in the list of props\n assert 'x' in o.properties()\n assert 'y' in o.properties()\n\n # but they aren't in the dict of props with values, since their\n # values are not important (already included in other values,\n # as with the _units properties)\n assert 'x' not in o.properties_with_values(include_defaults=True)\n assert 'y' in o.properties_with_values(include_defaults=True)\n assert 'x' not in 
o.properties_with_values(include_defaults=False)\n assert 'y' not in o.properties_with_values(include_defaults=False)\n\n o.x = 42\n o.y = 'world'\n\n assert 'x' not in o.properties_with_values(include_defaults=True)\n assert 'y' in o.properties_with_values(include_defaults=True)\n assert 'x' not in o.properties_with_values(include_defaults=False)\n assert 'y' in o.properties_with_values(include_defaults=False)\n\n def test_readonly(self) -> None:\n class Readonly(HasProps):\n x = Int(12, readonly=True) # with default\n y = Nullable(Int(), readonly=True) # without default\n z = String(\"hello\")\n\n o = Readonly()\n assert o.x == 12\n assert o.y == None\n assert o.z == 'hello'\n\n # readonly props are still in the list of props\n assert 'x' in o.properties()\n assert 'y' in o.properties()\n assert 'z' in o.properties()\n\n assert 'x' in o.properties_with_values(include_defaults=True)\n assert 'y' in o.properties_with_values(include_defaults=True)\n assert 'z' in o.properties_with_values(include_defaults=True)\n\n assert 'x' not in o.properties_with_values(include_defaults=False)\n assert 'y' not in o.properties_with_values(include_defaults=False)\n assert 'z' not in o.properties_with_values(include_defaults=False)\n\n with pytest.raises(RuntimeError):\n o.x = 7\n with pytest.raises(RuntimeError):\n o.y = 7\n o.z = \"xyz\"\n\n assert o.x == 12\n assert o.y == None\n assert o.z == 'xyz'\n\n def test_include_defaults(self) -> None:\n class IncludeDefaultsTest(HasProps):\n x = Int(12)\n y = String(\"hello\")\n\n o = IncludeDefaultsTest()\n assert o.x == 12\n assert o.y == 'hello'\n\n assert 'x' in o.properties_with_values(include_defaults=True)\n assert 'y' in o.properties_with_values(include_defaults=True)\n assert 'x' not in o.properties_with_values(include_defaults=False)\n assert 'y' not in o.properties_with_values(include_defaults=False)\n\n o.x = 42\n o.y = 'world'\n\n assert 'x' in o.properties_with_values(include_defaults=True)\n assert 'y' in o.properties_with_values(include_defaults=True)\n assert 'x' in o.properties_with_values(include_defaults=False)\n assert 'y' in o.properties_with_values(include_defaults=False)\n\n def test_include_defaults_with_kwargs(self) -> None:\n class IncludeDefaultsKwargsTest(HasProps):\n x = Int(12)\n y = String(\"hello\")\n\n o = IncludeDefaultsKwargsTest(x=14, y=\"world\")\n assert o.x == 14\n assert o.y == 'world'\n\n assert 'x' in o.properties_with_values(include_defaults=True)\n assert 'y' in o.properties_with_values(include_defaults=True)\n assert 'x' in o.properties_with_values(include_defaults=False)\n assert 'y' in o.properties_with_values(include_defaults=False)\n\n def test_include_defaults_set_to_same(self) -> None:\n class IncludeDefaultsSetToSameTest(HasProps):\n x = Int(12)\n y = String(\"hello\")\n\n o = IncludeDefaultsSetToSameTest()\n\n assert 'x' in o.properties_with_values(include_defaults=True)\n assert 'y' in o.properties_with_values(include_defaults=True)\n assert 'x' not in o.properties_with_values(include_defaults=False)\n assert 'y' not in o.properties_with_values(include_defaults=False)\n\n # this should no-op\n o.x = 12\n o.y = \"hello\"\n\n assert 'x' in o.properties_with_values(include_defaults=True)\n assert 'y' in o.properties_with_values(include_defaults=True)\n assert 'x' not in o.properties_with_values(include_defaults=False)\n assert 'y' not in o.properties_with_values(include_defaults=False)\n\n def test_override_defaults(self) -> None:\n class FooBase(HasProps):\n x = Int(12)\n\n class FooSub(FooBase):\n x = 
Override(default=14)\n\n def func_default():\n return 16\n\n class FooSubSub(FooBase):\n x = Override(default=func_default)\n\n f_base = FooBase()\n f_sub = FooSub()\n f_sub_sub = FooSubSub()\n\n assert f_base.x == 12\n assert f_sub.x == 14\n assert f_sub_sub.x == 16\n\n assert 12 == f_base.properties_with_values(include_defaults=True)['x']\n assert 14 == f_sub.properties_with_values(include_defaults=True)['x']\n assert 16 == f_sub_sub.properties_with_values(include_defaults=True)['x']\n\n assert 'x' not in f_base.properties_with_values(include_defaults=False)\n assert 'x' not in f_sub.properties_with_values(include_defaults=False)\n assert 'x' in f_sub_sub.properties_with_values(include_defaults=False)\n\n # def test_kwargs_init(self) -> None:\n # class Foo(HasProps):\n # x = String\n # y = Int\n # z = Float\n # f = Foo(x = \"hello\", y = 14)\n # assert f.x == \"hello\"\n # assert f.y == 14\n\n # with pytest.raises(TypeError):\n # # This should raise a TypeError: object.__init__() takes no parameters\n # g = Foo(z = 3.14, q = \"blah\")\n\nclass Foo(HasProps):\n pass\n\nclass Bar(HasProps):\n pass\n\nclass Baz(HasProps):\n pass\n\ndef test_HasProps_equals() -> None:\n class Foo(HasProps):\n x = Int(12)\n y = String(\"hello\")\n z = List(Int, [1,2,3])\n\n class FooUnrelated(HasProps):\n x = Int(12)\n y = String(\"hello\")\n z = List(Int, [1,2,3])\n\n v = Foo().equals(Foo())\n assert v is True\n\n v = Foo(x=1).equals(Foo(x=1))\n assert v is True\n\n v = Foo(x=1).equals(Foo(x=2))\n assert v is False\n\n v = Foo(x=1).equals(1)\n assert v is False\n\n v = Foo().equals(FooUnrelated())\n assert v is False\n\ndef test_HasProps_clone() -> None:\n p1 = Plot(width=1000)\n c1 = p1.properties_with_values(include_defaults=False)\n p2 = p1._clone()\n c2 = p2.properties_with_values(include_defaults=False)\n assert c1 == c2\n\ndef test_Alias() -> None:\n class Foo(HasProps):\n x = Int(12)\n ax = Alias('x')\n\n f = Foo(x=10)\n assert f.x == 10\n assert f.ax == 10\n\n f.x = 20\n assert f.x == 20\n assert f.ax == 20\n\n f.ax = 30\n assert f.x == 30\n assert f.ax == 30\n\n#-----------------------------------------------------------------------------\n# Dev API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Private API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n\nTest___all__ = verify_all(bcp, ALL)\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
omshinde/dfc2019
[ "2e48cc8442c2c33aef7e1a0de27041709ef160e8" ]
[ "track2/icnet/memory_saving_gradients.py" ]
[ "from toposort import toposort\nimport contextlib\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow.contrib.graph_editor as ge\nimport time\nimport sys\nsys.setrecursionlimit(10000)\n# refers back to current module if we decide to split helpers out\nutil = sys.modules[__name__]\n\n# getting rid of \"WARNING:tensorflow:VARIABLES collection name is deprecated\"\nsetattr(tf.GraphKeys, \"VARIABLES\", \"variables\")\n\n# save original gradients since tf.gradient could be monkey-patched to point\n# to our version\nfrom tensorflow.python.ops import gradients as tf_gradients_lib\ntf_gradients = tf_gradients_lib.gradients\n\nMIN_CHECKPOINT_NODE_SIZE=1024 # use lower value during testing\n\n# specific versions we can use to do process-wide replacement of tf.gradients\ndef gradients_speed(ys, xs, grad_ys=None, **kwargs):\n return gradients(ys, xs, grad_ys, checkpoints='speed', **kwargs)\n\ndef gradients_memory(ys, xs, grad_ys=None, **kwargs):\n return gradients(ys, xs, grad_ys, checkpoints='memory', **kwargs)\n \ndef gradients_collection(ys, xs, grad_ys=None, **kwargs):\n return gradients(ys, xs, grad_ys, checkpoints='collection', **kwargs)\n\ndef gradients(ys, xs, grad_ys=None, checkpoints='collection', **kwargs):\n '''\n Authors: Tim Salimans & Yaroslav Bulatov\n\n memory efficient gradient implementation inspired by \"Training Deep Nets with Sublinear Memory Cost\"\n by Chen et al. 2016 (https://arxiv.org/abs/1604.06174)\n\n ys,xs,grad_ys,kwargs are the arguments to standard tensorflow tf.gradients\n (https://www.tensorflow.org/versions/r0.12/api_docs/python/train.html#gradients)\n\n 'checkpoints' can either be\n - a list consisting of tensors from the forward pass of the neural net\n that we should re-use when calculating the gradients in the backward pass\n all other tensors that do not appear in this list will be re-computed\n - a string specifying how this list should be determined. currently we support\n - 'speed': checkpoint all outputs of convolutions and matmuls. 
these ops are usually the most expensive,\n so checkpointing them maximizes the running speed\n (this is a good option if nonlinearities, concats, batchnorms, etc are taking up a lot of memory)\n - 'memory': try to minimize the memory usage\n (currently using a very simple strategy that identifies a number of bottleneck tensors in the graph to checkpoint)\n - 'collection': look for a tensorflow collection named 'checkpoints', which holds the tensors to checkpoint\n '''\n\n # print(\"Calling memsaving gradients with\", checkpoints)\n if not isinstance(ys,list):\n ys = [ys]\n if not isinstance(xs,list):\n xs = [xs]\n\n bwd_ops = ge.get_backward_walk_ops([y.op for y in ys],\n inclusive=True)\n\n debug_print(\"bwd_ops: %s\", bwd_ops)\n \n # forward ops are all ops that are candidates for recomputation\n fwd_ops = ge.get_forward_walk_ops([x.op for x in xs],\n inclusive=True,\n within_ops=bwd_ops)\n debug_print(\"fwd_ops: %s\", fwd_ops)\n \n # exclude ops with no inputs\n fwd_ops = [op for op in fwd_ops if op.inputs]\n\n # don't recompute xs, remove variables\n xs_ops = _to_ops(xs)\n fwd_ops = [op for op in fwd_ops if not op in xs_ops]\n fwd_ops = [op for op in fwd_ops if not '/assign' in op.name]\n fwd_ops = [op for op in fwd_ops if not '/Assign' in op.name]\n fwd_ops = [op for op in fwd_ops if not '/read' in op.name]\n ts_all = ge.filter_ts(fwd_ops, True) # get the tensors\n ts_all = [t for t in ts_all if '/read' not in t.name]\n ts_all = set(ts_all) - set(xs) - set(ys)\n\n # construct list of tensors to checkpoint during forward pass, if not\n # given as input\n if type(checkpoints) is not list:\n if checkpoints == 'collection':\n checkpoints = tf.get_collection('checkpoints')\n \n elif checkpoints == 'speed':\n # checkpoint all expensive ops to maximize running speed\n checkpoints = ge.filter_ts_from_regex(fwd_ops, 'conv2d|Conv|MatMul')\n \n elif checkpoints == 'memory':\n\n # remove very small tensors and some weird ops\n def fixdims(t): # tf.Dimension values are not compatible with int, convert manually\n try:\n return [int(e if e.value is not None else 64) for e in t]\n except:\n return [0] # unknown shape\n ts_all = [t for t in ts_all if np.prod(fixdims(t.shape)) > MIN_CHECKPOINT_NODE_SIZE]\n ts_all = [t for t in ts_all if 'L2Loss' not in t.name]\n ts_all = [t for t in ts_all if 'entropy' not in t.name]\n ts_all = [t for t in ts_all if 'FusedBatchNorm' not in t.name]\n ts_all = [t for t in ts_all if 'Switch' not in t.name]\n ts_all = [t for t in ts_all if 'dropout' not in t.name]\n # DV: FP16_FIX - need to add 'Cast' layer here to make it work for FP16\n ts_all = [t for t in ts_all if 'Cast' not in t.name]\n\n # filter out all tensors that are inputs of the backward graph\n with util.capture_ops() as bwd_ops:\n tf_gradients(ys, xs, grad_ys, **kwargs)\n\n bwd_inputs = [t for op in bwd_ops for t in op.inputs]\n # list of tensors in forward graph that is in input to bwd graph\n ts_filtered = list(set(bwd_inputs).intersection(ts_all))\n debug_print(\"Using tensors %s\", ts_filtered)\n\n # try two slightly different ways of getting bottlenecks tensors\n # to checkpoint\n for ts in [ts_filtered, ts_all]:\n\n # get all bottlenecks in the graph\n bottleneck_ts = []\n for t in ts:\n b = set(ge.get_backward_walk_ops(t.op, inclusive=True, within_ops=fwd_ops))\n f = set(ge.get_forward_walk_ops(t.op, inclusive=False, within_ops=fwd_ops))\n # check that there are not shortcuts\n b_inp = set([inp for op in b for inp in op.inputs]).intersection(ts_all)\n f_inp = set([inp for op in f for inp in 
op.inputs]).intersection(ts_all)\n if not set(b_inp).intersection(f_inp) and len(b_inp)+len(f_inp) >= len(ts_all):\n bottleneck_ts.append(t) # we have a bottleneck!\n else:\n debug_print(\"Rejected bottleneck candidate and ops %s\", [t] + list(set(ts_all) - set(b_inp) - set(f_inp)))\n\n # success? or try again without filtering?\n if len(bottleneck_ts) >= np.sqrt(len(ts_filtered)): # yes, enough bottlenecks found!\n break\n\n if not bottleneck_ts:\n raise Exception('unable to find bottleneck tensors! please provide checkpoint nodes manually, or use checkpoints=\"speed\".')\n\n # sort the bottlenecks\n bottlenecks_sorted_lists = tf_toposort(bottleneck_ts, within_ops=fwd_ops)\n sorted_bottlenecks = [t for ts in bottlenecks_sorted_lists for t in ts]\n\n # save an approximately optimal number ~ sqrt(N)\n N = len(ts_filtered)\n if len(bottleneck_ts) <= np.ceil(np.sqrt(N)):\n checkpoints = sorted_bottlenecks\n else:\n step = int(np.ceil(len(bottleneck_ts) / np.sqrt(N)))\n checkpoints = sorted_bottlenecks[step::step]\n \n else:\n raise Exception('%s is unsupported input for \"checkpoints\"' % (checkpoints,))\n\n checkpoints = list(set(checkpoints).intersection(ts_all))\n\n # at this point automatic selection happened and checkpoints is list of nodes\n assert isinstance(checkpoints, list)\n\n debug_print(\"Checkpoint nodes used: %s\", checkpoints)\n # better error handling of special cases\n # xs are already handled as checkpoint nodes, so no need to include them\n xs_intersect_checkpoints = set(xs).intersection(set(checkpoints))\n if xs_intersect_checkpoints:\n debug_print(\"Warning, some input nodes are also checkpoint nodes: %s\",\n xs_intersect_checkpoints)\n ys_intersect_checkpoints = set(ys).intersection(set(checkpoints))\n debug_print(\"ys: %s, checkpoints: %s, intersect: %s\", ys, checkpoints,\n ys_intersect_checkpoints)\n # saving an output node (ys) gives no benefit in memory while creating\n # new edge cases, exclude them\n if ys_intersect_checkpoints:\n debug_print(\"Warning, some output nodes are also checkpoints nodes: %s\",\n format_ops(ys_intersect_checkpoints))\n\n # remove initial and terminal nodes from checkpoints list if present\n checkpoints = list(set(checkpoints) - set(ys) - set(xs))\n \n # check that we have some nodes to checkpoint\n if not checkpoints:\n raise Exception('no checkpoints nodes found or given as input! 
')\n\n # disconnect dependencies between checkpointed tensors\n checkpoints_disconnected = {}\n for x in checkpoints:\n if x.op and x.op.name is not None:\n grad_node = tf.stop_gradient(x, name=x.op.name+\"_sg\")\n else:\n grad_node = tf.stop_gradient(x)\n checkpoints_disconnected[x] = grad_node\n\n # partial derivatives to the checkpointed tensors and xs\n ops_to_copy = fast_backward_ops(seed_ops=[y.op for y in ys],\n stop_at_ts=checkpoints, within_ops=fwd_ops)\n debug_print(\"Found %s ops to copy within fwd_ops %s, seed %s, stop_at %s\",\n len(ops_to_copy), fwd_ops, [r.op for r in ys], checkpoints)\n debug_print(\"ops_to_copy = %s\", ops_to_copy)\n debug_print(\"Processing list %s\", ys)\n copied_sgv, info = ge.copy_with_input_replacements(ge.sgv(ops_to_copy), {})\n for origin_op, op in info._transformed_ops.items():\n op._set_device(origin_op.node_def.device)\n copied_ops = info._transformed_ops.values()\n debug_print(\"Copied %s to %s\", ops_to_copy, copied_ops)\n ge.reroute_ts(checkpoints_disconnected.values(), checkpoints_disconnected.keys(), can_modify=copied_ops)\n debug_print(\"Rewired %s in place of %s restricted to %s\",\n checkpoints_disconnected.values(), checkpoints_disconnected.keys(), copied_ops)\n\n # get gradients with respect to current boundary + original x's\n copied_ys = [info._transformed_ops[y.op]._outputs[0] for y in ys]\n boundary = list(checkpoints_disconnected.values())\n dv = tf_gradients(ys=copied_ys, xs=boundary+xs, grad_ys=grad_ys, **kwargs)\n debug_print(\"Got gradients %s\", dv)\n debug_print(\"for %s\", copied_ys)\n debug_print(\"with respect to %s\", boundary+xs)\n\n inputs_to_do_before = [y.op for y in ys]\n if grad_ys is not None:\n inputs_to_do_before += grad_ys\n wait_to_do_ops = list(copied_ops) + [g.op for g in dv if g is not None]\n my_add_control_inputs(wait_to_do_ops, inputs_to_do_before)\n\n # partial derivatives to the checkpointed nodes\n # dictionary of \"node: backprop\" for nodes in the boundary\n d_checkpoints = {r: dr for r,dr in zip(checkpoints_disconnected.keys(),\n dv[:len(checkpoints_disconnected)])}\n # partial derivatives to xs (usually the params of the neural net)\n d_xs = dv[len(checkpoints_disconnected):]\n\n # incorporate derivatives flowing through the checkpointed nodes\n checkpoints_sorted_lists = tf_toposort(checkpoints, within_ops=fwd_ops)\n for ts in checkpoints_sorted_lists[::-1]:\n debug_print(\"Processing list %s\", ts)\n checkpoints_other = [r for r in checkpoints if r not in ts]\n checkpoints_disconnected_other = [checkpoints_disconnected[r] for r in checkpoints_other]\n\n # copy part of the graph below current checkpoint node, stopping at\n # other checkpoints nodes\n ops_to_copy = fast_backward_ops(within_ops=fwd_ops, seed_ops=[r.op for r in ts], stop_at_ts=checkpoints_other)\n debug_print(\"Found %s ops to copy within %s, seed %s, stop_at %s\",\n len(ops_to_copy), fwd_ops, [r.op for r in ts],\n checkpoints_other)\n debug_print(\"ops_to_copy = %s\", ops_to_copy)\n if not ops_to_copy: # we're done!\n break\n copied_sgv, info = ge.copy_with_input_replacements(ge.sgv(ops_to_copy), {})\n for origin_op, op in info._transformed_ops.items():\n op._set_device(origin_op.node_def.device)\n copied_ops = info._transformed_ops.values()\n debug_print(\"Copied %s to %s\", ops_to_copy, copied_ops)\n ge.reroute_ts(checkpoints_disconnected_other, checkpoints_other, can_modify=copied_ops)\n debug_print(\"Rewired %s in place of %s restricted to %s\",\n checkpoints_disconnected_other, checkpoints_other, copied_ops)\n\n # gradient 
flowing through the checkpointed node\n boundary = [info._transformed_ops[r.op]._outputs[0] for r in ts]\n substitute_backprops = [d_checkpoints[r] for r in ts]\n dv = tf_gradients(boundary,\n checkpoints_disconnected_other+xs,\n grad_ys=substitute_backprops, **kwargs)\n debug_print(\"Got gradients %s\", dv)\n debug_print(\"for %s\", boundary)\n debug_print(\"with respect to %s\", checkpoints_disconnected_other+xs)\n debug_print(\"with boundary backprop substitutions %s\", substitute_backprops)\n\n inputs_to_do_before = [d_checkpoints[r].op for r in ts]\n wait_to_do_ops = list(copied_ops) + [g.op for g in dv if g is not None]\n my_add_control_inputs(wait_to_do_ops, inputs_to_do_before)\n\n # partial derivatives to the checkpointed nodes\n for r, dr in zip(checkpoints_other, dv[:len(checkpoints_other)]):\n if dr is not None:\n if d_checkpoints[r] is None:\n d_checkpoints[r] = dr\n else:\n d_checkpoints[r] += dr\n def _unsparsify(x):\n if not isinstance(x, tf.IndexedSlices):\n return x\n assert x.dense_shape is not None, \"memory_saving_gradients encountered sparse gradients of unknown shape\"\n indices = x.indices\n while indices.shape.ndims < x.values.shape.ndims:\n indices = tf.expand_dims(indices, -1)\n return tf.scatter_nd(indices, x.values, x.dense_shape)\n\n # partial derivatives to xs (usually the params of the neural net)\n d_xs_new = dv[len(checkpoints_other):]\n for j in range(len(xs)):\n if d_xs_new[j] is not None:\n if d_xs[j] is None:\n d_xs[j] = _unsparsify(d_xs_new[j])\n else:\n d_xs[j] += _unsparsify(d_xs_new[j])\n\n\n return d_xs\n\ndef tf_toposort(ts, within_ops=None):\n all_ops = ge.get_forward_walk_ops([x.op for x in ts], within_ops=within_ops)\n\n deps = {}\n for op in all_ops:\n for o in op.outputs:\n deps[o] = set(op.inputs)\n sorted_ts = toposort(deps)\n\n # only keep the tensors from our original list\n ts_sorted_lists = []\n for l in sorted_ts:\n keep = list(set(l).intersection(ts))\n if keep:\n ts_sorted_lists.append(keep)\n\n return ts_sorted_lists\n\ndef fast_backward_ops(within_ops, seed_ops, stop_at_ts):\n bwd_ops = set(ge.get_backward_walk_ops(seed_ops, stop_at_ts=stop_at_ts))\n ops = bwd_ops.intersection(within_ops).difference([t.op for t in stop_at_ts])\n return list(ops)\n\[email protected]\ndef capture_ops():\n \"\"\"Decorator to capture ops created in the block.\n with capture_ops() as ops:\n # create some ops\n print(ops) # => prints ops created.\n \"\"\"\n\n micros = int(time.time()*10**6)\n scope_name = str(micros)\n op_list = []\n with tf.name_scope(scope_name):\n yield op_list\n\n g = tf.get_default_graph()\n op_list.extend(ge.select_ops(scope_name+\"/.*\", graph=g))\n\ndef _to_op(tensor_or_op):\n if hasattr(tensor_or_op, \"op\"):\n return tensor_or_op.op\n return tensor_or_op\n\ndef _to_ops(iterable):\n if not _is_iterable(iterable):\n return iterable\n return [_to_op(i) for i in iterable]\n\ndef _is_iterable(o):\n try:\n _ = iter(o)\n except Exception:\n return False\n return True\n\nDEBUG_LOGGING=False\ndef debug_print(s, *args):\n \"\"\"Like logger.log, but also replaces all TensorFlow ops/tensors with their\n names. Sensitive to value of DEBUG_LOGGING, see enable_debug/disable_debug\n\n Usage:\n debug_print(\"see tensors %s for %s\", tensorlist, [1,2,3])\n \"\"\"\n\n if DEBUG_LOGGING:\n formatted_args = [format_ops(arg) for arg in args]\n print(\"DEBUG \"+s % tuple(formatted_args))\n\ndef format_ops(ops, sort_outputs=True):\n \"\"\"Helper method for printing ops. 
Converts Tensor/Operation op to op.name,\n rest to str(op).\"\"\"\n \n if hasattr(ops, '__iter__') and not isinstance(ops, str):\n l = [(op.name if hasattr(op, \"name\") else str(op)) for op in ops]\n if sort_outputs:\n return sorted(l)\n return l\n else:\n return ops.name if hasattr(ops, \"name\") else str(ops)\n\ndef my_add_control_inputs(wait_to_do_ops, inputs_to_do_before):\n for op in wait_to_do_ops:\n ci = [i for i in inputs_to_do_before if op.control_inputs is None or i not in op.control_inputs]\n ge.add_control_inputs(op, ci)\n" ]
[ [ "tensorflow.get_default_graph", "tensorflow.contrib.graph_editor.filter_ts", "numpy.sqrt", "tensorflow.get_collection", "tensorflow.contrib.graph_editor.get_backward_walk_ops", "tensorflow.contrib.graph_editor.get_forward_walk_ops", "tensorflow.contrib.graph_editor.reroute_ts", "tensorflow.scatter_nd", "tensorflow.expand_dims", "tensorflow.stop_gradient", "tensorflow.contrib.graph_editor.select_ops", "tensorflow.name_scope", "tensorflow.contrib.graph_editor.sgv", "tensorflow.contrib.graph_editor.filter_ts_from_regex", "tensorflow.contrib.graph_editor.add_control_inputs" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
katekaseth/Project_One
[ "0eae5928b92ff99cc27815b73acc751d0348fca8" ]
[ "server/db/Data/data_cleaner.py" ]
[ "import pandas as pd\nimport re\n\ndata = pd.read_csv(\"BIPMetadata_current.csv\")\n\ndef format_date(date_column):\n # formatting the date data to display as yyyy-mm-dd\n new_dates = []\n for date in date_column:\n month = date[0:date.find('/')]\n date = date[date.find('/')+1:]\n day = date[0:date.find('/')]\n year = date[date.find('/')+1:]\n\n if (len(month) == 1):\n month = \"0\" + month\n if (len(day) == 1):\n day = \"0\" + day\n if (len(year) == 2):\n year = \"20\" + year\n newDate = year + \"-\" + month + \"-\" + day\n \n print(newDate)\n new_dates.append(newDate)\n return new_dates\n\n\ndef truncate(column, length):\n # truncates given column to given length and returns new column\n new_d = []\n for d in column:\n if (len(d) > length):\n d = d[0:length]\n new_d.append(d)\n return new_d\n\n\n# source: https://stackoverflow.com/questions/9662346/python-code-to-remove-html-tags-from-a-string\ndef cleanhtml(column):\n new_desc = []\n for d in column:\n cleanr = re.compile('<.*?>|&([a-z0-9]+|#[0-9]{1,6}|#x[0-9a-f]{1,6});')\n cleantext = re.sub(cleanr, '', d)\n new_desc.append(' '.join(cleantext.split()))\n return new_desc\n\n\ndef remove_spaces(column):\n new_sql = []\n for d in column:\n new_sql.append(' '.join(d.split()))\n return new_sql\n\n\nnew_created = format_date(data[\"created\"])\nprint(\"UPDATAED\")\nnew_updated = format_date(data[\"updated\"])\nnew_query = remove_spaces(data[\"sql_query\"])\nnew_query = truncate(new_query, 5000)\nnew_description = truncate(data[\"description\"], 500)\nnew_description = cleanhtml(new_description)\n\n\ndata[\"created\"] = new_created\ndata[\"updated\"] = new_updated\ndata[\"sql_query\"] = new_query\ndata[\"description\"] = new_description\n\n\ndata.to_csv(\"BIPMetadata_cleaned.csv\", index=False)" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
kirtanp/MAMO-fair
[ "fd0fc39383f11a9e1ec401233b89c2399860fb94" ]
[ "utils/utilities.py" ]
[ "#!/usr/bin/env python\nimport numpy as np\nfrom collections import defaultdict\nimport itertools\nfrom sklearn.metrics import confusion_matrix\n\ndef print_data_stats(sens_attr, class_labels):\n \"\"\"Print a few numbers about the data: Total number of points, number of\n protected examples and unprotected examples, and number of protected points\n in positive class, and number of unprotected points in positive class.\n\n Parameters\n -----------\n sens_attr: numpy array\n The sensitive attribute of shape=(number_points,).\n class_labels: nunmp\n The class labels of shape=(number_points,).\n \"\"\"\n non_prot_all = sum(sens_attr == 1.0) # non-protected group\n prot_all = len(sens_attr) - non_prot_all # protected group\n non_prot_pos = sum(class_labels[sens_attr == 1.0] == 1.0) # non_protected in positive class\n prot_pos = sum(class_labels == 1.0) - non_prot_pos # protected in positive class\n frac_non_prot_pos = float(non_prot_pos) / float(non_prot_all)\n frac_prot_pos = float(prot_pos) / float(prot_all)\n print\n print(\"Total data points: %d\" % len(sens_attr))\n print(\"# non-protected examples: %d\" % non_prot_all)\n print(\"# protected examples: %d\" % prot_all)\n print(\"# non-protected examples in positive class: %d (%0.1f%%)\" % (non_prot_pos, non_prot_pos * 100.0 / non_prot_all))\n print(\"# protected examples in positive class: %d (%0.1f%%)\" % (prot_pos, prot_pos * 100.0 / prot_all))\n\ndef get_positive_rate(y_predicted, y_true):\n \"\"\"Compute the positive rate for given predictions of the class label.\n\n Parameters\n ----------\n y_predicted: numpy array\n The predicted class labels of shape=(number_points,).\n y_true: numpy array\n The true class labels of shape=(number_points,).\n\n Returns\n ---------\n pr: float\n The positive rate.\n \"\"\"\n tn, fp, fn, tp = confusion_matrix(y_true, y_predicted).ravel()\n pr = (tp+fp) / (tp+fp+tn+fn)\n return pr\n\ndef get_true_positive_rate(y_predicted, y_true):\n \"\"\"Compute the true positive rate for given predictions of the class label.\n\n Parameters\n ----------\n y_predicted: numpy array\n The predicted class labels of shape=(number_points,).\n y_true: numpy array\n The true class labels of shape=(number_points,).\n\n Returns\n ---------\n tpr: float\n The true positive rate.\n \"\"\"\n tn, fp, fn, tp = confusion_matrix(y_true, y_predicted).ravel()\n tpr = tp / (tp+fn)\n return tpr\n\ndef compute_fairness_measures(y_predicted, y_true, sens_attr):\n \"\"\"Compute value of demographic parity and equality of opportunity for given predictions.\n\n Parameters\n ----------\n y_predicted: numpy array\n The predicted class labels of shape=(number_points,).\n y_true: numpy array\n The true class labels of shape=(number_points,).\n sens_attr: numpy array\n The sensitive labels of shape=(number_points,).\n\n Returns\n ----------\n DDP: float\n The difference of demographic parity.\n DEO: float\n The difference of equality of opportunity.\n \"\"\"\n positive_rate_prot = get_positive_rate(y_predicted[sens_attr==-1], y_true[sens_attr==-1])\n positive_rate_unprot = get_positive_rate(y_predicted[sens_attr==1], y_true[sens_attr==1])\n true_positive_rate_prot = get_true_positive_rate(y_predicted[sens_attr==-1], y_true[sens_attr==-1])\n true_positive_rate_unprot = get_true_positive_rate(y_predicted[sens_attr==1], y_true[sens_attr==1])\n DDP = positive_rate_unprot - positive_rate_prot\n DEO = true_positive_rate_unprot - true_positive_rate_prot\n rates = [positive_rate_unprot, positive_rate_prot]\n DP = np.min(rates)/(np.max(rates) + 1e-5)\n\n 
return DDP, DEO, DP\n\ndef get_accuracy(y_true, y_predicted):\n \"\"\"Compute the accuracy for given predicted class labels.\n\n Parameters\n ----------\n y_true: numpy array\n The true class labels of shape=(number_points,).\n y_predicted: numpy array\n The predicted class labels of shape=(number_points,).\n\n Returns\n ---------\n accuracy: float\n The accuracy of the predictions.\n \"\"\"\n correct_answers = (y_predicted == y_true).astype(int) # will have 1 when the prediction and the actual label match\n accuracy = float(sum(correct_answers)) / float(len(correct_answers))\n return accuracy\n" ]
[ [ "numpy.max", "sklearn.metrics.confusion_matrix", "numpy.min" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
changwoolee/gradient-rescaling-attention-model
[ "2f1d819e8cee03a9d06312e700a5c474bed48c70" ]
[ "util.py" ]
[ "import tensorflow as tf\n\nfrom contextlib import contextmanager\nfrom PIL import Image\n\nfrom keras import backend as K\nfrom keras.utils.data_utils import OrderedEnqueuer\n\ndef heteroscedastic_loss(attention=False, \n\t\t\t\t\t\t\t\t\t\t\t\t block_attention_gradient=False, \n\t\t\t\t\t\t\t\t\t\t\t\t mode='l2'):\n\t''' Heteroscedastic loss.'''\n\n\tdef het_loss(y_true, y_pred):\n\t\ty_mean = y_pred[:,:,:,:3]\n\t\ty_logvar = y_pred[:,:,:,3:]\n\t\ty_logvar = K.clip(y_logvar, -10, 10)\n\t\tif mode == 'l2':\n\t\t\teuclidian_loss = K.square(y_true/127.5 - y_mean/127.5)\n\t\telif mode == 'l1':\n\t\t\teuclidian_loss = K.abs(y_true/127.5 - y_mean/127.5)\n\n\t\tloss = tf.exp(-y_logvar)*euclidian_loss + y_logvar\n\t\tloss *= 127.5\n\t\tif mode == 'l2':\n\t\t\tloss *= 127.5\n\t\t\t\n\n\t\tif attention:\n\t\t\tattention_mask = K.sigmoid(y_logvar) \n\n\t\t\tif block_attention_gradient:\n\t\t\t\tattention_mask = K.stop_gradient(attention_mask)\n\n\t\t\tloss = attention_mask * loss\n\t\treturn K.mean(loss, axis=-1)\n\n\treturn het_loss\n\n\n\n\n\n\n\n\n@contextmanager\ndef concurrent_generator(sequence, num_workers=8, max_queue_size=32, use_multiprocessing=False):\n\tenqueuer = OrderedEnqueuer(sequence, use_multiprocessing=use_multiprocessing)\n\ttry:\n\t\tenqueuer.start(workers=num_workers, max_queue_size=max_queue_size)\n\t\tyield enqueuer.get()\n\tfinally:\n\t\tenqueuer.stop()\n\n\ndef init_session(gpu_memory_fraction):\n\tK.tensorflow_backend.set_session(tensorflow_session(gpu_memory_fraction=gpu_memory_fraction))\n\n\ndef reset_session(gpu_memory_fraction):\n\tK.clear_session()\n\tinit_session(gpu_memory_fraction)\n\n\ndef tensorflow_session(gpu_memory_fraction):\n\tconfig = tf.ConfigProto()\n\tconfig.gpu_options.allow_growth = True\n\tconfig.gpu_options.per_process_gpu_memory_fraction = gpu_memory_fraction\n\treturn tf.Session(config=config)\n\n\ndef load_image(path):\n\timg = Image.open(path)\n\tif img.mode != 'RGB':\n\t\timg = img.convert('RGB')\n\treturn img\n" ]
[ [ "tensorflow.ConfigProto", "tensorflow.exp", "tensorflow.Session" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
jie311/vega
[ "1bba6100ead802697e691403b951e6652a99ccae", "1bba6100ead802697e691403b951e6652a99ccae", "1bba6100ead802697e691403b951e6652a99ccae", "1bba6100ead802697e691403b951e6652a99ccae", "1bba6100ead802697e691403b951e6652a99ccae", "1bba6100ead802697e691403b951e6652a99ccae", "1bba6100ead802697e691403b951e6652a99ccae", "1bba6100ead802697e691403b951e6652a99ccae", "1bba6100ead802697e691403b951e6652a99ccae" ]
[ "vega/algorithms/nas/fis/autogate_s2_trainer_callback.py", "vega/datasets/transforms/RandomMirrow_pair.py", "vega/algorithms/data_augmentation/cyclesr/cyclesr_trainer_callback.py", "vega/datasets/transforms/ImageTransform.py", "vega/datasets/transforms/Cutout.py", "vega/datasets/tensorflow/adapter.py", "vega/networks/pytorch/customs/utils/logical_graph.py", "vega/trainer/callbacks/timm_trainer_callback.py", "evaluate_service/hardwares/davinci/davinci.py" ]
[ "# -*- coding: utf-8 -*-\r\n\r\n# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.\r\n# This program is free software; you can redistribute it and/or modify\r\n# it under the terms of the MIT License.\r\n# This program is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n# MIT License for more details.\r\n\"\"\"AutoGate top-k version Stage2 TrainerCallback.\"\"\"\r\n\r\nimport logging\r\nimport pandas as pd\r\nfrom vega.common import ClassFactory, ClassType\r\nfrom vega.common import FileOps\r\nfrom vega.algorithms.nas.fis.ctr_trainer_callback import CtrTrainerCallback\r\nfrom vega.core.pipeline.conf import ModelConfig\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\n\r\[email protected](ClassType.CALLBACK)\r\nclass AutoGateS2TrainerCallback(CtrTrainerCallback):\r\n \"\"\"AutoGateS2TrainerCallback module.\"\"\"\r\n\r\n def __init__(self):\r\n \"\"\"Construct AutoGateS2TrainerCallback class.\"\"\"\r\n super(CtrTrainerCallback, self).__init__()\r\n self.sieve_board = pd.DataFrame(\r\n columns=['selected_feature_pairs', 'score'])\r\n self.selected_pairs = list()\r\n\r\n logging.info(\"init autogate s2 trainer callback\")\r\n\r\n def before_train(self, logs=None):\r\n \"\"\"Call before_train of the managed callbacks.\"\"\"\r\n super().before_train(logs)\r\n\r\n \"\"\"Be called before the training process.\"\"\"\r\n hpo_result = FileOps.load_pickle(FileOps.join_path(\r\n self.trainer.local_output_path, 'best_config.pickle'))\r\n logging.info(\"loading stage1_hpo_result \\n{}\".format(hpo_result))\r\n\r\n feature_interaction_score = hpo_result['feature_interaction_score']\r\n print('feature_interaction_score:', feature_interaction_score)\r\n sorted_pairs = sorted(feature_interaction_score.items(),\r\n key=lambda x: abs(x[1]), reverse=True)\r\n\r\n if ModelConfig.model_desc:\r\n fis_ratio = ModelConfig.model_desc[\"custom\"][\"fis_ratio\"]\r\n else:\r\n fis_ratio = 1.0\r\n top_k = int(len(feature_interaction_score) * min(1.0, fis_ratio))\r\n self.selected_pairs = list(map(lambda x: x[0], sorted_pairs[:top_k]))\r\n\r\n # add selected_pairs\r\n setattr(ModelConfig.model_desc['custom'], 'selected_pairs', self.selected_pairs)\r\n\r\n def after_train(self, logs=None):\r\n \"\"\"Call after_train of the managed callbacks.\"\"\"\r\n curr_auc = float(self.trainer.valid_metrics.results['auc'])\r\n\r\n self.sieve_board = self.sieve_board.append(\r\n {\r\n 'selected_feature_pairs': self.selected_pairs,\r\n 'score': curr_auc\r\n }, ignore_index=True)\r\n result_file = FileOps.join_path(\r\n self.trainer.local_output_path, '{}_result.csv'.format(self.trainer.__worker_id__))\r\n\r\n self.sieve_board.to_csv(result_file, sep='\\t')\r\n", "# -*- coding: utf-8 -*-\n\n# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the MIT License.\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# MIT License for more details.\n\n\"\"\"This is a class for RandomMirrow_pair.\"\"\"\nimport numpy as np\nfrom vega.common import ClassFactory, ClassType\n\n\[email protected](ClassType.TRANSFORM)\nclass RandomMirrow_pair(object):\n \"\"\"Random mirrow two related image.\"\"\"\n\n def __call__(self, image, label):\n \"\"\"Call function of RandomMirrow_pair.\n\n :param image: usually the feature image, for example, the LR image for super solution dataset,\n the initial image for the segmentation dataset, and etc\n :type image: PIL image\n :param label: usually the label image, for example, the HR image for super solution dataset,\n the mask image for the segmentation dataset, and etc\n :type lebel: PIL image\n :return: the image after transform\n :rtype: list, erery item is a PIL image, the first one is feature image, the second is label image\n \"\"\"\n flip = np.random.choice(2) * 2 - 1\n channels_image = image.shape[-1]\n channels_label = label.shape[-1]\n if channels_image == 3:\n image = image[:, :, ::flip]\n else:\n image = image[:, ::flip]\n if channels_label == 3:\n label = label[:, :, ::flip]\n else:\n label = label[:, ::flip]\n return image, label\n", "# -*- coding: utf-8 -*-\n\n# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the MIT License.\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# MIT License for more details.\n\n\"\"\"This is the class for cyclesr trainworker.\"\"\"\nimport datetime\nimport logging\nimport itertools\nimport os\nimport time\nimport numpy as np\nimport torch\nimport json\nfrom tensorboardX import SummaryWriter\nimport vega\nfrom vega.datasets import Adapter\nfrom vega.datasets.common.dataset import Dataset\nfrom vega.common import FileOps\nfrom vega.report import ReportClient\nfrom vega.common import ClassFactory, ClassType\nfrom vega.networks.network_desc import NetworkDesc\nfrom vega.trainer.callbacks import Callback\nfrom .utils import AverageMeter\nfrom .utils import TensorNorm\n\n\ntry:\n import horovod.torch as hvd\nexcept Exception:\n # logging.warning(\"horovod not been installed, {}\".format(str(e)))\n pass\n# data-processing module\nfrom .utils import find_best_PSNR\n\n\[email protected](ClassType.CALLBACK)\nclass CyclesrTrainerCallback(Callback):\n \"\"\"A special callback for Trainer.\"\"\"\n\n disable_callbacks = [\"ModelStatistics\", \"MetricsEvaluator\", \"ModelCheckpoint\", \"PerformanceSaver\",\n \"LearningRateScheduler\", \"ProgressLogger\", \"ReportCallback\", \"ModelBuilder\"]\n\n def __init__(self):\n \"\"\"Initialize method.\"\"\"\n super(CyclesrTrainerCallback, self).__init__()\n\n def set_trainer(self, trainer):\n \"\"\"Set trainer object for current callback.\"\"\"\n self.trainer = trainer\n self.trainer._train_loop = self._train_loop\n self.cfg = self.trainer.config\n self._worker_id = self.trainer._worker_id\n self.worker_path = self.trainer.get_local_worker_path()\n self.output_path = self.trainer.local_output_path\n self.best_model_name = \"model_best\"\n self.best_model_file = FileOps.join_path(\n self.worker_path, \"model_{}.pth\".format(self.trainer.worker_id))\n\n def _init_dataloader(self, mode):\n \"\"\"Decode train dataset and validation dataset.\n\n :return: train dataset and validataion dataset\n :rtype: tuple of torch.utils.data.Dataset\n 
\"\"\"\n dataset = Dataset(mode=mode)\n if self.cfg.distributed:\n sampler = torch.utils.data.distributed.DistributedSampler(\n dataset, num_replicas=hvd.size(), rank=hvd.rank())\n dataset.sampler = sampler\n return dataset\n\n def _init_model(self):\n \"\"\"Initialize the model architecture for full train step.\n\n :return: train model\n :rtype: class\n \"\"\"\n logging.info('Initializing model')\n if self.cfg.model_desc:\n logging.debug(\"model_desc: {}\".format(self.cfg.model_desc))\n _file = FileOps.join_path(self.worker_path, \"model_desc_{}.json\".format(self._worker_id))\n with open(_file, \"w\") as f:\n json.dump(self.cfg.model_desc, f)\n if self.cfg.distributed:\n hvd.join()\n model_desc = self.cfg.model_desc\n net_desc = NetworkDesc(model_desc)\n model = net_desc.to_model()\n return model\n else:\n return None\n\n def batch_psnr(self, HR, SR):\n \"\"\"Calculate the mean psnr in a batch.\n\n :param HR: HR image\n :type HR: torch FloatTensor\n :param SR: SR image\n :type SR: torch FloatTensor\n :return: mean psnr in a batch\n :rtype: Float\n \"\"\"\n psnr = 20 * torch.log10(1 / torch.sqrt(torch.mean((HR - SR) ** 2, [1, 2, 3])))\n psnr = psnr.mean().item()\n return psnr\n\n def _train(self, trainloader, writer, epoch, model, print_freq=10):\n \"\"\"Train process.\n\n :param trainloader: train dataset\n :type trainloader: torch.utils.data.DataLoader\n :param writer: record enent files to log dir\n :type writer: tensorboardX.SummaryWriter\n :param epoch: current epoch\n :type epoch: int\n :param model: cyclesr model with train mode\n :type model: CycleSRModel class(nn.Module)\n :param print_freq: frequency of showing training results on console\n :type print_freq: int\n \"\"\"\n loss_sr = AverageMeter()\n loss_ga = AverageMeter()\n loss_cycA = AverageMeter()\n PSNRes = AverageMeter()\n batch_time = AverageMeter()\n data_time = AverageMeter()\n\n end = time.time()\n num_batches = len(trainloader)\n for batch_idx, data in enumerate(trainloader):\n model.set_mode('train')\n step = epoch * num_batches + batch_idx\n data_time.update(time.time() - end)\n #######################################################################\n model.optimize_CycleSR(data, epoch)\n\n # caclute psnr during training\n losses = model.get_current_losses()\n for name, loss in losses.items():\n writer.add_scalar(\"loss\" + name, loss, step) # store the loss in tensorboardX\n batchsize = data['X'].size(0)\n loss_sr.update(losses['SR'], batchsize)\n loss_ga.update(losses['G'], batchsize)\n loss_cycA.update(losses['rec_X'], batchsize)\n # logging.info(\"HR: {}. 
SR: {}\".format(model.HR.data))\n if epoch < 6:\n psnr = self.batch_psnr(model.HR.data, model.G_SR.data)\n else:\n psnr = self.batch_psnr(model.HR.data, model.SR.data)\n PSNRes.update(psnr, batchsize)\n writer.add_scalar(\"training_psnr\", psnr, step) # store the psnr\n\n batch_time.update(time.time() - end)\n # print result\n if (batch_idx + 1) % print_freq == 0:\n if not vega.is_gpu_device() or (vega.is_gpu_device() and self.trainer.is_chief):\n logging.info('[epoch {0},iter {1}/{2}]\\t'\n 'Time {batch_time.val:.3f}({batch_time.avg:.3f})\\t'\n 'Data {data_time.val:.3f}({data_time.avg:.3f})\\t'\n 'SR MSE {mse.val:.5f}({mse.avg:.5f})\\t'\n 'psnr {psnr.val:.3f}({psnr.avg:.3f})\\t'\n 'G_A {loss_ga.val:.5f}({loss_ga.avg:.5f})\\t'\n 'Cycle_A {loss_cycA.val:.5f}({loss_cycA.avg:.5f})'\n .format(epoch, batch_idx + 1, num_batches, batch_time=batch_time, data_time=data_time,\n mse=loss_sr, psnr=PSNRes, loss_ga=loss_ga, loss_cycA=loss_cycA))\n end = time.time()\n\n def getValImg(self, dataset, val_num=5):\n \"\"\"Get val_num images for showing outputs of cycleGAN during training.\n\n :param dataset: valid dataset\n :type dataset: torch.utils.data.Dataset\n :param val_num: number of selected images, defualt: 5\n :type val_num: int\n :return: list of selected valid images\n :rtype: list\n \"\"\"\n val_imgs = []\n for i in range(val_num):\n img = dataset[(i * (len(dataset) - 1)) // 5]\n img[\"X\"] = torch.unsqueeze(img['X'], 0)\n img['Y'] = torch.unsqueeze(img['Y'], 0)\n img['HR'] = torch.unsqueeze(img['HR'], 0)\n val_imgs.append(img)\n return val_imgs\n\n def _evalGAN(self, model, imgs, epoch, writer):\n \"\"\"Save images to event file.\n\n :param model: cyclesr model\n :type model: CycleSRModel class(nn.Module)\n :param imgs: list of selected valid images\n :type imgs: list\n :param epoch: current epoch\n :type epoch: int\n :param writer: record enent files to log dir\n :type writer: tensorboardX.SummaryWriter\n \"\"\"\n model.set_mode('eval')\n with torch.no_grad():\n for i, img in enumerate(imgs):\n if vega.is_npu_device():\n real_X = img['X'].npu()\n real_Y = img['Y'].npu()\n HR = img['HR'].npu()\n else:\n real_X = img['X'].cuda()\n real_Y = img['Y'].cuda()\n HR = img['HR'].cuda()\n fake_Y = model.netG(real_X) # G(X)\n rec_X = model.netF(fake_Y) # F(G(X))\n fake_X = model.netF(real_Y) # F(Y)\n rec_Y = model.netG(fake_X) # G(F(Y))\n\n G_SR = model.netSR(fake_Y) # SR(G(X))\n writer.add_image(\"G_SR\" + str(i), TensorNorm((G_SR[0])), epoch)\n writer.add_image(\"HR\" + str(i), TensorNorm((HR[0])), epoch)\n writer.add_image(\"Real_bicubic\" + str(i), TensorNorm((real_X[0])), epoch)\n writer.add_image(\"Fake_unknown\" + str(i), TensorNorm((fake_Y[0])), epoch)\n writer.add_image(\"Real_unknown\" + str(i), TensorNorm((real_Y[0])), epoch)\n writer.add_image(\"Fake_bicubic\" + str(i), TensorNorm((fake_X[0])), epoch)\n writer.add_image(\"Rec_bicubic\" + str(i), TensorNorm((rec_X[0])), epoch)\n writer.add_image(\"Rec_unknown\" + str(i), TensorNorm((rec_Y[0])), epoch)\n\n def _valid(self, model, val_dataloader, epoch, eval_epoch, writer, ps_offset=10, val_sr_num=20):\n \"\"\"Validate process of cyclesr.\n\n :param model: cyclesr model\n :type model: CycleSRModel class(nn.Module)\n :param val_dataloader: validate dataset\n :type val_dataloader: torch.utils.data.DataLoader\n :param epoch: current epoch\n :type epoch: int\n :param eval_epoch: frequency of evaluation\n :type eval_epoch: int\n :param writer: record enent files to log dir\n :type writer: tensorboardX.SummaryWriter\n :param ps_offset: pixel offset 
when calculating psnr during evaluation, default: 10\n :type ps_offset: int\n :param val_sr_num: number of selected images for testing sr model\n :type val_sr_num: int\n :return: mean psnr of whole validation images or None\n :rtype: int or None\n \"\"\"\n SRnet = model.netSR\n SRnet.eval()\n val_PSNR = []\n with torch.no_grad():\n for i, data in enumerate(val_dataloader):\n val_LR = data['Y']\n if \"HR\" in data.keys():\n HR = data['HR']\n else:\n HR = None\n if vega.is_npu_device():\n SR = SRnet(val_LR.npu())\n else:\n SR = SRnet(val_LR.cuda())\n SR = torch.clamp(SR, 0.0, 1.0)\n if i < val_sr_num:\n if i == 0:\n logging.info('Saving real LR test images to tensorboard......')\n writer.add_image(\"Val_SR\" + str(i), TensorNorm((SR)), epoch)\n if epoch == eval_epoch:\n writer.add_image('Val_LR' + str(i), TensorNorm((val_LR)), epoch)\n if HR is not None:\n writer.add_image('Val_HR' + str(i), TensorNorm((HR)), epoch)\n if i == val_sr_num - 1:\n logging.info('***** Save Done! *****')\n else:\n if HR is None:\n return None\n if vega.is_npu_device():\n val_PSNR.append(find_best_PSNR(HR.npu(), SR, ps_offset) if HR is not None else None)\n else:\n val_PSNR.append(find_best_PSNR(HR.cuda(), SR, ps_offset) if HR is not None else None)\n if all(val_PSNR):\n ave_PSNR = np.asarray(val_PSNR).mean()\n else:\n ave_PSNR = None\n return ave_PSNR\n\n def _train_loop(self):\n \"\"\"Whole train and validate process for the fully train cyclesr.\"\"\"\n self._init_report()\n if not vega.is_cpu_device():\n self.trainer._init_setting()\n self.model = self._init_model()\n if self.cfg.distributed:\n self._horovod_init_optimizer()\n self._init_horovod_setting()\n self.train_data = self._init_dataloader('train')\n self.valid_data = self._init_dataloader('test')\n train_dataloader = Adapter(self.train_data).loader\n valid_dataloader = Adapter(self.valid_data).loader\n\n writer = SummaryWriter(self.worker_path)\n\n start_time = time.time()\n train_time = 0\n best_psnr = -np.inf\n best_epoch = 0\n logging.info(\"==> Start training\")\n val_gan_imgs = self.getValImg(self.train_data, val_num=5)\n for epoch in range(self.cfg.epoch_count, self.cfg.n_epoch + self.cfg.n_epoch_decay + 1):\n self.model.update_learning_rate(\n epoch,\n self.cfg.model_desc.custom.cyc_lr,\n self.cfg.model_desc.custom.SR_lr,\n self.cfg.n_epoch,\n self.cfg.n_epoch_decay)\n start_train_time = time.time()\n self._train(train_dataloader, writer, epoch, self.model, print_freq=self.cfg.print_freq)\n train_time += round(time.time() - start_train_time)\n # validation\n ###############################################################################\n if epoch % self.cfg.eval_epoch == 0:\n logging.info(\"==> Validng\")\n self._evalGAN(self.model, val_gan_imgs, epoch, writer)\n val_ave_psnr = self._valid(self.model, valid_dataloader, epoch, self.cfg.eval_epoch, writer,\n self.cfg.val_ps_offset)\n if val_ave_psnr is not None:\n logging.info(\"==> Current ave psnr is {:.3f}\".format(val_ave_psnr))\n if val_ave_psnr > best_psnr:\n best_psnr = val_ave_psnr\n best_epoch = epoch\n logging.info(\n \"==> Best PSNR on val dataset {:.3f}, achieved at epoch {}\".format(best_psnr, best_epoch))\n self._save_checkpoint(epoch, best=True)\n self._update_report(epoch, {\"psnr\": val_ave_psnr})\n model_name = 'epoch' + str(epoch)\n logging.info(\"Saving checkpoints to {}\".format(model_name))\n self._save_checkpoint(epoch)\n elapsed = round(time.time() - start_time)\n elapsed = str(datetime.timedelta(seconds=elapsed))\n train_time = str(datetime.timedelta(seconds=train_time))\n 
logging.info(\"Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.\".format(elapsed, train_time))\n\n def _save_checkpoint(self, epoch, best=False):\n \"\"\"Save model weights.\n\n :param epoch: current epoch\n :type epoch: int\n \"\"\"\n save_dir = os.path.join(self.worker_path, str(epoch))\n FileOps.make_dir(save_dir)\n for name in self.model.model_names:\n if isinstance(name, str):\n save_filename = '%s_net_%s.pth' % (epoch, name)\n save_path = FileOps.join_path(save_dir, save_filename)\n net = getattr(self.model, 'net' + name)\n best_file = FileOps.join_path(\n self.worker_path,\n \"model_{}.pth\".format(name))\n if vega.is_gpu_device() and torch.cuda.is_available():\n # torch.save(net.module.cpu().state_dict(), save_path)\n torch.save(net.module.state_dict(), save_path)\n # net.cuda()\n if best:\n torch.save(net.module.state_dict(), best_file)\n elif vega.is_npu_device():\n torch.save(net.state_dict(), save_path)\n if best:\n torch.save(net.state_dict(), best_file)\n else:\n torch.save(net.cpu().state_dict(), save_path)\n if best:\n torch.save(net.cpu().state_dict(), best_file)\n\n def _init_horovod_setting(self):\n \"\"\"Init horovod setting.\"\"\"\n self.is_chief = True\n # SR\n hvd.broadcast_parameters(self.model.netSR.state_dict(), root_rank=0)\n hvd.broadcast_optimizer_state(self.model.optimizer_SR, root_rank=0)\n # G F\n hvd.broadcast_parameters(self.model.netG.state_dict(), root_rank=0)\n hvd.broadcast_parameters(self.model.netF.state_dict(), root_rank=0)\n hvd.broadcast_optimizer_state(self.model.optimizer_G, root_rank=0)\n # D_X\n hvd.broadcast_parameters(self.model.netD_X.state_dict(), root_rank=0)\n hvd.broadcast_optimizer_state(self.model.optimizer_D_X, root_rank=0)\n # D_Y\n hvd.broadcast_parameters(self.model.netD_Y.state_dict(), root_rank=0)\n hvd.broadcast_optimizer_state(self.model.optimizer_D_Y, root_rank=0)\n if hvd.rank() != 0:\n self.is_chief = False\n else:\n self.is_chief = True\n\n def _horovod_init_optimizer(self):\n # SR optimizer\n self.model.optimizer_SR = hvd.DistributedOptimizer(\n self.model.optimizer_SR,\n named_parameters=self.model.netSR.named_parameters(),\n compression=hvd.Compression.none\n )\n # G optimizer\n self.model.optimizer_G = hvd.DistributedOptimizer(\n self.model.optimizer_G,\n named_parameters=itertools.chain(self.model.netG.named_parameters(), self.model.netF.named_parameters()),\n compression=hvd.Compression.none\n )\n # D_X optimizer\n self.model.optimizer_D_X = hvd.DistributedOptimizer(\n self.model.optimizer_D_X,\n named_parameters=self.model.netD_X.named_parameters(),\n compression=hvd.Compression.none\n )\n # D_Y optimizer\n self.model.optimizer_D_Y = hvd.DistributedOptimizer(\n self.model.optimizer_D_Y,\n named_parameters=self.model.netD_Y.named_parameters(),\n compression=hvd.Compression.none\n )\n\n def _init_report(self):\n record = ReportClient().update(\n worker_id=self.trainer.worker_id,\n desc=self.cfg.model_desc,\n step_name=self.trainer.step_name,\n weights_file=self.best_model_file)\n logging.debug(\"update record=%s\", str(record))\n\n def _update_report(self, epoch, performance):\n record = ReportClient().update(\n self.trainer.step_name,\n self.trainer.worker_id,\n performance=performance)\n logging.debug(\"report_callback record: {}\".format(record))\n", "# -*- coding: utf-8 -*-\n\n# Copyright (C) 2020. Huawei Technologies Co., Ltd. 
All rights reserved.\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the MIT License.\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# MIT License for more details.\n\n\"\"\"This is a class for ImageTransform.\"\"\"\nimport numpy as np\nimport mmcv\nfrom vega.common import ClassFactory, ClassType\n\n\[email protected](ClassType.TRANSFORM)\nclass ImageTransform(object):\n \"\"\"Image transform method, which contains.\n\n 1. rescale the image to expected size\n 2. normalize the image\n 3. flip the image (if needed)\n 4. pad the image (if needed)\n 5. transpose to (c, h, w)\n :param mean: the mean value to normalized , defaults to (0, 0, 0)\n :type mean: tuple, optional\n :param std: the std value to normalized, defaults to (1, 1, 1)\n :type std: tuple, optional\n :param to_rgb: whether the mode of the image is rgb or not, defaults to True\n :type to_rgb: bool, optional\n :param size_divisor: pad shape, defaults to None\n :type size_divisor: int, optional\n \"\"\"\n\n def __init__(self,\n mean=(0, 0, 0),\n std=(1, 1, 1),\n to_rgb=True,\n size_divisor=None):\n\n self.mean = np.array(mean, dtype=np.float32)\n self.std = np.array(std, dtype=np.float32)\n self.to_rgb = to_rgb\n self.size_divisor = size_divisor\n\n def __call__(self, img, scale, flip=False, keep_ratio=True):\n \"\"\"Call function of ImageTransform.\n\n :param img: input image\n :type img: numpy or tensor\n :param scale: a random scaler\n :type scale: float\n :param flip: wheather flip or not, defaults to False\n :type flip: bool, optional\n :param keep_ratio: whether to keep the aspect ratio or not, defaults to True\n :type keep_ratio: bool, optional\n :return: the image after transform and other paras\n :rtype: list\n \"\"\"\n if keep_ratio:\n img, scale_factor = mmcv.imrescale(img, scale, return_scale=True)\n else:\n img, w_scale, h_scale = mmcv.imresize(\n img, scale, return_scale=True)\n scale_factor = np.array(\n [w_scale, h_scale, w_scale, h_scale], dtype=np.float32)\n img_shape = img.shape\n img = mmcv.imnormalize(img, self.mean, self.std, self.to_rgb)\n if flip:\n img = mmcv.imflip(img)\n if self.size_divisor is not None:\n img = mmcv.impad_to_multiple(img, self.size_divisor)\n pad_shape = img.shape\n else:\n pad_shape = img_shape\n img = img.transpose(2, 0, 1)\n return img, img_shape, pad_shape, scale_factor\n", "# -*- coding: utf-8 -*-\n\n# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the MIT License.\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# MIT License for more details.\n\n\"\"\"This is a class for Cutout.\"\"\"\nimport numpy as np\nimport torch\nfrom .ops import int_parameter\nfrom vega.common import ClassFactory, ClassType\n\n\[email protected](ClassType.TRANSFORM)\nclass Cutout(object):\n \"\"\"Cutout for an image.\"\"\"\n\n def __init__(self, length):\n \"\"\"Construct the Cutout class.\"\"\"\n self.length = int_parameter(length, 20)\n\n def __call__(self, img):\n \"\"\"Cutout for an image.\n\n :param img: An image\n :type img: Tensor\n \"\"\"\n h, w = img.size(1), img.size(2)\n mask = np.ones((h, w), np.float32)\n y = np.random.randint(h)\n x = np.random.randint(w)\n y1 = np.clip(y - self.length // 2, 0, h)\n y2 = np.clip(y + self.length // 2, 0, h)\n x1 = np.clip(x - self.length // 2, 0, w)\n x2 = np.clip(x + self.length // 2, 0, w)\n mask[y1: y2, x1: x2] = 0.\n mask = torch.from_numpy(mask)\n mask = mask.expand_as(img)\n img *= mask\n return img\n", "# -*- coding: utf-8 -*-\r\n\r\n# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.\r\n# This program is free software; you can redistribute it and/or modify\r\n# it under the terms of the MIT License.\r\n# This program is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n# MIT License for more details.\r\n\r\n\"\"\"This is a base class of the dataset.\"\"\"\r\n\r\nimport tensorflow as tf\r\nimport vega\r\nfrom vega.common.general import General\r\n\r\n\r\nclass TfAdapter(object):\r\n \"\"\"This is the base class of the dataset, which is a subclass of `TaskOps`.\r\n\r\n The Dataset provide several basic attribute like dataloader, transform and sampler.\r\n \"\"\"\r\n\r\n dtype_map = {\"torch.float32\": tf.float32,\r\n \"float32\": tf.float32,\r\n \"torch.float16\": tf.float32,\r\n \"float16\": tf.float32,\r\n \"float64\": tf.double,\r\n \"torch.int32\": tf.int32,\r\n \"int32\": tf.int32,\r\n \"torch.int64\": tf.int64,\r\n \"int64\": tf.int64,\r\n \"int\": tf.int64}\r\n\r\n def __init__(self, dataset):\r\n self.dataset = dataset\r\n self.args = dataset.args\r\n self._num_examples = len(self.dataset) if hasattr(self.dataset, \"__len__\") else self.args.get('num_images')\r\n self.data_index = list(range(self._num_examples))\r\n if self.args.get('train_portion', 1.0) < 1:\r\n split = int(self.args.train_portion * self._num_examples)\r\n if self.dataset.mode == 'train':\r\n self.data_index = self.data_index[:split]\r\n self._num_examples = split\r\n elif self.dataset.mode == 'val':\r\n self.data_index = self.data_index[split:]\r\n self._num_examples = self._num_examples - split\r\n self.repeat_ratio = self.args.get('repeat_ratio', 1.)\r\n self.is_detection = self.args.get(\"is_detection\", False)\r\n self.is_spatiotemporal = self.args.get('is_spatiotemporal')\r\n\r\n def _get_dataset_info(self):\r\n \"\"\"Get the data shape.\"\"\"\r\n if self.is_detection:\r\n return\r\n item = self.dataset[0]\r\n if self.is_spatiotemporal:\r\n self.feature_shape = [v.shape if v is not None else v for v in item]\r\n if isinstance(item, (list, tuple)):\r\n self.image_pos, self.label_pos = 0, 1\r\n elif isinstance(item, dict):\r\n keys = list(item.keys())\r\n self.image_pos, self.label_pos = keys[0], keys[1]\r\n else:\r\n raise ValueError\r\n image = item[self.image_pos]\r\n label = item[self.label_pos]\r\n self.fixed_size = self.args.get(\"fixed_size\", True)\r\n self.data_format = General.data_format\r\n self.image_shape = 
list(image.shape)\r\n try:\r\n self.label_shape = list(label.shape)\r\n except Exception:\r\n self.label_shape = 1\r\n\r\n try:\r\n self.image_dtype = str(image.dtype)\r\n except Exception:\r\n pass\r\n try:\r\n self.label_dtype = str(label.dtype)\r\n except Exception:\r\n self.label_dtype = \"int\"\r\n\r\n self.image_dtype_tf = self.dtype_map[self.image_dtype]\r\n self.label_dtype_tf = self.dtype_map[self.label_dtype]\r\n\r\n def _get_item(self, images_index, label_index):\r\n \"\"\"Get one item of the dataset.\"\"\"\r\n item = self.dataset[images_index]\r\n if self.is_spatiotemporal:\r\n return item[0], item[1], item[2], item[3], item[4], item[5]\r\n if not self.is_detection:\r\n image = item[self.image_pos]\r\n label = item[self.label_pos]\r\n return image, label\r\n else:\r\n image = item[0]\r\n img_meta = image.get(\"img_meta\")\r\n return image.get(\"img\"), image.get(\"gt_bboxes\"), image.get(\"gt_bboxes_ignore\"), \\\r\n image.get(\"gt_labels_ignore\"), image.get(\"gt_labels\"), \\\r\n img_meta.get(\"ori_shape\"), img_meta.get(\"img_shape\"), \\\r\n img_meta.get(\"pad_shape\"), img_meta.get(\"scale_factor\"), \\\r\n img_meta.get(\"flip\"), item[1]\r\n\r\n def _resize_image_label(self, image, label):\r\n \"\"\"Resize the image and label.\"\"\"\r\n if len(self.image_shape) == 3:\r\n img_channel = self.image_shape[0]\r\n image.set_shape([img_channel, None, None])\r\n elif len(self.image_shape) == 2:\r\n img_channel = 1\r\n image.set_shape([img_channel, None, None])\r\n else:\r\n image.set_shape(self.image_shape)\r\n\r\n if self.label_shape == 1:\r\n label.set_shape(self.label_shape)\r\n elif len(self.label_shape) == 3:\r\n label_channel = self.label_shape[0]\r\n label.set_shape([label_channel, None, None])\r\n else:\r\n label_channel = 1\r\n label.set_shape([label_channel, None, None])\r\n\r\n return image, label\r\n\r\n def data_map_func(self, images_index, label_index):\r\n \"\"\"Apply data map function from raw data.\"\"\"\r\n if self.is_spatiotemporal:\r\n feature, spatial_mx, temporal_mx, mean, std, label = tf.numpy_function(\r\n self._get_item, [images_index, label_index],\r\n [tf.float64, tf.float64, tf.float64, tf.float64, tf.float64, tf.float64])\r\n feature.set_shape(self.feature_shape[0])\r\n spatial_mx.set_shape(self.feature_shape[1])\r\n temporal_mx.set_shape(self.feature_shape[2])\r\n label.set_shape(self.feature_shape[-1])\r\n return (feature, spatial_mx, temporal_mx), (mean, std, label)\r\n if not self.is_detection:\r\n image, label = tf.numpy_function(self._get_item,\r\n [images_index, label_index],\r\n [self.image_dtype_tf, self.label_dtype_tf])\r\n if self.fixed_size:\r\n image.set_shape(self.image_shape)\r\n label.set_shape(self.label_shape)\r\n else:\r\n image, label = self._resize_image_label(image, label)\r\n\r\n try:\r\n label = tf.squeeze(label)\r\n except Exception:\r\n pass\r\n if self.label_dtype == \"int\":\r\n label = tf.cast(label, tf.int32)\r\n if self.data_format == \"channels_last\":\r\n try:\r\n image = tf.transpose(image, [1, 2, 0])\r\n label = tf.transpose(label, [1, 2, 0])\r\n except Exception:\r\n pass\r\n else:\r\n img, gt_bboxes, gt_bboxes_ignore, gt_labels_ignore, gt_labels, \\\r\n ori_shape, img_shape, pad_shape, scale_factor, flip, target = tf.numpy_function(\r\n self._get_item,\r\n [images_index, label_index],\r\n [tf.float32, tf.float32, tf.float32, tf.float32, tf.int64,\r\n tf.int64, tf.int64, tf.int64, tf.float64, tf.bool, tf.int64])\r\n image = dict()\r\n img_meta = dict()\r\n img_meta[\"ori_shape\"] = ori_shape\r\n 
img_meta[\"img_shape\"] = img_shape\r\n img_meta[\"pad_shape\"] = pad_shape\r\n img_meta[\"scale_factor\"] = scale_factor\r\n img_meta[\"flip\"] = flip\r\n image[\"img\"] = img\r\n image[\"gt_bboxes\"] = gt_bboxes\r\n image[\"gt_bboxes_ignore\"] = gt_bboxes_ignore\r\n image[\"gt_labels\"] = gt_labels\r\n image[\"gt_labels_ignore\"] = gt_labels_ignore\r\n image[\"img_meta\"] = img_meta\r\n label = target\r\n\r\n return image, label\r\n\r\n def __len__(self):\r\n \"\"\"Return dataset length of train or valid.\"\"\"\r\n if self.dataset.mode == 'train':\r\n len = self._num_examples // self.args.batch_size\r\n if self.dataset.world_size > 1:\r\n len = len // self.dataset.world_size\r\n len = int(len * self.repeat_ratio)\r\n else:\r\n len = self._num_examples // self.args.batch_size\r\n return len\r\n\r\n def input_fn(self):\r\n \"\"\"Return the next `batch_size` examples from this data set.\"\"\"\r\n if hasattr(self.dataset, \"input_fn\"):\r\n return self.dataset.input_fn()\r\n self._get_dataset_info()\r\n dataset = tf.data.Dataset.from_tensor_slices(\r\n (self.data_index, self.data_index))\r\n if self.dataset.mode == 'train' and self.dataset.world_size > 1:\r\n dataset = dataset.shard(self.dataset.world_size, self.dataset.rank)\r\n if self.dataset.mode == 'train':\r\n dataset = dataset.repeat()\r\n if self.args.shuffle:\r\n dataset = dataset.shuffle(buffer_size=self._num_examples)\r\n\r\n if vega.is_npu_device():\r\n # esr cannot adapt to num_parallel_calls on NPU\r\n dataset = dataset.map(self.data_map_func)\r\n dataset = dataset.batch(\r\n batch_size=self.args.batch_size, drop_remainder=self.args.drop_last)\r\n else:\r\n dataset = dataset.map(self.data_map_func, num_parallel_calls=tf.data.experimental.AUTOTUNE)\r\n dataset = dataset.batch(\r\n batch_size=self.args.batch_size, drop_remainder=self.args.drop_last)\r\n dataset = dataset.prefetch(tf.contrib.data.AUTOTUNE)\r\n return dataset\r\n\r\n @property\r\n def loader(self):\r\n \"\"\"Dataloader arrtribute which is a unified interface to generate the data.\r\n\r\n :return: a batch data\r\n :rtype: dict, list, optional\r\n \"\"\"\r\n return self\r\n", "# -*- coding:utf-8 -*-\n\n# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the MIT License.\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# MIT License for more details.\n\n\"\"\"LogicalGraph for NAGO.\"\"\"\nimport time\nimport collections\nimport numpy as np\nfrom dataclasses import dataclass\nfrom typing import List\nimport networkx as nx\n\n\nNode = collections.namedtuple('Node', ['id', 'inputs', 'type'])\n\n\ndef _has_all_keys(op_dict, all_ops):\n \"\"\"Check all keys.\"\"\"\n return all([k in op_dict.keys() for k in all_ops]) and len(all_ops) == len(op_dict)\n\n\ndef get_graph_info(graph):\n \"\"\"Label and sort nodes in a graph.\"\"\"\n input_nodes = []\n output_nodes = []\n Nodes = []\n n_nodes = graph.number_of_nodes()\n if n_nodes == 1:\n node = Node(0, [], -1)\n return [node], [0], []\n for node in range(n_nodes):\n tmp = list(graph.neighbors(node))\n tmp.sort()\n type = -1\n if node < tmp[0]:\n input_nodes.append(node)\n type = 0\n if node > tmp[-1]:\n output_nodes.append(node)\n type = 1\n Nodes.append(Node(node, [n for n in tmp if n < node], type))\n return Nodes, input_nodes, output_nodes\n\n\ndef build_graph(graphparam, seed):\n \"\"\"Build a graph using network x based on graphparamters.\"\"\"\n graph_model_name = graphparam[0]\n if graph_model_name == 'ER':\n graph_model, nodes, P = graphparam\n return nx.random_graphs.erdos_renyi_graph(int(nodes), P, seed)\n elif graph_model_name == 'BA':\n graph_model, nodes, M = graphparam\n return nx.random_graphs.barabasi_albert_graph(int(nodes), int(M), seed)\n elif graph_model_name == 'WS':\n graph_model, nodes, P, K = graphparam\n return nx.random_graphs.connected_watts_strogatz_graph(int(nodes), int(K), P, tries=200, seed=seed)\n\n\ndef sample_merging_strategy(inputs, merge_distribution, role):\n \"\"\"Sample merging options from a categorical distribution.\"\"\"\n if role == NodeRoles.INPUT or len(inputs) == 1:\n return EdgeMerge.SINGLE\n return np.random.choice(EdgeMerge.merging_options, p=merge_distribution)\n\n\ndef compute_input_planes(input_channels, merging_strategy, inputs, abs_nodes):\n \"\"\"Compute the number of input planes.\"\"\"\n if len(inputs) == 0: # this is an input node\n return input_channels\n inplanes = 0\n for i in inputs:\n if merging_strategy == EdgeMerge.CAT:\n inplanes += abs_nodes[i].outplanes\n else: # Residual or Attention\n # this assumes that later stages are what determine the actual number of planes\n inplanes = abs_nodes[i].outplanes\n return inplanes\n\n\ndef get_stage_list(n_stages, stage_ratios):\n \"\"\"Get stage label.\"\"\"\n if n_stages == 3:\n return [0, 1, 2]\n stage_values = []\n stage = 0\n threshold = stage_ratios[stage] * n_stages\n for x in range(n_stages):\n if x >= threshold:\n stage += 1\n threshold += stage_ratios[stage] * n_stages\n stage_values.append(stage)\n return stage_values\n\n\nclass EdgeMerge:\n \"\"\"Class Edge merge.\"\"\"\n\n SUM = \"SUM\"\n CAT = \"CAT\"\n ATTENTION = \"ATTENTION\"\n SINGLE = \"SINGLE\"\n merging_options = [SUM, CAT, ATTENTION]\n\n\nclass Ops:\n \"\"\"Class Ops.\"\"\"\n\n POOL3 = \"pool3x3\"\n POOL5 = \"pool5x5\"\n C13 = \"1x3\"\n C31 = \"3x1\"\n C1 = \"1x1\"\n C5 = \"5x5\"\n C3 = \"3x3\"\n pooling_ops = [POOL3, POOL5]\n conv_ops = [C3, C5, C1, C31, C13]\n all_ops = conv_ops + pooling_ops\n ops_to_num_params = {C3: 9, C5: 25, C31: 3, C13: 3, C1: 1, POOL3: 0, POOL5: 0}\n ops_to_kernel_size = {C3: (3, 3), C5: (5, 5), C31: (3, 1), C13: (1, 3), C1: (1, 1), POOL3: (3, 3), POOL5: (5, 5)}\n assert _has_all_keys(ops_to_num_params, all_ops), \"Ops must match\"\n assert _has_all_keys(ops_to_kernel_size, all_ops), \"Ops must match\"\n\n\n@dataclass\nclass GeneratorSolution:\n 
\"\"\"Specify all the hyperparameters needed to define the generator.\"\"\"\n\n master_params: List[float]\n cell_params: List[float]\n op_graph_params: List[float]\n stage_ratios: List[float]\n channel_ratios: List[int]\n op_dist: List[float]\n conv_type: str\n master_merge_dist: List[float]\n cell_merge_dist: List[float]\n op_merge_dist: List[float]\n\n\nclass NodeRoles:\n \"\"\"Class node roles.\"\"\"\n\n INPUT = \"INPUT\"\n OUTPUT = \"OUTPUT\"\n TRANSIT = \"TRANSIT\"\n MASTER = \"MASTER\"\n\n\nclass BasicNode:\n \"\"\"Define a basic graph in our generator.\"\"\"\n\n tollerance = 30\n\n def __init__(self, node_inputs, role, merging_strategy, inplanes, outplanes, incoming_planes):\n \"\"\"Initialize BasicNode.\"\"\"\n self.inputs = node_inputs\n self.role = role\n self.merging_strategy = merging_strategy\n self.inplanes = inplanes\n self.outplanes = outplanes\n self.incoming_planes = incoming_planes\n\n def _get_role_for_node(self, i):\n role = NodeRoles.TRANSIT\n if i in self.input_nodes:\n role = NodeRoles.INPUT\n elif i in self.output_nodes:\n role = NodeRoles.OUTPUT\n return role\n\n def _init_graph(self, graphparams):\n init_seed = 0\n for x in range(BasicNode.tollerance):\n graph = build_graph(graphparams, int(x + init_seed))\n self.graph = graph.copy()\n if nx.is_connected(self.graph) is not True:\n continue\n try:\n self.nodes, self.input_nodes, self.output_nodes = get_graph_info(self.graph)\n break\n except Exception:\n continue\n return graph\n\n\nclass LogicalOperation(BasicNode):\n \"\"\"Each operation node in the bottom-level graph.\"\"\"\n\n def __init__(self, op_type, conv_type, node_inputs, role, merging_strategy, inplanes, outplanes, incoming_planes):\n \"\"\"Initialize LogicalOperation.\"\"\"\n super().__init__(node_inputs, role, merging_strategy, inplanes, outplanes, incoming_planes)\n self.type = op_type\n self.conv_type = conv_type\n if op_type in Ops.pooling_ops:\n self.outplanes = self.inplanes\n\n def __repr__(self):\n \"\"\"Overide __repr__.\"\"\"\n return \"%s, inputs: %s\\t%s {%s:%d,%d}\" % (\n self.type, self.inputs, self.role, self.merging_strategy[:2], self.inplanes, self.outplanes)\n\n def _get_param_count(self):\n if self.conv_type == 'depthwise_separable':\n if Ops.ops_to_num_params[self.type] == 0:\n param_count = 0\n else:\n param_count = Ops.ops_to_num_params[self.type] * self.inplanes + 1 * 1 * self.inplanes * self.outplanes\n return param_count\n\n else:\n return Ops.ops_to_num_params[self.type] * self.inplanes * self.outplanes\n\n\nclass LogicalOpGraph(BasicNode):\n \"\"\"\n Define the logic bottom-level graph and define the details.\n\n (operation type, inputs, node type, input merging option,\n input/output channel number and parameter count) for in each operation node.\n \"\"\"\n\n def __init__(self, role, inputs, merging_strategy, solution: GeneratorSolution, inplanes, outplanes,\n incoming_planes):\n \"\"\"Initialize LogicalOpGraph.\"\"\"\n super().__init__(inputs, role, merging_strategy, inplanes, outplanes, incoming_planes)\n self.depth = \"\\t\\t\"\n self.bottomlvl_graph = self._init_graph(solution.op_graph_params)\n self._init_nodes(solution)\n\n def _init_nodes(self, solution: GeneratorSolution):\n self.child_nodes = []\n for i, node in enumerate(self.nodes):\n role, merging_strategy, inplanes, outplanes = self._get_node_details(i, node, solution.op_merge_dist)\n if role == NodeRoles.OUTPUT:\n p = solution.op_dist[:-len(Ops.pooling_ops)]\n op = np.random.choice(Ops.conv_ops, p=p / p.sum())\n else:\n op = np.random.choice(Ops.all_ops, 
p=solution.op_dist)\n incoming_planes = [self.child_nodes[i].outplanes for i in node.inputs]\n self.child_nodes.append(LogicalOperation(\n op, solution.conv_type, node.inputs, role,\n merging_strategy, inplanes, outplanes, incoming_planes))\n\n def _get_node_details(self, i, node, merge_dist):\n role = self._get_role_for_node(i)\n merging_strategy = sample_merging_strategy(node.inputs, merge_dist, role)\n outplanes = self.outplanes\n inplanes = compute_input_planes(self.inplanes, merging_strategy, node.inputs, self.child_nodes)\n return role, merging_strategy, inplanes, outplanes\n\n def _get_param_count(self):\n cost = [node._get_param_count() for node in self.child_nodes]\n return sum(cost)\n\n def _get_merging_cost(self):\n fixed_costs = 0\n for node in self.child_nodes:\n inplanes = node.inplanes\n for i in node.inputs:\n incoming_outplanes = self.child_nodes[i].outplanes\n if (node.merging_strategy in [EdgeMerge.ATTENTION, EdgeMerge.SUM]) and inplanes != incoming_outplanes:\n fixed_costs += inplanes * incoming_outplanes\n return fixed_costs\n\n def __repr__(self):\n \"\"\"Implement __repr__.\"\"\"\n self_rep = []\n for i, node in enumerate(self.child_nodes):\n self_rep.append(\"%s%d: %s\" % (self.depth, i, node))\n return \"{\" + \"\\n \".join(self_rep) + \"} [%s] - %s\" % (self.inputs, self.role)\n\n\nclass LogicalCellGraph(LogicalOpGraph):\n \"\"\"Define the logic midlle-level graph.\"\"\"\n\n def __init__(self, stage: int, role: NodeRoles, inputs: List[int], merging_strategy: str,\n solution: GeneratorSolution, inplanes: int, outplanes: int, incoming_planes: List[int]):\n \"\"\"Initialize LogicalCellGraph.\"\"\"\n BasicNode.__init__(self, inputs, role, merging_strategy, inplanes, outplanes, incoming_planes)\n self.depth = \"\\t\"\n self.stage = stage\n self.midlvl_graph = self._init_graph(solution.cell_params)\n self._init_nodes(solution)\n\n def _get_merging_cost(self):\n cost = [node._get_merging_cost() for node in self.child_nodes]\n return sum(cost)\n\n def _init_nodes(self, solution: GeneratorSolution):\n self.child_nodes = []\n for i, node in enumerate(self.nodes):\n role = self._get_role_for_node(i)\n merging_strategy = sample_merging_strategy(node.inputs, solution.cell_merge_dist, role)\n outplanes = self.outplanes\n inplanes = compute_input_planes(self.inplanes, merging_strategy, node.inputs, self.child_nodes)\n incoming_planes = [self.child_nodes[i].outplanes for i in node.inputs]\n self.child_nodes.append(\n LogicalOpGraph(role, node.inputs, merging_strategy, solution, inplanes, outplanes, incoming_planes))\n\n\nclass LogicalMasterGraph(LogicalOpGraph):\n \"\"\"Define the logic top-level graph (i.e. 
the architecture) from the GeneratorSolution hyperparameters.\"\"\"\n\n def __init__(self, solution: GeneratorSolution):\n \"\"\"Initialize LogicalMasterGraph.\"\"\"\n assert len(solution.stage_ratios) == len(solution.channel_ratios), \"Ratios should have same length\"\n self.child_nodes = []\n self.depth = \"\"\n self.inputs = []\n self.inplanes = solution.channel_ratios[0]\n self.role = NodeRoles.MASTER\n self.toplvl_graph = self._init_graph(solution.master_params)\n self._init_nodes(solution)\n\n def _get_merging_cost(self): # TODO fix this, it's unprecise\n cost = [node._get_merging_cost() for node in self.child_nodes]\n return sum(cost)\n\n def _init_nodes(self, solution: GeneratorSolution):\n self.child_nodes = []\n n_nodes = len(self.nodes)\n stage_values = get_stage_list(n_nodes, solution.stage_ratios)\n for i, node in enumerate(self.nodes):\n role = self._get_role_for_node(i)\n merging_strategy = sample_merging_strategy(node.inputs, solution.master_merge_dist, role)\n stage = stage_values[i]\n outplanes = solution.channel_ratios[stage]\n inplanes = compute_input_planes(self.inplanes, merging_strategy, node.inputs, self.child_nodes)\n incoming_planes = [self.child_nodes[i].outplanes for i in node.inputs]\n self.child_nodes.append(\n LogicalCellGraph(stage, role, node.inputs, merging_strategy, solution, inplanes, outplanes,\n incoming_planes))\n\n def _get_graphs(self):\n return [self.toplvl_graph, self.child_nodes[0].midlvl_graph, self.child_nodes[0].child_nodes[0].bottomlvl_graph]\n", "# -*- coding: utf-8 -*-\n\n# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the MIT License.\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# MIT License for more details.\n\n\"\"\"TIMM method trainer.\"\"\"\n\nimport os\nimport importlib\nimport torch\nfrom timm import create_model\nfrom timm.optim.optim_factory import create_optimizer, add_weight_decay\nfrom timm.scheduler import create_scheduler\nfrom timm.data import Dataset, create_transform\nfrom timm.utils import ModelEma\nfrom timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD\nfrom timm.data.loader import fast_collate, PrefetchLoader\nfrom timm.data.distributed_sampler import OrderedDistributedSampler\ntry:\n import apex\n from apex import amp\nexcept Exception:\n pass\nimport horovod.torch as hvd\nimport vega\nfrom vega.common import Config\nfrom vega.common import ClassFactory, ClassType\nfrom vega.common import FileOps\nfrom vega.trainer.callbacks import Callback\n\n\ndef create_loader(\n dataset,\n input_size,\n batch_size,\n is_training=False,\n use_prefetcher=True,\n rand_erase_prob=0.,\n rand_erase_mode='const',\n rand_erase_count=1,\n color_jitter=0.4,\n auto_augment=None,\n interpolation='bilinear',\n mean=IMAGENET_DEFAULT_MEAN,\n std=IMAGENET_DEFAULT_STD,\n num_workers=1,\n distributed=False,\n crop_pct=None,\n collate_fn=None,\n fp16=False,\n tf_preprocessing=False,\n world_size=None,\n rank=None\n):\n \"\"\"Create data loader for timm.\"\"\"\n dataset.transform = create_transform(\n input_size,\n is_training=is_training,\n use_prefetcher=use_prefetcher,\n color_jitter=color_jitter,\n auto_augment=auto_augment,\n interpolation=interpolation,\n mean=mean,\n std=std,\n crop_pct=crop_pct,\n tf_preprocessing=tf_preprocessing,\n )\n\n sampler = None\n if distributed:\n if is_training:\n sampler = torch.utils.data.distributed.DistributedSampler(\n dataset, num_replicas=world_size, rank=rank)\n else:\n # This will add extra duplicate entries to result in equal num\n # of samples per-process, will slightly alter validation results\n sampler = OrderedDistributedSampler(dataset, num_replicas=world_size, rank=rank)\n\n if collate_fn is None:\n collate_fn = fast_collate if use_prefetcher else torch.utils.data.dataloader.default_collate\n\n loader = torch.utils.data.DataLoader(\n dataset,\n batch_size=batch_size,\n shuffle=sampler is None and is_training,\n num_workers=num_workers,\n sampler=sampler,\n collate_fn=collate_fn,\n drop_last=is_training,\n )\n if use_prefetcher:\n loader = PrefetchLoader(\n loader,\n re_prob=rand_erase_prob if is_training else 0.,\n re_mode=rand_erase_mode,\n re_count=rand_erase_count,\n mean=mean,\n std=std,\n fp16=fp16)\n\n return loader\n\n\[email protected](ClassType.CALLBACK)\nclass TimmTrainerCallback(Callback):\n \"\"\"A special callback for TimmTrainer.\"\"\"\n\n disable_callbacks = [\"LearningRateScheduler\", \"ModelStatistics\", \"ModelBuilder\"]\n\n def before_train(self, logs=None):\n \"\"\"Be called before the training process.\"\"\"\n self._init_all_settings()\n\n def before_epoch(self, epoch, logs=None):\n \"\"\"Be called before each epoch.\"\"\"\n if self.distributed:\n self.trainer.train_loader.sampler.set_epoch(epoch)\n self.num_updates = epoch * len(self.trainer.train_loader)\n self.epoch = epoch\n self.trainer.model.train()\n\n def make_batch(self, batch):\n \"\"\"Prepare batch data for train_step.\"\"\"\n input, target = batch\n if not self.config.prefetcher:\n if vega.is_gpu_device():\n input, target = input.cuda(), target.cuda()\n elif vega.is_npu_device():\n input, target = input.npu(), target.npu()\n return input, target\n\n def train_step(self, batch):\n \"\"\"Train one step of 
model.\"\"\"\n input, target = batch\n self.trainer.optimizer.zero_grad()\n logits = self.trainer.model(input)\n loss = self.trainer.loss(logits, target)\n if self.use_amp:\n with amp.scale_loss(loss, self.trainer.optimizer) as scaled_loss:\n scaled_loss.backward()\n self.trainer.optimizer.synchronize()\n with self.trainer.optimizer.skip_synchronize():\n self.trainer.optimizer.step()\n else:\n loss.backward()\n self.trainer.optimizer.step()\n if self.use_ema:\n self.model_ema.update(self.trainer.model)\n self.num_updates += 1\n self.trainer.lr_scheduler.step_update(num_updates=self.num_updates)\n return {'loss': loss.item(),\n 'train_batch_output': logits,\n 'lr': self.trainer.lr_scheduler.get_epoch_values(self.epoch)}\n\n def before_valid(self, epoch, logs=None):\n \"\"\"Be called before valid loop.\"\"\"\n if self.use_ema:\n self.trainer.model = self.model_ema.ema\n self.trainer.model.eval()\n\n def after_epoch(self, epoch, logs=None):\n \"\"\"Be called after each epoch.\"\"\"\n if self.use_ema:\n self.trainer.model = self.model\n self.trainer.lr_scheduler.step(epoch=epoch + 1)\n if self.trainer.is_chief:\n self.trainer._backup()\n\n def _init_all_settings(self): # noqa: C901\n \"\"\"Init all settings from config.\"\"\"\n self.config = self.trainer.config\n if self.trainer.hps and self.trainer.hps.get('trainer'):\n self.config.from_dict(self.trainer.hps.get('trainer'))\n self.trainer._init_distributed_setting()\n if not vega.is_cpu_device():\n self.trainer._init_setting()\n self.epochs = self.trainer.epochs\n self.distributed = self.trainer.distributed\n self.trainer.model = self._init_model()\n self.model = self.trainer.model\n self.use_syncbn = self.config.syncbn\n self.trainer.use_syncbn = self.use_syncbn\n if self.use_syncbn:\n self.trainer.model = apex.parallel.convert_syncbn_model(self.trainer.model)\n self.trainer.optimizer = self._init_optimizer()\n self.use_ema = hasattr(self.config, 'model_ema')\n if self.use_ema:\n self.model_ema = self._init_model_ema()\n self.trainer.lr_scheduler = self._init_lr_scheduler()\n self.trainer.loss = self._init_loss()\n if self.distributed:\n self.trainer._init_horovod_setting()\n self.use_amp = self.config.amp\n if self.use_amp:\n self.trainer.model, self.trainer.optimizer = amp.initialize(self.trainer.model,\n self.trainer.optimizer,\n opt_level='O1')\n self._init_dataloader()\n self.trainer.valid_metrics = self.trainer._init_metrics(None)\n self.trainer.callbacks._set_params(self.trainer)\n\n # self.trainer.has_built = True\n\n def _init_model_ema(self):\n \"\"\"Init Model Ema.\"\"\"\n args = self.config.model_ema\n model_ema = ModelEma(self.trainer.model,\n decay=args.model_ema_decay,\n device='cpu' if args.model_ema_force_cpu else '',\n resume=None)\n return model_ema\n\n def _init_model(self):\n \"\"\"Init network model from timm according to model type in config.\"\"\"\n args = self.config.model_desc\n model = create_model(args.model_name,\n pretrained=args.pretrained,\n num_classes=args.num_classes,\n drop_rate=args.drop,\n drop_path_rate=args.drop_path,\n global_pool=args.gp,\n bn_tf=args.bn_tf,\n bn_momentum=args.bn_momentum,\n bn_eps=args.bn_eps,\n checkpoint_path=args.initial_checkpoint)\n if vega.is_gpu_device():\n model = model.cuda()\n elif vega.is_npu_device():\n model = model.npu()\n return model\n\n def _init_optimizer(self):\n \"\"\"Init optimizer from timm according to optim type in config.\"\"\"\n optimizer = create_optimizer(self.config.optimizer().to_dict()[\"params\"], self.trainer.model)\n if self.distributed:\n optimizer 
= hvd.DistributedOptimizer(optimizer,\n named_parameters=self.trainer.model.named_parameters(),\n compression=hvd.Compression.none)\n return optimizer\n\n def _init_lr_scheduler(self):\n \"\"\"Init lr scheduler from timm according to type in config.\"\"\"\n args = self.config.lr_scheduler().to_dict()[\"params\"]\n args['epochs'] = self.config.epochs\n lr_scheduler, self.config.epochs = create_scheduler(Config(args), self.trainer.optimizer)\n start_epoch = args.get('start_epoch', 0)\n lr_scheduler.step(start_epoch)\n return lr_scheduler\n\n def _init_loss(self):\n \"\"\"Init loss function from timm according to type in config.\"\"\"\n loss_name = self.config.loss.type\n loss_config = self.config.loss().to_dict()[\"params\"]\n loss_class = getattr(importlib.import_module('timm.loss'), loss_name)\n loss_fn = loss_class(**loss_config)\n if vega.is_gpu_device():\n loss_fn = loss_fn.cuda()\n elif vega.is_npu_device():\n loss_fn = loss_fn.npu()\n return loss_fn\n\n def _reset_sync_opt(self):\n \"\"\"Rest sysnc opt.\"\"\"\n params = add_weight_decay(self.model, self.config.optimizer.weight_decay)\n self.optimizer.param_groups = []\n param_groups = list(params)\n if not isinstance(param_groups[0], dict):\n param_groups = [{'params': param_groups}]\n for param_group in param_groups:\n self.optimizer.add_param_group(param_group)\n\n def _init_dataloader(self):\n \"\"\"Init dataloader from timm.\"\"\"\n if self.distributed and hvd.local_rank() == 0 and 'remote_data_dir' in self.config.dataset:\n FileOps.copy_folder(self.config.dataset.remote_data_dir, self.config.dataset.data_dir)\n if self.distributed:\n hvd.join()\n args = self.config.dataset\n train_dir = os.path.join(self.config.dataset.data_dir, 'train')\n dataset_train = Dataset(train_dir)\n world_size, rank = None, None\n if self.distributed:\n world_size, rank = hvd.size(), hvd.rank()\n self.trainer.train_loader = create_loader(\n dataset_train,\n input_size=tuple(args.input_size),\n batch_size=args.batch_size,\n is_training=True,\n use_prefetcher=self.config.prefetcher,\n rand_erase_prob=args.reprob,\n rand_erase_mode=args.remode,\n rand_erase_count=args.recount,\n color_jitter=args.color_jitter,\n auto_augment=args.aa,\n interpolation='random',\n mean=tuple(args.mean),\n std=tuple(args.std),\n num_workers=args.workers,\n distributed=self.distributed,\n world_size=world_size,\n rank=rank\n )\n valid_dir = os.path.join(self.config.dataset.data_dir, 'val')\n dataset_eval = Dataset(valid_dir)\n self.trainer.valid_loader = create_loader(\n dataset_eval,\n input_size=tuple(args.input_size),\n batch_size=4 * args.batch_size,\n is_training=False,\n use_prefetcher=self.config.prefetcher,\n interpolation=args.interpolation,\n mean=tuple(args.mean),\n std=tuple(args.std),\n num_workers=args.workers,\n distributed=self.distributed,\n world_size=world_size,\n rank=rank\n )\n self.trainer.batch_num_train = len(self.trainer.train_loader)\n self.trainer.batch_num_valid = len(self.trainer.valid_loader)\n", "# -*- coding: utf-8 -*-\n\n# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the MIT License.\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# MIT License for more details.\n\n\"\"\"The hardware of davinci.\"\"\"\nimport subprocess\nimport logging\nimport os\nfrom evaluate_service.class_factory import ClassFactory\nimport datetime\nimport numpy as np\n\n\[email protected]()\nclass Davinci(object):\n \"\"\"Davinci class.\"\"\"\n\n def __init__(self, optional_params):\n self.current_path = os.path.dirname(os.path.abspath(__file__))\n self.optional_params = optional_params\n if \"davinci_environment_type\" in optional_params:\n self.davinci_environment_type = optional_params.get(\"davinci_environment_type\")\n\n def convert_model(self, backend, model, weight, **kwargs):\n \"\"\"Convert the tf/caffe/mindspore model to om model in Davinci.\n\n :param backend: the backend can be one of \"tensorflow\", \"caffe\" and \"mindspore\"\n :type backend: str\n :param model: the model file need to convert\n :type model: str\n :param weight: the weight file need to converta\n :type weight: str\n \"\"\"\n om_save_path = kwargs[\"save_dir\"]\n input_shape = kwargs[\"input_shape\"]\n log_save_path = os.path.dirname(model)\n\n command_line = [\"bash\", self.current_path + \"/model_convert.sh\", self.davinci_environment_type, backend, model,\n weight, om_save_path, log_save_path, input_shape]\n try:\n subprocess.check_output(command_line)\n except subprocess.CalledProcessError as exc:\n logging.error(\"convert model to om model failed. the return message is : {}.\".format(exc))\n\n def inference(self, converted_model, input_data, **kwargs):\n \"\"\"Inference in Davinci.\n\n :param converted_model: converted model file\n :type backend: str\n :param input_data: the input data file\n :type model: str\n \"\"\"\n if os.path.isfile(converted_model):\n share_dir = os.path.dirname(converted_model)\n else:\n share_dir = converted_model\n converted_model = os.path.join(converted_model, \"davinci_model.om\")\n log_save_path = os.path.dirname(input_data)\n if self.davinci_environment_type == \"ATLAS200DK\":\n task_dir = log_save_path\n app_dir = datetime.datetime.now().strftime('%Y%m%d%H%M%S%f')\n example_dir = self.current_path + \"/samples/atlas200dk\"\n ddk_user_name = self.optional_params.get(\"ddk_user_name\")\n ddk_host_ip = self.optional_params.get(\"ddk_host_ip\")\n atlas_host_ip = self.optional_params.get(\"atlas_host_ip\")\n command_line = [\"bash\", self.current_path + \"/utils/atlas200_dk/inference_atlas300.sh\",\n task_dir, example_dir, ddk_user_name, ddk_host_ip, atlas_host_ip, app_dir]\n result_file = os.path.join(log_save_path, \"result_file\")\n else:\n if not os.path.exists(os.path.join(share_dir, \"main\")):\n # compile the Davinci program\n example_dir = self.current_path + \"/samples/atlas300\"\n command_line = [\"bash\", self.current_path + \"/compile_atlas300.sh\",\n example_dir, share_dir]\n try:\n subprocess.check_output(command_line)\n except subprocess.CalledProcessError as exc:\n logging.error(\"compile failed. the return message is : {}.\".format(exc))\n # execute the Davinci program\n command_line = [\"bash\", self.current_path + \"/inference_atlas300.sh\",\n input_data, converted_model, share_dir, log_save_path]\n result_file = os.path.join(log_save_path, \"result.txt\")\n\n try:\n subprocess.check_output(command_line)\n except subprocess.CalledProcessError as exc:\n logging.error(\"inference failed. 
the return message is : {}.\".format(exc))\n\n latency = self._get_latency(os.path.join(log_save_path, \"ome.log\"))\n output = self._get_output(result_file)\n return latency, output\n\n def _get_latency(self, log_file):\n \"\"\"Get latency from the log file.\"\"\"\n logging.info(\"The log file is {}.\".format(log_file))\n command_line = [\"bash\", self.current_path + \"/get_latency_from_log.sh\", log_file]\n try:\n latency = subprocess.check_output(command_line)\n return str(latency, 'utf-8').split(\"\\n\")[0]\n except subprocess.CalledProcessError as exc:\n logging.error(\"get_latency_from_log failed. the return message is : {}.\".format(exc))\n\n def _get_output(self, result_file):\n \"\"\"Get output data of Davinci.\"\"\"\n if self.davinci_environment_type == \"ATLAS200DK\":\n with open(result_file, 'r') as f:\n data = f.readlines()\n labels = []\n for index, line in enumerate(data):\n if index == 0:\n continue\n label = line.split(\":\")[-1]\n label = np.float(label)\n labels.append(label)\n labels = [labels]\n else:\n with open(result_file, 'r') as f:\n values = f.readlines()\n labels = []\n for value in values:\n labels.append(float(value))\n return labels\n" ]
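The cyclesr trainer in this entry scores super-resolution output by reducing per-image MSE over the channel and spatial axes and converting it to PSNR as 20·log10(1/RMSE), which presumes pixel values scaled to [0, 1] (so MAX = 1). A minimal standalone sketch of that metric, using hypothetical tensors:

```python
import torch

def batch_psnr(hr, sr):
    # Per-image MSE over (C, H, W), then PSNR = 20 * log10(MAX / RMSE) with
    # MAX = 1, averaged over the batch -- the same reduction as the trainer above.
    mse = torch.mean((hr - sr) ** 2, dim=[1, 2, 3])
    return (20 * torch.log10(1.0 / torch.sqrt(mse))).mean().item()

hr = torch.rand(4, 3, 32, 32)                                 # hypothetical HR batch
sr = torch.clamp(hr + 0.01 * torch.randn_like(hr), 0.0, 1.0)  # "SR" with ~0.01 noise
print(batch_psnr(hr, sr))                                     # roughly 40 dB
```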
[ [ "pandas.DataFrame" ], [ "numpy.random.choice" ], [ "torch.mean", "numpy.asarray", "torch.unsqueeze", "torch.no_grad", "torch.cuda.is_available", "torch.clamp" ], [ "numpy.array" ], [ "torch.from_numpy", "numpy.random.randint", "numpy.clip", "numpy.ones" ], [ "tensorflow.transpose", "tensorflow.data.Dataset.from_tensor_slices", "tensorflow.cast", "tensorflow.squeeze", "tensorflow.numpy_function" ], [ "numpy.random.choice" ], [ "torch.utils.data.DataLoader", "torch.utils.data.distributed.DistributedSampler" ], [ "numpy.float" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
aauss/DSND_Term2
[ "ff1ff8edc208652c29bfc25f18c610a02dc9d299" ]
[ "lessons/CRISP_DM/RemovingData.py" ]
[ "import pandas as pd\r\nimport numpy as np\r\nfrom collections import defaultdict\r\nimport RemovingDataSolns as s\r\n\r\n# Question 1\r\ndef prop_sals_test(prop_sals):\r\n '''\r\n INPUT prop_sals - a float as the percent of missing values in the salary column\r\n\r\n Prints statement related to the correctness of the solution of the proportion\r\n '''\r\n if np.allclose(prop_sals, s.prop_sals):\r\n print(\"Nice job! That looks right!\")\r\n else:\r\n print(\"Oops! Make sure your value is for the proportion of nan values in only the Salary column.\")\r\n\r\n\r\n# Question 2\r\ndef sal_rm_test(sal_rm):\r\n '''\r\n INPUT sal_rm - a pandas dataframe with all rows that are missing a value the salary column removed. The dataframe should only have the columns of num_vars (quant variables)\r\n\r\n Prints statement related to the correctness of the solution of the dataframe\r\n '''\r\n if sal_rm.equals(s.sal_rm):\r\n print(\"Nice job! That looks right!\")\r\n else:\r\n print(\"That wasn't quite as expected. Try again, this should be the num_vars dataframe with salary removed.\")\r\n\r\n# Question 3\r\ndef question3_check(question3_solution):\r\n '''\r\n INPUT question3_solution - the letter (a, b, or c) corresponding to the statement that best describes what happend when fitting your model.\r\n\r\n Prints statement related to the correctness of the letter chosen.\r\n '''\r\n if question3_solution == s.question3_solution:\r\n print(\"Nice job! That's right! Those missing values in the X matrix will still not allow us to predict the response.\")\r\n else:\r\n print(\"Oops! That wasn't what we were expecting. Your solution should be either a, b, or c for the string that best relates to what happened.\")\r\n\r\n\r\n# Question 4\r\ndef all_rm_test(all_rm):\r\n '''\r\n INPUT all_rm - a pandas dataframe with all rows that are missing a value in any column removed from num_vars (only the numeric columns)\r\n\r\n Prints statement related to the correctness of the solution of the dataframe\r\n '''\r\n if all_rm.equals(s.all_rm):\r\n print(\"Nice job! That looks right. The default is to drop any row with a missing value in any column, so we didn't need to specify any arguments in this case.\")\r\n else:\r\n print(\"Oops! That doesn't look like what we were expecting. Make sure you are working with only the numeric columns, and you have dropped any rows with missing values.\")\r\n\r\n\r\n# Question 5\r\ndef question5_check(question5_solution):\r\n '''\r\n INPUT question3_solution - the letter (a, b, or c) corresponding to the statement that best describes what happend when fitting your model.\r\n\r\n Prints statement related to the correctness of the letter chosen.\r\n '''\r\n if question5_solution == s.question5_solution:\r\n print(\"Nice job! That's right! Python isn't exactly magic, but sometimes it feels like it is!\")\r\n else:\r\n print(\"Oops! Your solution should have worked. In which case, no output should have printed. This solution should follow just as in the screencast.\")\r\n\r\n\r\n# Question 6\r\ndef r2_test_check(r2_test):\r\n '''\r\n INPUT r2_test - the rsquared value from fitting a model with all nan values dropped and only using quantitative variables.\r\n\r\n Prints statement related to the correctness rsquared matching solution.\r\n '''\r\n if r2_test == s.r2_test:\r\n print(\"Nice job! That's right! Your rsquared matches the solution.\")\r\n else:\r\n print(\"Oops! That wasn't the value that was expected. 
You should fit your model using the training data, predict on the X_test data, and then score comparing the y_test and your predicted values.\")\r\n\r\n# Question 7\r\ndef question7_check(question7_solution):\r\n '''\r\n INPUT question7_solution - a dictionary with statements of takeaways from the rest of the notebook. The values should be the variables a, b, c, d, e, f, or g\r\n\r\n Prints statement related to the correctness of the solution of the dictionary\r\n '''\r\n if question7_solution == s.question7_solution:\r\n print(\"Nice job! That looks right to me! We would really like to predict for anyone who provides a salary, but our model right now definitely has some limitations.\")\r\n elif question7_solution['The number of reported salaries in the original dataset'] != s.question7_solution['The number of reported salaries in the original dataset']:\r\n print(\"The number of reported salaries in the original dataset doesn't look quite right.\")\r\n elif question7_solution['The number of test salaries predicted using our model'] != s.question7_solution['The number of test salaries predicted using our model']:\r\n print(\"The number of salaries predicted using our model doesn't look quite right.\")\r\n elif question7_solution['If an individual does not rate stackoverflow, but has a salary'] != s.question7_solution['If an individual does not rate stackoverflow, but has a salary']:\r\n print(\"Whether an individual rates stackoverflow or has a job satisfaction we would still like to predict the salary if we can.\")\r\n elif question7_solution['If an individual does not have a a job satisfaction, but has a salary'] != s.question7_solution['If an individual does not have a a job satisfaction, but has a salary']:\r\n print(\"Whether an individual rates stackoverflow or has a job satisfaction we would still like to predict the salary if we can.\")\r\n elif question7_solution['Our model predicts salaries for the two individuals described above.'] != s.question7_solution['Our model predicts salaries for the two individuals described above.']:\r\n print(\"Unfortunately, our current model will not predict for anyone who has missing values in any column - even if they do have a salary!\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n" ]
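Note how the checker compares a student's float against the solution with numpy.allclose rather than ==, so harmless rounding differences still pass, while DataFrames are held to exact equality via .equals. For example (values are illustrative):

```python
import numpy as np

solution = 0.2501432                     # hypothetical solution proportion
assert np.allclose(0.2501431, solution)  # passes: within the default rtol=1e-5
assert not np.allclose(0.26, solution)   # a genuinely wrong answer fails
```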
[ [ "numpy.allclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pengshuang/allennlp
[ "91d0fa1a51485c4118e48426d76328acd8049587", "91d0fa1a51485c4118e48426d76328acd8049587", "478bf46cb676524ee9b74fb271ec0a592d1c4a48" ]
[ "allennlp/interpret/saliency_interpreters/simple_gradient.py", "tests/training/metrics/attachment_scores_test.py", "tests/training/metrics/sequence_accuracy_test.py" ]
[ "import math\n\nfrom typing import List\nimport numpy\n\nfrom allennlp.common.util import JsonDict, sanitize\nfrom allennlp.interpret.saliency_interpreters.saliency_interpreter import SaliencyInterpreter\nfrom allennlp.nn import util\n\n\[email protected](\"simple-gradient\")\nclass SimpleGradient(SaliencyInterpreter):\n \"\"\"\n Registered as a `SaliencyInterpreter` with name \"simple-gradient\".\n \"\"\"\n\n def saliency_interpret_from_json(self, inputs: JsonDict) -> JsonDict:\n \"\"\"\n Interprets the model's prediction for inputs. Gets the gradients of the loss with respect\n to the input and returns those gradients normalized and sanitized.\n \"\"\"\n labeled_instances = self.predictor.json_to_labeled_instances(inputs)\n\n # List of embedding inputs, used for multiplying gradient by the input for normalization\n embeddings_list: List[numpy.ndarray] = []\n\n instances_with_grads = dict()\n for idx, instance in enumerate(labeled_instances):\n # Hook used for saving embeddings\n handle = self._register_forward_hook(embeddings_list)\n grads = self.predictor.get_gradients([instance])[0]\n handle.remove()\n\n # Gradients come back in the reverse order that they were sent into the network\n embeddings_list.reverse()\n for key, grad in grads.items():\n # Get number at the end of every gradient key (they look like grad_input_[int],\n # we're getting this [int] part and subtracting 1 for zero-based indexing).\n # This is then used as an index into the reversed input array to match up the\n # gradient and its respective embedding.\n input_idx = int(key[-1]) - 1\n # The [0] here is undo-ing the batching that happens in get_gradients.\n emb_grad = numpy.sum(grad[0] * embeddings_list[input_idx], axis=1)\n norm = numpy.linalg.norm(emb_grad, ord=1)\n normalized_grad = [math.fabs(e) / norm for e in emb_grad]\n grads[key] = normalized_grad\n\n instances_with_grads[\"instance_\" + str(idx + 1)] = grads\n return sanitize(instances_with_grads)\n\n def _register_forward_hook(self, embeddings_list: List):\n \"\"\"\n Finds all of the TextFieldEmbedders, and registers a forward hook onto them. When forward()\n is called, embeddings_list is filled with the embedding values. 
This is necessary because\n our normalization scheme multiplies the gradient by the embedding value.\n \"\"\"\n\n def forward_hook(module, inputs, output):\n embeddings_list.append(output.squeeze(0).clone().detach().numpy())\n\n embedding_layer = util.find_embedding_layer(self.predictor._model)\n handle = embedding_layer.register_forward_hook(forward_hook)\n\n return handle\n", "import torch\n\nfrom allennlp.common.testing import AllenNlpTestCase, multi_device\nfrom allennlp.training.metrics import AttachmentScores\n\n\nclass AttachmentScoresTest(AllenNlpTestCase):\n def setup_method(self):\n super().setup_method()\n self.scorer = AttachmentScores()\n\n self.predictions = torch.Tensor([[0, 1, 3, 5, 2, 4], [0, 3, 2, 1, 0, 0]])\n\n self.gold_indices = torch.Tensor([[0, 1, 3, 5, 2, 4], [0, 3, 2, 1, 0, 0]])\n\n self.label_predictions = torch.Tensor([[0, 5, 2, 1, 4, 2], [0, 4, 8, 2, 0, 0]])\n\n self.gold_labels = torch.Tensor([[0, 5, 2, 1, 4, 2], [0, 4, 8, 2, 0, 0]])\n\n self.mask = torch.tensor(\n [[True, True, True, True, True, True], [True, True, True, True, False, False]]\n )\n\n def _send_tensors_to_device(self, device: str):\n self.predictions = self.predictions.to(device)\n self.gold_indices = self.gold_indices.to(device)\n self.label_predictions = self.label_predictions.to(device)\n self.gold_labels = self.gold_labels.to(device)\n self.mask = self.mask.to(device)\n\n @multi_device\n def test_perfect_scores(self, device: str):\n self._send_tensors_to_device(device)\n\n self.scorer(\n self.predictions, self.label_predictions, self.gold_indices, self.gold_labels, self.mask\n )\n\n for value in self.scorer.get_metric().values():\n assert value == 1.0\n\n @multi_device\n def test_unlabeled_accuracy_ignores_incorrect_labels(self, device: str):\n self._send_tensors_to_device(device)\n\n label_predictions = self.label_predictions\n # Change some stuff so our 4 of our label predictions are wrong.\n label_predictions[0, 3:] = 3\n label_predictions[1, 0] = 7\n self.scorer(\n self.predictions, label_predictions, self.gold_indices, self.gold_labels, self.mask\n )\n\n metrics = self.scorer.get_metric()\n\n assert metrics[\"UAS\"] == 1.0\n assert metrics[\"UEM\"] == 1.0\n\n # 4 / 12 labels were wrong and 2 positions\n # are masked, so 6/10 = 0.6 LAS.\n assert metrics[\"LAS\"] == 0.6\n # Neither should have labeled exact match.\n assert metrics[\"LEM\"] == 0.0\n\n @multi_device\n def test_labeled_accuracy_is_affected_by_incorrect_heads(self, device: str):\n self._send_tensors_to_device(device)\n\n predictions = self.predictions\n # Change some stuff so our 4 of our predictions are wrong.\n predictions[0, 3:] = 3\n predictions[1, 0] = 7\n # This one is in the padded part, so it shouldn't affect anything.\n predictions[1, 5] = 7\n self.scorer(\n predictions, self.label_predictions, self.gold_indices, self.gold_labels, self.mask\n )\n\n metrics = self.scorer.get_metric()\n\n # 4 heads are incorrect, so the unlabeled score should be\n # 6/10 = 0.6 LAS.\n assert metrics[\"UAS\"] == 0.6\n # All the labels were correct, but some heads\n # were wrong, so the LAS should equal the UAS.\n assert metrics[\"LAS\"] == 0.6\n\n # Neither batch element had a perfect labeled or unlabeled EM.\n assert metrics[\"LEM\"] == 0.0\n assert metrics[\"UEM\"] == 0.0\n\n @multi_device\n def test_attachment_scores_can_ignore_labels(self, device: str):\n self._send_tensors_to_device(device)\n\n scorer = AttachmentScores(ignore_classes=[1])\n\n label_predictions = self.label_predictions\n # Change the predictions where the gold label 
is 1;\n # as we are ignoring 1, we should still get a perfect score.\n label_predictions[0, 3] = 2\n scorer(self.predictions, label_predictions, self.gold_indices, self.gold_labels, self.mask)\n\n for value in scorer.get_metric().values():\n assert value == 1.0\n", "import torch\nfrom torch.testing import assert_allclose\n\nfrom allennlp.common.testing import AllenNlpTestCase, multi_device\nfrom allennlp.training.metrics import SequenceAccuracy\n\n\nclass SequenceAccuracyTest(AllenNlpTestCase):\n @multi_device\n def test_sequence_accuracy(self, device: str):\n accuracy = SequenceAccuracy()\n gold = torch.tensor([[1, 2, 3], [2, 4, 8], [0, 1, 1]], device=device)\n predictions = torch.tensor(\n [[[1, 2, 3], [1, 2, -1]], [[2, 4, 8], [2, 5, 9]], [[-1, -1, -1], [0, 1, -1]]],\n device=device,\n )\n\n accuracy(predictions, gold)\n actual_accuracy = accuracy.get_metric()\n assert_allclose(actual_accuracy, 2 / 3)\n\n @multi_device\n def test_sequence_accuracy_respects_mask(self, device: str):\n accuracy = SequenceAccuracy()\n gold = torch.tensor([[1, 2, 3], [2, 4, 8], [0, 1, 1], [11, 13, 17]], device=device)\n predictions = torch.tensor(\n [\n [[1, 2, 3], [1, 2, -1]],\n [[2, 4, 8], [2, 5, 9]],\n [[-1, -1, -1], [0, 1, -1]],\n [[12, 13, 17], [11, 13, 18]],\n ],\n device=device,\n )\n mask = torch.tensor(\n [[False, True, True], [True, True, True], [True, True, False], [True, False, True]],\n device=device,\n )\n\n accuracy(predictions, gold, mask)\n actual_accuracy = accuracy.get_metric()\n assert_allclose(actual_accuracy, 3 / 4)\n\n @multi_device\n def test_sequence_accuracy_accumulates_and_resets_correctly(self, device: str):\n accuracy = SequenceAccuracy()\n gold = torch.tensor([[1, 2, 3]], device=device)\n accuracy(torch.tensor([[[1, 2, 3]]], device=device), gold)\n accuracy(torch.tensor([[[1, 2, 4]]], device=device), gold)\n\n actual_accuracy = accuracy.get_metric(reset=True)\n assert_allclose(actual_accuracy, 1 / 2)\n assert accuracy.correct_count == 0\n assert accuracy.total_count == 0\n\n @multi_device\n def test_get_metric_on_new_object_works(self, device: str):\n accuracy = SequenceAccuracy()\n\n actual_accuracy = accuracy.get_metric(reset=True)\n assert_allclose(actual_accuracy, 0)\n" ]
[ [ "numpy.linalg.norm", "numpy.sum" ], [ "torch.Tensor", "torch.tensor" ], [ "torch.testing.assert_allclose", "torch.tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
webdeveloper0012/Tensor2tensor
[ "48bce065278eba461c8a2840e4132becbc822c7c", "48bce065278eba461c8a2840e4132becbc822c7c", "48bce065278eba461c8a2840e4132becbc822c7c", "48bce065278eba461c8a2840e4132becbc822c7c" ]
[ "tensor2tensor/data_generators/problem.py", "tensor2tensor/layers/common_hparams.py", "tensor2tensor/data_generators/gene_expression_test.py", "tensor2tensor/data_generators/desc2code_test.py" ]
[ "# coding=utf-8\n# Copyright 2017 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Base class for problem/dataset definitions.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport collections\nimport os\nimport random\n# Dependency imports\nimport six\nfrom tensor2tensor.data_generators import generator_utils\nfrom tensor2tensor.data_generators import text_encoder\nfrom tensor2tensor.utils import metrics\nfrom tensor2tensor.utils import registry\nimport tensorflow as tf\n\n\n\nclass SpaceID(object):\n \"\"\"Input and target space ids. Add more as needed.\"\"\"\n # Generic / unknown output space (default)\n GENERIC = 0\n # Image labels\n IMAGE_LABEL = 1\n # English characters\n EN_CHR = 2\n # English tokens\n EN_TOK = 3\n # English bpe tokens\n EN_BPE_TOK = 4\n # French characters\n FR_CHR = 5\n # French tokens\n FR_TOK = 6\n # German characters\n DE_CHR = 7\n # German tokens\n DE_TOK = 8\n # German bpe tokens\n DE_BPE_TOK = 9\n # Digit cipher lexicon 0\n DIGIT_0 = 10\n # Digit cipher lexicon 1\n DIGIT_1 = 11\n # Audio waveform domain\n AUDIO_WAV = 12\n # Audio spectral domain\n AUDIO_SPECTRAL = 13\n # Parse characters\n PARSE_CHR = 14\n # Parse tokens\n PARSE_TOK = 15\n # Chinese tokens\n ZH_TOK = 16\n # Icelandic characters\n ICE_CHAR = 17\n # Icelandic tokens\n ICE_TOK = 18\n # Icelandic parse tokens\n ICE_PARSE_TOK = 19\n # Macedonian tokens\n MK_TOK = 20\n # Czech tokens\n CS_TOK = 21\n # Czech characters\n CS_CHR = 22\n # Genetic bases (ACTG)\n DNA = 23\n # Real numbers\n REAL = 24\n # Images\n IMAGE = 25\n # Peptide\n PEPTIDE = 26\n # Python\n PY_TOK = 27\n # C++\n CPP_TOK = 28\n # Strokes\n STROKES = 29\n # Pickled Python\n PICKLED_PYTHON = 30\n\n\ndef default_model_hparams():\n return tf.contrib.training.HParams(\n max_input_seq_length=0,\n max_target_seq_length=0,\n prepend_mode=\"none\",\n data_dir=None)\n\n\ndef preprocess_example_common(example, hparams, mode):\n \"\"\"Preprocessing steps common to all models.\"\"\"\n if hparams.max_input_seq_length > 0:\n example[\"inputs\"] = example[\"inputs\"][:hparams.max_input_seq_length]\n if hparams.max_target_seq_length > 0:\n example[\"targets\"] = example[\"targets\"][:hparams.max_target_seq_length]\n if hparams.prepend_mode != \"none\":\n if mode == tf.estimator.ModeKeys.PREDICT:\n example[\"partial_targets\"] = tf.concat([example[\"inputs\"], [0]], 0)\n else:\n example[\"targets\"] = tf.concat(\n [example[\"inputs\"], [0], example[\"targets\"]], 0)\n return example\n\n\nclass Problem(object):\n \"\"\"Problem base class. Specifies a T2T problem.\n\n Problems unify the specification of a problem for data generation, training,\n and inference.\n\n New problems are specified by the following methods:\n\n Data generation:\n * generate_data(data_dir, tmp_dir)\n - Generate training and dev datasets into data_dir.\n - Additional files, e.g. vocabulary files, should also be written to\n data_dir. 
Vocab files are newline-separated files with each line\n containing a token. The standard convention for the filename is to\n set it to be\n ${Problem.vocab_name}.${Problem.targeted_vocab_size}\n - Downloads and other files can be written to tmp_dir\n - If you have a training and dev generator, you can generate the\n training and dev datasets with\n generator_utils.generate_dataset_and_shuffle.\n - Use the self.training_filepaths and self.dev_filepaths functions to\n get sharded filenames. If shuffled=False, the filenames will contain\n an \"unshuffled\" suffix; you should then shuffle the data\n shard-by-shard with generator_utils.shuffle_dataset.\n - Allows to specify the number of shards, optionally (can be omitted).\n - Subclasses must override\n * dataset_filename()\n - Base filename for problem.\n - Defaults to registered name (self.name).\n\n Training:\n * hparams(defaults, model_hparams)\n - Specify the problem hyperparameters (see _default_hparams)\n - Mutate defaults as needed\n * example_reading_spec\n - Specify the names and types of the features on disk.\n - Specify tf.contrib.slim.tfexample_decoder\n * preprocess_example(example, mode)\n - Preprocess the example feature dict from feature name to Tensor or\n SparseTensor.\n - Used in training, eval, and inference (specified by mode).\n\n Eval:\n * eval_metrics\n - Specify the set of evaluation metrics for this problem.\n\n Inference:\n * feature_encoders(data_dir)\n - Return a dict of <feature name, TextEncoder> for encoding and decoding\n inference input/output.\n - Defaults to TextEncoder for inputs and targets.\n \"\"\"\n\n # ============================================================================\n # BEGIN SUBCLASS INTERFACE\n # ============================================================================\n\n def generate_data(self, data_dir, tmp_dir, task_id=-1):\n raise NotImplementedError()\n\n def hparams(self, defaults, model_hparams):\n pass\n\n def dataset_filename(self):\n return self.name\n\n def feature_encoders(self, data_dir):\n del data_dir\n return {\n \"inputs\": text_encoder.TextEncoder(),\n \"targets\": text_encoder.TextEncoder()\n }\n\n def example_reading_spec(self):\n data_fields = {\n \"inputs\": tf.VarLenFeature(tf.int64),\n \"targets\": tf.VarLenFeature(tf.int64)\n }\n data_items_to_decoders = None\n return (data_fields, data_items_to_decoders)\n\n def preprocess_example(self, example, mode, hparams):\n return preprocess_example_common(example, hparams, mode)\n\n def eval_metrics(self):\n return [\n metrics.Metrics.ACC, metrics.Metrics.ACC_TOP5,\n metrics.Metrics.ACC_PER_SEQ, metrics.Metrics.NEG_LOG_PERPLEXITY\n ]\n\n # ============================================================================\n # END SUBCLASS INTERFACE\n # ============================================================================\n\n def training_filepaths(self, data_dir, num_shards, shuffled):\n file_basename = self.dataset_filename()\n if not shuffled:\n file_basename += generator_utils.UNSHUFFLED_SUFFIX\n return generator_utils.train_data_filenames(file_basename, data_dir,\n num_shards)\n\n def dev_filepaths(self, data_dir, num_shards, shuffled):\n file_basename = self.dataset_filename()\n if not shuffled:\n file_basename += generator_utils.UNSHUFFLED_SUFFIX\n return generator_utils.dev_data_filenames(file_basename, data_dir,\n num_shards)\n\n def test_filepaths(self, data_dir, num_shards, shuffled):\n file_basename = self.dataset_filename()\n if not shuffled:\n file_basename += generator_utils.UNSHUFFLED_SUFFIX\n return 
generator_utils.test_data_filenames(file_basename, data_dir,\n num_shards)\n\n def filepattern(self, data_dir, mode, shard=None):\n \"\"\"Get filepattern for data files for mode.\n\n Matches mode to a suffix.\n * TRAIN: train\n * EVAL: dev\n * PREDICT: dev\n * test: test\n\n Args:\n data_dir: str, data directory.\n mode: tf.estimator.ModeKeys or \"test\".\n shard: int, if provided, will only read data from the specified shard.\n\n Returns:\n filepattern str\n \"\"\"\n path = os.path.join(data_dir, self.dataset_filename())\n shard_str = \"-%05d\" % shard if shard is not None else \"\"\n if mode == tf.estimator.ModeKeys.TRAIN:\n suffix = \"train\"\n elif mode in [tf.estimator.ModeKeys.EVAL, tf.estimator.ModeKeys.PREDICT]:\n suffix = \"dev\"\n else:\n assert mode == \"test\"\n suffix = \"test\"\n\n return \"%s-%s%s*\" % (path, suffix, shard_str)\n\n def __init__(self, was_reversed=False, was_copy=False):\n \"\"\"Create a Problem.\n\n Args:\n was_reversed: bool, whether to reverse inputs and targets.\n was_copy: bool, whether to copy inputs to targets. Can be composed with\n was_reversed so that if both are true, the targets become the inputs,\n which are then copied to targets so that the task is targets->targets.\n \"\"\"\n self._was_reversed = was_reversed\n self._was_copy = was_copy\n self._encoders = None\n self._hparams = None\n self._feature_info = None\n\n def get_feature_encoders(self, data_dir=None):\n if self._encoders is None:\n self._encoders = self.feature_encoders(data_dir)\n return self._encoders\n\n def get_hparams(self, model_hparams=None):\n \"\"\"Returns problem_hparams.\"\"\"\n if self._hparams is not None:\n return self._hparams\n\n if self._encoders is None:\n data_dir = (model_hparams and model_hparams.data_dir) or None\n self.get_feature_encoders(data_dir)\n\n hp = _default_hparams()\n ret = self.hparams(hp, model_hparams)\n if ret is not None:\n raise ValueError(\"The Problem subclass hparams function should mutate \"\n \"the defaults passed in and return None.\")\n\n hp.add_hparam(\"vocabulary\", self._encoders)\n hp.add_hparam(\"was_reversed\", self._was_reversed)\n hp.add_hparam(\"was_copy\", self._was_copy)\n\n if self._was_reversed:\n _reverse_problem_hparams(hp)\n if self._was_copy:\n _copy_problem_hparams(hp)\n\n self._hparams = hp\n return self._hparams\n\n def maybe_reverse_features(self, feature_map):\n if not self._was_reversed:\n return\n inputs, targets = feature_map[\"inputs\"], feature_map[\"targets\"]\n feature_map[\"inputs\"], feature_map[\"targets\"] = targets, inputs\n\n def maybe_copy_features(self, feature_map):\n if not self._was_copy:\n return\n feature_map[\"targets\"] = feature_map[\"inputs\"]\n\n def dataset(self,\n mode,\n data_dir=None,\n num_threads=None,\n output_buffer_size=None,\n shuffle_files=None,\n hparams=None,\n preprocess=True,\n dataset_split=None,\n shard=None):\n \"\"\"Build a Dataset for this problem.\n\n Args:\n mode: tf.estimator.ModeKeys; determines which files to read from.\n data_dir: directory that contains data files.\n num_threads: int, number of threads to use for decode and preprocess\n Dataset.map calls.\n output_buffer_size: int, how many elements to prefetch in Dataset.map\n calls.\n shuffle_files: whether to shuffle input files. Default behavior (i.e. when\n shuffle_files=None) is to shuffle if mode == TRAIN.\n hparams: tf.contrib.training.HParams; hparams to be passed to\n Problem.preprocess_example and Problem.hparams. 
If None, will use a\n default set that is a no-op.\n preprocess: bool, whether to map the Dataset through\n Problem.preprocess_example.\n dataset_split: tf.estimator.ModeKeys + [\"test\"], which split to read data\n from (TRAIN:\"-train\", EVAL:\"-dev\", \"test\":\"-test\"). Defaults to mode.\n shard: int, if provided, will only read data from the specified shard.\n\n Returns:\n Dataset containing dict<feature name, Tensor>.\n \"\"\"\n dataset_split = dataset_split or mode\n assert data_dir\n\n if hparams is None:\n hparams = default_model_hparams()\n\n if not hasattr(hparams, \"data_dir\"):\n hparams.add_hparam(\"data_dir\", data_dir)\n if not hparams.data_dir:\n hparams.data_dir = data_dir\n # Construct the Problem's hparams so that items within it are accessible\n _ = self.get_hparams(hparams)\n\n data_fields, data_items_to_decoders = self.example_reading_spec()\n if data_items_to_decoders is None:\n data_items_to_decoders = {\n field: tf.contrib.slim.tfexample_decoder.Tensor(field)\n for field in data_fields\n }\n\n is_training = mode == tf.estimator.ModeKeys.TRAIN\n data_filepattern = self.filepattern(data_dir, dataset_split, shard=shard)\n tf.logging.info(\"Reading data files from %s\", data_filepattern)\n data_files = tf.contrib.slim.parallel_reader.get_data_files(\n data_filepattern)\n if shuffle_files or shuffle_files is None and is_training:\n random.shuffle(data_files)\n dataset = tf.contrib.data.TFRecordDataset(data_files)\n\n def decode_record(record):\n \"\"\"Serialized Example to dict of <feature name, Tensor>.\"\"\"\n decoder = tf.contrib.slim.tfexample_decoder.TFExampleDecoder(\n data_fields, data_items_to_decoders)\n\n decode_items = list(data_items_to_decoders)\n decoded = decoder.decode(record, items=decode_items)\n return dict(zip(decode_items, decoded))\n\n def _preprocess(example):\n example = self.preprocess_example(example, mode, hparams)\n self.maybe_reverse_features(example)\n self.maybe_copy_features(example)\n return example\n\n dataset = dataset.map(decode_record, num_threads=num_threads)\n\n if preprocess:\n dataset = dataset.map(\n _preprocess,\n num_threads=num_threads,\n output_buffer_size=output_buffer_size)\n\n return dataset\n\n @property\n def has_inputs(self):\n return \"inputs\" in self.get_feature_encoders()\n\n @property\n def feature_info(self):\n \"\"\"Retrieve dict<feature name, FeatureInfo>.\n\n Must first call Problem.get_hparams or Problem.dataset to have the problem's\n internal hparams already constructed.\n\n Returns:\n dict<feature name, FeatureInfo>\n \"\"\"\n if self._feature_info is not None:\n return self._feature_info\n\n assert self._hparams is not None\n\n hp = self.get_hparams()\n input_mods = hp.input_modality\n target_mod = hp.target_modality\n vocabs = hp.vocabulary\n if self.has_inputs:\n in_id = hp.input_space_id\n out_id = hp.target_space_id\n\n features = collections.defaultdict(FeatureInfo)\n\n for name, mod_spec in six.iteritems(input_mods):\n mod, vocab_size = mod_spec\n finfo = features[name]\n finfo.modality = mod\n finfo.vocab_size = vocab_size\n\n mod, vocab_size = target_mod\n features[\"targets\"].modality = mod\n features[\"targets\"].vocab_size = vocab_size\n\n for name, encoder in six.iteritems(vocabs):\n features[name].encoder = encoder\n\n if self.has_inputs:\n features[\"inputs\"].space_id = in_id\n features[\"targets\"].space_id = out_id\n\n self._feature_info = features\n return features\n\n\nclass FeatureInfo(object):\n\n def __init__(self,\n encoder=None,\n modality=None,\n vocab_size=None,\n 
space_id=None):\n self.encoder = encoder\n self.modality = modality\n self.vocab_size = vocab_size\n self.space_id = space_id\n\n\ndef _copy_problem_hparams(p_hparams):\n \"\"\"Use input modality, vocab, and space id for target.\"\"\"\n p = p_hparams\n # Duplicate input modality.\n p.target_modality = p.input_modality[\"inputs\"]\n # Duplicate input vocabulary.\n p.vocabulary[\"targets\"] = p.vocabulary[\"inputs\"]\n # Duplicate input space ids.\n p.target_space_id = p.input_space_id\n # Mark that p was reversed.\n p.was_copy = True\n\n\ndef _reverse_problem_hparams(p_hparams):\n \"\"\"Swap input/output modalities, vocab, and space ids.\"\"\"\n p = p_hparams\n\n # Swap modalities.\n input_modality = p.input_modality[\"inputs\"]\n target_modality = p.target_modality\n p.input_modality[\"inputs\"] = target_modality\n p.target_modality = input_modality\n\n # Swap vocabularies.\n input_vocabulary = p.vocabulary[\"inputs\"]\n target_vocabulary = p.vocabulary[\"targets\"]\n p.vocabulary[\"inputs\"] = target_vocabulary\n p.vocabulary[\"targets\"] = input_vocabulary\n\n # Swap input/target space ids.\n input_space_id = p.input_space_id\n target_space_id = p.target_space_id\n p.input_space_id = target_space_id\n p.target_space_id = input_space_id\n\n # Mark that p was reversed.\n p.was_reversed = True\n\n\ndef _default_hparams():\n \"\"\"A set of basic model hyperparameters.\"\"\"\n return tf.contrib.training.HParams(\n # Use this parameter to get comparable perplexity numbers with different\n # tokenizations. This value should be set to the ratio of the number of\n # tokens in the test set according to the tokenization used to the number\n # of tokens in the test set in the \"official\" tokenization. For\n # example, if we are using a word-piece based model and we want to\n # compute per-word perplexity, then we set loss_multiplier to the number\n # of wordpieces per word in the test set.\n loss_multiplier=1.0,\n\n # Use this parameter to allow for larger sequences in the batch. Without\n # the use of this parameter, the size of the inner two dimensions will\n # be used to judge the sequence length.\n batch_size_multiplier=1,\n\n # To make queues of the right capacity, it's good to know the maximal\n # expected batch size, as it can vary a lot. It only affects performance\n # of input readers and memory use. The defaults should be safe and fast,\n # but decrease if your reader uses a lot of memory and increase if slow.\n max_expected_batch_size_per_shard=64,\n\n # During inference for autoregressive problems, if the batch_size is 1,\n # the inference will stop when the model predict a text_encoder.EOS_ID\n # token.\n stop_at_eos=False,\n\n # Modalities used to map from input features to a space compatible with\n # chosen model architecture. One modality spec (which is a 2-tuple,\n # (modality_full_name, vocab_size)) per feature key. modality_full_name\n # is a string type:name, e.g. class_label:class_label_2d. Leaving off\n # the name uses the default modality for that type (e.g. class_label ==\n # class_label:default).\n input_modality={},\n\n # Modality used to map from hidden representation to the target space.\n # Specified as a modality spec, a 2-tuple described above.\n target_modality=None,\n\n # Identifiers used to tell the model which input/target space will be\n # expected. For example, it can tell that we expect French as characters\n # as output, or Spanish as sound. 
Spaces defined as constants in SpaceID\n # class.\n input_space_id=SpaceID.GENERIC,\n target_space_id=SpaceID.GENERIC)\n\n\nclass Text2TextProblem(Problem):\n \"\"\"Base class for text-to-text problems.\"\"\"\n\n @property\n def is_character_level(self):\n \"\"\"Whether the inputs and targets are sequences of characters.\"\"\"\n raise NotImplementedError()\n\n @property\n def targeted_vocab_size(self):\n raise NotImplementedError() # Not needed if self.is_character_level.\n\n def generator(self, data_dir, tmp_dir, is_training):\n \"\"\"Generator for the training and evaluation data.\n\n Args:\n data_dir: The directory in which to assets, e.g. the vocab file.\n tmp_dir: A scratch directory (if needed).\n is_training: A boolean indicating if we should generate training data\n (True) or dev set data (False).\n\n Yields:\n dicts with keys \"inputs\" and \"targets\", with values being lists of token\n ids.\n \"\"\"\n raise NotImplementedError()\n\n @property\n def use_train_shards_for_dev(self):\n \"\"\"If true, we only generate training data and hold out shards for dev.\"\"\"\n return False\n\n @property\n def input_space_id(self):\n raise NotImplementedError()\n\n @property\n def target_space_id(self):\n raise NotImplementedError()\n\n @property\n def num_shards(self):\n raise NotImplementedError()\n\n @property\n def num_dev_shards(self):\n return 1\n\n @property\n def vocab_name(self):\n raise NotImplementedError()\n\n @property\n def vocab_file(self):\n return \"%s.%d\" % (self.vocab_name, self.targeted_vocab_size)\n\n @property\n def use_subword_tokenizer(self):\n raise NotImplementedError()\n\n @property\n def has_inputs(self):\n return True # Set to False for language models.\n\n def generate_data(self, data_dir, tmp_dir, task_id=-1):\n train_paths = self.training_filepaths(\n data_dir, self.num_shards, shuffled=False)\n dev_paths = self.dev_filepaths(\n data_dir, self.num_dev_shards, shuffled=False)\n if self.use_train_shards_for_dev:\n all_paths = train_paths + dev_paths\n generator_utils.generate_files(\n self.generator(data_dir, tmp_dir, True), all_paths)\n generator_utils.shuffle_dataset(all_paths)\n else:\n generator_utils.generate_dataset_and_shuffle(\n self.generator(data_dir, tmp_dir, True), train_paths,\n self.generator(data_dir, tmp_dir, False), dev_paths)\n\n def feature_encoders(self, data_dir):\n if self.is_character_level:\n encoder = text_encoder.ByteTextEncoder()\n elif self.use_subword_tokenizer:\n vocab_filename = os.path.join(data_dir, self.vocab_file)\n encoder = text_encoder.SubwordTextEncoder(vocab_filename)\n else:\n vocab_filename = os.path.join(data_dir, self.vocab_file)\n encoder = text_encoder.TokenTextEncoder(vocab_filename)\n if self.has_inputs:\n return {\"inputs\": encoder, \"targets\": encoder}\n return {\"targets\": encoder}\n\n def hparams(self, defaults, unused_model_hparams):\n p = defaults\n p.stop_at_eos = int(True)\n\n if self.has_inputs:\n source_vocab_size = self._encoders[\"inputs\"].vocab_size\n p.input_modality = {\n \"inputs\": (registry.Modalities.SYMBOL, source_vocab_size)\n }\n target_vocab_size = self._encoders[\"targets\"].vocab_size\n p.target_modality = (registry.Modalities.SYMBOL, target_vocab_size)\n if self.has_inputs:\n p.input_space_id = self.input_space_id\n p.target_space_id = self.target_space_id\n if self.is_character_level:\n p.loss_multiplier = 2.0\n\n def eval_metrics(self):\n return [\n metrics.Metrics.ACC, metrics.Metrics.ACC_TOP5,\n metrics.Metrics.ACC_PER_SEQ, metrics.Metrics.NEG_LOG_PERPLEXITY,\n 
metrics.Metrics.APPROX_BLEU, metrics.Metrics.ROUGE_2_F,\n metrics.Metrics.ROUGE_L_F\n ]\n", "# coding=utf-8\n# Copyright 2017 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Hyperparameters and ranges common to multiple models.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\n\nimport six\nfrom six.moves import zip # pylint: disable=redefined-builtin\nfrom tensor2tensor.utils import registry\n\nimport tensorflow as tf\n\n\[email protected]_hparams(\"basic_1\")\ndef basic_params1():\n \"\"\"A set of basic hyperparameters.\"\"\"\n return tf.contrib.training.HParams(\n batch_size=4096, # in tokens per batch per gpu\n # Fixed batch size turns off bucketing during training mode\n # and uses batch_size as minibatch size (use small batch_size<=32)\n use_fixed_batch_size=False,\n num_hidden_layers=4,\n kernel_height=3,\n kernel_width=1,\n hidden_size=64,\n compress_steps=0,\n # All hyperparameters ending in \"dropout\" are automatically set to 0.0\n # when not in training mode.\n dropout=0.2,\n clip_grad_norm=2.0,\n grad_noise_scale=0.0,\n summarize_grads=False,\n initializer=\"orthogonal\",\n initializer_gain=1.5,\n label_smoothing=0.1,\n optimizer=\"Adam\",\n optimizer_adam_epsilon=1e-6,\n optimizer_adam_beta1=0.85,\n optimizer_adam_beta2=0.997,\n optimizer_momentum_momentum=0.9,\n weight_decay=0.1,\n weight_noise=0.0,\n learning_rate_decay_scheme=\"none\",\n learning_rate_warmup_steps=100,\n learning_rate_cosine_cycle_steps=250000,\n learning_rate=0.1,\n sampling_method=\"argmax\", # \"argmax\" or \"random\"\n sampling_temp=1.0, # temperature for sampling\n problem_choice=\"adaptive\", # \"uniform\", \"adaptive\", \"distributed\"\n # expand the logits a piece at a time - saves memory.\n factored_logits=False,\n multiply_embedding_mode=\"sqrt_depth\",\n # Parameters related to mixtures of experts.\n moe_hidden_sizes=\"2048\", # hidden layer sizes (comma-separated)\n moe_num_experts=64, # number of experts per layer\n moe_k=2, # how many experts to use for each batch element\n moe_loss_coef=1e-2,\n # Sequences of operations to perform on layer input and layer output.\n # Used by common_layers.layer_preprocess, common_layers.layer_postprocess\n # Each character repsesnts an operation:\n # none: no preprocessing\n # d: apply dropout\n # n: apply normalization (see norm_type and norm_epsilon)\n # a: add layer input (residual connection - only during postprocess)\n # The special string \"none\" is used instead of the empty string\n # to indicate no pre/postprocesisng, since the empty string causes\n # trouble for hyperparameter tuning.\n # TODO(noam): The current settings (\"\", \"dan\") are the published version\n # of the transformer. 
(\"n\", \"da\") seems better for harder-to-learn\n # models, so it should probably be the default.\n layer_preprocess_sequence=\"none\",\n layer_postprocess_sequence=\"dan\",\n # dropout rate to use during layer_preprocess and layer_postprocess\n layer_prepostprocess_dropout=0.1,\n # What type of normalization to use\n norm_type=\"layer\", # \"batch\", layer\", \"noam\", \"none\".\n # epsilon parameter to normalization function\n norm_epsilon=1e-6,\n symbol_modality_num_shards=16,\n # During training, we drop sequences whose inputs and targets are shorter\n # than min_length\n min_length=0,\n # During training, we drop sequences whose inputs or targets are longer\n # than max_length.\n # If max_length==0, we use hparams.batch_size instead.\n max_length=0,\n # Maximum length in the smallest length bucket. Setting this\n # flag too high will result in wasteful padding of short\n # sequences. Due to some (hopefully) temporary hacks in the\n # data reading and batching code, setting this flag too low\n # results in a very long batch-shuffling queue.\n # TODO(noam): change this once the Datasets API changes.\n min_length_bucket=8,\n # This flag controls the number of length buckets in the data\n # reader. The buckets have maximum lengths from\n # min_bucket_length to (max_length or batch_size), increasing\n # (approximately) by factors of length_bucket_step.\n length_bucket_step=1.1,\n # If set to True, drop sequences longer than max_length during eval.\n # This affects the validity of the evaluation metrics.\n eval_drop_long_sequences=False,\n # TODO(lukaszkaiser): these parameters should probably be set elsewhere.\n # in SymbolModality, share the output embeddings and the softmax\n # variables.\n # You can also share the input embeddings with the output embeddings\n # by using a problem_hparams that uses the same modality object for\n # the input_modality and target_modality.\n shared_embedding_and_softmax_weights=False,\n # In SymbolModality, skip the top layer, assume we're providing logits.\n symbol_modality_skip_top=False,\n # For each feature for which you want to override the default input\n # modality, add an entry to this semicolon-separated string. Entries are\n # formatted \"feature_name:modality_type:modality_name\", e.g.\n # \"inputs:symbol:default;other_inputs:audio:identity\".\n input_modalities=\"default\", # We don't use empty string in params.\n # To override the default target modality, specify\n # \"modality_type:modality_name\", e.g. \"symbol:ctc\".\n target_modality=\"default\",\n # The maximum length of \"input\" sequence.\n # Sequences longer than this value will be truncated. 0 or negative values\n # mean there is no maximum or truncation.\n # You can change this behavior by overridding preprocess_example() method\n # in your problem class.\n max_input_seq_length=0,\n # The maximum length of \"target\" sequence.\n # Sequences longer than this value will be truncated. 0 or negative values\n # mean there is no maximum or truncation.\n # You can change this behavior by overridding preprocess_example() method\n # in your problem class.\n max_target_seq_length=0,\n # This flag allows us to optionally treat a seq-to-seq problem\n # as a language model. Legal values are:\n #\n # \"none\" - Do not prepend the inputs to the targets.\n # \"prepend_inputs_masked_attention\"\n # replace \"targets\" in preprocessing with\n # tf.concat([inputs, [0], targets], axis=1)\n # i.e. we prepend the inputs to the targets with a single\n # padding token in between. 
Use masked self-attention on the\n # entire resulting sequence. During training, we compute losses on\n # the combined sequence. During eval, we compute the metrics\n # on only the targets portion.\n # \"prepend_inputs_full_attention\"\n # similar to the previous option except that each\n # position in the inputs portion can see the\n # entire inputs portion. This removes the challenge of\n # autoregressively predicting the inputs portion.\n prepend_mode=\"none\",\n # Scheduled sampling is interesting for auto-regressive models.\n # It runs an additional step using the generated output as autoregressive\n # targets, which can improve the models inference results later. The\n # parameter scheduled_sampling_prob determines with what probability\n # will such additional step be run. It's turned off (0.0) by default.\n # This probability will exponentially warm up for the number of\n # steps determined by scheduled_sampling_warmup_steps.\n # The tensor used for the second step will consist of outputs from\n # the first step mixed with gold truth, with the proportion of gold\n # determined by scheduled_sampling_gold_mixin_prob.\n scheduled_sampling_prob=0.0,\n scheduled_sampling_warmup_steps=50000,\n scheduled_sampling_gold_mixin_prob=0.5,\n # This is the actual batch size, *not* tokens per batch (i.e. for\n # language models this is the number of sentences in the batch)\n tpu_batch_size_per_shard=24,\n )\n\n\nclass RangedHParams(object):\n \"\"\"Defines parameter ranges for tuning.\"\"\"\n\n # From ParameterConfig proto\n LINEAR_SCALE = 1\n LOG_SCALE = 2\n REVERSE_LOG_SCALE = 3\n\n def __init__(self):\n self._categorical_params = {}\n self._discrete_params = {}\n self._discrete_float_params = {}\n self._float_params = {}\n self._int_params = {}\n\n def _check_reset_and_type_change(self, name, orig_ctr):\n \"\"\"Check if name is in orig_ctr or in one of the other type containers.\"\"\"\n # Resetting a hyperparameter\n if name in orig_ctr:\n tf.logging.warning(\"Overwriting hparam %s\", name)\n\n ctr_names = [(self._categorical_params,\n \"categorical\"), (self._discrete_params, \"discrete\"),\n (self._float_params, \"float\"), (self._int_params, \"int\"),\n (self._discrete_float_params, \"discrete_float\")]\n ctrs, names = list(zip(*ctr_names))\n orig_name = names[ctrs.index(orig_ctr)]\n\n for ctr, ctr_name in ctr_names:\n if ctr is orig_ctr:\n continue\n\n # Using a different type for the same hyperparameter name\n if name in ctr:\n raise ValueError(\"Setting hyperparameter %s as type %s, but a \"\n \"hyperparemeter of the same name was originally \"\n \"registered as type %s\" % (name, ctr_name, orig_name))\n\n def set_categorical(self, name, categories, length=None):\n self._check_reset_and_type_change(name, self._categorical_params)\n self._categorical_params[name] = (name, categories, length)\n\n def set_discrete(self, name, feasible_points, scale=None, length=None):\n self._check_reset_and_type_change(name, self._discrete_params)\n self._discrete_params[name] = (name, feasible_points, scale, length)\n\n def set_float(self, name, min_val, max_val, scale=None, length=None):\n if name in self._discrete_float_params:\n del self._discrete_float_params[name]\n self._check_reset_and_type_change(name, self._float_params)\n self._float_params[name] = (name, min_val, max_val, scale, length)\n\n def set_discrete_float(self, name, val):\n self._check_reset_and_type_change(name, self._discrete_float_params)\n self._discrete_float_params[name] = (name, [val])\n\n def set_int(self, name, min_val, 
max_val, scale=None, length=None):\n self._check_reset_and_type_change(name, self._int_params)\n self._int_params[name] = (name, min_val, max_val, scale, length)\n\n def fix_select_params(self, hp):\n ctrs = [\n self._categorical_params, self._discrete_params,\n self._discrete_float_params, self._float_params, self._int_params\n ]\n for key, val in hp.values().iteritems():\n for ctr in ctrs:\n if key in ctr:\n del ctr[key]\n self.set_discrete(key, [val])\n\n\ndef fill_ranged_hparams_from_hparams(hparams, ranged_hparams):\n \"\"\"Fill ranged_hparams with singleton values from hparams.\n\n HParams are placed in RangedHParams with the following functions, according to\n type:\n * int: set_discrete\n * bool: set_discrete\n * float: set_discrete_float\n * str: set_categorical\n\n Args:\n hparams: tf.contrib.training.HParams; contains the hyperparameters to copy\n over to ranged_hparams.\n ranged_hparams: RangedHParams; will have hparams values copied to it.\n\n Raises:\n ValueError: if hparams contains a hyperparameter not of type\n {int, float, str, bool}.\n \"\"\"\n for name, (hp_type, is_multivalent) in six.iteritems(hparams._hparam_types): # pylint: disable=protected-access\n\n if is_multivalent:\n raise ValueError(\"Multivalent hparams not supported in RangedHParams. \"\n \"Hyperparameter %s is multivalent.\" % name)\n val = getattr(hparams, name)\n if hp_type == int:\n ranged_hparams.set_discrete(name, [val])\n elif hp_type == bool:\n ranged_hparams.set_discrete(name, [int(val)])\n elif hp_type == float:\n ranged_hparams.set_discrete_float(name, val)\n elif hp_type == str:\n ranged_hparams.set_categorical(name, [val])\n else:\n raise ValueError(\"Unsupported type %s for param %s\" % (hp_type, name))\n\n\[email protected]_ranged_hparams(\"basic1\")\ndef basic_range1(ranged_hparams):\n \"\"\"A basic range of hyperparameters.\"\"\"\n rhp = ranged_hparams\n\n hparams = basic_params1()\n fill_ranged_hparams_from_hparams(hparams, rhp)\n\n rhp.set_discrete(\"batch_size\", [1024, 2048, 4096])\n rhp.set_discrete(\"num_hidden_layers\", [1, 2, 3, 4, 5, 6])\n rhp.set_discrete(\"hidden_size\", [32, 64, 128, 256, 512], scale=rhp.LOG_SCALE)\n rhp.set_discrete(\"kernel_height\", [1, 3, 5, 7])\n rhp.set_discrete(\"kernel_width\", [1, 3, 5, 7])\n rhp.set_discrete(\"compress_steps\", [0, 1, 2])\n rhp.set_float(\"dropout\", 0.0, 0.5)\n rhp.set_float(\"weight_decay\", 1e-4, 10.0, scale=rhp.LOG_SCALE)\n rhp.set_float(\"label_smoothing\", 0.0, 0.2)\n rhp.set_float(\"clip_grad_norm\", 0.01, 50.0, scale=rhp.LOG_SCALE)\n rhp.set_float(\"learning_rate\", 0.005, 2.0, scale=rhp.LOG_SCALE)\n rhp.set_categorical(\"initializer\",\n [\"uniform\", \"orthogonal\", \"uniform_unit_scaling\"])\n rhp.set_float(\"initializer_gain\", 0.5, 3.5)\n rhp.set_categorical(\"learning_rate_decay_scheme\",\n [\"none\", \"sqrt\", \"noam\", \"exp10k\"])\n rhp.set_float(\"optimizer_adam_epsilon\", 1e-7, 1e-2, scale=rhp.LOG_SCALE)\n rhp.set_float(\"optimizer_adam_beta1\", 0.8, 0.9)\n rhp.set_float(\"optimizer_adam_beta2\", 0.995, 0.999)\n rhp.set_categorical(\n \"optimizer\",\n [\"Adam\", \"Adagrad\", \"Momentum\", \"RMSProp\", \"SGD\", \"YellowFin\"])\n", "# coding=utf-8\n# Copyright 2017 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the 
License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for Genetics problems.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\n\nimport numpy as np\n\nfrom tensor2tensor.data_generators import dna_encoder\nfrom tensor2tensor.data_generators import gene_expression\n\nimport tensorflow as tf\n\n\nclass GeneticsTest(tf.test.TestCase):\n\n def _oneHotBases(self, bases):\n ref = [\"A\", \"C\", \"T\", \"G\"]\n one_hots = []\n for base in bases:\n one_hot = [False] * 4\n if base in ref:\n one_hot[ref.index(base)] = True\n one_hots.append(one_hot)\n return np.array(one_hots)\n\n def testRecordToExample(self):\n encoder = dna_encoder.DNAEncoder(chunk_size=2)\n raw_inputs = [\"A\", \"C\", \"G\", \"N\", \"C\", \"T\"]\n\n # Put in numpy arrays in the same format as in the h5 file\n inputs = self._oneHotBases(raw_inputs)\n mask = np.array([True, False, True])\n outputs = np.array([[1.0, 2.0, 3.0], [5.0, 1.0, 0.2], [5.1, 2.3, 2.3]])\n # Convert to example dict\n ex_dict = gene_expression.to_example_dict(encoder, inputs, mask, outputs)\n\n self.assertEqual(len(raw_inputs) // 2 + 1, len(ex_dict[\"inputs\"]))\n self.assertAllEqual(encoder.encode(raw_inputs) + [1], ex_dict[\"inputs\"])\n self.assertAllEqual([1.0, 0.0, 1.0], ex_dict[\"targets_mask\"])\n self.assertAllEqual([1.0, 2.0, 3.0, 5.0, 1.0, 0.2, 5.1, 2.3, 2.3],\n ex_dict[\"targets\"])\n self.assertAllEqual([3, 3], ex_dict[\"targets_shape\"])\n\n def testGenerateShardArgs(self):\n num_examples = 37\n num_shards = 4\n outfiles = [str(i) for i in range(num_shards)]\n shard_args = gene_expression.generate_shard_args(outfiles, num_examples)\n\n starts, ends, fnames = zip(*shard_args)\n self.assertAllEqual([0, 9, 18, 27], starts)\n self.assertAllEqual([9, 18, 27, 37], ends)\n self.assertAllEqual(fnames, outfiles)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n", "# coding=utf-8\n# Copyright 2017 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for desc2code.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nfrom tensor2tensor.data_generators import desc2code\n\nimport tensorflow as tf\n\nCODE_CPP_IN = \"\"\"\n #include <iostream>\n\nvoid main() { // This comment will be removed\n // This too.\n //\n /* Not this one */\n\\t\n\\t\n int a \\t\\n = 3;//\n//\n}\n\n\"\"\"\n\nCODE_CPP_OUT = (\"#include <iostream> void main() { /* Not this one */ int a = \"\n \"3; }\")\n\n\nclass Desc2codeTest(tf.test.TestCase):\n\n def testCppPreprocess(self):\n \"\"\"Check that the file correctly preprocesses the code source.\"\"\"\n cpp_pb = desc2code.ProgrammingDesc2codeCpp()\n\n self.assertEqual( # Add space between two lines\n cpp_pb.preprocess_target(\"firstline//comm1\\nsecondline//comm2\\n\"),\n \"firstline secondline\")\n # Checking for both comments and spaces\n self.assertEqual(cpp_pb.preprocess_target(CODE_CPP_IN), CODE_CPP_OUT)\n self.assertEqual(\n cpp_pb.preprocess_target(\" not removed //abcd \"),\n \"not removed //abcd\")\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n" ]
[ [ "tensorflow.concat", "tensorflow.contrib.slim.tfexample_decoder.TFExampleDecoder", "tensorflow.contrib.data.TFRecordDataset", "tensorflow.logging.info", "tensorflow.contrib.slim.parallel_reader.get_data_files", "tensorflow.contrib.slim.tfexample_decoder.Tensor", "tensorflow.VarLenFeature", "tensorflow.contrib.training.HParams" ], [ "tensorflow.logging.warning", "tensorflow.contrib.training.HParams" ], [ "numpy.array", "tensorflow.test.main" ], [ "tensorflow.test.main" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
A-Charvin/cv-tricks.com
[ "3c6da9c62665abefa6114e0b7f0c39a0a012f496" ]
[ "Tensorflow-tutorials/tutorial-2-image-classifier/predict2.py" ]
[ "import tensorflow as tf\nimport numpy as np\nimport os,glob,cv2\nimport sys,argparse\n\n\n# First, pass the path of the image\ndir_path = os.path.dirname(os.path.realpath(__file__))\nimage_path=sys.argv[1] \nfilename = dir_path +'/' +image_path\nimage_size=128\nnum_channels=3\nimages = []\n# Reading the image using OpenCV\nimage = cv2.imread(filename)\n# Resizing the image to our desired size and preprocessing will be done exactly as done during training\nimage = cv2.resize(image, (image_size, image_size),0,0, cv2.INTER_LINEAR)\nimages.append(image)\nimages = np.array(images, dtype=np.uint8)\nimages = images.astype('float32')\nimages = np.multiply(images, 1.0/255.0) \n#The input to the network is of shape [None image_size image_size num_channels]. Hence we reshape.\nx_batch = images.reshape(1, image_size,image_size,num_channels)\n\n## Let us restore the saved model \nsess = tf.Session()\n# Step-1: Recreate the network graph. At this step only graph is created.\nsaver = tf.train.import_meta_graph('ore-mine-model.meta')\n# Step-2: Now let's load the weights saved using the restore method.\nsaver.restore(sess, tf.train.latest_checkpoint('./'))\n\n# Accessing the default graph which we have restored\ngraph = tf.get_default_graph()\n\n# Now, let's get hold of the op that we can be processed to get the output.\n# In the original network y_pred is the tensor that is the prediction of the network\ny_pred = graph.get_tensor_by_name(\"y_pred:0\")\n\n## Let's feed the images to the input placeholders\nx= graph.get_tensor_by_name(\"x:0\") \ny_true = graph.get_tensor_by_name(\"y_true:0\") \ny_test_images = np.zeros((1, len(os.listdir('training_data')))) \n\n\n### Creating the feed_dict that is required to be fed to calculate y_pred \nfeed_dict_testing = {x: x_batch, y_true: y_test_images}\nresult=sess.run(y_pred, feed_dict=feed_dict_testing)\n# result is of this format [probabiliy_of_rose probability_of_sunflower]\nprint(result)\n" ]
[ [ "tensorflow.train.latest_checkpoint", "numpy.multiply", "tensorflow.train.import_meta_graph", "tensorflow.Session", "tensorflow.get_default_graph", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
87003697/Segmentation
[ "5973a64768632fc52c55f9ffc9f0b43746699b37", "5973a64768632fc52c55f9ffc9f0b43746699b37" ]
[ "utils/losses.py", "dataloaders/coco.py" ]
[ "import numpy as np\nimport torch\nimport torch.nn.functional as F\nimport torch.nn as nn\nfrom sklearn.utils import class_weight \nfrom utils.lovasz_losses import lovasz_softmax\nimport pdb\n\ndef make_one_hot(labels, classes):\n one_hot = torch.FloatTensor(labels.size()[0], classes, labels.size()[2], labels.size()[3]).zero_().to(labels.device)\n target = one_hot.scatter_(1, labels.data, 1)\n return target\n\ndef get_weights(target):\n t_np = target.view(-1).data.cpu().numpy()\n\n classes, counts = np.unique(t_np, return_counts=True)\n cls_w = np.median(counts) / counts\n #cls_w = class_weight.compute_class_weight('balanced', classes, t_np)\n\n weights = np.ones(7)\n weights[classes] = cls_w\n return torch.from_numpy(weights).float().cuda()\n\nclass CrossEntropyLoss2d(nn.Module):\n def __init__(self, weight=None, ignore_index=255, reduction='mean'):\n super(CrossEntropyLoss2d, self).__init__()\n self.CE = nn.CrossEntropyLoss(weight=weight, ignore_index=ignore_index, reduction=reduction)\n\n def forward(self, output, target):\n loss = self.CE(output, target)\n return loss\n\nclass DiceLoss(nn.Module):\n def __init__(self, smooth=1., ignore_index=255):\n super(DiceLoss, self).__init__()\n self.ignore_index = ignore_index\n self.smooth = smooth\n\n def forward(self, output, target):\n if self.ignore_index not in range(target.min(), target.max()):\n if (target == self.ignore_index).sum() > 0:\n target[target == self.ignore_index] = target.min()\n target = make_one_hot(target.unsqueeze(dim=1), classes=output.size()[1])\n output = F.softmax(output, dim=1)\n output_flat = output.contiguous().view(-1)\n target_flat = target.contiguous().view(-1)\n intersection = (output_flat * target_flat).sum()\n loss = 1 - ((2. * intersection + self.smooth) /\n (output_flat.sum() + target_flat.sum() + self.smooth))\n return loss\n\nclass FocalLoss(nn.Module):\n def __init__(self, gamma=2, alpha=None, ignore_index=255, size_average=True):\n super(FocalLoss, self).__init__()\n self.gamma = gamma\n self.size_average = size_average\n self.CE_loss = nn.CrossEntropyLoss(reduce=False, ignore_index=ignore_index, weight=alpha)\n\n def forward(self, output, target):\n logpt = self.CE_loss(output, target)\n pt = torch.exp(-logpt)\n loss = ((1-pt)**self.gamma) * logpt\n if self.size_average:\n return loss.mean()\n return loss.sum()\n\nclass CE_DiceLoss(nn.Module):\n def __init__(self, smooth=1, reduction='mean', ignore_index=255, weight=None):\n super(CE_DiceLoss, self).__init__()\n self.smooth = smooth\n self.dice = DiceLoss()\n self.cross_entropy = nn.CrossEntropyLoss(weight=weight, reduction=reduction, ignore_index=ignore_index)\n \n def forward(self, output, target):\n CE_loss = self.cross_entropy(output, target)\n dice_loss = self.dice(output, target)\n return CE_loss + dice_loss\n\nclass LovaszSoftmax(nn.Module):\n def __init__(self, classes='present', per_image=False, ignore_index=255):\n super(LovaszSoftmax, self).__init__()\n self.smooth = classes\n self.per_image = per_image\n self.ignore_index = ignore_index\n \n def forward(self, output, target):\n logits = F.softmax(output, dim=1)\n loss = lovasz_softmax(logits, target, ignore=self.ignore_index)\n return loss\n", "# Originally written by Kazuto Nakashima \n# https://github.com/kazuto1011/deeplab-pytorch\n\nfrom base import BaseDataSet, BaseDataLoader\nfrom PIL import Image\nfrom glob import glob\nimport numpy as np\nimport scipy.io as sio\nfrom utils import palette\nimport torch\nimport os\nimport cv2\nimport pdb\n\nclass CocoStuff10k(BaseDataSet):\n def 
__init__(self, warp_image = True, **kwargs):\n self.warp_image = warp_image\n self.num_classes = 182\n self.palette = palette.COCO_palette\n super(CocoStuff10k, self).__init__(**kwargs)\n\n def _set_files(self):\n if self.split in ['train', 'test', 'all']:\n file_list = os.path.join(self.root, 'imageLists', self.split + '.txt')\n self.files = [name.rstrip() for name in tuple(open(file_list, \"r\"))]\n else: raise ValueError(f\"Invalid split name {self.split} choose one of [train, test, all]\")\n\n def _load_data(self, index):\n image_id = self.files[index]\n image_path = os.path.join(self.root, 'images', image_id + '.jpg')\n label_path = os.path.join(self.root, 'annotations', image_id + '.mat')\n image = np.asarray(Image.open(image_path), dtype=np.float32)\n label = sio.loadmat(label_path)['S']\n label -= 1 # unlabeled (0 -> -1)\n label[label == -1] = 255\n if self.warp_image:\n image = cv2.resize(image, (513, 513), interpolation=cv2.INTER_LINEAR)\n label = np.asarray(Image.fromarray(label).resize((513, 513), resample=Image.NEAREST))\n return image, label, image_id\n\nclass CocoStuff164k(BaseDataSet):\n def __init__(self, **kwargs):\n self.num_classes = 182\n self.palette = palette.COCO_palette\n super(CocoStuff164k, self).__init__(**kwargs)\n\n def _set_files(self):\n if self.split in ['train2017', 'val2017']:\n file_list = sorted(glob(os.path.join(self.root, 'images', self.split + '/*.jpg')))\n self.files = [os.path.basename(f).split('.')[0] for f in file_list]\n else: raise ValueError(f\"Invalid split name {self.split}, either train2017 or val2017\")\n\n def _load_data(self, index):\n image_id = self.files[index]\n image_path = os.path.join(self.root, 'images', self.split, image_id + '.jpg')\n label_path = os.path.join(self.root, 'annotations', self.split, image_id + '.png')\n image = np.asarray(Image.open(image_path).convert('RGB'), dtype=np.float32)\n label = cv2.imread(label_path, cv2.IMREAD_GRAYSCALE)\n return image, label, image_id\n\ndef get_parent_class(value, dictionary):\n for k, v in dictionary.items():\n if isinstance(v, list):\n if value in v:\n yield k\n elif isinstance(v, dict):\n if value in list(v.keys()):\n yield k\n else:\n for res in get_parent_class(value, v):\n yield res\n\nclass COCO(BaseDataLoader):\n def __init__(self, data_dir, batch_size, split, crop_size=None, base_size=None, scale=True, num_workers=1, partition = 'CocoStuff10k',\n shuffle=False, flip=False, rotate=False, blur= False, augment=False, val_split= None, return_id=False, val=False):\n\n self.MEAN = [0.43931922, 0.41310471, 0.37480941]\n self.STD = [0.24272706, 0.23649098, 0.23429529]\n\n kwargs = {\n 'root': data_dir,\n 'split': split,\n 'mean': self.MEAN,\n 'std': self.STD,\n 'augment': augment,\n 'crop_size': crop_size,\n 'base_size': base_size,\n 'scale': scale,\n 'flip': flip,\n 'blur': blur,\n 'rotate': rotate,\n 'return_id': return_id,\n 'val': val\n }\n if partition == 'CocoStuff10k': self.dataset = CocoStuff10k(**kwargs)\n elif partition == 'CocoStuff164k': self.dataset = CocoStuff164k(**kwargs)\n else: raise ValueError(f\"Please choose either CocoStuff10k / CocoStuff164k\")\n\n super(COCO, self).__init__(self.dataset, batch_size, shuffle, num_workers, val_split)\n\n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.nn.functional.softmax", "numpy.unique", "numpy.median", "torch.from_numpy", "numpy.ones", "torch.exp" ], [ "scipy.io.loadmat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
meeseeksmachine/pandas
[ "27ebb3e1e40513ad5f8919a5bbc7298e2e070a39" ]
[ "pandas/core/sparse/frame.py" ]
[ "\"\"\"\nData structures for sparse float data. Life is made simpler by dealing only\nwith float64 data\n\"\"\"\nfrom __future__ import division\n# pylint: disable=E1101,E1103,W0231,E0202\n\nimport warnings\nfrom pandas.compat import lmap\nfrom pandas import compat\nimport numpy as np\n\nfrom pandas.core.dtypes.missing import isna, notna\nfrom pandas.core.dtypes.cast import maybe_upcast, find_common_type\nfrom pandas.core.dtypes.common import ensure_platform_int, is_scipy_sparse\n\nfrom pandas.compat.numpy import function as nv\nfrom pandas.core.index import Index, MultiIndex, ensure_index\nfrom pandas.core.series import Series\nfrom pandas.core.frame import DataFrame, extract_index, _prep_ndarray\nimport pandas.core.algorithms as algos\nfrom pandas.core.internals import (BlockManager,\n create_block_manager_from_arrays)\nimport pandas.core.generic as generic\nfrom pandas.core.sparse.series import SparseSeries, SparseArray\nfrom pandas._libs.sparse import BlockIndex, get_blocks\nfrom pandas.util._decorators import Appender\nimport pandas.core.ops as ops\nimport pandas.core.common as com\nimport pandas.core.indexes.base as ibase\n\n_shared_doc_kwargs = dict(klass='SparseDataFrame')\n\n\nclass SparseDataFrame(DataFrame):\n \"\"\"\n DataFrame containing sparse floating point data in the form of SparseSeries\n objects\n\n Parameters\n ----------\n data : same types as can be passed to DataFrame or scipy.sparse.spmatrix\n .. versionchanged :: 0.23.0\n If data is a dict, argument order is maintained for Python 3.6\n and later.\n\n index : array-like, optional\n column : array-like, optional\n default_kind : {'block', 'integer'}, default 'block'\n Default sparse kind for converting Series to SparseSeries. Will not\n override SparseSeries passed into constructor\n default_fill_value : float\n Default fill_value for converting Series to SparseSeries\n (default: nan). 
Will not override SparseSeries passed in.\n \"\"\"\n _subtyp = 'sparse_frame'\n\n def __init__(self, data=None, index=None, columns=None, default_kind=None,\n default_fill_value=None, dtype=None, copy=False):\n\n # pick up the defaults from the Sparse structures\n if isinstance(data, SparseDataFrame):\n if index is None:\n index = data.index\n if columns is None:\n columns = data.columns\n if default_fill_value is None:\n default_fill_value = data.default_fill_value\n if default_kind is None:\n default_kind = data.default_kind\n elif isinstance(data, (SparseSeries, SparseArray)):\n if index is None:\n index = data.index\n if default_fill_value is None:\n default_fill_value = data.fill_value\n if columns is None and hasattr(data, 'name'):\n columns = [data.name]\n if columns is None:\n raise Exception(\"cannot pass a series w/o a name or columns\")\n data = {columns[0]: data}\n\n if default_fill_value is None:\n default_fill_value = np.nan\n if default_kind is None:\n default_kind = 'block'\n\n self._default_kind = default_kind\n self._default_fill_value = default_fill_value\n\n if is_scipy_sparse(data):\n mgr = self._init_spmatrix(data, index, columns, dtype=dtype,\n fill_value=default_fill_value)\n elif isinstance(data, dict):\n mgr = self._init_dict(data, index, columns, dtype=dtype)\n elif isinstance(data, (np.ndarray, list)):\n mgr = self._init_matrix(data, index, columns, dtype=dtype)\n elif isinstance(data, SparseDataFrame):\n mgr = self._init_mgr(data._data,\n dict(index=index, columns=columns),\n dtype=dtype, copy=copy)\n elif isinstance(data, DataFrame):\n mgr = self._init_dict(data, data.index, data.columns, dtype=dtype)\n elif isinstance(data, Series):\n mgr = self._init_dict(data.to_frame(), data.index,\n columns=None, dtype=dtype)\n elif isinstance(data, BlockManager):\n mgr = self._init_mgr(data, axes=dict(index=index, columns=columns),\n dtype=dtype, copy=copy)\n elif data is None:\n data = DataFrame()\n\n if index is None:\n index = Index([])\n else:\n index = ensure_index(index)\n\n if columns is None:\n columns = Index([])\n else:\n for c in columns:\n data[c] = SparseArray(np.nan, index=index,\n kind=self._default_kind,\n fill_value=self._default_fill_value)\n mgr = to_manager(data, columns, index)\n if dtype is not None:\n mgr = mgr.astype(dtype)\n else:\n msg = ('SparseDataFrame called with unknown type \"{data_type}\" '\n 'for data argument')\n raise TypeError(msg.format(data_type=type(data).__name__))\n\n generic.NDFrame.__init__(self, mgr)\n\n @property\n def _constructor(self):\n return SparseDataFrame\n\n _constructor_sliced = SparseSeries\n\n def _init_dict(self, data, index, columns, dtype=None):\n # pre-filter out columns if we passed it\n if columns is not None:\n columns = ensure_index(columns)\n data = {k: v for k, v in compat.iteritems(data) if k in columns}\n else:\n keys = com._dict_keys_to_ordered_list(data)\n columns = Index(keys)\n\n if index is None:\n index = extract_index(list(data.values()))\n\n def sp_maker(x):\n return SparseArray(x, kind=self._default_kind,\n fill_value=self._default_fill_value,\n copy=True, dtype=dtype)\n sdict = {}\n for k, v in compat.iteritems(data):\n if isinstance(v, Series):\n # Force alignment, no copy necessary\n if not v.index.equals(index):\n v = v.reindex(index)\n\n if not isinstance(v, SparseSeries):\n v = sp_maker(v.values)\n elif isinstance(v, SparseArray):\n v = v.copy()\n else:\n if isinstance(v, dict):\n v = [v.get(i, np.nan) for i in index]\n\n v = sp_maker(v)\n sdict[k] = v\n\n # TODO: figure out how to 
handle this case, all nan's?\n # add in any other columns we want to have (completeness)\n nan_arr = np.empty(len(index), dtype='float64')\n nan_arr.fill(np.nan)\n nan_arr = sp_maker(nan_arr)\n sdict.update((c, nan_arr) for c in columns if c not in sdict)\n\n return to_manager(sdict, columns, index)\n\n def _init_matrix(self, data, index, columns, dtype=None):\n \"\"\" Init self from ndarray or list of lists \"\"\"\n data = _prep_ndarray(data, copy=False)\n index, columns = self._prep_index(data, index, columns)\n data = {idx: data[:, i] for i, idx in enumerate(columns)}\n return self._init_dict(data, index, columns, dtype)\n\n def _init_spmatrix(self, data, index, columns, dtype=None,\n fill_value=None):\n \"\"\" Init self from scipy.sparse matrix \"\"\"\n index, columns = self._prep_index(data, index, columns)\n data = data.tocoo()\n N = len(index)\n\n # Construct a dict of SparseSeries\n sdict = {}\n values = Series(data.data, index=data.row, copy=False)\n for col, rowvals in values.groupby(data.col):\n # get_blocks expects int32 row indices in sorted order\n rowvals = rowvals.sort_index()\n rows = rowvals.index.values.astype(np.int32)\n blocs, blens = get_blocks(rows)\n\n sdict[columns[col]] = SparseSeries(\n rowvals.values, index=index,\n fill_value=fill_value,\n sparse_index=BlockIndex(N, blocs, blens))\n\n # Add any columns that were empty and thus not grouped on above\n sdict.update({column: SparseSeries(index=index,\n fill_value=fill_value,\n sparse_index=BlockIndex(N, [], []))\n for column in columns\n if column not in sdict})\n\n return self._init_dict(sdict, index, columns, dtype)\n\n def _prep_index(self, data, index, columns):\n N, K = data.shape\n if index is None:\n index = ibase.default_index(N)\n if columns is None:\n columns = ibase.default_index(K)\n\n if len(columns) != K:\n raise ValueError('Column length mismatch: {columns} vs. {K}'\n .format(columns=len(columns), K=K))\n if len(index) != N:\n raise ValueError('Index length mismatch: {index} vs. {N}'\n .format(index=len(index), N=N))\n return index, columns\n\n def to_coo(self):\n \"\"\"\n Return the contents of the frame as a sparse SciPy COO matrix.\n\n .. versionadded:: 0.20.0\n\n Returns\n -------\n coo_matrix : scipy.sparse.spmatrix\n If the caller is heterogeneous and contains booleans or objects,\n the result will be of dtype=object. See Notes.\n\n Notes\n -----\n The dtype will be the lowest-common-denominator type (implicit\n upcasting); that is to say if the dtypes (even of numeric types)\n are mixed, the one that accommodates all will be chosen.\n\n e.g. If the dtypes are float16 and float32, dtype will be upcast to\n float32. 
By numpy.find_common_type convention, mixing int64 and\n and uint64 will result in a float64 dtype.\n \"\"\"\n try:\n from scipy.sparse import coo_matrix\n except ImportError:\n raise ImportError('Scipy is not installed')\n\n dtype = find_common_type(self.dtypes)\n cols, rows, datas = [], [], []\n for col, name in enumerate(self):\n s = self[name]\n row = s.sp_index.to_int_index().indices\n cols.append(np.repeat(col, len(row)))\n rows.append(row)\n datas.append(s.sp_values.astype(dtype, copy=False))\n\n cols = np.concatenate(cols)\n rows = np.concatenate(rows)\n datas = np.concatenate(datas)\n return coo_matrix((datas, (rows, cols)), shape=self.shape)\n\n def __array_wrap__(self, result):\n return self._constructor(\n result, index=self.index, columns=self.columns,\n default_kind=self._default_kind,\n default_fill_value=self._default_fill_value).__finalize__(self)\n\n def __getstate__(self):\n # pickling\n return dict(_typ=self._typ, _subtyp=self._subtyp, _data=self._data,\n _default_fill_value=self._default_fill_value,\n _default_kind=self._default_kind)\n\n def _unpickle_sparse_frame_compat(self, state):\n \"\"\" original pickle format \"\"\"\n series, cols, idx, fv, kind = state\n\n if not isinstance(cols, Index): # pragma: no cover\n from pandas.io.pickle import _unpickle_array\n columns = _unpickle_array(cols)\n else:\n columns = cols\n\n if not isinstance(idx, Index): # pragma: no cover\n from pandas.io.pickle import _unpickle_array\n index = _unpickle_array(idx)\n else:\n index = idx\n\n series_dict = DataFrame()\n for col, (sp_index, sp_values) in compat.iteritems(series):\n series_dict[col] = SparseSeries(sp_values, sparse_index=sp_index,\n fill_value=fv)\n\n self._data = to_manager(series_dict, columns, index)\n self._default_fill_value = fv\n self._default_kind = kind\n\n def to_dense(self):\n \"\"\"\n Convert to dense DataFrame\n\n Returns\n -------\n df : DataFrame\n \"\"\"\n data = {k: v.to_dense() for k, v in compat.iteritems(self)}\n return DataFrame(data, index=self.index, columns=self.columns)\n\n def _apply_columns(self, func):\n \"\"\" get new SparseDataFrame applying func to each columns \"\"\"\n\n new_data = {}\n for col, series in compat.iteritems(self):\n new_data[col] = func(series)\n\n return self._constructor(\n data=new_data, index=self.index, columns=self.columns,\n default_fill_value=self.default_fill_value).__finalize__(self)\n\n def astype(self, dtype):\n return self._apply_columns(lambda x: x.astype(dtype))\n\n def copy(self, deep=True):\n \"\"\"\n Make a copy of this SparseDataFrame\n \"\"\"\n result = super(SparseDataFrame, self).copy(deep=deep)\n result._default_fill_value = self._default_fill_value\n result._default_kind = self._default_kind\n return result\n\n @property\n def default_fill_value(self):\n return self._default_fill_value\n\n @property\n def default_kind(self):\n return self._default_kind\n\n @property\n def density(self):\n \"\"\"\n Ratio of non-sparse points to total (dense) data points\n represented in the frame\n \"\"\"\n tot_nonsparse = sum(ser.sp_index.npoints\n for _, ser in compat.iteritems(self))\n tot = len(self.index) * len(self.columns)\n return tot_nonsparse / float(tot)\n\n def fillna(self, value=None, method=None, axis=0, inplace=False,\n limit=None, downcast=None):\n new_self = super(SparseDataFrame,\n self).fillna(value=value, method=method, axis=axis,\n inplace=inplace, limit=limit,\n downcast=downcast)\n if not inplace:\n self = new_self\n\n # set the fill value if we are filling as a scalar with nothing special\n # 
going on\n if (value is not None and value == value and method is None and\n limit is None):\n self._default_fill_value = value\n\n if not inplace:\n return self\n\n # ----------------------------------------------------------------------\n # Support different internal representation of SparseDataFrame\n\n def _sanitize_column(self, key, value, **kwargs):\n \"\"\"\n Creates a new SparseArray from the input value.\n\n Parameters\n ----------\n key : object\n value : scalar, Series, or array-like\n kwargs : dict\n\n Returns\n -------\n sanitized_column : SparseArray\n\n \"\"\"\n def sp_maker(x, index=None):\n return SparseArray(x, index=index,\n fill_value=self._default_fill_value,\n kind=self._default_kind)\n if isinstance(value, SparseSeries):\n clean = value.reindex(self.index).as_sparse_array(\n fill_value=self._default_fill_value, kind=self._default_kind)\n\n elif isinstance(value, SparseArray):\n if len(value) != len(self.index):\n raise AssertionError('Length of values does not match '\n 'length of index')\n clean = value\n\n elif hasattr(value, '__iter__'):\n if isinstance(value, Series):\n clean = value.reindex(self.index)\n if not isinstance(value, SparseSeries):\n clean = sp_maker(clean)\n else:\n if len(value) != len(self.index):\n raise AssertionError('Length of values does not match '\n 'length of index')\n clean = sp_maker(value)\n\n # Scalar\n else:\n clean = sp_maker(value, self.index)\n\n # always return a SparseArray!\n return clean\n\n def get_value(self, index, col, takeable=False):\n \"\"\"\n Quickly retrieve single value at passed column and index\n\n .. deprecated:: 0.21.0\n\n Please use .at[] or .iat[] accessors.\n\n Parameters\n ----------\n index : row label\n col : column label\n takeable : interpret the index/col as indexers, default False\n\n Returns\n -------\n value : scalar value\n \"\"\"\n warnings.warn(\"get_value is deprecated and will be removed \"\n \"in a future release. Please use \"\n \".at[] or .iat[] accessors instead\", FutureWarning,\n stacklevel=2)\n return self._get_value(index, col, takeable=takeable)\n\n def _get_value(self, index, col, takeable=False):\n if takeable is True:\n series = self._iget_item_cache(col)\n else:\n series = self._get_item_cache(col)\n\n return series._get_value(index, takeable=takeable)\n _get_value.__doc__ = get_value.__doc__\n\n def set_value(self, index, col, value, takeable=False):\n \"\"\"\n Put single value at passed column and index\n\n .. deprecated:: 0.21.0\n\n Please use .at[] or .iat[] accessors.\n\n Parameters\n ----------\n index : row label\n col : column label\n value : scalar value\n takeable : interpret the index/col as indexers, default False\n\n Notes\n -----\n This method *always* returns a new object. It is currently not\n particularly efficient (and potentially very expensive) but is provided\n for API compatibility with DataFrame\n\n Returns\n -------\n frame : DataFrame\n \"\"\"\n warnings.warn(\"set_value is deprecated and will be removed \"\n \"in a future release. 
Please use \"\n \".at[] or .iat[] accessors instead\", FutureWarning,\n stacklevel=2)\n return self._set_value(index, col, value, takeable=takeable)\n\n def _set_value(self, index, col, value, takeable=False):\n dense = self.to_dense()._set_value(\n index, col, value, takeable=takeable)\n return dense.to_sparse(kind=self._default_kind,\n fill_value=self._default_fill_value)\n _set_value.__doc__ = set_value.__doc__\n\n def _slice(self, slobj, axis=0, kind=None):\n if axis == 0:\n new_index = self.index[slobj]\n new_columns = self.columns\n else:\n new_index = self.index\n new_columns = self.columns[slobj]\n\n return self.reindex(index=new_index, columns=new_columns)\n\n def xs(self, key, axis=0, copy=False):\n \"\"\"\n Returns a row (cross-section) from the SparseDataFrame as a Series\n object.\n\n Parameters\n ----------\n key : some index contained in the index\n\n Returns\n -------\n xs : Series\n \"\"\"\n if axis == 1:\n data = self[key]\n return data\n\n i = self.index.get_loc(key)\n data = self.take([i]).get_values()[0]\n return Series(data, index=self.columns)\n\n # ----------------------------------------------------------------------\n # Arithmetic-related methods\n\n def _combine_frame(self, other, func, fill_value=None, level=None):\n this, other = self.align(other, join='outer', level=level, copy=False)\n new_index, new_columns = this.index, this.columns\n\n if level is not None:\n raise NotImplementedError(\"'level' argument is not supported\")\n\n if self.empty and other.empty:\n return self._constructor(index=new_index).__finalize__(self)\n\n new_data = {}\n if fill_value is not None:\n # TODO: be a bit more intelligent here\n for col in new_columns:\n if col in this and col in other:\n dleft = this[col].to_dense()\n dright = other[col].to_dense()\n result = dleft._binop(dright, func, fill_value=fill_value)\n result = result.to_sparse(fill_value=this[col].fill_value)\n new_data[col] = result\n else:\n\n for col in new_columns:\n if col in this and col in other:\n new_data[col] = func(this[col], other[col])\n\n # if the fill values are the same use them? 
or use a valid one\n new_fill_value = None\n other_fill_value = getattr(other, 'default_fill_value', np.nan)\n if self.default_fill_value == other_fill_value:\n new_fill_value = self.default_fill_value\n elif np.isnan(self.default_fill_value) and not np.isnan(\n other_fill_value):\n new_fill_value = other_fill_value\n elif not np.isnan(self.default_fill_value) and np.isnan(\n other_fill_value):\n new_fill_value = self.default_fill_value\n\n return self._constructor(data=new_data, index=new_index,\n columns=new_columns,\n default_fill_value=new_fill_value\n ).__finalize__(self)\n\n def _combine_match_index(self, other, func, level=None):\n new_data = {}\n\n if level is not None:\n raise NotImplementedError(\"'level' argument is not supported\")\n\n new_index = self.index.union(other.index)\n this = self\n if self.index is not new_index:\n this = self.reindex(new_index)\n\n if other.index is not new_index:\n other = other.reindex(new_index)\n\n for col, series in compat.iteritems(this):\n new_data[col] = func(series.values, other.values)\n\n # fill_value is a function of our operator\n fill_value = None\n if isna(other.fill_value) or isna(self.default_fill_value):\n fill_value = np.nan\n else:\n fill_value = func(np.float64(self.default_fill_value),\n np.float64(other.fill_value))\n\n return self._constructor(\n new_data, index=new_index, columns=self.columns,\n default_fill_value=fill_value).__finalize__(self)\n\n def _combine_match_columns(self, other, func, level=None, try_cast=True):\n # patched version of DataFrame._combine_match_columns to account for\n # NumPy circumventing __rsub__ with float64 types, e.g.: 3.0 - series,\n # where 3.0 is numpy.float64 and series is a SparseSeries. Still\n # possible for this to happen, which is bothersome\n\n if level is not None:\n raise NotImplementedError(\"'level' argument is not supported\")\n\n new_data = {}\n\n union = intersection = self.columns\n\n if not union.equals(other.index):\n union = other.index.union(self.columns)\n intersection = other.index.intersection(self.columns)\n\n for col in intersection:\n new_data[col] = func(self[col], float(other[col]))\n\n return self._constructor(\n new_data, index=self.index, columns=union,\n default_fill_value=self.default_fill_value).__finalize__(self)\n\n def _combine_const(self, other, func, errors='raise', try_cast=True):\n return self._apply_columns(lambda x: func(x, other))\n\n def _reindex_index(self, index, method, copy, level, fill_value=np.nan,\n limit=None, takeable=False):\n if level is not None:\n raise TypeError('Reindex by level not supported for sparse')\n\n if self.index.equals(index):\n if copy:\n return self.copy()\n else:\n return self\n\n if len(self.index) == 0:\n return self._constructor(\n index=index, columns=self.columns).__finalize__(self)\n\n indexer = self.index.get_indexer(index, method, limit=limit)\n indexer = ensure_platform_int(indexer)\n mask = indexer == -1\n need_mask = mask.any()\n\n new_series = {}\n for col, series in self.iteritems():\n if mask.all():\n continue\n\n values = series.values\n # .take returns SparseArray\n new = values.take(indexer)\n if need_mask:\n new = new.values\n # convert integer to float if necessary. 
need to do a lot\n # more than that, handle boolean etc also\n new, fill_value = maybe_upcast(new, fill_value=fill_value)\n np.putmask(new, mask, fill_value)\n\n new_series[col] = new\n\n return self._constructor(\n new_series, index=index, columns=self.columns,\n default_fill_value=self._default_fill_value).__finalize__(self)\n\n def _reindex_columns(self, columns, method, copy, level, fill_value=None,\n limit=None, takeable=False):\n if level is not None:\n raise TypeError('Reindex by level not supported for sparse')\n\n if notna(fill_value):\n raise NotImplementedError(\"'fill_value' argument is not supported\")\n\n if limit:\n raise NotImplementedError(\"'limit' argument is not supported\")\n\n if method is not None:\n raise NotImplementedError(\"'method' argument is not supported\")\n\n # TODO: fill value handling\n sdict = {k: v for k, v in compat.iteritems(self) if k in columns}\n return self._constructor(\n sdict, index=self.index, columns=columns,\n default_fill_value=self._default_fill_value).__finalize__(self)\n\n def _reindex_with_indexers(self, reindexers, method=None, fill_value=None,\n limit=None, copy=False, allow_dups=False):\n\n if method is not None or limit is not None:\n raise NotImplementedError(\"cannot reindex with a method or limit \"\n \"with sparse\")\n\n if fill_value is None:\n fill_value = np.nan\n\n reindexers = {self._get_axis_number(a): val\n for (a, val) in compat.iteritems(reindexers)}\n\n index, row_indexer = reindexers.get(0, (None, None))\n columns, col_indexer = reindexers.get(1, (None, None))\n\n if columns is None:\n columns = self.columns\n\n new_arrays = {}\n for col in columns:\n if col not in self:\n continue\n if row_indexer is not None:\n new_arrays[col] = algos.take_1d(self[col].get_values(),\n row_indexer,\n fill_value=fill_value)\n else:\n new_arrays[col] = self[col]\n\n return self._constructor(new_arrays, index=index,\n columns=columns).__finalize__(self)\n\n def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='',\n sort=False):\n if on is not None:\n raise NotImplementedError(\"'on' keyword parameter is not yet \"\n \"implemented\")\n return self._join_index(other, how, lsuffix, rsuffix)\n\n def _join_index(self, other, how, lsuffix, rsuffix):\n if isinstance(other, Series):\n if other.name is None:\n raise ValueError('Other Series must have a name')\n\n other = SparseDataFrame(\n {other.name: other},\n default_fill_value=self._default_fill_value)\n\n join_index = self.index.join(other.index, how=how)\n\n this = self.reindex(join_index)\n other = other.reindex(join_index)\n\n this, other = this._maybe_rename_join(other, lsuffix, rsuffix)\n\n from pandas import concat\n return concat([this, other], axis=1, verify_integrity=True)\n\n def _maybe_rename_join(self, other, lsuffix, rsuffix):\n to_rename = self.columns.intersection(other.columns)\n if len(to_rename) > 0:\n if not lsuffix and not rsuffix:\n raise ValueError('columns overlap but no suffix specified: '\n '{to_rename}'.format(to_rename=to_rename))\n\n def lrenamer(x):\n if x in to_rename:\n return '{x}{lsuffix}'.format(x=x, lsuffix=lsuffix)\n return x\n\n def rrenamer(x):\n if x in to_rename:\n return '{x}{rsuffix}'.format(x=x, rsuffix=rsuffix)\n return x\n\n this = self.rename(columns=lrenamer)\n other = other.rename(columns=rrenamer)\n else:\n this = self\n\n return this, other\n\n def transpose(self, *args, **kwargs):\n \"\"\"\n Returns a DataFrame with the rows/columns switched.\n \"\"\"\n nv.validate_transpose(args, kwargs)\n return self._constructor(\n 
self.values.T, index=self.columns, columns=self.index,\n default_fill_value=self._default_fill_value,\n default_kind=self._default_kind).__finalize__(self)\n\n T = property(transpose)\n\n @Appender(DataFrame.count.__doc__)\n def count(self, axis=0, **kwds):\n if axis is None:\n axis = self._stat_axis_number\n\n return self.apply(lambda x: x.count(), axis=axis)\n\n def cumsum(self, axis=0, *args, **kwargs):\n \"\"\"\n Return SparseDataFrame of cumulative sums over requested axis.\n\n Parameters\n ----------\n axis : {0, 1}\n 0 for row-wise, 1 for column-wise\n\n Returns\n -------\n y : SparseDataFrame\n \"\"\"\n nv.validate_cumsum(args, kwargs)\n\n if axis is None:\n axis = self._stat_axis_number\n\n return self.apply(lambda x: x.cumsum(), axis=axis)\n\n @Appender(generic._shared_docs['isna'] % _shared_doc_kwargs)\n def isna(self):\n return self._apply_columns(lambda x: x.isna())\n isnull = isna\n\n @Appender(generic._shared_docs['notna'] % _shared_doc_kwargs)\n def notna(self):\n return self._apply_columns(lambda x: x.notna())\n notnull = notna\n\n def apply(self, func, axis=0, broadcast=None, reduce=None,\n result_type=None):\n \"\"\"\n Analogous to DataFrame.apply, for SparseDataFrame\n\n Parameters\n ----------\n func : function\n Function to apply to each column\n axis : {0, 1, 'index', 'columns'}\n broadcast : bool, default False\n For aggregation functions, return object of same size with values\n propagated\n\n .. deprecated:: 0.23.0\n This argument will be removed in a future version, replaced\n by result_type='broadcast'.\n\n reduce : boolean or None, default None\n Try to apply reduction procedures. If the DataFrame is empty,\n apply will use reduce to determine whether the result should be a\n Series or a DataFrame. If reduce is None (the default), apply's\n return value will be guessed by calling func an empty Series (note:\n while guessing, exceptions raised by func will be ignored). If\n reduce is True a Series will always be returned, and if False a\n DataFrame will always be returned.\n\n .. deprecated:: 0.23.0\n This argument will be removed in a future version, replaced\n by result_type='reduce'.\n\n result_type : {'expand', 'reduce', 'broadcast, None}\n These only act when axis=1 {columns}:\n\n * 'expand' : list-like results will be turned into columns.\n * 'reduce' : return a Series if possible rather than expanding\n list-like results. This is the opposite to 'expand'.\n * 'broadcast' : results will be broadcast to the original shape\n of the frame, the original index & columns will be retained.\n\n The default behaviour (None) depends on the return value of the\n applied function: list-like results will be returned as a Series\n of those. However if the apply function returns a Series these\n are expanded to columns.\n\n .. 
versionadded:: 0.23.0\n\n Returns\n -------\n applied : Series or SparseDataFrame\n \"\"\"\n if not len(self.columns):\n return self\n axis = self._get_axis_number(axis)\n\n if isinstance(func, np.ufunc):\n new_series = {}\n for k, v in compat.iteritems(self):\n applied = func(v)\n applied.fill_value = func(v.fill_value)\n new_series[k] = applied\n return self._constructor(\n new_series, index=self.index, columns=self.columns,\n default_fill_value=self._default_fill_value,\n default_kind=self._default_kind).__finalize__(self)\n\n from pandas.core.apply import frame_apply\n op = frame_apply(self,\n func=func,\n axis=axis,\n reduce=reduce,\n broadcast=broadcast,\n result_type=result_type)\n return op.get_result()\n\n def applymap(self, func):\n \"\"\"\n Apply a function to a DataFrame that is intended to operate\n elementwise, i.e. like doing map(func, series) for each series in the\n DataFrame\n\n Parameters\n ----------\n func : function\n Python function, returns a single value from a single value\n\n Returns\n -------\n applied : DataFrame\n \"\"\"\n return self.apply(lambda x: lmap(func, x))\n\n\ndef to_manager(sdf, columns, index):\n \"\"\" create and return the block manager from a dataframe of series,\n columns, index\n \"\"\"\n\n # from BlockManager perspective\n axes = [ensure_index(columns), ensure_index(index)]\n\n return create_block_manager_from_arrays(\n [sdf[c] for c in columns], columns, axes)\n\n\ndef stack_sparse_frame(frame):\n \"\"\"\n Only makes sense when fill_value is NaN\n \"\"\"\n lengths = [s.sp_index.npoints for _, s in compat.iteritems(frame)]\n nobs = sum(lengths)\n\n # this is pretty fast\n minor_labels = np.repeat(np.arange(len(frame.columns)), lengths)\n\n inds_to_concat = []\n vals_to_concat = []\n # TODO: Figure out whether this can be reached.\n # I think this currently can't be reached because you can't build a\n # SparseDataFrame with a non-np.NaN fill value (fails earlier).\n for _, series in compat.iteritems(frame):\n if not np.isnan(series.fill_value):\n raise TypeError('This routine assumes NaN fill value')\n\n int_index = series.sp_index.to_int_index()\n inds_to_concat.append(int_index.indices)\n vals_to_concat.append(series.sp_values)\n\n major_labels = np.concatenate(inds_to_concat)\n stacked_values = np.concatenate(vals_to_concat)\n index = MultiIndex(levels=[frame.index, frame.columns],\n labels=[major_labels, minor_labels],\n verify_integrity=False)\n\n lp = DataFrame(stacked_values.reshape((nobs, 1)), index=index,\n columns=['foo'])\n return lp.sort_index(level=0)\n\n\ndef homogenize(series_dict):\n \"\"\"\n Conform a set of SparseSeries (with NaN fill_value) to a common SparseIndex\n corresponding to the locations where they all have data\n\n Parameters\n ----------\n series_dict : dict or DataFrame\n\n Notes\n -----\n Using the dumbest algorithm I could think of. 
Should put some more thought\n into this\n\n Returns\n -------\n homogenized : dict of SparseSeries\n \"\"\"\n index = None\n\n need_reindex = False\n\n for _, series in compat.iteritems(series_dict):\n if not np.isnan(series.fill_value):\n raise TypeError('this method is only valid with NaN fill values')\n\n if index is None:\n index = series.sp_index\n elif not series.sp_index.equals(index):\n need_reindex = True\n index = index.intersect(series.sp_index)\n\n if need_reindex:\n output = {}\n for name, series in compat.iteritems(series_dict):\n if not series.sp_index.equals(index):\n series = series.sparse_reindex(index)\n\n output[name] = series\n else:\n output = series_dict\n\n return output\n\n\n# use unaccelerated ops for sparse objects\nops.add_flex_arithmetic_methods(SparseDataFrame)\nops.add_special_arithmetic_methods(SparseDataFrame)\n" ]
[ [ "pandas.core.generic.NDFrame.__init__", "pandas.compat.numpy.function.validate_cumsum", "pandas.core.dtypes.missing.notna", "numpy.concatenate", "pandas.core.dtypes.common.is_scipy_sparse", "pandas.compat.iteritems", "pandas.core.frame.DataFrame", "scipy.sparse.coo_matrix", "pandas.core.series.Series", "pandas.core.ops.add_special_arithmetic_methods", "pandas.core.frame._prep_ndarray", "pandas.core.index.ensure_index", "pandas.compat.lmap", "pandas._libs.sparse.BlockIndex", "pandas.core.indexes.base.default_index", "pandas.core.index.MultiIndex", "pandas.concat", "pandas.core.apply.frame_apply", "pandas.util._decorators.Appender", "pandas.core.sparse.series.SparseArray", "numpy.putmask", "numpy.isnan", "pandas.compat.numpy.function.validate_transpose", "pandas.core.common._dict_keys_to_ordered_list", "pandas.io.pickle._unpickle_array", "pandas.core.dtypes.common.ensure_platform_int", "pandas.core.ops.add_flex_arithmetic_methods", "pandas.core.internals.create_block_manager_from_arrays", "pandas.core.sparse.series.SparseSeries", "pandas.core.dtypes.cast.find_common_type", "pandas._libs.sparse.get_blocks", "pandas.core.dtypes.cast.maybe_upcast", "numpy.float64", "pandas.core.dtypes.missing.isna", "pandas.core.index.Index" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "1.4", "1.1", "1.5", "1.2", "0.24", "1.0", "0.25", "1.3" ], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
jamesdu0504/760GroupProject
[ "dd870b3af7958fb2088c627ab02c781412b2a20f" ]
[ "dataset_characteristics.py" ]
[ "import datasets.import_datasets as im\nimport pandas as pd\n\n#Takes a very long time to run, probably not worth running when the output \n\ndatasets = [\"BMS1\", \n \"BMS2\", \n \"toydata\"\n \"uci_retail\",\n \"mushroom\", \n \"Belgian_retail\",\n \"chess\", \n \"connect\", \n \"mushroom\", \n \"pumsb\", \n \"pumsb_star\", \n \"T40I10D100K\", \n \"T10I4D100K\", \n \"accidents\", \n \"instacart\"]\n\ndef main(datasets):\n df = pd.DataFrame(columns=['Dataset Name',\n 'Number of transactions',\n 'Number of Unique items',\n 'Minimum Transaction Length',\n 'Maximum Transaction Length',\n 'Average Transaction Length'])\n\n for dataset_name in datasets:\n print(\"Analysing\", dataset_name)\n data = im.import_dataset(dataset_name)\n \n data = data.astype('bool')\n\n average = 0\n minimum = 100000\n maximum = 0\n for _, row in data.iterrows():\n transaction_len = sum(row)\n #Minimum transaction length\n if minimum > transaction_len:\n minimum = transaction_len\n\n #Maximum transaction length\n if maximum < transaction_len:\n maximum = transaction_len\n \n #Average transaction length\n average += transaction_len\n\n new_row = {'Dataset Name':dataset_name,\n 'Number of transactions':data.shape[0],\n 'Number of Unique items':data.shape[1],\n 'Minimum Transaction Length':minimum,\n 'Maximum Transaction Length':maximum,\n 'Average Transaction Length':average/data.shape[0]\n }\n\n df = df.append(new_row, ignore_index=True)\n\n print(df)\n return df\n\nmain(datasets).to_csv('Dataset_details.csv')" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
gaozhangyang/DecST
[ "116ce9efa28a07793900d09345abab4cb512db98", "116ce9efa28a07793900d09345abab4cb512db98", "116ce9efa28a07793900d09345abab4cb512db98" ]
[ "ex_ablation/exp_conv.py", "ex_TM_comparison/modules/e3d_lstm.py", "ex_TM_comparison/algorithms/utils.py" ]
[ "\nimport sys; sys.path.append('..')\nfrom API.tools import EarlyStopping\nfrom API.exp_basic import Exp_Basic\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch import optim\nfrom ex_ablation.model import ConvUnet\nfrom API.dataloader import load_data\nimport json\n\nimport os\nimport time\nimport logging\nfrom tqdm import tqdm\nfrom API.metrics import metric\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\nimport nni\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\nclass Exp_Traffic(Exp_Basic):\n def __init__(self, args):\n super(Exp_Traffic, self).__init__(args)\n self.path = args.res_dir+'/{}'.format(args.ex_name)\n if not os.path.exists(self.path):\n os.makedirs(self.path)\n\n self.checkpoints_path = os.path.join(self.path, 'checkpoints')\n if not os.path.exists(self.checkpoints_path):\n os.makedirs(self.checkpoints_path)\n\n sv_param = os.path.join(self.path, 'model_param.json')\n with open(sv_param, 'w') as file_obj:\n json.dump(args.__dict__, file_obj)\n \n for handler in logging.root.handlers[:]:\n logging.root.removeHandler(handler)\n logging.basicConfig(level=logging.INFO,#控制台打印的日志级别\n filename=self.path+'/log.log',#'log/{}_{}_{}.log'.format(args.gcn_type,args.graph_type,args.order_list)\n filemode='a',##模式,有w和a,w就是写模式,每次都会重新写日志,覆盖之前的日志\n #a是追加模式,默认如果不写的话,就是追加模式\n format='%(asctime)s - %(message)s'#日志格式\n )\n \n self._get_data()\n\n self._select_optimizer()\n if self.args.epoch_s>0:\n self._load(self.args.epoch_s-1)\n \n def _build_model(self):\n from ast import literal_eval as make_tuple\n in_shape = tuple(self.args.in_shape)\n # logging.info('{}'.format(self.args.in_shape))\n model = ConvUnet(self.args,self.args.dataname,in_shape,self.args.hidC,self.args.hidT)\n return model\n\n def _get_data(self):\n config = self.args.__dict__\n\n self.train_loader, self.vali_loader, self.test_loader, self.data_mean, self.data_std = load_data(config['dataname'],config['batch_size'], config['val_batch_size'], config['data_root'],require_back=True)\n if self.vali_loader is None:\n self.vali_loader = self.test_loader\n\n def _select_optimizer(self):\n self.model_optim = optim.Adam(self.model.parameters(), lr=self.args.lr)\n self.scheduler = ReduceLROnPlateau(self.model_optim, mode='min', patience=3,factor=0.8,verbose=True)\n return self.model_optim\n \n def _adjust_learning_rate(self,optimizer,epoch,args):\n lr_adjust = {epoch: args.lr * (0.5 ** ((epoch-1) // 2))}\n\n if epoch in lr_adjust.keys():\n lr = lr_adjust[epoch]\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n print('Updating learning rate to {}'.format(lr))\n \n def _select_criterion(self):\n criterion = nn.MSELoss()\n return criterion\n \n def _save(self,epoch):\n torch.save(self.model.state_dict(), os.path.join(self.checkpoints_path, str(epoch) + '.pth'))\n state=self.scheduler.state_dict()\n with open(os.path.join(self.checkpoints_path, str(epoch) + '.json'), 'w') as file_obj:\n json.dump(state, file_obj)\n \n def _load(self,epoch):\n self.model.load_state_dict(torch.load(os.path.join(self.checkpoints_path, str(epoch) + '.pth')))\n state = json.load(open(self.checkpoints_path+'/'+str(epoch) + '.json','r'))\n self.scheduler.load_state_dict(state)\n\n def vali(self, vali_loader, criterion, name,epoch):\n self.model.eval()\n preds=[]\n trues=[]\n total_loss = []\n vali_pbar = tqdm(vali_loader)\n for i, (batch_x,batch_y,background) in enumerate(vali_pbar):\n batch_x = batch_x.to(self.device)\n batch_y = batch_y\n background = background.float().to(self.device)\n\n # pred_y, 
pred_b = self.model(batch_x,background)\n pred_y = self.model(batch_x,background)\n true = batch_y.detach().cpu()\n pred_y = pred_y.detach().cpu()\n loss = criterion(pred_y, true)\n vali_pbar.set_description('vali loss: {:.4f}'.format(loss.item()))\n total_loss.append(loss)\n\n preds.append(pred_y.numpy())\n trues.append(true.numpy())\n if i*batch_x.shape[0]>500:\n break\n\n total_loss = np.average(total_loss)\n\n preds = np.concatenate(preds,axis=0)\n trues = np.concatenate(trues,axis=0)\n mae, mse, rmse, mape, mspe = metric(preds, trues,vali_loader.dataset.mean,vali_loader.dataset.std)\n print('{}\\tmse:{}, mae:{}, rmse:{}, mape:{} ,mspe:{}'.format(name,mse, mae, rmse, mape, mspe ))\n logging.info('{}\\tmse:{}, mae:{}, rmse:{}, mape:{} ,mspe:{}'.format(name,mse, mae, rmse, mape, mspe ))\n self.model.train()\n\n if name == 'vali':\n nni.report_intermediate_result(mse)\n\n return total_loss\n \n\n def train(self, args):\n config = args.__dict__\n time_now = time.time()\n \n train_steps = len(self.train_loader)\n early_stopping = EarlyStopping(patience=self.args.patience, verbose=True)\n \n model_optim = self._select_optimizer()\n criterion = self._select_criterion()\n\n for epoch in range(config['epoch_s'], config['epoch_e']):\n iter_count = 0\n train_loss = []\n \n self.model.train()\n train_pbar = tqdm(self.train_loader)\n i=0\n for batch_x,batch_y,background in train_pbar:\n iter_count += 1\n \n model_optim.zero_grad()\n batch_x = batch_x.to(self.device) # [32,12,3,32,64]\n batch_y = batch_y.to(self.device) # [32,12,3,32,64]\n background = background.float().to(self.device)\n\n # pred_y, pred_b = self.model(batch_x,background)\n # loss = criterion(pred_y, batch_y)+criterion(pred_b, background)\n\n pred_y = self.model(batch_x,background)\n loss = criterion(pred_y, batch_y)\n train_loss.append(loss.item())\n train_pbar.set_description('train loss: {:.4f}'.format(loss.item()))\n \n loss.backward()\n model_optim.step()\n i+=1\n\n train_loss = np.average(train_loss)\n if epoch % args.log_step == 0:\n self._save(epoch)\n vali_loss = self.vali(self.vali_loader, criterion,'vali',epoch)\n test_loss = self.vali(self.test_loader, criterion,'test',epoch)\n self.scheduler.step(test_loss)\n # nni.report_intermediate_result(test_loss)\n\n\n print(\"Epoch: {0}, Steps: {1} | Train Loss: {2:.7f} Vali Loss: {3:.7f} Test Loss: {4:.7f}\\n\".format(\n epoch + 1, train_steps, train_loss, vali_loss, test_loss))\n logging.info(\"Epoch: {0}, Steps: {1} | Train Loss: {2:.7f} Vali Loss: {3:.7f} Test Loss: {4:.7f}\\n\".format(\n epoch + 1, train_steps, train_loss, vali_loss, test_loss))\n early_stopping(vali_loss, self.model, self.path)\n\n if early_stopping.early_stop:\n print(\"Early stopping\")\n logging.info(\"Early stopping\")\n break\n \n best_model_path = self.path+'/'+'checkpoint.pth'\n self.model.load_state_dict(torch.load(best_model_path))\n return self.model\n\n def test(self,args):\n self.model.eval()\n preds = []\n trues = []\n \n for batch_x,batch_y,background in self.test_loader:\n batch_x = batch_x.to(self.device)\n batch_y = batch_y\n background = background.to(self.device)\n\n pred_y = self.model(batch_x,background)#.squeeze()\n pred_y = pred_y.detach().cpu()\n true = batch_y.detach().cpu().numpy()#.squeeze()\n \n preds.append(pred_y)\n trues.append(true)\n\n preds = np.concatenate(preds,axis=0)\n trues = np.concatenate(trues,axis=0)\n print('test shape:', preds.shape, trues.shape)\n logging.info('test shape:{}-{}'.format(preds.shape, trues.shape))\n\n # result save\n folder_path = 
self.path+'/results/{}/sv/'.format(args.ex_name)\n if not os.path.exists(folder_path):\n os.makedirs(folder_path)\n\n mae, mse, rmse, mape, mspe = metric(preds, trues,self.test_loader.dataset.mean,self.test_loader.dataset.std)\n print('mse:{}, mae:{}'.format(mse, mae))\n logging.info('mse:{}, mae:{}'.format(mse, mae))\n\n np.save(folder_path+'metrics.npy', np.array([mae, mse, rmse, mape, mspe]))\n np.save(folder_path+'pred.npy', preds)\n np.save(folder_path+'true.npy', trues)\n return mse", "# Cite from https://github.com/metrofun/E3D-LSTM\nfrom functools import reduce\nimport copy\nimport operator\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom .rnn_cell import E3DLSTMCell, ConvDeconv3d\nfrom .utils import window\nfrom tqdm import tqdm\nimport numpy as np\n\n\nclass E3DLSTM_Module(nn.Module):\n def __init__(self, input_shape, hidden_size, num_layers, kernel_size, tau):\n super().__init__()\n\n self._tau = tau\n self._cells = []\n\n input_shape = list(input_shape)\n for i in range(num_layers):\n cell = E3DLSTMCell(input_shape, hidden_size, kernel_size)\n # NOTE hidden state becomes input to the next cell\n input_shape[0] = hidden_size\n self._cells.append(cell)\n # Hook to register submodule\n setattr(self, \"cell{}\".format(i), cell)\n\n def forward(self, input):\n # NOTE (seq_len, batch, input_shape)\n batch_size = input.size(1)\n c_history_states = []\n h_states = []\n outputs = []\n\n for step, x in enumerate(input):\n for cell_idx, cell in enumerate(self._cells):\n if step == 0:\n c_history, m, h = self._cells[cell_idx].init_hidden(\n batch_size, self._tau, input.device\n )\n c_history_states.append(c_history)\n h_states.append(h)\n\n # NOTE c_history and h are coming from the previous time stamp, but we iterate over cells\n c_history, m, h = cell(\n x, c_history_states[cell_idx], m, h_states[cell_idx]\n )\n c_history_states[cell_idx] = c_history\n h_states[cell_idx] = h\n # NOTE hidden state of previous LSTM is passed as input to the next one\n x = h\n\n outputs.append(h)\n\n # NOTE Concat along the channels\n return torch.cat(outputs, dim=1)\n\n\nclass E3DLSTM_Model(nn.Module):\n def __init__(self, params):\n super().__init__()\n self.encoder = E3DLSTM_Module(params.input_shape, params.hidden_size, params.lstm_layers, params.kernel, params.tau)\n # self.decoder = nn.Conv3d(params.hidden_size * params.time_steps, params.output_shape[0], params.kernel, padding=(0, 2, 2))\n self.decoder = nn.Conv3d(64, 1, kernel_size=(1, 5, 5), stride=(1, 1, 1), padding=(0, 2, 2))\n def forward(self, x):\n return self.decoder(self.encoder(x))\n\nfrom .basic_algo import Basic_algo\nclass E3DLSTM(Basic_algo):\n def __init__(self, params):\n config = params.__dict__\n config.update({\n 'input_shape': (3, 4, 128, 128),\n 'output_shape': (3, 4, 128, 128),\n 'hidden_size': 64,\n 'lstm_layers': 4,\n 'kernel': (2, 5, 5),\n 'tau': 2,\n 'temporal_frames': 4,\n 'temporal_stride': 1, \n 'input_time_window': 4,\n 'output_time_horizon': 1,\n 'time_steps': 1, \n 'lr': 0.001,\n 'device': torch.device('cuda:0')\n })\n model = E3DLSTM_Model(params).to(params.device)\n Basic_algo.__init__(self, model)\n self.device = params.device\n self.params = params\n self.optimizer = torch.optim.Adam(self.model.parameters(), lr=params.lr)\n self.criterion = torch.nn.MSELoss()\n\n def _iter_batch(self, x, y):\n pred = self.model(x)\n loss = self.criterion(pred, y)\n return pred, loss\n\n def train(self, train_loader, epoch): \n '''\n Train the model with train_loader.\n Input params:\n train_loader: 
dataloader of train.\n Output params:\n mse_loss: mean square loss between predictions and ground truth.\n '''\n self.model.train()\n train_pbar = tqdm(train_loader)\n mse_loss = []\n for i, (batch_x, batch_y, _) in enumerate(train_pbar):\n batch_x, batch_y = batch_x.to(self.device), batch_y.to(self.device)\n batch_x = batch_x.permute(0, 2, 1, 3, 4)\n batch_y = batch_y.permute(0, 2, 1, 3, 4)\n # train model\n self.optimizer.zero_grad()\n frames_seq = []\n for indices in window(range(self.params.input_time_window), \\\n self.params.temporal_frames, self.params.temporal_stride):\n frames_seq.append(batch_x[:, :, indices[0] : indices[-1] + 1])\n batch_x = torch.stack(frames_seq, dim=0)\n pred, loss = self._iter_batch(batch_x, batch_y)\n loss.backward()\n self.optimizer.step()\n # train model\n train_pbar.set_description('train loss: {:.4f}'.format(loss.item()))\n mse_loss.append(loss.item())\n mse_loss = np.average(mse_loss)\n return mse_loss\n\n def evaluate(self, val_loader):\n '''\n Evaluate the model with val_loader.\n Input params:\n val_loader: dataloader of validation.\n Output params:\n (mse, mae, ssim): mse, mae, ssim between predictions and ground truth.\n '''\n self.model.eval()\n val_pbar = tqdm(val_loader)\n mse_loss, preds, trues = [], [], []\n for i, (batch_x, batch_y, _) in enumerate(val_pbar):\n batch_x, batch_y = batch_x.to(self.device), batch_y.to(self.device)\n # eval model\n batch_x = batch_x.permute(0, 2, 1, 3, 4)\n batch_y = batch_y.permute(0, 2, 1, 3, 4)\n frames_seq = []\n for indices in window(range(self.params.input_time_window), \\\n self.params.temporal_frames, self.params.temporal_stride):\n frames_seq.append(batch_x[:, :, indices[0] : indices[-1] + 1])\n batch_x = torch.stack(frames_seq, dim=0)\n pred_y, loss = self._iter_batch(batch_x, batch_y)\n # eval model\n true, pred_y = batch_y.detach().cpu(), pred_y.detach().cpu()\n val_pbar.set_description('vali loss: {:.4f}'.format(loss.item()))\n mse_loss.append(loss.item())\n\n preds.append(pred_y.numpy())\n trues.append(true.numpy())\n\n mse_loss = np.average(mse_loss)\n\n preds = np.concatenate(preds,axis=0)\n trues = np.concatenate(trues,axis=0)\n import sys; sys.path.append('/usr/data/gzy/Weather_Forecast')\n from API.metrics import metric\n mae, mse, rmse, mape, mspe,ssim,psnr = metric(preds, trues,val_loader.dataset.mean,val_loader.dataset.std,return_ssim_psnr=True)\n return mse, mae, ssim\n\n def validate(self, val_loader):\n self.model.eval()\n number = 0\n val_pbar = tqdm(val_loader)\n mse_loss, preds, trues = [], [], []\n for i, (batch_x, batch_y, _) in enumerate(val_pbar):\n number += batch_x.shape[0]\n batch_x, batch_y = batch_x.to(self.device), batch_y.to(self.device)\n # eval model\n batch_x = batch_x.permute(0, 2, 1, 3, 4)\n batch_y = batch_y.permute(0, 2, 1, 3, 4)\n frames_seq = []\n for indices in window(range(self.params.input_time_window), \\\n self.params.temporal_frames, self.params.temporal_stride):\n frames_seq.append(batch_x[:, :, indices[0] : indices[-1] + 1])\n batch_x = torch.stack(frames_seq, dim=0)\n pred_y, loss = self._iter_batch(batch_x, batch_y)\n # eval model\n true, pred_y = batch_y.detach().cpu(), pred_y.detach().cpu()\n val_pbar.set_description('vali loss: {:.4f}'.format(loss.item()))\n mse_loss.append(loss.item())\n\n preds.append(pred_y.numpy())\n trues.append(true.numpy())\n if number >= 1000:\n break\n\n mse_loss = np.average(mse_loss)\n\n preds = np.concatenate(preds,axis=0)\n trues = np.concatenate(trues,axis=0)\n import sys; 
sys.path.append('/usr/data/gzy/Weather_Forecast')\n from API.metrics import metric\n mae, mse, rmse, mape, mspe,ssim,psnr = metric(preds, trues,val_loader.dataset.mean,val_loader.dataset.std,return_ssim_psnr=True)\n return mse, mae", "# import numpy as np\nimport torch\n\ndef reshape_patch(img_tensor, patch_size):\n assert 5 == img_tensor.ndim\n batch_size, seq_length, img_height, img_width, num_channels = img_tensor.shape\n a = torch.reshape(img_tensor, [batch_size, seq_length,\n img_height//patch_size, patch_size,\n img_width//patch_size, patch_size,\n num_channels])\n b = a.permute(0, 1, 2, 4, 3, 5, 6)\n\n patch_tensor = torch.reshape(b, [batch_size, seq_length,\n img_height//patch_size,\n img_width//patch_size,\n patch_size*patch_size*num_channels])\n return patch_tensor\n\ndef reshape_patch_back(patch_tensor, patch_size):\n assert 5 == patch_tensor.ndim\n batch_size, seq_length, patch_height, patch_width, channels = patch_tensor.shape\n img_channels = channels // (patch_size*patch_size)\n a = torch.reshape(patch_tensor, [batch_size, seq_length,\n patch_height, patch_width,\n patch_size, patch_size,\n img_channels])\n b = a.permute(0, 1, 2, 4, 3, 5, 6)\n img_tensor = torch.reshape(b, [batch_size, seq_length,\n patch_height * patch_size,\n patch_width * patch_size,\n img_channels])\n return img_tensor" ]
[ [ "torch.optim.lr_scheduler.ReduceLROnPlateau", "torch.load", "numpy.save", "numpy.concatenate", "numpy.array", "numpy.average", "torch.nn.MSELoss" ], [ "torch.cat", "numpy.concatenate", "torch.nn.Conv3d", "torch.stack", "torch.device", "numpy.average", "torch.nn.MSELoss" ], [ "torch.reshape" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
cvitolo/DataScienceVM
[ "97e1b780de572266dcdab89d443af55d5b930f42" ]
[ "Tutorials/MLADS-spring-2018/CNTK_distributed/CNTK_distributed.py" ]
[ "import numpy as np\nimport os\nimport sys\nimport cntk\nfrom cntk.layers import Convolution2D, MaxPooling, Dense, Dropout\nfrom utils import *\nimport argparse\nfrom cntk.train.distributed import Communicator, mpi_communicator\n\n# Hyperparams\nEPOCHS = 1\nBATCHSIZE = 64 * 4\nLR = 0.01\nMOMENTUM = 0.9\nN_CLASSES = 10\n\ndef create_basic_model(input, out_dims):\n with cntk.layers.default_options(init=cntk.glorot_uniform(), activation=cntk.relu):\n net = cntk.layers.Convolution((5,5), 32, pad=True)(input)\n net = cntk.layers.MaxPooling((3,3), strides=(2,2))(net)\n\n net = cntk.layers.Convolution((5,5), 32, pad=True)(net)\n net = cntk.layers.MaxPooling((3,3), strides=(2,2))(net)\n\n net = cntk.layers.Convolution((5,5), 64, pad=True)(net)\n net = cntk.layers.MaxPooling((3,3), strides=(2,2))(net)\n \n net = cntk.layers.Dense(64)(net)\n net = cntk.layers.Dense(out_dims, activation=None)(net)\n \n return net\n \ndef init_model(m):\n progress_writers = [cntk.logging.ProgressPrinter(\n freq=int(BATCHSIZE / 2),\n rank=cntk.train.distributed.Communicator.rank(),\n num_epochs=EPOCHS)]\n\n # Loss (dense labels); check if support for sparse labels\n loss = cntk.cross_entropy_with_softmax(m, labels)\n # Momentum SGD\n # https://github.com/Microsoft/CNTK/blob/master/Manual/Manual_How_to_use_learners.ipynb\n # unit_gain=False: momentum_direction = momentum*old_momentum_direction + gradient\n # if unit_gain=True then ...(1-momentum)*gradient\n local_learner = cntk.momentum_sgd(m.parameters,\n lr=cntk.learning_rate_schedule(LR, cntk.UnitType.minibatch) ,\n momentum=cntk.momentum_schedule(MOMENTUM),\n unit_gain=False)\n\n distributed_learner = cntk.train.distributed.data_parallel_distributed_learner(local_learner)\n\n trainer = cntk.Trainer(m, (loss, cntk.classification_error(m, labels)), [distributed_learner], progress_writers)\n\n return trainer, distributed_learner\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--input_dir')\n#parser.add_argument('--output_dir')\n\nprint(sys.argv)\n\nargs = parser.parse_args()\n\n# Data into format for library\nx_train, x_test, y_train, y_test = cifar_for_library(download_dir=args.input_dir, channel_first=True, one_hot=True)\n# CNTK format\ny_train = y_train.astype(np.float32)\ny_test = y_test.astype(np.float32)\nprint(x_train.shape, x_test.shape, y_train.shape, y_test.shape)\nprint(x_train.dtype, x_test.dtype, y_train.dtype, y_test.dtype)\n\n# Placeholders\nfeatures = cntk.input_variable((3, 32, 32), np.float32)\nlabels = cntk.input_variable(N_CLASSES, np.float32)\n# Load symbol\nsym = create_basic_model(features, N_CLASSES)\n\ndef save_model(model, learner, file_name):\n if learner.communicator().is_main():\n model.save(file_name)\n\ntrainer, learner = init_model(sym)\n\nfor j in range(EPOCHS):\n for data, label in yield_mb(x_train, y_train, BATCHSIZE, shuffle=True):\n trainer.train_minibatch({features: data, labels: label})\n # Log (this is just last batch in epoch, not average of batches)\n eval_error = trainer.previous_minibatch_evaluation_average\n print(\"Epoch %d | Accuracy: %.6f\" % (j+1, (1-eval_error)))\n \nz = cntk.softmax(sym)\n\nsave_model(sym, learner, \"{}/cifar_final.model\".format(args.input_dir))\n\nn_samples = (y_test.shape[0]//BATCHSIZE)*BATCHSIZE\ny_guess = np.zeros(n_samples, dtype=np.int)\ny_truth = np.argmax(y_test[:n_samples], axis=-1)\nc = 0\nfor data, label in yield_mb(x_test, y_test, BATCHSIZE):\n predicted_label_probs = z.eval({features : data})\n y_guess[c*BATCHSIZE:(c+1)*BATCHSIZE] = np.argmax(predicted_label_probs, 
axis=-1)\n c += 1\n\nprint(\"Accuracy: \", sum(y_guess == y_truth)/len(y_guess))\n\ncntk.train.distributed.Communicator.finalize()\n" ]
[ [ "numpy.argmax", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jjjjohnson/OpenTransformer
[ "9a6371095ee83896d886addf55bda3a42c3918f6" ]
[ "otrans/encoder/transformer.py" ]
[ "# File : transformer.py\n# Author : Zhengkun Tian\n# Email : [email protected]\n\nimport logging\nimport torch\nimport torch.nn as nn\nfrom otrans.module.pos import MixedPositionalEncoding, RelPositionalEncoding\nfrom otrans.module.ffn import PositionwiseFeedForward\nfrom otrans.module.attention import MultiHeadedSelfAttention, MultiHeadedSelfAttentionWithRelPos\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass TransformerEncoderLayer(nn.Module):\n def __init__(self, n_heads, d_model, d_ff, slf_attn_dropout, ffn_dropout, residual_dropout,\n normalize_before=False, concat_after=False, relative_positional=False, activation='relu'):\n super(TransformerEncoderLayer, self).__init__()\n\n self.relative_positional = relative_positional\n\n if self.relative_positional:\n self.slf_attn = MultiHeadedSelfAttentionWithRelPos(n_heads, d_model, slf_attn_dropout)\n else:\n self.slf_attn = MultiHeadedSelfAttention(n_heads, d_model, slf_attn_dropout)\n self.feed_forward = PositionwiseFeedForward(d_model, d_ff, ffn_dropout, activation)\n\n self.norm1 = nn.LayerNorm(d_model)\n self.norm2 = nn.LayerNorm(d_model)\n\n self.dropout1 = nn.Dropout(residual_dropout)\n self.dropout2 = nn.Dropout(residual_dropout)\n\n self.normalize_before = normalize_before\n self.concat_after = concat_after\n\n if self.concat_after:\n self.concat_linear = nn.Linear(d_model * 2, d_model)\n\n def forward(self, x, mask, pos=None):\n if self.normalize_before:\n x = self.norm1(x)\n residual = x\n\n if self.relative_positional:\n slf_attn_out, slf_attn_weights = self.slf_attn(x, mask, pos)\n else:\n slf_attn_out, slf_attn_weights = self.slf_attn(x, mask)\n \n if self.concat_after:\n x = residual + self.concat_linear(torch.cat((x, slf_attn_out), dim=-1))\n else:\n x = residual + self.dropout1(slf_attn_out)\n if not self.normalize_before:\n x = self.norm1(x)\n\n if self.normalize_before:\n x = self.norm2(x)\n residual = x\n x = residual + self.dropout2(self.feed_forward(x))\n if not self.normalize_before:\n x = self.norm2(x)\n\n return x, {'slf_attn_weights': slf_attn_weights}\n\n def inference(self, x, mask, pos=None, cache=None):\n if self.normalize_before:\n x = self.norm1(x)\n residual = x\n if self.relative_positional:\n slf_attn_out, slf_attn_weights, new_cache = self.slf_attn.inference(x, mask, cache, pos)\n else:\n slf_attn_out, slf_attn_weights, new_cache = self.slf_attn.inference(x, mask, cache)\n\n if self.concat_after:\n x = residual + self.concat_linear(torch.cat((x, slf_attn_out), dim=-1))\n else:\n x = residual + slf_attn_out\n if not self.normalize_before:\n x = self.norm1(x)\n\n if self.normalize_before:\n x = self.norm2(x)\n residual = x\n x = residual + self.feed_forward(x)\n if not self.normalize_before:\n x = self.norm2(x)\n\n return x, new_cache, {'slf_attn_weights': slf_attn_weights}\n\n\nclass TransformerEncoder(nn.Module):\n def __init__(self, d_model=256, n_heads=4, d_ff=2048, n_blocks=6, pos_dropout=0.0, \n slf_attn_dropout=0.0, ffn_dropout=0.0, residual_dropout=0.1, normalize_before=False,\n concat_after=False, relative_positional=False, activation='relu'):\n super(TransformerEncoder, self).__init__()\n\n self.normalize_before = normalize_before\n self.relative_positional = relative_positional\n\n if self.relative_positional:\n self.pos_emb = RelPositionalEncoding(d_model, pos_dropout)\n else:\n self.pos_emb = MixedPositionalEncoding(d_model, pos_dropout)\n\n self.blocks = nn.ModuleList([\n TransformerEncoderLayer(\n n_heads, d_model, d_ff, slf_attn_dropout, ffn_dropout,\n 
residual_dropout=residual_dropout, normalize_before=normalize_before,\n concat_after=concat_after, relative_positional=relative_positional, activation=activation) for _ in range(n_blocks)\n ])\n\n if self.normalize_before:\n self.norm = nn.LayerNorm(d_model)\n\n def forward(self, inputs, mask):\n \n enc_output, pos = self.pos_emb(inputs)\n\n enc_output.masked_fill_(~mask.unsqueeze(2), 0.0)\n\n attn_weights = {}\n for i, block in enumerate(self.blocks):\n enc_output, attn_weight = block(enc_output, mask.unsqueeze(1), pos)\n attn_weights['enc_block_%d' % i] = attn_weight\n\n if self.normalize_before:\n enc_output = self.norm(enc_output)\n\n return enc_output, mask, attn_weights\n\n\n def inference(self, inputs, mask, cache=None):\n \n enc_output, pos = self.pos_emb.inference(inputs)\n\n enc_output.masked_fill_(~mask.unsqueeze(2), 0.0)\n\n attn_weights = {}\n new_caches = []\n for i, block in enumerate(self.blocks):\n enc_output, new_cache, attn_weight = block.inference(enc_output, mask.unsqueeze(1), pos, cache)\n attn_weights['enc_block_%d' % i] = attn_weight\n new_caches.append(new_cache)\n\n if self.normalize_before:\n enc_output = self.norm(enc_output)\n\n return enc_output, mask, new_caches, attn_weights\n\n" ]
[ [ "torch.nn.Linear", "torch.nn.Dropout", "torch.cat", "torch.nn.LayerNorm" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
haruiz/models
[ "4dfcf48f7e15646dca2089a0e9f583d24661924c", "2db2501bc9928f68e225282f3884b81680a9cccb", "2db2501bc9928f68e225282f3884b81680a9cccb", "2db2501bc9928f68e225282f3884b81680a9cccb" ]
[ "research/object_detection/utils/visualization_utils.py", "research/object_detection/utils/test_utils.py", "official/vision/detection/modeling/retinanet_model.py", "official/nlp/modeling/networks/token_classification.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"A set of functions that are used for visualization.\n\nThese functions often receive an image, perform some visualization on the image.\nThe functions do not return a value, instead they modify the image itself.\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\nimport collections\n# Set headless-friendly backend.\n#import matplotlib; matplotlib.use('Agg') # pylint: disable=multiple-statements\nimport matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top\nimport numpy as np\nimport PIL.Image as Image\nimport PIL.ImageColor as ImageColor\nimport PIL.ImageDraw as ImageDraw\nimport PIL.ImageFont as ImageFont\nimport six\nfrom six.moves import range\nfrom six.moves import zip\nimport tensorflow.compat.v1 as tf\n\nfrom object_detection.core import keypoint_ops\nfrom object_detection.core import standard_fields as fields\nfrom object_detection.utils import shape_utils\n\n_TITLE_LEFT_MARGIN = 10\n_TITLE_TOP_MARGIN = 10\nSTANDARD_COLORS = [\n 'AliceBlue', 'Chartreuse', 'Aqua', 'Aquamarine', 'Azure', 'Beige', 'Bisque',\n 'BlanchedAlmond', 'BlueViolet', 'BurlyWood', 'CadetBlue', 'AntiqueWhite',\n 'Chocolate', 'Coral', 'CornflowerBlue', 'Cornsilk', 'Crimson', 'Cyan',\n 'DarkCyan', 'DarkGoldenRod', 'DarkGrey', 'DarkKhaki', 'DarkOrange',\n 'DarkOrchid', 'DarkSalmon', 'DarkSeaGreen', 'DarkTurquoise', 'DarkViolet',\n 'DeepPink', 'DeepSkyBlue', 'DodgerBlue', 'FireBrick', 'FloralWhite',\n 'ForestGreen', 'Fuchsia', 'Gainsboro', 'GhostWhite', 'Gold', 'GoldenRod',\n 'Salmon', 'Tan', 'HoneyDew', 'HotPink', 'IndianRed', 'Ivory', 'Khaki',\n 'Lavender', 'LavenderBlush', 'LawnGreen', 'LemonChiffon', 'LightBlue',\n 'LightCoral', 'LightCyan', 'LightGoldenRodYellow', 'LightGray', 'LightGrey',\n 'LightGreen', 'LightPink', 'LightSalmon', 'LightSeaGreen', 'LightSkyBlue',\n 'LightSlateGray', 'LightSlateGrey', 'LightSteelBlue', 'LightYellow', 'Lime',\n 'LimeGreen', 'Linen', 'Magenta', 'MediumAquaMarine', 'MediumOrchid',\n 'MediumPurple', 'MediumSeaGreen', 'MediumSlateBlue', 'MediumSpringGreen',\n 'MediumTurquoise', 'MediumVioletRed', 'MintCream', 'MistyRose', 'Moccasin',\n 'NavajoWhite', 'OldLace', 'Olive', 'OliveDrab', 'Orange', 'OrangeRed',\n 'Orchid', 'PaleGoldenRod', 'PaleGreen', 'PaleTurquoise', 'PaleVioletRed',\n 'PapayaWhip', 'PeachPuff', 'Peru', 'Pink', 'Plum', 'PowderBlue', 'Purple',\n 'Red', 'RosyBrown', 'RoyalBlue', 'SaddleBrown', 'Green', 'SandyBrown',\n 'SeaGreen', 'SeaShell', 'Sienna', 'Silver', 'SkyBlue', 'SlateBlue',\n 'SlateGray', 'SlateGrey', 'Snow', 'SpringGreen', 'SteelBlue', 'GreenYellow',\n 'Teal', 'Thistle', 'Tomato', 'Turquoise', 'Violet', 'Wheat', 'White',\n 'WhiteSmoke', 'Yellow', 'YellowGreen'\n]\n\n\ndef _get_multiplier_for_color_randomness():\n \"\"\"Returns a multiplier to get semi-random colors 
from successive indices.\n\n This function computes a prime number, p, in the range [2, 17] that:\n - is closest to len(STANDARD_COLORS) / 10\n - does not divide len(STANDARD_COLORS)\n\n If no prime numbers in that range satisfy the constraints, p is returned as 1.\n\n Once p is established, it can be used as a multiplier to select\n non-consecutive colors from STANDARD_COLORS:\n colors = [(p * i) % len(STANDARD_COLORS) for i in range(20)]\n \"\"\"\n num_colors = len(STANDARD_COLORS)\n prime_candidates = [5, 7, 11, 13, 17]\n\n # Remove all prime candidates that divide the number of colors.\n prime_candidates = [p for p in prime_candidates if num_colors % p]\n if not prime_candidates:\n return 1\n\n # Return the closest prime number to num_colors / 10.\n abs_distance = [np.abs(num_colors / 10. - p) for p in prime_candidates]\n num_candidates = len(abs_distance)\n inds = [i for _, i in sorted(zip(abs_distance, range(num_candidates)))]\n return prime_candidates[inds[0]]\n\n\ndef save_image_array_as_png(image, output_path):\n \"\"\"Saves an image (represented as a numpy array) to PNG.\n\n Args:\n image: a numpy array with shape [height, width, 3].\n output_path: path to which image should be written.\n \"\"\"\n image_pil = Image.fromarray(np.uint8(image)).convert('RGB')\n with tf.gfile.Open(output_path, 'w') as fid:\n image_pil.save(fid, 'PNG')\n\n\ndef encode_image_array_as_png_str(image):\n \"\"\"Encodes a numpy array into a PNG string.\n\n Args:\n image: a numpy array with shape [height, width, 3].\n\n Returns:\n PNG encoded image string.\n \"\"\"\n image_pil = Image.fromarray(np.uint8(image))\n output = six.BytesIO()\n image_pil.save(output, format='PNG')\n png_string = output.getvalue()\n output.close()\n return png_string\n\n\ndef draw_bounding_box_on_image_array(image,\n ymin,\n xmin,\n ymax,\n xmax,\n color='red',\n thickness=4,\n display_str_list=(),\n use_normalized_coordinates=True):\n \"\"\"Adds a bounding box to an image (numpy array).\n\n Bounding box coordinates can be specified in either absolute (pixel) or\n normalized coordinates by setting the use_normalized_coordinates argument.\n\n Args:\n image: a numpy array with shape [height, width, 3].\n ymin: ymin of bounding box.\n xmin: xmin of bounding box.\n ymax: ymax of bounding box.\n xmax: xmax of bounding box.\n color: color to draw bounding box. Default is red.\n thickness: line thickness. Default value is 4.\n display_str_list: list of strings to display in box\n (each to be shown on its own line).\n use_normalized_coordinates: If True (default), treat coordinates\n ymin, xmin, ymax, xmax as relative to the image. 
Otherwise treat\n coordinates as absolute.\n \"\"\"\n image_pil = Image.fromarray(np.uint8(image)).convert('RGB')\n draw_bounding_box_on_image(image_pil, ymin, xmin, ymax, xmax, color,\n thickness, display_str_list,\n use_normalized_coordinates)\n np.copyto(image, np.array(image_pil))\n\n\ndef draw_bounding_box_on_image(image,\n ymin,\n xmin,\n ymax,\n xmax,\n color='red',\n thickness=4,\n display_str_list=(),\n use_normalized_coordinates=True):\n \"\"\"Adds a bounding box to an image.\n\n Bounding box coordinates can be specified in either absolute (pixel) or\n normalized coordinates by setting the use_normalized_coordinates argument.\n\n Each string in display_str_list is displayed on a separate line above the\n bounding box in black text on a rectangle filled with the input 'color'.\n If the top of the bounding box extends to the edge of the image, the strings\n are displayed below the bounding box.\n\n Args:\n image: a PIL.Image object.\n ymin: ymin of bounding box.\n xmin: xmin of bounding box.\n ymax: ymax of bounding box.\n xmax: xmax of bounding box.\n color: color to draw bounding box. Default is red.\n thickness: line thickness. Default value is 4.\n display_str_list: list of strings to display in box\n (each to be shown on its own line).\n use_normalized_coordinates: If True (default), treat coordinates\n ymin, xmin, ymax, xmax as relative to the image. Otherwise treat\n coordinates as absolute.\n \"\"\"\n draw = ImageDraw.Draw(image)\n im_width, im_height = image.size\n if use_normalized_coordinates:\n (left, right, top, bottom) = (xmin * im_width, xmax * im_width,\n ymin * im_height, ymax * im_height)\n else:\n (left, right, top, bottom) = (xmin, xmax, ymin, ymax)\n if thickness > 0:\n draw.line([(left, top), (left, bottom), (right, bottom), (right, top),\n (left, top)],\n width=thickness,\n fill=color)\n try:\n font = ImageFont.truetype('arial.ttf', 24)\n except IOError:\n font = ImageFont.load_default()\n\n # If the total height of the display strings added to the top of the bounding\n # box exceeds the top of the image, stack the strings below the bounding box\n # instead of above.\n display_str_heights = [font.getsize(ds)[1] for ds in display_str_list]\n # Each display_str has a top and bottom margin of 0.05x.\n total_display_str_height = (1 + 2 * 0.05) * sum(display_str_heights)\n\n if top > total_display_str_height:\n text_bottom = top\n else:\n text_bottom = bottom + total_display_str_height\n # Reverse list and print from bottom to top.\n for display_str in display_str_list[::-1]:\n text_width, text_height = font.getsize(display_str)\n margin = np.ceil(0.05 * text_height)\n draw.rectangle(\n [(left, text_bottom - text_height - 2 * margin), (left + text_width,\n text_bottom)],\n fill=color)\n draw.text(\n (left + margin, text_bottom - text_height - margin),\n display_str,\n fill='black',\n font=font)\n # Move up by the full rectangle height (text plus both margins) so\n # successive label rectangles do not overlap.\n text_bottom -= text_height + 2 * margin\n\n\ndef draw_bounding_boxes_on_image_array(image,\n boxes,\n color='red',\n thickness=4,\n display_str_list_list=()):\n \"\"\"Draws bounding boxes on image (numpy array).\n\n Args:\n image: a numpy array object.\n boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax).\n The coordinates are in normalized format between [0, 1].\n color: color to draw bounding box. Default is red.\n thickness: line thickness. 
Default value is 4.\n display_str_list_list: list of list of strings.\n a list of strings for each bounding box.\n The reason to pass a list of strings for a\n bounding box is that it might contain\n multiple labels.\n\n Raises:\n ValueError: if boxes is not a [N, 4] array\n \"\"\"\n image_pil = Image.fromarray(image)\n draw_bounding_boxes_on_image(image_pil, boxes, color, thickness,\n display_str_list_list)\n np.copyto(image, np.array(image_pil))\n\n\ndef draw_bounding_boxes_on_image(image,\n boxes,\n color='red',\n thickness=4,\n display_str_list_list=()):\n \"\"\"Draws bounding boxes on image.\n\n Args:\n image: a PIL.Image object.\n boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax).\n The coordinates are in normalized format between [0, 1].\n color: color to draw bounding box. Default is red.\n thickness: line thickness. Default value is 4.\n display_str_list_list: list of list of strings.\n a list of strings for each bounding box.\n The reason to pass a list of strings for a\n bounding box is that it might contain\n multiple labels.\n\n Raises:\n ValueError: if boxes is not a [N, 4] array\n \"\"\"\n boxes_shape = boxes.shape\n if not boxes_shape:\n return\n if len(boxes_shape) != 2 or boxes_shape[1] != 4:\n raise ValueError('Input must be of size [N, 4]')\n for i in range(boxes_shape[0]):\n display_str_list = ()\n if display_str_list_list:\n display_str_list = display_str_list_list[i]\n draw_bounding_box_on_image(image, boxes[i, 0], boxes[i, 1], boxes[i, 2],\n boxes[i, 3], color, thickness, display_str_list)\n\n\ndef create_visualization_fn(category_index,\n include_masks=False,\n include_keypoints=False,\n include_keypoint_scores=False,\n include_track_ids=False,\n **kwargs):\n \"\"\"Constructs a visualization function that can be wrapped in a py_func.\n\n py_funcs only accept positional arguments. This function returns a suitable\n function with the correct positional argument mapping. The positional\n arguments in order are:\n 0: image\n 1: boxes\n 2: classes\n 3: scores\n [4]: masks (optional)\n [4-5]: keypoints (optional)\n [4-6]: keypoint_scores (optional)\n [4-7]: track_ids (optional)\n\n -- Example 1 --\n vis_only_masks_fn = create_visualization_fn(category_index,\n include_masks=True, include_keypoints=False, include_track_ids=False,\n **kwargs)\n image = tf.py_func(vis_only_masks_fn,\n inp=[image, boxes, classes, scores, masks],\n Tout=tf.uint8)\n\n -- Example 2 --\n vis_masks_and_track_ids_fn = create_visualization_fn(category_index,\n include_masks=True, include_keypoints=False, include_track_ids=True,\n **kwargs)\n image = tf.py_func(vis_masks_and_track_ids_fn,\n inp=[image, boxes, classes, scores, masks, track_ids],\n Tout=tf.uint8)\n\n Args:\n category_index: a dict that maps integer ids to category dicts. 
e.g.\n {1: {1: 'dog'}, 2: {2: 'cat'}, ...}\n include_masks: Whether masks should be expected as a positional argument in\n the returned function.\n include_keypoints: Whether keypoints should be expected as a positional\n argument in the returned function.\n include_keypoint_scores: Whether keypoint scores should be expected as a\n positional argument in the returned function.\n include_track_ids: Whether track ids should be expected as a positional\n argument in the returned function.\n **kwargs: Additional kwargs that will be passed to\n visualize_boxes_and_labels_on_image_array.\n\n Returns:\n Returns a function that only takes tensors as positional arguments.\n \"\"\"\n\n def visualization_py_func_fn(*args):\n \"\"\"Visualization function that can be wrapped in a tf.py_func.\n\n Args:\n *args: First 4 positional arguments must be:\n image - uint8 numpy array with shape (img_height, img_width, 3).\n boxes - a numpy array of shape [N, 4].\n classes - a numpy array of shape [N].\n scores - a numpy array of shape [N] or None.\n -- Optional positional arguments --\n instance_masks - a numpy array of shape [N, image_height, image_width].\n keypoints - a numpy array of shape [N, num_keypoints, 2].\n keypoint_scores - a numpy array of shape [N, num_keypoints].\n track_ids - a numpy array of shape [N] with unique track ids.\n\n Returns:\n uint8 numpy array with shape (img_height, img_width, 3) with overlaid\n boxes.\n \"\"\"\n image = args[0]\n boxes = args[1]\n classes = args[2]\n scores = args[3]\n masks = keypoints = keypoint_scores = track_ids = None\n pos_arg_ptr = 4 # Positional argument for first optional tensor (masks).\n if include_masks:\n masks = args[pos_arg_ptr]\n pos_arg_ptr += 1\n if include_keypoints:\n keypoints = args[pos_arg_ptr]\n pos_arg_ptr += 1\n if include_keypoint_scores:\n keypoint_scores = args[pos_arg_ptr]\n pos_arg_ptr += 1\n if include_track_ids:\n track_ids = args[pos_arg_ptr]\n\n return visualize_boxes_and_labels_on_image_array(\n image,\n boxes,\n classes,\n scores,\n category_index=category_index,\n instance_masks=masks,\n keypoints=keypoints,\n keypoint_scores=keypoint_scores,\n track_ids=track_ids,\n **kwargs)\n return visualization_py_func_fn\n\n\ndef draw_heatmaps_on_image(image, heatmaps):\n \"\"\"Draws heatmaps on an image.\n\n The heatmaps are handled channel by channel and different colors are used to\n paint different heatmap channels.\n\n Args:\n image: a PIL.Image object.\n heatmaps: a numpy array with shape [image_height, image_width, channel].\n Note that the image_height and image_width should match the size of input\n image.\n \"\"\"\n draw = ImageDraw.Draw(image)\n channel = heatmaps.shape[2]\n for c in range(channel):\n heatmap = heatmaps[:, :, c] * 255\n heatmap = heatmap.astype('uint8')\n bitmap = Image.fromarray(heatmap, 'L')\n bitmap = bitmap.convert('1')\n draw.bitmap(\n xy=[(0, 0)],\n bitmap=bitmap,\n fill=STANDARD_COLORS[c])\n\n\ndef draw_heatmaps_on_image_array(image, heatmaps):\n \"\"\"Overlays heatmaps on an image (numpy array).\n\n The function overlays the heatmaps on top of the image. The heatmap values will be\n painted with different colors depending on the channels. 
Similar to\n the \"draw_heatmaps_on_image\" function except the inputs are numpy arrays.\n\n Args:\n image: a numpy array with shape [height, width, 3].\n heatmaps: a numpy array with shape [height, width, channel].\n\n Returns:\n A uint8 numpy array representing the input image painted with heatmap\n colors.\n \"\"\"\n if not isinstance(image, np.ndarray):\n image = image.numpy()\n if not isinstance(heatmaps, np.ndarray):\n heatmaps = heatmaps.numpy()\n image_pil = Image.fromarray(np.uint8(image)).convert('RGB')\n draw_heatmaps_on_image(image_pil, heatmaps)\n return np.array(image_pil)\n\n\ndef draw_heatmaps_on_image_tensors(images,\n heatmaps,\n apply_sigmoid=False):\n \"\"\"Draws heatmaps on batch of image tensors.\n\n Args:\n images: A 4D uint8 image tensor of shape [N, H, W, C]. If C > 3, additional\n channels will be ignored. If C = 1, then we convert the images to RGB\n images.\n heatmaps: [N, h, w, channel] float32 tensor of heatmaps. Note that the\n heatmaps will be resized to match the input image size before overlaying\n the heatmaps with input images. Theoretically the heatmap height/width\n should have the same aspect ratio as the input image to avoid potential\n misalignment introduced by the image resize.\n apply_sigmoid: Whether to apply a sigmoid layer on top of the heatmaps. If\n the heatmaps come directly from the prediction logits, then we should\n apply the sigmoid layer to make sure the values are in between [0.0, 1.0].\n\n Returns:\n 4D image tensor of type uint8, with heatmaps overlaid on top.\n \"\"\"\n # Additional channels are being ignored.\n if images.shape[3] > 3:\n images = images[:, :, :, 0:3]\n elif images.shape[3] == 1:\n images = tf.image.grayscale_to_rgb(images)\n\n _, height, width, _ = shape_utils.combined_static_and_dynamic_shape(images)\n if apply_sigmoid:\n heatmaps = tf.math.sigmoid(heatmaps)\n resized_heatmaps = tf.image.resize(heatmaps, size=[height, width])\n\n elems = [images, resized_heatmaps]\n\n def draw_heatmaps(image_and_heatmaps):\n \"\"\"Draws heatmaps on image.\"\"\"\n image_with_heatmaps = tf.py_function(\n draw_heatmaps_on_image_array,\n image_and_heatmaps,\n tf.uint8)\n return image_with_heatmaps\n images = tf.map_fn(draw_heatmaps, elems, dtype=tf.uint8, back_prop=False)\n return images\n\n\ndef _resize_original_image(image, image_shape):\n image = tf.expand_dims(image, 0)\n image = tf.image.resize_images(\n image,\n image_shape,\n method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,\n align_corners=True)\n return tf.cast(tf.squeeze(image, 0), tf.uint8)\n\n\ndef draw_bounding_boxes_on_image_tensors(images,\n boxes,\n classes,\n scores,\n category_index,\n original_image_spatial_shape=None,\n true_image_shape=None,\n instance_masks=None,\n keypoints=None,\n keypoint_scores=None,\n keypoint_edges=None,\n track_ids=None,\n max_boxes_to_draw=20,\n min_score_thresh=0.2,\n use_normalized_coordinates=True):\n \"\"\"Draws bounding boxes, masks, and keypoints on batch of image tensors.\n\n Args:\n images: A 4D uint8 image tensor of shape [N, H, W, C]. If C > 3, additional\n channels will be ignored. If C = 1, then we convert the images to RGB\n images.\n boxes: [N, max_detections, 4] float32 tensor of detection boxes.\n classes: [N, max_detections] int tensor of detection classes. Note that\n classes are 1-indexed.\n scores: [N, max_detections] float32 tensor of detection scores.\n category_index: a dict that maps integer ids to category dicts. 
e.g.\n {1: {1: 'dog'}, 2: {2: 'cat'}, ...}\n original_image_spatial_shape: [N, 2] tensor containing the spatial size of\n the original image.\n true_image_shape: [N, 3] tensor containing the spatial size of unpadded\n original_image.\n instance_masks: A 4D uint8 tensor of shape [N, max_detection, H, W] with\n instance masks.\n keypoints: A 4D float32 tensor of shape [N, max_detection, num_keypoints, 2]\n with keypoints.\n keypoint_scores: A 3D float32 tensor of shape [N, max_detection,\n num_keypoints] with keypoint scores.\n keypoint_edges: A list of tuples with keypoint indices that specify which\n keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws\n edges from keypoint 0 to 1 and from keypoint 2 to 4.\n track_ids: [N, max_detections] int32 tensor of unique track ids (i.e.\n instance ids for each object). If provided, the color-coding of boxes is\n dictated by these ids, and not classes.\n max_boxes_to_draw: Maximum number of boxes to draw on an image. Default 20.\n min_score_thresh: Minimum score threshold for visualization. Default 0.2.\n use_normalized_coordinates: Whether to assume boxes and keypoints are in\n normalized coordinates (as opposed to absolute coordinates).\n Default is True.\n\n Returns:\n 4D image tensor of type uint8, with boxes drawn on top.\n \"\"\"\n # Additional channels are being ignored.\n if images.shape[3] > 3:\n images = images[:, :, :, 0:3]\n elif images.shape[3] == 1:\n images = tf.image.grayscale_to_rgb(images)\n visualization_keyword_args = {\n 'use_normalized_coordinates': use_normalized_coordinates,\n 'max_boxes_to_draw': max_boxes_to_draw,\n 'min_score_thresh': min_score_thresh,\n 'agnostic_mode': False,\n 'line_thickness': 4,\n 'keypoint_edges': keypoint_edges\n }\n if true_image_shape is None:\n true_shapes = tf.constant(-1, shape=[images.shape.as_list()[0], 3])\n else:\n true_shapes = true_image_shape\n if original_image_spatial_shape is None:\n original_shapes = tf.constant(-1, shape=[images.shape.as_list()[0], 2])\n else:\n original_shapes = original_image_spatial_shape\n\n visualize_boxes_fn = create_visualization_fn(\n category_index,\n include_masks=instance_masks is not None,\n include_keypoints=keypoints is not None,\n include_keypoint_scores=keypoint_scores is not None,\n include_track_ids=track_ids is not None,\n **visualization_keyword_args)\n\n elems = [true_shapes, original_shapes, images, boxes, classes, scores]\n if instance_masks is not None:\n elems.append(instance_masks)\n if keypoints is not None:\n elems.append(keypoints)\n if keypoint_scores is not None:\n elems.append(keypoint_scores)\n if track_ids is not None:\n elems.append(track_ids)\n\n def draw_boxes(image_and_detections):\n \"\"\"Draws boxes on image.\"\"\"\n true_shape = image_and_detections[0]\n original_shape = image_and_detections[1]\n if true_image_shape is not None:\n image = shape_utils.pad_or_clip_nd(image_and_detections[2],\n [true_shape[0], true_shape[1], 3])\n if original_image_spatial_shape is not None:\n image_and_detections[2] = _resize_original_image(image, original_shape)\n\n image_with_boxes = tf.py_func(visualize_boxes_fn, image_and_detections[2:],\n tf.uint8)\n return image_with_boxes\n\n images = tf.map_fn(draw_boxes, elems, dtype=tf.uint8, back_prop=False)\n return images\n\n\ndef draw_side_by_side_evaluation_image(eval_dict,\n category_index,\n max_boxes_to_draw=20,\n min_score_thresh=0.2,\n use_normalized_coordinates=True,\n keypoint_edges=None):\n \"\"\"Creates a side-by-side image with detections and groundtruth.\n\n Bounding 
boxes (and instance masks, if available) are visualized on both\n subimages.\n\n Args:\n eval_dict: The evaluation dictionary returned by\n eval_util.result_dict_for_batched_example() or\n eval_util.result_dict_for_single_example().\n category_index: A category index (dictionary) produced from a labelmap.\n max_boxes_to_draw: The maximum number of boxes to draw for detections.\n min_score_thresh: The minimum score threshold for showing detections.\n use_normalized_coordinates: Whether to assume boxes and keypoints are in\n normalized coordinates (as opposed to absolute coordinates).\n Default is True.\n keypoint_edges: A list of tuples with keypoint indices that specify which\n keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws\n edges from keypoint 0 to 1 and from keypoint 2 to 4.\n\n Returns:\n A list of [1, H, 2 * W, C] uint8 tensors. The subimage on the left\n corresponds to detections, while the subimage on the right corresponds to\n groundtruth.\n \"\"\"\n detection_fields = fields.DetectionResultFields()\n input_data_fields = fields.InputDataFields()\n\n images_with_detections_list = []\n\n # Add the batch dimension if the eval_dict is for a single example.\n if len(eval_dict[detection_fields.detection_classes].shape) == 1:\n for key in eval_dict:\n if (key != input_data_fields.original_image and\n key != input_data_fields.image_additional_channels):\n eval_dict[key] = tf.expand_dims(eval_dict[key], 0)\n\n for indx in range(eval_dict[input_data_fields.original_image].shape[0]):\n instance_masks = None\n if detection_fields.detection_masks in eval_dict:\n instance_masks = tf.cast(\n tf.expand_dims(\n eval_dict[detection_fields.detection_masks][indx], axis=0),\n tf.uint8)\n keypoints = None\n keypoint_scores = None\n if detection_fields.detection_keypoints in eval_dict:\n keypoints = tf.expand_dims(\n eval_dict[detection_fields.detection_keypoints][indx], axis=0)\n if detection_fields.detection_keypoint_scores in eval_dict:\n keypoint_scores = tf.expand_dims(\n eval_dict[detection_fields.detection_keypoint_scores][indx], axis=0)\n else:\n keypoint_scores = tf.cast(keypoint_ops.set_keypoint_visibilities(\n keypoints), dtype=tf.float32)\n\n groundtruth_instance_masks = None\n if input_data_fields.groundtruth_instance_masks in eval_dict:\n groundtruth_instance_masks = tf.cast(\n tf.expand_dims(\n eval_dict[input_data_fields.groundtruth_instance_masks][indx],\n axis=0), tf.uint8)\n groundtruth_keypoints = None\n groundtruth_keypoint_scores = None\n gt_kpt_vis_fld = input_data_fields.groundtruth_keypoint_visibilities\n if input_data_fields.groundtruth_keypoints in eval_dict:\n groundtruth_keypoints = tf.expand_dims(\n eval_dict[input_data_fields.groundtruth_keypoints][indx], axis=0)\n if gt_kpt_vis_fld in eval_dict:\n groundtruth_keypoint_scores = tf.expand_dims(\n tf.cast(eval_dict[gt_kpt_vis_fld][indx], dtype=tf.float32), axis=0)\n else:\n groundtruth_keypoint_scores = tf.cast(\n keypoint_ops.set_keypoint_visibilities(\n groundtruth_keypoints), dtype=tf.float32)\n\n images_with_detections = draw_bounding_boxes_on_image_tensors(\n tf.expand_dims(\n eval_dict[input_data_fields.original_image][indx], axis=0),\n tf.expand_dims(\n eval_dict[detection_fields.detection_boxes][indx], axis=0),\n tf.expand_dims(\n eval_dict[detection_fields.detection_classes][indx], axis=0),\n tf.expand_dims(\n eval_dict[detection_fields.detection_scores][indx], axis=0),\n category_index,\n original_image_spatial_shape=tf.expand_dims(\n 
eval_dict[input_data_fields.original_image_spatial_shape][indx],\n axis=0),\n true_image_shape=tf.expand_dims(\n eval_dict[input_data_fields.true_image_shape][indx], axis=0),\n instance_masks=instance_masks,\n keypoints=keypoints,\n keypoint_scores=keypoint_scores,\n keypoint_edges=keypoint_edges,\n max_boxes_to_draw=max_boxes_to_draw,\n min_score_thresh=min_score_thresh,\n use_normalized_coordinates=use_normalized_coordinates)\n images_with_groundtruth = draw_bounding_boxes_on_image_tensors(\n tf.expand_dims(\n eval_dict[input_data_fields.original_image][indx], axis=0),\n tf.expand_dims(\n eval_dict[input_data_fields.groundtruth_boxes][indx], axis=0),\n tf.expand_dims(\n eval_dict[input_data_fields.groundtruth_classes][indx], axis=0),\n tf.expand_dims(\n tf.ones_like(\n eval_dict[input_data_fields.groundtruth_classes][indx],\n dtype=tf.float32),\n axis=0),\n category_index,\n original_image_spatial_shape=tf.expand_dims(\n eval_dict[input_data_fields.original_image_spatial_shape][indx],\n axis=0),\n true_image_shape=tf.expand_dims(\n eval_dict[input_data_fields.true_image_shape][indx], axis=0),\n instance_masks=groundtruth_instance_masks,\n keypoints=groundtruth_keypoints,\n keypoint_scores=groundtruth_keypoint_scores,\n keypoint_edges=keypoint_edges,\n max_boxes_to_draw=None,\n min_score_thresh=0.0,\n use_normalized_coordinates=use_normalized_coordinates)\n images_to_visualize = tf.concat([images_with_detections,\n images_with_groundtruth], axis=2)\n\n if input_data_fields.image_additional_channels in eval_dict:\n images_with_additional_channels_groundtruth = (\n draw_bounding_boxes_on_image_tensors(\n tf.expand_dims(\n eval_dict[input_data_fields.image_additional_channels][indx],\n axis=0),\n tf.expand_dims(\n eval_dict[input_data_fields.groundtruth_boxes][indx], axis=0),\n tf.expand_dims(\n eval_dict[input_data_fields.groundtruth_classes][indx],\n axis=0),\n tf.expand_dims(\n tf.ones_like(\n eval_dict[input_data_fields.groundtruth_classes][indx],\n dtype=tf.float32),\n axis=0),\n category_index,\n original_image_spatial_shape=tf.expand_dims(\n eval_dict[input_data_fields.original_image_spatial_shape]\n [indx],\n axis=0),\n true_image_shape=tf.expand_dims(\n eval_dict[input_data_fields.true_image_shape][indx], axis=0),\n instance_masks=groundtruth_instance_masks,\n keypoints=None,\n keypoint_edges=None,\n max_boxes_to_draw=None,\n min_score_thresh=0.0,\n use_normalized_coordinates=use_normalized_coordinates))\n images_to_visualize = tf.concat(\n [images_to_visualize, images_with_additional_channels_groundtruth],\n axis=2)\n images_with_detections_list.append(images_to_visualize)\n\n return images_with_detections_list\n\n\ndef draw_keypoints_on_image_array(image,\n keypoints,\n keypoint_scores=None,\n min_score_thresh=0.5,\n color='red',\n radius=2,\n use_normalized_coordinates=True,\n keypoint_edges=None,\n keypoint_edge_color='green',\n keypoint_edge_width=2):\n \"\"\"Draws keypoints on an image (numpy array).\n\n Args:\n image: a numpy array with shape [height, width, 3].\n keypoints: a numpy array with shape [num_keypoints, 2].\n keypoint_scores: a numpy array with shape [num_keypoints]. If provided, only\n those keypoints with a score above min_score_thresh will be visualized.\n min_score_thresh: A scalar indicating the minimum keypoint score required\n for a keypoint to be visualized. Note that keypoint_scores must be\n provided for this threshold to take effect.\n color: color to draw the keypoints with. Default is red.\n radius: keypoint radius. 
Default value is 2.\n use_normalized_coordinates: if True (default), treat keypoint values as\n relative to the image. Otherwise treat them as absolute.\n keypoint_edges: A list of tuples with keypoint indices that specify which\n keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws\n edges from keypoint 0 to 1 and from keypoint 2 to 4.\n keypoint_edge_color: color to draw the keypoint edges with. Default is green.\n keypoint_edge_width: width of the edges drawn between keypoints. Default\n value is 2.\n \"\"\"\n image_pil = Image.fromarray(np.uint8(image)).convert('RGB')\n draw_keypoints_on_image(image_pil,\n keypoints,\n keypoint_scores=keypoint_scores,\n min_score_thresh=min_score_thresh,\n color=color,\n radius=radius,\n use_normalized_coordinates=use_normalized_coordinates,\n keypoint_edges=keypoint_edges,\n keypoint_edge_color=keypoint_edge_color,\n keypoint_edge_width=keypoint_edge_width)\n np.copyto(image, np.array(image_pil))\n\n\ndef draw_keypoints_on_image(image,\n keypoints,\n keypoint_scores=None,\n min_score_thresh=0.5,\n color='red',\n radius=2,\n use_normalized_coordinates=True,\n keypoint_edges=None,\n keypoint_edge_color='green',\n keypoint_edge_width=2):\n \"\"\"Draws keypoints on an image.\n\n Args:\n image: a PIL.Image object.\n keypoints: a numpy array with shape [num_keypoints, 2].\n keypoint_scores: a numpy array with shape [num_keypoints].\n min_score_thresh: a score threshold for visualizing keypoints. Only used if\n keypoint_scores is provided.\n color: color to draw the keypoints with. Default is red.\n radius: keypoint radius. Default value is 2.\n use_normalized_coordinates: if True (default), treat keypoint values as\n relative to the image. Otherwise treat them as absolute.\n keypoint_edges: A list of tuples with keypoint indices that specify which\n keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws\n edges from keypoint 0 to 1 and from keypoint 2 to 4.\n keypoint_edge_color: color to draw the keypoint edges with. Default is green.\n keypoint_edge_width: width of the edges drawn between keypoints. 
Default\n value is 2.\n \"\"\"\n draw = ImageDraw.Draw(image)\n im_width, im_height = image.size\n keypoints = np.array(keypoints)\n keypoints_x = [k[1] for k in keypoints]\n keypoints_y = [k[0] for k in keypoints]\n if use_normalized_coordinates:\n keypoints_x = tuple([im_width * x for x in keypoints_x])\n keypoints_y = tuple([im_height * y for y in keypoints_y])\n if keypoint_scores is not None:\n keypoint_scores = np.array(keypoint_scores)\n valid_kpt = np.greater(keypoint_scores, min_score_thresh)\n else:\n valid_kpt = np.where(np.any(np.isnan(keypoints), axis=1),\n np.zeros_like(keypoints[:, 0]),\n np.ones_like(keypoints[:, 0]))\n valid_kpt = [v for v in valid_kpt]\n\n for keypoint_x, keypoint_y, valid in zip(keypoints_x, keypoints_y, valid_kpt):\n if valid:\n draw.ellipse([(keypoint_x - radius, keypoint_y - radius),\n (keypoint_x + radius, keypoint_y + radius)],\n outline=color, fill=color)\n if keypoint_edges is not None:\n for keypoint_start, keypoint_end in keypoint_edges:\n if (keypoint_start < 0 or keypoint_start >= len(keypoints) or\n keypoint_end < 0 or keypoint_end >= len(keypoints)):\n continue\n if not (valid_kpt[keypoint_start] and valid_kpt[keypoint_end]):\n continue\n edge_coordinates = [\n keypoints_x[keypoint_start], keypoints_y[keypoint_start],\n keypoints_x[keypoint_end], keypoints_y[keypoint_end]\n ]\n draw.line(\n edge_coordinates, fill=keypoint_edge_color, width=keypoint_edge_width)\n\n\ndef draw_mask_on_image_array(image, mask, color='red', alpha=0.4):\n \"\"\"Draws mask on an image.\n\n Args:\n image: uint8 numpy array with shape (img_height, img_width, 3)\n mask: a uint8 numpy array of shape (img_height, img_width) with\n values that are either 0 or 1.\n color: color to draw the mask with. Default is red.\n alpha: transparency value between 0 and 1. (default: 0.4)\n\n Raises:\n ValueError: On incorrect data type for image or masks.\n \"\"\"\n if image.dtype != np.uint8:\n raise ValueError('`image` not of type np.uint8')\n if mask.dtype != np.uint8:\n raise ValueError('`mask` not of type np.uint8')\n if np.any(np.logical_and(mask != 1, mask != 0)):\n raise ValueError('`mask` elements should be in [0, 1]')\n if image.shape[:2] != mask.shape:\n raise ValueError('The image has spatial dimensions %s but the mask has '\n 'dimensions %s' % (image.shape[:2], mask.shape))\n rgb = ImageColor.getrgb(color)\n pil_image = Image.fromarray(image)\n\n solid_color = np.expand_dims(\n np.ones_like(mask), axis=2) * np.reshape(list(rgb), [1, 1, 3])\n pil_solid_color = Image.fromarray(np.uint8(solid_color)).convert('RGBA')\n pil_mask = Image.fromarray(np.uint8(255.0*alpha*mask)).convert('L')\n pil_image = Image.composite(pil_solid_color, pil_image, pil_mask)\n np.copyto(image, np.array(pil_image.convert('RGB')))\n\n\ndef visualize_boxes_and_labels_on_image_array(\n image,\n boxes,\n classes,\n scores,\n category_index,\n instance_masks=None,\n instance_boundaries=None,\n keypoints=None,\n keypoint_scores=None,\n keypoint_edges=None,\n track_ids=None,\n use_normalized_coordinates=False,\n max_boxes_to_draw=20,\n min_score_thresh=.5,\n agnostic_mode=False,\n line_thickness=4,\n groundtruth_box_visualization_color='black',\n skip_boxes=False,\n skip_scores=False,\n skip_labels=False,\n skip_track_ids=False):\n \"\"\"Overlay labeled boxes on an image with formatted scores and label names.\n\n This function groups boxes that correspond to the same location\n and creates a display string for each detection and overlays these\n on the image. 
Note that this function modifies the image in place, and returns\n that same image.\n\n Args:\n image: uint8 numpy array with shape (img_height, img_width, 3)\n boxes: a numpy array of shape [N, 4]\n classes: a numpy array of shape [N]. Note that class indices are 1-based,\n and match the keys in the label map.\n scores: a numpy array of shape [N] or None. If scores=None, then\n this function assumes that the boxes to be plotted are groundtruth\n boxes and plots all boxes as black with no classes or scores.\n category_index: a dict containing category dictionaries (each holding\n category index `id` and category name `name`) keyed by category indices.\n instance_masks: a numpy array of shape [N, image_height, image_width] with\n values ranging between 0 and 1, can be None.\n instance_boundaries: a numpy array of shape [N, image_height, image_width]\n with values ranging between 0 and 1, can be None.\n keypoints: a numpy array of shape [N, num_keypoints, 2], can\n be None.\n keypoint_scores: a numpy array of shape [N, num_keypoints], can be None.\n keypoint_edges: A list of tuples with keypoint indices that specify which\n keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws\n edges from keypoint 0 to 1 and from keypoint 2 to 4.\n track_ids: a numpy array of shape [N] with unique track ids. If provided,\n color-coding of boxes will be determined by these ids, and not the class\n indices.\n use_normalized_coordinates: whether boxes is to be interpreted as\n normalized coordinates or not.\n max_boxes_to_draw: maximum number of boxes to visualize. If None, draw\n all boxes.\n min_score_thresh: minimum score threshold for a box or keypoint to be\n visualized.\n agnostic_mode: boolean (default: False) controlling whether to evaluate in\n class-agnostic mode or not. 
This mode will display scores but ignore\n classes.\n line_thickness: integer (default: 4) controlling line width of the boxes.\n groundtruth_box_visualization_color: box color for visualizing groundtruth\n boxes.\n skip_boxes: whether to skip the drawing of bounding boxes.\n skip_scores: whether to skip score when drawing a single detection\n skip_labels: whether to skip label when drawing a single detection\n skip_track_ids: whether to skip track id when drawing a single detection\n\n Returns:\n uint8 numpy array with shape (img_height, img_width, 3) with overlaid boxes.\n \"\"\"\n # Create a display string (and color) for every box location, group any boxes\n # that correspond to the same location.\n box_to_display_str_map = collections.defaultdict(list)\n box_to_color_map = collections.defaultdict(str)\n box_to_instance_masks_map = {}\n box_to_instance_boundaries_map = {}\n box_to_keypoints_map = collections.defaultdict(list)\n box_to_keypoint_scores_map = collections.defaultdict(list)\n box_to_track_ids_map = {}\n if not max_boxes_to_draw:\n max_boxes_to_draw = boxes.shape[0]\n for i in range(boxes.shape[0]):\n if max_boxes_to_draw == len(box_to_color_map):\n break\n if scores is None or scores[i] > min_score_thresh:\n box = tuple(boxes[i].tolist())\n if instance_masks is not None:\n box_to_instance_masks_map[box] = instance_masks[i]\n if instance_boundaries is not None:\n box_to_instance_boundaries_map[box] = instance_boundaries[i]\n if keypoints is not None:\n box_to_keypoints_map[box].extend(keypoints[i])\n if keypoint_scores is not None:\n box_to_keypoint_scores_map[box].extend(keypoint_scores[i])\n if track_ids is not None:\n box_to_track_ids_map[box] = track_ids[i]\n if scores is None:\n box_to_color_map[box] = groundtruth_box_visualization_color\n else:\n display_str = ''\n if not skip_labels:\n if not agnostic_mode:\n if classes[i] in six.viewkeys(category_index):\n class_name = category_index[classes[i]]['name']\n else:\n class_name = 'N/A'\n display_str = str(class_name)\n if not skip_scores:\n if not display_str:\n display_str = '{}%'.format(round(100*scores[i]))\n else:\n display_str = '{}: {}%'.format(display_str, round(100*scores[i]))\n if not skip_track_ids and track_ids is not None:\n if not display_str:\n display_str = 'ID {}'.format(track_ids[i])\n else:\n display_str = '{}: ID {}'.format(display_str, track_ids[i])\n box_to_display_str_map[box].append(display_str)\n if agnostic_mode:\n box_to_color_map[box] = 'DarkOrange'\n elif track_ids is not None:\n prime_multiplier = _get_multiplier_for_color_randomness()\n box_to_color_map[box] = STANDARD_COLORS[\n (prime_multiplier * track_ids[i]) % len(STANDARD_COLORS)]\n else:\n box_to_color_map[box] = STANDARD_COLORS[\n classes[i] % len(STANDARD_COLORS)]\n\n # Draw all boxes onto image.\n for box, color in box_to_color_map.items():\n ymin, xmin, ymax, xmax = box\n if instance_masks is not None:\n draw_mask_on_image_array(\n image,\n box_to_instance_masks_map[box],\n color=color\n )\n if instance_boundaries is not None:\n draw_mask_on_image_array(\n image,\n box_to_instance_boundaries_map[box],\n color='red',\n alpha=1.0\n )\n draw_bounding_box_on_image_array(\n image,\n ymin,\n xmin,\n ymax,\n xmax,\n color=color,\n thickness=0 if skip_boxes else line_thickness,\n display_str_list=box_to_display_str_map[box],\n use_normalized_coordinates=use_normalized_coordinates)\n if keypoints is not None:\n keypoint_scores_for_box = None\n if box_to_keypoint_scores_map:\n keypoint_scores_for_box = box_to_keypoint_scores_map[box]\n 
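# Keypoints and keypoint edges belonging to this box are drawn in the\n # same color as the box itself.\n 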
draw_keypoints_on_image_array(\n image,\n box_to_keypoints_map[box],\n keypoint_scores_for_box,\n min_score_thresh=min_score_thresh,\n color=color,\n radius=line_thickness / 2,\n use_normalized_coordinates=use_normalized_coordinates,\n keypoint_edges=keypoint_edges,\n keypoint_edge_color=color,\n keypoint_edge_width=line_thickness // 2)\n\n return image\n\n\ndef add_cdf_image_summary(values, name):\n \"\"\"Adds a tf.summary.image for a CDF plot of the values.\n\n Normalizes `values` such that they sum to 1, plots the cumulative distribution\n function and creates a tf image summary.\n\n Args:\n values: a 1-D float32 tensor containing the values.\n name: name for the image summary.\n \"\"\"\n def cdf_plot(values):\n \"\"\"Numpy function to plot CDF.\"\"\"\n normalized_values = values / np.sum(values)\n sorted_values = np.sort(normalized_values)\n cumulative_values = np.cumsum(sorted_values)\n fraction_of_examples = (np.arange(cumulative_values.size, dtype=np.float32)\n / cumulative_values.size)\n fig = plt.figure(frameon=False)\n ax = fig.add_subplot('111')\n ax.plot(fraction_of_examples, cumulative_values)\n ax.set_ylabel('cumulative normalized values')\n ax.set_xlabel('fraction of examples')\n fig.canvas.draw()\n width, height = fig.get_size_inches() * fig.get_dpi()\n image = np.fromstring(fig.canvas.tostring_rgb(), dtype='uint8').reshape(\n 1, int(height), int(width), 3)\n return image\n cdf_plot = tf.py_func(cdf_plot, [values], tf.uint8)\n tf.summary.image(name, cdf_plot)\n\n\ndef add_hist_image_summary(values, bins, name):\n \"\"\"Adds a tf.summary.image for a histogram plot of the values.\n\n Plots the histogram of values and creates a tf image summary.\n\n Args:\n values: a 1-D float32 tensor containing the values.\n bins: bin edges which will be directly passed to np.histogram.\n name: name for the image summary.\n \"\"\"\n\n def hist_plot(values, bins):\n \"\"\"Numpy function to plot hist.\"\"\"\n fig = plt.figure(frameon=False)\n ax = fig.add_subplot('111')\n y, x = np.histogram(values, bins=bins)\n ax.plot(x[:-1], y)\n ax.set_ylabel('count')\n ax.set_xlabel('value')\n fig.canvas.draw()\n width, height = fig.get_size_inches() * fig.get_dpi()\n image = np.fromstring(\n fig.canvas.tostring_rgb(), dtype='uint8').reshape(\n 1, int(height), int(width), 3)\n return image\n hist_plot = tf.py_func(hist_plot, [values, bins], tf.uint8)\n tf.summary.image(name, hist_plot)\n\n\nclass EvalMetricOpsVisualization(six.with_metaclass(abc.ABCMeta, object)):\n \"\"\"Abstract base class responsible for visualizations during evaluation.\n\n Currently, summary images are not run during evaluation. One way to produce\n evaluation images in Tensorboard is to provide tf.summary.image strings as\n `value_ops` in tf.estimator.EstimatorSpec's `eval_metric_ops`. 
This class is\n responsible for accruing images (with overlaid detections and groundtruth)\n and returning a dictionary that can be passed to `eval_metric_ops`.\n \"\"\"\n\n def __init__(self,\n category_index,\n max_examples_to_draw=5,\n max_boxes_to_draw=20,\n min_score_thresh=0.2,\n use_normalized_coordinates=True,\n summary_name_prefix='evaluation_image',\n keypoint_edges=None):\n \"\"\"Creates an EvalMetricOpsVisualization.\n\n Args:\n category_index: A category index (dictionary) produced from a labelmap.\n max_examples_to_draw: The maximum number of example summaries to produce.\n max_boxes_to_draw: The maximum number of boxes to draw for detections.\n min_score_thresh: The minimum score threshold for showing detections.\n use_normalized_coordinates: Whether to assume boxes and keypoints are in\n normalized coordinates (as opposed to absolute coordinates).\n Default is True.\n summary_name_prefix: A string prefix for each image summary.\n keypoint_edges: A list of tuples with keypoint indices that specify which\n keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws\n edges from keypoint 0 to 1 and from keypoint 2 to 4.\n \"\"\"\n\n self._category_index = category_index\n self._max_examples_to_draw = max_examples_to_draw\n self._max_boxes_to_draw = max_boxes_to_draw\n self._min_score_thresh = min_score_thresh\n self._use_normalized_coordinates = use_normalized_coordinates\n self._summary_name_prefix = summary_name_prefix\n self._keypoint_edges = keypoint_edges\n self._images = []\n\n def clear(self):\n self._images = []\n\n def add_images(self, images):\n \"\"\"Store a list of images, each with shape [1, H, W, C].\"\"\"\n if len(self._images) >= self._max_examples_to_draw:\n return\n\n # Store images and clip list if necessary.\n self._images.extend(images)\n if len(self._images) > self._max_examples_to_draw:\n self._images[self._max_examples_to_draw:] = []\n\n def get_estimator_eval_metric_ops(self, eval_dict):\n \"\"\"Returns metric ops for use in tf.estimator.EstimatorSpec.\n\n Args:\n eval_dict: A dictionary that holds an image, groundtruth, and detections\n for a batched example. Note that we use only the first example for\n visualization. See eval_util.result_dict_for_batched_example() for a\n convenient method for constructing such a dictionary. 
The dictionary\n contains\n fields.InputDataFields.original_image: [batch_size, H, W, 3] image.\n fields.InputDataFields.original_image_spatial_shape: [batch_size, 2]\n tensor containing the size of the original image.\n fields.InputDataFields.true_image_shape: [batch_size, 3]\n tensor containing the spatial size of the unpadded original image.\n fields.InputDataFields.groundtruth_boxes - [batch_size, num_boxes, 4]\n float32 tensor with groundtruth boxes in range [0.0, 1.0].\n fields.InputDataFields.groundtruth_classes - [batch_size, num_boxes]\n int64 tensor with 1-indexed groundtruth classes.\n fields.InputDataFields.groundtruth_instance_masks - (optional)\n [batch_size, num_boxes, H, W] int64 tensor with instance masks.\n fields.InputDataFields.groundtruth_keypoints - (optional)\n [batch_size, num_boxes, num_keypoints, 2] float32 tensor with\n keypoint coordinates in format [y, x].\n fields.InputDataFields.groundtruth_keypoint_visibilities - (optional)\n [batch_size, num_boxes, num_keypoints] bool tensor with\n keypoint visibilities.\n fields.DetectionResultFields.detection_boxes - [batch_size,\n max_num_boxes, 4] float32 tensor with detection boxes in range [0.0,\n 1.0].\n fields.DetectionResultFields.detection_classes - [batch_size,\n max_num_boxes] int64 tensor with 1-indexed detection classes.\n fields.DetectionResultFields.detection_scores - [batch_size,\n max_num_boxes] float32 tensor with detection scores.\n fields.DetectionResultFields.detection_masks - (optional) [batch_size,\n max_num_boxes, H, W] float32 tensor of binarized masks.\n fields.DetectionResultFields.detection_keypoints - (optional)\n [batch_size, max_num_boxes, num_keypoints, 2] float32 tensor with\n keypoints.\n fields.DetectionResultFields.detection_keypoint_scores - (optional)\n [batch_size, max_num_boxes, num_keypoints] float32 tensor with\n keypoint scores.\n\n Returns:\n A dictionary of image summary names to tuple of (value_op, update_op). The\n `update_op` is the same for all items in the dictionary, and is\n responsible for saving a single side-by-side image with detections and\n groundtruth. 
Each `value_op` holds the tf.summary.image string for a given\n image.\n \"\"\"\n if self._max_examples_to_draw == 0:\n return {}\n images = self.images_from_evaluation_dict(eval_dict)\n\n def get_images():\n \"\"\"Returns a list of images, padded to self._max_examples_to_draw.\"\"\"\n images = self._images\n while len(images) < self._max_examples_to_draw:\n images.append(np.array(0, dtype=np.uint8))\n self.clear()\n return images\n\n def image_summary_or_default_string(summary_name, image):\n \"\"\"Returns image summaries for non-padded elements.\"\"\"\n return tf.cond(\n tf.equal(tf.size(tf.shape(image)), 4),\n lambda: tf.summary.image(summary_name, image),\n lambda: tf.constant(''))\n\n if tf.executing_eagerly():\n update_op = self.add_images([[images[0]]])\n image_tensors = get_images()\n else:\n update_op = tf.py_func(self.add_images, [[images[0]]], [])\n image_tensors = tf.py_func(\n get_images, [], [tf.uint8] * self._max_examples_to_draw)\n eval_metric_ops = {}\n for i, image in enumerate(image_tensors):\n summary_name = self._summary_name_prefix + '/' + str(i)\n value_op = image_summary_or_default_string(summary_name, image)\n eval_metric_ops[summary_name] = (value_op, update_op)\n return eval_metric_ops\n\n @abc.abstractmethod\n def images_from_evaluation_dict(self, eval_dict):\n \"\"\"Converts evaluation dictionary into a list of image tensors.\n\n To be overridden by implementations.\n\n Args:\n eval_dict: A dictionary with all the necessary information for producing\n visualizations.\n\n Returns:\n A list of [1, H, W, C] uint8 tensors.\n \"\"\"\n raise NotImplementedError\n\n\nclass VisualizeSingleFrameDetections(EvalMetricOpsVisualization):\n \"\"\"Class responsible for single-frame object detection visualizations.\"\"\"\n\n def __init__(self,\n category_index,\n max_examples_to_draw=5,\n max_boxes_to_draw=20,\n min_score_thresh=0.2,\n use_normalized_coordinates=True,\n summary_name_prefix='Detections_Left_Groundtruth_Right',\n keypoint_edges=None):\n super(VisualizeSingleFrameDetections, self).__init__(\n category_index=category_index,\n max_examples_to_draw=max_examples_to_draw,\n max_boxes_to_draw=max_boxes_to_draw,\n min_score_thresh=min_score_thresh,\n use_normalized_coordinates=use_normalized_coordinates,\n summary_name_prefix=summary_name_prefix,\n keypoint_edges=keypoint_edges)\n\n def images_from_evaluation_dict(self, eval_dict):\n return draw_side_by_side_evaluation_image(eval_dict, self._category_index,\n self._max_boxes_to_draw,\n self._min_score_thresh,\n self._use_normalized_coordinates,\n self._keypoint_edges)\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Contains functions which are convenient for unit testing.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nfrom six.moves import range\nfrom six.moves import zip\nimport tensorflow.compat.v1 as tf\n\nfrom object_detection.core import anchor_generator\nfrom object_detection.core import box_coder\nfrom object_detection.core import box_list\nfrom object_detection.core import box_predictor\nfrom object_detection.core import matcher\nfrom object_detection.utils import shape_utils\nfrom object_detection.utils import tf_version\n\n# Default size (both width and height) used for testing mask predictions.\nDEFAULT_MASK_SIZE = 5\n\n\nclass MockBoxCoder(box_coder.BoxCoder):\n \"\"\"Simple `difference` BoxCoder.\"\"\"\n\n @property\n def code_size(self):\n return 4\n\n def _encode(self, boxes, anchors):\n return boxes.get() - anchors.get()\n\n def _decode(self, rel_codes, anchors):\n return box_list.BoxList(rel_codes + anchors.get())\n\n\nclass MockMaskHead(object):\n \"\"\"Simple maskhead that returns all zeros as mask predictions.\"\"\"\n\n def __init__(self, num_classes):\n self._num_classes = num_classes\n\n def predict(self, features):\n batch_size = tf.shape(features)[0]\n return tf.zeros((batch_size, 1, self._num_classes, DEFAULT_MASK_SIZE,\n DEFAULT_MASK_SIZE),\n dtype=tf.float32)\n\n\nclass MockBoxPredictor(box_predictor.BoxPredictor):\n \"\"\"Simple box predictor that ignores inputs and outputs all zeros.\"\"\"\n\n def __init__(self, is_training, num_classes, add_background_class=True):\n super(MockBoxPredictor, self).__init__(is_training, num_classes)\n self._add_background_class = add_background_class\n\n def _predict(self, image_features, num_predictions_per_location):\n image_feature = image_features[0]\n combined_feature_shape = shape_utils.combined_static_and_dynamic_shape(\n image_feature)\n batch_size = combined_feature_shape[0]\n num_anchors = (combined_feature_shape[1] * combined_feature_shape[2])\n code_size = 4\n zero = tf.reduce_sum(0 * image_feature)\n num_class_slots = self.num_classes\n if self._add_background_class:\n num_class_slots = num_class_slots + 1\n box_encodings = zero + tf.zeros(\n (batch_size, num_anchors, 1, code_size), dtype=tf.float32)\n class_predictions_with_background = zero + tf.zeros(\n (batch_size, num_anchors, num_class_slots), dtype=tf.float32)\n predictions_dict = {\n box_predictor.BOX_ENCODINGS:\n box_encodings,\n box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND:\n class_predictions_with_background\n }\n return predictions_dict\n\n\nclass MockKerasBoxPredictor(box_predictor.KerasBoxPredictor):\n \"\"\"Simple box predictor that ignores inputs and outputs all zeros.\"\"\"\n\n def __init__(self, is_training, num_classes, add_background_class=True):\n super(MockKerasBoxPredictor, self).__init__(\n 
is_training, num_classes, False, False)\n self._add_background_class = add_background_class\n\n def _predict(self, image_features, **kwargs):\n image_feature = image_features[0]\n combined_feature_shape = shape_utils.combined_static_and_dynamic_shape(\n image_feature)\n batch_size = combined_feature_shape[0]\n num_anchors = (combined_feature_shape[1] * combined_feature_shape[2])\n code_size = 4\n zero = tf.reduce_sum(0 * image_feature)\n num_class_slots = self.num_classes\n if self._add_background_class:\n num_class_slots = num_class_slots + 1\n box_encodings = zero + tf.zeros(\n (batch_size, num_anchors, 1, code_size), dtype=tf.float32)\n class_predictions_with_background = zero + tf.zeros(\n (batch_size, num_anchors, num_class_slots), dtype=tf.float32)\n predictions_dict = {\n box_predictor.BOX_ENCODINGS:\n box_encodings,\n box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND:\n class_predictions_with_background\n }\n return predictions_dict\n\n\nclass MockAnchorGenerator(anchor_generator.AnchorGenerator):\n \"\"\"Mock anchor generator.\"\"\"\n\n def name_scope(self):\n return 'MockAnchorGenerator'\n\n def num_anchors_per_location(self):\n return [1]\n\n def _generate(self, feature_map_shape_list):\n num_anchors = sum([shape[0] * shape[1] for shape in feature_map_shape_list])\n return box_list.BoxList(tf.zeros((num_anchors, 4), dtype=tf.float32))\n\n\nclass MockMatcher(matcher.Matcher):\n \"\"\"Simple matcher that matches first anchor to first groundtruth box.\"\"\"\n\n def _match(self, similarity_matrix, valid_rows):\n return tf.constant([0, -1, -1, -1], dtype=tf.int32)\n\n\ndef create_diagonal_gradient_image(height, width, depth):\n \"\"\"Creates pyramid image. Useful for testing.\n\n For example, pyramid_image(5, 6, 1) looks like:\n # [[[ 5. 4. 3. 2. 1. 0.]\n # [ 6. 5. 4. 3. 2. 1.]\n # [ 7. 6. 5. 4. 3. 2.]\n # [ 8. 7. 6. 5. 4. 3.]\n # [ 9. 8. 7. 6. 5. 4.]]]\n\n Args:\n height: height of image\n width: width of image\n depth: depth of image\n\n Returns:\n pyramid image\n \"\"\"\n row = np.arange(height)\n col = np.arange(width)[::-1]\n image_layer = np.expand_dims(row, 1) + col\n image_layer = np.expand_dims(image_layer, 2)\n\n image = image_layer\n for i in range(1, depth):\n image = np.concatenate((image, image_layer * pow(10, i)), 2)\n\n return image.astype(np.float32)\n\n\ndef create_random_boxes(num_boxes, max_height, max_width):\n \"\"\"Creates random bounding boxes of specific maximum height and width.\n\n Args:\n num_boxes: number of boxes.\n max_height: maximum height of boxes.\n max_width: maximum width of boxes.\n\n Returns:\n boxes: numpy array of shape [num_boxes, 4]. Each row is in form\n [y_min, x_min, y_max, x_max].\n \"\"\"\n\n y_1 = np.random.uniform(size=(1, num_boxes)) * max_height\n y_2 = np.random.uniform(size=(1, num_boxes)) * max_height\n x_1 = np.random.uniform(size=(1, num_boxes)) * max_width\n x_2 = np.random.uniform(size=(1, num_boxes)) * max_width\n\n boxes = np.zeros(shape=(num_boxes, 4))\n boxes[:, 0] = np.minimum(y_1, y_2)\n boxes[:, 1] = np.minimum(x_1, x_2)\n boxes[:, 2] = np.maximum(y_1, y_2)\n boxes[:, 3] = np.maximum(x_1, x_2)\n\n return boxes.astype(np.float32)\n\n\ndef first_rows_close_as_set(a, b, k=None, rtol=1e-6, atol=1e-6):\n \"\"\"Checks if first K entries of two lists are close, up to permutation.\n\n Inputs to this assert are lists of items which can be compared via\n numpy.allclose(...) and can be sorted.\n\n Args:\n a: list of items which can be compared via numpy.allclose(...) 
and are\n sortable.\n b: list of items which can be compared via numpy.allclose(...) and are\n sortable.\n k: a non-negative integer. If not provided, k is set to be len(a).\n rtol: relative tolerance.\n atol: absolute tolerance.\n\n Returns:\n boolean, True if input lists a and b have the same length and\n the first k entries of the inputs satisfy numpy.allclose() after\n sorting entries.\n \"\"\"\n if not isinstance(a, list) or not isinstance(b, list) or len(a) != len(b):\n return False\n if not k:\n k = len(a)\n k = min(k, len(a))\n a_sorted = sorted(a[:k])\n b_sorted = sorted(b[:k])\n return all([\n np.allclose(entry_a, entry_b, rtol, atol)\n for (entry_a, entry_b) in zip(a_sorted, b_sorted)\n ])\n\n\nclass GraphContextOrNone(object):\n \"\"\"A new Graph context for TF1.X and None for TF2.X.\n\n This is useful to write model tests that work with both TF1.X and TF2.X.\n\n Example test using this pattern:\n\n class ModelTest(test_case.TestCase):\n def test_model(self):\n with test_utils.GraphContextOrNone() as g:\n model = Model()\n def compute_fn():\n out = model.predict()\n return out['detection_boxes']\n boxes = self.execute(compute_fn, [], graph=g)\n self.assertAllClose(boxes, expected_boxes)\n \"\"\"\n\n def __init__(self):\n if tf_version.is_tf2():\n self.graph = None\n else:\n self.graph = tf.Graph().as_default()\n\n def __enter__(self):\n if tf_version.is_tf2():\n return None\n else:\n return self.graph.__enter__()\n\n def __exit__(self, ttype, value, traceback):\n if tf_version.is_tf2():\n return False\n else:\n return self.graph.__exit__(ttype, value, traceback)\n", "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Model definition for the RetinaNet model.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom tensorflow.python.keras import backend\nfrom official.vision.detection.dataloader import mode_keys\nfrom official.vision.detection.evaluation import factory as eval_factory\nfrom official.vision.detection.modeling import base_model\nfrom official.vision.detection.modeling import losses\nfrom official.vision.detection.modeling.architecture import factory\nfrom official.vision.detection.ops import postprocess_ops\n\n\nclass RetinanetModel(base_model.Model):\n \"\"\"RetinaNet model function.\"\"\"\n\n def __init__(self, params):\n super(RetinanetModel, self).__init__(params)\n\n # For eval metrics.\n self._params = params\n\n # Architecture generators.\n self._backbone_fn = factory.backbone_generator(params)\n self._fpn_fn = factory.multilevel_features_generator(params)\n self._head_fn = factory.retinanet_head_generator(params)\n\n # Loss function.\n self._cls_loss_fn = losses.RetinanetClassLoss(\n params.retinanet_loss, params.architecture.num_classes)\n self._box_loss_fn = losses.RetinanetBoxLoss(params.retinanet_loss)\n 
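# Relative weight of the box regression loss; build_loss_fn() combines the\n # losses as model_loss = cls_loss + box_loss_weight * box_loss.\n 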
self._box_loss_weight = params.retinanet_loss.box_loss_weight\n self._keras_model = None\n\n # Predict function.\n self._generate_detections_fn = postprocess_ops.MultilevelDetectionGenerator(\n params.architecture.min_level,\n params.architecture.max_level,\n params.postprocess)\n\n self._transpose_input = params.train.transpose_input\n assert not self._transpose_input, 'Transpose input is not supported.'\n # Input layer.\n input_shape = (\n params.retinanet_parser.output_size +\n [params.retinanet_parser.num_channels])\n self._input_layer = tf.keras.layers.Input(\n shape=input_shape, name='',\n dtype=tf.bfloat16 if self._use_bfloat16 else tf.float32)\n\n def build_outputs(self, inputs, mode):\n # If the input image is transposed (from NHWC to HWCN), we need to revert it\n # back to the original shape before it's used in the computation.\n if self._transpose_input:\n inputs = tf.transpose(inputs, [3, 0, 1, 2])\n\n backbone_features = self._backbone_fn(\n inputs, is_training=(mode == mode_keys.TRAIN))\n fpn_features = self._fpn_fn(\n backbone_features, is_training=(mode == mode_keys.TRAIN))\n cls_outputs, box_outputs = self._head_fn(\n fpn_features, is_training=(mode == mode_keys.TRAIN))\n\n if self._use_bfloat16:\n levels = cls_outputs.keys()\n for level in levels:\n cls_outputs[level] = tf.cast(cls_outputs[level], tf.float32)\n box_outputs[level] = tf.cast(box_outputs[level], tf.float32)\n\n model_outputs = {\n 'cls_outputs': cls_outputs,\n 'box_outputs': box_outputs,\n }\n return model_outputs\n\n def build_loss_fn(self):\n if self._keras_model is None:\n raise ValueError('build_loss_fn() must be called after build_model().')\n\n filter_fn = self.make_filter_trainable_variables_fn()\n trainable_variables = filter_fn(self._keras_model.trainable_variables)\n\n def _total_loss_fn(labels, outputs):\n cls_loss = self._cls_loss_fn(outputs['cls_outputs'],\n labels['cls_targets'],\n labels['num_positives'])\n box_loss = self._box_loss_fn(outputs['box_outputs'],\n labels['box_targets'],\n labels['num_positives'])\n model_loss = cls_loss + self._box_loss_weight * box_loss\n l2_regularization_loss = self.weight_decay_loss(trainable_variables)\n total_loss = model_loss + l2_regularization_loss\n return {\n 'total_loss': total_loss,\n 'cls_loss': cls_loss,\n 'box_loss': box_loss,\n 'model_loss': model_loss,\n 'l2_regularization_loss': l2_regularization_loss,\n }\n\n return _total_loss_fn\n\n def build_model(self, params, mode=None):\n if self._keras_model is None:\n with backend.get_graph().as_default():\n outputs = self.model_outputs(self._input_layer, mode)\n\n model = tf.keras.models.Model(\n inputs=self._input_layer, outputs=outputs, name='retinanet')\n assert model is not None, 'Failed to build tf.keras.Model.'\n model.optimizer = self.build_optimizer()\n self._keras_model = model\n\n return self._keras_model\n\n def post_processing(self, labels, outputs):\n # TODO(yeqing): Moves the output related part into build_outputs.\n required_output_fields = ['cls_outputs', 'box_outputs']\n for field in required_output_fields:\n if field not in outputs:\n raise ValueError('\"%s\" is missing in outputs, required %s found %s',\n field, required_output_fields, outputs.keys())\n required_label_fields = ['image_info', 'groundtruths']\n for field in required_label_fields:\n if field not in labels:\n raise ValueError('\"%s\" is missing in labels, required %s found %s',\n field, required_label_fields, labels.keys())\n boxes, scores, classes, valid_detections = self._generate_detections_fn(\n
outputs['box_outputs'], outputs['cls_outputs'],\n labels['anchor_boxes'], labels['image_info'][:, 1:2, :])\n # Discards the old output tensors to save memory. The `cls_outputs` and\n # `box_outputs` are pretty big and could potentially lead to memory issues.\n outputs = {\n 'source_id': labels['groundtruths']['source_id'],\n 'image_info': labels['image_info'],\n 'num_detections': valid_detections,\n 'detection_boxes': boxes,\n 'detection_classes': classes,\n 'detection_scores': scores,\n }\n\n if 'groundtruths' in labels:\n labels['source_id'] = labels['groundtruths']['source_id']\n labels['boxes'] = labels['groundtruths']['boxes']\n labels['classes'] = labels['groundtruths']['classes']\n labels['areas'] = labels['groundtruths']['areas']\n labels['is_crowds'] = labels['groundtruths']['is_crowds']\n\n return labels, outputs\n\n def eval_metrics(self):\n return eval_factory.evaluator_generator(self._params.eval)\n", "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Classification network.\"\"\"\n# pylint: disable=g-classes-have-attributes\nfrom __future__ import absolute_import\nfrom __future__ import division\n# from __future__ import google_type_annotations\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\n\[email protected]_keras_serializable(package='Text')\nclass TokenClassification(tf.keras.Model):\n \"\"\"TokenClassification network head for BERT modeling.\n\n This network implements a simple token classifier head based on a dense layer.\n\n Arguments:\n input_width: The innermost dimension of the input tensor to this network.\n num_classes: The number of classes that this network should classify to.\n activation: The activation, if any, for the dense layer in this network.\n initializer: The initializer for the dense layer in this network. Defaults to\n a Glorot uniform initializer.\n output: The output style for this network. Can be either 'logits' or\n 'predictions'.\n \"\"\"\n\n def __init__(self,\n input_width,\n num_classes,\n initializer='glorot_uniform',\n output='logits',\n **kwargs):\n self._self_setattr_tracking = False\n self._config_dict = {\n 'input_width': input_width,\n 'num_classes': num_classes,\n 'initializer': initializer,\n 'output': output,\n }\n\n sequence_data = tf.keras.layers.Input(\n shape=(None, input_width), name='sequence_data', dtype=tf.float32)\n\n self.logits = tf.keras.layers.Dense(\n num_classes,\n activation=None,\n kernel_initializer=initializer,\n name='predictions/transform/logits')(\n sequence_data)\n predictions = tf.keras.layers.Activation(tf.nn.log_softmax)(self.logits)\n\n if output == 'logits':\n output_tensors = self.logits\n elif output == 'predictions':\n output_tensors = predictions\n else:\n raise ValueError(\n ('Unknown `output` value \"%s\".
`output` can be either \"logits\" or '\n '\"predictions\"') % output)\n\n super(TokenClassification, self).__init__(\n inputs=[sequence_data], outputs=output_tensors, **kwargs)\n\n def get_config(self):\n return self._config_dict\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n return cls(**config)\n" ]
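Note on the TokenClassification record above: the head is nothing more than a dense projection from per-token features to per-class logits, optionally wrapped in a log-softmax. A minimal runnable sketch of that computation, with hypothetical sizes (batch, sequence length, feature width, and class count are all invented for illustration):

import tensorflow as tf

# Hypothetical sizes, for illustration only.
batch, seq_len, input_width, num_classes = 2, 8, 16, 3
sequence_data = tf.random.normal((batch, seq_len, input_width))

# Dense layer maps per-token features to per-class logits (the 'logits'
# output style above); log_softmax gives the 'predictions' style.
logits = tf.keras.layers.Dense(num_classes, activation=None)(sequence_data)
predictions = tf.nn.log_softmax(logits, axis=-1)
print(logits.shape, predictions.shape)  # (2, 8, 3) (2, 8, 3)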
[ [ "tensorflow.compat.v1.math.sigmoid", "tensorflow.compat.v1.concat", "numpy.cumsum", "tensorflow.compat.v1.py_func", "tensorflow.compat.v1.executing_eagerly", "tensorflow.compat.v1.shape", "numpy.zeros_like", "tensorflow.compat.v1.image.grayscale_to_rgb", "numpy.histogram", "tensorflow.compat.v1.constant", "numpy.ones_like", "numpy.greater", "tensorflow.compat.v1.image.resize_images", "numpy.uint8", "tensorflow.compat.v1.py_function", "tensorflow.compat.v1.summary.image", "numpy.arange", "numpy.ceil", "tensorflow.compat.v1.image.resize", "matplotlib.pyplot.figure", "tensorflow.compat.v1.ones_like", "numpy.isnan", "tensorflow.compat.v1.map_fn", "tensorflow.compat.v1.cast", "numpy.array", "numpy.logical_and", "numpy.sum", "numpy.abs", "tensorflow.compat.v1.expand_dims", "tensorflow.compat.v1.gfile.Open", "numpy.sort", "tensorflow.compat.v1.squeeze" ], [ "numpy.expand_dims", "numpy.maximum", "numpy.minimum", "numpy.allclose", "numpy.arange", "tensorflow.compat.v1.reduce_sum", "tensorflow.compat.v1.zeros", "tensorflow.compat.v1.shape", "tensorflow.compat.v1.Graph", "numpy.random.uniform", "numpy.zeros", "tensorflow.compat.v1.constant" ], [ "tensorflow.transpose", "tensorflow.keras.models.Model", "tensorflow.cast", "tensorflow.python.keras.backend.get_graph", "tensorflow.keras.layers.Input" ], [ "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.Activation", "tensorflow.keras.utils.register_keras_serializable", "tensorflow.keras.layers.Input" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] } ]
huy-ha/dreamer-pytorch
[ "98561a5fe4ee5323b955f5fc79bbebf483f08d58", "98561a5fe4ee5323b955f5fc79bbebf483f08d58", "98561a5fe4ee5323b955f5fc79bbebf483f08d58" ]
[ "dreamer/models/rnns.py", "dreamer/models/observation.py", "dmc2gym/wrappers.py" ]
[ "import torch\nimport torch.distributions as td\nimport torch.nn as nn\nimport torch.nn.functional as tf\nfrom rlpyt.utils.collections import namedarraytuple\nfrom rlpyt.utils.buffer import buffer_method\n\nfrom dreamer.utils.module import FreezeParameters\n\nRSSMState = namedarraytuple('RSSMState', ['mean', 'std', 'stoch', 'deter'])\n\n\ndef stack_states(rssm_states: list, dim):\n return RSSMState(\n torch.stack([state.mean for state in rssm_states], dim=dim),\n torch.stack([state.std for state in rssm_states], dim=dim),\n torch.stack([state.stoch for state in rssm_states], dim=dim),\n torch.stack([state.deter for state in rssm_states], dim=dim),\n )\n\n\ndef get_feat(rssm_state: RSSMState):\n return torch.cat((rssm_state.stoch, rssm_state.deter), dim=-1)\n\n\ndef get_dist(rssm_state: RSSMState):\n return td.independent.Independent(td.Normal(rssm_state.mean, rssm_state.std), 1)\n\n\nclass TransitionBase(nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, prev_action, prev_state):\n \"\"\":return: next state\"\"\"\n raise NotImplementedError\n\n\nclass RepresentationBase(nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, obs_embed, prev_action, prev_state):\n \"\"\":return: next state\"\"\"\n raise NotImplementedError\n\n\nclass RollOutModule(nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, steps, obs_embed, prev_action, prev_state):\n raise NotImplementedError\n\n\nclass RSSMTransition(TransitionBase):\n def __init__(self, action_size, stochastic_size=30, deterministic_size=200, hidden_size=200, activation=nn.ELU,\n distribution=td.Normal):\n super().__init__()\n self._action_size = action_size\n self._stoch_size = stochastic_size\n self._deter_size = deterministic_size\n self._hidden_size = hidden_size\n self._activation = activation\n self._cell = nn.GRUCell(hidden_size, deterministic_size)\n self._rnn_input_model = self._build_rnn_input_model()\n self._stochastic_prior_model = self._build_stochastic_model()\n self._dist = distribution\n\n def _build_rnn_input_model(self):\n rnn_input_model = [\n nn.Linear(self._action_size + self._stoch_size, self._hidden_size)]\n rnn_input_model += [self._activation()]\n return nn.Sequential(*rnn_input_model)\n\n def _build_stochastic_model(self):\n stochastic_model = [nn.Linear(self._hidden_size, self._hidden_size)]\n stochastic_model += [self._activation()]\n stochastic_model += [nn.Linear(self._hidden_size,\n 2 * self._stoch_size)]\n return nn.Sequential(*stochastic_model)\n\n def initial_state(self, batch_size, **kwargs):\n return RSSMState(\n torch.zeros(batch_size, self._stoch_size, **kwargs),\n torch.zeros(batch_size, self._stoch_size, **kwargs),\n torch.zeros(batch_size, self._stoch_size, **kwargs),\n torch.zeros(batch_size, self._deter_size, **kwargs),\n )\n\n def forward(self, prev_action: torch.Tensor, prev_state: RSSMState):\n if len(prev_action.shape) != len(prev_state.stoch.shape):\n prev_state = RSSMState(\n mean=prev_state.mean.unsqueeze(dim=0),\n std=prev_state.std.unsqueeze(dim=0),\n stoch=prev_state.stoch.unsqueeze(dim=0),\n deter=prev_state.deter.unsqueeze(dim=0))\n rnn_input = self._rnn_input_model(\n torch.cat([prev_action, prev_state.stoch], dim=-1))\n deter_state = self._cell(rnn_input, prev_state.deter)\n mean, std = torch.chunk(\n self._stochastic_prior_model(deter_state), 2, dim=-1)\n std = tf.softplus(std) + 0.1\n dist = self._dist(mean, std)\n stoch_state = dist.rsample()\n return RSSMState(mean, std, stoch_state, deter_state)\n\n\nclass 
RSSMRepresentation(RepresentationBase):\n def __init__(self, transition_model: RSSMTransition, obs_embed_size, action_size, stochastic_size=30,\n deterministic_size=200, hidden_size=200, activation=nn.ELU, distribution=td.Normal):\n super().__init__()\n self._transition_model = transition_model\n self._obs_embed_size = obs_embed_size\n self._action_size = action_size\n self._stoch_size = stochastic_size\n self._deter_size = deterministic_size\n self._hidden_size = hidden_size\n self._activation = activation\n self._dist = distribution\n self._stochastic_posterior_model = self._build_stochastic_model()\n\n def _build_stochastic_model(self):\n stochastic_model = [\n nn.Linear(self._deter_size + self._obs_embed_size, self._hidden_size)]\n stochastic_model += [self._activation()]\n stochastic_model += [nn.Linear(self._hidden_size,\n 2 * self._stoch_size)]\n return nn.Sequential(*stochastic_model)\n\n def initial_state(self, batch_size, **kwargs):\n return RSSMState(\n torch.zeros(batch_size, self._stoch_size, **kwargs),\n torch.zeros(batch_size, self._stoch_size, **kwargs),\n torch.zeros(batch_size, self._stoch_size, **kwargs),\n torch.zeros(batch_size, self._deter_size, **kwargs),\n )\n\n def forward(self, obs_embed: torch.Tensor, prev_action: torch.Tensor, prev_state: RSSMState):\n prior_state = self._transition_model(prev_action, prev_state)\n x = torch.cat([prior_state.deter, obs_embed], -1)\n mean, std = torch.chunk(self._stochastic_posterior_model(x), 2, dim=-1)\n std = tf.softplus(std) + 0.1\n dist = self._dist(mean, std)\n stoch_state = dist.rsample()\n posterior_state = RSSMState(mean, std, stoch_state, prior_state.deter)\n return prior_state, posterior_state\n\n\nclass RSSMRollout(RollOutModule):\n def __init__(self, representation_model: RSSMRepresentation, transition_model: RSSMTransition):\n super().__init__()\n self.representation_model = representation_model\n self.transition_model = transition_model\n\n def forward(self, steps: int, obs_embed: torch.Tensor, action: torch.Tensor, prev_state: RSSMState):\n return self.rollout_representation(steps, obs_embed, action, prev_state)\n\n def rollout_representation(self, steps: int, obs_embed: torch.Tensor, action: torch.Tensor,\n prev_state: RSSMState):\n \"\"\"\n Roll out the model with actions and observations from data.\n :param steps: number of steps to roll out\n :param obs_embed: size(time_steps, batch_size, embedding_size)\n :param action: size(time_steps, batch_size, action_size)\n :param prev_state: RSSM state, size(batch_size, state_size)\n :return: prior, posterior states. size(time_steps, batch_size, state_size)\n \"\"\"\n priors = []\n posteriors = []\n for t in range(steps):\n prior_state, posterior_state = self.representation_model(\n obs_embed[t], action[t], prev_state)\n priors.append(prior_state)\n posteriors.append(posterior_state)\n prev_state = posterior_state\n prior = stack_states(priors, dim=0)\n post = stack_states(posteriors, dim=0)\n return prior, post\n\n def rollout_transition(self, steps: int, action: torch.Tensor, prev_state: RSSMState):\n \"\"\"\n Roll out the model with actions from data.\n :param steps: number of steps to roll out\n :param action: size(time_steps, batch_size, action_size)\n :param prev_state: RSSM state, size(batch_size, state_size)\n :return: prior states. 
size(time_steps, batch_size, state_size)\n \"\"\"\n priors = []\n state = prev_state\n for t in range(steps):\n state = self.transition_model(action[t], state)\n priors.append(state)\n return stack_states(priors, dim=0)\n\n def rollout_policy(self, steps: int, policy, prev_state: RSSMState):\n \"\"\"\n Roll out the model with a policy function.\n :param steps: number of steps to roll out\n :param policy: RSSMState -> action\n :param prev_state: RSSM state, size(batch_size, state_size)\n :return: next states size(time_steps, batch_size, state_size),\n actions size(time_steps, batch_size, action_size)\n \"\"\"\n state = prev_state\n next_states = []\n actions = []\n state = buffer_method(state, 'detach')\n for t in range(steps):\n action, _ = policy(buffer_method(state, 'detach'))\n state = self.transition_model(action, state)\n next_states.append(state)\n actions.append(action)\n next_states = stack_states(next_states, dim=0)\n actions = torch.stack(actions, dim=0)\n return next_states, actions\n", "import numpy as np\nimport torch\nimport torch.distributions as td\nimport torch.nn as nn\n\n\nclass ObservationEncoder(nn.Module):\n def __init__(self, depth=32, stride=2, shape=(3, 64, 64), activation=nn.ReLU):\n super().__init__()\n self.convolutions = nn.Sequential(\n nn.Conv2d(shape[0], 1 * depth, 4, stride),\n activation(),\n nn.Conv2d(1 * depth, 2 * depth, 4, stride),\n activation(),\n nn.Conv2d(2 * depth, 4 * depth, 4, stride),\n activation(),\n nn.Conv2d(4 * depth, 8 * depth, 4, stride),\n activation(),\n )\n self.shape = shape\n self.stride = stride\n self.depth = depth\n\n def forward(self, obs):\n batch_shape = obs.shape[:-3]\n img_shape = obs.shape[-3:]\n embed = self.convolutions(obs.reshape(-1, *img_shape))\n embed = torch.reshape(embed, (*batch_shape, -1))\n return embed\n\n @property\n def embed_size(self):\n conv1_shape = conv_out_shape(self.shape[1:], 0, 4, self.stride)\n conv2_shape = conv_out_shape(conv1_shape, 0, 4, self.stride)\n conv3_shape = conv_out_shape(conv2_shape, 0, 4, self.stride)\n conv4_shape = conv_out_shape(conv3_shape, 0, 4, self.stride)\n embed_size = 8 * self.depth * np.prod(conv4_shape).item()\n return embed_size\n\n\nclass ObservationDecoder(nn.Module):\n def __init__(self, depth=32, stride=2, activation=nn.ReLU, embed_size=1024, shape=(3, 64, 64)):\n super().__init__()\n self.depth = depth\n self.shape = shape\n\n c, h, w = shape\n conv1_kernel_size = 6\n conv2_kernel_size = 6\n conv3_kernel_size = 5\n conv4_kernel_size = 5\n padding = 0\n conv1_shape = conv_out_shape((h, w), padding, conv1_kernel_size, stride)\n conv1_pad = output_padding_shape((h, w), conv1_shape, padding, conv1_kernel_size, stride)\n conv2_shape = conv_out_shape(conv1_shape, padding, conv2_kernel_size, stride)\n conv2_pad = output_padding_shape(conv1_shape, conv2_shape, padding, conv2_kernel_size, stride)\n conv3_shape = conv_out_shape(conv2_shape, padding, conv3_kernel_size, stride)\n conv3_pad = output_padding_shape(conv2_shape, conv3_shape, padding, conv3_kernel_size, stride)\n conv4_shape = conv_out_shape(conv3_shape, padding, conv4_kernel_size, stride)\n conv4_pad = output_padding_shape(conv3_shape, conv4_shape, padding, conv4_kernel_size, stride)\n self.conv_shape = (32 * depth, *conv4_shape)\n self.linear = nn.Linear(embed_size, 32 * depth * np.prod(conv4_shape).item())\n self.decoder = nn.Sequential(\n nn.ConvTranspose2d(32 * depth, 4 * depth, conv4_kernel_size, stride, output_padding=conv4_pad),\n activation(),\n nn.ConvTranspose2d(4 * depth, 2 * depth, conv3_kernel_size, 
stride, output_padding=conv3_pad),\n activation(),\n nn.ConvTranspose2d(2 * depth, 1 * depth, conv2_kernel_size, stride, output_padding=conv2_pad),\n activation(),\n nn.ConvTranspose2d(1 * depth, shape[0], conv1_kernel_size, stride, output_padding=conv1_pad),\n )\n\n def forward(self, x):\n \"\"\"\n :param x: size(*batch_shape, embed_size)\n :return: obs_dist = size(*batch_shape, *self.shape)\n \"\"\"\n batch_shape = x.shape[:-1]\n embed_size = x.shape[-1]\n squeezed_size = np.prod(batch_shape).item()\n x = x.reshape(squeezed_size, embed_size)\n x = self.linear(x)\n x = torch.reshape(x, (squeezed_size, *self.conv_shape))\n x = self.decoder(x)\n mean = torch.reshape(x, (*batch_shape, *self.shape))\n obs_dist = td.Independent(td.Normal(mean, 1), len(self.shape))\n return obs_dist\n\n\ndef conv_out(h_in, padding, kernel_size, stride):\n return int((h_in + 2. * padding - (kernel_size - 1.) - 1.) / stride + 1.)\n\n\ndef output_padding(h_in, conv_out, padding, kernel_size, stride):\n return h_in - (conv_out - 1) * stride + 2 * padding - (kernel_size - 1) - 1\n\n\ndef conv_out_shape(h_in, padding, kernel_size, stride):\n return tuple(conv_out(x, padding, kernel_size, stride) for x in h_in)\n\n\ndef output_padding_shape(h_in, conv_out, padding, kernel_size, stride):\n return tuple(output_padding(h_in[i], conv_out[i], padding, kernel_size, stride) for i in range(len(h_in)))\n", "from gym import spaces, core\nimport glob\nimport os\nimport local_dm_control_suite as suite\nfrom dm_env import specs\nimport numpy as np\nimport skimage.io\nfrom rlpyt.envs.base import Env, EnvStep\nfrom dmc2gym import natural_imgsource\n\n\ndef _spec_to_box(spec):\n def extract_min_max(s):\n assert s.dtype == np.float64 or s.dtype == np.float32\n dim = np.int(np.prod(s.shape))\n if type(s) == specs.Array:\n bound = np.inf * np.ones(dim, dtype=np.float32)\n return -bound, bound\n elif type(s) == specs.BoundedArray:\n zeros = np.zeros(dim, dtype=np.float32)\n return s.minimum + zeros, s.maximum + zeros\n\n mins, maxs = [], []\n for s in spec:\n mn, mx = extract_min_max(s)\n mins.append(mn)\n maxs.append(mx)\n low = np.concatenate(mins, axis=0)\n high = np.concatenate(maxs, axis=0)\n assert low.shape == high.shape\n return spaces.Box(low, high, dtype=np.float32)\n\n\ndef _flatten_obs(obs):\n obs_pieces = []\n for v in obs.values():\n flat = np.array([v]) if np.isscalar(v) else v.ravel()\n obs_pieces.append(flat)\n return np.concatenate(obs_pieces, axis=0)\n\n\nclass DMCWrapper(core.Env, Env):\n def __init__(\n self,\n domain_name,\n task_name,\n resource_files,\n img_source,\n total_frames,\n task_kwargs=None,\n visualize_reward={},\n from_pixels=False,\n height=84,\n width=84,\n camera_id=0,\n frame_skip=1,\n environment_kwargs=None\n ):\n assert 'random' in task_kwargs, 'please specify a seed, for deterministic behaviour'\n self._from_pixels = from_pixels\n self._height = height\n self._width = width\n self._camera_id = camera_id\n self._frame_skip = frame_skip\n self._img_source = img_source\n\n # create task\n self._env = suite.load(\n domain_name=domain_name,\n task_name=task_name,\n task_kwargs=task_kwargs,\n visualize_reward=visualize_reward,\n environment_kwargs=environment_kwargs\n )\n\n # true and normalized action spaces\n self._true_action_space = _spec_to_box([self._env.action_spec()])\n self._norm_action_space = spaces.Box(\n low=-1.0,\n high=1.0,\n shape=self._true_action_space.shape,\n dtype=np.float32\n )\n\n # create observation space\n if from_pixels:\n self._observation_space = spaces.Box(\n low=0, 
high=255, shape=[3, height, width], dtype=np.uint8\n )\n else:\n self._observation_space = _spec_to_box(\n self._env.observation_spec().values()\n )\n\n self._internal_state_space = spaces.Box(\n low=-np.inf,\n high=np.inf,\n shape=self._env.physics.get_state().shape,\n dtype=np.float32\n )\n\n # background\n if img_source is not None:\n shape2d = (height, width)\n if img_source == \"color\":\n self._bg_source = natural_imgsource.RandomColorSource(shape2d)\n elif img_source == \"noise\":\n self._bg_source = natural_imgsource.NoiseSource(shape2d)\n else:\n files = glob.glob(os.path.expanduser(\n resource_files), recursive=True)\n assert len(files), \"Pattern {} does not match any files\".format(\n resource_files\n )\n if img_source == \"images\":\n self._bg_source = natural_imgsource.RandomImageSource(\n shape2d, files, grayscale=True, total_frames=total_frames)\n elif img_source == \"video\":\n self._bg_source = natural_imgsource.RandomVideoSource(\n shape2d, files, grayscale=True, total_frames=total_frames)\n else:\n raise Exception(\"img_source %s not defined.\" % img_source)\n\n # set seed\n self.seed(seed=task_kwargs.get('random', 1))\n from dreamer.envs.env import EnvInfo\n self.info_class = EnvInfo\n\n def __getattr__(self, name):\n return getattr(self._env, name)\n\n def _get_obs(self, time_step):\n if self._from_pixels:\n obs = self.render(\n height=self._height,\n width=self._width,\n camera_id=self._camera_id\n )\n if self._img_source is not None:\n mask = np.logical_and(\n (obs[:, :, 2] > obs[:, :, 1]), (obs[:, :, 2] > obs[:, :, 0])) # hardcoded for dmc\n bg = self._bg_source.get_image()\n obs[mask] = bg[mask]\n obs = obs.transpose(2, 0, 1).copy()\n else:\n obs = _flatten_obs(time_step.observation)\n return obs\n\n def _convert_action(self, action):\n action = action.astype(np.float64)\n true_delta = self._true_action_space.high - self._true_action_space.low\n norm_delta = self._norm_action_space.high - self._norm_action_space.low\n action = (action - self._norm_action_space.low) / norm_delta\n action = action * true_delta + self._true_action_space.low\n action = action.astype(np.float32)\n return action\n\n @property\n def observation_space(self):\n return self._observation_space\n\n @property\n def internal_state_space(self):\n return self._internal_state_space\n\n @property\n def action_space(self):\n return self._norm_action_space\n\n def seed(self, seed):\n self._true_action_space.seed(seed)\n self._norm_action_space.seed(seed)\n self._observation_space.seed(seed)\n\n def step(self, action):\n assert self._norm_action_space.contains(action)\n action = self._convert_action(action)\n assert self._true_action_space.contains(action)\n reward = 0\n extra = {'internal_state': self._env.physics.get_state().copy()}\n\n for _ in range(self._frame_skip):\n time_step = self._env.step(action)\n reward += time_step.reward or 0\n done = time_step.last()\n if done:\n break\n obs = self._get_obs(time_step)\n extra['discount'] = time_step.discount\n extra['traj_done'] = done\n extra['game_score'] = reward\n info = self.info_class(**extra)\n return EnvStep(obs, reward, done, info)\n\n def reset(self):\n time_step = self._env.reset()\n obs = self._get_obs(time_step)\n return obs\n\n def render(self, mode='rgb_array', height=None, width=None, camera_id=0):\n assert mode == 'rgb_array', 'only support rgb_array mode, given %s' % mode\n height = height or self._height\n width = width or self._width\n camera_id = camera_id or self._camera_id\n return self._env.physics.render(\n height=height, 
width=width, camera_id=camera_id\n )\n" ]
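DMCWrapper._convert_action in the record above rescales a policy action from the normalized [-1, 1] box onto the environment's true action bounds with a plain affine map. A small sketch with toy bounds (both the bounds and the action are invented for illustration):

import numpy as np

norm_low, norm_high = -1.0, 1.0                      # normalized action space
true_low = np.array([0.0, -2.0], dtype=np.float32)   # toy true bounds
true_high = np.array([1.0, 2.0], dtype=np.float32)

action = np.array([-1.0, 0.5], dtype=np.float32)     # action from the agent
frac = (action - norm_low) / (norm_high - norm_low)  # map into [0, 1]
true_action = frac * (true_high - true_low) + true_low
print(true_action)  # [0. 1.]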
[ [ "torch.nn.Sequential", "torch.cat", "torch.zeros", "torch.nn.Linear", "torch.distributions.Normal", "torch.stack", "torch.nn.GRUCell", "torch.nn.functional.softplus" ], [ "torch.nn.ConvTranspose2d", "torch.reshape", "torch.nn.Conv2d", "numpy.prod", "torch.distributions.Normal" ], [ "numpy.logical_and", "numpy.ones", "numpy.concatenate", "numpy.isscalar", "numpy.prod", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
PhilaController/phl-budget-data
[ "fd249937c843aaff2375624160e2bec0b8043e3c", "fd249937c843aaff2375624160e2bec0b8043e3c" ]
[ "src/phl_budget_data/etl/collections/monthly/school.py", "src/phl_budget_data/etl/collections/monthly/city.py" ]
[ "\"\"\"Module for parsing montly school collections data.\"\"\"\nfrom typing import ClassVar\n\nimport pandas as pd\nimport pdfplumber\n\nfrom ...utils.misc import rename_tax_rows\nfrom ...utils.pdf import extract_words, words_to_table\nfrom .core import COLLECTION_TYPES, MonthlyCollectionsReport, get_column_names\n\n\nclass SchoolTaxCollections(MonthlyCollectionsReport): # type: ignore\n \"\"\"\n Monthly School District Collections Report.\n\n Parameters\n ----------\n month :\n the calendar month number (starting at 1)\n year :\n the calendar year\n \"\"\"\n\n report_type: ClassVar[COLLECTION_TYPES] = \"school\"\n\n @property\n def legacy(self) -> bool:\n \"\"\"Whether the format is the legacy or current version.\"\"\"\n return self.num_pages > 1\n\n def extract(self) -> pd.DataFrame:\n \"\"\"Internal function to parse the contents of a legacy PDF page.\"\"\"\n\n # Open the PDF document\n with pdfplumber.open(self.path) as pdf:\n\n # Loop over each page\n out: list[pd.DataFrame] = []\n for pg in pdf.pages:\n\n # Extract the words\n words = extract_words(\n pg, keep_blank_chars=False, x_tolerance=1, y_tolerance=1\n )\n\n # Group the words into a table\n data = words_to_table(\n words,\n text_tolerance_y=5,\n text_tolerance_x=5,\n column_tolerance=20,\n min_col_sep=24,\n row_header_tolerance=10,\n )\n\n # Skip the header (first five rows)\n data = data.iloc[6:]\n assert \"REAL ESTATE\" in data.iloc[0][0]\n\n # # Remove first row of header if we need to\n # for phrase in [\"prelim\", \"final\", \"budget\"]:\n # sel = data[0].str.lower().str.startswith(phrase)\n # data = data.loc[~sel]\n\n # # Remove empty columns\n # data = remove_empty_columns(data, use_nan=False)\n\n # Check number of columns\n if len(out):\n if len(data.columns) != len(out[-1].columns):\n raise ValueError(\"Column mismatch when parsing multiple pages\")\n\n # Save it\n out.append(data)\n\n # Return concatenation\n return pd.concat(out, axis=0, ignore_index=True)\n\n def transform(self, data: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Transform the raw parsing data into a clean data frame.\"\"\"\n\n # Call base transform\n data = super().transform(data)\n\n # Determine columns for the report\n columns = get_column_names(self.month, self.year)\n\n ncols = len(data.columns)\n assert ncols in [11, 12, 14]\n\n if ncols == 14:\n data = data.drop(labels=[7, 8, 9, 10], axis=1)\n else:\n data = data.drop(labels=[data.columns[-6]], axis=1)\n\n # Set the columns\n columns = [\"name\"] + columns[-7:]\n data = data[[0] + list(data.columns[-7:])]\n\n assert len(columns) == len(data.columns)\n assert len(data) in [14, 15]\n\n # Do current/prior/total\n if len(data) == 14:\n index = rename_tax_rows(\n data,\n 0,\n [\"real_estate\", \"school_income\", \"use_and_occupancy\", \"liquor\"],\n )\n else:\n index = rename_tax_rows(\n data,\n 0,\n [\"real_estate\"], # , \"school_income\", \"use_and_occupancy\", \"liquor\"],\n )\n\n if \"PAYMENT\" in data.loc[index, 0]:\n data.loc[index, 0] = \"pilots_total\"\n index += 1\n\n index = rename_tax_rows(\n data,\n index,\n [\"school_income\", \"use_and_occupancy\", \"liquor\"],\n )\n\n if \"PAYMENT\" in data.loc[index, 0]:\n data.loc[index, 0] = \"pilots_total\"\n index += 1\n\n # Other non-tax\n data.loc[index, 0] = \"other_nontax_total\"\n index += 1\n\n # Total\n data.loc[index, 0] = \"total_revenue_total\"\n index += 1\n\n # Set the columns\n data.columns = columns\n\n # Split out current/prior/total into its own column\n data[\"kind\"] = data[\"name\"].apply(lambda x: x.split(\"_\")[-1])\n 
data[\"name\"] = data[\"name\"].apply(lambda x: \"_\".join(x.split(\"_\")[:-1]))\n\n return data\n\n def validate(self, data: pd.DataFrame) -> bool:\n \"\"\"Validate the input data.\"\"\"\n\n # Sum up\n t = data.query(\"kind == 'total' and name != 'total_revenue'\")\n t = t.filter(regex=f\"^{self.month_name}\", axis=1)\n\n # Compare to total\n for col in t.columns:\n total_revenue = data.query(\"name == 'total_revenue'\")[col].squeeze()\n diff = t[col].sum() - total_revenue\n assert diff < 5\n\n return True\n\n def load(self, data: pd.DataFrame) -> None:\n \"\"\"Load the data.\"\"\"\n\n # Get the path\n dirname = self.get_data_directory(\"processed\")\n path = dirname / f\"{self.year}-{self.month:02d}-tax.csv\"\n\n # Load\n super()._load_csv_data(data, path)\n", "\"\"\"Module for parsing montly city collections data.\"\"\"\n\nfrom typing import ClassVar, Optional\n\nimport pandas as pd\nimport pdfplumber\n\nfrom ...utils.pdf import extract_words, words_to_table\nfrom ...utils.transformations import remove_empty_columns\nfrom .core import COLLECTION_TYPES, MonthlyCollectionsReport\n\n\ndef find_top_cutoff(pg: pdfplumber.page.Page) -> float:\n \"\"\"\n Search for the top cutoff of header on the page.\n\n This looks for specific text in the header.\n \"\"\"\n # Extract text to look for a header\n pg_text = pg.extract_text()\n\n # This shows up in the header columns\n top = 0\n has_header = \"FISCAL YEAR TO DATE\" in pg_text\n if has_header:\n\n # Determine if header has 1 or 2 horizontal lines\n # We want to find the bottom horizontal line to trim\n rects = sorted(pg.rects, key=lambda x: x[\"y0\"], reverse=True)\n if \"Comparative Statement\" in pg_text:\n top = rects[1][\"bottom\"]\n else:\n top = rects[0][\"bottom\"]\n\n return top\n\n\ndef find_footer_cutoff(pg: pdfplumber.page.Page) -> Optional[dict[str, float]]:\n \"\"\"Search for a horizontal line separating footer from data.\"\"\"\n\n rects = [r for r in pg.rects if r[\"width\"] / pg.width > 0.5]\n max_rect = max(rects, key=lambda r: r[\"bottom\"])\n if max_rect[\"bottom\"] > 0.9 * float(pg.height):\n return max_rect\n else:\n return None\n\n\nclass CityCollectionsReport(MonthlyCollectionsReport): # type: ignore\n \"\"\"\n Monthly City Collections Report.\n\n Parameters\n ----------\n month :\n the calendar month number (starting at 1)\n year :\n the calendar year\n \"\"\"\n\n report_type: ClassVar[COLLECTION_TYPES] = \"city\"\n\n @property\n def legacy(self) -> bool:\n \"\"\"Whether the format is the legacy or current version.\"\"\"\n return self.num_pages > 4\n\n def extract(self) -> pd.DataFrame:\n \"\"\"Parse and extract the contents of a legacy PDF page.\"\"\"\n\n # Open the PDF document\n with pdfplumber.open(self.path) as pdf:\n\n # Loop over each page\n out: list[pd.DataFrame] = []\n for pg in pdf.pages:\n\n # Is there a width-spanning line at the top?\n top = find_top_cutoff(pg)\n\n # Is there a width-spanning line at the bottom?\n footer_cutoff = find_footer_cutoff(pg)\n if footer_cutoff is not None:\n bottom = footer_cutoff[\"bottom\"]\n else:\n bottom = pg.height\n\n # Crop the main part of the document and extract the words\n cropped = pg.crop([0, top, pg.width, bottom])\n words = extract_words(\n cropped, keep_blank_chars=False, x_tolerance=1, y_tolerance=1\n )\n\n # Group the words into a table\n data = words_to_table(\n words,\n text_tolerance_y=5,\n text_tolerance_x=5,\n column_tolerance=20,\n min_col_sep=24,\n row_header_tolerance=20,\n )\n\n # Remove first row of header if we need to\n for phrase in [\"prelim\", 
\"final\", \"budget\"]:\n sel = data[0].str.lower().str.startswith(phrase)\n data = data.loc[~sel]\n\n # Remove empty columns\n data = remove_empty_columns(data, use_nan=False)\n\n # Check number of columns\n if len(out):\n if len(data.columns) != len(out[-1].columns):\n raise ValueError(\"Column mismatch when parsing multiple pages\")\n\n # Save it\n out.append(data)\n\n # Return concatenation\n return pd.concat(out, axis=0, ignore_index=True)\n" ]
[ [ "pandas.concat" ], [ "pandas.concat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
code-backdoor/code-backdoor
[ "1eeb3d79aa8a54c8f08e8d0156b569de5edd974e", "1eeb3d79aa8a54c8f08e8d0156b569de5edd974e", "9c3329dd8387c8242deb52bf590ebe3ac795f8de", "1eeb3d79aa8a54c8f08e8d0156b569de5edd974e", "9c3329dd8387c8242deb52bf590ebe3ac795f8de", "1eeb3d79aa8a54c8f08e8d0156b569de5edd974e", "9c3329dd8387c8242deb52bf590ebe3ac795f8de", "1eeb3d79aa8a54c8f08e8d0156b569de5edd974e", "1eeb3d79aa8a54c8f08e8d0156b569de5edd974e" ]
[ "Birnn_Transformer/ncc/utils/graph.py", "Birnn_Transformer/ncc/eval/summarization/transformer_generator.py", "Birnn_Transformer/ncc/eval/retrieval/retrieval_metrics.py", "Birnn_Transformer/ncc/modules/seq2seq/lstm_decoder.py", "Birnn_Transformer/ncc/eval/inference/type_predictor.py", "Birnn_Transformer/ncc/models/codebert/code_docstring_unilm.py", "Birnn_Transformer/ncc/tasks/summarization/summarization.py", "Birnn_Transformer/ncc/models/legacy_distributed_data_parallel.py", "Birnn_Transformer/ncc/models/type_prediction/typetransformer.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport dgl\nimport networkx as nx\nimport numpy as np\nimport torch\n\nfrom dataset.codesearchnet import MAX_SUB_TOKEN_LEN\n\n\ndef build_graph(tree_dict, dictionary, tree_leaf_subtoken=1, DGLGraph_PAD_WORD=-1) -> dgl.DGLGraph:\n # 叶子节点存的是拆开后的subtoken ,当然,如果token拆不开,那就还是一个token\n # 用来训练的.pt数据里叶子节点token保存格式是[\"a_hu\",[\"a\",\"hu\"]],\n # (1)tree_leaf_subtoken为1时 本函数只将其subtoken转换成wordid ,#即保存为[和a对应的id,和hu对应的id],比如[23,179]\n # 如果是拆不开的token,pt数据里格式是 [\"imq\",[\"imq\",PAD_WORD]]\n # 那么这里将其转换为[和imq对应的id,和codesum.PAD_WORD],比如[258,0]\n # pad到的长度由train val test整个数据集里token拆开后最大长度决定\n # (2)tree_leaf_subtoken为0时,本函数用的拆之前的token得到wordid,即比如用a_hu得到wordid\n nx_graph = nx.DiGraph()\n\n def _build(nid, idx, tree):\n # non-leaf node, 'children': [\"sub_token\", [\"sub\", \"token\", <PAD>, <PAD>, <PAD>]]\n if not isinstance(tree[idx]['children'][1], list):\n child_ids = tree[idx]['children']\n if nid is None:\n nx_graph.add_node(0, x=[DGLGraph_PAD_WORD] * MAX_SUB_TOKEN_LEN, y=int(idx), mask=0)\n # print('node={}, x={}, y={}, mask={}'.format(0, [DGLGraph_PAD_WORD] * MAX_SUB_TOKEN_LEN, int(idx), 0))\n nid = 0\n for idx in child_ids:\n cid = nx_graph.number_of_nodes()\n y_value = int(idx)\n if not isinstance(tree[str(idx)]['children'][1], list): # non-leaf node\n nx_graph.add_node(cid, x=[DGLGraph_PAD_WORD] * MAX_SUB_TOKEN_LEN, y=y_value, mask=0)\n # print(\n # 'node={}, x={}, y={}, mask={}'.format(cid, [DGLGraph_PAD_WORD] * MAX_SUB_TOKEN_LEN, y_value, 0))\n _build(cid, str(idx), tree)\n else: # leaf node\n if tree_leaf_subtoken:\n word_index = [dictionary.index(subtoken) for subtoken in tree[str(idx)]['children'][1]]\n else:\n word_index = [dictionary.index(tree[idx]['children'][0])]\n nx_graph.add_node(cid, x=word_index, y=y_value, mask=1)\n # print('node={}, x={}, y={}, mask={}'.format(cid, word_index, y_value, 1))\n nx_graph.add_edge(cid, nid) # 因为用的 DiGraph,所以这里添加的edge应该是cid指向nid,而nid是root节点的方向,cid是叶子节点的方向\n # print('edge={}->{}'.format(cid, nid))\n else: # leaf node\n if tree_leaf_subtoken:\n word_index = [dictionary.index(subtoken) for subtoken in tree[idx]['children'][-1]]\n else:\n word_index = [dictionary.index(tree[idx]['children'][0])]\n if nid is None:\n cid = 0\n else:\n cid = nx_graph.number_of_nodes()\n nx_graph.add_node(cid, x=word_index, y=int(idx), mask=1)\n # print('node={}, x={}, y={}, mask={}'.format(cid, word_index, int(idx), 1))\n\n if nid is not None:\n nx_graph.add_edge(cid, nid) # 因为用的 DiGraph,所以这里添加的edge应该是cid指向nid,而nid是root节点的方向,cid是叶子节点的方向\n # print('edge={}->{}'.format(cid, nid))\n\n _build(None, '0', tree_dict)\n dgl_graph = dgl.DGLGraph()\n\n dgl_graph.from_networkx(nx_graph, node_attrs=['x', 'y', 'mask'])\n assert len(tree_dict) == dgl_graph.number_of_nodes(), Exception('build dgl tree error')\n return dgl_graph\n\n\ndef tree2dgl(tree_dict, dictionary, DGLGraph_PAD_WORD=-1):\n \"\"\"\n if _subtoken == True, it means that we tokenize leaf node info into sub-tokens\n e.g. [\"sub_token\", [\"sub\", \"token\", <PAD>, <PAD>, <PAD>]]\n else, no tokenization. e.g. 
[\"sub_token\"]\n \"\"\"\n _subtoken = False\n for node in tree_dict.values():\n if isinstance(node['children'][1], list):\n _subtoken = True\n break\n\n def nonleaf_node_info():\n if _subtoken:\n return [DGLGraph_PAD_WORD] * MAX_SUB_TOKEN_LEN\n else:\n return [DGLGraph_PAD_WORD]\n\n def token2idx(node_info):\n \"\"\"\n node info => indices\n if _subtoken == True, [\"sub_token\", [\"sub\", \"token\", <PAD>, <PAD>, <PAD>]] => index([\"sub\", \"token\", <PAD>, <PAD>, <PAD>])\n else, [\"sub_token\"] => index([\"sub_token\"])\n \"\"\"\n if _subtoken:\n return [dictionary.index(subtoken) for subtoken in node_info[-1]]\n else:\n return [dictionary.index(node_info[0])]\n\n \"\"\"\n how to build DGL graph?\n node: \n x: node info (if it's non-leaf nodes, padded with [-1, ...]),\n y: current node idx\n mask: if leaf node, mask=1; else, mask=0\n * if current node is the root node,\n edge: child => parent \n \"\"\"\n dgl_graph = dgl.DGLGraph()\n ids = sorted(tree_dict.keys(), key=int)\n\n dgl_graph.add_nodes(\n len(tree_dict),\n data={\n 'x': torch.LongTensor([\n token2idx(tree_dict[idx]['children']) if isinstance(tree_dict[idx]['children'][1], list) \\\n else nonleaf_node_info()\n for idx in ids\n ]),\n 'y': torch.LongTensor(range(len(tree_dict))),\n 'mask': torch.LongTensor([isinstance(tree_dict[idx]['children'][1], list) for idx in ids]),\n }\n )\n\n for idx in ids:\n node = tree_dict[idx]\n if node['parent'] is not None:\n dgl_graph.add_edges(int(idx), int(node['parent']))\n # print('edge={}->{}'.format(int(idx), int(node['parent'])))\n\n return dgl_graph\n\n\ndef tree2nx2dgl(tree_dict, dictionary, DGLGraph_PAD_WORD=-1):\n \"\"\"\n if _subtoken == True, it means that we tokenize leaf node info into sub-tokens\n e.g. [\"sub_token\", [\"sub\", \"token\", <PAD>, <PAD>, <PAD>]]\n else, no tokenization. e.g. 
[\"sub_token\"]\n \"\"\"\n _subtoken = False\n for node in tree_dict.values():\n if isinstance(node['children'][1], list):\n _subtoken = True\n break\n\n def nonleaf_node_info():\n if _subtoken:\n return [DGLGraph_PAD_WORD] * MAX_SUB_TOKEN_LEN\n else:\n return [DGLGraph_PAD_WORD]\n\n def token2idx(node_info):\n \"\"\"\n node info => indices\n if _subtoken == True, [\"sub_token\", [\"sub\", \"token\", <PAD>, <PAD>, <PAD>]] => index([\"sub\", \"token\", <PAD>, <PAD>, <PAD>])\n else, [\"sub_token\"] => index([\"sub_token\"])\n \"\"\"\n if _subtoken:\n return [dictionary.index(subtoken) for subtoken in node_info[-1]]\n else:\n return [dictionary.index(node_info[0])]\n\n \"\"\"\n how to build DGL graph?\n node: \n x: node info (if it's non-leaf nodes, padded with [-1, ...]),\n y: current node idx\n mask: if leaf node, mask=1; else, mask=0\n * if current node is the root node,\n edge: child => parent \n \"\"\"\n\n nx_graph = nx.DiGraph()\n ids = sorted(tree_dict.keys(), key=int)\n\n for idx in ids:\n node = tree_dict[idx]\n\n nx_graph.add_node(\n int(idx),\n x=token2idx(tree_dict[idx]['children']) if isinstance(tree_dict[idx]['children'][1], list) \\\n else nonleaf_node_info(),\n y=int(idx),\n mask=int(isinstance(tree_dict[idx]['children'][1], list))\n )\n # print('node={}, x={}, y={}, mask={}'.format(\n # idx, token2idx(tree_dict[idx]['children']) if isinstance(tree_dict[idx]['children'][1], list) \\\n # else nonleaf_node_info(), int(idx), int(isinstance(tree_dict[idx]['children'][1], list))))\n if node['parent'] is not None:\n nx_graph.add_edge(int(idx), int(node['parent']))\n # print('edge={}->{}'.format(int(idx), int(node['parent'])))\n\n dgl_graph = dgl.DGLGraph()\n\n dgl_graph.from_networkx(nx_graph, node_attrs=['x', 'y', 'mask'])\n assert len(tree_dict) == dgl_graph.number_of_nodes(), Exception('build dgl tree error')\n return dgl_graph\n\n\ndef pack_graph(graphs):\n def get_root_node_info(dgl_trees):\n root_indices, node_nums = [None] * len(dgl_trees), [None] * len(dgl_trees)\n for ind, tree in enumerate(dgl_trees):\n topological_nodes = dgl.topological_nodes_generator(tree)\n root_ind_tree_dgldigraph = topological_nodes[-1].item()\n root_indices[ind] = root_ind_tree_dgldigraph\n all_num_node_tree_dgldigraph = tree.number_of_nodes()\n node_nums[ind] = all_num_node_tree_dgldigraph\n root_indices = np.array(root_indices)\n num_nodes = np.array(node_nums)\n return root_indices, num_nodes,\n\n # merge many dgl graphs into a huge one\n root_indices, node_nums, = get_root_node_info(graphs)\n packed_graph = dgl.batch(graphs)\n return packed_graph, root_indices, node_nums,\n\n\nif __name__ == '__main__':\n from ncc.tasks.summarization import SummarizationTask\n\n dict = SummarizationTask.load_dictionary(\n filename='/home/yang/.ncc/multi/summarization/data-mmap/ruby/binary_ast.dict.json'\n )\n\n bin_ast = {\n \"0\": {\"type\": \"method\", \"parent\": None, \"children\": [1, 2]},\n \"1\": {\"type\": \"def_keyword\", \"parent\": 0, \"children\": [\"def\", [\"def\", \"<pad>\", \"<pad>\", \"<pad>\", \"<pad>\"]]},\n \"2\": {\"type\": \"TMP\", \"parent\": 0, \"children\": [3, 4]},\n \"3\": {\"type\": \"identifier\", \"parent\": 2, \"children\": [\"set\", [\"set\", \"<pad>\", \"<pad>\", \"<pad>\", \"<pad>\"]]},\n \"4\": {\"type\": \"TMP\", \"parent\": 2, \"children\": [5, 10]},\n \"5\": {\"type\": \"method_parameters\", \"parent\": 4, \"children\": [6, 7]},\n \"6\": {\"type\": \"LeftParenOp\", \"parent\": 5, \"children\": [\"(\", [\"(\", \"<pad>\", \"<pad>\", \"<pad>\", \"<pad>\"]]},\n \"7\": 
{\"type\": \"TMP\", \"parent\": 5, \"children\": [8, 9]}, \"8\": {\"type\": \"identifier\", \"parent\": 7,\n \"children\": [\"set_attributes\",\n [\"set\", \"attributes\", \"<pad>\",\n \"<pad>\", \"<pad>\"]]},\n \"9\": {\"type\": \"LeftParenOp\", \"parent\": 7, \"children\": [\")\", [\")\", \"<pad>\", \"<pad>\", \"<pad>\", \"<pad>\"]]},\n \"10\": {\"type\": \"TMP\", \"parent\": 4, \"children\": [11, 26]},\n \"11\": {\"type\": \"assignment\", \"parent\": 10, \"children\": [12, 13]},\n \"12\": {\"type\": \"identifier\", \"parent\": 11,\n \"children\": [\"old_attributes\", [\"old\", \"attributes\", \"<pad>\", \"<pad>\", \"<pad>\"]]},\n \"13\": {\"type\": \"TMP\", \"parent\": 11, \"children\": [14, 15]},\n \"14\": {\"type\": \"AsgnOp\", \"parent\": 13, \"children\": [\"=\", [\"=\", \"<pad>\", \"<pad>\", \"<pad>\", \"<pad>\"]]},\n \"15\": {\"type\": \"method_call\", \"parent\": 13, \"children\": [16, 17]},\n \"16\": {\"type\": \"identifier\", \"parent\": 15,\n \"children\": [\"compute_attributes\", [\"compute\", \"attributes\", \"<pad>\", \"<pad>\", \"<pad>\"]]},\n \"17\": {\"type\": \"argument_list\", \"parent\": 15, \"children\": [18, 19]},\n \"18\": {\"type\": \"LeftParenOp\", \"parent\": 17,\n \"children\": [\"(\", [\"(\", \"<pad>\", \"<pad>\", \"<pad>\", \"<pad>\"]]},\n \"19\": {\"type\": \"TMP\", \"parent\": 17, \"children\": [20, 25]},\n \"20\": {\"type\": \"call\", \"parent\": 19, \"children\": [21, 22]},\n \"21\": {\"type\": \"identifier\", \"parent\": 20,\n \"children\": [\"set_attributes\", [\"set\", \"attributes\", \"<pad>\", \"<pad>\", \"<pad>\"]]},\n \"22\": {\"type\": \"TMP\", \"parent\": 20, \"children\": [23, 24]},\n \"23\": {\"type\": \"DotOp\", \"parent\": 22, \"children\": [\".\", [\".\", \"<pad>\", \"<pad>\", \"<pad>\", \"<pad>\"]]},\n \"24\": {\"type\": \"identifier\", \"parent\": 22,\n \"children\": [\"keys\", [\"keys\", \"<pad>\", \"<pad>\", \"<pad>\", \"<pad>\"]]},\n \"25\": {\"type\": \"LeftParenOp\", \"parent\": 19,\n \"children\": [\")\", [\")\", \"<pad>\", \"<pad>\", \"<pad>\", \"<pad>\"]]},\n \"26\": {\"type\": \"TMP\", \"parent\": 10, \"children\": [27, 34]},\n \"27\": {\"type\": \"method_call\", \"parent\": 26, \"children\": [28, 29]},\n \"28\": {\"type\": \"identifier\", \"parent\": 27,\n \"children\": [\"assign_attributes\", [\"assign\", \"attributes\", \"<pad>\", \"<pad>\", \"<pad>\"]]},\n \"29\": {\"type\": \"argument_list\", \"parent\": 27, \"children\": [30, 31]},\n \"30\": {\"type\": \"LeftParenOp\", \"parent\": 29,\n \"children\": [\"(\", [\"(\", \"<pad>\", \"<pad>\", \"<pad>\", \"<pad>\"]]},\n \"31\": {\"type\": \"TMP\", \"parent\": 29, \"children\": [32, 33]},\n \"32\": {\"type\": \"identifier\", \"parent\": 31,\n \"children\": [\"set_attributes\", [\"set\", \"attributes\", \"<pad>\", \"<pad>\", \"<pad>\"]]},\n \"33\": {\"type\": \"LeftParenOp\", \"parent\": 31,\n \"children\": [\")\", [\")\", \"<pad>\", \"<pad>\", \"<pad>\", \"<pad>\"]]},\n \"34\": {\"type\": \"TMP\", \"parent\": 26, \"children\": [35, 36]},\n \"35\": {\"type\": \"yield_keyword\", \"parent\": 34,\n \"children\": [\"yield\", [\"yield\", \"<pad>\", \"<pad>\", \"<pad>\", \"<pad>\"]]},\n \"36\": {\"type\": \"TMP\", \"parent\": 34, \"children\": [37, 46]},\n \"37\": {\"type\": \"ensure\", \"parent\": 36, \"children\": [38, 39]},\n \"38\": {\"type\": \"ensure_keyword\", \"parent\": 37,\n \"children\": [\"ensure\", [\"ensure\", \"<pad>\", \"<pad>\", \"<pad>\", \"<pad>\"]]},\n \"39\": {\"type\": \"method_call\", \"parent\": 37, \"children\": [40, 41]},\n \"40\": {\"type\": \"identifier\", 
\"parent\": 39,\n \"children\": [\"assign_attributes\", [\"assign\", \"attributes\", \"<pad>\", \"<pad>\", \"<pad>\"]]},\n \"41\": {\"type\": \"argument_list\", \"parent\": 39, \"children\": [42, 43]},\n \"42\": {\"type\": \"LeftParenOp\", \"parent\": 41,\n \"children\": [\"(\", [\"(\", \"<pad>\", \"<pad>\", \"<pad>\", \"<pad>\"]]},\n \"43\": {\"type\": \"TMP\", \"parent\": 41, \"children\": [44, 45]}, \"44\": {\"type\": \"identifier\", \"parent\": 43,\n \"children\": [\"old_attributes\",\n [\"old\", \"attributes\",\n \"<pad>\", \"<pad>\",\n \"<pad>\"]]},\n \"45\": {\"type\": \"LeftParenOp\", \"parent\": 43,\n \"children\": [\")\", [\")\", \"<pad>\", \"<pad>\", \"<pad>\", \"<pad>\"]]},\n \"46\": {\"type\": \"end_keyword\", \"parent\": 36,\n \"children\": [\"end\", [\"end\", \"<pad>\", \"<pad>\", \"<pad>\", \"<pad>\"]]}}\n nx2dgl_graph = build_graph(bin_ast, dict)\n dgl_graph = tree2dgl(bin_ast, dict)\n dgl_graph", "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport torch\nfrom ncc.utils import utils\nimport torch.nn.functional as F\nfrom torch import Tensor\nfrom typing import Optional, List, Dict\n\n\nclass TransformerGenerator(object):\n def __init__(\n self,\n tgt_dict,\n beam_size=1,\n max_len_a=0,\n max_len_b=200,\n min_len=1,\n normalize_scores=True,\n len_penalty=1.,\n unk_penalty=0.,\n retain_dropout=False,\n temperature=1.,\n match_source_len=False,\n no_repeat_ngram_size=0,\n eos=None\n ):\n \"\"\"Generates translations of a given source sentence.\n\n Args:\n tgt_dict (~fairseq.data.Dictionary): target dictionary\n beam_size (int, optional): beam width (default: 1)\n max_len_a/b (int, optional): generate sequences of maximum length\n ax + b, where x is the source length\n min_len (int, optional): the minimum length of the generated output\n (not including end-of-sentence)\n normalize_scores (bool, optional): normalize scores by the length\n of the output (default: True)\n len_penalty (float, optional): length penalty, where <1.0 favors\n shorter, >1.0 favors longer sentences (default: 1.0)\n unk_penalty (float, optional): unknown word penalty, where <0\n produces more unks, >0 produces fewer (default: 0.0)\n retain_dropout (bool, optional): use dropout when generating\n (default: False)\n temperature (float, optional): temperature, where values\n >1.0 produce more uniform samples and values <1.0 produce\n sharper samples (default: 1.0)\n match_source_len (bool, optional): outputs should match the source\n length (default: False)\n \"\"\"\n self.pad = tgt_dict.pad()\n self.unk = tgt_dict.unk()\n self.bos = tgt_dict.bos()\n self.eos = tgt_dict.eos() if eos is None else eos\n self.vocab_size = len(tgt_dict)\n self.beam_size = beam_size\n # the max beam size is the dictionary size - 1, since we never select pad\n self.beam_size = min(beam_size, self.vocab_size - 1)\n self.max_len_a = max_len_a\n self.max_len_b = max_len_b\n self.min_len = min_len\n self.normalize_scores = normalize_scores\n self.len_penalty = len_penalty\n self.unk_penalty = unk_penalty\n self.retain_dropout = retain_dropout\n self.temperature = temperature\n self.match_source_len = match_source_len\n self.no_repeat_ngram_size = no_repeat_ngram_size\n assert temperature > 0, '--temperature must be greater than 0'\n\n @torch.no_grad()\n def generate(self, models, sample, **kwargs):\n \"\"\"Generate a batch of translations.\n\n Args:\n models (List[~fairseq.models.NccModel]): 
ensemble of models\n sample (dict): batch\n prefix_tokens (torch.LongTensor, optional): force decoder to begin\n with these tokens\n bos_token (int, optional): beginning of sentence token\n (default: self.eos)\n \"\"\"\n model = models[0] # for ensemble expansion\n\n if not self.retain_dropout:\n model.eval()\n\n src_tokens = sample['net_input']['src_tokens']\n src_lengths = (src_tokens != self.pad).int().sum(-1)\n bsz, src_len = src_tokens.size()\n device = src_tokens.device\n\n if self.match_source_len:\n max_len = src_lengths.max().item()\n else:\n max_len = min(\n int(self.max_len_a * src_len + self.max_len_b),\n # exclude the EOS marker\n model.max_decoder_positions() - 1,\n )\n assert self.min_len <= max_len, 'min_len cannot be larger than max_len, please adjust these!'\n\n encoder_out = model.encoder(sample['net_input']['src_tokens'], src_lengths=sample['net_input']['src_lengths'])\n\n prev_output_tokens = torch.zeros(bsz, 1).long().fill_(self.bos).to(device)\n # prev_output_tokens = torch.zeros(bsz, 1).long().fill_(self.eos).to(device)\n\n dec_preds = []\n # 2. generate\n from collections import OrderedDict\n incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = OrderedDict()\n full_context_alignment: bool = False\n alignment_layer: Optional[int] = None\n alignment_heads: Optional[int] = None\n for j in range(max_len + 1):\n\n # incremental_state['step'] = j\n decoder_outputs, attns = model.decoder(prev_output_tokens, encoder_out=encoder_out, \\\n incremental_state=incremental_state)\n\n prediction = decoder_outputs.squeeze(1)\n prediction = prediction.log_softmax(dim=1)\n\n sample_max = True\n if sample_max:\n sample_logprobs, predicted = torch.max(prediction, dim=-1, keepdim=True)\n else:\n predicted = torch.multinomial(prediction, 1) # .to(device)\n dec_preds.append(predicted.squeeze(1).clone())\n prev_output_tokens = torch.cat((prev_output_tokens, predicted), dim=-1)\n\n dec_preds = torch.stack(dec_preds, dim=1)\n\n predictions = []\n for pred in dec_preds.tolist():\n predictions.append([{'tokens': torch.Tensor(pred).type_as(dec_preds)}])\n\n return predictions\n", "import math\nimport torch\n\n\ndef accuracy(similarity, topk=1, max_order=True):\n if not max_order:\n similarity = -similarity\n _, topk_ids = similarity.topk(topk, dim=-1)\n gt = similarity.new(similarity.size(0)).copy_(\n torch.arange(0, similarity.size(0))\n ).unsqueeze(dim=-1) # [[0], [1], ..., [B-1]]\n return (gt == topk_ids).sum(-1) / topk\n\n\ndef map(similarity, topk=1, max_order=True):\n \"\"\"\n MAP@k = mean(1/r_{i}), where i = {1, ..., k}.\n in 1-to-1 retrieval task, only 1 candidate is related.\n \"\"\"\n if not max_order:\n similarity = -similarity\n _, topk_ids = similarity.topk(topk, dim=-1)\n gt = similarity.new(similarity.size(0)).copy_(\n torch.arange(0, similarity.size(0))\n ).unsqueeze(dim=-1) # [[0], [1], ..., [B-1]]\n rank = similarity.new(similarity.size(0), topk).copy_(\n (1. / torch.range(1, topk)).expand(similarity.size(0), topk)\n )\n map_k = rank.masked_fill(topk_ids != gt, 0.).mean(dim=-1)\n return map_k\n\n\ndef mrr(similarity, max_order=True):\n if not max_order:\n similarity = -similarity\n gt = similarity.diag()\n ids = similarity >= gt.unsqueeze(dim=-1)\n mrr = 1. 
/ ids.sum(dim=-1)\n return mrr\n\n\ndef ndcg(similarity, topk=1, max_order=True):\n \"\"\"\n NDCG@k = DCG / IDCG\n where\n DCG = sum_{i}^{k} (2^r_{i} - 1) / log_{2}(i + 1)\n IDCG = sum_{i}^{k} (2^sorted_r_{i} - 1) / log_{2}(i + 1)\n sorted_r = sort(r, descending)\n\n In code retrieval task, relativity between a and b is 1 or 0,\n and only ONE relativity is 1 and others are 0.\n Therefore, r_{i} = {0, 1} and sum(r) = 1.\n sorted_r_{0} = 1 and sorted_r_{j} = 0 where j > 0,\n IDCG = 1,\n\n \"\"\"\n if not max_order:\n similarity = -similarity\n _, topk_ids = similarity.topk(topk, dim=-1)\n gt = similarity.new(similarity.size(0)).int().copy_(\n torch.arange(0, similarity.size(0))\n ).unsqueeze(dim=-1) # [[0], [1], ..., [B-1]]\n rank = similarity.new(similarity.size(0), topk).copy_(\n (torch.range(1, topk)).expand(similarity.size(0), topk)\n )\n rank_mask = topk_ids == gt\n rank = rank.masked_fill(~rank_mask, 0.)\n ndcg = rank_mask.float() * math.log(2) / (rank + 1.).log()\n ndcg = ndcg.masked_fill(~rank_mask, 0.).sum(dim=-1)\n return ndcg\n", "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom ncc.modules.seq2seq.ncc_incremental_decoder import NccIncrementalDecoder\nfrom ncc.modules.embedding import Embedding\nfrom ncc.utils import utils\nfrom ncc.modules.adaptive_softmax import AdaptiveSoftmax\nfrom ncc.modules.common.layers import (\n Linear, LSTMCell\n)\nfrom ncc.data.constants import DEFAULT_MAX_TARGET_POSITIONS\n\n\nclass AttentionLayer(nn.Module):\n def __init__(self, input_embed_dim, source_embed_dim, output_embed_dim, bias=False):\n super().__init__()\n self.input_proj = Linear(input_embed_dim, source_embed_dim, bias=bias)\n self.output_proj = Linear(input_embed_dim + source_embed_dim, output_embed_dim, bias=bias)\n\n def forward(self, input, source_hids, encoder_padding_mask):\n # input: bsz x input_embed_dim\n # source_hids: srclen x bsz x source_embed_dim\n\n # x: bsz x source_embed_dim\n x = self.input_proj(input)\n\n # compute attention\n attn_scores = (source_hids * x.unsqueeze(0)).sum(dim=2)\n\n # don't attend over padding\n if encoder_padding_mask is not None:\n attn_scores = attn_scores.float().masked_fill_(\n encoder_padding_mask,\n float('-inf')\n ).type_as(attn_scores) # FP16 support: cast to float and back\n\n attn_scores = F.softmax(attn_scores, dim=0) # srclen x bsz\n\n # sum weighted sources\n x = (attn_scores.unsqueeze(2) * source_hids).sum(dim=0)\n\n x = torch.tanh(self.output_proj(torch.cat((x, input), dim=1)))\n return x, attn_scores\n\n\nclass LSTMDecoder(NccIncrementalDecoder):\n \"\"\"LSTM decoder.\"\"\"\n\n def __init__(\n self, dictionary, embed_dim=512, hidden_size=512, out_embed_dim=512,\n num_layers=1, dropout_in=0.1, dropout_out=0.1, attention=True,\n encoder_output_units=512, pretrained_embed=None,\n share_input_output_embed=False, adaptive_softmax_cutoff=None,\n max_target_positions=DEFAULT_MAX_TARGET_POSITIONS\n ):\n super().__init__(dictionary)\n self.dropout_in = dropout_in\n self.dropout_out = dropout_out\n self.hidden_size = hidden_size\n self.share_input_output_embed = share_input_output_embed\n self.need_attn = True\n self.max_target_positions = max_target_positions\n\n self.adaptive_softmax = None\n num_embeddings = len(dictionary)\n padding_idx = dictionary.pad()\n if pretrained_embed is None:\n self.embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx)\n else:\n self.embed_tokens = pretrained_embed\n\n self.encoder_output_units = encoder_output_units\n if encoder_output_units != hidden_size and encoder_output_units
!= 0:\n self.encoder_hidden_proj = Linear(encoder_output_units, hidden_size)\n self.encoder_cell_proj = Linear(encoder_output_units, hidden_size)\n else:\n self.encoder_hidden_proj = self.encoder_cell_proj = None\n\n # disable input feeding if there is no encoder\n # input feeding is described in arxiv.org/abs/1508.04025\n input_feed_size = 0 if encoder_output_units == 0 else hidden_size\n self.layers = nn.ModuleList([\n LSTMCell(\n input_size=input_feed_size + embed_dim if layer == 0 else hidden_size,\n hidden_size=hidden_size,\n )\n for layer in range(num_layers)\n ])\n if attention:\n # TODO make bias configurable\n self.attention = AttentionLayer(hidden_size, encoder_output_units, hidden_size, bias=False)\n else:\n self.attention = None\n if hidden_size != out_embed_dim:\n self.additional_fc = Linear(hidden_size, out_embed_dim)\n if adaptive_softmax_cutoff is not None:\n # setting adaptive_softmax dropout to dropout_out for now but can be redefined\n self.adaptive_softmax = AdaptiveSoftmax(num_embeddings, hidden_size, adaptive_softmax_cutoff,\n dropout=dropout_out)\n elif self.share_input_output_embed:\n self.fc_out = Linear(out_embed_dim, num_embeddings)\n self.fc_out.weight = self.embed_tokens.weight\n elif not self.share_input_output_embed:\n self.fc_out = Linear(out_embed_dim, num_embeddings)\n\n def forward(self, prev_output_tokens, encoder_out=None, incremental_state=None, **kwargs):\n x, attn_scores = self.extract_features(\n prev_output_tokens, encoder_out, incremental_state\n )\n return self.output_layer(x), attn_scores\n\n def extract_features(self, prev_output_tokens, encoder_out, incremental_state=None):\n \"\"\"\n Similar to *forward* but only return features.\n \"\"\"\n if encoder_out is not None:\n encoder_padding_mask = encoder_out['encoder_padding_mask']\n encoder_out = encoder_out['encoder_out']\n else:\n encoder_padding_mask = None\n encoder_out = None\n\n if incremental_state is not None:\n prev_output_tokens = prev_output_tokens[:, -1:]\n bsz, seqlen = prev_output_tokens.size()\n\n # get outputs from encoder\n if encoder_out is not None:\n encoder_outs, encoder_hiddens, encoder_cells = encoder_out[:3]\n srclen = encoder_outs.size(0)\n else:\n srclen = None\n\n # embed tokens\n x = self.embed_tokens(prev_output_tokens)\n x = F.dropout(x, p=self.dropout_in, training=self.training)\n\n # B x T x C -> T x B x C\n x = x.transpose(0, 1)\n\n # initialize previous states (or get from cache during incremental generation)\n cached_state = utils.get_incremental_state(self, incremental_state, 'cached_state')\n if cached_state is not None:\n prev_hiddens, prev_cells, input_feed = cached_state\n elif encoder_out is not None:\n # setup recurrent cells\n num_layers = len(self.layers)\n prev_hiddens = [encoder_hiddens[i] for i in range(num_layers)]\n prev_cells = [encoder_cells[i] for i in range(num_layers)]\n if self.encoder_hidden_proj is not None:\n prev_hiddens = [self.encoder_hidden_proj(x) for x in prev_hiddens]\n prev_cells = [self.encoder_cell_proj(x) for x in prev_cells]\n input_feed = x.new_zeros(bsz, self.hidden_size)\n else:\n # setup zero cells, since there is no encoder\n num_layers = len(self.layers)\n zero_state = x.new_zeros(bsz, self.hidden_size)\n prev_hiddens = [zero_state for i in range(num_layers)]\n prev_cells = [zero_state for i in range(num_layers)]\n input_feed = None\n\n assert srclen is not None or self.attention is None, \\\n \"attention is not supported if there are no encoder outputs\"\n attn_scores = x.new_zeros(srclen, seqlen, bsz) if self.attention 
is not None else None\n outs = []\n for j in range(seqlen):\n # input feeding: concatenate context vector from previous time step\n if input_feed is not None:\n input = torch.cat((x[j, :, :], input_feed), dim=1)\n else:\n input = x[j]\n\n for i, rnn in enumerate(self.layers):\n # recurrent cell\n hidden, cell = rnn(input, (prev_hiddens[i], prev_cells[i]))\n\n # hidden state becomes the input to the next layer\n input = F.dropout(hidden, p=self.dropout_out, training=self.training)\n\n # save state for next time step\n prev_hiddens[i] = hidden\n prev_cells[i] = cell\n\n # apply attention using the last layer's hidden state\n if self.attention is not None:\n out, attn_scores[:, j, :] = self.attention(hidden, encoder_outs, encoder_padding_mask)\n else:\n out = hidden\n out = F.dropout(out, p=self.dropout_out, training=self.training) # 16x512\n\n # input feeding\n if input_feed is not None:\n input_feed = out\n\n # save final output\n outs.append(out)\n\n # cache previous states (no-op except during incremental generation)\n utils.set_incremental_state(\n self, incremental_state, 'cached_state',\n (prev_hiddens, prev_cells, input_feed),\n )\n\n # collect outputs across time steps\n x = torch.cat(outs, dim=0).view(seqlen, bsz, self.hidden_size)\n\n # T x B x C -> B x T x C\n x = x.transpose(1, 0)\n\n if hasattr(self, 'additional_fc') and self.adaptive_softmax is None:\n x = self.additional_fc(x)\n x = F.dropout(x, p=self.dropout_out, training=self.training)\n\n # srclen x tgtlen x bsz -> bsz x tgtlen x srclen\n if not self.training and self.need_attn and self.attention is not None:\n attn_scores = attn_scores.transpose(0, 2)\n else:\n attn_scores = None\n return x, attn_scores\n\n def output_layer(self, x):\n \"\"\"Project features to the vocabulary size.\"\"\"\n if self.adaptive_softmax is None:\n if self.share_input_output_embed:\n x = F.linear(x, self.embed_tokens.weight)\n else:\n x = self.fc_out(x)\n return x\n\n def reorder_incremental_state(self, incremental_state, new_order):\n super().reorder_incremental_state(incremental_state, new_order)\n cached_state = utils.get_incremental_state(self, incremental_state, 'cached_state')\n if cached_state is None:\n return\n\n def reorder_state(state):\n if isinstance(state, list):\n return [reorder_state(state_i) for state_i in state]\n elif state is not None:\n return state.index_select(0, new_order)\n else:\n return None\n\n new_state = tuple(map(reorder_state, cached_state))\n utils.set_incremental_state(self, incremental_state, 'cached_state', new_state)\n\n def max_positions(self):\n \"\"\"Maximum output length supported by the decoder.\"\"\"\n return self.max_target_positions\n\n def make_generation_fast_(self, need_attn=False, **kwargs):\n self.need_attn = need_attn\n", "# Copyright (c) Facebook, Inc. 
and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport torch\nfrom ncc.modules.seq2seq.ncc_incremental_decoder import NccIncrementalDecoder\n\n\nclass TypePredictor(object):\n def __init__(\n self,\n retain_dropout=False,\n ):\n \"\"\"Predicts types for a given source sequence.\n\n Args:\n retain_dropout (bool, optional): use dropout when predicting\n (default: False)\n \"\"\"\n self.retain_dropout = retain_dropout\n\n @torch.no_grad()\n def predict(self, models, sample, **kwargs):\n \"\"\"Predict types for a batch of inputs.\n\n Args:\n models (List[~fairseq.models.NccModel]): ensemble of models\n sample (dict): batch\n \"\"\"\n model = EnsembleModel(models)\n return self._predict(model, sample, **kwargs)\n\n @torch.no_grad()\n def _predict(\n self,\n model,\n sample,\n **kwargs\n ):\n if not self.retain_dropout:\n model.eval()\n\n net_output = model(**sample['net_input'])\n return net_output\n\n\nclass EnsembleModel(torch.nn.Module):\n \"\"\"A wrapper around an ensemble of models.\"\"\"\n\n def __init__(self, models):\n super().__init__()\n self.models = torch.nn.ModuleList(models)\n self.incremental_states = None\n if all(hasattr(m, 'decoder') and isinstance(m.decoder, NccIncrementalDecoder) for m in models):\n self.incremental_states = {m: {} for m in models}\n\n @torch.no_grad()\n def forward(self, src_tokens, **kwargs):\n \"\"\"\n Run the forward pass for a decoder-only model.\n\n Feeds a batch of tokens through the decoder to predict the next tokens.\n\n Args:\n src_tokens (LongTensor): tokens on which to condition the decoder,\n of shape `(batch, tgt_len)`\n src_lengths (LongTensor): source sentence lengths of shape `(batch)`\n\n Returns:\n tuple:\n - the decoder's output of shape `(batch, seq_len, vocab)`\n - a dictionary with any model-specific outputs\n \"\"\"\n if len(self.models) == 1:\n return self.models[0](src_tokens, **kwargs)\n # ensembling more than one model is not implemented yet; fail loudly\n # rather than silently returning None\n raise NotImplementedError('ensembles of more than one model are not supported yet')\n", "# Copyright (c) Facebook, Inc. 
and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\"\"\"\nRoBERTa: A Robustly Optimized BERT Pretraining Approach.\n\"\"\"\nimport sys\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom ncc.utils import utils\nfrom ncc.models import (\n NccLanguageModel,\n register_model,\n # register_model_architecture,\n)\nfrom ncc.modules.seq2seq.ncc_decoder import NccDecoder\nfrom ncc.modules.roberta.layer_norm import LayerNorm\nfrom ncc.modules.codebert.unilm_transformer_sentence_encoder import init_bert_params\nfrom ncc.modules.codebert.unilm_transformer_sentence_encoder import UnilmTransformerSentenceEncoder\n# from ncc.models.hub_interface import RobertaHubInterface\nfrom ncc import LOGGER\n\nfrom ncc.data.constants import INF\n\n\n@register_model('code_docstring_unilm')\nclass CodeDocstringUnilmModel(NccLanguageModel):\n\n @classmethod\n def hub_models(cls):\n return {\n 'roberta.base': 'http://dl.fbaipublicfiles.com/fairseq/models/roberta.base.tar.gz',\n 'roberta.large': 'http://dl.fbaipublicfiles.com/fairseq/models/roberta.large.tar.gz',\n 'roberta.large.mnli': 'http://dl.fbaipublicfiles.com/fairseq/models/roberta.large.mnli.tar.gz',\n 'roberta.large.wsc': 'http://dl.fbaipublicfiles.com/fairseq/models/roberta.large.wsc.tar.gz',\n }\n\n def __init__(self, args, encoder):\n super().__init__(encoder)\n self.args = args\n\n # We follow BERT's random weight initialization\n self.apply(init_bert_params)\n\n self.classification_heads = nn.ModuleDict()\n\n @classmethod\n def build_model(cls, args, config, task):\n \"\"\"Build a new model instance.\"\"\"\n\n # make sure all arguments are present\n # base_architecture(args)\n\n # if not hasattr(args, 'max_positions'):\n if 'max_positions' not in args['model']:\n args['model']['max_positions'] = args['task']['tokens_per_sample']\n\n encoder = RobertaEncoder(args, task.source_dictionary)\n return cls(args, encoder)\n\n def forward_(self, src_tokens, features_only=False, return_all_hiddens=False, classification_head_name=None,\n **kwargs):\n if classification_head_name is not None:\n features_only = True\n\n x, extra = self.decoder(src_tokens, features_only, return_all_hiddens, **kwargs)\n\n if classification_head_name is not None:\n x = self.classification_heads[classification_head_name](x)\n return x, extra\n\n def forward(self, src_tokens, segment_labels, attention_mask_unilm, mask_qkv=None, **kwargs):\n x, extra = self.decoder(src_tokens, segment_labels, attention_mask_unilm,\n output_all_encoded_layers=False, mask_qkv=mask_qkv, **kwargs)\n\n return x, extra\n\n def register_classification_head(self, name, num_classes=None, inner_dim=None, **kwargs):\n \"\"\"Register a classification head.\"\"\"\n if name in self.classification_heads:\n prev_num_classes = self.classification_heads[name].out_proj.out_features\n prev_inner_dim = self.classification_heads[name].dense.out_features\n if num_classes != prev_num_classes or inner_dim != prev_inner_dim:\n LOGGER.warning(\n 're-registering head \"{}\" with num_classes {} (prev: {}) '\n 'and inner_dim {} (prev: {})'.format(\n name, num_classes, prev_num_classes, inner_dim, prev_inner_dim\n )\n )\n self.classification_heads[name] = RobertaClassificationHead(\n self.args['model']['encoder_embed_dim'],\n inner_dim or self.args['model']['encoder_embed_dim'],\n num_classes,\n self.args['task']['pooler_activation_fn'],\n self.args['model']['pooler_dropout'],\n )\n\n @property\n def 
supported_targets(self):\n return {'self'}\n\n # @classmethod\n # def from_pretrained(cls, model_name_or_path, checkpoint_file='model.pt', data_name_or_path='.', bpe='gpt2',\n # **kwargs):\n # from ncc.utils import hub_utils\n # x = hub_utils.from_pretrained(\n # model_name_or_path,\n # checkpoint_file,\n # data_name_or_path,\n # archive_map=cls.hub_models(),\n # bpe=bpe,\n # load_checkpoint_heads=True,\n # **kwargs,\n # )\n # return RobertaHubInterface(x['args'], x['task'], x['models'][0])\n\n def upgrade_state_dict_named(self, state_dict, name):\n super().upgrade_state_dict_named(state_dict, name)\n\n prefix = name + '.' if name != '' else ''\n current_head_names = [] if not hasattr(self, 'classification_heads') else \\\n self.classification_heads.keys()\n\n # Handle new classification heads present in the state dict.\n keys_to_delete = []\n for k in state_dict.keys():\n if not k.startswith(prefix + 'classification_heads.'):\n continue\n\n head_name = k[len(prefix + 'classification_heads.'):].split('.')[0]\n num_classes = state_dict[prefix + 'classification_heads.' + head_name + '.out_proj.weight'].size(0)\n inner_dim = state_dict[prefix + 'classification_heads.' + head_name + '.dense.weight'].size(0)\n\n # if getattr(self.args, 'load_checkpoint_heads', False):\n if 'load_checkpoint_heads' in self.args['model']:\n if head_name not in current_head_names:\n self.register_classification_head(head_name, num_classes, inner_dim)\n else:\n if head_name not in current_head_names:\n LOGGER.warning(\n 'deleting classification head ({}) from checkpoint '\n 'not present in current model: {}'.format(head_name, k)\n )\n keys_to_delete.append(k)\n elif (\n num_classes != self.classification_heads[head_name].out_proj.out_features\n or inner_dim != self.classification_heads[head_name].dense.out_features\n ):\n LOGGER.warning(\n 'deleting classification head ({}) from checkpoint '\n 'with different dimensions than current model: {}'.format(head_name, k)\n )\n keys_to_delete.append(k)\n for k in keys_to_delete:\n del state_dict[k]\n\n # Copy any newly-added classification heads into the state dict\n # with their current weights.\n if hasattr(self, 'classification_heads'):\n cur_state = self.classification_heads.state_dict()\n for k, v in cur_state.items():\n if prefix + 'classification_heads.' + k not in state_dict:\n LOGGER.info('Overwriting ' + prefix + 'classification_heads.' + k)\n state_dict[prefix + 'classification_heads.' 
+ k] = v\n\n\nclass RobertaLMHead(nn.Module):\n \"\"\"Head for masked language modeling.\"\"\"\n\n def __init__(self, embed_dim, output_dim, activation_fn, weight=None):\n super().__init__()\n self.dense = nn.Linear(embed_dim, embed_dim)\n self.activation_fn = utils.get_activation_fn(activation_fn)\n self.layer_norm = LayerNorm(embed_dim)\n\n if weight is None:\n weight = nn.Linear(embed_dim, output_dim, bias=False).weight\n self.weight = weight\n self.bias = nn.Parameter(torch.zeros(output_dim))\n\n def forward(self, features, masked_pos=None, **kwargs):\n # Only project the unmasked tokens while training,\n # saves both memory and computation\n # [[12,30, 1,1,1], ...]\n # features=[b, len, C] -> [b, max_b, C]\n if masked_pos is not None:\n features = torch.gather(features, 1, masked_pos.unsqueeze(2).expand(-1, -1, features.size(-1)))\n\n x = self.dense(features)\n x = self.activation_fn(x)\n x = self.layer_norm(x)\n # project back to size of vocabulary with bias\n x = F.linear(x, self.weight) + self.bias\n return x\n\n\nclass RobertaClassificationHead(nn.Module):\n \"\"\"Head for sentence-level classification tasks.\"\"\"\n\n def __init__(self, input_dim, inner_dim, num_classes, activation_fn, pooler_dropout):\n super().__init__()\n self.dense = nn.Linear(input_dim, inner_dim)\n self.activation_fn = utils.get_activation_fn(activation_fn)\n self.dropout = nn.Dropout(p=pooler_dropout)\n self.out_proj = nn.Linear(inner_dim, num_classes)\n\n def forward(self, features, **kwargs):\n x = features[:, 0, :] # take <s> token (equiv. to [CLS])\n x = self.dropout(x)\n x = self.dense(x)\n x = self.activation_fn(x)\n x = self.dropout(x)\n x = self.out_proj(x)\n return x\n\n\nclass RobertaEncoder(NccDecoder):\n \"\"\"RoBERTa encoder.\n\n Implements the :class:`~fairseq.models.NccDecoder` interface required\n by :class:`~fairseq.models.NccLanguageModel`.\n \"\"\"\n\n def __init__(self, args, dictionary):\n super().__init__(dictionary)\n self.args = args\n\n # RoBERTa is a sentence encoder model, so users will intuitively trim\n # encoder layers. 
However, the implementation uses the fairseq decoder,\n # so we fix here.\n if args['model']['encoder_layers_to_keep']:\n args['model']['encoder_layers'] = len(args['model']['encoder_layers_to_keep'].split(\",\"))\n args['model']['decoder_layers_to_keep'] = args['model']['encoder_layers_to_keep']\n args['model']['encoder_layers_to_keep'] = None\n\n # def __init__(\n # self,\n # padding_idx: int,\n # vocab_size: int,\n # num_encoder_layers: int = 6,\n # embedding_dim: int = 768,\n # ffn_embedding_dim: int = 3072,\n # num_attention_heads: int = 8,\n # dropout: float = 0.1,\n # attention_dropout: float = 0.1,\n # activation_dropout: float = 0.1,\n # layerdrop: float = 0.0,\n # max_seq_len: int = 256,\n # num_segments: int = 2,\n # use_position_embeddings: bool = True,\n # offset_positions_by_padding: bool = True,\n # encoder_normalize_before: bool = False,\n # apply_bert_init: bool = False,\n # activation_fn: str = \"relu\",\n # learned_pos_embedding: bool = True,\n # add_bias_kv: bool = False,\n # add_zero_attn: bool = False,\n # embed_scale: float = None,\n # freeze_embeddings: bool = False,\n # n_trans_layers_to_freeze: int = 0,\n # export: bool = False,\n # traceable: bool = False,\n # ) -> None:\n #\n self.sentence_encoder = UnilmTransformerSentenceEncoder(\n padding_idx=dictionary.pad(),\n vocab_size=len(dictionary),\n num_encoder_layers=args['model']['encoder_layers'],\n embedding_dim=args['model']['encoder_embed_dim'],\n ffn_embedding_dim=args['model']['encoder_ffn_embed_dim'],\n num_attention_heads=args['model']['encoder_attention_heads'],\n dropout=args['model']['dropout'],\n attention_dropout=args['model']['attention_dropout'],\n activation_dropout=args['model']['activation_dropout'],\n layerdrop=args['model']['encoder_layerdrop'],\n max_seq_len=args['model']['max_positions'],\n num_segments=10, # avoid\n encoder_normalize_before=True,\n apply_bert_init=True,\n activation_fn=args['model']['activation_fn'],\n )\n self.lm_head = RobertaLMHead(\n embed_dim=args['model']['encoder_embed_dim'],\n output_dim=len(dictionary),\n activation_fn=args['model']['activation_fn'],\n weight=self.sentence_encoder.embed_tokens.weight,\n )\n\n def forward(self, src_tokens, segment_labels, attention_mask,\n features_only=False, return_all_hiddens=False, **kwargs):\n \"\"\"\n Args:\n src_tokens (LongTensor): input tokens of shape `(batch, src_len)`\n features_only (bool, optional): skip LM head and just return\n features. If True, the output will be of shape\n `(batch, src_len, embed_dim)`.\n return_all_hiddens (bool, optional): also return all of the\n intermediate hidden states (default: False).\n\n Returns:\n tuple:\n - the LM output of shape `(batch, src_len, vocab)`\n - a dictionary of additional data, where 'inner_states'\n is a list of hidden states. 
Note that the hidden\n states have shape `(src_len, batch, vocab)`.\n \"\"\"\n x, extra = self.extract_features(src_tokens, segment_labels, attention_mask,\n return_all_hiddens=return_all_hiddens)\n if not features_only:\n x = self.output_layer(x, masked_pos=kwargs['masked_pos'])\n return x, extra\n\n def extract_features(self, src_tokens, segment_labels, attention_mask, return_all_hiddens=False, **unused):\n inner_states, _ = self.sentence_encoder(\n src_tokens, segment_labels, attention_mask,\n last_state_only=not return_all_hiddens,\n )\n features = inner_states[-1]\n return features, {'inner_states': inner_states if return_all_hiddens else None}\n\n def output_layer(self, features, masked_pos=None, **unused):\n return self.lm_head(features, masked_pos)\n\n def max_positions(self):\n \"\"\"Maximum output length supported by the encoder.\"\"\"\n return self.args['model']['max_positions']\n", "# -*- coding: utf-8 -*-\n\nimport json\nimport os\nfrom functools import lru_cache\n\nimport numpy as np\nimport torch\n\nfrom ncc import (\n tokenizers,\n LOGGER,\n)\nfrom ncc.data import (\n indexed_dataset,\n)\nfrom ncc.data.dictionary import Dictionary\nfrom ncc.data.ncc_dataset import NccDataset\nfrom ncc.data.summarization.language_pair_dataset import LanguagePairDataset\nfrom ncc.data.wrappers.append_token_dataset import AppendTokenDataset\nfrom ncc.data.wrappers.portion_dataset import PortionDataset\nfrom ncc.data.wrappers.prepend_token_dataset import PrependTokenDataset\nfrom ncc.data.wrappers.truncate_dataset import TruncateDataset\nfrom ncc.eval.summarization import summarization_metrics\nfrom ncc.tasks import register_task\nfrom ncc.tasks.ncc_task import NccTask\nfrom ncc.tokenizers import tokenization\nfrom ncc.utils import utils\nfrom ncc.utils.logging import metrics\n\nEVAL_BLEU_ORDER = 4\n\n\nclass IndexedRawTextDataset(NccDataset):\n \"\"\"Takes a text file as input and binarizes it in memory at instantiation.\n Original lines are also kept in memory\"\"\"\n\n def __init__(self, path, dictionary, append_eos=True, reverse_order=False):\n self.tokens_list = []\n self.lines = []\n self.sizes = []\n self.append_eos = append_eos\n self.reverse_order = reverse_order\n self.read_data(path, dictionary)\n self.size = len(self.tokens_list)\n\n def read_data(self, path, dictionary):\n with open(path, 'r', encoding='utf-8') as f:\n for line in f:\n self.lines.append(line.strip('\\n'))\n tokens = dictionary.encode_line(\n line, tokenization._space_tokenizer, add_if_not_exist=False,\n append_eos=self.append_eos, reverse_order=self.reverse_order,\n ).long()\n self.tokens_list.append(tokens)\n self.sizes.append(len(tokens))\n self.sizes = np.array(self.sizes)\n\n def check_index(self, i):\n if i < 0 or i >= self.size:\n raise IndexError('index out of range')\n\n @lru_cache(maxsize=8)\n def __getitem__(self, i):\n self.check_index(i)\n return self.tokens_list[i]\n\n def get_original_text(self, i):\n self.check_index(i)\n return self.lines[i]\n\n def __del__(self):\n pass\n\n def __len__(self):\n return self.size\n\n def num_tokens(self, index):\n return self.sizes[index]\n\n def size(self, index):\n return self.sizes[index]\n\n @staticmethod\n def exists(path):\n return os.path.exists(path)\n\n\ndef _load_dataset(path, impl, dict):\n if impl == 'raw':\n src_dataset = IndexedRawTextDataset(path=path, dictionary=dict)\n elif impl == 'mmap':\n # mmap dataset has been numberized, no need for dict\n src_dataset = indexed_dataset.MMapIndexedDataset(path=path)\n else:\n raise NotImplementedError(\"No such 
{} dataset implementation.\".format(impl))\n return src_dataset\n\n\ndef load_langpair_dataset(\n data_path, split,\n src, src_dict,\n tgt, tgt_dict,\n dataset_impl,\n # combine, dataset_impl, upsample_primary,\n left_pad_source, left_pad_target,\n max_source_positions, max_target_positions,\n prepend_bos=False, load_alignments=False,\n truncate_source=False, append_source_id=False,\n truncate_target=False,\n append_eos_to_target=False,\n portion=None,\n):\n # load source dataset\n src_path = os.path.join(data_path, '{}.{}'.format(split, src))\n src_dataset = _load_dataset(path=src_path, impl=dataset_impl, dict=src_dict)\n\n if truncate_source:\n LOGGER.info('truncate {}.{} to {}'.format(split, src, max_source_positions))\n src_dataset = TruncateDataset(src_dataset, max_source_positions)\n\n if portion is not None and split == 'train':\n LOGGER.info('set {}.{} portion to {}'.format(split, src, portion))\n src_dataset = PortionDataset(src_dataset, portion)\n\n # load target dataset\n tgt_path = os.path.join(data_path, '{}.{}'.format(split, tgt))\n tgt_dataset = _load_dataset(path=tgt_path, impl=dataset_impl, dict=tgt_dict)\n if truncate_target:\n LOGGER.info('truncate {}.{} to {}'.format(split, tgt, max_target_positions))\n tgt_dataset = TruncateDataset(tgt_dataset, max_target_positions)\n\n if prepend_bos:\n assert hasattr(src_dict, \"bos_index\") and hasattr(tgt_dict, \"bos_index\")\n src_dataset = PrependTokenDataset(src_dataset, src_dict.bos())\n if tgt_dataset is not None:\n tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos())\n\n eos = None\n if append_source_id:\n src_dataset = AppendTokenDataset(src_dataset, src_dict.index('[{}]'.format(src)))\n if tgt_dataset is not None:\n tgt_dataset = AppendTokenDataset(tgt_dataset, tgt_dict.index('[{}]'.format(tgt)))\n eos = tgt_dict.index('[{}]'.format(tgt))\n\n if portion is not None and split == 'train':\n LOGGER.info('set {}.{} portion to {}'.format(split, tgt, portion))\n tgt_dataset = PortionDataset(tgt_dataset, portion)\n\n # align_dataset = None\n # if load_alignments:\n # align_path = os.path.join(data_path, '{}.align.{}-{}'.format(split, src, tgt))\n # if indexed_dataset.dataset_exists(align_path, impl=dataset_impl):\n # align_dataset = data_utils.load_indexed_dataset(align_path, None, dataset_impl)\n\n tgt_dataset_sizes = tgt_dataset.sizes if tgt_dataset is not None else None\n\n LOGGER.info('loaded {} examples from: {}'.format(len(src_dataset), src_path))\n LOGGER.info('loaded {} examples from: {}'.format(len(tgt_dataset), tgt_path))\n return LanguagePairDataset(\n src_dataset, src_dataset.sizes, src_dict,\n tgt_dataset, tgt_dataset_sizes, tgt_dict,\n left_pad_source=left_pad_source,\n left_pad_target=left_pad_target,\n max_source_positions=max_source_positions,\n max_target_positions=max_target_positions,\n align_dataset=None, eos=eos,\n remove_eos_from_source=True,\n append_eos_to_target=append_eos_to_target,\n shuffle=(split == 'train'),\n )\n\n\n@register_task('summarization')\nclass SummarizationTask(NccTask):\n \"\"\"\n This task`SummarizationTask` will handle file as follows:\n 1) truncate source/target sentence\n 2) append eos for target sentence for offset\n 3) move eos of target sentence to the head of it, e.g.\n decoder input: a b c\n ground truth: <eos> a b c\n \"\"\"\n\n def __init__(self, args, src_dict, tgt_dict):\n super().__init__(args)\n self.src_dict = src_dict\n self.tgt_dict = tgt_dict\n\n @classmethod\n def setup_task(cls, args, **kwargs):\n \"\"\"Setup the task (e.g., load dictionaries).\n\n Args:\n 
args (argparse.Namespace): parsed command-line arguments\n \"\"\"\n paths = utils.split_paths(args['task']['data'])\n assert len(paths) > 0\n\n dict = args['task'].get('dict', None)\n dict_type = args['task'].get('dict_type', None)\n if dict is None and dict_type is None:\n # load dictionaries\n src_dict = cls.load_dictionary(os.path.join(paths[0], '{}.dict.jsonl'.format(args['task']['source_lang'])))\n tgt_dict = cls.load_dictionary(os.path.join(paths[0], '{}.dict.jsonl'.format(args['task']['target_lang'])))\n assert src_dict.pad() == tgt_dict.pad()\n assert src_dict.eos() == tgt_dict.eos()\n assert src_dict.unk() == tgt_dict.unk()\n LOGGER.info('[{}] dictionary: {} types'.format(args['task']['source_lang'], len(src_dict)))\n LOGGER.info('[{}] dictionary: {} types'.format(args['task']['target_lang'], len(tgt_dict)))\n else:\n raise NotImplementedError\n return cls(args, src_dict, tgt_dict)\n\n @classmethod\n def build_dictionary(\n cls, filenames, tokenize_func,\n workers=1, threshold=-1, nwords=-1, padding_factor=8,\n **special_symbols,\n ):\n \"\"\"Build the dictionary\n\n Args:\n filenames (list): list of filenames\n workers (int): number of concurrent workers\n threshold (int): defines the minimum word count\n nwords (int): defines the total number of words in the final dictionary,\n including special symbols\n padding_factor (int): can be used to pad the dictionary size to be a\n multiple of 8, which is important on some hardware (e.g., Nvidia\n Tensor Cores).\n \"\"\"\n from ncc.data import constants\n d = Dictionary(\n pad=special_symbols.get('pad', constants.PAD),\n bos=special_symbols.get('bos', constants.BOS),\n eos=special_symbols.get('eos', constants.EOS),\n unk=special_symbols.get('unk', constants.UNK),\n extra_special_symbols=special_symbols.get('extra_special_symbols', None),\n )\n\n for filename in filenames:\n Dictionary.add_token_to_dictionary(\n filename, d, tokenize_func, workers\n )\n\n d.finalize(threshold=threshold, nwords=nwords, padding_factor=padding_factor)\n return d\n\n def load_dataset(self, split, epoch=1, combine=False, **kwargs):\n \"\"\"Load a given dataset split.\n\n Args:\n split (str): name of the split (e.g., train, valid, test)\n \"\"\"\n paths = utils.split_paths(self.args['task']['data'])\n assert len(paths) > 0\n data_path = paths[(epoch - 1) % len(paths)]\n\n # infer langcode\n src, tgt = self.args['task']['source_lang'], self.args['task']['target_lang']\n\n self.datasets[split] = load_langpair_dataset(\n data_path, split, src, self.src_dict, tgt, self.tgt_dict,\n dataset_impl=self.args['dataset']['dataset_impl'],\n left_pad_source=self.args['task']['left_pad_source'],\n left_pad_target=self.args['task']['left_pad_target'],\n max_source_positions=self.args['task']['max_source_positions'],\n max_target_positions=self.args['task']['max_target_positions'],\n load_alignments=self.args['task']['load_alignments'],\n truncate_source=self.args['task']['truncate_source'],\n truncate_target=self.args['task']['truncate_target'],\n append_eos_to_target=self.args['task']['append_eos_to_target'],\n portion=self.args['dataset'].get('portion', None),\n )\n\n def build_dataset_for_inference(self, src_tokens, src_lengths):\n return LanguagePairDataset(src_tokens, src_lengths, self.source_dictionary)\n\n def build_model(self, args):\n model = super().build_model(args)\n if args['task']['eval_bleu']:\n assert args['task']['eval_bleu_detok'] is not None, (\n '--eval-bleu-detok is required if using --eval-bleu; '\n 'try --eval-bleu-detok=moses (or 
--eval-bleu-detok=space '\n 'to disable detokenization, e.g., when using sentencepiece)'\n )\n # detok_args = args['task']['eval_bleu_detok_args'] if args['task']['eval_bleu_detok_args'] else '{}'\n # if args['bpe'] is not None:\n # self.tokenizer = tokenizers.build_bpe(\n # dict(bpe=args['task'].get('eval_bleu_detok', '{}'), **detok_args)\n # )\n # else:\n # self.tokenizer = tokenizers.build_tokenizer(\n # dict(tokenizer=args['task'].get('eval_bleu_detok', '{}'), **detok_args)\n # )\n detok_args = json.loads(\n args['task']['eval_bleu_detok_args'] if args['task']['eval_bleu_detok_args'] else '{}'\n )\n self.tokenizer = tokenizers.build_tokenizer(\n dict(tokenizer=args['task'].get('eval_bleu_detok', '{}'), **detok_args)\n )\n self.sequence_generator = self.build_generator([model], args)\n return model\n\n def train_step(\n self, sample, model, criterion, optimizer, update_num, ignore_grad=False\n ):\n \"\"\"\n Do forward and backward, and return the loss as computed by *criterion*\n for the given *model* and *sample*.\n\n Args:\n sample (dict): the mini-batch. The format is defined by the\n :class:`~fairseq.data.NccDataset`.\n model (~fairseq.models.BaseNccModel): the model\n criterion (~fairseq.criterions.NccCriterion): the criterion\n optimizer (~fairseq.optim.NccOptimizer): the optimizer\n update_num (int): the current update\n ignore_grad (bool): multiply loss by 0 if this is set to True\n\n Returns:\n tuple:\n - the loss\n - the sample size, which is used as the denominator for the\n gradient\n - logging outputs to display while training\n \"\"\"\n model.train()\n model.set_num_updates(update_num)\n loss, sample_size, logging_output = criterion(model, sample)\n if ignore_grad:\n loss *= 0\n optimizer.backward(loss)\n return loss, sample_size, logging_output\n\n def valid_step(self, sample, model, criterion):\n loss, sample_size, logging_output = super().valid_step(sample, model, criterion)\n\n def decode(toks, escape_unk=False, trunc_eos=True):\n s = self.tgt_dict.string(\n toks.int().cpu(),\n self.args['task']['eval_bleu_remove_bpe'],\n escape_unk=escape_unk,\n trunc_eos=trunc_eos,\n )\n if self.tokenizer:\n s = self.tokenizer.decode(s)\n if len(s) == 0:\n s = '0' # if predict sentence is null, use '0'\n return s\n\n if self.args['task']['eval_bleu']:\n gen_out = self.inference_step(self.sequence_generator, [model], sample)\n ids = sample['id'].tolist()\n hyps, refs = [], []\n for i in range(len(gen_out)):\n hyps.append(decode(gen_out[i][0]['tokens']))\n refs.append(decode(\n utils.strip_pad(sample['target'][i], self.tgt_dict.pad()),\n escape_unk=True, # don't count <unk> as matches to the hypo\n ))\n if self.args['task']['eval_with_sacrebleu']:\n import sacrebleu\n tokenize = sacrebleu.DEFAULT_TOKENIZER if not self.args['task']['eval_tokenized_bleu'] else 'none'\n bleu = sacrebleu.corpus_bleu(hyps, [refs], tokenize=tokenize)\n logging_output['_bleu_sys_len'] = bleu.sys_len\n logging_output['_bleu_ref_len'] = bleu.ref_len\n # we split counts into separate entries so that they can be\n # summed efficiently across workers using fast-stat-sync\n assert len(bleu.counts) == EVAL_BLEU_ORDER\n for i in range(EVAL_BLEU_ORDER):\n logging_output['_bleu_counts_' + str(i)] = bleu.counts[i]\n logging_output['_bleu_totals_' + str(i)] = bleu.totals[i]\n else:\n bleu, rouge_l, meteor = self._inference_score(hyps, refs, ids)\n logging_output['bleu'] = round(bleu, 4)\n logging_output['rouge_l'] = round(rouge_l, 4)\n logging_output['meteor'] = round(meteor, 4)\n return loss, sample_size, 
logging_output\n\n def reduce_metrics(self, logging_outputs, criterion):\n super().reduce_metrics(logging_outputs, criterion)\n if self.args['task']['eval_bleu']:\n\n if self.args['task']['eval_with_sacrebleu']:\n def sum_logs(key):\n import torch\n result = sum(log.get(key, 0) for log in logging_outputs)\n if torch.is_tensor(result):\n result = result.cpu()\n return result\n\n counts, totals = [], []\n for i in range(EVAL_BLEU_ORDER):\n counts.append(sum_logs('_bleu_counts_' + str(i)))\n totals.append(sum_logs('_bleu_totals_' + str(i)))\n\n if max(totals) > 0:\n # log counts as numpy arrays -- log_scalar will sum them correctly\n metrics.log_scalar('_bleu_counts', np.array(counts))\n metrics.log_scalar('_bleu_totals', np.array(totals))\n metrics.log_scalar('_bleu_sys_len', sum_logs('_bleu_sys_len'))\n metrics.log_scalar('_bleu_ref_len', sum_logs('_bleu_ref_len'))\n\n def compute_bleu(meters):\n import inspect\n import sacrebleu\n fn_sig = inspect.getfullargspec(sacrebleu.compute_bleu)[0]\n if 'smooth_method' in fn_sig:\n smooth = {'smooth_method': 'exp'}\n else:\n smooth = {'smooth': 'exp'}\n bleu = sacrebleu.compute_bleu(\n correct=meters['_bleu_counts'].sum,\n total=meters['_bleu_totals'].sum,\n sys_len=meters['_bleu_sys_len'].sum,\n ref_len=meters['_bleu_ref_len'].sum,\n **smooth\n )\n return round(bleu.score, 6)\n\n metrics.log_derived('bleu', compute_bleu)\n else:\n\n def sum_logs(key):\n return sum(log.get(key, 0) for log in logging_outputs)\n\n metrics.log_scalar('bleu', sum_logs('bleu'), round=6)\n\n def max_positions(self):\n \"\"\"Return the max sentence length allowed by the task.\"\"\"\n return (self.args['task']['max_source_positions'], self.args['task']['max_target_positions'])\n\n @property\n def source_dictionary(self):\n \"\"\"Return the source :class:`~fairseq.data.Dictionary`.\"\"\"\n return self.src_dict\n\n @property\n def target_dictionary(self):\n \"\"\"Return the target :class:`~fairseq.data.Dictionary`.\"\"\"\n return self.tgt_dict\n\n def _inference_score(self, hyps, refs, ids):\n hypotheses, references = dict(), dict()\n\n for key, pred, tgt in zip(ids, hyps, refs):\n hypotheses[key] = [pred]\n references[key] = tgt if isinstance(tgt, list) else [tgt]\n\n bleu, rouge_l, meteor = summarization_metrics.eval_accuracies(hypotheses, references)\n\n return bleu, rouge_l, meteor\n\n def encode_input(self, input, tokenizer=None):\n if tokenizer is not None:\n input = ''.join(char if str.isalnum(char) else ' ' for char in input) # for python_wan dataset\n input = tokenizer(input)\n input = input[:self.args['task']['max_source_positions']]\n input = [self.src_dict.index(token) for token in input] + [self.src_dict.eos()]\n input = torch.Tensor(input).long() # [bsz, len]\n input = {\n 'net_input': {\n 'src_tokens': input.unsqueeze(dim=0),\n 'src_lengths': torch.LongTensor([input.numel()]),\n },\n }\n return input\n\n def decode_output(self, output):\n output = output[0][0]['tokens']\n output = self.tgt_dict.string(output)\n if not str.endswith(output, \".\"):\n output += \".\"\n return output\n", "\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\"\"\"\nA modified version of the legacy DistributedDataParallel module that uses c10d\ncommunication primitives. This version is simpler than the latest PyTorch\nversion and is useful for debugging. 
Notably it does not overlap gradient\ncommunication with the backward pass, which makes it slower but more robust\nthan the PyTorch version.\n\nThis version also supports the *no_sync* context manager, which allows faster\ntraining with `--update-freq`.\n\"\"\"\n\nfrom contextlib import contextmanager\nimport copy\n\nimport torch\nfrom torch import nn\nfrom torch.autograd import Variable\n\nfrom ncc.utils import distributed_utils\n\n\nclass LegacyDistributedDataParallel(nn.Module):\n \"\"\"Implements distributed data parallelism at the module level.\n\n A simplified version of :class:`torch.nn.parallel.DistributedDataParallel`.\n This version uses a c10d process group for communication and does not\n broadcast buffers.\n\n Args:\n module (~torch.nn.Module): module to be parallelized\n world_size (int): number of parallel workers\n process_group (optional): the c10d process group to be used for\n distributed data all-reduction. If None, the default process group\n will be used.\n buffer_size (int, optional): number of elements to buffer before\n performing all-reduce (default: 256M).\n \"\"\"\n\n def __init__(self, module, world_size, process_group=None, buffer_size=2**28):\n super().__init__()\n\n self.module = module\n self.world_size = world_size\n self.process_group = process_group\n\n # Never use a bigger buffer than the number of model params\n self.buffer_size = min(buffer_size, sum(p.numel() for p in module.parameters()))\n self.buffer = None\n\n # Flag used by the NCCL backend to make sure we only reduce gradients\n # one time in the execution engine\n self.need_reduction = False\n\n # We can also forcibly accumulate grads locally and only do the\n # all-reduce at some later time\n self.accumulate_grads = False\n\n # For NCCL backend, since every single NCCL call is asynchoronous, we\n # therefore directly enqueue all the NCCL reduction calls to the\n # default CUDA stream without spawning up other reduction threads.\n # This achieves the best performance.\n self._register_grad_hook()\n\n def __getstate__(self):\n attrs = copy.copy(self.__dict__)\n return attrs\n\n def __setstate__(self, state):\n super().__setstate__(state)\n self._register_grad_hook()\n\n @contextmanager\n def no_sync(self):\n \"\"\"A context manager to disable gradient synchronization.\"\"\"\n old_accumulate_grads = self.accumulate_grads\n self.accumulate_grads = True\n yield\n self.accumulate_grads = old_accumulate_grads\n\n def forward(self, *inputs, **kwargs):\n return self.module(*inputs, **kwargs)\n\n def _register_grad_hook(self):\n \"\"\"\n This function registers the callback all-reduction function for the\n NCCL backend. All gradients will be all reduced in one single step.\n The NCCL reduction will directly be enqueued into the default CUDA\n stream. 
Therefore, no synchronization is needed.\n \"\"\"\n\n def all_reduce(params):\n buffer = self.buffer\n nonzero_buffer = False\n if len(params) > 1:\n offset = 0\n for p in params:\n sz = p.numel()\n if p.grad is not None:\n buffer[offset:offset+sz].copy_(p.grad.data.view(-1))\n nonzero_buffer = True\n else:\n buffer[offset:offset+sz].zero_()\n offset += sz\n else:\n # we only have a single grad to all-reduce\n p = params[0]\n if p.grad is not None:\n buffer = p.grad.data\n nonzero_buffer = True\n elif p.numel() <= self.buffer.numel():\n buffer = buffer[:p.numel()]\n buffer.zero_()\n else:\n buffer = torch.zeros_like(p)\n\n if nonzero_buffer:\n buffer.div_(self.world_size)\n\n distributed_utils.all_reduce(buffer, self.process_group)\n\n # copy all-reduced grads back into their original place\n offset = 0\n for p in params:\n sz = p.numel()\n if p.grad is not None:\n p.grad.data.copy_(buffer[offset:offset+sz].view_as(p))\n else:\n p.grad = buffer[offset:offset+sz].view_as(p).clone()\n offset += sz\n\n def reduction_fn():\n # This function only needs to be called once\n if not self.need_reduction or self.accumulate_grads:\n return\n self.need_reduction = False\n\n if self.buffer is None:\n self.buffer = next(self.module.parameters()).new(self.buffer_size)\n\n # All-reduce the gradients in buckets\n offset = 0\n buffered_params = []\n for param in self.module.parameters():\n if not param.requires_grad:\n continue\n if param.grad is None:\n param.grad = torch.zeros_like(param)\n if param.grad.requires_grad:\n raise RuntimeError(\"DistributedDataParallel only works \"\n \"with gradients that don't require \"\n \"grad\")\n sz = param.numel()\n if sz > self.buffer.numel():\n # all-reduce big params directly\n all_reduce([param])\n else:\n if offset + sz > self.buffer.numel():\n all_reduce(buffered_params)\n offset = 0\n buffered_params.clear()\n buffered_params.append(param)\n offset += sz\n\n if len(buffered_params) > 0:\n all_reduce(buffered_params)\n\n # Now register the reduction hook on the parameters\n for p in self.module.parameters():\n\n def allreduce_hook(*unused):\n self.need_reduction = True\n Variable._execution_engine.queue_callback(reduction_fn)\n\n if p.requires_grad:\n p.register_hook(allreduce_hook)", "import math\n\nimport torch\nimport torch.nn as nn\n\nfrom ncc.models.type_prediction.encoder import CodeEncoder, CodeEncoderLSTM\nfrom ncc.models import NccLanguageModel, register_model\nfrom ncc.modules.seq2seq.ncc_decoder import NccDecoder\n\n\n@register_model('typetransformer')\nclass TypeTransformer(NccLanguageModel):\n def __init__(self, args, encoder):\n super().__init__(encoder)\n self.args = args\n\n # We follow BERT's random weight initialization\n # self.apply(init_bert_params)\n\n self.classification_heads = nn.ModuleDict()\n\n @classmethod\n def build_model(cls, args, config, task):\n \"\"\"Build a new model instance.\"\"\"\n\n # make sure all arguments are present\n # base_architecture(args)\n\n # if not hasattr(args, 'max_positions'):\n if 'max_positions' not in args['model']:\n args['model']['max_positions'] = args['task']['tokens_per_sample']\n\n encoder = RobertaEncoder(args, task.source_dictionary, task.target_dictionary, encoder_type=args['model']['encoder_type'])\n return cls(args, encoder)\n\n def forward(self, src_tokens, **kwargs): #, features_only=False, return_all_hiddens=False, classification_head_name=None,\n # if classification_head_name is not None:\n # features_only = True\n #\n # x, extra = self.decoder(src_tokens, features_only, return_all_hiddens, 
**kwargs)\n #\n # if classification_head_name is not None:\n # x = self.classification_heads[classification_head_name](x)\n # return x, extra\n\n x = self.decoder(src_tokens, **kwargs)\n\n return x, None\n\n\nclass RobertaEncoder(NccDecoder):\n def __init__(\n self,\n args,\n source_dictionary,\n target_dictionary,\n # n_tokens,\n # n_output_tokens,\n d_model=512,\n d_rep=128,\n n_head=8,\n n_encoder_layers=6,\n d_ff=2048,\n dropout=0.0, # 0.1\n activation=\"relu\",\n norm=True,\n # pad_id=None,\n encoder_type=\"transformer\"\n ):\n # super(TypeTransformer, self).__init__()\n super().__init__(source_dictionary)\n self.args = args\n assert norm\n # assert pad_id is not None\n padding_idx = source_dictionary.pad()\n self.config = {k: v for k, v in locals().items() if k != \"self\"}\n\n # Encoder and output for type prediction\n assert (encoder_type in [\"transformer\", \"lstm\"])\n if encoder_type == \"transformer\":\n self.encoder = CodeEncoder(\n len(source_dictionary), d_model, d_rep, n_head, n_encoder_layers, d_ff, dropout, activation, norm, padding_idx, project=False\n )\n # TODO: Try LeakyReLU\n self.output = nn.Sequential(nn.Linear(d_model, d_model), nn.ReLU(), nn.Linear(d_model, len(target_dictionary)))\n elif encoder_type == \"lstm\":\n self.encoder = CodeEncoderLSTM(\n n_tokens=len(source_dictionary),\n d_model=d_model,\n d_rep=d_rep,\n n_encoder_layers=n_encoder_layers,\n dropout=dropout,\n pad_id=padding_idx,\n project=False\n )\n self.output = nn.Sequential(nn.Linear(d_model*2, d_model), nn.ReLU(), nn.Linear(d_model, len(target_dictionary)))\n\n def forward(self, src_tokens, src_length=None, output_attention=None):\n r\"\"\"\n Arguments:\n src_tok_ids: [B, L] long tensor\n output_attention: [B, L, L] float tensor\n \"\"\"\n if output_attention is not None and src_tokens.size(0) != output_attention.size(0):\n raise RuntimeError(\"the batch number of src_tok_ids and output_attention must be equal\")\n\n # Encode\n memory = self.encoder(src_tokens, src_length) # LxBxD\n memory = memory.transpose(0, 1) # BxLxD\n\n if output_attention is not None:\n # Aggregate features to the starting token in each labeled identifier\n memory = torch.matmul(output_attention, memory) # BxLxD\n\n # Predict logits over types\n return self.output(memory) # BxLxV\n" ]
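The `ndcg` metric near the top of this code block simplifies because each query has exactly one relevant item, so IDCG = 1. Below is a minimal, self-contained sketch of that computation, assuming a square similarity matrix whose ground-truth match for row i is column i; it uses `torch.arange` for the 1-based ranks (`torch.range` is deprecated), and the function name is illustrative, not repo API.

```python
import math
import torch

def ndcg_at_k(similarity: torch.Tensor, k: int = 5) -> torch.Tensor:
    """NDCG@k when query i's only relevant item is index i (IDCG = 1).

    The score reduces to 1 / log2(rank + 1) if the match appears at
    1-based position `rank` in the top-k list, and 0 otherwise.
    """
    _, topk_ids = similarity.topk(k, dim=-1)              # [B, k]
    gt = torch.arange(similarity.size(0)).unsqueeze(-1)   # [[0], [1], ..., [B-1]]
    match = topk_ids == gt                                # [B, k] bool
    ranks = torch.arange(1, k + 1, dtype=torch.float)     # 1-based ranks
    gains = match.float() * (math.log(2) / torch.log(ranks + 1.0))
    return gains.sum(dim=-1)                              # [B]

if __name__ == "__main__":
    sim = torch.eye(4) + 0.1 * torch.randn(4, 4)
    print(ndcg_at_k(sim, k=3))  # values in [0, 1]; 1.0 when the match ranks first
```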
[ [ "numpy.array" ], [ "torch.max", "torch.Tensor", "torch.cat", "torch.zeros", "torch.multinomial", "torch.no_grad", "torch.stack" ], [ "torch.range" ], [ "torch.nn.functional.softmax", "torch.cat", "torch.nn.functional.linear", "torch.nn.functional.dropout" ], [ "torch.nn.ModuleList", "torch.no_grad" ], [ "torch.nn.Dropout", "torch.zeros", "torch.nn.ModuleDict", "torch.nn.Linear", "torch.nn.functional.linear" ], [ "numpy.array", "torch.is_tensor", "torch.Tensor" ], [ "torch.autograd.Variable._execution_engine.queue_callback", "torch.zeros_like" ], [ "torch.nn.Linear", "torch.matmul", "torch.nn.ModuleDict", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jphacks/C_2111
[ "df87580614d7e5c225ea30746e5f2cd0576bbc98", "df87580614d7e5c225ea30746e5f2cd0576bbc98" ]
[ "bert/wtfml/data_loaders/nlp/classification.py", "bert/wtfml/engine/pl_engine/BERT_classification.py" ]
[ "import pandas as pd\r\nimport torch\r\nfrom transformers import BertJapaneseTokenizer\r\nfrom wtfml.data_loaders.nlp.utils import clean_sentence\r\nimport transformers\r\n\r\nclass BERTSimpleDataset:\r\n \"\"\"\r\n Dataset for bert which can accept clearning function\r\n \"\"\"\r\n\r\n def __init__(self, input_texts, target, clearning_function=clean_sentence):\r\n if isinstance(input_texts, pd.Series):\r\n input_texts = list(input_texts)\r\n self.input_texts = input_texts\r\n self.target = target\r\n self.tokenizer = BertJapaneseTokenizer.from_pretrained(\r\n \"cl-tohoku/bert-base-japanese-whole-word-masking\"\r\n )\r\n self.max_len = 144 # twitter\r\n self.clearning_function = clearning_function\r\n\r\n def __len__(self):\r\n return len(self.input_texts)\r\n\r\n def __getitem__(self, item):\r\n input_text = str(self.input_texts[item])\r\n if self.clearning_function:\r\n input_text = self.clearning_function(input_text)\r\n\r\n inputs = self.tokenizer.encode_plus(\r\n input_text,\r\n None,\r\n add_special_tokens=True,\r\n max_length=self.max_len,\r\n padding=\"max_length\",\r\n truncation=True,\r\n # return_tensors=\"pt\"\r\n )\r\n\r\n ids = inputs[\"input_ids\"]\r\n mask = inputs[\"attention_mask\"]\r\n token_type_ids = inputs[\"token_type_ids\"]\r\n target = self.target[item]\r\n \r\n return {\r\n \"ids\": torch.tensor(ids, dtype=torch.long),\r\n \"mask\": torch.tensor(mask, dtype=torch.long),\r\n \"token_type_ids\": torch.tensor(token_type_ids, dtype=torch.long),\r\n \"targets\": torch.tensor(target, dtype=torch.long), # floatからlongに変更\r\n }\r\n\r\n \r\nclass DistilBERTDataset:\r\n \"\"\"\r\n Dataset for bert which can accept clearning function\r\n \"\"\"\r\n\r\n def __init__(self, input_texts, target, clearning_function=clean_sentence):\r\n if isinstance(input_texts, pd.Series):\r\n input_texts = list(input_texts)\r\n self.input_texts = input_texts\r\n self.target = target\r\n \r\n self.tokenizer = transformers.DistilBertTokenizer.from_pretrained(\r\n \"cl-tohoku/bert-base-japanese-whole-word-masking\"\r\n )\r\n\r\n self.max_len = 144 # twitter\r\n self.clearning_function = clearning_function\r\n\r\n def __len__(self):\r\n return len(self.input_texts)\r\n\r\n def __getitem__(self, item):\r\n input_text = str(self.input_texts[item])\r\n if self.clearning_function:\r\n input_text = self.clearning_function(input_text)\r\n\r\n inputs = self.tokenizer.encode_plus(\r\n input_text,\r\n None,\r\n add_special_tokens=True,\r\n max_length=self.max_len,\r\n padding=\"max_length\",\r\n truncation=True,\r\n # return_tensors=\"pt\"\r\n )\r\n\r\n ids = inputs[\"input_ids\"]\r\n mask = inputs[\"attention_mask\"]\r\n # token_type_ids = inputs[\"token_type_ids\"]\r\n target = self.target[item]\r\n \r\n return {\r\n \"ids\": torch.tensor(ids, dtype=torch.long),\r\n \"mask\": torch.tensor(mask, dtype=torch.long),\r\n # \"token_type_ids\": torch.tensor(token_type_ids, dtype=torch.long),\r\n \"targets\": torch.tensor(target, dtype=torch.long), # floatからlongに変更\r\n }\r\n", "\"\"\"\r\n__author__: Abhishek Thakur\r\n\"\"\"\r\n\r\nfrom typing import Optional\r\n\r\nimport pytorch_lightning as pl\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.optim as optim\r\nimport torchmetrics\r\nfrom tqdm import tqdm\r\nfrom transformers import AdamW, get_linear_schedule_with_warmup\r\nfrom wtfml.engine.nlp.model import BERTBaseClassifier\r\n\r\n\r\nclass BERTClassificationPlEngine(pl.LightningModule):\r\n def __init__(\r\n self,\r\n model=BERTBaseClassifier(num_classes=4),\r\n \r\n # 
loss_fn=nn.BCEWithLogitsLoss(),\r\n loss_fn = nn.CrossEntropyLoss(),\r\n train_acc=torchmetrics.Accuracy(),\r\n valid_acc=torchmetrics.Accuracy(),\r\n lr: float = 3e-5,\r\n max_epoch=10,\r\n ):\r\n super(BERTClassificationPlEngine, self).__init__()\r\n self.model = model\r\n self.scaler = None\r\n self.loss_function = loss_fn\r\n self.train_acc = train_acc\r\n self.valid_acc = valid_acc\r\n self.lr = lr\r\n self.max_epoch = max_epoch\r\n\r\n def forward(self, ids, mask, token_type_ids):\r\n x = self.model(ids, mask, token_type_ids)\r\n return x\r\n\r\n def training_step(self, batch, batch_idx):\r\n # REQUIRED\r\n ids, mask, token_type_ids, target = (\r\n batch[\"ids\"],\r\n batch[\"mask\"],\r\n batch[\"token_type_ids\"],\r\n batch[\"targets\"],\r\n )\r\n # target = main_target + sub_target * self.sub_adjustment\r\n pred_batch_train = self.forward(ids, mask, token_type_ids)\r\n train_loss = self.loss_function(pred_batch_train, target)\r\n pred_batch_train_for_metrics = torch.softmax(pred_batch_train,dim = 1)\r\n target = target.to(torch.long)\r\n self.train_acc(pred_batch_train_for_metrics, target)\r\n self.log(\r\n \"train_acc\",\r\n self.train_acc,\r\n on_step=True,\r\n on_epoch=False,\r\n logger=True,\r\n prog_bar=True,\r\n )\r\n\r\n self.log(\r\n \"train_loss\",\r\n train_loss,\r\n prog_bar=True,\r\n on_epoch=True,\r\n on_step=True,\r\n logger=True,\r\n )\r\n return {\"loss\": train_loss}\r\n\r\n def validation_step(self, batch, batch_idx):\r\n # OPTIONAL\r\n ids, mask, token_type_ids, target = (\r\n batch[\"ids\"],\r\n batch[\"mask\"],\r\n batch[\"token_type_ids\"],\r\n batch[\"targets\"],\r\n )\r\n # target = main_target + sub_target * self.sub_adjustment\r\n out = self.forward(ids, mask, token_type_ids)\r\n # print(out, target)\r\n loss = self.loss_function(out, target)\r\n out_for_metrics = torch.softmax(out,dim = 1) \r\n\r\n target = target.to(torch.long)\r\n self.valid_acc(out_for_metrics, target)\r\n\r\n self.log(\r\n \"valid_acc\",\r\n self.valid_acc,\r\n prog_bar=True,\r\n logger=True,\r\n on_epoch=True,\r\n on_step=False,\r\n )\r\n self.log(\r\n \"valid_loss\",\r\n loss,\r\n prog_bar=True,\r\n logger=True,\r\n on_epoch=True,\r\n on_step=False,\r\n )\r\n return {\r\n \"val_loss\": loss,\r\n # \"acc\": acc,\r\n }\r\n\r\n def configure_optimizers(self):\r\n # REQUIRED\r\n\r\n param_optimizer = list(self.model.named_parameters())\r\n no_decay = [\"bias\", \"LayerNorm.bias\"]\r\n optimizer_parameters = [\r\n {\r\n \"params\": [\r\n p for n, p in param_optimizer if not any(nd in n for nd in no_decay)\r\n ],\r\n \"weight_decay\": 0.001,\r\n },\r\n {\r\n \"params\": [\r\n p for n, p in param_optimizer if any(nd in n for nd in no_decay)\r\n ],\r\n \"weight_decay\": 0.0,\r\n },\r\n ]\r\n opt = AdamW(optimizer_parameters, lr=self.lr)\r\n\r\n # opt = optim.AdamW(self.model.parameters(), lr=self.lr)\r\n sch = get_linear_schedule_with_warmup(\r\n opt, num_warmup_steps=3, num_training_steps=self.max_epoch\r\n )\r\n return [opt], [sch]\r\n" ]
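A quick smoke test for the tokenization step in `BERTSimpleDataset.__getitem__` above: `encode_plus` with `padding="max_length"` and `truncation=True` returns `input_ids`, `attention_mask`, and `token_type_ids` all padded to `max_len`. This assumes the checkpoint can be downloaded and the Japanese tokenizer dependencies (fugashi/ipadic) are installed.

```python
from transformers import BertJapaneseTokenizer

tokenizer = BertJapaneseTokenizer.from_pretrained(
    "cl-tohoku/bert-base-japanese-whole-word-masking"
)
enc = tokenizer.encode_plus(
    "このツイートはテストです。",  # "This tweet is a test."
    None,
    add_special_tokens=True,
    max_length=144,
    padding="max_length",
    truncation=True,
)
# all three sequences are padded/truncated to the same fixed length
assert len(enc["input_ids"]) == len(enc["attention_mask"]) == 144
assert len(enc["token_type_ids"]) == 144
```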
[ [ "torch.tensor" ], [ "torch.nn.CrossEntropyLoss", "torch.softmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Evelkos/CellularEvolutionaryAlgorithm
[ "9633337a00e20cb0c4d8a679e72755e165113468" ]
[ "src/cec2017/utils.py" ]
[ "# cec2017.utils\n# Author: Duncan Tilley\n# Additional functions for graphing and benchmarking\n\n\ndef surface_plot(function, domain=(-100, 100), points=30, dimension=2, ax=None):\n \"\"\"\n Creates a surface plot of a function.\n\n Args:\n function (function): The objective function to be called at each point.\n domain (num, num): The inclusive (min, max) domain for each dimension.\n points (int): The number of points to collect on each dimension. A total\n of points^2 function evaluations will be performed.\n dimension (int): The dimension to pass to the function. If this is more\n than 2, the elements after the first 2 will simply be zero,\n providing a slice at x_3 = 0, ..., x_n = 0.\n ax (matplotlib axes): Optional axes to use (must have projection='3d').\n Note, if specified plt.show() will not be called.\n \"\"\"\n import matplotlib.pyplot as plt\n import numpy as np\n from mpl_toolkits import mplot3d\n\n # create points^2 tuples of (x,y) and populate z\n xys = np.linspace(domain[0], domain[1], points)\n xys = np.transpose([np.tile(xys, len(xys)), np.repeat(xys, len(xys))])\n zs = np.zeros(points * points)\n\n if dimension > 2:\n # concatenate remaining zeros\n tail = np.zeros(dimension - 2)\n for i in range(0, xys.shape[0]):\n zs[i] = function(np.concatenate([xys[i], tail]))\n else:\n for i in range(0, xys.shape[0]):\n zs[i] = function(xys[i])\n\n # create the plot\n ax_in = ax\n if ax is None:\n ax = plt.axes(projection=\"3d\")\n\n X = xys[:, 0].reshape((points, points))\n Y = xys[:, 1].reshape((points, points))\n Z = zs.reshape((points, points))\n ax.plot_surface(X, Y, Z, cmap=\"gist_ncar\", edgecolor=\"none\")\n ax.set_title(function.__name__)\n ax.set_xlabel(\"x\")\n ax.set_ylabel(\"y\")\n ax.set_zlabel(\"z\")\n\n if ax_in is None:\n plt.show()\n\n\ndef time(function, domain=(-100, 100), points=30):\n \"\"\"\n Returns the time in seconds to calculate points^2 evaluations of the\n given function.\n\n function\n The objective function to be called at each point.\n domain\n The inclusive (min, max) domain for each dimension.\n points\n The number of points to collect on each dimension. A total of points^2\n function evaluations will be performed.\n \"\"\"\n from time import time\n\n import numpy as np\n\n # create points^2 tuples of (x,y) and populate z\n xys = np.linspace(domain[0], domain[1], points)\n xys = np.transpose([np.tile(xys, len(xys)), np.repeat(xys, len(xys))])\n zs = np.zeros(points * points)\n\n before = time()\n for i in range(0, xys.shape[0]):\n zs[i] = function(xys[i])\n return time() - before\n" ]
[ [ "numpy.linspace", "matplotlib.pyplot.axes", "numpy.concatenate", "matplotlib.pyplot.show", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
chamwen/NT-Benchmark
[ "d5a17a07fdfa89d80d47843c35ecf3e078b94371", "d5a17a07fdfa89d80d47843c35ecf3e078b94371", "d5a17a07fdfa89d80d47843c35ecf3e078b94371", "d5a17a07fdfa89d80d47843c35ecf3e078b94371", "d5a17a07fdfa89d80d47843c35ecf3e078b94371", "d5a17a07fdfa89d80d47843c35ecf3e078b94371" ]
[ "NT_UDA/demo_syn_atdoc.py", "NT_UDA/demo_syn_shot.py", "NT_SSDA/demo_seed_dann.py", "NT_UDA/utils/utils.py", "NT_Noise/demo_uda_seed_mcc.py", "NT_UDA/demo_seed_fixbi.py" ]
[ "# -*- coding: utf-8 -*-\n# A Survey on Negative Transfer\n# https://github.com/chamwen/NT-Benchmark\nimport numpy as np\nimport argparse\nimport os\nimport torch as tr\nimport torch.nn as nn\nimport torch.optim as optim\nfrom utils import network, loss, utils\nfrom utils.network import calc_coeff\nfrom utils.dataloader import read_syn_src_tar\nfrom utils.utils import lr_scheduler_full, fix_random_seed, add_label_noise_noimg\nfrom utils.loss import CELabelSmooth, CDANE, Entropy, RandomLayer\nimport torch.utils.data as Data\n\n\ndef data_load(Xs, Ys, Xt, Yt, args):\n dset_loaders = {}\n train_bs = args.batch_size\n\n if args.noise_rate > 0:\n Ys = add_label_noise_noimg(Ys, args.seed, args.class_num, args.noise_rate)\n\n sample_idx_tar = tr.from_numpy(np.arange(len(Yt))).long()\n data_src = Data.TensorDataset(Xs, Ys)\n data_tar = Data.TensorDataset(Xt, Yt)\n data_tar_idx = Data.TensorDataset(Xt, Yt, sample_idx_tar)\n\n # for DAN/DANN/CDAN/MCC\n dset_loaders[\"source\"] = Data.DataLoader(data_src, batch_size=train_bs, shuffle=True, drop_last=True)\n dset_loaders[\"target\"] = Data.DataLoader(data_tar_idx, batch_size=train_bs, shuffle=True, drop_last=True)\n dset_loaders[\"Target\"] = Data.DataLoader(data_tar, batch_size=train_bs * 3, shuffle=False, drop_last=False)\n\n return dset_loaders\n\n\ndef train_target(args):\n X_src, y_src, X_tar, y_tar = read_syn_src_tar(args)\n dset_loaders = data_load(X_src, y_src, X_tar, y_tar, args)\n\n netF, netC = network.backbone_net(args, args.bottleneck)\n netF.load_state_dict(tr.load(args.mdl_init_dir + 'netF.pt'))\n netC.load_state_dict(tr.load(args.mdl_init_dir + 'netC.pt'))\n base_network = nn.Sequential(netF, netC)\n\n max_len = max(len(dset_loaders[\"source\"]), len(dset_loaders[\"target\"]))\n args.max_iter = args.max_epoch * max_len\n\n ad_net = network.AdversarialNetwork(args.bottleneck, 20).cuda()\n ad_net.load_state_dict(tr.load(args.mdl_init_dir + 'netD_full.pt'))\n random_layer = RandomLayer([args.bottleneck, args.class_num], args.bottleneck)\n random_layer.cuda()\n\n optimizer_f = optim.SGD(netF.parameters(), lr=args.lr * 0.1)\n optimizer_c = optim.SGD(netC.parameters(), lr=args.lr)\n optimizer_d = optim.SGD(ad_net.parameters(), lr=args.lr)\n\n max_len = max(len(dset_loaders[\"source\"]), len(dset_loaders[\"target\"]))\n max_iter = args.max_epoch * max_len\n interval_iter = max_iter // 10\n iter_num = 0\n base_network.train()\n\n class_num = args.class_num\n mem_fea = tr.rand(len(dset_loaders[\"target\"].dataset), args.bottleneck).cuda()\n mem_fea = mem_fea / tr.norm(mem_fea, p=2, dim=1, keepdim=True)\n mem_cls = tr.ones(len(dset_loaders[\"target\"].dataset), class_num).cuda() / class_num\n\n while iter_num < max_iter:\n try:\n inputs_source, labels_source = iter_source.next()\n except:\n iter_source = iter(dset_loaders[\"source\"])\n inputs_source, labels_source = iter_source.next()\n\n try:\n inputs_target, _, idx = iter_target.next()\n except:\n iter_target = iter(dset_loaders[\"target\"])\n inputs_target, _, idx = iter_target.next()\n\n if inputs_source.size(0) == 1:\n continue\n\n iter_num += 1\n lr_scheduler_full(optimizer_f, init_lr=args.lr * 0.1, iter_num=iter_num, max_iter=args.max_iter)\n lr_scheduler_full(optimizer_c, init_lr=args.lr, iter_num=iter_num, max_iter=args.max_iter)\n lr_scheduler_full(optimizer_d, init_lr=args.lr, iter_num=iter_num, max_iter=args.max_iter)\n\n inputs_source, inputs_target, labels_source = inputs_source.cuda(), inputs_target.cuda(), labels_source.cuda()\n features_source, outputs_source = 
base_network(inputs_source)\n features_target, outputs_target = base_network(inputs_target)\n features = tr.cat((features_source, features_target), dim=0)\n\n # new version img loss\n args.loss_trade_off = 1.0\n outputs = tr.cat((outputs_source, outputs_target), dim=0)\n softmax_out = nn.Softmax(dim=1)(outputs)\n entropy = Entropy(softmax_out)\n transfer_loss = CDANE([features, softmax_out], ad_net, entropy, calc_coeff(iter_num), random_layer=random_layer)\n classifier_loss = CELabelSmooth(num_classes=args.class_num, epsilon=args.smooth)(outputs_source, labels_source)\n\n # ATDOC\n dis = -tr.mm(features_target.detach(), mem_fea.t())\n for di in range(dis.size(0)):\n dis[di, idx[di]] = tr.max(dis)\n _, p1 = tr.sort(dis, dim=1)\n\n w = tr.zeros(features_target.size(0), mem_fea.size(0)).cuda()\n for wi in range(w.size(0)):\n for wj in range(args.K):\n w[wi][p1[wi, wj]] = 1 / args.K\n\n weight_, pred = tr.max(w.mm(mem_cls), 1)\n loss_ = nn.CrossEntropyLoss(reduction='none')(outputs_target, pred)\n classifier_loss_atdoc = tr.sum(weight_ * loss_) / (tr.sum(weight_).item())\n\n eff = iter_num / args.max_iter\n total_loss = args.loss_trade_off * transfer_loss + classifier_loss + args.tar_par * eff * classifier_loss_atdoc\n\n optimizer_f.zero_grad()\n optimizer_c.zero_grad()\n optimizer_d.zero_grad()\n total_loss.backward()\n optimizer_f.step()\n optimizer_c.step()\n optimizer_d.step()\n\n # label memory\n netF.eval()\n netC.eval()\n with tr.no_grad():\n features_target, outputs_target = netC(netF(inputs_target))\n features_target = features_target / tr.norm(features_target, p=2, dim=1, keepdim=True)\n softmax_out = nn.Softmax(dim=1)(outputs_target)\n outputs_target = softmax_out ** 2 / ((softmax_out ** 2).sum(dim=0))\n\n mem_fea[idx] = (1.0 - args.momentum) * mem_fea[idx] + args.momentum * features_target.clone()\n mem_cls[idx] = (1.0 - args.momentum) * mem_cls[idx] + args.momentum * outputs_target.clone()\n\n if iter_num % interval_iter == 0 or iter_num == max_iter:\n base_network.eval()\n\n acc_t_te = utils.cal_acc_base(dset_loaders[\"Target\"], base_network)\n log_str = 'Task: {}, Iter:{}/{}; Acc = {:.2f}%'.format(args.task_str, iter_num, max_iter, acc_t_te)\n print(log_str)\n\n base_network.train()\n\n return acc_t_te\n\n\nif __name__ == '__main__':\n\n data_name = 'moon'\n if data_name == 'moon': num_class = 2\n base_name_list = ['0', '1', '2', '3_45', '4_15', '6', '7', '8', '9']\n domain_list = ['Raw', 'Tl', 'Sl', 'Rt', 'Sh', 'Sk', 'Ns', 'Ol', 'Sc']\n file_list = [data_name + i for i in base_name_list]\n num_domain = len(domain_list)\n\n args = argparse.Namespace(bottleneck=64, lr=0.01, lr_decay1=0.1, lr_decay2=1.0,\n epsilon=1e-05, layer='wn', class_num=num_class, smooth=0)\n\n args.K = 5\n args.momentum = 1.0\n args.tar_par = 0.2\n\n args.method = 'CDANE-ATDOC'\n args.dset = data_name\n args.backbone = 'ShallowNet'\n args.batch_size = 32\n args.max_epoch = 50\n args.input_dim = 2\n args.mdl_init_dir = 'outputs/mdl_init/' + args.dset + '/'\n args.noise_rate = 0\n dset_n = args.dset + '_' + str(args.noise_rate)\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = '5'\n args.data_env = 'gpu' # 'local'\n args.seed = 2022\n fix_random_seed(args.seed)\n tr.backends.cudnn.deterministic = True\n print(dset_n, args.method)\n\n args.root_path = './data_synth/'\n args.local_dir = r'/mnt/ssd2/wenz/NT-Benchmark/NT_UDA/'\n args.result_dir = 'results/target/'\n\n acc_all = np.zeros((len(domain_list) - 1))\n for s in range(1, num_domain): # source\n for t in [0]: # target\n itr_idx = s - 1\n info_str = '\\n%s: %s 
--> %s' % (itr_idx, domain_list[s], domain_list[t])\n print(info_str)\n args.src, args.tar = file_list[s], file_list[t]\n args.task_str = domain_list[s] + '_' + domain_list[t]\n print(args)\n\n acc_all[itr_idx] = train_target(args)\n print('All acc: ', np.round(acc_all, 2))\n print('Avg acc: ', np.round(np.mean(acc_all), 2))\n", "# -*- coding: utf-8 -*-\n# A Survey on Negative Transfer\n# https://github.com/chamwen/NT-Benchmark\nimport argparse\nimport os, sys\nimport os.path as osp\nimport numpy as np\nimport torch as tr\nimport torch.nn as nn\nimport torch.optim as optim\nfrom scipy.spatial.distance import cdist\nimport torch.utils.data as Data\nfrom utils import network, loss\nfrom utils.dataloader import read_syn_single\nfrom utils.utils import lr_scheduler, fix_random_seed, op_copy, cal_acc_noimg\n\n\ndef data_load(X, y, args):\n dset_loaders = {}\n train_bs = args.batch_size\n\n sample_idx = tr.from_numpy(np.arange(len(y))).long()\n data_tar = Data.TensorDataset(X, y, sample_idx)\n data_test = Data.TensorDataset(X, y, sample_idx)\n\n dset_loaders[\"target\"] = Data.DataLoader(data_tar, batch_size=train_bs, shuffle=True)\n dset_loaders[\"Target\"] = Data.DataLoader(data_test, batch_size=train_bs * 3, shuffle=False)\n return dset_loaders\n\n\ndef train_target(args):\n X_tar, y_tar = read_syn_single(args, args.tar)\n dset_loaders = data_load(X_tar, y_tar, args)\n\n # base network feature extract\n netF, netC = network.backbone_net(args, args.bottleneck)\n\n modelpath = args.output_dir_src + '/source_F.pt'\n netF.load_state_dict(tr.load(modelpath))\n modelpath = args.output_dir_src + '/source_C.pt'\n netC.load_state_dict(tr.load(modelpath))\n netC.eval()\n\n for k, v in netC.named_parameters():\n v.requires_grad = False\n\n param_group = []\n for k, v in netF.named_parameters():\n if args.lr_decay1 > 0:\n param_group += [{'params': v, 'lr': args.lr * args.lr_decay1}]\n else:\n v.requires_grad = False\n\n optimizer = optim.SGD(param_group)\n optimizer = op_copy(optimizer)\n\n max_iter = args.max_epoch * len(dset_loaders[\"target\"])\n interval_iter = max_iter // args.interval\n iter_num = 0\n\n while iter_num < max_iter:\n try:\n inputs_test, _, tar_idx = iter_test.next()\n except:\n iter_test = iter(dset_loaders[\"target\"])\n inputs_test, _, tar_idx = iter_test.next()\n\n if inputs_test.size(0) == 1:\n continue\n\n inputs_test = inputs_test.cuda()\n if iter_num % interval_iter == 0 and args.cls_par > 0:\n netF.eval()\n mem_label = obtain_label(dset_loaders[\"Target\"], netF, netC, args)\n mem_label = tr.from_numpy(mem_label).cuda()\n netF.train()\n\n iter_num += 1\n lr_scheduler(optimizer, iter_num=iter_num, max_iter=max_iter)\n features_test = netF(inputs_test)\n _, outputs_test = netC(features_test)\n\n # # loss definition\n if args.cls_par > 0:\n pred = mem_label[tar_idx].long()\n classifier_loss = nn.CrossEntropyLoss()(outputs_test, pred)\n classifier_loss *= args.cls_par\n else:\n classifier_loss = tr.tensor(0.0).cuda()\n\n if args.ent:\n softmax_out = nn.Softmax(dim=1)(outputs_test)\n entropy_loss = tr.mean(loss.Entropy(softmax_out))\n if args.gent:\n msoftmax = softmax_out.mean(dim=0)\n gentropy_loss = tr.sum(msoftmax * tr.log(msoftmax + args.epsilon))\n entropy_loss += gentropy_loss\n im_loss = entropy_loss * args.ent_par\n classifier_loss += im_loss\n\n optimizer.zero_grad()\n classifier_loss.backward()\n optimizer.step()\n\n if iter_num % interval_iter == 0 or iter_num == max_iter:\n netF.eval()\n acc_t_te, _ = cal_acc_noimg(dset_loaders[\"Target\"], netF, netC)\n log_str = 
'Task: {}, Iter:{}/{}; Acc = {:.2f}%'.format(args.task_str, iter_num, max_iter, acc_t_te)\n print(log_str)\n netF.train()\n\n if iter_num == max_iter:\n print('{}, TL Acc = {:.2f}%'.format(args.task_str, acc_t_te))\n return acc_t_te\n\n\ndef obtain_label(loader, netF, netC, args):\n start_test = True\n with tr.no_grad():\n iter_test = iter(loader)\n for _ in range(len(loader)):\n data = iter_test.next()\n inputs = data[0]\n labels = data[1]\n inputs = inputs.cuda()\n feas = netF(inputs)\n _, outputs = netC(feas)\n if start_test:\n all_fea = feas.float().cpu()\n all_output = outputs.float().cpu()\n all_label = labels.float()\n start_test = False\n else:\n all_fea = tr.cat((all_fea, feas.float().cpu()), 0)\n all_output = tr.cat((all_output, outputs.float().cpu()), 0)\n all_label = tr.cat((all_label, labels.float()), 0)\n\n all_output = nn.Softmax(dim=1)(all_output)\n ent = tr.sum(-all_output * tr.log(all_output + args.epsilon), dim=1)\n unknown_weight = 1 - ent / np.log(args.class_num)\n _, predict = tr.max(all_output, 1)\n\n accuracy = tr.sum(tr.squeeze(predict).float() == all_label).item() / float(all_label.size()[0])\n if args.distance == 'cosine':\n all_fea = tr.cat((all_fea, tr.ones(all_fea.size(0), 1)), 1)\n all_fea = (all_fea.t() / tr.norm(all_fea, p=2, dim=1)).t()\n\n all_fea = all_fea.float().cpu().numpy()\n K = all_output.size(1)\n aff = all_output.float().cpu().numpy()\n initc = aff.transpose().dot(all_fea)\n initc = initc / (1e-8 + aff.sum(axis=0)[:, None])\n cls_count = np.eye(K)[predict].sum(axis=0)\n labelset = np.where(cls_count > args.threshold)\n labelset = labelset[0]\n # print(labelset)\n\n dd = cdist(all_fea, initc[labelset], args.distance)\n pred_label = dd.argmin(axis=1)\n pred_label = labelset[pred_label]\n\n for round in range(1): # SSL\n aff = np.eye(K)[pred_label]\n initc = aff.transpose().dot(all_fea)\n initc = initc / (1e-8 + aff.sum(axis=0)[:, None])\n dd = cdist(all_fea, initc[labelset], args.distance)\n pred_label = dd.argmin(axis=1)\n pred_label = labelset[pred_label]\n\n acc = np.sum(pred_label == all_label.float().numpy()) / len(all_fea)\n log_str = 'SSL_Acc = {:.2f}% -> {:.2f}%'.format(accuracy * 100, acc * 100)\n print(log_str)\n\n return pred_label.astype('int')\n\n\nif __name__ == \"__main__\":\n\n data_name = 'moon'\n if data_name == 'moon': num_class = 2\n base_name_list = ['0', '1', '2', '3_45', '4_15', '6', '7', '8', '9']\n domain_list = ['Raw', 'Tl', 'Sl', 'Rt', 'Sh', 'Sk', 'Ns', 'Ol', 'Sc']\n file_list = [data_name + i for i in base_name_list]\n num_domain = len(domain_list)\n\n args = argparse.Namespace(bottleneck=64, lr=0.01, lr_decay1=0.1, lr_decay2=1.0, ent=True,\n gent=True, cls_par=0.3, ent_par=1.0, epsilon=1e-05, layer='wn',\n threshold=0, class_num=num_class, distance='cosine')\n\n args.method = 'SHOT'\n args.dset = data_name\n args.backbone = 'ShallowNet'\n args.batch_size = 32\n args.interval = 2\n args.max_epoch = 5\n args.input_dim = 2\n args.mdl_init_dir = 'outputs/mdl_init/' + args.dset + '/'\n args.noise_rate = 0\n dset_n = args.dset + '_' + str(args.noise_rate)\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = '3'\n args.data_env = 'gpu' # 'local'\n args.seed = 2022\n fix_random_seed(args.seed)\n tr.backends.cudnn.deterministic = True\n\n args.dset = data_name\n args.root_path = './data_synth/'\n mdl_path = 'outputs/models/'\n args.output_src = mdl_path + dset_n + '/source/'\n print(dset_n, args.method)\n\n acc_all = np.zeros((len(domain_list) - 1))\n for s in range(1, num_domain): # source\n for t in [0]: # target\n itr_idx = s - 1\n 
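# each perturbed moon variant (Tl, Sl, Rt, ...) takes a turn as the source; the target t=0 is always the Raw domain\n 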
info_str = '\\n%s: %s --> %s' % (itr_idx, domain_list[s], domain_list[t])\n print(info_str)\n args.src, args.tar = file_list[s], file_list[t]\n args.task_str = domain_list[s] + domain_list[t]\n\n args.name_src = domain_list[s]\n args.output_dir_src = osp.join(args.output_src, args.name_src)\n print(args)\n\n acc_all[itr_idx] = train_target(args)\n print('All acc: ', np.round(acc_all, 2))\n print('Avg acc: ', np.round(np.mean(acc_all), 2))\n", "# -*- coding: utf-8 -*-\n# A Survey on Negative Transfer\n# https://github.com/chamwen/NT-Benchmark\nimport numpy as np\nimport argparse\nimport os\nimport torch as tr\nimport torch.nn as nn\nimport torch.optim as optim\nfrom utils import network, loss, utils\nfrom utils.LogRecord import LogRecord\nfrom utils.dataloader import read_seed_src_tar\nfrom utils.utils import lr_scheduler_full, fix_random_seed, data_load_noimg_ssda\nfrom utils.loss import CELabelSmooth, Entropy, ReverseLayerF\n\n\ndef train_target(args):\n X_src, y_src, X_tar, y_tar = read_seed_src_tar(args)\n dset_loaders = data_load_noimg_ssda(X_src, y_src, X_tar, y_tar, args)\n\n netF, netC = network.backbone_net(args, args.bottleneck)\n netF.load_state_dict(tr.load(args.mdl_init_dir + 'netF.pt'))\n netC.load_state_dict(tr.load(args.mdl_init_dir + 'netC.pt'))\n base_network = nn.Sequential(netF, netC)\n\n args.max_iter = args.max_epoch * len(dset_loaders[\"source\"])\n\n ad_net = network.feat_classifier(type=args.layer, class_num=2, bottleneck_dim=args.bottleneck).cuda()\n ad_net.load_state_dict(tr.load(args.mdl_init_dir + 'netD_clf.pt'))\n\n optimizer_f = optim.SGD(netF.parameters(), lr=args.lr * 0.1)\n optimizer_c = optim.SGD(netC.parameters(), lr=args.lr)\n optimizer_d = optim.SGD(ad_net.parameters(), lr=args.lr)\n\n max_iter = args.max_epoch * len(dset_loaders[\"source\"])\n interval_iter = max_iter // 10\n args.max_iter = max_iter\n iter_num = 0\n base_network.train()\n\n while iter_num < max_iter:\n try:\n inputs_source, labels_source = iter_source.next()\n except:\n iter_source = iter(dset_loaders[\"source\"])\n inputs_source, labels_source = iter_source.next()\n\n try:\n inputs_target_tr, labels_target_tr = iter_target_tr.next()\n except:\n iter_target_tr = iter(dset_loaders[\"target_tr\"])\n inputs_target_tr, labels_target_tr = iter_target_tr.next()\n\n try:\n inputs_target, _ = iter_target.next()\n except:\n iter_target = iter(dset_loaders[\"target_te\"])\n inputs_target, _ = iter_target.next()\n\n if inputs_source.size(0) == 1:\n continue\n\n iter_num += 1\n lr_scheduler_full(optimizer_f, init_lr=args.lr * 0.1, iter_num=iter_num, max_iter=args.max_iter)\n lr_scheduler_full(optimizer_c, init_lr=args.lr, iter_num=iter_num, max_iter=args.max_iter)\n lr_scheduler_full(optimizer_d, init_lr=args.lr, iter_num=iter_num, max_iter=args.max_iter)\n\n inputs_source, labels_source = inputs_source.cuda(), labels_source.cuda()\n inputs_target = inputs_target.cuda()\n\n inputs_target_tr, labels_target_tr = inputs_target_tr.cuda(), labels_target_tr.cuda()\n _, outputs_source = netC(netF(inputs_source))\n _, outputs_target_tr = netC(netF(inputs_target_tr))\n outputs_comb = tr.cat((outputs_source, outputs_target_tr), dim=0)\n labels_comb = tr.cat((labels_source, labels_target_tr), dim=0)\n\n feas_source = netF(inputs_source)\n feas_target_tr = netF(inputs_target_tr)\n fea_comb = tr.cat((feas_source, feas_target_tr), dim=0)\n feas_target = netF(inputs_target)\n\n # # loss definition\n p = float(iter_num) / max_iter\n alpha = 2. / (1. 
+ np.exp(-10 * p)) - 1\n reverse_source, reverse_target = ReverseLayerF.apply(fea_comb, alpha), ReverseLayerF.apply(feas_target,\n alpha)\n _, domain_output_s = ad_net(reverse_source)\n _, domain_output_t = ad_net(reverse_target)\n domain_label_s = tr.ones(inputs_source.size()[0] + inputs_target_tr.size()[0]).long().cuda()\n domain_label_t = tr.zeros(inputs_target.size()[0]).long().cuda()\n\n classifier_loss = CELabelSmooth(num_classes=args.class_num, epsilon=args.smooth)(outputs_comb, labels_comb)\n adv_loss = nn.CrossEntropyLoss()(domain_output_s, domain_label_s) + nn.CrossEntropyLoss()(domain_output_t,\n domain_label_t)\n total_loss = classifier_loss + adv_loss\n\n optimizer_f.zero_grad()\n optimizer_c.zero_grad()\n optimizer_d.zero_grad()\n total_loss.backward()\n optimizer_f.step()\n optimizer_c.step()\n optimizer_d.step()\n\n if iter_num % interval_iter == 0 or iter_num == max_iter:\n base_network.eval()\n\n acc_t_te = utils.cal_acc_base(dset_loaders[\"Target\"], base_network)\n log_str = 'Task: {}, Iter:{}/{}; Acc = {:.2f}%'.format(args.task_str, iter_num, max_iter, acc_t_te)\n args.log.record(log_str)\n print(log_str)\n\n base_network.train()\n\n return acc_t_te\n\n\nif __name__ == '__main__':\n\n data_name = 'SEED'\n if data_name == 'SEED': chn, class_num, trial_num = 62, 3, 3394\n focus_domain_idx = [0, 1, 2]\n # focus_domain_idx = np.arange(15)\n domain_list = ['S' + str(i) for i in focus_domain_idx]\n num_domain = len(domain_list)\n\n args = argparse.Namespace(bottleneck=64, lr=0.01, lr_decay1=0.1, lr_decay2=1.0,\n epsilon=1e-05, layer='wn', smooth=0,\n N=num_domain, chn=chn, class_num=class_num)\n\n args.dset = data_name\n args.method = 'DANN'\n args.backbone = 'ShallowNet'\n args.batch_size = 32 # 32\n args.max_epoch = 50 # 50\n args.input_dim = 310\n args.norm = 'zscore'\n args.bz_tar_tr = args.batch_size\n args.bz_tar_te = args.batch_size * 2\n args.mdl_init_dir = 'outputs/mdl_init/' + args.dset + '/'\n args.noise_rate = 0\n dset_n = args.dset + '_' + str(args.noise_rate)\n args.tar_lbl_rate = 5 # [5, 10, ..., 50]/100\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = '6'\n args.data_env = 'gpu' # 'local'\n args.seed = 2022\n fix_random_seed(args.seed)\n tr.backends.cudnn.deterministic = True\n\n print(dset_n, args.method)\n print(args)\n\n args.local_dir = r'/mnt/ssd2/wenz/NT-Benchmark/NT_SSDA/'\n args.result_dir = 'results/target/'\n my_log = LogRecord(args)\n my_log.log_init()\n my_log.record('=' * 50 + '\\n' + os.path.basename(__file__) + '\\n' + '=' * 50)\n\n acc_all = np.zeros(num_domain * (num_domain - 1))\n for s in range(num_domain):\n for t in range(num_domain):\n if s != t:\n itr_idx = (num_domain - 1) * s + t\n if t > s: itr_idx -= 1\n info_str = '\\n%s: %s --> %s' % (itr_idx, domain_list[s], domain_list[t])\n print(info_str)\n args.src, args.tar = focus_domain_idx[s], focus_domain_idx[t]\n args.task_str = domain_list[s] + '_' + domain_list[t]\n print(args)\n\n my_log.record(info_str)\n args.log = my_log\n acc_all[itr_idx] = train_target(args)\n print('\\nSub acc: ', np.round(acc_all, 3))\n print('Avg acc: ', np.round(np.mean(acc_all), 3))\n\n acc_sub_str = str(np.round(acc_all, 3).tolist())\n acc_mean_str = str(np.round(np.mean(acc_all), 3).tolist())\n args.log.record(\"\\n==========================================\")\n args.log.record(acc_sub_str)\n args.log.record(acc_mean_str)\n\n", "# -*- coding: utf-8 -*-\n# A Survey on Negative Transfer\n# https://github.com/chamwen/NT-Benchmark\nimport os.path as osp\nimport os\nimport numpy as np\nimport random\nimport torch as 
tr\nimport torch.nn as nn\nfrom torchvision import transforms\nfrom torch.utils.data import Dataset, DataLoader\nimport torch.utils.data as Data\nfrom utils.loss import Entropy\nimport utils.network as network\nfrom utils.dataloader import read_seed_src_tar\nfrom utils.data_list import ImageList, ImageList_idx\n\n\ndef op_copy(optimizer):\n for param_group in optimizer.param_groups:\n param_group['lr0'] = param_group['lr']\n return optimizer\n\n\ndef fix_random_seed(SEED):\n tr.manual_seed(SEED)\n tr.cuda.manual_seed(SEED)\n np.random.seed(SEED)\n random.seed(SEED)\n tr.cuda.manual_seed_all(SEED)\n\n\ndef print_args(args):\n s = \"==========================================\\n\"\n for arg, content in args.__dict__.items():\n s += \"{}:{}\\n\".format(arg, content)\n return s\n\n\ndef lr_scheduler(optimizer, iter_num, max_iter, gamma=10, power=0.75):\n decay = (1 + gamma * iter_num / max_iter) ** (-power)\n for param_group in optimizer.param_groups:\n param_group['lr'] = param_group['lr0'] * decay\n param_group['weight_decay'] = 1e-3\n param_group['momentum'] = 0.9\n param_group['nesterov'] = True\n return optimizer\n\n\ndef lr_scheduler_full(optimizer, init_lr, iter_num, max_iter, gamma=10, power=0.75):\n decay = (1 + gamma * iter_num / max_iter) ** (-power)\n for param_group in optimizer.param_groups:\n param_group['lr'] = init_lr * decay\n param_group['weight_decay'] = 1e-3\n param_group['momentum'] = 0.9\n param_group['nesterov'] = True\n return optimizer\n\n\ndef create_folder(dir_name, data_env, win_root):\n if not osp.exists(dir_name):\n os.system('mkdir -p ' + dir_name)\n if not osp.exists(dir_name):\n if data_env == 'gpu':\n os.mkdir(dir_name)\n elif data_env == 'local':\n os.makedirs(win_root + dir_name)\n\n\ndef image_train(resize_size=256, crop_size=224, alexnet=False):\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n return transforms.Compose([\n transforms.Resize((resize_size, resize_size)),\n transforms.RandomCrop(crop_size),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize\n ])\n\n\ndef image_test(resize_size=256, crop_size=224, alexnet=False):\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n return transforms.Compose([\n transforms.Resize((resize_size, resize_size)),\n transforms.CenterCrop(crop_size),\n transforms.ToTensor(),\n normalize\n ])\n\n\ndef create_folder(dir_name, data_env, win_root):\n if not osp.exists(dir_name):\n os.system('mkdir -p ' + dir_name)\n if not osp.exists(dir_name):\n if data_env == 'gpu':\n os.mkdir(dir_name)\n elif data_env == 'local':\n os.makedirs(win_root + dir_name)\n\n\ndef cal_acc_base(loader, model, flag=True, fc=None):\n start_test = True\n with tr.no_grad():\n iter_test = iter(loader)\n for i in range(len(loader)):\n data = iter_test.next()\n inputs = data[0]\n labels = data[1]\n inputs = inputs.cuda()\n if flag:\n _, outputs = model(inputs)\n else:\n if fc is not None:\n feas, outputs = model(inputs)\n outputs = fc(feas)\n else:\n outputs = model(inputs)\n if start_test:\n all_output = outputs.float().cpu()\n all_label = labels.float()\n start_test = False\n else:\n all_output = tr.cat((all_output, outputs.float().cpu()), 0)\n all_label = tr.cat((all_label, labels.float()), 0)\n all_output = nn.Softmax(dim=1)(all_output)\n _, predict = tr.max(all_output, 1)\n accuracy = tr.sum(tr.squeeze(predict).float() == all_label).item() / float(all_label.size()[0])\n return accuracy * 100\n\n\ndef cal_acc_img(loader, netF, netB, netC, 
flag=False):\n start_test = True\n with tr.no_grad():\n iter_test = iter(loader)\n for i in range(len(loader)):\n data = iter_test.next()\n inputs = data[0]\n labels = data[1]\n inputs = inputs.cuda()\n _, outputs = netC(netB(netF(inputs)))\n if start_test:\n all_output = outputs.float().cpu()\n all_label = labels.float()\n start_test = False\n else:\n all_output = tr.cat((all_output, outputs.float().cpu()), 0)\n all_label = tr.cat((all_label, labels.float()), 0)\n _, predict = tr.max(all_output, 1)\n accuracy = tr.sum(tr.squeeze(predict).float() == all_label).item() / float(all_label.size()[0])\n mean_ent = tr.mean(Entropy(nn.Softmax(dim=1)(all_output))).cpu().data.item()\n\n # if flag:\n # matrix = confusion_matrix(all_label, tr.squeeze(predict).float())\n # acc = matrix.diagonal() / matrix.sum(axis=1) * 100\n # aacc = acc.mean()\n # aa = [str(np.round(i, 2)) for i in acc]\n # acc = ' '.join(aa)\n # return aacc, acc\n # else:\n # return accuracy * 100, mean_ent\n\n return accuracy * 100, mean_ent\n\n\ndef cal_acc_noimg(loader, netF, netC):\n start_test = True\n with tr.no_grad():\n iter_test = iter(loader)\n for i in range(len(loader)):\n data = iter_test.next()\n inputs = data[0].cuda()\n labels = data[1].float()\n _, outputs = netC(netF(inputs))\n if start_test:\n all_output = outputs.float().cpu()\n all_label = labels\n start_test = False\n else:\n all_output = tr.cat((all_output, outputs.float().cpu()), 0)\n all_label = tr.cat((all_label, labels), 0)\n\n all_output = nn.Softmax(dim=1)(all_output)\n _, predict = tr.max(all_output, 1)\n accuracy = tr.sum(tr.squeeze(predict).float() == all_label).item() / float(all_label.size()[0])\n mean_ent = tr.mean(Entropy(all_output)).cpu().data.item()\n\n return accuracy * 100, mean_ent\n\n\ndef save_fea_base(args):\n if args.dset in ['SEED', 'blob', 'moon']:\n Xs, Ys, Xt, Yt = read_seed_src_tar(args)\n dset_loaders = data_load_noimg(Xs, Ys, Xt, Yt, args)\n else:\n dset_loaders = data_load_img(args)\n source_data = dset_loaders['Source']\n target_data = dset_loaders['Target']\n\n # set base network\n if args.net[0:3] == 'res':\n netF = network.ResBase(res_name=args.net).cuda()\n elif args.net[0:3] == 'vgg':\n netF = network.VGGBase(vgg_name=args.net).cuda()\n\n netB = network.feat_bottleneck(type=args.classifier, feature_dim=netF.in_features,\n bottleneck_dim=args.bottleneck).cuda()\n netC = network.feat_classifier(type=args.layer, class_num=args.class_num, bottleneck_dim=args.bottleneck).cuda()\n\n args.modelpath = osp.join(args.output_dir, \"sourceF\" + \"_\" + str(args.repeat) + \".pt\")\n netF.load_state_dict({k.replace('module.', ''): v for k, v in tr.load(args.modelpath).items()})\n args.modelpath = osp.join(args.output_dir, \"sourceB\" + \"_\" + str(args.repeat) + \".pt\")\n netB.load_state_dict(tr.load(args.modelpath))\n args.modelpath = osp.join(args.output_dir, \"sourceC\" + \"_\" + str(args.repeat) + \".pt\")\n netC.load_state_dict(tr.load(args.modelpath))\n\n netF.eval()\n netB.eval()\n netC.eval()\n\n start_test = True\n with tr.no_grad():\n iter_train = iter(source_data)\n for i in range(len(source_data)):\n data = iter_train.next()\n inputs = data[0]\n labels = data[1]\n inputs = inputs.cuda()\n fea = netB(netF(inputs))\n _, outputs = netC(fea)\n if start_test:\n source_output = outputs.float().cpu()\n source_label = labels.float().cpu()\n source_fea = fea.float().cpu()\n start_test = False\n else:\n source_output = tr.cat((source_output, outputs.float().cpu()), 0)\n source_label = tr.cat((source_label, labels.float().cpu()), 0)\n 
source_fea = tr.cat((source_fea, fea.float().cpu()), 0)\n\n X_source = source_fea.detach().numpy()\n y_source = source_label.detach().numpy()\n\n start_test = True\n with tr.no_grad():\n iter_test = iter(target_data)\n for i in range(len(target_data)):\n data = iter_test.next()\n inputs = data[0]\n labels = data[1]\n inputs = inputs.cuda()\n fea = netB(netF(inputs))\n _, outputs = netC(fea)\n if start_test:\n target_output = outputs.float().cpu()\n target_label = labels.float().cpu()\n target_fea = fea.float().cpu()\n start_test = False\n else:\n target_output = tr.cat((target_output, outputs.float().cpu()), 0)\n target_label = tr.cat((target_label, labels.float().cpu()), 0)\n target_fea = tr.cat((target_fea, fea.float().cpu()), 0)\n\n X_target = target_fea.detach().numpy()\n y_target = target_label.detach().numpy()\n\n # output_source = source_output.detach().numpy()\n # output_target = target_output.detach().numpy()\n # weight = netC.fc.weight.cpu().detach().permute(1, 0).numpy()\n # bias = netC.fc.bias.cpu().detach().view(-1).numpy()\n # ACC = test_target_img(args)\n\n save_path = osp.join(args.fea_dir, args.task_str + \"_\" + str(args.repeat) + \".npz\")\n np.savez(save_path, X_source=X_source, y_source=y_source, X_target=X_target, y_target=y_target)\n\n\ndef test_target_img(args):\n dset_loaders = data_load_img(args)\n # set base network\n if args.net[0:3] == 'res':\n netF = network.ResBase(res_name=args.net).cuda()\n elif args.net[0:3] == 'vgg':\n netF = network.VGGBase(vgg_name=args.net).cuda()\n\n netB = network.feat_bottleneck(type=args.classifier, feature_dim=netF.in_features,\n bottleneck_dim=args.bottleneck).cuda()\n netC = network.feat_classifier(type=args.layer, class_num=args.class_num, bottleneck_dim=args.bottleneck).cuda()\n\n args.modelpath = osp.join(args.output_dir, \"sourceF\" + \"_\" + str(args.repeat) + \".pt\")\n netF.load_state_dict({k.replace('module.', ''): v for k, v in tr.load(args.modelpath).items()})\n args.modelpath = osp.join(args.output_dir, \"sourceB\" + \"_\" + str(args.repeat) + \".pt\")\n netB.load_state_dict(tr.load(args.modelpath))\n args.modelpath = osp.join(args.output_dir, \"sourceC\" + \"_\" + str(args.repeat) + \".pt\")\n netC.load_state_dict(tr.load(args.modelpath))\n\n netF.eval()\n netB.eval()\n netC.eval()\n\n acc, _ = cal_acc_img(dset_loaders['Target'], netF, netB, netC, False)\n log_str = 'Training: {}, Task: {}, Acc = {:.2f}%'.format(args.trte, args.task_str, acc)\n print(log_str)\n\n return acc\n\n\ndef data_load_img(args):\n # prepare data\n dsets = {}\n dset_loaders = {}\n train_bs = args.batch_size\n txt_src = open(args.s_dset_path).readlines()\n txt_tar = open(args.test_dset_path).readlines()\n\n # only add for source domain\n if args.noise_rate > 0:\n txt_src = add_label_noise_img(args, txt_src)\n\n if args.trte == \"val\":\n dsize = len(txt_src)\n tr_size = int(0.9 * dsize)\n tr.manual_seed(args.seed)\n tr_txt, te_txt = tr.utils.data.random_split(txt_src, [tr_size, dsize - tr_size])\n else:\n dsize = len(txt_src)\n tr_size = int(0.9 * dsize)\n tr.manual_seed(args.seed)\n _, te_txt = tr.utils.data.random_split(txt_src, [tr_size, dsize - tr_size])\n tr_txt = txt_src\n\n # for DAN/DANN/CDAN/MCC\n dsets[\"source\"] = ImageList(txt_src, transform=image_train())\n dset_loaders[\"source\"] = DataLoader(dsets[\"source\"], batch_size=train_bs, shuffle=True,\n num_workers=args.worker, drop_last=True)\n dsets[\"target\"] = ImageList(txt_tar, transform=image_train())\n dset_loaders[\"target\"] = DataLoader(dsets[\"target\"], 
batch_size=train_bs, shuffle=True, num_workers=args.worker,\n drop_last=True)\n\n # for DNN\n dsets[\"source_tr\"] = ImageList(tr_txt, transform=image_train())\n dset_loaders[\"source_tr\"] = DataLoader(dsets[\"source_tr\"], batch_size=train_bs, shuffle=True,\n num_workers=args.worker, drop_last=True)\n dsets[\"source_te\"] = ImageList(te_txt, transform=image_test())\n dset_loaders[\"source_te\"] = DataLoader(dsets[\"source_te\"], batch_size=train_bs, shuffle=False,\n num_workers=args.worker, drop_last=False)\n\n # for generating feature\n dsets[\"Source\"] = ImageList(txt_src, transform=image_test())\n dset_loaders[\"Source\"] = DataLoader(dsets[\"Source\"], batch_size=train_bs * 3, shuffle=False,\n num_workers=args.worker, drop_last=False)\n dsets[\"Target\"] = ImageList(txt_tar, transform=image_test())\n dset_loaders[\"Target\"] = DataLoader(dsets[\"Target\"], batch_size=train_bs * 3, shuffle=False,\n num_workers=args.worker, drop_last=False)\n\n return dset_loaders\n\n\ndef data_load_noimg(Xs, Ys, Xt, Yt, args):\n dset_loaders = {}\n train_bs = args.batch_size\n\n if args.noise_rate > 0:\n Ys = add_label_noise_noimg(Ys, args.seed, args.class_num, args.noise_rate)\n\n args.validation = 'random'\n src_idx = np.arange(len(Ys.numpy()))\n if args.validation == 'random':\n num_train = int(0.9 * len(src_idx))\n tr.manual_seed(args.seed)\n id_train, id_val = tr.utils.data.random_split(src_idx, [num_train, len(src_idx) - num_train])\n\n source_tr = Data.TensorDataset(Xs[id_train, :], Ys[id_train])\n source_te = Data.TensorDataset(Xs[id_val, :], Ys[id_val])\n data_tar = Data.TensorDataset(Xt, Yt)\n data_src = Data.TensorDataset(Xs, Ys)\n\n # for DAN/DANN/CDAN/MCC\n dset_loaders[\"source\"] = Data.DataLoader(data_src, batch_size=train_bs, shuffle=True, drop_last=True)\n dset_loaders[\"target\"] = Data.DataLoader(data_tar, batch_size=train_bs, shuffle=True, drop_last=True)\n\n # for DNN\n dset_loaders[\"source_tr\"] = Data.DataLoader(source_tr, batch_size=train_bs, shuffle=True, drop_last=True)\n dset_loaders[\"source_te\"] = Data.DataLoader(source_te, batch_size=train_bs, shuffle=False, drop_last=False)\n\n # for generating feature\n dset_loaders[\"Source\"] = Data.DataLoader(data_src, batch_size=train_bs * 3, shuffle=False, drop_last=False)\n dset_loaders[\"Target\"] = Data.DataLoader(data_tar, batch_size=train_bs * 3, shuffle=False, drop_last=False)\n\n return dset_loaders\n\n\ndef add_label_noise_img(args, txt_list):\n txt_path_list = [i.split('==')[0] for i in txt_list]\n lbl_list = [int(i.split('==')[1]) for i in txt_list]\n\n random.seed(args.seed)\n idx_shuffle = random.sample(np.arange(len(lbl_list)).tolist(), int(args.noise_rate * len(lbl_list)))\n idx_shuffle.sort()\n\n class_list = np.arange(args.class_num)\n lbl_list_new = lbl_list.copy()\n for i in range(len(idx_shuffle)):\n class_list_tmp = class_list.copy().tolist()\n class_list_tmp.remove(lbl_list[idx_shuffle[i]])\n random.seed(args.seed + i)\n lbl_list_new[idx_shuffle[i]] = random.sample(class_list_tmp, 1)[0]\n\n txt_list_new = []\n for i in range(len(lbl_list)):\n txt_list_new.append(txt_path_list[i] + '==' + str(lbl_list_new[i]) + '\\n')\n\n return txt_list_new\n\n\ndef add_label_noise_noimg(Y_raw, seed, class_num, noise_rate):\n if tr.is_tensor(Y_raw):\n Y_raw_np = Y_raw.clone().numpy()\n else:\n Y_raw_np = Y_raw.copy()\n\n random.seed(seed)\n idx_shuffle = random.sample(np.arange(len(Y_raw_np)).tolist(), int(noise_rate * len(Y_raw_np)))\n idx_shuffle.sort()\n\n class_list = np.arange(class_num)\n Y_new = Y_raw_np.copy()\n for 
i in range(len(idx_shuffle)):\n class_list_tmp = class_list.copy().tolist()\n class_list_tmp.remove(Y_raw_np[idx_shuffle[i]])\n random.seed(seed + i)\n Y_new[idx_shuffle[i]] = random.sample(class_list_tmp, 1)[0]\n if tr.is_tensor(Y_raw):\n Y_new = tr.from_numpy(Y_new).long()\n\n return Y_new\n", "# -*- coding: utf-8 -*-\n# A Survey on Negative Transfer\n# https://github.com/chamwen/NT-Benchmark\nimport numpy as np\nimport argparse\nimport os\nimport torch as tr\nimport torch.nn as nn\nimport torch.optim as optim\nfrom utils import network, loss, utils\nfrom utils.dataloader import read_seed_src_tar\nfrom utils.utils import lr_scheduler_full, fix_random_seed, data_load_noimg\nfrom utils.loss import ClassConfusionLoss, CELabelSmooth\n\n\ndef train_target(args):\n X_src, y_src, X_tar, y_tar = read_seed_src_tar(args)\n dset_loaders = data_load_noimg(X_src, y_src, X_tar, y_tar, args)\n\n netF, netC = network.backbone_net(args, args.bottleneck)\n netF.load_state_dict(tr.load(args.mdl_init_dir + 'netF.pt'))\n netC.load_state_dict(tr.load(args.mdl_init_dir + 'netC.pt'))\n base_network = nn.Sequential(netF, netC)\n\n max_len = max(len(dset_loaders[\"source\"]), len(dset_loaders[\"target\"]))\n args.max_iter = args.max_epoch * max_len\n optimizer_f = optim.SGD(netF.parameters(), lr=args.lr * 0.1)\n optimizer_c = optim.SGD(netC.parameters(), lr=args.lr)\n\n max_iter = args.max_epoch * len(dset_loaders[\"source\"])\n interval_iter = max_iter // 10\n args.max_iter = max_iter\n iter_num = 0\n base_network.train()\n\n while iter_num < max_iter:\n try:\n inputs_source, labels_source = iter_source.next()\n except:\n iter_source = iter(dset_loaders[\"source\"])\n inputs_source, labels_source = iter_source.next()\n\n try:\n inputs_target, _ = iter_target.next()\n except:\n iter_target = iter(dset_loaders[\"target\"])\n inputs_target, _ = iter_target.next()\n\n if inputs_source.size(0) == 1:\n continue\n\n iter_num += 1\n lr_scheduler_full(optimizer_f, init_lr=args.lr * 0.1, iter_num=iter_num, max_iter=args.max_iter)\n lr_scheduler_full(optimizer_c, init_lr=args.lr, iter_num=iter_num, max_iter=args.max_iter)\n\n inputs_source, inputs_target, labels_source = inputs_source.cuda(), inputs_target.cuda(), labels_source.cuda()\n features_source, outputs_source = base_network(inputs_source)\n features_target, outputs_target = base_network(inputs_target)\n\n # new version img loss\n # p = float(iter_num) / max_iter\n # alpha = 2. / (1. 
+ np.exp(-10 * p)) - 1\n args.loss_trade_off = 1.0\n args.t_mcc = 2\n transfer_loss = ClassConfusionLoss(t=args.t_mcc)(outputs_target)\n classifier_loss = CELabelSmooth(num_classes=args.class_num, epsilon=args.smooth)(outputs_source, labels_source)\n total_loss = args.loss_trade_off * transfer_loss + classifier_loss\n\n optimizer_f.zero_grad()\n optimizer_c.zero_grad()\n total_loss.backward()\n optimizer_f.step()\n optimizer_c.step()\n\n if iter_num % interval_iter == 0 or iter_num == max_iter:\n base_network.eval()\n\n acc_t_te = utils.cal_acc_base(dset_loaders[\"Target\"], base_network)\n log_str = 'Task: {}, Iter:{}/{}; Acc = {:.2f}%'.format(args.task_str, iter_num, max_iter, acc_t_te)\n print(log_str)\n\n base_network.train()\n\n return acc_t_te\n\n\nif __name__ == '__main__':\n\n data_name = 'SEED'\n if data_name == 'SEED': chn, class_num, trial_num = 62, 3, 3394\n focus_domain_idx = [0, 1, 2]\n # focus_domain_idx = np.arange(15)\n domain_list = ['S' + str(i) for i in focus_domain_idx]\n num_domain = len(domain_list)\n\n args = argparse.Namespace(bottleneck=64, lr=0.01, lr_decay1=0.1, lr_decay2=1.0,\n epsilon=1e-05, layer='wn', smooth=0,\n N=num_domain, chn=chn, class_num=class_num)\n\n args.dset = data_name\n args.method = 'MCC'\n args.backbone = 'ShallowNet'\n args.batch_size = 32 # 32\n args.max_epoch = 50 # 50\n args.input_dim = 310\n args.norm = 'zscore'\n args.mdl_init_dir = 'outputs/mdl_init/' + args.dset + '/'\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = '6'\n args.data_env = 'gpu' # 'local'\n args.seed = 2022\n fix_random_seed(args.seed)\n tr.backends.cudnn.deterministic = True\n\n noise_list = np.linspace(0, 100, 11).tolist()\n num_test = len(noise_list)\n acc_all = np.zeros(num_test)\n s, t = 0, 1\n for ns in range(num_test):\n args.noise_rate = np.round(noise_list[ns] / 100, 2)\n dset_n = args.dset + '_' + str(args.noise_rate)\n print(dset_n, args.method)\n info_str = '\\nnoise %s: %s --> %s' % (str(noise_list[ns]), domain_list[s], domain_list[t])\n print(info_str)\n args.src, args.tar = focus_domain_idx[s], focus_domain_idx[t]\n args.task_str = domain_list[s] + '_' + domain_list[t]\n print(args)\n\n acc_all[ns] = train_target(args)\n print('\\nSub acc: ', np.round(acc_all, 3))\n print('Avg acc: ', np.round(np.mean(acc_all), 3))\n\n acc_sub_str = str(np.round(acc_all, 3).tolist())\n acc_mean_str = str(np.round(np.mean(acc_all), 3).tolist())\n", "# -*- coding: utf-8 -*-\n# A Survey on Negative Transfer\n# https://github.com/chamwen/NT-Benchmark\nimport numpy as np\nimport argparse\nimport os\nimport torch as tr\nimport torch.nn as nn\nimport torch.optim as optim\nfrom utils.LogRecord import LogRecord\nfrom utils.dataloader import read_seed_src_tar\nfrom utils.utils import fix_random_seed, data_load_noimg, op_copy, lr_scheduler\nfrom utils import network, utils, loss\n\n\ndef train_target(args):\n X_src, y_src, X_tar, y_tar = read_seed_src_tar(args)\n dset_loaders = data_load_noimg(X_src, y_src, X_tar, y_tar, args)\n\n netF_s, netC_s = network.backbone_net(args, args.bottleneck)\n netF_t, netC_t = network.backbone_net(args, args.bottleneck)\n\n mdl_type_list = ['netF', 'netC']\n if args.use_pretrain:\n mdl_path_name = [args.mdl_path + args.task_str + '/' + mdl + '.pt' for mdl in mdl_type_list]\n else:\n mdl_path_name = [args.mdl_path + mdl + '.pt' for mdl in mdl_type_list]\n netF_s.load_state_dict(tr.load(mdl_path_name[0]))\n netC_s.load_state_dict(tr.load(mdl_path_name[1]))\n\n netF_t.load_state_dict(tr.load(mdl_path_name[0]))\n 
netC_t.load_state_dict(tr.load(mdl_path_name[1]))\n\n param_group = []\n for k, v in netF_s.named_parameters():\n if args.lr_decay1 > 0:\n param_group += [{'params': v, 'lr': args.lr * args.lr_decay1}]\n else:\n v.requires_grad = False\n for k, v in netC_s.named_parameters():\n if args.lr_decay2 > 0:\n param_group += [{'params': v, 'lr': args.lr * args.lr_decay2}]\n else:\n v.requires_grad = False\n sp_param_sd = nn.Parameter(tr.tensor(5.0).cuda(), requires_grad=True)\n param_group += [{\"params\": [sp_param_sd], \"lr\": args.lr}]\n optimizer_s = optim.SGD(param_group)\n optimizer_s = op_copy(optimizer_s)\n\n param_group = []\n for k, v in netF_t.named_parameters():\n if args.lr_decay1 > 0:\n param_group += [{'params': v, 'lr': args.lr * args.lr_decay1}]\n else:\n v.requires_grad = False\n for k, v in netC_t.named_parameters():\n if args.lr_decay2 > 0:\n param_group += [{'params': v, 'lr': args.lr * args.lr_decay2}]\n else:\n v.requires_grad = False\n sp_param_td = nn.Parameter(tr.tensor(5.0).cuda(), requires_grad=True)\n param_group += [{\"params\": [sp_param_td], \"lr\": args.lr}]\n optimizer_t = optim.SGD(param_group)\n optimizer_t = op_copy(optimizer_t)\n\n models_sd = nn.Sequential(netF_s, netC_s)\n models_td = nn.Sequential(netF_t, netC_t)\n\n ce = nn.CrossEntropyLoss().cuda()\n mse = nn.MSELoss().cuda()\n\n models_sd.train()\n models_td.train()\n\n num_batch = len(dset_loaders[\"source\"])\n max_iter = args.max_epoch * num_batch\n interval_iter = max_iter // 10\n iter_num = 0\n\n while iter_num < max_iter:\n try:\n inputs_source, labels_source = iter_source.next()\n except:\n iter_source = iter(dset_loaders[\"source\"])\n inputs_source, labels_source = iter_source.next()\n\n try:\n inputs_target, labels_target = iter_target.next()\n except:\n iter_target = iter(dset_loaders[\"target\"])\n inputs_target, labels_target = iter_target.next()\n\n if inputs_source.size(0) == 1:\n continue\n\n iter_num += 1\n lr_scheduler(optimizer_s, iter_num=iter_num, max_iter=max_iter)\n lr_scheduler(optimizer_t, iter_num=iter_num, max_iter=max_iter)\n\n inputs_source, labels_source = inputs_source.cuda(), labels_source.cuda()\n inputs_target = inputs_target.cuda()\n\n inputs_source, labels_source = inputs_source.cuda(non_blocking=True), labels_source.cuda(non_blocking=True)\n inputs_target, labels_target = inputs_target.cuda(non_blocking=True), labels_target.cuda(non_blocking=True)\n\n _, pred_tar_sd = models_sd(inputs_target)\n _, pred_tar_td = models_td(inputs_target)\n\n pseudo_sd, top_prob_sd, threshold_sd = loss.get_target_preds(args, pred_tar_sd)\n fixmix_sd_loss = loss.get_fixmix_loss(models_sd, inputs_source, inputs_target, labels_source, pseudo_sd,\n args.lam_sd)\n\n pseudo_td, top_prob_td, threshold_td = loss.get_target_preds(args, pred_tar_td)\n fixmix_td_loss = loss.get_fixmix_loss(models_td, inputs_source, inputs_target, labels_source, pseudo_td,\n args.lam_td)\n\n total_loss = fixmix_sd_loss + fixmix_td_loss\n\n if iter_num == 0:\n print('Fixed-mixup Loss, sdm: {:.4f}, tdm: {:.4f}'.format(fixmix_sd_loss.item(), fixmix_td_loss.item()))\n\n # Bidirectional Matching\n if iter_num // num_batch > args.bim_start:\n bim_mask_sd = tr.ge(top_prob_sd, threshold_sd)\n bim_mask_sd = tr.nonzero(bim_mask_sd).squeeze()\n\n bim_mask_td = tr.ge(top_prob_td, threshold_td)\n bim_mask_td = tr.nonzero(bim_mask_td).squeeze()\n\n if bim_mask_sd.dim() > 0 and bim_mask_td.dim() > 0:\n if bim_mask_sd.numel() > 0 and bim_mask_td.numel() > 0:\n bim_mask = min(bim_mask_sd.size(0), bim_mask_td.size(0))\n bim_sd_loss = 
ce(pred_tar_sd[bim_mask_td[:bim_mask]], pseudo_td[bim_mask_td[:bim_mask]].cuda().detach())\n bim_td_loss = ce(pred_tar_td[bim_mask_sd[:bim_mask]], pseudo_sd[bim_mask_sd[:bim_mask]].cuda().detach())\n\n total_loss += bim_sd_loss\n total_loss += bim_td_loss\n\n if iter_num == 0:\n print('Bidirectional Loss sdm: {:.4f}, tdm: {:.4f}'.format(bim_sd_loss.item(),\n bim_td_loss.item()))\n\n # Self-penalization\n if iter_num // num_batch <= args.sp_start:\n sp_mask_sd = tr.lt(top_prob_sd, threshold_sd)\n sp_mask_sd = tr.nonzero(sp_mask_sd).squeeze()\n\n sp_mask_td = tr.lt(top_prob_sd, threshold_td) # note: top_prob_sd here may be a copy-paste slip for top_prob_td\n sp_mask_td = tr.nonzero(sp_mask_td).squeeze()\n\n if sp_mask_sd.dim() > 0 and sp_mask_td.dim() > 0:\n if sp_mask_sd.numel() > 0 and sp_mask_td.numel() > 0:\n sp_mask = min(sp_mask_sd.size(0), sp_mask_td.size(0))\n sp_sd_loss = loss.get_sp_loss(pred_tar_sd[sp_mask_sd[:sp_mask]], pseudo_sd[sp_mask_sd[:sp_mask]],\n sp_param_sd)\n sp_td_loss = loss.get_sp_loss(pred_tar_td[sp_mask_td[:sp_mask]], pseudo_td[sp_mask_td[:sp_mask]],\n sp_param_td)\n\n total_loss += sp_sd_loss\n total_loss += sp_td_loss\n\n if iter_num == 0:\n print('Penalization Loss sdm: {:.4f}, tdm: {:.4f}'.format(sp_sd_loss.item(), sp_td_loss.item()))\n\n # Consistency Regularization\n if iter_num // num_batch > args.cr_start:\n mixed_cr = 0.5 * inputs_source + 0.5 * inputs_target\n _, out_sd = models_sd(mixed_cr)\n _, out_td = models_td(mixed_cr)\n cr_loss = mse(out_sd, out_td)\n total_loss += cr_loss\n if iter_num == 0:\n print('Consistency Loss: {:.4f}'.format(cr_loss.item()))\n\n optimizer_s.zero_grad()\n optimizer_t.zero_grad()\n total_loss.backward()\n optimizer_s.step()\n optimizer_t.step()\n\n if iter_num % interval_iter == 0 or iter_num == max_iter:\n netF_t.eval()\n netC_t.eval()\n\n acc_t_te, _ = utils.cal_acc_noimg(dset_loaders[\"Target\"], netF_t, netC_t)\n log_str = 'Task: {}, Iter:{}/{}; Acc = {:.2f}%'.format(args.task_str, iter_num, max_iter, acc_t_te)\n print(log_str)\n\n netF_t.train()\n netC_t.train()\n\n return acc_t_te\n\n\nif __name__ == '__main__':\n\n data_name = 'SEED'\n if data_name == 'SEED': chn, class_num, trial_num = 62, 3, 3394\n focus_domain_idx = [0, 1, 2]\n # focus_domain_idx = np.arange(15)\n domain_list = ['S' + str(i) for i in focus_domain_idx]\n num_domain = len(domain_list)\n\n args = argparse.Namespace(bottleneck=64, lr=0.01, lr_decay1=0.1, lr_decay2=1.0,\n epsilon=1e-05, layer='wn', smooth=0,\n N=num_domain, chn=chn, class_num=class_num)\n\n # para for FixBi\n args.max_epoch = 50\n args.th = 2.0\n args.bim_start = 20\n args.sp_start = 20\n args.cr_start = 20\n args.lam_sd = 0.7\n args.lam_td = 0.3\n\n # para for train\n args.dset = data_name\n args.method = 'FixBi'\n args.backbone = 'ShallowNet'\n args.batch_size = 32 # 32\n args.input_dim = 310\n args.norm = 'zscore'\n args.noise_rate = 0\n dset_n = args.dset + '_' + str(args.noise_rate)\n args.mdl_init_dir = 'outputs/mdl_init/' + args.dset + '/'\n args.mdl_fixbi_dir = 'outputs/mdl_fixbi/' + dset_n + '/'\n args.use_pretrain = 1\n args.mdl_path = args.mdl_fixbi_dir if args.use_pretrain else args.mdl_init_dir\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = '5'\n args.data_env = 'gpu' # 'local'\n args.seed = 2022\n fix_random_seed(args.seed)\n tr.backends.cudnn.deterministic = True\n\n print(dset_n, args.method)\n print(args)\n\n args.local_dir = r'/mnt/ssd2/wenz/NT-Benchmark/NT_UDA/'\n args.result_dir = 'results/target/'\n my_log = LogRecord(args)\n my_log.log_init()\n my_log.record('=' * 50 + '\\n' + os.path.basename(__file__) + '\\n' + '=' * 50)\n\n acc_all = 
np.zeros(num_domain * (num_domain - 1))\n for s in range(num_domain):\n for t in range(num_domain):\n if s != t:\n itr_idx = (num_domain - 1) * s + t\n if t > s: itr_idx -= 1\n info_str = '\\n%s: %s --> %s' % (itr_idx, domain_list[s], domain_list[t])\n print(info_str)\n args.src, args.tar = focus_domain_idx[s], focus_domain_idx[t]\n args.task_str = domain_list[s] + '_' + domain_list[t]\n print(args)\n\n my_log.record(info_str)\n args.log = my_log\n acc_all[itr_idx] = train_target(args)\n print('\\nSub acc: ', np.round(acc_all, 3))\n print('Avg acc: ', np.round(np.mean(acc_all), 3))\n\n acc_sub_str = str(np.round(acc_all, 3).tolist())\n acc_mean_str = str(np.round(np.mean(acc_all), 3).tolist())\n args.log.record(\"\\n==========================================\")\n args.log.record(acc_sub_str)\n args.log.record(acc_mean_str)\n\n\n" ]
[ [ "torch.nn.Sequential", "torch.nn.Softmax", "torch.norm", "torch.nn.CrossEntropyLoss", "torch.max", "torch.load", "torch.cat", "torch.utils.data.TensorDataset", "torch.utils.data.DataLoader", "torch.sum", "numpy.round", "torch.no_grad", "torch.sort", "numpy.mean" ], [ "torch.nn.Softmax", "torch.max", "torch.load", "torch.utils.data.DataLoader", "numpy.round", "torch.no_grad", "numpy.mean", "numpy.where", "torch.nn.CrossEntropyLoss", "torch.norm", "torch.utils.data.TensorDataset", "numpy.eye", "torch.from_numpy", "torch.tensor", "torch.optim.SGD", "torch.squeeze", "numpy.log", "scipy.spatial.distance.cdist", "torch.log" ], [ "torch.nn.Sequential", "torch.nn.CrossEntropyLoss", "torch.cat", "torch.load", "numpy.round", "numpy.mean", "numpy.exp", "numpy.zeros" ], [ "torch.nn.Softmax", "numpy.savez", "torch.max", "numpy.random.seed", "torch.cuda.manual_seed", "torch.load", "torch.manual_seed", "torch.utils.data.TensorDataset", "numpy.arange", "torch.utils.data.DataLoader", "torch.is_tensor", "torch.cat", "torch.from_numpy", "torch.utils.data.random_split", "torch.no_grad", "torch.cuda.manual_seed_all", "torch.squeeze" ], [ "torch.nn.Sequential", "numpy.linspace", "torch.load", "numpy.round", "numpy.mean", "numpy.zeros" ], [ "torch.nn.Sequential", "torch.nn.CrossEntropyLoss", "torch.ge", "torch.load", "torch.lt", "torch.tensor", "numpy.round", "numpy.mean", "torch.nonzero", "torch.optim.SGD", "numpy.zeros", "torch.nn.MSELoss" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
yangwenbo99/UNIQUE
[ "50136f3169b82f20c8677f36c1b0882905b6d809" ]
[ "plot1.py" ]
[ "#!/bin/python3\n\n'''\nThis file is to plot a graph with the following setting.\n\n1. We first select an image x_0\n2. We then add some pertubation to the image to get x_1 (its type shall\n configurable in the future, but we set it to be random or loaded from file\n currently)\n3. Next, we plot f(x) for all x on the segment x_0 to x_1\n4. Finally, we optionally save the pertuabation for future work\n\nExample:\n python plot1.py --train '' --network lfc --ranking True --fidelity True --std_modeling True --std_loss '' --margin 0.025 --batch_size 128 --batch_size2 32 --image_size 384 --max_epochs 3 --lr 1e-4 --decay_interval 3 --decay_ratio 0.1 --fixvar --max_epochs2 12 --batch_size=16 --batch_size2=16 --ckpt_path=checkpoints_many/lfc -x /data_partition/yang/fyp/adv_1/IQA_database_syn/databaserelease2/jp2k/img4.bmp --pertubation_length 0.01\n\n python plot1.py --train '' --network lfc --ranking True --fidelity True --std_modeling True --std_loss '' --margin 0.025 --batch_size 128 --batch_size2 32 --image_size 384 --max_epochs 3 --lr 1e-4 --decay_interval 3 --decay_ratio 0.1 --fixvar --max_epochs2 12 --batch_size=16 --batch_size2=16 --ckpt_path=checkpoints_many/lfc_lip -x /data_partition/yang/fyp/adv_1/IQA_database_syn/databaserelease2/jp2k/img4.bmp --pertubation_length 0.01\n\n\n python plot1.py --train '' --network lfc --ranking True --fidelity True --std_modeling True --std_loss '' --margin 0.025 --batch_size 128 --batch_size2 32 --image_size 384 --max_epochs 3 --lr 1e-4 --decay_interval 3 --decay_ratio 0.1 --fixvar --max_epochs2 12 --batch_size=16 --batch_size2=16 --ckpt_path=checkpoints_many/lfc_nom -x /data_partition/yang/fyp/adv_1/IQA_database_syn/databaserelease2/jp2k/img4.bmp --pertubation_length 0.01 --force_normalization\n\n python plot1.py --train '' --network lfc_relu --ranking True --fidelity True --std_modeling True --std_loss '' --margin 0.025 --batch_size 128 --batch_size2 32 --image_size 384 --max_epochs 3 --lr 1e-4 --decay_interval 3 --decay_ratio 0.1 --fixvar --max_epochs2 12 --batch_size=16 --batch_size2=16 --ckpt_path=checkpoints_many/lfc_relu_nom -x /data_partition/yang/fyp/adv_1/IQA_database_syn/databaserelease2/jp2k/img4.bmp --pertubation_length 0.01 --force_normalization\n\n python plot1.py --train '' --network lfc_relu --ranking True --fidelity True --std_modeling True --std_loss '' --margin 0.025 --batch_size 128 --batch_size2 32 --image_size 384 --max_epochs 3 --lr 1e-4 --decay_interval 3 --decay_ratio 0.1 --fixvar --max_epochs2 12 --batch_size=16 --batch_size2=16 --ckpt_path=checkpoints_many/lfc_relu_nom_lip -x /data_partition/yang/fyp/adv_1/IQA_database_syn/databaserelease2/jp2k/img4.bmp --pertubation_length 0.01 --force_normalization\n'''\n\nimport argparse\nimport TrainModel\nimport scipy.io as sio\nimport os\nimport torch\nfrom PIL import Image\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom torchvision import transforms\n\ndef parse_config():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('-x', '--img', type=str, help='the base image')\n parser.add_argument('-p', '--pertubation', type=str, default='',\n help='the pertubation of the image, will be randomly generated if not presented')\n parser.add_argument('--pertubation_length', type=float, default=0.01,\n help='the length of the pertubataion, if random generation is nessesary')\n parser.add_argument('-s', '--save_pertubation', type=str, default='',\n help='whether the pertubation should be saved')\n\n parser.add_argument(\"--train\", type=bool, default=True)\n 
parser.add_argument('--get_scores', type=bool, default=False)\n parser.add_argument(\"--use_cuda\", type=bool, default=True)\n # parser.add_argument(\"--device\", type=str, default=\"cuda\")\n parser.add_argument(\"--resume\", action='store_true')\n parser.add_argument(\"--seed\", type=int, default=19901116)\n\n parser.add_argument(\"--backbone\", type=str, default='resnet34')\n parser.add_argument(\"--fc\", type=bool, default=True)\n parser.add_argument('--scnn_root', type=str, default='saved_weights/scnn.pkl')\n\n parser.add_argument(\"--network\", type=str, default=\"basecnn\",\n help='basecnn or dbcnn or lfc')\n\n parser.add_argument(\"--representation\", type=str, default=\"BCNN\")\n\n parser.add_argument(\"--ranking\", type=bool, default=True,\n help='True for learning-to-rank False for regular regression')\n\n parser.add_argument(\"--fidelity\", type=bool, default=True,\n help='True for fidelity loss False for regular ranknet with CE loss')\n\n parser.add_argument(\"--std_modeling\", type=bool,\n default=True) # True for modeling std False for not\n parser.add_argument(\"--std_loss\", type=bool, default=True)\n parser.add_argument(\"--fixvar\", action='store_true') #+\n parser.add_argument(\"--force_normalization\", action='store_true')\n parser.add_argument(\"--lipschitz\", action='store_true')\n parser.add_argument(\"--margin\", type=float, default=0.025)\n\n parser.add_argument(\"--split\", type=int, default=1)\n parser.add_argument(\"--trainset\", type=str, default=\"./IQA_database/\")\n parser.add_argument(\"--live_set\", type=str, default=\"./IQA_database/databaserelease2/\")\n parser.add_argument(\"--csiq_set\", type=str, default=\"./IQA_database/CSIQ/\")\n parser.add_argument(\"--tid2013_set\", type=str, default=\"./IQA_database/TID2013/\")\n parser.add_argument(\"--bid_set\", type=str, default=\"./IQA_database/BID/\")\n #parser.add_argument(\"--cid_set\", type=str, default=\"./IQA_database/CID2013_camera/\")\n parser.add_argument(\"--clive_set\", type=str, default=\"./IQA_database/ChallengeDB_release/\")\n parser.add_argument(\"--koniq10k_set\", type=str, default=\"./IQA_database/koniq-10k/\")\n parser.add_argument(\"--kadid10k_set\", type=str, default=\"./IQA_database/kadid10k/\")\n\n parser.add_argument(\"--eval_live\", type=bool, default=True)\n parser.add_argument(\"--eval_csiq\", type=bool, default=True)\n parser.add_argument(\"--eval_tid2013\", type=bool, default=False)\n parser.add_argument(\"--eval_kadid10k\", type=bool, default=True)\n parser.add_argument(\"--eval_bid\", type=bool, default=True)\n parser.add_argument(\"--eval_clive\", type=bool, default=True)\n parser.add_argument(\"--eval_koniq10k\", type=bool, default=True)\n\n parser.add_argument(\"--split_modeling\", type=bool, default=False)\n\n parser.add_argument('--ckpt_path', default='./checkpoint', type=str,\n metavar='PATH', help='path to checkpoints')\n parser.add_argument('--ckpt', default=None, type=str, help='name of the checkpoint to load')\n\n parser.add_argument(\"--train_txt\", type=str, default='train.txt') # train.txt | train_synthetic.txt | train_authentic.txt | train_sub2.txt | train_score.txt\n\n parser.add_argument(\"--batch_size\", type=int, default=128)\n parser.add_argument(\"--batch_size2\", type=int, default=32)\n parser.add_argument(\"--image_size\", type=int, default=384, help='None means random resolution')\n parser.add_argument(\"--max_epochs\", type=int, default=3)\n parser.add_argument(\"--max_epochs2\", type=int, default=12)\n parser.add_argument(\"--lr\", type=float, 
default=1e-4)\n parser.add_argument(\"--decay_interval\", type=int, default=3)\n parser.add_argument(\"--decay_ratio\", type=float, default=0.1)\n parser.add_argument(\"--epochs_per_eval\", type=int, default=1)\n parser.add_argument(\"--epochs_per_save\", type=int, default=1)\n\n parser.add_argument(\"--verbose\", action='store_true')\n\n config = parser.parse_args()\n config.to_test = []\n\n return config\n\n\ndef main(config):\n t = TrainModel.Trainer(config)\n # check option compatibility\n if config.fixvar and not config.network.startswith('lfc'):\n raise NotImplementedError()\n if str(config.backbone).startswith('lfc') and not config.std_modeling:\n raise NotImplementedError()\n\n\n model = t.model\n pil_img = Image.open(config.img)\n # pil_img = pil_img.reshape((1,) + tuple(pil_img.shape))\n img = t.test_transform(pil_img).to(t.device)\n\n # load a saved perturbation if one is given, otherwise draw a random one\n if config.pertubation:\n with open(config.pertubation, 'rb') as f:\n pertubation = torch.load(f)\n else:\n pertubation = torch.rand(img.shape) * config.pertubation_length\n pertubation = pertubation.to(t.device)\n\n img = img.unsqueeze(0)\n print(img.shape)\n\n if config.save_pertubation:\n with open(config.save_pertubation, 'wb') as f:\n torch.save(pertubation, f)\n\n should_normalize = not config.network.startswith('lfc') or config.force_normalization\n\n if should_normalize:\n normalization_transform = \\\n transforms.Normalize(mean=(0.485, 0.456, 0.406),\n std=(0.229, 0.224, 0.225))\n pertubation = normalization_transform(pertubation)\n\n # evaluate the model at 100 points on the segment from img to img + pertubation\n x = list(np.linspace(0, 1, 100))\n y = [t.predict_single_image(img + p * pertubation).detach().cpu().numpy() for p in x]\n plt.plot(x, y)\n plt.show()\n\n\nif __name__ == \"__main__\":\n config = parse_config()\n main(config)\n" ]
[ [ "numpy.linspace", "torch.load", "matplotlib.pyplot.plot", "torch.rand", "matplotlib.pyplot.show", "torch.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
AlexErfan/Image_manipulation_detection
[ "f07008b86112ae7d40a3728c715c53b6054ecc70" ]
[ "lib/datasets/dist_fake.py" ]
[ "# --------------------------------------------------------\n# Fast R-CNN\n# Copyright (c) 2015 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Peng Zhou\n# --------------------------------------------------------\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nfrom lib.datasets.imdb import imdb\nimport lib.datasets.ds_utils as ds_utils\nimport numpy as np\nimport scipy.sparse\nimport scipy.io as sio\nimport lib.utils.cython_bbox\nimport pickle\nimport subprocess\nimport uuid\nimport pdb\nfrom .voc_eval import voc_eval\nfrom lib.config import config as cfg \nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\nclass dist_fake(imdb):\n def __init__(self, image_set, year, dist_path=None):\n imdb.__init__(self, image_set)\n self._year = year\n self._image_set = image_set.split('dist_')[1]\n self._dist_path = self._get_default_path() if dist_path is None \\\n else dist_path\n self._data_path=self._dist_path\n self._classes = ('__background__', # always index 0\n 'tamper','authentic')\n self._classes = ('authentic', # always index 0\n 'tamper')\n #self.classes =('authentic', # always index 0\n #'splicing','removal')\n self._class_to_ind = dict(list(zip(self.classes, list(range(self.num_classes)))))\n self._image_ext = {'.png','.jpg','.tif','.bmp','.JPG'}\n self._image_index = self._load_image_set_index()\n # Default to roidb handler\n self._roidb_handler = self.gt_roidb\n\n assert os.path.exists(self._data_path), \\\n 'Path does not exist: {}'.format(self._data_path)\n\n def image_path_at(self, i):\n \"\"\"\n Return the absolute path to image i in the image sequence.\n \"\"\"\n return self.image_path_from_index(os.path.splitext(self._image_index[i].split(' ')[0])[0])\n\n def image_path_from_index(self, index):\n \"\"\"\n Construct an image path from the image's \"index\" identifier.\n \"\"\"\n for ext in self._image_ext:\n #image_path = os.path.join('/home-3/[email protected]/work/xintong/medifor/portrait/test_data',\n #index + ext)\n image_path = os.path.join(self._data_path,\n index + ext)\n image_path1=os.path.join('/home-3/[email protected]/work/pengzhou/dataset/NC2016_Test0613',\n index + ext)\n if os.path.isfile(image_path):\n return image_path\n elif os.path.isfile(image_path1):\n return image_path1\n else:\n continue\n assert os.path.isfile(image_path) and os.path.isfile(image_path1), \\\n 'Path does not exist: {}'.format(image_path)\n return image_path\n\n def _load_image_set_index(self):\n \"\"\"\n Load the indexes listed in this dataset's image set file.\n \"\"\"\n # Example path to image set file:\n # self._devkit_path + /VOCdevkit2007/VOC2007/ImageSets/Main/val.txt\n image_set_file = os.path.join(self._data_path,\n self._image_set + '.txt')\n assert os.path.exists(image_set_file), \\\n 'Path does not exist: {}'.format(image_set_file)\n with open(image_set_file) as f:\n image_index = [x.strip() for x in f.readlines()]\n #print(image_index)\n return image_index\n\n def _get_default_path(self):\n \"\"\"\n Return the default path where PASCAL VOC is expected to be installed.\n \"\"\"\n return os.path.join(cfg.DATA_DIR, 'NC2016_Test0613')\n\n def gt_roidb(self):\n \"\"\"\n Return the database of ground-truth regions of interest.\n\n This function loads/saves from/to a cache file to speed up future calls.\n \"\"\"\n cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')\n if os.path.exists(cache_file):\n with open(cache_file, 'rb') 
as fid:\n try:\n roidb = pickle.load(fid)\n except:\n roidb = pickle.load(fid, encoding='bytes')\n print('{} gt roidb loaded from {}'.format(self.name, cache_file))\n return roidb\n\n gt_roidb = [self.roidb_gt(index)\n for index in self.image_index]\n with open(cache_file, 'wb') as fid:\n pickle.dump(gt_roidb, fid, pickle.HIGHEST_PROTOCOL)\n print('wrote gt roidb to {}'.format(cache_file))\n\n return gt_roidb\n\n def rpn_roidb(self):\n if int(self._year) == 2007 or self._image_set != 'test':\n gt_roidb = self.gt_roidb()\n rpn_roidb = self._load_rpn_roidb(gt_roidb)\n roidb = imdb.merge_roidbs(gt_roidb, rpn_roidb)\n else:\n roidb = self._load_rpn_roidb(None)\n\n return roidb\n def roidb_gt(self,image_id):\n num_objs = int(len(image_id.split(' ')[1:])/5)\n\n boxes = np.zeros((num_objs, 4), dtype=np.uint16)\n gt_classes = np.zeros((num_objs), dtype=np.int32)\n overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)\n # \"Seg\" area for pascal is just the box area\n seg_areas = np.zeros((num_objs), dtype=np.float32)\n\n # Load object bounding boxes into a data frame.\n for ix in range(num_objs):\n bbox = image_id.split(' ')[ix*5+1:ix*5+5]\n # Make pixel indexes 0-based\n x1 = float(bbox[0]) \n y1 = float(bbox[1]) \n x2 = float(bbox[2]) \n y2 = float(bbox[3])\n if x1<0:\n x1=0\n if y1<0:\n y1=0 \n try:\n cls=self._class_to_ind[image_id.split(' ')[ix*5+5]]\n except:\n if int(image_id.split(' ')[ix*5+5])==0:\n print('authentic')\n cls=2\n else:\n cls = int(image_id.split(' ')[ix*5+5])\n boxes[ix, :] = [x1, y1, x2, y2]\n gt_classes[ix] = cls\n overlaps[ix, cls] = 1.0\n seg_areas[ix] = (x2 - x1 ) * (y2 - y1)\n\n overlaps = scipy.sparse.csr_matrix(overlaps)\n\n return {'boxes': boxes,\n 'gt_classes': gt_classes,\n 'gt_overlaps': overlaps,\n 'flipped': False,\n 'JPGed':False,\n 'noised':False,\n 'seg_areas': seg_areas}\n\n def _load_rpn_roidb(self, gt_roidb):\n filename = self.config['rpn_file']\n print('loading {}'.format(filename))\n assert os.path.exists(filename), \\\n 'rpn data not found at: {}'.format(filename)\n with open(filename, 'rb') as f:\n box_list = pickle.load(f)\n return self.create_roidb_from_box_list(box_list, gt_roidb)\n\n def _load_pascal_annotation(self, index):\n \"\"\"\n Load image and bounding boxes info from XML file in the PASCAL VOC\n format.\n \"\"\"\n filename = os.path.join(self._data_path, 'Annotations', index + '.xml')\n tree = ET.parse(filename)\n objs = tree.findall('object')\n if not self.config['use_diff']:\n # Exclude the samples labeled as difficult\n non_diff_objs = [\n obj for obj in objs if int(obj.find('difficult').text) == 0]\n # if len(non_diff_objs) != len(objs):\n # print 'Removed {} difficult objects'.format(\n # len(objs) - len(non_diff_objs))\n objs = non_diff_objs\n num_objs = len(objs)\n\n boxes = np.zeros((num_objs, 4), dtype=np.uint16)\n gt_classes = np.zeros((num_objs), dtype=np.int32)\n overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)\n # \"Seg\" area for pascal is just the box area\n seg_areas = np.zeros((num_objs), dtype=np.float32)\n\n # Load object bounding boxes into a data frame.\n for ix, obj in enumerate(objs):\n bbox = obj.find('bndbox')\n # Make pixel indexes 0-based\n x1 = float(bbox.find('xmin').text) - 1\n y1 = float(bbox.find('ymin').text) - 1\n x2 = float(bbox.find('xmax').text) - 1\n y2 = float(bbox.find('ymax').text) - 1\n cls = self._class_to_ind[obj.find('name').text.lower().strip()]\n boxes[ix, :] = [x1, y1, x2, y2]\n gt_classes[ix] = cls\n overlaps[ix, cls] = 1.0\n seg_areas[ix] = (x2 - x1 + 1) * 
(y2 - y1 + 1)\n\n overlaps = scipy.sparse.csr_matrix(overlaps)\n\n return {'boxes': boxes,\n 'gt_classes': gt_classes,\n 'gt_overlaps': overlaps,\n 'flipped': False,\n 'seg_areas': seg_areas}\n\n def _get_comp_id(self):\n comp_id = (self._comp_id + '_' + self._salt if self.config['use_salt']\n else self._comp_id)\n return comp_id\n\n def _get_voc_results_file_template(self):\n # VOCdevkit/results/VOC2007/Main/<comp_id>_det_test_aeroplane.txt\n filename = 'nist_' + self._image_set + '_{:s}.txt'\n path = os.path.join(\n '.',\n filename)\n return path\n\n def _get_voc_noise_results_file_template(self):\n # VOCdevkit/results/VOC2007/Main/<comp_id>_det_test_aeroplane.txt\n filename = 'nist_' + self._image_set + '_{:s}_noise.txt'\n path = os.path.join(\n '.',\n filename)\n return path\n\n def _write_voc_results_file(self, all_boxes):\n for cls_ind, cls in enumerate(self.classes):\n if cls == '__background__':\n continue\n print('Writing {} VOC results file'.format(cls))\n filename = self._get_voc_results_file_template().format(cls)\n print(filename)\n with open(filename, 'w') as f:\n for im_ind, index in enumerate(self.image_index):\n dets = all_boxes[cls_ind][im_ind]\n if dets == []:\n continue\n # the VOCdevkit expects 1-based indices\n for k in range(dets.shape[0]):\n #pdb.set_trace()\n f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\\n'.format(index.split(' ')[0], dets[k, -1],\n dets[k, 0] + 1, dets[k, 1] + 1,\n dets[k, 2] + 1, dets[k, 3] + 1))\n #pdb.set_trace()\n\n def _do_python_eval(self, output_dir='output'):\n annopath = os.path.join(\n self._dist_path,\n 'coco_multi' ,\n 'Annotations',\n '{:s}.xml')\n imagesetfile = os.path.join(\n self._dist_path,\n self._image_set + '.txt')\n cachedir = os.path.join(self._dist_path, 'annotations_cache')\n aps = []\n # The PASCAL VOC metric changed in 2010\n #use_07_metric = True if int(self._year) < 2010 else False\n use_07_metric = False\n print('dist metric? 
' + ('Yes' if use_07_metric else 'No'))\n if not os.path.isdir(output_dir):\n os.mkdir(output_dir)\n for i, cls in enumerate(self._classes):\n if cls == '__background__' or cls == self.classes[0]:\n cls_ind=0\n continue\n else:\n cls_ind=self._class_to_ind[cls]\n #elif cls=='median_filtering':\n #cls_ind=3\n #continue\n filename = self._get_voc_results_file_template().format(cls)\n filename2 = self._get_voc_noise_results_file_template().format(cls)\n print(cls_ind)\n rec, prec, ap = voc_eval(\n filename,filename2, annopath, imagesetfile, cls_ind, cachedir, ovthresh=0.5,\n use_07_metric=use_07_metric,fuse=False)\n aps += [ap]\n print(('AP for {} = {:.4f},recall = {:.4f}, precision = {:.4f}'.format(cls, ap,rec[-1],prec[-1])))\n with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:\n pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)\n fig=plt.figure()\n plt.plot(rec,prec)\n fig.suptitle('PR curve for {} detection'.format(cls),fontsize=20)\n plt.xlabel('recall',fontsize=15)\n plt.xlim((0,1.0))\n plt.ylim((0,1.0))\n plt.ylabel('precision',fontsize=15)\n fig.savefig('{}.jpg'.format(cls))\n\n print(('Mean AP = {:.4f}'.format(np.mean(aps))))\n print('~~~~~~~~')\n print('Results:')\n for ap in aps:\n print(('{:.3f}'.format(ap)))\n print(('{:.3f}'.format(np.mean(aps))))\n print('~~~~~~~~')\n print('')\n print('--------------------------------------------------------------')\n print('Results computed with the **unofficial** Python eval code.')\n print('Results should be very close to the official MATLAB eval code.')\n print('Recompute with `./tools/reval.py --matlab ...` for your paper.')\n print('-- Thanks, The Management')\n print('--------------------------------------------------------------')\n\n def _do_matlab_eval(self, output_dir='output'):\n print('-----------------------------------------------------')\n print('Computing results with the official MATLAB eval code.')\n print('-----------------------------------------------------')\n path = os.path.join(cfg.ROOT_DIR, 'lib', 'datasets',\n 'VOCdevkit-matlab-wrapper')\n cmd = 'cd {} && '.format(path)\n cmd += '{:s} -nodisplay -nodesktop '.format(cfg.MATLAB)\n cmd += '-r \"dbstop if error; '\n cmd += 'voc_eval(\\'{:s}\\',\\'{:s}\\',\\'{:s}\\',\\'{:s}\\'); quit;\"' \\\n .format(self._devkit_path, self._get_comp_id(),\n self._image_set, output_dir)\n print(('Running:\\n{}'.format(cmd)))\n status = subprocess.call(cmd, shell=True)\n\n def evaluate_detections(self, all_boxes, output_dir):\n self._write_voc_results_file(all_boxes)\n self._do_python_eval(output_dir)\n #if self.config['matlab_eval']:\n #self._do_matlab_eval(output_dir)\n if self.config['cleanup']:\n for cls in self._classes:\n if cls == '__background__':\n continue\n filename = self._get_voc_results_file_template().format(cls)\n #os.remove(filename)\n\n def competition_mode(self, on):\n if on:\n self.config['use_salt'] = False\n self.config['cleanup'] = False\n else:\n self.config['use_salt'] = True\n self.config['cleanup'] = True\n\n\nif __name__ == '__main__':\n from datasets.dist_fake import dist_fake\n\n d = dist_fake('trainval', '2007')\n res = d.roidb\n from IPython import embed;\n\n embed()\n" ]
[ [ "matplotlib.use", "matplotlib.pyplot.ylim", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlim", "matplotlib.pyplot.ylabel", "numpy.mean", "matplotlib.pyplot.xlabel", "numpy.zeros", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
andrerubeis/AIF360
[ "c0ce6f2e3eff9cab0ccce0bc0a05b681a5df7e44", "c0ce6f2e3eff9cab0ccce0bc0a05b681a5df7e44" ]
[ "examples/demo_optim_data_preproc..py", "aif360/datasets/standard_dataset.py" ]
[ "# %% md\n\n#### This notebook demonstrates the use of an optimized data pre-processing algorithm for bias mitigation\n\n# - The\n# debiasing\n# function\n# used is implemented in the\n# `OptimPreproc`\n\n#\n# class .\n# - Define\n# parameters\n# for optimized pre - processing specific to the dataset.\n#\n#\n# - Divide\n# the\n# dataset\n# into\n# training, validation, and testing\n# partitions.\n# - Learn\n# the\n# optimized\n# pre - processing\n# transformation\n# from the training\n#\n# data.\n# - Train\n# classifier\n# on\n# original\n# training\n# data.\n# - Estimate\n# the\n# optimal\n# classification\n# threshold, that\n# maximizes\n# balanced\n# accuracy\n# without\n# fairness\n# constraints(\n# from the original\n#\n# validation\n# set).\n# - Determine\n# the\n# prediction\n# scores\n# for original testing data.Using the estimated optimal classification threshold, compute accuracy and fairness metrics.\n# - Transform\n# the\n# testing\n# set\n# using\n# the\n# learned\n# probabilistic\n# transformation.\n# - Determine\n# the\n# prediction\n# scores\n# for transformed testing data.Using the estimated optimal classification threshold, compute accuracy and fairness metrics.\n#\n\n# %%\n\n# Load all necessary packages\nimport sys\n\nsys.path.append(\"../\")\nimport numpy as np\nfrom tqdm import tqdm\n\n\nfrom aif360.datasets import BinaryLabelDataset\nfrom aif360.datasets import AdultDataset, GermanDataset, CompasDataset\nfrom aif360.metrics import BinaryLabelDatasetMetric\nfrom aif360.metrics import ClassificationMetric\nfrom aif360.metrics.utils import compute_boolean_conditioning_vector\nfrom aif360.algorithms.preprocessing.optim_preproc import OptimPreproc\nfrom aif360.algorithms.preprocessing.optim_preproc_helpers.data_preproc_functions \\\n import load_preproc_data_adult, load_preproc_data_german, load_preproc_data_compas\nfrom aif360.algorithms.preprocessing.optim_preproc_helpers.distortion_functions \\\n import get_distortion_adult, get_distortion_german, get_distortion_compas\nfrom aif360.algorithms.preprocessing.optim_preproc_helpers.opt_tools import OptTools\nfrom common_utils import compute_metrics\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.metrics import accuracy_score\n\nfrom IPython.display import Markdown, display\nimport matplotlib.pyplot as plt\n\n# %% md\n\n#### Load dataset and specify options\n\n# %%\n\n# import dataset\ndataset_used = \"adult\" # \"adult\", \"german\", \"compas\"\nprotected_attribute_used = 1 # 1, 2\n\nif dataset_used == \"adult\":\n if protected_attribute_used == 1:\n privileged_groups = [{'sex': 1}]\n unprivileged_groups = [{'sex': 0}]\n dataset_orig = load_preproc_data_adult(['sex'])\n else:\n privileged_groups = [{'race': 1}]\n unprivileged_groups = [{'race': 0}]\n dataset_orig = load_preproc_data_adult(['race'])\n\n optim_options = {\n \"distortion_fun\": get_distortion_adult,\n \"epsilon\": 0.05,\n \"clist\": [0.99, 1.99, 2.99],\n \"dlist\": [.1, 0.05, 0]\n }\n\nelif dataset_used == \"german\":\n if protected_attribute_used == 1:\n privileged_groups = [{'sex': 1}]\n unprivileged_groups = [{'sex': 0}]\n dataset_orig = load_preproc_data_german(['sex'])\n optim_options = {\n \"distortion_fun\": get_distortion_german,\n \"epsilon\": 0.05,\n \"clist\": [0.99, 1.99, 2.99],\n \"dlist\": [.1, 0.05, 0]\n }\n\n else:\n privileged_groups = [{'age': 1}]\n unprivileged_groups = [{'age': 0}]\n dataset_orig = load_preproc_data_german(['age'])\n optim_options = {\n 
\"distortion_fun\": get_distortion_german,\n \"epsilon\": 0.1,\n \"clist\": [0.99, 1.99, 2.99],\n \"dlist\": [.1, 0.05, 0]\n }\n\nelif dataset_used == \"compas\":\n if protected_attribute_used == 1:\n privileged_groups = [{'sex': 1}]\n unprivileged_groups = [{'sex': 0}]\n dataset_orig = load_preproc_data_compas(['sex'])\n else:\n privileged_groups = [{'race': 1}]\n unprivileged_groups = [{'race': 0}]\n dataset_orig = load_preproc_data_compas(['race'])\n\n optim_options = {\n \"distortion_fun\": get_distortion_compas,\n \"epsilon\": 0.05,\n \"clist\": [0.99, 1.99, 2.99],\n \"dlist\": [.1, 0.05, 0]\n }\n\n# random seed\nnp.random.seed(1)\n\n# Split into train, validation, and test\ndataset_orig_train, dataset_orig_vt = dataset_orig.split([0.7], shuffle=True)\ndataset_orig_valid, dataset_orig_test = dataset_orig_vt.split([0.5], shuffle=True)\n\n# %% md\n\n#### Display dataset attributes\n\n# %%\n\n# print out some labels, names, etc.\ndisplay(Markdown(\"#### Training Dataset shape\"))\nprint(dataset_orig_train.features.shape)\ndisplay(Markdown(\"#### Favorable and unfavorable labels\"))\nprint(dataset_orig_train.favorable_label, dataset_orig_train.unfavorable_label)\ndisplay(Markdown(\"#### Protected attribute names\"))\nprint(dataset_orig_train.protected_attribute_names)\ndisplay(Markdown(\"#### Privileged and unprivileged protected attribute values\"))\nprint(dataset_orig_train.privileged_protected_attributes,\n dataset_orig_train.unprivileged_protected_attributes)\ndisplay(Markdown(\"#### Dataset feature names\"))\nprint(dataset_orig_train.feature_names)\n\n# %% md\n\n#### Metric for original training data\n\n# %%\n\n# Metric for the original dataset\nmetric_orig_train = BinaryLabelDatasetMetric(dataset_orig_train,\n unprivileged_groups=unprivileged_groups,\n privileged_groups=privileged_groups)\ndisplay(Markdown(\"#### Original training dataset\"))\nprint(\n \"Difference in mean outcomes between unprivileged and privileged groups = %f\" % metric_orig_train.mean_difference())\n\n# %% md\n\n#### Train with and transform the original training data\n\n# %%\n\nOP = OptimPreproc(OptTools, optim_options,\n unprivileged_groups=unprivileged_groups,\n privileged_groups=privileged_groups)\n\nOP = OP.fit(dataset_orig_train)\n\n# Transform training data and align features\ndataset_transf_train = OP.transform(dataset_orig_train, transform_Y=True)\ndataset_transf_train = dataset_orig_train.align_datasets(dataset_transf_train)\n\n# %% md\n\n#### Metric with the transformed training data\n\n# %%\n\nmetric_transf_train = BinaryLabelDatasetMetric(dataset_transf_train,\n unprivileged_groups=unprivileged_groups,\n privileged_groups=privileged_groups)\ndisplay(Markdown(\"#### Transformed training dataset\"))\nprint(\n \"Difference in mean outcomes between unprivileged and privileged groups = %f\" % metric_transf_train.mean_difference())\n\n# %% md\n\n# Optimized\n# preprocessing\n# has\n# reduced\n# the\n# disparity in favorable\n# outcomes\n# between\n# the\n# privileged and unprivileged\n# groups(training\n# data).\n\n# %%\n\n### Testing\nassert np.abs(metric_transf_train.mean_difference()) < np.abs(metric_orig_train.mean_difference())\n\n# %% md\n\n#### Load, clean up original test data and compute metric\n\n# %%\n\ndataset_orig_test = dataset_transf_train.align_datasets(dataset_orig_test)\ndisplay(Markdown(\"#### Testing Dataset shape\"))\nprint(dataset_orig_test.features.shape)\n\nmetric_orig_test = BinaryLabelDatasetMetric(dataset_orig_test,\n unprivileged_groups=unprivileged_groups,\n 
privileged_groups=privileged_groups)\ndisplay(Markdown(\"#### Original test dataset\"))\nprint(\n \"Difference in mean outcomes between unprivileged and privileged groups = %f\" % metric_orig_test.mean_difference())\n\n# %% md\n\n#### Transform test data and compute metric\n\n# %%\n\ndataset_transf_test = OP.transform(dataset_orig_test, transform_Y=True)\ndataset_transf_test = dataset_orig_test.align_datasets(dataset_transf_test)\n\nmetric_transf_test = BinaryLabelDatasetMetric(dataset_transf_test,\n unprivileged_groups=unprivileged_groups,\n privileged_groups=privileged_groups)\ndisplay(Markdown(\"#### Transformed test dataset\"))\nprint(\n \"Difference in mean outcomes between unprivileged and privileged groups = %f\" % metric_transf_test.mean_difference())\n\n# %% md\n\n# Optimized preprocessing has reduced the disparity in favorable outcomes between the privileged and unprivileged groups (test data).\n\n# %%\n\n### Testing\nassert np.abs(metric_transf_test.mean_difference()) < np.abs(metric_orig_test.mean_difference())\n\n# %% md\n\n### Train classifier on original data\n\n# %%\n\n# Logistic regression classifier and predictions\nscale_orig = StandardScaler()\nX_train = scale_orig.fit_transform(dataset_orig_train.features)\ny_train = dataset_orig_train.labels.ravel()\n\nlmod = LogisticRegression()\nlmod.fit(X_train, y_train)\ny_train_pred = lmod.predict(X_train)\n\n# positive class index\npos_ind = np.where(lmod.classes_ == dataset_orig_train.favorable_label)[0][0]\n\ndataset_orig_train_pred = dataset_orig_train.copy()\ndataset_orig_train_pred.labels = y_train_pred\n\n# %% md\n\n#### Obtain scores original test set\n\n# %%\n\ndataset_orig_valid_pred = dataset_orig_valid.copy(deepcopy=True)\nX_valid = scale_orig.transform(dataset_orig_valid_pred.features)\ny_valid = dataset_orig_valid_pred.labels\ndataset_orig_valid_pred.scores = lmod.predict_proba(X_valid)[:, pos_ind].reshape(-1, 1)\n\ndataset_orig_test_pred = dataset_orig_test.copy(deepcopy=True)\nX_test = scale_orig.transform(dataset_orig_test_pred.features)\ny_test = dataset_orig_test_pred.labels\ndataset_orig_test_pred.scores = lmod.predict_proba(X_test)[:, pos_ind].reshape(-1, 1)\n\n# %% md\n\n### Find the optimal classification threshold from the validation set\n\n# %%\n\nnum_thresh = 100\nba_arr = np.zeros(num_thresh)\nclass_thresh_arr = np.linspace(0.01, 0.99, num_thresh)\nfor idx, class_thresh in enumerate(class_thresh_arr):\n fav_inds = dataset_orig_valid_pred.scores > class_thresh\n dataset_orig_valid_pred.labels[fav_inds] = dataset_orig_valid_pred.favorable_label\n dataset_orig_valid_pred.labels[~fav_inds] = dataset_orig_valid_pred.unfavorable_label\n\n classified_metric_orig_valid = ClassificationMetric(dataset_orig_valid,\n dataset_orig_valid_pred,\n unprivileged_groups=unprivileged_groups,\n privileged_groups=privileged_groups)\n\n ba_arr[idx] = 0.5 * (classified_metric_orig_valid.true_positive_rate() \\\n + classified_metric_orig_valid.true_negative_rate())\n\nbest_ind = np.where(ba_arr == np.max(ba_arr))[0][0]\nbest_class_thresh = class_thresh_arr[best_ind]\n\nprint(\"Best balanced accuracy (no fairness constraints) = %.4f\" % np.max(ba_arr))\nprint(\"Optimal classification threshold (no fairness constraints) = %.4f\" % best_class_thresh)\n\n# %% md\n\n### Predictions and fairness metrics from original test set\n\n# %%\n\ndisplay(Markdown(\"#### Predictions from original testing data\"))\n\nbal_acc_arr_orig = []\ndisp_imp_arr_orig = []\navg_odds_diff_arr_orig = 
[]\n\ndisplay(Markdown(\"#### Testing set\"))\ndisplay(Markdown(\"##### Raw predictions - No fairness constraints\"))\n\nfor thresh in tqdm(class_thresh_arr):\n\n fav_inds = dataset_orig_test_pred.scores > thresh\n dataset_orig_test_pred.labels[fav_inds] = dataset_orig_test_pred.favorable_label\n dataset_orig_test_pred.labels[~fav_inds] = dataset_orig_test_pred.unfavorable_label\n\n if (thresh == best_class_thresh):\n disp = True\n else:\n disp = False\n\n metric_test_bef = compute_metrics(dataset_orig_test, dataset_orig_test_pred,\n unprivileged_groups, privileged_groups, disp=disp)\n\n bal_acc_arr_orig.append(metric_test_bef[\"Balanced accuracy\"])\n avg_odds_diff_arr_orig.append(metric_test_bef[\"Average odds difference\"])\n disp_imp_arr_orig.append(metric_test_bef[\"Disparate impact\"])\n\n# %%\n\nfig, ax1 = plt.subplots(figsize=(10, 7))\nax1.plot(class_thresh_arr, bal_acc_arr_orig)\nax1.set_xlabel('Classification Thresholds', fontsize=16, fontweight='bold')\nax1.set_ylabel('Balanced Accuracy', color='b', fontsize=16, fontweight='bold')\nax1.xaxis.set_tick_params(labelsize=14)\nax1.yaxis.set_tick_params(labelsize=14)\n\nax2 = ax1.twinx()\nax2.plot(class_thresh_arr, np.abs(1.0 - np.array(disp_imp_arr_orig)), color='r')\nax2.set_ylabel('abs(1-disparate impact)', color='r', fontsize=16, fontweight='bold')\nax2.axvline(np.array(class_thresh_arr)[best_ind],\n color='k', linestyle=':')\nax2.yaxis.set_tick_params(labelsize=14)\nax2.grid(True)\n\ndisp_imp_at_best_bal_acc_orig = np.abs(1.0 - np.array(disp_imp_arr_orig))[best_ind]\n\n# %% md\n\n# ```abs(1 - disparate\n# impact)``` must\n# be\n# close\n# to\n# zero\n# for classifier predictions to be fair.\n\n# %% md\n\n### Train classifier on transformed data and obtain predictions with its fairness metrics\n\n# %%\n\nscale_transf = StandardScaler()\nX_train = scale_transf.fit_transform(dataset_transf_train.features)\ny_train = dataset_transf_train.labels.ravel()\n\nlmod = LogisticRegression()\nlmod.fit(X_train, y_train)\ny_train_pred = lmod.predict(X_train)\n\ndataset_transf_train_pred = dataset_transf_train.copy()\ndataset_transf_train_pred.labels = y_train_pred\n\n# %% md\n\n### Predictions and fairness metrics from transformed test set\n\n# %%\n\ndataset_transf_test_pred = dataset_transf_test.copy(deepcopy=True)\nX_test = scale_transf.transform(dataset_transf_test_pred.features)\ny_test = dataset_transf_test_pred.labels\ndataset_transf_test_pred.scores = lmod.predict_proba(X_test)[:, pos_ind].reshape(-1, 1)\n\n# %%\n\ndisplay(Markdown(\"#### Predictions from transformed testing data\"))\n\nbal_acc_arr_transf = []\ndisp_imp_arr_transf = []\navg_odds_diff_arr_transf = []\n\ndisplay(Markdown(\"#### Testing set\"))\ndisplay(Markdown(\"##### Transformed predictions - No fairness constraints\"))\n\nfor thresh in tqdm(class_thresh_arr):\n\n fav_inds = dataset_transf_test_pred.scores > thresh\n dataset_transf_test_pred.labels[fav_inds] = dataset_transf_test_pred.favorable_label\n dataset_transf_test_pred.labels[~fav_inds] = dataset_transf_test_pred.unfavorable_label\n\n if (thresh == best_class_thresh):\n disp = True\n else:\n disp = False\n\n metric_test_bef = compute_metrics(dataset_transf_test, dataset_transf_test_pred,\n unprivileged_groups, privileged_groups, disp=disp)\n\n bal_acc_arr_transf.append(metric_test_bef[\"Balanced accuracy\"])\n avg_odds_diff_arr_transf.append(metric_test_bef[\"Average odds difference\"])\n disp_imp_arr_transf.append(metric_test_bef[\"Disparate impact\"])\n\n# %%\n\nfig, ax1 = plt.subplots(figsize=(10, 
7))\nax1.plot(class_thresh_arr, bal_acc_arr_transf)\nax1.set_xlabel('Classification Thresholds', fontsize=16, fontweight='bold')\nax1.set_ylabel('Balanced Accuracy', color='b', fontsize=16, fontweight='bold')\nax1.xaxis.set_tick_params(labelsize=14)\nax1.yaxis.set_tick_params(labelsize=14)\n\nax2 = ax1.twinx()\nax2.plot(class_thresh_arr, np.abs(1.0 - np.array(disp_imp_arr_transf)), color='r')\nax2.set_ylabel('abs(1-disparate impact)', color='r', fontsize=16, fontweight='bold')\nax2.axvline(np.array(class_thresh_arr)[best_ind],\n color='k', linestyle=':')\nax2.yaxis.set_tick_params(labelsize=14)\nax2.grid(True)\n\ndisp_imp_at_best_bal_acc_transf = np.abs(1.0 - np.array(disp_imp_arr_transf))[best_ind]\n\n# %% md\n\n# ```abs(1 - disparate impact)``` must be close to zero for classifier predictions to be fair. This measure has improved using the classifier trained on the transformed data compared to the original data.\n\n\n# %%\n\n### testing\nassert disp_imp_at_best_bal_acc_transf < disp_imp_at_best_bal_acc_orig\n\n# %% md\n\n# Summary of Results\n# We show the optimal classification thresholds, and the fairness and accuracy metrics.\n\n# %% md\n\n### Classification Thresholds\n\n# | Dataset | Classification threshold |\n# | - | - |\n# | Adult | 0.2674 |\n# | German | 0.6732 |\n# | Compas | 0.5148 |\n\n# %% md\n\n### Fairness Metric: Disparate impact, Accuracy Metric: Balanced accuracy\n\n#### Performance\n\n# | Dataset | Sex (Acc-Bef) | Sex (Acc-Aft) | Sex (Fair-Bef) | Sex (Fair-Aft) | Race/Age (Acc-Bef) | Race/Age (Acc-Aft) | Race/Age (Fair-Bef) | Race/Age (Fair-Aft) |\n# | - | - | - | - | - | - | - | - | - |\n# | Adult(Test) | 0.7417 | 0.7021 | 0.2774 | 0.7729 | 0.7417 | 0.7408 | 0.4423 | 0.7645 |\n# | German(Test) | 0.6524 | 0.5698 | 0.9948 | 1.0664 | 0.6524 | 0.6067 | 0.3824 | 0.8228 |\n# | Compas(Test) | 0.6774 | 0.6606 | 0.6631 | 0.8085 | 0.6774 | 0.6790 | 0.6600 | 0.8430 |\n\n# %%\n\n\n", "from logging import warning\n\nimport numpy as np\nimport pandas as pd\n\nfrom aif360.datasets import BinaryLabelDataset\n\n\nclass StandardDataset(BinaryLabelDataset):\n \"\"\"Base class for every :obj:`BinaryLabelDataset` provided out of the box by\n aif360.\n\n It is not strictly necessary to inherit this class when adding custom\n datasets but it may be useful.\n\n This class is very loosely based on code from\n https://github.com/algofairness/fairness-comparison.\n \"\"\"\n\n def __init__(self, df, label_name, favorable_classes,\n protected_attribute_names, privileged_classes,\n instance_weights_name='', scores_name='',\n categorical_features=[], features_to_keep=[],\n features_to_drop=[], na_values=[], custom_preprocessing=None,\n metadata=None):\n \"\"\"\n Subclasses of StandardDataset should perform the following before\n calling `super().__init__`:\n\n 1. Load the dataframe from a raw file.\n\n Then, this class will go through a standard preprocessing routine which:\n\n 2. (optional) Performs some dataset-specific preprocessing (e.g.\n renaming columns/values, handling missing data).\n\n 3. Drops unrequested columns (see `features_to_keep` and\n `features_to_drop` for details).\n\n 4. Drops rows with NA values.\n\n 5. Creates a one-hot encoding of the categorical variables.\n\n 6. Maps protected attributes to binary privileged/unprivileged\n values (1/0).\n\n 7. 
Maps labels to binary favorable/unfavorable labels (1/0).\n\n Args:\n df (pandas.DataFrame): DataFrame on which to perform standard\n processing.\n label_name: Name of the label column in `df`.\n favorable_classes (list or function): Label values which are\n considered favorable or a boolean function which returns `True`\n if favorable. All others are unfavorable. Label values are\n mapped to 1 (favorable) and 0 (unfavorable) if they are not\n already binary and numerical.\n protected_attribute_names (list): List of names corresponding to\n protected attribute columns in `df`.\n privileged_classes (list(list or function)): Each element is\n a list of values which are considered privileged or a boolean\n function which returns `True` if privileged for the corresponding\n column in `protected_attribute_names`. All others are\n unprivileged. Values are mapped to 1 (privileged) and 0\n (unprivileged) if they are not already numerical.\n instance_weights_name (optional): Name of the instance weights\n column in `df`.\n categorical_features (optional, list): List of column names in the\n DataFrame which are to be expanded into one-hot vectors.\n features_to_keep (optional, list): Column names to keep. All others\n are dropped except those present in `protected_attribute_names`,\n `categorical_features`, `label_name` or `instance_weights_name`.\n Defaults to all columns if not provided.\n features_to_drop (optional, list): Column names to drop. *Note: this\n overrides* `features_to_keep`.\n na_values (optional): Additional strings to recognize as NA. See\n :func:`pandas.read_csv` for details.\n custom_preprocessing (function): A function object which\n acts on and returns a DataFrame (f: DataFrame -> DataFrame). If\n `None`, no extra preprocessing is applied.\n metadata (optional): Additional metadata to append.\n \"\"\"\n # 2. Perform dataset-specific preprocessing\n if custom_preprocessing:\n #recode labels and protected attributes, and bucket some features into groups (age, education years, ...)\n df = custom_preprocessing(df)\n\n # 3. Drop unrequested columns\n features_to_keep = features_to_keep or df.columns.tolist()\n\n #| is an equivalent way to perform the union operation\n # features_to_keep: ['Age (decade)', 'race', 'Education Years', 'Income Binary', 'sex']\n # protected_attribute_names: ['sex']\n # label_name: Income Binary\n keep = (set(features_to_keep) | set(protected_attribute_names)\n | set(categorical_features) | set([label_name]))\n\n #keep: {'Age (decade)', 'Education Years', 'Income Binary', 'race', 'sex'}\n if instance_weights_name:\n keep |= set([instance_weights_name])\n \n #sorted orders the columns; key must be a function that determines how\n #the ordering is done. key=df.columns.get_loc keeps the original column\n #order.\n\n #E.g. original dataset features: a,b,c,d,e,f feature_to_remove = e\n\n #without key=df.columns.get_loc: c,d,b,a,f\n #with key=df.columns.get_loc: a,b,c,d,f\n\n #print(df)\n df = df[sorted(keep - set(features_to_drop))]\n #print(df)\n\n #the same applies to categorical features, if present\n categorical_features = sorted(set(categorical_features) - set(features_to_drop), key=df.columns.get_loc)\n \n # 4. Remove any rows that have missing data.\n dropped = df.dropna()\n count = df.shape[0] - dropped.shape[0] #df.shape[0] gives the number of rows\n if count > 0:\n warning(\"Missing Data: {} rows removed from {}.\".format(count,\n type(self).__name__))\n df = dropped\n\n # 5. Create a one-hot encoding of the categorical variables.\n\n #Ex. 
s = pd.Series(list('abca'))\n\n # pd.get_dummies(s)\n # a b c\n # 0 1 0 0\n # 1 0 1 0\n # 2 0 0 1\n # 3 1 0 0\n\n df = pd.get_dummies(df, columns=categorical_features, prefix_sep='=')\n\n # 6. Map protected attributes to privileged/unprivileged\n privileged_protected_attributes = []\n unprivileged_protected_attributes = []\n #protected_attribute_names: sex (reweighting example), privileged_classes: [1.0]\n\n #map privileged and unprivileged values to numerical values\n for attr, vals in zip(protected_attribute_names, privileged_classes): #attr takes each protected attribute name while vals takes the corresponding privileged values\n privileged_values = [1.]\n unprivileged_values = [0.]\n if callable(vals): #vals is a boolean function: apply it elementwise\n df[attr] = df[attr].apply(vals)\n elif np.issubdtype(df[attr].dtype, np.number):\n # this attribute is numeric; no remapping needed\n privileged_values = vals\n unprivileged_values = list(set(df[attr]).difference(vals))\n\n else:\n # priv: boolean np array marking which entries hold a privileged value\n priv = np.logical_or.reduce(np.equal.outer(vals, df[attr].to_numpy()))\n df.loc[priv, attr] = privileged_values[0] #assign 1 to all the privileged values\n df.loc[~priv, attr] = unprivileged_values[0] #assign 0 to all the unprivileged values\n\n #Store for all the protected attributes the privileged and unprivileged values in the lists below\n #this is useful in particular for more than one protected attribute\n privileged_protected_attributes.append(\n np.array(privileged_values, dtype=np.float64))\n unprivileged_protected_attributes.append(\n np.array(unprivileged_values, dtype=np.float64))\n\n # 7. Make labels binary\n favorable_label = 1.\n unfavorable_label = 0.\n if callable(favorable_classes):\n df[label_name] = df[label_name].apply(favorable_classes)\n elif np.issubdtype(df[label_name], np.number) and len(set(df[label_name])) == 2:\n # labels are already binary; don't change them\n favorable_label = favorable_classes[0]\n unfavorable_label = set(df[label_name]).difference(favorable_classes).pop()\n else:\n #df[attr] is not numerical\n\n # find all instances which match any of the favorable classes\n #np.equal.outer builds a (classes x rows) boolean matrix; np.logical_or.reduce\n #collapses it into one boolean per row\n\n #pos is a numpy array of booleans (True: > 50k, False: <= 50k)\n pos = np.logical_or.reduce(np.equal.outer(favorable_classes, \n df[label_name].to_numpy()))\n #Assign the favorable label to the True entries and the unfavorable label to the False entries\n df.loc[pos, label_name] = favorable_label\n df.loc[~pos, label_name] = unfavorable_label\n\n super(StandardDataset, self).__init__(df=df, label_names=[label_name],\n protected_attribute_names=protected_attribute_names,\n privileged_protected_attributes=privileged_protected_attributes,\n unprivileged_protected_attributes=unprivileged_protected_attributes,\n instance_weights_name=instance_weights_name,\n scores_names=[scores_name] if scores_name else [],\n favorable_label=favorable_label,\n unfavorable_label=unfavorable_label, metadata=metadata)\n" ]
[ [ "sklearn.linear_model.LogisticRegression", "numpy.random.seed", "numpy.linspace", "matplotlib.pyplot.subplots", "numpy.max", "sklearn.preprocessing.StandardScaler", "numpy.array", "numpy.zeros", "numpy.where" ], [ "numpy.issubdtype", "numpy.array", "pandas.get_dummies" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
kasmith/geometry
[ "805b525ae8ffebb6bb1d84c094f76533d88dbb7a" ]
[ "geometry/shapes.py" ]
[ "\"\"\"Functions that work on collections of shapes\n\"\"\"\n\nfrom __future__ import division, print_function\nimport numpy as np\nfrom .convex import convex_area, convex_centroid\n\n__all__ = ['recenter_polygon', 'centroid_for_shapes',\n 'centroid_for_uncomputed_shapes', 'recenter_system',\n 'rescale_and_recenter_system', 'rotate_polygon',\n 'rotate_system', 'mirror_polygon', 'mirror_system',\n 'find_concave_outline']\n\ndef recenter_polygon(vertices):\n \"\"\"Returns a new convex polygon with centroid at (0,0)\n\n Args:\n vertices (list): list of (x,y) vertices of convex polygon\n\n Returns:\n A list just like the input with the recentered vertices (but possibly\n transformed into numpy arrays)\n \"\"\"\n centroid = convex_centroid(vertices)\n new_verts = []\n for v in vertices:\n v = np.array(v)\n new_verts.append(v - centroid)\n return new_verts\n\ndef centroid_for_shapes(centroids, areas = None):\n \"\"\"Calculates the centroid for a set of shapes\n\n Requires pre-computed centroids and areas\n\n Args:\n centroids (list): list of (x,y) centroids for each shape\n areas (list): list of areas (floats) for each shape (if not given,\n assumes they are all equal)\n\n Returns:\n The (x,y) position of the weighted centroid (as np.array)\n \"\"\"\n gc = np.zeros(2)\n area = 0\n if areas is None:\n areas = np.ones(len(centroids))\n for pc, a in zip(centroids, areas):\n gc += np.array(pc)*a\n area += a\n gc /= area\n return np.array(gc)\n\n\ndef centroid_for_uncomputed_shapes(shape_list):\n \"\"\"Like centroid_for_shapes but calculates centroids & areas\n\n Args:\n shape_list (list): a list of list of vertices (one for each shape)\n\n Returns:\n The (x,y) position of the weighted centroid (as np.array)\n \"\"\"\n centroids = []\n areas = []\n for s in shape_list:\n centroids.append(convex_centroid(s))\n areas.append(convex_area(s))\n return centroid_for_shapes(centroids, areas)\n\n\ndef recenter_system(shape_list):\n \"\"\"Recenters a set of shapes around the centroid of all of them\n\n Args:\n shape_list (list): a list of list of vertices (one for each shape)\n\n Returns:\n List of two items:\n * Similar format as input, but transformed so that calculating the\n centroid_for_uncomputed_shapes() on that list returns (0,0)\n * The grand centroid for the system in original coordinates\n \"\"\"\n centroids = []\n areas = []\n new_shapes = []\n # Decompose each of the individual shapes\n for s in shape_list:\n c = convex_centroid(s)\n a = convex_area(s)\n new_s = []\n for v in s:\n new_s.append(np.array(v) - c)\n centroids.append(c)\n areas.append(a)\n new_shapes.append(new_s)\n # Find the grand centroid & new centers of each shape\n center = centroid_for_shapes(centroids, areas)\n re_centroids = [c - center for c in centroids]\n # Go back and change the vertices of each shape\n final_shapes = []\n for ns,c in zip(new_shapes, re_centroids):\n final_shapes.append([s+c for s in ns])\n return final_shapes, center\n\n\ndef rescale_and_recenter_system(shape_list, total_area):\n \"\"\"Recenters a set of shapes and resizes them to have a total fixed area\n\n Args:\n shape_list (list): a list of list of vertices (one for each shape)\n total_area (float): the area to fix the shapes to\n\n Returns:\n List of two items:\n * Similar format as input, but transformed so that calculating the\n `centroid_for_uncomputed_shapes()` on that list returns (0,0) and summing\n the areas gets to `total_area`\n * The grand centroid for the system in original coordinates\n \"\"\"\n centroids = []\n areas = []\n new_shapes 
= []\n # Decompose each of the individual shapes\n for s in shape_list:\n c = convex_centroid(s)\n a = convex_area(s)\n new_s = []\n for v in s:\n new_s.append(np.array(v) - c)\n centroids.append(c)\n areas.append(a)\n new_shapes.append(new_s)\n # Find the grand centroid & new centers of each shape\n center = centroid_for_shapes(centroids, areas)\n re_centroids = [c - center for c in centroids]\n # Find rescaling factor\n tot_a = sum(areas)\n dim_scale = np.sqrt(total_area / tot_a)\n # Go back and change the vertices of each shape\n final_shapes = []\n for ns,c in zip(new_shapes, re_centroids):\n final_shapes.append([(s+c)*dim_scale for s in ns])\n return final_shapes, center\n\ndef rotate_polygon(vertices, angle, center_point = [0., 0.]):\n \"\"\"Rotates a shape around a given point (by default the origin)\n\n Args:\n vertices (list): A list of (x,y) vertices\n angle (float): Angle in radians to rotate counterclockwise\n center_point ([float, float]): (x,y) point to rotate around\n\n Returns:\n A list of vertices rotated around the center point\n \"\"\"\n np_o = np.array(center_point)\n np_vs = [np.array(v) - np_o for v in vertices]\n rot_mat = np.array([[np.cos(angle), -np.sin(angle)],\n [np.sin(angle), np.cos(angle)]])\n return [np.dot(rot_mat, v)+np_o for v in np_vs]\n\ndef rotate_system(shape_list, angle, center_point = None):\n \"\"\"Rotates a set of shapes around a given point\n\n If no center point is given, assume the center of mass of the shape\n\n Args:\n shape_list (list): A list of list of (x,y) vertices\n angle (float): Angle in radians to rotate counterclockwise\n center_point ([float, float]): (x,y) point to rotate around\n\n Returns:\n A new shape list with rotated vertices\n \"\"\"\n if center_point is None:\n center_point = centroid_for_uncomputed_shapes(shape_list)\n return [rotate_polygon(s, angle, center_point) for s in shape_list]\n\ndef mirror_polygon(vertices, axes=(False, True), center_point=None):\n \"\"\"Mirrors a polygon around an x or y line\n\n If center_point is None, mirror around the center of the shape\n\n Args:\n vertices (list): A list of (x,y) vertices\n axes ([bool, bool]): Whether to mirror around the (x,y) axes\n center_point ([float, float]): (x,y) point to mirror around\n\n Returns:\n A new polygon with mirrored vertices\n \"\"\"\n if center_point is None:\n center_point = convex_centroid(vertices)\n xm = -1 if axes[0] else 1\n ym = -1 if axes[1] else 1\n return [np.array([xm*(v[0]-center_point[0])+center_point[0],\n ym*(v[1]-center_point[1])+center_point[1]]) for v\n in vertices]\n\ndef mirror_system(shape_list, axes=(False, True), center_point=None):\n \"\"\"Mirrors a set of shapes around an x or y line\n\n Mirrors around the center of the system if center_point is None\n\n Args:\n shape_list (list): A list of list of (x,y) vertices\n axes ([bool, bool]): Whether to mirror around the (x,y) axes\n center_point ([float, float]): (x,y) point to mirror around\n\n Returns:\n A new shape list with mirrored vertices\n \"\"\"\n if center_point is None:\n center_point = centroid_for_uncomputed_shapes(shape_list)\n return [mirror_polygon(s, axes, center_point) for s in shape_list]\n\n\ndef _point_equal(p1, p2):\n return p1[0]==p2[0] and p1[1] == p2[1]\n\ndef _arr_eq(a1, a2):\n return all(_point_equal(p1,p2) for p1, p2 in zip(a1, a2))\n\ndef find_concave_outline(shape_list):\n \"\"\"Find the outline of a set of shapes\n\n Assuming all shapes have edges in common with other shapes where they touch,\n provides a set of vertices for drawing the outline\n\n Args:\n shape_list 
(list): A list of list of (x,y) vertices\n\n Returns:\n A list of ordered (x,y) vertices for drawing an outline\n \"\"\"\n # Find the leftmost point, breaking ties by the lowest y\n current_shape = shape_list[0]\n current_pt = current_shape[0]\n test_idx = 1\n next_test_dir = 1\n for s in shape_list:\n for i in range(len(s)):\n p = s[i]\n if ((p[0] < current_pt[0]) or\n (p[0] == current_pt[0] and p[1] < current_pt[1])):\n # Replace\n current_pt = p\n current_shape = s\n test_idx = (i+1) % len(s)\n next_test_dir = 1\n vertex_list = [current_pt]\n # Keep going until you reach back to the first point\n while not _point_equal(current_shape[test_idx], vertex_list[0]):\n # Iterate through all the shapes to try to find a matching edge\n checking = True\n for s in (s for s in shape_list if not _arr_eq(s, current_shape)):\n if checking: # Way to break out if match found\n for i in range(len(s)):\n spt = s[i]\n if _point_equal(current_pt, spt):\n spt_after = s[(i+1) % len(s)]\n spt_before = s[(i-1) % len(s)]\n test_pt = current_shape[test_idx]\n if _point_equal(test_pt, spt_after):\n test_idx = (i-1) % len(s)\n next_test_dir = -1\n current_shape = s\n checking = False\n elif _point_equal(test_pt, spt_before):\n test_idx = (i+1) % len(s)\n next_test_dir = 1\n current_shape = s\n checking = False\n # Have you exhausted all shapes?\n if checking:\n current_pt = current_shape[test_idx]\n vertex_list.append(current_pt)\n test_idx += next_test_dir\n test_idx %= len(current_shape)\n return vertex_list\n" ]
[ [ "numpy.dot", "numpy.sqrt", "numpy.cos", "numpy.sin", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
krfricke/pytorch-lightning
[ "fbd887df9d487da4c57d884e01b3401af140b1bc", "fbd887df9d487da4c57d884e01b3401af140b1bc", "fbd887df9d487da4c57d884e01b3401af140b1bc", "fbd887df9d487da4c57d884e01b3401af140b1bc" ]
[ "tests/strategies/test_ddp_strategy_with_comm_hook.py", "tests/utilities/test_apply_func_torchtext.py", "pytorch_lightning/utilities/__init__.py", "tests/utilities/test_cli.py" ]
[ "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom unittest import mock\n\nimport pytest\nimport torch\n\nfrom pytorch_lightning import Trainer\nfrom pytorch_lightning.strategies import DDPSpawnStrategy, DDPStrategy\nfrom pytorch_lightning.utilities import _TORCH_GREATER_EQUAL_1_10\nfrom tests.helpers import BoringModel\nfrom tests.helpers.runif import RunIf\n\nif torch.distributed.is_available():\n from torch.distributed.algorithms.ddp_comm_hooks import default_hooks as default\n from torch.distributed.algorithms.ddp_comm_hooks import powerSGD_hook as powerSGD\n\n if _TORCH_GREATER_EQUAL_1_10:\n import torch.distributed.algorithms.ddp_comm_hooks.post_localSGD_hook as post_localSGD\n\n\nclass TestDDPStrategy(DDPStrategy):\n def __init__(self, expected_ddp_comm_hook_name, *args, **kwargs):\n self.expected_ddp_comm_hook_name = expected_ddp_comm_hook_name\n super().__init__(*args, **kwargs)\n\n def teardown(self):\n # check here before unwrapping DistributedDataParallel in self.teardown\n attached_ddp_comm_hook_name = self.model._get_ddp_logging_data()[\"comm_hook\"]\n assert attached_ddp_comm_hook_name == self.expected_ddp_comm_hook_name\n return super().teardown()\n\n\n@RunIf(min_cuda_gpus=2, min_torch=\"1.9.0\", skip_windows=True, standalone=True)\ndef test_ddp_fp16_compress_comm_hook(tmpdir):\n \"\"\"Test for DDP FP16 compress hook.\"\"\"\n model = BoringModel()\n strategy = TestDDPStrategy(\n expected_ddp_comm_hook_name=default.fp16_compress_hook.__qualname__,\n ddp_comm_hook=default.fp16_compress_hook,\n )\n trainer = Trainer(\n max_epochs=1,\n accelerator=\"gpu\",\n devices=2,\n strategy=strategy,\n default_root_dir=tmpdir,\n sync_batchnorm=True,\n fast_dev_run=True,\n enable_progress_bar=False,\n enable_model_summary=False,\n )\n trainer.fit(model)\n assert trainer.state.finished, f\"Training failed with {trainer.state}\"\n\n\n@RunIf(min_cuda_gpus=2, min_torch=\"1.9.0\", skip_windows=True, standalone=True)\ndef test_ddp_sgd_comm_hook(tmpdir):\n \"\"\"Test for DDP FP16 compress hook.\"\"\"\n model = BoringModel()\n strategy = TestDDPStrategy(\n expected_ddp_comm_hook_name=powerSGD.powerSGD_hook.__qualname__,\n ddp_comm_state=powerSGD.PowerSGDState(process_group=None),\n ddp_comm_hook=powerSGD.powerSGD_hook,\n )\n trainer = Trainer(\n max_epochs=1,\n accelerator=\"gpu\",\n devices=2,\n strategy=strategy,\n default_root_dir=tmpdir,\n sync_batchnorm=True,\n fast_dev_run=True,\n enable_progress_bar=False,\n enable_model_summary=False,\n )\n trainer.fit(model)\n assert trainer.state.finished, f\"Training failed with {trainer.state}\"\n\n\n@RunIf(min_cuda_gpus=2, min_torch=\"1.9.0\", skip_windows=True, standalone=True)\ndef test_ddp_fp16_compress_wrap_sgd_comm_hook(tmpdir):\n \"\"\"Test for DDP FP16 compress wrapper for SGD hook.\"\"\"\n model = BoringModel()\n strategy = TestDDPStrategy(\n expected_ddp_comm_hook_name=default.fp16_compress_wrapper(powerSGD.powerSGD_hook).__qualname__,\n ddp_comm_state=powerSGD.PowerSGDState(process_group=None),\n 
ddp_comm_hook=powerSGD.powerSGD_hook,\n ddp_comm_wrapper=default.fp16_compress_wrapper,\n )\n trainer = Trainer(\n max_epochs=1,\n accelerator=\"gpu\",\n devices=2,\n strategy=strategy,\n default_root_dir=tmpdir,\n sync_batchnorm=True,\n fast_dev_run=True,\n enable_progress_bar=False,\n enable_model_summary=False,\n )\n trainer.fit(model)\n assert trainer.state.finished, f\"Training failed with {trainer.state}\"\n\n\n@RunIf(min_cuda_gpus=2, min_torch=\"1.9.0\", skip_windows=True, standalone=True)\ndef test_ddp_spawn_fp16_compress_comm_hook(tmpdir):\n \"\"\"Test for DDP Spawn FP16 compress hook.\"\"\"\n model = BoringModel()\n strategy = DDPSpawnStrategy(ddp_comm_hook=default.fp16_compress_hook)\n trainer = Trainer(\n max_epochs=1,\n accelerator=\"gpu\",\n devices=2,\n strategy=strategy,\n default_root_dir=tmpdir,\n sync_batchnorm=True,\n fast_dev_run=True,\n enable_progress_bar=False,\n enable_model_summary=False,\n )\n trainer.fit(model)\n assert trainer.state.finished, f\"Training failed with {trainer.state}\"\n\n\n@RunIf(min_cuda_gpus=2, min_torch=\"1.10.0\", skip_windows=True, standalone=True)\ndef test_ddp_post_local_sgd_comm_hook(tmpdir):\n \"\"\"Test for DDP post-localSGD hook.\"\"\"\n model = BoringModel()\n strategy = TestDDPStrategy(\n expected_ddp_comm_hook_name=post_localSGD.post_localSGD_hook.__qualname__,\n ddp_comm_state=post_localSGD.PostLocalSGDState(\n process_group=None,\n subgroup=None,\n start_localSGD_iter=8,\n ),\n ddp_comm_hook=post_localSGD.post_localSGD_hook,\n model_averaging_period=4,\n )\n trainer = Trainer(\n fast_dev_run=True,\n accelerator=\"gpu\",\n devices=2,\n strategy=strategy,\n default_root_dir=tmpdir,\n sync_batchnorm=True,\n enable_progress_bar=False,\n enable_model_summary=False,\n )\n trainer.fit(model)\n assert trainer.state.finished, f\"Training failed with {trainer.state}\"\n\n\n@RunIf(skip_windows=True, min_torch=\"1.10.0\", min_cuda_gpus=2, standalone=True)\[email protected](\"torch.distributed.algorithms.model_averaging.averagers.PeriodicModelAverager.average_parameters\")\ndef test_post_local_sgd_model_averaging(average_parameters_mock, tmpdir):\n \"\"\"Test that when using DDP with post-localSGD, model averaging is called.\"\"\"\n model = BoringModel()\n\n # test regular ddp does not call model averaging\n trainer = Trainer(\n fast_dev_run=True,\n accelerator=\"gpu\",\n devices=2,\n strategy=\"ddp\",\n default_root_dir=tmpdir,\n sync_batchnorm=True,\n enable_progress_bar=False,\n enable_model_summary=False,\n )\n\n trainer.fit(model)\n average_parameters_mock.assert_not_called()\n\n # test ddp with post-localSGD does call model averaging\n ddp_strategy = DDPStrategy(\n ddp_comm_state=post_localSGD.PostLocalSGDState(\n process_group=None,\n subgroup=None,\n start_localSGD_iter=8,\n ),\n ddp_comm_hook=post_localSGD.post_localSGD_hook,\n model_averaging_period=4,\n )\n\n trainer = Trainer(\n fast_dev_run=True,\n accelerator=\"gpu\",\n devices=2,\n strategy=ddp_strategy,\n default_root_dir=tmpdir,\n sync_batchnorm=True,\n )\n\n trainer.fit(model)\n average_parameters_mock.assert_called()\n\n\n@RunIf(skip_windows=True, min_torch=\"1.10.0\", min_cuda_gpus=2, standalone=True)\[email protected](\"torch.distributed.algorithms.model_averaging.averagers.PeriodicModelAverager.average_parameters\")\ndef test_post_local_sgd_model_averaging_value_error(average_parameters_mock, tmpdir):\n \"\"\"Test that when using DDP with post-localSGD a ValueError is thrown when the optimizer is\n ZeroRedundancyOptimizer.\"\"\"\n from torch.distributed.optim import 
ZeroRedundancyOptimizer\n\n class OptimizerModel(BoringModel):\n def configure_optimizers(self):\n return ZeroRedundancyOptimizer(params=self.parameters(), optimizer_class=torch.optim.Adam, lr=0.01)\n\n model = OptimizerModel()\n strategy = DDPStrategy(\n ddp_comm_state=post_localSGD.PostLocalSGDState(\n process_group=None,\n subgroup=None,\n start_localSGD_iter=8,\n ),\n ddp_comm_hook=post_localSGD.post_localSGD_hook,\n model_averaging_period=4,\n )\n\n trainer = Trainer(\n fast_dev_run=True,\n accelerator=\"gpu\",\n devices=2,\n strategy=strategy,\n default_root_dir=tmpdir,\n sync_batchnorm=True,\n enable_progress_bar=False,\n enable_model_summary=False,\n )\n\n with pytest.raises(ValueError, match=\"Currently model averaging cannot work with a distributed optimizer\"):\n trainer.fit(model)\n\n average_parameters_mock.assert_not_called()\n", "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport pytest\nimport torch\n\nfrom pytorch_lightning.utilities.apply_func import move_data_to_device\nfrom pytorch_lightning.utilities.imports import _TORCHTEXT_LEGACY\nfrom tests.helpers.runif import RunIf\nfrom tests.helpers.torchtext_utils import get_dummy_torchtext_data_iterator\n\n\[email protected](\"include_lengths\", [False, True])\[email protected](\"device\", [torch.device(\"cuda\", 0)])\[email protected](not _TORCHTEXT_LEGACY, reason=\"torchtext.legacy is deprecated.\")\n@RunIf(min_cuda_gpus=1)\ndef test_batch_move_data_to_device_torchtext_include_lengths(include_lengths, device):\n data_iterator, _ = get_dummy_torchtext_data_iterator(num_samples=3, batch_size=3, include_lengths=include_lengths)\n data_iter = iter(data_iterator)\n batch = next(data_iter)\n\n with pytest.deprecated_call(match=\"The `torchtext.legacy.Batch` object is deprecated\"):\n batch_on_device = move_data_to_device(batch, device)\n\n if include_lengths:\n # tensor with data\n assert batch_on_device.text[0].device == device\n # tensor with length of data\n assert batch_on_device.text[1].device == device\n else:\n assert batch_on_device.text.device == device\n\n\[email protected](\"include_lengths\", [False, True])\[email protected](not _TORCHTEXT_LEGACY, reason=\"torchtext.legacy is deprecated.\")\ndef test_batch_move_data_to_device_torchtext_include_lengths_cpu(include_lengths):\n test_batch_move_data_to_device_torchtext_include_lengths(include_lengths, torch.device(\"cpu\"))\n", "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the 
License.\n\"\"\"General utilities.\"\"\"\n\nimport numpy\n\nfrom pytorch_lightning.utilities.apply_func import move_data_to_device # noqa: F401\nfrom pytorch_lightning.utilities.distributed import AllGatherGrad # noqa: F401\nfrom pytorch_lightning.utilities.enums import ( # noqa: F401\n _AcceleratorType,\n _StrategyType,\n AMPType,\n DistributedType,\n GradClipAlgorithmType,\n LightningEnum,\n ModelSummaryMode,\n)\nfrom pytorch_lightning.utilities.grads import grad_norm # noqa: F401\nfrom pytorch_lightning.utilities.imports import ( # noqa: F401\n _APEX_AVAILABLE,\n _BAGUA_AVAILABLE,\n _DEEPSPEED_AVAILABLE,\n _FAIRSCALE_AVAILABLE,\n _FAIRSCALE_FULLY_SHARDED_AVAILABLE,\n _FAIRSCALE_OSS_FP16_BROADCAST_AVAILABLE,\n _GROUP_AVAILABLE,\n _HIVEMIND_AVAILABLE,\n _HOROVOD_AVAILABLE,\n _HPU_AVAILABLE,\n _HYDRA_AVAILABLE,\n _HYDRA_EXPERIMENTAL_AVAILABLE,\n _IPU_AVAILABLE,\n _IS_INTERACTIVE,\n _IS_WINDOWS,\n _module_available,\n _OMEGACONF_AVAILABLE,\n _POPTORCH_AVAILABLE,\n _RICH_AVAILABLE,\n _TORCH_GREATER_EQUAL_1_9,\n _TORCH_GREATER_EQUAL_1_10,\n _TORCH_GREATER_EQUAL_1_11,\n _TORCH_QUANTIZE_AVAILABLE,\n _TORCHTEXT_AVAILABLE,\n _TORCHVISION_AVAILABLE,\n _TPU_AVAILABLE,\n _XLA_AVAILABLE,\n)\nfrom pytorch_lightning.utilities.parameter_tying import find_shared_parameters, set_shared_parameters # noqa: F401\nfrom pytorch_lightning.utilities.parsing import AttributeDict, flatten_dict, is_picklable # noqa: F401\nfrom pytorch_lightning.utilities.rank_zero import ( # noqa: F401\n rank_zero_deprecation,\n rank_zero_info,\n rank_zero_only,\n rank_zero_warn,\n)\n\nFLOAT16_EPSILON = numpy.finfo(numpy.float16).eps\nFLOAT32_EPSILON = numpy.finfo(numpy.float32).eps\nFLOAT64_EPSILON = numpy.finfo(numpy.float64).eps\n", "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport inspect\nimport json\nimport os\nimport pickle\nimport sys\nfrom argparse import Namespace\nfrom contextlib import contextmanager, ExitStack, redirect_stdout\nfrom io import StringIO\nfrom typing import List, Optional, Union\nfrom unittest import mock\nfrom unittest.mock import ANY\n\nimport pytest\nimport torch\nimport yaml\nfrom packaging import version\nfrom torch.optim import SGD\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau, StepLR\n\nfrom pytorch_lightning import __version__, Callback, LightningDataModule, LightningModule, Trainer\nfrom pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint\nfrom pytorch_lightning.loggers import Logger, TensorBoardLogger\nfrom pytorch_lightning.plugins.environments import SLURMEnvironment\nfrom pytorch_lightning.trainer.states import TrainerFn\nfrom pytorch_lightning.utilities import _TPU_AVAILABLE\nfrom pytorch_lightning.utilities.cli import (\n _populate_registries,\n CALLBACK_REGISTRY,\n DATAMODULE_REGISTRY,\n instantiate_class,\n LightningArgumentParser,\n LightningCLI,\n LOGGER_REGISTRY,\n LR_SCHEDULER_REGISTRY,\n LRSchedulerTypeTuple,\n MODEL_REGISTRY,\n OPTIMIZER_REGISTRY,\n SaveConfigCallback,\n)\nfrom 
pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom pytorch_lightning.utilities.imports import _TORCHVISION_AVAILABLE\nfrom tests.helpers import BoringDataModule, BoringModel\nfrom tests.helpers.runif import RunIf\nfrom tests.helpers.utils import no_warning_call\n\ntorchvision_version = version.parse(\"0\")\nif _TORCHVISION_AVAILABLE:\n    torchvision_version = version.parse(__import__(\"torchvision\").__version__)\n\n\n@contextmanager\ndef mock_subclasses(baseclass, *subclasses):\n    \"\"\"Mocks baseclass so that it only has the given child subclasses.\"\"\"\n    with ExitStack() as stack:\n        mgr = mock.patch.object(baseclass, \"__subclasses__\", return_value=[*subclasses])\n        stack.enter_context(mgr)\n        for mgr in [mock.patch.object(s, \"__subclasses__\", return_value=[]) for s in subclasses]:\n            stack.enter_context(mgr)\n        yield None\n\n\[email protected](\"argparse.ArgumentParser.parse_args\")\ndef test_default_args(mock_argparse):\n    \"\"\"Tests default argument parser for Trainer.\"\"\"\n    mock_argparse.return_value = Namespace(**Trainer.default_attributes())\n\n    parser = LightningArgumentParser(add_help=False, parse_as_dict=False)\n    args = parser.parse_args([])\n\n    args.max_epochs = 5\n    trainer = Trainer.from_argparse_args(args)\n\n    assert isinstance(trainer, Trainer)\n    assert trainer.max_epochs == 5\n\n\[email protected](\"cli_args\", [[\"--accumulate_grad_batches=22\"], []])\ndef test_add_argparse_args_redefined(cli_args):\n    \"\"\"Redefines some default Trainer arguments via the cli and tests that the Trainer initializes correctly.\"\"\"\n    parser = LightningArgumentParser(add_help=False, parse_as_dict=False)\n    parser.add_lightning_class_args(Trainer, None)\n\n    args = parser.parse_args(cli_args)\n\n    # make sure we can pickle args\n    pickle.dumps(args)\n\n    # Check that a few deprecated args are not in the namespace:\n    for depr_name in (\"gradient_clip\", \"nb_gpu_nodes\", \"max_nb_epochs\"):\n        assert depr_name not in args\n\n    trainer = Trainer.from_argparse_args(args=args)\n    pickle.dumps(trainer)\n\n    assert isinstance(trainer, Trainer)\n\n\[email protected](\"cli_args\", [[\"--callbacks=1\", \"--logger\"], [\"--foo\", \"--bar=1\"]])\ndef test_add_argparse_args_redefined_error(cli_args, monkeypatch):\n    \"\"\"Asserts that an error is raised when non-default cli arguments are passed.\"\"\"\n\n    class _UnkArgError(Exception):\n        pass\n\n    def _raise():\n        raise _UnkArgError\n\n    parser = LightningArgumentParser(add_help=False, parse_as_dict=False)\n    parser.add_lightning_class_args(Trainer, None)\n\n    monkeypatch.setattr(parser, \"exit\", lambda *args: _raise(), raising=True)\n\n    with pytest.raises(_UnkArgError):\n        parser.parse_args(cli_args)\n\n\[email protected](\n    [\"cli_args\", \"expected\"],\n    [\n        (\"--auto_lr_find=True --auto_scale_batch_size=power\", dict(auto_lr_find=True, auto_scale_batch_size=\"power\")),\n        (\n            \"--auto_lr_find any_string --auto_scale_batch_size ON\",\n            dict(auto_lr_find=\"any_string\", auto_scale_batch_size=True),\n        ),\n        (\"--auto_lr_find=Yes --auto_scale_batch_size=On\", dict(auto_lr_find=True, auto_scale_batch_size=True)),\n        (\"--auto_lr_find Off --auto_scale_batch_size No\", dict(auto_lr_find=False, auto_scale_batch_size=False)),\n        (\"--auto_lr_find TRUE --auto_scale_batch_size FALSE\", dict(auto_lr_find=True, auto_scale_batch_size=False)),\n        (\"--tpu_cores=8\", dict(tpu_cores=8)),\n        (\"--tpu_cores=1,\", dict(tpu_cores=\"1,\")),\n        (\"--limit_train_batches=100\", dict(limit_train_batches=100)),\n        (\"--limit_train_batches 0.8\", dict(limit_train_batches=0.8)),\n        
(\"--enable_model_summary FALSE\", dict(enable_model_summary=False)),\n (\n \"\",\n dict(\n # These parameters are marked as Optional[...] in Trainer.__init__,\n # with None as default. They should not be changed by the argparse\n # interface.\n min_steps=None,\n accelerator=None,\n profiler=None,\n ),\n ),\n ],\n)\ndef test_parse_args_parsing(cli_args, expected):\n \"\"\"Test parsing simple types and None optionals not modified.\"\"\"\n cli_args = cli_args.split(\" \") if cli_args else []\n with mock.patch(\"sys.argv\", [\"any.py\"] + cli_args):\n parser = LightningArgumentParser(add_help=False, parse_as_dict=False)\n parser.add_lightning_class_args(Trainer, None)\n args = parser.parse_args()\n\n for k, v in expected.items():\n assert getattr(args, k) == v\n if \"tpu_cores\" not in expected or _TPU_AVAILABLE:\n assert Trainer.from_argparse_args(args)\n\n\[email protected](\n [\"cli_args\", \"expected\", \"instantiate\"],\n [\n ([\"--gpus\", \"[0, 2]\"], dict(gpus=[0, 2]), False),\n ([\"--tpu_cores=[1,3]\"], dict(tpu_cores=[1, 3]), False),\n (['--accumulate_grad_batches={\"5\":3,\"10\":20}'], dict(accumulate_grad_batches={5: 3, 10: 20}), True),\n ],\n)\ndef test_parse_args_parsing_complex_types(cli_args, expected, instantiate):\n \"\"\"Test parsing complex types.\"\"\"\n with mock.patch(\"sys.argv\", [\"any.py\"] + cli_args):\n parser = LightningArgumentParser(add_help=False, parse_as_dict=False)\n parser.add_lightning_class_args(Trainer, None)\n args = parser.parse_args()\n\n for k, v in expected.items():\n assert getattr(args, k) == v\n if instantiate:\n assert Trainer.from_argparse_args(args)\n\n\[email protected](\n [\"cli_args\", \"expected_gpu\"],\n [\n (\"--accelerator gpu --devices 1\", [0]),\n (\"--accelerator gpu --devices 0,\", [0]),\n (\"--accelerator gpu --devices 1,\", [1]),\n (\"--accelerator gpu --devices 0,1\", [0, 1]),\n ],\n)\ndef test_parse_args_parsing_gpus(monkeypatch, cli_args, expected_gpu):\n \"\"\"Test parsing of gpus and instantiation of Trainer.\"\"\"\n monkeypatch.setattr(\"torch.cuda.device_count\", lambda: 2)\n monkeypatch.setattr(\"torch.cuda.is_available\", lambda: True)\n cli_args = cli_args.split(\" \") if cli_args else []\n with mock.patch(\"sys.argv\", [\"any.py\"] + cli_args):\n parser = LightningArgumentParser(add_help=False, parse_as_dict=False)\n parser.add_lightning_class_args(Trainer, None)\n args = parser.parse_args()\n\n trainer = Trainer.from_argparse_args(args)\n assert trainer.device_ids == expected_gpu\n\n\[email protected](\n sys.version_info < (3, 7),\n reason=\"signature inspection while mocking is not working in Python < 3.7 despite autospec\",\n)\[email protected](\n [\"cli_args\", \"extra_args\"],\n [\n ({}, {}),\n (dict(logger=False), {}),\n (dict(logger=False), dict(logger=True)),\n (dict(logger=False), dict(enable_checkpointing=True)),\n ],\n)\ndef test_init_from_argparse_args(cli_args, extra_args):\n unknown_args = dict(unknown_arg=0)\n\n # unknown args in the argparser/namespace should be ignored\n with mock.patch(\"pytorch_lightning.Trainer.__init__\", autospec=True, return_value=None) as init:\n trainer = Trainer.from_argparse_args(Namespace(**cli_args, **unknown_args), **extra_args)\n expected = dict(cli_args)\n expected.update(extra_args) # extra args should override any cli arg\n init.assert_called_with(trainer, **expected)\n\n # passing in unknown manual args should throw an error\n with pytest.raises(TypeError, match=r\"__init__\\(\\) got an unexpected keyword argument 'unknown_arg'\"):\n 
Trainer.from_argparse_args(Namespace(**cli_args), **extra_args, **unknown_args)\n\n\nclass Model(LightningModule):\n def __init__(self, model_param: int):\n super().__init__()\n self.model_param = model_param\n\n\ndef _model_builder(model_param: int) -> Model:\n return Model(model_param)\n\n\ndef _trainer_builder(\n limit_train_batches: int, fast_dev_run: bool = False, callbacks: Optional[Union[List[Callback], Callback]] = None\n) -> Trainer:\n return Trainer(limit_train_batches=limit_train_batches, fast_dev_run=fast_dev_run, callbacks=callbacks)\n\n\[email protected]([\"trainer_class\", \"model_class\"], [(Trainer, Model), (_trainer_builder, _model_builder)])\ndef test_lightning_cli(trainer_class, model_class, monkeypatch):\n \"\"\"Test that LightningCLI correctly instantiates model, trainer and calls fit.\"\"\"\n\n expected_model = dict(model_param=7)\n expected_trainer = dict(limit_train_batches=100)\n\n def fit(trainer, model):\n for k, v in expected_model.items():\n assert getattr(model, k) == v\n for k, v in expected_trainer.items():\n assert getattr(trainer, k) == v\n save_callback = [x for x in trainer.callbacks if isinstance(x, SaveConfigCallback)]\n assert len(save_callback) == 1\n save_callback[0].on_train_start(trainer, model)\n\n def on_train_start(callback, trainer, _):\n config_dump = callback.parser.dump(callback.config, skip_none=False)\n for k, v in expected_model.items():\n assert f\" {k}: {v}\" in config_dump\n for k, v in expected_trainer.items():\n assert f\" {k}: {v}\" in config_dump\n trainer.ran_asserts = True\n\n monkeypatch.setattr(Trainer, \"fit\", fit)\n monkeypatch.setattr(SaveConfigCallback, \"on_train_start\", on_train_start)\n\n with mock.patch(\"sys.argv\", [\"any.py\", \"fit\", \"--model.model_param=7\", \"--trainer.limit_train_batches=100\"]):\n cli = LightningCLI(model_class, trainer_class=trainer_class, save_config_callback=SaveConfigCallback)\n assert hasattr(cli.trainer, \"ran_asserts\") and cli.trainer.ran_asserts\n\n\ndef test_lightning_cli_args_callbacks(tmpdir):\n\n callbacks = [\n dict(\n class_path=\"pytorch_lightning.callbacks.LearningRateMonitor\",\n init_args=dict(logging_interval=\"epoch\", log_momentum=True),\n ),\n dict(class_path=\"pytorch_lightning.callbacks.ModelCheckpoint\", init_args=dict(monitor=\"NAME\")),\n ]\n\n class TestModel(BoringModel):\n def on_fit_start(self):\n callback = [c for c in self.trainer.callbacks if isinstance(c, LearningRateMonitor)]\n assert len(callback) == 1\n assert callback[0].logging_interval == \"epoch\"\n assert callback[0].log_momentum is True\n callback = [c for c in self.trainer.callbacks if isinstance(c, ModelCheckpoint)]\n assert len(callback) == 1\n assert callback[0].monitor == \"NAME\"\n self.trainer.ran_asserts = True\n\n with mock.patch(\"sys.argv\", [\"any.py\", \"fit\", f\"--trainer.callbacks={json.dumps(callbacks)}\"]):\n cli = LightningCLI(TestModel, trainer_defaults=dict(default_root_dir=str(tmpdir), fast_dev_run=True))\n\n assert cli.trainer.ran_asserts\n\n\[email protected](\"run\", (False, True))\ndef test_lightning_cli_configurable_callbacks(tmpdir, run):\n class MyLightningCLI(LightningCLI):\n def add_arguments_to_parser(self, parser):\n parser.add_lightning_class_args(LearningRateMonitor, \"learning_rate_monitor\")\n\n def fit(self, **_):\n pass\n\n cli_args = [\"fit\"] if run else []\n cli_args += [f\"--trainer.default_root_dir={tmpdir}\", \"--learning_rate_monitor.logging_interval=epoch\"]\n\n with mock.patch(\"sys.argv\", [\"any.py\"] + cli_args):\n cli = 
MyLightningCLI(BoringModel, run=run)\n\n callback = [c for c in cli.trainer.callbacks if isinstance(c, LearningRateMonitor)]\n assert len(callback) == 1\n assert callback[0].logging_interval == \"epoch\"\n\n\ndef test_lightning_cli_args_cluster_environments(tmpdir):\n plugins = [dict(class_path=\"pytorch_lightning.plugins.environments.SLURMEnvironment\")]\n\n class TestModel(BoringModel):\n def on_fit_start(self):\n # Ensure SLURMEnvironment is set, instead of default LightningEnvironment\n assert isinstance(self.trainer._accelerator_connector.cluster_environment, SLURMEnvironment)\n self.trainer.ran_asserts = True\n\n with mock.patch(\"sys.argv\", [\"any.py\", \"fit\", f\"--trainer.plugins={json.dumps(plugins)}\"]):\n cli = LightningCLI(TestModel, trainer_defaults=dict(default_root_dir=str(tmpdir), fast_dev_run=True))\n\n assert cli.trainer.ran_asserts\n\n\ndef test_lightning_cli_args(tmpdir):\n\n cli_args = [\n \"fit\",\n f\"--data.data_dir={tmpdir}\",\n f\"--trainer.default_root_dir={tmpdir}\",\n \"--trainer.max_epochs=1\",\n \"--trainer.enable_model_summary=False\",\n \"--seed_everything=1234\",\n ]\n\n with mock.patch(\"sys.argv\", [\"any.py\"] + cli_args):\n cli = LightningCLI(BoringModel, BoringDataModule, trainer_defaults={\"callbacks\": [LearningRateMonitor()]})\n\n config_path = tmpdir / \"lightning_logs\" / \"version_0\" / \"config.yaml\"\n assert os.path.isfile(config_path)\n with open(config_path) as f:\n loaded_config = yaml.safe_load(f.read())\n\n cli_config = cli.config[\"fit\"].as_dict()\n assert cli_config[\"seed_everything\"] == 1234\n assert \"model\" not in loaded_config and \"model\" not in cli_config # no arguments to include\n assert loaded_config[\"data\"] == cli_config[\"data\"]\n assert loaded_config[\"trainer\"] == cli_config[\"trainer\"]\n\n\ndef test_lightning_cli_save_config_cases(tmpdir):\n\n config_path = tmpdir / \"config.yaml\"\n cli_args = [\"fit\", f\"--trainer.default_root_dir={tmpdir}\", \"--trainer.logger=False\", \"--trainer.fast_dev_run=1\"]\n\n # With fast_dev_run!=False config should not be saved\n with mock.patch(\"sys.argv\", [\"any.py\"] + cli_args):\n LightningCLI(BoringModel)\n assert not os.path.isfile(config_path)\n\n # With fast_dev_run==False config should be saved\n cli_args[-1] = \"--trainer.max_epochs=1\"\n with mock.patch(\"sys.argv\", [\"any.py\"] + cli_args):\n LightningCLI(BoringModel)\n assert os.path.isfile(config_path)\n\n # If run again on same directory exception should be raised since config file already exists\n with mock.patch(\"sys.argv\", [\"any.py\"] + cli_args), pytest.raises(RuntimeError):\n LightningCLI(BoringModel)\n\n\ndef test_lightning_cli_config_and_subclass_mode(tmpdir):\n input_config = {\n \"fit\": {\n \"model\": {\"class_path\": \"tests.helpers.BoringModel\"},\n \"data\": {\"class_path\": \"tests.helpers.BoringDataModule\", \"init_args\": {\"data_dir\": str(tmpdir)}},\n \"trainer\": {\"default_root_dir\": str(tmpdir), \"max_epochs\": 1, \"enable_model_summary\": False},\n }\n }\n config_path = tmpdir / \"config.yaml\"\n with open(config_path, \"w\") as f:\n f.write(yaml.dump(input_config))\n\n with mock.patch(\"sys.argv\", [\"any.py\", \"--config\", str(config_path)]):\n cli = LightningCLI(\n BoringModel,\n BoringDataModule,\n subclass_mode_model=True,\n subclass_mode_data=True,\n trainer_defaults={\"callbacks\": LearningRateMonitor()},\n )\n\n config_path = tmpdir / \"lightning_logs\" / \"version_0\" / \"config.yaml\"\n assert os.path.isfile(config_path)\n with open(config_path) as f:\n loaded_config = 
yaml.safe_load(f.read())\n\n cli_config = cli.config[\"fit\"].as_dict()\n assert loaded_config[\"model\"] == cli_config[\"model\"]\n assert loaded_config[\"data\"] == cli_config[\"data\"]\n assert loaded_config[\"trainer\"] == cli_config[\"trainer\"]\n\n\ndef any_model_any_data_cli():\n LightningCLI(LightningModule, LightningDataModule, subclass_mode_model=True, subclass_mode_data=True)\n\n\ndef test_lightning_cli_help():\n\n cli_args = [\"any.py\", \"fit\", \"--help\"]\n out = StringIO()\n with mock.patch(\"sys.argv\", cli_args), redirect_stdout(out), pytest.raises(SystemExit):\n any_model_any_data_cli()\n out = out.getvalue()\n\n assert \"--print_config\" in out\n assert \"--config\" in out\n assert \"--seed_everything\" in out\n assert \"--model.help\" in out\n assert \"--data.help\" in out\n\n skip_params = {\"self\"}\n for param in inspect.signature(Trainer.__init__).parameters.keys():\n if param not in skip_params:\n assert f\"--trainer.{param}\" in out\n\n cli_args = [\"any.py\", \"fit\", \"--data.help=tests.helpers.BoringDataModule\"]\n out = StringIO()\n with mock.patch(\"sys.argv\", cli_args), redirect_stdout(out), pytest.raises(SystemExit):\n any_model_any_data_cli()\n\n assert \"--data.init_args.data_dir\" in out.getvalue()\n\n\ndef test_lightning_cli_print_config():\n cli_args = [\n \"any.py\",\n \"predict\",\n \"--seed_everything=1234\",\n \"--model=tests.helpers.BoringModel\",\n \"--data=tests.helpers.BoringDataModule\",\n \"--print_config\",\n ]\n out = StringIO()\n with mock.patch(\"sys.argv\", cli_args), redirect_stdout(out), pytest.raises(SystemExit):\n any_model_any_data_cli()\n\n text = out.getvalue()\n # test dump_header\n assert text.startswith(f\"# pytorch_lightning=={__version__}\")\n\n outval = yaml.safe_load(text)\n assert outval[\"seed_everything\"] == 1234\n assert outval[\"model\"][\"class_path\"] == \"pytorch_lightning.demos.boring_classes.BoringModel\"\n assert outval[\"data\"][\"class_path\"] == \"pytorch_lightning.demos.boring_classes.BoringDataModule\"\n assert outval[\"ckpt_path\"] is None\n\n\ndef test_lightning_cli_submodules(tmpdir):\n class MainModule(BoringModel):\n def __init__(self, submodule1: LightningModule, submodule2: LightningModule, main_param: int = 1):\n super().__init__()\n self.submodule1 = submodule1\n self.submodule2 = submodule2\n\n config = \"\"\"model:\n main_param: 2\n submodule1:\n class_path: pytorch_lightning.demos.boring_classes.BoringModel\n submodule2:\n class_path: pytorch_lightning.demos.boring_classes.BoringModel\n \"\"\"\n config_path = tmpdir / \"config.yaml\"\n with open(config_path, \"w\") as f:\n f.write(config)\n\n cli_args = [f\"--trainer.default_root_dir={tmpdir}\", f\"--config={str(config_path)}\"]\n\n with mock.patch(\"sys.argv\", [\"any.py\"] + cli_args):\n cli = LightningCLI(MainModule, run=False)\n\n assert cli.config[\"model\"][\"main_param\"] == 2\n assert isinstance(cli.model.submodule1, BoringModel)\n assert isinstance(cli.model.submodule2, BoringModel)\n\n\[email protected](torchvision_version < version.parse(\"0.8.0\"), reason=\"torchvision>=0.8.0 is required\")\ndef test_lightning_cli_torch_modules(tmpdir):\n class TestModule(BoringModel):\n def __init__(self, activation: torch.nn.Module = None, transform: Optional[List[torch.nn.Module]] = None):\n super().__init__()\n self.activation = activation\n self.transform = transform\n\n config = \"\"\"model:\n activation:\n class_path: torch.nn.LeakyReLU\n init_args:\n negative_slope: 0.2\n transform:\n - class_path: torchvision.transforms.Resize\n 
init_args:\n size: 64\n - class_path: torchvision.transforms.CenterCrop\n init_args:\n size: 64\n \"\"\"\n config_path = tmpdir / \"config.yaml\"\n with open(config_path, \"w\") as f:\n f.write(config)\n\n cli_args = [f\"--trainer.default_root_dir={tmpdir}\", f\"--config={str(config_path)}\"]\n\n with mock.patch(\"sys.argv\", [\"any.py\"] + cli_args):\n cli = LightningCLI(TestModule, run=False)\n\n assert isinstance(cli.model.activation, torch.nn.LeakyReLU)\n assert cli.model.activation.negative_slope == 0.2\n assert len(cli.model.transform) == 2\n assert all(isinstance(v, torch.nn.Module) for v in cli.model.transform)\n\n\nclass BoringModelRequiredClasses(BoringModel):\n def __init__(self, num_classes: int, batch_size: int = 8):\n super().__init__()\n self.num_classes = num_classes\n self.batch_size = batch_size\n\n\nclass BoringDataModuleBatchSizeAndClasses(BoringDataModule):\n def __init__(self, batch_size: int = 8):\n super().__init__()\n self.batch_size = batch_size\n self.num_classes = 5 # only available after instantiation\n\n\ndef test_lightning_cli_link_arguments(tmpdir):\n class MyLightningCLI(LightningCLI):\n def add_arguments_to_parser(self, parser):\n parser.link_arguments(\"data.batch_size\", \"model.batch_size\")\n parser.link_arguments(\"data.num_classes\", \"model.num_classes\", apply_on=\"instantiate\")\n\n cli_args = [f\"--trainer.default_root_dir={tmpdir}\", \"--data.batch_size=12\"]\n\n with mock.patch(\"sys.argv\", [\"any.py\"] + cli_args):\n cli = MyLightningCLI(BoringModelRequiredClasses, BoringDataModuleBatchSizeAndClasses, run=False)\n\n assert cli.model.batch_size == 12\n assert cli.model.num_classes == 5\n\n class MyLightningCLI(LightningCLI):\n def add_arguments_to_parser(self, parser):\n parser.link_arguments(\"data.batch_size\", \"model.init_args.batch_size\")\n parser.link_arguments(\"data.num_classes\", \"model.init_args.num_classes\", apply_on=\"instantiate\")\n\n cli_args[-1] = \"--model=tests.utilities.test_cli.BoringModelRequiredClasses\"\n\n with mock.patch(\"sys.argv\", [\"any.py\"] + cli_args):\n cli = MyLightningCLI(\n BoringModelRequiredClasses, BoringDataModuleBatchSizeAndClasses, subclass_mode_model=True, run=False\n )\n\n assert cli.model.batch_size == 8\n assert cli.model.num_classes == 5\n\n\nclass EarlyExitTestModel(BoringModel):\n def on_fit_start(self):\n raise MisconfigurationException(\"Error on fit start\")\n\n\n@RunIf(skip_windows=True)\[email protected](\"logger\", (False, True))\[email protected](\"strategy\", (\"ddp_spawn\", \"ddp\"))\ndef test_cli_distributed_save_config_callback(tmpdir, logger, strategy):\n from torch.multiprocessing import ProcessRaisedException\n\n with mock.patch(\"sys.argv\", [\"any.py\", \"fit\"]), pytest.raises(\n (MisconfigurationException, ProcessRaisedException), match=r\"Error on fit start\"\n ):\n LightningCLI(\n EarlyExitTestModel,\n trainer_defaults={\n \"default_root_dir\": str(tmpdir),\n \"logger\": logger,\n \"max_steps\": 1,\n \"max_epochs\": 1,\n \"strategy\": strategy,\n \"accelerator\": \"auto\",\n \"devices\": 1,\n },\n )\n if logger:\n config_dir = tmpdir / \"lightning_logs\"\n # no more version dirs should get created\n assert os.listdir(config_dir) == [\"version_0\"]\n config_path = config_dir / \"version_0\" / \"config.yaml\"\n else:\n config_path = tmpdir / \"config.yaml\"\n assert os.path.isfile(config_path)\n\n\ndef test_cli_config_overwrite(tmpdir):\n trainer_defaults = {\"default_root_dir\": str(tmpdir), \"logger\": False, \"max_steps\": 1, \"max_epochs\": 1}\n\n argv = [\"any.py\", 
\"fit\"]\n with mock.patch(\"sys.argv\", argv):\n LightningCLI(BoringModel, trainer_defaults=trainer_defaults)\n with mock.patch(\"sys.argv\", argv), pytest.raises(RuntimeError, match=\"Aborting to avoid overwriting\"):\n LightningCLI(BoringModel, trainer_defaults=trainer_defaults)\n with mock.patch(\"sys.argv\", argv):\n LightningCLI(BoringModel, save_config_overwrite=True, trainer_defaults=trainer_defaults)\n\n\[email protected](\"run\", (False, True))\ndef test_lightning_cli_optimizer(tmpdir, run):\n class MyLightningCLI(LightningCLI):\n def add_arguments_to_parser(self, parser):\n parser.add_optimizer_args(torch.optim.Adam)\n\n match = \"BoringModel.configure_optimizers` will be overridden by \" \"`MyLightningCLI.configure_optimizers`\"\n argv = [\"fit\", f\"--trainer.default_root_dir={tmpdir}\", \"--trainer.fast_dev_run=1\"] if run else []\n with mock.patch(\"sys.argv\", [\"any.py\"] + argv), pytest.warns(UserWarning, match=match):\n cli = MyLightningCLI(BoringModel, run=run)\n\n assert cli.model.configure_optimizers is not BoringModel.configure_optimizers\n\n if not run:\n optimizer = cli.model.configure_optimizers()\n assert isinstance(optimizer, torch.optim.Adam)\n else:\n assert len(cli.trainer.optimizers) == 1\n assert isinstance(cli.trainer.optimizers[0], torch.optim.Adam)\n assert len(cli.trainer.lr_scheduler_configs) == 0\n\n\ndef test_lightning_cli_optimizer_and_lr_scheduler(tmpdir):\n class MyLightningCLI(LightningCLI):\n def add_arguments_to_parser(self, parser):\n parser.add_optimizer_args(torch.optim.Adam)\n parser.add_lr_scheduler_args(torch.optim.lr_scheduler.ExponentialLR)\n\n cli_args = [\"fit\", f\"--trainer.default_root_dir={tmpdir}\", \"--trainer.fast_dev_run=1\", \"--lr_scheduler.gamma=0.8\"]\n\n with mock.patch(\"sys.argv\", [\"any.py\"] + cli_args):\n cli = MyLightningCLI(BoringModel)\n\n assert cli.model.configure_optimizers is not BoringModel.configure_optimizers\n assert len(cli.trainer.optimizers) == 1\n assert isinstance(cli.trainer.optimizers[0], torch.optim.Adam)\n assert len(cli.trainer.lr_scheduler_configs) == 1\n assert isinstance(cli.trainer.lr_scheduler_configs[0].scheduler, torch.optim.lr_scheduler.ExponentialLR)\n assert cli.trainer.lr_scheduler_configs[0].scheduler.gamma == 0.8\n\n\ndef test_cli_no_need_configure_optimizers():\n class BoringModel(LightningModule):\n def __init__(self):\n super().__init__()\n self.layer = torch.nn.Linear(32, 2)\n\n def training_step(self, *_):\n ...\n\n def train_dataloader(self):\n ...\n\n # did not define `configure_optimizers`\n\n from pytorch_lightning.trainer.configuration_validator import __verify_train_val_loop_configuration\n\n with mock.patch(\"sys.argv\", [\"any.py\", \"fit\", \"--optimizer=Adam\"]), mock.patch(\n \"pytorch_lightning.Trainer._run_train\"\n ) as run, mock.patch(\n \"pytorch_lightning.trainer.configuration_validator.__verify_train_val_loop_configuration\",\n wraps=__verify_train_val_loop_configuration,\n ) as verify:\n cli = LightningCLI(BoringModel)\n run.assert_called_once()\n verify.assert_called_once_with(cli.trainer, cli.model)\n\n\ndef test_lightning_cli_optimizer_and_lr_scheduler_subclasses(tmpdir):\n class MyLightningCLI(LightningCLI):\n def add_arguments_to_parser(self, parser):\n parser.add_optimizer_args((torch.optim.SGD, torch.optim.Adam))\n parser.add_lr_scheduler_args((torch.optim.lr_scheduler.StepLR, torch.optim.lr_scheduler.ExponentialLR))\n\n optimizer_arg = dict(class_path=\"torch.optim.Adam\", init_args=dict(lr=0.01))\n lr_scheduler_arg = 
dict(class_path=\"torch.optim.lr_scheduler.StepLR\", init_args=dict(step_size=50))\n cli_args = [\n \"fit\",\n f\"--trainer.default_root_dir={tmpdir}\",\n \"--trainer.max_epochs=1\",\n f\"--optimizer={json.dumps(optimizer_arg)}\",\n f\"--lr_scheduler={json.dumps(lr_scheduler_arg)}\",\n ]\n\n with mock.patch(\"sys.argv\", [\"any.py\"] + cli_args):\n cli = MyLightningCLI(BoringModel)\n\n assert len(cli.trainer.optimizers) == 1\n assert isinstance(cli.trainer.optimizers[0], torch.optim.Adam)\n assert len(cli.trainer.lr_scheduler_configs) == 1\n assert isinstance(cli.trainer.lr_scheduler_configs[0].scheduler, torch.optim.lr_scheduler.StepLR)\n assert cli.trainer.lr_scheduler_configs[0].scheduler.step_size == 50\n\n\[email protected](\"use_generic_base_class\", [False, True])\ndef test_lightning_cli_optimizers_and_lr_scheduler_with_link_to(use_generic_base_class, tmpdir):\n class MyLightningCLI(LightningCLI):\n def add_arguments_to_parser(self, parser):\n parser.add_optimizer_args(\n (torch.optim.Optimizer,) if use_generic_base_class else torch.optim.Adam,\n nested_key=\"optim1\",\n link_to=\"model.optim1\",\n )\n parser.add_optimizer_args((torch.optim.ASGD, torch.optim.SGD), nested_key=\"optim2\", link_to=\"model.optim2\")\n parser.add_lr_scheduler_args(\n LRSchedulerTypeTuple if use_generic_base_class else torch.optim.lr_scheduler.ExponentialLR,\n link_to=\"model.scheduler\",\n )\n\n class TestModel(BoringModel):\n def __init__(self, optim1: dict, optim2: dict, scheduler: dict):\n super().__init__()\n self.optim1 = instantiate_class(self.parameters(), optim1)\n self.optim2 = instantiate_class(self.parameters(), optim2)\n self.scheduler = instantiate_class(self.optim1, scheduler)\n\n cli_args = [\"fit\", f\"--trainer.default_root_dir={tmpdir}\", \"--trainer.max_epochs=1\"]\n if use_generic_base_class:\n cli_args += [\n \"--optim1\",\n \"Adam\",\n \"--optim1.weight_decay\",\n \"0.001\",\n \"--optim2=SGD\",\n \"--optim2.lr=0.01\",\n \"--lr_scheduler=ExponentialLR\",\n ]\n else:\n cli_args += [\"--optim2=SGD\", \"--optim2.lr=0.01\"]\n cli_args += [\"--lr_scheduler.gamma=0.2\"]\n\n with mock.patch(\"sys.argv\", [\"any.py\"] + cli_args):\n cli = MyLightningCLI(TestModel)\n\n assert isinstance(cli.model.optim1, torch.optim.Adam)\n assert isinstance(cli.model.optim2, torch.optim.SGD)\n assert cli.model.optim2.param_groups[0][\"lr\"] == 0.01\n assert isinstance(cli.model.scheduler, torch.optim.lr_scheduler.ExponentialLR)\n\n\[email protected](\"fn\", [fn.value for fn in TrainerFn])\ndef test_lightning_cli_trainer_fn(fn):\n class TestCLI(LightningCLI):\n def __init__(self, *args, **kwargs):\n self.called = []\n super().__init__(*args, **kwargs)\n\n def before_fit(self):\n self.called.append(\"before_fit\")\n\n def fit(self, **_):\n self.called.append(\"fit\")\n\n def after_fit(self):\n self.called.append(\"after_fit\")\n\n def before_validate(self):\n self.called.append(\"before_validate\")\n\n def validate(self, **_):\n self.called.append(\"validate\")\n\n def after_validate(self):\n self.called.append(\"after_validate\")\n\n def before_test(self):\n self.called.append(\"before_test\")\n\n def test(self, **_):\n self.called.append(\"test\")\n\n def after_test(self):\n self.called.append(\"after_test\")\n\n def before_predict(self):\n self.called.append(\"before_predict\")\n\n def predict(self, **_):\n self.called.append(\"predict\")\n\n def after_predict(self):\n self.called.append(\"after_predict\")\n\n def before_tune(self):\n self.called.append(\"before_tune\")\n\n def tune(self, **_):\n 
self.called.append(\"tune\")\n\n def after_tune(self):\n self.called.append(\"after_tune\")\n\n with mock.patch(\"sys.argv\", [\"any.py\", fn]):\n cli = TestCLI(BoringModel)\n assert cli.called == [f\"before_{fn}\", fn, f\"after_{fn}\"]\n\n\ndef test_lightning_cli_subcommands():\n subcommands = LightningCLI.subcommands()\n trainer = Trainer()\n for subcommand, exclude in subcommands.items():\n fn = getattr(trainer, subcommand)\n parameters = list(inspect.signature(fn).parameters)\n for e in exclude:\n # if this fails, it's because the parameter has been removed from the associated `Trainer` function\n # and the `LightningCLI` subcommand exclusion list needs to be updated\n assert e in parameters\n\n\ndef test_lightning_cli_custom_subcommand():\n class TestTrainer(Trainer):\n def foo(self, model: LightningModule, x: int, y: float = 1.0):\n \"\"\"Sample extra function.\n\n Args:\n model: A model\n x: The x\n y: The y\n \"\"\"\n\n class TestCLI(LightningCLI):\n @staticmethod\n def subcommands():\n subcommands = LightningCLI.subcommands()\n subcommands[\"foo\"] = {\"model\"}\n return subcommands\n\n out = StringIO()\n with mock.patch(\"sys.argv\", [\"any.py\", \"-h\"]), redirect_stdout(out), pytest.raises(SystemExit):\n TestCLI(BoringModel, trainer_class=TestTrainer)\n out = out.getvalue()\n assert \"Sample extra function.\" in out\n assert \"{fit,validate,test,predict,tune,foo}\" in out\n\n out = StringIO()\n with mock.patch(\"sys.argv\", [\"any.py\", \"foo\", \"-h\"]), redirect_stdout(out), pytest.raises(SystemExit):\n TestCLI(BoringModel, trainer_class=TestTrainer)\n out = out.getvalue()\n assert \"A model\" not in out\n assert \"Sample extra function:\" in out\n assert \"--x X\" in out\n assert \"The x (required, type: int)\" in out\n assert \"--y Y\" in out\n assert \"The y (type: float, default: 1.0)\" in out\n\n\ndef test_lightning_cli_run():\n with mock.patch(\"sys.argv\", [\"any.py\"]):\n cli = LightningCLI(BoringModel, run=False)\n assert cli.trainer.global_step == 0\n assert isinstance(cli.trainer, Trainer)\n assert isinstance(cli.model, LightningModule)\n\n with mock.patch(\"sys.argv\", [\"any.py\", \"fit\"]):\n cli = LightningCLI(BoringModel, trainer_defaults={\"max_steps\": 1, \"max_epochs\": 1})\n assert cli.trainer.global_step == 1\n assert isinstance(cli.trainer, Trainer)\n assert isinstance(cli.model, LightningModule)\n\n\[email protected](autouse=True)\ndef clear_registries():\n # since the registries are global, it's good to clear them after each test to avoid unwanted interactions\n yield\n OPTIMIZER_REGISTRY.clear()\n LR_SCHEDULER_REGISTRY.clear()\n CALLBACK_REGISTRY.clear()\n MODEL_REGISTRY.clear()\n DATAMODULE_REGISTRY.clear()\n LOGGER_REGISTRY.clear()\n\n\ndef test_registries():\n # the registries are global so this is only necessary when this test is run standalone\n _populate_registries(False)\n\n @OPTIMIZER_REGISTRY\n class CustomAdam(torch.optim.Adam):\n pass\n\n @LR_SCHEDULER_REGISTRY\n class CustomCosineAnnealingLR(torch.optim.lr_scheduler.CosineAnnealingLR):\n pass\n\n @CALLBACK_REGISTRY\n class CustomCallback(Callback):\n pass\n\n @LOGGER_REGISTRY\n class CustomLogger(Logger):\n pass\n\n assert \"SGD\" in OPTIMIZER_REGISTRY.names\n assert \"RMSprop\" in OPTIMIZER_REGISTRY.names\n assert \"CustomAdam\" in OPTIMIZER_REGISTRY.names\n\n assert \"CosineAnnealingLR\" in LR_SCHEDULER_REGISTRY.names\n assert \"CosineAnnealingWarmRestarts\" in LR_SCHEDULER_REGISTRY.names\n assert \"CustomCosineAnnealingLR\" in LR_SCHEDULER_REGISTRY.names\n assert \"ReduceLROnPlateau\" 
in LR_SCHEDULER_REGISTRY.names\n\n assert \"EarlyStopping\" in CALLBACK_REGISTRY.names\n assert \"CustomCallback\" in CALLBACK_REGISTRY.names\n\n class Foo:\n ...\n\n OPTIMIZER_REGISTRY(Foo, key=\"SGD\") # not overridden by default\n assert OPTIMIZER_REGISTRY[\"SGD\"] is torch.optim.SGD\n OPTIMIZER_REGISTRY(Foo, key=\"SGD\", override=True)\n assert OPTIMIZER_REGISTRY[\"SGD\"] is Foo\n\n # test `_Registry.__call__` returns the class\n assert isinstance(CustomCallback(), CustomCallback)\n\n assert \"WandbLogger\" in LOGGER_REGISTRY\n assert \"CustomLogger\" in LOGGER_REGISTRY\n\n\ndef test_registries_register_automatically():\n assert \"SaveConfigCallback\" not in CALLBACK_REGISTRY\n with mock.patch(\"sys.argv\", [\"any.py\"]):\n LightningCLI(BoringModel, run=False, auto_registry=True)\n assert \"SaveConfigCallback\" in CALLBACK_REGISTRY\n\n\nclass TestModel(BoringModel):\n def __init__(self, foo, bar=5):\n super().__init__()\n self.foo = foo\n self.bar = bar\n\n\ndef test_lightning_cli_model_short_arguments():\n with mock.patch(\"sys.argv\", [\"any.py\", \"fit\", \"--model=BoringModel\"]), mock.patch(\n \"pytorch_lightning.Trainer._fit_impl\"\n ) as run, mock_subclasses(LightningModule, BoringModel, TestModel):\n cli = LightningCLI(trainer_defaults={\"fast_dev_run\": 1})\n assert isinstance(cli.model, BoringModel)\n run.assert_called_once_with(cli.model, ANY, ANY, ANY, ANY)\n\n with mock.patch(\"sys.argv\", [\"any.py\", \"--model=TestModel\", \"--model.foo\", \"123\"]), mock_subclasses(\n LightningModule, BoringModel, TestModel\n ):\n cli = LightningCLI(run=False)\n assert isinstance(cli.model, TestModel)\n assert cli.model.foo == 123\n assert cli.model.bar == 5\n\n\nclass MyDataModule(BoringDataModule):\n def __init__(self, foo, bar=5):\n super().__init__()\n self.foo = foo\n self.bar = bar\n\n\ndef test_lightning_cli_datamodule_short_arguments():\n # with set model\n with mock.patch(\"sys.argv\", [\"any.py\", \"fit\", \"--data=BoringDataModule\"]), mock.patch(\n \"pytorch_lightning.Trainer._fit_impl\"\n ) as run, mock_subclasses(LightningDataModule, BoringDataModule):\n cli = LightningCLI(BoringModel, trainer_defaults={\"fast_dev_run\": 1})\n assert isinstance(cli.datamodule, BoringDataModule)\n run.assert_called_once_with(ANY, ANY, ANY, cli.datamodule, ANY)\n\n with mock.patch(\"sys.argv\", [\"any.py\", \"--data=MyDataModule\", \"--data.foo\", \"123\"]), mock_subclasses(\n LightningDataModule, MyDataModule\n ):\n cli = LightningCLI(BoringModel, run=False)\n assert isinstance(cli.datamodule, MyDataModule)\n assert cli.datamodule.foo == 123\n assert cli.datamodule.bar == 5\n\n # with configurable model\n with mock.patch(\"sys.argv\", [\"any.py\", \"fit\", \"--model\", \"BoringModel\", \"--data=BoringDataModule\"]), mock.patch(\n \"pytorch_lightning.Trainer._fit_impl\"\n ) as run, mock_subclasses(LightningModule, BoringModel), mock_subclasses(LightningDataModule, BoringDataModule):\n cli = LightningCLI(trainer_defaults={\"fast_dev_run\": 1})\n assert isinstance(cli.model, BoringModel)\n assert isinstance(cli.datamodule, BoringDataModule)\n run.assert_called_once_with(cli.model, ANY, ANY, cli.datamodule, ANY)\n\n with mock.patch(\"sys.argv\", [\"any.py\", \"--model\", \"BoringModel\", \"--data=MyDataModule\"]), mock_subclasses(\n LightningModule, BoringModel\n ), mock_subclasses(LightningDataModule, MyDataModule):\n cli = LightningCLI(run=False)\n assert isinstance(cli.model, BoringModel)\n assert isinstance(cli.datamodule, MyDataModule)\n\n with mock.patch(\"sys.argv\", [\"any.py\"]):\n cli 
= LightningCLI(BoringModel, run=False)\n        # data was not passed but we are adding it automatically because there are datamodules registered\n        assert \"data\" in cli.parser.groups\n        assert not hasattr(cli.parser.groups[\"data\"], \"group_class\")\n\n    with mock.patch(\"sys.argv\", [\"any.py\"]):\n        cli = LightningCLI(BoringModel, BoringDataModule, run=False)\n        # since we are passing the DataModule, that's what's added to the parser\n        assert cli.parser.groups[\"data\"].group_class is BoringDataModule\n\n\[email protected](\"use_class_path_callbacks\", [False, True])\ndef test_registries_resolution(use_class_path_callbacks):\n    \"\"\"This test validates that registries are used when simplified command line arguments are used.\"\"\"\n    cli_args = [\n        \"--optimizer\",\n        \"Adam\",\n        \"--optimizer.lr\",\n        \"0.0001\",\n        \"--trainer.callbacks=LearningRateMonitor\",\n        \"--trainer.callbacks.logging_interval=epoch\",\n        \"--trainer.callbacks.log_momentum=True\",\n        \"--model=BoringModel\",\n        \"--trainer.callbacks=ModelCheckpoint\",\n        \"--trainer.callbacks.monitor=loss\",\n        \"--lr_scheduler\",\n        \"StepLR\",\n        \"--lr_scheduler.step_size=50\",\n    ]\n\n    extras = []\n    if use_class_path_callbacks:\n        callbacks = [\n            {\"class_path\": \"pytorch_lightning.callbacks.Callback\"},\n            {\"class_path\": \"pytorch_lightning.callbacks.Callback\", \"init_args\": {}},\n        ]\n        cli_args += [f\"--trainer.callbacks={json.dumps(callbacks)}\"]\n        extras = [Callback, Callback]\n\n    with mock.patch(\"sys.argv\", [\"any.py\"] + cli_args), mock_subclasses(LightningModule, BoringModel):\n        cli = LightningCLI(run=False)\n\n    assert isinstance(cli.model, BoringModel)\n    optimizers, lr_scheduler = cli.model.configure_optimizers()\n    assert isinstance(optimizers[0], torch.optim.Adam)\n    assert optimizers[0].param_groups[0][\"lr\"] == 0.0001\n    assert lr_scheduler[0].step_size == 50\n\n    callback_types = [type(c) for c in cli.trainer.callbacks]\n    expected = [LearningRateMonitor, SaveConfigCallback, ModelCheckpoint] + extras\n    assert all(t in callback_types for t in expected)\n\n\ndef test_argv_transformation_noop():\n    base = [\"any.py\", \"--trainer.max_epochs=1\"]\n    argv = LightningArgumentParser._convert_argv_issue_85(CALLBACK_REGISTRY.classes, \"trainer.callbacks\", base)\n    assert argv == base\n\n\ndef test_argv_transformation_single_callback():\n    base = [\"any.py\", \"--trainer.max_epochs=1\"]\n    input = base + [\"--trainer.callbacks=ModelCheckpoint\", \"--trainer.callbacks.monitor=val_loss\"]\n    callbacks = [\n        {\n            \"class_path\": \"pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint\",\n            \"init_args\": {\"monitor\": \"val_loss\"},\n        }\n    ]\n    expected = base + [\"--trainer.callbacks\", str(callbacks)]\n    _populate_registries(False)\n    argv = LightningArgumentParser._convert_argv_issue_85(CALLBACK_REGISTRY.classes, \"trainer.callbacks\", input)\n    assert argv == expected\n\n\ndef test_argv_transformation_multiple_callbacks():\n    base = [\"any.py\", \"--trainer.max_epochs=1\"]\n    input = base + [\n        \"--trainer.callbacks=ModelCheckpoint\",\n        \"--trainer.callbacks.monitor=val_loss\",\n        \"--trainer.callbacks=ModelCheckpoint\",\n        \"--trainer.callbacks.monitor=val_acc\",\n    ]\n    callbacks = [\n        {\n            \"class_path\": \"pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint\",\n            \"init_args\": {\"monitor\": \"val_loss\"},\n        },\n        {\n            \"class_path\": \"pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint\",\n            \"init_args\": {\"monitor\": \"val_acc\"},\n        },\n    ]\n    expected = base + [\"--trainer.callbacks\", str(callbacks)]\n    _populate_registries(False)\n    argv = 
LightningArgumentParser._convert_argv_issue_85(CALLBACK_REGISTRY.classes, \"trainer.callbacks\", input)\n assert argv == expected\n\n\ndef test_argv_transformation_multiple_callbacks_with_config():\n base = [\"any.py\", \"--trainer.max_epochs=1\"]\n nested_key = \"trainer.callbacks\"\n input = base + [\n f\"--{nested_key}=ModelCheckpoint\",\n f\"--{nested_key}.monitor=val_loss\",\n f\"--{nested_key}=ModelCheckpoint\",\n f\"--{nested_key}.monitor=val_acc\",\n f\"--{nested_key}=[{{'class_path': 'pytorch_lightning.callbacks.Callback'}}]\",\n ]\n callbacks = [\n {\n \"class_path\": \"pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint\",\n \"init_args\": {\"monitor\": \"val_loss\"},\n },\n {\n \"class_path\": \"pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint\",\n \"init_args\": {\"monitor\": \"val_acc\"},\n },\n {\"class_path\": \"pytorch_lightning.callbacks.Callback\"},\n ]\n expected = base + [\"--trainer.callbacks\", str(callbacks)]\n nested_key = \"trainer.callbacks\"\n _populate_registries(False)\n argv = LightningArgumentParser._convert_argv_issue_85(CALLBACK_REGISTRY.classes, nested_key, input)\n assert argv == expected\n\n\ndef test_optimizers_and_lr_schedulers_reload(tmpdir):\n base = [\"any.py\", \"--trainer.max_epochs=1\"]\n input = base + [\n \"--lr_scheduler\",\n \"OneCycleLR\",\n \"--lr_scheduler.total_steps=10\",\n \"--lr_scheduler.max_lr=1\",\n \"--optimizer\",\n \"Adam\",\n \"--optimizer.lr=0.1\",\n ]\n\n # save config\n out = StringIO()\n with mock.patch(\"sys.argv\", input + [\"--print_config\"]), redirect_stdout(out), pytest.raises(SystemExit):\n LightningCLI(BoringModel, run=False)\n\n # validate yaml\n yaml_config = out.getvalue()\n dict_config = yaml.safe_load(yaml_config)\n assert dict_config[\"optimizer\"][\"class_path\"] == \"torch.optim.Adam\"\n assert dict_config[\"optimizer\"][\"init_args\"][\"lr\"] == 0.1\n assert dict_config[\"lr_scheduler\"][\"class_path\"] == \"torch.optim.lr_scheduler.OneCycleLR\"\n\n # reload config\n yaml_config_file = tmpdir / \"config.yaml\"\n yaml_config_file.write_text(yaml_config, \"utf-8\")\n with mock.patch(\"sys.argv\", base + [f\"--config={yaml_config_file}\"]):\n LightningCLI(BoringModel, run=False)\n\n\ndef test_optimizers_and_lr_schedulers_add_arguments_to_parser_implemented_reload(tmpdir):\n class TestLightningCLI(LightningCLI):\n def __init__(self, *args):\n super().__init__(*args, run=False)\n\n def add_arguments_to_parser(self, parser):\n parser.add_optimizer_args(OPTIMIZER_REGISTRY.classes, nested_key=\"opt1\", link_to=\"model.opt1_config\")\n parser.add_optimizer_args(\n (torch.optim.ASGD, torch.optim.SGD), nested_key=\"opt2\", link_to=\"model.opt2_config\"\n )\n parser.add_lr_scheduler_args(LR_SCHEDULER_REGISTRY.classes, link_to=\"model.sch_config\")\n parser.add_argument(\"--something\", type=str, nargs=\"+\")\n\n class TestModel(BoringModel):\n def __init__(self, opt1_config: dict, opt2_config: dict, sch_config: dict):\n super().__init__()\n self.opt1_config = opt1_config\n self.opt2_config = opt2_config\n self.sch_config = sch_config\n opt1 = instantiate_class(self.parameters(), opt1_config)\n assert isinstance(opt1, torch.optim.Adam)\n opt2 = instantiate_class(self.parameters(), opt2_config)\n assert isinstance(opt2, torch.optim.ASGD)\n sch = instantiate_class(opt1, sch_config)\n assert isinstance(sch, torch.optim.lr_scheduler.OneCycleLR)\n\n base = [\"any.py\", \"--trainer.max_epochs=1\"]\n input = base + [\n \"--lr_scheduler\",\n \"OneCycleLR\",\n \"--lr_scheduler.total_steps=10\",\n 
\"--lr_scheduler.max_lr=1\",\n \"--opt1\",\n \"Adam\",\n \"--opt2=ASGD\",\n \"--opt2.lr=0.1\",\n \"--lr_scheduler.anneal_strategy=linear\",\n \"--something\",\n \"a\",\n \"b\",\n \"c\",\n ]\n\n # save config\n out = StringIO()\n with mock.patch(\"sys.argv\", input + [\"--print_config\"]), redirect_stdout(out), pytest.raises(SystemExit):\n TestLightningCLI(TestModel)\n\n # validate yaml\n yaml_config = out.getvalue()\n dict_config = yaml.safe_load(yaml_config)\n assert dict_config[\"opt1\"][\"class_path\"] == \"torch.optim.Adam\"\n assert dict_config[\"opt2\"][\"class_path\"] == \"torch.optim.ASGD\"\n assert dict_config[\"opt2\"][\"init_args\"][\"lr\"] == 0.1\n assert dict_config[\"lr_scheduler\"][\"class_path\"] == \"torch.optim.lr_scheduler.OneCycleLR\"\n assert dict_config[\"lr_scheduler\"][\"init_args\"][\"anneal_strategy\"] == \"linear\"\n assert dict_config[\"something\"] == [\"a\", \"b\", \"c\"]\n\n # reload config\n yaml_config_file = tmpdir / \"config.yaml\"\n yaml_config_file.write_text(yaml_config, \"utf-8\")\n with mock.patch(\"sys.argv\", base + [f\"--config={yaml_config_file}\"]):\n cli = TestLightningCLI(TestModel)\n\n assert cli.model.opt1_config[\"class_path\"] == \"torch.optim.Adam\"\n assert cli.model.opt2_config[\"class_path\"] == \"torch.optim.ASGD\"\n assert cli.model.opt2_config[\"init_args\"][\"lr\"] == 0.1\n assert cli.model.sch_config[\"class_path\"] == \"torch.optim.lr_scheduler.OneCycleLR\"\n assert cli.model.sch_config[\"init_args\"][\"anneal_strategy\"] == \"linear\"\n\n\ndef test_lightning_cli_config_with_subcommand():\n config = {\"test\": {\"trainer\": {\"limit_test_batches\": 1}, \"verbose\": True, \"ckpt_path\": \"foobar\"}}\n with mock.patch(\"sys.argv\", [\"any.py\", f\"--config={config}\"]), mock.patch(\n \"pytorch_lightning.Trainer.test\", autospec=True\n ) as test_mock:\n cli = LightningCLI(BoringModel)\n\n test_mock.assert_called_once_with(cli.trainer, cli.model, verbose=True, ckpt_path=\"foobar\")\n assert cli.trainer.limit_test_batches == 1\n\n\ndef test_lightning_cli_config_before_subcommand():\n config = {\n \"validate\": {\"trainer\": {\"limit_val_batches\": 1}, \"verbose\": False, \"ckpt_path\": \"barfoo\"},\n \"test\": {\"trainer\": {\"limit_test_batches\": 1}, \"verbose\": True, \"ckpt_path\": \"foobar\"},\n }\n\n with mock.patch(\"sys.argv\", [\"any.py\", f\"--config={config}\", \"test\"]), mock.patch(\n \"pytorch_lightning.Trainer.test\", autospec=True\n ) as test_mock:\n cli = LightningCLI(BoringModel)\n\n test_mock.assert_called_once_with(cli.trainer, model=cli.model, verbose=True, ckpt_path=\"foobar\")\n assert cli.trainer.limit_test_batches == 1\n\n save_config_callback = cli.trainer.callbacks[0]\n assert save_config_callback.config.trainer.limit_test_batches == 1\n assert save_config_callback.parser.subcommand == \"test\"\n\n with mock.patch(\"sys.argv\", [\"any.py\", f\"--config={config}\", \"validate\"]), mock.patch(\n \"pytorch_lightning.Trainer.validate\", autospec=True\n ) as validate_mock:\n cli = LightningCLI(BoringModel)\n\n validate_mock.assert_called_once_with(cli.trainer, cli.model, verbose=False, ckpt_path=\"barfoo\")\n assert cli.trainer.limit_val_batches == 1\n\n save_config_callback = cli.trainer.callbacks[0]\n assert save_config_callback.config.trainer.limit_val_batches == 1\n assert save_config_callback.parser.subcommand == \"validate\"\n\n\ndef test_lightning_cli_config_before_subcommand_two_configs():\n config1 = {\"validate\": {\"trainer\": {\"limit_val_batches\": 1}, \"verbose\": False, \"ckpt_path\": 
\"barfoo\"}}\n config2 = {\"test\": {\"trainer\": {\"limit_test_batches\": 1}, \"verbose\": True, \"ckpt_path\": \"foobar\"}}\n\n with mock.patch(\"sys.argv\", [\"any.py\", f\"--config={config1}\", f\"--config={config2}\", \"test\"]), mock.patch(\n \"pytorch_lightning.Trainer.test\", autospec=True\n ) as test_mock:\n cli = LightningCLI(BoringModel)\n\n test_mock.assert_called_once_with(cli.trainer, model=cli.model, verbose=True, ckpt_path=\"foobar\")\n assert cli.trainer.limit_test_batches == 1\n\n with mock.patch(\"sys.argv\", [\"any.py\", f\"--config={config1}\", f\"--config={config2}\", \"validate\"]), mock.patch(\n \"pytorch_lightning.Trainer.validate\", autospec=True\n ) as validate_mock:\n cli = LightningCLI(BoringModel)\n\n validate_mock.assert_called_once_with(cli.trainer, cli.model, verbose=False, ckpt_path=\"barfoo\")\n assert cli.trainer.limit_val_batches == 1\n\n\ndef test_lightning_cli_config_after_subcommand():\n config = {\"trainer\": {\"limit_test_batches\": 1}, \"verbose\": True, \"ckpt_path\": \"foobar\"}\n with mock.patch(\"sys.argv\", [\"any.py\", \"test\", f\"--config={config}\"]), mock.patch(\n \"pytorch_lightning.Trainer.test\", autospec=True\n ) as test_mock:\n cli = LightningCLI(BoringModel)\n\n test_mock.assert_called_once_with(cli.trainer, cli.model, verbose=True, ckpt_path=\"foobar\")\n assert cli.trainer.limit_test_batches == 1\n\n\ndef test_lightning_cli_config_before_and_after_subcommand():\n config1 = {\"test\": {\"trainer\": {\"limit_test_batches\": 1}, \"verbose\": True, \"ckpt_path\": \"foobar\"}}\n config2 = {\"trainer\": {\"fast_dev_run\": 1}, \"verbose\": False, \"ckpt_path\": \"foobar\"}\n with mock.patch(\"sys.argv\", [\"any.py\", f\"--config={config1}\", \"test\", f\"--config={config2}\"]), mock.patch(\n \"pytorch_lightning.Trainer.test\", autospec=True\n ) as test_mock:\n cli = LightningCLI(BoringModel)\n\n test_mock.assert_called_once_with(cli.trainer, model=cli.model, verbose=False, ckpt_path=\"foobar\")\n assert cli.trainer.limit_test_batches == 1\n assert cli.trainer.fast_dev_run == 1\n\n\ndef test_lightning_cli_parse_kwargs_with_subcommands(tmpdir):\n fit_config = {\"trainer\": {\"limit_train_batches\": 2}}\n fit_config_path = tmpdir / \"fit.yaml\"\n fit_config_path.write_text(str(fit_config), \"utf8\")\n\n validate_config = {\"trainer\": {\"limit_val_batches\": 3}}\n validate_config_path = tmpdir / \"validate.yaml\"\n validate_config_path.write_text(str(validate_config), \"utf8\")\n\n parser_kwargs = {\n \"fit\": {\"default_config_files\": [str(fit_config_path)]},\n \"validate\": {\"default_config_files\": [str(validate_config_path)]},\n }\n\n with mock.patch(\"sys.argv\", [\"any.py\", \"fit\"]), mock.patch(\n \"pytorch_lightning.Trainer.fit\", autospec=True\n ) as fit_mock:\n cli = LightningCLI(BoringModel, parser_kwargs=parser_kwargs)\n fit_mock.assert_called()\n assert cli.trainer.limit_train_batches == 2\n assert cli.trainer.limit_val_batches == 1.0\n\n with mock.patch(\"sys.argv\", [\"any.py\", \"validate\"]), mock.patch(\n \"pytorch_lightning.Trainer.validate\", autospec=True\n ) as validate_mock:\n cli = LightningCLI(BoringModel, parser_kwargs=parser_kwargs)\n validate_mock.assert_called()\n assert cli.trainer.limit_train_batches == 1.0\n assert cli.trainer.limit_val_batches == 3\n\n\ndef test_lightning_cli_subcommands_common_default_config_files(tmpdir):\n class Model(BoringModel):\n def __init__(self, foo: int, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.foo = foo\n\n config = {\"fit\": {\"model\": {\"foo\": 123}}}\n 
config_path = tmpdir / \"default.yaml\"\n config_path.write_text(str(config), \"utf8\")\n parser_kwargs = {\"default_config_files\": [str(config_path)]}\n\n with mock.patch(\"sys.argv\", [\"any.py\", \"fit\"]), mock.patch(\n \"pytorch_lightning.Trainer.fit\", autospec=True\n ) as fit_mock:\n cli = LightningCLI(Model, parser_kwargs=parser_kwargs)\n fit_mock.assert_called()\n assert cli.model.foo == 123\n\n\ndef test_lightning_cli_reinstantiate_trainer():\n with mock.patch(\"sys.argv\", [\"any.py\"]):\n cli = LightningCLI(BoringModel, run=False)\n assert cli.trainer.max_epochs == 1000\n\n class TestCallback(Callback):\n ...\n\n # make sure a new trainer can be easily created\n trainer = cli.instantiate_trainer(max_epochs=123, callbacks=[TestCallback()])\n # the new config is used\n assert trainer.max_epochs == 123\n assert {c.__class__ for c in trainer.callbacks} == {c.__class__ for c in cli.trainer.callbacks}.union(\n {TestCallback}\n )\n # the existing config is not updated\n assert cli.config_init[\"trainer\"][\"max_epochs\"] is None\n\n\ndef test_cli_configure_optimizers_warning():\n match = \"configure_optimizers` will be overridden by `LightningCLI\"\n with mock.patch(\"sys.argv\", [\"any.py\"]), no_warning_call(UserWarning, match=match):\n LightningCLI(BoringModel, run=False)\n with mock.patch(\"sys.argv\", [\"any.py\", \"--optimizer=Adam\"]), pytest.warns(UserWarning, match=match):\n LightningCLI(BoringModel, run=False)\n\n\ndef test_cli_help_message():\n # full class path\n cli_args = [\"any.py\", \"--optimizer.help=torch.optim.Adam\"]\n classpath_help = StringIO()\n with mock.patch(\"sys.argv\", cli_args), redirect_stdout(classpath_help), pytest.raises(SystemExit):\n LightningCLI(BoringModel, run=False)\n\n cli_args = [\"any.py\", \"--optimizer.help=Adam\"]\n shorthand_help = StringIO()\n with mock.patch(\"sys.argv\", cli_args), redirect_stdout(shorthand_help), pytest.raises(SystemExit):\n LightningCLI(BoringModel, run=False)\n\n # the help messages should match\n assert shorthand_help.getvalue() == classpath_help.getvalue()\n # make sure it's not empty\n assert \"Implements Adam\" in shorthand_help.getvalue()\n\n\ndef test_cli_reducelronplateau():\n with mock.patch(\n \"sys.argv\", [\"any.py\", \"--optimizer=Adam\", \"--lr_scheduler=ReduceLROnPlateau\", \"--lr_scheduler.monitor=foo\"]\n ):\n cli = LightningCLI(BoringModel, run=False)\n config = cli.model.configure_optimizers()\n assert isinstance(config[\"lr_scheduler\"][\"scheduler\"], ReduceLROnPlateau)\n assert config[\"lr_scheduler\"][\"scheduler\"].monitor == \"foo\"\n\n\ndef test_cli_configureoptimizers_can_be_overridden():\n class MyCLI(LightningCLI):\n def __init__(self):\n super().__init__(BoringModel, run=False)\n\n @staticmethod\n def configure_optimizers(self, optimizer, lr_scheduler=None):\n assert isinstance(self, BoringModel)\n assert lr_scheduler is None\n return 123\n\n with mock.patch(\"sys.argv\", [\"any.py\", \"--optimizer=Adam\"]):\n cli = MyCLI()\n assert cli.model.configure_optimizers() == 123\n\n # with no optimization config, we don't override\n with mock.patch(\"sys.argv\", [\"any.py\"]):\n cli = MyCLI()\n [optimizer], [scheduler] = cli.model.configure_optimizers()\n assert isinstance(optimizer, SGD)\n assert isinstance(scheduler, StepLR)\n with mock.patch(\"sys.argv\", [\"any.py\", \"--lr_scheduler=StepLR\"]):\n cli = MyCLI()\n [optimizer], [scheduler] = cli.model.configure_optimizers()\n assert isinstance(optimizer, SGD)\n assert isinstance(scheduler, StepLR)\n\n\ndef 
test_cli_parameter_with_lazy_instance_default():\n from jsonargparse import lazy_instance\n\n class TestModel(BoringModel):\n def __init__(self, activation: torch.nn.Module = lazy_instance(torch.nn.LeakyReLU, negative_slope=0.05)):\n super().__init__()\n self.activation = activation\n\n model = TestModel()\n assert isinstance(model.activation, torch.nn.LeakyReLU)\n\n with mock.patch(\"sys.argv\", [\"any.py\"]):\n cli = LightningCLI(TestModel, run=False)\n assert isinstance(cli.model.activation, torch.nn.LeakyReLU)\n assert cli.model.activation.negative_slope == 0.05\n assert cli.model.activation is not model.activation\n\n\ndef test_cli_logger_shorthand():\n with mock.patch(\"sys.argv\", [\"any.py\"]):\n cli = LightningCLI(TestModel, run=False, trainer_defaults={\"logger\": False})\n assert cli.trainer.logger is None\n\n with mock.patch(\"sys.argv\", [\"any.py\", \"--trainer.logger=TensorBoardLogger\", \"--trainer.logger.save_dir=foo\"]):\n cli = LightningCLI(TestModel, run=False, trainer_defaults={\"logger\": False})\n assert isinstance(cli.trainer.logger, TensorBoardLogger)\n\n with mock.patch(\"sys.argv\", [\"any.py\", \"--trainer.logger=False\"]):\n cli = LightningCLI(TestModel, run=False)\n assert cli.trainer.logger is None\n\n\ndef test_cli_auto_seeding():\n with mock.patch(\"sys.argv\", [\"any.py\"]):\n cli = LightningCLI(TestModel, run=False, seed_everything_default=False)\n assert cli.seed_everything_default is False\n assert cli.config[\"seed_everything\"] is False\n\n with mock.patch(\"sys.argv\", [\"any.py\"]):\n cli = LightningCLI(TestModel, run=False, seed_everything_default=True)\n assert cli.seed_everything_default is True\n assert isinstance(cli.config[\"seed_everything\"], int)\n\n with mock.patch(\"sys.argv\", [\"any.py\", \"--seed_everything\", \"3\"]):\n cli = LightningCLI(TestModel, run=False, seed_everything_default=False)\n assert cli.seed_everything_default is False\n assert cli.config[\"seed_everything\"] == 3\n\n with mock.patch(\"sys.argv\", [\"any.py\", \"--seed_everything\", \"3\"]):\n cli = LightningCLI(TestModel, run=False, seed_everything_default=True)\n assert cli.seed_everything_default is True\n assert cli.config[\"seed_everything\"] == 3\n\n with mock.patch(\"sys.argv\", [\"any.py\", \"--seed_everything\", \"3\"]):\n cli = LightningCLI(TestModel, run=False, seed_everything_default=10)\n assert cli.seed_everything_default == 10\n assert cli.config[\"seed_everything\"] == 3\n\n with mock.patch(\"sys.argv\", [\"any.py\", \"--seed_everything\", \"false\"]):\n cli = LightningCLI(TestModel, run=False, seed_everything_default=10)\n assert cli.seed_everything_default == 10\n assert cli.config[\"seed_everything\"] is False\n\n with mock.patch(\"sys.argv\", [\"any.py\", \"--seed_everything\", \"false\"]):\n cli = LightningCLI(TestModel, run=False, seed_everything_default=True)\n assert cli.seed_everything_default is True\n assert cli.config[\"seed_everything\"] is False\n\n with mock.patch(\"sys.argv\", [\"any.py\", \"--seed_everything\", \"true\"]):\n cli = LightningCLI(TestModel, run=False, seed_everything_default=False)\n assert cli.seed_everything_default is False\n assert isinstance(cli.config[\"seed_everything\"], int)\n" ]
[ [ "torch.distributed.algorithms.ddp_comm_hooks.default_hooks.fp16_compress_wrapper", "torch.distributed.is_available", "torch.distributed.algorithms.ddp_comm_hooks.post_localSGD_hook.PostLocalSGDState", "torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook.PowerSGDState" ], [ "torch.device" ], [ "numpy.finfo" ], [ "torch.nn.Linear" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
nathanheidacker/AlphaGradient
[ "cf031058f3e91381575e2df44cc029bcc7f4cc73" ]
[ "alphagradient/utils.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"Standard utility functions used throughout AlphaGradient\"\"\"\n\n# Standard Imports\nfrom __future__ import annotations\n\nfrom abc import ABC, abstractmethod\nimport builtins\nfrom datetime import (\n date,\n datetime,\n time,\n timedelta,\n)\nimport math\nfrom pathlib import Path\n\n# Third Party Imports\nimport numpy as np\nimport pandas as pd\n\n# Typing\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Literal,\n Generator,\n Generic,\n Iterable,\n Optional,\n TypeVar,\n Union,\n)\n\nT = TypeVar(\"T\")\n\n\nclass PropertyType(Generic[T]):\n \"\"\"A Type class for property objects themselves, before being bound to a class instance\"\"\"\n\n def fget(self, *args: Any) -> T:\n ...\n\n\nProperty = builtins.property\n\"\"\"A Type for builtin properties that have been bound to a class instance\"\"\"\n\nPyNumber = Union[int, float]\n\"\"\"Numeric type that does not include complex numbers (only native python types)\"\"\"\n\nNumber = Union[PyNumber, np.number, pd.core.arrays.numeric.NumericDtype]\n\"\"\"Numeric type that does not include complex numbers\"\"\"\n\nDatetimeLike = Union[pd.Timestamp, np.datetime64, date, datetime, str]\n\"\"\"Objects convertable to python datetimes\"\"\"\n\nTimeLike = Union[time, str]\n\"\"\"Objects convertable to python time objects\"\"\"\n\nDateOrTime = Union[DatetimeLike, time]\n\"\"\"Objects that are either DatetimeLike or TimeLike in nature\"\"\"\n\nif TYPE_CHECKING:\n from typeshed import SupportsLessThanT as SLTT\n\n_global_persistent_path: PropertyType[Path]\n\n\ndef auto_batch(iterable: Iterable) -> Generator:\n \"\"\"\n Returns a generator which yields automatically sized batches\n\n Given a sized iterable, determines an optimal batch size to be used for\n multiprocessing purposes. Using this batch size, returns a generator which\n yields batches of the iterable with the optimal size\n\n Parameters:\n iterable: An iterable from which to create a batch generator\n\n Returns:\n The batch generator of the iterable input\n \"\"\"\n return get_batches(iterable, auto_batch_size(iterable))\n\n\ndef auto_batch_size(iterable: Iterable) -> int:\n \"\"\"\n Returns a multiprocessing-optimal batch size for an iterable\n\n Given an iterable, returns an integer value representing an optimal batch\n size for use in python's multiprocessing library\n\n Parameters:\n iterable (Iterable): Sized iterable to determine optimal batch size for\n\n Returns:\n The optimal batch size for multiprocessing\n \"\"\"\n # Converting to a sized iterable to guarantee __len__ functionality\n iterable = list(iterable)\n\n # Output Parameters\n horizontal_offset = 10000\n horizontal_stretch = 70 / 100_000_000\n vertical_offset = 100\n\n # Building the quadratic\n output: Number\n output = len(iterable) - horizontal_offset\n output = output**2\n output *= -1\n output *= horizontal_stretch\n output += vertical_offset\n\n # Output bounded between 30 and 100\n return bounded(int(output), lower=30, upper=100)\n\n\ndef bounded(\n to_bound: SLTT, lower: Optional[SLTT] = None, upper: Optional[SLTT] = None\n) -> SLTT:\n \"\"\"\n Bounds an object between a lower and upper bound\n\n Given an object that defines behavior for comparison (__lt__, __gt__),\n returns the object bounded between the lower and upper bounds. Boundaries\n will be ommited if they are not provided (None). 
If lower and upper are not\n None, they must be of the same type as to_bound.\n\n Type Explanation:\n SLTT (SupportsLessThanT): A TypeVar which implements the __lt__ method.\n\n Parameters:\n to_bound (SLTT): the object to be bounded\n lower (Optional[SLTT]): the lower boundary of the operation\n upper (Optional[SLTT]): the upper boundary of the operation\n\n Returns:\n The bounded object\n \"\"\"\n if lower is None and upper is None:\n raise ValueError(\n \"Of the parameters 'lower' and 'upper', at least one must be\" \"specified\"\n )\n if lower:\n to_bound = max(to_bound, lower)\n if upper:\n to_bound = min(to_bound, upper)\n\n return to_bound\n\n\ndef deconstruct_dt(dt: DateOrTime) -> dict[str, float]:\n \"\"\"\n Returns a dictionary of datetime attribute values on object 'dt'\n\n Given a DatetimeLike object, returns a dictionary where keys are the\n object's date and time related attribute names, and values are the object's\n associated attribute values.\n\n Parameters:\n dt (DateOrTime): the dt to deconstruct\n\n Returns:\n A dictionary of attributes and their associated values on dt\n\n Raises:\n TypeError: Raised if dt is not a datetime-like object, as it wont have\n the proper attributes.\n \"\"\"\n # The potential attributes to be accessed\n d = [\"year\", \"month\", \"day\"]\n t = [\"hour\", \"minute\", \"second\", \"microsecond\"]\n attrs = []\n\n # Accept string arguments to convert to datetime\n if isinstance(dt, str):\n dt = read_timestring(dt)\n\n # Determine which elements should be accessed on the dt\n if isinstance(dt, datetime):\n attrs = d + t\n elif isinstance(dt, time):\n attrs = t\n elif isinstance(dt, date):\n attrs = d\n else:\n raise TypeError(f\"{dt=} is not a valid datetime object\")\n\n # Collecting the attributes\n dtdict = {}\n for attr in attrs:\n dtdict[attr] = getattr(dt, attr)\n\n return dtdict\n\n\ndef get_batches(iterable: Iterable, size: int = 100) -> Generator:\n \"\"\"\n Returns a generator of the iterable which yields batches of the given size\n\n Given an iterable, uses the size parameter to create a generator which\n yields batches of the iterable of the given size.\n\n Parameter:\n iterable: The iterable to yield batches of\n size: The batch size of the returned generator\n\n Returns:\n A generator which yields batches of size 'size' of the iterable\n \"\"\"\n # Because we will be indexing the iterable, we must instantiate the entire\n # thing in memory in case it isnt (ie generators)\n iterable = list(iterable)\n last = len(iterable)\n for i in range(math.ceil(last / size)):\n start = i * size\n end = start + size\n end = end if end < last else last\n yield iterable[start:end]\n\n\ndef get_time(t: DateOrTime) -> time:\n \"\"\"\n Given a timestring or datetime-like object, returns a datetime.time object\n\n Given an object t which represents a time or a datetime, returns a native\n python datetime.time object of the appropriate time. 
t can be an isoformat\n time string or datetime string, or a datetime-like object\n\n Parameters:\n dt (DateOrTime): The time object to convert\n\n Returns:\n The converted datetime.time object\n \"\"\"\n if isinstance(t, (time, str)):\n return to_time(t)\n return to_datetime(t).time()\n\n\ndef get_weekday(dt: DatetimeLike) -> str:\n \"\"\"\n Returns the day of the week on which a DatetimeLike object falls\n\n Parameters:\n dt (DatetimeLike): The object whose weekday is determined\n\n Returns:\n String of the day of the week on which the DatetimeLike object falls\n \"\"\"\n weekdays = {\n 0: \"Monday\",\n 1: \"Tuesday\",\n 2: \"Wednesday\",\n 3: \"Thursday\",\n 4: \"Friday\",\n 5: \"Saturday\",\n 6: \"Sunday\",\n }\n\n return weekdays[to_datetime(dt).weekday()]\n\n\ndef is_func(f: Any) -> bool:\n \"\"\"\n Returns a boolean value indicating whether or not f is a kind of function\n\n Given an object f, returns a boolean value indicating whether or not the\n object is a function. Idenfities all python objects whose sole or primary\n purpose is to be called directly, rather than objects that simply support\n an implementation of __call__.\n\n Behavior is slightly different than the inspect module's isfunction(), as it\n includes methods (bound and unbound), as well as abstract, static, and class\n methods.\n\n A 'function' is an instance of any of the following:\n * function\n * method (bound or unbound)\n * staticmethod\n * classmethod\n * abstractmethod\n * lambda\n * built-in-function\n\n Parameters:\n f: The object who's status as a function is being determined\n\n Returns:\n True if f is a method, function, builtin-method-or-function, or lambda,\n else False\n \"\"\"\n\n # Fake class to access type 'method' and 'classmethod'\n class C:\n def method(self):\n pass\n\n # Getting abstract base methods\n class ABCC(ABC):\n @abstractmethod\n def amethod(self):\n pass\n\n # Fake function to access type 'function'\n def func():\n pass\n\n # Getting classic and static methods\n cmethod = classmethod(func)\n smethod = staticmethod(func)\n\n # Fake lambda to access type 'lambda'\n lamb = lambda: None\n\n # Fake instance to access type 'bound method'\n c = C()\n\n # Gathering all callable types\n functype = type(func)\n methodtype = type(C.method)\n classmethodtype = type(cmethod)\n staticmethodtype = type(smethod)\n abstractmethodtype = type(ABCC.amethod)\n boundmethodtype = type(c.method)\n lambdatype = type(lamb)\n builtintype = type(print)\n\n return isinstance(\n f,\n (\n functype,\n methodtype,\n boundmethodtype,\n lambdatype,\n builtintype,\n abstractmethodtype,\n classmethodtype,\n staticmethodtype,\n ),\n )\n\n\ndef nearest_expiry(\n expiry: DatetimeLike, method: Literal[\"after\", \"before\", \"both\"] = \"after\"\n) -> datetime:\n \"\"\"\n Returns the nearest valid expiry to the input datetime object\n\n Determining expiries for options contracts can be difficult, because they\n must fall on a business day, and their expiry time must be the market close.\n Given an expiry whose validity is unknown, this function returns the\n nearest expiry that is guaranteed to be valid. If the given expiry is\n valid, it will be unchanged when it is returned.\n\n The method argument is used to determine how the 'nearest' is defined. 
It\n has three options: \"after\", \"before\", and \"both\"\n\n Method must be one of the following string literals:\n * \"after\": returns the nearest expiry that is AFTER the input expiry\n * \"before\": returns the nearest expiry that is BEFORE the input expiry.\n * | \"both\": compares the distances of the nearest before and after, and\n | return the smaller of the two. In the case that they are equal, the\n | date determined by \"after\" will be used.\n\n The default argument is \"after\" because using \"before\" or \"both\" can\n potentially lead to dangerous behavior for algorithms, as it can return an\n expiry which is before the current date of the algorithm. This can cause\n options contracts to initialize as expired. Only change the method\n argument if you are positive that the returned expiry will be greater\n than the algorithm's current date.\n\n Parameters:\n expiry (DatetimeLike):\n The expiry who's closest valid expiry will be determined\n\n method:\n One of \"after\", \"before\", or \"both\"\n\n Returns:\n The nearest valid expiry\n \"\"\"\n\n # Ensuring expiry is a pydatetime\n expiry = to_datetime(expiry)\n\n # All expiries must expire at market close (4PM)\n expiry = set_time(expiry, \"4:00 PM\")\n\n # Change the expiry day if it is not a weekday\n if expiry.weekday() > 4:\n\n # Closest AFTER\n if method == \"after\":\n dist = 7 - expiry.weekday()\n expiry += timedelta(days=dist)\n\n # Closest BEFORE\n elif method == \"before\":\n dist = expiry.weekday() - 4\n expiry -= timedelta(days=dist)\n\n # Comparing both\n elif method == \"both\":\n bdist = expiry.weekday() - 4\n adist = 7 - expiry.weekday()\n if bdist < adist:\n expiry -= timedelta(days=bdist)\n else:\n expiry += timedelta(days=adist)\n\n return expiry\n\n\ndef optimal_start(\n start: datetime,\n max_start: datetime,\n min_end: datetime,\n end: Optional[DatetimeLike] = None,\n t: Optional[TimeLike] = None,\n) -> datetime:\n \"\"\"\n Based an Environment's instantiated/tracked assets, returns an optimal datetime\n for starting a backtest\n\n Returns a backtest starting datetime that:\n * Is guaranteed to be within the date range of all intantiated assets\n * | Is guaranteed to have ample time for calculations of historical\n | volatility, beta, percent change etc. BEFORE the start date\n * Automatically adjusts to accomodate shorter ending periods\n\n Parameters:\n start:\n A datetime object indictating the actual starting datetime\n\n max_start:\n A datetime object indicating the maximum possible starting datetime\n\n min_end:\n A datetime object indicating the minimum possible ending datetime\n\n end (Optional[DatetimeLike]):\n The desired endpoint on which to base the optimal start point\n\n t (Optional[TimeLike]):\n The returned optimal start's time\n\n Returns:\n The optimal starting datetime\n \"\"\"\n end = min_end if end is None else to_datetime(end)\n\n # If the maximum start date is before the minimum end date, there is\n # no valid 'optimal start', because there is no date range that allows\n # backtesting of all available data.\n if max_start >= end:\n return start\n\n # Determining the optimal start period. 
To avoid errors, we will not sync to the beginning\n optimal_delta = (end - max_start) / 2\n optimal_date = max_start + optimal_delta\n\n # Setting the optimal date's time to market open unless specified otherwise\n t = \"00:00:00\" if t is None else to_time(t)\n set_time(optimal_date, t)\n\n # Bounding the date to acceptable minimums and maximums\n lower_bound = set_time(max_start + timedelta(days=1), t)\n upper_bound = set_time(max_start + timedelta(days=365), t)\n optimal_start = bounded(optimal_date, lower=lower_bound, upper=upper_bound)\n\n return optimal_start\n\n\ndef progress_print(to_print: Any, last: list[int] = [0]) -> None:\n \"\"\"Prints, but returns the carriage to the front of the last print\"\"\"\n print(\"\\r\" + (\" \" * last[0]), end=\"\\r\", flush=True) # type: ignore[operator]\n print(to_print, end=\"\", flush=True)\n last[0] = len(str(to_print))\n\n\ndef read_timestring(timestring: str) -> time:\n \"\"\"\n Given a timestring, returns a datetime.time object representative of the time\n\n This function reads in 'timestrings', which are one of two things:\n #. | Isoformat times as strings, using 24 hours\n | (eg 04:00:00, 18:30, 02:59:59.99, etc)\n\n #. | Strings based on 12 hour clocks\n | (see ag.utils.read_twelve_hour_timestring docs)\n\n Using this timestring, returns a python datetime.time object corresponding\n to the time in the timestring. if dtype is set to dict, a deconstructed\n datetime attr dictionary will instead be returned. For more info on\n dtdicts, read the docs for ag.utils.deconstruct_dt\n\n Parameters:\n timestring:\n string representing the time\n\n dtype:\n The type of data to return\n\n Returns:\n The time or dict object corresponding to the time in the timestring\n \"\"\"\n try:\n return read_twelve_hour_timestring(timestring)\n except (TypeError, ValueError) as e:\n return time.fromisoformat(timestring)\n\n\ndef read_twelve_hour_timestring(timestring: str) -> time:\n \"\"\"Reads a timestring based on a 12 hour clock and returns a time\n\n Given a timestring representing a time on a 12 hour clock, returns the\n appropriate time object\n\n Must be formatted as follows:\n * hour | This is the only required value, integer\n * minute | separated from hour by a colon, optional, integer\n * second | separated from minute by a colon, optional, float\n * AM/PM | string 'AM' or 'PM', separated from second by a space\n\n When AM or PM is not provided in the timestring, AM will be assumed.\n\n Valid Examples:\n * '4:30 PM'\n * '4:30 AM'\n * '1 PM'\n * '1'\n * '11:59:59.999 PM'\n * '12:00:00 AM'\n\n Invalid Examples:\n * '0:00'\n * '13:30'\n * '103 PM'\n * '0'\n * '22'\n * '4:30:99 PM'\n * '3:99 PM'\n\n Parameters:\n timestring: The string containing the time to convert to a time object\n\n Returns:\n The corresponding time object\n\n Raises:\n TypeError:\n When timestring is not a string. 
Only str objects can be parsed\n\n ValueError:\n When the timetring is invalid / improperly formatted.\n \"\"\"\n # Timestrings must be strs\n if not isinstance(timestring, str):\n raise TypeError(f\"timestring must be a string, got {type(timestring)}\")\n\n # Variable Initialization\n ampm = \"AM\"\n info = []\n timestring = timestring.split(\" \") # type: ignore[assignment]\n\n # Getting AM/PM component\n if len(timestring) > 1:\n ampm = timestring[1]\n\n # Getting individual time components\n info = timestring[0].split(\":\")\n\n # isoformat is 00:00:00.00, max 3 colons\n if len(info) > 4:\n raise ValueError(f\"Failed to parse timestring {timestring}\")\n\n # collecting the attributes necessary to create a time object\n tdict = {}\n attrs = [\"hour\", \"minute\", \"second\", \"microsecond\"]\n for attr, value in zip(attrs, info):\n tdict[attr] = int(value)\n\n # Setting missing components to 0\n for attr in attrs:\n if not tdict.get(attr):\n tdict[attr] = 0\n\n # hours less and 1 and more than 12 are off limits in 12 hour clocks\n if not 1 <= tdict[\"hour\"] <= 12:\n raise ValueError(f\"Failed to parse timestring {timestring}\")\n\n # 12:30 AM is 00:30 isoformat\n if ampm == \"AM\" and tdict[\"hour\"] == 12:\n tdict[\"hour\"] == 0\n\n # 12:30 PM is 12:30 isoformat, 1:30 PM is 13:30 isoformat\n elif ampm == \"PM\" and tdict[\"hour\"] < 12:\n tdict[\"hour\"] += 12\n\n # Building and returning a time object\n return time(**tdict) # type: ignore[arg-type]\n\n\ndef set_time(dt: DatetimeLike, t: DateOrTime) -> datetime:\n \"\"\"Sets the given datetime-like object to the given time\n\n Given a DatetimeLike object 'dt' and a time-like object 't', returns a\n datetime like object that shares the date of dt and the time of t.\n\n Very similar to datetime.combine, but accepts datetime objects for both\n inputs.\n\n Parameters:\n dt (DatetimeLike): Datetime to convert\n t (DateOrTime): Time to convert to\n\n Returns:\n python datetime.datetime object with converted time\n \"\"\"\n # Initializing the new time that will be set\n newtime: dict[str, float] = {}\n\n # Reading the necessary time attributes\n if isinstance(t, str):\n t = read_timestring(t)\n newtime = deconstruct_dt(t)\n elif isinstance(t, time):\n newtime = deconstruct_dt(t)\n else:\n newtime = deconstruct_dt(to_datetime(t).time())\n\n # Creating the new datetime with t=t\n return to_datetime(dt).replace(**newtime) # type: ignore [arg-type]\n\n\ndef timestring(t: DateOrTime) -> str:\n \"\"\"Converts a time-like object to a 12-hour-clock timestring\n\n Given a time-like object t, returns a timestring represented by the\n 12-hour-clock (eg. 
4:30 PM).\n\n Parameters:\n t (DateOrTime):\n date or time object to read into a 12-hour-clock-based timestring\n\n Returns:\n A string representing the time on a 12-hour-clock\n \"\"\"\n # Ensuring that t is a time object\n if not isinstance(t, time):\n t = to_datetime(t).time()\n\n # Deconstructing components to create a time string\n ampm = \"AM\"\n hour = t.hour\n minute = t.minute if t.minute > 9 else f\"0{t.minute}\"\n if hour > 12:\n ampm = \"PM\"\n hour -= 12\n return f\"{hour}:{minute} {ampm}\"\n\n\ndef to_datetime(dtlike: DatetimeLike) -> datetime:\n \"\"\"\n Given a datetime-like object, converts it to a python standard datetime\n\n Parameters:\n dtlike (DatetimeLike):\n The Datetime-convertable object\n\n Returns:\n The converted python datetime\n\n Raises:\n TypeError: Only accepts python-datetime-convertable objects\n \"\"\"\n if isinstance(dtlike, datetime):\n return dtlike\n elif isinstance(dtlike, pd.Timestamp):\n return dtlike.to_pydatetime()\n elif isinstance(dtlike, np.datetime64):\n return pd.Timestamp(dtlike).to_pydatetime()\n elif isinstance(dtlike, date):\n return datetime.combine(dtlike, datetime.min.time())\n elif isinstance(dtlike, str):\n return datetime.fromisoformat(dtlike)\n\n raise TypeError(f\"Can not convert passed object {dtlike} to python datetime\")\n\n\ndef to_step(current: datetime, delta: Union[DateOrTime, timedelta, float]) -> timedelta:\n \"\"\"\n Converts an ambiguous delta object to a python timedelta\n\n Given an amiguous object which can in some way be interpreted as a timedelta\n relative to some 'current' time, converts that object to an appropriate\n timedelta object, or 'step' in time.\n\n Parameters:\n current:\n The 'current' time, which determines how to interpret the delta\n\n delta (Union[DateOrTime, timedelta, float]);\n The object being passed that may represent a 'step' in time\n\n Returns:\n the appropriate timedelta 'step'\n\n Raises:\n TypeError:\n When passed a type that can not be coerced/interpreted\n\n ValueError:\n When a type-appropriate object can not be coerced, or is in some way\n invalid (eg. the step in time is BEFORE the current time)\n \"\"\"\n # Multiple parses must be made on strings to successfully coerce all of them\n if isinstance(delta, str):\n try:\n delta = set_time(current, read_timestring(delta))\n except ValueError:\n delta = datetime.fromisoformat(delta) # type: ignore[arg-type]\n\n elif isinstance(delta, time):\n delta = set_time(current, delta)\n\n elif isinstance(delta, (float, int)):\n delta = current + timedelta(days=delta)\n\n elif isinstance(delta, timedelta):\n delta = current + delta\n\n # if isinstance(delta, DatetimeLike):\n else:\n delta = to_datetime(delta)\n\n if delta > current:\n return delta - current\n\n raise ValueError(\n f\"Passed delta {delta} is prior to current time {current}. 
Please \"\n \"choose a time AFTER the current date.\"\n )\n\n\ndef to_time(tlike: TimeLike) -> time:\n \"\"\"\n Given a TimeLike object, converts it to a python standard time object\n\n Parameters:\n tlike:\n The time-convertable object\n\n Returns:\n The converted python time object\n\n Raises:\n TypeError: Only accepts python-time-convertable objects\n \"\"\"\n if isinstance(tlike, str):\n return read_timestring(tlike)\n elif isinstance(tlike, time):\n return tlike\n\n raise TypeError(f\"Can not convert passed object {tlike} to python time\")\n\n\nclass NullClass:\n \"\"\"\n A class designed to take the place of other functions, modules, or classes\n\n This class stands in place of a function, class, or module attached to\n another class as an attribute. When an attribute is initialized as a\n NullClass, one can safely access it as an attribute, call it, and access\n attributes on it. These actions can also be performed recursively; any of\n these operations performed on the nullclass will simply return itself,\n allowing them to be chained infinitely.\n\n Use this class in place of another function or class in order to safely\n use an attribute without making constant checks.\n\n This is most useful in place of functions/classes that perform\n logging/printing, but also makes sense in place of functions that modify\n things in place or always return None.\n\n Examples:\n .. highlight:: python\n .. code-block:: python\n\n class MyClass:\n def __init__(self, data, verbose=False):\n # This is cleaner and more pythonic than...\n self.print = print if verbose else NullClass()\n self.print(\"Initialized as Verbose!\")\n\n # Alternative 1\n self.print = print if verbose else lambda *args, **kwargs: None\n self.print(\"Initialized as Verbose!\")\n\n # Alternative 2\n self.print = print if print is verbose else None\n if self.print is not None:\n self.print(\"Initialized as Verbose!\")\n\n # Alternative 3\n self.verbose = verbose\n if self.verbose:\n print(\"Initialized as Verbose!\")\n\n # etc etc etc...\n\n # This is cleaner and more pythonic than...\n self.tqdm = tqdm.progress_bar if verbose else NullClass()\n with self.tqdm(total=1000) as pbar:\n while condition:\n self.do_something()\n pbar.update(1) # Safe!\n\n # Alternative\n self.verbose = verbose\n if verbose:\n with tqdm.progress_bar(total=1000) as pbar:\n while condition:\n self.do_something()\n pbar.update(1)\n else:\n while condition:\n self.do_something() # gross.\n \"\"\"\n\n def __call__(self, *args: Any, **kwargs: Any) -> NullClass:\n return self\n\n def __getattr__(self, attr: str) -> NullClass:\n return self\n\n def __enter__(self, *args, **kwargs) -> NullClass:\n return self\n\n def __exit__(self, *args, **kwargs) -> None:\n pass\n\n def __bool__(self) -> bool:\n return False\n" ]
[ [ "pandas.Timestamp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ozcell/gym_wmgds_ma
[ "c2cb22943913361947216b908d50decc46616e99", "c2cb22943913361947216b908d50decc46616e99", "c2cb22943913361947216b908d50decc46616e99" ]
[ "gym_wmgds/envs/mujoco/ant.py", "gym_wmgds/envs/mujoco/thrower.py", "gym_wmgds/envs/box2d/bipedal_walker.py" ]
[ "import numpy as np\nfrom gym_wmgds import utils\nfrom gym_wmgds.envs.mujoco import mujoco_env\n\nclass AntEnv(mujoco_env.MujocoEnv, utils.EzPickle):\n def __init__(self):\n mujoco_env.MujocoEnv.__init__(self, 'ant.xml', 5)\n utils.EzPickle.__init__(self)\n\n def step(self, a):\n xposbefore = self.get_body_com(\"torso\")[0]\n self.do_simulation(a, self.frame_skip)\n xposafter = self.get_body_com(\"torso\")[0]\n forward_reward = (xposafter - xposbefore)/self.dt\n ctrl_cost = .5 * np.square(a).sum()\n contact_cost = 0.5 * 1e-3 * np.sum(\n np.square(np.clip(self.sim.data.cfrc_ext, -1, 1)))\n survive_reward = 1.0\n reward = forward_reward - ctrl_cost - contact_cost + survive_reward\n state = self.state_vector()\n notdone = np.isfinite(state).all() \\\n and state[2] >= 0.2 and state[2] <= 1.0\n done = not notdone\n ob = self._get_obs()\n return ob, reward, done, dict(\n reward_forward=forward_reward,\n reward_ctrl=-ctrl_cost,\n reward_contact=-contact_cost,\n reward_survive=survive_reward)\n\n def _get_obs(self):\n return np.concatenate([\n self.sim.data.qpos.flat[2:],\n self.sim.data.qvel.flat,\n np.clip(self.sim.data.cfrc_ext, -1, 1).flat,\n ])\n\n def reset_model(self):\n qpos = self.init_qpos + self.np_random.uniform(size=self.model.nq, low=-.1, high=.1)\n qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1\n self.set_state(qpos, qvel)\n return self._get_obs()\n\n def viewer_setup(self):\n self.viewer.cam.distance = self.model.stat.extent * 0.5\n", "import numpy as np\nfrom gym_wmgds import utils\nfrom gym_wmgds.envs.mujoco import mujoco_env\n\nclass ThrowerEnv(mujoco_env.MujocoEnv, utils.EzPickle):\n def __init__(self):\n utils.EzPickle.__init__(self)\n self._ball_hit_ground = False\n self._ball_hit_location = None\n mujoco_env.MujocoEnv.__init__(self, 'thrower.xml', 5)\n\n def step(self, a):\n ball_xy = self.get_body_com(\"ball\")[:2]\n goal_xy = self.get_body_com(\"goal\")[:2]\n\n if not self._ball_hit_ground and self.get_body_com(\"ball\")[2] < -0.25:\n self._ball_hit_ground = True\n self._ball_hit_location = self.get_body_com(\"ball\")\n\n if self._ball_hit_ground:\n ball_hit_xy = self._ball_hit_location[:2]\n reward_dist = -np.linalg.norm(ball_hit_xy - goal_xy)\n else:\n reward_dist = -np.linalg.norm(ball_xy - goal_xy)\n reward_ctrl = - np.square(a).sum()\n\n reward = reward_dist + 0.002 * reward_ctrl\n self.do_simulation(a, self.frame_skip)\n ob = self._get_obs()\n done = False\n return ob, reward, done, dict(reward_dist=reward_dist,\n reward_ctrl=reward_ctrl)\n\n def viewer_setup(self):\n self.viewer.cam.trackbodyid = 0\n self.viewer.cam.distance = 4.0\n\n def reset_model(self):\n self._ball_hit_ground = False\n self._ball_hit_location = None\n\n qpos = self.init_qpos\n self.goal = np.array([self.np_random.uniform(low=-0.3, high=0.3),\n self.np_random.uniform(low=-0.3, high=0.3)])\n\n qpos[-9:-7] = self.goal\n qvel = self.init_qvel + self.np_random.uniform(low=-0.005,\n high=0.005, size=self.model.nv)\n qvel[7:] = 0\n self.set_state(qpos, qvel)\n return self._get_obs()\n\n def _get_obs(self):\n return np.concatenate([\n self.sim.data.qpos.flat[:7],\n self.sim.data.qvel.flat[:7],\n self.get_body_com(\"r_wrist_roll_link\"),\n self.get_body_com(\"ball\"),\n self.get_body_com(\"goal\"),\n ])\n", "import sys\nimport math\n\nimport numpy as np\nimport Box2D\nfrom Box2D.b2 import (edgeShape, circleShape, fixtureDef, polygonShape, revoluteJointDef, contactListener)\n\nimport gym_wmgds\nfrom gym_wmgds import spaces\nfrom gym_wmgds.utils import colorize, seeding, EzPickle\n\n# 
This is a simple 4-joint walker robot environment.\n#\n# There are two versions:\n#\n# - Normal, with slightly uneven terrain.\n#\n# - Hardcore with ladders, stumps, pitfalls.\n#\n# Reward is given for moving forward, total 300+ points up to the far end. If the robot falls,\n# it gets -100. Applying motor torque costs a small amount of points; a more optimal agent\n# will get a better score.\n#\n# A heuristic is provided for testing; it's also useful to get demonstrations to\n# learn from. To run the heuristic:\n#\n# python gym_wmgds/envs/box2d/bipedal_walker.py\n#\n# State consists of hull angle speed, angular velocity, horizontal speed, vertical speed,\n# position of joints and joints angular speed, legs contact with ground, and 10 lidar\n# rangefinder measurements to help to deal with the hardcore version. There are no coordinates\n# in the state vector. Lidar is less useful in the normal version, but it works.\n#\n# To solve the game you need to get 300 points in 1600 time steps.\n#\n# To solve the hardcore version you need 300 points in 2000 time steps.\n#\n# Created by Oleg Klimov. Licensed on the same terms as the rest of OpenAI gym_wmgds.\n\nFPS    = 50\nSCALE  = 30.0   # affects how fast-paced the game is, forces should be adjusted as well\n\nMOTORS_TORQUE = 80\nSPEED_HIP     = 4\nSPEED_KNEE    = 6\nLIDAR_RANGE   = 160/SCALE\n\nINITIAL_RANDOM = 5\n\nHULL_POLY =[\n    (-30,+9), (+6,+9), (+34,+1),\n    (+34,-8), (-30,-8)\n    ]\nLEG_DOWN = -8/SCALE\nLEG_W, LEG_H = 8/SCALE, 34/SCALE\n\nVIEWPORT_W = 600\nVIEWPORT_H = 400\n\nTERRAIN_STEP   = 14/SCALE\nTERRAIN_LENGTH = 200     # in steps\nTERRAIN_HEIGHT = VIEWPORT_H/SCALE/4\nTERRAIN_GRASS    = 10    # how long grass spots are, in steps\nTERRAIN_STARTPAD = 20    # in steps\nFRICTION = 2.5\n\nHULL_FD = fixtureDef(\n                shape=polygonShape(vertices=[ (x/SCALE,y/SCALE) for x,y in HULL_POLY ]),\n                density=5.0,\n                friction=0.1,\n                categoryBits=0x0020,\n                maskBits=0x001,  # collide only with ground\n                restitution=0.0) # 0.99 bouncy\n\nLEG_FD = fixtureDef(\n                    shape=polygonShape(box=(LEG_W/2, LEG_H/2)),\n                    density=1.0,\n                    restitution=0.0,\n                    categoryBits=0x0020,\n                    maskBits=0x001)\n\nLOWER_FD = fixtureDef(\n                    shape=polygonShape(box=(0.8*LEG_W/2, LEG_H/2)),\n                    density=1.0,\n                    restitution=0.0,\n                    categoryBits=0x0020,\n                    maskBits=0x001)\n\nclass ContactDetector(contactListener):\n    def __init__(self, env):\n        contactListener.__init__(self)\n        self.env = env\n    def BeginContact(self, contact):\n        if self.env.hull==contact.fixtureA.body or self.env.hull==contact.fixtureB.body:\n            self.env.game_over = True\n        for leg in [self.env.legs[1], self.env.legs[3]]:\n            if leg in [contact.fixtureA.body, contact.fixtureB.body]:\n                leg.ground_contact = True\n    def EndContact(self, contact):\n        for leg in [self.env.legs[1], self.env.legs[3]]:\n            if leg in [contact.fixtureA.body, contact.fixtureB.body]:\n                leg.ground_contact = False\n\nclass BipedalWalker(gym_wmgds.Env, EzPickle):\n    metadata = {\n        'render.modes': ['human', 'rgb_array'],\n        'video.frames_per_second' : FPS\n    }\n\n    hardcore = False\n\n    def __init__(self):\n        EzPickle.__init__(self)\n        self.seed()\n        self.viewer = None\n\n        self.world = Box2D.b2World()\n        self.terrain = None\n        self.hull = None\n\n        self.prev_shaping = None\n\n        self.fd_polygon = fixtureDef(\n                        shape = polygonShape(vertices=\n                        [(0, 0),\n                         (1, 0),\n                         (1, -1),\n                         (0, -1)]),\n                        friction = FRICTION)\n\n        self.fd_edge = fixtureDef(\n                    shape = edgeShape(vertices=\n                    [(0, 0),\n                     (1, 1)]),\n                    friction = FRICTION,\n                    categoryBits=0x0001,\n                )\n\n        self.reset()\n\n        high = np.array([np.inf] * 24)\n        self.action_space = spaces.Box(np.array([-1, -1, -1, -1]), 
np.array([1, 1, 1, 1]), dtype=np.float32)\n self.observation_space = spaces.Box(-high, high, dtype=np.float32)\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def _destroy(self):\n if not self.terrain: return\n self.world.contactListener = None\n for t in self.terrain:\n self.world.DestroyBody(t)\n self.terrain = []\n self.world.DestroyBody(self.hull)\n self.hull = None\n for leg in self.legs:\n self.world.DestroyBody(leg)\n self.legs = []\n self.joints = []\n\n def _generate_terrain(self, hardcore):\n GRASS, STUMP, STAIRS, PIT, _STATES_ = range(5)\n state = GRASS\n velocity = 0.0\n y = TERRAIN_HEIGHT\n counter = TERRAIN_STARTPAD\n oneshot = False\n self.terrain = []\n self.terrain_x = []\n self.terrain_y = []\n for i in range(TERRAIN_LENGTH):\n x = i*TERRAIN_STEP\n self.terrain_x.append(x)\n\n if state==GRASS and not oneshot:\n velocity = 0.8*velocity + 0.01*np.sign(TERRAIN_HEIGHT - y)\n if i > TERRAIN_STARTPAD: velocity += self.np_random.uniform(-1, 1)/SCALE #1\n y += velocity\n\n elif state==PIT and oneshot:\n counter = self.np_random.randint(3, 5)\n poly = [\n (x, y),\n (x+TERRAIN_STEP, y),\n (x+TERRAIN_STEP, y-4*TERRAIN_STEP),\n (x, y-4*TERRAIN_STEP),\n ]\n self.fd_polygon.shape.vertices=poly\n t = self.world.CreateStaticBody(\n fixtures = self.fd_polygon)\n t.color1, t.color2 = (1,1,1), (0.6,0.6,0.6)\n self.terrain.append(t)\n\n self.fd_polygon.shape.vertices=[(p[0]+TERRAIN_STEP*counter,p[1]) for p in poly]\n t = self.world.CreateStaticBody(\n fixtures = self.fd_polygon)\n t.color1, t.color2 = (1,1,1), (0.6,0.6,0.6)\n self.terrain.append(t)\n counter += 2\n original_y = y\n\n elif state==PIT and not oneshot:\n y = original_y\n if counter > 1:\n y -= 4*TERRAIN_STEP\n\n elif state==STUMP and oneshot:\n counter = self.np_random.randint(1, 3)\n poly = [\n (x, y),\n (x+counter*TERRAIN_STEP, y),\n (x+counter*TERRAIN_STEP, y+counter*TERRAIN_STEP),\n (x, y+counter*TERRAIN_STEP),\n ]\n self.fd_polygon.shape.vertices=poly\n t = self.world.CreateStaticBody(\n fixtures = self.fd_polygon)\n t.color1, t.color2 = (1,1,1), (0.6,0.6,0.6)\n self.terrain.append(t)\n\n elif state==STAIRS and oneshot:\n stair_height = +1 if self.np_random.rand() > 0.5 else -1\n stair_width = self.np_random.randint(4, 5)\n stair_steps = self.np_random.randint(3, 5)\n original_y = y\n for s in range(stair_steps):\n poly = [\n (x+( s*stair_width)*TERRAIN_STEP, y+( s*stair_height)*TERRAIN_STEP),\n (x+((1+s)*stair_width)*TERRAIN_STEP, y+( s*stair_height)*TERRAIN_STEP),\n (x+((1+s)*stair_width)*TERRAIN_STEP, y+(-1+s*stair_height)*TERRAIN_STEP),\n (x+( s*stair_width)*TERRAIN_STEP, y+(-1+s*stair_height)*TERRAIN_STEP),\n ]\n self.fd_polygon.shape.vertices=poly\n t = self.world.CreateStaticBody(\n fixtures = self.fd_polygon)\n t.color1, t.color2 = (1,1,1), (0.6,0.6,0.6)\n self.terrain.append(t)\n counter = stair_steps*stair_width\n\n elif state==STAIRS and not oneshot:\n s = stair_steps*stair_width - counter - stair_height\n n = s/stair_width\n y = original_y + (n*stair_height)*TERRAIN_STEP\n\n oneshot = False\n self.terrain_y.append(y)\n counter -= 1\n if counter==0:\n counter = self.np_random.randint(TERRAIN_GRASS/2, TERRAIN_GRASS)\n if state==GRASS and hardcore:\n state = self.np_random.randint(1, _STATES_)\n oneshot = True\n else:\n state = GRASS\n oneshot = True\n\n self.terrain_poly = []\n for i in range(TERRAIN_LENGTH-1):\n poly = [\n (self.terrain_x[i], self.terrain_y[i]),\n (self.terrain_x[i+1], self.terrain_y[i+1])\n ]\n self.fd_edge.shape.vertices=poly\n t = 
self.world.CreateStaticBody(\n fixtures = self.fd_edge)\n color = (0.3, 1.0 if i%2==0 else 0.8, 0.3)\n t.color1 = color\n t.color2 = color\n self.terrain.append(t)\n color = (0.4, 0.6, 0.3)\n poly += [ (poly[1][0], 0), (poly[0][0], 0) ]\n self.terrain_poly.append( (poly, color) )\n self.terrain.reverse()\n\n def _generate_clouds(self):\n # Sorry for the clouds, couldn't resist\n self.cloud_poly = []\n for i in range(TERRAIN_LENGTH//20):\n x = self.np_random.uniform(0, TERRAIN_LENGTH)*TERRAIN_STEP\n y = VIEWPORT_H/SCALE*3/4\n poly = [\n (x+15*TERRAIN_STEP*math.sin(3.14*2*a/5)+self.np_random.uniform(0,5*TERRAIN_STEP),\n y+ 5*TERRAIN_STEP*math.cos(3.14*2*a/5)+self.np_random.uniform(0,5*TERRAIN_STEP) )\n for a in range(5) ]\n x1 = min( [p[0] for p in poly] )\n x2 = max( [p[0] for p in poly] )\n self.cloud_poly.append( (poly,x1,x2) )\n\n def reset(self):\n self._destroy()\n self.world.contactListener_bug_workaround = ContactDetector(self)\n self.world.contactListener = self.world.contactListener_bug_workaround\n self.game_over = False\n self.prev_shaping = None\n self.scroll = 0.0\n self.lidar_render = 0\n\n W = VIEWPORT_W/SCALE\n H = VIEWPORT_H/SCALE\n\n self._generate_terrain(self.hardcore)\n self._generate_clouds()\n\n init_x = TERRAIN_STEP*TERRAIN_STARTPAD/2\n init_y = TERRAIN_HEIGHT+2*LEG_H\n self.hull = self.world.CreateDynamicBody(\n position = (init_x, init_y),\n fixtures = HULL_FD\n )\n self.hull.color1 = (0.5,0.4,0.9)\n self.hull.color2 = (0.3,0.3,0.5)\n self.hull.ApplyForceToCenter((self.np_random.uniform(-INITIAL_RANDOM, INITIAL_RANDOM), 0), True)\n\n self.legs = []\n self.joints = []\n for i in [-1,+1]:\n leg = self.world.CreateDynamicBody(\n position = (init_x, init_y - LEG_H/2 - LEG_DOWN),\n angle = (i*0.05),\n fixtures = LEG_FD\n )\n leg.color1 = (0.6-i/10., 0.3-i/10., 0.5-i/10.)\n leg.color2 = (0.4-i/10., 0.2-i/10., 0.3-i/10.)\n rjd = revoluteJointDef(\n bodyA=self.hull,\n bodyB=leg,\n localAnchorA=(0, LEG_DOWN),\n localAnchorB=(0, LEG_H/2),\n enableMotor=True,\n enableLimit=True,\n maxMotorTorque=MOTORS_TORQUE,\n motorSpeed = i,\n lowerAngle = -0.8,\n upperAngle = 1.1,\n )\n self.legs.append(leg)\n self.joints.append(self.world.CreateJoint(rjd))\n\n lower = self.world.CreateDynamicBody(\n position = (init_x, init_y - LEG_H*3/2 - LEG_DOWN),\n angle = (i*0.05),\n fixtures = LOWER_FD\n )\n lower.color1 = (0.6-i/10., 0.3-i/10., 0.5-i/10.)\n lower.color2 = (0.4-i/10., 0.2-i/10., 0.3-i/10.)\n rjd = revoluteJointDef(\n bodyA=leg,\n bodyB=lower,\n localAnchorA=(0, -LEG_H/2),\n localAnchorB=(0, LEG_H/2),\n enableMotor=True,\n enableLimit=True,\n maxMotorTorque=MOTORS_TORQUE,\n motorSpeed = 1,\n lowerAngle = -1.6,\n upperAngle = -0.1,\n )\n lower.ground_contact = False\n self.legs.append(lower)\n self.joints.append(self.world.CreateJoint(rjd))\n\n self.drawlist = self.terrain + self.legs + [self.hull]\n\n class LidarCallback(Box2D.b2.rayCastCallback):\n def ReportFixture(self, fixture, point, normal, fraction):\n if (fixture.filterData.categoryBits & 1) == 0:\n return 1\n self.p2 = point\n self.fraction = fraction\n return 0\n self.lidar = [LidarCallback() for _ in range(10)]\n\n return self.step(np.array([0,0,0,0]))[0]\n\n def step(self, action):\n #self.hull.ApplyForceToCenter((0, 20), True) -- Uncomment this to receive a bit of stability help\n control_speed = False # Should be easier as well\n if control_speed:\n self.joints[0].motorSpeed = float(SPEED_HIP * np.clip(action[0], -1, 1))\n self.joints[1].motorSpeed = float(SPEED_KNEE * np.clip(action[1], -1, 1))\n 
self.joints[2].motorSpeed = float(SPEED_HIP * np.clip(action[2], -1, 1))\n self.joints[3].motorSpeed = float(SPEED_KNEE * np.clip(action[3], -1, 1))\n else:\n self.joints[0].motorSpeed = float(SPEED_HIP * np.sign(action[0]))\n self.joints[0].maxMotorTorque = float(MOTORS_TORQUE * np.clip(np.abs(action[0]), 0, 1))\n self.joints[1].motorSpeed = float(SPEED_KNEE * np.sign(action[1]))\n self.joints[1].maxMotorTorque = float(MOTORS_TORQUE * np.clip(np.abs(action[1]), 0, 1))\n self.joints[2].motorSpeed = float(SPEED_HIP * np.sign(action[2]))\n self.joints[2].maxMotorTorque = float(MOTORS_TORQUE * np.clip(np.abs(action[2]), 0, 1))\n self.joints[3].motorSpeed = float(SPEED_KNEE * np.sign(action[3]))\n self.joints[3].maxMotorTorque = float(MOTORS_TORQUE * np.clip(np.abs(action[3]), 0, 1))\n\n self.world.Step(1.0/FPS, 6*30, 2*30)\n\n pos = self.hull.position\n vel = self.hull.linearVelocity\n\n for i in range(10):\n self.lidar[i].fraction = 1.0\n self.lidar[i].p1 = pos\n self.lidar[i].p2 = (\n pos[0] + math.sin(1.5*i/10.0)*LIDAR_RANGE,\n pos[1] - math.cos(1.5*i/10.0)*LIDAR_RANGE)\n self.world.RayCast(self.lidar[i], self.lidar[i].p1, self.lidar[i].p2)\n\n state = [\n self.hull.angle, # Normal angles up to 0.5 here, but sure more is possible.\n 2.0*self.hull.angularVelocity/FPS,\n 0.3*vel.x*(VIEWPORT_W/SCALE)/FPS, # Normalized to get -1..1 range\n 0.3*vel.y*(VIEWPORT_H/SCALE)/FPS,\n self.joints[0].angle, # This will give 1.1 on high up, but it's still OK (and there should be spikes on hiting the ground, that's normal too)\n self.joints[0].speed / SPEED_HIP,\n self.joints[1].angle + 1.0,\n self.joints[1].speed / SPEED_KNEE,\n 1.0 if self.legs[1].ground_contact else 0.0,\n self.joints[2].angle,\n self.joints[2].speed / SPEED_HIP,\n self.joints[3].angle + 1.0,\n self.joints[3].speed / SPEED_KNEE,\n 1.0 if self.legs[3].ground_contact else 0.0\n ]\n state += [l.fraction for l in self.lidar]\n assert len(state)==24\n\n self.scroll = pos.x - VIEWPORT_W/SCALE/5\n\n shaping = 130*pos[0]/SCALE # moving forward is a way to receive reward (normalized to get 300 on completion)\n shaping -= 5.0*abs(state[0]) # keep head straight, other than that and falling, any behavior is unpunished\n\n reward = 0\n if self.prev_shaping is not None:\n reward = shaping - self.prev_shaping\n self.prev_shaping = shaping\n\n for a in action:\n reward -= 0.00035 * MOTORS_TORQUE * np.clip(np.abs(a), 0, 1)\n # normalized to about -50.0 using heuristic, more optimal agent should spend less\n\n done = False\n if self.game_over or pos[0] < 0:\n reward = -100\n done = True\n if pos[0] > (TERRAIN_LENGTH-TERRAIN_GRASS)*TERRAIN_STEP:\n done = True\n return np.array(state), reward, done, {}\n\n def render(self, mode='human'):\n from gym_wmgds.envs.classic_control import rendering\n if self.viewer is None:\n self.viewer = rendering.Viewer(VIEWPORT_W, VIEWPORT_H)\n self.viewer.set_bounds(self.scroll, VIEWPORT_W/SCALE + self.scroll, 0, VIEWPORT_H/SCALE)\n\n self.viewer.draw_polygon( [\n (self.scroll, 0),\n (self.scroll+VIEWPORT_W/SCALE, 0),\n (self.scroll+VIEWPORT_W/SCALE, VIEWPORT_H/SCALE),\n (self.scroll, VIEWPORT_H/SCALE),\n ], color=(0.9, 0.9, 1.0) )\n for poly,x1,x2 in self.cloud_poly:\n if x2 < self.scroll/2: continue\n if x1 > self.scroll/2 + VIEWPORT_W/SCALE: continue\n self.viewer.draw_polygon( [(p[0]+self.scroll/2, p[1]) for p in poly], color=(1,1,1))\n for poly, color in self.terrain_poly:\n if poly[1][0] < self.scroll: continue\n if poly[0][0] > self.scroll + VIEWPORT_W/SCALE: continue\n self.viewer.draw_polygon(poly, color=color)\n\n 
self.lidar_render = (self.lidar_render+1) % 100\n i = self.lidar_render\n if i < 2*len(self.lidar):\n l = self.lidar[i] if i < len(self.lidar) else self.lidar[len(self.lidar)-i-1]\n self.viewer.draw_polyline( [l.p1, l.p2], color=(1,0,0), linewidth=1 )\n\n for obj in self.drawlist:\n for f in obj.fixtures:\n trans = f.body.transform\n if type(f.shape) is circleShape:\n t = rendering.Transform(translation=trans*f.shape.pos)\n self.viewer.draw_circle(f.shape.radius, 30, color=obj.color1).add_attr(t)\n self.viewer.draw_circle(f.shape.radius, 30, color=obj.color2, filled=False, linewidth=2).add_attr(t)\n else:\n path = [trans*v for v in f.shape.vertices]\n self.viewer.draw_polygon(path, color=obj.color1)\n path.append(path[0])\n self.viewer.draw_polyline(path, color=obj.color2, linewidth=2)\n\n flagy1 = TERRAIN_HEIGHT\n flagy2 = flagy1 + 50/SCALE\n x = TERRAIN_STEP*3\n self.viewer.draw_polyline( [(x, flagy1), (x, flagy2)], color=(0,0,0), linewidth=2 )\n f = [(x, flagy2), (x, flagy2-10/SCALE), (x+25/SCALE, flagy2-5/SCALE)]\n self.viewer.draw_polygon(f, color=(0.9,0.2,0) )\n self.viewer.draw_polyline(f + [f[0]], color=(0,0,0), linewidth=2 )\n\n return self.viewer.render(return_rgb_array = mode=='rgb_array')\n\n def close(self):\n if self.viewer is not None:\n self.viewer.close()\n self.viewer = None\n\nclass BipedalWalkerHardcore(BipedalWalker):\n hardcore = True\n\nif __name__==\"__main__\":\n # Heurisic: suboptimal, have no notion of balance.\n env = BipedalWalker()\n env.reset()\n steps = 0\n total_reward = 0\n a = np.array([0.0, 0.0, 0.0, 0.0])\n STAY_ON_ONE_LEG, PUT_OTHER_DOWN, PUSH_OFF = 1,2,3\n SPEED = 0.29 # Will fall forward on higher speed\n state = STAY_ON_ONE_LEG\n moving_leg = 0\n supporting_leg = 1 - moving_leg\n SUPPORT_KNEE_ANGLE = +0.1\n supporting_knee_angle = SUPPORT_KNEE_ANGLE\n while True:\n s, r, done, info = env.step(a)\n total_reward += r\n if steps % 20 == 0 or done:\n print(\"\\naction \" + str([\"{:+0.2f}\".format(x) for x in a]))\n print(\"step {} total_reward {:+0.2f}\".format(steps, total_reward))\n print(\"hull \" + str([\"{:+0.2f}\".format(x) for x in s[0:4] ]))\n print(\"leg0 \" + str([\"{:+0.2f}\".format(x) for x in s[4:9] ]))\n print(\"leg1 \" + str([\"{:+0.2f}\".format(x) for x in s[9:14]]))\n steps += 1\n\n contact0 = s[8]\n contact1 = s[13]\n moving_s_base = 4 + 5*moving_leg\n supporting_s_base = 4 + 5*supporting_leg\n\n hip_targ = [None,None] # -0.8 .. +1.1\n knee_targ = [None,None] # -0.6 .. 
+0.9\n hip_todo = [0.0, 0.0]\n knee_todo = [0.0, 0.0]\n\n if state==STAY_ON_ONE_LEG:\n hip_targ[moving_leg] = 1.1\n knee_targ[moving_leg] = -0.6\n supporting_knee_angle += 0.03\n if s[2] > SPEED: supporting_knee_angle += 0.03\n supporting_knee_angle = min( supporting_knee_angle, SUPPORT_KNEE_ANGLE )\n knee_targ[supporting_leg] = supporting_knee_angle\n if s[supporting_s_base+0] < 0.10: # supporting leg is behind\n state = PUT_OTHER_DOWN\n if state==PUT_OTHER_DOWN:\n hip_targ[moving_leg] = +0.1\n knee_targ[moving_leg] = SUPPORT_KNEE_ANGLE\n knee_targ[supporting_leg] = supporting_knee_angle\n if s[moving_s_base+4]:\n state = PUSH_OFF\n supporting_knee_angle = min( s[moving_s_base+2], SUPPORT_KNEE_ANGLE )\n if state==PUSH_OFF:\n knee_targ[moving_leg] = supporting_knee_angle\n knee_targ[supporting_leg] = +1.0\n if s[supporting_s_base+2] > 0.88 or s[2] > 1.2*SPEED:\n state = STAY_ON_ONE_LEG\n moving_leg = 1 - moving_leg\n supporting_leg = 1 - moving_leg\n\n if hip_targ[0]: hip_todo[0] = 0.9*(hip_targ[0] - s[4]) - 0.25*s[5]\n if hip_targ[1]: hip_todo[1] = 0.9*(hip_targ[1] - s[9]) - 0.25*s[10]\n if knee_targ[0]: knee_todo[0] = 4.0*(knee_targ[0] - s[6]) - 0.25*s[7]\n if knee_targ[1]: knee_todo[1] = 4.0*(knee_targ[1] - s[11]) - 0.25*s[12]\n\n hip_todo[0] -= 0.9*(0-s[0]) - 1.5*s[1] # PID to keep head strait\n hip_todo[1] -= 0.9*(0-s[0]) - 1.5*s[1]\n knee_todo[0] -= 15.0*s[3] # vertical speed, to damp oscillations\n knee_todo[1] -= 15.0*s[3]\n\n a[0] = hip_todo[0]\n a[1] = knee_todo[0]\n a[2] = hip_todo[1]\n a[3] = knee_todo[1]\n a = np.clip(0.5*a, -1.0, 1.0)\n\n env.render()\n if done: break\n" ]
[ [ "numpy.square", "numpy.isfinite", "numpy.clip" ], [ "numpy.square", "numpy.linalg.norm" ], [ "numpy.sign", "numpy.array", "numpy.abs", "numpy.clip" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.24", "1.13", "1.16", "1.9", "1.18", "1.23", "1.21", "1.22", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
SivilTaram/dialogue-utterance-rewriter-pytorch
[ "92c2254958b7a1ee9199836f7f2236575270983f", "92c2254958b7a1ee9199836f7f2236575270983f", "92c2254958b7a1ee9199836f7f2236575270983f" ]
[ "onmt/encoders/bert.py", "bert_ckpt_convert.py", "onmt/train_single.py" ]
[ "\"\"\"\nImplementation from: https://raw.githubusercontent.com/Zenglinxiao/OpenNMT-py/bert/onmt/encoders/bert.py\n@Author: Zenglinxiao\n\"\"\"\n\nimport torch.nn as nn\nfrom onmt.encoders.transformer import TransformerEncoderLayer\nfrom onmt.utils.misc import sequence_mask\n\n\nclass BertEncoder(nn.Module):\n \"\"\"BERT Encoder: A Transformer Encoder with LayerNorm and BertPooler.\n :cite:`DBLP:journals/corr/abs-1810-04805`\n\n Args:\n embeddings (onmt.modules.BertEmbeddings): embeddings to use\n num_layers (int): number of encoder layers.\n d_model (int): size of the model\n heads (int): number of heads\n d_ff (int): size of the inner FF layer\n dropout (float): dropout parameters\n \"\"\"\n\n def __init__(self, embeddings, num_layers=12, d_model=768, heads=12,\n d_ff=3072, dropout=0.1, attention_dropout=0.1,\n max_relative_positions=0):\n super(BertEncoder, self).__init__()\n self.num_layers = num_layers\n self.d_model = d_model\n self.heads = heads\n self.dropout = dropout\n # Feed-Forward size should be 4*d_model as in paper\n self.d_ff = d_ff\n\n self.embeddings = embeddings\n # Transformer Encoder Block\n self.encoder = nn.ModuleList(\n [TransformerEncoderLayer(d_model, heads, d_ff,\n dropout, attention_dropout,\n max_relative_positions=max_relative_positions,\n activation='gelu') for _ in range(num_layers)])\n\n self.layer_norm = nn.LayerNorm(d_model, eps=1e-12)\n self.pooler = BertPooler(d_model)\n\n @classmethod\n def from_opt(cls, opt, embeddings):\n \"\"\"Alternate constructor.\"\"\"\n return cls(\n embeddings,\n opt.enc_layers,\n opt.word_vec_size,\n opt.heads,\n opt.transformer_ff,\n opt.dropout[0] if type(opt.dropout) is list else opt.dropout,\n opt.attention_dropout[0] if type(opt.attention_dropout)\n is list else opt.attention_dropout,\n opt.max_relative_positions\n )\n\n def forward(self, input_ids, lengths, token_type_ids=None):\n \"\"\"\n Args:\n input_ids (Tensor): ``(seq_len, batch_size, feature_dim)``, padding ids=0\n lengths (Tensor): ``(batch_size)``, record length of sequence\n token_type_ids (seq_len, batch_size): ``(B, S)``, A(0), B(1), pad(0)\n Returns:\n all_encoder_layers (list of Tensor): ``(B, S, H)``, token level\n pooled_output (Tensor): ``(B, H)``, sequence level\n \"\"\"\n # remove the feature dimension\n # seq_len x batch_size\n\n emb = self.embeddings(input_ids, token_type_ids)\n\n out = emb.transpose(0, 1).contiguous()\n # [batch, seq] -> [batch, 1, seq]\n mask = ~sequence_mask(lengths).unsqueeze(1)\n\n for layer in self.encoder:\n out = layer(out, mask)\n out = self.layer_norm(out)\n\n return emb, out.transpose(0, 1).contiguous(), lengths\n\n def update_dropout(self, dropout):\n self.dropout = dropout\n self.embeddings.update_dropout(dropout)\n for layer in self.encoder:\n layer.update_dropout(dropout)\n\n\nclass BertPooler(nn.Module):\n def __init__(self, hidden_size):\n \"\"\"A pooling block (Linear layer followed by Tanh activation).\n\n Args:\n hidden_size (int): size of hidden layer.\n \"\"\"\n\n super(BertPooler, self).__init__()\n self.dense = nn.Linear(hidden_size, hidden_size)\n self.activation_fn = nn.Tanh()\n\n def forward(self, hidden_states):\n \"\"\"hidden_states[:, 0, :] --> {Linear, Tanh} --> Returns.\n\n Args:\n hidden_states (Tensor): last layer's hidden_states, ``(B, S, H)``\n Returns:\n pooled_output (Tensor): transformed output of last layer's hidden\n \"\"\"\n\n first_token_tensor = hidden_states[:, 0, :] # [batch, d_model]\n pooled_output = self.activation_fn(self.dense(first_token_tensor))\n return pooled_output", 
"#!/usr/bin/env python\n\"\"\" Convert weights of huggingface Bert to onmt Bert\"\"\"\nfrom argparse import ArgumentParser\nimport torch\nfrom onmt.encoders.bert import BertEncoder\nfrom onmt.models.bert_generators import BertPreTrainingHeads\nfrom onmt.modules.bert_embeddings import BertEmbeddings\nfrom collections import OrderedDict\nimport re\n\n\ndef decrement(matched):\n value = int(matched.group(1))\n if value < 1:\n raise ValueError('Value Error when converting string')\n string = \"bert.encoder.layer.{}.output.LayerNorm\".format(value-1)\n return string\n\n\ndef mapping_key(key, max_layers):\n if 'bert.embeddings' in key:\n key = key\n\n elif 'bert.encoder' in key:\n # convert layer_norm weights\n key = re.sub(r'bert.encoder.0.layer_norm\\.(.*)',\n r'bert.embeddings.LayerNorm.\\1', key)\n key = re.sub(r'bert.encoder\\.(\\d+)\\.layer_norm',\n decrement, key)\n # convert attention weights\n key = re.sub(r'bert.encoder\\.(\\d+)\\.self_attn.linear_keys\\.(.*)',\n r'bert.encoder.layer.\\1.attention.self.key.\\2', key)\n key = re.sub(r'bert.encoder\\.(\\d+)\\.self_attn.linear_values\\.(.*)',\n r'bert.encoder.layer.\\1.attention.self.value.\\2', key)\n key = re.sub(r'bert.encoder\\.(\\d+)\\.self_attn.linear_query\\.(.*)',\n r'bert.encoder.layer.\\1.attention.self.query.\\2', key)\n key = re.sub(r'bert.encoder\\.(\\d+)\\.self_attn.final_linear\\.(.*)',\n r'bert.encoder.layer.\\1.attention.output.dense.\\2', key)\n # convert feed forward weights\n key = re.sub(r'bert.encoder\\.(\\d+)\\.feed_forward.layer_norm\\.(.*)',\n r'bert.encoder.layer.\\1.attention.output.LayerNorm.\\2',\n key)\n key = re.sub(r'bert.encoder\\.(\\d+)\\.feed_forward.w_1\\.(.*)',\n r'bert.encoder.layer.\\1.intermediate.dense.\\2', key)\n key = re.sub(r'bert.encoder\\.(\\d+)\\.feed_forward.w_2\\.(.*)',\n r'bert.encoder.layer.\\1.output.dense.\\2', key)\n\n elif 'bert.layer_norm' in key:\n key = re.sub(r'bert.layer_norm',\n r'bert.encoder.layer.' + str(max_layers - 1) +\n '.output.LayerNorm', key)\n elif 'bert.pooler' in key:\n key = key\n elif 'generator.next_sentence' in key:\n key = re.sub(r'generator.next_sentence.linear\\.(.*)',\n r'cls.seq_relationship.\\1', key)\n elif 'generator.mask_lm' in key:\n key = re.sub(r'generator.mask_lm.bias',\n r'cls.predictions.bias', key)\n key = re.sub(r'generator.mask_lm.decode.weight',\n r'cls.predictions.decoder.weight', key)\n key = re.sub(r'generator.mask_lm.transform.dense\\.(.*)',\n r'cls.predictions.transform.dense.\\1', key)\n key = re.sub(r'generator.mask_lm.transform.layer_norm\\.(.*)',\n r'cls.predictions.transform.LayerNorm.\\1', key)\n else:\n raise KeyError(\"Unexpected keys! Please provide HuggingFace weights\")\n return key\n\n\ndef convert_bert_weights(bert_model, weights, n_layers=12):\n bert_model_keys = bert_model.state_dict().keys()\n bert_weights = OrderedDict()\n generator_weights = OrderedDict()\n model_weights = {\"bert\": bert_weights,\n \"generator\": generator_weights}\n hugface_keys = weights.keys()\n try:\n for key in bert_model_keys:\n hugface_key = mapping_key(key, n_layers)\n if hugface_key not in hugface_keys:\n if 'LayerNorm' in hugface_key:\n # Fix LayerNorm of old huggingface ckp\n hugface_key = re.sub(r'LayerNorm.weight',\n r'LayerNorm.gamma', hugface_key)\n hugface_key = re.sub(r'LayerNorm.bias',\n r'LayerNorm.beta', hugface_key)\n if hugface_key in hugface_keys:\n print(\"[OLD Weights file]gamma/beta is used in \" +\n \"naming BertLayerNorm. 
Mapping succeeded.\")\n                else:\n                    raise KeyError(\"Failed to fix LayerNorm %s, check file\"\n                                   % hugface_key)\n            else:\n                raise KeyError(\"Mapped key %s not in weight file\"\n                               % hugface_key)\n            if 'generator' not in key:\n                onmt_key = re.sub(r'bert\\.(.*)', r'\\1', key)\n                model_weights['bert'][onmt_key] = weights[hugface_key]\n            else:\n                onmt_key = re.sub(r'generator\\.(.*)', r'\\1', key)\n                model_weights['generator'][onmt_key] = weights[hugface_key]\n    except KeyError:\n        print(\"Conversion failed.\")\n        raise\n    return model_weights\n\n\ndef main():\n    parser = ArgumentParser()\n    parser.add_argument(\"--layers\", type=int, required=True)\n\n    parser.add_argument(\"--bert_model_weights_file\", \"-i\", type=str,\n                        required=True, help=\"Path to the \"\n                        \"huggingface Bert weights file downloaded from \"\n                        \"https://github.com/huggingface/pytorch-transformers\")\n\n    parser.add_argument(\"--output_name\", \"-o\", type=str, required=True,\n                        help=\"output onmt version Bert weight file Path\")\n    args = parser.parse_args()\n\n    print(\"Model contains {} layers.\".format(args.layers))\n\n    print(\"Loading weights from {}.\".format(args.bert_model_weights_file))\n\n    bert_weights = torch.load(args.bert_model_weights_file)\n    embeddings = BertEmbeddings(28996)  # vocab size doesn't affect the conversion\n    bert_encoder = BertEncoder(embeddings)\n    generator = BertPreTrainingHeads(bert_encoder.d_model,\n                                     embeddings.vocab_size)\n    bertlm = torch.nn.Sequential(OrderedDict([\n        ('bert', bert_encoder),\n        ('generator', generator)]))\n    model_weights = convert_bert_weights(bertlm, bert_weights, args.layers)\n\n    ckp = {'model': model_weights['bert'],\n           'generator': model_weights['generator']}\n\n    outfile = args.output_name\n    print(\"Saving converted weights file to {}\".format(outfile))\n    torch.save(ckp, outfile)\n\n\nif __name__ == '__main__':\n    main()\n", "#!/usr/bin/env python\n\"\"\"Training on a single process.\"\"\"\nimport os\n\nimport torch\n\nfrom onmt.inputters.inputter import build_dataset_iter, \\\n    load_old_vocab, old_style_vocab, build_dataset_iter_multiple\nfrom onmt.model_builder import build_model\nfrom onmt.utils.optimizers import Optimizer\nfrom onmt.utils.misc import set_random_seed\nfrom onmt.trainer import build_trainer\nfrom onmt.models import build_model_saver\nfrom onmt.utils.logging import init_logger, logger\nfrom onmt.utils.parse import ArgumentParser\n\n\ndef _check_save_model_path(opt):\n    save_model_path = os.path.abspath(opt.save_model)\n    model_dirname = os.path.dirname(save_model_path)\n    if not os.path.exists(model_dirname):\n        os.makedirs(model_dirname)\n\n\ndef _tally_parameters(model):\n    enc = 0\n    dec = 0\n    for name, param in model.named_parameters():\n        if 'encoder' in name:\n            enc += param.nelement()\n        else:\n            dec += param.nelement()\n    return enc + dec, enc, dec\n\n\ndef configure_process(opt, device_id):\n    if device_id >= 0:\n        torch.cuda.set_device(device_id)\n    set_random_seed(opt.seed, device_id >= 0)\n\n\ndef main(opt, device_id, batch_queue=None, semaphore=None):\n    # NOTE: It's important that ``opt`` has been validated and updated\n    # at this point.\n    configure_process(opt, device_id)\n    init_logger(opt.log_file)\n    assert len(opt.accum_count) == len(opt.accum_steps), \\\n        'Number of accum_count values must match number of accum_steps'\n    # Load checkpoint if we resume from a previous training.\n    if opt.train_from:\n        logger.info('Loading checkpoint from %s' % opt.train_from)\n        checkpoint = 
torch.load(opt.train_from,\n map_location=lambda storage, loc: storage)\n if 'opt' in checkpoint:\n model_opt = ArgumentParser.ckpt_model_opts(checkpoint[\"opt\"])\n ArgumentParser.update_model_opts(model_opt)\n ArgumentParser.validate_model_opts(model_opt)\n else:\n model_opt = opt\n\n if 'vocab' in checkpoint:\n logger.info('Loading vocab from checkpoint at %s.', opt.train_from)\n vocab = checkpoint['vocab']\n else:\n vocab = torch.load(opt.data + '.vocab.pt')\n else:\n checkpoint = None\n model_opt = opt\n vocab = torch.load(opt.data + '.vocab.pt')\n\n # check for code where vocab is saved instead of fields\n # (in the future this will be done in a smarter way)\n if old_style_vocab(vocab):\n fields = load_old_vocab(\n vocab, opt.model_type, dynamic_dict=opt.copy_attn)\n else:\n fields = vocab\n\n # Report src and tgt vocab sizes, including for features\n for side in ['src', 'tgt']:\n f = fields[side]\n try:\n f_iter = iter(f)\n except TypeError:\n f_iter = [(side, f)]\n for sn, sf in f_iter:\n if sf.use_vocab:\n logger.info(' * %s vocab size = %d' % (sn, len(sf.vocab)))\n\n # Build model.\n model = build_model(model_opt, opt, fields, checkpoint)\n n_params, enc, dec = _tally_parameters(model)\n logger.info('encoder: %d' % enc)\n logger.info('decoder: %d' % dec)\n logger.info('* number of parameters: %d' % n_params)\n _check_save_model_path(opt)\n\n # Build optimizer.\n optim = Optimizer.from_opt(model, opt, checkpoint=checkpoint)\n\n # Build model saver\n model_saver = build_model_saver(model_opt, opt, model, fields, optim)\n\n trainer = build_trainer(\n opt, device_id, model, fields, optim, model_saver=model_saver)\n\n if batch_queue is None:\n if len(opt.data_ids) > 1:\n train_shards = []\n for train_id in opt.data_ids:\n shard_base = \"train_\" + train_id\n train_shards.append(shard_base)\n train_iter = build_dataset_iter_multiple(train_shards, fields, opt)\n else:\n if opt.data_ids[0] is not None:\n shard_base = \"train_\" + opt.data_ids[0]\n else:\n shard_base = \"train\"\n train_iter = build_dataset_iter(shard_base, fields, opt)\n\n else:\n assert semaphore is not None, \\\n \"Using batch_queue requires semaphore as well\"\n\n def _train_iter():\n while True:\n batch = batch_queue.get()\n semaphore.release()\n yield batch\n\n train_iter = _train_iter()\n\n valid_iter = build_dataset_iter(\n \"valid\", fields, opt, is_train=False)\n\n if len(opt.gpu_ranks):\n logger.info('Starting training on GPU: %s' % opt.gpu_ranks)\n else:\n logger.info('Starting training on CPU, could be very slow')\n train_steps = opt.train_steps\n if opt.single_pass and train_steps > 0:\n logger.warning(\"Option single_pass is enabled, ignoring train_steps.\")\n train_steps = 0\n\n trainer.train(\n train_iter,\n train_steps,\n save_checkpoint_steps=opt.save_checkpoint_steps,\n valid_iter=valid_iter,\n valid_steps=opt.valid_steps)\n\n if trainer.report_manager.tensorboard_writer is not None:\n trainer.report_manager.tensorboard_writer.close()\n" ]
[ [ "torch.nn.Linear", "torch.nn.Tanh", "torch.nn.LayerNorm" ], [ "torch.save", "torch.load" ], [ "torch.cuda.set_device", "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
emaballarin/phytorch
[ "68cf0a630e2fee9dd98f08639edcceb2389adf35" ]
[ "tests/cosmology/test_cosmology_apsuite.py" ]
[ "# Based on the astropy test suite (v4.2.1)\n# (https://github.com/astropy/astropy/blob/v4.2.1/astropy/cosmology/tests/test_cosmology.py)\nfrom io import StringIO\nfrom typing import Type\n\nimport numpy as np\nimport pytest\nimport torch\nfrom pytest import mark\nfrom torch import tensor\n\nimport phytorch.cosmology.drivers.analytic\nimport phytorch.cosmology.drivers.analytic_diff\nimport phytorch.cosmology.special\nfrom phytorch.constants import codata2014, G as Newton_G\nfrom phytorch.cosmology.special import AbstractFlatLambdaCDMR, AbstractLambdaCDMR\nfrom phytorch.units.astro import Gpc, Gyr, Mpc\nfrom phytorch.units.si import cm, gram, kelvin, km, s\nfrom phytorch.units.unit import Unit\nfrom tests.common.closeness import close\nfrom tests.common.dtypes import with_default_double\n\n\nZERO = torch.zeros(())\nONE = torch.ones(())\nSMALL = 1e-16\nZ = tensor([0, 0.5, 1, 2])\n\nH70 = 70 * km/s/Mpc\nH704 = 70.4 * km/s/Mpc\n\n\ndef test_critical_density():\n fac = (Newton_G / codata2014.G).to(Unit())\n\n cosmo = AbstractFlatLambdaCDMR()\n cosmo.H0 = H704\n cosmo.Om0 = 0.272\n\n # constants defined only so accurately\n assert ((cosmo.critical_density0 * fac).to(gram / cm**3) - 9.309668456020899e-30) < 1e-9\n assert cosmo.critical_density0 == cosmo.critical_density(0)\n\n assert close((cosmo.critical_density(tensor([1, 5])) * fac).to(gram / cm**3).value,\n [2.70352772e-29, 5.53739080e-28])\n\n\ndef test_xtfuncs():\n cosmo = AbstractLambdaCDMR()\n cosmo.H0, cosmo.Om0, cosmo.Ode0, cosmo.Neff, cosmo.Tcmb0 = H70, 0.3, 0.5, 3.04, 2.725 * kelvin\n\n z = tensor([2, 3.2])\n assert close(cosmo.lookback_time_integrand(tensor(3)), 0.052218976654969378)\n assert close(cosmo.lookback_time_integrand(z), [0.10333179, 0.04644541])\n assert close(cosmo.abs_distance_integrand(tensor(3)), 3.3420145059180402)\n assert close(cosmo.abs_distance_integrand(z), [2.7899584, 3.44104758])\n\n\ndef test_zeroing():\n cosmo = AbstractLambdaCDMR()\n cosmo.Om0 = 0.27\n cosmo.Ode0 = 0\n cosmo.Or0 = 0\n\n assert cosmo.Ode(1.5) == 0\n assert (cosmo.Ode(Z) == ZERO).all()\n assert cosmo.Or(1.5) == 0\n assert (cosmo.Or(Z) == ZERO).all()\n # TODO: add neutrinos\n # assert allclose(cosmo.Onu(1.5), [0, 0, 0, 0])\n # assert allclose(cosmo.Onu(z), [0, 0, 0, 0])\n assert (cosmo.Ob(Z) == ZERO).all()\n\n\ndef test_matter():\n cosmo = AbstractFlatLambdaCDMR()\n cosmo.Om0 = 0.3\n cosmo.Ob0 = 0.045\n\n assert cosmo.Om(0) == 0.3\n assert cosmo.Ob(0) == 0.045\n assert close(cosmo.Om(Z), [0.3, 0.59124088, 0.77419355, 0.92045455])\n assert close(cosmo.Ob(Z), [0.045, 0.08868613, 0.11612903, 0.13806818])\n assert close(cosmo.Odm(Z), [0.255, 0.50255474, 0.65806452, 0.78238636])\n assert close(cosmo.Ob(Z) + cosmo.Odm(Z), cosmo.Om(Z))\n\n\ndef test_ocurv():\n cosmo = AbstractFlatLambdaCDMR()\n cosmo.Om0 = 0.3\n\n assert cosmo.Ok0 == 0\n assert cosmo.Ok(0) == 0\n assert (cosmo.Ok(Z) == ZERO).all()\n\n cosmo = AbstractLambdaCDMR()\n cosmo.Om0 = 0.3\n cosmo.Ode0 = 0.5\n assert abs(cosmo.Ok0 - 0.2) < SMALL\n assert abs(cosmo.Ok(0) - 0.2) < SMALL\n assert close(cosmo.Ok(Z), [0.2, 0.22929936, 0.21621622, 0.17307692])\n\n assert (cosmo.Ok(Z) + cosmo.Om(Z) + cosmo.Ode(Z) == ONE).all()\n\n\ndef test_ode():\n cosmo = AbstractFlatLambdaCDMR()\n cosmo.Om0 = 0.3\n\n assert cosmo.Ode(0) == cosmo.Ode0\n assert close(cosmo.Ode(Z), [0.7, 0.408759, 0.2258065, 0.07954545])\n\n\ndef test_tcmb():\n cosmo = AbstractFlatLambdaCDMR()\n cosmo.H0 = H704\n cosmo.Om0 = 0.272\n cosmo.Tcmb0 = 2.5 * kelvin\n\n assert cosmo.Tcmb(2) == 7.5 * kelvin\n assert (cosmo.Tcmb(tensor([0, 
1, 2, 3, 9.])).to(kelvin).value == tensor([2.5, 5, 7.5, 10, 25])).all()\n\n\ndef test_efunc_vs_invefunc():\n cosmo = AbstractLambdaCDMR()\n cosmo.Om0 = 0.3\n cosmo.Ode0 = 0.7\n\n assert cosmo.efunc(0.5) * cosmo.inv_efunc(0.5) == 1\n assert (cosmo.efunc(Z) * cosmo.inv_efunc(Z) == ONE).all()\n # TODO: test this for subclasses?\n\n\nclass BaseLambdaCDMDriverTest:\n flat_cosmo_cls: Type[phytorch.cosmology.special.BaseFlatLambdaCDM]\n cosmo_cls: Type[phytorch.cosmology.special.BaseLambdaCDM]\n\n\nclass BaseLambdaCDMTest(BaseLambdaCDMDriverTest):\n flat_cosmo_cls: Type[phytorch.cosmology.special.FlatLambdaCDM]\n cosmo_cls: Type[phytorch.cosmology.special.LambdaCDM]\n\n @with_default_double\n @mark.parametrize(('func', 'vals', 'unit', 'rtol'), (\n # From the astropy test suite:\n # Test values were taken from the following web cosmology\n # calculators on 27th Feb 2012:\n # Wright: http://www.astro.ucla.edu/~wright/CosmoCalc.html\n # (https://ui.adsabs.harvard.edu/abs/2006PASP..118.1711W)\n # Kempner: http://www.kempner.net/cosmic.php\n # iCosmos: http://www.icosmos.co.uk/index.html\n (phytorch.cosmology.special.FlatLambdaCDM.comoving_distance,\n (3364.5, 3364.8, 3364.7988), Mpc, 1e-4),\n (phytorch.cosmology.special.FlatLambdaCDM.angular_diameter_distance,\n (1682.3, 1682.4, 1682.3994), Mpc, 1e-4),\n (phytorch.cosmology.special.FlatLambdaCDM.luminosity_distance,\n (6729.2, 6729.6, 6729.5976), Mpc, 1e-4),\n (phytorch.cosmology.special.FlatLambdaCDM.lookback_time,\n (7.841, 7.84178, 7.843), Gyr, 1e-3),\n (phytorch.cosmology.special.FlatLambdaCDM.lookback_distance,\n (2404.0, 2404.24, 2404.4), Mpc, 1e-3),\n ))\n def test_flat_z1(self, func, vals, unit, rtol):\n cosmo = self.flat_cosmo_cls()\n cosmo.H0 = H70\n cosmo.Om0 = 0.27\n\n assert close(getattr(cosmo, func.__name__)(1).to(unit).value, vals, rtol=rtol)\n\n @mark.parametrize('Om0, Ode0, vals', (\n (0.27, 0.73, (29.123, 159.529, 630.427, 1178.531, 2181.485, 3654.802)),\n (0.27, 0, (20.501, 99.019, 380.278, 747.049, 1558.363, 3123.814)),\n (2, 0, (12.619, 44.708, 114.904, 173.709, 258.82, 358.992))\n ))\n def test_comoving_volume(self, Om0, Ode0, vals):\n z = tensor([0.5, 1, 2, 3, 5, 9])\n # for (Om0, Ode0), vals in zip(\n # ((0.27, 0.73), (0.27, 0), (2, 0)),\n # # Form Ned Wright's calculator: not very *accurate* (sic), so\n # # like astropy, test to very low precision\n # ((29.123, 159.529, 630.427, 1178.531, 2181.485, 3654.802),\n # (20.501, 99.019, 380.278, 747.049, 1558.363, 3123.814),\n # (12.619, 44.708, 114.904, 173.709, 258.82, 358.992))\n # ):\n c = self.cosmo_cls()\n c.H0, c.Om0, c.Ode0 = H70, Om0, Ode0\n\n assert close(c.comoving_volume(z).to(Gpc**3).value, vals, rtol=1e-2)\n\n # TODO: (requires integration) test_differential_comoving_volume\n\n icosmo_flat = \"\"\"\\\n # from icosmo (icosmo.org)\n # Om 0.3 w -1 h 0.7 Ol 0.7\n # z comoving_transvers_dist angular_diameter_dist luminosity_dist\n 0.0000000 0.0000000 0.0000000 0.0000000\n 0.16250000 669.77536 576.15085 778.61386\n 0.32500000 1285.5964 970.26143 1703.4152\n 0.50000000 1888.6254 1259.0836 2832.9381\n 0.66250000 2395.5489 1440.9317 3982.6000\n 0.82500000 2855.5732 1564.6976 5211.4210\n 1.0000000 3303.8288 1651.9144 6607.6577\n 1.1625000 3681.1867 1702.2829 7960.5663\n 1.3250000 4025.5229 1731.4077 9359.3408\n 1.5000000 4363.8558 1745.5423 10909.640\n 1.6625000 4651.4830 1747.0359 12384.573\n 1.8250000 4916.5970 1740.3883 13889.387\n 2.0000000 5179.8621 1726.6207 15539.586\n 2.1625000 5406.0204 1709.4136 17096.540\n 2.3250000 5616.5075 1689.1752 18674.888\n 2.5000000 5827.5418 
1665.0120 20396.396\n 2.6625000 6010.4886 1641.0890 22013.414\n 2.8250000 6182.1688 1616.2533 23646.796\n 3.0000000 6355.6855 1588.9214 25422.742\n 3.1625000 6507.2491 1563.3031 27086.425\n 3.3250000 6650.4520 1537.6768 28763.205\n 3.5000000 6796.1499 1510.2555 30582.674\n 3.6625000 6924.2096 1485.0852 32284.127\n 3.8250000 7045.8876 1460.2876 33996.408\n 4.0000000 7170.3664 1434.0733 35851.832\n 4.1625000 7280.3423 1410.2358 37584.767\n 4.3250000 7385.3277 1386.9160 39326.870\n 4.5000000 7493.2222 1362.4040 41212.722\n 4.6625000 7588.9589 1340.2135 42972.480\n \"\"\"\n\n icosmo_open = \"\"\"\\\n # from icosmo (icosmo.org)\n # Om 0.3 w -1 h 0.7 Ol 0.1\n # z comoving_transvers_dist angular_diameter_dist luminosity_dist\n 0.0000000 0.0000000 0.0000000 0.0000000\n 0.16250000 643.08185 553.18868 747.58265\n 0.32500000 1200.9858 906.40441 1591.3062\n 0.50000000 1731.6262 1154.4175 2597.4393\n 0.66250000 2174.3252 1307.8648 3614.8157\n 0.82500000 2578.7616 1413.0201 4706.2399\n 1.0000000 2979.3460 1489.6730 5958.6920\n 1.1625000 3324.2002 1537.2024 7188.5829\n 1.3250000 3646.8432 1568.5347 8478.9104\n 1.5000000 3972.8407 1589.1363 9932.1017\n 1.6625000 4258.1131 1599.2913 11337.226\n 1.8250000 4528.5346 1603.0211 12793.110\n 2.0000000 4804.9314 1601.6438 14414.794\n 2.1625000 5049.2007 1596.5852 15968.097\n 2.3250000 5282.6693 1588.7727 17564.875\n 2.5000000 5523.0914 1578.0261 19330.820\n 2.6625000 5736.9813 1566.4113 21011.694\n 2.8250000 5942.5803 1553.6158 22730.370\n 3.0000000 6155.4289 1538.8572 24621.716\n 3.1625000 6345.6997 1524.4924 26413.975\n 3.3250000 6529.3655 1509.6799 28239.506\n 3.5000000 6720.2676 1493.3928 30241.204\n 3.6625000 6891.5474 1478.0799 32131.840\n 3.8250000 7057.4213 1462.6780 34052.058\n 4.0000000 7230.3723 1446.0745 36151.862\n 4.1625000 7385.9998 1430.7021 38130.224\n 4.3250000 7537.1112 1415.4199 40135.117\n 4.5000000 7695.0718 1399.1040 42322.895\n 4.6625000 7837.5510 1384.1150 44380.133\n \"\"\"\n\n icosmo_closed = \"\"\"\\\n # from icosmo (icosmo.org)\n # Om 2 w -1 h 0.7 Ol 0.1\n # z comoving_transvers_dist angular_diameter_dist luminosity_dist\n 0.0000000 0.0000000 0.0000000 0.0000000\n 0.16250000 601.80160 517.67879 699.59436\n 0.32500000 1057.9502 798.45297 1401.7840\n 0.50000000 1438.2161 958.81076 2157.3242\n 0.66250000 1718.6778 1033.7912 2857.3019\n 0.82500000 1948.2400 1067.5288 3555.5381\n 1.0000000 2152.7954 1076.3977 4305.5908\n 1.1625000 2312.3427 1069.2914 5000.4410\n 1.3250000 2448.9755 1053.3228 5693.8681\n 1.5000000 2575.6795 1030.2718 6439.1988\n 1.6625000 2677.9671 1005.8092 7130.0873\n 1.8250000 2768.1157 979.86398 7819.9270\n 2.0000000 2853.9222 951.30739 8561.7665\n 2.1625000 2924.8116 924.84161 9249.7167\n 2.3250000 2988.5333 898.80701 9936.8732\n 2.5000000 3050.3065 871.51614 10676.073\n 2.6625000 3102.1909 847.01459 11361.774\n 2.8250000 3149.5043 823.39982 12046.854\n 3.0000000 3195.9966 798.99915 12783.986\n 3.1625000 3235.5334 777.30533 13467.908\n 3.3250000 3271.9832 756.52790 14151.327\n 3.5000000 3308.1758 735.15017 14886.791\n 3.6625000 3339.2521 716.19347 15569.263\n 3.8250000 3368.1489 698.06195 16251.319\n 4.0000000 3397.0803 679.41605 16985.401\n 4.1625000 3422.1142 662.87926 17666.664\n 4.3250000 3445.5542 647.05243 18347.576\n 4.5000000 3469.1805 630.76008 19080.493\n 4.6625000 3489.7534 616.29199 19760.729\n \"\"\"\n\n @mark.parametrize('Om0, Ode0, data', (\n (0.3, 0.7, icosmo_flat), (0.3, 0.1, icosmo_open), (2, 0.1, icosmo_closed)\n ))\n def test_flat_open_closed_icosmo(self, Om0, Ode0, data):\n cosmo = 
self.cosmo_cls()\n cosmo.H0, cosmo.Om0, cosmo.Ode0 = H70, Om0, Ode0\n\n z, dm, da, dl = (tensor(_, dtype=torch.get_default_dtype())\n for _ in np.loadtxt(StringIO(data), unpack=True))\n\n assert close(cosmo.comoving_transverse_distance(z).to(Mpc).value, dm)\n assert close(cosmo.angular_diameter_distance(z).to(Mpc).value, da)\n assert close(cosmo.luminosity_distance(z).to(Mpc).value, dl)\n\n def test_distmod(self):\n cosmo = self.flat_cosmo_cls()\n cosmo.H0, cosmo.Om0 = H704, 0.272\n\n assert cosmo.hubble_distance.to(Mpc) == 4258.415596590909\n assert close(cosmo.distmod(tensor([1, 5])), [44.124857, 48.40167258])\n\n @with_default_double\n def test_negdistmod(self):\n cosmo = self.cosmo_cls()\n cosmo.H0, cosmo.Om0, cosmo.Ode0 = H70, 0.2, 1.3\n z = tensor([50, 100])\n assert close(cosmo.luminosity_distance(z).to(Mpc).value, [16612.44047622, -46890.79092244])\n assert close(cosmo.distmod(z), [46.102167189, 48.355437790944])\n\n def test_comoving_distance_z1z2(self):\n cosmo = self.cosmo_cls()\n cosmo.Om0, cosmo.Ode0 = 0.3, 0.8\n\n with pytest.raises(RuntimeError):\n cosmo.comoving_distance_z1z2(tensor((1, 2)), tensor((3, 4, 5)))\n\n assert cosmo.comoving_distance_z1z2(1, 2) == - cosmo.comoving_distance_z1z2(2, 1)\n assert close(\n cosmo.comoving_distance_z1z2(tensor([0, 0, 2, 0.5, 1]), tensor([2, 1, 1, 2.5, 1.1])).to(Mpc).value,\n [3767.90579253, 2386.25591391, -1381.64987862, 2893.11776663, 174.1524683]\n )\n\n @with_default_double\n @mark.parametrize('Om0, val', (\n # (0, 2997.92458), # TODO: cannot do Om0=0 with LambdaCDM, need special cosmology\n (1, 1756.1435599923348),\n ))\n def test_distance_in_special_cosmologies(self, Om0, val):\n cosmo = self.flat_cosmo_cls()\n cosmo.Om0 = Om0\n\n assert close(cosmo.comoving_distance(0).to(Mpc).value, 0)\n assert close(cosmo.comoving_distance(1).to(Mpc).value, val)\n\n @with_default_double\n def test_comoving_transverse_distance_z1z2(self):\n z1, z2 = tensor([0, 0, 2, 0.5, 1]), tensor([2, 1, 1, 2.5, 1.1])\n\n cosmo = self.flat_cosmo_cls()\n cosmo.Om0 = 0.3\n\n with pytest.raises(RuntimeError):\n cosmo.comoving_transverse_distance_z1z2(tensor((1, 2)), tensor((3, 4, 5)))\n\n assert close(cosmo.comoving_transverse_distance_z1z2(1, 2).to(Mpc).value, 1313.2232194828466)\n\n assert close(cosmo.comoving_distance_z1z2(z1, z2).to(Mpc).value,\n cosmo.comoving_transverse_distance_z1z2(z1, z2).to(Mpc).value)\n\n cosmo = self.flat_cosmo_cls()\n cosmo.Om0 = 1.5\n assert close(\n cosmo.comoving_transverse_distance_z1z2(z1, z2).to(Mpc).value,\n [2202.72682564, 1559.51679971, -643.21002593, 1408.36365679, 85.09286258]\n )\n assert close(cosmo.comoving_distance_z1z2(z1, z2).to(Mpc).value,\n cosmo.comoving_transverse_distance_z1z2(z1, z2).to(Mpc).value)\n\n cosmo = self.cosmo_cls()\n cosmo.Om0, cosmo.Ode0 = 0.3, 0.5\n assert close(\n cosmo.comoving_transverse_distance_z1z2(z1, z2).to(Mpc).value,\n [3535.931375645655, 2226.430046551708, -1208.6817970036532, 2595.567367601969, 151.36592003406884]\n )\n\n cosmo = self.cosmo_cls()\n cosmo.Om0, cosmo.Ode0 = 1, 0.2\n assert close(\n cosmo.comoving_transverse_distance_z1z2(0.1, tensor([0, 0.1, 0.2, 0.5, 1.1, 2])).to(Mpc).value,\n [-281.31602666724865, 0, 248.58093707820436, 843.9331377460543, 1618.6104987686672, 2287.5626543279927]\n )\n\n def test_angular_diameter_distance_z1z2(self):\n cosmo = self.flat_cosmo_cls()\n cosmo.H0, cosmo.Om0 = H704, 0.272\n\n with pytest.raises(RuntimeError):\n cosmo.angular_diameter_distance_z1z2(tensor((1, 2)), tensor((3, 4, 5)))\n\n assert close(cosmo.angular_diameter_distance_z1z2(1, 
2).to(Mpc).value, 646.22968662822018)\n assert close(\n cosmo.angular_diameter_distance_z1z2(tensor([0, 0, 2, 0.5, 1]), tensor([2, 1, 1, 2.5, 1.1])).to(Mpc).value,\n [1760.0628637762106, 1670.7497657219858, -969.34452994, 1159.0970895962193, 115.72768186186921]\n )\n assert close(\n cosmo.angular_diameter_distance_z1z2(0.1, tensor([0.1, 0.2, 0.5, 1.1, 2])).to(Mpc).value,\n [0, 332.09893173, 986.35635069, 1508.37010062, 1621.07937976]\n )\n\n # Non-flat (positive Ok0) test\n cosmo = self.cosmo_cls()\n cosmo.H0, cosmo.Om0, cosmo.Ode0 = H704, 0.2, 0.5\n assert close(cosmo.angular_diameter_distance_z1z2(1, 2).to(Mpc).value, 620.1175337852428)\n\n # Non-flat (negative Ok0) test\n cosmo = self.cosmo_cls()\n cosmo.Om0, cosmo.Ode0 = 2, 1\n assert close(cosmo.angular_diameter_distance_z1z2(1, 2).to(Mpc).value, 228.42914659246014)\n\n def test_absorption_distance(self):\n cosmo = self.flat_cosmo_cls()\n cosmo.H0, cosmo.Om0 = H704, 0.272\n assert close(cosmo.absorption_distance(3), 7.98685853)\n assert close(cosmo.absorption_distance(tensor([1, 3])), [1.72576635, 7.98685853])\n\n\nclass BaseLambdaCDMRTest(BaseLambdaCDMDriverTest):\n flat_cosmo_cls: Type[phytorch.cosmology.special.FlatLambdaCDMR]\n cosmo_cls: Type[phytorch.cosmology.special.LambdaCDMR]\n\n @with_default_double\n def test_ogamma(self):\n z = tensor([1, 10, 500, 1000])\n\n for Neff, Tcmb0, vals in (\n # (3, 0, [1651.9, 858.2, 26.855, 13.642]), # cannot have Or0=0\n (3, 2.725, [1651.8, 857.9, 26.767, 13.582]),\n (3, 4, [1651.4, 856.6, 26.489, 13.405]),\n # (3.04, 0, [1651.91, 858.205, 26.8586, 13.6469]), # cannot have Or0=0\n (3.04, 2.725, [1651.76, 857.817, 26.7688, 13.5841]),\n (3.04, 4, [1651.21, 856.411, 26.4845, 13.4028]),\n ):\n cosmo = self.flat_cosmo_cls()\n cosmo.H0, cosmo.Om0, cosmo.Neff, cosmo.Tcmb0 = H70, 0.3, Neff, Tcmb0*kelvin\n\n assert close(cosmo.angular_diameter_distance(z).to(Mpc).value, vals, rtol=5e-4)\n\n # from astropy: Just to be really sure, we also do a version where the\n # integral is analytic, which is a Ode = 0 flat universe. In this case\n # Integrate(1/E(x),{x,0,z}) = 2 ( sqrt((1+Or z)/(1+z)) - 1 )/(Or - 1)\n # Recall that c/H0 * Integrate(1/E) is FLRW.comoving_distance.\n hubdis = (299792.458 / 70.0)\n Neff = 3.04\n for Tcmb0 in (2.725, 5):\n Ogamma0h2 = 4 * 5.670373e-8 / 299792458**3 * Tcmb0**4 / 1.87837e-26\n Onu0h2 = Ogamma0h2 * 7/8 * (4 / 11)**(4/3) * Neff\n Or0 = (Ogamma0h2 + Onu0h2) / 0.7**2\n vals = 2 * hubdis * (((1 + Or0*z) / (1+z))**0.5 - 1) / (Or0 - 1)\n\n cosmo = self.flat_cosmo_cls()\n cosmo.H0, cosmo.Neff, cosmo.Tcmb0, cosmo.Ode0 = H70, Neff, Tcmb0 * kelvin, 0\n\n assert close(cosmo.comoving_distance(z).to(Mpc).value, vals)\n\n\nclass TestAnalyticLambdaCDM(BaseLambdaCDMTest):\n flat_cosmo_cls = phytorch.cosmology.drivers.analytic.FlatLambdaCDM\n cosmo_cls = phytorch.cosmology.drivers.analytic.LambdaCDM\n\n\nclass TestAnalyticCDMR(BaseLambdaCDMRTest):\n flat_cosmo_cls = phytorch.cosmology.drivers.analytic.FlatLambdaCDMR\n cosmo_cls = phytorch.cosmology.drivers.analytic.LambdaCDMR\n\n\nclass TestAnalyticDiffLambdaCDM(BaseLambdaCDMTest):\n flat_cosmo_cls = phytorch.cosmology.drivers.analytic_diff.FlatLambdaCDM\n cosmo_cls = phytorch.cosmology.drivers.analytic_diff.LambdaCDM\n\n\nclass TestAnalyticDiffCDMR(BaseLambdaCDMRTest):\n flat_cosmo_cls = phytorch.cosmology.drivers.analytic_diff.FlatLambdaCDMR\n cosmo_cls = phytorch.cosmology.drivers.analytic_diff.LambdaCDMR\n\n\n# TODO: (age...) test_age\n# TODO: (age...) test_age_in_special_cosmologies\n# TODO: (neutrinos, weird models...) 
test_distances\n" ]
[ [ "torch.tensor", "torch.get_default_dtype", "torch.ones", "torch.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Ankuraxz/gan
[ "b956c7d571539fd1053b3df3dddddbcbd27be65c", "b956c7d571539fd1053b3df3dddddbcbd27be65c", "b956c7d571539fd1053b3df3dddddbcbd27be65c", "b956c7d571539fd1053b3df3dddddbcbd27be65c", "b956c7d571539fd1053b3df3dddddbcbd27be65c", "b956c7d571539fd1053b3df3dddddbcbd27be65c" ]
[ "tensorflow_gan/examples/progressive_gan/networks_test.py", "tensorflow_gan/examples/cifar/networks.py", "tensorflow_gan/python/eval/sliced_wasserstein_test.py", "tensorflow_gan/examples/progressive_gan/layers_test.py", "tensorflow_gan/examples/progressive_gan/train_main.py", "tensorflow_gan/examples/evaluation_helper.py" ]
[ "# coding=utf-8\n# Copyright 2020 The TensorFlow GAN Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# python2 python3\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow_gan.examples.progressive_gan import layers\nfrom tensorflow_gan.examples.progressive_gan import networks\n\n\ndef _get_grad_norm(ys, xs):\n \"\"\"Compute 2-norm of dys / dxs.\"\"\"\n return tf.sqrt(\n tf.add_n([\n tf.reduce_sum(input_tensor=tf.square(g))\n for g in tf.gradients(ys=ys, xs=xs)\n ]))\n\n\ndef _num_filters_stub(block_id):\n return networks.num_filters(block_id, 8, 1, 8)\n\n\nclass NetworksTest(tf.test.TestCase):\n\n def test_resolution_schedule_correct(self):\n rs = networks.ResolutionSchedule(\n start_resolutions=[5, 3], scale_base=2, num_resolutions=3)\n self.assertEqual(rs.start_resolutions, (5, 3))\n self.assertEqual(rs.scale_base, 2)\n self.assertEqual(rs.num_resolutions, 3)\n self.assertEqual(rs.final_resolutions, (20, 12))\n self.assertEqual(rs.scale_factor(1), 4)\n self.assertEqual(rs.scale_factor(2), 2)\n self.assertEqual(rs.scale_factor(3), 1)\n with self.assertRaises(ValueError):\n rs.scale_factor(0)\n with self.assertRaises(ValueError):\n rs.scale_factor(4)\n\n def test_block_name(self):\n self.assertEqual(networks.block_name(10), 'progressive_gan_block_10')\n\n def test_min_total_num_images(self):\n self.assertEqual(networks.min_total_num_images(7, 8, 4), 52)\n\n def test_compute_progress(self):\n if tf.executing_eagerly():\n progress_output = []\n for current_image_id in [0, 3, 6, 7, 8, 10, 15, 29, 100]:\n progress = networks.compute_progress(\n current_image_id,\n stable_stage_num_images=7,\n transition_stage_num_images=8,\n num_blocks=2)\n with self.cached_session(use_gpu=True) as sess:\n progress_output.append(sess.run(progress))\n else:\n current_image_id_ph = tf.compat.v1.placeholder(tf.int32, [])\n progress = networks.compute_progress(\n current_image_id_ph,\n stable_stage_num_images=7,\n transition_stage_num_images=8,\n num_blocks=2)\n with self.cached_session(use_gpu=True) as sess:\n progress_output = [\n sess.run(progress, feed_dict={current_image_id_ph: cur_image_id})\n for cur_image_id in [0, 3, 6, 7, 8, 10, 15, 29, 100]\n ]\n\n self.assertArrayNear(progress_output,\n [0.0, 0.0, 0.0, 0.0, 0.125, 0.375, 1.0, 1.0, 1.0],\n 1.0e-6)\n\n def test_generator_alpha(self):\n with self.cached_session(use_gpu=True) as sess:\n alpha_fixed_block_id = [\n sess.run(\n networks._generator_alpha(2, tf.constant(progress, tf.float32)))\n for progress in [0, 0.2, 1, 1.2, 2, 2.2, 3]\n ]\n alpha_fixed_progress = [\n sess.run(\n networks._generator_alpha(block_id, tf.constant(1.2, tf.float32)))\n for block_id in range(1, 5)\n ]\n\n self.assertArrayNear(alpha_fixed_block_id, [0, 0.2, 1, 0.8, 0, 0, 0],\n 1.0e-6)\n self.assertArrayNear(alpha_fixed_progress, [0, 0.8, 0.2, 0], 1.0e-6)\n\n def test_discriminator_alpha(self):\n with self.cached_session(use_gpu=True) as sess:\n 
alpha_fixed_block_id = [sess.run(networks._discriminator_alpha(\n 2, tf.constant(progress, tf.float32))) for progress in\n [0, 0.2, 1, 1.2, 2, 2.2, 3]]\n alpha_fixed_progress = [sess.run(networks._discriminator_alpha(\n block_id, tf.constant(1.2, tf.float32))) for block_id in range(1, 5)]\n\n self.assertArrayNear(alpha_fixed_block_id, [1, 1, 1, 0.8, 0, 0, 0], 1.0e-6)\n self.assertArrayNear(alpha_fixed_progress, [0, 0.8, 1, 1], 1.0e-6)\n\n def test_blend_images_in_stable_stage(self):\n x_np = np.random.normal(size=[2, 8, 8, 3])\n x = tf.constant(x_np, tf.float32)\n x_blend = networks.blend_images(\n x,\n progress=tf.constant(0.0),\n resolution_schedule=networks.ResolutionSchedule(\n scale_base=2, num_resolutions=2),\n num_blocks=2)\n with self.cached_session(use_gpu=True) as sess:\n x_blend_np = sess.run(x_blend)\n x_blend_expected_np = sess.run(layers.upscale(layers.downscale(x, 2), 2))\n self.assertNDArrayNear(x_blend_np, x_blend_expected_np, 1.0e-6)\n\n def test_blend_images_in_transition_stage(self):\n x_np = np.random.normal(size=[2, 8, 8, 3])\n x = tf.constant(x_np, tf.float32)\n x_blend = networks.blend_images(\n x,\n tf.constant(0.2),\n resolution_schedule=networks.ResolutionSchedule(\n scale_base=2, num_resolutions=2),\n num_blocks=2)\n with self.cached_session(use_gpu=True) as sess:\n x_blend_np = sess.run(x_blend)\n x_blend_expected_np = 0.8 * sess.run(\n layers.upscale(layers.downscale(x, 2), 2)) + 0.2 * x_np\n self.assertNDArrayNear(x_blend_np, x_blend_expected_np, 1.0e-6)\n\n def test_num_filters(self):\n self.assertEqual(networks.num_filters(1, 4096, 1, 256), 256)\n self.assertEqual(networks.num_filters(5, 4096, 1, 256), 128)\n\n def test_generator_grad_norm_progress(self):\n if tf.executing_eagerly():\n # tf.placeholder() is not compatible with eager execution.\n return\n stable_stage_num_images = 2\n transition_stage_num_images = 3\n\n current_image_id_ph = tf.compat.v1.placeholder(tf.int32, [])\n progress = networks.compute_progress(\n current_image_id_ph,\n stable_stage_num_images,\n transition_stage_num_images,\n num_blocks=3)\n z = tf.random.normal([2, 10], dtype=tf.float32)\n x, _ = networks.generator(\n z, progress, _num_filters_stub,\n networks.ResolutionSchedule(\n start_resolutions=(4, 4), scale_base=2, num_resolutions=3))\n fake_loss = tf.reduce_sum(input_tensor=tf.square(x))\n grad_norms = [\n _get_grad_norm(\n fake_loss,\n tf.compat.v1.trainable_variables('.*/progressive_gan_block_1/.*')),\n _get_grad_norm(\n fake_loss,\n tf.compat.v1.trainable_variables('.*/progressive_gan_block_2/.*')),\n _get_grad_norm(\n fake_loss,\n tf.compat.v1.trainable_variables('.*/progressive_gan_block_3/.*'))\n ]\n\n grad_norms_output = None\n with self.cached_session(use_gpu=True) as sess:\n sess.run(tf.compat.v1.global_variables_initializer())\n x1_np = sess.run(x, feed_dict={current_image_id_ph: 0.12})\n x2_np = sess.run(x, feed_dict={current_image_id_ph: 1.8})\n grad_norms_output = np.array([\n sess.run(grad_norms, feed_dict={current_image_id_ph: i})\n for i in range(15) # total num of images\n ])\n\n self.assertEqual((2, 16, 16, 3), x1_np.shape)\n self.assertEqual((2, 16, 16, 3), x2_np.shape)\n # The gradient of block_1 is always on.\n self.assertEqual(\n np.argmax(grad_norms_output[:, 0] > 0), 0,\n 'gradient norms {} for block 1 is not always on'.format(\n grad_norms_output[:, 0]))\n # The gradient of block_2 is on after 1 stable stage.\n self.assertEqual(\n np.argmax(grad_norms_output[:, 1] > 0), 3,\n 'gradient norms {} for block 2 is not on at step 3'.format(\n 
grad_norms_output[:, 1]))\n # The gradient of block_3 is on after 2 stable stage + 1 transition stage.\n self.assertEqual(\n np.argmax(grad_norms_output[:, 2] > 0), 8,\n 'gradient norms {} for block 3 is not on at step 8'.format(\n grad_norms_output[:, 2]))\n\n def test_discriminator_grad_norm_progress(self):\n if tf.executing_eagerly():\n # tf.placeholder() is not compatible with eager execution.\n return\n stable_stage_num_images = 2\n transition_stage_num_images = 3\n\n current_image_id_ph = tf.compat.v1.placeholder(tf.int32, [])\n progress = networks.compute_progress(\n current_image_id_ph,\n stable_stage_num_images,\n transition_stage_num_images,\n num_blocks=3)\n x = tf.random.normal([2, 16, 16, 3])\n logits, _ = networks.discriminator(\n x, progress, _num_filters_stub,\n networks.ResolutionSchedule(\n start_resolutions=(4, 4), scale_base=2, num_resolutions=3))\n fake_loss = tf.reduce_sum(input_tensor=tf.square(logits))\n grad_norms = [\n _get_grad_norm(\n fake_loss,\n tf.compat.v1.trainable_variables('.*/progressive_gan_block_1/.*')),\n _get_grad_norm(\n fake_loss,\n tf.compat.v1.trainable_variables('.*/progressive_gan_block_2/.*')),\n _get_grad_norm(\n fake_loss,\n tf.compat.v1.trainable_variables('.*/progressive_gan_block_3/.*'))\n ]\n\n grad_norms_output = None\n with self.cached_session(use_gpu=True) as sess:\n sess.run(tf.compat.v1.global_variables_initializer())\n grad_norms_output = np.array([\n sess.run(grad_norms, feed_dict={current_image_id_ph: i})\n for i in range(15) # total num of images\n ])\n\n # The gradient of block_1 is always on.\n self.assertEqual(\n np.argmax(grad_norms_output[:, 0] > 0), 0,\n 'gradient norms {} for block 1 is not always on'.format(\n grad_norms_output[:, 0]))\n # The gradient of block_2 is on after 1 stable stage.\n self.assertEqual(\n np.argmax(grad_norms_output[:, 1] > 0), 3,\n 'gradient norms {} for block 2 is not on at step 3'.format(\n grad_norms_output[:, 1]))\n # The gradient of block_3 is on after 2 stable stage + 1 transition stage.\n self.assertEqual(\n np.argmax(grad_norms_output[:, 2] > 0), 8,\n 'gradient norms {} for block 3 is not on at step 8'.format(\n grad_norms_output[:, 2]))\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# coding=utf-8\n# Copyright 2020 The TensorFlow GAN Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Simple generator and discriminator models.\n\nBased on the convolutional and \"deconvolutional\" models presented in\n\"Unsupervised Representation Learning with Deep Convolutional Generative\nAdversarial Networks\" by A. Radford et. 
al.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\n\ndef _leaky_relu(x):\n return tf.nn.leaky_relu(x, alpha=0.2)\n\n\ndef _batch_norm(x, is_training, name):\n return tf.compat.v1.layers.batch_normalization(\n x, momentum=0.9, epsilon=1e-5, training=is_training, name=name)\n\n\ndef _dense(x, channels, name):\n return tf.compat.v1.layers.dense(\n x,\n channels,\n kernel_initializer=tf.compat.v1.initializers.truncated_normal(\n stddev=0.02),\n name=name)\n\n\ndef _conv2d(x, filters, kernel_size, stride, name):\n return tf.compat.v1.layers.conv2d(\n x,\n filters, [kernel_size, kernel_size],\n strides=[stride, stride],\n padding='same',\n kernel_initializer=tf.compat.v1.initializers.truncated_normal(\n stddev=0.02),\n name=name)\n\n\ndef _deconv2d(x, filters, kernel_size, stride, name):\n return tf.compat.v1.layers.conv2d_transpose(\n x,\n filters, [kernel_size, kernel_size],\n strides=[stride, stride],\n padding='same',\n kernel_initializer=tf.compat.v1.initializers.truncated_normal(\n stddev=0.02),\n name=name)\n\n\ndef discriminator(images, unused_conditioning, is_training=True,\n scope='Discriminator'):\n \"\"\"Discriminator for CIFAR images.\n\n Args:\n images: A Tensor of shape [batch size, width, height, channels], that can be\n either real or generated. It is the discriminator's goal to distinguish\n between the two.\n unused_conditioning: The TFGAN API can help with conditional GANs, which\n would require extra `condition` information to both the generator and the\n discriminator. Since this example is not conditional, we do not use this\n argument.\n is_training: If `True`, batch norm uses batch statistics. If `False`, batch\n norm uses the exponential moving average collected from population\n statistics.\n scope: A variable scope or string for the discriminator.\n\n Returns:\n A 1D Tensor of shape [batch size] representing the confidence that the\n images are real. The output can lie in [-inf, inf], with positive values\n indicating high confidence that the images are real.\n \"\"\"\n with tf.compat.v1.variable_scope(scope, reuse=tf.compat.v1.AUTO_REUSE):\n x = _conv2d(images, 64, 5, 2, name='d_conv1')\n x = _leaky_relu(x)\n\n x = _conv2d(x, 128, 5, 2, name='d_conv2')\n x = _leaky_relu(_batch_norm(x, is_training, name='d_bn2'))\n\n x = _conv2d(x, 256, 5, 2, name='d_conv3')\n x = _leaky_relu(_batch_norm(x, is_training, name='d_bn3'))\n\n x = tf.reshape(x, [-1, 4 * 4 * 256])\n\n x = _dense(x, 1, name='d_fc_4')\n\n return x\n\n\ndef generator(noise, is_training=True, scope='Generator'):\n \"\"\"Generator to produce CIFAR images.\n\n Args:\n noise: A 2D Tensor of shape [batch size, noise dim]. Since this example\n does not use conditioning, this Tensor represents a noise vector of some\n kind that will be reshaped by the generator into CIFAR examples.\n is_training: If `True`, batch norm uses batch statistics. 
If `False`, batch\n norm uses the exponential moving average collected from population\n statistics.\n scope: A variable scope or string for the generator.\n\n Returns:\n A single Tensor with a batch of generated CIFAR images.\n \"\"\"\n with tf.compat.v1.variable_scope(scope, reuse=tf.compat.v1.AUTO_REUSE):\n net = _dense(noise, 4096, name='g_fc1')\n net = tf.nn.relu(_batch_norm(net, is_training, name='g_bn1'))\n\n net = tf.reshape(net, [-1, 4, 4, 256])\n\n net = _deconv2d(net, 128, 5, 2, name='g_dconv2')\n net = tf.nn.relu(_batch_norm(net, is_training, name='g_bn2'))\n\n net = _deconv2d(net, 64, 4, 2, name='g_dconv3')\n net = tf.nn.relu(_batch_norm(net, is_training, name='g_bn3'))\n\n net = _deconv2d(net, 3, 4, 2, name='g_dconv4')\n net = tf.tanh(net)\n\n return net\n", "# coding=utf-8\n# Copyright 2020 The TensorFlow GAN Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for tfgan.eval.sliced_wasserstein.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nfrom scipy import ndimage\n\nimport tensorflow as tf\nimport tensorflow_gan as tfgan\n\nfrom tensorflow_gan.python.eval.sliced_wasserstein import laplacian_pyramid\n\n\nclass ClassifierMetricsTest(tf.test.TestCase):\n\n def test_laplacian_pyramid(self):\n # The numpy/scipy code for reference estimation comes from:\n # https://github.com/tkarras/progressive_growing_of_gans\n gaussian_filter = np.float32([[1, 4, 6, 4, 1], [4, 16, 24, 16, 4],\n [6, 24, 36, 24, 6], [4, 16, 24, 16, 4],\n [1, 4, 6, 4, 1]]) / 256.0\n\n def np_pyr_down(minibatch): # matches cv2.pyrDown()\n assert minibatch.ndim == 4\n return ndimage.convolve(\n minibatch,\n gaussian_filter[np.newaxis, np.newaxis, :, :],\n mode='mirror')[:, :, ::2, ::2]\n\n def np_pyr_up(minibatch): # matches cv2.pyrUp()\n assert minibatch.ndim == 4\n s = minibatch.shape\n res = np.zeros((s[0], s[1], s[2] * 2, s[3] * 2), minibatch.dtype)\n res[:, :, ::2, ::2] = minibatch\n return ndimage.convolve(\n res,\n gaussian_filter[np.newaxis, np.newaxis, :, :] * 4.0,\n mode='mirror')\n\n def np_laplacian_pyramid(minibatch, num_levels):\n # Note: there's a bug in the original SWD, fixed repeatability.\n pyramid = [minibatch.astype('f').copy()]\n for _ in range(1, num_levels):\n pyramid.append(np_pyr_down(pyramid[-1]))\n pyramid[-2] -= np_pyr_up(pyramid[-1])\n return pyramid\n\n data = np.random.normal(size=[256, 3, 32, 32]).astype('f')\n pyramid = np_laplacian_pyramid(data, 3)\n data_tf = tf.constant(data.transpose(0, 2, 3, 1))\n pyramid_tf = laplacian_pyramid(data_tf, 3)\n with self.cached_session() as sess:\n pyramid_tf = sess.run(pyramid_tf)\n for x in range(3):\n self.assertAllClose(\n pyramid[x].transpose(0, 2, 3, 1), pyramid_tf[x], atol=1e-6)\n\n def test_sliced_wasserstein_distance(self):\n \"\"\"Test the distance.\"\"\"\n d1 = tf.random.uniform([256, 32, 32, 3])\n d2 = tf.random.normal([256, 32, 32, 3])\n wfunc = tfgan.eval.sliced_wasserstein_distance(d1, d2)\n with 
self.cached_session() as sess:\n wscores = [sess.run(x) for x in wfunc]\n self.assertAllClose(\n np.array([0.014, 0.014], 'f'),\n np.array([x[0] for x in wscores], 'f'),\n rtol=0.15)\n self.assertAllClose(\n np.array([0.014, 0.020], 'f'),\n np.array([x[1] for x in wscores], 'f'),\n rtol=0.15)\n\n def test_sliced_wasserstein_distance_svd(self):\n \"\"\"Test the distance with svd.\"\"\"\n d1 = tf.random.uniform([256, 32, 32, 3])\n d2 = tf.random.normal([256, 32, 32, 3])\n wfunc = tfgan.eval.sliced_wasserstein_distance(d1, d2, use_svd=True)\n with self.cached_session() as sess:\n wscores = [sess.run(x) for x in wfunc]\n self.assertAllClose(\n np.array([0.013, 0.013], 'f'),\n np.array([x[0] for x in wscores], 'f'),\n rtol=0.15)\n self.assertAllClose(\n np.array([0.014, 0.019], 'f'),\n np.array([x[1] for x in wscores], 'f'),\n rtol=0.15)\n\n def test_swd_mismatched(self):\n \"\"\"Test the inputs mismatched shapes are detected.\"\"\"\n d1 = tf.random.uniform([256, 32, 32, 3])\n d2 = tf.random.normal([256, 32, 31, 3])\n d3 = tf.random.normal([256, 31, 32, 3])\n d4 = tf.random.normal([255, 32, 32, 3])\n with self.assertRaises(ValueError):\n tfgan.eval.sliced_wasserstein_distance(d1, d2)\n with self.assertRaises(ValueError):\n tfgan.eval.sliced_wasserstein_distance(d1, d3)\n with self.assertRaises(ValueError):\n tfgan.eval.sliced_wasserstein_distance(d1, d4)\n\n def test_swd_not_rgb(self):\n \"\"\"Test that only RGB is supported.\"\"\"\n d1 = tf.random.uniform([256, 32, 32, 1])\n d2 = tf.random.normal([256, 32, 32, 1])\n with self.assertRaises(ValueError):\n tfgan.eval.sliced_wasserstein_distance(d1, d2)\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# coding=utf-8\n# Copyright 2020 The TensorFlow GAN Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# python2 python3\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow_gan.examples.progressive_gan import layers\n\nmock = tf.compat.v1.test.mock\n\n\ndef dummy_apply_kernel(kernel_shape, kernel_initializer):\n kernel = tf.compat.v1.get_variable(\n 'kernel', shape=kernel_shape, initializer=kernel_initializer)\n return tf.reduce_sum(input_tensor=kernel) + 1.5\n\n\nclass LayersTest(tf.test.TestCase):\n\n def setUp(self):\n super(LayersTest, self).setUp()\n\n # Do a dummy computation to trigger lazy loading of the conv2d method before\n # mocking it in tests.\n _ = tf.compat.v1.layers.conv2d(\n np.ones(shape=[1, 3, 3, 1], dtype=np.float32), filters=1, kernel_size=2)\n\n def test_pixel_norm_4d_images_returns_channel_normalized_images(self):\n x = tf.constant([[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],\n [[[0, 0, 0], [-1, -2, -3]], [[1, -2, 2], [2, 5, 3]]]],\n dtype=tf.float32)\n with self.cached_session() as sess:\n output_np = sess.run(layers.pixel_norm(x))\n\n expected_np = [[[[0.46291006, 0.92582011, 1.38873017],\n [0.78954202, 0.98692751, 1.18431306]],\n [[0.87047803, 0.99483204, 1.11918604],\n 
[0.90659684, 0.99725652, 1.08791625]]],\n [[[0., 0., 0.], [-0.46291006, -0.92582011, -1.38873017]],\n [[0.57735026, -1.15470052, 1.15470052],\n [0.56195146, 1.40487862, 0.84292722]]]]\n self.assertAllClose(output_np, expected_np, 1.0e-5)\n\n def test_get_validated_scale_invalid_scale_throws_exception(self):\n with self.assertRaises(ValueError):\n layers._get_validated_scale(0)\n\n def test_get_validated_scale_float_scale_returns_integer(self):\n self.assertEqual(layers._get_validated_scale(5.5), 5)\n\n def test_downscale_invalid_scale_throws_exception(self):\n with self.assertRaises(ValueError):\n layers.downscale(tf.constant([]), -1)\n\n def test_downscale_4d_images_returns_downscaled_images(self):\n x_np = np.array([[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],\n [[[0, 0, 0], [-1, -2, -3]], [[1, -2, 2], [2, 5, 3]]]],\n dtype=np.float32)\n with self.cached_session() as sess:\n x1_np, x2_np = sess.run(\n [layers.downscale(tf.constant(x_np), n) for n in [1, 2]])\n\n expected2_np = [[[[5.5, 6.5, 7.5]]], [[[0.5, 0.25, 0.5]]]]\n\n self.assertAllClose(x1_np, x_np, 1.0e-5)\n self.assertAllClose(x2_np, expected2_np, 1.0e-5)\n\n def test_upscale_invalid_scale_throws_exception(self):\n with self.assertRaises(ValueError):\n layers.upscale(tf.constant([]), -1)\n\n def test_upscale_4d_images_returns_upscaled_images(self):\n x_np = np.array([[[[1, 2, 3]]], [[[4, 5, 6]]]], dtype=np.float32)\n with self.cached_session() as sess:\n x1_np, x2_np = sess.run(\n [layers.upscale(tf.constant(x_np), n) for n in [1, 2]])\n\n expected2_np = [[[[1, 2, 3], [1, 2, 3]], [[1, 2, 3], [1, 2, 3]]],\n [[[4, 5, 6], [4, 5, 6]], [[4, 5, 6], [4, 5, 6]]]]\n\n self.assertAllClose(x1_np, x_np, 1.0e-5)\n self.assertAllClose(x2_np, expected2_np, 1.0e-5)\n\n def test_minibatch_mean_stddev_4d_images_returns_scalar(self):\n x = tf.constant([[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],\n [[[0, 0, 0], [-1, -2, -3]], [[1, -2, 2], [2, 5, 3]]]],\n dtype=tf.float32)\n with self.cached_session() as sess:\n output_np = sess.run(layers.minibatch_mean_stddev(x))\n\n self.assertAlmostEqual(output_np, 3.0416667, 5)\n\n def test_scalar_concat_invalid_input_throws_exception(self):\n with self.assertRaises(ValueError):\n layers.scalar_concat(tf.constant(1.2), 2.0)\n\n def test_scalar_concat_4d_images_and_scalar(self):\n x = tf.constant([[[[1, 2], [4, 5]], [[7, 8], [10, 11]]],\n [[[0, 0], [-1, -2]], [[1, -2], [2, 5]]]],\n dtype=tf.float32)\n with self.cached_session() as sess:\n output_np = sess.run(layers.scalar_concat(x, 7))\n\n expected_np = [[[[1, 2, 7], [4, 5, 7]], [[7, 8, 7], [10, 11, 7]]],\n [[[0, 0, 7], [-1, -2, 7]], [[1, -2, 7], [2, 5, 7]]]]\n\n self.assertAllClose(output_np, expected_np, 1.0e-5)\n\n def test_he_initializer_scale_slope_linear(self):\n self.assertAlmostEqual(\n layers.he_initializer_scale([3, 4, 5, 6], 1.0), 0.1290994, 5)\n\n def test_he_initializer_scale_slope_relu(self):\n self.assertAlmostEqual(\n layers.he_initializer_scale([3, 4, 5, 6], 0.0), 0.1825742, 5)\n\n @mock.patch.object(tf.compat.v1, 'random_normal_initializer', autospec=False)\n @mock.patch.object(tf.compat.v1, 'zeros_initializer', autospec=False)\n def test_custom_layer_impl_with_weight_scaling(\n self, mock_zeros_initializer, mock_random_normal_initializer):\n mock_zeros_initializer.return_value = tf.compat.v1.constant_initializer(1.0)\n mock_random_normal_initializer.return_value = (\n tf.compat.v1.constant_initializer(3.0))\n output = layers._custom_layer_impl(\n apply_kernel=dummy_apply_kernel,\n kernel_shape=(25, 6),\n bias_shape=(),\n 
activation=lambda x: 2.0 * x,\n he_initializer_slope=1.0,\n use_weight_scaling=True)\n mock_zeros_initializer.assert_called_once_with()\n mock_random_normal_initializer.assert_called_once_with(stddev=1.0)\n with self.cached_session() as sess:\n sess.run(tf.compat.v1.global_variables_initializer())\n output_np = sess.run(output)\n\n self.assertAlmostEqual(output_np, 182.6, 3)\n\n @mock.patch.object(tf.compat.v1, 'random_normal_initializer', autospec=False)\n @mock.patch.object(tf.compat.v1, 'zeros_initializer', autospec=False)\n def test_custom_layer_impl_no_weight_scaling(self, mock_zeros_initializer,\n mock_random_normal_initializer):\n mock_zeros_initializer.return_value = tf.compat.v1.constant_initializer(1.0)\n mock_random_normal_initializer.return_value = (\n tf.compat.v1.constant_initializer(3.0))\n output = layers._custom_layer_impl(\n apply_kernel=dummy_apply_kernel,\n kernel_shape=(25, 6),\n bias_shape=(),\n activation=lambda x: 2.0 * x,\n he_initializer_slope=1.0,\n use_weight_scaling=False)\n mock_zeros_initializer.assert_called_once_with()\n mock_random_normal_initializer.assert_called_once_with(stddev=0.2)\n with self.cached_session() as sess:\n sess.run(tf.compat.v1.global_variables_initializer())\n output_np = sess.run(output)\n\n self.assertAlmostEqual(output_np, 905.0, 3)\n\n @mock.patch.object(tf.compat.v1.layers, 'conv2d', autospec=True)\n def test_custom_conv2d_passes_conv2d_options(self, mock_conv2d):\n x = tf.constant([[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],\n [[[0, 0, 0], [-1, -2, -3]], [[1, -2, 2], [2, 5, 3]]]],\n dtype=tf.float32)\n layers.custom_conv2d(x, 1, 2)\n mock_conv2d.assert_called_once_with(\n x,\n filters=1,\n kernel_size=[2, 2],\n strides=(1, 1),\n padding='SAME',\n use_bias=False,\n kernel_initializer=mock.ANY)\n\n @mock.patch.object(layers, '_custom_layer_impl', autospec=True)\n def test_custom_conv2d_passes_custom_layer_options(self,\n mock_custom_layer_impl):\n x = tf.constant([[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],\n [[[0, 0, 0], [-1, -2, -3]], [[1, -2, 2], [2, 5, 3]]]],\n dtype=tf.float32)\n layers.custom_conv2d(x, 1, 2)\n mock_custom_layer_impl.assert_called_once_with(\n mock.ANY,\n kernel_shape=[2, 2, 3, 1],\n bias_shape=(1,),\n activation=None,\n he_initializer_slope=1.0,\n use_weight_scaling=True)\n\n @mock.patch.object(tf.compat.v1, 'random_normal_initializer', autospec=False)\n @mock.patch.object(tf.compat.v1, 'zeros_initializer', autospec=False)\n def test_custom_conv2d_scalar_kernel_size(self, mock_zeros_initializer,\n mock_random_normal_initializer):\n mock_zeros_initializer.return_value = tf.compat.v1.constant_initializer(1.0)\n mock_random_normal_initializer.return_value = (\n tf.compat.v1.constant_initializer(3.0))\n x = tf.constant([[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],\n [[[0, 0, 0], [-1, -2, -3]], [[1, -2, 2], [2, 5, 3]]]],\n dtype=tf.float32)\n output = layers.custom_conv2d(x, 1, 2)\n mock_zeros_initializer.assert_called_once_with()\n mock_random_normal_initializer.assert_called_once_with(stddev=1.0)\n with self.cached_session() as sess:\n sess.run(tf.compat.v1.global_variables_initializer())\n output_np = sess.run(output)\n\n expected_np = [[[[68.54998016], [42.56921768]],\n [[50.36344528], [29.57883835]]],\n [[[5.33012676], [4.46410179]], [[10.52627945],\n [9.66025352]]]]\n self.assertAllClose(output_np, expected_np, 1.0e-5)\n\n @mock.patch.object(tf.compat.v1, 'random_normal_initializer', autospec=True)\n @mock.patch.object(tf.compat.v1, 'zeros_initializer', autospec=True)\n def 
test_custom_conv2d_list_kernel_size(self, mock_zeros_initializer,\n mock_random_normal_initializer):\n mock_zeros_initializer.return_value = tf.compat.v1.constant_initializer(1.0)\n mock_random_normal_initializer.return_value = (\n tf.compat.v1.constant_initializer(3.0))\n x = tf.constant([[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],\n [[[0, 0, 0], [-1, -2, -3]], [[1, -2, 2], [2, 5, 3]]]],\n dtype=tf.float32)\n output = layers.custom_conv2d(x, 1, [2, 3])\n mock_zeros_initializer.assert_called_once_with()\n mock_random_normal_initializer.assert_called_once_with(stddev=1.0)\n with self.cached_session() as sess:\n sess.run(tf.compat.v1.global_variables_initializer())\n output_np = sess.run(output)\n\n expected_np = [[\n [[56.15432739], [56.15432739]],\n [[41.30508804], [41.30508804]],\n ], [[[4.53553391], [4.53553391]], [[8.7781744], [8.7781744]]]]\n self.assertAllClose(output_np, expected_np, 1.0e-5)\n\n @mock.patch.object(layers, '_custom_layer_impl', autospec=True)\n def test_custom_dense_passes_custom_layer_options(self,\n mock_custom_layer_impl):\n x = tf.constant([[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],\n [[[0, 0, 0], [-1, -2, -3]], [[1, -2, 2], [2, 5, 3]]]],\n dtype=tf.float32)\n layers.custom_dense(x, 3)\n mock_custom_layer_impl.assert_called_once_with(\n mock.ANY,\n kernel_shape=(12, 3),\n bias_shape=(3,),\n activation=None,\n he_initializer_slope=1.0,\n use_weight_scaling=True)\n\n @mock.patch.object(tf.compat.v1, 'random_normal_initializer', autospec=False)\n @mock.patch.object(tf.compat.v1, 'zeros_initializer', autospec=False)\n def test_custom_dense_output_is_correct(self, mock_zeros_initializer,\n mock_random_normal_initializer):\n mock_zeros_initializer.return_value = tf.compat.v1.constant_initializer(1.0)\n mock_random_normal_initializer.return_value = (\n tf.compat.v1.constant_initializer(3.0))\n x = tf.constant([[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],\n [[[0, 0, 0], [-1, -2, -3]], [[1, -2, 2], [2, 5, 3]]]],\n dtype=tf.float32)\n output = layers.custom_dense(x, 3)\n mock_zeros_initializer.assert_called_once_with()\n mock_random_normal_initializer.assert_called_once_with(stddev=1.0)\n with self.cached_session() as sess:\n sess.run(tf.compat.v1.global_variables_initializer())\n output_np = sess.run(output)\n\n expected_np = [[68.54998016, 68.54998016, 68.54998016],\n [5.33012676, 5.33012676, 5.33012676]]\n self.assertAllClose(output_np, expected_np, 1.0e-5)\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# coding=utf-8\n# Copyright 2020 The TensorFlow GAN Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# python2 python3\n\"\"\"Train a progressive GAN model.\n\nSee https://arxiv.org/abs/1710.10196 for details about the model.\n\nSee https://github.com/tkarras/progressive_growing_of_gans for the original\ntheano implementation.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\n\nfrom absl import flags\nfrom absl import logging\nimport 
six\nimport tensorflow.compat.v1 as tf\n\nfrom tensorflow_gan.examples.progressive_gan import data_provider\nfrom tensorflow_gan.examples.progressive_gan import train\n\nflags.DEFINE_string('dataset_file_pattern', '', 'Dataset file pattern.')\n\nflags.DEFINE_integer('start_height', 4, 'Start image height.')\n\nflags.DEFINE_integer('start_width', 4, 'Start image width.')\n\nflags.DEFINE_integer('scale_base', 2, 'Resolution multiplier.')\n\nflags.DEFINE_integer('num_resolutions', 4, 'Number of progressive resolutions.')\n\nflags.DEFINE_list(\n 'batch_size_schedule', [8, 8, 4],\n 'A list of batch sizes for each resolution, if '\n 'len(batch_size_schedule) < num_resolutions, pad the schedule in the '\n 'beginning with the first batch size.')\n\nflags.DEFINE_integer('kernel_size', 3, 'Convolution kernel size.')\n\nflags.DEFINE_integer('colors', 3, 'Number of image channels.')\n\nflags.DEFINE_bool('to_rgb_use_tanh_activation', False,\n 'Whether to apply tanh activation when output rgb.')\n\nflags.DEFINE_integer('stable_stage_num_images', 1000,\n 'Number of images in the stable stage.')\n\nflags.DEFINE_integer('transition_stage_num_images', 1000,\n 'Number of images in the transition stage.')\n\nflags.DEFINE_integer('total_num_images', 10000, 'Total number of images.')\n\nflags.DEFINE_integer('save_summaries_num_images', 100,\n 'Save summaries in this number of images.')\n\nflags.DEFINE_integer('latent_vector_size', 128, 'Latent vector size.')\n\nflags.DEFINE_integer('fmap_base', 4096, 'Base number of filters.')\n\nflags.DEFINE_float('fmap_decay', 1.0, 'Decay of number of filters.')\n\nflags.DEFINE_integer('fmap_max', 128, 'Max number of filters.')\n\nflags.DEFINE_float('gradient_penalty_target', 1.0,\n 'Gradient norm target for wasserstein loss.')\n\nflags.DEFINE_float('gradient_penalty_weight', 10.0,\n 'Gradient penalty weight for wasserstein loss.')\n\nflags.DEFINE_float(\n 'real_score_penalty_weight', 0.001,\n 'Additional penalty to keep the scores from drifting too '\n 'far from zero.')\n\nflags.DEFINE_float('generator_learning_rate', 0.001, 'Learning rate.')\n\nflags.DEFINE_float('discriminator_learning_rate', 0.001, 'Learning rate.')\n\nflags.DEFINE_float('adam_beta1', 0.0, 'Adam beta 1.')\n\nflags.DEFINE_float('adam_beta2', 0.99, 'Adam beta 2.')\n\nflags.DEFINE_integer('fake_grid_size', 8, 'The fake image grid size for eval.')\n\nflags.DEFINE_integer('interp_grid_size', 8,\n 'The interp image grid size for eval.')\n\nflags.DEFINE_string('train_log_dir', '/tmp/tfgan_logdir/progressive_gan/',\n 'Directory where to write event logs.')\n\nflags.DEFINE_string('master', '', 'Name of the TensorFlow master to use.')\n\nflags.DEFINE_integer(\n 'ps_replicas', 0,\n 'The number of parameter servers. If the value is 0, then the parameters '\n 'are handled locally by the worker.')\n\nflags.DEFINE_integer(\n 'task', 0,\n 'The Task ID. 
This value is used when training with multiple workers to '\n 'identify each worker.')\n\nFLAGS = flags.FLAGS\n\n\ndef _make_config_from_flags():\n \"\"\"Makes a config dictionary from commandline flags.\"\"\"\n return dict([(flag.name, flag.value)\n for flag in FLAGS.get_key_flags_for_module(sys.argv[0])])\n\n\ndef _provide_real_images(batch_size, **kwargs):\n \"\"\"Provides real images.\"\"\"\n dataset_file_pattern = kwargs.get('dataset_file_pattern')\n colors = kwargs['colors']\n final_height, final_width = train.make_resolution_schedule(\n **kwargs).final_resolutions\n if not dataset_file_pattern:\n return data_provider.provide_data(\n split='train',\n batch_size=batch_size,\n patch_height=final_height,\n patch_width=final_width,\n colors=colors)\n else:\n return data_provider.provide_data_from_image_files(\n file_pattern=dataset_file_pattern,\n batch_size=batch_size,\n patch_height=final_height,\n patch_width=final_width,\n colors=colors)\n\n\ndef main(_):\n if not tf.io.gfile.exists(FLAGS.train_log_dir):\n tf.io.gfile.makedirs(FLAGS.train_log_dir)\n\n config = _make_config_from_flags()\n logging.info('\\n'.join(\n ['{}={}'.format(k, v) for k, v in six.iteritems(config)]))\n\n for stage_id in train.get_stage_ids(**config):\n batch_size = train.get_batch_size(stage_id, **config)\n tf.reset_default_graph()\n with tf.device(tf.train.replica_device_setter(FLAGS.ps_replicas)):\n real_images = None\n with tf.device('/cpu:0'), tf.name_scope('inputs'):\n real_images = _provide_real_images(batch_size, **config)\n model = train.build_model(stage_id, batch_size, real_images, **config)\n train.add_model_summaries(model, **config)\n train.train(model, **config)\n\n\nif __name__ == '__main__':\n tf.app.run()\n", "# coding=utf-8\n# Copyright 2020 The TensorFlow GAN Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Contains functions for evaluation and summarization of metrics.\n\nCopied from tensorflow/python/training/evaluation.py and\nthird_party/tensorflow/contrib/training/python/training/evaluation.py.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\nimport time\n\nimport tensorflow as tf\n\nfrom tensorflow.python.training import basic_session_run_hooks # pylint:disable=g-direct-tensorflow-import\n\n\ndef get_or_create_eval_step():\n \"\"\"Gets or creates the eval step `Tensor`.\n\n Returns:\n A `Tensor` representing a counter for the evaluation step.\n\n Raises:\n ValueError: If multiple `Tensors` have been added to the\n `tf.GraphKeys.EVAL_STEP` collection.\n \"\"\"\n graph = tf.compat.v1.get_default_graph()\n eval_steps = graph.get_collection(tf.compat.v1.GraphKeys.EVAL_STEP)\n if len(eval_steps) == 1:\n return eval_steps[0]\n elif len(eval_steps) > 1:\n raise ValueError('Multiple tensors added to tf.GraphKeys.EVAL_STEP')\n else:\n counter = tf.compat.v1.get_variable(\n 'eval_step',\n shape=[],\n dtype=tf.int64,\n initializer=tf.compat.v1.zeros_initializer(),\n trainable=False,\n 
collections=[\n tf.compat.v1.GraphKeys.LOCAL_VARIABLES,\n tf.compat.v1.GraphKeys.EVAL_STEP\n ])\n return counter\n\n\ndef get_latest_eval_step_value(update_ops):\n \"\"\"Gets the eval step `Tensor` value after running `update_ops`.\n\n Args:\n update_ops: A list of `Tensors` or a dictionary of names to `Tensors`,\n which are run before reading the eval step value.\n\n Returns:\n A `Tensor` representing the value for the evaluation step.\n \"\"\"\n if isinstance(update_ops, dict):\n update_ops = list(update_ops.values())\n\n with tf.control_dependencies(update_ops):\n return tf.identity(get_or_create_eval_step().read_value())\n\n\nclass MultiStepStopAfterNEvalsHook(tf.estimator.SessionRunHook):\n \"\"\"Run hook used by the evaluation routines to run the `eval_ops` N times.\"\"\"\n\n def __init__(self, num_evals, steps_per_run=1):\n \"\"\"Constructs the run hook.\n\n Args:\n num_evals: The number of evaluations to run for. if set to None, will\n iterate the dataset until all inputs are exhausted.\n steps_per_run: Number of steps executed per run call.\n \"\"\"\n self._num_evals = num_evals\n self._evals_completed = None\n self._steps_per_run_initial_value = steps_per_run\n\n def _set_evals_completed_tensor(self, updated_eval_step):\n self._evals_completed = updated_eval_step\n\n def begin(self):\n self._steps_per_run_variable = \\\n basic_session_run_hooks.get_or_create_steps_per_run_variable()\n\n def after_create_session(self, session, coord):\n # Update number of steps to run in the first run call\n if self._num_evals is None:\n steps = self._steps_per_run_initial_value\n else:\n steps = min(self._steps_per_run_initial_value, self._num_evals)\n self._steps_per_run_variable.load(steps, session=session)\n\n def before_run(self, run_context):\n return tf.estimator.SessionRunArgs(\n {'evals_completed': self._evals_completed})\n\n def after_run(self, run_context, run_values):\n evals_completed = run_values.results['evals_completed']\n # Update number of steps to run in the next iteration\n if self._num_evals is None:\n steps = self._steps_per_run_initial_value\n else:\n steps = min(self._num_evals - evals_completed,\n self._steps_per_run_initial_value)\n self._steps_per_run_variable.load(steps, session=run_context.session)\n\n if self._num_evals is None:\n tf.compat.v1.logging.info('Evaluation [%d]', evals_completed)\n else:\n tf.compat.v1.logging.info('Evaluation [%d/%d]', evals_completed,\n self._num_evals)\n if self._num_evals is not None and evals_completed >= self._num_evals:\n run_context.request_stop()\n\n\nclass StopAfterNEvalsHook(tf.estimator.SessionRunHook):\n \"\"\"Run hook used by the evaluation routines to run the `eval_ops` N times.\"\"\"\n\n def __init__(self, num_evals, log_progress=True):\n \"\"\"Constructs the run hook.\n\n Args:\n num_evals: The number of evaluations to run for. 
if set to None, will\n iterate the dataset until all inputs are exhausted.\n log_progress: Whether to log evaluation progress, defaults to True.\n \"\"\"\n # The number of evals to run for.\n self._num_evals = num_evals\n self._evals_completed = None\n self._log_progress = log_progress\n # Reduce logging frequency if there are 20 or more evaluations.\n self._log_frequency = (1 if (num_evals is None or num_evals < 20)\n else math.floor(num_evals / 10.))\n\n def _set_evals_completed_tensor(self, updated_eval_step):\n self._evals_completed = updated_eval_step\n\n def before_run(self, run_context):\n return tf.estimator.SessionRunArgs(\n {'evals_completed': self._evals_completed})\n\n def after_run(self, run_context, run_values):\n evals_completed = run_values.results['evals_completed']\n if self._log_progress:\n if self._num_evals is None:\n tf.compat.v1.logging.info('Evaluation [%d]', evals_completed)\n else:\n if ((evals_completed % self._log_frequency) == 0 or\n (self._num_evals == evals_completed)):\n tf.compat.v1.logging.info('Evaluation [%d/%d]', evals_completed,\n self._num_evals)\n if self._num_evals is not None and evals_completed >= self._num_evals:\n run_context.request_stop()\n\n\nclass SummaryAtEndHook(tf.estimator.SessionRunHook):\n \"\"\"A run hook that saves a summary with the results of evaluation.\"\"\"\n\n def __init__(self,\n log_dir=None,\n summary_writer=None,\n summary_op=None,\n feed_dict=None):\n \"\"\"Constructs the Summary Hook.\n\n Args:\n log_dir: The directory where the summary events are saved to. Used only\n when `summary_writer` is not specified.\n summary_writer: A `tf.summary.FileWriter` to write summary\n events with.\n summary_op: The summary op to run. If left as `None`, then all summaries\n in the tf.GraphKeys.SUMMARIES collection are used.\n feed_dict: An optional feed dictionary to use when evaluating the\n summaries.\n\n Raises:\n ValueError: If both `log_dir` and `summary_writer` are `None`.\n \"\"\"\n self._summary_op = summary_op\n self._replace_summary_op = summary_op is None\n self._feed_dict = feed_dict\n self._summary_writer = summary_writer\n self._log_dir = log_dir\n if self._log_dir is None and self._summary_writer is None:\n raise ValueError('One of log_dir or summary_writer should be used.')\n\n def begin(self):\n if self._replace_summary_op:\n # This can still remain None if there are no summaries.\n self._summary_op = tf.compat.v1.summary.merge_all()\n self._global_step = tf.compat.v1.train.get_or_create_global_step()\n\n def after_create_session(self, session, coord):\n if self._summary_writer is None and self._log_dir:\n self._summary_writer = tf.compat.v1.summary.FileWriterCache.get(\n self._log_dir)\n\n def end(self, session):\n if self._summary_op is not None:\n global_step = tf.compat.v1.train.global_step(session, self._global_step)\n summary_str = session.run(self._summary_op, self._feed_dict)\n if self._summary_writer:\n self._summary_writer.add_summary(summary_str, global_step)\n if self._summary_writer:\n self._summary_writer.flush()\n\n\ndef wait_for_new_checkpoint(checkpoint_dir,\n last_checkpoint=None,\n seconds_to_sleep=1,\n timeout=None):\n \"\"\"Waits until a new checkpoint file is found.\n\n Args:\n checkpoint_dir: The directory in which checkpoints are saved.\n last_checkpoint: The last checkpoint path used or `None` if we're expecting\n a checkpoint for the first time.\n seconds_to_sleep: The number of seconds to sleep for before looking for a\n new checkpoint.\n timeout: The maximum number of seconds to wait. 
If left as `None`, then the\n process will wait indefinitely.\n\n Returns:\n a new checkpoint path, or None if the timeout was reached.\n \"\"\"\n tf.compat.v1.logging.info('Waiting for new checkpoint at %s', checkpoint_dir)\n stop_time = time.time() + timeout if timeout is not None else None\n while True:\n checkpoint_path = tf.train.latest_checkpoint(checkpoint_dir)\n if checkpoint_path is None or checkpoint_path == last_checkpoint:\n if stop_time is not None and time.time() + seconds_to_sleep > stop_time:\n return None\n time.sleep(seconds_to_sleep)\n else:\n tf.compat.v1.logging.info('Found new checkpoint at %s', checkpoint_path)\n return checkpoint_path\n\n\ndef checkpoints_iterator(checkpoint_dir,\n min_interval_secs=0,\n timeout=None,\n timeout_fn=None):\n \"\"\"Continuously yield new checkpoint files as they appear.\n\n The iterator only checks for new checkpoints when control flow has been\n reverted to it. This means it can miss checkpoints if your code takes longer\n to run between iterations than `min_interval_secs` or the interval at which\n new checkpoints are written.\n\n The `timeout` argument is the maximum number of seconds to block waiting for\n a new checkpoint. It is used in combination with the `timeout_fn` as\n follows:\n\n * If the timeout expires and no `timeout_fn` was specified, the iterator\n stops yielding.\n * If a `timeout_fn` was specified, that function is called and if it returns\n a true boolean value the iterator stops yielding.\n * If the function returns a false boolean value then the iterator resumes the\n wait for new checkpoints. At this point the timeout logic applies again.\n\n This behavior gives control to callers on what to do if checkpoints do not\n come fast enough or stop being generated. For example, if callers have a way\n to detect that the training has stopped and know that no new checkpoints\n will be generated, they can provide a `timeout_fn` that returns `True` when\n the training has stopped. If they know that the training is still going on\n they return `False` instead.\n\n Args:\n checkpoint_dir: The directory in which checkpoints are saved.\n min_interval_secs: The minimum number of seconds between yielding\n checkpoints.\n timeout: The maximum number of seconds to wait between checkpoints. If left\n as `None`, then the process will wait indefinitely.\n timeout_fn: Optional function to call after a timeout. If the function\n returns True, then it means that no new checkpoints will be generated and\n the iterator will exit. 
The function is called with no arguments.\n\n Yields:\n String paths to latest checkpoint files as they arrive.\n \"\"\"\n checkpoint_path = None\n while True:\n new_checkpoint_path = wait_for_new_checkpoint(\n checkpoint_dir, checkpoint_path, timeout=timeout)\n if new_checkpoint_path is None:\n if not timeout_fn:\n # timed out\n tf.compat.v1.logging.info('Timed-out waiting for a checkpoint.')\n return\n if timeout_fn():\n # The timeout_fn indicated that we are truly done.\n return\n else:\n # The timeout_fn indicated that more checkpoints may come.\n continue\n start = time.time()\n checkpoint_path = new_checkpoint_path\n yield checkpoint_path\n time_to_next_eval = start + min_interval_secs - time.time()\n if time_to_next_eval > 0:\n time.sleep(time_to_next_eval)\n\n\ndef evaluate_once(checkpoint_path,\n master='',\n scaffold=None,\n eval_ops=None,\n feed_dict=None,\n final_ops=None,\n final_ops_feed_dict=None,\n hooks=None,\n config=None):\n \"\"\"Evaluates the model at the given checkpoint path.\n\n During a single evaluation, the `eval_ops` is run until the session is\n interrupted or requested to finish. This is typically requested via a\n `StopAfterNEvalsHook` which results in `eval_ops` running the requested number\n of times.\n\n Optionally, a user can pass in `final_ops`, a single `Tensor`, a list of\n `Tensors` or a dictionary from names to `Tensors`. The `final_ops` is\n evaluated a single time after `eval_ops` has finished running and the fetched\n values of `final_ops` are returned. If `final_ops` is left as `None`, then\n `None` is returned.\n\n One may also consider using a `SummaryAtEndHook` to record summaries after the\n `eval_ops` have run. If `eval_ops` is `None`, the summaries run immediately\n after the model checkpoint has been restored.\n\n Note that `evaluate_once` creates a local variable used to track the number of\n evaluations run via `get_or_create_eval_step`.\n Consequently, if a custom local init op is provided via a `scaffold`, the\n caller should ensure that the local init op also initializes the eval step.\n\n Args:\n checkpoint_path: The path to a checkpoint to use for evaluation.\n master: The BNS address of the TensorFlow master.\n scaffold: An tf.train.Scaffold instance for initializing variables and\n restoring variables. Note that `scaffold.init_fn` is used by the function\n to restore the checkpoint. If you supply a custom init_fn, then it must\n also take care of restoring the model from its checkpoint.\n eval_ops: A single `Tensor`, a list of `Tensors` or a dictionary of names\n to `Tensors`, which is run until the session is requested to stop,\n commonly done by a `StopAfterNEvalsHook`.\n feed_dict: The feed dictionary to use when executing the `eval_ops`.\n final_ops: A single `Tensor`, a list of `Tensors` or a dictionary of names\n to `Tensors`.\n final_ops_feed_dict: A feed dictionary to use when evaluating `final_ops`.\n hooks: List of `tf.train.SessionRunHook` callbacks which are run inside the\n evaluation loop.\n config: An instance of `tf.ConfigProto` that will be used to\n configure the `Session`. 
If left as `None`, the default will be used.\n\n Returns:\n The fetched values of `final_ops` or `None` if `final_ops` is `None`.\n \"\"\"\n eval_step = get_or_create_eval_step()\n\n # Prepare the run hooks.\n hooks = list(hooks or [])\n\n if eval_ops is not None:\n if any(isinstance(h, MultiStepStopAfterNEvalsHook) for h in hooks):\n steps_per_run_variable = \\\n basic_session_run_hooks.get_or_create_steps_per_run_variable()\n update_eval_step = tf.compat.v1.assign_add(\n eval_step,\n tf.cast(steps_per_run_variable, dtype=eval_step.dtype),\n use_locking=True)\n else:\n update_eval_step = tf.compat.v1.assign_add(eval_step, 1, use_locking=True)\n\n if isinstance(eval_ops, dict):\n eval_ops['update_eval_step'] = update_eval_step\n elif isinstance(eval_ops, (tuple, list)):\n eval_ops = list(eval_ops) + [update_eval_step]\n else:\n eval_ops = [eval_ops, update_eval_step]\n\n eval_step_value = get_latest_eval_step_value(eval_ops)\n\n for h in hooks:\n if isinstance(h, (StopAfterNEvalsHook, MultiStepStopAfterNEvalsHook)):\n h._set_evals_completed_tensor(eval_step_value) # pylint: disable=protected-access\n\n tf.compat.v1.logging.info(\n 'Starting evaluation at ' +\n time.strftime('%Y-%m-%dT%H:%M:%SZ', time.localtime()))\n\n # Prepare the session creator.\n session_creator = tf.compat.v1.train.ChiefSessionCreator(\n scaffold=scaffold,\n checkpoint_filename_with_path=checkpoint_path,\n master=master,\n config=config)\n\n final_ops_hook = tf.estimator.FinalOpsHook(final_ops, final_ops_feed_dict)\n hooks.append(final_ops_hook)\n\n with tf.compat.v1.train.MonitoredSession(\n session_creator=session_creator, hooks=hooks) as session:\n if eval_ops is not None:\n while not session.should_stop():\n session.run(eval_ops, feed_dict)\n\n tf.compat.v1.logging.info(\n 'Finished evaluation at ' +\n time.strftime('%Y-%m-%d-%H:%M:%S', time.localtime()))\n return final_ops_hook.final_ops_values\n\n\ndef evaluate_repeatedly(checkpoint_dir,\n master='',\n scaffold=None,\n eval_ops=None,\n feed_dict=None,\n final_ops=None,\n final_ops_feed_dict=None,\n eval_interval_secs=60,\n hooks=None,\n config=None,\n max_number_of_evaluations=None,\n timeout=None,\n timeout_fn=None):\n \"\"\"Repeatedly searches for a checkpoint in `checkpoint_dir` and evaluates it.\n\n During a single evaluation, the `eval_ops` is run until the session is\n interrupted or requested to finish. This is typically requested via a\n `StopAfterNEvalsHook` which results in `eval_ops` running the requested number\n of times.\n\n Optionally, a user can pass in `final_ops`, a single `Tensor`, a list of\n `Tensors` or a dictionary from names to `Tensors`. The `final_ops` is\n evaluated a single time after `eval_ops` has finished running and the fetched\n values of `final_ops` are returned. If `final_ops` is left as `None`, then\n `None` is returned.\n\n One may also consider using a `SummaryAtEndHook` to record summaries after the\n `eval_ops` have run. If `eval_ops` is `None`, the summaries run immediately\n after the model checkpoint has been restored.\n\n Note that `evaluate_once` creates a local variable used to track the number of\n evaluations run via `get_or_create_eval_step`.\n Consequently, if a custom local init op is provided via a `scaffold`, the\n caller should ensure that the local init op also initializes the eval step.\n\n Args:\n checkpoint_dir: The directory where checkpoints are stored.\n master: The address of the TensorFlow master.\n scaffold: An tf.train.Scaffold instance for initializing variables and\n restoring variables. 
Note that `scaffold.init_fn` is used by the function\n to restore the checkpoint. If you supply a custom init_fn, then it must\n also take care of restoring the model from its checkpoint.\n eval_ops: A single `Tensor`, a list of `Tensors` or a dictionary of names to\n `Tensors`, which is run until the session is requested to stop, commonly\n done by a `StopAfterNEvalsHook`.\n feed_dict: The feed dictionary to use when executing the `eval_ops`.\n final_ops: A single `Tensor`, a list of `Tensors` or a dictionary of names\n to `Tensors`.\n final_ops_feed_dict: A feed dictionary to use when evaluating `final_ops`.\n eval_interval_secs: The minimum number of seconds between evaluations.\n hooks: List of `tf.estimator.SessionRunHook` callbacks which are run inside\n the evaluation loop.\n config: An instance of `tf.ConfigProto` that will be used to\n configure the `Session`. If left as `None`, the default will be used.\n max_number_of_evaluations: The maximum times to run the evaluation. If left\n as `None`, then evaluation runs indefinitely.\n timeout: The maximum number of seconds to wait between checkpoints. If left\n as `None`, then the process will wait indefinitely.\n timeout_fn: Optional function to call after a timeout. If the function\n returns True, then it means that no new checkpoints will be generated and\n the iterator will exit. The function is called with no arguments.\n\n Returns:\n The fetched values of `final_ops` or `None` if `final_ops` is `None`.\n \"\"\"\n eval_step = get_or_create_eval_step()\n\n # Prepare the run hooks.\n hooks = hooks or []\n\n if eval_ops is not None:\n update_eval_step = tf.compat.v1.assign_add(eval_step, 1)\n\n for h in hooks:\n if isinstance(h, StopAfterNEvalsHook):\n h._set_evals_completed_tensor(update_eval_step) # pylint: disable=protected-access\n\n if isinstance(eval_ops, dict):\n eval_ops['update_eval_step'] = update_eval_step\n elif isinstance(eval_ops, (tuple, list)):\n eval_ops = list(eval_ops) + [update_eval_step]\n else:\n eval_ops = [eval_ops, update_eval_step]\n\n final_ops_hook = tf.estimator.FinalOpsHook(final_ops, final_ops_feed_dict)\n hooks.append(final_ops_hook)\n num_evaluations = 0\n for checkpoint_path in checkpoints_iterator(\n checkpoint_dir,\n min_interval_secs=eval_interval_secs,\n timeout=timeout,\n timeout_fn=timeout_fn):\n\n session_creator = tf.compat.v1.train.ChiefSessionCreator(\n scaffold=scaffold,\n checkpoint_filename_with_path=checkpoint_path,\n master=master,\n config=config)\n\n with tf.compat.v1.train.MonitoredSession(\n session_creator=session_creator, hooks=hooks) as session:\n tf.compat.v1.logging.info(\n 'Starting evaluation at ' +\n time.strftime('%Y-%m-%d-%H:%M:%S', time.gmtime()))\n if eval_ops is not None:\n while not session.should_stop():\n session.run(eval_ops, feed_dict)\n\n tf.compat.v1.logging.info(\n 'Finished evaluation at ' +\n time.strftime('%Y-%m-%d-%H:%M:%S', time.gmtime()))\n num_evaluations += 1\n\n if (max_number_of_evaluations is not None and\n num_evaluations >= max_number_of_evaluations):\n return final_ops_hook.final_ops_values\n\n return final_ops_hook.final_ops_values\n" ]
[ [ "tensorflow.constant", "tensorflow.executing_eagerly", "tensorflow.gradients", "tensorflow.test.main", "tensorflow.compat.v1.trainable_variables", "tensorflow.compat.v1.global_variables_initializer", "numpy.random.normal", "tensorflow.compat.v1.placeholder", "numpy.argmax", "tensorflow.square", "tensorflow.random.normal" ], [ "tensorflow.reshape", "tensorflow.compat.v1.initializers.truncated_normal", "tensorflow.tanh", "tensorflow.compat.v1.layers.batch_normalization", "tensorflow.compat.v1.variable_scope", "tensorflow.nn.leaky_relu" ], [ "tensorflow.random.normal", "tensorflow.random.uniform", "tensorflow.test.main", "scipy.ndimage.convolve", "numpy.random.normal", "numpy.float32", "numpy.array", "numpy.zeros" ], [ "tensorflow.constant", "tensorflow.reduce_sum", "tensorflow.compat.v1.get_variable", "tensorflow.test.main", "numpy.ones", "tensorflow.compat.v1.global_variables_initializer", "tensorflow.compat.v1.constant_initializer", "numpy.array" ], [ "tensorflow.compat.v1.io.gfile.exists", "tensorflow.compat.v1.io.gfile.makedirs", "tensorflow.compat.v1.device", "tensorflow.compat.v1.name_scope", "tensorflow.compat.v1.train.replica_device_setter", "tensorflow.compat.v1.reset_default_graph", "tensorflow.compat.v1.app.run" ], [ "tensorflow.compat.v1.get_default_graph", "tensorflow.compat.v1.summary.merge_all", "tensorflow.train.latest_checkpoint", "tensorflow.control_dependencies", "tensorflow.cast", "tensorflow.compat.v1.train.global_step", "tensorflow.compat.v1.zeros_initializer", "tensorflow.compat.v1.train.get_or_create_global_step", "tensorflow.compat.v1.train.MonitoredSession", "tensorflow.compat.v1.assign_add", "tensorflow.python.training.basic_session_run_hooks.get_or_create_steps_per_run_variable", "tensorflow.compat.v1.logging.info", "tensorflow.compat.v1.summary.FileWriterCache.get", "tensorflow.estimator.FinalOpsHook", "tensorflow.estimator.SessionRunArgs", "tensorflow.compat.v1.train.ChiefSessionCreator" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
adrienxu/SATE
[ "a932859287b2d3a944f7b0ae6670c84c98db7965" ]
[ "examples/speech_to_text/prep_covost_data.py" ]
[ "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport argparse\nimport logging\nfrom pathlib import Path\nimport shutil\nfrom tempfile import NamedTemporaryFile\nfrom typing import Optional, Tuple\nimport string\n\nimport pandas as pd\nimport torchaudio\nfrom examples.speech_to_text.data_utils import (\n create_zip,\n extract_fbank_features,\n filter_manifest_df,\n gen_config_yaml,\n gen_vocab,\n get_zip_manifest,\n load_df_from_tsv,\n save_df_to_tsv,\n)\nfrom torch import Tensor\nfrom torch.utils.data import Dataset\nfrom torchaudio.datasets.utils import download_url, extract_archive\nfrom tqdm import tqdm\n\n\nlog = logging.getLogger(__name__)\n\n\nMANIFEST_COLUMNS = [\"id\", \"audio\", \"n_frames\", \"tgt_text\", \"speaker\"]\n\n\nclass CoVoST(Dataset):\n \"\"\"Create a Dataset for CoVoST (https://github.com/facebookresearch/covost).\n\n Args:\n root (str): root path to the dataset and generated manifests/features\n source_language (str): source (audio) language\n target_language (str, optional): target (text) language,\n None for no translation (default: None)\n version (int, optional): CoVoST version. (default: 2)\n download (bool, optional): Whether to download the dataset if it is not\n found at root path. (default: ``False``).\n \"\"\"\n\n COVOST_URL_TEMPLATE = (\n \"https://dl.fbaipublicfiles.com/covost/\"\n \"covost_v2.{src_lang}_{tgt_lang}.tsv.tar.gz\"\n )\n\n VERSIONS = {2}\n # SPLITS = [\"train\", \"dev\", \"test\"]\n SPLITS = [\"train\"]\n\n XX_EN_LANGUAGES = {\n 1: [\"fr\", \"de\", \"nl\", \"ru\", \"es\", \"it\", \"tr\", \"fa\", \"sv-SE\", \"mn\", \"zh-CN\"],\n 2: [\n \"fr\",\n \"de\",\n \"es\",\n \"ca\",\n \"it\",\n \"ru\",\n \"zh-CN\",\n \"pt\",\n \"fa\",\n \"et\",\n \"mn\",\n \"nl\",\n \"tr\",\n \"ar\",\n \"sv-SE\",\n \"lv\",\n \"sl\",\n \"ta\",\n \"ja\",\n \"id\",\n \"cy\",\n ],\n }\n EN_XX_LANGUAGES = {\n 1: [],\n 2: [\n \"de\",\n \"tr\",\n \"fa\",\n \"sv-SE\",\n \"mn\",\n \"zh-CN\",\n \"cy\",\n \"ca\",\n \"sl\",\n \"et\",\n \"id\",\n \"ar\",\n \"ta\",\n \"lv\",\n \"ja\",\n ],\n }\n\n def __init__(\n self,\n root: str,\n split: str,\n source_language: str,\n target_language: Optional[str] = None,\n version: int = 2,\n ) -> None:\n assert version in self.VERSIONS and split in self.SPLITS\n assert source_language is not None\n self.no_translation = target_language is None\n if not self.no_translation:\n assert \"en\" in {source_language, target_language}\n if source_language == \"en\":\n assert target_language in self.EN_XX_LANGUAGES[version]\n else:\n assert source_language in self.XX_EN_LANGUAGES[version]\n else:\n # Hack here so that we can get \"split\" column from CoVoST TSV.\n # Note that we use CoVoST train split for ASR which is an extension\n # to Common Voice train split.\n target_language = \"de\" if source_language == \"en\" else \"en\"\n\n self.root: Path = Path(root)\n\n cv_tsv_path = self.root / \"validated.tsv\"\n assert cv_tsv_path.is_file()\n cv_tsv = load_df_from_tsv(cv_tsv_path)\n\n if self.no_translation:\n print(\"No target translation.\")\n df = cv_tsv[[\"path\", \"sentence\", \"client_id\"]]\n df = df.set_index([\"path\"], drop=False)\n else:\n covost_url = self.COVOST_URL_TEMPLATE.format(\n src_lang=source_language, tgt_lang=target_language\n )\n covost_archive = self.root / Path(covost_url).name\n if not covost_archive.is_file():\n download_url(covost_url, self.root.as_posix(), 
hash_value=None)\n extract_archive(covost_archive.as_posix())\n\n covost_tsv = load_df_from_tsv(\n self.root / Path(covost_url).name.replace(\".tar.gz\", \"\")\n )\n df = pd.merge(\n left=cv_tsv[[\"path\", \"sentence\", \"client_id\"]],\n right=covost_tsv[[\"path\", \"translation\", \"split\"]],\n how=\"inner\",\n on=\"path\",\n )\n if split == \"train\":\n df = df[(df[\"split\"] == split) | (df[\"split\"] == f\"{split}_covost\")]\n else:\n df = df[df[\"split\"] == split]\n\n data = df.to_dict(orient=\"index\").items()\n data = [v for k, v in sorted(data, key=lambda x: x[0])]\n self.data = []\n for e in data:\n try:\n path = self.root / \"wav\" / e[\"path\"]\n _ = torchaudio.info(path.as_posix())\n self.data.append(e)\n except RuntimeError:\n pass\n\n def __getitem__(\n self, n: int\n ) -> Tuple[Path, int, int, str, str, str, str]:\n \"\"\"Load the n-th sample from the dataset.\n\n Args:\n n (int): The index of the sample to be loaded\n\n Returns:\n tuple: ``(wav_path, sample_rate, n_frames, sentence, translation, speaker_id,\n sample_id)``\n \"\"\"\n data = self.data[n]\n path = self.root / \"wav\" / data[\"path\"]\n info = torchaudio.info(path)\n sample_rate = info.sample_rate\n n_frames = info.num_frames\n sentence = data[\"sentence\"]\n translation = None if self.no_translation else data[\"translation\"]\n speaker_id = data[\"client_id\"]\n _id = data[\"path\"].replace(\".mp3\", \"\")\n return path, sample_rate, n_frames, sentence, translation, speaker_id, _id\n\n def __len__(self) -> int:\n return len(self.data)\n\n\ndef process(args):\n root = Path(args.data_root).absolute() / args.src_lang\n output_root = Path(args.output_root).absolute()\n if args.tgt_lang is not None:\n output_root = output_root / f\"{args.src_lang}-{args.tgt_lang}\"\n else:\n output_root = output_root / f\"{args.src_lang}\"\n if not root.is_dir():\n raise NotADirectoryError(f\"{root} does not exist\")\n\n zip_path = output_root / \"fbank80.zip\"\n if not zip_path.exists():\n # Extract features\n feature_root = output_root / \"fbank80\"\n feature_root.mkdir(exist_ok=True)\n\n for split in CoVoST.SPLITS:\n print(f\"Fetching split {split}...\")\n dataset = CoVoST(root, split, args.src_lang, args.tgt_lang)\n print(\"Extracting log mel filter bank features...\")\n for wav_path, sample_rate, _, _, _, _, utt_id in tqdm(dataset):\n waveform, sample_rate = torchaudio.load(wav_path)\n extract_fbank_features(\n waveform, sample_rate, feature_root / f\"{utt_id}.npy\"\n )\n # Pack features into ZIP\n print(\"ZIPing features...\")\n create_zip(feature_root, zip_path)\n\n # # Clean up\n # shutil.rmtree(feature_root)\n\n print(\"Fetching ZIP manifest...\")\n zip_manifest = get_zip_manifest(zip_path)\n # Generate TSV manifest\n print(\"Generating manifest...\")\n train_text = []\n task = args.task\n # if args.tgt_lang is not None:\n # task = f\"st_{args.src_lang}_{args.tgt_lang}\"\n for split in CoVoST.SPLITS:\n manifest = {c: [] for c in MANIFEST_COLUMNS}\n if args.task == \"st\" and args.add_src:\n manifest[\"src_text\"] = []\n dataset = CoVoST(root, split, args.src_lang, args.tgt_lang)\n for _, sr, n_frames, src_utt, tgt_utt, speaker_id, utt_id in tqdm(dataset):\n manifest[\"id\"].append(utt_id)\n manifest[\"audio\"].append(zip_manifest[utt_id])\n duration_ms = int(n_frames / sr * 1000)\n manifest[\"n_frames\"].append(int(1 + (duration_ms - 25) / 10))\n if args.lowercase_src:\n src_utt = src_utt.lower()\n if args.rm_punc_src:\n for w in string.punctuation:\n src_utt = src_utt.replace(w, \"\")\n src_utt = src_utt.replace(\" \", 
\"\")\n manifest[\"tgt_text\"].append(src_utt if args.tgt_lang is None else tgt_utt)\n if args.task == \"st\" and args.add_src:\n manifest[\"src_text\"].append(src_utt)\n manifest[\"speaker\"].append(speaker_id)\n is_train_split = split.startswith(\"train\")\n if is_train_split:\n if args.task == \"st\" and args.add_src and args.share:\n train_text.extend(manifest[\"src_text\"])\n train_text.extend(manifest[\"tgt_text\"])\n df = pd.DataFrame.from_dict(manifest)\n df = filter_manifest_df(df, is_train_split=is_train_split)\n save_df_to_tsv(df, output_root / f\"{split}_{task}.tsv\")\n\n # Generate vocab\n v_size_str = \"\" if args.vocab_type == \"char\" else str(args.vocab_size)\n spm_filename_prefix = f\"spm_{args.vocab_type}{v_size_str}_{task}\"\n asr_spm_filename = None\n gen_vocab_flag = True\n\n if args.task == \"st\" and args.add_src:\n if args.share:\n if args.st_spm_prefix is not None:\n gen_vocab_flag = False\n spm_filename_prefix = args.st_spm_prefix\n else:\n spm_filename_prefix = f\"spm_{args.vocab_type}{v_size_str}_{args.task}_share\"\n asr_spm_filename = spm_filename_prefix + \".model\"\n else:\n if args.st_spm_prefix is not None:\n gen_vocab_flag = False\n spm_filename_prefix = args.st_spm_prefix\n assert args.asr_prefix is not None\n asr_spm_filename = args.asr_prefix + \".model\"\n elif args.task == \"asr\":\n if args.asr_prefix is not None:\n gen_vocab_flag = False\n spm_filename_prefix = args.asr_prefix\n\n if gen_vocab_flag:\n with NamedTemporaryFile(mode=\"w\") as f:\n for t in train_text:\n f.write(t + \"\\n\")\n gen_vocab(\n Path(f.name),\n output_root / spm_filename_prefix,\n args.vocab_type,\n args.vocab_size\n )\n # Generate config YAML\n gen_config_yaml(\n output_root,\n spm_filename_prefix + \".model\",\n yaml_filename=f\"config_{task}.yaml\",\n specaugment_policy=\"lb\",\n cmvn_type=args.cmvn_type,\n asr_spm_filename=asr_spm_filename,\n share_src_and_tgt=True if args.task == \"asr\" else False\n )\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--data-root\", \"-d\", required=True, type=str,\n help=\"data root with sub-folders for each language <root>/<src_lang>\"\n )\n parser.add_argument(\n \"--output-root\", \"-o\", required=True, type=str,\n help=\"output root to save the results\"\n )\n parser.add_argument(\n \"--vocab-type\",\n default=\"unigram\",\n required=True,\n type=str,\n choices=[\"bpe\", \"unigram\", \"char\"],\n ),\n parser.add_argument(\"--vocab-size\", default=1000, type=int)\n parser.add_argument(\"--src-lang\", \"-s\", required=True, type=str)\n parser.add_argument(\"--task\", type=str, default=\"asr\", choices=[\"asr\", \"st\"])\n parser.add_argument(\"--tgt-lang\", \"-t\", type=str)\n parser.add_argument(\"--share\", action=\"store_true\",\n help=\"share the tokenizer and dictionary of the transcription and translation\")\n parser.add_argument(\"--add-src\", action=\"store_true\", help=\"add the src text for st task\")\n parser.add_argument(\"--asr-prefix\", type=str, help=\"prefix of the asr dict\")\n parser.add_argument(\"--st-spm-prefix\", type=str, default=None, help=\"prefix of the existing st dict\")\n parser.add_argument(\"--lowercase-src\", action=\"store_true\", help=\"lowercase the source text\")\n parser.add_argument(\"--rm-punc-src\", action=\"store_true\", help=\"remove the punctuation of the source text\")\n parser.add_argument(\"--cmvn-type\", default=\"utterance\",\n choices=[\"global\", \"utterance\"],\n help=\"The type of cepstral mean and variance normalization\")\n args = 
parser.parse_args()\n\n process(args)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "pandas.merge", "pandas.DataFrame.from_dict" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
bderembl/mitgcm_configs
[ "8aa0343fc56e9da831e7a8b857838c4f4a76aa9a", "8aa0343fc56e9da831e7a8b857838c4f4a76aa9a", "8aa0343fc56e9da831e7a8b857838c4f4a76aa9a" ]
[ "corner/input/plot_field.py", "eddy_airsea/analysis/ode_wave.py", "floats/input/mygendata.py" ]
[ "#!/usr/bin/env python\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.io.netcdf as netcdf\n\nplt.ion()\n\nflag_mov = 0\nflag_traj = 0\n\ndir0 = '../run/'\n\nfile1 = 'diags.0000000000.t001.nc'\nfile2 = 'grid.t001.nc'\n\nf1 = netcdf.netcdf_file(dir0 + file1)\nf2 = netcdf.netcdf_file(dir0 + file2)\n\n\nx = f2.variables['X'][:].copy()\ny = f2.variables['Y'][:].copy()\n\nxp1 = f2.variables['Xp1'][:].copy()\nyp1 = f2.variables['Yp1'][:].copy()\nT = f1.variables['T'][:].copy()\n\n\nsi_x = len(x)\nsi_y = len(y)\nsi_t = len(T)\n\nh_mit = f2.variables['Depth'][:,:].copy()\n\nvort = f1.variables['momVort3'][0,:,:].copy()\n\nvmin = np.min(vort)\nvmax = -vmin\nvcont = np.linspace(vmin,vmax,20)\n\n\nxunit = 1000.0 # 1:m -- 1000:km\n\nposxy = np.zeros((2,si_t),dtype='int')\n\nif flag_traj == 1:\n for nt in range(0,si_t):\n vort = f1.variables['momVort3'][nt,:,:].copy()\n posxy[0,nt],posxy[1,nt] = np.unravel_index(np.argmin(vort),vort.shape)\n \n\nplt.figure()\n\nif flag_mov == -1:\n nt = 0\n mytime = [49]\n vort = f1.variables['momVort3'][mytime[nt],:,:].copy()\n plt.contour(xp1[:si_x/2]/xunit,yp1/xunit,vort[:,:si_x/2],vcont,colors='k')\n plt.title('Day ' + str(mytime[nt]+1))\n plt.xlabel('x (km)')\n plt.ylabel('y (km)')\n myci = \"CI: {:.1e}\".format(vcont[1]-vcont[0])\n plt.text(x[120]/xunit,y[5]/xunit,myci)\n\n if flag_traj:\n plt.plot(xp1[posxy[1,:mytime[nt]]]/xunit,yp1[posxy[0,:mytime[nt]]]/xunit,'b')\n plt.plot(xp1[posxy[1,mytime[nt]:]]/xunit,yp1[posxy[0,mytime[nt]:]]/xunit,'b--')\n\nelif flag_mov == 0:\n mytime = [0,9,19,29]\n\n for nt in range(0,len(mytime)):\n plt.subplot(2,2,nt+1, aspect='equal')\n vort = f1.variables['momVort3'][mytime[nt],:,:].copy()\n plt.contour(xp1/xunit,yp1/xunit,vort.squeeze(),vcont,colors='k')\n plt.contourf(x/xunit,y/xunit,h_mit,[-10,0],colors='0.5')\n plt.title('Day ' + str(mytime[nt]+1))\n if nt == 2 or nt == 3:\n plt.xlabel('x (km)')\n if nt == 0 or nt == 2: \n plt.ylabel('y (km)')\n myci = \"CI: {:.1e}\".format(vcont[1]-vcont[0])\n plt.text(x[-170]/xunit,y[5]/xunit,myci)\n\n plt.savefig('corner_10mit.eps')\n\nelif flag_mov == 1:\n\n vort = f1.variables['momVort3'][:,:,:].copy()\n\n vmin = np.min(vort)\n vmax = -vmin\n\n vcont = np.linspace(vmin,vmax,20)\n\n for nt in range(0,si_t):\n vort = f1.variables['momVort3'][nt,:,:].copy()\n vort = vort.squeeze()\n vort[0,0] = vmin\n vort[0,1] = vmax\n plt.contourf(xp1/xunit,yp1/xunit,vort,vcont,cmap = plt.cm.bwr)\n plt.contourf(x/xunit,y/xunit,h_mit,[-10,0],colors='0.5')\n ext = '0'\n if nt > 9:\n ext = ''\n plt.savefig('movie/ewall_'+ ext + str(nt) + 'mit.png') \n plt.clf()\n\nf1.close()\nf2.close()\n", "#!/usr/bin/env python\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.integrate as integrate\n\nplt.ion()\n\nf0 = 1e-4\nu0 = 1.0\nR0 = 40e3 # radius\nvmax = -1.0 # m/s\n\n\n\ndef v1(rr):\n v = -vmax*rr/R0*np.exp(-0.5*(rr/R0)**2)\n# v = -vmax*np.tanh(rr/R0)/(np.cosh(rr/R0))**2/(np.tanh(1.0)/(np.cosh(1.0))**2)\n return v\ndef dv1(rr):\n v = -vmax/R0*np.exp(-0.5*(rr/R0)**2)*(1-(rr/R0)**2)\n# v = -vmax*2/R0*np.tanh(rr/R0)/((np.cosh(rr/R0))**2)*(1/(np.cosh(rr/R0))**2 - (np.tanh(rr/R0))**2)/(np.tanh(1.0)/(np.cosh(1.0))**2)\n return v\n\n\ndef f(r, t):\n omega = np.sqrt((dv1(r)+v1(r)/r + f0)*(2*v1(r)/r + f0))\n return u0*np.sin(omega*t)\n\nsi_r = 30\nsi_t = 30000\nr0 = np.linspace(1,5*R0,si_r)\nt = np.linspace(0, si_t/f0/1000, si_t)\nra = np.zeros((si_t,si_r))\nfor ni in range(0,si_r):\n ra[:,ni] = integrate.odeint(f, r0[ni], 
t).squeeze()\n\nplt.figure()\nplt.plot(t*f0/(2*np.pi),ra/R0,'k',linewidth=1)\nplt.xlabel(r'$tf/2\\pi$')\nplt.ylabel(r'$r_p/R_0$')\n\nplt.xlim([np.min(t*f0/(2*np.pi)), np.max(t*f0/(2*np.pi))])\nplt.ylim([np.min(ra/R0), 1.05*np.max(ra/R0)])\n\nplt.savefig(\"ode_k0.pdf\",bbox_inches='tight')\n", "\n#!/usr/bin/env python\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nplt.ion()\n\nbinprec = '>f4'\nflag_plot = 0\n\n\n#% ================== GRID =====================================\ndef stretch(xf,yf,Lx,si_x,rev):\n\n hh = np.linspace(0,1,si_x+1)\n xf = Lx*np.interp(hh,xf,yf)\n\n dx = np.diff(xf)\n\n # reverse order to get high resolution near the bottom\n if rev:\n dx = dx[::-1]\n xf[1:] = np.cumsum(dx)\n \n xc = xf[0:-1] + 0.5*dx\n return xc,xf,dx\n\n\n\nrSphere = 6370.e3\ndeg2m = 2*np.pi*rSphere/360.0\ngg = 9.8\n\nLx = 5000.0e3\nLy = 5000.0e3\nLz = 5000.0\n\nsi_x = 800\nsi_y = 800\nsi_z = 33\n\nsi_x1 = si_x + 1\nsi_y1 = si_y + 1\nsi_z1 = si_z + 1\n\ndx = Lx/si_x;\ndy = Ly/si_y;\n\nxx = Lx*(np.arange(0,si_x) + 0.5)/(1.0*si_x)\nyy = Ly*(np.arange(0,si_y) + 0.5)/(1.0*si_y)\n\nxx1 = Lx*(np.arange(0,si_x+1) )/(1.0*si_x)\nyy1 = Ly*(np.arange(0,si_y+1) )/(1.0*si_y)\n\n\nxg,yg = np.meshgrid(xx,yy) \nxu,yu = np.meshgrid(xx1[:-1],yy) \nxv,yv = np.meshgrid(xx,yy1[:-1]) \nxc,yc = np.meshgrid(xx1,yy1) \n\n\ndx1 = dx*np.ones((si_x))\ndy1 = dy*np.ones((si_y))\n\nslope = 4\nxfz = np.linspace(0,1,1000)\nyfz = np.sinh(slope*xfz)/np.sinh(slope)\nzc,zf,dz1 = stretch(xfz,yfz,Lz,si_z,0)\n\n\ndx1.astype(binprec).tofile('dx.box')\ndy1.astype(binprec).tofile('dy.box')\ndz1.astype(binprec).tofile('dz.box')\n\n\n# ==== physical parameters\nfMin = 3.78e-05\nfMax = 1.3e-4\n\nfmid = 0.5*(fMin + fMax)\n \nbeta = (fMax-fMin)/Ly\nff = np.linspace(fMin,fMax,si_y)\n\nprint('f_south = {0}; beta = {1}'.format(fMin,beta) )\n\n\n#%==================== LAND ===================================\n\nlandh = np.zeros((si_y,si_x));\n\nlandh = -Lz + landh\n\n# walls\nlandh[:,0] = 0.0\nlandh[-1,:] = 0.0\nlandh.astype(binprec).tofile('topog.box')\n\n#%=============== Surface forcing ===================================\n# -- temperature --\nsst = np.zeros((si_y,si_x));\n\nTS = 20.0\nTN = 0.0\n\nsst = (TN-TS)*yg/Ly + TS\n\n#thetaClimFile\nsst.astype(binprec).tofile('sstclim.box')\n\n# -- salinity --\nempmr = np.zeros((si_y,si_x));\n\nF0 = 1e-7 # m/s ~ 10mm/day\nempmr = F0*np.sin(2*np.pi*yg/Ly)\nempmr.astype(binprec).tofile('empmr.box')\n\n\n# -- wind -- \nwindx = np.zeros((si_y,si_x));\n\ntauW = 0.2\nwindx = -tauW*np.sin(2*np.pi*yg/Ly )\n\n#windx = windx*ff.reshape(si_y,1)/fMin\nwindx.astype(binprec).tofile('windx.box')\n\n#=============== Initial conditions ===================================\n\nuinit = np.zeros((si_z,si_y,si_x));\nvinit = np.zeros((si_z,si_y,si_x));\ntinit = np.zeros((si_z,si_y,si_x))\nsinit = 35 + np.zeros((si_z,si_y,si_x))\neinit = np.zeros((si_y,si_x));\n\nuinit.astype(binprec).tofile('uinit.box')\nvinit.astype(binprec).tofile('vinit.box')\ntinit.astype(binprec).tofile('tinit.box')\nsinit.astype(binprec).tofile('sinit.box')\neinit.astype(binprec).tofile('einit.box')\n\n#% ================ floats ============================= \n\nnfl0 = 22\nnfl = nfl0**2\nxfl = np.linspace(1.5*dx,Lx-0.5*dx,nfl0) # no float in topography\nyfl = np.linspace(0.5*dx,Ly-1.5*dx,nfl0)\nxxfl,yyfl = np.meshgrid(xfl,yfl) \n\nvar_fl = np.zeros((nfl+1,9))\n\n\n# float id\nvar_fl[:,0] = np.linspace(0,nfl,nfl+1)\n# tstart\nvar_fl[:,1] = 0.0\n#xpart\nvar_fl[1:,2] = xxfl.flatten() # np.linspace(0,Lx,nfl)\n#ypart\nvar_fl[1:,3] = yyfl.flatten() 
#np.linspace(0,Lx,nfl)\n#kpart\nvar_fl[:,4] = 0.0\n#kfloat\nvar_fl[:,5] = 5.0\n#iup\nvar_fl[:,6] = 0.0\n#itop\nvar_fl[:,7] = 0.0\n#tend\nvar_fl[:,8] = -1\n\n\n#first line\nvar_fl[0,0] = nfl*1.0\nvar_fl[0,1] = -1 \nvar_fl[0,5] = nfl*1.0\nvar_fl[0,8] = -1\n\nvar_fl.astype(binprec).tofile('flinit.box')\n" ]
[ [ "matplotlib.pyplot.contourf", "numpy.linspace", "numpy.min", "matplotlib.pyplot.savefig", "scipy.io.netcdf.netcdf_file", "matplotlib.pyplot.plot", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.subplot", "matplotlib.pyplot.contour", "numpy.argmin", "matplotlib.pyplot.clf", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.text", "matplotlib.pyplot.ion", "numpy.zeros", "matplotlib.pyplot.figure" ], [ "numpy.linspace", "numpy.min", "matplotlib.pyplot.savefig", "numpy.sin", "matplotlib.pyplot.plot", "numpy.max", "matplotlib.pyplot.ylabel", "scipy.integrate.odeint", "numpy.exp", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ion", "numpy.zeros", "matplotlib.pyplot.figure" ], [ "numpy.meshgrid", "numpy.linspace", "numpy.arange", "numpy.cumsum", "numpy.sinh", "numpy.ones", "numpy.sin", "numpy.diff", "numpy.interp", "matplotlib.pyplot.ion", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "0.15", "1.4", "0.16", "1.0", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "0.10", "0.17", "1.3" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kzeiler/modflow6
[ "a185d95b91985e965f8a04ae353305dff19b9637", "a185d95b91985e965f8a04ae353305dff19b9637", "a185d95b91985e965f8a04ae353305dff19b9637" ]
[ "autotest/test_gwf_maw04.py", "autotest/test_gwf_npf_tvk01.py", "autotest/test_gwt_prudic2004t2fmiats.py" ]
[ "import os\nimport pytest\nimport sys\nimport numpy as np\n\ntry:\n import pymake\nexcept:\n msg = \"Error. Pymake package is not available.\\n\"\n msg += \"Try installing using the following command:\\n\"\n msg += \" pip install https://github.com/modflowpy/pymake/zipball/master\"\n raise Exception(msg)\n\ntry:\n import flopy\nexcept:\n msg = \"Error. FloPy package is not available.\\n\"\n msg += \"Try installing using the following command:\\n\"\n msg += \" pip install flopy\"\n raise Exception(msg)\n\nfrom framework import testing_framework, running_on_CI\nfrom simulation import Simulation\n\nex = [\n \"maw_iss305a\",\n \"maw_iss305b\",\n \"maw_iss305c\",\n \"maw_iss305d\",\n \"maw_iss305e\",\n \"maw_iss305f\",\n]\nexdirs = []\nfor s in ex:\n exdirs.append(os.path.join(\"temp\", s))\nddir = \"data\"\ncmppth = \"mf2005\"\n\npaktest = \"maw\"\n\nrequire_failure = [True for i in range(len(exdirs))]\nrequire_failure[0] = False\n\n# set travis to True when version 1.13.0 is released\ncontinuous_integration = [True for n in ex]\n\n# set replace_exe to None to use default executable\nreplace_exe = None\n\n# temporal discretization\nnper = 2\nperlen = [0.0, 365.0]\nnstp = [1, 25]\ntsmult = [1.0, 1.1]\nsteady = [True, False]\n\n# spatial discretization\nnlay, nrow, ncol = 2, 101, 101\nshape3d = (nlay, nrow, ncol)\nsize3d = nlay * nrow * ncol\n\nxlen = 1000.0\ncommon_ratio = 1.01\nnhalf = int(0.5 * ncol) + 1\nfirst_term = 0.5 * xlen / ((1 - common_ratio**nhalf) / (1 - common_ratio))\ndelr = np.zeros((ncol), dtype=float)\nfor n in range(nhalf):\n if n == 0:\n v = first_term\n else:\n v = first_term * common_ratio**n\n delr[nhalf + n - 1] = v\ndelr[: nhalf - 1] = delr[-1 : nhalf - 1 : -1]\n\n# add error to edge cells\nerr = xlen - delr.sum()\ndelr[0] += 0.5 * err\ndelr[-1] += 0.5 * err\n\ntop = 0.0\nbotm = [-175, -350.0]\nstrt = 0.0\n\n# hydraulic data\nhk = 1.0\nss = 1e-5\nconfined = 0\n\nchd_spd = []\nchd5_spd = []\nfor i in range(nrow):\n if i == 0 or i == ncol - 1:\n for j in range(ncol):\n chd_spd.append([(0, i, j), strt])\n chd5_spd.append([0, i, j, strt, strt])\n else:\n chd_spd.append([(0, i, 0), strt])\n chd_spd.append([(0, i, ncol - 1), strt])\n chd5_spd.append([0, i, 0, strt, strt])\n chd5_spd.append([0, i, ncol - 1, strt, strt])\n\n# maw data\nradius0 = np.sqrt(delr[nhalf] * delr[nhalf] / (8.0 * np.pi))\nradius = 0.25\nsradius0 = radius + 0.1\nwellq = -100.0\nskin_mult = [0.1, 10.0, 1.0, 0.0, -1.0, 100.0]\ncondeqn = [\"CUMULATIVE\", \"SKIN\", \"SKIN\", \"SKIN\", \"SPECIFIED\", \"CUMULATIVE\"]\nsradius = [sradius0, sradius0, sradius0, sradius0, sradius0, radius0 * 1.5]\n\ntdis_rc = []\nfor idx in range(nper):\n tdis_rc.append((perlen[idx], nstp[idx], tsmult[idx]))\n\nhclose, rclose = 1e-9, 1e-6\n\n\ndef build_model(idx, dir):\n name = ex[idx]\n ws = dir\n\n # build MODFLOW 6 files\n sim = flopy.mf6.MFSimulation(\n sim_name=name, version=\"mf6\", exe_name=\"mf6\", sim_ws=ws\n )\n # create tdis package\n tdis = flopy.mf6.ModflowTdis(\n sim, time_units=\"DAYS\", nper=nper, perioddata=tdis_rc\n )\n\n # create iterative model solution\n ims = flopy.mf6.ModflowIms(\n sim, inner_dvclose=hclose, rcloserecord=rclose, outer_dvclose=hclose\n )\n\n # create gwf model\n gwf = flopy.mf6.ModflowGwf(sim, modelname=name, save_flows=True)\n\n # discretization\n dis = flopy.mf6.ModflowGwfdis(\n gwf,\n nlay=nlay,\n nrow=nrow,\n ncol=ncol,\n delr=delr,\n delc=delr,\n top=top,\n botm=botm,\n )\n # initial conditions\n ic = flopy.mf6.ModflowGwfic(gwf, strt=strt)\n\n # node property flow\n npf = 
flopy.mf6.ModflowGwfnpf(\n gwf, save_flows=False, icelltype=confined, k=hk\n )\n # storage\n sto = flopy.mf6.ModflowGwfsto(\n gwf,\n save_flows=False,\n iconvert=confined,\n ss=ss,\n steady_state={0: True},\n transient={1: True},\n )\n # constant head\n chd = flopy.mf6.ModflowGwfchd(\n gwf, stress_period_data=chd_spd, save_flows=False\n )\n # multi-aquifer well\n hks = hk * skin_mult[idx]\n mpd = [[0, radius, botm[-1], strt, condeqn[idx], 2]]\n mcd = [\n [0, 0, (0, nhalf, nhalf), top, botm[0], hks, sradius[idx]],\n [0, 1, (1, nhalf, nhalf), botm[0], botm[1], hks, sradius[idx]],\n ]\n perioddata = {1: [[0, \"RATE\", wellq]]}\n maw = flopy.mf6.ModflowGwfmaw(\n gwf,\n print_input=True,\n no_well_storage=True,\n packagedata=mpd,\n connectiondata=mcd,\n perioddata=perioddata,\n )\n # output control\n oc = flopy.mf6.ModflowGwfoc(\n gwf,\n budget_filerecord=\"{}.cbc\".format(name),\n head_filerecord=\"{}.hds\".format(name),\n saverecord=[(\"HEAD\", \"ALL\"), (\"BUDGET\", \"ALL\")],\n )\n # build MODFLOW-2005 files\n if require_failure[idx]:\n mc = None\n else:\n ws = os.path.join(dir, cmppth)\n mc = flopy.modflow.Modflow(name, model_ws=ws, version=cmppth)\n dis = flopy.modflow.ModflowDis(\n mc,\n nlay=nlay,\n nrow=nrow,\n ncol=ncol,\n nper=nper,\n perlen=perlen,\n nstp=nstp,\n tsmult=tsmult,\n steady=steady,\n delr=delr,\n delc=delr,\n top=top,\n botm=botm,\n )\n bas = flopy.modflow.ModflowBas(mc, strt=strt)\n lpf = flopy.modflow.ModflowLpf(\n mc, laytyp=confined, hk=hk, vka=hk, ss=ss, sy=0\n )\n chd = flopy.modflow.ModflowChd(mc, stress_period_data=chd5_spd)\n # mnw2\n # empty mnw2 file to create recarrays\n mnw2 = flopy.modflow.ModflowMnw2(mc)\n node_data = mnw2.get_empty_node_data(2)\n node_data[\"ztop\"] = np.array([top, botm[0]])\n node_data[\"zbotm\"] = np.array([botm[0], botm[1]])\n node_data[\"i\"] = np.array([nhalf, nhalf])\n node_data[\"j\"] = np.array([nhalf, nhalf])\n node_data[\"wellid\"] = np.array([\"well1\", \"well1\"])\n node_data[\"losstype\"] = np.array([\"skin\", \"skin\"])\n node_data[\"rw\"] = np.array([radius, radius])\n node_data[\"rskin\"] = np.array([sradius[idx], sradius[idx]])\n node_data[\"kskin\"] = np.array([hks, hks])\n dtype = [(\"wellid\", np.unicode_, 20), (\"qdes\", \"<f8\")]\n spd0 = np.zeros(1, dtype=dtype)\n spd0[\"wellid\"] = \"well1\"\n spd1 = np.zeros(1, dtype=dtype)\n spd1[\"wellid\"] = \"well1\"\n spd1[\"qdes\"] = wellq\n spd = {0: spd0, 1: spd1}\n mnw2 = flopy.modflow.ModflowMnw2(\n mc,\n mnwmax=1,\n node_data=node_data,\n stress_period_data=spd,\n itmp=[1, 1],\n mnwprnt=2,\n )\n oc = flopy.modflow.ModflowOc(\n mc,\n stress_period_data=None,\n save_every=1,\n save_types=[\"save head\", \"save budget\"],\n )\n pcg = flopy.modflow.ModflowPcg(mc, hclose=hclose, rclose=rclose)\n\n return sim, mc\n\n\n# - No need to change any code below\[email protected](\n \"idx, dir\",\n list(enumerate(exdirs)),\n)\ndef test_mf6model(idx, dir):\n # determine if running on CI infrastructure\n is_CI = running_on_CI()\n\n # initialize testing framework\n test = testing_framework()\n\n # build the models\n test.build_mf6_models_legacy(build_model, idx, dir)\n\n # run the test model\n if is_CI and not continuous_integration[idx]:\n return\n test.run_mf6(Simulation(dir, require_failure=require_failure[idx]))\n\n\ndef main():\n # initialize testing framework\n test = testing_framework()\n\n # build the models\n # run the test model\n for idx, dir in enumerate(exdirs):\n test.build_mf6_models_legacy(build_model, idx, dir)\n sim = Simulation(dir, 
require_failure=require_failure[idx])\n test.run_mf6(sim)\n\n return\n\n\nif __name__ == \"__main__\":\n # print message\n print(\"standalone run of {}\".format(os.path.basename(__file__)))\n\n # run main routine\n main()\n", "import os\nimport pytest\nimport sys\nimport numpy as np\n\ntry:\n import pymake\nexcept:\n msg = \"Error. Pymake package is not available.\\n\"\n msg += \"Try installing using the following command:\\n\"\n msg += \" pip install https://github.com/modflowpy/pymake/zipball/master\"\n raise Exception(msg)\n\ntry:\n import flopy\nexcept:\n msg = \"Error. FloPy package is not available.\\n\"\n msg += \"Try installing using the following command:\\n\"\n msg += \" pip install flopy\"\n raise Exception(msg)\n\nfrom framework import testing_framework\nfrom simulation import Simulation\n\nex = [\n \"tvk01\",\n]\nexdirs = []\nfor s in ex:\n exdirs.append(os.path.join(\"temp\", s))\nddir = \"data\"\n\ntime_varying_k = [1.0, 10.0]\n\n\ndef build_model(idx, dir):\n nlay, nrow, ncol = 3, 3, 3\n perlen = [100.0, 100.0]\n nper = len(perlen)\n nstp = nper * [1]\n tsmult = nper * [1.0]\n delr = 1.0\n delc = 1.0\n top = 1.0\n laytyp = 0\n botm = [0.0, -1.0, -2.0]\n strt = 1.0\n hk = 0.1\n\n nouter, ninner = 100, 300\n hclose, rclose, relax = 1e-6, 1e-6, 1.0\n\n tdis_rc = []\n for i in range(nper):\n tdis_rc.append((perlen[i], nstp[i], tsmult[i]))\n\n name = ex[idx]\n\n # build MODFLOW 6 files\n ws = dir\n sim = flopy.mf6.MFSimulation(\n sim_name=name, version=\"mf6\", exe_name=\"mf6\", sim_ws=ws\n )\n # create tdis package\n tdis = flopy.mf6.ModflowTdis(\n sim, time_units=\"DAYS\", nper=nper, perioddata=tdis_rc\n )\n\n # create gwf model\n gwfname = \"gwf_\" + name\n gwf = flopy.mf6.MFModel(\n sim,\n model_type=\"gwf6\",\n modelname=gwfname,\n model_nam_file=\"{}.nam\".format(gwfname),\n )\n gwf.name_file.save_flows = True\n\n # create iterative model solution and register the gwf model with it\n imsgwf = flopy.mf6.ModflowIms(\n sim,\n print_option=\"SUMMARY\",\n outer_dvclose=hclose,\n outer_maximum=nouter,\n under_relaxation=\"NONE\",\n inner_maximum=ninner,\n inner_dvclose=hclose,\n rcloserecord=rclose,\n linear_acceleration=\"CG\",\n scaling_method=\"NONE\",\n reordering_method=\"NONE\",\n relaxation_factor=relax,\n filename=\"{}.ims\".format(gwfname),\n )\n sim.register_ims_package(imsgwf, [gwf.name])\n\n dis = flopy.mf6.ModflowGwfdis(\n gwf,\n nlay=nlay,\n nrow=nrow,\n ncol=ncol,\n delr=delr,\n delc=delc,\n top=top,\n botm=botm,\n idomain=np.ones((nlay, nrow, ncol), dtype=int),\n filename=\"{}.dis\".format(gwfname),\n )\n\n # initial conditions\n ic = flopy.mf6.ModflowGwfic(\n gwf, strt=strt, filename=\"{}.ic\".format(gwfname)\n )\n\n # node property flow\n tvk_filename = f\"{gwfname}.npf.tvk\"\n npf = flopy.mf6.ModflowGwfnpf(\n gwf,\n save_specific_discharge=True,\n icelltype=laytyp,\n k=hk,\n k33=hk,\n )\n\n # tvk\n tvkspd = {}\n for kper in range(nper):\n hydraulic_conductivity = time_varying_k[kper]\n spd = []\n for k in range(nlay):\n for i in range(nrow):\n for j in range(ncol):\n spd.append([(k, i, j), \"K\", hydraulic_conductivity])\n tvkspd[kper] = spd\n tvk = flopy.mf6.ModflowUtltvk(\n npf, print_input=True, perioddata=tvkspd, filename=tvk_filename\n )\n\n # chd files\n chdspd = []\n for k in range(nlay):\n for i in range(nrow):\n chdspd.append([(k, i, 0), top + 1])\n chdspd.append([(k, i, ncol - 1), top])\n chd = flopy.mf6.ModflowGwfchd(\n gwf,\n stress_period_data=chdspd,\n save_flows=False,\n print_flows=True,\n pname=\"CHD-1\",\n )\n\n # output control\n oc = 
flopy.mf6.ModflowGwfoc(\n gwf,\n budget_filerecord=\"{}.cbc\".format(gwfname),\n head_filerecord=\"{}.hds\".format(gwfname),\n headprintrecord=[(\"COLUMNS\", 10, \"WIDTH\", 15, \"DIGITS\", 6, \"GENERAL\")],\n saverecord=[(\"HEAD\", \"LAST\"), (\"BUDGET\", \"LAST\")],\n printrecord=[(\"HEAD\", \"LAST\"), (\"BUDGET\", \"LAST\")],\n )\n\n return sim, None\n\n\ndef eval_model(sim):\n print(\"evaluating model...\")\n\n name = ex[sim.idxsim]\n gwfname = \"gwf_\" + name\n\n # head\n fpth = os.path.join(sim.simpath, \"{}.hds\".format(gwfname))\n try:\n hobj = flopy.utils.HeadFile(fpth, precision=\"double\")\n head = hobj.get_data()\n except:\n assert False, 'could not load data from \"{}\"'.format(fpth)\n\n # budget\n fpth = os.path.join(sim.simpath, \"{}.cbc\".format(gwfname))\n try:\n bobj = flopy.utils.CellBudgetFile(fpth, precision=\"double\")\n bud_allspd = bobj.get_data(text=\"CHD\")\n except:\n assert False, 'could not load data from \"{}\"'.format(fpth)\n\n # This is the answer to this problem.\n hk = time_varying_k[sim.idxsim]\n delc = 1.0\n delr = 1.0\n delz = 1.0\n dh = 1.0\n dl = 2 * delr\n\n for kper, bud in enumerate(bud_allspd):\n flow_rate_calc = time_varying_k[kper] * delc * delz * dh / dl\n print(f\"Calculated q is {flow_rate_calc}\")\n for node, node2, q in bud:\n print(node, node2, q, flow_rate_calc)\n errmsg = f\"Expected flow rate {flow_rate_calc} but found {q}\"\n assert np.isclose(flow_rate_calc, abs(q))\n\n # comment when done testing\n # assert False\n\n return\n\n\n# - No need to change any code below\[email protected](\n \"idx, dir\",\n list(enumerate(exdirs)),\n)\ndef test_mf6model(idx, dir):\n # initialize testing framework\n test = testing_framework()\n\n # build the model\n test.build_mf6_models(build_model, idx, dir)\n\n # run the test model\n test.run_mf6(Simulation(dir, exfunc=eval_model, idxsim=idx))\n\n\ndef main():\n # initialize testing framework\n test = testing_framework()\n\n # run the test model\n for idx, dir in enumerate(exdirs):\n test.build_mf6_models(build_model, idx, dir)\n sim = Simulation(dir, exfunc=eval_model, idxsim=idx)\n test.run_mf6(sim)\n\n\nif __name__ == \"__main__\":\n # print message\n print(\"standalone run of {}\".format(os.path.basename(__file__)))\n\n # run main routine\n main()\n", "# tests ats on the prudic transport model. With these ATS settings, the\n# solver should fail on time step 19 in period 2, and should converge on the\n# second try with a smaller time step. This test will not pass if the states\n# are not restored properly for the advanced transport packages when the\n# failure occurs.\n\nimport os\nimport pytest\nimport shutil\nimport numpy as np\n\ntry:\n import pymake\nexcept:\n msg = \"Error. Pymake package is not available.\\n\"\n msg += \"Try installing using the following command:\\n\"\n msg += \" pip install https://github.com/modflowpy/pymake/zipball/master\"\n raise Exception(msg)\n\ntry:\n import flopy\nexcept:\n msg = \"Error. 
FloPy package is not available.\\n\"\n msg += \"Try installing using the following command:\\n\"\n msg += \" pip install flopy\"\n raise Exception(msg)\n\n\nfrom framework import set_teardown_test\n\nimport targets\n\nexe_name_mf6 = targets.target_dict[\"mf6\"]\nexe_name_mf6 = os.path.abspath(exe_name_mf6)\n\ndata_ws = os.path.abspath(\"./data/prudic2004test2/\")\ntestdir = \"./temp\"\ntestgroup = \"prudic2004t2fmiats\"\nd = os.path.join(testdir, testgroup)\nif os.path.isdir(d):\n shutil.rmtree(d)\n\nnlay = 8\nnrow = 36\nncol = 23\ndelr = 405.665\ndelc = 403.717\ntop = 100.0\nfname = os.path.join(data_ws, \"bot1.dat\")\nbot0 = np.loadtxt(fname)\nbotm = [bot0] + [bot0 - (15.0 * k) for k in range(1, nlay)]\nfname = os.path.join(data_ws, \"idomain1.dat\")\nidomain0 = np.loadtxt(fname, dtype=int)\nidomain = nlay * [idomain0]\n\n\ndef run_flow_model():\n global idomain\n name = \"flow\"\n gwfname = name\n wsf = os.path.join(testdir, testgroup, name)\n sim = flopy.mf6.MFSimulation(\n sim_name=name, sim_ws=wsf, exe_name=exe_name_mf6\n )\n tdis_rc = [(1.0, 1, 1.0), (365.25 * 25, 1, 1.0)]\n nper = len(tdis_rc)\n tdis = flopy.mf6.ModflowTdis(\n sim, time_units=\"DAYS\", nper=nper, perioddata=tdis_rc\n )\n\n gwf = flopy.mf6.ModflowGwf(sim, modelname=gwfname, save_flows=True)\n\n # ims\n hclose = 0.001\n rclose = 0.1\n nouter = 1000\n ninner = 100\n relax = 0.99\n imsgwf = flopy.mf6.ModflowIms(\n sim,\n print_option=\"SUMMARY\",\n outer_dvclose=hclose,\n outer_maximum=nouter,\n under_relaxation=\"NONE\",\n inner_maximum=ninner,\n inner_dvclose=hclose,\n rcloserecord=rclose,\n linear_acceleration=\"CG\",\n scaling_method=\"NONE\",\n reordering_method=\"NONE\",\n relaxation_factor=relax,\n filename=\"{}.ims\".format(gwfname),\n )\n\n dis = flopy.mf6.ModflowGwfdis(\n gwf,\n nlay=nlay,\n nrow=nrow,\n ncol=ncol,\n delr=delr,\n delc=delc,\n top=top,\n botm=botm,\n idomain=idomain,\n )\n idomain = dis.idomain.array\n\n ic = flopy.mf6.ModflowGwfic(gwf, strt=50.0)\n\n npf = flopy.mf6.ModflowGwfnpf(\n gwf,\n xt3doptions=False,\n save_flows=True,\n save_specific_discharge=True,\n save_saturation=True,\n icelltype=[1] + 7 * [0],\n k=250.0,\n k33=125.0,\n )\n\n sto_on = False\n if sto_on:\n sto = flopy.mf6.ModflowGwfsto(\n gwf,\n save_flows=True,\n iconvert=[1] + 7 * [0],\n ss=1.0e-5,\n sy=0.3,\n steady_state={0: True},\n transient={1: False},\n )\n\n oc = flopy.mf6.ModflowGwfoc(\n gwf,\n budget_filerecord=\"{}.bud\".format(gwfname),\n head_filerecord=\"{}.hds\".format(gwfname),\n headprintrecord=[\n (\"COLUMNS\", ncol, \"WIDTH\", 15, \"DIGITS\", 6, \"GENERAL\")\n ],\n saverecord=[(\"HEAD\", \"ALL\"), (\"BUDGET\", \"ALL\")],\n printrecord=[(\"HEAD\", \"ALL\"), (\"BUDGET\", \"ALL\")],\n )\n\n rch_on = True\n if rch_on:\n rch = flopy.mf6.ModflowGwfrcha(\n gwf, recharge={0: 4.79e-3}, pname=\"RCH-1\"\n )\n\n chdlist = []\n fname = os.path.join(data_ws, \"chd.dat\")\n for line in open(fname, \"r\").readlines():\n ll = line.strip().split()\n if len(ll) == 4:\n k, i, j, hd = ll\n chdlist.append(\n [\n (\n int(k) - 1,\n int(i) - 1,\n int(j) - 1,\n ),\n float(hd),\n ]\n )\n chd = flopy.mf6.ModflowGwfchd(\n gwf, stress_period_data=chdlist, pname=\"CHD-1\"\n )\n\n rivlist = []\n fname = os.path.join(data_ws, \"riv.dat\")\n for line in open(fname, \"r\").readlines():\n ll = line.strip().split()\n if len(ll) == 7:\n k, i, j, s, c, rb, bn = ll\n rivlist.append(\n [\n (\n int(k) - 1,\n int(i) - 1,\n int(j) - 1,\n ),\n float(s),\n float(c),\n float(rb),\n bn,\n ]\n )\n rivra = flopy.mf6.ModflowGwfriv.stress_period_data.empty(\n 
gwf, maxbound=len(rivlist), boundnames=True\n )[0]\n for i, t in enumerate(rivlist):\n rivra[i] = tuple(t)\n fname = os.path.join(data_ws, \"sfr-packagedata.dat\")\n sfrpd = np.genfromtxt(fname, names=True)\n sfrpackagedata = flopy.mf6.ModflowGwfsfr.packagedata.empty(\n gwf, boundnames=True, maxbound=sfrpd.shape[0]\n )\n for name in sfrpackagedata.dtype.names:\n if name in rivra.dtype.names:\n sfrpackagedata[name] = rivra[name]\n for name in sfrpackagedata.dtype.names:\n if name in sfrpd.dtype.names:\n sfrpackagedata[name] = sfrpd[name]\n sfrpackagedata[\"boundname\"] = rivra[\"boundname\"]\n fname = os.path.join(data_ws, \"sfr-connectiondata.dat\")\n with open(fname) as f:\n lines = f.readlines()\n sfrconnectiondata = []\n for line in lines:\n t = line.split()\n c = []\n for v in t:\n i = int(v)\n c.append(i)\n sfrconnectiondata.append(c)\n sfrperioddata = {0: [[0, \"inflow\", 86400], [18, \"inflow\", 8640.0]]}\n\n sfr_obs = {\n (gwfname + \".sfr.obs.csv\",): [\n (\"reach1leakage\", \"SFR\", \"LONGESTRIVERINTHEWORLD1\"),\n (\"reach2leakage\", \"SFR\", \"LONGESTRIVERINTHEWORLD2\"),\n (\"reach3leakage\", \"SFR\", \"LONGESTRIVERINTHEWORLD3\"),\n (\"reach4leakage\", \"SFR\", \"LONGESTRIVERINTHEWORLD4\"),\n ],\n }\n sfr_obs[\"digits\"] = 7\n sfr_obs[\"print_input\"] = True\n sfr_obs[\"filename\"] = gwfname + \".sfr.obs\"\n\n sfr_on = True\n if sfr_on:\n sfr = flopy.mf6.ModflowGwfsfr(\n gwf,\n print_stage=True,\n print_flows=True,\n stage_filerecord=gwfname + \".sfr.bin\",\n budget_filerecord=gwfname + \".sfr.bud\",\n mover=True,\n pname=\"SFR-1\",\n unit_conversion=128390.00,\n boundnames=True,\n nreaches=len(rivlist),\n packagedata=sfrpackagedata,\n connectiondata=sfrconnectiondata,\n perioddata=sfrperioddata,\n observations=sfr_obs,\n )\n\n fname = os.path.join(data_ws, \"lakibd.dat\")\n lakibd = np.loadtxt(fname, dtype=int)\n lakeconnectiondata = []\n nlakecon = [0, 0]\n lak_leakance = 1.0\n for i in range(nrow):\n for j in range(ncol):\n if lakibd[i, j] == 0:\n continue\n else:\n ilak = lakibd[i, j] - 1\n # back\n if i > 0:\n if lakibd[i - 1, j] == 0 and idomain[0, i - 1, j]:\n h = [\n ilak,\n nlakecon[ilak],\n (0, i - 1, j),\n \"horizontal\",\n lak_leakance,\n 0.0,\n 0.0,\n delc / 2.0,\n delr,\n ]\n nlakecon[ilak] += 1\n lakeconnectiondata.append(h)\n # left\n if j > 0:\n if lakibd[i, j - 1] and idomain[0, i, j - 1] == 0:\n h = [\n ilak,\n nlakecon[ilak],\n (0, i, j - 1),\n \"horizontal\",\n lak_leakance,\n 0.0,\n 0.0,\n delr / 2.0,\n delc,\n ]\n nlakecon[ilak] += 1\n lakeconnectiondata.append(h)\n # right\n if j < ncol - 1:\n if lakibd[i, j + 1] == 0 and idomain[0, i, j + 1]:\n h = [\n ilak,\n nlakecon[ilak],\n (0, i, j + 1),\n \"horizontal\",\n lak_leakance,\n 0.0,\n 0.0,\n delr / 2.0,\n delc,\n ]\n nlakecon[ilak] += 1\n lakeconnectiondata.append(h)\n # front\n if i < nrow - 1:\n if lakibd[i + 1, j] == 0 and idomain[0, i + 1, j]:\n h = [\n ilak,\n nlakecon[ilak],\n (0, i + 1, j),\n \"horizontal\",\n lak_leakance,\n 0.0,\n 0.0,\n delc / 2.0,\n delr,\n ]\n nlakecon[ilak] += 1\n lakeconnectiondata.append(h)\n # vertical\n v = [\n ilak,\n nlakecon[ilak],\n (1, i, j),\n \"vertical\",\n lak_leakance,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n ]\n nlakecon[ilak] += 1\n lakeconnectiondata.append(v)\n\n lak_obs = {\n (gwfname + \".lak.obs.csv\",): [\n (\"lake1stage\", \"STAGE\", \"lake1\"),\n (\"lake2stage\", \"STAGE\", \"lake2\"),\n (\"lake1leakage\", \"LAK\", \"lake1\"),\n (\"lake2leakage\", \"LAK\", \"lake2\"),\n ],\n }\n sfr_obs[\"digits\"] = 7\n sfr_obs[\"print_input\"] = True\n sfr_obs[\"filename\"] = 
gwfname + \".sfr.obs\"\n\n i, j = np.where(lakibd > 0)\n idomain[0, i, j] = 0\n gwf.dis.idomain.set_data(idomain[0], layer=0, multiplier=[1])\n\n lakpackagedata = [\n [0, 44.0, nlakecon[0], \"lake1\"],\n [1, 35.2, nlakecon[1], \"lake2\"],\n ]\n # <outletno> <lakein> <lakeout> <couttype> <invert> <width> <rough> <slope>\n outlets = [[0, 0, -1, \"MANNING\", 44.5, 5.000000, 0.03, 0.2187500e-02]]\n\n lake_on = True\n if lake_on:\n lak = flopy.mf6.ModflowGwflak(\n gwf,\n time_conversion=86400.000,\n print_stage=True,\n print_flows=True,\n stage_filerecord=gwfname + \".lak.bin\",\n budget_filerecord=gwfname + \".lak.bud\",\n mover=True,\n pname=\"LAK-1\",\n boundnames=True,\n nlakes=2,\n noutlets=len(outlets),\n outlets=outlets,\n packagedata=lakpackagedata,\n connectiondata=lakeconnectiondata,\n observations=lak_obs,\n )\n\n mover_on = True\n if mover_on:\n maxmvr, maxpackages = 2, 2\n mvrpack = [[\"SFR-1\"], [\"LAK-1\"]]\n mvrperioddata = [\n [\"SFR-1\", 5, \"LAK-1\", 0, \"FACTOR\", 1.0],\n [\"LAK-1\", 0, \"SFR-1\", 6, \"FACTOR\", 1.0],\n ]\n mvr = flopy.mf6.ModflowGwfmvr(\n gwf,\n maxmvr=maxmvr,\n print_flows=True,\n budget_filerecord=gwfname + \".mvr.bud\",\n maxpackages=maxpackages,\n packages=mvrpack,\n perioddata=mvrperioddata,\n )\n\n sim.write_simulation()\n sim.run_simulation(silent=False)\n\n fname = gwfname + \".hds\"\n fname = os.path.join(wsf, fname)\n hobj = flopy.utils.HeadFile(fname, precision=\"double\")\n head = hobj.get_data()\n hobj.file.close()\n\n if lake_on:\n fname = gwfname + \".lak.bin\"\n fname = os.path.join(wsf, fname)\n lkstage = None\n if os.path.isfile(fname):\n lksobj = flopy.utils.HeadFile(\n fname, precision=\"double\", text=\"stage\"\n )\n lkstage = lksobj.get_data().flatten()\n lksobj.file.close()\n\n if sfr_on:\n fname = gwfname + \".sfr.bin\"\n fname = os.path.join(wsf, fname)\n sfstage = None\n if os.path.isfile(fname):\n bobj = flopy.utils.HeadFile(\n fname, precision=\"double\", text=\"stage\"\n )\n sfstage = bobj.get_data().flatten()\n bobj.file.close()\n\n if mover_on:\n fname = gwfname + \".mvr.bud\"\n fname = os.path.join(wsf, fname)\n bobj = flopy.utils.CellBudgetFile(fname, precision=\"double\")\n ra = bobj.recordarray\n print(ra)\n print(ra.dtype)\n for idx in range(ra.shape[0]):\n d = bobj.get_data(idx=idx)[0]\n if d.shape[0] > 0:\n p1 = ra[idx][\"paknam\"].decode().strip()\n p2 = ra[idx][\"paknam2\"].decode().strip()\n print(\n ra[idx][\"kstp\"],\n ra[idx][\"kper\"],\n ra[idx][\"paknam\"],\n ra[idx][\"paknam2\"],\n )\n for node, node2, q in d:\n print(p1, node, p2, node2, q)\n\n return\n\n\ndef run_transport_model():\n name = \"transport\"\n gwtname = name\n wst = os.path.join(testdir, testgroup, name)\n sim = flopy.mf6.MFSimulation(\n sim_name=name,\n version=\"mf6\",\n exe_name=exe_name_mf6,\n sim_ws=wst,\n continue_=False,\n )\n\n tdis_rc = [(1.0, 1, 1.0), (365.25 * 25, 25, 1.0)]\n nper = len(tdis_rc)\n tdis = flopy.mf6.ModflowTdis(\n sim,\n time_units=\"DAYS\",\n nper=nper,\n perioddata=tdis_rc,\n )\n\n if True:\n dt0 = 100\n dtmin = 1.0e-5\n dtmax = 10000.0\n dtadj = 2.0\n dtfailadj = 5.0\n ats_filerecord = name + \".ats\"\n atsperiod = [\n (1, dt0, dtmin, dtmax, dtadj, dtfailadj),\n ]\n tdis.ats.initialize(\n maxats=len(atsperiod),\n perioddata=atsperiod,\n filename=ats_filerecord,\n )\n\n gwt = flopy.mf6.ModflowGwt(sim, modelname=gwtname)\n\n # ims\n hclose = 0.001\n rclose = 0.001\n nouter = 50\n ninner = 20\n relax = 0.97\n imsgwt = flopy.mf6.ModflowIms(\n sim,\n print_option=\"ALL\",\n outer_dvclose=hclose,\n outer_maximum=nouter,\n 
under_relaxation=\"DBD\",\n under_relaxation_theta=0.7,\n inner_maximum=ninner,\n inner_dvclose=hclose,\n rcloserecord=rclose,\n linear_acceleration=\"BICGSTAB\",\n scaling_method=\"NONE\",\n reordering_method=\"NONE\",\n relaxation_factor=relax,\n filename=\"{}.ims\".format(gwtname),\n )\n sim.register_ims_package(imsgwt, [gwt.name])\n\n dis = flopy.mf6.ModflowGwtdis(\n gwt,\n nlay=nlay,\n nrow=nrow,\n ncol=ncol,\n delr=delr,\n delc=delc,\n top=top,\n botm=botm,\n idomain=idomain,\n )\n ic = flopy.mf6.ModflowGwtic(gwt, strt=0.0)\n sto = flopy.mf6.ModflowGwtmst(gwt, porosity=0.3)\n adv = flopy.mf6.ModflowGwtadv(gwt, scheme=\"TVD\")\n dsp = flopy.mf6.ModflowGwtdsp(gwt, alh=20.0, ath1=2, atv=0.2)\n sourcerecarray = [()]\n ssm = flopy.mf6.ModflowGwtssm(gwt, sources=sourcerecarray)\n cnclist = [\n [(0, 0, 11), 500.0],\n [(0, 0, 12), 500.0],\n [(0, 0, 13), 500.0],\n [(0, 0, 14), 500.0],\n [(1, 0, 11), 500.0],\n [(1, 0, 12), 500.0],\n [(1, 0, 13), 500.0],\n [(1, 0, 14), 500.0],\n ]\n cnc = flopy.mf6.ModflowGwtcnc(\n gwt,\n maxbound=len(cnclist),\n stress_period_data=cnclist,\n save_flows=False,\n pname=\"CNC-1\",\n )\n\n lktpackagedata = [\n (0, 0.0, 99.0, 999.0, \"mylake1\"),\n (1, 0.0, 99.0, 999.0, \"mylake2\"),\n ]\n lktperioddata = [\n (0, \"STATUS\", \"ACTIVE\"),\n (1, \"STATUS\", \"ACTIVE\"),\n ]\n lkt_obs = {\n (gwtname + \".lkt.obs.csv\",): [\n (\"lkt1conc\", \"CONCENTRATION\", 1),\n (\"lkt2conc\", \"CONCENTRATION\", 2),\n (\"lkt1frommvr\", \"FROM-MVR\", (0,)),\n (\"lkt2frommvr\", \"FROM-MVR\", (1,)),\n (\"lkt1tomvr\", \"TO-MVR\", (0,)),\n (\"lkt1bntomvr\", \"TO-MVR\", \"mylake1\"),\n ],\n }\n lkt_obs[\"digits\"] = 7\n lkt_obs[\"print_input\"] = True\n lkt_obs[\"filename\"] = gwtname + \".lkt.obs\"\n\n lkt_on = True\n if lkt_on:\n lkt = flopy.mf6.modflow.ModflowGwtlkt(\n gwt,\n boundnames=True,\n save_flows=True,\n print_input=True,\n print_flows=True,\n print_concentration=True,\n concentration_filerecord=gwtname + \".lkt.bin\",\n budget_filerecord=gwtname + \".lkt.bud\",\n packagedata=lktpackagedata,\n lakeperioddata=lktperioddata,\n observations=lkt_obs,\n pname=\"LAK-1\",\n auxiliary=[\"aux1\", \"aux2\"],\n )\n\n nreach = 38\n sftpackagedata = []\n for irno in range(nreach):\n t = (irno, 0.0, 99.0, 999.0, \"myreach{}\".format(irno + 1))\n sftpackagedata.append(t)\n\n sftperioddata = [(0, \"STATUS\", \"ACTIVE\"), (0, \"CONCENTRATION\", 0.0)]\n\n sft_obs = {\n (gwtname + \".sft.obs.csv\",): [\n (\"sft{}conc\".format(i + 1), \"CONCENTRATION\", i + 1)\n for i in range(nreach)\n ]\n }\n # append additional obs attributes to obs dictionary\n sft_obs[\"digits\"] = 7\n sft_obs[\"print_input\"] = True\n sft_obs[\"filename\"] = gwtname + \".sft.obs\"\n\n sft_on = True\n if sft_on:\n sft = flopy.mf6.modflow.ModflowGwtsft(\n gwt,\n boundnames=True,\n save_flows=True,\n print_input=True,\n print_flows=True,\n print_concentration=True,\n concentration_filerecord=gwtname + \".sft.bin\",\n budget_filerecord=gwtname + \".sft.bud\",\n packagedata=sftpackagedata,\n reachperioddata=sftperioddata,\n observations=sft_obs,\n pname=\"SFR-1\",\n auxiliary=[\"aux1\", \"aux2\"],\n )\n\n pd = [\n (\"GWFHEAD\", \"../flow/flow.hds\", None),\n (\"GWFBUDGET\", \"../flow/flow.bud\", None),\n (\"GWFMOVER\", \"../flow/flow.mvr.bud\", None),\n (\"LAK-1\", \"../flow/flow.lak.bud\", None),\n (\"SFR-1\", \"../flow/flow.sfr.bud\", None),\n ]\n fmi = flopy.mf6.ModflowGwtfmi(gwt, packagedata=pd)\n\n # mover transport package\n mvt = flopy.mf6.modflow.ModflowGwtmvt(gwt, print_flows=True)\n\n oc = flopy.mf6.ModflowGwtoc(\n 
gwt,\n budget_filerecord=\"{}.cbc\".format(gwtname),\n concentration_filerecord=\"{}.ucn\".format(gwtname),\n concentrationprintrecord=[\n (\"COLUMNS\", ncol, \"WIDTH\", 15, \"DIGITS\", 6, \"GENERAL\")\n ],\n saverecord=[(\"CONCENTRATION\", \"ALL\"), (\"BUDGET\", \"ALL\")],\n printrecord=[(\"CONCENTRATION\", \"ALL\"), (\"BUDGET\", \"ALL\")],\n )\n\n sim.write_simulation()\n sim.run_simulation()\n\n fname = gwtname + \".lkt.bin\"\n fname = os.path.join(wst, fname)\n bobj = flopy.utils.HeadFile(\n fname, precision=\"double\", text=\"concentration\"\n )\n lkaconc = bobj.get_alldata()[:, 0, 0, :]\n times = bobj.times\n bobj.file.close()\n\n fname = gwtname + \".sft.bin\"\n fname = os.path.join(wst, fname)\n bobj = flopy.utils.HeadFile(\n fname, precision=\"double\", text=\"concentration\"\n )\n sfaconc = bobj.get_alldata()[:, 0, 0, :]\n times = bobj.times\n bobj.file.close()\n times = np.array(times)\n ans_times = [\n 1.0,\n 101.0,\n 201.0,\n 301.0,\n 501.0,\n 701.0,\n 901.0,\n 1101.0,\n 1501.0,\n 1901.0,\n 2301.0,\n 2701.0,\n 3101.0,\n 3501.0,\n 3901.0,\n 4301.0,\n 4701.0,\n 5101.0,\n 5501.0,\n 5581.0,\n 5661.0,\n 5821.0,\n 5981.0,\n 6141.0,\n 6301.0,\n 6621.0,\n 6941.0,\n 7581.0,\n 8221.0,\n 8861.0,\n 9132.25,\n ]\n ans_times = np.array(ans_times)\n errmsg = f\"Expected number of total timesteps is different.\"\n assert times.shape == ans_times.shape, errmsg\n errmsg = f\"Times {times} not equal expected times {ans_times}\"\n assert np.allclose(times, ans_times)\n\n # set atol\n atol = 0.05\n\n # check simulated concentration in lak 1 and 2 sfr reaches\n res_lak1 = lkaconc[:, 0]\n ans_lak1 = [\n -1.7334085635551077e-19,\n -3.187033329925361e-07,\n -1.9287290216857604e-06,\n 5.808788660373555e-07,\n 0.005591936631026452,\n 0.04542773591098022,\n 0.1928635682644908,\n 0.5690001383534176,\n 2.999420704893868,\n 7.110019025850782,\n 12.268025985205634,\n 17.67417138740906,\n 22.69808352286938,\n 27.00477920391491,\n 30.50733530522461,\n 33.222437858798955,\n 35.25052779893794,\n 36.73069024938392,\n 37.792799707882,\n 37.98952686059535,\n 38.171619378463866,\n 38.48532541433273,\n 38.755615320864834,\n 38.98852685483134,\n 39.189072004020026,\n 39.491640226795035,\n 39.71996654913013,\n 40.00486884056597,\n 40.18758842234358,\n 40.309629842366334,\n 40.35288988875558,\n ]\n ans_lak1 = np.array(ans_lak1)\n d = res_lak1 - ans_lak1\n msg = \"{}\\n{}\\n{}\".format(res_lak1, ans_lak1, d)\n assert np.allclose(res_lak1, ans_lak1, atol=atol), msg\n\n res_sfr3 = sfaconc[:, 30]\n ans_sfr3 = [\n -7.607756096700458e-23,\n -1.3669399889004086e-08,\n -9.199259301584774e-08,\n 5.257821481671474e-08,\n 0.00039938114816238295,\n 0.003355197965954286,\n 0.014744417223049597,\n 0.04510445881222458,\n 0.27877628044373737,\n 0.7458007019884897,\n 1.4665631236737788,\n 2.444128940946191,\n 3.6753371432162,\n 5.158039470416099,\n 6.873092531310018,\n 8.780865680435873,\n 10.839098670662382,\n 13.005360141577922,\n 15.230011242915861,\n 15.676842775991494,\n 16.124955033719825,\n 17.022019083397183,\n 17.92055413970275,\n 18.81985349843973,\n 19.717233813700727,\n 21.475928749632136,\n 23.177014613583257,\n 26.206656959204977,\n 28.767131611820425,\n 30.825804240468084,\n 31.611737865014057,\n ]\n ans_sfr3 = np.array(ans_sfr3)\n d = res_sfr3 - ans_sfr3\n msg = \"{}\\n{}\\n{}\".format(res_sfr3, ans_sfr3, d)\n assert np.allclose(res_sfr3, ans_sfr3, atol=atol), msg\n\n res_sfr4 = sfaconc[:, 37]\n ans_sfr4 = [\n -2.0041563248238944e-20,\n -1.319932823356964e-07,\n -8.732574723159916e-07,\n 7.946044660303284e-07,\n 
0.00328713663771796,\n 0.026759005065411397,\n 0.11383700188916444,\n 0.33657660610442625,\n 1.7925681982241561,\n 4.287068052572867,\n 7.4770472357597395,\n 10.91908508386622,\n 14.260965416010272,\n 17.315284886138496,\n 20.02515736345197,\n 22.382282530045032,\n 24.423804946506543,\n 26.208303689647263,\n 27.786040077700754,\n 28.093696017884817,\n 28.393775338962325,\n 28.966213039403637,\n 29.515140249737588,\n 30.043603904714946,\n 30.553203510121918,\n 31.501577161886416,\n 32.38308331851197,\n 33.88529441352757,\n 35.12256241115968,\n 36.10351180222542,\n 36.47615223874849,\n ]\n ans_sfr4 = np.array(ans_sfr4)\n d = res_sfr4 - ans_sfr4\n msg = \"{}\\n{}\\n{}\".format(res_sfr4, ans_sfr4, d)\n assert np.allclose(res_sfr4, ans_sfr4, atol=atol), msg\n\n # make some checks on lake obs csv file\n fname = gwtname + \".lkt.obs.csv\"\n fname = os.path.join(wst, fname)\n try:\n tc = np.genfromtxt(fname, names=True, delimiter=\",\")\n except:\n assert False, 'could not load data from \"{}\"'.format(fname)\n errmsg = \"to-mvr boundname and outlet number do not match for {}\".format(\n fname\n )\n assert np.allclose(tc[\"LKT1TOMVR\"], tc[\"LKT1BNTOMVR\"]), errmsg\n\n # check simulation list file for ats information\n fname = os.path.join(wst, \"mfsim.lst\")\n with open(fname, \"r\") as f:\n lines = f.readlines()\n\n txtlist = [\n (\n \"Failed solution for step 19 and period 2 will be retried using \"\n \"time step of 80.00000\"\n ),\n \"ATS IS OVERRIDING TIME STEPPING FOR THIS PERIOD\",\n \"INITIAL TIME STEP SIZE (DT0) = 100.0000\",\n \"MINIMUM TIME STEP SIZE (DTMIN) = 0.1000000E-04\",\n \"MAXIMUM TIME STEP SIZE (DTMAX) = 10000.00\",\n \"MULTIPLIER/DIVIDER FOR TIME STEP (DTADJ) = 2.000000\",\n \"DIVIDER FOR FAILED TIME STEP (DTFAILADJ) = 5.000000\",\n ]\n all_found = True\n for stxt in txtlist:\n msg = \"Checking for string in mfsim.lst: {}\".format(stxt)\n found = False\n for line in lines:\n if stxt in line:\n found = True\n break\n if not found:\n msg += \" -- NOT FOUND!\"\n all_found = False\n print(\"text not found in mfsim.lst: {}\".format(stxt))\n print(msg)\n assert (\n all_found\n ), \"One or more required text strings not found in mfsim.lst\"\n\n return\n\n\ndef test_prudic2004t2fmiats():\n run_flow_model()\n run_transport_model()\n d = os.path.join(testdir, testgroup)\n teardowntest = set_teardown_test()\n if teardowntest:\n if os.path.isdir(d):\n shutil.rmtree(d)\n return\n\n\nif __name__ == \"__main__\":\n # print message\n print(\"standalone run of {}\".format(os.path.basename(__file__)))\n\n # run tests\n test_prudic2004t2fmiats()\n" ]
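Editor's note: the ATS test above pins its expectations to simple time-step arithmetic. With dt0 = 100 and dtadj = 2, the step grows 100 -> 200 -> 400 over successful steps, and on the expected failure dtfailadj = 5 divides the failed step, which matches the "retried using time step of 80.00000" string the test greps out of mfsim.lst. A minimal sketch of that arithmetic (the two-successful-steps sequence is an inference from the expected strings, not stated explicitly in the file):

dt0, dtadj, dtfailadj = 100.0, 2.0, 5.0  # values from the atsperiod record above

dt = dt0
for _ in range(2):           # successful steps double the step: 100 -> 200 -> 400
    dt *= dtadj
retry_dt = dt / dtfailadj    # a failed step is divided by dtfailadj: 400 / 5 = 80
assert retry_dt == 80.0      # matches the expected mfsim.lst message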
[ [ "numpy.array", "numpy.zeros", "numpy.sqrt" ], [ "numpy.ones" ], [ "numpy.allclose", "numpy.genfromtxt", "numpy.array", "numpy.where", "numpy.loadtxt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
deep-spin/SIGMORPHON2019
[ "60cf3b53be42e76238e7928405b2916cd9aed6c4", "60cf3b53be42e76238e7928405b2916cd9aed6c4" ]
[ "onmt/tests/test_attention.py", "onmt/modules/sparse_losses.py" ]
[ "\"\"\"\nHere come the tests for attention types and their compatibility\n\"\"\"\nimport unittest\nimport torch\nfrom torch.autograd import Variable\n\nimport onmt\n\n\nclass TestAttention(unittest.TestCase):\n\n def test_masked_global_attention(self):\n\n source_lengths = torch.IntTensor([7, 3, 5, 2])\n # illegal_weights_mask = torch.ByteTensor([\n # [0, 0, 0, 0, 0, 0, 0],\n # [0, 0, 0, 1, 1, 1, 1],\n # [0, 0, 0, 0, 0, 1, 1],\n # [0, 0, 1, 1, 1, 1, 1]])\n\n batch_size = source_lengths.size(0)\n dim = 20\n\n memory_bank = Variable(torch.randn(batch_size,\n source_lengths.max(), dim))\n hidden = Variable(torch.randn(batch_size, dim))\n\n attn = onmt.modules.Attention(dim)\n\n _, alignments = attn(hidden, memory_bank,\n memory_lengths=source_lengths)\n # TODO: fix for pytorch 0.3\n # illegal_weights = alignments.masked_select(illegal_weights_mask)\n\n # self.assertEqual(0.0, illegal_weights.data.sum())\n", "import torch\nimport torch.nn as nn\nfrom torch.autograd import Function\nfrom onmt.utils.misc import aeq as assert_equal\n\nfrom onmt.modules.sparse_activations import sparsemax\n\n\ndef _fy_backward(ctx, grad_output):\n p_star, = ctx.saved_tensors\n grad = grad_output.unsqueeze(1) * p_star\n return grad\n\n\ndef _omega_sparsemax(p_star):\n return (1 - (p_star ** 2).sum(dim=1)) / 2\n\n\nclass SparsemaxLossFunction(Function):\n\n @classmethod\n def forward(cls, ctx, input, target):\n \"\"\"\n input (FloatTensor): n x num_classes\n target (LongTensor): n, the indices of the target classes\n \"\"\"\n assert_equal(input.shape[0], target.shape[0])\n\n p_star = sparsemax(input, 1)\n cls.p_star = p_star.clone().detach()\n loss = _omega_sparsemax(p_star)\n\n p_star.scatter_add_(1, target.unsqueeze(1),\n torch.full_like(p_star, -1))\n loss += torch.einsum(\"ij,ij->i\", p_star, input)\n\n ctx.save_for_backward(p_star)\n\n return loss\n\n @staticmethod\n def backward(ctx, grad_output):\n return _fy_backward(ctx, grad_output), None\n\n\nsparsemax_loss = SparsemaxLossFunction.apply\n\n\nclass SparsemaxLoss(nn.Module):\n\n def __init__(self, weight=None, ignore_index=-100,\n reduction='elementwise_mean'):\n assert reduction in ['elementwise_mean', 'sum', 'none']\n self.reduction = reduction\n self.weight = weight\n self.ignore_index = ignore_index\n super(SparsemaxLoss, self).__init__()\n\n def forward(self, input, target):\n loss = sparsemax_loss(input, target)\n if self.ignore_index >= 0:\n ignored_positions = target == self.ignore_index\n size = float((target.size(0) - ignored_positions.sum()).item())\n loss.masked_fill_(ignored_positions, 0.0)\n else:\n size = float(target.size(0))\n if self.reduction == 'sum':\n loss = loss.sum()\n elif self.reduction == 'elementwise_mean':\n loss = loss.sum() / size\n return loss, SparsemaxLossFunction.p_star\n" ]
[ [ "torch.randn", "torch.IntTensor" ], [ "torch.full_like", "torch.einsum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
GingerBear/texar
[ "46e006f9349893a3015cd937bee9914c516e26af", "46e006f9349893a3015cd937bee9914c516e26af", "46e006f9349893a3015cd937bee9914c516e26af", "46e006f9349893a3015cd937bee9914c516e26af" ]
[ "texar/tf/data/data/tfrecord_data_test.py", "texar/tf/modules/classifiers/conv_classifiers.py", "texar/tf/modules/regressors/xlnet_regressor_test.py", "texar/tf/agents/agent_utils.py" ]
[ "# -*- coding: utf-8 -*-\n#\n\"\"\"\nUnit tests for data related operations.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport os\nimport sys\nimport copy\nimport shutil\nimport tempfile\nimport ssl\nimport tensorflow as tf\nimport texar.tf as tx\n\n\nssl._create_default_https_context = ssl._create_unverified_context\n\n\nclass TFRecordDataTest(tf.test.TestCase):\n \"\"\"Tests tfrecord data class.\n \"\"\"\n # pylint: disable=too-many-locals\n def setUp(self):\n tf.test.TestCase.setUp(self)\n\n # Create test data\n # pylint: disable=no-member\n self._test_dir = tempfile.mkdtemp()\n\n cat_in_snow = tf.keras.utils.get_file(\n os.path.join(self._test_dir, 'cat_0.jpg'),\n 'https://storage.googleapis.com/download.tensorflow.org/'\n 'example_images/320px-Felis_catus-cat_on_snow.jpg')\n williamsburg_bridge = tf.keras.utils.get_file(\n os.path.join(self._test_dir, 'bridge_0.jpg'),\n 'https://storage.googleapis.com/download.tensorflow.org/'\n 'example_images/194px-New_East_River_Bridge_from_Brooklyn_'\n 'det.4a09796u.jpg')\n\n def _bytes_feature(value=None):\n \"\"\"Returns a bytes_list from a string / byte.\n \"\"\"\n # pylint: disable=undefined-loop-variable\n value = tf.compat.as_bytes(\n value,\n encoding='utf-8'\n )\n return tf.train.Feature(\n bytes_list=tf.train.BytesList(value=[value]))\n\n def _int64_feature(value=None):\n \"\"\"Returns an int64_list from a bool / enum / int / uint.\n \"\"\"\n return tf.train.Feature(\n int64_list=tf.train.Int64List(value=[value]))\n\n _feature_original_types = {\n 'height': ['tf.int64', 'FixedLenFeature'],\n 'width': ['tf.int64', 'FixedLenFeature'],\n 'label': ['tf.int64', 'FixedLenFeature'],\n 'shape': [tf.int64, 'VarLenFeature'],\n 'image_raw': ['tf.string', 'FixedLenFeature'],\n 'variable1': [tf.string, 'FixedLenFeature'],\n 'variable2': ['tf.int64', 'FixedLenFeature'],\n }\n self._feature_convert_types = {\n 'variable1': 'tf.float32',\n 'variable2': 'tf.string',\n }\n _image_options = {}\n self._unconvert_features = ['height', 'width', 'label']\n def _image_example(image_string, image_shape, label):\n \"\"\"Create data example with image\n \"\"\"\n feature = {\n 'height': _int64_feature(image_shape[0]),\n 'width': _int64_feature(image_shape[1]),\n 'shape': tf.train.Feature(\n int64_list=tf.train.Int64List(value=list(image_shape))),\n 'label': _int64_feature(label),\n 'image_raw': _bytes_feature(image_string),\n 'variable1': _bytes_feature('1234567890'),\n 'variable2': _int64_feature(9876543210),\n }\n return tf.train.Example(\n features=tf.train.Features(feature=feature))\n\n self._dataset_valid = {\n 'height': [],\n 'width': [],\n 'shape': [],\n 'label': [],\n 'image_raw': [],\n 'variable1': [],\n 'variable2': [],\n }\n _toy_image_labels_valid = {\n cat_in_snow : 0,\n williamsburg_bridge : 1,\n }\n _toy_image_shapes = {\n cat_in_snow: (213, 320, 3),\n williamsburg_bridge: (239, 194),\n }\n _tfrecord_filepath = os.path.join(\n self._test_dir,\n 'test.tfrecord')\n # Prepare Validation data\n with tf.python_io.TFRecordWriter(_tfrecord_filepath) as writer:\n for image_path, label in _toy_image_labels_valid.items():\n\n with open(image_path, 'rb') as fid:\n image_data = fid.read()\n image_shape = _toy_image_shapes[image_path]\n\n tf_example = _image_example(image_data, image_shape, label)\n writer.write(tf_example.SerializeToString())\n\n #_construct_dataset_valid(\"\", shape, label)\n single_data = {\n 'height': image_shape[0],\n 
'width': image_shape[1],\n 'shape': image_shape,\n 'label': label,\n 'image_raw': image_data,\n 'variable1': \"1234567890\",\n 'variable2': int(9876543210),\n }\n for key, value in single_data.items():\n self._dataset_valid[key].append(value)\n\n self._hparams = {\n \"num_epochs\": 1,\n \"batch_size\": 1,\n \"shuffle\": False,\n \"dataset\": {\n \"files\": _tfrecord_filepath,\n \"feature_original_types\": _feature_original_types,\n \"feature_convert_types\": self._feature_convert_types,\n \"image_options\": [_image_options],\n }\n }\n\n def tearDown(self):\n \"\"\"Remove the downloaded files after the test\n \"\"\"\n shutil.rmtree(self._test_dir)\n\n def _run_and_test(self, hparams):\n # Construct database\n tfrecord_data = tx.data.TFRecordData(hparams)\n iterator = tfrecord_data.dataset.make_initializable_iterator()\n data_batch = iterator.get_next()\n\n with self.test_session() as sess:\n sess.run(tf.global_variables_initializer())\n sess.run(tf.local_variables_initializer())\n sess.run(tf.tables_initializer())\n sess.run(iterator.initializer)\n i = 0\n def _prod(lst):\n res = 1\n for i in lst:\n res *= i\n return res\n while True:\n try:\n # Run the logics\n data_batch_ = sess.run(data_batch)\n self.assertEqual(\n set(data_batch_.keys()),\n set(tfrecord_data.list_items()))\n\n # Check data consistency\n for key in self._unconvert_features:\n value = data_batch_[key][0]\n self.assertEqual(value, self._dataset_valid[key][i])\n self.assertEqual(\n list(data_batch_['shape'].values),\n list(self._dataset_valid['shape'][i]))\n\n # Check data type conversion\n for key, item in self._feature_convert_types.items():\n value = data_batch_[key][0]\n if item == 'tf.string' or item is tf.string:\n self.assertTrue(isinstance(value, bytes))\n else:\n dtype_matched = (\n tx.utils.dtypes.get_tf_dtype(str(value.dtype))\n is tx.utils.dtypes.get_tf_dtype(item))\n self.assertTrue(dtype_matched)\n\n # Check image decoding and resize\n if hparams[\"dataset\"].get(\"image_options\"):\n image_options = hparams[\"dataset\"].get(\"image_options\")\n if isinstance(image_options, dict):\n image_options = [image_options]\n for image_option_feature in image_options:\n image_key = image_option_feature.get(\n \"image_feature_name\")\n if image_key is None:\n continue\n image_gen = data_batch_[image_key][0]\n image_valid_shape = self._dataset_valid[\"shape\"][i]\n resize_height = image_option_feature.get(\n \"resize_height\")\n resize_width = image_option_feature.get(\n \"resize_width\")\n if resize_height and resize_width:\n self.assertEqual(\n image_gen.shape[0] * image_gen.shape[1],\n resize_height * resize_width)\n else:\n self.assertEqual(\n _prod(image_gen.shape),\n _prod(image_valid_shape))\n i += 1\n except tf.errors.OutOfRangeError:\n print('Done -- epoch limit reached')\n break\n\n def test_default_setting(self):\n \"\"\"Tests the logics of TFRecordData.\n \"\"\"\n self._run_and_test(self._hparams)\n\n def test_image_resize(self):\n \"\"\"Tests the image resize function\n \"\"\"\n hparams = copy.copy(self._hparams)\n _image_options = {\n 'image_feature_name': 'image_raw',\n 'resize_height': 512,\n 'resize_width': 512,\n }\n hparams[\"dataset\"].update({\"image_options\": _image_options})\n self._run_and_test(hparams)\n\nif __name__ == \"__main__\":\n tf.test.main()\n", "# Copyright 2018 The Texar Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nVarious classifier classes.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# pylint: disable=not-context-manager, too-many-arguments, too-many-locals\n\nimport tensorflow as tf\n\nfrom texar.tf.utils.exceptions import TexarError\nfrom texar.tf.modules.classifiers.classifier_base import ClassifierBase\nfrom texar.tf.modules.encoders.conv_encoders import Conv1DEncoder\nfrom texar.tf.utils import utils\nfrom texar.tf.hyperparams import HParams\n\n__all__ = [\n \"Conv1DClassifier\"\n]\n\nclass Conv1DClassifier(ClassifierBase):\n \"\"\"Simple Conv-1D classifier.\n This is a combination of the\n :class:`~texar.tf.modules.Conv1DEncoder` with a classification layer.\n\n Args:\n hparams (dict, optional): Hyperparameters. Missing\n hyperparamerter will be set to default values. See\n :meth:`default_hparams` for the hyperparameter sturcture and\n default values.\n\n Example:\n\n .. code-block:: python\n\n clas = Conv1DClassifier(hparams={'num_classes': 10})\n\n inputs = tf.random_uniform([64, 20, 256])\n logits, pred = clas(inputs)\n # logits == Tensor of shape [64, 10]\n # pred == Tensor of shape [64]\n\n .. document private functions\n .. automethod:: _build\n \"\"\"\n\n def __init__(self, hparams=None):\n ClassifierBase.__init__(self, hparams)\n\n with tf.variable_scope(self.variable_scope):\n encoder_hparams = utils.dict_fetch(\n hparams, Conv1DEncoder.default_hparams())\n self._encoder = Conv1DEncoder(hparams=encoder_hparams)\n\n # Add an additional dense layer if needed\n self._num_classes = self._hparams.num_classes\n if self._num_classes > 0:\n if self._hparams.num_dense_layers <= 0:\n self._encoder.append_layer({\"type\": \"Flatten\"})\n\n logit_kwargs = self._hparams.logit_layer_kwargs\n if logit_kwargs is None:\n logit_kwargs = {}\n elif not isinstance(logit_kwargs, HParams):\n raise ValueError(\n \"hparams['logit_layer_kwargs'] must be a dict.\")\n else:\n logit_kwargs = logit_kwargs.todict()\n logit_kwargs.update({\"units\": self._num_classes})\n if 'name' not in logit_kwargs:\n logit_kwargs['name'] = \"logit_layer\"\n\n self._encoder.append_layer(\n {\"type\": \"Dense\", \"kwargs\": logit_kwargs})\n\n @staticmethod\n def default_hparams():\n \"\"\"Returns a dictionary of hyperparameters with default values.\n\n .. code-block:: python\n\n {\n # (1) Same hyperparameters as in Conv1DEncoder\n ...\n\n # (2) Additional hyperparameters\n \"num_classes\": 2,\n \"logit_layer_kwargs\": {\n \"use_bias\": False\n },\n \"name\": \"conv1d_classifier\"\n }\n\n Here:\n\n 1. Same hyperparameters as in :class:`~texar.tf.modules.Conv1DEncoder`.\n See the :meth:`~texar.tf.modules.Conv1DEncoder.default_hparams`.\n An instance of Conv1DEncoder is created for feature extraction.\n\n 2. 
Additional hyperparameters:\n\n \"num_classes\": int\n Number of classes:\n\n - If **`> 0`**, an additional :tf_main:`Dense <layers/Dense>` \\\n layer is appended to the encoder to compute the logits over \\\n classes.\n - If **`<= 0`**, no dense layer is appended. The number of \\\n classes is assumed to be the final dense layer size of the \\\n encoder.\n\n \"logit_layer_kwargs\": dict\n Keyword arguments for the logit Dense layer constructor,\n except for argument \"units\" which is set to \"num_classes\".\n Ignored if no extra logit layer is appended.\n\n \"name\": str\n Name of the classifier.\n \"\"\"\n hparams = Conv1DEncoder.default_hparams()\n hparams.update({\n \"name\": \"conv1d_classifier\",\n \"num_classes\": 2, #set to <=0 to avoid appending output layer\n \"logit_layer_kwargs\": {\"use_bias\": False}\n })\n return hparams\n\n def _build(self, # pylint: disable=arguments-differ\n inputs,\n sequence_length=None,\n dtype=None,\n mode=None):\n \"\"\"Feeds the inputs through the network and makes classification.\n\n The arguments are the same as in :class:`~texar.tf.modules.Conv1DEncoder`.\n\n The predictions of binary classification (\"num_classes\"=1) and\n multi-way classification (\"num_classes\">1) are different, as explained\n below.\n\n Args:\n inputs: The inputs to the network, which is a 3D tensor. See\n :class:`~texar.tf.modules.Conv1DEncoder` for more details.\n sequence_length (optional): An int tensor of shape `[batch_size]`\n containing the length of each element in :attr:`inputs`.\n If given, time steps beyond the length will first be masked out\n before feeding to the layers.\n dtype (optional): Type of the inputs. If not provided, infers\n from inputs automatically.\n mode (optional): A tensor taking value in\n :tf_main:`tf.estimator.ModeKeys <estimator/ModeKeys>`, including\n `TRAIN`, `EVAL`, and `PREDICT`. If `None`,\n :func:`texar.tf.global_mode` is used.\n\n Returns:\n A tuple `(logits, pred)`, where\n\n - **`logits`** is a Tensor of shape `[batch_size, num_classes]`\\\n for `num_classes` >1, and `[batch_size]` for `num_classes` =1 \\\n (i.e., binary classification).\n - **`pred`** is the prediction, a Tensor of shape `[batch_size]` \\\n and type `tf.int64`. For binary classification, the standard \\\n sigmoid function is used for prediction, and the class labels are \\\n `{0, 1}`.\n \"\"\"\n logits = self._encoder(inputs, sequence_length, dtype, mode)\n\n num_classes = self._hparams.num_classes\n is_binary = num_classes == 1\n is_binary = is_binary or (num_classes <= 0 and logits.shape[1] == 1)\n\n if is_binary:\n pred = tf.greater(logits, 0)\n logits = tf.reshape(logits, [-1])\n else:\n pred = tf.argmax(logits, 1)\n pred = tf.cast(tf.reshape(pred, [-1]), tf.int64)\n\n self._built = True\n\n return logits, pred\n\n @property\n def trainable_variables(self):\n \"\"\"The list of trainable variables of the module.\n \"\"\"\n if not self._built:\n raise TexarError(\n \"Attempting to access trainable_variables before module %s \"\n \"was fully built. The module is built once it is called, \"\n \"e.g., with `%s(...)`\" % (self.name, self.name))\n return self._encoder.trainable_variables\n\n @property\n def num_classes(self):\n \"\"\"The number of classes.\n \"\"\"\n return self._num_classes\n\n @property\n def nn(self): # pylint: disable=invalid-name\n \"\"\"The classifier neural network.\n \"\"\"\n return self._encoder\n\n def has_layer(self, layer_name):\n \"\"\"Returns `True` if the network with the name exists. 
Returns `False`\n otherwise.\n\n Args:\n layer_name (str): Name of the layer.\n \"\"\"\n return self._encoder.has_layer(layer_name)\n\n def layer_by_name(self, layer_name):\n \"\"\"Returns the layer with the name. Returns 'None' if the layer name\n does not exist.\n\n Args:\n layer_name (str): Name of the layer.\n \"\"\"\n return self._encoder.layer_by_name(layer_name)\n\n @property\n def layers_by_name(self):\n \"\"\"A dictionary mapping layer names to the layers.\n \"\"\"\n return self._encoder.layers_by_name\n\n @property\n def layers(self):\n \"\"\"A list of the layers.\n \"\"\"\n return self._encoder.layers\n\n @property\n def layer_names(self):\n \"\"\"A list of uniquified layer names.\n \"\"\"\n return self._encoder.layer_names\n\n def layer_outputs_by_name(self, layer_name):\n \"\"\"Returns the output tensors of the layer with the specified name.\n Returns `None` if the layer name does not exist.\n\n Args:\n layer_name (str): Name of the layer.\n \"\"\"\n return self._encoder.layer_outputs_by_name(layer_name)\n\n @property\n def layer_outputs(self):\n \"\"\"A list containing output tensors of each layer.\n \"\"\"\n return self._encoder.layer_outputs\n", "#\n\"\"\"\nUnit tests for XLNet regressor.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom texar.tf.modules.regressors.xlnet_regressor import XLNetRegressor\nfrom texar.tf.utils.test import pretrained_test\n\n# pylint: disable=too-many-locals, no-member\n\n\nclass XLNetRegressorTest(tf.test.TestCase):\n \"\"\"Tests :class:`~texar.tf.modules.XLNetRegressor` class.\n \"\"\"\n\n @pretrained_test\n def test_model_loading(self):\n r\"\"\"Tests model loading functionality.\"\"\"\n\n inputs = tf.placeholder(dtype=tf.int32, shape=[None, None])\n\n for pretrained_model_name in XLNetRegressor.available_checkpoints():\n regressor = XLNetRegressor(\n pretrained_model_name=pretrained_model_name)\n _ = regressor(inputs)\n\n def test_trainable_variables(self):\n \"\"\"Tests the functionality of automatically collecting trainable\n variables.\n \"\"\"\n inputs = tf.placeholder(dtype=tf.int32, shape=[None, None])\n\n # case 1\n hparams = {\n \"pretrained_model_name\": None,\n }\n regressor = XLNetRegressor(hparams=hparams)\n regressor(inputs)\n n_xlnet_vars = 162\n n_projection_vars = 2\n n_logits_vars = 2\n self.assertEqual(len(regressor.trainable_variables),\n n_xlnet_vars + n_logits_vars + n_projection_vars)\n\n # case 2\n hparams = {\n \"pretrained_model_name\": None,\n \"regr_strategy\": \"all_time\"\n }\n regressor = XLNetRegressor(hparams=hparams)\n regressor(inputs)\n self.assertEqual(len(regressor.trainable_variables),\n n_xlnet_vars + n_logits_vars + n_projection_vars)\n\n # case 3\n hparams = {\n \"pretrained_model_name\": None,\n \"regr_strategy\": \"time_wise\"\n }\n regressor = XLNetRegressor(hparams=hparams)\n regressor(inputs)\n self.assertEqual(len(regressor.trainable_variables),\n n_xlnet_vars + n_logits_vars + n_projection_vars)\n\n def test_encode(self):\n \"\"\"Tests encoding.\n \"\"\"\n max_time = 8\n batch_size = 16\n inputs = tf.random_uniform([batch_size, max_time],\n maxval=30521, dtype=tf.int32)\n\n # case 1\n hparams = {\n \"pretrained_model_name\": None,\n }\n regressor = XLNetRegressor(hparams=hparams)\n logits = regressor(inputs)\n\n with self.test_session() as sess:\n sess.run(tf.global_variables_initializer())\n logits_ = sess.run(logits)\n 
self.assertEqual(logits_.shape, (batch_size,))\n\n # case 2\n hparams = {\n \"pretrained_model_name\": None,\n \"regr_strategy\": \"cls_time\"\n }\n regressor = XLNetRegressor(hparams=hparams)\n logits = regressor(inputs)\n\n with self.test_session() as sess:\n sess.run(tf.global_variables_initializer())\n logits_ = sess.run(logits)\n self.assertEqual(logits_.shape, (batch_size,))\n\n # case 3\n hparams = {\n \"pretrained_model_name\": None,\n \"regr_strategy\": \"time_wise\"\n }\n regressor = XLNetRegressor(hparams=hparams)\n logits = regressor(inputs)\n\n with self.test_session() as sess:\n sess.run(tf.global_variables_initializer())\n logits_ = sess.run(logits)\n self.assertEqual(logits_.shape,\n (batch_size, max_time))\n\n # case 4\n hparams = {\n \"pretrained_model_name\": None,\n \"regr_strategy\": \"all_time\",\n \"max_seq_len\": max_time\n }\n inputs = tf.placeholder(tf.int32, shape=[batch_size, 6])\n regressor = XLNetRegressor(hparams=hparams)\n logits = regressor(inputs)\n\n with self.test_session() as sess:\n sess.run(tf.global_variables_initializer())\n logits_ = sess.run(\n logits,\n feed_dict={inputs: np.random.randint(30521,\n size=(batch_size, 6))})\n self.assertEqual(logits_.shape, (batch_size,))\n\n def test_regression(self):\n \"\"\"Test the type of regression output.\"\"\"\n batch_size = 8\n\n hparams = {\n \"pretrained_model_name\": None,\n \"regr_strategy\": \"cls_time\"\n }\n inputs = tf.placeholder(tf.int32, shape=[batch_size, 6])\n regressor = XLNetRegressor(hparams=hparams)\n logits = regressor(inputs)\n\n with self.test_session() as sess:\n sess.run(tf.global_variables_initializer())\n logits_ = sess.run(\n logits,\n feed_dict={inputs: np.random.randint(30521,\n size=(batch_size, 6))})\n self.assertEqual(logits_.dtype, np.float32)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n", "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nVarious agent utilities.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# pylint: disable=too-many-arguments, too-few-public-methods, no-member\n# pylint: disable=invalid-name, wrong-import-position\n\nimport numpy as np\n\ngym_utils = None\ntry:\n from texar.tf.agents import agent_gym_utils as gym_utils\nexcept ImportError:\n pass\n\n__all__ = [\n \"Space\",\n \"EnvConfig\"\n]\n\nclass Space(object):\n \"\"\"Observation and action spaces. Describes valid actions and observations.\n Similar to :gym:`gym.Space <#spaces>`.\n\n Args:\n shape (optional): Shape of the space, a tuple. If not\n given, infers from :attr:`low` and :attr:`high`.\n low (optional): Lower bound (inclusive) of each dimension of the\n space. Must have\n shape as specified by :attr:`shape`, and of the same shape with\n with :attr:`high` (if given). If `None`, set to `-inf` for each\n dimension.\n high (optional): Upper bound (inclusive) of each dimension of the\n space. 
Must have\n shape as specified by :attr:`shape`, and of the same shape with\n with :attr:`low` (if given). If `None`, set to `inf` for each\n dimension.\n dtype (optional): Data type of elements in the space. If not given,\n infers from :attr:`low` (if given) or set to `float`.\n\n Example:\n\n .. code-block:: python\n\n s = Space(low=0, high=10, dtype=np.int32)\n #s.contains(2) == True\n #s.contains(10) == True\n #s.contains(11) == False\n #s.shape == ()\n\n s2 = Space(shape=(2,2), high=np.ones([2,2]), dtype=np.float)\n #s2.low == [[-inf, -inf], [-inf, -inf]]\n #s2.high == [[1., 1.], [1., 1.]]\n \"\"\"\n def __init__(self, shape=None, low=None, high=None, dtype=None):\n if low is None:\n low = -float('inf')\n if high is None:\n high = float('inf')\n\n if shape is None:\n low = np.asarray(low)\n high = np.asarray(high)\n if low.shape != high.shape:\n raise ValueError('`low` and `high` must have the same shape.')\n shape = low.shape\n else:\n shape = tuple(shape)\n\n if np.isscalar(low):\n low = low + np.zeros(shape, dtype=dtype)\n if np.isscalar(high):\n high = high + np.zeros(shape, dtype=dtype)\n if shape != low.shape or shape != high.shape:\n raise ValueError(\n 'Shape inconsistent: shape={}, low.shape={}, high.shape={}'\n .format(shape, low.shape, high.shape))\n if dtype is None:\n dtype = low.dtype\n dtype = np.dtype(dtype)\n low = low.astype(dtype)\n high = high.astype(dtype)\n self._shape = shape\n self._low = low\n self._high = high\n self._dtype = dtype\n\n def contains(self, x):\n \"\"\"Checks if x is contained in the space. Returns a `bool`.\n \"\"\"\n x = np.asarray(x)\n dtype_match = True\n if self._dtype.kind in np.typecodes['AllInteger']:\n if x.dtype.kind not in np.typecodes['AllInteger']:\n dtype_match = False\n shape_match = x.shape == self._shape\n low_match = (x >= self._low).all()\n high_match = (x <= self._high).all()\n return dtype_match and shape_match and low_match and high_match\n\n @property\n def shape(self):\n \"\"\"Shape of the space.\n \"\"\"\n return self._shape\n\n @property\n def low(self):\n \"\"\"Lower bound of the space.\n \"\"\"\n return self._low\n\n @property\n def high(self):\n \"\"\"Upper bound of the space.\n \"\"\"\n return self._high\n\n @property\n def dtype(self):\n \"\"\"Data type of the element.\n \"\"\"\n return self._dtype\n\nclass EnvConfig(object):\n \"\"\"Configurations of an environment.\n\n Args:\n action_space: An instance of :class:`~texar.tf.agents.Space` or\n :gym:`gym.Space <#spaces>`, the action space.\n observ_space: An instance of :class:`~texar.tf.agents.Space` or\n :gym:`gym.Space <#spaces>`, the observation space.\n reward_range: A tuple corresponding to the min and max possible\n rewards, e.g., `reward_range=(-1.0, 1.0)`.\n \"\"\"\n\n def __init__(self,\n action_space,\n observ_space,\n reward_range):\n if gym_utils:\n action_space = gym_utils.convert_gym_space(action_space)\n observ_space = gym_utils.convert_gym_space(observ_space)\n\n self.action_space = action_space\n self.action_dtype = action_space.dtype\n self.action_shape = action_space.shape\n\n self.observ_space = observ_space\n self.observ_dtype = observ_space.dtype\n self.observ_shape = observ_space.shape\n\n self.reward_range = reward_range\n" ]
[ [ "tensorflow.local_variables_initializer", "tensorflow.test.main", "tensorflow.python_io.TFRecordWriter", "tensorflow.compat.as_bytes", "tensorflow.global_variables_initializer", "tensorflow.train.BytesList", "tensorflow.train.Features", "tensorflow.tables_initializer", "tensorflow.test.TestCase.setUp", "tensorflow.train.Int64List" ], [ "tensorflow.variable_scope", "tensorflow.argmax", "tensorflow.reshape", "tensorflow.greater" ], [ "tensorflow.placeholder", "tensorflow.test.main", "tensorflow.global_variables_initializer", "tensorflow.random_uniform", "numpy.random.randint" ], [ "numpy.asarray", "numpy.dtype", "numpy.zeros", "numpy.isscalar" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Jeasonlee313/paperdev_Phy_SORT-
[ "24c9ee5d3fc18ed6d3d85e4f95195d39bdf527e2" ]
[ "deep_sort/sort/tracker.py" ]
[ "# vim: expandtab:ts=4:sw=4\nfrom __future__ import absolute_import\nimport numpy as np\nfrom . import kalman_filter\nfrom . import linear_assignment\nfrom . import iou_matching\nfrom .track import Track\n\n\nclass Tracker:\n \"\"\"\n This is the multi-target tracker.\n\n Parameters\n ----------\n metric : nn_matching.NearestNeighborDistanceMetric\n A distance metric for measurement-to-track association.\n max_age : int\n Maximum number of missed misses before a track is deleted.\n n_init : int\n Number of consecutive detections before the track is confirmed. The\n track state is set to `Deleted` if a miss occurs within the first\n `n_init` frames.\n\n Attributes\n ----------\n metric : nn_matching.NearestNeighborDistanceMetric\n The distance metric used for measurement to track association.\n max_age : int\n Maximum number of missed misses before a track is deleted.\n n_init : int\n Number of frames that a track remains in initialization phase.\n kf : kalman_filter.KalmanFilter\n A Kalman filter to filter target trajectories in image space.\n tracks : List[Track]\n The list of active tracks at the current time step.\n\n \"\"\"\n\n def __init__(self, metric, max_iou_distance=0.7, max_age=70, n_init=3, h = np.identity(3, float)):\n self.metric = metric\n self.max_iou_distance = max_iou_distance\n self.max_age = max_age\n self.n_init = n_init\n\n self.kf = kalman_filter.KalmanFilter()\n self.tracks = []\n self._next_id = 1\n\n self.H = h\n\n def predict(self):\n \"\"\"Propagate track state distributions one time step forward.\n\n This function should be called once every time step, before `update`.\n \"\"\"\n for track in self.tracks:\n track.predict(self.kf)\n\n def update(self, detections, h=np.identity(3)):\n \"\"\"Perform measurement update and track management.\n\n Parameters\n ----------\n detections : List[deep_sort.detection.Detection]\n A list of detections at the current time step.\n\n \"\"\"\n # Run matching cascade.\n matches, unmatched_tracks, unmatched_detections = \\\n self._match(detections)\n\n # Update track set.\n for track_idx, detection_idx in matches:\n self.tracks[track_idx].update(\n self.kf, detections[detection_idx])\n for track_idx in unmatched_tracks:\n self.tracks[track_idx].mark_missed()\n for detection_idx in unmatched_detections:\n self._initiate_track(detections[detection_idx])\n self.tracks = [t for t in self.tracks if not t.is_deleted()]\n\n # Update distance metric.\n active_targets = [t.track_id for t in self.tracks if t.is_confirmed()]\n features, targets = [], []\n for track in self.tracks:\n if not track.is_confirmed():\n continue\n features += track.features\n targets += [track.track_id for _ in track.features]\n track.features = []\n self.metric.partial_fit(\n np.asarray(features), np.asarray(targets), active_targets)\n\n def _match(self, detections):\n\n def gated_metric(tracks, dets, track_indices, detection_indices):\n features = np.array([dets[i].feature for i in detection_indices])\n targets = np.array([tracks[i].track_id for i in track_indices])\n cost_matrix = self.metric.distance(features, targets)\n print(\"cost_matrix1:\\n\", cost_matrix)\n cost_matrix = linear_assignment.gate_cost_matrix(\n self.kf, cost_matrix, tracks, dets, track_indices,\n detection_indices, only_position=True)\n print(\"cost_matrix2:\\n\", cost_matrix)\n return cost_matrix\n\n # Split track set into confirmed and unconfirmed tracks.\n confirmed_tracks = [\n i for i, t in enumerate(self.tracks) if t.is_confirmed()]\n unconfirmed_tracks = [\n i for i, t in 
enumerate(self.tracks) if not t.is_confirmed()]\n\n # Associate confirmed tracks using appearance features.\n matches_a, unmatched_tracks_a, unmatched_detections = \\\n linear_assignment.matching_cascade(\n gated_metric, self.metric.matching_threshold, self.max_age,\n self.tracks, detections, confirmed_tracks)\n\n # Associate remaining tracks together with unconfirmed tracks using IOU.\n iou_track_candidates = unconfirmed_tracks + [\n k for k in unmatched_tracks_a if\n self.tracks[k].time_since_update == 1]\n unmatched_tracks_a = [\n k for k in unmatched_tracks_a if\n self.tracks[k].time_since_update != 1]\n matches_b, unmatched_tracks_b, unmatched_detections = \\\n linear_assignment.min_cost_matching(\n iou_matching.iou_cost, self.max_iou_distance, self.tracks,\n detections, iou_track_candidates, unmatched_detections)\n\n matches = matches_a + matches_b\n unmatched_tracks = list(set(unmatched_tracks_a + unmatched_tracks_b))\n return matches, unmatched_tracks, unmatched_detections\n\n def _initiate_track(self, detection):\n mean, covariance = self.kf.initiate(detection.to_toppoint())\n self.tracks.append(Track(\n mean, covariance, self._next_id, self.n_init, self.max_age,\n detection.feature, h=self.H))\n self._next_id += 1\n" ]
[ [ "numpy.asarray", "numpy.array", "numpy.identity" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
moojink/drq
[ "e05c337aeb6fcae30c2db6e4afaca65e94511bbd", "e05c337aeb6fcae30c2db6e4afaca65e94511bbd" ]
[ "meta_logger.py", "meta_drq.py" ]
[ "import csv\nimport json\nimport os\nimport shutil\nfrom collections import defaultdict\n\nimport numpy as np\n\nimport torch\nimport torchvision\nfrom termcolor import colored\nfrom torch.utils.tensorboard import SummaryWriter\n\nCOMMON_TRAIN_FORMAT = [('episode', 'E', 'int'), ('step', 'S', 'int'),\n ('episode_reward', 'R', 'float'),\n ('duration', 'D', 'time')]\n\nCOMMON_EVAL_FORMAT = [('episode', 'E', 'int'), ('step', 'S', 'int'),\n ('episode_reward', 'R', 'float')]\n\nAGENT_TRAIN_FORMAT = {\n 'drq': [('batch_reward', 'BR', 'float'), ('actor_loss', 'ALOSS', 'float'),\n ('critic_loss', 'CLOSS', 'float'),\n ('alpha_loss', 'TLOSS', 'float'), ('alpha_value', 'TVAL', 'float'),\n ('actor_entropy', 'AENT', 'float')]\n}\n\n\nclass AverageMeter(object):\n def __init__(self):\n self._sum = 0\n self._count = 0\n\n def update(self, value, n=1):\n self._sum += value\n self._count += n\n\n def value(self):\n return self._sum / max(1, self._count)\n\n\nclass MetersGroup(object):\n def __init__(self, file_name, formating):\n self._csv_file_name = self._prepare_file(file_name, 'csv')\n self._formating = formating\n self._meters = defaultdict(AverageMeter)\n self._csv_file = open(self._csv_file_name, 'w')\n self._csv_writer = None\n\n def _prepare_file(self, prefix, suffix):\n file_name = f'{prefix}.{suffix}'\n if os.path.exists(file_name):\n os.remove(file_name)\n return file_name\n\n def log(self, key, value, n=1):\n self._meters[key].update(value, n)\n\n def _prime_meters(self):\n data = dict()\n for key, meter in self._meters.items():\n if key.startswith('train'):\n key = key[len('train') + 1:]\n else:\n key = key[len('eval') + 1:]\n key = key.replace('/', '_')\n data[key] = meter.value()\n return data\n\n def _dump_to_csv(self, data):\n if self._csv_writer is None:\n self._csv_writer = csv.DictWriter(self._csv_file,\n fieldnames=sorted(data.keys()),\n restval=0.0)\n self._csv_writer.writeheader()\n self._csv_writer.writerow(data)\n self._csv_file.flush()\n\n def _format(self, key, value, ty):\n if ty == 'int':\n value = int(value)\n return f'{key}: {value}'\n elif ty == 'float':\n return f'{key}: {value:.04f}'\n elif ty == 'time':\n return f'{key}: {value:04.1f} s'\n else:\n raise f'invalid format type: {ty}'\n\n def _dump_to_console(self, data, prefix):\n prefix = colored(prefix, 'yellow' if prefix == 'train' else 'green')\n pieces = [f'| {prefix: <14}']\n for key, disp_key, ty in self._formating:\n value = data.get(key, 0)\n pieces.append(self._format(disp_key, value, ty))\n print(' | '.join(pieces))\n\n def dump(self, step, prefix, save=True):\n if len(self._meters) == 0:\n return\n if save:\n data = self._prime_meters()\n data['step'] = step\n self._dump_to_csv(data)\n self._dump_to_console(data, prefix)\n self._meters.clear()\n\n\nclass Logger(object):\n def __init__(self,\n log_dir,\n save_tb=False,\n log_frequency=10000,\n action_repeat=1,\n agent='drq'):\n self._log_dir = log_dir\n self._log_frequency = log_frequency\n self._action_repeat = action_repeat\n if save_tb:\n tb_dir = os.path.join(log_dir, 'tb')\n if os.path.exists(tb_dir):\n try:\n shutil.rmtree(tb_dir)\n except:\n print(\"logger.py warning: Unable to remove tb directory\")\n pass\n self._sw = SummaryWriter(tb_dir)\n else:\n self._sw = None\n # each agent has specific output format for training\n assert agent in AGENT_TRAIN_FORMAT\n train_format = COMMON_TRAIN_FORMAT + AGENT_TRAIN_FORMAT[agent]\n self._train_mg = MetersGroup(os.path.join(log_dir, 'train'),\n formating=train_format)\n self._eval_mg = 
MetersGroup(os.path.join(log_dir, 'eval'),\n formating=COMMON_EVAL_FORMAT)\n\n def _should_log(self, step, log_frequency):\n log_frequency = log_frequency or self._log_frequency\n return step % log_frequency == 0\n\n def _update_step(self, step):\n return step * self._action_repeat\n\n def _try_sw_log(self, key, value, step):\n step = self._update_step(step)\n if self._sw is not None:\n self._sw.add_scalar(key, value, step)\n\n def _try_sw_log_image(self, key, image, step):\n step = self._update_step(step)\n if self._sw is not None:\n assert image.dim() == 3\n grid = torchvision.utils.make_grid(image.unsqueeze(1))\n self._sw.add_image(key, grid, step)\n\n def _try_sw_log_video(self, key, frames, step):\n step = self._update_step(step)\n if self._sw is not None:\n frames = torch.from_numpy(np.array(frames))\n frames = frames.unsqueeze(0)\n self._sw.add_video(key, frames, step, fps=30)\n\n def _try_sw_log_histogram(self, key, histogram, step):\n step = self._update_step(step)\n if self._sw is not None:\n self._sw.add_histogram(key, histogram, step)\n\n def log(self, key, value, step, n=1, log_frequency=1):\n if not self._should_log(step, log_frequency):\n return\n assert key.startswith('train') or key.startswith('eval')\n if type(value) == torch.Tensor:\n value = value.item()\n self._try_sw_log(key, value / n, step)\n mg = self._train_mg if key.startswith('train') else self._eval_mg\n mg.log(key, value, n)\n \n def eval_log(self, key, value, step, n=1, log_frequency=1):\n \"\"\"Same as self.log(), except we don't call self._should_log().\n In other words, we always log.\"\"\"\n assert key.startswith('train') or key.startswith('eval')\n if type(value) == torch.Tensor:\n value = value.item()\n self._try_sw_log(key, value / n, step)\n mg = self._train_mg if key.startswith('train') else self._eval_mg\n mg.log(key, value, n)\n\n\n def log_param(self, key, param, step, log_frequency=None):\n if not self._should_log(step, log_frequency):\n return\n self.log_histogram(key + '_w', param.weight.data, step)\n if hasattr(param.weight, 'grad') and param.weight.grad is not None:\n self.log_histogram(key + '_w_g', param.weight.grad.data, step)\n if hasattr(param, 'bias') and hasattr(param.bias, 'data'):\n self.log_histogram(key + '_b', param.bias.data, step)\n if hasattr(param.bias, 'grad') and param.bias.grad is not None:\n self.log_histogram(key + '_b_g', param.bias.grad.data, step)\n\n def log_image(self, key, image, step, log_frequency=None):\n if not self._should_log(step, log_frequency):\n return\n assert key.startswith('train') or key.startswith('eval')\n self._try_sw_log_image(key, image, step)\n\n def log_video(self, key, frames, step, log_frequency=None):\n if not self._should_log(step, log_frequency):\n return\n assert key.startswith('train') or key.startswith('eval')\n self._try_sw_log_video(key, frames, step)\n\n def log_histogram(self, key, histogram, step, log_frequency=None):\n if not self._should_log(step, log_frequency):\n return\n assert key.startswith('train') or key.startswith('eval')\n self._try_sw_log_histogram(key, histogram, step)\n\n def dump(self, step, save=True, ty=None):\n step = self._update_step(step)\n if ty is None:\n self._train_mg.dump(step, 'train', save)\n self._eval_mg.dump(step, 'eval', save)\n elif ty == 'eval':\n self._eval_mg.dump(step, 'eval', save)\n elif ty == 'train':\n self._train_mg.dump(step, 'train', save)\n else:\n raise f'invalid log type: {ty}'\n", "import numpy as np\nimport os\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as 
F\nimport copy\nimport math\n\nimport meta_utils as utils\nimport hydra\n\n\nclass Encoder(nn.Module):\n \"\"\"Convolutional encoder for image-based observations.\"\"\"\n def __init__(self, view, obs_shape, feature_dim):\n super().__init__()\n\n assert len(obs_shape) == 3\n self.num_layers = 4\n self.num_filters = 32\n self.output_logits = False\n self.feature_dim = feature_dim\n output_dim = feature_dim # TODO: change this if sharing weights between encoders\n\n self.convs = nn.ModuleList([\n nn.Conv2d(obs_shape[0], self.num_filters, 3, stride=2),\n nn.Conv2d(self.num_filters, self.num_filters, 3, stride=1),\n nn.Conv2d(self.num_filters, self.num_filters, 3, stride=1),\n nn.Conv2d(self.num_filters, self.num_filters, 3, stride=1)\n ])\n\n if obs_shape[1] == 84: # DeepMind control suite images are 84x84\n conv_out_size = 35\n elif obs_shape[1] == 128:\n conv_out_size = 57\n else:\n raise ValueError(\"Unsupported image size.\")\n\n self.head = nn.Sequential(\n nn.Linear(self.num_filters * conv_out_size * conv_out_size, output_dim),\n nn.LayerNorm(output_dim))\n\n self.outputs = dict()\n\n def forward_conv(self, obs):\n obs = obs / 255.\n self.outputs['obs'] = obs\n\n conv = torch.relu(self.convs[0](obs))\n self.outputs['conv1'] = conv\n\n for i in range(1, self.num_layers):\n conv = torch.relu(self.convs[i](conv))\n self.outputs['conv%s' % (i + 1)] = conv\n\n h = conv.view(conv.size(0), -1)\n return h\n\n def forward(self, obs, detach=False):\n h = self.forward_conv(obs)\n\n if detach:\n h = h.detach()\n\n out = self.head(h)\n if not self.output_logits:\n out = torch.tanh(out)\n\n self.outputs['out'] = out\n\n return out\n\n def copy_conv_weights_from(self, source):\n \"\"\"Tie convolutional layers\"\"\"\n for i in range(self.num_layers):\n utils.tie_weights(src=source.convs[i], trg=self.convs[i])\n\n def log(self, logger, step, encoder_num): # TODO: change this if sharing weights\n for k, v in self.outputs.items():\n logger.log_histogram(f'train_encoder{encoder_num}/{k}_hist', v, step)\n if len(v.shape) > 2:\n logger.log_image(f'train_encoder{encoder_num}/{k}_img', v[0], step)\n\n for i in range(self.num_layers):\n logger.log_param(f'train_encoder{encoder_num}/conv{i + 1}', self.convs[i], step)\n\n\nclass Actor(nn.Module):\n \"\"\"torch.distributions implementation of a diagonal Gaussian policy.\"\"\"\n def __init__(self, view, encoder_cfg, action_shape, hidden_dim, hidden_depth,\n log_std_bounds, proprio_obs_shape):\n super().__init__()\n\n self.view = view\n if str(self.view) == 'both':\n self.encoder1 = hydra.utils.instantiate(encoder_cfg) # TODO: change this if sharing weights\n self.encoder3 = hydra.utils.instantiate(encoder_cfg) # TODO: change this if sharing weights\n self.trunk = utils.mlp(self.encoder1.feature_dim + self.encoder3.feature_dim + proprio_obs_shape, hidden_dim, # TODO: change this if sharing weights\n 2 * action_shape[0], hidden_depth)\n else:\n self.encoder = hydra.utils.instantiate(encoder_cfg)\n self.trunk = utils.mlp(self.encoder.feature_dim + proprio_obs_shape, hidden_dim,\n 2 * action_shape[0], hidden_depth)\n\n self.log_std_bounds = log_std_bounds\n\n self.outputs = dict()\n self.apply(utils.weight_init)\n\n def forward(self, obs, detach_encoder=False):\n if str(self.view) == 'both':\n img_obs1, img_obs3, proprio_obs = obs\n encoder_out1 = self.encoder1(img_obs1, detach=detach_encoder) # TODO: change this if sharing weights\n encoder_out3 = self.encoder3(img_obs3, detach=detach_encoder) # TODO: change this if sharing weights\n obs_out = torch.cat((encoder_out1, 
encoder_out3, proprio_obs), dim=-1)\n else:\n img_obs, proprio_obs = obs\n encoder_out = self.encoder(img_obs, detach=detach_encoder)\n obs_out = torch.cat((encoder_out, proprio_obs), dim=-1)\n\n mu, log_std = self.trunk(obs_out).chunk(2, dim=-1)\n\n # constrain log_std inside [log_std_min, log_std_max]\n log_std = torch.tanh(log_std)\n log_std_min, log_std_max = self.log_std_bounds\n log_std = log_std_min + 0.5 * (log_std_max - log_std_min) * (log_std +\n 1)\n std = log_std.exp()\n\n self.outputs['mu'] = mu\n self.outputs['std'] = std\n\n dist = utils.SquashedNormal(mu, std)\n return dist\n\n def log(self, logger, step):\n for k, v in self.outputs.items():\n logger.log_histogram(f'train_actor/{k}_hist', v, step)\n\n for i, m in enumerate(self.trunk):\n if type(m) == nn.Linear:\n logger.log_param(f'train_actor/fc{i}', m, step)\n\n\nclass Critic(nn.Module):\n \"\"\"Critic network, employs double Q-learning.\"\"\"\n def __init__(self, view, encoder_cfg, action_shape, hidden_dim, hidden_depth, proprio_obs_shape):\n super().__init__()\n\n self.view = view\n if str(self.view) == 'both':\n self.encoder1 = hydra.utils.instantiate(encoder_cfg) # TODO: change this if sharing weights\n self.encoder3 = hydra.utils.instantiate(encoder_cfg) # TODO: change this if sharing weights\n self.Q1 = utils.mlp(self.encoder1.feature_dim + self.encoder3.feature_dim + proprio_obs_shape + action_shape[0], # TODO: change this if sharing weights\n hidden_dim, 1, hidden_depth)\n self.Q2 = utils.mlp(self.encoder1.feature_dim + self.encoder3.feature_dim + proprio_obs_shape + action_shape[0], # TODO: change this if sharing weights\n hidden_dim, 1, hidden_depth)\n else:\n self.encoder = hydra.utils.instantiate(encoder_cfg)\n self.Q1 = utils.mlp(self.encoder.feature_dim + proprio_obs_shape + action_shape[0], # TODO: change this if sharing weights\n hidden_dim, 1, hidden_depth)\n self.Q2 = utils.mlp(self.encoder.feature_dim + proprio_obs_shape + action_shape[0], # TODO: change this if sharing weights\n hidden_dim, 1, hidden_depth)\n\n\n self.outputs = dict()\n self.apply(utils.weight_init)\n\n def forward(self, obs, action, detach_encoder=False):\n\n if str(self.view) == 'both':\n img_obs1, img_obs3, proprio_obs = obs\n assert img_obs1.size(0) == action.size(0)\n assert img_obs3.size(0) == action.size(0)\n encoder_out1 = self.encoder1(img_obs1, detach=detach_encoder) # TODO: change this if sharing weights\n encoder_out3 = self.encoder3(img_obs3, detach=detach_encoder) # TODO: change this if sharing weights\n obs_out = torch.cat((encoder_out1, encoder_out3, proprio_obs), dim=-1)\n else:\n img_obs, proprio_obs = obs\n assert img_obs.size(0) == action.size(0)\n encoder_out = self.encoder(img_obs, detach=detach_encoder)\n obs_out = torch.cat((encoder_out, proprio_obs), dim=-1)\n\n obs_action = torch.cat([obs_out, action], dim=-1)\n q1 = self.Q1(obs_action)\n q2 = self.Q2(obs_action)\n\n self.outputs['q1'] = q1\n self.outputs['q2'] = q2\n\n return q1, q2\n\n def log(self, logger, step):\n if str(self.view) == 'both':\n self.encoder1.log(logger, step, 1) # TODO: change this if sharing weights\n self.encoder3.log(logger, step, 3) # TODO: change this if sharing weights\n else:\n self.encoder.log(logger, step, 1) # TODO: change this if sharing weights\n\n for k, v in self.outputs.items():\n logger.log_histogram(f'train_critic/{k}_hist', v, step)\n\n assert len(self.Q1) == len(self.Q2)\n for i, (m1, m2) in enumerate(zip(self.Q1, self.Q2)):\n assert type(m1) == type(m2)\n if type(m1) is nn.Linear:\n 
logger.log_param(f'train_critic/q1_fc{i}', m1, step)\n logger.log_param(f'train_critic/q2_fc{i}', m2, step)\n\n\nclass DRQAgent(object):\n \"\"\"Data regularized Q: actor-critic method for learning from pixels.\"\"\"\n def __init__(self, view, obs_shape, proprio_obs_shape, action_shape, action_range, device,\n encoder_cfg, critic_cfg, actor_cfg, discount,\n init_temperature, lr, actor_update_frequency, critic_tau,\n critic_target_update_frequency, batch_size):\n self.view = view\n self.action_range = action_range\n self.device = device\n self.discount = discount\n self.critic_tau = critic_tau\n self.actor_update_frequency = actor_update_frequency\n self.critic_target_update_frequency = critic_target_update_frequency\n self.batch_size = batch_size\n\n self.actor = hydra.utils.instantiate(actor_cfg).to(self.device)\n\n self.critic = hydra.utils.instantiate(critic_cfg).to(self.device)\n self.critic_target = hydra.utils.instantiate(critic_cfg).to(\n self.device)\n self.critic_target.load_state_dict(self.critic.state_dict())\n\n # tie conv layers between actor and critic\n if str(self.view) == 'both':\n self.actor.encoder1.copy_conv_weights_from(self.critic.encoder1) # TODO: change this if sharing weights\n self.actor.encoder3.copy_conv_weights_from(self.critic.encoder3) # TODO: change this if sharing weights\n else:\n self.actor.encoder.copy_conv_weights_from(self.critic.encoder)\n\n self.log_alpha = torch.tensor(np.log(init_temperature)).to(device)\n self.log_alpha.requires_grad = True\n # set target entropy to -|A|\n self.target_entropy = -action_shape[0]\n\n # optimizers\n self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=lr)\n self.critic_optimizer = torch.optim.Adam(self.critic.parameters(),\n lr=lr)\n self.log_alpha_optimizer = torch.optim.Adam([self.log_alpha], lr=lr)\n\n self.train()\n self.critic_target.train()\n\n def train(self, training=True):\n self.training = training\n self.actor.train(training)\n self.critic.train(training)\n\n @property\n def alpha(self):\n return self.log_alpha.exp()\n\n def act(self, obs, sample=False):\n if str(self.view) == 'both':\n img_obs1, img_obs3, proprio_obs = obs\n img_obs1 = torch.FloatTensor(img_obs1).to(self.device)\n img_obs1 = img_obs1.unsqueeze(0)\n img_obs3 = torch.FloatTensor(img_obs3).to(self.device)\n img_obs3 = img_obs3.unsqueeze(0)\n else:\n img_obs, proprio_obs = obs\n img_obs = torch.FloatTensor(img_obs).to(self.device)\n img_obs = img_obs.unsqueeze(0)\n proprio_obs = torch.FloatTensor(proprio_obs).to(self.device)\n proprio_obs = proprio_obs.unsqueeze(0)\n if str(self.view) == 'both':\n obs = img_obs1, img_obs3, proprio_obs\n else:\n obs = img_obs, proprio_obs\n dist = self.actor(obs)\n action = dist.sample() if sample else dist.mean\n action = action.clamp(*self.action_range)\n assert action.ndim == 2 and action.shape[0] == 1\n return utils.to_np(action[0])\n\n def update_critic(self, obs, obs_aug, action, reward, next_obs,\n next_obs_aug, not_done, logger, step):\n with torch.no_grad():\n dist = self.actor(next_obs)\n next_action = dist.rsample()\n log_prob = dist.log_prob(next_action).sum(-1, keepdim=True)\n target_Q1, target_Q2 = self.critic_target(next_obs, next_action)\n target_V = torch.min(target_Q1,\n target_Q2) - self.alpha.detach() * log_prob\n target_Q = reward + (not_done * self.discount * target_V)\n\n dist_aug = self.actor(next_obs_aug)\n next_action_aug = dist_aug.rsample()\n log_prob_aug = dist_aug.log_prob(next_action_aug).sum(-1,\n keepdim=True)\n target_Q1, target_Q2 = 
self.critic_target(next_obs_aug,\n next_action_aug)\n target_V = torch.min(\n target_Q1, target_Q2) - self.alpha.detach() * log_prob_aug\n target_Q_aug = reward + (not_done * self.discount * target_V)\n\n target_Q = (target_Q + target_Q_aug) / 2\n\n # get current Q estimates\n current_Q1, current_Q2 = self.critic(obs, action)\n critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(\n current_Q2, target_Q)\n\n Q1_aug, Q2_aug = self.critic(obs_aug, action)\n\n critic_loss += F.mse_loss(Q1_aug, target_Q) + F.mse_loss(\n Q2_aug, target_Q)\n\n logger.log('train_critic/loss', critic_loss, step)\n\n # Optimize the critic\n self.critic_optimizer.zero_grad()\n critic_loss.backward()\n self.critic_optimizer.step()\n\n self.critic.log(logger, step)\n\n def update_actor_and_alpha(self, obs, logger, step):\n # detach conv filters, so we don't update them with the actor loss\n dist = self.actor(obs, detach_encoder=True)\n action = dist.rsample()\n log_prob = dist.log_prob(action).sum(-1, keepdim=True)\n # detach conv filters, so we don't update them with the actor loss\n actor_Q1, actor_Q2 = self.critic(obs, action, detach_encoder=True)\n\n actor_Q = torch.min(actor_Q1, actor_Q2)\n\n actor_loss = (self.alpha.detach() * log_prob - actor_Q).mean()\n\n logger.log('train_actor/loss', actor_loss, step)\n logger.log('train_actor/target_entropy', self.target_entropy, step)\n logger.log('train_actor/entropy', -log_prob.mean(), step)\n\n # optimize the actor\n self.actor_optimizer.zero_grad()\n actor_loss.backward()\n self.actor_optimizer.step()\n\n self.actor.log(logger, step)\n\n self.log_alpha_optimizer.zero_grad()\n alpha_loss = (self.alpha *\n (-log_prob - self.target_entropy).detach()).mean()\n logger.log('train_alpha/loss', alpha_loss, step)\n logger.log('train_alpha/value', self.alpha, step)\n alpha_loss.backward()\n self.log_alpha_optimizer.step()\n\n def update(self, replay_buffer, logger, step):\n obs, action, reward, next_obs, not_done, obs_aug, next_obs_aug = replay_buffer.sample(\n self.batch_size)\n\n logger.log('train/batch_reward', reward.mean(), step)\n\n self.update_critic(obs, obs_aug, action, reward, next_obs,\n next_obs_aug, not_done, logger, step)\n\n if step % self.actor_update_frequency == 0:\n self.update_actor_and_alpha(obs, logger, step)\n\n if step % self.critic_target_update_frequency == 0:\n utils.soft_update_params(self.critic, self.critic_target,\n self.critic_tau)\n\n def save_checkpoint(self, log_dir, step):\n torch.save(\n {\n 'step': step,\n 'actor_state_dict': self.actor.state_dict(),\n 'critic_state_dict': self.critic.state_dict(),\n 'actor_optimizer_state_dict': self.actor_optimizer.state_dict(),\n 'critic_optimizer_state_dict': self.critic_optimizer.state_dict(),\n 'log_alpha_optimizer_state_dict': self.log_alpha_optimizer.state_dict(),\n },\n os.path.join(log_dir, str(step) + '.ckpt')\n )\n\n def load_checkpoint(self, checkpoint_dir, checkpoint_step):\n checkpoint_path = checkpoint_dir + '/' + str(checkpoint_step) + '.ckpt'\n checkpoint = torch.load(checkpoint_path)\n self.actor.load_state_dict(checkpoint['actor_state_dict'])\n self.critic.load_state_dict(checkpoint['critic_state_dict'])\n self.actor_optimizer.load_state_dict(checkpoint['actor_optimizer_state_dict'])\n self.critic_optimizer.load_state_dict(checkpoint['critic_optimizer_state_dict'])\n self.log_alpha_optimizer.load_state_dict(checkpoint['log_alpha_optimizer_state_dict'])" ]
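A minimal sketch (written for this note, not taken from the repository above; the networks and encoders are elided and the tensor values are toy assumptions) of the target-averaging step that update_critic in the code above performs: one TD target is computed for the original next observation and one for its augmented copy, and the two are averaged before the Bellman backup.

import torch

def averaged_td_target(reward, not_done, discount,
                       target_q, target_q_aug,
                       log_prob, log_prob_aug, alpha):
    # target_q / target_q_aug stand in for min(Q1, Q2) from the target critic,
    # evaluated on the original and the augmented next observation.
    target_v = target_q - alpha * log_prob
    target_v_aug = target_q_aug - alpha * log_prob_aug
    td = reward + not_done * discount * target_v
    td_aug = reward + not_done * discount * target_v_aug
    return 0.5 * (td + td_aug)

# Toy check with hand-picked numbers:
r, nd = torch.tensor([1.0]), torch.tensor([1.0])
tq, tq_aug = torch.tensor([2.0]), torch.tensor([4.0])
lp = torch.tensor([0.0])
print(averaged_td_target(r, nd, 0.99, tq, tq_aug, lp, lp, 0.1))  # tensor([3.9700])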
[ [ "numpy.array", "torch.utils.tensorboard.SummaryWriter" ], [ "torch.optim.Adam", "numpy.log", "torch.load", "torch.cat", "torch.min", "torch.nn.Conv2d", "torch.nn.LayerNorm", "torch.tanh", "torch.nn.Linear", "torch.nn.functional.mse_loss", "torch.no_grad", "torch.FloatTensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
G-Thor/merlin
[ "33fa6e65ddb903ed5633ccb66c74d3e7c128667f", "33fa6e65ddb903ed5633ccb66c74d3e7c128667f" ]
[ "src/logplot/logging_plotting.py", "src/utils/providers.py" ]
[ "################################################################################\n# The Neural Network (NN) based Speech Synthesis System\n# https://svn.ecdf.ed.ac.uk/repo/inf/dnn_tts/\n#\n# Centre for Speech Technology Research\n# University of Edinburgh, UK\n# Copyright (c) 2014-2015\n# All Rights Reserved.\n#\n# The system as a whole and most of the files in it are distributed\n# under the following copyright and conditions\n#\n# Permission is hereby granted, free of charge, to use and distribute\n# this software and its documentation without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of this work, and to\n# permit persons to whom this work is furnished to do so, subject to\n# the following conditions:\n#\n# - Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# - Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n# - The authors' names may not be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THE UNIVERSITY OF EDINBURGH AND THE CONTRIBUTORS TO THIS WORK\n# DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING\n# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT\n# SHALL THE UNIVERSITY OF EDINBURGH NOR THE CONTRIBUTORS BE LIABLE\n# FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN\n# AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,\n# ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF\n# THIS SOFTWARE.\n################################################################################\n\n# NOTES\n# still to consider: pygal, for HTML5 SVG plotting\n\nimport math\nimport string\nimport os\n\n# this module provides the base classes that we specialise here\nimport logging # as logging\n\n# for plotting\nimport matplotlib\n\n# should make this user-configurable - TO DO later\n# this line has to come before the import of matplotlib.pyplot\nmatplotlib.use('PDF')\n\nimport matplotlib.pyplot as plt\nimport pylab\n\nfrom matplotlib.ticker import MultipleLocator, FormatStrFormatter\n\n# matplotlib needs to be passed numpy arrays\nimport numpy\n\n# for sorting tuples\nfrom operator import itemgetter, attrgetter\n\n\n# TO DO - this needs to be attached to the logging module so that it's available via config options\n# class PlotHandler(logging.FileHandler):\n# \"\"\"A handler for saving plots to disk\"\"\"\n# def __init__(self,filename):\n# logging.FileHandler.__init__(self,filename, mode='a', encoding=None, delay=False)\n\n\n\nclass PlotWithData(object):\n # a generic plot object that contains both the underlying data and the plot itself\n # this class needs to be subclassed for each specialised type of plot that we want\n\n # the underlying data for the plot - a dictionary of data series\n # each series is a list of data points of arbitrary type (e.g., tuples, arrays, ..)\n data=None\n # the plot generated from these data\n plot=None\n\n def __init__(self,name):\n # clear the data series\n self.data={}\n\n def add_data_point(self,series_name,data_point):\n # if there is no data series with this name yet, create an empty one\n if series_name not in self.data:\n 
self.data[series_name]=[]\n # append this data point (e.g., it might be a tuple (x,y) )\n # don't worry about data type or sorting - that is not our concern here\n self.data[series_name].append(data_point)\n\n def sort_and_validate(self):\n # only applied if the data points are tuples, such as (x,y) values\n\n # TO DO: first check that each series is a list of tuples, and that they have the same number of elements\n\n # this method checks that all data series\n # 1. have the same length\n # 2. are sorted in ascending order of x\n # 3. have identical values in their x series\n\n logger = logging.getLogger(\"plotting\")\n\n # there has to be at least one data series\n try:\n assert len(self.data) > 0\n except AssertionError:\n logger.critical('No data series found in plot')\n raise\n\n # check lengths are consistent, sort, then check x values are identical\n l=-1\n reference_x=None\n # print \"starting with self.data=\",self.data\n for series_name,data_points in self.data.items():\n if l > 0:\n assert l == len(data_points)\n else:\n l = len(data_points)\n # sort by ascending x value\n data_points.sort(key=itemgetter(0))\n\n if reference_x:\n assert reference_x == [seq[0] for seq in data_points]\n else:\n # extract a list of just the x values\n reference_x = [seq[0] for seq in data_points]\n\n\n # print \"ending with self.data=\",self.data\n\n def generate_plot(self,**kwargs):\n logger = logging.getLogger(\"plotting\")\n logger.error('Cannot generate a plot from abstract class: PlotWithData' )\n # raise an exception here?\n\nclass MultipleSeriesPlot(PlotWithData):\n\n def generate_plot(self,filename,title='',xlabel='',ylabel='',xlim=None,ylim=None):\n\n logger = logging.getLogger(\"plotting\")\n logger.debug('MultipleSeriesPlot.generate_plot')\n\n # a plot with one or more time series sharing a common x axis:\n # e.g., the training error and the validation error plotted against epochs\n\n # sort the data series and make sure they are consistent\n self.sort_and_validate()\n\n # if there is a plot already in existence, we will clear it and re-use it;\n # this avoids creating extraneous figures which will stay in memory\n # (even if we are no longer referencing them)\n if self.plot:\n self.plot.clf()\n else:\n # create a plot\n self.plot = plt.figure()\n\n splt = self.plot.add_subplot(1, 1, 1)\n splt.set_title(title)\n splt.set_xlabel(xlabel)\n splt.set_ylabel(ylabel)\n\n if xlim:\n pylab.xlim(xlim)\n if ylim:\n pylab.ylim(ylim)\n\n for series_name,data_points in self.data.items():\n xpoints=numpy.asarray([seq[0] for seq in data_points])\n ypoints=numpy.asarray([seq[1] for seq in data_points])\n line, = splt.plot(xpoints, ypoints, '-', linewidth=2)\n logger.debug('set_label for %s' % series_name)\n line.set_label(series_name)\n\n splt.legend()\n\n # TO DO - better filename configuration for plots\n self.plot.savefig(filename)\n\nclass SingleWeightMatrixPlot(PlotWithData):\n\n def generate_plot(self, filename, title='', xlabel='', ylabel=''):\n\n data_keys = list(self.data.keys())\n key_num = len(data_keys)\n\n self.plot = plt.figure()\n if key_num == 1:\n splt = self.plot.add_subplot(1, 1, 1)\n im_data = splt.imshow(numpy.flipud(self.data[data_keys[0]][0]), origin='lower')\n splt.set_xlabel(xlabel)\n splt.set_ylabel(ylabel)\n splt.set_title(title)\n else: ## still plotting multiple image in one figure still has problem. 
the visualization is not good\n logger = logging.getLogger(\"plotting\")\n logger.error('plotting multiple weight matrices in one figure is not supported yet')\n return\n\n self.plot.colorbar(im_data)\n self.plot.savefig(filename) #, bbox_inches='tight'\n\n#class MultipleLinesPlot(PlotWithData):\n# def generate_plot(self, filename, title='', xlabel='', ylabel=''):\n\nclass LoggerPlotter(logging.getLoggerClass()):\n \"\"\"Based on the built-in logging class, with added capabilities including plotting\"\"\"\n\n # a dictionary to store all generated plots\n # keys are plot names\n # values are\n plots = {}\n # where the plots will be saved - a directory\n plot_path='/tmp' # default location\n\n def __init__(self,name):\n # initialise the logging parent class\n # (should really use 'super' here I think, but that fails - perhaps because the built in logger class is not derived from 'object' ?)\n logging.Logger.__init__(self,name)\n\n def set_plot_path(self,path):\n self.plot_path = path\n\n def remove_all_plots(self):\n self.plots={}\n\n def create_plot(self,plot_name,plot_object):\n self.plots[plot_name] = plot_object(plot_name)\n\n def add_plot_point(self,plot_name,series_name,data_point):\n # add a data point to a named plot\n if plot_name not in self.plots:\n self.plots[plot_name] = PlotWithData(plot_name)\n self.plots[plot_name].add_data_point(series_name,data_point)\n\n def save_plot(self,plot_name,**kwargs):\n logger = logging.getLogger(\"plotting\")\n if plot_name not in self.plots:\n logger.warning('Tried to generate a plot called %s that does not exist' % plot_name)\n # raise an exception here?\n else:\n # # the filename to save to is known by the handler, which needs to be assigned to this logger\n # # look at the handlers attached to this logger instance\n # ph=None\n # for h in self.handlers:\n # # we want an instance of a PlotHandler - we'll take the first one we find\n # # (behaviour will be unpredictable if there is more than one handler of this type)\n # if isinstance(h,PlotHandler):\n # ph=h\n # break\n # if ph:\n # TO DO - need to be sure of safe file names\n if not os.path.isdir(self.plot_path):\n os.makedirs(self.plot_path)\n filename = self.plot_path + \"/\" + plot_name.replace(\" \", \"_\") + \".pdf\"\n logger.info('Generating a plot in file %s' % filename)\n self.plots[plot_name].generate_plot(filename,**kwargs)\n # else:\n # logger.warn('No handler of type PlotHandler is attached to this logger - cannot save plots')\n\n\n\n\nclass ColouredFormatter(logging.Formatter):\n\n # colourising formatter adapted from an answer to this question on Stack Overflow\n # http://stackoverflow.com/questions/384076/how-can-i-color-python-logging-output\n\n BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = list(range(8))\n\n COLOURS = {\n 'DEBUG': BLUE,\n 'INFO': GREEN,\n 'WARNING': YELLOW,\n 'ERROR': RED,\n 'CRITICAL': MAGENTA\n }\n\n max_level_name_width = '8'\n\n # terminal escape sequences\n RESET_SEQ = \"\\033[0m\"\n COLOUR_SEQ = \"\\033[1;%dm\"\n BOLD_SEQ = \"\\033[1m\"\n\n def format(self, record):\n if record.levelname in self.COLOURS:\n # pad to fixed width - currently hardwired, should make this dynamic\n # maximum width of level names, which is the 8 characters of \"CRITICAL\"\n fixed_width_levelname = '{0:8s}'.format(record.levelname)\n record.name = '{0:8s}'.format(record.name)\n # The background is set with 40 plus the number of the color, and the foreground with 30\n record.levelname = self.COLOUR_SEQ % (30 + self.COLOURS[record.levelname]) + fixed_width_levelname + self.RESET_SEQ\n return logging.Formatter.format(self, record)\n\n @staticmethod\n def factory(fmt, datefmt):\n 
return ColouredFormatter(fmt, datefmt)\n\nif __name__ == '__main__':\n # some simple tests\n\n # tell the built-in logger module to use our custom class when instantiating any new logger\n logging.setLoggerClass(LoggerPlotter)\n\n\n logger = logging.getLogger(\"test_logger\")\n logger.setLevel(logging.DEBUG)\n\n # a console handler\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n formatter = ColouredFormatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\n\n\n print(\"testing the logging code\")\n logger.debug('A DEBUG message')\n logger.info('An INFO message')\n logger.warning('A WARN message')\n logger.error('An ERROR message')\n logger.critical('A CRITICAL message')\n\n\n plotlogger = logging.getLogger(\"plotting\")\n plotlogger.setLevel(logging.DEBUG)\n # handler for plotting logger - will write only to console\n plotlogger.addHandler(ch)\n\n\n # # need a handler which will control where to save plots\n # ph = PlotHandler(\"/tmp/plot_test/testing.pdf\")\n # logger.addHandler(ph)\n\n\n print(\"testing the plotting code\")\n\n # the first argument is just a key for referring to this plot within the code\n # the second argument says what kind of plot we will be making\n\n\n plotlogger.set_plot_path(\"./tmp\")\n\n logger.create_plot('test plot',MultipleSeriesPlot)\n\n plotlogger.add_plot_point('test plot','validation',(1,4))\n plotlogger.add_plot_point('test plot','validation',(3,2))\n plotlogger.add_plot_point('test plot','validation',(2,3))\n plotlogger.add_plot_point('test plot','validation',(4,3))\n\n plotlogger.add_plot_point('test plot','training',(1,3))\n plotlogger.add_plot_point('test plot','training',(3,1))\n plotlogger.add_plot_point('test plot','training',(2,2))\n plotlogger.add_plot_point('test plot','training',(4,4))\n\n plotlogger.save_plot('test plot',title='Training and validation error',xlabel='epochs',ylabel='error')\n\n weights = [[1, 2, 3, 3], [1, 1, 2, 1], [2, 1, 2, 2]]\n logger.create_plot('activation weight', SingleWeightMatrixPlot)\n plotlogger.add_plot_point('activation weight', 'weight1', weights)\n plotlogger.add_plot_point('activation weight', 'weight2', weights)\n plotlogger.add_plot_point('activation weight', 'weight3', weights)\n\n plotlogger.save_plot('activation weight', title='weight', xlabel='dimension', ylabel='dimension')\n", "################################################################################\n# The Neural Network (NN) based Speech Synthesis System\n# https://svn.ecdf.ed.ac.uk/repo/inf/dnn_tts/\n#\n# Centre for Speech Technology Research\n# University of Edinburgh, UK\n# Copyright (c) 2014-2015\n# All Rights Reserved.\n#\n# The system as a whole and most of the files in it are distributed\n# under the following copyright and conditions\n#\n# Permission is hereby granted, free of charge, to use and distribute\n# this software and its documentation without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of this work, and to\n# permit persons to whom this work is furnished to do so, subject to\n# the following conditions:\n#\n# - Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# - Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials 
provided\n# with the distribution.\n# - The authors' names may not be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THE UNIVERSITY OF EDINBURGH AND THE CONTRIBUTORS TO THIS WORK\n# DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING\n# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT\n# SHALL THE UNIVERSITY OF EDINBURGH NOR THE CONTRIBUTORS BE LIABLE\n# FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN\n# AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,\n# ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF\n# THIS SOFTWARE.\n################################################################################\n\nimport os, sys\nimport numpy, theano, random\nfrom io_funcs.binary_io import BinaryIOCollection\nimport logging\nfrom frontend.label_normalisation import HTSLabelNormalisation\n\nclass ListDataProvider(object):\n \"\"\" This class provides an interface to load data into CPU/GPU memory utterance by utterance or block by block.\n\n In speech synthesis, we are usually not able to load all the training/evaluation data into RAM, so we do the following three steps:\n\n - Step 1: a data provider loads part of the data into a buffer\n\n - Step 2: a DNN is trained using the data from the buffer\n\n - Step 3: Iterate steps 1 and 2 until all the data have been used for DNN training. At that point, one epoch of DNN training is finished.\n\n The utterance-by-utterance data loading will be useful when sequential training is used, while block-by-block loading will be used when the order of frames is not important.\n\n This provider assumes binary format with float32 precision without any header (e.g. HTK header).\n\n \"\"\"\n def __init__(self, x_file_list, y_file_list, dur_file_list=None, n_ins=0, n_outs=0, buffer_size=500000, sequential=False, network_type=None, shuffle=False):\n \"\"\"Initialise a data provider\n\n :param x_file_list: list of file names for the input files to DNN\n :type x_file_list: python list\n :param y_file_list: list of file names for the output files to DNN\n :param n_ins: the dimensionality of the input features\n :param n_outs: the dimensionality of the output features\n :param buffer_size: the size of the buffer, indicating the number of frames in the buffer. The value depends on the memory size of RAM/GPU.\n :param shuffle: True/False. To indicate whether the file list will be shuffled. 
When loading data block by block, the data in the buffer will be shuffled regardless of this value.\n \"\"\"\n\n self.logger = logging.getLogger(\"ListDataProvider\")\n\n self.n_ins = n_ins\n self.n_outs = n_outs\n\n self.buffer_size = buffer_size\n\n self.sequential = sequential\n self.network_type = network_type\n\n self.rnn_batch_training = False\n self.reshape_io = False\n\n #remove potential empty lines and end of line signs\n\n try:\n assert len(x_file_list) > 0\n except AssertionError:\n self.logger.critical('first list is empty')\n raise\n\n try:\n assert len(y_file_list) > 0\n except AssertionError:\n self.logger.critical('second list is empty')\n raise\n\n try:\n assert len(x_file_list) == len(y_file_list)\n except AssertionError:\n self.logger.critical('two lists are of differing lengths: %d versus %d',len(x_file_list),len(y_file_list))\n raise\n\n if dur_file_list:\n try:\n assert len(x_file_list) == len(dur_file_list)\n except AssertionError:\n self.logger.critical('two lists are of differing lengths: %d versus %d',len(x_file_list),len(dur_file_list))\n raise\n\n self.x_files_list = x_file_list\n self.y_files_list = y_file_list\n self.dur_files_list = dur_file_list\n\n self.logger.debug('first list of items from ...%s to ...%s' % (self.x_files_list[0].rjust(20)[-20:],self.x_files_list[-1].rjust(20)[-20:]) )\n self.logger.debug('second list of items from ...%s to ...%s' % (self.y_files_list[0].rjust(20)[-20:],self.y_files_list[-1].rjust(20)[-20:]) )\n\n if shuffle:\n random.seed(271638)\n random.shuffle(self.x_files_list)\n random.seed(271638)\n random.shuffle(self.y_files_list)\n if self.dur_files_list:\n random.seed(271638)\n random.shuffle(self.dur_files_list)\n\n self.file_index = 0\n self.list_size = len(self.x_files_list)\n\n self.remain_data_x = numpy.empty((0, self.n_ins))\n self.remain_data_y = numpy.empty((0, self.n_outs))\n self.remain_frame_number = 0\n\n self.end_reading = False\n\n self.logger.debug('initialised')\n\n def __iter__(self):\n return self\n\n def reset(self):\n \"\"\"When all the files in the file list have been used for DNN training, reset the data provider to start a new epoch.\n\n \"\"\"\n self.file_index = 0\n self.end_reading = False\n\n self.remain_frame_number = 0\n \n self.bucket_index = 0\n self.bucket_file_index = 0\n self.current_bucket_size = 0\n\n self.logger.debug('reset')\n\n def make_shared(self, data_set, data_name):\n \"\"\"To make data shared for theano implementation. 
If you want to know why we make it shared, please refer the theano documentation: http://deeplearning.net/software/theano/library/compile/shared.html\n\n :param data_set: normal data in CPU memory\n :param data_name: indicate the name of the data (e.g., 'x', 'y', etc)\n :returns: shared dataset -- data_set\n \"\"\"\n data_set = theano.shared(numpy.asarray(data_set, dtype=theano.config.floatX), name=data_name, borrow=True)\n\n return data_set\n\n def set_rnn_params(self, training_algo=1, batch_size=25, seq_length=200, merge_size=1, bucket_range=100):\n # get file lengths\n self.get_file_lengths()\n\n # set training algo\n self.training_algo = training_algo\n\n # set batch size\n self.batch_size = batch_size\n\n # set RNN batch training True\n self.rnn_batch_training = True\n\n # set params for each training algo\n if(self.training_algo == 1):\n self.merge_size = 1\n elif(self.training_algo == 2):\n self.merge_size = 1\n self.bucket_index = 0\n self.bucket_file_index = 0\n self.current_bucket_size = 0\n self.bucket_range = bucket_range\n self.x_frame_list = numpy.array(list(self.file_length_dict['framenum2utt'].keys()))\n self.list_of_buckets = list(range(min(self.x_frame_list), max(self.x_frame_list)+1, self.bucket_range))\n elif(self.training_algo == 3):\n self.seq_length = seq_length\n self.merge_size = merge_size\n else:\n self.logger.critical(\"Choose training algorithm for batch training with RNNs:\")\n self.logger.critical(\"1. Padding model -- pad utterances with zeros to maximum sequence length\")\n self.logger.critical(\"2. Bucket model -- form buckets with minimum and maximum sequence length\")\n self.logger.critical(\"3. Split model -- split utterances to a fixed sequence length\")\n sys.exit(1)\n\n def reshape_input_output(self):\n self.reshape_io = True\n\n def get_file_lengths(self):\n io_funcs = BinaryIOCollection()\n\n self.file_length_dict = {'framenum2utt':{}, 'utt2framenum':{}, 'utt2index':{}}\n\n ### read file by file ###\n while True:\n if self.file_index >= self.list_size:\n self.end_reading = True\n self.file_index = 0\n break\n\n in_features, lab_frame_number = io_funcs.load_binary_file_frame(self.x_files_list[self.file_index], self.n_ins)\n out_features, out_frame_number = io_funcs.load_binary_file_frame(self.y_files_list[self.file_index], self.n_outs)\n \n base_file_name = os.path.basename(self.x_files_list[self.file_index]).split('.')[0]\n if abs(lab_frame_number - out_frame_number) < 5: ## we allow small difference here. 
may not be correct, but sometimes, there is one/two frames difference\n frame_number = min(lab_frame_number, out_frame_number)\n else:\n self.logger.critical(\"the number of frames in label and acoustic features are different: %d vs %d (%s)\" %(lab_frame_number, out_frame_number, base_file_name))\n raise ValueError('frame number mismatch between label and acoustic features')\n\n if frame_number not in self.file_length_dict['framenum2utt']:\n self.file_length_dict['framenum2utt'][frame_number] = [base_file_name]\n else:\n self.file_length_dict['framenum2utt'][frame_number].append(base_file_name)\n\n self.file_length_dict['utt2framenum'][base_file_name] = frame_number\n self.file_length_dict['utt2index'][base_file_name] = self.file_index\n self.file_index += 1\n\n self.reset()\n\n def set_seq_length_from_current_batch(self):\n temp_list = []\n for indx in range(self.batch_size):\n if self.file_index+indx >= self.list_size:\n break\n base_file_name = os.path.basename(self.x_files_list[self.file_index+indx]).split('.')[0]\n temp_list.append(self.file_length_dict['utt2framenum'][base_file_name])\n\n self.seq_length = max(temp_list)\n\n def get_next_bucket(self):\n min_seq_length = self.list_of_buckets[self.bucket_index]\n max_seq_length = self.list_of_buckets[self.bucket_index] + self.bucket_range\n \n current_bucket = self.x_frame_list[(self.x_frame_list >= min_seq_length) & (self.x_frame_list < max_seq_length)]\n self.current_bucket_list = sum([self.file_length_dict['framenum2utt'][framenum] for framenum in current_bucket], [])\n \n self.bucket_file_index = 0 \n self.current_bucket_size = len(self.current_bucket_list)\n \n self.seq_length = max_seq_length\n self.bucket_index = self.bucket_index + 1\n\n def set_s2s_division(self, linguistic_feats_file=None, frame_length=4):\n self.MLU_div = {}\n in_f = open(linguistic_feats_file, 'r')\n for newline in in_f.readlines():\n temp_list = newline.strip().split()\n unit = temp_list[0]\n feat1 = temp_list[1][1:-1].split('-')\n feat2 = temp_list[2][1:-1].split('-')\n\n self.MLU_div[unit] = [int(feat1[0]), int(feat1[1]), int(feat2[0]), int(feat2[1])]\n \n syl_length = (self.MLU_div['syl'][1] - self.MLU_div['syl'][0])+ (self.MLU_div['syl'][3] - self.MLU_div['syl'][2])\n phone_length = (self.MLU_div['phone'][1] - self.MLU_div['phone'][0]) + (self.MLU_div['phone'][3] - self.MLU_div['phone'][2])\n self.MLU_div['length'] = [0, syl_length, syl_length+phone_length, syl_length+phone_length+frame_length]\n\n return self.MLU_div\n\n def load_one_partition(self):\n if self.sequential == True:\n if not self.network_type or self.network_type==\"RNN\":\n if self.rnn_batch_training:\n shared_set_xy, temp_set_x, temp_set_y = self.load_next_batch()\n else:\n shared_set_xy, temp_set_x, temp_set_y = self.load_next_utterance()\n elif self.network_type==\"CTC\":\n shared_set_xy, temp_set_x, temp_set_y = self.load_next_utterance_CTC()\n elif self.network_type==\"S2S\":\n shared_set_xyd, temp_set_x, temp_set_y, temp_set_d, temp_set_af = self.load_next_utterance_S2SML()\n return shared_set_xyd, temp_set_x, temp_set_y, temp_set_d, temp_set_af\n else:\n self.logger.critical(\"Unknown network type: %s \\n Please use one of the following: DNN, RNN, S2S, CTC\\n\" %(self.network_type))\n sys.exit(1)\n else:\n shared_set_xy, temp_set_x, temp_set_y = self.load_next_partition()\n\n return shared_set_xy, temp_set_x, temp_set_y\n\n def load_next_batch(self):\n io_funcs = BinaryIOCollection()\n\n ## set sequence length for batch training \n if(self.training_algo == 1):\n # set seq length to maximum seq length from current batch\n 
self.set_seq_length_from_current_batch()\n elif(self.training_algo == 2):\n # set seq length to maximum seq length from current bucket\n while not self.current_bucket_size:\n self.get_next_bucket()\n elif(self.training_algo == 3):\n # seq length is set based on default/user configuration \n pass;\n \n temp_set_x = numpy.zeros((self.buffer_size, self.n_ins))\n temp_set_y = numpy.zeros((self.buffer_size, self.n_outs))\n\n ### read file by file ###\n current_index = 0\n while True:\n if current_index >= self.buffer_size:\n print('buffer size reached by file index %d' %(self.file_index))\n break\n\n if self.training_algo == 2:\n # choose utterance from current bucket list\n base_file_name = self.current_bucket_list[self.bucket_file_index]\n self.utt_index = self.file_length_dict['utt2index'][base_file_name] \n else: \n # choose utterance randomly from current file list \n #self.utt_index = numpy.random.randint(self.list_size)\n ## choose utterance in serial order\n self.utt_index = self.file_index \n base_file_name = os.path.basename(self.x_files_list[self.utt_index]).split('.')[0]\n\n in_features, lab_frame_number = io_funcs.load_binary_file_frame(self.x_files_list[self.utt_index], self.n_ins)\n out_features, out_frame_number = io_funcs.load_binary_file_frame(self.y_files_list[self.utt_index], self.n_outs)\n \n frame_number = self.file_length_dict['utt2framenum'][base_file_name]\n\n temp_set_x[current_index:current_index+frame_number, ] = in_features\n temp_set_y[current_index:current_index+frame_number, ] = out_features\n current_index += frame_number\n\n if((self.file_index+1)%self.merge_size == 0):\n num_of_samples = int(numpy.ceil(float(current_index)/float(self.seq_length)))\n current_index = self.seq_length * num_of_samples\n \n self.file_index += 1\n \n # break for any of the below conditions\n if self.training_algo == 2:\n self.bucket_file_index += 1\n if(self.bucket_file_index >= self.current_bucket_size):\n self.current_bucket_size = 0\n break;\n if(self.bucket_file_index%self.batch_size==0):\n break;\n else: \n if(self.file_index%self.batch_size==0) or (self.file_index >= self.list_size):\n break\n \n if self.file_index >= self.list_size:\n self.end_reading = True\n self.file_index = 0\n \n num_of_samples = int(numpy.ceil(float(current_index)/float(self.seq_length)))\n\n temp_set_x = temp_set_x[0: num_of_samples*self.seq_length, ]\n temp_set_y = temp_set_y[0: num_of_samples*self.seq_length, ]\n \n temp_set_x = temp_set_x.reshape(num_of_samples, self.seq_length, self.n_ins)\n temp_set_y = temp_set_y.reshape(num_of_samples, self.seq_length, self.n_outs)\n\n shared_set_x = self.make_shared(temp_set_x, 'x')\n shared_set_y = self.make_shared(temp_set_y, 'y')\n\n shared_set_xy = (shared_set_x, shared_set_y)\n\n return shared_set_xy, temp_set_x, temp_set_y\n \n def load_next_utterance(self):\n \"\"\"Load the data for one utterance. This function will be called when utterance-by-utterance loading is required (e.g., sequential training).\n\n \"\"\"\n\n temp_set_x = numpy.empty((self.buffer_size, self.n_ins))\n temp_set_y = numpy.empty((self.buffer_size, self.n_outs))\n\n io_fun = BinaryIOCollection()\n\n in_features, lab_frame_number = io_fun.load_binary_file_frame(self.x_files_list[self.file_index], self.n_ins)\n out_features, out_frame_number = io_fun.load_binary_file_frame(self.y_files_list[self.file_index], self.n_outs)\n\n frame_number = lab_frame_number\n if abs(lab_frame_number - out_frame_number) < 5: ## we allow small difference here. 
may not be correct, but sometimes, there is one/two frames difference\n if lab_frame_number > out_frame_number:\n frame_number = out_frame_number\n else:\n base_file_name = os.path.basename(self.x_files_list[self.file_index]).split('.')[0]\n self.logger.critical(\"the number of frames in label and acoustic features are different: %d vs %d (%s)\" %(lab_frame_number, out_frame_number, base_file_name))\n raise ValueError('frame number mismatch between label and acoustic features')\n\n temp_set_y = out_features[0:frame_number, ]\n temp_set_x = in_features[0:frame_number, ]\n\n self.file_index += 1\n\n if self.file_index >= self.list_size:\n self.end_reading = True\n self.file_index = 0\n \n # reshape input-output\n if self.reshape_io:\n temp_set_x = numpy.reshape(temp_set_x, (1, temp_set_x.shape[0], self.n_ins))\n temp_set_y = numpy.reshape(temp_set_y, (1, temp_set_y.shape[0], self.n_outs))\n \n temp_set_x = numpy.array(temp_set_x, 'float32')\n temp_set_y = numpy.array(temp_set_y, 'float32')\n\n shared_set_x = self.make_shared(temp_set_x, 'x')\n shared_set_y = self.make_shared(temp_set_y, 'y')\n\n shared_set_xy = (shared_set_x, shared_set_y)\n\n return shared_set_xy, temp_set_x, temp_set_y\n\n def load_next_utterance_S2S(self):\n \"\"\"Load the data for one utterance. This function will be called when utterance-by-utterance loading is required (e.g., sequential training).\n\n \"\"\"\n\n temp_set_x = numpy.empty((self.buffer_size, self.n_ins))\n temp_set_y = numpy.empty((self.buffer_size, self.n_outs))\n\n io_fun = BinaryIOCollection()\n\n in_features, lab_frame_number = io_fun.load_binary_file_frame(self.x_files_list[self.file_index], self.n_ins)\n out_features, out_frame_number = io_fun.load_binary_file_frame(self.y_files_list[self.file_index], self.n_outs)\n\n temp_set_x = in_features[0:lab_frame_number, ]\n temp_set_y = out_features[0:out_frame_number, ]\n\n if not self.dur_files_list:\n dur_frame_number = out_frame_number\n dur_features = numpy.array([dur_frame_number])\n else:\n dur_features, dur_frame_number = io_fun.load_binary_file_frame(self.dur_files_list[self.file_index], 1)\n assert sum(dur_features) == out_frame_number\n \n dur_features = numpy.reshape(dur_features, (-1, ))\n temp_set_d = dur_features.astype(int) \n \n self.file_index += 1\n\n if self.file_index >= self.list_size:\n self.end_reading = True\n self.file_index = 0\n\n shared_set_x = self.make_shared(temp_set_x, 'x')\n shared_set_y = self.make_shared(temp_set_y, 'y')\n shared_set_d = theano.shared(numpy.asarray(temp_set_d, dtype='int32'), name='d', borrow=True)\n\n shared_set_xyd = (shared_set_x, shared_set_y, shared_set_d)\n\n return shared_set_xyd, temp_set_x, temp_set_y, temp_set_d\n\n def load_next_utterance_S2SML(self):\n \"\"\"Load the data for one utterance. 
This function will be called when utterance-by-utterance loading is required (e.g., sequential training).\n \n \"\"\"\n \n io_fun = BinaryIOCollection()\n\n in_features, lab_frame_number = io_fun.load_binary_file_frame(self.x_files_list[self.file_index], self.n_ins)\n out_features, out_frame_number = io_fun.load_binary_file_frame(self.y_files_list[self.file_index], self.n_outs)\n dur_features, dur_frame_number = io_fun.load_binary_file_frame(self.dur_files_list[self.file_index], 1)\n \n ### MLU features sub-division ###\n temp_set_MLU = in_features[0:lab_frame_number, ]\n temp_set_y = out_features[0:out_frame_number, ]\n \n temp_set_phone = numpy.concatenate([temp_set_MLU[:, self.MLU_div['phone'][0]: self.MLU_div['phone'][1]], temp_set_MLU[:, self.MLU_div['phone'][2]: self.MLU_div['phone'][3]]], axis = 1)\n temp_set_syl = numpy.concatenate([temp_set_MLU[:, self.MLU_div['syl'][0]: self.MLU_div['syl'][1]], temp_set_MLU[:, self.MLU_div['syl'][2]: self.MLU_div['syl'][3]]], axis = 1)\n temp_set_word = numpy.concatenate([temp_set_MLU[:, self.MLU_div['word'][0]: self.MLU_div['word'][1]], temp_set_MLU[:, self.MLU_div['word'][2]: self.MLU_div['word'][3] ]], axis = 1)\n \n ### duration array sub-division ###\n dur_features = numpy.reshape(dur_features, (-1, ))\n temp_set_d = dur_features.astype(int) \n dur_word_syl = temp_set_d[0: -lab_frame_number] \n \n num_ph = lab_frame_number\n num_syl = (numpy.where(numpy.cumsum(dur_word_syl[::-1])==lab_frame_number)[0][0] + 1)\n num_words = len(dur_word_syl) - num_syl \n \n temp_set_dur_phone = temp_set_d[-num_ph:] \n temp_set_dur_word = dur_word_syl[0: num_words]\n temp_set_dur_syl = dur_word_syl[num_words: ]\n \n ### additional feature matrix (syllable+phone+frame=432) ###\n num_frames = sum(temp_set_dur_phone)\n temp_set_af = numpy.empty((num_frames, self.MLU_div['length'][-1]))\n \n temp_set_af[0: num_syl, self.MLU_div['length'][0]: self.MLU_div['length'][1] ] = temp_set_syl[numpy.cumsum(temp_set_dur_syl)-1]\n temp_set_af[0: num_ph, self.MLU_div['length'][1]: self.MLU_div['length'][2]] = temp_set_phone\n \n ### input word feature matrix ###\n temp_set_dur_word_segments = numpy.zeros(num_words, dtype='int32')\n syl_bound = numpy.cumsum(temp_set_dur_word)\n for indx in range(num_words):\n temp_set_dur_word_segments[indx] = int(sum(temp_set_dur_syl[0: syl_bound[indx]]))\n temp_set_x = temp_set_word[temp_set_dur_word_segments-1]\n \n ### rest of the code similar to S2S ###\n self.file_index += 1\n\n if self.file_index >= self.list_size:\n self.end_reading = True\n self.file_index = 0\n\n shared_set_x = self.make_shared(temp_set_x, 'x')\n shared_set_y = self.make_shared(temp_set_y, 'y')\n shared_set_d = theano.shared(numpy.asarray(temp_set_d, dtype='int32'), name='d', borrow=True)\n\n shared_set_xyd = (shared_set_x, shared_set_y, shared_set_d)\n \n return shared_set_xyd, temp_set_x, temp_set_y, temp_set_d, temp_set_af\n\n def load_next_batch_S2S(self):\n \"\"\"Load the data for one batch of utterances. 
This function will be called when batch loading is required (e.g., sequential training).\n \n \"\"\"\n\n temp_set_x = numpy.empty((self.buffer_size, self.n_ins))\n temp_set_y = numpy.empty((self.buffer_size, self.n_outs))\n temp_set_d = numpy.empty((self.buffer_size, 1))\n\n io_fun = BinaryIOCollection()\n\n lab_start_frame_number = 0\n lab_end_frame_number = 0\n\n out_start_frame_number = 0\n out_end_frame_number = 0\n\n new_x_files_list = self.x_files_list[self.file_index].split(',')\n new_y_files_list = self.y_files_list[self.file_index].split(',')\n new_dur_files_list = self.dur_files_list[self.file_index].split(',')\n\n for new_file_index in range(len(new_x_files_list)):\n in_features, lab_frame_number = io_fun.load_binary_file_frame(new_x_files_list[new_file_index], self.n_ins)\n out_features, out_frame_number = io_fun.load_binary_file_frame(new_y_files_list[new_file_index], self.n_outs)\n \n lab_end_frame_number+=lab_frame_number\n out_end_frame_number+=out_frame_number\n\n temp_set_x[lab_start_frame_number: lab_end_frame_number, ] = in_features[0:lab_frame_number, ]\n temp_set_y[out_start_frame_number: out_end_frame_number, ] = out_features[0:out_frame_number, ]\n if not self.dur_files_list:\n dur_frame_number = out_end_frame_number\n temp_set_d = numpy.array([dur_frame_number])\n else:\n dur_features, dur_frame_number = io_fun.load_binary_file_frame(new_dur_files_list[new_file_index], 1)\n assert sum(dur_features) == out_frame_number\n temp_set_d[lab_start_frame_number: lab_end_frame_number, ] = dur_features[0:lab_frame_number, ]\n\n lab_start_frame_number = lab_end_frame_number\n out_start_frame_number = out_end_frame_number\n\n temp_set_x = temp_set_x[0:lab_end_frame_number, ]\n temp_set_y = temp_set_y[0:out_end_frame_number, ]\n\n temp_set_d = temp_set_d[0:lab_end_frame_number, ]\n temp_set_d = numpy.reshape(temp_set_d, (-1, ))\n temp_set_d = temp_set_d.astype(int) \n \n self.file_index += 1\n\n if self.file_index >= self.list_size:\n self.end_reading = True\n self.file_index = 0\n\n shared_set_x = self.make_shared(temp_set_x, 'x')\n shared_set_y = self.make_shared(temp_set_y, 'y')\n shared_set_d = theano.shared(numpy.asarray(temp_set_d, dtype='int32'), name='d', borrow=True)\n\n shared_set_xyd = (shared_set_x, shared_set_y, shared_set_d)\n\n return shared_set_xyd, temp_set_x, temp_set_y, temp_set_d\n\n def load_next_batch_S2SML(self):\n \"\"\"Load the data for one batch of utterances. 
This function will be called when batch loading is required (e.g., sequential training).\n \n \"\"\"\n \n inp_length = (self.MLU_div['word'][1] - self.MLU_div['word'][0]) + (self.MLU_div['word'][3] - self.MLU_div['word'][2])\n af_length = self.MLU_div['length'][-1]\n\n new_temp_set_x = numpy.empty((self.buffer_size, inp_length))\n new_temp_set_y = numpy.empty((self.buffer_size, self.n_outs))\n new_temp_set_af = numpy.empty((self.buffer_size, af_length))\n new_temp_set_d = [numpy.array([], 'int32'),numpy.array([], 'int32'),numpy.array([], 'int32')]\n\n io_fun = BinaryIOCollection()\n\n lab_start_frame_number = 0\n lab_end_frame_number = 0\n\n out_start_frame_number = 0\n out_end_frame_number = 0\n\n new_x_files_list = self.x_files_list[self.file_index].split(',')\n new_y_files_list = self.y_files_list[self.file_index].split(',')\n new_dur_files_list = self.dur_files_list[self.file_index].split(',')\n\n for new_file_index in range(len(new_x_files_list)):\n in_features, lab_frame_number = io_fun.load_binary_file_frame(new_x_files_list[new_file_index], self.n_ins)\n out_features, out_frame_number = io_fun.load_binary_file_frame(new_y_files_list[new_file_index], self.n_outs)\n dur_features, dur_frame_number = io_fun.load_binary_file_frame(new_dur_files_list[new_file_index], 1)\n \n ### MLU features sub-division ###\n temp_set_MLU = in_features[0:lab_frame_number, ]\n temp_set_y = out_features[0:out_frame_number, ]\n \n temp_set_phone = numpy.concatenate([temp_set_MLU[:, self.MLU_div['phone'][0]: self.MLU_div['phone'][1]], temp_set_MLU[:, self.MLU_div['phone'][2]: self.MLU_div['phone'][3]]], axis = 1)\n temp_set_syl = numpy.concatenate([temp_set_MLU[:, self.MLU_div['syl'][0]: self.MLU_div['syl'][1]], temp_set_MLU[:, self.MLU_div['syl'][2]: self.MLU_div['syl'][3]]], axis = 1)\n temp_set_word = numpy.concatenate([temp_set_MLU[:, self.MLU_div['word'][0]: self.MLU_div['word'][1]], temp_set_MLU[:, self.MLU_div['word'][2]: self.MLU_div['word'][3] ]], axis = 1)\n \n ### duration array sub-division ###\n dur_features = numpy.reshape(dur_features, (-1, ))\n temp_set_d = dur_features.astype(int) \n dur_word_syl = temp_set_d[0: -lab_frame_number] \n \n num_ph = lab_frame_number\n num_syl = (numpy.where(numpy.cumsum(dur_word_syl[::-1])==lab_frame_number)[0][0] + 1)\n num_words = len(dur_word_syl) - num_syl \n \n temp_set_dur_phone = temp_set_d[-num_ph:] \n temp_set_dur_word = dur_word_syl[0: num_words]\n temp_set_dur_syl = dur_word_syl[num_words: ]\n \n ### additional feature matrix (syllable+phone+frame=432) ###\n num_frames = sum(temp_set_dur_phone)\n temp_set_af = numpy.empty((num_frames, self.MLU_div['length'][-1]))\n \n temp_set_af[0: num_syl, self.MLU_div['length'][0]: self.MLU_div['length'][1] ] = temp_set_syl[numpy.cumsum(temp_set_dur_syl)-1]\n temp_set_af[0: num_ph, self.MLU_div['length'][1]: self.MLU_div['length'][2]] = temp_set_phone\n \n ### input word feature matrix ###\n temp_set_dur_word_segments = numpy.zeros(num_words, dtype='int32')\n syl_bound = numpy.cumsum(temp_set_dur_word)\n for indx in range(num_words):\n temp_set_dur_word_segments[indx] = int(sum(temp_set_dur_syl[0: syl_bound[indx]]))\n temp_set_x = temp_set_word[temp_set_dur_word_segments-1]\n \n ### for batch processing ###\n lab_end_frame_number+=num_words\n out_end_frame_number+=out_frame_number\n \n new_temp_set_x[lab_start_frame_number: lab_end_frame_number, ] = temp_set_x[0:num_words, ]\n new_temp_set_y[out_start_frame_number: out_end_frame_number, ] = temp_set_y[0:out_frame_number, ]\n 
new_temp_set_af[out_start_frame_number: out_end_frame_number, ] = temp_set_af[0:out_frame_number, ]\n\n new_temp_set_d[0] = numpy.append(new_temp_set_d[0], temp_set_dur_word)\n new_temp_set_d[1] = numpy.append(new_temp_set_d[1], temp_set_dur_syl)\n new_temp_set_d[2] = numpy.append(new_temp_set_d[2], temp_set_dur_phone)\n\n lab_start_frame_number = lab_end_frame_number\n out_start_frame_number = out_end_frame_number\n \n new_temp_set_x = new_temp_set_x[0:lab_end_frame_number, ]\n new_temp_set_y = new_temp_set_y[0:out_end_frame_number, ]\n new_temp_set_af = new_temp_set_af[0:out_end_frame_number, ]\n \n new_temp_set_d = numpy.concatenate((new_temp_set_d[0], new_temp_set_d[1], new_temp_set_d[2]))\n \n ### rest of the code similar to S2S ###\n self.file_index += 1\n\n if self.file_index >= self.list_size:\n self.end_reading = True\n self.file_index = 0\n\n shared_set_x = self.make_shared(new_temp_set_x, 'x')\n shared_set_y = self.make_shared(new_temp_set_y, 'y')\n shared_set_d = theano.shared(numpy.asarray(new_temp_set_d, dtype='int32'), name='d', borrow=True)\n\n shared_set_xyd = (shared_set_x, shared_set_y, shared_set_d)\n \n return shared_set_xyd, new_temp_set_x, new_temp_set_y, new_temp_set_d, new_temp_set_af\n\n def load_next_utterance_CTC(self):\n\n temp_set_x = numpy.empty((self.buffer_size, self.n_ins))\n temp_set_y = numpy.empty(self.buffer_size)\n\n io_fun = BinaryIOCollection()\n\n in_features, lab_frame_number = io_fun.load_binary_file_frame(self.x_files_list[self.file_index], self.n_ins)\n out_features, out_frame_number = io_fun.load_binary_file_frame(self.y_files_list[self.file_index], self.n_outs)\n\n frame_number = lab_frame_number\n temp_set_x = in_features[0:frame_number, ]\n\n temp_set_y = numpy.array([self.n_outs])\n for il in numpy.argmax(out_features, axis=1):\n temp_set_y = numpy.concatenate((temp_set_y, [il, self.n_outs]), axis=0)\n\n self.file_index += 1\n\n if self.file_index >= self.list_size:\n self.end_reading = True\n self.file_index = 0\n\n shared_set_x = self.make_shared(temp_set_x, 'x')\n shared_set_y = theano.shared(numpy.asarray(temp_set_y, dtype='int32'), name='y', borrow=True)\n\n shared_set_xy = (shared_set_x, shared_set_y)\n\n return shared_set_xy, temp_set_x, temp_set_y\n\n\n def load_next_partition(self):\n \"\"\"Load one block data. The number of frames will be the buffer size set during intialisation.\n\n \"\"\"\n\n self.logger.debug('loading next partition')\n\n temp_set_x = numpy.empty((self.buffer_size, self.n_ins))\n temp_set_y = numpy.empty((self.buffer_size, self.n_outs))\n current_index = 0\n\n ### first check whether there are remaining data from previous utterance\n if self.remain_frame_number > 0:\n temp_set_x[current_index:self.remain_frame_number, ] = self.remain_data_x\n temp_set_y[current_index:self.remain_frame_number, ] = self.remain_data_y\n current_index += self.remain_frame_number\n\n self.remain_frame_number = 0\n\n io_fun = BinaryIOCollection()\n while True:\n if current_index >= self.buffer_size:\n break\n if self.file_index >= self.list_size:\n self.end_reading = True\n self.file_index = 0\n break\n\n in_features, lab_frame_number = io_fun.load_binary_file_frame(self.x_files_list[self.file_index], self.n_ins)\n out_features, out_frame_number = io_fun.load_binary_file_frame(self.y_files_list[self.file_index], self.n_outs)\n\n frame_number = lab_frame_number\n if abs(lab_frame_number - out_frame_number) < 5: ## we allow small difference here. 
may not be correct, but sometimes there is a one- or two-frame difference\n if lab_frame_number > out_frame_number:\n frame_number = out_frame_number\n else:\n base_file_name = os.path.basename(self.x_files_list[self.file_index]).split('.')[0]\n message = \"the number of frames in label and acoustic features differs: %d vs %d (%s)\" % (lab_frame_number, out_frame_number, base_file_name)\n self.logger.critical(message)\n raise ValueError(message)\n\n out_features = out_features[0:frame_number, ]\n in_features = in_features[0:frame_number, ]\n\n if current_index + frame_number <= self.buffer_size:\n temp_set_x[current_index:current_index+frame_number, ] = in_features\n temp_set_y[current_index:current_index+frame_number, ] = out_features\n\n current_index = current_index + frame_number\n else: ## if the current utterance cannot be stored in the block, then leave the remaining part for the next block\n used_frame_number = self.buffer_size - current_index\n temp_set_x[current_index:self.buffer_size, ] = in_features[0:used_frame_number, ]\n temp_set_y[current_index:self.buffer_size, ] = out_features[0:used_frame_number, ]\n current_index = self.buffer_size\n\n self.remain_data_x = in_features[used_frame_number:frame_number, ]\n self.remain_data_y = out_features[used_frame_number:frame_number, ]\n self.remain_frame_number = frame_number - used_frame_number\n\n self.file_index += 1\n\n temp_set_x = temp_set_x[0:current_index, ]\n temp_set_y = temp_set_y[0:current_index, ]\n\n ## reseeding with the same value keeps x and y aligned across the two shuffles\n numpy.random.seed(271639)\n numpy.random.shuffle(temp_set_x)\n numpy.random.seed(271639)\n numpy.random.shuffle(temp_set_y)\n\n shared_set_x = self.make_shared(temp_set_x, 'x')\n shared_set_y = self.make_shared(temp_set_y, 'y')\n\n shared_set_xy = (shared_set_x, shared_set_y)\n\n return shared_set_xy, temp_set_x, temp_set_y\n\n def is_finish(self):\n return self.end_reading\n\n\nclass ListDataProviderWithProjectionIndex(ListDataProvider):\n '''\n Added kwarg index_to_project to __init__\n '''\n\n def __init__(self, x_file_list, y_file_list, n_ins=0, n_outs=0, \\\n buffer_size = 500000, shuffle=False, index_to_project=1, projection_insize=10000, indexes_only=False):\n super( ListDataProviderWithProjectionIndex, self ).__init__(x_file_list, \\\n y_file_list, n_ins=n_ins, n_outs=n_outs, buffer_size=buffer_size, shuffle=shuffle)\n self.index_to_project = index_to_project\n self.projection_insize = projection_insize\n self.indexes_only = indexes_only\n\n def load_next_partition_with_projection(self):\n\n shared_set_xy, temp_set_x, temp_set_y = self.load_next_partition()\n\n if self.indexes_only:\n temp_set_x, p_indexes = get_unexpanded_projection_inputs(temp_set_x, self.index_to_project, \\\n self.projection_insize)\n shared_set_x_proj = theano.shared(p_indexes, name='x_proj', borrow=True)\n else:\n temp_set_x, one_hot = expand_projection_inputs(temp_set_x, self.index_to_project, \\\n self.projection_insize)\n shared_set_x_proj = self.make_shared(one_hot, 'x_proj')\n\n shared_set_x = self.make_shared(temp_set_x, 'x')\n shared_set_y = self.make_shared(temp_set_y, 'y')\n\n shared_set_xy = (shared_set_x, shared_set_x_proj, shared_set_y)\n\n if self.indexes_only:\n return shared_set_xy, temp_set_x, p_indexes, temp_set_y\n else:\n return shared_set_xy, temp_set_x, one_hot, temp_set_y\n\n## Put this function at module level so it can be imported for use in 
dnn_generation\ndef expand_projection_inputs(temp_set_x, index_to_project, projection_insize):\n ## Turn indexes of words, syllables etc. into one-hot data:\n m,n = numpy.shape(temp_set_x)\n projection_indices = temp_set_x[:, index_to_project]\n assert projection_indices.max() < projection_insize,'projection_insize is %s but there is an index %s in the data'%(projection_insize, projection_indices.max())\n one_hot = numpy.zeros((m, projection_insize))\n\n ## Use advanced indexing to turn the relevant features on:\n projection_indices = projection_indices.astype(int) ## indices are stored as floats; cast before indexing\n one_hot[list(range(m)),projection_indices] = 1.0\n ## Effectively remove the index from the original data by setting it to 0:\n temp_set_x[:, index_to_project] = 0.0\n return temp_set_x, one_hot\n\ndef get_unexpanded_projection_inputs(temp_set_x, index_to_project, projection_insize):\n ## Extract the indexes of words, syllables etc. without expanding them to one-hot:\n m,n = numpy.shape(temp_set_x)\n projection_indices = temp_set_x[:, index_to_project]\n assert projection_indices.max() < projection_insize,'projection_insize is %s but there is an index %s in the data'%(projection_insize, projection_indices.max())\n\n projection_indices = projection_indices.astype('int32') ## indices are stored as floats; cast before use\n\n temp_set_x[:, index_to_project] = 0.0\n return temp_set_x, projection_indices\n" ]
[ [ "numpy.asarray", "matplotlib.use", "numpy.flipud", "matplotlib.pyplot.figure" ], [ "numpy.random.seed", "numpy.reshape", "numpy.asarray", "numpy.cumsum", "numpy.random.shuffle", "numpy.concatenate", "numpy.append", "numpy.argmax", "numpy.shape", "numpy.array", "numpy.zeros", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Mirwaisse/tutorials
[ "18ec63ce8c85ef11af92685cc1436fd3034efc74" ]
[ "intermediate_source/model_parallel_tutorial.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nModel Parallel Best Practices\n*************************************************************\n**Author**: `Shen Li <https://mrshenli.github.io/>`_\n\nData parallel and model parallel are widely-used in distributed training\ntechniques. Previous posts have explained how to use\n`DataParallel <https://pytorch.org/tutorials/beginner/blitz/data_parallel_tutorial.html>`_\nto train a neural network on multiple GPUs. ``DataParallel`` replicates the\nsame model to all GPUs, where each GPU consumes a different partition of the\ninput data. Although it can significantly accelerate the training process, it\ndoes not work for some use cases where the model is too large to fit into a\nsingle GPU. This post shows how to solve that problem by using model parallel\nand also shares some insights on how to speed up model parallel training.\n\nThe high-level idea of model parallel is to place different sub-networks of a\nmodel onto different devices, and implement the ``forward`` method accordingly\nto move intermediate outputs across devices. As only part of a model operates\non any individual device, a set of devices can collectively serve a larger\nmodel. In this post, we will not try to construct huge models and squeeze them\ninto a limited number of GPUs. Instead, this post focuses on showing the idea\nof model parallel. It is up to the readers to apply the ideas to real-world\napplications.\n\n**Recommended Reading:**\n\n- https://pytorch.org/ For installation instructions\n- :doc:`/beginner/blitz/data_parallel_tutorial` Single-Machine Data Parallel\n- :doc:`/intermediate/ddp_tutorial` Combine Distributed Data Parallel and Model Parallel\n\"\"\"\n\n######################################################################\n# Basic Usage\n# =======================\n#\n# Let us start with a toy model that contains two linear layers. To run this\n# model on two GPUs, simply put each linear layer on a different GPU, and move\n# inputs and intermediate outputs to match the layer devices accordingly.\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n\n\nclass ToyModel(nn.Module):\n def __init__(self):\n super(ToyModel, self).__init__()\n self.net1 = torch.nn.Linear(10, 10).to('cuda:0')\n self.relu = torch.nn.ReLU()\n self.net2 = torch.nn.Linear(10, 5).to('cuda:1')\n\n def forward(self, x):\n x = self.relu(self.net1(x.to('cuda:0')))\n return self.net2(x.to('cuda:1'))\n\n######################################################################\n# Note that, the above ``ToyModel`` looks very similar to how one would\n# implement it on a single GPU, except the five ``to(device)`` calls which\n# place linear layers and tensors on proper devices. That is the only place in\n# the model that requires changes. The ``backward()`` and ``torch.optim`` will\n# automatically take care of gradients as if the model is on one GPU. You only\n# need to make sure that the labels are on the same device as the outputs when\n# calling the loss function.\n\n\nmodel = ToyModel()\nloss_fn = nn.MSELoss()\noptimizer = optim.SGD(model.parameters(), lr=0.001)\n\noptimizer.zero_grad()\noutputs = model(torch.randn(20, 10))\nlabels = torch.randn(20, 5).to('cuda:1')\nloss_fn(outputs, labels).backward()\noptimizer.step()\n\n######################################################################\n# Apply Model Parallel to Existing Modules\n# =======================\n#\n# It is also possible to run an existing single-GPU module on multiple GPUs\n# with just a few lines of changes. 
The code below shows how to decompose\n# ``torchvision.models.resnet50()`` to two GPUs. The idea is to inherit from\n# the existing ``ResNet`` module, and split the layers to two GPUs during\n# construction. Then, override the ``forward`` method to stitch two\n# sub-networks by moving the intermediate outputs accordingly.\n\n\nfrom torchvision.models.resnet import ResNet, Bottleneck\n\nnum_classes = 1000\n\n\nclass ModelParallelResNet50(ResNet):\n    def __init__(self, *args, **kwargs):\n        super(ModelParallelResNet50, self).__init__(\n            Bottleneck, [3, 4, 6, 3], num_classes=num_classes, *args, **kwargs)\n\n        self.seq1 = nn.Sequential(\n            self.conv1,\n            self.bn1,\n            self.relu,\n            self.maxpool,\n\n            self.layer1,\n            self.layer2\n        ).to('cuda:0')\n\n        self.seq2 = nn.Sequential(\n            self.layer3,\n            self.layer4,\n            self.avgpool,\n        ).to('cuda:1')\n\n        self.fc.to('cuda:1')\n\n    def forward(self, x):\n        x = self.seq2(self.seq1(x).to('cuda:1'))\n        return self.fc(x.view(x.size(0), -1))\n\n\n######################################################################\n# The above implementation solves the problem for cases where the model is too\n# large to fit into a single GPU. However, you might have already noticed that\n# it will be slower than running it on a single GPU if your model fits. It is\n# because, at any point in time, only one of the two GPUs is working, while\n# the other one is sitting there doing nothing. The performance further\n# deteriorates as the intermediate outputs need to be copied from ``cuda:0`` to\n# ``cuda:1`` between ``layer2`` and ``layer3``.\n#\n# Let us run an experiment to get a more quantitative view of the execution\n# time. In this experiment, we train ``ModelParallelResNet50`` and the existing\n# ``torchvision.models.resnet50()`` by running random inputs and labels through\n# them. After the training, the models will not produce any useful predictions,\n# but we can get a reasonable understanding of the execution times.\n\n\nimport torchvision.models as models\n\nnum_batches = 3\nbatch_size = 120\nimage_w = 128\nimage_h = 128\n\n\ndef train(model):\n    model.train(True)\n    loss_fn = nn.MSELoss()\n    optimizer = optim.SGD(model.parameters(), lr=0.001)\n\n    one_hot_indices = torch.LongTensor(batch_size) \\\n                           .random_(0, num_classes) \\\n                           .view(batch_size, 1)\n\n    for _ in range(num_batches):\n        # generate random inputs and labels\n        inputs = torch.randn(batch_size, 3, image_w, image_h)\n        labels = torch.zeros(batch_size, num_classes) \\\n                      .scatter_(1, one_hot_indices, 1)\n\n        # run forward pass\n        optimizer.zero_grad()\n        outputs = model(inputs.to('cuda:0'))\n\n        # run backward pass\n        labels = labels.to(outputs.device)\n        loss_fn(outputs, labels).backward()\n        optimizer.step()\n\n\n######################################################################\n# The ``train(model)`` method above uses ``nn.MSELoss`` as the loss function,\n# and ``optim.SGD`` as the optimizer. It mimics training on ``128 X 128``\n# images which are organized into 3 batches where each batch contains 120\n# images. Then, we use ``timeit`` to run the ``train(model)`` method 10 times\n# and plot the execution times with standard deviations.\n\n\nimport matplotlib.pyplot as plt\nplt.switch_backend('Agg')\nimport numpy as np\nimport timeit\n\nnum_repeat = 10\n\nstmt = \"train(model)\"\n\nsetup = \"model = ModelParallelResNet50()\"\n# globals arg is only available in Python 3. 
In Python 2, use the following\n# import __builtin__\n# __builtin__.__dict__.update(locals())\nmp_run_times = timeit.repeat(\n    stmt, setup, number=1, repeat=num_repeat, globals=globals())\nmp_mean, mp_std = np.mean(mp_run_times), np.std(mp_run_times)\n\nsetup = \"import torchvision.models as models;\" + \\\n        \"model = models.resnet50(num_classes=num_classes).to('cuda:0')\"\nrn_run_times = timeit.repeat(\n    stmt, setup, number=1, repeat=num_repeat, globals=globals())\nrn_mean, rn_std = np.mean(rn_run_times), np.std(rn_run_times)\n\n\ndef plot(means, stds, labels, fig_name):\n    fig, ax = plt.subplots()\n    ax.bar(np.arange(len(means)), means, yerr=stds,\n           align='center', alpha=0.5, ecolor='red', capsize=10, width=0.6)\n    ax.set_ylabel('ResNet50 Execution Time (Second)')\n    ax.set_xticks(np.arange(len(means)))\n    ax.set_xticklabels(labels)\n    ax.yaxis.grid(True)\n    plt.tight_layout()\n    plt.savefig(fig_name)\n    plt.close(fig)\n\n\nplot([mp_mean, rn_mean],\n     [mp_std, rn_std],\n     ['Model Parallel', 'Single GPU'],\n     'mp_vs_rn.png')\n\n\n######################################################################\n#\n# .. figure:: /_static/img/model-parallel-images/mp_vs_rn.png\n#    :alt:\n#\n# The result shows that the execution time of the model parallel implementation is\n# ``4.02/3.75-1=7%`` longer than the existing single-GPU implementation. So we\n# can conclude there is roughly 7% overhead in copying tensors back and forth\n# across the GPUs. There is room for improvement, as we know one of the two\n# GPUs is sitting idle throughout the execution. One option is to further\n# divide each batch into a pipeline of splits, such that when one split reaches\n# the second sub-network, the following split can be fed into the first\n# sub-network. In this way, two consecutive splits can run concurrently on two\n# GPUs.\n\n######################################################################\n# Speed Up by Pipelining Inputs\n# =======================\n#\n# In the following experiments, we further divide each 120-image batch into\n# 20-image splits. As PyTorch launches CUDA operations asynchronously, the\n# implementation does not need to spawn multiple threads to achieve\n# concurrency.\n\n\nclass PipelineParallelResNet50(ModelParallelResNet50):\n    def __init__(self, split_size=20, *args, **kwargs):\n        super(PipelineParallelResNet50, self).__init__(*args, **kwargs)\n        self.split_size = split_size\n\n    def forward(self, x):\n        splits = iter(x.split(self.split_size, dim=0))\n        s_next = next(splits)\n        s_prev = self.seq1(s_next).to('cuda:1')\n        ret = []\n\n        for s_next in splits:\n            # A. s_prev runs on cuda:1\n            s_prev = self.seq2(s_prev)\n            ret.append(self.fc(s_prev.view(s_prev.size(0), -1)))\n\n            # B. s_next runs on cuda:0, which can run concurrently with A\n            s_prev = self.seq1(s_next).to('cuda:1')\n\n        s_prev = self.seq2(s_prev)\n        ret.append(self.fc(s_prev.view(s_prev.size(0), -1)))\n\n        return torch.cat(ret)\n\n\nsetup = \"model = PipelineParallelResNet50()\"\npp_run_times = timeit.repeat(\n    stmt, setup, number=1, repeat=num_repeat, globals=globals())\npp_mean, pp_std = np.mean(pp_run_times), np.std(pp_run_times)\n\nplot([mp_mean, rn_mean, pp_mean],\n     [mp_std, rn_std, pp_std],\n     ['Model Parallel', 'Single GPU', 'Pipelining Model Parallel'],\n     'mp_vs_rn_vs_pp.png')\n\n######################################################################\n# Please note, device-to-device tensor copy operations are synchronized on\n# current streams on the source and the destination devices. 
If you create\n# multiple streams, you have to make sure that copy operations are properly\n# synchronized. Writing the source tensor or reading/writing the destination\n# tensor before finishing the copy operation can lead to undefined behavior.\n# The above implementation only uses default streams on both source and\n# destination devices, hence it is not necessary to enforce additional\n# synchronizations.\n#\n# .. figure:: /_static/img/model-parallel-images/mp_vs_rn_vs_pp.png\n#    :alt:\n#\n# The experiment result shows that pipelining inputs to model parallel\n# ResNet50 speeds up the training process by roughly ``3.75/2.51-1=49%``. It is\n# still quite far away from the ideal 100% speedup. As we have introduced a new\n# parameter ``split_size`` in our pipeline parallel implementation, it is\n# unclear how the new parameter affects the overall training time. Intuitively\n# speaking, using small ``split_size`` leads to many tiny CUDA kernel launches,\n# while using large ``split_size`` results in relatively long idle times during\n# the first and last splits. Neither is optimal. There might be an optimal\n# ``split_size`` configuration for this specific experiment. Let us try to find\n# it by running experiments using several different ``split_size`` values.\n\n\nmeans = []\nstds = []\nsplit_sizes = [1, 3, 5, 8, 10, 12, 20, 40, 60]\n\nfor split_size in split_sizes:\n    setup = \"model = PipelineParallelResNet50(split_size=%d)\" % split_size\n    pp_run_times = timeit.repeat(\n        stmt, setup, number=1, repeat=num_repeat, globals=globals())\n    means.append(np.mean(pp_run_times))\n    stds.append(np.std(pp_run_times))\n\nfig, ax = plt.subplots()\nax.plot(split_sizes, means)\nax.errorbar(split_sizes, means, yerr=stds, ecolor='red', fmt='ro')\nax.set_ylabel('ResNet50 Execution Time (Second)')\nax.set_xlabel('Pipeline Split Size')\nax.set_xticks(split_sizes)\nax.yaxis.grid(True)\nplt.tight_layout()\nplt.savefig(\"split_size_tradeoff.png\")\nplt.close(fig)\n\n######################################################################\n#\n# .. figure:: /_static/img/model-parallel-images/split_size_tradeoff.png\n#    :alt:\n#\n# The result shows that setting ``split_size`` to 12 achieves the fastest\n# training speed, which leads to ``3.75/2.43-1=54%`` speedup. There are\n# still opportunities to further accelerate the training process. For example,\n# all operations on ``cuda:0`` are placed on its default stream. It means that\n# computations on the next split cannot overlap with the copy operation of the\n# previous split. However, as the previous and next splits are different tensors, there is\n# no problem overlapping one's computation with the other one's copy. The\n# implementation needs to use multiple streams on both GPUs, and different\n# sub-network structures require different stream management strategies. As no\n# general multi-stream solution works for all model parallel use cases, we will\n# not discuss it in this tutorial.\n" ]
[ [ "torch.nn.Sequential", "torch.LongTensor", "matplotlib.pyplot.tight_layout", "torch.cat", "torch.zeros", "matplotlib.pyplot.switch_backend", "torch.randn", "matplotlib.pyplot.subplots", "matplotlib.pyplot.savefig", "torch.nn.Linear", "numpy.std", "numpy.mean", "matplotlib.pyplot.close", "torch.nn.ReLU", "torch.nn.MSELoss" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kichiro09/object-detection
[ "e498d28503fd4a12d1fa9ade41891f2f9601c674", "e498d28503fd4a12d1fa9ade41891f2f9601c674" ]
[ "official/recommendation/ncf_test.py", "research/object_detection/models/feature_map_generators_test.py" ]
[ "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests NCF.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\nimport mock\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom absl import flags\nfrom absl.testing import flagsaver\nfrom official.recommendation import constants as rconst\nfrom official.recommendation import data_preprocessing\nfrom official.recommendation import neumf_model\nfrom official.recommendation import ncf_main\nfrom official.recommendation import stat_utils\n\n\nNUM_TRAIN_NEG = 4\n\n\nclass NcfTest(tf.test.TestCase):\n\n @classmethod\n def setUpClass(cls): # pylint: disable=invalid-name\n super(NcfTest, cls).setUpClass()\n ncf_main.define_ncf_flags()\n\n def setUp(self):\n self.top_k_old = rconst.TOP_K\n self.num_eval_negatives_old = rconst.NUM_EVAL_NEGATIVES\n rconst.NUM_EVAL_NEGATIVES = 2\n\n def tearDown(self):\n rconst.NUM_EVAL_NEGATIVES = self.num_eval_negatives_old\n rconst.TOP_K = self.top_k_old\n\n def get_hit_rate_and_ndcg(self, predicted_scores_by_user, items_by_user,\n top_k=rconst.TOP_K, match_mlperf=False):\n rconst.TOP_K = top_k\n rconst.NUM_EVAL_NEGATIVES = predicted_scores_by_user.shape[1] - 1\n\n g = tf.Graph()\n with g.as_default():\n logits = tf.convert_to_tensor(\n predicted_scores_by_user.reshape((-1, 1)), tf.float32)\n softmax_logits = tf.concat([tf.zeros(logits.shape, dtype=logits.dtype),\n logits], axis=1)\n duplicate_mask = tf.convert_to_tensor(\n stat_utils.mask_duplicates(items_by_user, axis=1), tf.float32)\n\n metric_ops = neumf_model.compute_eval_loss_and_metrics(\n logits=logits, softmax_logits=softmax_logits,\n duplicate_mask=duplicate_mask, num_training_neg=NUM_TRAIN_NEG,\n match_mlperf=match_mlperf).eval_metric_ops\n\n hr = metric_ops[rconst.HR_KEY]\n ndcg = metric_ops[rconst.NDCG_KEY]\n\n init = [tf.global_variables_initializer(),\n tf.local_variables_initializer()]\n\n with self.test_session(graph=g) as sess:\n sess.run(init)\n return sess.run([hr[1], ndcg[1]])\n\n\n\n def test_hit_rate_and_ndcg(self):\n # Test with no duplicate items\n predictions = np.array([\n [1., 2., 0.], # In top 2\n [2., 1., 0.], # In top 1\n [0., 2., 1.], # In top 3\n [2., 3., 4.] 
# In top 3\n ])\n items = np.array([\n [1, 2, 3],\n [2, 3, 1],\n [3, 2, 1],\n [2, 1, 3],\n ])\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 1)\n self.assertAlmostEqual(hr, 1 / 4)\n self.assertAlmostEqual(ndcg, 1 / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 2)\n self.assertAlmostEqual(hr, 2 / 4)\n self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3)) / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 3)\n self.assertAlmostEqual(hr, 4 / 4)\n self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3) +\n 2 * math.log(2) / math.log(4)) / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 1,\n match_mlperf=True)\n self.assertAlmostEqual(hr, 1 / 4)\n self.assertAlmostEqual(ndcg, 1 / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 2,\n match_mlperf=True)\n self.assertAlmostEqual(hr, 2 / 4)\n self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3)) / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 3,\n match_mlperf=True)\n self.assertAlmostEqual(hr, 4 / 4)\n self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3) +\n 2 * math.log(2) / math.log(4)) / 4)\n\n # Test with duplicate items. In the MLPerf case, we treat the duplicates as\n # a single item. Otherwise, we treat the duplicates as separate items.\n predictions = np.array([\n [1., 2., 2., 3.], # In top 4. MLPerf: In top 3\n [3., 1., 0., 2.], # In top 1. MLPerf: In top 1\n [0., 2., 3., 2.], # In top 4. MLPerf: In top 3\n [3., 2., 4., 2.] # In top 2. MLPerf: In top 2\n ])\n items = np.array([\n [1, 2, 2, 3],\n [1, 2, 3, 4],\n [1, 2, 3, 2],\n [4, 3, 2, 1],\n ])\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 1)\n self.assertAlmostEqual(hr, 1 / 4)\n self.assertAlmostEqual(ndcg, 1 / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 2)\n self.assertAlmostEqual(hr, 2 / 4)\n self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3)) / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 3)\n self.assertAlmostEqual(hr, 2 / 4)\n self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3)) / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 4)\n self.assertAlmostEqual(hr, 4 / 4)\n self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3) +\n 2 * math.log(2) / math.log(5)) / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 1,\n match_mlperf=True)\n self.assertAlmostEqual(hr, 1 / 4)\n self.assertAlmostEqual(ndcg, 1 / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 2,\n match_mlperf=True)\n self.assertAlmostEqual(hr, 2 / 4)\n self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3)) / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 3,\n match_mlperf=True)\n self.assertAlmostEqual(hr, 4 / 4)\n self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3) +\n 2 * math.log(2) / math.log(4)) / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 4,\n match_mlperf=True)\n self.assertAlmostEqual(hr, 4 / 4)\n self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3) +\n 2 * math.log(2) / math.log(4)) / 4)\n\n # Test with duplicate items, where the predictions for the same item can\n # differ. In the MLPerf case, we should take the first prediction.\n predictions = np.array([\n [3., 2., 4., 4.], # In top 3. MLPerf: In top 2\n [3., 4., 2., 4.], # In top 3. MLPerf: In top 3\n [2., 3., 4., 1.], # In top 3. MLPerf: In top 2\n [4., 3., 5., 2.] # In top 2. 
MLPerf: In top 1\n ])\n items = np.array([\n [1, 2, 2, 3],\n [4, 3, 3, 2],\n [2, 1, 1, 1],\n [4, 2, 2, 1],\n ])\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 1)\n self.assertAlmostEqual(hr, 0 / 4)\n self.assertAlmostEqual(ndcg, 0 / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 2)\n self.assertAlmostEqual(hr, 1 / 4)\n self.assertAlmostEqual(ndcg, (math.log(2) / math.log(3)) / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 3)\n self.assertAlmostEqual(hr, 4 / 4)\n self.assertAlmostEqual(ndcg, (math.log(2) / math.log(3) +\n 3 * math.log(2) / math.log(4)) / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 4)\n self.assertAlmostEqual(hr, 4 / 4)\n self.assertAlmostEqual(ndcg, (math.log(2) / math.log(3) +\n 3 * math.log(2) / math.log(4)) / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 1,\n match_mlperf=True)\n self.assertAlmostEqual(hr, 1 / 4)\n self.assertAlmostEqual(ndcg, 1 / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 2,\n match_mlperf=True)\n self.assertAlmostEqual(hr, 3 / 4)\n self.assertAlmostEqual(ndcg, (1 + 2 * math.log(2) / math.log(3)) / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 3,\n match_mlperf=True)\n self.assertAlmostEqual(hr, 4 / 4)\n self.assertAlmostEqual(ndcg, (1 + 2 * math.log(2) / math.log(3) +\n math.log(2) / math.log(4)) / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 4,\n match_mlperf=True)\n self.assertAlmostEqual(hr, 4 / 4)\n self.assertAlmostEqual(ndcg, (1 + 2 * math.log(2) / math.log(3) +\n math.log(2) / math.log(4)) / 4)\n\n _BASE_END_TO_END_FLAGS = {\n \"batch_size\": 1024,\n \"train_epochs\": 1,\n \"use_synthetic_data\": True\n }\n\n @flagsaver.flagsaver(**_BASE_END_TO_END_FLAGS)\n @mock.patch.object(data_preprocessing, \"SYNTHETIC_BATCHES_PER_EPOCH\", 100)\n def test_end_to_end(self):\n ncf_main.main(None)\n\n @flagsaver.flagsaver(ml_perf=True, **_BASE_END_TO_END_FLAGS)\n @mock.patch.object(data_preprocessing, \"SYNTHETIC_BATCHES_PER_EPOCH\", 100)\n def test_end_to_end_mlperf(self):\n ncf_main.main(None)\n\n @flagsaver.flagsaver(use_estimator=False, **_BASE_END_TO_END_FLAGS)\n @mock.patch.object(data_preprocessing, \"SYNTHETIC_BATCHES_PER_EPOCH\", 100)\n def test_end_to_end_no_estimator(self):\n ncf_main.main(None)\n flags.FLAGS.ml_perf = True\n ncf_main.main(None)\n\n @flagsaver.flagsaver(use_estimator=False, **_BASE_END_TO_END_FLAGS)\n @mock.patch.object(data_preprocessing, \"SYNTHETIC_BATCHES_PER_EPOCH\", 100)\n def test_end_to_end_while_loop(self):\n # We cannot set use_while_loop = True in the flagsaver constructor, because\n # if the flagsaver sets it to True before setting use_estimator to False,\n # the flag validator will throw an error.\n flags.FLAGS.use_while_loop = True\n ncf_main.main(None)\n flags.FLAGS.ml_perf = True\n ncf_main.main(None)\n\n\nif __name__ == \"__main__\":\n tf.logging.set_verbosity(tf.logging.INFO)\n tf.test.main()\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests for feature map generators.\"\"\"\n\nfrom absl.testing import parameterized\n\nimport tensorflow as tf\n\nfrom google.protobuf import text_format\n\nfrom object_detection.builders import hyperparams_builder\nfrom object_detection.models import feature_map_generators\nfrom object_detection.protos import hyperparams_pb2\n\nINCEPTION_V2_LAYOUT = {\n 'from_layer': ['Mixed_3c', 'Mixed_4c', 'Mixed_5c', '', '', ''],\n 'layer_depth': [-1, -1, -1, 512, 256, 256],\n 'anchor_strides': [16, 32, 64, -1, -1, -1],\n 'layer_target_norm': [20.0, -1, -1, -1, -1, -1],\n}\n\nINCEPTION_V3_LAYOUT = {\n 'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],\n 'layer_depth': [-1, -1, -1, 512, 256, 128],\n 'anchor_strides': [16, 32, 64, -1, -1, -1],\n 'aspect_ratios': [1.0, 2.0, 1.0/2, 3.0, 1.0/3]\n}\n\nEMBEDDED_SSD_MOBILENET_V1_LAYOUT = {\n 'from_layer': ['Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '', ''],\n 'layer_depth': [-1, -1, 512, 256, 256],\n 'conv_kernel_size': [-1, -1, 3, 3, 2],\n}\n\nSSD_MOBILENET_V1_WEIGHT_SHARED_LAYOUT = {\n 'from_layer': ['Conv2d_13_pointwise', '', '', ''],\n 'layer_depth': [-1, 256, 256, 256],\n}\n\n\[email protected](\n {'use_keras': False},\n {'use_keras': True},\n)\nclass MultiResolutionFeatureMapGeneratorTest(tf.test.TestCase):\n\n def _build_conv_hyperparams(self):\n conv_hyperparams = hyperparams_pb2.Hyperparams()\n conv_hyperparams_text_proto = \"\"\"\n regularizer {\n l2_regularizer {\n }\n }\n initializer {\n truncated_normal_initializer {\n }\n }\n \"\"\"\n text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)\n return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)\n\n def _build_feature_map_generator(self, feature_map_layout, use_keras,\n pool_residual=False):\n if use_keras:\n return feature_map_generators.KerasMultiResolutionFeatureMaps(\n feature_map_layout=feature_map_layout,\n depth_multiplier=1,\n min_depth=32,\n insert_1x1_conv=True,\n freeze_batchnorm=False,\n is_training=True,\n conv_hyperparams=self._build_conv_hyperparams(),\n name='FeatureMaps'\n )\n else:\n def feature_map_generator(image_features):\n return feature_map_generators.multi_resolution_feature_maps(\n feature_map_layout=feature_map_layout,\n depth_multiplier=1,\n min_depth=32,\n insert_1x1_conv=True,\n image_features=image_features,\n pool_residual=pool_residual)\n return feature_map_generator\n\n def test_get_expected_feature_map_shapes_with_inception_v2(self, use_keras):\n image_features = {\n 'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32),\n 'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32),\n 'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32)\n }\n feature_map_generator = self._build_feature_map_generator(\n feature_map_layout=INCEPTION_V2_LAYOUT,\n use_keras=use_keras\n )\n feature_maps = feature_map_generator(image_features)\n\n 
expected_feature_map_shapes = {\n 'Mixed_3c': (4, 28, 28, 256),\n 'Mixed_4c': (4, 14, 14, 576),\n 'Mixed_5c': (4, 7, 7, 1024),\n 'Mixed_5c_2_Conv2d_3_3x3_s2_512': (4, 4, 4, 512),\n 'Mixed_5c_2_Conv2d_4_3x3_s2_256': (4, 2, 2, 256),\n 'Mixed_5c_2_Conv2d_5_3x3_s2_256': (4, 1, 1, 256)}\n\n init_op = tf.global_variables_initializer()\n with self.test_session() as sess:\n sess.run(init_op)\n out_feature_maps = sess.run(feature_maps)\n out_feature_map_shapes = dict(\n (key, value.shape) for key, value in out_feature_maps.items())\n self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)\n\n # TODO(kaftan): Remove conditional after CMLE moves to TF 1.10\n\n def test_get_expected_feature_map_shapes_use_explicit_padding(\n self, use_keras):\n image_features = {\n 'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32),\n 'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32),\n 'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32)\n }\n layout_copy = INCEPTION_V2_LAYOUT.copy()\n layout_copy['use_explicit_padding'] = True\n feature_map_generator = self._build_feature_map_generator(\n feature_map_layout=layout_copy,\n use_keras=use_keras\n )\n feature_maps = feature_map_generator(image_features)\n\n expected_feature_map_shapes = {\n 'Mixed_3c': (4, 28, 28, 256),\n 'Mixed_4c': (4, 14, 14, 576),\n 'Mixed_5c': (4, 7, 7, 1024),\n 'Mixed_5c_2_Conv2d_3_3x3_s2_512': (4, 4, 4, 512),\n 'Mixed_5c_2_Conv2d_4_3x3_s2_256': (4, 2, 2, 256),\n 'Mixed_5c_2_Conv2d_5_3x3_s2_256': (4, 1, 1, 256)}\n\n init_op = tf.global_variables_initializer()\n with self.test_session() as sess:\n sess.run(init_op)\n out_feature_maps = sess.run(feature_maps)\n out_feature_map_shapes = dict(\n (key, value.shape) for key, value in out_feature_maps.items())\n self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)\n\n def test_get_expected_feature_map_shapes_with_inception_v3(self, use_keras):\n image_features = {\n 'Mixed_5d': tf.random_uniform([4, 35, 35, 256], dtype=tf.float32),\n 'Mixed_6e': tf.random_uniform([4, 17, 17, 576], dtype=tf.float32),\n 'Mixed_7c': tf.random_uniform([4, 8, 8, 1024], dtype=tf.float32)\n }\n\n feature_map_generator = self._build_feature_map_generator(\n feature_map_layout=INCEPTION_V3_LAYOUT,\n use_keras=use_keras\n )\n feature_maps = feature_map_generator(image_features)\n\n expected_feature_map_shapes = {\n 'Mixed_5d': (4, 35, 35, 256),\n 'Mixed_6e': (4, 17, 17, 576),\n 'Mixed_7c': (4, 8, 8, 1024),\n 'Mixed_7c_2_Conv2d_3_3x3_s2_512': (4, 4, 4, 512),\n 'Mixed_7c_2_Conv2d_4_3x3_s2_256': (4, 2, 2, 256),\n 'Mixed_7c_2_Conv2d_5_3x3_s2_128': (4, 1, 1, 128)}\n\n init_op = tf.global_variables_initializer()\n with self.test_session() as sess:\n sess.run(init_op)\n out_feature_maps = sess.run(feature_maps)\n out_feature_map_shapes = dict(\n (key, value.shape) for key, value in out_feature_maps.items())\n self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)\n\n def test_get_expected_feature_map_shapes_with_embedded_ssd_mobilenet_v1(\n self, use_keras):\n image_features = {\n 'Conv2d_11_pointwise': tf.random_uniform([4, 16, 16, 512],\n dtype=tf.float32),\n 'Conv2d_13_pointwise': tf.random_uniform([4, 8, 8, 1024],\n dtype=tf.float32),\n }\n\n feature_map_generator = self._build_feature_map_generator(\n feature_map_layout=EMBEDDED_SSD_MOBILENET_V1_LAYOUT,\n use_keras=use_keras\n )\n feature_maps = feature_map_generator(image_features)\n\n expected_feature_map_shapes = {\n 'Conv2d_11_pointwise': (4, 16, 16, 512),\n 
'Conv2d_13_pointwise': (4, 8, 8, 1024),\n 'Conv2d_13_pointwise_2_Conv2d_2_3x3_s2_512': (4, 4, 4, 512),\n 'Conv2d_13_pointwise_2_Conv2d_3_3x3_s2_256': (4, 2, 2, 256),\n 'Conv2d_13_pointwise_2_Conv2d_4_2x2_s2_256': (4, 1, 1, 256)}\n\n init_op = tf.global_variables_initializer()\n with self.test_session() as sess:\n sess.run(init_op)\n out_feature_maps = sess.run(feature_maps)\n out_feature_map_shapes = dict(\n (key, value.shape) for key, value in out_feature_maps.items())\n self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)\n\n def test_feature_map_shapes_with_pool_residual_ssd_mobilenet_v1(\n self, use_keras):\n image_features = {\n 'Conv2d_13_pointwise': tf.random_uniform([4, 8, 8, 1024],\n dtype=tf.float32),\n }\n\n feature_map_generator = self._build_feature_map_generator(\n feature_map_layout=SSD_MOBILENET_V1_WEIGHT_SHARED_LAYOUT,\n use_keras=use_keras,\n pool_residual=True\n )\n feature_maps = feature_map_generator(image_features)\n\n expected_feature_map_shapes = {\n 'Conv2d_13_pointwise': (4, 8, 8, 1024),\n 'Conv2d_13_pointwise_2_Conv2d_1_3x3_s2_256': (4, 4, 4, 256),\n 'Conv2d_13_pointwise_2_Conv2d_2_3x3_s2_256': (4, 2, 2, 256),\n 'Conv2d_13_pointwise_2_Conv2d_3_3x3_s2_256': (4, 1, 1, 256)}\n\n init_op = tf.global_variables_initializer()\n with self.test_session() as sess:\n sess.run(init_op)\n out_feature_maps = sess.run(feature_maps)\n out_feature_map_shapes = dict(\n (key, value.shape) for key, value in out_feature_maps.items())\n self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)\n\n def test_get_expected_variable_names_with_inception_v2(self, use_keras):\n image_features = {\n 'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32),\n 'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32),\n 'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32)\n }\n feature_map_generator = self._build_feature_map_generator(\n feature_map_layout=INCEPTION_V2_LAYOUT,\n use_keras=use_keras\n )\n feature_maps = feature_map_generator(image_features)\n\n expected_slim_variables = set([\n 'Mixed_5c_1_Conv2d_3_1x1_256/weights',\n 'Mixed_5c_1_Conv2d_3_1x1_256/biases',\n 'Mixed_5c_2_Conv2d_3_3x3_s2_512/weights',\n 'Mixed_5c_2_Conv2d_3_3x3_s2_512/biases',\n 'Mixed_5c_1_Conv2d_4_1x1_128/weights',\n 'Mixed_5c_1_Conv2d_4_1x1_128/biases',\n 'Mixed_5c_2_Conv2d_4_3x3_s2_256/weights',\n 'Mixed_5c_2_Conv2d_4_3x3_s2_256/biases',\n 'Mixed_5c_1_Conv2d_5_1x1_128/weights',\n 'Mixed_5c_1_Conv2d_5_1x1_128/biases',\n 'Mixed_5c_2_Conv2d_5_3x3_s2_256/weights',\n 'Mixed_5c_2_Conv2d_5_3x3_s2_256/biases',\n ])\n\n expected_keras_variables = set([\n 'FeatureMaps/Mixed_5c_1_Conv2d_3_1x1_256_conv/kernel',\n 'FeatureMaps/Mixed_5c_1_Conv2d_3_1x1_256_conv/bias',\n 'FeatureMaps/Mixed_5c_2_Conv2d_3_3x3_s2_512_conv/kernel',\n 'FeatureMaps/Mixed_5c_2_Conv2d_3_3x3_s2_512_conv/bias',\n 'FeatureMaps/Mixed_5c_1_Conv2d_4_1x1_128_conv/kernel',\n 'FeatureMaps/Mixed_5c_1_Conv2d_4_1x1_128_conv/bias',\n 'FeatureMaps/Mixed_5c_2_Conv2d_4_3x3_s2_256_conv/kernel',\n 'FeatureMaps/Mixed_5c_2_Conv2d_4_3x3_s2_256_conv/bias',\n 'FeatureMaps/Mixed_5c_1_Conv2d_5_1x1_128_conv/kernel',\n 'FeatureMaps/Mixed_5c_1_Conv2d_5_1x1_128_conv/bias',\n 'FeatureMaps/Mixed_5c_2_Conv2d_5_3x3_s2_256_conv/kernel',\n 'FeatureMaps/Mixed_5c_2_Conv2d_5_3x3_s2_256_conv/bias',\n ])\n\n init_op = tf.global_variables_initializer()\n with self.test_session() as sess:\n sess.run(init_op)\n sess.run(feature_maps)\n actual_variable_set = set(\n [var.op.name for var in tf.trainable_variables()])\n if 
use_keras:\n self.assertSetEqual(expected_keras_variables, actual_variable_set)\n else:\n self.assertSetEqual(expected_slim_variables, actual_variable_set)\n\n # TODO(kaftan): Remove conditional after CMLE moves to TF 1.10\n\n\nclass FPNFeatureMapGeneratorTest(tf.test.TestCase):\n\n def test_get_expected_feature_map_shapes(self):\n image_features = [\n ('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),\n ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),\n ('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)),\n ('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))\n ]\n feature_maps = feature_map_generators.fpn_top_down_feature_maps(\n image_features=image_features, depth=128)\n\n expected_feature_map_shapes = {\n 'top_down_block2': (4, 8, 8, 128),\n 'top_down_block3': (4, 4, 4, 128),\n 'top_down_block4': (4, 2, 2, 128),\n 'top_down_block5': (4, 1, 1, 128)\n }\n\n init_op = tf.global_variables_initializer()\n with self.test_session() as sess:\n sess.run(init_op)\n out_feature_maps = sess.run(feature_maps)\n out_feature_map_shapes = {key: value.shape\n for key, value in out_feature_maps.items()}\n self.assertDictEqual(out_feature_map_shapes, expected_feature_map_shapes)\n\n def test_get_expected_feature_map_shapes_with_depthwise(self):\n image_features = [\n ('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),\n ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),\n ('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)),\n ('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))\n ]\n feature_maps = feature_map_generators.fpn_top_down_feature_maps(\n image_features=image_features, depth=128, use_depthwise=True)\n\n expected_feature_map_shapes = {\n 'top_down_block2': (4, 8, 8, 128),\n 'top_down_block3': (4, 4, 4, 128),\n 'top_down_block4': (4, 2, 2, 128),\n 'top_down_block5': (4, 1, 1, 128)\n }\n\n init_op = tf.global_variables_initializer()\n with self.test_session() as sess:\n sess.run(init_op)\n out_feature_maps = sess.run(feature_maps)\n out_feature_map_shapes = {key: value.shape\n for key, value in out_feature_maps.items()}\n self.assertDictEqual(out_feature_map_shapes, expected_feature_map_shapes)\n\n\nclass GetDepthFunctionTest(tf.test.TestCase):\n\n def test_return_min_depth_when_multiplier_is_small(self):\n depth_fn = feature_map_generators.get_depth_fn(depth_multiplier=0.5,\n min_depth=16)\n self.assertEqual(depth_fn(16), 16)\n\n def test_return_correct_depth_with_multiplier(self):\n depth_fn = feature_map_generators.get_depth_fn(depth_multiplier=0.5,\n min_depth=16)\n self.assertEqual(depth_fn(64), 32)\n\n\[email protected](\n {'replace_pool_with_conv': False},\n {'replace_pool_with_conv': True},\n)\nclass PoolingPyramidFeatureMapGeneratorTest(tf.test.TestCase):\n\n def test_get_expected_feature_map_shapes(self, replace_pool_with_conv):\n image_features = {\n 'image_features': tf.random_uniform([4, 19, 19, 1024])\n }\n feature_maps = feature_map_generators.pooling_pyramid_feature_maps(\n base_feature_map_depth=1024,\n num_layers=6,\n image_features=image_features,\n replace_pool_with_conv=replace_pool_with_conv)\n\n expected_pool_feature_map_shapes = {\n 'Base_Conv2d_1x1_1024': (4, 19, 19, 1024),\n 'MaxPool2d_0_2x2': (4, 10, 10, 1024),\n 'MaxPool2d_1_2x2': (4, 5, 5, 1024),\n 'MaxPool2d_2_2x2': (4, 3, 3, 1024),\n 'MaxPool2d_3_2x2': (4, 2, 2, 1024),\n 'MaxPool2d_4_2x2': (4, 1, 1, 1024),\n }\n\n expected_conv_feature_map_shapes = {\n 'Base_Conv2d_1x1_1024': (4, 19, 19, 1024),\n 
'Conv2d_0_3x3_s2_1024': (4, 10, 10, 1024),\n 'Conv2d_1_3x3_s2_1024': (4, 5, 5, 1024),\n 'Conv2d_2_3x3_s2_1024': (4, 3, 3, 1024),\n 'Conv2d_3_3x3_s2_1024': (4, 2, 2, 1024),\n 'Conv2d_4_3x3_s2_1024': (4, 1, 1, 1024),\n }\n\n init_op = tf.global_variables_initializer()\n with self.test_session() as sess:\n sess.run(init_op)\n out_feature_maps = sess.run(feature_maps)\n out_feature_map_shapes = {key: value.shape\n for key, value in out_feature_maps.items()}\n if replace_pool_with_conv:\n self.assertDictEqual(expected_conv_feature_map_shapes,\n out_feature_map_shapes)\n else:\n self.assertDictEqual(expected_pool_feature_map_shapes,\n out_feature_map_shapes)\n\n def test_get_expected_variable_names(self, replace_pool_with_conv):\n image_features = {\n 'image_features': tf.random_uniform([4, 19, 19, 1024])\n }\n feature_maps = feature_map_generators.pooling_pyramid_feature_maps(\n base_feature_map_depth=1024,\n num_layers=6,\n image_features=image_features,\n replace_pool_with_conv=replace_pool_with_conv)\n\n expected_pool_variables = set([\n 'Base_Conv2d_1x1_1024/weights',\n 'Base_Conv2d_1x1_1024/biases',\n ])\n\n expected_conv_variables = set([\n 'Base_Conv2d_1x1_1024/weights',\n 'Base_Conv2d_1x1_1024/biases',\n 'Conv2d_0_3x3_s2_1024/weights',\n 'Conv2d_0_3x3_s2_1024/biases',\n 'Conv2d_1_3x3_s2_1024/weights',\n 'Conv2d_1_3x3_s2_1024/biases',\n 'Conv2d_2_3x3_s2_1024/weights',\n 'Conv2d_2_3x3_s2_1024/biases',\n 'Conv2d_3_3x3_s2_1024/weights',\n 'Conv2d_3_3x3_s2_1024/biases',\n 'Conv2d_4_3x3_s2_1024/weights',\n 'Conv2d_4_3x3_s2_1024/biases',\n ])\n\n init_op = tf.global_variables_initializer()\n with self.test_session() as sess:\n sess.run(init_op)\n sess.run(feature_maps)\n actual_variable_set = set(\n [var.op.name for var in tf.trainable_variables()])\n if replace_pool_with_conv:\n self.assertSetEqual(expected_conv_variables, actual_variable_set)\n else:\n self.assertSetEqual(expected_pool_variables, actual_variable_set)\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "tensorflow.Graph", "tensorflow.local_variables_initializer", "tensorflow.zeros", "tensorflow.test.main", "tensorflow.global_variables_initializer", "tensorflow.logging.set_verbosity", "numpy.array" ], [ "tensorflow.trainable_variables", "tensorflow.global_variables_initializer", "tensorflow.random_uniform", "tensorflow.test.main" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
End-of-an-Era/PCN
[ "043c3063014166d831c07197d4e6748e824a5587" ]
[ "PCN/PyPCN.py" ]
[ "#!/usr/bin/python3\nfrom ctypes import *\nimport cv2\nimport numpy as np\nimport sys\nimport os\nimport time\nfrom ipdb import set_trace as dbg\nfrom enum import IntEnum\n\nclass CPoint(Structure):\n _fields_ = [(\"x\", c_int),\n (\"y\", c_int)]\n\nFEAT_POINTS = 14\nclass CWindow(Structure):\n _fields_ = [(\"x\", c_int),\n (\"y\", c_int),\n (\"width\", c_int),\n (\"angle\", c_int),\n (\"score\", c_float),\n (\"points\",CPoint*FEAT_POINTS)]\n\nclass FeatEnam(IntEnum):\n CHIN_0 = 0\n CHIN_1 = 1\n CHIN_2 = 2\n CHIN_3 = 3\n CHIN_4 = 4\n CHIN_5 = 5\n CHIN_6 = 6\n CHIN_7 = 7\n CHIN_8 = 8\n NOSE = 9\n EYE_LEFT = 10\n EYE_RIGHT = 11\n MOUTH_LEFT = 12\n MOUTH_RIGHT = 13\n FEAT_POINTS = 14\n\nlib = CDLL(\"/usr/local/lib/libPCN.so\")\n\ninit_detector = lib.init_detector\n#void *init_detector(const char *detection_model_path, \n# const char *pcn1_proto, const char *pcn2_proto, const char *pcn3_proto, \n# const char *tracking_model_path, const char *tracking_proto,\n# int min_face_size, float pyramid_scale_factor, float detection_thresh_stage1,\n# float detection_thresh_stage2, float detection_thresh_stage3, int tracking_period,\n# float tracking_thresh, int do_smooth)\ninit_detector.argtypes = [\n c_char_p, c_char_p, c_char_p, \n c_char_p, c_char_p, c_char_p,\n c_int,c_float,c_float,c_float,\n c_float,c_int,c_float,c_int]\ninit_detector.restype = c_void_p\n\n#CWindow* detect_faces(void* pcn, unsigned char* raw_img,size_t rows, size_t cols, int *lwin)\ndetect_faces = lib.detect_faces\ndetect_faces.argtypes = [c_void_p, POINTER(c_ubyte),c_size_t,c_size_t,POINTER(c_int)]\ndetect_faces.restype = POINTER(CWindow)\n\n#CWindow* detect_track_faces(void* pcn, unsigned char* raw_img,size_t rows, size_t cols, int *lwin)\ndetect_track_faces = lib.detect_track_faces\ndetect_track_faces.argtypes = [c_void_p, POINTER(c_ubyte),c_size_t,c_size_t,POINTER(c_int)]\ndetect_track_faces.restype = POINTER(CWindow)\n\n#void free_faces(CWindow* wins)\nfree_faces = lib.free_faces\nfree_faces.argtypes= [c_void_p]\n\n# void free_detector(void *pcn)\nfree_detector = lib.free_detector\nfree_detector.argtypes= [c_void_p]\n\nCYAN=(255,255,0)\nBLUE=(255,0,0)\nRED=(0,0,255)\nGREEN=(0,255,0)\nYELLOW=(0,255,255)\n\ndef DrawFace(win,img):\n width = 2\n x1 = win.x\n y1 = win.y\n x2 = win.width + win.x - 1\n y2 = win.width + win.y - 1\n centerX = (x1 + x2) / 2\n centerY = (y1 + y2) / 2\n angle = win.angle\n R = cv2.getRotationMatrix2D((centerX,centerY),angle,1)\n pts = np.array([[x1,y1,1],[x1,y2,1],[x2,y2,1],[x2,y1,1]], np.int32)\n pts = (pts @ R.T).astype(int) #Rotate points\n pts = pts.reshape((-1,1,2))\n cv2.polylines(img,[pts],True,CYAN,width)\n cv2.line(img, (pts[0][0][0],pts[0][0][1]), (pts[3][0][0],pts[3][0][1]), BLUE, width)\n \ndef DrawPoints(win,img):\n width = 2\n f = FeatEnam.NOSE\n cv2.circle(img,(win.points[f].x,win.points[f].y),width,GREEN,-1)\n f = FeatEnam.EYE_LEFT\n cv2.circle(img,(win.points[f].x,win.points[f].y),width,YELLOW,-1)\n f = FeatEnam.EYE_RIGHT\n cv2.circle(img,(win.points[f].x,win.points[f].y),width,YELLOW,-1)\n f = FeatEnam.MOUTH_LEFT\n cv2.circle(img,(win.points[f].x,win.points[f].y),width,RED,-1)\n f = FeatEnam.MOUTH_RIGHT\n cv2.circle(img,(win.points[f].x,win.points[f].y),width,RED,-1)\n for i in range(8):\n cv2.circle(img,(win.points[i].x,win.points[i].y),width,BLUE,-1)\n\ndef SetThreadCount(threads):\n os.environ['OMP_NUM_THREADS'] = str(threads)\n\ndef c_str(str_in):\n return c_char_p(str_in.encode('utf-8'))\n\nvideo_flag = 0\n\nif __name__==\"__main__\":\n\n SetThreadCount(1)\n path = 
'/usr/local/share/pcn/'\n detection_model_path = c_str(path + \"PCN.caffemodel\")\n pcn1_proto = c_str(path + \"PCN-1.prototxt\")\n pcn2_proto = c_str(path + \"PCN-2.prototxt\")\n pcn3_proto = c_str(path + \"PCN-3.prototxt\")\n tracking_model_path = c_str(path + \"PCN-Tracking.caffemodel\")\n tracking_proto = c_str(path + \"PCN-Tracking.prototxt\")\n if video_flag:\n cap = cv2.VideoCapture(0)\n detector = init_detector(detection_model_path,pcn1_proto,pcn2_proto,pcn3_proto,\n\t\t \ttracking_model_path,tracking_proto, \n\t\t \t40,1.45,0.5,0.5,0.98,30,0.9,1)\n width = cap.get(cv2.CAP_PROP_FRAME_WIDTH) \n height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT) \n fps = cap.get(cv2.CAP_PROP_FPS) \n while cap.isOpened():\n ret, frame = cap.read()\n if ret == False:\n break\n start = time.time()\n face_count = c_int(0)\n raw_data = frame.ctypes.data_as(POINTER(c_ubyte))\n windows = detect_track_faces(detector, raw_data, \n int(height), int(width),\n pointer(face_count))\n end = time.time()\n for i in range(face_count.value):\n DrawFace(windows[i],frame)\n DrawPoints(windows[i],frame)\n free_faces(windows)\n fps = int(1 / (end - start))\n cv2.putText(frame, str(fps) + \"fps\", (20, 45), 4, 1, (0, 0, 125))\n cv2.imshow('PCN', frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n else:\n detector = init_detector(detection_model_path,pcn1_proto,pcn2_proto,pcn3_proto,\n\t\t \ttracking_model_path,tracking_proto, \n\t\t \t40,1.45,0.5,0.5,0.98,30,0.9,0)\n for i in range(1, 27):\n frame = cv2.imread(\"imgs/\" + str(i) + \".jpg\")\n start = time.time()\n face_count = c_int(0)\n raw_data = frame.ctypes.data_as(POINTER(c_ubyte))\n windows = detect_faces(detector, raw_data, \n frame.shape[0], frame.shape[1],\n pointer(face_count))\n end = time.time()\n print(i, end - start, \"s\")\n for i in range(face_count.value):\n DrawFace(windows[i],frame)\n DrawPoints(windows[i],frame)\n free_faces(windows)\n cv2.imshow('PCN', frame)\n cv2.waitKey()\n\n free_detector(detector)\n\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]