repo_name (stringlengths 6-130) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence) | possible_versions (list) |
---|---|---|---|---|---|
PlatterDataset/feature | [
"2ebdc1b28498b709a0c91e60c19bfc731006bc50"
] | [
"synchronization/SyncNetInstance.py"
] | [
"#!/usr/bin/python\n#-*- coding: utf-8 -*-\n# Video 25 FPS, Audio 16000HZ\n\nimport torch\nimport numpy\nimport time, pdb, argparse, subprocess, os, math, glob\nimport cv2\nimport python_speech_features\n\nfrom scipy import signal\nfrom scipy.io import wavfile\nfrom SyncNetModel import *\nfrom shutil import rmtree\n\n\n# ==================== Get OFFSET ====================\ndef get_median(data1):\n data = sorted(data1)\n size = len(data)\n if size % 2 == 0: # 判断列表长度为偶数\n median = (data[size//2]+data[size//2-1])/2\n data[0] = median\n if size % 2 == 1: # 判断列表长度为奇数\n median = data[(size-1)//2]\n data[0] = median\n return data[0]\n\n\ndef calc_pdist(feat1, feat2, vshift=40):\n \n win_size = vshift*2+1\n\n feat2p = torch.nn.functional.pad(feat2,(0,0,vshift,vshift))\n\n dists = []\n\n for i in range(0,len(feat1)):\n\n dists.append(torch.nn.functional.pairwise_distance(feat1[[i],:].repeat(win_size, 1), feat2p[i:i+win_size,:]))\n\n return dists\n\n# ==================== MAIN DEF ====================\n\nclass SyncNetInstance(torch.nn.Module):\n\n def __init__(self, dropout = 0, num_layers_in_fc_layers = 1024):\n super(SyncNetInstance, self).__init__();\n\n self.__S__ = S(num_layers_in_fc_layers = num_layers_in_fc_layers).cuda();\n\n def evaluate(self, opt, videofile, num):\n\n self.__S__.eval();\n\n # ========== ==========\n # Convert files\n # ========== ==========\n\n if os.path.exists(os.path.join(opt.tmp_dir,opt.reference)):\n rmtree(os.path.join(opt.tmp_dir,opt.reference))\n\n os.makedirs(os.path.join(opt.tmp_dir,opt.reference))\n\n command = (\"ffmpeg -y -i %s -threads 1 -f image2 %s\" % (videofile,os.path.join(opt.tmp_dir,opt.reference,'%06d.jpg'))) \n output = subprocess.call(command, shell=True, stdout=None)\n\n command = (\"ffmpeg -y -i %s -async 1 -ac 1 -vn -acodec pcm_s16le -ar 16000 %s\" % (videofile,os.path.join(opt.tmp_dir,opt.reference,'audio.wav'))) \n output = subprocess.call(command, shell=True, stdout=None)\n \n # ========== ==========\n # Load video \n # ========== ==========\n\n images = []\n \n flist = glob.glob(os.path.join(opt.tmp_dir,opt.reference,'*.jpg'))\n flist.sort()\n\n for fname in flist:\n images.append(cv2.imread(fname))\n\n im = numpy.stack(images,axis=3)\n im = numpy.expand_dims(im,axis=0)\n im = numpy.transpose(im,(0,3,4,1,2))\n\n imtv = torch.autograd.Variable(torch.from_numpy(im.astype(float)).float())\n\n # ========== ==========\n # Load audio\n # ========== ==========\n\n sample_rate, audio = wavfile.read(os.path.join(opt.tmp_dir,opt.reference,'audio.wav'))\n mfcc = zip(*python_speech_features.mfcc(audio,sample_rate))\n mfcc = numpy.stack([numpy.array(i) for i in mfcc])\n torch.save(mfcc,'./mfcc_saver/mfcc'+str(num)+'.pt')\n ww = open('./mfcc_saver/mfcc'+str(num)+'.txt','w')\n ww.write(str(mfcc))\n cc = numpy.expand_dims(numpy.expand_dims(mfcc,axis=0),axis=0)\n cct = torch.autograd.Variable(torch.from_numpy(cc.astype(float)).float())\n\n # ========== ==========\n # Check audio and video input length\n # ========== ==========\n\n if (float(len(audio))/16000) != (float(len(images))/25) :\n print(\"WARNING: Audio (%.4fs) and video (%.4fs) lengths are different.\"%(float(len(audio))/16000,float(len(images))/25))\n\n min_length = min(len(images),math.floor(len(audio)/640))\n \n # ========== ==========\n # Generate video and audio feats\n # ========== ==========\n\n lastframe = min_length-5\n im_feat = []\n cc_feat = []\n wr = open('./'+str(opt.reference)+'_'+str(num)+'_resultoff.txt','w')\n tS = time.time()\n for i in range(0,lastframe,opt.batch_size):\n \n 
im_batch = [ imtv[:,:,vframe:vframe+5,:,:] for vframe in range(i,min(lastframe,i+opt.batch_size)) ]\n im_in = torch.cat(im_batch,0)\n im_out = self.__S__.forward_lip(im_in.cuda());\n im_feat.append(im_out.data.cpu())\n\n cc_batch = [ cct[:,:,:,vframe*4:vframe*4+20] for vframe in range(i,min(lastframe,i+opt.batch_size)) ]\n cc_in = torch.cat(cc_batch,0)\n cc_out = self.__S__.forward_aud(cc_in.cuda())\n cc_feat.append(cc_out.data.cpu())\n\n im_feat = torch.cat(im_feat,0)\n cc_feat = torch.cat(cc_feat,0)\n\n # ========== ==========\n # Compute offset\n # ========== ==========\n \n print('Compute time %.3f sec.' % (time.time()-tS))\n\n dists = calc_pdist(im_feat,cc_feat,vshift=opt.vshift)\n mdist = torch.mean(torch.stack(dists,1),1)\n off = []\n avg_dist = []\n\n for t in range(0,len(im_feat)):\n tt = 10000\n offy = 0\n of = 0\n of_m = 0\n dis_mid = 0\n dis_min = 1000000000\n for k in range(0,len(dists[t])):\n if t == 0:\n avg_dist.append(dists[t][k])\n else:\n avg_dist[k] += dists[t][k]\n\n if (t+1)% 100 == 0 or t == len(im_feat)-1:\n if avg_dist[k] < dis_min:\n dis_min = avg_dist[k]\n of = k\n\n if dists[t][k]<tt:\n tt = dists[t][k]\n offy = k\n if (t+1)%100 == 0 or t == len(im_feat) -1:\n dis_mid = get_median(avg_dist)\n for k in range(len(avg_dist)):\n avg_dist[k] = 0\n wr.write(str(t%100)+' ')\n wr.write(str((opt.vshift-of) * 0.04)+'s ')\n if (t+1)%100 != 0:\n wr.write(\"conf = \"+str((dis_mid.item()-dis_min.item())/((t+1)%100))+'\\n')#confidence改成medium\n else:\n wr.write(\"conf = \"+str((dis_mid.item()-dis_min.item())/100)+'\\n')\n off.append(opt.vshift-offy)\n off = numpy.array(off)\n\n minval, minidx = torch.min(mdist,0)\n\n offset = opt.vshift-minidx\n conf = torch.median(mdist) - minval\n\n fdist = numpy.stack([dist[minidx].numpy() for dist in dists])\n # fdist = numpy.pad(fdist, (3,3), 'constant', constant_values=15)\n fconf = torch.median(mdist).numpy() - fdist\n fconfm = signal.medfilt(fconf,kernel_size=9)\n \n numpy.set_printoptions(formatter={'float': '{: 0.3f}'.format})\n print('Framewise conf: ')\n print(fconfm)\n print('AV offset: \\t%d \\nMin dist: \\t%.3f\\nConfidence: \\t%.3f' % (offset,minval,conf))\n\n dists_npy = numpy.array([ dist.numpy() for dist in dists ])\n return off, conf.numpy(), dists_npy\n\n def extract_feature(self, opt, videofile):\n\n self.__S__.eval();\n \n # ========== ==========\n # Load video \n # ========== ==========\n cap = cv2.VideoCapture(videofile)\n\n frame_num = 1;\n images = []\n while frame_num:\n frame_num += 1\n ret, image = cap.read()\n if ret == 0:\n break\n\n images.append(image)\n\n im = numpy.stack(images,axis=3)\n im = numpy.expand_dims(im,axis=0)\n im = numpy.transpose(im,(0,3,4,1,2))\n\n imtv = torch.autograd.Variable(torch.from_numpy(im.astype(float)).float())\n \n # ========== ==========\n # Generate video feats\n # ========== ==========\n\n lastframe = len(images)-4\n im_feat = []\n\n tS = time.time()\n for i in range(0,lastframe,opt.batch_size):\n \n im_batch = [ imtv[:,:,vframe:vframe+5,:,:] for vframe in range(i,min(lastframe,i+opt.batch_size)) ]\n im_in = torch.cat(im_batch,0)\n im_out = self.__S__.forward_lipfeat(im_in.cuda());\n im_feat.append(im_out.data.cpu())\n\n im_feat = torch.cat(im_feat,0)\n\n # ========== ==========\n # Compute offset\n # ========== ==========\n \n print('Compute time %.3f sec.' 
% (time.time()-tS))\n\n return im_feat\n\n\n def loadParameters(self, path):\n loaded_state = torch.load(path, map_location=lambda storage, loc: storage);\n\n self_state = self.__S__.state_dict();\n\n for name, param in loaded_state.items():\n\n self_state[name].copy_(param);\n"
] | [
[
"numpy.expand_dims",
"torch.cat",
"scipy.signal.medfilt",
"torch.load",
"torch.min",
"numpy.set_printoptions",
"torch.median",
"numpy.stack",
"numpy.transpose",
"torch.stack",
"numpy.array",
"torch.nn.functional.pad"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
googleinterns/deepspeech-reconstruction | [
"72f28d1e9064d221b3421c302a8725a8c71859ee"
] | [
"src/deepspeech_training/util/config.py"
] | [
"from __future__ import absolute_import, division, print_function\n\nimport os\nimport sys\nimport tensorflow.compat.v1 as tfv1\n\nfrom attrdict import AttrDict\nfrom xdg import BaseDirectory as xdg\n\nfrom src.flags import FLAGS\nfrom .gpu import get_available_gpus\nfrom .logging import log_error\nfrom .text import Alphabet, UTF8Alphabet\nfrom .helpers import parse_file_size\n\nclass ConfigSingleton:\n _config = None\n\n def __getattr__(self, name):\n if not ConfigSingleton._config:\n raise RuntimeError(\"Global configuration not yet initialized.\")\n if not hasattr(ConfigSingleton._config, name):\n raise RuntimeError(\"Configuration option {} not found in config.\".format(name))\n return ConfigSingleton._config[name]\n\n\nConfig = ConfigSingleton() # pylint: disable=invalid-name\n\ndef initialize_globals():\n c = AttrDict()\n\n # Read-buffer\n FLAGS.read_buffer = parse_file_size(FLAGS.read_buffer)\n\n # Set default dropout rates\n if FLAGS.dropout_rate2 < 0:\n FLAGS.dropout_rate2 = FLAGS.dropout_rate\n if FLAGS.dropout_rate3 < 0:\n FLAGS.dropout_rate3 = FLAGS.dropout_rate\n if FLAGS.dropout_rate6 < 0:\n FLAGS.dropout_rate6 = FLAGS.dropout_rate\n\n # Set default checkpoint dir\n if not FLAGS.checkpoint_dir:\n FLAGS.checkpoint_dir = xdg.save_data_path(os.path.join('deepspeech', 'checkpoints'))\n\n if FLAGS.load_train not in ['last', 'best', 'init', 'auto']:\n FLAGS.load_train = 'auto'\n\n if FLAGS.load_evaluate not in ['last', 'best', 'auto']:\n FLAGS.load_evaluate = 'auto'\n\n # Set default summary dir\n if not FLAGS.summary_dir:\n FLAGS.summary_dir = xdg.save_data_path(os.path.join('deepspeech', 'summaries'))\n\n # Standard session configuration that'll be used for all new sessions.\n c.session_config = tfv1.ConfigProto(allow_soft_placement=True, log_device_placement=FLAGS.log_placement,\n inter_op_parallelism_threads=FLAGS.inter_op_parallelism_threads,\n intra_op_parallelism_threads=FLAGS.intra_op_parallelism_threads,\n gpu_options=tfv1.GPUOptions(allow_growth=FLAGS.use_allow_growth))\n\n # CPU device\n c.cpu_device = '/cpu:0'\n\n # Available GPU devices\n c.available_devices = get_available_gpus(c.session_config)\n\n # If there is no GPU available, we fall back to CPU based operation\n if not c.available_devices:\n c.available_devices = [c.cpu_device]\n\n if FLAGS.utf8:\n c.alphabet = UTF8Alphabet()\n else:\n c.alphabet = Alphabet(os.path.abspath(FLAGS.alphabet_config_path))\n\n # Geometric Constants\n # ===================\n\n # For an explanation of the meaning of the geometric constants, please refer to\n # doc/Geometry.md\n\n # Number of MFCC features\n c.n_input = 26 # TODO: Determine this programmatically from the sample rate\n\n # The number of frames in the context\n c.n_context = 9 # TODO: Determine the optimal value using a validation data set\n\n # Number of units in hidden layers\n c.n_hidden = FLAGS.n_hidden\n\n c.n_hidden_1 = c.n_hidden\n\n c.n_hidden_2 = c.n_hidden\n\n c.n_hidden_5 = c.n_hidden\n\n # LSTM cell state dimension\n c.n_cell_dim = c.n_hidden\n\n # The number of units in the third layer, which feeds in to the LSTM\n c.n_hidden_3 = c.n_cell_dim\n\n # Units in the sixth layer = number of characters in the target language plus one\n c.n_hidden_6 = c.alphabet.size() + 1 # +1 for CTC blank label\n\n # Size of audio window in samples\n if (FLAGS.feature_win_len * FLAGS.audio_sample_rate) % 1000 != 0:\n log_error('--feature_win_len value ({}) in milliseconds ({}) multiplied '\n 'by --audio_sample_rate value ({}) must be an integer value. 
Adjust '\n 'your --feature_win_len value or resample your audio accordingly.'\n ''.format(FLAGS.feature_win_len, FLAGS.feature_win_len / 1000, FLAGS.audio_sample_rate))\n sys.exit(1)\n\n c.audio_window_samples = FLAGS.audio_sample_rate * (FLAGS.feature_win_len / 1000)\n\n # Stride for feature computations in samples\n if (FLAGS.feature_win_step * FLAGS.audio_sample_rate) % 1000 != 0:\n log_error('--feature_win_step value ({}) in milliseconds ({}) multiplied '\n 'by --audio_sample_rate value ({}) must be an integer value. Adjust '\n 'your --feature_win_step value or resample your audio accordingly.'\n ''.format(FLAGS.feature_win_step, FLAGS.feature_win_step / 1000, FLAGS.audio_sample_rate))\n sys.exit(1)\n\n c.audio_step_samples = FLAGS.audio_sample_rate * (FLAGS.feature_win_step / 1000)\n\n if FLAGS.one_shot_infer:\n if not os.path.exists(FLAGS.one_shot_infer):\n log_error('Path specified in --one_shot_infer is not a valid file.')\n sys.exit(1)\n\n if FLAGS.train_cudnn and FLAGS.load_cudnn:\n log_error('Trying to use --train_cudnn, but --load_cudnn '\n 'was also specified. The --load_cudnn flag is only '\n 'needed when converting a CuDNN RNN checkpoint to '\n 'a CPU-capable graph. If your system is capable of '\n 'using CuDNN RNN, you can just specify the CuDNN RNN '\n 'checkpoint normally with --save_checkpoint_dir.')\n sys.exit(1)\n\n # If separate save and load flags were not specified, default to load and save\n # from the same dir.\n if not FLAGS.save_checkpoint_dir:\n FLAGS.save_checkpoint_dir = FLAGS.checkpoint_dir\n\n if not FLAGS.load_checkpoint_dir:\n FLAGS.load_checkpoint_dir = FLAGS.checkpoint_dir\n\n ConfigSingleton._config = c # pylint: disable=protected-access\n"
] | [
[
"tensorflow.compat.v1.GPUOptions"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Jasonkks/mlcnet | [
"8f89c860c709733c8baa663607004fc48d76291d",
"8f89c860c709733c8baa663607004fc48d76291d",
"8f89c860c709733c8baa663607004fc48d76291d"
] | [
"pcdet/datasets/augmentor/data_augmentor.py",
"pcdet/datasets/nuscenes/nuscenes_utils.py",
"pcdet/datasets/processor/data_processor.py"
] | [
"from functools import partial\nimport torch\nimport random\nimport numpy as np\nfrom ...ops.roiaware_pool3d import roiaware_pool3d_utils\nfrom ...utils import common_utils, box_utils\nfrom . import augmentor_utils, database_sampler\n\n\nclass DataAugmentor(object):\n def __init__(self, root_path, augmentor_configs, class_names, logger=None):\n self.root_path = root_path\n self.class_names = class_names\n self.logger = logger\n\n self.data_augmentor_queue = []\n aug_config_list = augmentor_configs if isinstance(augmentor_configs, list) \\\n else augmentor_configs.AUG_CONFIG_LIST\n\n for cur_cfg in aug_config_list:\n if not isinstance(augmentor_configs, list):\n if cur_cfg.NAME in augmentor_configs.DISABLE_AUG_LIST:\n continue\n cur_augmentor = getattr(self, cur_cfg.NAME)(config=cur_cfg)\n self.data_augmentor_queue.append(cur_augmentor)\n\n def gt_sampling(self, config=None):\n db_sampler = database_sampler.DataBaseSampler(\n root_path=self.root_path,\n sampler_cfg=config,\n class_names=self.class_names,\n logger=self.logger\n )\n return db_sampler\n\n def __getstate__(self):\n d = dict(self.__dict__)\n del d['logger']\n return d\n\n def __setstate__(self, d):\n self.__dict__.update(d)\n\n def object_size_normalization(self, data_dict=None, config=None):\n if data_dict is None:\n return partial(self.object_size_normalization, config=config)\n \n gt_boxes, points = data_dict['gt_boxes'], data_dict['points']\n if gt_boxes.shape[1] > 7:\n gt_boxes = gt_boxes[:,:7]\n offset = np.array(config['OFFSET'])\n # get masks of points inside boxes\n point_masks = roiaware_pool3d_utils.points_in_boxes_cpu(\n torch.from_numpy(points[:, 0:3]), torch.from_numpy(gt_boxes)).numpy()\n\n num_obj = gt_boxes.shape[0]\n obj_points_list = []\n gt_boxes_size = gt_boxes[:, 3:6]\n new_gt_boxes_size = gt_boxes_size + offset\n scale_factor = new_gt_boxes_size / gt_boxes_size\n # scale the objects\n for i in range(num_obj):\n point_mask = point_masks[i]\n obj_points = points[point_mask > 0] # get object points within the gt box\n obj_points[:, :3] -= gt_boxes[i, :3] # relative to box center\n obj_points[:, :3] *= scale_factor[i] # scale\n obj_points[:, :3] += gt_boxes[i, :3] # back to global coordinate\n obj_points_list.append(obj_points)\n\n # remove points inside boxes\n points = box_utils.remove_points_in_boxes3d(points, gt_boxes)\n # scale the boxes\n gt_boxes[:, 3:6] *= scale_factor\n # remove points inside boxes\n points = box_utils.remove_points_in_boxes3d(points, gt_boxes)\n\n # merge points\n # points = box_utils.remove_points_in_boxes3d(points, gt_boxes)\n obj_points = np.concatenate(obj_points_list, axis=0)\n points = np.concatenate([points, obj_points], axis=0)\n\n data_dict['points'] = points\n data_dict['gt_boxes'][:,:7] = gt_boxes\n return data_dict\n \n def random_world_flip(self, data_dict=None, config=None):\n if data_dict is None:\n return partial(self.random_world_flip, config=config)\n \n gt_boxes = data_dict['gt_boxes'] if 'gt_boxes' in data_dict else None\n points = data_dict['points']\n\n for cur_axis in config['ALONG_AXIS_LIST']:\n assert cur_axis in ['x', 'y']\n if 'gt_boxes' in data_dict:\n gt_boxes, points, world_flip_enabled = getattr(augmentor_utils, 'random_flip_along_%s' % cur_axis)(\n gt_boxes, points, return_enable=True\n )\n else:\n points, world_flip_enabled = getattr(augmentor_utils, 'random_flip_along_%s_points' % cur_axis)(\n points, return_enable=True\n )\n if 'gt_boxes' in data_dict:\n data_dict['gt_boxes'] = gt_boxes\n data_dict['points'] = points\n 
data_dict['world_flip_enabled'] = world_flip_enabled\n return data_dict\n\n def random_world_rotation(self, data_dict=None, config=None):\n if data_dict is None:\n return partial(self.random_world_rotation, config=config)\n rot_range = config['WORLD_ROT_ANGLE']\n if not isinstance(rot_range, list):\n rot_range = [-rot_range, rot_range]\n\n if 'gt_boxes' in data_dict:\n gt_boxes, points, world_rotation = augmentor_utils.global_rotation(\n data_dict['gt_boxes'], data_dict['points'], rot_range=rot_range, return_rotation=True\n )\n else:\n points, world_rotation = augmentor_utils.global_rotation_points(\n data_dict['points'], rot_range=rot_range, return_rotation=True\n )\n\n if 'gt_boxes' in data_dict:\n data_dict['gt_boxes'] = gt_boxes\n data_dict['points'] = points\n data_dict['world_rotation'] = world_rotation\n return data_dict\n\n def random_world_scaling(self, data_dict=None, config=None):\n if data_dict is None:\n return partial(self.random_world_scaling, config=config)\n if 'gt_boxes' in data_dict:\n gt_boxes, points, scale_ratio = augmentor_utils.global_scaling(\n data_dict['gt_boxes'], data_dict['points'], config['WORLD_SCALE_RANGE']\n )\n else:\n points, scale_ratio = augmentor_utils.global_scaling_points(data_dict['points'], config['WORLD_SCALE_RANGE'])\n \n data_dict['world_scaling'] = scale_ratio\n if 'gt_boxes' in data_dict:\n data_dict['gt_boxes'] = gt_boxes\n data_dict['points'] = points\n return data_dict\n\n def random_world_scaling_xyz(self, data_dict=None, config=None):\n if data_dict is None:\n return partial(self.random_world_scaling_xyz, config=config)\n gt_boxes = data_dict['gt_boxes']\n points = data_dict['points']\n scale_range = config['SCALE_RANGE']\n noise_scale = np.random.uniform(scale_range[0], scale_range[1], 3)\n points[:, :3] *= noise_scale\n gt_boxes[:, :3] *= noise_scale\n gt_boxes[:, 3:6] *= noise_scale\n data_dict['points'] = points\n data_dict['gt_boxes'] = gt_boxes\n data_dict['world_scaling_xyz'] = noise_scale\n return data_dict\n\n def jitter_point_cloud(self, data_dict=None, config=None):\n if data_dict is None:\n return partial(self.jitter_point_cloud, config=config)\n sigma = config['SIGMA']\n clip = config['CLIP']\n assert(clip > 0)\n points = data_dict['points']\n jittered_data = np.clip(sigma * np.random.randn(points.shape[0], points.shape[1]), -1*clip, clip)\n points += jittered_data\n data_dict['points'] = points\n data_dict['jittered'] = True\n data_dict['jitter_values'] = jittered_data\n return data_dict\n\n def random_world_shift(self, data_dict=None, config=None):\n if data_dict is None:\n return partial(self.random_world_shift, config=config)\n shift_range = config['RANGE']\n shifts = np.random.uniform(-shift_range, shift_range, 3)\n data_dict['points'] += shifts\n data_dict['world_shifts'] = shifts\n return data_dict\n\n def forward(self, data_dict, augment=True):\n \"\"\"\n Args:\n data_dict:\n points: (N, 3 + C_in)\n gt_boxes: optional, (N, 7) [x, y, z, dx, dy, dz, heading]\n gt_names: optional, (N), string\n ...\n\n Returns:\n \"\"\"\n if augment:\n for cur_augmentor in self.data_augmentor_queue:\n data_dict = cur_augmentor(data_dict=data_dict)\n\n if 'gt_boxes' in data_dict:\n data_dict['gt_boxes'][:, 6] = common_utils.limit_period(\n data_dict['gt_boxes'][:, 6], offset=0.5, period=2 * np.pi\n )\n if 'road_plane' in data_dict:\n data_dict.pop('road_plane')\n if 'gt_boxes' in data_dict and 'gt_boxes_mask' in data_dict:\n gt_boxes_mask = data_dict['gt_boxes_mask']\n data_dict['gt_boxes'] = data_dict['gt_boxes'][gt_boxes_mask]\n 
data_dict['gt_names'] = data_dict['gt_names'][gt_boxes_mask]\n data_dict.pop('gt_boxes_mask')\n return data_dict\n",
"\"\"\"\nThe NuScenes data pre-processing and evaluation is modified from\nhttps://github.com/traveller59/second.pytorch and https://github.com/poodarchu/Det3D\n\"\"\"\n\nimport operator\nfrom functools import reduce\nfrom pathlib import Path\nimport pickle\nimport numpy as np\nimport tqdm\nfrom nuscenes.utils.data_classes import Box\nfrom nuscenes.utils.geometry_utils import transform_matrix\nfrom pyquaternion import Quaternion\n\nmap_name_from_general_to_detection = {\n 'human.pedestrian.adult': 'pedestrian',\n 'human.pedestrian.child': 'pedestrian',\n 'human.pedestrian.wheelchair': 'ignore',\n 'human.pedestrian.stroller': 'ignore',\n 'human.pedestrian.personal_mobility': 'ignore',\n 'human.pedestrian.police_officer': 'pedestrian',\n 'human.pedestrian.construction_worker': 'pedestrian',\n 'animal': 'ignore',\n 'vehicle.car': 'car',\n 'vehicle.motorcycle': 'motorcycle',\n 'vehicle.bicycle': 'bicycle',\n 'vehicle.bus.bendy': 'bus',\n 'vehicle.bus.rigid': 'bus',\n 'vehicle.truck': 'truck',\n 'vehicle.construction': 'construction_vehicle',\n 'vehicle.emergency.ambulance': 'ignore',\n 'vehicle.emergency.police': 'ignore',\n 'vehicle.trailer': 'trailer',\n 'movable_object.barrier': 'barrier',\n 'movable_object.trafficcone': 'traffic_cone',\n 'movable_object.pushable_pullable': 'ignore',\n 'movable_object.debris': 'ignore',\n 'static_object.bicycle_rack': 'ignore',\n}\n\n\ncls_attr_dist = {\n 'barrier': {\n 'cycle.with_rider': 0,\n 'cycle.without_rider': 0,\n 'pedestrian.moving': 0,\n 'pedestrian.sitting_lying_down': 0,\n 'pedestrian.standing': 0,\n 'vehicle.moving': 0,\n 'vehicle.parked': 0,\n 'vehicle.stopped': 0,\n },\n 'bicycle': {\n 'cycle.with_rider': 2791,\n 'cycle.without_rider': 8946,\n 'pedestrian.moving': 0,\n 'pedestrian.sitting_lying_down': 0,\n 'pedestrian.standing': 0,\n 'vehicle.moving': 0,\n 'vehicle.parked': 0,\n 'vehicle.stopped': 0,\n },\n 'bus': {\n 'cycle.with_rider': 0,\n 'cycle.without_rider': 0,\n 'pedestrian.moving': 0,\n 'pedestrian.sitting_lying_down': 0,\n 'pedestrian.standing': 0,\n 'vehicle.moving': 9092,\n 'vehicle.parked': 3294,\n 'vehicle.stopped': 3881,\n },\n 'car': {\n 'cycle.with_rider': 0,\n 'cycle.without_rider': 0,\n 'pedestrian.moving': 0,\n 'pedestrian.sitting_lying_down': 0,\n 'pedestrian.standing': 0,\n 'vehicle.moving': 114304,\n 'vehicle.parked': 330133,\n 'vehicle.stopped': 46898,\n },\n 'construction_vehicle': {\n 'cycle.with_rider': 0,\n 'cycle.without_rider': 0,\n 'pedestrian.moving': 0,\n 'pedestrian.sitting_lying_down': 0,\n 'pedestrian.standing': 0,\n 'vehicle.moving': 882,\n 'vehicle.parked': 11549,\n 'vehicle.stopped': 2102,\n },\n 'ignore': {\n 'cycle.with_rider': 307,\n 'cycle.without_rider': 73,\n 'pedestrian.moving': 0,\n 'pedestrian.sitting_lying_down': 0,\n 'pedestrian.standing': 0,\n 'vehicle.moving': 165,\n 'vehicle.parked': 400,\n 'vehicle.stopped': 102,\n },\n 'motorcycle': {\n 'cycle.with_rider': 4233,\n 'cycle.without_rider': 8326,\n 'pedestrian.moving': 0,\n 'pedestrian.sitting_lying_down': 0,\n 'pedestrian.standing': 0,\n 'vehicle.moving': 0,\n 'vehicle.parked': 0,\n 'vehicle.stopped': 0,\n },\n 'pedestrian': {\n 'cycle.with_rider': 0,\n 'cycle.without_rider': 0,\n 'pedestrian.moving': 157444,\n 'pedestrian.sitting_lying_down': 13939,\n 'pedestrian.standing': 46530,\n 'vehicle.moving': 0,\n 'vehicle.parked': 0,\n 'vehicle.stopped': 0,\n },\n 'traffic_cone': {\n 'cycle.with_rider': 0,\n 'cycle.without_rider': 0,\n 'pedestrian.moving': 0,\n 'pedestrian.sitting_lying_down': 0,\n 'pedestrian.standing': 0,\n 'vehicle.moving': 0,\n 
'vehicle.parked': 0,\n 'vehicle.stopped': 0,\n },\n 'trailer': {\n 'cycle.with_rider': 0,\n 'cycle.without_rider': 0,\n 'pedestrian.moving': 0,\n 'pedestrian.sitting_lying_down': 0,\n 'pedestrian.standing': 0,\n 'vehicle.moving': 3421,\n 'vehicle.parked': 19224,\n 'vehicle.stopped': 1895,\n },\n 'truck': {\n 'cycle.with_rider': 0,\n 'cycle.without_rider': 0,\n 'pedestrian.moving': 0,\n 'pedestrian.sitting_lying_down': 0,\n 'pedestrian.standing': 0,\n 'vehicle.moving': 21339,\n 'vehicle.parked': 55626,\n 'vehicle.stopped': 11097,\n },\n}\n\n\ndef get_available_scenes(nusc):\n available_scenes = []\n print('total scene num:', len(nusc.scene))\n for scene in nusc.scene:\n scene_token = scene['token']\n scene_rec = nusc.get('scene', scene_token)\n sample_rec = nusc.get('sample', scene_rec['first_sample_token'])\n sd_rec = nusc.get('sample_data', sample_rec['data']['LIDAR_TOP'])\n has_more_frames = True\n scene_not_exist = False\n while has_more_frames:\n lidar_path, boxes, _ = nusc.get_sample_data(sd_rec['token'])\n if not Path(lidar_path).exists():\n scene_not_exist = True\n break\n else:\n break\n # if not sd_rec['next'] == '':\n # sd_rec = nusc.get('sample_data', sd_rec['next'])\n # else:\n # has_more_frames = False\n if scene_not_exist:\n continue\n available_scenes.append(scene)\n print('exist scene num:', len(available_scenes))\n return available_scenes\n\n\ndef get_sample_data(nusc, sample_data_token, selected_anntokens=None):\n \"\"\"\n Returns the data path as well as all annotations related to that sample_data.\n Note that the boxes are transformed into the current sensor's coordinate frame.\n Args:\n nusc:\n sample_data_token: Sample_data token.\n selected_anntokens: If provided only return the selected annotation.\n\n Returns:\n\n \"\"\"\n # Retrieve sensor & pose records\n sd_record = nusc.get('sample_data', sample_data_token)\n cs_record = nusc.get('calibrated_sensor', sd_record['calibrated_sensor_token'])\n sensor_record = nusc.get('sensor', cs_record['sensor_token'])\n pose_record = nusc.get('ego_pose', sd_record['ego_pose_token'])\n\n data_path = nusc.get_sample_data_path(sample_data_token)\n\n if sensor_record['modality'] == 'camera':\n cam_intrinsic = np.array(cs_record['camera_intrinsic'])\n imsize = (sd_record['width'], sd_record['height'])\n else:\n cam_intrinsic = imsize = None\n\n # Retrieve all sample annotations and map to sensor coordinate system.\n if selected_anntokens is not None:\n boxes = list(map(nusc.get_box, selected_anntokens))\n else:\n boxes = nusc.get_boxes(sample_data_token)\n\n # Make list of Box objects including coord system transforms.\n box_list = []\n for box in boxes:\n box.velocity = nusc.box_velocity(box.token)\n # Move box to ego vehicle coord system\n box.translate(-np.array(pose_record['translation']))\n box.rotate(Quaternion(pose_record['rotation']).inverse)\n\n # Move box to sensor coord system\n box.translate(-np.array(cs_record['translation']))\n box.rotate(Quaternion(cs_record['rotation']).inverse)\n\n box_list.append(box)\n\n return data_path, box_list, cam_intrinsic\n\n\ndef quaternion_yaw(q: Quaternion) -> float:\n \"\"\"\n Calculate the yaw angle from a quaternion.\n Note that this only works for a quaternion that represents a box in lidar or global coordinate frame.\n It does not work for a box in the camera frame.\n :param q: Quaternion of interest.\n :return: Yaw angle in radians.\n \"\"\"\n\n # Project into xy plane.\n v = np.dot(q.rotation_matrix, np.array([1, 0, 0]))\n\n # Measure yaw using arctan.\n yaw = np.arctan2(v[1], 
v[0])\n\n return yaw\n\n\ndef fill_trainval_infos(data_path, nusc, train_scenes, val_scenes, test=False, max_sweeps=10):\n train_nusc_infos = []\n val_nusc_infos = []\n progress_bar = tqdm.tqdm(total=len(nusc.sample), desc='create_info', dynamic_ncols=True)\n\n ref_chan = 'LIDAR_TOP' # The radar channel from which we track back n sweeps to aggregate the point cloud.\n chan = 'LIDAR_TOP' # The reference channel of the current sample_rec that the point clouds are mapped to.\n\n annotation_tokens = []\n gt_box_coordinates = []\n\n for index, sample in enumerate(nusc.sample):\n progress_bar.update()\n\n ref_sd_token = sample['data'][ref_chan]\n ref_sd_rec = nusc.get('sample_data', ref_sd_token)\n ref_cs_rec = nusc.get('calibrated_sensor', ref_sd_rec['calibrated_sensor_token'])\n ref_pose_rec = nusc.get('ego_pose', ref_sd_rec['ego_pose_token'])\n ref_time = 1e-6 * ref_sd_rec['timestamp']\n\n ref_lidar_path, ref_boxes, _ = get_sample_data(nusc, ref_sd_token)\n\n ref_cam_front_token = sample['data']['CAM_FRONT']\n ref_cam_path, _, ref_cam_intrinsic = nusc.get_sample_data(ref_cam_front_token)\n\n # Homogeneous transform from ego car frame to reference frame\n ref_from_car = transform_matrix(\n ref_cs_rec['translation'], Quaternion(ref_cs_rec['rotation']), inverse=True\n )\n\n # Homogeneous transformation matrix from global to _current_ ego car frame\n car_from_global = transform_matrix(\n ref_pose_rec['translation'], Quaternion(ref_pose_rec['rotation']), inverse=True,\n )\n\n info = {\n 'lidar_path': Path(ref_lidar_path).relative_to(data_path).__str__(),\n 'cam_front_path': Path(ref_cam_path).relative_to(data_path).__str__(),\n 'cam_intrinsic': ref_cam_intrinsic,\n 'token': sample['token'],\n 'sweeps': [],\n 'ref_from_car': ref_from_car,\n 'car_from_global': car_from_global,\n 'timestamp': ref_time,\n }\n\n sample_data_token = sample['data'][chan]\n curr_sd_rec = nusc.get('sample_data', sample_data_token)\n sweeps = []\n while len(sweeps) < max_sweeps - 1:\n if curr_sd_rec['prev'] == '':\n if len(sweeps) == 0:\n sweep = {\n 'lidar_path': Path(ref_lidar_path).relative_to(data_path).__str__(),\n 'sample_data_token': curr_sd_rec['token'],\n 'transform_matrix': None,\n 'time_lag': curr_sd_rec['timestamp'] * 0,\n }\n sweeps.append(sweep)\n else:\n sweeps.append(sweeps[-1])\n else:\n curr_sd_rec = nusc.get('sample_data', curr_sd_rec['prev'])\n\n # Get past pose\n current_pose_rec = nusc.get('ego_pose', curr_sd_rec['ego_pose_token'])\n global_from_car = transform_matrix(\n current_pose_rec['translation'], Quaternion(current_pose_rec['rotation']), inverse=False,\n )\n\n # Homogeneous transformation matrix from sensor coordinate frame to ego car frame.\n current_cs_rec = nusc.get(\n 'calibrated_sensor', curr_sd_rec['calibrated_sensor_token']\n )\n car_from_current = transform_matrix(\n current_cs_rec['translation'], Quaternion(current_cs_rec['rotation']), inverse=False,\n )\n\n tm = reduce(np.dot, [ref_from_car, car_from_global, global_from_car, car_from_current])\n\n lidar_path = nusc.get_sample_data_path(curr_sd_rec['token'])\n\n time_lag = ref_time - 1e-6 * curr_sd_rec['timestamp']\n\n sweep = {\n 'lidar_path': Path(lidar_path).relative_to(data_path).__str__(),\n 'sample_data_token': curr_sd_rec['token'],\n 'transform_matrix': tm,\n 'global_from_car': global_from_car,\n 'car_from_current': car_from_current,\n 'time_lag': time_lag,\n }\n sweeps.append(sweep)\n\n info['sweeps'] = sweeps\n\n assert len(info['sweeps']) == max_sweeps - 1, \\\n f\"sweep {curr_sd_rec['token']} only has {len(info['sweeps'])} 
sweeps, \" \\\n f\"you should duplicate to sweep num {max_sweeps - 1}\"\n\n if not test:\n annotations = [nusc.get('sample_annotation', token) for token in sample['anns']]\n\n # the filtering gives 0.5~1 map improvement\n num_lidar_pts = np.array([anno['num_lidar_pts'] for anno in annotations])\n num_radar_pts = np.array([anno['num_radar_pts'] for anno in annotations])\n mask = (num_lidar_pts + num_radar_pts > 0)\n\n locs = np.array([b.center for b in ref_boxes]).reshape(-1, 3)\n dims = np.array([b.wlh for b in ref_boxes]).reshape(-1, 3)[:, [1, 0, 2]] # wlh == > dxdydz (lwh)\n velocity = np.array([b.velocity for b in ref_boxes]).reshape(-1, 3)\n rots = np.array([quaternion_yaw(b.orientation) for b in ref_boxes]).reshape(-1, 1)\n names = np.array([b.name for b in ref_boxes])\n tokens = np.array([b.token for b in ref_boxes])\n gt_boxes = np.concatenate([locs, dims, rots, velocity[:, :2]], axis=1)\n\n assert len(annotations) == len(gt_boxes) == len(velocity)\n\n info['gt_boxes'] = gt_boxes[mask, :]\n info['gt_boxes_velocity'] = velocity[mask, :]\n info['gt_names'] = np.array([map_name_from_general_to_detection[name] for name in names])[mask]\n info['gt_boxes_token'] = tokens[mask]\n info['num_lidar_pts'] = num_lidar_pts[mask]\n info['num_radar_pts'] = num_radar_pts[mask]\n\n if sample['scene_token'] in train_scenes:\n train_nusc_infos.append(info)\n else:\n val_nusc_infos.append(info)\n\n progress_bar.close()\n return train_nusc_infos, val_nusc_infos\n\n\ndef boxes_lidar_to_nusenes(det_info):\n boxes3d = det_info['boxes_lidar']\n scores = det_info['score']\n labels = det_info['pred_labels']\n\n box_list = []\n for k in range(boxes3d.shape[0]):\n quat = Quaternion(axis=[0, 0, 1], radians=boxes3d[k, 6])\n velocity = (*boxes3d[k, 7:9], 0.0) if boxes3d.shape[1] == 9 else (0.0, 0.0, 0.0)\n box = Box(\n boxes3d[k, :3],\n boxes3d[k, [4, 3, 5]], # wlh\n quat, label=labels[k], score=scores[k], velocity=velocity,\n )\n box_list.append(box)\n return box_list\n\n\ndef lidar_nusc_box_to_global(nusc, boxes, sample_token):\n s_record = nusc.get('sample', sample_token)\n sample_data_token = s_record['data']['LIDAR_TOP']\n\n sd_record = nusc.get('sample_data', sample_data_token)\n cs_record = nusc.get('calibrated_sensor', sd_record['calibrated_sensor_token'])\n sensor_record = nusc.get('sensor', cs_record['sensor_token'])\n pose_record = nusc.get('ego_pose', sd_record['ego_pose_token'])\n\n data_path = nusc.get_sample_data_path(sample_data_token)\n box_list = []\n for box in boxes:\n # Move box to ego vehicle coord system\n box.rotate(Quaternion(cs_record['rotation']))\n box.translate(np.array(cs_record['translation']))\n # Move box to global coord system\n box.rotate(Quaternion(pose_record['rotation']))\n box.translate(np.array(pose_record['translation']))\n box_list.append(box)\n return box_list\n\n\ndef transform_det_annos_to_nusc_annos(det_annos, nusc):\n nusc_annos = {\n 'results': {},\n 'meta': None,\n }\n\n for det in det_annos:\n annos = []\n box_list = boxes_lidar_to_nusenes(det)\n box_list = lidar_nusc_box_to_global(\n nusc=nusc, boxes=box_list, sample_token=det['metadata']['token']\n )\n\n for k, box in enumerate(box_list):\n name = det['name'][k]\n if np.sqrt(box.velocity[0] ** 2 + box.velocity[1] ** 2) > 0.2:\n if name in ['car', 'construction_vehicle', 'bus', 'truck', 'trailer']:\n attr = 'vehicle.moving'\n elif name in ['bicycle', 'motorcycle']:\n attr = 'cycle.with_rider'\n else:\n attr = None\n else:\n if name in ['pedestrian']:\n attr = 'pedestrian.standing'\n elif name in ['bus']:\n attr = 
'vehicle.stopped'\n else:\n attr = None\n attr = attr if attr is not None else max(\n cls_attr_dist[name].items(), key=operator.itemgetter(1))[0]\n nusc_anno = {\n 'sample_token': det['metadata']['token'],\n 'translation': box.center.tolist(),\n 'size': box.wlh.tolist(),\n 'rotation': box.orientation.elements.tolist(),\n 'velocity': box.velocity[:2].tolist(),\n 'detection_name': name,\n 'detection_score': box.score,\n 'attribute_name': attr\n }\n annos.append(nusc_anno)\n \n nusc_annos['results'].update({det[\"metadata\"][\"token\"]: annos})\n\n return nusc_annos\n\n\ndef format_nuscene_results(metrics, class_names, version='default'):\n result = '----------------Nuscene %s results-----------------\\n' % version\n for name in class_names:\n threshs = ', '.join(list(metrics['label_aps'][name].keys()))\n ap_list = list(metrics['label_aps'][name].values())\n\n err_name =', '.join([x.split('_')[0] for x in list(metrics['label_tp_errors'][name].keys())])\n error_list = list(metrics['label_tp_errors'][name].values())\n\n result += f'***{name} error@{err_name} | AP@{threshs}\\n'\n result += ', '.join(['%.2f' % x for x in error_list]) + ' | '\n result += ', '.join(['%.2f' % (x * 100) for x in ap_list])\n result += f\" | mean AP: {metrics['mean_dist_aps'][name]}\"\n result += '\\n'\n\n result += '--------------average performance-------------\\n'\n details = {}\n for key, val in metrics['tp_errors'].items():\n result += '%s:\\t %.4f\\n' % (key, val)\n details[key] = val\n\n result += 'mAP:\\t %.4f\\n' % metrics['mean_ap']\n result += 'NDS:\\t %.4f\\n' % metrics['nd_score']\n\n details.update({\n 'mAP': metrics['mean_ap'],\n 'NDS': metrics['nd_score'],\n })\n\n return result, details\n",
"from functools import partial\n\nimport numpy as np\n\nfrom ...utils import box_utils, common_utils\n\n\nclass DataProcessor(object):\n def __init__(self, processor_configs, point_cloud_range, training):\n self.point_cloud_range = point_cloud_range\n self.training = training\n self.mode = 'train' if training else 'test'\n self.grid_size = self.voxel_size = None\n self.data_processor_queue = []\n for cur_cfg in processor_configs:\n cur_processor = getattr(self, cur_cfg.NAME)(config=cur_cfg)\n self.data_processor_queue.append(cur_processor)\n\n def mask_points_and_boxes_outside_range(self, data_dict=None, config=None):\n if data_dict is None:\n return partial(self.mask_points_and_boxes_outside_range, config=config)\n mask = common_utils.mask_points_by_range(data_dict['points'], self.point_cloud_range)\n data_dict['points'] = data_dict['points'][mask]\n # if data_dict.get('gt_boxes', None) is not None and config.REMOVE_OUTSIDE_BOXES and self.training:\n if data_dict.get('gt_boxes', None) is not None and config.REMOVE_OUTSIDE_BOXES:\n mask = box_utils.mask_boxes_outside_range_numpy(\n data_dict['gt_boxes'], self.point_cloud_range, min_num_corners=config.get('min_num_corners', 1)\n )\n data_dict['gt_boxes'] = data_dict['gt_boxes'][mask]\n\n if 'gt_names' in data_dict:\n data_dict['gt_names'] = data_dict['gt_names'][mask]\n return data_dict\n\n def filter_points_and_boxes_by_kitti_fov(self, data_dict=None, config=None):\n if data_dict is None:\n return partial(self.filter_points_and_boxes_by_kitti_fov, config=config)\n\n tan_threshold = float(config.TAN_THRESHOLD)\n points = data_dict['points']\n if config.get('ROTATE90', False):\n mask = (np.abs(points[:,1]) / np.maximum(np.abs(points[:,0]), 1e-3)) > tan_threshold\n else:\n mask = (np.abs(points[:,0]) / np.maximum(np.abs(points[:,1]), 1e-3)) > tan_threshold\n data_dict['points'] = data_dict['points'][mask]\n\n if 'gt_boxes' in data_dict:\n boxes = data_dict['gt_boxes']\n if boxes.shape[0] > 0:\n if boxes.shape[1] > 7:\n boxes = boxes[:, 0:7]\n corners = box_utils.boxes_to_corners_3d(boxes) # (N, 8, 3)\n if config.get('ROTATE90', False):\n corners_tan = np.abs(corners[...,1]) / np.maximum(np.abs(corners[...,0]), 1e-3)\n else:\n corners_tan = np.abs(corners[...,0]) / np.maximum(np.abs(corners[...,1]), 1e-3)\n mask = corners_tan > tan_threshold\n mask = mask.sum(axis=1) >= config.get('min_num_corners', 1) # (N)\n data_dict['gt_boxes'] = data_dict['gt_boxes'][mask]\n\n if 'gt_names' in data_dict:\n data_dict['gt_names'] = data_dict['gt_names'][mask]\n \n return data_dict\n\n def shuffle_points(self, data_dict=None, config=None):\n if data_dict is None:\n return partial(self.shuffle_points, config=config)\n\n if config.SHUFFLE_ENABLED[self.mode]:\n points = data_dict['points']\n shuffle_idx = np.random.permutation(points.shape[0])\n points = points[shuffle_idx]\n data_dict['points'] = points\n\n return data_dict\n\n def transform_points_to_voxels(self, data_dict=None, config=None, voxel_generator=None):\n if data_dict is None:\n try:\n from spconv.utils import VoxelGeneratorV2 as VoxelGenerator\n except:\n from spconv.utils import VoxelGenerator\n\n voxel_generator = VoxelGenerator(\n voxel_size=config.VOXEL_SIZE,\n point_cloud_range=self.point_cloud_range,\n max_num_points=config.MAX_POINTS_PER_VOXEL,\n max_voxels=config.MAX_NUMBER_OF_VOXELS[self.mode]\n )\n grid_size = (self.point_cloud_range[3:6] - self.point_cloud_range[0:3]) / np.array(config.VOXEL_SIZE)\n self.grid_size = np.round(grid_size).astype(np.int64)\n self.voxel_size = 
config.VOXEL_SIZE\n return partial(self.transform_points_to_voxels, voxel_generator=voxel_generator)\n\n points = data_dict['points']\n voxel_output = voxel_generator.generate(points)\n if isinstance(voxel_output, dict):\n voxels, coordinates, num_points = \\\n voxel_output['voxels'], voxel_output['coordinates'], voxel_output['num_points_per_voxel']\n else:\n voxels, coordinates, num_points = voxel_output\n\n if not data_dict['use_lead_xyz']:\n voxels = voxels[..., 3:] # remove xyz in voxels(N, 3)\n\n data_dict['voxels'] = voxels\n data_dict['voxel_coords'] = coordinates\n data_dict['voxel_num_points'] = num_points\n return data_dict\n\n def sample_points_by_voxel(self, data_dict=None, config=None, voxel_generator=None):\n if data_dict is None:\n try:\n from spconv.utils import VoxelGeneratorV2 as VoxelGenerator\n except:\n from spconv.utils import VoxelGenerator\n\n voxel_generator = VoxelGenerator(\n voxel_size=config.VOXEL_SIZE,\n point_cloud_range=self.point_cloud_range,\n max_num_points=config.MAX_POINTS_PER_VOXEL,\n max_voxels=config.MAX_NUMBER_OF_VOXELS[self.mode]\n )\n grid_size = (self.point_cloud_range[3:6] - self.point_cloud_range[0:3]) / np.array(config.VOXEL_SIZE)\n self.grid_size = np.round(grid_size).astype(np.int64)\n self.voxel_size = config.VOXEL_SIZE\n return partial(self.sample_points_by_voxel, voxel_generator=voxel_generator)\n\n points = data_dict['points']\n voxel_output = voxel_generator.generate(points)\n if isinstance(voxel_output, dict):\n voxels, coordinates, num_points = \\\n voxel_output['voxels'], voxel_output['coordinates'], voxel_output['num_points_per_voxel']\n else:\n voxels, coordinates, num_points = voxel_output\n\n if not data_dict['use_lead_xyz']:\n voxels = voxels[..., 3:] # remove xyz in voxels(N, 3)\n\n data_dict['voxels'] = voxels\n # print('points/ voxels', points.shape[0], voxels.shape[0])\n data_dict['points'] = voxels[:,0,:] # pick the first point in each voxel\n return data_dict\n\n\n def sample_points(self, data_dict=None, config=None):\n if data_dict is None:\n return partial(self.sample_points, config=config)\n\n num_points = config.NUM_POINTS[self.mode]\n if num_points == -1:\n return data_dict\n\n points = data_dict['points']\n if num_points < len(points):\n pts_depth = np.linalg.norm(points[:, 0:3], axis=1)\n pts_near_flag = pts_depth < 40.0\n far_idxs_choice = np.where(pts_near_flag == 0)[0]\n\n # print('near/far', np.sum(pts_near_flag), len(points)-np.sum(pts_near_flag))\n\n # if len(far_idxs_choice) > num_points//2 :\n # far_idxs_choice = np.random.choice(far_idxs_choice, num_points//2)\n \n max_far_points = int(num_points * config.get('MAX_FAR_POINTS_RATIO', 0.25))\n if len(far_idxs_choice) > max_far_points:\n far_idxs_choice = np.random.choice(far_idxs_choice, max_far_points)\n \n near_idxs = np.where(pts_near_flag == 1)[0]\n\n if len(near_idxs) + len(far_idxs_choice) > num_points:\n near_idxs_choice = np.random.choice(near_idxs, num_points - len(far_idxs_choice), replace=False)\n\n choice = np.concatenate((near_idxs_choice, far_idxs_choice), axis=0) \\\n if len(far_idxs_choice) > 0 else near_idxs_choice\n else:\n choice = np.arange(0, len(points), dtype=np.int32)\n choice = np.random.choice(choice, num_points)\n \n np.random.shuffle(choice)\n else:\n choice = np.arange(0, len(points), dtype=np.int32)\n \n # if too few points, make copys of current points\n repeat = num_points // len(points)\n if repeat > 1:\n choice = np.concatenate([choice]*repeat, axis=0)\n\n if num_points > len(points):\n # extra_choice = 
np.random.choice(choice, num_points - len(points), replace=False)\n extra_choice = np.random.choice(choice, num_points - choice.shape[0], replace=False)\n choice = np.concatenate((choice, extra_choice), axis=0)\n np.random.shuffle(choice)\n data_dict['points'] = points[choice]\n return data_dict\n\n # original \n # def sample_points(self, data_dict=None, config=None):\n # if data_dict is None:\n # return partial(self.sample_points, config=config)\n\n # num_points = config.NUM_POINTS[self.mode]\n # if num_points == -1:\n # return data_dict\n\n # points = data_dict['points']\n # if num_points < len(points):\n # pts_depth = np.linalg.norm(points[:, 0:3], axis=1)\n # pts_near_flag = pts_depth < 40.0\n # far_idxs_choice = np.where(pts_near_flag == 0)[0]\n # near_idxs = np.where(pts_near_flag == 1)[0]\n # near_idxs_choice = np.random.choice(near_idxs, num_points - len(far_idxs_choice), replace=False)\n # choice = []\n # if num_points > len(far_idxs_choice):\n # near_idxs_choice = np.random.choice(near_idxs, num_points - len(far_idxs_choice), replace=False)\n # choice = np.concatenate((near_idxs_choice, far_idxs_choice), axis=0) \\\n # if len(far_idxs_choice) > 0 else near_idxs_choice\n # else: \n # choice = np.arange(0, len(points), dtype=np.int32)\n # choice = np.random.choice(choice, num_points, replace=False)\n # np.random.shuffle(choice)\n # else:\n # choice = np.arange(0, len(points), dtype=np.int32)\n # if num_points > len(points):\n # extra_choice = np.random.choice(choice, num_points - len(points), replace=False)\n # choice = np.concatenate((choice, extra_choice), axis=0)\n # np.random.shuffle(choice)\n # data_dict['points'] = points[choice]\n # return data_dict\n\n def forward(self, data_dict):\n \"\"\"\n Args:\n data_dict:\n points: (N, 3 + C_in)\n gt_boxes: optional, (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]\n gt_names: optional, (N), string\n ...\n\n Returns:\n \"\"\"\n\n for cur_processor in self.data_processor_queue:\n data_dict = cur_processor(data_dict=data_dict)\n\n return data_dict\n"
] | [
[
"torch.from_numpy",
"numpy.concatenate",
"numpy.random.randn",
"numpy.random.uniform",
"numpy.array"
],
[
"numpy.arctan2",
"numpy.sqrt",
"numpy.array",
"numpy.concatenate"
],
[
"numpy.abs",
"numpy.random.choice",
"numpy.linalg.norm",
"numpy.random.shuffle",
"numpy.concatenate",
"numpy.round",
"numpy.random.permutation",
"numpy.array",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
reetikaag/human-activity-recognition | [
"1e6760a88ca52fe9a8a8ca60d000cd3426851156"
] | [
"Efficient-3DCNNs/thop/count_hooks.py"
] | [
"import argparse\n\nimport torch\nimport torch.nn as nn\n\nmultiply_adds = 1\n\n\ndef count_conv2d(m, x, y):\n\t# TODO: add support for pad and dilation\n\tx = x[0]\n\n\tcin = m.in_channels\n\tcout = m.out_channels\n\tkh, kw = m.kernel_size\n\tbatch_size = x.size()[0]\n\n\tout_w = y.size(2)\n\tout_h = y.size(3)\n\n\t# ops per output element\n\t# kernel_mul = kh * kw * cin\n\t# kernel_add = kh * kw * cin - 1\n\tkernel_ops = multiply_adds * kh * kw * cin // m.groups\n\tbias_ops = 1 if m.bias is not None else 0\n\tops_per_element = kernel_ops + bias_ops\n\n\t# total ops\n\t# num_out_elements = y.numel()\n\toutput_elements = batch_size * out_w * out_h * cout\n\ttotal_ops = output_elements * ops_per_element\n\n\t# in case same conv is used multiple times\n\tm.total_ops += torch.Tensor([int(total_ops)])\n\n\ndef count_conv3d(m, x, y):\n\t# TODO: add support for pad and dilation\n\tx = x[0]\n\n\tcin = m.in_channels\n\tcout = m.out_channels\n\tkd, kh, kw = m.kernel_size\n\tbatch_size = x.size()[0]\n \n\tout_d = y.size(2)\n\tout_w = y.size(3)\n\tout_h = y.size(4)\n\n\t# ops per output element\n\t# kernel_mul = kh * kw * cin\n\t# kernel_add = kh * kw * cin - 1\n\tkernel_ops = multiply_adds * kd * kh * kw * cin // m.groups\n\tbias_ops = 1 if m.bias is not None else 0\n\tops_per_element = kernel_ops + bias_ops\n\n\t# total ops\n\t# num_out_elements = y.numel()\n\toutput_elements = batch_size * out_d * out_w * out_h * cout\n\ttotal_ops = output_elements * ops_per_element\n\n\t# in case same conv is used multiple times\n\tm.total_ops += torch.Tensor([int(total_ops)]).to(\"cuda\")\n\n\ndef count_bn2d(m, x, y):\n\tx = x[0]\n\n\tnelements = x.numel()\n\ttotal_sub = nelements\n\ttotal_div = nelements\n\ttotal_ops = total_sub + total_div\n\n\tm.total_ops += torch.Tensor([int(total_ops)]).to(\"cuda\")\n\n\ndef count_relu(m, x, y):\n\tx = x[0]\n\n\tnelements = x.numel()\n\ttotal_ops = nelements\n\n\tm.total_ops += torch.Tensor([int(total_ops)]).to(\"cuda\")\n\n\ndef count_softmax(m, x, y):\n\tx = x[0]\n\n\tbatch_size, nfeatures = x.size()\n\n\ttotal_exp = nfeatures\n\ttotal_add = nfeatures - 1\n\ttotal_div = nfeatures\n\ttotal_ops = batch_size * (total_exp + total_add + total_div)\n\n\tm.total_ops += torch.Tensor([int(total_ops)]).to(\"cuda\")\n\n\ndef count_maxpool(m, x, y):\n\tkernel_ops = torch.prod(torch.Tensor([m.kernel_size])) - 1\n\tnum_elements = y.numel()\n\ttotal_ops = kernel_ops * num_elements\n\n\tm.total_ops += torch.Tensor([int(total_ops)]).to(\"cuda\")\n\n\ndef count_avgpool(m, x, y):\n\ttotal_add = torch.prod(torch.Tensor([m.kernel_size])) - 1\n\ttotal_div = 1\n\tkernel_ops = total_add + total_div\n\tnum_elements = y.numel()\n\ttotal_ops = kernel_ops * num_elements\n\n\tm.total_ops += torch.Tensor([int(total_ops)]).to(\"cuda\")\n\n\ndef count_linear(m, x, y):\n\t# per output element\n\ttotal_mul = m.in_features\n\ttotal_add = m.in_features - 1\n\tnum_elements = y.numel()\n\ttotal_ops = (total_mul + total_add) * num_elements\n\n\tm.total_ops += torch.Tensor([int(total_ops)]).to(\"cuda\")\n"
] | [
[
"torch.Tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
haorang/285 | [
"3b7369b8eb4433952c9cdf27d4feaa015a6c40e4"
] | [
"stable_baselines3/dqn/dqn.py"
] | [
"from typing import Any, Dict, List, Optional, Tuple, Type, Union\n\nimport numpy as np\nimport torch as th\nfrom torch.nn import functional as F\n\nfrom stable_baselines3.common import logger\nfrom stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm\nfrom stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule\nfrom stable_baselines3.common.utils import get_linear_fn, is_vectorized_observation, polyak_update\nfrom stable_baselines3.dqn.policies import DQNPolicy\n\n\nclass DQN(OffPolicyAlgorithm):\n \"\"\"\n Deep Q-Network (DQN)\n\n Paper: https://arxiv.org/abs/1312.5602, https://www.nature.com/articles/nature14236\n Default hyperparameters are taken from the nature paper,\n except for the optimizer and learning rate that were taken from Stable Baselines defaults.\n\n :param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)\n :param env: The environment to learn from (if registered in Gym, can be str)\n :param learning_rate: The learning rate, it can be a function\n of the current progress (from 1 to 0)\n :param buffer_size: size of the replay buffer\n :param learning_starts: how many steps of the model to collect transitions for before learning starts\n :param batch_size: Minibatch size for each gradient update\n :param tau: the soft update coefficient (\"Polyak update\", between 0 and 1) default 1 for hard update\n :param gamma: the discount factor\n :param train_freq: Update the model every ``train_freq`` steps. Set to `-1` to disable.\n :param gradient_steps: How many gradient steps to do after each rollout\n (see ``train_freq`` and ``n_episodes_rollout``)\n Set to ``-1`` means to do as many gradient steps as steps done in the environment\n during the rollout.\n :param n_episodes_rollout: Update the model every ``n_episodes_rollout`` episodes.\n Note that this cannot be used at the same time as ``train_freq``. Set to `-1` to disable.\n :param optimize_memory_usage: Enable a memory efficient variant of the replay buffer\n at a cost of more complexity.\n See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195\n :param target_update_interval: update the target network every ``target_update_interval``\n environment steps.\n :param exploration_fraction: fraction of entire training period over which the exploration rate is reduced\n :param exploration_initial_eps: initial value of random action probability\n :param exploration_final_eps: final value of random action probability\n :param max_grad_norm: The maximum value for the gradient clipping\n :param tensorboard_log: the log location for tensorboard (if None, no logging)\n :param create_eval_env: Whether to create a second environment that will be\n used for evaluating the agent periodically. (Only available when passing string for the environment)\n :param policy_kwargs: additional arguments to be passed to the policy on creation\n :param verbose: the verbosity level: 0 no output, 1 info, 2 debug\n :param seed: Seed for the pseudo random generators\n :param device: Device (cpu, cuda, ...) 
on which the code should be run.\n Setting it to auto, the code will be run on the GPU if possible.\n :param _init_setup_model: Whether or not to build the network at the creation of the instance\n \"\"\"\n\n def __init__(\n self,\n policy: Union[str, Type[DQNPolicy]],\n env: Union[GymEnv, str],\n learning_rate: Union[float, Schedule] = 1e-4,\n buffer_size: int = 1000000,\n learning_starts: int = 50000,\n batch_size: Optional[int] = 32,\n tau: float = 1.0,\n gamma: float = 0.99,\n train_freq: int = 4,\n gradient_steps: int = 1,\n n_episodes_rollout: int = -1,\n optimize_memory_usage: bool = False,\n target_update_interval: int = 10000,\n exploration_fraction: float = 0.1,\n exploration_initial_eps: float = 1.0,\n exploration_final_eps: float = 0.05,\n max_grad_norm: float = 10,\n tensorboard_log: Optional[str] = None,\n create_eval_env: bool = False,\n policy_kwargs: Optional[Dict[str, Any]] = None,\n verbose: int = 0,\n seed: Optional[int] = None,\n device: Union[th.device, str] = \"auto\",\n _init_setup_model: bool = True,\n ):\n\n super(DQN, self).__init__(\n policy,\n env,\n DQNPolicy,\n learning_rate,\n buffer_size,\n learning_starts,\n batch_size,\n tau,\n gamma,\n train_freq,\n gradient_steps,\n n_episodes_rollout,\n action_noise=None, # No action noise\n policy_kwargs=policy_kwargs,\n tensorboard_log=tensorboard_log,\n verbose=verbose,\n device=device,\n create_eval_env=create_eval_env,\n seed=seed,\n sde_support=False,\n optimize_memory_usage=optimize_memory_usage,\n )\n\n self.exploration_initial_eps = exploration_initial_eps\n self.exploration_final_eps = exploration_final_eps\n self.exploration_fraction = exploration_fraction\n self.target_update_interval = target_update_interval\n self.max_grad_norm = max_grad_norm\n # \"epsilon\" for the epsilon-greedy exploration\n self.exploration_rate = 0.0\n # Linear schedule will be defined in `_setup_model()`\n self.exploration_schedule = None\n self.q_net, self.q_net_target = None, None\n\n if _init_setup_model:\n self._setup_model()\n\n def _setup_model(self) -> None:\n super(DQN, self)._setup_model()\n self._create_aliases()\n self.exploration_schedule = get_linear_fn(\n self.exploration_initial_eps, self.exploration_final_eps, self.exploration_fraction\n )\n\n def _create_aliases(self) -> None:\n self.q_net = self.policy.q_net\n self.q_net_target = self.policy.q_net_target\n\n def _on_step(self) -> None:\n \"\"\"\n Update the exploration rate and target network if needed.\n This method is called in ``collect_rollouts()`` after each step in the environment.\n \"\"\"\n if self.num_timesteps % self.target_update_interval == 0:\n polyak_update(self.q_net.parameters(), self.q_net_target.parameters(), self.tau)\n\n self.exploration_rate = self.exploration_schedule(self._current_progress_remaining)\n logger.record(\"rollout/exploration rate\", self.exploration_rate)\n\n def train(self, gradient_steps: int, batch_size: int = 100) -> None:\n # Update learning rate according to schedule\n self._update_learning_rate(self.policy.optimizer)\n\n losses = []\n for gradient_step in range(gradient_steps):\n # Sample replay buffer\n replay_data = self.replay_buffer.sample(batch_size, env=self._vec_normalize_env)\n\n with th.no_grad():\n # Compute the target Q values\n target_q = self.q_net_target(replay_data.next_observations)\n # Follow greedy policy: use the one with the highest value\n target_q, _ = target_q.max(dim=1)\n # Avoid potential broadcast issue\n target_q = target_q.reshape(-1, 1)\n # 1-step TD target\n target_q = replay_data.rewards + 
(1 - replay_data.dones) * self.gamma * target_q\n\n # Get current Q estimates\n current_q = self.q_net(replay_data.observations)\n\n # Retrieve the q-values for the actions from the replay buffer\n current_q = th.gather(current_q, dim=1, index=replay_data.actions.long())\n\n # Compute Huber loss (less sensitive to outliers)\n loss = F.smooth_l1_loss(current_q, target_q)\n losses.append(loss.item())\n\n # Optimize the policy\n self.policy.optimizer.zero_grad()\n loss.backward()\n # Clip gradient norm\n th.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)\n self.policy.optimizer.step()\n\n # Increase update counter\n self._n_updates += gradient_steps\n\n logger.record(\"train/n_updates\", self._n_updates, exclude=\"tensorboard\")\n logger.record(\"train/loss\", np.mean(losses))\n\n def predict(\n self,\n observation: np.ndarray,\n state: Optional[np.ndarray] = None,\n mask: Optional[np.ndarray] = None,\n deterministic: bool = False,\n ) -> Tuple[np.ndarray, Optional[np.ndarray]]:\n \"\"\"\n Overrides the base_class predict function to include epsilon-greedy exploration.\n\n :param observation: the input observation\n :param state: The last states (can be None, used in recurrent policies)\n :param mask: The last masks (can be None, used in recurrent policies)\n :param deterministic: Whether or not to return deterministic actions.\n :return: the model's action and the next state\n (used in recurrent policies)\n \"\"\"\n if not deterministic and np.random.rand() < self.exploration_rate:\n if is_vectorized_observation(observation, self.observation_space):\n n_batch = observation.shape[0]\n action = np.array([self.action_space.sample() for _ in range(n_batch)])\n else:\n action = np.array(self.action_space.sample())\n else:\n action, state = self.policy.predict(observation, state, mask, deterministic)\n return action, state\n\n def learn(\n self,\n total_timesteps: int,\n callback: MaybeCallback = None,\n log_interval: int = 4,\n eval_env: Optional[GymEnv] = None,\n eval_freq: int = -1,\n n_eval_episodes: int = 5,\n tb_log_name: str = \"DQN\",\n eval_log_path: Optional[str] = None,\n reset_num_timesteps: bool = True,\n ) -> OffPolicyAlgorithm:\n\n return super(DQN, self).learn(\n total_timesteps=total_timesteps,\n callback=callback,\n log_interval=log_interval,\n eval_env=eval_env,\n eval_freq=eval_freq,\n n_eval_episodes=n_eval_episodes,\n tb_log_name=tb_log_name,\n eval_log_path=eval_log_path,\n reset_num_timesteps=reset_num_timesteps,\n )\n\n def _excluded_save_params(self) -> List[str]:\n return super(DQN, self)._excluded_save_params() + [\"q_net\", \"q_net_target\"]\n\n def _get_torch_save_params(self) -> Tuple[List[str], List[str]]:\n state_dicts = [\"policy\", \"policy.optimizer\"]\n\n return state_dicts, []\n"
] | [
[
"torch.no_grad",
"numpy.mean",
"numpy.random.rand",
"torch.nn.functional.smooth_l1_loss"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
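Note (illustrative, not part of the dataset): the code cell in the row above embeds a DQN implementation whose core steps are a 1-step TD target and epsilon-greedy action selection. The following is a minimal, hypothetical NumPy sketch of those two steps only; all names (`td_target`, `epsilon_greedy`) are invented for illustration and do not come from the dataset or the embedded library code.

    # Minimal sketch of the 1-step TD target and epsilon-greedy selection
    # pattern seen in the DQN code string above. Names are illustrative.
    import numpy as np

    def td_target(rewards, dones, next_q_values, gamma=0.99):
        # Greedy bootstrap: max over actions of the target-network Q-values,
        # zeroed out for terminal transitions.
        next_q_max = next_q_values.max(axis=1, keepdims=True)
        return rewards + (1.0 - dones) * gamma * next_q_max

    def epsilon_greedy(q_values, epsilon, rng=np.random):
        # With probability epsilon pick a random action, otherwise the argmax.
        if rng.rand() < epsilon:
            return int(rng.randint(q_values.shape[-1]))
        return int(np.argmax(q_values))

    # Example: batch of 2 transitions, 3 actions.
    rewards = np.array([[1.0], [0.0]])
    dones = np.array([[0.0], [1.0]])
    next_q = np.array([[0.1, 0.5, 0.2], [0.3, 0.0, 0.4]])
    print(td_target(rewards, dones, next_q))                      # [[1.495], [0.]]
    print(epsilon_greedy(np.array([0.1, 0.9, 0.2]), epsilon=0.05))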
b1quint/astropy | [
"a170a74739e4356c169429a42e554f9777b53f4d",
"a170a74739e4356c169429a42e554f9777b53f4d",
"a170a74739e4356c169429a42e554f9777b53f4d",
"a170a74739e4356c169429a42e554f9777b53f4d",
"a170a74739e4356c169429a42e554f9777b53f4d",
"a170a74739e4356c169429a42e554f9777b53f4d",
"a170a74739e4356c169429a42e554f9777b53f4d",
"a170a74739e4356c169429a42e554f9777b53f4d",
"a170a74739e4356c169429a42e554f9777b53f4d"
] | [
"astropy/stats/lombscargle/implementations/tests/test_mle.py",
"astropy/stats/lombscargle/implementations/fast_impl.py",
"astropy/io/fits/hdu/table.py",
"astropy/visualization/wcsaxes/tests/test_frame.py",
"astropy/io/fits/scripts/fitsheader.py",
"astropy/visualization/wcsaxes/utils.py",
"astropy/modeling/tabular.py",
"astropy/visualization/tests/test_norm.py",
"astropy/utils/tests/test_misc.py"
] | [
"import pytest\nimport numpy as np\nfrom numpy.testing import assert_allclose\n\nfrom astropy.stats.lombscargle.implementations.mle import design_matrix, periodic_fit\n\n\[email protected]\ndef t():\n rand = np.random.RandomState(42)\n return 10 * rand.rand(10)\n\n\[email protected]('freq', [1.0, 2])\[email protected]('dy', [None, 2.0])\[email protected]('bias', [True, False])\ndef test_design_matrix(t, freq, dy, bias):\n X = design_matrix(t, freq, dy, bias=bias)\n assert X.shape == (t.shape[0], 2 + bool(bias))\n if bias:\n assert_allclose(X[:, 0], 1. / (dy or 1.0))\n assert_allclose(X[:, -2], np.sin(2 * np.pi * freq * t) / (dy or 1.0))\n assert_allclose(X[:, -1], np.cos(2 * np.pi * freq * t) / (dy or 1.0))\n\n\[email protected]('nterms', range(4))\ndef test_multiterm_design_matrix(t, nterms):\n dy = 2.0\n freq = 1.5\n X = design_matrix(t, freq, dy=dy, bias=True, nterms=nterms)\n assert X.shape == (t.shape[0], 1 + 2 * nterms)\n assert_allclose(X[:, 0], 1. / dy)\n for i in range(1, nterms + 1):\n assert_allclose(X[:, 2 * i - 1], np.sin(2 * np.pi * i * freq * t) / dy)\n assert_allclose(X[:, 2 * i], np.cos(2 * np.pi * i * freq * t) / dy)\n\n\[email protected]('nterms', range(1, 4))\[email protected]('freq', [1, 2])\[email protected]('fit_mean', [True, False])\ndef test_exact_mle_fit(nterms, freq, fit_mean):\n rand = np.random.RandomState(42)\n t = 10 * rand.rand(30)\n theta = -1 + rand.rand(2 * nterms + 1)\n y = np.zeros(t.shape)\n if fit_mean:\n y = theta[0] * np.ones(t.shape)\n for i in range(1, nterms + 1):\n y += theta[2 * i - 1] * np.sin(2 * np.pi * i * freq * t)\n y += theta[2 * i] * np.cos(2 * np.pi * i * freq * t)\n\n y_fit = periodic_fit(t, y, dy=1, frequency=freq, t_fit=t, nterms=nterms,\n center_data=False, fit_mean=fit_mean)\n assert_allclose(y, y_fit)\n",
"\nimport numpy as np\nfrom .utils import trig_sum\n\n\ndef lombscargle_fast(t, y, dy, f0, df, Nf,\n center_data=True, fit_mean=True,\n normalization='standard',\n use_fft=True, trig_sum_kwds=None):\n \"\"\"Fast Lomb-Scargle Periodogram\n\n This implements the Press & Rybicki method [1]_ for fast O[N log(N)]\n Lomb-Scargle periodograms.\n\n Parameters\n ----------\n t, y, dy : array_like (NOT astropy.Quantities)\n times, values, and errors of the data points. These should be\n broadcastable to the same shape.\n f0, df, Nf : (float, float, int)\n parameters describing the frequency grid, f = f0 + df * arange(Nf).\n center_data : bool (default=True)\n Specify whether to subtract the mean of the data before the fit\n fit_mean : bool (default=True)\n If True, then compute the floating-mean periodogram; i.e. let the mean\n vary with the fit.\n normalization : string (optional, default='standard')\n Normalization to use for the periodogram.\n Options are 'standard', 'model', 'log', or 'psd'.\n use_fft : bool (default=True)\n If True, then use the Press & Rybicki O[NlogN] algorithm to compute\n the result. Otherwise, use a slower O[N^2] algorithm\n trig_sum_kwds : dict or None (optional)\n extra keyword arguments to pass to the ``trig_sum`` utility.\n Options are ``oversampling`` and ``Mfft``. See documentation\n of ``trig_sum`` for details.\n\n Returns\n -------\n power : ndarray\n Lomb-Scargle power associated with each frequency.\n Units of the result depend on the normalization.\n\n Notes\n -----\n Note that the ``use_fft=True`` algorithm is an approximation to the true\n Lomb-Scargle periodogram, and as the number of points grows this\n approximation improves. On the other hand, for very small datasets\n (<~50 points or so) this approximation may not be useful.\n\n References\n ----------\n .. [1] Press W.H. and Rybicki, G.B, \"Fast algorithm for spectral analysis\n of unevenly sampled data\". ApJ 1:338, p277, 1989\n .. [2] M. Zechmeister and M. Kurster, A&A 496, 577-584 (2009)\n .. [3] W. Press et al, Numerical Recipes in C (2002)\n \"\"\"\n if dy is None:\n dy = 1\n\n # Validate and setup input data\n t, y, dy = np.broadcast_arrays(t, y, dy)\n if t.ndim != 1:\n raise ValueError(\"t, y, dy should be one dimensional\")\n\n # Validate and setup frequency grid\n if f0 < 0:\n raise ValueError(\"Frequencies must be positive\")\n if df <= 0:\n raise ValueError(\"Frequency steps must be positive\")\n if Nf <= 0:\n raise ValueError(\"Number of frequencies must be positive\")\n\n w = dy ** -2.0\n w /= w.sum()\n\n # Center the data. Even if we're fitting the offset,\n # this step makes the expressions below more succinct\n if center_data or fit_mean:\n y = y - np.dot(w, y)\n\n # set up arguments to trig_sum\n kwargs = dict.copy(trig_sum_kwds or {})\n kwargs.update(f0=f0, df=df, use_fft=use_fft, N=Nf)\n\n # ----------------------------------------------------------------------\n # 1. 
compute functions of the time-shift tau at each frequency\n Sh, Ch = trig_sum(t, w * y, **kwargs)\n S2, C2 = trig_sum(t, w, freq_factor=2, **kwargs)\n\n if fit_mean:\n S, C = trig_sum(t, w, **kwargs)\n tan_2omega_tau = (S2 - 2 * S * C) / (C2 - (C * C - S * S))\n else:\n tan_2omega_tau = S2 / C2\n\n # This is what we're computing below; the straightforward way is slower\n # and less stable, so we use trig identities instead\n #\n # omega_tau = 0.5 * np.arctan(tan_2omega_tau)\n # S2w, C2w = np.sin(2 * omega_tau), np.cos(2 * omega_tau)\n # Sw, Cw = np.sin(omega_tau), np.cos(omega_tau)\n\n S2w = tan_2omega_tau / np.sqrt(1 + tan_2omega_tau * tan_2omega_tau)\n C2w = 1 / np.sqrt(1 + tan_2omega_tau * tan_2omega_tau)\n Cw = np.sqrt(0.5) * np.sqrt(1 + C2w)\n Sw = np.sqrt(0.5) * np.sign(S2w) * np.sqrt(1 - C2w)\n\n # ----------------------------------------------------------------------\n # 2. Compute the periodogram, following Zechmeister & Kurster\n # and using tricks from Press & Rybicki.\n YY = np.dot(w, y ** 2)\n YC = Ch * Cw + Sh * Sw\n YS = Sh * Cw - Ch * Sw\n CC = 0.5 * (1 + C2 * C2w + S2 * S2w)\n SS = 0.5 * (1 - C2 * C2w - S2 * S2w)\n\n if fit_mean:\n CC -= (C * Cw + S * Sw) ** 2\n SS -= (S * Cw - C * Sw) ** 2\n\n power = (YC * YC / CC + YS * YS / SS)\n\n if normalization == 'standard':\n power /= YY\n elif normalization == 'model':\n power /= YY - power\n elif normalization == 'log':\n power = -np.log(1 - power / YY)\n elif normalization == 'psd':\n power *= 0.5 * (dy ** -2.0).sum()\n else:\n raise ValueError(\"normalization='{0}' \"\n \"not recognized\".format(normalization))\n\n return power\n",
"# Licensed under a 3-clause BSD style license - see PYFITS.rst\n\n\nimport contextlib\nimport csv\nimport operator\nimport os\nimport re\nimport sys\nimport textwrap\nimport warnings\nfrom contextlib import suppress\n\nimport numpy as np\nfrom numpy import char as chararray\n\nfrom .base import DELAYED, _ValidHDU, ExtensionHDU\n# This module may have many dependencies on astropy.io.fits.column, but\n# astropy.io.fits.column has fewer dependencies overall, so it's easier to\n# keep table/column-related utilities in astropy.io.fits.column\nfrom astropy.io.fits.column import (FITS2NUMPY, KEYWORD_NAMES, KEYWORD_TO_ATTRIBUTE,\n ATTRIBUTE_TO_KEYWORD, TDEF_RE, Column, ColDefs,\n _AsciiColDefs, _FormatP, _FormatQ, _makep,\n _parse_tformat, _scalar_to_format, _convert_format,\n _cmp_recformats)\nfrom astropy.io.fits.fitsrec import FITS_rec, _get_recarray_field, _has_unicode_fields\nfrom astropy.io.fits.header import Header, _pad_length\nfrom astropy.io.fits.util import _is_int, _str_to_num\n\nfrom astropy.utils import lazyproperty\nfrom astropy.utils.exceptions import AstropyDeprecationWarning\nfrom astropy.utils.decorators import deprecated_renamed_argument\n\n\nclass FITSTableDumpDialect(csv.excel):\n \"\"\"\n A CSV dialect for the Astropy format of ASCII dumps of FITS tables.\n \"\"\"\n\n delimiter = ' '\n lineterminator = '\\n'\n quotechar = '\"'\n quoting = csv.QUOTE_ALL\n skipinitialspace = True\n\n\nclass _TableLikeHDU(_ValidHDU):\n \"\"\"\n A class for HDUs that have table-like data. This is used for both\n Binary/ASCII tables as well as Random Access Group HDUs (which are\n otherwise too dissimilar for tables to use _TableBaseHDU directly).\n \"\"\"\n\n _data_type = FITS_rec\n _columns_type = ColDefs\n\n # TODO: Temporary flag representing whether uints are enabled; remove this\n # after restructuring to support uints by default on a per-column basis\n _uint = False\n\n @classmethod\n def match_header(cls, header):\n \"\"\"\n This is an abstract HDU type for HDUs that contain table-like data.\n This is even more abstract than _TableBaseHDU which is specifically for\n the standard ASCII and Binary Table types.\n \"\"\"\n\n raise NotImplementedError\n\n @classmethod\n def from_columns(cls, columns, header=None, nrows=0, fill=False,\n character_as_bytes=False, **kwargs):\n \"\"\"\n Given either a `ColDefs` object, a sequence of `Column` objects,\n or another table HDU or table data (a `FITS_rec` or multi-field\n `numpy.ndarray` or `numpy.recarray` object, return a new table HDU of\n the class this method was called on using the column definition from\n the input.\n\n See also `FITS_rec.from_columns`.\n\n Parameters\n ----------\n columns : sequence of `Column`, `ColDefs`, or other\n The columns from which to create the table data, or an object with\n a column-like structure from which a `ColDefs` can be instantiated.\n This includes an existing `BinTableHDU` or `TableHDU`, or a\n `numpy.recarray` to give some examples.\n\n If these columns have data arrays attached that data may be used in\n initializing the new table. Otherwise the input columns will be\n used as a template for a new table with the requested number of\n rows.\n\n header : `Header`\n An optional `Header` object to instantiate the new HDU yet. 
Header\n keywords specifically related to defining the table structure (such\n as the \"TXXXn\" keywords like TTYPEn) will be overridden by the\n supplied column definitions, but all other informational and data\n model-specific keywords are kept.\n\n nrows : int\n Number of rows in the new table. If the input columns have data\n associated with them, the size of the largest input column is used.\n Otherwise the default is 0.\n\n fill : bool\n If `True`, will fill all cells with zeros or blanks. If `False`,\n copy the data from input, undefined cells will still be filled with\n zeros/blanks.\n\n character_as_bytes : bool\n Whether to return bytes for string columns when accessed from the\n HDU. By default this is `False` and (unicode) strings are returned,\n but for large tables this may use up a lot of memory.\n\n Notes\n -----\n\n Any additional keyword arguments accepted by the HDU class's\n ``__init__`` may also be passed in as keyword arguments.\n \"\"\"\n\n coldefs = cls._columns_type(columns)\n data = FITS_rec.from_columns(coldefs, nrows=nrows, fill=fill,\n character_as_bytes=character_as_bytes)\n hdu = cls(data=data, header=header, character_as_bytes=character_as_bytes, **kwargs)\n coldefs._add_listener(hdu)\n return hdu\n\n @lazyproperty\n def columns(self):\n \"\"\"\n The :class:`ColDefs` objects describing the columns in this table.\n \"\"\"\n\n # The base class doesn't make any assumptions about where the column\n # definitions come from, so just return an empty ColDefs\n return ColDefs([])\n\n @property\n def _nrows(self):\n \"\"\"\n Table-like HDUs must provide an attribute that specifies the number of\n rows in the HDU's table.\n\n For now this is an internal-only attribute.\n \"\"\"\n\n raise NotImplementedError\n\n def _get_tbdata(self):\n \"\"\"Get the table data from an input HDU object.\"\"\"\n\n columns = self.columns\n\n # TODO: Details related to variable length arrays need to be dealt with\n # specifically in the BinTableHDU class, since they're a detail\n # specific to FITS binary tables\n if (any(type(r) in (_FormatP, _FormatQ)\n for r in columns._recformats) and\n self._data_size is not None and\n self._data_size > self._theap):\n # We have a heap; include it in the raw_data\n raw_data = self._get_raw_data(self._data_size, np.uint8,\n self._data_offset)\n data = raw_data[:self._theap].view(dtype=columns.dtype,\n type=np.rec.recarray)\n else:\n raw_data = self._get_raw_data(self._nrows, columns.dtype,\n self._data_offset)\n if raw_data is None:\n # This can happen when a brand new table HDU is being created\n # and no data has been assigned to the columns, which case just\n # return an empty array\n raw_data = np.array([], dtype=columns.dtype)\n\n data = raw_data.view(np.rec.recarray)\n\n self._init_tbdata(data)\n data = data.view(self._data_type)\n columns._add_listener(data)\n return data\n\n def _init_tbdata(self, data):\n columns = self.columns\n\n data.dtype = data.dtype.newbyteorder('>')\n\n # hack to enable pseudo-uint support\n data._uint = self._uint\n\n # pass datLoc, for P format\n data._heapoffset = self._theap\n data._heapsize = self._header['PCOUNT']\n tbsize = self._header['NAXIS1'] * self._header['NAXIS2']\n data._gap = self._theap - tbsize\n\n # pass the attributes\n for idx, col in enumerate(columns):\n # get the data for each column object from the rec.recarray\n col.array = data.field(idx)\n\n # delete the _arrays attribute so that it is recreated to point to the\n # new data placed in the column object above\n del columns._arrays\n\n def 
_update_column_added(self, columns, column):\n \"\"\"\n Update the data upon addition of a new column through the `ColDefs`\n interface.\n \"\"\"\n\n # TODO: It's not clear that this actually works--it probably does not.\n # This is what the code used to do before introduction of the\n # notifier interface, but I don't believe it actually worked (there are\n # several bug reports related to this...)\n if self._data_loaded:\n del self.data\n\n def _update_column_removed(self, columns, col_idx):\n \"\"\"\n Update the data upon removal of a column through the `ColDefs`\n interface.\n \"\"\"\n\n # For now this doesn't do anything fancy--it just deletes the data\n # attribute so that it is forced to be recreated again. It doesn't\n # change anything on the existing data recarray (this is also how this\n # worked before introducing the notifier interface)\n if self._data_loaded:\n del self.data\n\n\nclass _TableBaseHDU(ExtensionHDU, _TableLikeHDU):\n \"\"\"\n FITS table extension base HDU class.\n\n Parameters\n ----------\n data : array\n Data to be used.\n header : `Header` instance\n Header to be used. If the ``data`` is also specified, header keywords\n specifically related to defining the table structure (such as the\n \"TXXXn\" keywords like TTYPEn) will be overridden by the supplied column\n definitions, but all other informational and data model-specific\n keywords are kept.\n name : str\n Name to be populated in ``EXTNAME`` keyword.\n uint : bool, optional\n Set to `True` if the table contains unsigned integer columns.\n ver : int > 0 or None, optional\n The ver of the HDU, will be the value of the keyword ``EXTVER``.\n If not given or None, it defaults to the value of the ``EXTVER``\n card of the ``header`` or 1.\n (default: None)\n character_as_bytes : bool\n Whether to return bytes for string columns. By default this is `False`\n and (unicode) strings are returned, but this does not respect memory\n mapping and loads the whole column in memory when accessed.\n \"\"\"\n\n _manages_own_heap = False\n \"\"\"\n This flag implies that when writing VLA tables (P/Q format) the heap\n pointers that go into P/Q table columns should not be reordered or\n rearranged in any way by the default heap management code.\n\n This is included primarily as an optimization for compressed image HDUs\n which perform their own heap maintenance.\n \"\"\"\n\n def __init__(self, data=None, header=None, name=None, uint=False, ver=None,\n character_as_bytes=False):\n\n super().__init__(data=data, header=header, name=name, ver=ver)\n\n if header is not None and not isinstance(header, Header):\n raise ValueError('header must be a Header object.')\n\n self._uint = uint\n self._character_as_bytes = character_as_bytes\n\n if data is DELAYED:\n # this should never happen\n if header is None:\n raise ValueError('No header to setup HDU.')\n\n # if the file is read the first time, no need to copy, and keep it\n # unchanged\n else:\n self._header = header\n else:\n # construct a list of cards of minimal header\n cards = [\n ('XTENSION', '', ''),\n ('BITPIX', 8, 'array data type'),\n ('NAXIS', 2, 'number of array dimensions'),\n ('NAXIS1', 0, 'length of dimension 1'),\n ('NAXIS2', 0, 'length of dimension 2'),\n ('PCOUNT', 0, 'number of group parameters'),\n ('GCOUNT', 1, 'number of groups'),\n ('TFIELDS', 0, 'number of table fields')]\n\n if header is not None:\n\n # Make a \"copy\" (not just a view) of the input header, since it\n # may get modified. 
the data is still a \"view\" (for now)\n hcopy = header.copy(strip=True)\n cards.extend(hcopy.cards)\n\n self._header = Header(cards)\n\n if isinstance(data, np.ndarray) and data.dtype.fields is not None:\n # self._data_type is FITS_rec.\n if isinstance(data, self._data_type):\n self.data = data\n else:\n self.data = self._data_type.from_columns(data)\n\n # TEMP: Special column keywords are normally overwritten by attributes\n # from Column objects. In Astropy 3.0, several new keywords are now\n # recognized as being special column keywords, but we don't\n # automatically clear them yet, as we need to raise a deprecation\n # warning for at least one major version.\n if header is not None:\n future_ignore = set()\n for keyword in self._header.keys():\n match = TDEF_RE.match(keyword)\n try:\n base_keyword = match.group('label')\n except Exception:\n continue # skip if there is no match\n if base_keyword in {'TCTYP', 'TCUNI', 'TCRPX', 'TCRVL', 'TCDLT', 'TRPOS'}:\n future_ignore.add(base_keyword)\n if future_ignore:\n keys = ', '.join(x + 'n' for x in sorted(future_ignore))\n warnings.warn(\"The following keywords are now recognized as special \"\n \"column-related attributes and should be set via the \"\n \"Column objects: {0}. In future, these values will be \"\n \"dropped from manually specified headers automatically \"\n \"and replaced with values generated based on the \"\n \"Column objects.\".format(keys), AstropyDeprecationWarning)\n\n # TODO: Too much of the code in this class uses header keywords\n # in making calculations related to the data size. This is\n # unreliable, however, in cases when users mess with the header\n # unintentionally--code that does this should be cleaned up.\n self._header['NAXIS1'] = self.data._raw_itemsize\n self._header['NAXIS2'] = self.data.shape[0]\n self._header['TFIELDS'] = len(self.data._coldefs)\n\n self.columns = self.data._coldefs\n self.update()\n\n with suppress(TypeError, AttributeError):\n # Make the ndarrays in the Column objects of the ColDefs\n # object of the HDU reference the same ndarray as the HDU's\n # FITS_rec object.\n for idx, col in enumerate(self.columns):\n col.array = self.data.field(idx)\n\n # Delete the _arrays attribute so that it is recreated to\n # point to the new data placed in the column objects above\n del self.columns._arrays\n elif data is None:\n pass\n else:\n raise TypeError('Table data has incorrect type.')\n\n if not (isinstance(self._header[0], str) and\n self._header[0].rstrip() == self._extension):\n self._header[0] = (self._extension, self._ext_comment)\n\n # Ensure that the correct EXTNAME is set on the new header if one was\n # created, or that it overrides the existing EXTNAME if different\n if name:\n self.name = name\n if ver is not None:\n self.ver = ver\n\n @classmethod\n def match_header(cls, header):\n \"\"\"\n This is an abstract type that implements the shared functionality of\n the ASCII and Binary Table HDU types, which should be used instead of\n this.\n \"\"\"\n\n raise NotImplementedError\n\n @lazyproperty\n def columns(self):\n \"\"\"\n The :class:`ColDefs` objects describing the columns in this table.\n \"\"\"\n\n if self._has_data and hasattr(self.data, '_coldefs'):\n return self.data._coldefs\n return self._columns_type(self)\n\n @lazyproperty\n def data(self):\n data = self._get_tbdata()\n data._coldefs = self.columns\n data._character_as_bytes = self._character_as_bytes\n # Columns should now just return a reference to the data._coldefs\n del self.columns\n return data\n\n @data.setter\n 
def data(self, data):\n if 'data' in self.__dict__:\n if self.__dict__['data'] is data:\n return\n else:\n self._data_replaced = True\n else:\n self._data_replaced = True\n\n self._modified = True\n\n if data is None and self.columns:\n # Create a new table with the same columns, but empty rows\n formats = ','.join(self.columns._recformats)\n data = np.rec.array(None, formats=formats,\n names=self.columns.names,\n shape=0)\n\n if isinstance(data, np.ndarray) and data.dtype.fields is not None:\n # Go ahead and always make a view, even if the data is already the\n # correct class (self._data_type) so we can update things like the\n # column defs, if necessary\n data = data.view(self._data_type)\n\n if not isinstance(data.columns, self._columns_type):\n # This would be the place, if the input data was for an ASCII\n # table and this is binary table, or vice versa, to convert the\n # data to the appropriate format for the table type\n new_columns = self._columns_type(data.columns)\n data = FITS_rec.from_columns(new_columns)\n\n self.__dict__['data'] = data\n\n self.columns = self.data.columns\n self.update()\n\n with suppress(TypeError, AttributeError):\n # Make the ndarrays in the Column objects of the ColDefs\n # object of the HDU reference the same ndarray as the HDU's\n # FITS_rec object.\n for idx, col in enumerate(self.columns):\n col.array = self.data.field(idx)\n\n # Delete the _arrays attribute so that it is recreated to\n # point to the new data placed in the column objects above\n del self.columns._arrays\n elif data is None:\n pass\n else:\n raise TypeError('Table data has incorrect type.')\n\n # returning the data signals to lazyproperty that we've already handled\n # setting self.__dict__['data']\n return data\n\n @property\n def _nrows(self):\n if not self._data_loaded:\n return self._header.get('NAXIS2', 0)\n else:\n return len(self.data)\n\n @lazyproperty\n def _theap(self):\n size = self._header['NAXIS1'] * self._header['NAXIS2']\n return self._header.get('THEAP', size)\n\n # TODO: Need to either rename this to update_header, for symmetry with the\n # Image HDUs, or just at some point deprecate it and remove it altogether,\n # since header updates should occur automatically when necessary...\n def update(self):\n \"\"\"\n Update header keywords to reflect recent changes of columns.\n \"\"\"\n\n self._header.set('NAXIS1', self.data._raw_itemsize, after='NAXIS')\n self._header.set('NAXIS2', self.data.shape[0], after='NAXIS1')\n self._header.set('TFIELDS', len(self.columns), after='GCOUNT')\n\n self._clear_table_keywords()\n self._populate_table_keywords()\n\n def copy(self):\n \"\"\"\n Make a copy of the table HDU, both header and data are copied.\n \"\"\"\n\n # touch the data, so it's defined (in the case of reading from a\n # FITS file)\n return self.__class__(data=self.data.copy(),\n header=self._header.copy())\n\n def _prewriteto(self, checksum=False, inplace=False):\n if self._has_data:\n self.data._scale_back(\n update_heap_pointers=not self._manages_own_heap)\n # check TFIELDS and NAXIS2\n self._header['TFIELDS'] = len(self.data._coldefs)\n self._header['NAXIS2'] = self.data.shape[0]\n\n # calculate PCOUNT, for variable length tables\n tbsize = self._header['NAXIS1'] * self._header['NAXIS2']\n heapstart = self._header.get('THEAP', tbsize)\n self.data._gap = heapstart - tbsize\n pcount = self.data._heapsize + self.data._gap\n if pcount > 0:\n self._header['PCOUNT'] = pcount\n\n # update the other T****n keywords\n self._populate_table_keywords()\n\n # update TFORM for 
variable length columns\n for idx in range(self.data._nfields):\n format = self.data._coldefs._recformats[idx]\n if isinstance(format, _FormatP):\n _max = self.data.field(idx).max\n # May be either _FormatP or _FormatQ\n format_cls = format.__class__\n format = format_cls(format.dtype, repeat=format.repeat,\n max=_max)\n self._header['TFORM' + str(idx + 1)] = format.tform\n return super()._prewriteto(checksum, inplace)\n\n def _verify(self, option='warn'):\n \"\"\"\n _TableBaseHDU verify method.\n \"\"\"\n\n errs = super()._verify(option=option)\n self.req_cards('NAXIS', None, lambda v: (v == 2), 2, option, errs)\n self.req_cards('BITPIX', None, lambda v: (v == 8), 8, option, errs)\n self.req_cards('TFIELDS', 7,\n lambda v: (_is_int(v) and v >= 0 and v <= 999), 0,\n option, errs)\n tfields = self._header['TFIELDS']\n for idx in range(tfields):\n self.req_cards('TFORM' + str(idx + 1), None, None, None, option,\n errs)\n return errs\n\n def _summary(self):\n \"\"\"\n Summarize the HDU: name, dimensions, and formats.\n \"\"\"\n\n class_name = self.__class__.__name__\n\n # if data is touched, use data info.\n if self._data_loaded:\n if self.data is None:\n shape, format = (), ''\n nrows = 0\n else:\n nrows = len(self.data)\n\n ncols = len(self.columns)\n format = self.columns.formats\n\n # if data is not touched yet, use header info.\n else:\n shape = ()\n nrows = self._header['NAXIS2']\n ncols = self._header['TFIELDS']\n format = ', '.join([self._header['TFORM' + str(j + 1)]\n for j in range(ncols)])\n format = '[{}]'.format(format)\n dims = \"{}R x {}C\".format(nrows, ncols)\n ncards = len(self._header)\n\n return (self.name, self.ver, class_name, ncards, dims, format)\n\n def _update_column_removed(self, columns, idx):\n super()._update_column_removed(columns, idx)\n\n # Fix the header to reflect the column removal\n self._clear_table_keywords(index=idx)\n\n def _update_column_attribute_changed(self, column, col_idx, attr,\n old_value, new_value):\n \"\"\"\n Update the header when one of the column objects is updated.\n \"\"\"\n\n # base_keyword is the keyword without the index such as TDIM\n # while keyword is like TDIM1\n base_keyword = ATTRIBUTE_TO_KEYWORD[attr]\n keyword = base_keyword + str(col_idx + 1)\n\n if keyword in self._header:\n if new_value is None:\n # If the new value is None, i.e. None was assigned to the\n # column attribute, then treat this as equivalent to deleting\n # that attribute\n del self._header[keyword]\n else:\n self._header[keyword] = new_value\n else:\n keyword_idx = KEYWORD_NAMES.index(base_keyword)\n # Determine the appropriate keyword to insert this one before/after\n # if it did not already exist in the header\n for before_keyword in reversed(KEYWORD_NAMES[:keyword_idx]):\n before_keyword += str(col_idx + 1)\n if before_keyword in self._header:\n self._header.insert(before_keyword, (keyword, new_value),\n after=True)\n break\n else:\n for after_keyword in KEYWORD_NAMES[keyword_idx + 1:]:\n after_keyword += str(col_idx + 1)\n if after_keyword in self._header:\n self._header.insert(after_keyword,\n (keyword, new_value))\n break\n else:\n # Just append\n self._header[keyword] = new_value\n\n def _clear_table_keywords(self, index=None):\n \"\"\"\n Wipe out any existing table definition keywords from the header.\n\n If specified, only clear keywords for the given table index (shifting\n up keywords for any other columns). 
The index is zero-based.\n Otherwise keywords for all columns.\n \"\"\"\n\n # First collect all the table structure related keyword in the header\n # into a single list so we can then sort them by index, which will be\n # useful later for updating the header in a sensible order (since the\n # header *might* not already be written in a reasonable order)\n table_keywords = []\n\n for idx, keyword in enumerate(self._header.keys()):\n match = TDEF_RE.match(keyword)\n try:\n base_keyword = match.group('label')\n except Exception:\n continue # skip if there is no match\n\n if base_keyword in KEYWORD_TO_ATTRIBUTE:\n\n # TEMP: For Astropy 3.0 we don't clear away the following keywords\n # as we are first raising a deprecation warning that these will be\n # dropped automatically if they were specified in the header. We\n # can remove this once we are happy to break backward-compatibility\n if base_keyword in {'TCTYP', 'TCUNI', 'TCRPX', 'TCRVL', 'TCDLT', 'TRPOS'}:\n continue\n\n num = int(match.group('num')) - 1 # convert to zero-base\n table_keywords.append((idx, match.group(0), base_keyword,\n num))\n\n # First delete\n rev_sorted_idx_0 = sorted(table_keywords, key=operator.itemgetter(0),\n reverse=True)\n for idx, keyword, _, num in rev_sorted_idx_0:\n if index is None or index == num:\n del self._header[idx]\n\n # Now shift up remaining column keywords if only one column was cleared\n if index is not None:\n sorted_idx_3 = sorted(table_keywords, key=operator.itemgetter(3))\n for _, keyword, base_keyword, num in sorted_idx_3:\n if num <= index:\n continue\n\n old_card = self._header.cards[keyword]\n new_card = (base_keyword + str(num), old_card.value,\n old_card.comment)\n self._header.insert(keyword, new_card)\n del self._header[keyword]\n\n # Also decrement TFIELDS\n if 'TFIELDS' in self._header:\n self._header['TFIELDS'] -= 1\n\n def _populate_table_keywords(self):\n \"\"\"Populate the new table definition keywords from the header.\"\"\"\n\n for idx, column in enumerate(self.columns):\n for keyword, attr in KEYWORD_TO_ATTRIBUTE.items():\n val = getattr(column, attr)\n if val is not None:\n keyword = keyword + str(idx + 1)\n self._header[keyword] = val\n\n\nclass TableHDU(_TableBaseHDU):\n \"\"\"\n FITS ASCII table extension HDU class.\n\n Parameters\n ----------\n data : array or `FITS_rec`\n Data to be used.\n header : `Header`\n Header to be used.\n name : str\n Name to be populated in ``EXTNAME`` keyword.\n ver : int > 0 or None, optional\n The ver of the HDU, will be the value of the keyword ``EXTVER``.\n If not given or None, it defaults to the value of the ``EXTVER``\n card of the ``header`` or 1.\n (default: None)\n character_as_bytes : bool\n Whether to return bytes for string columns. 
By default this is `False`\n and (unicode) strings are returned, but this does not respect memory\n mapping and loads the whole column in memory when accessed.\n\n \"\"\"\n\n _extension = 'TABLE'\n _ext_comment = 'ASCII table extension'\n\n _padding_byte = ' '\n _columns_type = _AsciiColDefs\n\n __format_RE = re.compile(\n r'(?P<code>[ADEFIJ])(?P<width>\\d+)(?:\\.(?P<prec>\\d+))?')\n\n def __init__(self, data=None, header=None, name=None, ver=None, character_as_bytes=False):\n super().__init__(data, header, name=name, ver=ver, character_as_bytes=character_as_bytes)\n\n @classmethod\n def match_header(cls, header):\n card = header.cards[0]\n xtension = card.value\n if isinstance(xtension, str):\n xtension = xtension.rstrip()\n return card.keyword == 'XTENSION' and xtension == cls._extension\n\n def _get_tbdata(self):\n columns = self.columns\n names = [n for idx, n in enumerate(columns.names)]\n\n # determine if there are duplicate field names and if there\n # are throw an exception\n dup = np.rec.find_duplicate(names)\n\n if dup:\n raise ValueError(\"Duplicate field names: {}\".format(dup))\n\n # TODO: Determine if this extra logic is necessary--I feel like the\n # _AsciiColDefs class should be responsible for telling the table what\n # its dtype should be...\n itemsize = columns.spans[-1] + columns.starts[-1] - 1\n dtype = {}\n\n for idx in range(len(columns)):\n data_type = 'S' + str(columns.spans[idx])\n\n if idx == len(columns) - 1:\n # The last column is padded out to the value of NAXIS1\n if self._header['NAXIS1'] > itemsize:\n data_type = 'S' + str(columns.spans[idx] +\n self._header['NAXIS1'] - itemsize)\n dtype[columns.names[idx]] = (data_type, columns.starts[idx] - 1)\n\n raw_data = self._get_raw_data(self._nrows, dtype, self._data_offset)\n data = raw_data.view(np.rec.recarray)\n self._init_tbdata(data)\n return data.view(self._data_type)\n\n def _calculate_datasum(self):\n \"\"\"\n Calculate the value for the ``DATASUM`` card in the HDU.\n \"\"\"\n\n if self._has_data:\n # We have the data to be used.\n # We need to pad the data to a block length before calculating\n # the datasum.\n bytes_array = self.data.view(type=np.ndarray, dtype=np.ubyte)\n padding = np.frombuffer(_pad_length(self.size) * b' ',\n dtype=np.ubyte)\n\n d = np.append(bytes_array, padding)\n\n cs = self._compute_checksum(d)\n return cs\n else:\n # This is the case where the data has not been read from the file\n # yet. We can handle that in a generic manner so we do it in the\n # base class. The other possibility is that there is no data at\n # all. 
This can also be handled in a generic manner.\n return super()._calculate_datasum()\n\n def _verify(self, option='warn'):\n \"\"\"\n `TableHDU` verify method.\n \"\"\"\n\n errs = super()._verify(option=option)\n self.req_cards('PCOUNT', None, lambda v: (v == 0), 0, option, errs)\n tfields = self._header['TFIELDS']\n for idx in range(tfields):\n self.req_cards('TBCOL' + str(idx + 1), None, _is_int, None, option,\n errs)\n return errs\n\n\nclass BinTableHDU(_TableBaseHDU):\n \"\"\"\n Binary table HDU class.\n\n Parameters\n ----------\n data : array, `FITS_rec`, or `~astropy.table.Table`\n Data to be used.\n header : `Header`\n Header to be used.\n name : str\n Name to be populated in ``EXTNAME`` keyword.\n uint : bool, optional\n Set to `True` if the table contains unsigned integer columns.\n ver : int > 0 or None, optional\n The ver of the HDU, will be the value of the keyword ``EXTVER``.\n If not given or None, it defaults to the value of the ``EXTVER``\n card of the ``header`` or 1.\n (default: None)\n character_as_bytes : bool\n Whether to return bytes for string columns. By default this is `False`\n and (unicode) strings are returned, but this does not respect memory\n mapping and loads the whole column in memory when accessed.\n\n \"\"\"\n\n _extension = 'BINTABLE'\n _ext_comment = 'binary table extension'\n\n def __init__(self, data=None, header=None, name=None, uint=False, ver=None,\n character_as_bytes=False):\n from astropy.table import Table\n if isinstance(data, Table):\n from astropy.io.fits.convenience import table_to_hdu\n hdu = table_to_hdu(data)\n if header is not None:\n hdu.header.update(header)\n data = hdu.data\n header = hdu.header\n\n super().__init__(data, header, name=name, uint=uint, ver=ver,\n character_as_bytes=character_as_bytes)\n\n @classmethod\n def match_header(cls, header):\n card = header.cards[0]\n xtension = card.value\n if isinstance(xtension, str):\n xtension = xtension.rstrip()\n return (card.keyword == 'XTENSION' and\n xtension in (cls._extension, 'A3DTABLE'))\n\n def _calculate_datasum_with_heap(self):\n \"\"\"\n Calculate the value for the ``DATASUM`` card given the input data\n \"\"\"\n\n with _binary_table_byte_swap(self.data) as data:\n dout = data.view(type=np.ndarray, dtype=np.ubyte)\n csum = self._compute_checksum(dout)\n\n # Now add in the heap data to the checksum (we can skip any gap\n # between the table and the heap since it's all zeros and doesn't\n # contribute to the checksum\n # TODO: The following code may no longer be necessary since it is\n # now possible to get a pointer directly to the heap data as a\n # whole. That said, it is possible for the heap section to contain\n # data that is not actually pointed to by the table (i.e. 
garbage;\n # this *shouldn't* happen but it is not disallowed either)--need to\n # double check whether or not the checksum should include such\n # garbage\n for idx in range(data._nfields):\n if isinstance(data.columns._recformats[idx], _FormatP):\n for coldata in data.field(idx):\n # coldata should already be byteswapped from the call\n # to _binary_table_byte_swap\n if not len(coldata):\n continue\n\n csum = self._compute_checksum(coldata, csum)\n\n return csum\n\n def _calculate_datasum(self):\n \"\"\"\n Calculate the value for the ``DATASUM`` card in the HDU.\n \"\"\"\n\n if self._has_data:\n # This method calculates the datasum while incorporating any\n # heap data, which is obviously not handled from the base\n # _calculate_datasum\n return self._calculate_datasum_with_heap()\n else:\n # This is the case where the data has not been read from the file\n # yet. We can handle that in a generic manner so we do it in the\n # base class. The other possibility is that there is no data at\n # all. This can also be handled in a generic manner.\n return super()._calculate_datasum()\n\n def _writedata_internal(self, fileobj):\n size = 0\n\n if self.data is None:\n return size\n\n with _binary_table_byte_swap(self.data) as data:\n if _has_unicode_fields(data):\n # If the raw data was a user-supplied recarray, we can't write\n # unicode columns directly to the file, so we have to switch\n # to a slower row-by-row write\n self._writedata_by_row(fileobj)\n else:\n fileobj.writearray(data)\n # write out the heap of variable length array columns this has\n # to be done after the \"regular\" data is written (above)\n fileobj.write((data._gap * '\\0').encode('ascii'))\n\n nbytes = data._gap\n\n if not self._manages_own_heap:\n # Write the heap data one column at a time, in the order\n # that the data pointers appear in the column (regardless\n # if that data pointer has a different, previous heap\n # offset listed)\n for idx in range(data._nfields):\n if not isinstance(data.columns._recformats[idx],\n _FormatP):\n continue\n\n field = self.data.field(idx)\n for row in field:\n if len(row) > 0:\n nbytes += row.nbytes\n if not fileobj.simulateonly:\n fileobj.writearray(row)\n else:\n heap_data = data._get_heap_data()\n if len(heap_data) > 0:\n nbytes += len(heap_data)\n if not fileobj.simulateonly:\n fileobj.writearray(heap_data)\n\n data._heapsize = nbytes - data._gap\n size += nbytes\n\n size += self.data.size * self.data._raw_itemsize\n\n return size\n\n def _writedata_by_row(self, fileobj):\n fields = [self.data.field(idx)\n for idx in range(len(self.data.columns))]\n\n # Creating Record objects is expensive (as in\n # `for row in self.data:` so instead we just iterate over the row\n # indices and get one field at a time:\n for idx in range(len(self.data)):\n for field in fields:\n item = field[idx]\n field_width = None\n\n if field.dtype.kind == 'U':\n # Read the field *width* by reading past the field kind.\n i = field.dtype.str.index(field.dtype.kind)\n field_width = int(field.dtype.str[i+1:])\n item = np.char.encode(item, 'ascii')\n\n fileobj.writearray(item)\n if field_width is not None:\n j = item.dtype.str.index(item.dtype.kind)\n item_length = int(item.dtype.str[j+1:])\n # Fix padding problem (see #5296).\n padding = '\\x00'*(field_width - item_length)\n fileobj.write(padding.encode('ascii'))\n\n _tdump_file_format = textwrap.dedent(\"\"\"\n\n - **datafile:** Each line of the data file represents one row of table\n data. The data is output one column at a time in column order. 
If\n a column contains an array, each element of the column array in the\n current row is output before moving on to the next column. Each row\n ends with a new line.\n\n Integer data is output right-justified in a 21-character field\n followed by a blank. Floating point data is output right justified\n using 'g' format in a 21-character field with 15 digits of\n precision, followed by a blank. String data that does not contain\n whitespace is output left-justified in a field whose width matches\n the width specified in the ``TFORM`` header parameter for the\n column, followed by a blank. When the string data contains\n whitespace characters, the string is enclosed in quotation marks\n (``\"\"``). For the last data element in a row, the trailing blank in\n the field is replaced by a new line character.\n\n For column data containing variable length arrays ('P' format), the\n array data is preceded by the string ``'VLA_Length= '`` and the\n integer length of the array for that row, left-justified in a\n 21-character field, followed by a blank.\n\n .. note::\n\n This format does *not* support variable length arrays using the\n ('Q' format) due to difficult to overcome ambiguities. What this\n means is that this file format cannot support VLA columns in\n tables stored in files that are over 2 GB in size.\n\n For column data representing a bit field ('X' format), each bit\n value in the field is output right-justified in a 21-character field\n as 1 (for true) or 0 (for false).\n\n - **cdfile:** Each line of the column definitions file provides the\n definitions for one column in the table. The line is broken up into\n 8, sixteen-character fields. The first field provides the column\n name (``TTYPEn``). The second field provides the column format\n (``TFORMn``). The third field provides the display format\n (``TDISPn``). The fourth field provides the physical units\n (``TUNITn``). The fifth field provides the dimensions for a\n multidimensional array (``TDIMn``). The sixth field provides the\n value that signifies an undefined value (``TNULLn``). The seventh\n field provides the scale factor (``TSCALn``). The eighth field\n provides the offset value (``TZEROn``). A field value of ``\"\"`` is\n used to represent the case where no value is provided.\n\n - **hfile:** Each line of the header parameters file provides the\n definition of a single HDU header card as represented by the card\n image.\n \"\"\")\n\n @deprecated_renamed_argument('clobber', 'overwrite', '2.0')\n def dump(self, datafile=None, cdfile=None, hfile=None, overwrite=False):\n \"\"\"\n Dump the table HDU to a file in ASCII format. The table may be dumped\n in three separate files, one containing column definitions, one\n containing header parameters, and one for table data.\n\n Parameters\n ----------\n datafile : file path, file object or file-like object, optional\n Output data file. The default is the root name of the\n fits file associated with this HDU appended with the\n extension ``.txt``.\n\n cdfile : file path, file object or file-like object, optional\n Output column definitions file. The default is `None`, no\n column definitions output is produced.\n\n hfile : file path, file object or file-like object, optional\n Output header parameters file. The default is `None`,\n no header parameters output is produced.\n\n overwrite : bool, optional\n If ``True``, overwrite the output file if it exists. Raises an\n ``OSError`` if ``False`` and the output file exists. Default is\n ``False``.\n\n .. 
versionchanged:: 1.3\n ``overwrite`` replaces the deprecated ``clobber`` argument.\n\n Notes\n -----\n The primary use for the `dump` method is to allow viewing and editing\n the table data and parameters in a standard text editor.\n The `load` method can be used to create a new table from the three\n plain text (ASCII) files.\n \"\"\"\n\n # check if the output files already exist\n exist = []\n files = [datafile, cdfile, hfile]\n\n for f in files:\n if isinstance(f, str):\n if os.path.exists(f) and os.path.getsize(f) != 0:\n if overwrite:\n os.remove(f)\n else:\n exist.append(f)\n\n if exist:\n raise OSError(' '.join([\"File '{}' already exists.\".format(f)\n for f in exist]))\n\n # Process the data\n self._dump_data(datafile)\n\n # Process the column definitions\n if cdfile:\n self._dump_coldefs(cdfile)\n\n # Process the header parameters\n if hfile:\n self._header.tofile(hfile, sep='\\n', endcard=False, padding=False)\n\n if isinstance(dump.__doc__, str):\n dump.__doc__ += _tdump_file_format.replace('\\n', '\\n ')\n\n def load(cls, datafile, cdfile=None, hfile=None, replace=False,\n header=None):\n \"\"\"\n Create a table from the input ASCII files. The input is from up to\n three separate files, one containing column definitions, one containing\n header parameters, and one containing column data.\n\n The column definition and header parameters files are not required.\n When absent the column definitions and/or header parameters are taken\n from the header object given in the header argument; otherwise sensible\n defaults are inferred (though this mode is not recommended).\n\n Parameters\n ----------\n datafile : file path, file object or file-like object\n Input data file containing the table data in ASCII format.\n\n cdfile : file path, file object, file-like object, optional\n Input column definition file containing the names,\n formats, display formats, physical units, multidimensional\n array dimensions, undefined values, scale factors, and\n offsets associated with the columns in the table. If\n `None`, the column definitions are taken from the current\n values in this object.\n\n hfile : file path, file object, file-like object, optional\n Input parameter definition file containing the header\n parameter definitions to be associated with the table. If\n `None`, the header parameter definitions are taken from\n the current values in this objects header.\n\n replace : bool\n When `True`, indicates that the entire header should be\n replaced with the contents of the ASCII file instead of\n just updating the current header.\n\n header : Header object\n When the cdfile and hfile are missing, use this Header object in\n the creation of the new table and HDU. Otherwise this Header\n supersedes the keywords from hfile, which is only used to update\n values not present in this Header, unless ``replace=True`` in which\n this Header's values are completely replaced with the values from\n hfile.\n\n Notes\n -----\n The primary use for the `load` method is to allow the input of ASCII\n data that was edited in a standard text editor of the table data and\n parameters. 
The `dump` method can be used to create the initial ASCII\n files.\n \"\"\"\n\n # Process the parameter file\n if header is None:\n header = Header()\n\n if hfile:\n if replace:\n header = Header.fromtextfile(hfile)\n else:\n header.extend(Header.fromtextfile(hfile), update=True,\n update_first=True)\n\n coldefs = None\n # Process the column definitions file\n if cdfile:\n coldefs = cls._load_coldefs(cdfile)\n\n # Process the data file\n data = cls._load_data(datafile, coldefs)\n if coldefs is None:\n coldefs = ColDefs(data)\n\n # Create a new HDU using the supplied header and data\n hdu = cls(data=data, header=header)\n hdu.columns = coldefs\n return hdu\n\n if isinstance(load.__doc__, str):\n load.__doc__ += _tdump_file_format.replace('\\n', '\\n ')\n\n load = classmethod(load)\n # Have to create a classmethod from this here instead of as a decorator;\n # otherwise we can't update __doc__\n\n def _dump_data(self, fileobj):\n \"\"\"\n Write the table data in the ASCII format read by BinTableHDU.load()\n to fileobj.\n \"\"\"\n\n if not fileobj and self._file:\n root = os.path.splitext(self._file.name)[0]\n fileobj = root + '.txt'\n\n close_file = False\n\n if isinstance(fileobj, str):\n fileobj = open(fileobj, 'w')\n close_file = True\n\n linewriter = csv.writer(fileobj, dialect=FITSTableDumpDialect)\n\n # Process each row of the table and output one row at a time\n def format_value(val, format):\n if format[0] == 'S':\n itemsize = int(format[1:])\n return '{:{size}}'.format(val, size=itemsize)\n elif format in np.typecodes['AllInteger']:\n # output integer\n return '{:21d}'.format(val)\n elif format in np.typecodes['Complex']:\n return '{:21.15g}+{:.15g}j'.format(val.real, val.imag)\n elif format in np.typecodes['Float']:\n # output floating point\n return '{:#21.15g}'.format(val)\n\n for row in self.data:\n line = [] # the line for this row of the table\n\n # Process each column of the row.\n for column in self.columns:\n # format of data in a variable length array\n # where None means it is not a VLA:\n vla_format = None\n format = _convert_format(column.format)\n\n if isinstance(format, _FormatP):\n # P format means this is a variable length array so output\n # the length of the array for this row and set the format\n # for the VLA data\n line.append('VLA_Length=')\n line.append('{:21d}'.format(len(row[column.name])))\n _, dtype, option = _parse_tformat(column.format)\n vla_format = FITS2NUMPY[option[0]][0]\n\n if vla_format:\n # Output the data for each element in the array\n for val in row[column.name].flat:\n line.append(format_value(val, vla_format))\n else:\n # The column data is a single element\n dtype = self.data.dtype.fields[column.name][0]\n array_format = dtype.char\n if array_format == 'V':\n array_format = dtype.base.char\n if array_format == 'S':\n array_format += str(dtype.itemsize)\n\n if dtype.char == 'V':\n for value in row[column.name].flat:\n line.append(format_value(value, array_format))\n else:\n line.append(format_value(row[column.name],\n array_format))\n linewriter.writerow(line)\n if close_file:\n fileobj.close()\n\n def _dump_coldefs(self, fileobj):\n \"\"\"\n Write the column definition parameters in the ASCII format read by\n BinTableHDU.load() to fileobj.\n \"\"\"\n\n close_file = False\n\n if isinstance(fileobj, str):\n fileobj = open(fileobj, 'w')\n close_file = True\n\n # Process each column of the table and output the result to the\n # file one at a time\n for column in self.columns:\n line = [column.name, column.format]\n attrs = ['disp', 'unit', 
'dim', 'null', 'bscale', 'bzero']\n line += ['{:16s}'.format(value if value else '\"\"')\n for value in (getattr(column, attr) for attr in attrs)]\n fileobj.write(' '.join(line))\n fileobj.write('\\n')\n\n if close_file:\n fileobj.close()\n\n @classmethod\n def _load_data(cls, fileobj, coldefs=None):\n \"\"\"\n Read the table data from the ASCII file output by BinTableHDU.dump().\n \"\"\"\n\n close_file = False\n\n if isinstance(fileobj, str):\n fileobj = open(fileobj, 'r')\n close_file = True\n\n initialpos = fileobj.tell() # We'll be returning here later\n linereader = csv.reader(fileobj, dialect=FITSTableDumpDialect)\n\n # First we need to do some preprocessing on the file to find out how\n # much memory we'll need to reserve for the table. This is necessary\n # even if we already have the coldefs in order to determine how many\n # rows to reserve memory for\n vla_lengths = []\n recformats = []\n names = []\n nrows = 0\n if coldefs is not None:\n recformats = coldefs._recformats\n names = coldefs.names\n\n def update_recformats(value, idx):\n fitsformat = _scalar_to_format(value)\n recformat = _convert_format(fitsformat)\n if idx >= len(recformats):\n recformats.append(recformat)\n else:\n if _cmp_recformats(recformats[idx], recformat) < 0:\n recformats[idx] = recformat\n\n # TODO: The handling of VLAs could probably be simplified a bit\n for row in linereader:\n nrows += 1\n if coldefs is not None:\n continue\n col = 0\n idx = 0\n while idx < len(row):\n if row[idx] == 'VLA_Length=':\n if col < len(vla_lengths):\n vla_length = vla_lengths[col]\n else:\n vla_length = int(row[idx + 1])\n vla_lengths.append(vla_length)\n idx += 2\n while vla_length:\n update_recformats(row[idx], col)\n vla_length -= 1\n idx += 1\n col += 1\n else:\n if col >= len(vla_lengths):\n vla_lengths.append(None)\n update_recformats(row[idx], col)\n col += 1\n idx += 1\n\n # Update the recformats for any VLAs\n for idx, length in enumerate(vla_lengths):\n if length is not None:\n recformats[idx] = str(length) + recformats[idx]\n\n dtype = np.rec.format_parser(recformats, names, None).dtype\n\n # TODO: In the future maybe enable loading a bit at a time so that we\n # can convert from this format to an actual FITS file on disk without\n # needing enough physical memory to hold the entire thing at once\n hdu = BinTableHDU.from_columns(np.recarray(shape=1, dtype=dtype),\n nrows=nrows, fill=True)\n\n # TODO: It seems to me a lot of this could/should be handled from\n # within the FITS_rec class rather than here.\n data = hdu.data\n for idx, length in enumerate(vla_lengths):\n if length is not None:\n arr = data.columns._arrays[idx]\n dt = recformats[idx][len(str(length)):]\n\n # NOTE: FormatQ not supported here; it's hard to determine\n # whether or not it will be necessary to use a wider descriptor\n # type. 
The function documentation will have to serve as a\n # warning that this is not supported.\n recformats[idx] = _FormatP(dt, max=length)\n data.columns._recformats[idx] = recformats[idx]\n name = data.columns.names[idx]\n data._cache_field(name, _makep(arr, arr, recformats[idx]))\n\n def format_value(col, val):\n # Special formatting for a couple particular data types\n if recformats[col] == FITS2NUMPY['L']:\n return bool(int(val))\n elif recformats[col] == FITS2NUMPY['M']:\n # For some reason, in arrays/fields where numpy expects a\n # complex it's not happy to take a string representation\n # (though it's happy to do that in other contexts), so we have\n # to convert the string representation for it:\n return complex(val)\n else:\n return val\n\n # Jump back to the start of the data and create a new line reader\n fileobj.seek(initialpos)\n linereader = csv.reader(fileobj, dialect=FITSTableDumpDialect)\n for row, line in enumerate(linereader):\n col = 0\n idx = 0\n while idx < len(line):\n if line[idx] == 'VLA_Length=':\n vla_len = vla_lengths[col]\n idx += 2\n slice_ = slice(idx, idx + vla_len)\n data[row][col][:] = line[idx:idx + vla_len]\n idx += vla_len\n elif dtype[col].shape:\n # This is an array column\n array_size = int(np.multiply.reduce(dtype[col].shape))\n slice_ = slice(idx, idx + array_size)\n idx += array_size\n else:\n slice_ = None\n\n if slice_ is None:\n # This is a scalar row element\n data[row][col] = format_value(col, line[idx])\n idx += 1\n else:\n data[row][col].flat[:] = [format_value(col, val)\n for val in line[slice_]]\n\n col += 1\n\n if close_file:\n fileobj.close()\n\n return data\n\n @classmethod\n def _load_coldefs(cls, fileobj):\n \"\"\"\n Read the table column definitions from the ASCII file output by\n BinTableHDU.dump().\n \"\"\"\n\n close_file = False\n\n if isinstance(fileobj, str):\n fileobj = open(fileobj, 'r')\n close_file = True\n\n columns = []\n\n for line in fileobj:\n words = line[:-1].split()\n kwargs = {}\n for key in ['name', 'format', 'disp', 'unit', 'dim']:\n kwargs[key] = words.pop(0).replace('\"\"', '')\n\n for key in ['null', 'bscale', 'bzero']:\n word = words.pop(0).replace('\"\"', '')\n if word:\n word = _str_to_num(word)\n kwargs[key] = word\n columns.append(Column(**kwargs))\n\n if close_file:\n fileobj.close()\n\n return ColDefs(columns)\n\n\[email protected]\ndef _binary_table_byte_swap(data):\n \"\"\"\n Ensures that all the data of a binary FITS table (represented as a FITS_rec\n object) is in a big-endian byte order. 
Columns are swapped in-place one\n at a time, and then returned to their previous byte order when this context\n manager exits.\n\n Because a new dtype is needed to represent the byte-swapped columns, the\n new dtype is temporarily applied as well.\n \"\"\"\n\n orig_dtype = data.dtype\n\n names = []\n formats = []\n offsets = []\n\n to_swap = []\n\n if sys.byteorder == 'little':\n swap_types = ('<', '=')\n else:\n swap_types = ('<',)\n\n for idx, name in enumerate(orig_dtype.names):\n field = _get_recarray_field(data, idx)\n\n field_dtype, field_offset = orig_dtype.fields[name]\n names.append(name)\n formats.append(field_dtype)\n offsets.append(field_offset)\n\n if isinstance(field, chararray.chararray):\n continue\n\n # only swap unswapped\n # must use field_dtype.base here since for multi-element dtypes,\n # the .str with be '|V<N>' where <N> is the total bytes per element\n if field.itemsize > 1 and field_dtype.base.str[0] in swap_types:\n to_swap.append(field)\n # Override the dtype for this field in the new record dtype with\n # the byteswapped version\n formats[-1] = field_dtype.newbyteorder()\n\n # deal with var length table\n recformat = data.columns._recformats[idx]\n if isinstance(recformat, _FormatP):\n coldata = data.field(idx)\n for c in coldata:\n if (not isinstance(c, chararray.chararray) and\n c.itemsize > 1 and c.dtype.str[0] in swap_types):\n to_swap.append(c)\n\n for arr in reversed(to_swap):\n arr.byteswap(True)\n\n data.dtype = np.dtype({'names': names,\n 'formats': formats,\n 'offsets': offsets})\n\n yield data\n\n for arr in to_swap:\n arr.byteswap(True)\n\n data.dtype = orig_dtype\n",
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport pytest\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom astropy.wcs import WCS\n\nfrom astropy.visualization.wcsaxes import WCSAxes\nfrom astropy.visualization.wcsaxes.frame import BaseFrame\n\nfrom astropy.tests.image_tests import IMAGE_REFERENCE_DIR\nfrom .test_images import BaseImageTests\n\n\nclass HexagonalFrame(BaseFrame):\n\n spine_names = 'abcdef'\n\n def update_spines(self):\n\n xmin, xmax = self.parent_axes.get_xlim()\n ymin, ymax = self.parent_axes.get_ylim()\n\n ymid = 0.5 * (ymin + ymax)\n xmid1 = (xmin + xmax) / 4.\n xmid2 = (xmin + xmax) * 3. / 4.\n\n self['a'].data = np.array(([xmid1, ymin], [xmid2, ymin]))\n self['b'].data = np.array(([xmid2, ymin], [xmax, ymid]))\n self['c'].data = np.array(([xmax, ymid], [xmid2, ymax]))\n self['d'].data = np.array(([xmid2, ymax], [xmid1, ymax]))\n self['e'].data = np.array(([xmid1, ymax], [xmin, ymid]))\n self['f'].data = np.array(([xmin, ymid], [xmid1, ymin]))\n\n\nclass TestFrame(BaseImageTests):\n\n @pytest.mark.remote_data(source='astropy')\n @pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,\n tolerance=0, style={})\n def test_custom_frame(self):\n\n wcs = WCS(self.msx_header)\n\n fig = plt.figure(figsize=(4, 4))\n\n ax = WCSAxes(fig, [0.15, 0.15, 0.7, 0.7],\n wcs=wcs,\n frame_class=HexagonalFrame)\n fig.add_axes(ax)\n\n ax.coords.grid(color='white')\n\n im = ax.imshow(np.ones((149, 149)), vmin=0., vmax=2.,\n origin='lower', cmap=plt.cm.gist_heat)\n\n minpad = {}\n minpad['a'] = minpad['d'] = 1\n minpad['b'] = minpad['c'] = minpad['e'] = minpad['f'] = 2.75\n\n ax.coords['glon'].set_axislabel(\"Longitude\", minpad=minpad)\n ax.coords['glon'].set_axislabel_position('ad')\n\n ax.coords['glat'].set_axislabel(\"Latitude\", minpad=minpad)\n ax.coords['glat'].set_axislabel_position('bcef')\n\n ax.coords['glon'].set_ticklabel_position('ad')\n ax.coords['glat'].set_ticklabel_position('bcef')\n\n # Set limits so that no labels overlap\n ax.set_xlim(5.5, 100.5)\n ax.set_ylim(5.5, 110.5)\n\n # Clip the image to the frame\n im.set_clip_path(ax.coords.frame.patch)\n\n return fig\n\n @pytest.mark.remote_data(source='astropy')\n @pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,\n tolerance=0, style={})\n def test_update_clip_path_rectangular(self, tmpdir):\n\n fig = plt.figure()\n ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], aspect='equal')\n\n fig.add_axes(ax)\n\n ax.set_xlim(0., 2.)\n ax.set_ylim(0., 2.)\n\n # Force drawing, which freezes the clip path returned by WCSAxes\n fig.savefig(tmpdir.join('nothing').strpath)\n\n ax.imshow(np.zeros((12, 4)))\n\n ax.set_xlim(-0.5, 3.5)\n ax.set_ylim(-0.5, 11.5)\n\n return fig\n\n @pytest.mark.remote_data(source='astropy')\n @pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,\n tolerance=0, style={})\n def test_update_clip_path_nonrectangular(self, tmpdir):\n\n fig = plt.figure()\n ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], aspect='equal',\n frame_class=HexagonalFrame)\n\n fig.add_axes(ax)\n\n ax.set_xlim(0., 2.)\n ax.set_ylim(0., 2.)\n\n # Force drawing, which freezes the clip path returned by WCSAxes\n fig.savefig(tmpdir.join('nothing').strpath)\n\n ax.imshow(np.zeros((12, 4)))\n\n ax.set_xlim(-0.5, 3.5)\n ax.set_ylim(-0.5, 11.5)\n\n return fig\n\n @pytest.mark.remote_data(source='astropy')\n @pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,\n tolerance=0, style={})\n def test_update_clip_path_change_wcs(self, tmpdir):\n\n # When WCS is changed, a new frame is created, so 
we need to make sure\n # that the path is carried over to the new frame.\n\n fig = plt.figure()\n ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], aspect='equal')\n\n fig.add_axes(ax)\n\n ax.set_xlim(0., 2.)\n ax.set_ylim(0., 2.)\n\n # Force drawing, which freezes the clip path returned by WCSAxes\n fig.savefig(tmpdir.join('nothing').strpath)\n\n ax.reset_wcs()\n\n ax.imshow(np.zeros((12, 4)))\n\n ax.set_xlim(-0.5, 3.5)\n ax.set_ylim(-0.5, 11.5)\n\n return fig\n\n def test_copy_frame_properties_change_wcs(self):\n\n # When WCS is changed, a new frame is created, so we need to make sure\n # that the color and linewidth are transferred over\n\n fig = plt.figure()\n ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])\n fig.add_axes(ax)\n ax.coords.frame.set_linewidth(5)\n ax.coords.frame.set_color('purple')\n ax.reset_wcs()\n assert ax.coords.frame.get_linewidth() == 5\n assert ax.coords.frame.get_color() == 'purple'\n",
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"\n``fitsheader`` is a command line script based on astropy.io.fits for printing\nthe header(s) of one or more FITS file(s) to the standard output in a human-\nreadable format.\n\nExample uses of fitsheader:\n\n1. Print the header of all the HDUs of a .fits file::\n\n $ fitsheader filename.fits\n\n2. Print the header of the third and fifth HDU extension::\n\n $ fitsheader --extension 3 --extension 5 filename.fits\n\n3. Print the header of a named extension, e.g. select the HDU containing\n keywords EXTNAME='SCI' and EXTVER='2'::\n\n $ fitsheader --extension \"SCI,2\" filename.fits\n\n4. Print only specific keywords::\n\n $ fitsheader --keyword BITPIX --keyword NAXIS filename.fits\n\n5. Print keywords NAXIS, NAXIS1, NAXIS2, etc using a wildcard::\n\n $ fitsheader --keyword NAXIS* filename.fits\n\n6. Dump the header keywords of all the files in the current directory into a\n machine-readable csv file::\n\n $ fitsheader --table ascii.csv *.fits > keywords.csv\n\n7. Specify hierarchical keywords with the dotted or spaced notation::\n\n $ fitsheader --keyword ESO.INS.ID filename.fits\n $ fitsheader --keyword \"ESO INS ID\" filename.fits\n\n8. Compare the headers of different fites files, following ESO's ``fitsort``\n format::\n\n $ fitsheader --fitsort --extension 0 --keyword ESO.INS.ID *.fits\n\n9. Same as above, sorting the output along a specified keyword::\n\n $ fitsheader -f DATE-OBS -e 0 -k DATE-OBS -k ESO.INS.ID *.fits\n\nNote that compressed images (HDUs of type\n:class:`~astropy.io.fits.CompImageHDU`) really have two headers: a real\nBINTABLE header to describe the compressed data, and a fake IMAGE header\nrepresenting the image that was compressed. Astropy returns the latter by\ndefault. 
You must supply the ``--compressed`` option if you require the real\nheader that describes the compression.\n\nWith Astropy installed, please run ``fitsheader --help`` to see the full usage\ndocumentation.\n\"\"\"\n\nimport sys\nimport argparse\n\nimport numpy as np\n\nfrom astropy.io import fits\nfrom astropy import log\n\n\nclass ExtensionNotFoundException(Exception):\n \"\"\"Raised if an HDU extension requested by the user does not exist.\"\"\"\n pass\n\n\nclass HeaderFormatter:\n \"\"\"Class to format the header(s) of a FITS file for display by the\n `fitsheader` tool; essentially a wrapper around a `HDUList` object.\n\n Example usage:\n fmt = HeaderFormatter('/path/to/file.fits')\n print(fmt.parse(extensions=[0, 3], keywords=['NAXIS', 'BITPIX']))\n\n Parameters\n ----------\n filename : str\n Path to a single FITS file.\n verbose : bool\n Verbose flag, to show more information about missing extensions,\n keywords, etc.\n\n Raises\n ------\n OSError\n If `filename` does not exist or cannot be read.\n \"\"\"\n\n def __init__(self, filename, verbose=True):\n self.filename = filename\n self.verbose = verbose\n self._hdulist = fits.open(filename)\n\n def parse(self, extensions=None, keywords=None, compressed=False):\n \"\"\"Returns the FITS file header(s) in a readable format.\n\n Parameters\n ----------\n extensions : list of int or str, optional\n Format only specific HDU(s), identified by number or name.\n The name can be composed of the \"EXTNAME\" or \"EXTNAME,EXTVER\"\n keywords.\n\n keywords : list of str, optional\n Keywords for which the value(s) should be returned.\n If not specified, then the entire header is returned.\n\n compressed : boolean, optional\n If True, shows the header describing the compression, rather than\n the header obtained after decompression. 
(Affects FITS files\n containing `CompImageHDU` extensions only.)\n\n Returns\n -------\n formatted_header : str or astropy.table.Table\n Traditional 80-char wide format in the case of `HeaderFormatter`;\n an Astropy Table object in the case of `TableHeaderFormatter`.\n \"\"\"\n # `hdukeys` will hold the keys of the HDUList items to display\n if extensions is None:\n hdukeys = range(len(self._hdulist)) # Display all by default\n else:\n hdukeys = []\n for ext in extensions:\n try:\n # HDU may be specified by number\n hdukeys.append(int(ext))\n except ValueError:\n # The user can specify \"EXTNAME\" or \"EXTNAME,EXTVER\"\n parts = ext.split(',')\n if len(parts) > 1:\n extname = ','.join(parts[0:-1])\n extver = int(parts[-1])\n hdukeys.append((extname, extver))\n else:\n hdukeys.append(ext)\n\n # Having established which HDUs the user wants, we now format these:\n return self._parse_internal(hdukeys, keywords, compressed)\n\n def _parse_internal(self, hdukeys, keywords, compressed):\n \"\"\"The meat of the formatting; in a separate method to allow overriding.\n \"\"\"\n result = []\n for idx, hdu in enumerate(hdukeys):\n try:\n cards = self._get_cards(hdu, keywords, compressed)\n except ExtensionNotFoundException:\n continue\n\n if idx > 0: # Separate HDUs by a blank line\n result.append('\\n')\n result.append('# HDU {} in {}:\\n'.format(hdu, self.filename))\n for c in cards:\n result.append('{}\\n'.format(c))\n return ''.join(result)\n\n def _get_cards(self, hdukey, keywords, compressed):\n \"\"\"Returns a list of `astropy.io.fits.card.Card` objects.\n\n This function will return the desired header cards, taking into\n account the user's preference to see the compressed or uncompressed\n version.\n\n Parameters\n ----------\n hdukey : int or str\n Key of a single HDU in the HDUList.\n\n keywords : list of str, optional\n Keywords for which the cards should be returned.\n\n compressed : boolean, optional\n If True, shows the header describing the compression.\n\n Raises\n ------\n ExtensionNotFoundException\n If the hdukey does not correspond to an extension.\n \"\"\"\n # First we obtain the desired header\n try:\n if compressed:\n # In the case of a compressed image, return the header before\n # decompression (not the default behavior)\n header = self._hdulist[hdukey]._header\n else:\n header = self._hdulist[hdukey].header\n except (IndexError, KeyError):\n message = '{0}: Extension {1} not found.'.format(self.filename,\n hdukey)\n if self.verbose:\n log.warning(message)\n raise ExtensionNotFoundException(message)\n\n if not keywords: # return all cards\n cards = header.cards\n else: # specific keywords are requested\n cards = []\n for kw in keywords:\n try:\n crd = header.cards[kw]\n if isinstance(crd, fits.card.Card): # Single card\n cards.append(crd)\n else: # Allow for wildcard access\n cards.extend(crd)\n except KeyError as e: # Keyword does not exist\n if self.verbose:\n log.warning('{filename} (HDU {hdukey}): '\n 'Keyword {kw} not found.'.format(\n filename=self.filename,\n hdukey=hdukey,\n kw=kw))\n return cards\n\n\nclass TableHeaderFormatter(HeaderFormatter):\n \"\"\"Class to convert the header(s) of a FITS file into a Table object.\n The table returned by the `parse` method will contain four columns:\n filename, hdu, keyword, and value.\n\n Subclassed from HeaderFormatter, which contains the meat of the formatting.\n \"\"\"\n\n def _parse_internal(self, hdukeys, keywords, compressed):\n \"\"\"Method called by the parse method in the parent class.\"\"\"\n tablerows = []\n for hdu 
in hdukeys:\n try:\n for card in self._get_cards(hdu, keywords, compressed):\n tablerows.append({'filename': self.filename,\n 'hdu': hdu,\n 'keyword': card.keyword,\n 'value': str(card.value)})\n except ExtensionNotFoundException:\n pass\n\n if tablerows:\n from astropy import table\n return table.Table(tablerows)\n return None\n\n\ndef print_headers_traditional(args):\n \"\"\"Prints FITS header(s) using the traditional 80-char format.\n\n Parameters\n ----------\n args : argparse.Namespace\n Arguments passed from the command-line as defined below.\n \"\"\"\n for idx, filename in enumerate(args.filename): # support wildcards\n if idx > 0 and not args.keywords:\n print() # print a newline between different files\n try:\n formatter = HeaderFormatter(filename)\n print(formatter.parse(args.extensions,\n args.keywords,\n args.compressed), end='')\n except OSError as e:\n log.error(str(e))\n\n\ndef print_headers_as_table(args):\n \"\"\"Prints FITS header(s) in a machine-readable table format.\n\n Parameters\n ----------\n args : argparse.Namespace\n Arguments passed from the command-line as defined below.\n \"\"\"\n tables = []\n # Create a Table object for each file\n for filename in args.filename: # Support wildcards\n try:\n formatter = TableHeaderFormatter(filename)\n tbl = formatter.parse(args.extensions,\n args.keywords,\n args.compressed)\n if tbl:\n tables.append(tbl)\n except OSError as e:\n log.error(str(e)) # file not found or unreadable\n # Concatenate the tables\n if len(tables) == 0:\n return False\n elif len(tables) == 1:\n resulting_table = tables[0]\n else:\n from astropy import table\n resulting_table = table.vstack(tables)\n # Print the string representation of the concatenated table\n resulting_table.write(sys.stdout, format=args.table)\n\n\ndef print_headers_as_comparison(args):\n \"\"\"Prints FITS header(s) with keywords as columns.\n\n This follows the dfits+fitsort format.\n\n Parameters\n ----------\n args : argparse.Namespace\n Arguments passed from the command-line as defined below.\n \"\"\"\n from astropy import table\n tables = []\n # Create a Table object for each file\n for filename in args.filename: # Support wildcards\n try:\n formatter = TableHeaderFormatter(filename, verbose=False)\n tbl = formatter.parse(args.extensions,\n args.keywords,\n args.compressed)\n if tbl:\n # Remove empty keywords\n tbl = tbl[np.where(tbl['keyword'] != '')]\n else:\n tbl = table.Table([[filename]], names=('filename',))\n tables.append(tbl)\n except OSError as e:\n log.error(str(e)) # file not found or unreadable\n # Concatenate the tables\n if len(tables) == 0:\n return False\n elif len(tables) == 1:\n resulting_table = tables[0]\n else:\n resulting_table = table.vstack(tables)\n\n # If we obtained more than one hdu, merge hdu and keywords columns\n hdus = resulting_table['hdu']\n if np.ma.isMaskedArray(hdus):\n hdus = hdus.compressed()\n if len(np.unique(hdus)) > 1:\n for tab in tables:\n new_column = table.Column(\n ['{}:{}'.format(row['hdu'], row['keyword']) for row in tab])\n tab.add_column(new_column, name='hdu+keyword')\n keyword_column_name = 'hdu+keyword'\n else:\n keyword_column_name = 'keyword'\n\n # Check how many hdus we are processing\n final_tables = []\n for tab in tables:\n final_table = [table.Column([tab['filename'][0]], name='filename')]\n if 'value' in tab.colnames:\n for row in tab:\n if row['keyword'] in ('COMMENT', 'HISTORY'):\n continue\n final_table.append(table.Column([row['value']],\n name=row[keyword_column_name]))\n 
final_tables.append(table.Table(final_table))\n final_table = table.vstack(final_tables)\n # Sort if requested\n if args.fitsort is not True: # then it must be a keyword, therefore sort\n final_table.sort(args.fitsort)\n # Reorganise to keyword by columns\n final_table.pprint(max_lines=-1, max_width=-1)\n\n\nclass KeywordAppendAction(argparse.Action):\n def __call__(self, parser, namespace, values, option_string=None):\n keyword = values.replace('.', ' ')\n if namespace.keywords is None:\n namespace.keywords = []\n if keyword not in namespace.keywords:\n namespace.keywords.append(keyword)\n\n\ndef main(args=None):\n \"\"\"This is the main function called by the `fitsheader` script.\"\"\"\n\n parser = argparse.ArgumentParser(\n description=('Print the header(s) of a FITS file. '\n 'Optional arguments allow the desired extension(s), '\n 'keyword(s), and output format to be specified. '\n 'Note that in the case of a compressed image, '\n 'the decompressed header is shown by default.'))\n parser.add_argument('-e', '--extension', metavar='HDU',\n action='append', dest='extensions',\n help='specify the extension by name or number; '\n 'this argument can be repeated '\n 'to select multiple extensions')\n parser.add_argument('-k', '--keyword', metavar='KEYWORD',\n action=KeywordAppendAction, dest='keywords',\n help='specify a keyword; this argument can be '\n 'repeated to select multiple keywords; '\n 'also supports wildcards')\n parser.add_argument('-t', '--table',\n nargs='?', default=False, metavar='FORMAT',\n help='print the header(s) in machine-readable table '\n 'format; the default format is '\n '\"ascii.fixed_width\" (can be \"ascii.csv\", '\n '\"ascii.html\", \"ascii.latex\", \"fits\", etc)')\n parser.add_argument('-f', '--fitsort', action='store_true',\n help='print the headers as a table with each unique '\n 'keyword in a given column (fitsort format); '\n 'if a SORT_KEYWORD is specified, the result will be '\n 'sorted along that keyword')\n parser.add_argument('-c', '--compressed', action='store_true',\n help='for compressed image data, '\n 'show the true header which describes '\n 'the compression rather than the data')\n parser.add_argument('filename', nargs='+',\n help='path to one or more files; '\n 'wildcards are supported')\n args = parser.parse_args(args)\n\n # If `--table` was used but no format specified,\n # then use ascii.fixed_width by default\n if args.table is None:\n args.table = 'ascii.fixed_width'\n\n # Now print the desired headers\n try:\n if args.table:\n print_headers_as_table(args)\n elif args.fitsort:\n print_headers_as_comparison(args)\n else:\n print_headers_traditional(args)\n except OSError as e:\n # A 'Broken pipe' OSError may occur when stdout is closed prematurely,\n # eg. when calling `fitsheader file.fits | head`. We let this pass.\n pass\n",
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\n\nimport numpy as np\n\nfrom astropy import units as u\nfrom astropy.coordinates import BaseCoordinateFrame\n\n__all__ = ['select_step_degree', 'select_step_hour', 'select_step_scalar',\n 'coord_type_from_ctype', 'transform_contour_set_inplace']\n\ndef select_step_degree(dv):\n\n # Modified from axis_artist, supports astropy.units\n\n if dv > 1. * u.arcsec:\n\n degree_limits_ = [1.5, 3, 7, 13, 20, 40, 70, 120, 270, 520]\n degree_steps_ = [1, 2, 5, 10, 15, 30, 45, 90, 180, 360]\n degree_units = [u.degree] * len(degree_steps_)\n\n minsec_limits_ = [1.5, 2.5, 3.5, 8, 11, 18, 25, 45]\n minsec_steps_ = [1, 2, 3, 5, 10, 15, 20, 30]\n\n minute_limits_ = np.array(minsec_limits_) / 60.\n minute_units = [u.arcmin] * len(minute_limits_)\n\n second_limits_ = np.array(minsec_limits_) / 3600.\n second_units = [u.arcsec] * len(second_limits_)\n\n degree_limits = np.concatenate([second_limits_,\n minute_limits_,\n degree_limits_])\n\n degree_steps = minsec_steps_ + minsec_steps_ + degree_steps_\n degree_units = second_units + minute_units + degree_units\n\n n = degree_limits.searchsorted(dv.to(u.degree))\n step = degree_steps[n]\n unit = degree_units[n]\n\n return step * unit\n\n else:\n\n return select_step_scalar(dv.to_value(u.arcsec)) * u.arcsec\n\n\ndef select_step_hour(dv):\n\n if dv > 15. * u.arcsec:\n\n hour_limits_ = [1.5, 2.5, 3.5, 5, 7, 10, 15, 21, 36]\n hour_steps_ = [1, 2, 3, 4, 6, 8, 12, 18, 24]\n hour_units = [u.hourangle] * len(hour_steps_)\n\n minsec_limits_ = [1.5, 2.5, 3.5, 4.5, 5.5, 8, 11, 14, 18, 25, 45]\n minsec_steps_ = [1, 2, 3, 4, 5, 6, 10, 12, 15, 20, 30]\n\n minute_limits_ = np.array(minsec_limits_) / 60.\n minute_units = [15. * u.arcmin] * len(minute_limits_)\n\n second_limits_ = np.array(minsec_limits_) / 3600.\n second_units = [15. * u.arcsec] * len(second_limits_)\n\n hour_limits = np.concatenate([second_limits_,\n minute_limits_,\n hour_limits_])\n\n hour_steps = minsec_steps_ + minsec_steps_ + hour_steps_\n hour_units = second_units + minute_units + hour_units\n\n n = hour_limits.searchsorted(dv.to(u.hourangle))\n step = hour_steps[n]\n unit = hour_units[n]\n\n return step * unit\n\n else:\n\n return select_step_scalar(dv.to_value(15. * u.arcsec)) * (15. * u.arcsec)\n\n\ndef select_step_scalar(dv):\n\n log10_dv = np.log10(dv)\n\n base = np.floor(log10_dv)\n frac = log10_dv - base\n\n steps = np.log10([1, 2, 5, 10])\n\n imin = np.argmin(np.abs(frac - steps))\n\n return 10. 
** (base + steps[imin])\n\n\ndef get_coord_meta(frame):\n\n coord_meta = {}\n coord_meta['type'] = ('longitude', 'latitude')\n coord_meta['wrap'] = (None, None)\n coord_meta['unit'] = (u.deg, u.deg)\n\n from astropy.coordinates import frame_transform_graph\n\n if isinstance(frame, str):\n initial_frame = frame\n frame = frame_transform_graph.lookup_name(frame)\n if frame is None:\n raise ValueError(\"Unknown frame: {0}\".format(initial_frame))\n\n if not isinstance(frame, BaseCoordinateFrame):\n frame = frame()\n\n names = list(frame.representation_component_names.keys())\n coord_meta['name'] = names[:2]\n\n return coord_meta\n\n\ndef coord_type_from_ctype(ctype):\n \"\"\"\n Determine whether a particular WCS ctype corresponds to an angle or scalar\n coordinate.\n \"\"\"\n if ctype[:4] == 'RA--':\n return 'longitude', u.hourangle, None\n elif ctype[:4] == 'HPLN':\n return 'longitude', u.arcsec, 180.\n elif ctype[:4] == 'HPLT':\n return 'latitude', u.arcsec, None\n elif ctype[:4] == 'HGLN':\n return 'longitude', None, 180.\n elif ctype[1:4] == 'LON' or ctype[2:4] == 'LN':\n return 'longitude', None, None\n elif ctype[:4] == 'DEC-' or ctype[1:4] == 'LAT' or ctype[2:4] == 'LT':\n return 'latitude', None, None\n else:\n return 'scalar', None, None\n\n\ndef transform_contour_set_inplace(cset, transform):\n \"\"\"\n Transform a contour set in-place using a specified\n :class:`matplotlib.transform.Transform`\n\n Using transforms with the native Matplotlib contour/contourf can be slow if\n the transforms have a non-negligible overhead (which is the case for\n WCS/SkyCoord transforms) since the transform is called for each individual\n contour line. It is more efficient to stack all the contour lines together\n temporarily and transform them in one go.\n \"\"\"\n\n # The contours are represented as paths grouped into levels. Each can have\n # one or more paths. The approach we take here is to stack the vertices of\n # all paths and transform them in one go. The pos_level list helps us keep\n # track of where the set of segments for each overall contour level ends.\n # The pos_segments list helps us keep track of where each segmnt ends for\n # each contour level.\n all_paths = []\n pos_level = []\n pos_segments = []\n\n for collection in cset.collections:\n paths = collection.get_paths()\n all_paths.append(paths)\n # The last item in pos isn't needed for np.split and in fact causes\n # issues if we keep it because it will cause an extra empty array to be\n # returned.\n pos = np.cumsum([len(x) for x in paths])\n pos_segments.append(pos[:-1])\n pos_level.append(pos[-1])\n\n # As above the last item isn't needed\n pos_level = np.cumsum(pos_level)[:-1]\n\n # Stack all the segments into a single (n, 2) array\n vertices = [path.vertices for paths in all_paths for path in paths]\n if len(vertices) > 0:\n vertices = np.concatenate(vertices)\n else:\n return\n\n # Transform all coordinates in one go\n vertices = transform.transform(vertices)\n\n # Split up into levels again\n vertices = np.split(vertices, pos_level)\n\n # Now re-populate the segments in the line collections\n for ilevel, vert in enumerate(vertices):\n vert = np.split(vert, pos_segments[ilevel])\n for iseg, ivert in enumerate(vert):\n all_paths[ilevel][iseg].vertices = ivert\n",
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\n\"\"\"\nTabular models.\n\nTabular models of any dimension can be created using `tabular_model`.\nFor convenience `Tabular1D` and `Tabular2D` are provided.\n\nExamples\n--------\n>>> table = np.array([[ 3., 0., 0.],\n... [ 0., 2., 0.],\n... [ 0., 0., 0.]])\n>>> points = ([1, 2, 3], [1, 2, 3])\n>>> t2 = Tabular2D(points, lookup_table=table, bounds_error=False,\n... fill_value=None, method='nearest')\n\n\"\"\"\n\nimport abc\n\nimport numpy as np\n\nfrom .core import Model\nfrom astropy import units as u\nfrom astropy.utils import minversion\n\ntry:\n import scipy\n from scipy.interpolate import interpn\n has_scipy = True\nexcept ImportError:\n has_scipy = False\n\nhas_scipy = has_scipy and minversion(scipy, \"0.14\")\n\n__all__ = ['tabular_model', 'Tabular1D', 'Tabular2D']\n\n__doctest_requires__ = {('tabular_model'): ['scipy']}\n\n\nclass _Tabular(Model):\n \"\"\"\n Returns an interpolated lookup table value.\n\n Parameters\n ----------\n points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, ), optional\n The points defining the regular grid in n dimensions.\n lookup_table : array-like, shape (m1, ..., mn, ...)\n The data on a regular grid in n dimensions.\n method : str, optional\n The method of interpolation to perform. Supported are \"linear\" and\n \"nearest\", and \"splinef2d\". \"splinef2d\" is only supported for\n 2-dimensional data. Default is \"linear\".\n bounds_error : bool, optional\n If True, when interpolated values are requested outside of the\n domain of the input data, a ValueError is raised.\n If False, then ``fill_value`` is used.\n fill_value : float or `~astropy.units.Quantity`, optional\n If provided, the value to use for points outside of the\n interpolation domain. If None, values outside\n the domain are extrapolated. Extrapolation is not supported by method\n \"splinef2d\". 
If Quantity is given, it will be converted to the unit of\n ``lookup_table``, if applicable.\n\n Returns\n -------\n value : ndarray\n Interpolated values at input coordinates.\n\n Raises\n ------\n ImportError\n Scipy is not installed.\n\n Notes\n -----\n Uses `scipy.interpolate.interpn`.\n\n \"\"\"\n\n linear = False\n fittable = False\n\n standard_broadcasting = False\n outputs = ('y',)\n\n @property\n @abc.abstractmethod\n def lookup_table(self):\n pass\n\n _is_dynamic = True\n\n _id = 0\n\n def __init__(self, points=None, lookup_table=None, method='linear',\n bounds_error=True, fill_value=np.nan, **kwargs):\n\n n_models = kwargs.get('n_models', 1)\n if n_models > 1:\n raise NotImplementedError('Only n_models=1 is supported.')\n super().__init__(**kwargs)\n\n if lookup_table is None:\n raise ValueError('Must provide a lookup table.')\n\n if not isinstance(lookup_table, u.Quantity):\n lookup_table = np.asarray(lookup_table)\n\n if self.lookup_table.ndim != lookup_table.ndim:\n raise ValueError(\"lookup_table should be an array with \"\n \"{0} dimensions.\".format(self.lookup_table.ndim))\n\n if points is None:\n points = tuple(np.arange(x, dtype=float)\n for x in lookup_table.shape)\n else:\n if lookup_table.ndim == 1 and not isinstance(points, tuple):\n points = (points,)\n npts = len(points)\n if npts != lookup_table.ndim:\n raise ValueError(\n \"Expected grid points in \"\n \"{0} directions, got {1}.\".format(lookup_table.ndim, npts))\n if (npts > 1 and isinstance(points[0], u.Quantity) and\n len(set([getattr(p, 'unit', None) for p in points])) > 1):\n raise ValueError('points must all have the same unit.')\n\n if isinstance(fill_value, u.Quantity):\n if not isinstance(lookup_table, u.Quantity):\n raise ValueError('fill value is in {0} but expected to be '\n 'unitless.'.format(fill_value.unit))\n fill_value = fill_value.to(lookup_table.unit).value\n\n self.points = points\n self.lookup_table = lookup_table\n self.bounds_error = bounds_error\n self.method = method\n self.fill_value = fill_value\n\n def __repr__(self):\n fmt = \"<{0}(points={1}, lookup_table={2})>\".format(\n self.__class__.__name__, self.points, self.lookup_table)\n return fmt\n\n def __str__(self):\n default_keywords = [\n ('Model', self.__class__.__name__),\n ('Name', self.name),\n ('Inputs', self.inputs),\n ('Outputs', self.outputs),\n ('Parameters', \"\"),\n (' points', self.points),\n (' lookup_table', self.lookup_table),\n (' method', self.method),\n (' fill_value', self.fill_value),\n (' bounds_error', self.bounds_error)\n ]\n\n parts = ['{0}: {1}'.format(keyword, value)\n for keyword, value in default_keywords\n if value is not None]\n\n return '\\n'.join(parts)\n\n @property\n def input_units(self):\n pts = self.points[0]\n if not isinstance(pts, u.Quantity):\n return None\n else:\n return dict([(x, pts.unit) for x in self.inputs])\n\n @property\n def return_units(self):\n if not isinstance(self.lookup_table, u.Quantity):\n return None\n else:\n return {'y': self.lookup_table.unit}\n\n @property\n def bounding_box(self):\n \"\"\"\n Tuple defining the default ``bounding_box`` limits,\n ``(points_low, points_high)``.\n\n Examples\n --------\n >>> from astropy.modeling.models import Tabular1D, Tabular2D\n >>> t1 = Tabular1D(points=[1, 2, 3], lookup_table=[10, 20, 30])\n >>> t1.bounding_box\n (1, 3)\n >>> t2 = Tabular2D(points=[[1, 2, 3], [2, 3, 4]],\n ... 
lookup_table=[[10, 20, 30], [20, 30, 40]])\n >>> t2.bounding_box\n ((2, 4), (1, 3))\n\n \"\"\"\n bbox = [(min(p), max(p)) for p in self.points][::-1]\n if len(bbox) == 1:\n bbox = bbox[0]\n return tuple(bbox)\n\n def evaluate(self, *inputs):\n \"\"\"\n Return the interpolated values at the input coordinates.\n\n Parameters\n ----------\n inputs : list of scalars or ndarrays\n Input coordinates. The number of inputs must be equal\n to the dimensions of the lookup table.\n \"\"\"\n if isinstance(inputs, u.Quantity):\n inputs = inputs.value\n shape = inputs[0].shape\n inputs = [inp.flatten() for inp in inputs[: self.n_inputs]]\n inputs = np.array(inputs).T\n if not has_scipy: # pragma: no cover\n raise ImportError(\"This model requires scipy >= v0.14\")\n result = interpn(self.points, self.lookup_table, inputs,\n method=self.method, bounds_error=self.bounds_error,\n fill_value=self.fill_value)\n\n # return_units not respected when points has no units\n if (isinstance(self.lookup_table, u.Quantity) and\n not isinstance(self.points[0], u.Quantity)):\n result = result * self.lookup_table.unit\n\n if self.n_outputs == 1:\n result = result.reshape(shape)\n else:\n result = [r.reshape(shape) for r in result]\n return result\n\n\ndef tabular_model(dim, name=None):\n \"\"\"\n Make a ``Tabular`` model where ``n_inputs`` is\n based on the dimension of the lookup_table.\n\n This model has to be further initialized and when evaluated\n returns the interpolated values.\n\n Parameters\n ----------\n dim : int\n Dimensions of the lookup table.\n name : str\n Name for the class.\n\n Examples\n --------\n >>> table = np.array([[3., 0., 0.],\n ... [0., 2., 0.],\n ... [0., 0., 0.]])\n\n >>> tab = tabular_model(2, name='Tabular2D')\n >>> print(tab)\n <class 'abc.Tabular2D'>\n Name: Tabular2D\n Inputs: (u'x0', u'x1')\n Outputs: (u'y',)\n\n >>> points = ([1, 2, 3], [1, 2, 3])\n\n Setting fill_value to None, allows extrapolation.\n >>> m = tab(points, lookup_table=table, name='my_table',\n ... bounds_error=False, fill_value=None, method='nearest')\n\n >>> xinterp = [0, 1, 1.5, 2.72, 3.14]\n >>> m(xinterp, xinterp) # doctest: +FLOAT_CMP\n array([3., 3., 3., 0., 0.])\n\n \"\"\"\n if dim < 1:\n raise ValueError('Lookup table must have at least one dimension.')\n\n table = np.zeros([2] * dim)\n inputs = tuple('x{0}'.format(idx) for idx in range(table.ndim))\n members = {'lookup_table': table, 'inputs': inputs}\n\n if dim == 1:\n members['_separable'] = True\n else:\n members['_separable'] = False\n\n if name is None:\n model_id = _Tabular._id\n _Tabular._id += 1\n name = 'Tabular{0}'.format(model_id)\n\n return type(str(name), (_Tabular,), members)\n\n\nTabular1D = tabular_model(1, name='Tabular1D')\n\nTabular2D = tabular_model(2, name='Tabular2D')\n\n_tab_docs = \"\"\"\n method : str, optional\n The method of interpolation to perform. Supported are \"linear\" and\n \"nearest\", and \"splinef2d\". \"splinef2d\" is only supported for\n 2-dimensional data. Default is \"linear\".\n bounds_error : bool, optional\n If True, when interpolated values are requested outside of the\n domain of the input data, a ValueError is raised.\n If False, then ``fill_value`` is used.\n fill_value : float, optional\n If provided, the value to use for points outside of the\n interpolation domain. If None, values outside\n the domain are extrapolated. 
Extrapolation is not supported by method\n \"splinef2d\".\n\n Returns\n -------\n value : ndarray\n Interpolated values at input coordinates.\n\n Raises\n ------\n ImportError\n Scipy is not installed.\n\n Notes\n -----\n Uses `scipy.interpolate.interpn`.\n\"\"\"\n\nTabular1D.__doc__ = \"\"\"\n Tabular model in 1D.\n Returns an interpolated lookup table value.\n\n Parameters\n ----------\n points : array-like of float of ndim=1.\n The points defining the regular grid in n dimensions.\n lookup_table : array-like, of ndim=1.\n The data in one dimensions.\n\"\"\" + _tab_docs\n\nTabular2D.__doc__ = \"\"\"\n Tabular model in 2D.\n Returns an interpolated lookup table value.\n\n Parameters\n ----------\n points : tuple of ndarray of float, with shapes (m1, m2), optional\n The points defining the regular grid in n dimensions.\n lookup_table : array-like, shape (m1, m2)\n The data on a regular grid in 2 dimensions.\n\n\"\"\" + _tab_docs\n",
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport pytest\nimport numpy as np\nfrom numpy import ma\nfrom numpy.testing import assert_allclose\n\nfrom astropy.visualization.mpl_normalize import ImageNormalize, simple_norm, imshow_norm\nfrom astropy.visualization.interval import ManualInterval\nfrom astropy.visualization.stretch import SqrtStretch\n\ntry:\n import matplotlib # pylint: disable=W0611\n from matplotlib import pyplot as plt\n HAS_MATPLOTLIB = True\nexcept ImportError:\n HAS_MATPLOTLIB = False\n\n\nDATA = np.linspace(0., 15., 6)\nDATA2 = np.arange(3)\nDATA2SCL = 0.5 * DATA2\n\n\[email protected]('HAS_MATPLOTLIB')\ndef test_normalize_error_message():\n with pytest.raises(ImportError) as exc:\n ImageNormalize()\n assert (exc.value.args[0] == \"matplotlib is required in order to use \"\n \"this class.\")\n\n\[email protected]('not HAS_MATPLOTLIB')\nclass TestNormalize:\n def test_invalid_interval(self):\n with pytest.raises(TypeError):\n ImageNormalize(vmin=2., vmax=10., interval=ManualInterval,\n clip=True)\n\n def test_invalid_stretch(self):\n with pytest.raises(TypeError):\n ImageNormalize(vmin=2., vmax=10., stretch=SqrtStretch,\n clip=True)\n\n def test_scalar(self):\n norm = ImageNormalize(vmin=2., vmax=10., stretch=SqrtStretch(),\n clip=True)\n norm2 = ImageNormalize(data=6, interval=ManualInterval(2, 10),\n stretch=SqrtStretch(), clip=True)\n assert_allclose(norm(6), 0.70710678)\n assert_allclose(norm(6), norm2(6))\n\n def test_clip(self):\n norm = ImageNormalize(vmin=2., vmax=10., stretch=SqrtStretch(),\n clip=True)\n norm2 = ImageNormalize(DATA, interval=ManualInterval(2, 10),\n stretch=SqrtStretch(), clip=True)\n output = norm(DATA)\n expected = [0., 0.35355339, 0.70710678, 0.93541435, 1., 1.]\n assert_allclose(output, expected)\n assert_allclose(output.mask, [0, 0, 0, 0, 0, 0])\n assert_allclose(output, norm2(DATA))\n\n def test_noclip(self):\n norm = ImageNormalize(vmin=2., vmax=10., stretch=SqrtStretch(),\n clip=False)\n norm2 = ImageNormalize(DATA, interval=ManualInterval(2, 10),\n stretch=SqrtStretch(), clip=False)\n output = norm(DATA)\n expected = [np.nan, 0.35355339, 0.70710678, 0.93541435, 1.11803399,\n 1.27475488]\n assert_allclose(output, expected)\n assert_allclose(output.mask, [0, 0, 0, 0, 0, 0])\n assert_allclose(norm.inverse(norm(DATA))[1:], DATA[1:])\n assert_allclose(output, norm2(DATA))\n\n def test_implicit_autoscale(self):\n norm = ImageNormalize(vmin=None, vmax=10., stretch=SqrtStretch(),\n clip=False)\n norm2 = ImageNormalize(DATA, interval=ManualInterval(None, 10),\n stretch=SqrtStretch(), clip=False)\n output = norm(DATA)\n assert norm.vmin == np.min(DATA)\n assert norm.vmax == 10.\n assert_allclose(output, norm2(DATA))\n\n norm = ImageNormalize(vmin=2., vmax=None, stretch=SqrtStretch(),\n clip=False)\n norm2 = ImageNormalize(DATA, interval=ManualInterval(2, None),\n stretch=SqrtStretch(), clip=False)\n output = norm(DATA)\n assert norm.vmin == 2.\n assert norm.vmax == np.max(DATA)\n assert_allclose(output, norm2(DATA))\n\n def test_masked_clip(self):\n mdata = ma.array(DATA, mask=[0, 0, 1, 0, 0, 0])\n norm = ImageNormalize(vmin=2., vmax=10., stretch=SqrtStretch(),\n clip=True)\n norm2 = ImageNormalize(mdata, interval=ManualInterval(2, 10),\n stretch=SqrtStretch(), clip=True)\n output = norm(mdata)\n expected = [0., 0.35355339, 1., 0.93541435, 1., 1.]\n assert_allclose(output.filled(-10), expected)\n assert_allclose(output.mask, [0, 0, 0, 0, 0, 0])\n assert_allclose(output, norm2(mdata))\n\n def test_masked_noclip(self):\n 
mdata = ma.array(DATA, mask=[0, 0, 1, 0, 0, 0])\n norm = ImageNormalize(vmin=2., vmax=10., stretch=SqrtStretch(),\n clip=False)\n norm2 = ImageNormalize(mdata, interval=ManualInterval(2, 10),\n stretch=SqrtStretch(), clip=False)\n output = norm(mdata)\n expected = [np.nan, 0.35355339, -10, 0.93541435, 1.11803399,\n 1.27475488]\n assert_allclose(output.filled(-10), expected)\n assert_allclose(output.mask, [0, 0, 1, 0, 0, 0])\n\n assert_allclose(norm.inverse(norm(DATA))[1:], DATA[1:])\n assert_allclose(output, norm2(mdata))\n\n\[email protected]('not HAS_MATPLOTLIB')\nclass TestImageScaling:\n\n def test_linear(self):\n \"\"\"Test linear scaling.\"\"\"\n norm = simple_norm(DATA2, stretch='linear')\n assert_allclose(norm(DATA2), DATA2SCL, atol=0, rtol=1.e-5)\n\n def test_sqrt(self):\n \"\"\"Test sqrt scaling.\"\"\"\n norm = simple_norm(DATA2, stretch='sqrt')\n assert_allclose(norm(DATA2), np.sqrt(DATA2SCL), atol=0, rtol=1.e-5)\n\n def test_power(self):\n \"\"\"Test power scaling.\"\"\"\n power = 3.0\n norm = simple_norm(DATA2, stretch='power', power=power)\n assert_allclose(norm(DATA2), DATA2SCL ** power, atol=0, rtol=1.e-5)\n\n def test_log(self):\n \"\"\"Test log10 scaling.\"\"\"\n norm = simple_norm(DATA2, stretch='log')\n ref = np.log10(1000 * DATA2SCL + 1.0) / np.log10(1001.0)\n assert_allclose(norm(DATA2), ref, atol=0, rtol=1.e-5)\n\n def test_asinh(self):\n \"\"\"Test asinh scaling.\"\"\"\n a = 0.1\n norm = simple_norm(DATA2, stretch='asinh', asinh_a=a)\n ref = np.arcsinh(DATA2SCL / a) / np.arcsinh(1. / a)\n assert_allclose(norm(DATA2), ref, atol=0, rtol=1.e-5)\n\n def test_min(self):\n \"\"\"Test linear scaling.\"\"\"\n norm = simple_norm(DATA2, stretch='linear', min_cut=1.)\n assert_allclose(norm(DATA2), [0., 0., 1.], atol=0, rtol=1.e-5)\n\n def test_percent(self):\n \"\"\"Test percent keywords.\"\"\"\n norm = simple_norm(DATA2, stretch='linear', percent=99.)\n assert_allclose(norm(DATA2), DATA2SCL, atol=0, rtol=1.e-5)\n\n norm2 = simple_norm(DATA2, stretch='linear', min_percent=0.5,\n max_percent=99.5)\n assert_allclose(norm(DATA2), norm2(DATA2), atol=0, rtol=1.e-5)\n\n def test_invalid_stretch(self):\n \"\"\"Test invalid stretch keyword.\"\"\"\n with pytest.raises(ValueError):\n simple_norm(DATA2, stretch='invalid')\n\n\[email protected]('not HAS_MATPLOTLIB')\ndef test_imshow_norm():\n image = np.random.randn(10, 10)\n\n ax = plt.subplot()\n imshow_norm(image, ax=ax)\n\n with pytest.raises(ValueError):\n # X and data are the same, can't give both\n imshow_norm(image, X=image, ax=ax)\n\n with pytest.raises(ValueError):\n # illegal to manually pass in normalization since that defeats the point\n imshow_norm(image, ax=ax, norm=ImageNormalize())\n\n imshow_norm(image, ax=ax, vmin=0, vmax=1)\n # vmin/vmax \"shadow\" the MPL versions, so imshow_only_kwargs allows direct-setting\n imshow_norm(image, ax=ax, imshow_only_kwargs=dict(vmin=0, vmax=1))\n # but it should fail for an argument that is not in ImageNormalize\n with pytest.raises(ValueError):\n imshow_norm(image, ax=ax, imshow_only_kwargs=dict(cmap='jet'))\n\n # make sure the pyplot version works\n imres, norm = imshow_norm(image, ax=None)\n\n assert isinstance(norm, ImageNormalize)\n",
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport json\nimport os\nfrom datetime import datetime\nimport locale\n\nimport pytest\nimport numpy as np\n\nfrom astropy.utils import data, misc\n\n\ndef test_isiterable():\n assert misc.isiterable(2) is False\n assert misc.isiterable([2]) is True\n assert misc.isiterable([1, 2, 3]) is True\n assert misc.isiterable(np.array(2)) is False\n assert misc.isiterable(np.array([1, 2, 3])) is True\n\n\ndef test_signal_number_to_name_no_failure():\n # Regression test for #5340: ensure signal_number_to_name throws no\n # AttributeError (it used \".iteritems()\" which was removed in Python3).\n misc.signal_number_to_name(0)\n\n\[email protected]_data\ndef test_api_lookup():\n strurl = misc.find_api_page('astropy.utils.misc', 'dev', False, timeout=3)\n objurl = misc.find_api_page(misc, 'dev', False, timeout=3)\n\n assert strurl == objurl\n assert strurl == 'http://devdocs.astropy.org/utils/index.html#module-astropy.utils.misc'\n\n\ndef test_skip_hidden():\n path = data._find_pkg_data_path('data')\n for root, dirs, files in os.walk(path):\n assert '.hidden_file.txt' in files\n assert 'local.dat' in files\n # break after the first level since the data dir contains some other\n # subdirectories that don't have these files\n break\n\n for root, dirs, files in misc.walk_skip_hidden(path):\n assert '.hidden_file.txt' not in files\n assert 'local.dat' in files\n break\n\n\ndef test_JsonCustomEncoder():\n from astropy import units as u\n assert json.dumps(np.arange(3), cls=misc.JsonCustomEncoder) == '[0, 1, 2]'\n assert json.dumps(1+2j, cls=misc.JsonCustomEncoder) == '[1.0, 2.0]'\n assert json.dumps(set([1, 2, 1]), cls=misc.JsonCustomEncoder) == '[1, 2]'\n assert json.dumps(b'hello world \\xc3\\x85',\n cls=misc.JsonCustomEncoder) == '\"hello world \\\\u00c5\"'\n assert json.dumps({1: 2},\n cls=misc.JsonCustomEncoder) == '{\"1\": 2}' # default\n assert json.dumps({1: u.m}, cls=misc.JsonCustomEncoder) == '{\"1\": \"m\"}'\n # Quantities\n tmp = json.dumps({'a': 5*u.cm}, cls=misc.JsonCustomEncoder)\n newd = json.loads(tmp)\n tmpd = {\"a\": {\"unit\": \"cm\", \"value\": 5.0}}\n assert newd == tmpd\n tmp2 = json.dumps({'a': np.arange(2)*u.cm}, cls=misc.JsonCustomEncoder)\n newd = json.loads(tmp2)\n tmpd = {\"a\": {\"unit\": \"cm\", \"value\": [0., 1.]}}\n assert newd == tmpd\n tmp3 = json.dumps({'a': np.arange(2)*u.erg/u.s}, cls=misc.JsonCustomEncoder)\n newd = json.loads(tmp3)\n tmpd = {\"a\": {\"unit\": \"erg / s\", \"value\": [0., 1.]}}\n assert newd == tmpd\n\n\ndef test_inherit_docstrings():\n class Base(metaclass=misc.InheritDocstrings):\n def __call__(self, *args):\n \"FOO\"\n pass\n\n @property\n def bar(self):\n \"BAR\"\n pass\n\n class Subclass(Base):\n def __call__(self, *args):\n pass\n\n @property\n def bar(self):\n return 42\n\n if Base.__call__.__doc__ is not None:\n # TODO: Maybe if __doc__ is None this test should be skipped instead?\n assert Subclass.__call__.__doc__ == \"FOO\"\n\n if Base.bar.__doc__ is not None:\n assert Subclass.bar.__doc__ == \"BAR\"\n\n\ndef test_set_locale():\n # First, test if the required locales are available\n current = locale.setlocale(locale.LC_ALL)\n try:\n locale.setlocale(locale.LC_ALL, str('en_US'))\n locale.setlocale(locale.LC_ALL, str('de_DE'))\n except locale.Error as e:\n pytest.skip('Locale error: {}'.format(e))\n finally:\n locale.setlocale(locale.LC_ALL, current)\n\n date = datetime(2000, 10, 1, 0, 0, 0)\n day_mon = date.strftime('%a, %b')\n\n with misc.set_locale('en_US'):\n assert 
date.strftime('%a, %b') == 'Sun, Oct'\n\n with misc.set_locale('de_DE'):\n assert date.strftime('%a, %b') == 'So, Okt'\n\n # Back to original\n assert date.strftime('%a, %b') == day_mon\n\n with misc.set_locale(current):\n assert date.strftime('%a, %b') == day_mon\n\n\ndef test_check_broadcast():\n assert misc.check_broadcast((10, 1), (3,)) == (10, 3)\n assert misc.check_broadcast((10, 1), (3,), (4, 1, 1, 3)) == (4, 1, 10, 3)\n with pytest.raises(ValueError):\n misc.check_broadcast((10, 2), (3,))\n\n with pytest.raises(ValueError):\n misc.check_broadcast((10, 1), (3,), (4, 1, 2, 3))\n\n\ndef test_dtype_bytes_or_chars():\n assert misc.dtype_bytes_or_chars(np.dtype(np.float64)) == 8\n assert misc.dtype_bytes_or_chars(np.dtype(object)) is None\n assert misc.dtype_bytes_or_chars(np.dtype(np.int32)) == 4\n assert misc.dtype_bytes_or_chars(np.array(b'12345').dtype) == 5\n assert misc.dtype_bytes_or_chars(np.array(u'12345').dtype) == 5\n"
] | [
[
"numpy.cos",
"numpy.sin",
"numpy.ones",
"numpy.testing.assert_allclose",
"numpy.random.RandomState",
"numpy.zeros"
],
[
"numpy.dot",
"numpy.log",
"numpy.sqrt",
"numpy.sign",
"numpy.broadcast_arrays"
],
[
"numpy.rec.find_duplicate",
"numpy.char.encode",
"numpy.multiply.reduce",
"numpy.dtype",
"numpy.rec.array",
"numpy.append",
"numpy.array",
"numpy.recarray",
"numpy.rec.format_parser"
],
[
"numpy.ones",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.figure"
],
[
"numpy.ma.isMaskedArray",
"numpy.where",
"numpy.unique"
],
[
"numpy.split",
"numpy.abs",
"numpy.cumsum",
"numpy.concatenate",
"numpy.log10",
"numpy.floor",
"numpy.array"
],
[
"numpy.asarray",
"numpy.arange",
"scipy.interpolate.interpn",
"numpy.array",
"numpy.zeros"
],
[
"numpy.sqrt",
"numpy.linspace",
"numpy.min",
"numpy.arange",
"numpy.max",
"matplotlib.pyplot.subplot",
"numpy.random.randn",
"numpy.log10",
"numpy.testing.assert_allclose",
"numpy.ma.array",
"numpy.arcsinh"
],
[
"numpy.arange",
"numpy.array",
"numpy.dtype"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.14",
"1.6",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
chaekit/pytorch | [
"132f5c1f36698361149ea99ca3504bd2acfdc19f",
"132f5c1f36698361149ea99ca3504bd2acfdc19f"
] | [
"torch/nn/parallel/distributed.py",
"test/backward_compatibility/check_backward_compatibility.py"
] | [
"import copy\nimport inspect\nimport itertools\nimport logging\nimport os\nimport warnings\nfrom contextlib import contextmanager\nfrom typing import NamedTuple\n\nimport torch\nimport torch.distributed as dist\n\nRPC_AVAILABLE = False\nif dist.is_available():\n from torch.distributed.distributed_c10d import ReduceOp\n from torch.distributed.distributed_c10d import _get_default_group\nif torch.distributed.rpc.is_available():\n RPC_AVAILABLE = True\n from torch.distributed.rpc import RRef\nfrom torch._utils import _get_device_index\n\nfrom ..modules import Module\nfrom ._functions import _get_stream\nfrom .scatter_gather import scatter_kwargs, gather, is_namedtuple\n\n\ndef _find_tensors(obj):\n r\"\"\"\n Recursively find all tensors contained in the specified object.\n \"\"\"\n if RPC_AVAILABLE and isinstance(obj, RRef):\n # If the current node is the owner of the RRef, unwrap it and try to\n # find Tensors.\n # TODO: Expand to remote RRefs.\n if obj.is_owner():\n return _find_tensors(obj.local_value())\n if isinstance(obj, torch.Tensor):\n return [obj]\n if isinstance(obj, (list, tuple)):\n return itertools.chain(*map(_find_tensors, obj))\n if isinstance(obj, dict):\n return itertools.chain(*map(_find_tensors, obj.values()))\n return []\n\n\ndef _dump_DDP_relevant_env_vars():\n relevant_env_vars = [\n \"RANK\",\n \"LOCAL_RANK\",\n \"WORLD_SIZE\",\n \"MASTER_PORT\",\n \"MASTER_ADDR\",\n \"CUDA_VISIBLE_DEVICES\",\n \"GLOO_SOCKET_IFNAME\",\n \"GLOO_DEVICE_TRANSPORT\",\n \"NCCL_SOCKET_IFNAME\",\n \"NCCL_BLOCKING_WAIT\",\n \"NCCL_DEBUG\",\n \"NCCL_DEBUG_SUBSYS\",\n \"NCCL_IB_DISABLE\",\n # More NCCL env vars:\n \"NCCL_P2P_DISABLE\",\n \"NCCL_P2P_LEVEL\",\n \"NCCL_SHM_DISABLE\",\n \"NCCL_SOCKET_NTHREADS\",\n \"NCCL_NSOCKS_PERTHREAD\",\n \"NCCL_BUFFSIZE\",\n \"NCCL_NTHREADS\",\n \"NCCL_RINGS\",\n \"NCCL_MAX_NCHANNELS\",\n \"NCCL_MIN_NCHANNELS\",\n \"NCCL_CHECKS_DISABLE\",\n \"NCCL_CHECK_POINTERS\",\n \"NCCL_LAUNCH_MODE\",\n \"NCCL_IB_HCA\",\n \"NCCL_IB_TIMEOUT\",\n \"NCCL_IB_RETRY_CNT\",\n \"NCCL_IB_GID_INDEX\",\n \"NCCL_IB_SL\",\n \"NCCL_IB_TC\",\n \"NCCL_IB_AR_THRESHOLD\",\n \"NCCL_IB_CUDA_SUPPORT\",\n \"NCCL_NET_GDR_LEVEL\",\n \"NCCL_NET_GDR_READ\",\n \"NCCL_SINGLE_RING_THRESHOLD\",\n \"NCCL_LL_THRESHOLD\",\n \"NCCL_TREE_THRESHOLD\",\n \"NCCL_ALGO\",\n \"NCCL_PROTO\",\n \"NCCL_IGNORE_CPU_AFFINITY\",\n \"NCCL_DEBUG_FILE\",\n \"NCCL_COLLNET_ENABLE\",\n \"NCCL_TOPO_FILE\",\n \"NCCL_TOPO_DUMP_FILE\",\n ]\n formatted_output = \"\"\n for var in relevant_env_vars:\n value = os.environ[var] if var in os.environ else \"N/A\"\n formatted_output += \"env:%s=%s\\n\" % (var, value)\n print(formatted_output)\n\n\nclass _DDPUnevenInputsConfig(NamedTuple):\n ddp_join_enabled: bool\n ddp_join_divide_by_initial_world_size: bool\n\n\nclass DistributedDataParallel(Module):\n r\"\"\"Implements distributed data parallelism that is based on\n ``torch.distributed`` package at the module level.\n\n This container parallelizes the application of the given module by\n splitting the input across the specified devices by chunking in the batch\n dimension. The module is replicated on each machine and each device, and\n each such replica handles a portion of the input. 
During the backwards\n pass, gradients from each node are averaged.\n\n The batch size should be larger than the number of GPUs used locally.\n\n See also: :ref:`distributed-basics` and :ref:`cuda-nn-ddp-instead`.\n The same constraints on input as in :class:`torch.nn.DataParallel` apply.\n\n Creation of this class requires that ``torch.distributed`` to be already\n initialized, by calling :func:`torch.distributed.init_process_group`.\n\n ``DistributedDataParallel`` is proven to be significantly faster than\n :class:`torch.nn.DataParallel` for single-node multi-GPU data\n parallel training.\n\n To use ``DistributedDataParallel`` on a host with N GPUs, you should spawn\n up ``N`` processes, ensuring that each process exclusively works on a single\n GPU from 0 to N-1. This can be done by either setting\n ``CUDA_VISIBLE_DEVICES`` for every process or by calling:\n\n >>> torch.cuda.set_device(i)\n\n where i is from 0 to N-1. In each process, you should refer the following\n to construct this module:\n\n >>> torch.distributed.init_process_group(\n >>> backend='nccl', world_size=N, init_method='...'\n >>> )\n >>> model = DistributedDataParallel(model, device_ids=[i], output_device=i)\n\n In order to spawn up multiple processes per node, you can use either\n ``torch.distributed.launch`` or ``torch.multiprocessing.spawn``.\n\n .. note::\n Please refer to `PyTorch Distributed Overview <https://pytorch.org/tutorials/beginner/dist_overview.html>`__\n for a brief introduction to all features related to distributed training.\n\n .. note::\n ``DistributedDataParallel`` can be used in conjunction with\n :class:`torch.distributed.optim.ZeroRedundancyOptimizer` to reduce\n per-rank optimizer states memory footprint. Please refer to\n `ZeroRedundancyOptimizer recipe <https://pytorch.org/tutorials/recipes/zero_redundancy_optimizer.html>`__\n for more details.\n\n .. note:: ``nccl`` backend is currently the fastest and highly recommended\n backend when using GPUs. This applies to both single-node and\n multi-node distributed training.\n\n .. note:: This module also supports mixed-precision distributed training.\n This means that your model can have different types of parameters such\n as mixed types of ``fp16`` and ``fp32``, the gradient reduction on these\n mixed types of parameters will just work fine.\n\n .. note:: If you use ``torch.save`` on one process to checkpoint the module,\n and ``torch.load`` on some other processes to recover it, make sure that\n ``map_location`` is configured properly for every process. Without\n ``map_location``, ``torch.load`` would recover the module to devices\n where the module was saved from.\n\n .. note:: When a model is trained on ``M`` nodes with ``batch=N``, the\n gradient will be ``M`` times smaller when compared to the same model\n trained on a single node with ``batch=M*N`` if the loss is summed (NOT\n averaged as usual) across instances in a batch (because the gradients\n between different nodes are averaged). You should take this into\n consideration when you want to obtain a mathematically equivalent\n training process compared to the local training counterpart. But in most\n cases, you can just treat a DistributedDataParallel wrapped model, a\n DataParallel wrapped model and an ordinary model on a single GPU as the\n same (E.g. using the same learning rate for equivalent batch size).\n\n .. note::\n Parameters are never broadcast between processes. 
The module performs\n an all-reduce step on gradients and assumes that they will be modified\n by the optimizer in all processes in the same way. Buffers\n (e.g. BatchNorm stats) are broadcast from the module in process of rank\n 0, to all other replicas in the system in every iteration.\n\n .. note::\n If you are using DistributedDataParallel in conjunction with the\n :ref:`distributed-rpc-framework`, you should always use\n :meth:`torch.distributed.autograd.backward` to compute gradients and\n :class:`torch.distributed.optim.DistributedOptimizer` for optimizing\n parameters.\n\n Example::\n\n >>> import torch.distributed.autograd as dist_autograd\n >>> from torch.nn.parallel import DistributedDataParallel as DDP\n >>> from torch import optim\n >>> from torch.distributed.optim import DistributedOptimizer\n >>> from torch.distributed.rpc import RRef\n >>>\n >>> t1 = torch.rand((3, 3), requires_grad=True)\n >>> t2 = torch.rand((3, 3), requires_grad=True)\n >>> rref = rpc.remote(\"worker1\", torch.add, args=(t1, t2))\n >>> ddp_model = DDP(my_model)\n >>>\n >>> # Setup optimizer\n >>> optimizer_params = [rref]\n >>> for param in ddp_model.parameters():\n >>> optimizer_params.append(RRef(param))\n >>>\n >>> dist_optim = DistributedOptimizer(\n >>> optim.SGD,\n >>> optimizer_params,\n >>> lr=0.05,\n >>> )\n >>>\n >>> with dist_autograd.context() as context_id:\n >>> pred = ddp_model(rref.to_here())\n >>> loss = loss_func(pred, loss)\n >>> dist_autograd.backward(context_id, loss)\n >>> dist_optim.step()\n\n .. note::\n To let a non-DDP model load a state dict from a DDP model,\n :meth:`~torch.nn.modules.utils.consume_prefix_in_state_dict_if_present`\n needs to be applied to strip the prefix \"module.\" in the DDP state dict before loading.\n\n .. warning::\n Constructor, forward method, and differentiation of the output (or a\n function of the output of this module) are distributed synchronization\n points. Take that into account in case different processes might be\n executing different code.\n\n .. warning::\n This module assumes all parameters are registered in the model by the\n time it is created. No parameters should be added nor removed later.\n Same applies to buffers.\n\n .. warning::\n This module assumes all parameters are registered in the model of each\n distributed processes are in the same order. The module itself will\n conduct gradient ``allreduce`` following the reverse order of the\n registered parameters of the model. In other words, it is users'\n responsibility to ensure that each distributed process has the exact\n same model and thus the exact same parameter registration order.\n\n .. warning::\n This module allows parameters with non-rowmajor-contiguous strides.\n For example, your model may contain some parameters whose\n :class:`torch.memory_format` is ``torch.contiguous_format``\n and others whose format is ``torch.channels_last``. However,\n corresponding parameters in different processes must have the\n same strides.\n\n .. warning::\n This module doesn't work with :func:`torch.autograd.grad` (i.e. it will\n only work if gradients are to be accumulated in ``.grad`` attributes of\n parameters).\n\n .. warning::\n If you plan on using this module with a ``nccl`` backend or a ``gloo``\n backend (that uses Infiniband), together with a DataLoader that uses\n multiple workers, please change the multiprocessing start method to\n ``forkserver`` (Python 3 only) or ``spawn``. 
Unfortunately\n Gloo (that uses Infiniband) and NCCL2 are not fork safe, and you will\n likely experience deadlocks if you don't change this setting.\n\n .. warning::\n Forward and backward hooks defined on :attr:`module` and its submodules\n won't be invoked anymore, unless the hooks are initialized in the\n :meth:`forward` method.\n\n .. warning::\n You should never try to change your model's parameters after wrapping\n up your model with ``DistributedDataParallel``. Because, when\n wrapping up your model with ``DistributedDataParallel``, the constructor\n of ``DistributedDataParallel`` will register the additional gradient\n reduction functions on all the parameters of the model itself at the\n time of construction. If you change the model's parameters afterwards,\n gradient redunction functions no longer match the correct set of\n parameters.\n\n .. warning::\n Using ``DistributedDataParallel`` in conjunction with the\n :ref:`distributed-rpc-framework` is experimental and subject to change.\n\n .. warning::\n The ``gradient_as_bucket_view`` mode does not yet work with Automatic\n Mixed Precision (AMP). AMP maintains stashed gradients that are used for\n unscaling gradients. With ``gradient_as_bucket_view=True``, these\n stashed gradients will point to communication buckets in the first\n iteration. In the next iteration, the communication buckets are mutated\n and thus these stashed gradients will be unexpectedly mutated as well,\n which might lead to wrong results.\n\n Args:\n module (Module): module to be parallelized\n device_ids (list of int or torch.device): CUDA devices.\n 1) For single-device modules, ``device_ids`` can\n contain exactly one device id, which represents the only\n CUDA device where the input module corresponding to this process resides.\n Alternatively, ``device_ids`` can also be ``None``.\n 2) For multi-device modules and CPU modules,\n ``device_ids`` must be ``None``.\n\n When ``device_ids`` is ``None`` for both cases,\n both the input data for the forward pass and the actual module\n must be placed on the correct device.\n (default: ``None``)\n output_device (int or torch.device): Device location of output for\n single-device CUDA modules. For multi-device modules and\n CPU modules, it must be ``None``, and the module itself\n dictates the output location. (default: ``device_ids[0]``\n for single-device modules)\n broadcast_buffers (bool): Flag that enables syncing (broadcasting)\n buffers of the module at beginning of the ``forward``\n function. (default: ``True``)\n process_group: The process group to be used for distributed data\n all-reduction. If ``None``, the default process group, which\n is created by :func:`torch.distributed.init_process_group`,\n will be used. (default: ``None``)\n bucket_cap_mb: ``DistributedDataParallel`` will bucket parameters into\n multiple buckets so that gradient reduction of each\n bucket can potentially overlap with backward computation.\n :attr:`bucket_cap_mb` controls the bucket size in\n MegaBytes (MB). (default: 25)\n find_unused_parameters (bool): Traverse the autograd graph from all\n tensors contained in the return value of the\n wrapped module's ``forward`` function. Parameters\n that don't receive gradients as part of this\n graph are preemptively marked as being ready to\n be reduced. Note that all ``forward`` outputs\n that are derived from module parameters must\n participate in calculating loss and later the\n gradient computation. 
If they don't, this wrapper\n will hang waiting for autograd to produce\n gradients for those parameters. Any outputs\n derived from module parameters that are otherwise\n unused can be detached from the autograd graph\n using ``torch.Tensor.detach``. (default: ``False``)\n check_reduction: This argument is deprecated.\n gradient_as_bucket_view (bool): This is a prototype feature and subject\n to changes. When set to ``True``, gradients will be views\n pointing to different offsets of ``allreduce`` communication\n buckets. This can reduce peak memory usage, where the\n saved memory size will be equal to the total gradients\n size. Moreover, it avoids the overhead of copying between\n gradients and ``allreduce`` communication buckets. When\n gradients are views, ``detach_()`` cannot be called on the\n gradients. If hitting such errors, please fix it by\n referring to the :meth:`~torch.optim.Optimizer.zero_grad`\n function in ``torch/optim/optimizer.py`` as a solution.\n\n\n Attributes:\n module (Module): the module to be parallelized.\n\n Example::\n\n >>> torch.distributed.init_process_group(backend='nccl', world_size=4, init_method='...')\n >>> net = torch.nn.parallel.DistributedDataParallel(model, pg)\n \"\"\"\n\n def __init__(\n self,\n module,\n device_ids=None,\n output_device=None,\n dim=0,\n broadcast_buffers=True,\n process_group=None,\n bucket_cap_mb=25,\n find_unused_parameters=False,\n check_reduction=False,\n gradient_as_bucket_view=False,\n ):\n\n super(DistributedDataParallel, self).__init__()\n\n assert any((p.requires_grad for p in module.parameters())), (\n \"DistributedDataParallel is not needed when a module \"\n \"doesn't have any parameter that requires a gradient.\"\n )\n\n if device_ids is not None and len(device_ids) > 1:\n raise ValueError(\"device_ids can only be None or contain a single element.\")\n\n self.is_multi_device_module = len({p.device for p in module.parameters()}) > 1\n distinct_device_types = {p.device.type for p in module.parameters()}\n if len(distinct_device_types) != 1:\n raise ValueError(\n \"DistributedDataParallel's input module must be on \"\n \"the same type of devices, but input module parameters locate in {}.\".format(\n distinct_device_types\n )\n )\n self.device_type = list(distinct_device_types)[0]\n\n if (\n device_ids is None\n or len(device_ids) == 0 # For backward compatibility.\n or self.device_type == \"cpu\"\n or self.is_multi_device_module\n ):\n if device_ids or output_device:\n raise ValueError(\n \"DistributedDataParallel device_ids and output_device arguments \"\n \"only work with single-device/multiple-device GPU modules or CPU modules, \"\n \"but got device_ids {}, output_device {}, and module parameters {}.\".format(\n device_ids,\n output_device,\n {p.device for p in module.parameters()},\n )\n )\n\n self.device_ids = None\n self.output_device = None\n else:\n self.device_ids = [_get_device_index(x, True) for x in device_ids]\n\n if output_device is None:\n output_device = device_ids[0]\n\n self.output_device = _get_device_index(output_device, True)\n\n if process_group is None:\n self.process_group = _get_default_group()\n else:\n self.process_group = process_group\n\n self.dim = dim\n self.module = module\n self.device = list(self.module.parameters())[0].device\n self.broadcast_buffers = broadcast_buffers\n self.find_unused_parameters = find_unused_parameters\n self.require_backward_grad_sync = True\n self.require_forward_param_sync = True\n self.ddp_uneven_inputs_config = _DDPUnevenInputsConfig(\n 
ddp_join_enabled=False, ddp_join_divide_by_initial_world_size=False\n )\n self.gradient_as_bucket_view = gradient_as_bucket_view\n if hasattr(module, \"_ddp_params_and_buffers_to_ignore\"):\n self.parameters_to_ignore = module._ddp_params_and_buffers_to_ignore\n else:\n self.parameters_to_ignore = []\n\n if check_reduction:\n # This argument is no longer used since the reducer\n # will ensure reduction completes even if some parameters\n # do not receive gradients.\n warnings.warn(\n \"The `check_reduction` argument in `DistributedDataParallel` \"\n \"module is deprecated. Please avoid using it.\"\n )\n\n # Check that a module does not have Uninitialized parameters\n for param in module.parameters():\n if isinstance(param, torch.nn.parameter.UninitializedParameter):\n raise RuntimeError(\n \"Modules with uninitialized parameters can't be used with `DistributedDataParallel`. \"\n \"Run a dummy forward pass to correctly initialize the modules\"\n )\n # used for intra-node param sync and inter-node sync as wel\n self.broadcast_bucket_size = int(250 * 1024 * 1024)\n\n # reduction bucket size\n self.bucket_bytes_cap = int(bucket_cap_mb * 1024 * 1024)\n # Whether to perform input tensor CPU to GPU copies on a side-stream\n self.use_side_stream_for_tensor_copies = (\n os.environ.get(\"PYTORCH_DDP_USE_SIDE_STREAM\", \"1\") == \"1\"\n )\n\n # TODO(wayi@): Remove this field since SPMD is no longer supported,\n # and also remove all the relevant unnecessary loops.\n # Module replication within process (single-process multi device)\n self._module_copies = [self.module]\n # Build parameters for reducer.\n parameters, expect_sparse_gradient = self._build_params_for_reducer()\n # Verify model equivalence.\n dist._verify_model_across_ranks(self.process_group, parameters)\n # Sync params and buffers. Ensures all DDP models start off at the same value.\n self._sync_params_and_buffers(authoritative_rank=0)\n # Builds reducer.\n self._ddp_init_helper(parameters, expect_sparse_gradient)\n\n def _sync_params_and_buffers(self, authoritative_rank=0):\n module_states = []\n for name, param in self.module.state_dict().items():\n if name not in self.parameters_to_ignore:\n module_states.append(param)\n\n if len(module_states) > 0:\n self._distributed_broadcast_coalesced(\n module_states, self.broadcast_bucket_size, authoritative_rank\n )\n\n def _ddp_init_helper(self, parameters, expect_sparse_gradient):\n \"\"\"\n Initialization helper function that does the following:\n (1) bucketing the parameters for reductions\n (2) resetting the bucketing states\n (3) registering the grad hooks\n (4) Logging constructin-time DDP logging data\n (5) passing a handle of DDP to SyncBatchNorm Layer\n \"\"\"\n # The bucket size limit is specified in the constructor.\n # Additionally, we allow for a single small bucket for parameters\n # that are defined first, such that their gradients don't spill into\n # a much larger bucket, adding unnecessary latency after gradient\n # computation finishes. 
Experiments showed 1MB is a reasonable value.\n bucket_indices = dist._compute_bucket_assignment_by_size(\n parameters[0],\n [dist._DEFAULT_FIRST_BUCKET_BYTES, self.bucket_bytes_cap],\n expect_sparse_gradient[0],\n )\n\n # Note: reverse list of buckets because we want to approximate the\n # order in which their gradients are produced, and assume they\n # are used in the forward pass in the order they are defined.\n self.reducer = dist.Reducer(\n parameters,\n list(reversed(bucket_indices)),\n self.process_group,\n expect_sparse_gradient,\n self.bucket_bytes_cap,\n self.find_unused_parameters,\n self.gradient_as_bucket_view,\n )\n\n self.logger = dist.Logger(self.reducer)\n\n # Set logging data that can be got during construction time.\n self.logger.set_construction_data_and_log(\n self.module.__class__.__name__,\n [] if self.device_ids is None else self.device_ids,\n -1 if self.output_device is None else self.output_device,\n self.broadcast_buffers,\n )\n\n # passing a handle to torch.nn.SyncBatchNorm layer\n self._passing_sync_batchnorm_handle(self._module_copies)\n\n def __getstate__(self):\n self._check_default_group()\n attrs = copy.copy(self.__dict__)\n del attrs[\"process_group\"]\n del attrs[\"reducer\"]\n del attrs[\"logger\"]\n return attrs\n\n def __setstate__(self, state):\n # If serializable, then the process group should be the default one\n self.process_group = _get_default_group()\n super(DistributedDataParallel, self).__setstate__(state)\n self.__dict__.setdefault(\"require_forward_param_sync\", True)\n self.__dict__.setdefault(\"require_backward_grad_sync\", True)\n parameters, expect_sparse_gradient = self._build_params_for_reducer()\n self._ddp_init_helper(parameters, expect_sparse_gradient)\n\n def _build_params_for_reducer(self):\n # Build tuple of (module, parameter) for all parameters that require grads.\n modules_and_parameters = [\n [\n (module, parameter)\n for module_name, module in replica.named_modules()\n for parameter in [\n param\n # Note that we access module.named_parameters instead of\n # parameters(module). 
parameters(module) is only needed in the\n # single-process multi device case, where it accesses replicated\n # parameters through _former_parameters.\n for param_name, param in module.named_parameters(recurse=False)\n if param.requires_grad\n and f\"{module_name}.{param_name}\"\n not in self.parameters_to_ignore\n ]\n ]\n for replica in self._module_copies\n ]\n\n # Deduplicate any parameters that might be shared across child modules.\n memo = set()\n modules_and_parameters = [\n # \"p not in memo\" is the deduplication check.\n # \"not memo.add(p)\" is always True, and it's only there to cause \"add(p)\" if needed.\n [(m, p) for m, p in replica_mps if p not in memo and not memo.add(p)]\n for replica_mps in modules_and_parameters\n ]\n\n # Build list of parameters.\n parameters = [\n list(parameter for _, parameter in replica)\n for replica in modules_and_parameters\n ]\n\n # Checks if a module will produce a sparse gradient.\n def produces_sparse_gradient(module):\n if isinstance(module, torch.nn.Embedding) or isinstance(\n module, torch.nn.EmbeddingBag\n ):\n return module.sparse\n return False\n\n # Build list of booleans indicating whether or not to expect sparse\n # gradients for the corresponding parameters.\n expect_sparse_gradient = [\n list(produces_sparse_gradient(module) for module, _ in replica)\n for replica in modules_and_parameters\n ]\n\n # The following modules_params and modules_buffers are used for\n # param/buffer sync in _sync_params.\n self.modules_params = [\n list(self._get_parameters(m)) for m in self._module_copies\n ]\n # Collect buffers for modules, filtering out buffers that should be ignored.\n named_module_buffers = [\n [(buffer, buffer_name) for buffer_name, buffer in m.named_buffers()]\n for m in self._module_copies\n ]\n self.modules_buffers = [\n [\n buffer\n for (buffer, buffer_name) in module_buffers\n if buffer_name not in self.parameters_to_ignore\n ]\n for module_buffers in named_module_buffers\n ]\n\n return parameters, expect_sparse_gradient\n\n def _get_parameters(self, m, recurse=True):\n \"\"\"\n Returns a generator of module parameters\n \"\"\"\n\n def model_parameters(m):\n ps = (\n m._former_parameters.values()\n if hasattr(m, \"_former_parameters\")\n else m.parameters(recurse=False)\n )\n for p in ps:\n yield p\n\n for m in m.modules() if recurse else [m]:\n for p in model_parameters(m):\n yield p\n\n def _check_default_group(self):\n pickle_not_supported = False\n try:\n if self.process_group != _get_default_group():\n pickle_not_supported = True\n except RuntimeError:\n pickle_not_supported = True\n\n if pickle_not_supported:\n raise RuntimeError(\n \"DDP Pickling/Unpickling are only supported \"\n \"when using DDP with the default process \"\n \"group. That is, when you have called \"\n \"init_process_group and have not passed \"\n \"process_group argument to DDP constructor\"\n )\n\n @contextmanager\n def no_sync(self):\n r\"\"\"\n A context manager to disable gradient synchronizations across DDP\n processes. 
Within this context, gradients will be accumulated on module\n variables, which will later be synchronized in the first\n forward-backward pass exiting the context.\n\n Example::\n\n >>> ddp = torch.nn.parallel.DistributedDataParallel(model, pg)\n >>> with ddp.no_sync():\n >>> for input in inputs:\n >>> ddp(input).backward() # no synchronization, accumulate grads\n >>> ddp(another_input).backward() # synchronize grads\n \"\"\"\n old_require_backward_grad_sync = self.require_backward_grad_sync\n self.require_backward_grad_sync = False\n try:\n yield\n finally:\n self.require_backward_grad_sync = old_require_backward_grad_sync\n\n def forward(self, *inputs, **kwargs):\n self.reducer.save_thread_local_state()\n if torch.is_grad_enabled() and self.require_backward_grad_sync:\n self.logger.set_runtime_stats_and_log()\n self.reducer.prepare_for_forward()\n if self.ddp_uneven_inputs_config.ddp_join_enabled:\n ones = torch.ones(1, device=self.device)\n work = dist.all_reduce(ones, group=self.process_group, async_op=True)\n self.reducer._set_forward_pass_work_handle(\n work,\n self.ddp_uneven_inputs_config.ddp_join_divide_by_initial_world_size,\n )\n\n # Calling _rebuild_buckets before forward compuation,\n # It may allocate new buckets before deallocating old buckets\n # inside _rebuild_buckets. To save peak memory usage,\n # call _rebuild_buckets before the peak memory usage increases\n # during forward computation.\n # This should be called only once during whole training period.\n if torch.is_grad_enabled() and self.reducer._rebuild_buckets():\n logging.info(\"Reducer buckets have been rebuilt in this iteration.\")\n\n if self.require_forward_param_sync:\n self._sync_params()\n\n if self.ddp_uneven_inputs_config.ddp_join_enabled:\n # Notify joined ranks whether they should sync in backwards pass or not.\n self._check_global_requires_backward_grad_sync(is_joined_rank=False)\n\n if self.device_ids:\n inputs, kwargs = self.to_kwargs(inputs, kwargs, self.device_ids[0])\n output = self.module(*inputs[0], **kwargs[0])\n else:\n output = self.module(*inputs, **kwargs)\n\n if torch.is_grad_enabled() and self.require_backward_grad_sync:\n self.require_forward_param_sync = True\n # We'll return the output object verbatim since it is a freeform\n # object. We need to find any tensors in this object, though,\n # because we need to figure out which parameters were used during\n # this forward pass, to ensure we short circuit reduction for any\n # unused parameters. Only if `find_unused_parameters` is set.\n if self.find_unused_parameters:\n self.reducer.prepare_for_backward(list(_find_tensors(output)))\n else:\n self.reducer.prepare_for_backward([])\n else:\n self.require_forward_param_sync = False\n\n return output\n\n def scatter(self, inputs, kwargs, device_ids):\n return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)\n\n def _recursive_to(self, inputs, target_gpu):\n r\"\"\"\n Recursively moves input to the target_gpu.\n \"\"\"\n\n def to_map(obj):\n if isinstance(obj, torch.Tensor):\n if not self.use_side_stream_for_tensor_copies:\n return (obj.to(target_gpu),)\n else:\n # Perform CPU -> GPU copies in a background stream. 
This code is\n # motivated from similar logic in torch/nn/parallel/_functions.py\n stream = _get_stream(target_gpu)\n with torch.cuda.stream(stream):\n output = obj.to(target_gpu)\n # synchronize with the copy stream\n with torch.cuda.device(target_gpu):\n current_stream = torch.cuda.current_stream()\n # Sync the current stream with the copy stream\n current_stream.wait_stream(stream)\n # Ensure tensor memory is not reused until work on\n # main stream is complete\n output.record_stream(current_stream)\n return (output,)\n if is_namedtuple(obj):\n return [type(obj)(*args) for args in zip(*map(to_map, obj))]\n if isinstance(obj, tuple) and len(obj) > 0:\n return list(zip(*map(to_map, obj)))\n if isinstance(obj, list) and len(obj) > 0:\n return [list(i) for i in zip(*map(to_map, obj))]\n if isinstance(obj, dict) and len(obj) > 0:\n return [type(obj)(i) for i in zip(*map(to_map, obj.items()))]\n return [obj]\n\n # Avoid reference cycle\n try:\n res = to_map(inputs)\n finally:\n to_map = None\n return res\n\n def to_kwargs(self, inputs, kwargs, device_id):\n inputs = self._recursive_to(inputs, device_id) if inputs else []\n kwargs = self._recursive_to(kwargs, device_id) if kwargs else []\n if len(inputs) < len(kwargs):\n inputs.extend([() for _ in range(len(kwargs) - len(inputs))])\n elif len(kwargs) < len(inputs):\n kwargs.extend([{} for _ in range(len(inputs) - len(kwargs))])\n inputs = tuple(inputs)\n kwargs = tuple(kwargs)\n return inputs, kwargs\n\n def gather(self, outputs, output_device):\n return gather(outputs, output_device, dim=self.dim)\n\n def train(self, mode=True):\n super(DistributedDataParallel, self).train(mode)\n for module in self._module_copies[1:]:\n module.train(mode)\n return self\n\n # When running in join mode, schedules an allreduce to match the one in the\n # forward pass to determine the no. of currently active processes and whether\n # all processes have joined.\n def _schedule_shadow_all_reduce_for_fwd_pass(self):\n all_active_procs = torch.zeros(1, device=self.device)\n dist.all_reduce(all_active_procs, group=self.process_group)\n return all_active_procs.item()\n\n # When running in join mode, schedules an allreduce to notify joined ranks\n # of whether backwards pass synchronization will run this iteraton or not.\n def _check_global_requires_backward_grad_sync(self, is_joined_rank):\n if not is_joined_rank and self.require_backward_grad_sync:\n requires_sync_tensor = torch.ones(1, device=self.device)\n else:\n requires_sync_tensor = torch.zeros(1, device=self.device)\n\n work = dist.all_reduce(\n requires_sync_tensor, group=self.process_group, async_op=True\n )\n return work, requires_sync_tensor\n\n # When running in join mode, checks and performs sync of module buffers if\n # the models have buffers that should be synchronized in the forward pass.\n def _check_and_sync_module_buffers(self):\n if self.will_sync_module_buffers():\n authoritative_rank = self._find_common_rank(self._distributed_rank, False)\n self._distributed_broadcast_coalesced(\n self.modules_buffers[0], self.broadcast_bucket_size, authoritative_rank\n )\n\n # When running in join model, agrees upon a common rank and broadcast model\n # parameters to all other ranks.\n def _sync_final_model(self, is_last_joiner):\n # Agree upon the process that will be the authoritative model copy.\n # The current rank is a candidate for being the authoritative copy if\n # is_last_joiner=True. 
We break ties via picking the larger rank.\n self._authoritative_rank = self._find_common_rank(\n self._distributed_rank, is_last_joiner\n )\n self._sync_params_and_buffers(authoritative_rank=self._authoritative_rank)\n\n # Schedule allreduce ops to match those scheduled in the reducer's backward\n # pass.\n def _match_all_reduce_for_bwd_pass(self):\n allreduce_work = []\n # Schedule allreduce in the same order as Reducer schedules them, i.e.\n # the order of the buckets. Retrieving the bucket order from the reducer\n # ensures that we keep the same order in join mode, such as when bucket\n # order is rebuilt dynamically.\n all_bucket_tensors = self.reducer.get_bucket_tensors()\n for bucket_tensors in all_bucket_tensors:\n # Joined processes contribute zero gradient. In the case that\n # divide_by_initial_world_size=True, we divide grads by the static\n # world size, if not, the dividing factor is reduced by the number\n # of joined processes.\n zero_tensors = [torch.zeros_like(t) for t in bucket_tensors]\n work = self.process_group.allreduce(zero_tensors)\n allreduce_work.append(work)\n for work in allreduce_work:\n work.wait()\n\n # Allreduces the used parameter mapping across ranks.\n def _match_unused_params_allreduce(self):\n locally_used_param_maps = self.reducer._get_local_used_maps()\n self.process_group.allreduce(locally_used_param_maps)\n\n @contextmanager\n def join(self, divide_by_initial_world_size=True, enable=True):\n r\"\"\"\n A context manager to be used in conjunction with an instance of\n :class:`torch.nn.parallel.DistributedDataParallel` to be\n able to train with uneven inputs across participating processes.\n\n This context manager will keep track of already-joined DDP processes,\n and \"shadow\" the forward and backward passes by inserting collective\n communication operations to match with the ones created by non-joined\n DDP processes. This will ensure each collective call has a corresponding\n call by already-joined DDP processes, preventing hangs or errors that\n would otherwise happen when training with uneven inputs across\n processes.\n\n Once all DDP processes have joined, the context manager will broadcast\n the model corresponding to the last joined process to all processes to\n ensure the model is the same across all processes\n (which is guaranteed by DDP).\n\n To use this to enable training with uneven inputs across processes,\n simply wrap this context manager around your training loop. No further\n modifications to the model or data loading is required.\n\n .. warning::\n This module currently does not support custom distributed collective\n operations in the forward pass, such as ``SyncBatchNorm`` or other\n custom defined collectives in the model's forward pass.\n\n Args:\n divide_by_initial_world_size (bool): If ``True``, will divide\n gradients by the initial ``world_size`` DDP training was launched\n with. If ``False``, will compute the effective world size\n (number of ranks that have not depleted their inputs yet) and\n divide gradients by that during allreduce. Set\n ``divide_by_initial_world_size=True`` to ensure every input\n sample including the uneven inputs have equal weight in terms of\n how much they contribute to the global gradient. This is\n achieved by always dividing the gradient by the initial\n ``world_size`` even when we encounter uneven inputs. If you set\n this to ``False``, we divide the gradient by the remaining\n number of nodes. 
This ensures parity with training on a smaller\n ``world_size`` although it also means the uneven inputs would\n contribute more towards the global gradient. Typically, you\n would want to set this to ``True`` for cases where the last few\n inputs of your training job are uneven. In extreme cases, where\n there is a large discrepancy in the number of inputs, setting\n this to ``False`` might provide better results.\n enable (bool): Whether to enable uneven input detection or not. Pass\n in ``enable=False`` to disable in cases where you know that\n inputs are even across participating processes. Default is\n ``True``.\n\n\n Example::\n\n >>> import torch\n >>> import torch.distributed as dist\n >>> import os\n >>> import torch.multiprocessing as mp\n >>> import torch.nn as nn\n >>> # On each spawned worker\n >>> def worker(rank):\n >>> dist.init_process_group(\"nccl\", rank=rank, world_size=2)\n >>> torch.cuda.set_device(rank)\n >>> model = nn.Linear(1, 1, bias=False).to(rank)\n >>> model = torch.nn.parallel.DistributedDataParallel(\n >>> model, device_ids=[rank], output_device=rank\n >>> )\n >>> # Rank 1 gets one more input than rank 0.\n >>> inputs = [torch.tensor([1]).float() for _ in range(10 + rank)]\n >>> with model.join():\n >>> for _ in range(5):\n >>> for inp in inputs:\n >>> loss = model(inp).sum()\n >>> loss.backward()\n >>> # Without the join() API, the below synchronization will hang\n >>> # blocking for rank 1's allreduce to complete.\n >>> torch.cuda.synchronize(device=rank)\n \"\"\"\n # Log uneven input API usage.\n self.logger._set_uneven_input_join()\n try:\n has_error = False\n self.ddp_uneven_inputs_config = _DDPUnevenInputsConfig(\n ddp_join_enabled=enable,\n ddp_join_divide_by_initial_world_size=divide_by_initial_world_size,\n )\n yield\n except Exception as e:\n # Set to skip any processing in the finally block.\n has_error = True\n raise e\n finally:\n # Skip any processing to let the exception immediately be raised if\n # there was one.\n if enable and not has_error:\n all_procs_joined = False\n is_last_joiner = True\n i = 0\n WARN_THRESHOLD = 1000\n warnings.simplefilter(\"once\")\n while not all_procs_joined:\n if i > WARN_THRESHOLD:\n my_rank = self._distributed_rank\n warnings.warn(\n \"Detected uneven input skew of greater \"\n f\"than {WARN_THRESHOLD}. This means that rank {my_rank} \"\n f\"has at least {WARN_THRESHOLD} fewer inputs than \"\n \"other currently active ranks. This level of skew could \"\n \"lead to performance degradation during training.\"\n )\n # Schedules allreduce to match fwd pass allreduce in non-joined procs\n num_active_procs = self._schedule_shadow_all_reduce_for_fwd_pass()\n if num_active_procs == 0:\n all_procs_joined = True\n else:\n # Some DDP process still needs to be joined.\n if is_last_joiner:\n is_last_joiner = False\n # It will rebuild buckets only once during training period\n self.reducer._rebuild_buckets()\n # Schedule a corresponding broadcast if we are syncing module\n # buffers in the forward pass.\n self._check_and_sync_module_buffers()\n\n (\n work,\n should_sync_backwards_tensor,\n ) = self._check_global_requires_backward_grad_sync(\n is_joined_rank=True\n )\n work.wait()\n # If nonzero, then we should sync in the bwd pass.\n should_sync_backwards = should_sync_backwards_tensor.item() != 0\n # Forward param sync is disabled in the next iteration\n # if we are skipping grad sync this iteration. 
Hence, we\n # set require_forward_param_sync appropriately here.\n self.require_forward_param_sync = should_sync_backwards\n if not should_sync_backwards:\n continue\n # Schedules one allreduce per gradient bucket to match\n # the backwards pass allreduce.\n self._match_all_reduce_for_bwd_pass()\n # Check if we need to allreduce locally unused params.\n if self.find_unused_parameters:\n self._match_unused_params_allreduce()\n # It will push rebuilt params only once during training period\n self.reducer._push_all_rebuilt_params()\n i += 1\n\n # All procs joined. Agree on authoritative rank and broadcast the model.\n self._sync_final_model(is_last_joiner)\n\n def register_comm_hook(self, state: object, hook: callable):\n r\"\"\"\n Registers a communication hook which is an enhancement that provides a\n flexible hook to users where they can specify how DDP aggregates gradients\n across multiple workers.\n\n This hook would be very useful for researchers to try out new ideas. For\n example, this hook can be used to implement several algorithms like GossipGrad\n and gradient compression which involve different communication strategies for\n parameter syncs while running Distributed DataParallel training.\n\n Args:\n state (object): Passed to the hook to maintain any state information during the training process.\n Examples include error feedback in gradient compression,\n peers to communicate with next in GossipGrad, etc.\n\n It is locally stored by each worker\n and shared by all the gradient tensors on the worker.\n hook (callable): Averages gradient tensors across workers and defined as:\n ``hook(state: object, bucket: dist.GradBucket) -> torch.futures.Future``:\n\n This function is called once the bucket is ready. The\n hook can perform whatever processing is needed and return\n a Future indicating completion of any async work (ex: allreduce).\n If the hook doesn't perform any communication, it can also\n just return a completed Future. The Future should hold the\n new value of grad bucket's tensors. Once a bucket is ready,\n c10d reducer would call this hook and use the tensors returned\n by the Future and copy grads to individual parameters.\n\n We also provide an API called ``get_future`` to retrieve a\n Future associated with the completion of ``c10d.ProcessGroup.work``.\n\n .. warning ::\n Grad bucket's tensors will not be predivided by world_size. User is responsible\n to divide by the world_size in case of operations like allreduce.\n\n .. warning ::\n DDP communication hook can only be registered once and should be registered\n before calling backward.\n\n .. warning ::\n The Future object that hook returns should contain a result that has the same\n shape with the tensors inside grad bucket.\n\n .. warning ::\n DDP communication hook does not support single-process multiple-device mode.\n Gradbucket tensors should consist of only a single tensor.\n\n .. warning ::\n ``get_future`` API supports only NCCL backend and will return a ``torch._C.Future``\n which is an internal type and should be used with caution. It can still be used by\n ``register_comm_hook`` API, but it is subject to some subtle differences compared\n to ``torch.futures.Future``.\n\n .. 
warning ::\n DDP communication hook is experimental and subject to change.\n\n Example::\n Below is an example of a noop hook that returns the same tensors.\n\n >>> def noop(state: object, bucket: dist.GradBucket): -> torch.futures.Future\n >>> fut = torch.futures.Future()\n >>> fut.set_result(bucket.get_tensors())\n >>> return fut\n\n >>> ddp.register_comm_hook(state = None, hook = noop)\n\n Example::\n Below is an example of a Parallel SGD algorithm where gradients are encoded before\n allreduce, and then decoded after allreduce.\n\n >>> def encode_and_decode(state: object, bucket: dist.GradBucket): -> torch.futures.Future\n >>> tensors = [t / process_group.world_size for t in bucket.get_tensors()]\n >>> encoded_tensors = encode(tensors) # encode gradients\n >>> fut = process_group.allreduce(encoded_tensors).get_future()\n >>> # Define the then callback to decode.\n >>> def decode(fut):\n >>> decoded_tensors = decode(fut.value()) # decode gradients\n >>> return decoded_tensors\n >>> return fut.then(decode)\n\n >>> ddp.register_comm_hook(state = None, hook = encode_and_decode)\n \"\"\"\n self._check_comm_hook(hook)\n self.logger._set_comm_hook_name(hook.__qualname__)\n dist._register_comm_hook(self.reducer, state, hook)\n\n def _register_builtin_comm_hook(self, comm_hook_type):\n r\"\"\"\n Registers a built-in communication hook that specifies how DDP\n aggregates gradients across multiple workers.\n The built-in hooks aim to provide efficient C++ implementations for certain hooks,\n which might not be as efficient if implemented in Python using a Python communication hook.\n\n Args:\n comm_hook_type (dist.BuiltinCommHookType): type of communication hook, such as\n ALLREDUCE, FP16_COMPRESS, etc.\n\n .. warning ::\n DDP communication hook can only be registered once and should be registered\n before calling backward.\n\n .. warning ::\n DDP communication hook does not support single-process multiple-device mode.\n Gradbucket tensors should consist of only a single tensor.\n\n .. warning ::\n DDP communication hook is experimental and subject to change.\n\n Example::\n Below is an example of a FP16 compression where gradients are\n compressed into 16-bit floating-point numbers before allreduce, and\n then decompressed after allreduce.\n\n >>> ddp._register_builtin_comm_hook(dist.BuiltinCommHookType.FP16_COMPRESS)\n\n \"\"\"\n self.logger._set_comm_hook_name(str(comm_hook_type))\n dist._register_builtin_comm_hook(self.reducer, comm_hook_type)\n\n def _distributed_broadcast_coalesced(\n self, tensors, buffer_size, authoritative_rank=0\n ):\n dist._broadcast_coalesced(\n self.process_group, tensors, buffer_size, authoritative_rank\n )\n\n def will_sync_module_buffers(self):\n return (\n self.require_forward_param_sync\n and self.broadcast_buffers\n and len(self.modules_buffers[0]) > 0\n )\n\n def _find_common_rank(self, input_rank, rank_cond):\n # -1 indicates that this rank is not under consideration to be the\n # common_rank\n rank_to_use = torch.tensor(\n [input_rank if rank_cond else -1],\n device=self.device,\n )\n dist.all_reduce(rank_to_use, op=ReduceOp.MAX, group=self.process_group)\n if rank_to_use.item() == -1:\n raise ValueError(\n \"BUG! 
Expected rank_cond to be true for at least one process.\"\n )\n return rank_to_use.item()\n\n def _sync_params(self):\n with torch.no_grad():\n # module buffer sync\n if self.will_sync_module_buffers():\n # Synchronize buffers across processes.\n # If we are running DDP with the join manager, we have to agree\n # upon a rank to sync module buffers from, since rank 0 may\n # already have been joined and have stale module buffers.\n if self.ddp_uneven_inputs_config.ddp_join_enabled:\n authoritative_rank = self._find_common_rank(\n self._distributed_rank, True\n )\n else:\n # The process with rank 0 is considered the authoritative copy.\n authoritative_rank = 0\n self._distributed_broadcast_coalesced(\n self.modules_buffers[0],\n self.broadcast_bucket_size,\n authoritative_rank,\n )\n\n def _passing_sync_batchnorm_handle(self, module_copies):\n for dev_idx, module in enumerate(module_copies):\n for layer in module.modules():\n if isinstance(layer, torch.nn.modules.SyncBatchNorm):\n assert (\n self.device_type != \"cpu\"\n ), \"SyncBatchNorm layers only work with GPU modules\"\n layer._specify_ddp_gpu_num(1)\n\n def _check_comm_hook(self, hook):\n if not callable(hook):\n raise TypeError(\"Communication hook must be callable.\")\n\n sig = inspect.signature(hook)\n if (\n sig.parameters[\"bucket\"].annotation != inspect._empty\n and sig.parameters[\"bucket\"].annotation != dist.GradBucket\n ):\n raise ValueError(\n \"Communication hook: bucket annotation should be dist.GradBucket.\"\n )\n\n if sig.return_annotation != inspect._empty and (\n sig.return_annotation != torch.futures.Future\n and sig.return_annotation != torch._C.Future\n ):\n raise ValueError(\n \"Communication hook: return annotation should be torch.futures.Future or torch._C.Future.\"\n )\n\n @property\n def _distributed_rank(self):\n return dist.get_rank(self.process_group)\n\n @staticmethod\n def _set_params_and_buffers_to_ignore_for_model(\n module, params_and_buffers_to_ignore\n ):\n # This is a workaround to set parameters and buffers DDP should ignore\n # during synchronization. It will be removed when the API is finalized\n # as part of addressing https://github.com/pytorch/pytorch/issues/43690.\n module._ddp_params_and_buffers_to_ignore = params_and_buffers_to_ignore\n\n def get_ddp_logging_data(self):\n r\"\"\"\n This interface can be called after DistributedDataParallel() is\n constructed. It returns DDPLoggingData for debugging and analysis.\n More detailed explanation of the fields in DDPLoggingData are in\n ``torch/c10/util/Logging.h``.\n \"\"\"\n return self.logger._get_ddp_logging_data()\n\n def set_ddp_runtime_logging_sample_rate(self, sample_rate):\n r\"\"\"\n This interface allows users to set sample_rate of collecting\n runtime stats. The runtime stats will be recorded for the\n first 10 iterations, after 10 iteratons runtime stats will be\n recorded once every \"sample_rate\" training iterations. In\n default, runtime stats are recorded for the first 10 iterations,\n after 10 iterations runtime stats are recorded once every\n \"kDDPRuntimeLoggingSampleRate=100\" training iterations.\n \"\"\"\n if sample_rate < 1:\n raise ValueError(\n \"DDP runtime logging sample rate should be equal or greater than 1\"\n )\n self.reducer._set_ddp_runtime_logging_sample_rate(sample_rate)\n",
"import argparse\nimport datetime\nimport re\nimport sys\nfrom collections import defaultdict\n\nimport torch\nfrom torch._C import parse_schema\n\n\n# The date specifies how long the allowlist exclusion should apply to.\n#\n# - If we NEVER give BC guarantee for an operator, you can put the\n# date arbitrarily far in the future.\n# - Otherwise, pick a date that is far enough in the future that you\n# believe you can land your diff before then.\n#\n# Allowlist entries can be removed after the date listed on them passes.\n#\n# Allowlist item format:\n# [\n# 0: function name regex\n# 1: date until which the allowlist entry is valid\n# 2: (optional) function argument regex\n# ]\n#\n# NB: function name DOES NOT include overload name!\nallow_list = [\n (\"c10_experimental\", datetime.date(2222, 1, 1)),\n # Internal\n (\"static\", datetime.date(9999, 1, 1)),\n (\"prim::ModuleDictIndex\", datetime.date(9999, 1, 1)),\n # Internal, profiler-specific ops\n (\"profiler::_call_end_callbacks_on_jit_fut*\", datetime.date(9999, 1, 1)),\n (\"profiler::_record_function_enter\", datetime.date(9999, 1, 1)),\n (\"aten::_qr_helper\", datetime.date(2021, 1, 31)),\n (\"aten::fft\", datetime.date(2021, 1, 31)),\n (\"aten::ifft\", datetime.date(2021, 1, 31)),\n (\"aten::irfft\", datetime.date(2021, 1, 31)),\n (\"aten::rfft\", datetime.date(2021, 1, 31)),\n (\"aten::_lstsq_helper\", datetime.date(9999, 1, 1)),\n (\"aten::_svd_helper\", datetime.date(2021, 1, 31)),\n (\"aten::_syevd_helper\", datetime.date(9999, 1, 1)),\n (\"aten::_cudnn_rnn_flatten_weight\", datetime.date(2020, 12, 31)),\n (\"aten::_cudnn_rnn\", datetime.date(2020, 12, 31)),\n (\"aten::_cudnn_rnn_backward\", datetime.date(2020, 12, 31)),\n (\"aten::quantile\", datetime.date(2021, 1, 31)),\n (\"aten::nanquantile\", datetime.date(2021, 1, 31)),\n (\"aten::make_dual\", datetime.date(2021, 2, 20)),\n (\"aten::unpack_dual\", datetime.date(2021, 2, 20)),\n (\"aten::_fft_with_size\", datetime.date(2021, 1, 31)),\n (\"aten::thnn_conv_depthwise2d_backward\", datetime.date(2021, 1, 31)),\n (\"aten::slow_conv3d_backward\", datetime.date(2021, 1, 31)),\n (\"aten::thnn_conv2d_backward\", datetime.date(2021, 1, 31)),\n (\"aten::slow_conv_transpose3d_backward\", datetime.date(2021, 1, 31)),\n (\"aten::slow_conv_transpose2d_backward\", datetime.date(2021, 1, 31)),\n (\"aten::set_\", datetime.date(2021, 1, 31)),\n (\"aten::native_layer_norm\", datetime.date(2021, 1, 31)),\n (\"aten::native_layer_norm_backward\", datetime.date(2021, 1, 31)),\n (\"aten::elu_backward\", datetime.date(2021, 1, 31)),\n (\"aten::_multinomial_alias_setup\", datetime.date(2021, 1, 31)),\n (\"aten::_multinomial_alias_draw\", datetime.date(2021, 1, 31)),\n (\"prim::profile_optional\", datetime.date(2021, 1, 31)),\n (\"aten::fake_quantize_per_tensor_affine_backward\", datetime.date(2021, 2, 20)),\n (\"aten::fake_quantize_per_channel_affine_backward\", datetime.date(2021, 2, 20)),\n (\"aten::rowwise_prune\", datetime.date(9999, 1, 1)),\n (\"aten::_foreach_mul_\", datetime.date(2021, 4, 2)),\n (\"aten::_foreach_addcdiv_\", datetime.date(2021, 4, 2)),\n (\"aten::_foreach_div\", datetime.date(2021, 4, 2)),\n (\"aten::_foreach_addcmul_\", datetime.date(2021, 4, 2)),\n (\"aten::_foreach_sub\", datetime.date(2021, 4, 2)),\n (\"aten::_foreach_add\", datetime.date(2021, 4, 2)),\n (\"aten::_foreach_sub_\", datetime.date(2021, 4, 2)),\n (\"aten::_foreach_add_\", datetime.date(2021, 4, 2)),\n (\"aten::_foreach_mul\", datetime.date(2021, 4, 2)),\n (\"aten::_foreach_div_\", datetime.date(2021, 4, 2)),\n 
(\"aten::_foreach_addcdiv\", datetime.date(2021, 4, 2)),\n (\"aten::_foreach_addcmul\", datetime.date(2021, 4, 2)),\n (\"aten::mkldnn_linear\", datetime.date(2021, 3, 2)),\n (\"aten::_mode*\", datetime.date(2021, 5, 2)),\n (\"aten::linalg_multi_dot\", datetime.date(2021, 3, 25)),\n (\"aten::coalesce\", datetime.date(2021, 4, 15)),\n (\"aten::empty_meta\", datetime.date(2021, 4, 1)),\n (\"aten::div\", datetime.date(2021, 4, 28)),\n (\"aten::divide\", datetime.date(2021, 4, 28)),\n (\"aten::batch_norm_backward_elemt\", datetime.date(2021, 5, 1)),\n (\"aten::assert_async\", datetime.date(2021, 5, 1)),\n (\"aten::cumprod_backward\", datetime.date(2021, 5, 1)),\n (\"aten::_triangular_solve_helper\", datetime.date(9999, 1, 1)),\n (\"aten::adaptive_avg_pool3d_backward\", datetime.date(9999, 1, 1)),\n]\n\ndef allow_listed(schema, allow_list):\n for item in allow_list:\n if item[1] < datetime.date.today():\n continue\n regexp = re.compile(item[0])\n if regexp.search(schema.name):\n if len(item) > 2:\n # if arguments regex is present, use it\n regexp_args = re.compile(item[2])\n return bool(regexp_args.search(str(schema)))\n return True\n return False\n\n\n# The nightly will fail to parse newly added syntax to schema declarations\n# Add new schemas that will fail the nightly here\ndont_parse_list = [\n (\"_TorchScriptTesting.*\", datetime.date(2099, 9, 17)),\n (\"test_backend\", datetime.date(2099, 9, 17)),\n (\"dist_c10d\", datetime.date(2021, 1, 30)),\n]\n\n\ndef dont_parse(schema_line):\n for item in dont_parse_list:\n if item[1] < datetime.date.today():\n continue\n regexp = re.compile(item[0])\n if regexp.search(schema_line):\n return True\n return False\n\n\ndef check_bc(existing_schemas):\n new_schemas = torch._C._jit_get_all_schemas()\n new_schemas += torch._C._jit_get_custom_class_schemas()\n new_schema_dict = defaultdict(list)\n for s in new_schemas:\n new_schema_dict[s.name].append(s)\n\n is_bc = True\n broken_ops = []\n for existing_schema in existing_schemas:\n if allow_listed(existing_schema, allow_list):\n print(\"schema: \", str(existing_schema), \" found on allowlist, skipping\")\n continue\n print(\"processing existing schema: \", str(existing_schema))\n matching_new_schemas = new_schema_dict.get(existing_schema.name, [])\n found = False\n for matching_new_schema in matching_new_schemas:\n if matching_new_schema.is_backward_compatible_with(existing_schema):\n found = True\n break\n if not found:\n print(\n \"Can NOT find backward compatible schemas after changes \"\n \"for schema {} from the following candidates:\\n[\\n{}\\n]\".format(\n str(existing_schema),\n \"\\n\\t\".join(str(s) for s in matching_new_schemas),\n )\n )\n # TODO Print out more details about why candidates don't match.\n broken_ops.append(str(existing_schema))\n is_bc = False\n if is_bc:\n print(\"Found backward compatible schemas for all existing schemas\")\n else:\n print(\n \"The PR is introducing backward incompatible changes to the \"\n \"operator library. Please contact PyTorch team to confirm \"\n \"whether this change is wanted or not. 
\\n\\nBroken ops: \"\n \"[\\n\\t{}\\n]\".format(\"\\n\\t\".join(broken_ops))\n )\n return is_bc\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Check operator schemas for backward compatibility.\")\n parser.add_argument(\n \"--existing-schemas\",\n help=\"filename to load existing schemas\",\n type=str,\n default=\"schemas.txt\",\n )\n args = parser.parse_args()\n existing_schema_dict = dict()\n slist = []\n with open(args.existing_schemas, \"r\") as f:\n while True:\n line = f.readline()\n if not line:\n break\n\n if dont_parse(line.strip()):\n print(\"Not parsing schema line: \", line.strip())\n continue\n s = parse_schema(line.strip())\n slist.append(s)\n\n if not check_bc(slist):\n sys.exit(1)\n"
] | [
[
"torch.distributed._register_comm_hook",
"torch.distributed._verify_model_across_ranks",
"torch.zeros",
"torch.distributed.Logger",
"torch.distributed.distributed_c10d._get_default_group",
"torch.no_grad",
"torch.cuda.stream",
"torch.distributed.get_rank",
"torch.is_grad_enabled",
"torch.distributed._broadcast_coalesced",
"torch.distributed.rpc.is_available",
"torch.ones",
"torch.tensor",
"torch.distributed._compute_bucket_assignment_by_size",
"torch.distributed._register_builtin_comm_hook",
"torch.cuda.current_stream",
"torch.zeros_like",
"torch.distributed.is_available",
"torch.cuda.device",
"torch._utils._get_device_index",
"torch.distributed.all_reduce"
],
[
"torch._C._jit_get_custom_class_schemas",
"torch._C._jit_get_all_schemas"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
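A minimal usage sketch (not part of the dataset row above) ties together the DistributedDataParallel constructor arguments and the no_sync() context manager documented in the quoted torch/nn/parallel/distributed.py. It assumes a launcher such as torchrun sets the usual RANK/WORLD_SIZE/LOCAL_RANK environment variables; the linear model and random batches are purely illustrative:

import os
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel as DDP

def main():
    # One process per GPU; rank and device index come from the launcher.
    local_rank = int(os.environ["LOCAL_RANK"])
    dist.init_process_group(backend="nccl")
    torch.cuda.set_device(local_rank)

    model = nn.Linear(10, 10).to(local_rank)
    ddp_model = DDP(model, device_ids=[local_rank])  # single-device module
    optimizer = torch.optim.SGD(ddp_model.parameters(), lr=0.01)
    loss_fn = nn.MSELoss()

    xs = [torch.randn(20, 10, device=local_rank) for _ in range(4)]
    ys = [torch.randn(20, 10, device=local_rank) for _ in range(4)]

    # no_sync(): accumulate gradients locally; the allreduce only happens in
    # the first forward-backward pass after leaving the context.
    with ddp_model.no_sync():
        for x, y in zip(xs[:-1], ys[:-1]):
            loss_fn(ddp_model(x), y).backward()
    loss_fn(ddp_model(xs[-1]), ys[-1]).backward()  # gradient allreduce here
    optimizer.step()
    optimizer.zero_grad()

    dist.destroy_process_group()

if __name__ == "__main__":
    main()

A launch command along the lines of torchrun --nproc_per_node=2 this_script.py is an assumption and depends on the environment.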
fahmirevo/sign-language-recognition | [
"ff5e3f4ffb7ecba15667be8870db62717f1fab66"
] | [
"test.py"
] | [
"from keras.models import load_model\nimport numpy as np\n\nX = np.load(\"dataset/X_test.npy\")\nY = np.load(\"dataset/Y_test.npy\")\n\nmodel = load_model(\"model\")\n\nscore = model.evaluate(X, Y)\n\nprint(score[0], score[1])\n\n# print(np.argmax(model.predict(X[:200]), axis=1))\n# print(np.argmax(model.predict(X), axis=1) == np.argmax(Y, axis=1))\n# print(model.predict(X[:50]))\n"
] | [
[
"numpy.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
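The commented-out lines in the quoted test.py hint at inspecting predictions directly; a small sketch along those lines (assuming, as the np.argmax comparison implies, that Y_test.npy holds one-hot labels) would be:

import numpy as np
from keras.models import load_model

X = np.load("dataset/X_test.npy")
Y = np.load("dataset/Y_test.npy")
model = load_model("model")

# Compare predicted class indices against the one-hot ground truth.
pred_labels = np.argmax(model.predict(X), axis=1)
true_labels = np.argmax(Y, axis=1)
print("manual accuracy:", float(np.mean(pred_labels == true_labels)))

When the model was compiled with metrics=['accuracy'], this figure should agree with the score[1] printed by model.evaluate.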
deloragaskins/deepchem | [
"234ab699cdb997e5963966a8b6926cb2cda7c064",
"234ab699cdb997e5963966a8b6926cb2cda7c064",
"234ab699cdb997e5963966a8b6926cb2cda7c064",
"234ab699cdb997e5963966a8b6926cb2cda7c064",
"234ab699cdb997e5963966a8b6926cb2cda7c064"
] | [
"deepchem/molnet/load_function/factors_datasets.py",
"deepchem/dock/tests/test_pose_generation.py",
"deepchem/utils/voxel_utils.py",
"deepchem/models/tests/test_weave_models.py",
"deepchem/rl/tests/test_rl_reload.py"
] | [
"\"\"\"\nFACTOR dataset loader\n\"\"\"\nimport os\nimport logging\nimport time\n\nimport numpy as np\nimport deepchem\nfrom deepchem.molnet.load_function.kaggle_features import merck_descriptors\n\nlogger = logging.getLogger(__name__)\n\nTRAIN_URL = \"https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/FACTORS_training_disguised_combined_full.csv.gz\"\nVALID_URL = \"https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/FACTORS_test1_disguised_combined_full.csv.gz\"\nTEST_URL = \"https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/FACTORS_test2_disguised_combined_full.csv.gz\"\n\nTRAIN_FILENAME = \"FACTORS_training_disguised_combined_full.csv.gz\"\nVALID_FILENAME = \"FACTORS_test1_disguised_combined_full.csv.gz\"\nTEST_FILENAME = \"FACTORS_test2_disguised_combined_full.csv.gz\"\n\n\ndef remove_missing_entries(dataset):\n \"\"\"Remove missing entries.\n\n Some of the datasets have missing entries that sneak in as zero'd out\n feature vectors. Get rid of them.\n \"\"\"\n for i, (X, y, w, ids) in enumerate(dataset.itershards()):\n available_rows = X.any(axis=1)\n logger.info(\"Shard %d has %d missing entries.\" %\n (i, np.count_nonzero(~available_rows)))\n X = X[available_rows]\n y = y[available_rows]\n w = w[available_rows]\n ids = ids[available_rows]\n dataset.set_shard(i, X, y, w, ids)\n\n\ndef get_transformers(train_dataset):\n \"\"\"Gets transformers applied to the dataset\"\"\"\n\n transformers = list()\n # TODO: Check if anything needs to be added\n\n return transformers\n\n\ndef gen_factors(FACTORS_tasks,\n data_dir,\n train_dir,\n valid_dir,\n test_dir,\n shard_size=2000):\n \"\"\"Loads the FACTORS dataset; does not do train/test split\"\"\"\n\n time1 = time.time()\n\n train_files = os.path.join(data_dir, TRAIN_FILENAME)\n valid_files = os.path.join(data_dir, VALID_FILENAME)\n test_files = os.path.join(data_dir, TEST_FILENAME)\n\n if not os.path.exists(train_files):\n logger.info(\"Downloading train file...\")\n deepchem.utils.data_utils.download_url(url=TRAIN_URL, dest_dir=data_dir)\n logger.info(\"Training file download complete.\")\n\n logger.info(\"Downloading validation file...\")\n deepchem.utils.data_utils.download_url(url=VALID_URL, dest_dir=data_dir)\n logger.info(\"Validation file download complete.\")\n\n logger.info(\"Downloading test file...\")\n deepchem.utils.data_utils.download_url(url=TEST_URL, dest_dir=data_dir)\n logger.info(\"Test file download complete\")\n\n # Featurize the FACTORS dataset\n logger.info(\"About to featurize the FACTORS dataset\")\n featurizer = deepchem.feat.UserDefinedFeaturizer(merck_descriptors)\n loader = deepchem.data.UserCSVLoader(\n tasks=FACTORS_tasks, id_field=\"Molecule\", featurizer=featurizer)\n\n logger.info(\"Featurizing the train dataset...\")\n train_dataset = loader.featurize(train_files, shard_size=shard_size)\n\n logger.info(\"Featurizing the validation dataset...\")\n valid_dataset = loader.featurize(valid_files, shard_size=shard_size)\n\n logger.info(\"Featurizing the test dataset...\")\n test_dataset = loader.featurize(test_files, shard_size=shard_size)\n\n logger.info(\"Remove missing entries from dataset\")\n remove_missing_entries(train_dataset)\n remove_missing_entries(valid_dataset)\n remove_missing_entries(test_dataset)\n\n # Shuffle the training data\n logger.info(\"Shuffling the training dataset\")\n train_dataset.sparse_shuffle()\n\n # Apply transformations\n logger.info(\"Transforming datasets with transformers\")\n transformers = get_transformers(train_dataset)\n\n for transformer in transformers:\n 
logger.info(\"Performing transformations with {}\".format(\n transformer.__class__.__name__))\n\n logger.info(\"Transforming the training dataset...\")\n train_dataset = transformer.transform(train_dataset)\n\n logger.info(\"Transforming the validation dataset...\")\n valid_dataset = transformer.transform(valid_dataset)\n\n logger.info(\"Transforming the test dataset...\")\n test_dataset = transformer.transform(test_dataset)\n\n logger.info(\"Transformations complete.\")\n logger.info(\"Moving datasets to corresponding directories\")\n\n train_dataset.move(train_dir)\n logger.info(\"Train dataset moved.\")\n\n valid_dataset.move(valid_dir)\n logger.info(\"Validation dataset moved.\")\n\n test_dataset.move(test_dir)\n logger.info(\"Test dataset moved.\")\n\n time2 = time.time()\n\n # TIMING\n logger.info(\"TIMING: FACTORS fitting took %0.3f s\" % (time2 - time1))\n\n return train_dataset, valid_dataset, test_dataset\n\n\ndef load_factors(shard_size=2000, featurizer=None, split=None, reload=True):\n \"\"\"Loads FACTOR dataset; does not do train/test split\n\n The Factors dataset is an in-house dataset from Merck that was first introduced in the following paper:\n Ramsundar, Bharath, et al. \"Is multitask deep learning practical for pharma?.\" Journal of chemical information and modeling 57.8 (2017): 2068-2076.\n\n It contains 1500 Merck in-house compounds that were measured\n for IC50 of inhibition on 12 serine proteases. Unlike most of\n the other datasets featured in MoleculeNet, the Factors\n collection does not have structures for the compounds tested\n since they were proprietary Merck compounds. However, the\n collection does feature pre-computed descriptors for these\n compounds.\n\n Note that the original train/valid/test split from the source\n data was preserved here, so this function doesn't allow for\n alternate modes of splitting. Similarly, since the source data\n came pre-featurized, it is not possible to apply alternative\n featurizations.\n\n Parameters\n ----------\n shard_size: int, optional\n Size of the DiskDataset shards to write on disk\n featurizer: optional\n Ignored since featurization pre-computed\n split: optional\n Ignored since split pre-computed\n reload: bool, optional\n Whether to automatically re-load from disk\n\n \"\"\"\n\n FACTORS_tasks = [\n 'T_00001', 'T_00002', 'T_00003', 'T_00004', 'T_00005', 'T_00006',\n 'T_00007', 'T_00008', 'T_00009', 'T_00010', 'T_00011', 'T_00012'\n ]\n\n data_dir = deepchem.utils.data_utils.get_data_dir()\n data_dir = os.path.join(data_dir, \"factors\")\n\n if not os.path.exists(data_dir):\n os.mkdir(data_dir)\n\n train_dir = os.path.join(data_dir, \"train_dir\")\n valid_dir = os.path.join(data_dir, \"valid_dir\")\n test_dir = os.path.join(data_dir, \"test_dir\")\n\n if (os.path.exists(train_dir) and os.path.exists(valid_dir) and\n os.path.exists(test_dir)):\n\n logger.info(\"Reloading existing datasets\")\n train_dataset = deepchem.data.DiskDataset(train_dir)\n valid_dataset = deepchem.data.DiskDataset(valid_dir)\n test_dataset = deepchem.data.DiskDataset(test_dir)\n\n else:\n logger.info(\"Featurizing datasets\")\n train_dataset, valid_dataset, test_dataset = gen_factors(\n FACTORS_tasks=FACTORS_tasks,\n data_dir=data_dir,\n train_dir=train_dir,\n valid_dir=valid_dir,\n test_dir=test_dir,\n shard_size=shard_size)\n\n transformers = get_transformers(train_dataset)\n\n return FACTORS_tasks, (train_dataset, valid_dataset,\n test_dataset), transformers\n",
"\"\"\"\nTests for Pose Generation\n\"\"\"\nimport os\nimport platform\nimport tempfile\nimport unittest\nimport logging\nimport numpy as np\nimport deepchem as dc\nimport pytest\n\nIS_WINDOWS = platform.system() == 'Windows'\nIS_LINUX = platform.system() == 'Linux'\n\n\nclass TestPoseGeneration(unittest.TestCase):\n \"\"\"\n Does sanity checks on pose generation.\n \"\"\"\n\n @unittest.skipIf(IS_WINDOWS, 'Skip the test on Windows')\n def test_vina_initialization(self):\n \"\"\"Test that VinaPoseGenerator can be initialized.\"\"\"\n dc.dock.VinaPoseGenerator()\n\n @unittest.skipIf(not IS_LINUX, 'Skip the test on Windows and Mac.')\n def test_gnina_initialization(self):\n \"\"\"Test that GninaPoseGenerator can be initialized.\"\"\"\n dc.dock.GninaPoseGenerator()\n\n @unittest.skipIf(IS_WINDOWS, 'Skip the test on Windows')\n def test_pocket_vina_initialization(self):\n \"\"\"Test that VinaPoseGenerator can be initialized.\"\"\"\n pocket_finder = dc.dock.ConvexHullPocketFinder()\n dc.dock.VinaPoseGenerator(pocket_finder=pocket_finder)\n\n @pytest.mark.slow\n def test_vina_poses_and_scores(self):\n \"\"\"Test that VinaPoseGenerator generates poses and scores\n\n This test takes some time to run, about a minute and a half on\n development laptop.\n \"\"\"\n # Let's turn on logging since this test will run for a while\n logging.basicConfig(level=logging.INFO)\n current_dir = os.path.dirname(os.path.realpath(__file__))\n protein_file = os.path.join(current_dir, \"1jld_protein.pdb\")\n ligand_file = os.path.join(current_dir, \"1jld_ligand.sdf\")\n\n vpg = dc.dock.VinaPoseGenerator(pocket_finder=None)\n with tempfile.TemporaryDirectory() as tmp:\n poses, scores = vpg.generate_poses(\n (protein_file, ligand_file),\n exhaustiveness=1,\n num_modes=1,\n out_dir=tmp,\n generate_scores=True)\n\n assert len(poses) == 1\n assert len(scores) == 1\n protein, ligand = poses[0]\n from rdkit import Chem\n assert isinstance(protein, Chem.Mol)\n assert isinstance(ligand, Chem.Mol)\n\n @pytest.mark.slow\n @unittest.skipIf(not IS_LINUX, 'Skip the test on Windows and Mac.')\n def test_gnina_poses_and_scores(self):\n \"\"\"Test that GninaPoseGenerator generates poses and scores\n\n This test takes some time to run, about 3 minutes on\n development laptop.\n \"\"\"\n # Let's turn on logging since this test will run for a while\n logging.basicConfig(level=logging.INFO)\n current_dir = os.path.dirname(os.path.realpath(__file__))\n protein_file = os.path.join(current_dir, \"1jld_protein.pdb\")\n ligand_file = os.path.join(current_dir, \"1jld_ligand.sdf\")\n\n gpg = dc.dock.GninaPoseGenerator()\n with tempfile.TemporaryDirectory() as tmp:\n poses, scores = gpg.generate_poses(\n (protein_file, ligand_file),\n exhaustiveness=1,\n num_modes=1,\n out_dir=tmp)\n\n assert len(poses) == 1\n assert len(scores) == 1\n protein, ligand = poses[0]\n from rdkit import Chem\n assert isinstance(protein, Chem.Mol)\n assert isinstance(ligand, Chem.Mol)\n\n @pytest.mark.slow\n def test_vina_poses_no_scores(self):\n \"\"\"Test that VinaPoseGenerator generates poses.\n\n This test takes some time to run, about a minute and a half on\n development laptop.\n \"\"\"\n # Let's turn on logging since this test will run for a while\n logging.basicConfig(level=logging.INFO)\n current_dir = os.path.dirname(os.path.realpath(__file__))\n protein_file = os.path.join(current_dir, \"1jld_protein.pdb\")\n ligand_file = os.path.join(current_dir, \"1jld_ligand.sdf\")\n\n vpg = dc.dock.VinaPoseGenerator(pocket_finder=None)\n with tempfile.TemporaryDirectory() 
as tmp:\n poses = vpg.generate_poses(\n (protein_file, ligand_file),\n exhaustiveness=1,\n num_modes=1,\n out_dir=tmp,\n generate_scores=False)\n\n assert len(poses) == 1\n protein, ligand = poses[0]\n from rdkit import Chem\n assert isinstance(protein, Chem.Mol)\n assert isinstance(ligand, Chem.Mol)\n\n @pytest.mark.slow\n def test_vina_pose_specified_centroid(self):\n \"\"\"Test that VinaPoseGenerator creates pose files with specified centroid/box dims.\n\n This test takes some time to run, about a minute and a half on\n development laptop.\n \"\"\"\n # Let's turn on logging since this test will run for a while\n logging.basicConfig(level=logging.INFO)\n current_dir = os.path.dirname(os.path.realpath(__file__))\n protein_file = os.path.join(current_dir, \"1jld_protein.pdb\")\n ligand_file = os.path.join(current_dir, \"1jld_ligand.sdf\")\n\n centroid = np.array([56.21891368, 25.95862964, 3.58950065])\n box_dims = np.array([51.354, 51.243, 55.608])\n vpg = dc.dock.VinaPoseGenerator(pocket_finder=None)\n with tempfile.TemporaryDirectory() as tmp:\n poses, scores = vpg.generate_poses(\n (protein_file, ligand_file),\n centroid=centroid,\n box_dims=box_dims,\n exhaustiveness=1,\n num_modes=1,\n out_dir=tmp,\n generate_scores=True)\n\n assert len(poses) == 1\n assert len(scores) == 1\n protein, ligand = poses[0]\n from rdkit import Chem\n assert isinstance(protein, Chem.Mol)\n assert isinstance(ligand, Chem.Mol)\n\n @pytest.mark.slow\n def test_pocket_vina_poses(self):\n \"\"\"Test that VinaPoseGenerator creates pose files.\n\n This test is quite slow and takes about 5 minutes to run on a\n development laptop.\n \"\"\"\n # Let's turn on logging since this test will run for a while\n logging.basicConfig(level=logging.INFO)\n current_dir = os.path.dirname(os.path.realpath(__file__))\n protein_file = os.path.join(current_dir, \"1jld_protein.pdb\")\n ligand_file = os.path.join(current_dir, \"1jld_ligand.sdf\")\n\n # Note this may download autodock Vina...\n convex_finder = dc.dock.ConvexHullPocketFinder()\n vpg = dc.dock.VinaPoseGenerator(pocket_finder=convex_finder)\n with tempfile.TemporaryDirectory() as tmp:\n poses, scores = vpg.generate_poses(\n (protein_file, ligand_file),\n exhaustiveness=1,\n num_modes=1,\n num_pockets=2,\n out_dir=tmp,\n generate_scores=True)\n\n assert len(poses) == 2\n assert len(scores) == 2\n from rdkit import Chem\n for pose in poses:\n protein, ligand = pose\n assert isinstance(protein, Chem.Mol)\n assert isinstance(ligand, Chem.Mol)\n",
"\"\"\"\nVarious utilities around voxel grids.\n\"\"\"\nimport logging\nfrom typing import Any, Callable, Dict, List, Optional, Tuple, Union\nimport numpy as np\n\nfrom deepchem.utils.noncovalent_utils import compute_pi_stack\n\nlogger = logging.getLogger(__name__)\n\n\ndef convert_atom_to_voxel(coordinates: np.ndarray, atom_index: int,\n box_width: float, voxel_width: float) -> np.ndarray:\n \"\"\"Converts atom coordinates to an i,j,k grid index.\n\n This function offsets molecular atom coordinates by\n (box_width/2, box_width/2, box_width/2) and then divides by\n voxel_width to compute the voxel indices.\n\n Parameters\n -----------\n coordinates: np.ndarray\n Array with coordinates of all atoms in the molecule, shape (N, 3).\n atom_index: int\n Index of an atom in the molecule.\n box_width: float\n Size of the box in Angstroms.\n voxel_width: float\n Size of a voxel in Angstroms\n\n Returns\n -------\n indices: np.ndarray\n A 1D numpy array of length 3 with `[i, j, k]`, the voxel coordinates\n of specified atom.\n \"\"\"\n\n indices = np.floor(\n (coordinates[atom_index] + box_width / 2.0) / voxel_width).astype(int)\n\n return indices\n\n\ndef convert_atom_pair_to_voxel(coordinates_tuple: Tuple[np.ndarray, np.ndarray],\n atom_index_pair: Tuple[int, int],\n box_width: float,\n voxel_width: float) -> np.ndarray:\n \"\"\"Converts a pair of atoms to i,j,k grid indexes.\n\n\n Parameters\n ----------\n coordinates_tuple: Tuple[np.ndarray, np.ndarray]\n A tuple containing two molecular coordinate arrays of shapes `(N, 3)` and `(M, 3)`.\n atom_index_pair: Tuple[int, int]\n A tuple of indices for the atoms in the two molecules.\n box_width: float\n Size of the box in Angstroms.\n voxel_width: float\n Size of a voxel in Angstroms\n\n Returns\n -------\n indices_list: np.ndarray\n A numpy array of shape `(2, 3)`, where `3` is `[i, j, k]` of the\n voxel coordinates of specified atom.\n \"\"\"\n\n indices_list = []\n for coordinates, atom_index in zip(coordinates_tuple, atom_index_pair):\n indices_list.append(\n convert_atom_to_voxel(coordinates, atom_index, box_width, voxel_width))\n return np.array(indices_list)\n\n\ndef voxelize(get_voxels: Callable[..., Any],\n coordinates: np.ndarray,\n box_width: float = 16.0,\n voxel_width: float = 1.0,\n hash_function: Optional[Callable[..., Any]] = None,\n feature_dict: Optional[Dict[Any, Any]] = None,\n feature_list: Optional[List[Union[int, Tuple[int]]]] = None,\n nb_channel: int = 16,\n dtype: str = 'int') -> np.ndarray:\n \"\"\"Helper function to voxelize inputs.\n\n This helper function helps convert a hash function which\n specifies spatial features of a molecular complex into a voxel\n tensor. This utility is used by various featurizers that generate\n voxel grids.\n\n Parameters\n ----------\n get_voxels: Function\n Function that voxelizes inputs\n coordinates: np.ndarray\n Contains the 3D coordinates of a molecular system.\n box_width: float, optional (default 16.0)\n Size of a box in which voxel features are calculated. Box\n is centered on a ligand centroid.\n voxel_width: float, optional (default 1.0)\n Size of a 3D voxel in a grid in Angstroms.\n hash_function: Function\n Used to map feature choices to voxel channels.\n feature_dict: Dict, optional (default None)\n Keys are atom indices or tuples of atom indices, the values are\n computed features. 
If `hash_function is not None`, then the values\n are hashed using the hash function into `[0, nb_channels)` and\n this channel at the voxel for the given key is incremented by `1`\n for each dictionary entry. If `hash_function is None`, then the\n value must be a vector of size `(n_channels,)` which is added to\n the existing channel values at that voxel grid.\n feature_list: List, optional (default None)\n List of atom indices or tuples of atom indices. This can only be\n used if `nb_channel==1`. Increments the voxels corresponding to\n these indices by `1` for each entry.\n nb_channel: int, , optional (default 16)\n The number of feature channels computed per voxel. Should\n be a power of 2.\n dtype: str ('int' or 'float'), optional (default 'int')\n The type of the numpy ndarray created to hold features.\n\n Returns\n -------\n feature_tensor: np.ndarray\n The voxel of the input with the shape\n `(voxels_per_edge, voxels_per_edge, voxels_per_edge, nb_channel)`.\n \"\"\"\n # Number of voxels per one edge of box to voxelize.\n voxels_per_edge = int(box_width / voxel_width)\n if dtype == \"int\":\n feature_tensor = np.zeros(\n (voxels_per_edge, voxels_per_edge, voxels_per_edge, nb_channel),\n dtype=np.int8)\n else:\n feature_tensor = np.zeros(\n (voxels_per_edge, voxels_per_edge, voxels_per_edge, nb_channel),\n dtype=np.float16)\n if feature_dict is not None:\n for key, features in feature_dict.items():\n voxels = get_voxels(coordinates, key, box_width, voxel_width)\n if len(voxels.shape) == 1:\n voxels = np.expand_dims(voxels, axis=0)\n for voxel in voxels:\n if ((voxel >= 0) & (voxel < voxels_per_edge)).all():\n if hash_function is not None:\n feature_tensor[voxel[0], voxel[1], voxel[2],\n hash_function(features, nb_channel)] += 1.0\n else:\n feature_tensor[voxel[0], voxel[1], voxel[2], 0] += features\n elif feature_list is not None:\n for key in feature_list:\n voxels = get_voxels(coordinates, key, box_width, voxel_width)\n for voxel in voxels:\n if ((voxel >= 0) & (voxel < voxels_per_edge)).all():\n feature_tensor[voxel[0], voxel[1], voxel[2], 0] += 1.0\n\n return feature_tensor\n\n\ndef voxelize_pi_stack(prot_xyz, prot_rdk, lig_xyz, lig_rdk, distances,\n pi_stack_dist_cutoff, pi_stack_angle_cutoff, box_width,\n voxel_width):\n protein_pi_t, protein_pi_parallel, ligand_pi_t, ligand_pi_parallel = (\n compute_pi_stack(\n prot_rdk,\n lig_rdk,\n distances,\n dist_cutoff=pi_stack_dist_cutoff,\n angle_cutoff=pi_stack_angle_cutoff))\n pi_parallel_tensor = voxelize(\n convert_atom_to_voxel,\n prot_xyz,\n box_width=box_width,\n voxel_width=voxel_width,\n feature_dict=protein_pi_parallel,\n nb_channel=1,\n )\n\n pi_parallel_tensor += voxelize(\n convert_atom_to_voxel,\n lig_xyz,\n box_width=box_width,\n voxel_width=voxel_width,\n feature_dict=ligand_pi_parallel,\n nb_channel=1,\n )\n\n pi_t_tensor = voxelize(\n convert_atom_to_voxel,\n prot_xyz,\n box_width=box_width,\n voxel_width=voxel_width,\n feature_dict=protein_pi_t,\n nb_channel=1,\n )\n\n pi_t_tensor += voxelize(\n convert_atom_to_voxel,\n lig_xyz,\n box_width=box_width,\n voxel_width=voxel_width,\n feature_dict=ligand_pi_t,\n nb_channel=1,\n )\n\n return [pi_parallel_tensor, pi_t_tensor]\n",
"import unittest\nimport os\nimport numpy as np\nimport pytest\nimport scipy\n\nimport deepchem as dc\nfrom deepchem.data import NumpyDataset\nfrom deepchem.molnet import load_bace_classification, load_delaney\nfrom deepchem.feat import ConvMolFeaturizer\ntry:\n import tensorflow as tf\n from deepchem.models import GraphConvModel, DAGModel, WeaveModel, MPNNModel\n has_tensorflow = True\nexcept:\n has_tensorflow = False\n\nfrom flaky import flaky\n\n\ndef get_dataset(mode='classification',\n featurizer='GraphConv',\n num_tasks=2,\n data_points=20):\n if mode == 'classification':\n tasks, all_dataset, transformers = load_bace_classification(\n featurizer, reload=False)\n else:\n tasks, all_dataset, transformers = load_delaney(featurizer, reload=False)\n\n train, valid, test = all_dataset\n for i in range(1, num_tasks):\n tasks.append(\"random_task\")\n w = np.ones(shape=(data_points, len(tasks)))\n\n if mode == 'classification':\n y = np.random.randint(0, 2, size=(data_points, len(tasks)))\n metric = dc.metrics.Metric(\n dc.metrics.roc_auc_score, np.mean, mode=\"classification\")\n else:\n y = np.random.normal(size=(data_points, len(tasks)))\n metric = dc.metrics.Metric(\n dc.metrics.mean_absolute_error, mode=\"regression\")\n\n ds = NumpyDataset(train.X[:data_points], y, w, train.ids[:data_points])\n\n return tasks, ds, transformers, metric\n\n\[email protected]\ndef test_compute_features_on_infinity_distance():\n \"\"\"Test that WeaveModel correctly transforms WeaveMol objects into tensors with infinite max_pair_distance.\"\"\"\n featurizer = dc.feat.WeaveFeaturizer(max_pair_distance=None)\n X = featurizer([\"C\", \"CCC\"])\n batch_size = 20\n model = WeaveModel(\n 1,\n batch_size=batch_size,\n mode='classification',\n fully_connected_layer_sizes=[2000, 1000],\n batch_normalize=True,\n batch_normalize_kwargs={\n \"fused\": False,\n \"trainable\": True,\n \"renorm\": True\n },\n learning_rate=0.0005)\n atom_feat, pair_feat, pair_split, atom_split, atom_to_pair = model.compute_features_on_batch(\n X)\n\n # There are 4 atoms each of which have 75 atom features\n assert atom_feat.shape == (4, 75)\n # There are 10 pairs with infinity distance and 14 pair features\n assert pair_feat.shape == (10, 14)\n # 4 atoms in total\n assert atom_split.shape == (4,)\n assert np.all(atom_split == np.array([0, 1, 1, 1]))\n # 10 pairs in total\n assert pair_split.shape == (10,)\n assert np.all(pair_split == np.array([0, 1, 1, 1, 2, 2, 2, 3, 3, 3]))\n # 10 pairs in total each with start/finish\n assert atom_to_pair.shape == (10, 2)\n assert np.all(\n atom_to_pair == np.array([[0, 0], [1, 1], [1, 2], [1, 3], [2, 1], [2, 2],\n [2, 3], [3, 1], [3, 2], [3, 3]]))\n\n\[email protected]\ndef test_compute_features_on_distance_1():\n \"\"\"Test that WeaveModel correctly transforms WeaveMol objects into tensors with finite max_pair_distance.\"\"\"\n featurizer = dc.feat.WeaveFeaturizer(max_pair_distance=1)\n X = featurizer([\"C\", \"CCC\"])\n batch_size = 20\n model = WeaveModel(\n 1,\n batch_size=batch_size,\n mode='classification',\n fully_connected_layer_sizes=[2000, 1000],\n batch_normalize=True,\n batch_normalize_kwargs={\n \"fused\": False,\n \"trainable\": True,\n \"renorm\": True\n },\n learning_rate=0.0005)\n atom_feat, pair_feat, pair_split, atom_split, atom_to_pair = model.compute_features_on_batch(\n X)\n\n # There are 4 atoms each of which have 75 atom features\n assert atom_feat.shape == (4, 75)\n # There are 8 pairs with distance 1 and 14 pair features. (To see why 8,\n # there's the self pair for \"C\". 
For \"CCC\" there are 7 pairs including self\n # connections and accounting for symmetry.)\n assert pair_feat.shape == (8, 14)\n # 4 atoms in total\n assert atom_split.shape == (4,)\n assert np.all(atom_split == np.array([0, 1, 1, 1]))\n # 10 pairs in total\n assert pair_split.shape == (8,)\n # The center atom is self connected and to both neighbors so it appears\n # thrice. The canonical ranking used in MolecularFeaturizer means this\n # central atom is ranked last in ordering.\n assert np.all(pair_split == np.array([0, 1, 1, 2, 2, 3, 3, 3]))\n # 10 pairs in total each with start/finish\n assert atom_to_pair.shape == (8, 2)\n assert np.all(atom_to_pair == np.array([[0, 0], [1, 1], [1, 3], [2, 2],\n [2, 3], [3, 1], [3, 2], [3, 3]]))\n\n\n@flaky\[email protected]\[email protected]\ndef test_weave_model():\n tasks, dataset, transformers, metric = get_dataset(\n 'classification', 'Weave', data_points=10)\n\n batch_size = 10\n model = WeaveModel(\n len(tasks),\n batch_size=batch_size,\n mode='classification',\n dropouts=0,\n learning_rate=0.0001)\n model.fit(dataset, nb_epoch=250)\n scores = model.evaluate(dataset, [metric], transformers)\n assert scores['mean-roc_auc_score'] >= 0.9\n\n\[email protected]\[email protected]\ndef test_weave_regression_model():\n import numpy as np\n import tensorflow as tf\n tf.random.set_seed(123)\n np.random.seed(123)\n tasks, dataset, transformers, metric = get_dataset(\n 'regression', 'Weave', data_points=10)\n\n batch_size = 10\n model = WeaveModel(\n len(tasks),\n batch_size=batch_size,\n mode='regression',\n dropouts=0,\n learning_rate=0.00003)\n model.fit(dataset, nb_epoch=400)\n scores = model.evaluate(dataset, [metric], transformers)\n assert scores['mean_absolute_error'] < 0.1\n\n\n# def test_weave_fit_simple_infinity_distance():\n# featurizer = dc.feat.WeaveFeaturizer(max_pair_distance=None)\n# X = featurizer([\"C\", \"CCC\"])\n# y = np.array([0, 1.])\n# dataset = dc.data.NumpyDataset(X, y)\n\n# batch_size = 20\n# model = WeaveModel(\n# 1,\n# batch_size=batch_size,\n# mode='classification',\n# fully_connected_layer_sizes=[2000, 1000],\n# batch_normalize=True,\n# batch_normalize_kwargs={\n# \"fused\": False,\n# \"trainable\": True,\n# \"renorm\": True\n# },\n# learning_rate=0.0005)\n# model.fit(dataset, nb_epoch=200)\n# transformers = []\n# metric = dc.metrics.Metric(\n# dc.metrics.roc_auc_score, np.mean, mode=\"classification\")\n# scores = model.evaluate(dataset, [metric], transformers)\n# assert scores['mean-roc_auc_score'] >= 0.9\n\n\[email protected]\ndef test_weave_fit_simple_distance_1():\n featurizer = dc.feat.WeaveFeaturizer(max_pair_distance=1)\n X = featurizer([\"C\", \"CCC\"])\n y = np.array([0, 1.])\n dataset = dc.data.NumpyDataset(X, y)\n\n batch_size = 20\n model = WeaveModel(\n 1,\n batch_size=batch_size,\n mode='classification',\n fully_connected_layer_sizes=[2000, 1000],\n batch_normalize=True,\n batch_normalize_kwargs={\n \"fused\": False,\n \"trainable\": True,\n \"renorm\": True\n },\n learning_rate=0.0005)\n model.fit(dataset, nb_epoch=200)\n transformers = []\n metric = dc.metrics.Metric(\n dc.metrics.roc_auc_score, np.mean, mode=\"classification\")\n scores = model.evaluate(dataset, [metric], transformers)\n assert scores['mean-roc_auc_score'] >= 0.9\n",
"import deepchem as dc\nimport numpy as np\nimport pytest\nfrom deepchem.models.optimizers import Adam\ntry:\n import tensorflow as tf\n\n class RouletteEnvironment(dc.rl.Environment):\n\n def __init__(self):\n super(RouletteEnvironment, self).__init__([(1,)], 38)\n self._state = [np.array([0])]\n\n def step(self, action):\n if action == 37:\n self._terminated = True # Walk away.\n return 0.0\n wheel = np.random.randint(37)\n if wheel == 0:\n if action == 0:\n return 35.0\n return -1.0\n if action != 0 and wheel % 2 == action % 2:\n return 1.0\n return -1.0\n\n def reset(self):\n self._terminated = False\n\n # This policy just learns a constant probability for each action, and a constant for the value.\n\n class TestPolicy(dc.rl.Policy):\n\n def __init__(self, env):\n super(TestPolicy, self).__init__(['action_prob', 'value'])\n self.env = env\n\n def create_model(self, **kwargs):\n env = self.env\n\n class TestModel(tf.keras.Model):\n\n def __init__(self):\n super(TestModel, self).__init__(**kwargs)\n self.action = tf.Variable(np.ones(env.n_actions, np.float32))\n self.value = tf.Variable([0.0], tf.float32)\n\n def call(self, inputs, **kwargs):\n prob = tf.nn.softmax(tf.reshape(self.action, (-1, env.n_actions)))\n return (prob, self.value)\n\n return TestModel()\n\n has_tensorflow = True\nexcept:\n has_tensorflow = False\n\n\[email protected]\ndef test_a2c_reload():\n env = RouletteEnvironment()\n policy = TestPolicy(env)\n\n a2c = dc.rl.A2C(\n env, policy, max_rollout_length=20, optimizer=Adam(learning_rate=0.001))\n a2c.fit(1000)\n action_prob, value = a2c.predict([[0]])\n\n new_a2c = dc.rl.A2C(env, policy, model_dir=a2c._model.model_dir)\n new_a2c.restore()\n action_prob2, value2 = new_a2c.predict([[0]])\n\n assert np.all(action_prob == action_prob2)\n assert value == value2\n\n\[email protected]\ndef test_ppo_reload():\n env = RouletteEnvironment()\n policy = TestPolicy(env)\n ppo = dc.rl.PPO(\n env,\n policy,\n max_rollout_length=20,\n optimization_epochs=8,\n optimizer=Adam(learning_rate=0.003))\n ppo.fit(1000)\n action_prob, value = ppo.predict([[0]])\n\n new_ppo = dc.rl.PPO(env, policy, model_dir=ppo._model.model_dir)\n new_ppo.restore()\n action_prob2, value2 = new_ppo.predict([[0]])\n\n assert np.all(action_prob == action_prob2)\n assert value == value2\n"
] | [
[
"numpy.count_nonzero"
],
[
"numpy.array"
],
[
"numpy.expand_dims",
"numpy.array",
"numpy.zeros",
"numpy.floor"
],
[
"numpy.array",
"numpy.random.seed",
"tensorflow.random.set_seed"
],
[
"tensorflow.Variable",
"tensorflow.reshape",
"numpy.ones",
"numpy.all",
"numpy.array",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"2.7",
"1.4",
"2.6",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.2",
"1.2",
"2.10"
]
}
] |
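The deepchem record above maps atom coordinates to voxel grid indices by offsetting each coordinate by half the box width and then dividing by the voxel width. A minimal standalone sketch of that mapping (toy coordinates and a hypothetical helper name; no deepchem dependency assumed):

import numpy as np

def atom_to_voxel(coordinates, atom_index, box_width, voxel_width):
    # coordinates: (N, 3) array of atom positions, centered on the box origin.
    # Same arithmetic as convert_atom_to_voxel in the voxel utilities above.
    return np.floor(
        (coordinates[atom_index] + box_width / 2.0) / voxel_width).astype(int)

coords = np.array([[0.0, 0.0, 0.0], [3.2, -1.5, 7.9]])
print(atom_to_voxel(coords, 0, box_width=16.0, voxel_width=1.0))  # [8 8 8]
print(atom_to_voxel(coords, 1, box_width=16.0, voxel_width=1.0))  # [11  6 15]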
msc5/junior-iw | [
"d356e015fcd3a3be638097a1acc02d5dea4751aa"
] | [
"src/data/datasets/BAIR/BAIR.py"
] | [
"import os\nimport io\nimport numpy as np\nfrom PIL import Image\nimport torch\n\nfrom torchvision.transforms import ToTensor\n\n\nclass BAIR (object):\n\n \"\"\"Data Handler that loads robot pushing data.\"\"\"\n\n def __init__(self, data_root, train=True, seq_len=20, image_size=64):\n self.root_dir = data_root\n if train:\n self.data_dir = '%s/processed_data/train' % self.root_dir\n self.ordered = False\n else:\n self.data_dir = '%s/processed_data/test' % self.root_dir\n self.ordered = True\n self.dirs = []\n for d1 in os.listdir(self.data_dir):\n for d2 in os.listdir('%s/%s' % (self.data_dir, d1)):\n self.dirs.append('%s/%s/%s' % (self.data_dir, d1, d2))\n self.seq_len = seq_len\n self.image_size = image_size\n self.seed_is_set = False # multi threaded loading\n self.d = 0\n self.totensor = ToTensor()\n\n def set_seed(self, seed):\n if not self.seed_is_set:\n self.seed_is_set = True\n np.random.seed(seed)\n\n def __len__(self):\n return len(self.dirs)\n\n def get_seq(self):\n if self.ordered:\n d = self.dirs[self.d]\n if self.d == len(self.dirs) - 1:\n self.d = 0\n else:\n self.d += 1\n else:\n d = self.dirs[np.random.randint(len(self.dirs))]\n image_seq = []\n for i in range(self.seq_len):\n fname = '%s/%d.png' % (d, i)\n # im = imread(fname).reshape(1, 64, 64, 3)\n # im = np.array(Image.open(fname)).reshape((1, 3, 64, 64))\n im = self.totensor(Image.open(fname)).reshape(1, 3, 64, 64)\n image_seq.append(im)\n image_seq = torch.cat(image_seq, axis=0)\n return image_seq\n\n def __getitem__(self, index):\n self.set_seed(index)\n return self.get_seq()\n\n\nif __name__ == \"__main__\":\n from torch.utils.data import DataLoader\n train_dataset = BAIR('src/data/datasets/BAIR/raw', train=True)\n train_dataloader = DataLoader(train_dataloader, batch_size=4)\n print(len(train_dataset, train_dataloader))\n"
] | [
[
"torch.utils.data.DataLoader",
"numpy.random.seed",
"torch.cat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
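The BAIR handler above assembles each sample by converting per-frame PNGs with ToTensor and concatenating them into a (seq_len, 3, 64, 64) tensor. A minimal sketch of that stacking step, using synthetic in-memory frames as a stand-in for the files on disk:

import numpy as np
import torch
from PIL import Image
from torchvision.transforms import ToTensor

totensor = ToTensor()
# Random 64x64 RGB frames stand in for the '%s/%d.png' files read by get_seq().
frames = [Image.fromarray(np.random.randint(0, 255, (64, 64, 3), dtype=np.uint8))
          for _ in range(20)]
# ToTensor yields (3, 64, 64) floats in [0, 1]; add a leading axis so torch.cat stacks frames.
seq = torch.cat([totensor(f).reshape(1, 3, 64, 64) for f in frames], dim=0)
print(seq.shape)  # torch.Size([20, 3, 64, 64])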
glaswasser/python-vision | [
"706c314a86b8f35c313bb3e907ae84317dca1a0b"
] | [
"samples/snippets/detect/label-products.py"
] | [
"\nfrom detect import (detect_logos, detect_text)\nimport pandas as pd\nimport re\nimport os\n#from __future__ import print_function\nfrom google.cloud import vision\n\n\nimages_path = \"C:\\\\Users\\\\heinz\\\\Yagora GmbH\\\\Ievgen Kyrda - Crawler\\\\images\\\\foodnewsgermany_images/\"\nfile_names = os.listdir(os.path.dirname(images_path))\n\nfile_paths = [images_path + f for f in file_names]\n\nlogos = [detect_logos(f) for f in file_paths]\n\ntexts = [detect_text(f)[0].description for f in file_paths]\n# remove line break symbols\ntexts = [x.replace(\"\\n\", \", \") for x in texts]\n\ncontained = []\n#contained[1] = \"test\"\nfor i in range(len(logos)): # loop over future rows of df\n tmp = []\n for j in logos[i]: # for every logo-row, check if in text\n if j.lower() in texts[i].lower():\n tmp.append(logos[i])\n else:\n tmp.append(None)\n contained.append(tmp)\n\ndetect_df = pd.DataFrame(\n list(zip(file_names, texts, logos, contained, file_paths)),\n columns = [\"files\", \"texts\", \"logos\", \"probable_brand\", \"file_path\"]\n)\ndetect_df\n\n# other ideas:\n# if logo in existing logos, add logo\n\n\n\nfrom PIL import Image\nfrom io import BytesIO\nfrom IPython.display import HTML\nimport base64\n\n\npd.set_option('display.max_colwidth', -1)\n\ndef get_thumbnail(path):\n i = Image.open(path)\n i.thumbnail((150, 150), Image.LANCZOS)\n return i\n\n\ndef image_base64(im):\n if isinstance(im, str):\n im = get_thumbnail(im)\n with BytesIO() as buffer:\n im.save(buffer, 'jpeg')\n return base64.b64encode(buffer.getvalue()).decode()\n\ndef image_formatter(im):\n return f'<img src=\"data:image/jpeg;base64,{image_base64(im)}\">'\n\n#dogs['file'] = dogs.id.map(lambda id: f'../input/train/{id}.jpg')\n\ndetect_df['image'] = detect_df.file_path.map(lambda f: get_thumbnail(f))\n\nHTML(detect_df.to_html(formatters={'image': image_formatter}, escape=False))"
] | [
[
"pandas.set_option"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
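The label-products script above renders thumbnails inside a pandas HTML table by encoding each PIL image as a base64 data URI. A minimal sketch of that pattern, using a generated solid-color image as a stand-in for the crawled product photos:

import base64
from io import BytesIO

import pandas as pd
from PIL import Image

def image_base64(im):
    # Encode a PIL image as a base64 JPEG string.
    with BytesIO() as buffer:
        im.save(buffer, 'jpeg')
        return base64.b64encode(buffer.getvalue()).decode()

def image_formatter(im):
    return f'<img src="data:image/jpeg;base64,{image_base64(im)}">'

df = pd.DataFrame({'label': ['sample'], 'image': [Image.new('RGB', (32, 32), 'red')]})
html = df.to_html(formatters={'image': image_formatter}, escape=False)
print(html[:120])  # the <img src="data:image/jpeg;base64,..."> tag is embedded in the table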
mustafamerttunali/Tensorflow-Training-GUI | [
"1992185fd18e768f30c5bb5edd08ea709be97b09",
"1992185fd18e768f30c5bb5edd08ea709be97b09"
] | [
"tests/test_basic.py",
"tests/img2array.py"
] | [
"import os\nimport numpy as np\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\nfrom multiprocessing import Process\n\n\ndef startTensorboard(logdir):\n # Start tensorboard with system call\n os.system(\"tensorboard --logdir {}\".format(logdir))\n\n\ndef fitModel():\n # Create your model\n model = Sequential()\n model.add(Dense(32, activation='relu', input_dim=100))\n model.add(Dense(1, activation='sigmoid'))\n model.compile(optimizer='rmsprop',\n loss='binary_crossentropy',\n metrics=['accuracy'])\n\n # Some mock training data\n data = np.random.random((1000, 100))\n labels = np.random.randint(2, size=(1000, 1))\n\n # Run the fit function\n model.fit(data, labels, epochs=100, batch_size=32)\n\n\nif __name__ == '__main__':\n # Run both processes simultaneously\n Process(target=startTensorboard, args=(\"logs\",)).start()\n Process(target=fitModel).start()",
"import tensorflow as tf \nfrom keras.preprocessing import image\nimport numpy as np\n\ndef prepare_image(file):\n img_path = ''\n img = tf.keras.preprocessing.image.load_img(img_path + file, target_size=(224, 224))\n img_array = tf.keras.preprocessing.image.img_to_array(img)\n img_array_expanded_dims = np.expand_dims(img_array, axis=0)\n return tf.keras.applications.mobilenet_v2.preprocess_input(img_array_expanded_dims)\n\ndef sigmoid(x):\n s = 1 / (1 + np.exp(-x))\n return s\n\n\nimg = \"tests1.jpg\"\n\nimg = prepare_image(img)\n\nmodel = tf.keras.models.load_model('models/test.h5')\n\npred = model.predict(img)\nprint(sigmoid(pred))\nprint(np.argmax(pred))\n"
] | [
[
"tensorflow.keras.layers.Dense",
"numpy.random.random",
"tensorflow.keras.models.Sequential",
"numpy.random.randint"
],
[
"tensorflow.keras.models.load_model",
"numpy.expand_dims",
"tensorflow.keras.preprocessing.image.load_img",
"tensorflow.keras.applications.mobilenet_v2.preprocess_input",
"numpy.argmax",
"numpy.exp",
"tensorflow.keras.preprocessing.image.img_to_array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.3",
"2.4",
"2.5",
"2.6"
]
}
] |
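img2array.py above prepares a single image for MobileNetV2 by adding a batch axis and calling the model's preprocess_input. A minimal sketch of that step, assuming TensorFlow 2.x and using a random array in place of the JPEG on disk:

import numpy as np
import tensorflow as tf

img_array = np.random.randint(0, 255, (224, 224, 3)).astype('float32')  # stand-in for a decoded JPEG
batch = np.expand_dims(img_array, axis=0)                                # (1, 224, 224, 3)
x = tf.keras.applications.mobilenet_v2.preprocess_input(batch)           # scales pixels to [-1, 1]
print(x.shape, float(x.min()), float(x.max()))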
YonginKwon/glow | [
"7d316d028e1792534416755bf80af422adccdaa9",
"7d316d028e1792534416755bf80af422adccdaa9",
"7d316d028e1792534416755bf80af422adccdaa9",
"7d316d028e1792534416755bf80af422adccdaa9"
] | [
"torch_glow/tests/nodes/adaptive_avg_pool2d_test.py",
"torch_glow/tests/nodes/quantized_linear_test.py",
"torch_glow/tests/nodes/quantized_conv2d_relu_test.py",
"torch_glow/tests/nodes/rsub_test.py"
] | [
"from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport torch\nimport torch.nn.functional as F\n\nfrom tests.utils import jitVsGlow\nimport unittest\n\n\nclass TestAdaptiveAvgPool2d(unittest.TestCase):\n def test_adaptive_avg_pool2d_basic(self):\n \"\"\"Basic test of PyTorch adaptive_avg_pool2d Node.\"\"\"\n\n def test_f(inputs):\n return F.adaptive_avg_pool2d(inputs, (5, 5))\n\n inputs = torch.randn(3, 6, 14, 14)\n\n jitVsGlow(test_f, inputs, expected_fused_ops={\n \"aten::adaptive_avg_pool2d\"})\n\n def test_adaptive_avg_pool2d_nonsquare_inputs(self):\n \"\"\"Test of PyTorch adaptive_avg_pool2d Node with non-square inputs.\"\"\"\n\n def test_f(inputs):\n return F.adaptive_avg_pool2d(inputs, (3, 3))\n\n inputs = torch.randn(3, 6, 13, 14)\n\n jitVsGlow(test_f, inputs, expected_fused_ops={\n \"aten::adaptive_avg_pool2d\"})\n\n def test_adaptive_avg_pool2d_nonsquare_outputs(self):\n \"\"\"Test of PyTorch adaptive_avg_pool2d Node with non-square outputs.\"\"\"\n\n def test_f(inputs):\n return F.adaptive_avg_pool2d(inputs, (5, 3))\n\n inputs = torch.randn(3, 6, 14, 14)\n\n jitVsGlow(test_f, inputs, expected_fused_ops={\n \"aten::adaptive_avg_pool2d\"})\n",
"from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport torch\n\nfrom tests.utils import jitVsGlow\nimport unittest\n\n\nclass TestQuantizedLinear(unittest.TestCase):\n def test_quantized_linear_packed(self):\n \"\"\"Basic test of the PyTorch quantized::linear Node on Glow.\"\"\"\n\n q = torch.nn.quantized.Quantize(\n scale=1 / 25, zero_point=17, dtype=torch.quint8)\n dq = torch.nn.quantized.DeQuantize()\n\n linear = torch.nn.Linear(5, 5)\n\n linear.weight.data.fill_(1.2)\n linear.bias.data.fill_(3.0)\n\n model = torch.nn.Sequential(q, linear, dq)\n model.qconfig = torch.quantization.get_default_qconfig(\"fbgemm\")\n torch.quantization.prepare(model, inplace=True)\n torch.quantization.convert(model, inplace=True)\n\n x = torch.tensor(range(5), dtype=torch.float)\n x = torch.cat((x, x, x, x, x))\n x = torch.reshape(x, [5, 5])\n\n jitVsGlow(\n model,\n x,\n expected_fused_ops={\n \"aten::quantize_per_tensor\",\n \"quantized::linear\",\n \"aten::dequantize\",\n },\n )\n\n def test_quantized_linear_packed_dq_cut(self):\n \"\"\"Basic test of the PyTorch quantized::linear Node on Glow, with dequantize excluded. \"\"\"\n\n q = torch.nn.quantized.Quantize(\n scale=1 / 25, zero_point=17, dtype=torch.quint8)\n dq = torch.nn.quantized.DeQuantize()\n\n linear = torch.nn.Linear(5, 5)\n\n linear.weight.data.fill_(1.2)\n linear.bias.data.fill_(3.0)\n\n model = torch.nn.Sequential(q, linear, dq)\n model.qconfig = torch.quantization.get_default_qconfig(\"fbgemm\")\n torch.quantization.prepare(model, inplace=True)\n torch.quantization.convert(model, inplace=True)\n\n x = torch.tensor(range(5), dtype=torch.float)\n x = torch.cat((x, x, x, x, x))\n x = torch.reshape(x, [5, 5])\n\n jitVsGlow(\n model,\n x,\n expected_fused_ops={\n \"aten::quantize_per_tensor\",\n \"quantized::linear\",\n },\n black_list=[\n \"aten::dequantize\",\n ]\n )\n\n @unittest.skip(reason=\"random input could cause flaky\")\n def test_quantized_linear_random_input(self):\n \"\"\"Basic test of the PyTorch quantized::linear Node on Glow.\"\"\"\n\n def test_f(inputs, weights, bias=None):\n q_int = torch.nn.quantized.Quantize(\n scale=1 / 13, zero_point=0, dtype=torch.qint8\n )\n q_uint = torch.nn.quantized.Quantize(\n scale=1 / 13, zero_point=10, dtype=torch.quint8\n )\n\n dq = torch.nn.quantized.DeQuantize()\n\n q_inputs = q_uint(inputs)\n q_weights = q_int(weights)\n\n return dq(torch.nn.quantized.functional.linear(q_inputs, q_weights, bias))\n\n for _ in range(100):\n inputs = torch.randn(7, 7)\n weights = torch.randn(7, 7)\n\n bias = torch.tensor([1, 1, 1, 1, 1, 1, 1], dtype=torch.float) * 0.1\n\n jitVsGlow(\n test_f,\n inputs,\n weights,\n bias,\n expected_fused_ops={\n \"glow::unpacked_quantized_linear\",\n \"aten::quantize_per_tensor\",\n \"aten::dequantize\",\n },\n )\n\n def test_quantized_linear_packed_rowwise(self):\n \"\"\"Basic test of the PyTorch quantized::linear Node with rowwise quantized\n packed weights on Glow.\"\"\"\n\n linear = torch.nn.Linear(6, 5)\n linear.weight.data.random_(0, 100)\n linear.bias.data.random_(0, 10)\n\n x = torch.tensor(range(30), dtype=torch.float)\n x = torch.reshape(x, [5, 6])\n\n model = torch.quantization.QuantWrapper(linear)\n model.qconfig = torch.quantization.get_default_qconfig('fbgemm')\n torch.quantization.prepare(model, inplace=True)\n torch.quantization.convert(model, inplace=True)\n\n jitVsGlow(model, x, expected_fused_ops={\"aten::quantize_per_tensor\",\n \"quantized::linear\",\n \"aten::dequantize\"})\n",
"from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport torch\n\nfrom tests.utils import jitVsGlow\nfrom collections import OrderedDict\nimport unittest\n\n\nclass TestQuantizedConv2dRelu(unittest.TestCase):\n def _test_quantized_conv2d_relu_packed(self, groups):\n \"\"\"Basic test of PyTorch quantized::conv2d_relu Node with packed weights on Glow.\"\"\"\n with torch.no_grad():\n x = torch.tensor(range(5), dtype=torch.float) / 3\n x = torch.cat((x, x, x, x, x))\n x = torch.cat((x, x, x))\n x = torch.reshape(x, [1, 3, 5, 5])\n q = torch.nn.quantized.Quantize(1, 2, torch.quint8)\n conv = torch.nn.Conv2d(3, 3, [2, 2], groups=groups)\n relu = torch.nn.ReLU()\n dq = torch.nn.quantized.DeQuantize()\n\n # Due to the off-by-one error, we cannot let the weights, bias & input\n # to be totally random.\n conv.weight.set_(torch.arange(36/groups, dtype=torch.float).reshape([3,\n 3//groups, 2,\n 2])\n / 3)\n conv.bias.data.fill_(2)\n\n model = torch.nn.Sequential(\n OrderedDict(\n [(\"quantize\", q), (\"conv1\", conv),\n (\"relu1\", relu), (\"dequantize\", dq)]\n )\n )\n model.eval()\n model.qconfig = torch.quantization.get_default_qconfig(\"fbgemm\")\n\n # Fuse conv and relu to conv_relu\n model = torch.quantization.fuse_modules(\n model, [[\"conv1\", \"relu1\"]])\n\n torch.quantization.prepare(model, inplace=True)\n torch.quantization.convert(model, inplace=True)\n\n jitVsGlow(\n model,\n x,\n expected_fused_ops={\n \"aten::quantize_per_tensor\",\n \"quantized::conv2d_relu\",\n \"aten::dequantize\",\n },\n )\n\n def test_quantized_conv2d_relu_packed_groupwise(self):\n \"\"\"PyTorch groupwise quantized::conv2d_relu Node with packed weights on Glow.\"\"\"\n self._test_quantized_conv2d_relu_packed(groups=3)\n\n def test_quantized_conv2d_relu_packed_nongroupwise(self):\n \"\"\"PyTorch vanilla quantized::conv2d_relu Node with packed weights on Glow.\"\"\"\n self._test_quantized_conv2d_relu_packed(groups=1)\n\n def test_quantized_conv2d_relu_packed_cut_q_dq(self):\n \"\"\"Basic test of PyTorch quantized::conv2d_relu Node with packed weights on Glow, with quantize and dequantize excluded. \"\"\"\n with torch.no_grad():\n x = torch.tensor(range(5), dtype=torch.float) / 3\n x = torch.cat((x, x, x, x, x))\n x = torch.cat((x, x, x))\n x = torch.reshape(x, [1, 3, 5, 5])\n q = torch.nn.quantized.Quantize(1, 2, torch.quint8)\n conv = torch.nn.Conv2d(3, 3, [2, 2], groups=1)\n relu = torch.nn.ReLU()\n dq = torch.nn.quantized.DeQuantize()\n\n # Due to the off-by-one error, we cannot let the weights, bias & input\n # to be totally random.\n conv.weight.set_(torch.arange(36, dtype=torch.float).reshape([3,\n 3, 2,\n 2])\n / 3)\n conv.bias.data.fill_(2)\n\n model = torch.nn.Sequential(\n OrderedDict(\n [(\"quantize\", q), (\"conv1\", conv),\n (\"relu1\", relu), (\"dequantize\", dq)]\n )\n )\n model.eval()\n model.qconfig = torch.quantization.get_default_qconfig(\"fbgemm\")\n\n # Fuse conv and relu to conv_relu\n model = torch.quantization.fuse_modules(\n model, [[\"conv1\", \"relu1\"]])\n\n torch.quantization.prepare(model, inplace=True)\n torch.quantization.convert(model, inplace=True)\n\n jitVsGlow(\n model,\n x,\n expected_fused_ops={\n \"quantized::conv2d_relu\",\n },\n black_list=[\n \"aten::quantize_per_tensor\",\n \"aten::dequantize\",\n ]\n )\n",
"from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport torch\n\nfrom tests.utils import jitVsGlow\nimport unittest\n\n\nclass TestRsub(unittest.TestCase):\n def test_rsub_basic(self):\n \"\"\"Basic test of the PyTorch rsub Node on Glow.\"\"\"\n\n def test_f(a, b):\n c = torch.rsub(a,b)\n return torch.rsub(c,c)\n\n x = torch.randn(4)\n y = torch.randn(4)\n\n jitVsGlow(test_f, x, y, expected_fused_ops={\"aten::rsub\"})\n\n def test_rsub_broadcast_1(self):\n \"\"\"Test of the PyTorch rsub Node on Glow with broadcasting.\"\"\"\n\n def test_f(a, b):\n c = torch.rsub(a,b)\n return torch.rsub(c,c)\n\n x = torch.randn(8, 3, 4, 2)\n y = torch.randn(4, 2)\n\n jitVsGlow(test_f, x, y, expected_fused_ops={\"aten::rsub\"})\n\n def test_rsub_broadcast_2(self):\n \"\"\"Test of the PyTorch rsub Node on Glow with broadcasting.\"\"\"\n\n def test_f(a, b):\n c = torch.rsub(a,b)\n return torch.rsub(c,c)\n\n x = torch.randn(8, 3, 4, 2)\n y = torch.randn(1, 2)\n\n jitVsGlow(test_f, x, y, expected_fused_ops={\"aten::rsub\"})\n\n def test_rsub_broadcast_3(self):\n \"\"\"Test of the PyTorch rsub Node on Glow with broadcasting.\"\"\"\n\n def test_f(a, b):\n c = torch.rsub(a,b)\n return torch.rsub(c,c)\n\n x = torch.randn(4, 2)\n y = torch.randn(8, 3, 4, 2)\n\n jitVsGlow(test_f, x, y, expected_fused_ops={\"aten::rsub\"})\n\n def test_rsub_float(self):\n \"\"\"Test of the PyTorch aten::rsub Node with a float argument\"\"\"\n\n def test_f(a):\n return torch.rsub((a * a), 3.9)\n\n x = torch.randn(4)\n\n jitVsGlow(test_f, x, expected_fused_ops={\"aten::rsub\"})\n\n def test_rsub_int(self):\n \"\"\"Test of the PyTorch aten::rsub Node with an int argument\"\"\"\n\n def test_f(a):\n return torch.rsub((a * a),20)\n\n x = torch.randn(4)\n\n jitVsGlow(test_f, x, expected_fused_ops={\"aten::rsub\"})\n"
] | [
[
"torch.randn",
"torch.nn.functional.adaptive_avg_pool2d"
],
[
"torch.nn.Sequential",
"torch.nn.quantized.DeQuantize",
"torch.quantization.prepare",
"torch.cat",
"torch.nn.quantized.functional.linear",
"torch.randn",
"torch.reshape",
"torch.tensor",
"torch.quantization.get_default_qconfig",
"torch.nn.Linear",
"torch.quantization.convert",
"torch.nn.quantized.Quantize",
"torch.quantization.QuantWrapper"
],
[
"torch.nn.quantized.DeQuantize",
"torch.quantization.prepare",
"torch.cat",
"torch.quantization.fuse_modules",
"torch.reshape",
"torch.nn.Conv2d",
"torch.nn.ReLU",
"torch.quantization.get_default_qconfig",
"torch.no_grad",
"torch.quantization.convert",
"torch.arange",
"torch.nn.quantized.Quantize"
],
[
"torch.randn",
"torch.rsub"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
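The Glow tests above all build quantized models with PyTorch's eager-mode flow: wrap a float module in quant/dequant stubs, attach a qconfig, prepare, calibrate, convert. A minimal sketch of that flow on a small Linear layer, assuming a CPU build with the fbgemm backend available:

import torch

linear = torch.nn.Linear(5, 5)
model = torch.quantization.QuantWrapper(linear)          # adds QuantStub/DeQuantStub around the module
model.eval()
model.qconfig = torch.quantization.get_default_qconfig('fbgemm')
torch.quantization.prepare(model, inplace=True)          # insert observers
model(torch.randn(4, 5))                                 # calibration pass records activation ranges
torch.quantization.convert(model, inplace=True)          # swap in quantized modules
print(model(torch.randn(4, 5)).shape)                    # torch.Size([4, 5]), dequantized float output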
sdat2/seager19 | [
"9c3acbc5332da787de1eda2600a82490ff20fa11"
] | [
"src/visualisation/arrow.py"
] | [
"\"\"\"Arrow plots for mechanism.\"\"\"\nimport os\nfrom src.plot_utils import ps_defaults\nfrom src.constants import FIGURE_PATH\nfrom typing import Optional\nimport matplotlib.pyplot as plt\n\n\ndef plot_arrow_plot(save_path: Optional[str] = None, show_plots: bool = False) -> None:\n \"\"\"\n Plot the arrow plot to show that I have reproduced the paper.\n\n Args:\n save_path (Optional[str], optional): Where to save the plot to.\n Defaults to None. If None will not save.\n show_plots (bool, optional): Whether to show plots. Defaults to False.\n \"\"\"\n ps_defaults(use_tex=False)\n\n color_d = {\n \"EEEE\": \"blue\",\n \"EECE\": \"green\",\n \"EEEC\": \"orange\",\n \"EECC\": \"red\",\n }\n\n def plot_error(x: float, y: float, yerr: float, mem: str) -> None:\n plt.fill_between(\n [x - 0.2, x + 0.2],\n [y + yerr, y + yerr],\n [y - yerr, y - yerr],\n color=color_d[mem],\n alpha=0.5,\n )\n plt.plot([x - 0.2, x + 0.2], [y, y], \"black\", linewidth=1)\n\n xlim = [0.5, 3.5]\n head_length = 0.02\n decrease_arrow = 0.01\n ax = plt.axes()\n ecmwf = 0.411\n # ax.arrow(0, 0, 0, 1, head_width=0.02, head_length=0.02, fc='k', ec='k')\n ax.arrow(\n 1,\n ecmwf,\n 0,\n 0.054 - head_length - decrease_arrow,\n head_width=0.02,\n head_length=head_length,\n fc=\"k\",\n ec=\"k\",\n )\n plot_error(1, ecmwf + 0.054, 0.005, \"EECE\")\n ax.arrow(\n 2,\n ecmwf,\n 0,\n 0.31 - head_length - decrease_arrow,\n head_width=0.02,\n head_length=head_length,\n fc=\"k\",\n ec=\"k\",\n )\n plot_error(2, ecmwf + 0.31, 0.03, \"EEEC\")\n ax.arrow(\n 3,\n ecmwf,\n 0,\n 0.47 - head_length - decrease_arrow,\n head_width=0.02,\n head_length=head_length,\n fc=\"k\",\n ec=\"k\",\n )\n plot_error(3, ecmwf + 0.47, 0.04, \"EECC\")\n plt.plot(xlim, [ecmwf, ecmwf], color=\"blue\", label=\"ECMWF/ORAS4 $= 0.411$ K \")\n plt.plot(\n xlim, [ecmwf + 0.478, ecmwf + 0.478], color=\"red\", label=\"CMIP5 MMM $= 0.889$ K\"\n )\n\n # plt.xticks([0, 1, 2, 3], [\"ECMWF\", \"W\", \"RH\", \"RH+W\"])\n plt.xticks(\n [1, 2, 3],\n [\n \"W\\n\" + r\"$+ 0.054 \\pm 0.005$ K \",\n \"RH\\n \" + r\"$+ 0.31 \\pm 0.03$ K\",\n \"RH+W\\n \" + r\"$+ 0.47 \\pm 0.04$ K\",\n ],\n )\n\n plt.xlim(xlim)\n plt.ylabel(\"1958-2017, Trend in nino3.4 [K]\")\n\n plt.legend(\n bbox_to_anchor=(0.0, 1.02, 1, 0.102),\n loc=\"lower left\",\n mode=\"expand\",\n ncol=2,\n )\n plt.tight_layout()\n\n if save_path is not None:\n plt.savefig(save_path)\n if show_plots:\n plt.show()\n else:\n plt.clf()\n\n\ndef plot_arrow_plot_6(\n save_path: Optional[str] = None, show_plots: bool = False\n) -> None:\n \"\"\"\n Plot the arrow plot to show how it performs in cmip6.\n\n Args:\n save_path (Optional[str], optional): Where to save the plot to.\n Defaults to None. If None will not save.\n show_plots (bool, optional): Whether to show plots. 
Defaults to False.\n \"\"\"\n ps_defaults(use_tex=False)\n\n color_d = {\n \"EEEE\": \"blue\",\n \"EECE\": \"green\",\n \"EEEC\": \"orange\",\n \"EECC\": \"red\",\n }\n\n def plot_error(x: float, y: float, yerr: float, mem: str) -> None:\n plt.fill_between(\n [x - 0.2, x + 0.2],\n [y + yerr, y + yerr],\n [y - yerr, y - yerr],\n color=color_d[mem],\n alpha=0.5,\n )\n plt.plot([x - 0.2, x + 0.2], [y, y], \"black\", linewidth=1)\n\n xlim = [0.5, 3.5]\n head_length = 0.02\n decrease_arrow = 0.01\n ax = plt.axes()\n ecmwf = 0.411\n # ax.arrow(0, 0, 0, 1, head_width=0.02, head_length=0.02, fc='k', ec='k')\n wind = 0.07\n wind_error = 0.01\n rh = 0.15\n rh_error = 0.02\n cmip6 = 0.772\n rh_and_wind = 0.29\n rh_and_wind_error = 0.04\n\n ax.arrow(\n 1,\n ecmwf,\n 0,\n wind - head_length - decrease_arrow,\n head_width=0.02,\n head_length=head_length,\n fc=\"k\",\n ec=\"k\",\n )\n plot_error(1, ecmwf + wind, wind_error, \"EECE\")\n ax.arrow(\n 2,\n ecmwf,\n 0,\n rh - head_length - decrease_arrow,\n head_width=0.02,\n head_length=head_length,\n fc=\"k\",\n ec=\"k\",\n )\n plot_error(2, ecmwf + rh, rh_error, \"EEEC\")\n ax.arrow(\n 3,\n ecmwf,\n 0,\n rh_and_wind - head_length - decrease_arrow,\n head_width=0.02,\n head_length=head_length,\n fc=\"k\",\n ec=\"k\",\n )\n plot_error(3, ecmwf + rh_and_wind, rh_and_wind_error, \"EECC\")\n plt.plot(xlim, [ecmwf, ecmwf], color=\"blue\", label=\"ECMWF/ORAS4 $= 0.411$ K \")\n plt.plot(\n xlim,\n [cmip6, cmip6],\n color=\"red\",\n label=\"CMIP6 MMM $= 0.772$ K\",\n )\n\n # plt.xticks([0, 1, 2, 3], [\"ECMWF\", \"W\", \"RH\", \"RH+W\"])\n plt.xticks(\n [1, 2, 3],\n [\n \"W\\n\"\n + r\"$+ $\"\n + str(wind)\n + r\" $\\pm$ \"\n + r\"$\"\n + str(wind_error)\n + r\"$\"\n + \" K \",\n \"RH\\n \" + r\"$+ $ $0.15$ $\\pm$ $0.02$ K\",\n \"RH+W\\n \" + r\"$+ $ $0.29$ $\\pm$ $0.04$ K\",\n ],\n )\n\n plt.xlim(xlim)\n plt.ylabel(\"1958-2017, Trend in nino3.4 [K]\")\n\n plt.legend(\n bbox_to_anchor=(0.0, 1.02, 1, 0.102),\n loc=\"lower left\",\n mode=\"expand\",\n ncol=2,\n )\n plt.tight_layout()\n\n if save_path is not None:\n plt.savefig(save_path)\n if show_plots:\n plt.show()\n else:\n plt.clf()\n\n\nif __name__ == \"__main__\":\n # python src/visualisation.arrow()\n plot_arrow_plot_6(save_path=os.path.join(FIGURE_PATH, \"mech_arrow_cmip6.pdf\"))\n plot_arrow_plot_6(save_path=os.path.join(FIGURE_PATH, \"mech_arrow_cmip6.png\"))\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.fill_between",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
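plot_arrow_plot above combines ax.arrow with a fill_between error band for each mechanism. A minimal sketch of that arrow-plus-band pattern with made-up baseline and increment values, using the Agg backend so it runs headless:

import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt

baseline, increment, err = 0.411, 0.054, 0.005
head_length, gap = 0.02, 0.01
ax = plt.axes()
# Shorten the arrow shaft by the head length plus a small gap, as in the original.
ax.arrow(1, baseline, 0, increment - head_length - gap,
         head_width=0.02, head_length=head_length, fc='k', ec='k')
# Shaded error band centred on the arrow head, mirroring plot_error above.
plt.fill_between([0.8, 1.2],
                 [baseline + increment + err] * 2,
                 [baseline + increment - err] * 2, alpha=0.5)
plt.plot([0.8, 1.2], [baseline + increment] * 2, 'black', linewidth=1)
plt.xlim([0.5, 1.5])
plt.ylabel('Trend in nino3.4 [K]')
plt.savefig('arrow_sketch.png')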
vespos/pcdsdevices | [
"7c4728df62ea58b6491d1cb36bb39d27d6dd9fca"
] | [
"pcdsdevices/tests/test_ccm.py"
] | [
"import logging\nimport time\n\nimport numpy as np\nimport pytest\nfrom ophyd.sim import fake_device_cache, make_fake_device\n\nfrom .. import ccm\nfrom ..sim import FastMotor\n\nlogger = logging.getLogger(__name__)\n\n\nSAMPLE_ALIO = 4.575 # Current value as of writing this file\nSAMPLE_THETA = 1.2 # Modest angle\nSAMPLE_WAVELENGTH = 1.5 # hard xray\n\n\n# Make sure the calcs are properly inverted\ndef test_theta_alio_inversion():\n logger.debug('test_theta_alio_inversion')\n theta = ccm.alio_to_theta(SAMPLE_ALIO, ccm.default_theta0, ccm.default_gr,\n ccm.default_gd)\n alio_calc = ccm.theta_to_alio(theta, ccm.default_theta0, ccm.default_gr,\n ccm.default_gd)\n # Unlike the other inversions, this is just an approximation\n assert np.isclose(alio_calc, SAMPLE_ALIO)\n\n\ndef test_wavelength_theta_inversion():\n logger.debug('test_wavelength_theta_inversion')\n wavelength = ccm.theta_to_wavelength(SAMPLE_THETA, ccm.default_dspacing)\n theta = ccm.wavelength_to_theta(wavelength, ccm.default_dspacing)\n logger.debug('%s, %s', wavelength, theta)\n assert np.isclose(theta, SAMPLE_THETA)\n theta = ccm.wavelength_to_theta(SAMPLE_WAVELENGTH, ccm.default_dspacing)\n wavelength = ccm.theta_to_wavelength(theta, ccm.default_dspacing)\n logger.debug('%s, %s', wavelength, theta)\n assert np.isclose(wavelength, SAMPLE_WAVELENGTH)\n\n\ndef test_energy_wavelength_inversion():\n logger.debug('test_energy_wavelength_inversion')\n energy = ccm.wavelength_to_energy(SAMPLE_WAVELENGTH)\n wavelength_calc = ccm.energy_to_wavelength(energy)\n assert wavelength_calc == SAMPLE_WAVELENGTH\n\n\[email protected](scope='function')\ndef fake_ccm():\n return make_fake_ccm()\n\n\nclass FakeAlio(FastMotor):\n kill = None\n home = None\n\n\ndef make_fake_ccm():\n fake_device_cache[ccm.CCMMotor] = FastMotor\n fake_device_cache[ccm.CCMAlio] = FakeAlio\n FakeCCM = make_fake_device(ccm.CCM)\n fake_ccm = FakeCCM(alio_prefix='ALIO', theta2fine_prefix='THETA',\n theta2coarse_prefix='THTA', chi2_prefix='CHI',\n x_down_prefix='X:DOWN', x_up_prefix='X:UP',\n y_down_prefix='Y:DOWN', y_up_north_prefix='Y:UP:NORTH',\n y_up_south_prefix='Y:UP:SOUTH', in_pos=8, out_pos=0,\n name='fake_ccm')\n\n def init_pos(mot, pos=0):\n mot.user_readback.sim_put(0)\n mot.user_setpoint.sim_put(0)\n mot.user_setpoint.sim_set_limits((0, 0))\n mot.motor_spg.sim_put(2)\n mot.part_number.sim_put('tasdf')\n\n init_pos(fake_ccm.x.down)\n init_pos(fake_ccm.x.up)\n init_pos(fake_ccm.y.down)\n init_pos(fake_ccm.y.up_north)\n init_pos(fake_ccm.y.up_south)\n\n fake_ccm.alio.set(SAMPLE_ALIO)\n fake_ccm.energy.alio.set(SAMPLE_ALIO)\n fake_ccm.energy_with_vernier.alio.set(SAMPLE_ALIO)\n fake_ccm.energy_with_vernier.vernier.setpoint.sim_put(0)\n\n return fake_ccm\n\n\ndef test_fake_ccm(fake_ccm):\n logger.debug('test_fake_ccm')\n fake_ccm.get()\n\n\n# Make sure we set up the forward/inverse to use the right methods\ndef test_ccm_calc(fake_ccm):\n logger.debug('test_ccm_calc')\n calc = fake_ccm.energy\n\n logger.debug('physics pos is %s', calc.position)\n logger.debug('real pos is %s', calc.real_position)\n logger.debug('sample alio is %s', SAMPLE_ALIO)\n\n theta_func = ccm.alio_to_theta(\n SAMPLE_ALIO,\n calc.theta0_rad_val,\n calc.gr_val,\n calc.gd_val,\n )\n wavelength_func = ccm.theta_to_wavelength(theta_func, calc.dspacing_val)\n energy_func = ccm.wavelength_to_energy(wavelength_func)\n energy = calc.energy.position\n assert energy == energy_func\n\n calc.alio.move(0)\n calc.move(energy, wait=False)\n assert np.isclose(calc.alio.position, SAMPLE_ALIO)\n\n 
calc.alio.move(calc.alio.position)\n calc.move(energy=calc.energy.position, wait=False)\n assert np.isclose(calc.alio.position, SAMPLE_ALIO)\n\n\n# Make sure sync'd axes work and that unk/in/out states work\[email protected](5)\ndef test_ccm_main(fake_ccm):\n logger.debug('test_ccm_main')\n fake_ccm.y.move(5, wait=False)\n assert fake_ccm.y.down.user_setpoint.get() == 5\n assert fake_ccm.y.up_north.user_setpoint.get() == 5\n assert fake_ccm.y.up_south.user_setpoint.get() == 5\n\n assert fake_ccm.removed\n assert not fake_ccm.inserted\n\n fake_ccm.x.down.user_readback.sim_put(8)\n fake_ccm.x.up.user_readback.sim_put(8)\n assert not fake_ccm.removed\n assert fake_ccm.inserted\n\n fake_ccm.x.down.user_readback.sim_put(4)\n fake_ccm.x.up.user_readback.sim_put(4)\n assert not fake_ccm.removed\n assert not fake_ccm.inserted\n\n fake_ccm.insert(wait=False)\n assert fake_ccm.x.down.user_setpoint.get() == 8\n assert fake_ccm.x.up.user_setpoint.get() == 8\n\n fake_ccm.remove(wait=False)\n assert fake_ccm.x.down.user_setpoint.get() == 0\n assert fake_ccm.x.up.user_setpoint.get() == 0\n\n\[email protected](5)\ndef test_vernier(fake_ccm):\n logger.debug('test_vernier')\n\n pseudopos = fake_ccm.energy_with_vernier\n\n # Moving with vernier should move the energy request motor too\n pseudopos.move(7, wait=False)\n assert np.isclose(pseudopos.energy.position, 7)\n assert pseudopos.vernier.position == 7000\n\n pseudopos.move(8, wait=False)\n assert np.isclose(pseudopos.energy.position, 8)\n assert pseudopos.vernier.position == 8000\n\n pseudopos.move(9, wait=False)\n assert np.isclose(pseudopos.energy.position, 9)\n assert pseudopos.vernier.position == 9000\n\n # Small moves (less than 30eV) should be skipped on the energy request\n pseudopos.move(9.001, wait=False)\n assert np.isclose(pseudopos.energy.position, 9.001)\n assert pseudopos.vernier.position == 9000\n\n # Unless we set the option for not skipping them\n pseudopos.vernier.skip_small_moves = False\n pseudopos.move(9.002, wait=False)\n assert np.isclose(pseudopos.energy.position, 9.002)\n assert pseudopos.vernier.position == 9002\n\n\[email protected](5)\ndef test_set_current_position(fake_ccm):\n logger.debug('test_set_current_position')\n mot = fake_ccm.energy.energy\n for energy in range(6, 14):\n mot.set_current_position(energy)\n assert np.isclose(mot.position, energy)\n\n\[email protected](5)\ndef test_check_valid_constant(fake_ccm):\n logger.debug('test_check_valid_constant')\n\n # First call to make_valid sends the first monitor update\n def make_valid(sig, valid):\n if valid:\n sig.put(1)\n else:\n sig.put(0)\n\n def make_conn(sig, conn):\n sig._metadata['connected'] = conn\n\n def output(sig):\n return fake_ccm._check_valid_constant(sig, sig.get())\n\n test_sig = fake_ccm.dspacing\n\n # Can we get to all the enum values?\n make_conn(test_sig, False)\n assert output(test_sig) == ccm.CCMConstantWarning.ALWAYS_DISCONNECT\n make_conn(test_sig, True)\n make_valid(test_sig, False)\n assert output(test_sig) == ccm.CCMConstantWarning.INVALID_CONNECT\n make_conn(test_sig, False)\n assert output(test_sig) == ccm.CCMConstantWarning.INVALID_DISCONNECT\n make_conn(test_sig, True)\n make_valid(test_sig, True)\n assert output(test_sig) == ccm.CCMConstantWarning.NO_WARNING\n make_conn(test_sig, False)\n assert output(test_sig) == ccm.CCMConstantWarning.VALID_DISCONNECT\n\n # theta0_deg is allowed to be zero, unlike the others\n test_sig2 = fake_ccm.theta0_deg\n make_conn(test_sig2, True)\n make_valid(test_sig2, False)\n assert output(test_sig2) == 
ccm.CCMConstantWarning.NO_WARNING\n\n\[email protected](5)\ndef test_show_constant_warning(fake_ccm, caplog):\n logger.debug('test_show_constant_warning')\n for warning in (\n ccm.CCMConstantWarning.NO_WARNING,\n ccm.CCMConstantWarning.ALWAYS_DISCONNECT,\n ccm.CCMConstantWarning.VALID_DISCONNECT,\n ccm.CCMConstantWarning.INVALID_DISCONNECT,\n ccm.CCMConstantWarning.INVALID_CONNECT,\n ):\n caplog.clear()\n with caplog.at_level(logging.WARNING):\n fake_ccm._show_constant_warning(\n warning,\n fake_ccm.dspacing,\n 0.111111,\n 0.222222,\n )\n if warning == ccm.CCMConstantWarning.NO_WARNING:\n assert len(caplog.records) == 0\n else:\n assert len(caplog.records) == 1\n\n\[email protected](5)\ndef test_warn_invalid_constants(fake_ccm, caplog):\n logger.debug('test_warn_invalid_constants')\n # Trick the warning into thinking we've be initialized for a while\n fake_ccm._init_time = time.monotonic() - 1000\n fake_ccm.theta0_deg.put(0)\n fake_ccm.dspacing.put(0)\n fake_ccm.gr.put(0)\n fake_ccm.gd.put(0)\n # We expect three warnings from the fake PVs that start at 0\n caplog.clear()\n with caplog.at_level(logging.WARNING):\n fake_ccm.warn_invalid_constants(only_new=False)\n assert len(caplog.records) == 3\n # We expect the warnings to not repeat\n caplog.clear()\n fake_ccm.warn_invalid_constants(only_new=True)\n assert len(caplog.records) == 0\n # Unless we ask them to\n caplog.clear()\n fake_ccm.warn_invalid_constants(only_new=False)\n assert len(caplog.records) == 3\n # Let's fix the issue and make sure no warnings are shown\n fake_ccm.reset_calc_constant_defaults(confirm=False)\n caplog.clear()\n fake_ccm.warn_invalid_constants(only_new=False)\n assert len(caplog.records) == 0\n\n\[email protected](5)\ndef test_disconnected_ccm():\n ccm.CCM(alio_prefix='ALIO', theta2fine_prefix='THETA',\n theta2coarse_prefix='THTA', chi2_prefix='CHI',\n x_down_prefix='X:DOWN', x_up_prefix='X:UP',\n y_down_prefix='Y:DOWN', y_up_north_prefix='Y:UP:NORTH',\n y_up_south_prefix='Y:UP:SOUTH', in_pos=8, out_pos=0,\n name='ccm')\n"
] | [
[
"numpy.isclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
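test_ccm.py above verifies the monochromator conversions by checking forward/inverse round trips with np.isclose. A minimal sketch of that round-trip pattern, using a toy Bragg-law wavelength/theta pair as a stand-in for the ccm module's own calculations (the d-spacing value here is hypothetical):

import numpy as np

def wavelength_to_theta(wavelength, dspacing):
    # First-order Bragg condition: lambda = 2 * d * sin(theta).
    return np.arcsin(wavelength / (2.0 * dspacing))

def theta_to_wavelength(theta, dspacing):
    return 2.0 * dspacing * np.sin(theta)

dspacing = 3.13          # hypothetical lattice spacing, Angstroms
wavelength = 1.5         # the hard x-ray sample value used in the tests above, Angstroms
theta = wavelength_to_theta(wavelength, dspacing)
assert np.isclose(theta_to_wavelength(theta, dspacing), wavelength)
print(theta)             # ~0.242 rad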
ElliotCheung/simpeg | [
"ce5bde154179ca63798a62a12787a7ec3535472c",
"ce5bde154179ca63798a62a12787a7ec3535472c"
] | [
"SimPEG/electromagnetics/analytics/FDEM.py",
"SimPEG/electromagnetics/static/resistivity/simulation_1d.py"
] | [
"from __future__ import division\nimport numpy as np\nfrom scipy.constants import mu_0, pi, epsilon_0\nfrom scipy.special import erf\nfrom SimPEG import utils\nimport warnings\n\n\ndef hzAnalyticDipoleF(r, freq, sigma, secondary=True, mu=mu_0):\n \"\"\"\n The analytical expression is given in Equation 4.56 in Ward and Hohmann,\n 1988, and the example reproduces their Figure 4.2.\n\n\n .. plot::\n\n import numpy as np\n import matplotlib.pyplot as plt\n from SimPEG import electromagnetics as EM\n freq = np.logspace(-1, 5, 301)\n test = EM.analytics.hzAnalyticDipoleF(\n 100, freq, 0.01, secondary=False)\n plt.loglog(freq, test.real, 'C0-', label='Real')\n plt.loglog(freq, -test.real, 'C0--')\n plt.loglog(freq, test.imag, 'C1-', label='Imaginary')\n plt.loglog(freq, -test.imag, 'C1--')\n plt.title('Response at $r=100$ m')\n plt.xlim([1e-1, 1e5])\n plt.ylim([1e-12, 1e-6])\n plt.xlabel('Frequency (Hz)')\n plt.ylabel('$H_z$ (A/m)')\n plt.legend(loc=6)\n plt.show()\n\n\n **Reference**\n\n - Ward, S. H., and G. W. Hohmann, 1988, Electromagnetic theory for\n geophysical applications, Chapter 4 of Electromagnetic Methods in Applied\n Geophysics: SEG, Investigations in Geophysics No. 3, 130--311; DOI:\n `10.1190/1.9781560802631.ch4\n <https://doi.org/10.1190/1.9781560802631.ch4>`_.\n\n \"\"\"\n r = np.abs(r)\n k = np.sqrt(-1j * 2.0 * np.pi * freq * mu * sigma)\n\n m = 1\n front = m / (2.0 * np.pi * (k**2) * (r**5))\n back = 9 - (\n 9 + 9j * k * r - 4 * (k**2) * (r**2) - 1j * (k**3) * (r**3)\n ) * np.exp(-1j * k * r)\n hz = front * back\n\n if secondary:\n hp = -1 / (4 * np.pi * r**3)\n hz = hz - hp\n\n if hz.ndim == 1:\n hz = utils.mkvc(hz, 2)\n\n return hz\n\n\ndef MagneticDipoleWholeSpace(\n XYZ, srcLoc, sig, f, moment, fieldType=\"b\", mu_r=1, eps_r=1, **kwargs\n):\n \"\"\"\n Analytical solution for a dipole in a whole-space.\n\n The analytical expression is given in Equation 2.57 in Ward and Hohmann,\n 1988, and the example reproduces their Figure 2.2.\n\n TODOs:\n - set it up to instead take a mesh & survey\n - add divide by zero safety\n\n\n .. plot::\n\n import numpy as np\n from SimPEG import electromagnetics as EM\n import matplotlib.pyplot as plt\n from scipy.constants import mu_0\n freqs = np.logspace(-2, 5, 301)\n Bx, By, Bz = EM.analytics.FDEM.MagneticDipoleWholeSpace(\n [0, 100, 0], [0, 0, 0], 1e-2, freqs, moment='Z')\n plt.figure()\n plt.loglog(freqs, Bz.real/mu_0, 'C0', label='Real')\n plt.loglog(freqs, -Bz.real/mu_0, 'C0--')\n plt.loglog(freqs, Bz.imag/mu_0, 'C1', label='Imaginary')\n plt.loglog(freqs, -Bz.imag/mu_0, 'C1--')\n plt.legend()\n plt.xlim([1e-2, 1e5])\n plt.ylim([1e-13, 1e-6])\n plt.show()\n\n **Reference**\n\n - Ward, S. H., and G. W. Hohmann, 1988, Electromagnetic theory for\n geophysical applications, Chapter 4 of Electromagnetic Methods in Applied\n Geophysics: SEG, Investigations in Geophysics No. 
3, 130--311; DOI:\n `10.1190/1.9781560802631.ch4\n <https://doi.org/10.1190/1.9781560802631.ch4>`_.\n\n \"\"\"\n\n orient = kwargs.pop(\"orientation\", None)\n if orient is not None:\n raise TypeError(\n \"orientation kwarg has been removed, please use the moment argument\",\n )\n magnitude = moment\n moment = orient\n else:\n magnitude = 1\n mu = kwargs.pop(\"mu\", None)\n if mu is not None:\n raise TypeError(\"mu kwarg has been removed, please use the mu_r argument.\")\n mu_r = mu / mu_0\n\n mu = mu_0 * mu_r\n eps = epsilon_0 * eps_r\n w = 2 * np.pi * f\n\n if isinstance(moment, str):\n if moment == \"X\":\n mx, my, mz = 1.0, 0.0, 0.0\n elif moment == \"Y\":\n mx, my, mz = 0.0, 1.0, 0.0\n elif moment == \"Z\":\n mx, my, mz = 0.0, 0.0, 1.0\n else:\n raise NotImplementedError(\"String type for moment not recognized\")\n mx, my, mz = mx * magnitude, my * magnitude, mz * magnitude\n else:\n mx, my, mz = moment[0], moment[1], moment[2]\n\n XYZ = utils.asArray_N_x_Dim(XYZ, 3)\n\n dx = XYZ[:, 0] - srcLoc[0]\n dy = XYZ[:, 1] - srcLoc[1]\n dz = XYZ[:, 2] - srcLoc[2]\n\n r = np.sqrt(dx**2.0 + dy**2.0 + dz**2.0)\n k = np.sqrt(-1j * w * mu * sig + w**2 * mu * eps)\n kr = k * r\n\n if fieldType in [\"h\", \"b\"]:\n front = 1 / (4.0 * pi * r**3.0) * np.exp(-1j * kr)\n mid = -(kr**2.0) + 3.0 * 1j * kr + 3.0\n\n Fx = front * (\n mx * ((dx / r) ** 2.0 * mid + (kr**2.0 - 1j * kr - 1.0))\n + my * ((dy * dx / r**2.0) * mid)\n + mz * ((dx * dz / r**2.0) * mid)\n )\n\n Fy = front * (\n mx * ((dx * dy / r**2.0) * mid)\n + my * ((dy / r) ** 2.0 * mid + (kr**2.0 - 1j * kr - 1.0))\n + mz * ((dy * dz / r**2.0) * mid)\n )\n\n Fz = front * (\n mx * ((dx * dz / r**2.0) * mid)\n + my * ((dy * dz / r**2.0) * mid)\n + mz * ((dz / r) ** 2.0 * mid + (kr**2.0 - 1j * kr - 1.0))\n )\n\n if fieldType == \"b\":\n Fx, Fy, Fz = mu * Fx, mu * Fy, mu * Fz\n\n elif fieldType == \"e\":\n\n front = 1j * w * mu * (1 + 1j * kr) / (4.0 * pi * r**3.0) * np.exp(-1j * kr)\n\n Fx = front * (my * (dz / r) + mz * (-dy / r))\n\n Fy = front * (mx * (-dz / r) + mz * (dx / r))\n\n Fz = front * (mx * (dy / r) + my * (-dx / r))\n\n return Fx, Fy, Fz\n\n\ndef ElectricDipoleWholeSpace(\n XYZ, srcLoc, sig, f, moment=\"X\", fieldType=\"e\", mu_r=1, eps_r=1, **kwargs\n):\n\n orient = kwargs.pop(\"orientation\", None)\n if orient is not None:\n raise TypeError(\n \"orientation kwarg has been removed, please use the moment argument.\"\n )\n mu = kwargs.pop(\"mu\", None)\n if mu is not None:\n raise TypeError(\"mu kwarg has been removed, please use the mu_r argument.\")\n cur = kwargs.pop(\"current\", None)\n if cur is not None:\n raise TypeError(\n \"current kwarg has been removed, please use the moment argument.\",\n )\n else:\n magnitude = 1\n length = kwargs.pop(\"length\", None)\n if length is not None:\n raise TypeError(\n \"length kwarg has been removed, please use the moment argument.\"\n )\n\n mu = mu_0 * mu_r\n eps = epsilon_0 * eps_r\n w = 2 * np.pi * f\n\n if isinstance(moment, str):\n if moment.upper() == \"X\":\n mx, my, mz = 1.0, 0.0, 0.0\n elif moment.upper() == \"Y\":\n mx, my, mz = 0.0, 1.0, 0.0\n elif moment.upper() == \"Z\":\n mx, my, mz = 0.0, 0.0, 1.0\n else:\n raise NotImplementedError(\"String type for moment not recognized\")\n mx, my, mz = mx * magnitude, my * magnitude, mz * magnitude\n\n else:\n mx, my, mz = moment[0], moment[1], moment[2]\n\n XYZ = utils.asArray_N_x_Dim(XYZ, 3)\n\n dx = XYZ[:, 0] - srcLoc[0]\n dy = XYZ[:, 1] - srcLoc[1]\n dz = XYZ[:, 2] - srcLoc[2]\n\n r = np.sqrt(dx**2.0 + dy**2.0 + dz**2.0)\n k = np.sqrt(-1j 
* w * mu * sig + w**2 * mu * eps)\n kr = k * r\n\n if fieldType == \"e\":\n\n front = 1 / (4.0 * np.pi * sig * r**3) * np.exp(-1j * k * r)\n mid = -(k**2) * r**2 + 3 * 1j * k * r + 3\n\n Fx = front * (\n mx * ((dx**2 / r**2) * mid + (k**2 * r**2 - 1j * k * r - 1.0))\n + my * (dy * dx / r**2) * mid\n + mz * (dz * dx / r**2) * mid\n )\n\n Fy = front * (\n mx * (dx * dy / r**2) * mid\n + my * ((dy**2 / r**2) * mid + (k**2 * r**2 - 1j * k * r - 1.0))\n + mz * (dz * dy / r**2) * mid\n )\n\n Fz = front * (\n mx * (dx * dz / r**2) * mid\n + my * (dy * dz / r**2) * mid\n + mz * ((dz**2 / r**2) * mid + (k**2 * r**2 - 1j * k * r - 1.0))\n )\n\n elif fieldType in [\"h\", \"b\"]:\n\n front = (1 + 1j * kr) / (4.0 * np.pi * r**2) * np.exp(-1j * k * r)\n\n Fx = front * (my * (dz / r) + mz * (-dy / r))\n\n Fy = front * (mx * (-dz / r) + mz * (dx / r))\n\n Fz = front * (mx * (dy / r) + my * (-dx / r))\n\n if fieldType == \"b\":\n Fx, Fy, Fz = mu * Fx, mu * Fy, mu * Fz\n\n return Fx, Fy, Fz\n",
"import numpy as np\nimport properties\n\nfrom ....utils import mkvc\nfrom ....simulation import BaseSimulation\nfrom .... import props\n\nfrom .survey import Survey\n\nfrom empymod.transform import dlf\n\ntry:\n from empymod.transform import get_spline_values as get_dlf_points\nexcept ImportError:\n from empymod.transform import get_dlf_points\nfrom empymod.utils import check_hankel\nfrom ..utils import static_utils\n\n\nclass Simulation1DLayers(BaseSimulation):\n \"\"\"\n 1D DC Simulation\n \"\"\"\n\n sigma, sigmaMap, sigmaDeriv = props.Invertible(\"Electrical conductivity (S/m)\")\n rho, rhoMap, rhoDeriv = props.Invertible(\"Electrical resistivity (Ohm m)\")\n props.Reciprocal(sigma, rho)\n\n thicknesses, thicknessesMap, thicknessesDeriv = props.Invertible(\n \"thicknesses of the layers\"\n )\n\n survey = properties.Instance(\"a DC survey object\", Survey, required=True)\n\n storeJ = properties.Bool(\"store the sensitivity\", default=False)\n\n data_type = \"volt\"\n hankel_pts_per_dec = None # Default: Standard DLF\n\n # TODO: using 51 filter coefficient could be overkill, use less if possible\n hankel_filter = \"key_51_2012\" # Default: Hankel filter\n\n _Jmatrix = None\n fix_Jmatrix = False\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n try:\n ht, htarg = check_hankel(\n \"fht\", [self.hankel_filter, self.hankel_pts_per_dec], 1\n )\n self.fhtfilt = htarg[0] # Store filter\n self.hankel_pts_per_dec = htarg[1] # Store pts_per_dec\n except ValueError:\n arg = {}\n arg[\"dlf\"] = self.hankel_filter\n if self.hankel_pts_per_dec is not None:\n arg[\"pts_per_dec\"] = self.hankel_pts_per_dec\n ht, htarg = check_hankel(\"dlf\", arg, 1)\n self.fhtfilt = htarg[\"dlf\"] # Store filter\n self.hankel_pts_per_dec = htarg[\"pts_per_dec\"] # Store pts_per_dec\n self.hankel_filter = self.fhtfilt.name # Store name\n self.n_filter = self.fhtfilt.base.size\n\n def fields(self, m):\n\n if m is not None:\n self.model = m\n\n if self.verbose:\n print(\">> Compute fields\")\n\n # TODO: this for loop can slow down the speed, cythonize below for loop\n T1 = self.rho[self.n_layer - 1] * np.ones_like(self.lambd)\n for ii in range(self.n_layer - 1, 0, -1):\n rho0 = self.rho[ii - 1]\n t0 = self.thicknesses[ii - 1]\n T0 = (T1 + rho0 * np.tanh(self.lambd * t0)) / (\n 1.0 + (T1 * np.tanh(self.lambd * t0) / rho0)\n )\n T1 = T0\n PJ = (T0, None, None)\n try:\n voltage = dlf(\n PJ,\n self.lambd,\n self.offset,\n self.fhtfilt,\n self.hankel_pts_per_dec,\n factAng=None,\n ab=33,\n ).real / (2 * np.pi)\n except TypeError:\n voltage = dlf(\n PJ,\n self.lambd,\n self.offset,\n self.fhtfilt,\n self.hankel_pts_per_dec,\n ang_fact=None,\n ab=33,\n ).real / (2 * np.pi)\n\n # Assume dipole-dipole\n V = voltage.reshape((self.survey.nD, 4), order=\"F\")\n data = V[:, 0] + V[:, 1] - (V[:, 2] + V[:, 3])\n\n if self.data_type == \"apparent_resistivity\":\n data /= self.geometric_factor\n\n return data\n\n def dpred(self, m=None, f=None):\n \"\"\"\n Project fields to receiver locations\n :param Fields u: fields object\n :rtype: numpy.ndarray\n :return: data\n \"\"\"\n\n if self.verbose:\n print(\"Calculating predicted data\")\n\n if f is None:\n if m is None:\n m = self.model\n f = self.fields(m)\n\n return f\n\n def getJ(self, m, f=None, factor=1e-2):\n \"\"\"\n Generate Full sensitivity matrix using central difference\n \"\"\"\n if self._Jmatrix is not None:\n return self._Jmatrix\n else:\n if self.verbose:\n print(\"Calculating J and storing\")\n self.model = m\n\n # TODO: this makes code quite slow derive analytic 
sensitivity\n N = self.survey.nD\n M = self.model.size\n Jmatrix = np.zeros((N, M), dtype=float, order=\"F\")\n for ii in range(M):\n m0 = m.copy()\n dm = m[ii] * factor\n m0[ii] = m[ii] - dm * 0.5\n m1 = m.copy()\n m1[ii] = m[ii] + dm * 0.5\n d0 = self.fields(m0)\n d1 = self.fields(m1)\n Jmatrix[:, ii] = (d1 - d0) / (dm)\n self._Jmatrix = Jmatrix\n return self._Jmatrix\n\n def Jvec(self, m, v, f=None):\n \"\"\"\n Compute sensitivity matrix (J) and vector (v) product.\n \"\"\"\n\n J = self.getJ(m, f=f)\n Jv = mkvc(np.dot(J, v))\n\n return mkvc(Jv)\n\n def Jtvec(self, m, v, f=None):\n \"\"\"\n Compute adjoint sensitivity matrix (J^T) and vector (v) product.\n \"\"\"\n\n J = self.getJ(m, f=f)\n Jtv = mkvc(np.dot(J.T, v))\n\n return Jtv\n\n @property\n def deleteTheseOnModelUpdate(self):\n toDelete = super().deleteTheseOnModelUpdate\n if self.fix_Jmatrix:\n return toDelete\n\n if self._Jmatrix is not None:\n toDelete = toDelete + [\"_Jmatrix\"]\n return toDelete\n\n @property\n def electrode_separations(self):\n \"\"\"\n Electrode separations\n \"\"\"\n # TODO: only works isotropic sigma\n if getattr(self, \"_electrode_separations\", None) is None:\n self._electrode_separations = static_utils.electrode_separations(\n self.survey\n )\n return self._electrode_separations\n\n @property\n def offset(self):\n \"\"\"\n Offset between a current electrode and a potential electrode\n \"\"\"\n # TODO: only works isotropic sigma\n if getattr(self, \"_offset\", None) is None:\n r_AM = self.electrode_separations[\"AM\"]\n r_AN = self.electrode_separations[\"AN\"]\n r_BM = self.electrode_separations[\"BM\"]\n r_BN = self.electrode_separations[\"BM\"]\n self._offset = np.r_[r_AM, r_AN, r_BM, r_BN]\n return self._offset\n\n @property\n def lambd(self):\n \"\"\"\n Spatial frequency in Hankel domain\n np.sqrt(kx*2 + ky**2) = lamda\n \"\"\"\n # TODO: only works isotropic sigma\n if getattr(self, \"_lambd\", None) is None:\n self._lambd = np.empty(\n [self.offset.size, self.n_filter], order=\"F\", dtype=complex\n )\n self.lambd[:, :], _ = get_dlf_points(\n self.fhtfilt, self.offset, self.hankel_pts_per_dec\n )\n return self._lambd\n\n # @property\n # def t(self):\n # \"\"\"\n # thickness of the layer\n # \"\"\"\n # # TODO: only works isotropic sigma\n # if getattr(self, '_t', None) is None:\n # self._t = self.mesh.hx[:-1]\n # return self._t\n\n @property\n def n_layer(self):\n \"\"\"\n number of layers\n \"\"\"\n # TODO: only works isotropic sigma\n if getattr(self, \"_n_layer\", None) is None:\n self._n_layer = self.thicknesses.size + 1\n return self._n_layer\n\n @property\n def geometric_factor(self):\n \"\"\"\n number of layers\n \"\"\"\n # TODO: only works isotropic sigma\n if getattr(self, \"_geometric_factor\", None) is None:\n r_AM = self.electrode_separations[\"AM\"]\n r_AN = self.electrode_separations[\"AN\"]\n r_BM = self.electrode_separations[\"BM\"]\n r_BN = self.electrode_separations[\"BM\"]\n self._geometric_factor = (1 / r_AM - 1 / r_BM - 1 / r_AN + 1 / r_BN) / (\n 2 * np.pi\n )\n return self._geometric_factor\n"
] | [
[
"numpy.sqrt",
"numpy.exp",
"numpy.abs"
],
[
"numpy.dot",
"numpy.ones_like",
"numpy.tanh",
"numpy.zeros",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
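The first code entry in the row above evaluates frequency-domain dipole fields in a whole space. Below is a minimal sketch of the same H-field expression for a z-oriented magnetic dipole at the origin; the constants, conductivity, frequency, and observation points are assumptions chosen only for illustration, not values taken from the dataset.

# Illustrative sketch: whole-space H-field of a unit z-directed magnetic dipole,
# following the front/mid/kr structure of the code entry above.
import numpy as np

mu_0 = 4e-7 * np.pi            # vacuum permeability (H/m), assumed constant
epsilon_0 = 8.8541878128e-12   # vacuum permittivity (F/m), assumed constant

def h_field_z_dipole(xyz, sig, f, m=1.0):
    """H-field of a z-directed magnetic dipole of moment m at the origin."""
    w = 2 * np.pi * f
    k = np.sqrt(-1j * w * mu_0 * sig + w**2 * mu_0 * epsilon_0)
    dx, dy, dz = xyz[:, 0], xyz[:, 1], xyz[:, 2]
    r = np.sqrt(dx**2 + dy**2 + dz**2)
    kr = k * r
    front = 1 / (4.0 * np.pi * r**3) * np.exp(-1j * kr)
    mid = -(kr**2) + 3j * kr + 3.0
    Hx = front * m * (dx * dz / r**2) * mid
    Hy = front * m * (dy * dz / r**2) * mid
    Hz = front * m * ((dz / r)**2 * mid + (kr**2 - 1j * kr - 1.0))
    return Hx, Hy, Hz

# Example observation points 10 m from the source (assumed for the demo).
xyz = np.array([[10.0, 0.0, 0.0], [0.0, 0.0, 10.0]])
print(h_field_z_dipole(xyz, sig=1e-2, f=100.0))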
jiacheng1gujiaxin/poseface | [
"316924e224477f881240712a13a925bdd27adf4c",
"316924e224477f881240712a13a925bdd27adf4c"
] | [
"img2pose/utils/renderer.py",
"img2pose/losses.py"
] | [
"import cv2\nimport numpy as np\nfrom Sim3DR import RenderPipeline\n\nfrom .pose_operations import plot_3d_landmark\n\n\ndef _to_ctype(arr):\n if not arr.flags.c_contiguous:\n return arr.copy(order=\"C\")\n return arr\n\n\ndef get_colors(img, ver):\n h, w, _ = img.shape\n ver[0, :] = np.minimum(np.maximum(ver[0, :], 0), w - 1) # x\n ver[1, :] = np.minimum(np.maximum(ver[1, :], 0), h - 1) # y\n ind = np.round(ver).astype(np.int32)\n colors = img[ind[1, :], ind[0, :], :] / 255.0 # n x 3\n\n return colors.copy()\n\n\nclass Renderer:\n def __init__(\n self,\n vertices_path=\"../pose_references/vertices_trans.npy\",\n triangles_path=\"../pose_references/triangles.npy\",\n ):\n self.vertices = np.load(vertices_path)\n self.triangles = _to_ctype(np.load(triangles_path).T)\n self.vertices[:, 0] *= -1\n\n self.cfg = {\n \"intensity_ambient\": 0.3,\n \"color_ambient\": (1, 1, 1),\n \"intensity_directional\": 0.6,\n \"color_directional\": (1, 1, 1),\n \"intensity_specular\": 0.1,\n \"specular_exp\": 5,\n \"light_pos\": (0, 0, 5),\n \"view_pos\": (0, 0, 5),\n }\n\n self.render_app = RenderPipeline(**self.cfg)\n\n def transform_vertices(self, img, poses, global_intrinsics=None):\n (w, h) = img.size\n if global_intrinsics is None:\n global_intrinsics = np.array(\n [[w + h, 0, w // 2], [0, w + h, h // 2], [0, 0, 1]]\n )\n\n transformed_vertices = []\n for pose in poses:\n projected_lms = np.zeros_like(self.vertices)\n projected_lms[:, :2], lms_3d_trans_proj = plot_3d_landmark(\n self.vertices, pose, global_intrinsics\n )\n projected_lms[:, 2] = lms_3d_trans_proj[:, 2] * -1\n\n range_x = np.max(projected_lms[:, 0]) - np.min(projected_lms[:, 0])\n range_y = np.max(projected_lms[:, 1]) - np.min(projected_lms[:, 1])\n\n s = (h + w) / pose[5]\n projected_lms[:, 2] *= s\n projected_lms[:, 2] += (range_x + range_y) * 3\n\n transformed_vertices.append(projected_lms)\n\n return transformed_vertices\n\n def render(self, img, transformed_vertices, alpha=0.9, save_path=None):\n img = np.asarray(img)\n overlap = img.copy()\n\n for vertices in transformed_vertices:\n vertices = _to_ctype(vertices) # transpose\n overlap = self.render_app(vertices, self.triangles, overlap)\n\n res = cv2.addWeighted(img, 1 - alpha, overlap, alpha, 0)\n\n if save_path is not None:\n cv2.imwrite(save_path, res)\n print(f\"Save visualization result to {save_path}\")\n\n return res\n\n def save_to_obj(self, img, ver_lst, height, save_path):\n n_obj = len(ver_lst) # count obj\n\n if n_obj <= 0:\n return\n\n n_vertex = ver_lst[0].T.shape[1]\n n_face = self.triangles.shape[0]\n\n with open(save_path, \"w\") as f:\n for i in range(n_obj):\n ver = ver_lst[i].T\n colors = get_colors(img, ver)\n\n for j in range(n_vertex):\n x, y, z = ver[:, j]\n f.write(\n f\"v {x:.2f} {height - y:.2f} {z:.2f} {colors[j, 2]:.2f} \"\n f\"{colors[j, 1]:.2f} {colors[j, 0]:.2f}\\n\"\n )\n\n for i in range(n_obj):\n offset = i * n_vertex\n for j in range(n_face):\n idx1, idx2, idx3 = self.triangles[j] # m x 3\n f.write(\n f\"f {idx3 + 1 + offset} {idx2 + 1 + offset} \"\n f\"{idx1 + 1 + offset}\\n\"\n )\n\n print(f\"Dump tp {save_path}\")\n",
"from itertools import chain, repeat\n\nimport torch\nimport torch.nn.functional as F\n\nfrom .utils.pose_operations import plot_3d_landmark_torch, pose_full_image_to_bbox\n\n\ndef fastrcnn_loss(\n class_logits,\n class_labels,\n box_regression,\n labels,\n dof_regression_targets,\n box_regression_targets,\n proposals,\n image_shapes,\n pose_mean=None,\n pose_stddev=None,\n threed_points=None,\n):\n # # type: (Tensor, Tensor, List[Tensor], List[Tensor]) -> Tuple[Tensor, Tensor]\n \"\"\"\n Computes the loss for Faster R-CNN.\n\n Arguments:\n class_logits (Tensor)\n box_regression (Tensor)\n labels (list[BoxList])\n regression_targets (Tensor)\n\n Returns:\n classification_loss (Tensor)\n box_loss (Tensor)\n \"\"\"\n img_size = [\n (boxes_in_image.shape[0], image_shapes[i])\n for i, boxes_in_image in enumerate(proposals)\n ]\n img_size = list(chain.from_iterable(repeat(j, i) for i, j in img_size))\n\n labels = torch.cat(labels, dim=0)\n class_labels = torch.cat(class_labels, dim=0)\n dof_regression_targets = torch.cat(dof_regression_targets, dim=0)\n box_regression_targets = torch.cat(box_regression_targets, dim=0)\n proposals = torch.cat(proposals, dim=0)\n classification_loss = F.cross_entropy(class_logits, class_labels)\n\n # get indices that correspond to the regression targets for\n # the corresponding ground truth labels, to be used with\n # advanced indexing\n sampled_pos_inds_subset = torch.nonzero(labels > 0).squeeze(1)\n labels_pos = labels[sampled_pos_inds_subset]\n N = box_regression.shape[0]\n box_regression = box_regression.reshape(N, -1, 6)\n dof_regression = box_regression[sampled_pos_inds_subset, labels_pos]\n prop_regression = proposals[sampled_pos_inds_subset]\n\n dof_regression_targets = dof_regression_targets[sampled_pos_inds_subset]\n\n box_regression_targets = box_regression_targets[sampled_pos_inds_subset]\n box_loss = F.l1_loss(prop_regression, box_regression_targets, reduction=\"sum\")\n box_loss = box_loss / prop_regression.numel()\n\n all_target_calibration_points = None\n all_pred_calibration_points = None\n\n for i in range(prop_regression.shape[0]):\n (h, w) = img_size[i]\n global_intrinsics = torch.Tensor(\n [[w + h, 0, w // 2], [0, w + h, h // 2], [0, 0, 1]]\n ).to(proposals[0].device)\n\n threed_points = threed_points.to(proposals[0].device)\n\n h = prop_regression[i, 3] - prop_regression[i, 1]\n w = prop_regression[i, 2] - prop_regression[i, 0]\n local_intrinsics = torch.Tensor(\n [[w + h, 0, w // 2], [0, w + h, h // 2], [0, 0, 1]]\n ).to(proposals[0].device)\n\n # calibration points projection\n local_dof_regression = (\n dof_regression[i, :] * pose_stddev.to(proposals[0].device)\n ) + pose_mean.to(proposals[0].device)\n\n pred_calibration_points = plot_3d_landmark_torch(\n threed_points, local_dof_regression.float(), local_intrinsics\n ).unsqueeze(0)\n\n # pose convertion for pose loss\n dof_regression_targets[i, :] = torch.from_numpy(\n pose_full_image_to_bbox(\n dof_regression_targets[i, :].cpu().numpy(),\n global_intrinsics.cpu().numpy(),\n prop_regression[i, :].cpu().numpy(),\n )\n ).to(proposals[0].device)\n\n # target calibration points projection\n target_calibration_points = plot_3d_landmark_torch(\n threed_points, dof_regression_targets[i, :], local_intrinsics\n ).unsqueeze(0)\n\n if all_target_calibration_points is None:\n all_target_calibration_points = target_calibration_points\n else:\n all_target_calibration_points = torch.cat(\n (all_target_calibration_points, target_calibration_points)\n )\n if all_pred_calibration_points is None:\n 
all_pred_calibration_points = pred_calibration_points\n else:\n all_pred_calibration_points = torch.cat(\n (all_pred_calibration_points, pred_calibration_points)\n )\n\n if pose_mean is not None:\n dof_regression_targets[i, :] = (\n dof_regression_targets[i, :] - pose_mean.to(proposals[0].device)\n ) / pose_stddev.to(proposals[0].device)\n\n points_loss = F.l1_loss(all_target_calibration_points, all_pred_calibration_points)\n\n dof_loss = (\n F.mse_loss(\n dof_regression,\n dof_regression_targets,\n reduction=\"sum\",\n )\n / dof_regression.shape[0]\n )\n\n return classification_loss, dof_loss, points_loss\n"
] | [
[
"numpy.maximum",
"numpy.min",
"numpy.asarray",
"numpy.round",
"numpy.max",
"numpy.zeros_like",
"numpy.load",
"numpy.array"
],
[
"torch.nn.functional.l1_loss",
"torch.cat",
"torch.Tensor",
"torch.nn.functional.cross_entropy",
"torch.nn.functional.mse_loss",
"torch.nonzero"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
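The losses.py entry in the row above combines three terms per batch of proposals. Below is a minimal sketch of that combination with random inputs; the tensor shapes and the two-class setup are assumptions made only for the example: cross-entropy over class logits, an L1 box term normalized by the number of box elements, and an MSE 6-DoF pose term normalized by the number of proposals.

# Illustrative sketch of the loss-normalization pattern seen in the entry above.
import torch
import torch.nn.functional as F

torch.manual_seed(0)
N = 8                                    # number of sampled proposals (assumed)
class_logits = torch.randn(N, 2)         # background / object logits (assumed)
class_labels = torch.randint(0, 2, (N,))
prop_boxes = torch.rand(N, 4) * 100      # proposal boxes (x1, y1, x2, y2)
gt_boxes = prop_boxes + torch.randn(N, 4)
dof_pred = torch.randn(N, 6)             # predicted 6-DoF pose per proposal
dof_target = torch.randn(N, 6)

classification_loss = F.cross_entropy(class_logits, class_labels)
box_loss = F.l1_loss(prop_boxes, gt_boxes, reduction="sum") / prop_boxes.numel()
dof_loss = F.mse_loss(dof_pred, dof_target, reduction="sum") / dof_pred.shape[0]

total = classification_loss + box_loss + dof_loss
print(float(classification_loss), float(box_loss), float(dof_loss), float(total))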
macarro/imputena | [
"3a94ae1419a2af0d9707b20546ee078929ce99e8"
] | [
"imputena/simple_imputation/linear_regression.py"
] | [
"import pandas as pd\nimport numpy as np\nfrom sklearn import linear_model\nimport logging\n\n\ndef linear_regression(\n data=None, dependent=None, predictors=None, regressions='available',\n noise=False, inplace=False):\n \"\"\"Performs simple or multiple linear regression imputation on the data.\n First, the regression equation for the dependent variable given the\n predictor variables is computed. For this step, all rows that contain a\n missing value in either the dependent variable or any of the predictor\n variable is ignored via pairwise deletion. Then, missing valued in the\n dependent column in imputed using the regression equation. If, in the same\n row as a missing value in the dependent variable the value for any\n predictor variable is missing, a regression model based on all available\n predictors in calculated just to impute those values where the\n predictor(s) are missing. This behavior can be changed by assigning to\n the parameter regressions the value 'complete'. In this case, rows in\n which a predictor variable is missing do not get imputed. If stochastic\n regression imputation should be performed, set noise=True. In this\n case, a random value is chosen from a normal distribution with the width\n of the standard error of the regression model and added to the imputed\n value. If the parameter predictors is omitted, all variables other than\n the dependent are used as predictors. If the parameter dependent is\n omitted, the operation is performed on all columns that contain missing\n values.\n\n :param data: The data on which to perform the linear regression imputation.\n :type data: pandas.DataFrame\n :param dependent: The dependent variable in which the missing values\n should be imputed.\n :type dependent: String, optional\n :param predictors: The predictor variables on which the dependent variable\n is dependent.\n :type predictors: array-like, optional\n :param regressions: If 'available': Impute missing values by modeling a\n regression based on all available predictors if some predictors have\n missing values themselves. 
If 'complete': Only impute with a\n regression model based on all predictors and leave missing values in\n rows in which some predictor value is missing itself unimputed.\n :type regressions: {'available', 'complete'}, default 'available'\n :param noise: Whether to add noise to the imputed values (stochastic\n regression imputation)\n :type noise: bool, default False\n :param inplace: If True, do operation inplace and return None.\n :type inplace: bool, default False\n :return: The dataframe with linear regression imputation performed for the\n incomplete variable(s) or None if inplace=True.\n :rtype: pandas.DataFrame or None\n :raises: TypeError, ValueError\n \"\"\"\n # Check if data is a dataframe:\n if not isinstance(data, pd.DataFrame):\n raise TypeError('The data has to be a DataFrame.')\n # Check if the dependent variable is actually a column of the dataframe:\n if dependent is not None and dependent not in data.columns:\n raise ValueError(\n '\\'' + dependent + '\\' is not a column of the data.')\n # Check if each of the predictor variables is actually a column of the\n # dataframe:\n if predictors is not None:\n for column in predictors:\n if column not in data.columns:\n raise ValueError(\n '\\'' + column + '\\' is not a column of the data.')\n # Assign value to do_available_regressions\n if regressions == 'available':\n do_available_regressions = True\n elif regressions == 'complete':\n do_available_regressions = False\n else:\n raise ValueError(regressions + 'could not be understood')\n # Assign a reference or copy to res, depending on inplace:\n if inplace:\n res = data\n else:\n res = data.copy()\n # If dependent is not set, apply the operation to each column that contains\n # missing data:\n if dependent is None:\n for column in data.columns:\n if data[column].isna().any():\n res.loc[:, :] = linear_regression_one_dependent(\n res, column, predictors, do_available_regressions,\n noise)\n # Otherwise apply the operation to the dependent column only:\n else:\n res.loc[:, :] = linear_regression_one_dependent(\n data, dependent, predictors, do_available_regressions, noise)\n # Return dataframe if the operation is not to be performed inplace:\n if not inplace:\n return res\n\n\ndef linear_regression_one_dependent(\n data, dependent, predictors, do_available_regressions, noise):\n \"\"\"Auxiliary function that performs linear regression imputation for the\n dependent column. 
The difference with linear_regression() is that in\n that function dependent can be None, in which case this function is\n called for each column containing missing values,\n\n :param data: The data on which to perform the linear regression imputation.\n :type data: pandas.DataFrame\n :param dependent: The dependent variable in which the missing values\n should be imputed.\n :type dependent: String\n :param predictors: The predictor variables on which the dependent variable\n is dependent.\n :type predictors: array-like\n :param do_available_regressions: Whether to do regressions for all\n available predictor combinations or only on complete ones\n :type do_available_regressions: bool\n :param noise: Whether to add noise to the imputed values (stochastic\n regression imputation)\n :type noise: bool\n :return: The dataframe with linear regression imputation performed for the\n incomplete variable.\n :rtype: pandas.DataFrame\n \"\"\"\n # This auxiliary function always returns a copy:\n res = data.copy()\n # If predictors is None, all variables except for the dependent one are\n # considered predictors:\n if predictors is None:\n predictors = list(data.columns)\n predictors.remove(dependent)\n # Predictor combination sets and lists\n limited_predictors_combs = set()\n predictors_combs_done = []\n predictors_combs_todo = [tuple(predictors)]\n # Perform the operation:\n while len(predictors_combs_todo) > 0:\n # Select iteration predictors\n it_predictors = predictors_combs_todo.pop(0)\n # Log iteration beginning:\n logging.info('Applying regression imputation with predictors: ' + str(\n it_predictors))\n # Perform iteration:\n res.loc[:, :] = linear_regression_iter(\n res, dependent, list(it_predictors), noise,\n limited_predictors_combs)\n # Update predictor combinations done and to do\n predictors_combs_done.append(it_predictors)\n if do_available_regressions:\n predictors_combs_todo = list(\n set(limited_predictors_combs) - set(predictors_combs_done))\n # Log iteration end:\n logging.info('Predictor combinations done: ' + str(\n predictors_combs_done))\n logging.info('Predictor combinations to do: ' + str(\n predictors_combs_todo))\n return res\n\n\ndef linear_regression_iter(\n data, dependent, predictors, noise, limited_predictors_combs):\n \"\"\"Auxiliary function that performs (simple or multiple) linear\n regression imputation on the data, for the dependent column only. In rows\n that contain a missing value for any predictor variable, the value of the\n dependent variable does not get imputed. 
The operation is always\n performed on a copy of the data, which is returned.\n\n :param data: The data on which to perform the linear regression imputation.\n :type data: pandas.DataFrame\n :param dependent: The dependent variable in which the missing values\n should be imputed.\n :type dependent: String\n :param predictors: The predictor variables on which the dependent variable\n is dependent.\n :type predictors: array-like\n :param noise: Whether to add noise to the imputed value (stochastic\n regression imputation)\n :type noise: bool\n :param limited_predictors_combs: Reference to the set which contains all\n limited predictor combinations that are necessary to use because\n some predictor had a missing value in some row.\n :type limited_predictors_combs: set\n :return: A copy of the dataframe with linear regression imputation\n performed for the incomplete variable.\n :rtype: pandas.DataFrame\n \"\"\"\n # Perform pairwise deletion before calculating the regression\n data_pairwise_deleted = data.copy()\n variables = predictors.copy()\n variables.append(dependent)\n data_pairwise_deleted.dropna(subset=variables, inplace=True)\n # Calculate the regression:\n x = data_pairwise_deleted[predictors]\n y = data_pairwise_deleted[dependent]\n model = linear_model.LinearRegression()\n model.fit(x, y)\n # Extract the regression parameters from the model\n intercept = model.intercept_\n coefs = model.coef_\n # Log regression equation:\n eq = str(dependent) + ' = ' + str(intercept)\n for idx, coef in enumerate(coefs):\n eq += ' + ' + str(coef) + '*' + predictors[idx]\n logging.info('Regression equation: ' + eq)\n # Calculate standard error:\n std_error = (model.predict(x) - y).std()\n logging.info('Standard error: ' + str(std_error))\n # Implementation using apply:\n return data.apply(\n lambda row: get_imputed_row(\n row, dependent, predictors, intercept, coefs, noise, std_error,\n limited_predictors_combs),\n axis=1, result_type='broadcast')\n\n\ndef get_imputed_row(\n row, dependent, predictors, intercept, coefs, noise, std_error,\n limited_predictors_combs):\n \"\"\"Auxiliary function that receives a row of a DataFrame and returns the\n same row. If the row contains a missing value for the dependent variable,\n it gets imputed according to the regression equation specified by\n predictors, intercept and coefs.\n\n :param row: The row for which the missing value should be imputed\n :type row: pandas.Series\n :param dependent: The dependent variable for which the row might contain a\n missing value\n :type dependent: String\n :param predictors: The predictor variables on which the dependent variable\n is dependent.\n :type predictors: array-like\n :param intercept: The y-intercept of the regression equation.\n :type intercept: scalar\n :param coefs: The coefficients of the regression equation, in the same\n order as the predictors.\n :type coefs: array-like,\n :param noise: Whether to add noise to the imputed value (stochastic\n regression imputation)\n :type noise: bool\n :param std_error: The standard error of the regression model. 
Required\n if noise=True\n :type std_error: scalar\n :param limited_predictors_combs: Reference to the set which contains all\n limited predictor combinations that are necessary to use because\n some predictor had a missing value in some row.\n :type limited_predictors_combs: set\n :return: The row, with the missing value imputed if it contains one.\n :rtype: pandas.Series\n \"\"\"\n res = row.copy()\n if pd.isnull(res[dependent]):\n # Check whether there are predictors for which the value is NA\n na_predictors = tuple(\n row[predictors][row[predictors].isnull()].index.to_list())\n # If the row contains NA values for one or several predictors,\n # add the combination of predictors to na_predictor_combs, in order\n # to perform regression without them:\n if na_predictors != ():\n limited_predictors = tuple(set(predictors) - set(na_predictors))\n # Add the limited_predictors to the set only if the combination\n # isn't empty:\n if limited_predictors != ():\n limited_predictors_combs.add(limited_predictors)\n # If the row doesn't contain missing values for any predictor, impute:\n else:\n value = intercept\n for idx, coef in enumerate(coefs):\n value += coef * row[predictors[idx]]\n # If noise == True, add noise (stochastic regression imputation)\n if noise:\n value += std_error * np.random.randn()\n res[dependent] = value\n return res\n"
] | [
[
"numpy.random.randn",
"sklearn.linear_model.LinearRegression",
"pandas.isnull"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
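The imputena entry in the row above fits a regression on pairwise-deleted rows and then imputes the dependent column, optionally adding noise with the width of the residual standard error (stochastic regression imputation). Below is a minimal sketch of that core step on a toy DataFrame; the data, column names, and seed are assumptions for the example.

# Illustrative sketch: fit on complete rows, impute missing y, add scaled noise.
import numpy as np
import pandas as pd
from sklearn import linear_model

rng = np.random.default_rng(0)
df = pd.DataFrame({"x": np.arange(10.0),
                   "y": 2.0 * np.arange(10.0) + rng.normal(0, 0.5, 10)})
df.loc[[2, 7], "y"] = np.nan               # introduce missing values

complete = df.dropna(subset=["x", "y"])    # pairwise deletion
model = linear_model.LinearRegression().fit(complete[["x"]], complete["y"])
std_error = (model.predict(complete[["x"]]) - complete["y"]).std()

missing = df["y"].isna()
pred = model.predict(df.loc[missing, ["x"]])
df.loc[missing, "y"] = pred + std_error * rng.standard_normal(missing.sum())
print(df)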
nmningmei/metacognition | [
"734082e247cc7fc9d277563e2676e10692617a3f",
"734082e247cc7fc9d277563e2676e10692617a3f",
"734082e247cc7fc9d277563e2676e10692617a3f",
"734082e247cc7fc9d277563e2676e10692617a3f",
"734082e247cc7fc9d277563e2676e10692617a3f"
] | [
"3 experiments_confidence/batch/e2 (experiment and chance scores) (cpj).py",
"scripts/classifcation_pos_n_trials_back (cv counts).py",
"3 experiments_confidence/analysis e2.py",
"3 experiments_correctness/batch/e2 (experiment and chance scores) (ack).py",
"3 experiments_confidence/batch/e2 (experiment and chance scores) (sva).py"
] | [
"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 12 16:07:58 2018\n\n@author: nmei\n\nin exp2 (e2) there were 3 possible awareness ratings ( (e.g. 1- no experience, 2 brief glimpse 3 almost clear or clear perception)\nBUT if can make a binary classification by focussing on 1 and 2 which are the majority of the trials.\n\n\n\"\"\"\nif __name__ == '__main__':\n import os\n import pandas as pd\n import numpy as np\n import utils\n # define result saving directory\n dir_saving = 'results_e2'\n if not os.path.exists(dir_saving):\n os.mkdir(dir_saving)\n \n try:# the subject level processing\n df1 = pd.read_csv('e2.csv').iloc[:,1:]\n except: # when I test the script\n df1 = pd.read_csv('../e2.csv').iloc[:,1:]\n df = df1.copy()\n # select the columns that I need\n df = df[['blocks.thisN',\n 'trials.thisN',\n 'key_resp_2.keys',\n 'resp.corr',\n 'resp_mrating.keys',\n 'participant',]]\n # rename the columns\n df.columns = ['blocks',\n 'trials',\n 'awareness',\n 'correctness',\n 'confidence',\n 'participant',]\n # preallocate the data frame structure\n results = dict(sub = [],\n model = [],\n score = [],\n window = [],\n correctness = [],\n awareness = [],\n confidence = [],\n chance = [],\n )\n # use success, awareness, and confidence as features\n np.random.seed(12345)\n # use judgement features\n feature_names = [\n 'correctness',\n 'awareness',\n 'confidence',\n ]\n target_name = 'confidence'\n experiment = 'e2'\n # for some of the variables, we need to rescale them to a more preferable range like 0-1\n name_for_scale = ['awareness']\n # 'ack', 'cc', 'ck', 'cpj', 'em', 'es', 'fd', 'jmac', 'lidia', 'ls','mimi', 'pr', 'pss', 'sva', 'tj'\n # get one of the participants' data\n participant = 'cpj'\n df_sub = df[df['participant'] == participant]\n # pick 1- no experience, 2 brief glimpse for binary classification\n df_sub = df_sub[df_sub['awareness'] != 3]\n # for 1-back to 4-back\n for n_back in np.arange(1,5):\n # experiment score\n results = utils.classification(\n df_sub.dropna(), # take out nan rows\n feature_names, # feature columns\n target_name, # target column\n results, # the saving structure\n participant, # participant's name\n experiment, # experiment name\n window = n_back, # N-back\n chance = False, # it is NOT estimating the chance level but the empirical classification experiment\n name_for_scale = name_for_scale # scale some of the variables\n )\n # empirical chance level\n results = utils.classification(\n df_sub.dropna(),\n feature_names,\n target_name,\n results,\n participant,\n experiment,\n window = n_back,\n chance = True, # it is to estimate the empirical chance level\n name_for_scale = name_for_scale\n )\n results_to_save = pd.DataFrame(results)\n results_to_save.to_csv(os.path.join(dir_saving,'{}.csv'.format(participant)))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n",
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jul 15 16:02:16 2018\n\n@author: ning\n\"\"\"\n\nimport os\nworking_dir = ''\nimport pandas as pd\npd.options.mode.chained_assignment = None\nimport numpy as np\nfrom utils import (cv_counts)\nsaving_dir = '../results/cv_counts'\nif not os.path.exists(saving_dir):\n os.mkdir(saving_dir)\n\n# Exp 1\nfor participant in ['AC', 'CL', 'FW', 'HB', 'KK', 'LM', 'MC', 'MP1', 'MP2', 'NN', 'RP','SD', 'TJ', 'TS', 'WT']:\n\n experiment = 'pos'\n df = pd.read_csv(os.path.join(working_dir,'../data/PoSdata.csv'))\n df = df[df.columns[1:]]\n df.columns = ['participant',\n 'blocks',\n 'trials',\n 'firstgabor',\n 'success',\n 'tilted',\n 'correct',\n 'RT_correct',\n 'awareness',\n 'RT_awareness',\n 'confidence',\n 'RT_confidence']\n df_sub = df[df['participant'] == participant]\n # make sure all the attributes are either 0 or 1\n df_sub.loc[:,'success' ] = df_sub.loc[:,'success' ].values - 1\n df_sub.loc[:,'awareness' ] = df_sub.loc[:,'awareness' ].values - 1\n df_sub.loc[:,'confidence'] = df_sub.loc[:,'confidence'].values - 1\n \n ##################################################################\n np.random.seed(12345)\n # use all 6 possible features\n feature_names = [\n 'correct',\n 'awareness',\n 'confidence',\n 'RT_correct',\n 'RT_awareness',\n 'RT_confidence']\n target_name = 'success'\n results = dict(sub = [],\n window = [],\n fold = [],\n )\n for name in feature_names:\n results['{}_high_cond_{}_low'.format(target_name,name)] = []\n results['{}_high_cond_{}_high'.format(target_name,name)] = []\n \n for n_back in np.arange(1,5): # loop through the number of trials looking back\n # this is the part that is redundent and the code is long\n results = cv_counts(\n df_sub,\n feature_names,\n target_name,\n results,\n participant,\n experiment,\n window=n_back,\n \n ) \n temp = pd.DataFrame(results)\n temp.to_csv(os.path.join(saving_dir,'Pos_6_features (cv_count)_{}.csv'.format(participant)),index=False) # save as a csv\n ################################################################################\n # use success, awareness, and confidence as features\n np.random.seed(12345)\n # use judgement features\n feature_names = [\n 'correct',\n 'awareness',\n 'confidence',]\n target_name = 'success'\n results = dict(sub = [],\n window = [],\n fold = [],\n )\n for name in feature_names:\n results['{}_high_cond_{}_low'.format(target_name,name)] = []\n results['{}_high_cond_{}_high'.format(target_name,name)] = []\n \n for n_back in np.arange(1,5): # loop through the number of trials looking back\n # this is the part that is redundent and the code is long\n results = cv_counts(\n df_sub,\n feature_names,\n target_name,\n results,\n participant,\n experiment,\n window=n_back,\n \n ) \n temp = pd.DataFrame(results)\n temp.to_csv(os.path.join(saving_dir,'Pos_3_1_features (cv_count)_{}.csv'.format(participant)),index=False) # save as a csv\n ###############################################################################\n # use reactimes as features\n np.random.seed(12345)\n # use all 6 possible features\n feature_names = [\n 'RT_correct',\n 'RT_awareness',\n 'RT_confidence']\n target_name = 'success'\n results = dict(sub = [],\n window = [],\n fold = [],\n )\n for name in feature_names:\n results['{}_high_cond_{}_low'.format(target_name,name)] = []\n results['{}_high_cond_{}_high'.format(target_name,name)] = []\n \n for n_back in np.arange(1,5): # loop through the number of trials looking back\n # this is the part that is redundent and the code is long\n results = 
cv_counts(\n df_sub,\n feature_names,\n target_name,\n results,\n participant,\n experiment,\n window=n_back,\n \n ) \n temp = pd.DataFrame(results)\n temp.to_csv(os.path.join(saving_dir,'Pos_RT_features (cv_count)_{}.csv'.format(participant)),index=False) # save as a csv\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n",
"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 12 16:07:58 2018\n\n@author: nmei\n\nin exp2 (e2) there were 3 possible awareness ratings ( (e.g. 1- no experience, 2 brief glimpse 3 almost clear or clear perception)\nBUT if can make a binary classification by focussing on 1 and 2 which are the majority of the trials.\n\n\n\"\"\"\nif __name__ == '__main__':\n import os\n import pandas as pd\n import numpy as np\n import utils\n # define result saving directory\n dir_saving = 'results_e2'\n if not os.path.exists(dir_saving):\n os.mkdir(dir_saving)\n \n try:# the subject level processing\n df1 = pd.read_csv('e2.csv').iloc[:,1:]\n except: # when I test the script\n df1 = pd.read_csv('../e2.csv').iloc[:,1:]\n df = df1.copy()\n # select the columns that I need\n df = df[['blocks.thisN',\n 'trials.thisN',\n 'key_resp_2.keys',\n 'resp.corr',\n 'resp_mrating.keys',\n 'participant',]]\n # rename the columns\n df.columns = ['blocks',\n 'trials',\n 'awareness',\n 'correctness',\n 'confidence',\n 'participant',]\n # preallocate the data frame structure\n results = dict(sub = [],\n model = [],\n score = [],\n window = [],\n correctness = [],\n awareness = [],\n confidence = [],\n chance = [],\n )\n # use success, awareness, and confidence as features\n np.random.seed(12345)\n # use judgement features\n feature_names = [\n 'correctness',\n 'awareness',\n 'confidence',\n ]\n target_name = 'confidence'\n experiment = 'e2'\n # for some of the variables, we need to rescale them to a more preferable range like 0-1\n name_for_scale = ['awareness']\n # 'ack', 'cc', 'ck', 'cpj', 'em', 'es', 'fd', 'jmac', 'lidia', 'ls','mimi', 'pr', 'pss', 'sva', 'tj'\n # get one of the participants' data\n participant = 'ack'\n df_sub = df[df['participant'] == participant]\n # pick 1- no experience, 2 brief glimpse for binary classification\n df_sub = df_sub[df_sub['awareness'] != 3]\n # for 1-back to 4-back\n for n_back in np.arange(1,5):\n # experiment score\n results = utils.classification(\n df_sub.dropna(), # take out nan rows\n feature_names, # feature columns\n target_name, # target column\n results, # the saving structure\n participant, # participant's name\n experiment, # experiment name\n window = n_back, # N-back\n chance = False, # it is NOT estimating the chance level but the empirical classification experiment\n name_for_scale = name_for_scale # scale some of the variables\n )\n # empirical chance level\n results = utils.classification(\n df_sub.dropna(),\n feature_names,\n target_name,\n results,\n participant,\n experiment,\n window = n_back,\n chance = True, # it is to estimate the empirical chance level\n name_for_scale = name_for_scale\n )\n results_to_save = pd.DataFrame(results)\n results_to_save.to_csv(os.path.join(dir_saving,'{}.csv'.format(participant)))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n",
"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 12 16:07:58 2018\n\n@author: nmei\n\nin exp2 (e2) there were 3 possible awareness ratings ( (e.g. 1- no experience, 2 brief glimpse 3 almost clear or clear perception)\nBUT if can make a binary classification by focussing on 1 and 2 which are the majority of the trials.\n\n\n\"\"\"\nif __name__ == '__main__':\n import os\n import pandas as pd\n import numpy as np\n import utils\n # define result saving directory\n dir_saving = 'results_e2'\n if not os.path.exists(dir_saving):\n os.mkdir(dir_saving)\n \n try:# the subject level processing\n df1 = pd.read_csv('e2.csv').iloc[:,1:]\n except: # when I test the script\n df1 = pd.read_csv('../e2.csv').iloc[:,1:]\n df = df1.copy()\n # select the columns that I need\n df = df[['blocks.thisN',\n 'trials.thisN',\n 'key_resp_2.keys',\n 'resp.corr',\n 'resp_mrating.keys',\n 'participant',]]\n # rename the columns\n df.columns = ['blocks',\n 'trials',\n 'awareness',\n 'correctness',\n 'confidence',\n 'participant',]\n # preallocate the data frame structure\n results = dict(sub = [],\n model = [],\n score = [],\n window = [],\n correctness = [],\n awareness = [],\n confidence = [],\n chance = [],\n )\n # use success, awareness, and confidence as features\n np.random.seed(12345)\n # use judgement features\n feature_names = [\n 'correctness',\n 'awareness',\n 'confidence',\n ]\n target_name = 'correctness'\n experiment = 'e2'\n # for some of the variables, we need to rescale them to a more preferable range like 0-1\n name_for_scale = ['awareness']\n # 'ack', 'cc', 'ck', 'cpj', 'em', 'es', 'fd', 'jmac', 'lidia', 'ls','mimi', 'pr', 'pss', 'sva', 'tj'\n # get one of the participants' data\n participant = 'ack'\n df_sub = df[df['participant'] == participant]\n # pick 1- no experience, 2 brief glimpse for binary classification\n df_sub = df_sub[df_sub['awareness'] != 3]\n # for 1-back to 4-back\n for n_back in np.arange(1,5):\n # experiment score\n results = utils.classification(\n df_sub.dropna(), # take out nan rows\n feature_names, # feature columns\n target_name, # target column\n results, # the saving structure\n participant, # participant's name\n experiment, # experiment name\n window = n_back, # N-back\n chance = False, # it is NOT estimating the chance level but the empirical classification experiment\n name_for_scale = name_for_scale # scale some of the variables\n )\n # empirical chance level\n results = utils.classification(\n df_sub.dropna(),\n feature_names,\n target_name,\n results,\n participant,\n experiment,\n window = n_back,\n chance = True, # it is to estimate the empirical chance level\n name_for_scale = name_for_scale\n )\n results_to_save = pd.DataFrame(results)\n results_to_save.to_csv(os.path.join(dir_saving,'{}.csv'.format(participant)))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n",
"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 12 16:07:58 2018\n\n@author: nmei\n\nin exp2 (e2) there were 3 possible awareness ratings ( (e.g. 1- no experience, 2 brief glimpse 3 almost clear or clear perception)\nBUT if can make a binary classification by focussing on 1 and 2 which are the majority of the trials.\n\n\n\"\"\"\nif __name__ == '__main__':\n import os\n import pandas as pd\n import numpy as np\n import utils\n # define result saving directory\n dir_saving = 'results_e2'\n if not os.path.exists(dir_saving):\n os.mkdir(dir_saving)\n \n try:# the subject level processing\n df1 = pd.read_csv('e2.csv').iloc[:,1:]\n except: # when I test the script\n df1 = pd.read_csv('../e2.csv').iloc[:,1:]\n df = df1.copy()\n # select the columns that I need\n df = df[['blocks.thisN',\n 'trials.thisN',\n 'key_resp_2.keys',\n 'resp.corr',\n 'resp_mrating.keys',\n 'participant',]]\n # rename the columns\n df.columns = ['blocks',\n 'trials',\n 'awareness',\n 'correctness',\n 'confidence',\n 'participant',]\n # preallocate the data frame structure\n results = dict(sub = [],\n model = [],\n score = [],\n window = [],\n correctness = [],\n awareness = [],\n confidence = [],\n chance = [],\n )\n # use success, awareness, and confidence as features\n np.random.seed(12345)\n # use judgement features\n feature_names = [\n 'correctness',\n 'awareness',\n 'confidence',\n ]\n target_name = 'confidence'\n experiment = 'e2'\n # for some of the variables, we need to rescale them to a more preferable range like 0-1\n name_for_scale = ['awareness']\n # 'ack', 'cc', 'ck', 'cpj', 'em', 'es', 'fd', 'jmac', 'lidia', 'ls','mimi', 'pr', 'pss', 'sva', 'tj'\n # get one of the participants' data\n participant = 'sva'\n df_sub = df[df['participant'] == participant]\n # pick 1- no experience, 2 brief glimpse for binary classification\n df_sub = df_sub[df_sub['awareness'] != 3]\n # for 1-back to 4-back\n for n_back in np.arange(1,5):\n # experiment score\n results = utils.classification(\n df_sub.dropna(), # take out nan rows\n feature_names, # feature columns\n target_name, # target column\n results, # the saving structure\n participant, # participant's name\n experiment, # experiment name\n window = n_back, # N-back\n chance = False, # it is NOT estimating the chance level but the empirical classification experiment\n name_for_scale = name_for_scale # scale some of the variables\n )\n # empirical chance level\n results = utils.classification(\n df_sub.dropna(),\n feature_names,\n target_name,\n results,\n participant,\n experiment,\n window = n_back,\n chance = True, # it is to estimate the empirical chance level\n name_for_scale = name_for_scale\n )\n results_to_save = pd.DataFrame(results)\n results_to_save.to_csv(os.path.join(dir_saving,'{}.csv'.format(participant)))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
] | [
[
"numpy.arange",
"pandas.read_csv",
"numpy.random.seed",
"pandas.DataFrame"
],
[
"numpy.arange",
"numpy.random.seed",
"pandas.DataFrame"
],
[
"numpy.arange",
"pandas.read_csv",
"numpy.random.seed",
"pandas.DataFrame"
],
[
"numpy.arange",
"pandas.read_csv",
"numpy.random.seed",
"pandas.DataFrame"
],
[
"numpy.arange",
"pandas.read_csv",
"numpy.random.seed",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
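The batch scripts in the row above pass window=n_back to utils.classification, whose implementation is not included in this row. A plausible way to build such N-back predictors, shown only as a reconstruction and not as the repository's actual code, is to lag each feature column and predict the current trial's target; the helper name and the toy data below are assumptions.

# Hypothetical sketch of N-back feature construction with pandas.shift.
import numpy as np
import pandas as pd

def make_nback_features(df, feature_names, target_name, window):
    """Stack the previous `window` trials' features to predict the current target."""
    lagged = []
    for lag in range(1, window + 1):
        shifted = df[feature_names].shift(lag)
        shifted.columns = [f"{c}_back{lag}" for c in feature_names]
        lagged.append(shifted)
    X = pd.concat(lagged, axis=1)
    y = df[target_name]
    keep = X.notna().all(axis=1)           # drop the first `window` trials
    return X[keep], y[keep]

df = pd.DataFrame({"correctness": np.random.randint(0, 2, 20),
                   "awareness":   np.random.randint(1, 3, 20),
                   "confidence":  np.random.randint(0, 2, 20)})
X, y = make_nback_features(df, ["correctness", "awareness", "confidence"],
                           "confidence", window=2)
print(X.shape, y.shape)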
christianwbrock/algol-reduction | [
"5e85734d9e9e31985ead3ce40e67535418351010",
"5e85734d9e9e31985ead3ce40e67535418351010"
] | [
"reduction/test/plot_algol_h_alpha_line.py",
"reduction/scripts/generate_report.py"
] | [
"\nimport matplotlib.pyplot as plt\nfrom reduction.algol_h_alpha_line_model import AlgolHAlphaModel\n\n\nif __name__ == '__main__':\n\n AlgolHAlphaModel().plot(plt.axes())\n plt.show()\n",
"\"\"\"\\\nGenerate LaTeX report displaying spectra normalized around the H_alpha line.\n\"\"\"\n\nimport logging\nimport os\nimport os.path\nfrom argparse import ArgumentParser\nfrom collections import namedtuple, defaultdict\n\nimport numpy as np\nfrom astropy import constants as const\nfrom astropy.convolution import Box1DKernel\nfrom astropy.convolution import convolve\nfrom astropy.coordinates import EarthLocation\nfrom matplotlib import cm\nfrom matplotlib import pyplot as plt\nfrom matplotlib import rcParams as plot_params\n\nfrom reduction.algol_h_alpha_line_model import AlgolHAlphaModel\nfrom reduction.commandline import poly_glob, filename_parser, verbose_parser, get_loglevel\nfrom reduction.constants import H_ALPHA\nfrom reduction.normalize import normalize\nfrom reduction.spectrum import Spectrum\nfrom reduction.stars.algol import Algol, algol_coordinate\nfrom reduction.utils.ranges import closed_range\n\nlogger = logging.getLogger(__name__)\n\nDiff = namedtuple('Diff', 'wavelength diff phase maxima')\n\n\ndef main():\n\n plot_params['figure.dpi'] = 150\n\n # TODO comment\n max_diff = 0.25\n\n # range around H_alpha in A to be ignored for spectrum normalization\n padding = 10.0\n\n disc_range = closed_range(H_ALPHA.value - padding, H_ALPHA.value + padding)\n continuum_ranges = closed_range(6520, 6610) & ~disc_range\n\n parser = ArgumentParser(parents=[filename_parser('spectrum'), verbose_parser],\n description='Generate LaTeX report displaying spectra normalized around the H_alpha line.')\n\n parser.add_argument('-o', '--output', type=str, default='output',\n help='output folder where TeX file and images are stored')\n parser.add_argument('-f', '--force', action='store_true',\n help='Use this option to remove an existing output folder.')\n parser.add_argument('--deg', type=int, default=3,\n help='Degree of the normalization polynomial (default: %(default)s)')\n parser.add_argument('--cmap', default='bwr',\n help='A valid matplotlib colormap name (default: %(default)s)')\n\n args = parser.parse_args()\n\n logging.basicConfig(level=get_loglevel(logger, args))\n\n os.makedirs(args.output, exist_ok=args.force)\n logger.info(\"write report to '%s'\", os.path.abspath(args.output))\n\n if args.cmap not in cm.datad.keys():\n logger.warning('Invalid colormap not in %s', cm.datad.keys())\n args.cmap = parser.get_default('cmap')\n\n observer_location = EarthLocation.from_geodetic(lon=15.0, lat=50.0)\n algol = Algol()\n\n tex_file = open(os.path.join(args.output, \"report.tex\"), \"w\")\n\n tex_file.write(\"\\\\documentclass{article}\\n\")\n tex_file.write(\"\\\\usepackage[utf8]{inputenc}\\n\")\n tex_file.write(\"\\\\usepackage{graphicx}\\n\")\n tex_file.write(\"\\\\usepackage{seqsplit}\\n\")\n tex_file.write(\"\\\\usepackage{longtable}\\n\")\n tex_file.write(\"\\\\usepackage[hidelinks]{hyperref}\\n\")\n tex_file.write(\"\\\\title{Project Algol\\\\\\\\Spectrum reduction}\\n\")\n tex_file.write(\"\\\\date{\\\\today}\\n\")\n tex_file.write(\"\\\\author{%s\\\\\\\\by Christian Brock}\\n\" % os.path.basename(__file__).replace('_', '\\\\_'))\n tex_file.write(\"\\\\begin{document}\\n\")\n tex_file.write(\"\\\\maketitle\\n\")\n tex_file.write(\"\\\\begin{verbatim}\\n\")\n\n for k,v in args.__dict__.items():\n tex_file.write(\"--%s %s\\n\" % (k, v))\n\n tex_file.write(\"\\\\end{verbatim}\\n\")\n tex_file.write(\"\\\\tableofcontents\\n\")\n\n diff_image_name = \"diff_by_phase.png\"\n diff_image_wm_name = \"diff_by_phase_with_maxima.png\"\n sorted_diff_image_name = \"diff_sorted_phase.png\"\n 
sorted_diff_image_wm_name = \"diff_sorted_phase_with_maxima.png\"\n snr_by_observer_name = \"snr_by_observer.png\"\n\n tex_file.write(\"\\n\")\n tex_file.write(\"\\\\section{Final Result}\\n\")\n tex_file.write(\"\\n\")\n tex_file.write(\"\\\\includegraphics[width=\\\\textwidth]{%s}\\n\" % diff_image_name)\n tex_file.write(\"\\n\")\n tex_file.write(\"\\\\includegraphics[width=\\\\textwidth]{%s}\\n\" % diff_image_wm_name)\n tex_file.write(\"\\n\")\n tex_file.write(\"\\\\includegraphics[width=\\\\textwidth]{%s}\\n\" % sorted_diff_image_name)\n tex_file.write(\"\\n\")\n tex_file.write(\"\\\\includegraphics[width=\\\\textwidth]{%s}\\n\" % sorted_diff_image_wm_name)\n tex_file.write(\"\\n\")\n\n # list of Diffs\n diffs_by_phase = []\n snr_by_observer = defaultdict(list)\n\n filenames = poly_glob(args.filenames)\n\n # pass #1 loads all spectra found in the command line arguments\n spectra = []\n for n, filename in enumerate(filenames, start=1):\n\n logger.info(\"pass1 %d/%d: %s\", n, len(filenames), filename)\n\n for spectrum in Spectrum.load(filename, slice(None)):\n\n obs_time = spectrum.obs_date\n if not obs_time:\n logger.error(\"%s has no observation date\", spectrum.filename)\n continue\n\n spectra.append(spectrum)\n\n prev_observer = None\n prev_day = None\n\n # pass #2\n # group all spectra by observer and date\n for n, spectrum in enumerate(sorted(spectra, key=lambda sp: (sp.observer, sp.obs_date)), start=1):\n\n logger.info(\"pass2 %d/%d: %s\", n, len(spectra), spectrum.short_name)\n\n if spectrum.observer != prev_observer:\n tex_file.write(\"\\section{%s}\\n\\n\" % spectrum.observer)\n prev_observer = spectrum.observer\n prev_day = None\n\n obs_day = spectrum.obs_date.iso[:10]\n if obs_day != prev_day:\n tex_file.write(\"\\subsection{%s}\\n\\n\" % obs_day)\n prev_day = obs_day\n\n xs = spectrum.xs\n ys = spectrum.ys\n\n # cut first and last 15 values which may contain invalid (zero) values\n xs = xs[15:-15]\n ys = ys[15:-15]\n\n # normalize the maximum value to 1\n ys = ys / ys.max()\n\n obs_time = spectrum.obs_date\n res = spectrum.resolution\n\n # compute obs_time at solar system center\n light_travel_time = obs_time.light_travel_time(algol_coordinate, location=observer_location)\n obs_time += light_travel_time\n\n algol_rv_a = algol.rv_A(obs_time)\n radial_velocity_correction = algol_coordinate.radial_velocity_correction(obstime=obs_time,\n location=observer_location)\n rv_predicted_a = algol_rv_a - radial_velocity_correction\n phase = algol.AB.phase(obs_time)\n\n def as_redshift(radial_velocity):\n return H_ALPHA * (radial_velocity / const.c).to(1)\n\n redshift_predicted_a = as_redshift(rv_predicted_a)\n\n # 2.354 is the scale between sigma and FWHM of a gaussian\n sigma = H_ALPHA / (res or 15000) / 2.354\n\n model_algol_a = AlgolHAlphaModel(redshift=redshift_predicted_a, sigma=sigma)\n model_algol_a.scale.fixed = True\n model_algol_a.redshift.fixed = True\n model_algol_a.sigma.fixed = True\n\n # TODO: calculate algol spectrum from the single spectra of components A, B and C\n # model_algol_b = AlgolHAlphaModel(redshift=redshift_predicted_b, sigma=sigma)\n # model_algol_c = AlgolHAlphaModel(redshift=redshift_predicted_c, sigma=sigma)\n\n # part_a, part_b, part_c = (1, 0, 0)\n # model_algol = part_a * model_algol_a + part_b * model_algol_b + part_c * model_algol_c\n\n normalization = normalize(xs, ys, ref_ys=model_algol_a(xs), degree_or_range=args.deg,\n continuum_ranges=continuum_ranges)\n\n normalized = normalization.norm\n snr = normalization.snr\n 
normalization.plot(plt.figure().add_subplot(111))\n\n image_normalized = \"%05d_norm1.png\" % n\n plt.title(\"Normalization: %s\" % model_algol_a)\n plt.savefig(os.path.join(args.output, image_normalized))\n plt.close()\n\n image_diff = \"%05d_diff.png\" % n\n\n xlim = np.asarray(model_algol_a.get_xlimits())\n xlim[0] = max(xlim[0], continuum_ranges.lower_bound())\n xlim[1] = min(xlim[1], continuum_ranges.upper_bound())\n\n # compute difference spectrum between the normalized observed and the reference spectrum\n # This is assumed to be the spectrum of the circum stellar disc\n diff_xs = xs - model_algol_a.redshift\n diff_ys = normalized - model_algol_a(xs)\n\n diff_mask = [x in disc_range for x in diff_xs]\n\n diff_xs = diff_xs[diff_mask]\n diff_ys = diff_ys[diff_mask]\n\n maxima = _find_maxima(diff_xs, diff_ys, H_ALPHA.value)\n\n diffs_by_phase.append(Diff(diff_xs, diff_ys, phase, maxima))\n if spectrum.resolution:\n snr_by_observer[spectrum.observer].append([spectrum.resolution, snr])\n\n create_diff_plot(model_algol_a, model_algol_a, normalized, maxima, spectrum.short_name, xlim, xs, ys,\n os.path.join(args.output, image_diff))\n\n def display(q, format_string):\n return ((format_string + \" %s\") % (q.value, q.unit)).replace('Angstrom', r'\\AA')\n\n def display_rv(rv):\n return r\"%.1f km/s, %.2f \\AA\" % (rv.to('km/s').value, as_redshift(rv).to('AA').value)\n\n tex_file.write(\"\\n\")\n tex_file.write(\"\\\\begin{center}\\n\")\n tex_file.write(\"\\\\begin{tabular}{|l|l|}\\n\")\n tex_file.write(\"\\\\hline\\n\")\n tex_file.write(\"Observer & %s \\\\\\\\\\n\" % spectrum.observer.replace('_', '\\\\_'))\n tex_file.write(\"Filename & \\\\seqsplit{%s} \\\\\\\\\\n\" % spectrum.short_name.replace('_', '\\\\_'))\n tex_file.write(\"\\\\hline\\n\")\n tex_file.write(\"Resolution $\\\\delta\\\\lambda/\\\\lambda$ & %s \\\\\\\\\\n\" % spectrum.resolution)\n tex_file.write(\"Sigma & %s \\\\\\\\\\n\" % display(sigma.to('AA'), \"%.2f\"))\n tex_file.write(\"SNR & %.0f \\\\\\\\\\n\" % snr)\n tex_file.write(\"\\\\hline\\n\")\n tex_file.write(\"Observation date $(UTC)$ & %s \\\\\\\\\\n\" % spectrum.obs_date.iso)\n tex_file.write(\"Light travel time& %s \\\\\\\\\\n\" % display(light_travel_time.to('min'), \"%.1f\"))\n tex_file.write(\"Phase & $%.2f$ \\\\\\\\\\n\" % phase)\n tex_file.write(\"\\\\hline\\n\")\n tex_file.write(\"Algol radial velocity & %s \\\\\\\\\\n\" % display_rv(algol_rv_a))\n tex_file.write(\"Barycentric correction & %s \\\\\\\\\\n\" % display_rv(radial_velocity_correction))\n tex_file.write(\"Final radial velocity& %s \\\\\\\\\\n\" % display_rv(rv_predicted_a))\n tex_file.write(\"\\\\hline\\n\")\n tex_file.write(\"Redshift, form data & %s \\\\\\\\\\n\" % display(redshift_predicted_a.to('AA'), \"%.2f\"))\n tex_file.write(\"\\\\hline\\n\")\n tex_file.write(\"\\\\end{tabular}\\n\")\n tex_file.write(\"\\\\end{center}\\n\")\n\n tex_file.write(\"\\n\")\n tex_file.write(\"\\\\includegraphics[width=\\\\textwidth]{%s}\\n\" % image_diff)\n tex_file.write(\"\\n\")\n tex_file.write(\"\\\\includegraphics[width=\\\\textwidth]{%s}\\n\" % image_normalized)\n tex_file.write(\"\\n\")\n tex_file.write(\"\\\\pagebreak\\n\")\n tex_file.write(\"\\n\")\n\n # end pass #2 spectra\n\n diffs_by_phase = sorted(diffs_by_phase, key=lambda diff: diff.phase)\n\n # TODO what is vmin, vmax?\n vmin = max(-max_diff, np.min([np.nanmin(diff.diff) for diff in diffs_by_phase]))\n vmax = min(+max_diff, np.max([np.nanmax(diff.diff) for diff in diffs_by_phase]))\n\n plot_diff(args.cmap, args.output, diff_image_name, 
diffs_by_phase, disc_range, vmin, vmax, False)\n plot_diff(args.cmap, args.output, diff_image_wm_name, diffs_by_phase, disc_range, vmin, vmax, True)\n\n plot_sorted_diff(args.cmap, args.output, sorted_diff_image_name, diffs_by_phase, disc_range, vmin, vmax, False)\n plot_sorted_diff(args.cmap, args.output, sorted_diff_image_wm_name, diffs_by_phase, disc_range, vmin, vmax, True)\n\n plot_snr_by_observer(args.output, snr_by_observer_name, snr_by_observer)\n\n\n tex_file.write(\"\\\\appendix\\n\")\n tex_file.write(\"\\\\section{SNRs and Resolutions}\\n\")\n tex_file.write(\"\\n\")\n tex_file.write(\"\\\\includegraphics[width=\\\\textwidth]{%s}\\n\" % snr_by_observer_name)\n tex_file.write(\"\\n\")\n\n # generate a txt file containing the maxima around H_alpha assumed to be hot-spots\n # the content is also written as table to the tex file\n max_file = open(os.path.join(args.output, \"maxima.dat\"), \"w\")\n max_file.write(\"#phase,w1,v1,y1,w2,v2,y2\\n\")\n tex_file.write(\"\\\\section{maxima of differences}\\n\")\n tex_file.write(\"\\n\")\n tex_file.write(\"The raw date is stored in {\\\\tt %s}\\n\" % \"maxima.dat\")\n tex_file.write(\"\\n\")\n tex_file.write(\"\\\\begin{longtable}{|l|lll|lll|}\\n\")\n tex_file.write(\"\\\\hline\\n\")\n tex_file.write(\"phase & $\\AA$ & $km/s$ & y & $\\AA$ & $km/s$ & y \\\\\\\\\\n\")\n tex_file.write(\"\\\\hline\\n\")\n\n for diff in diffs_by_phase:\n\n if len(diff.maxima) == 2:\n x1, y1 = diff.maxima[0]\n x2, y2 = diff.maxima[1]\n elif len(diff.maxima) == 1:\n x, y = diff.maxima[0]\n if x < H_ALPHA.value:\n x1, y1 = x, y\n x2, y2 = None, None\n else:\n x1, y1 = None, None\n x2, y2 = x, y\n else: # happens if both maxima are at the border\n continue\n\n v1 = ((x1 - H_ALPHA.value) / H_ALPHA.value * const.c).to('km/s').value if x1 else None\n v2 = ((x2 - H_ALPHA.value) / H_ALPHA.value * const.c).to('km/s').value if x2 else None\n\n def _(value, fmt):\n return fmt % value if value else ''\n\n tex_file.write(\"%.5f & %s & %s & %s & %s & %s & %s\\\\\\\\\\n\" %\n (diff.phase, _(x1, '%.1f'), _(v1, '%.0f'), _(y1, '%.3f'),\n _(x2, '%.1f'), _(v2, '%.0f'), _(y2, '%.3f')))\n\n max_file.write(\"%.5f,%s,%s,%s,%s,%s,%s\\n\" %\n (diff.phase, _(x1, '%.1f'), _(v1, '%.0f'), _(y1, '%.3f'),\n _(x2, '%.1f'), _(v2, '%.0f'), _(y2, '%.3f')))\n\n tex_file.write(\"\\\\hline\\n\")\n tex_file.write(\"\\\\end{longtable}\\n\")\n tex_file.write(\"\\n\")\n tex_file.write(\"\\n\")\n tex_file.write(\"\\\\section{spectra by phase}\\n\")\n tex_file.write(\"\\n\")\n tex_file.write(\"\\\\begin{longtable}{|l|l|l|l|}\\n\")\n tex_file.write(\"\\\\hline\\n\")\n tex_file.write(\"phase & observer & date & filename \\\\\\\\\\n\")\n tex_file.write(\"\\\\hline\\n\")\n\n for spectrum in sorted(spectra, key=lambda sp: algol.AB.phase(sp.obs_date)):\n tex_file.write(\"%.5f & %s & %s & \\\\seqsplit{%s}\\\\\\\\\\n\" %\n (algol.AB.phase(spectrum.obs_date), spectrum.observer.replace('_', '\\\\_'),\n spectrum.obs_date.iso[:10], spectrum.short_name.replace('_', '\\\\_')))\n\n tex_file.write(\"\\\\hline\\n\")\n tex_file.write(\"\\\\end{longtable}\\n\")\n tex_file.write(\"\\\\end{document}\\n\")\n\n max_file.close()\n tex_file.close()\n\n\ndef plot_sorted_diff(args_cmap, args_output, sorted_diff_image_name, diffs_by_phase, disc_range, vmin, vmax, plot_maxima):\n # create the trailed spectrum *sorted* by phase plot\n fig = plt.figure(figsize=[6.4, 4.8 * 2])\n plot = fig.add_subplot(111)\n plot.set_xlim(disc_range.lower_bound(), disc_range.upper_bound())\n plot.set_ylabel('Spectra sorted by phase')\n 
plot.set_xlabel('Wavelength ($\\AA$)')\n sc = None\n\n left_xs = []\n left_ys = []\n right_xs = []\n right_ys = []\n\n for i, diff in enumerate(diffs_by_phase):\n assert len(diff.wavelength) == len(diff.diff)\n\n ys = 1.0 * i * np.ones(len(diff.wavelength))\n sc = plot.scatter(diff.wavelength, ys, s=1, c=diff.diff, cmap=args_cmap, vmin=min(vmin, -vmax),\n vmax=max(vmax, -vmin))\n\n if 0.15 <= diff.phase <= 0.85:\n for x, y in diff.maxima:\n if x < H_ALPHA.value:\n left_xs.append(x)\n left_ys.append(i)\n else:\n right_xs.append(x)\n right_ys.append(i)\n\n plot.vlines(H_ALPHA.value, *plot.get_ylim())\n\n if plot_maxima:\n plot.plot(left_xs, left_ys, 'k')\n plot.plot(right_xs, right_ys, 'k')\n\n ax2 = plot.twiny()\n ax2.set_xlim(((np.asarray(plot.get_xlim()) - H_ALPHA.value) / H_ALPHA.value * const.c).to('km/s').value)\n ax2.set_xlabel('Radial velocity ($km/s$)')\n\n fig.colorbar(sc)\n plt.savefig(os.path.join(args_output, sorted_diff_image_name))\n plt.close()\n\n\ndef plot_snr_by_observer(args_output, filename, snr_by_observer):\n\n assert isinstance(filename, str)\n assert isinstance(snr_by_observer, dict)\n\n fig = plt.figure()\n plot = fig.add_subplot(111)\n plot.set_xlabel('Resolution $\\lambda / \\delta \\lambda$')\n plot.set_ylabel('SNR')\n\n for observer, resolutions_and_snrs in sorted(snr_by_observer.items()):\n resolutions = [i[0] for i in resolutions_and_snrs]\n snrs = [i[1] for i in resolutions_and_snrs]\n plot.scatter(resolutions, snrs, label=observer)\n\n plot.legend()\n plt.savefig(os.path.join(args_output, filename))\n plt.close(os.path.join(args_output, filename))\n\n\ndef plot_diff(args_cmap, args_output, diff_image_name, diffs_by_phase, disc_range, vmin, vmax, plot_maxima):\n \"\"\"\n Create the trailed spectrum by phase plot\n \"\"\"\n fig = plt.figure(figsize=[6.4, 4.8 * 2])\n plot = fig.add_subplot(111)\n plot.set_ylim(-0.5, 1.5)\n plot.set_xlim(disc_range.lower_bound(), disc_range.upper_bound())\n plot.set_ylabel('Phase')\n plot.set_xlabel('Wavelength ($\\AA$)')\n for diff in diffs_by_phase:\n\n assert len(diff.wavelength) == len(diff.diff)\n\n for offset in [-1, 0, 1]:\n ys = (diff.phase + offset) * np.ones(len(diff.wavelength))\n sc = plot.scatter(diff.wavelength, ys, s=1, c=diff.diff, cmap=args_cmap, vmin=min(vmin, -vmax),\n vmax=max(vmax, -vmin))\n plot.vlines(H_ALPHA.value, *plot.get_ylim())\n\n if plot_maxima:\n left_xs = []\n left_ys = []\n right_xs = []\n right_ys = []\n\n for diff in diffs_by_phase:\n\n if 0.15 <= diff.phase <= 0.85:\n for x, y in diff.maxima:\n if x < H_ALPHA.value:\n left_ys.append(diff.phase)\n left_xs.append(x)\n else:\n right_xs.append(x)\n right_ys.append(diff.phase)\n plot.plot(left_xs, left_ys, 'k')\n plot.plot(right_xs, right_ys, 'k')\n\n ax2 = plot.twiny()\n ax2.set_xlim(((np.asarray(plot.get_xlim()) - H_ALPHA.value) / H_ALPHA.value * const.c).to('km/s').value)\n ax2.set_xlabel('Radial velocity ($km/s$)')\n\n fig.colorbar(sc)\n\n plt.savefig(os.path.join(args_output, diff_image_name))\n plt.close()\n return sc\n\n\ndef create_diff_plot(final_model, initial_model, normalized, maxima, title, xlim, xs, ys, image_path):\n\n redshift = final_model.redshift\n\n plot = plt.figure().add_subplot(111)\n plot.set_ylim(-0.5, 1.5)\n plot.set_xlim(xlim)\n\n plot.plot(xs, 0.6 * ys, label='measured')\n plot.plot(xs, normalized, label='normalized')\n\n plot.plot(xs, initial_model(xs), label='predicted %s' % initial_model)\n if final_model is not initial_model:\n plot.plot(xs, final_model(xs), label='fitted %s' % final_model)\n\n plot.plot(xs, 
normalized - final_model(xs), label='normalized - fitted')\n\n if maxima:\n for x, y in maxima:\n plot.vlines(x + redshift, ymin=0, ymax=y, label='maxima')\n\n plot.hlines(0, xlim[0], xlim[1])\n plot.vlines(H_ALPHA.value + redshift, *plot.get_ylim())\n plot.set_title(title)\n plot.legend(loc='upper right')\n\n plt.savefig(image_path)\n plt.close()\n\n\ndef _find_maxima(xs, ys, center):\n \"\"\"\\\n Find maxima of ys below and above the center wave length, i.e. H_alpha\n \"\"\"\n\n result = []\n\n for r in [closed_range(np.min(xs), center), closed_range(center, np.max(xs))]:\n\n mask = [x in r for x in xs]\n\n ys_in_r = ys[mask]\n arg = np.argmax(ys_in_r)\n\n if arg == 0 or arg + 1 == len(ys_in_r):\n logger.debug('ignore maximum in %s at %s bound', r, 'lower' if arg == 0 else 'upper')\n continue\n\n y = ys_in_r[arg]\n x = xs[mask][arg]\n\n logger.debug('maximum in %s at x=%.1f, y=%.2f', r, x, y)\n result.append((x, y))\n\n return tuple(result)\n\n\ndef _find_minimum(xs, ys, dx, range_AA, box_size_AA):\n xs = np.asarray(xs)\n ys = np.asarray(ys)\n\n width = int(np.ceil(box_size_AA / dx))\n\n kernel = Box1DKernel(width)\n\n ys = convolve(ys, kernel=kernel, boundary=None)\n\n assert len(xs) == len(ys)\n\n # remove convolution boundaries\n clip = kernel.array.size // 2\n xs = xs[clip:-clip]\n ys = ys[clip:-clip]\n\n mask = [H_ALPHA.value - range_AA <= x <= H_ALPHA.value + range_AA for x in xs]\n xs = xs[mask]\n ys = ys[mask]\n\n i = np.argmin(ys)\n return xs[i]\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"matplotlib.pyplot.axes",
"matplotlib.pyplot.show"
],
[
"numpy.nanmax",
"matplotlib.pyplot.title",
"numpy.min",
"numpy.asarray",
"numpy.nanmin",
"matplotlib.pyplot.savefig",
"numpy.ceil",
"numpy.max",
"numpy.argmax",
"numpy.argmin",
"matplotlib.pyplot.close",
"matplotlib.cm.datad.keys",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
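The `_find_maxima` helper in the code cell of the record above searches the difference spectrum for one emission peak below and one above H-alpha, discarding any peak that falls on the border of its search range. Below is a minimal, self-contained sketch of that two-sided search; the 6562.8 AA reference value and the synthetic two-bump test spectrum are illustrative assumptions, not values taken from this dataset.

import numpy as np

H_ALPHA_AA = 6562.8  # assumed H-alpha rest wavelength in Angstrom (illustrative)

def find_maxima(xs, ys, center=H_ALPHA_AA):
    """Return up to two (x, y) maxima of ys: one below and one above `center`."""
    xs = np.asarray(xs, dtype=float)
    ys = np.asarray(ys, dtype=float)
    result = []
    for lo, hi in ((xs.min(), center), (center, xs.max())):
        mask = (xs >= lo) & (xs <= hi)
        if not mask.any():
            continue
        sub_x, sub_y = xs[mask], ys[mask]
        arg = int(np.argmax(sub_y))
        # a maximum sitting on the border of the sub-range is ignored,
        # mirroring the `arg == 0 or arg + 1 == len(ys_in_r)` check above
        if arg == 0 or arg == len(sub_y) - 1:
            continue
        result.append((float(sub_x[arg]), float(sub_y[arg])))
    return tuple(result)

if __name__ == "__main__":
    # synthetic difference spectrum with one bump on each side of H-alpha
    x = np.linspace(6540.0, 6590.0, 500)
    y = (np.exp(-0.5 * ((x - 6556.0) / 2.0) ** 2)
         + 0.8 * np.exp(-0.5 * ((x - 6570.0) / 2.0) ** 2))
    print(find_maxima(x, y))  # expect one (x, y) pair per side of H-alpha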
lidongyv/Explicit-Context-Mapping-for-Stereo-Matching | [
"9b2e63982daf5629045de0bf0694d8ccb111b2f1",
"9b2e63982daf5629045de0bf0694d8ccb111b2f1",
"9b2e63982daf5629045de0bf0694d8ccb111b2f1",
"9b2e63982daf5629045de0bf0694d8ccb111b2f1"
] | [
"cmf/models/cmfsm.py",
"back of code/CMF/cmf/models/rstereo-20181014164030.py",
"back of code/CMF/cmf/models/rstereo-20181014164536.py",
"cmf/loader/KITTI.py"
] | [
"# -*- coding: utf-8 -*-\n# @Author: yulidong\n# @Date: 2018-07-17 10:44:43\n# @Last Modified by: yulidong\n# @Last Modified time: 2019-03-01 14:12:35\n# -*- coding: utf-8 -*-\n# @Author: lidong\n# @Date: 2018-03-20 18:01:52\n# @Last Modified by: yulidong\n# @Last Modified time: 2018-07-16 22:16:14\nimport time\nimport torch\nimport numpy as np\nimport torch.nn as nn\nimport math\nfrom math import ceil\nfrom torch.autograd import Variable\nfrom torch.nn.functional import cosine_similarity as cosine_s\nfrom cmf import caffe_pb2\nfrom cmf.models.utils import *\nrsn_specs = {\n 'scene': \n {\n 'n_classes': 9,\n 'input_size': (540, 960),\n 'block_config': [3, 4, 23, 3],\n },\n\n}\n\ngroup_dim=32\npramid_dim=8\ngroup_norm_group_num = 32\n\n\ndef convbn(in_planes, out_planes, kernel_size, stride, pad, dilation):\n\n return nn.Sequential(\n nn.Conv2d(\n in_planes,\n out_planes,\n kernel_size=kernel_size,\n stride=stride,\n padding=dilation if dilation > 1 else pad,\n dilation=dilation,\n bias=False), nn.GroupNorm(group_norm_group_num, out_planes))\n\n\ndef convbn_3d(in_planes, out_planes, kernel_size, stride, pad):\n\n return nn.Sequential(\n nn.Conv3d(\n in_planes,\n out_planes,\n kernel_size=kernel_size,\n padding=pad,\n stride=stride,\n bias=False), nn.GroupNorm(group_norm_group_num, out_planes))\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride, downsample, pad, dilation):\n super(BasicBlock, self).__init__()\n\n self.conv1 = nn.Sequential(\n convbn(inplanes, planes, 3, stride, pad, dilation),\n nn.ReLU(inplace=True))\n\n self.conv2 = convbn(planes, planes, 3, 1, pad, dilation)\n\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.conv2(out)\n\n if self.downsample is not None:\n x = self.downsample(x)\n\n out += x\n\n return out\n\n\nclass matchshifted(nn.Module):\n def __init__(self):\n super(matchshifted, self).__init__()\n\n def forward(self, left, right, shift):\n batch, filters, height, width = left.size()\n shifted_left = F.pad(\n torch.index_select(\n left, 3,\n Variable(torch.LongTensor(\n [i for i in range(shift, width)])).cuda()),\n (shift, 0, 0, 0))\n shifted_right = F.pad(\n torch.index_select(\n right, 3,\n Variable(torch.LongTensor(\n [i for i in range(width - shift)])).cuda()),\n (shift, 0, 0, 0))\n out = torch.cat((shifted_left, shifted_right), 1).view(\n batch, filters * 2, 1, height, width)\n return out\n\n\nclass disparityregression(nn.Module):\n def __init__(self, maxdisp):\n super().__init__()\n self.disp = Variable(\n torch.Tensor(\n np.reshape(np.array(range(maxdisp)),\n [1, maxdisp, 1, 1])).cuda(),\n requires_grad=False)\n\n def forward(self, x):\n disp = self.disp.repeat(x.size()[0], 1, x.size()[2], x.size()[3])\n out = torch.sum(x * disp, 1)\n return out\n\n\nclass feature_extraction(nn.Module):\n def __init__(self):\n super(feature_extraction, self).__init__()\n self.inplanes = 32\n self.firstconv = nn.Sequential(\n convbn(3, 32, 3, 1, 1, 1),\n # nn.GroupNorm(group_dim, 32),\n nn.ReLU(inplace=True),\n convbn(32, 32, 3, 1, 1, 1),\n nn.ReLU(inplace=True),\n convbn(32, 32, 3, 1, 1, 1),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 32, kernel_size=3, padding=1, stride=1, bias=False))\n self.secondconv = nn.Sequential(\n nn.GroupNorm(group_dim, 32),\n nn.ReLU(inplace=True),\n convbn(32, 32, 3, 2, 1, 1),\n nn.ReLU(inplace=True),\n convbn(32, 32, 3, 1, 1, 1),\n nn.ReLU(inplace=True))\n\n self.layer1 = self._make_layer(BasicBlock, 32, 3, 1, 1, 1)\n self.layer2 = 
self._make_layer(BasicBlock, 64, 16, 2, 1, 1)\n self.layer3 = self._make_layer(BasicBlock, 128, 3, 1, 1, 1)\n self.layer4 = self._make_layer(BasicBlock, 128, 3, 1, 1, 2)\n\n self.branch1 = nn.Sequential(\n nn.AvgPool2d((64, 64), stride=(64, 64)),\n convbn(128, 32, 1, 1, 0, 1),\n nn.ReLU(inplace=True))\n\n self.branch2 = nn.Sequential(\n nn.AvgPool2d((32, 32), stride=(32, 32)),\n convbn(128, 32, 1, 1, 0, 1),\n nn.ReLU(inplace=True))\n\n self.branch3 = nn.Sequential(\n nn.AvgPool2d((16, 16), stride=(16, 16)),\n convbn(128, 32, 1, 1, 0, 1),\n nn.ReLU(inplace=True))\n\n self.branch4 = nn.Sequential(\n nn.AvgPool2d((8, 8), stride=(8, 8)),\n convbn(128, 32, 1, 1, 0, 1),\n nn.ReLU(inplace=True))\n\n self.lastconv = nn.Sequential(\n convbn(320, 128, 3, 1, 1, 1),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 32, kernel_size=1, padding=0, stride=1, bias=False))\n\n def _make_layer(self, block, planes, blocks, stride, pad, dilation):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(\n self.inplanes,\n planes * block.expansion,\n kernel_size=1,\n stride=stride,\n bias=False),\n nn.GroupNorm(group_norm_group_num, planes * block.expansion),\n )\n\n layers = []\n layers.append(\n block(self.inplanes, planes, stride, downsample, pad, dilation))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes, 1, None, pad, dilation))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n output_all = self.firstconv(x)\n output=self.secondconv(output_all)\n output_rt = self.layer1(output)\n output_raw = self.layer2(output_rt)\n output = self.layer3(output_raw)\n output_skip = self.layer4(output)\n\n output_branch1 = self.branch1(output_skip)\n output_branch1 = F.interpolate(\n output_branch1, (output_skip.size()[2], output_skip.size()[3]),\n mode='bilinear',\n align_corners=False)\n\n output_branch2 = self.branch2(output_skip)\n output_branch2 = F.interpolate(\n output_branch2, (output_skip.size()[2], output_skip.size()[3]),\n mode='bilinear',\n align_corners=False)\n\n output_branch3 = self.branch3(output_skip)\n output_branch3 = F.interpolate(\n output_branch3, (output_skip.size()[2], output_skip.size()[3]),\n mode='bilinear',\n align_corners=False)\n\n output_branch4 = self.branch4(output_skip)\n output_branch4 = F.interpolate(\n output_branch4, (output_skip.size()[2], output_skip.size()[3]),\n mode='bilinear',\n align_corners=False)\n\n output_feature = torch.cat(\n (output_raw, output_skip, output_branch4, output_branch3,\n output_branch2, output_branch1), 1)\n output_feature = self.lastconv(output_feature)\n\n return output_feature, output_rt,output_all\n\n\n\nclass hourglass(nn.Module):\n def __init__(self, inplanes):\n super().__init__()\n\n self.conv1 = nn.Sequential(\n convbn_3d(inplanes, inplanes * 2, kernel_size=3, stride=2, pad=1),\n nn.ReLU(inplace=True))\n\n self.conv2 = convbn_3d(\n inplanes * 2, inplanes * 2, kernel_size=3, stride=1, pad=1)\n\n self.conv3 = nn.Sequential(\n convbn_3d(\n inplanes * 2, inplanes * 2, kernel_size=3, stride=2, pad=1),\n nn.ReLU(inplace=True))\n\n self.conv4 = nn.Sequential(\n convbn_3d(\n inplanes * 2, inplanes * 2, kernel_size=3, stride=1, pad=1),\n nn.ReLU(inplace=True))\n\n self.conv5 = nn.Sequential(\n nn.ConvTranspose3d(\n inplanes * 2,\n inplanes * 2,\n kernel_size=3,\n padding=1,\n output_padding=1,\n stride=2,\n bias=False), nn.GroupNorm(group_norm_group_num,\n inplanes * 2)) # +conv2\n\n self.conv6 = nn.Sequential(\n 
nn.ConvTranspose3d(\n inplanes * 2,\n inplanes,\n kernel_size=3,\n padding=1,\n output_padding=(1,1,1),\n stride=2,\n bias=False), nn.GroupNorm(group_norm_group_num,\n inplanes)) # +x\n\n def forward(self, x, presqu, postsqu):\n\n out = self.conv1(x) # in:1/4 out:1/8\n pre = self.conv2(out) # in:1/8 out:1/8\n if postsqu is not None:\n pre = F.relu(pre + postsqu, inplace=True)\n else:\n pre = F.relu(pre, inplace=True)\n\n out = self.conv3(pre) # in:1/8 out:1/16\n out = self.conv4(out) # in:1/16 out:1/16\n\n if presqu is not None:\n post = F.relu(\n self.conv5(out) + presqu, inplace=True) # in:1/16 out:1/8\n else:\n post = F.relu(self.conv5(out) + pre, inplace=True)\n\n out = self.conv6(post) # in:1/8 out:1/4\n\n return out, pre, post\nclass similarity_measure1(nn.Module):\n def __init__(self):\n super(similarity_measure1, self).__init__()\n self.inplanes = 32\n self.conv0 = nn.Conv2d(66, 32, kernel_size=1, stride=1, padding=0,\n bias=False,dilation=1)\n self.relu0 = nn.LeakyReLU(inplace=True) \n self.conv1 = nn.Conv2d(32, 16, kernel_size=1, stride=1, padding=0,\n bias=False,dilation=1) \n self.relu1 = nn.LeakyReLU(inplace=True)\n self.conv2 = nn.Conv2d(16, 8, kernel_size=1, stride=1, padding=0,\n bias=False,dilation=1)\n self.relu2 = nn.LeakyReLU(inplace=True)\n self.conv3 = nn.Conv2d(8, 1, kernel_size=1, stride=1, padding=0,\n bias=False,dilation=1)\n #self.relu3 = nn.Sigmoid()\n # self.conv4 = nn.Conv2d(16, 8, kernel_size=1, stride=1, padding=0,\n # bias=False,dilation=1)\n # self.relu4 = nn.LeakyReLU(inplace=True)\n # self.conv5 = nn.Conv2d(8, 1, kernel_size=1, stride=1, padding=0,\n # bias=False,dilation=1)\n # self.relu5 = nn.ReLU(inplace=True)\n #self.s1=nn.Parameter(torch.ones(1)).float()*0.5\n\n for m in self.modules():\n if isinstance(m,nn.Conv2d):\n nn.init.kaiming_normal_(m.weight,mode='fan_out',nonlinearity='relu')\n elif isinstance(m, nn.GroupNorm):\n nn.init.constant_(m.weight,1)\n nn.init.constant_(m.bias,0)\n def forward(self, x):\n\n output = self.conv0(x)\n output = self.relu0(output)\n output = self.conv1(output)\n output = self.relu1(output)\n output = self.conv2(output)\n output = self.relu2(output)\n output = self.conv3(output)\n #output = self.relu3(output)\n # output = self.conv4(output)\n # output = self.relu4(output)\n # output = self.conv5(output)\n # #output = torch.abs(output)\n # output = self.relu5(output)\n\n # print(output.shape)\n # print(torch.mean(output).item(),torch.max(output).item(),torch.min(output).item())\n\n # output = output/torch.max(output)\n # output = output-torch.min(output)\n # output = 1-output\n # output = torch.exp(-output)\n #print(torch.mean(output).item(),torch.max(output).item(),torch.min(output).item())\n return output\nclass similarity_measure2(nn.Module):\n def __init__(self):\n super(similarity_measure2, self).__init__()\n self.inplanes = 32\n self.conv0 = nn.Conv2d(3, 3, kernel_size=1, stride=1, padding=0,\n bias=False,dilation=1)\n self.relu0 = nn.LeakyReLU(inplace=True) \n self.conv1 = nn.Conv2d(3, 2, kernel_size=1, stride=1, padding=0,\n bias=False,dilation=1) \n self.relu1 = nn.LeakyReLU(inplace=True)\n self.conv2 = nn.Conv2d(2, 1, kernel_size=1, stride=1, padding=0,\n bias=False,dilation=1)\n self.relu2 = nn.LeakyReLU(inplace=True) \n #self.s2=nn.Parameter(torch.ones(1)).float()*0.5\n\n for m in self.modules():\n if isinstance(m,nn.Conv2d):\n nn.init.kaiming_normal_(m.weight,mode='fan_out',nonlinearity='relu')\n elif isinstance(m, nn.GroupNorm):\n nn.init.constant_(m.weight,1)\n nn.init.constant_(m.bias,0)\n def 
forward(self, x):\n\n output = self.conv0(x)\n output = self.relu0(output)\n output = self.conv1(output)\n output = self.relu1(output)\n output = self.conv2(output)\n output = self.relu2(output)\n return output\n\n\ndef matrix_generation():\n scale=4\n x=torch.arange(-scale//2,scale//2+1).float()\n x=torch.cat([x[:x.shape[0]//2],x[x.shape[0]//2+1:]]).unsqueeze(0)\n distance_matrix=x.expand(scale,scale).unsqueeze(0)\n\n distance_matrix=torch.cat([distance_matrix,distance_matrix.transpose(2,1)],0)\n distance_matrix=distance_matrix.unsqueeze(0)\n distance_matrix1=distance_matrix+0\n distance_matrix2=distance_matrix+0\n distance_matrix3=distance_matrix+0\n distance_matrix4=distance_matrix+0\n distance_matrix5=distance_matrix+0\n distance_matrix6=distance_matrix+0\n distance_matrix7=distance_matrix+0\n distance_matrix8=distance_matrix+0\n x=torch.arange(1,scale+1).float()\n x=x.expand(scale,scale).unsqueeze(0)\n #x=x.repeat(hr_feature.shape[0],hr_feature.shape[-2]//scale,hr_feature.shape[-1]//scale).float().cuda()\n distance_matrix1[:,0,:,:]=scale-x+1\n distance_matrix2[:,0,:,:]=x\n distance_matrix5[:,0,:,:]=distance_matrix2[:,0,:,:]\n distance_matrix6[:,0,:,:]=distance_matrix1[:,0,:,:]\n distance_matrix7[:,0,:,:]=distance_matrix2[:,0,:,:]\n distance_matrix8[:,0,:,:]=distance_matrix1[:,0,:,:]\n x=torch.arange(1,scale+1).float()\n x=x.expand(scale,scale).unsqueeze(0).transpose(2,1)\n\n distance_matrix3[:,1,:,:]=(scale-x+1)\n distance_matrix4[:,1,:,:]=x\n distance_matrix5[:,1,:,:]=distance_matrix3[:,1,:,:]\n distance_matrix6[:,1,:,:]=distance_matrix3[:,1,:,:]\n distance_matrix7[:,1,:,:]=distance_matrix4[:,1,:,:]\n distance_matrix8[:,1,:,:]=distance_matrix4[:,1,:,:]\n # print(distance_matrix3)\n \n return distance_matrix.cuda(),distance_matrix1.cuda(),distance_matrix2.cuda(),distance_matrix3.cuda(),distance_matrix4.cuda(), \\\n distance_matrix5.cuda(),distance_matrix6.cuda(),distance_matrix7.cuda(),distance_matrix8.cuda()\n\n\nclass eight_related_context_mapping(nn.Module):\n def __init__(self):\n super(eight_related_context_mapping,self).__init__()\n self.similarity1=similarity_measure1()\n #need to remove\n #self.similarity2=similarity_measure2()\n # self.fuse=nn.Sequential(nn.Conv2d(2, 1, kernel_size=1, stride=1, padding=0,\n # bias=False,dilation=1),nn.LeakyReLU(inplace=True))\n #self.fuse.weight.data.fill_(1)\n self.sigmoid=nn.Sigmoid()\n self.distance_matrix,self.distance_matrix1,self.distance_matrix2,self.distance_matrix3,self.distance_matrix4, \\\n self.distance_matrix5,self.distance_matrix6,self.distance_matrix7,self.distance_matrix8=matrix_generation()\n def forward(self, lr_feature, hr_feature,lr_feature_r, hr_feature_r):\n \n #self.fuse.weight.data=torch.abs(self.fuse.weight.data)\n with torch.no_grad():\n scale=hr_feature.shape[-1]//lr_feature.shape[-1]\n if scale%2!=0:\n exit()\n\n padding1=hr_feature[:,:1,:,:scale]*0-100\n padding2=hr_feature[:,:1,:scale,:]*0-100\n\n distance_matrix=self.distance_matrix.repeat(hr_feature.shape[0],1,hr_feature.shape[-2]//scale,hr_feature.shape[-1]//scale).float()\n distance_matrix1=self.distance_matrix1.repeat(hr_feature.shape[0],1,hr_feature.shape[-2]//scale,hr_feature.shape[-1]//scale).float()\n distance_matrix2=self.distance_matrix2.repeat(hr_feature.shape[0],1,hr_feature.shape[-2]//scale,hr_feature.shape[-1]//scale).float()\n distance_matrix3=self.distance_matrix3.repeat(hr_feature.shape[0],1,hr_feature.shape[-2]//scale,hr_feature.shape[-1]//scale).float()\n 
distance_matrix4=self.distance_matrix4.repeat(hr_feature.shape[0],1,hr_feature.shape[-2]//scale,hr_feature.shape[-1]//scale).float()\n distance_matrix5=self.distance_matrix1.repeat(hr_feature.shape[0],1,hr_feature.shape[-2]//scale,hr_feature.shape[-1]//scale).float()\n distance_matrix6=self.distance_matrix2.repeat(hr_feature.shape[0],1,hr_feature.shape[-2]//scale,hr_feature.shape[-1]//scale).float()\n distance_matrix7=self.distance_matrix3.repeat(hr_feature.shape[0],1,hr_feature.shape[-2]//scale,hr_feature.shape[-1]//scale).float()\n distance_matrix8=self.distance_matrix4.repeat(hr_feature.shape[0],1,hr_feature.shape[-2]//scale,hr_feature.shape[-1]//scale).float()\n #center\n #reference image\n lr_feature=lr_feature.unsqueeze(-1).expand(lr_feature.shape[0],lr_feature.shape[1],lr_feature.shape[2],lr_feature.shape[3],scale) \\\n .contiguous().view(lr_feature.shape[0],lr_feature.shape[1],lr_feature.shape[2],lr_feature.shape[3]*scale) \\\n .unsqueeze(-2).expand(lr_feature.shape[0],lr_feature.shape[1],lr_feature.shape[2],scale,lr_feature.shape[3]*scale) \\\n .contiguous().view(lr_feature.shape[0],lr_feature.shape[1],lr_feature.shape[2]*scale,lr_feature.shape[3]*scale)\n\n representation=torch.cat([lr_feature,hr_feature,distance_matrix],1)\n weight=self.similarity1(representation)\n\n #target image\n # lr_feature_r=lr_feature_r.unsqueeze(-1).expand(lr_feature_r.shape[0],lr_feature_r.shape[1],lr_feature_r.shape[2],lr_feature_r.shape[3],scale) \\\n # .contiguous().view(lr_feature_r.shape[0],lr_feature_r.shape[1],lr_feature_r.shape[2],lr_feature_r.shape[3]*scale) \\\n # .unsqueeze(-2).expand(lr_feature_r.shape[0],lr_feature_r.shape[1],lr_feature_r.shape[2],scale,lr_feature_r.shape[3]*scale) \\\n # .contiguous().view(lr_feature_r.shape[0],lr_feature_r.shape[1],lr_feature_r.shape[2]*scale,lr_feature_r.shape[3]*scale)\n\n # representation_target=torch.cat([lr_feature_r,hr_feature_r,distance_matrix],1)\n # weight_target=self.similarity1(representation_target)\n\n #left\n #reference\n representation_l=torch.cat([lr_feature[:,:,:,:-scale],hr_feature[:,:,:,scale:],distance_matrix1[:,:,:,:-scale]],1)\n weight_l=self.similarity1(representation_l)\n weight_l=torch.cat([padding1,weight_l],-1)\n #target\n # representation_l_target=torch.cat([lr_feature_r[:,:,:,:-scale],hr_feature_r[:,:,:,scale:],distance_matrix2[:,:,:,:-scale]],1)\n # weight_l_target=self.similarity1(representation_l_target)\n # weight_l_target=torch.cat([padding1,weight_l_target],-1)\n #right\n #reference\n representation_r=torch.cat([lr_feature[:,:,:,scale:],hr_feature[:,:,:,:-scale],distance_matrix2[:,:,:,scale:]],1)\n weight_r=self.similarity1(representation_r)\n weight_r=torch.cat([weight_r,padding1],-1)\n\n #target image\n # representation_r_target=torch.cat([lr_feature_r[:,:,:,scale:],hr_feature_r[:,:,:,:-scale],distance_matrix1[:,:,:,scale:]],1)\n # weight_r_target=self.similarity1(representation_r_target)\n # weight_r_target=torch.cat([weight_r_target,padding1],-1)\n #top\n #reference\n representation_t=torch.cat([lr_feature[:,:,:-scale,:],hr_feature[:,:,scale:,:],distance_matrix3[:,:,:-scale,:]],1)\n weight_t=self.similarity1(representation_t)\n weight_t=torch.cat([padding2,weight_t],-2)\n #target\n # representation_t_target=torch.cat([lr_feature_r[:,:,:-scale,:],hr_feature_r[:,:,scale:,:],distance_matrix3[:,:,:-scale,:]],1)\n # weight_t_target=self.similarity1(representation_t_target)\n # weight_t_target=torch.cat([padding2,weight_t_target],-2)\n #bottom\n #reference\n 
representation_b=torch.cat([lr_feature[:,:,scale:,:],hr_feature[:,:,:-scale,:],distance_matrix4[:,:,scale:,:]],1)\n weight_b=self.similarity1(representation_b)\n weight_b=torch.cat([weight_b,padding2],-2)\n\n #left-top\n #reference\n representation_lt=torch.cat([lr_feature[:,:,:-scale,:-scale],hr_feature[:,:,scale:,scale:],distance_matrix5[:,:,:-scale,:-scale]],1)\n weight_lt=self.similarity1(representation_lt)\n weight_lt=torch.cat([padding2,torch.cat([padding1[...,scale:,:],weight_lt],-1)],-2)\n #target\n # representation_l_target=torch.cat([lr_feature_r[:,:,:,:-scale],hr_feature_r[:,:,:,scale:],distance_matrix2[:,:,:,:-scale]],1)\n # weight_l_target=self.similarity1(representation_l_target)\n # weight_l_target=torch.cat([padding1,weight_l_target],-1)\n #right-top\n #reference\n representation_rt=torch.cat([lr_feature[:,:,:-scale,scale:],hr_feature[:,:,scale:,:-scale],distance_matrix6[:,:,:-scale,scale:]],1)\n weight_rt=self.similarity1(representation_rt)\n weight_rt=torch.cat([padding2,torch.cat([weight_rt,padding1[...,scale:,:]],-1)],-2)\n\n #target image\n # representation_r_target=torch.cat([lr_feature_r[:,:,:,scale:],hr_feature_r[:,:,:,:-scale],distance_matrix1[:,:,:,scale:]],1)\n # weight_r_target=self.similarity1(representation_r_target)\n # weight_r_target=torch.cat([weight_r_target,padding1],-1)\n #left-bottom\n #reference\n representation_lb=torch.cat([lr_feature[:,:,scale:,:-scale],hr_feature[:,:,:-scale:,scale:],distance_matrix7[:,:,scale:,:-scale]],1)\n weight_lb=self.similarity1(representation_lb)\n weight_lb=torch.cat([torch.cat([padding1[...,scale:,:],weight_lb],-1),padding2],-2)\n #target\n # representation_t_target=torch.cat([lr_feature_r[:,:,:-scale,:],hr_feature_r[:,:,scale:,:],distance_matrix3[:,:,:-scale,:]],1)\n # weight_t_target=self.similarity1(representation_t_target)\n # weight_t_target=torch.cat([padding2,weight_t_target],-2)\n #right-bottom\n #reference\n representation_rb=torch.cat([lr_feature[:,:,scale:,scale:],hr_feature[:,:,:-scale,:-scale],distance_matrix8[:,:,scale:,scale:]],1)\n weight_rb=self.similarity1(representation_rb)\n weight_rb=torch.cat([torch.cat([weight_rb,padding1[...,:-scale,:]],-1),padding2],-2)\n\n\n weight_all=torch.cat([weight,weight_l,weight_r,weight_t,weight_b,weight_lt,weight_rt,weight_lb,weight_rb],dim=1)\n weight_norm=F.softmax(weight_all, dim=1)\n #weight_fuse=F.softmax(weight_norm*weight_all)\n #target\n # representation_b_target=torch.cat([lr_feature_r[:,:,scale:,:],hr_feature_r[:,:,:-scale,:],distance_matrix4[:,:,scale:,:]],1)\n # weight_b_target=self.similarity1(representation_b_target)\n # weight_b_target=torch.cat([weight_b_target,padding2],-2)\n\n # weight_all=torch.cat([weight,weight_r,weight_l,weight_t,weight_b],dim=1)\n # weight_norm=F.softmax(weight_all, dim=1)\n # weight_all_target=torch.cat([weight_target,weight_r_target,weight_l_target,weight_t_target,weight_b_target],dim=1)\n # weight_norm_target=F.softmax(weight_all_target, dim=1)\n\n # return weight*weight_norm[:,0:1,:,:],weight_target*weight_norm_target[:,0:1,:,:], \\\n # weight_r*weight_norm[:,1:2,:,:],weight_r_target*weight_norm_target[:,1:2,:,:], \\\n # weight_l*weight_norm[:,2:3,:,:],weight_l_target*weight_norm_target[:,2:3,:,:], \\\n # weight_t*weight_norm[:,3:4,:,:],weight_t_target*weight_norm_target[:,3:4,:,:], \\\n # weight_b*weight_norm[:,4:5,:,:],weight_b_target*weight_norm_target[:,4:5,:,:]\n # return self.sigmoid(weight)*weight_norm[:,0:1,...], \\\n # self.sigmoid(weight_l)*weight_norm[:,1:2,...], \\\n # self.sigmoid(weight_r)*weight_norm[:,2:3,...], 
\\\n # self.sigmoid(weight_t)*weight_norm[:,3:4,...], \\\n # self.sigmoid(weight_b)*weight_norm[:,4:5,...],\\\n # self.sigmoid(weight_lt)*weight_norm[:,5:6,...], \\\n # self.sigmoid(weight_rt)*weight_norm[:,6:7,...], \\\n # self.sigmoid(weight_lb)*weight_norm[:,7:8,...], \\\n # self.sigmoid(weight_rb)*weight_norm[:,8:9,...]\n #print(torch.mean(torch.max(weight_norm,dim=1)[0]),torch.max(weight_all,dim=1)[0])\n #print(torch.mean(torch.topk(weight_all,3,dim=1)[0].float()),torch.mean(torch.topk(weight_all,3,dim=1)[1].float()))\n #print(torch.mean(torch.topk(weight_all,1,dim=1)[0].float()),torch.mean(torch.topk(weight_all,1,dim=1)[1].float()))\n if torch.mean(torch.topk(weight_all,1,dim=1)[0].float())<0:\n print(torch.mean(torch.topk(weight_all,3,dim=1)[0].float()),torch.mean(torch.topk(weight_all,3,dim=1)[1].float()))\n print(torch.mean(torch.topk(weight_all,1,dim=1)[0].float()),torch.mean(torch.topk(weight_all,1,dim=1)[1].float()))\n #print(torch.mean(torch.min(weight_norm,dim=1)[0]),torch.min(weight_all,dim=1)[0])\n return weight_norm[:,0:1,...], \\\n weight_norm[:,1:2,...], \\\n weight_norm[:,2:3,...], \\\n weight_norm[:,3:4,...], \\\n weight_norm[:,4:5,...],\\\n weight_norm[:,5:6,...], \\\n weight_norm[:,6:7,...], \\\n weight_norm[:,7:8,...], \\\n weight_norm[:,8:9,...]\nclass cmfsm(nn.Module):\n\n\n def __init__(self, \n maxdisp=192):\n\n super(cmfsm, self).__init__()\n self.maxdisp = maxdisp\n self.feature_extraction = feature_extraction()\n\n self.dres0 = nn.Sequential(\n convbn_3d(64, 32, 3, 1, 1),\n nn.ReLU(inplace=True),\n convbn_3d(32, 32, 3, 1, 1),\n nn.ReLU(inplace=True))\n\n self.dres1 = nn.Sequential(\n convbn_3d(32, 32, 3, 1, 1),\n nn.ReLU(inplace=True),\n convbn_3d(32, 32, 3, 1, 1))\n\n self.dres2 = hourglass(32)\n\n self.dres3 = hourglass(32)\n\n self.dres4 = hourglass(32)\n\n self.classif1 = nn.Sequential(\n convbn_3d(32, 32, 3, 1, 1),\n nn.ReLU(inplace=True),\n nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1, bias=False))\n\n self.classif2 = nn.Sequential(\n convbn_3d(32, 32, 3, 1, 1),\n nn.ReLU(inplace=True),\n nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1, bias=False))\n\n self.classif3 = nn.Sequential(\n convbn_3d(32, 32, 3, 1, 1),\n nn.ReLU(inplace=True),\n nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1, bias=False))\n self.mapping_matrix=eight_related_context_mapping()\n\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n elif isinstance(m, nn.Conv3d):\n n = m.kernel_size[0] * m.kernel_size[1] * \\\n m.kernel_size[2] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm3d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n m.bias.data.zero_()\n\n def forward(self, left, right):\n start=time.time()\n refimg_fea, half,all_feature= self.feature_extraction(left)\n targetimg_fea, _ ,all_feature_right= self.feature_extraction(right)\n scale=all_feature.shape[-1]//refimg_fea.shape[-1]\n #mapping,mapping_r,mapping_l,mapping_t,mapping_b=self.mapping_matrix(refimg_fea,all_feature)\n #target\n #[mapping,mapping_r,mapping_l,mapping_t,mapping_b],[mapping_target,mapping_target_r,mapping_target_l]=self.mapping_matrix(refimg_fea,all_feature,targetimg_fea,all_feature_right)\n #time=0.1s\n weight,weight_l,weight_r,weight_t,weight_b,weight_lt,weight_rt,weight_lb,weight_rb=self.mapping_matrix(refimg_fea,all_feature,targetimg_fea,all_feature_right)\n #mapping,mapping_target=self.mapping_matrix(refimg_fea,all_feature,targetimg_fea,all_feature_right)\n # matching\n cost = Variable(\n torch.FloatTensor(refimg_fea.size()[0],\n refimg_fea.size()[1] * 2, self.maxdisp // scale,\n refimg_fea.size()[2],\n refimg_fea.size()[3]).zero_()).cuda()\n\n for i in range(self.maxdisp // scale):\n if i > 0:\n cost[:, :refimg_fea.size()[1], i, :, i:] = refimg_fea[:, :, :,\n i:]\n cost[:, refimg_fea.size()[1]:, i, :,\n i:] = targetimg_fea[:, :, :, :-i]\n else:\n cost[:, :refimg_fea.size()[1], i, :, :] = refimg_fea\n cost[:, refimg_fea.size()[1]:, i, :, :] = targetimg_fea\n cost = cost.contiguous()\n \n cost0 = self.dres0(cost)\n cost0 = self.dres1(cost0) + cost0\n out1, pre1, post1 = self.dres2(cost0, None, None)\n out1 = out1 + cost0\n\n out2, pre2, post2 = self.dres3(out1, pre1, post1)\n out2 = out2 + cost0\n\n out3, pre3, post3 = self.dres4(out2, pre1, post2)\n out3 = out3 + cost0\n\n cost1 = self.classif1(out1)\n #cost2 = self.classif2(out2) + cost1\n #cost3 = self.classif3(out3) + cost2\n #torch.Size([1, 1, 256, 512])\n # weight_all=torch.cat([weight,weight_r,weight_l,weight_t,weight_b],dim=1)\n # weight_norm=F.softmax(weight_all, dim=1)\n\n # t=time.time()\n cost1 = torch.squeeze(cost1, 1)\n\n pred1 = F.softmax(cost1, dim=1)\n pred1 = disparityregression(self.maxdisp//scale)(pred1)\n #torch.Size([1, 64, 128])\n\n pred1=scale*pred1.unsqueeze(-1).expand(pred1.shape[0],pred1.shape[1],pred1.shape[2],scale) \\\n .contiguous().view(pred1.shape[0],pred1.shape[1],pred1.shape[2]*scale) \\\n .unsqueeze(-2).expand(pred1.shape[0],pred1.shape[1],scale,pred1.shape[2]*scale) \\\n .contiguous().view(pred1.shape[0],pred1.shape[1]*scale,pred1.shape[2]*scale)\n\n pred1_map=pred1*weight\n pred1_map[...,scale:]+=pred1[...,:-scale]*weight_l[...,scale:]\n pred1_map[...,:-scale]+=pred1[...,scale:]*weight_r[...,:-scale]\n pred1_map[...,scale:,:]+=pred1[...,:-scale,:]*weight_t[...,scale:,:]\n pred1_map[...,:-scale,:]+=pred1[...,scale:,:]*weight_b[...,:-scale,:]\n\n pred1_map[...,scale:,scale:]+=pred1[...,:-scale,:-scale]*weight_lt[...,scale:,scale:]\n pred1_map[...,scale:,:-scale]+=pred1[...,:-scale,scale:]*weight_rt[...,scale:,:-scale]\n pred1_map[...,:-scale,scale:]+=pred1[...,scale:,:-scale]*weight_lb[...,:-scale,scale:]\n pred1_map[...,:-scale,:-scale]+=pred1[...,scale:,scale:]*weight_rb[...,:-scale,:-scale]\n cost2 = self.classif2(out2)\n cost2 = torch.squeeze(cost2, 1)+cost1\n\n pred2 = F.softmax(cost2, dim=1)\n pred2 = disparityregression(self.maxdisp//scale)(pred2)\n\n 
pred2=scale*pred2.unsqueeze(-1).expand(pred2.shape[0],pred2.shape[1],pred2.shape[2],scale) \\\n .contiguous().view(pred2.shape[0],pred2.shape[1],pred2.shape[2]*scale) \\\n .unsqueeze(-2).expand(pred2.shape[0],pred2.shape[1],scale,pred2.shape[2]*scale) \\\n .contiguous().view(pred2.shape[0],pred2.shape[1]*scale,pred2.shape[2]*scale)\n\n pred2_map=pred2*weight\n pred2_map[...,scale:]+=pred2[...,:-scale]*weight_l[...,scale:]\n pred2_map[...,:-scale]+=pred2[...,scale:]*weight_r[...,:-scale]\n pred2_map[...,scale:,:]+=pred2[...,:-scale,:]*weight_t[...,scale:,:]\n pred2_map[...,:-scale,:]+=pred2[...,scale:,:]*weight_b[...,:-scale,:]\n\n pred2_map[...,scale:,scale:]+=pred2[...,:-scale,:-scale]*weight_lt[...,scale:,scale:]\n pred2_map[...,scale:,:-scale]+=pred2[...,:-scale,scale:]*weight_rt[...,scale:,:-scale]\n pred2_map[...,:-scale,scale:]+=pred2[...,scale:,:-scale]*weight_lb[...,:-scale,scale:]\n pred2_map[...,:-scale,:-scale]+=pred2[...,scale:,scale:]*weight_rb[...,:-scale,:-scale]\n\n\n cost3 = self.classif3(out3)\n cost3 = torch.squeeze(cost3, 1)+cost2\n \n pred3 = F.softmax(cost3, dim=1)\n # print(torch.max(pred3,dim=1)[0])\n # print(torch.min(pred3,dim=1)[0])\n pred3 = disparityregression(self.maxdisp//scale)(pred3)\n\n pred3=scale*pred3.unsqueeze(-1).expand(pred3.shape[0],pred3.shape[1],pred3.shape[2],scale) \\\n .contiguous().view(pred3.shape[0],pred3.shape[1],pred3.shape[2]*scale) \\\n .unsqueeze(-2).expand(pred3.shape[0],pred3.shape[1],scale,pred3.shape[2]*scale) \\\n .contiguous().view(pred3.shape[0],pred3.shape[1]*scale,pred3.shape[2]*scale)\n\n pred3_map=pred3*weight\n pred3_map[...,scale:]+=pred3[...,:-scale]*weight_l[...,scale:]\n pred3_map[...,:-scale]+=pred3[...,scale:]*weight_r[...,:-scale]\n pred3_map[...,scale:,:]+=pred3[...,:-scale,:]*weight_t[...,scale:,:]\n pred3_map[...,:-scale,:]+=pred3[...,scale:,:]*weight_b[...,:-scale,:]\n\n pred3_map[...,scale:,scale:]+=pred3[...,:-scale,:-scale]*weight_lt[...,scale:,scale:]\n pred3_map[...,scale:,:-scale]+=pred3[...,:-scale,scale:]*weight_rt[...,scale:,:-scale]\n pred3_map[...,:-scale,scale:]+=pred3[...,scale:,:-scale]*weight_lb[...,:-scale,scale:]\n pred3_map[...,:-scale,:-scale]+=pred3[...,scale:,scale:]*weight_rb[...,:-scale,:-scale]\n\n\n #pred3 = self.srr(pred3, left, refimg_fea, half)\n #print(time.time()-start)\n return pred1_map, pred2_map, pred3_map\n #return pred3\n\n\n\n",
"# -*- coding: utf-8 -*-\n# @Author: yulidong\n# @Date: 2018-07-17 10:44:43\n# @Last Modified by: yulidong\n# @Last Modified time: 2018-09-23 17:15:36\n# -*- coding: utf-8 -*-\n# @Author: lidong\n# @Date: 2018-03-20 18:01:52\n# @Last Modified by: yulidong\n# @Last Modified time: 2018-07-16 22:16:14\nimport time\nimport torch\nimport numpy as np\nimport torch.nn as nn\nimport math\nfrom math import ceil\nfrom torch.autograd import Variable\nfrom torch.nn.functional import cosine_similarity as cosine_s\nfrom pssm import caffe_pb2\nfrom pssm.models.utils import *\nrsn_specs = {\n 'scene': \n {\n 'n_classes': 9,\n 'input_size': (540, 960),\n 'block_config': [3, 4, 23, 3],\n },\n\n}\n\ngroup_dim=32\npramid_dim=8\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n\n if stride==1:\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n if stride==2:\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=2, bias=False) \nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.gn1 = nn.GroupNorm(group_dim,planes)\n self.relu = nn.LeakyReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.gn2 = nn.GroupNorm(group_dim,planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.gn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.gn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n # print(residual.shape)\n # print(out.shape)\n out += residual\n out = self.relu(out)\n\n return out\nclass feature_extraction(nn.Module):\n def __init__(self):\n super(feature_extraction, self).__init__()\n self.inplanes = 32\n self.layer1 = self._make_layer(BasicBlock, 32, 3, 1,1,1)\n\n self.branch1 = nn.Sequential(nn.AvgPool2d((54, 96), stride=(54,96)),\n nn.Conv2d(32, 8, 1, 1, 0, 1),\n nn.GroupNorm(pramid_dim,8),\n nn.LeakyReLU(inplace=True))\n\n self.branch2 = nn.Sequential(nn.AvgPool2d((27, 48), stride=(27,48)),\n nn.Conv2d(32, 8, 1, 1, 0, 1),\n nn.GroupNorm(pramid_dim,8),\n nn.LeakyReLU(inplace=True))\n\n self.branch3 = nn.Sequential(nn.AvgPool2d((36, 64), stride=(36,64)),\n nn.Conv2d(32, 8, 1, 1, 0, 1),\n nn.GroupNorm(pramid_dim,8),\n nn.LeakyReLU(inplace=True))\n\n self.branch4 = nn.Sequential(nn.AvgPool2d((18, 32), stride=(18,32)),\n nn.Conv2d(32, 8, 1, 1, 0, 1),\n nn.GroupNorm(pramid_dim,8),\n nn.LeakyReLU(inplace=True))\n self.branch5 = nn.Sequential(nn.AvgPool2d((9, 16), stride=(9,16)),\n nn.Conv2d(32, 8, 1, 1, 0, 1),\n nn.GroupNorm(pramid_dim,8),\n nn.LeakyReLU(inplace=True))\n self.branch6 = nn.Sequential(nn.AvgPool2d((3, 8), stride=(3,8)),\n nn.Conv2d(32, 8, 1, 1, 0, 1),\n nn.GroupNorm(pramid_dim,8),\n nn.LeakyReLU(inplace=True))\n\n\n self.lastconv = nn.Sequential(nn.Conv2d(80, 64, 3, 1, 1, 1),\n nn.GroupNorm(group_dim,64),\n nn.LeakyReLU(inplace=True),\n nn.Conv2d(64, 32, 3, 1, 1, 1),\n )\n for m in self.modules():\n if isinstance(m,nn.Conv2d):\n nn.init.kaiming_normal_(m.weight,mode='fan_out',nonlinearity='relu')\n elif isinstance(m, nn.GroupNorm):\n nn.init.constant_(m.weight,1)\n nn.init.constant_(m.bias,0)\n def _make_layer(self, block, planes, blocks, stride, pad, dilation):\n downsample = None\n\n layers = []\n layers.append(block(self.inplanes, planes, stride))\n self.inplanes = planes * block.expansion\n for i 
in range(1, blocks):\n layers.append(block(self.inplanes, planes,1))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n # output = self.conv1(x)\n # output = self.gn1(output)\n # output = self.relu1(output)\n # output = self.conv2(output)\n # output = self.gn2(output)\n # output = self.relu2(output)\n # output = self.conv3(output)\n # output = self.gn3(output)\n # output = self.relu3(output)\n output_skip = self.layer1(x)\n # output_skip=x\n\n output_branch1 = self.branch1(output_skip)\n output_branch1 = F.interpolate(output_branch1, (output_skip.size()[2],output_skip.size()[3]),mode='bilinear',align_corners=True)\n\n output_branch2 = self.branch2(output_skip)\n output_branch2 = F.interpolate(output_branch2, (output_skip.size()[2],output_skip.size()[3]),mode='bilinear',align_corners=True)\n\n output_branch3 = self.branch3(output_skip)\n output_branch3 = F.interpolate(output_branch3, (output_skip.size()[2],output_skip.size()[3]),mode='bilinear',align_corners=True)\n\n output_branch4 = self.branch4(output_skip)\n output_branch4 = F.interpolate(output_branch4, (output_skip.size()[2],output_skip.size()[3]),mode='bilinear',align_corners=True)\n\n output_branch5 = self.branch5(output_skip)\n output_branch5 = F.interpolate(output_branch5, (output_skip.size()[2],output_skip.size()[3]),mode='bilinear',align_corners=True)\n\n output_branch6 = self.branch6(output_skip)\n output_branch6 = F.interpolate(output_branch6, (output_skip.size()[2],output_skip.size()[3]),mode='bilinear',align_corners=True)\n\n output_feature = torch.cat((output_skip, output_branch6, output_branch5, output_branch4, output_branch3, output_branch2, output_branch1), 1)\n output_feature = self.lastconv(output_feature)\n #print(output_feature.shape)\n return output_feature\n\nclass feature_extraction2(nn.Module):\n def __init__(self):\n super(feature_extraction2, self).__init__()\n self.inplanes = 32\n self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1,\n bias=False,dilation=1)\n self.gn1 = nn.GroupNorm(group_dim,32)\n self.relu1 = nn.LeakyReLU(inplace=True)\n self.conv2 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1,\n bias=False,dilation=1)\n self.gn2 = nn.GroupNorm(group_dim,32)\n self.relu2 = nn.LeakyReLU(inplace=True)\n self.conv3 = nn.Conv2d(32, 32, kernel_size=7, stride=1, padding=6,\n bias=False,dilation=2)\n self.gn3 = nn.GroupNorm(group_dim,32)\n self.relu3 = nn.LeakyReLU(inplace=True)\n self.layer1 = self._make_layer(BasicBlock, 32, 1, 1,1,1)\n self.lastconv = nn.Conv2d(32, 32, 3, 1, 1, 1)\n\n for m in self.modules():\n if isinstance(m,nn.Conv2d):\n nn.init.kaiming_normal_(m.weight,mode='fan_out',nonlinearity='relu')\n elif isinstance(m, nn.GroupNorm):\n nn.init.constant_(m.weight,1)\n nn.init.constant_(m.bias,0)\n def _make_layer(self, block, planes, blocks, stride, pad, dilation):\n downsample = None\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes,1,None,))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n output = self.conv1(x)\n output = self.gn1(output)\n output = self.relu1(output)\n output = self.conv2(output)\n output = self.gn2(output)\n output = self.relu2(output)\n output = self.conv3(output)\n output = self.gn3(output)\n output = self.relu3(output)\n #print(output.shape)\n output = self.layer1(output)\n output=self.lastconv(output)\n\n return output\n\nclass similarity_measure1(nn.Module):\n def __init__(self):\n 
super(similarity_measure1, self).__init__()\n self.inplanes = 32\n self.conv1 = nn.Conv2d(32, 16, kernel_size=1, stride=1, padding=0,\n bias=False,dilation=1)\n self.relu1 = nn.LeakyReLU(inplace=True)\n self.conv2 = nn.Conv2d(16, 4, kernel_size=1, stride=1, padding=0,\n bias=False,dilation=1)\n self.relu2 = nn.LeakyReLU(inplace=True)\n self.conv3 = nn.Conv2d(4, 1, kernel_size=1, stride=1, padding=0,\n bias=False,dilation=1)\n # self.conv4 = nn.Conv2d(8, 2, kernel_size=1, stride=1, padding=0,\n # bias=False,dilation=1) \n # self.lastconv = nn.Conv2d(2, 1, kernel_size=1, stride=1, padding=0,\n # bias=False,dilation=1)\n self.s1=nn.Parameter(torch.ones(1)).float()\n for m in self.modules():\n if isinstance(m,nn.Conv2d):\n nn.init.kaiming_normal_(m.weight,mode='fan_out',nonlinearity='relu')\n elif isinstance(m, nn.GroupNorm):\n nn.init.constant_(m.weight,1)\n nn.init.constant_(m.bias,0)\n def forward(self, x):\n output = self.conv1(x)\n output = self.relu1(output)\n output = self.conv2(output)\n output = self.relu2(output)\n output = self.conv3(output)\n # output=self.conv4(output)\n # output=self.lastconv(output)\n output=output*self.s1\n return output\nclass similarity_measure2(nn.Module):\n def __init__(self):\n super(similarity_measure2, self).__init__()\n self.inplanes = 32\n self.conv1 = nn.Conv2d(32, 16, kernel_size=1, stride=1, padding=0,\n bias=False,dilation=1)\n self.relu1 = nn.LeakyReLU(inplace=True)\n self.conv2 = nn.Conv2d(16, 4, kernel_size=1, stride=1, padding=0,\n bias=False,dilation=1)\n self.relu2 = nn.LeakyReLU(inplace=True)\n self.conv3 = nn.Conv2d(4, 1, kernel_size=1, stride=1, padding=0,\n bias=False,dilation=1)\n # self.conv4 = nn.Conv2d(8, 2, kernel_size=1, stride=1, padding=0,\n # bias=False,dilation=1) \n # self.lastconv = nn.Conv2d(2, 1, kernel_size=1, stride=1, padding=0,\n # bias=False,dilation=1)\n self.s2=nn.Parameter(torch.ones(1)).float()\n\n for m in self.modules():\n if isinstance(m,nn.Conv2d):\n nn.init.kaiming_normal_(m.weight,mode='fan_out',nonlinearity='relu')\n\n def forward(self, x):\n output = self.conv1(x)\n output = self.relu1(output)\n output = self.conv2(output)\n output = self.relu2(output)\n output = self.conv3(output)\n # output=self.conv4(output)\n # output=self.lastconv(output)\n output=self.s2*output\n return output\n\nclass similarity_measure3(nn.Module):\n def __init__(self):\n super(similarity_measure3, self).__init__()\n self.inplanes = 32\n self.conv1 = nn.Conv2d(69, 32, kernel_size=1, stride=1, padding=0,\n bias=False,dilation=1)\n self.relu1 = nn.LeakyReLU(inplace=True)\n self.conv2 = nn.Conv2d(32, 16, kernel_size=1, stride=1, padding=0,\n bias=False,dilation=1)\n self.relu2 = nn.LeakyReLU(inplace=True)\n self.conv3 = nn.Conv2d(16, 8, kernel_size=1, stride=1, padding=0,\n bias=False,dilation=1)\n self.relu3 = nn.LeakyReLU(inplace=True)\n self.conv4 = nn.Conv2d(8, 4, kernel_size=1, stride=1, padding=0,\n bias=False,dilation=1)\n self.relu4 = nn.LeakyReLU(inplace=True)\n self.conv5 = nn.Conv2d(4, 1, kernel_size=1, stride=1, padding=0,\n bias=False,dilation=1)\n # self.relu5 = nn.ReLU(inplace=True)\n\n\n for m in self.modules():\n if isinstance(m,nn.Conv2d):\n nn.init.kaiming_normal_(m.weight,mode='fan_out',nonlinearity='relu')\n elif isinstance(m, nn.GroupNorm):\n nn.init.constant_(m.weight,1)\n nn.init.constant_(m.bias,0)\n def forward(self, x):\n output = self.conv1(x)\n output = self.relu1(output)\n output = self.conv2(output)\n output = self.relu2(output)\n output = self.conv3(output)\n output = self.relu3(output)\n output = 
self.conv4(output)\n output = self.relu4(output)\n output = self.conv5(output)\n output = torch.abs(output)\n # output = self.relu5(output)\n # print(output.shape)\n # print(torch.mean(output).item(),torch.max(output).item(),torch.min(output).item())\n\n output = output/torch.max(output)\n # output = output-torch.min(output)\n # output = 1-output\n output = torch.exp(-output)\n #print(torch.mean(output).item(),torch.max(output).item(),torch.min(output).item())\n return output\nclass rstereo(nn.Module):\n\n\n def __init__(self, \n n_classes=9, \n block_config=[3, 4, 6, 3], \n input_size= (480, 640), \n version='scene'):\n\n super(rstereo, self).__init__()\n self.feature_extraction=feature_extraction().cuda(0)\n self.feature_extraction2=feature_extraction2().cuda(0)\n self.softmax= nn.Softmax(dim=-1)\n self.similarity1=similarity_measure1().cuda(1)\n self.similarity2=similarity_measure2().cuda(1)\n self.similarity3=similarity_measure3().cuda(1)\n\n def ss_argmin(self,x,index):\n one=torch.ones(1)\n zero=torch.zeros(1)\n #print(x.data.cpu())\n # exit()\n x=self.softmax(-x)\n #print(x)\n disparity= torch.sum(x*index.unsqueeze(0),dim=-1)\n return disparity \n def cluster_vector(self,feature,x,y):\n one=torch.ones(1).cuda(1)\n zero=torch.zeros(1).cuda(1)\n cluster_feature=feature[...,x,y]\n mean=torch.sum(cluster_feature,dim=-1)/x.shape[0]\n mean=mean.view(cluster_feature.shape[0],cluster_feature.shape[1],1)\n #print(mean.shape)\n weights=torch.norm(cluster_feature-mean,dim=1)\n weights=torch.exp(-weights)\n return weights\n def forward(self, l,r,P,pre,pre2):\n #self.P=P[1,0]\n #0 l to r,1 min,2 max\n #[l_box,r_box,match],[min_d,max_d]\n #start_time=time.time()\n self.pre=pre.cuda(1)\n P1=P[...,0].cuda(1).squeeze()\n P2=P[...,1].cuda(1).squeeze()\n P3=P[...,2].cuda(1).squeeze()\n P4=P[...,3].cuda(1).squeeze()\n #feature extraction\n #P2=P2-P1\n #print(torch.min(P3),torch.max(P3))\n l_sf=self.feature_extraction2(l)\n l_lf=self.feature_extraction(l_sf)\n\n r_sf=self.feature_extraction2(r)\n r_lf=self.feature_extraction(r_sf)\n\n disparity=torch.zeros([540,960]).cuda(0)\n one=torch.ones(1).cuda(1)\n zero=torch.zeros(1).cuda(1)\n #cost_volume=[]\n #5710\n #print(value)\n l_lf=l_lf.cuda(1)\n r_lf=r_lf.cuda(1)\n r_sf=r_sf.cuda(1)\n l_sf=l_sf.cuda(1)\n\n count=0\n #start_time=time.time()\n #with torch.no_grad():\n\n for i in range(1,torch.max(P3).type(torch.int32)+1):\n #i=19\n with torch.no_grad():\n x1,y1,x2,y2,size=pre[0,i].long()\n region=P3[x1:x2,y1:y2]\n P1_r=P1[x1:x2,y1:y2]\n P2_r=P2[x1:x2,y1:y2]\n region=torch.where(region==i,one,zero)\n pixels=torch.sum(region).item()\n object1=region*P1_r\n object2=region*P2_r\n index_all=region.nonzero()\n region=region-object1-object2\n index_r=region.nonzero()\n index1=object1.nonzero()\n index1_all=index1\n index2=object2.nonzero()\n index2_all=index2\n max_d=pre2[0,1,i].long()\n min_d=pre2[0,0,i].long()\n cost_volume=torch.zeros(x2-x1+1,y2-y1+1,max_d-min_d+1).cuda(1)\n # max_d=300\n # min_d=0\n #print(y2,y1)\n #print(index1.shape[0],index2.shape[0],pixels)\n if index1.shape[0]>0:\n if y2-y1 >700 or max_d-min_d==192:\n index1=index1[np.random.randint(low=0,high=index1.shape[0],size=(np.min([np.ceil(index1.shape[0]/30),pixels/96]).astype(np.int),)),:]\n elif index1.shape[0]>6000:\n index1=index1[np.random.randint(low=0,high=index1.shape[0],size=(np.min([np.ceil(index1.shape[0]/6),pixels/36]).astype(np.int),)),:]\n elif index1.shape[0]/pixels<0.1:\n 
index1=index1[np.random.randint(low=0,high=index1.shape[0],size=(np.min([np.ceil(index1.shape[0]/2),pixels/20]).astype(np.int),)),:]\n elif index1.shape[0]/pixels<0.5:\n index1=index1[np.random.randint(low=0,high=index1.shape[0],size=(np.min([np.ceil(index1.shape[0]/3),pixels/20]).astype(np.int),)),:]\n else:\n index1=index1[np.random.randint(low=0,high=index1.shape[0],size=(np.min([np.ceil(index1.shape[0]/4),pixels/20]).astype(np.int),)),:]\n if index2.shape[0]>0:\n if y2-y1 >700 or max_d-min_d==192:\n index2=index2[np.random.randint(low=0,high=index2.shape[0],size=(np.min([np.ceil(index2.shape[0]),pixels/96]).astype(np.int),)),:]\n elif index2.shape[0]>6000:\n index2=index2[np.random.randint(low=0,high=index2.shape[0],size=(np.min([np.ceil(index2.shape[0]/6),pixels/36]).astype(np.int),)),:]\n elif index2.shape[0]/pixels<0.1:\n index2=index2[np.random.randint(low=0,high=index2.shape[0],size=(np.min([np.ceil(index2.shape[0]/2),pixels/20]).astype(np.int),)),:]\n elif index2.shape[0]/pixels<0.5:\n index2=index2[np.random.randint(low=0,high=index2.shape[0],size=(np.min([np.ceil(index2.shape[0]/3),pixels/20]).astype(np.int),)),:]\n else:\n index2=index2[np.random.randint(low=0,high=index2.shape[0],size=(np.min([np.ceil(index2.shape[0]/4),pixels/25]).astype(np.int),)),:]\n if index_r.shape[0]>0:\n if y2-y1>700 or max_d-min_d==192:\n index2=torch.cat([index2,index_r[np.random.randint(low=0,high=index_r.shape[0],size=(np.min([np.ceil(index_r.shape[0]/96),pixels/96]).astype(np.int),)),:]],0)\n elif index_r.shape[0]>6000:\n index2=torch.cat([index2,index_r[np.random.randint(low=0,high=index_r.shape[0],size=(np.min([np.ceil(index_r.shape[0]/48),pixels/48]).astype(np.int),)),:]],0)\n else:\n index2=torch.cat([index2,index_r[np.random.randint(low=0,high=index_r.shape[0],size=(np.min([np.ceil(index_r.shape[0]/25),pixels/25]).astype(np.int),)),:]],0)\n #print(index1.shape[0],index2.shape[0],pixels) \n #print(max_d.item(),min_d.item()) \n d=torch.arange(min_d,max_d+1).cuda(1)\n if index1.shape[0]>0:\n d_index1=d.expand(index1.shape[0],max_d-min_d+1).contiguous().view(-1)\n index1_d_x=index1[:,0].unsqueeze(-1).expand(index1.shape[0],d.shape[0]).contiguous().view(-1)\n index1_d_y=index1[:,1].unsqueeze(-1).expand(index1.shape[0],d.shape[0]).contiguous().view(-1)\n if index1_all.shape[0]>0:\n d_index1_all=d.expand(index1_all.shape[0],max_d-min_d+1).contiguous().view(-1)\n index1_all_d_x=index1_all[:,0].unsqueeze(-1).expand(index1_all.shape[0],d.shape[0]).contiguous().view(-1)\n index1_all_d_y=index1_all[:,1].unsqueeze(-1).expand(index1_all.shape[0],d.shape[0]).contiguous().view(-1) \n if index2.shape[0]>0:\n d_index2=d.expand(index2.shape[0],max_d-min_d+1).contiguous().view(-1)\n index2_d_x=index2[:,0].view(index2.shape[0],1).expand(index2.shape[0],d.shape[0]).contiguous().view(-1)\n index2_d_y=index2[:,1].view(index2.shape[0],1).expand(index2.shape[0],d.shape[0]).contiguous().view(-1)\n if index2_all.shape[0]>0:\n d_index2_all=d.expand(index2_all.shape[0],max_d-min_d+1).contiguous().view(-1)\n index2_all_d_x=index2_all[:,0].view(index2_all.shape[0],1).expand(index2_all.shape[0],d.shape[0]).contiguous().view(-1)\n index2_all_d_y=index2_all[:,1].view(index2_all.shape[0],1).expand(index2_all.shape[0],d.shape[0]).contiguous().view(-1)\n if index_all.shape[0]>0:\n d_index_all=d.expand(index_all.shape[0],max_d-min_d+1).contiguous().view(-1)\n index_all_d_x=index_all[:,0].view(index_all.shape[0],1).expand(index_all.shape[0],d.shape[0]).contiguous().view(-1)\n 
index_all_d_y=index_all[:,1].view(index_all.shape[0],1).expand(index_all.shape[0],d.shape[0]).contiguous().view(-1) \n count=count+index2.shape[0]+index1.shape[0]\n\n if index1.shape[0]>0:\n s_feature=l_sf[...,x1:x2,y1:y2][...,index1[:,0],index1[:,1]].unsqueeze(-1).contiguous() \\\n .expand(l_sf.shape[0],l_sf.shape[1],index1.shape[0],d.shape[0]).contiguous() \\\n .view(l_sf.shape[0],l_sf.shape[1],d.shape[0]*index1.shape[0])\n s_r_y=torch.max(index1_d_y+y1-d_index1,-torch.ones_like(index1_d_y))\n s_r_o_t=r_sf[...,index1_d_x+x1,s_r_y]\n s_cost=self.similarity1((torch.where(s_r_y>=0,s_feature-s_r_o_t,zero)).unsqueeze(-1)) \\\n +self.similarity2((torch.where(s_r_y>=0,s_feature*s_r_o_t,zero)).unsqueeze(-1))\n s_cost=s_cost.view(s_cost.shape[0],index1.shape[0],d.shape[0])\n a_s_feature=torch.cat([l_sf[...,x1:x2,y1:y2][...,index1[:,0],index1[:,1]],index1[:,0].unsqueeze(0).unsqueeze(0).float(),index1[:,1].unsqueeze(0).unsqueeze(0).float()],1)\n s_mean_feature=torch.mean(a_s_feature,2,keepdim=True).expand(a_s_feature.shape[0],a_s_feature.shape[1],index1.shape[0])\n s_weights=self.similarity3(torch.cat([a_s_feature,s_mean_feature,torch.norm(a_s_feature-s_mean_feature,dim=1).unsqueeze(1)],dim=1).unsqueeze(-1)).squeeze()\n s_weights=s_weights.unsqueeze(0).expand(d.shape[0],-1).contiguous().view(s_cost.shape[0],s_cost.shape[1],s_cost.shape[2])\n s_weights_b=s_weights\n s_weights=torch.where(s_r_y.view_as(s_cost)>=0,s_weights,1e-4*one)\n #print(s_cost.shape) 1,n,d\n mean_cost=torch.sum((s_weights*s_cost),1)/torch.sum(s_weights,1)\n mean_cost=torch.where(torch.sum(s_weights,1)==zero,torch.sum((s_weights*s_cost),1),mean_cost)\n mean_cost=mean_cost.unsqueeze(1).expand(s_cost.shape[0],s_cost.shape[1],s_cost.shape[2])\n s_cost=torch.where(s_weights>1e-4*one,mean_cost*s_weights+(one-s_weights)*s_cost,s_weights_b*mean_cost)\n disparity[x1:x2,y1:y2][index1[:,0],index1[:,1]]=self.ss_argmin(-s_cost.view(1,index1.shape[0],d.shape[0]).cuda(0),d.float().cuda(0))\n #print(index1.shape,s_cost.shape,cost_volume[index1[:,0],index1[:,1],:].shape)\n #torch.Size([8, 2]) torch.Size([1, 8, 21]) torch.Size([8, 20])\n cost_volume[index1[:,0],index1[:,1],:]=s_cost\n # if index1_all.shape[0]>0:\n # #completion, we need the cost volume\n # s_feature=l_sf[...,x1:x2,y1:y2][...,index1_all[:,0],index1_all[:,1]].unsqueeze(-1).contiguous() \\\n # .expand(l_sf.shape[0],l_sf.shape[1],index1_all.shape[0],d.shape[0]).contiguous() \\\n # .view(l_sf.shape[0],l_sf.shape[1],d.shape[0]*index1_all.shape[0])\n # s_r_y=torch.max(index1_all_d_y+y1-d_index1_all,-torch.ones_like(index1_all_d_y))\n # a_s_feature=torch.cat([l_sf[...,x1:x2,y1:y2][...,index1_all[:,0],index1_all[:,1]],index1_all[:,0].unsqueeze(0).unsqueeze(0).float(),index1_all[:,1].unsqueeze(0).unsqueeze(0).float()],1)\n # s_mean_feature=torch.mean(a_s_feature,2,keepdim=True).expand(a_s_feature.shape[0],a_s_feature.shape[1],index1_all.shape[0])\n # s_weights=self.similarity3(torch.cat([a_s_feature,s_mean_feature,torch.norm(a_s_feature-s_mean_feature,dim=1).unsqueeze(1)],dim=1).unsqueeze(-1)).squeeze()\n # s_cost=cost_volume[index1_all[:,0],index1_all[:,1],:].unsqueeze(0)\n # s_weights=s_weights.unsqueeze(0).expand(d.shape[0],-1).contiguous().view(s_cost.shape[0],s_cost.shape[1],s_cost.shape[2])\n # s_weights_b=s_weights\n # s_weights=torch.where(s_r_y.view_as(s_cost)>=0,s_weights,1e-4*one)\n # s_weights=torch.where(s_cost==zero,1e-4*one,s_weights)\n # mean_cost=torch.sum((s_weights*s_cost),1)/torch.sum(s_weights,1)\n # 
mean_cost=torch.where(torch.sum(s_weights,1)==zero,torch.sum((s_weights*s_cost),1),mean_cost)\n # mean_cost=mean_cost.unsqueeze(1).expand(s_cost.shape[0],s_cost.shape[1],s_cost.shape[2])\n # #s_cost=torch.where(s_weights==1e-4*one,s_weights_b*mean_cost,s_cost)\n # cost_volume[index1_all[:,0],index1_all[:,1],:]=torch.where(s_weights==1e-4*one,s_weights_b*mean_cost,s_cost)\n # disparity[x1:x2,y1:y2][index1_all[:,0],index1_all[:,1]]=self.ss_argmin(-cost_volume[index1_all[:,0],index1_all[:,1],:].view(1,index1_all.shape[0],d.shape[0]).cuda(0),d.float().cuda(0))\n #print(torch.max(s_weights).item(),torch.max(s_cost).item(),torch.max(mean_cost).item())\n if index2.shape[0]>0:\n l_feature=l_lf[...,x1:x2,y1:y2][...,index2[:,0],index2[:,1]].unsqueeze(-1).contiguous() \\\n .expand(l_lf.shape[0],l_lf.shape[1],index2.shape[0],d.shape[0]).contiguous() \\\n .view(l_lf.shape[0],l_lf.shape[1],d.shape[0]*index2.shape[0])\n l_r_y=torch.max(index2_d_y+y1-d_index2,-torch.ones_like(index2_d_y))\n l_r_o_t=r_lf[...,index2_d_x+x1,l_r_y]\n l_cost=self.similarity1((torch.where(l_r_y>=0,l_feature-l_r_o_t,2*l_feature)).unsqueeze(-1)) \\\n +self.similarity2((torch.where(l_r_y>=0,l_feature*l_r_o_t,zero)).unsqueeze(-1))\n l_cost=l_cost.view(l_cost.shape[0],index2.shape[0],d.shape[0])\n a_l_feature=torch.cat([l_lf[...,x1:x2,y1:y2][...,index2[:,0],index2[:,1]],index2[:,0].unsqueeze(0).unsqueeze(0).float(),index2[:,1].unsqueeze(0).unsqueeze(0).float()],1)\n l_mean_feature=torch.mean(a_l_feature,2,keepdim=True).expand(a_l_feature.shape[0],a_l_feature.shape[1],index2.shape[0])\n l_weights=self.similarity3(torch.cat([a_l_feature,l_mean_feature,torch.norm(a_l_feature-l_mean_feature,dim=1).unsqueeze(1)],dim=1).unsqueeze(-1)).squeeze()\n l_weights=l_weights.unsqueeze(0).expand(d.shape[0],-1).contiguous().view(l_cost.shape[0],l_cost.shape[1],l_cost.shape[2])\n l_weight_b=l_weights\n l_weights=torch.where(l_r_y.view_as(l_cost)>=0,l_weights,one*1e-4)\n #print(l_cost.shape) 1,n,d\n mean_cost=torch.sum((l_weights*l_cost),1)/torch.sum(l_weights,1)\n mean_cost=torch.where(torch.sum(l_weights,1)==zero,torch.sum((l_weights*l_cost),1),mean_cost)\n mean_cost=mean_cost.unsqueeze(1).expand(l_cost.shape[0],l_cost.shape[1],l_cost.shape[2]) \n l_cost=torch.where(l_weights>one*1e-4*one,mean_cost*l_weights+(one-l_weights)*l_cost,l_weight_b*mean_cost)\n disparity[x1:x2,y1:y2][index2[:,0],index2[:,1]]=self.ss_argmin(-l_cost.view(1,index2.shape[0],d.shape[0]).cuda(0),d.float().cuda(0))\n cost_volume[index2[:,0],index2[:,1],:]=l_cost\n # if index2_all.shape[0]>0:\n # #completion, we need the cost volume\n # l_feature=l_lf[...,x1:x2,y1:y2][...,index2_all[:,0],index2_all[:,1]].unsqueeze(-1).contiguous() \\\n # .expand(l_lf.shape[0],l_lf.shape[1],index2_all.shape[0],d.shape[0]).contiguous() \\\n # .view(l_lf.shape[0],l_lf.shape[1],d.shape[0]*index2_all.shape[0])\n # l_r_y=torch.max(index2_all_d_y+y1-d_index2_all,-torch.ones_like(index2_all_d_y))\n # a_l_feature=torch.cat([l_lf[...,x1:x2,y1:y2][...,index2_all[:,0],index2_all[:,1]],index2_all[:,0].unsqueeze(0).unsqueeze(0).float(),index2_all[:,1].unsqueeze(0).unsqueeze(0).float()],1)\n # l_mean_feature=torch.mean(a_l_feature,2,keepdim=True).expand(a_l_feature.shape[0],a_l_feature.shape[1],index2_all.shape[0])\n # l_weights=self.similarity3(torch.cat([a_l_feature,l_mean_feature,torch.norm(a_l_feature-l_mean_feature,dim=1).unsqueeze(1)],dim=1).unsqueeze(-1)).squeeze()\n # l_cost=cost_volume[index2_all[:,0],index2_all[:,1],:].unsqueeze(0)\n # 
l_weights=l_weights.unsqueeze(0).expand(d.shape[0],-1).contiguous().view(l_cost.shape[0],l_cost.shape[1],l_cost.shape[2])\n # l_weights_b=l_weights\n # l_weights=torch.where(l_r_y.view_as(l_cost)>=0,l_weights,1e-4*one)\n # l_weights=torch.where(l_cost==zero,1e-4*one,l_weights)\n # mean_cost=torch.sum((l_weights*l_cost),1)/torch.sum(l_weights,1)\n # mean_cost=torch.where(torch.sum(l_weights,1)==zero,torch.sum((l_weights*l_cost),1),mean_cost)\n # mean_cost=mean_cost.unsqueeze(1).expand(l_cost.shape[0],l_cost.shape[1],l_cost.shape[2])\n # #l_cost=torch.where(l_weights==1e-4*one,l_weights_b*mean_cost,l_cost)\n # cost_volume[index2_all[:,0],index2_all[:,1],:]=torch.where(l_weights==1e-4*one,l_weights_b*mean_cost,l_cost)\n # disparity[x1:x2,y1:y2][index2_all[:,0],index2_all[:,1]]=self.ss_argmin(-cost_volume[index2_all[:,0],index2_all[:,1],:].view(1,index2_all.shape[0],d.shape[0]).cuda(0),d.float().cuda(0))\n #time.sleep(1)\n # if index_all.shape[0]>0:\n # #completion, we need the cost volume\n # #print(index_all.shape[0],i)\n # #print(i,x1,x2,y1,y2,max_d,min_d,count/960/540,torch.sum(torch.where(disparity>0,one,zero)).item()/960/540)\n # if index_all.shape[0]>50000:\n\n # #disparity[x1:x2,y1:y2][index1_all[:,0],index1_all[:,1]]=self.ss_argmin(-cost_volume[index1_all[:,0],index1_all[:,1],:].view(1,index1_all.shape[0],d.shape[0]).cuda(0),d.float().cuda(0))\n # #disparity[x1:x2,y1:y2][index2_all[:,0],index2_all[:,1]]=self.ss_argmin(-cost_volume[index2_all[:,0],index2_all[:,1],:].view(1,index2_all.shape[0],d.shape[0]).cuda(0),d.float().cuda(0))\n # # time.sleep(1)\n # continue\n # for j in range(int(np.ceil(index_all.shape[0]/19200))):\n # index_all_t=index_all[j*19200:(j+1)*19200,:]\n # index_all_t_d_y=index_all_d_y[j*cost_volume.shape[2]*19200:(j+1)*cost_volume.shape[2]*19200]\n # d_index_all_t=d_index_all[j*cost_volume.shape[2]*19200:(j+1)*cost_volume.shape[2]*19200]\n # l_feature=l_lf[...,x1:x2,y1:y2][...,index_all_t[:,0],index_all_t[:,1]].unsqueeze(-1).contiguous() \\\n # .expand(l_lf.shape[0],l_lf.shape[1],index_all_t.shape[0],d.shape[0]).contiguous() \\\n # .view(l_lf.shape[0],l_lf.shape[1],d.shape[0]*index_all_t.shape[0])\n # l_cost=cost_volume[index_all_t[:,0],index_all_t[:,1],:].unsqueeze(0)\n # l_r_y=torch.max(index_all_t_d_y+y1-d_index_all_t,-torch.ones_like(index_all_t_d_y)).view_as(l_cost)\n # a_l_feature=torch.cat([l_lf[...,x1:x2,y1:y2][...,index_all_t[:,0],index_all_t[:,1]],index_all_t[:,0].unsqueeze(0).unsqueeze(0).float(),index_all_t[:,1].unsqueeze(0).unsqueeze(0).float()],1)\n # l_mean_feature=torch.mean(a_l_feature,2,keepdim=True).expand(a_l_feature.shape[0],a_l_feature.shape[1],index_all_t.shape[0])\n # l_weights=self.similarity3(torch.cat([a_l_feature,l_mean_feature,torch.norm(a_l_feature-l_mean_feature,dim=1).unsqueeze(1)],dim=1).unsqueeze(-1)).squeeze()\n # l_weights=l_weights.unsqueeze(0).expand(d.shape[0],-1).contiguous().view(l_cost.shape[0],l_cost.shape[1],l_cost.shape[2])\n # l_weights_b=l_weights\n # l_weights=torch.where(l_r_y>=0,l_weights,1e-4*one)\n # l_weights=torch.where(l_cost==zero,1e-4*one,l_weights)\n # mean_cost=torch.sum((l_weights*l_cost),1)/torch.sum(l_weights,1)\n # mean_cost=torch.where(torch.sum(l_weights,1)==zero,torch.sum((l_weights*l_cost),1),mean_cost)\n # mean_cost=mean_cost.unsqueeze(1).expand(l_cost.shape[0],l_cost.shape[1],l_cost.shape[2])\n # l_cost=torch.where(l_weights==1e-4*one,l_weights_b*mean_cost,l_cost) \n # 
disparity[x1:x2,y1:y2][index_all_t[:,0],index_all_t[:,1]]=self.ss_argmin(-l_cost.view(1,index_all_t.shape[0],d.shape[0]).cuda(0),d.float().cuda(0))\n # else:\n # l_feature=l_lf[...,x1:x2,y1:y2][...,index_all[:,0],index_all[:,1]].unsqueeze(-1).contiguous() \\\n # .expand(l_lf.shape[0],l_lf.shape[1],index_all.shape[0],d.shape[0]).contiguous() \\\n # .view(l_lf.shape[0],l_lf.shape[1],d.shape[0]*index_all.shape[0])\n # l_r_y=torch.max(index_all_d_y+y1-d_index_all,-torch.ones_like(index_all_d_y))\n # a_l_feature=torch.cat([l_lf[...,x1:x2,y1:y2][...,index_all[:,0],index_all[:,1]],index_all[:,0].unsqueeze(0).unsqueeze(0).float(),index_all[:,1].unsqueeze(0).unsqueeze(0).float()],1)\n # l_mean_feature=torch.mean(a_l_feature,2,keepdim=True).expand(a_l_feature.shape[0],a_l_feature.shape[1],index_all.shape[0])\n # l_weights=self.similarity3(torch.cat([a_l_feature,l_mean_feature,torch.norm(a_l_feature-l_mean_feature,dim=1).unsqueeze(1)],dim=1).unsqueeze(-1)).squeeze()\n # l_cost=cost_volume[index_all[:,0],index_all[:,1],:].unsqueeze(0)\n # l_weights=l_weights.unsqueeze(0).expand(d.shape[0],-1).contiguous().view(l_cost.shape[0],l_cost.shape[1],l_cost.shape[2])\n # l_weights_b=l_weights\n # l_weights=torch.where(l_r_y.view_as(l_cost)>=0,l_weights,1e-4*one)\n # l_weights=torch.where(l_cost==zero,1e-4*one,l_weights)\n # mean_cost=torch.sum((l_weights*l_cost),1)/torch.sum(l_weights,1)\n # mean_cost=torch.where(torch.sum(l_weights,1)==zero,torch.sum((l_weights*l_cost),1),mean_cost)\n # mean_cost=mean_cost.unsqueeze(1).expand(l_cost.shape[0],l_cost.shape[1],l_cost.shape[2])\n # l_cost=torch.where(l_weights==1e-4*one,l_weights_b*mean_cost,l_cost) \n # disparity[x1:x2,y1:y2][index_all[:,0],index_all[:,1]]=self.ss_argmin(-l_cost.view(1,index_all.shape[0],d.shape[0]).cuda(0),d.float().cuda(0))\n # # time.sleep(1)\n\n\n print(count/960/540,torch.sum(torch.where(disparity>0,one,zero)).item()/960/540)\n #time.sleep(1000)\n #exit()\n #print(torch.max(disparity).item(),torch.min(disparity).item())\n return disparity\n\n\n",
"# -*- coding: utf-8 -*-\n# @Author: yulidong\n# @Date: 2018-07-17 10:44:43\n# @Last Modified by: yulidong\n# @Last Modified time: 2018-10-14 16:44:55\n# -*- coding: utf-8 -*-\n# @Author: lidong\n# @Date: 2018-03-20 18:01:52\n# @Last Modified by: yulidong\n# @Last Modified time: 2018-07-16 22:16:14\nimport time\nimport torch\nimport numpy as np\nimport torch.nn as nn\nimport math\nfrom math import ceil\nfrom torch.autograd import Variable\nfrom torch.nn.functional import cosine_similarity as cosine_s\nfrom pssm import caffe_pb2\nfrom pssm.models.utils import *\nrsn_specs = {\n 'scene': \n {\n 'n_classes': 9,\n 'input_size': (540, 960),\n 'block_config': [3, 4, 23, 3],\n },\n\n}\n\ngroup_dim=32\npramid_dim=8\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n\n if stride==1:\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n if stride==2:\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=2, bias=False) \nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.gn1 = nn.GroupNorm(group_dim,planes)\n self.relu = nn.LeakyReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.gn2 = nn.GroupNorm(group_dim,planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.gn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.gn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n # print(residual.shape)\n # print(out.shape)\n out += residual\n out = self.relu(out)\n\n return out\nclass feature_extraction(nn.Module):\n def __init__(self):\n super(feature_extraction, self).__init__()\n self.inplanes = 32\n self.layer1 = self._make_layer(BasicBlock, 32, 3, 1,1,1)\n\n self.branch1 = nn.Sequential(nn.AvgPool2d((54, 96), stride=(54,96)),\n nn.Conv2d(32, 8, 1, 1, 0, 1),\n nn.GroupNorm(pramid_dim,8),\n nn.LeakyReLU(inplace=True))\n\n self.branch2 = nn.Sequential(nn.AvgPool2d((27, 48), stride=(27,48)),\n nn.Conv2d(32, 8, 1, 1, 0, 1),\n nn.GroupNorm(pramid_dim,8),\n nn.LeakyReLU(inplace=True))\n\n self.branch3 = nn.Sequential(nn.AvgPool2d((36, 64), stride=(36,64)),\n nn.Conv2d(32, 8, 1, 1, 0, 1),\n nn.GroupNorm(pramid_dim,8),\n nn.LeakyReLU(inplace=True))\n\n self.branch4 = nn.Sequential(nn.AvgPool2d((18, 32), stride=(18,32)),\n nn.Conv2d(32, 8, 1, 1, 0, 1),\n nn.GroupNorm(pramid_dim,8),\n nn.LeakyReLU(inplace=True))\n self.branch5 = nn.Sequential(nn.AvgPool2d((9, 16), stride=(9,16)),\n nn.Conv2d(32, 8, 1, 1, 0, 1),\n nn.GroupNorm(pramid_dim,8),\n nn.LeakyReLU(inplace=True))\n self.branch6 = nn.Sequential(nn.AvgPool2d((3, 8), stride=(3,8)),\n nn.Conv2d(32, 8, 1, 1, 0, 1),\n nn.GroupNorm(pramid_dim,8),\n nn.LeakyReLU(inplace=True))\n\n\n self.lastconv = nn.Sequential(nn.Conv2d(80, 64, 3, 1, 1, 1),\n nn.GroupNorm(group_dim,64),\n nn.LeakyReLU(inplace=True),\n nn.Conv2d(64, 32, 3, 1, 1, 1),\n )\n for m in self.modules():\n if isinstance(m,nn.Conv2d):\n nn.init.kaiming_normal_(m.weight,mode='fan_out',nonlinearity='relu')\n elif isinstance(m, nn.GroupNorm):\n nn.init.constant_(m.weight,1)\n nn.init.constant_(m.bias,0)\n def _make_layer(self, block, planes, blocks, stride, pad, dilation):\n downsample = None\n\n layers = []\n layers.append(block(self.inplanes, planes, stride))\n self.inplanes = planes * block.expansion\n for i 
in range(1, blocks):\n layers.append(block(self.inplanes, planes,1))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n # output = self.conv1(x)\n # output = self.gn1(output)\n # output = self.relu1(output)\n # output = self.conv2(output)\n # output = self.gn2(output)\n # output = self.relu2(output)\n # output = self.conv3(output)\n # output = self.gn3(output)\n # output = self.relu3(output)\n output_skip = self.layer1(x)\n # output_skip=x\n\n output_branch1 = self.branch1(output_skip)\n output_branch1 = F.interpolate(output_branch1, (output_skip.size()[2],output_skip.size()[3]),mode='bilinear',align_corners=True)\n\n output_branch2 = self.branch2(output_skip)\n output_branch2 = F.interpolate(output_branch2, (output_skip.size()[2],output_skip.size()[3]),mode='bilinear',align_corners=True)\n\n output_branch3 = self.branch3(output_skip)\n output_branch3 = F.interpolate(output_branch3, (output_skip.size()[2],output_skip.size()[3]),mode='bilinear',align_corners=True)\n\n output_branch4 = self.branch4(output_skip)\n output_branch4 = F.interpolate(output_branch4, (output_skip.size()[2],output_skip.size()[3]),mode='bilinear',align_corners=True)\n\n output_branch5 = self.branch5(output_skip)\n output_branch5 = F.interpolate(output_branch5, (output_skip.size()[2],output_skip.size()[3]),mode='bilinear',align_corners=True)\n\n output_branch6 = self.branch6(output_skip)\n output_branch6 = F.interpolate(output_branch6, (output_skip.size()[2],output_skip.size()[3]),mode='bilinear',align_corners=True)\n\n output_feature = torch.cat((output_skip, output_branch6, output_branch5, output_branch4, output_branch3, output_branch2, output_branch1), 1)\n output_feature = self.lastconv(output_feature)\n #print(output_feature.shape)\n return output_feature\n\nclass feature_extraction2(nn.Module):\n def __init__(self):\n super(feature_extraction2, self).__init__()\n self.inplanes = 32\n self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1,\n bias=False,dilation=1)\n self.gn1 = nn.GroupNorm(group_dim,32)\n self.relu1 = nn.LeakyReLU(inplace=True)\n self.conv2 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1,\n bias=False,dilation=1)\n self.gn2 = nn.GroupNorm(group_dim,32)\n self.relu2 = nn.LeakyReLU(inplace=True)\n self.conv3 = nn.Conv2d(32, 32, kernel_size=7, stride=1, padding=6,\n bias=False,dilation=2)\n self.gn3 = nn.GroupNorm(group_dim,32)\n self.relu3 = nn.LeakyReLU(inplace=True)\n self.layer1 = self._make_layer(BasicBlock, 32, 1, 1,1,1)\n self.lastconv = nn.Conv2d(32, 32, 3, 1, 1, 1)\n\n for m in self.modules():\n if isinstance(m,nn.Conv2d):\n nn.init.kaiming_normal_(m.weight,mode='fan_out',nonlinearity='relu')\n elif isinstance(m, nn.GroupNorm):\n nn.init.constant_(m.weight,1)\n nn.init.constant_(m.bias,0)\n def _make_layer(self, block, planes, blocks, stride, pad, dilation):\n downsample = None\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes,1,None,))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n output = self.conv1(x)\n output = self.gn1(output)\n output = self.relu1(output)\n output = self.conv2(output)\n output = self.gn2(output)\n output = self.relu2(output)\n output = self.conv3(output)\n output = self.gn3(output)\n output = self.relu3(output)\n #print(output.shape)\n output = self.layer1(output)\n output=self.lastconv(output)\n\n return output\n\nclass similarity_measure1(nn.Module):\n def __init__(self):\n 
super(similarity_measure1, self).__init__()\n self.inplanes = 32\n self.conv1 = nn.Conv2d(32, 16, kernel_size=1, stride=1, padding=0,\n bias=False,dilation=1)\n self.relu1 = nn.LeakyReLU(inplace=True)\n self.conv2 = nn.Conv2d(16, 4, kernel_size=1, stride=1, padding=0,\n bias=False,dilation=1)\n self.relu2 = nn.LeakyReLU(inplace=True)\n self.conv3 = nn.Conv2d(4, 1, kernel_size=1, stride=1, padding=0,\n bias=False,dilation=1)\n # self.conv4 = nn.Conv2d(8, 2, kernel_size=1, stride=1, padding=0,\n # bias=False,dilation=1) \n # self.lastconv = nn.Conv2d(2, 1, kernel_size=1, stride=1, padding=0,\n # bias=False,dilation=1)\n self.s1=nn.Parameter(torch.ones(1)).float()\n for m in self.modules():\n if isinstance(m,nn.Conv2d):\n nn.init.kaiming_normal_(m.weight,mode='fan_out',nonlinearity='relu')\n elif isinstance(m, nn.GroupNorm):\n nn.init.constant_(m.weight,1)\n nn.init.constant_(m.bias,0)\n def forward(self, x):\n output = self.conv1(x)\n output = self.relu1(output)\n output = self.conv2(output)\n output = self.relu2(output)\n output = self.conv3(output)\n # output=self.conv4(output)\n # output=self.lastconv(output)\n output=output*self.s1\n return output\nclass similarity_measure2(nn.Module):\n def __init__(self):\n super(similarity_measure2, self).__init__()\n self.inplanes = 32\n self.conv1 = nn.Conv2d(32, 16, kernel_size=1, stride=1, padding=0,\n bias=False,dilation=1)\n self.relu1 = nn.LeakyReLU(inplace=True)\n self.conv2 = nn.Conv2d(16, 4, kernel_size=1, stride=1, padding=0,\n bias=False,dilation=1)\n self.relu2 = nn.LeakyReLU(inplace=True)\n self.conv3 = nn.Conv2d(4, 1, kernel_size=1, stride=1, padding=0,\n bias=False,dilation=1)\n # self.conv4 = nn.Conv2d(8, 2, kernel_size=1, stride=1, padding=0,\n # bias=False,dilation=1) \n # self.lastconv = nn.Conv2d(2, 1, kernel_size=1, stride=1, padding=0,\n # bias=False,dilation=1)\n self.s2=nn.Parameter(torch.ones(1)).float()\n\n for m in self.modules():\n if isinstance(m,nn.Conv2d):\n nn.init.kaiming_normal_(m.weight,mode='fan_out',nonlinearity='relu')\n\n def forward(self, x):\n output = self.conv1(x)\n output = self.relu1(output)\n output = self.conv2(output)\n output = self.relu2(output)\n output = self.conv3(output)\n # output=self.conv4(output)\n # output=self.lastconv(output)\n output=self.s2*output\n return output\n\nclass similarity_measure3(nn.Module):\n def __init__(self):\n super(similarity_measure3, self).__init__()\n self.inplanes = 32\n self.conv1 = nn.Conv2d(69, 32, kernel_size=1, stride=1, padding=0,\n bias=False,dilation=1)\n self.relu1 = nn.LeakyReLU(inplace=True)\n self.conv2 = nn.Conv2d(32, 16, kernel_size=1, stride=1, padding=0,\n bias=False,dilation=1)\n self.relu2 = nn.LeakyReLU(inplace=True)\n self.conv3 = nn.Conv2d(16, 8, kernel_size=1, stride=1, padding=0,\n bias=False,dilation=1)\n self.relu3 = nn.LeakyReLU(inplace=True)\n self.conv4 = nn.Conv2d(8, 4, kernel_size=1, stride=1, padding=0,\n bias=False,dilation=1)\n self.relu4 = nn.LeakyReLU(inplace=True)\n self.conv5 = nn.Conv2d(4, 1, kernel_size=1, stride=1, padding=0,\n bias=False,dilation=1)\n # self.relu5 = nn.ReLU(inplace=True)\n\n\n for m in self.modules():\n if isinstance(m,nn.Conv2d):\n nn.init.kaiming_normal_(m.weight,mode='fan_out',nonlinearity='relu')\n elif isinstance(m, nn.GroupNorm):\n nn.init.constant_(m.weight,1)\n nn.init.constant_(m.bias,0)\n def forward(self, x):\n output = self.conv1(x)\n output = self.relu1(output)\n output = self.conv2(output)\n output = self.relu2(output)\n output = self.conv3(output)\n output = self.relu3(output)\n output = 
self.conv4(output)\n output = self.relu4(output)\n output = self.conv5(output)\n output = torch.abs(output)\n # output = self.relu5(output)\n # print(output.shape)\n # print(torch.mean(output).item(),torch.max(output).item(),torch.min(output).item())\n\n output = output/torch.max(output)\n # output = output-torch.min(output)\n # output = 1-output\n output = torch.exp(-output)\n #print(torch.mean(output).item(),torch.max(output).item(),torch.min(output).item())\n return output\nclass rstereo(nn.Module):\n\n\n def __init__(self, \n n_classes=9, \n block_config=[3, 4, 6, 3], \n input_size= (480, 640), \n version='scene'):\n\n super(rstereo, self).__init__()\n self.feature_extraction=feature_extraction().cuda(0)\n self.feature_extraction2=feature_extraction2().cuda(0)\n self.softmax= nn.Softmax(dim=-1)\n self.similarity1=similarity_measure1().cuda(1)\n self.similarity2=similarity_measure2().cuda(1)\n self.similarity3=similarity_measure3().cuda(1)\n\n def ss_argmin(self,x,index):\n one=torch.ones(1)\n zero=torch.zeros(1)\n #print(x.data.cpu())\n # exit()\n x=self.softmax(-x)\n #print(x)\n disparity= torch.sum(x*index.unsqueeze(0),dim=-1)\n return disparity \n def cluster_vector(self,feature,x,y):\n one=torch.ones(1).cuda(1)\n zero=torch.zeros(1).cuda(1)\n cluster_feature=feature[...,x,y]\n mean=torch.sum(cluster_feature,dim=-1)/x.shape[0]\n mean=mean.view(cluster_feature.shape[0],cluster_feature.shape[1],1)\n #print(mean.shape)\n weights=torch.norm(cluster_feature-mean,dim=1)\n weights=torch.exp(-weights)\n return weights\n def forward(self, l,r,P,pre,pre2):\n #self.P=P[1,0]\n #0 l to r,1 min,2 max\n #[l_box,r_box,match],[min_d,max_d]\n #start_time=time.time()\n self.pre=pre.cuda(1)\n P1=P[...,0].cuda(1).squeeze()\n P2=P[...,1].cuda(1).squeeze()\n P3=P[...,2].cuda(1).squeeze()\n P4=P[...,3].cuda(1).squeeze()\n #feature extraction\n #P2=P2-P1\n #print(torch.min(P3),torch.max(P3))\n l_sf=self.feature_extraction2(l)\n l_lf=self.feature_extraction(l_sf)\n\n r_sf=self.feature_extraction2(r)\n r_lf=self.feature_extraction(r_sf)\n\n disparity=torch.zeros([540,960]).cuda(0)\n one=torch.ones(1).cuda(1)\n zero=torch.zeros(1).cuda(1)\n\n\n\n print(count/960/540,torch.sum(torch.where(disparity>0,one,zero)).item()/960/540)\n #time.sleep(1000)\n #exit()\n #print(torch.max(disparity).item(),torch.min(disparity).item())\n return disparity\n\n\n",
"# -*- coding: utf-8 -*-\n# @Author: yulidong\n# @Date: 2018-03-19 13:33:07\n# @Last Modified by: yulidong\n# @Last Modified time: 2019-02-26 15:03:18\n\nimport os\nimport torch\nimport numpy as np\nfrom torch.utils import data\nimport torchvision.transforms as transforms\nimport random\nimport torchvision.transforms as transforms\nimport torchvision.transforms.functional as tf\nimport torch.nn.functional as F\nclass Lighting(object):\n \"\"\"Lighting noise(AlexNet - style PCA - based noise)\"\"\"\n\n def __init__(self):\n self.alphastd = 0.1\n self.eigval = torch.Tensor([0.2175, 0.0188, 0.0045])\n self.eigvec = torch.Tensor([\n [-0.5675, 0.7192, 0.4009],\n [-0.5808, -0.0045, -0.8140],\n [-0.5836, -0.6948, 0.4203],\n ])\n\n def __call__(self, img):\n if self.alphastd == 0:\n return img\n\n alpha = img.new().resize_(3).normal_(0, self.alphastd)\n rgb = self.eigvec.type_as(img).clone()\\\n .mul(alpha.view(1, 3).expand(3, 3))\\\n .mul(self.eigval.view(1, 3).expand(3, 3))\\\n .sum(1).squeeze()\n # print(rgb.view(3, 1, 1).expand_as(img))\n # exit()\n return img.add(rgb.view(3, 1, 1).expand_as(img))\n\nclass KITTI(data.Dataset):\n\n\n def __init__(self, root, split=\"train\", is_transform=True, img_size=(540,960)):\n \"\"\"__init__\n\n :param root:\n :param split:\n :param is_transform:\n :param img_size:\n \"\"\"\n self.is_transform = is_transform\n self.img_size = img_size if isinstance(img_size, tuple) else (540, 960)\n self.stats={'mean': [0.485, 0.456, 0.406],\n 'std': [0.229, 0.224, 0.225]}\n self.pca = Lighting()\n self.files = {}\n self.datapath=root\n self.files=os.listdir(os.path.join('/home/lidong/Documents/datasets/kitti15/',split))+os.listdir(os.path.join('/home/lidong/Documents/datasets/kitti12/',split))\n #self.files.sort() \n self.split=split\n self.kitti15=len(os.listdir(os.path.join('/home/lidong/Documents/datasets/kitti15/',split)))\n if len(self.files)<1:\n raise Exception(\"No files for ld=[%s] found in %s\" % (split, self.ld))\n self.length=self.__len__()\n print(\"Found %d in %s data\" % (len(self.files), self.datapath))\n\n def __len__(self):\n \"\"\"__len__\"\"\"\n return len(self.files)\n\n def __getitem__(self, index):\n \"\"\"__getitem__\n\n :param index:\n \"\"\"\n #index=58\n #print(os.path.join(self.datapath,'train_all',self.files[index]))\n if index<self.kitti15:\n data=np.load(os.path.join('/home/lidong/Documents/datasets/kitti15/',self.split,self.files[index]))\n else:\n data=np.load(os.path.join('/home/lidong/Documents/datasets/kitti12/',self.split,self.files[index]))\n #print(os.path.join(self.datapath,self.split,self.files[index]))\n if self.split=='train' or self.split=='train_all':\n position=np.nonzero(data[...,6])\n hmin=np.min(position[0])\n hmax=np.max(position[0])\n wmin=np.min(position[1])\n wmax=np.max(position[1])\n if hmax-hmin<=256:\n hmin=hmax-256\n if wmax-wmin<=512:\n wmax=wmin+512\n th, tw = 256, 512\n x1 = random.randint(hmin, hmax - th)\n y1 = random.randint(wmin, wmax - tw)\n data=data[x1:x1+th,y1:y1+tw,:]\n else:\n h,w = data.shape[0],data.shape[1]\n th, tw = 384, 1248\n x1 = 0\n y1 = 0\n padding_h=data[:(th-h),:,:]\n padding_h[:,:,6]=0\n data=np.concatenate([padding_h,data],0)\n padding_w=data[:,:(tw-w),:]\n padding_w[:,:,6]=0\n data=np.concatenate([padding_w,data],1)\n #data[:(th-h),:(tw-w),6]=0\n #data=data[:540,:960,:]\n left=data[...,0:3]/255\n #\n image2=data[...,0:3]\n image2=transforms.ToTensor()(image2)\n #print(torch.max(image),torch.min(image))\n right=data[...,3:6]/255\n disparity=data[...,6]\n # 
print(np.sum(np.where(disparity[:540,...]==0,np.ones(1),np.zeros(1))))\n # print(np.sum(np.where(disparity[:540,...]<=1,np.ones(1),np.zeros(1))))\n # print(np.sum(np.where(disparity<=2,np.ones(1),np.zeros(1))))\n # print(np.sum(np.where(disparity<=3,np.ones(1),np.zeros(1))))\n # print(disparity.shape)\n if self.is_transform:\n left, right,disparity,image = self.transform(left, right,disparity)\n if self.split=='test':\n return left, right,disparity,image,self.files[index].split('.')[0],h,w\n\n #print(torch.max(left),torch.min(left))\n return left, right,disparity,image2\n def transform(self, left, right,disparity):\n \"\"\"transform\n \"\"\"\n trans=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n if self.split=='eval' or self.split=='test':\n image=left*255+0\n left=trans(left).float()\n right=trans(right).float()\n disparity=torch.from_numpy(disparity).float()\n #image=left+0\n else:\n\n disparity=torch.from_numpy(disparity).float()\n topil=transforms.ToPILImage()\n totensor=transforms.ToTensor()\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n left=totensor(left)\n right=totensor(right)\n one=torch.ones(1).float()\n zero=torch.zeros(1).float()\n #sigma=random.uniform(0, 0.04)\n # brightness=random.uniform(0, 0.4)\n # contrast=random.uniform(0, 0.4)\n # saturation=random.uniform(0, 0.4)\n # hue=random.uniform(0, 0.2)\n \n #variance=color(left)-left\n left=topil(left)\n right=topil(right)\n color=transforms.ColorJitter(0.4,0.4,0.4,0.4)\n left=color(left)\n right=color(right)\n\n # gamma=random.uniform(0.8, 1.2)\n # left=tf.adjust_gamma(left,gamma)\n # right=tf.adjust_gamma(right,gamma)\n left=totensor(left)\n right=totensor(right)\n left=self.pca(left)\n right=self.pca(right)\n # r=random.uniform(0.8, 1.2)\n # g=random.uniform(0.8, 1.2)\n # b=random.uniform(0.8, 1.2)\n # left[:,:,0]*=r\n # left[:,:,1]*=g\n # left[:,:,2]*=b\n # right[:,:,0]*=r\n # right[:,:,1]*=g\n # right[:,:,2]*=b\n # gaussian=torch.zeros_like(left).normal_()*sigma\n # left=left+gaussian\n left=left.clamp(min=0,max=1)\n # right=right+gaussian\n right=right.clamp(min=0,max=1)\n image=left+0\n left=normalize(left)\n right=normalize(right)\n\n return left,right,disparity,image\n"
] | [
[
"torch.nn.Sequential",
"torch.cat",
"torch.nn.ConvTranspose3d",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.sum",
"torch.nn.Sigmoid",
"torch.nn.Conv3d",
"torch.nn.AvgPool2d",
"torch.no_grad",
"torch.nn.LeakyReLU",
"torch.arange",
"torch.topk",
"torch.nn.GroupNorm",
"torch.nn.ReLU",
"torch.squeeze",
"torch.nn.init.kaiming_normal_"
],
[
"torch.abs",
"torch.nn.Softmax",
"torch.mean",
"torch.max",
"torch.cat",
"torch.zeros",
"torch.sum",
"torch.no_grad",
"torch.where",
"torch.norm",
"torch.ones",
"numpy.ceil",
"torch.arange",
"torch.nn.GroupNorm",
"torch.ones_like",
"torch.nn.Sequential",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.exp",
"torch.nn.AvgPool2d",
"torch.nn.LeakyReLU",
"torch.nn.init.kaiming_normal_"
],
[
"torch.nn.Sequential",
"torch.abs",
"torch.nn.Softmax",
"torch.ones",
"torch.norm",
"torch.max",
"torch.cat",
"torch.zeros",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.sum",
"torch.exp",
"torch.nn.AvgPool2d",
"torch.nn.LeakyReLU",
"torch.where",
"torch.nn.GroupNorm",
"torch.nn.init.kaiming_normal_"
],
[
"torch.ones",
"numpy.nonzero",
"torch.Tensor",
"numpy.min",
"torch.zeros",
"torch.from_numpy",
"numpy.concatenate",
"numpy.max"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
khirotaka/testbed | [
"e32384a3267d5282fb9f2df22597dfa7fb9aa17d"
] | [
"examples/sw.py"
] | [
"import time\nimport numpy as np\nfrom testbed._rust import sliding_window\n\n\nx = np.random.randn(5000, 5)\n\n\ns = time.time()\nrustout = sliding_window(x, 100, 1)\nprint(\"=\" * 50)\nprint(\"Rust Speed: \", time.time() - s)\nprint(rustout.shape)\n\n\ndef sw(array, ws, over):\n sl = len(array)\n return [array[i:i+ws] for i in range(0, sl-ws, over)]\n\n\nprint(\"=\" * 50)\ns = time.time()\ntmp = sw(x, 100, 1)\ntmp = np.stack(tmp, 0)\nprint(\"Python Speed: \", time.time() - s)\nprint(tmp.shape)\n"
] | [
[
"numpy.random.randn",
"numpy.stack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
katianaz/GiftHelper | [
"1fbff4e7902c25950a5f50f04f0b2c834842ccbe"
] | [
"informacoes_emails.py"
] | [
"import pontuacao_categorias\r\nimport pandas as pd\r\n\r\nnomes = []\r\nnomes_presenteados = []\r\nenderecos_emails = []\r\n\r\nfor p in range(len(pontuacao_categorias.tabela.index)):\r\n nomes.append(pontuacao_categorias.tabela['3'][p])\r\n nomes_presenteados.append(pontuacao_categorias.tabela['4'][p])\r\n enderecos_emails.append(pontuacao_categorias.tabela['2'][p])\r\n\r\ninformacoes = {'Nome': nomes,\r\n 'Email': enderecos_emails,\r\n 'Presenteado': nomes_presenteados,\r\n 'Sugestoes': pontuacao_categorias.sugestoes}\r\n\r\ninfos = pd.DataFrame(informacoes, columns=['Nome', 'Email', 'Presenteado', 'Sugestoes'])\r\n\r\ninfos.to_csv('infos_emails.csv', encoding='latin-1')\r\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
tfgraph/tfgraph | [
"19ae968b3060275c631dc601757646abaf1f58a1"
] | [
"examples/example_sparsifier_graph.py"
] | [
"#!/usr/bin/python3\n\nimport tensorflow as tf\nimport tfgraph\n\n\ndef main():\n with tf.Session() as sess:\n g: tfgraph.Graph = tfgraph.GraphConstructor.unweighted_random(sess, \"G\", 10, 85)\n g_sparse: tfgraph.Graph = tfgraph.GraphConstructor.as_sparsifier(sess, g, 0.75)\n\n print(g)\n print(g.m)\n\n print(g_sparse)\n print(g_sparse.m)\n\n print(g_sparse.m / g.m)\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"tensorflow.Session"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
HeyLifeHD/rp-bp | [
"9c59b1bc0267400747477467c45f96364d5528e1"
] | [
"rpbp/analysis/profile_construction/visualize_metagene_profile_bayes_factor.py"
] | [
"#! /usr/bin/env python3\n\nimport matplotlib\nmatplotlib.use('agg')\n\nimport argparse\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport logging\n\ndefault_title = \"Metagene profile Bayes' factors\"\ndefault_xlabel = \"Offset, relative to translation \\ninitiation site\"\ndefault_ylabel = \"Bayes' factor\"\ndefault_font_size = 15\n\ndefault_series_label = \"\"\n\ndef main():\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description=\"This script visualizes the Bayes' factors for a metagene profile.\\n\\n\"\n \"This script contains some hard-coded field names.\")\n parser.add_argument('bayes_factors', help=\"The metagene profile (csv) file\")\n parser.add_argument('length', help=\"The profile lengths to visualize\", type=int)\n parser.add_argument('out', help=\"The (output) image file\")\n \n parser.add_argument('--title', help=\"The title for the figure\", default=default_title)\n parser.add_argument('--xlabel', help=\"The label for the x-axis\", default=default_xlabel)\n parser.add_argument('--ylabel', help=\"The label for the y-axis\", default=default_ylabel)\n parser.add_argument('--series-label', help=\"The label for the legend\", default=default_series_label)\n parser.add_argument('--font-size', help=\"The font size for the title, axis labels, and \"\n \"xticks labels\", type=int, default=default_font_size)\n\n args = parser.parse_args()\n\n bayes_factors = pd.read_csv(args.bayes_factors)\n\n mask_length = bayes_factors['length'] == args.length\n group = bayes_factors.loc[mask_length]\n\n bfs = group['bayes_factor_mean']\n offsets = group['offset']\n bf_range = max(bfs) - min(bfs)\n \n fig, ax = plt.subplots(figsize=(10,5))\n ax.plot(offsets, bfs, label=args.series_label, color='b')\n ax.scatter(offsets, bfs, color='b')\n\n xlim = (min(offsets), max(offsets))\n\n ymin = min(bfs) - 0.1*bf_range\n ymax = max(bfs) + 0.1*bf_range\n ylim = (ymin, ymax)\n\n # and draw a line at \"bf=5\"\n plt.plot(xlim, (5, 5), color='k', linewidth=2, linestyle=':')\n\n # and a horizontal line at the maximum bf\n plt.plot(xlim, (max(bfs), max(bfs)), color='r', linewidth=1, linestyle=\"-.\")\n\n # and a vertical line at \"offset=-12\"\n ax.plot((-12, -12), ylim, color='g', linestyle=\"--\")\n \n ax.set_xlim(xlim)\n ax.set_ylim(ylim)\n\n # finally, add the labels, etc.\n plt.suptitle(args.title, fontsize=args.font_size, y=1.03)\n ax.set_xlabel(args.xlabel, fontsize=args.font_size)\n ax.set_ylabel(args.ylabel, fontsize=args.font_size)\n\n ax.tick_params(axis='both', which='major', labelsize=args.font_size)\n #ax.legend(loc=\"upper right\")\n\n fig.tight_layout()\n fig.savefig(args.out, bbox_inches='tight')\n\nif __name__ == '__main__':\n main()\n\n"
] | [
[
"pandas.read_csv",
"matplotlib.use",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.suptitle"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
B612-Asteroid-Institute/thor | [
"d3d1dcbe86f67a62c90b4cde3fc577e414825cf2"
] | [
"thor/orbit.py"
] | [
"import numpy as np\n\nfrom .utils import _checkTime\nfrom .vectors import calcNae\nfrom .vectors import calcDelta\nfrom .vectors import calcXae\nfrom .vectors import calcXa\nfrom .vectors import calcNhat\nfrom .vectors import calcR1\nfrom .vectors import calcR2\nfrom .projections import cartesianToGnomonic\nfrom .coordinates import transformCoordinates\n\n__all__ = [\"TestOrbit\"]\n\nclass TestOrbit:\n \"\"\"\n TestOrbit: Class that calculates and stores the rotation matrices \n for a guess of heliocentric distance and velocity. To be used in \n tandem with the Cell class.\n \n Parameters\n ----------\n elements : `~numpy.ndarray` (6)\n Cartesian ecliptic orbital elements with postions in units of AU\n and velocities in units of AU per day. \n t0 : `~astropy.time.core.Time` (1)\n Epoch at which orbital elements are defined.\n \"\"\"\n def __init__(self, elements, epoch):\n _checkTime(epoch, \"epoch\")\n \n self.elements = elements\n self.epoch = epoch\n \n def prepare(self, verbose=True):\n \"\"\"\n Calculate rotation matrices. \n \n Populates the following class properties:\n n_hat : vector normal to the plane of orbit \n R1 : rotation matrix to rotate towards x-y plane\n R2 : rotation matrix to rotate towards x-axis\n M : final rotation matrix\n \n Parameters\n ----------\n verbose : bool, optional\n Print progress statements.\n [Default = True]\n \n Returns\n -------\n None\n \"\"\"\n if verbose is True:\n print(\"Calculating vector normal to plane of orbit...\")\n self.n_hat = calcNhat(self.elements[:3])\n \n if verbose is True:\n print(\"Calculating R1 rotation matrix...\")\n self.R1 = calcR1(self.elements[:3], self.n_hat)\n self.x_a_xy = np.array(self.R1 @ self.elements[:3])[0]\n \n if verbose is True:\n print(\"Calculating R2 rotation matrix...\")\n self.R2 = calcR2(self.x_a_xy)\n \n if verbose is True:\n print(\"Calculating final rotation matrix...\")\n self.M = self.R2 @ self.R1\n \n if verbose is True:\n print(\"Done.\")\n print(\"\")\n return\n \n def applyToObservations(self, observations, verbose=True):\n \"\"\"\n Apply the prepared rotations to the given observations. Adds the gnomonic \n plane coordinates to observations (columns: theta_x_deg, theta_y_deg) \n \n Parameters\n ----------\n observations : `~pandas.DataFrame`\n DataFrame of observations defined at the same epoch as this test orbit, \n to project into the test orbit's frame.\n verbose : bool, optional\n Print progress statements? 
\n [Default = True]\n \n Returns\n -------\n None\n \"\"\"\n \n if verbose is True:\n print(\"Applying rotation matrices to observations...\")\n print(\"Converting to ecliptic coordinates...\")\n\n #velocities_present = False\n #if \"vRAcosDec\" in observations.columns and \"vDec\" in observations.columns:\n # coords_eq_r = observations[[\"RA_deg\", \"Dec_deg\"]].values\n # coords_eq_v = observations[[\"vRAcosDec\", \"vDec\"]].values\n # coords_eq_v[:, 0] /= np.cos(np.radians(coords_eq_r[:, 1]))\n # coords_eq = np.hstack([\n # np.ones((len(coords_eq_r), 1)), \n # coords_eq_r, \n # np.zeros((len(coords_eq_r), 1)),\n # coords_eq_v\n # ]) \n # velocities_present = True\n\n #else:\n coords_eq = observations[[\"RA_deg\", \"Dec_deg\"]].values\n coords_eq = np.hstack([np.ones((len(coords_eq), 1)), coords_eq]) \n coords_ec = transformCoordinates(coords_eq, \n \"equatorial\", \n \"ecliptic\",\n representation_in=\"spherical\",\n representation_out=\"spherical\"\n )\n \n if verbose is True:\n print(\"Calculating object to observer unit vector...\")\n n_ae = calcNae(coords_ec[:, 1:3])\n x_e = observations[[\"obs_x\", \"obs_y\", \"obs_z\"]].values\n \n if verbose is True:\n print(\"Calculating object to observer distance assuming r = {} AU...\".format(np.linalg.norm(self.elements[:3])))\n delta = np.zeros(len(n_ae))\n for i in range(len(delta)):\n delta[i] = calcDelta(np.linalg.norm(self.elements[:3]), x_e[i, :], n_ae[i, :])\n \n if verbose is True:\n print(\"Calculating object to observer position vector...\")\n x_ae = np.zeros([len(delta), 3])\n for i, (delta_i, n_ae_i) in enumerate(zip(delta, n_ae)):\n x_ae[i] = calcXae(delta_i, n_ae_i)\n \n if verbose is True:\n print(\"Calculating heliocentric object position vector...\")\n x_a = np.zeros([len(x_ae), 3])\n for i, (x_ae_i, x_e_i) in enumerate(zip(x_ae, x_e)):\n x_a[i] = calcXa(x_ae_i, x_e_i)\n \n if verbose is True:\n print(\"Applying rotation matrix M to heliocentric object position vector...\")\n coords_cart_rotated = np.array(self.M @ x_a.T).T\n \n if verbose is True:\n print(\"Performing gnomonic projection...\")\n gnomonic_coords = cartesianToGnomonic(coords_cart_rotated)\n \n\n observations[\"obj_x\"] = x_a[:, 0]\n observations[\"obj_y\"] = x_a[:, 1]\n observations[\"obj_z\"] = x_a[:, 2]\n observations[\"theta_x_deg\"] = np.degrees(gnomonic_coords[:, 0])\n observations[\"theta_y_deg\"] = np.degrees(gnomonic_coords[:, 1])\n observations[\"test_obj_x\"] = self.elements[0]\n observations[\"test_obj_y\"] = self.elements[1]\n observations[\"test_obj_z\"] = self.elements[2]\n observations[\"test_obj_vx\"] = self.elements[3]\n observations[\"test_obj_vy\"] = self.elements[4]\n observations[\"test_obj_vz\"] = self.elements[5]\n\n if verbose is True:\n print(\"Done.\")\n print(\"\")\n return \n\n def applyToEphemeris(self, ephemeris, verbose=True):\n \"\"\"\n Apply the prepared rotations to the given ephemerides. Adds the gnomonic \n plane coordinates to observations (columns: theta_x_deg, theta_y_deg, vtheta_x, and vtheta_y) \n \n Parameters\n ----------\n ephemeris : `~pandas.DataFrame`\n DataFrame of ephemeris generated by a THOR backend defined at the same epoch as this test orbit, \n to project into the test orbit's frame.\n verbose : bool, optional\n Print progress statements? 
\n [Default = True]\n \n Returns\n -------\n None\n \"\"\"\n coords_cart = ephemeris[[\"obj_x\", \"obj_y\", \"obj_z\", \"obj_vx\", \"obj_vy\", \"obj_vz\"]].values\n coords_cart_rotated = np.zeros_like(coords_cart)\n \n if verbose is True:\n print(\"Applying rotation matrix M to heliocentric object position vector...\")\n coords_cart_rotated[:, :3] = np.array(self.M @ coords_cart[:, :3].T).T\n\n if verbose is True:\n print(\"Applying rotation matrix M to heliocentric object velocity vector...\")\n # Calculate relative velocity, then rotate to projected frame\n coords_cart[:, 3:] = coords_cart[:, 3:] - self.elements[3:].reshape(1, -1)\n coords_cart_rotated[:, 3:] = np.array(self.M @ coords_cart[:, 3:].T).T\n \n if verbose is True:\n print(\"Performing gnomonic projection...\")\n gnomonic_coords = cartesianToGnomonic(coords_cart_rotated)\n \n ephemeris[\"theta_x_deg\"] = np.degrees(gnomonic_coords[:, 0])\n ephemeris[\"theta_y_deg\"] = np.degrees(gnomonic_coords[:, 1])\n ephemeris[\"vtheta_x_deg\"] = np.degrees(gnomonic_coords[:, 2])\n ephemeris[\"vtheta_y_deg\"] = np.degrees(gnomonic_coords[:, 3])\n ephemeris[\"test_obj_x\"] = self.elements[0]\n ephemeris[\"test_obj_y\"] = self.elements[1]\n ephemeris[\"test_obj_z\"] = self.elements[2]\n ephemeris[\"test_obj_vx\"] = self.elements[3]\n ephemeris[\"test_obj_vy\"] = self.elements[4]\n ephemeris[\"test_obj_vz\"] = self.elements[5]\n\n if verbose is True:\n print(\"Done.\")\n print(\"\")\n return "
] | [
[
"numpy.degrees",
"numpy.array",
"numpy.zeros_like",
"numpy.linalg.norm"
]
] | [
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
bullocke/yatsm_nrt | [
"b0ded56032bf9f9dcdf6b7b749f6554ade56de1e",
"b0ded56032bf9f9dcdf6b7b749f6554ade56de1e"
] | [
"yatsm/cache.py",
"tests/algorithms/test_postprocess.py"
] | [
"\"\"\" Functions related to writing to and retrieving from cache files\n\"\"\"\nimport os\n\nimport numpy as np\n\nfrom log_yatsm import logger\n\n_image_ID_str = 'image_IDs'\n\n\ndef get_line_cache_name(dataset_config, n_images, row, nbands):\n \"\"\" Returns cache filename for specified config and line number\n\n Args:\n dataset_config (dict): configuration information about the dataset\n n_images (int): number of images in dataset\n row (int): line of the dataset for output\n nbands (int): number of bands in dataset\n\n Returns:\n str: filename of cache file\n\n \"\"\"\n path = dataset_config.get('cache_line_dir')\n if not path:\n return\n\n filename = 'yatsm_r%i_n%i_b%i.npy.npz' % (row, n_images, nbands)\n\n return os.path.join(path, filename)\n\n\ndef get_line_cache_pattern(row, nbands, regex=False):\n \"\"\" Returns a pattern for a cache file from a certain row\n\n This function is useful for finding all cache files from a line, ignoring\n the number of images in the file.\n\n Args:\n row (int): line of the dataset for output\n nbands (int): number of bands in dataset\n regex (bool, optional): return a regular expression instead of glob\n style (default: False)\n\n Returns:\n str: filename pattern for cache files from line ``row``\n\n \"\"\"\n wildcard = '.*' if regex else '*'\n pattern = 'yatsm_r{l}_n{w}_b{b}.npy.npz'.format(\n l=row, w=wildcard, b=nbands)\n\n return pattern\n\n\ndef test_cache(dataset_config):\n \"\"\" Test cache directory for ability to read from or write to\n\n Args:\n dataset_config (dict): dictionary of dataset configuration options\n\n Returns:\n tuple: tuple of bools describing ability to read from and write to\n cache directory\n\n \"\"\"\n # Try to find / use cache\n read_cache = False\n write_cache = False\n\n cache_dir = dataset_config.get('cache_line_dir')\n if cache_dir:\n # Test existence\n if os.path.isdir(cache_dir):\n if os.access(cache_dir, os.R_OK):\n read_cache = True\n if os.access(cache_dir, os.W_OK):\n write_cache = True\n if read_cache and not write_cache:\n logger.warning('Cache directory exists but is not writable')\n else:\n # If it doesn't already exist, can we create it?\n try:\n os.makedirs(cache_dir)\n except:\n logger.warning('Could not create cache directory')\n else:\n read_cache = True\n write_cache = True\n\n logger.debug('Attempt reading in from cache directory?: {b}'.format(\n b=read_cache))\n logger.debug('Attempt writing to cache directory?: {b}'.format(\n b=write_cache))\n\n return read_cache, write_cache\n\n\ndef read_cache_file(cache_filename, image_IDs=None):\n \"\"\" Returns image data from a cache file\n\n If ``image_IDs`` is not None this function will try to ensure data from\n cache file come from the list of image IDs provided. If cache file does not\n contain a list of image IDs, it will skip the check and return cache data.\n\n Args:\n cache_filename (str): cache filename\n image_IDs (iterable, optional): list of image IDs corresponding to data\n in cache file. 
If not specified, function will not check for\n correspondence (default: None)\n\n Returns:\n np.ndarray, or None: Return Y as np.ndarray if possible and if the\n cache file passes the consistency check specified by ``image_IDs``,\n else None\n\n \"\"\"\n try:\n cache = np.load(cache_filename)\n except IOError:\n return None\n\n if _image_ID_str in cache.files and image_IDs is not None:\n if not np.array_equal(image_IDs, cache[_image_ID_str]):\n logger.warning('Cache file data in {f} do not match images '\n 'specified'.format(f=cache_filename))\n return None\n\n return cache['Y']\n\n\ndef write_cache_file(cache_filename, Y, image_IDs):\n \"\"\" Writes data to a cache file using np.savez_compressed\n\n Args:\n cache_filename (str): cache filename\n Y (np.ndarray): data to write to cache file\n image_IDs (iterable): list of image IDs corresponding to data in cache\n file. If not specified, function will not check for correspondence\n\n \"\"\"\n np.savez_compressed(cache_filename, **{\n 'Y': Y, _image_ID_str: image_IDs\n })\n\n\n# Cache file updating\ndef update_cache_file(images, image_IDs,\n old_cache_filename, new_cache_filename,\n line, reader):\n \"\"\" Modify an existing cache file to contain data within `images`\n\n This should be useful for updating a set of cache files to reflect\n modifications to the timeseries dataset without completely reading the\n data into another cache file.\n\n For example, the cache file could be updated to reflect the deletion of\n a misregistered or cloudy image. Another common example would be for\n updating cache files to include newly acquired observations.\n\n Note that this updater will not handle updating cache files to include\n new bands.\n\n Args:\n images (iterable): list of new image filenames\n image_IDs (iterable): list of new image identifying strings\n old_cache_filename (str): filename of cache file to update\n new_cache_filename (str): filename of new cache file which includes\n modified data\n line (int): the line of data to be updated\n reader (callable): GDAL or BIP image reader function from\n :mod:`yatsm.io.stack_line_readers`\n\n Raises:\n ValueError: Raise error if old cache file does not record ``image_IDs``\n\n \"\"\"\n images = np.asarray(images)\n image_IDs = np.asarray(image_IDs)\n\n # Cannot proceed if old cache file doesn't store filenames\n old_cache = np.load(old_cache_filename)\n if _image_ID_str not in old_cache.files:\n raise ValueError('Cannot update cache.'\n 'Old cache file does not store image IDs.')\n old_IDs = old_cache[_image_ID_str]\n old_Y = old_cache['Y']\n nband, _, ncol = old_Y.shape\n\n # Create new Y and add in values retained from old cache\n new_Y = np.zeros((nband, image_IDs.size, ncol),\n dtype=old_Y.dtype.type)\n new_IDs = np.zeros(image_IDs.size, dtype=image_IDs.dtype)\n\n # Check deletions -- find which indices to retain in new cache\n retain_old = np.where(np.in1d(old_IDs, image_IDs))[0]\n if retain_old.size == 0:\n logger.warning('No image IDs in common in old cache file.')\n else:\n logger.debug(' retaining {r} of {n} images'.format(\n r=retain_old.size, n=old_IDs.size))\n # Find indices of old data to insert into new data\n idx_old_IDs = np.argsort(old_IDs)\n sorted_old_IDs = old_IDs[idx_old_IDs]\n idx_IDs = np.searchsorted(sorted_old_IDs,\n image_IDs[np.in1d(image_IDs, old_IDs)])\n\n retain_old = idx_old_IDs[idx_IDs]\n\n # Indices to insert into new data\n retain_new = np.where(np.in1d(image_IDs, old_IDs))[0]\n\n new_Y[:, retain_new, :] = old_Y[:, retain_old, :]\n new_IDs[retain_new] = 
old_IDs[retain_old]\n\n # Check additions -- find which indices we need to insert\n insert = np.where(np.in1d(image_IDs, old_IDs, invert=True))[0]\n\n if retain_old.size == 0 and insert.size == 0:\n raise ValueError('Cannot update cache file -- '\n 'no data retained or added')\n\n # Read in the remaining data from disk\n if insert.size > 0:\n logger.debug('Inserting {n} new images into cache'.format(\n n=insert.size))\n insert_Y = reader.read_row(images[insert], line)\n new_Y[:, insert, :] = insert_Y\n new_IDs[insert] = image_IDs[insert]\n\n np.testing.assert_equal(new_IDs, image_IDs)\n\n # Save\n write_cache_file(new_cache_filename, new_Y, image_IDs)\n",
"\"\"\" Test postprocessing algorithms\n\"\"\"\nimport numpy as np\n\nfrom yatsm.algorithms.postprocess import commission_test, refit_record\n\n\n# COMMISSION TEST\ndef test_commission_nochange(sim_nochange):\n \"\"\" In no change situation, we should get back exactly what we gave in\n \"\"\"\n record = commission_test(sim_nochange, 0.10)\n assert len(record) == 1\n np.testing.assert_array_equal(record, sim_nochange.record)\n\n\ndef test_commission_no_real_change_1(sim_no_real_change_1):\n \"\"\" Test commission test's ability to resolve spurious change\n \"\"\"\n record = commission_test(sim_no_real_change_1, 0.01)\n assert len(record) == 1\n assert record[0]['break'] == 0\n\n\ndef test_commission_no_real_change_2(sim_no_real_change_2):\n \"\"\" Test commission test's ability to resolve two spurious changes\n \"\"\"\n record = commission_test(sim_no_real_change_2, 0.01)\n assert len(record) == 1\n assert record[0]['break'] == 0\n\n\ndef test_commission_real_change(sim_real_change):\n \"\"\" Test commission test's ability to avoid merging real changes\n\n This test is run with a relatively large p value (very likely to reject H0\n and retain changes)\n \"\"\"\n record = commission_test(sim_real_change, 0.10)\n assert len(record) == len(sim_real_change.record)\n\n\n# REFIT\ndef test_refit_nochange_rlm(sim_nochange):\n \"\"\" Test record refitting of one record using robust linear models\n \"\"\"\n from yatsm.regression import RLM\n estimator = RLM(maxiter=10)\n\n refit = refit_record(sim_nochange, 'rlm', estimator,\n keep_regularized=True)\n assert 'rlm_coef' in refit.dtype.names\n assert 'rlm_rmse' in refit.dtype.names\n\n coef = np.array([[-3.84164779e+03, -3.84164779e+03],\n [5.26200993e-03, 5.26200993e-03]])\n rmse = np.array([0.96866816, 0.96866816])\n np.testing.assert_allclose(refit[0]['rlm_coef'], coef)\n np.testing.assert_allclose(refit[0]['rlm_rmse'], rmse)\n\n\ndef test_refit_nochange_reg(sim_nochange):\n \"\"\" Test refit ``keep_regularized=False`` (i.e., not ignoring coef == 0)\n \"\"\"\n from sklearn.linear_model import LinearRegression as OLS\n estimator = OLS()\n\n refit = refit_record(sim_nochange, 'ols', estimator,\n keep_regularized=False)\n assert 'ols_coef' in refit.dtype.names\n assert 'ols_rmse' in refit.dtype.names\n\n coef = np.array([[-3.83016528e+03, -3.83016528e+03],\n [5.24635240e-03, 5.24635240e-03]])\n rmse = np.array([0.96794599, 0.96794599])\n np.testing.assert_allclose(refit[0]['ols_coef'], coef)\n np.testing.assert_allclose(refit[0]['ols_rmse'], rmse)\n\n\ndef test_refit_none():\n \"\"\" Test refit if model is None/[]\n \"\"\"\n refit = refit_record(None, 'ols', None)\n assert refit is None\n refit = refit_record([], 'ols', None)\n assert refit is None\n\n\n# ISSUE #79\ndef test_refit_issue_79(sim_nochange):\n \"\"\" Issue 79: missing coverage for case when record['coef'] are all zero\n\n Fix is to use ``refit_[(coef|rmse)]`` prefix variable to index the record\n name\n \"\"\"\n from yatsm.regression import RLM\n estimator = RLM(maxiter=10)\n\n # Set record.coef to 0.\n sim_nochange.record['coef'] = np.zeros_like(sim_nochange.record['coef'])\n\n refit = refit_record(sim_nochange, 'rlm', estimator,\n keep_regularized=True)\n assert 'rlm_coef' in refit.dtype.names\n assert 'rlm_rmse' in refit.dtype.names\n\n coef = np.zeros_like(sim_nochange.record[0]['coef'])\n rmse = np.array([0.97117668, 0.97117668])\n np.testing.assert_allclose(refit[0]['rlm_coef'], coef)\n np.testing.assert_allclose(refit[0]['rlm_rmse'], rmse)\n"
] | [
[
"numpy.testing.assert_equal",
"numpy.array_equal",
"numpy.asarray",
"numpy.in1d",
"numpy.savez_compressed",
"numpy.argsort",
"numpy.load",
"numpy.zeros"
],
[
"numpy.testing.assert_array_equal",
"numpy.zeros_like",
"sklearn.linear_model.LinearRegression",
"numpy.testing.assert_allclose",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mewbak/hypertools | [
"bc2947737be8bd5a6e2a3bdca84132f6fee8989c",
"bc2947737be8bd5a6e2a3bdca84132f6fee8989c"
] | [
"examples/plot_hue.py",
"hypertools/tools/df2mat.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\n=============================\nGrouping data by category\n=============================\n\nWhen plotting, its useful to have a way to color points by some category or\nvariable. Hypertools does this using the `hue` kwarg, which takes a list\nof string category labels or numerical values. If text labels are passed, the\ndata is restructured according to those labels and plotted in different colors\naccording to your color palette. If numerical values are passed, the values\nare binned (default resolution: 100) and plotted according to your color\npalette.\n\"\"\"\n\n# Code source: Andrew Heusser\n# License: MIT\n\n# import\nimport hypertools as hyp\nimport numpy as np\n\n# load example data\ngeo = hyp.load('weights_sample')\ndata = geo.get_data()\n\n# simulate random groups\nhue=[]\nfor idx,i in enumerate(data):\n tmp=[]\n for iidx,ii in enumerate(i):\n tmp.append(int(np.random.randint(1000, size=1)))\n hue.append(tmp)\n\n# plot\ngeo.plot(fmt='.', hue=hue)\n",
"#!/usr/bin/env python\n\nimport pandas as pd\n\n\ndef df2mat(data, return_labels=False):\n \"\"\"\n Transforms a Pandas DataFrame into a Numpy array with binarized text columns\n\n This function transforms single-level df to an array so it can be plotted\n with HyperTools. Additionally, it uses the Pandas.Dataframe.get_dummies\n function to transform text columns into binary vectors, or\n 'dummy variables'.\n\n Parameters\n ----------\n data : A single-level Pandas DataFrame\n The df that you want to convert. Note that this currently only works\n with single-level (not Multi-level indices).\n\n Returns\n ----------\n plot_data : Numpy array\n A Numpy array where text columns are turned into binary vectors.\n\n labels : list (optional)\n A list of column labels for the numpy array. To return this, set\n return_labels=True.\n\n \"\"\"\n\n df_str = data.select_dtypes(include=['object'])\n df_num = data.select_dtypes(exclude=['object'])\n\n for colname in df_str.columns:\n df_num = df_num.join(pd.get_dummies(data[colname], prefix=colname))\n\n plot_data = df_num.values\n\n labels=list(df_num.columns.values)\n\n if return_labels:\n return plot_data,labels\n else:\n return plot_data\n"
] | [
[
"numpy.random.randint"
],
[
"pandas.get_dummies"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
gewoonrik/pullreqs-dnn | [
"dbafd1866c1cd44424d238618e5ca54841c358c0"
] | [
"preprocess.py"
] | [
"#!/usr/bin/env python\n#\n# (c) 2016 -- onwards Georgios Gousios <[email protected]>, Rik Nijessen <[email protected]>\n#\n\n\nfrom __future__ import print_function\n\nimport pickle\nimport random\nimport urllib\nimport numpy as np\nimport argparse\n\nfrom config import *\nfrom code_tokenizer import CodeTokenizer\nfrom my_tokenizer import MyTokenizer\nfrom keras.preprocessing.sequence import pad_sequences\n\n\n@timeit\ndef load_pr_csv(file):\n \"\"\"\n Load a PR dataset, including all engineered features\n :return: A pandas dataframe with all data loaded\n \"\"\"\n print(\"Loading pull requests file \", file)\n pullreqs = pd.read_csv(file)\n pullreqs.set_index(['project_name', 'github_id'])\n return pullreqs\n\n\ndef ensure_diffs():\n \"\"\"\n Make sure that the PR diffs have been downloaded in the appropriate dir\n \"\"\"\n if not os.path.exists(DIFFS_DIR):\n print(\"Downloading pull request diffs\")\n import tarfile\n\n urllib.urlretrieve(DIFFS_DATA_URL, DIFFS_FILE)\n tar = tarfile.open(DIFFS_FILE, \"r:gz\")\n tar.extractall()\n tar.close()\n\n\ndef read_title_and_comments(file):\n str = open(file).read()\n splitted = str.split(\"\\n\")\n title = splitted[0]\n # remove title and empty space\n comment = str[2:]\n return title, comment\n\n@timeit\ndef create_code_tokenizer(code, vocabulary_size):\n tokenizer = CodeTokenizer(nb_words=vocabulary_size)\n tokenizer.fit_on_texts(code)\n word_index = tokenizer.word_index\n print('Found %s unique tokens.' % len(word_index))\n return tokenizer\n\ndef create_text_tokenizer(texts, vocabulary_size):\n tokenizer = MyTokenizer(nb_words=vocabulary_size)\n tokenizer.fit_on_texts(texts)\n word_index = tokenizer.word_index\n print('Found %s unique tokens.' % len(word_index))\n return tokenizer\n\n\n@timeit\ndef tokenize(tokenizer, texts, maxlen):\n print(\"Tokenizing\")\n sequences = tokenizer.texts_to_sequences(texts)\n return pad_sequences(sequences, maxlen=maxlen)\n\n\ndef load_data(pullreqs):\n diffs = []\n titles = []\n comments = []\n labels = []\n successful = failed = 0\n for i, row in pullreqs.iterrows():\n try:\n name = (row['project_name']).replace('/','@')+\"@\"+str(row['github_id'])+'.patch'\n\n diff_file = os.path.join(DIFFS_DIR, name)\n comment_file = os.path.join(TXTS_DIR, name.replace(\".patch\",\".txt\"))\n\n diff = open(diff_file).read()\n title, comment = read_title_and_comments(comment_file)\n\n diffs.append(diff)\n titles.append(title)\n comments.append(comment)\n labels.append(int(row['merged'] * 1))\n successful += 1\n except:\n failed += 1\n pass\n print(\"%s diffs loaded, %s diffs failed\" % (successful, failed), end='\\r')\n\n print(\"\")\n return diffs, comments, titles, labels\n\n\n@timeit\ndef create_dataset(prefix=\"default\",\n diff_vocabulary_size=20000,\n comment_vocabulary_size=20000,\n title_vocabulary_size=20000,\n max_diff_length=100,\n max_comment_length=100,\n max_title_length=100):\n \"\"\"\n Create a dataset for further processing\n :param prefix: Name for the dataset\n :param balance_ratio: The ratio between merged and unmerged PRs to include\n :param num_diffs: Total number of diffs to load. 
Any value below 1 means load all diffs.\n :param langs: Only include PRs for repos whose primary language is within this array\n :param diff_vocabulary_size: (Max) size of the diff vocabulary to use for tokenizing\n :param comment_vocabulary_size: (Max) size of the comment vocabulary to use for tokenizing\n :param title_vocabulary_size: (Max) size of the title vocabulary to use for tokenizing\n :param max_diff_length: Maximum length of the input diff sequences\n :param max_comment_length: Maximum length of the input comment sequences\n :param max_title_length: Maximum length of the input title sequences\n :return: A training and testing dataset, along with the config used to produce it\n \"\"\"\n config = locals()\n\n pullreqs_train = load_pr_csv(train_csv_file % prefix)\n pullreqs_test = load_pr_csv(test_csv_file % prefix)\n pullreqs_validation = load_pr_csv(validation_csv_file % prefix)\n\n ensure_diffs()\n\n tr_diffs, tr_comments, tr_titles, tr_labels = load_data(pullreqs_train)\n val_diffs, val_comments, val_titles, val_labels = load_data(pullreqs_validation)\n te_diffs, te_comments, te_titles, te_labels = load_data(pullreqs_test)\n\n code_tokenizer = create_code_tokenizer(tr_diffs+val_diffs, diff_vocabulary_size)\n\n diff_train = tokenize(code_tokenizer, tr_diffs, max_diff_length)\n diff_val = tokenize(code_tokenizer, val_diffs, max_diff_length)\n diff_test = tokenize(code_tokenizer, te_diffs, max_diff_length)\n\n comment_tokenizer = create_text_tokenizer(tr_comments+val_comments, comment_vocabulary_size)\n\n comment_train = tokenize(comment_tokenizer, tr_comments, max_comment_length)\n comment_val = tokenize(code_tokenizer, val_comments, max_comment_length)\n comment_test = tokenize(comment_tokenizer, te_comments, max_comment_length)\n\n title_tokenizer = create_text_tokenizer(tr_titles+val_titles, title_vocabulary_size)\n\n title_train = tokenize(title_tokenizer, tr_titles, max_title_length)\n title_val = tokenize(code_tokenizer, val_titles, max_title_length)\n title_test = tokenize(title_tokenizer, te_titles, max_title_length)\n\n\n y_train = np.asarray(tr_labels)\n y_val = np.asarray(val_labels)\n y_test = np.asarray(te_labels)\n\n\n print('Shape of diff tensor:', diff_train.shape)\n print('Shape of comment tensor:', comment_train.shape)\n print('Shape of title tensor:', title_train.shape)\n print('Shape of label tensor:', y_train.shape)\n\n\n # Save dataset\n with open(diff_vocab_file % prefix, 'w') as f:\n pickle.dump(code_tokenizer, f)\n\n with open(comment_vocab_file % prefix, 'w') as f:\n pickle.dump(comment_tokenizer, f)\n\n with open(title_vocab_file % prefix, 'w') as f:\n pickle.dump(title_tokenizer, f)\n\n with open(diff_train_file % prefix, 'w') as f:\n pickle.dump(diff_train, f)\n\n with open(comment_train_file % prefix, 'w') as f:\n pickle.dump(comment_train, f)\n\n with open(title_train_file % prefix, 'w') as f:\n pickle.dump(title_train, f)\n\n with open(y_train_file % prefix, 'w') as f:\n pickle.dump(y_train, f)\n\n with open(diff_val_file % prefix, 'w') as f:\n pickle.dump(diff_val, f)\n\n with open(comment_val_file % prefix, 'w') as f:\n pickle.dump(comment_val, f)\n\n with open(title_val_file % prefix, 'w') as f:\n pickle.dump(title_val, f)\n\n with open(y_val_file % prefix, 'w') as f:\n pickle.dump(y_val, f)\n\n # save testdata\n with open(diff_test_file % prefix, 'w') as f:\n pickle.dump(diff_test, f)\n\n with open(comment_test_file % prefix, 'w') as f:\n pickle.dump(comment_test, f)\n\n\n with open(title_test_file % prefix, 'w') as f:\n pickle.dump(title_test, 
f)\n\n with open(y_test_file % prefix, 'w') as f:\n pickle.dump(y_test, f)\n\n\n with open(config_file % prefix, 'w') as f:\n pickle.dump(config, f)\n\n return diff_train, comment_train, title_train, y_train, diff_val, comment_val, title_val, y_val, diff_test, comment_test, title_test, y_test, config\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--prefix', default='default')\nparser.add_argument('--diff_vocabulary_size', type=int, default=50000)\nparser.add_argument('--comment_vocabulary_size', type=int, default=50000)\nparser.add_argument('--title_vocabulary_size', type=int, default=10000)\nparser.add_argument('--max_diff_sequence_length', type=int, default=150)\nparser.add_argument('--max_comment_sequence_length', type=int, default=150)\nparser.add_argument('--max_title_sequence_length', type=int, default=150)\n\n\nargs = parser.parse_args()\n\nif __name__ == '__main__':\n create_dataset(args.prefix, args.diff_vocabulary_size, args.comment_vocabulary_size, args.title_vocabulary_size, args.max_diff_sequence_length, args.max_comment_sequence_length, args.max_title_sequence_length)\n\n"
] | [
[
"numpy.asarray"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
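The PR-dataset script above fits one vocabulary per field (diffs, comments, titles) on the train and validation text, converts every split to fixed-length id sequences, and pickles the resulting tensors together with the tokenizers and the run config; note that in this excerpt the validation comments and titles are tokenized with `code_tokenizer` rather than their own field tokenizers. Below is a minimal, self-contained sketch of the per-field fit-then-pad step, using hypothetical `build_vocab` and `tokenize_and_pad` helpers in place of the script's `create_text_tokenizer` and `tokenize`, whose definitions are not part of this excerpt:

```python
from collections import Counter
from typing import Dict, List


def build_vocab(texts: List[str], vocab_size: int) -> Dict[str, int]:
    """Map the vocab_size most frequent whitespace tokens to ids; 0 is reserved for padding/unknown."""
    counts = Counter(tok for text in texts for tok in text.split())
    return {tok: i + 1 for i, (tok, _) in enumerate(counts.most_common(vocab_size))}


def tokenize_and_pad(vocab: Dict[str, int], texts: List[str], max_length: int) -> List[List[int]]:
    """Turn each text into a fixed-length id sequence, truncating or right-padding with 0."""
    rows = []
    for text in texts:
        ids = [vocab.get(tok, 0) for tok in text.split()][:max_length]
        rows.append(ids + [0] * (max_length - len(ids)))
    return rows


if __name__ == "__main__":
    train_comments = ["looks good to me", "please add tests before merging"]
    val_comments = ["tests are still failing"]

    # Each field keeps its own vocabulary, fit on train + validation text only.
    comment_vocab = build_vocab(train_comments + val_comments, vocab_size=50)
    print(tokenize_and_pad(comment_vocab, val_comments, max_length=6))
```

In the sketch, tokens that appear only in the test split fall back to the reserved id 0 at tokenization time; the script above similarly fits its tokenizers on the train and validation text before the test split is tokenized.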
andrewcistola/value-based-healthcare | [
"12583c33bff8dee83a7daf5aaaf1e7c39883a279"
] | [
"READMIT/alpha/fp_VBHC_READMIT_BEA_FIPS_alpha.py"
] | [
"# FractureProof\n## Value Based Healthcare Project\n### Outcome \n#### CMS Hospital Wiide Readmission Rate 2018 \n### Predictors\n#### BEA 2018 County wide Economic Measures\n### Table Key\n#### State County FIPS\n\n### Set working directory to project folder\nos.chdir(\"C:/Users/drewc/GitHub/allocativ\") # Set wd to project repository\n\n### Set file title and path\ntitle = \"fp_VBHC_READMIT_BEA_FIPS_alpha\"\npath = \"fp/VBHC/READMIT/\"\n\n## Section A: Collect Possible Predictors from Public Access Data\n\n### Import Python Libraries\nimport os # Operating system navigation\nimport sqlite3 # SQLite database manager\n\n### Import data science libraries\nimport pandas as pd # Widely used data manipulation library with R/Excel like tables named 'data frames'\nimport numpy as np # Widely used matrix library for numerical processes\n\n### Import scikit-learn libraries: data preparation \nfrom sklearn.preprocessing import StandardScaler # Standard scaling for easier use of machine learning algorithms\nfrom sklearn.impute import SimpleImputer # Univariate imputation for missing data\n\n### Step 1: Import and Join Data\n\n### Import ACS\ndf_bea = pd.read_csv(\"hnb/BEA/2018/BEA_2018_FIPS_full.csv\", low_memory = 'false') # Import dataset saved as csv in _data folder\n\n### Import CMS Data and Join\ndf_cms = pd.read_csv(\"hnb/CMS/CMS_2018_FIPS_full.csv\", low_memory = 'false') # Import dataset saved as csv in _data folder\ndf_cms = df_cms.filter([\"Rate of readmission after discharge from hospital (hospital-wide)\", \"FIPS\"]) # Keep only selected columns\ndf_join = pd.merge(df_cms, df_bea, on = \"FIPS\", how = \"inner\") # Join by column while keeping only items that exist in both, select outer or left for other options\ndf_cms = 0 # Clear variable\ndf_acs = 0 # Clear variable\n\n### Rename and Verify\ndf_step1 = df_join\ndf_join = 0\ndf_step1.info() # Get class, memory, and column info: names, data types, obs.\ndf_step1.head() # Print first 5 observations\n\n### Step 2: Data Manipulation\n\n### Import Datasets\n\n### Drop ID variables\ndf_man = df_step1.drop(columns = [\"FIPS\"]) # Drop Unwanted Columns\n\n### Rename outcome and test\ndf_man = df_man.rename(columns = {\"Rate of readmission after discharge from hospital (hospital-wide)\": \"outcome\"}) # Rename multiple columns in place\n\n### Rename and Verify\ndf_step2 = df_man\ndf_man = 0\ndf_step2.info() # Get class, memory, and column info: names, data types, obs.\ndf_step2.head() # Print first 5 observations\n\n## Step 3: Data Standardization\n\n### Remove outcome and test\ndf_NA = df_step2\noutcome = df_NA.pop(\"outcome\") # 'pop' column from df\n\n### Drop features with less than 75% data\ndf_NA = df_NA.dropna(axis = 1, thresh = 0.75*len(df_NA)) # Drop features less than 75% non-NA count for all columns\n\n### Impute missing values\ndf_NA = pd.DataFrame(SimpleImputer(strategy = \"median\").fit_transform(df_NA), columns = df_NA.columns) # Impute missing data\n\n### Standard Scale Values\ndf_NA = pd.DataFrame(StandardScaler().fit_transform(df_NA.values), columns = df_NA.columns) # convert the normalized features into a tabular format with the help of DataFrame.\n\n### Reattach outcome\ndf_NA.insert(0, \"outcome\", outcome) # reinsert in index\n\n### Drop all remaining rows (should be none)\ndf_NA = df_NA.dropna() # Drop all rows with NA values\n\n### Rename and Verify\ndf_step3 = df_NA\ndf_NA = 0\ndf_step3.info() # Get class, memory, and column info: names, data types, obs.\ndf_step3.head() # Print first 5 observations\n\n## Section B: 
Identify Significant Predictors with Reduction Algorithms\n\n### Import scikit-learn: machine learning\nfrom sklearn.decomposition import PCA # Principal compnents analysis from sklearn\nfrom sklearn.ensemble import RandomForestClassifier # Random Forest classification component\nfrom sklearn.ensemble import RandomForestRegressor # Random Forest classification component\nfrom sklearn.feature_selection import RFECV # Recursive Feature elimination with cross validation\nfrom sklearn.linear_model import LinearRegression # Used for machine learning with quantitative outcome\n\n### Step 4: Principal Component Analysis\n\n### Setup initial PCA model\ndf_pca = df_step3.drop(columns = [\"outcome\"]) # Drop outcome variable\ndegree = len(df_step3.columns) - 2 # Save number of features -1 to get degrees of freedom\npca = PCA(n_components = degree) # you will pass the number of components to make PCA model based on degrees of freedom\n\n### Fit initial PCA model\npca.fit(df_pca) # fit to data\n\n### Setup final PCA model\ndf_ev = pd.DataFrame(pca.explained_variance_) # Print explained variance of components\ndf_ev = df_ev[(df_ev[0] > 1)] # Save eigenvalues above 1\ncomponents = len(df_ev.index) # Save count of values for Variable reduction\npca = PCA(n_components = components) # you will pass the number of components to make PCA model\n\n### Fit final PCA model\npca.fit_transform(df_pca) # finally call fit_transform on the aggregate data to create PCA results object\n\n### Collect feature list from PCA\ndf_pca2 = pd.DataFrame(pca.components_, columns = df_pca.columns) # Export eigenvectors to data frame\ndf_pca2[\"Variance\"] = pca.explained_variance_ratio_ # Save eigenvalues as their own column\ndf_pca2 = df_pca2[df_pca2.Variance > df_pca2.Variance.mean()] # Susbet by eigenvalues with above average exlained variance ratio\ndf_pca2 = df_pca2.abs() # get absolute value for column or data frame\ndf_pca3 = pd.DataFrame(df_pca2.max(), columns = [\"MaxEV\"]) # select maximum eigenvector for each feature\ndf_pc = df_pca3[df_pca3.MaxEV > df_pca3.MaxEV.mean()] # Susbet by above average max eigenvalues \ndf_pc = df_pc.reset_index() # Add a new index of ascending values, existing index becomes column named \"index\"\ndf_pc = df_pc.rename(columns = {\"index\": \"Features\"}) # Rename multiple columns in place\n\n### Rename and Verify\ndf_step4 = df_pc\ndf_step4.info() # Get class, memory, and column info: names, data types, obs.\ndf_step4.head() # Print first 5 observations\n\n### Step 5: Random Forest Regressor\n\n### Setup RF model\nY = df_step3[\"outcome\"] # Isolate Outcome variable\nX = df_step3.drop(columns = [\"outcome\"]) # Drop Unwanted Columns # Save features columns as predictor data frame\nforest = RandomForestRegressor(n_estimators = 1000, max_depth = 10) #Use default values except for number of trees. For a further explanation see readme included in repository. 
\n\n### Fit Forest model\nforest.fit(X, Y) # This will take time\n\n### Collect features from RF\ngini = forest.feature_importances_ # Output importances of features\nl_gini = list(zip(X, gini)) # Create list of variables alongside importance scores \ndf_gini = pd.DataFrame(l_gini, columns = [\"Features\", \"Gini\"]) # Create data frame of importances with variables and gini column names\ndf_gini = df_gini.sort_values(by = [\"Gini\"], ascending = False) # Sort data frame by gini value in desceding order\ndf_gini = df_gini[(df_gini[\"Gini\"] > df_gini[\"Gini\"].mean())] # Subset by Gini values higher than mean\n\n### Rename and Verify\ndf_step5 = df_gini\ndf_step5.info() # Get class, memory, and column info: names, data types, obs.\ndf_step5.head() # Print first 5 observations\n\n### Step 6: Recursive Feature Elimination\n\n### Collect features from RF and PC\ndf_pc_gini = pd.merge(df_pc, df_gini, on = \"Features\", how = \"inner\") # Join by column while keeping only items that exist in both, select outer or left for other options\npc_gini_features = df_pc_gini[\"Features\"].tolist() # Save features from data frame\ndf_rfecv = df_step3[pc_gini_features] # Add selected features to df\n\n### Setup RFE model\nX = df_rfecv # Save features columns as predictor data frame\nY = df_step3[\"outcome\"] # Use outcome data frame \nRFE = LinearRegression() # Use regression coefficient as estimator\nselector = RFECV(estimator = RFE, min_features_to_select = 10) # define selection parameters, in this case all features are selected. See Readme for more ifo\n\n### Fit RFE model\nselected = selector.fit(X, Y) # This will take time\n\n### Collect features from RFE model\nar_rfe = selected.support_ # Save Boolean values as numpy array\nl_rfe = list(zip(X, ar_rfe)) # Create list of variables alongside RFE value \ndf_rfe = pd.DataFrame(l_rfe, columns = [\"Features\", \"RFE\"]) # Create data frame of importances with variables and gini column names\ndf_rfe = df_rfe[df_rfe.RFE == True] # Select Variables that were True\ndf_rfe = df_rfe.reset_index() # Reset Index\ndf_rfe = df_rfe.filter([\"Features\"]) # Keep only selected columns\n\n### Rename and Verify\ndf_step6 = df_rfe\ndf_step6.info() # Get class, memory, and column info: names, data types, obs.\ndf_step6.head() # Print first 5 observations\n\n## Section C: Evaluate Significant Features with Modeling and Prediction\n\n### Import scikit-learn libraries: regression\nfrom sklearn.linear_model import LogisticRegression # Used for machine learning with categorical outcome\nfrom sklearn.linear_model import LinearRegression # Used for machine learning with quantitative outcome\n\n### Import scikit-learn: neural network\nfrom sklearn.neural_network import MLPRegressor\n\n### Step 7: Multiple Regression\n\n### Setup MR Model\nfeatures = list(df_step6[\"Features\"]) # Save chosen featres as list\nx = df_step3.filter(features) # Keep only selected columns from rfe\ny = df_step3[\"outcome\"] # Add outcome variable\nLR = LinearRegression() # Linear Regression in scikit learn\n\n### Fit MR model\nregression = LR.fit(x, y) # Fit model\n\n### Collect features from MR model\ncoef = regression.coef_ # Coefficient models as scipy array\nl_reg = list(zip(x, coef)) # Create list of variables alongside coefficient \ndf_reg = pd.DataFrame(l_reg, columns = [\"Features\", \"Coefficients\"]) # Create data frame of importances with variables and gini column names\n\n### Export feature attributes\ndf_pc_gini_reg = pd.merge(df_pc_gini, df_reg, on = \"Features\", how = \"inner\") # Join 
by column while keeping only items that exist in both, select outer or left for other options\ndf_pc_gini_reg.to_csv(r\"fp/VBHC/READMIT/fp_VBHC_READMIT_BEA_FIPS_alpha.csv\") # Export df as csv\nprint(df_pc_gini_reg)\n\n### Collect prediction results\ndetermination = regression.score(x, y) # rsq value, coefficient of determination\nprint(determination)\n\n### Rename and Verify\ndf_step7 = df_pc_gini_reg\ndf_step7.info() # Get class, memory, and column info: names, data types, obs.\ndf_step7.head() # Print first 5 observations"
] | [
[
"sklearn.ensemble.RandomForestRegressor",
"pandas.merge",
"pandas.read_csv",
"sklearn.impute.SimpleImputer",
"pandas.DataFrame",
"sklearn.linear_model.LinearRegression",
"sklearn.feature_selection.RFECV",
"sklearn.preprocessing.StandardScaler",
"sklearn.decomposition.PCA"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
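The FractureProof script above standardizes the county-level predictors and then narrows them in three passes (PCA loadings, random-forest importances, recursive feature elimination) before reporting multiple-regression coefficients and an R². Below is a condensed sketch of that style of reduction chain on synthetic data, assuming a recent scikit-learn; the data, thresholds, and the guard against an empty intersection are illustrative and not taken from the original analysis:

```python
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestRegressor
from sklearn.feature_selection import RFECV
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import StandardScaler

rng = np.random.default_rng(0)
X = pd.DataFrame(rng.normal(size=(200, 8)), columns=[f"f{i}" for i in range(8)])
y = 2.0 * X["f0"] - 1.5 * X["f3"] + rng.normal(scale=0.1, size=200)

X_std = pd.DataFrame(StandardScaler().fit_transform(X), columns=X.columns)

# Pass 1, PCA screen: keep features whose largest absolute loading is above average.
pca = PCA(n_components=min(len(X_std.columns) - 1, 5)).fit(X_std)
loadings = pd.DataFrame(np.abs(pca.components_), columns=X_std.columns)
max_loading = loadings.max()
pca_keep = set(max_loading[max_loading > max_loading.mean()].index)

# Pass 2, random-forest screen: keep features with above-average impurity importance.
forest = RandomForestRegressor(n_estimators=200, max_depth=10, random_state=0).fit(X_std, y)
gini = pd.Series(forest.feature_importances_, index=X_std.columns)
rf_keep = set(gini[gini > gini.mean()].index)

# Pass 3, recursive feature elimination on the intersection, scored by a linear model.
candidates = sorted(pca_keep & rf_keep) or sorted(rf_keep)
selector = RFECV(estimator=LinearRegression(), min_features_to_select=1).fit(X_std[candidates], y)
final_features = [c for c, keep in zip(candidates, selector.support_) if keep]

# Final multiple regression on the surviving features.
model = LinearRegression().fit(X_std[final_features], y)
print(final_features, model.coef_, round(model.score(X_std[final_features], y), 3))
```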
niallscc/Optimus | [
"35218401556e5acc4beb2859084128ebcd1ab4e5"
] | [
"optimus/engines/base/dataframe/columns.py"
] | [
"from functools import reduce\n\nfrom sklearn.preprocessing import MinMaxScaler, MaxAbsScaler, StandardScaler\n\nfrom optimus.engines.base.columns import BaseColumns\nfrom optimus.helpers.columns import parse_columns, name_col\nfrom optimus.helpers.constants import Actions\nfrom optimus.helpers.raiseit import RaiseIt\n\n\nclass DataFrameBaseColumns(BaseColumns):\n\n def __init__(self, df):\n super(DataFrameBaseColumns, self).__init__(df)\n\n @staticmethod\n def exec_agg(exprs, compute=None):\n \"\"\"\n Exectute and aggregation\n Expression in Non dask dataframe can not handle compute. See exec_agg dask implementation\n :param exprs:\n :param compute:\n :return:\n \"\"\"\n return exprs\n\n def qcut(self, columns, num_buckets, handle_invalid=\"skip\"):\n pass\n\n @staticmethod\n def correlation(input_cols, method=\"pearson\", output=\"json\"):\n pass\n\n @staticmethod\n def scatter(columns, buckets=10):\n pass\n\n def standard_scaler(self, input_cols=\"*\", output_cols=None):\n df = self.root\n\n def _standard_scaler(_value):\n return StandardScaler().fit_transform(_value.values.reshape(-1, 1))\n\n return df.cols.apply(input_cols, func=_standard_scaler, output_cols=output_cols, meta_action=Actions.STANDARD_SCALER.value)\n\n def max_abs_scaler(self, input_cols=\"*\", output_cols=None):\n\n df = self.root\n\n def _max_abs_scaler(_value):\n return MaxAbsScaler().fit_transform(_value.values.reshape(-1, 1))\n\n return df.cols.apply(input_cols, func=_max_abs_scaler, output_cols=output_cols,meta_action=Actions.MAX_ABS_SCALER.value )\n\n def min_max_scaler(self, input_cols, output_cols=None):\n # https://github.com/dask/dask/issues/2690\n\n df = self.root\n\n def _min_max_scaler(_value):\n return MinMaxScaler().fit_transform(_value.values.reshape(-1, 1))\n\n return df.cols.apply(input_cols, func=_min_max_scaler, output_cols=output_cols, meta_action=Actions.MIN_MAX_SCALER.value )\n\n def replace_regex(self, input_cols, regex=None, value=\"\", output_cols=None):\n \"\"\"\n Use a Regex to replace values\n :param input_cols: '*', list of columns names or a single column name.\n :param output_cols:\n :param regex: values to look at to be replaced\n :param value: new value to replace the old one\n :return:\n \"\"\"\n\n df = self.root\n\n def _replace_regex(_value, _regex, _replace):\n return _value.replace(_regex, _replace, regex=True)\n\n return df.cols.apply(input_cols, func=_replace_regex, args=(regex, value,), output_cols=output_cols,\n filter_col_by_dtypes=df.constants.STRING_TYPES + df.constants.NUMERIC_TYPES)\n\n def reverse(self, input_cols, output_cols=None):\n def _reverse(value):\n return str(value)[::-1]\n\n df = self.root\n return df.cols.apply(input_cols, _reverse, func_return_type=str,\n filter_col_by_dtypes=df.constants.STRING_TYPES,\n output_cols=output_cols, set_index=True)\n\n @staticmethod\n def astype(*args, **kwargs):\n pass\n\n @staticmethod\n def apply_by_dtypes(columns, func, func_return_type, args=None, func_type=None, data_type=None):\n pass\n\n @staticmethod\n def to_timestamp(input_cols, date_format=None, output_cols=None):\n pass\n\n def nest(self, input_cols, separator=\"\", output_col=None, shape=\"string\", drop=False):\n df = self.root\n\n dfd = df.data\n\n if output_col is None:\n output_col = name_col(input_cols)\n\n input_cols = parse_columns(df, input_cols)\n\n output_ordered_columns = df.cols.names()\n\n # cudfd do nor support apply or agg join for this operation\n if shape == \"vector\" or shape == \"array\":\n raise NotImplementedError(\"Not implemented yet\")\n # 
https://stackoverflow.com/questions/43898035/pandas-combine-column-values-into-a-list-in-a-new-column/43898233\n # t['combined'] = t.values.tolist()\n\n # dfds = [dfd[input_col] for input_col in input_cols]\n # dfd[output_col] = dfd[input_cols].values.tolist()\n elif shape == \"string\":\n dfds = [dfd[input_col].astype(str) for input_col in input_cols]\n dfd = dfd.assign(**{output_col:reduce((lambda x, y: x + separator + y), dfds)})\n\n if output_col not in output_ordered_columns:\n col_index = output_ordered_columns.index(input_cols[-1]) + 1\n output_ordered_columns[col_index:col_index] = [output_col]\n\n if drop is True:\n for input_col in input_cols:\n if input_col in output_ordered_columns and input_col != output_col:\n output_ordered_columns.remove(input_col)\n\n return self.root.new(dfd).cols.select(output_ordered_columns)\n"
] | [
[
"sklearn.preprocessing.StandardScaler",
"sklearn.preprocessing.MaxAbsScaler",
"sklearn.preprocessing.MinMaxScaler"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
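In the Optimus column mixin above, each scaler is wrapped in a small function that reshapes the column to a `(n_samples, 1)` array, since scikit-learn scalers expect 2-D input, and that function is then dispatched per column through `df.cols.apply`. Below is a standalone sketch of the reshape-then-scale step on a plain pandas Series, assuming scikit-learn's `MinMaxScaler` and `StandardScaler`; the Optimus `apply` plumbing and metadata actions are omitted:

```python
import pandas as pd
from sklearn.preprocessing import MinMaxScaler, StandardScaler


def min_max_scale(column: pd.Series) -> pd.Series:
    """Scale a single column to [0, 1]; the reshape gives the (n_samples, 1) shape scalers require."""
    scaled = MinMaxScaler().fit_transform(column.values.reshape(-1, 1))
    return pd.Series(scaled.ravel(), index=column.index, name=column.name)


def standard_scale(column: pd.Series) -> pd.Series:
    """Center a single column to zero mean and unit variance."""
    scaled = StandardScaler().fit_transform(column.values.reshape(-1, 1))
    return pd.Series(scaled.ravel(), index=column.index, name=column.name)


if __name__ == "__main__":
    s = pd.Series([1.0, 5.0, 9.0], name="price")
    print(min_max_scale(s).tolist())            # [0.0, 0.5, 1.0]
    print(standard_scale(s).round(3).tolist())  # roughly [-1.225, 0.0, 1.225]
```

The same pattern covers the `max_abs_scaler` variant in the mixin; only the scaler class changes.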
dspub99/betazero | [
"b1adf9885166e6fb4974952292653efeea1b19dc"
] | [
"mctsPlayer.py"
] | [
"#!/usr/bin/env python\n\nimport numpy as np\n\nfrom randomPlayer import RandomPlayer\nimport game\nimport play\n\n# Run MCTS with MC to estimate the rest of the game.\n# http://mcts.ai/about/index.html\n# http://ccg.doc.gold.ac.uk/wp-content/uploads/2016/10/browne_tciaig12_1.pdf\n\nclass UCT:\n def __init__(self, c):\n self._c = c\n\n def parts(self, pNode, node):\n return (node.sum/node.n, 2*self._c*np.sqrt(2*np.log(pNode.n) / node.n))\n\n def __call__(self, pNode, node):\n if node.n == 0:\n return np.inf\n\n (exploit, explore) = self.parts( pNode, node )\n return exploit + explore\n\nclass UCTNegamax:\n def __init__(self, c):\n self._uct = UCT(c)\n\n def __call__(self, pNode, node):\n if node.n == 0:\n return np.inf\n\n # pNode.chi gives us negamax\n # Actually, our scores (like node.sum/node.n) are in [0,1] not [-1,1].\n # So to change to the opponent's perspective, we might prefer\n # scoreOpponent_A = 1 - score\n # to\n # scoreOpponent_B = -score\n # Note that scoreOpponent_B = scoreOpponent_A - 1. This offset of -1 in exploit\n # won't affect which node maximizes exploit + explore.\n (exploit, explore) = self._uct.parts( pNode, node )\n return pNode.chi*exploit + explore\n\nclass Node:\n def __init__(self, nprand, ttt, chi, maxPlies, parent=None, move=None):\n self._nprand = nprand\n # each Node has a clone of ttt with the Node's game state\n self.maxPlies = maxPlies\n self.chi = chi\n self.parent = parent\n self.ttt = ttt\n self.move = move\n self.sum = 0\n self.n = 0\n self.children = []\n self._needMoves = list(self.ttt.validMoves())\n\n def dump(self):\n n = 0\n queue = [self]\n while len(queue) > 0:\n # queue[0].ttt.dump()\n s = [str(n), \" \"*n]\n newQueue = []\n n += 1\n for node in queue:\n s.append(\"%d/%d(%d)\" % (2*node.sum, 2*node.n, node.maxPlies))\n newQueue.extend(node.children)\n print (' '.join(s))\n queue = newQueue\n\n\n def check_parentage(self):\n # Am I may children's parent?\n for c in self.children:\n assert(c.parent == self)\n c.check_parentage()\n\n def bestChild(self, uct):\n assert(len(self.children)>0)\n\n phis = []\n for c in self.children:\n # print (\"CHILD:\", uct(self, c))\n phis.append(uct(self, c))\n phis = np.array(phis)\n\n i = self._nprand.choice(np.where(phis > phis.max() - 1e-6)[0])\n return self.children[i]\n\n def findBoard(self, ttt):\n # exactly one ply ahead\n for c in self.children:\n if ttt.equivBoard(c.ttt.board()):\n return c\n return None\n\n def select(self, uct):\n # \"Starting at the root node, a child selection policy is recursively applied to descend\n # through the tree until the most urgent expandable node is reached. A node is expandable if\n # it represents a nonterminal state and has unvisited (i.e. unexpanded) children\"\n\n if len(self._needMoves) > 0:\n return self\n\n if len(self.children)==0:\n return None\n\n return self.bestChild(uct).select(uct)\n\n def expand(self):\n # \"One (or more) child nodes are added to expand the tree, according to the\n # available actions.\"\n\n assert( len(self._needMoves) > 0 )\n\n if self.maxPlies==0:\n # just run another sim from here\n return self\n\n m = self._nprand.choice(self._needMoves)\n self._needMoves.remove(m)\n ttt = self.ttt.clone()\n ttt.add(m)\n c = Node(self._nprand, ttt, -self.chi, self.maxPlies - 1, self, m.clone())\n self.children.append(c)\n return c\n\n def backpropagate(self, score):\n # \"The simulation result is “backed up” (i.e. 
backpropagated)\n # through the selected nodes to update their statistics.\"\n\n self.n += 1\n self.sum += score\n if self.parent is not None:\n self.parent.backpropagate(score)\n\n def __str__(self):\n return \"sum = %.4f n = %d nChildren = %d self = %s parent = %s\" % (self.sum, self.n, len(self.children), id(self), id(self.parent))\n\n\nclass MCTSPlayer:\n\n def __init__(self, nPlay, maxPlies, bNegamax, cUct = 1/np.sqrt(2), bDump=False):\n self._nPlay = nPlay\n self._maxPlies = maxPlies\n if bNegamax:\n self._uct = UCTNegamax(cUct)\n else:\n self._uct = UCT(cUct)\n self._cUct = cUct\n self._bNegamax = bNegamax\n self._bDump = bDump\n self._uctMove = UCT(0)\n self._rp = RandomPlayer()\n self._nprand = np.random.RandomState()\n\n self._root = None\n\n def __str__(self):\n return (\"%s nPlay = %d maxPlies = %d bNegamax = %s cUct = %.4f\" %\n (self.__class__.__name__, self._nPlay, self._maxPlies,\n self._bNegamax, self._cUct))\n\n def _simulate(self, node):\n # \"A simulation is run from the new node(s) according to the\n # default policy to produce an outcome.\"\n return play.playRest(self._rp, self._rp, node.ttt.clone(), False, 99999)[0]\n\n def setSeed(self, seed):\n self._nprand.seed(seed)\n self._rp.setSeed(seed+1)\n\n def move(self, ttt):\n if self._root is not None:\n self._root = self._root.findBoard(ttt)\n\n if self._root is None:\n self._root = Node(self._nprand, ttt, 1, maxPlies=self._maxPlies)\n\n marker = ttt.whoseTurn()\n for _ in range(self._nPlay):\n nodeLeaf = self._root.select(self._uct)\n if nodeLeaf is not None:\n nodeSim = nodeLeaf.expand()\n if nodeSim is not None:\n # print (\"START:\", nodeSim.maxPlies, nodeSim.move)\n w = self._simulate(nodeSim)\n if w == ttt.whoseTurn():\n score = 1\n elif w == game.Draw:\n score = .5\n else:\n score = 0\n # print (\"SCORE:\", marker, w, score)\n nodeSim.backpropagate(score)\n\n\n if self._bDump:\n self._root.dump()\n self._root = self._root.bestChild(self._uctMove)\n return self._root.move\n\n\n def tests(self):\n self._root.check_parentage()\n\n\nif __name__ == \"__main__\":\n from ticTacToe import TicTacToe\n from mmPlayer import MMPlayer\n from mcPlayer import MCPlayer\n\n\n nPlay = 100\n maxPlies = 1000\n bNegamax = True\n cUct = 1/np.sqrt(2)\n if True:\n mcts = MCTSPlayer(nPlay = nPlay, maxPlies = maxPlies, bNegamax = bNegamax,\n cUct = cUct, bDump=True)\n mcts.setSeed(1)\n mc10 = MCPlayer(nPlay=10)\n mc10.setSeed(2)\n play.play(TicTacToe, mcts, mc10, bShow = True)\n else:\n score = []\n for _ in range(100):\n mcts = MCTSPlayer(nPlay = nPlay, maxPlies = maxPlies, bNegamax = bNegamax,\n cUct = cUct)\n # mc10 vs. mc10 gives .79, fyi\n # mcts100_mp=1_c=1e6 vs. mc 10 gives .82\n # mcts100_mp=1_c=1/sqrt(2) vs. mc 10 gives .82\n # mcts100_mp=1_c=0 vs. mc 10 gives .82\n # mcts100_mp=2_c=0 vs. mc 10 gives .855\n # mcts100_mp=3_c=0 vs. mc 10 gives .83\n # mcts100_mp=3_c=1/sqrt(2) vs. mc 10 gives .86\n # mcts100_mp=3_c=1/sqrt(2)_negamax vs. mc 10 gives .86\n # mcts100_mp=1000_c=1/sqrt(2)_negamax vs. mc 10 gives .83\n # mcts1000_mp=1000_c=1/sqrt(2)_negamax vs. mc 10 gives .94\n # mcts1000_mp=1000_c=1/sqrt(2) vs. mc 10 gives .83\n w = play.play(TicTacToe, MCPlayer(nPlay=100), mcts, bShow = False)\n if w == 'X':\n score.append(1)\n elif w == 'D':\n score.append(.5)\n else:\n score.append(0)\n print (np.array(score).mean())\n\n\n\n\n\n\n"
] | [
[
"numpy.log",
"numpy.array",
"numpy.sqrt",
"numpy.random.RandomState"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
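The MCTS player above ranks children by UCT, the mean backed-up score `sum/n` plus an exploration bonus `2*c*sqrt(2*ln(N_parent)/n)`, and scores unvisited children as infinity so they are expanded before any node is revisited. Below is a minimal sketch of just that selection rule, assuming visit counts and score sums are already tracked per node; the node layout and the first-index tie-breaking are simplifications of the original:

```python
import math
from dataclasses import dataclass
from typing import List


@dataclass
class Stats:
    wins: float = 0.0   # total score backed up through this node (1 win, 0.5 draw, 0 loss)
    visits: int = 0


def uct(parent: Stats, child: Stats, c: float = 1 / math.sqrt(2)) -> float:
    """Exploit term plus exploration bonus; unvisited children get +inf so they are tried first."""
    if child.visits == 0:
        return math.inf
    exploit = child.wins / child.visits
    explore = 2 * c * math.sqrt(2 * math.log(parent.visits) / child.visits)
    return exploit + explore


def best_child(parent: Stats, children: List[Stats]) -> int:
    """Index of the child maximising UCT."""
    return max(range(len(children)), key=lambda i: uct(parent, children[i]))


if __name__ == "__main__":
    parent = Stats(wins=6.0, visits=10)
    children = [Stats(4.0, 6), Stats(1.0, 3), Stats(0.0, 0)]
    print(best_child(parent, children))  # 2: the unvisited child is selected for expansion
```

The negamax variant in the source keeps the same exploration bonus but multiplies the exploit term by the parent's `chi`, so the value is always read from the perspective of the player to move.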
Global19-atlassian-net/datasets | [
"db298928fe0e45907fcd61443d2319665a933afc"
] | [
"tensorflow_datasets/core/dataset_utils.py"
] | [
"# coding=utf-8\n# Copyright 2020 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utilities for dealing with tf.data.Dataset.\"\"\"\n\nimport tensorflow.compat.v2 as tf\nfrom tensorflow_datasets.core import tf_compat\nfrom tensorflow_datasets.core import utils\n\n\ndef _eager_dataset_iterator(dataset):\n for item in dataset:\n flat = tf.nest.flatten(item)\n flat = [t if isinstance(t, tf.RaggedTensor) else t.numpy() for t in flat]\n yield tf.nest.pack_sequence_as(item, flat)\n\n\ndef _graph_dataset_iterator(ds_iter, graph=None):\n \"\"\"Constructs a Python generator from a tf.data.Iterator.\"\"\"\n with utils.maybe_with_graph(graph, create_if_none=False):\n init = ds_iter.initializer\n ds_item = ds_iter.get_next()\n with utils.nogpu_session(graph) as sess:\n sess.run(init)\n while True:\n try:\n yield sess.run(ds_item)\n except tf.errors.OutOfRangeError:\n break\n\n\ndef as_numpy(dataset, *, graph=None):\n \"\"\"Converts a `tf.data.Dataset` to an iterable of NumPy arrays.\n\n `as_numpy` converts a possibly nested structure of `tf.data.Dataset`s\n and `tf.Tensor`s to iterables of NumPy arrays and NumPy arrays, respectively.\n\n Note that because TensorFlow has support for ragged tensors and NumPy has\n no equivalent representation,\n [`tf.RaggedTensor`s](https://www.tensorflow.org/api_docs/python/tf/RaggedTensor)\n are left as-is for the user to deal with them (e.g. using `to_list()`).\n In TF 1 (i.e. graph mode), `tf.RaggedTensor`s are returned as\n `tf.ragged.RaggedTensorValue`s.\n\n Example:\n\n ```\n ds = tfds.load(name=\"mnist\", split=\"train\")\n ds_numpy = tfds.as_numpy(ds) # Convert `tf.data.Dataset` to Python generator\n for ex in ds_numpy:\n # `{'image': np.array(shape=(28, 28, 1)), 'labels': np.array(shape=())}`\n print(ex)\n ```\n\n Args:\n dataset: a possibly nested structure of `tf.data.Dataset`s and/or\n `tf.Tensor`s.\n graph: `tf.Graph`, optional, explicitly set the graph to use.\n\n Returns:\n A structure matching `dataset` where `tf.data.Dataset`s are converted to\n generators of NumPy arrays and `tf.Tensor`s are converted to NumPy arrays.\n \"\"\"\n nested_ds = dataset\n del dataset\n\n # Flatten\n flat_ds = tf.nest.flatten(nested_ds)\n flat_np = []\n\n # Type check for Tensors and Datasets\n for ds_el in flat_ds:\n types = [type(el) for el in flat_ds]\n types = tf.nest.pack_sequence_as(nested_ds, types)\n if not (\n isinstance(ds_el, (tf.Tensor, tf.RaggedTensor)) or\n tf_compat.is_dataset(ds_el)):\n raise ValueError(\"Arguments to as_numpy must be tf.Tensors or \"\n \"tf.data.Datasets. 
Got: %s\" % types)\n\n if tf.executing_eagerly():\n # Eager mode\n for ds_el in flat_ds:\n if isinstance(ds_el, tf.Tensor):\n np_el = ds_el.numpy()\n elif isinstance(ds_el, tf.RaggedTensor):\n np_el = ds_el\n elif tf_compat.is_dataset(ds_el):\n np_el = _eager_dataset_iterator(ds_el)\n else:\n assert False\n flat_np.append(np_el)\n else:\n # Graph mode\n\n # First create iterators for datasets\n with utils.maybe_with_graph(graph, create_if_none=False):\n ds_iters = [\n tf.compat.v1.data.make_initializable_iterator(ds_el)\n for ds_el in flat_ds if tf_compat.is_dataset(ds_el)\n ]\n ds_iters = [_graph_dataset_iterator(ds_iter, graph) for ds_iter in ds_iters]\n\n # Then create numpy arrays for tensors\n with utils.nogpu_session(graph) as sess: # Shared session for tf.Tensor\n # Calling sess.run once so that randomness is shared.\n np_arrays = sess.run([tensor for tensor in flat_ds\n if not tf_compat.is_dataset(tensor)])\n\n # Merge the dataset iterators and np arrays\n iter_ds = iter(ds_iters)\n iter_array = iter(np_arrays)\n flat_np = [\n next(iter_ds) if tf_compat.is_dataset(ds_el) else next(iter_array)\n for ds_el in flat_ds\n ]\n\n # Nest\n return tf.nest.pack_sequence_as(nested_ds, flat_np)\n\n\ndef dataset_shape_is_fully_defined(ds):\n output_shapes = tf.compat.v1.data.get_output_shapes(ds)\n return all([ts.is_fully_defined() for ts in tf.nest.flatten(output_shapes)])\n\n\ndef features_shape_is_fully_defined(features):\n return all([tf.TensorShape(info.shape).is_fully_defined() for info in\n tf.nest.flatten(features.get_tensor_info())])\n"
] | [
[
"tensorflow.compat.v2.executing_eagerly",
"tensorflow.compat.v2.compat.v1.data.make_initializable_iterator",
"tensorflow.compat.v2.compat.v1.data.get_output_shapes",
"tensorflow.compat.v2.nest.pack_sequence_as",
"tensorflow.compat.v2.nest.flatten",
"tensorflow.compat.v2.TensorShape"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
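`as_numpy` above flattens a possibly nested structure of `tf.data.Dataset`s and `tf.Tensor`s, converts each leaf (a Python generator per dataset, a plain array per tensor, via a shared no-GPU session in graph mode), and re-packs the original structure. Below is a small usage sketch of the eager path, assuming TensorFlow 2.x and `tensorflow_datasets` are installed:

```python
import tensorflow as tf
import tensorflow_datasets as tfds

# A tiny in-memory dataset whose elements are dicts of tensors.
ds = tf.data.Dataset.from_tensor_slices({"x": [[1, 2], [3, 4]], "y": [0, 1]})

# In eager mode, tfds.as_numpy yields plain dicts of NumPy arrays, one per element.
for example in tfds.as_numpy(ds):
    print(example["x"], example["y"])

# A bare tensor passed to as_numpy comes back as a single NumPy array.
print(tfds.as_numpy(tf.constant([1.0, 2.0])))
```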
JingyaHuang/transformers | [
"6589e510fa4e6c442059de2fab84752535de9b23",
"6589e510fa4e6c442059de2fab84752535de9b23"
] | [
"tests/models/bloom/test_modeling_bloom.py",
"src/transformers/models/transfo_xl/modeling_transfo_xl.py"
] | [
"# coding=utf-8\n# Copyright 2022 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport math\nimport unittest\n\nfrom transformers import BloomConfig, is_torch_available\nfrom transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device\n\nfrom ...generation.test_generation_utils import GenerationTesterMixin\nfrom ...test_configuration_common import ConfigTester\nfrom ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask\n\n\nif is_torch_available():\n import torch\n\n from transformers import (\n BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,\n BloomForCausalLM,\n BloomForSequenceClassification,\n BloomForTokenClassification,\n BloomModel,\n BloomTokenizerFast,\n )\n\n\n@require_torch\nclass BloomModelTester:\n def __init__(\n self,\n parent,\n batch_size=14,\n seq_length=7,\n is_training=True,\n use_token_type_ids=False,\n use_input_mask=True,\n use_labels=True,\n use_mc_token_ids=True,\n vocab_size=99,\n hidden_size=32,\n num_hidden_layers=5,\n num_attention_heads=4,\n intermediate_size=37,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n type_vocab_size=16,\n type_sequence_label_size=2,\n initializer_range=0.02,\n num_labels=3,\n num_choices=4,\n scope=None,\n ):\n self.parent = parent\n self.batch_size = batch_size\n self.seq_length = seq_length\n self.is_training = is_training\n self.use_token_type_ids = use_token_type_ids\n self.use_input_mask = use_input_mask\n self.use_labels = use_labels\n self.use_mc_token_ids = use_mc_token_ids\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.intermediate_size = intermediate_size\n self.hidden_act = hidden_act\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.max_position_embeddings = max_position_embeddings\n self.type_vocab_size = type_vocab_size\n self.type_sequence_label_size = type_sequence_label_size\n self.initializer_range = initializer_range\n self.num_labels = num_labels\n self.num_choices = num_choices\n self.scope = None\n self.bos_token_id = vocab_size - 1\n self.eos_token_id = vocab_size - 1\n self.pad_token_id = vocab_size - 1\n\n def get_large_model_config(self):\n return BloomConfig.from_pretrained(\"bigscience/bloom\")\n\n def prepare_config_and_inputs(self, gradient_checkpointing=False):\n input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)\n\n input_mask = None\n if self.use_input_mask:\n input_mask = random_attention_mask([self.batch_size, self.seq_length])\n\n sequence_labels = None\n if self.use_labels:\n sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)\n\n config = self.get_config(gradient_checkpointing=gradient_checkpointing)\n\n return (config, input_ids, input_mask, sequence_labels)\n\n def get_config(self, 
gradient_checkpointing=False, slow_but_exact=True):\n return BloomConfig(\n vocab_size=self.vocab_size,\n seq_length=self.seq_length,\n hidden_size=self.hidden_size,\n n_layer=self.num_hidden_layers,\n n_head=self.num_attention_heads,\n resid_pdrop=self.hidden_dropout_prob,\n attn_pdrop=self.attention_probs_dropout_prob,\n n_positions=self.max_position_embeddings,\n type_vocab_size=self.type_vocab_size,\n initializer_range=self.initializer_range,\n use_cache=True,\n bos_token_id=self.bos_token_id,\n eos_token_id=self.eos_token_id,\n pad_token_id=self.pad_token_id,\n num_labels=self.num_labels,\n gradient_checkpointing=gradient_checkpointing,\n slow_but_exact=slow_but_exact,\n dtype=\"float32\",\n )\n\n def create_and_check_bloom_model(self, config, input_ids, input_mask, *args):\n model = BloomModel(config=config)\n model.to(torch_device)\n model.eval()\n\n result = model(input_ids)\n\n self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))\n self.parent.assertEqual(len(result.past_key_values), config.n_layer)\n\n def create_and_check_bloom_model_past(self, config, input_ids, input_mask, *args):\n model = BloomModel(config=config)\n\n model.to(torch_device)\n model.eval()\n\n # first forward pass\n outputs = model(input_ids, attention_mask=torch.ones_like(input_ids), use_cache=True)\n outputs_use_cache_conf = model(input_ids, attention_mask=torch.ones_like(input_ids))\n outputs_no_past = model(input_ids, use_cache=False, attention_mask=torch.ones_like(input_ids))\n\n self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))\n self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)\n\n past = outputs[\"past_key_values\"]\n\n # create hypothetical next token and extent to next_input_ids\n next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)\n\n # append to next input_ids and token_type_ids\n next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)\n\n output_from_no_past = model(next_input_ids)[\"last_hidden_state\"]\n output_from_past = model(next_tokens, past_key_values=past)[\"last_hidden_state\"]\n\n # select random slice\n random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()\n output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()\n output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()\n\n # test that outputs are equal for slice\n self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))\n\n def create_and_check_bloom_model_attention_mask_past(self, config, input_ids, input_mask, *args):\n model = BloomModel(config=config)\n model.to(torch_device)\n model.eval()\n\n # create attention mask\n attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)\n half_seq_length = self.seq_length // 2\n attn_mask[:, half_seq_length:] = 0\n\n # first forward pass\n output, past = model(input_ids, attention_mask=attn_mask).to_tuple()\n\n # create hypothetical next token and extent to next_input_ids\n next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)\n\n # change a random masked slice from input_ids\n random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1\n random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)\n input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens\n\n # append to next input_ids and attn_mask\n next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)\n attn_mask = torch.cat(\n 
[attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],\n dim=1,\n )\n\n # get two different outputs\n output_from_no_past = model(next_input_ids, attention_mask=attn_mask)[\"last_hidden_state\"]\n output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask)[\"last_hidden_state\"]\n\n # select random slice\n random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()\n output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()\n output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()\n\n # test that outputs are equal for slice\n self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))\n\n def create_and_check_bloom_model_past_large_inputs(self, config, input_ids, input_mask, *args):\n model = BloomModel(config=config)\n model.to(torch_device)\n model.eval()\n\n # first forward pass\n outputs = model(input_ids, attention_mask=input_mask, use_cache=True)\n\n output, past = outputs.to_tuple()\n\n # create hypothetical next token and extent to next_input_ids\n next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)\n next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)\n\n # append to next input_ids and token_type_ids\n next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)\n next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)\n\n output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[\"last_hidden_state\"]\n output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past)[\n \"last_hidden_state\"\n ]\n self.parent.assertTrue(output_from_past.shape[1] == next_tokens.shape[1])\n\n # select random slice\n random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()\n output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()\n output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()\n\n # test that outputs are equal for slice\n self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))\n\n def create_and_check_lm_head_model(self, config, input_ids, input_mask, *args):\n model = BloomForCausalLM(config)\n model.to(torch_device)\n model.eval()\n\n result = model(input_ids, labels=input_ids)\n self.parent.assertEqual(result.loss.shape, ())\n self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))\n\n def create_and_check_sequence_classification_model(self, config, input_ids, input_mask, *args):\n config.num_labels = self.num_labels\n model = BloomForSequenceClassification(config)\n model.to(torch_device)\n model.eval()\n\n result = model(input_ids, attention_mask=input_mask)\n self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))\n\n def create_and_check_token_classification_model(self, config, input_ids, input_mask, *args):\n model = BloomForTokenClassification(config)\n model.to(torch_device)\n model.eval()\n\n result = model(input_ids, attention_mask=input_mask)\n self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))\n\n def create_and_check_forward_and_backwards(\n self, config, input_ids, input_mask, *args, gradient_checkpointing=False\n ):\n model = BloomForCausalLM(config)\n model.to(torch_device)\n if gradient_checkpointing:\n model.gradient_checkpointing_enable()\n\n result = model(input_ids, labels=input_ids)\n 
self.parent.assertEqual(result.loss.shape, ())\n self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))\n result.loss.backward()\n\n def create_and_check_bloom_weight_initialization(self, config, *args):\n model = BloomModel(config)\n model_std = model.config.initializer_range / math.sqrt(2 * model.config.n_layer)\n for key in model.state_dict().keys():\n if \"c_proj\" in key and \"weight\" in key:\n self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)\n self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)\n\n def prepare_config_and_inputs_for_common(self):\n config_and_inputs = self.prepare_config_and_inputs()\n\n config, input_ids, input_mask, sequence_labels = config_and_inputs\n\n inputs_dict = {\"input_ids\": input_ids}\n\n return config, inputs_dict\n\n\n@require_torch\nclass BloomModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):\n\n all_model_classes = (\n (\n BloomModel,\n BloomForCausalLM,\n BloomForSequenceClassification,\n BloomForTokenClassification,\n )\n if is_torch_available()\n else ()\n )\n\n all_generative_model_classes = (BloomForCausalLM,) if is_torch_available() else ()\n fx_compatible = False\n test_missing_keys = False\n test_pruning = False\n test_torchscript = True # torch.autograd functions seems to be not supported\n\n def setUp(self):\n self.model_tester = BloomModelTester(self)\n self.config_tester = ConfigTester(self, config_class=BloomConfig, n_embd=37)\n\n def test_config(self):\n self.config_tester.run_common_tests()\n\n def test_bloom_model(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_bloom_model(*config_and_inputs)\n\n def test_bloom_model_past(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_bloom_model_past(*config_and_inputs)\n\n def test_bloom_model_att_mask_past(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_bloom_model_attention_mask_past(*config_and_inputs)\n\n def test_bloom_model_past_large_inputs(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_bloom_model_past_large_inputs(*config_and_inputs)\n\n def test_bloom_lm_head_model(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_lm_head_model(*config_and_inputs)\n\n def test_bloom_sequence_classification_model(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_sequence_classification_model(*config_and_inputs)\n\n def test_bloom_token_classification_model(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_token_classification_model(*config_and_inputs)\n\n def test_bloom_gradient_checkpointing(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)\n\n def test_bloom_weight_initialization(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_bloom_weight_initialization(*config_and_inputs)\n\n @slow\n def test_model_from_pretrained(self):\n for model_name in BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:\n model = BloomModel.from_pretrained(model_name)\n 
self.assertIsNotNone(model)\n\n @slow\n @require_torch_gpu\n def test_simple_generation(self):\n path_350m = \"bigscience/bloom-350m\"\n model = BloomForCausalLM.from_pretrained(path_350m, torch_dtype=\"auto\", use_cache=True).cuda()\n model = model.eval()\n tokenizer = BloomTokenizerFast.from_pretrained(path_350m)\n\n input_sentence = \"I enjoy walking with my cute dog\"\n EXPECTED_OUTPUT = (\n \"I enjoy walking with my cute dog, and I love to watch the kids play. I am a very active person, and I am\"\n \" a very good listener. I am a very good person, and I am a very good person. I am a\"\n )\n\n input_ids = tokenizer.encode(input_sentence, return_tensors=\"pt\")\n greedy_output = model.generate(input_ids.cuda(), max_length=50)\n\n self.assertEqual(tokenizer.decode(greedy_output[0], skip_special_tokens=True), EXPECTED_OUTPUT)\n\n @slow\n @require_torch_gpu\n def test_batch_generation(self):\n path_350m = \"bigscience/bloom-350m\"\n model = BloomForCausalLM.from_pretrained(path_350m, torch_dtype=\"auto\", use_cache=True).cuda()\n model = model.eval()\n tokenizer = BloomTokenizerFast.from_pretrained(path_350m, padding_side=\"left\")\n\n input_sentence = [\"I enjoy walking with my cute dog\", \"I enjoy walking with my cute dog\"]\n\n input_ids = tokenizer.batch_encode_plus(input_sentence, return_tensors=\"pt\", padding=True)\n greedy_output = model.generate(\n input_ids[\"input_ids\"].cuda(), attention_mask=input_ids[\"attention_mask\"], max_length=50, do_sample=False\n )\n\n self.assertEqual(\n tokenizer.decode(greedy_output[0], skip_special_tokens=True),\n tokenizer.decode(greedy_output[1], skip_special_tokens=True),\n )\n\n @slow\n @require_torch_gpu\n def test_batch_generation_padd(self):\n path_350m = \"bigscience/bloom-350m\"\n model = BloomForCausalLM.from_pretrained(path_350m, torch_dtype=\"auto\", use_cache=True).cuda()\n model = model.eval()\n tokenizer = BloomTokenizerFast.from_pretrained(path_350m, padding_side=\"left\")\n\n input_sentence = [\"I enjoy walking with my cute dog\", \"Hello my name is\"]\n input_sentence_without_pad = \"Hello my name is\"\n\n input_ids = tokenizer.batch_encode_plus(input_sentence, return_tensors=\"pt\", padding=True)\n input_ids_without_pad = tokenizer.encode(input_sentence_without_pad, return_tensors=\"pt\")\n\n greedy_output = model.generate(\n input_ids[\"input_ids\"].cuda(), attention_mask=input_ids[\"attention_mask\"], max_length=50, do_sample=False\n )\n greedy_output_without_pad = model.generate(input_ids_without_pad.cuda(), max_length=50, do_sample=False)\n\n # test token values\n self.assertEqual(greedy_output[-1, 3:].tolist(), greedy_output_without_pad[0, :-3].tolist())\n\n # test reconstructions\n self.assertEqual(\n tokenizer.decode(greedy_output[-1, 3:], skip_special_tokens=True),\n tokenizer.decode(greedy_output_without_pad[0, :-3], skip_special_tokens=True),\n )\n\n\n@require_torch\nclass BloomEmbeddingTest(unittest.TestCase):\n \"\"\"\n The goal here is to compare the embeddings generated by the model trained\n using Megatron-LM with the one from the transformers library, with a small GPT2-like model\n to ensure that the conversion from Megatron-LM to transformers has been done successfully.\n The script compares the logits of the embedding layer and the transformer layers.\n\n WARNING: It is expected that these logits will not have exactly the same statistics when running\n the code on CPU or GPU. 
For more info, please visit:\n - https://github.com/pytorch/pytorch/issues/76052#issuecomment-1103193548\n - https://discuss.pytorch.org/t/reproducibility-issue-between-intel-and-amd-cpus/144779/9\n\n\n You need to install tokenizers following this readme:\n - https://huggingface.co/bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles\n\n Tokenizer used during training:\n - https://huggingface.co/bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles\n\n # TODO change the script (or just add skip) when building the env with tokenizers 0.12.0\n \"\"\"\n\n def setUp(self):\n super().setUp()\n self.path_bigscience_model = \"bigscience/bigscience-small-testing\"\n\n @require_torch\n def test_embeddings(self):\n model = BloomForCausalLM.from_pretrained(self.path_bigscience_model, torch_dtype=\"auto\") # load in fp32\n model.eval()\n\n EMBEDDINGS_DS_BEFORE_LN_BF_16_MEAN = {\n 3478: 0.0002307891845703125,\n 368: -0.000568389892578125,\n 109586: -0.0003910064697265625,\n 35433: -0.000194549560546875,\n 2: 0.0004138946533203125,\n 77: 0.000659942626953125,\n 132619: -0.00031280517578125,\n 2175: 0.000457763671875,\n 23714: 0.000263214111328125,\n 73173: -0.000286102294921875,\n 144252: 0.00052642822265625,\n }\n EMBEDDINGS_DS_BEFORE_LN_BF_16_MIN = {\n 3478: -0.00921630859375,\n 368: -0.010009765625,\n 109586: -0.01031494140625,\n 35433: -0.01177978515625,\n 2: -0.0074462890625,\n 77: -0.00848388671875,\n 132619: -0.009521484375,\n 2175: -0.0074462890625,\n 23714: -0.0145263671875,\n 73173: -0.007415771484375,\n 144252: -0.01007080078125,\n }\n EMBEDDINGS_DS_BEFORE_LN_BF_16_MAX = {\n 3478: 0.0128173828125,\n 368: 0.01214599609375,\n 109586: 0.0111083984375,\n 35433: 0.01019287109375,\n 2: 0.0157470703125,\n 77: 0.0174560546875,\n 132619: 0.0078125,\n 2175: 0.0113525390625,\n 23714: 0.0146484375,\n 73173: 0.01116943359375,\n 144252: 0.01141357421875,\n }\n EMBEDDINGS_DS_BEFORE_LN_BF_16_SUM = {\"value\": 0.08203125}\n\n EMBEDDINGS_DS_BEFORE_LN_F_16_MEAN = {\n 132619: -0.00031256675720214844,\n 3478: 0.00023090839385986328,\n 368: -0.0005702972412109375,\n 109586: -0.00039124488830566406,\n 35433: -0.000194549560546875,\n 2: 0.0004146099090576172,\n 2175: 0.0004572868347167969,\n 23714: 0.00026416778564453125,\n 73173: -0.0002865791320800781,\n 144252: 0.0005254745483398438,\n 77: 0.0006618499755859375,\n }\n EMBEDDINGS_DS_BEFORE_LN_F_16_MIN = {\n 3478: -0.00921630859375,\n 368: -0.010009765625,\n 109586: -0.01031494140625,\n 35433: -0.01177978515625,\n 2: -0.0074462890625,\n 77: -0.00848388671875,\n 132619: -0.009521484375,\n 2175: -0.0074462890625,\n 23714: -0.0145263671875,\n 73173: -0.007415771484375,\n 144252: -0.01007080078125,\n }\n EMBEDDINGS_DS_BEFORE_LN_F_16_MAX = {\n 3478: 0.0128173828125,\n 368: 0.01214599609375,\n 109586: 0.0111083984375,\n 35433: 0.01019287109375,\n 2: 0.0157470703125,\n 77: 0.0174560546875,\n 132619: 0.0078125,\n 2175: 0.0113525390625,\n 23714: 0.0146484375,\n 73173: 0.01116943359375,\n 144252: 0.01141357421875,\n }\n EMBEDDINGS_DS_BEFORE_LN_F_16_SUM = {\"value\": 0.0821533203125}\n\n EMBEDDINGS_DS_BEFORE_LN_F_32_MEAN = {\n 132619: -0.00031267106533050537,\n 3478: 0.00023087859153747559,\n 368: -0.0005701072514057159,\n 109586: -0.0003911703824996948,\n 35433: -0.0001944899559020996,\n 2: 0.0004146844148635864,\n 2175: 0.00045740045607089996,\n 23714: 0.0002641640603542328,\n 73173: -0.0002864748239517212,\n 144252: 
0.0005256589502096176,\n 77: 0.0006617321632802486,\n }\n EMBEDDINGS_DS_BEFORE_LN_F_32_MIN = {\n 3478: -0.00921630859375,\n 368: -0.010009765625,\n 109586: -0.01031494140625,\n 35433: -0.01177978515625,\n 2: -0.0074462890625,\n 77: -0.00848388671875,\n 132619: -0.009521484375,\n 2175: -0.0074462890625,\n 23714: -0.0145263671875,\n 73173: -0.007415771484375,\n 144252: -0.01007080078125,\n }\n EMBEDDINGS_DS_BEFORE_LN_F_32_MAX = {\n 3478: 0.0128173828125,\n 368: 0.01214599609375,\n 109586: 0.0111083984375,\n 35433: 0.01019287109375,\n 2: 0.0157470703125,\n 77: 0.0174560546875,\n 132619: 0.0078125,\n 2175: 0.0113525390625,\n 23714: 0.0146484375,\n 73173: 0.01116943359375,\n 144252: 0.01141357421875,\n }\n EMBEDDINGS_DS_BEFORE_LN_F_32_SUM = {\"value\": 0.08217757940292358}\n\n TEST_EMBEDDINGS = {\n \"torch.bfloat16\": {\n \"mean\": EMBEDDINGS_DS_BEFORE_LN_BF_16_MEAN,\n \"max\": EMBEDDINGS_DS_BEFORE_LN_BF_16_MAX,\n \"min\": EMBEDDINGS_DS_BEFORE_LN_BF_16_MIN,\n \"sum\": EMBEDDINGS_DS_BEFORE_LN_BF_16_SUM,\n },\n \"torch.float32\": {\n \"mean\": EMBEDDINGS_DS_BEFORE_LN_F_32_MEAN,\n \"max\": EMBEDDINGS_DS_BEFORE_LN_F_32_MAX,\n \"min\": EMBEDDINGS_DS_BEFORE_LN_F_32_MIN,\n \"sum\": EMBEDDINGS_DS_BEFORE_LN_F_32_SUM,\n },\n \"torch.float\": {\n \"mean\": EMBEDDINGS_DS_BEFORE_LN_F_32_MEAN,\n \"max\": EMBEDDINGS_DS_BEFORE_LN_F_32_MAX,\n \"min\": EMBEDDINGS_DS_BEFORE_LN_F_32_MIN,\n \"sum\": EMBEDDINGS_DS_BEFORE_LN_F_32_SUM,\n },\n \"torch.float16\": {\n \"mean\": EMBEDDINGS_DS_BEFORE_LN_F_16_MEAN,\n \"max\": EMBEDDINGS_DS_BEFORE_LN_F_16_MAX,\n \"min\": EMBEDDINGS_DS_BEFORE_LN_F_16_MIN,\n \"sum\": EMBEDDINGS_DS_BEFORE_LN_F_16_SUM,\n },\n }\n\n # fmt: off\n EXAMPLE_IDS = [3478, 368, 109586, 35433, 2, 77, 132619, 3478, 368, 109586, 35433, 2, 2175, 23714, 73173, 144252, 2, 77, 132619, 3478]\n # fmt: on\n\n EMBEDDINGS_DS_AFTER_LN_MEAN = {\n 3478: -6.580352783203125e-05,\n 368: 0.0001316070556640625,\n 109586: -0.00030517578125,\n 35433: 4.00543212890625e-05,\n 2: -7.2479248046875e-05,\n 77: -8.96453857421875e-05,\n 132619: 0.0001583099365234375,\n 2175: 2.1219253540039062e-05,\n 23714: -0.000247955322265625,\n 73173: -0.00021839141845703125,\n 144252: -0.0001430511474609375,\n }\n EMBEDDINGS_DS_AFTER_LN_MIN = {\n 3478: -1.6953125,\n 368: -1.6875,\n 109586: -1.6875,\n 35433: -2.125,\n 2: -1.390625,\n 77: -1.5390625,\n 132619: -1.875,\n 2175: -1.4609375,\n 23714: -2.296875,\n 73173: -1.3515625,\n 144252: -1.78125,\n }\n EMBEDDINGS_DS_AFTER_LN_MAX = {\n 3478: 2.265625,\n 368: 2.28125,\n 109586: 1.953125,\n 35433: 1.90625,\n 2: 2.703125,\n 77: 2.828125,\n 132619: 1.65625,\n 2175: 2.015625,\n 23714: 2.234375,\n 73173: 2.171875,\n 144252: 1.828125,\n }\n\n EMBEDDINGS_DS_AFTER_LN = {\n \"mean\": EMBEDDINGS_DS_AFTER_LN_MEAN,\n \"min\": EMBEDDINGS_DS_AFTER_LN_MIN,\n \"max\": EMBEDDINGS_DS_AFTER_LN_MAX,\n }\n\n tensor_ids = torch.LongTensor([EXAMPLE_IDS])\n with torch.no_grad():\n embeddings = model.transformer.word_embeddings(tensor_ids)\n embeddings_ln = model.transformer.word_embeddings_layernorm(embeddings) #\n # first check the embeddings before LN\n output_dict = {\"min\": {}, \"max\": {}, \"mean\": {}, \"sum\": {\"value\": embeddings.sum().item()}}\n for i, idx in enumerate(EXAMPLE_IDS):\n output_dict[\"min\"][idx] = embeddings.min(dim=-1).values[0][i].item()\n output_dict[\"max\"][idx] = embeddings.max(dim=-1).values[0][i].item()\n output_dict[\"mean\"][idx] = embeddings.mean(dim=-1)[0][i].item()\n\n for key in TEST_EMBEDDINGS[str(model.dtype)].keys():\n 
self.assertDictEqual(TEST_EMBEDDINGS[str(model.dtype)][key], output_dict[key])\n\n output_dict_norm = {\"min\": {}, \"max\": {}, \"mean\": {}}\n for i, idx in enumerate(EXAMPLE_IDS):\n output_dict_norm[\"min\"][idx] = embeddings_ln.min(dim=-1).values[0][i].item()\n output_dict_norm[\"max\"][idx] = embeddings_ln.max(dim=-1).values[0][i].item()\n output_dict_norm[\"mean\"][idx] = embeddings_ln.mean(dim=-1)[0][i].item()\n\n # This test does not pass when places = 2\n for i, key in enumerate(output_dict_norm.keys()):\n for j, idx in enumerate(output_dict[key].keys()):\n self.assertAlmostEqual(EMBEDDINGS_DS_AFTER_LN[key][idx], output_dict_norm[key][idx], places=1)\n\n @require_torch\n def test_hidden_states_transformers(self):\n cuda_available = torch.cuda.is_available()\n model = BloomModel.from_pretrained(self.path_bigscience_model, use_cache=False, torch_dtype=\"auto\").to(\n torch_device\n )\n model.eval()\n\n # fmt: off\n EXAMPLE_IDS = [3478, 368, 109586, 35433, 2, 77, 132619, 3478, 368, 109586, 35433, 2, 2175, 23714, 73173, 144252, 2, 77, 132619, 3478]\n # fmt: on\n\n MEAN_VALUE_LAST_LM = -4.3392181396484375e-05\n MIN_MAX_DICT = {\"min\": -2.0625, \"max\": 2.75}\n tensor_ids = torch.LongTensor([EXAMPLE_IDS])\n\n with torch.no_grad():\n logits = model(tensor_ids.to(torch_device))\n output_dict = {\n \"min\": logits.last_hidden_state.min(dim=-1).values[0][0].item(),\n \"max\": logits.last_hidden_state.max(dim=-1).values[0][0].item(),\n }\n\n if cuda_available:\n self.assertAlmostEqual(MEAN_VALUE_LAST_LM, logits.last_hidden_state.mean().item(), places=4)\n else:\n self.assertAlmostEqual(MEAN_VALUE_LAST_LM, logits.last_hidden_state.mean().item(), places=3)\n\n self.assertDictEqual(MIN_MAX_DICT, output_dict)\n\n @require_torch\n def test_logits(self):\n cuda_available = torch.cuda.is_available()\n model = BloomForCausalLM.from_pretrained(self.path_bigscience_model, use_cache=False, torch_dtype=\"auto\").to(\n torch_device\n ) # load in bf16\n model.eval()\n\n # fmt: off\n EXAMPLE_IDS = [3478, 368, 109586, 35433, 2, 77, 132619, 3478, 368, 109586, 35433, 2, 2175, 23714, 73173, 144252, 2, 77, 132619, 3478]\n # fmt: on\n\n MEAN_LOGITS_GPU_1 = -1.823902130126953e-05\n MEAN_LOGITS_GPU_2 = 1.9431114196777344e-05\n\n tensor_ids = torch.LongTensor([EXAMPLE_IDS]).to(torch_device)\n with torch.no_grad():\n output = model(tensor_ids).logits\n\n output_gpu_1, output_gpu_2 = output.split(125440, dim=-1)\n if cuda_available:\n self.assertEqual(output_gpu_1.mean().item(), MEAN_LOGITS_GPU_1)\n self.assertEqual(output_gpu_2.mean().item(), MEAN_LOGITS_GPU_2)\n else:\n self.assertAlmostEqual(output_gpu_1.mean().item(), MEAN_LOGITS_GPU_1, places=6) # 1e-06 precision!!\n self.assertAlmostEqual(output_gpu_2.mean().item(), MEAN_LOGITS_GPU_2, places=6)\n",
"# coding=utf-8\n# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\n PyTorch Transformer XL model. Adapted from https://github.com/kimiyoung/transformer-xl. In particular\n https://github.com/kimiyoung/transformer-xl/blob/master/pytorch/mem_transformer.py\n\"\"\"\nimport warnings\nfrom dataclasses import dataclass\nfrom typing import List, Optional, Tuple, Union\n\nimport torch\nfrom torch import nn\nfrom torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss\n\nfrom ...modeling_utils import PreTrainedModel\nfrom ...utils import (\n ModelOutput,\n add_code_sample_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_model_forward,\n logging,\n)\nfrom .configuration_transfo_xl import TransfoXLConfig\nfrom .modeling_transfo_xl_utilities import ProjectedAdaptiveLogSoftmax\n\n\nlogger = logging.get_logger(__name__)\n\n_CHECKPOINT_FOR_DOC = \"transfo-xl-wt103\"\n_CONFIG_FOR_DOC = \"TransfoXLConfig\"\n_TOKENIZER_FOR_DOC = \"TransfoXLTokenizer\"\n\nTRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"transfo-xl-wt103\",\n # See all Transformer XL models at https://huggingface.co/models?filter=transfo-xl\n]\n\n\ndef build_tf_to_pytorch_map(model, config):\n \"\"\"\n A map of modules from TF to PyTorch. 
This time I use a map to keep the PyTorch model as identical to the original\n PyTorch model as possible.\n \"\"\"\n tf_to_pt_map = {}\n\n if hasattr(model, \"transformer\"):\n # We are loading in a TransfoXLLMHeadModel => we will load also the Adaptive Softmax\n tf_to_pt_map.update(\n {\n \"transformer/adaptive_softmax/cutoff_0/cluster_W\": model.crit.cluster_weight,\n \"transformer/adaptive_softmax/cutoff_0/cluster_b\": model.crit.cluster_bias,\n }\n )\n for i, (out_l, proj_l, tie_proj) in enumerate(\n zip(model.crit.out_layers, model.crit.out_projs, config.tie_projs)\n ):\n layer_str = f\"transformer/adaptive_softmax/cutoff_{i}/\"\n if config.tie_word_embeddings:\n tf_to_pt_map.update({layer_str + \"b\": out_l.bias})\n else:\n raise NotImplementedError\n # I don't think this is implemented in the TF code\n tf_to_pt_map.update({layer_str + \"lookup_table\": out_l.weight, layer_str + \"b\": out_l.bias})\n if not tie_proj:\n tf_to_pt_map.update({layer_str + \"proj\": proj_l})\n # Now load the rest of the transformer\n model = model.transformer\n\n # Embeddings\n for i, (embed_l, proj_l) in enumerate(zip(model.word_emb.emb_layers, model.word_emb.emb_projs)):\n layer_str = f\"transformer/adaptive_embed/cutoff_{i}/\"\n tf_to_pt_map.update({layer_str + \"lookup_table\": embed_l.weight, layer_str + \"proj_W\": proj_l})\n\n # Transformer blocks\n for i, b in enumerate(model.layers):\n layer_str = f\"transformer/layer_{i}/\"\n tf_to_pt_map.update(\n {\n layer_str + \"rel_attn/LayerNorm/gamma\": b.dec_attn.layer_norm.weight,\n layer_str + \"rel_attn/LayerNorm/beta\": b.dec_attn.layer_norm.bias,\n layer_str + \"rel_attn/o/kernel\": b.dec_attn.o_net.weight,\n layer_str + \"rel_attn/qkv/kernel\": b.dec_attn.qkv_net.weight,\n layer_str + \"rel_attn/r/kernel\": b.dec_attn.r_net.weight,\n layer_str + \"ff/LayerNorm/gamma\": b.pos_ff.layer_norm.weight,\n layer_str + \"ff/LayerNorm/beta\": b.pos_ff.layer_norm.bias,\n layer_str + \"ff/layer_1/kernel\": b.pos_ff.CoreNet[0].weight,\n layer_str + \"ff/layer_1/bias\": b.pos_ff.CoreNet[0].bias,\n layer_str + \"ff/layer_2/kernel\": b.pos_ff.CoreNet[3].weight,\n layer_str + \"ff/layer_2/bias\": b.pos_ff.CoreNet[3].bias,\n }\n )\n\n # Relative positioning biases\n if config.untie_r:\n r_r_list = []\n r_w_list = []\n for b in model.layers:\n r_r_list.append(b.dec_attn.r_r_bias)\n r_w_list.append(b.dec_attn.r_w_bias)\n else:\n r_r_list = [model.r_r_bias]\n r_w_list = [model.r_w_bias]\n tf_to_pt_map.update({\"transformer/r_r_bias\": r_r_list, \"transformer/r_w_bias\": r_w_list})\n return tf_to_pt_map\n\n\ndef load_tf_weights_in_transfo_xl(model, config, tf_path):\n \"\"\"Load tf checkpoints in a pytorch model\"\"\"\n try:\n import numpy as np\n import tensorflow as tf\n except ImportError:\n logger.error(\n \"Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. 
Please see \"\n \"https://www.tensorflow.org/install/ for installation instructions.\"\n )\n raise\n # Build TF to PyTorch weights loading map\n tf_to_pt_map = build_tf_to_pytorch_map(model, config)\n\n # Load weights from TF model\n init_vars = tf.train.list_variables(tf_path)\n tf_weights = {}\n for name, shape in init_vars:\n logger.info(f\"Loading TF weight {name} with shape {shape}\")\n array = tf.train.load_variable(tf_path, name)\n tf_weights[name] = array\n\n for name, pointer in tf_to_pt_map.items():\n assert name in tf_weights\n array = tf_weights[name]\n # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v\n # which are not required for using pretrained model\n if \"kernel\" in name or \"proj\" in name:\n array = np.transpose(array)\n if (\"r_r_bias\" in name or \"r_w_bias\" in name) and len(pointer) > 1:\n # Here we will split the TF weights\n assert len(pointer) == array.shape[0]\n for i, p_i in enumerate(pointer):\n arr_i = array[i, ...]\n try:\n assert p_i.shape == arr_i.shape\n except AssertionError as e:\n e.args += (p_i.shape, arr_i.shape)\n raise\n logger.info(f\"Initialize PyTorch weight {name} for layer {i}\")\n p_i.data = torch.from_numpy(arr_i)\n else:\n try:\n assert (\n pointer.shape == array.shape\n ), f\"Pointer shape {pointer.shape} and array shape {array.shape} mismatched\"\n except AssertionError as e:\n e.args += (pointer.shape, array.shape)\n raise\n logger.info(f\"Initialize PyTorch weight {name}\")\n pointer.data = torch.from_numpy(array)\n tf_weights.pop(name, None)\n tf_weights.pop(name + \"/Adam\", None)\n tf_weights.pop(name + \"/Adam_1\", None)\n\n logger.info(f\"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}\")\n return model\n\n\nclass PositionalEmbedding(nn.Module):\n def __init__(self, demb):\n super().__init__()\n\n self.demb = demb\n\n inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb))\n self.register_buffer(\"inv_freq\", inv_freq)\n\n def forward(self, pos_seq, bsz=None):\n sinusoid_inp = torch.ger(pos_seq, self.inv_freq)\n pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1)\n\n if bsz is not None:\n return pos_emb[:, None, :].expand(-1, bsz, -1)\n else:\n return pos_emb[:, None, :]\n\n\nclass PositionwiseFF(nn.Module):\n def __init__(self, d_model, d_inner, dropout, pre_lnorm=False, layer_norm_epsilon=1e-5):\n super().__init__()\n\n self.d_model = d_model\n self.d_inner = d_inner\n self.dropout = dropout\n\n self.CoreNet = nn.Sequential(\n nn.Linear(d_model, d_inner),\n nn.ReLU(inplace=True),\n nn.Dropout(dropout),\n nn.Linear(d_inner, d_model),\n nn.Dropout(dropout),\n )\n\n self.layer_norm = nn.LayerNorm(d_model, eps=layer_norm_epsilon)\n\n self.pre_lnorm = pre_lnorm\n\n def forward(self, inp):\n if self.pre_lnorm:\n # layer normalization + positionwise feed-forward\n core_out = self.CoreNet(self.layer_norm(inp))\n\n # residual connection\n output = core_out + inp\n else:\n # positionwise feed-forward\n core_out = self.CoreNet(inp)\n\n # residual connection + layer normalization\n output = self.layer_norm(inp + core_out)\n\n return output\n\n\nclass RelPartialLearnableMultiHeadAttn(nn.Module):\n def __init__(\n self,\n n_head,\n d_model,\n d_head,\n dropout,\n dropatt=0,\n pre_lnorm=False,\n r_r_bias=None,\n r_w_bias=None,\n layer_norm_epsilon=1e-5,\n ):\n super().__init__()\n\n self.n_head = n_head\n self.d_model = d_model\n self.d_head = d_head\n self.dropout = dropout\n\n self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head, bias=False)\n\n self.drop 
= nn.Dropout(dropout)\n self.dropatt = nn.Dropout(dropatt)\n self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)\n\n self.layer_norm = nn.LayerNorm(d_model, eps=layer_norm_epsilon)\n\n self.scale = 1 / (d_head**0.5)\n\n self.pre_lnorm = pre_lnorm\n\n if r_r_bias is None or r_w_bias is None: # Biases are not shared\n self.r_r_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))\n self.r_w_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))\n else:\n self.r_r_bias = r_r_bias\n self.r_w_bias = r_w_bias\n\n self.r_net = nn.Linear(self.d_model, self.n_head * self.d_head, bias=False)\n\n def _rel_shift(self, x):\n zero_pad_shape = (x.size(0), 1) + x.size()[2:]\n zero_pad = torch.zeros(zero_pad_shape, device=x.device, dtype=x.dtype)\n x_padded = torch.cat([zero_pad, x], dim=1)\n\n x_padded_shape = (x.size(1) + 1, x.size(0)) + x.size()[2:]\n x_padded = x_padded.view(*x_padded_shape)\n\n x = x_padded[1:].view_as(x)\n\n return x\n\n def forward(self, w, r, attn_mask=None, mems=None, head_mask=None, output_attentions=False):\n qlen, rlen, bsz = w.size(0), r.size(0), w.size(1)\n\n if mems is not None:\n cat = torch.cat([mems, w], 0)\n if self.pre_lnorm:\n w_heads = self.qkv_net(self.layer_norm(cat))\n else:\n w_heads = self.qkv_net(cat)\n r_head_k = self.r_net(r)\n\n w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)\n w_head_q = w_head_q[-qlen:]\n else:\n if self.pre_lnorm:\n w_heads = self.qkv_net(self.layer_norm(w))\n else:\n w_heads = self.qkv_net(w)\n r_head_k = self.r_net(r)\n\n w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)\n\n klen = w_head_k.size(0)\n\n w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head) # qlen x bsz x n_head x d_head\n w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head) # qlen x bsz x n_head x d_head\n w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head) # qlen x bsz x n_head x d_head\n\n r_head_k = r_head_k.view(rlen, self.n_head, self.d_head) # qlen x n_head x d_head\n\n # compute attention score\n rw_head_q = w_head_q + self.r_w_bias # qlen x bsz x n_head x d_head\n AC = torch.einsum(\"ibnd,jbnd->ijbn\", (rw_head_q, w_head_k)) # qlen x klen x bsz x n_head\n\n rr_head_q = w_head_q + self.r_r_bias\n BD = torch.einsum(\"ibnd,jnd->ijbn\", (rr_head_q, r_head_k)) # qlen x klen x bsz x n_head\n BD = self._rel_shift(BD)\n\n # [qlen x klen x bsz x n_head]\n attn_score = AC + BD\n attn_score.mul_(self.scale)\n\n # compute attention probability\n if attn_mask is not None and torch.sum(attn_mask).item():\n attn_mask = attn_mask == 1 # Switch to bool\n if attn_mask.dim() == 2:\n if next(self.parameters()).dtype == torch.float16:\n attn_score = (\n attn_score.float().masked_fill(attn_mask[None, :, :, None], -65000).type_as(attn_score)\n )\n else:\n attn_score = attn_score.float().masked_fill(attn_mask[None, :, :, None], -1e30).type_as(attn_score)\n elif attn_mask.dim() == 3:\n if next(self.parameters()).dtype == torch.float16:\n attn_score = attn_score.float().masked_fill(attn_mask[:, :, :, None], -65000).type_as(attn_score)\n else:\n attn_score = attn_score.float().masked_fill(attn_mask[:, :, :, None], -1e30).type_as(attn_score)\n\n # [qlen x klen x bsz x n_head]\n attn_prob = nn.functional.softmax(attn_score, dim=1)\n attn_prob = self.dropatt(attn_prob)\n\n # Mask heads if we want to\n if head_mask is not None:\n attn_prob = attn_prob * head_mask\n\n # compute attention vector\n attn_vec = torch.einsum(\"ijbn,jbnd->ibnd\", (attn_prob, w_head_v))\n\n # [qlen x bsz x n_head x d_head]\n attn_vec = 
attn_vec.contiguous().view(attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head)\n\n # linear projection\n attn_out = self.o_net(attn_vec)\n attn_out = self.drop(attn_out)\n\n if self.pre_lnorm:\n # residual connection\n outputs = [w + attn_out]\n else:\n # residual connection + layer normalization\n outputs = [self.layer_norm(w + attn_out)]\n\n if output_attentions:\n outputs.append(attn_prob)\n\n return outputs\n\n\nclass RelPartialLearnableDecoderLayer(nn.Module):\n def __init__(self, n_head, d_model, d_head, d_inner, dropout, layer_norm_epsilon=1e-5, **kwargs):\n super().__init__()\n\n self.dec_attn = RelPartialLearnableMultiHeadAttn(\n n_head, d_model, d_head, dropout, layer_norm_epsilon=layer_norm_epsilon, **kwargs\n )\n self.pos_ff = PositionwiseFF(\n d_model, d_inner, dropout, pre_lnorm=kwargs.get(\"pre_lnorm\"), layer_norm_epsilon=layer_norm_epsilon\n )\n\n def forward(self, dec_inp, r, dec_attn_mask=None, mems=None, head_mask=None, output_attentions=False):\n\n attn_outputs = self.dec_attn(\n dec_inp,\n r,\n attn_mask=dec_attn_mask,\n mems=mems,\n head_mask=head_mask,\n output_attentions=output_attentions,\n )\n ff_output = self.pos_ff(attn_outputs[0])\n\n outputs = [ff_output] + attn_outputs[1:]\n\n return outputs\n\n\nclass AdaptiveEmbedding(nn.Module):\n def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, sample_softmax=False):\n super().__init__()\n\n self.n_token = n_token\n self.d_embed = d_embed\n\n self.cutoffs = cutoffs + [n_token]\n self.div_val = div_val\n self.d_proj = d_proj\n\n self.emb_scale = d_proj**0.5\n\n self.cutoff_ends = [0] + self.cutoffs\n\n self.emb_layers = nn.ModuleList()\n self.emb_projs = nn.ParameterList()\n if div_val == 1:\n self.emb_layers.append(nn.Embedding(n_token, d_embed, sparse=sample_softmax > 0))\n if d_proj != d_embed:\n self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))\n else:\n for i in range(len(self.cutoffs)):\n l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]\n d_emb_i = d_embed // (div_val**i)\n self.emb_layers.append(nn.Embedding(r_idx - l_idx, d_emb_i))\n self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))\n\n def forward(self, inp):\n if self.div_val == 1:\n embed = self.emb_layers[0](inp)\n if self.d_proj != self.d_embed:\n embed = nn.functional.linear(embed, self.emb_projs[0])\n else:\n param = next(self.parameters())\n inp_flat = inp.view(-1)\n emb_flat = torch.zeros([inp_flat.size(0), self.d_proj], dtype=param.dtype, device=param.device)\n for i in range(len(self.cutoffs)):\n l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]\n\n mask_i = (inp_flat >= l_idx) & (inp_flat < r_idx)\n indices_i = mask_i.nonzero().squeeze()\n\n if indices_i.numel() == 0:\n continue\n\n inp_i = inp_flat.index_select(0, indices_i) - l_idx\n emb_i = self.emb_layers[i](inp_i)\n emb_i = nn.functional.linear(emb_i, self.emb_projs[i])\n\n emb_flat.index_copy_(0, indices_i, emb_i)\n\n embed_shape = inp.size() + (self.d_proj,)\n embed = emb_flat.view(embed_shape)\n\n embed.mul_(self.emb_scale)\n\n return embed\n\n\nclass TransfoXLPreTrainedModel(PreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = TransfoXLConfig\n load_tf_weights = load_tf_weights_in_transfo_xl\n base_model_prefix = \"transformer\"\n\n def _init_weight(self, weight):\n if self.config.init == \"uniform\":\n nn.init.uniform_(weight, -self.config.init_range, self.config.init_range)\n 
elif self.config.init == \"normal\":\n nn.init.normal_(weight, 0.0, self.config.init_std)\n\n def _init_bias(self, bias):\n nn.init.constant_(bias, 0.0)\n\n def _init_weights(self, m):\n \"\"\"Initialize the weights.\"\"\"\n classname = m.__class__.__name__\n if classname.find(\"Linear\") != -1:\n if hasattr(m, \"weight\") and m.weight is not None:\n self._init_weight(m.weight)\n if hasattr(m, \"bias\") and m.bias is not None:\n self._init_bias(m.bias)\n elif classname.find(\"AdaptiveEmbedding\") != -1:\n if hasattr(m, \"emb_projs\"):\n for i in range(len(m.emb_projs)):\n if m.emb_projs[i] is not None:\n nn.init.normal_(m.emb_projs[i], 0.0, self.config.proj_init_std)\n elif classname.find(\"Embedding\") != -1:\n if hasattr(m, \"weight\"):\n self._init_weight(m.weight)\n elif classname.find(\"ProjectedAdaptiveLogSoftmax\") != -1:\n if hasattr(m, \"cluster_weight\") and m.cluster_weight is not None:\n self._init_weight(m.cluster_weight)\n if hasattr(m, \"cluster_bias\") and m.cluster_bias is not None:\n self._init_bias(m.cluster_bias)\n if hasattr(m, \"out_projs\"):\n for i in range(len(m.out_projs)):\n if m.out_projs[i] is not None:\n nn.init.normal_(m.out_projs[i], 0.0, self.config.proj_init_std)\n elif classname.find(\"LayerNorm\") != -1:\n if hasattr(m, \"weight\"):\n nn.init.normal_(m.weight, 1.0, self.config.init_std)\n if hasattr(m, \"bias\") and m.bias is not None:\n self._init_bias(m.bias)\n else:\n if hasattr(m, \"r_emb\"):\n self._init_weight(m.r_emb)\n if hasattr(m, \"r_w_bias\"):\n self._init_weight(m.r_w_bias)\n if hasattr(m, \"r_r_bias\"):\n self._init_weight(m.r_r_bias)\n if hasattr(m, \"r_bias\"):\n self._init_bias(m.r_bias)\n\n def resize_token_embeddings(self, new_num_tokens: Optional[int] = None, layer: Optional[int] = -1):\n \"\"\"\n Resize input token embeddings matrix of the model if new_num_tokens != config.vocab_size. Take care of tying\n weights embeddings afterwards if the model class has a *tie_weights()* method.\n\n Arguments:\n\n new_num_tokens: (*optional*) int:\n New number of tokens in the embedding matrix. Increasing the size will add newly initialized vectors at\n the end. Reducing the size will remove vectors from the end. If not provided or None: does nothing and\n just returns a pointer to the input tokens `torch.nn.Embeddings` Module of the model.\n layer: (*optional*) int:\n Layer of the *AdaptiveEmbedding* where the resizing should be done. Per default the last layer will be\n resized. 
Be aware that when resizing other than the last layer, you have to ensure that the new\n token(s) in the tokenizer are at the corresponding position.\n\n Return: `torch.nn.Embeddings` Pointer to the input tokens Embeddings Module of the model\n \"\"\"\n base_model = getattr(self, self.base_model_prefix, self) # get the base model if needed\n\n if new_num_tokens is None:\n return self.get_input_embeddings()\n\n new_num_tokens_layer, layer = self._get_new_num_tokens_layer(new_num_tokens, layer)\n assert new_num_tokens_layer > 0, \"The size of the new embedding layer cannot be 0 or less\"\n model_embeds = base_model._resize_token_embeddings(new_num_tokens_layer, layer)\n\n # Update base model and current model config\n self.config.vocab_size = new_num_tokens\n base_model.vocab_size = new_num_tokens\n base_model.n_token = new_num_tokens\n\n new_embedding_shapes = self._get_embedding_shapes()\n self._resize_cutoffs(new_num_tokens, new_num_tokens_layer, new_embedding_shapes, layer)\n\n # Tie weights again if needed\n self.tie_weights()\n\n return model_embeds\n\n def _get_new_num_tokens_layer(self, new_num_tokens, layer):\n embeddings = self.get_input_embeddings()\n if layer == -1:\n layer = len(embeddings.emb_layers) - 1\n assert 0 <= layer <= len(embeddings.emb_layers) - 1\n\n new_num_tokens_layer = (\n new_num_tokens\n - sum([emb.weight.shape[0] for emb in embeddings.emb_layers[:layer]])\n - sum([emb.weight.shape[0] for emb in embeddings.emb_layers[layer + 1 :]])\n )\n return new_num_tokens_layer, layer\n\n def _get_embedding_shapes(self):\n embeddings = self.get_input_embeddings()\n return [emb.weight.shape[0] for emb in embeddings.emb_layers]\n\n def _resize_token_embeddings(self, new_num_tokens, layer=-1):\n embeddings = self.get_input_embeddings()\n if new_num_tokens is None:\n return embeddings\n new_embeddings_layer = self._get_resized_embeddings(embeddings.emb_layers[layer], new_num_tokens)\n embeddings.emb_layers[layer] = new_embeddings_layer\n\n self.set_input_embeddings(embeddings)\n\n return self.get_input_embeddings()\n\n def _resize_cutoffs(self, new_num_tokens, new_emb_size, new_embedding_shapes, layer):\n embeddings = self.get_input_embeddings()\n\n for i in range(layer, len(embeddings.cutoffs)):\n embeddings.cutoffs[i] = sum(new_embedding_shapes[: i + 1])\n\n embeddings.cutoff_ends = [0] + embeddings.cutoffs\n embeddings.n_token = new_num_tokens\n\n self.config.cutoffs = embeddings.cutoffs[:-1]\n\n return embeddings.cutoffs\n\n\n@dataclass\nclass TransfoXLModelOutput(ModelOutput):\n \"\"\"\n Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).\n\n Args:\n last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\n Sequence of hidden-states at the output of the last layer of the model.\n mems (`List[torch.FloatTensor]` of length `config.n_layers`):\n Contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `mems`\n input) to speed up sequential decoding. 
The token ids which have their past given to this model should not\n be passed as input ids as they have already been computed.\n hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of\n shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n last_hidden_state: torch.FloatTensor\n mems: List[torch.FloatTensor] = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None\n\n\n@dataclass\nclass TransfoXLSequenceClassifierOutputWithPast(ModelOutput):\n \"\"\"\n Base class for outputs of sentence classification models.\n\n Args:\n loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):\n Classification (or regression if config.num_labels==1) loss.\n logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):\n Classification (or regression if config.num_labels==1) scores (before SoftMax).\n mems (`List[torch.FloatTensor]` of length `config.n_layers`):\n Contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `mems`\n input) to speed up sequential decoding. 
The token ids which have their past given to this model should not\n be passed as input ids as they have already been computed.\n hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of\n shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n loss: Optional[torch.FloatTensor] = None\n logits: torch.FloatTensor = None\n mems: List[torch.FloatTensor] = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None\n\n\n@dataclass\nclass TransfoXLLMHeadModelOutput(ModelOutput):\n \"\"\"\n Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).\n\n Args:\n losses (`torch.FloatTensor` of shape *(batch_size, sequence_length-1)*, *optional*, returned when `labels` is provided):\n Language modeling losses (not reduced).\n prediction_scores (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):\n Prediction scores of the language modeling head (scores for each vocabulary token after SoftMax).\n mems (`List[torch.FloatTensor]` of length `config.n_layers`):\n Contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `mems`\n input) to speed up sequential decoding. The token ids which have their past given to this model should not\n be passed as input ids as they have already been computed.\n hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of\n shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n loss (`torch.FloatTensor` of shape `()`, *optional*, returned when `labels` is provided)\n Reduced language modeling loss.\n \"\"\"\n\n losses: Optional[torch.FloatTensor] = None\n prediction_scores: torch.FloatTensor = None\n mems: List[torch.FloatTensor] = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None\n loss: Optional[torch.FloatTensor] = None\n\n @property\n def logits(self):\n # prediction scores are the output of the adaptive softmax, see\n # the file `modeling_transfo_xl_utilities`. 
Since the adaptive\n # softmax returns the log softmax value, `self.prediction_scores`\n # are strictly speaking not exactly `logits`, but behave the same\n # way logits do.\n return self.prediction_scores\n\n\nTRANSFO_XL_START_DOCSTRING = r\"\"\"\n\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`TransfoXLConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\"\"\"\n\nTRANSFO_XL_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`TransfoXLTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n mems (`List[torch.FloatTensor]` of length `config.n_layers`):\n Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see\n `mems` output below). Can be used to speed up sequential decoding. The token ids which have their mems\n given to this model should not be passed as `input_ids` as they have already been computed.\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare Bert Model transformer outputting raw hidden-states without any specific head on top.\",\n TRANSFO_XL_START_DOCSTRING,\n)\nclass TransfoXLModel(TransfoXLPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.n_token = config.vocab_size\n\n self.d_embed = config.d_embed\n self.d_model = config.d_model\n self.n_head = config.n_head\n self.d_head = config.d_head\n\n self.word_emb = AdaptiveEmbedding(\n config.vocab_size, config.d_embed, config.d_model, config.cutoffs, div_val=config.div_val\n )\n\n self.drop = nn.Dropout(config.dropout)\n\n self.n_layer = config.n_layer\n self.mem_len = config.mem_len\n self.attn_type = config.attn_type\n\n if not config.untie_r:\n self.r_w_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))\n self.r_r_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))\n\n self.layers = nn.ModuleList()\n if config.attn_type == 0: # the default attention\n for i in range(config.n_layer):\n self.layers.append(\n RelPartialLearnableDecoderLayer(\n config.n_head,\n config.d_model,\n config.d_head,\n config.d_inner,\n config.dropout,\n dropatt=config.dropatt,\n pre_lnorm=config.pre_lnorm,\n r_w_bias=None if config.untie_r else self.r_w_bias,\n r_r_bias=None if config.untie_r else self.r_r_bias,\n layer_norm_epsilon=config.layer_norm_epsilon,\n )\n )\n else: # learnable embeddings and absolute embeddings are not used in our pretrained checkpoints\n raise NotImplementedError # Removed them to avoid maintaining dead code\n\n self.same_length = config.same_length\n self.clamp_len = config.clamp_len\n\n if self.attn_type == 0: # default attention\n self.pos_emb = PositionalEmbedding(self.d_model)\n else: # learnable embeddings and absolute embeddings\n raise NotImplementedError # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_input_embeddings(self):\n return self.word_emb\n\n def set_input_embeddings(self, new_embeddings):\n self.word_emb = new_embeddings\n\n def backward_compatible(self):\n self.sample_softmax = -1\n\n def reset_memory_length(self, mem_len):\n self.mem_len = mem_len\n\n def _prune_heads(self, heads):\n logger.info(\"Head pruning is not implemented for Transformer-XL model\")\n pass\n\n def init_mems(self, bsz):\n if self.mem_len > 0:\n mems = []\n param = next(self.parameters())\n for i in range(self.n_layer):\n empty = torch.zeros(self.mem_len, bsz, self.config.d_model, dtype=param.dtype, device=param.device)\n mems.append(empty)\n\n return mems\n else:\n return None\n\n def _update_mems(self, hids, mems, mlen, qlen):\n # does not deal with None\n if mems is None:\n return None\n\n # mems is not None\n assert len(hids) == len(mems), \"len(hids) != len(mems)\"\n\n # There are `mlen + qlen` steps that can be cached into mems\n with torch.no_grad():\n new_mems = []\n end_idx = mlen + max(0, qlen)\n beg_idx = max(0, end_idx - self.mem_len)\n for i in range(len(hids)):\n\n cat = torch.cat([mems[i], hids[i]], dim=0)\n new_mems.append(cat[beg_idx:end_idx].detach())\n\n return new_mems\n\n @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n 
output_type=TransfoXLModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n mems: Optional[List[torch.FloatTensor]] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, TransfoXLModelOutput]:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n # the original code for Transformer-XL used shapes [len, bsz] but we want a unified interface in the library\n # so we transpose here from shape [bsz, len] to shape [len, bsz]\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_ids = input_ids.transpose(0, 1).contiguous()\n qlen, bsz = input_ids.size()\n elif inputs_embeds is not None:\n inputs_embeds = inputs_embeds.transpose(0, 1).contiguous()\n qlen, bsz = inputs_embeds.shape[0], inputs_embeds.shape[1]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n if mems is None:\n mems = self.init_mems(bsz)\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] (a head_mask for each layer)\n # and head_mask is converted to shape [num_hidden_layers x qlen x klen x bsz x n_head]\n if head_mask is not None:\n if head_mask.dim() == 1:\n head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(0).unsqueeze(0)\n head_mask = head_mask.expand(self.n_layer, -1, -1, -1, -1)\n elif head_mask.dim() == 2:\n head_mask = head_mask.unsqueeze(1).unsqueeze(1).unsqueeze(1)\n head_mask = head_mask.to(\n dtype=next(self.parameters()).dtype\n ) # switch to float if need + fp16 compatibility\n else:\n head_mask = [None] * self.n_layer\n\n if inputs_embeds is not None:\n word_emb = inputs_embeds\n else:\n word_emb = self.word_emb(input_ids)\n\n mlen = mems[0].size(0) if mems is not None else 0\n klen = mlen + qlen\n if self.same_length:\n all_ones = word_emb.new_ones((qlen, klen), dtype=torch.uint8)\n mask_len = klen - self.mem_len\n if mask_len > 0:\n mask_shift_len = qlen - mask_len\n else:\n mask_shift_len = qlen\n dec_attn_mask = (torch.triu(all_ones, 1 + mlen) + torch.tril(all_ones, -mask_shift_len))[:, :, None] # -1\n else:\n dec_attn_mask = torch.triu(word_emb.new_ones((qlen, klen), dtype=torch.uint8), diagonal=1 + mlen)[\n :, :, None\n ]\n\n hids = []\n attentions = [] if output_attentions else None\n if self.attn_type == 0: # default\n pos_seq = torch.arange(klen - 1, -1, -1.0, device=word_emb.device, dtype=word_emb.dtype)\n if self.clamp_len > 0:\n pos_seq.clamp_(max=self.clamp_len)\n pos_emb = self.pos_emb(pos_seq)\n\n core_out = self.drop(word_emb)\n pos_emb = self.drop(pos_emb)\n\n for i, layer in enumerate(self.layers):\n hids.append(core_out)\n mems_i = None if mems is None else mems[i]\n layer_outputs = layer(\n core_out,\n pos_emb,\n dec_attn_mask=dec_attn_mask,\n mems=mems_i,\n head_mask=head_mask[i],\n output_attentions=output_attentions,\n )\n 
core_out = layer_outputs[0]\n if output_attentions:\n attentions.append(layer_outputs[1])\n else: # learnable embeddings and absolute embeddings\n raise NotImplementedError # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint\n\n core_out = self.drop(core_out)\n\n new_mems = self._update_mems(hids, mems, mlen, qlen)\n\n if output_hidden_states:\n # Add last layer and transpose to library standard shape [bsz, len, hidden_dim]\n hids.append(core_out)\n hids = tuple(t.transpose(0, 1).contiguous() for t in hids)\n else:\n hids = None\n if output_attentions:\n # Transpose to library standard shape [bsz, n_heads, query_seq_len, key_seq_len]\n attentions = tuple(t.permute(2, 3, 0, 1).contiguous() for t in attentions)\n # We transpose back here to shape [bsz, len, hidden_dim]\n core_out = core_out.transpose(0, 1).contiguous()\n\n if not return_dict:\n return tuple(v for v in [core_out, new_mems, hids, attentions] if v is not None)\n\n return TransfoXLModelOutput(\n last_hidden_state=core_out,\n mems=new_mems,\n hidden_states=hids,\n attentions=attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n The Transformer-XL Model with a language modeling head on top (adaptive softmax with weights tied to the adaptive\n input embeddings)\n \"\"\",\n TRANSFO_XL_START_DOCSTRING,\n)\nclass TransfoXLLMHeadModel(TransfoXLPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.transformer = TransfoXLModel(config)\n self.sample_softmax = config.sample_softmax\n self.trainer_compatible = getattr(config, \"trainer_compatible\", False)\n\n if not self.trainer_compatible:\n warnings.warn(\n \"The output of TransfoXL will be updated in v5 to support a single loss as first argument. In order\"\n \"to use that updated output, please specify `trainer_compatible=True` as your configuration\"\n \" attribute.\",\n DeprecationWarning,\n )\n\n assert self.sample_softmax <= 0, (\n \"Sampling from the softmax is not implemented yet. 
Please look at issue: #3310:\"\n \" https://github.com/huggingface/transformers/issues/3310\"\n )\n\n self.crit = ProjectedAdaptiveLogSoftmax(\n config.vocab_size, config.d_embed, config.d_model, config.cutoffs, div_val=config.div_val\n )\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def tie_weights(self):\n \"\"\"\n Run this to be sure output and input (adaptive) softmax weights are tied\n \"\"\"\n\n if self.config.tie_word_embeddings:\n for i in range(len(self.crit.out_layers)):\n self._tie_or_clone_weights(self.crit.out_layers[i], self.transformer.word_emb.emb_layers[i])\n if self.config.tie_projs:\n for i, tie_proj in enumerate(self.config.tie_projs):\n if tie_proj and self.config.div_val == 1 and self.config.d_model != self.config.d_embed:\n if self.config.torchscript:\n self.crit.out_projs[i] = nn.Parameter(self.transformer.word_emb.emb_projs[0].clone())\n else:\n self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[0]\n elif tie_proj and self.config.div_val != 1:\n if self.config.torchscript:\n self.crit.out_projs[i] = nn.Parameter(self.transformer.word_emb.emb_projs[i].clone())\n else:\n self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[i]\n\n def reset_memory_length(self, mem_len):\n self.transformer.reset_memory_length(mem_len)\n\n def init_mems(self, bsz):\n return self.transformer.init_mems(bsz)\n\n @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=TransfoXLLMHeadModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n mems: Optional[List[torch.FloatTensor]] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, TransfoXLLMHeadModelOutput]:\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. 
you can set\n `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`\n are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n if input_ids is not None:\n bsz, tgt_len = input_ids.size(0), input_ids.size(1)\n elif inputs_embeds is not None:\n bsz, tgt_len = inputs_embeds.size(0), inputs_embeds.size(1)\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n transformer_outputs = self.transformer(\n input_ids,\n mems=mems,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n last_hidden = transformer_outputs[0]\n pred_hid = last_hidden[:, -tgt_len:]\n\n if labels is not None:\n # Prevents all labels being -100 and throwing an error\n # when backwarding the loss\n miss_valid_label = labels[0, 1:].sum() == (labels.size(1) - 1) * -100\n if miss_valid_label:\n # Sets an <EOS> token, just to prevent loss from being NaN\n labels[0, 1] = self.config.eos_token_id\n\n softmax_output = self.crit(pred_hid, labels)\n prediction_scores = softmax_output.view(bsz, tgt_len, -1) if labels is None else ()\n\n if labels is not None:\n losses = softmax_output.view(bsz, tgt_len - 1)\n # Avoids from incorporating padding (-100) tokens into loss value\n loss = losses[losses != 0].mean()\n else:\n losses, loss = None, None\n\n if not return_dict:\n if self.trainer_compatible:\n output = (prediction_scores, losses) if losses is not None else (prediction_scores,)\n output += transformer_outputs[1:]\n return ((loss,) + output) if loss is not None else output\n else:\n output = (prediction_scores, *transformer_outputs[1:])\n output = ((losses,) + output) if losses is not None else output\n return (output + (loss,)) if loss is not None else output\n\n return TransfoXLLMHeadModelOutput(\n loss=loss,\n prediction_scores=prediction_scores,\n losses=losses,\n mems=transformer_outputs.mems,\n hidden_states=transformer_outputs.hidden_states,\n attentions=transformer_outputs.attentions,\n )\n\n def get_output_embeddings(self):\n \"\"\"Double-check if you are using adaptive softmax.\"\"\"\n if self.sample_softmax > 0:\n return self.out_layer\n else:\n return self.crit.out_layers[-1]\n\n def prepare_inputs_for_generation(self, input_ids, past=None, **model_kwargs):\n inputs = {}\n\n # if past is defined in model kwargs then use it for faster decoding\n if past:\n inputs[\"mems\"] = past\n inputs[\"input_ids\"] = input_ids[:, -1].unsqueeze(-1)\n else:\n inputs[\"input_ids\"] = input_ids\n\n return inputs\n\n def _resize_cutoffs(self, new_num_tokens, new_emb_size, new_embedding_shapes, layer):\n new_cutoffs = super()._resize_cutoffs(new_num_tokens, new_emb_size, new_embedding_shapes, layer)\n\n self.crit.cutoffs = new_cutoffs\n self.crit.cutoff_ends = [0] + new_cutoffs\n self.crit.n_token = new_num_tokens\n\n @staticmethod\n def _reorder_cache(mems: List[torch.Tensor], beam_idx: torch.Tensor) -> List[torch.Tensor]:\n \"\"\"\n This function is used to re-order the `mems` cache if [`~PreTrainedModel.beam_search`] or\n [`~PreTrainedModel.beam_sample`] is called. 
This is required to match `mems` with the correct beam_idx at every\n generation step.\n \"\"\"\n return [layer_past.index_select(1, beam_idx.to(layer_past.device)) for layer_past in mems]\n\n\n@add_start_docstrings(\n \"\"\"\n The Transformer-XL Model transformer with a sequence classification head on top (linear layer).\n\n [`TransfoXLForSequenceClassification`] uses the last token in order to do the classification, as other causal\n models (e.g. GPT-1) do.\n\n Since it does classification on the last token, it requires to know the position of the last token. If a\n `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If\n no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the\n padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in\n each row of the batch).\n \"\"\",\n TRANSFO_XL_START_DOCSTRING,\n)\nclass TransfoXLForSequenceClassification(TransfoXLPreTrainedModel):\n _keys_to_ignore_on_load_missing = [r\"h\\.\\d+\\.attn\\.masked_bias\", r\"lm_head.weight\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n self.transformer = TransfoXLModel(config)\n self.score = nn.Linear(config.d_embed, self.num_labels, bias=False)\n # Initialize weights and apply final processing\n self.post_init()\n\n @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=TransfoXLSequenceClassifierOutputWithPast,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n mems: Optional[List[torch.FloatTensor]] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, TransfoXLSequenceClassifierOutputWithPast]:\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,\n config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If\n `config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n transformer_outputs = self.transformer(\n input_ids,\n mems=mems,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n hidden_states = transformer_outputs[0]\n logits = self.score(hidden_states)\n\n if input_ids is not None:\n batch_size, sequence_length = input_ids.shape[:2]\n else:\n batch_size, sequence_length = inputs_embeds.shape[:2]\n\n assert (\n self.config.pad_token_id is not None or batch_size == 1\n ), \"Cannot handle batch sizes > 1 if no padding token is defined.\"\n if self.config.pad_token_id is None:\n sequence_lengths = -1\n else:\n if input_ids is not None:\n sequence_lengths = torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1\n else:\n sequence_lengths = -1\n logger.warning(\n f\"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. 
Results may be \"\n \"unexpected if using padding tokens in conjunction with `inputs_embeds.`\"\n )\n\n pooled_logits = logits[range(batch_size), sequence_lengths]\n\n loss = None\n if labels is not None:\n if self.config.problem_type is None:\n if self.num_labels == 1:\n self.config.problem_type = \"regression\"\n elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):\n self.config.problem_type = \"single_label_classification\"\n else:\n self.config.problem_type = \"multi_label_classification\"\n\n if self.config.problem_type == \"regression\":\n loss_fct = MSELoss()\n if self.num_labels == 1:\n loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())\n else:\n loss = loss_fct(pooled_logits, labels)\n elif self.config.problem_type == \"single_label_classification\":\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))\n elif self.config.problem_type == \"multi_label_classification\":\n loss_fct = BCEWithLogitsLoss()\n loss = loss_fct(pooled_logits, labels)\n if not return_dict:\n output = (pooled_logits,) + transformer_outputs[1:]\n return ((loss,) + output) if loss is not None else output\n\n return TransfoXLSequenceClassifierOutputWithPast(\n loss=loss,\n logits=pooled_logits,\n mems=transformer_outputs.mems,\n hidden_states=transformer_outputs.hidden_states,\n attentions=transformer_outputs.attentions,\n )\n"
] | [
[
"torch.LongTensor",
"torch.ones",
"torch.cat",
"torch.no_grad",
"torch.cuda.is_available",
"torch.allclose",
"torch.ones_like"
],
[
"torch.nn.functional.softmax",
"torch.nn.init.uniform_",
"torch.zeros",
"torch.cat",
"torch.sum",
"torch.nn.Embedding",
"torch.nn.BCEWithLogitsLoss",
"torch.no_grad",
"torch.FloatTensor",
"torch.triu",
"torch.nn.Dropout",
"torch.nn.CrossEntropyLoss",
"torch.einsum",
"torch.from_numpy",
"torch.tril",
"tensorflow.train.list_variables",
"torch.arange",
"torch.nn.functional.linear",
"torch.nn.MSELoss",
"torch.nn.init.constant_",
"torch.nn.ModuleList",
"tensorflow.train.load_variable",
"torch.nn.Linear",
"torch.nn.init.normal_",
"torch.nn.ParameterList",
"numpy.transpose",
"torch.ne",
"torch.nn.LayerNorm",
"torch.chunk",
"torch.nn.ReLU",
"torch.ger"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
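For quick reference, the torch calls listed above come from the Transformer-XL modeling file in this record; the least obvious of them back the relative-position shift inside RelPartialLearnableMultiHeadAttn._rel_shift. The sketch below is a standalone restatement of that logic only (the free-function name rel_shift and the toy score matrix are illustrative additions, not part of the source file):

import torch

def rel_shift(x: torch.Tensor) -> torch.Tensor:
    # x holds attention scores of shape [qlen, klen, ...] indexed by absolute key position.
    # Prepend a zero column, reshape so each row slides over by one, then drop the first
    # row: this realigns the positional term so it is indexed by relative distance instead.
    zero_pad = torch.zeros((x.size(0), 1) + x.size()[2:], device=x.device, dtype=x.dtype)
    x_padded = torch.cat([zero_pad, x], dim=1)
    x_padded = x_padded.view((x.size(1) + 1, x.size(0)) + x.size()[2:])
    return x_padded[1:].view_as(x)

# Tiny check on a [qlen=3, klen=3] score matrix.
scores = torch.arange(9.0).view(3, 3)
print(rel_shift(scores))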
fjaragones/fastai | [
"be48d209a4526191f71dc7adaef090828897b9ec"
] | [
"old/fastai/structured.py"
] | [
"from .imports import *\n\nfrom sklearn_pandas import DataFrameMapper\nfrom sklearn.preprocessing import LabelEncoder, Imputer, StandardScaler\nfrom pandas.api.types import is_string_dtype, is_numeric_dtype\nfrom sklearn.ensemble import forest\nfrom sklearn.tree import export_graphviz\n\n\ndef set_plot_sizes(sml, med, big):\n plt.rc('font', size=sml) # controls default text sizes\n plt.rc('axes', titlesize=sml) # fontsize of the axes title\n plt.rc('axes', labelsize=med) # fontsize of the x and y labels\n plt.rc('xtick', labelsize=sml) # fontsize of the tick labels\n plt.rc('ytick', labelsize=sml) # fontsize of the tick labels\n plt.rc('legend', fontsize=sml) # legend fontsize\n plt.rc('figure', titlesize=big) # fontsize of the figure title\n\ndef parallel_trees(m, fn, n_jobs=8):\n return list(ProcessPoolExecutor(n_jobs).map(fn, m.estimators_))\n\ndef draw_tree(t, df, size=10, ratio=0.6, precision=0):\n \"\"\" Draws a representation of a random forest in IPython.\n\n Parameters:\n -----------\n t: The tree you wish to draw\n df: The data used to train the tree. This is used to get the names of the features.\n \"\"\"\n s=export_graphviz(t, out_file=None, feature_names=df.columns, filled=True,\n special_characters=True, rotate=True, precision=precision)\n IPython.display.display(graphviz.Source(re.sub('Tree {',\n f'Tree {{ size={size}; ratio={ratio}', s)))\n\ndef combine_date(years, months=1, days=1, weeks=None, hours=None, minutes=None,\n seconds=None, milliseconds=None, microseconds=None, nanoseconds=None):\n years = np.asarray(years) - 1970\n months = np.asarray(months) - 1\n days = np.asarray(days) - 1\n types = ('<M8[Y]', '<m8[M]', '<m8[D]', '<m8[W]', '<m8[h]',\n '<m8[m]', '<m8[s]', '<m8[ms]', '<m8[us]', '<m8[ns]')\n vals = (years, months, days, weeks, hours, minutes, seconds,\n milliseconds, microseconds, nanoseconds)\n return sum(np.asarray(v, dtype=t) for t, v in zip(types, vals)\n if v is not None)\n\ndef get_sample(df,n):\n \"\"\" Gets a random sample of n rows from df, without replacement.\n\n Parameters:\n -----------\n df: A pandas data frame, that you wish to sample from.\n n: The number of rows you wish to sample.\n\n Returns:\n --------\n return value: A random sample of n rows of df.\n\n Examples:\n ---------\n >>> df = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['a', 'b', 'a']})\n >>> df\n col1 col2\n 0 1 a\n 1 2 b\n 2 3 a\n\n >>> get_sample(df, 2)\n col1 col2\n 1 2 b\n 2 3 a\n \"\"\"\n idxs = sorted(np.random.permutation(len(df))[:n])\n return df.iloc[idxs].copy()\n\ndef add_datepart(df, fldname, drop=True, time=False, errors=\"raise\"):\t\n \"\"\"add_datepart converts a column of df from a datetime64 to many columns containing\n the information from the date. This applies changes inplace.\n\n Parameters:\n -----------\n df: A pandas data frame. 
df gain several new columns.\n fldname: A string that is the name of the date column you wish to expand.\n If it is not a datetime64 series, it will be converted to one with pd.to_datetime.\n drop: If true then the original date column will be removed.\n time: If true time features: Hour, Minute, Second will be added.\n\n Examples:\n ---------\n\n >>> df = pd.DataFrame({ 'A' : pd.to_datetime(['3/11/2000', '3/12/2000', '3/13/2000'], infer_datetime_format=False) })\n >>> df\n\n A\n 0 2000-03-11\n 1 2000-03-12\n 2 2000-03-13\n\n >>> add_datepart(df, 'A')\n >>> df\n\n AYear AMonth AWeek ADay ADayofweek ADayofyear AIs_month_end AIs_month_start AIs_quarter_end AIs_quarter_start AIs_year_end AIs_year_start AElapsed\n 0 2000 3 10 11 5 71 False False False False False False 952732800\n 1 2000 3 10 12 6 72 False False False False False False 952819200\n 2 2000 3 11 13 0 73 False False False False False False 952905600\n \"\"\"\n fld = df[fldname]\n fld_dtype = fld.dtype\n if isinstance(fld_dtype, pd.core.dtypes.dtypes.DatetimeTZDtype):\n fld_dtype = np.datetime64\n\n if not np.issubdtype(fld_dtype, np.datetime64):\n df[fldname] = fld = pd.to_datetime(fld, infer_datetime_format=True, errors=errors)\n targ_pre = re.sub('[Dd]ate$', '', fldname)\n attr = ['Year', 'Month', 'Week', 'Day', 'Dayofweek', 'Dayofyear',\n 'Is_month_end', 'Is_month_start', 'Is_quarter_end', 'Is_quarter_start', 'Is_year_end', 'Is_year_start']\n if time: attr = attr + ['Hour', 'Minute', 'Second']\n for n in attr: df[targ_pre + n] = getattr(fld.dt, n.lower())\n df[targ_pre + 'Elapsed'] = fld.astype(np.int64) // 10 ** 9\n if drop: df.drop(fldname, axis=1, inplace=True)\n\ndef is_date(x): return np.issubdtype(x.dtype, np.datetime64)\n\ndef train_cats(df):\n \"\"\"Change any columns of strings in a panda's dataframe to a column of\n categorical values. This applies the changes inplace.\n\n Parameters:\n -----------\n df: A pandas dataframe. Any columns of strings will be changed to\n categorical values.\n\n Examples:\n ---------\n\n >>> df = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['a', 'b', 'a']})\n >>> df\n col1 col2\n 0 1 a\n 1 2 b\n 2 3 a\n\n note the type of col2 is string\n\n >>> train_cats(df)\n >>> df\n\n col1 col2\n 0 1 a\n 1 2 b\n 2 3 a\n\n now the type of col2 is category\n \"\"\"\n for n,c in df.items():\n if is_string_dtype(c): df[n] = c.astype('category').cat.as_ordered()\n\ndef apply_cats(df, trn):\n \"\"\"Changes any columns of strings in df into categorical variables using trn as\n a template for the category codes.\n\n Parameters:\n -----------\n df: A pandas dataframe. Any columns of strings will be changed to\n categorical values. The category codes are determined by trn.\n\n trn: A pandas dataframe. 
When creating a category for df, it looks up the\n what the category's code were in trn and makes those the category codes\n for df.\n\n Examples:\n ---------\n >>> df = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['a', 'b', 'a']})\n >>> df\n col1 col2\n 0 1 a\n 1 2 b\n 2 3 a\n\n note the type of col2 is string\n\n >>> train_cats(df)\n >>> df\n\n col1 col2\n 0 1 a\n 1 2 b\n 2 3 a\n\n now the type of col2 is category {a : 1, b : 2}\n\n >>> df2 = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['b', 'a', 'a']})\n >>> apply_cats(df2, df)\n\n col1 col2\n 0 1 b\n 1 2 a\n 2 3 a\n\n now the type of col is category {a : 1, b : 2}\n \"\"\"\n for n,c in df.items():\n if (n in trn.columns) and (trn[n].dtype.name=='category'):\n df[n] = c.astype('category').cat.as_ordered()\n df[n].cat.set_categories(trn[n].cat.categories, ordered=True, inplace=True)\n\ndef fix_missing(df, col, name, na_dict):\n \"\"\" Fill missing data in a column of df with the median, and add a {name}_na column\n which specifies if the data was missing.\n\n Parameters:\n -----------\n df: The data frame that will be changed.\n\n col: The column of data to fix by filling in missing data.\n\n name: The name of the new filled column in df.\n\n na_dict: A dictionary of values to create na's of and the value to insert. If\n name is not a key of na_dict the median will fill any missing data. Also\n if name is not a key of na_dict and there is no missing data in col, then\n no {name}_na column is not created.\n\n\n Examples:\n ---------\n >>> df = pd.DataFrame({'col1' : [1, np.NaN, 3], 'col2' : [5, 2, 2]})\n >>> df\n col1 col2\n 0 1 5\n 1 nan 2\n 2 3 2\n\n >>> fix_missing(df, df['col1'], 'col1', {})\n >>> df\n col1 col2 col1_na\n 0 1 5 False\n 1 2 2 True\n 2 3 2 False\n\n\n >>> df = pd.DataFrame({'col1' : [1, np.NaN, 3], 'col2' : [5, 2, 2]})\n >>> df\n col1 col2\n 0 1 5\n 1 nan 2\n 2 3 2\n\n >>> fix_missing(df, df['col2'], 'col2', {})\n >>> df\n col1 col2\n 0 1 5\n 1 nan 2\n 2 3 2\n\n\n >>> df = pd.DataFrame({'col1' : [1, np.NaN, 3], 'col2' : [5, 2, 2]})\n >>> df\n col1 col2\n 0 1 5\n 1 nan 2\n 2 3 2\n\n >>> fix_missing(df, df['col1'], 'col1', {'col1' : 500})\n >>> df\n col1 col2 col1_na\n 0 1 5 False\n 1 500 2 True\n 2 3 2 False\n \"\"\"\n if is_numeric_dtype(col):\n if pd.isnull(col).sum() or (name in na_dict):\n df[name+'_na'] = pd.isnull(col)\n filler = na_dict[name] if name in na_dict else col.median()\n df[name] = col.fillna(filler)\n na_dict[name] = filler\n return na_dict\n\ndef numericalize(df, col, name, max_n_cat):\n \"\"\" Changes the column col from a categorical type to it's integer codes.\n\n Parameters:\n -----------\n df: A pandas dataframe. df[name] will be filled with the integer codes from\n col.\n\n col: The column you wish to change into the categories.\n name: The column name you wish to insert into df. This column will hold the\n integer codes.\n\n max_n_cat: If col has more categories than max_n_cat it will not change the\n it to its integer codes. 
If max_n_cat is None, then col will always be\n converted.\n\n Examples:\n ---------\n >>> df = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['a', 'b', 'a']})\n >>> df\n col1 col2\n 0 1 a\n 1 2 b\n 2 3 a\n\n note the type of col2 is string\n\n >>> train_cats(df)\n >>> df\n\n col1 col2\n 0 1 a\n 1 2 b\n 2 3 a\n\n now the type of col2 is category { a : 1, b : 2}\n\n >>> numericalize(df, df['col2'], 'col3', None)\n\n col1 col2 col3\n 0 1 a 1\n 1 2 b 2\n 2 3 a 1\n \"\"\"\n if not is_numeric_dtype(col) and ( max_n_cat is None or len(col.cat.categories)>max_n_cat):\n df[name] = col.cat.codes+1\n\ndef scale_vars(df, mapper):\n warnings.filterwarnings('ignore', category=sklearn.exceptions.DataConversionWarning)\n if mapper is None:\n map_f = [([n],StandardScaler()) for n in df.columns if is_numeric_dtype(df[n])]\n mapper = DataFrameMapper(map_f).fit(df)\n df[mapper.transformed_names_] = mapper.transform(df)\n return mapper\n\ndef proc_df(df, y_fld=None, skip_flds=None, ignore_flds=None, do_scale=False, na_dict=None,\n preproc_fn=None, max_n_cat=None, subset=None, mapper=None):\n \"\"\" proc_df takes a data frame df and splits off the response variable, and\n changes the df into an entirely numeric dataframe. For each column of df \n which is not in skip_flds nor in ignore_flds, na values are replaced by the\n median value of the column.\n\n Parameters:\n -----------\n df: The data frame you wish to process.\n\n y_fld: The name of the response variable\n\n skip_flds: A list of fields that dropped from df.\n\n ignore_flds: A list of fields that are ignored during processing.\n\n do_scale: Standardizes each column in df. Takes Boolean Values(True,False)\n\n na_dict: a dictionary of na columns to add. Na columns are also added if there\n are any missing values.\n\n preproc_fn: A function that gets applied to df.\n\n max_n_cat: The maximum number of categories to break into dummy values, instead\n of integer codes.\n\n subset: Takes a random subset of size subset from df.\n\n mapper: If do_scale is set as True, the mapper variable\n calculates the values used for scaling of variables during training time (mean and standard deviation).\n\n Returns:\n --------\n [x, y, nas, mapper(optional)]:\n\n x: x is the transformed version of df. 
x will not have the response variable\n and is entirely numeric.\n\n y: y is the response variable\n\n nas: returns a dictionary of which nas it created, and the associated median.\n\n mapper: A DataFrameMapper which stores the mean and standard deviation of the corresponding continuous\n variables which is then used for scaling of during test-time.\n\n Examples:\n ---------\n >>> df = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['a', 'b', 'a']})\n >>> df\n col1 col2\n 0 1 a\n 1 2 b\n 2 3 a\n\n note the type of col2 is string\n\n >>> train_cats(df)\n >>> df\n\n col1 col2\n 0 1 a\n 1 2 b\n 2 3 a\n\n now the type of col2 is category { a : 1, b : 2}\n\n >>> x, y, nas = proc_df(df, 'col1')\n >>> x\n\n col2\n 0 1\n 1 2\n 2 1\n\n >>> data = DataFrame(pet=[\"cat\", \"dog\", \"dog\", \"fish\", \"cat\", \"dog\", \"cat\", \"fish\"],\n children=[4., 6, 3, 3, 2, 3, 5, 4],\n salary=[90, 24, 44, 27, 32, 59, 36, 27])\n\n >>> mapper = DataFrameMapper([(:pet, LabelBinarizer()),\n ([:children], StandardScaler())])\n\n >>>round(fit_transform!(mapper, copy(data)), 2)\n\n 8x4 Array{Float64,2}:\n 1.0 0.0 0.0 0.21\n 0.0 1.0 0.0 1.88\n 0.0 1.0 0.0 -0.63\n 0.0 0.0 1.0 -0.63\n 1.0 0.0 0.0 -1.46\n 0.0 1.0 0.0 -0.63\n 1.0 0.0 0.0 1.04\n 0.0 0.0 1.0 0.21\n \"\"\"\n if not ignore_flds: ignore_flds=[]\n if not skip_flds: skip_flds=[]\n if subset: df = get_sample(df,subset)\n else: df = df.copy()\n ignored_flds = df.loc[:, ignore_flds]\n df.drop(ignore_flds, axis=1, inplace=True)\n if preproc_fn: preproc_fn(df)\n if y_fld is None: y = None\n else:\n if not is_numeric_dtype(df[y_fld]): df[y_fld] = df[y_fld].cat.codes\n y = df[y_fld].values\n skip_flds += [y_fld]\n df.drop(skip_flds, axis=1, inplace=True)\n\n if na_dict is None: na_dict = {}\n else: na_dict = na_dict.copy()\n na_dict_initial = na_dict.copy()\n for n,c in df.items(): na_dict = fix_missing(df, c, n, na_dict)\n if len(na_dict_initial.keys()) > 0:\n df.drop([a + '_na' for a in list(set(na_dict.keys()) - set(na_dict_initial.keys()))], axis=1, inplace=True)\n if do_scale: mapper = scale_vars(df, mapper)\n for n,c in df.items(): numericalize(df, c, n, max_n_cat)\n df = pd.get_dummies(df, dummy_na=True)\n df = pd.concat([ignored_flds, df], axis=1)\n res = [df, y, na_dict]\n if do_scale: res = res + [mapper]\n return res\n\ndef rf_feat_importance(m, df):\n return pd.DataFrame({'cols':df.columns, 'imp':m.feature_importances_}\n ).sort_values('imp', ascending=False)\n\ndef set_rf_samples(n):\n \"\"\" Changes Scikit learn's random forests to give each tree a random sample of\n n random rows.\n \"\"\"\n forest._generate_sample_indices = (lambda rs, n_samples:\n forest.check_random_state(rs).randint(0, n_samples, n))\n\ndef reset_rf_samples():\n \"\"\" Undoes the changes produced by set_rf_samples.\n \"\"\"\n forest._generate_sample_indices = (lambda rs, n_samples:\n forest.check_random_state(rs).randint(0, n_samples, n_samples))\n\ndef get_nn_mappers(df, cat_vars, contin_vars):\n # Replace nulls with 0 for continuous, \"\" for categorical.\n for v in contin_vars: df[v] = df[v].fillna(df[v].max()+100,)\n for v in cat_vars: df[v].fillna('#NA#', inplace=True)\n\n # list of tuples, containing variable and instance of a transformer for that variable\n # for categoricals, use LabelEncoder to map to integers. For continuous, standardize\n cat_maps = [(o, LabelEncoder()) for o in cat_vars]\n contin_maps = [([o], StandardScaler()) for o in contin_vars]\n return DataFrameMapper(cat_maps).fit(df), DataFrameMapper(contin_maps).fit(df)\n"
] | [
[
"sklearn.tree.export_graphviz",
"sklearn.ensemble.forest.check_random_state",
"pandas.api.types.is_numeric_dtype",
"sklearn.preprocessing.StandardScaler",
"sklearn.preprocessing.LabelEncoder",
"pandas.api.types.is_string_dtype"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
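The docstrings in the structured-data helpers above (train_cats, proc_df, add_datepart) show each function in isolation; the short sketch below strings the two most common calls together. It is a minimal sketch only: it assumes the functions from the file above are already in scope (the record gives no package import path), and the tiny frame is the same toy data used in the docstrings.

import pandas as pd

df = pd.DataFrame({'col1': [1, 2, 3], 'col2': ['a', 'b', 'a']})
train_cats(df)                   # 'col2' becomes an ordered categorical, in place
x, y, nas = proc_df(df, 'col1')  # split off 'col1' as the response variable
# x is purely numeric: 'col2' now holds the category codes 1, 2, 1
# y is array([1, 2, 3]); nas is {} because no column had missing values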
dizcza/pytorch-mighty | [
"942c53b529377c9100bffc2f7f20ec740763e6ae",
"942c53b529377c9100bffc2f7f20ec740763e6ae"
] | [
"mighty/trainer/autoencoder.py",
"mighty/monitor/mutual_info/neural_estimation.py"
] | [
"from typing import Union\n\nimport torch\nimport torch.nn as nn\nimport torch.utils.data\nfrom torch.optim.lr_scheduler import _LRScheduler, ReduceLROnPlateau\nfrom torch.optim.optimizer import Optimizer\n\nfrom mighty.loss import LossPenalty\nfrom mighty.models import AutoencoderLinear\nfrom mighty.monitor.monitor import MonitorAutoencoder\nfrom mighty.utils.var_online import MeanOnline\nfrom mighty.utils.signal import peak_to_signal_noise_ratio\nfrom mighty.utils.common import input_from_batch, batch_to_cuda\nfrom mighty.utils.data import DataLoader\nfrom .embedding import TrainerEmbedding\n\n\n__all__ = [\n \"TrainerAutoencoder\"\n]\n\n\nclass TrainerAutoencoder(TrainerEmbedding):\n \"\"\"\n An unsupervised AutoEncoder trainer that not only transforms inputs to\n meaningful embeddings but also aims to restore the input signal from it.\n\n\n Parameters\n ----------\n model : nn.Module\n A neural network to train.\n criterion : nn.Module\n A loss function.\n data_loader : DataLoader\n A data loader.\n optimizer : Optimizer\n An optimizer (Adam, SGD, etc.).\n scheduler : _LRScheduler or ReduceLROnPlateau, or None\n A learning rate scheduler.\n Default: None\n accuracy_measure : AccuracyEmbedding, optional\n Calculates the accuracy of embedding vectors.\n Default: ``AccuracyEmbedding()``\n **kwargs\n Passed to the base class.\n \"\"\"\n\n watch_modules = TrainerEmbedding.watch_modules + (AutoencoderLinear,)\n\n def __init__(self,\n model: nn.Module,\n criterion: nn.Module,\n data_loader: DataLoader,\n optimizer: Optimizer,\n scheduler: Union[_LRScheduler, ReduceLROnPlateau] = None,\n **kwargs):\n super().__init__(model, criterion=criterion, data_loader=data_loader,\n optimizer=optimizer, scheduler=scheduler, **kwargs)\n\n def _init_monitor(self, mutual_info) -> MonitorAutoencoder:\n monitor = MonitorAutoencoder(\n mutual_info=mutual_info,\n normalize_inverse=self.data_loader.normalize_inverse\n )\n return monitor\n\n def _init_online_measures(self):\n online = super()._init_online_measures()\n\n # peak signal-to-noise ratio\n online['psnr-train'] = MeanOnline()\n online['psnr-test'] = MeanOnline()\n\n return online\n\n def _get_loss(self, batch, output):\n input = input_from_batch(batch)\n latent, reconstructed = output\n if isinstance(self.criterion, LossPenalty):\n loss = self.criterion(reconstructed, input, latent)\n else:\n loss = self.criterion(reconstructed, input)\n return loss\n\n def _on_forward_pass_batch(self, batch, output, train):\n input = input_from_batch(batch)\n latent, reconstructed = output\n if isinstance(self.criterion, nn.BCEWithLogitsLoss):\n reconstructed = reconstructed.sigmoid()\n psnr = peak_to_signal_noise_ratio(input, reconstructed)\n fold = 'train' if train else 'test'\n if torch.isfinite(psnr):\n self.online[f'psnr-{fold}'].update(psnr.cpu())\n super()._on_forward_pass_batch(batch, latent, train)\n\n def _epoch_finished(self, loss):\n self.plot_autoencoder()\n for fold in ('train', 'test'):\n self.monitor.plot_psnr(self.online[f'psnr-{fold}'].get_mean(),\n mode=fold)\n super()._epoch_finished(loss)\n\n def plot_autoencoder(self):\n \"\"\"\n Plots AutoEncoder reconstruction.\n \"\"\"\n batch = self.data_loader.sample()\n batch = batch_to_cuda(batch)\n mode_saved = self.model.training\n self.model.train(False)\n with torch.no_grad():\n latent, reconstructed = self._forward(batch)\n if isinstance(self.criterion, nn.BCEWithLogitsLoss):\n reconstructed = reconstructed.sigmoid()\n self._plot_autoencoder(batch, reconstructed)\n self.model.train(mode_saved)\n\n def 
_plot_autoencoder(self, batch, reconstructed, mode='train'):\n input = input_from_batch(batch)\n self.monitor.plot_autoencoder(input, reconstructed, mode=mode)\n",
"\"\"\"\nMINE: Mutual Information Neural Estimation\nM. Belghazi et. al, 2018\nhttps://arxiv.org/abs/1801.04062\n\"\"\"\n\nfrom typing import List\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.data\n\nfrom mighty.models import MLP\nfrom mighty.monitor.mutual_info._pca_preprocess import MutualInfoPCA\nfrom mighty.utils.signal import to_onehot, exponential_moving_average\nfrom mighty.utils.constants import BATCH_SIZE\nfrom mighty.utils.data import DataLoader\n\n\nclass MINE_Net(nn.Module):\n\n def __init__(self, x_size: int, y_size: int, hidden_units=(100, 50)):\n \"\"\"\n A network to estimate the mutual information between X and Y, I(X; Y).\n\n Parameters\n ----------\n x_size, y_size : int\n Number of neurons in X and Y.\n hidden_units : int or tuple of int\n Hidden layer size(s).\n \"\"\"\n super().__init__()\n if isinstance(hidden_units, int):\n hidden_units = [hidden_units]\n self.fc_x = nn.Linear(x_size, hidden_units[0], bias=False)\n self.fc_y = nn.Linear(y_size, hidden_units[0], bias=False)\n self.xy_bias = nn.Parameter(torch.zeros(hidden_units[0]))\n # the output mutual info is a scalar; hence, the last dimension is 1\n self.fc_output = MLP(*hidden_units, 1)\n\n def forward(self, x, y):\n \"\"\"\n Parameters\n ----------\n x, y : torch.Tensor\n Data batches.\n\n Returns\n -------\n mi : torch.Tensor\n Kullback-Leibler lower-bound estimation of I(X; Y).\n \"\"\"\n hidden = F.relu(self.fc_x(x) + self.fc_y(y) + self.xy_bias,\n inplace=True)\n mi = self.fc_output(hidden)\n return mi\n\n\nclass MINE_Trainer:\n \"\"\"\n Parameters\n ----------\n mine_model : MINE_Net\n A network to estimate mutual information.\n learning_rate : float\n Optimizer learning rate.\n smooth_filter_size : int\n Smoothing filter size. The larger the filter, the smoother but also\n more biased towards lower values of the resulting estimate.\n \"\"\"\n\n log2_e = np.log2(np.e)\n\n def __init__(self, mine_model: nn.Module, learning_rate=1e-3,\n smooth_filter_size=30):\n if torch.cuda.is_available():\n mine_model = mine_model.cuda()\n self.mine_model = mine_model\n self.optimizer = torch.optim.Adam(self.mine_model.parameters(),\n lr=learning_rate,\n weight_decay=1e-5)\n self.smooth_filter_size = smooth_filter_size\n\n self.scheduler = None\n self.mi_history = None\n self.reset()\n\n def __repr__(self):\n return f\"{MINE_Trainer.__name__}(model={self.mine_model}, \" \\\n f\"optimizer={self.optimizer}, \" \\\n f\"smooth_filter_size={self.smooth_filter_size})\"\n\n def reset(self):\n self.scheduler = torch.optim.lr_scheduler.ExponentialLR(\n self.optimizer, gamma=0.5)\n self.mi_history = [0]\n\n def train_batch(self, x_batch, y_batch):\n \"\"\"\n Performs a single step to refine I(X; Y).\n\n Parameters\n ----------\n x_batch, y_batch : torch.Tensor\n A batch of multidimensional X and Y of size (B, N) to\n estimate mutual information from. 
N could be 1 or more.\n \"\"\"\n if torch.cuda.is_available():\n x_batch = x_batch.cuda()\n y_batch = y_batch.cuda()\n self.optimizer.zero_grad()\n pred_joint = self.mine_model(x_batch, y_batch)\n y_batch = y_batch[\n torch.randperm(y_batch.shape[0], device=y_batch.device)]\n pred_marginal = self.mine_model(x_batch, y_batch)\n mi_lower_bound = pred_joint.mean() - pred_marginal.exp().mean().log()\n mi_bits = mi_lower_bound.item() * self.log2_e # convert nats to bits\n self.mi_history.append(mi_bits)\n loss = -mi_lower_bound # maximize\n loss.backward()\n self.optimizer.step()\n\n def smooth_history(self):\n history = torch.as_tensor(self.mi_history)\n history = history[~torch.isnan(history)]\n return exponential_moving_average(history,\n window=self.smooth_filter_size)\n\n def get_mutual_info(self):\n \"\"\"\n Returns\n -------\n float\n Estimated mutual information lower bound.\n \"\"\"\n return self.smooth_history().max()\n\n\nclass MutualInfoNeuralEstimation(MutualInfoPCA):\n \"\"\"\n Mutual Information Neural Estimation [1]_, followed by PCA dimensionality\n reduction.\n\n Parameters\n ----------\n data_loader : DataLoader\n The data loader.\n pca_size : int, optional\n PCA dimension size.\n Default: 100\n estimate_epochs : int, optional\n The number of epochs to run.\n Default: 5\n noise_std : float, optional\n Additive noise standard deviation (to break the degeneracy).\n Default: 1e-3\n debug : bool, optional\n If True, shows more informative plots.\n Default: False\n\n Attributes\n ----------\n ignore_layers : tuple\n A tuple to ignore layer classes to monitor for MI.\n\n References\n ----------\n .. [1] Belghazi, M. I., Baratin, A., Rajeswar, S., Ozair, S., Bengio, Y.,\n Courville, A., & Hjelm, R. D. (2018). Mine: mutual information neural\n estimation. 
arXiv preprint arXiv:1801.04062.\n \"\"\"\n\n def __init__(self, data_loader: DataLoader, pca_size=100, debug=False,\n hidden_units=(100, 50), estimate_epochs=5, noise_std=1e-3):\n super().__init__(data_loader=data_loader, pca_size=pca_size,\n debug=debug)\n self.estimate_epochs = estimate_epochs\n self.hidden_units = hidden_units\n self.noise_sampler = torch.distributions.normal.Normal(loc=0,\n scale=noise_std)\n self.trainers = {} # MutualInformationNeuralEstimation trainers for both input X- and target Y-data\n self.input_size = None\n self.target_size = None\n\n def extra_repr(self):\n return f\"{super().extra_repr()}; noise_variance={self.noise_sampler.variance}; \"\n\n def _prepare_input_finished(self):\n super()._prepare_input_finished()\n self.input_size = self.quantized['input'].shape[1]\n self.target_size = len(self.quantized['target'].unique())\n # one-hot encoded labels are better fit than argmax\n self.quantized['target'] = to_onehot(self.quantized['target']).type(\n torch.float32)\n\n def _process_activations(self, layer_name: str,\n activations: List[torch.FloatTensor]):\n # TODO process each batch in save_activations()\n activations = torch.cat(activations, dim=0)\n assert len(self.quantized['input']) == len(\n self.quantized['target']) == len(activations)\n embedding_size = activations.shape[1]\n if layer_name not in self.trainers:\n self.trainers[layer_name] = (\n MINE_Trainer(MINE_Net(x_size=embedding_size,\n y_size=self.input_size,\n hidden_units=self.hidden_units)),\n MINE_Trainer(MINE_Net(x_size=embedding_size,\n y_size=self.target_size,\n hidden_units=self.hidden_units)),\n )\n for mi_trainer in self.trainers[layer_name]:\n mi_trainer.reset()\n for epoch in range(self.estimate_epochs):\n permutations = torch.randperm(len(activations)).split(BATCH_SIZE)\n for batch_permutation in permutations:\n activations_batch = activations[batch_permutation]\n for data_type, trainer in zip(('input', 'target'),\n self.trainers[layer_name]):\n labels_batch = self.quantized[data_type][batch_permutation]\n labels_batch = labels_batch + self.noise_sampler.sample(\n labels_batch.shape)\n trainer.train_batch(x_batch=activations_batch,\n y_batch=labels_batch)\n for mi_trainer in self.trainers[layer_name]:\n mi_trainer.scheduler.step()\n\n def _save_mutual_info(self):\n for layer_name, (trainer_x, trainer_y) in self.trainers.items():\n info_x = trainer_x.get_mutual_info()\n info_y = trainer_y.get_mutual_info()\n self.information[layer_name] = (info_x, info_y)\n\n def plot_mine_history_loss(self, viz):\n \"\"\"\n Plots the loss of a training progress with iterations.\n \"\"\"\n legend = []\n info_x = []\n info_y = []\n for layer_name, (trainer_x, trainer_y) in self.trainers.items():\n info_x.append(trainer_x.smooth_history())\n info_y.append(trainer_y.smooth_history())\n legend.append(layer_name)\n for info_name, info in (('input X', info_x), ('target Y', info_y)):\n info = torch.stack(info).t().squeeze()\n title = f'MutualInfoNeuralEstimation {info_name}'\n viz.line(Y=info, X=torch.arange(len(info)), win=title, opts=dict(\n xlabel='Iteration',\n ylabel='Mutual info lower bound, bits',\n title=title,\n legend=legend,\n ))\n\n def _plot_debug(self, viz):\n super()._plot_debug(viz)\n self.plot_mine_history_loss(viz)\n"
] | [
[
"torch.isfinite",
"torch.no_grad"
],
[
"numpy.log2",
"torch.isnan",
"torch.cat",
"torch.zeros",
"torch.randperm",
"torch.optim.lr_scheduler.ExponentialLR",
"torch.nn.Linear",
"torch.cuda.is_available",
"torch.stack",
"torch.distributions.normal.Normal",
"torch.as_tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
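The MINE classes in mighty/monitor/mutual_info/neural_estimation.py above are normally driven by MutualInfoNeuralEstimation._process_activations; the sketch below reproduces that loop on toy data so the estimator can be exercised on its own. This is a hedged sketch: it assumes the mighty package is installed so the import path matches the file path in this record, and the toy tensors, the batch size of 256, and the five epochs are illustrative choices, not values taken from the source.

import torch
from mighty.monitor.mutual_info.neural_estimation import MINE_Net, MINE_Trainer

x = torch.randn(4096, 8)                    # X: an 8-dimensional Gaussian sample
y = x[:, :1] + 0.1 * torch.randn(4096, 1)   # Y is a noisy function of X, so I(X; Y) > 0

trainer = MINE_Trainer(MINE_Net(x_size=8, y_size=1, hidden_units=(100, 50)))
for _ in range(5):                                       # a few passes over shuffled mini-batches
    for idx in torch.randperm(len(x)).split(256):
        trainer.train_batch(x_batch=x[idx], y_batch=y[idx])
    trainer.scheduler.step()                             # decay the learning rate, as the monitor does
print(trainer.get_mutual_info())                         # smoothed lower bound on I(X; Y), in bits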
RussellM2020/RoboticTasks | [
"c7157c986cdbbf08cc0ea296205ef2dbcf6fc487"
] | [
"rllab/misc/instrument.py"
] | [
"import os\nimport re\nimport subprocess\nimport base64\nimport os.path as osp\nimport pickle as pickle\nimport inspect\nimport hashlib\nimport sys\nfrom contextlib import contextmanager\n\nimport errno\n\nfrom rllab.core.serializable import Serializable\nfrom rllab import config\nfrom rllab.misc.console import mkdir_p\nfrom rllab.misc import ext\nfrom io import StringIO\nimport datetime\nimport dateutil.tz\nimport json\nimport time\nimport numpy as np\n\nfrom rllab.misc.ext import AttrDict\nfrom rllab.viskit.core import flatten\nimport collections\n\n\nclass StubBase(object):\n def __getitem__(self, item):\n return StubMethodCall(self, \"__getitem__\", args=[item], kwargs=dict())\n\n def __getattr__(self, item):\n try:\n return super(self.__class__, self).__getattribute__(item)\n except AttributeError:\n if item.startswith(\"__\") and item.endswith(\"__\"):\n raise\n return StubAttr(self, item)\n\n def __pow__(self, power, modulo=None):\n return StubMethodCall(self, \"__pow__\", [power, modulo], dict())\n\n def __call__(self, *args, **kwargs):\n return StubMethodCall(self.obj, self.attr_name, args, kwargs)\n\n def __add__(self, other):\n return StubMethodCall(self, \"__add__\", [other], dict())\n\n def __rmul__(self, other):\n return StubMethodCall(self, \"__rmul__\", [other], dict())\n\n def __div__(self, other):\n return StubMethodCall(self, \"__div__\", [other], dict())\n\n def __rdiv__(self, other):\n return StubMethodCall(BinaryOp(), \"rdiv\", [self, other], dict()) # self, \"__rdiv__\", [other], dict())\n\n def __rpow__(self, power, modulo=None):\n return StubMethodCall(self, \"__rpow__\", [power, modulo], dict())\n\n\nclass BinaryOp(Serializable):\n def __init__(self):\n Serializable.quick_init(self, locals())\n\n def rdiv(self, a, b):\n return b / a\n # def __init__(self, opname, a, b):\n # self.opname = opname\n # self.a = a\n # self.b = b\n\n\nclass StubAttr(StubBase):\n def __init__(self, obj, attr_name):\n self.__dict__[\"_obj\"] = obj\n self.__dict__[\"_attr_name\"] = attr_name\n\n @property\n def obj(self):\n return self.__dict__[\"_obj\"]\n\n @property\n def attr_name(self):\n return self.__dict__[\"_attr_name\"]\n\n def __str__(self):\n return \"StubAttr(%s, %s)\" % (str(self.obj), str(self.attr_name))\n\n\nclass StubMethodCall(StubBase, Serializable):\n def __init__(self, obj, method_name, args, kwargs):\n self._serializable_initialized = False\n Serializable.quick_init(self, locals())\n self.obj = obj\n self.method_name = method_name\n self.args = args\n self.kwargs = kwargs\n\n def __str__(self):\n return \"StubMethodCall(%s, %s, %s, %s)\" % (\n str(self.obj), str(self.method_name), str(self.args), str(self.kwargs))\n\n\nclass StubClass(StubBase):\n def __init__(self, proxy_class):\n self.proxy_class = proxy_class\n\n def __call__(self, *args, **kwargs):\n if len(args) > 0:\n # Convert the positional arguments to keyword arguments\n spec = inspect.getargspec(self.proxy_class.__init__)\n kwargs = dict(list(zip(spec.args[1:], args)), **kwargs)\n args = tuple()\n return StubObject(self.proxy_class, *args, **kwargs)\n\n def __getstate__(self):\n return dict(proxy_class=self.proxy_class)\n\n def __setstate__(self, dict):\n self.proxy_class = dict[\"proxy_class\"]\n\n def __getattr__(self, item):\n if hasattr(self.proxy_class, item):\n return StubAttr(self, item)\n raise AttributeError\n\n def __str__(self):\n return \"StubClass(%s)\" % self.proxy_class\n\n\nclass StubObject(StubBase):\n def __init__(self, __proxy_class, *args, **kwargs):\n if len(args) > 0:\n spec = 
inspect.getargspec(__proxy_class.__init__)\n kwargs = dict(list(zip(spec.args[1:], args)), **kwargs)\n args = tuple()\n self.proxy_class = __proxy_class\n self.args = args\n self.kwargs = kwargs\n\n def __getstate__(self):\n return dict(args=self.args, kwargs=self.kwargs, proxy_class=self.proxy_class)\n\n def __setstate__(self, dict):\n self.args = dict[\"args\"]\n self.kwargs = dict[\"kwargs\"]\n self.proxy_class = dict[\"proxy_class\"]\n\n def __getattr__(self, item):\n # why doesnt the commented code work?\n # return StubAttr(self, item)\n # checks bypassed to allow for accesing instance fileds\n if hasattr(self.proxy_class, item):\n return StubAttr(self, item)\n raise AttributeError('Cannot get attribute %s from %s' % (item, self.proxy_class))\n\n def __str__(self):\n return \"StubObject(%s, *%s, **%s)\" % (str(self.proxy_class), str(self.args), str(self.kwargs))\n\n\nclass VariantDict(AttrDict):\n def __init__(self, d, hidden_keys):\n super(VariantDict, self).__init__(d)\n self._hidden_keys = hidden_keys\n\n def dump(self):\n return {k: v for k, v in self.items() if k not in self._hidden_keys}\n\n\nclass VariantGenerator(object):\n \"\"\"\n Usage:\n\n vg = VariantGenerator()\n vg.add(\"param1\", [1, 2, 3])\n vg.add(\"param2\", ['x', 'y'])\n vg.variants() => # all combinations of [1,2,3] x ['x','y']\n\n Supports noncyclic dependency among parameters:\n vg = VariantGenerator()\n vg.add(\"param1\", [1, 2, 3])\n vg.add(\"param2\", lambda param1: [param1+1, param1+2])\n vg.variants() => # ..\n \"\"\"\n\n def __init__(self):\n self._variants = []\n self._populate_variants()\n self._hidden_keys = []\n for k, vs, cfg in self._variants:\n if cfg.get(\"hide\", False):\n self._hidden_keys.append(k)\n\n def add(self, key, vals, **kwargs):\n self._variants.append((key, vals, kwargs))\n\n def _populate_variants(self):\n methods = inspect.getmembers(\n self.__class__, predicate=lambda x: inspect.isfunction(x) or inspect.ismethod(x))\n methods = [x[1].__get__(self, self.__class__)\n for x in methods if getattr(x[1], '__is_variant', False)]\n for m in methods:\n self.add(m.__name__, m, **getattr(m, \"__variant_config\", dict()))\n\n def variants(self, randomized=False):\n ret = list(self.ivariants())\n if randomized:\n np.random.shuffle(ret)\n return list(map(self.variant_dict, ret))\n\n def variant_dict(self, variant):\n return VariantDict(variant, self._hidden_keys)\n\n def to_name_suffix(self, variant):\n suffix = []\n for k, vs, cfg in self._variants:\n if not cfg.get(\"hide\", False):\n suffix.append(k + \"_\" + str(variant[k]))\n return \"_\".join(suffix)\n\n def ivariants(self):\n dependencies = list()\n for key, vals, _ in self._variants:\n if hasattr(vals, \"__call__\"):\n args = inspect.getargspec(vals).args\n if hasattr(vals, 'im_self') or hasattr(vals, \"__self__\"):\n # remove the first 'self' parameter\n args = args[1:]\n dependencies.append((key, set(args)))\n else:\n dependencies.append((key, set()))\n sorted_keys = []\n # topo sort all nodes\n while len(sorted_keys) < len(self._variants):\n # get all nodes with zero in-degree\n free_nodes = [k for k, v in dependencies if len(v) == 0]\n if len(free_nodes) == 0:\n error_msg = \"Invalid parameter dependency: \\n\"\n for k, v in dependencies:\n if len(v) > 0:\n error_msg += k + \" depends on \" + \" & \".join(v) + \"\\n\"\n raise ValueError(error_msg)\n dependencies = [(k, v)\n for k, v in dependencies if k not in free_nodes]\n # remove the free nodes from the remaining dependencies\n for _, v in dependencies:\n 
v.difference_update(free_nodes)\n sorted_keys += free_nodes\n return self._ivariants_sorted(sorted_keys)\n\n def _ivariants_sorted(self, sorted_keys):\n if len(sorted_keys) == 0:\n yield dict()\n else:\n first_keys = sorted_keys[:-1]\n first_variants = self._ivariants_sorted(first_keys)\n last_key = sorted_keys[-1]\n last_vals = [v for k, v, _ in self._variants if k == last_key][0]\n if hasattr(last_vals, \"__call__\"):\n last_val_keys = inspect.getargspec(last_vals).args\n if hasattr(last_vals, 'im_self') or hasattr(last_vals, '__self__'):\n last_val_keys = last_val_keys[1:]\n else:\n last_val_keys = None\n for variant in first_variants:\n if hasattr(last_vals, \"__call__\"):\n last_variants = last_vals(\n **{k: variant[k] for k in last_val_keys})\n for last_choice in last_variants:\n yield AttrDict(variant, **{last_key: last_choice})\n else:\n for last_choice in last_vals:\n yield AttrDict(variant, **{last_key: last_choice})\n\n\ndef variant(*args, **kwargs):\n def _variant(fn):\n fn.__is_variant = True\n fn.__variant_config = kwargs\n return fn\n\n if len(args) == 1 and isinstance(args[0], collections.Callable):\n return _variant(args[0])\n return _variant\n\n\ndef stub(glbs):\n # replace the __init__ method in all classes\n # hacky!!!\n for k, v in list(glbs.items()):\n # look at all variables that are instances of a class (not yet Stub)\n if isinstance(v, type) and v != StubClass:\n glbs[k] = StubClass(v) # and replaces them by a the same but Stub\n\n\ndef query_yes_no(question, default=\"yes\"):\n \"\"\"Ask a yes/no question via raw_input() and return their answer.\n\n \"question\" is a string that is presented to the user.\n \"default\" is the presumed answer if the user just hits <Enter>.\n It must be \"yes\" (the default), \"no\" or None (meaning\n an answer is required of the user).\n\n The \"answer\" return value is True for \"yes\" or False for \"no\".\n \"\"\"\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")\n\n\nexp_count = 0\nnow = datetime.datetime.now(dateutil.tz.tzlocal())\ntimestamp = now.strftime('%Y_%m_%d_%H_%M_%S')\nremote_confirmed = False\n\n\ndef run_experiment_lite(\n stub_method_call=None,\n batch_tasks=None,\n exp_prefix=\"experiment\",\n exp_name=None,\n log_dir=None,\n script=\"scripts/run_experiment_lite.py\",\n python_command=\"python\",\n mode=\"local\",\n dry=False,\n docker_image=None,\n aws_config=None,\n env=None,\n variant=None,\n use_gpu=False,\n sync_s3_pkl=False,\n sync_s3_png=False,\n sync_s3_log=False,\n sync_log_on_termination=True,\n confirm_remote=True,\n terminate_machine=True,\n periodic_sync=True,\n periodic_sync_interval=15,\n sync_all_data_node_to_s3=True,\n use_cloudpickle=None,\n pre_commands=None,\n added_project_directories=[],\n **kwargs):\n \"\"\"\n Serialize the stubbed method call and run the experiment using the specified mode.\n :param stub_method_call: A stubbed method call.\n :param script: The name of the entrance point python script\n :param mode: Where & how to run the experiment. 
Should be one of \"local\", \"local_docker\", \"ec2\",\n and \"lab_kube\".\n :param dry: Whether to do a dry-run, which only prints the commands without executing them.\n :param exp_prefix: Name prefix for the experiments\n :param docker_image: name of the docker image. Ignored if using local mode.\n :param aws_config: configuration for AWS. Only used under EC2 mode\n :param env: extra environment variables\n :param kwargs: All other parameters will be passed directly to the entrance python script.\n :param variant: If provided, should be a dictionary of parameters\n :param use_gpu: Whether the launched task is running on GPU. This triggers a few configuration changes including\n certain environment flags\n :param sync_s3_pkl: Whether to sync pkl files during execution of the experiment (they will always be synced at\n the end of the experiment)\n :param sync_s3_png: Whether to sync png files during execution of the experiment (they will always be synced at\n the end of the experiment)\n :param sync_s3_log: Whether to sync log files during execution of the experiment (they will always be synced at\n the end of the experiment)\n :param confirm_remote: Whether to confirm before launching experiments remotely\n :param terminate_machine: Whether to terminate machine after experiment finishes. Only used when using\n mode=\"ec2\". This is useful when one wants to debug after an experiment finishes abnormally.\n :param periodic_sync: Whether to synchronize certain experiment files periodically during execution.\n :param periodic_sync_interval: Time interval between each periodic sync, in seconds.\n \"\"\"\n assert stub_method_call is not None or batch_tasks is not None, \"Must provide at least either stub_method_call or batch_tasks\"\n\n\n\n \n if use_cloudpickle is None:\n for maybe_stub in (batch_tasks or [stub_method_call]):\n # decide mode\n if isinstance(maybe_stub, StubBase):\n use_cloudpickle = False\n else:\n assert hasattr(maybe_stub, '__call__')\n use_cloudpickle = True\n # ensure variant exists\n if variant is None:\n variant = dict()\n\n if batch_tasks is None:\n batch_tasks = [\n dict(\n kwargs,\n pre_commands=pre_commands,\n stub_method_call=stub_method_call,\n exp_name=exp_name,\n log_dir=log_dir,\n env=env,\n variant=variant,\n use_cloudpickle=use_cloudpickle\n )\n ]\n\n global exp_count\n global remote_confirmed\n config.USE_GPU = use_gpu\n\n # params_list = []\n\n for task in batch_tasks:\n call = task.pop(\"stub_method_call\")\n if use_cloudpickle:\n import cloudpickle\n data = base64.b64encode(cloudpickle.dumps(call)).decode(\"utf-8\")\n else:\n data = base64.b64encode(pickle.dumps(call)).decode(\"utf-8\")\n task[\"args_data\"] = data\n exp_count += 1\n params = dict(kwargs)\n if task.get(\"exp_name\", None) is None:\n task[\"exp_name\"] = \"%s_%s_%04d\" % (\n exp_prefix, timestamp, exp_count)\n if task.get(\"log_dir\", None) is None:\n task[\"log_dir\"] = config.LOG_DIR + \"/local/\" + \\\n exp_prefix.replace(\"_\", \"-\") + \"/\" + task[\"exp_name\"]\n if task.get(\"variant\", None) is not None:\n variant = task.pop(\"variant\")\n if \"exp_name\" not in variant:\n variant[\"exp_name\"] = task[\"exp_name\"]\n task[\"variant_data\"] = base64.b64encode(pickle.dumps(variant)).decode(\"utf-8\")\n elif \"variant\" in task:\n del task[\"variant\"]\n task[\"remote_log_dir\"] = osp.join(\n config.AWS_S3_PATH, exp_prefix.replace(\"_\", \"-\"), task[\"exp_name\"])\n task[\"env\"] = task.get(\"env\", dict()) or dict()\n task[\"env\"][\"RLLAB_USE_GPU\"] = str(use_gpu)\n\n if mode not in 
[\"local\", \"local_docker\"] and not remote_confirmed and not dry and confirm_remote:\n remote_confirmed = query_yes_no(\n \"Running in (non-dry) mode %s. Confirm?\" % mode)\n if not remote_confirmed:\n sys.exit(1)\n\n if hasattr(mode, \"__call__\"):\n if docker_image is None:\n docker_image = config.DOCKER_IMAGE\n mode(\n task,\n docker_image=docker_image,\n use_gpu=use_gpu,\n exp_prefix=exp_prefix,\n script=script,\n python_command=python_command,\n sync_s3_pkl=sync_s3_pkl,\n sync_log_on_termination=sync_log_on_termination,\n periodic_sync=periodic_sync,\n periodic_sync_interval=periodic_sync_interval,\n sync_all_data_node_to_s3=sync_all_data_node_to_s3,\n )\n elif mode == \"local\":\n for task in batch_tasks:\n del task[\"remote_log_dir\"]\n env = task.pop(\"env\", None)\n command = to_local_command(\n task,\n python_command=python_command,\n script=osp.join(config.PROJECT_PATH, script),\n use_gpu=use_gpu\n )\n print(command)\n if dry:\n return\n try:\n if env is None:\n env = dict()\n subprocess.call(\n command, shell=True, env=dict(os.environ, **env))\n except Exception as e:\n print(e)\n if isinstance(e, KeyboardInterrupt):\n raise\n elif mode == \"local_docker\":\n if docker_image is None:\n docker_image = config.DOCKER_IMAGE\n for task in batch_tasks:\n del task[\"remote_log_dir\"]\n env = task.pop(\"env\", None)\n command = to_docker_command(\n task, # these are the params. Pre and Post command can be here\n docker_image=docker_image,\n script=script,\n env=env,\n use_gpu=use_gpu,\n use_tty=True,\n python_command=python_command,\n )\n print(command)\n if dry:\n return\n p = subprocess.Popen(command, shell=True)\n try:\n p.wait()\n except KeyboardInterrupt:\n try:\n print(\"terminating\")\n p.terminate()\n except OSError:\n print(\"os error!\")\n pass\n p.wait()\n elif mode == \"ec2\":\n if docker_image is None:\n docker_image = config.DOCKER_IMAGE\n s3_code_path = s3_sync_code(config, dry=dry, added_project_directories=added_project_directories)\n launch_ec2(batch_tasks,\n exp_prefix=exp_prefix,\n docker_image=docker_image,\n python_command=python_command,\n script=script,\n aws_config=aws_config,\n dry=dry,\n terminate_machine=terminate_machine,\n use_gpu=use_gpu,\n code_full_path=s3_code_path,\n sync_s3_pkl=sync_s3_pkl,\n sync_s3_png=sync_s3_png,\n sync_s3_log=sync_s3_log,\n sync_log_on_termination=sync_log_on_termination,\n periodic_sync=periodic_sync,\n periodic_sync_interval=periodic_sync_interval)\n elif mode == \"lab_kube\":\n # assert env is None\n # first send code folder to s3\n s3_code_path = s3_sync_code(config, dry=dry)\n if docker_image is None:\n docker_image = config.DOCKER_IMAGE\n for task in batch_tasks:\n # if 'env' in task:\n # assert task.pop('env') is None\n # TODO: dangerous when there are multiple tasks?\n task[\"resources\"] = params.pop(\n \"resources\", config.KUBE_DEFAULT_RESOURCES)\n task[\"node_selector\"] = params.pop(\n \"node_selector\", config.KUBE_DEFAULT_NODE_SELECTOR)\n task[\"exp_prefix\"] = exp_prefix\n pod_dict = to_lab_kube_pod(\n task, code_full_path=s3_code_path, docker_image=docker_image, script=script, is_gpu=use_gpu,\n python_command=python_command,\n sync_s3_pkl=sync_s3_pkl, periodic_sync=periodic_sync,\n periodic_sync_interval=periodic_sync_interval,\n sync_all_data_node_to_s3=sync_all_data_node_to_s3,\n terminate_machine=terminate_machine,\n )\n pod_str = json.dumps(pod_dict, indent=1)\n if dry:\n print(pod_str)\n dir = \"{pod_dir}/{exp_prefix}\".format(\n pod_dir=config.POD_DIR, exp_prefix=exp_prefix)\n ensure_dir(dir)\n fname = 
\"{dir}/{exp_name}.json\".format(\n dir=dir,\n exp_name=task[\"exp_name\"]\n )\n with open(fname, \"w\") as fh:\n fh.write(pod_str)\n kubecmd = \"kubectl create -f %s\" % fname\n print(kubecmd)\n if dry:\n return\n retry_count = 0\n wait_interval = 1\n while retry_count <= 5:\n try:\n return_code = subprocess.call(kubecmd, shell=True)\n if return_code == 0:\n break\n retry_count += 1\n print(\"trying again...\")\n time.sleep(wait_interval)\n except Exception as e:\n if isinstance(e, KeyboardInterrupt):\n raise\n print(e)\n else:\n raise NotImplementedError\n\n\n_find_unsafe = re.compile(r'[a-zA-Z0-9_^@%+=:,./-]').search\n\n\ndef ensure_dir(dirname):\n \"\"\"\n Ensure that a named directory exists; if it does not, attempt to create it.\n \"\"\"\n try:\n os.makedirs(dirname)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n\n\ndef _shellquote(s):\n \"\"\"Return a shell-escaped version of the string *s*.\"\"\"\n if not s:\n return \"''\"\n\n if _find_unsafe(s) is None:\n return s\n\n # use single quotes, and put single quotes into double quotes\n # the string $'b is then quoted as '$'\"'\"'b'\n\n return \"'\" + s.replace(\"'\", \"'\\\"'\\\"'\") + \"'\"\n\n\ndef _to_param_val(v):\n if v is None:\n return \"\"\n elif isinstance(v, list):\n return \" \".join(map(_shellquote, list(map(str, v))))\n else:\n return _shellquote(str(v))\n\n\ndef to_local_command(params, python_command=\"python\", script=osp.join(config.PROJECT_PATH,\n 'scripts/run_experiment.py'),\n use_gpu=False):\n command = python_command + \" \" + script\n if use_gpu and not config.USE_TF:\n command = \"THEANO_FLAGS='device=gpu,dnn.enabled=auto,floatX=float32' \" + command\n for k, v in config.ENV.items():\n command = (\"%s=%s \" % (k, v)) + command\n pre_commands = params.pop(\"pre_commands\", None)\n post_commands = params.pop(\"post_commands\", None)\n if pre_commands is not None or post_commands is not None:\n print(\"Not executing the pre_commands: \", pre_commands, \", nor post_commands: \", post_commands)\n\n for k, v in params.items():\n if isinstance(v, dict):\n for nk, nv in v.items():\n if str(nk) == \"_name\":\n command += \" --%s %s\" % (k, _to_param_val(nv))\n else:\n command += \\\n \" --%s_%s %s\" % (k, nk, _to_param_val(nv))\n else:\n command += \" --%s %s\" % (k, _to_param_val(v))\n return command\n\n\ndef to_docker_command(params, docker_image, python_command=\"python\", script='scripts/run_experiment_lite.py',\n pre_commands=None, use_tty=False,\n mujoco_path=None,\n post_commands=None, dry=False, use_gpu=False, env=None, local_code_dir=None):\n \"\"\"\n :param params: The parameters for the experiment. 
If logging directory parameters are provided, we will create\n docker volume mapping to make sure that the logging files are created at the correct locations\n :param docker_image: docker image to run the command on\n :param script: script command for running experiment\n :return:\n \"\"\"\n log_dir = params.get(\"log_dir\")\n docker_args = params.pop(\"docker_args\", \"\")\n if pre_commands is None:\n pre_commands = params.pop(\"pre_commands\", None)\n if post_commands is None:\n post_commands = params.pop(\"post_commands\", None)\n if mujoco_path is None:\n mujoco_path = config.MUJOCO_KEY_PATH\n # script = 'rllab/' + script\n # if not dry:\n\n # create volume for logging directory\n if use_gpu:\n command_prefix = \"nvidia-docker run\"\n else:\n command_prefix = \"docker run\"\n docker_log_dir = config.DOCKER_LOG_DIR\n\n if env is None:\n env = dict()\n env = dict(\n env,\n AWS_ACCESS_KEY_ID=config.AWS_ACCESS_KEY,\n AWS_SECRET_ACCESS_KEY=config.AWS_ACCESS_SECRET,\n )\n if env is not None:\n for k, v in env.items():\n command_prefix += \" -e \\\"{k}={v}\\\"\".format(k=k, v=v)\n command_prefix += \" -v {local_mujoco_key_dir}:{docker_mujoco_key_dir}\".format(\n local_mujoco_key_dir=mujoco_path, docker_mujoco_key_dir='/root/.mujoco')\n command_prefix += \" -v {local_log_dir}:{docker_log_dir}\".format(\n local_log_dir=log_dir,\n docker_log_dir=docker_log_dir\n )\n command_prefix += docker_args\n if local_code_dir is None:\n local_code_dir = config.PROJECT_PATH\n command_prefix += \" -v {local_code_dir}:{docker_code_dir}\".format(\n local_code_dir=local_code_dir,\n docker_code_dir=config.DOCKER_CODE_DIR\n )\n params = dict(params, log_dir=docker_log_dir)\n if use_tty:\n command_prefix += \" -ti \" + docker_image + \" /bin/bash -c \"\n else:\n command_prefix += \" -i \" + docker_image + \" /bin/bash -c \"\n command_list = list()\n if pre_commands is not None:\n command_list.extend(pre_commands)\n command_list.append(\"echo \\\"Running in docker\\\"\")\n command_list.append(to_local_command(\n params, python_command=python_command, script=osp.join(config.DOCKER_CODE_DIR, script), use_gpu=use_gpu))\n # We for 2 min sleep after termination to allow for last syncs.\n if post_commands is None:\n post_commands = ['sleep 120']\n command_list.extend(post_commands)\n return command_prefix + \"'\" + \"; \".join(command_list) + \"'\"\n\n\ndef dedent(s):\n lines = [l.strip() for l in s.split('\\n')]\n return '\\n'.join(lines)\n\n\ndef launch_ec2(params_list, exp_prefix, docker_image, code_full_path,\n python_command=\"python\",\n script='scripts/run_experiment.py',\n aws_config=None, dry=False, terminate_machine=True, use_gpu=False, sync_s3_pkl=False,\n sync_s3_png=False,\n sync_s3_log=False,\n sync_log_on_termination=True,\n periodic_sync=True, periodic_sync_interval=15):\n if len(params_list) == 0:\n return\n\n default_config = dict(\n image_id=config.AWS_IMAGE_ID,\n instance_type=config.AWS_INSTANCE_TYPE,\n key_name=config.AWS_KEY_NAME,\n spot=config.AWS_SPOT,\n spot_price=config.AWS_SPOT_PRICE,\n iam_instance_profile_name=config.AWS_IAM_INSTANCE_PROFILE_NAME,\n security_groups=config.AWS_SECURITY_GROUPS,\n security_group_ids=config.AWS_SECURITY_GROUP_IDS,\n network_interfaces=config.AWS_NETWORK_INTERFACES,\n )\n\n if aws_config is None:\n aws_config = dict()\n aws_config = dict(default_config, **aws_config)\n\n sio = StringIO()\n sio.write(\"#!/bin/bash\\n\")\n sio.write(\"{\\n\")\n sio.write(\"\"\"\n die() { status=$1; shift; echo \"FATAL: $*\"; exit $status; }\n \"\"\")\n sio.write(\"\"\"\n 
EC2_INSTANCE_ID=\"`wget -q -O - http://169.254.169.254/latest/meta-data/instance-id`\"\n \"\"\")\n sio.write(\"\"\"\n aws ec2 create-tags --resources $EC2_INSTANCE_ID --tags Key=Name,Value={exp_name} --region {aws_region}\n \"\"\".format(exp_name=params_list[0].get(\"exp_name\"), aws_region=config.AWS_REGION_NAME))\n if config.LABEL:\n sio.write(\"\"\"\n aws ec2 create-tags --resources $EC2_INSTANCE_ID --tags Key=owner,Value={label} --region {aws_region}\n \"\"\".format(label=config.LABEL, aws_region=config.AWS_REGION_NAME))\n sio.write(\"\"\"\n aws ec2 create-tags --resources $EC2_INSTANCE_ID --tags Key=exp_prefix,Value={exp_prefix} --region {aws_region}\n \"\"\".format(exp_prefix=exp_prefix, aws_region=config.AWS_REGION_NAME))\n sio.write(\"\"\"\n service docker start\n \"\"\")\n sio.write(\"\"\"\n docker --config /home/ubuntu/.docker pull {docker_image}\n \"\"\".format(docker_image=docker_image))\n sio.write(\"\"\"\n export AWS_DEFAULT_REGION={aws_region}\n \"\"\".format(aws_region=config.AWS_REGION_NAME))\n if config.FAST_CODE_SYNC:\n # sio.write(\"\"\"\n # aws s3 cp {code_full_path} /tmp/rllab_code.tar.gz --region {aws_region}\n # \"\"\".format(code_full_path=code_full_path, local_code_path=config.DOCKER_CODE_DIR,\n # aws_region=config.AWS_REGION_NAME))\n sio.write(\"\"\"\n aws s3 cp {code_full_path} /tmp/rllab_code.tar.gz\n \"\"\".format(code_full_path=code_full_path, local_code_path=config.DOCKER_CODE_DIR))\n sio.write(\"\"\"\n mkdir -p {local_code_path}\n \"\"\".format(code_full_path=code_full_path, local_code_path=config.DOCKER_CODE_DIR,\n aws_region=config.AWS_REGION_NAME))\n sio.write(\"\"\"\n tar -zxvf /tmp/rllab_code.tar.gz -C {local_code_path}\n \"\"\".format(code_full_path=code_full_path, local_code_path=config.DOCKER_CODE_DIR,\n aws_region=config.AWS_REGION_NAME))\n else:\n # sio.write(\"\"\"\n # aws s3 cp --recursive {code_full_path} {local_code_path} --region {aws_region}\n # \"\"\".format(code_full_path=code_full_path, local_code_path=config.DOCKER_CODE_DIR,\n # aws_region=config.AWS_REGION_NAME))\n sio.write(\"\"\"\n aws s3 cp --recursive {code_full_path} {local_code_path}\n \"\"\".format(code_full_path=code_full_path, local_code_path=config.DOCKER_CODE_DIR))\n\n s3_mujoco_key_path = config.AWS_CODE_SYNC_S3_PATH + '/.mujoco/'\n # sio.write(\"\"\"\n # aws s3 cp --recursive {} {} --region {}\n # \"\"\".format(s3_mujoco_key_path, config.MUJOCO_KEY_PATH, config.AWS_REGION_NAME))\n sio.write(\"\"\"\n aws s3 cp --recursive {} {}\n \"\"\".format(s3_mujoco_key_path, config.MUJOCO_KEY_PATH))\n sio.write(\"\"\"\n cd {local_code_path}\n \"\"\".format(local_code_path=config.DOCKER_CODE_DIR))\n\n for params in params_list:\n log_dir = params.get(\"log_dir\")\n remote_log_dir = params.pop(\"remote_log_dir\")\n env = params.pop(\"env\", None)\n\n sio.write(\"\"\"\n aws ec2 create-tags --resources $EC2_INSTANCE_ID --tags Key=Name,Value={exp_name} --region {aws_region}\n \"\"\".format(exp_name=params.get(\"exp_name\"), aws_region=config.AWS_REGION_NAME))\n sio.write(\"\"\"\n mkdir -p {log_dir}\n \"\"\".format(log_dir=log_dir))\n if periodic_sync:\n include_png = \" --include '*.png' \" if sync_s3_png else \" \"\n include_pkl = \" --include '*.pkl' \" if sync_s3_pkl else \" \"\n include_log = \" --include '*.log' \" if sync_s3_log else \" \"\n # sio.write(\"\"\"\n # while /bin/true; do\n # aws s3 sync --exclude '*' {include_png} {include_pkl} {include_log}--include '*.csv' --include '*.json' {log_dir} {remote_log_dir} --region {aws_region}\n # sleep {periodic_sync_interval}\n # done & 
echo sync initiated\"\"\".format(include_png=include_png, include_pkl=include_pkl, include_log=include_log,\n # log_dir=log_dir, remote_log_dir=remote_log_dir,\n # aws_region=config.AWS_REGION_NAME,\n # periodic_sync_interval=periodic_sync_interval))\n sio.write(\"\"\"\n while /bin/true; do\n aws s3 sync --exclude '*' {include_png} {include_pkl} {include_log}--include '*.csv' --include '*.json' {log_dir} {remote_log_dir}\n sleep {periodic_sync_interval}\n done & echo sync initiated\"\"\".format(include_png=include_png, include_pkl=include_pkl, include_log=include_log,\n log_dir=log_dir, remote_log_dir=remote_log_dir,\n periodic_sync_interval=periodic_sync_interval))\n if sync_log_on_termination:\n # sio.write(\"\"\"\n # while /bin/true; do\n # if [ -z $(curl -Is http://169.254.169.254/latest/meta-data/spot/termination-time | head -1 | grep 404 | cut -d \\ -f 2) ]\n # then\n # logger \"Running shutdown hook.\"\n # aws s3 cp /home/ubuntu/user_data.log {remote_log_dir}/stdout.log --region {aws_region}\n # aws s3 cp --recursive {log_dir} {remote_log_dir} --region {aws_region}\n # break\n # else\n # # Spot instance not yet marked for termination.\n # sleep 5\n # fi\n # done & echo log sync initiated\n # \"\"\".format(log_dir=log_dir, remote_log_dir=remote_log_dir, aws_region=config.AWS_REGION_NAME))\n sio.write(\"\"\"\n while /bin/true; do\n if [ -z $(curl -Is http://169.254.169.254/latest/meta-data/spot/termination-time | head -1 | grep 404 | cut -d \\ -f 2) ]\n then\n logger \"Running shutdown hook.\"\n aws s3 cp /home/ubuntu/user_data.log {remote_log_dir}/stdout.log\n aws s3 cp --recursive {log_dir} {remote_log_dir}\n break\n else\n # Spot instance not yet marked for termination.\n sleep 5\n fi\n done & echo log sync initiated\n \"\"\".format(log_dir=log_dir, remote_log_dir=remote_log_dir))\n if use_gpu:\n sio.write(\"\"\"\n for i in {1..800}; do su -c \"nvidia-modprobe -u -c=0\" ubuntu && break || sleep 3; done\n systemctl start nvidia-docker\n \"\"\")\n sio.write(\"\"\"\n {command}\n \"\"\".format(command=to_docker_command(params, docker_image, python_command=python_command, script=script,\n use_gpu=use_gpu, env=env,\n local_code_dir=config.DOCKER_CODE_DIR)))\n # sio.write(\"\"\"\n # aws s3 cp --recursive {log_dir} {remote_log_dir} --region {aws_region}\n # \"\"\".format(log_dir=log_dir, remote_log_dir=remote_log_dir, aws_region=config.AWS_REGION_NAME))\n sio.write(\"\"\"\n aws s3 cp --recursive {log_dir} {remote_log_dir}\n \"\"\".format(log_dir=log_dir, remote_log_dir=remote_log_dir))\n # sio.write(\"\"\"\n # aws s3 cp /home/ubuntu/user_data.log {remote_log_dir}/stdout.log --region {aws_region}\n # \"\"\".format(remote_log_dir=remote_log_dir, aws_region=config.AWS_REGION_NAME))\n sio.write(\"\"\"\n aws s3 cp /home/ubuntu/user_data.log {remote_log_dir}/stdout.log\n \"\"\".format(remote_log_dir=remote_log_dir))\n\n if terminate_machine:\n sio.write(\"\"\"\n EC2_INSTANCE_ID=\"`wget -q -O - http://169.254.169.254/latest/meta-data/instance-id || die \\\"wget instance-id has failed: $?\\\"`\"\n aws ec2 terminate-instances --instance-ids $EC2_INSTANCE_ID --region {aws_region}\n \"\"\".format(aws_region=config.AWS_REGION_NAME))\n sio.write(\"} >> /home/ubuntu/user_data.log 2>&1\\n\")\n\n full_script = dedent(sio.getvalue())\n\n import boto3\n import botocore\n if aws_config[\"spot\"]:\n ec2 = boto3.client(\n \"ec2\",\n region_name=config.AWS_REGION_NAME,\n aws_access_key_id=config.AWS_ACCESS_KEY,\n aws_secret_access_key=config.AWS_ACCESS_SECRET,\n )\n else:\n ec2 = boto3.resource(\n \"ec2\",\n 
region_name=config.AWS_REGION_NAME,\n aws_access_key_id=config.AWS_ACCESS_KEY,\n aws_secret_access_key=config.AWS_ACCESS_SECRET,\n )\n\n if len(full_script) > 10000 or len(base64.b64encode(full_script.encode()).decode(\"utf-8\")) > 10000:\n # Script too long; need to upload script to s3 first.\n # We're being conservative here since the actual limit is 16384 bytes\n s3_path = upload_file_to_s3(full_script)\n sio = StringIO()\n sio.write(\"#!/bin/bash\\n\")\n sio.write(\"\"\"\n aws s3 cp {s3_path} /home/ubuntu/remote_script.sh --region {aws_region} && \\\\\n chmod +x /home/ubuntu/remote_script.sh && \\\\\n bash /home/ubuntu/remote_script.sh\n \"\"\".format(s3_path=s3_path, aws_region=config.AWS_REGION_NAME))\n user_data = dedent(sio.getvalue())\n else:\n user_data = full_script\n print(full_script)\n with open(\"/tmp/full_script\", \"w\") as f:\n f.write(full_script)\n\n instance_args = dict(\n ImageId=aws_config[\"image_id\"],\n KeyName=aws_config[\"key_name\"],\n UserData=user_data,\n InstanceType=aws_config[\"instance_type\"],\n EbsOptimized=config.EBS_OPTIMIZED,\n SecurityGroups=aws_config[\"security_groups\"],\n SecurityGroupIds=aws_config[\"security_group_ids\"],\n NetworkInterfaces=aws_config[\"network_interfaces\"],\n IamInstanceProfile=dict(\n Name=aws_config[\"iam_instance_profile_name\"],\n ),\n **config.AWS_EXTRA_CONFIGS,\n )\n\n if len(instance_args[\"NetworkInterfaces\"]) > 0:\n # disable_security_group = query_yes_no(\n # \"Cannot provide both network interfaces and security groups info. Do you want to disable security group settings?\",\n # default=\"yes\",\n # )\n disable_security_group = True\n if disable_security_group:\n instance_args.pop(\"SecurityGroups\")\n instance_args.pop(\"SecurityGroupIds\")\n\n if aws_config.get(\"placement\", None) is not None:\n instance_args[\"Placement\"] = aws_config[\"placement\"]\n if not aws_config[\"spot\"]:\n instance_args[\"MinCount\"] = 1\n instance_args[\"MaxCount\"] = 1\n print(\"************************************************************\")\n print(instance_args[\"UserData\"])\n print(\"************************************************************\")\n if aws_config[\"spot\"]:\n instance_args[\"UserData\"] = base64.b64encode(instance_args[\"UserData\"].encode()).decode(\"utf-8\")\n spot_args = dict(\n DryRun=dry,\n InstanceCount=1,\n LaunchSpecification=instance_args,\n SpotPrice=aws_config[\"spot_price\"],\n # ClientToken=params_list[0][\"exp_name\"],\n )\n import pprint\n pprint.pprint(spot_args)\n if not dry:\n response = ec2.request_spot_instances(**spot_args)\n print(response)\n spot_request_id = response['SpotInstanceRequests'][\n 0]['SpotInstanceRequestId']\n for _ in range(10):\n try:\n ec2.create_tags(\n Resources=[spot_request_id],\n Tags=[\n {'Key': 'Name', 'Value': params_list[0][\"exp_name\"]}\n ],\n )\n break\n except botocore.exceptions.ClientError:\n continue\n else:\n import pprint\n pprint.pprint(instance_args)\n ec2.create_instances(\n DryRun=dry,\n **instance_args\n )\n\n\nS3_CODE_PATH = None\n\n\ndef s3_sync_code(config, dry=False, added_project_directories=[]):\n global S3_CODE_PATH\n if S3_CODE_PATH is not None:\n return S3_CODE_PATH\n base = config.AWS_CODE_SYNC_S3_PATH\n has_git = True\n\n if config.FAST_CODE_SYNC:\n try:\n current_commit = subprocess.check_output(\n [\"git\", \"rev-parse\", \"HEAD\"]).strip().decode(\"utf-8\")\n except subprocess.CalledProcessError as _:\n print(\"Warning: failed to execute git commands\")\n current_commit = None\n\n file_name = str(timestamp) + \"_\" + hashlib.sha224(\n 
subprocess.check_output([\"pwd\"]) + str(current_commit).encode() + str(timestamp).encode()\n ).hexdigest() + \".tar.gz\"\n\n file_path = \"/tmp/\" + file_name\n\n tar_cmd = [\"tar\", \"-zcvf\", file_path, \"-C\", config.PROJECT_PATH]\n\n for pattern in config.FAST_CODE_SYNC_IGNORES:\n tar_cmd += [\"--exclude\", pattern]\n tar_cmd += [\"-h\", \".\"]\n\n for path in added_project_directories:\n tar_cmd.append(\"-C\")\n tar_cmd.append(path)\n tar_cmd += [\".\"]\n\n remote_path = \"%s/%s\" % (base, file_name)\n\n upload_cmd = [\"aws\", \"s3\", \"cp\", file_path, remote_path]\n\n mujoco_key_cmd = [\n \"aws\", \"s3\", \"sync\", config.MUJOCO_KEY_PATH, \"{}/.mujoco/\".format(base)]\n\n print(\" \".join(tar_cmd))\n print(\" \".join(upload_cmd))\n print(\" \".join(mujoco_key_cmd))\n\n if not dry:\n subprocess.check_call(tar_cmd)\n subprocess.check_call(upload_cmd)\n try:\n subprocess.check_call(mujoco_key_cmd)\n except Exception as e:\n print(e)\n\n S3_CODE_PATH = remote_path\n return remote_path\n else:\n try:\n current_commit = subprocess.check_output(\n [\"git\", \"rev-parse\", \"HEAD\"]).strip().decode(\"utf-8\")\n clean_state = len(\n subprocess.check_output([\"git\", \"status\", \"--porcelain\"])) == 0\n except subprocess.CalledProcessError as _:\n print(\"Warning: failed to execute git commands\")\n has_git = False\n dir_hash = base64.b64encode(subprocess.check_output([\"pwd\"])).decode(\"utf-8\")\n code_path = \"%s_%s\" % (\n dir_hash,\n (current_commit if clean_state else \"%s_dirty_%s\" % (current_commit, timestamp)) if\n has_git else timestamp\n )\n full_path = \"%s/%s\" % (base, code_path)\n cache_path = \"%s/%s\" % (base, dir_hash)\n cache_cmds = [\"aws\", \"s3\", \"cp\", \"--recursive\"] + \\\n flatten([\"--exclude\", \"%s\" % pattern] for pattern in config.CODE_SYNC_IGNORES) + \\\n [cache_path, full_path]\n cmds = [\"aws\", \"s3\", \"cp\", \"--recursive\"] + \\\n flatten([\"--exclude\", \"%s\" % pattern] for pattern in config.CODE_SYNC_IGNORES) + \\\n [\".\", full_path]\n caching_cmds = [\"aws\", \"s3\", \"cp\", \"--recursive\"] + \\\n flatten([\"--exclude\", \"%s\" % pattern] for pattern in config.CODE_SYNC_IGNORES) + \\\n [full_path, cache_path]\n mujoco_key_cmd = [\n \"aws\", \"s3\", \"sync\", config.MUJOCO_KEY_PATH, \"{}/.mujoco/\".format(base)]\n print(cache_cmds, cmds, caching_cmds, mujoco_key_cmd)\n if not dry:\n subprocess.check_call(cache_cmds)\n subprocess.check_call(cmds)\n subprocess.check_call(caching_cmds)\n try:\n subprocess.check_call(mujoco_key_cmd)\n except Exception:\n print('Unable to sync mujoco keys!')\n S3_CODE_PATH = full_path\n return full_path\n\n\ndef upload_file_to_s3(script_content):\n import tempfile\n import uuid\n f = tempfile.NamedTemporaryFile(delete=False)\n f.write(script_content.encode())\n f.close()\n remote_path = os.path.join(\n config.AWS_CODE_SYNC_S3_PATH, \"oversize_bash_scripts\", str(uuid.uuid4()))\n subprocess.check_call([\"aws\", \"s3\", \"cp\", f.name, remote_path])\n os.unlink(f.name)\n return remote_path\n\n\ndef to_lab_kube_pod(\n params, docker_image, code_full_path,\n python_command=\"python\",\n script='scripts/run_experiment.py',\n is_gpu=False,\n sync_s3_pkl=False,\n periodic_sync=True,\n periodic_sync_interval=15,\n sync_all_data_node_to_s3=False,\n terminate_machine=True\n):\n \"\"\"\n :param params: The parameters for the experiment. 
If logging directory parameters are provided, we will create\n docker volume mapping to make sure that the logging files are created at the correct locations\n :param docker_image: docker image to run the command on\n :param script: script command for running experiment\n :return:\n \"\"\"\n log_dir = params.get(\"log_dir\")\n remote_log_dir = params.pop(\"remote_log_dir\")\n resources = params.pop(\"resources\")\n node_selector = params.pop(\"node_selector\")\n exp_prefix = params.pop(\"exp_prefix\")\n\n kube_env = [\n {\"name\": k, \"value\": v}\n for k, v in (params.pop(\"env\", None) or dict()).items()\n ]\n mkdir_p(log_dir)\n pre_commands = list()\n pre_commands.append('mkdir -p ~/.aws')\n pre_commands.append('mkdir ~/.mujoco')\n # fetch credentials from the kubernetes secret file\n pre_commands.append('echo \"[default]\" >> ~/.aws/credentials')\n pre_commands.append(\n \"echo \\\"aws_access_key_id = %s\\\" >> ~/.aws/credentials\" % config.AWS_ACCESS_KEY)\n pre_commands.append(\n \"echo \\\"aws_secret_access_key = %s\\\" >> ~/.aws/credentials\" % config.AWS_ACCESS_SECRET)\n s3_mujoco_key_path = config.AWS_CODE_SYNC_S3_PATH + '/.mujoco/'\n pre_commands.append(\n 'aws s3 cp --recursive {} {}'.format(s3_mujoco_key_path, '~/.mujoco'))\n\n if config.FAST_CODE_SYNC:\n pre_commands.append('aws s3 cp %s /tmp/rllab_code.tar.gz' % code_full_path)\n pre_commands.append('mkdir -p %s' % config.DOCKER_CODE_DIR)\n pre_commands.append('tar -zxvf /tmp/rllab_code.tar.gz -C %s' % config.DOCKER_CODE_DIR)\n else:\n pre_commands.append('aws s3 cp --recursive %s %s' %\n (code_full_path, config.DOCKER_CODE_DIR))\n pre_commands.append('cd %s' % config.DOCKER_CODE_DIR)\n pre_commands.append('mkdir -p %s' %\n (log_dir))\n\n if sync_all_data_node_to_s3:\n print('Syncing all data from node to s3.')\n if periodic_sync:\n if sync_s3_pkl:\n pre_commands.append(\"\"\"\n while /bin/true; do\n aws s3 sync {log_dir} {remote_log_dir} --region {aws_region} --quiet\n sleep {periodic_sync_interval}\n done & echo sync initiated\"\"\".format(log_dir=log_dir, remote_log_dir=remote_log_dir,\n aws_region=config.AWS_REGION_NAME,\n periodic_sync_interval=periodic_sync_interval))\n else:\n pre_commands.append(\"\"\"\n while /bin/true; do\n aws s3 sync {log_dir} {remote_log_dir} --region {aws_region} --quiet\n sleep {periodic_sync_interval}\n done & echo sync initiated\"\"\".format(log_dir=log_dir, remote_log_dir=remote_log_dir,\n aws_region=config.AWS_REGION_NAME,\n periodic_sync_interval=periodic_sync_interval))\n else:\n if periodic_sync:\n if sync_s3_pkl:\n pre_commands.append(\"\"\"\n while /bin/true; do\n aws s3 sync --exclude '*' --include '*.csv' --include '*.json' --include '*.pkl' {log_dir} {remote_log_dir} --region {aws_region} --quiet\n sleep {periodic_sync_interval}\n done & echo sync initiated\"\"\".format(log_dir=log_dir, remote_log_dir=remote_log_dir,\n aws_region=config.AWS_REGION_NAME,\n periodic_sync_interval=periodic_sync_interval))\n else:\n pre_commands.append(\"\"\"\n while /bin/true; do\n aws s3 sync --exclude '*' --include '*.csv' --include '*.json' {log_dir} {remote_log_dir} --region {aws_region} --quiet\n sleep {periodic_sync_interval}\n done & echo sync initiated\"\"\".format(log_dir=log_dir, remote_log_dir=remote_log_dir,\n aws_region=config.AWS_REGION_NAME,\n periodic_sync_interval=periodic_sync_interval))\n # copy the file to s3 after execution\n post_commands = list()\n post_commands.append('aws s3 cp --recursive %s %s' %\n (log_dir,\n remote_log_dir))\n if not terminate_machine:\n 
post_commands.append('sleep infinity')\n command_list = list()\n if pre_commands is not None:\n command_list.extend(pre_commands)\n command_list.append(\"echo \\\"Running in docker\\\"\")\n command_list.append(\n \"%s 2>&1 | tee -a %s\" % (\n to_local_command(params, python_command=python_command, script=script),\n \"%s/stdouterr.log\" % log_dir\n )\n )\n if post_commands is not None:\n command_list.extend(post_commands)\n command = \"; \".join(command_list)\n pod_name = config.KUBE_PREFIX + params[\"exp_name\"]\n # underscore is not allowed in pod names\n pod_name = pod_name.replace(\"_\", \"-\")\n print(\"Is gpu: \", is_gpu)\n if not is_gpu:\n return {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"name\": pod_name,\n \"labels\": {\n \"owner\": config.LABEL,\n \"expt\": pod_name,\n \"exp_time\": timestamp,\n \"exp_prefix\": exp_prefix,\n },\n },\n \"spec\": {\n \"containers\": [\n {\n \"name\": \"foo\",\n \"image\": docker_image,\n \"command\": [\n \"/bin/bash\",\n \"-c\",\n \"-li\", # to load conda env file\n command,\n ],\n \"resources\": resources,\n \"imagePullPolicy\": \"Always\",\n }\n ],\n \"restartPolicy\": \"Never\",\n \"nodeSelector\": node_selector,\n \"dnsPolicy\": \"Default\",\n }\n }\n return {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"name\": pod_name,\n \"labels\": {\n \"owner\": config.LABEL,\n \"expt\": pod_name,\n \"exp_time\": timestamp,\n \"exp_prefix\": exp_prefix,\n },\n },\n \"spec\": {\n \"containers\": [\n {\n \"name\": \"foo\",\n \"image\": docker_image,\n \"env\": kube_env,\n \"command\": [\n \"/bin/bash\",\n \"-c\",\n \"-li\", # to load conda env file\n command,\n ],\n \"resources\": resources,\n \"imagePullPolicy\": \"Always\",\n # gpu specific\n \"volumeMounts\": [\n {\n \"name\": \"nvidia\",\n \"mountPath\": \"/usr/local/nvidia\",\n \"readOnly\": True,\n }\n ],\n \"securityContext\": {\n \"privileged\": True,\n }\n }\n ],\n \"volumes\": [\n {\n \"name\": \"nvidia\",\n \"hostPath\": {\n \"path\": \"/var/lib/docker/volumes/nvidia_driver_352.63/_data\",\n }\n }\n ],\n \"restartPolicy\": \"Never\",\n \"nodeSelector\": node_selector,\n \"dnsPolicy\": \"Default\",\n }\n }\n\n\ndef concretize(maybe_stub):\n if isinstance(maybe_stub, StubMethodCall):\n obj = concretize(maybe_stub.obj)\n method = getattr(obj, maybe_stub.method_name)\n args = concretize(maybe_stub.args)\n kwargs = concretize(maybe_stub.kwargs)\n return method(*args, **kwargs)\n elif isinstance(maybe_stub, StubClass):\n return maybe_stub.proxy_class\n elif isinstance(maybe_stub, StubAttr):\n obj = concretize(maybe_stub.obj)\n attr_name = maybe_stub.attr_name\n attr_val = getattr(obj, attr_name)\n return concretize(attr_val)\n elif isinstance(maybe_stub, StubObject):\n if not hasattr(maybe_stub, \"__stub_cache\"):\n args = concretize(maybe_stub.args)\n kwargs = concretize(maybe_stub.kwargs)\n try:\n maybe_stub.__stub_cache = maybe_stub.proxy_class(\n *args, **kwargs)\n except Exception as e:\n print((\"Error while instantiating %s\" % maybe_stub.proxy_class))\n import traceback\n traceback.print_exc()\n ret = maybe_stub.__stub_cache\n return ret\n elif isinstance(maybe_stub, dict):\n # make sure that there's no hidden caveat\n ret = dict()\n for k, v in maybe_stub.items():\n ret[concretize(k)] = concretize(v)\n return ret\n elif isinstance(maybe_stub, (list, tuple)):\n return maybe_stub.__class__(list(map(concretize, maybe_stub)))\n else:\n return maybe_stub\n"
] | [
[
"numpy.random.shuffle"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jlo118/DLlab2 | [
"01978907f48cfeb5cc406564a64454dc6b4f8485"
] | [
"Q2.py"
] | [
"import pandas\r\nfrom keras.models import Sequential\r\nfrom keras.layers.core import Dense, Activation\r\nfrom keras.callbacks import TensorBoard\r\n# load dataset\r\nfrom sklearn.model_selection import train_test_split\r\nimport pandas as pd\r\n\r\ndataset = pd.read_csv(\"framingham.csv\", header=None).values\r\nimport numpy as np\r\n\r\nX_train, X_test, Y_train, Y_test = train_test_split(dataset[:,0:15], dataset[:,15],\r\n test_size=0.33, random_state=87)\r\n\r\nnp.random.seed(100)\r\nnnokay = Sequential() # create model\r\nnnokay.add(Dense(20, input_dim=15, activation='tanh')) # hidden layer\r\nnnokay.add(Dense(30, activation='tanh')) #add whole layer\r\nnnokay.add(Dense(60, activation='tanh'))\r\nnnokay.add(Dense(20, activation='tanh'))\r\nnnokay.add(Dense(15, activation='tanh'))\r\nnnokay.add(Dense(60, activation='tanh'))\r\nnnokay.add(Dense(1, activation='tanh')) # output layer\r\nnnokay.compile(loss='binary_crossentropy', optimizer='sgd', metrics=['accuracy'])\r\n\r\nnnokay.fit(X_train, Y_train, epochs=250, verbose=0,\r\n callbacks=[TensorBoard(log_dir = '/tmp/auto')])\r\n#print(nnokay.summary())\r\n#print(nnokay.evaluate(X_test, Y_test, verbose=0))\r\n\r\nscore = nnokay.evaluate(X_test, Y_test)\r\nprint('test accuracy', score[1])\r\n\r\n"
] | [
[
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"numpy.random.seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
raoyongming/CAL | [
"76475ff56e399b276630d8bf3a4f5594803609a6"
] | [
"reid/modeling/baseline.py"
] | [
"import torch\nfrom torch import nn\nimport torch.nn.functional as F\nimport sys\n\nfrom .backbones.resnet import ResNet\nsys.path.append('.')\n\n\nEPSILON = 1e-12\n\n\ndef weights_init_kaiming(m):\n classname = m.__class__.__name__\n if classname.find('Linear') != -1:\n nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out')\n nn.init.constant_(m.bias, 0.0)\n elif classname.find('Conv') != -1:\n nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')\n if m.bias is not None:\n nn.init.constant_(m.bias, 0.0)\n elif classname.find('BatchNorm') != -1:\n if m.affine:\n nn.init.constant_(m.weight, 1.0)\n nn.init.constant_(m.bias, 0.0)\n\n\ndef weights_init_classifier(m):\n classname = m.__class__.__name__\n if classname.find('Linear') != -1:\n nn.init.normal_(m.weight, std=0.001)\n if m.bias:\n nn.init.constant_(m.bias, 0.0)\n\nclass BasicConv2d(nn.Module):\n\n def __init__(self, in_channels, out_channels, **kwargs):\n super(BasicConv2d, self).__init__()\n self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)\n self.bn = nn.BatchNorm2d(out_channels, eps=0.001)\n\n def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n return F.relu(x, inplace=True)\n\n\n\nclass SELayer(nn.Module):\n def __init__(self, channel, reduction=16):\n super(SELayer, self).__init__()\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.fc = nn.Sequential(\n nn.Linear(channel, channel // reduction, bias=False),\n nn.ReLU(inplace=True),\n nn.Linear(channel // reduction, channel, bias=False),\n nn.Sigmoid()\n )\n\n def forward(self, x):\n b, c, _, _ = x.size()\n y = self.avg_pool(x).view(b, c)\n y = self.fc(y).view(b, c, 1, 1)\n return y\n\n\nclass BAP(nn.Module):\n def __init__(self, pool='GAP'):\n super(BAP, self).__init__()\n assert pool in ['GAP', 'GMP']\n if pool == 'GAP':\n self.pool = None\n else:\n self.pool = nn.AdaptiveMaxPool2d(1)\n\n def forward(self, features, attentions, counterfactual=False):\n B, C, H, W = features.size()\n _, M, AH, AW = attentions.size()\n\n # match size\n if AH != H or AW != W:\n attentions = F.upsample_bilinear(attentions, size=(H, W))\n\n # feature_matrix: (B, M, C) -> (B, M * C)\n if self.pool is None:\n feature_matrix = (torch.einsum('imjk,injk->imn', (attentions, features)) / float(H * W)).view(B, -1)\n else:\n feature_matrix = []\n for i in range(M):\n AiF = self.pool(features * attentions[:, i:i + 1, ...]).view(B, -1)\n feature_matrix.append(AiF)\n feature_matrix = torch.cat(feature_matrix, dim=1)\n\n # sign-sqrt\n feature_matrix_raw = torch.sign(feature_matrix) * torch.sqrt(torch.abs(feature_matrix) + EPSILON)\n\n # l2 normalization along dimension M and C\n feature_matrix = F.normalize(feature_matrix_raw, dim=-1)\n\n if counterfactual:\n if self.training:\n fake_att = torch.zeros_like(attentions).uniform_(0, 2)\n else:\n fake_att = torch.ones_like(attentions)\n # mean_feature = features.mean(3).mean(2).view(B, 1, C)\n # counterfactual_feature = mean_feature.expand(B, M, C).contiguous().view(B, -1)\n counterfactual_feature = (torch.einsum('imjk,injk->imn', (fake_att, features)) / float(H * W)).view(B, -1)\n\n counterfactual_feature = torch.sign(counterfactual_feature) * torch.sqrt(torch.abs(counterfactual_feature) + EPSILON)\n\n counterfactual_feature = F.normalize(counterfactual_feature, dim=-1)\n return feature_matrix, counterfactual_feature\n else:\n return feature_matrix\n\nclass MultiHeadAtt(nn.Module):\n \"\"\"\n Extend the channel attention into MultiHeadAtt. \n It is modified from \"Zhang H, Wu C, Zhang Z, et al. 
Resnest: Split-attention networks.\" \n \"\"\"\n def __init__(self, in_channels, channels,\n radix=4, reduction_factor=4,\n rectify=False, norm_layer=nn.BatchNorm2d):\n super(MultiHeadAtt, self).__init__()\n\n inter_channels = max(in_channels*radix//reduction_factor, 32)\n self.radix = radix\n self.channels = channels\n \n self.relu = nn.ReLU(inplace=True)\n self.fc1 = nn.Conv2d(channels, inter_channels, 1, groups=1)\n self.bn1 = norm_layer(inter_channels)\n self.fc2 = nn.Conv2d(inter_channels, channels*radix, 1, groups=1)\n\n\n def forward(self, x):\n batch, channel = x.shape[:2]\n splited = torch.split(x, channel//self.radix, dim=1)\n gap = sum(splited)\n gap = F.adaptive_avg_pool2d(gap, 1)\n gap = self.fc1(gap)\n gap = self.bn1(gap)\n gap = self.relu(gap)\n\n atten = self.fc2(gap).view((batch, self.radix, self.channels))\n atten = F.softmax(atten, dim=1).view(batch, -1, 1, 1)\n atten = torch.split(atten, channel//self.radix, dim=1)\n\n out= torch.cat([att*split for (att, split) in zip(atten, splited)],1)\n return out.contiguous()\n\n\nclass BN2d(nn.Module):\n def __init__(self, planes):\n super(BN2d, self).__init__()\n self.bottleneck2 = nn.BatchNorm2d(planes)\n self.bottleneck2.bias.requires_grad_(False) # no shift\n self.bottleneck2.apply(weights_init_kaiming)\n\n def forward(self, x):\n return self.bottleneck2(x)\n\n\n\n\nclass Baseline(nn.Module):\n in_planes = 2048\n\n def __init__(self, num_classes, last_stride, model_path, using_cal):\n super(Baseline, self).__init__()\n self.using_cal = using_cal\n self.base = ResNet(last_stride)\n self.base.load_param(model_path)\n self.radix = 2\n self.base_1 = nn.Sequential(*list(self.base.children())[0:3])\n self.BN1 = BN2d(64)\n self.att1 = SELayer(64,8)\n self.att_s1=MultiHeadAtt(64,int(64/self.radix),radix=self.radix)\n self.base_2 = nn.Sequential(*list(self.base.children())[3:4])\n self.BN2 = BN2d(256)\n self.att2 = SELayer(256,32)\n self.att_s2=MultiHeadAtt(256,int(256/self.radix),radix=self.radix)\n self.base_3 = nn.Sequential(*list(self.base.children())[4:5])\n self.BN3 = BN2d(512)\n self.att3 = SELayer(512,64)\n self.att_s3 = MultiHeadAtt(512,int(512/self.radix),radix=self.radix)\n self.base_4 = nn.Sequential(*list(self.base.children())[5:6])\n self.BN4 = BN2d(1024)\n self.att4 = SELayer(1024,128)\n self.att_s4=MultiHeadAtt(1024,int(1024/self.radix),radix=self.radix)\n self.base_5 = nn.Sequential(*list(self.base.children())[6:])\n self.BN5 = BN2d(2048)\n self.att5 = SELayer(2048,256)\n self.att_s5=MultiHeadAtt(2048,int(2048/self.radix),radix=self.radix)\n\n self.M = 8\n\n self.attentions = BasicConv2d(2048, self.M, kernel_size=1)\n self.bap = BAP(pool='GAP')\n\n self.gap = nn.AdaptiveAvgPool2d(1)\n\n self.num_classes = num_classes\n\n self.bottleneck = nn.BatchNorm1d(self.in_planes)\n self.bottleneck.bias.requires_grad_(False) # no shift\n self.bottleneck.apply(weights_init_kaiming)\n\n\n self.classifier = nn.Linear(self.in_planes, self.num_classes, bias=False)\n self.classifier_bap = nn.Linear(self.in_planes*self.M, self.in_planes, bias=False)\n\n self.classifier.apply(weights_init_classifier)\n self.classifier_bap.apply(weights_init_classifier)\n\n \n def forward(self, x):\n\n ############\n x_1 = self.base_1(x)\n x_1 = self.att_s1(x_1)\n x_1 = self.BN1(x_1)\n y_1 = self.att1(x_1)\n x_att1=x_1*y_1.expand_as(x_1)\n\n\n x_2 = self.base_2(x_att1)\n x_2 = self.att_s2(x_2)\n x_2 = self.BN2(x_2)\n y_2 = self.att2(x_2)\n x_att2=x_2*y_2.expand_as(x_2)\n\n x_3 = self.base_3(x_att2)\n x_3 = self.att_s3(x_3)\n x_3 = self.BN3(x_3)\n y_3 = 
self.att3(x_3)\n x_att3=x_3*y_3.expand_as(x_3)\n\n x_4 = self.base_4(x_att3)\n x_4 = self.att_s4(x_4)\n x_4 = self.BN4(x_4)\n y_4 = self.att4(x_4)\n x_att4=x_4*y_4.expand_as(x_4)\n\n x_5 = self.base_5(x_att4)\n x_5 = self.att_s5(x_5)\n x_5 = self.BN5(x_5)\n y_5 = self.att5(x_5)\n x=x_5*y_5.expand_as(x_5) \n ############\n\n # x = self.base(x) replace above with this to use base network\n\n attention_maps = self.attentions(x)\n\n \n\n global_feat,global_feat_hat = self.bap(x, attention_maps,counterfactual=True)\n global_feat = global_feat.view(global_feat.shape[0], -1)\n global_feat_hat = global_feat_hat.view(global_feat.shape[0], -1)\n\n global_feat = self.classifier_bap(global_feat)\n global_feat_hat = self.classifier_bap(global_feat_hat)\n \n \n feat_hat = self.bottleneck(global_feat_hat)\n feat = self.bottleneck(global_feat) # normalize for angular softmax\n\n cls_score = self.classifier(feat)\n cls_score_hat = self.classifier(feat_hat)\n\n if self.training:\n if self.using_cal: \n return cls_score, cls_score-cls_score_hat, global_feat # global feature for triplet loss\n else:\n return cls_score, global_feat\n else:\n return cls_score\n"
] | [
[
"torch.abs",
"torch.nn.functional.softmax",
"torch.cat",
"torch.sign",
"torch.split",
"torch.nn.AdaptiveMaxPool2d",
"torch.einsum",
"torch.nn.Sigmoid",
"torch.nn.functional.relu",
"torch.ones_like",
"torch.nn.BatchNorm1d",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.zeros_like",
"torch.nn.functional.adaptive_avg_pool2d",
"torch.nn.Linear",
"torch.nn.init.normal_",
"torch.nn.BatchNorm2d",
"torch.nn.functional.upsample_bilinear",
"torch.nn.functional.normalize",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.ReLU",
"torch.nn.init.kaiming_normal_"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
cheewoei1997/sentiment-analysis | [
"e936824de57a8cd40586a1a19145c6205b6c0843"
] | [
"sample_application/__init__.py"
] | [
"from flask import Flask, render_template, flash, request\nfrom flask_bootstrap import Bootstrap\nfrom flask_appconfig import AppConfig\nfrom flask_wtf import Form, RecaptchaField\nfrom flask_wtf.file import FileField\nfrom wtforms import TextField, HiddenField, ValidationError, RadioField,\\\n BooleanField, SubmitField, IntegerField, FormField, validators\nfrom wtforms.validators import Required\n\nimport nltk\nfrom nltk.corpus import stopwords\n# from nltk.classify import SklearnClassifier\nfrom nltk.classify import NaiveBayesClassifier\nfrom nltk.collocations import BigramCollocationFinder\n\nimport sklearn\nfrom nltk.classify.scikitlearn import SklearnClassifier\nfrom sklearn.svm import SVC, LinearSVC, NuSVC\nfrom sklearn.naive_bayes import MultinomialNB, BernoulliNB\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score\n\nimport os\nfrom random import shuffle\n\nnltk.download('punkt')\n\n\n# from analyser import set_data\n\n\nclass SentimentForm(Form):\n sentence = TextField('Type your sentence here', validators=[Required()])\n classifier = RadioField('This is a radio field', choices=[\n ('lsvc', 'LinearSVC'),\n ('bernb', 'BernoulliNB'),\n ('multi', 'Multinomial'),\n ('logreg', 'Logistic Regression'),\n ('svc', 'SVC'),\n ])\n\n submit_button = SubmitField('Submit')\n\n\ndef create_app(configfile=None):\n app = Flask(__name__)\n AppConfig(app, configfile) # Flask-Appconfig is not necessary, but\n # highly recommend =)\n # https://github.com/mbr/flask-appconfig\n Bootstrap(app)\n\n # in a real app, these should be configured through Flask-Appconfig\n app.config['SECRET_KEY'] = 'devkey'\n app.config['RECAPTCHA_PUBLIC_KEY'] = \\\n '6Lfol9cSAAAAADAkodaYl9wvQCwBMr3qGR_PPHcw'\n\n \n @app.route('/', methods=('GET', 'POST'))\n def index():\n # form = ExampleForm()\n form = SentimentForm()\n form.validate_on_submit() # to get error messages to the browser\n # flash('critical message', 'critical')\n # flash('error message', 'error')\n # flash('warning message', 'warning')\n # flash('info message', 'info')\n # flash('debug message', 'debug')\n # flash('different message', 'different')\n # flash('uncategorized message')\n sentences = ['the show is not only great, but also fantastic and a masterpiece',\n 'today is definitely a day for walking the dog',]\n\n\n if form.validate_on_submit():\n if request.method == 'POST':\n # switch out request.form with the 20 sentences\n result = request.form\n input_sentence = set_data(result)\n train_data = get_dataset(input_sentence)\n\n choice = result['classifier']\n choice_dict = {\n 'bernb': 'Bernoulli Naive Bayes',\n 'multi': 'Multinomial Naive Bayes',\n 'logreg': 'Logistic Regression',\n 'svc': 'Support Vector Classifier',\n 'lsvc': 'Linear Support Vector Classifier',\n }\n\n if choice == 'bernb':\n stats = set_classifier(BernoulliNB(), train_data, input_sentence)\n elif choice == 'multi':\n stats = set_classifier(MultinomialNB(), train_data, input_sentence)\n elif choice == 'logreg':\n stats = set_classifier(LogisticRegression(), train_data, input_sentence)\n elif choice == 'svc':\n stats = set_classifier(SVC(), train_data, input_sentence)\n elif choice == 'lsvc':\n stats = set_classifier(LinearSVC(), train_data, input_sentence)\n else:\n print('Something went terribly wrong')\n\n stats_dict = {\n 'posPercent': stats[0],\n 'negPercent': stats[1],\n 'pos': stats[2],\n 'neg': stats[3],\n 'sentence': result['sentence'],\n 'train_data': train_data,\n 'choice': choice_dict[str(choice)],\n }\n\n return 
render_template('result.html', context=stats_dict)\n \n else:\n print('ELSEEEE')\n print(request.form)\n # print(form.csrf_token)\n return render_template('error.html', form=form) \n\n return render_template('index.html', form=form)\n\n\n # @app.route('/result/')\n # def result():\n # print('Hola this is result')\n # return render_template('result.html')\n\n\n return app\n\n\ndef word_feats(words):\n return dict([(words, True)])\n\n\ndef set_data(requested):\n sentence = requested['sentence']\n target = sentence.lower()\n target = nltk.word_tokenize(target)\n return target\n\n\ndef get_dataset(target):\n # Loads the positive and negative words\n pos_words = open(os.path.join('datasets', 'positive-words.txt'), 'r').read()\n neg_words = open(os.path.join('datasets', 'negative-words.txt'), 'r').read()\n\n # Tokenize the words\n pos_words = nltk.word_tokenize(pos_words)\n neg_words = nltk.word_tokenize(neg_words)\n shuffle(pos_words)\n shuffle(neg_words)\n neg_words = neg_words[:2139]\n\n # Keep both positive and negative into posneg\n posneg = pos_words + neg_words\n\n neu_words = []\n [neu_words.append(neu) for neu in target if neu not in posneg]\n\n positive_features = [(word_feats(pos), 'pos') for pos in pos_words]\n negative_features = [(word_feats(neg), 'neg') for neg in neg_words]\n neutral_features = [(word_feats(neu.lower()), 'neu') for neu in neu_words]\n\n print('Positive feats:', len(positive_features))\n print('Negative feats:', len(negative_features))\n print('Neutral feats:', neutral_features)\n\n train_set = positive_features + negative_features + neutral_features\n return train_set\n\n\ndef set_classifier(chosen_classifier, train_set, sentence):\n classifier = SklearnClassifier(chosen_classifier)\n classifier.train(train_set)\n\n neg = 0\n pos = 0\n print('set_classifier', sentence)\n\n for word in sentence:\n classResult = classifier.classify(word_feats(word))\n print(word_feats(word))\n print(classResult)\n if classResult == 'neg':\n neg = neg + 1\n if classResult == 'pos':\n pos = pos + 1\n\n posPercent = str(float(pos)/len(sentence))\n negPercent = str(float(neg)/len(sentence))\n \n # print ('Accuracy:', nltk.classify.util.accuracy(classifier, sentence))\n # classifier.show_most_informative_features()\n # print('Score:', score)\n\n print('Positive: ' + posPercent)\n print('Negative: ' + negPercent)\n print('Pos', pos)\n print('Neg', neg)\n\n return posPercent, negPercent, pos, neg\n \n\nif __name__ == '__main__':\n create_app().run(debug=True)\n\n\n# ==============================================================================\n\n\n# from flask import Flask, render_template, flash, request\n# from flask_bootstrap import Bootstrap\n# from flask_appconfig import AppConfig\n# from flask_wtf import Form, RecaptchaField\n# from flask_wtf.file import FileField\n# from wtforms import TextField, HiddenField, ValidationError, RadioField,\\\n# BooleanField, SubmitField, IntegerField, FormField, validators\n# from wtforms.validators import Required\n\n# import nltk\n# from nltk.corpus import stopwords\n# # from nltk.classify import SklearnClassifier\n# from nltk.classify import NaiveBayesClassifier\n# from nltk.collocations import BigramCollocationFinder\n\n# import sklearn\n# from nltk.classify.scikitlearn import SklearnClassifier\n# from sklearn.svm import SVC, LinearSVC, NuSVC\n# from sklearn.naive_bayes import MultinomialNB, BernoulliNB\n# from sklearn.linear_model import LogisticRegression\n# from sklearn.metrics import accuracy_score\n\n# import os\n# from random import 
shuffle\n\n# nltk.download('punkt')\n\n\n# # from analyser import set_data\n\n\n# class SentimentForm(Form):\n# sentence = TextField('Type your sentence here', validators=[Required()])\n# classifier = RadioField('This is a radio field', choices=[\n# ('lsvc', 'LinearSVC'),\n# ('bernb', 'BernoulliNB'),\n# ('multi', 'Multinomial'),\n# ('logreg', 'Logistic Regression'),\n# ('svc', 'SVC'),\n# ])\n\n# submit_button = SubmitField('Submit')\n\n\n# def create_app(configfile=None):\n# app = Flask(__name__)\n# AppConfig(app, configfile) # Flask-Appconfig is not necessary, but\n# # highly recommend =)\n# # https://github.com/mbr/flask-appconfig\n# Bootstrap(app)\n\n# # in a real app, these should be configured through Flask-Appconfig\n# app.config['SECRET_KEY'] = 'devkey'\n# app.config['RECAPTCHA_PUBLIC_KEY'] = \\\n# '6Lfol9cSAAAAADAkodaYl9wvQCwBMr3qGR_PPHcw'\n\n \n# @app.route('/', methods=('GET', 'POST'))\n# def index():\n# # form = ExampleForm()\n# form = SentimentForm()\n# form.validate_on_submit() # to get error messages to the browser\n# # flash('critical message', 'critical')\n# # flash('error message', 'error')\n# # flash('warning message', 'warning')\n# # flash('info message', 'info')\n# # flash('debug message', 'debug')\n# # flash('different message', 'different')\n# # flash('uncategorized message')\n\n# if form.validate_on_submit():\n# if request.method == 'POST':\n# # switch out request.form with the 20 sentences\n# result = request.form\n# input_sentence = set_data(result)\n# train_data = get_dataset(input_sentence)\n\n# choice = result['classifier']\n# choice_dict = {\n# 'bernb': 'Bernoulli Naive Bayes',\n# 'multi': 'Multinomial Naive Bayes',\n# 'logreg': 'Logistic Regression',\n# 'svc': 'Support Vector Classifier',\n# 'lsvc': 'Linear Support Vector Classifier',\n# }\n\n# if choice == 'bernb':\n# stats = set_classifier(BernoulliNB(), train_data, input_sentence)\n# elif choice == 'multi':\n# stats = set_classifier(MultinomialNB(), train_data, input_sentence)\n# elif choice == 'logreg':\n# stats = set_classifier(LogisticRegression(), train_data, input_sentence)\n# elif choice == 'svc':\n# stats = set_classifier(SVC(), train_data, input_sentence)\n# elif choice == 'lsvc':\n# stats = set_classifier(LinearSVC(), train_data, input_sentence)\n# else:\n# print('Something went terribly wrong')\n\n# stats_dict = {\n# 'posPercent': stats[0],\n# 'negPercent': stats[1],\n# 'pos': stats[2],\n# 'neg': stats[3],\n# 'sentence': result['sentence'],\n# 'train_data': train_data,\n# 'choice': choice_dict[str(choice)],\n# }\n\n# return render_template('result.html', context=stats_dict)\n \n# else:\n# print('ELSEEEE')\n# print(request.form)\n# # print(form.csrf_token)\n# return render_template('error.html', form=form) \n\n# return render_template('index.html', form=form)\n\n\n# # @app.route('/result/')\n# # def result():\n# # print('Hola this is result')\n# # return render_template('result.html')\n\n\n# return app\n\n\n# def word_feats(words):\n# return dict([(words, True)])\n\n\n# def set_data(requested):\n# sentence = requested['sentence']\n# target = sentence.lower()\n# target = nltk.word_tokenize(target)\n# return target\n\n\n# def get_dataset(target):\n# # Loads the positive and negative words\n# pos_words = open(os.path.join('datasets', 'positive-words.txt'), 'r').read()\n# neg_words = open(os.path.join('datasets', 'negative-words.txt'), 'r').read()\n\n# # Tokenize the words\n# pos_words = nltk.word_tokenize(pos_words)\n# neg_words = nltk.word_tokenize(neg_words)\n# shuffle(pos_words)\n# 
shuffle(neg_words)\n# neg_words = neg_words[:2139]\n\n# # Keep both positive and negative into posneg\n# posneg = pos_words + neg_words\n\n# neu_words = []\n# [neu_words.append(neu) for neu in target if neu not in posneg]\n\n# positive_features = [(word_feats(pos), 'pos') for pos in pos_words]\n# negative_features = [(word_feats(neg), 'neg') for neg in neg_words]\n# neutral_features = [(word_feats(neu.lower()), 'neu') for neu in neu_words]\n\n# print('Positive feats:', len(positive_features))\n# print('Negative feats:', len(negative_features))\n# print('Neutral feats:', neutral_features)\n\n# train_set = positive_features + negative_features + neutral_features\n# return train_set\n\n\n# def set_classifier(chosen_classifier, train_set, sentence):\n# classifier = SklearnClassifier(chosen_classifier)\n# classifier.train(train_set)\n\n# neg = 0\n# pos = 0\n# print('set_classifier', sentence)\n\n# for word in sentence:\n# classResult = classifier.classify(word_feats(word))\n# print(word_feats(word))\n# print(classResult)\n# if classResult == 'neg':\n# neg = neg + 1\n# if classResult == 'pos':\n# pos = pos + 1\n\n# posPercent = str(float(pos)/len(sentence))\n# negPercent = str(float(neg)/len(sentence))\n \n# # print ('Accuracy:', nltk.classify.util.accuracy(classifier, sentence))\n# # classifier.show_most_informative_features()\n# # print('Score:', score)\n\n# print('Positive: ' + posPercent)\n# print('Negative: ' + negPercent)\n# print('Pos', pos)\n# print('Neg', neg)\n\n# return posPercent, negPercent, pos, neg\n \n\n# if __name__ == '__main__':\n# create_app().run(debug=True)\n\n"
] | [
[
"sklearn.linear_model.LogisticRegression",
"sklearn.naive_bayes.MultinomialNB",
"sklearn.naive_bayes.BernoulliNB",
"sklearn.svm.SVC",
"sklearn.svm.LinearSVC"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
semitable/multiagent-particle-envs | [
"2cef12f72a9192a819ef289646526801c39fb909",
"2cef12f72a9192a819ef289646526801c39fb909"
] | [
"mpe/environment.py",
"mpe/scenarios/climbing_spread.py"
] | [
"import gym\nfrom gym import spaces\nfrom gym.envs.registration import EnvSpec\nimport numpy as np\nfrom mpe.multi_discrete import MultiDiscrete\nimport copy\n\n# environment for all agents in the multiagent world\n# currently code assumes that no agents will be created/destroyed at runtime!\nclass MultiAgentEnv(gym.Env):\n metadata = {\n 'render.modes' : ['human', 'rgb_array']\n }\n\n def __init__(self, world, reset_callback=None, reward_callback=None,\n observation_callback=None, info_callback=None,\n done_callback=None, shared_viewer=True):\n\n world = copy.deepcopy(world)\n self.world = world\n self.agents = self.world.policy_agents\n # set required vectorized gym env property\n self.n = len(world.policy_agents)\n # scenario callbacks\n self.reset_callback = reset_callback\n self.reward_callback = reward_callback\n self.observation_callback = observation_callback\n self.info_callback = info_callback\n self.done_callback = done_callback\n # environment parameters\n self.discrete_action_space = True\n # if true, action is a number 0...N, otherwise action is a one-hot N-dimensional vector\n self.discrete_action_input = False\n # if true, even the action is continuous, action will be performed discretely\n self.force_discrete_action = world.discrete_action if hasattr(world, 'discrete_action') else False\n # if true, every agent has the same reward\n self.shared_reward = world.collaborative if hasattr(world, 'collaborative') else False\n self.time = 0\n\n # configure spaces\n self.action_space = []\n self.observation_space = []\n for agent in self.agents:\n total_action_space = []\n # physical action space\n if self.discrete_action_space:\n u_action_space = spaces.Discrete(world.dim_p * 2 + 1)\n else:\n u_action_space = spaces.Box(low=-agent.u_range, high=+agent.u_range, shape=(world.dim_p,), dtype=np.float32)\n if agent.movable:\n total_action_space.append(u_action_space)\n # communication action space\n if self.discrete_action_space:\n c_action_space = spaces.Discrete(world.dim_c)\n else:\n c_action_space = spaces.Box(low=0.0, high=1.0, shape=(world.dim_c,), dtype=np.float32)\n if not agent.silent:\n total_action_space.append(c_action_space)\n # total action space\n if len(total_action_space) > 1:\n # all action spaces are discrete, so simplify to MultiDiscrete action space\n if all([isinstance(act_space, spaces.Discrete) for act_space in total_action_space]):\n act_space = MultiDiscrete([[0, act_space.n - 1] for act_space in total_action_space])\n else:\n act_space = spaces.Tuple(total_action_space)\n self.action_space.append(act_space)\n else:\n self.action_space.append(total_action_space[0])\n # observation space\n obs_dim = len(observation_callback(agent, self.world))\n self.observation_space.append(spaces.Box(low=-np.inf, high=+np.inf, shape=(obs_dim,), dtype=np.float32))\n agent.action.c = np.zeros(self.world.dim_c)\n\n self.action_space = spaces.Tuple(tuple(self.action_space))\n self.observation_space = spaces.Tuple(tuple(self.observation_space))\n self.n_agents = self.n\n\n # rendering\n self.shared_viewer = shared_viewer\n if self.shared_viewer:\n self.viewers = [None]\n else:\n self.viewers = [None] * self.n\n self._reset_render()\n\n def seed(self, seed):\n self.world.seed(seed)\n\n def step(self, action_n):\n\n one_hot_actions = []\n for act, acsp in zip(action_n, self.action_space):\n one_hot = np.zeros(acsp.n)\n one_hot[act] = 1.0\n one_hot_actions.append(one_hot)\n action_n = one_hot_actions\n\n obs_n = []\n reward_n = []\n done_n = []\n info_n = {'n': []}\n self.agents = 
self.world.policy_agents\n # set action for each agent\n for i, agent in enumerate(self.agents):\n self._set_action(action_n[i], agent, self.action_space[i])\n # advance world state\n self.world.step()\n # record observation for each agent\n for agent in self.agents:\n obs_n.append(self._get_obs(agent))\n reward_n.append(self._get_reward(agent))\n done_n.append(self._get_done(agent))\n\n info_n['n'].append(self._get_info(agent))\n\n # all agents get total reward in cooperative case\n reward = np.sum(reward_n)\n if self.shared_reward:\n reward_n = [reward] * self.n\n\n return tuple(obs_n), reward_n, done_n, info_n\n\n def reset(self):\n # reset world\n self.reset_callback(self.world)\n # reset renderer\n self._reset_render()\n # record observations for each agent\n obs_n = []\n self.agents = self.world.policy_agents\n for agent in self.agents:\n obs_n.append(self._get_obs(agent))\n return tuple(obs_n)\n\n # get info used for benchmarking\n def _get_info(self, agent):\n if self.info_callback is None:\n return {}\n return self.info_callback(agent, self.world)\n\n # get observation for a particular agent\n def _get_obs(self, agent):\n if self.observation_callback is None:\n return np.zeros(0)\n return self.observation_callback(agent, self.world).astype(np.float32)\n\n # get dones for a particular agent\n # unused right now -- agents are allowed to go beyond the viewing screen\n def _get_done(self, agent):\n if self.done_callback is None:\n return False\n return self.done_callback(agent, self.world)\n\n # get reward for a particular agent\n def _get_reward(self, agent):\n if self.reward_callback is None:\n return 0.0\n return self.reward_callback(agent, self.world)\n\n # set env action for a particular agent\n def _set_action(self, action, agent, action_space, time=None):\n agent.action.u = np.zeros(self.world.dim_p)\n agent.action.c = np.zeros(self.world.dim_c)\n # process action\n if isinstance(action_space, MultiDiscrete):\n act = []\n size = action_space.high - action_space.low + 1\n index = 0\n for s in size:\n act.append(action[index:(index+s)])\n index += s\n action = act\n else:\n action = [action]\n\n if agent.movable:\n # physical action\n if self.discrete_action_input:\n agent.action.u = np.zeros(self.world.dim_p)\n # process discrete action\n if action[0] == 1: agent.action.u[0] = -1.0\n if action[0] == 2: agent.action.u[0] = +1.0\n if action[0] == 3: agent.action.u[1] = -1.0\n if action[0] == 4: agent.action.u[1] = +1.0\n else:\n if self.force_discrete_action:\n d = np.argmax(action[0])\n action[0][:] = 0.0\n action[0][d] = 1.0\n if self.discrete_action_space:\n agent.action.u[0] += action[0][1] - action[0][2]\n agent.action.u[1] += action[0][3] - action[0][4]\n else:\n agent.action.u = action[0]\n sensitivity = 5.0\n if agent.accel is not None:\n sensitivity = agent.accel\n agent.action.u *= sensitivity\n action = action[1:]\n if not agent.silent:\n # communication action\n if self.discrete_action_input:\n agent.action.c = np.zeros(self.world.dim_c)\n agent.action.c[action[0]] = 1.0\n else:\n agent.action.c = action[0]\n action = action[1:]\n # make sure we used all elements of action\n assert len(action) == 0\n\n # reset rendering assets\n def _reset_render(self):\n self.render_geoms = None\n self.render_geoms_xform = None\n\n # render environment\n def render(self, mode='human'):\n if mode == 'human':\n alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n message = ''\n for agent in self.world.agents:\n comm = []\n for other in self.world.agents:\n if other is agent: continue\n if 
np.all(other.state.c == 0):\n word = '_'\n else:\n word = alphabet[np.argmax(other.state.c)]\n message += (other.name + ' to ' + agent.name + ': ' + word + ' ')\n print(message)\n\n for i in range(len(self.viewers)):\n # create viewers (if necessary)\n if self.viewers[i] is None:\n # import rendering only if we need it (and don't import for headless machines)\n #from gym.envs.classic_control import rendering\n from mpe import rendering\n self.viewers[i] = rendering.Viewer(700,700)\n\n # create rendering geometry\n if self.render_geoms is None:\n # import rendering only if we need it (and don't import for headless machines)\n #from gym.envs.classic_control import rendering\n from mpe import rendering\n self.render_geoms = []\n self.render_geoms_xform = []\n for entity in self.world.entities:\n geom = rendering.make_circle(entity.size)\n xform = rendering.Transform()\n if 'agent' in entity.name:\n geom.set_color(*entity.color, alpha=0.5)\n else:\n geom.set_color(*entity.color)\n geom.add_attr(xform)\n self.render_geoms.append(geom)\n self.render_geoms_xform.append(xform)\n\n # add geoms to viewer\n for viewer in self.viewers:\n viewer.geoms = []\n for geom in self.render_geoms:\n viewer.add_geom(geom)\n\n results = []\n for i in range(len(self.viewers)):\n from mpe import rendering\n # update bounds to center around agent\n cam_range = 1\n if self.shared_viewer:\n pos = np.zeros(self.world.dim_p)\n else:\n pos = self.agents[i].state.p_pos\n self.viewers[i].set_bounds(pos[0]-cam_range,pos[0]+cam_range,pos[1]-cam_range,pos[1]+cam_range)\n # update geometry positions\n for e, entity in enumerate(self.world.entities):\n self.render_geoms_xform[e].set_translation(*entity.state.p_pos)\n # render to display or array\n results.append(self.viewers[i].render(return_rgb_array = mode=='rgb_array'))\n\n if self.shared_viewer:\n assert len(results) == 1\n return results[0]\n\n return results\n\n # create receptor field locations in local coordinate frame\n def _make_receptor_locations(self, agent):\n receptor_type = 'polar'\n range_min = 0.05 * 2.0\n range_max = 1.00\n dx = []\n # circular receptive field\n if receptor_type == 'polar':\n for angle in np.linspace(-np.pi, +np.pi, 8, endpoint=False):\n for distance in np.linspace(range_min, range_max, 3):\n dx.append(distance * np.array([np.cos(angle), np.sin(angle)]))\n # add origin\n dx.append(np.array([0.0, 0.0]))\n # grid receptive field\n if receptor_type == 'grid':\n for x in np.linspace(-range_max, +range_max, 5):\n for y in np.linspace(-range_max, +range_max, 5):\n dx.append(np.array([x,y]))\n return dx\n\n def close(self):\n for viewer in self.viewers:\n if viewer:\n viewer.close()\n\n\n# vectorized wrapper for a batch of multi-agent environments\n# assumes all environments have the same observation and action space\nclass BatchMultiAgentEnv(gym.Env):\n metadata = {\n 'runtime.vectorized': True,\n 'render.modes' : ['human', 'rgb_array']\n }\n\n def __init__(self, env_batch):\n self.env_batch = env_batch\n\n @property\n def n(self):\n return np.sum([env.n for env in self.env_batch])\n\n @property\n def action_space(self):\n return self.env_batch[0].action_space\n\n @property\n def observation_space(self):\n return self.env_batch[0].observation_space\n\n def step(self, action_n, time):\n obs_n = []\n reward_n = []\n done_n = []\n info_n = {'n': []}\n i = 0\n for env in self.env_batch:\n obs, reward, done, _ = env.step(action_n[i:(i+env.n)], time)\n i += env.n\n obs_n += obs\n # reward = [r / len(self.env_batch) for r in reward]\n reward_n += reward\n 
done_n += done\n return obs_n, reward_n, done_n, info_n\n\n def reset(self):\n obs_n = []\n for env in self.env_batch:\n obs_n += env.reset()\n return obs_n\n\n # render environment\n def render(self, mode='human', close=True):\n results_n = []\n for env in self.env_batch:\n results_n += env.render(mode, close)\n return results_n\n",
"import numpy as np\nfrom mpe.core import World, Agent, Landmark\nfrom mpe.scenario import BaseScenario\n\n\nclass Scenario(BaseScenario):\n def make_world(self):\n world = World()\n # set any world properties first\n world.dim_c = 2\n num_agents = 2\n num_landmarks = 3\n world.collaborative = True\n # add agents\n world.agents = [Agent() for i in range(num_agents)]\n for i, agent in enumerate(world.agents):\n agent.name = 'agent %d' % i\n agent.collide = False\n agent.silent = True\n agent.size = 0.15\n # add landmarks\n world.landmarks = [Landmark() for i in range(num_landmarks)]\n for i, landmark in enumerate(world.landmarks):\n landmark.name = 'landmark %d' % i\n landmark.collide = False\n landmark.movable = False\n # make initial conditions\n self.reset_world(world)\n return world\n\n def reset_world(self, world):\n # random properties for agents\n for i, agent in enumerate(world.agents):\n agent.color = np.array([0.35, 0.35, 0.85])\n # random properties for landmarks\n for i, landmark in enumerate(world.landmarks):\n landmark.color = np.array([0.25, 0.25, 0.25])\n # set random initial states\n for agent in world.agents:\n agent.state.p_pos = world.np_random.uniform(-1, +1, world.dim_p)\n agent.state.p_vel = np.zeros(world.dim_p)\n agent.state.c = np.zeros(world.dim_c)\n for i, landmark in enumerate(world.landmarks):\n landmark.state.p_pos = world.np_random.uniform(-1, +1, world.dim_p)\n landmark.state.p_vel = np.zeros(world.dim_p)\n\n def benchmark_data(self, agent, world):\n rew = 0\n collisions = 0\n occupied_landmarks = 0\n min_dists = 0\n for l in world.landmarks:\n dists = [np.sqrt(np.sum(np.square(a.state.p_pos - l.state.p_pos))) for a in world.agents]\n min_dists += min(dists)\n rew -= min(dists)\n if min(dists) < 0.1:\n occupied_landmarks += 1\n if agent.collide:\n for a in world.agents:\n if self.is_collision(a, agent):\n rew -= 1\n collisions += 1\n return (rew, collisions, min_dists, occupied_landmarks)\n\n\n def is_collision(self, obj1, obj2):\n delta_pos = obj1.state.p_pos - obj2.state.p_pos\n dist = np.sqrt(np.sum(np.square(delta_pos)))\n dist_min = 2*max(obj1.size, obj2.size)\n return True if dist < dist_min else False\n\n def reward(self, agent, world):\n rew = 0\n other_agent = [a for a in world.agents if a is not agent][0]\n\n if self.is_collision(agent, world.landmarks[2]):\n if self.is_collision(world.landmarks[2], other_agent):\n rew += 11\n else:\n rew -= 6\n elif self.is_collision(agent, world.landmarks[1]):\n if self.is_collision(world.landmarks[1], other_agent):\n rew += 7\n else:\n rew -= 5\n elif self.is_collision(agent, world.landmarks[0]):\n if self.is_collision(world.landmarks[0], other_agent):\n rew += 5\n else:\n rew -= 0\n\n return rew\n\n def observation(self, agent, world):\n # get positions of all entities in this agent's reference frame\n entity_pos = []\n for entity in world.landmarks: # world.entities:\n entity_pos.append(entity.state.p_pos - agent.state.p_pos)\n # entity colors\n # entity_color = []\n # for entity in world.landmarks: # world.entities:\n # entity_color.append(entity.color)\n # communication of all other agents\n comm = []\n other_pos = []\n for other in world.agents:\n if other is agent: continue\n comm.append(other.state.c)\n other_pos.append(other.state.p_pos - agent.state.p_pos)\n return np.concatenate([agent.state.p_vel] + [agent.state.p_pos] + entity_pos + other_pos + comm)\n"
] | [
[
"numpy.linspace",
"numpy.cos",
"numpy.sin",
"numpy.all",
"numpy.argmax",
"numpy.array",
"numpy.zeros",
"numpy.sum"
],
[
"numpy.concatenate",
"numpy.square",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
pyjsdev/googlemap_flask | [
"9d0dd899a9cbf756b3d83c33e3d8a47e7db40cc5",
"9d0dd899a9cbf756b3d83c33e3d8a47e7db40cc5",
"9d0dd899a9cbf756b3d83c33e3d8a47e7db40cc5",
"9d0dd899a9cbf756b3d83c33e3d8a47e7db40cc5",
"47aa8f8420944c47e876c1c36be182d257c14b87",
"9d0dd899a9cbf756b3d83c33e3d8a47e7db40cc5"
] | [
"examples/charts/file/hover_span.py",
"examples/models/image_url.py",
"bokeh/core/json_encoder.py",
"examples/compat/pandas_dataframe.py",
"examples/models/dateaxis.py",
"bokeh/core/tests/test_properties.py"
] | [
"import pandas as pd\n\nfrom bokeh.charts import Line, Scatter, show, output_file, defaults\nfrom bokeh.layouts import gridplot\nfrom bokeh.models import HoverTool\nfrom bokeh.sampledata.degrees import data\n\ndefaults.width = 500\ndefaults.height = 300\n\nTOOLS='box_zoom,box_select,hover,crosshair,reset'\n\nTOOLTIPS = [ (\"y\", \"$~y\"), (\"x\", \"$~x\") ]\n\ndata = data[['Biology', 'Business', 'Computer Science', \"Year\"]]\ndata = pd.melt(data, id_vars=['Year'],\n value_vars=['Biology', 'Business', 'Computer Science'],\n value_name='Count', var_name='Degree')\n\nvline = Line(data, y='Count', color='Degree', title=\"Lines VLine\", ylabel='measures',\n tools=TOOLS)\n\nhline = Line(data, y='Count', color='Degree', title=\"Lines HLine\",\n ylabel='measures', tools=TOOLS)\n\nint_vline = Line(data, y='Count', color='Degree', title=\"Lines VLine Interp\",\n ylabel='measures', tools=TOOLS)\n\nint_hline = Line(data, y='Count', color='Degree', title=\"Lines HLine Interp\",\n ylabel='measures', tools=TOOLS)\n\nscatter_point = Scatter(data, x='Year', y='Count', color='Degree',\n title=\"Scatter mouse\", ylabel='measures', legend=True,\n tools=TOOLS)\n\nscatter = Scatter(data, x='Year', y='Count', color='Degree',\n title=\"Scatter V Line\", ylabel='measures', legend=True, tools=TOOLS)\n\nint_point_line = Line(data, x='Year', y='Count', color='Degree',\n title=\"Lines Mouse Interp.\", ylabel='measures', tools=TOOLS)\n\npoint_line = Line(data, x='Year', y='Count', color='Degree',\n title=\"Lines Mouse\", ylabel='measures', tools=TOOLS)\n\n\nhhover = hline.select(HoverTool)\nhhover.mode = 'hline'\nhhover.line_policy = 'next'\n\nvhover = vline.select(HoverTool)\nvhover.mode = 'vline'\nvhover.line_policy = 'nearest'\n\nint_hhover = int_hline.select(HoverTool)\nint_hhover.mode = 'hline'\nint_hhover.line_policy = 'interp'\n\nint_vhover = int_vline.select(HoverTool)\nint_vhover.mode = 'vline'\nint_vhover.line_policy = 'interp'\n\niphover = int_point_line.select(HoverTool)\niphover.mode = 'mouse'\niphover.line_policy = 'interp'\n\ntphover = point_line.select(HoverTool)\ntphover.mode = 'mouse'\n\nshover = scatter.select(HoverTool)\nshover.mode = 'vline'\n\nshoverp = scatter_point.select(HoverTool)\nshoverp.mode = 'mouse'\n\n# set up tooltips\nint_vhover.tooltips = int_hhover.tooltips = TOOLTIPS\ntphover.tooltips = iphover.tooltips = TOOLTIPS\nshover.tooltips = shoverp.tooltips = TOOLTIPS\nvhover.tooltips = hhover.tooltips = TOOLTIPS\n\noutput_file(\"hover_span.html\", title=\"hover_span.py example\")\n\nshow(gridplot(hline, vline, int_hline, int_vline,\n int_point_line, point_line, scatter_point, scatter,\n ncols=2))\n",
"import numpy as np\n\nfrom bokeh.util.browser import view\nfrom bokeh.document import Document\nfrom bokeh.embed import file_html\nfrom bokeh.models.glyphs import ImageURL\nfrom bokeh.models import ColumnDataSource, Range1d, Plot, LinearAxis, Grid\nfrom bokeh.resources import INLINE\n\nurl = \"http://bokeh.pydata.org/en/latest/_static/images/logo.png\"\nN = 5\n\nsource = ColumnDataSource(dict(\n url = [url]*N,\n x1 = np.linspace( 0, 150, N),\n y1 = np.linspace( 0, 150, N),\n w1 = np.linspace( 10, 50, N),\n h1 = np.linspace( 10, 50, N),\n x2 = np.linspace(-50, 150, N),\n y2 = np.linspace( 0, 200, N),\n))\n\nxdr = Range1d(start=-100, end=200)\nydr = Range1d(start=-100, end=200)\n\nplot = Plot(x_range=xdr, y_range=ydr)\nplot.title.text = \"ImageURL\"\nplot.toolbar_location = None\n\nimage1 = ImageURL(url=\"url\", x=\"x1\", y=\"y1\", w=\"w1\", h=\"h1\", anchor=\"center\", global_alpha=0.2)\nplot.add_glyph(source, image1)\n\nimage2 = ImageURL(url=\"url\", x=\"x2\", y=\"y2\", w=20, h=20, anchor=\"top_left\")\nplot.add_glyph(source, image2)\n\nimage3 = ImageURL(url=dict(value=url), x=200, y=-100, anchor=\"bottom_right\")\nplot.add_glyph(source, image3)\n\nxaxis = LinearAxis()\nplot.add_layout(xaxis, 'below')\n\nyaxis = LinearAxis()\nplot.add_layout(yaxis,'left')\n\nplot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))\nplot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))\n\ndoc = Document( )\ndoc.add_root(plot)\n\nif __name__ == \"__main__\":\n doc.validate()\n filename = \"image_url.html\"\n with open(filename, \"w\") as f:\n f.write(file_html(doc, INLINE, \"Image URL Example\"))\n print(\"Wrote %s\" % filename)\n view(filename)\n",
"''' Provide a functions and classes to implement a custom JSON encoder for\nserializing objects for BokehJS.\n\nThe primary interface is provided by the |serialize_json| function, which\nuses the custom |BokehJSONEncoder| to produce JSON output.\n\nIn general, functions in this module convert values in the following way:\n\n* Datetime values (Python, Pandas, NumPy) are converted to floating point\n milliseconds since epoch.\n\n* Decimal values are converted to floating point.\n\n* Sequences (Pandas Series, NumPy arrays, python sequences) that are passed\n though this interface are converted to lists. Note, however, that arrays in\n data sources inside Bokeh Documents are converted elsewhere, and by default\n use a binary encoded format.\n\n* Bokeh ``Model`` instances are usually serialized elsewhere in the context\n of an entire Bokeh Document. Models passed trough this interface are\n converted to references.\n\n* ``HasProps`` (that are not Bokeh models) are converted to key/value dicts or\n all their properties and values.\n\n* ``Color`` instances are converted to CSS color values.\n\n.. |serialize_json| replace:: :class:`~bokeh.core.json_encoder.serialize_json`\n.. |BokehJSONEncoder| replace:: :class:`~bokeh.core.json_encoder.BokehJSONEncoder`\n\n'''\nfrom __future__ import absolute_import\n\nimport logging\nlog = logging.getLogger(__name__)\n\nimport collections\nimport datetime as dt\nimport decimal\nimport json\nimport time\n\nimport numpy as np\n\nfrom ..settings import settings\nfrom ..util.dependencies import import_optional\nfrom ..util.serialization import transform_series, transform_array\n\npd = import_optional('pandas')\nrd = import_optional(\"dateutil.relativedelta\")\n\nNP_EPOCH = np.datetime64('1970-01-01T00:00:00Z')\nNP_MS_DELTA = np.timedelta64(1, 'ms')\n\nclass BokehJSONEncoder(json.JSONEncoder):\n ''' A custom ``json.JSONEncoder`` subclass for encoding objects in\n accordance with the BokehJS protocol.\n\n '''\n def transform_python_types(self, obj):\n ''' Handle special scalars such as (Python, NumPy, or Pandas)\n datetimes, or Decimal values.\n\n Args:\n obj (obj) :\n\n The object to encode. Anything not specifically handled in\n this method is passed on to the default system JSON encoder.\n\n '''\n\n # Pandas Timestamp\n if pd and isinstance(obj, pd.tslib.Timestamp):\n return obj.value / 10**6.0 #nanosecond to millisecond\n elif np.issubdtype(type(obj), np.float):\n return float(obj)\n elif np.issubdtype(type(obj), np.integer):\n return int(obj)\n elif np.issubdtype(type(obj), np.bool_):\n return bool(obj)\n\n # Datetime (datetime is a subclass of date)\n elif isinstance(obj, dt.datetime):\n return time.mktime(obj.timetuple()) * 1000. 
+ obj.microsecond / 1000.\n\n # Timedelta (timedelta is class in the datetime library)\n elif isinstance(obj, dt.timedelta):\n return obj.total_seconds() * 1000.\n\n # Date\n elif isinstance(obj, dt.date):\n return time.mktime(obj.timetuple()) * 1000.\n\n # Numpy datetime64\n elif isinstance(obj, np.datetime64):\n epoch_delta = obj - NP_EPOCH\n return (epoch_delta / NP_MS_DELTA)\n\n # Time\n elif isinstance(obj, dt.time):\n return (obj.hour * 3600 + obj.minute * 60 + obj.second) * 1000 + obj.microsecond / 1000.\n elif rd and isinstance(obj, rd.relativedelta):\n return dict(years=obj.years,\n months=obj.months,\n days=obj.days,\n hours=obj.hours,\n minutes=obj.minutes,\n seconds=obj.seconds,\n microseconds=obj.microseconds)\n\n # Decimal\n elif isinstance(obj, decimal.Decimal):\n return float(obj)\n\n else:\n return super(BokehJSONEncoder, self).default(obj)\n\n def default(self, obj):\n ''' The required ``default`` method for JSONEncoder subclasses.\n\n Args:\n obj (obj) :\n\n The object to encode. Anything not specifically handled in\n this method is passed on to the default system JSON encoder.\n\n '''\n\n from ..model import Model\n from ..colors import Color\n from .has_props import HasProps\n\n # array types -- use force_list here, only binary\n # encoding CDS columns for now\n if pd and isinstance(obj, (pd.Series, pd.Index)):\n return transform_series(obj, force_list=True)\n elif isinstance(obj, np.ndarray):\n return transform_array(obj, force_list=True)\n elif isinstance(obj, collections.deque):\n return list(map(self.default, obj))\n elif isinstance(obj, Model):\n return obj.ref\n elif isinstance(obj, HasProps):\n return obj.properties_with_values(include_defaults=False)\n elif isinstance(obj, Color):\n return obj.to_css()\n\n else:\n return self.transform_python_types(obj)\n\ndef serialize_json(obj, pretty=False, indent=None, **kwargs):\n ''' Return a serialized JSON representation of objects, suitable to\n send to BokehJS.\n\n This function is typically used to serialize single python objects in\n the manner expected by BokehJS. In particular, many datetime values are\n automatically normalized to an expected format. Some Bokeh objects can\n also be passed, but note that Bokeh models are typically properly\n serialized in the context of an entire Bokeh document.\n\n The resulting JSON always has sorted keys. By default. the output is\n as compact as possible unless pretty output or indentation is requested.\n\n Args:\n obj (obj) : the object to serialize to JSON format\n\n pretty (bool, optional) :\n\n Whether to generate prettified output. If ``True``, spaces are\n added after added after separators, and indentation and newlines\n are applied. (default: False)\n\n Pretty output can also be enabled with the environment variable\n ``BOKEH_PRETTY``, which overrides this argument, if set.\n\n indent (int or None, optional) :\n\n Amount of indentation to use in generated JSON output. If ``None``\n then no indentation is used, unless pretty output is enabled,\n in which case two spaces are used. (default: None)\n\n Any additional keyword arguments are passed to ``json.dumps``, except for\n some that are computed internally, and cannot be overridden:\n\n * allow_nan\n * indent\n * separators\n * sort_keys\n\n Examples:\n\n .. 
code-block:: python\n\n >>> data = dict(b=np.datetime64('2017-01-01'), a = np.arange(3))\n\n >>>print(serialize_json(data))\n {\"a\":[0,1,2],\"b\":1483228800000.0}\n\n >>> print(serialize_json(data, pretty=True))\n {\n \"a\": [\n 0,\n 1,\n 2\n ],\n \"b\": 1483228800000.0\n }\n\n '''\n\n # these args to json.dumps are computed internally and should not be passed along\n for name in ['allow_nan', 'separators', 'sort_keys']:\n if name in kwargs:\n raise ValueError(\"The value of %r is computed internally, overriding is not permissable.\" % name)\n\n pretty = settings.pretty(pretty)\n\n if pretty:\n separators=(\",\", \": \")\n else:\n separators=(\",\", \":\")\n\n if pretty and indent is None:\n indent = 2\n\n return json.dumps(obj, cls=BokehJSONEncoder, allow_nan=False, indent=indent, separators=separators, sort_keys=True, **kwargs)\n",
"import numpy as np\nimport pandas as pd\n\nfrom bokeh import mpl\nfrom bokeh.plotting import output_file, show\n\nindex=pd.date_range('1/1/2000', periods=1000)\n\ndf = pd.DataFrame(np.random.randn(1000, 4), index=index, columns=list('ABCD'))\n\ndf.cumsum().plot(legend=False)\n\noutput_file(\"pandas_dataframe.html\", title=\"pandas_dataframe.py example\")\n\nshow(mpl.to_bokeh())\n",
"from __future__ import print_function\n\nfrom numpy import pi, arange, sin\nimport numpy as np\nimport time\n\nfrom bokeh.util.browser import view\nfrom bokeh.document import Document\nfrom bokeh.embed import file_html\nfrom bokeh.models.glyphs import Circle\nfrom bokeh.models import (\n Plot, DataRange1d, DatetimeAxis,\n ColumnDataSource, PanTool, WheelZoomTool\n)\nfrom bokeh.resources import INLINE\n\nx = arange(-2 * pi, 2 * pi, 0.1)\ny = sin(x)\n\n# Create an array of times, starting at the current time, and extending\n# for len(x) number of hours.\ntimes = np.arange(len(x)) * 3600000 + time.time()\n\nsource = ColumnDataSource(\n data=dict(x=x, y=y, times=times)\n)\n\nxdr = DataRange1d()\nydr = DataRange1d()\n\nplot = Plot(x_range=xdr, y_range=ydr, min_border=80)\n\ncircle = Circle(x=\"times\", y=\"y\", fill_color=\"red\", size=5, line_color=\"black\")\nplot.add_glyph(source, circle)\n\nplot.add_layout(DatetimeAxis(), 'below')\nplot.add_layout(DatetimeAxis(), 'left')\n\nplot.add_tools(PanTool(), WheelZoomTool())\n\ndoc = Document()\ndoc.add_root(plot)\n\nif __name__ == \"__main__\":\n doc.validate()\n filename = \"dateaxis.html\"\n with open(filename, \"w\") as f:\n f.write(file_html(doc, INLINE, \"Date Axis Example\"))\n print(\"Wrote %s\" % filename)\n view(filename)\n",
"from __future__ import absolute_import\n\nimport datetime\nimport unittest\nimport numpy as np\nimport pandas as pd\nfrom copy import copy\n\nfrom bokeh.core.properties import (field, value,\n NumberSpec, ColorSpec, Bool, Int, Float, Complex, String,\n Regex, Seq, List, Dict, Tuple, Array, Instance, Any, Interval, Either,\n Enum, Color, DashPattern, Size, Percent, Angle, AngleSpec,\n DistanceSpec, FontSizeSpec, Override, Include, MinMaxBounds)\n\nfrom bokeh.core.has_props import HasProps\n\nfrom bokeh.models import Plot\n\nclass Basictest(unittest.TestCase):\n\n def test_simple_class(self):\n class Foo(HasProps):\n x = Int(12)\n y = String(\"hello\")\n z = Array(Int, np.array([1, 2, 3]))\n s = String(None)\n\n f = Foo()\n self.assertEqual(f.x, 12)\n self.assertEqual(f.y, \"hello\")\n self.assert_(np.array_equal(np.array([1, 2, 3]), f.z))\n self.assertEqual(f.s, None)\n\n\n self.assertEqual(set([\"x\", \"y\", \"z\", \"s\"]), f.properties())\n with_defaults = f.properties_with_values(include_defaults=True)\n del with_defaults['z'] # can't compare equality on the np array\n self.assertDictEqual(dict(x=12, y=\"hello\", s=None), with_defaults)\n without_defaults = f.properties_with_values(include_defaults=False)\n # the Array is in here because it's mutable\n self.assertTrue('z' in without_defaults)\n del without_defaults['z']\n self.assertDictEqual(dict(), without_defaults)\n\n f.x = 18\n self.assertEqual(f.x, 18)\n\n f.y = \"bar\"\n self.assertEqual(f.y, \"bar\")\n\n without_defaults = f.properties_with_values(include_defaults=False)\n del without_defaults['z']\n self.assertDictEqual(dict(x=18, y=\"bar\"), without_defaults)\n\n def test_enum(self):\n class Foo(HasProps):\n x = Enum(\"blue\", \"red\", \"green\") # the first item is the default\n y = Enum(\"small\", \"medium\", \"large\", default=\"large\")\n\n f = Foo()\n self.assertEqual(f.x, \"blue\")\n self.assertEqual(f.y, \"large\")\n\n f.x = \"red\"\n self.assertEqual(f.x, \"red\")\n\n with self.assertRaises(ValueError):\n f.x = \"yellow\"\n\n f.y = \"small\"\n self.assertEqual(f.y, \"small\")\n\n with self.assertRaises(ValueError):\n f.y = \"yellow\"\n\n def test_inheritance(self):\n class Base(HasProps):\n x = Int(12)\n y = String(\"hello\")\n\n class Child(Base):\n z = Float(3.14)\n\n c = Child()\n self.assertEqual(frozenset(['x', 'y', 'z']), frozenset(c.properties()))\n self.assertEqual(c.y, \"hello\")\n\n def test_set(self):\n class Foo(HasProps):\n x = Int(12)\n y = Enum(\"red\", \"blue\", \"green\")\n z = String(\"blah\")\n\n f = Foo()\n self.assertEqual(f.x, 12)\n self.assertEqual(f.y, \"red\")\n self.assertEqual(f.z, \"blah\")\n f.update(**dict(x=20, y=\"green\", z=\"hello\"))\n self.assertEqual(f.x, 20)\n self.assertEqual(f.y, \"green\")\n self.assertEqual(f.z, \"hello\")\n with self.assertRaises(ValueError):\n f.update(y=\"orange\")\n\n def test_no_parens(self):\n class Foo(HasProps):\n x = Int\n y = Int()\n f = Foo()\n self.assertEqual(f.x, f.y)\n f.x = 13\n self.assertEqual(f.x, 13)\n\n def test_accurate_properties_sets(self):\n class Base(HasProps):\n num = Int(12)\n container = List(String)\n child = Instance(HasProps)\n\n class Mixin(HasProps):\n mixin_num = Int(12)\n mixin_container = List(String)\n mixin_child = Instance(HasProps)\n\n class Sub(Base, Mixin):\n sub_num = Int(12)\n sub_container = List(String)\n sub_child = Instance(HasProps)\n\n b = Base()\n self.assertEqual(set([\"child\"]),\n b.properties_with_refs())\n self.assertEqual(set([\"container\"]),\n b.properties_containers())\n 
self.assertEqual(set([\"num\", \"container\", \"child\"]),\n b.properties())\n self.assertEqual(set([\"num\", \"container\", \"child\"]),\n b.properties(with_bases=True))\n self.assertEqual(set([\"num\", \"container\", \"child\"]),\n b.properties(with_bases=False))\n\n m = Mixin()\n self.assertEqual(set([\"mixin_child\"]),\n m.properties_with_refs())\n self.assertEqual(set([\"mixin_container\"]),\n m.properties_containers())\n self.assertEqual(set([\"mixin_num\", \"mixin_container\", \"mixin_child\"]),\n m.properties())\n self.assertEqual(set([\"mixin_num\", \"mixin_container\", \"mixin_child\"]),\n m.properties(with_bases=True))\n self.assertEqual(set([\"mixin_num\", \"mixin_container\", \"mixin_child\"]),\n m.properties(with_bases=False))\n\n s = Sub()\n self.assertEqual(set([\"child\", \"sub_child\", \"mixin_child\"]),\n s.properties_with_refs())\n self.assertEqual(set([\"container\", \"sub_container\", \"mixin_container\"]),\n s.properties_containers())\n self.assertEqual(set([\"num\", \"container\", \"child\",\n \"mixin_num\", \"mixin_container\", \"mixin_child\",\n \"sub_num\", \"sub_container\", \"sub_child\"]),\n s.properties())\n self.assertEqual(set([\"num\", \"container\", \"child\",\n \"mixin_num\", \"mixin_container\", \"mixin_child\",\n \"sub_num\", \"sub_container\", \"sub_child\"]),\n s.properties(with_bases=True))\n self.assertEqual(set([\"sub_num\", \"sub_container\", \"sub_child\"]),\n s.properties(with_bases=False))\n\n # verify caching\n self.assertIs(s.properties_with_refs(), s.properties_with_refs())\n self.assertIs(s.properties_containers(), s.properties_containers())\n self.assertIs(s.properties(), s.properties())\n self.assertIs(s.properties(with_bases=True), s.properties(with_bases=True))\n # this one isn't cached because we store it as a list __properties__ and wrap it\n # in a new set every time\n #self.assertIs(s.properties(with_bases=False), s.properties(with_bases=False))\n\n def test_accurate_dataspecs(self):\n class Base(HasProps):\n num = NumberSpec(12)\n not_a_dataspec = Float(10)\n\n class Mixin(HasProps):\n mixin_num = NumberSpec(14)\n\n class Sub(Base, Mixin):\n sub_num = NumberSpec(16)\n\n base = Base()\n mixin = Mixin()\n sub = Sub()\n\n self.assertEqual(set([\"num\"]), base.dataspecs())\n self.assertEqual(set([\"mixin_num\"]), mixin.dataspecs())\n self.assertEqual(set([\"num\", \"mixin_num\", \"sub_num\"]), sub.dataspecs())\n\n self.assertDictEqual(dict(num=base.lookup(\"num\")), base.dataspecs_with_props())\n self.assertDictEqual(dict(mixin_num=mixin.lookup(\"mixin_num\")), mixin.dataspecs_with_props())\n self.assertDictEqual(dict(num=sub.lookup(\"num\"),\n mixin_num=sub.lookup(\"mixin_num\"),\n sub_num=sub.lookup(\"sub_num\")),\n sub.dataspecs_with_props())\n\n def test_not_serialized(self):\n class NotSerialized(HasProps):\n x = Int(12, serialized=False)\n y = String(\"hello\")\n\n o = NotSerialized()\n self.assertEqual(o.x, 12)\n self.assertEqual(o.y, 'hello')\n\n # non-serialized props are still in the list of props\n self.assertTrue('x' in o.properties())\n self.assertTrue('y' in o.properties())\n\n # but they aren't in the dict of props with values, since their\n # values are not important (already included in other values,\n # as with the _units properties)\n self.assertTrue('x' not in o.properties_with_values(include_defaults=True))\n self.assertTrue('y' in o.properties_with_values(include_defaults=True))\n self.assertTrue('x' not in o.properties_with_values(include_defaults=False))\n self.assertTrue('y' not in 
o.properties_with_values(include_defaults=False))\n\n o.x = 42\n o.y = 'world'\n\n self.assertTrue('x' not in o.properties_with_values(include_defaults=True))\n self.assertTrue('y' in o.properties_with_values(include_defaults=True))\n self.assertTrue('x' not in o.properties_with_values(include_defaults=False))\n self.assertTrue('y' in o.properties_with_values(include_defaults=False))\n\n def test_readonly(self):\n class Readonly(HasProps):\n x = Int(12, readonly=True) # with default\n y = Int(readonly=True) # without default\n z = String(\"hello\")\n\n o = Readonly()\n self.assertEqual(o.x, 12)\n self.assertEqual(o.y, None)\n self.assertEqual(o.z, 'hello')\n\n # readonly props are still in the list of props\n self.assertTrue('x' in o.properties())\n self.assertTrue('y' in o.properties())\n self.assertTrue('z' in o.properties())\n\n # but they aren't in the dict of props with values\n self.assertTrue('x' not in o.properties_with_values(include_defaults=True))\n self.assertTrue('y' not in o.properties_with_values(include_defaults=True))\n self.assertTrue('z' in o.properties_with_values(include_defaults=True))\n self.assertTrue('x' not in o.properties_with_values(include_defaults=False))\n self.assertTrue('y' not in o.properties_with_values(include_defaults=False))\n self.assertTrue('z' not in o.properties_with_values(include_defaults=False))\n\n with self.assertRaises(RuntimeError):\n o.x = 7\n with self.assertRaises(RuntimeError):\n o.y = 7\n o.z = \"xyz\"\n\n self.assertEqual(o.x, 12)\n self.assertEqual(o.y, None)\n self.assertEqual(o.z, 'xyz')\n\n def test_include_defaults(self):\n class IncludeDefaultsTest(HasProps):\n x = Int(12)\n y = String(\"hello\")\n\n o = IncludeDefaultsTest()\n self.assertEqual(o.x, 12)\n self.assertEqual(o.y, 'hello')\n\n self.assertTrue('x' in o.properties_with_values(include_defaults=True))\n self.assertTrue('y' in o.properties_with_values(include_defaults=True))\n self.assertTrue('x' not in o.properties_with_values(include_defaults=False))\n self.assertTrue('y' not in o.properties_with_values(include_defaults=False))\n\n o.x = 42\n o.y = 'world'\n\n self.assertTrue('x' in o.properties_with_values(include_defaults=True))\n self.assertTrue('y' in o.properties_with_values(include_defaults=True))\n self.assertTrue('x' in o.properties_with_values(include_defaults=False))\n self.assertTrue('y' in o.properties_with_values(include_defaults=False))\n\n def test_include_defaults_with_kwargs(self):\n class IncludeDefaultsKwargsTest(HasProps):\n x = Int(12)\n y = String(\"hello\")\n\n o = IncludeDefaultsKwargsTest(x=14, y=\"world\")\n self.assertEqual(o.x, 14)\n self.assertEqual(o.y, 'world')\n\n self.assertTrue('x' in o.properties_with_values(include_defaults=True))\n self.assertTrue('y' in o.properties_with_values(include_defaults=True))\n self.assertTrue('x' in o.properties_with_values(include_defaults=False))\n self.assertTrue('y' in o.properties_with_values(include_defaults=False))\n\n def test_include_defaults_set_to_same(self):\n class IncludeDefaultsSetToSameTest(HasProps):\n x = Int(12)\n y = String(\"hello\")\n\n o = IncludeDefaultsSetToSameTest()\n\n self.assertTrue('x' in o.properties_with_values(include_defaults=True))\n self.assertTrue('y' in o.properties_with_values(include_defaults=True))\n self.assertTrue('x' not in o.properties_with_values(include_defaults=False))\n self.assertTrue('y' not in o.properties_with_values(include_defaults=False))\n\n # this should no-op\n o.x = 12\n o.y = \"hello\"\n\n self.assertTrue('x' in 
o.properties_with_values(include_defaults=True))\n self.assertTrue('y' in o.properties_with_values(include_defaults=True))\n self.assertTrue('x' not in o.properties_with_values(include_defaults=False))\n self.assertTrue('y' not in o.properties_with_values(include_defaults=False))\n\n def test_override_defaults(self):\n class FooBase(HasProps):\n x = Int(12)\n\n class FooSub(FooBase):\n x = Override(default=14)\n\n def func_default():\n return 16\n\n class FooSubSub(FooBase):\n x = Override(default=func_default)\n\n f_base = FooBase()\n f_sub = FooSub()\n f_sub_sub = FooSubSub()\n\n self.assertEqual(f_base.x, 12)\n self.assertEqual(f_sub.x, 14)\n self.assertEqual(f_sub_sub.x, 16)\n\n self.assertEqual(12, f_base.properties_with_values(include_defaults=True)['x'])\n self.assertEqual(14, f_sub.properties_with_values(include_defaults=True)['x'])\n self.assertEqual(16, f_sub_sub.properties_with_values(include_defaults=True)['x'])\n\n self.assertFalse('x' in f_base.properties_with_values(include_defaults=False))\n self.assertFalse('x' in f_sub.properties_with_values(include_defaults=False))\n self.assertFalse('x' in f_sub_sub.properties_with_values(include_defaults=False))\n\n def test_include_delegate(self):\n class IsDelegate(HasProps):\n x = Int(12)\n y = String(\"hello\")\n\n class IncludesDelegateWithPrefix(HasProps):\n z = Include(IsDelegate, use_prefix=True)\n z_y = Int(57) # override the Include\n\n class IncludesDelegateWithoutPrefix(HasProps):\n z = Include(IsDelegate, use_prefix=False)\n y = Int(42) # override the Include\n\n class IncludesDelegateWithoutPrefixUsingOverride(HasProps):\n z = Include(IsDelegate, use_prefix=False)\n y = Override(default=\"world\") # override the Include changing just the default\n\n o = IncludesDelegateWithoutPrefix()\n self.assertEqual(o.x, 12)\n self.assertEqual(o.y, 42)\n self.assertFalse(hasattr(o, 'z'))\n\n self.assertTrue('x' in o.properties_with_values(include_defaults=True))\n self.assertTrue('y' in o.properties_with_values(include_defaults=True))\n self.assertTrue('x' not in o.properties_with_values(include_defaults=False))\n self.assertTrue('y' not in o.properties_with_values(include_defaults=False))\n\n o = IncludesDelegateWithoutPrefixUsingOverride()\n self.assertEqual(o.x, 12)\n self.assertEqual(o.y, 'world')\n self.assertFalse(hasattr(o, 'z'))\n\n self.assertTrue('x' in o.properties_with_values(include_defaults=True))\n self.assertTrue('y' in o.properties_with_values(include_defaults=True))\n self.assertTrue('x' not in o.properties_with_values(include_defaults=False))\n self.assertTrue('y' not in o.properties_with_values(include_defaults=False))\n\n o2 = IncludesDelegateWithPrefix()\n self.assertEqual(o2.z_x, 12)\n self.assertEqual(o2.z_y, 57)\n self.assertFalse(hasattr(o2, 'z'))\n self.assertFalse(hasattr(o2, 'x'))\n self.assertFalse(hasattr(o2, 'y'))\n\n self.assertFalse('z' in o2.properties_with_values(include_defaults=True))\n self.assertFalse('x' in o2.properties_with_values(include_defaults=True))\n self.assertFalse('y' in o2.properties_with_values(include_defaults=True))\n self.assertTrue('z_x' in o2.properties_with_values(include_defaults=True))\n self.assertTrue('z_y' in o2.properties_with_values(include_defaults=True))\n self.assertTrue('z_x' not in o2.properties_with_values(include_defaults=False))\n self.assertTrue('z_y' not in o2.properties_with_values(include_defaults=False))\n\n # def test_kwargs_init(self):\n # class Foo(HasProps):\n # x = String\n # y = Int\n # z = Float\n # f = Foo(x = \"hello\", y = 14)\n # 
self.assertEqual(f.x, \"hello\")\n # self.assertEqual(f.y, 14)\n\n # with self.assertRaises(TypeError):\n # # This should raise a TypeError: object.__init__() takes no parameters\n # g = Foo(z = 3.14, q = \"blah\")\n\nclass TestNumberSpec(unittest.TestCase):\n\n def test_field(self):\n class Foo(HasProps):\n x = NumberSpec(\"xfield\")\n f = Foo()\n self.assertEqual(f.x, \"xfield\")\n self.assertDictEqual(Foo.__dict__[\"x\"].serializable_value(f), {\"field\": \"xfield\"})\n f.x = \"my_x\"\n self.assertEqual(f.x, \"my_x\")\n self.assertDictEqual(Foo.__dict__[\"x\"].serializable_value(f), {\"field\": \"my_x\"})\n\n def test_value(self):\n class Foo(HasProps):\n x = NumberSpec(\"xfield\")\n f = Foo()\n self.assertEqual(f.x, \"xfield\")\n f.x = 12\n self.assertEqual(f.x, 12)\n self.assertDictEqual(Foo.__dict__[\"x\"].serializable_value(f), {\"value\": 12})\n f.x = 15\n self.assertEqual(f.x, 15)\n self.assertDictEqual(Foo.__dict__[\"x\"].serializable_value(f), {\"value\": 15})\n f.x = dict(value=32)\n self.assertDictEqual(Foo.__dict__[\"x\"].serializable_value(f), {\"value\": 32})\n f.x = None\n self.assertIs(Foo.__dict__[\"x\"].serializable_value(f), None)\n\n def test_default(self):\n class Foo(HasProps):\n y = NumberSpec(default=12)\n f = Foo()\n self.assertEqual(f.y, 12)\n self.assertDictEqual(Foo.__dict__[\"y\"].serializable_value(f), {\"value\": 12})\n f.y = \"y1\"\n self.assertEqual(f.y, \"y1\")\n # Once we set a concrete value, the default is ignored, because it is unused\n f.y = 32\n self.assertEqual(f.y, 32)\n self.assertDictEqual(Foo.__dict__[\"y\"].serializable_value(f), {\"value\": 32})\n\n def test_multiple_instances(self):\n class Foo(HasProps):\n x = NumberSpec(\"xfield\")\n\n a = Foo()\n b = Foo()\n a.x = 13\n b.x = 14\n self.assertEqual(a.x, 13)\n self.assertEqual(b.x, 14)\n self.assertDictEqual(Foo.__dict__[\"x\"].serializable_value(a), {\"value\": 13})\n self.assertDictEqual(Foo.__dict__[\"x\"].serializable_value(b), {\"value\": 14})\n b.x = {\"field\": \"x3\"}\n self.assertDictEqual(Foo.__dict__[\"x\"].serializable_value(a), {\"value\": 13})\n self.assertDictEqual(Foo.__dict__[\"x\"].serializable_value(b), {\"field\": \"x3\"})\n\n def test_autocreate_no_parens(self):\n class Foo(HasProps):\n x = NumberSpec\n\n a = Foo()\n\n self.assertIs(a.x, None)\n a.x = 14\n self.assertEqual(a.x, 14)\n\n def test_set_from_json_keeps_mode(self):\n class Foo(HasProps):\n x = NumberSpec(default=None)\n\n a = Foo()\n\n self.assertIs(a.x, None)\n\n # set as a value\n a.x = 14\n self.assertEqual(a.x, 14)\n # set_from_json keeps the previous dict-ness or lack thereof\n a.set_from_json('x', dict(value=16))\n self.assertEqual(a.x, 16)\n # but regular assignment overwrites the previous dict-ness\n a.x = dict(value=17)\n self.assertDictEqual(a.x, dict(value=17))\n\n # set as a field\n a.x = \"bar\"\n self.assertEqual(a.x, \"bar\")\n # set_from_json keeps the previous dict-ness or lack thereof\n a.set_from_json('x', dict(field=\"foo\"))\n self.assertEqual(a.x, \"foo\")\n # but regular assignment overwrites the previous dict-ness\n a.x = dict(field=\"baz\")\n self.assertDictEqual(a.x, dict(field=\"baz\"))\n\nclass TestFontSizeSpec(unittest.TestCase):\n def test_font_size_from_string(self):\n class Foo(HasProps):\n x = FontSizeSpec(default=None)\n\n css_units = \"%|em|ex|ch|ic|rem|vw|vh|vi|vb|vmin|vmax|cm|mm|q|in|pc|pt|px\"\n\n a = Foo()\n self.assertIs(a.x, None)\n\n for unit in css_units.split(\"|\"):\n\n v = '10%s' % unit\n a.x = v\n self.assertEqual(a.x, dict(value=v))\n 
self.assertEqual(a.lookup('x').serializable_value(a), dict(value=v))\n\n v = '10.2%s' % unit\n a.x = v\n self.assertEqual(a.x, dict(value=v))\n self.assertEqual(a.lookup('x').serializable_value(a), dict(value=v))\n\n f = '_10%s' % unit\n a.x = f\n self.assertEqual(a.x, f)\n self.assertEqual(a.lookup('x').serializable_value(a), dict(field=f))\n\n f = '_10.2%s' % unit\n a.x = f\n self.assertEqual(a.x, f)\n self.assertEqual(a.lookup('x').serializable_value(a), dict(field=f))\n\n for unit in css_units.upper().split(\"|\"):\n v = '10%s' % unit\n a.x = v\n self.assertEqual(a.x, dict(value=v))\n self.assertEqual(a.lookup('x').serializable_value(a), dict(value=v))\n\n v = '10.2%s' % unit\n a.x = v\n self.assertEqual(a.x, dict(value=v))\n self.assertEqual(a.lookup('x').serializable_value(a), dict(value=v))\n\n f = '_10%s' % unit\n a.x = f\n self.assertEqual(a.x, f)\n self.assertEqual(a.lookup('x').serializable_value(a), dict(field=f))\n\n f = '_10.2%s' % unit\n a.x = f\n self.assertEqual(a.x, f)\n self.assertEqual(a.lookup('x').serializable_value(a), dict(field=f))\n\nclass TestAngleSpec(unittest.TestCase):\n def test_default_none(self):\n class Foo(HasProps):\n x = AngleSpec(None)\n\n a = Foo()\n\n self.assertIs(a.x, None)\n self.assertEqual(a.x_units, 'rad')\n a.x = 14\n self.assertEqual(a.x, 14)\n self.assertEqual(a.x_units, 'rad')\n\n def test_autocreate_no_parens(self):\n class Foo(HasProps):\n x = AngleSpec\n\n a = Foo()\n\n self.assertIs(a.x, None)\n self.assertEqual(a.x_units, 'rad')\n a.x = 14\n self.assertEqual(a.x, 14)\n self.assertEqual(a.x_units, 'rad')\n\n def test_default_value(self):\n class Foo(HasProps):\n x = AngleSpec(default=14)\n\n a = Foo()\n\n self.assertEqual(a.x, 14)\n self.assertEqual(a.x_units, 'rad')\n\n def test_setting_dict_sets_units(self):\n class Foo(HasProps):\n x = AngleSpec(default=14)\n\n a = Foo()\n\n self.assertEqual(a.x, 14)\n self.assertEqual(a.x_units, 'rad')\n\n a.x = { 'value' : 180, 'units' : 'deg' }\n self.assertDictEqual(a.x, { 'value' : 180 })\n self.assertEqual(a.x_units, 'deg')\n\n def test_setting_json_sets_units_keeps_dictness(self):\n class Foo(HasProps):\n x = AngleSpec(default=14)\n\n a = Foo()\n\n self.assertEqual(a.x, 14)\n self.assertEqual(a.x_units, 'rad')\n\n a.set_from_json('x', { 'value' : 180, 'units' : 'deg' })\n self.assertEqual(a.x, 180)\n self.assertEqual(a.x_units, 'deg')\n\n def test_setting_dict_does_not_modify_original_dict(self):\n class Foo(HasProps):\n x = AngleSpec(default=14)\n\n a = Foo()\n\n self.assertEqual(a.x, 14)\n self.assertEqual(a.x_units, 'rad')\n\n new_value = { 'value' : 180, 'units' : 'deg' }\n new_value_copy = copy(new_value)\n self.assertDictEqual(new_value_copy, new_value)\n\n a.x = new_value\n self.assertDictEqual(a.x, { 'value' : 180 })\n self.assertEqual(a.x_units, 'deg')\n\n self.assertDictEqual(new_value_copy, new_value)\n\nclass TestDistanceSpec(unittest.TestCase):\n def test_default_none(self):\n class Foo(HasProps):\n x = DistanceSpec(None)\n\n a = Foo()\n\n self.assertIs(a.x, None)\n self.assertEqual(a.x_units, 'data')\n a.x = 14\n self.assertEqual(a.x, 14)\n self.assertEqual(a.x_units, 'data')\n\n def test_autocreate_no_parens(self):\n class Foo(HasProps):\n x = DistanceSpec\n\n a = Foo()\n\n self.assertIs(a.x, None)\n self.assertEqual(a.x_units, 'data')\n a.x = 14\n self.assertEqual(a.x, 14)\n self.assertEqual(a.x_units, 'data')\n\n def test_default_value(self):\n class Foo(HasProps):\n x = DistanceSpec(default=14)\n\n a = Foo()\n\n self.assertEqual(a.x, 14)\n self.assertEqual(a.x_units, 
'data')\n\nclass TestColorSpec(unittest.TestCase):\n\n def test_field(self):\n class Foo(HasProps):\n col = ColorSpec(\"colorfield\")\n desc = Foo.__dict__[\"col\"]\n f = Foo()\n self.assertEqual(f.col, \"colorfield\")\n self.assertDictEqual(desc.serializable_value(f), {\"field\": \"colorfield\"})\n f.col = \"myfield\"\n self.assertEqual(f.col, \"myfield\")\n self.assertDictEqual(desc.serializable_value(f), {\"field\": \"myfield\"})\n\n def test_field_default(self):\n class Foo(HasProps):\n col = ColorSpec(default=\"red\")\n desc = Foo.__dict__[\"col\"]\n f = Foo()\n self.assertEqual(f.col, \"red\")\n self.assertDictEqual(desc.serializable_value(f), {\"value\": \"red\"})\n f.col = \"myfield\"\n self.assertEqual(f.col, \"myfield\")\n self.assertDictEqual(desc.serializable_value(f), {\"field\": \"myfield\"})\n\n def test_default_tuple(self):\n class Foo(HasProps):\n col = ColorSpec(default=(128, 255, 124))\n desc = Foo.__dict__[\"col\"]\n f = Foo()\n self.assertEqual(f.col, (128, 255, 124))\n self.assertDictEqual(desc.serializable_value(f), {\"value\": \"rgb(128, 255, 124)\"})\n\n def test_fixed_value(self):\n class Foo(HasProps):\n col = ColorSpec(\"gray\")\n desc = Foo.__dict__[\"col\"]\n f = Foo()\n self.assertEqual(f.col, \"gray\")\n self.assertDictEqual(desc.serializable_value(f), {\"value\": \"gray\"})\n\n def test_named_value(self):\n class Foo(HasProps):\n col = ColorSpec(\"colorfield\")\n desc = Foo.__dict__[\"col\"]\n f = Foo()\n\n f.col = \"red\"\n self.assertEqual(f.col, \"red\")\n self.assertDictEqual(desc.serializable_value(f), {\"value\": \"red\"})\n f.col = \"forestgreen\"\n self.assertEqual(f.col, \"forestgreen\")\n self.assertDictEqual(desc.serializable_value(f), {\"value\": \"forestgreen\"})\n\n def test_case_insensitive_named_value(self):\n class Foo(HasProps):\n col = ColorSpec(\"colorfield\")\n desc = Foo.__dict__[\"col\"]\n f = Foo()\n\n f.col = \"RED\"\n self.assertEqual(f.col, \"RED\")\n self.assertDictEqual(desc.serializable_value(f), {\"value\": \"RED\"})\n f.col = \"ForestGreen\"\n self.assertEqual(f.col, \"ForestGreen\")\n self.assertDictEqual(desc.serializable_value(f), {\"value\": \"ForestGreen\"})\n\n def test_named_value_set_none(self):\n class Foo(HasProps):\n col = ColorSpec(\"colorfield\")\n desc = Foo.__dict__[\"col\"]\n f = Foo()\n f.col = None\n self.assertDictEqual(desc.serializable_value(f), {\"value\": None})\n\n def test_named_value_unset(self):\n class Foo(HasProps):\n col = ColorSpec(\"colorfield\")\n desc = Foo.__dict__[\"col\"]\n f = Foo()\n self.assertDictEqual(desc.serializable_value(f), {\"field\": \"colorfield\"})\n\n def test_named_color_overriding_default(self):\n class Foo(HasProps):\n col = ColorSpec(\"colorfield\")\n desc = Foo.__dict__[\"col\"]\n f = Foo()\n f.col = \"forestgreen\"\n self.assertEqual(f.col, \"forestgreen\")\n self.assertDictEqual(desc.serializable_value(f), {\"value\": \"forestgreen\"})\n f.col = \"myfield\"\n self.assertEqual(f.col, \"myfield\")\n self.assertDictEqual(desc.serializable_value(f), {\"field\": \"myfield\"})\n\n def test_hex_value(self):\n class Foo(HasProps):\n col = ColorSpec(\"colorfield\")\n desc = Foo.__dict__[\"col\"]\n f = Foo()\n f.col = \"#FF004A\"\n self.assertEqual(f.col, \"#FF004A\")\n self.assertDictEqual(desc.serializable_value(f), {\"value\": \"#FF004A\"})\n f.col = \"myfield\"\n self.assertEqual(f.col, \"myfield\")\n self.assertDictEqual(desc.serializable_value(f), {\"field\": \"myfield\"})\n\n def test_tuple_value(self):\n class Foo(HasProps):\n col = ColorSpec(\"colorfield\")\n desc = 
Foo.__dict__[\"col\"]\n f = Foo()\n f.col = (128, 200, 255)\n self.assertEqual(f.col, (128, 200, 255))\n self.assertDictEqual(desc.serializable_value(f), {\"value\": \"rgb(128, 200, 255)\"})\n f.col = \"myfield\"\n self.assertEqual(f.col, \"myfield\")\n self.assertDictEqual(desc.serializable_value(f), {\"field\": \"myfield\"})\n f.col = (100, 150, 200, 0.5)\n self.assertEqual(f.col, (100, 150, 200, 0.5))\n self.assertDictEqual(desc.serializable_value(f), {\"value\": \"rgba(100, 150, 200, 0.5)\"})\n\n def test_set_dict(self):\n class Foo(HasProps):\n col = ColorSpec(\"colorfield\")\n desc = Foo.__dict__[\"col\"]\n f = Foo()\n f.col = {\"field\": \"myfield\"}\n self.assertDictEqual(f.col, {\"field\": \"myfield\"})\n\n f.col = \"field2\"\n self.assertEqual(f.col, \"field2\")\n self.assertDictEqual(desc.serializable_value(f), {\"field\": \"field2\"})\n\nclass TestDashPattern(unittest.TestCase):\n\n def test_named(self):\n class Foo(HasProps):\n pat = DashPattern\n f = Foo()\n\n self.assertEqual(f.pat, [])\n f.pat = \"solid\"\n self.assertEqual(f.pat, [])\n f.pat = \"dashed\"\n self.assertEqual(f.pat, [6])\n f.pat = \"dotted\"\n self.assertEqual(f.pat, [2, 4])\n f.pat = \"dotdash\"\n self.assertEqual(f.pat, [2, 4, 6, 4])\n f.pat = \"dashdot\"\n self.assertEqual(f.pat, [6, 4, 2, 4])\n\n def test_string(self):\n class Foo(HasProps):\n pat = DashPattern\n f = Foo()\n\n f.pat = \"\"\n self.assertEqual(f.pat, [])\n f.pat = \"2\"\n self.assertEqual(f.pat, [2])\n f.pat = \"2 4\"\n self.assertEqual(f.pat, [2, 4])\n f.pat = \"2 4 6\"\n self.assertEqual(f.pat, [2, 4, 6])\n\n with self.assertRaises(ValueError):\n f.pat = \"abc 6\"\n\n def test_list(self):\n class Foo(HasProps):\n pat = DashPattern\n f = Foo()\n\n f.pat = ()\n self.assertEqual(f.pat, ())\n f.pat = (2,)\n self.assertEqual(f.pat, (2,))\n f.pat = (2, 4)\n self.assertEqual(f.pat, (2, 4))\n f.pat = (2, 4, 6)\n self.assertEqual(f.pat, (2, 4, 6))\n\n with self.assertRaises(ValueError):\n f.pat = (2, 4.2)\n with self.assertRaises(ValueError):\n f.pat = (2, \"a\")\n\n def test_invalid(self):\n class Foo(HasProps):\n pat = DashPattern\n f = Foo()\n\n with self.assertRaises(ValueError):\n f.pat = 10\n with self.assertRaises(ValueError):\n f.pat = 10.1\n with self.assertRaises(ValueError):\n f.pat = {}\n\n\nclass Foo(HasProps):\n pass\n\nclass Bar(HasProps):\n pass\n\nclass Baz(HasProps):\n pass\n\nclass TestProperties(unittest.TestCase):\n\n def test_Any(self):\n prop = Any()\n\n self.assertTrue(prop.is_valid(None))\n self.assertTrue(prop.is_valid(False))\n self.assertTrue(prop.is_valid(True))\n self.assertTrue(prop.is_valid(0))\n self.assertTrue(prop.is_valid(1))\n self.assertTrue(prop.is_valid(0.0))\n self.assertTrue(prop.is_valid(1.0))\n self.assertTrue(prop.is_valid(1.0+1.0j))\n self.assertTrue(prop.is_valid(\"\"))\n self.assertTrue(prop.is_valid(()))\n self.assertTrue(prop.is_valid([]))\n self.assertTrue(prop.is_valid({}))\n self.assertTrue(prop.is_valid(Foo()))\n\n def test_Bool(self):\n prop = Bool()\n\n self.assertTrue(prop.is_valid(None))\n self.assertTrue(prop.is_valid(False))\n self.assertTrue(prop.is_valid(True))\n self.assertFalse(prop.is_valid(0))\n self.assertFalse(prop.is_valid(1))\n self.assertFalse(prop.is_valid(0.0))\n self.assertFalse(prop.is_valid(1.0))\n self.assertFalse(prop.is_valid(1.0+1.0j))\n self.assertFalse(prop.is_valid(\"\"))\n self.assertFalse(prop.is_valid(()))\n self.assertFalse(prop.is_valid([]))\n self.assertFalse(prop.is_valid({}))\n self.assertFalse(prop.is_valid(Foo()))\n\n 
self.assertTrue(prop.is_valid(np.bool8(False)))\n self.assertTrue(prop.is_valid(np.bool8(True)))\n self.assertFalse(prop.is_valid(np.int8(0)))\n self.assertFalse(prop.is_valid(np.int8(1)))\n self.assertFalse(prop.is_valid(np.int16(0)))\n self.assertFalse(prop.is_valid(np.int16(1)))\n self.assertFalse(prop.is_valid(np.int32(0)))\n self.assertFalse(prop.is_valid(np.int32(1)))\n self.assertFalse(prop.is_valid(np.int64(0)))\n self.assertFalse(prop.is_valid(np.int64(1)))\n self.assertFalse(prop.is_valid(np.uint8(0)))\n self.assertFalse(prop.is_valid(np.uint8(1)))\n self.assertFalse(prop.is_valid(np.uint16(0)))\n self.assertFalse(prop.is_valid(np.uint16(1)))\n self.assertFalse(prop.is_valid(np.uint32(0)))\n self.assertFalse(prop.is_valid(np.uint32(1)))\n self.assertFalse(prop.is_valid(np.uint64(0)))\n self.assertFalse(prop.is_valid(np.uint64(1)))\n self.assertFalse(prop.is_valid(np.float16(0)))\n self.assertFalse(prop.is_valid(np.float16(1)))\n self.assertFalse(prop.is_valid(np.float32(0)))\n self.assertFalse(prop.is_valid(np.float32(1)))\n self.assertFalse(prop.is_valid(np.float64(0)))\n self.assertFalse(prop.is_valid(np.float64(1)))\n self.assertFalse(prop.is_valid(np.complex64(1.0+1.0j)))\n self.assertFalse(prop.is_valid(np.complex128(1.0+1.0j)))\n if hasattr(np, \"complex256\"):\n self.assertFalse(prop.is_valid(np.complex256(1.0+1.0j)))\n\n def test_Int(self):\n prop = Int()\n\n self.assertTrue(prop.is_valid(None))\n # TODO: self.assertFalse(prop.is_valid(False))\n # TODO: self.assertFalse(prop.is_valid(True))\n self.assertTrue(prop.is_valid(0))\n self.assertTrue(prop.is_valid(1))\n self.assertFalse(prop.is_valid(0.0))\n self.assertFalse(prop.is_valid(1.0))\n self.assertFalse(prop.is_valid(1.0+1.0j))\n self.assertFalse(prop.is_valid(\"\"))\n self.assertFalse(prop.is_valid(()))\n self.assertFalse(prop.is_valid([]))\n self.assertFalse(prop.is_valid({}))\n self.assertFalse(prop.is_valid(Foo()))\n\n # TODO: self.assertFalse(prop.is_valid(np.bool8(False)))\n # TODO: self.assertFalse(prop.is_valid(np.bool8(True)))\n self.assertTrue(prop.is_valid(np.int8(0)))\n self.assertTrue(prop.is_valid(np.int8(1)))\n self.assertTrue(prop.is_valid(np.int16(0)))\n self.assertTrue(prop.is_valid(np.int16(1)))\n self.assertTrue(prop.is_valid(np.int32(0)))\n self.assertTrue(prop.is_valid(np.int32(1)))\n self.assertTrue(prop.is_valid(np.int64(0)))\n self.assertTrue(prop.is_valid(np.int64(1)))\n self.assertTrue(prop.is_valid(np.uint8(0)))\n self.assertTrue(prop.is_valid(np.uint8(1)))\n self.assertTrue(prop.is_valid(np.uint16(0)))\n self.assertTrue(prop.is_valid(np.uint16(1)))\n self.assertTrue(prop.is_valid(np.uint32(0)))\n self.assertTrue(prop.is_valid(np.uint32(1)))\n self.assertTrue(prop.is_valid(np.uint64(0)))\n self.assertTrue(prop.is_valid(np.uint64(1)))\n self.assertFalse(prop.is_valid(np.float16(0)))\n self.assertFalse(prop.is_valid(np.float16(1)))\n self.assertFalse(prop.is_valid(np.float32(0)))\n self.assertFalse(prop.is_valid(np.float32(1)))\n self.assertFalse(prop.is_valid(np.float64(0)))\n self.assertFalse(prop.is_valid(np.float64(1)))\n self.assertFalse(prop.is_valid(np.complex64(1.0+1.0j)))\n self.assertFalse(prop.is_valid(np.complex128(1.0+1.0j)))\n if hasattr(np, \"complex256\"):\n self.assertFalse(prop.is_valid(np.complex256(1.0+1.0j)))\n\n def test_Float(self):\n prop = Float()\n\n self.assertTrue(prop.is_valid(None))\n # TODO: self.assertFalse(prop.is_valid(False))\n # TODO: self.assertFalse(prop.is_valid(True))\n self.assertTrue(prop.is_valid(0))\n self.assertTrue(prop.is_valid(1))\n 
self.assertTrue(prop.is_valid(0.0))\n self.assertTrue(prop.is_valid(1.0))\n self.assertFalse(prop.is_valid(1.0+1.0j))\n self.assertFalse(prop.is_valid(\"\"))\n self.assertFalse(prop.is_valid(()))\n self.assertFalse(prop.is_valid([]))\n self.assertFalse(prop.is_valid({}))\n self.assertFalse(prop.is_valid(Foo()))\n\n # TODO: self.assertFalse(prop.is_valid(np.bool8(False)))\n # TODO: self.assertFalse(prop.is_valid(np.bool8(True)))\n self.assertTrue(prop.is_valid(np.int8(0)))\n self.assertTrue(prop.is_valid(np.int8(1)))\n self.assertTrue(prop.is_valid(np.int16(0)))\n self.assertTrue(prop.is_valid(np.int16(1)))\n self.assertTrue(prop.is_valid(np.int32(0)))\n self.assertTrue(prop.is_valid(np.int32(1)))\n self.assertTrue(prop.is_valid(np.int64(0)))\n self.assertTrue(prop.is_valid(np.int64(1)))\n self.assertTrue(prop.is_valid(np.uint8(0)))\n self.assertTrue(prop.is_valid(np.uint8(1)))\n self.assertTrue(prop.is_valid(np.uint16(0)))\n self.assertTrue(prop.is_valid(np.uint16(1)))\n self.assertTrue(prop.is_valid(np.uint32(0)))\n self.assertTrue(prop.is_valid(np.uint32(1)))\n self.assertTrue(prop.is_valid(np.uint64(0)))\n self.assertTrue(prop.is_valid(np.uint64(1)))\n self.assertTrue(prop.is_valid(np.float16(0)))\n self.assertTrue(prop.is_valid(np.float16(1)))\n self.assertTrue(prop.is_valid(np.float32(0)))\n self.assertTrue(prop.is_valid(np.float32(1)))\n self.assertTrue(prop.is_valid(np.float64(0)))\n self.assertTrue(prop.is_valid(np.float64(1)))\n self.assertFalse(prop.is_valid(np.complex64(1.0+1.0j)))\n self.assertFalse(prop.is_valid(np.complex128(1.0+1.0j)))\n if hasattr(np, \"complex256\"):\n self.assertFalse(prop.is_valid(np.complex256(1.0+1.0j)))\n\n def test_Complex(self):\n prop = Complex()\n\n self.assertTrue(prop.is_valid(None))\n # TODO: self.assertFalse(prop.is_valid(False))\n # TODO: self.assertFalse(prop.is_valid(True))\n self.assertTrue(prop.is_valid(0))\n self.assertTrue(prop.is_valid(1))\n self.assertTrue(prop.is_valid(0.0))\n self.assertTrue(prop.is_valid(1.0))\n self.assertTrue(prop.is_valid(1.0+1.0j))\n self.assertFalse(prop.is_valid(\"\"))\n self.assertFalse(prop.is_valid(()))\n self.assertFalse(prop.is_valid([]))\n self.assertFalse(prop.is_valid({}))\n self.assertFalse(prop.is_valid(Foo()))\n\n # TODO: self.assertFalse(prop.is_valid(np.bool8(False)))\n # TODO: self.assertFalse(prop.is_valid(np.bool8(True)))\n self.assertTrue(prop.is_valid(np.int8(0)))\n self.assertTrue(prop.is_valid(np.int8(1)))\n self.assertTrue(prop.is_valid(np.int16(0)))\n self.assertTrue(prop.is_valid(np.int16(1)))\n self.assertTrue(prop.is_valid(np.int32(0)))\n self.assertTrue(prop.is_valid(np.int32(1)))\n self.assertTrue(prop.is_valid(np.int64(0)))\n self.assertTrue(prop.is_valid(np.int64(1)))\n self.assertTrue(prop.is_valid(np.uint8(0)))\n self.assertTrue(prop.is_valid(np.uint8(1)))\n self.assertTrue(prop.is_valid(np.uint16(0)))\n self.assertTrue(prop.is_valid(np.uint16(1)))\n self.assertTrue(prop.is_valid(np.uint32(0)))\n self.assertTrue(prop.is_valid(np.uint32(1)))\n self.assertTrue(prop.is_valid(np.uint64(0)))\n self.assertTrue(prop.is_valid(np.uint64(1)))\n self.assertTrue(prop.is_valid(np.float16(0)))\n self.assertTrue(prop.is_valid(np.float16(1)))\n self.assertTrue(prop.is_valid(np.float32(0)))\n self.assertTrue(prop.is_valid(np.float32(1)))\n self.assertTrue(prop.is_valid(np.float64(0)))\n self.assertTrue(prop.is_valid(np.float64(1)))\n self.assertTrue(prop.is_valid(np.complex64(1.0+1.0j)))\n self.assertTrue(prop.is_valid(np.complex128(1.0+1.0j)))\n if hasattr(np, \"complex256\"):\n 
self.assertTrue(prop.is_valid(np.complex256(1.0+1.0j)))\n\n def test_String(self):\n prop = String()\n\n self.assertTrue(prop.is_valid(None))\n self.assertFalse(prop.is_valid(False))\n self.assertFalse(prop.is_valid(True))\n self.assertFalse(prop.is_valid(0))\n self.assertFalse(prop.is_valid(1))\n self.assertFalse(prop.is_valid(0.0))\n self.assertFalse(prop.is_valid(1.0))\n self.assertFalse(prop.is_valid(1.0+1.0j))\n self.assertTrue(prop.is_valid(\"\"))\n self.assertFalse(prop.is_valid(()))\n self.assertFalse(prop.is_valid([]))\n self.assertFalse(prop.is_valid({}))\n self.assertFalse(prop.is_valid(Foo()))\n\n def test_Regex(self):\n with self.assertRaises(TypeError):\n prop = Regex()\n\n prop = Regex(\"^x*$\")\n\n self.assertTrue(prop.is_valid(None))\n self.assertFalse(prop.is_valid(False))\n self.assertFalse(prop.is_valid(True))\n self.assertFalse(prop.is_valid(0))\n self.assertFalse(prop.is_valid(1))\n self.assertFalse(prop.is_valid(0.0))\n self.assertFalse(prop.is_valid(1.0))\n self.assertFalse(prop.is_valid(1.0+1.0j))\n self.assertTrue(prop.is_valid(\"\"))\n self.assertFalse(prop.is_valid(()))\n self.assertFalse(prop.is_valid([]))\n self.assertFalse(prop.is_valid({}))\n self.assertFalse(prop.is_valid(Foo()))\n\n def test_Seq(self):\n with self.assertRaises(TypeError):\n prop = Seq()\n\n prop = Seq(Int)\n\n self.assertTrue(prop.is_valid(None))\n self.assertFalse(prop.is_valid(False))\n self.assertFalse(prop.is_valid(True))\n self.assertFalse(prop.is_valid(0))\n self.assertFalse(prop.is_valid(1))\n self.assertFalse(prop.is_valid(0.0))\n self.assertFalse(prop.is_valid(1.0))\n self.assertFalse(prop.is_valid(1.0+1.0j))\n self.assertFalse(prop.is_valid(\"\"))\n self.assertTrue(prop.is_valid(()))\n self.assertTrue(prop.is_valid([]))\n self.assertTrue(prop.is_valid(np.array([])))\n self.assertFalse(prop.is_valid(set([])))\n self.assertFalse(prop.is_valid({}))\n self.assertTrue(prop.is_valid((1, 2)))\n self.assertTrue(prop.is_valid([1, 2]))\n self.assertTrue(prop.is_valid(np.array([1, 2])))\n self.assertFalse(prop.is_valid({1, 2}))\n self.assertFalse(prop.is_valid({1: 2}))\n self.assertFalse(prop.is_valid(Foo()))\n\n df = pd.DataFrame([1, 2])\n self.assertTrue(prop.is_valid(df.index))\n self.assertTrue(prop.is_valid(df.iloc[0]))\n\n def test_List(self):\n with self.assertRaises(TypeError):\n prop = List()\n\n prop = List(Int)\n\n self.assertTrue(prop.is_valid(None))\n self.assertFalse(prop.is_valid(False))\n self.assertFalse(prop.is_valid(True))\n self.assertFalse(prop.is_valid(0))\n self.assertFalse(prop.is_valid(1))\n self.assertFalse(prop.is_valid(0.0))\n self.assertFalse(prop.is_valid(1.0))\n self.assertFalse(prop.is_valid(1.0+1.0j))\n self.assertFalse(prop.is_valid(\"\"))\n self.assertFalse(prop.is_valid(()))\n self.assertTrue(prop.is_valid([]))\n self.assertFalse(prop.is_valid({}))\n self.assertFalse(prop.is_valid(Foo()))\n\n def test_Dict(self):\n with self.assertRaises(TypeError):\n prop = Dict()\n\n prop = Dict(String, List(Int))\n\n self.assertTrue(prop.is_valid(None))\n self.assertFalse(prop.is_valid(False))\n self.assertFalse(prop.is_valid(True))\n self.assertFalse(prop.is_valid(0))\n self.assertFalse(prop.is_valid(1))\n self.assertFalse(prop.is_valid(0.0))\n self.assertFalse(prop.is_valid(1.0))\n self.assertFalse(prop.is_valid(1.0+1.0j))\n self.assertFalse(prop.is_valid(\"\"))\n self.assertFalse(prop.is_valid(()))\n self.assertFalse(prop.is_valid([]))\n self.assertTrue(prop.is_valid({}))\n self.assertFalse(prop.is_valid(Foo()))\n\n def test_Tuple(self):\n with 
self.assertRaises(TypeError):\n prop = Tuple()\n\n with self.assertRaises(TypeError):\n prop = Tuple(Int)\n\n prop = Tuple(Int, String, List(Int))\n\n self.assertTrue(prop.is_valid(None))\n self.assertFalse(prop.is_valid(False))\n self.assertFalse(prop.is_valid(True))\n self.assertFalse(prop.is_valid(0))\n self.assertFalse(prop.is_valid(1))\n self.assertFalse(prop.is_valid(0.0))\n self.assertFalse(prop.is_valid(1.0))\n self.assertFalse(prop.is_valid(1.0+1.0j))\n self.assertFalse(prop.is_valid(\"\"))\n self.assertFalse(prop.is_valid(()))\n self.assertFalse(prop.is_valid([]))\n self.assertFalse(prop.is_valid({}))\n self.assertFalse(prop.is_valid(Foo()))\n\n self.assertTrue(prop.is_valid((1, \"\", [1, 2, 3])))\n self.assertFalse(prop.is_valid((1.0, \"\", [1, 2, 3])))\n self.assertFalse(prop.is_valid((1, True, [1, 2, 3])))\n self.assertFalse(prop.is_valid((1, \"\", (1, 2, 3))))\n self.assertFalse(prop.is_valid((1, \"\", [1, 2, \"xyz\"])))\n\n def test_Instance(self):\n with self.assertRaises(TypeError):\n prop = Instance()\n\n prop = Instance(Foo)\n\n self.assertTrue(prop.is_valid(None))\n self.assertFalse(prop.is_valid(False))\n self.assertFalse(prop.is_valid(True))\n self.assertFalse(prop.is_valid(0))\n self.assertFalse(prop.is_valid(1))\n self.assertFalse(prop.is_valid(0.0))\n self.assertFalse(prop.is_valid(1.0))\n self.assertFalse(prop.is_valid(1.0+1.0j))\n self.assertFalse(prop.is_valid(\"\"))\n self.assertFalse(prop.is_valid(()))\n self.assertFalse(prop.is_valid([]))\n self.assertFalse(prop.is_valid({}))\n self.assertTrue(prop.is_valid(Foo()))\n\n self.assertFalse(prop.is_valid(Bar()))\n self.assertFalse(prop.is_valid(Baz()))\n\n def test_Instance_from_json(self):\n class MapOptions(HasProps):\n lat = Float\n lng = Float\n zoom = Int(12)\n\n v1 = Instance(MapOptions).from_json(dict(lat=1, lng=2))\n v2 = MapOptions(lat=1, lng=2)\n self.assertTrue(v1.equals(v2))\n\n def test_Interval(self):\n with self.assertRaises(TypeError):\n prop = Interval()\n\n with self.assertRaises(ValueError):\n prop = Interval(Int, 0.0, 1.0)\n\n prop = Interval(Int, 0, 255)\n\n self.assertTrue(prop.is_valid(None))\n # TODO: self.assertFalse(prop.is_valid(False))\n # TODO: self.assertFalse(prop.is_valid(True))\n self.assertTrue(prop.is_valid(0))\n self.assertTrue(prop.is_valid(1))\n self.assertFalse(prop.is_valid(0.0))\n self.assertFalse(prop.is_valid(1.0))\n self.assertFalse(prop.is_valid(1.0+1.0j))\n self.assertFalse(prop.is_valid(\"\"))\n self.assertFalse(prop.is_valid(()))\n self.assertFalse(prop.is_valid([]))\n self.assertFalse(prop.is_valid({}))\n self.assertFalse(prop.is_valid(Foo()))\n\n self.assertTrue(prop.is_valid(127))\n self.assertFalse(prop.is_valid(-1))\n self.assertFalse(prop.is_valid(256))\n\n prop = Interval(Float, 0.0, 1.0)\n\n self.assertTrue(prop.is_valid(None))\n # TODO: self.assertFalse(prop.is_valid(False))\n # TODO: self.assertFalse(prop.is_valid(True))\n self.assertTrue(prop.is_valid(0))\n self.assertTrue(prop.is_valid(1))\n self.assertTrue(prop.is_valid(0.0))\n self.assertTrue(prop.is_valid(1.0))\n self.assertFalse(prop.is_valid(1.0+1.0j))\n self.assertFalse(prop.is_valid(\"\"))\n self.assertFalse(prop.is_valid(()))\n self.assertFalse(prop.is_valid([]))\n self.assertFalse(prop.is_valid({}))\n self.assertFalse(prop.is_valid(Foo()))\n\n self.assertTrue(prop.is_valid(0.5))\n self.assertFalse(prop.is_valid(-0.001))\n self.assertFalse(prop.is_valid( 1.001))\n\n def test_Either(self):\n with self.assertRaises(TypeError):\n prop = Either()\n\n prop = Either(Interval(Int, 0, 100), 
Regex(\"^x*$\"), List(Int))\n\n self.assertTrue(prop.is_valid(None))\n # TODO: self.assertFalse(prop.is_valid(False))\n # TODO: self.assertFalse(prop.is_valid(True))\n self.assertTrue(prop.is_valid(0))\n self.assertTrue(prop.is_valid(1))\n self.assertFalse(prop.is_valid(0.0))\n self.assertFalse(prop.is_valid(1.0))\n self.assertFalse(prop.is_valid(1.0+1.0j))\n self.assertTrue(prop.is_valid(\"\"))\n self.assertFalse(prop.is_valid(()))\n self.assertTrue(prop.is_valid([]))\n self.assertFalse(prop.is_valid({}))\n self.assertFalse(prop.is_valid(Foo()))\n\n self.assertTrue(prop.is_valid(100))\n self.assertFalse(prop.is_valid(-100))\n self.assertTrue(prop.is_valid(\"xxx\"))\n self.assertFalse(prop.is_valid(\"yyy\"))\n self.assertTrue(prop.is_valid([1, 2, 3]))\n self.assertFalse(prop.is_valid([1, 2, \"\"]))\n\n def test_Enum(self):\n with self.assertRaises(TypeError):\n prop = Enum()\n\n with self.assertRaises(TypeError):\n prop = Enum(\"red\", \"green\", 1)\n\n with self.assertRaises(TypeError):\n prop = Enum(\"red\", \"green\", \"red\")\n\n prop = Enum(\"red\", \"green\", \"blue\")\n\n self.assertTrue(prop.is_valid(None))\n self.assertFalse(prop.is_valid(False))\n self.assertFalse(prop.is_valid(True))\n self.assertFalse(prop.is_valid(0))\n self.assertFalse(prop.is_valid(1))\n self.assertFalse(prop.is_valid(0.0))\n self.assertFalse(prop.is_valid(1.0))\n self.assertFalse(prop.is_valid(1.0+1.0j))\n self.assertFalse(prop.is_valid(\"\"))\n self.assertFalse(prop.is_valid(()))\n self.assertFalse(prop.is_valid([]))\n self.assertFalse(prop.is_valid({}))\n self.assertFalse(prop.is_valid(Foo()))\n\n self.assertTrue(prop.is_valid(\"red\"))\n self.assertTrue(prop.is_valid(\"green\"))\n self.assertTrue(prop.is_valid(\"blue\"))\n\n self.assertFalse(prop.is_valid(\"RED\"))\n self.assertFalse(prop.is_valid(\"GREEN\"))\n self.assertFalse(prop.is_valid(\"BLUE\"))\n\n self.assertFalse(prop.is_valid(\" red\"))\n self.assertFalse(prop.is_valid(\" green\"))\n self.assertFalse(prop.is_valid(\" blue\"))\n\n from bokeh.core.enums import LineJoin\n prop = Enum(LineJoin)\n\n self.assertTrue(prop.is_valid(None))\n self.assertFalse(prop.is_valid(False))\n self.assertFalse(prop.is_valid(True))\n self.assertFalse(prop.is_valid(0))\n self.assertFalse(prop.is_valid(1))\n self.assertFalse(prop.is_valid(0.0))\n self.assertFalse(prop.is_valid(1.0))\n self.assertFalse(prop.is_valid(1.0+1.0j))\n self.assertFalse(prop.is_valid(\"\"))\n self.assertFalse(prop.is_valid(()))\n self.assertFalse(prop.is_valid([]))\n self.assertFalse(prop.is_valid({}))\n self.assertFalse(prop.is_valid(Foo()))\n\n self.assertTrue(prop.is_valid(\"miter\"))\n self.assertTrue(prop.is_valid(\"round\"))\n self.assertTrue(prop.is_valid(\"bevel\"))\n\n self.assertFalse(prop.is_valid(\"MITER\"))\n self.assertFalse(prop.is_valid(\"ROUND\"))\n self.assertFalse(prop.is_valid(\"BEVEL\"))\n\n self.assertFalse(prop.is_valid(\" miter\"))\n self.assertFalse(prop.is_valid(\" round\"))\n self.assertFalse(prop.is_valid(\" bevel\"))\n\n from bokeh.core.enums import NamedColor\n prop = Enum(NamedColor)\n\n self.assertTrue(prop.is_valid(\"red\"))\n self.assertTrue(prop.is_valid(\"Red\"))\n self.assertTrue(prop.is_valid(\"RED\"))\n\n def test_Color(self):\n prop = Color()\n\n self.assertTrue(prop.is_valid(None))\n self.assertFalse(prop.is_valid(False))\n self.assertFalse(prop.is_valid(True))\n self.assertFalse(prop.is_valid(0))\n self.assertFalse(prop.is_valid(1))\n self.assertFalse(prop.is_valid(0.0))\n self.assertFalse(prop.is_valid(1.0))\n 
self.assertFalse(prop.is_valid(1.0+1.0j))\n self.assertFalse(prop.is_valid(\"\"))\n self.assertFalse(prop.is_valid(()))\n self.assertFalse(prop.is_valid([]))\n self.assertFalse(prop.is_valid({}))\n self.assertFalse(prop.is_valid(Foo()))\n\n self.assertTrue(prop.is_valid((0, 127, 255)))\n self.assertFalse(prop.is_valid((0, -127, 255)))\n self.assertFalse(prop.is_valid((0, 127)))\n self.assertFalse(prop.is_valid((0, 127, 1.0)))\n self.assertFalse(prop.is_valid((0, 127, 255, 255)))\n self.assertTrue(prop.is_valid((0, 127, 255, 1.0)))\n\n self.assertTrue(prop.is_valid(\"#00aaff\"))\n self.assertTrue(prop.is_valid(\"#00AAFF\"))\n self.assertTrue(prop.is_valid(\"#00AaFf\"))\n self.assertFalse(prop.is_valid(\"00aaff\"))\n self.assertFalse(prop.is_valid(\"00AAFF\"))\n self.assertFalse(prop.is_valid(\"00AaFf\"))\n self.assertFalse(prop.is_valid(\"#00AaFg\"))\n self.assertFalse(prop.is_valid(\"#00AaFff\"))\n\n self.assertTrue(prop.is_valid(\"blue\"))\n self.assertTrue(prop.is_valid(\"BLUE\"))\n self.assertFalse(prop.is_valid(\"foobar\"))\n\n self.assertEqual(prop.transform((0, 127, 255)), \"rgb(0, 127, 255)\")\n self.assertEqual(prop.transform((0, 127, 255, 0.1)), \"rgba(0, 127, 255, 0.1)\")\n\n def test_DashPattern(self):\n prop = DashPattern()\n\n self.assertTrue(prop.is_valid(None))\n self.assertFalse(prop.is_valid(False))\n self.assertFalse(prop.is_valid(True))\n self.assertFalse(prop.is_valid(0))\n self.assertFalse(prop.is_valid(1))\n self.assertFalse(prop.is_valid(0.0))\n self.assertFalse(prop.is_valid(1.0))\n self.assertFalse(prop.is_valid(1.0+1.0j))\n self.assertTrue(prop.is_valid(\"\"))\n self.assertTrue(prop.is_valid(()))\n self.assertTrue(prop.is_valid([]))\n self.assertFalse(prop.is_valid({}))\n self.assertFalse(prop.is_valid(Foo()))\n\n self.assertTrue(prop.is_valid(\"solid\"))\n self.assertTrue(prop.is_valid(\"dashed\"))\n self.assertTrue(prop.is_valid(\"dotted\"))\n self.assertTrue(prop.is_valid(\"dotdash\"))\n self.assertTrue(prop.is_valid(\"dashdot\"))\n self.assertFalse(prop.is_valid(\"DASHDOT\"))\n\n self.assertTrue(prop.is_valid([1, 2, 3]))\n self.assertFalse(prop.is_valid([1, 2, 3.0]))\n\n self.assertTrue(prop.is_valid(\"1 2 3\"))\n self.assertFalse(prop.is_valid(\"1 2 x\"))\n\n def test_Size(self):\n prop = Size()\n\n self.assertTrue(prop.is_valid(None))\n # TODO: self.assertFalse(prop.is_valid(False))\n # TODO: self.assertFalse(prop.is_valid(True))\n self.assertTrue(prop.is_valid(0))\n self.assertTrue(prop.is_valid(1))\n self.assertTrue(prop.is_valid(0.0))\n self.assertTrue(prop.is_valid(1.0))\n self.assertFalse(prop.is_valid(1.0+1.0j))\n self.assertFalse(prop.is_valid(\"\"))\n self.assertFalse(prop.is_valid(()))\n self.assertFalse(prop.is_valid([]))\n self.assertFalse(prop.is_valid({}))\n self.assertFalse(prop.is_valid(Foo()))\n\n self.assertTrue(prop.is_valid(100))\n self.assertTrue(prop.is_valid(100.1))\n self.assertFalse(prop.is_valid(-100))\n self.assertFalse(prop.is_valid(-0.001))\n\n def test_Percent(self):\n prop = Percent()\n\n self.assertTrue(prop.is_valid(None))\n # TODO: self.assertFalse(prop.is_valid(False))\n # TODO: self.assertFalse(prop.is_valid(True))\n self.assertTrue(prop.is_valid(0))\n self.assertTrue(prop.is_valid(1))\n self.assertTrue(prop.is_valid(0.0))\n self.assertTrue(prop.is_valid(1.0))\n self.assertFalse(prop.is_valid(1.0+1.0j))\n self.assertFalse(prop.is_valid(\"\"))\n self.assertFalse(prop.is_valid(()))\n self.assertFalse(prop.is_valid([]))\n self.assertFalse(prop.is_valid({}))\n self.assertFalse(prop.is_valid(Foo()))\n\n 
self.assertTrue(prop.is_valid(0.5))\n self.assertFalse(prop.is_valid(-0.001))\n self.assertFalse(prop.is_valid( 1.001))\n\n def test_Angle(self):\n prop = Angle()\n\n self.assertTrue(prop.is_valid(None))\n # TODO: self.assertFalse(prop.is_valid(False))\n # TODO: self.assertFalse(prop.is_valid(True))\n self.assertTrue(prop.is_valid(0))\n self.assertTrue(prop.is_valid(1))\n self.assertTrue(prop.is_valid(0.0))\n self.assertTrue(prop.is_valid(1.0))\n self.assertFalse(prop.is_valid(1.0+1.0j))\n self.assertFalse(prop.is_valid(\"\"))\n self.assertFalse(prop.is_valid(()))\n self.assertFalse(prop.is_valid([]))\n self.assertFalse(prop.is_valid({}))\n self.assertFalse(prop.is_valid(Foo()))\n\n def test_MinMaxBounds_with_no_datetime(self):\n prop = MinMaxBounds(accept_datetime=False)\n\n # Valid values\n self.assertTrue(prop.is_valid('auto'))\n self.assertTrue(prop.is_valid(None))\n self.assertTrue(prop.is_valid((12, 13)))\n self.assertTrue(prop.is_valid((-32, -13)))\n self.assertTrue(prop.is_valid((12.1, 13.1)))\n self.assertTrue(prop.is_valid((None, 13.1)))\n self.assertTrue(prop.is_valid((-22, None)))\n\n # Invalid values\n self.assertFalse(prop.is_valid('string'))\n self.assertFalse(prop.is_valid(12))\n self.assertFalse(prop.is_valid(('a', 'b')))\n self.assertFalse(prop.is_valid((13, 12)))\n self.assertFalse(prop.is_valid((13.1, 12.2)))\n self.assertFalse(prop.is_valid((datetime.date(2012, 10, 1), datetime.date(2012, 12, 2))))\n\n def test_MinMaxBounds_with_datetime(self):\n prop = MinMaxBounds(accept_datetime=True)\n\n # Valid values\n self.assertTrue(prop.is_valid((datetime.date(2012, 10, 1), datetime.date(2012, 12, 2))))\n\n # Invalid values\n self.assertFalse(prop.is_valid((datetime.date(2012, 10, 1), 22)))\n\ndef test_HasProps_equals():\n class Foo(HasProps):\n x = Int(12)\n y = String(\"hello\")\n z = List(Int, [1,2,3])\n\n class FooUnrelated(HasProps):\n x = Int(12)\n y = String(\"hello\")\n z = List(Int, [1,2,3])\n\n v = Foo().equals(Foo())\n assert v is True\n\n v = Foo(x=1).equals(Foo(x=1))\n assert v is True\n\n v = Foo(x=1).equals(Foo(x=2))\n assert v is False\n\n v = Foo(x=1).equals(1)\n assert v is False\n\n v = Foo().equals(FooUnrelated())\n assert v is False\n\ndef test_HasProps_clone():\n p1 = Plot(plot_width=1000)\n c1 = p1.properties_with_values(include_defaults=False)\n p2 = p1._clone()\n c2 = p2.properties_with_values(include_defaults=False)\n assert c1 == c2\n\ndef test_HasProps_pretty():\n class Foo1(HasProps):\n a = Int(12)\n b = String(\"hello\")\n\n assert Foo1().pretty() == \"bokeh.core.tests.test_properties.Foo1(a=12, b='hello')\"\n\n class Foo2(HasProps):\n a = Int(12)\n b = String(\"hello\")\n c = List(Int, [1, 2, 3])\n\n assert Foo2().pretty() == \"bokeh.core.tests.test_properties.Foo2(a=12, b='hello', c=[1, 2, 3])\"\n\n class Foo3(HasProps):\n a = Int(12)\n b = String(\"hello\")\n c = List(Int, [1, 2, 3])\n d = Float(None)\n\n assert Foo3().pretty() == \"bokeh.core.tests.test_properties.Foo3(a=12, b='hello', c=[1, 2, 3], d=None)\"\n\n class Foo4(HasProps):\n a = Int(12)\n b = String(\"hello\")\n c = List(Int, [1, 2, 3])\n d = Float(None)\n e = Instance(Foo1, lambda: Foo1())\n\n assert Foo4().pretty() == \"\"\"\\\nbokeh.core.tests.test_properties.Foo4(\n a=12,\n b='hello',\n c=[1, 2, 3],\n d=None,\n e=bokeh.core.tests.test_properties.Foo1(a=12, b='hello'))\"\"\"\n\n class Foo5(HasProps):\n foo6 = Any # can't use Instance(\".core.tests.test_properties.Foo6\")\n\n class Foo6(HasProps):\n foo5 = Instance(Foo5)\n\n f5 = Foo5()\n f6 = Foo6(foo5=f5)\n f5.foo6 = f6\n\n 
assert f5.pretty() == \"\"\"\\\nbokeh.core.tests.test_properties.Foo5(\n foo6=bokeh.core.tests.test_properties.Foo6(\n foo5=bokeh.core.tests.test_properties.Foo5(...)))\"\"\"\n\ndef test_field_function():\n assert field(\"foo\") == dict(field=\"foo\")\n # TODO (bev) would like this to work I think\n #assert field(\"foo\", transform=\"junk\") == dict(field=\"foo\", transform=\"junk\")\n\ndef test_value_function():\n assert value(\"foo\") == dict(value=\"foo\")\n # TODO (bev) would like this to work I think\n #assert value(\"foo\", transform=\"junk\") == dict(value=\"foo\", transform=\"junk\")\n"
] | [
[
"pandas.melt"
],
[
"numpy.linspace"
],
[
"numpy.timedelta64",
"numpy.datetime64"
],
[
"numpy.random.randn",
"pandas.date_range"
],
[
"numpy.arange",
"numpy.sin"
],
[
"numpy.complex128",
"numpy.uint32",
"numpy.uint8",
"numpy.float16",
"numpy.int32",
"numpy.int8",
"pandas.DataFrame",
"numpy.int16",
"numpy.int64",
"numpy.uint16",
"numpy.uint64",
"numpy.bool8",
"numpy.float32",
"numpy.float64",
"numpy.complex256",
"numpy.array",
"numpy.complex64"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
itamblyn/pytorch_geometric | [
"67ed16492863378b8434b03713a75924f0cc5df1",
"86308313d6f1af56e5931e2ca89bb1a867c10ff3",
"67ed16492863378b8434b03713a75924f0cc5df1",
"86308313d6f1af56e5931e2ca89bb1a867c10ff3",
"86308313d6f1af56e5931e2ca89bb1a867c10ff3",
"86308313d6f1af56e5931e2ca89bb1a867c10ff3",
"86308313d6f1af56e5931e2ca89bb1a867c10ff3",
"86308313d6f1af56e5931e2ca89bb1a867c10ff3",
"86308313d6f1af56e5931e2ca89bb1a867c10ff3",
"67ed16492863378b8434b03713a75924f0cc5df1",
"86308313d6f1af56e5931e2ca89bb1a867c10ff3",
"86308313d6f1af56e5931e2ca89bb1a867c10ff3"
] | [
"torch_geometric/nn/conv/han_conv.py",
"torch_geometric/transforms/one_hot_degree.py",
"torch_geometric/nn/conv/cg_conv.py",
"torch_geometric/loader/neighbor_sampler.py",
"examples/proteins_diff_pool.py",
"benchmark/citation/appnp.py",
"benchmark/kernel/sag_pool.py",
"torch_geometric/nn/models/schnet.py",
"test/nn/conv/test_han_conv.py",
"graphgym/custom_graphgym/train/example.py",
"benchmark/kernel/gcn.py",
"examples/graph_saint.py"
] | [
"from typing import Union, Dict, Optional, List\r\n\r\nimport torch\r\nfrom torch import Tensor, nn\r\nimport torch.nn.functional as F\r\n\r\nfrom torch_geometric.typing import NodeType, EdgeType, Metadata, Adj\r\nfrom torch_geometric.nn.dense import Linear\r\nfrom torch_geometric.utils import softmax\r\nfrom torch_geometric.nn.conv import MessagePassing\r\nfrom torch_geometric.nn.inits import glorot, reset\r\n\r\n\r\ndef group(xs: List[Tensor], q: nn.Parameter,\r\n k_lin: nn.Module) -> Optional[Tensor]:\r\n if len(xs) == 0:\r\n return None\r\n else:\r\n num_edge_types = len(xs)\r\n out = torch.stack(xs)\r\n attn_score = (q * torch.tanh(k_lin(out)).mean(1)).sum(-1)\r\n attn = F.softmax(attn_score, dim=0)\r\n out = torch.sum(attn.view(num_edge_types, 1, -1) * out, dim=0)\r\n return out\r\n\r\n\r\nclass HANConv(MessagePassing):\r\n r\"\"\"\r\n The Heterogenous Graph Attention Operator from the\r\n `\"Heterogenous Graph Attention Network\"\r\n <https://arxiv.org/pdf/1903.07293.pdf>`_ paper.\r\n\r\n .. note::\r\n\r\n For an example of using HANConv, see `examples/hetero/han_imdb.py\r\n <https://github.com/pyg-team/pytorch_geometric/blob/master/examples/\r\n hetero/han_imdb.py>`_.\r\n\r\n Args:\r\n in_channels (int or Dict[str, int]): Size of each input sample of every\r\n node type, or :obj:`-1` to derive the size from the first input(s)\r\n to the forward method.\r\n out_channels (int): Size of each output sample.\r\n metadata (Tuple[List[str], List[Tuple[str, str, str]]]): The metadata\r\n of the heterogeneous graph, *i.e.* its node and edge types given\r\n by a list of strings and a list of string triplets, respectively.\r\n See :meth:`torch_geometric.data.HeteroData.metadata` for more\r\n information.\r\n heads (int, optional): Number of multi-head-attentions.\r\n (default: :obj:`1`)\r\n negative_slope (float, optional): LeakyReLU angle of the negative\r\n slope. (default: :obj:`0.2`)\r\n dropout (float, optional): Dropout probability of the normalized\r\n attention coefficients which exposes each node to a stochastically\r\n sampled neighborhood during training. 
(default: :obj:`0`)\r\n **kwargs (optional): Additional arguments of\r\n :class:`torch_geometric.nn.conv.MessagePassing`.\r\n \"\"\"\r\n def __init__(\r\n self,\r\n in_channels: Union[int, Dict[str, int]],\r\n out_channels: int,\r\n metadata: Metadata,\r\n heads: int = 1,\r\n negative_slope=0.2,\r\n dropout: float = 0.0,\r\n **kwargs,\r\n ):\r\n super().__init__(aggr='add', node_dim=0, **kwargs)\r\n\r\n if not isinstance(in_channels, dict):\r\n in_channels = {node_type: in_channels for node_type in metadata[0]}\r\n\r\n self.heads = heads\r\n self.in_channels = in_channels\r\n self.out_channels = out_channels\r\n self.negative_slope = negative_slope\r\n self.metadata = metadata\r\n self.dropout = dropout\r\n self.k_lin = nn.Linear(out_channels, out_channels)\r\n self.q = nn.Parameter(torch.Tensor(1, out_channels))\r\n\r\n self.proj = nn.ModuleDict()\r\n for node_type, in_channels in self.in_channels.items():\r\n self.proj[node_type] = Linear(in_channels, out_channels)\r\n\r\n self.lin_src = nn.ParameterDict()\r\n self.lin_dst = nn.ParameterDict()\r\n dim = out_channels // heads\r\n for edge_type in metadata[1]:\r\n edge_type = '__'.join(edge_type)\r\n self.lin_src[edge_type] = nn.Parameter(torch.Tensor(1, heads, dim))\r\n self.lin_dst[edge_type] = nn.Parameter(torch.Tensor(1, heads, dim))\r\n\r\n self.reset_parameters()\r\n\r\n def reset_parameters(self):\r\n reset(self.proj)\r\n glorot(self.lin_src)\r\n glorot(self.lin_dst)\r\n self.k_lin.reset_parameters()\r\n glorot(self.q)\r\n\r\n def forward(\r\n self, x_dict: Dict[NodeType, Tensor],\r\n edge_index_dict: Dict[EdgeType,\r\n Adj]) -> Dict[NodeType, Optional[Tensor]]:\r\n r\"\"\"\r\n Args:\r\n x_dict (Dict[str, Tensor]): A dictionary holding input node\r\n features for each individual node type.\r\n edge_index_dict: (Dict[str, Union[Tensor, SparseTensor]]): A\r\n dictionary holding graph connectivity information for each\r\n individual edge type, either as a :obj:`torch.LongTensor` of\r\n shape :obj:`[2, num_edges]` or a\r\n :obj:`torch_sparse.SparseTensor`.\r\n\r\n :rtype: :obj:`Dict[str, Optional[Tensor]]` - The ouput node embeddings\r\n for each node type.\r\n In case a node type does not receive any message, its output will\r\n be set to :obj:`None`.\r\n \"\"\"\r\n H, D = self.heads, self.out_channels // self.heads\r\n x_node_dict, out_dict = {}, {}\r\n\r\n # Iterate over node types:\r\n for node_type, x_node in x_dict.items():\r\n x_node_dict[node_type] = self.proj[node_type](x_node).view(\r\n -1, H, D)\r\n out_dict[node_type] = []\r\n\r\n # Iterate over edge types:\r\n for edge_type, edge_index in edge_index_dict.items():\r\n src_type, _, dst_type = edge_type\r\n edge_type = '__'.join(edge_type)\r\n lin_src = self.lin_src[edge_type]\r\n lin_dst = self.lin_dst[edge_type]\r\n x_dst = x_node_dict[dst_type]\r\n alpha_src = (x_node_dict[src_type] * lin_src).sum(dim=-1)\r\n alpha_dst = (x_dst * lin_dst).sum(dim=-1)\r\n alpha = (alpha_src, alpha_dst)\r\n # propagate_type: (x_dst: Tensor, alpha: PairTensor)\r\n out = self.propagate(edge_index, x_dst=x_dst, alpha=alpha,\r\n size=None)\r\n\r\n out = F.relu(out)\r\n out_dict[dst_type].append(out)\r\n\r\n # iterate over node types:\r\n for node_type, outs in out_dict.items():\r\n out = group(outs, self.q, self.k_lin)\r\n\r\n if out is None:\r\n out_dict[node_type] = None\r\n continue\r\n out_dict[node_type] = out\r\n\r\n return out_dict\r\n\r\n def message(self, x_dst_i: Tensor, alpha_i: Tensor, alpha_j: Tensor,\r\n index: Tensor, ptr: Optional[Tensor],\r\n size_i: Optional[int]) -> 
Tensor:\r\n\r\n alpha = alpha_j + alpha_i\r\n alpha = F.leaky_relu(alpha, self.negative_slope)\r\n alpha = softmax(alpha, index, ptr, size_i)\r\n alpha = F.dropout(alpha, p=self.dropout, training=self.training)\r\n out = x_dst_i * alpha.view(-1, self.heads, 1)\r\n return out.view(-1, self.out_channels)\r\n\r\n def __repr__(self) -> str:\r\n return (f'{self.__class__.__name__}({self.out_channels}, '\r\n f'heads={self.heads})')\r\n",
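A minimal usage sketch for the HANConv layer recorded above; the toy node counts, feature sizes and the ('author', 'writes', 'paper') relation are illustrative assumptions, not part of the recorded source.

import torch
from torch_geometric.nn import HANConv

# Toy heterogeneous graph: 6 authors with 16 features, 5 papers with 12 features.
x_dict = {'author': torch.randn(6, 16), 'paper': torch.randn(5, 12)}
edge_index = torch.stack([
    torch.randint(0, 6, (10,)),   # source author ids
    torch.randint(0, 5, (10,)),   # target paper ids
])
edge_index_dict = {('author', 'writes', 'paper'): edge_index}

metadata = (['author', 'paper'], [('author', 'writes', 'paper')])
conv = HANConv(in_channels={'author': 16, 'paper': 12}, out_channels=32,
               metadata=metadata, heads=4)

out_dict = conv(x_dict, edge_index_dict)
# out_dict['paper'] has shape [5, 32]; 'author' receives no messages here,
# so out_dict['author'] is None, as documented in the class above.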
"import torch\nimport torch.nn.functional as F\n\nfrom torch_geometric.utils import degree\nfrom torch_geometric.transforms import BaseTransform\n\n\nclass OneHotDegree(BaseTransform):\n r\"\"\"Adds the node degree as one hot encodings to the node features.\n\n Args:\n max_degree (int): Maximum degree.\n in_degree (bool, optional): If set to :obj:`True`, will compute the\n in-degree of nodes instead of the out-degree.\n (default: :obj:`False`)\n cat (bool, optional): Concat node degrees to node features instead\n of replacing them. (default: :obj:`True`)\n \"\"\"\n def __init__(self, max_degree, in_degree=False, cat=True):\n self.max_degree = max_degree\n self.in_degree = in_degree\n self.cat = cat\n\n def __call__(self, data):\n idx, x = data.edge_index[1 if self.in_degree else 0], data.x\n deg = degree(idx, data.num_nodes, dtype=torch.long)\n deg = F.one_hot(deg, num_classes=self.max_degree + 1).to(torch.float)\n\n if x is not None and self.cat:\n x = x.view(-1, 1) if x.dim() == 1 else x\n data.x = torch.cat([x, deg.to(x.dtype)], dim=-1)\n else:\n data.x = deg\n\n return data\n\n def __repr__(self) -> str:\n return f'{self.__class__.__name__}({self.max_degree})'\n",
"from typing import Union, Tuple\nfrom torch_geometric.typing import PairTensor, Adj, OptTensor\n\nimport torch\nfrom torch import Tensor\nimport torch.nn.functional as F\nfrom torch.nn import Linear, BatchNorm1d\nfrom torch_geometric.nn.conv import MessagePassing\n\n\nclass CGConv(MessagePassing):\n r\"\"\"The crystal graph convolutional operator from the\n `\"Crystal Graph Convolutional Neural Networks for an\n Accurate and Interpretable Prediction of Material Properties\"\n <https://journals.aps.org/prl/abstract/10.1103/PhysRevLett.120.145301>`_\n paper\n\n .. math::\n \\mathbf{x}^{\\prime}_i = \\mathbf{x}_i + \\sum_{j \\in \\mathcal{N}(i)}\n \\sigma \\left( \\mathbf{z}_{i,j} \\mathbf{W}_f + \\mathbf{b}_f \\right)\n \\odot g \\left( \\mathbf{z}_{i,j} \\mathbf{W}_s + \\mathbf{b}_s \\right)\n\n where :math:`\\mathbf{z}_{i,j} = [ \\mathbf{x}_i, \\mathbf{x}_j,\n \\mathbf{e}_{i,j} ]` denotes the concatenation of central node features,\n neighboring node features and edge features.\n In addition, :math:`\\sigma` and :math:`g` denote the sigmoid and softplus\n functions, respectively.\n\n Args:\n channels (int or tuple): Size of each input sample. A tuple\n corresponds to the sizes of source and target dimensionalities.\n dim (int, optional): Edge feature dimensionality. (default: :obj:`0`)\n aggr (string, optional): The aggregation operator to use\n (:obj:`\"add\"`, :obj:`\"mean\"`, :obj:`\"max\"`).\n (default: :obj:`\"add\"`)\n batch_norm (bool, optional): If set to :obj:`True`, will make use of\n batch normalization. (default: :obj:`False`)\n bias (bool, optional): If set to :obj:`False`, the layer will not learn\n an additive bias. (default: :obj:`True`)\n **kwargs (optional): Additional arguments of\n :class:`torch_geometric.nn.conv.MessagePassing`.\n\n Shapes:\n - **input:**\n node features :math:`(|\\mathcal{V}|, F)` or\n :math:`((|\\mathcal{V_s}|, F_{s}), (|\\mathcal{V_t}|, F_{t}))`\n if bipartite,\n edge indices :math:`(2, |\\mathcal{E}|)`,\n edge features :math:`(|\\mathcal{E}|, D)` *(optional)*\n - **output:** node features :math:`(|\\mathcal{V}|, F)` or\n :math:`(|\\mathcal{V_t}|, F_{t})` if bipartite\n \"\"\"\n def __init__(self, channels: Union[int, Tuple[int, int]], dim: int = 0,\n aggr: str = 'add', batch_norm: bool = False,\n bias: bool = True, **kwargs):\n super().__init__(aggr=aggr, **kwargs)\n self.channels = channels\n self.dim = dim\n self.batch_norm = batch_norm\n\n if isinstance(channels, int):\n channels = (channels, channels)\n\n self.lin_f = Linear(sum(channels) + dim, channels[1], bias=bias)\n self.lin_s = Linear(sum(channels) + dim, channels[1], bias=bias)\n if batch_norm:\n self.bn = BatchNorm1d(channels[1])\n else:\n self.bn = None\n\n self.reset_parameters()\n\n def reset_parameters(self):\n self.lin_f.reset_parameters()\n self.lin_s.reset_parameters()\n if self.bn is not None:\n self.bn.reset_parameters()\n\n def forward(self, x: Union[Tensor, PairTensor], edge_index: Adj,\n edge_attr: OptTensor = None) -> Tensor:\n \"\"\"\"\"\"\n if isinstance(x, Tensor):\n x: PairTensor = (x, x)\n\n # propagate_type: (x: PairTensor, edge_attr: OptTensor)\n out = self.propagate(edge_index, x=x, edge_attr=edge_attr, size=None)\n out = out if self.bn is None else self.bn(out)\n out += x[1]\n return out\n\n def message(self, x_i, x_j, edge_attr: OptTensor) -> Tensor:\n if edge_attr is None:\n z = torch.cat([x_i, x_j], dim=-1)\n else:\n z = torch.cat([x_i, x_j, edge_attr], dim=-1)\n return self.lin_f(z).sigmoid() * F.softplus(self.lin_s(z))\n\n def __repr__(self) -> str:\n return 
f'{self.__class__.__name__}({self.channels}, dim={self.dim})'\n",
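A minimal usage sketch for the CGConv operator recorded above; the node count, feature size and edge-feature dimensionality are assumed toy values.

import torch
from torch_geometric.nn import CGConv

x = torch.randn(4, 16)                    # 4 nodes with 16 features each
edge_index = torch.randint(0, 4, (2, 6))  # 6 random directed edges
edge_attr = torch.randn(6, 3)             # 3-dimensional edge features

conv = CGConv(channels=16, dim=3)
out = conv(x, edge_index, edge_attr)      # residual update keeps the shape: [4, 16]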
"from typing import List, Optional, Tuple, NamedTuple, Union, Callable\n\nimport torch\nfrom torch import Tensor\nfrom torch_sparse import SparseTensor\n\n\nclass EdgeIndex(NamedTuple):\n edge_index: Tensor\n e_id: Optional[Tensor]\n size: Tuple[int, int]\n\n def to(self, *args, **kwargs):\n edge_index = self.edge_index.to(*args, **kwargs)\n e_id = self.e_id.to(*args, **kwargs) if self.e_id is not None else None\n return EdgeIndex(edge_index, e_id, self.size)\n\n\nclass Adj(NamedTuple):\n adj_t: SparseTensor\n e_id: Optional[Tensor]\n size: Tuple[int, int]\n\n def to(self, *args, **kwargs):\n adj_t = self.adj_t.to(*args, **kwargs)\n e_id = self.e_id.to(*args, **kwargs) if self.e_id is not None else None\n return Adj(adj_t, e_id, self.size)\n\n\nclass NeighborSampler(torch.utils.data.DataLoader):\n r\"\"\"The neighbor sampler from the `\"Inductive Representation Learning on\n Large Graphs\" <https://arxiv.org/abs/1706.02216>`_ paper, which allows\n for mini-batch training of GNNs on large-scale graphs where full-batch\n training is not feasible.\n\n Given a GNN with :math:`L` layers and a specific mini-batch of nodes\n :obj:`node_idx` for which we want to compute embeddings, this module\n iteratively samples neighbors and constructs bipartite graphs that simulate\n the actual computation flow of GNNs.\n\n More specifically, :obj:`sizes` denotes how much neighbors we want to\n sample for each node in each layer.\n This module then takes in these :obj:`sizes` and iteratively samples\n :obj:`sizes[l]` for each node involved in layer :obj:`l`.\n In the next layer, sampling is repeated for the union of nodes that were\n already encountered.\n The actual computation graphs are then returned in reverse-mode, meaning\n that we pass messages from a larger set of nodes to a smaller one, until we\n reach the nodes for which we originally wanted to compute embeddings.\n\n Hence, an item returned by :class:`NeighborSampler` holds the current\n :obj:`batch_size`, the IDs :obj:`n_id` of all nodes involved in the\n computation, and a list of bipartite graph objects via the tuple\n :obj:`(edge_index, e_id, size)`, where :obj:`edge_index` represents the\n bipartite edges between source and target nodes, :obj:`e_id` denotes the\n IDs of original edges in the full graph, and :obj:`size` holds the shape\n of the bipartite graph.\n For each bipartite graph, target nodes are also included at the beginning\n of the list of source nodes so that one can easily apply skip-connections\n or add self-loops.\n\n .. warning::\n\n :class:`~torch_geometric.loader.NeighborSampler` is deprecated and will\n be removed in a future release.\n Use :class:`torch_geometric.loader.NeighborLoader` instead.\n\n .. 
note::\n\n For an example of using :obj:`NeighborSampler`, see\n `examples/reddit.py\n <https://github.com/pyg-team/pytorch_geometric/blob/master/examples/\n reddit.py>`_ or\n `examples/ogbn_products_sage.py\n <https://github.com/pyg-team/pytorch_geometric/blob/master/examples/\n ogbn_products_sage.py>`_.\n\n Args:\n edge_index (Tensor or SparseTensor): A :obj:`torch.LongTensor` or a\n :obj:`torch_sparse.SparseTensor` that defines the underlying graph\n connectivity/message passing flow.\n :obj:`edge_index` holds the indices of a (sparse) symmetric\n adjacency matrix.\n If :obj:`edge_index` is of type :obj:`torch.LongTensor`, its shape\n must be defined as :obj:`[2, num_edges]`, where messages from nodes\n :obj:`edge_index[0]` are sent to nodes in :obj:`edge_index[1]`\n (in case :obj:`flow=\"source_to_target\"`).\n If :obj:`edge_index` is of type :obj:`torch_sparse.SparseTensor`,\n its sparse indices :obj:`(row, col)` should relate to\n :obj:`row = edge_index[1]` and :obj:`col = edge_index[0]`.\n The major difference between both formats is that we need to input\n the *transposed* sparse adjacency matrix.\n sizes ([int]): The number of neighbors to sample for each node in each\n layer. If set to :obj:`sizes[l] = -1`, all neighbors are included\n in layer :obj:`l`.\n node_idx (LongTensor, optional): The nodes that should be considered\n for creating mini-batches. If set to :obj:`None`, all nodes will be\n considered.\n num_nodes (int, optional): The number of nodes in the graph.\n (default: :obj:`None`)\n return_e_id (bool, optional): If set to :obj:`False`, will not return\n original edge indices of sampled edges. This is only useful in case\n when operating on graphs without edge features to save memory.\n (default: :obj:`True`)\n transform (callable, optional): A function/transform that takes in\n a sampled mini-batch and returns a transformed version.\n (default: :obj:`None`)\n **kwargs (optional): Additional arguments of\n :class:`torch.utils.data.DataLoader`, such as :obj:`batch_size`,\n :obj:`shuffle`, :obj:`drop_last` or :obj:`num_workers`.\n \"\"\"\n def __init__(self, edge_index: Union[Tensor, SparseTensor],\n sizes: List[int], node_idx: Optional[Tensor] = None,\n num_nodes: Optional[int] = None, return_e_id: bool = True,\n transform: Callable = None, **kwargs):\n\n edge_index = edge_index.to('cpu')\n\n if 'collate_fn' in kwargs:\n del kwargs['collate_fn']\n if 'dataset' in kwargs:\n del kwargs['dataset']\n\n # Save for Pytorch Lightning...\n self.edge_index = edge_index\n self.node_idx = node_idx\n self.num_nodes = num_nodes\n\n self.sizes = sizes\n self.return_e_id = return_e_id\n self.transform = transform\n self.is_sparse_tensor = isinstance(edge_index, SparseTensor)\n self.__val__ = None\n\n # Obtain a *transposed* `SparseTensor` instance.\n if not self.is_sparse_tensor:\n if (num_nodes is None and node_idx is not None\n and node_idx.dtype == torch.bool):\n num_nodes = node_idx.size(0)\n if (num_nodes is None and node_idx is not None\n and node_idx.dtype == torch.long):\n num_nodes = max(int(edge_index.max()), int(node_idx.max())) + 1\n if num_nodes is None:\n num_nodes = int(edge_index.max()) + 1\n\n value = torch.arange(edge_index.size(1)) if return_e_id else None\n self.adj_t = SparseTensor(row=edge_index[0], col=edge_index[1],\n value=value,\n sparse_sizes=(num_nodes, num_nodes)).t()\n else:\n adj_t = edge_index\n if return_e_id:\n self.__val__ = adj_t.storage.value()\n value = torch.arange(adj_t.nnz())\n adj_t = adj_t.set_value(value, layout='coo')\n self.adj_t = 
adj_t\n\n self.adj_t.storage.rowptr()\n\n if node_idx is None:\n node_idx = torch.arange(self.adj_t.sparse_size(0))\n elif node_idx.dtype == torch.bool:\n node_idx = node_idx.nonzero(as_tuple=False).view(-1)\n\n super().__init__(\n node_idx.view(-1).tolist(), collate_fn=self.sample, **kwargs)\n\n def sample(self, batch):\n if not isinstance(batch, Tensor):\n batch = torch.tensor(batch)\n\n batch_size: int = len(batch)\n\n adjs = []\n n_id = batch\n for size in self.sizes:\n adj_t, n_id = self.adj_t.sample_adj(n_id, size, replace=False)\n e_id = adj_t.storage.value()\n size = adj_t.sparse_sizes()[::-1]\n if self.__val__ is not None:\n adj_t.set_value_(self.__val__[e_id], layout='coo')\n\n if self.is_sparse_tensor:\n adjs.append(Adj(adj_t, e_id, size))\n else:\n row, col, _ = adj_t.coo()\n edge_index = torch.stack([col, row], dim=0)\n adjs.append(EdgeIndex(edge_index, e_id, size))\n\n adjs = adjs[0] if len(adjs) == 1 else adjs[::-1]\n out = (batch_size, n_id, adjs)\n out = self.transform(*out) if self.transform is not None else out\n return out\n\n def __repr__(self) -> str:\n return f'{self.__class__.__name__}(sizes={self.sizes})'\n",
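A minimal sketch of driving the NeighborSampler recorded above on a random toy graph (torch_sparse must be installed); the graph size, fan-outs and batch size are assumptions.

import torch
from torch_geometric.loader import NeighborSampler

edge_index = torch.randint(0, 100, (2, 500))  # random graph on 100 nodes
loader = NeighborSampler(edge_index, sizes=[10, 5], num_nodes=100,
                         batch_size=32, shuffle=True)

for batch_size, n_id, adjs in loader:
    # adjs holds one (edge_index, e_id, size) bipartite block per layer,
    # ordered from the outermost hop down to the target nodes.
    pass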
"import os.path as osp\nfrom math import ceil\n\nimport torch\nimport torch.nn.functional as F\nfrom torch_geometric.datasets import TUDataset\nimport torch_geometric.transforms as T\nfrom torch_geometric.loader import DenseDataLoader\nfrom torch_geometric.nn import DenseSAGEConv, dense_diff_pool\n\nmax_nodes = 150\n\n\nclass MyFilter(object):\n def __call__(self, data):\n return data.num_nodes <= max_nodes\n\n\npath = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data',\n 'PROTEINS_dense')\ndataset = TUDataset(path, name='PROTEINS', transform=T.ToDense(max_nodes),\n pre_filter=MyFilter())\ndataset = dataset.shuffle()\nn = (len(dataset) + 9) // 10\ntest_dataset = dataset[:n]\nval_dataset = dataset[n:2 * n]\ntrain_dataset = dataset[2 * n:]\ntest_loader = DenseDataLoader(test_dataset, batch_size=20)\nval_loader = DenseDataLoader(val_dataset, batch_size=20)\ntrain_loader = DenseDataLoader(train_dataset, batch_size=20)\n\n\nclass GNN(torch.nn.Module):\n def __init__(self, in_channels, hidden_channels, out_channels,\n normalize=False, lin=True):\n super().__init__()\n\n self.conv1 = DenseSAGEConv(in_channels, hidden_channels, normalize)\n self.bn1 = torch.nn.BatchNorm1d(hidden_channels)\n self.conv2 = DenseSAGEConv(hidden_channels, hidden_channels, normalize)\n self.bn2 = torch.nn.BatchNorm1d(hidden_channels)\n self.conv3 = DenseSAGEConv(hidden_channels, out_channels, normalize)\n self.bn3 = torch.nn.BatchNorm1d(out_channels)\n\n if lin is True:\n self.lin = torch.nn.Linear(2 * hidden_channels + out_channels,\n out_channels)\n else:\n self.lin = None\n\n def bn(self, i, x):\n batch_size, num_nodes, num_channels = x.size()\n\n x = x.view(-1, num_channels)\n x = getattr(self, f'bn{i}')(x)\n x = x.view(batch_size, num_nodes, num_channels)\n return x\n\n def forward(self, x, adj, mask=None):\n batch_size, num_nodes, in_channels = x.size()\n\n x0 = x\n x1 = self.bn(1, F.relu(self.conv1(x0, adj, mask)))\n x2 = self.bn(2, F.relu(self.conv2(x1, adj, mask)))\n x3 = self.bn(3, F.relu(self.conv3(x2, adj, mask)))\n\n x = torch.cat([x1, x2, x3], dim=-1)\n\n if self.lin is not None:\n x = F.relu(self.lin(x))\n\n return x\n\n\nclass Net(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n num_nodes = ceil(0.25 * max_nodes)\n self.gnn1_pool = GNN(dataset.num_features, 64, num_nodes)\n self.gnn1_embed = GNN(dataset.num_features, 64, 64, lin=False)\n\n num_nodes = ceil(0.25 * num_nodes)\n self.gnn2_pool = GNN(3 * 64, 64, num_nodes)\n self.gnn2_embed = GNN(3 * 64, 64, 64, lin=False)\n\n self.gnn3_embed = GNN(3 * 64, 64, 64, lin=False)\n\n self.lin1 = torch.nn.Linear(3 * 64, 64)\n self.lin2 = torch.nn.Linear(64, dataset.num_classes)\n\n def forward(self, x, adj, mask=None):\n s = self.gnn1_pool(x, adj, mask)\n x = self.gnn1_embed(x, adj, mask)\n\n x, adj, l1, e1 = dense_diff_pool(x, adj, s, mask)\n\n s = self.gnn2_pool(x, adj)\n x = self.gnn2_embed(x, adj)\n\n x, adj, l2, e2 = dense_diff_pool(x, adj, s)\n\n x = self.gnn3_embed(x, adj)\n\n x = x.mean(dim=1)\n x = F.relu(self.lin1(x))\n x = self.lin2(x)\n return F.log_softmax(x, dim=-1), l1 + l2, e1 + e2\n\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nmodel = Net().to(device)\noptimizer = torch.optim.Adam(model.parameters(), lr=0.001)\n\n\ndef train(epoch):\n model.train()\n loss_all = 0\n\n for data in train_loader:\n data = data.to(device)\n optimizer.zero_grad()\n output, _, _ = model(data.x, data.adj, data.mask)\n loss = F.nll_loss(output, data.y.view(-1))\n loss.backward()\n loss_all += data.y.size(0) * 
loss.item()\n optimizer.step()\n return loss_all / len(train_dataset)\n\n\[email protected]_grad()\ndef test(loader):\n model.eval()\n correct = 0\n\n for data in loader:\n data = data.to(device)\n pred = model(data.x, data.adj, data.mask)[0].max(dim=1)[1]\n correct += pred.eq(data.y.view(-1)).sum().item()\n return correct / len(loader.dataset)\n\n\nbest_val_acc = test_acc = 0\nfor epoch in range(1, 151):\n train_loss = train(epoch)\n val_acc = test(val_loader)\n if val_acc > best_val_acc:\n test_acc = test(test_loader)\n best_val_acc = val_acc\n print(f'Epoch: {epoch:03d}, Train Loss: {train_loss:.4f}, '\n f'Val Acc: {val_acc:.4f}, Test Acc: {test_acc:.4f}')\n",
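A minimal shape sketch for the dense_diff_pool call that the training script above is built around; the batch size, node count and cluster count are assumed toy values.

import torch
from torch_geometric.nn import dense_diff_pool

x = torch.randn(2, 20, 16)       # [batch, nodes, features]
adj = torch.rand(2, 20, 20)      # dense adjacency per graph
s = torch.randn(2, 20, 5)        # soft assignments of 20 nodes to 5 clusters

x_pooled, adj_pooled, link_loss, ent_loss = dense_diff_pool(x, adj, s)
# x_pooled: [2, 5, 16], adj_pooled: [2, 5, 5]; the two losses are scalars
# and are added to the classification loss in the script above.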
"import argparse\nimport torch\nfrom torch.nn import Linear\nimport torch.nn.functional as F\nfrom torch_geometric.nn import APPNP\n\nfrom citation import get_planetoid_dataset, random_planetoid_splits, run\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--dataset', type=str, required=True)\nparser.add_argument('--random_splits', type=bool, default=False)\nparser.add_argument('--runs', type=int, default=100)\nparser.add_argument('--epochs', type=int, default=200)\nparser.add_argument('--lr', type=float, default=0.01)\nparser.add_argument('--weight_decay', type=float, default=0.0005)\nparser.add_argument('--early_stopping', type=int, default=10)\nparser.add_argument('--hidden', type=int, default=64)\nparser.add_argument('--dropout', type=float, default=0.5)\nparser.add_argument('--normalize_features', type=bool, default=True)\nparser.add_argument('--K', type=int, default=10)\nparser.add_argument('--alpha', type=float, default=0.1)\nargs = parser.parse_args()\n\n\nclass Net(torch.nn.Module):\n def __init__(self, dataset):\n super().__init__()\n self.lin1 = Linear(dataset.num_features, args.hidden)\n self.lin2 = Linear(args.hidden, dataset.num_classes)\n self.prop1 = APPNP(args.K, args.alpha)\n\n def reset_parameters(self):\n self.lin1.reset_parameters()\n self.lin2.reset_parameters()\n\n def forward(self, data):\n x, edge_index = data.x, data.edge_index\n x = F.dropout(x, p=args.dropout, training=self.training)\n x = F.relu(self.lin1(x))\n x = F.dropout(x, p=args.dropout, training=self.training)\n x = self.lin2(x)\n x = self.prop1(x, edge_index)\n return F.log_softmax(x, dim=1)\n\n\ndataset = get_planetoid_dataset(args.dataset, args.normalize_features)\npermute_masks = random_planetoid_splits if args.random_splits else None\nrun(dataset, Net(dataset), args.runs, args.epochs, args.lr, args.weight_decay,\n args.early_stopping, permute_masks)\n",
"import torch\nimport torch.nn.functional as F\nfrom torch.nn import Linear\nfrom torch_geometric.nn import (GraphConv, SAGPooling, global_mean_pool,\n JumpingKnowledge)\n\n\nclass SAGPool(torch.nn.Module):\n def __init__(self, dataset, num_layers, hidden, ratio=0.8):\n super().__init__()\n self.conv1 = GraphConv(dataset.num_features, hidden, aggr='mean')\n self.convs = torch.nn.ModuleList()\n self.pools = torch.nn.ModuleList()\n self.convs.extend([\n GraphConv(hidden, hidden, aggr='mean')\n for i in range(num_layers - 1)\n ])\n self.pools.extend(\n [SAGPooling(hidden, ratio) for i in range((num_layers) // 2)])\n self.jump = JumpingKnowledge(mode='cat')\n self.lin1 = Linear(num_layers * hidden, hidden)\n self.lin2 = Linear(hidden, dataset.num_classes)\n\n def reset_parameters(self):\n self.conv1.reset_parameters()\n for conv in self.convs:\n conv.reset_parameters()\n for pool in self.pools:\n pool.reset_parameters()\n self.lin1.reset_parameters()\n self.lin2.reset_parameters()\n\n def forward(self, data):\n x, edge_index, batch = data.x, data.edge_index, data.batch\n x = F.relu(self.conv1(x, edge_index))\n xs = [global_mean_pool(x, batch)]\n for i, conv in enumerate(self.convs):\n x = F.relu(conv(x, edge_index))\n xs += [global_mean_pool(x, batch)]\n if i % 2 == 0 and i < len(self.convs) - 1:\n pool = self.pools[i // 2]\n x, edge_index, _, batch, _, _ = pool(x, edge_index,\n batch=batch)\n x = self.jump(xs)\n x = F.relu(self.lin1(x))\n x = F.dropout(x, p=0.5, training=self.training)\n x = self.lin2(x)\n return F.log_softmax(x, dim=-1)\n\n def __repr__(self):\n return self.__class__.__name__\n",
"from typing import Optional\n\nimport os\nimport warnings\nimport os.path as osp\nfrom math import pi as PI\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.nn import Embedding, Sequential, Linear, ModuleList\nimport numpy as np\n\nfrom torch_scatter import scatter\nfrom torch_geometric.data.makedirs import makedirs\nfrom torch_geometric.data import download_url, extract_zip, Dataset\nfrom torch_geometric.nn import radius_graph, MessagePassing\n\nqm9_target_dict = {\n 0: 'dipole_moment',\n 1: 'isotropic_polarizability',\n 2: 'homo',\n 3: 'lumo',\n 4: 'gap',\n 5: 'electronic_spatial_extent',\n 6: 'zpve',\n 7: 'energy_U0',\n 8: 'energy_U',\n 9: 'enthalpy_H',\n 10: 'free_energy',\n 11: 'heat_capacity',\n}\n\n\nclass SchNet(torch.nn.Module):\n r\"\"\"The continuous-filter convolutional neural network SchNet from the\n `\"SchNet: A Continuous-filter Convolutional Neural Network for Modeling\n Quantum Interactions\" <https://arxiv.org/abs/1706.08566>`_ paper that uses\n the interactions blocks of the form\n\n .. math::\n \\mathbf{x}^{\\prime}_i = \\sum_{j \\in \\mathcal{N}(i)} \\mathbf{x}_j \\odot\n h_{\\mathbf{\\Theta}} ( \\exp(-\\gamma(\\mathbf{e}_{j,i} - \\mathbf{\\mu}))),\n\n here :math:`h_{\\mathbf{\\Theta}}` denotes an MLP and\n :math:`\\mathbf{e}_{j,i}` denotes the interatomic distances between atoms.\n\n .. note::\n\n For an example of using a pretrained SchNet variant, see\n `examples/qm9_pretrained_schnet.py\n <https://github.com/pyg-team/pytorch_geometric/blob/master/examples/\n qm9_pretrained_schnet.py>`_.\n\n Args:\n hidden_channels (int, optional): Hidden embedding size.\n (default: :obj:`128`)\n num_filters (int, optional): The number of filters to use.\n (default: :obj:`128`)\n num_interactions (int, optional): The number of interaction blocks.\n (default: :obj:`6`)\n num_gaussians (int, optional): The number of gaussians :math:`\\mu`.\n (default: :obj:`50`)\n cutoff (float, optional): Cutoff distance for interatomic interactions.\n (default: :obj:`10.0`)\n max_num_neighbors (int, optional): The maximum number of neighbors to\n collect for each node within the :attr:`cutoff` distance.\n (default: :obj:`32`)\n readout (string, optional): Whether to apply :obj:`\"add\"` or\n :obj:`\"mean\"` global aggregation. (default: :obj:`\"add\"`)\n dipole (bool, optional): If set to :obj:`True`, will use the magnitude\n of the dipole moment to make the final prediction, *e.g.*, for\n target 0 of :class:`torch_geometric.datasets.QM9`.\n (default: :obj:`False`)\n mean (float, optional): The mean of the property to predict.\n (default: :obj:`None`)\n std (float, optional): The standard deviation of the property to\n predict. 
(default: :obj:`None`)\n atomref (torch.Tensor, optional): The reference of single-atom\n properties.\n Expects a vector of shape :obj:`(max_atomic_number, )`.\n \"\"\"\n\n url = 'http://www.quantum-machine.org/datasets/trained_schnet_models.zip'\n\n def __init__(self, hidden_channels: int = 128, num_filters: int = 128,\n num_interactions: int = 6, num_gaussians: int = 50,\n cutoff: float = 10.0, max_num_neighbors: int = 32,\n readout: str = 'add', dipole: bool = False,\n mean: Optional[float] = None, std: Optional[float] = None,\n atomref: Optional[torch.Tensor] = None):\n super().__init__()\n\n import ase\n\n self.hidden_channels = hidden_channels\n self.num_filters = num_filters\n self.num_interactions = num_interactions\n self.num_gaussians = num_gaussians\n self.cutoff = cutoff\n self.max_num_neighbors = max_num_neighbors\n self.readout = readout\n self.dipole = dipole\n self.readout = 'add' if self.dipole else self.readout\n self.mean = mean\n self.std = std\n self.scale = None\n\n atomic_mass = torch.from_numpy(ase.data.atomic_masses)\n self.register_buffer('atomic_mass', atomic_mass)\n\n self.embedding = Embedding(100, hidden_channels)\n self.distance_expansion = GaussianSmearing(0.0, cutoff, num_gaussians)\n\n self.interactions = ModuleList()\n for _ in range(num_interactions):\n block = InteractionBlock(hidden_channels, num_gaussians,\n num_filters, cutoff)\n self.interactions.append(block)\n\n self.lin1 = Linear(hidden_channels, hidden_channels // 2)\n self.act = ShiftedSoftplus()\n self.lin2 = Linear(hidden_channels // 2, 1)\n\n self.register_buffer('initial_atomref', atomref)\n self.atomref = None\n if atomref is not None:\n self.atomref = Embedding(100, 1)\n self.atomref.weight.data.copy_(atomref)\n\n self.reset_parameters()\n\n def reset_parameters(self):\n self.embedding.reset_parameters()\n for interaction in self.interactions:\n interaction.reset_parameters()\n torch.nn.init.xavier_uniform_(self.lin1.weight)\n self.lin1.bias.data.fill_(0)\n torch.nn.init.xavier_uniform_(self.lin2.weight)\n self.lin2.bias.data.fill_(0)\n if self.atomref is not None:\n self.atomref.weight.data.copy_(self.initial_atomref)\n\n @staticmethod\n def from_qm9_pretrained(root: str, dataset: Dataset, target: int):\n import ase\n import schnetpack as spk # noqa\n\n assert target >= 0 and target <= 12\n\n units = [1] * 12\n units[0] = ase.units.Debye\n units[1] = ase.units.Bohr**3\n units[5] = ase.units.Bohr**2\n\n root = osp.expanduser(osp.normpath(root))\n makedirs(root)\n folder = 'trained_schnet_models'\n if not osp.exists(osp.join(root, folder)):\n path = download_url(SchNet.url, root)\n extract_zip(path, root)\n os.unlink(path)\n\n name = f'qm9_{qm9_target_dict[target]}'\n path = osp.join(root, 'trained_schnet_models', name, 'split.npz')\n\n split = np.load(path)\n train_idx = split['train_idx']\n val_idx = split['val_idx']\n test_idx = split['test_idx']\n\n # Filter the splits to only contain characterized molecules.\n idx = dataset.data.idx\n assoc = idx.new_empty(idx.max().item() + 1)\n assoc[idx] = torch.arange(idx.size(0))\n\n train_idx = assoc[train_idx[np.isin(train_idx, idx)]]\n val_idx = assoc[val_idx[np.isin(val_idx, idx)]]\n test_idx = assoc[test_idx[np.isin(test_idx, idx)]]\n\n path = osp.join(root, 'trained_schnet_models', name, 'best_model')\n\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n state = torch.load(path, map_location='cpu')\n\n net = SchNet(hidden_channels=128, num_filters=128, num_interactions=6,\n num_gaussians=50, cutoff=10.0,\n 
atomref=dataset.atomref(target))\n\n net.embedding.weight = state.representation.embedding.weight\n\n for int1, int2 in zip(state.representation.interactions,\n net.interactions):\n int2.mlp[0].weight = int1.filter_network[0].weight\n int2.mlp[0].bias = int1.filter_network[0].bias\n int2.mlp[2].weight = int1.filter_network[1].weight\n int2.mlp[2].bias = int1.filter_network[1].bias\n int2.lin.weight = int1.dense.weight\n int2.lin.bias = int1.dense.bias\n\n int2.conv.lin1.weight = int1.cfconv.in2f.weight\n int2.conv.lin2.weight = int1.cfconv.f2out.weight\n int2.conv.lin2.bias = int1.cfconv.f2out.bias\n\n net.lin1.weight = state.output_modules[0].out_net[1].out_net[0].weight\n net.lin1.bias = state.output_modules[0].out_net[1].out_net[0].bias\n net.lin2.weight = state.output_modules[0].out_net[1].out_net[1].weight\n net.lin2.bias = state.output_modules[0].out_net[1].out_net[1].bias\n\n mean = state.output_modules[0].atom_pool.average\n net.readout = 'mean' if mean is True else 'add'\n\n dipole = state.output_modules[0].__class__.__name__ == 'DipoleMoment'\n net.dipole = dipole\n\n net.mean = state.output_modules[0].standardize.mean.item()\n net.std = state.output_modules[0].standardize.stddev.item()\n\n if state.output_modules[0].atomref is not None:\n net.atomref.weight = state.output_modules[0].atomref.weight\n else:\n net.atomref = None\n\n net.scale = 1. / units[target]\n\n return net, (dataset[train_idx], dataset[val_idx], dataset[test_idx])\n\n def forward(self, z, pos, batch=None):\n \"\"\"\"\"\"\n assert z.dim() == 1 and z.dtype == torch.long\n batch = torch.zeros_like(z) if batch is None else batch\n\n h = self.embedding(z)\n\n edge_index = radius_graph(pos, r=self.cutoff, batch=batch,\n max_num_neighbors=self.max_num_neighbors)\n row, col = edge_index\n edge_weight = (pos[row] - pos[col]).norm(dim=-1)\n edge_attr = self.distance_expansion(edge_weight)\n\n for interaction in self.interactions:\n h = h + interaction(h, edge_index, edge_weight, edge_attr)\n\n h = self.lin1(h)\n h = self.act(h)\n h = self.lin2(h)\n\n if self.dipole:\n # Get center of mass.\n mass = self.atomic_mass[z].view(-1, 1)\n c = scatter(mass * pos, batch, dim=0) / scatter(mass, batch, dim=0)\n h = h * (pos - c.index_select(0, batch))\n\n if not self.dipole and self.mean is not None and self.std is not None:\n h = h * self.std + self.mean\n\n if not self.dipole and self.atomref is not None:\n h = h + self.atomref(z)\n\n out = scatter(h, batch, dim=0, reduce=self.readout)\n\n if self.dipole:\n out = torch.norm(out, dim=-1, keepdim=True)\n\n if self.scale is not None:\n out = self.scale * out\n\n return out\n\n def __repr__(self):\n return (f'{self.__class__.__name__}('\n f'hidden_channels={self.hidden_channels}, '\n f'num_filters={self.num_filters}, '\n f'num_interactions={self.num_interactions}, '\n f'num_gaussians={self.num_gaussians}, '\n f'cutoff={self.cutoff})')\n\n\nclass InteractionBlock(torch.nn.Module):\n def __init__(self, hidden_channels, num_gaussians, num_filters, cutoff):\n super().__init__()\n self.mlp = Sequential(\n Linear(num_gaussians, num_filters),\n ShiftedSoftplus(),\n Linear(num_filters, num_filters),\n )\n self.conv = CFConv(hidden_channels, hidden_channels, num_filters,\n self.mlp, cutoff)\n self.act = ShiftedSoftplus()\n self.lin = Linear(hidden_channels, hidden_channels)\n\n self.reset_parameters()\n\n def reset_parameters(self):\n torch.nn.init.xavier_uniform_(self.mlp[0].weight)\n self.mlp[0].bias.data.fill_(0)\n torch.nn.init.xavier_uniform_(self.mlp[2].weight)\n 
self.mlp[2].bias.data.fill_(0)\n self.conv.reset_parameters()\n torch.nn.init.xavier_uniform_(self.lin.weight)\n self.lin.bias.data.fill_(0)\n\n def forward(self, x, edge_index, edge_weight, edge_attr):\n x = self.conv(x, edge_index, edge_weight, edge_attr)\n x = self.act(x)\n x = self.lin(x)\n return x\n\n\nclass CFConv(MessagePassing):\n def __init__(self, in_channels, out_channels, num_filters, nn, cutoff):\n super().__init__(aggr='add')\n self.lin1 = Linear(in_channels, num_filters, bias=False)\n self.lin2 = Linear(num_filters, out_channels)\n self.nn = nn\n self.cutoff = cutoff\n\n self.reset_parameters()\n\n def reset_parameters(self):\n torch.nn.init.xavier_uniform_(self.lin1.weight)\n torch.nn.init.xavier_uniform_(self.lin2.weight)\n self.lin2.bias.data.fill_(0)\n\n def forward(self, x, edge_index, edge_weight, edge_attr):\n C = 0.5 * (torch.cos(edge_weight * PI / self.cutoff) + 1.0)\n W = self.nn(edge_attr) * C.view(-1, 1)\n\n x = self.lin1(x)\n x = self.propagate(edge_index, x=x, W=W)\n x = self.lin2(x)\n return x\n\n def message(self, x_j, W):\n return x_j * W\n\n\nclass GaussianSmearing(torch.nn.Module):\n def __init__(self, start=0.0, stop=5.0, num_gaussians=50):\n super().__init__()\n offset = torch.linspace(start, stop, num_gaussians)\n self.coeff = -0.5 / (offset[1] - offset[0]).item()**2\n self.register_buffer('offset', offset)\n\n def forward(self, dist):\n dist = dist.view(-1, 1) - self.offset.view(1, -1)\n return torch.exp(self.coeff * torch.pow(dist, 2))\n\n\nclass ShiftedSoftplus(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.shift = torch.log(torch.tensor(2.0)).item()\n\n def forward(self, x):\n return F.softplus(x) - self.shift\n",
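A minimal sketch of running the SchNet class defined in the file above on a single toy molecule; it assumes the optional dependencies that file relies on (ase for atomic masses, torch-cluster for radius_graph) are installed, and the atom list is illustrative.

import torch

z = torch.tensor([6, 1, 1, 1, 1])    # atomic numbers (a methane-like toy input), dtype long
pos = torch.randn(5, 3)              # random 3D coordinates

model = SchNet(hidden_channels=64, num_filters=64, num_interactions=3)
out = model(z, pos)                  # one prediction per molecule: shape [1, 1]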
"import torch\r\nfrom torch_sparse import SparseTensor\r\nfrom torch_geometric.nn import HANConv\r\n\r\n\r\ndef test_han_conv():\r\n\r\n x_dict = {\r\n 'author': torch.randn(6, 16),\r\n 'paper': torch.randn(5, 12),\r\n 'term': torch.randn(4, 3)\r\n }\r\n edge1 = torch.randint(0, 6, (2, 7), dtype=torch.long)\r\n edge2 = torch.randint(0, 5, (2, 4), dtype=torch.long)\r\n edge3 = torch.randint(0, 3, (2, 5), dtype=torch.long)\r\n edge_index_dict = {\r\n ('author', 'metapath0', 'author'): edge1,\r\n ('paper', 'matapath1', 'paper'): edge2,\r\n ('paper', 'matapath2', 'paper'): edge3,\r\n }\r\n\r\n adj_t_dict = {}\r\n for edge_type, edge_index in edge_index_dict.items():\r\n src_type, _, dst_type = edge_type\r\n adj_t_dict[edge_type] = SparseTensor(\r\n row=edge_index[0], col=edge_index[1],\r\n sparse_sizes=(x_dict[src_type].size(0),\r\n x_dict[dst_type].size(0))).t()\r\n\r\n metadata = (list(x_dict.keys()), list(edge_index_dict.keys()))\r\n in_channels = {'author': 16, 'paper': 12, 'term': 3}\r\n\r\n conv = HANConv(in_channels, 16, metadata, heads=2)\r\n assert str(conv) == 'HANConv(16, heads=2)'\r\n out_dict1 = conv(x_dict, edge_index_dict)\r\n assert len(out_dict1) == 3\r\n assert out_dict1['author'].size() == (6, 16)\r\n assert out_dict1['paper'].size() == (5, 16)\r\n assert out_dict1['term'] is None\r\n del out_dict1['term']\r\n del x_dict['term']\r\n\r\n out_dict2 = conv(x_dict, adj_t_dict)\r\n assert len(out_dict1) == len(out_dict2)\r\n for node_type in out_dict1.keys():\r\n assert torch.allclose(out_dict1[node_type], out_dict2[node_type],\r\n atol=1e-6)\r\n\r\n # non zero dropout\r\n conv = HANConv(in_channels, 16, metadata, heads=2, dropout=0.1)\r\n assert str(conv) == 'HANConv(16, heads=2)'\r\n out_dict1 = conv(x_dict, edge_index_dict)\r\n assert len(out_dict1) == 2\r\n assert out_dict1['author'].size() == (6, 16)\r\n assert out_dict1['paper'].size() == (5, 16)\r\n\r\n\r\ndef test_han_conv_lazy():\r\n\r\n x_dict = {\r\n 'author': torch.randn(6, 16),\r\n 'paper': torch.randn(5, 12),\r\n }\r\n edge1 = torch.randint(0, 6, (2, 8), dtype=torch.long)\r\n edge2 = torch.randint(0, 5, (2, 6), dtype=torch.long)\r\n edge_index_dict = {\r\n ('author', 'metapath0', 'author'): edge1,\r\n ('paper', 'metapath1', 'paper'): edge2,\r\n }\r\n\r\n adj_t_dict = {}\r\n for edge_type, edge_index in edge_index_dict.items():\r\n src_type, _, dst_type = edge_type\r\n adj_t_dict[edge_type] = SparseTensor(\r\n row=edge_index[0], col=edge_index[1],\r\n sparse_sizes=(x_dict[src_type].size(0),\r\n x_dict[dst_type].size(0))).t()\r\n\r\n metadata = (list(x_dict.keys()), list(edge_index_dict.keys()))\r\n conv = HANConv(-1, 16, metadata, heads=2)\r\n assert str(conv) == 'HANConv(16, heads=2)'\r\n out_dict1 = conv(x_dict, edge_index_dict)\r\n assert len(out_dict1) == 2\r\n assert out_dict1['author'].size() == (6, 16)\r\n assert out_dict1['paper'].size() == (5, 16)\r\n out_dict2 = conv(x_dict, adj_t_dict)\r\n\r\n assert len(out_dict1) == len(out_dict2)\r\n for node_type in out_dict1.keys():\r\n assert torch.allclose(out_dict1[node_type], out_dict2[node_type],\r\n atol=1e-6)\r\n",
"import torch\nimport time\nimport logging\n\nfrom torch_geometric.graphgym.config import cfg\nfrom torch_geometric.graphgym.loss import compute_loss\nfrom torch_geometric.graphgym.utils.epoch import is_eval_epoch, is_ckpt_epoch\nfrom torch_geometric.graphgym.checkpoint import load_ckpt, save_ckpt, \\\n clean_ckpt\n\nfrom torch_geometric.graphgym.register import register_train\n\n\ndef train_epoch(logger, loader, model, optimizer, scheduler):\n model.train()\n time_start = time.time()\n for batch in loader:\n optimizer.zero_grad()\n batch.to(torch.device(cfg.device))\n pred, true = model(batch)\n loss, pred_score = compute_loss(pred, true)\n loss.backward()\n optimizer.step()\n logger.update_stats(true=true.detach().cpu(),\n pred=pred_score.detach().cpu(), loss=loss.item(),\n lr=scheduler.get_last_lr()[0],\n time_used=time.time() - time_start,\n params=cfg.params)\n time_start = time.time()\n scheduler.step()\n\n\ndef eval_epoch(logger, loader, model):\n model.eval()\n time_start = time.time()\n for batch in loader:\n batch.to(torch.device(cfg.device))\n pred, true = model(batch)\n loss, pred_score = compute_loss(pred, true)\n logger.update_stats(true=true.detach().cpu(),\n pred=pred_score.detach().cpu(), loss=loss.item(),\n lr=0, time_used=time.time() - time_start,\n params=cfg.params)\n time_start = time.time()\n\n\n@register_train('example')\ndef train_example(loggers, loaders, model, optimizer, scheduler):\n start_epoch = 0\n if cfg.train.auto_resume:\n start_epoch = load_ckpt(model, optimizer, scheduler,\n cfg.train.epoch_resume)\n if start_epoch == cfg.optim.max_epoch:\n logging.info('Checkpoint found, Task already done')\n else:\n logging.info('Start from epoch %s', start_epoch)\n\n num_splits = len(loggers)\n for cur_epoch in range(start_epoch, cfg.optim.max_epoch):\n train_epoch(loggers[0], loaders[0], model, optimizer, scheduler)\n loggers[0].write_epoch(cur_epoch)\n if is_eval_epoch(cur_epoch):\n for i in range(1, num_splits):\n eval_epoch(loggers[i], loaders[i], model)\n loggers[i].write_epoch(cur_epoch)\n if is_ckpt_epoch(cur_epoch):\n save_ckpt(model, optimizer, scheduler, cur_epoch)\n for logger in loggers:\n logger.close()\n if cfg.train.ckpt_clean:\n clean_ckpt()\n\n logging.info('Task done, results saved in %s', cfg.run_dir)\n",
"import torch\nimport torch.nn.functional as F\nfrom torch.nn import Linear\nfrom torch_geometric.nn import GCNConv, global_mean_pool, JumpingKnowledge\n\n\nclass GCN(torch.nn.Module):\n def __init__(self, dataset, num_layers, hidden):\n super().__init__()\n self.conv1 = GCNConv(dataset.num_features, hidden)\n self.convs = torch.nn.ModuleList()\n for i in range(num_layers - 1):\n self.convs.append(GCNConv(hidden, hidden))\n self.lin1 = Linear(hidden, hidden)\n self.lin2 = Linear(hidden, dataset.num_classes)\n\n def reset_parameters(self):\n self.conv1.reset_parameters()\n for conv in self.convs:\n conv.reset_parameters()\n self.lin1.reset_parameters()\n self.lin2.reset_parameters()\n\n def forward(self, data):\n x, edge_index, batch = data.x, data.edge_index, data.batch\n x = F.relu(self.conv1(x, edge_index))\n for conv in self.convs:\n x = F.relu(conv(x, edge_index))\n x = global_mean_pool(x, batch)\n x = F.relu(self.lin1(x))\n x = F.dropout(x, p=0.5, training=self.training)\n x = self.lin2(x)\n return F.log_softmax(x, dim=-1)\n\n def __repr__(self):\n return self.__class__.__name__\n\n\nclass GCNWithJK(torch.nn.Module):\n def __init__(self, dataset, num_layers, hidden, mode='cat'):\n super().__init__()\n self.conv1 = GCNConv(dataset.num_features, hidden)\n self.convs = torch.nn.ModuleList()\n for i in range(num_layers - 1):\n self.convs.append(GCNConv(hidden, hidden))\n self.jump = JumpingKnowledge(mode)\n if mode == 'cat':\n self.lin1 = Linear(num_layers * hidden, hidden)\n else:\n self.lin1 = Linear(hidden, hidden)\n self.lin2 = Linear(hidden, dataset.num_classes)\n\n def reset_parameters(self):\n self.conv1.reset_parameters()\n for conv in self.convs:\n conv.reset_parameters()\n self.jump.reset_parameters()\n self.lin1.reset_parameters()\n self.lin2.reset_parameters()\n\n def forward(self, data):\n x, edge_index, batch = data.x, data.edge_index, data.batch\n x = F.relu(self.conv1(x, edge_index))\n xs = [x]\n for conv in self.convs:\n x = F.relu(conv(x, edge_index))\n xs += [x]\n x = self.jump(xs)\n x = global_mean_pool(x, batch)\n x = F.relu(self.lin1(x))\n x = F.dropout(x, p=0.5, training=self.training)\n x = self.lin2(x)\n return F.log_softmax(x, dim=-1)\n\n def __repr__(self):\n return self.__class__.__name__\n",
"import os.path as osp\n\nimport argparse\nimport torch\nimport torch.nn.functional as F\nfrom torch_geometric.datasets import Flickr\nfrom torch_geometric.loader import GraphSAINTRandomWalkSampler\nfrom torch_geometric.nn import GraphConv\nfrom torch_geometric.utils import degree\n\npath = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'Flickr')\ndataset = Flickr(path)\ndata = dataset[0]\nrow, col = data.edge_index\ndata.edge_weight = 1. / degree(col, data.num_nodes)[col] # Norm by in-degree.\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--use_normalization', action='store_true')\nargs = parser.parse_args()\n\nloader = GraphSAINTRandomWalkSampler(data, batch_size=6000, walk_length=2,\n num_steps=5, sample_coverage=100,\n save_dir=dataset.processed_dir,\n num_workers=4)\n\n\nclass Net(torch.nn.Module):\n def __init__(self, hidden_channels):\n super().__init__()\n in_channels = dataset.num_node_features\n out_channels = dataset.num_classes\n self.conv1 = GraphConv(in_channels, hidden_channels)\n self.conv2 = GraphConv(hidden_channels, hidden_channels)\n self.conv3 = GraphConv(hidden_channels, hidden_channels)\n self.lin = torch.nn.Linear(3 * hidden_channels, out_channels)\n\n def set_aggr(self, aggr):\n self.conv1.aggr = aggr\n self.conv2.aggr = aggr\n self.conv3.aggr = aggr\n\n def forward(self, x0, edge_index, edge_weight=None):\n x1 = F.relu(self.conv1(x0, edge_index, edge_weight))\n x1 = F.dropout(x1, p=0.2, training=self.training)\n x2 = F.relu(self.conv2(x1, edge_index, edge_weight))\n x2 = F.dropout(x2, p=0.2, training=self.training)\n x3 = F.relu(self.conv3(x2, edge_index, edge_weight))\n x3 = F.dropout(x3, p=0.2, training=self.training)\n x = torch.cat([x1, x2, x3], dim=-1)\n x = self.lin(x)\n return x.log_softmax(dim=-1)\n\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nmodel = Net(hidden_channels=256).to(device)\noptimizer = torch.optim.Adam(model.parameters(), lr=0.001)\n\n\ndef train():\n model.train()\n model.set_aggr('add' if args.use_normalization else 'mean')\n\n total_loss = total_examples = 0\n for data in loader:\n data = data.to(device)\n optimizer.zero_grad()\n\n if args.use_normalization:\n edge_weight = data.edge_norm * data.edge_weight\n out = model(data.x, data.edge_index, edge_weight)\n loss = F.nll_loss(out, data.y, reduction='none')\n loss = (loss * data.node_norm)[data.train_mask].sum()\n else:\n out = model(data.x, data.edge_index)\n loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask])\n\n loss.backward()\n optimizer.step()\n total_loss += loss.item() * data.num_nodes\n total_examples += data.num_nodes\n return total_loss / total_examples\n\n\[email protected]_grad()\ndef test():\n model.eval()\n model.set_aggr('mean')\n\n out = model(data.x.to(device), data.edge_index.to(device))\n pred = out.argmax(dim=-1)\n correct = pred.eq(data.y.to(device))\n\n accs = []\n for _, mask in data('train_mask', 'val_mask', 'test_mask'):\n accs.append(correct[mask].sum().item() / mask.sum().item())\n return accs\n\n\nfor epoch in range(1, 51):\n loss = train()\n accs = test()\n print(f'Epoch: {epoch:02d}, Loss: {loss:.4f}, Train: {accs[0]:.4f}, '\n f'Val: {accs[1]:.4f}, Test: {accs[2]:.4f}')\n"
] | [
[
"torch.nn.functional.softmax",
"torch.Tensor",
"torch.nn.functional.dropout",
"torch.nn.ParameterDict",
"torch.nn.ModuleDict",
"torch.nn.Linear",
"torch.nn.functional.relu",
"torch.nn.functional.leaky_relu",
"torch.stack"
],
[
"torch.nn.functional.one_hot"
],
[
"torch.nn.BatchNorm1d",
"torch.cat"
],
[
"torch.stack",
"torch.tensor"
],
[
"torch.nn.BatchNorm1d",
"torch.nn.functional.log_softmax",
"torch.cat",
"torch.nn.Linear",
"torch.no_grad",
"torch.cuda.is_available"
],
[
"torch.nn.Linear",
"torch.nn.functional.log_softmax",
"torch.nn.functional.dropout"
],
[
"torch.nn.Linear",
"torch.nn.ModuleList",
"torch.nn.functional.log_softmax",
"torch.nn.functional.dropout"
],
[
"numpy.isin",
"torch.linspace",
"torch.norm",
"torch.load",
"torch.nn.ModuleList",
"torch.zeros_like",
"torch.from_numpy",
"torch.nn.Embedding",
"torch.tensor",
"torch.nn.Linear",
"torch.pow",
"torch.nn.init.xavier_uniform_",
"numpy.load",
"torch.nn.functional.softplus",
"torch.cos"
],
[
"torch.randn",
"torch.allclose",
"torch.randint"
],
[
"torch.device"
],
[
"torch.nn.Linear",
"torch.nn.ModuleList",
"torch.nn.functional.log_softmax",
"torch.nn.functional.dropout"
],
[
"torch.nn.functional.nll_loss",
"torch.cat",
"torch.nn.functional.dropout",
"torch.nn.Linear",
"torch.no_grad",
"torch.cuda.is_available"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
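A minimal sketch of applying the OneHotDegree transform recorded in this row's code column; the toy edge list and max_degree are assumptions.

import torch
from torch_geometric.data import Data
from torch_geometric.transforms import OneHotDegree

edge_index = torch.tensor([[0, 1, 1, 2],   # source nodes (out-degree is counted here)
                           [1, 0, 2, 1]])  # target nodes
data = Data(edge_index=edge_index, num_nodes=3)

data = OneHotDegree(max_degree=2)(data)
# data.x is now a [3, 3] one-hot matrix: node 1 has degree 2, nodes 0 and 2 have degree 1.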
anibalsolon/brainhack-donostia.github.io | [
"ad4f30f938923af7ff85fed542972f94f2032d13"
] | [
"populate_projects.py"
] | [
"import os\nimport pandas as pd\nfrom string import Template\nimport wget\n\ncsv_file_path = \"https://docs.google.com/spreadsheets/d/1AlflVlTg1KmajQrWBOUBT2XeoAUqfjB9SCQfDIPvSXo/export?format=csv&gid=565678921\"\nproject_card_path = \"assets/templates/project_card.html\"\nprojects_page_path = \"assets/templates/template_projects.md\"\n\n\ndef populate_project_card(title, description, leader):\n with open(str(project_card_path), 'r') as card:\n card_tpl = Template(card.read())\n card_html = card_tpl.substitute(projectTitle=title,\n projectDescription=description,\n projectLeader=leader)\n card.close()\n return card_html\n\n\ndef populate_projects_page(html):\n with open(str(projects_page_path), 'r') as prj:\n prj_tpl = Template(prj.read())\n prj_html = prj_tpl.substitute(projectCards=html,\n link=\"/projects/\")\n prj.close()\n return prj_html\n\n\ndef main():\n # Download CSV file\n filename = wget.download(csv_file_path)\n\n # Read CSV file\n df = pd.read_csv(filename)\n df = df[df[\"Leader:\"].notna()]\n\n prj_card = \"\"\n\n for pj_index, prj_row in df.iterrows():\n prj_title = prj_row[\"Project title:\"]\n prj_descr = prj_row[\"Project description:\"]\n prj_leader = prj_row[\"Leader:\"]\n\n prj_card += populate_project_card(prj_title, prj_descr, prj_leader)\n\n prj_page = populate_projects_page(prj_card)\n\n with open(\"projects.md\", \"wb\") as f:\n f.write(prj_page.encode(\"utf-8\"))\n\n os.remove(filename)\n\nif __name__ == \"__main__\":\n main()"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
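A minimal sketch of the string.Template pattern that populate_projects.py above relies on; the inline HTML snippet stands in for the real assets/templates/project_card.html file and is only an assumption.

from string import Template

card_tpl = Template('<div><h3>$projectTitle</h3>'
                    '<p>$projectDescription</p>'
                    '<em>$projectLeader</em></div>')  # hypothetical card markup
card_html = card_tpl.substitute(projectTitle='Demo project',
                                projectDescription='A short description.',
                                projectLeader='Jane Doe')
print(card_html)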
kylepgr/heart-disease-pred | [
"d128cc815dde4839ba18e887113bb47387499ce1"
] | [
"heart_app/views.py"
] | [
"from typing_extensions import SupportsIndex\r\nfrom django.shortcuts import render\r\n\r\n# Create your views here.\r\nfrom django.http import HttpResponse\r\nfrom .forms import InputForm\r\nimport pandas as pd\r\nimport numpy as np\r\nimport pickle\r\nfrom pymongo import MongoClient\r\n\r\nclient = MongoClient('localhost', 27017)\r\ndb = client['PatientDB']\r\n\r\n\r\nloaded_model = pickle.load(open(\"C:/Users/Kyle/Untitled Folder/finalized_model.pkl\", 'rb'))\r\n\r\ndef index(request):\r\n if request.method == \"POST\":\r\n myform = InputForm(request.POST)\r\n if myform.is_valid():\r\n age = myform.cleaned_data['age_v']\r\n sex = myform.cleaned_data['sex_v']\r\n\r\n cp = myform.cleaned_data['cp_v']\r\n thalach = myform.cleaned_data['thalach_v']\r\n exang = myform.cleaned_data['exang_v']\r\n oldpeak = myform.cleaned_data['oldpeak_v']\r\n slope = myform.cleaned_data['slope_v']\r\n\r\n ca = myform.cleaned_data['ca_v']\r\n\r\n m_inputs = [[age, sex, cp, thalach, exang, oldpeak, slope, ca]]\r\n \r\n\r\n y_pred = [np.exp(point)/np.sum(np.exp(point), axis=0)\r\n for point in m_inputs]\r\n\r\n \r\n return render(request, 'index.html', {'prediction': round(y_pred.mean())})\r\n\r\n\r\n else:\r\n myform = InputForm()\r\n\r\n \r\n\r\n return render(request, 'index.html', {'form': myform})\r\n\r\ndef updateDataBase(request):\r\n temp={}\r\n \r\n temp['age']= myform.cleaned_data['age_v']\r\n temp['sex']= myform.cleaned_data['sex_v']\r\n temp['cp']= myform.cleaned_data['cp_v']\r\n temp['thalach']= myform.cleaned_data['thalach_v']\r\n temp['exang']= myform.cleaned_data['exang_v']\r\n temp['oldpeak']= myform.cleaned_data['oldpeak_v']\r\n temp['slope']= myform.cleaned_data['slope_v']\r\n temp['ca']= myform.cleaned_data['ca_v']\r\n \r\n collectionD.insert_one(temp)\r\n countOfrow = collectionD.find().count()\r\n context = {\"Row Count\": countOfrow}\r\n \r\n return render(request,'viewDB.html',context)\r\n \r\n \r\n\r\n\r\n \r\n"
] | [
[
"numpy.exp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
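The recorded heart_app/views.py above unpickles finalized_model.pkl but never calls it; instead it applies a softmax to the raw form inputs and then calls .mean() on a Python list, which raises AttributeError. One plausible repair is sketched below as a standalone helper; it assumes the pickle holds a scikit-learn-style classifier exposing predict(), which the recorded code does not confirm.

import pickle
import numpy as np

def predict_heart_disease(model_path, features):
    # features: the 8 form values, in the order the view collects them:
    # [age, sex, cp, thalach, exang, oldpeak, slope, ca].
    # Assumption: the pickled object exposes a scikit-learn-style predict().
    with open(model_path, 'rb') as f:
        model = pickle.load(f)
    sample = np.asarray(features, dtype=float).reshape(1, -1)
    return int(model.predict(sample)[0])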
JanaLasser/agent_based_COVID_SEIRX | [
"c4e28d472a0484fe1a125ba6974683973141c09e",
"c4e28d472a0484fe1a125ba6974683973141c09e"
] | [
"src/scseirx/model_SEIRX.py",
"src/scseirx/model_school.py"
] | [
"import numpy as np\nimport networkx as nx\nfrom math import gamma\nfrom scipy.optimize import root_scalar\n\nfrom mesa import Model\nfrom mesa.time import RandomActivation, SimultaneousActivation\nfrom mesa.datacollection import DataCollector\n\nfrom scseirx.testing_strategy import Testing\n\n## data collection functions ##\ndef get_N_diagnostic_tests(model):\n return model.number_of_diagnostic_tests\n\n\ndef get_N_preventive_screening_tests(model):\n return model.number_of_preventive_screening_tests\n\n\ndef get_infection_state(agent):\n if agent.exposed == True: return 'exposed'\n elif agent.infectious == True: return 'infectious'\n elif agent.recovered == True: return 'recovered'\n else: return 'susceptible'\n\ndef get_quarantine_state(agent):\n if agent.quarantined == True: return True\n else: return False\n\n\ndef get_undetected_infections(model):\n return model.undetected_infections\n\n\ndef get_predetected_infections(model):\n return model.predetected_infections\n\n\ndef get_pending_test_infections(model):\n return model.pending_test_infections\n\n\ndef get_diagnostic_test_detected_infections_student(model):\n return model.positive_tests[model.Testing.diagnostic_test_type]['student']\ndef get_diagnostic_test_detected_infections_teacher(model):\n return model.positive_tests[model.Testing.diagnostic_test_type]['teacher']\ndef get_diagnostic_test_detected_infections_family_member(model):\n return model.positive_tests[model.Testing.diagnostic_test_type]['family_member']\ndef get_diagnostic_test_detected_infections_resident(model):\n return model.positive_tests[model.Testing.diagnostic_test_type]['resident']\ndef get_diagnostic_test_detected_infections_employee(model):\n return model.positive_tests[model.Testing.diagnostic_test_type]['employee']\ndef get_diagnostic_test_detected_infections_unistudent(model):\n return model.positive_tests[model.Testing.diagnostic_test_type]['unistudent']\ndef get_diagnostic_test_detected_infections_lecturer(model):\n return model.positive_tests[model.Testing.diagnostic_test_type]['lecturer']\n\ndiagnostic_test_detected_infections_funcs = {\n 'student':get_diagnostic_test_detected_infections_student,\n 'teacher':get_diagnostic_test_detected_infections_teacher,\n 'family_member':get_diagnostic_test_detected_infections_family_member,\n 'resident':get_diagnostic_test_detected_infections_resident,\n 'employee':get_diagnostic_test_detected_infections_employee,\n 'unistudent':get_diagnostic_test_detected_infections_unistudent,\n 'lecturer':get_diagnostic_test_detected_infections_lecturer\n}\n\ndef get_preventive_test_detected_infections_student(model):\n return model.positive_tests[model.Testing.preventive_screening_test_type]['student']\ndef get_preventive_test_detected_infections_teacher(model):\n return model.positive_tests[model.Testing.preventive_screening_test_type]['teacher']\ndef get_preventive_test_detected_infections_family_member(model):\n return model.positive_tests[model.Testing.preventive_screening_test_type]['family_member']\ndef get_preventive_test_detected_infections_resident(model):\n return model.positive_tests[model.Testing.preventive_screening_test_type]['resident']\ndef get_preventive_test_detected_infections_employee(model):\n return model.positive_tests[model.Testing.preventive_screening_test_type]['employee']\ndef get_preventive_test_detected_infections_unistudent(model):\n return model.positive_tests[model.Testing.preventive_screening_test_type]['unistudent']\ndef get_preventive_test_detected_infections_lecturer(model):\n return 
model.positive_tests[model.Testing.preventive_screening_test_type]['lecturer']\n\npreventive_test_detected_infections_funcs = {\n 'student':get_preventive_test_detected_infections_student,\n 'teacher':get_preventive_test_detected_infections_teacher,\n 'family_member':get_preventive_test_detected_infections_family_member,\n 'resident':get_preventive_test_detected_infections_resident,\n 'employee':get_preventive_test_detected_infections_employee,\n 'unistudent':get_preventive_test_detected_infections_unistudent,\n 'lecturer':get_preventive_test_detected_infections_lecturer\n}\n\n\n# parameter sanity check functions\n\n\ndef check_positive(var):\n\tassert var >= 0, 'negative number'\n\treturn var\n\n\ndef check_bool(var):\n\tassert type(var) == bool, 'not a bool'\n\treturn var\n\n\ndef check_positive_int(var):\n if var == None:\n return var\n assert type(var) == int, 'not an integer'\n assert var >= 0, 'negative number'\n return var\n\n\ndef check_contact_type_dict(var):\n\tassert type(var) == dict, 'not a dictionary'\n\tassert set(var.keys()).issubset({'very_far', 'far', 'intermediate', 'close'}), \\\n\t\t'does not contain the correct contact types (has to be very_far, far, intermediate or close)'\n\tassert all((isinstance(i, int) or isinstance(i, float)) for i in var.values()), \\\n\t\t'contact type weights are not numeric'\n\n\treturn var\n\n\ndef check_K1_contact_types(var):\n for area in var:\n assert area in ['very_far', 'far', 'intermediate',\n 'close'], 'K1 contact type not recognised'\n return var\n\n\ndef check_testing(var):\n assert var in ['diagnostic', 'background', 'preventive',\n 'background+preventive', False], \\\n 'unknown testing mode: {}'.format(var)\n\n return var\n\n\n\ndef check_probability(var):\n\tassert (type(var) == float) or (var == 0) or (var == 1), \\\n\t\t '{} not a float'.format(var)\n\tassert var >= 0, 'probability negative'\n\tassert var <= 1, 'probability larger than 1'\n\treturn var\n\n\ndef check_graph(var):\n assert type(var) in [nx.Graph, nx.MultiGraph], 'not a networkx graph'\n assert len(var.nodes) > 0, 'graph has no nodes'\n assert len(var.edges) > 0, 'graph has no edges'\n areas = [e[2]['contact_type'] for e in var.edges(data=True)]\n areas = set(areas)\n for a in areas:\n assert a in {'very_far', 'far', 'intermediate',\n 'close'}, 'contact type {} not recognised'.format(a)\n return var\n\n\ndef check_index_case(var, agent_types):\n\tallowed_strings = agent_types[:]\n\tallowed_strings.extend(['continuous'])\n\tassert var in allowed_strings, 'unknown index case mode'\n\treturn var\n\n\ndef check_discount(var):\n if var['slope'] != None:\n assert var['slope'] <= 0, 'slope needs to be <= 0 or None'\n assert np.abs(var['slope']) <= 1, 'absolute value of slope needs to be <= 1'\n assert var['intercept'], 'intercept needs to be positive'\n assert var['intercept'], 'intercept needs to be <= 1'\n return var\n\n\ndef get_weibull_shape(k, mu, var):\n '''\n Calculates the shape parameter of a Weibull distribution, given its mean\n mu and its variance var\n '''\n return var / mu**2 - gamma(1 + 2/k) / gamma(1+1/k)**2 + 1\n\n\n\ndef get_weibull_scale(mu, k):\n '''\n Calculates the scale parameter of a Weibull distribution, given its mean\n mu and its shape parameter k\n '''\n return mu / gamma(1 + 1/k)\n\n\ndef weibull_two_param(shape, scale):\n '''\n A two-parameter Weibull distribution, based on numpy ramdon's single\n parameter distribution. 
We use this distribution in the simulation to draw\n random epidemiological parameters for agents from the given distribution\n See https://numpy.org/doc/stable/reference/random/generated/numpy.random.weibull.html\n '''\n return scale * np.random.weibull(shape)\n\n\nclass SEIRX(Model):\n '''\n A model with a number of different agents that reproduces\n the SEIRX dynamics of pandemic spread in a facility. Note:\n all times are set to correspond to days\n\n G: networkx undirected graph, interaction graph between agents. Edges have\n to have edge the edge attribute 'contact_type' specifying the closeness of\n contacts, which can be ['very far', 'far', 'intermediate' and 'close'].\n Nodes have to have the node attribute 'type' which specifies the agent type\n of the given node (for example 'student' or 'teacher' in a school scenario).\n In addition, nodes can have the attribute 'unit', which assigns them to a\n unit in space (for example a 'class' in a school scenario).\n\n verbosity: integer in [0, 1, 2], controls text output to std out to track\n simulation progress and transmission dynamics. Default = 0.\n\n testing, default = 'diagnostic'\n 'diagnostic': only diagnostic tests for symptomatic agents\n 'background': adds background screens of all agents after a positive\n diagnostic test\n 'preventive': adds preventive screens of agent groups to diagnostic\n testing. Screens happen in time intervals specified \n separately for each agent group in the variable \n 'screening_interval'.\n 'background+preventive': preventive screens AND background screens on\n top of diagnostic testing.\n\n infection_duration, default = 11 NOTE: includes the time an agent is exposed\n but not yet infectious at the beginning of an infection\n positive integer: mean or median of the infection duration in days\n list of two floats: mean and standard deviation of a distribution\n specifying the infection duration in days. These\n numbers will be used to construct a Weibull\n distribution from which the infection duration will\n be drawn for every agent individually\n\n exposure_duration, default = 4. Sets the time from transmission to becoming\n infectious\n positive integer: mean or median of the exposure duration in days\n list of two floats: mean and standard deviation of a distribution\n specifying the exposure duration in days. These\n numbers will be used to construct a Weibull\n distributoin from which the exposure duration will\n be drawn for every agent individually.\n\n time_until_symptoms, default = 6. Sets the time from transmission to\n (potentially) developing symptoms. Symptom probability has to be set for\n each agent group individually using the parameter 'symptom_probability'\n positive integer: mean or median of the time until symptoms in days\n list of two floats: mean and standard deviation of a distribution\n specifying the time until symptoms in days. These\n numbers will be used to construct a Weibull\n distribution from which the time until symptoms will\n be drawn for every agent individually.\n\n quarantine_duration, default = 14. Positive integer, sets the time a\n positively tested agent is quarantined in days\n\n infection_risk_contact_type_weights: dictionary of the form\n {'very_far':float, 'far':float, 'intermediate':float, 'close':float}\n that sets transmission risk multipliers for different contact types of\n agents specified in the contact network G. Default: {'very_far': 0.1,\n 'far': 0.5, 'intermediate': 1, 'close': 3}\n\n subclinical_modifier: default = 1.0. 
Float, modifies the infectiousness of\n asymptomatic cases. Example: if subclinical_modifier = 0.5, the\n infectiousness of an asymptomatic case will be reduced to 50%.\n\n K1_contact_types: list of strings from ['very_far', 'far', 'intermediate',\n 'close']. Definition of contact types for which agents are considered\n \"K1 contact persons\" if they had contact to a positively tested person wtith\n a specified contact intensity. Default = ['close'].\n\n diagnostic_test_type, default = 'one_day_PCR'. String, specifies the test\n technology and test result turnover time used for diagnostic testing. For\n example 'same_day_antigen' or 'two_day_PCR'. See module \"Testing\" for\n different implemented testing techologies.\n\n preventive_screening_test_type:, default = 'one_day_PCR', String, specifies\n the test technology and test result turnover time used for preventive\n sreening. For example 'same_day_antigen' or 'two_day_PCR'. See module\n \"Testing\" for different implemented testing techologies.\n\n follow_up_testing_interval, default = None. Positive integer, sets the time\n a follow-up screen (background screen) is initiated after an initial screen\n triggered by a positive test result. Only applies if the testing strategy is\n 'background' or preventive.\n\n liberating_testing, default = False. Boolean, flag that specifies, whether\n or not an agent is released from quarantine after returning a negative test\n result.\n\n\tindex_case, default = 'employee' (nursing home scenario) or 'teacher'\n (school scenario). Specifies how infections are introduced into the facility.\n agent_type: If an agent type (for example 'student' or 'teacher' in\n the school scenario) is specified, a single randomly\n chosen agent from this agent group will become the index\n case and no further index cases will be introduced into\n the scenario.\n 'continuous': In this case, agents have a continuous risk to become\n index cases in every simulation step. The risk has to\n be specified for every agent group individually, using\n the 'index_probability' parameter. If only a single\n agent group has a non-zero index probability, then only\n agents from this group can become index cases.\n\n\n agent_types: dictionary of the structure\n {\n agent type:\n {\n screening interval : integer, number of days between each preventive\n screen in this agent group\n\n index probability : float in the range [0, 1], sets the probability\n to become an index case in each time step\n\n mask : bool\n whether or not the agent type is wearing a mask\n }\n }\n\n The dictionary's keys are the names of the agent types which have to\n correspond to the node attributes in the contact graph. The screening\n interval sets the time-delay between preventive screens of this agent group,\n the index probability sets the probability of a member of this agent group\n becoming an index case in every time step\n\n seed: positive integer, fixes the seed of the simulation to enable\n repeatable simulation runs. 
If seed = None, the simulation will be\n initialized at random.\n '''\n\n def __init__(self, G,\n verbosity = 0,\n base_transmission_risk = 0.05,\n testing='diagnostic',\n exposure_duration = [5.0, 1.9],\n time_until_symptoms = [6.4, 0.8],\n infection_duration = [10.91, 3.95],\n quarantine_duration = 10,\n subclinical_modifier = 0.6,\n infection_risk_contact_type_weights = {\n 'very_far': 0.1,\n 'far': 0.25,\n 'intermediate': 0.5,\n 'close': 1},\n K1_contact_types = ['close'],\n diagnostic_test_type = 'one_day_PCR',\n preventive_screening_test_type = 'same_day_antigen',\n follow_up_testing_interval = None,\n liberating_testing = False,\n index_case = 'teacher',\n agent_types = {\n 'teacher': {'screening_interval': None,\n 'index_probability': 0,\n 'mask':False,\n 'vaccination_ratio': 0},\n 'student': {'screening_interval': None,\n 'index_probability': 0,\n 'mask':False,\n 'vaccination_ratio': 0},\n 'family_member':{'screening_interval': None,\n 'index_probability': 0,\n 'mask':False,\n 'vaccination_ratio': 0}},\n age_transmission_risk_discount = \\\n {'slope':-0.02,\n 'intercept':1},\n age_symptom_modification = \\\n {'slope':-0.02545,\n 'intercept':0.854545},\n mask_filter_efficiency = {'exhale':0, 'inhale':0},\n transmission_risk_ventilation_modifier = 0,\n transmission_risk_vaccination_modifier = {\n 'reception':1,\n 'transmission':0},\n seed = None):\n\n # mesa models already implement fixed seeds through their own random\n # number generations. Sadly, we need to use the Weibull distribution\n # here, which is not implemented in mesa's random number generation\n # module. Therefore, we need to initialize the numpy random number\n # generator with the given seed as well\n if seed != None:\n np.random.seed(seed)\n\n # sets the (daily) transmission risk for a household contact without\n # any precautions. Target infection ratios are taken from literature\n # and the value of the base_transmission_risk is calibrated such that\n # the simulation produces the correct infection ratios in a household\n # setting with the given distributions for epidemiological parameters\n # of agents\n self.base_transmission_risk = base_transmission_risk\n \t# sets the level of detail of text output to stdout (0 = no output)\n self.verbosity = check_positive_int(verbosity)\n # flag to turn off the testing & tracing strategy\n self.testing = check_testing(testing)\n self.running = True # needed for the batch runner implemented by mesa\n # set the interaction mode to simultaneous activation\n self.schedule = SimultaneousActivation(self)\n\n\n # internal step counter used to launch screening tests\n self.Nstep = 0\n\n # since we may have weekday-specific contact networks, we need\n # to keep track of the day of the week. 
Since the index case\n # per default is introduced at step 0 in index case mode, we\n # need to offset the starting weekday by a random number of weekdays\n # to prevent artifacts from always starting on the same day of the week\n\n self.weekday_offset = self.random.randint(1, 8)\n self.weekday = self.Nstep + self.weekday_offset\n\n ## epidemiological parameters: can be either a single integer or the\n # mean and standard deviation of a distribution\n self.epi_params = {}\n # counter to track the number of pathological parameter combinations\n # that had to be re-rolled (only here for debugging and control reasons)\n self.param_rerolls = 0\n\n for param, param_name in zip([exposure_duration, time_until_symptoms,\n infection_duration],['exposure_duration', 'time_until_symptoms',\n 'infection_duration']):\n\n if isinstance(param, int):\n self.epi_params[param_name] = check_positive_int(param)\n\n elif isinstance(param, list) and len(param) == 2:\n\n mu = check_positive(param[0])\n var = check_positive(param[1]**2)\n shape = root_scalar(get_weibull_shape, args=(mu, var),\n method='toms748', bracket=[0.2, 500]).root\n scale = get_weibull_scale(mu, shape)\n\n self.epi_params[param_name] = [shape, scale]\n else:\n print('{} format not recognized, should be either a single '+\\\n 'int or a tuple of two positive numbers'.format(param_name))\n\n\n # duration of quarantine\n self.quarantine_duration = check_positive_int(quarantine_duration)\n\n self.infection_risk_area_weights = check_contact_type_dict(\n infection_risk_contact_type_weights)\n\n # modifier for infectiosness for asymptomatic cases\n self.subclinical_modifier = check_positive(subclinical_modifier)\n # modifiers for the infection risk, depending on contact type\n self.infection_risk_contact_type_weights = infection_risk_contact_type_weights\n\n # modifications for age-dependent transmission and reception risks and\n # symptom probabilities\n self.age_transmission_risk_discount = \\\n check_discount(age_transmission_risk_discount)\n\n self.age_symptom_modification = age_symptom_modification\n #check_discount(age_symptom_modification)\n\n self.mask_filter_efficiency = mask_filter_efficiency\n self.transmission_risk_ventilation_modifier = \\\n transmission_risk_ventilation_modifier\n self.transmission_risk_vaccination_modifier = \\\n transmission_risk_vaccination_modifier\n ## agents and their interactions\n # interaction graph of agents\n self.G = check_graph(G)\n # add weights as edge attributes so they can be visualised easily\n if type(self.G) == nx.MultiGraph:\n for (u, v, key, contact_type) in self.G.edges(keys=True,\n data='contact_type'):\n self.G[u][v][key]['weight'] = \\\n self.infection_risk_contact_type_weights[contact_type]\n else:\n for e in G.edges(data=True):\n G[e[0]][e[1]]['weight'] = self.infection_risk_contact_type_weights\\\n \t[G[e[0]][e[1]]['contact_type']]\n\n # extract the different agent types from the contact graph\n self.agent_types = list(agent_types.keys())\n # dictionary of available agent classes with agent types and classes\n self.agent_classes = {}\n if 'resident' in agent_types:\n from scseirx.agent_resident import resident\n self.agent_classes['resident'] = resident\n if 'employee' in agent_types:\n from scseirx.agent_employee import employee\n self.agent_classes['employee'] = employee\n if 'student' in agent_types:\n from scseirx.agent_student import student\n self.agent_classes['student'] = student\n if 'teacher' in agent_types:\n from scseirx.agent_teacher import teacher\n 
self.agent_classes['teacher'] = teacher\n if 'family_member' in agent_types:\n from scseirx.agent_family_member import family_member\n self.agent_classes['family_member'] = family_member\n if 'lecturer' in agent_types:\n from scseirx.agent_lecturer import lecturer\n self.agent_classes['lecturer'] = lecturer\n if 'unistudent' in agent_types:\n from scseirx.agent_unistudent import unistudent\n self.agent_classes['unistudent'] = unistudent\n\n ## set agent characteristics for all agent groups\n # list of agent characteristics\n params = ['screening_interval','index_probability', 'mask' ,'vaccination_ratio',\n 'voluntary_testing_rate']\n\n # default values that are used in case a characteristic is not specified\n # for an agent group\n defaults = {'screening_interval':None,\n 'index_probability':0,\n 'mask':False,\n 'vaccination_ratio':0,\n 'voluntary_testing_rate':1\n }\n\n # sanity checks that are applied to parameters passed to the class\n # constructor to make sure they conform to model expectations\n check_funcs = [check_positive_int, check_probability, check_bool,\n check_probability, check_probability]\n\n # member dicts that store the parameter values for each agent group\n self.screening_intervals = {}\n self.index_probabilities = {}\n self.masks = {}\n self.vaccination_probabilities = {}\n self.voluntary_testing_rates = {}\n\n\n param_dicts = [self.screening_intervals, self.index_probabilities,\n self.masks, self.vaccination_probabilities, self.voluntary_testing_rates]\n\n # iterate over all possible agent parameters and agent groups: set the\n # respective value to the value passed through the constructor or to\n # the default value if no value has been passed\n for param,param_dict,check_func in zip(params,param_dicts,check_funcs):\n for at in self.agent_types:\n try:\n param_dict.update({at:check_func(agent_types[at][param])})\n except KeyError:\n param_dict.update({at:defaults[param]})\n\n # pass all parameters relevant for the testing strategy to the testing\n # class. 
NOTE: this separation is not a strictly necessary design\n # decision but I like to keep the parameters related to testing and\n # tracing in a separate place\n self.Testing = Testing(self, diagnostic_test_type,\n preventive_screening_test_type,\n check_positive_int(follow_up_testing_interval),\n self.screening_intervals,\n check_bool(liberating_testing),\n check_K1_contact_types(K1_contact_types),\n verbosity)\n\n\n # specifies either continuous probability for index cases in agent\n # groups based on the 'index_probability' for each agent group, or a\n # single (randomly chosen) index case in the passed agent group\n self.index_case = check_index_case(index_case, self.agent_types)\n\n self.num_agents = {}\n\n ## add agents\n # extract the agent nodes from the graph and add them to the scheduler\n for agent_type in self.agent_types:\n IDs = [x for x,y in G.nodes(data=True) if y['type'] == agent_type]\n self.num_agents.update({agent_type:len(IDs)})\n\n # get the agent locations (units) from the graph node attributes\n units = [self.G.nodes[ID]['unit'] for ID in IDs]\n\n # determine the agents that will be vaccinated, given the \n # vaccination ratio of the respective agent group\n vaccination_status = np.asarray([False] * len(IDs))\n if self.vaccination_probabilities[agent_type] > 0:\n n = round(self.vaccination_probabilities[agent_type] * len(IDs))\n idx = list(range(len(IDs)))\n rnd_idx = np.asarray(self.random.sample(idx, n))\n vaccination_status[rnd_idx] = True\n\n\n for ID, unit, vaccinated in zip(IDs, units, vaccination_status):\n\n tmp_epi_params = {}\n # for each of the three epidemiological parameters, check if\n # the parameter is an integer (if yes, pass it directly to the\n # agent constructor), or if it is specified by the shape and\n # scale parameters of a Weibull distribution. 
In the latter\n # case, draw a new number for every agent from the distribution\n # NOTE: parameters drawn from the distribution are rounded to\n # the nearest integer\n while True:\n for param_name, param in self.epi_params.items():\n if isinstance(param, int):\n tmp_epi_params[param_name] = param\n\n else:\n tmp_epi_params[param_name] = \\\n round(weibull_two_param(param[0], param[1]))\n\n if tmp_epi_params['exposure_duration'] > 0 and \\\n tmp_epi_params['time_until_symptoms'] >= \\\n tmp_epi_params['exposure_duration'] and\\\n tmp_epi_params['infection_duration'] > \\\n tmp_epi_params['exposure_duration']:\n break\n else:\n self.param_rerolls += 1\n if verbosity > 1:\n print('pathological epi-param case found!')\n print(tmp_epi_params)\n\n # check if the agent participates in voluntary testing\n p = self.voluntary_testing_rates[agent_type]\n voluntary_testing = np.random.choice([True, False],\n p=[p, 1-p])\n\n # construct the agent object\n a = self.agent_classes[agent_type](ID, unit, self,\n tmp_epi_params['exposure_duration'],\n tmp_epi_params['time_until_symptoms'],\n tmp_epi_params['infection_duration'],\n vaccinated,\n voluntary_testing,\n verbosity)\n self.schedule.add(a)\n\n\n\t\t# infect the first agent in single index case mode\n if self.index_case != 'continuous':\n infection_targets = [\n a for a in self.schedule.agents if a.type == index_case]\n # pick a random agent to infect in the selected agent group\n target = self.random.randint(0, len(infection_targets) - 1)\n infection_targets[target].exposed = True\n if self.verbosity > 0:\n print('{} exposed: {}'.format(index_case,\n infection_targets[target].ID))\n\n\n # list of agents that were tested positive this turn\n self.newly_positive_agents = []\n # flag that indicates if there were new positive tests this turn\n self.new_positive_tests = False\n # dictionary of flags that indicate whether a given agent group has\n # been creened this turn\n self.screened_agents= {\n 'reactive':{agent_type: False for agent_type in self.agent_types},\n 'follow_up':{agent_type: False for agent_type in self.agent_types},\n 'preventive':{agent_type: False for agent_type in self.agent_types}}\n\n\n # dictionary of counters that count the days since a given agent group\n # was screened. Initialized differently for different index case modes\n if (self.index_case == 'continuous') or \\\n \t (not np.any(list(self.Testing.screening_intervals.values()))):\n \tself.days_since_last_agent_screen = {agent_type: 0 for agent_type in\n \tself.agent_types}\n # NOTE: if we initialize these variables with 0 in the case of a single\n # index case, we introduce a bias since in 'single index case mode' the\n # first index case will always become exposed in step 0. 
To realize\n # random states of the preventive sceening procedure with respect to the\n # incidence of the index case, we have to randomly pick the days since\n # the last screen for the agent group from which the index case is\n else:\n \tself.days_since_last_agent_screen = {}\n \tfor agent_type in self.agent_types:\n \t\tif self.Testing.screening_intervals[agent_type] != None:\n \t\t\tself.days_since_last_agent_screen.update({\n \t\t\t\tagent_type: self.random.choice(range(0,\n \t\t\t\t self.Testing.screening_intervals[agent_type] + 1))})\n \t\telse:\n \t\t\tself.days_since_last_agent_screen.update({agent_type: 0})\n\n # dictionary of flags that indicates whether a follow-up screen for a\n # given agent group is scheduled\n self.scheduled_follow_up_screen = {agent_type: False for agent_type in\n \tself.agent_types}\n\n # counters\n self.number_of_diagnostic_tests = 0\n self.number_of_preventive_screening_tests = 0\n self.positive_tests = {self.Testing.preventive_screening_test_type:\n {agent_type:0 for agent_type in self.agent_types},\n self.Testing.diagnostic_test_type:\n {agent_type:0 for agent_type in self.agent_types}}\n\n self.undetected_infections = 0\n self.predetected_infections = 0\n self.pending_test_infections = 0\n self.quarantine_counters = {agent_type:0 for agent_type in agent_types.keys()}\n self.false_negative = 0\n\n # data collectors to save population counts and agent states every\n # time step\n\n model_reporters = {\n 'N_diagnostic_tests':get_N_diagnostic_tests,\n 'N_preventive_screening_tests':get_N_preventive_screening_tests,\n 'undetected_infections':get_undetected_infections,\n 'predetected_infections':get_predetected_infections,\n 'pending_test_infections':get_pending_test_infections\n }\n\n for agent_type in self.agent_types:\n model_reporters.update({\n 'diagnostic_test_detected_infections_{}'.format(agent_type):\\\n diagnostic_test_detected_infections_funcs[agent_type]\n })\n model_reporters.update({\n 'preventive_test_detected_infections_{}'.format(agent_type):\\\n preventive_test_detected_infections_funcs[agent_type]\n })\n\n\n self.datacollector = DataCollector(\n model_reporters=model_reporters,\n agent_reporters=\n \t{\n \t'infection_state': get_infection_state,\n 'quarantine_state': get_quarantine_state\n })\n\n\n ## transmission risk modifiers\n def get_transmission_risk_contact_type_modifier(self, source, target):\n # construct the edge key as combination between agent IDs and weekday\n n1 = source.ID\n n2 = target.ID\n tmp = [n1, n2]\n tmp.sort()\n n1, n2 = tmp\n key = '{}{}d{}'.format(n1, n2, self.weekday)\n contact_weight = self.G.get_edge_data(n1, n2, key)['weight']\n\n # the link weight is a multiplicative modifier of the link strength.\n # contacts of type \"close\" have, by definition, a weight of 1. Contacts\n # of type intermediate, far or very far have a weight < 1 and therefore\n # are less likely to transmit an infection. For example, if the contact\n # type far has a weight of 0.2, a contact of type far has only a 20%\n # chance of transmitting an infection, when compared to a contact of\n # type close. To calculate the probability of success p in the Bernoulli\n # trial, we need to reduce the base risk (or base probability of success)\n # by the modifications introduced by preventive measures. These\n # modifications are formulated in terms of \"probability of failure\", or\n # \"q\". 
A low contact weight has a high probability of failure, therefore\n # we return q = 1 - contact_weight here.\n q1 = 1 - contact_weight\n\n return q1\n\n\n def get_transmission_risk_age_modifier_transmission(self, source):\n '''linear function such that at age 18 the risk is that of an adult (=1).\n The slope of the line needs to be calibrated.\n '''\n age = source.age\n max_age = 18\n if age <= max_age:\n age_weight = self.age_transmission_risk_discount['slope'] * \\\n np.abs(age - max_age) + self.age_transmission_risk_discount['intercept']\n\n # The age weight can be interpreted as multiplicative factor that\n # reduces the chance for transmission with decreasing age. The slope\n # of the age_transmission_discount function is the decrease (in % of\n # the transmission risk for an 18 year old or above) of transmission\n # risk with every year a person is younger than 18 (the intercept is\n # 1 by definition).\n # To calculate the probability of success p in the Bernoulli\n # trial, we need to reduce the base risk (or base probability of \n # success) by the modifications introduced by preventive measures. \n # These modifications are formulated in terms of \"probability of \n # failure\", or \"q\". A low age weight has a high probability of \n # failure, therefore we return q = 1 - age_weight here.\n q2 = 1 - age_weight\n else:\n q2 = 0\n\n return q2\n\n\n def get_transmission_risk_age_modifier_reception(self, target):\n '''linear function such that at age 18 the risk is that of an adult (=1).\n The slope of the line needs to be calibrated.\n '''\n age = target.age\n max_age = 18\n if age <= max_age:\n age_weight = self.age_transmission_risk_discount['slope'] * \\\n np.abs(age - max_age) + self.age_transmission_risk_discount['intercept']\n # see description in get_transmission_risk_age_modifier_transmission\n q3 = 1 - age_weight\n else:\n q3 = 0\n\n return q3\n\n\n # infectiousness is constant and high until symptom onset and then\n # decreases monotonically until agents are not infectious anymore\n # at the end of the infection_duration\n def get_transmission_risk_progression_modifier(self, source):\n if source.days_since_exposure < source.exposure_duration:\n progression_weight = 0\n elif source.days_since_exposure <= source.time_until_symptoms:\n progression_weight = 1\n elif source.days_since_exposure > source.time_until_symptoms and \\\n source.days_since_exposure <= source.infection_duration:\n # we add 1 in the denominator, such that the source is also\n # (slightly) infectious on the last day of the infection_duration\n progression_weight = \\\n (source.days_since_exposure - source.time_until_symptoms) / \\\n (source.infection_duration - source.time_until_symptoms + 1)\n else:\n progression_weight = 0\n # see description in get_transmission_risk_age_modifier_transmission\n q4 = 1 - progression_weight\n\n return q4\n\n def get_transmission_risk_subclinical_modifier(self, source):\n if source.symptomatic_course == False:\n subclinical_weight = self.subclinical_modifier\n else:\n subclinical_weight = 1\n # see description in get_transmission_risk_age_modifier_transmission\n q5 = 1 - subclinical_weight\n return q5\n\n def get_transmission_risk_exhale_modifier(self, source):\n if source.mask:\n exhale_weight = self.mask_filter_efficiency['exhale']\n else:\n exhale_weight = 1\n # see description in get_transmission_risk_age_modifier_transmission\n q6 = 1 - exhale_weight\n return q6\n\n\n def get_transmission_risk_inhale_modifier(self, target):\n if target.mask:\n inhale_weight = 
self.mask_filter_efficiency['inhale']\n else:\n inhale_weight = 1\n # see description in get_transmission_risk_age_modifier_transmission\n q7 = 1 - inhale_weight\n return q7\n\n\n def get_transmission_risk_ventilation_modifier(self):\n ventilation_weight = self.transmission_risk_ventilation_modifier\n # see description in get_transmission_risk_age_modifier_transmission\n q8 = 1 - ventilation_weight\n return q8\n\n def get_transmission_risk_vaccination_modifier_reception(self, a):\n if a.vaccinated:\n q9 = self.transmission_risk_vaccination_modifier['reception']\n else:\n q9 = 0\n return q9\n\n def get_transmission_risk_vaccination_modifier_transmission(self, a):\n if a.vaccinated:\n q10 = self.transmission_risk_vaccination_modifier['transmission']\n else:\n q10 = 0\n return q10\n\n def test_agent(self, a, test_type):\n a.tested = True\n a.pending_test = test_type\n if test_type == self.Testing.diagnostic_test_type:\n self.number_of_diagnostic_tests += 1\n else:\n self.number_of_preventive_screening_tests += 1\n\n if a.exposed:\n # tests that happen in the period of time in which the agent is\n # exposed but not yet infectious. \n # Note: tests[test_type]['time_until_testable'] is negative for\n # tests that can detect an infection before agents become infectious\n if a.days_since_exposure >= a.exposure_duration + \\\n self.Testing.tests[test_type]['time_until_testable']:\n \n if self.verbosity > 1:\n print('{} {} sent positive sample (even though not infectious yet)'\n .format(a.type, a.ID))\n a.sample = 'positive'\n self.predetected_infections += 1\n self.positive_tests[test_type][a.type] += 1\n else:\n if self.verbosity > 1: print('{} {} sent negative sample'\n .format(a.type, a.ID))\n a.sample = 'negative'\n\n elif a.infectious:\n # tests that happen in the period of time in which the agent is\n # infectious and the infection is detectable by a given test\n # Note: tests[test_type]['time_until_testable'] is negative for \n # tests that can detect an infection before agents become \n # infectious. 
tests[test_type]['time_testable'] is negative for\n # tests that cease to detect an infection before agents stop being\n # infectious\n if a.days_since_exposure >= a.exposure_duration + \\\n self.Testing.tests[test_type]['time_until_testable'] and \\\n a.days_since_exposure <= a.infection_duration + \\\n self.Testing.tests[test_type]['time_testable']:\n if self.verbosity > 1:\n print('{} {} sent positive sample'.format(a.type, a.ID))\n a.sample = 'positive'\n self.positive_tests[test_type][a.type] += 1\n\n # track the undetected infections to assess how important they are\n # for infection spread\n else:\n if self.verbosity > 1:\n print('{} {} sent negative sample (even though infectious)'\n .format(a.type, a.ID))\n a.sample = 'negative'\n self.undetected_infections += 1\n\n else:\n if self.verbosity > 1: print('{} {} sent negative sample'\n .format(a.type, a.ID))\n a.sample = 'negative'\n\n # for same-day testing, immediately act on the results of the test\n if a.days_since_tested >= self.Testing.tests[test_type]['time_until_test_result']:\n a.act_on_test_result()\n\n def screen_agents(self, agent_group, test_type, screen_type):\n # only test agents that have not been tested already in this simulation\n # step and that are not already known positive cases\n\n if self.verbosity > 0:\n print('initiating {} {} screen'\\\n .format(screen_type, agent_group))\n\n untested_agents = [a for a in self.schedule.agents if\n (a.tested == False and a.known_positive == False\n and a.type == agent_group)]\n\n if len(untested_agents) > 0:\n self.screened_agents[screen_type][agent_group] = True\n self.days_since_last_agent_screen[agent_group] = 0\n\n # only test agents if they participate in voluntary testing\n if screen_type == 'preventive':\n for a in untested_agents:\n if a.voluntary_testing:\n self.test_agent(a, test_type)\n else:\n if self.verbosity > 1:\n print('not testing {} {}, not participating in voluntary testing'\\\n .format(agent_group, a.ID))\n else:\n for a in untested_agents:\n self.test_agent(a, test_type)\n\n if self.verbosity > 0:\n print()\n else:\n if self.verbosity > 0:\n print('no agents tested because all agents have already been tested')\n\n # the type of the test used in the pending test result is stored in the\n # variable pending_test\n\n def collect_test_results(self):\n agents_with_test_results = [a for a in self.schedule.agents if\n (a.pending_test and\n a.days_since_tested >= self.Testing.tests[a.pending_test]['time_until_test_result'])]\n\n return agents_with_test_results\n\n def trace_contacts(self, a):\n if a.quarantined == False:\n a.quarantined = True\n a.quarantine_start = self.Nstep\n\n if self.verbosity > 0:\n print('qurantined {} {}'.format(a.type, a.ID))\n\n # find all agents that share edges with the agent\n # that are classified as K1 contact types in the testing\n # strategy\n if a in self.G.nodes():\n K1_contacts = [e[1] for e in self.G.edges(a.ID, data=True) if\n e[2]['contact_type'] in self.Testing.K1_contact_types]\n K1_contacts = [a for a in self.schedule.agents if a.ID in K1_contacts]\n\n for K1_contact in K1_contacts:\n if self.verbosity > 0:\n print('quarantined {} {} (K1 contact of {} {})'\n .format(K1_contact.type, K1_contact.ID, a.type, a.ID))\n K1_contact.quarantined = True\n K1_contact.quarantine_start = self.Nstep\n\n def test_symptomatic_agents(self):\n # find symptomatic agents that have not been tested yet and are not\n # in quarantine and test them\n newly_symptomatic_agents = np.asarray([a for a in self.schedule.agents\n if (a.symptoms == 
True and a.tested == False and a.quarantined == False)])\n\n for a in newly_symptomatic_agents:\n # all symptomatic agents are quarantined by default\n if self.verbosity > 0:\n print('quarantined: {} {}'.format(a.type, a.ID))\n a.quarantined = True\n a.quarantine_start = self.Nstep\n\n self.test_agent(a, self.Testing.diagnostic_test_type)\n\n def quarantine_contacts(self):\n # trace and quarantine contacts of newly positive agents\n if len(self.newly_positive_agents) > 0:\n if self.verbosity > 0: print('new positive test(s) from {}'\n .format([a.ID for a in self.newly_positive_agents]))\n\n # send all K1 contacts of positive agents into quarantine\n for a in self.newly_positive_agents:\n self.trace_contacts(a)\n\n # indicate that a screen should happen because there are new\n # positive test results\n self.new_positive_tests = True\n self.newly_positive_agents = []\n\n else:\n self.new_positive_tests = False\n\n\n def step(self):\n self.weekday = (self.Nstep + self.weekday_offset) % 7 + 1\n # if the connection graph is time-resloved, set the graph that is\n # used to determine connections in this step to the sub-graph corres-\n # ponding to the current day of the week\n if self.dynamic_connections:\n self.G = self.weekday_connections[self.weekday]\n\n if self.verbosity > 0:\n print('weekday {}'.format(self.weekday))\n\n if self.testing:\n for agent_type in self.agent_types:\n for screen_type in ['reactive', 'follow_up', 'preventive']:\n self.screened_agents[screen_type][agent_type] = False\n\n if self.verbosity > 0:\n print('* testing and tracing *')\n\n self.test_symptomatic_agents()\n\n\n # collect and act on new test results\n agents_with_test_results = self.collect_test_results()\n for a in agents_with_test_results:\n a.act_on_test_result()\n\n self.quarantine_contacts()\n\n # screening:\n # a screen should take place if\n # (a) there are new positive test results\n # (b) as a follow-up screen for a screen that was initiated because\n # of new positive cases\n # (c) if there is a preventive screening policy and it is time for\n # a preventive screen in a given agent group\n\n # (a)\n if (self.testing == 'background' or self.testing == 'background+preventive')\\\n and self.new_positive_tests == True:\n for agent_type in self.screening_agents:\n self.screen_agents(\n agent_type, self.Testing.diagnostic_test_type, 'reactive')\n self.scheduled_follow_up_screen[agent_type] = True\n\n # (b)\n elif (self.testing == 'background' or self.testing == 'background+preventive') and \\\n self.Testing.follow_up_testing_interval != None and \\\n sum(list(self.scheduled_follow_up_screen.values())) > 0:\n for agent_type in self.screening_agents:\n if self.scheduled_follow_up_screen[agent_type] and\\\n self.days_since_last_agent_screen[agent_type] >=\\\n self.Testing.follow_up_testing_interval:\n self.screen_agents(\n agent_type, self.Testing.diagnostic_test_type, 'follow_up')\n else:\n if self.verbosity > 0:\n print('not initiating {} follow-up screen (last screen too close)'\\\n .format(agent_type))\n\n # (c) \n elif (self.testing == 'preventive' or self.testing == 'background+preventive')and \\\n np.any(list(self.Testing.screening_intervals.values())):\n\n for agent_type in self.screening_agents:\n interval = self.Testing.screening_intervals[agent_type]\n assert interval in [7, 3, 2, None], \\\n 'testing interval {} for agent type {} not supported!'\\\n .format(interval, agent_type)\n\n # (c.1) testing every 7 days = testing on Mondays\n if interval == 7 and self.weekday == 1:\n 
self.screen_agents(agent_type,\n self.Testing.preventive_screening_test_type,\\\n 'preventive')\n # (c.2) testing every 3 days = testing on Mo & Turs\n elif interval == 3 and self.weekday in [1, 4]:\n self.screen_agents(agent_type,\n self.Testing.preventive_screening_test_type,\\\n 'preventive')\n # (c.3) testing every 2 days = testing on Mo, Wed & Fri\n elif interval == 2 and self.weekday in [1, 3, 5]:\n self.screen_agents(agent_type,\n self.Testing.preventive_screening_test_type,\\\n 'preventive')\n # No interval specified = no testing, even if testing\n # mode == preventive\n elif interval == None:\n pass\n else:\n if self.verbosity > 0:\n print('not initiating {} preventive screen (wrong weekday)'\\\n .format(agent_type))\n else:\n # do nothing\n pass\n\n for agent_type in self.agent_types:\n if not (self.screened_agents['reactive'][agent_type] or \\\n self.screened_agents['follow_up'][agent_type] or \\\n self.screened_agents['preventive'][agent_type]):\n self.days_since_last_agent_screen[agent_type] += 1\n\n\n if self.verbosity > 0: print('* agent interaction *')\n self.datacollector.collect(self)\n self.schedule.step()\n self.Nstep += 1\n",
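The model source stored above turns a mean/standard-deviation pair for each epidemiological duration (exposure duration, time until symptoms, infection duration) into Weibull shape and scale parameters by root finding on the shape, then draws an integer duration per agent. Below is a minimal, self-contained sketch of that moment-matching step under stated assumptions; the function names are illustrative, not the packaged scseirx helpers.

import numpy as np
from scipy.special import gamma
from scipy.optimize import root_scalar

def weibull_shape_equation(k, mu, var):
    # zero when a Weibull with shape k reproduces the target mean mu and variance var:
    # var / mu**2 == gamma(1 + 2/k) / gamma(1 + 1/k)**2 - 1
    return var / mu**2 - gamma(1 + 2/k) / gamma(1 + 1/k)**2 + 1

def weibull_params_from_moments(mu, sd):
    var = sd**2
    shape = root_scalar(weibull_shape_equation, args=(mu, var),
                        method='toms748', bracket=[0.2, 500]).root
    scale = mu / gamma(1 + 1/shape)  # scale follows from the mean once the shape is known
    return shape, scale

# example: exposure duration with mean 5.0 days and standard deviation 1.9 days
shape, scale = weibull_params_from_moments(5.0, 1.9)
# agent-level durations are drawn from the two-parameter distribution and rounded to days
durations = np.round(scale * np.random.weibull(shape, size=5)).astype(int)
print(shape, scale, durations)

The bracket [0.2, 500] mirrors the one used in the stored code and keeps the toms748 root finder in a range where the moment equation changes sign for realistic mean/std pairs.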
"import numpy as np\nimport networkx as nx\nfrom mesa import Model\nfrom mesa.time import RandomActivation, SimultaneousActivation\nfrom mesa.datacollection import DataCollector\n\nfrom scseirx.model_SEIRX import *\n\n\n## data collection functions ##\n\ndef count_S_student(model):\n S = np.asarray([1 for a in model.schedule.agents if a.type == 'student' and\\\n a.exposed == False and a.recovered == False \\\n and a.infectious == False]).sum()\n return S\n\n\ndef count_E_student(model):\n E = np.asarray(\n [a.exposed for a in model.schedule.agents if a.type == 'student']).sum()\n return E\n\n\ndef count_I_student(model):\n I = np.asarray(\n [a.infectious for a in model.schedule.agents if a.type == 'student']).sum()\n return I\n\n\ndef count_I_symptomatic_student(model):\n I = np.asarray([a.infectious for a in model.schedule.agents if\n (a.type == 'student'and a.symptomatic_course)]).sum()\n return I\n\ndef count_V_student(model):\n V = np.asarray([a.vaccinated for a in model.schedule.agents if\n (a.type == 'student')]).sum()\n return V\n\ndef count_I_asymptomatic_student(model):\n I = np.asarray([a.infectious for a in model.schedule.agents if\n (a.type == 'student'and a.symptomatic_course == False)]).sum()\n return I\n\n\ndef count_R_student(model):\n R = np.asarray(\n [a.recovered for a in model.schedule.agents if a.type == 'student']).sum()\n return R\n\n\ndef count_X_student(model):\n X = np.asarray(\n [a.quarantined for a in model.schedule.agents if a.type == 'student']).sum()\n return X\n\n\ndef count_S_teacher(model):\n S = np.asarray([1 for a in model.schedule.agents if a.type == 'feacher' and\\\n a.exposed == False and a.recovered == False \\\n and a.infectious == False]).sum()\n return S\n\n\ndef count_E_teacher(model):\n E = np.asarray(\n [a.exposed for a in model.schedule.agents if a.type == 'teacher']).sum()\n return E\n\n\ndef count_I_teacher(model):\n I = np.asarray(\n [a.infectious for a in model.schedule.agents if a.type == 'teacher']).sum()\n return I\n\n\ndef count_I_symptomatic_teacher(model):\n I = np.asarray([a.infectious for a in model.schedule.agents if\n (a.type == 'teacher'and a.symptomatic_course)]).sum()\n return I\n\ndef count_V_teacher(model):\n V = np.asarray([a.vaccinated for a in model.schedule.agents if\n (a.type == 'teacher')]).sum()\n return V\n\ndef count_I_asymptomatic_teacher(model):\n I = np.asarray([a.infectious for a in model.schedule.agents if\n (a.type == 'teacher'and a.symptomatic_course == False)]).sum()\n return I\n\n\ndef count_R_teacher(model):\n R = np.asarray(\n [a.recovered for a in model.schedule.agents if a.type == 'teacher']).sum()\n return R\n\n\ndef count_X_teacher(model):\n X = np.asarray(\n [a.quarantined for a in model.schedule.agents if a.type == 'teacher']).sum()\n return X\n\n\ndef count_S_family_member(model):\n S = np.asarray([1 for a in model.schedule.agents if a.type == 'family_member' and\\\n a.exposed == False and a.recovered == False \\\n and a.infectious == False]).sum()\n return S\n\n\ndef count_E_family_member(model):\n E = np.asarray(\n [a.exposed for a in model.schedule.agents if a.type == 'family_member']).sum()\n return E\n\n\ndef count_I_family_member(model):\n I = np.asarray(\n [a.infectious for a in model.schedule.agents if a.type == 'family_member']).sum()\n return I\n\n\ndef count_I_symptomatic_family_member(model):\n I = np.asarray([a.infectious for a in model.schedule.agents if\n (a.type == 'family_member'and a.symptomatic_course)]).sum()\n return I\n\ndef count_V_family_member(model):\n V = 
np.asarray([a.vaccinated for a in model.schedule.agents if\n (a.type == 'family_member')]).sum()\n return V\n\ndef count_I_asymptomatic_family_member(model):\n I = np.asarray([a.infectious for a in model.schedule.agents if\n (a.type == 'family_member'and a.symptomatic_course == False)]).sum()\n return I\n\n\ndef count_R_family_member(model):\n R = np.asarray(\n [a.recovered for a in model.schedule.agents if a.type == 'family_member']).sum()\n return R\n\n\ndef count_X_family_member(model):\n X = np.asarray(\n [a.quarantined for a in model.schedule.agents if a.type == 'family_member']).sum()\n return X\n\n\ndef check_reactive_student_screen(model):\n return model.screened_agents['reactive']['student']\n\n\ndef check_follow_up_student_screen(model):\n return model.screened_agents['follow_up']['student']\n\n\ndef check_preventive_student_screen(model):\n return model.screened_agents['preventive']['student']\n\n\ndef check_reactive_teacher_screen(model):\n return model.screened_agents['reactive']['teacher']\n\n\ndef check_follow_up_teacher_screen(model):\n return model.screened_agents['follow_up']['teacher']\n\n\ndef check_preventive_teacher_screen(model):\n return model.screened_agents['preventive']['teacher']\n\n\ndef check_reactive_family_member_screen(model):\n return model.screened_agents['reactive']['family_member']\n\n\ndef check_follow_up_family_member_screen(model):\n return model.screened_agents['follow_up']['family_member']\n\n\ndef check_preventive_family_member_screen(model):\n return model.screened_agents['preventive']['family_member']\n\n\n\ndata_collection_functions = \\\n {\n 'student':\n {\n 'S':count_S_student,\n 'E':count_E_student,\n 'I':count_I_student,\n 'I_asymptomatic':count_I_asymptomatic_student,\n 'V':count_V_student,\n 'I_symptomatic':count_I_symptomatic_student,\n 'R':count_R_student,\n 'X':count_X_student\n },\n 'teacher':\n {\n 'S':count_S_teacher,\n 'E':count_E_teacher,\n 'I':count_I_teacher,\n 'I_asymptomatic':count_I_asymptomatic_teacher,\n 'V':count_V_teacher,\n 'I_symptomatic':count_I_symptomatic_teacher,\n 'R':count_R_teacher,\n 'X':count_X_teacher\n },\n 'family_member':\n {\n 'S':count_S_family_member,\n 'E':count_E_family_member,\n 'I':count_I_family_member,\n 'I_asymptomatic':count_I_asymptomatic_family_member,\n 'V':count_V_family_member,\n 'I_symptomatic':count_I_symptomatic_family_member,\n 'R':count_R_family_member,\n 'X':count_X_family_member\n }\n }\n\n\n\nclass SEIRX_school(SEIRX):\n '''\n Model specific parameters:\n age_risk_discount: discount factor that lowers the transmission and\n reception risk of agents based on age for children. This is only applied\n to student agents as all other agents are assumed to be adults. 
This\n parameter needs to be calibrated against data.\n\n See documentation of model_SEIRX for the description of other parameters.\n '''\n\n def __init__(self, G,\n verbosity = 0,\n base_transmission_risk = 0.05,\n testing = 'diagnostic',\n exposure_duration = [5.0, 1.9],\n time_until_symptoms = [6.4, 0.8],\n infection_duration = [10.91, 3.95],\n quarantine_duration = 10,\n subclinical_modifier = 0.6,\n infection_risk_contact_type_weights = {\n 'very_far': 0.1,\n 'far': 0.25,\n 'intermediate': 0.5,\n 'close': 1},\n K1_contact_types = ['close'],\n diagnostic_test_type = 'one_day_PCR',\n preventive_screening_test_type = 'same_day_antigen',\n follow_up_testing_interval = None,\n liberating_testing = False,\n index_case = 'teacher',\n agent_types = {\n 'teacher': {'screening_interval': None,\n 'index_probability': 0,\n 'mask':False,\n 'vaccination_probability': 0},\n 'student': {'screening_interval': None,\n 'index_probability': 0,\n 'mask':False,\n 'vaccination_probability': 0},\n 'family_member':{'screening_interval': None,\n 'index_probability': 0,\n 'mask':False,\n 'vaccination_probability': 0}},\n age_transmission_risk_discount = \\\n {'slope':-0.02,\n 'intercept':1},\n age_symptom_modification = \\\n {'slope':-0.02545,\n 'intercept':0.854545},\n mask_filter_efficiency = {'exhale':0, 'inhale':0},\n transmission_risk_ventilation_modifier = 0,\n transmission_risk_vaccination_modifier = {'reception':1, 'transmission':0},\n seed = None):\n\n\n super().__init__(G,\n verbosity = verbosity,\n base_transmission_risk = base_transmission_risk,\n testing = testing,\n exposure_duration = exposure_duration,\n time_until_symptoms = time_until_symptoms,\n infection_duration = infection_duration,\n quarantine_duration = quarantine_duration,\n subclinical_modifier = subclinical_modifier,\n infection_risk_contact_type_weights = \\\n infection_risk_contact_type_weights,\n K1_contact_types = K1_contact_types,\n diagnostic_test_type = diagnostic_test_type,\n preventive_screening_test_type = \\\n preventive_screening_test_type,\n follow_up_testing_interval = follow_up_testing_interval,\n liberating_testing = liberating_testing,\n index_case = index_case,\n agent_types = agent_types,\n age_transmission_risk_discount = \\\n age_transmission_risk_discount,\n age_symptom_modification = age_symptom_modification,\n mask_filter_efficiency = mask_filter_efficiency,\n transmission_risk_ventilation_modifier = \\\n transmission_risk_ventilation_modifier,\n transmission_risk_vaccination_modifier = \\\n transmission_risk_vaccination_modifier,\n seed = seed)\n\n # type of the model for some type-specific functionality\n self.model = 'school'\n\n # agent types that are included in preventive, background & follow-up\n # screens\n self.screening_agents = ['teacher', 'student']\n\n # define, whether or not a multigraph that defines separate connections\n # for every day of the week is used\n self.dynamic_connections = True\n self.MG = G\n self.weekday_connections = {}\n all_edges = self.MG.edges(keys=True, data='weekday')\n N_weekdays = 7\n for i in range(1, N_weekdays + 1):\n wd_edges = [(u, v, k) for (u, v, k, wd) in all_edges if wd == i]\n self.weekday_connections[i] = G.edge_subgraph(wd_edges).copy()\n\n\n # data collectors to save population counts and agent states every\n # time step\n model_reporters = {}\n for agent_type in self.agent_types:\n\n for state in ['S','E','I','I_asymptomatic','I_symptomatic','R','X', 'V']:\n\n model_reporters.update({'{}_{}'.format(state, agent_type):\\\n 
data_collection_functions[agent_type][state]})\n\n model_reporters.update(\\\n {\n 'screen_students_reactive':check_reactive_student_screen,\n 'screen_students_follow_up':check_follow_up_student_screen,\n 'screen_students_preventive':check_preventive_student_screen,\n 'screen_teachers_reactive':check_reactive_teacher_screen,\n 'screen_teachers_follow_up':check_follow_up_teacher_screen,\n 'screen_teachers_preventive':check_preventive_teacher_screen,\n 'screen_family_members_reactive':check_reactive_family_member_screen,\n 'screen_family_members_follow_up':check_follow_up_family_member_screen,\n 'screen_family_members_preventive':check_preventive_family_member_screen,\n 'N_diagnostic_tests':get_N_diagnostic_tests,\n 'N_preventive_screening_tests':get_N_preventive_screening_tests,\n 'diagnostic_test_detected_infections_student':\\\n get_diagnostic_test_detected_infections_student,\n 'diagnostic_test_detected_infections_teacher':\\\n get_diagnostic_test_detected_infections_teacher,\n 'diagnostic_test_detected_infections_family_member':\\\n get_diagnostic_test_detected_infections_family_member,\n 'preventive_test_detected_infections_student':\\\n get_preventive_test_detected_infections_student,\n 'preventive_test_detected_infections_teacher':\\\n get_preventive_test_detected_infections_teacher,\n 'preventive_test_detected_infections_family_member':\\\n get_preventive_test_detected_infections_family_member,\n 'undetected_infections':get_undetected_infections,\n 'predetected_infections':get_predetected_infections,\n 'pending_test_infections':get_pending_test_infections\n })\n\n agent_reporters =\\\n {\n 'infection_state':get_infection_state,\n 'quarantine_state':get_quarantine_state\n }\n\n self.datacollector = DataCollector(\n model_reporters = model_reporters,\n agent_reporters = agent_reporters)\n\n def calculate_transmission_probability(self, source, target, base_risk):\n \"\"\"\n Calculates the risk of transmitting an infection between a source agent\n and a target agent given the model's and agent's properties and the base\n transmission risk.\n\n Transmission is an independent Bernoulli trial with a probability of\n success p. The probability of transmission without any modifications\n by for example masks or ventilation is given by the base_risk, which\n is calibrated in the model. 
The probability is modified by contact type\n q1 (also calibrated in the model), age of the transmitting agent q2\n & age of the receiving agent q3 (both age dependencies are linear in\n age and the same, and they are calibrated), infection progression q4\n (from literature), reduction of viral load due to a sublclinical course\n of the disease q5 (from literature), reduction of exhaled viral load of\n the source by mask wearing q6 (from literature), reduction of inhaled\n viral load by the target q7 (from literature), and ventilation of the\n rooms q8 (from literature).\n\n Parameters\n ----------\n source : agent_SEIRX\n Source agent that transmits the infection to the target.\n target: agent_SEIRX\n Target agent that (potentially) receives the infection from the\n source.\n base_risk : float\n Probability p of infection transmission without any modifications\n through prevention measures.\n\n Returns\n -------\n p : float\n Modified transmission risk.\n \"\"\"\n n1 = source.ID\n n2 = target.ID\n tmp = [n1, n2]\n tmp.sort()\n n1, n2 = tmp\n key = n1 + n2 + 'd{}'.format(self.weekday)\n link_type = self.G.get_edge_data(n1, n2, key)['link_type']\n\n q1 = self.get_transmission_risk_contact_type_modifier(source, target)\n q2 = self.get_transmission_risk_age_modifier_transmission(source)\n q3 = self.get_transmission_risk_age_modifier_reception(target)\n q4 = self.get_transmission_risk_progression_modifier(source)\n q5 = self.get_transmission_risk_subclinical_modifier(source)\n q9 = self.get_transmission_risk_vaccination_modifier_reception(target)\n q10 = self.get_transmission_risk_vaccination_modifier_transmission(source)\n\n # contact types where masks and ventilation are irrelevant\n if link_type in ['student_household', 'teacher_household', \n 'student_student_friends']:\n p = 1 - (1 - base_risk * (1 - q1) * (1 - q2) * (1 - q3) * \\\n (1 - q4) * (1 - q5) * (1 - q9) * (1 - q10))\n\n # contact types were masks and ventilation are relevant\n elif link_type in ['student_student_intra_class',\n 'student_student_table_neighbour',\n 'student_student_daycare',\n 'teacher_teacher_short',\n 'teacher_teacher_long',\n 'teacher_teacher_team_teaching',\n 'teacher_teacher_daycare_supervision',\n 'teaching_teacher_student',\n 'daycare_supervision_teacher_student']:\n q6 = self.get_transmission_risk_exhale_modifier(source)\n q7 = self.get_transmission_risk_inhale_modifier(target)\n q8 = self.get_transmission_risk_ventilation_modifier()\n\n p = 1 - (1 - base_risk * (1 - q1) * (1 - q2) * (1 - q3) * \\\n (1 - q4) * (1 - q5) * (1 - q6) * (1 - q7) * (1 - q8) * \\\n (1 - q9) * (1 - q10))\n\n else:\n print('unknown link type: {}'.format(link_type))\n p = None\n return p\n"
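calculate_transmission_probability in the school model above folds every preventive measure into a "probability of failure" q and treats transmission as a single Bernoulli trial whose success probability is the base risk scaled by the product of the (1 - q) terms. The following is a small sketch of that combination rule only; the modifier values are hypothetical placeholders, not the calibrated ones.

import numpy as np

def combined_transmission_probability(base_risk, failure_modifiers):
    # every preventive measure enters as a probability of failure q in [0, 1];
    # the success probability is the base risk times the remaining (1 - q) factors
    p = base_risk
    for q in failure_modifiers:
        p *= (1 - q)
    return p

rng = np.random.default_rng(42)

# hypothetical modifiers: contact type, source/target age, progression,
# subclinical course, exhale/inhale mask filtering, ventilation
qs = [0.5, 0.1, 0.1, 0.0, 0.4, 0.5, 0.3, 0.36]
p = combined_transmission_probability(0.05, qs)
transmitted = rng.random() < p  # one Bernoulli trial per contact and time step
print(round(p, 5), transmitted)

The stored code writes this as p = 1 - (1 - base_risk * (1 - q1) * ... * (1 - q10)), which reduces algebraically to the same product computed here.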
] | [
[
"numpy.random.weibull",
"numpy.abs",
"numpy.random.seed",
"numpy.random.choice",
"numpy.asarray",
"scipy.optimize.root_scalar"
],
[
"numpy.asarray"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.9",
"1.5",
"1.2",
"1.7",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
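SEIRX_school in the row above keeps one contact sub-graph per weekday: edges of a networkx MultiGraph carry a 'weekday' attribute, the seven sub-graphs are extracted once in the constructor, and self.G is swapped at every step. A toy-graph sketch of that extraction, with illustrative node names rather than the real school contact network:

import networkx as nx

# toy multigraph: two students and a teacher, edges tagged with the weekday they occur on
MG = nx.MultiGraph()
MG.add_edge('student_1', 'student_2', weekday=1, contact_type='close')
MG.add_edge('student_1', 'student_2', weekday=3, contact_type='intermediate')
MG.add_edge('student_2', 'teacher_1', weekday=1, contact_type='far')

# one sub-graph per weekday, extracted up front so the model can swap its graph each step
weekday_connections = {}
for day in range(1, 8):
    day_edges = [(u, v, k) for (u, v, k, wd)
                 in MG.edges(keys=True, data='weekday') if wd == day]
    weekday_connections[day] = MG.edge_subgraph(day_edges).copy()

print(weekday_connections[1].edges(keys=True, data='contact_type'))
print(weekday_connections[3].edges(keys=True, data='contact_type'))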
sankar-mukherjee/DCASE-2018---Task-4- | [
"f8034641efef6e60ea721abc5569d9c1aa8ee56d"
] | [
"task4_crnn.py"
] | [
"# !/usr/bin/env python\n# -*- coding: utf-8 -*-\n#########################################################################\n# This code is an adaptation from Toni Heittola's code [task1 baseline dcase 2018](https://github.com/DCASE-REPO/dcase2018_baseline/tree/master/task1/)\n# Copyright Nicolas Turpault, Romain Serizel, Hamid Eghbal-zadeh, Ankit Parag Shah, 2018, v1.0\n# This software is distributed under the terms of the License MIT\n#########################################################################\nimport dcase_util\nimport sys\nimport numpy\nimport os\nimport random\nimport pickle\n\nimport tensorflow as tf\nfrom keras import backend as K\nimport keras\n\n#from evaluation_measures import get_f_measure_by_class, event_based_evaluation, segment_based_evaluation\nfrom evaluation_measures import get_f_measure_by_class, event_based_evaluation\nfrom Dataset_dcase2018 import DCASE2018_Task4_DevelopmentSet\n\ndcase_util.utils.setup_logging(logging_file='task4.log')\nprint(keras.__version__)\n\nrandom.seed(10)\nnumpy.random.seed(42)\n\ntf.set_random_seed(1234)\nsess = tf.Session(graph=tf.get_default_graph())\nK.set_session(sess)\n\n\ndef main(parameters):\n log = dcase_util.ui.ui.FancyLogger()\n log.title('DCASE2018 / Task4')\n\n overwirte_preprocessing = False\n overwrite_learning = False\n overwrite_testing = True\n\n # =====================================================================\n # Parameters\n # =====================================================================\n # Process parameters\n param = dcase_util.containers.DCASEAppParameterContainer(\n parameters,\n path_structure={\n 'FEATURE_EXTRACTOR': [\n 'DATASET',\n 'FEATURE_EXTRACTOR'\n ],\n 'FEATURE_NORMALIZER': [\n 'DATASET',\n 'FEATURE_EXTRACTOR'\n ],\n 'LEARNER': [\n 'DATASET',\n 'FEATURE_EXTRACTOR',\n 'FEATURE_NORMALIZER',\n 'FEATURE_SEQUENCER',\n 'LEARNER'\n ],\n 'RECOGNIZER': [\n 'DATASET',\n 'FEATURE_EXTRACTOR',\n 'FEATURE_NORMALIZER',\n 'FEATURE_SEQUENCER',\n 'LEARNER',\n 'RECOGNIZER'\n ],\n }\n ).process()\n\n # Make sure all system paths exists\n dcase_util.utils.Path().create(\n paths=list(param['path'].values())\n )\n\n # Initialize\n keras_model_first_pass = None\n keras_model_second_pass = None\n\n # =====================================================================\n # Dataset\n # =====================================================================\n # Get dataset and initialize it\n\n db = DCASE2018_Task4_DevelopmentSet(included_content_types=['all'],\n local_path=\"\",\n data_path=param.get_path('path.dataset'),\n audio_paths=[\n os.path.join(\"dataset\", \"audio\", \"train\", \"weak\"),\n os.path.join(\"dataset\", \"audio\", \"train\", \"unlabel_in_domain\"),\n os.path.join(\"dataset\", \"audio\", \"train\", \"unlabel_out_of_domain\"),\n os.path.join(\"dataset\", \"audio\", \"test\")\n ]\n ).initialize()\n\n # Active folds\n folds = db.folds(\n mode=param.get_path('dataset.parameters.evaluation_mode')\n )\n\n active_fold_list = param.get_path('dataset.parameters.fold_list')\n if active_fold_list:\n folds = list(set(folds).intersection(active_fold_list))\n\n # =====================================================================\n # Feature extraction stage\n # =====================================================================\n if param.get_path('flow.feature_extraction'):\n log.section_header('Feature Extraction / Train material')\n\n # Prepare feature extractor\n mel_extractor = dcase_util.features.MelExtractor(\n **param.get_path('feature_extractor.parameters.mel')\n )\n\n # Loop over all 
audio files in the dataset and extract features for them.\n # for audio_filename in db.audio_files:\n for audio_filename in db.audio_files:\n # Get filename for feature data from audio filename\n feature_filename = dcase_util.utils.Path(\n path=audio_filename\n ).modify(\n path_base=param.get_path('path.application.feature_extractor'),\n filename_extension='.cpickle'\n )\n\n if not os.path.isfile(feature_filename) or overwirte_preprocessing:\n log.line(\n data=os.path.split(audio_filename)[1],\n indent=2\n )\n\n # Load audio data\n audio = dcase_util.containers.AudioContainer().load(\n filename=audio_filename,\n mono=True,\n fs=param.get_path('feature_extractor.fs')\n )\n\n # Extract features and store them into FeatureContainer, and save it to the disk\n dcase_util.containers.FeatureContainer(\n data=mel_extractor.extract(audio.data),\n time_resolution=param.get_path('feature_extractor.hop_length_seconds')\n ).save(\n filename=feature_filename\n )\n\n log.foot()\n\n # =====================================================================\n # Feature normalization stage\n # =====================================================================\n\n if param.get_path('flow.feature_normalization'):\n log.section_header('Feature Normalization')\n\n # Get filename for the normalization factors\n features_norm_filename = os.path.join(\n param.get_path('path.application.feature_normalizer'),\n 'normalize_values.cpickle'\n )\n\n if not os.path.isfile(features_norm_filename) or overwirte_preprocessing:\n normalizer = dcase_util.data.Normalizer(\n filename=features_norm_filename\n )\n\n # Loop through all training data, two train folds\n for fold in folds:\n for filename in db.train(fold=fold).unique_files:\n # Get feature filename\n feature_filename = dcase_util.utils.Path(\n path=filename\n ).modify(\n path_base=param.get_path('path.application.feature_extractor'),\n filename_extension='.cpickle',\n )\n\n # Load feature matrix\n features = dcase_util.containers.FeatureContainer().load(\n filename=feature_filename\n )\n\n # Accumulate statistics\n normalizer.accumulate(\n data=features.data\n )\n\n # Finalize and save\n normalizer.finalize().save()\n\n log.foot()\n\n # Create processing chain for features\n feature_processing_chain = dcase_util.processors.ProcessingChain()\n for chain in param.get_path('feature_processing_chain'):\n processor_name = chain.get('processor_name')\n init_parameters = chain.get('init_parameters', {})\n\n # Inject parameters\n if processor_name == 'dcase_util.processors.NormalizationProcessor':\n init_parameters['filename'] = features_norm_filename\n\n if init_parameters.get('enable') is None or init_parameters.get('enable') is True:\n feature_processing_chain.push_processor(\n processor_name=processor_name,\n init_parameters=init_parameters,\n )\n\n # =====================================================================\n # Learning stage\n # =====================================================================\n if param.get_path('flow.learning'):\n log.section_header('Learning')\n\n # setup keras parameters\n dcase_util.keras.setup_keras(\n seed=param.get_path('learner.parameters.random_seed'),\n profile=param.get_path('learner.parameters.keras_profile'),\n backend=param.get_path('learner.parameters.backend'),\n device=param.get_path('learner.parameters.device'),\n verbose=False\n )\n\n # encoder used to convert text labels into vector\n many_hot_encoder = dcase_util.data.ManyHotEncoder(\n label_list=db.tags(),\n time_resolution=1\n )\n\n # 
=====================================================================\n # Training first pass\n # =====================================================================\n\n fold = 1\n # Get model filename\n fold1_model_filename = os.path.join(\n param.get_path('path.application.learner'),\n 'model_fold_{fold}.h5'.format(fold=fold)\n )\n\n if not os.path.isfile(fold1_model_filename) or overwrite_learning:\n # Split the dataset into training and validation files\n training_files, validation_files = db.validation_split(\n fold=fold,\n split_type='random',\n validation_amount=param.get_path('learner.parameters.model.first_pass.validation_amount'),\n verbose=True\n )\n\n batch_size = param.get_path('learner.parameters.model.first_pass.fit.batch_size')\n shuffle = param.get_path('learner.parameters.model.first_pass.fit.shuffle')\n\n # Get items (with labels) associated with training files\n training_items = db.train(fold=fold).filter(file_list=training_files)\n\n # Create the generator, which convert filename and item into arrays batch_X, batch_y in right formats\n training_generator = data_generator(training_items, param.get_path('path.application.feature_extractor'),\n many_hot_encoder, feature_processing_chain,\n batch_size=batch_size, shuffle=shuffle)\n\n validation_items = db.train(fold=fold).filter(file_list=validation_files)\n validation_generator = data_generator(validation_items, param.get_path('path.application.feature_extractor'),\n many_hot_encoder, feature_processing_chain,\n batch_size=batch_size, shuffle=False)\n\n # Update constants with useful information to setup the model\n model_parameter_constants = {\n 'NB_CLASSES': db.tag_count(),\n 'INPUT_FREQUENCIES': param.get_path('feature_extractor.parameters.mel.n_mels'),\n 'INPUT_SEQUENCE_LENGTH': param.get_path('feature_sequencer.sequence_length'),\n }\n model_parameter_constants.update(param.get_path('learner.parameters.model.constants', {}))\n\n # Load the sequential keras model defined in the YAML.\n keras_model_first_pass = dcase_util.keras.create_sequential_model(\n model_parameter_list=param.get_path('learner.parameters.model.first_pass.config'),\n constants=model_parameter_constants\n )\n\n # Print the model configuration\n keras_model_first_pass.summary(print_fn=log.line)\n\n # Create optimizer object from info given in YAML\n param.set_path(\n path='learner.parameters.compile.optimizer',\n new_value=dcase_util.keras.create_optimizer(\n class_name=param.get_path('learner.parameters.optimizer.class_name'),\n config=param.get_path('learner.parameters.optimizer.config')\n )\n )\n # Compile model\n keras_model_first_pass.compile(\n **param.get_path('learner.parameters.compile')\n )\n\n epochs = param.get_path('learner.parameters.model.first_pass.fit.epochs')\n\n # Setup callbacks used during training\n callback_list = [\n dcase_util.keras.ProgressLoggerCallback(\n epochs=epochs,\n metric=param.get_path('learner.parameters.compile.metrics')[0],\n loss=param.get_path('learner.parameters.compile.loss'),\n output_type='logging',\n **param.get_path('learner.parameters.callbacks.ProgressLoggerCallback')\n )\n ]\n if param.get_path('learner.parameters.callbacks.StopperCallback'):\n callback_list.append(\n dcase_util.keras.StopperCallback(\n epochs=epochs,\n **param.get_path('learner.parameters.callbacks.StopperCallback')\n )\n )\n\n if param.get_path('learner.parameters.callbacks.StasherCallback'):\n callback_list.append(\n dcase_util.keras.StasherCallback(\n epochs=epochs,\n 
**param.get_path('learner.parameters.callbacks.StasherCallback')\n )\n )\n\n processing_interval = param.get_path(\n 'learner.parameters.callbacks.ProgressLoggerCallback.processing_interval'\n )\n epochs = param.get_path('learner.parameters.model.first_pass.fit.epochs')\n\n # Iterate through epoch to be able to manually update callbacks\n for epoch_start in range(0, epochs, processing_interval):\n epoch_end = epoch_start + processing_interval\n\n # Make sure we have only specified amount of epochs\n if epoch_end > epochs:\n epoch_end = epochs\n\n # Train keras_model_first_pass\n keras_model_first_pass.fit_generator(\n generator=training_generator,\n steps_per_epoch=len(training_files) // batch_size,\n validation_data=validation_generator,\n validation_steps=len(validation_files) // batch_size,\n callbacks=callback_list,\n verbose=0,\n initial_epoch=epoch_start,\n epochs=epoch_end\n )\n\n # Get f_measures of the current epoch\n val_macro_f_measure = get_f_measure_by_class(keras_model_first_pass, db.tag_count(), validation_generator,\n len(validation_files) // batch_size)\n val_macro_f_measure = val_macro_f_measure.mean()\n\n tra_macro_f_measure = get_f_measure_by_class(keras_model_first_pass, db.tag_count(), training_generator,\n len(training_files) // batch_size,\n )\n tra_macro_f_measure = tra_macro_f_measure.mean()\n\n # Inject external metric values to the callbacks\n for callback in callback_list:\n if hasattr(callback, 'set_external_metric_value'):\n callback.set_external_metric_value(\n metric_label='val_macro_f_measure',\n metric_value=val_macro_f_measure\n )\n callback.set_external_metric_value(\n metric_label='tra_macro_f_measure',\n metric_value=tra_macro_f_measure\n )\n\n # Manually update callbacks\n for callback in callback_list:\n if hasattr(callback, 'update'):\n callback.update()\n\n # Check we need to stop training\n stop_training = False\n for callback in callback_list:\n if hasattr(callback, 'stop'):\n if callback.stop():\n log.line(\"Early stropping\")\n stop_training = True\n\n if stop_training:\n # Stop the training loop\n break\n\n # Fetch best model\n for callback in callback_list:\n if isinstance(callback, dcase_util.keras.StasherCallback):\n callback.log()\n best_weights = callback.get_best()['weights']\n if best_weights:\n keras_model_first_pass.set_weights(best_weights)\n break\n\n # Save trained model\n keras_model_first_pass.save(fold1_model_filename)\n\n log.foot()\n\n # =======\n # Calculate best thresholds\n # =======\n thresholds_filename = os.path.join(\n param.get_path('path.application.learner'),\n 'thresholds_{fold}.p'.format(fold=fold)\n )\n\n if not os.path.isfile(thresholds_filename) or overwrite_learning:\n training_files, validation_files = db.validation_split(\n fold=fold,\n split_type='random',\n validation_amount=param.get_path('learner.parameters.model.first_pass.validation_amount'),\n verbose=True\n )\n batch_size = param.get_path('learner.parameters.model.first_pass.fit.batch_size')\n validation_items = db.train(fold=fold).filter(file_list=validation_files)\n validation_generator = data_generator(validation_items, param.get_path('path.application.feature_extractor'),\n many_hot_encoder, feature_processing_chain,\n batch_size=batch_size, shuffle=False)\n\n # Load model if not trained during this run\n if not keras_model_first_pass:\n keras_model_first_pass = keras.models.load_model(fold1_model_filename)\n\n thresholds = [0] * db.tag_count()\n max_f_measure = [-numpy.inf] * db.tag_count()\n for threshold in numpy.arange(0., 1 + 1e-6, 0.1):\n # 
Assign current threshold to each class\n current_thresholds = [threshold] * db.tag_count()\n\n # Calculate f_measures with the current thresholds\n macro_f_measure = get_f_measure_by_class(keras_model_first_pass, db.tag_count(), validation_generator,\n len(validation_files) // batch_size,\n current_thresholds)\n\n # Update thresholds for class with better f_measures\n for i, label in enumerate(db.tags()):\n f_measure = macro_f_measure[i]\n if f_measure > max_f_measure[i]:\n max_f_measure[i] = f_measure\n thresholds[i] = threshold\n\n for i, label in enumerate(db.tags()):\n log.line(\"{:30}, threshold: {}\".format(label, thresholds[i]))\n\n thresholds_filename = os.path.join(\n param.get_path('path.application.learner'),\n 'thresholds.p'.format(fold=fold)\n )\n pickle.dump(thresholds, open(thresholds_filename, \"wb\"))\n\n else:\n thresholds = pickle.load(open(thresholds_filename, \"rb\"))\n\n # =====================================================================\n # Predict stage from weak to predict unlabel_in_domain tags\n # =====================================================================\n\n log.section_header('Predict 1st pass, add labels to unlabel_in_domain data')\n\n # Get results filename\n fold_results_filename = os.path.join(\n param.get_path('path.application.recognizer'),\n 'pred_weak_fold_{fold}.txt'.format(fold=fold)\n )\n\n if not os.path.isfile(fold_results_filename) or overwrite_testing:\n # Initialize results container\n res = dcase_util.containers.MetaDataContainer(\n filename=fold_results_filename\n )\n\n # Load model if not yet loaded\n if not keras_model_first_pass:\n keras_model_first_pass = keras.models.load_model(fold1_model_filename)\n\n # Loop through all test files from the current cross-validation fold\n for item in db.test(fold=fold):\n # Get feature filename\n feature_filename = dcase_util.utils.Path(\n path=item.filename\n ).modify(\n path_base=param.get_path('path.application.feature_extractor'),\n filename_extension='.cpickle'\n )\n\n features = feature_processing_chain.process(\n filename=feature_filename\n )\n\n input_data = features.data.reshape(features.shape[:-1]).T # (500, 64)\n input_data = input_data.reshape((1,)+input_data.shape) # (1, 500, 64)\n\n # Get network output\n probabilities = keras_model_first_pass.predict(x=input_data)\n\n # Binarization of the network output\n frame_decisions = dcase_util.data.ProbabilityEncoder().binarization(\n probabilities=probabilities,\n binarization_type='class_threshold',\n threshold=thresholds,\n time_axis=0\n )\n\n estimated_tags = dcase_util.data.DecisionEncoder(\n label_list=db.tags()\n ).many_hot(\n frame_decisions=frame_decisions,\n time_axis=0\n )\n\n # Store result into results container\n res.append(\n {\n 'filename': item.filename,\n 'tags': estimated_tags[0]\n }\n )\n\n # Save results container\n res.save()\n\n log.foot()\n\n # =====================================================================\n # Learning stage 2nd pass, learn from weak and unlabel_in_domain annotated data\n # =====================================================================\n\n fold = 2\n\n log.line(data='Fold [{fold}]'.format(fold=fold), indent=2)\n\n # Get model filename\n fold2_model_filename = os.path.join(\n param.get_path('path.application.learner'),\n 'model_fold_{fold}.h5'.format(fold=fold)\n )\n\n if not os.path.isfile(fold2_model_filename) or overwrite_learning:\n\n model_parameter_constants = {\n 'NB_CLASSES': db.tag_count(),\n 'INPUT_FREQUENCIES': 
param.get_path('feature_extractor.parameters.mel.n_mels'),\n 'INPUT_SEQUENCE_LENGTH': param.get_path('feature_sequencer.sequence_length'),\n }\n model_parameter_constants.update(param.get_path('learner.parameters.model.constants', {}))\n\n keras_model_second_pass = dcase_util.keras.create_sequential_model(\n model_parameter_list=param.get_path('learner.parameters.model.second_pass.config'),\n constants=model_parameter_constants\n )\n\n keras_model_second_pass.summary(print_fn=log.line)\n\n # Create optimizer object\n param.set_path(\n path='learner.parameters.compile.optimizer',\n new_value=dcase_util.keras.create_optimizer(\n class_name=param.get_path('learner.parameters.optimizer.class_name'),\n config=param.get_path('learner.parameters.optimizer.config')\n )\n )\n # Compile model\n keras_model_second_pass.compile(\n **param.get_path('learner.parameters.compile')\n )\n\n # Get annotations from the 1st pass model\n fold1_results_filename = os.path.join(\n param.get_path('path.application.recognizer'),\n 'pred_weak_fold_{fold}.txt'.format(fold=1)\n )\n # Load annotations\n predictions_first_pass = dcase_util.containers.MetaDataContainer(\n filename=fold1_results_filename\n ).load()\n\n # Split the dataset into train and validation. If \"weak\" is provided, files from weak.csv are used to\n # validate the model. Else, give a percentage which will be used\n if param.get_path('learner.parameters.model.second_pass.validation_amount') == \"weak\":\n training_files = predictions_first_pass.unique_files\n training_items = predictions_first_pass\n validation_files = db.train(fold=1).unique_files\n validation_items = db.train(fold=1)\n else:\n # Get validation files\n training_files, validation_files = db.validation_split(\n fold=fold,\n split_type='random',\n validation_amount=param.get_path('learner.parameters.model.second_pass.validation_amount'),\n verbose=False\n )\n training_fold2 = predictions_first_pass + db.train(fold=1)\n\n training_items = training_fold2.filter(file_list=training_files)\n validation_items = training_fold2.filter(file_list=validation_files)\n\n processing_interval = param.get_path(\n 'learner.parameters.callbacks.ProgressLoggerCallback.processing_interval'\n )\n epochs = param.get_path('learner.parameters.model.second_pass.fit.epochs')\n\n batch_size = param.get_path('learner.parameters.model.second_pass.fit.batch_size')\n shuffle = param.get_path('learner.parameters.model.second_pass.fit.shuffle')\n\n # Create generators, which convert filename and item into arrays batch_X, batch_y in right formats\n training_generator = data_generator(training_items, param.get_path('path.application.feature_extractor'),\n many_hot_encoder, feature_processing_chain,\n batch_size=batch_size, shuffle=shuffle, mode=\"strong\")\n\n validation_generator = data_generator(validation_items, param.get_path('path.application.feature_extractor'),\n many_hot_encoder,\n feature_processing_chain,\n batch_size=batch_size, shuffle=False, mode=\"strong\")\n\n # Initialize callbacks used during training\n callback_list = [\n dcase_util.keras.ProgressLoggerCallback(\n epochs=param.get_path('learner.parameters.model.second_pass.fit.epochs'),\n metric=param.get_path('learner.parameters.compile.metrics')[0],\n loss=param.get_path('learner.parameters.compile.loss'),\n output_type='logging',\n **param.get_path('learner.parameters.callbacks.ProgressLoggerCallback')\n )\n ]\n if param.get_path('learner.parameters.callbacks.StopperCallback'):\n callback_list.append(\n dcase_util.keras.StopperCallback(\n 
epochs=param.get_path('learner.parameters.model.second_pass.fit.epochs'),\n **param.get_path('learner.parameters.callbacks.StopperCallback')\n )\n )\n\n if param.get_path('learner.parameters.callbacks.StasherCallback'):\n callback_list.append(\n dcase_util.keras.StasherCallback(\n epochs=param.get_path('learner.parameters.model.second_pass.fit.epochs'),\n **param.get_path('learner.parameters.callbacks.StasherCallback')\n )\n )\n\n for epoch_start in range(0, epochs, processing_interval):\n epoch_end = epoch_start + processing_interval\n\n # Make sure we have only specified amount of epochs\n if epoch_end > epochs:\n epoch_end = epochs\n\n # Train keras_model_second_pass\n keras_model_second_pass.fit_generator(\n generator=training_generator,\n steps_per_epoch=len(training_files) // batch_size,\n validation_data=validation_generator,\n validation_steps=len(validation_files) // batch_size,\n callbacks=callback_list,\n verbose=0,\n initial_epoch=epoch_start,\n epochs=epoch_end\n )\n\n # Calculate external metrics, f_measure of the current epoch\n val_macro_f_measure = get_f_measure_by_class(keras_model_second_pass, db.tag_count(), validation_generator,\n len(validation_files) // batch_size, )\n val_macro_f_measure = val_macro_f_measure.mean()\n\n tra_macro_f_measure = get_f_measure_by_class(keras_model_second_pass, db.tag_count(), training_generator,\n len(training_files) // batch_size,\n )\n tra_macro_f_measure = tra_macro_f_measure.mean()\n\n # Inject external metric values to the callbacks\n for callback in callback_list:\n if hasattr(callback, 'set_external_metric_value'):\n callback.set_external_metric_value(\n metric_label='val_macro_f_measure',\n metric_value=val_macro_f_measure\n )\n callback.set_external_metric_value(\n metric_label='tra_macro_f_measure',\n metric_value=tra_macro_f_measure\n )\n\n # Manually update callbacks\n for callback in callback_list:\n if hasattr(callback, 'update'):\n callback.update()\n\n # Check we need to stop training\n stop_training = False\n for callback in callback_list:\n if hasattr(callback, 'stop'):\n if callback.stop():\n log.line(\"Early stropping\")\n stop_training = True\n\n if stop_training:\n # Stop the training loop\n break\n\n # Fetch best model\n for callback in callback_list:\n if isinstance(callback, dcase_util.keras.StasherCallback):\n callback.log()\n best_weights = callback.get_best()['weights']\n if best_weights:\n keras_model_second_pass.set_weights(best_weights)\n break\n\n # Save trained model\n keras_model_second_pass.save(fold2_model_filename)\n\n log.foot()\n\n # =====================================================================\n # Testing stage, get strong annotations\n # =====================================================================\n\n if param.get_path('flow.testing'):\n log.section_header('Testing')\n\n # Get results filename\n fold_results_filename = os.path.join(\n param.get_path('path.application.recognizer'),\n 'res_fold_{fold}.txt'.format(fold=2)\n )\n\n # Get model filename\n fold2_model_filename = os.path.join(\n param.get_path('path.application.learner'),\n 'model_fold_{fold}.h5'.format(fold=2)\n )\n\n if not os.path.isfile(fold_results_filename) or overwrite_testing:\n # Load model if not yet loaded\n if not keras_model_second_pass:\n keras_model_second_pass = keras.models.load_model(fold2_model_filename)\n\n # Initialize results container\n res = dcase_util.containers.MetaDataContainer(\n filename=fold_results_filename\n )\n\n # Loop through all test files from the current cross-validation fold\n for 
item in db.test(fold=2):\n # Get feature filename\n feature_filename = dcase_util.utils.Path(\n path=item.filename\n ).modify(\n path_base=param.get_path('path.application.feature_extractor'),\n filename_extension='.cpickle'\n )\n\n # Get features array\n features = feature_processing_chain.process(\n filename=feature_filename\n )\n\n input_data = features.data.reshape(features.shape[:-1]).T # (500, 64)\n # Create a batch with only one file\n input_data = input_data.reshape((1,) + input_data.shape) # (1, 500, 64)\n\n # Get network output for strong data\n probabilities = keras_model_second_pass.predict(input_data)\n\n # only one file in the batch\n probabilities = probabilities[0]\n\n if param.get_path('recognizer.frame_binarization.enable'):\n # Binarization of the network output\n frame_decisions = dcase_util.data.ProbabilityEncoder().binarization(\n probabilities=probabilities,\n binarization_type=param.get_path('recognizer.frame_binarization.binarization_type'),\n threshold=param.get_path('recognizer.frame_binarization.threshold'),\n time_axis=0\n )\n else:\n frame_decisions = dcase_util.data.ProbabilityEncoder().binarization(\n probabilities=probabilities,\n binarization_type=\"global_threshold\",\n threshold=0.5,\n time_axis=0\n )\n\n decision_encoder = dcase_util.data.DecisionEncoder(\n label_list=db.tags()\n )\n\n if param.get_path('recognizer.process_activity.enable'):\n frame_decisions = decision_encoder.process_activity(\n frame_decisions,\n window_length=param.get_path('recognizer.process_activity.window_length'),\n time_axis=0)\n\n for i, label in enumerate(db.tags()):\n\n # given a list of ones, give the onset and offset in frames\n estimated_events = decision_encoder.find_contiguous_regions(\n activity_array=frame_decisions[:, i]\n )\n\n for [onset, offset] in estimated_events:\n hop_length_seconds = param.get_path('feature_extractor.hop_length_seconds')\n # Store result into results container, convert frames to seconds\n res.append(\n {\n 'filename': item.filename,\n 'event_label': label,\n 'onset': onset * hop_length_seconds,\n 'offset': offset * hop_length_seconds\n }\n )\n\n # Save results container\n res.save()\n log.foot()\n\n # =====================================================================\n # Evaluation stage, get results\n # =====================================================================\n\n if param.get_path('flow.evaluation'):\n log.section_header('Evaluation')\n\n stats_filename = os.path.join(param.get_path('path.application.recognizer'), 'evaluation.txt')\n\n if not os.path.isfile(stats_filename) or overwrite_testing:\n fold_results_filename = os.path.join(\n param.get_path('path.application.recognizer'),\n 'res_fold_{fold}.txt'.format(fold=fold)\n )\n\n # test data used to evaluate the system\n reference_event_list = db.eval(fold=fold)\n\n # predictions done during the step test before\n estimated_event_list = dcase_util.containers.MetaDataContainer().load(\n filename=fold_results_filename\n )\n\n # Calculate the metric\n event_based_metric = event_based_evaluation(reference_event_list, estimated_event_list)\n\n with open(stats_filename, \"w\") as stats_file:\n stats_file.write(event_based_metric.__str__())\n\n log.line(event_based_metric.__str__(), indent=4)\n\n log.foot()\n\n\ndef data_generator(items, feature_path, many_hot_encoder, feature_processing_chain, batch_size=1, shuffle=True, mode='weak'):\n \"\"\" Transform MetaDataContainer into batches of data\n\n Parameters\n ----------\n\n items : MetaDataContainer, items to be generated\n\n 
feature_path : String, base path where features are stored\n\n many_hot_encoder : ManyHotEncoder, class to encode data\n\n feature_processing_chain : ProcessingChain, chain to process data\n\n batch_size : int, size of the batch to be returned\n\n shuffle : bool, shuffle the items before creating the batch\n\n mode : \"weak\" or \"strong\", indicate to return labels as tags (1/file) or event_labels (1/frame)\n\n Return\n ------\n\n (batch_X, batch_y): generator, arrays containing batches of data.\n\n \"\"\"\n while True:\n batch_X = []\n batch_y = []\n if shuffle:\n random.shuffle(items)\n for item in items:\n # Get feature filename\n feature_filename = dcase_util.utils.Path(\n path=item.filename\n ).modify(\n path_base=feature_path,\n filename_extension='.cpickle',\n )\n\n features = feature_processing_chain.process(\n filename=feature_filename\n )\n input_data = features.data.reshape(features.shape[:-1]).T\n\n # Target\n targets = item.tags\n targets = many_hot_encoder.encode(targets, length_frames=1).data.flatten()\n if mode == \"strong\":\n targets = numpy.repeat(targets.reshape((1,) + targets.shape), input_data.shape[0], axis=0)\n\n if batch_size == 1:\n batch_X = input_data.reshape((1,) + input_data.shape)\n batch_y = targets.reshape((1,) + targets.shape)\n else:\n batch_X.append(input_data)\n batch_y.append(targets)\n if len(batch_X) == batch_size and len(batch_y) == batch_size:\n yield numpy.array(batch_X), numpy.array(batch_y)\n\n batch_X = []\n batch_y = []\n\n\n\nif __name__ == \"__main__\":\n # Read parameters file\n parameters = dcase_util.containers.DictContainer().load(\n filename='task4_crnn.yaml'\n )\n\n try:\n sys.exit(main(parameters))\n except (ValueError, IOError) as e:\n sys.exit(e)\n"
] | [
[
"numpy.random.seed",
"numpy.arange",
"tensorflow.set_random_seed",
"tensorflow.get_default_graph",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
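The task4_crnn script in the row above picks per-tag binarization thresholds by sweeping a 0.0–1.0 grid and keeping, for each class, the value with the best validation F-measure. A minimal standalone sketch of that search, assuming plain numpy arrays of sigmoid outputs and binary targets rather than the dcase_util generator pipeline; tune_class_thresholds and its arguments are illustrative names, not part of the baseline code.

import numpy as np
from sklearn.metrics import f1_score

def tune_class_thresholds(probabilities, targets, grid=None):
    # probabilities, targets: (n_examples, n_classes) arrays of sigmoid outputs and 0/1 labels
    if grid is None:
        grid = np.arange(0.0, 1.0 + 1e-6, 0.1)   # same grid as the script
    n_classes = targets.shape[1]
    best_thresholds = np.zeros(n_classes)
    best_f1 = np.full(n_classes, -np.inf)
    for threshold in grid:
        decisions = (probabilities >= threshold).astype(int)
        for i in range(n_classes):
            score = f1_score(targets[:, i], decisions[:, i])
            if score > best_f1[i]:
                best_f1[i] = score
                best_thresholds[i] = threshold
    return best_thresholds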
rockscie/async_blp | [
"acb8777ccf2499681bde87d76ca780b61219699c"
] | [
"tests/test_instruments_request.py"
] | [
"import pandas as pd\nimport pytest\n\nfrom async_blp.instruments_requests import InstrumentRequestBase\n\n\[email protected]\nclass TestInstrumentRequestBase:\n\n def test__weight(self):\n request = InstrumentRequestBase('query', max_results=5)\n request.response_fields = ['field_1', 'field_2']\n\n assert request.weight == 10\n\n async def test__process(self, security_lookup_msg):\n request = InstrumentRequestBase('query', max_results=5)\n request.response_fields = ['security', 'description']\n\n request.send_queue_message(security_lookup_msg)\n request.send_queue_message(None)\n\n data, _ = await request.process()\n\n expected_data = pd.DataFrame([['F US Equity', 'Ford Motors Co']],\n columns=['security', 'description'])\n\n pd.testing.assert_frame_equal(expected_data, data)\n"
] | [
[
"pandas.testing.assert_frame_equal",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
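The async_blp test row above hinges on pandas.testing.assert_frame_equal for comparing the processed response against an expected frame. A small sketch of that call in isolation, reusing the fixture values from the test; check_dtype and check_like are standard pandas.testing options for when dtype or column order is not significant.

import pandas as pd

expected = pd.DataFrame([['F US Equity', 'Ford Motors Co']],
                        columns=['security', 'description'])
produced = pd.DataFrame([['F US Equity', 'Ford Motors Co']],
                        columns=['security', 'description'])

# Strict comparison: values, dtypes, index and column order must all match
pd.testing.assert_frame_equal(expected, produced)

# Looser comparison when dtype or column order does not matter
pd.testing.assert_frame_equal(expected, produced, check_dtype=False, check_like=True)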
olegs22/Quickquasar_QA | [
"df74994780216846501710b79b4dce7d025809c9"
] | [
"run_quickquasars.py"
] | [
"import numpy as np\nimport os\nimport shutil\nimport glob as glob\n\ndef get_slurm_script(script_name,command,outdir,idir,mail,log,part,nodes,threads,time,job_name):\n if os.path.isdir(outdir+'/run') == False:\n os.mkdir(outdir+'/run')\n file_name = outdir + '/run/' + script_name\n f = open(file_name,'w')\n slurm_dict = dict()\n slurm_dict['line_0'] = '#SBATCH -C haswell\\n'\n slurm_dict['line_1'] = '#SBATCH --partition='+part+'\\n'\n slurm_dict['line_2'] = '#SBATCH --account=desi\\n'\n slurm_dict['line_3'] = '#SBATCH --nodes='+str(nodes)+'\\n'\n slurm_dict['line_4'] = '#SBATCH --time='+time+'\\n'\n slurm_dict['line_5'] = '#SBATCH --job-name='+job_name+'\\n'\n slurm_dict['line_6'] = '#SBATCH --output='+log+'\\n'\n slurm_dict['line_7'] = '#SBATCH --mail-user='+mail+'\\n'\n slurm_dict['line_8'] = 'idir='+idir+'\\n'\n slurm_dict['line_9'] = 'outdir='+outdir+'\\n'\n slurm_dict['line_10'] = 'nodes='+str(nodes)+'\\n' # CHECK MATCHING #SBATCH --nodes ABOVE !!!!\n slurm_dict['line_11'] = 'nthreads='+str(threads)+'\\n' # TO BE TUNED ; CAN HIT NODE MEMORY LIMIT ; 4 is max on edison for nside=16 and ~50 QSOs/deg2\n slurm_dict['line_12'] = 'echo \"get list of skewers to run ...\"\\n'\n slurm_dict['line_13'] = 'files=`\\ls -1 $idir/*/*/transmission*.fits*`\\n'\n slurm_dict['line_14'] = 'nfiles=`echo $files | wc -w`\\n'\n slurm_dict['line_15'] = 'nfilespernode=$((nfiles/nodes+1))\\n'\n slurm_dict['line_16'] = 'echo \"n files =\" $nfiles\\n'\n slurm_dict['line_17'] = 'echo \"n files per node =\" $nfilespernode\\n'\n slurm_dict['line_18'] = 'first=1\\n'\n slurm_dict['line_19'] = 'last=$nfilespernode\\n'\n slurm_dict['line_20'] = 'for node in `seq $nodes` ; do\\n'\n slurm_dict['line_21'] = ' echo \"starting node $node\"\\n'\n slurm_dict['line_22'] = ' # list of files to run\\n'\n slurm_dict['line_23'] = ' if (( $node == $nodes )) ; then\\n'\n slurm_dict['line_24'] = ' last=\"\"\\n'\n slurm_dict['line_25'] = ' fi\\n'\n slurm_dict['line_26'] = ' echo ${first}-${last}\\n'\n slurm_dict['line_27'] = ' tfiles=`echo $files | cut -d \" \" -f ${first}-${last}`\\n'\n slurm_dict['line_28'] = ' first=$(( first + nfilespernode ))\\n'\n slurm_dict['line_29'] = ' last=$(( last + nfilespernode ))\\n'\n \n set_up = \" srun -N 1 -n 1 -c $nthreads quickquasars -i $tfiles --nproc $nthreads --outdir $outdir/spectra-16 \"\n slurm_dict['line_30'] = set_up + command +'\\n'\n slurm_dict['line_31'] = ' done\\n'\n slurm_dict['line_32'] = 'wait\\n'\n slurm_dict['line_33'] = 'echo \"END\"\\n'\n for i in range(len(slurm_dict)):\n f.write(slurm_dict['line_' + str(i)])\n return None\n\nif __name__ == \"__main__\":\n import argparse\n from pathlib import Path\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--outdir',type=str,help='output directory of the quickquasar run')\n parser.add_argument('--idir',type=str,help='directory from where to fetch the input data')\n parser.add_argument('--mail',type=str,default=' ',help='email to sent status of the job')\n parser.add_argument('--log',type=str,default =' ',help='directory to output the log of the job run')\n parser.add_argument('--qos',type=str,default='regular',help='which queue')\n parser.add_argument('--nodes',type=int,default=40,help='number numbers to use')\n parser.add_argument('--threads',type=int,default=4,help='number of thread to use per node')\n parser.add_argument('--time',default='00:30:00',type=str)\n parser.add_argument('--name',type=str,default='lyasim',help='name of the job')\n parser.add_argument('--seed-generator',type=int,default=15430289,help='seed to run 
quickquasar')\n parser.add_argument('--nruns',type=int,default=1,help='number of quickquasar runs with the same arguments')\n args = parser.parse_args()\n\n outfile = open('submit.sh','w+')\n np.random.seed(args.seed_generator)\n for k in range(args.nruns):\n #make the output dirs\n output_dirs = args.outdir + '_'+str(k)\n if os.path.isdir(output_dirs) == False:\n os.mkdir(output_dirs)\n if os.path.isdir(output_dirs+'/logs') == False:\n os.mkdir(output_dirs+'/logs')\n if os.path.isdir(output_dirs+'/spectra-16') == False:\n os.mkdir(output_dirs+'/spectra-16')\n \n \n seed = np.random.randint(12345,98765,size=1)\n\n #read config file for quickquasart\n file = open('config.txt','r')\n lines = []\n for l in file:\n lines.append(l)\n \n for i in range(len(lines)):\n line_comp = lines[i].split()\n if len(line_comp) != 1:\n lines[i] = '--' + line_comp[0] + ' ' + line_comp[1] + ' ' \n else:\n lines[i] = '--' + line_comp[0] + ' '\n \n command = \"\".join(lines) + '--seed '+str(seed[0]) \n\n name = 'run_quickquasar.sh'\n get_slurm_script(name,command,output_dirs,args.idir,args.mail,args.log,args.qos,args.nodes,args.threads,args.time,args.name) \n \n \n outfile.write('sbatch '+output_dirs+'/run/'+name+'\\n')\n outfile.close()\n"
] | [
[
"numpy.random.seed",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
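The run_quickquasars.py row above emits a bash loop that divides the transmission files across SLURM nodes (nfilespernode = nfiles/nodes + 1, with the last node taking the remainder). A short Python sketch of the same splitting arithmetic, handy for sanity-checking how many files each srun invocation will receive; the function name is illustrative.

def split_across_nodes(files, nodes):
    # Same arithmetic as the generated bash script: each node gets nfiles // nodes + 1
    # files, and the final node picks up whatever remains of the list
    per_node = len(files) // nodes + 1
    chunks = []
    for node in range(nodes):
        start = node * per_node
        end = None if node == nodes - 1 else start + per_node
        chunks.append(files[start:end])
    return chunks

# e.g. split_across_nodes(list(range(10)), 3) -> [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]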
marlon27/Light_FAMD | [
"fe4328f15f6145798869908fa126eabe75e85391"
] | [
"light_famd/mca.py"
] | [
"\"\"\"Multiple Correspondence Analysis (MCA)\"\"\"\n\nimport numpy as np\nfrom sklearn import utils\n\nfrom . import ca\nfrom . import one_hot\n\n\n\nclass MCA(ca.CA):\n\n def fit(self, X, y=None):\n if self.check_input:\n utils.check_array(X, dtype=[str, np.number])\n \n n_initial_columns = X.shape[1]\n\n # One-hot encode the data\n self.one_hot_ = one_hot.OneHotEncoder().fit(X)\n \n _X_t= self.one_hot_.transform(X) \n \n _0_freq_serie= (_X_t == 0).sum(axis=0)/ len(_X_t)\n \n self._usecols=_0_freq_serie[_0_freq_serie < 0.99].index\n print('MCA PROCESS ELIMINATED {0} COLUMNS SINCE THEIR MISS_RATES >= 99%'.format( _X_t.shape[1] - len(self._usecols) ))\n \n n_new_columns = len(self._usecols)\n self.total_inertia_ = (n_new_columns - n_initial_columns) / n_initial_columns\n # Apply CA to the indicator matrix\n super().fit(_X_t.loc[:,self._usecols])\n\n return self\n\n def _transform(self, X):\n return super()._transform(self.one_hot_.transform(X).loc[:,self._usecols])\n\n\n\n def transform(self, X):\n \"\"\"Computes the row principal coordinates of a dataset.\"\"\"\n utils.validation.check_is_fitted(self, 'singular_values_')\n if self.check_input:\n utils.check_array(X, dtype=[str, np.number])\n return self._transform(X)\n\n"
] | [
[
"sklearn.utils.check_array",
"sklearn.utils.validation.check_is_fitted"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
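The Light_FAMD MCA row above one-hot encodes the input and then discards indicator columns whose zero-frequency is 99% or higher before applying CA. A pandas sketch of that filter on its own, assuming the encoder output is a DataFrame; drop_rare_indicator_columns is an illustrative name, not part of the library.

import pandas as pd

def drop_rare_indicator_columns(one_hot_df, max_zero_rate=0.99):
    # Keep only indicator columns that are non-zero often enough, mirroring the
    # column selection performed in MCA.fit before the CA step
    zero_rate = (one_hot_df == 0).sum(axis=0) / len(one_hot_df)
    kept = zero_rate[zero_rate < max_zero_rate].index
    return one_hot_df.loc[:, kept]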
shayxu-ai/A-Repository-for-Machine-Learning | [
"4b4cea15bb005d1c58f4395fde97cadf44fb0186",
"4b4cea15bb005d1c58f4395fde97cadf44fb0186"
] | [
"测试/tensorflow_hello/2.practices_on_nlp.py",
"contest/base_station_out_service_prediction/lgbm_main.py"
] | [
"# -*- coding: utf-8 -*-\n# @Time: 2020/2/5,005 22:02\n# @Last Update: 2020/2/5,005 22:02\n# @Author: 徐缘\n# @FileName: 2.practices_on_nlp.py\n# @Software: PyCharm\n\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals # 导入一些熟悉的陌生人\n# 绝对引入,精确除法,print,unicode类型字符串。都是为了适配python2,不加也罢\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras.layers import Dense, Flatten, Conv2D\nfrom tensorflow.keras import Model\nfrom tensorflow import keras\n\n\nimport tensorflow_hub as hub # 模型库\nimport tensorflow_datasets as tfds # 数据|库 https://tensorflow.google.cn/datasets/api_docs/python/tfds?hl=en\ntfds.disable_progress_bar()\n\n\ndef version():\n \"\"\"\n 国际惯例,先看下版本\n \"\"\"\n print(\"Eager mode: \", tf.executing_eagerly())\n print(\"Hub version: \", hub.__version__)\n print(\"tfds version\", tfds.__version__)\n print(\"GPU is\", \"available\" if tf.config.experimental.list_physical_devices(\"GPU\") else \"NOT AVAILABLE\")\n\n\ndef tf_hub_hello():\n \"\"\"\n 预训练word2vector(迁移学习) + 全连接层\n loss: 0.329\n accuracy: 0.858 我记得 cnn 文本分类可以有95%呢\n\n \"\"\"\n train_data, validation_data, test_data = tfds.load(\n name=\"imdb_reviews\", split=('train[:60%]', 'train[60%:]', 'test'),\n as_supervised=True)\n train_examples_batch, train_labels_batch = next(iter(train_data.batch(10)))\n print(train_examples_batch)\n print(train_labels_batch)\n\n embedding = \"https://hub.tensorflow.google.cn/google/tf2-preview/gnews-swivel-20dim/1\"\n hub_layer = hub.KerasLayer(embedding, input_shape=[],\n dtype=tf.string, trainable=True)\n print(hub_layer(train_examples_batch[:3]))\n\n model = tf.keras.Sequential()\n model.add(hub_layer)\n model.add(tf.keras.layers.Dense(16, activation='relu'))\n model.add(tf.keras.layers.Dense(1, activation='sigmoid'))\n\n # model.summary()\n\n model.compile(optimizer='adam',\n loss='binary_crossentropy',\n metrics=['accuracy'])\n\n history = model.fit(train_data.shuffle(10000).batch(512),\n epochs=20,\n validation_data=validation_data.batch(512),\n verbose=1)\n\n results = model.evaluate(test_data.batch(512), verbose=2)\n\n for name, value in zip(model.metrics_names, results):\n print(\"%s: %.3f\" % (name, value))\n\n\ndef preprocess_text():\n \"\"\"\n\n\n \"\"\"\n (train_data, test_data), info = tfds.load(\n # Use the version pre-encoded with an ~8k vocabulary.\n 'imdb_reviews/subwords8k',\n # Return the train/test datasets as a tuple.\n split=(tfds.Split.TRAIN, tfds.Split.TEST),\n # Return (example, label) pairs from the dataset (instead of a dictionary).\n as_supervised=True,\n # Also return the `info` structure.\n with_info=True)\n\n encoder = info.features['text'].encoder\n print('Vocabulary size: {}'.format(encoder.vocab_size))\n\n sample_string = 'Hello TensorFlow.'\n\n encoded_string = encoder.encode(sample_string)\n print('Encoded string is {}'.format(encoded_string))\n\n original_string = encoder.decode(encoded_string)\n print('The original string: \"{}\"'.format(original_string))\n\n assert original_string == sample_string\n\n for ts in encoded_string:\n print('{} ----> {}'.format(ts, encoder.decode([ts])))\n\n for train_example, train_label in train_data.take(1):\n print('Encoded text:', train_example[:10].numpy())\n print('Label:', train_label.numpy())\n\n encoder.decode(train_example)\n\n BUFFER_SIZE = 1000\n\n train_batches = (\n train_data\n .shuffle(BUFFER_SIZE)\n .padded_batch(32, train_data.output_shapes))\n\n test_batches = (\n test_data\n .padded_batch(32, train_data.output_shapes))\n\n for example_batch, label_batch in 
train_batches.take(2):\n print(\"Batch shape:\", example_batch.shape)\n print(\"label shape:\", label_batch.shape)\n\n model = keras.Sequential([\n keras.layers.Embedding(encoder.vocab_size, 16),\n keras.layers.GlobalAveragePooling1D(),\n keras.layers.Dense(1, activation='sigmoid')])\n\n model.summary()\n\n model.compile(optimizer='adam',\n loss='binary_crossentropy',\n metrics=['accuracy'])\n\n history = model.fit(train_batches,\n epochs=10,\n validation_data=test_batches,\n validation_steps=30)\n\n loss, accuracy = model.evaluate(test_batches)\n\n print(\"Loss: \", loss)\n print(\"Accuracy: \", accuracy)\n\n history_dict = history.history\n history_dict.keys()\n\n import matplotlib.pyplot as plt\n\n acc = history_dict['accuracy']\n val_acc = history_dict['val_accuracy']\n loss = history_dict['loss']\n val_loss = history_dict['val_loss']\n\n epochs = range(1, len(acc) + 1)\n\n # \"bo\" is for \"blue dot\"\n plt.plot(epochs, loss, 'bo', label='Training loss')\n # b is for \"solid blue line\"\n plt.plot(epochs, val_loss, 'b', label='Validation loss')\n plt.title('Training and validation loss')\n plt.xlabel('Epochs')\n plt.ylabel('Loss')\n plt.legend()\n\n plt.show()\n\n plt.clf() # clear figure\n\n plt.plot(epochs, acc, 'bo', label='Training acc')\n plt.plot(epochs, val_acc, 'b', label='Validation acc')\n plt.title('Training and validation accuracy')\n plt.xlabel('Epochs')\n plt.ylabel('Accuracy')\n plt.legend(loc='lower right')\n\n plt.show()\n return\n\n\nif __name__ == '__main__':\n # version()\n preprocess_text()\n\n\n",
"import lightgbm as lgb\nfrom sklearn import metrics\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\npd.set_option('display.max_rows', None)\n\ndata_path = 'train_data.csv'\n\ntrain_data = pd.read_csv(data_path).dropna(axis=0)\ntrain_data.reset_index(drop=True, inplace=True)\n# print(len(train_data))\n# print(len(train_data[(train_data['1']>0) | (train_data['37']>0)][['1', '37']]))\n\ntrain_data.info()\ntrain_data.tail(10)\n\ntrain_data[['113', '113_6d', '113_6d_bool', '152', '152_6d', '152_6d_bool', 'label']]\n\n# train_data['label'] = train_data.apply(lambda row: 1 if (row['1'] or row['37']) else 0, axis=1).shift(axis=0, periods=-1)\n\n# train_data = train_data[train_data['starttime'] != \"2020-03-09\"]\ntrain_data_1 = train_data[train_data['label'] == 1]\ntrain_data_0 = train_data[train_data['label'] == 0]\n# .sample(n=len(train_data_1)*3)\ntrain_data_sampled = train_data_1.append(train_data_0).sample(frac=1).reindex()\n\ntrain_data_sampled[['113_6d_bool','152_6d_bool', 'label']]\n\n\ndef node_id_encode():\n from sklearn import preprocessing\n le = preprocessing.LabelEncoder()\n le.fit(train_data['node_id'])\n train_data['node_id'] = le.transform(train_data['node_id'])\n\n# node_id_encode()\n\ndef select_k_best():\n from sklearn.feature_selection import SelectKBest, f_classif\n\n feature_cols = train_data_sampled.columns.drop('label')\n\n # Keep 5 features\n selector = SelectKBest(f_classif, k=5)\n X_new = selector.fit_transform(train_data_sampled[feature_cols], train_data_sampled['label'])\n\n# select_k_best()\n\n\ndef baseline(X, Y):\n X_predict = X.apply(lambda row: 1 if row['113_2d_bool'] + row['152_2d_bool'] > 0 else 0, axis=1)\n # print(X_predict)\n\n f1 = metrics.f1_score(X_predict, Y, average='weighted')\n print(f1)\n f1 = metrics.f1_score(X_predict, Y)\n print(f1)\n\n print(metrics.classification_report(X_predict, Y, labels=None, target_names=None, sample_weight=None, digits=2))\n return\n\n\nbaseline(train_data_sampled, train_data_sampled['label'])\n\ncols = [i for i in train_data.columns if i not in ['label']]\n# cols = ['113_6d_bool', '152_6d_bool']\nx_train, x_test, y_train, y_test = train_test_split(train_data_sampled[cols], train_data_sampled['label'], test_size=0.25, random_state=0)\n\nparam = {\n 'boosting_type': 'gbdt',\n 'colsample_bytree': 1.0,\n 'learning_rate': 0.075286,\n 'max_depth': 20,\n 'n_estimators': 2000,\n 'n_jobs': 4,\n 'num_leaves': 100,\n 'reg_alpha': 1,\n 'reg_lambda': 1,\n 'subsample': 0.7,\n 'objective': 'binary',\n 'metric': 'auc',\n 'num_threads': 20,\n 'feature_fraction': 0.5,\n 'bagging_fraction': 0.6792676,\n 'verbose': 1,\n 'max_bin': 255,\n 'min_sum_hessian_in_leaf': 1\n}\n\nmodel = lgb.LGBMClassifier(**param)\nmodel.fit(x_train, y_train, eval_set=(x_test, y_test), eval_metric='binary_logloss', early_stopping_rounds=50, verbose=10,\n feature_name='auto', categorical_feature='auto', callbacks=None)\n\nmodel.booster_.feature_importance(importance_type='gain')\n\nimp = pd.DataFrame(model.booster_.feature_importance(importance_type='gain').tolist(),index=cols)\nimp.sort_values(by=[0], ascending=False)\n\ny_pred = model.predict(x_test, num_iteration=model.best_iteration_)\nf1 = metrics.f1_score(y_test, y_pred, average='weighted')\nprint(f1)\nf1 = metrics.f1_score(y_test, y_pred)\nprint(f1)\n\nprint(metrics.classification_report(y_test, y_pred, labels=None, target_names=None, sample_weight=None, digits=2))"
] | [
[
"matplotlib.pyplot.legend",
"tensorflow.executing_eagerly",
"matplotlib.pyplot.title",
"tensorflow.keras.layers.Embedding",
"tensorflow.keras.layers.Dense",
"tensorflow.config.experimental.list_physical_devices",
"tensorflow.keras.layers.GlobalAveragePooling1D",
"tensorflow.keras.Sequential",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
],
[
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.feature_selection.SelectKBest",
"sklearn.metrics.f1_score",
"pandas.set_option",
"sklearn.preprocessing.LabelEncoder",
"sklearn.metrics.classification_report"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
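The lgbm_main.py row above benchmarks LightGBM against a hand-written rule in baseline() that flags an outage whenever either two-day indicator column is set. A vectorized sketch of that rule check, assuming the same column names and using the conventional (y_true, y_pred) argument order for the sklearn metrics.

from sklearn import metrics

def evaluate_rule_baseline(frame, label_col='label'):
    # Predict 1 whenever either recent-outage indicator is set, as in baseline()
    predictions = ((frame['113_2d_bool'] + frame['152_2d_bool']) > 0).astype(int)
    print(metrics.f1_score(frame[label_col], predictions, average='weighted'))
    print(metrics.f1_score(frame[label_col], predictions))
    print(metrics.classification_report(frame[label_col], predictions))
    return predictions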
friedenhe/OpenMDAO | [
"db1d7e22a8bf9f66afa82ec3544b7244d5545f6d",
"db1d7e22a8bf9f66afa82ec3544b7244d5545f6d",
"db1d7e22a8bf9f66afa82ec3544b7244d5545f6d",
"db1d7e22a8bf9f66afa82ec3544b7244d5545f6d",
"db1d7e22a8bf9f66afa82ec3544b7244d5545f6d",
"db1d7e22a8bf9f66afa82ec3544b7244d5545f6d",
"db1d7e22a8bf9f66afa82ec3544b7244d5545f6d",
"db1d7e22a8bf9f66afa82ec3544b7244d5545f6d",
"db1d7e22a8bf9f66afa82ec3544b7244d5545f6d",
"db1d7e22a8bf9f66afa82ec3544b7244d5545f6d",
"db1d7e22a8bf9f66afa82ec3544b7244d5545f6d"
] | [
"openmdao/components/interp_util/interp.py",
"openmdao/core/tests/test_parallel_groups.py",
"openmdao/vectors/petsc_vector.py",
"openmdao/test_suite/components/matmultcomp.py",
"openmdao/test_suite/components/array_comp.py",
"openmdao/drivers/tests/test_scipy_optimizer.py",
"openmdao/core/tests/test_dyn_sizing.py",
"openmdao/components/tests/test_add_subtract_comp.py",
"openmdao/utils/spline_distributions.py",
"openmdao/utils/tests/test_hooks.py",
"openmdao/matrices/csc_matrix.py"
] | [
"\"\"\"\nBase class for interpolation methods that calculate values for each dimension independently.\n\nBased on Tables in NPSS, and was added to bridge the gap between some of the slower scipy\nimplementations.\n\"\"\"\nimport numpy as np\n\nfrom openmdao.components.interp_util.interp_akima import InterpAkima, Interp1DAkima\nfrom openmdao.components.interp_util.interp_bsplines import InterpBSplines\nfrom openmdao.components.interp_util.interp_cubic import InterpCubic\nfrom openmdao.components.interp_util.interp_lagrange2 import InterpLagrange2, Interp3DLagrange2\nfrom openmdao.components.interp_util.interp_lagrange3 import InterpLagrange3, Interp3DLagrange3\nfrom openmdao.components.interp_util.interp_scipy import InterpScipy\nfrom openmdao.components.interp_util.interp_slinear import InterpLinear, Interp3DSlinear, \\\n Interp1DSlinear, Interp2DSlinear\n\nfrom openmdao.components.interp_util.outofbounds_error import OutOfBoundsError\nfrom openmdao.utils.om_warnings import warn_deprecation\n\n\nINTERP_METHODS = {\n 'slinear': InterpLinear,\n 'lagrange2': InterpLagrange2,\n 'lagrange3': InterpLagrange3,\n 'cubic': InterpCubic,\n 'akima': InterpAkima,\n 'scipy_cubic': InterpScipy,\n 'scipy_slinear': InterpScipy,\n 'scipy_quintic': InterpScipy,\n 'bsplines': InterpBSplines,\n '1D-slinear': Interp1DSlinear,\n '2D-slinear': Interp2DSlinear,\n '3D-slinear': Interp3DSlinear,\n '3D-lagrange2': Interp3DLagrange2,\n '3D-lagrange3': Interp3DLagrange3,\n '1D-akima': Interp1DAkima,\n 'trilinear': Interp3DSlinear, # Deprecated\n 'akima1D': Interp1DAkima, # Deprecated\n}\n\nTABLE_METHODS = ['slinear', 'lagrange2', 'lagrange3', 'cubic', 'akima',\n 'scipy_cubic', 'scipy_slinear', 'scipy_quintic',\n 'trilinear', 'akima1D', # These two are Deprecated\n '3D-slinear', '2D-slinear', '1D-slinear',\n '1D-akima',\n '3D-lagrange2', '3D-lagrange3']\nSPLINE_METHODS = ['slinear', 'lagrange2', 'lagrange3', 'cubic', 'akima', 'bsplines',\n 'scipy_cubic', 'scipy_slinear', 'scipy_quintic']\n\n\nclass InterpND(object):\n \"\"\"\n Interpolation on a regular grid of arbitrary dimensions.\n\n The data must be defined on a regular grid; the grid spacing however may be uneven. Several\n interpolation methods are supported. These are defined in the child classes. Gradients are\n provided for all interpolation methods. Gradients with respect to grid values are also\n available optionally.\n\n Parameters\n ----------\n method : str\n Name of interpolation method.\n points : ndarray or tuple of ndarray\n The points defining the regular grid in n dimensions.\n For 1D interpolation, this can be an ndarray of table locations.\n For table interpolation, it can be a tuple or an ndarray. If it is a tuple, it should\n contain one ndarray for each table dimension.\n For spline evaluation, num_cp can be specified instead of points.\n values : ndarray or tuple of ndarray or None\n These must be specified for interpolation.\n The data on the regular grid in n dimensions.\n x_interp : ndarray or None\n If we are always interpolating at a fixed set of locations, then they can be\n specified here.\n extrapolate : bool\n If False, when interpolated values are requested outside of the domain of the input\n data, a ValueError is raised. If True, then the methods are allowed to extrapolate.\n Default is True (raise an exception).\n num_cp : None or int\n Optional. When specified, use a linear distribution of num_cp control points. 
If you\n are using 'bsplines' as the method, then num_cp must be set instead of points.\n **kwargs : dict\n Interpolator-specific options to pass onward.\n\n Attributes\n ----------\n extrapolate : bool\n If False, when interpolated values are requested outside of the domain of the input data,\n a ValueError is raised. If True, then the methods are allowed to extrapolate.\n Default is True.\n grid : tuple\n Collection of points that determine the regular grid.\n table : <InterpTable>\n Table object that contains algorithm that performs the interpolation.\n values : array_like, shape (m1, ..., mn, ...)\n The data on the regular grid in n dimensions.\n x_interp : ndarray\n Cached non-decreasing vector of points to be interpolated when used as an order-reducing\n spline.\n _compute_d_dvalues : bool\n When set to True, compute gradients with respect to the grid values.\n _compute_d_dx : bool\n When set to True, compute gradients with respect to the interpolated point location.\n _d_dx : ndarray\n Cache of computed gradients with respect to evaluation point.\n _d_dvalues : ndarray\n Cache of computed gradients with respect to table values.\n _interp : class\n Class specified as interpolation algorithm, used to regenerate if needed.\n _interp_config : dict\n Configuration object that stores the number of points required for each interpolation\n method.\n _interp_options : dict\n Dictionary of cached interpolator-specific options.\n _xi : ndarray\n Cache of current evaluation point.\n \"\"\"\n\n def __init__(self, method=\"slinear\", points=None, values=None, x_interp=None, extrapolate=False,\n num_cp=None, **kwargs):\n \"\"\"\n Initialize an InterpND object.\n\n This object can be setup and used to interpolate on a curve or multi-dimensional table.\n\n It can also be used to setup an interpolating spline that can be evaluated at fixed\n locations.\n\n For interpolation, specify values and points.\n\n For spline evaluation, specifiy x_interp and either points or num_cp.\n \"\"\"\n if not isinstance(method, str):\n msg = \"Argument 'method' should be a string.\"\n raise ValueError(msg)\n elif method not in INTERP_METHODS:\n all_m = ', '.join(['\"' + m + '\"' for m in INTERP_METHODS])\n raise ValueError('Interpolation method \"%s\" is not defined. Valid methods are '\n '%s.' 
% (method, all_m))\n elif method == 'akima1D':\n warn_deprecation(\"The 'akima1D' method has been renamed to '1D-akima'.\")\n elif method == 'trilinear':\n warn_deprecation(\"The 'trilinear' method has been renamed to '3D-slinear'.\")\n\n self.extrapolate = extrapolate\n\n # The table points are always defined, by specifying either the points directly, or num_cp.\n if points is None:\n if num_cp is not None:\n points = [np.linspace(0.0, 1.0, num_cp)]\n else:\n msg = \"Either 'points' or 'num_cp' must be specified.\"\n raise ValueError(msg)\n else:\n\n if isinstance(points, np.ndarray):\n points = [points]\n\n for i, p in enumerate(points):\n n_p = len(p)\n if not np.all(np.diff(p) > 0.):\n raise ValueError(\"The points in dimension %d must be strictly \"\n \"ascending\" % i)\n if not np.asarray(p).ndim == 1:\n raise ValueError(\"The points in dimension %d must be \"\n \"1-dimensional\" % i)\n\n # Table Interpolation\n if x_interp is None:\n\n if values is None:\n msg = \"Either 'values' or 'x_interp' must be specified.\"\n raise ValueError(msg)\n\n if method == 'bsplines':\n msg = \"Method 'bsplines' is not supported for table interpolation.\"\n raise ValueError(msg)\n\n if not hasattr(values, 'ndim'):\n # allow reasonable duck-typed values\n values = np.asarray(values)\n\n if hasattr(values, 'dtype') and hasattr(values, 'astype'):\n if not np.issubdtype(values.dtype, np.inexact):\n values = values.astype(float)\n\n if len(points) > values.ndim:\n raise ValueError(\"There are %d point arrays, but values has %d \"\n \"dimensions\" % (len(points), values.ndim))\n\n if (method.startswith('scipy') or method == 'akima') and \\\n (np.iscomplexobj(values[:]) or np.any(np.iscomplex(points[0]))):\n msg = f\"Interpolation method '{method}' does not support complex points or values.\"\n raise ValueError(msg)\n\n for i, p in enumerate(points):\n n_p = len(p)\n if values.shape[i] != n_p:\n raise ValueError(\"There are %d points and %d values in \"\n \"dimension %d\" % (len(p), values.shape[i], i))\n\n self.grid = tuple([np.asarray(p) for p in points])\n self.values = values\n self.x_interp = x_interp\n\n self._xi = None\n self._d_dx = None\n self._d_dvalues = None\n self._compute_d_dvalues = False\n self._compute_d_dx = True\n\n # Cache spline coefficients.\n interp = INTERP_METHODS[method]\n\n if method.startswith('scipy'):\n kwargs['interp_method'] = method\n\n table = interp(self.grid, values, interp, **kwargs)\n table.check_config()\n self.table = table\n self._interp = interp\n self._interp_options = kwargs\n\n def interpolate(self, x, compute_derivative=False):\n \"\"\"\n Interpolate at the sample coordinates.\n\n Parameters\n ----------\n x : ndarray or tuple\n Locations to interpolate.\n compute_derivative : bool\n Set to True to compute derivatives with respect to x.\n\n Returns\n -------\n ndarray\n Value of interpolant at all sample points.\n ndarray\n Value of derivative of interpolated output with respect to input x. 
(Only when\n compute_derivative is True).\n \"\"\"\n self._compute_d_dx = compute_derivative\n self.table._compute_d_dx = compute_derivative\n self.table._compute_d_dvalues = False\n\n if isinstance(x, np.ndarray):\n if len(x.shape) < 2:\n if len(self.grid) > 1:\n # Input is an array containing multi-D coordinates of a single point.\n x = np.atleast_2d(x)\n else:\n # Input is an array of separate points on a 1D table.\n x = np.atleast_2d(x).T\n else:\n # Input is a list or tuple of separate points.\n x = np.atleast_2d(x)\n\n # cache latest evaluation point for gradient method's use later\n self._xi = x\n\n xnew = self._interpolate(x)\n\n if compute_derivative:\n return xnew, self._d_dx\n else:\n return xnew\n\n def evaluate_spline(self, values, compute_derivative=False):\n \"\"\"\n Interpolate at all fixed output coordinates given the new table values.\n\n Parameters\n ----------\n values : ndarray(n_points)\n New data values for all points on the regular grid.\n compute_derivative : bool\n Set to True to compute derivatives with respect to x.\n\n Returns\n -------\n ndarray\n Value of interpolant at all sample points.\n ndarray\n Value of derivative of interpolated output with respect to values.\n \"\"\"\n self._compute_d_dvalues = compute_derivative\n self.table._compute_d_dvalues = compute_derivative\n self.table._compute_d_dx = False\n\n if len(values.shape) == 1:\n values = np.expand_dims(values, axis=0)\n\n # cache latest evaluation point for gradient method's use later\n self._xi = self.x_interp.copy()\n\n result = self._evaluate_spline(values)\n if result.shape[0] == 1:\n # Not vectorized, so drop the extra dimension.\n result = result.ravel()\n\n if compute_derivative:\n d_dvalues = self.spline_gradient()\n if d_dvalues.shape[0] == 1:\n d_dvalues = d_dvalues[0]\n return result, d_dvalues\n else:\n return result\n\n def _interpolate(self, xi):\n \"\"\"\n Interpolate at the sample coordinates.\n\n This method is called from OpenMDAO, and is not meant for standalone use.\n\n Parameters\n ----------\n xi : ndarray of shape (..., ndim)\n The coordinates to sample the gridded data.\n\n Returns\n -------\n ndarray\n Value of interpolant at all sample points.\n \"\"\"\n if not self.extrapolate:\n for i, p in enumerate(xi.T):\n if np.isnan(p).any():\n raise OutOfBoundsError(\"One of the requested xi contains a NaN\",\n i, np.NaN, self.grid[i][0], self.grid[i][-1])\n\n eps = 1e-14 * self.grid[i][-1]\n if np.any(p < self.grid[i][0] - eps) or np.any(p > self.grid[i][-1] + eps):\n p1 = np.where(self.grid[i][0] > p)[0]\n p2 = np.where(p > self.grid[i][-1])[0]\n # First violating entry is enough to direct the user.\n violated_idx = set(p1).union(p2).pop()\n value = p[violated_idx]\n raise OutOfBoundsError(\"One of the requested xi is out of bounds\",\n i, value, self.grid[i][0], self.grid[i][-1])\n\n if self._compute_d_dvalues:\n # If the table grid or values are component inputs, then we need to create a new table\n # each iteration.\n interp = self._interp\n self.table = interp(self.grid, self.values, interp, **self._interp_options)\n if not self.table._supports_d_dvalues:\n raise RuntimeError(f'Method {self.table._name} does not support the '\n '\"training_data_gradients\" option.')\n\n self.table._compute_d_dvalues = True\n\n table = self.table\n if table.vectorized(xi):\n result, derivs_x, derivs_val, derivs_grid = table.evaluate_vectorized(xi)\n\n else:\n n_nodes, nx = xi.shape\n result = np.empty((n_nodes, ), dtype=xi.dtype)\n derivs_x = np.empty((n_nodes, nx), dtype=xi.dtype)\n 
derivs_val = None\n\n # TODO: it might be possible to vectorize over n_nodes.\n for j in range(n_nodes):\n val, d_x, d_values, d_grid = table.evaluate(xi[j, ...])\n result[j] = val\n derivs_x[j, :] = d_x.ravel()\n if d_values is not None:\n if derivs_val is None:\n dv_shape = [n_nodes]\n dv_shape.extend(self.values.shape)\n derivs_val = np.zeros(dv_shape, dtype=xi.dtype)\n in_slice = table._full_slice\n full_slice = [slice(j, j + 1)]\n full_slice.extend(in_slice)\n shape = derivs_val[tuple(full_slice)].shape\n derivs_val[tuple(full_slice)] = d_values.reshape(shape)\n\n # Cache derivatives\n self._d_dx = derivs_x\n self._d_dvalues = derivs_val\n\n return result\n\n def _evaluate_spline(self, values):\n \"\"\"\n Interpolate at all fixed output coordinates given the new table values.\n\n This method is called from OpenMDAO, and is not meant for standalone use.\n\n Parameters\n ----------\n values : ndarray(n_nodes x n_points)\n The data on the regular grid in n dimensions.\n\n Returns\n -------\n ndarray\n Value of interpolant at all sample points.\n \"\"\"\n xi = self.x_interp\n self.values = values\n\n table = self.table\n if table._vectorized:\n\n if table._name == 'bsplines':\n # bsplines is fully vectorized.\n table.values = values\n result, _, derivs_val, _ = table.evaluate_vectorized(xi)\n\n else:\n # Scipy implementation vectorized over lookups, but not over multiple table values.\n interp = self._interp\n n_nodes, _ = values.shape\n nx = np.prod(xi.shape)\n\n result = np.empty((n_nodes, nx), dtype=values.dtype)\n derivs_val = None\n\n for j in range(n_nodes):\n\n table = interp(self.grid, values[j, :], interp, **self._interp_options)\n table._compute_d_dvalues = False\n table._compute_d_dx = False\n\n result[j, :], _, _, _ = table.evaluate_vectorized(xi.reshape((nx, 1)))\n\n else:\n interp = self._interp\n n_nodes, _ = values.shape\n nx = np.prod(xi.shape)\n result = np.empty((n_nodes, nx), dtype=values.dtype)\n derivs_val = None\n\n # TODO: it might be possible to vectorize over n_nodes.\n for j in range(n_nodes):\n\n table = interp(self.grid, values[j, :], interp, **self._interp_options)\n table._compute_d_dvalues = True\n table._compute_d_dx = False\n\n for k in range(nx):\n x_pt = np.atleast_2d(xi[k])\n val, _, d_values, _ = table.evaluate(x_pt)\n result[j, k] = val\n if d_values is not None:\n if derivs_val is None:\n dv_shape = [n_nodes, nx]\n dv_shape.extend(values.shape[1:])\n derivs_val = np.zeros(dv_shape, dtype=values.dtype)\n in_slice = table._full_slice\n full_slice = [slice(j, j + 1), slice(k, k + 1)]\n full_slice.extend(in_slice)\n shape = derivs_val[tuple(full_slice)].shape\n derivs_val[tuple(full_slice)] = d_values.reshape(shape)\n\n # Cache derivatives\n self._d_dvalues = derivs_val\n\n self.table = table\n return result\n\n def gradient(self, xi):\n \"\"\"\n Compute the gradients at the specified point.\n\n Most of the gradients are computed as the interpolation itself is performed,\n but are cached and returned separately by this method.\n\n If the point for evaluation differs from the point used to produce\n the currently cached gradient, the interpolation is re-performed in\n order to return the correct gradient.\n\n Parameters\n ----------\n xi : ndarray of shape (..., ndim)\n The coordinates to sample the gridded data at.\n\n Returns\n -------\n ndarray\n Vector of gradients of the interpolated values with respect to each value in xi.\n \"\"\"\n if (self._xi is None) or (not np.array_equal(xi, self._xi)):\n # If inputs have changed since last computation, then 
re-interpolate.\n self.interpolate(xi)\n\n return self._gradient().reshape(np.asarray(xi).shape)\n\n def _gradient(self):\n \"\"\"\n Return the pre-computed gradients.\n\n Returns\n -------\n ndarray\n Vector of gradients of the interpolated values with respect to each value in xi.\n \"\"\"\n return self._d_dx\n\n def training_gradients(self, pt):\n \"\"\"\n Compute the training gradient for the vector of training points.\n\n Parameters\n ----------\n pt : ndarray\n Training point values.\n\n Returns\n -------\n ndarray\n Gradient of output with respect to training point values.\n \"\"\"\n if self.table._vectorized:\n return self.table.training_gradients(pt)\n\n else:\n grid = self.grid\n interp = self._interp\n opts = self._interp_options\n\n for i, axis in enumerate(grid):\n ngrid = axis.size\n values = np.zeros(ngrid)\n deriv_i = np.zeros(ngrid)\n\n for j in range(ngrid):\n values[j] = 1.0\n table = interp([grid[i]], values, interp, **opts)\n table._compute_d_dvalues = False\n deriv_i[j], _, _, _ = table.evaluate(pt[i:i + 1])\n values[j] = 0.0\n\n if i == 0:\n deriv_running = deriv_i.copy()\n else:\n deriv_running = np.outer(deriv_running, deriv_i)\n\n return deriv_running\n\n def spline_gradient(self):\n \"\"\"\n Return derivative of spline with respect to its control points.\n\n Returns\n -------\n ndarray\n Gradient of output with respect to training point values.\n \"\"\"\n vec_size, n_cp = self.values.shape\n x_interp = self.x_interp\n n_interp = len(x_interp)\n\n d_dvalues = self._d_dvalues\n if d_dvalues is not None:\n dy_ddata = np.zeros((vec_size, n_interp, n_cp), dtype=d_dvalues.dtype)\n\n if d_dvalues.shape[0] == vec_size:\n # Akima precomputes derivs at all points in vec_size.\n dy_ddata[:] = d_dvalues\n else:\n # Bsplines computed derivative is the same at all points in vec_size.\n dy_ddata[:] = np.broadcast_to(d_dvalues.toarray(), (vec_size, n_interp, n_cp))\n else:\n # Note: These derivatives are independent of control point y values, so they will never\n # be complex dtype.\n dy_ddata = np.zeros((n_interp, n_cp))\n\n # This way works for the rest of the interpolation methods.\n for k in range(n_interp):\n val = self.training_gradients(x_interp[k:k + 1])\n dy_ddata[k, :] = val\n dy_ddata = np.broadcast_to(dy_ddata, (vec_size, n_interp, n_cp))\n\n return dy_ddata\n",
"\"\"\"Test the parallel groups.\"\"\"\n\nimport unittest\nimport itertools\n\nfrom collections.abc import Iterable\n\nimport numpy as np\n\nimport openmdao.api as om\nfrom openmdao.utils.mpi import MPI\n\ntry:\n from parameterized import parameterized\nexcept ImportError:\n from openmdao.utils.assert_utils import SkipParameterized as parameterized\n\ntry:\n from openmdao.vectors.petsc_vector import PETScVector\nexcept ImportError:\n PETScVector = None\n\nfrom openmdao.test_suite.groups.parallel_groups import \\\n FanOutGrouped, FanInGrouped2, Diamond, ConvergeDiverge\n\nfrom openmdao.utils.assert_utils import assert_near_equal\nfrom openmdao.utils.logger_utils import TestLogger\nfrom openmdao.error_checking.check_config import _default_checks\n\n\nclass Noisy(ConvergeDiverge):\n def check_config(self, logger):\n msg = 'Only want to see this on rank 0'\n logger.error(msg)\n logger.warning(msg)\n logger.info(msg)\n\n\ndef _test_func_name(func, num, param):\n args = []\n for p in param.args:\n if not isinstance(p, Iterable):\n p = {p}\n for item in p:\n try:\n arg = item.__name__\n except:\n arg = str(item)\n args.append(arg)\n return func.__name__ + '_' + '_'.join(args)\n\n\[email protected](MPI and PETScVector, \"MPI and PETSc are required.\")\nclass TestParallelGroups(unittest.TestCase):\n\n N_PROCS = 2\n\n @parameterized.expand(itertools.product([(om.LinearRunOnce, None)],\n [om.NonlinearBlockGS, om.NonlinearRunOnce]),\n name_func=_test_func_name)\n def test_fan_out_grouped(self, solv_tup, nlsolver):\n prob = om.Problem(FanOutGrouped())\n\n of=['c2.y', \"c3.y\"]\n wrt=['iv.x']\n\n solver, jactype = solv_tup\n\n prob.model.linear_solver = solver()\n if jactype is not None:\n prob.model.options['assembled_jac_type'] = jactype\n prob.model.nonlinear_solver = nlsolver()\n\n prob.setup(check=False, mode='fwd')\n prob.set_solver_print(level=0)\n prob.run_model()\n\n J = prob.compute_totals(of=['c2.y', \"c3.y\"], wrt=['iv.x'])\n\n assert_near_equal(J['c2.y', 'iv.x'][0][0], -6.0, 1e-6)\n assert_near_equal(J['c3.y', 'iv.x'][0][0], 15.0, 1e-6)\n\n assert_near_equal(prob['c2.y'], -6.0, 1e-6)\n assert_near_equal(prob['c3.y'], 15.0, 1e-6)\n\n prob.setup(check=False, mode='rev')\n prob.run_model()\n\n J = prob.compute_totals(of=['c2.y', \"c3.y\"], wrt=['iv.x'])\n\n assert_near_equal(J['c2.y', 'iv.x'][0][0], -6.0, 1e-6)\n assert_near_equal(J['c3.y', 'iv.x'][0][0], 15.0, 1e-6)\n\n assert_near_equal(prob['c2.y'], -6.0, 1e-6)\n assert_near_equal(prob['c3.y'], 15.0, 1e-6)\n\n @parameterized.expand(itertools.product([om.LinearRunOnce],\n [om.NonlinearBlockGS, om.NonlinearRunOnce]),\n name_func=_test_func_name)\n def test_fan_in_grouped(self, solver, nlsolver):\n\n prob = om.Problem()\n prob.model = FanInGrouped2()\n\n prob.model.linear_solver = solver()\n prob.model.nonlinear_solver = nlsolver()\n\n prob.setup(check=False, mode='fwd')\n prob.set_solver_print(level=0)\n prob.run_model()\n\n indep_list = ['p1.x', 'p2.x']\n unknown_list = ['c3.y']\n\n assert_near_equal(prob['c3.y'], 29.0, 1e-6)\n\n J = prob.compute_totals(of=unknown_list, wrt=indep_list)\n assert_near_equal(J['c3.y', 'p1.x'][0][0], -6.0, 1e-6)\n assert_near_equal(J['c3.y', 'p2.x'][0][0], 35.0, 1e-6)\n\n # do this a second time to test caching of dist rows/cols\n J = prob.compute_totals(of=unknown_list, wrt=indep_list)\n assert_near_equal(J['c3.y', 'p1.x'][0][0], -6.0, 1e-6)\n assert_near_equal(J['c3.y', 'p2.x'][0][0], 35.0, 1e-6)\n\n assert_near_equal(prob['c3.y'], 29.0, 1e-6)\n\n prob.setup(check=False, mode='rev')\n prob.run_model()\n\n 
assert_near_equal(prob['c3.y'], 29.0, 1e-6)\n\n J = prob.compute_totals(of=unknown_list, wrt=indep_list)\n assert_near_equal(J['c3.y', 'p1.x'][0][0], -6.0, 1e-6)\n assert_near_equal(J['c3.y', 'p2.x'][0][0], 35.0, 1e-6)\n\n # do this a second time to test caching of dist rows/cols\n J = prob.compute_totals(of=unknown_list, wrt=indep_list)\n assert_near_equal(J['c3.y', 'p1.x'][0][0], -6.0, 1e-6)\n assert_near_equal(J['c3.y', 'p2.x'][0][0], 35.0, 1e-6)\n\n assert_near_equal(prob['c3.y'], 29.0, 1e-6)\n\n def test_fan_in_grouped_feature(self):\n\n prob = om.Problem()\n model = prob.model\n\n model.set_input_defaults('x', 1.)\n\n parallel = model.add_subsystem('parallel', om.ParallelGroup(), promotes_inputs=[('c1.x', 'x'), ('c2.x', 'x')])\n parallel.add_subsystem('c1', om.ExecComp(['y=-2.0*x']))\n parallel.add_subsystem('c2', om.ExecComp(['y=5.0*x']))\n\n model.add_subsystem('c3', om.ExecComp(['y=3.0*x1+7.0*x2']))\n\n model.connect(\"parallel.c1.y\", \"c3.x1\")\n model.connect(\"parallel.c2.y\", \"c3.x2\")\n\n prob.setup(check=False, mode='fwd')\n prob.run_model()\n\n assert_near_equal(prob['c3.y'], 29.0, 1e-6)\n\n @parameterized.expand(itertools.product([om.LinearRunOnce],\n [om.NonlinearBlockGS, om.NonlinearRunOnce]),\n name_func=_test_func_name)\n def test_diamond(self, solver, nlsolver):\n\n prob = om.Problem()\n prob.model = Diamond()\n\n prob.model.linear_solver = solver()\n prob.model.nonlinear_solver = nlsolver()\n\n prob.setup(check=False, mode='fwd')\n prob.set_solver_print(level=0)\n prob.run_model()\n\n assert_near_equal(prob['c4.y1'], 46.0, 1e-6)\n assert_near_equal(prob['c4.y2'], -93.0, 1e-6)\n\n indep_list = ['iv.x']\n unknown_list = ['c4.y1', 'c4.y2']\n\n J = prob.compute_totals(of=unknown_list, wrt=indep_list)\n assert_near_equal(J['c4.y1', 'iv.x'][0][0], 25, 1e-6)\n assert_near_equal(J['c4.y2', 'iv.x'][0][0], -40.5, 1e-6)\n\n prob.setup(check=False, mode='rev')\n prob.run_model()\n\n assert_near_equal(prob['c4.y1'], 46.0, 1e-6)\n assert_near_equal(prob['c4.y2'], -93.0, 1e-6)\n\n J = prob.compute_totals(of=unknown_list, wrt=indep_list)\n assert_near_equal(J['c4.y1', 'iv.x'][0][0], 25, 1e-6)\n assert_near_equal(J['c4.y2', 'iv.x'][0][0], -40.5, 1e-6)\n\n @parameterized.expand(itertools.product([om.LinearRunOnce],\n [om.NonlinearBlockGS, om.NonlinearRunOnce]),\n name_func=_test_func_name)\n def test_converge_diverge(self, solver, nlsolver):\n\n prob = om.Problem()\n prob.model = ConvergeDiverge()\n\n prob.model.linear_solver = solver()\n prob.model.nonlinear_solver = nlsolver()\n\n prob.setup(check=False, mode='fwd')\n prob.set_solver_print(level=0)\n prob.run_model()\n\n assert_near_equal(prob['c7.y1'], -102.7, 1e-6)\n\n indep_list = ['iv.x']\n unknown_list = ['c7.y1']\n\n J = prob.compute_totals(of=unknown_list, wrt=indep_list)\n assert_near_equal(J['c7.y1', 'iv.x'][0][0], -40.75, 1e-6)\n\n prob.setup(check=False, mode='rev')\n prob.run_model()\n\n assert_near_equal(prob['c7.y1'], -102.7, 1e-6)\n\n J = prob.compute_totals(of=unknown_list, wrt=indep_list)\n assert_near_equal(J['c7.y1', 'iv.x'][0][0], -40.75, 1e-6)\n\n assert_near_equal(prob['c7.y1'], -102.7, 1e-6)\n\n def test_zero_shape(self):\n raise unittest.SkipTest(\"zero shapes not fully supported yet\")\n class MultComp(ExplicitComponent):\n def __init__(self, mult):\n self.mult = mult\n super().__init__()\n\n def setup(self):\n if self.comm.rank == 0:\n self.add_input('x', shape=1)\n self.add_output('y', shape=1)\n else:\n self.add_input('x', shape=0)\n self.add_output('y', shape=0)\n\n def compute(self, inputs, 
outputs):\n outputs['y'] = inputs['x'] * self.mult\n\n def compute_partials(self, inputs, partials):\n partials['y', 'x'] = np.array([self.mult])\n\n prob = om.Problem()\n\n model = prob.model\n model.add_subsystem('iv', om.IndepVarComp('x', 1.0))\n model.add_subsystem('c1', MultComp(3.0))\n\n model.sub = model.add_subsystem('sub', om.ParallelGroup())\n model.sub.add_subsystem('c2', MultComp(-2.0))\n model.sub.add_subsystem('c3', MultComp(5.0))\n\n model.add_subsystem('c2', MultComp(1.0))\n model.add_subsystem('c3', MultComp(1.0))\n\n model.connect('iv.x', 'c1.x')\n\n model.connect('c1.y', 'sub.c2.x')\n model.connect('c1.y', 'sub.c3.x')\n\n model.connect('sub.c2.y', 'c2.x')\n model.connect('sub.c3.y', 'c3.x')\n\n of=['c2.y', \"c3.y\"]\n wrt=['iv.x']\n\n prob.setup(check=False, mode='fwd')\n prob.set_solver_print(level=0)\n prob.run_model()\n\n J = prob.compute_totals(of=['c2.y', \"c3.y\"], wrt=['iv.x'])\n\n assert_near_equal(J['c2.y', 'iv.x'][0][0], -6.0, 1e-6)\n assert_near_equal(J['c3.y', 'iv.x'][0][0], 15.0, 1e-6)\n\n assert_near_equal(prob['c2.y'], -6.0, 1e-6)\n assert_near_equal(prob['c3.y'], 15.0, 1e-6)\n\n prob.setup(check=False, mode='rev')\n prob.run_model()\n\n J = prob.compute_totals(of=['c2.y', \"c3.y\"], wrt=['iv.x'])\n\n assert_near_equal(J['c2.y', 'iv.x'][0][0], -6.0, 1e-6)\n assert_near_equal(J['c3.y', 'iv.x'][0][0], 15.0, 1e-6)\n\n assert_near_equal(prob['c2.y'], -6.0, 1e-6)\n assert_near_equal(prob['c3.y'], 15.0, 1e-6)\n\n def test_setup_messages_bad_vec_type(self):\n\n prob = om.Problem(Noisy())\n\n # check that error is thrown if not using PETScVector\n if MPI:\n msg = (\"Problem .*: The `distributed_vector_class` argument must be a distributed vector \"\n \"class like `PETScVector` when running in parallel under MPI but 'DefaultVector' \"\n \"was specified which is not distributed\\.\")\n with self.assertRaisesRegex(ValueError, msg):\n prob.setup(check=False, mode='fwd', distributed_vector_class=om.DefaultVector)\n else:\n prob.setup(check=False, mode='fwd')\n\n def test_setup_messages_only_on_proc0(self):\n prob = om.Problem(Noisy())\n\n # check that we get setup messages only on proc 0\n msg = 'Only want to see this on rank 0'\n testlogger = TestLogger()\n prob.setup(check=True, mode='fwd', logger=testlogger)\n prob.final_setup()\n\n if prob.comm.rank > 0:\n self.assertEqual(len(testlogger.get('error')), 0)\n self.assertEqual(len(testlogger.get('warning')), 0)\n self.assertEqual(len(testlogger.get('info')), 0)\n else:\n self.assertEqual(len(testlogger.get('error')), 1)\n self.assertTrue(testlogger.contains('warning',\n \"Only want to see this on rank 0\"))\n self.assertEqual(len(testlogger.get('info')), len(_default_checks) + 1)\n self.assertTrue(msg in testlogger.get('error')[0])\n for info in testlogger.get('info'):\n if msg in info:\n break\n else:\n self.fail(\"Didn't find '%s' in info messages.\" % msg)\n\n\[email protected](MPI and PETScVector, \"MPI and PETSc are required.\")\nclass TestParallelListStates(unittest.TestCase):\n\n N_PROCS = 4\n\n def test_list_states_allprocs(self):\n class StateComp(om.ImplicitComponent):\n\n def initialize(self):\n self.mtx = np.array([\n [0.99, 0.01],\n [0.01, 0.99],\n ])\n\n def setup(self):\n self.add_input('rhs', val=np.ones(2))\n self.add_output('x', val=np.zeros(2))\n\n self.declare_partials(of='*', wrt='*')\n\n def apply_nonlinear(self, inputs, outputs, residuals):\n residuals['x'] = self.mtx.dot(outputs['x']) - inputs['rhs']\n\n def solve_nonlinear(self, inputs, outputs):\n outputs['x'] = np.linalg.solve(self.mtx, 
inputs['rhs'])\n\n p = om.Problem()\n par = p.model.add_subsystem('par', om.ParallelGroup())\n par.add_subsystem('C1', StateComp())\n par.add_subsystem('C2', StateComp())\n par.add_subsystem('C3', om.ExecComp('y=2.0*x'))\n par.add_subsystem('C4', StateComp())\n\n p.setup()\n p.final_setup()\n self.assertEqual(sorted(p.model._list_states_allprocs()), ['par.C1.x', 'par.C2.x', 'par.C4.x'])\n\n\nclass ExComp(om.ExplicitComponent):\n def initialize(self):\n self.options.declare('num_nodes', types=int)\n\n def setup(self):\n nn = self.options['num_nodes']\n # Inputs\n self.add_input('accel', val=np.zeros(nn))\n self.add_output('deltav_dot', val=np.zeros(nn))\n # Setup partials\n ar = np.arange(self.options['num_nodes'])\n self.declare_partials(of='deltav_dot', wrt='accel', rows=ar, cols=ar, val=1.0)\n\n def compute(self, inputs, outputs):\n outputs['deltav_dot'] = inputs['accel']\n\n\nclass SubGroup(om.Group):\n def __init__(self, size, **kwargs):\n super().__init__(**kwargs)\n self.size = size\n\n def setup(self):\n ivc = om.IndepVarComp()\n ivc.add_output('accel', val=np.ones(self.size))\n self.add_subsystem('rhs', ivc)\n self.add_subsystem('ode', ExComp(num_nodes=self.size))\n self.connect('rhs.accel', 'ode.accel')\n self.add_design_var('rhs.accel', 3.0)\n\n\[email protected](MPI and PETScVector, \"MPI and PETSc are required.\")\nclass TestParallelJacBug(unittest.TestCase):\n\n N_PROCS = 2\n\n def test_par_jac_bug(self):\n\n p = om.Problem()\n model = p.model\n par = model.add_subsystem('par', om.ParallelGroup())\n par.add_subsystem('p1', SubGroup(1))\n par.add_subsystem('p2', SubGroup(1))\n p.setup(mode='rev')\n p.run_model()\n J1 = p.driver._compute_totals(of=['par.p1.ode.deltav_dot'], wrt=['par.p1.ode.deltav_dot'],\n return_format='array')\n Jsave = J1.copy()\n J2 = p.driver._compute_totals(of=['par.p1.ode.deltav_dot'], wrt=['par.p1.ode.deltav_dot'],\n return_format='array')\n\n self.assertLess(np.max(np.abs(J2 - Jsave)), 1e-20)\n\n\ndef _make_tree_model():\n p = om.Problem()\n model = p.model\n\n units1 = units2 = 'ft'\n\n val = 1.0\n\n g1 = model.add_subsystem(\"G1\", om.Group(), promotes_inputs=['x'])\n\n g2 = g1.add_subsystem(\"G2\", om.Group(), promotes_inputs=['x'])\n g2.add_subsystem(\"C1\", om.ExecComp(\"y = 2. * x\",\n x={'val': val, 'units': units2},\n y={'val': 1.0, 'units': units2}),\n promotes_inputs=['x'])\n g2.add_subsystem(\"C2\", om.ExecComp(\"y = 3. * x\",\n x={'val': val, 'units': units1},\n y={'val': 1.0, 'units': units1}),\n promotes_inputs=['x'])\n\n g3 = model.add_subsystem(\"G3\", om.Group(), promotes_inputs=['x'])\n g3.add_subsystem(\"C3\", om.ExecComp(\"y = 4. * x\",\n x={'val': val, 'units': units1},\n y={'val': 1.0, 'units': units1}),\n promotes_inputs=['x'])\n g3.add_subsystem(\"C4\", om.ExecComp(\"y = 5. * x\",\n x={'val': val, 'units': units2},\n y={'val': 1.0, 'units': units2}),\n promotes_inputs=['x'])\n\n par = model.add_subsystem(\"par\", om.ParallelGroup(), promotes_inputs=['x'])\n\n g4 = par.add_subsystem(\"G4\", om.Group(), promotes_inputs=['x'])\n g4.add_subsystem(\"C5\", om.ExecComp(\"y = 6. * x\",\n x={'val': val, 'units': units2},\n y={'val': 1.0, 'units': units2}),\n promotes_inputs=['x'])\n g4.add_subsystem(\"C6\", om.ExecComp(\"y = 7. * x\",\n x={'val': val, 'units': units1},\n y={'val': 1.0, 'units': units1}),\n promotes_inputs=['x'])\n\n g5 = par.add_subsystem(\"G5\", om.Group(), promotes_inputs=['x'])\n g5.add_subsystem(\"C7\", om.ExecComp(\"y = 8. 
* x\",\n x={'val': val, 'units': units1},\n y={'val': 1.0, 'units': units1}),\n promotes_inputs=['x'])\n g5.add_subsystem(\"C8\", om.ExecComp(\"y = 9. * x\",\n x={'val': val, 'units': units2},\n y={'val': 1.0, 'units': units2}),\n promotes_inputs=['x'])\n\n model.add_subsystem(\"C9\", om.ExecComp(\"y = 10. * x\",\n x={'val': val, 'units': units2},\n y={'val': 1.0, 'units': units2}),\n promotes_inputs=['x'])\n\n return p\n\n\[email protected](MPI and PETScVector, \"MPI and PETSc are required.\")\nclass TestParallelOrdering(unittest.TestCase):\n\n N_PROCS = 2\n\n def test_get_ordered_comps(self):\n from openmdao.core.component import Component\n p = _make_tree_model()\n p.setup()\n p.run_model()\n ordered_names = list(p.model._ordered_comp_name_iter())\n self.assertEqual(ordered_names, ['_auto_ivc', 'G1.G2.C1', 'G1.G2.C2', 'G3.C3', 'G3.C4', 'par.G4.C5', 'par.G4.C6', 'par.G5.C7', 'par.G5.C8', 'C9'])\n locnames = [s.pathname for s in p.model.system_iter(recurse=True, typ=Component)]\n if p.model.comm.rank == 0:\n self.assertEqual(locnames, ['_auto_ivc', 'G1.G2.C1', 'G1.G2.C2', 'G3.C3', 'G3.C4', 'par.G4.C5', 'par.G4.C6', 'C9'])\n else:\n self.assertEqual(locnames, ['_auto_ivc', 'G1.G2.C1', 'G1.G2.C2', 'G3.C3', 'G3.C4', 'par.G5.C7', 'par.G5.C8', 'C9'])\n\n\nif __name__ == \"__main__\":\n from openmdao.utils.mpi import mpirun_tests\n mpirun_tests()\n",
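The fan-in tests above assert that `c3.y` equals 29.0 and that the totals with respect to the two upstream inputs are -6.0 and 35.0. The following self-contained sketch reproduces that topology outside the test harness; the component equations and expected values come from `test_fan_in_grouped_feature` and the `FanInGrouped2` assertions above, while the exact subsystem wiring, the script name, and the `mpirun` launch line are assumptions of the sketch, not the test suite's own code.

import openmdao.api as om

p = om.Problem()
model = p.model

# Two independent inputs feed one branch each of a ParallelGroup. Under MPI with
# two processes each branch runs on its own rank; the same model also runs serially.
model.add_subsystem('p1', om.IndepVarComp('x', 1.0))
model.add_subsystem('p2', om.IndepVarComp('x', 1.0))

par = model.add_subsystem('parallel', om.ParallelGroup())
par.add_subsystem('c1', om.ExecComp('y = -2.0*x'))
par.add_subsystem('c2', om.ExecComp('y = 5.0*x'))

model.add_subsystem('c3', om.ExecComp('y = 3.0*x1 + 7.0*x2'))

model.connect('p1.x', 'parallel.c1.x')
model.connect('p2.x', 'parallel.c2.x')
model.connect('parallel.c1.y', 'c3.x1')
model.connect('parallel.c2.y', 'c3.x2')

p.setup(mode='fwd')
p.run_model()

print(p['c3.y'])                                   # 29.0
J = p.compute_totals(of=['c3.y'], wrt=['p1.x', 'p2.x'])
print(J['c3.y', 'p1.x'], J['c3.y', 'p2.x'])        # [[-6.]] [[35.]]

Run it with a plain `python` invocation for a serial check, or under MPI (e.g. `mpirun -n 2 python fan_in_sketch.py`, filename hypothetical) to put the two branches on separate ranks, which is what `N_PROCS = 2` arranges for the test classes above.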
"\"\"\"Define the PETSc Vector class.\"\"\"\nfrom openmdao.utils.mpi import MPI\n\nCITATION = '''@InProceedings{petsc-efficient,\n Author = \"Satish Balay and William D. Gropp and Lois Curfman McInnes and Barry F. Smith\",\n Title = \"Efficient Management of Parallelism in Object Oriented Numerical Software Libraries\",\n Booktitle = \"Modern Software Tools in Scientific Computing\",\n Editor = \"E. Arge and A. M. Bruaset and H. P. Langtangen\",\n Pages = \"163--202\",\n Publisher = \"Birkh{\\\"{a}}user Press\",\n Year = \"1997\"\n}'''\n\nif MPI is None:\n PETScVector = None\nelse:\n import numpy as np\n\n from petsc4py import PETSc\n from openmdao.core.constants import INT_DTYPE\n from openmdao.vectors.default_vector import DefaultVector\n from openmdao.vectors.petsc_transfer import PETScTransfer\n\n class PETScVector(DefaultVector):\n \"\"\"\n PETSc Vector implementation for running in parallel.\n\n Most methods use the DefaultVector's implementation.\n\n Parameters\n ----------\n name : str\n The name of the vector: 'nonlinear' or 'linear'.\n kind : str\n The kind of vector, 'input', 'output', or 'residual'.\n system : <System>\n Pointer to the owning system.\n root_vector : <Vector>\n Pointer to the vector owned by the root system.\n alloc_complex : bool\n Whether to allocate any imaginary storage to perform complex step. Default is False.\n\n Attributes\n ----------\n _dup_inds : ndarray of int\n Array of indices of variables that aren't locally owned, meaning that they duplicate\n variables that are 'owned' by a different process. Used by certain distributed\n calculations, e.g., get_norm(), where including duplicate values would result in\n the wrong answer.\n \"\"\"\n\n TRANSFER = PETScTransfer\n cite = CITATION\n distributed = True\n\n def __init__(self, name, kind, system, root_vector=None, alloc_complex=False):\n \"\"\"\n Initialize all attributes.\n \"\"\"\n super().__init__(name, kind, system, root_vector=root_vector,\n alloc_complex=alloc_complex)\n\n self._dup_inds = None\n\n def _initialize_data(self, root_vector):\n \"\"\"\n Internally allocate vectors.\n\n Parameters\n ----------\n root_vector : Vector or None\n the root's vector instance or None, if we are at the root.\n \"\"\"\n super()._initialize_data(root_vector)\n\n self._petsc = {}\n self._imag_petsc = {}\n data = self._data.real\n\n if self._alloc_complex:\n self._petsc = PETSc.Vec().createWithArray(data.copy(), comm=self._system().comm)\n else:\n self._petsc = PETSc.Vec().createWithArray(data, comm=self._system().comm)\n\n # Allocate imaginary for complex step\n if self._alloc_complex:\n data = self._data.imag\n self._imag_petsc = PETSc.Vec().createWithArray(data, comm=self._system().comm)\n\n def _get_dup_inds(self):\n \"\"\"\n Compute the indices into the data vector corresponding to non-distributed variables.\n\n Returns\n -------\n ndarray of int\n Index array corresponding to non-distributed variables.\n \"\"\"\n if self._dup_inds is None:\n system = self._system()\n if system.comm.size > 1:\n # Here, we find the indices that are not locally owned so that we can\n # temporarilly zero them out for the norm calculation.\n dup_inds = []\n abs2meta = system._var_allprocs_abs2meta[self._typ]\n for name, idx_slice in self.get_slice_dict().items():\n owning_rank = system._owning_rank[name]\n if not abs2meta[name]['distributed'] and owning_rank != system.comm.rank:\n dup_inds.extend(range(idx_slice.start, idx_slice.stop))\n\n self._dup_inds = np.array(dup_inds, dtype=INT_DTYPE)\n else:\n self._dup_inds = 
np.array([], dtype=INT_DTYPE)\n\n return self._dup_inds\n\n def _get_nodup(self):\n \"\"\"\n Retrieve a version of the data vector with any duplicate variables zeroed out.\n\n Returns\n -------\n ndarray\n Array the same size as our data array with duplicate variables zeroed out.\n If all variables are owned by this process, then the data array itself is\n returned without copying.\n \"\"\"\n dup_inds = self._get_dup_inds()\n has_dups = dup_inds.size > 0\n\n if has_dups:\n data_cache = self.asarray(copy=True)\n data_cache[dup_inds] = 0.0\n else:\n data_cache = self._get_data()\n\n return data_cache\n\n def _restore_dups(self):\n \"\"\"\n Restore our petsc array so that it corresponds once again to our local data array.\n\n This is done to restore the petsc array after we previously zeroed out all duplicated\n values.\n \"\"\"\n self._petsc.array = self._get_data()\n\n def get_norm(self):\n \"\"\"\n Return the norm of this vector.\n\n Returns\n -------\n float\n Norm of this vector.\n \"\"\"\n nodup = self._get_nodup()\n self._petsc.array = nodup.real\n distributed_norm = self._petsc.norm()\n self._restore_dups()\n return distributed_norm\n\n def dot(self, vec):\n \"\"\"\n Compute the dot product of the real parts of the current vec and the incoming vec.\n\n Parameters\n ----------\n vec : <Vector>\n The incoming vector being dotted with self.\n\n Returns\n -------\n float\n The computed dot product value.\n \"\"\"\n nodup = self._get_nodup()\n # we don't need to _resore_dups here since we don't modify _petsc.array.\n return self._system().comm.allreduce(np.dot(nodup, vec._get_data()))\n",
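`_get_nodup` above zeroes the entries of variables this process does not own before PETSc reduces the norm across ranks; otherwise every local copy of a duplicated, non-distributed variable would be counted once per process. A tiny NumPy-only illustration of that double counting follows (no MPI; the two "rank" arrays are just local copies invented for the example).

import numpy as np

# A non-distributed value owned by rank 0 but present in both ranks' local
# arrays, plus one distributed variable split across the two ranks.
rank0 = np.array([3.0, 1.0])   # [shared_copy, dist_part_0]
rank1 = np.array([3.0, 2.0])   # [shared_copy, dist_part_1]

# Naive "sum of squares on every rank, then reduce" counts the shared value twice.
naive = np.sqrt(np.sum(rank0**2) + np.sum(rank1**2))          # sqrt(23) ~ 4.796

# Zeroing the duplicate on the non-owning rank (what _get_nodup does) recovers
# the norm of the true global vector [3, 1, 2].
rank1_nodup = rank1.copy()
rank1_nodup[0] = 0.0
correct = np.sqrt(np.sum(rank0**2) + np.sum(rank1_nodup**2))  # sqrt(14) ~ 3.742

print(naive, correct, np.linalg.norm([3.0, 1.0, 2.0]))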
"\"\"\"\nA simple component used for derivative testing.\n\"\"\"\nimport time\n\nimport numpy as np\n\nimport openmdao.api as om\n\n\nclass MatMultComp(om.ExplicitComponent):\n def __init__(self, mat, approx_method='exact', sleep_time=0.1, **kwargs):\n super().__init__(**kwargs)\n self.mat = mat\n self.approx_method = approx_method\n self.sleep_time = sleep_time\n\n def setup(self):\n self.add_input('x', val=np.ones(self.mat.shape[1]))\n self.add_output('y', val=np.zeros(self.mat.shape[0]))\n self.num_computes = 0\n\n def setup_partials(self):\n self.declare_partials('*', '*', method=self.approx_method)\n\n def compute(self, inputs, outputs):\n outputs['y'] = self.mat.dot(inputs['x'])\n self.num_computes += 1\n time.sleep(self.sleep_time)\n\n\nif __name__ == '__main__':\n import sys\n\n import openmdao.api as om\n from openmdao.utils.mpi import MPI\n\n if len(sys.argv) > 1:\n size = int(sys.argv[1])\n else:\n size = 20\n\n if MPI:\n ncom = MPI.COMM_WORLD.size\n if MPI.COMM_WORLD.rank == 0:\n mat = np.random.random(5 * size).reshape((5, size)) - 0.5\n else:\n mat = None\n mat = MPI.COMM_WORLD.bcast(mat, root=0)\n profname = \"prof_%d.out\" % MPI.COMM_WORLD.rank\n else:\n mat = np.random.random(5 * size).reshape((5, size)) - 0.5\n profname = \"prof.out\"\n\n print(\"mat shape:\", mat.shape)\n\n p = om.Problem()\n model = p.model\n model.add_subsystem('indep', om.IndepVarComp('x', val=np.ones(mat.shape[1])))\n comp = model.add_subsystem('comp', MatMultComp(mat, approx_method='fd', num_par_fd=5))\n\n model.connect('indep.x', 'comp.x')\n\n p.setup(mode='fwd', force_alloc_complex=True)\n p.run_model()\n\n start = time.time()\n J = p.compute_totals(of=['comp.y'], wrt=['indep.x'])\n\n print(\"norm J - mat:\", np.linalg.norm(J['comp.y','indep.x'] - comp.mat))\n print(\"Elapsed time:\", time.time() - start)\n\n",
"import numpy as np\n\nimport openmdao.api as om\n\n\nclass ArrayComp(om.ExplicitComponent):\n\n def setup(self):\n\n J1 = np.array([[1.0, 3.0, -2.0, 7.0],\n [6.0, 2.5, 2.0, 4.0],\n [-1.0, 0.0, 8.0, 1.0],\n [1.0, 4.0, -5.0, 6.0]])\n\n self.J1 = J1\n self.J2 = J1 * 3.3\n self.Jb = J1.T\n\n # Inputs\n self.add_input('x1', np.zeros([4]))\n self.add_input('x2', np.zeros([4]))\n self.add_input('bb', np.zeros([4]))\n\n # Outputs\n self.add_output('y1', np.zeros([4]))\n\n self.exec_count = 0\n self.set_check_partial_options('x*', directional=True)\n\n def setup_partials(self):\n self.declare_partials(of='*', wrt='*')\n\n\n def compute(self, inputs, outputs):\n \"\"\"\n Execution.\n \"\"\"\n outputs['y1'] = self.J1.dot(inputs['x1']) + self.J2.dot(inputs['x2']) + self.Jb.dot(inputs['bb'])\n self.exec_count += 1\n\n def compute_partials(self, inputs, partials):\n \"\"\"\n Analytical derivatives.\n \"\"\"\n partials[('y1', 'x1')] = self.J1\n partials[('y1', 'x2')] = self.J2\n partials[('y1', 'bb')] = self.Jb\n",
"\"\"\" Unit tests for the ScipyOptimizeDriver.\"\"\"\n\nimport unittest\nimport sys\nfrom io import StringIO\n\nfrom distutils.version import LooseVersion\n\nimport numpy as np\nfrom scipy import __version__ as scipy_version\n\nimport openmdao.api as om\nfrom openmdao.test_suite.components.expl_comp_array import TestExplCompArrayDense, TestExplCompArraySparse, TestExplCompArrayJacVec\nfrom openmdao.test_suite.components.paraboloid import Paraboloid\nfrom openmdao.test_suite.components.paraboloid_distributed import DistParab\nfrom openmdao.test_suite.components.sellar import SellarDerivativesGrouped, SellarDerivatives\nfrom openmdao.test_suite.components.sellar_feature import SellarMDA\nfrom openmdao.test_suite.components.simple_comps import NonSquareArrayComp\nfrom openmdao.test_suite.groups.sin_fitter import SineFitter\nfrom openmdao.utils.assert_utils import assert_near_equal, assert_warning\nfrom openmdao.utils.general_utils import run_driver\nfrom openmdao.utils.mpi import MPI\n\ntry:\n from openmdao.parallel_api import PETScVector\n vector_class = PETScVector\nexcept ImportError:\n vector_class = om.DefaultVector\n PETScVector = None\n\nrosenbrock_size = 6 # size of the design variable\n\ndef rosenbrock(x):\n x_0 = x[:-1]\n x_1 = x[1:]\n return sum((1 - x_0) ** 2) + 100 * sum((x_1 - x_0 ** 2) ** 2)\n\n\nclass Rosenbrock(om.ExplicitComponent):\n\n def setup(self):\n self.add_input('x', np.ones(rosenbrock_size))\n self.add_output('f', 0.0)\n\n def compute(self, inputs, outputs, discrete_inputs=None, discrete_outputs=None):\n x = inputs['x']\n outputs['f'] = rosenbrock(x)\n\ndef rastrigin(x):\n a = 10 # constant\n return np.sum(np.square(x) - a * np.cos(2 * np.pi * x)) + a * np.size(x)\n\n\nclass DummyComp(om.ExplicitComponent):\n \"\"\"\n Evaluates the equation f(x,y) = (x-3)^2 + xy + (y+4)^2 - 3.\n \"\"\"\n\n def setup(self):\n self.add_input('x', val=0.0)\n self.add_input('y', val=0.0)\n\n self.add_output('c', val=0.0)\n\n self.declare_partials('*', '*', method='cs')\n\n def compute(self, inputs, outputs):\n \"\"\"\n f(x,y) = (x-3)^2 + xy + (y+4)^2 - 3\n\n Optimal solution (minimum): x = 6.6667; y = -7.3333\n \"\"\"\n x = inputs['x']\n y = inputs['y']\n\n noise = 1e-10\n if self.comm.rank == 0:\n outputs['c'] = (x-3.0)**2 + x*y + (y+4.0)**2 - 3.0\n if self.comm.rank == 1:\n outputs['c'] = (x-3.0)**2 + x*y + (y+4.0)**2 - 3.0 + noise\n\n def compute_partials(self, inputs, partials):\n \"\"\"\n Jacobian for our paraboloid.\n \"\"\"\n x = inputs['x']\n y = inputs['y']\n\n partials['c', 'x'] = 2.0*x - 6.0 + y\n partials['c', 'y'] = 2.0*y + 8.0 + x\n\n\[email protected](MPI, \"MPI is required.\")\nclass TestMPIScatter(unittest.TestCase):\n N_PROCS = 2\n\n def test_design_vars_on_all_procs_scipy(self):\n\n prob = om.Problem()\n model = prob.model\n\n model.set_input_defaults('x', 50.0)\n model.set_input_defaults('y', 50.0)\n\n model.add_subsystem('comp', Paraboloid(), promotes=['*'])\n model.add_subsystem('con', DummyComp(), promotes=['*'])\n\n prob.set_solver_print(level=0)\n\n prob.driver = om.ScipyOptimizeDriver(optimizer='SLSQP', tol=1e-6, disp=False)\n\n model.add_design_var('x', lower=-50.0, upper=50.0)\n model.add_design_var('y', lower=-50.0, upper=50.0)\n model.add_objective('f_xy')\n model.add_constraint('c', lower=-15.0)\n\n prob.setup()\n prob.run_driver()\n\n proc_vals = MPI.COMM_WORLD.allgather([prob['x'], prob['y'], prob['c'], prob['f_xy']])\n np.testing.assert_array_almost_equal(proc_vals[0], proc_vals[1])\n\n def test_opt_distcomp(self):\n size = 7\n\n prob = om.Problem()\n 
model = prob.model\n\n ivc = om.IndepVarComp()\n ivc.add_output('x', np.ones((size, )))\n ivc.add_output('y', np.ones((size, )))\n ivc.add_output('a', -3.0 + 0.6 * np.arange(size))\n\n model.add_subsystem('p', ivc, promotes=['*'])\n model.add_subsystem(\"parab\", DistParab(arr_size=size, deriv_type='dense'), promotes=['*'])\n model.add_subsystem('sum', om.ExecComp('f_sum = sum(f_xy)',\n f_sum=np.ones((size, )),\n f_xy=np.ones((size, ))),\n promotes_outputs=['*'])\n model.promotes('sum', inputs=['f_xy'], src_indices=om.slicer[:])\n\n model.add_design_var('x', lower=-50.0, upper=50.0)\n model.add_design_var('y', lower=-50.0, upper=50.0)\n model.add_constraint('f_xy', lower=0.0)\n model.add_objective('f_sum', index=-1)\n\n prob.driver = om.ScipyOptimizeDriver(optimizer='SLSQP')\n\n prob.setup(force_alloc_complex=True)\n\n prob.run_driver()\n\n desvar = prob.driver.get_design_var_values()\n con = prob.driver.get_constraint_values()\n obj = prob.driver.get_objective_values()\n\n assert_near_equal(obj['sum.f_sum'], 0.0, 2e-6)\n assert_near_equal(con['parab.f_xy'],\n np.zeros(7),\n 1e-5)\n\n\[email protected](MPI and PETScVector, \"MPI and PETSc are required.\")\nclass TestScipyOptimizeDriverMPI(unittest.TestCase):\n N_PROCS = 2\n\n def test_optimization_output_single_proc(self):\n prob = om.Problem()\n prob.model = SellarMDA()\n prob.driver = om.ScipyOptimizeDriver(optimizer='SLSQP', tol=1e-8)\n\n prob.model.add_design_var('x', lower=0, upper=10)\n prob.model.add_design_var('z', lower=0, upper=10)\n prob.model.add_objective('obj')\n prob.model.add_constraint('con1', upper=0)\n prob.model.add_constraint('con2', upper=0)\n\n # Ask OpenMDAO to finite-difference across the model to compute the gradients for the optimizer\n prob.model.approx_totals()\n\n prob.setup()\n prob.set_solver_print(level=0)\n\n stdout = sys.stdout\n strout = StringIO()\n sys.stdout = strout\n try:\n prob.run_driver()\n finally:\n sys.stdout = stdout\n output = strout.getvalue().split('\\n')\n\n msg = \"Optimization Complete\"\n if MPI.COMM_WORLD.rank == 0:\n self.assertEqual(msg, output[5])\n self.assertEqual(output.count(msg), 1)\n else:\n self.assertNotEqual(msg, output[0])\n self.assertNotEqual(output.count(msg), 1)\n\n\nclass TestScipyOptimizeDriver(unittest.TestCase):\n\n def test_driver_supports(self):\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('p1', om.IndepVarComp('x', 50.0), promotes=['*'])\n\n prob.set_solver_print(level=0)\n\n prob.driver = om.ScipyOptimizeDriver(optimizer='SLSQP', tol=1e-9, disp=False)\n\n with self.assertRaises(KeyError) as raises_msg:\n prob.driver.supports['equality_constraints'] = False\n\n exception = raises_msg.exception\n\n msg = \"ScipyOptimizeDriver: Tried to set read-only option 'equality_constraints'.\"\n\n self.assertEqual(exception.args[0], msg)\n\n def test_compute_totals_basic_return_array(self):\n # Make sure 'array' return_format works.\n\n prob = om.Problem()\n model = prob.model\n\n model.set_input_defaults('x', val=0.)\n model.set_input_defaults('y', val=0.)\n\n model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])\n\n model.add_design_var('x', lower=-50.0, upper=50.0)\n model.add_design_var('y', lower=-50.0, upper=50.0)\n model.add_objective('f_xy')\n\n prob.setup(check=False, mode='fwd')\n prob.set_solver_print(level=0)\n\n failed = prob.run_driver()\n\n self.assertFalse(failed, \"Optimization failed.\")\n\n of = ['f_xy']\n wrt = ['x', 'y']\n derivs = prob.compute_totals(of=of, wrt=wrt, return_format='array')\n\n 
assert_near_equal(derivs[0, 0], -6.0, 1e-6)\n assert_near_equal(derivs[0, 1], 8.0, 1e-6)\n\n prob.setup(check=False, mode='rev')\n\n prob.run_model()\n\n of = ['f_xy']\n wrt = ['x', 'y']\n derivs = prob.compute_totals(of=of, wrt=wrt, return_format='array')\n\n assert_near_equal(derivs[0, 0], -6.0, 1e-6)\n assert_near_equal(derivs[0, 1], 8.0, 1e-6)\n\n def test_compute_totals_return_array_non_square(self):\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('px', om.IndepVarComp(name=\"x\", val=np.ones((2, ))))\n comp = model.add_subsystem('comp', NonSquareArrayComp())\n model.connect('px.x', 'comp.x1')\n\n model.add_design_var('px.x')\n model.add_objective('px.x')\n model.add_constraint('comp.y1')\n model.add_constraint('comp.y2')\n\n prob.setup(check=False, mode='auto')\n\n failed = prob.run_driver()\n\n self.assertFalse(failed, \"Optimization failed.\")\n\n derivs = prob.compute_totals(of=['comp.y1'], wrt=['px.x'], return_format='array')\n\n J = comp.JJ[0:3, 0:2]\n assert_near_equal(J, derivs, 1.0e-3)\n\n # Support for a name to be in 'of' and 'wrt'\n\n derivs = prob.compute_totals(of=['comp.y2', 'px.x', 'comp.y1'],\n wrt=['px.x'],\n return_format='array')\n\n assert_near_equal(J, derivs[3:, :], 1.0e-3)\n assert_near_equal(comp.JJ[3:4, 0:2], derivs[0:1, :], 1.0e-3)\n assert_near_equal(np.eye(2), derivs[1:3, :], 1.0e-3)\n\n def test_deriv_wrt_self(self):\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('px', om.IndepVarComp(name=\"x\", val=np.ones((2, ))))\n\n model.add_design_var('px.x')\n model.add_objective('px.x')\n\n prob.setup()\n\n failed = prob.run_driver()\n\n self.assertFalse(failed, \"Optimization failed.\")\n\n # Support for a name to be in 'of' and 'wrt'\n\n J = prob.driver._compute_totals(of=['px.x'], wrt=['px.x'],\n return_format='array')\n\n assert_near_equal(J, np.eye(2), 1.0e-3)\n\n def test_scipy_optimizer_simple_paraboloid_unconstrained(self):\n\n prob = om.Problem()\n model = prob.model\n\n model.set_input_defaults('x', val=50.)\n model.set_input_defaults('y', val=50.)\n\n model.add_subsystem('comp', Paraboloid(), promotes=['*'])\n\n prob.set_solver_print(level=0)\n\n prob.driver = om.ScipyOptimizeDriver(optimizer='SLSQP', tol=1e-9, disp=False)\n\n model.add_design_var('x', lower=-50.0, upper=50.0)\n model.add_design_var('y', lower=-50.0, upper=50.0)\n model.add_objective('f_xy')\n\n prob.setup()\n\n prob.set_val('x', 50.)\n prob.set_val('y', 50.)\n\n failed = prob.run_driver()\n\n self.assertFalse(failed, \"Optimization failed, result =\\n\" +\n str(prob.driver.result))\n\n assert_near_equal(prob['x'], 6.66666667, 1e-6)\n assert_near_equal(prob['y'], -7.3333333, 1e-6)\n\n def test_simple_paraboloid_unconstrained(self):\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('p1', om.IndepVarComp('x', 50.0), promotes=['*'])\n model.add_subsystem('p2', om.IndepVarComp('y', 50.0), promotes=['*'])\n model.add_subsystem('comp', Paraboloid(), promotes=['*'])\n\n prob.set_solver_print(level=0)\n\n prob.driver = om.ScipyOptimizeDriver(optimizer='SLSQP', tol=1e-9, disp=False)\n\n model.add_design_var('x', lower=-50.0, upper=50.0)\n model.add_design_var('y', lower=-50.0, upper=50.0)\n model.add_objective('f_xy')\n\n prob.setup()\n\n failed = prob.run_driver()\n\n self.assertFalse(failed, \"Optimization failed, result =\\n\" +\n str(prob.driver.result))\n\n assert_near_equal(prob['x'], 6.66666667, 1e-6)\n assert_near_equal(prob['y'], -7.3333333, 1e-6)\n\n def test_simple_paraboloid_unconstrained_COBYLA(self):\n prob = 
om.Problem()\n model = prob.model\n\n model.set_input_defaults('x', val=50.)\n model.set_input_defaults('y', val=50.)\n\n model.add_subsystem('comp', Paraboloid(), promotes=['*'])\n\n prob.set_solver_print(level=0)\n\n prob.driver = om.ScipyOptimizeDriver(optimizer='COBYLA', tol=1e-9, disp=False)\n\n model.add_design_var('x', lower=-50.0, upper=50.0)\n model.add_design_var('y', lower=-50.0, upper=50.0)\n model.add_objective('f_xy')\n\n prob.setup()\n\n failed = prob.run_driver()\n\n self.assertFalse(failed, \"Optimization failed, result =\\n\" +\n str(prob.driver.result))\n\n assert_near_equal(prob['x'], 6.66666667, 1e-6)\n assert_near_equal(prob['y'], -7.3333333, 1e-6)\n\n def test_simple_paraboloid_upper(self):\n\n prob = om.Problem()\n model = prob.model\n\n model.set_input_defaults('x', val=50.)\n model.set_input_defaults('y', val=50.)\n\n model.add_subsystem('comp', Paraboloid(), promotes=['*'])\n model.add_subsystem('con', om.ExecComp('c = - x + y'), promotes=['*'])\n\n prob.set_solver_print(level=0)\n\n prob.driver = om.ScipyOptimizeDriver(optimizer='SLSQP', tol=1e-9, disp=False)\n\n model.add_design_var('x', lower=-50.0, upper=50.0)\n model.add_design_var('y', lower=-50.0, upper=50.0)\n model.add_objective('f_xy')\n model.add_constraint('c', upper=-15.0)\n\n prob.setup()\n\n failed = prob.run_driver()\n\n self.assertFalse(failed, \"Optimization failed, result =\\n\" +\n str(prob.driver.result))\n\n # Minimum should be at (7.166667, -7.833334)\n assert_near_equal(prob['x'], 7.16667, 1e-6)\n assert_near_equal(prob['y'], -7.833334, 1e-6)\n\n def test_simple_paraboloid_lower(self):\n\n prob = om.Problem()\n model = prob.model\n\n model.set_input_defaults('x', val=50.)\n model.set_input_defaults('y', val=50.)\n\n model.add_subsystem('comp', Paraboloid(), promotes=['*'])\n model.add_subsystem('con', om.ExecComp('c = x - y'), promotes=['*'])\n\n prob.set_solver_print(level=0)\n\n prob.driver = om.ScipyOptimizeDriver(optimizer='SLSQP', tol=1e-9, disp=False)\n\n model.add_design_var('x', lower=-50.0, upper=50.0)\n model.add_design_var('y', lower=-50.0, upper=50.0)\n\n model.add_objective('f_xy')\n model.add_constraint('c', lower=15.0)\n\n prob.setup()\n\n failed = prob.run_driver()\n\n self.assertFalse(failed, \"Optimization failed, result =\\n\" +\n str(prob.driver.result))\n\n # Minimum should be at (7.166667, -7.833334)\n assert_near_equal(prob['x'], 7.16667, 1e-6)\n assert_near_equal(prob['y'], -7.833334, 1e-6)\n\n def test_simple_paraboloid_equality(self):\n\n prob = om.Problem()\n model = prob.model\n\n model.set_input_defaults('x', val=50.)\n model.set_input_defaults('y', val=50.)\n\n model.add_subsystem('comp', Paraboloid(), promotes=['*'])\n model.add_subsystem('con', om.ExecComp('c = - x + y'), promotes=['*'])\n\n prob.set_solver_print(level=0)\n\n prob.driver = om.ScipyOptimizeDriver(optimizer='SLSQP', tol=1e-9, disp=False)\n\n model.add_design_var('x', lower=-50.0, upper=50.0)\n model.add_design_var('y', lower=-50.0, upper=50.0)\n model.add_objective('f_xy')\n model.add_constraint('c', equals=-15.0)\n\n prob.setup()\n\n failed = prob.run_driver()\n\n self.assertFalse(failed, \"Optimization failed, result =\\n\" +\n str(prob.driver.result))\n\n # Minimum should be at (7.166667, -7.833334)\n # (Note, loose tol because of appveyor py3.4 machine.)\n assert_near_equal(prob['x'], 7.16667, 1e-4)\n assert_near_equal(prob['y'], -7.833334, 1e-4)\n\n def test_unsupported_equality(self):\n\n prob = om.Problem()\n model = prob.model\n\n model.set_input_defaults('x', val=50.)\n 
model.set_input_defaults('y', val=50.)\n\n model.add_subsystem('comp', Paraboloid(), promotes=['*'])\n model.add_subsystem('con', om.ExecComp('c = - x + y'), promotes=['*'])\n\n prob.set_solver_print(level=0)\n\n prob.driver = om.ScipyOptimizeDriver(optimizer='COBYLA', tol=1e-9, disp=False)\n\n model.add_design_var('x', lower=-50.0, upper=50.0)\n model.add_design_var('y', lower=-50.0, upper=50.0)\n model.add_objective('f_xy')\n model.add_constraint('c', equals=-15.0)\n\n prob.setup()\n\n with self.assertRaises(Exception) as raises_cm:\n prob.run_driver()\n\n exception = raises_cm.exception\n\n msg = \"Constraints of type 'eq' not handled by COBYLA.\"\n\n self.assertEqual(exception.args[0], msg)\n\n def test_scipy_missing_objective(self):\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('x', om.IndepVarComp('x', 2.0), promotes=['*'])\n model.add_subsystem('f_x', Paraboloid(), promotes=['*'])\n\n prob.driver = om.ScipyOptimizeDriver(optimizer='SLSQP')\n\n prob.model.add_design_var('x', lower=0)\n # prob.model.add_constraint('x', lower=0)\n\n prob.setup()\n\n with self.assertRaises(Exception) as raises_msg:\n prob.run_driver()\n\n exception = raises_msg.exception\n\n msg = \"Driver requires objective to be declared\"\n\n self.assertEqual(exception.args[0], msg)\n\n def test_simple_paraboloid_double_sided_low(self):\n\n prob = om.Problem()\n model = prob.model\n\n model.set_input_defaults('x', val=50.)\n model.set_input_defaults('y', val=50.)\n\n model.add_subsystem('comp', Paraboloid(), promotes=['*'])\n model.add_subsystem('con', om.ExecComp('c = - x + y'), promotes=['*'])\n\n prob.set_solver_print(level=0)\n\n prob.driver = om.ScipyOptimizeDriver(optimizer='SLSQP', tol=1e-9, disp=False)\n\n model.add_design_var('x', lower=-50.0, upper=50.0)\n model.add_design_var('y', lower=-50.0, upper=50.0)\n model.add_objective('f_xy')\n model.add_constraint('c', lower=-11.0, upper=-10.0)\n\n prob.setup()\n\n failed = prob.run_driver()\n\n self.assertFalse(failed, \"Optimization failed, result =\\n\" +\n str(prob.driver.result))\n\n assert_near_equal(prob['y'] - prob['x'], -11.0, 1e-6)\n\n def test_simple_paraboloid_double_sided_high(self):\n\n prob = om.Problem()\n model = prob.model\n\n model.set_input_defaults('x', val=50.)\n model.set_input_defaults('y', val=50.)\n\n model.add_subsystem('comp', Paraboloid(), promotes=['*'])\n model.add_subsystem('con', om.ExecComp('c = x - y'), promotes=['*'])\n\n prob.set_solver_print(level=0)\n\n prob.driver = om.ScipyOptimizeDriver(optimizer='SLSQP', tol=1e-9, disp=False)\n\n model.add_design_var('x', lower=-50.0, upper=50.0)\n model.add_design_var('y', lower=-50.0, upper=50.0)\n model.add_objective('f_xy')\n model.add_constraint('c', lower=10.0, upper=11.0)\n\n prob.setup()\n\n failed = prob.run_driver()\n\n self.assertFalse(failed, \"Optimization failed, result =\\n\" +\n str(prob.driver.result))\n\n assert_near_equal(prob['x'] - prob['y'], 11.0, 1e-6)\n\n def test_simple_array_comp2D(self):\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('p1', om.IndepVarComp('widths', np.zeros((2, 2))), promotes=['*'])\n model.add_subsystem('comp', TestExplCompArrayDense(), promotes=['*'])\n model.add_subsystem('con', om.ExecComp('c = areas - 20.0', c=np.zeros((2, 2)),\n areas=np.zeros((2, 2))),\n promotes=['*'])\n model.add_subsystem('obj', om.ExecComp('o = areas[0, 0]', areas=np.zeros((2, 2))),\n promotes=['*'])\n\n prob.set_solver_print(level=0)\n\n prob.driver = om.ScipyOptimizeDriver(optimizer='SLSQP', tol=1e-9, disp=False)\n\n 
model.add_design_var('widths', lower=-50.0, upper=50.0)\n model.add_objective('o')\n model.add_constraint('c', equals=0.0)\n\n prob.setup()\n\n failed = prob.run_driver()\n\n self.assertFalse(failed, \"Optimization failed, result =\\n\" +\n str(prob.driver.result))\n\n obj = prob['o']\n assert_near_equal(obj, 20.0, 1e-6)\n\n def test_simple_array_comp2D_eq_con(self):\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('p1', om.IndepVarComp('widths', np.zeros((2, 2))), promotes=['*'])\n model.add_subsystem('comp', TestExplCompArrayDense(), promotes=['*'])\n model.add_subsystem('obj', om.ExecComp('o = areas[0, 0] + areas[1, 1]', areas=np.zeros((2, 2))),\n promotes=['*'])\n\n prob.set_solver_print(level=0)\n\n prob.driver = om.ScipyOptimizeDriver(optimizer='SLSQP', tol=1e-9, disp=False)\n\n model.add_design_var('widths', lower=-50.0, upper=50.0)\n model.add_objective('o')\n model.add_constraint('areas', equals=np.array([24.0, 21.0, 3.5, 17.5]))\n\n prob.setup()\n\n failed = prob.run_driver()\n\n self.assertFalse(failed, \"Optimization failed, result =\\n\" +\n str(prob.driver.result))\n\n obj = prob['o']\n assert_near_equal(obj, 41.5, 1e-6)\n\n def test_simple_array_comp2D_sparse_eq_con(self):\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('p1', om.IndepVarComp('widths', np.zeros((2, 2))), promotes=['*'])\n model.add_subsystem('comp', TestExplCompArraySparse(), promotes=['*'])\n model.add_subsystem('obj', om.ExecComp('o = areas[0, 0] + areas[1, 1]', areas=np.zeros((2, 2))),\n promotes=['*'])\n\n prob.set_solver_print(level=0)\n\n prob.driver = om.ScipyOptimizeDriver(optimizer='SLSQP', tol=1e-9, disp=False)\n\n model.add_design_var('widths', lower=-50.0, upper=50.0)\n model.add_objective('o')\n model.add_constraint('areas', equals=np.array([24.0, 21.0, 3.5, 17.5]))\n\n prob.setup()\n\n failed = prob.run_driver()\n\n self.assertFalse(failed, \"Optimization failed, result =\\n\" +\n str(prob.driver.result))\n\n obj = prob['o']\n assert_near_equal(obj, 41.5, 1e-6)\n\n def test_simple_array_comp2D_jacvec_eq_con(self):\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('p1', om.IndepVarComp('widths', np.zeros((2, 2))), promotes=['*'])\n model.add_subsystem('comp', TestExplCompArrayJacVec(), promotes=['*'])\n model.add_subsystem('obj', om.ExecComp('o = areas[0, 0] + areas[1, 1]', areas=np.zeros((2, 2))),\n promotes=['*'])\n\n prob.set_solver_print(level=0)\n\n prob.driver = om.ScipyOptimizeDriver(optimizer='SLSQP', tol=1e-9, disp=False)\n\n model.add_design_var('widths', lower=-50.0, upper=50.0)\n model.add_objective('o')\n model.add_constraint('areas', equals=np.array([24.0, 21.0, 3.5, 17.5]))\n\n prob.setup()\n\n failed = prob.run_driver()\n\n self.assertFalse(failed, \"Optimization failed, result =\\n\" +\n str(prob.driver.result))\n\n obj = prob['o']\n assert_near_equal(obj, 41.5, 1e-6)\n\n def test_simple_array_comp2D_dbl_sided_con(self):\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('p1', om.IndepVarComp('widths', np.zeros((2, 2))), promotes=['*'])\n model.add_subsystem('comp', TestExplCompArrayDense(), promotes=['*'])\n model.add_subsystem('obj', om.ExecComp('o = areas[0, 0]', areas=np.zeros((2, 2))),\n promotes=['*'])\n\n prob.set_solver_print(level=0)\n\n prob.driver = om.ScipyOptimizeDriver(optimizer='SLSQP', tol=1e-9, disp=False)\n\n model.add_design_var('widths', lower=-50.0, upper=50.0)\n model.add_objective('o')\n model.add_constraint('areas', lower=np.array([24.0, 21.0, 3.5, 17.5]), upper=np.array([24.0, 
21.0, 3.5, 17.5]))\n\n prob.setup()\n\n failed = prob.run_driver()\n\n self.assertFalse(failed, \"Optimization failed, result =\\n\" +\n str(prob.driver.result))\n\n con = prob['areas']\n assert_near_equal(con, np.array([[24.0, 21.0], [3.5, 17.5]]), 1e-6)\n\n def test_simple_array_comp2D_dbl_sided_con_array(self):\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('p1', om.IndepVarComp('widths', np.zeros((2, 2))), promotes=['*'])\n model.add_subsystem('comp', TestExplCompArrayDense(), promotes=['*'])\n model.add_subsystem('obj', om.ExecComp('o = areas[0, 0]', areas=np.zeros((2, 2))),\n promotes=['*'])\n\n prob.set_solver_print(level=0)\n\n prob.driver = om.ScipyOptimizeDriver(optimizer='SLSQP', tol=1e-9, disp=False)\n\n model.add_design_var('widths', lower=-50.0, upper=50.0)\n model.add_objective('o')\n model.add_constraint('areas', lower=20.0, upper=20.0)\n\n prob.setup()\n\n failed = prob.run_driver()\n\n self.assertFalse(failed, \"Optimization failed, result =\\n\" +\n str(prob.driver.result))\n\n obj = prob['o']\n assert_near_equal(obj, 20.0, 1e-6)\n\n def test_simple_array_comp2D_array_lo_hi(self):\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('p1', om.IndepVarComp('widths', np.zeros((2, 2))), promotes=['*'])\n model.add_subsystem('comp', TestExplCompArrayDense(), promotes=['*'])\n model.add_subsystem('con', om.ExecComp('c = areas - 20.0', c=np.zeros((2, 2)), areas=np.zeros((2, 2))),\n promotes=['*'])\n model.add_subsystem('obj', om.ExecComp('o = areas[0, 0]', areas=np.zeros((2, 2))),\n promotes=['*'])\n\n prob.set_solver_print(level=0)\n\n prob.driver = om.ScipyOptimizeDriver(optimizer='SLSQP', tol=1e-9, disp=False)\n\n model.add_design_var('widths', lower=-50.0*np.ones((2, 2)), upper=50.0*np.ones((2, 2)))\n model.add_objective('o')\n model.add_constraint('c', equals=0.0)\n\n prob.setup()\n\n failed = prob.run_driver()\n\n self.assertFalse(failed, \"Optimization failed, result =\\n\" +\n str(prob.driver.result))\n\n obj = prob['o']\n assert_near_equal(obj, 20.0, 1e-6)\n\n def test_simple_paraboloid_scaled_desvars_fwd(self):\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('p1', om.IndepVarComp('x', 50.0), promotes=['*'])\n model.add_subsystem('p2', om.IndepVarComp('y', 50.0), promotes=['*'])\n model.add_subsystem('comp', Paraboloid(), promotes=['*'])\n model.add_subsystem('con', om.ExecComp('c = x - y'), promotes=['*'])\n\n prob.set_solver_print(level=0)\n\n prob.driver = om.ScipyOptimizeDriver(optimizer='SLSQP', tol=1e-9, disp=False)\n\n model.add_design_var('x', lower=-50.0, upper=50.0, ref=.02)\n model.add_design_var('y', lower=-50.0, upper=50.0, ref=.02)\n model.add_objective('f_xy')\n model.add_constraint('c', lower=10.0, upper=11.0)\n\n prob.setup(check=False, mode='fwd')\n\n failed = prob.run_driver()\n\n self.assertFalse(failed, \"Optimization failed, result =\\n\" +\n str(prob.driver.result))\n\n assert_near_equal(prob['x'] - prob['y'], 11.0, 1e-6)\n\n def test_simple_paraboloid_scaled_desvars_rev(self):\n\n prob = om.Problem()\n model = prob.model\n\n model.set_input_defaults('x', val=50.)\n model.set_input_defaults('y', val=50.)\n\n model.add_subsystem('comp', Paraboloid(), promotes=['*'])\n model.add_subsystem('con', om.ExecComp('c = x - y'), promotes=['*'])\n\n prob.set_solver_print(level=0)\n\n prob.driver = om.ScipyOptimizeDriver(optimizer='SLSQP', tol=1e-9, disp=False)\n\n model.add_design_var('x', lower=-50.0, upper=50.0, ref=.02)\n model.add_design_var('y', lower=-50.0, upper=50.0, ref=.02)\n 
model.add_objective('f_xy')\n model.add_constraint('c', lower=10.0, upper=11.0)\n\n prob.setup(check=False, mode='rev')\n\n failed = prob.run_driver()\n\n self.assertFalse(failed, \"Optimization failed, result =\\n\" +\n str(prob.driver.result))\n\n assert_near_equal(prob['x'] - prob['y'], 11.0, 1e-6)\n\n def test_simple_paraboloid_scaled_constraint_fwd(self):\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('p1', om.IndepVarComp('x', 50.0), promotes=['*'])\n model.add_subsystem('p2', om.IndepVarComp('y', 50.0), promotes=['*'])\n model.add_subsystem('comp', Paraboloid(), promotes=['*'])\n model.add_subsystem('con', om.ExecComp('c = x - y'), promotes=['*'])\n\n prob.set_solver_print(level=0)\n\n prob.driver = om.ScipyOptimizeDriver(optimizer='SLSQP', tol=1e-9, disp=False)\n\n model.add_design_var('x', lower=-50.0, upper=50.0)\n model.add_design_var('y', lower=-50.0, upper=50.0)\n model.add_objective('f_xy')\n model.add_constraint('c', lower=10.0, upper=11.0, ref=10.)\n\n prob.setup(check=False, mode='fwd')\n\n failed = prob.run_driver()\n\n self.assertFalse(failed, \"Optimization failed, result =\\n\" +\n str(prob.driver.result))\n\n assert_near_equal(prob['x'] - prob['y'], 11.0, 1e-6)\n\n def test_simple_paraboloid_scaled_objective_fwd(self):\n\n prob = om.Problem()\n model = prob.model\n\n prob.set_solver_print(level=0)\n\n model.add_subsystem('p1', om.IndepVarComp('x', 50.0), promotes=['*'])\n model.add_subsystem('p2', om.IndepVarComp('y', 50.0), promotes=['*'])\n model.add_subsystem('comp', Paraboloid(), promotes=['*'])\n model.add_subsystem('con', om.ExecComp('c = x - y'), promotes=['*'])\n\n prob.driver = om.ScipyOptimizeDriver(optimizer='SLSQP', tol=1e-9, disp=False)\n\n model.add_design_var('x', lower=-50.0, upper=50.0)\n model.add_design_var('y', lower=-50.0, upper=50.0)\n model.add_objective('f_xy', ref=10.)\n model.add_constraint('c', lower=10.0, upper=11.0)\n\n prob.setup(check=False, mode='fwd')\n\n failed = prob.run_driver()\n\n self.assertFalse(failed, \"Optimization failed, result =\\n\" +\n str(prob.driver.result))\n\n assert_near_equal(prob['x'] - prob['y'], 11.0, 1e-6)\n\n def test_simple_paraboloid_scaled_objective_rev(self):\n\n prob = om.Problem()\n model = prob.model\n\n prob.set_solver_print(level=0)\n\n model.add_subsystem('p1', om.IndepVarComp('x', 50.0), promotes=['*'])\n model.add_subsystem('p2', om.IndepVarComp('y', 50.0), promotes=['*'])\n model.add_subsystem('comp', Paraboloid(), promotes=['*'])\n model.add_subsystem('con', om.ExecComp('c = x - y'), promotes=['*'])\n\n prob.driver = om.ScipyOptimizeDriver(optimizer='SLSQP', tol=1e-9, disp=False)\n\n model.add_design_var('x', lower=-50.0, upper=50.0)\n model.add_design_var('y', lower=-50.0, upper=50.0)\n model.add_objective('f_xy', ref=10.)\n model.add_constraint('c', lower=10.0, upper=11.0)\n\n prob.setup(check=False, mode='rev')\n\n failed = prob.run_driver()\n\n self.assertFalse(failed, \"Optimization failed, result =\\n\" +\n str(prob.driver.result))\n\n assert_near_equal(prob['x'] - prob['y'], 11.0, 1e-6)\n\n def test_sellar_mdf(self):\n\n prob = om.Problem()\n model = prob.model = SellarDerivativesGrouped()\n\n prob.driver = om.ScipyOptimizeDriver(optimizer='SLSQP', tol=1e-9, disp=False)\n\n model.add_design_var('z', lower=np.array([-10.0, 0.0]), upper=np.array([10.0, 10.0]))\n model.add_design_var('x', lower=0.0, upper=10.0)\n model.add_objective('obj')\n model.add_constraint('con1', upper=0.0)\n model.add_constraint('con2', upper=0.0)\n\n prob.setup(check=False, mode='rev')\n\n 
failed = prob.run_driver()\n\n self.assertFalse(failed, \"Optimization failed, result =\\n\" +\n str(prob.driver.result))\n\n assert_near_equal(prob['z'][0], 1.9776, 1e-3)\n assert_near_equal(prob['z'][1], 0.0, 1e-3)\n assert_near_equal(prob['x'], 0.0, 1e-3)\n\n def test_bug_in_eq_constraints(self):\n # We were getting extra constraints created because lower and upper are maxfloat instead of\n # None when unused.\n p = om.Problem(model=SineFitter())\n p.driver = om.ScipyOptimizeDriver()\n\n p.setup()\n p.run_driver()\n\n max_defect = np.max(np.abs(p['defect.defect']))\n assert_near_equal(max_defect, 0.0, 1e-10)\n\n def test_reraise_exception_from_callbacks(self):\n class ReducedActuatorDisc(om.ExplicitComponent):\n\n def setup(self):\n\n # Inputs\n self.add_input('a', 0.5, desc=\"Induced Velocity Factor\")\n self.add_input('Vu', 10.0, units=\"m/s\", desc=\"Freestream air velocity, upstream of rotor\")\n\n # Outputs\n self.add_output('Vd', 0.0, units=\"m/s\",\n desc=\"Slipstream air velocity, downstream of rotor\")\n\n def compute(self, inputs, outputs):\n a = inputs['a']\n Vu = inputs['Vu']\n\n outputs['Vd'] = Vu * (1 - 2 * a)\n\n def compute_partials(self, inputs, J):\n Vu = inputs['Vu']\n\n J['Vd', 'a'] = -2.0 * Vu\n\n prob = om.Problem()\n indeps = prob.model.add_subsystem('indeps', om.IndepVarComp(), promotes=['*'])\n indeps.add_output('a', .5)\n indeps.add_output('Vu', 10.0, units='m/s')\n\n prob.model.add_subsystem('a_disk', ReducedActuatorDisc(),\n promotes_inputs=['a', 'Vu'])\n\n # setup the optimization\n prob.driver = om.ScipyOptimizeDriver(optimizer='SLSQP')\n\n prob.model.add_design_var('a', lower=0., upper=1.)\n # negative one so we maximize the objective\n prob.model.add_objective('a_disk.Vd', scaler=-1)\n\n prob.setup()\n\n with self.assertRaises(KeyError) as context:\n prob.run_driver()\n\n msg = 'Variable name pair (\"Vd\", \"a\") must first be declared.'\n self.assertTrue(msg in str(context.exception))\n\n def test_simple_paraboloid_upper_COBYLA(self):\n\n prob = om.Problem()\n model = prob.model\n\n model.set_input_defaults('x', val=50.)\n model.set_input_defaults('y', val=50.)\n\n model.add_subsystem('comp', Paraboloid(), promotes=['*'])\n model.add_subsystem('con', om.ExecComp('c = - x + y'), promotes=['*'])\n\n prob.set_solver_print(level=0)\n\n prob.driver = om.ScipyOptimizeDriver(optimizer='COBYLA', tol=1e-9, disp=False)\n\n model.add_design_var('x', lower=-50.0, upper=50.0)\n model.add_design_var('y', lower=-50.0, upper=50.0)\n model.add_objective('f_xy')\n model.add_constraint('c', upper=-15.0)\n\n prob.setup()\n\n failed = prob.run_driver()\n\n self.assertFalse(failed, \"Optimization failed, result =\\n\" +\n str(prob.driver.result))\n\n # Minimum should be at (7.166667, -7.833334)\n assert_near_equal(prob['x'], 7.16667, 1e-6)\n assert_near_equal(prob['y'], -7.833334, 1e-6)\n\n def test_simple_paraboloid_desvar_indices_COBYLA(self):\n # verify indices are handled properly when creating constraints for\n # upper and lower bounds on design variables for COBYLA\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('indep', om.IndepVarComp('xy', val=[-1., 50., 50., -1]))\n model.add_subsystem('comp', Paraboloid())\n model.add_subsystem('cons', om.ExecComp('c = - x + y'))\n\n model.connect('indep.xy', ['comp.x', 'cons.x'], src_indices=[1])\n model.connect('indep.xy', ['comp.y', 'cons.y'], src_indices=[2])\n\n model.add_design_var('indep.xy', indices=[1,2], lower=[-50.0, -50.0], upper=[50.0, 50.0])\n model.add_objective('comp.f_xy')\n 
model.add_constraint('cons.c', upper=-15.0)\n\n prob.driver = om.ScipyOptimizeDriver(optimizer='COBYLA', tol=1e-9, disp=False)\n prob.setup()\n\n failed = prob.run_driver()\n\n self.assertFalse(failed, \"Optimization failed, result =\\n\" +\n str(prob.driver.result))\n\n # Minimum should be at (7.166667, -7.833334)\n assert_near_equal(prob['indep.xy'], [-1, 7.16667, -7.833334, -1], 1e-6)\n\n def test_sellar_mdf_COBYLA(self):\n\n prob = om.Problem()\n model = prob.model = SellarDerivativesGrouped()\n\n prob.driver = om.ScipyOptimizeDriver(optimizer='COBYLA', tol=1e-9, disp=False)\n\n model.add_design_var('z', lower=np.array([-10.0, 0.0]), upper=np.array([10.0, 10.0]))\n model.add_design_var('x', lower=0.0, upper=10.0)\n model.add_objective('obj')\n model.add_constraint('con1', upper=0.0)\n model.add_constraint('con2', upper=0.0)\n\n prob.setup(check=False, mode='rev')\n\n failed = prob.run_driver()\n\n self.assertFalse(failed, \"Optimization failed, result =\\n\" +\n str(prob.driver.result))\n\n assert_near_equal(prob['z'][0], 1.9776, 1e-3)\n assert_near_equal(prob['z'][1], 0.0, 1e-3)\n assert_near_equal(prob['x'], 0.0, 1e-3)\n\n @unittest.skipUnless(LooseVersion(scipy_version) >= LooseVersion(\"1.1\"),\n \"scipy >= 1.1 is required.\")\n def test_trust_constr(self):\n\n class Rosenbrock(om.ExplicitComponent):\n\n def setup(self):\n self.add_input('x', np.array([1.5, 1.5, 1.5]))\n self.add_output('f', 0.0)\n self.declare_partials('f', 'x', method='fd', form='central', step=1e-2)\n\n def compute(self, inputs, outputs, discrete_inputs=None, discrete_outputs=None):\n x = inputs['x']\n outputs['f'] = rosenbrock(x)\n\n x0 = np.array([1.2, 0.8, 1.3])\n\n prob = om.Problem()\n model = prob.model\n indeps = prob.model.add_subsystem('indeps', om.IndepVarComp(), promotes=['*'])\n indeps.add_output('x', list(x0))\n\n prob.model.add_subsystem('rosen', Rosenbrock(), promotes=['*'])\n prob.model.add_subsystem('con', om.ExecComp('c=sum(x)', x=np.ones(3)), promotes=['*'])\n prob.driver = driver = om.ScipyOptimizeDriver()\n driver.options['optimizer'] = 'trust-constr'\n driver.options['tol'] = 1e-8\n driver.options['maxiter'] = 2000\n driver.options['disp'] = False\n\n model.add_design_var('x')\n model.add_objective('f', scaler=1/rosenbrock(x0))\n model.add_constraint('c', lower=0, upper=10) # Double sided\n\n prob.setup()\n prob.run_driver()\n\n assert_near_equal(prob['x'], np.ones(3), 2e-2)\n assert_near_equal(prob['f'], 0., 1e-2)\n self.assertTrue(prob['c'] < 10)\n self.assertTrue(prob['c'] > 0)\n\n @unittest.skipUnless(LooseVersion(scipy_version) >= LooseVersion(\"1.1\"),\n \"scipy >= 1.1 is required.\")\n def test_trust_constr_hess_option(self):\n\n class Rosenbrock(om.ExplicitComponent):\n\n def setup(self):\n self.add_input('x', np.array([1.5, 1.5, 1.5]))\n self.add_output('f', 0.0)\n self.declare_partials('f', 'x', method='fd', form='central', step=1e-3)\n\n def compute(self, inputs, outputs, discrete_inputs=None, discrete_outputs=None):\n x = inputs['x']\n outputs['f'] = rosenbrock(x)\n\n x0 = np.array([1.2, 0.8, 1.3])\n\n prob = om.Problem()\n model = prob.model\n indeps = prob.model.add_subsystem('indeps', om.IndepVarComp(), promotes=['*'])\n indeps.add_output('x', list(x0))\n\n prob.model.add_subsystem('rosen', Rosenbrock(), promotes=['*'])\n prob.model.add_subsystem('con', om.ExecComp('c=sum(x)', x=np.ones(3)), promotes=['*'])\n prob.driver = driver = om.ScipyOptimizeDriver()\n driver.options['optimizer'] = 'trust-constr'\n driver.options['tol'] = 1e-8\n driver.options['maxiter'] = 2000\n 
driver.options['disp'] = False\n driver.opt_settings['hess'] = '2-point'\n\n model.add_design_var('x')\n model.add_objective('f', scaler=1/rosenbrock(x0))\n model.add_constraint('c', lower=0, upper=10) # Double sided\n\n prob.setup()\n prob.run_driver()\n\n assert_near_equal(prob['x'], np.ones(3), 2e-2)\n assert_near_equal(prob['f'], 0., 1e-2)\n self.assertTrue(prob['c'] < 10)\n self.assertTrue(prob['c'] > 0)\n\n @unittest.skipUnless(LooseVersion(scipy_version) >= LooseVersion(\"1.1\"),\n \"scipy >= 1.1 is required.\")\n def test_trust_constr_equality_con(self):\n\n class Rosenbrock(om.ExplicitComponent):\n\n def setup(self):\n self.add_input('x', np.array([1.5, 1.5, 1.5]))\n self.add_output('f', 0.0)\n self.declare_partials('f', 'x', method='fd', form='central', step=1e-4)\n\n def compute(self, inputs, outputs, discrete_inputs=None, discrete_outputs=None):\n x = inputs['x']\n outputs['f'] = rosenbrock(x)\n\n x0 = np.array([0.5, 0.8, 1.4])\n\n prob = om.Problem()\n model = prob.model\n indeps = prob.model.add_subsystem('indeps', om.IndepVarComp())\n indeps.add_output('x', list(x0))\n\n model.add_subsystem('rosen', Rosenbrock())\n model.add_subsystem('con', om.ExecComp('c=sum(x)', x=np.ones(3)))\n model.connect('indeps.x', 'rosen.x')\n model.connect('indeps.x', 'con.x')\n prob.driver = driver = om.ScipyOptimizeDriver()\n driver.options['optimizer'] = 'trust-constr'\n driver.options['tol'] = 1e-5\n driver.options['maxiter'] = 2000\n driver.options['disp'] = False\n\n model.add_design_var('indeps.x')\n model.add_objective('rosen.f', scaler=1/rosenbrock(x0))\n model.add_constraint('con.c', equals=1.)\n\n prob.setup()\n prob.run_driver()\n\n assert_near_equal(prob['con.c'], 1., 1e-3)\n\n @unittest.skipUnless(LooseVersion(scipy_version) >= LooseVersion(\"1.2\"),\n \"scipy >= 1.2 is required.\")\n def test_trust_constr_inequality_con(self):\n\n class Sphere(om.ExplicitComponent):\n\n def setup(self):\n self.add_input('x', np.array([1.5, 1.5]))\n self.add_output('f', 0.0)\n self.declare_partials('f', 'x', method='fd', form='central', step=1e-4)\n\n def compute(self, inputs, outputs, discrete_inputs=None, discrete_outputs=None):\n x = inputs['x']\n outputs['f'] = sum(x**2)\n\n x0 = np.array([1.2, 1.5])\n\n prob = om.Problem()\n indeps = prob.model.add_subsystem('indeps', om.IndepVarComp(), promotes=['*'])\n indeps.add_output('x', list(x0))\n\n prob.model.add_subsystem('sphere', Sphere(), promotes=['*'])\n prob.model.add_subsystem('con', om.ExecComp('c=sum(x)', x=np.ones(2)), promotes=['*'])\n prob.driver = om.ScipyOptimizeDriver()\n prob.driver.options['optimizer'] = 'trust-constr'\n prob.driver.options['tol'] = 1e-5\n prob.driver.options['maxiter'] = 2000\n prob.driver.options['disp'] = False\n\n prob.model.add_design_var('x')\n prob.model.add_objective('f')\n prob.model.add_constraint('c', lower=1.0)\n\n prob.setup()\n prob.run_driver()\n\n assert_near_equal(prob['c'], 1.0, 1e-2)\n\n @unittest.skipUnless(LooseVersion(scipy_version) >= LooseVersion(\"1.2\"),\n \"scipy >= 1.2 is required.\")\n def test_trust_constr_bounds(self):\n class Rosenbrock(om.ExplicitComponent):\n\n def setup(self):\n self.add_input('x', np.array([-1.5, -1.5]))\n self.add_output('f', 1000.0)\n self.declare_partials('f', 'x', method='fd', form='central', step=1e-3)\n\n def compute(self, inputs, outputs, discrete_inputs=None, discrete_outputs=None):\n x = inputs['x']\n outputs['f'] = sum(x ** 2)\n\n x0 = np.array([-1.5, -1.5])\n\n prob = om.Problem()\n indeps = prob.model.add_subsystem('indeps', om.IndepVarComp(), 
promotes=['*'])\n indeps.add_output('x', list(x0))\n\n prob.model.add_subsystem('sphere', Rosenbrock(), promotes=['*'])\n prob.driver = om.ScipyOptimizeDriver()\n prob.driver.options['optimizer'] = 'trust-constr'\n prob.driver.options['tol'] = 1e-7\n prob.driver.options['maxiter'] = 2000\n prob.driver.options['disp'] = False\n\n prob.model.add_design_var('x', lower=np.array([-2., -2.]), upper=np.array([-1., -1.2]))\n prob.model.add_objective('f', scaler=1)\n\n prob.setup()\n prob.run_driver()\n\n assert_near_equal(prob['x'][0], -1., 1e-2)\n assert_near_equal(prob['x'][1], -1.2, 1e-2)\n\n def test_simple_paraboloid_lower_linear(self):\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('p1', om.IndepVarComp('x', 50.0), promotes=['*'])\n model.add_subsystem('p2', om.IndepVarComp('y', 50.0), promotes=['*'])\n model.add_subsystem('comp', Paraboloid(), promotes=['*'])\n model.add_subsystem('con', om.ExecComp('c = x - y'), promotes=['*'])\n\n prob.set_solver_print(level=0)\n\n prob.driver = om.ScipyOptimizeDriver(optimizer='SLSQP', tol=1e-9, disp=False)\n\n model.add_design_var('x', lower=-50.0, upper=50.0)\n model.add_design_var('y', lower=-50.0, upper=50.0)\n model.add_objective('f_xy')\n model.add_constraint('c', lower=15.0, linear=True)\n\n prob.setup()\n\n failed = prob.run_driver()\n\n self.assertFalse(failed, \"Optimization failed, result =\\n\" +\n str(prob.driver.result))\n\n # Minimum should be at (7.166667, -7.833334)\n assert_near_equal(prob['x'], 7.16667, 1e-6)\n assert_near_equal(prob['y'], -7.833334, 1e-6)\n\n self.assertEqual(prob.driver._obj_and_nlcons, ['comp.f_xy'])\n\n def test_simple_paraboloid_equality_linear(self):\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('p1', om.IndepVarComp('x', 50.0), promotes=['*'])\n model.add_subsystem('p2', om.IndepVarComp('y', 50.0), promotes=['*'])\n model.add_subsystem('comp', Paraboloid(), promotes=['*'])\n model.add_subsystem('con', om.ExecComp('c = - x + y'), promotes=['*'])\n\n prob.set_solver_print(level=0)\n\n prob.driver = om.ScipyOptimizeDriver(optimizer='SLSQP', tol=1e-9, disp=False)\n\n model.add_design_var('x', lower=-50.0, upper=50.0)\n model.add_design_var('y', lower=-50.0, upper=50.0)\n model.add_objective('f_xy')\n model.add_constraint('c', equals=-15.0, linear=True)\n\n prob.setup()\n\n failed = prob.run_driver()\n\n self.assertFalse(failed, \"Optimization failed, result =\\n\" +\n str(prob.driver.result))\n\n # Minimum should be at (7.166667, -7.833334)\n assert_near_equal(prob['x'], 7.16667, 1e-6)\n assert_near_equal(prob['y'], -7.833334, 1e-6)\n\n def test_debug_print_option_totals(self):\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('p1', om.IndepVarComp('x', 50.0), promotes=['*'])\n model.add_subsystem('p2', om.IndepVarComp('y', 50.0), promotes=['*'])\n model.add_subsystem('comp', Paraboloid(), promotes=['*'])\n model.add_subsystem('con', om.ExecComp('c = - x + y'), promotes=['*'])\n\n prob.set_solver_print(level=0)\n\n prob.driver = om.ScipyOptimizeDriver(optimizer='SLSQP', tol=1e-9, disp=False)\n\n prob.driver.options['debug_print'] = ['totals']\n\n model.add_design_var('x', lower=-50.0, upper=50.0)\n model.add_design_var('y', lower=-50.0, upper=50.0)\n model.add_objective('f_xy')\n model.add_constraint('c', upper=-15.0)\n\n prob.setup(check=False, mode='rev')\n\n failed, output = run_driver(prob)\n\n self.assertFalse(failed, \"Optimization failed.\")\n\n self.assertTrue('In mode: rev.' 
in output)\n self.assertTrue(\"('comp.f_xy', [0])\" in output)\n self.assertTrue('Elapsed Time:' in output)\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('p1', om.IndepVarComp('x', 50.0), promotes=['*'])\n model.add_subsystem('p2', om.IndepVarComp('y', 50.0), promotes=['*'])\n model.add_subsystem('comp', Paraboloid(), promotes=['*'])\n model.add_subsystem('con', om.ExecComp('c = - x + y'), promotes=['*'])\n\n prob.set_solver_print(level=0)\n\n prob.driver = om.ScipyOptimizeDriver(optimizer='SLSQP', tol=1e-9, disp=False)\n\n prob.driver.options['debug_print'] = ['totals']\n\n model.add_design_var('x', lower=-50.0, upper=50.0)\n model.add_design_var('y', lower=-50.0, upper=50.0)\n model.add_objective('f_xy')\n model.add_constraint('c', upper=-15.0)\n\n prob.setup(check=False, mode='fwd')\n\n failed, output = run_driver(prob)\n\n self.assertFalse(failed, \"Optimization failed.\")\n\n self.assertTrue('In mode: fwd.' in output)\n self.assertTrue(\"('p1.x', [0])\" in output)\n self.assertTrue('Elapsed Time:' in output)\n\n def test_debug_print_all_options(self):\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('p1', om.IndepVarComp('x', 50.0), promotes=['*'])\n model.add_subsystem('p2', om.IndepVarComp('y', 50.0), promotes=['*'])\n model.add_subsystem('comp', Paraboloid(), promotes=['*'])\n model.add_subsystem('con', om.ExecComp('c = - x + y'), promotes=['*'])\n\n prob.set_solver_print(level=0)\n\n prob.driver = om.ScipyOptimizeDriver(optimizer='SLSQP', tol=1e-9, disp=False)\n prob.driver.options['debug_print'] = ['desvars', 'ln_cons', 'nl_cons', 'objs']\n\n model.add_design_var('x', lower=-50.0, upper=50.0)\n model.add_design_var('y', lower=-50.0, upper=50.0)\n model.add_objective('f_xy')\n model.add_constraint('c', upper=-15.0)\n\n prob.setup()\n\n failed, output = run_driver(prob)\n\n self.assertFalse(failed, \"Optimization failed.\")\n\n output = output.split('\\n')\n\n self.assertTrue(output.count(\"Design Vars\") > 1,\n \"Should be more than one design vars header printed\")\n self.assertTrue(output.count(\"Nonlinear constraints\") > 1,\n \"Should be more than one nonlinear constraint header printed\")\n self.assertTrue(output.count(\"Linear constraints\") > 1,\n \"Should be more than one linear constraint header printed\")\n self.assertTrue(output.count(\"Objectives\") > 1,\n \"Should be more than one objective header printed\")\n self.assertTrue(len([s for s in output if s.startswith(\"{'p1.x\")]) > 1,\n \"Should be more than one p1.x printed\")\n self.assertTrue(len([s for s in output if \"'p2.y'\" in s]) > 1,\n \"Should be more than one p2.y printed\")\n self.assertTrue(len([s for s in output if s.startswith(\"{'con.c\")]) > 1,\n \"Should be more than one con.c printed\")\n self.assertTrue(len([s for s in output if s.startswith(\"{'comp.f_xy\")]) > 1,\n \"Should be more than one comp.f_xy printed\")\n\n def test_sellar_mdf_linear_con_directsolver(self):\n # This test makes sure that we call solve_nonlinear first if we have any linear constraints\n # to cache.\n prob = om.Problem()\n model = prob.model = SellarDerivatives()\n\n model.add_design_var('z', lower=np.array([-10.0, 0.0]), upper=np.array([10.0, 10.0]))\n model.add_design_var('x', lower=0.0, upper=10.0)\n model.add_objective('obj')\n model.add_constraint('con1', upper=0.0)\n model.add_constraint('con2', upper=0.0)\n model.add_constraint('x', upper=11.0, linear=True)\n\n prob.setup(check=False, mode='rev')\n prob.set_solver_print(level=0)\n\n prob.driver = 
om.ScipyOptimizeDriver(optimizer='SLSQP', tol=1e-9, disp=False)\n\n failed = prob.run_driver()\n\n assert_near_equal(prob['z'][0], 1.9776, 1e-3)\n assert_near_equal(prob['z'][1], 0.0, 1e-3)\n assert_near_equal(prob['x'], 0.0, 4e-3)\n\n self.assertEqual(len(prob.driver._lincongrad_cache), 1)\n # Piggyback test: make sure we can run the driver again as a subdriver without a keyerror.\n prob.driver.run()\n self.assertEqual(len(prob.driver._lincongrad_cache), 1)\n\n def test_call_final_setup(self):\n # Make sure we call final setup if our model hasn't been setup.\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('p1', om.IndepVarComp('x', 50.0), promotes=['*'])\n model.add_subsystem('p2', om.IndepVarComp('y', 50.0), promotes=['*'])\n model.add_subsystem('comp', Paraboloid(), promotes=['*'])\n model.add_subsystem('con', om.ExecComp('c = - x + y'), promotes=['*'])\n\n prob.set_solver_print(level=0)\n\n prob.driver = om.ScipyOptimizeDriver(optimizer='SLSQP', tol=1e-9, disp=False)\n\n model.add_design_var('x', lower=-50.0, upper=50.0)\n model.add_design_var('y', lower=-50.0, upper=50.0)\n model.add_objective('f_xy')\n model.add_constraint('c', equals=-15.0)\n\n prob.setup()\n\n expected_msg = \\\n \"Problem .*: run_model must be called before total derivatives can be checked\\.\"\n with self.assertRaisesRegex(RuntimeError, expected_msg):\n totals = prob.check_totals(method='fd', out_stream=False)\n\n def test_cobyla_linear_constraint(self):\n # Bug where ScipyOptimizeDriver tried to compute and cache the constraint derivatives for the\n # lower and upper bounds of the desvars even though we were using a non-gradient optimizer.\n # This causd a KeyError.\n prob = om.Problem()\n indeps = prob.model.add_subsystem('indeps', om.IndepVarComp())\n indeps.add_output('x', 3.0)\n indeps.add_output('y', -4.0)\n\n prob.model.add_subsystem('parab', Paraboloid())\n\n prob.model.add_subsystem('const', om.ExecComp('g = x + y'))\n\n prob.model.connect('indeps.x', ['parab.x', 'const.x'])\n prob.model.connect('indeps.y', ['parab.y', 'const.y'])\n\n prob.driver = om.ScipyOptimizeDriver(optimizer='COBYLA', tol=1e-9, disp=False)\n\n prob.model.add_constraint('const.g', lower=0, upper=10.)\n prob.model.add_design_var('indeps.x', **{'ref0': 0, 'ref': 2, 'lower': -50, 'upper': 50})\n prob.model.add_design_var('indeps.y', **{'ref0': 0, 'ref': 2, 'lower': -50, 'upper': 50})\n prob.model.add_objective('parab.f_xy', scaler = 4.0)\n prob.setup()\n prob.run_driver()\n\n # minimum value\n assert_near_equal(prob['parab.f_xy'], -27, 1e-6)\n\n def test_multiple_objectives_error(self):\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('p1', om.IndepVarComp('x', 50.0), promotes=['*'])\n model.add_subsystem('p2', om.IndepVarComp('y', 50.0), promotes=['*'])\n model.add_subsystem('comp', Paraboloid(), promotes=['*'])\n model.add_subsystem('con', om.ExecComp('c = - x + y'), promotes=['*'])\n\n prob.set_solver_print(level=0)\n\n prob.driver = om.ScipyOptimizeDriver(optimizer='SLSQP', tol=1e-9, disp=False)\n\n self.assertFalse(prob.driver.supports['multiple_objectives'])\n prob.driver.options['debug_print'] = ['nl_cons', 'objs']\n\n model.add_design_var('x', lower=-50.0, upper=50.0)\n model.add_design_var('y', lower=-50.0, upper=50.0)\n model.add_objective('f_xy')\n model.add_objective('c') # Second objective\n prob.setup()\n\n with self.assertRaises(RuntimeError):\n prob.run_model()\n\n with self.assertRaises(RuntimeError):\n prob.run_driver()\n\n def test_basinhopping(self):\n\n class 
Func2d(om.ExplicitComponent):\n\n def setup(self):\n self.add_input('x', np.ones(2))\n self.add_output('f', 0.0)\n self.declare_partials('f', 'x')\n\n def compute(self, inputs, outputs, discrete_inputs=None, discrete_outputs=None):\n x = inputs['x']\n outputs['f'] = np.cos(14.5 * x[0] - 0.3) + (x[1] + 0.2) * x[1] + (x[0] + 0.2) * x[0]\n\n def compute_partials(self, inputs, partials):\n x = inputs['x']\n df = np.zeros(2)\n df[0] = -14.5 * np.sin(14.5 * x[0] - 0.3) + 2. * x[0] + 0.2\n df[1] = 2. * x[1] + 0.2\n partials['f', 'x'] = df\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('indeps', om.IndepVarComp('x', np.ones(2)), promotes=['*'])\n model.add_subsystem('func2d', Func2d(), promotes=['*'])\n\n prob.driver = driver = om.ScipyOptimizeDriver()\n driver.options['optimizer'] = 'basinhopping'\n driver.options['disp'] = False\n driver.opt_settings['niter'] = 1000\n driver.opt_settings['seed'] = 1234\n\n model.add_design_var('x', lower=[-1, -1], upper=[0, 0])\n model.add_objective('f')\n prob.setup()\n prob.run_driver()\n assert_near_equal(prob['x'], np.array([-0.1951, -0.1000]), 1e-3)\n assert_near_equal(prob['f'], -1.0109, 1e-3)\n\n def test_basinhopping_bounded(self):\n # It should find the local minimum, which is inside the bounds\n\n class Func2d(om.ExplicitComponent):\n\n def setup(self):\n self.add_input('x', np.ones(2))\n self.add_output('f', 0.0)\n self.declare_partials('f', 'x')\n\n def compute(self, inputs, outputs, discrete_inputs=None, discrete_outputs=None):\n x = inputs['x']\n outputs['f'] = np.cos(14.5 * x[0] - 0.3) + (x[1] + 0.2) * x[1] + (x[0] + 0.2) * x[0]\n\n def compute_partials(self, inputs, partials):\n x = inputs['x']\n df = np.zeros(2)\n df[0] = -14.5 * np.sin(14.5 * x[0] - 0.3) + 2. * x[0] + 0.2\n df[1] = 2. 
* x[1] + 0.2\n partials['f', 'x'] = df\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('indeps', om.IndepVarComp('x', np.ones(2)), promotes=['*'])\n model.add_subsystem('func2d', Func2d(), promotes=['*'])\n\n prob.driver = driver = om.ScipyOptimizeDriver()\n driver.options['optimizer'] = 'basinhopping'\n driver.options['disp'] = False\n driver.opt_settings['niter'] = 200\n driver.opt_settings['seed'] = 1234\n\n model.add_design_var('x', lower=[0, -1], upper=[1, 1])\n model.add_objective('f')\n prob.setup()\n prob.run_driver()\n assert_near_equal(prob['x'], np.array([0.234171, -0.1000]), 1e-3)\n assert_near_equal(prob['f'], -0.907267, 1e-3)\n\n @unittest.skipUnless(LooseVersion(scipy_version) >= LooseVersion(\"1.2\"),\n \"scipy >= 1.2 is required.\")\n def test_dual_annealing_rastrigin(self):\n # Example from the Scipy documentation\n\n size = 3 # size of the design variable\n\n class Rastrigin(om.ExplicitComponent):\n\n def setup(self):\n self.add_input('x', 0.5 * np.ones(size))\n self.add_output('f', 0.5)\n\n def compute(self, inputs, outputs, discrete_inputs=None, discrete_outputs=None):\n x = inputs['x']\n outputs['f'] = rastrigin(x)\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('indeps', om.IndepVarComp('x', np.ones(size)), promotes=['*'])\n model.add_subsystem('rastrigin', Rastrigin(), promotes=['*'])\n\n prob.driver = driver = om.ScipyOptimizeDriver()\n driver.options['optimizer'] = 'dual_annealing'\n driver.options['disp'] = False\n driver.options['tol'] = 1e-9\n driver.options['maxiter'] = 3000\n driver.opt_settings['seed'] = 1234\n driver.opt_settings['initial_temp'] = 5230\n\n model.add_design_var('x', lower=-2 * np.ones(size), upper=2 * np.ones(size))\n model.add_objective('f')\n prob.setup()\n prob.run_driver()\n assert_near_equal(prob['x'], np.zeros(size), 1e-2)\n assert_near_equal(prob['f'], 0.0, 1e-2)\n\n def test_differential_evolution(self):\n # Source of example:\n # https://scipy.github.io/devdocs/generated/scipy.optimize.dual_annealing.html\n np.random.seed(6)\n\n size = 3 # size of the design variable\n\n class Rastrigin(om.ExplicitComponent):\n\n def setup(self):\n self.add_input('x', 0.5 * np.ones(size))\n self.add_output('f', 0.5)\n\n def compute(self, inputs, outputs, discrete_inputs=None, discrete_outputs=None):\n x = inputs['x']\n outputs['f'] = rastrigin(x)\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('indeps', om.IndepVarComp('x', np.ones(size)), promotes=['*'])\n model.add_subsystem('rastrigin', Rastrigin(), promotes=['*'])\n\n prob.driver = driver = om.ScipyOptimizeDriver()\n driver.options['optimizer'] = 'differential_evolution'\n driver.options['disp'] = False\n driver.options['tol'] = 1e-9\n\n model.add_design_var('x', lower=-5.12 * np.ones(size), upper=5.12 * np.ones(size))\n model.add_objective('f')\n prob.setup()\n prob.run_driver()\n assert_near_equal(prob['x'], np.zeros(size), 1e-6)\n assert_near_equal(prob['f'], 0.0, 1e-6)\n\n def test_differential_evolution_bounded(self):\n # Source of example:\n # https://scipy.github.io/devdocs/generated/scipy.optimize.dual_annealing.html\n # In this example the minimum is not the unbounded global minimum.\n\n size = 3 # size of the design variable\n\n class Rastrigin(om.ExplicitComponent):\n\n def setup(self):\n self.add_input('x', 0.5 * np.ones(size))\n self.add_output('f', 0.5)\n\n def compute(self, inputs, outputs, discrete_inputs=None, discrete_outputs=None):\n x = inputs['x']\n outputs['f'] = rastrigin(x)\n\n prob = om.Problem()\n model = 
prob.model\n\n model.add_subsystem('indeps', om.IndepVarComp('x', np.ones(size)), promotes=['*'])\n model.add_subsystem('rastrigin', Rastrigin(), promotes=['*'])\n\n prob.driver = driver = om.ScipyOptimizeDriver()\n driver.options['optimizer'] = 'differential_evolution'\n driver.options['disp'] = False\n driver.options['tol'] = 1e-9\n\n model.add_design_var('x', lower=-2.0 * np.ones(size), upper=-0.5 * np.ones(size))\n model.add_objective('f')\n prob.setup()\n prob.run_driver()\n assert_near_equal(prob['x'], -np.ones(size), 1e-2)\n assert_near_equal(prob['f'], 3.0, 1e-2)\n\n @unittest.skipUnless(LooseVersion(scipy_version) >= LooseVersion(\"1.2\"),\n \"scipy >= 1.2 is required.\")\n def test_shgo_rosenbrock(self):\n # Source of example:\n # https://stefan-endres.github.io/shgo/\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('indeps', om.IndepVarComp('x', np.ones(rosenbrock_size)), promotes=['*'])\n model.add_subsystem('rosen', Rosenbrock(), promotes=['*'])\n\n prob.driver = driver = om.ScipyOptimizeDriver()\n driver.options['optimizer'] = 'shgo'\n driver.options['disp'] = False\n driver.opt_settings['maxiter'] = None\n\n model.add_design_var('x', lower=np.zeros(rosenbrock_size), upper=2*np.ones(rosenbrock_size))\n model.add_objective('f')\n prob.setup()\n prob.run_driver()\n assert_near_equal(prob['x'], np.ones(rosenbrock_size), 1e-2)\n assert_near_equal(prob['f'], 0.0, 1e-2)\n\n def test_singular_jac_error_responses(self):\n prob = om.Problem()\n size = 3\n prob.model.add_subsystem('parab',\n om.ExecComp(['f_xy = (x-3.0)**2 + x*y + (y+4.0)**2 - 3.0',\n 'z = 12.0'], shape=(size,)),\n promotes_inputs=['x', 'y'])\n\n prob.model.add_subsystem('const', om.ExecComp('g = x + y', shape=(size,)),\n promotes_inputs=['x', 'y'])\n\n prob.model.set_input_defaults('x', 3.0 * np.ones(size))\n prob.model.set_input_defaults('y', -4.0 * np.ones(size))\n\n prob.driver = om.ScipyOptimizeDriver()\n prob.driver.options['optimizer'] = 'SLSQP'\n prob.driver.options['singular_jac_behavior'] = 'error'\n\n prob.model.add_design_var('x', lower=-50, upper=50)\n prob.model.add_design_var('y', lower=-50, upper=50)\n prob.model.add_objective('parab.f_xy', index=1)\n\n prob.model.add_constraint('const.g', lower=0, upper=10.)\n\n # This constraint produces a zero row.\n prob.model.add_constraint('parab.z', equals=12.)\n\n prob.setup()\n\n with self.assertRaises(RuntimeError) as msg:\n prob.run_driver()\n\n self.assertEqual(str(msg.exception),\n \"Constraints or objectives [('parab.z', inds=[0, 1, 2])] cannot be impacted by the design \" + \\\n \"variables of the problem.\")\n\n def test_singular_jac_error_desvars(self):\n prob = om.Problem()\n prob.model.add_subsystem('parab',\n om.ExecComp(['f_xy = (x-3.0)**2 + x*y + (y+4.0)**2 - 3.0 - 0*z',\n ]),\n promotes_inputs=['x', 'y', 'z'])\n\n prob.model.add_subsystem('const', om.ExecComp('g = x + y'), promotes_inputs=['x', 'y'])\n\n prob.model.set_input_defaults('x', 3.0)\n prob.model.set_input_defaults('y', -4.0)\n\n prob.driver = om.ScipyOptimizeDriver()\n prob.driver.options['optimizer'] = 'SLSQP'\n prob.driver.options['singular_jac_behavior'] = 'error'\n\n prob.model.add_design_var('x', lower=-50, upper=50)\n prob.model.add_design_var('y', lower=-50, upper=50)\n\n # Design var z does not affect any quantities.\n prob.model.add_design_var('z', lower=-50, upper=50)\n\n prob.model.add_objective('parab.f_xy')\n\n prob.model.add_constraint('const.g', lower=0, upper=10.)\n\n prob.setup()\n\n with self.assertRaises(RuntimeError) as msg:\n 
prob.run_driver()\n\n self.assertEqual(str(msg.exception),\n \"Design variables [('z', inds=[0])] have no impact on the constraints or objective.\")\n\n def test_singular_jac_ignore(self):\n prob = om.Problem()\n prob.model.add_subsystem('parab',\n om.ExecComp(['f_xy = (x-3.0)**2 + x*y + (y+4.0)**2 - 3.0',\n 'z = 12.0'],),\n promotes_inputs=['x', 'y'])\n\n prob.model.add_subsystem('const', om.ExecComp('g = x + y'), promotes_inputs=['x', 'y'])\n\n prob.model.set_input_defaults('x', 3.0)\n prob.model.set_input_defaults('y', -4.0)\n\n prob.driver = om.ScipyOptimizeDriver()\n prob.driver.options['optimizer'] = 'SLSQP'\n prob.driver.options['singular_jac_behavior'] = 'ignore'\n\n prob.model.add_design_var('x', lower=-50, upper=50)\n prob.model.add_design_var('y', lower=-50, upper=50)\n prob.model.add_objective('parab.f_xy')\n\n prob.model.add_constraint('const.g', lower=0, upper=10.)\n\n # This constraint produces a zero row.\n prob.model.add_constraint('parab.z', equals=12.)\n\n prob.setup()\n\n # Will not raise an exception.\n prob.run_driver()\n\n def test_singular_jac_warn(self):\n prob = om.Problem()\n prob.model.add_subsystem('parab',\n om.ExecComp(['f_xy = (x-3.0)**2 + x*y + (y+4.0)**2 - 3.0',\n 'z = 12.0'],),\n promotes_inputs=['x', 'y'])\n\n prob.model.add_subsystem('const', om.ExecComp('g = x + y'), promotes_inputs=['x', 'y'])\n\n prob.model.set_input_defaults('x', 3.0)\n prob.model.set_input_defaults('y', -4.0)\n\n prob.driver = om.ScipyOptimizeDriver(optimizer='SLSQP')\n # Default behavior is 'warn'\n\n prob.model.add_design_var('x', lower=-50, upper=50)\n prob.model.add_design_var('y', lower=-50, upper=50)\n prob.model.add_objective('parab.f_xy')\n\n prob.model.add_constraint('const.g', lower=0, upper=10.)\n\n # This constraint produces a zero row.\n prob.model.add_constraint('parab.z', equals=12.)\n\n prob.setup()\n\n msg = \"Constraints or objectives [('parab.z', inds=[0])] cannot be impacted by the design variables of the problem.\"\n\n with assert_warning(UserWarning, msg):\n prob.run_driver()\n\n def test_singular_jac_error_desvars_multidim_indices_dv(self):\n prob = om.Problem()\n prob.model.add_subsystem('parab',\n om.ExecComp(['f_xy = (x-3.0)**2 + x*y + (y+4.0)**2 - 3.0 - 0*z'], shape=(3,2,2)),\n promotes_inputs=['x', 'y', 'z'])\n\n prob.model.add_subsystem('const', om.ExecComp('g = x + y', shape=(3,2,2)), promotes_inputs=['x', 'y'])\n\n prob.model.set_input_defaults('x', np.ones((3,2,2)) * 3.0)\n prob.model.set_input_defaults('y', np.ones((3,2,2)) * -4.0)\n\n prob.driver = om.ScipyOptimizeDriver()\n prob.driver.options['optimizer'] = 'SLSQP'\n prob.driver.options['singular_jac_behavior'] = 'error'\n\n prob.model.add_design_var('x', lower=-50, upper=50)\n prob.model.add_design_var('y', lower=-50, upper=50)\n\n # Design var z does not affect any quantities.\n prob.model.add_design_var('z', lower=-50, upper=50, indices=[2,5,6], flat_indices=True)\n\n prob.model.add_objective('parab.f_xy', index=6, flat_indices=True)\n\n prob.model.add_constraint('const.g', lower=0, upper=10.)\n\n prob.setup()\n\n with self.assertRaises(RuntimeError) as msg:\n prob.run_driver()\n\n self.assertEqual(str(msg.exception),\n \"Design variables [('z', inds=[(0, 1, 0), (1, 0, 1), (1, 1, 0)])] have no impact on the constraints or objective.\")\n\n def test_singular_jac_error_desvars_multidim_indices_con(self):\n prob = om.Problem()\n prob.model.add_subsystem('parab',\n om.ExecComp(['f_xy = (x-3.0)**2 + x*y + (y+4.0)**2 - 3.0 - z',\n 'f_z = z * 0.0'], shape=(3,2,2)),\n promotes_inputs=['x', 'y', 
'z'])\n\n prob.model.add_subsystem('const', om.ExecComp('g = x + y', shape=(3,2,2)), promotes_inputs=['x', 'y'])\n\n prob.model.set_input_defaults('x', np.ones((3,2,2)) * 3.0)\n prob.model.set_input_defaults('y', np.ones((3,2,2)) * -4.0)\n\n prob.driver = om.ScipyOptimizeDriver()\n prob.driver.options['optimizer'] = 'SLSQP'\n prob.driver.options['singular_jac_behavior'] = 'error'\n\n prob.model.add_design_var('x', lower=-50, upper=50)\n prob.model.add_design_var('y', lower=-50, upper=50)\n prob.model.add_design_var('z', lower=-50, upper=50)\n\n # objective parab.f_z is not impacted by any quantities.\n prob.model.add_objective('parab.f_z', index=6, flat_indices=True)\n\n prob.model.add_constraint('const.g', lower=0, upper=10.)\n\n prob.setup()\n\n with self.assertRaises(RuntimeError) as msg:\n prob.run_driver()\n\n self.assertEqual(str(msg.exception),\n \"Constraints or objectives [('parab.f_z', inds=[(1, 1, 0)])] cannot be impacted by the design variables of the problem.\")\n\n @unittest.skipUnless(LooseVersion(scipy_version) >= LooseVersion(\"1.2\"),\n \"scipy >= 1.2 is required.\")\n def test_feature_shgo_rastrigin(self):\n # Source of example: https://stefan-endres.github.io/shgo/\n\n size = 3 # size of the design variable\n\n def rastrigin(x):\n a = 10 # constant\n return np.sum(np.square(x) - a * np.cos(2 * np.pi * x)) + a * np.size(x)\n\n class Rastrigin(om.ExplicitComponent):\n\n def setup(self):\n self.add_input('x', np.ones(size))\n self.add_output('f', 0.0)\n\n def compute(self, inputs, outputs, discrete_inputs=None, discrete_outputs=None):\n x = inputs['x']\n outputs['f'] = rastrigin(x)\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('rastrigin', Rastrigin(), promotes=['*'])\n\n prob.driver = driver = om.ScipyOptimizeDriver()\n driver.options['optimizer'] = 'shgo'\n driver.options['disp'] = False\n driver.opt_settings['maxtime'] = 10 # seconds\n driver.opt_settings['iters'] = 3\n driver.opt_settings['maxiter'] = None\n\n model.add_design_var('x', lower=-5.12*np.ones(size), upper=5.12*np.ones(size))\n model.add_objective('f')\n prob.setup()\n\n prob.set_val('x', np.ones(size))\n prob.run_driver()\n\n assert_near_equal(prob.get_val('x'), np.zeros(size), 1e-6)\n assert_near_equal(prob.get_val('f'), 0.0, 1e-6)\n\n def test_multiple_constraints_scipy(self):\n\n p = om.Problem()\n\n exec = om.ExecComp(['y = x**2',\n 'z = a + x**2'],\n a={'shape': (1,)},\n y={'shape': (101,)},\n x={'shape': (101,)},\n z={'shape': (101,)})\n\n p.model.add_subsystem('exec', exec)\n\n p.model.add_design_var('exec.a', lower=-1000, upper=1000)\n p.model.add_objective('exec.y', index=50)\n p.model.add_constraint('exec.z', indices=[10], upper=0)\n p.model.add_constraint('exec.z', indices=[-1], equals=25, alias=\"ALIAS_TEST\")\n\n p.driver = om.ScipyOptimizeDriver()\n\n p.setup()\n\n p.set_val('exec.x', np.linspace(-10, 10, 101))\n\n p.run_driver()\n\n assert_near_equal(p.get_val('exec.z')[0], 25)\n assert_near_equal(p.get_val('exec.z')[10], -11)\n\n def test_con_and_obj_duplicate(self):\n\n p = om.Problem()\n\n exec = om.ExecComp(['y = x**2',\n 'z = a + x**2'],\n a={'shape': (1,)},\n y={'shape': (101,)},\n x={'shape': (101,)},\n z={'shape': (101,)})\n\n p.model.add_subsystem('exec', exec)\n\n p.model.add_design_var('exec.a', lower=-1000, upper=1000)\n p.model.add_objective('exec.z', index=50)\n p.model.add_constraint('exec.z', indices=[0], equals=25, alias='ALIAS_TEST')\n\n p.driver = om.ScipyOptimizeDriver()\n\n p.setup()\n\n p.set_val('exec.x', np.linspace(-10, 10, 101))\n\n 
p.run_driver()\n\n assert_near_equal(p.get_val('exec.z')[0], 25)\n assert_near_equal(p.get_val('exec.z')[50], -75)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"import unittest\n\nimport numpy as np\n\nimport openmdao.api as om\nfrom openmdao.test_suite.components.sellar_feature import SellarMDA\nfrom openmdao.utils.assert_utils import assert_near_equal, assert_warning\nfrom openmdao.utils.om_warnings import OMDeprecationWarning\n\nfrom openmdao.utils.mpi import MPI\nif MPI:\n try:\n from openmdao.vectors.petsc_vector import PETScVector\n except ImportError:\n PETScVector = None\n\n\nclass L2(om.ExplicitComponent):\n \"\"\"takes the 2 norm of the input\"\"\"\n\n def setup(self):\n self.add_input('vec', shape_by_conn=True)\n self.add_output('val', 0.0)\n\n def compute(self, inputs, outputs):\n outputs['val'] = np.linalg.norm(inputs['vec'])\n\n\nclass TestAdder(unittest.TestCase):\n def test_adder(self):\n\n prob = om.Problem()\n prob.model = om.Group()\n\n indeps = prob.model.add_subsystem('indeps', om.IndepVarComp('in', np.ones(10)), promotes=['*'])\n\n prob.model.add_subsystem('L2norm', L2())\n prob.model.connect('in', ['L2norm.vec'])\n prob.setup()\n prob.run_model()\n np.testing.assert_allclose(prob['L2norm.vec'], np.ones(10))\n\n\n# This is based on passing size information through the system shown below\n# in all tests C starts with the shape information\n\n# +-----+\n# | |\n# | A +-----+\n# | | |\n# +-----+ |\n# +---+---+\n# | |\n# | B +-----+\n# | | |\n# +-------+ |\n# +---+----+\n# | |\n# | C +-----+\n# | | |\n# +--------+ +---+---+\n# | |\n# | D +----+\n# | | |\n# +-------+ |\n# +--+----+\n# | |\n# | E |\n# | |\n# +-------+\n\nclass B(om.ExplicitComponent):\n\n def setup(self):\n self.add_input('in', copy_shape='out')\n self.add_output('out', shape_by_conn=True)\n\n def compute(self, inputs, outputs):\n outputs['out'] = inputs['in']\n\n\nclass C(om.ExplicitComponent):\n\n def setup(self):\n self.add_input('in', shape=4)\n self.add_output('out', shape=9)\n\n def compute(self, inputs, outputs):\n outputs['out'] = np.arange(9)\n\n\nclass D(om.ExplicitComponent):\n\n def setup(self):\n self.add_input('in', shape_by_conn=True)\n self.add_output('out', copy_shape='in')\n\n def compute(self, inputs, outputs):\n outputs['out'] = inputs['in']\n\n\nclass E(om.ExplicitComponent):\n\n def setup(self):\n self.add_input('in', shape_by_conn=True)\n self.add_output('out', copy_shape='in')\n\n def compute(self, inputs, outputs):\n outputs['out'] = inputs['in']\n\n\nclass B_distrib(om.ExplicitComponent):\n def setup(self):\n self.add_input('in', copy_shape='out', distributed=True)\n self.add_output('out', shape_by_conn=True, distributed=True)\n\n def compute(self, inputs, outputs):\n outputs['out'] = inputs['in']\n\n\nclass C_distrib(om.ExplicitComponent):\n def setup(self):\n if self.comm.rank == 0:\n self.add_input('in', shape=1, src_indices=np.arange(0,1, dtype=int), distributed=True)\n elif self.comm.rank == 1:\n self.add_input('in', shape=2, src_indices=np.arange(1,3, dtype=int), distributed=True)\n else:\n self.add_input('in', shape=0, src_indices=np.arange(3,3, dtype=int), distributed=True)\n\n self.add_output('out', shape=3, distributed=True)\n\n def compute(self, inputs, outputs):\n outputs['out'] = np.sum(inputs['in']) * (self.comm.rank + 1)\n\n\nclass D_distrib(om.ExplicitComponent):\n def setup(self):\n self.add_input('in', shape_by_conn=True, distributed=True)\n self.add_output('out', copy_shape='in', distributed=True)\n\n def compute(self, inputs, outputs):\n outputs['out'] = inputs['in']\n\n\nclass TestPassSize(unittest.TestCase):\n def test_serial(self):\n prob = om.Problem()\n prob.model = om.Group()\n\n indeps = 
prob.model.add_subsystem('A', om.IndepVarComp())\n indeps.add_output('out', shape_by_conn=True)\n\n prob.model.add_subsystem('B', B())\n prob.model.connect('A.out', ['B.in'])\n\n prob.model.add_subsystem('C', C())\n prob.model.connect('B.out', ['C.in'])\n\n prob.model.add_subsystem('D', D())\n prob.model.connect('C.out', ['D.in'])\n\n prob.model.add_subsystem('E', E())\n prob.model.connect('D.out', ['E.in'])\n\n prob.setup()\n prob.run_model()\n self.assertEqual(prob.get_val('A.out').size ,4)\n self.assertEqual(prob.get_val('B.in').size ,4)\n self.assertEqual(prob.get_val('B.out').size ,4)\n\n self.assertEqual(prob.get_val('D.in').size ,9)\n self.assertEqual(prob.get_val('D.out').size ,9)\n self.assertEqual(prob.get_val('E.in').size ,9)\n\n def test_unresolved_err(self):\n prob = om.Problem()\n prob.model = om.Group()\n\n prob.model.add_subsystem('B', B())\n prob.model.connect('C.out', ['B.in'])\n\n prob.model.add_subsystem('C', B())\n prob.model.connect('B.out', ['C.in'])\n\n with self.assertRaises(Exception) as raises_cm:\n prob.setup()\n\n exception = raises_cm.exception\n\n msg = \"<model> <class Group>: Failed to resolve shapes for ['B.in', 'B.out', 'C.in', 'C.out']. To see the dynamic shape dependency graph, do 'openmdao view_dyn_shapes <your_py_file>'.\"\n self.assertEqual(exception.args[0], msg)\n\n\[email protected](MPI and PETScVector, \"MPI and PETSc are required.\")\nclass TestPassSizeDistributed(unittest.TestCase):\n\n N_PROCS = 3\n\n def test_serial_start(self):\n \"\"\"the size information starts in the duplicated component C\"\"\"\n\n prob = om.Problem()\n prob.model = om.Group()\n\n indeps = prob.model.add_subsystem('A', om.IndepVarComp())\n indeps.add_output('out', shape_by_conn=True)\n\n prob.model.add_subsystem('B', B_distrib())\n prob.model.connect('A.out', ['B.in'])\n\n prob.model.add_subsystem('C', C())\n prob.model.connect('B.out', ['C.in'], src_indices=om.slicer[:])\n\n prob.model.add_subsystem('D', D_distrib())\n prob.model.connect('C.out', ['D.in'])\n\n prob.model.add_subsystem('E', E())\n prob.model.connect('D.out', ['E.in'])\n\n with self.assertRaises(RuntimeError) as cm:\n prob.setup()\n\n msg = \"<model> <class Group>: dynamic sizing of non-distributed input 'E.in' from distributed output 'D.out' is not supported.\"\n self.assertEquals(str(cm.exception), msg)\n\n def test_distributed_start(self):\n \"\"\"the size information starts in the distributed component C\"\"\"\n\n prob = om.Problem()\n prob.model = om.Group()\n\n indeps = prob.model.add_subsystem('A', om.IndepVarComp())\n indeps.add_output('out', shape_by_conn=True)\n\n prob.model.add_subsystem('B', B_distrib())\n prob.model.connect('A.out', ['B.in'])\n\n prob.model.add_subsystem('C', C_distrib())\n prob.model.connect('B.out', ['C.in'])\n\n prob.model.add_subsystem('D', D_distrib())\n prob.model.connect('C.out', ['D.in'])\n\n prob.model.add_subsystem('E', E())\n prob.model.connect('D.out', ['E.in'])\n\n with self.assertRaises(RuntimeError) as cm:\n prob.setup()\n\n msg = \"<model> <class Group>: dynamic sizing of non-distributed output 'A.out' from distributed input 'B.in' is not supported because not all B.in ranks are the same size (sizes=[1 2 0]).\"\n self.assertEquals(str(cm.exception), msg)\n\nclass ResizableComp(om.ExplicitComponent):\n # this is just a component that allows us to resize between setups\n def __init__(self, n_inputs=1, size=5, mult=2.):\n super().__init__()\n self.n_inputs = n_inputs\n self.size = size\n self.mult = mult\n\n def setup(self):\n for i in range(self.n_inputs):\n 
self.add_input(f\"x{i+1}\", val=np.ones(self.size))\n self.add_output(f\"y{i+1}\", val=np.ones(self.size))\n\n def compute(self, inputs, outputs):\n for i in range(self.n_inputs):\n outputs[f\"y{i+1}\"] = self.mult*inputs[f\"x{i+1}\"]\n\n\nclass DynShapeComp(om.ExplicitComponent):\n # component whose inputs and outputs are dynamically shaped\n def __init__(self, n_inputs=1):\n super().__init__()\n self.n_inputs = n_inputs\n\n for i in range(self.n_inputs):\n self.add_input(f\"x{i+1}\", shape_by_conn=True, copy_shape=f\"y{i+1}\")\n self.add_output(f\"y{i+1}\", shape_by_conn=True, copy_shape=f\"x{i+1}\")\n\n def compute(self, inputs, outputs):\n for i in range(self.n_inputs):\n outputs[f\"y{i+1}\"] = 2*inputs[f\"x{i+1}\"]\n\n\nclass DistribDynShapeComp(om.ExplicitComponent):\n # a distributed component whose inputs and outputs are dynamically shaped\n def __init__(self, n_inputs=1):\n super().__init__()\n self.n_inputs = n_inputs\n\n def setup(self):\n for i in range(self.n_inputs):\n self.add_input(f\"x{i+1}\", shape_by_conn=True, copy_shape=f\"y{i+1}\", distributed=True)\n self.add_output(f\"y{i+1}\", shape_by_conn=True, copy_shape=f\"x{i+1}\", distributed=True)\n\n def compute(self, inputs, outputs):\n for i in range(self.n_inputs):\n outputs[f\"y{i+1}\"] = 2*inputs[f\"x{i+1}\"]\n\n\nclass DynShapeGroupSeries(om.Group):\n # strings together some number of components in series.\n # component type is determined by comp_class\n def __init__(self, n_comps, n_inputs, comp_class):\n super().__init__()\n self.n_comps = n_comps\n self.n_inputs = n_inputs\n self.comp_class = comp_class\n\n for icmp in range(1, self.n_comps + 1):\n self.add_subsystem(f\"C{icmp}\", self.comp_class(n_inputs=self.n_inputs))\n\n for icmp in range(1, self.n_comps):\n for i in range(1, self.n_inputs + 1):\n self.connect(f\"C{icmp}.y{i}\", f\"C{icmp+1}.x{i}\")\n\n\nclass DynShapeGroupConnectedInputs(om.Group):\n # contains some number of components with all of their matching inputs connected.\n # component type is determined by comp_class\n def __init__(self, n_comps, n_inputs, comp_class):\n super().__init__()\n self.n_comps = n_comps\n self.n_inputs = n_inputs\n self.comp_class = comp_class\n\n for icmp in range(1, self.n_comps + 1):\n self.add_subsystem(f\"C{icmp}\", self.comp_class(n_inputs=self.n_inputs),\n promotes_inputs=['*'])\n\n\nclass TestDynShapes(unittest.TestCase):\n def test_baseline_series(self):\n # this is just a sized source and unsized sink, and we put a DynShapeGroupSeries in between them\n p = om.Problem()\n indep = p.model.add_subsystem('indep', om.IndepVarComp('x1', val=np.ones((2,3))))\n indep.add_output('x2', val=np.ones((4,2)))\n p.model.add_subsystem('Gdyn', DynShapeGroupSeries(3, 2, DynShapeComp))\n p.model.add_subsystem('sink', om.ExecComp('y1, y2 = x1*2, x2*2',\n x1={'shape_by_conn': True, 'copy_shape': 'y1'},\n x2={'shape_by_conn': True, 'copy_shape': 'y2'},\n y1={'shape_by_conn': True, 'copy_shape': 'x1'},\n y2={'shape_by_conn': True, 'copy_shape': 'x2'}))\n p.model.connect('Gdyn.C3.y1', 'sink.x1')\n p.model.connect('Gdyn.C3.y2', 'sink.x2')\n p.model.connect('indep.x1', 'Gdyn.C1.x1')\n p.model.connect('indep.x2', 'Gdyn.C1.x2')\n p.setup()\n p.run_model()\n np.testing.assert_allclose(p['sink.y1'], np.ones((2,3))*16)\n np.testing.assert_allclose(p['sink.y2'], np.ones((4,2))*16)\n\n def test_copy_shape_out_out(self):\n # test copy_shape from output to output\n p = om.Problem()\n indep = p.model.add_subsystem('indep', om.IndepVarComp('x1', val=np.ones((2,3))))\n indep.add_output('x2', 
val=np.ones((2,3)))\n p.model.add_subsystem('Gdyn', DynShapeGroupSeries(3, 2, DynShapeComp))\n p.model.add_subsystem('sink', om.ExecComp('y1, y2 = x1*2, x2*2',\n x1={'shape_by_conn': True, 'copy_shape': 'y1'},\n x2={'shape_by_conn': True, 'copy_shape': 'y2'},\n y1={'copy_shape': 'y2'},\n y2={'copy_shape': 'y1'}))\n p.model.connect('Gdyn.C3.y1', 'sink.x1')\n p.model.connect('Gdyn.C3.y2', 'sink.x2')\n p.model.connect('indep.x1', 'Gdyn.C1.x1')\n p.model.connect('indep.x2', 'Gdyn.C1.x2')\n p.setup()\n p.run_model()\n np.testing.assert_allclose(p['sink.y1'], np.ones((2,3))*16)\n np.testing.assert_allclose(p['sink.y2'], np.ones((2,3))*16)\n\n def test_copy_shape_in_in(self):\n # test copy_shape from input to input\n # The fact that this case works is a bit of a surprise since comp.x1 and comp.x2 do not set\n # shape_by_conn, so you would expect them to be unresolvable, but they connect to dynamic\n # shaped vars that DO have shape_by_conn set. Basically, if shape_by_conn is set on either\n # end of a connection when both vars are dynamically shaped, it's the same effect as if\n # both had set shape_by_conn since the shapes of any two connected vars must match.\n p = om.Problem()\n indep = p.model.add_subsystem('indep', om.IndepVarComp('x1', val=np.ones((2,3))))\n indep.add_output('x2', val=np.ones((2,3)))\n p.model.add_subsystem('Gdyn', DynShapeGroupSeries(3, 2, DynShapeComp))\n p.model.add_subsystem('comp', om.ExecComp('y1, y2 = x1*2, x2*2',\n x1={'copy_shape': 'x2'},\n x2={'copy_shape': 'x1'},\n y1={'shape_by_conn': True},\n y2={'shape_by_conn': True}))\n p.model.add_subsystem('sink', om.ExecComp('y1, y2 = x1*2, x2*2',\n x1=np.ones((2,3)),\n x2=np.ones((2,3)),\n y1=np.ones((2,3)),\n y2=np.ones((2,3))))\n p.model.connect('indep.x1', 'Gdyn.C1.x1')\n p.model.connect('indep.x2', 'Gdyn.C1.x2')\n p.model.connect('Gdyn.C3.y1', 'comp.x1')\n p.model.connect('Gdyn.C3.y2', 'comp.x2')\n p.model.connect('comp.y1', 'sink.x1')\n p.model.connect('comp.y2', 'sink.x2')\n p.setup()\n p.run_model()\n np.testing.assert_allclose(p['sink.y1'], np.ones((2,3))*32)\n np.testing.assert_allclose(p['sink.y2'], np.ones((2,3))*32)\n\n def test_copy_shape_in_in_unresolvable(self):\n # test copy_shape from input to input\n # In this case, our dynamicaly shaped inputs that do copy_shape from other inputs are connected to\n # non-dynamically shaped outputs, and because they don't set shape_by_conn, they are unresolvable,\n # unlike the test above where they connected to dynamically shaped outputs.\n p = om.Problem()\n indep = p.model.add_subsystem('indep', om.IndepVarComp('x1', val=np.ones((2,3))))\n indep.add_output('x2', val=np.ones((2,3)))\n p.model.add_subsystem('comp', om.ExecComp('y1, y2 = x1*2, x2*2',\n x1={'copy_shape': 'x2'},\n x2={'copy_shape': 'x1'},\n y1={'shape_by_conn': True},\n y2={'shape_by_conn': True}))\n p.model.add_subsystem('sink', om.ExecComp('y1, y2 = x1*2, x2*2',\n x1=np.ones((2,3)),\n x2=np.ones((2,3)),\n y1=np.ones((2,3)),\n y2=np.ones((2,3))))\n p.model.connect('indep.x1', 'comp.x1')\n p.model.connect('indep.x2', 'comp.x2')\n p.model.connect('comp.y1', 'sink.x1')\n p.model.connect('comp.y2', 'sink.x2')\n with self.assertRaises(RuntimeError) as cm:\n p.setup()\n\n msg = \"<model> <class Group>: Failed to resolve shapes for ['comp.x1', 'comp.x2']. 
To see the dynamic shape dependency graph, do 'openmdao view_dyn_shapes <your_py_file>'.\"\n self.assertEqual(cm.exception.args[0], msg)\n\n def test_mismatched_dyn_shapes(self):\n # this is a sized source and sink, but their sizes are incompatible\n p = om.Problem()\n indep = p.model.add_subsystem('indep', om.IndepVarComp('x1', val=np.ones((2,3))))\n indep.add_output('x2', val=np.ones((4,2)))\n p.model.add_subsystem('Gdyn', DynShapeGroupSeries(3, 2, DynShapeComp))\n p.model.add_subsystem('sink', om.ExecComp('y1, y2 = x1*2, x2*2',\n x1=np.ones((2,3)),\n x2=np.ones((3,2)),\n y1=np.ones((2,3)),\n y2=np.ones((3,2))))\n p.model.connect('Gdyn.C3.y1', 'sink.x1')\n p.model.connect('Gdyn.C3.y2', 'sink.x2')\n p.model.connect('indep.x1', 'Gdyn.C1.x1')\n p.model.connect('indep.x2', 'Gdyn.C1.x2')\n with self.assertRaises(Exception) as cm:\n p.setup()\n\n msg = \"<model> <class Group>: Shape mismatch, (3, 2) vs. (4, 2) for variable 'sink.x2' during dynamic shape determination.\"\n self.assertEqual(str(cm.exception), msg)\n\n def test_baseline_conn_inputs(self):\n # this is a sized source and unsized sink, with a DynShapeGroupConnectedInputs between them\n # indep.x? connects to Gdyn.C?.x?\n p = om.Problem()\n indep = p.model.add_subsystem('indep', om.IndepVarComp('x1', val=np.ones((2,3))),\n promotes_outputs=['*'])\n indep.add_output('x2', val=np.ones((4,2)))\n p.model.add_subsystem('Gdyn', DynShapeGroupConnectedInputs(2, 2, DynShapeComp),\n promotes_inputs=['*'])\n p.model.add_subsystem('sink', om.ExecComp('y1, y2 = x1*2, x2*2',\n x1={'shape_by_conn': True, 'copy_shape': 'y1'},\n x2={'shape_by_conn': True, 'copy_shape': 'y2'},\n y1={'shape_by_conn': True, 'copy_shape': 'x1'},\n y2={'shape_by_conn': True, 'copy_shape': 'x2'}))\n p.model.connect('Gdyn.C1.y1', 'sink.x1')\n p.model.connect('Gdyn.C2.y2', 'sink.x2')\n p.setup()\n p.run_model()\n np.testing.assert_allclose(p['sink.y1'], np.ones((2,3))*4)\n np.testing.assert_allclose(p['sink.y2'], np.ones((4,2))*4)\n np.testing.assert_allclose(p['Gdyn.C1.y2'], np.ones((4,2))*2) # unconnected dyn shaped output\n np.testing.assert_allclose(p['Gdyn.C2.y1'], np.ones((2,3))*2) # unconnected dyn shaped output\n\n def test_resetup(self):\n # test that the dynamic sizing reflects any changes that occur prior to 2nd call to setup.\n p = om.Problem()\n ninputs = 1\n p.model.add_subsystem('Gdyn', DynShapeGroupSeries(2, ninputs, DynShapeComp))\n comp = p.model.add_subsystem('sink', ResizableComp(ninputs, 10, 3.))\n p.model.connect('Gdyn.C2.y1', 'sink.x1')\n p.setup()\n p.run_model()\n np.testing.assert_allclose(p['sink.y1'], np.ones(10)*12)\n\n # now change the size and setup again\n comp.size = 5\n p.setup()\n p.run_model()\n np.testing.assert_allclose(p['sink.y1'], np.ones(5)*12)\n\n def test_cycle_fwd_rev(self):\n # now put the DynShapeGroupSeries in a cycle (sink.y2 feeds back into Gdyn.C1.x2). 
Sizes are known\n # at both ends of the model (the IVC and at the sink)\n p = om.Problem()\n indep = p.model.add_subsystem('indep', om.IndepVarComp('x1', val=np.ones((2,3))))\n p.model.add_subsystem('Gdyn', DynShapeGroupSeries(3,2, DynShapeComp))\n p.model.add_subsystem('sink', om.ExecComp('y1, y2 = x1*2, x2*2',\n x1=np.ones((2,3)),\n x2=np.ones((4,2)),\n y1=np.ones((2,3)),\n y2=np.ones((4,2))))\n p.model.connect('Gdyn.C3.y1', 'sink.x1')\n p.model.connect('Gdyn.C3.y2', 'sink.x2')\n p.model.connect('sink.y2', 'Gdyn.C1.x2')\n p.model.connect('indep.x1', 'Gdyn.C1.x1')\n p.setup()\n p.run_model()\n np.testing.assert_allclose(p['sink.y1'], np.ones((2,3))*16)\n np.testing.assert_allclose(p['sink.y2'], np.ones((4,2))*16)\n p.run_model()\n np.testing.assert_allclose(p['sink.y1'], np.ones((2,3))*16)\n # each time we run_model, the value of sink.y2 will be multiplied by 16\n # because of the feedback\n np.testing.assert_allclose(p['sink.y2'], np.ones((4,2))*256)\n\n def test_cycle_rev(self):\n # now put the DynShapeGroupSeries in a cycle (sink.y2 feeds back into Gdyn.C1.x2), but here,\n # only the sink outputs are known and inputs are coming from auto_ivcs.\n p = om.Problem()\n p.model.add_subsystem('Gdyn', DynShapeGroupSeries(3,2, DynShapeComp))\n p.model.add_subsystem('sink', om.ExecComp('y1, y2 = x1*2, x2*2',\n x1=np.ones((2,3)),\n x2=np.ones((4,2)),\n y1=np.ones((2,3)),\n y2=np.ones((4,2))))\n p.model.connect('Gdyn.C3.y1', 'sink.x1')\n p.model.connect('Gdyn.C3.y2', 'sink.x2')\n p.model.connect('sink.y2', 'Gdyn.C1.x2')\n p.setup()\n p.run_model()\n np.testing.assert_allclose(p['sink.y1'], np.ones((2,3))*16)\n np.testing.assert_allclose(p['sink.y2'], np.ones((4,2))*16)\n p.run_model()\n np.testing.assert_allclose(p['sink.y1'], np.ones((2,3))*16)\n # each time we run_model, the value of sink.y2 will be multiplied by 16\n # because of the feedback\n np.testing.assert_allclose(p['sink.y2'], np.ones((4,2))*256)\n\n def test_cycle_unresolved(self):\n # now put the DynShapeGroupSeries in a cycle (sink.y2 feeds back into Gdyn.C1.x2), but here,\n # sink.y2 is unsized, so no var in the '2' loop can get resolved.\n p = om.Problem()\n indep = p.model.add_subsystem('indep', om.IndepVarComp('x1', val=np.ones((2,3))))\n p.model.add_subsystem('Gdyn', DynShapeGroupSeries(3,2, DynShapeComp))\n p.model.add_subsystem('sink', om.ExecComp('y1, y2 = x1*2, x2*2',\n x1={'shape_by_conn': True, 'copy_shape': 'y1'},\n x2={'shape_by_conn': True, 'copy_shape': 'y2'},\n y1={'shape_by_conn': True, 'copy_shape': 'x1'},\n y2={'shape_by_conn': True, 'copy_shape': 'x2'}))\n p.model.connect('Gdyn.C3.y1', 'sink.x1')\n p.model.connect('Gdyn.C3.y2', 'sink.x2')\n p.model.connect('sink.y2', 'Gdyn.C1.x2')\n p.model.connect('indep.x1', 'Gdyn.C1.x1')\n with self.assertRaises(RuntimeError) as cm:\n p.setup()\n\n msg = \"<model> <class Group>: Failed to resolve shapes for ['Gdyn.C1.x2', 'Gdyn.C1.y2', 'Gdyn.C2.x2', 'Gdyn.C2.y2', 'Gdyn.C3.x2', 'Gdyn.C3.y2', 'sink.x2', 'sink.y2']. 
To see the dynamic shape dependency graph, do 'openmdao view_dyn_shapes <your_py_file>'.\"\n self.assertEqual(str(cm.exception), msg)\n\n def test_bad_copy_shape_name(self):\n p = om.Problem()\n indep = p.model.add_subsystem('indep', om.IndepVarComp('x1', val=np.ones((2,3))))\n p.model.add_subsystem('sink', om.ExecComp('y1 = x1*2',\n x1={'shape_by_conn': True, 'copy_shape': 'y1'},\n y1={'shape_by_conn': True, 'copy_shape': 'x11'}))\n p.model.connect('indep.x1', 'sink.x1')\n with self.assertRaises(RuntimeError) as cm:\n p.setup()\n\n msg = \"<model> <class Group>: Can't copy shape of variable 'sink.x11'. Variable doesn't exist.\"\n self.assertEqual(str(cm.exception), msg)\n\n def test_unconnected_var_dyn_shape(self):\n p = om.Problem()\n indep = p.model.add_subsystem('indep', om.IndepVarComp('x1', val=np.ones((2,3))))\n p.model.add_subsystem('sink', om.ExecComp('y1 = x1*2',\n x1={'shape_by_conn': True, 'copy_shape': 'y1'},\n y1={'shape_by_conn': True}))\n p.model.connect('indep.x1', 'sink.x1')\n with self.assertRaises(RuntimeError) as cm:\n p.setup()\n\n msg = \"<model> <class Group>: 'shape_by_conn' was set for unconnected variable 'sink.y1'.\"\n self.assertEqual(str(cm.exception), msg)\n\n\[email protected](MPI and PETScVector, \"MPI and PETSc are required.\")\nclass TestDistribDynShapes(unittest.TestCase):\n N_PROCS = 4\n\n def test_remote_distrib(self):\n # this test has remote distributed components (distributed comps under parallel groups)\n p = om.Problem()\n indep = p.model.add_subsystem('indep', om.IndepVarComp())\n indep.add_output('x1', shape_by_conn=True)\n\n par = p.model.add_subsystem('par', om.ParallelGroup())\n G1 = par.add_subsystem('G1', DynShapeGroupSeries(2,1, DistribDynShapeComp))\n G2 = par.add_subsystem('G2', DynShapeGroupSeries(2,1, DistribDynShapeComp))\n\n # 'sink' has a defined shape and dyn shapes propagate in reverse from there.\n p.model.add_subsystem('sink', om.ExecComp(['y1=x1+x2'], shape=(8,)))\n p.model.connect('indep.x1', ['par.G1.C1.x1', 'par.G2.C1.x1'])\n p.model.connect('par.G1.C2.y1', 'sink.x1', src_indices=om.slicer[:])\n p.model.connect('par.G2.C2.y1', 'sink.x2', src_indices=om.slicer[:])\n\n with self.assertRaises(RuntimeError) as cm:\n p.setup()\n\n cname = 'G1' if p.model.comm.rank <= 1 else 'G2'\n msg = f\"'par.{cname}.C1' <class DistribDynShapeComp>: Can't determine src_indices automatically for input 'par.{cname}.C1.x1'. 
They must be supplied manually.\"\n self.assertEqual(str(cm.exception), msg)\n\n\nclass DynPartialsComp(om.ExplicitComponent):\n def setup(self):\n self.add_input('x', shape_by_conn=True, copy_shape='y')\n self.add_output('y', shape_by_conn=True, copy_shape='x')\n\n def setup_partials(self):\n size = self._get_var_meta('x', 'size')\n self.mat = np.eye(size) * 3.\n rng = np.arange(size)\n self.declare_partials('y', 'x', rows=rng, cols=rng, val=3.0)\n\n def compute(self, inputs, outputs):\n outputs['y'] = self.mat.dot(inputs['x'])\n\n\nclass TestDynShapeFeature(unittest.TestCase):\n def test_feature_fwd(self):\n\n p = om.Problem()\n p.model.add_subsystem('indeps', om.IndepVarComp('x', val=np.ones(5)))\n p.model.add_subsystem('comp', DynPartialsComp())\n p.model.add_subsystem('sink', om.ExecComp('y=x',\n x={'shape_by_conn': True, 'copy_shape': 'y'},\n y={'shape_by_conn': True, 'copy_shape': 'x'}))\n p.model.connect('indeps.x', 'comp.x')\n p.model.connect('comp.y', 'sink.x')\n p.setup()\n p.run_model()\n J = p.compute_totals(of=['sink.y'], wrt=['indeps.x'])\n assert_near_equal(J['sink.y', 'indeps.x'], np.eye(5)*3.)\n\n def test_feature_rev(sefl):\n\n p = om.Problem()\n p.model.add_subsystem('comp', DynPartialsComp())\n p.model.add_subsystem('sink', om.ExecComp('y=x', shape=5))\n p.model.connect('comp.y', 'sink.x')\n p.setup()\n p.run_model()\n J = p.compute_totals(of=['sink.y'], wrt=['comp.x'])\n assert_near_equal(J['sink.y', 'comp.x'], np.eye(5)*3.)\n\n def test_feature_middle(self):\n\n class PartialsComp(om.ExplicitComponent):\n def setup(self):\n self.add_input('x', val=np.ones(5))\n self.add_output('y', val=np.ones(5))\n\n def setup_partials(self):\n self.mat = np.eye(5) * 3.\n rng = np.arange(5)\n self.declare_partials('y', 'x', rows=rng, cols=rng, val=3.0)\n\n def compute(self, inputs, outputs):\n outputs['y'] = self.mat.dot(inputs['x'])\n\n p = om.Problem()\n p.model.add_subsystem('comp', PartialsComp())\n p.model.add_subsystem('sink', om.ExecComp('y=x',\n x={'shape_by_conn': True, 'copy_shape': 'y'},\n y={'shape_by_conn': True, 'copy_shape': 'x'}))\n p.model.connect('comp.y', 'sink.x')\n p.setup()\n p.run_model()\n J = p.compute_totals(of=['sink.y'], wrt=['comp.x'])\n assert_near_equal(J['sink.y', 'comp.x'], np.eye(5)*3.)\n\n\nclass DistCompDiffSizeKnownInput(om.ExplicitComponent):\n def setup(self):\n size = (self.comm.rank + 1) * 3\n self.add_input('x', val=np.random.random(size), distributed=True)\n\n\nclass DistCompKnownInput(om.ExplicitComponent):\n def setup(self):\n size = 3\n self.add_input('x', val=np.random.random(size), distributed=True)\n\n def compute(self, inputs, outputs):\n pass\n\n\nclass DistCompUnknownInput(om.ExplicitComponent):\n def setup(self):\n self.add_input('x', shape_by_conn=True, distributed=True)\n\n def compute(self, inputs, outputs):\n pass\n\n\[email protected](MPI and PETScVector, \"MPI and PETSc are required.\")\nclass TestDistribDynShapeCombos(unittest.TestCase):\n \"\"\"\n This will test the dynamic shaping on parallel runs with all of the possible\n combinations of connections and dynamic shaping directions.\n\n Here is a list of possible connections:\n\n duplicated => duplicated\n duplicated => distributed\n distributed => duplicated\n distributed => distributed\n \"\"\"\n\n N_PROCS = 3\n\n def test_ser_known_ser_unknown(self):\n p = om.Problem()\n indeps = p.model.add_subsystem('indeps', om.IndepVarComp())\n indeps.add_output('x', val=np.random.random(2))\n p.model.add_subsystem('comp', om.ExecComp('y = x * 2',\n x={'shape_by_conn': True},\n 
y=np.zeros(2)))\n p.model.connect('indeps.x', 'comp.x')\n p.setup()\n p.run_model()\n np.testing.assert_allclose(p.get_val('indeps.x'), p.get_val('comp.x'))\n\n def test_ser_unknown_ser_known(self):\n p = om.Problem()\n indeps = p.model.add_subsystem('indeps', om.IndepVarComp())\n indeps.add_output('x', shape_by_conn=True)\n p.model.add_subsystem('comp', om.ExecComp('y = x * 2',\n x=np.random.random(2),\n y=np.zeros(2)))\n p.model.connect('indeps.x', 'comp.x')\n p.setup()\n p.run_model()\n np.testing.assert_allclose(p.get_val('indeps.x'), p.get_val('comp.x'))\n\n def test_ser_unknown_dist_known_err(self):\n p = om.Problem()\n indeps = p.model.add_subsystem('indeps', om.IndepVarComp())\n indeps.add_output('x', shape_by_conn=True)\n p.model.add_subsystem('comp', DistCompDiffSizeKnownInput())\n p.model.connect('indeps.x', 'comp.x')\n with self.assertRaises(Exception) as cm:\n p.setup()\n self.assertEquals(cm.exception.args[0],\n \"<model> <class Group>: dynamic sizing of non-distributed output 'indeps.x' from distributed input 'comp.x' is not supported because not all comp.x ranks are the same size (sizes=[3 6 9]).\")\n\n def test_dist_known_ser_unknown(self):\n p = om.Problem()\n indeps = p.model.add_subsystem('indeps', om.IndepVarComp())\n indeps.add_output('x', np.ones(3), distributed=True)\n p.model.add_subsystem('comp', om.ExecComp('y = x * 2',\n x={'shape_by_conn': True},\n y={'copy_shape': 'x'}))\n p.model.connect('indeps.x', 'comp.x')\n with self.assertRaises(Exception) as cm:\n p.setup()\n self.assertEquals(cm.exception.args[0],\n \"<model> <class Group>: dynamic sizing of non-distributed input 'comp.x' from distributed output 'indeps.x' is not supported.\")\n\n def test_dist_unknown_ser_known(self):\n p = om.Problem()\n indeps = p.model.add_subsystem('indeps', om.IndepVarComp())\n indeps.add_output('x', distributed=True, shape_by_conn=True)\n p.model.add_subsystem('comp', om.ExecComp('y = x * 2', shape=3))\n p.model.connect('indeps.x', 'comp.x')\n with self.assertRaises(Exception) as cm:\n p.setup()\n self.assertEquals(cm.exception.args[0],\n \"<model> <class Group>: Can't connect distributed output 'indeps.x' to non-distributed input 'comp.x' without specifying src_indices.\")\n\n def test_dist_known_dist_unknown(self):\n p = om.Problem()\n indeps = p.model.add_subsystem('indeps', om.IndepVarComp())\n sizes = [3,0,5]\n indeps.add_output('x', np.random.random(sizes[MPI.COMM_WORLD.rank]), distributed=True)\n p.model.add_subsystem('comp', DistCompUnknownInput())\n p.model.connect('indeps.x', 'comp.x')\n p.setup()\n p.run_model()\n np.testing.assert_allclose(p.get_val('indeps.x'), p.get_val('comp.x'))\n\n def test_dist_unknown_dist_known(self):\n p = om.Problem()\n indeps = p.model.add_subsystem('indeps', om.IndepVarComp())\n indeps.add_output('x', shape_by_conn=True, distributed=True)\n p.model.add_subsystem('comp', DistCompDiffSizeKnownInput())\n p.model.connect('indeps.x', 'comp.x')\n p.setup()\n p.run_model()\n np.testing.assert_allclose(p.get_val('indeps.x'), p.get_val('comp.x'))\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"import unittest\n\nimport numpy as np\n\nimport openmdao.api as om\nfrom openmdao.utils.assert_utils import assert_near_equal, assert_check_partials\n\n\nclass TestAddSubtractCompScalars(unittest.TestCase):\n\n def setUp(self):\n self.nn = 1\n self.p = om.Problem()\n ivc = om.IndepVarComp()\n ivc.add_output(name='a', shape=(self.nn,))\n ivc.add_output(name='b', shape=(self.nn,))\n\n self.p.model.add_subsystem(name='ivc',\n subsys=ivc,\n promotes_outputs=['a', 'b'])\n\n adder=self.p.model.add_subsystem(name='add_subtract_comp',\n subsys=om.AddSubtractComp())\n adder.add_equation('adder_output',['input_a','input_b'])\n\n self.p.model.connect('a', 'add_subtract_comp.input_a')\n self.p.model.connect('b', 'add_subtract_comp.input_b')\n\n self.p.setup()\n\n self.p['a'] = np.random.rand(self.nn,)\n self.p['b'] = np.random.rand(self.nn,)\n\n self.p.run_model()\n\n def test_results(self):\n a = self.p['a']\n b = self.p['b']\n out = self.p['add_subtract_comp.adder_output']\n expected = a + b\n assert_near_equal(out, expected,1e-16)\n\n def test_partials(self):\n partials = self.p.check_partials(method='fd', out_stream=None)\n assert_check_partials(partials)\n\n\nclass TestAddSubtractCompNx1(unittest.TestCase):\n\n def setUp(self):\n self.nn = 5\n\n self.p = om.Problem()\n\n ivc = om.IndepVarComp()\n ivc.add_output(name='a', shape=(self.nn,))\n ivc.add_output(name='b', shape=(self.nn,))\n\n self.p.model.add_subsystem(name='ivc',\n subsys=ivc,\n promotes_outputs=['a', 'b'])\n\n adder=self.p.model.add_subsystem(name='add_subtract_comp',\n subsys=om.AddSubtractComp())\n adder.add_equation('adder_output',['input_a','input_b'],vec_size=self.nn)\n\n self.p.model.connect('a', 'add_subtract_comp.input_a')\n self.p.model.connect('b', 'add_subtract_comp.input_b')\n\n self.p.setup()\n\n self.p['a'] = np.random.rand(self.nn,)\n self.p['b'] = np.random.rand(self.nn,)\n\n self.p.run_model()\n\n def test_results(self):\n a = self.p['a']\n b = self.p['b']\n out = self.p['add_subtract_comp.adder_output']\n expected = a + b\n assert_near_equal(out, expected,1e-16)\n\n def test_partials(self):\n partials = self.p.check_partials(method='fd', out_stream=None)\n assert_check_partials(partials)\n\n\nclass TestAddSubtractCompNx3(unittest.TestCase):\n\n def setUp(self):\n self.nn = 5\n\n self.p = om.Problem()\n\n ivc = om.IndepVarComp()\n ivc.add_output(name='a', shape=(self.nn, 3))\n ivc.add_output(name='b', shape=(self.nn, 3))\n\n self.p.model.add_subsystem(name='ivc',\n subsys=ivc,\n promotes_outputs=['a', 'b'])\n\n adder=self.p.model.add_subsystem(name='add_subtract_comp',\n subsys=om.AddSubtractComp())\n adder.add_equation('adder_output',['input_a','input_b'],vec_size=self.nn,length=3)\n\n self.p.model.connect('a', 'add_subtract_comp.input_a')\n self.p.model.connect('b', 'add_subtract_comp.input_b')\n\n self.p.setup()\n\n self.p['a'] = np.random.rand(self.nn, 3)\n self.p['b'] = np.random.rand(self.nn, 3)\n\n self.p.run_model()\n\n def test_results(self):\n a = self.p['a']\n b = self.p['b']\n out = self.p['add_subtract_comp.adder_output']\n expected = a + b\n assert_near_equal(out, expected,1e-16)\n\n def test_partials(self):\n partials = self.p.check_partials(method='fd', out_stream=None)\n assert_check_partials(partials)\n\n\nclass TestAddSubtractMultipleInputs(unittest.TestCase):\n\n def setUp(self):\n self.nn = 5\n\n self.p = om.Problem()\n\n ivc = om.IndepVarComp()\n ivc.add_output(name='a', shape=(self.nn, 3))\n ivc.add_output(name='b', shape=(self.nn, 3))\n ivc.add_output(name='c', shape=(self.nn, 3))\n\n 
self.p.model.add_subsystem(name='ivc',\n subsys=ivc,\n promotes_outputs=['a', 'b','c'])\n\n adder=self.p.model.add_subsystem(name='add_subtract_comp',\n subsys=om.AddSubtractComp())\n adder.add_equation('adder_output',['input_a','input_b','input_c'],vec_size=self.nn,length=3)\n\n self.p.model.connect('a', 'add_subtract_comp.input_a')\n self.p.model.connect('b', 'add_subtract_comp.input_b')\n self.p.model.connect('c', 'add_subtract_comp.input_c')\n\n self.p.setup()\n\n self.p['a'] = np.random.rand(self.nn, 3)\n self.p['b'] = np.random.rand(self.nn, 3)\n self.p['c'] = np.random.rand(self.nn, 3)\n\n self.p.run_model()\n\n def test_results(self):\n a = self.p['a']\n b = self.p['b']\n c = self.p['c']\n out = self.p['add_subtract_comp.adder_output']\n expected = a + b + c\n assert_near_equal(out, expected,1e-16)\n\n def test_partials(self):\n partials = self.p.check_partials(method='fd', out_stream=None)\n assert_check_partials(partials)\n\n\nclass TestAddSubtractScalingFactors(unittest.TestCase):\n\n def setUp(self):\n self.nn = 5\n\n self.p = om.Problem()\n\n ivc = om.IndepVarComp()\n ivc.add_output(name='a', shape=(self.nn, 3))\n ivc.add_output(name='b', shape=(self.nn, 3))\n ivc.add_output(name='c', shape=(self.nn, 3))\n\n self.p.model.add_subsystem(name='ivc',\n subsys=ivc,\n promotes_outputs=['a', 'b','c'])\n\n adder=self.p.model.add_subsystem(name='add_subtract_comp',\n subsys=om.AddSubtractComp())\n adder.add_equation('adder_output',['input_a','input_b','input_c'],vec_size=self.nn,length=3,scaling_factors=[2.,1.,-1])\n\n self.p.model.connect('a', 'add_subtract_comp.input_a')\n self.p.model.connect('b', 'add_subtract_comp.input_b')\n self.p.model.connect('c', 'add_subtract_comp.input_c')\n\n self.p.setup()\n\n self.p['a'] = np.random.rand(self.nn, 3)\n self.p['b'] = np.random.rand(self.nn, 3)\n self.p['c'] = np.random.rand(self.nn, 3)\n\n self.p.run_model()\n\n def test_results(self):\n a = self.p['a']\n b = self.p['b']\n c = self.p['c']\n out = self.p['add_subtract_comp.adder_output']\n expected = 2*a + b - c\n assert_near_equal(out, expected,1e-16)\n\n def test_partials(self):\n partials = self.p.check_partials(method='fd', out_stream=None)\n assert_check_partials(partials)\n\n\nclass TestAddSubtractUnits(unittest.TestCase):\n\n def setUp(self):\n self.nn = 5\n\n self.p = om.Problem()\n\n ivc = om.IndepVarComp()\n ivc.add_output(name='a', shape=(self.nn, 3),units='ft')\n ivc.add_output(name='b', shape=(self.nn, 3),units='m')\n ivc.add_output(name='c', shape=(self.nn, 3),units='m')\n\n self.p.model.add_subsystem(name='ivc',\n subsys=ivc,\n promotes_outputs=['a', 'b','c'])\n\n adder=self.p.model.add_subsystem(name='add_subtract_comp',\n subsys=om.AddSubtractComp())\n adder.add_equation('adder_output',['input_a','input_b','input_c'],vec_size=self.nn,length=3,units='ft')\n\n self.p.model.connect('a', 'add_subtract_comp.input_a')\n self.p.model.connect('b', 'add_subtract_comp.input_b')\n self.p.model.connect('c', 'add_subtract_comp.input_c')\n\n self.p.setup()\n\n self.p['a'] = np.random.rand(self.nn, 3)\n self.p['b'] = np.random.rand(self.nn, 3)\n self.p['c'] = np.random.rand(self.nn, 3)\n\n self.p.run_model()\n\n def test_results(self):\n a = self.p['a']\n b = self.p['b']\n c = self.p['c']\n out = self.p['add_subtract_comp.adder_output']\n m_to_ft = 3.280839895\n expected = a + b*m_to_ft + c*m_to_ft\n assert_near_equal(out, expected,1e-8)\n\n def test_partials(self):\n partials = self.p.check_partials(method='fd', out_stream=None)\n assert_check_partials(partials)\n\n\nclass 
TestAddSubtractInit(unittest.TestCase):\n\n def setUp(self):\n self.nn = 5\n\n self.p = om.Problem()\n\n ivc = om.IndepVarComp()\n ivc.add_output(name='a', shape=(self.nn, 3), units='ft')\n ivc.add_output(name='b', shape=(self.nn, 3), units='m')\n ivc.add_output(name='c', shape=(self.nn, 3), units='m')\n\n self.p.model.add_subsystem(name='ivc',\n subsys=ivc,\n promotes_outputs=['a', 'b','c'])\n\n # verify proper handling of constructor args\n adder = om.AddSubtractComp(output_name='adder_output',\n input_names=['input_a', 'input_b', 'input_c'],\n vec_size=self.nn, length=3,\n scaling_factors=[2., 1., -1],\n units='ft')\n\n self.p.model.add_subsystem(name='add_subtract_comp', subsys=adder)\n\n self.p.model.connect('a', 'add_subtract_comp.input_a')\n self.p.model.connect('b', 'add_subtract_comp.input_b')\n self.p.model.connect('c', 'add_subtract_comp.input_c')\n\n self.p.setup()\n\n self.p['a'] = np.random.rand(self.nn, 3)\n self.p['b'] = np.random.rand(self.nn, 3)\n self.p['c'] = np.random.rand(self.nn, 3)\n\n self.p.run_model()\n\n def test_results(self):\n a = self.p['a']\n b = self.p['b']\n c = self.p['c']\n out = self.p['add_subtract_comp.adder_output']\n m_to_ft = 3.280839895\n expected = 2*a + b*m_to_ft - c*m_to_ft\n assert_near_equal(out, expected, 1e-8)\n\n def test_partials(self):\n partials = self.p.check_partials(method='fd', out_stream=None)\n assert_check_partials(partials)\n\n\nclass TestForExceptions(unittest.TestCase):\n\n def test_for_bad_scale_factors(self):\n self.nn = 5\n self.p = om.Problem()\n\n ivc = om.IndepVarComp()\n ivc.add_output(name='a', shape=(self.nn, 3))\n ivc.add_output(name='b', shape=(self.nn, 3))\n ivc.add_output(name='c', shape=(self.nn, 3))\n\n self.p.model.add_subsystem(name='ivc',\n subsys=ivc,\n promotes_outputs=['a', 'b','c'])\n\n adder=self.p.model.add_subsystem(name='add_subtract_comp',\n subsys=om.AddSubtractComp())\n\n with self.assertRaises(ValueError) as err:\n adder.add_equation('adder_output', ['input_a', 'input_b', 'input_c'], vec_size=self.nn,\n length=3, scaling_factors=[1, -1])\n\n expected_msg = \"'add_subtract_comp' <class AddSubtractComp>: Scaling factors list needs to be \" \\\n \"same length as input names\"\n\n self.assertEqual(str(err.exception), expected_msg)\n\n\n def test_for_bad_input_set(self):\n self.nn = 5\n self.p = om.Problem()\n\n ivc = om.IndepVarComp()\n ivc.add_output(name='a', shape=(self.nn, 3))\n ivc.add_output(name='b', shape=(self.nn, 3))\n ivc.add_output(name='c', shape=(self.nn, 3))\n\n self.p.model.add_subsystem(name='ivc',\n subsys=ivc,\n promotes_outputs=['a', 'b','c'])\n\n adder=self.p.model.add_subsystem(name='add_subtract_comp',\n subsys=om.AddSubtractComp())\n\n with self.assertRaises(ValueError) as err:\n adder.add_equation('adder_output', ['input_a',], vec_size=self.nn,\n length=3, scaling_factors=[1, -1])\n\n expected_msg = \"'add_subtract_comp' <class AddSubtractComp>: must specify more than one input \" \\\n \"name for an equation, but only one given\"\n\n self.assertEqual(str(err.exception), expected_msg)\n\n with self.assertRaises(ValueError) as err:\n adder.add_equation('adder_output', 'input_a', vec_size=self.nn,\n length=3, scaling_factors=[1, -1])\n\n expected_msg = \"'add_subtract_comp' <class AddSubtractComp>: must specify more than one input \" \\\n \"name for an equation, but only one given\"\n\n self.assertEqual(str(err.exception), expected_msg)\n\n\nclass TestAddSubtractCompTags(unittest.TestCase):\n\n def setUp(self):\n self.nn = 1\n self.p = om.Problem()\n ivc = om.IndepVarComp()\n 
ivc.add_output(name='a', shape=(self.nn,))\n ivc.add_output(name='b', shape=(self.nn,))\n\n self.p.model.add_subsystem(name='ivc',\n subsys=ivc,\n promotes_outputs=['a', 'b'])\n\n adder=self.p.model.add_subsystem(name='add_subtract_comp', subsys=om.AddSubtractComp())\n adder.add_equation('adder_output', ['input_a','input_b'], tags={'foo'})\n adder.add_equation('adder_output2', ['input_a','input_a'], tags={'bar'})\n\n self.p.model.connect('a', 'add_subtract_comp.input_a')\n self.p.model.connect('b', 'add_subtract_comp.input_b')\n\n self.p.setup()\n\n self.p['a'] = np.random.rand(self.nn,)\n self.p['b'] = np.random.rand(self.nn,)\n\n self.p.run_model()\n\n def test_results(self):\n a = self.p['a']\n b = self.p['b']\n\n foo_outputs = self.p.model.list_outputs(tags={'foo'}, out_stream=None)\n bar_outputs = self.p.model.list_outputs(tags={'bar'}, out_stream=None)\n\n self.assertEqual(len(foo_outputs), 1)\n self.assertEqual(len(bar_outputs), 1)\n\n assert_near_equal(foo_outputs[0][1]['val'], a + b)\n assert_near_equal(bar_outputs[0][1]['val'], a + a)\n\n\nclass TestFeature(unittest.TestCase):\n\n def test(self):\n \"\"\"\n A simple example to compute the resultant force on an aircraft and demonstrate the AddSubtract component\n \"\"\"\n n = 3\n\n p = om.Problem()\n model = p.model\n\n # Construct an adder/subtracter here. create a relationship through the add_equation method\n adder = om.AddSubtractComp()\n adder.add_equation('total_force', input_names=['thrust', 'drag', 'lift', 'weight'],\n vec_size=n, length=2, scaling_factors=[1, -1, 1, -1], units='kN')\n # Note the scaling factors. we assume all forces are positive sign upstream\n\n # The vector represents forces at 3 time points (rows) in 2 dimensional plane (cols)\n p.model.add_subsystem(name='totalforcecomp', subsys=adder,\n promotes_inputs=['thrust', 'drag', 'lift', 'weight'])\n\n p.setup()\n\n # Set thrust to exceed drag, weight to equal lift for this scenario\n p['thrust'][:, 0] = [500, 600, 700]\n p['drag'][:, 0] = [400, 400, 400]\n p['weight'][:, 1] = [1000, 1001, 1002]\n p['lift'][:, 1] = [1000, 1000, 1000]\n\n p.run_model()\n\n # Verify the results\n expected_i = np.array([[100, 200, 300], [0, -1, -2]]).T\n assert_near_equal(p.get_val('totalforcecomp.total_force', units='kN'), expected_i)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"\"\"\"Helper function to create non uniform distributions for SplineComp.\"\"\"\n\nimport numpy as np\n\n\ndef cell_centered(num_cells, start=0.0, end=1.0):\n \"\"\"\n Cell centered distribution of control points.\n\n Parameters\n ----------\n num_cells : int\n Number of cells.\n start : int or float\n Minimum value to interpolate at.\n end : int or float\n Maximum value to interpolate at.\n\n Returns\n -------\n ndarray\n Values to interpolate at.\n \"\"\"\n interp_grid = np.linspace(start, end, num=num_cells + 1)\n\n return np.array(0.5 * (interp_grid[1:] + interp_grid[:-1]))\n\n\ndef sine_distribution(num_points, start=0.0, end=1.0, phase=np.pi):\n \"\"\"\n Sine distribution of control points.\n\n Parameters\n ----------\n num_points : int\n Number of points to predict at.\n start : int or float\n Minimum value to interpolate at.\n end : int or float\n Maximum value to interpolate at.\n phase : float\n Phase of the sine wave.\n\n Returns\n -------\n ndarray\n Values to interpolate at.\n \"\"\"\n t_vec = np.linspace(start, end, num_points)\n\n return np.array(0.5 * (1.0 + np.sin(-0.5 * phase + t_vec * phase)))\n\n\ndef node_centered(num_points, start=0.0, end=1.0):\n \"\"\"\n Distribute control points.\n\n Parameters\n ----------\n num_points : int\n Number of points to predict.\n start : int or float\n Minimum value to interpolate at.\n end : int or float\n Maximum value to interpolate at.\n\n Returns\n -------\n ndarray\n Values to interpolate at.\n \"\"\"\n return np.linspace(start, end, num_points + 1)\n",
"import unittest\nimport numpy as np\n\nimport openmdao.api as om\nimport openmdao.utils.hooks as hooks\nfrom openmdao.utils.assert_utils import assert_warning\n\n\ndef make_hook(name):\n def hook_func(prob):\n prob.calls.append(name)\n return hook_func\n\n\ndef hooks_active(f):\n # turn hooks on and off around a hooks test\n def _wrapper(*args, **kwargs):\n hooks._reset_all_hooks()\n hooks.use_hooks = True\n try:\n f(*args, **kwargs)\n finally:\n hooks.use_hooks = False\n return _wrapper\n\n\nclass HooksTestCase(unittest.TestCase):\n def build_model(self):\n prob = om.Problem()\n prob.calls = []\n model = prob.model\n\n model.add_subsystem('p1', om.IndepVarComp('x', 3.0))\n model.add_subsystem('p2', om.IndepVarComp('y', -4.0))\n model.add_subsystem('comp', om.ExecComp(\"f_xy=2.0*x+3.0*y\"))\n\n model.connect('p1.x', 'comp.x')\n model.connect('p2.y', 'comp.y')\n prob.setup()\n return prob\n\n @hooks_active\n def test_ncalls(self):\n hooks._register_hook('final_setup', 'Problem', pre=make_hook('pre_final'), post=make_hook('post_final'), ncalls=2)\n hooks._register_hook('final_setup', 'Problem', pre=make_hook('pre_final2'), post=make_hook('post_final2'))\n\n prob = self.build_model()\n prob.run_model()\n prob.run_model()\n prob.run_model()\n\n self.assertEqual(prob.calls, ['pre_final', 'pre_final2', 'post_final', 'post_final2',\n 'pre_final', 'pre_final2', 'post_final', 'post_final2',\n 'pre_final2', 'post_final2',\n ])\n\n @hooks_active\n def test_exit(self):\n hooks._register_hook('final_setup', 'Problem', pre=make_hook('pre_final'), post=make_hook('post_final'))\n hooks._register_hook('final_setup', 'Problem', pre=make_hook('pre_final2'), post=make_hook('post_final2'), exit=True)\n\n prob = self.build_model()\n try:\n prob.run_model()\n prob.run_model()\n prob.run_model()\n except SystemExit:\n self.assertEqual(prob.calls, ['pre_final', 'pre_final2', 'post_final', 'post_final2'])\n else:\n self.fail(\"sys.exit() was not called\")\n\n @hooks_active\n def test_multiwrap(self):\n pre_final = make_hook('pre_final')\n post_final = make_hook('post_final')\n hooks._register_hook('final_setup', 'Problem', pre=pre_final, post=post_final)\n hooks._register_hook('final_setup', 'Problem', pre=make_hook('pre_final2'), post=make_hook('post_final2'))\n\n prob = self.build_model()\n prob.run_model()\n prob.run_model()\n prob.run_model()\n\n self.assertEqual(prob.calls, ['pre_final', 'pre_final2', 'post_final', 'post_final2',\n 'pre_final', 'pre_final2', 'post_final', 'post_final2',\n 'pre_final', 'pre_final2', 'post_final', 'post_final2',\n ])\n\n hooks._unregister_hook('final_setup', 'Problem', pre=pre_final, post=False)\n prob.calls = []\n\n prob.run_model()\n prob.run_model()\n prob.run_model()\n\n self.assertEqual(prob.calls, ['pre_final2', 'post_final', 'post_final2',\n 'pre_final2', 'post_final', 'post_final2',\n 'pre_final2', 'post_final', 'post_final2',\n ])\n\n hooks._unregister_hook('final_setup', 'Problem', pre=True, post=False)\n prob.calls = []\n\n prob.run_model()\n prob.run_model()\n prob.run_model()\n\n self.assertEqual(prob.calls, ['post_final', 'post_final2',\n 'post_final', 'post_final2',\n 'post_final', 'post_final2',\n ])\n\n @hooks_active\n def test_problem_hooks(self):\n hooks._register_hook('setup', 'Problem', pre=make_hook('pre_setup'), post=make_hook('post_setup'))\n hooks._register_hook('final_setup', 'Problem', pre=make_hook('pre_final'), post=make_hook('post_final'))\n hooks._register_hook('run_model', 'Problem', pre=make_hook('pre_run_model'), 
post=make_hook('post_run_model'))\n\n prob = self.build_model()\n\n prob.run_model()\n prob.run_model()\n prob.run_model()\n\n self.assertEqual(prob.calls, ['pre_setup', 'post_setup',\n 'pre_run_model', 'pre_final', 'post_final', 'post_run_model',\n 'pre_run_model', 'pre_final', 'post_final', 'post_run_model',\n 'pre_run_model', 'pre_final', 'post_final', 'post_run_model',\n ])\n\n np.testing.assert_allclose(prob['comp.f_xy'], -6.0)\n\n hooks._unregister_hook('setup', 'Problem', pre=False)\n hooks._unregister_hook('final_setup', 'Problem')\n hooks._unregister_hook('run_model', 'Problem', post=False)\n prob.calls = []\n\n prob.setup()\n prob.run_model()\n prob.run_model()\n prob.run_model()\n\n self.assertEqual(prob.calls, ['pre_setup', 'post_run_model', 'post_run_model', 'post_run_model'])\n\n hooks._unregister_hook('setup', 'Problem')\n\n msg = \"No hook found for method 'final_setup' for class 'Problem' and instance 'None'.\"\n\n # already removed final_setup hooks earlier, so expect a warning here\n with assert_warning(UserWarning, msg):\n hooks._unregister_hook('final_setup', 'Problem')\n\n hooks._unregister_hook('run_model', 'Problem')\n prob.calls = []\n\n prob.setup()\n prob.run_model()\n prob.run_model()\n\n self.assertEqual(prob.calls, [])\n self.assertEqual(len(hooks._hooks), 0) # should be no hooks left\n\n @hooks_active\n def test_problem_hooks_kwargs(self):\n\n x0 = 33.0\n y0 = 44.0\n\n def set_prob_vars_hook_func(prob, **kwargs):\n if 'x0' in kwargs:\n prob['p1.x'] = kwargs['x0']\n if 'y0' in kwargs:\n prob['p2.y'] = kwargs['y0']\n\n hooks._register_hook('final_setup', 'Problem', pre=set_prob_vars_hook_func, x0=x0, y0=y0)\n\n prob = self.build_model()\n\n prob.run_model()\n\n self.assertEqual(prob['comp.x'], x0)\n self.assertEqual(prob['comp.y'], y0)\n\nif __name__ == '__main__':\n unittest.main()\n",
"\"\"\"Define the CSCmatrix class.\"\"\"\n\nimport numpy as np\nfrom scipy.sparse import csc_matrix\n\nfrom openmdao.matrices.coo_matrix import COOMatrix\n\n\nclass CSCMatrix(COOMatrix):\n \"\"\"\n Sparse matrix in Compressed Col Storage format.\n\n Parameters\n ----------\n comm : MPI.Comm or <FakeComm>\n Communicator of the top-level system that owns the <Jacobian>.\n is_internal : bool\n If True, this is the int_mtx of an AssembledJacobian.\n \"\"\"\n\n def _build(self, num_rows, num_cols, system=None):\n \"\"\"\n Allocate the matrix.\n\n Parameters\n ----------\n num_rows : int\n number of rows in the matrix.\n num_cols : int\n number of cols in the matrix.\n system : <System>\n owning system.\n \"\"\"\n super()._build(num_rows, num_cols, system)\n self._coo = self._matrix\n\n def _pre_update(self):\n \"\"\"\n Do anything that needs to be done at the start of AssembledJacobian._update.\n \"\"\"\n self._matrix = self._coo\n\n def _post_update(self):\n \"\"\"\n Do anything that needs to be done at the end of AssembledJacobian._update.\n \"\"\"\n coo = self._coo\n # this will add any repeated entries together\n # NOTE: The CSC matrix was created in the following way instead of using self._coo.tocsc()\n # because on older versions of scipy, self._coo.tocsc() reuses the row/col arrays and the\n # result is that self._coo.row and self._coo.col get scrambled after csc conversion.\n self._matrix = csc_matrix((coo.data, (coo.row, coo.col)), shape=coo.shape)\n\n def _convert_mask(self, mask):\n \"\"\"\n Convert the mask to the format of this sparse matrix (CSC, etc.) from COO.\n\n Parameters\n ----------\n mask : ndarray\n The mask of indices to zero out.\n\n Returns\n -------\n ndarray\n The converted mask array.\n \"\"\"\n coo = self._coo\n csc = csc_matrix((mask, (coo.row, coo.col)), shape=coo.shape)\n return csc.data\n\n def set_complex_step_mode(self, active):\n \"\"\"\n Turn on or off complex stepping mode.\n\n When turned on, the value in each subjac is cast as complex, and when turned\n off, they are returned to real values.\n\n Parameters\n ----------\n active : bool\n Complex mode flag; set to True prior to commencing complex step.\n \"\"\"\n if active:\n if 'complex' not in self._matrix.dtype.__str__():\n self._matrix.data = self._matrix.data.astype(complex)\n self._matrix.dtype = complex\n self._coo.data = self._coo.data.astype(complex)\n self._coo.dtype = complex\n else:\n self._matrix.data = self._matrix.data.real\n self._matrix.dtype = np.float\n self._coo.data = self._coo.data.real\n self._coo.dtype = np.float\n"
] | [
[
"numpy.iscomplex",
"numpy.expand_dims",
"numpy.array_equal",
"numpy.linspace",
"numpy.asarray",
"numpy.isnan",
"numpy.issubdtype",
"numpy.atleast_2d",
"numpy.diff",
"numpy.broadcast_to",
"numpy.prod",
"numpy.iscomplexobj",
"numpy.any",
"numpy.outer",
"numpy.zeros",
"numpy.where",
"numpy.empty"
],
[
"numpy.linalg.solve",
"numpy.abs",
"numpy.arange",
"numpy.ones",
"numpy.array",
"numpy.zeros"
],
[
"numpy.array"
],
[
"numpy.random.random",
"numpy.zeros",
"numpy.linalg.norm",
"numpy.ones"
],
[
"numpy.array",
"numpy.zeros"
],
[
"numpy.square",
"numpy.abs",
"numpy.random.seed",
"numpy.linspace",
"numpy.arange",
"numpy.eye",
"numpy.cos",
"numpy.ones",
"numpy.sin",
"numpy.size",
"numpy.array",
"numpy.zeros",
"numpy.testing.assert_array_almost_equal"
],
[
"numpy.random.random",
"numpy.arange",
"numpy.eye",
"numpy.linalg.norm",
"numpy.ones",
"numpy.zeros",
"numpy.sum"
],
[
"numpy.array",
"numpy.random.rand"
],
[
"numpy.array",
"numpy.linspace",
"numpy.sin"
],
[
"numpy.testing.assert_allclose"
],
[
"scipy.sparse.csc_matrix"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
mremilien/object-deformnet | [
"bb07fe05f1ee3983835ebe071252541cee5c42f8",
"bb07fe05f1ee3983835ebe071252541cee5c42f8",
"bb07fe05f1ee3983835ebe071252541cee5c42f8"
] | [
"data/shape_dataset.py",
"train_deform.py",
"lib/align.py"
] | [
"import h5py\nimport numpy as np\nimport torch.utils.data as data\n\n\nclass ShapeDataset(data.Dataset):\n def __init__(self, h5_file, mode, n_points=2048, augment=False):\n assert (mode == 'train' or mode == 'val'), 'Mode must be \"train\" or \"val\".'\n self.mode = mode\n self.n_points = n_points\n self.augment = augment\n # load data from h5py file\n with h5py.File(h5_file, 'r') as f:\n self.length = f[self.mode].attrs['len']\n self.data = f[self.mode]['data'][:]\n self.label = f[self.mode]['label'][:]\n # augmentation parameters\n self.sigma = 0.01\n self.clip = 0.02\n self.shift_range = 0.02\n\n def __len__(self):\n return self.length\n\n def __getitem__(self, index):\n xyz = self.data[index]\n label = self.label[index] - 1 # data saved indexed from 1\n # randomly downsample\n np_data = xyz.shape[0]\n assert np_data >= self.n_points, 'Not enough points in shape.'\n idx = np.random.choice(np_data, self.n_points)\n xyz = xyz[idx, :]\n # data augmentation\n if self.augment:\n jitter = np.clip(self.sigma*np.random.randn(self.n_points, 3), -self.clip, self.clip)\n xyz[:, :3] += jitter\n shift = np.random.uniform(-self.shift_range, self.shift_range, (1, 3))\n xyz[:, :3] += shift\n return xyz, label\n",
"import os\nimport time\nimport argparse\nimport random\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport tensorflow as tf\nfrom lib.network import DeformNet\nfrom lib.loss import Loss\nfrom data.pose_dataset import PoseDataset\nfrom lib.utils import setup_logger, compute_sRT_errors\nfrom lib.align import estimateSimilarityTransform\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--dataset', type=str, default='CAMERA', help='CAMERA or CAMERA+Real')\nparser.add_argument('--data_dir', type=str, default='data', help='data directory')\nparser.add_argument('--n_pts', type=int, default=1024, help='number of foreground points')\nparser.add_argument('--n_cat', type=int, default=6, help='number of object categories')\nparser.add_argument('--nv_prior', type=int, default=1024, help='number of vertices in shape priors')\nparser.add_argument('--img_size', type=int, default=192, help='cropped image size')\nparser.add_argument('--batch_size', type=int, default=32, help='batch size')\nparser.add_argument('--num_workers', type=int, default=10, help='number of data loading workers')\nparser.add_argument('--gpu', type=str, default='0', help='GPU to use')\nparser.add_argument('--lr', type=float, default=0.0001, help='initial learning rate')\nparser.add_argument('--start_epoch', type=int, default=1, help='which epoch to start')\nparser.add_argument('--max_epoch', type=int, default=50, help='max number of epochs to train')\nparser.add_argument('--resume_model', type=str, default='', help='resume from saved model')\nparser.add_argument('--result_dir', type=str, default='results/camera', help='directory to save train results')\nopt = parser.parse_args()\n\nopt.decay_epoch = [0, 10, 20, 30, 40]\nopt.decay_rate = [1.0, 0.6, 0.3, 0.1, 0.01]\nopt.corr_wt = 1.0\nopt.cd_wt = 5.0\nopt.entropy_wt = 0.0001\nopt.deform_wt = 0.01\n\n\ndef train_net():\n # set result directory\n if not os.path.exists(opt.result_dir):\n os.makedirs(opt.result_dir)\n tb_writer = tf.summary.FileWriter(opt.result_dir)\n logger = setup_logger('train_log', os.path.join(opt.result_dir, 'log.txt'))\n for key, value in vars(opt).items():\n logger.info(key + ': ' + str(value))\n os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpu\n # model & loss\n estimator = DeformNet(opt.n_cat, opt.nv_prior)\n estimator.cuda()\n criterion = Loss(opt.corr_wt, opt.cd_wt, opt.entropy_wt, opt.deform_wt)\n if opt.resume_model != '':\n estimator.load_state_dict(torch.load(opt.resume_model))\n # dataset\n train_dataset = PoseDataset(opt.dataset, 'train', opt.data_dir, opt.n_pts, opt.img_size)\n val_dataset = PoseDataset(opt.dataset, 'test', opt.data_dir, opt.n_pts, opt.img_size)\n # start training\n st_time = time.time()\n train_steps = 1500\n global_step = train_steps * (opt.start_epoch - 1)\n n_decays = len(opt.decay_epoch)\n assert len(opt.decay_rate) == n_decays\n for i in range(n_decays):\n if opt.start_epoch > opt.decay_epoch[i]:\n decay_count = i\n train_size = train_steps * opt.batch_size\n indices = []\n page_start = -train_size\n for epoch in range(opt.start_epoch, opt.max_epoch + 1):\n # train one epoch\n logger.info('Time {0}'.format(time.strftime(\"%Hh %Mm %Ss\", time.gmtime(time.time() - st_time)) + \\\n ', ' + 'Epoch %02d' % epoch + ', ' + 'Training started'))\n # create optimizer and adjust learning rate if needed\n if decay_count < len(opt.decay_rate):\n if epoch > opt.decay_epoch[decay_count]:\n current_lr = opt.lr * opt.decay_rate[decay_count]\n optimizer = torch.optim.Adam(estimator.parameters(), lr=current_lr)\n 
decay_count += 1\n # sample train subset\n page_start += train_size\n len_last = len(indices) - page_start\n if len_last < train_size:\n indices = indices[page_start:]\n if opt.dataset == 'CAMERA+Real':\n # CAMERA : Real = 3 : 1\n camera_len = train_dataset.subset_len[0]\n real_len = train_dataset.subset_len[1]\n real_indices = list(range(camera_len, camera_len+real_len))\n camera_indices = list(range(camera_len))\n n_repeat = (train_size - len_last) // (4 * real_len) + 1\n data_list = random.sample(camera_indices, 3*n_repeat*real_len) + real_indices*n_repeat\n random.shuffle(data_list)\n indices += data_list\n else:\n data_list = list(range(train_dataset.length))\n for i in range((train_size - len_last) // train_dataset.length + 1):\n random.shuffle(data_list)\n indices += data_list\n page_start = 0\n train_idx = indices[page_start:(page_start+train_size)]\n train_sampler = torch.utils.data.sampler.SubsetRandomSampler(train_idx)\n train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=opt.batch_size, sampler=train_sampler,\n num_workers=opt.num_workers, pin_memory=True)\n estimator.train()\n for i, data in enumerate(train_dataloader, 1):\n points, rgb, choose, cat_id, model, prior, sRT, nocs = data\n points = points.cuda()\n rgb = rgb.cuda()\n choose = choose.cuda()\n cat_id = cat_id.cuda()\n model = model.cuda()\n prior = prior.cuda()\n sRT = sRT.cuda()\n nocs = nocs.cuda()\n assign_mat, deltas = estimator(points, rgb, choose, cat_id, prior)\n loss, corr_loss, cd_loss, entropy_loss, deform_loss = criterion(assign_mat, deltas, prior, nocs, model)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n global_step += 1\n # write results to tensorboard\n summary = tf.Summary(value=[tf.Summary.Value(tag='learning_rate', simple_value=current_lr),\n tf.Summary.Value(tag='train_loss', simple_value=loss),\n tf.Summary.Value(tag='corr_loss', simple_value=corr_loss),\n tf.Summary.Value(tag='cd_loss', simple_value=cd_loss),\n tf.Summary.Value(tag='entropy_loss', simple_value=entropy_loss),\n tf.Summary.Value(tag='deform_loss', simple_value=deform_loss)])\n tb_writer.add_summary(summary, global_step)\n if i % 10 == 0:\n logger.info('Batch {0} Loss:{1:f}, corr_loss:{2:f}, cd_loss:{3:f}, entropy_loss:{4:f}, deform_loss:{5:f}'.format(\n i, loss.item(), corr_loss.item(), cd_loss.item(), entropy_loss.item(), deform_loss.item()))\n\n logger.info('>>>>>>>>----------Epoch {:02d} train finish---------<<<<<<<<'.format(epoch))\n\n # evaluate one epoch\n logger.info('Time {0}'.format(time.strftime(\"%Hh %Mm %Ss\", time.gmtime(time.time() - st_time)) +\n ', ' + 'Epoch %02d' % epoch + ', ' + 'Testing started'))\n val_loss = 0.0\n total_count = np.zeros((opt.n_cat,), dtype=int)\n strict_success = np.zeros((opt.n_cat,), dtype=int) # 5 degree and 5 cm\n easy_success = np.zeros((opt.n_cat,), dtype=int) # 10 degree and 5 cm\n iou_success = np.zeros((opt.n_cat,), dtype=int) # relative scale error < 0.1\n # sample validation subset\n val_size = 1500\n val_idx = random.sample(list(range(val_dataset.length)), val_size)\n val_sampler = torch.utils.data.sampler.SubsetRandomSampler(val_idx)\n val_dataloader = torch.utils.data.DataLoader(val_dataset, batch_size=1, sampler=val_sampler,\n num_workers=opt.num_workers, pin_memory=True)\n estimator.eval()\n for i, data in enumerate(val_dataloader, 1):\n points, rgb, choose, cat_id, model, prior, sRT, nocs = data\n points = points.cuda()\n rgb = rgb.cuda()\n choose = choose.cuda()\n cat_id = cat_id.cuda()\n model = model.cuda()\n prior = prior.cuda()\n sRT 
= sRT.cuda()\n nocs = nocs.cuda()\n assign_mat, deltas = estimator(points, rgb, choose, cat_id, prior)\n loss, _, _, _, _ = criterion(assign_mat, deltas, prior, nocs, model)\n # estimate pose and scale\n inst_shape = prior + deltas\n assign_mat = F.softmax(assign_mat, dim=2)\n nocs_coords = torch.bmm(assign_mat, inst_shape)\n nocs_coords = nocs_coords.detach().cpu().numpy()[0]\n points = points.cpu().numpy()[0]\n # use choose to remove repeated points\n choose = choose.cpu().numpy()[0]\n _, choose = np.unique(choose, return_index=True)\n nocs_coords = nocs_coords[choose, :]\n points = points[choose, :]\n _, _, _, pred_sRT = estimateSimilarityTransform(nocs_coords, points)\n # evaluate pose\n cat_id = cat_id.item()\n if pred_sRT is not None:\n sRT = sRT.detach().cpu().numpy()[0]\n R_error, T_error, IoU = compute_sRT_errors(pred_sRT, sRT)\n if R_error < 5 and T_error < 0.05:\n strict_success[cat_id] += 1\n if R_error < 10 and T_error < 0.05:\n easy_success[cat_id] += 1\n if IoU < 0.1:\n iou_success[cat_id] += 1\n total_count[cat_id] += 1\n val_loss += loss.item()\n if i % 100 == 0:\n logger.info('Batch {0} Loss:{1:f}'.format(i, loss.item()))\n # compute accuracy\n strict_acc = 100 * (strict_success / total_count)\n easy_acc = 100 * (easy_success / total_count)\n iou_acc = 100 * (iou_success / total_count)\n for i in range(opt.n_cat):\n logger.info('{} accuracies:'.format(val_dataset.cat_names[i]))\n logger.info('5^o 5cm: {:4f}'.format(strict_acc[i]))\n logger.info('10^o 5cm: {:4f}'.format(easy_acc[i]))\n logger.info('IoU < 0.1: {:4f}'.format(iou_acc[i]))\n strict_acc = np.mean(strict_acc)\n easy_acc = np.mean(easy_acc)\n iou_acc = np.mean(iou_acc)\n val_loss = val_loss / val_size\n summary = tf.Summary(value=[tf.Summary.Value(tag='val_loss', simple_value=val_loss),\n tf.Summary.Value(tag='5^o5cm_acc', simple_value=strict_acc),\n tf.Summary.Value(tag='10^o5cm_acc', simple_value=easy_acc),\n tf.Summary.Value(tag='iou_acc', simple_value=iou_acc)])\n tb_writer.add_summary(summary, global_step)\n logger.info('Epoch {0:02d} test average loss: {1:06f}'.format(epoch, val_loss))\n logger.info('Overall accuracies:')\n logger.info('5^o 5cm: {:4f} 10^o 5cm: {:4f} IoU: {:4f}'.format(strict_acc, easy_acc, iou_acc))\n logger.info('>>>>>>>>----------Epoch {:02d} test finish---------<<<<<<<<'.format(epoch))\n # save model after each epoch\n torch.save(estimator.state_dict(), '{0}/model_{1:02d}.pth'.format(opt.result_dir, epoch))\n\n\nif __name__ == '__main__':\n train_net()\n",
"\"\"\"\n RANSAC for Similarity Transformation Estimation\n Modified from https://github.com/hughw19/NOCS_CVPR2019\n Originally Written by Srinath Sridhar\n\"\"\"\nimport time\nimport numpy as np\n\n\ndef estimateSimilarityUmeyama(SourceHom, TargetHom):\n # Copy of original paper is at: http://web.stanford.edu/class/cs273/refs/umeyama.pdf\n SourceCentroid = np.mean(SourceHom[:3, :], axis=1)\n TargetCentroid = np.mean(TargetHom[:3, :], axis=1)\n nPoints = SourceHom.shape[1]\n CenteredSource = SourceHom[:3, :] - np.tile(SourceCentroid, (nPoints, 1)).transpose()\n CenteredTarget = TargetHom[:3, :] - np.tile(TargetCentroid, (nPoints, 1)).transpose()\n CovMatrix = np.matmul(CenteredTarget, np.transpose(CenteredSource)) / nPoints\n if np.isnan(CovMatrix).any():\n print('nPoints:', nPoints)\n print(SourceHom.shape)\n print(TargetHom.shape)\n raise RuntimeError('There are NANs in the input.')\n\n U, D, Vh = np.linalg.svd(CovMatrix, full_matrices=True)\n d = (np.linalg.det(U) * np.linalg.det(Vh)) < 0.0\n if d:\n D[-1] = -D[-1]\n U[:, -1] = -U[:, -1]\n # rotation\n Rotation = np.matmul(U, Vh)\n # scale\n varP = np.var(SourceHom[:3, :], axis=1).sum()\n Scale = 1 / varP * np.sum(D)\n # translation\n Translation = TargetHom[:3, :].mean(axis=1) - SourceHom[:3, :].mean(axis=1).dot(Scale*Rotation.T)\n # transformation matrix\n OutTransform = np.identity(4)\n OutTransform[:3, :3] = Scale * Rotation\n OutTransform[:3, 3] = Translation\n\n return Scale, Rotation, Translation, OutTransform\n\n\ndef estimateSimilarityTransform(source: np.array, target: np.array, verbose=False):\n \"\"\" Add RANSAC algorithm to account for outliers.\n\n \"\"\"\n assert source.shape[0] == target.shape[0], 'Source and Target must have same number of points.'\n SourceHom = np.transpose(np.hstack([source, np.ones([source.shape[0], 1])]))\n TargetHom = np.transpose(np.hstack([target, np.ones([target.shape[0], 1])]))\n # Auto-parameter selection based on source heuristics\n # Assume source is object model or gt nocs map, which is of high quality\n SourceCentroid = np.mean(SourceHom[:3, :], axis=1)\n nPoints = SourceHom.shape[1]\n CenteredSource = SourceHom[:3, :] - np.tile(SourceCentroid, (nPoints, 1)).transpose()\n SourceDiameter = 2 * np.amax(np.linalg.norm(CenteredSource, axis=0))\n InlierT = SourceDiameter / 10.0 # 0.1 of source diameter\n maxIter = 128\n confidence = 0.99\n\n if verbose:\n print('Inlier threshold: ', InlierT)\n print('Max number of iterations: ', maxIter)\n\n BestInlierRatio = 0\n BestInlierIdx = np.arange(nPoints)\n for i in range(0, maxIter):\n # Pick 5 random (but corresponding) points from source and target\n RandIdx = np.random.randint(nPoints, size=5)\n Scale, _, _, OutTransform = estimateSimilarityUmeyama(SourceHom[:, RandIdx], TargetHom[:, RandIdx])\n PassThreshold = Scale * InlierT # propagate inlier threshold to target scale\n Diff = TargetHom - np.matmul(OutTransform, SourceHom)\n ResidualVec = np.linalg.norm(Diff[:3, :], axis=0)\n InlierIdx = np.where(ResidualVec < PassThreshold)[0]\n nInliers = InlierIdx.shape[0]\n InlierRatio = nInliers / nPoints\n # update best hypothesis\n if InlierRatio > BestInlierRatio:\n BestInlierRatio = InlierRatio\n BestInlierIdx = InlierIdx\n if verbose:\n print('Iteration: ', i)\n print('Inlier ratio: ', BestInlierRatio)\n # early break\n if (1 - (1 - BestInlierRatio ** 5) ** i) > confidence:\n break\n\n if(BestInlierRatio < 0.1):\n print('[ WARN ] - Something is wrong. 
Small BestInlierRatio: ', BestInlierRatio)\n return None, None, None, None\n\n SourceInliersHom = SourceHom[:, BestInlierIdx]\n TargetInliersHom = TargetHom[:, BestInlierIdx]\n Scale, Rotation, Translation, OutTransform = estimateSimilarityUmeyama(SourceInliersHom, TargetInliersHom)\n\n if verbose:\n print('BestInlierRatio:', BestInlierRatio)\n print('Rotation:\\n', Rotation)\n print('Translation:\\n', Translation)\n print('Scale:', Scale)\n\n return Scale, Rotation, Translation, OutTransform\n\n\ndef backproject(depth, intrinsics, instance_mask):\n \"\"\" Back-projection, use opencv camera coordinate frame.\n\n \"\"\"\n cam_fx = intrinsics[0, 0]\n cam_fy = intrinsics[1, 1]\n cam_cx = intrinsics[0, 2]\n cam_cy = intrinsics[1, 2]\n\n non_zero_mask = (depth > 0)\n final_instance_mask = np.logical_and(instance_mask, non_zero_mask)\n idxs = np.where(final_instance_mask)\n\n z = depth[idxs[0], idxs[1]]\n x = (idxs[1] - cam_cx) * z / cam_fx\n y = (idxs[0] - cam_cy) * z / cam_fy\n pts = np.stack((x, y, z), axis=1)\n\n return pts, idxs\n\n\ndef align_nocs_to_depth(masks, coords, depth, intrinsics, instance_ids, img_path, verbose=False):\n num_instances = len(instance_ids)\n error_messages = ''\n elapses = []\n scales = np.zeros(num_instances)\n rotations = np.zeros((num_instances, 3, 3))\n translations = np.zeros((num_instances, 3))\n\n for i in range(num_instances):\n mask = masks[:, :, i]\n coord = coords[:, :, i, :]\n pts, idxs = backproject(depth, intrinsics, mask)\n coord_pts = coord[idxs[0], idxs[1], :] - 0.5\n try:\n start = time.time()\n s, R, T, outtransform = estimateSimilarityTransform(coord_pts, pts, False)\n elapsed = time.time() - start\n if verbose:\n print('elapsed: ', elapsed)\n elapses.append(elapsed)\n except Exception as e:\n message = '[ Error ] aligning instance {} in {} fails. Message: {}.'.format(instance_ids[i], img_path, str(e))\n print(message)\n error_messages += message + '\\n'\n s = 1.0\n R = np.eye(3)\n T = np.zeros(3)\n outtransform = np.identity(4, dtype=np.float32)\n\n scales[i] = s / 1000.0\n rotations[i, :, :] = R\n translations[i, :] = T / 1000.0\n\n return scales, rotations, translations, error_messages, elapses\n"
] | [
[
"numpy.random.uniform",
"numpy.random.randn",
"numpy.random.choice"
],
[
"torch.nn.functional.softmax",
"tensorflow.summary.FileWriter",
"torch.load",
"numpy.unique",
"torch.utils.data.DataLoader",
"torch.utils.data.sampler.SubsetRandomSampler",
"tensorflow.Summary.Value",
"numpy.mean",
"torch.bmm",
"numpy.zeros"
],
[
"numpy.mean",
"numpy.var",
"numpy.where",
"numpy.random.randint",
"numpy.linalg.svd",
"numpy.arange",
"numpy.eye",
"numpy.matmul",
"numpy.stack",
"numpy.linalg.det",
"numpy.zeros",
"numpy.isnan",
"numpy.identity",
"numpy.transpose",
"numpy.logical_and",
"numpy.sum",
"numpy.linalg.norm",
"numpy.tile",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
dileep-kishore/deeplearning-examples | [
"2b230ea17f366f602044d44cc8abcac419d4e521"
] | [
"deeplearning_examples/loaders/Churn.py"
] | [
"# @Author: dileep\n# @Last Modified by: dileep\n\nfrom collections import OrderedDict\nimport os\nfrom typing import Tuple, Iterable, Sequence, Dict, Union\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nfrom sklearn.model_selection import train_test_split\nfrom . import datapath\nfrom ..preprocessing import Encoder\nfrom ..sampling import hold_out\n\n#TODO: Make this a subclass of torch.utils.data.Dataset\nclass Churn:\n \"\"\"\n Class for loading the `churn` dataset to predict whether customer `exited` or not\n Parameters:\n ----------\n features : Iterable[str]\n List of features to be used in training and testing.\n NOTE: Do not include the dependent variable\n Options: {RowNumber,CustomerId,Surname,CreditScore,Geography,Gender,\n Age,Tenure,Balance,NumOfProducts,HasCrCard,IsActiveMember,\n EstimatedSalary}\n Attributes:\n ----------\n raw_data : pd.Series\n Raw data returned in the form of a pandas dataframe\n train_data : Tuple[np.ndarray, np.ndarray]\n Tuple of (features, targets) where each is a numpy ndarray\n test_data : Tuple[np.ndarray, np.ndarray]\n Tuple of (features, targets) where each is a numpy ndarray\n \"\"\"\n _feature_dict = {\n 'multi-category': {'Geography'},\n 'binary-category': {'Gender', 'HasCrCard', 'IsActiveMember', 'Exited'},\n 'int': {'CreditScore', 'Age', 'Tenure', 'NumOfProducts'},\n 'float': {'Balance', 'EstimatedSalary'}\n }\n\n def __init__(self, features: Union[Iterable[str], str] = 'all') -> None:\n churn_path = os.path.join(datapath(), 'churn/Churn_Modeling.csv')\n self.raw_data = pd.read_csv(churn_path, index_col=0)\n if features == 'all':\n features = self.all_features\n assert self._validate_features(features), \"Invalid features given\"\n self._features = features + ['Exited']\n\n def __call__(self):\n raw_train, raw_test = hold_out(self.raw_data[self._features])\n feat_meta = self._get_feat_meta(self._features)\n data_encoder = Encoder(feat_meta)\n return data_encoder.encode(raw_train, raw_test, 'Exited')\n\n @property\n def all_features(self) -> Iterable[str]:\n \"\"\"\n Returns all the possible features that can be used\n Returns:\n -------\n Iterable[str]\n A list of all possible features\n \"\"\"\n features = list(self.raw_data.columns)\n return [f for f in features if f not in {'Exited', 'RowNumber', 'CustomerId', 'Surname'}]\n\n def _validate_features(self, features: Iterable[str]) -> bool:\n \"\"\"\n Returns whether the input set of features are valid\n Parameters:\n ----------\n features : Iterable[str]\n Features input to the class\n Returns:\n -------\n bool\n True/False based on validity\n \"\"\"\n all_features = set()\n for f_set in self._feature_dict.values():\n all_features.update(f_set)\n return not any(filter(lambda f: f not in all_features, features))\n\n def _get_feat_meta(self, features: Iterable[str]) -> Dict[str, str]:\n \"\"\"\n Returns the type for each feature\n Parameters:\n ----------\n features : Iterable[str]\n A list of features that are to be used for classification\n Returns:\n -------\n Dict[str, str]\n Dictionary of features and their corresponding types\n \"\"\"\n invert_fdict = {frozenset(v): k for k, v in self._feature_dict.items()}\n feat_meta: Dict[str, str] = OrderedDict()\n for feat in features:\n for feat_group, data_type in invert_fdict.items():\n if feat in feat_group:\n feat_meta[feat] = data_type\n continue\n return feat_meta\n\n def encode_features(self, features: Iterable[str]) -> Tuple[np.ndarray, np.ndarray]:\n cat_features = 
(self._feature_dict['binary-category'] or\n self._feature_dict['multi-category'])\n for feat in features:\n if feat in cat_features:\n self.pp\n\n def split_data(self, features: Iterable[str]) -> Sequence[np.ndarray]:\n \"\"\"\n Splits the raw data into training and testing using the features as a filter\n Parameters:\n ----------\n features : Iterable[str]\n Features that are to be used in the training and testing data\n Returns:\n -------\n Sequence[np.ndarray]\n Sequence of x_train, x_test, y_train, y_test\n \"\"\"\n pass\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
Joshinn-io/augur | [
"e9410887f58af2b26c350edf08e3f70ff783bdc5"
] | [
"tests/test_metrics/test_issue_metrics.py"
] | [
"#SPDX-License-Identifier: MIT\n\nimport pytest\nimport pandas as pd\n\ndef test_issues_new(metrics):\n #repo_id\n assert metrics.issues_new(1, 1 , period='year').iloc[0]['issues'] > 0\n\n #repo_group_id\n assert metrics.issues_new(10, period='year').iloc[1]['issues'] > 0\n\n #begin_date & end_date\n assert metrics.issues_new(10, 25430, period='week', begin_date='2017',\n end_date='2017-10').iloc[1]['issues'] > 0\n assert metrics.issues_new(10, period='month', begin_date='2017-05',\n end_date='2018').iloc[2]['issues'] > 0\n\ndef test_issues_active(metrics):\n # repo\n assert metrics.issues_active(1, 1, period='year').iloc[0]['issues'] > 0\n\n # repo_group\n assert metrics.issues_active(10, period='year').iloc[0]['issues'] > 0\n\n # begin_date & end_date\n assert metrics.issues_active(10, 25430, period='month', begin_date='2020-02',\n end_date='2020-03').iloc[0]['issues'] > 0\n\n assert metrics.issues_active(10, period='week', begin_date='2020-01',\n end_date='2020-03') .iloc[0]['issues'] > 0\n\ndef test_issues_closed(metrics):\n # repo\n assert metrics.issues_closed(10, 25430, period='year').iloc[0]['issues'] > 0\n\n #repo_group\n assert metrics.issues_closed(10, period='year').iloc[0]['issues'] > 0\n\n # begin_date & end_date\n assert metrics.issues_closed(10, 25430, period='week', begin_date='2019',\n end_date='2020-02').iloc[0]['issues'] > 0\n\n assert metrics.issues_closed(10, period='month', begin_date='2018-05',\n end_date='2019-08-15').iloc[0]['issues'] > 0\n\ndef test_issue_duration(metrics):\n # repo\n assert metrics.issue_duration(10, 25430).iloc[0]['duration'] == '20 days 03:08:22.000000000'\n\n # repo_group\n assert metrics.issue_duration(10).iloc[0]['duration'] == '20 days 03:08:22.000000000'\n\ndef test_issue_participants(metrics):\n # repo\n assert metrics.issue_participants(10, 25430).iloc[0]['participants'] > 0\n\n # repo_group\n assert metrics.issue_participants(10).iloc[0]['participants'] > 0\n\ndef test_issue_throughput(metrics):\n # repo\n assert metrics.issue_throughput(10, 25430).iloc[0]['throughput'] >= 0\n\n # repo_group\n assert metrics.issue_throughput(10).iloc[0]['throughput'] >= 0\n\ndef test_issue_backlog(metrics):\n #repo_id\n assert metrics.issue_backlog(10, 25430).iloc[0]['issue_backlog'] > 0\n\n #repo_group_id\n assert metrics.issue_backlog(10).iloc[0]['issue_backlog'] > 0\n\n\ndef test_issues_first_time_closed(metrics):\n\n # repo id\n assert metrics.issues_first_time_closed(10, repo_id=25430, period='year').isin(\n [pd.Timestamp('2019', tz='UTC')]).any().any()\n\n # repo_group_id\n assert metrics.issues_first_time_closed(10, period='year').isin(\n [pd.Timestamp('2020', tz='UTC')]).any().any()\n\n # begin_date and end_date\n assert metrics.issues_first_time_closed(10, period='year', begin_date='2019-1-1 00:00:00',\n end_date='2019-12-31 23:59:59').isin([pd.Timestamp('2019-01-01 00:00:00', tz='UTC')]).any().any()\n\n assert metrics.issues_first_time_closed(10, repo_id=25430, period='year', begin_date='2019-1-1 00:00:00',\n end_date='2019-12-31 23:59:59').isin([pd.Timestamp('2019-01-01 00:00:00', tz='UTC')]).any().any()\n\n\ndef test_open_issues_count(metrics):\n # repo\n assert metrics.open_issues_count(10, 25430).iloc[0]['open_count'] > 0\n\n # repo_group\n assert metrics.open_issues_count(10).iloc[0]['open_count'] > 0\n\ndef test_closed_issues_count(metrics):\n # repo\n assert metrics.closed_issues_count(10, 25430).iloc[0]['closed_count'] > 0\n\n # repo_group\n assert metrics.closed_issues_count(10).iloc[0]['closed_count'] > 0\n\ndef 
test_issues_open_age(metrics):\n #repo group\n assert metrics.issues_open_age(10).iloc[0]['open_date'] > 0\n # repo\n assert metrics.issues_open_age(10, 25430).iloc[0]['open_date'] > 0\n\ndef test_issues_closed_resolution_duration(metrics):\n # repo group\n assert metrics.issues_closed_resolution_duration(10).iloc[0]['diffdate'] >= 0\n # repo\n assert metrics.issues_closed_resolution_duration(10, 25430).iloc[0]['diffdate'] >= 0\n\ndef test_average_issue_resolution_time(metrics):\n #repo\n assert metrics.average_issue_resolution_time(10, 25430).isin(\n ['augur', '61 days 12:20:43.791667']).any().any()\n\n # repo_group\n assert metrics.average_issue_resolution_time(10).isin(\n ['grimoirelab', ' 67 days 22:41:55.260417']).any().any()\n\ndef test_issues_maintainer_response_duration(metrics):\n assert metrics.issues_maintainer_response_duration(10, 25430).iloc[0].average_days_comment > 0\n assert metrics.issues_maintainer_response_duration(10).iloc[0].average_days_comment > 0\n assert metrics.issues_maintainer_response_duration(10, 25430).iloc[0].average_days_comment > 0\n\ndef test_issue_comments_mean(metrics):\n assert metrics.issue_comments_mean(10).any().any()\n assert metrics.issue_comments_mean(10, 25430).any().any()\n assert metrics.issue_comments_mean(10, group_by='year').any().any()\n assert metrics.issue_comments_mean(10, 25430, group_by='year').any().any()\n\ndef test_issue_comments_mean_std(metrics):\n assert metrics.issue_comments_mean_std(10).any().any()\n assert metrics.issue_comments_mean_std(10, 25430).any().any()\n assert metrics.issue_comments_mean_std(10, group_by='year').any().any()\n assert metrics.issue_comments_mean_std(10, 25430, group_by='year').any().any()\n"
] | [
[
"pandas.Timestamp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
c-martinez/compactness | [
"679a1644e0cd3ded278e9917efe171b5e89fc780"
] | [
"pydescriptors/helpers.py"
] | [
"import numpy as _np\n\nfrom .moments import immoment3D as _immoment3D\n\ndef getSphere(side):\n \"\"\"Create a 3D volume of sideXsideXside, where voxels representing a\n sphere are ones and background is zeros.\n\n Keyword arguments:\n side -- the number of voxels the 3D volume should have on each side.\n\n Returns:\n A (side,side,side) shaped matrix of zeros and ones.\n \"\"\"\n volume = _np.zeros((side, side, side))\n r = side / 2\n Xs, Ys = _np.meshgrid(_np.arange(-r, r), _np.arange(-r, r))\n for k, z in enumerate(_np.arange(-r, r)):\n volume[:, :, k] = _np.sqrt(Xs ** 2 + Ys ** 2 + z ** 2) < r\n return volume\n\n\ndef rotate3D(X, Y, Z, rx, ry):\n \"\"\"Rotates a 3D object along one ordinate axis at a time.\n\n Keyword arguments:\n X -- The X coordinate of the voxels to be rotated.\n Y -- The Y coordinate of the voxels to be rotated.\n Z -- The Z coordinate of the voxels to be rotated.\n\n Returns:\n X,Y,Z coordinates of the rotated voxels.\n \"\"\"\n R = _np.eye(3)\n Rx = _np.array([[1, 0, 0],\n [0, _np.cos(rx), -_np.sin(rx)],\n [0, _np.sin(rx), _np.cos(rx)]])\n Ry = _np.array([[_np.cos(ry), 0, _np.sin(ry)],\n [0, 1, 0],\n [-_np.sin(ry), 0, _np.cos(ry)]])\n R = _np.dot(R, Rx)\n R = _np.dot(R, Ry)\n\n XYZ = _np.vstack([X, Y, Z])\n XYZ_ = _np.dot(XYZ.T, R)\n\n return XYZ_[:, 0], XYZ_[:, 1], XYZ_[:, 2]\n\n\ndef recenter(X, Y, Z):\n # TODO: Document, write unit test\n m000 = _immoment3D(X, Y, Z, 0, 0, 0)\n m100 = _immoment3D(X, Y, Z, 1, 0, 0)\n m010 = _immoment3D(X, Y, Z, 0, 1, 0)\n m001 = _immoment3D(X, Y, Z, 0, 0, 1)\n\n # Find centroid\n cx = m100 / m000\n cy = m010 / m000\n cz = m001 / m000\n\n # Recentering\n X_ = X - cx\n Y_ = Y - cy\n Z_ = Z - cz\n\n return X_, Y_, Z_\n"
] | [
[
"numpy.dot",
"numpy.sqrt",
"numpy.arange",
"numpy.eye",
"numpy.cos",
"numpy.sin",
"numpy.zeros",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yaront/MutSig | [
"456dc793ab2dbd955b5cef098fd14539d428de0b"
] | [
"scripts/Emdometrial/Statistics/mut_analysis.py"
] | [
"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 26 20:42:43 2018\n\n@author: tomer\n\"\"\"\n\n#%%\n# =================================================\n# # Mutation per gene\n# =================================================\n\nimport numpy as np\nimport pandas as pd\n\n#%%\n\n#tumor = sys.argv[1]\n#tumor = tumor.split('/')[-1].split('.')[0]\n#print tumor\n\ntumor = 'UCEC'\n\n#%% Reading data\n\nprint(\"Starting: \" + tumor)\n\nmut_data = pd.read_table('./../../../databases/Endometrial/TCGA_MAFs/' + tumor + '.maf', sep = '\\t')\nbmi_data = pd.read_table('./../../../databases/Endometrial/information/TCGA_bmi_data.txt', sep = '\\t')\npat_bmi = bmi_data[bmi_data['bmi'] != '--']\npat_bmi = pat_bmi[(18.5 < pd.to_numeric(pat_bmi['bmi'])) & (pd.to_numeric(pat_bmi['bmi']) < 90)]\n\npatients = list(set(np.unique(['-'.join(x.split('-')[0:3]) for x in mut_data['Tumor_Sample_Barcode']])).intersection(list(pat_bmi['submitter_id'].values)))\n\npat_bmi = pat_bmi[[(x in patients) for x in pat_bmi['submitter_id'].values]].sort_values(by = ['bmi'])\npat_mut = mut_data[[(x in patients) for x in ['-'.join(x.split('-')[0:3]) for x in mut_data['Tumor_Sample_Barcode']]]]\npat_mut = pat_mut[pat_mut['Variant_Classification'].isin(['Frame_Shift_Del', 'Frame_Shift_Ins', 'In_Frame_Del', 'In_Frame_Ins', 'Missense_Mutation', 'Nonsense_Mutation', 'Nonstop_Mutation', 'Translation_Start_Site'])]\n\n#%% Creating table of mutations per BMI and mutation burden per patient\n\ngene_bmi_mut = pd.DataFrame(0, columns = ['BMI','Total_Mutations'] + list(np.unique(pat_mut['Hugo_Symbol'])), index = np.sort(pat_bmi[['submitter_id','bmi']])[:,1])\ngene_bmi_mut['BMI'] = np.sort(pat_bmi[['submitter_id','bmi']])[:,0]\n\npat_name_mut = ['-'.join(x.split('-')[0:3]) for x in pat_mut['Tumor_Sample_Barcode']]\n\nfor pat in gene_bmi_mut.index:\n gene_bmi_mut.loc[pat,'Total_Mutations'] = pat_name_mut.count(pat)\n\ngene_bmi_mut = gene_bmi_mut[gene_bmi_mut['Total_Mutations'] < 3000]\n\n\n#%% Assigning mutations per gene per patient\n\nprint(\"Calculating mutations for \" + tumor)\n\nfor g in np.unique(pat_mut['Hugo_Symbol']):\n gene_mut = pat_mut[pat_mut['Hugo_Symbol'] == g]\n gene_pat = ['-'.join(x.split('-')[0:3]) for x in gene_mut['Tumor_Sample_Barcode']]\n\n for p in np.unique(gene_pat):\n gene_bmi_mut.loc[p,g] = gene_pat.count(p)\n\ngene_bmi_mut = gene_bmi_mut.transpose()\n\nnorm_gene_bmi_mut = []\n\n\n#%% Finding the slope\n\nprint(\"Calculating slope for \" + tumor)\n\ninds = {bmi: ind for ind,bmi in enumerate(set(pd.to_numeric(gene_bmi_mut.loc['BMI',:])))}\nbmi_ind = [inds[bmi] for bmi in pd.to_numeric(gene_bmi_mut.loc['BMI',:])]\n\nslope = []\nfor i,j in gene_bmi_mut.iloc[2:,:].iterrows():\n norm_mut = pd.to_numeric(j) / pd.to_numeric(gene_bmi_mut.loc['Total_Mutations'])\n norm_gene_bmi_mut.append(norm_mut)\n weight_mut = np.bincount(np.array(bmi_ind),weights=list(map(float,norm_mut.values))) / np.bincount(np.array(bmi_ind))\n slope.append(np.polyfit(list(range(len(weight_mut))), weight_mut,1)[0])\n\nnorm_gene_bmi_mut = pd.DataFrame(norm_gene_bmi_mut)\nnorm_gene_bmi_mut = pd.concat([gene_bmi_mut.loc[['BMI','Total_Mutations'],:],norm_gene_bmi_mut])\nnorm_gene_bmi_mut.index = gene_bmi_mut.index\n\ngene_bmi_mut['Slope'] = [-np.inf,-np.inf] + slope\ngene_bmi_mut = gene_bmi_mut.sort_values(by = ['Slope'])\ngene_bmi_mut.loc[['BMI','Total_Mutations'],'Slope'] = '-'\n\nnorm_gene_bmi_mut['Slope'] = [-np.inf,-np.inf] + slope\nnorm_gene_bmi_mut = norm_gene_bmi_mut.sort_values(by = 
['Slope'])\nnorm_gene_bmi_mut.loc[['BMI','Total_Mutations'],'Slope'] = '-'\n\n\n#%% Writing the data\n\nprint(\"Writing \" + tumor)\n\ngene_bmi_mut.to_csv('./../output/' + tumor + '_bmi_gene_mut.txt', header = True, index = True, sep = '\\t')\nnorm_gene_bmi_mut.to_csv('./../output/' + tumor + '_bmi_gene_mut_norm.txt', header = True, index = True, sep = '\\t')\n\nwriter = pd.ExcelWriter('./../output/' + tumor + '_bmi_gene_mut_slope.xlsx', engine='xlsxwriter')\ngene_bmi_mut.to_excel(writer, sheet_name = tumor + '_binary')\nnorm_gene_bmi_mut.to_excel(writer, sheet_name = tumor + '_norm')\nwriter.save()\n\nprint(\"Done: \" + tumor)\n\n\n\n\n\n"
] | [
[
"pandas.concat",
"numpy.unique",
"pandas.DataFrame",
"numpy.sort",
"pandas.read_table",
"pandas.ExcelWriter",
"numpy.array",
"pandas.to_numeric"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
polikutinevgeny/FrontsCNN | [
"a9f48d5afcdd7e0fe561840d94af36c0fedf1c15"
] | [
"dataset_results.py"
] | [
"import gc\nimport numpy as np\n\n\ndef dataset_results(dataset, model, binary=False):\n x = np.array([dataset[i][0][0] for i in range(len(dataset))])\n y_true = np.array([dataset[i][1][0] for i in range(len(dataset))])\n y_pred = model.predict(x, batch_size=1, verbose=0).flatten()\n if binary:\n y_true = y_true[..., 0].flatten()\n else:\n y_true = np.argmax(y_true, axis=-1).flatten()\n del x\n gc.collect()\n return y_true, y_pred\n"
] | [
[
"numpy.argmax"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
MaxSobolMark/mbrl-lib | [
"bc8ccfe8a56b58d3ce5bae2c4ccdadd82ecdb594",
"bc8ccfe8a56b58d3ce5bae2c4ccdadd82ecdb594"
] | [
"mbrl/env/pets_reacher.py",
"mbrl/env/reward_functions/half_cheetah_jump_reward_function.py"
] | [
"import os\nfrom typing import Tuple\n\nimport numpy as np\nfrom numpy.random import MT19937, RandomState, SeedSequence\nimport torch\nfrom gym import utils\nfrom gym.envs.mujoco import mujoco_env\n\n\nclass Reacher3DEnv(mujoco_env.MujocoEnv, utils.EzPickle):\n def __init__(self, task_id=None, hide_goal=False):\n self.viewer = None\n utils.EzPickle.__init__(self)\n dir_path = os.path.dirname(os.path.realpath(__file__))\n self.goal = np.zeros(3)\n self._hide_goal = hide_goal\n mujoco_env.MujocoEnv.__init__(\n self, os.path.join(dir_path, \"assets/reacher3d.xml\"), 2)\n self._task_id = task_id\n if task_id is not None:\n self._rng = RandomState(MT19937(SeedSequence(task_id)))\n self.goal = self._rng.normal(loc=0, scale=0.1, size=[3])\n\n def step(self, a):\n self.do_simulation(a, self.frame_skip)\n ob = self._get_obs()\n # print('[pets_reacher:22] ob[7:10]: ', ob[7:10])\n reward = -np.sum(\n np.square(Reacher3DEnv.get_EE_pos(ob[None]) - self.goal))\n reward -= 0.01 * np.square(a).sum()\n done = False\n return ob, reward, done, dict(reward_dist=0, reward_ctrl=0)\n\n def viewer_setup(self):\n self.viewer.cam.trackbodyid = 1\n self.viewer.cam.distance = 2.5\n self.viewer.cam.elevation = -30\n self.viewer.cam.azimuth = 270\n\n def reset_model(self):\n qpos, qvel = np.copy(self.init_qpos), np.copy(self.init_qvel)\n if self._task_id is not None:\n qpos[-3:] += self.goal\n else:\n qpos[-3:] += np.random.normal(loc=0, scale=0.1, size=[3])\n self.goal = qpos[-3:]\n qvel[-3:] = 0\n self.set_state(qpos, qvel)\n return self._get_obs()\n\n def _get_obs(self):\n if not self._hide_goal:\n return np.concatenate([\n self.data.qpos.flat,\n self.data.qvel.flat[:-3],\n ])\n return np.concatenate([\n self.data.qpos.flat[:-3],\n self.data.qvel.flat[:-3],\n ])\n\n @staticmethod\n def get_EE_pos(states, are_tensors=False):\n theta1, theta2, theta3, theta4, theta5, theta6, _ = (\n states[:, :1],\n states[:, 1:2],\n states[:, 2:3],\n states[:, 3:4],\n states[:, 4:5],\n states[:, 5:6],\n states[:, 6:],\n )\n\n if not are_tensors:\n\n rot_axis = np.concatenate(\n [\n np.cos(theta2) * np.cos(theta1),\n np.cos(theta2) * np.sin(theta1),\n -np.sin(theta2),\n ],\n axis=1,\n )\n rot_perp_axis = np.concatenate(\n [-np.sin(theta1),\n np.cos(theta1),\n np.zeros(theta1.shape)],\n axis=1)\n cur_end = np.concatenate(\n [\n 0.1 * np.cos(theta1) +\n 0.4 * np.cos(theta1) * np.cos(theta2),\n 0.1 * np.sin(theta1) +\n 0.4 * np.sin(theta1) * np.cos(theta2) - 0.188,\n -0.4 * np.sin(theta2),\n ],\n axis=1,\n )\n\n for length, hinge, roll in [(0.321, theta4, theta3),\n (0.16828, theta6, theta5)]:\n perp_all_axis = np.cross(rot_axis, rot_perp_axis)\n x = np.cos(hinge) * rot_axis\n y = np.sin(hinge) * np.sin(roll) * rot_perp_axis\n z = -np.sin(hinge) * np.cos(roll) * perp_all_axis\n new_rot_axis = x + y + z\n new_rot_perp_axis = np.cross(new_rot_axis, rot_axis)\n new_rot_perp_axis[np.linalg.norm(\n new_rot_perp_axis, axis=1) < 1e-30] = rot_perp_axis[\n np.linalg.norm(new_rot_perp_axis, axis=1) < 1e-30]\n new_rot_perp_axis /= np.linalg.norm(new_rot_perp_axis,\n axis=1,\n keepdims=True)\n rot_axis, rot_perp_axis, cur_end = (\n new_rot_axis,\n new_rot_perp_axis,\n cur_end + length * new_rot_axis,\n )\n\n return cur_end\n else:\n rot_axis = torch.cat(\n [\n torch.cos(theta2) * torch.cos(theta1),\n torch.cos(theta2) * torch.sin(theta1),\n -torch.sin(theta2),\n ],\n dim=1,\n )\n rot_perp_axis = torch.cat([\n -torch.sin(theta1),\n torch.cos(theta1),\n torch.zeros_like(theta1)\n ],\n dim=1)\n cur_end = torch.cat(\n [\n 0.1 * torch.cos(theta1) +\n 0.4 
* torch.cos(theta1) * torch.cos(theta2),\n 0.1 * torch.sin(theta1) +\n 0.4 * torch.sin(theta1) * torch.cos(theta2) - 0.188,\n -0.4 * torch.sin(theta2),\n ],\n dim=1,\n )\n\n for length, hinge, roll in [(0.321, theta4, theta3),\n (0.16828, theta6, theta5)]:\n perp_all_axis = torch.cross(rot_axis, rot_perp_axis)\n x = torch.cos(hinge) * rot_axis\n y = torch.sin(hinge) * torch.sin(roll) * rot_perp_axis\n z = -torch.sin(hinge) * torch.cos(roll) * perp_all_axis\n new_rot_axis = x + y + z\n new_rot_perp_axis = torch.cross(new_rot_axis, rot_axis)\n new_rot_perp_axis[torch.linalg.norm(\n new_rot_perp_axis, dim=1) < 1e-30] = rot_perp_axis[\n torch.linalg.norm(new_rot_perp_axis, dim=1) < 1e-30]\n new_rot_perp_axis /= torch.linalg.norm(new_rot_perp_axis,\n dim=1,\n keepdims=True)\n rot_axis, rot_perp_axis, cur_end = (\n new_rot_axis,\n new_rot_perp_axis,\n cur_end + length * new_rot_axis,\n )\n\n return cur_end\n\n @staticmethod\n def get_reward(ob, action):\n # This is a bit tricky to implement, implement when needed\n print('NOT SUPPOSED TO RUN THIS!')\n raise NotImplementedError\n\n def forward_postprocess_fn(\n self, inputs: torch.Tensor, mean: torch.Tensor, logvar: torch.Tensor,\n min_logvar: torch.nn.parameter.Parameter\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n if not self._hide_goal:\n mean[..., 7:10] = inputs[..., 7:10]\n logvar[..., 7:10] = torch.full(logvar[..., 7:10].shape,\n -float('inf'))\n return mean, logvar\n",
"from .base_reward_function import BaseRewardFunction\nimport torch\n\n\nclass HalfCheetahJumpRewardFunction(BaseRewardFunction):\n OBS_DIM = 18\n\n def __init__(self, env):\n super(HalfCheetahJumpRewardFunction, self).__init__(env)\n self._z_init = self._env.unwrapped.sim.data.qpos[1]\n\n def get_reward(self, observation: torch.Tensor, action: torch.Tensor,\n device: str):\n reward_ctrl = -0.1 * torch.sum(torch.square(action), dim=-1)\n reward_run = observation[:, 0]\n z = observation[:, 1]\n reward_jump = 15 * torch.maximum(z - self._z_init,\n torch.FloatTensor([0]).to(device))\n return reward_run + reward_ctrl + reward_jump\n"
] | [
[
"numpy.square",
"torch.sin",
"torch.zeros_like",
"numpy.linalg.norm",
"numpy.cos",
"torch.linalg.norm",
"numpy.concatenate",
"numpy.sin",
"numpy.copy",
"numpy.random.normal",
"numpy.random.SeedSequence",
"numpy.cross",
"numpy.zeros",
"torch.cos",
"torch.cross"
],
[
"torch.FloatTensor",
"torch.square"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mosesnah-shared/whip-project-targeting | [
"7f47598635f027e2cb05ad33b66ed67627d20329"
] | [
"MuJoCo/modules/utils.py"
] | [
"# [Built-in modules]\nimport os\nimport re\nimport sys\nimport shutil\nimport time, datetime\nimport math as myMath\nimport glob\n\n# [3rd party modules]\nimport cv2\nimport numpy as np\nimport xml.etree.ElementTree as ET\n\nimport sympy as sp\nfrom sympy.utilities.lambdify import lambdify, implemented_function\n\nfrom scipy.special import lambertw\nfrom scipy.integrate import quad\nfrom scipy.spatial.transform import Rotation as R\n\n# [Local modules]\nfrom modules.constants import Constants\n\nclass MyVideo:\n \"\"\"\n Description\n ----------\n\n Arguments\n ---------\n\n Returns\n -------\n \"\"\"\n def __init__( self, vid_dir = None, height = 1440, width = 850, fps = 60 ):\n\n # self.height = height\n # self.width = width\n\n self.height = 2880\n self.width = 1800\n\n self.vid_dir = vid_dir if not None else \".\"\n self.fps = fps\n\n fourcc = cv2.VideoWriter_fourcc( *'MP4V' ) # 4-character code of codec used to compress the frames.\n # For example, VideoWriter::fourcc('P','I','M','1') is a MPEG-1 codec,\n # VideoWriter::fourcc('M','J','P','G') is a motion-jpeg codec etc.\n # List of codes can be obtained at Video Codecs by FOURCC page.\n # self.outVideo = cv2.VideoWriter( self.vid_dir + \"/video.mp4\", fourcc, fps, ( self.height, self.width ) )\n self.outVideo = cv2.VideoWriter( self.vid_dir + \"/video.mp4\", fourcc, fps, ( self.height//2, self.width//2 ) )\n\n def write( self, myViewer ):\n data = myViewer.read_pixels( self.height, self.width, depth = False ) # Get the pixel from the render screen\n data = cv2.cvtColor( data, cv2.COLOR_BGR2RGB )\n\n # data = cv2.resize( data,( self.height, self.width ) )\n data = cv2.resize( data,( self.height//2, self.width//2 ) )\n\n self.outVideo.write( np.flip( data, axis = 0 ) )\n\n def release( self ):\n self.outVideo.release()\n\ndef length_elem2elem( mjModel, mjData, elem_name1, elem_name2 ):\n type1 = get_elem_type( mjModel, elem_name1 )\n type2 = get_elem_type( mjModel, elem_name2 )\n\n # The euclidean distance between two elements, calling using \"get_geom_xpos\" or \"get_site_xpos\" or \"get_body_xpos\" methods\n return np.linalg.norm( getattr( mjData, \"get_\" + type1 + \"_\" + \"xpos\" )( elem_name1 )\n - getattr( mjData, \"get_\" + type2 + \"_\" + \"xpos\" )( elem_name2 ) , ord = 2 )\n\n\ndef get_elem_type( mjModel, elem_name ):\n \"\"\"\n The naming convention of our mujoco simulation is \"{elem}_name\", where elem = [geom, site, body]\n The string before the first underbar '_' describes the elem(ent) of the model.\n This function parses the string and returns the first string (i.e., the element of the model)\n \"\"\"\n return elem_name.split( '_' )[ 0 ] # Parse and get the first string before \"_\"\n\ndef get_property( mjModel, elem_name, prop_name ):\n # Get the property of the name\n\n # The name of the elements start with \"XXXX_\", hence getting the string before the underbar.\n type = get_elem_type( mjModel, elem_name )\n\n for idx, s in enumerate( getattr( mjModel, type + \"_\" + \"names\" ) ): # run through the list of \"geom_names\" or \"body_names\"\n if elem_name == s:\n tmp = getattr( mjModel, type + \"_\" + prop_name )\n return tmp[ idx ]\n\n # If couldn't match in list, raise error\n raise NameError( 'Cannot find geom_name with {0} in list, please check'.format( elem_name ) )\n\n\ndef snake2camel( s ):\n \"\"\"\n Switch string s from snake_form_naming to CamelCase\n \"\"\"\n\n return ''.join( word.title() for word in s.split( '_' ) )\n\ndef camel2snake( s ):\n \"\"\"\n Switch string s from CamelCase to 
snake_form_naming\n        [REF] https://stackoverflow.com/questions/1175208/elegant-python-function-to-convert-camelcase-to-snake-case\n    \"\"\"\n    return re.sub( r'(?<!^)(?=[A-Z])', '_', s ).lower()\n\ndef clear_dir( dir ):\n    \"\"\" Cleaning up the contents in the directory \"\"\"\n    # Not implemented in this module; kept as a no-op placeholder.\n\n\ndef args_cleanup( args, s ):\n    \"\"\"\n        Description\n        -----------\n            Clean-up the substring s for keys in args\n\n        Arguments\n        ---------\n            args: The dictionary to be parsed\n            s   : Substring to be discarded. e.g. s = '--', then \"--record\" --> \"record\"\n\n    \"\"\"\n    if not isinstance( args, dict ) or not isinstance( s, str ):\n        raise ValueError( \"Wrong input type. args should be type dict and s should be type str. {0:} and {1:} are rather given\".format(\n                                                                type( args ), type( s ) ) )\n\n    for old_key in list( args ):\n        new_key = old_key.replace( s, '' )\n        args[ new_key ] = args.pop( old_key )\n\n    return args\n\n\ndef rot2quat( rot ):\n    # Taking the SO(3) matrix as an input and return the quaternion\n    # Assumes scipy >= 1.4 (Rotation is imported above as R); scipy returns the scalar-last (x, y, z, w) ordering,\n    # hence the reordering to the (w, x, y, z) convention used elsewhere in this module.\n    quat_xyzw = R.from_matrix( rot ).as_quat( )\n    return quat_xyzw[ [ 3, 0, 1, 2 ] ]\n\ndef euler2quaternion( euler_angs ):\n    \"\"\"\n        Description\n        -----------\n        This code is directly from the following reference\n        [REF] https://computergraphics.stackexchange.com/questions/8195/how-to-convert-euler-angles-to-quaternions-and-get-the-same-euler-angles-back-fr\n        Converting Euler angles (yaw, pitch, roll) to a R4 quaternion vector (w, x, y, z)\n\n        Arguments\n        ---------\n            [NAME]              [TYPE]        [DESCRIPTION]\n            (1) euler_angs      List          The yaw, pitch and roll Euler angles, in that order\n\n        [OUTPUTS]\n        -----------\n            [NAME]              [TYPE]        [DESCRIPTION]\n            (1) w, x, y, z      floats        The quaternion components, ordered in w, x, y and z\n\n\n    \"\"\"\n    yaw, pitch, roll = euler_angs[ : ]\n\n    cy = np.cos( yaw * 0.5 )\n    sy = np.sin( yaw * 0.5 )\n    cp = np.cos( pitch * 0.5 )\n    sp = np.sin( pitch * 0.5 )\n    cr = np.cos( roll * 0.5 )\n    sr = np.sin( roll * 0.5 )\n\n    w = cr * cp * cy + sr * sp * sy\n    x = sr * cp * cy - cr * sp * sy\n    y = cr * sp * cy + sr * cp * sy\n    z = cr * cp * sy - sr * sp * cy\n\n    return w, x, y, z\n\n\ndef quaternion2euler( quatVec ): # Inputting a quaternion vector and outputting the yaw, pitch, roll of the Euler angle.\n    \"\"\"\n        Description\n        -----------\n        Converting a R4 quaternion vector (w, x, y, z) to Euler Angle (Roll, Pitch, Yaw)\n        This code is directly from the following reference\n        [REF] https://computergraphics.stackexchange.com/questions/8195/how-to-convert-euler-angles-to-quaternions-and-get-the-same-euler-angles-back-fr\n\n        Arguments\n        ---------\n            [NAME]              [TYPE]        [DESCRIPTION]\n            (1) quatVec         List          The quaternion vector, ordered in w, x, y and z\n\n        Outputs\n        --------\n            [NAME]              [TYPE]        [DESCRIPTION]\n            (1) yaw, pitch, roll                The euler angles of the given quaternion vector.\n\n\n    \"\"\"\n\n    if len( quatVec ) != 4:\n        raise ValueError( \"Wrong size of input argument. 
Given size is [{0:d}] while it should be 4\".format( len( quatVec ) ) )\n\n    w, x, y, z = quatVec[:]\n\n    t0    = + 2.0 * ( w * x + y * z )\n    t1    = + 1.0 - 2.0 * ( x * x + y * y )\n    roll  = myMath.atan2( t0, t1 )\n\n    t2    = + 2.0 * ( w * y - z * x )\n    t2    = + 1.0 if t2 > +1.0 else t2\n    t2    = - 1.0 if t2 < -1.0 else t2\n    pitch = myMath.asin( t2 )\n\n    t3    = + 2.0 * ( w * z + x * y )\n    t4    = + 1.0 - 2.0 * ( y * y + z * z )\n    yaw   = myMath.atan2( t3, t4 )\n\n    return yaw, pitch, roll\n\ndef str2bool( s ):\n    \"\"\"\n\n        Description:\n        ----------\n            Converting an input string (or a dictionary of strings) to a boolean\n\n        Arguments:\n        ----------\n            [NAME]          [TYPE]        [DESCRIPTION]\n            (1) s           dict, str     The string (or dictionary of strings) to be converted to a boolean\n\n        Returns:\n        ----------\n            True/False depending on the given input string\n\n    \"\"\"\n    if isinstance( s, dict ):\n        for key, _ in s.items():\n            s[ key ] = str2bool( s[ key ] )\n        return s\n    else:\n        return s.lower() in ( \"yes\", \"true\", \"t\", \"1\" )\n\ndef str2float( s ):\n    \"\"\"\n\n        Description:\n        ----------\n            Converting an input string to a float array\n\n        Arguments:\n        ----------\n            [NAME]          [TYPE]        [DESCRIPTION]\n            (1) s           str           The string which will be parsed to float array\n\n        Returns:\n        ----------\n            The parsed float array\n\n    \"\"\"\n    if not isinstance( s, str ):\n        raise ValueError( \"Input argument should be string, but {} is given\".format( type( s ) ) )\n\n    return [ float( i ) for i in re.findall( r\"[-+]?\\d*\\.\\d+|[-+]?\\d+\", s ) ]\n\ndef my_mkdir( ):\n\n    dir  = Constants.TMP_DIR                                       # Temporarily saving at tmp\n    dir += datetime.datetime.now().strftime( \"%Y%m%d_%H%M%S/\" )    # Appending the date when this directory is called.\n    if not os.path.exists( dir ):                                  # If directory not exist\n        os.makedirs( dir, exist_ok = True )                        # mkdir -p functionality via exist_ok\n\n    return dir\n\n\ndef my_mvdir( from_dir, to_dir ):\n    shutil.move( from_dir, to_dir )\n\n\ndef my_rmdir( dir ):\n\n    if not isinstance( dir, str ):\n        raise ValueError( \"Input directory should be a str, {} is given\".format( type( dir ) ) )\n\n    try:\n        shutil.rmtree( dir )\n    except OSError:\n        print( \"{0:s} Doesn't exist, hence cannot remove the directory\".format( dir ) )\n\n    print( \"Erasing Directory [{0:s}]\".format( dir ) )\n\ndef my_print( **kwargs ):\n    \"\"\"\n        Description:\n        ----------\n            ** double asterisk means giving the argument as dictionary\n            By using double asterisk \"kwargs\" as the input argument, any set of keyword arguments can be printed in a consistent format\n\n        Arguments:\n        ----------\n\n        Returns:\n        ----------\n    \"\"\"\n\n    prec = kwargs[ \"prec\" ] if \"prec\" in kwargs else 5\n    f    = kwargs[ \"file\" ] if \"file\" in kwargs else sys.stdout    # If there is a keyword called \"file\" then use that as our standard output\n\n    tmpMaxLen = len( max( kwargs.keys( ), key = len ) )            # Getting the maximum length of a string list\n\n    for args in kwargs:\n\n        if 'file' == args.lower( ):\n            # Ignore the file's value, since it should not be added to the \"output.txt\" log file.\n            continue\n\n        print( \"[{1:{0}s}]:\".format( tmpMaxLen, args ), end = ' ', file = f )      # Printing out the name of the array\n        # {1:{0}s} Enables to set a variable as format length.\n        tmpData = kwargs[ args ]\n\n        if   isinstance( tmpData, ( float, int ) ):\n            tmpPrint = \"{2:{1}.{0}f}\".format( prec, prec + 2, tmpData )\n\n        elif isinstance( tmpData, list ):\n            tmpPrint = np.array2string( np.array( tmpData ).flatten(), precision = prec, separator = ',' )\n\n        elif isinstance( tmpData, np.ndarray ):\n            tmpPrint = np.array2string( tmpData.flatten(), precision = prec, separator = ',' )\n\n        elif isinstance( tmpData, str ):\n            tmpPrint = tmpData\n\n        elif tmpData is None:\n            tmpPrint = \"None\"\n\n        
else:\n raise ValueError( \"CHECK INPUT\")\n\n print( tmpPrint, file = f )\n\ndef solve_eq_posture( q0 ):\n\n q1_0 = q0[ 0 ]\n q2_0 = q0[ 1 ]\n q3_0 = q0[ 2 ]\n q4_0 = q0[ 3 ]\n\n q1 = sp.Symbol( 'q1' )\n q2 = sp.Symbol( 'q2' )\n q3 = sp.Symbol( 'q3' )\n q4 = sp.Symbol( 'q4' )\n\n eqn1 = 0.52444712807465876380774716380984*sp.cos(q2)*sp.sin(q1) - 0.12721953522735995889547666592989*sp.cos(q1)*sp.sin(q2) - 0.05501625493258266441642945210333*sp.sin(q4)*(sp.sin(q1)*sp.sin(q3) + sp.cos(q1)*sp.cos(q3)*sp.sin(q2)) - 0.063807174539763700238381716189906*sp.cos(q1)*sp.cos(q2)*sp.sin(q4) - 0.042749427781976545581699156173272*sp.cos(q1)*sp.cos(q4)*sp.sin(q2) + 0.1762293392050615636890142923221*sp.cos(q2)*sp.cos(q4)*sp.sin(q1) + 0.1762293392050615636890142923221*sp.cos(q1)*sp.cos(q3)*sp.sin(q4) - 0.063807174539763700238381716189906*sp.cos(q3)*sp.cos(q4)*sp.sin(q1) + 0.042749427781976545581699156173272*sp.cos(q1)*sp.cos(q2)*sp.sin(q3)*sp.sin(q4) + 0.063807174539763700238381716189906*sp.cos(q1)*sp.cos(q4)*sp.sin(q2)*sp.sin(q3) + 0.1762293392050615636890142923221*sp.sin(q1)*sp.sin(q2)*sp.sin(q3)*sp.sin(q4) + q1 - q1_0\n eqn2 = 0.1966778910733553153988850681344*sp.cos(q1)*sp.sin(q2) - 0.12721953522735995889547666592989*sp.cos(q2)*sp.sin(q1) + 0.020788410744410568131712579997838*sp.sin(q4)*(sp.sin(q1)*sp.sin(q3) + sp.cos(q1)*sp.cos(q3)*sp.sin(q2)) + 0.015478241093474287559672575298464*sp.cos(q1)*sp.cos(q2)*sp.sin(q4) + 0.066089435759419945526360606891103*sp.cos(q1)*sp.cos(q4)*sp.sin(q2) - 0.042749427781976545581699156173272*sp.cos(q2)*sp.cos(q4)*sp.sin(q1) - 0.042749427781976545581699156173272*sp.cos(q1)*sp.cos(q3)*sp.sin(q4) + 0.015478241093474287559672575298464*sp.cos(q3)*sp.cos(q4)*sp.sin(q1) - 0.066089435759419945526360606891103*sp.cos(q1)*sp.cos(q2)*sp.sin(q3)*sp.sin(q4) - 0.015478241093474287559672575298464*sp.cos(q1)*sp.cos(q4)*sp.sin(q2)*sp.sin(q3) - 0.042749427781976545581699156173272*sp.sin(q1)*sp.sin(q2)*sp.sin(q3)*sp.sin(q4) + q2 - q2_0\n eqn3 = 0.1637248203220158515591720060911*sp.cos(q2)*sp.sin(q1) - 0.061864967327922570916598488111049*sp.cos(q1)*sp.sin(q2) - 0.083555731966853175052278857037891*sp.sin(q4)*(sp.sin(q1)*sp.sin(q3) + sp.cos(q1)*sp.cos(q3)*sp.sin(q2)) - 0.019919678510073035582195188908372*sp.cos(q1)*sp.cos(q2)*sp.sin(q4) - 0.020788410744410568131712579997838*sp.cos(q1)*sp.cos(q4)*sp.sin(q2) + 0.05501625493258266441642945210333*sp.cos(q2)*sp.cos(q4)*sp.sin(q1) + 0.05501625493258266441642945210333*sp.cos(q1)*sp.cos(q3)*sp.sin(q4) - 0.019919678510073035582195188908372*sp.cos(q3)*sp.cos(q4)*sp.sin(q1) + 0.020788410744410568131712579997838*sp.cos(q1)*sp.cos(q2)*sp.sin(q3)*sp.sin(q4) + 0.019919678510073035582195188908372*sp.cos(q1)*sp.cos(q4)*sp.sin(q2)*sp.sin(q3) + 0.05501625493258266441642945210333*sp.sin(q1)*sp.sin(q2)*sp.sin(q3)*sp.sin(q4) + q3 - q3_0\n eqn4 = 0.046062245513354471704303705337225*sp.cos(q1)*sp.sin(q2) - 0.18988602913048024944941971625667*sp.cos(q2)*sp.sin(q1) + 0.019919678510073035582195188908372*sp.sin(q4)*(sp.sin(q1)*sp.sin(q3) + sp.cos(q1)*sp.cos(q3)*sp.sin(q2)) + 0.10117159250577656415259752975544*sp.cos(q1)*sp.cos(q2)*sp.sin(q4) + 0.015478241093474287559672575298464*sp.cos(q1)*sp.cos(q4)*sp.sin(q2) - 0.063807174539763700238381716189906*sp.cos(q2)*sp.cos(q4)*sp.sin(q1) - 0.063807174539763700238381716189906*sp.cos(q1)*sp.cos(q3)*sp.sin(q4) + 0.10117159250577656415259752975544*sp.cos(q3)*sp.cos(q4)*sp.sin(q1) - 0.015478241093474287559672575298464*sp.cos(q1)*sp.cos(q2)*sp.sin(q3)*sp.sin(q4) - 
0.10117159250577656415259752975544*sp.cos(q1)*sp.cos(q4)*sp.sin(q2)*sp.sin(q3) - 0.063807174539763700238381716189906*sp.sin(q1)*sp.sin(q2)*sp.sin(q3)*sp.sin(q4) + q4 - q4_0\n\n sol = sp.solvers.nsolve( ( eqn1, eqn2, eqn3, eqn4 ), ( q1, q2, q3, q4 ), q0 )\n sol = np.array( sol )\n return np.array( [ sol[ 0 ][ 0 ], sol[ 1 ][ 0 ], sol[ 2 ][ 0 ], sol[ 3 ][ 0 ] ] )\n\nif __name__ == '__main__':\n pass\n"
] | [
[
"numpy.array",
"numpy.flip",
"numpy.cos",
"numpy.sin"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
qpiel/gammapy | [
"cfb976909e63f4d5d578e1495245c0baad69482b",
"cfb976909e63f4d5d578e1495245c0baad69482b",
"cfb976909e63f4d5d578e1495245c0baad69482b"
] | [
"gammapy/stats/tests/test_significance.py",
"gammapy/irf/psf_3d.py",
"gammapy/spectrum/utils.py"
] | [
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nfrom numpy.testing import assert_allclose\nfrom ...stats import (\n significance_to_probability_normal,\n probability_to_significance_normal,\n probability_to_significance_normal_limit,\n significance_to_probability_normal_limit,\n)\n\n\ndef test_significance_to_probability_normal():\n significance = 5\n p = significance_to_probability_normal(significance)\n assert_allclose(p, 2.8665157187919328e-07)\n\n s = probability_to_significance_normal(p)\n assert_allclose(s, significance)\n\n\ndef test_significance_to_probability_normal_limit():\n significance = 5\n p = significance_to_probability_normal_limit(significance)\n assert_allclose(p, 2.792513e-07)\n\n s = probability_to_significance_normal_limit(p)\n assert_allclose(s, significance)\n",
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport numpy as np\nfrom scipy.interpolate import RegularGridInterpolator\nfrom astropy.table import Table\nfrom astropy.io import fits\nfrom astropy.units import Quantity\nfrom astropy.coordinates import Angle\nfrom ..utils.array import array_stats_str\nfrom ..utils.energy import Energy\nfrom ..utils.scripts import make_path\nfrom .psf_table import TablePSF, EnergyDependentTablePSF\n\n__all__ = [\"PSF3D\"]\n\n\nclass PSF3D(object):\n \"\"\"PSF with axes: energy, offset, rad.\n\n Data format specification: :ref:`gadf:psf_table`\n\n Parameters\n ----------\n energy_lo : `~astropy.units.Quantity`\n Energy bins lower edges (1-dim)\n energy_hi : `~astropy.units.Quantity`\n Energy bins upper edges (1-dim)\n offset : `~astropy.coordinates.Angle`\n Offset angle (1-dim)\n rad_lo : `~astropy.coordinates.Angle`\n Offset angle bins lower edges\n rad_hi : `~astropy.coordinates.Angle`\n Offset angle bins upper edges\n psf_value : `~astropy.units.Quantity`\n PSF (3-dim with axes: psf[rad_index, offset_index, energy_index]\n energy_thresh_lo : `~astropy.units.Quantity`\n Lower energy threshold.\n energy_thresh_hi : `~astropy.units.Quantity`\n Upper energy threshold.\n \"\"\"\n\n def __init__(\n self,\n energy_lo,\n energy_hi,\n offset,\n rad_lo,\n rad_hi,\n psf_value,\n energy_thresh_lo=Quantity(0.1, \"TeV\"),\n energy_thresh_hi=Quantity(100, \"TeV\"),\n ):\n self.energy_lo = energy_lo.to(\"TeV\")\n self.energy_hi = energy_hi.to(\"TeV\")\n self.offset = Angle(offset)\n self.rad_lo = Angle(rad_lo)\n self.rad_hi = Angle(rad_hi)\n self.psf_value = psf_value.to(\"sr^-1\")\n self.energy_thresh_lo = energy_thresh_lo.to(\"TeV\")\n self.energy_thresh_hi = energy_thresh_hi.to(\"TeV\")\n\n def info(self):\n \"\"\"Print some basic info.\n \"\"\"\n ss = \"\\nSummary PSF3D info\\n\"\n ss += \"---------------------\\n\"\n ss += array_stats_str(self.energy_lo, \"energy_lo\")\n ss += array_stats_str(self.energy_hi, \"energy_hi\")\n ss += array_stats_str(self.offset, \"offset\")\n ss += array_stats_str(self.rad_lo, \"rad_lo\")\n ss += array_stats_str(self.rad_hi, \"rad_hi\")\n ss += array_stats_str(self.psf_value, \"psf_value\")\n\n # TODO: should quote containment values also\n\n return ss\n\n def _energy_logcenter(self):\n \"\"\"Get logcenters of energy bins.\n\n Returns\n -------\n energies : `~astropy.units.Quantity`\n Logcenters of energy bins\n \"\"\"\n return np.sqrt(self.energy_lo * self.energy_hi)\n\n def _rad_center(self):\n \"\"\"Get centers of rad bins (`~astropy.coordinates.Angle` in deg).\n \"\"\"\n return ((self.rad_hi + self.rad_lo) / 2).to(\"deg\")\n\n @classmethod\n def read(cls, filename, hdu=\"PSF_2D_TABLE\"):\n \"\"\"Create `PSF3D` from FITS file.\n\n Parameters\n ----------\n filename : str\n File name\n hdu : str\n HDU name\n \"\"\"\n filename = str(make_path(filename))\n table = Table.read(filename, hdu=hdu)\n return cls.from_table(table)\n\n @classmethod\n def from_table(cls, table):\n \"\"\"Create `PSF3D` from `~astropy.table.Table`.\n\n Parameters\n ----------\n table : `~astropy.table.Table`\n Table Table-PSF info.\n \"\"\"\n theta_lo = table[\"THETA_LO\"].quantity[0]\n theta_hi = table[\"THETA_HI\"].quantity[0]\n offset = (theta_hi + theta_lo) / 2\n offset = Angle(offset, unit=table[\"THETA_LO\"].unit)\n\n energy_lo = table[\"ENERG_LO\"].quantity[0]\n energy_hi = table[\"ENERG_HI\"].quantity[0]\n\n rad_lo = table[\"RAD_LO\"].quantity[0]\n rad_hi 
= table[\"RAD_HI\"].quantity[0]\n\n psf_value = table[\"RPSF\"].quantity[0]\n\n opts = {}\n try:\n opts[\"energy_thresh_lo\"] = Quantity(table.meta[\"LO_THRES\"], \"TeV\")\n opts[\"energy_thresh_hi\"] = Quantity(table.meta[\"HI_THRES\"], \"TeV\")\n except KeyError:\n pass\n\n return cls(energy_lo, energy_hi, offset, rad_lo, rad_hi, psf_value, **opts)\n\n def to_fits(self):\n \"\"\"\n Convert PSF table data to FITS HDU list.\n\n Returns\n -------\n hdu_list : `~astropy.io.fits.HDUList`\n PSF in HDU list format.\n \"\"\"\n # Set up data\n names = [\n \"ENERG_LO\",\n \"ENERG_HI\",\n \"THETA_LO\",\n \"THETA_HI\",\n \"RAD_LO\",\n \"RAD_HI\",\n \"RPSF\",\n ]\n units = [\"TeV\", \"TeV\", \"deg\", \"deg\", \"deg\", \"deg\", \"sr^-1\"]\n data = [\n self.energy_lo,\n self.energy_hi,\n self.offset,\n self.offset,\n self.rad_lo,\n self.rad_hi,\n self.psf_value,\n ]\n\n table = Table()\n for name_, data_, unit_ in zip(names, data, units):\n table[name_] = [data_]\n table[name_].unit = unit_\n\n hdu = fits.BinTableHDU(table)\n hdu.header[\"LO_THRES\"] = self.energy_thresh_lo.value\n hdu.header[\"HI_THRES\"] = self.energy_thresh_hi.value\n\n return fits.HDUList([fits.PrimaryHDU(), hdu])\n\n def write(self, filename, *args, **kwargs):\n \"\"\"Write PSF to FITS file.\n\n Calls `~astropy.io.fits.HDUList.writeto`, forwarding all arguments.\n \"\"\"\n self.to_fits().writeto(filename, *args, **kwargs)\n\n def evaluate(self, energy=None, offset=None, rad=None, interp_kwargs=None):\n \"\"\"Interpolate PSF value at a given offset and energy.\n\n Parameters\n ----------\n energy : `~astropy.units.Quantity`\n energy value\n offset : `~astropy.coordinates.Angle`\n Offset in the field of view\n rad : `~astropy.coordinates.Angle`\n Offset wrt source position\n interp_kwargs : dict\n option for interpolation for `~scipy.interpolate.RegularGridInterpolator`\n\n Returns\n -------\n values : `~astropy.units.Quantity`\n Interpolated value\n \"\"\"\n if not interp_kwargs:\n interp_kwargs = dict(bounds_error=False, fill_value=None)\n\n if energy is None:\n energy = self._energy_logcenter()\n if offset is None:\n offset = self.offset\n if rad is None:\n rad = self._rad_center()\n\n energy = Energy(energy).to(\"TeV\")\n offset = Angle(offset).to(\"deg\")\n rad = Angle(rad).to(\"deg\")\n\n energy_bin = self._energy_logcenter()\n\n offset_bin = self.offset.to(\"deg\")\n rad_bin = self._rad_center()\n points = (rad_bin, offset_bin, energy_bin)\n interpolator = RegularGridInterpolator(points, self.psf_value, **interp_kwargs)\n rr, off, ee = np.meshgrid(rad.value, offset.value, energy.value, indexing=\"ij\")\n shape = ee.shape\n pix_coords = np.column_stack([rr.flat, off.flat, ee.flat])\n data_interp = interpolator(pix_coords)\n return Quantity(data_interp.reshape(shape), self.psf_value.unit)\n\n def to_energy_dependent_table_psf(self, theta=\"0 deg\", rad=None, exposure=None):\n \"\"\"\n Convert PSF3D in EnergyDependentTablePSF.\n\n Parameters\n ----------\n theta : `~astropy.coordinates.Angle`\n Offset in the field of view\n rad : `~astropy.coordinates.Angle`\n Offset from PSF center used for evaluating the PSF on a grid.\n Default is the ``rad`` from this PSF.\n exposure : `~astropy.units.Quantity`\n Energy dependent exposure. 
Should be in units equivalent to 'cm^2 s'.\n Default exposure = 1.\n\n Returns\n -------\n table_psf : `~gammapy.irf.EnergyDependentTablePSF`\n Energy-dependent PSF\n \"\"\"\n theta = Angle(theta)\n energies = self._energy_logcenter()\n\n if rad is None:\n rad = self._rad_center()\n else:\n rad = Angle(rad)\n\n psf_value = self.evaluate(offset=theta, rad=rad)\n psf_value = psf_value[:, 0, :].transpose()\n\n return EnergyDependentTablePSF(\n energy=energies, rad=rad, exposure=exposure, psf_value=psf_value\n )\n\n def to_table_psf(self, energy, theta=\"0 deg\", interp_kwargs=None, **kwargs):\n \"\"\"Create `~gammapy.irf.TablePSF` at one given energy.\n\n Parameters\n ----------\n energy : `~astropy.units.Quantity`\n Energy\n theta : `~astropy.coordinates.Angle`\n Offset in the field of view. Default theta = 0 deg\n interp_kwargs : dict\n Option for interpolation for `~scipy.interpolate.RegularGridInterpolator`\n\n Returns\n -------\n psf : `~gammapy.irf.TablePSF`\n Table PSF\n \"\"\"\n energy = Quantity(energy)\n theta = Angle(theta)\n psf_value = self.evaluate(energy, theta, interp_kwargs=interp_kwargs).squeeze()\n rad = self._rad_center()\n return TablePSF(rad, psf_value, **kwargs)\n\n def containment_radius(\n self, energy, theta=\"0 deg\", fraction=0.68, interp_kwargs=None\n ):\n \"\"\"Containment radius.\n\n Parameters\n ----------\n energy : `~astropy.units.Quantity`\n Energy\n theta : `~astropy.coordinates.Angle`\n Offset in the field of view. Default theta = 0 deg\n fraction : float\n Containment fraction. Default fraction = 0.68\n\n Returns\n -------\n radius : `~astropy.units.Quantity`\n Containment radius in deg\n \"\"\"\n energy = Quantity(energy)\n if energy.ndim == 0:\n energy = Quantity([energy.value], energy.unit)\n\n theta = Angle(theta)\n if theta.ndim == 0:\n theta = Quantity([theta.value], theta.unit)\n\n unit = None\n radius = np.zeros((energy.size, theta.size))\n for e in range(energy.size):\n for t in range(theta.size):\n try:\n psf = self.to_table_psf(energy[e], theta[t], interp_kwargs)\n except:\n # This can raise an `error` from scipy UnivariateSpline:\n # error: (xb<=x[0]) failed for 2nd keyword xb: fpcurf0:xb=nan\n # Not sure what type exactly or how to catch it.\n radius[e, t] = np.nan\n continue\n r = psf.containment_radius(fraction)\n radius[e, t] = r.value\n unit = r.unit\n\n return Quantity(radius.squeeze(), unit)\n\n def plot_containment_vs_energy(\n self, fractions=[0.68, 0.95], thetas=Angle([0, 1], \"deg\"), ax=None\n ):\n \"\"\"Plot containment fraction as a function of energy.\n \"\"\"\n import matplotlib.pyplot as plt\n\n ax = plt.gca() if ax is None else ax\n\n energy = Energy.equal_log_spacing(self.energy_lo[0], self.energy_hi[-1], 100)\n\n for theta in thetas:\n for fraction in fractions:\n radius = self.containment_radius(energy, theta, fraction).squeeze()\n label = \"{} deg, {:.1f}%\".format(theta, 100 * fraction)\n ax.plot(energy.value, radius.value, label=label)\n\n ax.semilogx()\n ax.legend(loc=\"best\")\n ax.set_xlabel(\"Energy (TeV)\")\n ax.set_ylabel(\"Containment radius (deg)\")\n\n def plot_psf_vs_rad(self, theta=\"0 deg\", energy=Quantity(1, \"TeV\")):\n \"\"\"Plot PSF vs rad.\n\n Parameters\n ----------\n energy : `~astropy.units.Quantity`\n Energy. Default energy = 1 TeV\n theta : `~astropy.coordinates.Angle`\n Offset in the field of view. 
Default theta = 0 deg\n \"\"\"\n theta = Angle(theta)\n table = self.to_table_psf(energy=energy, theta=theta)\n return table.plot_psf_vs_rad()\n\n def plot_containment(\n self, fraction=0.68, ax=None, show_safe_energy=False, add_cbar=True, **kwargs\n ):\n \"\"\"\n Plot containment image with energy and theta axes.\n\n Parameters\n ----------\n fraction : float\n Containment fraction between 0 and 1.\n add_cbar : bool\n Add a colorbar\n \"\"\"\n import matplotlib.pyplot as plt\n\n ax = plt.gca() if ax is None else ax\n\n energy = self._energy_logcenter()\n offset = self.offset\n\n # Set up and compute data\n containment = self.containment_radius(energy, offset, fraction)\n\n # plotting defaults\n kwargs.setdefault(\"cmap\", \"GnBu\")\n kwargs.setdefault(\"vmin\", np.nanmin(containment.value))\n kwargs.setdefault(\"vmax\", np.nanmax(containment.value))\n\n # Plotting\n x = energy.value\n y = offset.value\n caxes = ax.pcolormesh(x, y, containment.value.T, **kwargs)\n\n # Axes labels and ticks, colobar\n ax.semilogx()\n ax.set_ylabel(\"Offset ({unit})\".format(unit=offset.unit))\n ax.set_xlabel(\"Energy ({unit})\".format(unit=energy.unit))\n ax.set_xlim(x.min(), x.max())\n ax.set_ylim(y.min(), y.max())\n\n if show_safe_energy:\n self._plot_safe_energy_range(ax)\n\n if add_cbar:\n label = \"Containment radius R{0:.0f} ({1})\" \"\".format(\n 100 * fraction, containment.unit\n )\n ax.figure.colorbar(caxes, ax=ax, label=label)\n\n return ax\n\n def _plot_safe_energy_range(self, ax):\n \"\"\"add safe energy range lines to the plot\"\"\"\n esafe = self.energy_thresh_lo\n omin = self.offset.value.min()\n omax = self.offset.value.max()\n ax.hlines(y=esafe.value, xmin=omin, xmax=omax)\n label = \"Safe energy threshold: {0:3.2f}\".format(esafe)\n ax.text(x=0.1, y=0.9 * esafe.value, s=label, va=\"top\")\n\n def peek(self, figsize=(15, 5)):\n \"\"\"Quick-look summary plots.\"\"\"\n import matplotlib.pyplot as plt\n\n fig, axes = plt.subplots(nrows=1, ncols=3, figsize=figsize)\n\n self.plot_containment(fraction=0.68, ax=axes[0])\n self.plot_containment(fraction=0.95, ax=axes[1])\n self.plot_containment_vs_energy(ax=axes[2])\n\n # TODO: implement this plot\n # psf = self.psf_at_energy_and_theta(energy='1 TeV', theta='1 deg')\n # psf.plot_components(ax=axes[2])\n\n plt.tight_layout()\n",
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport numpy as np\nfrom astropy.units import Quantity\n\n__all__ = [\"CountsPredictor\", \"integrate_spectrum\"]\n\n\nclass CountsPredictor(object):\n \"\"\"Calculate number of predicted counts (``npred``).\n\n The true and reconstructed energy binning are inferred from the provided IRFs.\n\n Parameters\n ----------\n model : `~gammapy.spectrum.models.SpectralModel`\n Spectral model\n aeff : `~gammapy.irf.EffectiveAreaTable`\n EffectiveArea\n edisp : `~gammapy.irf.EnergyDispersion`, optional\n EnergyDispersion\n livetime : `~astropy.units.Quantity`\n Observation duration (may be contained in aeff)\n e_true : `~astropy.units.Quantity`, optional\n Desired energy axis of the prediced counts vector if no IRFs are given\n\n Examples\n --------\n Calculate prediced counts in a desired reconstruced energy binning\n\n .. plot::\n :include-source:\n\n from gammapy.irf import EnergyDispersion, EffectiveAreaTable\n from gammapy.spectrum import models, CountsPredictor\n import numpy as np\n import astropy.units as u\n import matplotlib.pyplot as plt\n\n e_true = np.logspace(-2,2.5,109) * u.TeV\n e_reco = np.logspace(-2,2,73) * u.TeV\n\n aeff = EffectiveAreaTable.from_parametrization(energy=e_true)\n edisp = EnergyDispersion.from_gauss(e_true=e_true, e_reco=e_reco,\n sigma=0.3, bias=0)\n\n model = models.PowerLaw(index=2.3,\n amplitude=\"2.5e-12 cm-2 s-1 TeV-1\",\n reference=\"1 TeV\")\n\n livetime = 1 * u.h\n\n predictor = CountsPredictor(model=model,\n aeff=aeff,\n edisp=edisp,\n livetime=livetime)\n predictor.run()\n predictor.npred.plot_hist()\n plt.show()\n \"\"\"\n\n def __init__(self, model, aeff=None, edisp=None, livetime=None, e_true=None):\n self.model = model\n self.aeff = aeff\n self.edisp = edisp\n self.livetime = livetime\n self.e_true = e_true\n self.e_reco = None\n\n self.true_flux = None\n self.true_counts = None\n self.npred = None\n\n def run(self):\n self.integrate_model()\n self.apply_aeff()\n self.apply_edisp()\n\n def integrate_model(self):\n \"\"\"Integrate model in true energy space\"\"\"\n if self.aeff is not None:\n # TODO: True energy is converted to model amplitude unit. See issue 869\n ref_unit = None\n try:\n for unit in self.model.parameters[\"amplitude\"].quantity.unit.bases:\n if unit.is_equivalent(\"eV\"):\n ref_unit = unit\n except IndexError:\n ref_unit = \"TeV\"\n self.e_true = self.aeff.energy.bins.to(ref_unit)\n else:\n if self.e_true is None:\n raise ValueError(\"No true energy binning given\")\n\n self.true_flux = self.model.integral(\n emin=self.e_true[:-1], emax=self.e_true[1:], intervals=True\n )\n\n def apply_aeff(self):\n if self.aeff is not None:\n cts = self.true_flux * self.aeff.data.data\n else:\n cts = self.true_flux\n\n # Multiply with livetime if not already contained in aeff or model\n if cts.unit.is_equivalent(\"s-1\"):\n cts *= self.livetime\n\n self.true_counts = cts.to(\"\")\n\n def apply_edisp(self):\n from . import CountsSpectrum\n\n if self.edisp is not None:\n cts = self.edisp.apply(self.true_counts)\n self.e_reco = self.edisp.e_reco.bins\n else:\n cts = self.true_counts\n self.e_reco = self.e_true\n\n self.npred = CountsSpectrum(\n data=cts, energy_lo=self.e_reco[:-1], energy_hi=self.e_reco[1:]\n )\n\n\ndef integrate_spectrum(func, xmin, xmax, ndecade=100, intervals=False):\n \"\"\"\n Integrate 1d function using the log-log trapezoidal rule. 
If scalar values\n\n for xmin and xmax are passed an oversampled grid is generated using the\n ``ndecade`` keyword argument. If xmin and xmax arrays are passed, no\n oversampling is performed and the integral is computed in the provided\n grid.\n\n Parameters\n ----------\n func : callable\n Function to integrate.\n xmin : `~astropy.units.Quantity` or array-like\n Integration range minimum\n xmax : `~astropy.units.Quantity` or array-like\n Integration range minimum\n ndecade : int, optional\n Number of grid points per decade used for the integration.\n Default : 100.\n intervals : bool, optional\n Return integrals in the grid not the sum, default: False\n \"\"\"\n is_quantity = False\n if isinstance(xmin, Quantity):\n unit = xmin.unit\n xmin = xmin.value\n xmax = xmax.to_value(unit)\n is_quantity = True\n\n if np.isscalar(xmin):\n logmin = np.log10(xmin)\n logmax = np.log10(xmax)\n n = (logmax - logmin) * ndecade\n x = np.logspace(logmin, logmax, n)\n else:\n x = np.append(xmin, xmax[-1])\n\n if is_quantity:\n x = x * unit\n\n y = func(x)\n\n val = _trapz_loglog(y, x, intervals=intervals)\n\n return val\n\n\n# This function is copied over from https://github.com/zblz/naima/blob/master/naima/utils.py#L261\n# and slightly modified to allow use with the uncertainties package\n\n\ndef _trapz_loglog(y, x, axis=-1, intervals=False):\n \"\"\"\n Integrate along the given axis using the composite trapezoidal rule in\n loglog space.\n\n Integrate `y` (`x`) along given axis in loglog space.\n\n Parameters\n ----------\n y : array_like\n Input array to integrate.\n x : array_like, optional\n Independent variable to integrate over.\n axis : int, optional\n Specify the axis.\n intervals : bool, optional\n Return array of shape x not the total integral, default: False\n\n Returns\n -------\n trapz : float\n Definite integral as approximated by trapezoidal rule in loglog space.\n \"\"\"\n log10 = np.log10\n\n try:\n y_unit = y.unit\n y = y.value\n except AttributeError:\n y_unit = 1.0\n try:\n x_unit = x.unit\n x = x.value\n except AttributeError:\n x_unit = 1.0\n\n y = np.asanyarray(y)\n x = np.asanyarray(x)\n\n slice1 = [slice(None)] * y.ndim\n slice2 = [slice(None)] * y.ndim\n slice1[axis] = slice(None, -1)\n slice2[axis] = slice(1, None)\n slice1, slice2 = tuple(slice1), tuple(slice2)\n\n # arrays with uncertainties contain objects\n if y.dtype == \"O\":\n from uncertainties.unumpy import log10\n\n # uncertainties.unumpy.log10 can't deal with tiny values see\n # https://github.com/gammapy/gammapy/issues/687, so we filter out the values\n # here. 
As the values are so small it doesn't affect the final result.\n # the sqrt is taken to create a margin, because of the later division\n # y[slice2] / y[slice1]\n valid = y > np.sqrt(np.finfo(float).tiny)\n x, y = x[valid], y[valid]\n\n if x.ndim == 1:\n shape = [1] * y.ndim\n shape[axis] = x.shape[0]\n x = x.reshape(shape)\n\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n # Compute the power law indices in each integration bin\n b = log10(y[slice2] / y[slice1]) / log10(x[slice2] / x[slice1])\n\n # if local powerlaw index is -1, use \\int 1/x = log(x); otherwise use normal\n # powerlaw integration\n trapzs = np.where(\n np.abs(b + 1.0) > 1e-10,\n (y[slice1] * (x[slice2] * (x[slice2] / x[slice1]) ** b - x[slice1]))\n / (b + 1),\n x[slice1] * y[slice1] * np.log(x[slice2] / x[slice1]),\n )\n\n tozero = (y[slice1] == 0.0) + (y[slice2] == 0.0) + (x[slice1] == x[slice2])\n trapzs[tozero] = 0.0\n\n if intervals:\n return trapzs * x_unit * y_unit\n\n ret = np.add.reduce(trapzs, axis) * x_unit * y_unit\n\n return ret\n"
] | [
[
"numpy.testing.assert_allclose"
],
[
"numpy.nanmax",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.tight_layout",
"numpy.sqrt",
"numpy.nanmin",
"scipy.interpolate.RegularGridInterpolator",
"matplotlib.pyplot.subplots",
"numpy.column_stack",
"numpy.meshgrid",
"numpy.zeros"
],
[
"numpy.log",
"numpy.abs",
"numpy.logspace",
"numpy.add.reduce",
"numpy.finfo",
"numpy.append",
"numpy.log10",
"numpy.asanyarray",
"numpy.isscalar",
"numpy.errstate"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.14",
"1.6",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
tsfw/yolov3 | [
"bf6d03d9a84a0ac1e94bcc4f9a026f7d32dfbdab"
] | [
"dataReader.py"
] | [
"import os\nimport config\nimport json\nimport tensorflow as tf\nimport numpy as np\nfrom collections import defaultdict\n\nclass Reader:\n def __init__(self, mode, data_dir, anchors_path, num_classes, tfrecord_num = 12, input_shape = 416, max_boxes = 20):\n \"\"\"\n Introduction\n ------------\n 构造函数\n Parameters\n ----------\n data_dir: 文件路径\n mode: 数据集模式\n anchors: 数据集聚类得到的anchor\n num_classes: 数据集图片类别数量\n input_shape: 图像输入模型的大小\n max_boxes: 每张图片最大的box数量\n jitter: 随机长宽比系数\n hue: 调整hsv颜色空间系数\n sat: 调整饱和度系数\n cont: 调整对比度系数\n bri: 调整亮度系数\n \"\"\"\n self.data_dir = data_dir\n self.input_shape = input_shape\n self.max_boxes = max_boxes\n self.mode = mode\n self.annotations_file = {'train' : config.train_annotations_file, 'val' : config.val_annotations_file}\n self.data_file = {'train': config.train_data_file, 'val': config.val_data_file}\n self.anchors_path = anchors_path\n self.anchors = self._get_anchors()\n self.num_classes = num_classes\n file_pattern = self.data_dir + \"/*\" + self.mode + '.tfrecords'\n self.TfrecordFile = tf.gfile.Glob(file_pattern)\n self.class_names = self._get_class(config.classes_path)\n if len(self.TfrecordFile) == 0:\n self.convert_to_tfrecord(self.data_dir, tfrecord_num)\n\n def _get_anchors(self):\n \"\"\"\n Introduction\n ------------\n 获取anchors\n Returns\n -------\n anchors: anchor数组\n \"\"\"\n anchors_path = os.path.expanduser(self.anchors_path)\n with open(anchors_path) as f:\n anchors = f.readline()\n anchors = [float(x) for x in anchors.split(',')]\n return np.array(anchors).reshape(-1, 2)\n\n def _get_class(self, classes_path):\n \"\"\"\n Introduction\n ------------\n 获取类别名字\n Returns\n -------\n class_names: coco数据集类别对应的名字\n \"\"\"\n classes_path = os.path.expanduser(classes_path)\n with open(classes_path) as f:\n class_names = f.readlines()\n class_names = [c.strip() for c in class_names]\n return class_names\n\n def Preprocess_true_boxes(self, true_boxes):\n \"\"\"\n Introduction\n ------------\n 对训练数据的ground truth box进行预处理\n Parameters\n ----------\n true_boxes: ground truth box 形状为[boxes, 5], x_min, y_min, x_max, y_max, class_id\n \"\"\"\n num_layers = len(self.anchors) // 3\n anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]\n true_boxes = np.array(true_boxes, dtype='float32')\n input_shape = np.array([self.input_shape, self.input_shape], dtype='int32')\n boxes_xy = (true_boxes[..., 0:2] + true_boxes[..., 2:4]) // 2.\n boxes_wh = true_boxes[..., 2:4] - true_boxes[..., 0:2]\n true_boxes[..., 0:2] = boxes_xy / input_shape[::-1]\n true_boxes[..., 2:4] = boxes_wh / input_shape[::-1]\n\n grid_shapes = [input_shape // 32, input_shape // 16, input_shape // 8]\n y_true = [np.zeros((grid_shapes[l][0], grid_shapes[l][1], len(anchor_mask[l]), 5 + self.num_classes), dtype='float32') for l in range(num_layers)]\n # 这里扩充维度是为了后面应用广播计算每个图中所有box的anchor互相之间的iou\n anchors = np.expand_dims(self.anchors, 0)\n anchors_max = anchors / 2.\n anchors_min = -anchors_max\n # 因为之前对box做了padding, 因此需要去除全0行\n valid_mask = boxes_wh[..., 0] > 0\n wh = boxes_wh[valid_mask]\n # 为了应用广播扩充维度\n wh = np.expand_dims(wh, -2)\n # wh 的shape为[box_num, 1, 2]\n boxes_max = wh / 2.\n boxes_min = -boxes_max\n\n intersect_min = np.maximum(boxes_min, anchors_min)\n intersect_max = np.minimum(boxes_max, anchors_max)\n intersect_wh = np.maximum(intersect_max - intersect_min, 0.)\n intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]\n box_area = wh[..., 0] * wh[..., 1]\n anchor_area = anchors[..., 0] * anchors[..., 1]\n iou = intersect_area / (box_area + anchor_area - intersect_area)\n\n # 
找出和ground truth box的iou最大的anchor box, 然后将对应不同比例的负责该ground turth box 的位置置为ground truth box坐标\n best_anchor = np.argmax(iou, axis = -1)\n for t, n in enumerate(best_anchor):\n for l in range(num_layers):\n if n in anchor_mask[l]:\n i = np.floor(true_boxes[t, 0] * grid_shapes[l][1]).astype('int32')\n j = np.floor(true_boxes[t, 1] * grid_shapes[l][0]).astype('int32')\n k = anchor_mask[l].index(n)\n c = true_boxes[t, 4].astype('int32')\n y_true[l][j, i, k, 0:4] = true_boxes[t, 0:4]\n y_true[l][j, i, k, 4] = 1.\n y_true[l][j, i, k, 5 + c] = 1.\n return y_true[0], y_true[1], y_true[2]\n\n\n\n def read_annotations(self):\n \"\"\"\n Introduction\n ------------\n 读取COCO数据集图片路径和对应的标注\n Parameters\n ----------\n data_file: 文件路径\n \"\"\"\n image_data = []\n boxes_data = []\n name_box_id = defaultdict(list)\n with open(self.annotations_file[self.mode], encoding='utf-8') as file:\n data = json.load(file)\n annotations = data['annotations']\n for ant in annotations:\n id = ant['image_id']\n name = os.path.join(self.data_file[self.mode], '%012d.jpg' % id)\n cat = ant['category_id']\n if cat >= 1 and cat <= 11:\n cat = cat - 1\n elif cat >= 13 and cat <= 25:\n cat = cat - 2\n elif cat >= 27 and cat <= 28:\n cat = cat - 3\n elif cat >= 31 and cat <= 44:\n cat = cat - 5\n elif cat >= 46 and cat <= 65:\n cat = cat - 6\n elif cat == 67:\n cat = cat - 7\n elif cat == 70:\n cat = cat - 9\n elif cat >= 72 and cat <= 82:\n cat = cat - 10\n elif cat >= 84 and cat <= 90:\n cat = cat - 11\n name_box_id[name].append([ant['bbox'], cat])\n\n for key in name_box_id.keys():\n boxes = []\n image_data.append(key)\n box_infos = name_box_id[key]\n for info in box_infos:\n x_min = info[0][0]\n y_min = info[0][1]\n x_max = x_min + info[0][2]\n y_max = y_min + info[0][3]\n boxes.append(np.array([x_min, y_min, x_max, y_max, info[1]]))\n boxes_data.append(np.array(boxes))\n\n return image_data, boxes_data\n\n\n def convert_to_tfrecord(self, tfrecord_path, num_tfrecords):\n \"\"\"\n Introduction\n ------------\n 将图片和boxes数据存储为tfRecord\n Parameters\n ----------\n tfrecord_path: tfrecord文件存储路径\n num_tfrecords: 分成多少个tfrecord\n \"\"\"\n image_data, boxes_data = self.read_annotations()\n images_num = int(len(image_data) / num_tfrecords)\n for index_records in range(num_tfrecords):\n output_file = os.path.join(tfrecord_path, str(index_records) + '_' + self.mode + '.tfrecords')\n with tf.python_io.TFRecordWriter(output_file) as record_writer:\n for index in range(index_records * images_num, (index_records + 1) * images_num):\n with tf.gfile.FastGFile(image_data[index], 'rb') as file:\n image = file.read()\n xmin, xmax, ymin, ymax, label = [], [], [], [], []\n for box in boxes_data[index]:\n xmin.append(box[0])\n ymin.append(box[1])\n xmax.append(box[2])\n ymax.append(box[3])\n label.append(box[4])\n example = tf.train.Example(features = tf.train.Features(\n feature = {\n 'image/encoded' : tf.train.Feature(bytes_list = tf.train.BytesList(value = [image])),\n 'image/object/bbox/xmin' : tf.train.Feature(float_list = tf.train.FloatList(value = xmin)),\n 'image/object/bbox/xmax': tf.train.Feature(float_list = tf.train.FloatList(value = xmax)),\n 'image/object/bbox/ymin': tf.train.Feature(float_list = tf.train.FloatList(value = ymin)),\n 'image/object/bbox/ymax': tf.train.Feature(float_list = tf.train.FloatList(value = ymax)),\n 'image/object/bbox/label': tf.train.Feature(float_list = tf.train.FloatList(value = label)),\n }\n ))\n record_writer.write(example.SerializeToString())\n if index % 1000 == 0:\n print('Processed {} of {} 
images'.format(index + 1, len(image_data)))\n\n\n def parser(self, serialized_example):\n \"\"\"\n Introduction\n ------------\n 解析tfRecord数据\n Parameters\n ----------\n serialized_example: 序列化的每条数据\n \"\"\"\n features = tf.parse_single_example(\n serialized_example,\n features = {\n 'image/encoded' : tf.FixedLenFeature([], dtype = tf.string),\n 'image/object/bbox/xmin' : tf.VarLenFeature(dtype = tf.float32),\n 'image/object/bbox/xmax': tf.VarLenFeature(dtype = tf.float32),\n 'image/object/bbox/ymin': tf.VarLenFeature(dtype = tf.float32),\n 'image/object/bbox/ymax': tf.VarLenFeature(dtype = tf.float32),\n 'image/object/bbox/label': tf.VarLenFeature(dtype = tf.float32)\n }\n )\n image = tf.image.decode_jpeg(features['image/encoded'], channels = 3)\n image = tf.image.convert_image_dtype(image, tf.uint8)\n xmin = tf.expand_dims(features['image/object/bbox/xmin'].values, axis = 0)\n ymin = tf.expand_dims(features['image/object/bbox/ymin'].values, axis = 0)\n xmax = tf.expand_dims(features['image/object/bbox/xmax'].values, axis = 0)\n ymax = tf.expand_dims(features['image/object/bbox/ymax'].values, axis = 0)\n label = tf.expand_dims(features['image/object/bbox/label'].values, axis = 0)\n bbox = tf.concat(axis = 0, values = [xmin, ymin, xmax, ymax, label])\n bbox = tf.transpose(bbox, [1, 0])\n image, bbox = self.Preprocess(image, bbox)\n bbox_true_13, bbox_true_26, bbox_true_52 = tf.py_func(self.Preprocess_true_boxes, [bbox], [tf.float32, tf.float32, tf.float32])\n return image, bbox, bbox_true_13, bbox_true_26, bbox_true_52\n\n def Preprocess(self, image, bbox):\n \"\"\"\n Introduction\n ------------\n 对图片进行预处理,增强数据集\n Parameters\n ----------\n image: tensorflow解析的图片\n bbox: 图片中对应的box坐标\n \"\"\"\n image_width, image_high = tf.cast(tf.shape(image)[1], tf.float32), tf.cast(tf.shape(image)[0], tf.float32)\n input_width = tf.cast(self.input_shape, tf.float32)\n input_high = tf.cast(self.input_shape, tf.float32)\n new_high = image_high * tf.minimum(input_width / image_width, input_high / image_high)\n new_width = image_width * tf.minimum(input_width / image_width, input_high / image_high)\n # 将图片按照固定长宽比进行padding缩放\n dx = (input_width - new_width) / 2\n dy = (input_high - new_high) / 2\n image = tf.image.resize_images(image, [tf.cast(new_high, tf.int32), tf.cast(new_width, tf.int32)], method = tf.image.ResizeMethod.BICUBIC)\n new_image = tf.image.pad_to_bounding_box(image, tf.cast(dy, tf.int32), tf.cast(dx, tf.int32), tf.cast(input_high, tf.int32), tf.cast(input_width, tf.int32))\n image_ones = tf.ones_like(image)\n image_ones_padded = tf.image.pad_to_bounding_box(image_ones, tf.cast(dy, tf.int32), tf.cast(dx, tf.int32), tf.cast(input_high, tf.int32), tf.cast(input_width, tf.int32))\n image_color_padded = (1 - image_ones_padded) * 128\n image = image_color_padded + new_image\n # 矫正bbox坐标\n xmin, ymin, xmax, ymax, label = tf.split(value = bbox, num_or_size_splits=5, axis = 1)\n xmin = xmin * new_width / image_width + dx\n xmax = xmax * new_width / image_width + dx\n ymin = ymin * new_high / image_high + dy\n ymax = ymax * new_high / image_high + dy\n bbox = tf.concat([xmin, ymin, xmax, ymax, label], 1)\n if self.mode == 'train':\n # 随机左右翻转图片\n def _flip_left_right_boxes(boxes):\n xmin, ymin, xmax, ymax, label = tf.split(value = boxes, num_or_size_splits = 5, axis = 1)\n flipped_xmin = tf.subtract(input_width, xmax)\n flipped_xmax = tf.subtract(input_width, xmin)\n flipped_boxes = tf.concat([flipped_xmin, ymin, flipped_xmax, ymax, label], 1)\n return flipped_boxes\n flip_left_right = 
tf.greater(tf.random_uniform([], dtype = tf.float32, minval = 0, maxval = 1), 0.5)\n image = tf.cond(flip_left_right, lambda : tf.image.flip_left_right(image), lambda : image)\n bbox = tf.cond(flip_left_right, lambda: _flip_left_right_boxes(bbox), lambda: bbox)\n # 将图片归一化到0和1之间\n image = image / 255.\n image = tf.clip_by_value(image, clip_value_min = 0.0, clip_value_max = 1.0)\n bbox = tf.clip_by_value(bbox, clip_value_min = 0, clip_value_max = input_width - 1)\n bbox = tf.cond(tf.greater(tf.shape(bbox)[0], config.max_boxes), lambda: bbox[:config.max_boxes], lambda: tf.pad(bbox, paddings = [[0, config.max_boxes - tf.shape(bbox)[0]], [0, 0]], mode = 'CONSTANT'))\n return image, bbox\n\n\n def build_dataset(self, batch_size):\n \"\"\"\n Introduction\n ------------\n 建立数据集dataset\n Parameters\n ----------\n batch_size: batch大小\n Return\n ------\n dataset: 返回tensorflow的dataset\n \"\"\"\n dataset = tf.data.TFRecordDataset(filenames = self.TfrecordFile)\n dataset = dataset.map(self.parser, num_parallel_calls = 10)\n if self.mode == 'train':\n dataset = dataset.repeat().shuffle(9000).batch(batch_size).prefetch(batch_size)\n else:\n dataset = dataset.repeat().batch(batch_size).prefetch(batch_size)\n return dataset\n"
] | [
[
"numpy.expand_dims",
"numpy.minimum",
"tensorflow.concat",
"tensorflow.FixedLenFeature",
"tensorflow.cast",
"tensorflow.minimum",
"tensorflow.py_func",
"tensorflow.data.TFRecordDataset",
"tensorflow.python_io.TFRecordWriter",
"tensorflow.subtract",
"numpy.argmax",
"tensorflow.image.decode_jpeg",
"tensorflow.shape",
"tensorflow.gfile.Glob",
"numpy.floor",
"tensorflow.VarLenFeature",
"tensorflow.train.BytesList",
"tensorflow.train.FloatList",
"tensorflow.split",
"numpy.array",
"tensorflow.clip_by_value",
"numpy.maximum",
"tensorflow.transpose",
"tensorflow.ones_like",
"tensorflow.expand_dims",
"tensorflow.image.flip_left_right",
"tensorflow.image.convert_image_dtype",
"tensorflow.random_uniform",
"tensorflow.gfile.FastGFile"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
sebtac/MLxE | [
"93baa6b7c9fd14e54abd7199e868fb828e9a7c52"
] | [
"a3c_master_sewak.py"
] | [
"\"\"\" A3C in Code - Centralized/ Gobal Network Parameter Server/ Controller\n\nBased On:\n \nA3C Code as in the book Deep Reinforcement Learning, Chapter 12.\n\nRuntime: Python 3.6.5\nDependencies: numpy, matplotlib, tensorflow (/ tensorflow-gpu), gym\nDocStrings: GoogleStyle\n\nAuthor : Mohit Sewak ([email protected])\nInspired from: A3C implementation on TensorFLow official github repository (Tensorflow/models/research)\n\n**********************************************************************\n\nAdjusted by Seabstian Taciak as part of develeopment of MLxE Architecture\n\n@author: sebtac\n@contact: https://www.linkedin.com/in/sebastian-taciak-5893861/\n\n\"\"\"\n\n# SET BEFORE RUNNIG\n\n# AGENT TYPE\n# 0 - Sewak Base Agent (Fixed)\n# 1 - Sewak DNN Adjusted\n# 2 - Sewak \"Task\" Modified\n# 3 - Sewak ISTB (Iterative, Synchronous Thread Based)\n\nAgent_Type = 3\n\nlearning_rate = 0.0001\n\nimport multiprocessing\ncores = multiprocessing.cpu_count() # DEFAULT SETTING\n#cores = 1 # FOR DEBUGGING\n\n# GENERAL IMPORTS\nimport sys\nsys.path.append(r'C:\\Users\\surface\\Documents\\Python\\RL\\MLxE\\Mohit Sewak RL\\Mohit12_A3C')\nimport time\nimport winsound\nimport logging\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nlogging.basicConfig()\nlogger = logging.getLogger()\nlogger.setLevel(logging.DEBUG)\n\n# DEEP LEARING and ENVIRONEMENT RELATER IMPORTS\nimport tensorflow as tf\nimport tensorflow_addons as tfa # ST for DNN Adjustment\nimport gym\n\n# CUSTOM SEWAK's MODULES with OPTIONAL SEBTAC ADJUSTMENTS\nfrom experience_replay_sewak import SimpleListBasedMemory\n\nif Agent_Type == 0:\n from actorcritic_model_sewak import ActorCriticModel as ACModel # For Sewak Fixed version\n from a3c_worker_sewak_base import A3C_Worker # the intial Sewak's implementation with fixes of the Policy_Loss Calcultion\nelif Agent_Type == 1:\n from actorcritic_model_sewak import ActorCriticModel_Dimond as ACModel\n from a3c_worker_sewak_DNN_Adjusted import A3C_Worker\nelif Agent_Type == 2:\n from actorcritic_model_sewak import ActorCriticModel_Dimond as ACModel\n from a3c_worker_sewak_Task_Modifications import A3C_Worker\nelif Agent_Type == 3:\n from actorcritic_model_sewak import ActorCriticModel_DoubleDimond as ACModel\n from a3c_worker_sewak_ISTB import A3C_Worker\n\n# SEWAK's Implementation Fix\n\"\"\"\n- Policy Loss Calcualtion\n- Using actual play in example generation (was random)\n\"\"\"\n\n# DNN Adjustments\n\"\"\"\n- Adding monotonic decrease in Learing Rate relative to the number of episodes run with:\n self.alpha_power = 0.998\n self.alpha_limit = 0.000001\n- Modifying the Model to: common_network_size=[128,256,128], policy_network_size=[64,128,64], value_network_size=[64,128,64]\n- Changing the Optimizer to RectifiedAdam -- requaires tensorflow_addons\n- Changing Gamma coeffcient to 0.97\n\"\"\"\n\n# Task Specific Modifications\n\"\"\"\n- Modified state representation with addition of 5th parameter representing the squared distance of the cart from the center of the plane\n- Adverse Initial Position\n- Negative Reward: -10.0 (originally 0.0)\n- Monotonically Decreasing Discount Factor (Gamma Coefficent)\n- Goal Specific Reward for cart being close to center of the pland and the pole being close to vertical\n\"\"\"\n\nclass A3C_Master():\n \"\"\"A3C Master\n\n Centralized Master class of A3C used for hosting the global network parameters and spawning the agents.\n\n Args:\n env_name (str): Name of a valid gym environment\n model_dir (str): Directory for saving the model during training, 
and loading the same while playing\n learning_rate (float): The learning rate (alpha) for the optimizer\n\n Examples:\n agent = A3C_Master()\n agent.train()\n agent.play()\n\n \"\"\"\n\n def __init__(self, Agent_Type=Agent_Type, env_name='CartPole-v0', model_dir=\"models\", learning_rate=learning_rate): #ST 0.001 for Fixed, 0.0001 otherwise \n self.env_name = env_name\n self.model_dir = model_dir\n self.alpha = learning_rate\n if not os.path.exists(model_dir):\n os.makedirs(model_dir)\n self.env = gym.make(self.env_name)\n self.action_size = self.env.action_space.n\n \n if Agent_Type <= 1:\n self.state_size = self.env.observation_space.shape[0] # For None TaH imlementations\n elif Agent_Type == 2:\n self.state_size = self.env.observation_space.shape[0] + 1 # ST for TaH implementation\n elif Agent_Type == 3:\n self.state_size = self.env.observation_space.shape[0] + 1 # ST for TaH implementation\n \n if Agent_Type == 0:\n self.optimizer = tf.keras.optimizers.Adam(self.alpha)\n else:\n self.optimizer = tfa.optimizers.RectifiedAdam(self.alpha) # ST DNN Adjustment\n \n logger.debug(\"StateSize:{}, ActionSize:{}\".format(self.state_size, self.action_size))\n self.master_model = ACModel(self.action_size)\n self.master_model(tf.convert_to_tensor(np.random.random((1, self.state_size)), dtype=tf.float32))\n\n def train(self, cores):\n \"\"\"Train the A3C agent\n Main function to train the A3C agent after instantiation.\n\n This method uses the number of processor cores to spawns as many Workers. The workers are spawned as\n multiple parallel threads instead of multiple parallel processes. Being a threaded execution, the workers\n share memory and hence can write directly into the shared global variables.\n\n A more optimal, completely asynchronous implementation could be to spawn the workers as different processes\n using a task queue or multiprocessing. In case if this is adopted, then the shared variables need to made\n accessible in the distributed environment.\n\n \"\"\"\n\n a3c_workers = [A3C_Worker(self.master_model, \n self.optimizer, \n i, \n self.env_name, \n self.model_dir, \n workers_num = cores, \n learning_rate = learning_rate)\n for i in range(cores)]\n for i, worker in enumerate(a3c_workers):\n logger.info(\"Starting worker {}\".format(i))\n worker.start()\n [worker.join() for worker in a3c_workers]\n self.plot_training_statistics()\n\n def play(self):\n \"\"\"Play the environment using a trained agent\n\n This function opens a (graphical) window that will play a trained agent. 
The function will try to retrieve\n the model saved in the model_dir with filename formatted to contain the associated env_name.\n If the model is not found, then the function will first call the train function to start the training.\n\n \"\"\"\n env = self.env.unwrapped\n state = env.reset()\n model = self.master_model\n model_path = os.path.join(self.model_dir, 'model_{}.h5'.format(self.env_name))\n if not os.path.exists(model_path):\n logger.info('A3CMaster: No model found at {}, starting fresh training before playing!'.format(model_path))\n self.train()\n logger.info('A3CMaster: Playing env, Loading model from: {}'.format(model_path))\n print(\"Model Path:\", model_path)\n #model.load_weights(model_path)\n done = False\n step_counter = 0\n reward_sum = 0\n try:\n while not done:\n env.render(mode='rgb_array')\n policy, value = model(tf.convert_to_tensor(state[None, :], dtype=tf.float32))\n policy = tf.nn.softmax(policy)\n action = np.argmax(policy)\n state, reward, done, _ = env.step(action)\n reward_sum += reward\n logger.info(\"{}. Reward: {}, action: {}\".format(step_counter, reward_sum, action))\n step_counter += 1\n except KeyboardInterrupt:\n print(\"Received Keyboard Interrupt. Shutting down.\")\n finally:\n env.close()\n\n def plot_training_statistics(self, training_statistics=None):\n \"\"\"Plot training statistics\n\n This function plot the training statistics like the steps, rewards, discounted_rewards, and loss in each\n of the training episode.\n\n \"\"\"\n training_statistics = A3C_Worker.global_shared_training_stats if training_statistics is None \\\n else training_statistics\n all_episodes = []\n all_steps = []\n all_rewards = []\n all_discounted_rewards = []\n all_losses = []\n for stats in training_statistics:\n worker, episode, steps, reward, discounted_rewards, loss = stats\n all_episodes.append(episode)\n all_steps.append(steps)\n all_rewards.append(reward)\n all_discounted_rewards.append(discounted_rewards)\n all_losses.append(loss)\n self._make_double_axis_plot(all_episodes, all_steps, all_rewards)\n self._make_double_axis_plot(all_episodes,all_discounted_rewards,all_losses, label_y1=\"Discounted Reward\",\n label_y2=\"Loss\", color_y1=\"cyan\", color_y2=\"black\")\n \n np.savetxt('run.csv', all_steps, delimiter=',', fmt='%d')\n\n @staticmethod\n def _make_double_axis_plot(data_x, data_y1, data_y2, x_label='Episodes (e)', label_y1='Steps To Episode Completion',\n label_y2='Reward in each Episode', color_y1=\"red\", color_y2=\"blue\"):\n \"\"\"Internal helper function for plotting dual axis plots\n \"\"\"\n fig, ax1 = plt.subplots()\n ax1.set_xlabel(x_label)\n ax1.set_ylabel(label_y1, color=color_y1)\n ax1.plot(data_x, data_y1, color=color_y1)\n ax2 = ax1.twinx()\n ax2.set_ylabel(label_y2, color=color_y2)\n ax2.plot(data_x, data_y2, color=color_y2)\n fig.tight_layout()\n plt.show()\n\n\nif __name__ == \"__main__\":\n \"\"\"Main function for testing the A3C Master code's implementation\n \"\"\"\n agent = A3C_Master(Agent_Type=Agent_Type)\n agent.train(cores)\n #agent.play()\n \n for i in range(10):\n winsound.Beep(500,500)\n"
] | [
[
"tensorflow.convert_to_tensor",
"tensorflow.nn.softmax",
"numpy.random.random",
"matplotlib.pyplot.subplots",
"tensorflow.keras.optimizers.Adam",
"numpy.argmax",
"numpy.savetxt",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
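The A3C_Master.train() method in the PlatterDataset/feature entry above starts one worker per CPU core as plain Python threads that share the global model and optimizer. A minimal sketch of that start/join pattern follows, under the assumption of a placeholder Worker class; the rollout and gradient logic of the repository's A3C_Worker is omitted, only the threading skeleton is shown.

import multiprocessing
import threading

class Worker(threading.Thread):
    """Hypothetical stand-in for A3C_Worker: real workers would roll out
    episodes and apply gradients to the shared global model."""
    def __init__(self, shared_stats, worker_id):
        super().__init__()
        self.shared_stats = shared_stats  # shared in-process list
        self.worker_id = worker_id

    def run(self):
        # Record which worker ran, just to exercise the start/join pattern.
        self.shared_stats.append((self.worker_id, "done"))

cores = multiprocessing.cpu_count()
stats = []
workers = [Worker(stats, i) for i in range(cores)]
for w in workers:
    w.start()
for w in workers:
    w.join()
print(stats)

Because the workers are threads rather than processes, they can write into shared Python objects directly, which is the design choice the train() docstring above calls out.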
geflaspohler/deep-OTD | [
"0daec276669776952b5142149007175b8a3c4d87"
] | [
"examples/cdv/plttraj.py"
] | [
"import numpy as np\nimport matplotlib\nfrom matplotlib import pyplot as plt\n\nmatplotlib.rcParams['mathtext.fontset'] = 'stix'\nmatplotlib.rcParams['font.size'] = 9\n\nndim = 6\ndata = np.genfromtxt('dOTD_tst1.out')\n\nxticks = [900, 1100, 1300]\nyticks = [[0.7, 0.8, 0.9, 1],\n [-0.2, 0, 0.2, 0.4],\n [-0.5, 0, 0.5],\n [-1, -0.5, 0],\n [-0.5, 0, 0.5],\n [-0.5, 0, 0.5, 1]]\n\ndef latexify(ticklabels):\n \"\"\"Manually set LaTeX format for tick labels.\"\"\"\n return [r\"$\" + str(label) + \"$\" for label in ticklabels]\n \nfor ii in range(ndim):\n fig = plt.figure(figsize=(2.2,1.3), constrained_layout=True)\n fig.set_constrained_layout_pads(w_pad=0, h_pad=0)\n ax = plt.axes()\n plt.plot(data[:,0], data[:,ii+1], 'k-', linewidth=0.75)\n plt.xlabel('$t$')\n plt.ylabel('$z_{' + str(ii+1) + '}$')\n plt.xlim(xticks[0], xticks[-1])\n plt.ylim(yticks[ii][0], yticks[ii][-1])\n ax.set_xticks(xticks)\n ax.set_yticks(yticks[ii])\n ax.set_xticklabels(latexify(xticks))\n ax.set_yticklabels(latexify(yticks[ii]))\n ax.yaxis.set_label_coords(-0.2, 0.5)\n ax.tick_params(direction='in', length=2)\n plt.savefig('traj' + str(ii+1) + '.pdf')\n\n\n"
] | [
[
"matplotlib.pyplot.ylim",
"numpy.genfromtxt",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
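The plttraj.py script above formats tick labels by wrapping each value in $...$ so matplotlib's STIX mathtext renders them. A minimal, self-contained sketch of that latexify pattern; the sine trajectory and the traj_demo.pdf file name are placeholders, not part of the repository.

import numpy as np
import matplotlib
from matplotlib import pyplot as plt

matplotlib.rcParams['mathtext.fontset'] = 'stix'

def latexify(ticklabels):
    # Wrap each tick value in $...$ so matplotlib renders it with mathtext.
    return [r"$" + str(label) + "$" for label in ticklabels]

t = np.linspace(900, 1300, 200)
fig, ax = plt.subplots(figsize=(2.2, 1.3), constrained_layout=True)
ax.plot(t, np.sin(t / 50.0), 'k-', linewidth=0.75)
xticks = [900, 1100, 1300]
ax.set_xticks(xticks)
ax.set_xticklabels(latexify(xticks))
ax.tick_params(direction='in', length=2)
plt.savefig('traj_demo.pdf')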
nate-russell/Jungle | [
"114d744ed66fec11b8d5e62444253892a7ffa5cd",
"114d744ed66fec11b8d5e62444253892a7ffa5cd"
] | [
"jungle/code/sorting.py",
"jungle/test_tree.py"
] | [
"'''\nSorting Examples for showcasing and developing Jungle features\n'''\nimport inspect\nfrom jungle import JungleExperiment, JungleProfiler\nimport numpy as np\n\nprint('Finished Loading Modules')\n\nclass Sorting_Prototype:\n\n print('\\n---Test Sort N---')\n @JungleExperiment(reps=1, n=[100, 500])\n def test_sort_n(self, n=100, seed=1234):\n ''' Test sorting an iterable of size n with a random distribution '''\n # make data to sort with random distribution\n np.random.seed(seed)\n list_2_sort = list(np.random.randn(n))\n\n @JungleProfiler()\n def sort_n(l):\n sorted_list = self.sort(l)\n return sorted_list\n\n # Sort and check sort status\n sorted_list, _ = sort_n(list_2_sort)\n sort_status = all(sorted_list[i] <= sorted_list[i + 1] for i in range(len(sorted_list) - 1))\n return sort_status\n\n print('\\n---Test Block Sort---')\n @JungleExperiment(reps=1, n_blocks=[2, 4], block_size=[50, 100])\n @JungleProfiler()\n def test_block_random_sort(self, n_blocks=4, block_size=100):\n print('n_blocks: %s' % n_blocks)\n print('block_size: %s' % block_size)\n return 'something'\n\n\n\n\n\n\nclass NP_QuickSort(Sorting_Prototype):\n\n def sort(self, l):\n return np.sort(l, kind='quicksort')\n\n\nclass NP_MergeSort(Sorting_Prototype):\n\n def sort(self, l):\n return np.sort(l, kind='mergesort')\n\n\nclass NP_HeapSort(Sorting_Prototype):\n\n def sort(self, l):\n return np.sort(l, kind='heapsort')\n\n\nif __name__ == '__main__':\n print('\\n__main__\\n')\n\n print('\\n---Starting Call #1---')\n m1 = NP_QuickSort()\n jc1 = m1.test_sort_n()\n\n print('\\n---Starting Call #2---')\n m2 = NP_MergeSort()\n jc2 = m2.test_sort_n()\n\n print('\\n---Starting Call #3---')\n m1 = NP_QuickSort()\n jc1 = m1.test_block_random_sort()\n\n print('\\n---Starting Call #4---')\n m2 = NP_MergeSort()\n jc2 = m2.test_block_random_sort()\n",
"'''\nScript Purpose:\nTraverse content directory looking for classes that inherit from prototype classes and have test methods wrapped by JungleController.\nIf no modification has been made to a content file since last successful run of test_tree.py it will not be re run. (No Wasted Effort)\n\nDefinitions:\nPrototype Classes = classes with 'proto' in their name\nTest Methods = Bound methods belonging to a class that inherits from a prototype class\n'''\nimport glob\nimport copy\nimport importlib\nfrom jungle.utils.jungleprofiler import JungleExperiment, JungleEncoder\nimport json\nfrom json.decoder import JSONDecodeError\nimport os\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nsns.set_style('whitegrid')\n\n\nclass TestTreeAutomation:\n ''' Automation Code for discovering test functions used in conjunction with JungleController '''\n\n def __init__(self, dev=False):\n directory = 'code'\n self.json_mod_log = 'content_mod_log.json'\n self.file_dict_path = 'test_tree.json'\n self.file_mod_dict = {}\n self.dev = dev\n\n # Load File Mod Log\n self.load()\n\n # Iterate over all of the files in content\n for filename in glob.iglob('%s/**/*.py' % directory, recursive=True):\n if self.isfile_modified(filename) or self.dev:\n print('File %s has been modified since last TestTreeAutomation Call' % filename)\n self.test_file(filename)\n else:\n print('File %s has NOT been modified since last TestTreeAutomation Call' % filename)\n\n # Write the Test Tree and File Mod Log to JSONs\n self.post_process()\n self.write()\n\n def load(self):\n ''' Load Mod Log and prior TestTree '''\n # Mod Log\n try:\n with open(self.json_mod_log, mode='r') as f:\n self.old_file_mod_dict = json.load(f)\n except FileNotFoundError:\n self.old_file_mod_dict = {}\n # Last TestTree\n try:\n with open(self.file_dict_path, mode='r') as f:\n self.file_dict = json.load(f)\n except (FileNotFoundError, JSONDecodeError):\n self.file_dict = {}\n\n def isfile_modified(self, filename):\n ''' Check if file needs to be updated, also update the last modified '''\n\n latest_mod_time = os.stat(filename).st_mtime\n # update last mod time\n self.file_mod_dict[filename] = latest_mod_time\n\n try:\n if latest_mod_time <= self.old_file_mod_dict[filename]:\n return False\n except KeyError:\n print('New File Found: %s' % filename)\n return True\n\n def test_file(self, filename):\n ''' Discover and complete tests '''\n\n module_text = filename.replace('\\\\', '.').strip('.py')\n print('\\nFile: %s\\tSanitized: %s' % (filename, module_text))\n temp_module = importlib.import_module(module_text)\n\n prototypes = {}\n\n for obj_name in dir(temp_module):\n obj = getattr(temp_module, obj_name)\n\n try:\n obj_base = obj.__bases__\n obj_base_name = ' - '.join(ob.__name__ for ob in obj_base)\n local_test_methods = [method for method in dir(obj) if 'test' in method.lower()]\n print('\\n\\tObject Name: %s' % obj_name)\n print('\\tObject: %s' % obj)\n print('\\tObject Base: %s' % obj_base_name)\n\n if 'proto' in obj_base_name.lower():\n for test_name in local_test_methods:\n print('\\t\\tTest Name: %s' % test_name)\n test_method = getattr(obj(), test_name)\n print('\\t\\tTest Method: %s' % test_method)\n try:\n test_return = test_method()\n print(test_return.__repr__)\n if isinstance(test_return, JungleExperiment):\n if obj_base_name in prototypes:\n if test_name in prototypes[obj_base_name]:\n prototypes[obj_base_name][test_name].update({obj_name: test_return})\n # 
prototypes[obj_base_name][test_name][obj_name] = test_return\n else:\n prototypes[obj_base_name][test_name] = {obj_name: test_return}\n else:\n prototypes[obj_base_name] = {test_name: {obj_name: test_return}}\n\n except Exception as e:\n raise e\n except AttributeError:\n pass\n\n if prototypes:\n self.file_dict[module_text] = prototypes\n return prototypes\n\n def write(self):\n ''' Write data to file '''\n\n print('Writing Filename Mod Log')\n with open(self.json_mod_log, mode='w') as out_file:\n json.dump(self.file_mod_dict, out_file, sort_keys=True, indent=3)\n\n print('Writing Test Tree')\n with open(self.file_dict_path, mode='w') as out_file:\n jdump = json.dump(self.file_dict, out_file, sort_keys=True, indent=3, cls=JungleEncoder)\n\n def post_process(self):\n ''' Scoop all of the related jungle controllers and combine them for reporting'''\n for file, prototype_dicts in self.file_dict.items():\n for prototype, test_dicts in prototype_dicts.items():\n for test, methods_dict in test_dicts.items():\n print('Just called post process')\n print(test)\n print(methods_dict)\n print('Calling combine jungle controllers')\n self.combine_junglecontrollers(methods_dict)\n\n def combine_junglecontrollers(self, methods_dict):\n ''' dict of method keys and JungleController dictionaries'''\n df_list = []\n print('---------------------------------------\\nMethods Dict')\n print(methods_dict)\n for method, jc in methods_dict.items():\n\n if not isinstance(jc, JungleExperiment):\n raise TypeError('arg: %s is not of type JungleController' % type(jc))\n else:\n cdf = jc.controller_df\n cdf['Method'] = method\n df_list.append(cdf)\n\n concat_df = pd.concat(df_list)\n sns.set_style('darkgrid')\n n_colors = len(np.unique(concat_df['Method']))\n # pal = sns.cubehelix_palette(n_colors, start=.5, rot=-.75)\n pal = sns.diverging_palette(255, 133, l=60, center=\"dark\", n=n_colors)\n # pal = sns.color_palette(\"Set2\", n_colors)\n\n print(concat_df)\n\n g = sns.FacetGrid(data=concat_df, col=\"rep\", hue='Method', col_wrap=5, size=1.5)\n g = g.map(plt.plot, \"kwarg: n\", \"walltime\", marker=\".\")\n plt.show()\n\n g = sns.factorplot(data=concat_df, x='kwarg: n', y='walltime', hue='Method',\n palette=pal, shade=True,\n size=6, aspect=2, alpha=0.5, capsize=.2)\n g.despine(offset=10, trim=True)\n plt.title('Yo yo check it out')\n plt.show()\n\n sns.factorplot(data=concat_df, x='index', y='walltime', hue='Method')\n plt.show()\n\n sns.factorplot(data=concat_df, x='start_seconds', y='controller walltime', hue='Method')\n plt.show()\n\n\nif __name__ == '__main__':\n TestTreeAutomation(dev=True)\n"
] | [
[
"numpy.sort",
"numpy.random.randn",
"numpy.random.seed"
],
[
"pandas.concat",
"matplotlib.pyplot.show",
"matplotlib.pyplot.title",
"numpy.unique"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
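TestTreeAutomation in the Jungle entry above skips files whose modification time has not changed since the last run by comparing os.stat().st_mtime against a JSON mod log. A minimal sketch of that skip-unchanged-files idea; the content_mod_log.json name and the 'code' directory are taken from the script above, everything else is a simplification.

import glob
import json
import os

MOD_LOG = 'content_mod_log.json'

def load_mod_log(path=MOD_LOG):
    try:
        with open(path) as f:
            return json.load(f)
    except FileNotFoundError:
        return {}

def modified_files(directory, old_log):
    """Return .py files whose mtime is newer than the recorded one, plus the new log."""
    new_log = {}
    changed = []
    for filename in glob.iglob('%s/**/*.py' % directory, recursive=True):
        mtime = os.stat(filename).st_mtime
        new_log[filename] = mtime
        if mtime > old_log.get(filename, 0):
            changed.append(filename)
    return changed, new_log

changed, new_log = modified_files('code', load_mod_log())
with open(MOD_LOG, 'w') as f:
    json.dump(new_log, f, sort_keys=True, indent=3)
print(changed)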
gmathez/Project_ADA_2018_Bruttin_Mathez_Petitpierre | [
"e237300b3d9fb966b0eb747dd66816cc6cfc11b3"
] | [
"main.py"
] | [
"# Import kivy tools\nfrom kivy.app import App\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.uix.recycleboxlayout import RecycleBoxLayout\nfrom kivy.uix.label import Label\nfrom kivy.uix.button import Button\nfrom kivy.uix.checkbox import CheckBox\nfrom kivy.uix.spinner import Spinner\nfrom kivy.uix.recycleview import RecycleView\nfrom kivy.uix.recycleview.views import RecycleDataViewBehavior\nfrom kivy.uix.behaviors import FocusBehavior\nfrom kivy.uix.recycleview.layout import LayoutSelectionBehavior\nfrom kivy.properties import BooleanProperty, ObjectProperty\nfrom kivy.uix.screenmanager import ScreenManager, Screen\nfrom kivy.lang import Builder\n\n# Import the kv files\nBuilder.load_file('./src/rv.kv')\nBuilder.load_file('./src/screenhome.kv')\nBuilder.load_file('./src/screenprofile.kv')\nBuilder.load_file('./src/screensettings.kv')\nBuilder.load_file('./src/screenproduct.kv')\nBuilder.load_file('./src/screenquantities.kv')\nBuilder.load_file('./src/screenfinal.kv')\nBuilder.load_file('./src/manager.kv')\n\n# Other imports\nimport pandas as pd\nimport re\nfrom Algo_main import algo # Import the algorithm for NutriScore computation\n\nclass SelectableRecycleBoxLayout(FocusBehavior, LayoutSelectionBehavior,\n RecycleBoxLayout):\n ''' Add selection and focus behaviour to the view '''\n pass\n\nclass SelectableGrid(RecycleDataViewBehavior, GridLayout):\n ''' Add selection support to the Label '''\n\n index = None\n selected = BooleanProperty(False)\n selectable = BooleanProperty(True)\n\n def refresh_view_attrs(self, rv, index, data):\n ''' Catch and handle the view changes '''\n\n self.index = index\n self.ids['id_label1'].text = data['label1']['text']\n self.ids['id_label2'].text = data['label2']['text']\n self.ids['id_label3'].text = data['label3']['text']\n return super(SelectableGrid, self).refresh_view_attrs(\n rv, index, data)\n\n def on_touch_down(self, touch):\n ''' Add selection on touch down '''\n\n if super(SelectableGrid, self).on_touch_down(touch):\n return True\n\n if self.collide_point(*touch.pos) and self.selectable:\n return self.parent.select_with_touch(self.index, touch)\n\n def apply_selection(self, rv, index, is_selected):\n ''' Respond to the selection of items '''\n\n self.selected = is_selected \n \n\nclass SelectableQuantity(RecycleDataViewBehavior, GridLayout):\n ''' Add selection support to the Label '''\n\n index = None\n selected = BooleanProperty(False)\n selectable = BooleanProperty(True)\n\n def refresh_view_attrs(self, rv, index, data):\n ''' Catch and handle the view changes '''\n\n self.index = index\n self.ids['id_label1'].text = data['label1']['text']\n self.ids['id_label2'].text = data['label2']['text']\n self.ids['id_label3'].text = data['label3']['text']\n return super(SelectableQuantity, self).refresh_view_attrs(\n rv, index, data) \n\nclass RV(RecycleView):\n ''' Class for the RecycleView Controller '''\n\n def __init__(self, **kwargs):\n super(RV, self).__init__(**kwargs)\n\n def upload(self, query, active):\n ''' Search data according to the user input '''\n\n # Reset data\n self.data = []\n\n # Check if the Raw Food CheckBox is active or not\n if active:\n self.parent.parent.getSelection('API', query, True)\n self.data = [{'label1': {'text': 'API'}, 'label2': {'text': query}, 'label3': {'text': 'Add/Remove'}}]\n \n else:\n isinside = allTrue\n for item in query.split(): # Split the query in keywords\n isinside = isinside & \\\n (DF['product_name'].str.contains(item, case=False) | \\\n 
DF['Brands'].str.contains(item, case=False))\n\n if any(isinside):\n selection = DF[isinside] # Select products to display\n \n for row in selection.itertuples(): # Iterate through the columns of DF\n d = {'label1': {'text': str(row[0])}, \\\n 'label2': {'text': str(row[1])},\n 'label3': {'text': str(row[-1])}} # barcode, product_name, brand\n self.data.append(d)\n else:\n isinside = DF.index.str.contains(query, case=False) # Search for Barcode\n\n if any(isinside):\n selection = DF[isinside]\n\n for row in selection.itertuples():\n d = {'label1': {'text': str(row[0])}, \\\n 'label2': {'text': str(row[1])},\n 'label3': {'text': str(row[-1])}} # barcode, product_name, brand\n self.data.append(d) \n\n else:\n # In case no product is found\n self.data = [{'label1': {'text': ''}, \\\n 'label2': {'text': 'No product found'}, 'label3': {'text': ''}}]\n def getQuantities(self, dict):\n ''' Gather data for display on Quantities Screen '''\n\n self.data = []\n code = dict['code']\n product_name = dict['product_name']\n quantity = dict['quantity']\n\n for index in range(len(code)):\n d = {'label1': {'text': code[index]}, 'label2': {'text': product_name[index]}, \\\n 'label3': {'text': quantity[index]}}\n self.data.append(d)\n\nclass ScreenHome(Screen):\n ''' Class for the Home Screen. No variables or functions needed for this screen '''\n pass\n\nclass ScreenProfile(Screen):\n ''' Class for the Profile Screen '''\n\n def updateDF(self):\n global DF\n DF = pd.read_csv('https://drive.google.com/uc?export=download&id=1aLUh1UoQcS9lBa6oVRln-DuskxK5uK3y', \\\n index_col=[0], low_memory = False)\n\n DF.to_csv('./data/OpenFoodFacts_final.csv.gz', compression='gzip')\n self.ids['update'].text = 'Updated'\n self.ids['update'].background_color = (0,1,0,1)\n\n def update(self):\n self.ids['update'].text = 'Updating'\n self.ids['update'].background_color = (50/255,164/255,206/255,1) \n\n\nclass ScreenSettings(Screen):\n ''' Class for the Settings Screen '''\n\n settings = {'rec': True,'name': '', 'surname': '', 'age': 0, 'sex': True, 'weight': 0, \\\n 'email': '', 'activity': 0, 'days': 0}\n id_profile = -999\n\n def resetForm(self):\n ''' Reset the indicators of invalid input '''\n\n self.ids.sex.color = (1,1,1,1)\n self.ids.activity.color = (1,1,1,1)\n self.ids.age.hint_text_color = (0.5, 0.5, 0.5, 1.0)\n self.ids.weight.hint_text_color = (0.5, 0.5, 0.5, 1.0)\n self.ids.days.hint_text_color = (0.5, 0.5, 0.5, 1.0)\n self.ids.email.hint_text_color = (0.5, 0.5, 0.5, 1.0)\n self.ids.name.hint_text_color = (0.5, 0.5, 0.5, 1.0)\n self.ids.surname.hint_text_color = (0.5, 0.5, 0.5, 1.0)\n\n def setForm(self, id_profile):\n self.id_profile = id_profile\n self.settings = {'rec': True,'name': '', 'surname': '', 'age': 0, 'sex': True, 'weight': 0, \\\n 'email': '', 'activity': 0, 'days': 0}\n\n if int(self.id_profile) >= 0:\n self.ids.name.text = str(profile_list.iloc[self.id_profile]['name'])\n self.ids.surname.text= str(profile_list.iloc[self.id_profile]['surname'])\n self.ids.age.text = str(profile_list.iloc[self.id_profile]['age'])\n if bool(profile_list.iloc[self.id_profile]['sex']):\n self.ids.male.active = True\n self.ids.female.active = False\n\n else:\n self.ids.male.active = False\n self.ids.female.active = True\n\n self.ids.weight.text = str(profile_list.iloc[self.id_profile]['weight'])\n self.ids.email.text = str(profile_list.iloc[self.id_profile]['email'])\n self.ids.days.text = str(profile_list.iloc[self.id_profile]['days'])\n if int(profile_list.iloc[self.id_profile]['activity']) == 1.8:\n 
self.ids.seated.active = False\n self.ids.both.active = False\n self.ids.standing.active = True\n\n elif int(profile_list.iloc[self.id_profile]['activity']) == 1.6:\n self.ids.seated.active = False\n self.ids.both.active = True\n self.ids.standing.active = False\n\n else:\n self.ids.seated.active = True\n self.ids.both.active = False\n self.ids.standing.active = False\n elif int(self.id_profile) == -999:\n self.ids.name.text = ''\n self.ids.surname.text = ''\n self.ids.age.text = ''\n self.ids.male.active = False\n self.ids.female.active = False\n self.ids.email.text = ''\n self.ids.weight.text = ''\n self.ids.seated.active = False\n self.ids.both.active = False\n self.ids.standing.active = False\n self.ids.days.text = ''\n else:\n self.changeScreen(False)\n\n def changeScreen(self, valid):\n ''' Handle the validity of the inputs and the change of current screen '''\n\n if valid:\n self.resetForm()\n # Check name validity\n if self.ids.name.text.strip() == '':\n self.ids.name.hint_text_color = (1,0,0,1)\n return False\n # Check surname validity\n elif self.ids.surname.text.strip() == '':\n self.ids.surname.hint_text_color = (1,0,0,1)\n return False\n # Check age validity\n elif self.ids.age.text.strip() == '' or int(self.ids.age.text) <= 0 or \\\n int(self.ids.age.text) >= 120:\n self.ids.age.text = ''\n self.ids.age.hint_text_color = (1,0,0,1)\n return False\n # Check sex validity\n elif not(self.ids.male.active or self.ids.female.active):\n self.ids.sex.color = (1,0,0,1) \n return False\n # Check email validity\n elif not re.match(r\"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$)\", self.ids.email.text):\n self.ids.email.text = ''\n self.ids.email.hint_text_color = (1,0,0,1)\n return False\n # Check weight validity\n elif self.ids.weight.text.strip() == '' or int(self.ids.weight.text) <= 0:\n self.ids.weight.text = ''\n self.ids.weight.hint_text_color = (1,0,0,1)\n return False \n # Check activity validity\n elif not(self.ids.seated.active or self.ids.both.active or self.ids.standing.active):\n self.ids.activity.color = (1,0,0,1)\n return False\n # Check days validity\n elif self.ids.days.text.strip() == '' or int(self.ids.days.text) <= 0:\n self.ids.days.text = ''\n self.ids.days.hint_text_color = (1,0,0,1)\n return False\n \n else: # Validation of the form and reset\n self.settings['rec'] = True\n self.settings['name'] = self.ids.name.text\n self.settings['surname'] = self.ids.surname.text\n self.settings['age'] = int(self.ids.age.text)\n self.settings['weight'] = int(self.ids.weight.text)\n self.settings['email'] = self.ids.email.text\n self.settings['days'] = int(self.ids.days.text)\n self.settings['sex'] = self.ids.male.active\n\n if self.ids.seated.active:\n self.settings['activity'] = 1.4\n\n if self.ids.both.active:\n self.settings['activity'] = 1.6\n\n if self.ids.standing.active:\n self.settings['activity'] = 1.8\n\n self.resetForm()\n\n else: # If the user pass the settings screen\n self.settings['rec'] = False\n\n self.manager.setSettings(self.settings, self.id_profile)\n # Change the current screen\n self.manager.current = 'Product Screen'\n\nclass ScreenProduct(Screen):\n ''' Class for the Product Screen '''\n\n temp_dict = {'code':'', 'product_name': ''}\n\n def getSelection(self, text1, text2, state):\n # Select or deselect temporarly a product\n if state:\n self.temp_dict['code'] = text1\n self.temp_dict['product_name'] = text2\n\n else:\n self.temp_dict['code'] = ''\n self.temp_dict['product_name'] = ''\n\nclass ScreenQuantities(Screen):\n ''' Class for the 
Quantities Screen '''\n\n temp_dict = {'code': [], 'product_name': [], 'quantity': [], 'color': []}\n\n def initQuantity(self, data):\n ''' Initialize the dictionary of the products '''\n\n if self.temp_dict['quantity'] == []:\n self.temp_dict = data\n\n self.ids.rv.getQuantities(data)\n\n def updateQuantity(self, index, text1, text2, text3): \n ''' Store the quantities input by the user '''\n\n l = len(self.temp_dict['quantity'])\n\n if text3 == '' or text3 == '-' or int(text3) < 0:\n text3 = '0'\n\n if index < l:\n self.temp_dict['code'][index] = text1\n self.temp_dict['product_name'][index] = text2\n self.temp_dict['quantity'][index] = text3\n \n # Append the list of quantities if needed\n else:\n temp = ['0' for i in range(index-l)] \n self.temp_dict['code'] = self.temp_dict['code'] + temp + [text1]\n self.temp_dict['product_name'] = self.temp_dict['product_name'] + temp + [text2]\n self.temp_dict['quantity'] = self.temp_dict['quantity'] + temp + [text3]\n\n # Update the data displayed\n self.initQuantity(self.temp_dict)\n\nclass ScreenFinal(Screen):\n ''' Class for the Final Screen. No variables or functions needed for this screen '''\n pass\n\nclass Manager(ScreenManager):\n ''' Class for the Manager Controller. Store main data '''\n selected_products = {'code': [], 'product_name': [], 'quantity': []}\n settings = {'Rec': True, 'Name': '', 'Surname': '', 'Email': '', 'Age': 0, 'Sex': True, 'Pal': 0, \\\n 'Weight': 0, 'Day': 0}\n\n def getProfiles(self):\n self.ids.screen_profile.ids.profile_spinner.values = \\\n [str(index + 1) + ' : ' + str(profile_list['name'][index]) + ' ' + str(profile_list['surname'][index]) \\\n for index in profile_list.index]\n\n def toSettings(self, text):\n if text == 'new':\n id_profile = -999\n elif text == 'pass':\n id_profile = -1000\n else:\n items = text.split()\n id_profile = items[0].strip()\n id_profile = int(id_profile) - 1\n\n self.ids.screen_settings.setForm(id_profile)\n if id_profile != -1000:\n self.current = 'Settings Screen'\n \n\n def addProduct(self):\n ''' Add product to main storage '''\n item1 = self.ids.screen_product.temp_dict['code']\n item2 = self.ids.screen_product.temp_dict['product_name']\n\n if item1 != '' and item2 != '':\n self.selected_products['code'].append(item1)\n self.selected_products['product_name'].append(item2)\n self.selected_products['quantity'].append('0')\n\n def deleteProduct(self):\n ''' Remove product of main storage '''\n item1 = self.ids.screen_product.temp_dict['code']\n item2 = self.ids.screen_product.temp_dict['product_name']\n\n if item1 in self.selected_products['code'] and item2 in self.selected_products['product_name']:\n self.selected_products['code'].remove(item1)\n self.selected_products['product_name'].remove(item2)\n self.selected_products['quantity'].pop()\n\n def getQuantities(self, data):\n ''' Add quantities to main storage '''\n\n self.selected_products['quantity'] = data['quantity']\n l = len(self.selected_products['quantity'])\n\n for item in range(l):\n\n if self.selected_products['quantity'][item] == '':\n self.selected_products['quantity'][item] = '0'\n \n self.current = 'Final Screen'\n\n def setSettings(self, data, new):\n ''' Add settings to main storage '''\n\n self.settings['Rec'] = data['rec']\n self.settings['Name'] = data['name']\n self.settings['Surname'] = data['surname']\n self.settings['Email'] = data['email']\n self.settings['Pal'] = data['activity']\n self.settings['Weight'] = data['weight']\n self.settings['Day'] = data['days']\n self.settings['Sex'] = data['sex']\n 
self.settings['Age'] = data['age']\n \n update = True\n\n if new == -999:\n temp_df = pd.DataFrame.from_dict({'index': [len(profile_list)], \\\n 'name': [data['name']], 'surname': [data['surname']], \\\n 'age': [data['age']], 'sex': [data['sex']], 'email': [data['email']], \\\n 'weight': [data['weight']], \\\n 'activity': [data['activity']], 'days': [data['days']]}).set_index('index')\n new_profile_list = pd.concat([profile_list, temp_df]) \n elif new == -1000:\n update = False\n else:\n temp_df = pd.DataFrame.from_dict({'name': [data['name']], 'surname': [data['surname']], \\\n 'age': [data['age']], 'sex': [data['sex']], 'email': [data['email']], 'weight': [data['weight']], \\\n 'activity': [data['activity']], 'days': [data['days']]})\n new_profile_list= profile_list\n new_profile_list.iloc[new] = temp_df.iloc[0]\n\n if update:\n new_profile_list.to_csv('./data/profile.csv', sep=';')\n\n\n def computation(self):\n ''' Call algo for computation of NutriScore and recommendation. Display results '''\n dict_product = {'Product': [], 'API': []}\n\n for index in range(len(self.selected_products['code'])):\n \n # Separation of API and OpenFoodFacts data\n if str(self.selected_products['code'][index]) == 'API':\n dict_product['API'].append((str(self.selected_products[\n 'product_name'][index]), int(self.selected_products['quantity'][index])))\n \n else:\n dict_product['Product'].append((str(self.selected_products[\n 'code'][index]), int(self.selected_products['quantity'][index])))\n\n # Run the algorithm to get the recommendation to print on-screen\n text_app_beverages, text_app_nonbeverages = algo(dict_product, self.settings, DF)\n self.ids.screen_final.ids.beverages.text = text_app_beverages\n self.ids.screen_final.ids.non_beverages.text = text_app_nonbeverages\n\nclass NutriScoreApp(App):\n ''' Main class of the App '''\n\n def build(self):\n ''' Import the database for the whole application '''\n global DF, allTrue, profile_list\n\n try:\n DF = pd.read_csv('./data/OpenFoodFacts_final.csv.gz', low_memory=False, index_col = [0])\n allTrue = DF['product_name'].str.contains('', case=False) # True Vector of length len(DF)\n profile_list = pd.read_csv('./data/profile.csv', sep=';', index_col=[0])\n\n except:\n print('Fatal error: files missing') \n \n return Manager()\n\nif __name__ == '__main__':\n NutriScoreApp().run()\n\n"
] | [
[
"pandas.concat",
"pandas.read_csv",
"pandas.DataFrame.from_dict"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
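The RV.upload() search in main.py above builds an all-True boolean mask and ANDs in one str.contains condition per keyword across the product-name and brand columns. A minimal pandas sketch of that pattern, with a hypothetical three-product frame standing in for the OpenFoodFacts data.

import pandas as pd

# Toy stand-in for the OpenFoodFacts frame loaded in NutriScoreApp.build().
DF = pd.DataFrame(
    {'product_name': ['Dark chocolate', 'Milk chocolate', 'Orange juice'],
     'Brands': ['BrandA', 'BrandB', 'BrandC']},
    index=['111', '222', '333'])

def search(query):
    # Start from an all-True mask and AND in one condition per keyword,
    # matching either the product name or the brand (case-insensitive).
    isinside = DF['product_name'].str.contains('', case=False)
    for item in query.split():
        isinside = isinside & (
            DF['product_name'].str.contains(item, case=False)
            | DF['Brands'].str.contains(item, case=False))
    return DF[isinside]

print(search('chocolate brandb'))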
edervishaj/spotify-recsys-challenge | [
"5e7844a77ce3c26658400f161d2d74d682f30e69",
"5e7844a77ce3c26658400f161d2d74d682f30e69",
"5e7844a77ce3c26658400f161d2d74d682f30e69",
"5e7844a77ce3c26658400f161d2d74d682f30e69",
"5e7844a77ce3c26658400f161d2d74d682f30e69",
"5e7844a77ce3c26658400f161d2d74d682f30e69"
] | [
"personal/Ervin/run_knn_collaborative_item.py",
"recommenders/script/creative/top_pop_album.py",
"recommenders/nlp_bm25.py",
"recommenders/nlp_strict.py",
"recommenders/model/cb_al_ar_bm25.py",
"recommenders/script/main/nlp_fusion.py"
] | [
"from utils.datareader import Datareader\nfrom utils.evaluator import Evaluator\nfrom utils.submitter import Submitter\nfrom utils.post_processing import eurm_to_recommendation_list_submission\nfrom utils.post_processing import eurm_to_recommendation_list\nfrom utils.pre_processing import norm_l1_row, norm_max_row, norm_max_col\nfrom recommenders.knn_collaborative_item import Knn_collaborative_item\nimport recommenders.similarity.similarity as sm\nimport scipy.sparse as sps\nimport sys\nimport numpy as np\nfrom personal.Ervin.other_similarity import position_similarity\n\n\n\n'''\nThis file contains just an example on how to run the algorithm.\nThe parameter used are just the result of a first research of the optimum value.\nTo run this file just set the parameter at the start of the main function or set from console as argv parameter.\nAs argv you can even set mode of execution (online, offline) and the name of the result file\n'''\nif __name__ == '__main__':\n\n ### Select execution mode: 'offline', 'online' ###\n mode = \"offline\"\n name = \"CFitem\"\n knn = 200\n topk = 750\n\n if len(sys.argv) > 1:\n mode = sys.argv[1]\n name = sys.argv[2]\n knn = int(sys.argv[3])\n topk = int(sys.argv[4])\n\n complete_name = mode+\"_\"+name+\"_knn=\"+str(knn)+\"_topk=\"+str(topk)\n\n if mode == \"offline\":\n\n \"\"\"Test Set\"\"\"\n #Data initialization\n dr = Datareader(verbose=True, mode=mode, only_load=True)\n\n #Evaluetor initialization\n ev = Evaluator(dr)\n\n #Recommender algorithm initialization\n rec = Knn_collaborative_item()\n\n #Getting for the recommender algorithm\n urm = dr.get_urm()\n urm.data = np.ones(len(urm.data))\n position_urm = dr.get_position_matrix(position_type='last')\n pos_urm = position_urm.T.tocoo().tocsr()\n pid = dr.get_test_pids()\n\n #Fitting data\n rec.fit(urm, pid)\n\n #Computing similarity/model\n rec.compute_model(top_k= knn, sm_type=sm.TVERSKY, shrink=200, alpha=0.1, beta=1, binary=True, verbose=True)\n rec.model = rec.model.tocsr()\n rec.model.eliminate_zeros()\n # rec.model = norm_max_row(rec.model)\n\n print('Initial model has {:2} data'.format(len(rec.model.data)))\n\n print('[ Updating the model ]')\n rec.model = position_similarity(rec.model, pos_urm, knn=knn, verbose=True)\n rec.model.eliminate_zeros()\n\n print('New model has {:2} data'.format(len(rec.model.data)))\n\n #Computing ratings\n rec.compute_rating(top_k=topk,verbose=True, small=True, remove_seed=False)\n\n #evaluation and saving\n sps.save_npz(complete_name+\".npz\", rec.eurm)\n ev.evaluate(recommendation_list=eurm_to_recommendation_list(rec.eurm, datareader=dr, remove_seed=True),\n name=name, old_mode=False)\n\n if mode == \"online\":\n\n \"\"\"Submission\"\"\"\n #Data initialization\n dr = Datareader(verbose=True, mode=mode, only_load=False)\n\n #Recommender algorithm initialization\n rec = Knn_collaborative_item()\n\n #Submitter initialization\n sb = Submitter(dr)\n\n #Getting for the recommender algorithm\n urm = dr.get_urm()\n pid = dr.get_test_pids()\n\n #Fitting data\n rec.fit(urm, pid)\n\n #Computing similarity/model\n rec.compute_model(top_k=knn, sm_type=sm.TVERSKY,shrink=200, alpha=0.1, beta=1, binary=True, verbose=True)\n\n #Computing ratings\n rec.compute_rating(top_k=topk, verbose=True, small=True)\n\n #submission\n sps.save_npz(complete_name+\".npz\", rec.eurm)\n sb.submit(recommendation_list=eurm_to_recommendation_list_submission(rec.eurm), name=name, track=\"main\", verify=True, gzipped=False)\n\n\n\n\n",
"import sys\nfrom utils.definitions import ROOT_DIR\nfrom recommenders.script.main.top_pop_p import Top_pop_p\nimport scipy.sparse as sps\n\narg = sys.argv[1:]\nmode = arg[0]\n\n\nt = Top_pop_p()\neurm = t.get_top_pop_album(mode)\nsps.save_npz(ROOT_DIR+\"/recommenders/script/creative/\"+mode+\"_npz/top_pop_2_album_\"+mode+\".npz\", eurm)\n",
"from utils.datareader import Datareader\nfrom utils.evaluator import Evaluator\nfrom utils.submitter import Submitter\nfrom utils.print_tuning import TunePrint\nimport utils.post_processing as post\nimport utils.pre_processing as pre\nimport recommenders.similarity.s_plus as ss\nimport recommenders.similarity.p3alpha_rp3beta as p3r3\nimport numpy as np\nimport scipy.sparse as sps\nfrom recommenders.nlp import NLP\n\n \n#similarity = tversky_similarity(ucm, binary=False, shrink=1, alpha=0.1, beta=1\n\n\nclass NLP_BM25:\n def __init__(self, urm, ucm=None, stopwords=[], load_ucm=False, save_ucm=False, binary=False, verbose=True, mode='offline', datareader=None, verbose_evaluation=True):\n assert(mode in ('offline', 'online'))\n if binary: urm.data=np.ones(urm.data.shape[0])\n # best: norm, wor, split, skipw, porter2, lanca2\n norm = True\n work = True\n split = True\n skip_words = True\n date = False\n porter = False\n porter2 = True\n lanca = False\n lanca2 = True\n data1 = False\n self.ucm=ucm\n if self.ucm is None and not load_ucm:\n nlp = NLP(datareader, stopwords=stopwords, norm=norm, work=work, split=split, date=date, skip_words=skip_words,\n porter=porter, porter2=porter2, lanca=lanca, lanca2=lanca2)\n self.ucm = nlp.get_UCM(data1=data1)\n elif self.ucm is None and load_ucm:\n self.load_ucm('ucm_nlp.npz')\n if save_ucm:\n self.save_ucm('ucm_nlp.npz')\n self.m_uc = pre.bm25_row(self.ucm.copy()).tocsr()\n self.m_cu = pre.bm25_row(self.ucm.copy()).T.tocsr()\n self.urm = urm\n self.binary = binary\n self.verbose = verbose\n self.verbose_ev = verbose_evaluation\n self.dr = datareader\n self.mode = mode\n if mode == 'offline':\n self.ev = Evaluator(self.dr)\n \n def model(self, alpha=1, k=200, shrink=0, threshold=0, target_items=None):\n if target_items is None: target_items=self.dr.get_test_pids() # work with s*urm\n self.alpha = alpha\n self.k = k\n self.shrink, self.threshold = shrink, threshold\n self.s = ss.p3alpha_similarity(self.m_uc, self.m_cu,\n k=k, shrink=shrink, alpha=alpha, threshold=threshold,\n verbose=self.verbose, target_items=target_items)\n \n def recommend(self, target_pids=None, eurm_k=750):\n #if target_pids is None it calculate the whole eurm\n self.eurm = ss.dot_product(self.s, self.urm, k=eurm_k, target_items=target_pids, verbose=self.verbose)\n # TODO: here we can try some postprocessing on eurm if complete (like normalize for column)\n \n #### METHODS FOR OFFLINE MODE ####\n def fast_recommend(self, target_pids=None, eurm_k=750):\n assert(self.mode=='offline')\n if target_pids is None: target_pids=self.dr.get_test_pids()\n self.recommend(target_pids=target_pids, eurm_k=eurm_k)\n\n def fast_evaluate_eurm(self, target_pids=None):\n assert(self.mode=='offline')\n res = self.ev.fast_evaluate_eurm(self.eurm, target_pids=target_pids, verbose=self.verbose_ev)\n return res\n\n def evaluate_eurm(self, target_pids):\n assert(self.mode=='offline')\n eurm = sps.csr_matrix(self.eurm[target_pids])\n eurm = post.eurm_remove_seed(eurm, self.dr)\n rec_list = post.eurm_to_recommendation_list(eurm)\n res = self.ev.evaluate(rec_list, str(self) , verbose=self.verbose_ev, return_result='all')\n return res\n\n #### UTILITY METHODS ####\n \n def clear_similarity(self): del self.s\n\n def clear_eurm(self): del self.eurm\n\n def save_similarity(self, name_file, compressed=False):\n sps.save_npz(name_file, self.s, compressed)\n\n def save_small_eurm(self, name_file, target_pids, compressed=True):\n eurm = sps.csr_matrix(self.eurm[target_pids])\n sps.save_npz(name_file, eurm, compressed)\n 
\n def save_ucm(self, name_file, compressed=False):\n sps.save_npz(name_file, self.ucm.tocsr(), compressed)\n \n def load_ucm(self, name_file):\n self.ucm = sps.load_npz(name_file).tocsr()\n\n #### OVERRIDE METHODS ####\n\n def __str__(self):\n name = ('NLP_BM25: alpha=%.3f, beta=%.3f, k=%d, shrink=%d, threshold=%.5f, binary=%s, rp3mode=%d' \n % (self.alpha, self.beta, self.k , self.shrink, self.threshold, str(self.binary), self.rp3_mode))\n return name\n \n #### TUNING METHODS ####\n\n",
"from recommenders.similarity.dot_product import dot_product\nfrom recommenders.similarity.s_plus import tversky_similarity\nfrom tqdm import tqdm\nfrom scipy import sparse\nfrom utils.definitions import *\nfrom utils.post_processing import *\nimport numpy as np\nimport pandas as pd\nfrom utils.datareader import Datareader\nfrom utils.evaluator import Evaluator\nfrom utils.pre_processing import *\n\n\nclass NLPStrict(object):\n\n def __init__(self, datareader):\n self.datareader = datareader\n self.title_to_idx = dict()\n\n train_playlists_df = datareader.get_df_train_playlists()\n test_playlists_df = datareader.get_df_test_playlists()\n concat_df = pd.concat([train_playlists_df, test_playlists_df])\n concat_df = concat_df.fillna('')\n\n if datareader.offline():\n concat_df = concat_df.sort_values(['pid'], ascending=True)\n\n self.playlists = concat_df['pid'].as_matrix()\n self.playlist_titles = concat_df['name'].as_matrix()\n self.playlist_titles = [(str(x).lower()).encode('unicode-escape').decode('ASCII') for x in self.playlist_titles]\n\n counter = 0\n\n for title in self.playlist_titles:\n if title not in self.title_to_idx.keys() and title != '':\n self.title_to_idx[title] = counter\n counter += 1\n\n def get_UCM(self):\n \"\"\"\n Build a UCM (playlists, titles) with lowercase titles of playlists and emojis.\n No tokenization or stemming is applied.\n :return: ucm: the user content matrix\n \"\"\"\n rows = []\n cols = []\n data = []\n\n print(max(self.playlists))\n\n for i in tqdm(range(len(self.playlist_titles)), desc='Building UCM'):\n t = self.playlist_titles[i]\n p = self.playlists[i]\n\n if t != '':\n rows.append(p)\n cols.append(self.title_to_idx[t])\n data.append(1)\n\n ucm = sparse.csr_matrix((data, (rows, cols)), shape=(max(self.playlists) + 1,\n len(list(self.title_to_idx.keys()))))\n\n return ucm\n\n\nif __name__ == '__main__':\n\n datareader = Datareader(mode='offline', only_load=True)\n evaluator = Evaluator(datareader)\n\n nlp_lele = sparse.load_npz(ROOT_DIR + '/data/ensemble/nlp_eurm_offline_bm25.npz')\n nlp_strict = sparse.load_npz(ROOT_DIR + '/data/eurm_nlp_strict.npz')\n top_pop = datareader.get_eurm_top_pop()\n\n top_pop = norm_l1_row(top_pop)\n nlp_lele = norm_l1_row(nlp_lele)\n nlp_strict = norm_l1_row(nlp_strict)\n\n nlp_fusion = (nlp_lele * 0.6) + (nlp_strict * 0.4)\n sparse.save_npz(ROOT_DIR + '/data/eurm_nlp_fusion_offline.npz', nlp_fusion)\n evaluator.evaluate(eurm_to_recommendation_list(nlp_fusion, datareader=datareader),\n name='nlp_fusion_no_toppop')\n\n for a in [0.50, 0.55, 0.60, 0.65, 0.70]:\n for b in [0.10, 0.15]:\n nlp_fusion = (nlp_lele * a) + (nlp_strict * (1.0 - a)) + (b * top_pop)\n\n evaluator.evaluate(eurm_to_recommendation_list(nlp_fusion, datareader=datareader),\n name='nlp_fusion_bm25_'+str(a)+'_'+str(b), do_plot=False)\n",
"from utils.datareader import Datareader\nfrom utils.evaluator import Evaluator\nfrom utils.submitter import Submitter\nfrom utils.print_tuning import TunePrint\nimport utils.post_processing as post\nimport utils.pre_processing as pre\nimport recommenders.similarity.s_plus as ss\nimport recommenders.similarity.p3alpha_rp3beta as p3r3\nimport numpy as np\nimport scipy.sparse as sps\nimport sklearn.preprocessing as sk\n\n\nclass CB_AL_AR_BM25:\n def __init__(self, icm, urm, binary=False, verbose=True, mode='offline', datareader=None, verbose_evaluation=True):\n assert(mode in ('offline', 'online'))\n if binary: urm.data=np.ones(urm.data.shape[0])\n self.urm = urm\n #self.m_ic = pre.bm25_col(pre.bm25_row(icm.copy())).tocsr() #it work good at track level but not at artist level\n #self.m_ci = pre.bm25_col(pre.bm25_row(icm.T.copy())).tocsr() #it work good at track level but not at artist level\n self.m_ic = pre.bm25_col(icm.copy()).tocsr()\n self.m_ci = pre.bm25_col(icm.T.copy()).tocsr()\n self.binary = binary\n self.verbose = verbose\n self.verbose_ev = verbose_evaluation\n self.dr = datareader\n self.mode = mode\n if mode == 'offline':\n self.ev = Evaluator(self.dr)\n \n def model(self, alpha=1, k=200, shrink=0, threshold=0, target_items=None):\n #if target_items is None it calculate the whole similarity\n self.alpha = alpha\n self.k = k\n self.shrink, self.threshold = shrink, threshold\n self.s = ss.p3alpha_similarity(self.m_ic, self.m_ci,\n k=k, shrink=shrink, alpha=alpha, threshold=threshold,\n verbose=self.verbose, target_items=target_items)\n \n def recommend(self, target_pids=None, eurm_k=750):\n #if target_pids is None it calculate the whole eurm\n self.eurm = ss.dot_product(self.urm, self.s.T, k=eurm_k, target_items=target_pids, verbose=self.verbose) ##or s.T????\n #self.eurm = sk.normalize(X=self.eurm, norm='l2',axis=0,copy=False, return_norm=False)\n # TODO: here we can try some postprocessing on eurm if complete (like normalize for column)\n \n #### METHODS FOR OFFLINE MODE ####\n def fast_recommend(self, target_pids=None, eurm_k=750):\n assert(self.mode=='offline')\n if target_pids is None: target_pids=self.dr.get_test_pids()\n self.recommend(target_pids=target_pids, eurm_k=eurm_k)\n\n def fast_evaluate_eurm(self, target_pids=None):\n assert(self.mode=='offline')\n res = self.ev.fast_evaluate_eurm(self.eurm, target_pids=target_pids, verbose=self.verbose_ev)\n return res\n\n def evaluate_eurm(self, target_pids):\n assert(self.mode=='offline')\n eurm = sps.csr_matrix(self.eurm[target_pids])\n eurm = post.eurm_remove_seed(eurm, self.dr)\n rec_list = post.eurm_to_recommendation_list(eurm)\n res = self.ev.evaluate(rec_list, str(self) , verbose=self.verbose_ev, return_result='all')\n return res\n\n #### UTILITY METHODS ####\n \n def clear_similarity(self): del self.s\n\n def clear_eurm(self): del self.eurm\n\n def save_similarity(self, name_file, compressed=False):\n sps.save_npz(name_file, self.s, compressed)\n\n def save_small_eurm(self, name_file, target_pids, compressed=True):\n eurm = sps.csr_matrix(self.eurm[target_pids])\n sps.save_npz(name_file, eurm, compressed)\n \n #### OVERRIDE METHODS ####\n\n def __str__(self):\n name = ('CB_AL_AR_BM25: alpha=%.3f, k=%d, shrink=%d, threshold=%.5f, binary=%s'\n % (self.alpha, self.k , self.shrink, self.threshold, str(self.binary)))\n return name\n \n #### TUNING METHODS ####\n def tune_alpha(self, range_alpha=np.arange(0.5,1.5,0.1), k=100,\n shrink=0, threshold=0, verbose_tune=False,\n filename='tuning_bm25_alpha', overwrite=False, 
save_mean = True, save_full=True\n ):\n tp = TunePrint(filename=filename, full=save_full, mean=save_mean, overwrite=overwrite)\n self.model(alpha=1, k=k, shrink=shrink, threshold=threshold) #exploit this trick to generate fastest model\n save_data = self.s.data\n for alpha in range_alpha:\n self.s.data = save_data\n self.s.data = np.power(self.s.data, alpha)\n self.alpha = alpha\n self.fast_recommend()\n mean, df_all_values = self.fast_evaluate_eurm()\n self.clear_eurm()\n s_mean = 'P = %1.4f, NDCG = %1.4f, CLICK = %1.4f'%(mean[0],mean[1],mean[2])\n if verbose_tune: print(str(self)+'\\n'+s_mean)\n if save_mean: tp.print_mean_values(str(self), mean)\n if save_full: tp.print_full_values(description=str(self),dict_val={'alpha':alpha}, dataframe=df_all_values)\n tp.make_pdf_full()\n\n\n def tune_k(self, range_k=np.arange(25,300,25), alpha=1,\n shrink=0, threshold=0, verbose_tune=False,\n filename='tuning_bm25_k', overwrite=False, save_mean = True, save_full=True\n ):\n tp = TunePrint(filename=filename, full=save_full, mean=save_mean, overwrite=overwrite, verbose=verbose_tune)\n for k in range_k:\n self.model(alpha=alpha, k=k, shrink=shrink, threshold=threshold)\n self.fast_recommend()\n self.clear_similarity()\n mean, df_all_values = self.fast_evaluate_eurm()\n self.clear_eurm()\n s_mean = 'P = %1.4f, NDCG = %1.4f, CLICK = %1.4f'%(mean[0],mean[1],mean[2])\n if verbose_tune: print(str(self)+'\\n'+s_mean)\n # save values\n if save_mean: tp.print_mean_values(str(self), mean)\n if save_full: tp.print_full_values(description=str(self),dict_val={'k':k}, dataframe=df_all_values)\n tp.make_pdf_full()\n \n def tune_shrink(self, range_shrink=np.arange(25,300,25), k=200, alpha=1,\n threshold=0, verbose_tune=False,\n filename='tuning_bm25_shrink', overwrite=False, save_mean = True, save_full=True\n ):\n tp = TunePrint(filename=filename, full=save_full, mean=save_mean, overwrite=overwrite)\n for shrink in range_shrink:\n self.model(alpha=alpha, k=k, shrink=shrink, threshold=threshold)\n self.fast_recommend()\n self.clear_similarity()\n mean, df_all_values = self.fast_evaluate_eurm()\n self.clear_eurm()\n s_mean = 'P = %1.4f, NDCG = %1.4f, CLICK = %1.4f'%(mean[0],mean[1],mean[2])\n if verbose_tune: print(str(self)+'\\n'+s_mean)\n # save values\n if save_mean: tp.print_mean_values(str(self), mean)\n if save_full: tp.print_full_values(description=str(self),dict_val={'shrink':shrink}, dataframe=df_all_values)\n tp.make_pdf_full()\n",
"from recommenders.similarity.s_plus import dot_product, tversky_similarity\nfrom utils.post_processing import *\nfrom utils.pre_processing import *\nfrom scipy import sparse\nfrom recommenders.nlp_strict import NLPStrict\nfrom recommenders.nlp import NLP\nfrom utils.sparse import *\nimport sys\n\narg = sys.argv[1:]\nmode = arg[0]\nif len(arg) > 1:\n topk = int(arg[1])\nelse:\n topk = 750\n\n# INITIALIZATION\ndr = Datareader(mode=mode, verbose=False, only_load=True)\ntest_pids = dr.get_test_pids()\nurm = dr.get_urm()\nurm.data = np.ones(len(urm.data))\n\n# PARAMS\nnorm = True\nwork = True\nsplit = True\nskip_words = True\ndate = False\nporter = False\nporter2 = True\nlanca = False\nlanca2 = True\ndata1 = False\n\n# NLP STRICT\nnlp_strict = NLPStrict(dr)\nucm_strict = nlp_strict.get_UCM().astype(np.float64)\ntop_pop = dr.get_eurm_top_pop()\n\n# Do not train on challenge set\nucm_strict_T = ucm_strict.copy()\ninplace_set_rows_zero(ucm_strict_T, test_pids)\nucm_strict_T = ucm_strict_T.T\n\nsim = tversky_similarity(ucm_strict, ucm_strict_T, k=450, alpha=0.2, beta=0.5,\n shrink=150, target_items=test_pids)\n\n# Compute eurm\neurm = dot_product(sim, urm, k=topk)\neurm = eurm.tocsr()\neurm = eurm[test_pids, :]\n\n# NLP TOKENS\nnlp = NLP(dr)\n\nucm = nlp.get_UCM(data1=data1).astype(np.float64)\n\n# Do not train on challenge set\nucm_T = ucm.copy()\ninplace_set_rows_zero(ucm_T, test_pids)\nucm_T = ucm_T.T\n\nsim_lele = tversky_similarity(ucm, ucm_T, k=200, alpha=0.9, beta=1.0,\n shrink=0, target_items=test_pids)\n\n# Compute eurm\neurm_lele = dot_product(sim_lele, urm, k=topk)\neurm_lele = eurm_lele.tocsr()\neurm_lele = eurm_lele[test_pids, :]\n\n# NLP FUSION\na = 0.2\neurm_l1 = norm_l1_row(eurm)\neurm_lele_l1 = norm_l1_row(eurm_lele)\nnlp_fusion = a * eurm_l1 + (1.0 - a) * eurm_lele_l1\n\nif dr.online():\n sparse.save_npz(ROOT_DIR + '/recommenders/script/main/online_npz/nlp_fusion_online.npz', nlp_fusion)\nelse:\n sparse.save_npz(ROOT_DIR + '/recommenders/script/main/offline_npz/nlp_fusion_offline.npz', nlp_fusion)\n\n"
] | [
[
"scipy.sparse.save_npz"
],
[
"scipy.sparse.save_npz"
],
[
"scipy.sparse.load_npz",
"scipy.sparse.save_npz",
"scipy.sparse.csr_matrix",
"numpy.ones"
],
[
"scipy.sparse.save_npz",
"scipy.sparse.load_npz",
"pandas.concat"
],
[
"numpy.power",
"numpy.arange",
"scipy.sparse.csr_matrix",
"numpy.ones",
"scipy.sparse.save_npz"
],
[
"scipy.sparse.save_npz"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.9",
"0.19",
"1.5",
"1.2",
"1.7",
"1.0",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.9",
"0.19",
"1.5",
"1.2",
"1.7",
"1.0",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.9",
"0.19",
"1.5",
"1.2",
"1.7",
"1.0",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"1.7",
"1.0",
"1.2",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"1.7",
"1.0",
"1.2",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.9",
"0.19",
"1.5",
"1.2",
"1.7",
"1.0",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
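The NLP fusion scripts in the spotify-recsys entry above L1-normalize two estimated user-rating matrices row-wise and blend them with a convex combination. The norm_l1_row helper below is a hand-rolled stand-in for the repository's utils.pre_processing.norm_l1_row, whose source is not shown here, and the random matrices are toy data.

import numpy as np
import scipy.sparse as sps

def norm_l1_row(m):
    """L1-normalize each row of a CSR matrix (all-zero rows are left unchanged)."""
    m = m.tocsr(copy=True)
    row_sums = np.asarray(abs(m).sum(axis=1)).ravel()
    row_sums[row_sums == 0] = 1.0
    return sps.diags(1.0 / row_sums).dot(m)

# Two toy EURMs of the same shape, playing the roles of eurm and eurm_lele.
eurm_a = sps.random(5, 8, density=0.4, format='csr', random_state=0)
eurm_b = sps.random(5, 8, density=0.4, format='csr', random_state=1)

a = 0.2  # blend weight, a tuning choice in the original script
fusion = a * norm_l1_row(eurm_a) + (1.0 - a) * norm_l1_row(eurm_b)
print(fusion.toarray().round(3))

Normalizing each model's rows first keeps the two score scales comparable, so the weight a controls the mix rather than compensating for magnitude differences.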
Valentinkvn/Udacity-Full-Autonomous-Vehicle-Project | [
"b1313345a09f84c122a91c1145230fe69da0d20f"
] | [
"ros/src/styx/bridge.py"
] | [
"\nimport rospy\n\nimport tf\nfrom geometry_msgs.msg import PoseStamped, Quaternion, TwistStamped\nfrom dbw_mkz_msgs.msg import SteeringReport, ThrottleCmd, BrakeCmd, SteeringCmd\nfrom std_msgs.msg import Float32 as Float\nfrom std_msgs.msg import Bool\nfrom sensor_msgs.msg import PointCloud2\nfrom sensor_msgs.msg import Image\nimport sensor_msgs.point_cloud2 as pcl2\nfrom std_msgs.msg import Header\nfrom cv_bridge import CvBridge, CvBridgeError\n\nfrom styx_msgs.msg import TrafficLight, TrafficLightArray, Lane\nimport numpy as np\nfrom PIL import Image as PIL_Image\nfrom io import BytesIO\nimport base64\n\nimport math\n\nTYPE = {\n 'bool': Bool,\n 'float': Float,\n 'pose': PoseStamped,\n 'pcl': PointCloud2,\n 'twist': TwistStamped,\n 'steer': SteeringReport,\n 'trafficlights': TrafficLightArray,\n 'steer_cmd': SteeringCmd,\n 'brake_cmd': BrakeCmd,\n 'throttle_cmd': ThrottleCmd,\n 'path_draw': Lane,\n 'image':Image\n}\n\nNUM_IMAGES_TO_SKIP = 2\n\nclass Bridge(object):\n def __init__(self, conf, server):\n rospy.init_node('styx_server')\n self.server = server\n self.vel = 0.\n self.yaw = None\n self.angular_vel = 0.\n self.bridge = CvBridge()\n self.img_count = 0\n\n self.callbacks = {\n '/vehicle/steering_cmd': self.callback_steering,\n '/vehicle/throttle_cmd': self.callback_throttle,\n '/vehicle/brake_cmd': self.callback_brake,\n '/final_waypoints': self.callback_path\n }\n\n self.subscribers = [rospy.Subscriber(e.topic, TYPE[e.type], self.callbacks[e.topic])\n for e in conf.subscribers]\n\n self.publishers = {e.name: rospy.Publisher(e.topic, TYPE[e.type], queue_size=1)\n for e in conf.publishers}\n\n def create_light(self, x, y, z, yaw, state):\n light = TrafficLight()\n\n light.header = Header()\n light.header.stamp = rospy.Time.now()\n light.header.frame_id = '/world'\n\n light.pose = self.create_pose(x, y, z, yaw)\n light.state = state\n\n return light\n\n def create_pose(self, x, y, z, yaw=0.):\n pose = PoseStamped()\n\n pose.header = Header()\n pose.header.stamp = rospy.Time.now()\n pose.header.frame_id = '/world'\n\n pose.pose.position.x = x\n pose.pose.position.y = y\n pose.pose.position.z = z\n\n q = tf.transformations.quaternion_from_euler(0., 0., math.pi * yaw/180.)\n pose.pose.orientation = Quaternion(*q)\n\n return pose\n\n def create_float(self, val):\n fl = Float()\n fl.data = val\n return fl\n\n def create_twist(self, velocity, angular):\n tw = TwistStamped()\n tw.twist.linear.x = velocity\n tw.twist.angular.z = angular\n return tw\n\n def create_steer(self, val):\n st = SteeringReport()\n st.steering_wheel_angle_cmd = val * math.pi/180.\n st.enabled = True\n st.speed = self.vel\n return st\n\n def calc_angular(self, yaw):\n angular_vel = 0.\n if self.yaw is not None:\n angular_vel = (yaw - self.yaw)/(rospy.get_time() - self.prev_time)\n self.yaw = yaw\n self.prev_time = rospy.get_time()\n return angular_vel\n\n def create_point_cloud_message(self, pts):\n header = Header()\n header.stamp = rospy.Time.now()\n header.frame_id = '/world'\n cloud_message = pcl2.create_cloud_xyz32(header, pts)\n return cloud_message\n\n def broadcast_transform(self, name, position, orientation):\n br = tf.TransformBroadcaster()\n br.sendTransform(position,\n orientation,\n rospy.Time.now(),\n name,\n \"world\")\n\n def publish_odometry(self, data):\n pose = self.create_pose(data['x'], data['y'], data['z'], data['yaw'])\n\n position = (data['x'], data['y'], data['z'])\n orientation = tf.transformations.quaternion_from_euler(0, 0, math.pi * data['yaw']/180.)\n 
self.broadcast_transform(\"base_link\", position, orientation)\n\n self.publishers['current_pose'].publish(pose)\n self.vel = data['velocity']* 0.44704\n self.angular = self.calc_angular(data['yaw'] * math.pi/180.)\n self.publishers['current_velocity'].publish(self.create_twist(self.vel, self.angular))\n\n\n def publish_controls(self, data):\n steering, throttle, brake = data['steering_angle'], data['throttle'], data['brake']\n self.publishers['steering_report'].publish(self.create_steer(steering))\n self.publishers['throttle_report'].publish(self.create_float(throttle))\n self.publishers['brake_report'].publish(self.create_float(brake))\n\n def publish_obstacles(self, data):\n for obs in data['obstacles']:\n pose = self.create_pose(obs[0], obs[1], obs[2])\n self.publishers['obstacle'].publish(pose)\n header = Header()\n header.stamp = rospy.Time.now()\n header.frame_id = '/world'\n cloud = pcl2.create_cloud_xyz32(header, data['obstacles'])\n self.publishers['obstacle_points'].publish(cloud)\n\n def publish_lidar(self, data):\n self.publishers['lidar'].publish(self.create_point_cloud_message(zip(data['lidar_x'], data['lidar_y'], data['lidar_z'])))\n\n def publish_traffic(self, data):\n x, y, z = data['light_pos_x'], data['light_pos_y'], data['light_pos_z'],\n yaw = [math.atan2(dy, dx) for dx, dy in zip(data['light_pos_dx'], data['light_pos_dy'])]\n status = data['light_state']\n\n lights = TrafficLightArray()\n header = Header()\n header.stamp = rospy.Time.now()\n header.frame_id = '/world'\n lights.lights = [self.create_light(*e) for e in zip(x, y, z, yaw, status)]\n self.publishers['trafficlights'].publish(lights)\n\n def publish_dbw_status(self, data):\n self.publishers['dbw_status'].publish(Bool(data))\n\n def publish_camera(self, data):\n self.img_count += 1\n if self.img_count >= NUM_IMAGES_TO_SKIP:\n # rospy.logwarn(\"Publish camera data\")\n imgString = data[\"image\"]\n image = PIL_Image.open(BytesIO(base64.b64decode(imgString)))\n image_array = np.asarray(image)\n image_message = self.bridge.cv2_to_imgmsg(image_array, encoding=\"rgb8\")\n self.publishers['image'].publish(image_message)\n self.img_count = 0\n\n def callback_steering(self, data):\n self.server('steer', data={'steering_angle': str(data.steering_wheel_angle_cmd)})\n\n def callback_throttle(self, data):\n self.server('throttle', data={'throttle': str(data.pedal_cmd)})\n\n def callback_brake(self, data):\n self.server('brake', data={'brake': str(data.pedal_cmd)})\n\n def callback_path(self, data):\n x_values = []\n y_values = []\n z_values = []\n for waypoint in data.waypoints:\n x = waypoint.pose.pose.position.x\n y = waypoint.pose.pose.position.y\n z = waypoint.pose.pose.position.z+0.5\n x_values.append(x)\n y_values.append(y)\n z_values.append(z)\n\n self.server('drawline', data={'next_x': x_values, 'next_y': y_values, 'next_z': z_values})\n"
] | [
[
"numpy.asarray"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
HeegyuKim/face_recognition | [
"d96d2c94225e49d3dd8f2cae4444d35d5c88d13b"
] | [
"mfr2.py"
] | [
"import os\nimport shutil\n\nimport os\nfrom glob import glob\nimport pandas as pd\nimport random\nfrom collections import defaultdict\nfrom PIL import Image\nfrom torch.utils.data import Dataset, DataLoader\n\ndef get_all_images(dir):\n types = [\"jpeg\", \"jpg\", \"png\"]\n files = []\n for t in types:\n path = os.path.join(dir, \"**\", \"*.\" + t)\n files.extend(glob(path))\n \n return files\n\n\ndef casia(dir):\n files = get_all_images(dir)\n users = defaultdict(set)\n rows = []\n\n for file in files:\n user = file.split(\"/\")[-2]\n users[user].add(file)\n rows.append({\n \"image\": file,\n \"id\": user\n })\n\n df = pd.DataFrame(rows)\n positives = []\n negatives = []\n\n for user, files in users.items():\n if len(files) <= 1:\n continue\n \n samples = random.sample(files, 2)\n positives.append({\n \"image1\": samples[0],\n \"image2\": samples[1],\n \"id1\": user,\n \"id2\": user,\n \"label\": 1\n })\n \n user_ids = list(users.keys())\n for i in range(0, len(user_ids), 2):\n if i == len(user_ids) - 1:\n continue\n\n id1, id2 = user_ids[i], user_ids[i + 1]\n files1, files2 = users[id1], users[id2]\n\n if len(files1) < 2 or len(files2) < 2:\n break\n \n samples1, samples2 = random.sample(files1, 2), random.sample(files2, 2)\n for j in range(2):\n negatives.append({\n \"image1\": samples1[j],\n \"image2\": samples2[j],\n \"id1\": id1,\n \"id2\": id2,\n \"label\": -1\n })\n \n test_set = pd.DataFrame(positives + negatives)\n return df, test_set\n\n# trainset, testset = casia(\"train/\")\n# trainset.to_csv(\"train.csv\", index=False)\n# testset.to_csv(\"train_eval.csv\", index=False)\n\nfor file in glob(\"dataset/validation/**/*.png\", recursive=True):\n tokens = file.split(\"/\")\n filename = tokens[-1]\n id = tokens[-3]\n\n dst = f\"mfeval/{id}/{filename}\"\n os.makedirs(os.path.abspath(os.path.dirname(dst)), exist_ok=True)\n shutil.copyfile(file, dst)"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
cmarlin/agents | [
"1729e06f42237b34dab8bd9d8c01980c2d2b391c"
] | [
"tf_agents/experimental/examples/sac/haarnoja18/sac_train_eval.py"
] | [
"# coding=utf-8\n# Copyright 2020 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\nr\"\"\"Train and Eval SAC.\n\nAll hyperparameters come from the SAC paper\nhttps://arxiv.org/pdf/1812.05905.pdf\n\"\"\"\nimport functools\nimport os\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\n\nimport gin\nimport reverb\nimport tensorflow as tf\n\nfrom tf_agents.agents.sac import sac_agent\nfrom tf_agents.agents.sac import tanh_normal_projection_network\nfrom tf_agents.environments import suite_mujoco\nfrom tf_agents.keras_layers import inner_reshape\nfrom tf_agents.metrics import py_metrics\nfrom tf_agents.networks import nest_map\nfrom tf_agents.networks import sequential\nfrom tf_agents.policies import greedy_policy\nfrom tf_agents.policies import py_tf_eager_policy\nfrom tf_agents.policies import random_py_policy\nfrom tf_agents.replay_buffers import reverb_replay_buffer\nfrom tf_agents.replay_buffers import reverb_utils\nfrom tf_agents.train import actor\nfrom tf_agents.train import learner\nfrom tf_agents.train import triggers\nfrom tf_agents.train.utils import spec_utils\nfrom tf_agents.train.utils import train_utils\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string('root_dir', os.getenv('TEST_UNDECLARED_OUTPUTS_DIR'),\n 'Root directory for writing logs/summaries/checkpoints.')\nflags.DEFINE_integer(\n 'reverb_port', None,\n 'Port for reverb server, if None, use a randomly chosen unused port.')\nflags.DEFINE_integer('num_iterations', 3000000,\n 'Total number train/eval iterations to perform.')\nflags.DEFINE_integer(\n 'eval_interval', 10000,\n 'Number of train steps between evaluations. 
Set to 0 to skip.')\nflags.DEFINE_multi_string('gin_file', None, 'Paths to the gin-config files.')\nflags.DEFINE_multi_string('gin_bindings', None, 'Gin binding parameters.')\n\n\ndense = functools.partial(\n tf.keras.layers.Dense,\n activation=tf.keras.activations.relu,\n kernel_initializer='glorot_uniform')\n\n\ndef create_fc_network(layer_units):\n return sequential.Sequential([dense(num_units) for num_units in layer_units])\n\n\ndef create_identity_layer():\n return tf.keras.layers.Lambda(lambda x: x)\n\n\ndef create_sequential_critic_network(obs_fc_layer_units,\n action_fc_layer_units,\n joint_fc_layer_units):\n \"\"\"Create a sequential critic network.\"\"\"\n # Split the inputs into observations and actions.\n def split_inputs(inputs):\n return {'observation': inputs[0], 'action': inputs[1]}\n\n # Create an observation network.\n obs_network = (create_fc_network(obs_fc_layer_units) if obs_fc_layer_units\n else create_identity_layer())\n\n # Create an action network.\n action_network = (create_fc_network(action_fc_layer_units)\n if action_fc_layer_units else create_identity_layer())\n\n # Create a joint network.\n joint_network = (create_fc_network(joint_fc_layer_units)\n if joint_fc_layer_units else create_identity_layer())\n\n # Final layer.\n value_layer = tf.keras.layers.Dense(1, kernel_initializer='glorot_uniform')\n\n return sequential.Sequential([\n tf.keras.layers.Lambda(split_inputs),\n nest_map.NestMap({\n 'observation': obs_network,\n 'action': action_network\n }),\n nest_map.NestFlatten(),\n tf.keras.layers.Concatenate(),\n joint_network,\n value_layer,\n inner_reshape.InnerReshape(current_shape=[1], new_shape=[])\n ], name='sequential_critic')\n\n\nclass _TanhNormalProjectionNetworkWrapper(\n tanh_normal_projection_network.TanhNormalProjectionNetwork):\n \"\"\"Wrapper to pass predefined `outer_rank` to underlying projection net.\"\"\"\n\n def __init__(self, sample_spec, predefined_outer_rank=1):\n super(_TanhNormalProjectionNetworkWrapper, self).__init__(sample_spec)\n self.predefined_outer_rank = predefined_outer_rank\n\n def call(self, inputs, network_state=(), **kwargs):\n kwargs['outer_rank'] = self.predefined_outer_rank\n if 'step_type' in kwargs:\n del kwargs['step_type']\n return super(_TanhNormalProjectionNetworkWrapper,\n self).call(inputs, **kwargs)\n\n\ndef create_sequential_actor_network(actor_fc_layers, action_tensor_spec):\n \"\"\"Create a sequential actor network.\"\"\"\n def tile_as_nest(non_nested_output):\n return tf.nest.map_structure(lambda _: non_nested_output,\n action_tensor_spec)\n\n return sequential.Sequential(\n [dense(num_units) for num_units in actor_fc_layers] +\n [tf.keras.layers.Lambda(tile_as_nest)] + [\n nest_map.NestMap(\n tf.nest.map_structure(_TanhNormalProjectionNetworkWrapper,\n action_tensor_spec))\n ])\n\n\[email protected]\ndef train_eval(\n root_dir,\n env_name='HalfCheetah-v2',\n # Training params\n initial_collect_steps=10000,\n num_iterations=3200000,\n actor_fc_layers=(256, 256),\n critic_obs_fc_layers=None,\n critic_action_fc_layers=None,\n critic_joint_fc_layers=(256, 256),\n # Agent params\n batch_size=256,\n actor_learning_rate=3e-4,\n critic_learning_rate=3e-4,\n alpha_learning_rate=3e-4,\n gamma=0.99,\n target_update_tau=0.005,\n target_update_period=1,\n reward_scale_factor=0.1,\n # Replay params\n reverb_port=None,\n replay_capacity=1000000,\n # Others\n policy_save_interval=10000,\n replay_buffer_save_interval=100000,\n eval_interval=10000,\n eval_episodes=30,\n debug_summaries=False,\n 
summarize_grads_and_vars=False):\n \"\"\"Trains and evaluates SAC.\"\"\"\n logging.info('Training SAC on: %s', env_name)\n collect_env = suite_mujoco.load(env_name)\n eval_env = suite_mujoco.load(env_name)\n\n _, action_tensor_spec, time_step_tensor_spec = (\n spec_utils.get_tensor_specs(collect_env))\n\n train_step = train_utils.create_train_step()\n\n actor_net = create_sequential_actor_network(\n actor_fc_layers=actor_fc_layers, action_tensor_spec=action_tensor_spec)\n\n critic_net = create_sequential_critic_network(\n obs_fc_layer_units=critic_obs_fc_layers,\n action_fc_layer_units=critic_action_fc_layers,\n joint_fc_layer_units=critic_joint_fc_layers)\n\n agent = sac_agent.SacAgent(\n time_step_tensor_spec,\n action_tensor_spec,\n actor_network=actor_net,\n critic_network=critic_net,\n actor_optimizer=tf.keras.optimizers.Adam(\n learning_rate=actor_learning_rate),\n critic_optimizer=tf.keras.optimizers.Adam(\n learning_rate=critic_learning_rate),\n alpha_optimizer=tf.keras.optimizers.Adam(\n learning_rate=alpha_learning_rate),\n target_update_tau=target_update_tau,\n target_update_period=target_update_period,\n td_errors_loss_fn=tf.math.squared_difference,\n gamma=gamma,\n reward_scale_factor=reward_scale_factor,\n gradient_clipping=None,\n debug_summaries=debug_summaries,\n summarize_grads_and_vars=summarize_grads_and_vars,\n train_step_counter=train_step)\n agent.initialize()\n\n table_name = 'uniform_table'\n table = reverb.Table(\n table_name,\n max_size=replay_capacity,\n sampler=reverb.selectors.Uniform(),\n remover=reverb.selectors.Fifo(),\n rate_limiter=reverb.rate_limiters.MinSize(1))\n\n reverb_checkpoint_dir = os.path.join(root_dir, learner.TRAIN_DIR,\n learner.REPLAY_BUFFER_CHECKPOINT_DIR)\n reverb_checkpointer = reverb.platform.checkpointers_lib.DefaultCheckpointer(\n path=reverb_checkpoint_dir)\n reverb_server = reverb.Server([table],\n port=reverb_port,\n checkpointer=reverb_checkpointer)\n reverb_replay = reverb_replay_buffer.ReverbReplayBuffer(\n agent.collect_data_spec,\n sequence_length=2,\n table_name=table_name,\n local_server=reverb_server)\n rb_observer = reverb_utils.ReverbAddTrajectoryObserver(\n reverb_replay.py_client,\n table_name,\n sequence_length=2,\n stride_length=1)\n\n dataset = reverb_replay.as_dataset(\n sample_batch_size=batch_size, num_steps=2).prefetch(50)\n experience_dataset_fn = lambda: dataset\n\n saved_model_dir = os.path.join(root_dir, learner.POLICY_SAVED_MODEL_DIR)\n env_step_metric = py_metrics.EnvironmentSteps()\n learning_triggers = [\n triggers.PolicySavedModelTrigger(\n saved_model_dir,\n agent,\n train_step,\n interval=policy_save_interval,\n metadata_metrics={triggers.ENV_STEP_METADATA_KEY: env_step_metric}),\n triggers.ReverbCheckpointTrigger(\n train_step,\n interval=replay_buffer_save_interval,\n reverb_client=reverb_replay.py_client),\n # TODO(b/165023684): Add SIGTERM handler to checkpoint before preemption.\n triggers.StepPerSecondLogTrigger(train_step, interval=1000),\n ]\n\n agent_learner = learner.Learner(\n root_dir,\n train_step,\n agent,\n experience_dataset_fn,\n triggers=learning_triggers)\n\n random_policy = random_py_policy.RandomPyPolicy(\n collect_env.time_step_spec(), collect_env.action_spec())\n initial_collect_actor = actor.Actor(\n collect_env,\n random_policy,\n train_step,\n steps_per_run=initial_collect_steps,\n observers=[rb_observer])\n logging.info('Doing initial collect.')\n initial_collect_actor.run()\n\n tf_collect_policy = agent.collect_policy\n collect_policy = py_tf_eager_policy.PyTFEagerPolicy(\n 
tf_collect_policy, use_tf_function=True)\n\n collect_actor = actor.Actor(\n collect_env,\n collect_policy,\n train_step,\n steps_per_run=1,\n metrics=actor.collect_metrics(10),\n summary_dir=os.path.join(root_dir, learner.TRAIN_DIR),\n observers=[rb_observer, env_step_metric])\n\n tf_greedy_policy = greedy_policy.GreedyPolicy(agent.policy)\n eval_greedy_policy = py_tf_eager_policy.PyTFEagerPolicy(\n tf_greedy_policy, use_tf_function=True)\n\n eval_actor = actor.Actor(\n eval_env,\n eval_greedy_policy,\n train_step,\n episodes_per_run=eval_episodes,\n metrics=actor.eval_metrics(eval_episodes),\n summary_dir=os.path.join(root_dir, 'eval'),\n )\n\n if eval_interval:\n logging.info('Evaluating.')\n eval_actor.run_and_log()\n\n logging.info('Training.')\n for _ in range(num_iterations):\n collect_actor.run()\n agent_learner.run(iterations=1)\n\n if eval_interval and agent_learner.train_step_numpy % eval_interval == 0:\n logging.info('Evaluating.')\n eval_actor.run_and_log()\n\n rb_observer.close()\n reverb_server.stop()\n\n\ndef main(_):\n logging.set_verbosity(logging.INFO)\n tf.compat.v1.enable_v2_behavior()\n\n gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_bindings)\n\n train_eval(\n FLAGS.root_dir,\n num_iterations=FLAGS.num_iterations,\n reverb_port=FLAGS.reverb_port,\n eval_interval=FLAGS.eval_interval)\n\n\nif __name__ == '__main__':\n flags.mark_flag_as_required('root_dir')\n app.run(main)\n"
] | [
[
"tensorflow.keras.layers.Concatenate",
"tensorflow.compat.v1.enable_v2_behavior",
"tensorflow.keras.layers.Lambda",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.optimizers.Adam",
"tensorflow.nest.map_structure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jdavidagudelo/tensorflow-models | [
"6f019beec73b01861363bf717706e27f4210b979",
"6f019beec73b01861363bf717706e27f4210b979",
"6f019beec73b01861363bf717706e27f4210b979",
"6f019beec73b01861363bf717706e27f4210b979",
"6f019beec73b01861363bf717706e27f4210b979",
"6f019beec73b01861363bf717706e27f4210b979",
"6f019beec73b01861363bf717706e27f4210b979",
"6f019beec73b01861363bf717706e27f4210b979",
"6f019beec73b01861363bf717706e27f4210b979",
"6f019beec73b01861363bf717706e27f4210b979",
"6f019beec73b01861363bf717706e27f4210b979",
"6f019beec73b01861363bf717706e27f4210b979",
"6f019beec73b01861363bf717706e27f4210b979",
"6f019beec73b01861363bf717706e27f4210b979",
"6f019beec73b01861363bf717706e27f4210b979",
"6f019beec73b01861363bf717706e27f4210b979",
"6f019beec73b01861363bf717706e27f4210b979",
"6f019beec73b01861363bf717706e27f4210b979",
"6f019beec73b01861363bf717706e27f4210b979",
"6f019beec73b01861363bf717706e27f4210b979",
"6f019beec73b01861363bf717706e27f4210b979",
"6f019beec73b01861363bf717706e27f4210b979",
"6f019beec73b01861363bf717706e27f4210b979",
"6f019beec73b01861363bf717706e27f4210b979"
] | [
"research/compression/entropy_coder/core/entropy_coder_train.py",
"research/object_detection/core/anchor_generator.py",
"research/morph_net/framework/concat_and_slice_regularizers_test.py",
"research/im2txt/im2txt/ops/inputs.py",
"research/maskgan/losses/losses.py",
"research/minigo/preprocessing_test.py",
"research/object_detection/utils/np_box_mask_list_ops_test.py",
"research/resnet/resnet_main.py",
"research/tcn/train.py",
"research/learning_unsupervised_learning/summary_utils.py",
"research/object_detection/predictors/heads/keras_mask_head_test.py",
"research/object_detection/dataset_tools/oid_hierarchical_labels_expansion_test.py",
"research/brain_coder/single_task/results_lib_test.py",
"official/boosted_trees/train_higgs_test.py",
"samples/outreach/blogs/blog_estimators_dataset.py",
"research/tcn/estimators/svtcn_loss_test.py",
"research/vid2depth/ops/icp_op.py",
"research/autoaugment/augmentation_transforms.py",
"research/deep_contextual_bandits/bandits/algorithms/multitask_gp.py",
"research/tcn/utils/util.py",
"research/slim/nets/mobilenet_v1_test.py",
"research/astronet/astronet/ops/metrics.py",
"research/skip_thoughts/skip_thoughts/train.py",
"research/astronet/astronet/util/example_util_test.py"
] | [
"# Copyright 2017 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Train an entropy coder model.\"\"\"\n\nimport time\n\nimport tensorflow as tf\n\nfrom research.compression.entropy_coder.core import code_loader\nfrom research.compression.entropy_coder.core import config_helper\n\n# pylint: enable=unused-import\nfrom research.compression.entropy_coder.model import model_factory\n\nFLAGS = tf.app.flags.FLAGS\n\n# Hardware resources configuration.\ntf.app.flags.DEFINE_string('master', '',\n \"\"\"Name of the TensorFlow master to use.\"\"\")\ntf.app.flags.DEFINE_string('train_dir', None,\n \"\"\"Directory where to write event logs.\"\"\")\ntf.app.flags.DEFINE_integer('task', None,\n \"\"\"Task id of the replica running the training.\"\"\")\ntf.app.flags.DEFINE_integer('ps_tasks', 0, \"\"\"Number of tasks in the ps job.\n If 0 no ps job is used.\"\"\")\n\n# Model selection and configuration.\ntf.app.flags.DEFINE_string('model', None, \"\"\"Underlying encoder model.\"\"\")\ntf.app.flags.DEFINE_string('model_config', None,\n \"\"\"Model config protobuf given as text file.\"\"\")\n\n# Training data and parameters configuration.\ntf.app.flags.DEFINE_string('input_config', None,\n \"\"\"Path to the training input config file.\"\"\")\ntf.app.flags.DEFINE_string('train_config', None,\n \"\"\"Path to the training experiment config file.\"\"\")\n\n\ndef train():\n if FLAGS.train_dir is None:\n raise ValueError('Parameter train_dir must be provided')\n if FLAGS.task is None:\n raise ValueError('Parameter task must be provided')\n if FLAGS.model is None:\n raise ValueError('Parameter model must be provided')\n\n input_config_string = config_helper.GetConfigString(FLAGS.input_config)\n input_config = config_helper.InputConfig(input_config_string)\n\n # Training parameters.\n train_config_string = config_helper.GetConfigString(FLAGS.train_config)\n train_config = config_helper.TrainConfig(train_config_string)\n\n batch_size = train_config.batch_size\n initial_learning_rate = train_config.learning_rate\n decay_rate = train_config.decay_rate\n samples_per_decay = train_config.samples_per_decay\n\n # Parameters for learning-rate decay.\n # The formula is decay_rate ** floor(steps / decay_steps).\n decay_steps = samples_per_decay / batch_size\n decay_steps = max(decay_steps, 1)\n\n first_code = code_loader.ReadFirstCode(input_config.data)\n first_code_height = (\n first_code.features.feature['code_shape'].int64_list.value[0])\n first_code_width = (\n first_code.features.feature['code_shape'].int64_list.value[1])\n max_bit_depth = (\n first_code.features.feature['code_shape'].int64_list.value[2])\n print('Maximum code depth: {}'.format(max_bit_depth))\n\n with tf.Graph().as_default():\n ps_ops = [\"Variable\", \"VariableV2\", \"AutoReloadVariable\", \"VarHandleOp\"]\n with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks,\n ps_ops=ps_ops)):\n codes = 
code_loader.LoadBinaryCode(\n input_config=input_config,\n batch_size=batch_size)\n if input_config.unique_code_size:\n print('Input code size: {} x {}'.format(first_code_height,\n first_code_width))\n codes.set_shape(\n [batch_size, first_code_height, first_code_width, max_bit_depth])\n else:\n codes.set_shape([batch_size, None, None, max_bit_depth])\n codes_effective_shape = tf.shape(codes)\n\n global_step = tf.contrib.framework.create_global_step()\n\n # Apply learning-rate decay.\n learning_rate = tf.train.exponential_decay(\n learning_rate=initial_learning_rate,\n global_step=global_step,\n decay_steps=decay_steps,\n decay_rate=decay_rate,\n staircase=True)\n tf.summary.scalar('Learning Rate', learning_rate)\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,\n epsilon=1.0)\n\n # Create the entropy coder model.\n model = model_factory.GetModelRegistry().CreateModel(FLAGS.model)\n model_config_string = config_helper.GetConfigString(FLAGS.model_config)\n model.Initialize(global_step, optimizer, model_config_string)\n model.BuildGraph(codes)\n\n summary_op = tf.summary.merge_all()\n\n # Verify that the model can actually be trained.\n if model.train_op is None:\n raise ValueError('Input model {} is not trainable'.format(FLAGS.model))\n\n # We disable the summary thread run by Supervisor class by passing\n # summary_op=None. We still pass save_summaries_secs because it is used by\n # the global step counter thread.\n is_chief = (FLAGS.task == 0)\n sv = tf.train.Supervisor(logdir=FLAGS.train_dir,\n is_chief=is_chief,\n global_step=global_step,\n # saver=model.saver,\n summary_op=None,\n save_summaries_secs=120,\n save_model_secs=600,\n recovery_wait_secs=30)\n\n sess = sv.PrepareSession(FLAGS.master)\n sv.StartQueueRunners(sess)\n\n step = sess.run(global_step)\n print('Trainer initial step: {}.'.format(step))\n\n # Once everything has been setup properly, save the configs.\n if is_chief:\n config_helper.SaveConfig(FLAGS.train_dir, 'input_config.json',\n input_config_string)\n config_helper.SaveConfig(FLAGS.train_dir, 'model_config.json',\n model_config_string)\n config_helper.SaveConfig(FLAGS.train_dir, 'train_config.json',\n train_config_string)\n\n # Train the model.\n next_summary_time = time.time()\n while not sv.ShouldStop():\n feed_dict = None\n\n # Once in a while, update the summaries on the chief worker.\n if is_chief and next_summary_time < time.time():\n summary_str = sess.run(summary_op, feed_dict=feed_dict)\n sv.SummaryComputed(sess, summary_str)\n next_summary_time = time.time() + sv.save_summaries_secs\n else:\n tf_tensors = {\n 'train': model.train_op,\n 'code_length': model.average_code_length\n }\n np_tensors = sess.run(tf_tensors, feed_dict=feed_dict)\n print(np_tensors['code_length'])\n\n sv.Stop()\n\n\ndef main(argv=None): # pylint: disable=unused-argument\n train()\n\n\nif __name__ == '__main__':\n tf.app.run()\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Base anchor generator.\n\nThe job of the anchor generator is to create (or load) a collection\nof bounding boxes to be used as anchors.\n\nGenerated anchors are assumed to match some convolutional grid or list of grid\nshapes. For example, we might want to generate anchors matching an 8x8\nfeature map and a 4x4 feature map. If we place 3 anchors per grid location\non the first feature map and 6 anchors per grid location on the second feature\nmap, then 3*8*8 + 6*4*4 = 288 anchors are generated in total.\n\nTo support fully convolutional settings, feature map shapes are passed\ndynamically at generation time. The number of anchors to place at each location\nis static --- implementations of AnchorGenerator must always be able return\nthe number of anchors that it uses per location for each feature map.\n\"\"\"\nfrom abc import ABCMeta\nfrom abc import abstractmethod\n\nimport tensorflow as tf\n\n\nclass AnchorGenerator(object):\n \"\"\"Abstract base class for anchor generators.\"\"\"\n __metaclass__ = ABCMeta\n\n @abstractmethod\n def name_scope(self):\n \"\"\"Name scope.\n\n Must be defined by implementations.\n\n Returns:\n a string representing the name scope of the anchor generation operation.\n \"\"\"\n pass\n\n @property\n def check_num_anchors(self):\n \"\"\"Whether to dynamically check the number of anchors generated.\n\n Can be overridden by implementations that would like to disable this\n behavior.\n\n Returns:\n a boolean controlling whether the Generate function should dynamically\n check the number of anchors generated against the mathematically\n expected number of anchors.\n \"\"\"\n return True\n\n @abstractmethod\n def num_anchors_per_location(self):\n \"\"\"Returns the number of anchors per spatial location.\n\n Returns:\n a list of integers, one for each expected feature map to be passed to\n the `generate` function.\n \"\"\"\n pass\n\n def generate(self, feature_map_shape_list, **params):\n \"\"\"Generates a collection of bounding boxes to be used as anchors.\n\n TODO(rathodv): remove **params from argument list and make stride and\n offsets (for multiple_grid_anchor_generator) constructor arguments.\n\n Args:\n feature_map_shape_list: list of (height, width) pairs in the format\n [(height_0, width_0), (height_1, width_1), ...] that the generated\n anchors must align with. 
Pairs can be provided as 1-dimensional\n integer tensors of length 2 or simply as tuples of integers.\n **params: parameters for anchor generation op\n\n Returns:\n boxes_list: a list of BoxLists each holding anchor boxes corresponding to\n the input feature map shapes.\n\n Raises:\n ValueError: if the number of feature map shapes does not match the length\n of NumAnchorsPerLocation.\n \"\"\"\n if self.check_num_anchors and (\n len(feature_map_shape_list) != len(self.num_anchors_per_location())):\n raise ValueError('Number of feature maps is expected to equal the length '\n 'of `num_anchors_per_location`.')\n with tf.name_scope(self.name_scope()):\n anchors_list = self._generate(feature_map_shape_list, **params)\n if self.check_num_anchors:\n with tf.control_dependencies([\n self._assert_correct_number_of_anchors(\n anchors_list, feature_map_shape_list)]):\n for item in anchors_list:\n item.set(tf.identity(item.get()))\n return anchors_list\n\n @abstractmethod\n def _generate(self, feature_map_shape_list, **params):\n \"\"\"To be overridden by implementations.\n\n Args:\n feature_map_shape_list: list of (height, width) pairs in the format\n [(height_0, width_0), (height_1, width_1), ...] that the generated\n anchors must align with.\n **params: parameters for anchor generation op\n\n Returns:\n boxes_list: a list of BoxList, each holding a collection of N anchor\n boxes.\n \"\"\"\n pass\n\n def _assert_correct_number_of_anchors(self, anchors_list,\n feature_map_shape_list):\n \"\"\"Assert that correct number of anchors was generated.\n\n Args:\n anchors_list: A list of box_list.BoxList object holding anchors generated.\n feature_map_shape_list: list of (height, width) pairs in the format\n [(height_0, width_0), (height_1, width_1), ...] that the generated\n anchors must align with.\n Returns:\n Op that raises InvalidArgumentError if the number of anchors does not\n match the number of expected anchors.\n \"\"\"\n expected_num_anchors = 0\n actual_num_anchors = 0\n for num_anchors_per_location, feature_map_shape, anchors in zip(\n self.num_anchors_per_location(), feature_map_shape_list, anchors_list):\n expected_num_anchors += (num_anchors_per_location\n * feature_map_shape[0]\n * feature_map_shape[1])\n actual_num_anchors += anchors.num_boxes()\n return tf.assert_equal(expected_num_anchors, actual_num_anchors)\n",
"# Copyright 2018 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for framework.concat_and_slice_regularizers.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom research.morph_net.framework import concat_and_slice_regularizers\nfrom research.morph_net.testing import op_regularizer_stub\n\n\nclass ConcatAndSliceRegularizersTest(tf.test.TestCase):\n\n def setUp(self):\n self._reg_vec1 = [0.1, 0.3, 0.6, 0.2]\n self._alive_vec1 = [False, True, True, False]\n self._reg_vec2 = [0.2, 0.4, 0.5]\n self._alive_vec2 = [False, True, False]\n self._reg1 = op_regularizer_stub.OpRegularizerStub(self._reg_vec1,\n self._alive_vec1)\n self._reg2 = op_regularizer_stub.OpRegularizerStub(self._reg_vec2,\n self._alive_vec2)\n\n def testConcatRegularizer(self):\n concat_reg = concat_and_slice_regularizers.ConcatRegularizer(\n [self._reg1, self._reg2])\n with self.test_session():\n self.assertAllEqual(self._alive_vec1 + self._alive_vec2,\n concat_reg.alive_vector.eval())\n self.assertAllClose(self._reg_vec1 + self._reg_vec2,\n concat_reg.regularization_vector.eval(), 1e-5)\n\n def testSliceRegularizer(self):\n concat_reg = concat_and_slice_regularizers.SlicingReferenceRegularizer(\n lambda: self._reg1, 1, 2)\n with self.test_session():\n self.assertAllEqual(self._alive_vec1[1:3],\n concat_reg.alive_vector.eval())\n self.assertAllClose(self._reg_vec1[1:3],\n concat_reg.regularization_vector.eval(), 1e-5)\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Input ops.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\n\ndef parse_sequence_example(serialized, image_feature, caption_feature):\n \"\"\"Parses a tensorflow.SequenceExample into an image and caption.\n\n Args:\n serialized: A scalar string Tensor; a single serialized SequenceExample.\n image_feature: Name of SequenceExample context feature containing image\n data.\n caption_feature: Name of SequenceExample feature list containing integer\n captions.\n\n Returns:\n encoded_image: A scalar string Tensor containing a JPEG encoded image.\n caption: A 1-D uint64 Tensor with dynamically specified length.\n \"\"\"\n context, sequence = tf.parse_single_sequence_example(\n serialized,\n context_features={\n image_feature: tf.FixedLenFeature([], dtype=tf.string)\n },\n sequence_features={\n caption_feature: tf.FixedLenSequenceFeature([], dtype=tf.int64),\n })\n\n encoded_image = context[image_feature]\n caption = sequence[caption_feature]\n return encoded_image, caption\n\n\ndef prefetch_input_data(reader,\n file_pattern,\n is_training,\n batch_size,\n values_per_shard,\n input_queue_capacity_factor=16,\n num_reader_threads=1,\n shard_queue_name=\"filename_queue\",\n value_queue_name=\"input_queue\"):\n \"\"\"Prefetches string values from disk into an input queue.\n\n In training the capacity of the queue is important because a larger queue\n means better mixing of training examples between shards. The minimum number of\n values kept in the queue is values_per_shard * input_queue_capacity_factor,\n where input_queue_memory factor should be chosen to trade-off better mixing\n with memory usage.\n\n Args:\n reader: Instance of tf.ReaderBase.\n file_pattern: Comma-separated list of file patterns (e.g.\n /tmp/train_data-?????-of-00100).\n is_training: Boolean; whether prefetching for training or eval.\n batch_size: Model batch size used to determine queue capacity.\n values_per_shard: Approximate number of values per shard.\n input_queue_capacity_factor: Minimum number of values to keep in the queue\n in multiples of values_per_shard. 
See comments above.\n num_reader_threads: Number of reader threads to fill the queue.\n shard_queue_name: Name for the shards filename queue.\n value_queue_name: Name for the values input queue.\n\n Returns:\n A Queue containing prefetched string values.\n \"\"\"\n data_files = []\n for pattern in file_pattern.split(\",\"):\n data_files.extend(tf.gfile.Glob(pattern))\n if not data_files:\n tf.logging.fatal(\"Found no input files matching %s\", file_pattern)\n else:\n tf.logging.info(\"Prefetching values from %d files matching %s\",\n len(data_files), file_pattern)\n\n if is_training:\n filename_queue = tf.train.string_input_producer(\n data_files, shuffle=True, capacity=16, name=shard_queue_name)\n min_queue_examples = values_per_shard * input_queue_capacity_factor\n capacity = min_queue_examples + 100 * batch_size\n values_queue = tf.RandomShuffleQueue(\n capacity=capacity,\n min_after_dequeue=min_queue_examples,\n dtypes=[tf.string],\n name=\"random_\" + value_queue_name)\n else:\n filename_queue = tf.train.string_input_producer(\n data_files, shuffle=False, capacity=1, name=shard_queue_name)\n capacity = values_per_shard + 3 * batch_size\n values_queue = tf.FIFOQueue(\n capacity=capacity, dtypes=[tf.string], name=\"fifo_\" + value_queue_name)\n\n enqueue_ops = []\n for _ in range(num_reader_threads):\n _, value = reader.read(filename_queue)\n enqueue_ops.append(values_queue.enqueue([value]))\n tf.train.queue_runner.add_queue_runner(tf.train.queue_runner.QueueRunner(\n values_queue, enqueue_ops))\n tf.summary.scalar(\n \"queue/%s/fraction_of_%d_full\" % (values_queue.name, capacity),\n tf.cast(values_queue.size(), tf.float32) * (1. / capacity))\n\n return values_queue\n\n\ndef batch_with_dynamic_pad(images_and_captions,\n batch_size,\n queue_capacity,\n add_summaries=True):\n \"\"\"Batches input images and captions.\n\n This function splits the caption into an input sequence and a target sequence,\n where the target sequence is the input sequence right-shifted by 1. Input and\n target sequences are batched and padded up to the maximum length of sequences\n in the batch. A mask is created to distinguish real words from padding words.\n\n Example:\n Actual captions in the batch ('-' denotes padded character):\n [\n [ 1 2 3 4 5 ],\n [ 1 2 3 4 - ],\n [ 1 2 3 - - ],\n ]\n\n input_seqs:\n [\n [ 1 2 3 4 ],\n [ 1 2 3 - ],\n [ 1 2 - - ],\n ]\n\n target_seqs:\n [\n [ 2 3 4 5 ],\n [ 2 3 4 - ],\n [ 2 3 - - ],\n ]\n\n mask:\n [\n [ 1 1 1 1 ],\n [ 1 1 1 0 ],\n [ 1 1 0 0 ],\n ]\n\n Args:\n images_and_captions: A list of pairs [image, caption], where image is a\n Tensor of shape [height, width, channels] and caption is a 1-D Tensor of\n any length. 
Each pair will be processed and added to the queue in a\n separate thread.\n batch_size: Batch size.\n queue_capacity: Queue capacity.\n add_summaries: If true, add caption length summaries.\n\n Returns:\n images: A Tensor of shape [batch_size, height, width, channels].\n input_seqs: An int32 Tensor of shape [batch_size, padded_length].\n target_seqs: An int32 Tensor of shape [batch_size, padded_length].\n mask: An int32 0/1 Tensor of shape [batch_size, padded_length].\n \"\"\"\n enqueue_list = []\n for image, caption in images_and_captions:\n caption_length = tf.shape(caption)[0]\n input_length = tf.expand_dims(tf.subtract(caption_length, 1), 0)\n\n input_seq = tf.slice(caption, [0], input_length)\n target_seq = tf.slice(caption, [1], input_length)\n indicator = tf.ones(input_length, dtype=tf.int32)\n enqueue_list.append([image, input_seq, target_seq, indicator])\n\n images, input_seqs, target_seqs, mask = tf.train.batch_join(\n enqueue_list,\n batch_size=batch_size,\n capacity=queue_capacity,\n dynamic_pad=True,\n name=\"batch_and_pad\")\n\n if add_summaries:\n lengths = tf.add(tf.reduce_sum(mask, 1), 1)\n tf.summary.scalar(\"caption_length/batch_min\", tf.reduce_min(lengths))\n tf.summary.scalar(\"caption_length/batch_max\", tf.reduce_max(lengths))\n tf.summary.scalar(\"caption_length/batch_mean\", tf.reduce_mean(lengths))\n\n return images, input_seqs, target_seqs, mask\n",
"# Copyright 2017 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Losses for Generator and Discriminator.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\n\ndef discriminator_loss(predictions, labels, missing_tokens):\n \"\"\"Discriminator loss based on predictions and labels.\n\n Args:\n predictions: Discriminator linear predictions Tensor of shape [batch_size,\n sequence_length]\n labels: Labels for predictions, Tensor of shape [batch_size,\n sequence_length]\n missing_tokens: Indicator for the missing tokens. Evaluate the loss only\n on the tokens that were missing.\n\n Returns:\n loss: Scalar tf.float32 loss.\n\n \"\"\"\n loss = tf.losses.sigmoid_cross_entropy(labels,\n predictions,\n weights=missing_tokens)\n loss = tf.Print(\n loss, [loss, labels, missing_tokens],\n message='loss, labels, missing_tokens',\n summarize=25,\n first_n=25)\n return loss\n\n\ndef cross_entropy_loss_matrix(gen_labels, gen_logits):\n \"\"\"Computes the cross entropy loss for G.\n\n Args:\n gen_labels: Labels for the correct token.\n gen_logits: Generator logits.\n\n Returns:\n loss_matrix: Loss matrix of shape [batch_size, sequence_length].\n \"\"\"\n cross_entropy_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=gen_labels, logits=gen_logits)\n return cross_entropy_loss\n\n\ndef GAN_loss_matrix(dis_predictions):\n \"\"\"Computes the cross entropy loss for G.\n\n Args:\n dis_predictions: Discriminator predictions.\n\n Returns:\n loss_matrix: Loss matrix of shape [batch_size, sequence_length].\n \"\"\"\n eps = tf.constant(1e-7, tf.float32)\n gan_loss_matrix = -tf.log(dis_predictions + eps)\n return gan_loss_matrix\n\n\ndef generator_GAN_loss(predictions):\n \"\"\"Generator GAN loss based on Discriminator predictions.\"\"\"\n return -tf.log(tf.reduce_mean(predictions))\n\n\ndef generator_blended_forward_loss(gen_logits, gen_labels, dis_predictions,\n is_real_input):\n \"\"\"Computes the masked-loss for G. This will be a blend of cross-entropy\n loss where the true label is known and GAN loss where the true label has been\n masked.\n\n Args:\n gen_logits: Generator logits.\n gen_labels: Labels for the correct token.\n dis_predictions: Discriminator predictions.\n is_real_input: Tensor indicating whether the label is present.\n\n Returns:\n loss: Scalar tf.float32 total loss.\n \"\"\"\n cross_entropy_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=gen_labels, logits=gen_logits)\n gan_loss = -tf.log(dis_predictions)\n loss_matrix = tf.where(is_real_input, cross_entropy_loss, gan_loss)\n return tf.reduce_mean(loss_matrix)\n\n\ndef wasserstein_generator_loss(gen_logits, gen_labels, dis_values,\n is_real_input):\n \"\"\"Computes the masked-loss for G. 
This will be a blend of cross-entropy\n loss where the true label is known and GAN loss where the true label is\n missing.\n\n Args:\n gen_logits: Generator logits.\n gen_labels: Labels for the correct token.\n dis_values: Discriminator values Tensor of shape [batch_size,\n sequence_length].\n is_real_input: Tensor indicating whether the label is present.\n\n Returns:\n loss: Scalar tf.float32 total loss.\n \"\"\"\n cross_entropy_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=gen_labels, logits=gen_logits)\n # Maximize the dis_values (minimize the negative)\n gan_loss = -dis_values\n loss_matrix = tf.where(is_real_input, cross_entropy_loss, gan_loss)\n loss = tf.reduce_mean(loss_matrix)\n return loss\n\n\ndef wasserstein_discriminator_loss(real_values, fake_values):\n \"\"\"Wasserstein discriminator loss.\n\n Args:\n real_values: Value given by the Wasserstein Discriminator to real data.\n fake_values: Value given by the Wasserstein Discriminator to fake data.\n\n Returns:\n loss: Scalar tf.float32 loss.\n\n \"\"\"\n real_avg = tf.reduce_mean(real_values)\n fake_avg = tf.reduce_mean(fake_values)\n\n wasserstein_loss = real_avg - fake_avg\n return wasserstein_loss\n\n\ndef wasserstein_discriminator_loss_intrabatch(values, is_real_input):\n \"\"\"Wasserstein discriminator loss. This is an odd variant where the value\n difference is between the real tokens and the fake tokens within a single\n batch.\n\n Args:\n values: Value given by the Wasserstein Discriminator of shape [batch_size,\n sequence_length] to an imputed batch (real and fake).\n is_real_input: tf.bool Tensor of shape [batch_size, sequence_length]. If\n true, it indicates that the label is known.\n\n Returns:\n wasserstein_loss: Scalar tf.float32 loss.\n\n \"\"\"\n zero_tensor = tf.constant(0., dtype=tf.float32, shape=[])\n\n present = tf.cast(is_real_input, tf.float32)\n missing = tf.cast(1 - present, tf.float32)\n\n # Counts for real and fake tokens.\n real_count = tf.reduce_sum(present)\n fake_count = tf.reduce_sum(missing)\n\n # Averages for real and fake token values.\n real = tf.mul(values, present)\n fake = tf.mul(values, missing)\n real_avg = tf.reduce_sum(real) / real_count\n fake_avg = tf.reduce_sum(fake) / fake_count\n\n # If there are no real or fake entries in the batch, we assign an average\n # value of zero.\n real_avg = tf.where(tf.equal(real_count, 0), zero_tensor, real_avg)\n fake_avg = tf.where(tf.equal(fake_count, 0), zero_tensor, fake_avg)\n\n wasserstein_loss = real_avg - fake_avg\n return wasserstein_loss\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for preprocessing.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport itertools\nimport tempfile\n\nimport tensorflow as tf # pylint: disable=g-bad-import-order\n\nfrom research.minigo import coords\nfrom research.minigo import features\nfrom research.minigo import go\nfrom research.minigo import model_params\nimport numpy as np\nfrom research.minigo import preprocessing\nfrom research.minigo import utils_test\n\ntf.logging.set_verbosity(tf.logging.ERROR)\n\nTEST_SGF = '''(;CA[UTF-8]SZ[9]PB[Murakawa Daisuke]PW[Iyama Yuta]KM[6.5]\n HA[0]RE[W+1.5]GM[1];B[fd];W[cf])'''\n\n\ndef create_random_data(num_examples):\n raw_data = []\n for _ in range(num_examples):\n feature = np.random.random([\n utils_test.BOARD_SIZE, utils_test.BOARD_SIZE,\n features.NEW_FEATURES_PLANES]).astype(np.uint8)\n pi = np.random.random([utils_test.BOARD_SIZE * utils_test.BOARD_SIZE\n + 1]).astype(np.float32)\n value = np.random.random()\n raw_data.append((feature, pi, value))\n return raw_data\n\n\nclass TestPreprocessing(utils_test.MiniGoUnitTest):\n\n def extract_data(self, tf_record, filter_amount=1):\n pos_tensor, label_tensors = preprocessing.get_input_tensors(\n model_params.DummyMiniGoParams(), 1, [tf_record], num_repeats=1,\n shuffle_records=False, shuffle_examples=False,\n filter_amount=filter_amount)\n recovered_data = []\n with tf.Session() as sess:\n while True:\n try:\n pos_value, label_values = sess.run([pos_tensor, label_tensors])\n recovered_data.append((\n pos_value,\n label_values['pi_tensor'],\n label_values['value_tensor']))\n except tf.errors.OutOfRangeError:\n break\n return recovered_data\n\n def assertEqualData(self, data1, data2):\n # Assert that two data are equal, where both are of form:\n # data = List<Tuple<feature_array, pi_array, value>>\n self.assertEqual(len(data1), len(data2))\n for datum1, datum2 in zip(data1, data2):\n # feature\n self.assertEqualNPArray(datum1[0], datum2[0])\n # pi\n self.assertEqualNPArray(datum1[1], datum2[1])\n # value\n self.assertEqual(datum1[2], datum2[2])\n\n def test_serialize_round_trip(self):\n np.random.seed(1)\n raw_data = create_random_data(10)\n tfexamples = list(map(preprocessing.make_tf_example, *zip(*raw_data)))\n\n with tempfile.NamedTemporaryFile() as f:\n preprocessing.write_tf_examples(f.name, tfexamples)\n recovered_data = self.extract_data(f.name)\n\n self.assertEqualData(raw_data, recovered_data)\n\n def test_filter(self):\n raw_data = create_random_data(100)\n tfexamples = list(map(preprocessing.make_tf_example, *zip(*raw_data)))\n\n with tempfile.NamedTemporaryFile() as f:\n preprocessing.write_tf_examples(f.name, tfexamples)\n recovered_data = self.extract_data(f.name, filter_amount=.05)\n\n self.assertLess(len(recovered_data), 50)\n\n def 
test_serialize_round_trip_no_parse(self):\n np.random.seed(1)\n raw_data = create_random_data(10)\n tfexamples = list(map(preprocessing.make_tf_example, *zip(*raw_data)))\n\n with tempfile.NamedTemporaryFile() as start_file, \\\n tempfile.NamedTemporaryFile() as rewritten_file:\n preprocessing.write_tf_examples(start_file.name, tfexamples)\n # We want to test that the rewritten, shuffled file contains correctly\n # serialized tf.Examples.\n batch_size = 4\n batches = list(preprocessing.shuffle_tf_examples(\n 1000, batch_size, [start_file.name]))\n # 2 batches of 4, 1 incomplete batch of 2.\n self.assertEqual(len(batches), 3)\n\n # concatenate list of lists into one list\n all_batches = list(itertools.chain.from_iterable(batches))\n\n for _ in batches:\n preprocessing.write_tf_examples(\n rewritten_file.name, all_batches, serialize=False)\n\n original_data = self.extract_data(start_file.name)\n recovered_data = self.extract_data(rewritten_file.name)\n\n # stuff is shuffled, so sort before checking equality\n def sort_key(nparray_tuple):\n return nparray_tuple[2]\n\n original_data = sorted(original_data, key=sort_key)\n recovered_data = sorted(recovered_data, key=sort_key)\n\n self.assertEqualData(original_data, recovered_data)\n\n def test_make_dataset_from_sgf(self):\n with tempfile.NamedTemporaryFile() as sgf_file, \\\n tempfile.NamedTemporaryFile() as record_file:\n sgf_file.write(TEST_SGF.encode('utf8'))\n sgf_file.seek(0)\n preprocessing.make_dataset_from_sgf(\n utils_test.BOARD_SIZE, sgf_file.name, record_file.name)\n recovered_data = self.extract_data(record_file.name)\n start_pos = go.Position(utils_test.BOARD_SIZE)\n first_move = coords.from_sgf('fd')\n next_pos = start_pos.play_move(first_move)\n second_move = coords.from_sgf('cf')\n expected_data = [\n (\n features.extract_features(utils_test.BOARD_SIZE, start_pos),\n preprocessing._one_hot(utils_test.BOARD_SIZE, coords.to_flat(\n utils_test.BOARD_SIZE, first_move)), -1\n ),\n (\n features.extract_features(utils_test.BOARD_SIZE, next_pos),\n preprocessing._one_hot(utils_test.BOARD_SIZE, coords.to_flat(\n utils_test.BOARD_SIZE, second_move)), -1\n )\n ]\n self.assertEqualData(expected_data, recovered_data)\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests for object_detection.utils.np_box_mask_list_ops.\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom research.object_detection.utils import np_box_mask_list\nfrom research.object_detection.utils import np_box_mask_list_ops\n\n\nclass AreaRelatedTest(tf.test.TestCase):\n\n def setUp(self):\n boxes1 = np.array([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]],\n dtype=float)\n masks1_0 = np.array([[0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 0, 0, 0, 0],\n [1, 1, 1, 1, 0, 0, 0, 0]],\n dtype=np.uint8)\n masks1_1 = np.array([[1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0]],\n dtype=np.uint8)\n masks1 = np.stack([masks1_0, masks1_1])\n boxes2 = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],\n [0.0, 0.0, 20.0, 20.0]],\n dtype=float)\n masks2_0 = np.array([[0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 0, 0, 0, 0],\n [1, 1, 1, 1, 0, 0, 0, 0]],\n dtype=np.uint8)\n masks2_1 = np.array([[1, 1, 1, 1, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 0, 0, 0],\n [1, 1, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0]],\n dtype=np.uint8)\n masks2_2 = np.array([[1, 1, 1, 1, 1, 0, 0, 0],\n [1, 1, 1, 1, 1, 0, 0, 0],\n [1, 1, 1, 1, 1, 0, 0, 0],\n [1, 1, 1, 1, 1, 0, 0, 0],\n [1, 1, 1, 1, 1, 0, 0, 0]],\n dtype=np.uint8)\n masks2 = np.stack([masks2_0, masks2_1, masks2_2])\n self.box_mask_list1 = np_box_mask_list.BoxMaskList(\n box_data=boxes1, mask_data=masks1)\n self.box_mask_list2 = np_box_mask_list.BoxMaskList(\n box_data=boxes2, mask_data=masks2)\n\n def test_area(self):\n areas = np_box_mask_list_ops.area(self.box_mask_list1)\n expected_areas = np.array([8.0, 10.0], dtype=float)\n self.assertAllClose(expected_areas, areas)\n\n def test_intersection(self):\n intersection = np_box_mask_list_ops.intersection(self.box_mask_list1,\n self.box_mask_list2)\n expected_intersection = np.array([[8.0, 0.0, 8.0], [0.0, 9.0, 7.0]],\n dtype=float)\n self.assertAllClose(intersection, expected_intersection)\n\n def test_iou(self):\n iou = np_box_mask_list_ops.iou(self.box_mask_list1, self.box_mask_list2)\n expected_iou = np.array(\n [[1.0, 0.0, 8.0 / 25.0], [0.0, 9.0 / 16.0, 7.0 / 28.0]], dtype=float)\n self.assertAllClose(iou, expected_iou)\n\n def test_ioa(self):\n ioa21 = np_box_mask_list_ops.ioa(self.box_mask_list1, self.box_mask_list2)\n expected_ioa21 = np.array([[1.0, 0.0, 8.0 / 25.0],\n [0.0, 9.0 / 15.0, 7.0 / 25.0]],\n dtype=np.float32)\n self.assertAllClose(ioa21, expected_ioa21)\n\n\nclass NonMaximumSuppressionTest(tf.test.TestCase):\n\n def setUp(self):\n boxes1 = np.array(\n [[4.0, 3.0, 7.0, 6.0], [5.0, 6.0, 10.0, 10.0]], dtype=float)\n boxes2 = np.array(\n [[3.0, 4.0, 6.0, 8.0], [5.0, 6.0, 
10.0, 10.0], [1.0, 1.0, 10.0, 10.0]],\n dtype=float)\n masks1 = np.array(\n [[[0, 1, 0], [1, 1, 0], [0, 0, 0]], [[0, 1, 1], [0, 1, 1], [0, 1, 1]]],\n dtype=np.uint8)\n masks2 = np.array(\n [[[0, 1, 0], [1, 1, 1], [0, 0, 0]], [[0, 1, 0], [0, 0, 1], [0, 1, 1]],\n [[0, 1, 1], [0, 1, 1], [0, 1, 1]]],\n dtype=np.uint8)\n self.boxes1 = boxes1\n self.boxes2 = boxes2\n self.masks1 = masks1\n self.masks2 = masks2\n\n def test_with_no_scores_field(self):\n box_mask_list = np_box_mask_list.BoxMaskList(\n box_data=self.boxes1, mask_data=self.masks1)\n max_output_size = 3\n iou_threshold = 0.5\n\n with self.assertRaises(ValueError):\n np_box_mask_list_ops.non_max_suppression(\n box_mask_list, max_output_size, iou_threshold)\n\n def test_nms_disabled_max_output_size_equals_one(self):\n box_mask_list = np_box_mask_list.BoxMaskList(\n box_data=self.boxes2, mask_data=self.masks2)\n box_mask_list.add_field('scores',\n np.array([.9, .75, .6], dtype=float))\n max_output_size = 1\n iou_threshold = 1. # No NMS\n expected_boxes = np.array([[3.0, 4.0, 6.0, 8.0]], dtype=float)\n expected_masks = np.array(\n [[[0, 1, 0], [1, 1, 1], [0, 0, 0]]], dtype=np.uint8)\n nms_box_mask_list = np_box_mask_list_ops.non_max_suppression(\n box_mask_list, max_output_size, iou_threshold)\n self.assertAllClose(nms_box_mask_list.get(), expected_boxes)\n self.assertAllClose(nms_box_mask_list.get_masks(), expected_masks)\n\n def test_multiclass_nms(self):\n boxes = np.array(\n [[0.2, 0.4, 0.8, 0.8], [0.4, 0.2, 0.8, 0.8], [0.6, 0.0, 1.0, 1.0]],\n dtype=np.float32)\n mask0 = np.array([[0, 0, 0, 0, 0],\n [0, 0, 1, 1, 0],\n [0, 0, 1, 1, 0],\n [0, 0, 1, 1, 0],\n [0, 0, 0, 0, 0]],\n dtype=np.uint8)\n mask1 = np.array([[0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 1, 1, 1, 0],\n [0, 1, 1, 1, 0],\n [0, 0, 0, 0, 0]],\n dtype=np.uint8)\n mask2 = np.array([[0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1]],\n dtype=np.uint8)\n masks = np.stack([mask0, mask1, mask2])\n box_mask_list = np_box_mask_list.BoxMaskList(\n box_data=boxes, mask_data=masks)\n scores = np.array([[-0.2, 0.1, 0.5, -0.4, 0.3],\n [0.7, -0.7, 0.6, 0.2, -0.9],\n [0.4, 0.34, -0.9, 0.2, 0.31]],\n dtype=np.float32)\n box_mask_list.add_field('scores', scores)\n box_mask_list_clean = np_box_mask_list_ops.multi_class_non_max_suppression(\n box_mask_list, score_thresh=0.25, iou_thresh=0.1, max_output_size=3)\n\n scores_clean = box_mask_list_clean.get_field('scores')\n classes_clean = box_mask_list_clean.get_field('classes')\n boxes = box_mask_list_clean.get()\n masks = box_mask_list_clean.get_masks()\n expected_scores = np.array([0.7, 0.6, 0.34, 0.31])\n expected_classes = np.array([0, 2, 1, 4])\n expected_boxes = np.array([[0.4, 0.2, 0.8, 0.8],\n [0.4, 0.2, 0.8, 0.8],\n [0.6, 0.0, 1.0, 1.0],\n [0.6, 0.0, 1.0, 1.0]],\n dtype=np.float32)\n self.assertAllClose(scores_clean, expected_scores)\n self.assertAllClose(classes_clean, expected_classes)\n self.assertAllClose(boxes, expected_boxes)\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"ResNet Train/Eval module.\n\"\"\"\nimport time\nimport six\nimport sys\n\nfrom research.resnet import cifar_input\nimport numpy as np\nfrom research.resnet import resnet_model\nimport tensorflow as tf\n\nFLAGS = tf.app.flags.FLAGS\ntf.app.flags.DEFINE_string('dataset', 'cifar10', 'cifar10 or cifar100.')\ntf.app.flags.DEFINE_string('mode', 'train', 'train or eval.')\ntf.app.flags.DEFINE_string('train_data_path', '',\n 'Filepattern for training data.')\ntf.app.flags.DEFINE_string('eval_data_path', '',\n 'Filepattern for eval data')\ntf.app.flags.DEFINE_integer('image_size', 32, 'Image side length.')\ntf.app.flags.DEFINE_string('train_dir', '',\n 'Directory to keep training outputs.')\ntf.app.flags.DEFINE_string('eval_dir', '',\n 'Directory to keep eval outputs.')\ntf.app.flags.DEFINE_integer('eval_batch_count', 50,\n 'Number of batches to eval.')\ntf.app.flags.DEFINE_bool('eval_once', False,\n 'Whether evaluate the model only once.')\ntf.app.flags.DEFINE_string('log_root', '',\n 'Directory to keep the checkpoints. Should be a '\n 'parent directory of FLAGS.train_dir/eval_dir.')\ntf.app.flags.DEFINE_integer('num_gpus', 0,\n 'Number of gpus used for training. 
(0 or 1)')\n\n\ndef train(hps):\n \"\"\"Training loop.\"\"\"\n images, labels = cifar_input.build_input(\n FLAGS.dataset, FLAGS.train_data_path, hps.batch_size, FLAGS.mode)\n model = resnet_model.ResNet(hps, images, labels, FLAGS.mode)\n model.build_graph()\n\n param_stats = tf.contrib.tfprof.model_analyzer.print_model_analysis(\n tf.get_default_graph(),\n tfprof_options=tf.contrib.tfprof.model_analyzer.\n TRAINABLE_VARS_PARAMS_STAT_OPTIONS)\n sys.stdout.write('total_params: %d\\n' % param_stats.total_parameters)\n\n tf.contrib.tfprof.model_analyzer.print_model_analysis(\n tf.get_default_graph(),\n tfprof_options=tf.contrib.tfprof.model_analyzer.FLOAT_OPS_OPTIONS)\n\n truth = tf.argmax(model.labels, axis=1)\n predictions = tf.argmax(model.predictions, axis=1)\n precision = tf.reduce_mean(tf.to_float(tf.equal(predictions, truth)))\n\n summary_hook = tf.train.SummarySaverHook(\n save_steps=100,\n output_dir=FLAGS.train_dir,\n summary_op=tf.summary.merge([model.summaries,\n tf.summary.scalar('Precision', precision)]))\n\n logging_hook = tf.train.LoggingTensorHook(\n tensors={'step': model.global_step,\n 'loss': model.cost,\n 'precision': precision},\n every_n_iter=100)\n\n class _LearningRateSetterHook(tf.train.SessionRunHook):\n \"\"\"Sets learning_rate based on global step.\"\"\"\n\n def begin(self):\n self._lrn_rate = 0.1\n\n def before_run(self, run_context):\n return tf.train.SessionRunArgs(\n model.global_step, # Asks for global step value.\n feed_dict={model.lrn_rate: self._lrn_rate}) # Sets learning rate\n\n def after_run(self, run_context, run_values):\n train_step = run_values.results\n if train_step < 40000:\n self._lrn_rate = 0.1\n elif train_step < 60000:\n self._lrn_rate = 0.01\n elif train_step < 80000:\n self._lrn_rate = 0.001\n else:\n self._lrn_rate = 0.0001\n\n with tf.train.MonitoredTrainingSession(\n checkpoint_dir=FLAGS.log_root,\n hooks=[logging_hook, _LearningRateSetterHook()],\n chief_only_hooks=[summary_hook],\n # Since we provide a SummarySaverHook, we need to disable default\n # SummarySaverHook. 
To do that we set save_summaries_steps to 0.\n save_summaries_steps=0,\n config=tf.ConfigProto(allow_soft_placement=True)) as mon_sess:\n while not mon_sess.should_stop():\n mon_sess.run(model.train_op)\n\n\ndef evaluate(hps):\n \"\"\"Eval loop.\"\"\"\n images, labels = cifar_input.build_input(\n FLAGS.dataset, FLAGS.eval_data_path, hps.batch_size, FLAGS.mode)\n model = resnet_model.ResNet(hps, images, labels, FLAGS.mode)\n model.build_graph()\n saver = tf.train.Saver()\n summary_writer = tf.summary.FileWriter(FLAGS.eval_dir)\n\n sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))\n tf.train.start_queue_runners(sess)\n\n best_precision = 0.0\n while True:\n try:\n ckpt_state = tf.train.get_checkpoint_state(FLAGS.log_root)\n except tf.errors.OutOfRangeError as e:\n tf.logging.error('Cannot restore checkpoint: %s', e)\n continue\n if not (ckpt_state and ckpt_state.model_checkpoint_path):\n tf.logging.info('No model to eval yet at %s', FLAGS.log_root)\n continue\n tf.logging.info('Loading checkpoint %s', ckpt_state.model_checkpoint_path)\n saver.restore(sess, ckpt_state.model_checkpoint_path)\n\n total_prediction, correct_prediction = 0, 0\n for _ in six.moves.range(FLAGS.eval_batch_count):\n (summaries, loss, predictions, truth, train_step) = sess.run(\n [model.summaries, model.cost, model.predictions,\n model.labels, model.global_step])\n\n truth = np.argmax(truth, axis=1)\n predictions = np.argmax(predictions, axis=1)\n correct_prediction += np.sum(truth == predictions)\n total_prediction += predictions.shape[0]\n\n precision = 1.0 * correct_prediction / total_prediction\n best_precision = max(precision, best_precision)\n\n precision_summ = tf.Summary()\n precision_summ.value.add(\n tag='Precision', simple_value=precision)\n summary_writer.add_summary(precision_summ, train_step)\n best_precision_summ = tf.Summary()\n best_precision_summ.value.add(\n tag='Best Precision', simple_value=best_precision)\n summary_writer.add_summary(best_precision_summ, train_step)\n summary_writer.add_summary(summaries, train_step)\n tf.logging.info('loss: %.3f, precision: %.3f, best precision: %.3f' %\n (loss, precision, best_precision))\n summary_writer.flush()\n\n if FLAGS.eval_once:\n break\n\n time.sleep(60)\n\n\ndef main(_):\n if FLAGS.num_gpus == 0:\n dev = '/cpu:0'\n elif FLAGS.num_gpus == 1:\n dev = '/gpu:0'\n else:\n raise ValueError('Only support 0 or 1 gpu.')\n\n if FLAGS.mode == 'train':\n batch_size = 128\n elif FLAGS.mode == 'eval':\n batch_size = 100\n\n if FLAGS.dataset == 'cifar10':\n num_classes = 10\n elif FLAGS.dataset == 'cifar100':\n num_classes = 100\n\n hps = resnet_model.HParams(batch_size=batch_size,\n num_classes=num_classes,\n min_lrn_rate=0.0001,\n lrn_rate=0.1,\n num_residual_units=5,\n use_bottleneck=False,\n weight_decay_rate=0.0002,\n relu_leakiness=0.1,\n optimizer='mom')\n\n with tf.device(dev):\n if FLAGS.mode == 'train':\n train(hps)\n elif FLAGS.mode == 'eval':\n evaluate(hps)\n\n\nif __name__ == '__main__':\n tf.logging.set_verbosity(tf.logging.INFO)\n tf.app.run()\n",
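The _LearningRateSetterHook in the entry above is a piecewise-constant schedule keyed on the global step; a standalone sketch of the same schedule is shown below (hypothetical helper name, not part of resnet_model).

# Standalone sketch of the step-based learning-rate schedule implemented by
# _LearningRateSetterHook above (hypothetical helper, not part of resnet_model).
def piecewise_lr(train_step,
                 boundaries=(40000, 60000, 80000),
                 rates=(0.1, 0.01, 0.001, 0.0001)):
    # `rates` has one more entry than `boundaries`: return the rate for the
    # first boundary the step has not yet reached, else the final rate.
    for boundary, rate in zip(boundaries, rates):
        if train_step < boundary:
            return rate
    return rates[-1]

assert piecewise_lr(0) == 0.1
assert piecewise_lr(50000) == 0.01
assert piecewise_lr(70000) == 0.001
assert piecewise_lr(90000) == 0.0001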
"# Copyright 2017 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Trains TCN models (and baseline comparisons).\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom research.tcn.estimators.get_estimator import get_estimator\nfrom research.tcn.utils import util\nimport tensorflow as tf\n\ntf.logging.set_verbosity(tf.logging.INFO)\n\ntf.flags.DEFINE_string(\n 'config_paths', '',\n \"\"\"\n Path to a YAML configuration files defining FLAG values. Multiple files\n can be separated by the `#` symbol. Files are merged recursively. Setting\n a key in these files is equivalent to setting the FLAG value with\n the same name.\n \"\"\")\ntf.flags.DEFINE_string(\n 'model_params', '{}', 'YAML configuration string for the model parameters.')\ntf.app.flags.DEFINE_string('master', 'local',\n 'BNS name of the TensorFlow master to use')\ntf.app.flags.DEFINE_string(\n 'logdir', '/tmp/tcn', 'Directory where to write event logs.')\ntf.app.flags.DEFINE_integer(\n 'task', 0, 'Task id of the replica running the training.')\ntf.app.flags.DEFINE_integer(\n 'ps_tasks', 0, 'Number of tasks in the ps job. If 0 no ps job is used.')\nFLAGS = tf.app.flags.FLAGS\n\n\ndef main(_):\n \"\"\"Runs main training loop.\"\"\"\n # Parse config dict from yaml config files / command line flags.\n config = util.ParseConfigsToLuaTable(\n FLAGS.config_paths, FLAGS.model_params, save=True, logdir=FLAGS.logdir)\n\n # Choose an estimator based on training strategy.\n estimator = get_estimator(config, FLAGS.logdir)\n\n # Run training\n estimator.train()\n\n\nif __name__ == '__main__':\n tf.app.run()\n",
"# Copyright 2018 Google, Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport tensorflow as tf\nimport matplotlib\nimport numpy as np\nimport re\n\nmatplotlib.use(\"Agg\")\n\n_DEBUG_DISABLE_SUMMARIES = False\n\n\nclass LoggingFileWriter(tf.summary.FileWriter):\n \"\"\"A FileWriter that also logs things out.\n\n This is entirely for ease of debugging / not having to open up Tensorboard\n a lot.\n \"\"\"\n\n def __init__(self, logdir, regexes=[], **kwargs):\n self.regexes = regexes\n super(LoggingFileWriter, self).__init__(logdir, **kwargs)\n\n def add_summary(self, summary, global_step):\n if type(summary) != tf.Summary:\n summary_p = tf.Summary()\n summary_p.ParseFromString(summary)\n summary = summary_p\n for s in summary.value:\n for exists in [re.match(p, s.tag) for p in self.regexes]:\n if exists is not None:\n tf.logging.info(\"%d ] %s : %f\", global_step, s.tag, s.simple_value)\n break\n super(LoggingFileWriter, self).add_summary(summary, global_step)\n\n\ndef image_grid(images, max_grid_size=4, border=1):\n \"\"\"Given images and N, return first N^2 images as an NxN image grid.\n\n Args:\n images: a `Tensor` of size [batch_size, height, width, channels]\n max_grid_size: Maximum image grid height/width\n\n Returns:\n Single image batch, of dim [1, h*n, w*n, c]\n \"\"\"\n batch_size = images.shape.as_list()[0]\n to_pad = int((np.ceil(np.sqrt(batch_size))) ** 2 - batch_size)\n images = tf.pad(images, [[0, to_pad], [0, border], [0, border], [0, 0]])\n\n batch_size = images.shape.as_list()[0]\n grid_size = min(int(np.sqrt(batch_size)), max_grid_size)\n assert images.shape.as_list()[0] >= grid_size * grid_size\n\n # If we have a depth channel\n if images.shape.as_list()[-1] == 4:\n images = images[:grid_size * grid_size, :, :, 0:3]\n depth = tf.image.grayscale_to_rgb(images[:grid_size * grid_size, :, :, 3:4])\n\n images = tf.reshape(images, [-1, images.shape.as_list()[2], 3])\n split = tf.split(images, grid_size, axis=0)\n depth = tf.reshape(depth, [-1, images.shape.as_list()[2], 3])\n depth_split = tf.split(depth, grid_size, axis=0)\n grid = tf.concat(split + depth_split, 1)\n return tf.expand_dims(grid, 0)\n else:\n images = images[:grid_size * grid_size, :, :, :]\n images = tf.reshape(\n images, [-1, images.shape.as_list()[2],\n images.shape.as_list()[3]])\n split = tf.split(value=images, num_or_size_splits=grid_size, axis=0)\n grid = tf.concat(split, 1)\n return tf.expand_dims(grid, 0)\n\n\ndef first_layer_weight_image(weight, shape):\n weight_image = tf.reshape(weight,\n shape + [tf.identity(weight).shape.as_list()[1]])\n # [winx, winy, wout]\n mean, var = tf.nn.moments(weight_image, [0, 1, 2], keep_dims=True)\n # mean, var = tf.nn.moments(weight_image, [0,1], keep_dims=True)\n weight_image = (weight_image - mean) / tf.sqrt(var + 1e-5)\n weight_image = (weight_image + 1.0) / 2.0\n weight_image = tf.clip_by_value(weight_image, 0, 1)\n weight_image = 
tf.transpose(weight_image, (3, 0, 1, 2))\n grid = image_grid(weight_image, max_grid_size=10)\n return grid\n\n\ndef inner_layer_weight_image(weight):\n \"\"\"Visualize a weight matrix of an inner layer.\n Add padding to make it square, then visualize as a gray scale image\n \"\"\"\n weight = tf.identity(weight) # turn into a tensor\n weight = weight / (tf.reduce_max(tf.abs(weight), [0], keep_dims=True))\n weight = tf.reshape(weight, [1] + weight.shape.as_list() + [1])\n return weight\n\n\ndef activation_image(activations, label_onehot):\n \"\"\"Make a row sorted by class for each activation. Put a black line around the activations.\"\"\"\n labels = tf.argmax(label_onehot, axis=1)\n _, n_classes = label_onehot.shape.as_list()\n mean, var = tf.nn.moments(activations, [0, 1])\n activations = (activations - mean) / tf.sqrt(var + 1e-5)\n\n activations = tf.clip_by_value(activations, -1, 1)\n activations = (activations + 1.0) / 2.0 # shift to [0, 1]\n\n canvas = []\n for i in range(n_classes):\n inds = tf.where(tf.equal(labels, i))\n\n def _gather():\n return tf.squeeze(tf.gather(activations, inds), 1)\n\n def _empty():\n return tf.zeros([0, activations.shape.as_list()[1]], dtype=tf.float32)\n\n assert inds.shape.as_list()[0] is None\n x = tf.cond(tf.equal(tf.shape(inds)[0], 0), _empty, _gather)\n canvas.append(x)\n canvas.append(tf.zeros([1, activations.shape.as_list()[1]]))\n canvas = tf.concat(canvas, 0)\n canvas = tf.reshape(canvas, [1, activations.shape.as_list()[0] + n_classes, canvas.shape.as_list()[1], 1])\n return canvas\n\n\ndef sorted_images(images, label_onehot):\n # images is [bs, x, y, c]\n labels = tf.argmax(label_onehot, axis=1)\n _, n_classes = label_onehot.shape.as_list()\n to_stack = []\n for i in range(n_classes):\n inds = tf.where(tf.equal(labels, i))\n\n def _gather():\n return tf.squeeze(tf.gather(images, inds), 1)\n\n def _empty():\n return tf.zeros([0] + images.shape.as_list()[1:], dtype=tf.float32)\n\n assert inds.shape.as_list()[0] is None\n x = tf.cond(tf.equal(tf.shape(inds)[0], 0), _empty, _gather)\n to_stack.append(x)\n # pad / trim all up to 10.\n padded = []\n for t in to_stack:\n n_found = tf.shape(t)[0]\n pad = tf.pad(t[0:10], tf.stack([tf.stack([0, tf.maximum(0, 10 - n_found)]), [0, 0], [0, 0], [0, 0]]))\n padded.append(pad)\n\n xs = [tf.concat(tf.split(p, 10), axis=1) for p in padded]\n ys = tf.concat(xs, axis=2)\n ys = tf.cast(tf.clip_by_value(ys, 0., 1.) * 255., tf.uint8)\n return ys\n",
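The grid layout built by image_grid in the entry above amounts to padding the batch up to a square count and tiling it row-major; a simplified NumPy-only sketch follows (assumes channels-last input; the max_grid_size cap and RGBA/depth special case are omitted).

# Simplified NumPy sketch of the image-grid idea used in image_grid above
# (no max_grid_size cap, no depth-channel handling; illustrative only).
import numpy as np

def image_grid_np(images, border=1):
    # images: [batch, h, w, c] -> one [grid*h, grid*w, c] canvas.
    n = images.shape[0]
    grid = int(np.ceil(np.sqrt(n)))
    pad_count = grid * grid - n
    images = np.pad(images, [(0, pad_count), (0, border), (0, border), (0, 0)])
    _, h, w, c = images.shape
    canvas = images.reshape(grid, grid, h, w, c)
    canvas = canvas.transpose(0, 2, 1, 3, 4).reshape(grid * h, grid * w, c)
    return canvas

batch = np.random.rand(10, 8, 8, 3)
print(image_grid_np(batch).shape)  # (36, 36, 3): 4x4 grid of 9x9 padded tiles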
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests for object_detection.predictors.heads.mask_head.\"\"\"\nimport tensorflow as tf\n\nfrom google.protobuf import text_format\nfrom research.object_detection.builders import hyperparams_builder\nfrom research.object_detection.predictors.heads import keras_mask_head\nfrom research.object_detection.protos import hyperparams_pb2\nfrom research.object_detection.utils import test_case\n\n\nclass ConvolutionalMaskPredictorTest(test_case.TestCase):\n\n @staticmethod\n def _build_conv_hyperparams():\n conv_hyperparams = hyperparams_pb2.Hyperparams()\n conv_hyperparams_text_proto = \"\"\"\n activation: NONE\n regularizer {\n l2_regularizer {\n }\n }\n initializer {\n truncated_normal_initializer {\n }\n }\n \"\"\"\n text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)\n return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)\n\n def test_prediction_size_use_depthwise_false(self):\n conv_hyperparams = self._build_conv_hyperparams()\n mask_prediction_head = keras_mask_head.ConvolutionalMaskHead(\n is_training=True,\n num_classes=20,\n use_dropout=True,\n dropout_keep_prob=0.5,\n kernel_size=3,\n conv_hyperparams=conv_hyperparams,\n freeze_batchnorm=False,\n num_predictions_per_location=1,\n use_depthwise=False,\n mask_height=7,\n mask_width=7)\n image_feature = tf.random_uniform(\n [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)\n mask_predictions = mask_prediction_head(image_feature)\n self.assertAllEqual([64, 323, 20, 7, 7],\n mask_predictions.get_shape().as_list())\n\n # TODO(kaftan): Remove conditional after CMLE moves to TF 1.10\n\n def test_class_agnostic_prediction_size_use_depthwise_false(self):\n conv_hyperparams = self._build_conv_hyperparams()\n mask_prediction_head = keras_mask_head.ConvolutionalMaskHead(\n is_training=True,\n num_classes=20,\n use_dropout=True,\n dropout_keep_prob=0.5,\n kernel_size=3,\n conv_hyperparams=conv_hyperparams,\n freeze_batchnorm=False,\n num_predictions_per_location=1,\n use_depthwise=False,\n mask_height=7,\n mask_width=7,\n masks_are_class_agnostic=True)\n image_feature = tf.random_uniform(\n [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)\n mask_predictions = mask_prediction_head(image_feature)\n self.assertAllEqual([64, 323, 1, 7, 7],\n mask_predictions.get_shape().as_list())\n\n # TODO(kaftan): Remove conditional after CMLE moves to TF 1.10\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for the OpenImages label expansion (OIDHierarchicalLabelsExpansion).\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom research.object_detection.dataset_tools import oid_hierarchical_labels_expansion\n\n\ndef create_test_data():\n hierarchy = {\n 'LabelName':\n 'a',\n 'Subcategory': [{\n 'LabelName': 'b'\n }, {\n 'LabelName': 'c',\n 'Subcategory': [{\n 'LabelName': 'd'\n }, {\n 'LabelName': 'e'\n }]\n }, {\n 'LabelName': 'f',\n 'Subcategory': [{\n 'LabelName': 'd'\n }, ]\n }]\n }\n bbox_rows = [\n '123,xclick,b,1,0.1,0.2,0.1,0.2,1,1,0,0,0',\n '123,xclick,d,1,0.2,0.3,0.1,0.2,1,1,0,0,0'\n ]\n label_rows = [\n '123,verification,b,0', '123,verification,c,0', '124,verification,d,1'\n ]\n return hierarchy, bbox_rows, label_rows\n\n\nclass HierarchicalLabelsExpansionTest(tf.test.TestCase):\n\n def test_bbox_expansion(self):\n hierarchy, bbox_rows, _ = create_test_data()\n expansion_generator = (\n oid_hierarchical_labels_expansion.OIDHierarchicalLabelsExpansion(\n hierarchy))\n all_result_rows = []\n for row in bbox_rows:\n all_result_rows.extend(expansion_generator.expand_boxes_from_csv(row))\n self.assertItemsEqual([\n '123,xclick,b,1,0.1,0.2,0.1,0.2,1,1,0,0,0',\n '123,xclick,d,1,0.2,0.3,0.1,0.2,1,1,0,0,0',\n '123,xclick,f,1,0.2,0.3,0.1,0.2,1,1,0,0,0',\n '123,xclick,c,1,0.2,0.3,0.1,0.2,1,1,0,0,0'\n ], all_result_rows)\n\n def test_labels_expansion(self):\n hierarchy, _, label_rows = create_test_data()\n expansion_generator = (\n oid_hierarchical_labels_expansion.OIDHierarchicalLabelsExpansion(\n hierarchy))\n all_result_rows = []\n for row in label_rows:\n all_result_rows.extend(expansion_generator.expand_labels_from_csv(row))\n self.assertItemsEqual([\n '123,verification,b,0', '123,verification,c,0', '123,verification,d,0',\n '123,verification,e,0', '124,verification,d,1', '124,verification,f,1',\n '124,verification,c,1'\n ], all_result_rows)\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n\"\"\"Tests for results_lib.\"\"\"\n\nimport contextlib\nimport os\nimport shutil\nimport tempfile\nfrom six.moves import xrange\nimport tensorflow as tf\n\nfrom research.brain_coder.single_task import results_lib # brain coder\n\n\[email protected]\ndef temporary_directory(suffix='', prefix='tmp', base_path=None):\n \"\"\"A context manager to create a temporary directory and clean up on exit.\n\n The parameters are the same ones expected by tempfile.mkdtemp.\n The directory will be securely and atomically created.\n Everything under it will be removed when exiting the context.\n\n Args:\n suffix: optional suffix.\n prefix: options prefix.\n base_path: the base path under which to create the temporary directory.\n Yields:\n The absolute path of the new temporary directory.\n \"\"\"\n temp_dir_path = tempfile.mkdtemp(suffix, prefix, base_path)\n try:\n yield temp_dir_path\n finally:\n try:\n shutil.rmtree(temp_dir_path)\n except OSError as e:\n if e.message == 'Cannot call rmtree on a symbolic link':\n # Interesting synthetic exception made up by shutil.rmtree.\n # Means we received a symlink from mkdtemp.\n # Also means must clean up the symlink instead.\n os.unlink(temp_dir_path)\n else:\n raise\n\n\ndef freeze(dictionary):\n \"\"\"Convert dict to hashable frozenset.\"\"\"\n iter_items = [(k, dictionary.get(k)) for k in dictionary]\n return frozenset(iter_items)\n\n\nclass ResultsLibTest(tf.test.TestCase):\n\n def testResults(self):\n with temporary_directory() as logdir:\n results_obj = results_lib.Results(logdir)\n self.assertEqual(results_obj.read_this_shard(), [])\n results_obj.append(\n {'foo': 1.5, 'bar': 2.5, 'baz': 0})\n results_obj.append(\n {'foo': 5.5, 'bar': -1, 'baz': 2})\n self.assertEqual(\n results_obj.read_this_shard(),\n [{'foo': 1.5, 'bar': 2.5, 'baz': 0},\n {'foo': 5.5, 'bar': -1, 'baz': 2}])\n\n # noinspection PyTypeChecker\n def testShardedResults(self):\n with temporary_directory() as logdir:\n n = 4 # Number of shards.\n results_objs = [\n results_lib.Results(logdir, shard_id=i) for i in xrange(n)]\n for i, robj in enumerate(results_objs):\n robj.append({'foo': i, 'bar': 1 + i * 2})\n results_list, _ = results_objs[0].read_all()\n\n # Check results. Order does not matter here.\n self.assertEqual(\n set(freeze(r) for r in results_list),\n set(freeze({'foo': i, 'bar': 1 + i * 2}) for i in xrange(n)))\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for boosted_tree.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport tempfile\n\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\n\n# pylint: disable=g-bad-import-order\nfrom official.boosted_trees import train_higgs\nfrom official.utils.testing import integration\n\nTEST_CSV = os.path.join(os.path.dirname(__file__), \"train_higgs_test.csv\")\n\ntf.logging.set_verbosity(tf.logging.ERROR)\n\n\nclass BaseTest(tf.test.TestCase):\n \"\"\"Tests for Wide Deep model.\"\"\"\n\n @classmethod\n def setUpClass(cls): # pylint: disable=invalid-name\n super(BaseTest, cls).setUpClass()\n train_higgs.define_train_higgs_flags()\n\n def setUp(self):\n # Create temporary CSV file\n self.data_dir = self.get_temp_dir()\n data = pd.read_csv(\n TEST_CSV, dtype=np.float32, names=[\"c%02d\" % i for i in range(29)]\n ).values\n self.input_npz = os.path.join(self.data_dir, train_higgs.NPZ_FILE)\n # numpy.savez doesn't take gfile.Gfile, so need to write down and copy.\n tmpfile = tempfile.NamedTemporaryFile()\n np.savez_compressed(tmpfile, data=data)\n tf.gfile.Copy(tmpfile.name, self.input_npz)\n\n def test_read_higgs_data(self):\n \"\"\"Tests read_higgs_data() function.\"\"\"\n # Error when a wrong data_dir is given.\n with self.assertRaisesRegexp(RuntimeError, \"Error loading data.*\"):\n train_data, eval_data = train_higgs.read_higgs_data(\n self.data_dir + \"non-existing-path\",\n train_start=0, train_count=15, eval_start=15, eval_count=5)\n\n # Loading fine with the correct data_dir.\n train_data, eval_data = train_higgs.read_higgs_data(\n self.data_dir,\n train_start=0, train_count=15, eval_start=15, eval_count=5)\n self.assertEqual((15, 29), train_data.shape)\n self.assertEqual((5, 29), eval_data.shape)\n\n def test_make_inputs_from_np_arrays(self):\n \"\"\"Tests make_inputs_from_np_arrays() function.\"\"\"\n train_data, _ = train_higgs.read_higgs_data(\n self.data_dir,\n train_start=0, train_count=15, eval_start=15, eval_count=5)\n (input_fn, feature_names,\n feature_columns) = train_higgs.make_inputs_from_np_arrays(\n features_np=train_data[:, 1:], label_np=train_data[:, 0:1])\n\n # Check feature_names.\n self.assertAllEqual(feature_names,\n [\"feature_%02d\" % (i + 1) for i in range(28)])\n\n # Check feature columns.\n self.assertEqual(28, len(feature_columns))\n bucketized_column_type = type(\n tf.feature_column.bucketized_column(\n tf.feature_column.numeric_column(\"feature_01\"),\n boundaries=[0, 1, 2])) # dummy boundaries.\n for feature_column in feature_columns:\n self.assertIsInstance(feature_column, bucketized_column_type)\n # At least 2 boundaries.\n self.assertGreaterEqual(len(feature_column.boundaries), 2)\n # Tests that the source column names of the bucketized columns match.\n 
self.assertAllEqual(feature_names,\n [col.source_column.name for col in feature_columns])\n\n # Check features.\n features, labels = input_fn().make_one_shot_iterator().get_next()\n with tf.Session() as sess:\n features, labels = sess.run((features, labels))\n self.assertIsInstance(features, dict)\n self.assertAllEqual(feature_names, sorted(features.keys()))\n self.assertAllEqual([[15, 1]] * 28,\n [features[name].shape for name in feature_names])\n # Validate actual values of some features.\n self.assertAllClose(\n [0.869293, 0.907542, 0.798834, 1.344384, 1.105009, 1.595839,\n 0.409391, 0.933895, 1.405143, 1.176565, 0.945974, 0.739356,\n 1.384097, 1.383548, 1.343652],\n np.squeeze(features[feature_names[0]], 1))\n self.assertAllClose(\n [-0.653674, -0.213641, 1.540659, -0.676015, 1.020974, 0.643109,\n -1.038338, -2.653732, 0.567342, 0.534315, 0.720819, -0.481741,\n 1.409523, -0.307865, 1.474605],\n np.squeeze(features[feature_names[10]], 1))\n\n def test_end_to_end(self):\n \"\"\"Tests end-to-end running.\"\"\"\n model_dir = os.path.join(self.get_temp_dir(), \"model\")\n integration.run_synthetic(\n main=train_higgs.main, tmp_root=self.get_temp_dir(), extra_flags=[\n \"--data_dir\", self.data_dir,\n \"--model_dir\", model_dir,\n \"--n_trees\", \"5\",\n \"--train_start\", \"0\",\n \"--train_count\", \"12\",\n \"--eval_start\", \"12\",\n \"--eval_count\", \"8\",\n ],\n synth=False, max_train=None)\n self.assertTrue(tf.gfile.Exists(os.path.join(model_dir, \"checkpoint\")))\n\n def test_end_to_end_with_export(self):\n \"\"\"Tests end-to-end running.\"\"\"\n model_dir = os.path.join(self.get_temp_dir(), \"model\")\n export_dir = os.path.join(self.get_temp_dir(), \"export\")\n integration.run_synthetic(\n main=train_higgs.main, tmp_root=self.get_temp_dir(), extra_flags=[\n \"--data_dir\", self.data_dir,\n \"--model_dir\", model_dir,\n \"--export_dir\", export_dir,\n \"--n_trees\", \"5\",\n \"--train_start\", \"0\",\n \"--train_count\", \"12\",\n \"--eval_start\", \"12\",\n \"--eval_count\", \"8\",\n ],\n synth=False, max_train=None)\n self.assertTrue(tf.gfile.Exists(os.path.join(model_dir, \"checkpoint\")))\n self.assertTrue(tf.gfile.Exists(os.path.join(export_dir)))\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n# This is the complete code for the following blogpost:\n# https://developers.googleblog.com/2017/09/introducing-tensorflow-datasets.html\n# (https://goo.gl/Ujm2Ep)\n\nimport os\n\nimport six.moves.urllib.request as request\nimport tensorflow as tf\n\n# Check that we have correct TensorFlow version installed\ntf_version = tf.__version__\nprint(\"TensorFlow version: {}\".format(tf_version))\nassert \"1.4\" <= tf_version, \"TensorFlow r1.4 or later is needed\"\n\n# Windows users: You only need to change PATH, rest is platform independent\nPATH = \"/tmp/tf_dataset_and_estimator_apis\"\n\n# Fetch and store Training and Test dataset files\nPATH_DATASET = PATH + os.sep + \"dataset\"\nFILE_TRAIN = PATH_DATASET + os.sep + \"iris_training.csv\"\nFILE_TEST = PATH_DATASET + os.sep + \"iris_test.csv\"\nURL_TRAIN = \"http://download.tensorflow.org/data/iris_training.csv\"\nURL_TEST = \"http://download.tensorflow.org/data/iris_test.csv\"\n\n\ndef download_dataset(url, file):\n if not os.path.exists(PATH_DATASET):\n os.makedirs(PATH_DATASET)\n if not os.path.exists(file):\n data = request.urlopen(url).read()\n with open(file, \"wb\") as f:\n f.write(data)\n f.close()\n\n\ndownload_dataset(URL_TRAIN, FILE_TRAIN)\ndownload_dataset(URL_TEST, FILE_TEST)\n\ntf.logging.set_verbosity(tf.logging.INFO)\n\n# The CSV features in our training & test data\nfeature_names = [\n 'SepalLength',\n 'SepalWidth',\n 'PetalLength',\n 'PetalWidth']\n\n\n# Create an input function reading a file using the Dataset API\n# Then provide the results to the Estimator API\n\n\ndef my_input_fn(file_path, perform_shuffle=False, repeat_count=1):\n def decode_csv(line):\n parsed_line = tf.decode_csv(line, [[0.], [0.], [0.], [0.], [0]])\n label = parsed_line[-1] # Last element is the label\n del parsed_line[-1] # Delete last element\n features = parsed_line # Everything but last elements are the features\n d = dict(zip(feature_names, features)), label\n return d\n\n dataset = (tf.data.TextLineDataset(file_path) # Read text file\n .skip(1) # Skip header row\n .map(decode_csv)) # Transform each elem by applying decode_csv fn\n if perform_shuffle:\n # Randomizes input using a window of 256 elements (read into memory)\n dataset = dataset.shuffle(buffer_size=256)\n dataset = dataset.repeat(repeat_count) # Repeats dataset this # times\n dataset = dataset.batch(32) # Batch size to use\n iterator = dataset.make_one_shot_iterator()\n batch_features, batch_labels = iterator.get_next()\n return batch_features, batch_labels\n\n\nnext_batch = my_input_fn(FILE_TRAIN, True) # Will return 32 random elements\n\n# Create the feature_columns, which specifies the input to our model\n# All our input features are numeric, so use numeric_column for each one\nfeature_columns = [tf.feature_column.numeric_column(k) for k in feature_names]\n\n# Create a deep neural network 
regression classifier\n# Use the DNNClassifier pre-made estimator\nclassifier = tf.estimator.DNNClassifier(\n feature_columns=feature_columns, # The input features to our model\n hidden_units=[10, 10], # Two layers, each with 10 neurons\n n_classes=3,\n model_dir=PATH) # Path to where checkpoints etc are stored\n\n# Train our model, use the previously defined function my_input_fn\n# Input to training is a file with training example\n# Stop training after 8 iterations of train data (epochs)\nclassifier.train(\n input_fn=lambda: my_input_fn(FILE_TRAIN, True, 8))\n\n# Evaluate our model using the examples contained in FILE_TEST\n# Return value will contain evaluation_metrics such as: loss & average_loss\nevaluate_result = classifier.evaluate(\n input_fn=lambda: my_input_fn(FILE_TEST, False, 4))\nprint(\"Evaluation results\")\nfor key in evaluate_result:\n print(\" {}, was: {}\".format(key, evaluate_result[key]))\n\n# Predict the type of some Iris flowers.\n# Let's predict the examples in FILE_TEST, repeat only once.\npredict_results = classifier.predict(\n input_fn=lambda: my_input_fn(FILE_TEST, False, 1))\nprint(\"Predictions on test file\")\nfor prediction in predict_results:\n # Will print the predicted class, i.e: 0, 1, or 2 if the prediction\n # is Iris Sentosa, Vericolor, Virginica, respectively.\n print(prediction[\"class_ids\"][0])\n\n# Let create a dataset for prediction\n# We've taken the first 3 examples in FILE_TEST\nprediction_input = [[5.9, 3.0, 4.2, 1.5], # -> 1, Iris Versicolor\n [6.9, 3.1, 5.4, 2.1], # -> 2, Iris Virginica\n [5.1, 3.3, 1.7, 0.5]] # -> 0, Iris Sentosa\n\n\ndef new_input_fn():\n def decode(x):\n x = tf.split(x, 4) # Need to split into our 4 features\n return dict(zip(feature_names, x)) # To build a dict of them\n\n dataset = tf.data.Dataset.from_tensor_slices(prediction_input)\n dataset = dataset.map(decode)\n iterator = dataset.make_one_shot_iterator()\n next_feature_batch = iterator.get_next()\n return next_feature_batch, None # In prediction, we have no labels\n\n\n# Predict all our prediction_input\npredict_results = classifier.predict(input_fn=new_input_fn)\n\n# Print results\nprint(\"Predictions:\")\nfor idx, prediction in enumerate(predict_results):\n type = prediction[\"class_ids\"][0] # Get the predicted class (index)\n if type == 0:\n print(\" I think: {}, is Iris Sentosa\".format(prediction_input[idx]))\n elif type == 1:\n print(\" I think: {}, is Iris Versicolor\".format(prediction_input[idx]))\n else:\n print(\" I think: {}, is Iris Virginica\".format(prediction_input[idx]))\n",
"# Copyright 2017 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests for svtcn_loss.py.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nfrom sklearn.metrics.pairwise import euclidean_distances\nfrom research.tcn.estimators import svtcn_loss\nimport tensorflow as tf\n\n\nclass SVTCNLoss(tf.test.TestCase):\n\n def testSVTCNLoss(self):\n with self.test_session():\n num_data = 64\n num_sequences = 2\n num_data_per_seq = num_data // num_sequences\n feat_dim = 6\n margin = 1.0\n times = np.tile(np.arange(num_data_per_seq, dtype=np.int32),\n num_sequences)\n times = np.reshape(times, [times.shape[0], 1])\n sequence_ids = np.concatenate(\n [np.ones(num_data_per_seq) * i for i in range(num_sequences)])\n sequence_ids = np.reshape(sequence_ids, [sequence_ids.shape[0], 1])\n\n pos_radius = 6\n neg_radius = 12\n\n embedding = np.random.rand(num_data, feat_dim).astype(np.float32)\n\n # Compute the loss in NP\n\n # Get a positive mask, i.e. indices for each time index\n # that are inside the positive range.\n in_pos_range = np.less_equal(\n np.abs(times - times.transpose()), pos_radius)\n\n # Get a negative mask, i.e. indices for each time index\n # that are inside the negative range (> t + (neg_mult * pos_radius)\n # and < t - (neg_mult * pos_radius).\n in_neg_range = np.greater(np.abs(times - times.transpose()), neg_radius)\n\n sequence_adjacency = sequence_ids == sequence_ids.T\n sequence_adjacency_not = np.logical_not(sequence_adjacency)\n\n pdist_matrix = euclidean_distances(embedding, squared=True)\n loss_np = 0.0\n num_positives = 0.0\n for i in range(num_data):\n for j in range(num_data):\n if in_pos_range[i, j] and i != j and sequence_adjacency[i, j]:\n num_positives += 1.0\n\n pos_distance = pdist_matrix[i][j]\n neg_distances = []\n\n for k in range(num_data):\n if in_neg_range[i, k] or sequence_adjacency_not[i, k]:\n neg_distances.append(pdist_matrix[i][k])\n\n neg_distances.sort() # sort by distance\n chosen_neg_distance = neg_distances[0]\n\n for l in range(len(neg_distances)):\n chosen_neg_distance = neg_distances[l]\n if chosen_neg_distance > pos_distance:\n break\n\n loss_np += np.maximum(\n 0.0, margin - chosen_neg_distance + pos_distance)\n\n loss_np /= num_positives\n\n # Compute the loss in TF\n loss_tf = svtcn_loss.singleview_tcn_loss(\n embeddings=tf.convert_to_tensor(embedding),\n timesteps=tf.convert_to_tensor(times),\n pos_radius=pos_radius,\n neg_radius=neg_radius,\n margin=margin,\n sequence_ids=tf.convert_to_tensor(sequence_ids),\n multiseq=True\n )\n loss_tf = loss_tf.eval()\n self.assertAllClose(loss_np, loss_tf)\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Copyright 2017 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Loads icp op.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl import logging\nimport tensorflow as tf\n\ntry:\n icp_op_module = tf.load_op_library('./ops/icp_op.so')\n icp = icp_op_module.icp\nexcept Exception: # pylint: disable=broad-except\n logging.error('Could not load object file for ICP op.')\n icp = None\n",
"# Copyright 2018 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Transforms used in the Augmentation Policies.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport random\nimport numpy as np\n# pylint:disable=g-multiple-import\nfrom PIL import ImageOps, ImageEnhance, ImageFilter, Image\n\n# pylint:enable=g-multiple-import\n\n\nIMAGE_SIZE = 32\n# What is the dataset mean and std of the images on the training set\nMEANS = [0.49139968, 0.48215841, 0.44653091]\nSTDS = [0.24703223, 0.24348513, 0.26158784]\nPARAMETER_MAX = 10 # What is the max 'level' a transform could be predicted\n\n\ndef random_flip(x):\n \"\"\"Flip the input x horizontally with 50% probability.\"\"\"\n if np.random.rand(1)[0] > 0.5:\n return np.fliplr(x)\n return x\n\n\ndef zero_pad_and_crop(img, amount=4):\n \"\"\"Zero pad by `amount` zero pixels on each side then take a random crop.\n\n Args:\n img: numpy image that will be zero padded and cropped.\n amount: amount of zeros to pad `img` with horizontally and verically.\n\n Returns:\n The cropped zero padded img. The returned numpy array will be of the same\n shape as `img`.\n \"\"\"\n padded_img = np.zeros((img.shape[0] + amount * 2, img.shape[1] + amount * 2,\n img.shape[2]))\n padded_img[amount:img.shape[0] + amount, amount:\n img.shape[1] + amount, :] = img\n top = np.random.randint(low=0, high=2 * amount)\n left = np.random.randint(low=0, high=2 * amount)\n new_img = padded_img[top:top + img.shape[0], left:left + img.shape[1], :]\n return new_img\n\n\ndef create_cutout_mask(img_height, img_width, num_channels, size):\n \"\"\"Creates a zero mask used for cutout of shape `img_height` x `img_width`.\n\n Args:\n img_height: Height of image cutout mask will be applied to.\n img_width: Width of image cutout mask will be applied to.\n num_channels: Number of channels in the image.\n size: Size of the zeros mask.\n\n Returns:\n A mask of shape `img_height` x `img_width` with all ones except for a\n square of zeros of shape `size` x `size`. This mask is meant to be\n elementwise multiplied with the original image. 
Additionally returns\n the `upper_coord` and `lower_coord` which specify where the cutout mask\n will be applied.\n \"\"\"\n assert img_height == img_width\n\n # Sample center where cutout mask will be applied\n height_loc = np.random.randint(low=0, high=img_height)\n width_loc = np.random.randint(low=0, high=img_width)\n\n # Determine upper right and lower left corners of patch\n upper_coord = (max(0, height_loc - size // 2), max(0, width_loc - size // 2))\n lower_coord = (min(img_height, height_loc + size // 2),\n min(img_width, width_loc + size // 2))\n mask_height = lower_coord[0] - upper_coord[0]\n mask_width = lower_coord[1] - upper_coord[1]\n assert mask_height > 0\n assert mask_width > 0\n\n mask = np.ones((img_height, img_width, num_channels))\n zeros = np.zeros((mask_height, mask_width, num_channels))\n mask[upper_coord[0]:lower_coord[0], upper_coord[1]:lower_coord[1], :] = (\n zeros)\n return mask, upper_coord, lower_coord\n\n\ndef cutout_numpy(img, size=16):\n \"\"\"Apply cutout with mask of shape `size` x `size` to `img`.\n\n The cutout operation is from the paper https://arxiv.org/abs/1708.04552.\n This operation applies a `size`x`size` mask of zeros to a random location\n within `img`.\n\n Args:\n img: Numpy image that cutout will be applied to.\n size: Height/width of the cutout mask that will be\n\n Returns:\n A numpy tensor that is the result of applying the cutout mask to `img`.\n \"\"\"\n img_height, img_width, num_channels = (img.shape[0], img.shape[1],\n img.shape[2])\n assert len(img.shape) == 3\n mask, _, _ = create_cutout_mask(img_height, img_width, num_channels, size)\n return img * mask\n\n\ndef float_parameter(level, maxval):\n \"\"\"Helper function to scale `val` between 0 and maxval .\n\n Args:\n level: Level of the operation that will be between [0, `PARAMETER_MAX`].\n maxval: Maximum value that the operation can have. This will be scaled\n to level/PARAMETER_MAX.\n\n Returns:\n A float that results from scaling `maxval` according to `level`.\n \"\"\"\n return float(level) * maxval / PARAMETER_MAX\n\n\ndef int_parameter(level, maxval):\n \"\"\"Helper function to scale `val` between 0 and maxval .\n\n Args:\n level: Level of the operation that will be between [0, `PARAMETER_MAX`].\n maxval: Maximum value that the operation can have. 
This will be scaled\n to level/PARAMETER_MAX.\n\n Returns:\n An int that results from scaling `maxval` according to `level`.\n \"\"\"\n return int(level * maxval / PARAMETER_MAX)\n\n\ndef pil_wrap(img):\n \"\"\"Convert the `img` numpy tensor to a PIL Image.\"\"\"\n return Image.fromarray(\n np.uint8((img * STDS + MEANS) * 255.0)).convert('RGBA')\n\n\ndef pil_unwrap(pil_img):\n \"\"\"Converts the PIL img to a numpy array.\"\"\"\n pic_array = (np.array(pil_img.getdata()).reshape((32, 32, 4)) / 255.0)\n i1, i2 = np.where(pic_array[:, :, 3] == 0)\n pic_array = (pic_array[:, :, :3] - MEANS) / STDS\n pic_array[i1, i2] = [0, 0, 0]\n return pic_array\n\n\ndef apply_policy(policy, img):\n \"\"\"Apply the `policy` to the numpy `img`.\n\n Args:\n policy: A list of tuples with the form (name, probability, level) where\n `name` is the name of the augmentation operation to apply, `probability`\n is the probability of applying the operation and `level` is what strength\n the operation to apply.\n img: Numpy image that will have `policy` applied to it.\n\n Returns:\n The result of applying `policy` to `img`.\n \"\"\"\n pil_img = pil_wrap(img)\n for xform in policy:\n assert len(xform) == 3\n name, probability, level = xform\n xform_fn = NAME_TO_TRANSFORM[name].pil_transformer(probability, level)\n pil_img = xform_fn(pil_img)\n return pil_unwrap(pil_img)\n\n\nclass TransformFunction(object):\n \"\"\"Wraps the Transform function for pretty printing options.\"\"\"\n\n def __init__(self, func, name):\n self.f = func\n self.name = name\n\n def __repr__(self):\n return '<' + self.name + '>'\n\n def __call__(self, pil_img):\n return self.f(pil_img)\n\n\nclass TransformT(object):\n \"\"\"Each instance of this class represents a specific transform.\"\"\"\n\n def __init__(self, name, xform_fn):\n self.name = name\n self.xform = xform_fn\n\n def pil_transformer(self, probability, level):\n def return_function(im):\n if random.random() < probability:\n im = self.xform(im, level)\n return im\n\n name = self.name + '({:.1f},{})'.format(probability, level)\n return TransformFunction(return_function, name)\n\n def do_transform(self, image, level):\n f = self.pil_transformer(PARAMETER_MAX, level)\n return pil_unwrap(f(pil_wrap(image)))\n\n\n################## Transform Functions ##################\nidentity = TransformT('identity', lambda pil_img, level: pil_img)\nflip_lr = TransformT(\n 'FlipLR',\n lambda pil_img, level: pil_img.transpose(Image.FLIP_LEFT_RIGHT))\nflip_ud = TransformT(\n 'FlipUD',\n lambda pil_img, level: pil_img.transpose(Image.FLIP_TOP_BOTTOM))\n# pylint:disable=g-long-lambda\nauto_contrast = TransformT(\n 'AutoContrast',\n lambda pil_img, level: ImageOps.autocontrast(\n pil_img.convert('RGB')).convert('RGBA'))\nequalize = TransformT(\n 'Equalize',\n lambda pil_img, level: ImageOps.equalize(\n pil_img.convert('RGB')).convert('RGBA'))\ninvert = TransformT(\n 'Invert',\n lambda pil_img, level: ImageOps.invert(\n pil_img.convert('RGB')).convert('RGBA'))\n# pylint:enable=g-long-lambda\nblur = TransformT(\n 'Blur', lambda pil_img, level: pil_img.filter(ImageFilter.BLUR))\nsmooth = TransformT(\n 'Smooth',\n lambda pil_img, level: pil_img.filter(ImageFilter.SMOOTH))\n\n\ndef _rotate_impl(pil_img, level):\n \"\"\"Rotates `pil_img` from -30 to 30 degrees depending on `level`.\"\"\"\n degrees = int_parameter(level, 30)\n if random.random() > 0.5:\n degrees = -degrees\n return pil_img.rotate(degrees)\n\n\nrotate = TransformT('Rotate', _rotate_impl)\n\n\ndef _posterize_impl(pil_img, level):\n \"\"\"Applies PIL 
Posterize to `pil_img`.\"\"\"\n level = int_parameter(level, 4)\n return ImageOps.posterize(pil_img.convert('RGB'), 4 - level).convert('RGBA')\n\n\nposterize = TransformT('Posterize', _posterize_impl)\n\n\ndef _shear_x_impl(pil_img, level):\n \"\"\"Applies PIL ShearX to `pil_img`.\n\n The ShearX operation shears the image along the horizontal axis with `level`\n magnitude.\n\n Args:\n pil_img: Image in PIL object.\n level: Strength of the operation specified as an Integer from\n [0, `PARAMETER_MAX`].\n\n Returns:\n A PIL Image that has had ShearX applied to it.\n \"\"\"\n level = float_parameter(level, 0.3)\n if random.random() > 0.5:\n level = -level\n return pil_img.transform((32, 32), Image.AFFINE, (1, level, 0, 0, 1, 0))\n\n\nshear_x = TransformT('ShearX', _shear_x_impl)\n\n\ndef _shear_y_impl(pil_img, level):\n \"\"\"Applies PIL ShearY to `pil_img`.\n\n The ShearY operation shears the image along the vertical axis with `level`\n magnitude.\n\n Args:\n pil_img: Image in PIL object.\n level: Strength of the operation specified as an Integer from\n [0, `PARAMETER_MAX`].\n\n Returns:\n A PIL Image that has had ShearX applied to it.\n \"\"\"\n level = float_parameter(level, 0.3)\n if random.random() > 0.5:\n level = -level\n return pil_img.transform((32, 32), Image.AFFINE, (1, 0, 0, level, 1, 0))\n\n\nshear_y = TransformT('ShearY', _shear_y_impl)\n\n\ndef _translate_x_impl(pil_img, level):\n \"\"\"Applies PIL TranslateX to `pil_img`.\n\n Translate the image in the horizontal direction by `level`\n number of pixels.\n\n Args:\n pil_img: Image in PIL object.\n level: Strength of the operation specified as an Integer from\n [0, `PARAMETER_MAX`].\n\n Returns:\n A PIL Image that has had TranslateX applied to it.\n \"\"\"\n level = int_parameter(level, 10)\n if random.random() > 0.5:\n level = -level\n return pil_img.transform((32, 32), Image.AFFINE, (1, 0, level, 0, 1, 0))\n\n\ntranslate_x = TransformT('TranslateX', _translate_x_impl)\n\n\ndef _translate_y_impl(pil_img, level):\n \"\"\"Applies PIL TranslateY to `pil_img`.\n\n Translate the image in the vertical direction by `level`\n number of pixels.\n\n Args:\n pil_img: Image in PIL object.\n level: Strength of the operation specified as an Integer from\n [0, `PARAMETER_MAX`].\n\n Returns:\n A PIL Image that has had TranslateY applied to it.\n \"\"\"\n level = int_parameter(level, 10)\n if random.random() > 0.5:\n level = -level\n return pil_img.transform((32, 32), Image.AFFINE, (1, 0, 0, 0, 1, level))\n\n\ntranslate_y = TransformT('TranslateY', _translate_y_impl)\n\n\ndef _crop_impl(pil_img, level, interpolation=Image.BILINEAR):\n \"\"\"Applies a crop to `pil_img` with the size depending on the `level`.\"\"\"\n cropped = pil_img.crop((level, level, IMAGE_SIZE - level, IMAGE_SIZE - level))\n resized = cropped.resize((IMAGE_SIZE, IMAGE_SIZE), interpolation)\n return resized\n\n\ncrop_bilinear = TransformT('CropBilinear', _crop_impl)\n\n\ndef _solarize_impl(pil_img, level):\n \"\"\"Applies PIL Solarize to `pil_img`.\n\n Translate the image in the vertical direction by `level`\n number of pixels.\n\n Args:\n pil_img: Image in PIL object.\n level: Strength of the operation specified as an Integer from\n [0, `PARAMETER_MAX`].\n\n Returns:\n A PIL Image that has had Solarize applied to it.\n \"\"\"\n level = int_parameter(level, 256)\n return ImageOps.solarize(pil_img.convert('RGB'), 256 - level).convert('RGBA')\n\n\nsolarize = TransformT('Solarize', _solarize_impl)\n\n\ndef _cutout_pil_impl(pil_img, level):\n \"\"\"Apply cutout to pil_img at the 
specified level.\"\"\"\n size = int_parameter(level, 20)\n if size <= 0:\n return pil_img\n img_height, img_width, num_channels = (32, 32, 3)\n _, upper_coord, lower_coord = (\n create_cutout_mask(img_height, img_width, num_channels, size))\n pixels = pil_img.load() # create the pixel map\n for i in range(upper_coord[0], lower_coord[0]): # for every col:\n for j in range(upper_coord[1], lower_coord[1]): # For every row\n pixels[i, j] = (125, 122, 113, 0) # set the colour accordingly\n return pil_img\n\n\ncutout = TransformT('Cutout', _cutout_pil_impl)\n\n\ndef _enhancer_impl(enhancer):\n \"\"\"Sets level to be between 0.1 and 1.8 for ImageEnhance transforms of PIL.\"\"\"\n\n def impl(pil_img, level):\n v = float_parameter(level, 1.8) + .1 # going to 0 just destroys it\n return enhancer(pil_img).enhance(v)\n\n return impl\n\n\ncolor = TransformT('Color', _enhancer_impl(ImageEnhance.Color))\ncontrast = TransformT('Contrast', _enhancer_impl(ImageEnhance.Contrast))\nbrightness = TransformT('Brightness', _enhancer_impl(\n ImageEnhance.Brightness))\nsharpness = TransformT('Sharpness', _enhancer_impl(ImageEnhance.Sharpness))\n\nALL_TRANSFORMS = [\n flip_lr,\n flip_ud,\n auto_contrast,\n equalize,\n invert,\n rotate,\n posterize,\n crop_bilinear,\n solarize,\n color,\n contrast,\n brightness,\n sharpness,\n shear_x,\n shear_y,\n translate_x,\n translate_y,\n cutout,\n blur,\n smooth\n]\n\nNAME_TO_TRANSFORM = {t.name: t for t in ALL_TRANSFORMS}\nTRANSFORM_NAMES = NAME_TO_TRANSFORM.keys()\n",
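All of the transforms above share one level-to-magnitude convention (PARAMETER_MAX = 10, with a random sign for the geometric ops); a quick standalone check of that scaling is shown below, with the example values chosen to match the constants in the module.

# Standalone check of the level -> magnitude scaling used by the transforms
# above (PARAMETER_MAX = 10, as in the module; approximate float comparison).
PARAMETER_MAX = 10

def float_parameter(level, maxval):
    return float(level) * maxval / PARAMETER_MAX

def int_parameter(level, maxval):
    return int(level * maxval / PARAMETER_MAX)

assert abs(float_parameter(5, 0.3) - 0.15) < 1e-9  # ShearX at level 5
assert int_parameter(3, 30) == 9                   # Rotate at level 3 -> 9 deg
assert int_parameter(10, 256) == 256               # Solarize at max level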
"# Copyright 2018 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"A Multitask Gaussian process.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl import flags\nfrom absl import logging\n\nimport numpy as np\nimport tensorflow as tf\nfrom research.deep_contextual_bandits.bandits.core.bayesian_nn import BayesianNN\n\nFLAGS = flags.FLAGS\ntfd = tf.contrib.distributions\n\nclass MultitaskGP(BayesianNN):\n \"\"\"Implements a Gaussian process with multi-task outputs.\n\n Optimizes the hyperparameters over the log marginal likelihood.\n Uses a Matern 3/2 + linear covariance and returns\n sampled predictions for test inputs. The outputs are optionally\n correlated where the correlation structure is learned through latent\n embeddings of the tasks.\n \"\"\"\n\n def __init__(self, hparams):\n self.name = \"MultiTaskGP\"\n self.hparams = hparams\n\n self.n_in = self.hparams.context_dim\n self.n_out = self.hparams.num_outputs\n self.keep_fixed_after_max_obs = self.hparams.keep_fixed_after_max_obs\n\n self._show_training = self.hparams.show_training\n self._freq_summary = self.hparams.freq_summary\n\n # Dimensionality of the latent task vectors\n self.task_latent_dim = self.hparams.task_latent_dim\n\n # Maximum number of observations to include\n self.max_num_points = self.hparams.max_num_points\n\n if self.hparams.learn_embeddings:\n self.learn_embeddings = self.hparams.learn_embeddings\n else:\n self.learn_embeddings = False\n\n # create the graph corresponding to the BNN instance\n self.graph = tf.Graph()\n with self.graph.as_default():\n # store a new session for the graph\n self.sess = tf.Session()\n\n with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE):\n self.n = tf.placeholder(shape=[], dtype=tf.float64)\n self.x = tf.placeholder(shape=[None, self.n_in], dtype=tf.float64)\n self.x_in = tf.placeholder(shape=[None, self.n_in], dtype=tf.float64)\n self.y = tf.placeholder(shape=[None, self.n_out], dtype=tf.float64)\n self.weights = tf.placeholder(shape=[None, self.n_out],\n dtype=tf.float64)\n\n self.build_model()\n self.sess.run(tf.global_variables_initializer())\n\n def atleast_2d(self, x, dims):\n return tf.reshape(tf.expand_dims(x, axis=0), (-1, dims))\n\n def sq_dist(self, x, x2):\n a2 = tf.reduce_sum(tf.square(x), 1)\n b2 = tf.reduce_sum(tf.square(x2), 1)\n sqdists = tf.expand_dims(a2, 1) + b2 - 2.0 * tf.matmul(x, tf.transpose(x2))\n return sqdists\n\n # Covariance between outputs\n def task_cov(self, x, x2):\n \"\"\"Squared Exponential Covariance Kernel over latent task embeddings.\"\"\"\n # Index into latent task vectors\n x_vecs = tf.gather(self.task_vectors, tf.argmax(x, axis=1), axis=0)\n x2_vecs = tf.gather(self.task_vectors, tf.argmax(x2, axis=1), axis=0)\n r = self.sq_dist(self.atleast_2d(x_vecs, self.task_latent_dim),\n self.atleast_2d(x2_vecs, 
self.task_latent_dim))\n return tf.exp(-r)\n\n def cov(self, x, x2):\n \"\"\"Matern 3/2 + Linear Gaussian Process Covariance Function.\"\"\"\n ls = tf.clip_by_value(self.length_scales, -5.0, 5.0)\n ls_lin = tf.clip_by_value(self.length_scales_lin, -5.0, 5.0)\n r = self.sq_dist(self.atleast_2d(x, self.n_in)/tf.nn.softplus(ls),\n self.atleast_2d(x2, self.n_in)/tf.nn.softplus(ls))\n r = tf.clip_by_value(r, 0, 1e8)\n\n # Matern 3/2 Covariance\n matern = (1.0 + tf.sqrt(3.0*r + 1e-16)) * tf.exp(-tf.sqrt(3.0*r + 1e-16))\n # Linear Covariance\n lin = tf.matmul(x / tf.nn.softplus(ls_lin),\n x2 / tf.nn.softplus(ls_lin), transpose_b=True)\n return (tf.nn.softplus(self.amplitude) * matern +\n tf.nn.softplus(self.amplitude_linear) * lin)\n\n def build_model(self):\n \"\"\"Defines the GP model.\n\n The loss is computed for partial feedback settings (bandits), so only\n the observed outcome is backpropagated (see weighted loss).\n Selects the optimizer and, finally, it also initializes the graph.\n \"\"\"\n\n logging.info(\"Initializing model %s.\", self.name)\n self.global_step = tf.train.get_or_create_global_step()\n\n # Define state for the model (inputs, etc.)\n self.x_train = tf.get_variable(\n \"training_data\",\n initializer=tf.ones(\n [self.hparams.batch_size, self.n_in], dtype=tf.float64),\n validate_shape=False,\n trainable=False)\n self.y_train = tf.get_variable(\n \"training_labels\",\n initializer=tf.zeros([self.hparams.batch_size, 1], dtype=tf.float64),\n validate_shape=False,\n trainable=False)\n self.weights_train = tf.get_variable(\n \"weights_train\",\n initializer=tf.ones(\n [self.hparams.batch_size, self.n_out], dtype=tf.float64),\n validate_shape=False,\n trainable=False)\n self.input_op = tf.assign(self.x_train, self.x_in, validate_shape=False)\n self.input_w_op = tf.assign(\n self.weights_train, self.weights, validate_shape=False)\n\n self.input_std = tf.get_variable(\n \"data_standard_deviation\",\n initializer=tf.ones([1, self.n_out], dtype=tf.float64),\n dtype=tf.float64,\n trainable=False)\n self.input_mean = tf.get_variable(\n \"data_mean\",\n initializer=tf.zeros([1, self.n_out], dtype=tf.float64),\n dtype=tf.float64,\n trainable=True)\n\n # GP Hyperparameters\n self.noise = tf.get_variable(\n \"noise\", initializer=tf.cast(0.0, dtype=tf.float64))\n self.amplitude = tf.get_variable(\n \"amplitude\", initializer=tf.cast(1.0, dtype=tf.float64))\n self.amplitude_linear = tf.get_variable(\n \"linear_amplitude\", initializer=tf.cast(1.0, dtype=tf.float64))\n self.length_scales = tf.get_variable(\n \"length_scales\", initializer=tf.zeros([1, self.n_in], dtype=tf.float64))\n self.length_scales_lin = tf.get_variable(\n \"length_scales_linear\",\n initializer=tf.zeros([1, self.n_in], dtype=tf.float64))\n\n # Latent embeddings of the different outputs for task covariance\n self.task_vectors = tf.get_variable(\n \"latent_task_vectors\",\n initializer=tf.random_normal(\n [self.n_out, self.task_latent_dim], dtype=tf.float64))\n\n # Normalize outputs across each dimension\n # Since we have different numbers of observations across each task, we\n # normalize by their respective counts.\n index_counts = self.atleast_2d(tf.reduce_sum(self.weights, axis=0),\n self.n_out)\n index_counts = tf.where(index_counts > 0, index_counts,\n tf.ones(tf.shape(index_counts), dtype=tf.float64))\n self.mean_op = tf.assign(self.input_mean,\n tf.reduce_sum(self.y, axis=0) / index_counts)\n self.var_op = tf.assign(\n self.input_std, tf.sqrt(1e-4 + tf.reduce_sum(tf.square(\n self.y - tf.reduce_sum(self.y, axis=0) 
/ index_counts), axis=0)\n / index_counts))\n\n with tf.control_dependencies([self.var_op]):\n y_normed = self.atleast_2d(\n (self.y - self.input_mean) / self.input_std, self.n_out)\n y_normed = self.atleast_2d(tf.boolean_mask(y_normed, self.weights > 0), 1)\n self.out_op = tf.assign(self.y_train, y_normed, validate_shape=False)\n\n # Observation noise\n alpha = tf.nn.softplus(self.noise) + 1e-6\n\n # Covariance\n with tf.control_dependencies([self.input_op, self.input_w_op, self.out_op]):\n self.self_cov = (self.cov(self.x_in, self.x_in) *\n self.task_cov(self.weights, self.weights) +\n tf.eye(tf.shape(self.x_in)[0], dtype=tf.float64) * alpha)\n\n self.chol = tf.cholesky(self.self_cov)\n self.kinv = tf.cholesky_solve(self.chol, tf.eye(tf.shape(self.x_in)[0],\n dtype=tf.float64))\n\n self.input_inv = tf.Variable(\n tf.eye(self.hparams.batch_size, dtype=tf.float64),\n validate_shape=False,\n trainable=False)\n self.input_cov_op = tf.assign(self.input_inv, self.kinv,\n validate_shape=False)\n\n # Log determinant by taking the singular values along the diagonal\n # of self.chol\n with tf.control_dependencies([self.input_cov_op]):\n logdet = 2.0 * tf.reduce_sum(tf.log(tf.diag_part(self.chol) + 1e-16))\n\n # Log Marginal likelihood\n self.marginal_ll = -tf.reduce_sum(-0.5 * tf.matmul(\n tf.transpose(y_normed), tf.matmul(self.kinv, y_normed)) - 0.5 * logdet -\n 0.5 * self.n * np.log(2 * np.pi))\n\n zero = tf.cast(0., dtype=tf.float64)\n one = tf.cast(1., dtype=tf.float64)\n standard_normal = tfd.Normal(loc=zero, scale=one)\n\n # Loss is marginal likelihood and priors\n self.loss = tf.reduce_sum(\n self.marginal_ll -\n (standard_normal.log_prob(self.amplitude) +\n standard_normal.log_prob(tf.exp(self.noise)) +\n standard_normal.log_prob(self.amplitude_linear) +\n tfd.Normal(loc=zero, scale=one * 10.).log_prob(\n self.task_vectors))\n )\n\n # Optimizer for hyperparameters\n optimizer = tf.train.AdamOptimizer(learning_rate=self.hparams.lr)\n vars_to_optimize = [\n self.amplitude, self.length_scales, self.length_scales_lin,\n self.amplitude_linear, self.noise, self.input_mean\n ]\n\n if self.learn_embeddings:\n vars_to_optimize.append(self.task_vectors)\n grads = optimizer.compute_gradients(self.loss, vars_to_optimize)\n self.train_op = optimizer.apply_gradients(grads,\n global_step=self.global_step)\n\n # Predictions for test data\n self.y_mean, self.y_pred = self.posterior_mean_and_sample(self.x)\n\n # create tensorboard metrics\n self.create_summaries()\n self.summary_writer = tf.summary.FileWriter(\"{}/graph_{}\".format(\n FLAGS.logdir, self.name), self.sess.graph)\n self.check = tf.add_check_numerics_ops()\n\n def posterior_mean_and_sample(self, candidates):\n \"\"\"Draw samples for test predictions.\n\n Given a Tensor of 'candidates' inputs, returns samples from the posterior\n and the posterior mean prediction for those inputs.\n\n Args:\n candidates: A (num-examples x num-dims) Tensor containing the inputs for\n which to return predictions.\n Returns:\n y_mean: The posterior mean prediction given these inputs\n y_sample: A sample from the posterior of the outputs given these inputs\n \"\"\"\n # Cross-covariance for test predictions\n w = tf.identity(self.weights_train)\n inds = tf.squeeze(\n tf.reshape(\n tf.tile(\n tf.reshape(tf.range(self.n_out), (self.n_out, 1)),\n (1, tf.shape(candidates)[0])), (-1, 1)))\n\n cross_cov = self.cov(tf.tile(candidates, [self.n_out, 1]), self.x_train)\n cross_task_cov = self.task_cov(tf.one_hot(inds, self.n_out), w)\n cross_cov *= cross_task_cov\n\n # Test 
mean prediction\n y_mean = tf.matmul(cross_cov, tf.matmul(self.input_inv, self.y_train))\n\n # Test sample predictions\n # Note this can be done much more efficiently using Kronecker products\n # if all tasks are fully observed (which we won't assume)\n test_cov = (\n self.cov(tf.tile(candidates, [self.n_out, 1]),\n tf.tile(candidates, [self.n_out, 1])) *\n self.task_cov(tf.one_hot(inds, self.n_out),\n tf.one_hot(inds, self.n_out)) -\n tf.matmul(cross_cov,\n tf.matmul(self.input_inv,\n tf.transpose(cross_cov))))\n\n # Get the matrix square root through an SVD for drawing samples\n # This seems more numerically stable than the Cholesky\n s, _, v = tf.svd(test_cov, full_matrices=True)\n test_sqrt = tf.matmul(v, tf.matmul(tf.diag(s), tf.transpose(v)))\n\n y_sample = (\n tf.matmul(\n test_sqrt,\n tf.random_normal([tf.shape(test_sqrt)[0], 1], dtype=tf.float64)) +\n y_mean)\n\n y_sample = (\n tf.transpose(tf.reshape(y_sample,\n (self.n_out, -1))) * self.input_std +\n self.input_mean)\n\n return y_mean, y_sample\n\n def create_summaries(self):\n with self.graph.as_default():\n tf.summary.scalar(\"loss\", self.loss)\n tf.summary.scalar(\"log_noise\", self.noise)\n tf.summary.scalar(\"log_amp\", self.amplitude)\n tf.summary.scalar(\"log_amp_lin\", self.amplitude_linear)\n tf.summary.histogram(\"length_scales\", self.length_scales)\n tf.summary.histogram(\"length_scales_lin\", self.length_scales_lin)\n self.summary_op = tf.summary.merge_all()\n\n def train(self, data, num_steps):\n \"\"\"Trains the GP for num_steps, using the data in 'data'.\n\n Args:\n data: ContextualDataset object that provides the data.\n num_steps: Number of minibatches to train the network for.\n \"\"\"\n\n logging.info(\"Training %s for %d steps...\", self.name, num_steps)\n for step in range(num_steps):\n numpts = min(data.num_points(None), self.max_num_points)\n if numpts >= self.max_num_points and self.keep_fixed_after_max_obs:\n x = data.contexts[:numpts, :]\n y = data.rewards[:numpts, :]\n weights = np.zeros((x.shape[0], self.n_out))\n for i, val in enumerate(data.actions[:numpts]):\n weights[i, val] = 1.0\n else:\n x, y, weights = data.get_batch_with_weights(numpts)\n\n ops = [\n self.global_step, self.summary_op, self.loss, self.noise,\n self.amplitude, self.amplitude_linear, self.length_scales,\n self.length_scales_lin, self.input_cov_op, self.input_op, self.var_op,\n self.input_w_op, self.out_op, self.train_op\n ]\n\n res = self.sess.run(ops,\n feed_dict={self.x: x,\n self.x_in: x,\n self.y: y,\n self.weights: weights,\n self.n: numpts,\n })\n\n if step % self._freq_summary == 0:\n if self._show_training:\n logging.info(\"step: %d, loss: %g noise: %f amp: %f amp_lin: %f\",\n step, res[2], res[3], res[4], res[5])\n summary = res[1]\n global_step = res[0]\n self.summary_writer.add_summary(summary, global_step=global_step)\n",
"# Copyright 2017 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"General utility functions.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport numpy as np\nimport six\nfrom research.tcn.utils.luatables import T\nimport tensorflow as tf\nimport yaml\nfrom yaml.constructor import ConstructorError\n\n\n# pylint: disable=invalid-name\n\n\ndef GetFilesRecursively(topdir):\n \"\"\"Gets all records recursively for some topdir.\n\n Args:\n topdir: String, path to top directory.\n Returns:\n allpaths: List of Strings, full paths to all leaf records.\n Raises:\n ValueError: If there are no files found for this directory.\n \"\"\"\n assert topdir\n topdir = os.path.expanduser(topdir)\n allpaths = []\n for path, _, leaffiles in tf.gfile.Walk(topdir):\n if leaffiles:\n allpaths.extend([os.path.join(path, i) for i in leaffiles])\n if not allpaths:\n raise ValueError('No files found for top directory %s' % topdir)\n return allpaths\n\n\ndef NoDuplicatesConstructor(loader, node, deep=False):\n \"\"\"Check for duplicate keys.\"\"\"\n mapping = {}\n for key_node, value_node in node.value:\n key = loader.construct_object(key_node, deep=deep)\n value = loader.construct_object(value_node, deep=deep)\n if key in mapping:\n raise ConstructorError('while constructing a mapping', node.start_mark,\n 'found duplicate key (%s)' % key,\n key_node.start_mark)\n mapping[key] = value\n return loader.construct_mapping(node, deep)\n\n\ndef WriteConfigAsYaml(config, logdir, filename):\n \"\"\"Writes a config dict as yaml to logdir/experiment.yml.\"\"\"\n if not tf.gfile.Exists(logdir):\n tf.gfile.MakeDirs(logdir)\n config_filename = os.path.join(logdir, filename)\n with tf.gfile.GFile(config_filename, 'w') as f:\n f.write(yaml.dump(config))\n tf.logging.info('wrote config to %s', config_filename)\n\n\ndef LoadConfigDict(config_paths, model_params):\n \"\"\"Loads config dictionary from specified yaml files or command line yaml.\"\"\"\n\n # Ensure that no duplicate keys can be loaded (causing pain).\n yaml.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,\n NoDuplicatesConstructor)\n\n # Handle either ',' or '#' separated config lists, since borg will only\n # accept '#'.\n sep = ',' if ',' in config_paths else '#'\n\n # Load flags from config file.\n final_config = {}\n if config_paths:\n for config_path in config_paths.split(sep):\n config_path = config_path.strip()\n if not config_path:\n continue\n config_path = os.path.abspath(config_path)\n tf.logging.info('Loading config from %s', config_path)\n with tf.gfile.GFile(config_path.strip()) as config_file:\n config_flags = yaml.load(config_file)\n final_config = DeepMergeDict(final_config, config_flags)\n if model_params:\n model_params = MaybeLoadYaml(model_params)\n final_config = DeepMergeDict(final_config, model_params)\n 
tf.logging.info('Final Config:\\n%s', yaml.dump(final_config))\n return final_config\n\n\ndef MaybeLoadYaml(item):\n \"\"\"Parses item if it's a string. If it's a dictionary it's returned as-is.\"\"\"\n if isinstance(item, six.string_types):\n return yaml.load(item)\n elif isinstance(item, dict):\n return item\n else:\n raise ValueError('Got {}, expected YAML string or dict', type(item))\n\n\ndef DeepMergeDict(dict_x, dict_y, path=None):\n \"\"\"Recursively merges dict_y into dict_x.\"\"\"\n if path is None: path = []\n for key in dict_y:\n if key in dict_x:\n if isinstance(dict_x[key], dict) and isinstance(dict_y[key], dict):\n DeepMergeDict(dict_x[key], dict_y[key], path + [str(key)])\n elif dict_x[key] == dict_y[key]:\n pass # same leaf value\n else:\n dict_x[key] = dict_y[key]\n else:\n dict_x[key] = dict_y[key]\n return dict_x\n\n\ndef ParseConfigsToLuaTable(config_paths, extra_model_params=None,\n save=False, save_name='final_training_config.yml',\n logdir=None):\n \"\"\"Maps config_paths and extra_model_params to a Luatable-like object.\"\"\"\n # Parse config dict from yaml config files / command line flags.\n config = LoadConfigDict(config_paths, extra_model_params)\n if save:\n WriteConfigAsYaml(config, logdir, save_name)\n # Convert config dictionary to T object with dot notation.\n config = RecursivelyConvertToLuatable(config)\n return config\n\n\ndef SetNestedValue(d, keys, value):\n \"\"\"Sets a value in a nested dictionary.\n\n Example:\n d = {}, keys = ['data','augmentation','minscale'], value = 1.0.\n returns {'data': {'augmentation' : {'minscale': 1.0 }}}\n\n Args:\n d: A dictionary to set a nested value in.\n keys: list of dict keys nesting left to right.\n value: the nested value to set.\n Returns:\n None\n \"\"\"\n for key in keys[:-1]:\n d = d.setdefault(key, {})\n d[keys[-1]] = value\n\n\ndef RecursivelyConvertToLuatable(yaml_dict):\n \"\"\"Converts a dictionary to a LuaTable-like T object.\"\"\"\n if isinstance(yaml_dict, dict):\n yaml_dict = T(yaml_dict)\n for key, item in yaml_dict.iteritems():\n if isinstance(item, dict):\n yaml_dict[key] = RecursivelyConvertToLuatable(item)\n return yaml_dict\n\n\ndef KNNIds(query_vec, target_seq, k=1):\n \"\"\"Gets the knn ids to the query vec from the target sequence.\"\"\"\n sorted_distances = KNNIdsWithDistances(query_vec, target_seq, k)\n return [i[0] for i in sorted_distances]\n\n\ndef KNNIdsWithDistances(query_vec, target_seq, k=1):\n \"\"\"Gets the knn ids to the query vec from the target sequence.\"\"\"\n if not isinstance(np.array(target_seq), np.ndarray):\n target_seq = np.array(target_seq)\n assert np.shape(query_vec) == np.shape(target_seq[0])\n distances = [(i, np.linalg.norm(query_vec - target_vec)) for (\n i, target_vec) in enumerate(target_seq)]\n sorted_distances = sorted(distances, key=lambda x: x[1])\n return sorted_distances[:k]\n\n\ndef CopyLocalConfigsToCNS(outdir, configs, gfs_user):\n \"\"\"Copies experiment yaml config files to the job_logdir on /cns.\"\"\"\n assert configs\n assert outdir\n conf_files = configs.split(',')\n for conf_file in conf_files:\n copy_command = 'fileutil --gfs_user %s cp -f %s %s' % (\n gfs_user, conf_file, outdir)\n tf.logging.info(copy_command)\n os.system(copy_command)\n\n\ndef pairwise_distances(feature, squared=True):\n \"\"\"Computes the pairwise distance matrix in numpy.\n\n Args:\n feature: 2-D numpy array of size [number of data, feature dimension]\n squared: Boolean. 
If true, output is the pairwise squared euclidean\n distance matrix; else, output is the pairwise euclidean distance matrix.\n\n Returns:\n pdists: 2-D numpy array of size\n [number of data, number of data].\n \"\"\"\n triu = np.triu_indices(feature.shape[0], 1)\n upper_tri_pdists = np.linalg.norm(feature[triu[1]] - feature[triu[0]], axis=1)\n if squared:\n upper_tri_pdists **= 2.\n num_data = feature.shape[0]\n pdists = np.zeros((num_data, num_data))\n pdists[np.triu_indices(num_data, 1)] = upper_tri_pdists\n # Make symmetrical.\n pdists = pdists + pdists.T - np.diag(\n pdists.diagonal())\n return pdists\n\n\ndef is_tfrecord_input(inp):\n \"\"\"Checks if input is a TFRecord or list of TFRecords.\"\"\"\n\n def _is_tfrecord(inp):\n if not isinstance(inp, str):\n return False\n _, extension = os.path.splitext(inp)\n return extension == '.tfrecord'\n\n if isinstance(inp, str):\n return _is_tfrecord(inp)\n if isinstance(inp, list):\n return all(map(_is_tfrecord, inp))\n return False\n\n\ndef is_np_array(inp):\n if isinstance(inp, np.ndarray):\n return True\n if isinstance(inp, list):\n return all([isinstance(i, np.ndarray) for i in inp])\n return False\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\"\"\"Tests for MobileNet v1.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom research.slim.nets import mobilenet_v1\n\nslim = tf.contrib.slim\n\n\nclass MobilenetV1Test(tf.test.TestCase):\n\n def testBuildClassificationNetwork(self):\n batch_size = 5\n height, width = 224, 224\n num_classes = 1000\n\n inputs = tf.random_uniform((batch_size, height, width, 3))\n logits, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes)\n self.assertTrue(logits.op.name.startswith(\n 'MobilenetV1/Logits/SpatialSqueeze'))\n self.assertListEqual(logits.get_shape().as_list(),\n [batch_size, num_classes])\n self.assertTrue('Predictions' in end_points)\n self.assertListEqual(end_points['Predictions'].get_shape().as_list(),\n [batch_size, num_classes])\n\n def testBuildPreLogitsNetwork(self):\n batch_size = 5\n height, width = 224, 224\n num_classes = None\n\n inputs = tf.random_uniform((batch_size, height, width, 3))\n net, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes)\n self.assertTrue(net.op.name.startswith('MobilenetV1/Logits/AvgPool'))\n self.assertListEqual(net.get_shape().as_list(), [batch_size, 1, 1, 1024])\n self.assertFalse('Logits' in end_points)\n self.assertFalse('Predictions' in end_points)\n\n def testBuildBaseNetwork(self):\n batch_size = 5\n height, width = 224, 224\n\n inputs = tf.random_uniform((batch_size, height, width, 3))\n net, end_points = mobilenet_v1.mobilenet_v1_base(inputs)\n self.assertTrue(net.op.name.startswith('MobilenetV1/Conv2d_13'))\n self.assertListEqual(net.get_shape().as_list(),\n [batch_size, 7, 7, 1024])\n expected_endpoints = ['Conv2d_0',\n 'Conv2d_1_depthwise', 'Conv2d_1_pointwise',\n 'Conv2d_2_depthwise', 'Conv2d_2_pointwise',\n 'Conv2d_3_depthwise', 'Conv2d_3_pointwise',\n 'Conv2d_4_depthwise', 'Conv2d_4_pointwise',\n 'Conv2d_5_depthwise', 'Conv2d_5_pointwise',\n 'Conv2d_6_depthwise', 'Conv2d_6_pointwise',\n 'Conv2d_7_depthwise', 'Conv2d_7_pointwise',\n 'Conv2d_8_depthwise', 'Conv2d_8_pointwise',\n 'Conv2d_9_depthwise', 'Conv2d_9_pointwise',\n 'Conv2d_10_depthwise', 'Conv2d_10_pointwise',\n 'Conv2d_11_depthwise', 'Conv2d_11_pointwise',\n 'Conv2d_12_depthwise', 'Conv2d_12_pointwise',\n 'Conv2d_13_depthwise', 'Conv2d_13_pointwise']\n self.assertItemsEqual(end_points.keys(), expected_endpoints)\n\n def testBuildOnlyUptoFinalEndpoint(self):\n batch_size = 5\n height, width = 224, 224\n endpoints = ['Conv2d_0',\n 'Conv2d_1_depthwise', 'Conv2d_1_pointwise',\n 'Conv2d_2_depthwise', 'Conv2d_2_pointwise',\n 'Conv2d_3_depthwise', 'Conv2d_3_pointwise',\n 'Conv2d_4_depthwise', 'Conv2d_4_pointwise',\n 'Conv2d_5_depthwise', 'Conv2d_5_pointwise',\n 'Conv2d_6_depthwise', 'Conv2d_6_pointwise',\n 'Conv2d_7_depthwise', 'Conv2d_7_pointwise',\n 
'Conv2d_8_depthwise', 'Conv2d_8_pointwise',\n 'Conv2d_9_depthwise', 'Conv2d_9_pointwise',\n 'Conv2d_10_depthwise', 'Conv2d_10_pointwise',\n 'Conv2d_11_depthwise', 'Conv2d_11_pointwise',\n 'Conv2d_12_depthwise', 'Conv2d_12_pointwise',\n 'Conv2d_13_depthwise', 'Conv2d_13_pointwise']\n for index, endpoint in enumerate(endpoints):\n with tf.Graph().as_default():\n inputs = tf.random_uniform((batch_size, height, width, 3))\n out_tensor, end_points = mobilenet_v1.mobilenet_v1_base(\n inputs, final_endpoint=endpoint)\n self.assertTrue(out_tensor.op.name.startswith(\n 'MobilenetV1/' + endpoint))\n self.assertItemsEqual(endpoints[:index + 1], end_points.keys())\n\n def testBuildCustomNetworkUsingConvDefs(self):\n batch_size = 5\n height, width = 224, 224\n conv_defs = [\n mobilenet_v1.Conv(kernel=[3, 3], stride=2, depth=32),\n mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=64),\n mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=2, depth=128),\n mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=512)\n ]\n\n inputs = tf.random_uniform((batch_size, height, width, 3))\n net, end_points = mobilenet_v1.mobilenet_v1_base(\n inputs, final_endpoint='Conv2d_3_pointwise', conv_defs=conv_defs)\n self.assertTrue(net.op.name.startswith('MobilenetV1/Conv2d_3'))\n self.assertListEqual(net.get_shape().as_list(),\n [batch_size, 56, 56, 512])\n expected_endpoints = ['Conv2d_0',\n 'Conv2d_1_depthwise', 'Conv2d_1_pointwise',\n 'Conv2d_2_depthwise', 'Conv2d_2_pointwise',\n 'Conv2d_3_depthwise', 'Conv2d_3_pointwise']\n self.assertItemsEqual(end_points.keys(), expected_endpoints)\n\n def testBuildAndCheckAllEndPointsUptoConv2d_13(self):\n batch_size = 5\n height, width = 224, 224\n\n inputs = tf.random_uniform((batch_size, height, width, 3))\n with slim.arg_scope([slim.conv2d, slim.separable_conv2d],\n normalizer_fn=slim.batch_norm):\n _, end_points = mobilenet_v1.mobilenet_v1_base(\n inputs, final_endpoint='Conv2d_13_pointwise')\n _, explicit_padding_end_points = mobilenet_v1.mobilenet_v1_base(\n inputs, final_endpoint='Conv2d_13_pointwise',\n use_explicit_padding=True)\n endpoints_shapes = {'Conv2d_0': [batch_size, 112, 112, 32],\n 'Conv2d_1_depthwise': [batch_size, 112, 112, 32],\n 'Conv2d_1_pointwise': [batch_size, 112, 112, 64],\n 'Conv2d_2_depthwise': [batch_size, 56, 56, 64],\n 'Conv2d_2_pointwise': [batch_size, 56, 56, 128],\n 'Conv2d_3_depthwise': [batch_size, 56, 56, 128],\n 'Conv2d_3_pointwise': [batch_size, 56, 56, 128],\n 'Conv2d_4_depthwise': [batch_size, 28, 28, 128],\n 'Conv2d_4_pointwise': [batch_size, 28, 28, 256],\n 'Conv2d_5_depthwise': [batch_size, 28, 28, 256],\n 'Conv2d_5_pointwise': [batch_size, 28, 28, 256],\n 'Conv2d_6_depthwise': [batch_size, 14, 14, 256],\n 'Conv2d_6_pointwise': [batch_size, 14, 14, 512],\n 'Conv2d_7_depthwise': [batch_size, 14, 14, 512],\n 'Conv2d_7_pointwise': [batch_size, 14, 14, 512],\n 'Conv2d_8_depthwise': [batch_size, 14, 14, 512],\n 'Conv2d_8_pointwise': [batch_size, 14, 14, 512],\n 'Conv2d_9_depthwise': [batch_size, 14, 14, 512],\n 'Conv2d_9_pointwise': [batch_size, 14, 14, 512],\n 'Conv2d_10_depthwise': [batch_size, 14, 14, 512],\n 'Conv2d_10_pointwise': [batch_size, 14, 14, 512],\n 'Conv2d_11_depthwise': [batch_size, 14, 14, 512],\n 'Conv2d_11_pointwise': [batch_size, 14, 14, 512],\n 'Conv2d_12_depthwise': [batch_size, 7, 7, 512],\n 'Conv2d_12_pointwise': [batch_size, 7, 7, 1024],\n 'Conv2d_13_depthwise': [batch_size, 7, 7, 1024],\n 'Conv2d_13_pointwise': [batch_size, 7, 7, 1024]}\n self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())\n 
for endpoint_name, expected_shape in endpoints_shapes.items():\n self.assertTrue(endpoint_name in end_points)\n self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),\n expected_shape)\n self.assertItemsEqual(endpoints_shapes.keys(),\n explicit_padding_end_points.keys())\n for endpoint_name, expected_shape in endpoints_shapes.items():\n self.assertTrue(endpoint_name in explicit_padding_end_points)\n self.assertListEqual(\n explicit_padding_end_points[endpoint_name].get_shape().as_list(),\n expected_shape)\n\n def testOutputStride16BuildAndCheckAllEndPointsUptoConv2d_13(self):\n batch_size = 5\n height, width = 224, 224\n output_stride = 16\n\n inputs = tf.random_uniform((batch_size, height, width, 3))\n with slim.arg_scope([slim.conv2d, slim.separable_conv2d],\n normalizer_fn=slim.batch_norm):\n _, end_points = mobilenet_v1.mobilenet_v1_base(\n inputs, output_stride=output_stride,\n final_endpoint='Conv2d_13_pointwise')\n _, explicit_padding_end_points = mobilenet_v1.mobilenet_v1_base(\n inputs, output_stride=output_stride,\n final_endpoint='Conv2d_13_pointwise', use_explicit_padding=True)\n endpoints_shapes = {'Conv2d_0': [batch_size, 112, 112, 32],\n 'Conv2d_1_depthwise': [batch_size, 112, 112, 32],\n 'Conv2d_1_pointwise': [batch_size, 112, 112, 64],\n 'Conv2d_2_depthwise': [batch_size, 56, 56, 64],\n 'Conv2d_2_pointwise': [batch_size, 56, 56, 128],\n 'Conv2d_3_depthwise': [batch_size, 56, 56, 128],\n 'Conv2d_3_pointwise': [batch_size, 56, 56, 128],\n 'Conv2d_4_depthwise': [batch_size, 28, 28, 128],\n 'Conv2d_4_pointwise': [batch_size, 28, 28, 256],\n 'Conv2d_5_depthwise': [batch_size, 28, 28, 256],\n 'Conv2d_5_pointwise': [batch_size, 28, 28, 256],\n 'Conv2d_6_depthwise': [batch_size, 14, 14, 256],\n 'Conv2d_6_pointwise': [batch_size, 14, 14, 512],\n 'Conv2d_7_depthwise': [batch_size, 14, 14, 512],\n 'Conv2d_7_pointwise': [batch_size, 14, 14, 512],\n 'Conv2d_8_depthwise': [batch_size, 14, 14, 512],\n 'Conv2d_8_pointwise': [batch_size, 14, 14, 512],\n 'Conv2d_9_depthwise': [batch_size, 14, 14, 512],\n 'Conv2d_9_pointwise': [batch_size, 14, 14, 512],\n 'Conv2d_10_depthwise': [batch_size, 14, 14, 512],\n 'Conv2d_10_pointwise': [batch_size, 14, 14, 512],\n 'Conv2d_11_depthwise': [batch_size, 14, 14, 512],\n 'Conv2d_11_pointwise': [batch_size, 14, 14, 512],\n 'Conv2d_12_depthwise': [batch_size, 14, 14, 512],\n 'Conv2d_12_pointwise': [batch_size, 14, 14, 1024],\n 'Conv2d_13_depthwise': [batch_size, 14, 14, 1024],\n 'Conv2d_13_pointwise': [batch_size, 14, 14, 1024]}\n self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())\n for endpoint_name, expected_shape in endpoints_shapes.items():\n self.assertTrue(endpoint_name in end_points)\n self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),\n expected_shape)\n self.assertItemsEqual(endpoints_shapes.keys(),\n explicit_padding_end_points.keys())\n for endpoint_name, expected_shape in endpoints_shapes.items():\n self.assertTrue(endpoint_name in explicit_padding_end_points)\n self.assertListEqual(\n explicit_padding_end_points[endpoint_name].get_shape().as_list(),\n expected_shape)\n\n def testOutputStride8BuildAndCheckAllEndPointsUptoConv2d_13(self):\n batch_size = 5\n height, width = 224, 224\n output_stride = 8\n\n inputs = tf.random_uniform((batch_size, height, width, 3))\n with slim.arg_scope([slim.conv2d, slim.separable_conv2d],\n normalizer_fn=slim.batch_norm):\n _, end_points = mobilenet_v1.mobilenet_v1_base(\n inputs, output_stride=output_stride,\n final_endpoint='Conv2d_13_pointwise')\n _, 
explicit_padding_end_points = mobilenet_v1.mobilenet_v1_base(\n inputs, output_stride=output_stride,\n final_endpoint='Conv2d_13_pointwise', use_explicit_padding=True)\n endpoints_shapes = {'Conv2d_0': [batch_size, 112, 112, 32],\n 'Conv2d_1_depthwise': [batch_size, 112, 112, 32],\n 'Conv2d_1_pointwise': [batch_size, 112, 112, 64],\n 'Conv2d_2_depthwise': [batch_size, 56, 56, 64],\n 'Conv2d_2_pointwise': [batch_size, 56, 56, 128],\n 'Conv2d_3_depthwise': [batch_size, 56, 56, 128],\n 'Conv2d_3_pointwise': [batch_size, 56, 56, 128],\n 'Conv2d_4_depthwise': [batch_size, 28, 28, 128],\n 'Conv2d_4_pointwise': [batch_size, 28, 28, 256],\n 'Conv2d_5_depthwise': [batch_size, 28, 28, 256],\n 'Conv2d_5_pointwise': [batch_size, 28, 28, 256],\n 'Conv2d_6_depthwise': [batch_size, 28, 28, 256],\n 'Conv2d_6_pointwise': [batch_size, 28, 28, 512],\n 'Conv2d_7_depthwise': [batch_size, 28, 28, 512],\n 'Conv2d_7_pointwise': [batch_size, 28, 28, 512],\n 'Conv2d_8_depthwise': [batch_size, 28, 28, 512],\n 'Conv2d_8_pointwise': [batch_size, 28, 28, 512],\n 'Conv2d_9_depthwise': [batch_size, 28, 28, 512],\n 'Conv2d_9_pointwise': [batch_size, 28, 28, 512],\n 'Conv2d_10_depthwise': [batch_size, 28, 28, 512],\n 'Conv2d_10_pointwise': [batch_size, 28, 28, 512],\n 'Conv2d_11_depthwise': [batch_size, 28, 28, 512],\n 'Conv2d_11_pointwise': [batch_size, 28, 28, 512],\n 'Conv2d_12_depthwise': [batch_size, 28, 28, 512],\n 'Conv2d_12_pointwise': [batch_size, 28, 28, 1024],\n 'Conv2d_13_depthwise': [batch_size, 28, 28, 1024],\n 'Conv2d_13_pointwise': [batch_size, 28, 28, 1024]}\n self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())\n for endpoint_name, expected_shape in endpoints_shapes.items():\n self.assertTrue(endpoint_name in end_points)\n self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),\n expected_shape)\n self.assertItemsEqual(endpoints_shapes.keys(),\n explicit_padding_end_points.keys())\n for endpoint_name, expected_shape in endpoints_shapes.items():\n self.assertTrue(endpoint_name in explicit_padding_end_points)\n self.assertListEqual(\n explicit_padding_end_points[endpoint_name].get_shape().as_list(),\n expected_shape)\n\n def testBuildAndCheckAllEndPointsApproximateFaceNet(self):\n batch_size = 5\n height, width = 128, 128\n\n inputs = tf.random_uniform((batch_size, height, width, 3))\n with slim.arg_scope([slim.conv2d, slim.separable_conv2d],\n normalizer_fn=slim.batch_norm):\n _, end_points = mobilenet_v1.mobilenet_v1_base(\n inputs, final_endpoint='Conv2d_13_pointwise', depth_multiplier=0.75)\n _, explicit_padding_end_points = mobilenet_v1.mobilenet_v1_base(\n inputs, final_endpoint='Conv2d_13_pointwise', depth_multiplier=0.75,\n use_explicit_padding=True)\n # For the Conv2d_0 layer FaceNet has depth=16\n endpoints_shapes = {'Conv2d_0': [batch_size, 64, 64, 24],\n 'Conv2d_1_depthwise': [batch_size, 64, 64, 24],\n 'Conv2d_1_pointwise': [batch_size, 64, 64, 48],\n 'Conv2d_2_depthwise': [batch_size, 32, 32, 48],\n 'Conv2d_2_pointwise': [batch_size, 32, 32, 96],\n 'Conv2d_3_depthwise': [batch_size, 32, 32, 96],\n 'Conv2d_3_pointwise': [batch_size, 32, 32, 96],\n 'Conv2d_4_depthwise': [batch_size, 16, 16, 96],\n 'Conv2d_4_pointwise': [batch_size, 16, 16, 192],\n 'Conv2d_5_depthwise': [batch_size, 16, 16, 192],\n 'Conv2d_5_pointwise': [batch_size, 16, 16, 192],\n 'Conv2d_6_depthwise': [batch_size, 8, 8, 192],\n 'Conv2d_6_pointwise': [batch_size, 8, 8, 384],\n 'Conv2d_7_depthwise': [batch_size, 8, 8, 384],\n 'Conv2d_7_pointwise': [batch_size, 8, 8, 384],\n 'Conv2d_8_depthwise': 
[batch_size, 8, 8, 384],\n 'Conv2d_8_pointwise': [batch_size, 8, 8, 384],\n 'Conv2d_9_depthwise': [batch_size, 8, 8, 384],\n 'Conv2d_9_pointwise': [batch_size, 8, 8, 384],\n 'Conv2d_10_depthwise': [batch_size, 8, 8, 384],\n 'Conv2d_10_pointwise': [batch_size, 8, 8, 384],\n 'Conv2d_11_depthwise': [batch_size, 8, 8, 384],\n 'Conv2d_11_pointwise': [batch_size, 8, 8, 384],\n 'Conv2d_12_depthwise': [batch_size, 4, 4, 384],\n 'Conv2d_12_pointwise': [batch_size, 4, 4, 768],\n 'Conv2d_13_depthwise': [batch_size, 4, 4, 768],\n 'Conv2d_13_pointwise': [batch_size, 4, 4, 768]}\n self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())\n for endpoint_name, expected_shape in endpoints_shapes.items():\n self.assertTrue(endpoint_name in end_points)\n self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),\n expected_shape)\n self.assertItemsEqual(endpoints_shapes.keys(),\n explicit_padding_end_points.keys())\n for endpoint_name, expected_shape in endpoints_shapes.items():\n self.assertTrue(endpoint_name in explicit_padding_end_points)\n self.assertListEqual(\n explicit_padding_end_points[endpoint_name].get_shape().as_list(),\n expected_shape)\n\n def testModelHasExpectedNumberOfParameters(self):\n batch_size = 5\n height, width = 224, 224\n inputs = tf.random_uniform((batch_size, height, width, 3))\n with slim.arg_scope([slim.conv2d, slim.separable_conv2d],\n normalizer_fn=slim.batch_norm):\n mobilenet_v1.mobilenet_v1_base(inputs)\n total_params, _ = slim.model_analyzer.analyze_vars(\n slim.get_model_variables())\n self.assertAlmostEqual(3217920, total_params)\n\n def testBuildEndPointsWithDepthMultiplierLessThanOne(self):\n batch_size = 5\n height, width = 224, 224\n num_classes = 1000\n\n inputs = tf.random_uniform((batch_size, height, width, 3))\n _, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes)\n\n endpoint_keys = [key for key in end_points.keys() if key.startswith('Conv')]\n\n _, end_points_with_multiplier = mobilenet_v1.mobilenet_v1(\n inputs, num_classes, scope='depth_multiplied_net',\n depth_multiplier=0.5)\n\n for key in endpoint_keys:\n original_depth = end_points[key].get_shape().as_list()[3]\n new_depth = end_points_with_multiplier[key].get_shape().as_list()[3]\n self.assertEqual(0.5 * original_depth, new_depth)\n\n def testBuildEndPointsWithDepthMultiplierGreaterThanOne(self):\n batch_size = 5\n height, width = 224, 224\n num_classes = 1000\n\n inputs = tf.random_uniform((batch_size, height, width, 3))\n _, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes)\n\n endpoint_keys = [key for key in end_points.keys()\n if key.startswith('Mixed') or key.startswith('Conv')]\n\n _, end_points_with_multiplier = mobilenet_v1.mobilenet_v1(\n inputs, num_classes, scope='depth_multiplied_net',\n depth_multiplier=2.0)\n\n for key in endpoint_keys:\n original_depth = end_points[key].get_shape().as_list()[3]\n new_depth = end_points_with_multiplier[key].get_shape().as_list()[3]\n self.assertEqual(2.0 * original_depth, new_depth)\n\n def testRaiseValueErrorWithInvalidDepthMultiplier(self):\n batch_size = 5\n height, width = 224, 224\n num_classes = 1000\n\n inputs = tf.random_uniform((batch_size, height, width, 3))\n with self.assertRaises(ValueError):\n _ = mobilenet_v1.mobilenet_v1(\n inputs, num_classes, depth_multiplier=-0.1)\n with self.assertRaises(ValueError):\n _ = mobilenet_v1.mobilenet_v1(\n inputs, num_classes, depth_multiplier=0.0)\n\n def testHalfSizeImages(self):\n batch_size = 5\n height, width = 112, 112\n num_classes = 1000\n\n inputs = 
tf.random_uniform((batch_size, height, width, 3))\n logits, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes)\n self.assertTrue(logits.op.name.startswith('MobilenetV1/Logits'))\n self.assertListEqual(logits.get_shape().as_list(),\n [batch_size, num_classes])\n pre_pool = end_points['Conv2d_13_pointwise']\n self.assertListEqual(pre_pool.get_shape().as_list(),\n [batch_size, 4, 4, 1024])\n\n def testUnknownImageShape(self):\n tf.reset_default_graph()\n batch_size = 2\n height, width = 224, 224\n num_classes = 1000\n input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))\n with self.test_session() as sess:\n inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3))\n logits, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes)\n self.assertTrue(logits.op.name.startswith('MobilenetV1/Logits'))\n self.assertListEqual(logits.get_shape().as_list(),\n [batch_size, num_classes])\n pre_pool = end_points['Conv2d_13_pointwise']\n feed_dict = {inputs: input_np}\n tf.global_variables_initializer().run()\n pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)\n self.assertListEqual(list(pre_pool_out.shape), [batch_size, 7, 7, 1024])\n\n def testGlobalPoolUnknownImageShape(self):\n tf.reset_default_graph()\n batch_size = 1\n height, width = 250, 300\n num_classes = 1000\n input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))\n with self.test_session() as sess:\n inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3))\n logits, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes,\n global_pool=True)\n self.assertTrue(logits.op.name.startswith('MobilenetV1/Logits'))\n self.assertListEqual(logits.get_shape().as_list(),\n [batch_size, num_classes])\n pre_pool = end_points['Conv2d_13_pointwise']\n feed_dict = {inputs: input_np}\n tf.global_variables_initializer().run()\n pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)\n self.assertListEqual(list(pre_pool_out.shape), [batch_size, 8, 10, 1024])\n\n def testUnknowBatchSize(self):\n batch_size = 1\n height, width = 224, 224\n num_classes = 1000\n\n inputs = tf.placeholder(tf.float32, (None, height, width, 3))\n logits, _ = mobilenet_v1.mobilenet_v1(inputs, num_classes)\n self.assertTrue(logits.op.name.startswith('MobilenetV1/Logits'))\n self.assertListEqual(logits.get_shape().as_list(),\n [None, num_classes])\n images = tf.random_uniform((batch_size, height, width, 3))\n\n with self.test_session() as sess:\n sess.run(tf.global_variables_initializer())\n output = sess.run(logits, {inputs: images.eval()})\n self.assertEqual(output.shape, (batch_size, num_classes))\n\n def testEvaluation(self):\n batch_size = 2\n height, width = 224, 224\n num_classes = 1000\n\n eval_inputs = tf.random_uniform((batch_size, height, width, 3))\n logits, _ = mobilenet_v1.mobilenet_v1(eval_inputs, num_classes,\n is_training=False)\n predictions = tf.argmax(logits, 1)\n\n with self.test_session() as sess:\n sess.run(tf.global_variables_initializer())\n output = sess.run(predictions)\n self.assertEqual(output.shape, (batch_size,))\n\n def testTrainEvalWithReuse(self):\n train_batch_size = 5\n eval_batch_size = 2\n height, width = 150, 150\n num_classes = 1000\n\n train_inputs = tf.random_uniform((train_batch_size, height, width, 3))\n mobilenet_v1.mobilenet_v1(train_inputs, num_classes)\n eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))\n logits, _ = mobilenet_v1.mobilenet_v1(eval_inputs, num_classes,\n reuse=True)\n predictions = tf.argmax(logits, 1)\n\n with self.test_session() as 
sess:\n sess.run(tf.global_variables_initializer())\n output = sess.run(predictions)\n self.assertEqual(output.shape, (eval_batch_size,))\n\n def testLogitsNotSqueezed(self):\n num_classes = 25\n images = tf.random_uniform([1, 224, 224, 3])\n logits, _ = mobilenet_v1.mobilenet_v1(images,\n num_classes=num_classes,\n spatial_squeeze=False)\n\n with self.test_session() as sess:\n tf.global_variables_initializer().run()\n logits_out = sess.run(logits)\n self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes])\n\n def testBatchNormScopeDoesNotHaveIsTrainingWhenItsSetToNone(self):\n sc = mobilenet_v1.mobilenet_v1_arg_scope(is_training=None)\n self.assertNotIn('is_training', sc[slim.arg_scope_func_key(\n slim.batch_norm)])\n\n def testBatchNormScopeDoesHasIsTrainingWhenItsNotNone(self):\n sc = mobilenet_v1.mobilenet_v1_arg_scope(is_training=True)\n self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)])\n sc = mobilenet_v1.mobilenet_v1_arg_scope(is_training=False)\n self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)])\n sc = mobilenet_v1.mobilenet_v1_arg_scope()\n self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)])\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Copyright 2018 The TensorFlow Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Functions for computing evaluation metrics.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\n\ndef _metric_variable(name, shape, dtype):\n \"\"\"Creates a Variable in LOCAL_VARIABLES and METRIC_VARIABLES collections.\"\"\"\n return tf.get_variable(\n name,\n initializer=tf.zeros(shape, dtype),\n trainable=False,\n collections=[tf.GraphKeys.LOCAL_VARIABLES, tf.GraphKeys.METRIC_VARIABLES])\n\n\ndef _build_metrics(labels, predictions, weights, batch_losses):\n \"\"\"Builds TensorFlow operations to compute model evaluation metrics.\n\n Args:\n labels: Tensor with shape [batch_size].\n predictions: Tensor with shape [batch_size, output_dim].\n weights: Tensor with shape [batch_size].\n batch_losses: Tensor with shape [batch_size].\n\n Returns:\n A dictionary {metric_name: (metric_value, update_op).\n \"\"\"\n # Compute the predicted labels.\n assert len(predictions.shape) == 2\n binary_classification = (predictions.shape[1] == 1)\n if binary_classification:\n predictions = tf.squeeze(predictions, axis=[1])\n predicted_labels = tf.to_int32(\n tf.greater(predictions, 0.5), name=\"predicted_labels\")\n else:\n predicted_labels = tf.argmax(\n predictions, 1, name=\"predicted_labels\", output_type=tf.int32)\n\n metrics = {}\n with tf.variable_scope(\"metrics\"):\n # Total number of examples.\n num_examples = _metric_variable(\"num_examples\", [], tf.float32)\n update_num_examples = tf.assign_add(num_examples, tf.reduce_sum(weights))\n metrics[\"num_examples\"] = (num_examples.read_value(), update_num_examples)\n\n # Accuracy metrics.\n num_correct = _metric_variable(\"num_correct\", [], tf.float32)\n is_correct = weights * tf.to_float(tf.equal(labels, predicted_labels))\n update_num_correct = tf.assign_add(num_correct, tf.reduce_sum(is_correct))\n metrics[\"accuracy/num_correct\"] = (num_correct.read_value(),\n update_num_correct)\n accuracy = tf.div(num_correct, num_examples, name=\"accuracy\")\n metrics[\"accuracy/accuracy\"] = (accuracy, tf.no_op())\n\n # Weighted cross-entropy loss.\n metrics[\"losses/weighted_cross_entropy\"] = tf.metrics.mean(\n batch_losses, weights=weights, name=\"cross_entropy_loss\")\n\n # Possibly create additional metrics for binary classification.\n if binary_classification:\n labels = tf.cast(labels, dtype=tf.bool)\n predicted_labels = tf.cast(predicted_labels, dtype=tf.bool)\n\n # AUC.\n metrics[\"auc\"] = tf.metrics.auc(\n labels, predictions, weights=weights, num_thresholds=1000)\n\n def _count_condition(name, labels_value, predicted_value):\n \"\"\"Creates a counter for given values of predictions and labels.\"\"\"\n count = _metric_variable(name, [], tf.float32)\n is_equal = tf.to_float(\n tf.logical_and(\n tf.equal(labels, labels_value),\n tf.equal(predicted_labels, predicted_value)))\n update_op = tf.assign_add(count, tf.reduce_sum(weights * is_equal))\n return 
count.read_value(), update_op\n\n # Confusion matrix metrics.\n metrics[\"confusion_matrix/true_positives\"] = _count_condition(\n \"true_positives\", labels_value=True, predicted_value=True)\n metrics[\"confusion_matrix/false_positives\"] = _count_condition(\n \"false_positives\", labels_value=False, predicted_value=True)\n metrics[\"confusion_matrix/true_negatives\"] = _count_condition(\n \"true_negatives\", labels_value=False, predicted_value=False)\n metrics[\"confusion_matrix/false_negatives\"] = _count_condition(\n \"false_negatives\", labels_value=True, predicted_value=False)\n\n return metrics\n\n\ndef create_metric_fn(model):\n \"\"\"Creates a tuple (metric_fn, metric_fn_inputs).\n\n This function is primarily used for creating a TPUEstimator.\n\n The result of calling metric_fn(**metric_fn_inputs) is a dictionary\n {metric_name: (metric_value, update_op)}.\n\n Args:\n model: Instance of AstroModel.\n\n Returns:\n A tuple (metric_fn, metric_fn_inputs).\n \"\"\"\n weights = model.weights\n if weights is None:\n weights = tf.ones_like(model.labels, dtype=tf.float32)\n metric_fn_inputs = {\n \"labels\": model.labels,\n \"predictions\": model.predictions,\n \"weights\": weights,\n \"batch_losses\": model.batch_losses,\n }\n\n def metric_fn(labels, predictions, weights, batch_losses):\n return _build_metrics(labels, predictions, weights, batch_losses)\n\n return metric_fn, metric_fn_inputs\n\n\ndef create_metrics(model):\n \"\"\"Creates a dictionary {metric_name: (metric_value, update_op)}.\n\n This function is primarily used for creating an Estimator.\n\n Args:\n model: Instance of AstroModel.\n\n Returns:\n A dictionary {metric_name: (metric_value, update_op).\n \"\"\"\n metric_fn, metric_fn_inputs = create_metric_fn(model)\n return metric_fn(**metric_fn_inputs)\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Train the skip-thoughts model.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom research.skip_thoughts.skip_thoughts import configuration\nfrom research.skip_thoughts.skip_thoughts import skip_thoughts_model\n\nFLAGS = tf.flags.FLAGS\n\ntf.flags.DEFINE_string(\"input_file_pattern\", None,\n \"File pattern of sharded TFRecord files containing \"\n \"tf.Example protos.\")\ntf.flags.DEFINE_string(\"train_dir\", None,\n \"Directory for saving and loading checkpoints.\")\n\ntf.logging.set_verbosity(tf.logging.INFO)\n\n\ndef _setup_learning_rate(config, global_step):\n \"\"\"Sets up the learning rate with optional exponential decay.\n\n Args:\n config: Object containing learning rate configuration parameters.\n global_step: Tensor; the global step.\n\n Returns:\n learning_rate: Tensor; the learning rate with exponential decay.\n \"\"\"\n if config.learning_rate_decay_factor > 0:\n learning_rate = tf.train.exponential_decay(\n learning_rate=float(config.learning_rate),\n global_step=global_step,\n decay_steps=config.learning_rate_decay_steps,\n decay_rate=config.learning_rate_decay_factor,\n staircase=False)\n else:\n learning_rate = tf.constant(config.learning_rate)\n return learning_rate\n\n\ndef main(unused_argv):\n _ = unused_argv\n if not FLAGS.input_file_pattern:\n raise ValueError(\"--input_file_pattern is required.\")\n if not FLAGS.train_dir:\n raise ValueError(\"--train_dir is required.\")\n\n model_config = configuration.model_config(\n input_file_pattern=FLAGS.input_file_pattern)\n training_config = configuration.training_config()\n\n tf.logging.info(\"Building training graph.\")\n g = tf.Graph()\n with g.as_default():\n model = skip_thoughts_model.SkipThoughtsModel(model_config, mode=\"train\")\n model.build()\n\n learning_rate = _setup_learning_rate(training_config, model.global_step)\n optimizer = tf.train.AdamOptimizer(learning_rate)\n\n train_tensor = tf.contrib.slim.learning.create_train_op(\n total_loss=model.total_loss,\n optimizer=optimizer,\n global_step=model.global_step,\n clip_gradient_norm=training_config.clip_gradient_norm)\n\n saver = tf.train.Saver()\n\n tf.contrib.slim.learning.train(\n train_op=train_tensor,\n logdir=FLAGS.train_dir,\n graph=g,\n global_step=model.global_step,\n number_of_steps=training_config.number_of_steps,\n save_summaries_secs=training_config.save_summaries_secs,\n saver=saver,\n save_interval_secs=training_config.save_model_secs)\n\n\nif __name__ == \"__main__\":\n tf.app.run()\n",
"# Copyright 2018 The TensorFlow Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for example_util.py.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom research.astronet.astronet.util import example_util\n\n\nclass ExampleUtilTest(tf.test.TestCase):\n\n def test_get_feature(self):\n # Create Example.\n bytes_list = tf.train.BytesList(\n value=[v.encode(\"latin-1\") for v in [\"a\", \"b\", \"c\"]])\n float_list = tf.train.FloatList(value=[1.0, 2.0, 3.0])\n int64_list = tf.train.Int64List(value=[11, 22, 33])\n ex = tf.train.Example(\n features=tf.train.Features(\n feature={\n \"a_bytes\": tf.train.Feature(bytes_list=bytes_list),\n \"b_float\": tf.train.Feature(float_list=float_list),\n \"c_int64\": tf.train.Feature(int64_list=int64_list),\n \"d_empty\": tf.train.Feature(),\n }))\n\n # Get bytes feature.\n np.testing.assert_array_equal(\n example_util.get_feature(ex, \"a_bytes\").astype(str), [\"a\", \"b\", \"c\"])\n np.testing.assert_array_equal(\n example_util.get_feature(ex, \"a_bytes\", \"bytes_list\").astype(str),\n [\"a\", \"b\", \"c\"])\n np.testing.assert_array_equal(\n example_util.get_bytes_feature(ex, \"a_bytes\").astype(str),\n [\"a\", \"b\", \"c\"])\n with self.assertRaises(TypeError):\n example_util.get_feature(ex, \"a_bytes\", \"float_list\")\n with self.assertRaises(TypeError):\n example_util.get_float_feature(ex, \"a_bytes\")\n with self.assertRaises(TypeError):\n example_util.get_int64_feature(ex, \"a_bytes\")\n\n # Get float feature.\n np.testing.assert_array_almost_equal(\n example_util.get_feature(ex, \"b_float\"), [1.0, 2.0, 3.0])\n np.testing.assert_array_almost_equal(\n example_util.get_feature(ex, \"b_float\", \"float_list\"), [1.0, 2.0, 3.0])\n np.testing.assert_array_almost_equal(\n example_util.get_float_feature(ex, \"b_float\"), [1.0, 2.0, 3.0])\n with self.assertRaises(TypeError):\n example_util.get_feature(ex, \"b_float\", \"int64_list\")\n with self.assertRaises(TypeError):\n example_util.get_bytes_feature(ex, \"b_float\")\n with self.assertRaises(TypeError):\n example_util.get_int64_feature(ex, \"b_float\")\n\n # Get int64 feature.\n np.testing.assert_array_equal(\n example_util.get_feature(ex, \"c_int64\"), [11, 22, 33])\n np.testing.assert_array_equal(\n example_util.get_feature(ex, \"c_int64\", \"int64_list\"), [11, 22, 33])\n np.testing.assert_array_equal(\n example_util.get_int64_feature(ex, \"c_int64\"), [11, 22, 33])\n with self.assertRaises(TypeError):\n example_util.get_feature(ex, \"c_int64\", \"bytes_list\")\n with self.assertRaises(TypeError):\n example_util.get_bytes_feature(ex, \"c_int64\")\n with self.assertRaises(TypeError):\n example_util.get_float_feature(ex, \"c_int64\")\n\n # Get empty feature.\n np.testing.assert_array_equal(example_util.get_feature(ex, \"d_empty\"), [])\n np.testing.assert_array_equal(\n example_util.get_feature(ex, \"d_empty\", \"float_list\"), [])\n 
np.testing.assert_array_equal(\n example_util.get_bytes_feature(ex, \"d_empty\"), [])\n np.testing.assert_array_equal(\n example_util.get_float_feature(ex, \"d_empty\"), [])\n np.testing.assert_array_equal(\n example_util.get_int64_feature(ex, \"d_empty\"), [])\n\n # Get nonexistent feature.\n with self.assertRaises(KeyError):\n example_util.get_feature(ex, \"nonexistent\")\n with self.assertRaises(KeyError):\n example_util.get_feature(ex, \"nonexistent\", \"bytes_list\")\n with self.assertRaises(KeyError):\n example_util.get_bytes_feature(ex, \"nonexistent\")\n with self.assertRaises(KeyError):\n example_util.get_float_feature(ex, \"nonexistent\")\n with self.assertRaises(KeyError):\n example_util.get_int64_feature(ex, \"nonexistent\")\n np.testing.assert_array_equal(\n example_util.get_feature(ex, \"nonexistent\", strict=False), [])\n np.testing.assert_array_equal(\n example_util.get_bytes_feature(ex, \"nonexistent\", strict=False), [])\n np.testing.assert_array_equal(\n example_util.get_float_feature(ex, \"nonexistent\", strict=False), [])\n np.testing.assert_array_equal(\n example_util.get_int64_feature(ex, \"nonexistent\", strict=False), [])\n\n def test_set_feature(self):\n ex = tf.train.Example()\n\n # Set bytes features.\n example_util.set_feature(ex, \"a1_bytes\", [\"a\", \"b\"])\n example_util.set_feature(ex, \"a2_bytes\", [\"A\", \"B\"], kind=\"bytes_list\")\n example_util.set_bytes_feature(ex, \"a3_bytes\", [\"x\", \"y\"])\n np.testing.assert_array_equal(\n np.array(ex.features.feature[\"a1_bytes\"].bytes_list.value).astype(str),\n [\"a\", \"b\"])\n np.testing.assert_array_equal(\n np.array(ex.features.feature[\"a2_bytes\"].bytes_list.value).astype(str),\n [\"A\", \"B\"])\n np.testing.assert_array_equal(\n np.array(ex.features.feature[\"a3_bytes\"].bytes_list.value).astype(str),\n [\"x\", \"y\"])\n with self.assertRaises(ValueError):\n example_util.set_feature(ex, \"a3_bytes\", [\"xxx\"]) # Duplicate.\n\n # Set float features.\n example_util.set_feature(ex, \"b1_float\", [1.0, 2.0])\n example_util.set_feature(ex, \"b2_float\", [10.0, 20.0], kind=\"float_list\")\n example_util.set_float_feature(ex, \"b3_float\", [88.0, 99.0])\n np.testing.assert_array_almost_equal(\n ex.features.feature[\"b1_float\"].float_list.value, [1.0, 2.0])\n np.testing.assert_array_almost_equal(\n ex.features.feature[\"b2_float\"].float_list.value, [10.0, 20.0])\n np.testing.assert_array_almost_equal(\n ex.features.feature[\"b3_float\"].float_list.value, [88.0, 99.0])\n with self.assertRaises(ValueError):\n example_util.set_feature(ex, \"b3_float\", [1234.0]) # Duplicate.\n\n # Set int64 features.\n example_util.set_feature(ex, \"c1_int64\", [1, 2, 3])\n example_util.set_feature(ex, \"c2_int64\", [11, 22, 33], kind=\"int64_list\")\n example_util.set_int64_feature(ex, \"c3_int64\", [88, 99])\n np.testing.assert_array_equal(\n ex.features.feature[\"c1_int64\"].int64_list.value, [1, 2, 3])\n np.testing.assert_array_equal(\n ex.features.feature[\"c2_int64\"].int64_list.value, [11, 22, 33])\n np.testing.assert_array_equal(\n ex.features.feature[\"c3_int64\"].int64_list.value, [88, 99])\n with self.assertRaises(ValueError):\n example_util.set_feature(ex, \"c3_int64\", [1234]) # Duplicate.\n\n # Overwrite features.\n example_util.set_feature(ex, \"a3_bytes\", [\"xxx\"], allow_overwrite=True)\n np.testing.assert_array_equal(\n np.array(ex.features.feature[\"a3_bytes\"].bytes_list.value).astype(str),\n [\"xxx\"])\n\n example_util.set_feature(ex, \"b3_float\", [1234.0], allow_overwrite=True)\n 
np.testing.assert_array_almost_equal(\n ex.features.feature[\"b3_float\"].float_list.value, [1234.0])\n\n example_util.set_feature(ex, \"c3_int64\", [1234], allow_overwrite=True)\n np.testing.assert_array_equal(\n ex.features.feature[\"c3_int64\"].int64_list.value, [1234])\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n"
] | [
[
"tensorflow.Graph",
"tensorflow.summary.scalar",
"tensorflow.shape",
"tensorflow.app.flags.DEFINE_integer",
"tensorflow.train.exponential_decay",
"tensorflow.train.replica_device_setter",
"tensorflow.app.flags.DEFINE_string",
"tensorflow.summary.merge_all",
"tensorflow.train.AdamOptimizer",
"tensorflow.train.Supervisor",
"tensorflow.contrib.framework.create_global_step",
"tensorflow.app.run"
],
[
"tensorflow.assert_equal"
],
[
"tensorflow.test.main"
],
[
"tensorflow.reduce_max",
"tensorflow.FIFOQueue",
"tensorflow.FixedLenFeature",
"tensorflow.shape",
"tensorflow.slice",
"tensorflow.reduce_sum",
"tensorflow.reduce_mean",
"tensorflow.ones",
"tensorflow.train.batch_join",
"tensorflow.subtract",
"tensorflow.FixedLenSequenceFeature",
"tensorflow.RandomShuffleQueue",
"tensorflow.train.queue_runner.QueueRunner",
"tensorflow.train.string_input_producer",
"tensorflow.gfile.Glob",
"tensorflow.reduce_min",
"tensorflow.logging.fatal"
],
[
"tensorflow.constant",
"tensorflow.Print",
"tensorflow.reduce_mean",
"tensorflow.reduce_sum",
"tensorflow.losses.sigmoid_cross_entropy",
"tensorflow.cast",
"tensorflow.equal",
"tensorflow.mul",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.log",
"tensorflow.where"
],
[
"numpy.random.random",
"numpy.random.seed",
"tensorflow.test.main",
"tensorflow.logging.set_verbosity",
"tensorflow.Session"
],
[
"numpy.array",
"tensorflow.test.main",
"numpy.stack"
],
[
"tensorflow.train.LoggingTensorHook",
"tensorflow.device",
"tensorflow.equal",
"tensorflow.app.flags.DEFINE_string",
"tensorflow.get_default_graph",
"tensorflow.summary.scalar",
"tensorflow.app.flags.DEFINE_integer",
"tensorflow.ConfigProto",
"numpy.argmax",
"tensorflow.logging.set_verbosity",
"tensorflow.train.Saver",
"tensorflow.Summary",
"tensorflow.argmax",
"tensorflow.app.run",
"tensorflow.app.flags.DEFINE_bool",
"tensorflow.logging.info",
"numpy.sum",
"tensorflow.train.get_checkpoint_state",
"tensorflow.summary.FileWriter",
"tensorflow.train.start_queue_runners",
"tensorflow.train.SessionRunArgs",
"tensorflow.logging.error"
],
[
"tensorflow.flags.DEFINE_string",
"tensorflow.app.flags.DEFINE_integer",
"tensorflow.app.flags.DEFINE_string",
"tensorflow.logging.set_verbosity",
"tensorflow.app.run"
],
[
"tensorflow.concat",
"numpy.sqrt",
"tensorflow.equal",
"tensorflow.pad",
"tensorflow.nn.moments",
"tensorflow.gather",
"tensorflow.Summary",
"tensorflow.argmax",
"tensorflow.image.grayscale_to_rgb",
"tensorflow.shape",
"tensorflow.identity",
"tensorflow.logging.info",
"tensorflow.split",
"tensorflow.clip_by_value",
"tensorflow.transpose",
"matplotlib.use",
"tensorflow.maximum",
"tensorflow.expand_dims",
"tensorflow.sqrt",
"tensorflow.abs"
],
[
"tensorflow.random_uniform",
"tensorflow.test.main"
],
[
"tensorflow.test.main"
],
[
"tensorflow.test.main"
],
[
"tensorflow.gfile.Copy",
"numpy.squeeze",
"tensorflow.test.main",
"numpy.savez_compressed",
"tensorflow.feature_column.numeric_column",
"tensorflow.logging.set_verbosity",
"tensorflow.Session"
],
[
"tensorflow.decode_csv",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.feature_column.numeric_column",
"tensorflow.estimator.DNNClassifier",
"tensorflow.logging.set_verbosity",
"tensorflow.data.TextLineDataset",
"tensorflow.split"
],
[
"tensorflow.convert_to_tensor",
"numpy.logical_not",
"numpy.maximum",
"numpy.reshape",
"numpy.arange",
"tensorflow.test.main",
"numpy.ones",
"sklearn.metrics.pairwise.euclidean_distances",
"numpy.random.rand"
],
[
"tensorflow.load_op_library"
],
[
"numpy.fliplr",
"numpy.uint8",
"numpy.ones",
"numpy.random.rand",
"numpy.zeros",
"numpy.where",
"numpy.random.randint"
],
[
"tensorflow.control_dependencies",
"tensorflow.zeros",
"tensorflow.reduce_sum",
"tensorflow.cast",
"tensorflow.diag",
"tensorflow.train.AdamOptimizer",
"tensorflow.summary.scalar",
"tensorflow.cholesky",
"tensorflow.boolean_mask",
"tensorflow.Graph",
"tensorflow.diag_part",
"tensorflow.train.get_or_create_global_step",
"tensorflow.Session",
"tensorflow.square",
"tensorflow.argmax",
"numpy.zeros",
"tensorflow.tile",
"tensorflow.matmul",
"numpy.log",
"tensorflow.shape",
"tensorflow.identity",
"tensorflow.exp",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.summary.merge_all",
"tensorflow.one_hot",
"tensorflow.add_check_numerics_ops",
"tensorflow.svd",
"tensorflow.clip_by_value",
"tensorflow.summary.histogram",
"tensorflow.transpose",
"tensorflow.range",
"tensorflow.assign",
"tensorflow.reshape",
"tensorflow.expand_dims",
"tensorflow.eye",
"tensorflow.ones",
"tensorflow.variable_scope",
"tensorflow.sqrt",
"tensorflow.nn.softplus",
"tensorflow.random_normal"
],
[
"numpy.triu_indices",
"tensorflow.gfile.Walk",
"tensorflow.gfile.Exists",
"tensorflow.gfile.GFile",
"numpy.linalg.norm",
"tensorflow.gfile.MakeDirs",
"tensorflow.logging.info",
"numpy.shape",
"numpy.array",
"numpy.zeros"
],
[
"tensorflow.Graph",
"tensorflow.test.main",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.reset_default_graph",
"numpy.random.uniform",
"tensorflow.argmax",
"tensorflow.random_uniform"
],
[
"tensorflow.metrics.mean",
"tensorflow.zeros",
"tensorflow.greater",
"tensorflow.reduce_sum",
"tensorflow.cast",
"tensorflow.ones_like",
"tensorflow.squeeze",
"tensorflow.equal",
"tensorflow.div",
"tensorflow.no_op",
"tensorflow.variable_scope",
"tensorflow.argmax",
"tensorflow.metrics.auc"
],
[
"tensorflow.contrib.slim.learning.create_train_op",
"tensorflow.contrib.slim.learning.train",
"tensorflow.Graph",
"tensorflow.constant",
"tensorflow.flags.DEFINE_string",
"tensorflow.logging.info",
"tensorflow.logging.set_verbosity",
"tensorflow.train.AdamOptimizer",
"tensorflow.train.Saver",
"tensorflow.app.run"
],
[
"tensorflow.train.Int64List",
"tensorflow.train.Feature",
"tensorflow.train.Example",
"tensorflow.test.main",
"numpy.testing.assert_array_equal",
"tensorflow.train.FloatList",
"numpy.array",
"numpy.testing.assert_array_almost_equal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
gurukiran07/pandas | [
"3cce96f515917170ea9bce731ffcc913750464b8"
] | [
"pandas/tests/groupby/test_groupby.py"
] | [
"from datetime import datetime\nfrom decimal import Decimal\nfrom io import StringIO\n\nimport numpy as np\nimport pytest\n\nfrom pandas.compat import IS64\nfrom pandas.errors import PerformanceWarning\n\nimport pandas as pd\nfrom pandas import (\n Categorical,\n DataFrame,\n Grouper,\n Index,\n MultiIndex,\n Series,\n Timestamp,\n date_range,\n read_csv,\n to_datetime,\n)\nimport pandas._testing as tm\nfrom pandas.core.base import SpecificationError\nimport pandas.core.common as com\n\n\ndef test_repr():\n # GH18203\n result = repr(Grouper(key=\"A\", level=\"B\"))\n expected = \"Grouper(key='A', level='B', axis=0, sort=False)\"\n assert result == expected\n\n\[email protected](\"dtype\", [\"int64\", \"int32\", \"float64\", \"float32\"])\ndef test_basic(dtype):\n\n data = Series(np.arange(9) // 3, index=np.arange(9), dtype=dtype)\n\n index = np.arange(9)\n np.random.shuffle(index)\n data = data.reindex(index)\n\n grouped = data.groupby(lambda x: x // 3)\n\n for k, v in grouped:\n assert len(v) == 3\n\n agged = grouped.aggregate(np.mean)\n assert agged[1] == 1\n\n tm.assert_series_equal(agged, grouped.agg(np.mean)) # shorthand\n tm.assert_series_equal(agged, grouped.mean())\n tm.assert_series_equal(grouped.agg(np.sum), grouped.sum())\n\n expected = grouped.apply(lambda x: x * x.sum())\n transformed = grouped.transform(lambda x: x * x.sum())\n assert transformed[7] == 12\n tm.assert_series_equal(transformed, expected)\n\n value_grouped = data.groupby(data)\n tm.assert_series_equal(\n value_grouped.aggregate(np.mean), agged, check_index_type=False\n )\n\n # complex agg\n agged = grouped.aggregate([np.mean, np.std])\n\n msg = r\"nested renamer is not supported\"\n with pytest.raises(SpecificationError, match=msg):\n grouped.aggregate({\"one\": np.mean, \"two\": np.std})\n\n group_constants = {0: 10, 1: 20, 2: 30}\n agged = grouped.agg(lambda x: group_constants[x.name] + x.mean())\n assert agged[1] == 21\n\n # corner cases\n msg = \"Must produce aggregated value\"\n # exception raised is type Exception\n with pytest.raises(Exception, match=msg):\n grouped.aggregate(lambda x: x * 2)\n\n\ndef test_groupby_nonobject_dtype(mframe, df_mixed_floats):\n key = mframe.index.codes[0]\n grouped = mframe.groupby(key)\n result = grouped.sum()\n\n expected = mframe.groupby(key.astype(\"O\")).sum()\n tm.assert_frame_equal(result, expected)\n\n # GH 3911, mixed frame non-conversion\n df = df_mixed_floats.copy()\n df[\"value\"] = range(len(df))\n\n def max_value(group):\n return group.loc[group[\"value\"].idxmax()]\n\n applied = df.groupby(\"A\").apply(max_value)\n result = applied.dtypes\n expected = df.dtypes\n tm.assert_series_equal(result, expected)\n\n\ndef test_groupby_return_type():\n\n # GH2893, return a reduced type\n df1 = DataFrame(\n [\n {\"val1\": 1, \"val2\": 20},\n {\"val1\": 1, \"val2\": 19},\n {\"val1\": 2, \"val2\": 27},\n {\"val1\": 2, \"val2\": 12},\n ]\n )\n\n def func(dataf):\n return dataf[\"val2\"] - dataf[\"val2\"].mean()\n\n with tm.assert_produces_warning(FutureWarning):\n result = df1.groupby(\"val1\", squeeze=True).apply(func)\n assert isinstance(result, Series)\n\n df2 = DataFrame(\n [\n {\"val1\": 1, \"val2\": 20},\n {\"val1\": 1, \"val2\": 19},\n {\"val1\": 1, \"val2\": 27},\n {\"val1\": 1, \"val2\": 12},\n ]\n )\n\n def func(dataf):\n return dataf[\"val2\"] - dataf[\"val2\"].mean()\n\n with tm.assert_produces_warning(FutureWarning):\n result = df2.groupby(\"val1\", squeeze=True).apply(func)\n assert isinstance(result, Series)\n\n # GH3596, return a consistent type (regression in 
0.11 from 0.10.1)\n df = DataFrame([[1, 1], [1, 1]], columns=[\"X\", \"Y\"])\n with tm.assert_produces_warning(FutureWarning):\n result = df.groupby(\"X\", squeeze=False).count()\n assert isinstance(result, DataFrame)\n\n\ndef test_inconsistent_return_type():\n # GH5592\n # inconsistent return type\n df = DataFrame(\n {\n \"A\": [\"Tiger\", \"Tiger\", \"Tiger\", \"Lamb\", \"Lamb\", \"Pony\", \"Pony\"],\n \"B\": Series(np.arange(7), dtype=\"int64\"),\n \"C\": date_range(\"20130101\", periods=7),\n }\n )\n\n def f(grp):\n return grp.iloc[0]\n\n expected = df.groupby(\"A\").first()[[\"B\"]]\n result = df.groupby(\"A\").apply(f)[[\"B\"]]\n tm.assert_frame_equal(result, expected)\n\n def f(grp):\n if grp.name == \"Tiger\":\n return None\n return grp.iloc[0]\n\n result = df.groupby(\"A\").apply(f)[[\"B\"]]\n e = expected.copy()\n e.loc[\"Tiger\"] = np.nan\n tm.assert_frame_equal(result, e)\n\n def f(grp):\n if grp.name == \"Pony\":\n return None\n return grp.iloc[0]\n\n result = df.groupby(\"A\").apply(f)[[\"B\"]]\n e = expected.copy()\n e.loc[\"Pony\"] = np.nan\n tm.assert_frame_equal(result, e)\n\n # 5592 revisited, with datetimes\n def f(grp):\n if grp.name == \"Pony\":\n return None\n return grp.iloc[0]\n\n result = df.groupby(\"A\").apply(f)[[\"C\"]]\n e = df.groupby(\"A\").first()[[\"C\"]]\n e.loc[\"Pony\"] = pd.NaT\n tm.assert_frame_equal(result, e)\n\n # scalar outputs\n def f(grp):\n if grp.name == \"Pony\":\n return None\n return grp.iloc[0].loc[\"C\"]\n\n result = df.groupby(\"A\").apply(f)\n e = df.groupby(\"A\").first()[\"C\"].copy()\n e.loc[\"Pony\"] = np.nan\n e.name = None\n tm.assert_series_equal(result, e)\n\n\ndef test_pass_args_kwargs(ts, tsframe):\n def f(x, q=None, axis=0):\n return np.percentile(x, q, axis=axis)\n\n g = lambda x: np.percentile(x, 80, axis=0)\n\n # Series\n ts_grouped = ts.groupby(lambda x: x.month)\n agg_result = ts_grouped.agg(np.percentile, 80, axis=0)\n apply_result = ts_grouped.apply(np.percentile, 80, axis=0)\n trans_result = ts_grouped.transform(np.percentile, 80, axis=0)\n\n agg_expected = ts_grouped.quantile(0.8)\n trans_expected = ts_grouped.transform(g)\n\n tm.assert_series_equal(apply_result, agg_expected)\n tm.assert_series_equal(agg_result, agg_expected)\n tm.assert_series_equal(trans_result, trans_expected)\n\n agg_result = ts_grouped.agg(f, q=80)\n apply_result = ts_grouped.apply(f, q=80)\n trans_result = ts_grouped.transform(f, q=80)\n tm.assert_series_equal(agg_result, agg_expected)\n tm.assert_series_equal(apply_result, agg_expected)\n tm.assert_series_equal(trans_result, trans_expected)\n\n # DataFrame\n df_grouped = tsframe.groupby(lambda x: x.month)\n agg_result = df_grouped.agg(np.percentile, 80, axis=0)\n apply_result = df_grouped.apply(DataFrame.quantile, 0.8)\n expected = df_grouped.quantile(0.8)\n tm.assert_frame_equal(apply_result, expected, check_names=False)\n tm.assert_frame_equal(agg_result, expected)\n\n agg_result = df_grouped.agg(f, q=80)\n apply_result = df_grouped.apply(DataFrame.quantile, q=0.8)\n tm.assert_frame_equal(agg_result, expected)\n tm.assert_frame_equal(apply_result, expected, check_names=False)\n\n\ndef test_len():\n df = tm.makeTimeDataFrame()\n grouped = df.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day])\n assert len(grouped) == len(df)\n\n grouped = df.groupby([lambda x: x.year, lambda x: x.month])\n expected = len({(x.year, x.month) for x in df.index})\n assert len(grouped) == expected\n\n # issue 11016\n df = DataFrame({\"a\": [np.nan] * 3, \"b\": [1, 2, 3]})\n assert 
len(df.groupby(\"a\")) == 0\n assert len(df.groupby(\"b\")) == 3\n assert len(df.groupby([\"a\", \"b\"])) == 3\n\n\ndef test_basic_regression():\n # regression\n result = Series([1.0 * x for x in list(range(1, 10)) * 10])\n\n data = np.random.random(1100) * 10.0\n groupings = Series(data)\n\n grouped = result.groupby(groupings)\n grouped.mean()\n\n\[email protected](\n \"dtype\", [\"float64\", \"float32\", \"int64\", \"int32\", \"int16\", \"int8\"]\n)\ndef test_with_na_groups(dtype):\n index = Index(np.arange(10))\n values = Series(np.ones(10), index, dtype=dtype)\n labels = Series(\n [np.nan, \"foo\", \"bar\", \"bar\", np.nan, np.nan, \"bar\", \"bar\", np.nan, \"foo\"],\n index=index,\n )\n\n # this SHOULD be an int\n grouped = values.groupby(labels)\n agged = grouped.agg(len)\n expected = Series([4, 2], index=[\"bar\", \"foo\"])\n\n tm.assert_series_equal(agged, expected, check_dtype=False)\n\n # assert issubclass(agged.dtype.type, np.integer)\n\n # explicitly return a float from my function\n def f(x):\n return float(len(x))\n\n agged = grouped.agg(f)\n expected = Series([4.0, 2.0], index=[\"bar\", \"foo\"])\n\n tm.assert_series_equal(agged, expected)\n\n\ndef test_indices_concatenation_order():\n\n # GH 2808\n\n def f1(x):\n y = x[(x.b % 2) == 1] ** 2\n if y.empty:\n multiindex = MultiIndex(levels=[[]] * 2, codes=[[]] * 2, names=[\"b\", \"c\"])\n res = DataFrame(columns=[\"a\"], index=multiindex)\n return res\n else:\n y = y.set_index([\"b\", \"c\"])\n return y\n\n def f2(x):\n y = x[(x.b % 2) == 1] ** 2\n if y.empty:\n return DataFrame()\n else:\n y = y.set_index([\"b\", \"c\"])\n return y\n\n def f3(x):\n y = x[(x.b % 2) == 1] ** 2\n if y.empty:\n multiindex = MultiIndex(\n levels=[[]] * 2, codes=[[]] * 2, names=[\"foo\", \"bar\"]\n )\n res = DataFrame(columns=[\"a\", \"b\"], index=multiindex)\n return res\n else:\n return y\n\n df = DataFrame({\"a\": [1, 2, 2, 2], \"b\": range(4), \"c\": range(5, 9)})\n\n df2 = DataFrame({\"a\": [3, 2, 2, 2], \"b\": range(4), \"c\": range(5, 9)})\n\n # correct result\n result1 = df.groupby(\"a\").apply(f1)\n result2 = df2.groupby(\"a\").apply(f1)\n tm.assert_frame_equal(result1, result2)\n\n # should fail (not the same number of levels)\n msg = \"Cannot concat indices that do not have the same number of levels\"\n with pytest.raises(AssertionError, match=msg):\n df.groupby(\"a\").apply(f2)\n with pytest.raises(AssertionError, match=msg):\n df2.groupby(\"a\").apply(f2)\n\n # should fail (incorrect shape)\n with pytest.raises(AssertionError, match=msg):\n df.groupby(\"a\").apply(f3)\n with pytest.raises(AssertionError, match=msg):\n df2.groupby(\"a\").apply(f3)\n\n\ndef test_attr_wrapper(ts):\n grouped = ts.groupby(lambda x: x.weekday())\n\n result = grouped.std()\n expected = grouped.agg(lambda x: np.std(x, ddof=1))\n tm.assert_series_equal(result, expected)\n\n # this is pretty cool\n result = grouped.describe()\n expected = {name: gp.describe() for name, gp in grouped}\n expected = DataFrame(expected).T\n tm.assert_frame_equal(result, expected)\n\n # get attribute\n result = grouped.dtype\n expected = grouped.agg(lambda x: x.dtype)\n tm.assert_series_equal(result, expected)\n\n # make sure raises error\n msg = \"'SeriesGroupBy' object has no attribute 'foo'\"\n with pytest.raises(AttributeError, match=msg):\n getattr(grouped, \"foo\")\n\n\ndef test_frame_groupby(tsframe):\n grouped = tsframe.groupby(lambda x: x.weekday())\n\n # aggregate\n aggregated = grouped.aggregate(np.mean)\n assert len(aggregated) == 5\n assert len(aggregated.columns) == 
4\n\n # by string\n tscopy = tsframe.copy()\n tscopy[\"weekday\"] = [x.weekday() for x in tscopy.index]\n stragged = tscopy.groupby(\"weekday\").aggregate(np.mean)\n tm.assert_frame_equal(stragged, aggregated, check_names=False)\n\n # transform\n grouped = tsframe.head(30).groupby(lambda x: x.weekday())\n transformed = grouped.transform(lambda x: x - x.mean())\n assert len(transformed) == 30\n assert len(transformed.columns) == 4\n\n # transform propagate\n transformed = grouped.transform(lambda x: x.mean())\n for name, group in grouped:\n mean = group.mean()\n for idx in group.index:\n tm.assert_series_equal(transformed.xs(idx), mean, check_names=False)\n\n # iterate\n for weekday, group in grouped:\n assert group.index[0].weekday() == weekday\n\n # groups / group_indices\n groups = grouped.groups\n indices = grouped.indices\n\n for k, v in groups.items():\n samething = tsframe.index.take(indices[k])\n assert (samething == v).all()\n\n\ndef test_frame_groupby_columns(tsframe):\n mapping = {\"A\": 0, \"B\": 0, \"C\": 1, \"D\": 1}\n grouped = tsframe.groupby(mapping, axis=1)\n\n # aggregate\n aggregated = grouped.aggregate(np.mean)\n assert len(aggregated) == len(tsframe)\n assert len(aggregated.columns) == 2\n\n # transform\n tf = lambda x: x - x.mean()\n groupedT = tsframe.T.groupby(mapping, axis=0)\n tm.assert_frame_equal(groupedT.transform(tf).T, grouped.transform(tf))\n\n # iterate\n for k, v in grouped:\n assert len(v.columns) == 2\n\n\ndef test_frame_set_name_single(df):\n grouped = df.groupby(\"A\")\n\n result = grouped.mean()\n assert result.index.name == \"A\"\n\n result = df.groupby(\"A\", as_index=False).mean()\n assert result.index.name != \"A\"\n\n result = grouped.agg(np.mean)\n assert result.index.name == \"A\"\n\n result = grouped.agg({\"C\": np.mean, \"D\": np.std})\n assert result.index.name == \"A\"\n\n result = grouped[\"C\"].mean()\n assert result.index.name == \"A\"\n result = grouped[\"C\"].agg(np.mean)\n assert result.index.name == \"A\"\n result = grouped[\"C\"].agg([np.mean, np.std])\n assert result.index.name == \"A\"\n\n msg = r\"nested renamer is not supported\"\n with pytest.raises(SpecificationError, match=msg):\n grouped[\"C\"].agg({\"foo\": np.mean, \"bar\": np.std})\n\n\ndef test_multi_func(df):\n col1 = df[\"A\"]\n col2 = df[\"B\"]\n\n grouped = df.groupby([col1.get, col2.get])\n agged = grouped.mean()\n expected = df.groupby([\"A\", \"B\"]).mean()\n\n # TODO groupby get drops names\n tm.assert_frame_equal(\n agged.loc[:, [\"C\", \"D\"]], expected.loc[:, [\"C\", \"D\"]], check_names=False\n )\n\n # some \"groups\" with no data\n df = DataFrame(\n {\n \"v1\": np.random.randn(6),\n \"v2\": np.random.randn(6),\n \"k1\": np.array([\"b\", \"b\", \"b\", \"a\", \"a\", \"a\"]),\n \"k2\": np.array([\"1\", \"1\", \"1\", \"2\", \"2\", \"2\"]),\n },\n index=[\"one\", \"two\", \"three\", \"four\", \"five\", \"six\"],\n )\n # only verify that it works for now\n grouped = df.groupby([\"k1\", \"k2\"])\n grouped.agg(np.sum)\n\n\ndef test_multi_key_multiple_functions(df):\n grouped = df.groupby([\"A\", \"B\"])[\"C\"]\n\n agged = grouped.agg([np.mean, np.std])\n expected = DataFrame({\"mean\": grouped.agg(np.mean), \"std\": grouped.agg(np.std)})\n tm.assert_frame_equal(agged, expected)\n\n\ndef test_frame_multi_key_function_list():\n data = DataFrame(\n {\n \"A\": [\n \"foo\",\n \"foo\",\n \"foo\",\n \"foo\",\n \"bar\",\n \"bar\",\n \"bar\",\n \"bar\",\n \"foo\",\n \"foo\",\n \"foo\",\n ],\n \"B\": [\n \"one\",\n \"one\",\n \"one\",\n \"two\",\n \"one\",\n \"one\",\n 
\"one\",\n \"two\",\n \"two\",\n \"two\",\n \"one\",\n ],\n \"C\": [\n \"dull\",\n \"dull\",\n \"shiny\",\n \"dull\",\n \"dull\",\n \"shiny\",\n \"shiny\",\n \"dull\",\n \"shiny\",\n \"shiny\",\n \"shiny\",\n ],\n \"D\": np.random.randn(11),\n \"E\": np.random.randn(11),\n \"F\": np.random.randn(11),\n }\n )\n\n grouped = data.groupby([\"A\", \"B\"])\n funcs = [np.mean, np.std]\n agged = grouped.agg(funcs)\n expected = pd.concat(\n [grouped[\"D\"].agg(funcs), grouped[\"E\"].agg(funcs), grouped[\"F\"].agg(funcs)],\n keys=[\"D\", \"E\", \"F\"],\n axis=1,\n )\n assert isinstance(agged.index, MultiIndex)\n assert isinstance(expected.index, MultiIndex)\n tm.assert_frame_equal(agged, expected)\n\n\[email protected](\"op\", [lambda x: x.sum(), lambda x: x.mean()])\ndef test_groupby_multiple_columns(df, op):\n data = df\n grouped = data.groupby([\"A\", \"B\"])\n\n result1 = op(grouped)\n\n keys = []\n values = []\n for n1, gp1 in data.groupby(\"A\"):\n for n2, gp2 in gp1.groupby(\"B\"):\n keys.append((n1, n2))\n values.append(op(gp2.loc[:, [\"C\", \"D\"]]))\n\n mi = MultiIndex.from_tuples(keys, names=[\"A\", \"B\"])\n expected = pd.concat(values, axis=1).T\n expected.index = mi\n\n # a little bit crude\n for col in [\"C\", \"D\"]:\n result_col = op(grouped[col])\n pivoted = result1[col]\n exp = expected[col]\n tm.assert_series_equal(result_col, exp)\n tm.assert_series_equal(pivoted, exp)\n\n # test single series works the same\n result = data[\"C\"].groupby([data[\"A\"], data[\"B\"]]).mean()\n expected = data.groupby([\"A\", \"B\"]).mean()[\"C\"]\n\n tm.assert_series_equal(result, expected)\n\n\ndef test_as_index_select_column():\n # GH 5764\n df = DataFrame([[1, 2], [1, 4], [5, 6]], columns=[\"A\", \"B\"])\n result = df.groupby(\"A\", as_index=False)[\"B\"].get_group(1)\n expected = Series([2, 4], name=\"B\")\n tm.assert_series_equal(result, expected)\n\n result = df.groupby(\"A\", as_index=False)[\"B\"].apply(lambda x: x.cumsum())\n expected = Series(\n [2, 6, 6], name=\"B\", index=MultiIndex.from_tuples([(0, 0), (0, 1), (1, 2)])\n )\n tm.assert_series_equal(result, expected)\n\n\ndef test_groupby_as_index_select_column_sum_empty_df():\n # GH 35246\n df = DataFrame(columns=[\"A\", \"B\", \"C\"])\n left = df.groupby(by=\"A\", as_index=False)[\"B\"].sum()\n assert type(left) is DataFrame\n assert left.to_dict() == {\"A\": {}, \"B\": {}}\n\n\ndef test_groupby_as_index_agg(df):\n grouped = df.groupby(\"A\", as_index=False)\n\n # single-key\n\n result = grouped.agg(np.mean)\n expected = grouped.mean()\n tm.assert_frame_equal(result, expected)\n\n result2 = grouped.agg({\"C\": np.mean, \"D\": np.sum})\n expected2 = grouped.mean()\n expected2[\"D\"] = grouped.sum()[\"D\"]\n tm.assert_frame_equal(result2, expected2)\n\n grouped = df.groupby(\"A\", as_index=True)\n\n msg = r\"nested renamer is not supported\"\n with pytest.raises(SpecificationError, match=msg):\n grouped[\"C\"].agg({\"Q\": np.sum})\n\n # multi-key\n\n grouped = df.groupby([\"A\", \"B\"], as_index=False)\n\n result = grouped.agg(np.mean)\n expected = grouped.mean()\n tm.assert_frame_equal(result, expected)\n\n result2 = grouped.agg({\"C\": np.mean, \"D\": np.sum})\n expected2 = grouped.mean()\n expected2[\"D\"] = grouped.sum()[\"D\"]\n tm.assert_frame_equal(result2, expected2)\n\n expected3 = grouped[\"C\"].sum()\n expected3 = DataFrame(expected3).rename(columns={\"C\": \"Q\"})\n result3 = grouped[\"C\"].agg({\"Q\": np.sum})\n tm.assert_frame_equal(result3, expected3)\n\n # GH7115 & GH8112 & GH8582\n df = DataFrame(np.random.randint(0, 
100, (50, 3)), columns=[\"jim\", \"joe\", \"jolie\"])\n ts = Series(np.random.randint(5, 10, 50), name=\"jim\")\n\n gr = df.groupby(ts)\n gr.nth(0) # invokes set_selection_from_grouper internally\n tm.assert_frame_equal(gr.apply(sum), df.groupby(ts).apply(sum))\n\n for attr in [\"mean\", \"max\", \"count\", \"idxmax\", \"cumsum\", \"all\"]:\n gr = df.groupby(ts, as_index=False)\n left = getattr(gr, attr)()\n\n gr = df.groupby(ts.values, as_index=True)\n right = getattr(gr, attr)().reset_index(drop=True)\n\n tm.assert_frame_equal(left, right)\n\n\ndef test_ops_not_as_index(reduction_func):\n # GH 10355, 21090\n # Using as_index=False should not modify grouped column\n\n if reduction_func in (\"corrwith\",):\n pytest.skip(\"Test not applicable\")\n\n if reduction_func in (\"nth\", \"ngroup\"):\n pytest.skip(\"Skip until behavior is determined (GH #5755)\")\n\n df = DataFrame(np.random.randint(0, 5, size=(100, 2)), columns=[\"a\", \"b\"])\n expected = getattr(df.groupby(\"a\"), reduction_func)()\n if reduction_func == \"size\":\n expected = expected.rename(\"size\")\n expected = expected.reset_index()\n\n g = df.groupby(\"a\", as_index=False)\n\n result = getattr(g, reduction_func)()\n tm.assert_frame_equal(result, expected)\n\n result = g.agg(reduction_func)\n tm.assert_frame_equal(result, expected)\n\n result = getattr(g[\"b\"], reduction_func)()\n tm.assert_frame_equal(result, expected)\n\n result = g[\"b\"].agg(reduction_func)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_as_index_series_return_frame(df):\n grouped = df.groupby(\"A\", as_index=False)\n grouped2 = df.groupby([\"A\", \"B\"], as_index=False)\n\n result = grouped[\"C\"].agg(np.sum)\n expected = grouped.agg(np.sum).loc[:, [\"A\", \"C\"]]\n assert isinstance(result, DataFrame)\n tm.assert_frame_equal(result, expected)\n\n result2 = grouped2[\"C\"].agg(np.sum)\n expected2 = grouped2.agg(np.sum).loc[:, [\"A\", \"B\", \"C\"]]\n assert isinstance(result2, DataFrame)\n tm.assert_frame_equal(result2, expected2)\n\n result = grouped[\"C\"].sum()\n expected = grouped.sum().loc[:, [\"A\", \"C\"]]\n assert isinstance(result, DataFrame)\n tm.assert_frame_equal(result, expected)\n\n result2 = grouped2[\"C\"].sum()\n expected2 = grouped2.sum().loc[:, [\"A\", \"B\", \"C\"]]\n assert isinstance(result2, DataFrame)\n tm.assert_frame_equal(result2, expected2)\n\n\ndef test_as_index_series_column_slice_raises(df):\n # GH15072\n grouped = df.groupby(\"A\", as_index=False)\n msg = r\"Column\\(s\\) C already selected\"\n\n with pytest.raises(IndexError, match=msg):\n grouped[\"C\"].__getitem__(\"D\")\n\n\ndef test_groupby_as_index_cython(df):\n data = df\n\n # single-key\n grouped = data.groupby(\"A\", as_index=False)\n result = grouped.mean()\n expected = data.groupby([\"A\"]).mean()\n expected.insert(0, \"A\", expected.index)\n expected.index = np.arange(len(expected))\n tm.assert_frame_equal(result, expected)\n\n # multi-key\n grouped = data.groupby([\"A\", \"B\"], as_index=False)\n result = grouped.mean()\n expected = data.groupby([\"A\", \"B\"]).mean()\n\n arrays = list(zip(*expected.index.values))\n expected.insert(0, \"A\", arrays[0])\n expected.insert(1, \"B\", arrays[1])\n expected.index = np.arange(len(expected))\n tm.assert_frame_equal(result, expected)\n\n\ndef test_groupby_as_index_series_scalar(df):\n grouped = df.groupby([\"A\", \"B\"], as_index=False)\n\n # GH #421\n\n result = grouped[\"C\"].agg(len)\n expected = grouped.agg(len).loc[:, [\"A\", \"B\", \"C\"]]\n tm.assert_frame_equal(result, expected)\n\n\ndef 
test_groupby_as_index_corner(df, ts):\n msg = \"as_index=False only valid with DataFrame\"\n with pytest.raises(TypeError, match=msg):\n ts.groupby(lambda x: x.weekday(), as_index=False)\n\n msg = \"as_index=False only valid for axis=0\"\n with pytest.raises(ValueError, match=msg):\n df.groupby(lambda x: x.lower(), as_index=False, axis=1)\n\n\ndef test_groupby_multiple_key(df):\n df = tm.makeTimeDataFrame()\n grouped = df.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day])\n agged = grouped.sum()\n tm.assert_almost_equal(df.values, agged.values)\n\n grouped = df.T.groupby(\n [lambda x: x.year, lambda x: x.month, lambda x: x.day], axis=1\n )\n\n agged = grouped.agg(lambda x: x.sum())\n tm.assert_index_equal(agged.index, df.columns)\n tm.assert_almost_equal(df.T.values, agged.values)\n\n agged = grouped.agg(lambda x: x.sum())\n tm.assert_almost_equal(df.T.values, agged.values)\n\n\ndef test_groupby_multi_corner(df):\n # test that having an all-NA column doesn't mess you up\n df = df.copy()\n df[\"bad\"] = np.nan\n agged = df.groupby([\"A\", \"B\"]).mean()\n\n expected = df.groupby([\"A\", \"B\"]).mean()\n expected[\"bad\"] = np.nan\n\n tm.assert_frame_equal(agged, expected)\n\n\ndef test_omit_nuisance(df):\n grouped = df.groupby(\"A\")\n\n result = grouped.mean()\n expected = df.loc[:, [\"A\", \"C\", \"D\"]].groupby(\"A\").mean()\n tm.assert_frame_equal(result, expected)\n\n agged = grouped.agg(np.mean)\n exp = grouped.mean()\n tm.assert_frame_equal(agged, exp)\n\n df = df.loc[:, [\"A\", \"C\", \"D\"]]\n df[\"E\"] = datetime.now()\n grouped = df.groupby(\"A\")\n result = grouped.agg(np.sum)\n expected = grouped.sum()\n tm.assert_frame_equal(result, expected)\n\n # won't work with axis = 1\n grouped = df.groupby({\"A\": 0, \"C\": 0, \"D\": 1, \"E\": 1}, axis=1)\n msg = \"'DatetimeArray' does not implement reduction 'sum'\"\n with pytest.raises(TypeError, match=msg):\n grouped.agg(lambda x: x.sum(0, numeric_only=False))\n\n\ndef test_omit_nuisance_sem(df):\n # GH 38774 - sem should work with nuisance columns\n grouped = df.groupby(\"A\")\n result = grouped.sem()\n expected = df.loc[:, [\"A\", \"C\", \"D\"]].groupby(\"A\").sem()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_omit_nuisance_python_multiple(three_group):\n grouped = three_group.groupby([\"A\", \"B\"])\n\n agged = grouped.agg(np.mean)\n exp = grouped.mean()\n tm.assert_frame_equal(agged, exp)\n\n\ndef test_empty_groups_corner(mframe):\n # handle empty groups\n df = DataFrame(\n {\n \"k1\": np.array([\"b\", \"b\", \"b\", \"a\", \"a\", \"a\"]),\n \"k2\": np.array([\"1\", \"1\", \"1\", \"2\", \"2\", \"2\"]),\n \"k3\": [\"foo\", \"bar\"] * 3,\n \"v1\": np.random.randn(6),\n \"v2\": np.random.randn(6),\n }\n )\n\n grouped = df.groupby([\"k1\", \"k2\"])\n result = grouped.agg(np.mean)\n expected = grouped.mean()\n tm.assert_frame_equal(result, expected)\n\n grouped = mframe[3:5].groupby(level=0)\n agged = grouped.apply(lambda x: x.mean())\n agged_A = grouped[\"A\"].apply(np.mean)\n tm.assert_series_equal(agged[\"A\"], agged_A)\n assert agged.index.name == \"first\"\n\n\ndef test_nonsense_func():\n df = DataFrame([0])\n msg = r\"unsupported operand type\\(s\\) for \\+: 'int' and 'str'\"\n with pytest.raises(TypeError, match=msg):\n df.groupby(lambda x: x + \"foo\")\n\n\ndef test_wrap_aggregated_output_multindex(mframe):\n df = mframe.T\n df[\"baz\", \"two\"] = \"peekaboo\"\n\n keys = [np.array([0, 0, 1]), np.array([0, 0, 1])]\n agged = df.groupby(keys).agg(np.mean)\n assert isinstance(agged.columns, MultiIndex)\n\n 
def aggfun(ser):\n if ser.name == (\"foo\", \"one\"):\n raise TypeError\n else:\n return ser.sum()\n\n agged2 = df.groupby(keys).aggregate(aggfun)\n assert len(agged2.columns) + 1 == len(df.columns)\n\n\ndef test_groupby_level_apply(mframe):\n\n result = mframe.groupby(level=0).count()\n assert result.index.name == \"first\"\n result = mframe.groupby(level=1).count()\n assert result.index.name == \"second\"\n\n result = mframe[\"A\"].groupby(level=0).count()\n assert result.index.name == \"first\"\n\n\ndef test_groupby_level_mapper(mframe):\n deleveled = mframe.reset_index()\n\n mapper0 = {\"foo\": 0, \"bar\": 0, \"baz\": 1, \"qux\": 1}\n mapper1 = {\"one\": 0, \"two\": 0, \"three\": 1}\n\n result0 = mframe.groupby(mapper0, level=0).sum()\n result1 = mframe.groupby(mapper1, level=1).sum()\n\n mapped_level0 = np.array([mapper0.get(x) for x in deleveled[\"first\"]])\n mapped_level1 = np.array([mapper1.get(x) for x in deleveled[\"second\"]])\n expected0 = mframe.groupby(mapped_level0).sum()\n expected1 = mframe.groupby(mapped_level1).sum()\n expected0.index.name, expected1.index.name = \"first\", \"second\"\n\n tm.assert_frame_equal(result0, expected0)\n tm.assert_frame_equal(result1, expected1)\n\n\ndef test_groupby_level_nonmulti():\n # GH 1313, GH 13901\n s = Series([1, 2, 3, 10, 4, 5, 20, 6], Index([1, 2, 3, 1, 4, 5, 2, 6], name=\"foo\"))\n expected = Series([11, 22, 3, 4, 5, 6], Index(range(1, 7), name=\"foo\"))\n\n result = s.groupby(level=0).sum()\n tm.assert_series_equal(result, expected)\n result = s.groupby(level=[0]).sum()\n tm.assert_series_equal(result, expected)\n result = s.groupby(level=-1).sum()\n tm.assert_series_equal(result, expected)\n result = s.groupby(level=[-1]).sum()\n tm.assert_series_equal(result, expected)\n\n msg = \"level > 0 or level < -1 only valid with MultiIndex\"\n with pytest.raises(ValueError, match=msg):\n s.groupby(level=1)\n with pytest.raises(ValueError, match=msg):\n s.groupby(level=-2)\n msg = \"No group keys passed!\"\n with pytest.raises(ValueError, match=msg):\n s.groupby(level=[])\n msg = \"multiple levels only valid with MultiIndex\"\n with pytest.raises(ValueError, match=msg):\n s.groupby(level=[0, 0])\n with pytest.raises(ValueError, match=msg):\n s.groupby(level=[0, 1])\n msg = \"level > 0 or level < -1 only valid with MultiIndex\"\n with pytest.raises(ValueError, match=msg):\n s.groupby(level=[1])\n\n\ndef test_groupby_complex():\n # GH 12902\n a = Series(data=np.arange(4) * (1 + 2j), index=[0, 0, 1, 1])\n expected = Series((1 + 2j, 5 + 10j))\n\n result = a.groupby(level=0).sum()\n tm.assert_series_equal(result, expected)\n\n with tm.assert_produces_warning(FutureWarning):\n result = a.sum(level=0)\n tm.assert_series_equal(result, expected)\n\n\ndef test_groupby_series_indexed_differently():\n s1 = Series(\n [5.0, -9.0, 4.0, 100.0, -5.0, 55.0, 6.7],\n index=Index([\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\"]),\n )\n s2 = Series(\n [1.0, 1.0, 4.0, 5.0, 5.0, 7.0], index=Index([\"a\", \"b\", \"d\", \"f\", \"g\", \"h\"])\n )\n\n grouped = s1.groupby(s2)\n agged = grouped.mean()\n exp = s1.groupby(s2.reindex(s1.index).get).mean()\n tm.assert_series_equal(agged, exp)\n\n\ndef test_groupby_with_hier_columns():\n tuples = list(\n zip(\n *[\n [\"bar\", \"bar\", \"baz\", \"baz\", \"foo\", \"foo\", \"qux\", \"qux\"],\n [\"one\", \"two\", \"one\", \"two\", \"one\", \"two\", \"one\", \"two\"],\n ]\n )\n )\n index = MultiIndex.from_tuples(tuples)\n columns = MultiIndex.from_tuples(\n [(\"A\", \"cat\"), (\"B\", \"dog\"), (\"B\", \"cat\"), (\"A\", 
\"dog\")]\n )\n df = DataFrame(np.random.randn(8, 4), index=index, columns=columns)\n\n result = df.groupby(level=0).mean()\n tm.assert_index_equal(result.columns, columns)\n\n result = df.groupby(level=0, axis=1).mean()\n tm.assert_index_equal(result.index, df.index)\n\n result = df.groupby(level=0).agg(np.mean)\n tm.assert_index_equal(result.columns, columns)\n\n result = df.groupby(level=0).apply(lambda x: x.mean())\n tm.assert_index_equal(result.columns, columns)\n\n result = df.groupby(level=0, axis=1).agg(lambda x: x.mean(1))\n tm.assert_index_equal(result.columns, Index([\"A\", \"B\"]))\n tm.assert_index_equal(result.index, df.index)\n\n # add a nuisance column\n sorted_columns, _ = columns.sortlevel(0)\n df[\"A\", \"foo\"] = \"bar\"\n result = df.groupby(level=0).mean()\n tm.assert_index_equal(result.columns, df.columns[:-1])\n\n\ndef test_grouping_ndarray(df):\n grouped = df.groupby(df[\"A\"].values)\n\n result = grouped.sum()\n expected = df.groupby(\"A\").sum()\n tm.assert_frame_equal(\n result, expected, check_names=False\n ) # Note: no names when grouping by value\n\n\ndef test_groupby_wrong_multi_labels():\n data = \"\"\"index,foo,bar,baz,spam,data\n0,foo1,bar1,baz1,spam2,20\n1,foo1,bar2,baz1,spam3,30\n2,foo2,bar2,baz1,spam2,40\n3,foo1,bar1,baz2,spam1,50\n4,foo3,bar1,baz2,spam1,60\"\"\"\n\n data = read_csv(StringIO(data), index_col=0)\n\n grouped = data.groupby([\"foo\", \"bar\", \"baz\", \"spam\"])\n\n result = grouped.agg(np.mean)\n expected = grouped.mean()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_groupby_series_with_name(df):\n result = df.groupby(df[\"A\"]).mean()\n result2 = df.groupby(df[\"A\"], as_index=False).mean()\n assert result.index.name == \"A\"\n assert \"A\" in result2\n\n result = df.groupby([df[\"A\"], df[\"B\"]]).mean()\n result2 = df.groupby([df[\"A\"], df[\"B\"]], as_index=False).mean()\n assert result.index.names == (\"A\", \"B\")\n assert \"A\" in result2\n assert \"B\" in result2\n\n\ndef test_seriesgroupby_name_attr(df):\n # GH 6265\n result = df.groupby(\"A\")[\"C\"]\n assert result.count().name == \"C\"\n assert result.mean().name == \"C\"\n\n testFunc = lambda x: np.sum(x) * 2\n assert result.agg(testFunc).name == \"C\"\n\n\ndef test_consistency_name():\n # GH 12363\n\n df = DataFrame(\n {\n \"A\": [\"foo\", \"bar\", \"foo\", \"bar\", \"foo\", \"bar\", \"foo\", \"foo\"],\n \"B\": [\"one\", \"one\", \"two\", \"two\", \"two\", \"two\", \"one\", \"two\"],\n \"C\": np.random.randn(8) + 1.0,\n \"D\": np.arange(8),\n }\n )\n\n expected = df.groupby([\"A\"]).B.count()\n result = df.B.groupby(df.A).count()\n tm.assert_series_equal(result, expected)\n\n\ndef test_groupby_name_propagation(df):\n # GH 6124\n def summarize(df, name=None):\n return Series({\"count\": 1, \"mean\": 2, \"omissions\": 3}, name=name)\n\n def summarize_random_name(df):\n # Provide a different name for each Series. 
In this case, groupby\n # should not attempt to propagate the Series name since they are\n # inconsistent.\n return Series({\"count\": 1, \"mean\": 2, \"omissions\": 3}, name=df.iloc[0][\"A\"])\n\n metrics = df.groupby(\"A\").apply(summarize)\n assert metrics.columns.name is None\n metrics = df.groupby(\"A\").apply(summarize, \"metrics\")\n assert metrics.columns.name == \"metrics\"\n metrics = df.groupby(\"A\").apply(summarize_random_name)\n assert metrics.columns.name is None\n\n\ndef test_groupby_nonstring_columns():\n df = DataFrame([np.arange(10) for x in range(10)])\n grouped = df.groupby(0)\n result = grouped.mean()\n expected = df.groupby(df[0]).mean()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_groupby_mixed_type_columns():\n # GH 13432, unorderable types in py3\n df = DataFrame([[0, 1, 2]], columns=[\"A\", \"B\", 0])\n expected = DataFrame([[1, 2]], columns=[\"B\", 0], index=Index([0], name=\"A\"))\n\n result = df.groupby(\"A\").first()\n tm.assert_frame_equal(result, expected)\n\n result = df.groupby(\"A\").sum()\n tm.assert_frame_equal(result, expected)\n\n\n# TODO: Ensure warning isn't emitted in the first place\[email protected](\"ignore:Mean of:RuntimeWarning\")\ndef test_cython_grouper_series_bug_noncontig():\n arr = np.empty((100, 100))\n arr.fill(np.nan)\n obj = Series(arr[:, 0])\n inds = np.tile(range(10), 10)\n\n result = obj.groupby(inds).agg(Series.median)\n assert result.isna().all()\n\n\ndef test_series_grouper_noncontig_index():\n index = Index(tm.rands_array(10, 100))\n\n values = Series(np.random.randn(50), index=index[::2])\n labels = np.random.randint(0, 5, 50)\n\n # it works!\n grouped = values.groupby(labels)\n\n # accessing the index elements causes segfault\n f = lambda x: len(set(map(id, x.index)))\n grouped.agg(f)\n\n\ndef test_convert_objects_leave_decimal_alone():\n\n s = Series(range(5))\n labels = np.array([\"a\", \"b\", \"c\", \"d\", \"e\"], dtype=\"O\")\n\n def convert_fast(x):\n return Decimal(str(x.mean()))\n\n def convert_force_pure(x):\n # base will be length 0\n assert len(x.values.base) > 0\n return Decimal(str(x.mean()))\n\n grouped = s.groupby(labels)\n\n result = grouped.agg(convert_fast)\n assert result.dtype == np.object_\n assert isinstance(result[0], Decimal)\n\n result = grouped.agg(convert_force_pure)\n assert result.dtype == np.object_\n assert isinstance(result[0], Decimal)\n\n\ndef test_groupby_dtype_inference_empty():\n # GH 6733\n df = DataFrame({\"x\": [], \"range\": np.arange(0, dtype=\"int64\")})\n assert df[\"x\"].dtype == np.float64\n\n result = df.groupby(\"x\").first()\n exp_index = Index([], name=\"x\", dtype=np.float64)\n expected = DataFrame({\"range\": Series([], index=exp_index, dtype=\"int64\")})\n tm.assert_frame_equal(result, expected, by_blocks=True)\n\n\ndef test_groupby_unit64_float_conversion():\n # GH: 30859 groupby converts unit64 to floats sometimes\n df = DataFrame({\"first\": [1], \"second\": [1], \"value\": [16148277970000000000]})\n result = df.groupby([\"first\", \"second\"])[\"value\"].max()\n expected = Series(\n [16148277970000000000],\n MultiIndex.from_product([[1], [1]], names=[\"first\", \"second\"]),\n name=\"value\",\n )\n tm.assert_series_equal(result, expected)\n\n\ndef test_groupby_list_infer_array_like(df):\n result = df.groupby(list(df[\"A\"])).mean()\n expected = df.groupby(df[\"A\"]).mean()\n tm.assert_frame_equal(result, expected, check_names=False)\n\n with pytest.raises(KeyError, match=r\"^'foo'$\"):\n df.groupby(list(df[\"A\"][:-1]))\n\n # pathological case of ambiguity\n 
df = DataFrame({\"foo\": [0, 1], \"bar\": [3, 4], \"val\": np.random.randn(2)})\n\n result = df.groupby([\"foo\", \"bar\"]).mean()\n expected = df.groupby([df[\"foo\"], df[\"bar\"]]).mean()[[\"val\"]]\n\n\ndef test_groupby_keys_same_size_as_index():\n # GH 11185\n freq = \"s\"\n index = date_range(\n start=Timestamp(\"2015-09-29T11:34:44-0700\"), periods=2, freq=freq\n )\n df = DataFrame([[\"A\", 10], [\"B\", 15]], columns=[\"metric\", \"values\"], index=index)\n result = df.groupby([Grouper(level=0, freq=freq), \"metric\"]).mean()\n expected = df.set_index([df.index, \"metric\"])\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_groupby_one_row():\n # GH 11741\n msg = r\"^'Z'$\"\n df1 = DataFrame(np.random.randn(1, 4), columns=list(\"ABCD\"))\n with pytest.raises(KeyError, match=msg):\n df1.groupby(\"Z\")\n df2 = DataFrame(np.random.randn(2, 4), columns=list(\"ABCD\"))\n with pytest.raises(KeyError, match=msg):\n df2.groupby(\"Z\")\n\n\ndef test_groupby_nat_exclude():\n # GH 6992\n df = DataFrame(\n {\n \"values\": np.random.randn(8),\n \"dt\": [\n np.nan,\n Timestamp(\"2013-01-01\"),\n np.nan,\n Timestamp(\"2013-02-01\"),\n np.nan,\n Timestamp(\"2013-02-01\"),\n np.nan,\n Timestamp(\"2013-01-01\"),\n ],\n \"str\": [np.nan, \"a\", np.nan, \"a\", np.nan, \"a\", np.nan, \"b\"],\n }\n )\n grouped = df.groupby(\"dt\")\n\n expected = [Index([1, 7]), Index([3, 5])]\n keys = sorted(grouped.groups.keys())\n assert len(keys) == 2\n for k, e in zip(keys, expected):\n # grouped.groups keys are np.datetime64 with system tz\n # not to be affected by tz, only compare values\n tm.assert_index_equal(grouped.groups[k], e)\n\n # confirm obj is not filtered\n tm.assert_frame_equal(grouped.grouper.groupings[0].obj, df)\n assert grouped.ngroups == 2\n\n expected = {\n Timestamp(\"2013-01-01 00:00:00\"): np.array([1, 7], dtype=np.intp),\n Timestamp(\"2013-02-01 00:00:00\"): np.array([3, 5], dtype=np.intp),\n }\n\n for k in grouped.indices:\n tm.assert_numpy_array_equal(grouped.indices[k], expected[k])\n\n tm.assert_frame_equal(grouped.get_group(Timestamp(\"2013-01-01\")), df.iloc[[1, 7]])\n tm.assert_frame_equal(grouped.get_group(Timestamp(\"2013-02-01\")), df.iloc[[3, 5]])\n\n with pytest.raises(KeyError, match=r\"^NaT$\"):\n grouped.get_group(pd.NaT)\n\n nan_df = DataFrame(\n {\"nan\": [np.nan, np.nan, np.nan], \"nat\": [pd.NaT, pd.NaT, pd.NaT]}\n )\n assert nan_df[\"nan\"].dtype == \"float64\"\n assert nan_df[\"nat\"].dtype == \"datetime64[ns]\"\n\n for key in [\"nan\", \"nat\"]:\n grouped = nan_df.groupby(key)\n assert grouped.groups == {}\n assert grouped.ngroups == 0\n assert grouped.indices == {}\n with pytest.raises(KeyError, match=r\"^nan$\"):\n grouped.get_group(np.nan)\n with pytest.raises(KeyError, match=r\"^NaT$\"):\n grouped.get_group(pd.NaT)\n\n\ndef test_groupby_two_group_keys_all_nan():\n # GH #36842: Grouping over two group keys shouldn't raise an error\n df = DataFrame({\"a\": [np.nan, np.nan], \"b\": [np.nan, np.nan], \"c\": [1, 2]})\n result = df.groupby([\"a\", \"b\"]).indices\n assert result == {}\n\n\ndef test_groupby_2d_malformed():\n d = DataFrame(index=range(2))\n d[\"group\"] = [\"g1\", \"g2\"]\n d[\"zeros\"] = [0, 0]\n d[\"ones\"] = [1, 1]\n d[\"label\"] = [\"l1\", \"l2\"]\n tmp = d.groupby([\"group\"]).mean()\n res_values = np.array([[0, 1], [0, 1]], dtype=np.int64)\n tm.assert_index_equal(tmp.columns, Index([\"zeros\", \"ones\"]))\n tm.assert_numpy_array_equal(tmp.values, res_values)\n\n\ndef test_int32_overflow():\n B = np.concatenate((np.arange(10000), np.arange(10000), 
np.arange(5000)))\n A = np.arange(25000)\n df = DataFrame({\"A\": A, \"B\": B, \"C\": A, \"D\": B, \"E\": np.random.randn(25000)})\n\n left = df.groupby([\"A\", \"B\", \"C\", \"D\"]).sum()\n right = df.groupby([\"D\", \"C\", \"B\", \"A\"]).sum()\n assert len(left) == len(right)\n\n\ndef test_groupby_sort_multi():\n df = DataFrame(\n {\n \"a\": [\"foo\", \"bar\", \"baz\"],\n \"b\": [3, 2, 1],\n \"c\": [0, 1, 2],\n \"d\": np.random.randn(3),\n }\n )\n\n tups = [tuple(row) for row in df[[\"a\", \"b\", \"c\"]].values]\n tups = com.asarray_tuplesafe(tups)\n result = df.groupby([\"a\", \"b\", \"c\"], sort=True).sum()\n tm.assert_numpy_array_equal(result.index.values, tups[[1, 2, 0]])\n\n tups = [tuple(row) for row in df[[\"c\", \"a\", \"b\"]].values]\n tups = com.asarray_tuplesafe(tups)\n result = df.groupby([\"c\", \"a\", \"b\"], sort=True).sum()\n tm.assert_numpy_array_equal(result.index.values, tups)\n\n tups = [tuple(x) for x in df[[\"b\", \"c\", \"a\"]].values]\n tups = com.asarray_tuplesafe(tups)\n result = df.groupby([\"b\", \"c\", \"a\"], sort=True).sum()\n tm.assert_numpy_array_equal(result.index.values, tups[[2, 1, 0]])\n\n df = DataFrame(\n {\"a\": [0, 1, 2, 0, 1, 2], \"b\": [0, 0, 0, 1, 1, 1], \"d\": np.random.randn(6)}\n )\n grouped = df.groupby([\"a\", \"b\"])[\"d\"]\n result = grouped.sum()\n\n def _check_groupby(df, result, keys, field, f=lambda x: x.sum()):\n tups = [tuple(row) for row in df[keys].values]\n tups = com.asarray_tuplesafe(tups)\n expected = f(df.groupby(tups)[field])\n for k, v in expected.items():\n assert result[k] == v\n\n _check_groupby(df, result, [\"a\", \"b\"], \"d\")\n\n\ndef test_dont_clobber_name_column():\n df = DataFrame(\n {\"key\": [\"a\", \"a\", \"a\", \"b\", \"b\", \"b\"], \"name\": [\"foo\", \"bar\", \"baz\"] * 2}\n )\n\n result = df.groupby(\"key\").apply(lambda x: x)\n tm.assert_frame_equal(result, df)\n\n\ndef test_skip_group_keys():\n\n tsf = tm.makeTimeDataFrame()\n\n grouped = tsf.groupby(lambda x: x.month, group_keys=False)\n result = grouped.apply(lambda x: x.sort_values(by=\"A\")[:3])\n\n pieces = [group.sort_values(by=\"A\")[:3] for key, group in grouped]\n\n expected = pd.concat(pieces)\n tm.assert_frame_equal(result, expected)\n\n grouped = tsf[\"A\"].groupby(lambda x: x.month, group_keys=False)\n result = grouped.apply(lambda x: x.sort_values()[:3])\n\n pieces = [group.sort_values()[:3] for key, group in grouped]\n\n expected = pd.concat(pieces)\n tm.assert_series_equal(result, expected)\n\n\ndef test_no_nonsense_name(float_frame):\n # GH #995\n s = float_frame[\"C\"].copy()\n s.name = None\n\n result = s.groupby(float_frame[\"A\"]).agg(np.sum)\n assert result.name is None\n\n\ndef test_multifunc_sum_bug():\n # GH #1065\n x = DataFrame(np.arange(9).reshape(3, 3))\n x[\"test\"] = 0\n x[\"fl\"] = [1.3, 1.5, 1.6]\n\n grouped = x.groupby(\"test\")\n result = grouped.agg({\"fl\": \"sum\", 2: \"size\"})\n assert result[\"fl\"].dtype == np.float64\n\n\ndef test_handle_dict_return_value(df):\n def f(group):\n return {\"max\": group.max(), \"min\": group.min()}\n\n def g(group):\n return Series({\"max\": group.max(), \"min\": group.min()})\n\n result = df.groupby(\"A\")[\"C\"].apply(f)\n expected = df.groupby(\"A\")[\"C\"].apply(g)\n\n assert isinstance(result, Series)\n tm.assert_series_equal(result, expected)\n\n\[email protected](\"grouper\", [\"A\", [\"A\", \"B\"]])\ndef test_set_group_name(df, grouper):\n def f(group):\n assert group.name is not None\n return group\n\n def freduce(group):\n assert group.name is not None\n return 
group.sum()\n\n def foo(x):\n return freduce(x)\n\n grouped = df.groupby(grouper)\n\n # make sure all these work\n grouped.apply(f)\n grouped.aggregate(freduce)\n grouped.aggregate({\"C\": freduce, \"D\": freduce})\n grouped.transform(f)\n\n grouped[\"C\"].apply(f)\n grouped[\"C\"].aggregate(freduce)\n grouped[\"C\"].aggregate([freduce, foo])\n grouped[\"C\"].transform(f)\n\n\ndef test_group_name_available_in_inference_pass():\n # gh-15062\n df = DataFrame({\"a\": [0, 0, 1, 1, 2, 2], \"b\": np.arange(6)})\n\n names = []\n\n def f(group):\n names.append(group.name)\n return group.copy()\n\n df.groupby(\"a\", sort=False, group_keys=False).apply(f)\n\n expected_names = [0, 1, 2]\n assert names == expected_names\n\n\ndef test_no_dummy_key_names(df):\n # see gh-1291\n result = df.groupby(df[\"A\"].values).sum()\n assert result.index.name is None\n\n result = df.groupby([df[\"A\"].values, df[\"B\"].values]).sum()\n assert result.index.names == (None, None)\n\n\ndef test_groupby_sort_multiindex_series():\n # series multiindex groupby sort argument was not being passed through\n # _compress_group_index\n # GH 9444\n index = MultiIndex(\n levels=[[1, 2], [1, 2]],\n codes=[[0, 0, 0, 0, 1, 1], [1, 1, 0, 0, 0, 0]],\n names=[\"a\", \"b\"],\n )\n mseries = Series([0, 1, 2, 3, 4, 5], index=index)\n index = MultiIndex(\n levels=[[1, 2], [1, 2]], codes=[[0, 0, 1], [1, 0, 0]], names=[\"a\", \"b\"]\n )\n mseries_result = Series([0, 2, 4], index=index)\n\n result = mseries.groupby(level=[\"a\", \"b\"], sort=False).first()\n tm.assert_series_equal(result, mseries_result)\n result = mseries.groupby(level=[\"a\", \"b\"], sort=True).first()\n tm.assert_series_equal(result, mseries_result.sort_index())\n\n\ndef test_groupby_reindex_inside_function():\n\n periods = 1000\n ind = date_range(start=\"2012/1/1\", freq=\"5min\", periods=periods)\n df = DataFrame({\"high\": np.arange(periods), \"low\": np.arange(periods)}, index=ind)\n\n def agg_before(func, fix=False):\n \"\"\"\n Run an aggregate func on the subset of data.\n \"\"\"\n\n def _func(data):\n d = data.loc[data.index.map(lambda x: x.hour < 11)].dropna()\n if fix:\n data[data.index[0]]\n if len(d) == 0:\n return None\n return func(d)\n\n return _func\n\n grouped = df.groupby(lambda x: datetime(x.year, x.month, x.day))\n closure_bad = grouped.agg({\"high\": agg_before(np.max)})\n closure_good = grouped.agg({\"high\": agg_before(np.max, True)})\n\n tm.assert_frame_equal(closure_bad, closure_good)\n\n\ndef test_groupby_multiindex_missing_pair():\n # GH9049\n df = DataFrame(\n {\n \"group1\": [\"a\", \"a\", \"a\", \"b\"],\n \"group2\": [\"c\", \"c\", \"d\", \"c\"],\n \"value\": [1, 1, 1, 5],\n }\n )\n df = df.set_index([\"group1\", \"group2\"])\n df_grouped = df.groupby(level=[\"group1\", \"group2\"], sort=True)\n\n res = df_grouped.agg(\"sum\")\n idx = MultiIndex.from_tuples(\n [(\"a\", \"c\"), (\"a\", \"d\"), (\"b\", \"c\")], names=[\"group1\", \"group2\"]\n )\n exp = DataFrame([[2], [1], [5]], index=idx, columns=[\"value\"])\n\n tm.assert_frame_equal(res, exp)\n\n\ndef test_groupby_multiindex_not_lexsorted():\n # GH 11640\n\n # define the lexsorted version\n lexsorted_mi = MultiIndex.from_tuples(\n [(\"a\", \"\"), (\"b1\", \"c1\"), (\"b2\", \"c2\")], names=[\"b\", \"c\"]\n )\n lexsorted_df = DataFrame([[1, 3, 4]], columns=lexsorted_mi)\n assert lexsorted_df.columns._is_lexsorted()\n\n # define the non-lexsorted version\n not_lexsorted_df = DataFrame(\n columns=[\"a\", \"b\", \"c\", \"d\"], data=[[1, \"b1\", \"c1\", 3], [1, \"b2\", \"c2\", 4]]\n )\n 
not_lexsorted_df = not_lexsorted_df.pivot_table(\n index=\"a\", columns=[\"b\", \"c\"], values=\"d\"\n )\n not_lexsorted_df = not_lexsorted_df.reset_index()\n assert not not_lexsorted_df.columns._is_lexsorted()\n\n # compare the results\n tm.assert_frame_equal(lexsorted_df, not_lexsorted_df)\n\n expected = lexsorted_df.groupby(\"a\").mean()\n with tm.assert_produces_warning(PerformanceWarning):\n result = not_lexsorted_df.groupby(\"a\").mean()\n tm.assert_frame_equal(expected, result)\n\n # a transforming function should work regardless of sort\n # GH 14776\n df = DataFrame(\n {\"x\": [\"a\", \"a\", \"b\", \"a\"], \"y\": [1, 1, 2, 2], \"z\": [1, 2, 3, 4]}\n ).set_index([\"x\", \"y\"])\n assert not df.index._is_lexsorted()\n\n for level in [0, 1, [0, 1]]:\n for sort in [False, True]:\n result = df.groupby(level=level, sort=sort).apply(DataFrame.drop_duplicates)\n expected = df\n tm.assert_frame_equal(expected, result)\n\n result = (\n df.sort_index()\n .groupby(level=level, sort=sort)\n .apply(DataFrame.drop_duplicates)\n )\n expected = df.sort_index()\n tm.assert_frame_equal(expected, result)\n\n\ndef test_index_label_overlaps_location():\n # checking we don't have any label/location confusion in the\n # wake of GH5375\n df = DataFrame(list(\"ABCDE\"), index=[2, 0, 2, 1, 1])\n g = df.groupby(list(\"ababb\"))\n actual = g.filter(lambda x: len(x) > 2)\n expected = df.iloc[[1, 3, 4]]\n tm.assert_frame_equal(actual, expected)\n\n ser = df[0]\n g = ser.groupby(list(\"ababb\"))\n actual = g.filter(lambda x: len(x) > 2)\n expected = ser.take([1, 3, 4])\n tm.assert_series_equal(actual, expected)\n\n # ... and again, with a generic Index of floats\n df.index = df.index.astype(float)\n g = df.groupby(list(\"ababb\"))\n actual = g.filter(lambda x: len(x) > 2)\n expected = df.iloc[[1, 3, 4]]\n tm.assert_frame_equal(actual, expected)\n\n ser = df[0]\n g = ser.groupby(list(\"ababb\"))\n actual = g.filter(lambda x: len(x) > 2)\n expected = ser.take([1, 3, 4])\n tm.assert_series_equal(actual, expected)\n\n\ndef test_transform_doesnt_clobber_ints():\n # GH 7972\n n = 6\n x = np.arange(n)\n df = DataFrame({\"a\": x // 2, \"b\": 2.0 * x, \"c\": 3.0 * x})\n df2 = DataFrame({\"a\": x // 2 * 1.0, \"b\": 2.0 * x, \"c\": 3.0 * x})\n\n gb = df.groupby(\"a\")\n result = gb.transform(\"mean\")\n\n gb2 = df2.groupby(\"a\")\n expected = gb2.transform(\"mean\")\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\n \"sort_column\",\n [\"ints\", \"floats\", \"strings\", [\"ints\", \"floats\"], [\"ints\", \"strings\"]],\n)\[email protected](\n \"group_column\", [\"int_groups\", \"string_groups\", [\"int_groups\", \"string_groups\"]]\n)\ndef test_groupby_preserves_sort(sort_column, group_column):\n # Test to ensure that groupby always preserves sort order of original\n # object. 
Issue #8588 and #9651\n\n df = DataFrame(\n {\n \"int_groups\": [3, 1, 0, 1, 0, 3, 3, 3],\n \"string_groups\": [\"z\", \"a\", \"z\", \"a\", \"a\", \"g\", \"g\", \"g\"],\n \"ints\": [8, 7, 4, 5, 2, 9, 1, 1],\n \"floats\": [2.3, 5.3, 6.2, -2.4, 2.2, 1.1, 1.1, 5],\n \"strings\": [\"z\", \"d\", \"a\", \"e\", \"word\", \"word2\", \"42\", \"47\"],\n }\n )\n\n # Try sorting on different types and with different group types\n\n df = df.sort_values(by=sort_column)\n g = df.groupby(group_column)\n\n def test_sort(x):\n tm.assert_frame_equal(x, x.sort_values(by=sort_column))\n\n g.apply(test_sort)\n\n\ndef test_pivot_table_values_key_error():\n # This test is designed to replicate the error in issue #14938\n df = DataFrame(\n {\n \"eventDate\": date_range(datetime.today(), periods=20, freq=\"M\").tolist(),\n \"thename\": range(0, 20),\n }\n )\n\n df[\"year\"] = df.set_index(\"eventDate\").index.year\n df[\"month\"] = df.set_index(\"eventDate\").index.month\n\n with pytest.raises(KeyError, match=\"'badname'\"):\n df.reset_index().pivot_table(\n index=\"year\", columns=\"month\", values=\"badname\", aggfunc=\"count\"\n )\n\n\[email protected](\"columns\", [\"C\", [\"C\"]])\[email protected](\"keys\", [[\"A\"], [\"A\", \"B\"]])\[email protected](\n \"values\",\n [\n [True],\n [0],\n [0.0],\n [\"a\"],\n Categorical([0]),\n [to_datetime(0)],\n date_range(0, 1, 1, tz=\"US/Eastern\"),\n pd.array([0], dtype=\"Int64\"),\n pd.array([0], dtype=\"Float64\"),\n pd.array([False], dtype=\"boolean\"),\n ],\n)\[email protected](\"method\", [\"attr\", \"agg\", \"apply\"])\[email protected](\n \"op\", [\"idxmax\", \"idxmin\", \"mad\", \"min\", \"max\", \"sum\", \"prod\", \"skew\"]\n)\ndef test_empty_groupby(columns, keys, values, method, op, request):\n # GH8093 & GH26411\n\n if isinstance(values, Categorical) and len(keys) == 1 and method == \"apply\":\n mark = pytest.mark.xfail(raises=TypeError, match=\"'str' object is not callable\")\n request.node.add_marker(mark)\n elif (\n isinstance(values, Categorical)\n and len(keys) == 1\n and op in [\"idxmax\", \"idxmin\"]\n ):\n mark = pytest.mark.xfail(\n raises=ValueError, match=\"attempt to get arg(min|max) of an empty sequence\"\n )\n request.node.add_marker(mark)\n elif (\n isinstance(values, Categorical)\n and len(keys) == 1\n and not isinstance(columns, list)\n ):\n mark = pytest.mark.xfail(\n raises=TypeError, match=\"'Categorical' does not implement\"\n )\n request.node.add_marker(mark)\n elif (\n isinstance(values, Categorical)\n and len(keys) == 1\n and op in [\"mad\", \"min\", \"max\", \"sum\", \"prod\", \"skew\"]\n ):\n mark = pytest.mark.xfail(\n raises=AssertionError, match=\"(DataFrame|Series) are different\"\n )\n request.node.add_marker(mark)\n elif (\n isinstance(values, Categorical)\n and len(keys) == 2\n and op in [\"min\", \"max\", \"sum\"]\n and method != \"apply\"\n ):\n mark = pytest.mark.xfail(\n raises=AssertionError, match=\"(DataFrame|Series) are different\"\n )\n request.node.add_marker(mark)\n elif (\n isinstance(values, pd.core.arrays.BooleanArray)\n and op in [\"sum\", \"prod\"]\n and method != \"apply\"\n ):\n mark = pytest.mark.xfail(\n raises=AssertionError, match=\"(DataFrame|Series) are different\"\n )\n request.node.add_marker(mark)\n\n override_dtype = None\n if isinstance(values[0], bool) and op in (\"prod\", \"sum\") and method != \"apply\":\n # sum/product of bools is an integer\n override_dtype = \"int64\"\n\n df = DataFrame({\"A\": values, \"B\": values, \"C\": values}, columns=list(\"ABC\"))\n\n if hasattr(values, \"dtype\"):\n 
# check that we did the construction right\n assert (df.dtypes == values.dtype).all()\n\n df = df.iloc[:0]\n\n gb = df.groupby(keys)[columns]\n if method == \"attr\":\n result = getattr(gb, op)()\n else:\n result = getattr(gb, method)(op)\n\n expected = df.set_index(keys)[columns]\n if override_dtype is not None:\n expected = expected.astype(override_dtype)\n if len(keys) == 1:\n expected.index.name = keys[0]\n tm.assert_equal(result, expected)\n\n\ndef test_tuple_as_grouping():\n # https://github.com/pandas-dev/pandas/issues/18314\n df = DataFrame(\n {\n (\"a\", \"b\"): [1, 1, 1, 1],\n \"a\": [2, 2, 2, 2],\n \"b\": [2, 2, 2, 2],\n \"c\": [1, 1, 1, 1],\n }\n )\n\n with pytest.raises(KeyError, match=r\"('a', 'b')\"):\n df[[\"a\", \"b\", \"c\"]].groupby((\"a\", \"b\"))\n\n result = df.groupby((\"a\", \"b\"))[\"c\"].sum()\n expected = Series([4], name=\"c\", index=Index([1], name=(\"a\", \"b\")))\n tm.assert_series_equal(result, expected)\n\n\ndef test_tuple_correct_keyerror():\n # https://github.com/pandas-dev/pandas/issues/18798\n df = DataFrame(1, index=range(3), columns=MultiIndex.from_product([[1, 2], [3, 4]]))\n with pytest.raises(KeyError, match=r\"^\\(7, 8\\)$\"):\n df.groupby((7, 8)).mean()\n\n\ndef test_groupby_agg_ohlc_non_first():\n # GH 21716\n df = DataFrame(\n [[1], [1]],\n columns=[\"foo\"],\n index=date_range(\"2018-01-01\", periods=2, freq=\"D\"),\n )\n\n expected = DataFrame(\n [[1, 1, 1, 1, 1], [1, 1, 1, 1, 1]],\n columns=MultiIndex.from_tuples(\n (\n (\"foo\", \"sum\", \"foo\"),\n (\"foo\", \"ohlc\", \"open\"),\n (\"foo\", \"ohlc\", \"high\"),\n (\"foo\", \"ohlc\", \"low\"),\n (\"foo\", \"ohlc\", \"close\"),\n )\n ),\n index=date_range(\"2018-01-01\", periods=2, freq=\"D\"),\n )\n\n result = df.groupby(Grouper(freq=\"D\")).agg([\"sum\", \"ohlc\"])\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_groupby_multiindex_nat():\n # GH 9236\n values = [\n (pd.NaT, \"a\"),\n (datetime(2012, 1, 2), \"a\"),\n (datetime(2012, 1, 2), \"b\"),\n (datetime(2012, 1, 3), \"a\"),\n ]\n mi = MultiIndex.from_tuples(values, names=[\"date\", None])\n ser = Series([3, 2, 2.5, 4], index=mi)\n\n result = ser.groupby(level=1).mean()\n expected = Series([3.0, 2.5], index=[\"a\", \"b\"])\n tm.assert_series_equal(result, expected)\n\n\ndef test_groupby_empty_list_raises():\n # GH 5289\n values = zip(range(10), range(10))\n df = DataFrame(values, columns=[\"apple\", \"b\"])\n msg = \"Grouper and axis must be same length\"\n with pytest.raises(ValueError, match=msg):\n df.groupby([[]])\n\n\ndef test_groupby_multiindex_series_keys_len_equal_group_axis():\n # GH 25704\n index_array = [[\"x\", \"x\"], [\"a\", \"b\"], [\"k\", \"k\"]]\n index_names = [\"first\", \"second\", \"third\"]\n ri = MultiIndex.from_arrays(index_array, names=index_names)\n s = Series(data=[1, 2], index=ri)\n result = s.groupby([\"first\", \"third\"]).sum()\n\n index_array = [[\"x\"], [\"k\"]]\n index_names = [\"first\", \"third\"]\n ei = MultiIndex.from_arrays(index_array, names=index_names)\n expected = Series([3], index=ei)\n\n tm.assert_series_equal(result, expected)\n\n\ndef test_groupby_groups_in_BaseGrouper():\n # GH 26326\n # Test if DataFrame grouped with a pandas.Grouper has correct groups\n mi = MultiIndex.from_product([[\"A\", \"B\"], [\"C\", \"D\"]], names=[\"alpha\", \"beta\"])\n df = DataFrame({\"foo\": [1, 2, 1, 2], \"bar\": [1, 2, 3, 4]}, index=mi)\n result = df.groupby([Grouper(level=\"alpha\"), \"beta\"])\n expected = df.groupby([\"alpha\", \"beta\"])\n assert result.groups == expected.groups\n\n result = 
df.groupby([\"beta\", Grouper(level=\"alpha\")])\n expected = df.groupby([\"beta\", \"alpha\"])\n assert result.groups == expected.groups\n\n\[email protected](\"group_name\", [\"x\", [\"x\"]])\ndef test_groupby_axis_1(group_name):\n # GH 27614\n df = DataFrame(\n np.arange(12).reshape(3, 4), index=[0, 1, 0], columns=[10, 20, 10, 20]\n )\n df.index.name = \"y\"\n df.columns.name = \"x\"\n\n results = df.groupby(group_name, axis=1).sum()\n expected = df.T.groupby(group_name).sum().T\n tm.assert_frame_equal(results, expected)\n\n # test on MI column\n iterables = [[\"bar\", \"baz\", \"foo\"], [\"one\", \"two\"]]\n mi = MultiIndex.from_product(iterables=iterables, names=[\"x\", \"x1\"])\n df = DataFrame(np.arange(18).reshape(3, 6), index=[0, 1, 0], columns=mi)\n results = df.groupby(group_name, axis=1).sum()\n expected = df.T.groupby(group_name).sum().T\n tm.assert_frame_equal(results, expected)\n\n\[email protected](\n \"op, expected\",\n [\n (\n \"shift\",\n {\n \"time\": [\n None,\n None,\n Timestamp(\"2019-01-01 12:00:00\"),\n Timestamp(\"2019-01-01 12:30:00\"),\n None,\n None,\n ]\n },\n ),\n (\n \"bfill\",\n {\n \"time\": [\n Timestamp(\"2019-01-01 12:00:00\"),\n Timestamp(\"2019-01-01 12:30:00\"),\n Timestamp(\"2019-01-01 14:00:00\"),\n Timestamp(\"2019-01-01 14:30:00\"),\n Timestamp(\"2019-01-01 14:00:00\"),\n Timestamp(\"2019-01-01 14:30:00\"),\n ]\n },\n ),\n (\n \"ffill\",\n {\n \"time\": [\n Timestamp(\"2019-01-01 12:00:00\"),\n Timestamp(\"2019-01-01 12:30:00\"),\n Timestamp(\"2019-01-01 12:00:00\"),\n Timestamp(\"2019-01-01 12:30:00\"),\n Timestamp(\"2019-01-01 14:00:00\"),\n Timestamp(\"2019-01-01 14:30:00\"),\n ]\n },\n ),\n ],\n)\ndef test_shift_bfill_ffill_tz(tz_naive_fixture, op, expected):\n # GH19995, GH27992: Check that timezone does not drop in shift, bfill, and ffill\n tz = tz_naive_fixture\n data = {\n \"id\": [\"A\", \"B\", \"A\", \"B\", \"A\", \"B\"],\n \"time\": [\n Timestamp(\"2019-01-01 12:00:00\"),\n Timestamp(\"2019-01-01 12:30:00\"),\n None,\n None,\n Timestamp(\"2019-01-01 14:00:00\"),\n Timestamp(\"2019-01-01 14:30:00\"),\n ],\n }\n df = DataFrame(data).assign(time=lambda x: x.time.dt.tz_localize(tz))\n\n grouped = df.groupby(\"id\")\n result = getattr(grouped, op)()\n expected = DataFrame(expected).assign(time=lambda x: x.time.dt.tz_localize(tz))\n tm.assert_frame_equal(result, expected)\n\n\ndef test_groupby_only_none_group():\n # see GH21624\n # this was crashing with \"ValueError: Length of passed values is 1, index implies 0\"\n df = DataFrame({\"g\": [None], \"x\": 1})\n actual = df.groupby(\"g\")[\"x\"].transform(\"sum\")\n expected = Series([np.nan], name=\"x\")\n\n tm.assert_series_equal(actual, expected)\n\n\ndef test_groupby_duplicate_index():\n # GH#29189 the groupby call here used to raise\n ser = Series([2, 5, 6, 8], index=[2.0, 4.0, 4.0, 5.0])\n gb = ser.groupby(level=0)\n\n result = gb.mean()\n expected = Series([2, 5.5, 8], index=[2.0, 4.0, 5.0])\n tm.assert_series_equal(result, expected)\n\n\[email protected](\n \"idx\", [Index([\"a\", \"a\"]), MultiIndex.from_tuples(((\"a\", \"a\"), (\"a\", \"a\")))]\n)\[email protected](\"ignore:tshift is deprecated:FutureWarning\")\ndef test_dup_labels_output_shape(groupby_func, idx):\n if groupby_func in {\"size\", \"ngroup\", \"cumcount\"}:\n pytest.skip(\"Not applicable\")\n\n df = DataFrame([[1, 1]], columns=idx)\n grp_by = df.groupby([0])\n\n args = []\n if groupby_func in {\"fillna\", \"nth\"}:\n args.append(0)\n elif groupby_func == \"corrwith\":\n args.append(df)\n elif groupby_func == 
\"tshift\":\n df.index = [Timestamp(\"today\")]\n args.extend([1, \"D\"])\n\n result = getattr(grp_by, groupby_func)(*args)\n\n assert result.shape == (1, 2)\n tm.assert_index_equal(result.columns, idx)\n\n\ndef test_groupby_crash_on_nunique(axis):\n # Fix following 30253\n df = DataFrame({(\"A\", \"B\"): [1, 2], (\"A\", \"C\"): [1, 3], (\"D\", \"B\"): [0, 0]})\n\n axis_number = df._get_axis_number(axis)\n if not axis_number:\n df = df.T\n\n result = df.groupby(axis=axis_number, level=0).nunique()\n\n expected = DataFrame({\"A\": [1, 2], \"D\": [1, 1]})\n if not axis_number:\n expected = expected.T\n\n tm.assert_frame_equal(result, expected)\n\n # same thing, but empty columns\n gb = df[[]].groupby(axis=axis_number, level=0)\n res = gb.nunique()\n exp = expected[[]]\n tm.assert_frame_equal(res, exp)\n\n\ndef test_groupby_list_level():\n # GH 9790\n expected = DataFrame(np.arange(0, 9).reshape(3, 3))\n result = expected.groupby(level=[0]).mean()\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\n \"max_seq_items, expected\",\n [\n (5, \"{0: [0], 1: [1], 2: [2], 3: [3], 4: [4]}\"),\n (4, \"{0: [0], 1: [1], 2: [2], 3: [3], ...}\"),\n (1, \"{0: [0], ...}\"),\n ],\n)\ndef test_groups_repr_truncates(max_seq_items, expected):\n # GH 1135\n df = DataFrame(np.random.randn(5, 1))\n df[\"a\"] = df.index\n\n with pd.option_context(\"display.max_seq_items\", max_seq_items):\n result = df.groupby(\"a\").groups.__repr__()\n assert result == expected\n\n result = df.groupby(np.array(df.a)).groups.__repr__()\n assert result == expected\n\n\ndef test_group_on_two_row_multiindex_returns_one_tuple_key():\n # GH 18451\n df = DataFrame([{\"a\": 1, \"b\": 2, \"c\": 99}, {\"a\": 1, \"b\": 2, \"c\": 88}])\n df = df.set_index([\"a\", \"b\"])\n\n grp = df.groupby([\"a\", \"b\"])\n result = grp.indices\n expected = {(1, 2): np.array([0, 1], dtype=np.int64)}\n\n assert len(result) == 1\n key = (1, 2)\n assert (result[key] == expected[key]).all()\n\n\[email protected](\n \"klass, attr, value\",\n [\n (DataFrame, \"level\", \"a\"),\n (DataFrame, \"as_index\", False),\n (DataFrame, \"sort\", False),\n (DataFrame, \"group_keys\", False),\n (DataFrame, \"squeeze\", True),\n (DataFrame, \"observed\", True),\n (DataFrame, \"dropna\", False),\n pytest.param(\n Series,\n \"axis\",\n 1,\n marks=pytest.mark.xfail(\n reason=\"GH 35443: Attribute currently not passed on to series\"\n ),\n ),\n (Series, \"level\", \"a\"),\n (Series, \"as_index\", False),\n (Series, \"sort\", False),\n (Series, \"group_keys\", False),\n (Series, \"squeeze\", True),\n (Series, \"observed\", True),\n (Series, \"dropna\", False),\n ],\n)\[email protected](\n \"ignore:The `squeeze` parameter is deprecated:FutureWarning\"\n)\ndef test_subsetting_columns_keeps_attrs(klass, attr, value):\n # GH 9959 - When subsetting columns, don't drop attributes\n df = DataFrame({\"a\": [1], \"b\": [2], \"c\": [3]})\n if attr != \"axis\":\n df = df.set_index(\"a\")\n\n expected = df.groupby(\"a\", **{attr: value})\n result = expected[[\"b\"]] if klass is DataFrame else expected[\"b\"]\n assert getattr(result, attr) == getattr(expected, attr)\n\n\ndef test_subsetting_columns_axis_1():\n # GH 37725\n g = DataFrame({\"A\": [1], \"B\": [2], \"C\": [3]}).groupby([0, 0, 1], axis=1)\n match = \"Cannot subset columns when using axis=1\"\n with pytest.raises(ValueError, match=match):\n g[[\"A\", \"B\"]].sum()\n\n\[email protected](\"func\", [\"sum\", \"any\", \"shift\"])\ndef test_groupby_column_index_name_lost(func):\n # GH: 29764 groupby loses index sometimes\n 
expected = Index([\"a\"], name=\"idx\")\n df = DataFrame([[1]], columns=expected)\n df_grouped = df.groupby([1])\n result = getattr(df_grouped, func)().columns\n tm.assert_index_equal(result, expected)\n\n\ndef test_groupby_duplicate_columns():\n # GH: 31735\n df = DataFrame(\n {\"A\": [\"f\", \"e\", \"g\", \"h\"], \"B\": [\"a\", \"b\", \"c\", \"d\"], \"C\": [1, 2, 3, 4]}\n ).astype(object)\n df.columns = [\"A\", \"B\", \"B\"]\n result = df.groupby([0, 0, 0, 0]).min()\n expected = DataFrame([[\"e\", \"a\", 1]], columns=[\"A\", \"B\", \"B\"])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_groupby_series_with_tuple_name():\n # GH 37755\n ser = Series([1, 2, 3, 4], index=[1, 1, 2, 2], name=(\"a\", \"a\"))\n ser.index.name = (\"b\", \"b\")\n result = ser.groupby(level=0).last()\n expected = Series([2, 4], index=[1, 2], name=(\"a\", \"a\"))\n expected.index.name = (\"b\", \"b\")\n tm.assert_series_equal(result, expected)\n\n\[email protected](not IS64, reason=\"GH#38778: fail on 32-bit system\")\[email protected](\n \"func, values\", [(\"sum\", [97.0, 98.0]), (\"mean\", [24.25, 24.5])]\n)\ndef test_groupby_numerical_stability_sum_mean(func, values):\n # GH#38778\n data = [1e16, 1e16, 97, 98, -5e15, -5e15, -5e15, -5e15]\n df = DataFrame({\"group\": [1, 2] * 4, \"a\": data, \"b\": data})\n result = getattr(df.groupby(\"group\"), func)()\n expected = DataFrame({\"a\": values, \"b\": values}, index=Index([1, 2], name=\"group\"))\n tm.assert_frame_equal(result, expected)\n\n\[email protected](not IS64, reason=\"GH#38778: fail on 32-bit system\")\ndef test_groupby_numerical_stability_cumsum():\n # GH#38934\n data = [1e16, 1e16, 97, 98, -5e15, -5e15, -5e15, -5e15]\n df = DataFrame({\"group\": [1, 2] * 4, \"a\": data, \"b\": data})\n result = df.groupby(\"group\").cumsum()\n exp_data = (\n [1e16] * 2 + [1e16 + 96, 1e16 + 98] + [5e15 + 97, 5e15 + 98] + [97.0, 98.0]\n )\n expected = DataFrame({\"a\": exp_data, \"b\": exp_data})\n tm.assert_frame_equal(result, expected, check_exact=True)\n\n\ndef test_groupby_mean_duplicate_index(rand_series_with_duplicate_datetimeindex):\n dups = rand_series_with_duplicate_datetimeindex\n result = dups.groupby(level=0).mean()\n expected = dups.groupby(dups.index).mean()\n tm.assert_series_equal(result, expected)\n"
] | [
[
"pandas._testing.assert_almost_equal",
"pandas.to_datetime",
"pandas.Series",
"pandas.MultiIndex.from_tuples",
"pandas.DataFrame",
"numpy.random.randn",
"pandas._testing.assert_frame_equal",
"numpy.random.randint",
"pandas._testing.assert_numpy_array_equal",
"numpy.arange",
"pandas.core.common.asarray_tuplesafe",
"pandas.Index",
"numpy.std",
"pandas._testing.assert_series_equal",
"pandas._testing.assert_index_equal",
"pandas.concat",
"pandas._testing.assert_produces_warning",
"pandas.MultiIndex",
"pandas.Categorical",
"pandas.array",
"pandas._testing.rands_array",
"pandas.option_context",
"pandas.MultiIndex.from_product",
"pandas.date_range",
"numpy.array",
"numpy.sum",
"numpy.random.random",
"pandas._testing.assert_equal",
"pandas.Grouper",
"numpy.random.shuffle",
"pandas.MultiIndex.from_arrays",
"numpy.percentile",
"numpy.ones",
"pandas._testing.makeTimeDataFrame",
"pandas.Timestamp",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kbrose/pytorch | [
"fc0b8e60337ae46b90ed5d2f6d1f623f0f8d6581"
] | [
"test/test_sort_and_select.py"
] | [
"import torch\nimport numpy as np\n\nimport random\nfrom torch._six import nan\nfrom itertools import permutations, product\n\nfrom torch.testing import all_types, all_types_and\nfrom torch.testing._internal.common_utils import \\\n (TEST_WITH_ROCM, TestCase, run_tests, make_tensor, slowTest)\nfrom torch.testing._internal.common_device_type import \\\n (instantiate_device_type_tests, dtypes, onlyOnCPUAndCUDA,\n skipCUDAIfRocm, onlyCUDA, dtypesIfCUDA, dtypesIfCPU, onlyCPU, largeTensorTest)\n\n# TODO: remove this\nSIZE = 100\n\nclass TestSortAndSelect(TestCase):\n\n def assertIsOrdered(self, order, x, mxx, ixx, task):\n SIZE = x.size(1)\n if order == 'descending':\n def check_order(a, b):\n # `a != a` because we put NaNs\n # at the end of ascending sorted lists,\n # and the beginning of descending ones.\n return ((a != a) | (a >= b)).all().item()\n elif order == 'ascending':\n def check_order(a, b):\n # see above\n return ((b != b) | (a <= b)).all().item()\n else:\n error('unknown order \"{}\", must be \"ascending\" or \"descending\"'.format(order))\n\n are_ordered = True\n for k in range(1, SIZE):\n self.assertTrue(check_order(mxx[:, k - 1], mxx[:, k]),\n 'torch.sort ({}) values unordered for {}'.format(order, task))\n\n seen = set()\n indicesCorrect = True\n size0 = x.size(0)\n size = x.size(x.dim() - 1)\n x = x.tolist()\n mxx = mxx.tolist()\n ixx = ixx.tolist()\n for k in range(size0):\n seen.clear()\n for j in range(size):\n self.assertEqual(x[k][ixx[k][j]], mxx[k][j],\n msg='torch.sort ({}) indices wrong for {}'.format(order, task))\n seen.add(ixx[k][j])\n self.assertEqual(len(seen), size)\n\n def test_sort(self, device):\n # on CUDA 2048 vs >2048 have different code path for the dim being sorted\n for SIZE in (4, 2049):\n x = torch.rand(4, SIZE, device=device)\n res1val, res1ind = torch.sort(x)\n\n # Test inplace\n y = x.clone()\n y_inds = torch.tensor((), dtype=torch.int64, device=device)\n torch.sort(y, out=(y, y_inds))\n x_vals, x_inds = torch.sort(x)\n self.assertEqual(x_vals, y)\n self.assertEqual(x_inds, y_inds)\n\n # Test use of result tensor\n res2val = torch.tensor((), device=device)\n res2ind = torch.tensor((), device=device, dtype=torch.long)\n torch.sort(x, out=(res2val, res2ind))\n self.assertEqual(res1val, res2val, atol=0, rtol=0)\n self.assertEqual(res1ind, res2ind, atol=0, rtol=0)\n self.assertEqual(torch.argsort(x), res1ind)\n self.assertEqual(x.argsort(), res1ind)\n\n # Test sorting of random numbers\n self.assertIsOrdered('ascending', x, res2val, res2ind, 'random')\n\n # Test simple sort\n self.assertEqual(\n torch.sort(torch.tensor((50, 40, 30, 20, 10), device=device))[0],\n torch.tensor((10, 20, 30, 40, 50), device=device),\n atol=0, rtol=0\n )\n\n # Test that we still have proper sorting with duplicate keys\n x = torch.floor(torch.rand(4, SIZE, device=device) * 10)\n torch.sort(x, out=(res2val, res2ind))\n self.assertIsOrdered('ascending', x, res2val, res2ind, 'random with duplicate keys')\n\n # DESCENDING SORT\n x = torch.rand(4, SIZE, device=device)\n res1val, res1ind = torch.sort(x, x.dim() - 1, True)\n\n # Test use of result tensor\n res2val = torch.tensor((), device=device)\n res2ind = torch.tensor((), device=device, dtype=torch.long)\n torch.sort(x, x.dim() - 1, True, out=(res2val, res2ind))\n self.assertEqual(res1val, res2val, atol=0, rtol=0)\n self.assertEqual(res1ind, res2ind, atol=0, rtol=0)\n self.assertEqual(torch.argsort(x, x.dim() - 1, True), res1ind)\n self.assertEqual(x.argsort(x.dim() - 1, True), res1ind)\n\n # Test sorting of random numbers\n 
self.assertIsOrdered('descending', x, res2val, res2ind, 'random')\n\n # Test simple sort task\n self.assertEqual(\n torch.sort(torch.tensor((10, 20, 30, 40, 50), device=device), 0, True)[0],\n torch.tensor((50, 40, 30, 20, 10), device=device),\n atol=0, rtol=0\n )\n\n # Test that we still have proper sorting with duplicate keys\n self.assertIsOrdered('descending', x, res2val, res2ind, 'random with duplicate keys')\n\n # Test sorting with NaNs\n x = torch.rand(4, SIZE, device=device)\n x[1][2] = float('NaN')\n x[3][0] = float('NaN')\n torch.sort(x, out=(res2val, res2ind))\n self.assertIsOrdered('ascending', x, res2val, res2ind,\n 'random with NaNs')\n torch.sort(x, out=(res2val, res2ind), descending=True)\n self.assertIsOrdered('descending', x, res2val, res2ind,\n 'random with NaNs')\n\n # FIXME: remove torch.bool from unsupported types once support is added for cub sort\n @dtypes(*set(torch.testing.get_all_dtypes()) - {torch.bool, torch.complex64, torch.complex128})\n def test_stable_sort(self, device, dtype):\n if TEST_WITH_ROCM and dtype == torch.bfloat16:\n return\n sizes = (100, 1000, 10000)\n for ncopies in sizes:\n x = torch.tensor([0, 1] * ncopies, dtype=dtype, device=device)\n _, idx = x.sort(stable=True)\n self.assertEqual(\n idx[:ncopies],\n torch.arange(start=0, end=2 * ncopies, step=2, device=device)\n )\n self.assertEqual(\n idx[ncopies:],\n torch.arange(start=1, end=2 * ncopies, step=2, device=device)\n )\n\n @onlyCUDA\n @dtypes(torch.uint8)\n @largeTensorTest('200GB') # Unfortunately 80GB A100 is not large enough\n def test_sort_large(self, device, dtype):\n t0 = torch.randperm(8192, device=device).to(dtype)\n t = t0.view(1, 8192).expand(2 ** 18 + 1, -1).contiguous()\n v, i = t.sort()\n del t\n iv, im = i.var_mean(dim=0)\n del i\n vv, vm = v.var_mean(dim=0)\n del v\n self.assertEqual(vv, torch.zeros_like(vv))\n self.assertEqual(iv, torch.zeros_like(iv))\n self.assertEqual(vm, torch.arange(255, dtype=dtype, device=device))\n self.assertEqual(im, t0.sort().indices)\n\n def _test_sort_discontiguous(self, device, dtype):\n # on CUDA 2048 vs >2048 have different code path for the dim being sorted\n sizes = (5, 7, 2049)\n for shape in permutations(sizes):\n for perm in permutations((0, 1, 2)):\n for dim in range(3):\n t = torch.randn(shape, device=device, dtype=dtype).permute(perm)\n r1 = t.sort(dim=dim)\n r2 = t.contiguous().sort(dim=dim)\n self.assertEqual(r1, r2)\n n = t.size(dim)\n\n # assert ordered\n self.assertTrue((r1.values.narrow(dim, 1, n - 1) >= r1.values.narrow(dim, 0, n - 1)).all())\n\n # assert that different segments does not mix, which can easily happen\n # if the stride is not handled correctly\n self.assertTrue((t.unsqueeze(-1).transpose(dim, -1) == r1.values.unsqueeze(-1)).any(dim=dim).any(dim=-1).all())\n\n # assert stride is preserved\n if self.device_type == 'cuda':\n # FIXME: this behavior should be true for all cases, not\n # just the one specified in if condition\n self.assertEqual(r1.values.stride(), t.stride())\n self.assertEqual(r1.indices.stride(), t.stride())\n\n @onlyCUDA\n @dtypes(torch.float32)\n def test_sort_discontiguous(self, device, dtype):\n self._test_sort_discontiguous(device, dtype)\n\n @slowTest # this test is slow on CPU, but not on CUDA\n @onlyCPU\n @dtypes(torch.float32)\n def test_sort_discontiguous_slow(self, device, dtype):\n self._test_sort_discontiguous(device, dtype)\n\n # FIXME: remove torch.bool from unsupported types once support is added for cub sort\n @dtypes(*set(torch.testing.get_all_dtypes()) - {torch.bool, 
torch.complex64, torch.complex128})\n def test_stable_sort_against_numpy(self, device, dtype):\n if TEST_WITH_ROCM and dtype == torch.bfloat16:\n return\n if dtype in torch.testing.floating_types_and(torch.float16, torch.bfloat16):\n inf = float('inf')\n neg_inf = -float('inf')\n nan = float('nan')\n else:\n if dtype != torch.bool:\n # no torch.iinfo support for torch.bool\n inf = torch.iinfo(dtype).max\n neg_inf = torch.iinfo(dtype).min\n else:\n inf = True\n neg_inf = ~inf\n # no nan for integral types, we use inf instead for simplicity\n nan = inf\n\n def generate_samples():\n from itertools import chain, combinations\n\n for sizes in [(1025,), (10000,)]:\n size = sizes[0]\n # binary strings\n yield (torch.tensor([0, 1] * size, dtype=dtype, device=device), 0)\n\n if self.device_type == 'cuda':\n return\n\n yield (torch.tensor([0, 1] * 100, dtype=dtype, device=device), 0)\n\n def repeated_index_fill(t, dim, idxs, vals):\n res = t\n for idx, val in zip(idxs, vals):\n res = res.index_fill(dim, idx, val)\n return res\n\n for sizes in [(1, 10), (10, 1), (10, 10), (10, 10, 10)]:\n size = min(*sizes)\n x = (torch.randn(*sizes, device=device) * size).to(dtype)\n yield (x, 0)\n\n # Generate tensors which are being filled at random locations\n # with values from the non-empty subsets of the set (inf, neg_inf, nan)\n # for each dimension.\n n_fill_vals = 3 # cardinality of (inf, neg_inf, nan)\n for dim in range(len(sizes)):\n idxs = (torch.randint(high=size, size=(size // 10,)) for i in range(n_fill_vals))\n vals = (inf, neg_inf, nan)\n subsets = chain.from_iterable(combinations(list(zip(idxs, vals)), r)\n for r in range(1, n_fill_vals + 1))\n for subset in subsets:\n idxs_subset, vals_subset = zip(*subset)\n yield (repeated_index_fill(x, dim, idxs_subset, vals_subset), dim)\n\n for sample, dim in generate_samples():\n _, idx_torch = sample.sort(dim=dim, stable=True)\n if dtype is torch.bfloat16:\n sample_numpy = sample.float().cpu().numpy()\n else:\n sample_numpy = sample.cpu().numpy()\n idx_numpy = np.argsort(sample_numpy, axis=dim, kind='stable')\n self.assertEqual(idx_torch, idx_numpy)\n\n @dtypes(*(torch.testing.get_all_int_dtypes() + torch.testing.get_all_fp_dtypes()))\n def test_msort(self, device, dtype):\n if TEST_WITH_ROCM and dtype == torch.bfloat16:\n return\n\n def test(shape):\n tensor = make_tensor(shape, device, dtype, low=-9, high=9)\n if tensor.size() != torch.Size([]):\n if dtype is torch.bfloat16:\n expected = torch.from_numpy(np.msort(tensor.float().cpu().numpy())).bfloat16()\n else:\n expected = torch.from_numpy(np.msort(tensor.cpu().numpy()))\n else:\n expected = tensor # numpy.msort() does not support empty shapes tensor\n\n result = torch.msort(tensor)\n self.assertEqual(result, expected)\n\n out = torch.empty_like(result)\n torch.msort(tensor, out=out)\n self.assertEqual(out, expected)\n\n shapes = (\n [],\n [0, ],\n [20, ],\n [1, 20],\n [30, 30],\n [10, 20, 30]\n )\n for shape in shapes:\n test(shape)\n\n def test_topk(self, device):\n def topKViaSort(t, k, dim, dir):\n sorted, indices = t.sort(dim, dir)\n return sorted.narrow(dim, 0, k), indices.narrow(dim, 0, k)\n\n def compareTensors(t, res1, ind1, res2, ind2, dim):\n # Values should be exactly equivalent\n self.assertEqual(res1, res2, atol=0, rtol=0)\n\n # Indices might differ based on the implementation, since there is\n # no guarantee of the relative order of selection\n if not ind1.eq(ind2).all():\n # To verify that the indices represent equivalent elements,\n # gather from the input using the topk indices and 
compare against\n # the sort indices\n vals = t.gather(dim, ind2)\n self.assertEqual(res1, vals, atol=0, rtol=0)\n\n def compare(t, k, dim, dir):\n topKVal, topKInd = t.topk(k, dim, dir, True)\n sortKVal, sortKInd = topKViaSort(t, k, dim, dir)\n compareTensors(t, sortKVal, sortKInd, topKVal, topKInd, dim)\n\n t = torch.rand(random.randint(1, SIZE),\n random.randint(1, SIZE),\n random.randint(1, SIZE), device=device)\n\n for _kTries in range(3):\n for _dimTries in range(3):\n for transpose in (True, False):\n for dir in (True, False):\n testTensor = t\n if transpose:\n dim1 = random.randrange(t.ndimension())\n dim2 = dim1\n while dim1 == dim2:\n dim2 = random.randrange(t.ndimension())\n\n testTensor = t.transpose(dim1, dim2)\n\n dim = random.randrange(testTensor.ndimension())\n k = random.randint(1, testTensor.size(dim))\n compare(testTensor, k, dim, dir)\n\n def test_topk_arguments(self, device):\n q = torch.randn(10, 2, 10, device=device)\n # Make sure True isn't mistakenly taken as the 2nd dimension (interpreted as 1)\n self.assertRaises(TypeError, lambda: q.topk(4, True))\n\n @skipCUDAIfRocm\n def test_unique_dim(self, device):\n self.assertFalse(hasattr(torch, 'unique_dim'))\n\n def run_test(device, dtype):\n x = torch.tensor([[[1., 1.],\n [0., 1.],\n [2., 1.],\n [0., 1.]],\n [[1., 1.],\n [0., 1.],\n [2., 1.],\n [0., 1.]]],\n dtype=dtype,\n device=device)\n x_empty = torch.empty(5, 0, dtype=dtype, device=device)\n x_ill_formed_empty = torch.empty(5, 0, 0, dtype=dtype, device=device)\n x_ill_formed_empty_another = torch.empty(5, 0, 5, dtype=dtype, device=device)\n expected_unique_dim0 = torch.tensor([[[1., 1.],\n [0., 1.],\n [2., 1.],\n [0., 1.]]],\n dtype=dtype,\n device=device)\n expected_inverse_dim0 = torch.tensor([0, 0])\n expected_counts_dim0 = torch.tensor([2])\n expected_unique_dim1 = torch.tensor([[[0., 1.],\n [1., 1.],\n [2., 1.]],\n [[0., 1.],\n [1., 1.],\n [2., 1.]]],\n dtype=dtype,\n device=device)\n expected_unique_dim1_bool = torch.tensor([[[False, True], [True, True]],\n [[False, True], [True, True]]],\n dtype=torch.bool,\n device=device)\n expected_inverse_dim1 = torch.tensor([1, 0, 2, 0])\n expected_inverse_dim1_bool = torch.tensor([1, 0, 1, 0])\n expected_counts_dim1 = torch.tensor([2, 1, 1])\n expected_counts_dim1_bool = torch.tensor([2, 2])\n expected_unique_dim2 = torch.tensor([[[1., 1.],\n [0., 1.],\n [2., 1.],\n [0., 1.]],\n [[1., 1.],\n [0., 1.],\n [2., 1.],\n [0., 1.]]],\n dtype=dtype,\n device=device)\n expected_inverse_dim2 = torch.tensor([0, 1])\n expected_counts_dim2 = torch.tensor([1, 1])\n expected_unique_empty = torch.tensor([], dtype=dtype, device=device)\n expected_inverse_empty = torch.tensor([], dtype=torch.long, device=device)\n expected_counts_empty = torch.tensor([], dtype=torch.long, device=device)\n # dim0\n x_unique = torch.unique(x, dim=0)\n self.assertEqual(expected_unique_dim0, x_unique)\n\n x_unique, x_inverse = torch.unique(\n x,\n return_inverse=True,\n dim=0)\n self.assertEqual(expected_unique_dim0, x_unique)\n self.assertEqual(expected_inverse_dim0, x_inverse)\n\n x_unique, x_counts = torch.unique(\n x,\n return_inverse=False,\n return_counts=True,\n dim=0)\n self.assertEqual(expected_unique_dim0, x_unique)\n self.assertEqual(expected_counts_dim0, x_counts)\n\n x_unique, x_inverse, x_counts = torch.unique(\n x,\n return_inverse=True,\n return_counts=True,\n dim=0)\n self.assertEqual(expected_unique_dim0, x_unique)\n self.assertEqual(expected_inverse_dim0, x_inverse)\n self.assertEqual(expected_counts_dim0, x_counts)\n\n # dim1\n 
x_unique = torch.unique(x, dim=1)\n if x.dtype == torch.bool:\n self.assertEqual(expected_unique_dim1_bool, x_unique)\n else:\n self.assertEqual(expected_unique_dim1, x_unique)\n\n x_unique, x_inverse = torch.unique(\n x,\n return_inverse=True,\n dim=1)\n if x.dtype == torch.bool:\n self.assertEqual(expected_unique_dim1_bool, x_unique)\n self.assertEqual(expected_inverse_dim1_bool, x_inverse)\n else:\n self.assertEqual(expected_unique_dim1, x_unique)\n self.assertEqual(expected_inverse_dim1, x_inverse)\n\n x_unique, x_counts = torch.unique(\n x,\n return_inverse=False,\n return_counts=True,\n dim=1)\n if x.dtype == torch.bool:\n self.assertEqual(expected_unique_dim1_bool, x_unique)\n self.assertEqual(expected_counts_dim1_bool, x_counts)\n else:\n self.assertEqual(expected_unique_dim1, x_unique)\n self.assertEqual(expected_counts_dim1, x_counts)\n\n x_unique, x_inverse, x_counts = torch.unique(\n x,\n return_inverse=True,\n return_counts=True,\n dim=1)\n if x.dtype == torch.bool:\n self.assertEqual(expected_unique_dim1_bool, x_unique)\n self.assertEqual(expected_inverse_dim1_bool, x_inverse)\n self.assertEqual(expected_counts_dim1_bool, x_counts)\n else:\n self.assertEqual(expected_unique_dim1, x_unique)\n self.assertEqual(expected_inverse_dim1, x_inverse)\n self.assertEqual(expected_counts_dim1, x_counts)\n\n # dim2\n x_unique = torch.unique(x, dim=2)\n self.assertEqual(expected_unique_dim2, x_unique)\n\n x_unique, x_inverse = torch.unique(\n x,\n return_inverse=True,\n dim=2)\n self.assertEqual(expected_unique_dim2, x_unique)\n self.assertEqual(expected_inverse_dim2, x_inverse)\n\n x_unique, x_counts = torch.unique(\n x,\n return_inverse=False,\n return_counts=True,\n dim=2)\n self.assertEqual(expected_unique_dim2, x_unique)\n self.assertEqual(expected_counts_dim2, x_counts)\n\n x_unique, x_inverse, x_counts = torch.unique(\n x,\n return_inverse=True,\n return_counts=True,\n dim=2)\n self.assertEqual(expected_unique_dim2, x_unique)\n self.assertEqual(expected_inverse_dim2, x_inverse)\n self.assertEqual(expected_counts_dim2, x_counts)\n\n # test empty tensor\n x_unique, x_inverse, x_counts = torch.unique(\n x_empty,\n return_inverse=True,\n return_counts=True,\n dim=1)\n self.assertEqual(expected_unique_empty, x_unique)\n self.assertEqual(expected_inverse_empty, x_inverse)\n self.assertEqual(expected_counts_empty, x_counts)\n\n # test not a well formed tensor\n # Checking for runtime error, as this is the expected behaviour\n with self.assertRaises(RuntimeError):\n torch.unique(\n x_ill_formed_empty,\n return_inverse=True,\n return_counts=True,\n dim=1)\n\n # test along dim2\n with self.assertRaises(RuntimeError):\n torch.unique(\n x_ill_formed_empty_another,\n return_inverse=True,\n return_counts=True,\n dim=2)\n\n # test consecutive version\n y = torch.tensor(\n [[0, 1],\n [0, 1],\n [0, 1],\n [1, 2],\n [1, 2],\n [3, 4],\n [0, 1],\n [0, 1],\n [3, 4],\n [1, 2]],\n dtype=dtype,\n device=device\n )\n expected_y_unique = torch.tensor(\n [[0, 1],\n [1, 2],\n [3, 4],\n [0, 1],\n [3, 4],\n [1, 2]],\n dtype=dtype,\n device=device\n )\n expected_y_inverse = torch.tensor([0, 0, 0, 1, 1, 2, 3, 3, 4, 5], dtype=torch.int64, device=device)\n expected_y_counts = torch.tensor([3, 2, 1, 2, 1, 1], dtype=torch.int64, device=device)\n expected_y_inverse_bool = torch.tensor([0, 0, 0, 1, 1, 1, 2, 2, 3, 3], dtype=torch.int64, device=device)\n expected_y_counts_bool = torch.tensor([3, 3, 2, 2], dtype=torch.int64, device=device)\n y_unique, y_inverse, y_counts = torch.unique_consecutive(y, return_inverse=True, 
return_counts=True, dim=0)\n if x.dtype == torch.bool:\n self.assertEqual(expected_y_inverse_bool, y_inverse)\n self.assertEqual(expected_y_counts_bool, y_counts)\n else:\n self.assertEqual(expected_y_inverse, y_inverse)\n self.assertEqual(expected_y_counts, y_counts)\n\n run_test(device, torch.float)\n run_test(device, torch.double)\n run_test(device, torch.long)\n run_test(device, torch.uint8)\n run_test(device, torch.bool)\n\n @onlyCUDA\n def test_topk_noncontiguous_gpu(self, device):\n t = torch.randn(20, device=device)[::2]\n top1, idx1 = t.topk(5)\n top2, idx2 = t.contiguous().topk(5)\n self.assertEqual(top1, top2)\n self.assertEqual(idx1, idx2)\n\n def _test_topk_dtype(self, device, dtype, integral, size):\n if integral:\n a = torch.randint(torch.iinfo(dtype).min, torch.iinfo(dtype).max,\n size=(size,), dtype=dtype, device=device)\n else:\n a = torch.randn(size=(size,), dtype=dtype, device=device)\n\n sort_topk = a.sort()[0][-(size // 2):].flip(0)\n topk = a.topk(size // 2)\n self.assertEqual(sort_topk, topk[0]) # check values\n self.assertEqual(sort_topk, a[topk[1]]) # check indices\n\n @dtypes(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64)\n def test_topk_integral(self, device, dtype):\n small = 10\n large = 4096\n for curr_size in (small, large):\n self._test_topk_dtype(device, dtype, True, curr_size)\n\n @onlyCUDA\n @dtypes(torch.bfloat16)\n @skipCUDAIfRocm\n def test_topk_bfloat16(self, device, dtype):\n\n small = 10\n large = 8192\n for curr_size in (small, large):\n self._test_topk_dtype(device, dtype, False, curr_size)\n\n @dtypesIfCUDA(*torch.testing.get_all_fp_dtypes())\n @dtypes(torch.float, torch.double, torch.bfloat16)\n def test_topk_nonfinite(self, device, dtype):\n if TEST_WITH_ROCM and dtype == torch.bfloat16:\n return\n\n x = torch.tensor([float('nan'), float('inf'), 1e4, 0, -1e4, -float('inf')], device=device, dtype=dtype)\n val, idx = x.topk(4)\n expect = torch.tensor([float('nan'), float('inf'), 1e4, 0], device=device, dtype=dtype)\n self.assertEqual(val, expect)\n self.assertEqual(idx, [0, 1, 2, 3])\n\n val, idx = x.topk(4, largest=False)\n expect = torch.tensor([-float('inf'), -1e4, 0, 1e4], device=device, dtype=dtype)\n self.assertEqual(val, expect)\n self.assertEqual(idx, [5, 4, 3, 2])\n\n def test_topk_4d(self, device):\n x = torch.ones(2, 3072, 2, 2, device=device)\n x[:, 1, :, :] *= 2.\n x[:, 10, :, :] *= 1.5\n val, ind = torch.topk(x, k=2, dim=1)\n expected_ind = torch.ones(2, 2, 2, 2, dtype=torch.long, device=device)\n expected_ind[:, 1, :, :] = 10\n expected_val = torch.ones(2, 2, 2, 2, device=device)\n expected_val[:, 0, :, :] *= 2.\n expected_val[:, 1, :, :] *= 1.5\n self.assertEqual(val, expected_val, atol=0, rtol=0)\n self.assertEqual(ind, expected_ind, atol=0, rtol=0)\n\n @onlyOnCPUAndCUDA\n @dtypesIfCUDA(*(torch.testing.get_all_dtypes(include_complex=False,\n include_bool=False,\n include_half=False,\n include_bfloat16=True)))\n @dtypes(*(torch.testing.get_all_dtypes(include_complex=False, include_bool=False, include_half=False, include_bfloat16=False)))\n def test_topk_zero(self, device, dtype):\n if TEST_WITH_ROCM and dtype == torch.bfloat16:\n return\n\n # https://github.com/pytorch/pytorch/issues/49205\n t = torch.rand(2, 2, device=device).to(dtype=dtype)\n val, idx = torch.topk(t, k=0, largest=False)\n self.assertEqual(val.size(), torch.Size([2, 0]))\n self.assertEqual(idx.size(), torch.Size([2, 0]))\n\n def _test_unique_scalar_empty(self, dtype, device, f):\n # test scalar\n x = torch.tensor(0, dtype=dtype, device=device)\n 
unique, inverse, counts = f(x, return_inverse=True, return_counts=True)\n expected_unique = torch.tensor([0], dtype=dtype, device=device)\n expected_inverse = torch.tensor(0, device=device)\n expected_counts = torch.tensor([1], device=device)\n self.assertEqual(unique, expected_unique)\n self.assertEqual(inverse, expected_inverse)\n self.assertEqual(counts, expected_counts)\n\n # test zero sized tensor\n x = torch.zeros((0, 0, 3), dtype=dtype, device=device)\n unique, inverse, counts = f(x, return_inverse=True, return_counts=True)\n expected_unique = torch.tensor([], dtype=dtype, device=device)\n expected_inverse = torch.empty((0, 0, 3), dtype=torch.long, device=device)\n expected_counts = torch.tensor([], dtype=torch.long, device=device)\n self.assertEqual(unique, expected_unique)\n self.assertEqual(inverse, expected_inverse)\n self.assertEqual(counts, expected_counts)\n\n def _test_unique_with_expects(self, device, dtype, f, x, expected_unique, expected_inverse, expected_counts, additional_shape):\n def ensure_tuple(x):\n if isinstance(x, torch.Tensor):\n return (x,)\n return x\n\n for return_inverse in [True, False]:\n for return_counts in [True, False]:\n # test with expected\n ret = ensure_tuple(f(x, return_inverse=return_inverse, return_counts=return_counts))\n self.assertEqual(len(ret), 1 + int(return_inverse) + int(return_counts))\n self.assertEqual(expected_unique, ret[0])\n if return_inverse:\n self.assertEqual(expected_inverse, ret[1])\n if return_counts:\n count_index = 1 + int(return_inverse)\n self.assertEqual(expected_counts, ret[count_index])\n\n # tests per-element unique on a higher rank tensor.\n y = x.view(additional_shape)\n y_unique, y_inverse, y_counts = f(y, return_inverse=True, return_counts=True)\n self.assertEqual(expected_unique, y_unique)\n self.assertEqual(expected_inverse.view(additional_shape), y_inverse)\n self.assertEqual(expected_counts, y_counts)\n\n @dtypesIfCPU(*set(torch.testing.get_all_dtypes()) - {torch.complex64, torch.complex128})\n @dtypes(*set(torch.testing.get_all_dtypes()) - {torch.bfloat16, torch.complex64, torch.complex128})\n def test_unique(self, device, dtype):\n if dtype is torch.half and self.device_type == 'cpu':\n return # CPU does not have half support\n\n def ensure_tuple(x):\n if isinstance(x, torch.Tensor):\n return (x,)\n return x\n\n if dtype is torch.bool:\n x = torch.tensor([True, False, False, False, True, False, True, False], dtype=torch.bool, device=device)\n expected_unique = torch.tensor([False, True], dtype=torch.bool, device=device)\n expected_inverse = torch.tensor([1, 0, 0, 0, 1, 0, 1, 0], dtype=torch.long, device=device)\n expected_counts = torch.tensor([5, 3], dtype=torch.long, device=device)\n else:\n x = torch.tensor([1, 2, 3, 2, 8, 5, 2, 3], dtype=dtype, device=device)\n expected_unique = torch.tensor([1, 2, 3, 5, 8], dtype=dtype, device=device)\n expected_inverse = torch.tensor([0, 1, 2, 1, 4, 3, 1, 2], device=device)\n expected_counts = torch.tensor([1, 3, 2, 1, 1], device=device)\n\n # test sorted unique\n fs = (\n lambda x, **kwargs: torch.unique(x, sorted=True, **kwargs),\n lambda x, **kwargs: x.unique(sorted=True, **kwargs),\n )\n x_sliced = torch.empty(x.size(0) * 2, dtype=dtype, device=device)[::2].copy_(x)\n xs = (x, x_sliced)\n for f, x in product(fs, xs):\n self._test_unique_with_expects(device, dtype, f, x, expected_unique, expected_inverse, expected_counts, (2, 2, 2))\n self._test_unique_scalar_empty(dtype, device, f)\n\n # test unsorted unique\n fs = (\n lambda x, **kwargs: torch.unique(x, 
sorted=False, **kwargs),\n lambda x, **kwargs: x.unique(sorted=False, **kwargs)\n )\n for f, x in product(fs, xs):\n self._test_unique_scalar_empty(dtype, device, f)\n for return_inverse, return_counts in product((True, False), repeat=2):\n ret = ensure_tuple(f(x, return_inverse=return_inverse, return_counts=return_counts))\n self.assertEqual(len(ret), 1 + int(return_inverse) + int(return_counts))\n x_list = x.tolist()\n x_unique_list = ret[0].tolist()\n self.assertEqual(expected_unique.tolist(), sorted(x_unique_list))\n if return_inverse:\n x_inverse_list = ret[1].tolist()\n for i, j in enumerate(x_inverse_list):\n self.assertEqual(x_list[i], x_unique_list[j])\n if return_counts:\n count_index = 1 + int(return_inverse)\n x_counts_list = ret[count_index].tolist()\n for i, j in zip(x_unique_list, x_counts_list):\n count = 0\n for k in x_list:\n if k == i:\n count += 1\n self.assertEqual(j, count)\n\n @dtypesIfCPU(*set(torch.testing.get_all_dtypes()) - {torch.complex64, torch.complex128})\n @dtypes(*set(torch.testing.get_all_dtypes()) - {torch.bfloat16, torch.complex64, torch.complex128})\n def test_unique_consecutive(self, device, dtype):\n if dtype is torch.half and self.device_type == 'cpu':\n return # CPU does not have half support\n\n if dtype is torch.bool:\n x = torch.tensor([True, False, False, False, True, True, False, False, False], dtype=torch.bool, device=device)\n expected_unique = torch.tensor([True, False, True, False], dtype=torch.bool, device=device)\n expected_inverse = torch.tensor([0, 1, 1, 1, 2, 2, 3, 3, 3], dtype=torch.long, device=device)\n expected_counts = torch.tensor([1, 3, 2, 3], dtype=torch.long, device=device)\n else:\n x = torch.tensor([1, 2, 2, 2, 5, 5, 2, 2, 3], dtype=dtype, device=device)\n expected_unique = torch.tensor([1, 2, 5, 2, 3], dtype=dtype, device=device)\n expected_inverse = torch.tensor([0, 1, 1, 1, 2, 2, 3, 3, 4], device=device)\n expected_counts = torch.tensor([1, 3, 2, 2, 1], device=device)\n\n for f in [torch.unique_consecutive, lambda x, **kwargs: x.unique_consecutive(**kwargs)]:\n self._test_unique_with_expects(device, dtype, f, x, expected_unique, expected_inverse, expected_counts, (3, 3))\n self._test_unique_scalar_empty(dtype, device, f)\n\n @dtypes(torch.double)\n def test_kthvalue(self, device, dtype):\n SIZE = 50\n x = torch.rand(SIZE, SIZE, SIZE, dtype=dtype, device=device)\n x0 = x.clone()\n\n k = random.randint(1, SIZE)\n res1val, res1ind = torch.kthvalue(x, k, keepdim=False)\n res2val, res2ind = torch.sort(x)\n\n self.assertEqual(res1val[:, :], res2val[:, :, k - 1], atol=0, rtol=0)\n self.assertEqual(res1ind[:, :], res2ind[:, :, k - 1], atol=0, rtol=0)\n # test use of result tensors\n k = random.randint(1, SIZE)\n res1val = torch.tensor([], dtype=dtype, device=device)\n res1ind = torch.tensor([], dtype=torch.long, device=device)\n torch.kthvalue(x, k, keepdim=False, out=(res1val, res1ind))\n res2val, res2ind = torch.sort(x)\n self.assertEqual(res1val[:, :], res2val[:, :, k - 1], atol=0, rtol=0)\n self.assertEqual(res1ind[:, :], res2ind[:, :, k - 1], atol=0, rtol=0)\n\n # test non-default dim\n k = random.randint(1, SIZE)\n res1val, res1ind = torch.kthvalue(x, k, 0, keepdim=False)\n res2val, res2ind = torch.sort(x, 0)\n self.assertEqual(res1val, res2val[k - 1], atol=0, rtol=0)\n self.assertEqual(res1ind, res2ind[k - 1], atol=0, rtol=0)\n\n # non-contiguous\n y = x.narrow(1, 0, 1)\n y0 = y.contiguous()\n k = random.randint(1, SIZE)\n res1val, res1ind = torch.kthvalue(y, k)\n res2val, res2ind = torch.kthvalue(y0, k)\n 
self.assertEqual(res1val, res2val, atol=0, rtol=0)\n self.assertEqual(res1ind, res2ind, atol=0, rtol=0)\n\n # non-contiguous [Reference: https://github.com/pytorch/pytorch/issues/45721]\n non_contig_t = torch.tensor([0, -1, 1, -2, 2], dtype=dtype, device=device)[::2]\n expected_val, expected_ind = non_contig_t.contiguous().kthvalue(2)\n non_contig_cpu_t = non_contig_t.cpu()\n expected_val_cpu, expected_ind_cpu = non_contig_cpu_t.kthvalue(2)\n\n out_val, out_ind = non_contig_t.kthvalue(2)\n self.assertEqual(expected_val, out_val, atol=0, rtol=0)\n self.assertEqual(expected_ind, out_ind, atol=0, rtol=0)\n self.assertEqual(expected_val_cpu, out_val, atol=0, rtol=0)\n self.assertEqual(expected_ind_cpu, out_ind, atol=0, rtol=0)\n\n # check that the input wasn't modified\n self.assertEqual(x, x0, atol=0, rtol=0)\n\n # simple test case (with repetitions)\n y = torch.tensor((3., 5, 4, 1, 1, 5), dtype=dtype, device=device)\n self.assertEqual(torch.kthvalue(y, 3)[0], 3, atol=0, rtol=0)\n self.assertEqual(torch.kthvalue(y, 2)[0], 1, atol=0, rtol=0)\n\n # simple test case (with NaN)\n SIZE = 50\n x = torch.rand(SIZE, SIZE, SIZE, dtype=dtype, device=device)\n x[torch.arange(SIZE), :, torch.randint(50, (50,))] = nan\n ks = [random.randint(1, SIZE), 1, SIZE, SIZE - 1]\n res2val, res2ind = torch.sort(x)\n for k in ks:\n res1val, res1ind = torch.kthvalue(x, k, keepdim=False)\n self.assertEqual(res1val[:, :], res2val[:, :, k - 1], atol=0, rtol=0)\n self.assertEqual(res1ind[:, :], res2ind[:, :, k - 1], atol=0, rtol=0)\n\n # test overlapping output\n @dtypes(torch.double)\n @onlyOnCPUAndCUDA # Fails on XLA\n def test_kthvalue_overlap(self, device, dtype):\n S = 10\n k = 5\n a = torch.randn(S, device=device)\n indices = torch.empty((), device=device, dtype=torch.long)\n with self.assertRaisesRegex(RuntimeError, \"unsupported operation:\"):\n torch.kthvalue(a, k, out=(a, indices))\n\n @dtypes(torch.float)\n @onlyOnCPUAndCUDA # Fails on XLA\n def test_kthvalue_scalar(self, device, dtype):\n # Test scalar input (test case from https://github.com/pytorch/pytorch/issues/30818)\n # Tests that passing a scalar tensor or 1D tensor with 1 element work either way\n res = torch.tensor(2, device=device, dtype=dtype).kthvalue(1)\n ref = torch.tensor([2], device=device, dtype=dtype).kthvalue(1)\n self.assertEqual(res[0], ref[0].squeeze())\n self.assertEqual(res[1], ref[1].squeeze())\n\n @dtypes(*all_types())\n @dtypesIfCUDA(*all_types_and(torch.half))\n def test_isin(self, device, dtype):\n def assert_isin_equal(a, b):\n # Compare to the numpy reference implementation.\n x = torch.isin(a, b)\n a = a.cpu().numpy() if torch.is_tensor(a) else np.array(a)\n b = b.cpu().numpy() if torch.is_tensor(b) else np.array(b)\n y = np.isin(a, b)\n self.assertEqual(x, y)\n\n # multi-dim tensor, multi-dim tensor\n a = torch.arange(24, device=device, dtype=dtype).reshape([2, 3, 4])\n b = torch.tensor([[10, 20, 30], [0, 1, 3], [11, 22, 33]], device=device, dtype=dtype)\n assert_isin_equal(a, b)\n\n # zero-dim tensor\n zero_d = torch.tensor(3, device=device, dtype=dtype)\n assert_isin_equal(zero_d, b)\n assert_isin_equal(a, zero_d)\n assert_isin_equal(zero_d, zero_d)\n\n # empty tensor\n empty = torch.tensor([], device=device, dtype=dtype)\n assert_isin_equal(empty, b)\n assert_isin_equal(a, empty)\n assert_isin_equal(empty, empty)\n\n # scalar\n assert_isin_equal(a, 6)\n assert_isin_equal(5, b)\n\n def define_expected(lst, invert=False):\n expected = torch.tensor(lst, device=device)\n if invert:\n expected = expected.logical_not()\n return 
expected\n\n # Adapted from numpy's in1d tests\n for mult in [1, 10]:\n for invert in [False, True]:\n a = torch.tensor([5, 7, 1, 2], device=device, dtype=dtype)\n b = torch.tensor([2, 4, 3, 1, 5] * mult, device=device, dtype=dtype)\n ec = define_expected([True, False, True, True], invert=invert)\n c = torch.isin(a, b, assume_unique=True, invert=invert)\n self.assertEqual(c, ec)\n\n a[0] = 8\n ec = define_expected([False, False, True, True], invert=invert)\n c = torch.isin(a, b, assume_unique=True, invert=invert)\n self.assertEqual(c, ec)\n\n a[0], a[3] = 4, 8\n ec = define_expected([True, False, True, False], invert=invert)\n c = torch.isin(a, b, assume_unique=True, invert=invert)\n self.assertEqual(c, ec)\n\n a = torch.tensor([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5], device=device, dtype=dtype)\n b = torch.tensor([2, 3, 4] * mult, device=device, dtype=dtype)\n ec = define_expected([False, True, False, True, True, True, True, True, True,\n False, True, False, False, False], invert=invert)\n c = torch.isin(a, b, invert=invert)\n self.assertEqual(c, ec)\n\n b = torch.tensor([2, 3, 4] * mult + [5, 5, 4] * mult, device=device, dtype=dtype)\n ec = define_expected([True, True, True, True, True, True, True, True, True, True,\n True, False, True, True], invert=invert)\n c = torch.isin(a, b, invert=invert)\n self.assertEqual(c, ec)\n\n a = torch.tensor([5, 7, 1, 2], device=device, dtype=dtype)\n b = torch.tensor([2, 4, 3, 1, 5] * mult, device=device, dtype=dtype)\n ec = define_expected([True, False, True, True], invert=invert)\n c = torch.isin(a, b, invert=invert)\n self.assertEqual(c, ec)\n\n a = torch.tensor([5, 7, 1, 1, 2], device=device, dtype=dtype)\n b = torch.tensor([2, 4, 3, 3, 1, 5] * mult, device=device, dtype=dtype)\n ec = define_expected([True, False, True, True, True], invert=invert)\n c = torch.isin(a, b, invert=invert)\n self.assertEqual(c, ec)\n\n a = torch.tensor([5, 5], device=device, dtype=dtype)\n b = torch.tensor([2, 2] * mult, device=device, dtype=dtype)\n ec = define_expected([False, False], invert=invert)\n c = torch.isin(a, b, invert=invert)\n self.assertEqual(c, ec)\n\n # multi-dimensional input case using sort-based algo\n for assume_unique in [False, True]:\n a = torch.arange(6, device=device, dtype=dtype).reshape([2, 3])\n b = torch.arange(3, 30, device=device, dtype=dtype)\n ec = define_expected([[False, False, False], [True, True, True]], invert=invert)\n c = torch.isin(a, b, invert=invert, assume_unique=assume_unique)\n self.assertEqual(c, ec)\n\n def test_isin_different_dtypes(self, device):\n supported_types = all_types() if device == 'cpu' else all_types_and(torch.half)\n for mult in [1, 10]:\n for assume_unique in [False, True]:\n for dtype1, dtype2 in product(supported_types, supported_types):\n a = torch.tensor([1, 2, 3], device=device, dtype=dtype1)\n b = torch.tensor([3, 4, 5] * mult, device=device, dtype=dtype2)\n ec = torch.tensor([False, False, True], device=device)\n c = torch.isin(a, b, assume_unique=assume_unique)\n self.assertEqual(c, ec)\n\n @onlyCUDA\n @dtypes(*all_types())\n def test_isin_different_devices(self, device, dtype):\n a = torch.arange(6, device=device, dtype=dtype).reshape([2, 3])\n b = torch.arange(3, 30, device='cpu', dtype=dtype)\n with self.assertRaises(RuntimeError):\n torch.isin(a, b)\n\n c = torch.arange(6, device='cpu', dtype=dtype).reshape([2, 3])\n d = torch.arange(3, 30, device=device, dtype=dtype)\n with self.assertRaises(RuntimeError):\n torch.isin(c, d)\n\n\ninstantiate_device_type_tests(TestSortAndSelect, 
globals())\n\nif __name__ == '__main__':\n run_tests()\n"
] | [
[
"torch.randint",
"torch.zeros",
"torch.testing.get_all_fp_dtypes",
"torch.testing.all_types_and",
"torch.testing.get_all_dtypes",
"torch.randperm",
"torch.testing._internal.common_device_type.largeTensorTest",
"torch.iinfo",
"torch.unique",
"torch.testing.get_all_int_dtypes",
"torch.topk",
"torch.Size",
"torch.ones",
"torch.randn",
"torch.isin",
"torch.tensor",
"torch.testing._internal.common_utils.make_tensor",
"torch.testing.all_types",
"torch.rand",
"torch.sort",
"torch.arange",
"torch.unique_consecutive",
"torch.argsort",
"torch.kthvalue",
"numpy.isin",
"torch.empty_like",
"torch.empty",
"torch.msort",
"torch.zeros_like",
"torch.is_tensor",
"torch.testing._internal.common_device_type.dtypes",
"numpy.argsort",
"numpy.array",
"torch.testing._internal.common_utils.run_tests",
"torch.testing.floating_types_and"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
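Aside: the test/test_sort_and_select.py record above exercises torch.sort's stable mode by comparing its sort indices against NumPy's stable argsort. The following is a minimal, self-contained sketch of that comparison, not part of the dataset row itself; the tensor values are made up for illustration, and it assumes a PyTorch build that accepts the stable keyword (as the record's own tests do).

import numpy as np
import torch

# Repeated keys, so only a stable sort pins down the index order.
x = torch.tensor([0, 1] * 5)

# torch: stable sort keeps equal elements in their original order.
_, idx_torch = x.sort(stable=True)

# numpy: kind="stable" is the reference behaviour the tests compare against.
idx_numpy = np.argsort(x.numpy(), kind="stable")

assert np.array_equal(idx_torch.numpy(), idx_numpy)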
yssource/pandas-ta | [
"98478f8bf049a4c8748d6f3c795f4f335ced05ca",
"bf7e2b395596e8a75bed863e9ce0a0f34d14e829"
] | [
"pandas_ta/performance/log_return.py",
"pandas_ta/volatility/donchian.py"
] | [
"# -*- coding: utf-8 -*-\nfrom numpy import log as nplog\nfrom pandas_ta.utils import get_offset, verify_series\n\n\ndef log_return(close, length=None, cumulative=False, offset=None, **kwargs):\n \"\"\"Indicator: Log Return\"\"\"\n # Validate Arguments\n close = verify_series(close)\n length = int(length) if length and length > 0 else 1\n offset = get_offset(offset)\n\n # Calculate Result\n log_return = nplog(close).diff(periods=length)\n\n if cumulative:\n log_return = log_return.cumsum()\n\n # Offset\n if offset != 0:\n log_return = log_return.shift(offset)\n\n # Handle fills\n if \"fillna\" in kwargs:\n log_return.fillna(kwargs[\"fillna\"], inplace=True)\n if \"fill_method\" in kwargs:\n log_return.fillna(method=kwargs[\"fill_method\"], inplace=True)\n\n # Name & Category\n log_return.name = f\"{'CUM' if cumulative else ''}LOGRET_{length}\"\n log_return.category = \"performance\"\n\n return log_return\n\n\nlog_return.__doc__ = \\\n\"\"\"Log Return\n\nCalculates the logarithmic return of a Series.\nSee also: help(df.ta.log_return) for additional **kwargs a valid 'df'.\n\nSources:\n https://stackoverflow.com/questions/31287552/logarithmic-returns-in-pandas-dataframe\n\nCalculation:\n Default Inputs:\n length=1, cumulative=False\n LOGRET = log( close.diff(periods=length) )\n CUMLOGRET = LOGRET.cumsum() if cumulative\n\nArgs:\n close (pd.Series): Series of 'close's\n length (int): It's period. Default: 20\n cumulative (bool): If True, returns the cumulative returns. Default: False\n offset (int): How many periods to offset the result. Default: 0\n\nKwargs:\n fillna (value, optional): pd.DataFrame.fillna(value)\n fill_method (value, optional): Type of fill method\n\nReturns:\n pd.Series: New feature generated.\n\"\"\"\n",
"# -*- coding: utf-8 -*-\nfrom pandas import DataFrame\nfrom pandas_ta.utils import get_offset, verify_series\n\n\ndef donchian(high, low, lower_length=None, upper_length=None, offset=None, **kwargs):\n \"\"\"Indicator: Donchian Channels (DC)\"\"\"\n # Validate arguments\n high = verify_series(high)\n low = verify_series(low)\n lower_length = int(lower_length) if lower_length and lower_length > 0 else 20\n upper_length = int(upper_length) if upper_length and upper_length > 0 else 20\n lower_min_periods = int(kwargs[\"lower_min_periods\"]) if \"lower_min_periods\" in kwargs and kwargs[\"lower_min_periods\"] is not None else lower_length\n upper_min_periods = int(kwargs[\"upper_min_periods\"]) if \"upper_min_periods\" in kwargs and kwargs[\"upper_min_periods\"] is not None else upper_length\n offset = get_offset(offset)\n\n # Calculate Result\n lower = low.rolling(lower_length, min_periods=lower_min_periods).min()\n upper = high.rolling(upper_length, min_periods=upper_min_periods).max()\n mid = 0.5 * (lower + upper)\n\n # Handle fills\n if \"fillna\" in kwargs:\n lower.fillna(kwargs[\"fillna\"], inplace=True)\n mid.fillna(kwargs[\"fillna\"], inplace=True)\n upper.fillna(kwargs[\"fillna\"], inplace=True)\n if \"fill_method\" in kwargs:\n lower.fillna(method=kwargs[\"fill_method\"], inplace=True)\n mid.fillna(method=kwargs[\"fill_method\"], inplace=True)\n upper.fillna(method=kwargs[\"fill_method\"], inplace=True)\n\n # Offset\n if offset != 0:\n lower = lower.shift(offset)\n mid = mid.shift(offset)\n upper = upper.shift(offset)\n\n # Name and Categorize it\n lower.name = f\"DCL_{lower_length}_{upper_length}\"\n mid.name = f\"DCM_{lower_length}_{upper_length}\"\n upper.name = f\"DCU_{lower_length}_{upper_length}\"\n mid.category = upper.category = lower.category = \"volatility\"\n\n # Prepare DataFrame to return\n data = {lower.name: lower, mid.name: mid, upper.name: upper}\n dcdf = DataFrame(data)\n dcdf.name = f\"DC_{lower_length}_{upper_length}\"\n dcdf.category = mid.category\n\n return dcdf\n\n\ndonchian.__doc__ = \\\n\"\"\"Donchian Channels (DC)\n\nDonchian Channels are used to measure volatility, similar to\nBollinger Bands and Keltner Channels.\n\nSources:\n https://www.tradingview.com/wiki/Donchian_Channels_(DC)\n\nCalculation:\n Default Inputs:\n lower_length=upper_length=20\n LOWER = low.rolling(lower_length).min()\n UPPER = high.rolling(upper_length).max()\n MID = 0.5 * (LOWER + UPPER)\n\nArgs:\n high (pd.Series): Series of 'high's\n low (pd.Series): Series of 'low's\n lower_length (int): The short period. Default: 20\n upper_length (int): The short period. Default: 20\n offset (int): How many periods to offset the result. Default: 0\n\nKwargs:\n fillna (value, optional): pd.DataFrame.fillna(value)\n fill_method (value, optional): Type of fill method\n\nReturns:\n pd.DataFrame: lower, mid, upper columns.\n\"\"\"\n"
] | [
[
"numpy.log"
],
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
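Aside: the docstrings in the pandas_ta record above spell out the Donchian Channel and log-return calculations in terms of plain pandas operations (rolling min/max of low/high and log-differencing of close). The sketch below restates those two calculations outside the library, using a made-up five-bar price series purely for illustration; it is not part of the dataset row.

import numpy as np
import pandas as pd

# Hypothetical OHLC-style data, invented for this example only.
prices = pd.DataFrame({
    "high":  [10.2, 10.8, 11.1, 10.9, 11.4],
    "low":   [ 9.8, 10.1, 10.6, 10.4, 10.9],
    "close": [10.0, 10.5, 11.0, 10.7, 11.2],
})

length = 3  # both docstrings default to 20; shortened to fit the toy series

# Donchian Channels: rolling extremes of low/high and their midpoint.
lower = prices["low"].rolling(length, min_periods=length).min()   # DCL
upper = prices["high"].rolling(length, min_periods=length).max()  # DCU
mid = 0.5 * (lower + upper)                                       # DCM

# Log return: difference of log prices, optionally accumulated.
log_ret = np.log(prices["close"]).diff(periods=1)  # LOGRET_1
cum_log_ret = log_ret.cumsum()                     # CUMLOGRET_1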
Solara570/demo-solara | [
"3ce6df1fd68089c427bbd46fb0857e8b76428ca6",
"3ce6df1fd68089c427bbd46fb0857e8b76428ca6"
] | [
"articles/inversion.py",
"custom/custom_helpers.py"
] | [
"#coding=utf-8\n\n################################################################################################\n# A 3-part series on circle inversion, Descartes' theorem along with its variants, and more! #\n# #\n# Part 1: An Introduction to Circle Inversion - https://zhuanlan.zhihu.com/p/86644341 #\n# Part 2: Four Circles & Descartes' Theorem (1) - https://zhuanlan.zhihu.com/p/105819963 #\n# Part 3: Four Circles & Descartes' Theorem (2) - https://zhuanlan.zhihu.com/p/106874090 #\n################################################################################################\n\nimport numpy as np\nimport itertools as it\nfrom manimlib.constants import *\nfrom manimlib.utils.color import *\nfrom manimlib.utils.space_ops import *\nfrom manimlib.utils.simple_functions import *\nfrom manimlib.animation.composition import AnimationGroup\nfrom manimlib.animation.creation import ShowCreation, Write, DrawBorderThenFill\nfrom manimlib.animation.fading import FadeOut, FadeInFromDown\nfrom manimlib.animation.transform import Transform, ReplacementTransform, MoveToTarget, ApplyMethod\nfrom manimlib.mobject.mobject import Mobject\nfrom manimlib.mobject.coordinate_systems import Axes, NumberPlane, ThreeDAxes\nfrom manimlib.mobject.geometry import Circle, Line, Dot, SmallDot, Square, Polygon, RegularPolygon, \\\n Arrow, Sector, Vector\nfrom manimlib.mobject.numbers import DecimalNumber\nfrom manimlib.mobject.value_tracker import ValueTracker\nfrom manimlib.mobject.shape_matchers import BackgroundRectangle, SurroundingRectangle\nfrom manimlib.mobject.three_dimensions import Sphere\nfrom manimlib.mobject.svg.brace import Brace\nfrom manimlib.mobject.svg.tex_mobject import TexMobject, TextMobject\nfrom manimlib.mobject.types.vectorized_mobject import VMobject, VGroup, VectorizedPoint, DashedVMobject\nfrom manimlib.scene.scene import Scene\nfrom manimlib.scene.three_d_scene import ThreeDScene\n\nfrom short.apollonian_gasket import calc_centers_by_radii, calc_new_agc_info, AGCircle, \\\n ApollonianGasket, ApollonianGasketScene\nfrom short.ford_circles import get_coprime_numers_by_denom, get_stroke_width_by_height, \\\n AssembledFraction, ZoomInOnFordCircles\n\n\n#####\n## Constants\nMAX_NORM = 1e2\nCB_DARK = \"#825201\"\nCB_LIGHT = \"#B69B4C\"\n\n\n#####\n## General Methods\ndef complex_inversion(z, z0, r):\n return z0 + np.conjugate(r**2 / (z-z0))\n\ndef R3_inversion(point, inv_center, radius):\n z = R3_to_complex(point)\n z0 = R3_to_complex(inv_center)\n w = complex_inversion(z, z0, radius)\n return complex_to_R3(w)\n\ndef inversion(point, inv_center, radius):\n # Just a rename\n return R3_inversion(point, inv_center, radius)\n\ndef is_close_in_R3(p1, p2, thres = 1e-6):\n \"\"\"Check if two points are close in R^3.\"\"\"\n return np.linalg.norm(p1 - p2) < thres\n\ndef is_close(z1, z2, thres = 1e-6):\n \"\"\"Check if two complex numbers are close to each other.\"\"\"\n return np.abs(z1 - z2) < thres\n\ndef get_tangent_point(c1, c2, thres = 1e-4):\n \"\"\"Return the tangency point of circles 'c1' and 'c2'.\"\"\"\n p1 = c1.get_center()\n p2 = c2.get_center()\n r1 = c1.get_height() / 2\n r2 = c2.get_height() / 2\n d = get_norm(p2 - p1)\n if is_close(d, r1-r2, thres):\n return p1 + r1*normalize(p2-p1)\n elif is_close(d, r2-r1, thres):\n return p2 + r2*normalize(p1-p2)\n elif is_close(d, r1+r2, thres):\n return (r1*p2+r2*p1) / (r1+r2)\n else:\n raise Exception(\"These two circles aren't tangent.\")\n\ndef get_para_and_perp_components(point, lp1, lp2):\n v = lp2 - point\n v0 = lp2 - lp1\n v_para = 
fdiv(np.dot(v, v0), np.dot(v0, v0)) * v0\n v_perp = v - v_para\n return v_para, v_perp\n\ndef distance_to_the_line(point, lp1, lp2):\n \"\"\"Return the distance from 'point' to the line given by 'lp1' and 'lp2'.\"\"\"\n v_para, v_perp = get_para_and_perp_components(point, lp1, lp2)\n return np.linalg.norm(v_perp)\n\ndef is_on_the_line(point, lp1, lp2, thres = 1e-6):\n \"\"\"Check if 'point' is on the line given by two points 'lp1' and 'lp2'.\"\"\"\n return is_close(distance_to_the_line(point, lp1, lp2), thres)\n\ndef get_random_vector(max_step):\n \"\"\"Return a random vector with a maximum length of 'max_step'.\"\"\"\n return max_step*np.random.random() * rotate_vector(RIGHT, TAU*np.random.random())\n\ndef get_nearest_int(num):\n return int(np.round(num, 0))\n\ndef solve_quadratic_equation(a, b, c):\n delta = b**2 - 4*a*c\n x1 = (-b-np.sqrt(delta)) /(2*a)\n x2 = (-b+np.sqrt(delta)) /(2*a)\n print(a, b, c, x1, x2)\n return x1, x2\n\ndef get_next_terms(k1, k2, k3):\n \"\"\"Return two adjacent terms in the loxodromic sequence.\"\"\"\n b = -2*(k1+k2+k3)\n c = 2*(k1**2+k2**2+k3**2) - (k1+k2+k3)**2\n return list(map(get_nearest_int, solve_quadratic_equation(1, b, c)))\n\ndef get_sequence_string(arr):\n arr_copy = list(map(str, arr))\n arr_copy.insert(0, \"...\")\n arr_copy.append(\"...\")\n return \", \".join(arr_copy)\n\n\n#####\n## Mobjects\nclass FineCircle(Circle):\n CONFIG = {\n # In manim, circles are approximated by multiple cubic Beziers,\n # so it's necessary to increase the number of components for\n # high-precision calculations.\n \"num_components\": 100,\n }\n\n\nclass ExtendedLine(Line):\n def __init__(self, sp, ep, n = 10, **kwargs):\n unit_vec = normalize(ep - sp)\n new_sp = sp - n * unit_vec\n new_ep = ep + n * unit_vec\n Line.__init__(self, new_sp, new_ep, **kwargs)\n\n\nclass DotLabel(VMobject):\n CONFIG = {\n \"position\" : UP,\n \"label_buff\" : 0.25,\n }\n def __init__(self, label_text, dot, **kwargs):\n VMobject.__init__(self, **kwargs)\n self.dot = dot\n label = TexMobject(label_text, **kwargs)\n if self.position is not None:\n label.add_updater(\n lambda l: l.next_to(self.dot.get_center(), self.position, buff = self.label_buff)\n )\n self.add(label)\n\n def set_label(self, label):\n label.next_to(self.dot.get_center())\n\n\nclass TwoDotsSegment(Line):\n def __init__(self, dot_1, dot_2, **kwargs):\n self.dot_1 = dot_1\n self.dot_2 = dot_2\n sp, ep = self.get_dots_centers()\n Line.__init__(self, start = sp, end = ep, **kwargs)\n self.add_updater(self.set_start_and_end)\n\n def get_dots_centers(self):\n return self.dot_1.get_center(), self.dot_2.get_center()\n\n def set_start_and_end(self, line_mob):\n sp, ep = self.get_dots_centers()\n line_mob.put_start_and_end_on(sp, ep)\n\n\nclass LengthLabel(DecimalNumber):\n CONFIG = {\n \"num_decimal_places\" : 3,\n \"label_height\" : 0.3,\n \"label_buff\" : 0.3,\n \"offset\" : 0,\n \"is_on_opposite_side\" : False,\n }\n def __init__(self, line_mob, **kwargs):\n DecimalNumber.__init__(self, **kwargs)\n self.line_mob = line_mob\n self.add_updater(self.set_label)\n\n def set_label(self, label):\n label.set_value(self.line_mob.get_length())\n label.set_height(self.label_height)\n label.rotate(self.line_mob.get_angle())\n side_factor = -1 if self.is_on_opposite_side else 1\n label.move_to(\n self.line_mob.get_center() \\\n + self.line_mob.get_vector() / 2 * self.offset \\\n + side_factor * rotate_vector(self.line_mob.get_unit_vector(), PI/2) * self.label_buff\n )\n\n def set_offset(self, offset):\n self.offset = offset\n return self\n\n 
def switch_side(self):\n self.is_on_opposite_side = not self.is_on_opposite_side\n return self\n\n\nclass ManyDotsPolygon(VMobject):\n def __init__(self, *dots, **kwargs):\n VMobject.__init__(self, **kwargs)\n self.dots = dots\n dots_centers = self.get_dots_centers()\n polygon = Polygon(*dots_centers, **kwargs)\n polygon.add_updater(self.set_vertices)\n self.add(polygon)\n\n def get_dots_centers(self):\n return [dot.get_center() for dot in self.dots]\n\n def set_vertices(self, polygon_mob):\n vertices = self.get_dots_centers()\n polygon_mob.set_points_as_corners([*vertices, vertices[0]])\n\n\nclass AngleIndicator(VMobject):\n CONFIG = {\n \"color\" : RED,\n \"radius\" : 0.2,\n \"fill_opacity\" : 0.6,\n \"is_minor_arc\" : True,\n }\n def __init__(self, dot_A, dot_C, dot_B, **kwargs):\n VMobject.__init__(self, **kwargs)\n self.dot_A = dot_A\n self.dot_C = dot_C\n self.dot_B = dot_B\n sector = Sector()\n sector.add_updater(self.set_sector)\n self.add(sector)\n self.sector = sector\n\n def get_point_center(self, point_or_mob):\n if isinstance(point_or_mob, Mobject):\n return point_or_mob.get_center()\n else:\n return point_or_mob\n\n def get_point_centers(self):\n return tuple(map(self.get_point_center, [self.dot_A, self.dot_C, self.dot_B]))\n\n def set_sector(self, mob):\n pt_A, pt_C, pt_B = self.get_point_centers()\n start_angle, angle = self.get_angles()\n outer_radius = min([self.radius, get_norm(pt_C - pt_A)/2, get_norm(pt_C - pt_B)/2])\n new_sector = Sector(\n start_angle = start_angle, angle = angle, outer_radius = outer_radius,\n color = self.color, fill_opacity = self.fill_opacity, stroke_width = 0\n )\n new_sector.move_arc_center_to(self.get_point_center(self.dot_C))\n mob.become(new_sector)\n \n def get_angles(self):\n pt_A, pt_C, pt_B = self.get_point_centers()\n start_angle = angle_of_vector(pt_A - pt_C)\n end_angle = angle_of_vector(pt_B - pt_C)\n angle = (end_angle - start_angle) % TAU\n if self.is_minor_arc and angle > PI:\n angle -= TAU\n return start_angle, angle\n\n\nclass RightAngleIndicator(VMobject):\n CONFIG = {\n \"color\" : WHITE,\n \"side_length\" : 0.2,\n \"line_width\" : 1,\n \"square_opacity\" : 0.5,\n }\n def __init__(self, dot_A, dot_C, dot_B, **kwargs):\n VMobject.__init__(self, **kwargs)\n self.dot_A = dot_A\n self.dot_C = dot_C\n self.dot_B = dot_B\n line = VMobject(stroke_width = self.line_width, fill_opacity = 0)\n square = VMobject(stroke_width = 0, fill_color = self.color, fill_opacity = self.square_opacity)\n line.add_updater(self.set_line)\n square.add_updater(self.set_square)\n self.add(square, line)\n self.line = line\n self.square = square\n\n def get_point_center(self, point_or_mob):\n if isinstance(point_or_mob, Mobject):\n return point_or_mob.get_center()\n else:\n return point_or_mob\n\n def get_point_centers(self):\n return tuple(map(self.get_point_center, [self.dot_A, self.dot_C, self.dot_B]))\n\n def get_norm_vectors(self):\n pt_A, pt_C, pt_B = self.get_point_centers()\n norm_vec_CA = normalize(pt_A - pt_C)\n norm_vec_CB = normalize(pt_B - pt_C)\n return norm_vec_CA, norm_vec_CB\n\n def get_corner_points(self):\n pt_A, pt_C, pt_B = self.get_point_centers()\n norm_vec_CA, norm_vec_CB = self.get_norm_vectors()\n side_length = min([self.side_length, get_norm(pt_A - pt_C)/2, get_norm(pt_B - pt_C)/2])\n return (\n pt_C,\n pt_C + norm_vec_CA * side_length,\n pt_C + norm_vec_CA * side_length + norm_vec_CB * side_length,\n pt_C + norm_vec_CB * side_length\n )\n\n def set_line(self, line_mob):\n p, q, r, s = self.get_corner_points()\n 
line_mob.set_points_as_corners([q, r, s])\n\n def set_square(self, square_mob):\n p, q, r, s = self.get_corner_points()\n square_mob.set_points_as_corners([p, q, r, s, p])\n\n\nclass InversedDot(VMobject):\n CONFIG = {\n \"color\" : PINK,\n \"stroke_width\" : 3,\n \"fill_opacity\" : 1,\n \"is_hollow\" : True,\n \"center_color\" : BLACK,\n }\n def __init__(self, orig_dot, circle, **kwargs):\n self.orig_dot = orig_dot\n self.circle = circle\n VMobject.__init__(self, **kwargs)\n\n def generate_points(self):\n if self.is_hollow:\n self.fill_color = self.center_color\n else:\n self.fill_color = self.color\n self.stroke_width = 0\n inv_dot = Dot(ORIGIN, color = self.color)\n self.inv_dot = inv_dot\n self.add(inv_dot)\n self.add_updater_to_inversed_dot()\n\n def add_updater_to_inversed_dot(self):\n self.inv_dot.add_updater(self.move_inversed_dot)\n\n def move_inversed_dot(self, inv_dot):\n point = self.orig_dot.get_center()\n inv_center = self.circle.get_center()\n radius = self.circle.get_height() / 2.\n if is_close_in_R3(point, inv_center):\n pass\n else:\n inv_dot.move_to(inversion(point, inv_center, radius))\n\n\nclass InversedVMobject(VMobject):\n CONFIG = {\n \"is_analytical\" : True,\n \"match_original_style\" : False,\n \"use_dashed_vmob\" : True,\n \"dashed_vmob_config\": {\n \"num_dashes\" : 50,\n \"positive_space_ratio\" : 0.6,\n },\n }\n def __init__(self, orig_vmob, circle, **kwargs):\n VMobject.__init__(self, **kwargs)\n self.orig_vmob = orig_vmob\n self.circle = circle\n self.orig_vmob_type = \"Others\"\n self.initialize_orig_vmob_type()\n self.add_updater_to_inversed_vmobject()\n\n def add_updater_to_inversed_vmobject(self):\n self.add_updater(self.set_inversed_vmobject)\n\n def initialize_orig_vmob_type(self):\n if isinstance(self.orig_vmob, Line):\n self.orig_vmob_type = \"Line\"\n elif isinstance(self.orig_vmob, Circle):\n self.orig_vmob_type = \"Circle\"\n else:\n self.orig_vmob_type = \"Others\"\n\n def set_orig_vmob_type(self, orig_vmob_type):\n self.orig_vmob_type = orig_vmob_type\n\n def set_inversed_vmobject(self, inv_vmob):\n inv_center = self.circle.get_center()\n radius = self.circle.get_height() / 2.\n if self.is_analytical and self.orig_vmob_type == \"Line\":\n # If it's a line...\n lp1, lp2 = self.orig_vmob.get_start_and_end()\n if is_on_the_line(inv_center, lp1, lp2):\n # If it's a line passing through the inversion center,\n # then the inversion is just the line itself.\n temp_vmob = ExtendedLine(lp1, lp2)\n else:\n # If it's a line NOT through the inversion center,\n # then the inversion is a circle passing through the inversion center.\n v_para, v_perp = get_para_and_perp_components(inv_center, lp1, lp2)\n d = distance_to_the_line(inv_center, lp1, lp2)\n # d = np.linalg.norm(v_perp)\n inv_vmob_radius = fdiv(radius**2, 2*d)\n closepoint = inv_center + v_perp\n inv_vmob_closepoint = inversion(closepoint, inv_center, radius)\n inv_vmob_center = (inv_center + inv_vmob_closepoint) / 2.\n temp_vmob = FineCircle(radius = inv_vmob_radius)\n temp_vmob.move_to(inv_vmob_center)\n elif self.is_analytical and self.orig_vmob_type == \"Circle\":\n # If it's a circle...\n orig_vmob_center = self.orig_vmob.get_center()\n orig_vmob_radius = self.orig_vmob.get_height() / 2.\n center_vec = orig_vmob_center - inv_center\n d = get_norm(center_vec)\n if is_close(orig_vmob_radius, d):\n # If it's a circle passing through the inversion center,\n # then the inversion is a line perps to the line through the circle centers.\n foot = inv_center + fdiv(radius**2, 2*d) * 
normalize(center_vec)\n lp1 = foot + rotate_vector(center_vec, PI/2)\n lp2 = foot + rotate_vector(center_vec, -PI/2)\n temp_vmob = ExtendedLine(lp1, lp2)\n else:\n # If it's a circle NOT through the inversion center,\n # then the inversion is a circle NOT through the inversion center.\n dp1 = orig_vmob_center - orig_vmob_radius * normalize(center_vec)\n dp2 = orig_vmob_center + orig_vmob_radius * normalize(center_vec)\n inv_dp1 = inversion(dp1, inv_center, radius)\n inv_dp2 = inversion(dp2, inv_center, radius)\n inv_vmob_radius = get_norm(inv_dp2 - inv_dp1) / 2.\n inv_vmob_center = (inv_dp2 + inv_dp1) / 2.\n temp_vmob = FineCircle(radius = inv_vmob_radius)\n temp_vmob.move_to(inv_vmob_center)\n else:\n temp_vmob = self.orig_vmob.copy()\n temp_vmob.apply_function(lambda p: inversion(p, inv_center, radius))\n if self.use_dashed_vmob:\n temp_vmob = DashedVMobject(temp_vmob, **self.dashed_vmob_config)\n inv_vmob.become(temp_vmob)\n if self.match_original_style:\n inv_vmob.match_style(self.orig_vmob)\n\n\nclass FourCirclesNormalForm(VMobject):\n CONFIG = {\n \"circle_colors\" : [MAROON_B, RED, GREEN, BLUE],\n \"r\" : 1.2,\n \"l\" : 9,\n \"use_dashed_vmob\" : True,\n \"dashed_vmob_config\" : {\n \"num_dashes\" : 30,\n \"positive_space_ratio\" : 0.6,\n }\n }\n def __init__(self, **kwargs):\n VMobject.__init__(self, **kwargs)\n c1 = Circle(radius = self.r, **kwargs).shift(self.r*LEFT)\n c2 = Circle(radius = self.r, **kwargs).shift(self.r*RIGHT)\n c3 = Line(self.l*LEFT, self.l*RIGHT, **kwargs).shift(self.r*DOWN)\n c4 = Line(self.l*LEFT, self.l*RIGHT, **kwargs).shift(self.r*UP)\n for mob, color in zip([c1, c2, c3, c4], self.circle_colors):\n mob.set_color(color)\n if self.use_dashed_vmob:\n self.add(DashedVMobject(mob, **self.dashed_vmob_config))\n else:\n self.add(mob)\n\n\nclass DescartesFourCircles(VMobject):\n CONFIG = {\n \"outer_circle_index\" : None,\n \"orig_circle_color\" : BLUE,\n \"new_circle_color\" : YELLOW,\n \"show_new_circles\" : True,\n \"show_new_circles_centers\" : False,\n }\n def __init__(self, ccdot1, ccdot2, ccdot3, **kwargs):\n self.ccdot1 = ccdot1\n self.ccdot2 = ccdot2\n self.ccdot3 = ccdot3\n VMobject.__init__(self, **kwargs)\n self.add_orig_circles()\n self.add_orig_circles_updaters()\n self.generate_new_circles()\n if self.show_new_circles:\n self.add_new_circles()\n if self.show_new_circles_centers:\n self.add_new_circles_centers()\n \n def add_orig_circles(self):\n self.c1, self.c2, self.c3 = self.cs = VGroup(*[\n Circle(arc_center = cc, radius = r, color = self.orig_circle_color)\n for cc, r in zip(self.get_orig_circle_centers(), self.calc_radii_by_centers())\n ])\n self.add(self.cs)\n\n def add_orig_circles_updaters(self):\n def get_center(k):\n return self.get_orig_circle_centers()[k]\n def get_abs_radius(k):\n return np.abs(self.calc_radii_by_centers()[k])\n # Since enumerate() won't work here (seriously?),\n # I have to use a much more direct approach - list them all.\n self.c1.add_updater(lambda c: c.move_to(get_center(0)))\n self.c1.add_updater(lambda c: c.set_height(2*get_abs_radius(0)))\n self.c2.add_updater(lambda c: c.move_to(get_center(1)))\n self.c2.add_updater(lambda c: c.set_height(2*get_abs_radius(1)))\n self.c3.add_updater(lambda c: c.move_to(get_center(2)))\n self.c3.add_updater(lambda c: c.set_height(2*get_abs_radius(2)))\n\n def get_orig_circles(self):\n return self.cs\n\n def get_orig_circle_centers(self):\n return [dot.get_center() for dot in (self.ccdot1, self.ccdot2, self.ccdot3)]\n\n def get_orig_circle_radii(self):\n return 
self.calc_radii_by_centers()\n\n def get_orig_circle_curvatures(self):\n return [fdiv(1, radius) for radius in self.calc_radii_by_centers()]\n\n def calc_radii_by_centers(self):\n p1, p2, p3 = self.get_orig_circle_centers()\n d12 = get_norm(p2 - p1)\n d23 = get_norm(p3 - p2)\n d13 = get_norm(p3 - p1)\n sum_r = (d12 + d23 + d13) / 2.\n if self.outer_circle_index == 1:\n # If circle 1 contains other two circles...\n return [-sum_r, sum_r-d12, sum_r-d13]\n elif self.outer_circle_index == 2:\n # If circle 2 contains other two circles...\n return [sum_r-d12, -sum_r, sum_r-d23]\n elif self.outer_circle_index == 3:\n # If circle 3 contains other two circles...\n return [sum_r-d13, sum_r-d23, -sum_r]\n else:\n return [sum_r-d23, sum_r-d13, sum_r-d12]\n\n def generate_new_circles(self):\n self.c4_1, self.c4_2 = self.new_circles = VGroup(*[\n Circle(arc_center = new_cc, radius = new_r, color = self.new_circle_color)\n for new_cc, new_r in self.calc_new_circles_centers_and_radii()\n ])\n self.generate_new_circles_centers()\n self.add_new_circles_updaters()\n\n def calc_new_circles_centers_and_radii(self):\n k1, k2, k3 = self.get_orig_circle_curvatures()\n z1, z2, z3 = map(R3_to_complex, self.get_orig_circle_centers())\n # Calculate the curvatures of new circles\n sum_k = k1 + k2 + k3\n sum_k2 = k1**2 + k2**2 + k3**2\n sum_k_cycle_prod = k1*k2 + k2*k3 + k3*k1\n b = (-2)*sum_k\n c = sum_k2 - 2*sum_k_cycle_prod\n delta = b**2 - 4*c\n k4_1 = (-b + np.sqrt(delta)) / 2\n k4_2 = (-b - np.sqrt(delta)) / 2\n # Calculate the centers of new circles\n # arxiv.org/abs/math/0101066v1 - Eqn 2.3\n sum_kz = k1*z1 + k2*z2 + k3*z3\n sum_k2z = k1**2 * z1 + k2**2 * z2 + k3**2 * z3\n coeff_1 = (sum_k - k4_1) * k4_1\n const_1 = 2 * sum_k2z - (sum_k + k4_1) * sum_kz\n z4_1 = const_1 / coeff_1\n coeff_2 = (sum_k - k4_2) * k4_2\n const_2 = 2 * sum_k2z - (sum_k + k4_2) * sum_kz\n z4_2 = const_2 / coeff_2\n return [[complex_to_R3(z4_1), fdiv(1, k4_1)], [complex_to_R3(z4_2), fdiv(1, k4_2)]]\n\n def generate_new_circles_centers(self):\n ccdot4_1 = Dot(color = self.new_circle_color)\n ccdot4_1.add_updater(lambda m: m.move_to(self.c4_1.get_center()))\n ccdot4_2 = Dot(color = self.new_circle_color)\n ccdot4_2.add_updater(lambda m: m.move_to(self.c4_2.get_center()))\n self.ccdot4_1 = ccdot4_1\n self.ccdot4_2 = ccdot4_2\n\n def add_new_circles_updaters(self):\n def get_new_center(k):\n return self.calc_new_circles_centers_and_radii()[k][0]\n def get_abs_new_radius(k):\n return np.abs(self.calc_new_circles_centers_and_radii()[k][1])\n # Since enumerate() won't work here (seriously?),\n # I have to use a much more direct approach - list them all.\n self.c4_1.add_updater(lambda c: c.move_to(get_new_center(0)))\n self.c4_1.add_updater(lambda c: c.set_height(2*get_abs_new_radius(0)))\n self.c4_2.add_updater(lambda c: c.move_to(get_new_center(1)))\n self.c4_2.add_updater(lambda c: c.set_height(2*get_abs_new_radius(1)))\n\n def add_new_circles(self):\n if not hasattr(self, \"new_circles\"):\n self.new_circles = generate_new_circles()\n self.add(self.new_circles)\n\n def get_new_circles(self):\n if not hasattr(self, \"new_circles\"):\n self.new_circles = generate_new_circles()\n return self.new_circles\n\n def add_new_circles_centers(self):\n self.add(self.ccdot4_1, self.ccdot4_2)\n\n def remove_new_circles_center(self):\n self.remove(self.ccdot4_1, self.ccdot4_2)\n\n\n\n#####\n## Inversion Introduction Scenes\nclass ConceptsInInversion(Scene):\n CONFIG = {\n \"color_circle\" : YELLOW,\n \"color_radius\" : RED,\n \"color_P\" : WHITE,\n }\n 
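    # Added sketch (hypothetical helper, not used by the animation below): the
    # scene illustrates the defining relation |OP| * |OP'| = R^2, which is
    # equivalent to P' = O + (R^2 / |OP|^2) * (P - O).  The module-level
    # inversion() consumed by InversedDot/InversedVMobject is assumed to
    # implement this same formula.
    def _inversion_formula_sketch(self, point, center, radius):
        vec = point - center
        # |OP'| = R^2 / |OP|, taken along the ray from O through P
        return center + fdiv(radius**2, np.dot(vec, vec)) * vec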
def construct(self):\n self.add_backgrounds()\n self.move_around_point_P()\n\n def add_backgrounds(self):\n circle_O = Circle(radius = 3.5, color = self.color_circle)\n circle_O.shift(3*LEFT)\n remark_circle = TextMobject(\"反演圆\", color = self.color_circle)\n remark_circle.next_to(circle_O.get_bottom(), UP)\n dot_O = Dot(circle_O.get_center(), color = self.color_circle)\n label_O = DotLabel(\"O\", dot_O, color = self.color_circle, position = DOWN)\n remark_O = TextMobject(\"反演中心\", color = self.color_circle)\n remark_O.next_to(label_O, LEFT, buff = 0.15)\n radius = Line(circle_O.get_center(), circle_O.get_left())\n label_radius = TexMobject(\"R\").scale(0.8)\n remark_radius = TextMobject(\"反演幂\").scale(0.8)\n brace_radius = Brace(radius, UP)\n brace_radius.put_at_tip(label_radius)\n remark_radius.next_to(label_radius, LEFT, buff = 0.15)\n group_radius = VGroup(radius, label_radius, brace_radius, remark_radius)\n group_radius.set_color(self.color_radius)\n group_radius.rotate(-PI/12, about_point = dot_O.get_center())\n def_inversion = TextMobject(\"反演变换:$P \\\\mapsto P'$\")\n rlt_inversion = TexMobject(\"|OP| \\\\times |OP'|=\", \"R^2\")\n rlt_inversion.next_to(def_inversion, DOWN, aligned_edge = RIGHT)\n rlt_inversion[-1].set_color(self.color_radius)\n remarks = VGroup(def_inversion, rlt_inversion)\n remarks.to_corner(DR)\n dot_P = Dot(LEFT, color = self.color_P)\n label_P = DotLabel(\"P\", dot_P, color = self.color_P, position = DL, label_buff = 0.2)\n dot_Pi = InversedDot(dot_P, circle_O, color = self.color_P)\n label_Pi = DotLabel(\"P'\", dot_Pi, color = self.color_P, position = DR, label_buff = 0.2)\n line_OP = TwoDotsSegment(dot_O, dot_P, stroke_width = 2)\n line_OPi = TwoDotsSegment(dot_O, dot_Pi, stroke_width = 2)\n self.add(remarks)\n self.add(group_radius)\n self.add(circle_O, dot_O, label_O, remark_O, remark_circle)\n self.add(dot_P, dot_Pi, label_P, label_Pi, line_OP, line_OPi)\n self.circle_O = circle_O\n self.dot_P = dot_P\n\n def move_around_point_P(self):\n self.dot_P.save_state()\n for dx, dy in [(-0.2, 0.3), (0.1, -0.4), (4, 0.3), (1, 1)]:\n vec = np.array([dx, dy, 0])\n self.play(self.dot_P.shift, vec, run_time = 1)\n self.wait()\n self.play(self.dot_P.move_to, self.circle_O.get_right())\n self.wait()\n self.play(self.dot_P.restore, run_time = 1)\n self.wait()\n\n\nclass InversionExamples(Scene):\n CONFIG = {\n \"color_circle\" : YELLOW,\n }\n def construct(self):\n circle_O = Circle(radius = 3.5, color = self.color_circle)\n circle_O.shift(3*LEFT)\n remark_circle = TextMobject(\"反演圆\", color = self.color_circle)\n remark_circle.next_to(circle_O.get_bottom(), UP)\n dot_O = Dot(circle_O.get_center(), color = self.color_circle)\n label_O = DotLabel(\"O\", dot_O, color = self.color_circle, position = DOWN)\n init_shape = Square(side_length = 1.2, color = BLUE).rotate(TAU/13)\n init_shape.next_to(circle_O.get_right(), LEFT, buff = 0.5)\n init_shape.save_state()\n inv_shape = InversedVMobject(init_shape, circle_O, use_dashed_vmob = False)\n new_shapes = [\n RegularPolygon(n = 6, start_angle = PI/7, color = PINK).scale(0.8),\n TexMobject(\"42\", color = RED).scale(2.5).rotate(-PI/9),\n TexMobject(\"\\\\pi\", color = MAROON_B).scale(5).rotate(PI/15),\n ]\n\n self.add(circle_O, remark_circle, dot_O, label_O)\n self.add(init_shape, inv_shape)\n for new_shape in new_shapes:\n # new_shape.set_color(BLUE)\n new_shape.next_to(circle_O.get_right(), LEFT, buff = 0.6)\n self.play(Transform(init_shape, new_shape), run_time = 1)\n self.wait()\n init_shape.generate_target()\n 
init_shape.target.become(new_shape)\n init_shape.target.shift(get_random_vector(0.5))\n random_angle = 0.5*np.random.random()\n init_shape.target.rotate(random_angle)\n self.play(MoveToTarget(init_shape, path_arc = random_angle, run_time = 1)),\n self.wait()\n self.play(ApplyMethod(init_shape.restore))\n self.wait()\n\n\nclass LineToLineInversion(Scene):\n CONFIG = {\n \"color_circle\" : YELLOW,\n \"color_orig\" : BLUE,\n \"color_inv\" : RED,\n }\n def construct(self):\n self.add_backgrounds()\n self.show_line_to_line_inversion()\n\n def add_backgrounds(self):\n circle_O = Circle(radius = 2.5, color = self.color_circle)\n remark_circle = TextMobject(\"反演圆\", color = self.color_circle)\n remark_circle.next_to(circle_O.get_bottom(), UP)\n dot_O = Dot(circle_O.get_center(), color = self.color_circle)\n label_O = DotLabel(\"O\", dot_O, color = self.color_circle, position = DOWN)\n conclusion = TextMobject(\"经过反演中心的直线\", \"$\\\\mapsto$\", \"经过反演中心的直线\")\n conclusion.scale(0.8)\n conclusion[0].set_color(self.color_orig)\n conclusion[2].set_color(self.color_inv)\n conclusion.to_corner(DR)\n self.add(circle_O, remark_circle, dot_O, label_O)\n self.add(conclusion)\n self.circle_O = circle_O\n \n def show_line_to_line_inversion(self):\n angle_tracker = ValueTracker(-PI/11)\n position_tracker = ValueTracker(1.4)\n angle_tracker.save_state()\n position_tracker.save_state()\n orig_line = ExtendedLine(LEFT, RIGHT, color = self.color_orig, stroke_width = 8)\n orig_line.add_updater(lambda m: m.rotate(angle_tracker.get_value() - m.get_angle()))\n inv_line = ExtendedLine(LEFT, RIGHT, color = self.color_inv, stroke_width = 4)\n inv_line.add_updater(lambda m: m.rotate(angle_tracker.get_value() - m.get_angle()))\n dot_P = Dot(color = self.color_orig)\n dot_P.add_updater(\n lambda m: m.move_to(\n position_tracker.get_value() * rotate_vector(RIGHT, angle_tracker.get_value())\n )\n )\n dot_Pi = InversedDot(dot_P, self.circle_O, is_hollow = False, color = self.color_inv)\n label_P = DotLabel(\"P\", dot_P, position = DOWN, color = self.color_orig)\n label_Pi = DotLabel(\"P'\", dot_Pi, position = DOWN, color = self.color_inv)\n \n def get_lb():\n return LEFT_SIDE + UP * LEFT_SIDE[0] * np.tan(angle_tracker.get_value())\n def get_rb():\n return RIGHT_SIDE + UP * RIGHT_SIDE[0] * np.tan(angle_tracker.get_value())\n def is_oolb(m):\n return m.get_right()[0] < LEFT_SIDE[0]\n def is_oorb(m):\n return m.get_left()[0] > RIGHT_SIDE[0]\n\n oolb_arrow = Arrow(ORIGIN, LEFT, color = self.color_inv).scale(2)\n oolb_arrow.add_updater(lambda m: m.set_angle(angle_tracker.get_value() + PI))\n oolb_arrow.add_updater(lambda m: m.next_to(get_lb(), DOWN, aligned_edge = LEFT, buff = 0.2))\n oorb_arrow = Arrow(ORIGIN, RIGHT, color = self.color_inv).scale(2)\n oorb_arrow.add_updater(lambda m: m.set_angle(angle_tracker.get_value()))\n oorb_arrow.add_updater(lambda m: m.next_to(get_rb(), DOWN, aligned_edge = RIGHT, buff = 0.2))\n oolb_label = TexMobject(\"P'\", color = self.color_inv, background_stroke_width = 0)\n oolb_label.add_updater(lambda m: m.next_to(oolb_arrow, DOWN, buff = 0.2))\n oorb_label = TexMobject(\"P'\", color = self.color_inv, background_stroke_width = 0)\n oorb_label.add_updater(lambda m: m.next_to(oorb_arrow, DOWN, buff = 0.2))\n oolb_group = VGroup(oolb_arrow, oolb_label)\n oorb_group = VGroup(oorb_arrow, oorb_label)\n oolb_group.add_updater(lambda m: m.set_fill(opacity = 1 if is_oolb(label_Pi) else 0))\n oolb_group.add_updater(lambda m: m.set_stroke(opacity = 1 if is_oolb(label_Pi) else 0))\n oorb_group.add_updater(lambda 
m: m.set_fill(opacity = 1 if is_oorb(label_Pi) else 0))\n oorb_group.add_updater(lambda m: m.set_stroke(opacity = 1 if is_oorb(label_Pi) else 0))\n\n self.add(orig_line, inv_line, dot_P, dot_Pi, label_P, label_Pi)\n self.add(oolb_group, oorb_group)\n for d_position, d_angle in [(2, 0), (1, PI/10), (-5, 0), (-3, -PI/7), (4, PI/11)]:\n self.play(\n ApplyMethod(position_tracker.increment_value, d_position),\n ApplyMethod(angle_tracker.increment_value, d_angle),\n run_time = 2,\n )\n self.wait()\n self.play(\n ApplyMethod(angle_tracker.restore),\n ApplyMethod(position_tracker.restore),\n run_time = 2,\n )\n self.wait()\n\n\nclass LineToCircleInversion(Scene):\n CONFIG = {\n \"color_circle\" : YELLOW,\n \"color_orig\" : BLUE,\n \"color_inv\" : RED,\n \"line_config\" : {\n \"stroke_width\" : 2,\n \"color\" : WHITE,\n },\n }\n def construct(self):\n self.add_backgrounds()\n self.add_shapes()\n self.show_line_to_circle_inversion()\n\n def add_backgrounds(self):\n circle_O = Circle(radius = 3, color = self.color_circle)\n circle_O.shift(3*LEFT+0.5*UP)\n remark_circle = TextMobject(\"反演圆\", color = self.color_circle)\n remark_circle.next_to(circle_O.get_bottom(), UP)\n dot_O = Dot(circle_O.get_center(), color = self.color_circle)\n label_O = DotLabel(\"O\", dot_O, color = self.color_circle, position = DOWN)\n conclusion1 = TextMobject(\"不经过反演中心的直线\", \"$\\\\mapsto$\", \"经过反演中心的圆\")\n conclusion1[0].set_color(self.color_orig)\n conclusion1[-1].set_color(self.color_inv)\n conclusion2 = TextMobject(\"经过反演中心的圆\", \"$\\\\mapsto$\", \"不经过反演中心的直线\")\n conclusion2[0].set_color(self.color_inv)\n conclusion2[-1].set_color(self.color_orig)\n conclusions = VGroup(conclusion1, conclusion2)\n for c in conclusions:\n c.scale(0.8)\n conclusions.arrange_submobjects(DOWN, index_of_submobject_to_align = 1)\n conclusions.to_corner(DR)\n bg_rect = BackgroundRectangle(conclusions)\n self.add(circle_O, remark_circle)\n self.add_foreground_mobjects(dot_O, label_O, bg_rect, conclusions)\n self.dot_O = dot_O\n self.circle_O = circle_O\n self.conclusions = conclusions\n self.bg_rect = bg_rect\n\n def add_shapes(self):\n position_tracker = ValueTracker(2)\n line_angle_tracker = ValueTracker(PI*9/19)\n circle_angle_tracker = ValueTracker(PI/5)\n line = ExtendedLine(LEFT, RIGHT, color = self.color_orig)\n line.add_updater(lambda m: m.move_to(position_tracker.get_value() * RIGHT))\n line.add_updater(lambda m: m.rotate(line_angle_tracker.get_value() - m.get_angle()))\n inv_line = InversedVMobject(line, self.circle_O, use_dashed_vmob = False, color = self.color_inv)\n inv_line_center = SmallDot(color = self.color_inv)\n inv_line_center.add_updater(lambda m: m.move_to(inv_line.get_center()))\n dot_Ai = Dot(color = self.color_inv)\n dot_Ai.add_updater(\n lambda m: m.move_to(inv_line.get_center() * 2 - self.circle_O.get_center())\n )\n dot_Pi = Dot(color = self.color_inv)\n dot_Pi.add_updater(\n lambda m: m.move_to(\n inv_line.get_center() \\\n + rotate_vector(\n inv_line.get_center() - self.circle_O.get_center(),\n circle_angle_tracker.get_value()\n )\n )\n )\n dot_P = InversedDot(dot_Pi, self.circle_O, is_hollow = False, color = self.color_orig)\n dot_A = InversedDot(dot_Ai, self.circle_O, is_hollow = False, color = self.color_orig)\n line_OA, line_OAi, line_OP, line_OPi, line_AP, line_AiPi = aux_lines = VGroup(*[\n TwoDotsSegment(pt_1, pt_2, **self.line_config)\n for pt_1, pt_2 in [\n (self.dot_O, dot_A), (self.dot_O, dot_Ai),\n (self.dot_O, dot_P), (self.dot_O, dot_Pi),\n (dot_A, dot_P), (dot_Ai, dot_Pi)\n ]\n ])\n ai_AiOPi = 
AngleIndicator(dot_Ai, self.dot_O, dot_Pi, color = MAROON_B, radius = 0.8)\n rtai_OAP = RightAngleIndicator(self.dot_O, dot_A, dot_P)\n rtai_OPiAi = RightAngleIndicator(self.dot_O, dot_Pi, dot_Ai)\n label_P = TexMobject(\"P\", color = self.color_orig)\n label_Pi = TexMobject(\"P'\", color = self.color_inv)\n label_A = TexMobject(\"A\", color = self.color_orig)\n label_Ai = TexMobject(\"A'\", color = self.color_inv)\n label_A.add_updater(\n lambda m: m.move_to(\n dot_A.get_center() + 0.3 * normalize(dot_A.get_center() - self.dot_O.get_center())\n )\n )\n label_P.add_updater(\n lambda m: m.move_to(\n dot_P.get_center() + 0.3 * normalize(dot_A.get_center() - self.dot_O.get_center())\n )\n )\n label_Ai.add_updater(\n lambda m: m.move_to(\n dot_Ai.get_center() + 0.4 * rotate_vector(\n normalize(dot_Ai.get_center() - inv_line_center.get_center()), -PI/4\n )\n )\n )\n label_Pi.add_updater(\n lambda m: m.move_to(\n dot_Pi.get_center() + 0.4 * normalize(dot_Pi.get_center() - inv_line_center.get_center())\n )\n )\n\n def get_ub():\n return line.get_center() + TOP + RIGHT * TOP[1] / np.tan(line_angle_tracker.get_value())\n def get_bb():\n return line.get_center() + BOTTOM + RIGHT * BOTTOM[1] / np.tan(line_angle_tracker.get_value())\n def is_ooub(m):\n return m.get_bottom()[1] > TOP[1]\n def is_oobb(m):\n return m.get_top()[1] < BOTTOM[1]\n ooub_arrow = Arrow(ORIGIN, LEFT, color = self.color_orig).scale(2)\n ooub_arrow.add_updater(lambda m: m.set_angle(line_angle_tracker.get_value()))\n ooub_arrow.add_updater(lambda m: m.next_to(get_ub(), RIGHT, aligned_edge = TOP, buff = 0.2))\n oobb_arrow = Arrow(ORIGIN, RIGHT, color = self.color_orig).scale(2)\n oobb_arrow.add_updater(lambda m: m.set_angle(line_angle_tracker.get_value() + PI))\n oobb_arrow.add_updater(lambda m: m.next_to(get_bb(), RIGHT, aligned_edge = BOTTOM, buff = 0.2))\n oolb_label = TexMobject(\"P\", color = self.color_orig, background_stroke_width = 0)\n oolb_label.add_updater(lambda m: m.next_to(ooub_arrow, RIGHT, buff = 0.2))\n oorb_label = TexMobject(\"P\", color = self.color_orig, background_stroke_width = 0)\n oorb_label.add_updater(lambda m: m.next_to(oobb_arrow, RIGHT, buff = 0.2))\n ooub_group = VGroup(ooub_arrow, oolb_label)\n oobb_group = VGroup(oobb_arrow, oorb_label)\n ooub_group.add_updater(lambda m: m.set_fill(opacity = 1 if is_ooub(label_P) else 0))\n ooub_group.add_updater(lambda m: m.set_stroke(opacity = 1 if is_ooub(label_P) else 0))\n oobb_group.add_updater(lambda m: m.set_fill(opacity = 1 if is_oobb(label_P) else 0))\n oobb_group.add_updater(lambda m: m.set_stroke(opacity = 1 if is_oobb(label_P) else 0))\n\n self.add(line, inv_line)\n self.add(dot_A, dot_P, dot_Ai, dot_Pi)\n self.add(label_P, label_Pi, label_A, label_Ai)\n self.add(aux_lines)\n self.add(ai_AiOPi, rtai_OAP, rtai_OPiAi)\n self.add(ooub_group, oobb_group)\n\n self.position_tracker = position_tracker\n self.line_angle_tracker = line_angle_tracker\n self.circle_angle_tracker = circle_angle_tracker\n\n def show_line_to_circle_inversion(self):\n play_args = [\n [0, PI/12, 0, 2],\n [0, 0, PI*7/5, 4],\n [-2, PI/8, -PI/5, 3],\n [0, 0, PI*19/10, 6],\n [1.5, -PI/7, PI*2/5, 4],\n ]\n restore_arg = [\n -sum([arg[k] for arg in play_args])\n for k in range(len(play_args[0]))\n ]\n restore_arg[1] = (restore_arg[1] + PI) % (2*PI) - PI\n restore_arg[2] = (restore_arg[2] + PI) % (2*PI) - PI\n restore_arg[-1] = 3\n play_args.append(restore_arg)\n for d_center, d_line_angle, d_circle_angle, run_time in play_args:\n self.play(\n ApplyMethod(self.position_tracker.increment_value, 
d_center),\n ApplyMethod(self.line_angle_tracker.increment_value, d_line_angle),\n ApplyMethod(self.circle_angle_tracker.increment_value, d_circle_angle),\n run_time = run_time,\n )\n self.wait()\n\n\nclass InversionCreateSimilarTriangles(Scene):\n CONFIG = {\n \"random_seed\" : 5+7-0,\n \"num_of_nudges\" : 5,\n \"max_step\" : 1,\n \"color_A\" : RED,\n \"color_B\" : BLUE,\n \"color_combined\" : MAROON_B,\n \"color_circle\": YELLOW,\n }\n def construct(self):\n self.add_remark()\n self.show_figure_animation()\n\n def add_remark(self):\n cond_1 = TexMobject(\"{|OP|\", \"\\\\over\", \"|OQ|}\", \"=\", \"{|OQ'|\", \"\\\\over\", \"|OP'|}\")\n cond_2 = TexMobject(\"\\\\angle POQ\", \"=\", \"\\\\angle Q'OP'\")\n conds = VGroup(cond_1, cond_2)\n conds.arrange_submobjects(DOWN, buff = 0.5)\n conds_rect = SurroundingRectangle(conds, color = WHITE)\n arrow = TexMobject(\"\\\\Downarrow\")\n arrow.next_to(conds_rect, DOWN)\n concl = TexMobject(\"\\\\triangle OPQ\", \"\\\\sim\", \"\\\\triangle OQ'P'\")\n concl.next_to(arrow, DOWN)\n for mob in (cond_1[0], cond_1[2], concl[0]):\n mob.set_color(self.color_A)\n for mob in (cond_1[-1], cond_1[-3], concl[-1]):\n mob.set_color(self.color_B)\n for mob in (cond_2[0], cond_2[-1]):\n mob.set_color(self.color_combined)\n remark = VGroup(conds, conds_rect, arrow, concl)\n remark.to_corner(DR)\n self.add(remark)\n\n def show_figure_animation(self):\n circle = Circle(radius = 3, color = self.color_circle)\n circle.move_to(3.5*LEFT)\n dot_O = Dot(color = self.color_combined)\n dot_O.add_updater(lambda m: m.move_to(circle.get_center()))\n dot_P = Dot(point = 1.2*UP+LEFT, color = self.color_A)\n dot_Q = Dot(point = 0.5*DOWN+1.9*LEFT, color = self.color_A)\n dot_Pi = InversedDot(dot_P, circle, is_hollow = False, color = self.color_B)\n dot_Qi = InversedDot(dot_Q, circle, is_hollow = False, color = self.color_B)\n triangle_OPQ = ManyDotsPolygon(\n dot_O, dot_P, dot_Q, color = self.color_A,\n stroke_width = 5, fill_opacity = 0.4\n )\n triangle_OPiQi = ManyDotsPolygon(\n dot_O, dot_Pi, dot_Qi, color = self.color_B,\n stroke_width = 2, fill_opacity = 0.3\n )\n label_O, label_P, label_Pi, label_Q, label_Qi = (\n DotLabel(\n text, dot, color = color, position = position,\n background_stroke_width = 5,\n ).scale(0.8)\n for text, dot, color, position in zip(\n [\"O\", \"P\", \"P'\", \"Q\", \"Q'\"],\n [dot_O, dot_P, dot_Pi, dot_Q, dot_Qi],\n [self.color_combined, self.color_A, self.color_B, self.color_A, self.color_B],\n [LEFT, UP, UP, DOWN, DOWN]\n )\n )\n self.add(dot_O, dot_P, dot_Q, dot_Pi, dot_Qi)\n self.add(circle, triangle_OPQ, triangle_OPiQi)\n self.add(label_O, label_P, label_Pi, label_Q, label_Qi)\n dot_P.save_state()\n dot_Q.save_state()\n for k in range(self.num_of_nudges):\n nudge_P = get_random_vector(self.max_step)\n nudge_Q = get_random_vector(self.max_step)\n self.play(\n ApplyMethod(dot_P.shift, nudge_P),\n ApplyMethod(dot_Q.shift, nudge_Q),\n run_time = 2\n )\n self.wait()\n self.play(dot_P.restore, dot_Q.restore, run_time = 2)\n self.wait()\n\n\nclass CircleToCircleInversionProof(Scene):\n CONFIG = {\n \"color_O\" : YELLOW,\n \"color_A\" : RED,\n \"color_B\" : BLUE,\n \"color_combined\" : MAROON_B,\n \"label_buff\" : 0.1,\n \"label_scaling_factor\" : 0.75,\n \"line_config\" : {\n \"stroke_width\" : 2,\n \"color\" : WHITE,\n },\n }\n def construct(self):\n self.add_backgrounds()\n self.show_left_and_right_points()\n self.show_random_point()\n self.show_similar_triangles()\n self.show_complementary_property()\n self.show_inversion_result()\n\n def 
add_backgrounds(self):\n circle_O = Circle(radius = 3.2, color = self.color_O)\n circle_O.shift(3.5*LEFT)\n dot_O = Dot(circle_O.get_center(), color = self.color_O)\n remark_O = TextMobject(\"反演圆\", color = YELLOW)\n remark_O.next_to(circle_O.get_bottom(), UP, buff = 0.4)\n circle_C = Circle(radius = 0.8, stroke_width = 2)\n circle_C.next_to(circle_O.get_right(), LEFT, buff = 0.5)\n dot_C = Dot(circle_C.get_center())\n label_O, label_C = (\n DotLabel(\n text, dot, color = color, position = DOWN, label_buff = self.label_buff\n ).scale(self.label_scaling_factor)\n for text, dot, color in zip([\"O\", \"C\"], [dot_O, dot_C], [self.color_O, WHITE])\n )\n for orig_mob in (circle_C, dot_C, label_C):\n orig_mob.set_sheen_direction(RIGHT)\n orig_mob.set_color([self.color_A, self.color_B])\n inv_circle_template = InversedVMobject(circle_C, circle_O, use_dashed_vmob = False)\n inv_circle = Circle(radius = inv_circle_template.get_width()/2)\n inv_circle.move_to(inv_circle_template.get_center())\n inv_circle.set_sheen_direction(LEFT)\n inv_circle.set_color([self.color_A, self.color_B])\n self.add(circle_O, dot_O, circle_C, dot_C)\n self.add(label_O, label_C)\n self.add(remark_O)\n self.wait()\n\n self.circle_O = circle_O\n self.dot_O = dot_O\n self.remark_O = remark_O\n self.circle_C = circle_C\n self.dot_C = dot_C\n self.inv_circle = inv_circle\n\n def show_left_and_right_points(self):\n dot_A = Dot(color = self.color_A)\n dot_A.move_to(self.circle_C.get_left())\n dot_B = Dot(color = self.color_B)\n dot_B.move_to(self.circle_C.get_right())\n dot_Ai = InversedDot(dot_A, self.circle_O, is_hollow = False, color = self.color_A)\n dot_Bi = InversedDot(dot_B, self.circle_O, is_hollow = False, color = self.color_B)\n dot_Q = Dot((dot_Ai.get_center() + dot_Bi.get_center()) / 2)\n line_OB = Line(self.dot_O.get_center(), dot_B.get_center(), **self.line_config)\n line_OAi = Line(self.dot_O.get_center(), dot_Ai.get_center(), **self.line_config)\n label_A, label_Ai, label_B, label_Bi = (\n DotLabel(\n text, dot, color = color, position = position, label_buff = self.label_buff\n ).scale(self.label_scaling_factor)\n for text, dot, color, position in zip(\n [\"A\", \"A'\", \"B\", \"B'\"],\n [dot_A, dot_Ai, dot_B, dot_Bi],\n [self.color_A, self.color_A, self.color_B, self.color_B],\n [DL, DR, DR, DL]\n )\n )\n remark_AB = TextMobject(\"圆心连线 \\\\\\\\ 的交点...\").scale(0.6)\n remark_AB.next_to(VGroup(dot_A, dot_B), DOWN, buff = 1)\n arrows_AB = VGroup(*[\n Arrow(remark_AB.get_critical_point(direction), dot, buff = 0.1)\n for direction, dot in zip([UL, UR], [dot_A, dot_B])\n ])\n remark_AiBi = TextMobject(\"...以及它们的反点\").scale(0.8)\n remark_AiBi.next_to(VGroup(dot_Ai, dot_Bi), DOWN, buff = 1)\n arrows_AiBi = VGroup(*[\n Arrow(remark_AiBi.get_critical_point(direction), dot, buff = 0.1)\n for direction, dot in zip([UR, UL], [dot_Ai, dot_Bi])\n ])\n self.play(ShowCreation(line_OB))\n self.play(Write(dot_A), Write(dot_B), Write(label_A), Write(label_B))\n self.wait()\n self.play(Write(remark_AB), ShowCreation(arrows_AB))\n self.wait()\n self.play(\n ReplacementTransform(dot_A.deepcopy(), dot_Ai),\n ReplacementTransform(dot_B.deepcopy(), dot_Bi),\n )\n self.play(Write(label_Ai), Write(label_Bi))\n self.wait()\n self.play(\n ReplacementTransform(remark_AB, remark_AiBi),\n ReplacementTransform(arrows_AB, arrows_AiBi)\n )\n self.play(ReplacementTransform(line_OB, line_OAi))\n self.play(FadeOut(VGroup(remark_AiBi, arrows_AiBi)))\n self.wait()\n\n self.dot_A = dot_A\n self.dot_Ai = dot_Ai\n self.dot_B = dot_B\n self.dot_Bi = 
dot_Bi\n self.dot_Q = dot_Q\n self.line_OAi = line_OAi\n self.dots_AB = VGroup(dot_A, dot_Ai, dot_B, dot_Bi)\n self.labels_AB = VGroup(label_A, label_Ai, label_B, label_Bi)\n\n def show_random_point(self):\n angle_tracker = ValueTracker(PI/3)\n dot_P = Dot()\n dot_P.add_updater(\n lambda m: m.move_to(\n self.circle_C.point_at_angle(angle_tracker.get_value() % TAU)\n )\n )\n dot_P.add_updater(\n lambda m: m.set_color(\n interpolate_color(\n self.color_A, self.color_B,\n (dot_P.get_center()[0] - self.dot_A.get_center()[0]) / (self.dot_B.get_center()[0] - self.dot_A.get_center()[0])\n )\n )\n )\n label_P = DotLabel(\"P\", dot_P, position = None)\n label_P.scale(0.8)\n label_P.add_updater(lambda m: m.set_color(dot_P.get_color()))\n label_P.add_updater(\n lambda m: m.move_to(dot_P.get_center() * 1.4 - self.dot_C.get_center() * 0.4)\n )\n arrow_P = Vector(DR, buff = 0, color = WHITE).scale(0.5)\n arrow_P.add_updater(lambda m: m.next_to(dot_P, UL, buff = 0.1))\n remark_P = TextMobject(\"圆上任意一点...\").scale(0.75)\n remark_P.add_updater(lambda m: m.next_to(arrow_P, UL, buff = 0.1))\n dot_Pi = InversedDot(dot_P, self.circle_O, is_hollow = False)\n dot_Pi.add_updater(lambda m: m.set_color(dot_P.get_color()))\n label_Pi = DotLabel(\"P'\", dot_Pi, position = None)\n label_Pi.scale(0.8)\n label_Pi.add_updater(lambda m: m.set_color(dot_Pi.get_color()))\n label_Pi.add_updater(\n lambda m: m.move_to(dot_Pi.get_center() * 1.1 - self.inv_circle.get_center() * 0.1)\n )\n arrow_Pi = Vector(DL, buff = 0, color = WHITE).scale(0.5)\n arrow_Pi.add_updater(lambda m: m.next_to(dot_Pi, UR, buff = 0.1))\n remark_Pi = TextMobject(\"...以及它的反点\").scale(0.75)\n remark_Pi.add_updater(lambda m: m.next_to(arrow_Pi, UR, buff = 0.1))\n line_OP, line_OPi, line_AP, line_AiPi, line_BP, line_BiPi = aux_lines = VGroup(*[\n TwoDotsSegment(pt_1, pt_2, **self.line_config)\n for pt_1, pt_2 in [\n (self.dot_O, dot_P), (self.dot_O, dot_Pi), (self.dot_A, dot_P),\n (self.dot_Ai, dot_Pi), (self.dot_B, dot_P), (self.dot_Bi, dot_Pi)\n ]\n ])\n rtai_APB = RightAngleIndicator(self.dot_A, dot_P, self.dot_B)\n rtai_BiPiAi = RightAngleIndicator(self.dot_Bi, dot_Pi, self.dot_Ai, side_length = 0.5)\n self.play(Write(dot_P), Write(label_P))\n self.play(ShowCreation(arrow_P), Write(remark_P))\n self.play(Write(line_AP), Write(line_BP))\n self.play(ShowCreation(rtai_APB))\n self.wait()\n self.play(ReplacementTransform(dot_P.deepcopy(), dot_Pi))\n self.play(Write(label_Pi))\n self.play(\n ReplacementTransform(arrow_P.deepcopy(), arrow_Pi),\n ReplacementTransform(remark_P.deepcopy(), remark_Pi),\n )\n self.play(angle_tracker.increment_value, PI/6, run_time = 2)\n self.play(FadeOut(VGroup(arrow_P, remark_P, arrow_Pi, remark_Pi)))\n self.wait()\n self.play(Write(VGroup(line_OP, line_OPi, line_AiPi, line_BiPi)))\n self.wait()\n\n self.dot_P = dot_P\n self.dot_Pi = dot_Pi\n self.rtai_APB = rtai_APB\n self.rtai_BiPiAi = rtai_BiPiAi\n self.angle_tracker = angle_tracker\n self.aux_lines = aux_lines\n self.dots_P = VGroup(dot_P, dot_Pi)\n self.labels_P = VGroup(label_P, label_Pi)\n self.rtais = VGroup(self.rtai_APB, self.rtai_BiPiAi)\n\n def show_similar_triangles(self):\n ai_OAP = AngleIndicator(self.dot_O, self.dot_A, self.dot_P, radius = 0.3, color = self.color_A)\n ai_OBP = AngleIndicator(self.dot_O, self.dot_B, self.dot_P, radius = 0.4, color = self.color_B)\n ai_OPiAi = AngleIndicator(self.dot_O, self.dot_Pi, self.dot_Ai, radius = 0.3, color = self.color_A)\n ai_OPiBi = AngleIndicator(self.dot_O, self.dot_Pi, self.dot_Bi, radius = 0.4, color = 
self.color_B)\n triangle_OAP, triangle_OPiAi, triangle_OBP, triangle_OPiBi = [\n ManyDotsPolygon(\n pt_1, pt_2, pt_3, color = self.color_combined,\n stroke_width = 0, fill_opacity = 0.4\n )\n for pt_1, pt_2, pt_3 in (\n (self.dot_O, self.dot_A, self.dot_P),\n (self.dot_O, self.dot_Pi, self.dot_Ai),\n (self.dot_O, self.dot_B, self.dot_P),\n (self.dot_O, self.dot_Pi, self.dot_Bi),\n )\n ]\n remark_sim_A = TexMobject(\"\\\\triangle OAP\", \"\\\\sim\", \"\\\\triangle OP'A'\")\n remark_sim_B = TexMobject(\"\\\\triangle OBP\", \"\\\\sim\", \"\\\\triangle OP'B'\")\n remark_arrow = TexMobject(\"\\\\Downarrow\")\n remark_angle_A = TexMobject(\"\\\\angle OAP\", \"=\", \"\\\\angle OP'A'\")\n remark_angle_B = TexMobject(\"\\\\angle OBP\", \"=\", \"\\\\angle OP'B'\")\n remarks_A = VGroup(remark_sim_A, remark_arrow, remark_angle_A)\n remarks_B = VGroup(remark_sim_B, remark_arrow, remark_angle_B)\n remarks_A.arrange_submobjects(DOWN)\n remarks_A.next_to(self.dot_Q, DOWN, buff = 1)\n remark_sim_B.move_to(remark_sim_A.get_center())\n remark_angle_B.move_to(remark_angle_A.get_center())\n for remark, color in ([remark_sim_A, self.color_combined], [remark_sim_B, self.color_combined], \\\n [remark_angle_A, self.color_A], [remark_angle_B, self.color_B]):\n remark[0].set_color(color)\n remark[-1].set_color(color)\n self.play(Write(remark_sim_A))\n self.play(FadeInFromDown(VGroup(remark_arrow, remark_angle_A)))\n self.wait()\n self.play(ShowCreation(triangle_OAP), ShowCreation(ai_OAP))\n self.wait()\n self.play(\n ReplacementTransform(triangle_OAP, triangle_OPiAi),\n ReplacementTransform(ai_OAP.deepcopy(), ai_OPiAi),\n )\n self.play(FadeOut(triangle_OPiAi))\n self.wait()\n self.play(ReplacementTransform(remarks_A, remarks_B))\n self.wait()\n self.play(ShowCreation(triangle_OBP), ShowCreation(ai_OBP))\n self.wait()\n self.play(\n ReplacementTransform(triangle_OBP, triangle_OPiBi),\n ReplacementTransform(ai_OBP.deepcopy(), ai_OPiBi),\n )\n self.play(FadeOut(remarks_B), FadeOut(triangle_OPiBi))\n self.wait()\n\n self.ai_OAP = ai_OAP\n self.ai_OBP = ai_OBP\n self.ai_OPiAi = ai_OPiAi\n self.ai_OPiBi = ai_OPiBi\n self.ais = VGroup(ai_OAP, ai_OBP, ai_OPiAi, ai_OPiBi)\n\n def show_complementary_property(self):\n ai_OAP_copy = self.ai_OAP.deepcopy()\n ai_OBP_copy = self.ai_OBP.deepcopy()\n rtai_APB_copy = self.rtai_APB.deepcopy()\n for ai_copy in (ai_OAP_copy, ai_OBP_copy, rtai_APB_copy):\n ai_copy.clear_updaters()\n comp_prop = VGroup(ai_OAP_copy, TexMobject(\"=\"), ai_OBP_copy, TexMobject(\"+\"), rtai_APB_copy)\n comp_prop.arrange_submobjects(RIGHT)\n comp_prop.scale(1.2)\n comp_prop.next_to(self.circle_O.get_top(), DOWN, buff = 1)\n self.play(\n ReplacementTransform(self.ai_OAP.deepcopy(), ai_OAP_copy),\n ReplacementTransform(self.ai_OBP.deepcopy(), ai_OBP_copy),\n ReplacementTransform(self.rtai_APB.deepcopy(), rtai_APB_copy),\n )\n self.play(Write(comp_prop[1]), Write(comp_prop[3]))\n self.wait()\n self.play(ReplacementTransform(rtai_APB_copy.deepcopy(), self.rtai_BiPiAi))\n self.wait()\n for ai in self.ais:\n ai.clear_updaters()\n self.play(\n FadeOut(comp_prop),\n FadeOut(self.ais),\n FadeOut(self.labels_AB), FadeOut(self.labels_P),\n )\n self.wait()\n\n def show_inversion_result(self):\n inv_circle_copy = self.inv_circle.deepcopy()\n self.play(self.angle_tracker.set_value, PI, run_time = 2)\n self.wait()\n def update_inv_circle(inv_circle):\n angle = self.angle_tracker.get_value()\n if (angle <= -PI) or (angle > PI):\n alpha = 1\n else:\n QPi = self.dot_Pi.get_center() - self.dot_Q.get_center()\n QAi = 
self.dot_Ai.get_center() - self.dot_Q.get_center()\n theta = angle_between(QPi, QAi)\n if self.dot_Pi.get_center()[1] < self.dot_Q.get_center()[1]:\n theta = 2*PI - theta\n alpha = theta / (2*PI)\n inv_circle.become(inv_circle_copy.get_subcurve(0, alpha))\n self.inv_circle.add_updater(update_inv_circle)\n self.add(self.inv_circle)\n self.play(\n ApplyMethod(self.angle_tracker.increment_value, -2*PI),\n run_time = 5,\n )\n self.inv_circle.clear_updaters()\n for line in self.aux_lines:\n line.clear_updaters()\n self.play(\n FadeOut(self.dots_AB), FadeOut(self.dots_P), FadeOut(self.rtais),\n FadeOut(self.line_OAi), FadeOut(self.aux_lines)\n )\n self.wait()\n color_template = Square(\n stroke_width = 0, fill_opacity = 1, fill_color = [self.color_A, self.color_B]\n )\n conclusion = TextMobject(\"不经过反演中心的圆\", \"$\\\\mapsto$\", \"不经过反演中心的圆\")\n conclusion.scale(0.8)\n conclusion[0].set_color_by_gradient(self.color_A, self.color_B)\n conclusion[2].set_color_by_gradient(self.color_B, self.color_A)\n conclusion.to_corner(DR)\n self.play(Write(conclusion))\n self.wait(3)\n self.play(FadeOut(conclusion), FadeOut(self.inv_circle))\n self.wait()\n\n\nclass ConcentricPropertyDoesNotHold(Scene):\n def setup(self):\n N = 8\n self.circle_radii = [0.9-0.1*k for k in range(N)]\n self.dot_radii = [0.08-0.005*k for k in range(N)]\n self.circle_colors = color_gradient([BLUE, GREEN, RED], N)\n\n def construct(self):\n orig_circles = VGroup(*[\n Circle(radius = radius, stroke_width = 1.5,color = color)\n for radius, color in zip(self.circle_radii, self.circle_colors)]\n )\n orig_circles.shift(2*LEFT+0.5*DOWN)\n orig_circles_centers = VGroup(*[\n Dot(circle.get_center(), radius = radius, color = color)\n for circle, radius, color in zip(orig_circles, self.dot_radii, self.circle_colors)\n ])\n # Dot(orig_circles.get_center())\n circle = Circle(radius = 3, color = YELLOW)\n circle.shift(3.8*LEFT+0.5*DOWN)\n circle_center = Dot(circle.get_center(), color = YELLOW)\n inv_circles = VGroup(*[\n InversedVMobject(orig_circle, circle).clear_updaters().set_color(color)\n for orig_circle, color in zip(orig_circles, self.circle_colors)\n ])\n inv_circles_centers = VGroup(*[\n Dot(inv_circle.get_center(), color = color)\n for inv_circle, color in zip(inv_circles, self.circle_colors)\n ])\n\n circle_text = TextMobject(\"反演圆\", color = YELLOW)\n circle_text.next_to(circle.get_bottom(), UP, buff = 0.4)\n orig_circles_text = TextMobject(\"同心的圆\", color = WHITE)\n orig_circles_text.next_to(orig_circles, UP)\n orig_circles_text.to_edge(UP, buff = 0.4)\n inv_circles_text = TextMobject(\"不同心的像\", color = WHITE)\n inv_circles_text.next_to(inv_circles, UP)\n inv_circles_text.to_edge(UP, buff = 0.4)\n arrow = Arrow(orig_circles_text.get_right(), inv_circles_text.get_left())\n\n self.add(circle, circle_center)\n self.add(orig_circles, orig_circles_centers)\n self.add(inv_circles, inv_circles_centers)\n self.add(circle_text, orig_circles_text, inv_circles_text, arrow)\n self.wait()\n\n\nclass DemonstratePtolemyInequality(Scene):\n CONFIG = {\n \"R\" : 2.7,\n \"angle_A\" : -PI*2/3,\n \"angle_B\" : PI*4/5,\n \"angle_D\" : -PI/5,\n \"radius_C\" : 3.2,\n \"angle_C\" : PI/5,\n }\n def construct(self):\n radius_tracker = ValueTracker(self.radius_C)\n angle_tracker = ValueTracker(self.angle_C)\n circle = Circle(radius = self.R, color = WHITE, stroke_width = 1)\n circle.shift(DOWN)\n dashed_circle = DashedVMobject(circle, num_dashes = 100, positive_space_ratio = 0.5)\n dot_A, dot_B, dot_C, dot_D = dots = VGroup(*[\n Dot(circle.point_at_angle(angle 
% TAU), color = WHITE)\n for angle in (self.angle_A, self.angle_B, self.angle_C, self.angle_D)\n ])\n dot_C.add_updater(\n lambda m: m.move_to(\n circle.get_center() + radius_tracker.get_value() * \\\n rotate_vector(RIGHT, angle_tracker.get_value())\n )\n )\n dot_labels = VGroup(*[\n DotLabel(text, dot, position = position, label_buff = 0.1)\n for text, dot, position in zip(\n [\"A\", \"B\", \"C\", \"D\"], dots, [DL, UL, UR, DR]\n )\n ])\n lines = VGroup(*[\n TwoDotsSegment(dot_1, dot_2)\n for dot_1, dot_2 in (\n [dot_B, dot_A], [dot_A, dot_C], [dot_A, dot_D],\n [dot_B, dot_C], [dot_B, dot_D], [dot_C, dot_D],\n )\n ])\n length_labels = VGroup(*[LengthLabel(line) for line in lines])\n length_labels[0].switch_side()\n length_labels[2].switch_side()\n length_labels[1].set_offset(-0.4)\n length_labels[-2].set_offset(-0.4)\n\n def get_sums():\n AB, AC, AD, BC, BD, CD = [line.get_length() for line in lines]\n sum_lhs = AB * CD + AD * BC\n sum_rhs = AC * BD\n return sum_lhs, sum_rhs\n relation_eq = TexMobject(\n \"|AB| \\\\cdot |CD| + |AD| \\\\cdot |BC|\", \"=\", \"|AC| \\\\cdot |BD|\",\n background_stroke_width = 0,\n )\n relation_neq = TexMobject(\n \"|AB| \\\\cdot |CD| + |AD| \\\\cdot |BC|\", \">\", \"|AC| \\\\cdot |BD|\",\n background_stroke_width = 0,\n )\n relation_eq[1].set_color(GREEN)\n relation_neq[1].set_color(RED)\n relation_eq.to_edge(UP, buff = 1.2)\n for eq_mob, neq_mob in zip(relation_eq, relation_neq):\n neq_mob.move_to(eq_mob.get_center())\n lhs, eq_sign, rhs = relation_eq\n neq_sign = relation_neq[1]\n label_lhs = DecimalNumber(num_decimal_places = 4, show_ellipsis = True)\n label_rhs = DecimalNumber(num_decimal_places = 4, show_ellipsis = True)\n label_lhs.add_updater(lambda m: m.set_value(get_sums()[0]))\n label_rhs.add_updater(lambda m: m.set_value(get_sums()[1]))\n brace_lhs = Brace(lhs, UP, buff = 0.1)\n brace_rhs = Brace(rhs, UP, buff = 0.1)\n brace_lhs.put_at_tip(label_lhs)\n brace_rhs.put_at_tip(label_rhs)\n\n def get_indication_color(thres = 1e-2):\n return GREEN if is_close(radius_tracker.get_value(), self.R, thres = thres) else RED\n def get_indication_opacity(thres = 1e-2):\n return 0 if is_close(radius_tracker.get_value(), self.R, thres = thres) else 1\n figure_group = VGroup(dashed_circle, dots, lines, length_labels, dot_labels)\n figure_group.add_updater(lambda m: m.set_color(get_indication_color()))\n relation_group = VGroup(lhs, eq_sign, rhs, neq_sign, brace_lhs, brace_rhs, label_lhs, label_rhs)\n label_lhs.add_updater(lambda m: m.set_color(get_indication_color()))\n label_rhs.add_updater(lambda m: m.set_color(get_indication_color()))\n eq_sign.add_updater(lambda m: m.set_opacity(1 - get_indication_opacity()))\n neq_sign.add_updater(lambda m: m.set_opacity(get_indication_opacity()))\n self.add(figure_group)\n self.add(relation_group)\n\n deltas = [\n (0.5, -0.1), (0, -0.4), (-1, 0.3), (0, 0.4),\n (-1, 0), (0.3, -0.2), (0.7, -0.3),\n ]\n radius_tracker.save_state()\n angle_tracker.save_state()\n for d_radius, d_angle in deltas:\n self.play(\n ApplyMethod(radius_tracker.increment_value, d_radius),\n ApplyMethod(angle_tracker.increment_value, d_angle),\n run_time = 2,\n )\n self.wait()\n self.play(\n ApplyMethod(radius_tracker.restore),\n ApplyMethod(angle_tracker.restore),\n run_time = 2,\n )\n self.wait()\n\n\nclass PtolemyInversionFigure(Scene):\n CONFIG = {\n \"R\" : 3.8,\n \"r\" : 1.3,\n \"angle_A\" : PI,\n \"angle_B\" : PI/3,\n \"angle_C\" : -PI/9,\n \"angle_D\" : -PI*2/7,\n \"color_circle\" : YELLOW,\n \"color_ABD\" : BLUE,\n }\n def construct(self):\n 
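        # Figure for the inversion proof of Ptolemy's inequality: invert about
        # the circle centered at A.  The circle through A, B, D passes through
        # the inversion center, so it maps to a line carrying B' and D', and
        # the triangle inequality |B'D'| <= |B'C'| + |C'D'| pulls back via
        # |X'Y'| = R^2 |XY| / (|AX| |AY|) to |AC||BD| <= |AB||CD| + |AD||BC|,
        # with equality exactly when C also lies on that circle between B and D.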
circle_ABD = Circle(radius = self.r, color = self.color_ABD, stroke_width = 3)\n circle_ABD.shift(0.2*LEFT)\n dot_A, dot_B, dot_C, dot_D = dots = VGroup(*[\n Dot(circle_ABD.point_at_angle(angle % TAU), color = WHITE)\n for angle in (self.angle_A, self.angle_B, self.angle_C, self.angle_D)\n ])\n dot_A.set_color(self.color_circle)\n dot_C.shift(0.4*RIGHT)\n circle = Circle(radius = self.R, color = self.color_circle, stroke_width = 5)\n circle.move_to(dot_A.get_center())\n remark_circle = TextMobject(\"反演圆\", color = self.color_circle)\n remark_circle.next_to(circle.get_bottom(), UP)\n label_A, label_B, label_C, label_D = dot_labels = VGroup(*[\n DotLabel(text, dot, position = position, label_buff = 0.2)\n for text, dot, position in zip(\n [\"A\", \"B\", \"C\", \"D\"], dots, [DL, UP, DOWN, DOWN]\n )\n ])\n label_A.set_color(self.color_circle)\n dot_Bi, dot_Ci, dot_Di = inv_dots = VGroup(*[\n InversedDot(dot, circle, is_hollow = False, color = WHITE)\n for dot in (dot_B, dot_C, dot_D)\n ])\n label_Bi, label_Ci, label_Di = inv_dot_labels = VGroup(*[\n DotLabel(text, dot, position = RIGHT, label_buff = 0.2)\n for text, dot in zip([\"B'\", \"C'\", \"D'\"], [dot_Bi, dot_Ci, dot_Di])\n ])\n lines = VGroup(*[\n TwoDotsSegment(dot_1, dot_2, stroke_width = 1)\n for dot_1, dot_2 in (\n [dot_A, dot_B], [dot_A, dot_C], [dot_A, dot_D],\n [dot_B, dot_C], [dot_B, dot_D], [dot_C, dot_D],\n [dot_A, dot_Bi], [dot_A, dot_Ci], [dot_A, dot_Di],\n [dot_Bi, dot_Ci], [dot_Bi, dot_Di], [dot_Ci, dot_Di],\n )\n ])\n inv_circle_ABD = InversedVMobject(circle_ABD, circle, use_dashed_vmob = False)\n inv_circle_ABD.add_updater(lambda m: m.set_color(self.color_ABD))\n inv_circle_ABD.add_updater(lambda m: m.set_stroke(width = 2))\n self.add(circle, remark_circle, circle_ABD, inv_circle_ABD)\n self.add(dots, dot_labels, inv_dots, inv_dot_labels, lines)\n self.add()\n self.wait()\n\n\n#####\n## Inversion Advanced P1 Scenes\nclass KissingCirclesPuzzle(Scene):\n def construct(self):\n self.show_figure()\n self.show_question()\n\n def show_figure(self):\n type_text_1 = TextMobject(\"外切-外切-外切\")\n type_text_2 = TextMobject(\"内切-内切-外切\")\n type_text_1.move_to(LEFT_SIDE/2)\n type_text_2.move_to(RIGHT_SIDE/2)\n type_text_1.to_edge(DOWN)\n type_text_2.to_edge(DOWN)\n dot_l1, dot_l2, dot_l3 = dots_l = VGroup(*[\n VectorizedPoint(np.array([coords[0], coords[1], 0]), color = BLUE)\n for coords in [(-3.9, 1.5), (-4.9, 0.0), (-2.8, -1.0)]\n ])\n dot_r1, dot_r2, dot_r3 = dots_r = VGroup(*[\n VectorizedPoint(np.array([coords[0], coords[1], 0]), color = BLUE)\n for coords in [(4.6, 0.3), (3.9, 0.6), (3.5, 1.6)]\n ])\n dfc_l = DescartesFourCircles(*dots_l, show_new_circles = False)\n dfc_r = DescartesFourCircles(*dots_r, show_new_circles = False, outer_circle_index = 2)\n for dfc in [dfc_l, dfc_r]:\n for mob in dfc.get_orig_circles():\n mob.set_stroke(width = 2, color = BLUE)\n self.add(type_text_1, type_text_2)\n self.add(dfc_l, dfc_r)\n self.dfc_l = dfc_l\n self.dfc_r = dfc_r\n self.dots_l = dots_l\n self.dots_r = dots_r\n\n def show_question(self):\n question = TextMobject(\"能否添加第四个圆,使之与其他三个圆都相切?\")\n question.to_edge(UP, buff = 0.2)\n self.add(question)\n self.wait()\n\n \nclass KissingCirclesSimplified(Scene):\n def construct(self):\n line1 = ExtendedLine(UL, UR)\n line2 = ExtendedLine(DL, DR)\n center_circle = Circle(radius = 1)\n figure_group = VGroup(line1, line2, center_circle)\n for mob in figure_group:\n mob.set_stroke(width = 2, color = BLUE)\n question = TextMobject(\"能否添加第四个“圆”,使之与其他三个“圆”都相切?\")\n question.next_to(figure_group, 
UP, buff = 0.5)\n group = VGroup(question, figure_group)\n group.move_to(ORIGIN)\n self.add(group)\n self.wait()\n\n\nclass KissingCirclesSimplifiedAnswer(Scene):\n def construct(self):\n line1 = ExtendedLine(UL, UR, stroke_width = 2, color = BLUE)\n line2 = ExtendedLine(DL, DR, stroke_width = 2, color = BLUE)\n center_circle = Circle(radius = 1, stroke_width = 2, color = BLUE)\n new_circles = VGroup(*[\n Circle(radius = 1, color = color, fill_opacity = 0.1, stroke_width = 5) \\\n .next_to(center_circle, direction, buff = 0)\n for direction, color in zip([LEFT, RIGHT], [RED, ORANGE])\n ])\n numbers = VGroup(*[\n TexMobject(f\"{num}\", color = circle.get_color()).move_to(circle.get_center())\n for num, circle in zip([\"1\", \"2\"], new_circles)\n ])\n group = VGroup(line1, line2, center_circle, new_circles, numbers)\n group.move_to(ORIGIN)\n self.add(group)\n self.wait()\n\n\nclass KissingCirclesSimplifiedExplanation(Scene):\n CONFIG = {\n \"dashed_vmob_config\" : {\n \"num_dashes\" : 30,\n \"positive_space_ratio\" : 0.6,\n },\n \"line_colors\" : [GREEN, BLUE],\n \"center_color\" : MAROON_B,\n \"circle_colors\" : [RED, ORANGE],\n }\n def construct(self):\n self.add_backgrounds()\n self.show_process()\n\n def add_backgrounds(self):\n N = 5\n line1 = Line(UP + N*LEFT, UP + N*RIGHT, stroke_width = 2, color = self.line_colors[0])\n line2 = Line(DOWN + N*LEFT, DOWN + N*RIGHT, stroke_width = 2, color = self.line_colors[1])\n center_circle = FineCircle(radius = 1, stroke_width = 2, color = self.center_color)\n new_circle1 = FineCircle(radius = 1, stroke_width = 5, color = self.circle_colors[0])\n new_circle1.next_to(center_circle, LEFT, buff = 0)\n new_circle2 = FineCircle(radius = 1, stroke_width = 5, color = self.circle_colors[1])\n new_circle2.next_to(center_circle, RIGHT, buff = 0)\n inv_old_group = VGroup(line1, line2, center_circle)\n inv_new_group = VGroup(new_circle1, new_circle2)\n inv_group = VGroup(inv_old_group, inv_new_group)\n inv_group.rotate(-PI*2/5)\n inv_group.shift(3*RIGHT)\n circle = FineCircle(radius = 3.5, color = YELLOW)\n circle.shift(2*LEFT)\n circle_center = Dot(circle.get_center(), color = YELLOW)\n remark_circle = TextMobject(\"反演圆\", color = YELLOW)\n remark_circle.next_to(circle.get_bottom(), UP)\n remark_center = VGroup(*[\n Arrow(DL, UR, color = YELLOW, buff = 0).scale(0.3),\n TextMobject(\"反演中心\", color = YELLOW).scale(0.8),\n ])\n remark_center.arrange_submobjects(DL, buff = 0)\n remark_center.next_to(circle_center, DL, buff = 0.1)\n orig_old_group = VGroup(*[\n InversedVMobject(mob, circle, use_dashed_vmob = False, match_original_style = True)\n for mob in inv_old_group\n ])\n orig_new_group = VGroup(*[\n InversedVMobject(mob, circle, use_dashed_vmob = False, match_original_style = True)\n for mob in inv_new_group\n ])\n for mob in orig_old_group:\n mob.clear_updaters()\n mob.set_stroke(width = 2)\n for mob in orig_new_group:\n mob.clear_updaters()\n mob.set_stroke(width = 5)\n mob.set_fill(opacity = 0.1)\n self.add(orig_old_group)\n self.add(circle, circle_center, remark_circle, remark_center)\n self.circle = circle\n self.inv_old_group = inv_old_group\n self.inv_new_group = inv_new_group\n self.orig_old_group = orig_old_group\n self.orig_new_group = orig_new_group\n \n def show_process(self):\n dashed_inv_old_group = VGroup(*[\n DashedVMobject(mob, **self.dashed_vmob_config)\n for mob in self.inv_old_group\n ])\n dashed_inv_new_group = VGroup(*[\n DashedVMobject(mob, **self.dashed_vmob_config)\n for mob in self.inv_new_group\n ])\n 
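        # Animation outline: show the dashed normal form (two parallel lines
        # with the circle between them), slide two copies of that circle into
        # the tangent slots on either side, then draw orig_new_group (their
        # images under the inversion), which gives the two circles tangent to
        # all three circles of the original configuration.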
self.play(ShowCreation(dashed_inv_old_group, lag_ratio = 0.05), run_time = 3)\n self.wait()\n dashed_copys = VGroup(*[dashed_inv_old_group[-1].deepcopy() for k in range(2)])\n dashed_copys.generate_target()\n for mob_copy, mob_template in zip(dashed_copys.target, dashed_inv_new_group):\n mob_copy.match_style(mob_template)\n mob_copy.move_to(mob_template.get_center())\n self.play(MoveToTarget(dashed_copys), run_time = 3)\n self.remove(dashed_copys)\n self.add(dashed_inv_new_group)\n self.wait()\n self.play(DrawBorderThenFill(self.orig_new_group), run_time = 3)\n self.wait(2)\n self.play(\n FadeOut(dashed_inv_new_group),\n FadeOut(dashed_inv_old_group),\n FadeOut(self.orig_new_group),\n )\n self.wait()\n\n\nclass DifferentTangentTypesWithSameConclusion(KissingCirclesPuzzle):\n CONFIG = {\n \"random_seed\" : 570,\n \"num_of_nudges\" : 5, \n \"max_step\" : 0.5,\n \"color_1\" : ORANGE,\n \"color_2\" : RED,\n }\n def construct(self):\n super().show_figure()\n self.dots_l.save_state()\n self.dots_r.save_state()\n for dfc in [self.dfc_l, self.dfc_r]:\n dfc.add_new_circles()\n dfc.get_orig_circles().set_stroke(width = 2)\n c4_1, c4_2 = dfc.get_new_circles()\n c4_1.set_color(self.color_1)\n c4_2.set_color(self.color_2)\n self.add(self.dfc_l, self.dfc_r)\n for k in range(self.num_of_nudges):\n for dot in it.chain(self.dots_l, self.dots_r):\n dot.generate_target()\n dot.target.shift(get_random_vector(self.max_step))\n anims = AnimationGroup(*[\n MoveToTarget(dot, path_arc = PI/3., run_time = 1.5)\n for dot in it.chain(self.dots_l, self.dots_r)\n ], run_time = 2)\n self.play(anims)\n self.wait()\n self.play(self.dots_l.restore, self.dots_r.restore, run_time = 1.5)\n\n\nclass LineToCircleInversionRevisited(LineToCircleInversion):\n def construct(self):\n super().construct()\n self.remove_conclusions()\n self.add_explanation()\n\n def remove_conclusions(self):\n self.remove(self.bg_rect)\n self.remove(self.conclusions)\n\n def add_explanation(self):\n radius = Line(\n self.circle_O.get_left(), self.circle_O.get_center(),\n color = self.color_circle, stroke_width = 1,\n )\n radius_text = TexMobject(\"R\", color = self.color_circle)\n radius_text.next_to(radius, UP, buff = 0.1)\n radius_group = VGroup(radius, radius_text)\n radius_group.rotate(-PI/12, about_point = self.circle_O.get_center())\n remark_length = TexMobject(\"|OA| = d\", \"\\\\Downarrow\", \"|OA'| = \\dfrac{R^2}{d}\")\n remark_length.arrange_submobjects(DOWN)\n remark_length.scale(1.2)\n remark_length[0].set_color(self.color_orig)\n remark_length[-1].set_color(self.color_inv)\n remark_length.to_edge(RIGHT)\n self.add(radius_group, remark_length)\n self.wait()\n\n\nclass CircleToCircleInversionRevisited(CircleToCircleInversionProof):\n def construct(self):\n super().add_backgrounds()\n super().show_left_and_right_points()\n super().show_random_point()\n super().show_similar_triangles()\n self.arrange_elements()\n self.add_explanation()\n\n def arrange_elements(self):\n self.angle_tracker.set_value(PI/3)\n self.remove(self.remark_O)\n self.remove(self.ai_OAP, self.ai_OBP, self.ai_OPiAi, self.ai_OPiBi)\n self.add(self.inv_circle)\n self.add(self.dots_P, self.labels_P)\n self.add(self.dots_AB, self.labels_AB)\n self.add(self.aux_lines, self.rtais)\n dot_I = Dot(self.inv_circle.get_center())\n label_I = DotLabel(\"I\", dot_I, position = DOWN, label_buff = 0.15).scale(0.8)\n for mob in (dot_I, label_I):\n mob.set_sheen_direction(RIGHT)\n mob.set_color([self.color_B, self.color_A])\n remark_I = TextMobject(\"反形的圆心(并非$C$的反点!)\")\n 
remark_I.scale(0.5)\n remark_I.next_to(label_I, DOWN, buff = 0.1)\n self.add(dot_I, label_I, remark_I)\n\n def add_explanation(self):\n for circle, color, text, angle in zip(\n [self.circle_O, self.circle_C], [self.color_O, MAROON_B],\n [\"R\", \"r\"], [-PI/12, PI/3]\n ):\n radius = Line(\n circle.get_left(), circle.get_center(),\n color = color, stroke_width = 1,\n )\n radius_text = TexMobject(text, color = color)\n radius_text.next_to(radius, UP, buff = 0.1)\n radius_group = VGroup(radius, radius_text)\n radius_group.rotate(angle, about_point = circle.get_center())\n self.add(radius_group)\n remark_length_A = TexMobject(\"|OA| = d-r\", \"\\\\Rightarrow\", \"|OA'| = \\dfrac{R^2}{d-r}\")\n remark_length_B = TexMobject(\"|OB| = d+r\", \"\\\\Rightarrow\", \"|OB'| = \\dfrac{R^2}{d+r}\")\n remark_length_A[0].set_color(self.color_A)\n remark_length_A[-1].set_color(self.color_A)\n remark_length_B[0].set_color(self.color_B)\n remark_length_B[-1].set_color(self.color_B)\n length_group = VGroup(remark_length_A, remark_length_B)\n length_group.arrange_submobjects(DOWN, buff = 0.4)\n brace = Brace(length_group, RIGHT)\n arrow = TexMobject(\"\\\\Rightarrow\")\n remarks = VGroup(\n TexMobject(\"|A'B'| = \\\\dfrac{2 R^2 r}{|d^2-r^2|}\"),\n TexMobject(\"|OI| = \\\\dfrac{R^2 d}{|d^2-r^2|}\")\n )\n remarks.arrange_submobjects(DOWN, aligned_edge = LEFT)\n remarks.set_color(MAROON_B)\n result_group = VGroup(brace, arrow, remarks)\n result_group.arrange_submobjects(RIGHT)\n result_group.next_to(length_group, RIGHT)\n remark_group = VGroup(length_group, result_group)\n remark_group.center().to_edge(DOWN, buff = 0.2)\n bg_rect = BackgroundRectangle(remark_group, fill_opacity = 0.9)\n self.add(bg_rect, remark_group)\n self.wait()\n\n\nclass DescartesTheoremExamples(Scene):\n CONFIG = {\n \"circle_colors\" : [MAROON_B, RED, GREEN, BLUE],\n \"curvs_outer\" : [3, 6, 7, 34],\n \"curvs_inner\" : [10, 15, 19, -6],\n }\n def setup(self):\n self.text_color_map = dict(\n zip([\"{k_1}\", \"{k_2}\", \"{k_3}\", \"{k_4}\"], self.circle_colors)\n )\n\n def construct(self):\n self.add_title()\n self.add_outer_dfc()\n self.add_inner_dfc()\n\n def add_title(self):\n title = TexMobject(\n \"\\\\left(\", \"{k_1}\", \"+\", \"{k_2}\", \"+\", \"{k_3}\", \"+\", \"{k_4}\", \"\\\\right) ^2\",\n \"= 2 \\\\left(\", \"{k_1}\",\"^2 +\",\"{k_2}\",\"^2 +\",\"{k_3}\",\"^2 +\",\"{k_4}\",\"^2\", \"\\\\right)\"\n )\n title.set_color_by_tex_to_color_map(self.text_color_map)\n title.scale(1.2)\n title.to_edge(UP, buff = 0.2)\n self.add(title)\n\n def add_outer_dfc(self):\n r1, r2, r3, r4 = [1./curv for curv in self.curvs_outer]\n p1, p2, p3 = [\n VectorizedPoint(center)\n for center in calc_centers_by_radii(r1, r2, r3, init_angle = PI*2/3)\n ]\n outer_dfc = DescartesFourCircles(p1, p2, p3, show_new_circles = False)\n c1, c2, c3 = outer_dfc.get_orig_circles()\n c4 = outer_dfc.get_new_circles()[0]\n outer_circles = VGroup(c1, c2, c3, c4)\n outer_circles.clear_updaters()\n outer_circles.set_height(5.5)\n outer_circles.to_corner(DL)\n texts = VGroup(*[\n TexMobject(f\"k_{num}\", \"=\", f\"{curv}\") \\\n .scale(0.8) \\\n .move_to(circle.get_center())\n for num, curv, circle in zip(range(1, 5), self.curvs_outer, outer_circles)\n ])\n for circle, text, color in zip(outer_circles, texts, self.circle_colors):\n circle.set_color(color)\n text.set_color(color)\n texts[-1].shift(2.5*RIGHT+1.2*UP)\n arrow = Arrow(\n texts[-1].get_bottom(), outer_circles[-1].get_right(),\n path_arc = -PI*2/3, buff = 0.1,\n ).set_color(self.circle_colors[-1])\n outer_group = 
VGroup(outer_circles, texts, arrow)\n self.add(outer_group)\n\n def add_inner_dfc(self):\n r1, r2, r3, r4 = [1./curv for curv in self.curvs_inner]\n p1, p2, p3 = [\n VectorizedPoint(center)\n for center in calc_centers_by_radii(r1, r2, r3, init_angle = -PI/7)\n ]\n inner_dfc = DescartesFourCircles(p1, p2, p3, show_new_circles = False)\n c1, c2, c3 = inner_dfc.get_orig_circles()\n c4 = inner_dfc.get_new_circles()[1]\n inner_circles = VGroup(c1, c2, c3, c4)\n inner_circles.clear_updaters()\n inner_circles.set_height(5.5)\n inner_circles.to_corner(DR)\n inner_texts = VGroup(*[\n TexMobject(f\"k_{num}\", \"=\", f\"{curv}\") \\\n .scale(0.8) \\\n .move_to(circle.get_center())\n for num, curv, circle in zip(range(1, 5), self.curvs_inner, inner_circles)\n ])\n for circle, text, color in zip(inner_circles, inner_texts, self.circle_colors):\n circle.set_color(color)\n text.set_color(color)\n inner_texts[-1].shift(2.8*LEFT+2.7*UP)\n inner_arrow = Arrow(\n inner_texts[-1].get_critical_point(DOWN),\n inner_texts[-1].get_critical_point(DOWN)+0.7*DR,\n buff = 0.1,\n ).set_color(self.circle_colors[-1])\n inner_group = VGroup(inner_circles, inner_texts, inner_arrow)\n self.add(inner_group)\n self.wait()\n self.inner_circles = inner_circles\n self.inner_texts = inner_texts\n self.inner_arrow = inner_arrow\n\n\nclass DFCInversionProofP1(DescartesTheoremExamples):\n CONFIG = {\n \"remark_scale_text\" : \"示意图,图像并非真实比例\",\n \"orig_label_texts\" : [\"C_1\", \"C_2\", \"C_3\", \"C_4\"],\n \"inv_label_texts\" : [\"C_1'\", \"C_2'\", \"C_3'\", \"C_4'\"],\n }\n def construct(self):\n super().add_inner_dfc()\n self.arrange_elements()\n self.add_labels()\n self.add_inversion_center()\n self.add_mapsto_symbol()\n self.add_not_to_scale_remark()\n self.wait()\n\n def arrange_elements(self):\n self.remove(self.inner_texts, self.inner_arrow)\n self.inner_circles.center().shift(4*UP)\n normal_form = FourCirclesNormalForm()\n normal_form.shift(4*DOWN)\n self.add(normal_form)\n self.normal_form = normal_form\n\n def add_labels(self):\n orig_labels = VGroup()\n for n, (circle, text) in enumerate(zip(self.inner_circles, self.orig_label_texts)):\n label = TexMobject(text).scale(1.2)\n label.set_color(circle.get_color())\n label.move_to(circle.get_center())\n orig_labels.add(label)\n inv_labels = VGroup()\n for n, (circle, text) in enumerate(zip(self.normal_form, self.inv_label_texts)):\n label = TexMobject(text).scale(1.2)\n label.set_color(circle.get_color())\n label.move_to(circle.get_center())\n inv_labels.add(label)\n c1, c2, c3, c4 = self.inner_circles\n l1, l2, l3, l4 = orig_labels\n c1i, c2i, c3i, c4i = self.normal_form\n l1i, l2i, l3i, l4i = inv_labels\n l4.next_to(c4.get_bottom(), UP, buff = 0.3)\n l3i.next_to(c3i, DOWN).to_edge(RIGHT)\n l4i.next_to(c4i, UP).to_edge(RIGHT)\n self.add(orig_labels, inv_labels)\n self.orig_labels = orig_labels\n self.inv_labels = inv_labels\n\n def add_inversion_center(self):\n c1, c2, c3, c4 = self.inner_circles\n inv_center = get_tangent_point(c3, c4)\n dot_O = Dot(inv_center, color = YELLOW)\n label_O = TexMobject(\"O\", color = YELLOW).next_to(dot_O, UP)\n remark_O = TextMobject(\"反演中心\", color = YELLOW)\n remark_O.next_to(dot_O, RIGHT, buff = 1.5)\n arrow_O = Arrow(remark_O.get_left(), dot_O.get_right(), color = YELLOW, buff = 0.2)\n orig_center_group = VGroup(dot_O, label_O, remark_O, arrow_O)\n inv_dot_O = VectorizedPoint()\n inv_dot_O.next_to(self.normal_form[-1], UP, buff = 1.4)\n inv_dot_O.shift(2*RIGHT)\n inv_center_group = orig_center_group.deepcopy()\n 
inv_center_group.shift(inv_dot_O.get_center() - dot_O.get_center())\n self.add(orig_center_group, inv_center_group)\n self.orig_center_group = orig_center_group\n self.inv_center_group = inv_center_group\n\n def add_mapsto_symbol(self):\n mapsto = TexMobject(\"\\\\mapsto\")\n mapsto.rotate(-PI/2)\n mapsto.scale(2.5)\n mapsto.next_to(self.inner_circles, DOWN)\n remark_mapsto = TextMobject(\"反演变换\")\n remark_mapsto.next_to(mapsto, LEFT)\n self.add(mapsto, remark_mapsto)\n\n def add_not_to_scale_remark(self):\n remark_scale = TextMobject(\"(\" + self.remark_scale_text + \")\")\n remark_scale.scale(0.75)\n remark_scale.next_to(6.5*DL, RIGHT, buff = 0)\n self.add(remark_scale)\n\n\nclass DFCInversionProofP2(DFCInversionProofP1):\n CONFIG = {\n \"remark_scale_text\" : \"示意图,反演圆未标出,且图像并非真实比例\",\n \"inv_label_texts\" : [\"C_1'\", \"C_2'\", \"C_3':y=-1\", \"C_4':y=1\"],\n \"inv_center_coord_text\" : \"(x_0, y_0) \\\\, (y_0>1)\",\n \"circle_center_coord_texts\" : [\"(-1,0)\", \"(1,0)\"],\n }\n def construct(self):\n super().construct()\n self.change_center_remarks()\n self.add_coord_system()\n self.change_inv_labels()\n self.wait()\n\n def change_center_remarks(self):\n for center_group in (self.orig_center_group, self.inv_center_group):\n dot, label, remark, arrow = center_group\n self.remove(remark, arrow)\n if center_group is self.inv_center_group:\n coord = TexMobject(self.inv_center_coord_text)\n coord.next_to(dot, RIGHT)\n coord.set_color(dot.get_color())\n self.add(coord)\n\n def add_coord_system(self):\n c1, c2, c3, c4 = self.normal_form\n center_point = (c1.get_center() + c2.get_center()) / 2\n unit_size = c1.get_height()/2\n coord_system = Axes(\n center_point = center_point,\n number_line_config = {\"unit_size\" : unit_size},\n y_min = -1.8, y_max = 2.8,\n )\n self.add(coord_system)\n self.coord_system = coord_system\n\n def change_inv_labels(self):\n l1i, l2i, l3i, l4i = self.inv_labels\n for label, x_coord, coord_text in zip([l1i, l2i], [-1, 1], self.circle_center_coord_texts):\n center = self.coord_system.c2p(x_coord, 0)\n label.next_to(center, UP)\n dot_i = Dot(center, radius = 0.1).set_color(label.get_color())\n coord_i = TexMobject(coord_text).set_color(label.get_color()).next_to(center, DOWN)\n self.add(dot_i, coord_i)\n\n\n#####\n## Inversion Advanced P2 Scenes\nclass ApollonianGasketConstruction(ApollonianGasketScene):\n CONFIG = {\n \"max_iter\" : 8,\n \"curvatures\" : [2, 2, 3],\n \"init_angle\" : 0,\n \"curv_thres\" : 30000,\n \"ag_config\": {\n \"agc_config\" : {\n \"radius_thres\" : 1e-3,\n \"circle_color\" : BLUE,\n \"label_color\" : WHITE,\n },\n },\n \"color_curr\" : YELLOW,\n \"wait_time\" : 2,\n }\n def construct(self):\n r1, r2, r3 = [1./curv for curv in self.curvatures]\n p1, p2, p3 = calc_centers_by_radii(r1, r2, r3, init_angle = self.init_angle)\n agc1 = AGCircle(p1, r1, parents = None, **self.ag_config[\"agc_config\"])\n agc2 = AGCircle(p2, r2, parents = None, **self.ag_config[\"agc_config\"])\n agc3 = AGCircle(p3, r3, parents = None, **self.ag_config[\"agc_config\"])\n remark = TextMobject(\"(圆内数字为该圆的曲率)\")\n remark.scale(0.75).to_corner(DL)\n self.add(remark)\n for k in range(self.max_iter):\n agcs_copy = [agc.deepcopy() for agc in (agc1, agc2, agc3)]\n ag = ApollonianGasket(\n *agcs_copy, num_iter = k,\n curv_thres = self.curv_thres, **self.ag_config\n )\n iter_num = VGroup(\n TextMobject(\"迭代次数:\"), TexMobject(f\"{k}\")\n ).arrange_submobjects(RIGHT).scale(1.5)\n iter_num.to_edge(LEFT, buff = 1)\n ag.scale(3.8)\n ag.shift(np.array([0, 3.8, 0]) - ag.get_top() + 
3*RIGHT)\n VGroup(*ag.agc_list[-1]).set_color(self.color_curr)\n self.add(ag, iter_num)\n self.wait(self.wait_time)\n if k != self.max_iter-1:\n self.remove(ag, iter_num)\n \n\nclass ApollonianGasketExample1(Scene):\n CONFIG = {\n \"max_iter\" : 20,\n \"curvatures\" : [3, 6, 7],\n \"curvature_texts\" : [-2, 3, 6, 7],\n \"init_angle\" : 0,\n \"curv_thres\" : 4000,\n \"ag_config\": {\n \"agc_config\" : {\n \"radius_thres\" : 1e-3,\n \"circle_color\" : BLUE,\n \"label_color\" : WHITE,\n },\n },\n \"ag_scaling_factor\" : 5.2,\n }\n def construct(self):\n r1, r2, r3 = [1./curv for curv in self.curvatures]\n p1, p2, p3 = calc_centers_by_radii(r1, r2, r3, init_angle = self.init_angle)\n agc1 = AGCircle(p1, r1, parents = None, **self.ag_config[\"agc_config\"])\n agc2 = AGCircle(p2, r2, parents = None, **self.ag_config[\"agc_config\"])\n agc3 = AGCircle(p3, r3, parents = None, **self.ag_config[\"agc_config\"])\n ag_seed = ApollonianGasket(\n *[agc.deepcopy() for agc in (agc1, agc2, agc3)],\n num_iter = 0, curv_thres = self.curv_thres, **self.ag_config\n )\n ag_result = ApollonianGasket(\n *[agc.deepcopy() for agc in (agc1, agc2, agc3)],\n num_iter = self.max_iter, curv_thres = self.curv_thres, **self.ag_config\n )\n ag_seed_center = ag_seed[0][0].get_right()\n ag_result_center = ag_result[0][0].get_right()\n arrow = Arrow(LEFT, RIGHT)\n figure_group = VGroup(ag_seed, ag_result, arrow)\n for ag, center, direction in zip(\n [ag_seed, ag_result], [ag_seed_center, ag_result_center], [4*LEFT, 4*RIGHT]):\n ag.scale(self.ag_scaling_factor)\n ag.shift(direction - center)\n figure_group.shift(DOWN)\n k1, k2, k3, k4 = list(map(str, self.curvature_texts))\n title = TexMobject(\n f\"({k1}+{k2}+{k3}+{k4})^2 = 2\\\\left[({k1})^2+{k2}^2+{k3}^2+{k4}^2 \\\\right]\"\n )\n title.set_width(13)\n title.set_color(YELLOW)\n title.to_edge(UP)\n self.add(figure_group, title)\n self.wait()\n\n\nclass ApollonianGasketExample2(ApollonianGasketExample1):\n CONFIG = {\n \"max_iter\" : 20,\n \"curvatures\" : [5, 8, 12],\n \"curvature_texts\" : [-3, 5, 8, 12],\n \"curv_thres\" : 5000,\n \"ag_config\": {\n \"agc_config\" : {\n \"radius_thres\" : 5e-4,\n \"circle_color\" : BLUE,\n \"label_color\" : WHITE,\n },\n },\n \"ag_scaling_factor\" : 8,\n }\n\n\nclass LoxodromicSpiralInTangentCircles(Scene):\n CONFIG = {\n \"max_iter\" : 20,\n \"agc_config\" : {\n \"radius_thres\" : 1,\n \"circle_color\" : BLUE,\n \"label_color\" : WHITE,\n },\n \"curve_config\" : {\n \"color\" : YELLOW,\n \"stroke_width\" : 2,\n },\n \"alpha\" : 0.6,\n \"dashed_line_config\" : {\n \"color\" : GREY,\n \"stroke_width\" : 0.5,\n \"num_dashes\" : 200,\n \"positive_space_ratio\" : 0.6,\n }\n }\n def construct(self):\n self.generate_circles()\n self.generate_curves()\n self.generate_labels()\n self.generate_lines()\n self.add_elements()\n self.zooming_in()\n\n def generate_circles(self):\n agcm2 = AGCircle(2/3.*UP, 1/3., **self.agc_config)\n agcm1 = AGCircle(RIGHT/2, 1/2., **self.agc_config)\n agczr = AGCircle(ORIGIN, -1, **self.agc_config)\n agcp1 = AGCircle(LEFT/2, 1/2., **self.agc_config)\n agcp2 = AGCircle(2/3.*DOWN, 1/3., **self.agc_config)\n agc_list = [agcm2, agcm1, agczr, agcp1, agcp2]\n for n in range(self.max_iter):\n A, B, C, known_agc = agc_list[:4]\n agc_m_k, agc_m_c = calc_new_agc_info(A, B, C, known_agc = known_agc)\n agc_m = AGCircle(agc_m_c, 1./agc_m_k, parents = (A, B, C), **self.agc_config)\n known_agc, C, B, A = agc_list[-4:]\n agc_p_k, agc_p_c = calc_new_agc_info(C, B, A, known_agc = known_agc)\n agc_p = AGCircle(agc_p_c, 1./agc_p_k, parents 
= (C, B, A), **self.agc_config)\n agc_list.insert(0, agc_m)\n agc_list.append(agc_p)\n agc_group = VGroup(*agc_list)\n agc_group.set_height(7.8)\n self.agc_list = agc_list\n self.agc_group = agc_group\n\n def generate_curves(self):\n agc_ps = self.agc_list[-self.max_iter-4:]\n agc_ps_points = []\n loxo_curve_p_solid = VMobject(**self.curve_config)\n for k in range(len(agc_ps)-2):\n if k != 0:\n c1, c2, c3 = agc_ps[k], agc_ps[k+1], agc_ps[k+2]\n pt1 = get_tangent_point(c1, c2)\n pt2 = get_tangent_point(c2, c3)\n p = c2.get_center()\n if k != 1:\n agc_ps_points.extend(\n [pt1, p*(1-self.alpha)+pt1*self.alpha, p*(1-self.alpha)+pt2*self.alpha, pt2]\n )\n else:\n agc_ps_points.extend(\n [pt1, p*0.7+pt1*0.3, p*0.6+pt2*0.4, pt2]\n )\n else:\n c1, c2 = agc_ps[1], agc_ps[2]\n pt = get_tangent_point(c1, c2)\n agc_ps_points.extend([8*LEFT, 7*LEFT, 6*LEFT, pt])\n loxo_curve_p_solid.append_points(agc_ps_points)\n loxo_curve_m_solid = loxo_curve_p_solid.deepcopy()\n loxo_curve_m_solid.rotate(PI, about_point = self.agc_group.get_center())\n self.loxo_curve_p_solid = loxo_curve_p_solid\n self.loxo_curve_m_solid = loxo_curve_m_solid\n \n def generate_labels(self):\n labels = VGroup(*[\n TexMobject(\"C_{%d}\" % num, background_stroke_width = 0)\n for num in range(-self.max_iter-2, self.max_iter+3)\n ])\n for label, circle in zip(labels, self.agc_group):\n label.set_height(circle.get_height()*0.15)\n label.move_to(circle.get_center())\n label_c0 = labels[self.max_iter+2]\n label_c0.set_height(0.8)\n label_c0.next_to(self.agc_group.get_critical_point(UL), DR, buff = 0.1)\n self.labels = labels\n\n def generate_lines(self):\n agc_ps = self.agc_list[-self.max_iter-2:]\n line_p_solid = VMobject(**self.dashed_line_config)\n line_p_solid_corners = [8*LEFT]\n for circle in agc_ps:\n line_p_solid_corners.append(circle.get_center())\n line_p_solid.set_points_as_corners(line_p_solid_corners)\n line_m_solid = line_p_solid.deepcopy()\n line_m_solid.rotate(PI, about_point = self.agc_group.get_center())\n self.line_p_solid = line_p_solid\n self.line_m_solid = line_m_solid\n\n def add_elements(self):\n figure = VGroup(\n self.agc_group, self.loxo_curve_p_solid, self.loxo_curve_m_solid,\n self.line_p_solid, self.line_m_solid, self.labels,\n )\n self.add(figure)\n self.figure = figure\n\n def zooming_in(self):\n self.figure.save_state()\n self.wait(0.5)\n self.play(\n ApplyMethod(self.figure.shift, -self.agc_group[-1].get_center()),\n run_time = 2,\n )\n self.wait()\n for k in range(10):\n self.play(\n ApplyMethod(self.figure.scale, 2.5, {\"about_point\" : self.agc_group[-1].get_center()}),\n run_time = 2,\n )\n self.wait()\n self.play(self.figure.restore, run_time = 15)\n self.wait(2)\n\n\nclass ShowFordCircles(ZoomInOnFordCircles):\n CONFIG = {\n \"q_max\" : 30,\n }\n def construct(self):\n self.setup_axes()\n self.setup_circles_and_labels()\n self.add_remarks()\n self.first_zoom_in()\n self.wait()\n\n def first_zoom_in(self):\n self.zoom_in_on(1/2., 6)\n\n def add_remarks(self):\n nl_text = TextMobject(\"数轴\")\n nl_arrow = Arrow(ORIGIN, UP).match_height(nl_text)\n nl_remark = VGroup(nl_arrow, nl_text)\n nl_remark.scale(0.8)\n nl_remark.set_color(LIGHT_GREY)\n nl_remark.arrange_submobjects(RIGHT, buff = 0.1)\n nl_remark.next_to(self.axes.coords_to_point(0, 0), DOWN, buff = 0.1)\n nl_remark.to_edge(LEFT, buff = 0.15)\n frac_remark = TextMobject(\"圆内分数为圆心横坐标\")\n frac_remark.scale(0.6)\n frac_remark.to_corner(DL, buff = 0.15)\n self.add(nl_remark, frac_remark)\n\n\nclass ShowFordCirclesDetails(ShowFordCircles):\n CONFIG = 
{\n \"q_max\" : 100,\n }\n def construct(self):\n super().construct()\n self.further_zoom_in()\n\n def setup_circles_and_labels(self):\n circles = VGroup()\n labels = VGroup()\n for q in range(1, self.q_max+1):\n for p in get_coprime_numers_by_denom(q):\n if (q <= 40) or (0.6 <= p/q <= 0.8):\n circle = self.generate_circle_by_fraction(p, q)\n circle.add_updater(\n lambda m: m.set_stroke(width = get_stroke_width_by_height(m.get_height()))\n )\n label = AssembledFraction(p, q)\n label.set_height(circle.get_height() * self.label_height_factor)\n label.move_to(circle.get_center())\n circles.add(circle)\n labels.add(label)\n self.add(circles, labels)\n self.circles = circles\n self.labels = labels\n\n def further_zoom_in(self):\n self.acl = VGroup(self.axes, self.circles, self.labels)\n self.acl.save_state()\n self.wait(0.5)\n self.play_zooming_animation(1/np.sqrt(2), 9, run_time = 5)\n self.wait()\n self.play_zooming_animation(0.73, 5, run_time = 4)\n self.wait()\n self.play_zooming_animation(0.74, 5, run_time = 4)\n self.wait()\n self.play(self.acl.restore, run_time = 5)\n self.wait(2)\n\n\nclass ProveFordCirclesPropertiesP1(Scene):\n CONFIG = {\n \"c1_frac\" : [2, 3],\n \"c2_frac\" : [3, 4],\n \"c3_frac\" : [5, 7],\n \"circle_config\" : {\"stroke_color\" : BLUE, \"stroke_width\" : 2,},\n \"line_config\" : {\"stroke_color\" : GREY, \"stroke_width\" : 2,},\n \"aux_line_config\" : {\"stroke_color\" : GREY, \"stroke_width\" : 0.8,},\n \"polygon_config\" : {\"fill_color\" : GREY, \"fill_opacity\" : 0.4, \"stroke_width\" : 0,},\n }\n def setup(self):\n a, b = self.c1_frac\n c, d = self.c2_frac\n p, q = self.c3_frac\n r1 = 1/(2*b**2)\n r2 = 1/(2*d**2)\n r3 = 1/(2*q**2)\n c1_center = a/b*RIGHT + r1*UP\n c2_center = c/d*RIGHT + r2*UP\n c3_center = p/q*RIGHT + r3*UP\n c1 = Circle(arc_center = c1_center, radius = r1, **self.circle_config)\n c2 = Circle(arc_center = c2_center, radius = r2, **self.circle_config)\n c3 = Circle(arc_center = c3_center, radius = r3, **self.circle_config)\n c1_dot = SmallDot(color = GREY)\n c1_dot.add_updater(lambda m: m.move_to(c1.get_center()))\n c2_dot = SmallDot(color = GREY)\n c2_dot.add_updater(lambda m: m.move_to(c2.get_center()))\n c3_dot = SmallDot(color = GREY)\n c3_dot.add_updater(lambda m: m.move_to(c3.get_center()))\n line = Line(\n 2*c1.get_bottom()-c2.get_bottom(),\n 2*c2.get_bottom()-c1.get_bottom(),\n **self.line_config\n )\n VGroup(c1, c2, c3, line).set_height(6).center().to_edge(UP)\n aux_line_1 = Line(c1.get_center(), c1.get_bottom(), **self.aux_line_config)\n aux_line_2 = Line(c2.get_center(), c2.get_bottom(), **self.aux_line_config)\n aux_line_3 = Line(c1.get_center(), c2.get_center(), **self.aux_line_config)\n aux_line_4 = Line(c1.get_bottom(), c2.get_bottom(), **self.aux_line_config) \\\n .shift(c2.get_height()/2*UP)\n polygon = Polygon(\n c1.get_center(), c2.get_center(), aux_line_4.get_start_and_end()[0],\n **self.polygon_config,\n )\n l1 = TexMobject(\"\\\\dfrac{a}{b}\").next_to(c1, DOWN)\n l2 = TexMobject(\"\\\\dfrac{c}{d}\").next_to(c2, DOWN)\n l3 = TexMobject(\"\\\\dfrac{a+c}{b+d}\").next_to(c3, DOWN)\n self.orig_group = VGroup(c1, c2, line, c1_dot, c2_dot, l1, l2)\n self.aux_group = VGroup(aux_line_1, aux_line_2, aux_line_3, aux_line_4, polygon)\n self.new_group = VGroup(c3, c3_dot, l3)\n \n def construct(self):\n self.add(self.orig_group, self.aux_group)\n self.wait()\n\n\nclass ProveFordCirclesPropertiesP2(ProveFordCirclesPropertiesP1):\n def construct(self):\n self.add(self.orig_group, self.new_group)\n self.wait()\n\n\nclass 
ShowFordCirclesFareySum(ZoomInOnFordCircles):\n pass\n # A rename, that's it.\n\n\nclass DFCInversionProofP3(DFCInversionProofP2):\n CONFIG = {\n \"remark_scale_text\" : \"示意图,反演圆未标出,且图像并非真实比例\",\n \"inv_label_texts\" : [\"C_1'\", \"C_2'\", \"C_3':\\\\mathrm{Im}(z)=-1\", \"C_4':\\\\mathrm{Im}(z)=1\"],\n \"inv_center_coord_text\" : \"z_0 = x_0+iy_0\\\\, (y_0>1)\",\n \"circle_center_coord_texts\" : [\"-1\", \"1\"],\n }\n def construct(self):\n super().construct()\n self.wait()\n\n def add_coord_system(self):\n c1, c2, c3, c4 = self.normal_form\n center_point = (c1.get_center() + c2.get_center()) / 2\n unit_size = c1.get_height()/2\n coord_system = NumberPlane(\n center_point = center_point,\n number_line_config = {\"unit_size\" : unit_size},\n y_min = -3, y_max = 3,\n background_line_style = {\n \"stroke_color\" : GREY,\n \"stroke_width\" : 1.5,\n \"stroke_opacity\" : 0.8,\n },\n )\n aux_coord_system = Axes(\n center_point = center_point,\n number_line_config = {\"unit_size\" : unit_size},\n y_min = -3, y_max = 3,\n stroke_opacity = 0.8,\n )\n self.add(coord_system, aux_coord_system)\n self.coord_system = coord_system\n\n\nclass NormalFormIn3D(ThreeDScene):\n CONFIG = {\n \"axis_unit_size\" : 1.5,\n \"axis_min\" : -1.5,\n \"axis_max\" : 2.8,\n \"resolution\" : (60, 120),\n \"plane_colors\" : [GREEN, BLUE],\n \"sphere_colors\" : [MAROON_B, RED, PINK],\n }\n def construct(self):\n self.add_3d_stuff()\n self.add_2d_stuff()\n\n def add_3d_stuff(self):\n self.set_camera_orientation(theta = 70 * DEGREES, phi = 50 * DEGREES)\n axes = ThreeDAxes(\n x_min = self.axis_min, x_max = self.axis_max,\n y_min = self.axis_min, y_max = self.axis_max,\n z_min = self.axis_min, z_max = self.axis_max,\n number_line_config = {\"unit_size\" : self.axis_unit_size},\n )\n sphere_centers = [\n axis.number_to_point(1)\n for axis in [axes.x_axis, axes.y_axis, axes.z_axis]\n ]\n radius = 1/np.sqrt(2) * self.axis_unit_size\n sphere_dots = VGroup(*[\n Sphere(\n radius = 0.08, resolution = self.resolution,\n fill_opacity = 1, stroke_width = 0,\n ).move_to(sphere_center).set_color(color)\n for sphere_center, color in zip(sphere_centers, self.sphere_colors)\n ])\n spheres = VGroup(*[\n Sphere(\n radius = radius, resolution = self.resolution,\n fill_opacity = 0.6, stroke_width = 0.5,\n ).move_to(sphere_center).set_color(color)\n for sphere_center, color in zip(sphere_centers, self.sphere_colors)\n ])\n planes = VGroup(*[\n VGroup(*[\n Square(\n side_length = 1, fill_opacity = fill_opacity,\n stroke_color = GREY, stroke_width = 0.3, stroke_opacity = 0.2,\n )\n for k in range(n**2)\n ]).arrange_in_grid(n, n, buff = 0) \\\n .apply_matrix(z_to_vector([1, 1, 1])) \\\n .move_to(np.average(sphere_centers)) \\\n .shift(radius * normalize(direction)) \\\n .set_color(color)\n for n, fill_opacity, direction, color in zip(\n [7, 8], [0.2, 0.3], [np.ones(3), -np.ones(3)], self.plane_colors,\n )\n ])\n figure_group = VGroup(axes, planes, sphere_dots, spheres)\n figure_group.shift(RIGHT*2+0.5*OUT)\n self.add(figure_group)\n self.add(axes)\n self.add(planes)\n self.add(sphere_dots, spheres)\n\n def add_2d_stuff(self):\n sphere_remarks = VGroup(*[\n TextMobject(\n \"球:圆心为\" + f\"$({int(x)},{int(y)},{int(z)})$\" + \\\n \",半径为\" + \"$\\\\dfrac{1}{\\\\sqrt{2}}$\"\n ).set_color(color)\n for (x, y, z), color in zip([RIGHT, UP, OUT], self.sphere_colors)\n ]).arrange_submobjects(DOWN)\n plane_remarks = VGroup(*[\n TexMobject(\n \"\\\\text{平面:}\" + \"x+y+z=1\" + sign + \"\\\\dfrac{\\\\sqrt{3}}{\\\\sqrt{2}\"\n ).set_color(color)\n for sign, color in 
zip([\"+\", \"-\"], self.plane_colors)\n ]).arrange_submobjects(DOWN)\n remarks = VGroup(sphere_remarks, plane_remarks)\n remarks.arrange_submobjects(DOWN, aligned_edge = LEFT)\n remarks.scale(0.8)\n remarks.to_corner(DR)\n self.add_fixed_in_frame_mobjects(remarks)\n self.wait()\n\n\n#####\n## Banner\nclass Banner_Intro(Scene):\n CONFIG = {\n \"circle_color\" : YELLOW,\n \"text_color\" : BLUE,\n \"inv_text_color\" : BLUE,\n \"circle_center\" : 0.8*UP,\n \"circle_radius\" : 3,\n \"grid_side_length\" : 0.5,\n \"x_range\" : 300,\n \"y_range\" : 300,\n \"dist_thres\" : 300,\n }\n def construct(self):\n circle = Circle(color = self.circle_color, radius = self.circle_radius, stroke_width = 5)\n circle.move_to(self.circle_center)\n dot = SmallDot(self.circle_center, color = self.circle_color)\n text = TextMobject(\"Inversion\", color = self.text_color, background_stroke_width = 3)\n text.rotate(PI/2.)\n text.move_to(0.4*RIGHT)\n text.apply_complex_function(np.exp)\n text.rotate(-PI/2.)\n text.scale(1.5)\n text.move_to(0.9*DOWN)\n inv_text = InversedVMobject(text, circle, use_dashed_vmob = False)\n inv_text.suspend_updating()\n inv_text.set_background_stroke(color = \"#303030\", width = 3)\n inv_text.set_stroke(width = 0)\n inv_text.set_fill(color = self.inv_text_color, opacity = 0.5)\n grid = VGroup(*[\n Square(\n side_length = self.grid_side_length,\n stroke_width = 0, fill_opacity = 0.3,\n fill_color = CB_DARK if (i+j)%2==0 else CB_LIGHT\n ).move_to(self.circle_center + (i*RIGHT+j*UP)*self.grid_side_length)\n for i in range(-self.x_range, self.x_range+1, 1)\n for j in range(-self.y_range, self.y_range+1, 1)\n if np.sqrt(i**2+j**2) * self.grid_side_length < self.dist_thres\n ])\n for square in grid:\n if is_close_in_R3(square.get_center(), self.circle_center):\n grid.remove(square)\n inv_grid = InversedVMobject(grid, circle, use_dashed_vmob = False)\n self.add(inv_grid, circle, dot, text, inv_text)\n self.wait()\n\n\nclass Banner_AdvancedP1(ApollonianGasketScene):\n CONFIG = {\n \"curvatures\" : [570, 968, 1112],\n \"init_angle\" : PI/7,\n \"num_iter\" : 20,\n \"curv_thres\" : 1e6,\n \"ag_config\" : {\n \"agc_config\" : {\n \"radius_thres\" : 5e-6,\n \"circle_color\" : YELLOW,\n \"label_color\" : WHITE,\n },\n },\n \"part_text\" : \"上篇\",\n }\n def construct(self):\n super().construct()\n ag = self.ag\n ag.set_height(7)\n circle_myst = ag.agc_list[0][0]\n label_myst = circle_myst.label\n label_question = TexMobject(\"???\")\n label_question.match_height(label_myst)\n label_question.move_to(label_myst)\n self.remove(label_myst)\n self.add(label_question)\n part = TextMobject(self.part_text)\n part.to_corner(DR)\n self.add(part)\n\n\nclass Banner_AdvancedP2(Banner_AdvancedP1):\n CONFIG = {\n \"part_text\" : \"下篇\",\n }\n\n\n\n",
"#coding=utf-8\n\nimport numpy as np\nimport random\n\nfrom manimlib.constants import *\nfrom manimlib.mobject.types.vectorized_mobject import VGroup\nfrom manimlib.utils.color import rgb_to_color, color_to_rgb\n\n\ndef list_shuffle(l):\n \"\"\"Return a shuffled copy of the original list ``l``.\"\"\"\n x = l[:]\n random.shuffle(x)\n return x\n\ndef vgroup_expansion(mobs):\n \"\"\"Flatten a nested VGroup object ``mobs``.\"\"\"\n while any(map(lambda x: isinstance(x, VGroup), mobs)):\n expanded_mobs = []\n for mob in mobs.submobjects:\n expanded_mobs.extend(mob)\n mobs = VGroup(expanded_mobs)\n return mobs\n\ndef mobs_shuffle(mobs):\n \"\"\"Shuffle the submobjects inside a VGroup object ``mobs``.\"\"\"\n mobs = vgroup_expansion(mobs)\n mobs.submobjects = list_shuffle(mobs.submobjects)\n return mobs\n\ndef fit_mobject_in(content_mob, container_mob, buffer_factor = 0.6):\n width_factor = container_mob.get_width() / content_mob.get_width()\n height_factor = container_mob.get_height() / content_mob.get_height()\n scale_factor = min(width_factor, height_factor)\n content_mob.scale(scale_factor * buffer_factor)\n content_mob.move_to(container_mob)\n return content_mob\n\ndef tweak_color(color1, color2, weight = 0.3):\n \"\"\"Return a weighted-average of two colors.\"\"\"\n weight = np.clip(weight, 0, 1)\n tweaked_rgb = weight * color_to_rgb(color2) + (1-weight) * color_to_rgb(color1)\n return rgb_to_color(tweaked_rgb)\n\ndef brighten(color, weight = 0.3):\n return tweak_color(color, WHITE, weight)\n\ndef darken(color, weight = 0.3):\n return tweak_color(color, BLACK, weight)\n\n\n"
] | [
[
"numpy.dot",
"numpy.random.random",
"numpy.abs",
"numpy.sqrt",
"numpy.linalg.norm",
"numpy.ones",
"numpy.round",
"numpy.average",
"numpy.array",
"numpy.conjugate"
],
[
"numpy.clip"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
hannesb0/MSWH | [
"ce214f26369106c124052638e93cc38fbd58cc91"
] | [
"mswh/comm/tests/test_sql.py"
] | [
"import logging\r\nimport os\r\nimport unittest\r\n\r\nfrom mswh.comm.sql import Sql\r\n\r\nimport pandas as pd\r\n\r\nlogging.basicConfig(level=logging.DEBUG)\r\n\r\n\r\n# has setUpClass method, thus run the test on the entire class\r\nclass SqlTests(unittest.TestCase):\r\n \"\"\"Tests the db-python read-write capabilities.\"\"\"\r\n\r\n @classmethod\r\n def setUpClass(cls):\r\n \"\"\"Initiates the sqlite db engine\r\n for the test db file.\r\n \"\"\"\r\n test_db_name = \"test.db\"\r\n test_db_fulpath = os.path.join(os.path.dirname(__file__), test_db_name)\r\n cls.test_db_fulpath = test_db_fulpath\r\n\r\n print(test_db_fulpath)\r\n # create test db if it does not exist\r\n\r\n if not os.path.exists(test_db_fulpath):\r\n os.system(\"touch \" + test_db_fulpath)\r\n\r\n cls.sql_api = Sql(test_db_fulpath)\r\n\r\n # example dict to write to db\r\n cls.df = pd.DataFrame(\r\n data=[[\"a\", 1], [\"b\", 2]], columns=[\"comp\", \"cost\"]\r\n )\r\n\r\n # example dict to write to db as table\r\n cls.dict = {\"k1\": [12, 13, 14], \"k2\": [\"a\", \"b\", \"c\"]}\r\n\r\n # example csv data\r\n cls.path_to_csv = os.path.join(os.path.dirname(__file__), \"table.csv\")\r\n\r\n # sql code to execute\r\n cls.raw_sql = \"\"\"CREATE TABLE sys_components\r\n(\r\n Component TEXT NOT NULL ,\r\n Function TEXT NOT NULL ,\r\n\r\nPRIMARY KEY (Component)\r\n);\"\"\"\r\n\r\n @classmethod\r\n def tearDownClass(cls):\r\n \"\"\"Clean up for any reinitiation of the test,\r\n but keep the result. Any new run will overwrite\r\n the result.\r\n \"\"\"\r\n store_db_name = \"test_done.db\"\r\n # close the test db\r\n cls.sql_api.db.close()\r\n store_db_fulpath = os.path.join(\r\n os.path.dirname(__file__), store_db_name\r\n )\r\n # rename file, overwrite if exists\r\n if os.path.exists(store_db_fulpath):\r\n os.remove(store_db_fulpath)\r\n\r\n os.rename(cls.test_db_fulpath, store_db_fulpath)\r\n\r\n def test_a_pd2table(self):\r\n \"\"\"Tests write pandas dataframe to\r\n db as a table.\r\n \"\"\"\r\n self.sql_api.pd2table(self.df, \"pd2table\")\r\n\r\n def test_b_csv2table(self):\r\n \"\"\"Tests write csv file to\r\n db as a table.\r\n \"\"\"\r\n self.sql_api.csv2table(self.path_to_csv, \"csv2table\")\r\n\r\n def test_c_table2pd(self):\r\n \"\"\"Reads a single table from db as a pd.df\"\"\"\r\n df = self.sql_api.table2pd(\"pd2table\")\r\n self.assertTrue((df == self.df).all().all())\r\n\r\n def test_d_commit(self):\r\n \"\"\"Use sql to write to db (e.g. create, alter)\"\"\"\r\n self.assertTrue(self.sql_api.commit(self.raw_sql))\r\n\r\n def test_e_tables2dict(self):\r\n \"\"\"Read all tables from db into a dictionary\r\n of dataframes.\r\n \"\"\"\r\n data = self.sql_api.tables2dict()\r\n self.assertEqual(data[\"pd2table\"].iloc[1, 1], 2)\r\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
chrhck/pyABC | [
"731cfdec26bef3898bf6e244daa5c8f83f3fe19d",
"731cfdec26bef3898bf6e244daa5c8f83f3fe19d",
"731cfdec26bef3898bf6e244daa5c8f83f3fe19d",
"731cfdec26bef3898bf6e244daa5c8f83f3fe19d"
] | [
"test/visualization/test_visualization.py",
"pyabc/epsilon/temperature.py",
"pyabc/visualization/epsilon.py",
"pyabc/visserver/server.py"
] | [
"import pyabc\nimport tempfile\nimport pytest\nimport os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\n# create and run some model\n\n\ndef model(p):\n return {'ss0': p['p0'] + 0.1 * np.random.uniform(),\n 'ss1': p['p1'] + 0.1 * np.random.uniform()}\n\n\np_true = {'p0': 3, 'p1': 4}\nobservation = {'ss0': p_true['p0'], 'ss1': p_true['p1']}\nlimits = {'p0': (0, 5), 'p1': (1, 8)}\nprior = pyabc.Distribution(**{\n key: pyabc.RV('uniform', limits[key][0], limits[key][1] - limits[key][0])\n for key in p_true.keys()})\n\ndb_path = \"sqlite:///\" \\\n + os.path.join(tempfile.gettempdir(), \"test_visualize.db\")\n\n\ndistance = pyabc.PNormDistance(p=2)\nn_history = 2\nsampler = pyabc.sampler.MulticoreEvalParallelSampler(n_procs=2)\n\nfor _ in range(n_history):\n abc = pyabc.ABCSMC(model, prior, distance, 20, sampler=sampler)\n abc.new(db_path, observation)\n abc.run(minimum_epsilon=.1, max_nr_populations=3)\n\n\nhistories = []\nlabels = []\nfor j in range(n_history):\n history = pyabc.History(db_path)\n history.id = j + 1\n histories.append(history)\n labels.append(\"Some run \" + str(j))\n\n\ndef test_epsilons():\n pyabc.visualization.plot_epsilons(histories, labels)\n plt.close()\n\n\ndef test_sample_numbers():\n pyabc.visualization.plot_sample_numbers(\n histories, rotation=43, size=(5, 5))\n _, ax = plt.subplots()\n pyabc.visualization.plot_sample_numbers(histories, labels, ax=ax)\n with pytest.raises(ValueError):\n pyabc.visualization.plot_sample_numbers(histories, [labels[0]])\n plt.close()\n\n\ndef test_sample_numbers_trajectory():\n pyabc.visualization.plot_sample_numbers_trajectory(\n histories, labels, yscale='log', rotation=90)\n _, ax = plt.subplots()\n pyabc.visualization.plot_sample_numbers_trajectory(\n histories, labels, yscale='log10', size=(8, 8), ax=ax)\n plt.close()\n\n\ndef test_acceptance_rates_trajectory():\n pyabc.visualization.plot_acceptance_rates_trajectory(\n histories, labels, yscale='log', rotation=76)\n _, ax = plt.subplots()\n pyabc.visualization.plot_acceptance_rates_trajectory(\n histories, labels, yscale='log10', rotation=76, size=(10, 5), ax=ax)\n plt.close()\n\n\ndef test_total_sample_numbers():\n pyabc.visualization.plot_total_sample_numbers(histories)\n pyabc.visualization.plot_total_sample_numbers(\n histories, labels, yscale='log', size=(10, 5))\n _, ax = plt.subplots()\n pyabc.visualization.plot_total_sample_numbers(\n histories, rotation=75, yscale='log10', ax=ax)\n plt.close()\n\n\ndef test_effective_sample_sizes():\n pyabc.visualization.plot_effective_sample_sizes(\n histories, labels, rotation=45, relative=True)\n plt.close()\n\n\ndef test_histograms():\n # 1d\n pyabc.visualization.plot_histogram_1d(\n histories[0], 'p0', bins=20,\n xmin=limits['p0'][0], xmax=limits['p0'][1], size=(5, 5), refval=p_true)\n # 2d\n pyabc.visualization.plot_histogram_2d(histories[0], 'p0', 'p1')\n pyabc.visualization.plot_histogram_2d(\n histories[0], 'p0', 'p1', xmin=limits['p0'][0], xmax=limits['p0'][1],\n ymin=limits['p1'][0], ymax=limits['p1'][1], size=(5, 6), refval=p_true)\n # matrix\n pyabc.visualization.plot_histogram_matrix(\n histories[0], bins=1000, size=(6, 7), refval=p_true)\n plt.close()\n\n\ndef test_kdes():\n history = histories[0]\n df, w = history.get_distribution(m=0, t=None)\n pyabc.visualization.plot_kde_1d(\n df, w, x='p0',\n xmin=limits['p0'][0], xmax=limits['p0'][1],\n label=\"PDF\")\n pyabc.visualization.plot_kde_2d(df, w, x='p0', y='p1')\n pyabc.visualization.plot_kde_matrix(df, w)\n\n # also use the highlevel 
interfaces\n pyabc.visualization.plot_kde_1d_highlevel(history, x='p0', size=(4, 5),\n refval=p_true)\n pyabc.visualization.plot_kde_2d_highlevel(history, x='p0', y='p1',\n size=(7, 5),\n refval=p_true)\n pyabc.visualization.plot_kde_matrix_highlevel(history, height=27.43,\n refval=p_true)\n plt.close()\n\n\ndef test_credible_intervals():\n pyabc.visualization.plot_credible_intervals(histories[0])\n pyabc.visualization.plot_credible_intervals(\n histories[0], levels=[0.2, 0.5, 0.9],\n show_kde_max_1d=True, show_kde_max=True, show_mean=True,\n refval=p_true)\n pyabc.visualization.plot_credible_intervals_for_time(\n histories, levels=[0.5, 0.99],\n show_kde_max_1d=True, show_kde_max=True, show_mean=True,\n refvals=p_true)\n plt.close()\n\n\ndef test_model_probabilities():\n pyabc.visualization.plot_model_probabilities(histories[0])\n plt.close()\n\n\ndef test_data_callback():\n def plot_data(sum_stat, weight, ax, **kwargs):\n ax.plot(sum_stat['ss0'], alpha=weight, **kwargs)\n\n def plot_data_aggregated(sum_stats, weights, ax, **kwargs):\n data = np.array([sum_stat['ss0'] for sum_stat in sum_stats])\n weights = np.array(weights).reshape((-1, 1))\n mean = (data * weights).sum(axis=0)\n plot_data({'ss0': mean}, 1.0, ax)\n\n pyabc.visualization.plot_data_callback(\n histories[0], plot_data, plot_data_aggregated)\n\n\ndef test_data_default():\n obs_dict = {1: 0.7, 2: np.array([43, 423, 5.5]),\n 3: pd.DataFrame({'a': [1, 2], 'b': [4, 6]})}\n sim_dict = {1: 6.5, 2: np.array([32, 5, 6]),\n 3: pd.DataFrame({'a': [1.55, -0.1], 'b': [54, 6]})}\n pyabc.visualization.plot_data_default(obs_dict, sim_dict)\n for i in range(5):\n obs_dict[i] = i + 1\n sim_dict[i] = i + 2\n pyabc.visualization.plot_data_default(obs_dict, sim_dict)\n plt.close()\n",
"import numpy as np\nimport scipy as sp\nimport pandas as pd\nimport numbers\nfrom typing import Callable, List, Union\nimport logging\n\nfrom .base import Epsilon\nfrom ..distance import SCALE_LIN\nfrom ..sampler import Sampler\nfrom ..storage import save_dict_to_json\n\nlogger = logging.getLogger(\"Epsilon\")\n\n\nclass TemperatureBase(Epsilon):\n \"\"\"\n A temperature scheme handles the decrease of the temperatures employed\n by a :class:`pyabc.acceptor.StochasticAcceptor` over time.\n\n This class is not functional on its own, its derivatives must be used.\n \"\"\"\n\n\nclass ListTemperature(TemperatureBase):\n \"\"\"\n Pass a list of temperature values to use successively.\n\n Parameters\n ----------\n values:\n The array of temperatures to use successively.\n For exact inference, finish with 1.\n \"\"\"\n\n def __init__(self, values: List[float]):\n self.values = values\n\n def __call__(self,\n t: int) -> float:\n return self.values[t]\n\n\nclass Temperature(TemperatureBase):\n \"\"\"\n This class implements a highly adaptive and configurable temperature\n scheme. Via the argument `schemes`, arbitrary temperature schemes can be\n passed to calculate the next generation's temperature, via `aggregate_fun`\n one can define how to combine multiple guesses, via `initial_temperature`\n the initial temperature can be set.\n\n Parameters\n ----------\n schemes: Union[Callable, List[Callable]], optional\n Temperature schemes returning proposed\n temperatures for the next time point, e.g.\n instances of :class:`pyabc.epsilon.TemperatureScheme`.\n aggregate_fun: Callable[List[float], float], optional\n The function to aggregate the schemes by, of the form\n ``Callable[List[float], float]``.\n Defaults to taking the minimum.\n initial_temperature: float, optional\n The initial temperature. If None provided, an AcceptanceRateScheme\n is used.\n enforce_exact_final_temperature: bool, optional\n Whether to force the final temperature (if max_nr_populations < inf)\n to be 1.0, giving exact inference.\n log_file: str, optional\n A log file for storing data of the temperature that are currently not\n saved in the database. 
The data are saved in json format.\n\n Properties\n ----------\n max_nr_populations: int\n The maximum number of iterations as passed to ABCSMC.\n May be inf, but not all schemes can handle that (and will complain).\n temperatures: Dict[int, float]\n Times as keys and temperatures as values.\n \"\"\"\n\n def __init__(\n self,\n schemes: Union[Callable, List[Callable]] = None,\n aggregate_fun: Callable[[List[float]], float] = None,\n initial_temperature: float = None,\n enforce_exact_final_temperature: bool = True,\n log_file: str = None):\n self.schemes = schemes\n\n if aggregate_fun is None:\n # use minimum over all proposed temperature values\n aggregate_fun = min\n self.aggregate_fun = aggregate_fun\n\n if initial_temperature is None:\n initial_temperature = AcceptanceRateScheme()\n self.initial_temperature = initial_temperature\n\n self.enforce_exact_final_temperature = enforce_exact_final_temperature\n self.log_file = log_file\n\n # to be filled later\n self.max_nr_populations = None\n self.temperatures = {}\n self.temperature_proposals = {}\n\n def initialize(self,\n t: int,\n get_weighted_distances: Callable[[], pd.DataFrame],\n get_all_records: Callable[[], List[dict]],\n max_nr_populations: int,\n acceptor_config: dict):\n self.max_nr_populations = max_nr_populations\n\n # set default schemes\n if self.schemes is None:\n # this combination proved rather stable\n acc_rate_scheme = AcceptanceRateScheme()\n decay_scheme = (\n ExpDecayFixedIterScheme() if np.isfinite(max_nr_populations)\n else ExpDecayFixedRatioScheme())\n self.schemes = [acc_rate_scheme, decay_scheme]\n\n # set initial temperature for time t\n self._update(t, get_weighted_distances, get_all_records,\n 1.0, acceptor_config)\n\n def configure_sampler(self, sampler: Sampler):\n if callable(self.initial_temperature):\n self.initial_temperature.configure_sampler(sampler)\n for scheme in self.schemes:\n scheme.configure_sampler(sampler)\n\n def update(self,\n t: int,\n get_weighted_distances: Callable[[], pd.DataFrame],\n get_all_records: Callable[[], List[dict]],\n acceptance_rate: float,\n acceptor_config: dict):\n # set temperature for time t\n self._update(t, get_weighted_distances,\n get_all_records, acceptance_rate,\n acceptor_config)\n\n def _update(self,\n t: int,\n get_weighted_distances: Callable[[], pd.DataFrame],\n get_all_records: Callable[[], List[dict]],\n acceptance_rate: float,\n acceptor_config):\n \"\"\"\n Compute the temperature for time `t`.\n \"\"\"\n # scheme arguments\n kwargs = dict(\n t=t,\n get_weighted_distances=get_weighted_distances,\n get_all_records=get_all_records,\n max_nr_populations=self.max_nr_populations,\n pdf_norm=acceptor_config['pdf_norm'],\n kernel_scale=acceptor_config['kernel_scale'],\n prev_temperature=self.temperatures.get(t-1, None),\n acceptance_rate=acceptance_rate,\n )\n\n if t >= self.max_nr_populations - 1 \\\n and self.enforce_exact_final_temperature:\n # t is last time\n temps = [1.0]\n elif not self.temperatures: # need an initial value\n if callable(self.initial_temperature):\n # execute scheme\n temps = [self.initial_temperature(**kwargs)]\n elif isinstance(self.initial_temperature, numbers.Number):\n temps = [self.initial_temperature]\n else:\n raise ValueError(\n \"Initial temperature must be a float or a callable\")\n else:\n # evaluate schemes\n temps = []\n for scheme in self.schemes:\n temp = scheme(**kwargs)\n temps.append(temp)\n\n # compute next temperature based on proposals and fallback\n # should not be higher than before\n fallback = 
self.temperatures[t-1] \\\n if t-1 in self.temperatures else np.inf\n temperature = self.aggregate_fun(temps)\n # also a value lower than 1.0 does not make sense\n temperature = max(min(temperature, fallback), 1.0)\n\n if not np.isfinite(temperature):\n raise ValueError(\"Temperature must be finite.\")\n # record found value\n self.temperatures[t] = temperature\n\n # logging\n logger.debug(f\"Proposed temperatures for {t}: {temps}.\")\n self.temperature_proposals[t] = temps\n if self.log_file:\n save_dict_to_json(self.temperature_proposals, self.log_file)\n\n def __call__(self,\n t: int) -> float:\n return self.temperatures[t]\n\n\nclass TemperatureScheme:\n \"\"\"\n A TemperatureScheme suggests the next temperature value. It is used as\n one of potentially multiple schemes employed in the Temperature class.\n This class is abstract.\n\n Parameters\n ----------\n t:\n The time to compute for.\n get_weighted_distances:\n Callable to obtain the weights and kernel values to be used for\n the scheme.\n get_all_records:\n Callable returning a List[dict] of all recorded particles.\n max_nr_populations:\n The maximum number of populations that are supposed to be taken.\n pdf_norm:\n The normalization constant c that will be used in the acceptance step.\n kernel_scale:\n Scale on which the pdf values are (linear or logarithmic).\n prev_temperature:\n The temperature that was used last time (or None if not applicable).\n acceptance_rate:\n The recently obtained rate.\n \"\"\"\n\n def __init__(self):\n pass\n\n def configure_sampler(self, sampler: Sampler):\n \"\"\"\n Modify the sampler. As in, and redirected from,\n :func:`pyabc.epsilon.Temperature.configure_sampler`.\n \"\"\"\n\n def __call__(self,\n t: int,\n get_weighted_distances: Callable[[], pd.DataFrame],\n get_all_records: Callable[[], List[dict]],\n max_nr_populations: int,\n pdf_norm: float,\n kernel_scale: str,\n prev_temperature: float,\n acceptance_rate: float):\n pass\n\n\nclass AcceptanceRateScheme(TemperatureScheme):\n \"\"\"\n Try to keep the acceptance rate constant at a value of\n `target_rate`. Note that this scheme will fail to\n reduce the temperature sufficiently in later iterations, if the\n problem's inherent acceptance rate is lower, but it has been\n observed to give big feasible temperature leaps in early iterations.\n In particular, this scheme can be used to propose an initial temperature.\n\n Parameters\n ----------\n target_rate: float, optional\n The target acceptance rate to match.\n min_rate: float, optional\n The minimum rate below which not to apply the acceptance step scheme\n any more. Setting this to a value of e.g. 
0.05 can make sense\n 1) because it may be unlikely that the acceptance rate scheme will\n propose a useful temperature at such low acceptance levels, and\n 2) to avoid uneccessary computations.\n \"\"\"\n\n def __init__(self, target_rate: float = 0.3, min_rate: float = None):\n self.target_rate = target_rate\n self.min_rate = min_rate\n\n def configure_sampler(self, sampler: Sampler):\n sampler.sample_factory.record_rejected = True\n\n def __call__(self,\n t: int,\n get_weighted_distances: Callable[[], pd.DataFrame],\n get_all_records: Callable[[], List[dict]],\n max_nr_populations: int,\n pdf_norm: float,\n kernel_scale: str,\n prev_temperature: float,\n acceptance_rate: float):\n # check minimum rate\n if self.min_rate is not None and acceptance_rate < self.min_rate:\n return np.inf\n\n # execute function (expensive if in calibration)\n records = get_all_records()\n # convert to dataframe for easier extraction\n records = pd.DataFrame(records)\n\n # previous and current transition densities\n t_pd_prev = np.array(records['transition_pd_prev'], dtype=float)\n t_pd = np.array(records['transition_pd'], dtype=float)\n # acceptance kernel likelihoods\n pds = np.array(records['distance'], dtype=float)\n\n # compute importance weights\n weights = t_pd / t_pd_prev\n # len would suffice, but maybe rather not rely on things to be normed\n weights /= sum(weights)\n\n temperature = match_acceptance_rate(\n weights, pds, pdf_norm, kernel_scale, self.target_rate)\n\n return temperature\n\n\ndef match_acceptance_rate(\n weights, pds, pdf_norm, kernel_scale, target_rate):\n \"\"\"\n For large temperature, changes become effective on an exponential scale,\n thus we optimize the logarithm of the inverse temperature beta.\n\n For a temperature close to 1, subtler changes are neccesary, however here\n the logarhtm is nearly linear anyway.\n \"\"\"\n # objective function which we wish to find a root for\n def obj(b):\n beta = np.exp(b)\n\n # compute rescaled posterior densities\n if kernel_scale == SCALE_LIN:\n acc_probs = (pds / pdf_norm) ** beta\n else: # kernel_scale == SCALE_LOG\n acc_probs = np.exp((pds - pdf_norm) * beta)\n\n # to acceptance probabilities to be sure\n acc_probs = np.minimum(acc_probs, 1.0)\n\n # objective function\n val = np.sum(weights * acc_probs) - target_rate\n return val\n\n # TODO the lower boundary min_b is somewhat arbitrary\n min_b = -100\n if obj(0) > 0:\n # function is monotonically decreasing\n # smallest possible value already > 0\n b_opt = 0\n elif obj(min_b) < 0:\n # it is obj(-inf) > 0 always\n logger.info(\"AcceptanceRateScheme: Numerics limit temperature.\")\n b_opt = min_b\n else:\n # perform binary search\n b_opt = sp.optimize.bisect(obj, min_b, 0, maxiter=100000)\n\n beta_opt = np.exp(b_opt)\n\n temperature = 1. / beta_opt\n return temperature\n\n\nclass ExpDecayFixedIterScheme(TemperatureScheme):\n \"\"\"\n The next temperature is set as\n\n .. math::\n T_j = T_{max}^{(n-j)/n}\n\n where n denotes the number of populations, and j=1,...,n the iteration.\n This translates to\n\n .. math::\n T_j = T_{j-1}^{(n-j)/(n-(j-1))}.\n\n This ensures that a temperature of 1.0 is reached after exactly the\n remaining number of steps.\n\n So, in both cases the sequence of temperatures follows an exponential\n decay, also known as a geometric progression, or a linear progression\n in log-space.\n\n Note that the formula is applied anew in each iteration.\n This is advantageous if also other schemes are used s.t. 
T_{j-1}\n is smaller than by the above.\n\n Parameters\n ----------\n\n alpha: float\n Factor by which to reduce the temperature, if `max_nr_populations`\n is infinite.\n \"\"\"\n\n def __init__(self):\n pass\n\n def __call__(self,\n t: int,\n get_weighted_distances: Callable[[], pd.DataFrame],\n get_all_records: Callable[[], List[dict]],\n max_nr_populations: int,\n pdf_norm: float,\n kernel_scale: str,\n prev_temperature: float,\n acceptance_rate: float):\n # needs a finite number of iterations\n if max_nr_populations == np.inf:\n raise ValueError(\n \"The ExpDecayFixedIterScheme requires a finite \"\n \"`max_nr_populations`.\")\n\n # needs a starting temperature\n # if not available, return infinite temperature\n if prev_temperature is None:\n return np.inf\n\n # base temperature\n temp_base = prev_temperature\n\n # how many steps left?\n t_to_go = max_nr_populations - t\n\n # compute next temperature according to exponential decay\n temperature = temp_base ** ((t_to_go - 1) / t_to_go)\n\n return temperature\n\n\nclass ExpDecayFixedRatioScheme(TemperatureScheme):\n \"\"\"\n The next temperature is chosen as\n\n .. math::\n T_j = \\\\alpha \\\\cdot T_{j-1}.\n\n Like the :class:`pyabc.epsilon.ExpDecayFixedIterScheme`,\n this yields a geometric progression, however with a fixed ratio,\n irrespective of the number of iterations. If a finite number of\n iterations is specified in ABCSMC, there is no influence on the final\n jump to a temperature of 1.0.\n\n This is quite similar to the :class:`pyabc.epsilon.DalyScheme`, although\n simpler in implementation. The alpha value here corresponds to a value of\n 1 - alpha there.\n\n Parameters\n ----------\n alpha: float, optional\n The ratio of subsequent temperatures.\n min_rate: float, optional\n A minimum acceptance rate. If this rate has been violated in the\n previous iteration, the alpha value is increased.\n max_rate: float, optional\n Maximum rate to not be exceeded, otherwise the alpha value is\n decreased.\n \"\"\"\n def __init__(self, alpha: float = 0.5,\n min_rate: float = 1e-4, max_rate: float = 0.5):\n self.alpha = alpha\n self.min_rate = min_rate\n self.max_rate = max_rate\n self.alphas = {}\n\n def __call__(self,\n t: int,\n get_weighted_distances: Callable[[], pd.DataFrame],\n get_all_records: Callable[[], List[dict]],\n max_nr_populations: int,\n pdf_norm: float,\n kernel_scale: str,\n prev_temperature: float,\n acceptance_rate: float):\n if prev_temperature is None:\n return np.inf\n\n # previous alpha\n alpha = self.alphas.get(t-1, self.alpha)\n\n # check if acceptance rate criterion violated\n if acceptance_rate > self.max_rate and t > 1:\n logger.debug(\"ExpDecayFixedRatioScheme: \"\n \"Reacting to high acceptance rate.\")\n alpha = max(alpha / 2, alpha - (1 - alpha) * 2)\n if acceptance_rate < self.min_rate:\n logger.debug(\"ExpDecayFixedRatioScheme: \"\n \"Reacting to low acceptance rate.\")\n # increase alpha\n alpha = alpha + (1 - alpha) / 2\n # record\n self.alphas[t] = alpha\n\n # reduce temperature\n temperature = self.alphas[t] * prev_temperature\n\n return temperature\n\n\nclass PolynomialDecayFixedIterScheme(TemperatureScheme):\n \"\"\"\n Compute next temperature as pre-last entry in\n\n >>> np.linspace(1, (temp_base)**(1 / temp_decay_exponent),\n >>> t_to_go + 1) ** temp_decay_exponent)\n\n Requires finite `max_nr_populations`.\n\n Note that this is similar to the\n :class:`pyabc.epsilon.ExpDecayFixedIterScheme`, which is\n indeed the limit for `exponent -> infinity`. 
For smaller\n exponent, the sequence makes larger steps for low temperatures. This\n can be useful in cases, where lower temperatures (which are usually\n more expensive) can be traversed in few larger steps, however also\n the opposite may be true, i.e. that more steps at low temperatures\n are advantageous.\n\n Parameters\n ----------\n exponent: float, optional\n The exponent to use in the scheme.\n \"\"\"\n\n def __init__(self, exponent: float = 3):\n self.exponent = exponent\n\n def __call__(self,\n t: int,\n get_weighted_distances: Callable[[], pd.DataFrame],\n get_all_records: Callable[[], List[dict]],\n max_nr_populations: int,\n pdf_norm: float,\n kernel_scale: str,\n prev_temperature: float,\n acceptance_rate: float):\n # needs a starting temperature\n # if not available, return infinite temperature\n if prev_temperature is None:\n return np.inf\n\n # base temperature\n temp_base = prev_temperature\n\n # check if we can compute a decay step\n if max_nr_populations == np.inf:\n raise ValueError(\"Can only perform PolynomialDecayScheme step \"\n \"with a finite max_nr_populations.\")\n\n # how many steps left?\n t_to_go = max_nr_populations - t\n\n # compute sequence\n temps = np.linspace(1, (temp_base)**(1 / self.exponent),\n t_to_go+1) ** self.exponent\n\n logger.debug(f\"Temperatures proposed by polynomial decay method: \"\n f\"{temps}.\")\n\n # pre-last step is the next step\n temperature = temps[-2]\n return temperature\n\n\nclass DalyScheme(TemperatureScheme):\n \"\"\"\n This scheme is loosely based on [#daly2017]_, however note that it does\n not try to replicate it entirely. In particular, the implementation\n of pyABC does not allow the sampling to be stopped when encountering\n too low acceptance rates, such that this can only be done ex-posteriori\n here.\n\n Parameters\n ----------\n alpha: float, optional\n The ratio by which to decrease the temperature value. More\n specifically, the next temperature is given as\n `(1-alpha) * temperature`.\n min_rate: float, optional\n A minimum acceptance rate. If this rate has been violated in the\n previous iteration, the alpha value is decreased.\n\n\n .. [#daly2017] Daly Aidan C., Cooper Jonathan, Gavaghan David J.,\n and Holmes Chris. \"Comparing two sequential Monte Carlo samplers\n for exact and approximate Bayesian inference on biological\n models\". Journal of The Royal Society Interface, 2017.\n \"\"\"\n\n def __init__(self, alpha: float = 0.5, min_rate: float = 1e-4):\n self.alpha = alpha\n self.min_rate = min_rate\n self.k = {}\n\n def __call__(self,\n t: int,\n get_weighted_distances: Callable[[], pd.DataFrame],\n get_all_records: Callable[[], List[dict]],\n max_nr_populations: int,\n pdf_norm: float,\n kernel_scale: str,\n prev_temperature: float,\n acceptance_rate: float):\n # needs a starting temperature\n # if not available, return infinite temperature\n if prev_temperature is None:\n return np.inf\n\n # base temperature\n temp_base = prev_temperature\n\n # addressing the std, not the var\n eps_base = np.sqrt(temp_base)\n\n if not self.k:\n # initial iteration\n self.k[t - 1] = eps_base\n\n k_base = self.k[t - 1]\n\n if acceptance_rate < self.min_rate:\n logger.debug(\"DalyScheme: Reacting to low acceptance rate.\")\n # reduce reduction\n k_base = self.alpha * k_base\n\n self.k[t] = min(k_base, self.alpha * eps_base)\n eps = eps_base - self.k[t]\n temperature = eps**2\n\n return temperature\n\n\nclass FrielPettittScheme(TemperatureScheme):\n \"\"\"\n Basically takes linear steps in log-space. 
See [#vyshemirsky2008]_.\n\n .. [#vyshemirsky2008] Vyshemirsky, Vladislav, and Mark A. Girolami.\n \"Bayesian ranking of biochemical system models.\"\n Bioinformatics 24.6 (2007): 833-839.\n \"\"\"\n\n def __call__(self,\n t: int,\n get_weighted_distances: Callable[[], pd.DataFrame],\n get_all_records: Callable[[], List[dict]],\n max_nr_populations: int,\n pdf_norm: float,\n kernel_scale: str,\n prev_temperature: float,\n acceptance_rate: float):\n # needs a starting temperature\n # if not available, return infinite temperature\n if prev_temperature is None:\n return np.inf\n\n # check if we can compute a decay step\n if max_nr_populations == np.inf:\n raise ValueError(\"Can only perform FrielPettittScheme step with a \"\n \"finite max_nr_populations.\")\n\n # base temperature\n temp_base = prev_temperature\n beta_base = 1. / temp_base\n\n # time to go\n t_to_go = max_nr_populations - t\n\n beta = beta_base + ((1. - beta_base) * 1 / t_to_go) ** 2\n\n temperature = 1. / beta\n return temperature\n\n\nclass EssScheme(TemperatureScheme):\n \"\"\"\n Try to keep the effective sample size (ESS) constant.\n\n Parameters\n ----------\n target_relative_ess: float\n Targe relative effective sample size.\n \"\"\"\n\n def __init__(self, target_relative_ess: float = 0.8):\n self.target_relative_ess = target_relative_ess\n\n def __call__(self,\n t: int,\n get_weighted_distances: Callable[[], pd.DataFrame],\n get_all_records: Callable[[], List[dict]],\n max_nr_populations: int,\n pdf_norm: float,\n kernel_scale: str,\n prev_temperature: float,\n acceptance_rate: float):\n # execute function (expensive if in calibration)\n df = get_weighted_distances()\n\n weights = np.array(df['w'], dtype=float)\n pdfs = np.array(df['distance'], dtype=float)\n\n # compute rescaled posterior densities\n if kernel_scale == SCALE_LIN:\n values = pdfs / pdf_norm\n else: # kernel_scale == SCALE_LOG\n values = np.exp(pdfs - pdf_norm)\n\n # to probability mass function (i.e. normalize)\n weights /= np.sum(weights)\n\n target_ess = len(weights) * self.target_relative_ess\n\n if prev_temperature is None:\n beta_base = 0.0\n else:\n beta_base = 1. / prev_temperature\n\n # objective to minimize\n def obj(beta):\n return (_ess(values, weights, beta) - target_ess)**2\n\n bounds = sp.optimize.Bounds(lb=np.array([beta_base]),\n ub=np.array([1.]))\n # TODO make more efficient by providing gradients\n ret = sp.optimize.minimize(\n obj, x0=np.array([0.5 * (1 + beta_base)]),\n bounds=bounds)\n beta = ret.x\n\n temperature = 1. / beta\n return temperature\n\n\ndef _ess(pdfs, weights, beta):\n \"\"\"\n Effective sample size (ESS) of importance samples.\n \"\"\"\n num = np.sum(weights * pdfs**beta)**2\n den = np.sum((weights * pdfs**beta)**2)\n return num / den\n",
"import matplotlib.pyplot as plt\nimport matplotlib as mpl\nfrom matplotlib.ticker import MaxNLocator\nfrom typing import Union, List\nimport numpy as np\n\nfrom ..storage import History\nfrom .util import to_lists_or_default\n\n\ndef plot_epsilons(\n histories: Union[List, History],\n labels: Union[List, str] = None,\n colors: List = None,\n scale: str = None,\n title: str = \"Epsilon values\",\n size: tuple = None,\n ax: mpl.axes.Axes = None):\n \"\"\"\n Plot epsilon trajectory.\n\n Parameters\n ----------\n\n histories: Union[List, History]\n The histories to plot from. History ids must be set correctly.\n labels: Union[List ,str], optional\n Labels corresponding to the histories. If None are provided,\n indices are used as labels.\n colors: List, optional\n Colors to use for the lines. If None, then the matplotlib\n default values are used.\n scale: str, optional (default='lin')\n Scaling to apply to the y axis.\n Must be one of 'lin', 'log', 'log10'.\n title: str, optional (default = \"Epsilon values\")\n Title for the plot.\n size: tuple of float, optional\n The size of the plot in inches.\n ax: matplotlib.axes.Axes, optional\n The axis object to use. A new one is created if None.\n\n Returns\n -------\n\n ax: Axis of the generated plot.\n \"\"\"\n # preprocess input\n histories, labels = to_lists_or_default(histories, labels)\n if colors is None:\n colors = [None for _ in range(len(histories))]\n if scale is None:\n scale = 'lin'\n\n # create figure\n if ax is None:\n fig, ax = plt.subplots()\n else:\n fig = ax.get_figure()\n\n # extract epsilons\n eps = []\n for history in histories:\n # note: first entry is from calibration and thus translates to inf,\n # thus must be discarded\n eps.append(np.array(history.get_all_populations()['epsilon'][1:]))\n\n # scale\n eps = _apply_scale(eps, scale)\n\n # plot\n for ep, label, color in zip(eps, labels, colors):\n ax.plot(ep, 'x-', label=label, color=color)\n\n # format\n ax.set_xlabel(\"Population index\")\n ax.set_ylabel(_get_ylabel(scale))\n ax.legend()\n ax.set_title(title)\n # enforce integer ticks\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n # set size\n if size is not None:\n fig.set_size_inches(size)\n fig.tight_layout()\n\n return ax\n\n\ndef _apply_scale(eps, scale):\n \"\"\"\n Apply the `scale` transformation to `eps`.\n \"\"\"\n if scale == 'log':\n eps = [np.log(ep) for ep in eps]\n elif scale == 'log10':\n eps = [np.log10(ep) for ep in eps]\n elif scale != 'lin':\n raise ValueError(f\"Scale {scale} must be one of lin, log, log10.\")\n return eps\n\n\ndef _get_ylabel(scale):\n \"\"\"\n Get corect y axis label.\n \"\"\"\n if scale == 'log':\n ylabel = \"Log(Epsilon)\"\n elif scale == 'log10':\n ylabel = \"Log10(Epsilon)\"\n else:\n ylabel = \"Epsilon\"\n return ylabel\n",
"import os\nimport json\nfrom flask import Flask, render_template\nfrom flask_bootstrap import Bootstrap\nimport click\nfrom pyabc import History\nimport pandas as pd\nimport bokeh.plotting.helpers as helpers\nfrom bokeh.plotting import figure\n# this has to be set before the other bokeh imports\nhelpers.DEFAULT_PALETTE = ['#000000', # Wong nature colorblind palette\n '#e69f00',\n '#56b4e9',\n '#009e73',\n '#f0e442',\n '#0072b2',\n '#d55e00',\n '#cc79a7']\nfrom bokeh.embed import components # noqa: E402\nfrom bokeh.resources import INLINE # noqa: E402\nfrom bokeh.models.widgets import Panel, Tabs # noqa: E402\n\nBOKEH = INLINE\n\n\nclass PlotScriptDiv:\n def __init__(self, script, div):\n self.script = script\n self.div = div\n\n\napp = Flask(__name__)\nBootstrap(app)\n\n\[email protected]('/')\ndef main():\n return render_template(\"index.html\")\n\n\[email protected](\"/abc\")\ndef abc_overview():\n history = app.config[\"HISTORY\"]\n runs = history.all_runs()\n return render_template(\"abc_overview.html\", runs=runs)\n\n\nclass ABCInfo:\n def __init__(self, abc):\n self.abc = abc\n\n def __getattr__(self, item):\n json_str = getattr(self.abc, item).replace(\"'\", '\"')\n try:\n return json.loads(json_str)\n except json.JSONDecodeError:\n return {}\n\n\[email protected](\"/abc/<int:abc_id>\")\ndef abc_detail(abc_id):\n history = app.config[\"HISTORY\"]\n history.id = abc_id\n abc = ABCInfo(history.get_abc())\n model_probabilities = history.get_model_probabilities()\n model_ids = model_probabilities.columns\n model_probabilities.columns = list(map(\"{}\".format,\n model_probabilities.columns))\n model_probabilities = model_probabilities.reset_index()\n if len(model_probabilities) > 0:\n populations = history.get_all_populations()\n populations = populations[populations.t >= 0]\n particles = (history.get_nr_particles_per_population().reset_index()\n .rename(columns={\"index\": \"t\", \"t\": \"particles\"})\n .query(\"t >= 0\"))\n\n melted = pd.melt(model_probabilities, id_vars=\"t\", var_name=\"m\",\n value_name=\"p\")\n melted[\"m\"] = pd.to_numeric(melted[\"m\"])\n\n # although it might seem cumbersome, not using the bkcharts\n # package works more reliably\n\n prob_plot = figure()\n prob_plot.xaxis.axis_label = 'Generation t'\n prob_plot.yaxis.axis_label = 'Probability'\n for c, (m, data) in zip(helpers.DEFAULT_PALETTE, melted.groupby(\"m\")):\n prob_plot.line(data[\"t\"], data[\"p\"],\n legend=\"Model \" + str(m), color=c,\n line_width=2)\n\n particles_fig = figure()\n particles_fig.xaxis.axis_label = 'Generation t'\n particles_fig.yaxis.axis_label = 'Particles'\n particles_fig.line(particles[\"t\"], particles[\"particles\"],\n line_width=2)\n\n samples_fig = figure()\n samples_fig.xaxis.axis_label = 'Generation t'\n samples_fig.yaxis.axis_label = 'Samples'\n samples_fig.line(populations[\"t\"], populations[\"samples\"],\n line_width=2)\n\n eps_fig = figure()\n eps_fig.xaxis.axis_label = 'Generation t'\n eps_fig.yaxis.axis_label = 'Epsilon'\n eps_fig.line(populations[\"t\"], populations[\"epsilon\"],\n line_width=2)\n\n plot = Tabs(tabs=[\n Panel(child=prob_plot, title=\"Probability\"),\n Panel(child=samples_fig, title=\"Samples\"),\n Panel(child=particles_fig, title=\"Particles\"),\n Panel(child=eps_fig, title=\"Epsilon\")])\n plot = PlotScriptDiv(*components(plot))\n\n return render_template(\"abc_detail.html\",\n abc_id=abc_id,\n plot=plot,\n BOKEH=BOKEH,\n model_ids=model_ids,\n abc=abc)\n return render_template(\"abc_detail.html\",\n abc_id=abc_id,\n plot=PlotScriptDiv(\"\", 
\"Exception: No data found.\"),\n BOKEH=BOKEH,\n abc=abc)\n\n\[email protected](\"/abc/<int:abc_id>/model/<int:model_id>/t/<t>\")\ndef abc_model(abc_id, model_id, t):\n history = app.config[\"HISTORY\"]\n history.id = abc_id\n if t == \"max\":\n t = history.max_t\n else:\n t = int(t)\n df, w = history.get_distribution(model_id, t)\n df[\"CDF\"] = w\n tabs = []\n\n model_ids = history.get_model_probabilities().columns\n for parameter in [col for col in df if col != \"CDF\"]:\n plot_df = df[[\"CDF\", parameter]].sort_values(parameter)\n plot_df_cumsum = plot_df.cumsum()\n plot_df_cumsum[parameter] = plot_df[parameter]\n f = figure()\n f.line(x=plot_df_cumsum[parameter], y=plot_df_cumsum[\"CDF\"])\n p = Panel(child=f, title=parameter)\n tabs.append(p)\n if len(tabs) == 0:\n plot = PlotScriptDiv(\"\", \"This model has no Parameters\")\n else:\n plot = PlotScriptDiv(*components(Tabs(tabs=tabs)))\n return render_template(\"model.html\",\n abc_id=abc_id,\n model_id=model_id,\n plot=plot,\n BOKEH=BOKEH,\n model_ids=model_ids,\n t=t,\n available_t=list(range(history.max_t+1)))\n\n\[email protected](\"/info\")\ndef server_info():\n history = app.config[\"HISTORY\"]\n return render_template(\"server_info.html\", db_path=history.db_file(),\n db_size=round(history.db_size, 2))\n\n\[email protected](404)\ndef page_not_found(e):\n return render_template('404.html'), 404\n\n\[email protected]()\[email protected](\"--debug\", default=False, type=bool,\n help=\"Whether to run the server in debug mode\")\[email protected](\"--port\", default=5000, type=int,\n help=\"The port on which the server runs\")\[email protected](\"db\")\ndef run_app(db, debug, port):\n db = os.path.expanduser(db)\n history = History(\"sqlite:///\" + db)\n app.config[\"HISTORY\"] = history\n app.run(debug=debug, port=port)\n"
] | [
[
"matplotlib.pyplot.subplots",
"pandas.DataFrame",
"matplotlib.pyplot.close",
"numpy.random.uniform",
"numpy.array"
],
[
"numpy.minimum",
"numpy.sqrt",
"numpy.isfinite",
"numpy.linspace",
"pandas.DataFrame",
"numpy.exp",
"scipy.optimize.bisect",
"numpy.array",
"numpy.sum"
],
[
"matplotlib.ticker.MaxNLocator",
"numpy.log",
"numpy.log10",
"matplotlib.pyplot.subplots"
],
[
"pandas.to_numeric",
"pandas.melt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
supernord/tools-iuc | [
"95f1ae4ed1cdd56114df76d215f9e1ed549aa4c5"
] | [
"tools/vsnp/vsnp_statistics.py"
] | [
"#!/usr/bin/env python\n\nimport argparse\nimport csv\nimport gzip\nimport os\nfrom functools import partial\n\nimport numpy\nimport pandas\nfrom Bio import SeqIO\n\n\ndef nice_size(size):\n # Returns a readably formatted string with the size\n words = ['bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB']\n prefix = ''\n try:\n size = float(size)\n if size < 0:\n size = abs(size)\n prefix = '-'\n except Exception:\n return '??? bytes'\n for ind, word in enumerate(words):\n step = 1024 ** (ind + 1)\n if step > size:\n size = size / float(1024 ** ind)\n if word == 'bytes': # No decimals for bytes\n return \"%s%d bytes\" % (prefix, size)\n return \"%s%.1f %s\" % (prefix, size, word)\n return '??? bytes'\n\n\ndef output_statistics(fastq_files, idxstats_files, metrics_files, output_file, gzipped, dbkey):\n # Produce an Excel spreadsheet that\n # contains a row for each sample.\n columns = ['Reference', 'File Size', 'Mean Read Length', 'Mean Read Quality', 'Reads Passing Q30',\n 'Total Reads', 'All Mapped Reads', 'Unmapped Reads', 'Unmapped Reads Percentage of Total',\n 'Reference with Coverage', 'Average Depth of Coverage', 'Good SNP Count']\n data_frames = []\n for i, fastq_file in enumerate(fastq_files):\n idxstats_file = idxstats_files[i]\n metrics_file = metrics_files[i]\n file_name_base = os.path.basename(fastq_file)\n # Read fastq_file into a data frame.\n _open = partial(gzip.open, mode='rt') if gzipped else open\n with _open(fastq_file) as fh:\n identifiers = []\n seqs = []\n letter_annotations = []\n for seq_record in SeqIO.parse(fh, \"fastq\"):\n identifiers.append(seq_record.id)\n seqs.append(seq_record.seq)\n letter_annotations.append(seq_record.letter_annotations[\"phred_quality\"])\n # Convert lists to Pandas series.\n s1 = pandas.Series(identifiers, name='id')\n s2 = pandas.Series(seqs, name='seq')\n # Gather Series into a data frame.\n fastq_df = pandas.DataFrame(dict(id=s1, seq=s2)).set_index(['id'])\n total_reads = int(len(fastq_df.index) / 4)\n current_sample_df = pandas.DataFrame(index=[file_name_base], columns=columns)\n # Reference\n current_sample_df.at[file_name_base, 'Reference'] = dbkey\n # File Size\n current_sample_df.at[file_name_base, 'File Size'] = nice_size(os.path.getsize(fastq_file))\n # Mean Read Length\n sampling_size = 10000\n if sampling_size > total_reads:\n sampling_size = total_reads\n fastq_df = fastq_df.iloc[3::4].sample(sampling_size)\n dict_mean = {}\n list_length = []\n i = 0\n for id, seq, in fastq_df.iterrows():\n dict_mean[id] = numpy.mean(letter_annotations[i])\n list_length.append(len(seq.array[0]))\n i += 1\n current_sample_df.at[file_name_base, 'Mean Read Length'] = '%.1f' % numpy.mean(list_length)\n # Mean Read Quality\n df_mean = pandas.DataFrame.from_dict(dict_mean, orient='index', columns=['ave'])\n current_sample_df.at[file_name_base, 'Mean Read Quality'] = '%.1f' % df_mean['ave'].mean()\n # Reads Passing Q30\n reads_gt_q30 = len(df_mean[df_mean['ave'] >= 30])\n reads_passing_q30 = '{:10.2f}'.format(reads_gt_q30 / sampling_size)\n current_sample_df.at[file_name_base, 'Reads Passing Q30'] = reads_passing_q30\n # Total Reads\n current_sample_df.at[file_name_base, 'Total Reads'] = total_reads\n # All Mapped Reads\n all_mapped_reads, unmapped_reads = process_idxstats_file(idxstats_file)\n current_sample_df.at[file_name_base, 'All Mapped Reads'] = all_mapped_reads\n # Unmapped Reads\n current_sample_df.at[file_name_base, 'Unmapped Reads'] = unmapped_reads\n # Unmapped Reads Percentage of Total\n if unmapped_reads > 0:\n unmapped_reads_percentage = 
'{:10.2f}'.format(unmapped_reads / total_reads)\n else:\n unmapped_reads_percentage = 0\n current_sample_df.at[file_name_base, 'Unmapped Reads Percentage of Total'] = unmapped_reads_percentage\n # Reference with Coverage\n ref_with_coverage, avg_depth_of_coverage, good_snp_count = process_metrics_file(metrics_file)\n current_sample_df.at[file_name_base, 'Reference with Coverage'] = ref_with_coverage\n # Average Depth of Coverage\n current_sample_df.at[file_name_base, 'Average Depth of Coverage'] = avg_depth_of_coverage\n # Good SNP Count\n current_sample_df.at[file_name_base, 'Good SNP Count'] = good_snp_count\n data_frames.append(current_sample_df)\n output_df = pandas.concat(data_frames)\n output_df.to_csv(output_file, sep='\\t', quoting=csv.QUOTE_NONE, escapechar='\\\\')\n\n\ndef process_idxstats_file(idxstats_file):\n all_mapped_reads = 0\n unmapped_reads = 0\n with open(idxstats_file, \"r\") as fh:\n for i, line in enumerate(fh):\n line = line.rstrip('\\r\\n')\n items = line.split(\"\\t\")\n if i == 0:\n # NC_002945.4 4349904 213570 4047\n all_mapped_reads = int(items[2])\n elif i == 1:\n # * 0 0 82774\n unmapped_reads = int(items[3])\n return all_mapped_reads, unmapped_reads\n\n\ndef process_metrics_file(metrics_file):\n ref_with_coverage = '0%'\n avg_depth_of_coverage = 0\n good_snp_count = 0\n with open(metrics_file, \"r\") as ifh:\n for i, line in enumerate(ifh):\n if i == 0:\n # Skip comments.\n continue\n line = line.rstrip('\\r\\n')\n items = line.split(\"\\t\")\n if i == 1:\n # MarkDuplicates 10.338671 98.74%\n ref_with_coverage = items[3]\n avg_depth_of_coverage = items[2]\n elif i == 2:\n # VCFfilter 611\n good_snp_count = items[1]\n return ref_with_coverage, avg_depth_of_coverage, good_snp_count\n\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument('--dbkey', action='store', dest='dbkey', help='Reference dbkey')\nparser.add_argument('--gzipped', action='store_true', dest='gzipped', required=False, default=False, help='Input files are gzipped')\nparser.add_argument('--input_idxstats_dir', action='store', dest='input_idxstats_dir', required=False, default=None, help='Samtools idxstats input directory')\nparser.add_argument('--input_metrics_dir', action='store', dest='input_metrics_dir', required=False, default=None, help='vSNP add zero coverage metrics input directory')\nparser.add_argument('--input_reads_dir', action='store', dest='input_reads_dir', required=False, default=None, help='Samples input directory')\nparser.add_argument('--list_paired', action='store_true', dest='list_paired', required=False, default=False, help='Input samples is a list of paired reads')\nparser.add_argument('--output', action='store', dest='output', help='Output Excel statistics file')\nparser.add_argument('--read1', action='store', dest='read1', help='Required: single read')\nparser.add_argument('--read2', action='store', dest='read2', required=False, default=None, help='Optional: paired read')\nparser.add_argument('--samtools_idxstats', action='store', dest='samtools_idxstats', help='Output of samtools_idxstats')\nparser.add_argument('--vsnp_azc', action='store', dest='vsnp_azc', help='Output of vsnp_add_zero_coverage')\n\nargs = parser.parse_args()\n\nfastq_files = []\nidxstats_files = []\nmetrics_files = []\n# Accumulate inputs.\nif args.read1 is not None:\n # The inputs are not dataset collections, so\n # read1, read2 (possibly) and vsnp_azc will also\n # not be None.\n fastq_files.append(args.read1)\n idxstats_files.append(args.samtools_idxstats)\n 
metrics_files.append(args.vsnp_azc)\n if args.read2 is not None:\n fastq_files.append(args.read2)\n idxstats_files.append(args.samtools_idxstats)\n metrics_files.append(args.vsnp_azc)\nelse:\n for file_name in sorted(os.listdir(args.input_reads_dir)):\n fastq_files.append(os.path.join(args.input_reads_dir, file_name))\n for file_name in sorted(os.listdir(args.input_idxstats_dir)):\n idxstats_files.append(os.path.join(args.input_idxstats_dir, file_name))\n if args.list_paired:\n # Add the idxstats file for reverse.\n idxstats_files.append(os.path.join(args.input_idxstats_dir, file_name))\n for file_name in sorted(os.listdir(args.input_metrics_dir)):\n metrics_files.append(os.path.join(args.input_metrics_dir, file_name))\n if args.list_paired:\n # Add the metrics file for reverse.\n metrics_files.append(os.path.join(args.input_metrics_dir, file_name))\noutput_statistics(fastq_files, idxstats_files, metrics_files, args.output, args.gzipped, args.dbkey)\n"
] | [
[
"pandas.concat",
"pandas.Series",
"pandas.DataFrame",
"numpy.mean",
"pandas.DataFrame.from_dict"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
Abhijeet8901/CS231n | [
"c8e715028b453899d5069cdb34faf3fc2959c270"
] | [
"assignment2/cs231n/optim.py"
] | [
"import numpy as np\n\n\"\"\"\nThis file implements various first-order update rules that are commonly used\nfor training neural networks. Each update rule accepts current weights and the\ngradient of the loss with respect to those weights and produces the next set of\nweights. Each update rule has the same interface:\n\ndef update(w, dw, config=None):\n\nInputs:\n - w: A numpy array giving the current weights.\n - dw: A numpy array of the same shape as w giving the gradient of the\n loss with respect to w.\n - config: A dictionary containing hyperparameter values such as learning\n rate, momentum, etc. If the update rule requires caching values over many\n iterations, then config will also hold these cached values.\n\nReturns:\n - next_w: The next point after the update.\n - config: The config dictionary to be passed to the next iteration of the\n update rule.\n\nNOTE: For most update rules, the default learning rate will probably not\nperform well; however the default values of the other hyperparameters should\nwork well for a variety of different problems.\n\nFor efficiency, update rules may perform in-place updates, mutating w and\nsetting next_w equal to w.\n\"\"\"\n\n\ndef sgd(w, dw, config=None):\n \"\"\"\n Performs vanilla stochastic gradient descent.\n\n config format:\n - learning_rate: Scalar learning rate.\n \"\"\"\n if config is None:\n config = {}\n config.setdefault(\"learning_rate\", 1e-2)\n\n w -= config[\"learning_rate\"] * dw\n return w, config\n\n\ndef sgd_momentum(w, dw, config=None):\n \"\"\"\n Performs stochastic gradient descent with momentum.\n\n config format:\n - learning_rate: Scalar learning rate.\n - momentum: Scalar between 0 and 1 giving the momentum value.\n Setting momentum = 0 reduces to sgd.\n - velocity: A numpy array of the same shape as w and dw used to store a\n moving average of the gradients.\n \"\"\"\n if config is None:\n config = {}\n config.setdefault(\"learning_rate\", 1e-2)\n config.setdefault(\"momentum\", 0.9)\n v = config.get(\"velocity\", np.zeros_like(w))\n\n next_w=None\n ###########################################################################\n # TODO: Implement the momentum update formula. Store the updated value in #\n # the next_w variable. You should also use and update the velocity v. 
#\n ###########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n v= config[\"momentum\"]*v - config[\"learning_rate\"]*dw\n next_w=w+v\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n config[\"velocity\"] = v\n\n return next_w, config\n\n\ndef rmsprop(w, dw, config=None):\n \"\"\"\n Uses the RMSProp update rule, which uses a moving average of squared\n gradient values to set adaptive per-parameter learning rates.\n\n config format:\n - learning_rate: Scalar learning rate.\n - decay_rate: Scalar between 0 and 1 giving the decay rate for the squared\n gradient cache.\n - epsilon: Small scalar used for smoothing to avoid dividing by zero.\n - cache: Moving average of second moments of gradients.\n \"\"\"\n if config is None:\n config = {}\n config.setdefault(\"learning_rate\", 1e-2)\n config.setdefault(\"decay_rate\", 0.99)\n config.setdefault(\"epsilon\", 1e-8)\n config.setdefault(\"cache\", np.zeros_like(w))\n\n next_w = None\n ###########################################################################\n # TODO: Implement the RMSprop update formula, storing the next value of w #\n # in the next_w variable. Don't forget to update cache value stored in #\n # config['cache']. #\n ###########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n cache=config[\"cache\"]\n cache=config[\"decay_rate\"]*cache + (1-config[\"decay_rate\"])*dw**2\n w+=(-config[\"learning_rate\"]*dw)/(np.sqrt(cache)+config[\"epsilon\"])\n next_w=w\n config[\"cache\"]=cache\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n\n return next_w, config\n\n\ndef adam(w, dw, config=None):\n \"\"\"\n Uses the Adam update rule, which incorporates moving averages of both the\n gradient and its square and a bias correction term.\n\n config format:\n - learning_rate: Scalar learning rate.\n - beta1: Decay rate for moving average of first moment of gradient.\n - beta2: Decay rate for moving average of second moment of gradient.\n - epsilon: Small scalar used for smoothing to avoid dividing by zero.\n - m: Moving average of gradient.\n - v: Moving average of squared gradient.\n - t: Iteration number.\n \"\"\"\n if config is None:\n config = {}\n config.setdefault(\"learning_rate\", 1e-3)\n config.setdefault(\"beta1\", 0.9)\n config.setdefault(\"beta2\", 0.999)\n config.setdefault(\"epsilon\", 1e-8)\n config.setdefault(\"m\", np.zeros_like(w))\n config.setdefault(\"v\", np.zeros_like(w))\n config.setdefault(\"t\", 0)\n\n next_w = None\n ###########################################################################\n # TODO: Implement the Adam update formula, storing the next value of w in #\n # the next_w variable. Don't forget to update the m, v, and t variables #\n # stored in config. #\n # #\n # NOTE: In order to match the reference output, please modify t _before_ #\n # using it in any calculations. 
#\n ###########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n lr=config[\"learning_rate\"]\n b1,b2,ep=config[\"beta1\"],config[\"beta2\"],config[\"epsilon\"]\n m=config[\"m\"]\n v=config[\"v\"]\n t=config[\"t\"]\n t+=1\n m=b1*m+(1-b1)*dw\n mt=m/(1-b1**t)\n v=b2*v+(1-b2)*dw**2\n vt=v/(1-b2**t)\n w-=(lr*mt)/(np.sqrt(vt)+ep)\n config[\"m\"],config[\"v\"],config[\"t\"]=m,v,t\n next_w=w\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n\n return next_w, config\n"
] | [
[
"numpy.zeros_like",
"numpy.sqrt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
modichirag/21cmhod | [
"0807a7b0b880f4ba5bc7161b843d500ddcece5a7",
"0807a7b0b880f4ba5bc7161b843d500ddcece5a7"
] | [
"code/distributeHI.py",
"code/plotting/plot_xibr.py"
] | [
"import numpy as np\nimport re, os\nfrom pmesh.pm import ParticleMesh\nfrom nbodykit.lab import BigFileCatalog, BigFileMesh, MultipleSpeciesCatalog, FFTPower\nfrom nbodykit import setup_logging\nfrom mpi4py import MPI\n\nimport HImodels\n# enable logging, we have some clue what's going on.\nsetup_logging('info')\n\n#Get model as parameter\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument('-s', '--size', help='for small or big box', default='small')\nparser.add_argument('-m', '--model', help='model name to use')\nargs = parser.parse_args()\nif args.model == None:\n print('Specify a model name')\n sys.exit()\n#print(args, args.model)\n\nmodel = args.model #'ModelD'\nboxsize = args.size\n\n\n#\n#\n#Global, fixed things\nscratchyf = '/global/cscratch1/sd/yfeng1/m3127/'\nscratchcm = '/global/cscratch1/sd/chmodi/m3127/H1mass/'\nproject = '/project/projectdirs/m3127/H1mass/'\ncosmodef = {'omegam':0.309167, 'h':0.677, 'omegab':0.048}\nalist = [0.1429,0.1538,0.1667,0.1818,0.2000,0.2222,0.2500,0.2857,0.3333]\n\n\n#Parameters, box size, number of mesh cells, simulation, ...\nif boxsize == 'small':\n bs, nc, ncsim, sim, prefix = 256, 512, 2560, 'highres/%d-9100-fixed'%2560, 'highres'\nelif boxsize == 'big':\n bs, nc, ncsim, sim, prefix = 1024, 1024, 10240, 'highres/%d-9100-fixed'%10240, 'highres'\nelse:\n print('Box size not understood, should be \"big\" or \"small\"')\n sys.exit()\n\n\n# It's useful to have my rank for printing...\npm = ParticleMesh(BoxSize=bs, Nmesh=[nc, nc, nc])\nrank = pm.comm.rank\ncomm = pm.comm\n\n\n#Which model & configuration to use\nmodeldict = {'ModelA':HImodels.ModelA, 'ModelB':HImodels.ModelB, 'ModelC':HImodels.ModelC}\nmodedict = {'ModelA':'galaxies', 'ModelB':'galaxies', 'ModelC':'halos'} \nHImodel = modeldict[model] #HImodels.ModelB\nmodelname = model\nmode = modedict[model]\nofolder = '../data/outputs/'\n\n\n\n\ndef distribution(aa, halocat, cencat, satcat, outfolder, mbins=None):\n '''Compute the fraction of HI in halos, centrals, satellites'''\n\n if rank==0: print('Calculating distribution')\n\n if mbins is None: mbins = np.logspace(9, 15, 100)\n hmass = halocat['Mass'].compute()\n\n\n htotal, hsize, h1total = [], [], []\n for im in range(mbins.size-1):\n mask = (hmass >= mbins[im]) & (hmass < mbins[im+1])\n rankweight = (hmass*mask).sum()\n htotal.append(comm.allreduce(rankweight))\n rankweight = (mask).sum()\n hsize.append(comm.allreduce(rankweight))\n \n h1bin = []\n for cat in [halocat['HImass'], cencat['HImass'], cencat['HIsat']]:\n rankweight = (cat.compute()*mask).sum()\n h1bin.append(comm.allreduce(rankweight))\n h1total.append(h1bin)\n\n \n #\n if rank==0:\n tosave = np.zeros((len(hsize), 5))\n tosave[:, 1] = hsize\n tosave[:, 0] = htotal / (tosave[:, 1])\n tosave[:, 2:] = h1total/ (tosave[:, 1].reshape(-1, 1))\n tosave[np.isnan(tosave)] = 0\n header = 'Halo Mass, Number Halos, HI halos, HI centrals, HI satellites'\n np.savetxt(outfolder + \"HI_dist_{:6.4f}.txt\".format(aa), tosave, fmt='%0.6e', header=header)\n \n \n\nif __name__==\"__main__\":\n if rank==0: print('Starting')\n suff='-m1_00p3mh-alpha-0p8-subvol'\n outfolder = ofolder + suff[1:]\n if bs == 1024: outfolder = outfolder + \"-big\"\n outfolder += \"/%s/\"%modelname\n if rank == 0: print(outfolder)\n #outfolder = ofolder + suff[1:] + \"/%s/\"%modelname\n try: \n os.makedirs(outfolder)\n except : pass\n\n for aa in alist:\n if rank == 0: print('\\n ############## Redshift = %0.2f ############## \\n'%(1/aa-1))\n halocat = BigFileCatalog(scratchyf + sim+ 
'/fastpm_%0.4f//'%aa, dataset='LL-0.200')\n mp = halocat.attrs['MassTable'][1]*1e10##\n halocat['Mass'] = halocat['Length'].compute() * mp\n cencat = BigFileCatalog(scratchcm + sim+'/fastpm_%0.4f/cencat'%aa+suff)\n satcat = BigFileCatalog(scratchcm + sim+'/fastpm_%0.4f/satcat'%aa+suff)\n #\n\n HImodelz = HImodel(aa)\n halocat['HImass'], cencat['HImass'], satcat['HImass'] = HImodelz.assignHI(halocat, cencat, satcat)\n cencat['HIsat'] = HImodelz.getinsat(satcat['HImass'].compute(), satcat['GlobalID'].compute(), \n cencat.csize, cencat['Mass'].size, cencat.comm).local\n \n\n mbins = 10**np.arange(9, 15.1, 0.2)\n distribution(aa, halocat, cencat, satcat, outfolder, mbins=mbins)\n\n",
"#!/usr/bin/env python3\n#\n# Plots the real-space correlation functions and biases for the HI.\n#\nimport numpy as np\nimport sys, os\nimport matplotlib.pyplot as plt\nfrom scipy.interpolate import InterpolatedUnivariateSpline as ius\n#\nfrom matplotlib import rc, rcParams, font_manager\nrcParams['font.family'] = 'serif'\nfsize = 12\nfontmanage = font_manager.FontProperties(family='serif', style='normal',\n size=fsize, weight='normal', stretch='normal')\nfont = {'family': fontmanage.get_family()[0],\n 'style': fontmanage.get_style(),\n 'weight': fontmanage.get_weight(),\n 'size': fontmanage.get_size(),\n }\nprint(font)\n\n#\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument('-s', '--size', help='which box size simulation', default='small')\nargs = parser.parse_args()\nboxsize = args.size\n\nsuff = 'm1_00p3mh-alpha-0p8-subvol'\nif boxsize == 'big':\n suff = suff + '-big'\n bs = 1024\nelse: bs = 256\n\nfigpath = '../../figs/%s/'%(suff)\ntry: os.makedirs(figpath)\nexcept: pass\n\n\nmodels = ['ModelA', 'ModelB', 'ModelC']\nmodel = 'ModelA'\ndpath = '../../data/outputs/%s/%s/'%(suff, model)\n\n\ndef make_xib_plot():\n \"\"\"Does the work of making the real-space xi(r) and b(r) figure.\"\"\"\n zlist = [2.0,4.0,6.0]\n blist = [1.8,2.6,3.5]\n clist = ['b','g','r']\n # Now make the figure.\n fig,ax = plt.subplots(1,2,figsize=(7,3.5))\n\n for zz,bb,col in zip(zlist,blist,clist):\n\n aa = 1.0/(1.0+zz)\n mfc = col\n # Put on a fake symbol for the legend.\n ax[0].plot([100],[100],'s',color=col,label=\"z={:.1f}\".format(zz))\n # Plot the data, xi_mm, xi_hm, xi_hh\n\n xim = np.loadtxt(dpath + \"ximatter_{:06.4f}.txt\".format(aa))\n ax[0].plot(xim[:,0],xim[:,1],'*--',color=col,mfc=mfc,alpha=0.5, markersize=3)\n\n xix = np.loadtxt(dpath + \"ximxh1mass_{:06.4f}.txt\".format(aa))\n ax[0].plot(xix[:,0],xix[:,1],'s-',color=col,mfc='None',alpha=0.75, markersize=3)\n\n xih = np.loadtxt(dpath + \"xih1mass_{:06.4f}.txt\".format(aa))\n ax[0].plot(xih[:,0],xih[:,1],'o--',color=col,mfc=mfc,alpha=0.75, markersize=3)\n\n\n # and the inferred biases.\n ba = np.sqrt(ius(xih[:,0],xih[:,1])(xim[:,0])/xim[:,1])\n bx = ius(xix[:,0], xix[:,1])(xim[:,0])/xim[:,1] \n #ba = np.sqrt(xih[:,1]/xim[:,1])\n #bx = xix[:,1]/xim[:,1]\n xx = [i.mean() for i in np.array_split(xim[:,0], np.arange(2, 28, 2))]\n ba = [i.mean() for i in np.array_split(ba, np.arange(2, 28, 2))]\n bx = [i.mean() for i in np.array_split(bx, np.arange(2, 28, 2))]\n \n ax[1].plot(xx,bx,'s-' ,color=col,mfc='None',alpha=0.75, markersize=3)\n ax[1].plot(xx,ba,'o--',color=col,mfc=mfc,alpha=0.75, markersize=3)\n # put on a line for Sigma -- labels make this too crowded.\n pk = np.loadtxt(\"../../data/pklin_{:6.4f}.txt\".format(aa))\n Sig = np.sqrt(np.trapz(pk[:,1],x=pk[:,0])/6./np.pi**2)\n ax[0].plot([Sig,Sig],[1e-10,1e10],':',color='darkgrey')\n # Tidy up the plot.\n ax[0].set_ylim(0.008,13.)\n ax[0].set_yscale('log')\n #\n ax[1].set_ylim(1.0,5.0)\n\n ax[0].legend(prop=fontmanage)\n # Put on some more labels.\n ax[0].set_ylabel(r'$\\xi(r)$', fontdict=font)\n ax[1].set_ylabel(r'Bias', fontdict=font)\n for axis in ax:\n axis.set_xlabel(r'$r\\quad [h^{-1}\\,{\\rm Mpc}]$', fontdict=font)\n axis.set_xlim(0.7,30.0)\n axis.set_xscale('log')\n for tick in axis.xaxis.get_major_ticks():\n tick.label.set_fontproperties(fontmanage)\n for tick in axis.yaxis.get_major_ticks():\n tick.label.set_fontproperties(fontmanage)\n\n # and finish up.\n plt.tight_layout()\n plt.savefig(figpath + 'HI_xib_%s.pdf'%model)\n #\n\n\n\n\nif __name__==\"__main__\":\n 
make_xib_plot()\n #\n"
] | [
[
"numpy.logspace",
"numpy.arange",
"numpy.isnan"
],
[
"matplotlib.pyplot.tight_layout",
"scipy.interpolate.InterpolatedUnivariateSpline",
"numpy.arange",
"matplotlib.font_manager.FontProperties",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"numpy.trapz"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
jveverka/data-lab | [
"c2a43fd2c34520a9d490f29feff3035bdc70c0d6"
] | [
"ml-services/od-yolov3-tf2/yolov3_tf2/utils.py"
] | [
"from absl import logging\nimport numpy as np\nimport tensorflow as tf\nimport cv2\n\nYOLOV3_LAYER_LIST = [\n 'yolo_darknet',\n 'yolo_conv_0',\n 'yolo_output_0',\n 'yolo_conv_1',\n 'yolo_output_1',\n 'yolo_conv_2',\n 'yolo_output_2',\n]\n\nYOLOV3_TINY_LAYER_LIST = [\n 'yolo_darknet',\n 'yolo_conv_0',\n 'yolo_output_0',\n 'yolo_conv_1',\n 'yolo_output_1',\n]\n\n\ndef load_darknet_weights(model, weights_file, tiny=False):\n wf = open(weights_file, 'rb')\n major, minor, revision, seen, _ = np.fromfile(wf, dtype=np.int32, count=5)\n\n if tiny:\n layers = YOLOV3_TINY_LAYER_LIST\n else:\n layers = YOLOV3_LAYER_LIST\n\n for layer_name in layers:\n sub_model = model.get_layer(layer_name)\n for i, layer in enumerate(sub_model.layers):\n if not layer.name.startswith('conv2d'):\n continue\n batch_norm = None\n if i + 1 < len(sub_model.layers) and \\\n sub_model.layers[i + 1].name.startswith('batch_norm'):\n batch_norm = sub_model.layers[i + 1]\n\n logging.info(\"{}/{} {}\".format(\n sub_model.name, layer.name, 'bn' if batch_norm else 'bias'))\n\n filters = layer.filters\n size = layer.kernel_size[0]\n in_dim = layer.input_shape[-1]\n\n if batch_norm is None:\n conv_bias = np.fromfile(wf, dtype=np.float32, count=filters)\n else:\n # darknet [beta, gamma, mean, variance]\n bn_weights = np.fromfile(\n wf, dtype=np.float32, count=4 * filters)\n # tf [gamma, beta, mean, variance]\n bn_weights = bn_weights.reshape((4, filters))[[1, 0, 2, 3]]\n\n # darknet shape (out_dim, in_dim, height, width)\n conv_shape = (filters, in_dim, size, size)\n conv_weights = np.fromfile(\n wf, dtype=np.float32, count=np.product(conv_shape))\n # tf shape (height, width, in_dim, out_dim)\n conv_weights = conv_weights.reshape(\n conv_shape).transpose([2, 3, 1, 0])\n\n if batch_norm is None:\n layer.set_weights([conv_weights, conv_bias])\n else:\n layer.set_weights([conv_weights])\n batch_norm.set_weights(bn_weights)\n\n assert len(wf.read()) == 0, 'failed to read all data'\n wf.close()\n\n\ndef broadcast_iou(box_1, box_2):\n # box_1: (..., (x1, y1, x2, y2))\n # box_2: (N, (x1, y1, x2, y2))\n\n # broadcast boxes\n box_1 = tf.expand_dims(box_1, -2)\n box_2 = tf.expand_dims(box_2, 0)\n # new_shape: (..., N, (x1, y1, x2, y2))\n new_shape = tf.broadcast_dynamic_shape(tf.shape(box_1), tf.shape(box_2))\n box_1 = tf.broadcast_to(box_1, new_shape)\n box_2 = tf.broadcast_to(box_2, new_shape)\n\n int_w = tf.maximum(tf.minimum(box_1[..., 2], box_2[..., 2]) -\n tf.maximum(box_1[..., 0], box_2[..., 0]), 0)\n int_h = tf.maximum(tf.minimum(box_1[..., 3], box_2[..., 3]) -\n tf.maximum(box_1[..., 1], box_2[..., 1]), 0)\n int_area = int_w * int_h\n box_1_area = (box_1[..., 2] - box_1[..., 0]) * \\\n (box_1[..., 3] - box_1[..., 1])\n box_2_area = (box_2[..., 2] - box_2[..., 0]) * \\\n (box_2[..., 3] - box_2[..., 1])\n return int_area / (box_1_area + box_2_area - int_area)\n\n\ndef draw_outputs(img, outputs, class_names):\n boxes, objectness, classes, nums = outputs\n boxes, objectness, classes, nums = boxes[0], objectness[0], classes[0], nums[0]\n wh = np.flip(img.shape[0:2])\n for i in range(nums):\n x1y1 = tuple((np.array(boxes[i][0:2]) * wh).astype(np.int32))\n x2y2 = tuple((np.array(boxes[i][2:4]) * wh).astype(np.int32))\n img = cv2.rectangle(img, x1y1, x2y2, (255, 0, 0), 2)\n img = cv2.putText(img, '{} {:.4f}'.format(\n class_names[int(classes[i])], objectness[i]),\n x1y1, cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)\n return img\n\n\ndef draw_labels(x, y, class_names):\n img = x.numpy()\n boxes, classes = tf.split(y, (4, 1), axis=-1)\n 
classes = classes[..., 0]\n wh = np.flip(img.shape[0:2])\n for i in range(len(boxes)):\n x1y1 = tuple((np.array(boxes[i][0:2]) * wh).astype(np.int32))\n x2y2 = tuple((np.array(boxes[i][2:4]) * wh).astype(np.int32))\n img = cv2.rectangle(img, x1y1, x2y2, (255, 0, 0), 2)\n img = cv2.putText(img, class_names[classes[i]],\n x1y1, cv2.FONT_HERSHEY_COMPLEX_SMALL,\n 1, (0, 0, 255), 2)\n return img\n\n\ndef freeze_all(model, frozen=True):\n model.trainable = not frozen\n if isinstance(model, tf.keras.Model):\n for l in model.layers:\n freeze_all(l, frozen)"
] | [
[
"numpy.fromfile",
"numpy.product",
"tensorflow.shape",
"tensorflow.broadcast_to",
"tensorflow.maximum",
"tensorflow.minimum",
"tensorflow.expand_dims",
"tensorflow.split",
"numpy.array",
"numpy.flip"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.13",
"1.10",
"1.12"
]
}
] |
instance01/qubo-nn | [
"6f8058565f4b6ab4a8300501fc2f67cdaeed482f",
"6f8058565f4b6ab4a8300501fc2f67cdaeed482f",
"6f8058565f4b6ab4a8300501fc2f67cdaeed482f",
"6f8058565f4b6ab4a8300501fc2f67cdaeed482f"
] | [
"qubo_nn/plots/gen_tsne_gen4.py",
"qubo_nn/problems/knapsack_integer_weights.py",
"qubo_nn/plots/plot_tot_mc.py",
"qubo_nn/filling_level_diag.py"
] | [
"import pickle\nimport numpy as np\nfrom MulticoreTSNE import MulticoreTSNE as TSNE\nfrom qubo_nn.data import LMDBDataLoader\nfrom qubo_nn.config import Config\n\n\ncfg_id = '27_gen4'\ncfg = Config('../').get_cfg(cfg_id)\ncfg[\"use_big\"] = False\nlmdb_loader = LMDBDataLoader(cfg, reverse=False, base_path='../')\n\nX = []\ny = []\nfor i, data in enumerate(lmdb_loader.train_data_loader):\n if i > 43: # 44 batches á 500 = 22k (from total of 440k), so 5%\n break\n X.extend(data[0].tolist())\n y.extend(data[1].tolist())\n\nX = np.array(X)\nX = X.reshape(-1, 64**2)\nprint(X.shape)\n\nfor i in [10, 20, 30, 50, 70, 100, 200, 500, 1000]:\n tsne = TSNE(\n n_jobs=10,\n n_iter=5000,\n perplexity=i,\n # perplexity=500., # Best.\n verbose=1\n )\n Y = tsne.fit_transform(X)\n\n with open('tsne_gen4_data%d.pickle' % i, 'wb+') as f:\n pickle.dump((Y, y), f)\n",
"import numpy as np\nfrom qubo_nn.problems.problem import Problem\n\n\nclass KnapsackIntegerWeights(Problem):\n def __init__(self, cfg, w, c, W, A=10, B=1):\n self.w = w\n self.c = c\n self.W = W\n self.A = A\n self.B = B\n\n def gen_qubo_matrix(self):\n N = self.w.shape[0]\n Q = np.zeros((N + self.W, N + self.W))\n\n # First term.\n for i in range(self.W):\n Q[N + i][N + i] -= 2 * self.A\n for j in range(self.W):\n Q[N + i][N + j] += .5 * self.A\n Q[N + j][N + i] += .5 * self.A\n\n # Second term.\n for i in range(self.W):\n for j in range(self.W):\n Q[N + i][N + j] += .5 * (i+1) * (j+1) * self.A\n Q[N + j][N + i] += .5 * (i+1) * (j+1) * self.A\n\n for i in range(self.W):\n for j in range(N):\n Q[N + i][j] -= (i+1) * self.w[j] * self.A\n Q[j][N + i] -= (i+1) * self.w[j] * self.A\n\n for i in range(N):\n for j in range(N):\n Q[i][j] += .5 * self.w[i] * self.w[j] * self.A\n Q[j][i] += .5 * self.w[i] * self.w[j] * self.A\n\n # Last term.\n for i in range(N):\n Q[i][i] -= self.c[i] * self.B\n Q[j][i] -= self.c[i] * self.B\n\n return Q\n\n @classmethod\n def gen_problems(cls, cfg, n_problems, size=(20, 25), **kwargs):\n high = cfg[\"problems\"][\"KIW\"].get(\"high\", 50)\n\n problems = []\n\n for _ in range(n_problems):\n w = np.random.randint(0, high, size=(size[0],))\n c = np.random.randint(0, high, size=(size[0],))\n problems.append({\"w\": w, \"c\": c, \"W\": size[1]})\n\n return problems\n\n\nif __name__ == \"__main__\":\n w = np.array([2, 5, 3])\n c = np.array([5, 2, 4])\n W = 7\n sc = KnapsackIntegerWeights(\n {\"problems\": {\"KIW\": {}}},\n w, c, W\n )\n Q = sc.gen_qubo_matrix()\n\n print(Q.tolist())\n\n for x in np.c_[tuple(i.ravel() for i in np.mgrid[:2, :2, :2, :2, :2, :2, :2, :2, :2, :2])]: # noqa\n if x @ Q @ x.T < 0:\n print(x, \"|\", x @ Q @ x.T)\n",
"import pickle\n\nimport numpy as np\nimport scipy.stats as st\nimport matplotlib as mpl\nfrom matplotlib import pyplot as plt\nimport matplotlib.colors as colors\n\n\nmpl.font_manager._rebuild()\nplt.rc('font', family='Raleway')\n\n\ndef truncate_colormap(cmapIn='jet', minval=0.0, maxval=1.0, n=100):\n cmapIn = plt.get_cmap(cmapIn)\n\n new_cmap = colors.LinearSegmentedColormap.from_list(\n 'trunc({n},{a:.2f},{b:.2f})'.format(n=cmapIn.name, a=minval, b=maxval),\n cmapIn(np.linspace(minval, maxval, n)))\n\n return new_cmap\n\n\ncmap_mod = truncate_colormap('Greens', minval=.5, maxval=.99)\n\n\ndef plot_confusion(id_):\n with open('tot_misclassifications_%s.pickle' % id_, 'rb') as f:\n arr1, mc_tables = pickle.load(f)\n\n # mc_table = np.mean(mc_tables, axis=0)\n mean = np.mean(mc_tables, axis=0)\n mean = mean.reshape(11 * 11)\n var = st.sem(mc_tables, axis=0)\n var = var.reshape(11 * 11)\n\n ci = st.t.interval(\n 0.95,\n len(mc_tables) - 1,\n loc=mean,\n scale=var\n )\n\n mean = mean.reshape((11, 11))\n ci = mean - ci[0].reshape((11, 11))\n\n fig, ax = plt.subplots(figsize=(7, 7))\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n\n for i in range(mean.shape[0]):\n for j in range(mean.shape[0]):\n if np.isnan(ci[i][j]):\n ci[i][j] = 0.\n ax.text(\n j,\n i,\n '%.02f \\n± %.02f' % (mean[i][j], ci[i][j]),\n ha=\"center\",\n va=\"center\",\n color=\"w\"\n )\n\n problems = [\"NP\", \"MC\", \"MVC\", \"SP\", \"M2SAT\", \"SPP\", \"GC\", \"QA\", \"QK\", \"M3SAT\", \"TSP\"] # noqa\n plt.xticks(list(range(len(problems))), problems)\n plt.yticks(list(range(len(problems))), problems)\n\n plt.tight_layout()\n ax.imshow(mean, cmap=cmap_mod, vmin=0, vmax=1)\n plt.savefig('confusion_%s.png' % id_)\n plt.savefig('confusion_%s.pdf' % id_)\n\n\ndef plot(id_):\n with open('tot_misclassifications_%s.pickle' % id_, 'rb') as f:\n arr1, confusion_matrix = pickle.load(f)\n\n fig, ax = plt.subplots(figsize=(3.5, 3))\n\n def sub_plot(arr, col):\n mean = np.mean(arr, axis=0)\n x = np.arange(mean.shape[0])\n\n ci = st.t.interval(\n 0.95,\n len(arr) - 1,\n loc=np.mean(arr, axis=0),\n scale=st.sem(arr, axis=0)\n )\n\n ax.plot(x, mean, color=col)\n ax.fill_between(x, ci[0], ci[1], color=col, alpha=.1)\n\n print(mean[-1], \"+-\", mean[-1] - ci[0][-1])\n\n sub_plot(arr1, 'c')\n plt.ylabel(\"Misclassification rate\")\n plt.xlabel(\"Epoch\")\n plt.tight_layout()\n plt.show()\n fig.savefig('tot_mc_%s.png' % id_)\n fig.savefig('tot_mc_%s.pdf' % id_)\n\n\ndef run():\n plot_confusion('27_scramble_100k')\n plot_confusion('27_noscramble_100k')\n plot('27_scramble_100k')\n plot('27_noscramble_100k')\n # plot('18_lr2_leaky')\n # plot('23')\n\n\nif __name__ == '__main__':\n run()\n",
"import pyxis as px\nimport numpy as np\n\nfrom qubo_nn.config import Config\nfrom qubo_nn.data import LMDBDataLoader\n\n\nproblems_short = [\"np\", \"mc\", \"mvc\", \"sp\", \"m2sat\", \"spp\", \"gc\", \"qa\", \"qk\", \"m3sat\", \"tsp\", \"gi\", \"sgi\", \"mcq\"]\nQUBO_SIZE = 64\n\n\nfor problem in problems_short:\n cfg = Config().get_cfg('red_%s_1' % problem)\n cfg[\"use_big\"] = False\n lmdb_loader = LMDBDataLoader(cfg)\n loader = lmdb_loader.train_data_loader\n data = list(loader)\n is_diag_same = True\n for batch in data:\n batch = batch[0]\n last_diag = None\n for qubo in batch:\n if last_diag is not None and not np.allclose(np.diag(qubo), last_diag):\n is_diag_same = False\n last_diag = np.diag(qubo)\n # print(total)\n # print(zeros)\n print(problem, is_diag_same)\n"
] | [
[
"numpy.array"
],
[
"numpy.array",
"numpy.zeros",
"numpy.random.randint"
],
[
"matplotlib.font_manager._rebuild",
"matplotlib.pyplot.tight_layout",
"numpy.linspace",
"numpy.isnan",
"numpy.arange",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.get_cmap",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"numpy.mean",
"matplotlib.pyplot.xlabel",
"scipy.stats.sem",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
],
[
"numpy.diag"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
OSADP/TCA | [
"25bc1c1db00393cc6b8c6764610bf381494dfcb9",
"25bc1c1db00393cc6b8c6764610bf381494dfcb9"
] | [
"old_versions/TCA_2_2/TCA_V_2_2_1/code/TCASpacePartitioning.py",
"old_versions/TCA_2_2/TCA_V_2_2_1/code/TCAFileReader.py"
] | [
"#standard\nimport unittest\nimport math\n# from collections import OrderedDict\nfrom random import uniform\n\n#external\nimport pandas as pd\nfrom scipy.spatial import KDTree\n\n\n\ndef Find_RSE_range(df, RSEs, minrange):\n\n sub_df = df[['vehicle_ID', 'location_x', 'location_y']]\n\n\n tree = KDTree(sub_df[['location_x', 'location_y']].values)\n rse_points = list(RSEs.RSEListLocations.values())\n locs_index = tree.query_ball_point(rse_points, r=minrange)\n\n #link RSE back to vehicles\n rse_vehicles = {}\n for c, RSE in enumerate(RSEs.RSEListLocations.keys()):\n if len(locs_index[c]) > 0:\n vlist = sub_df.iloc[locs_index[c]]['vehicle_ID'].tolist()\n rse_vehicles[RSE] = vlist\n else:\n rse_vehicles[RSE] = []\n\n return rse_vehicles\n\n\nclass BufferContentCheck(unittest.TestCase):\n def setUp(self):\n pass\n\n def test_whole(self):\n minrange = 4.00\n num_vehicles = 10000\n num_RSE = 30\n\n # Vehicles_loc = {x:(uniform(0, 200), uniform(0, 200)) for x in range(num_vehicles)}\n # df = pd.DataFrame({\n # 'Vid' : ['V' + str(x) for x in Vehicles_loc.keys()],\n # 'x' : [Vehicles_loc[x][0] for x in Vehicles_loc],\n # 'y' : [Vehicles_loc[x][1] for x in Vehicles_loc],\n # })\n # df = df.set_index(['Vid'], drop=False)\n\n # RSEs = OrderedDict({'RSE' + str(x):(uniform(0, 200), uniform(0, 200)) for x in range(num_RSE)})\n\n # rse_info = Find_RSE_range(df, RSEs, minrange)\n\n\n\n\nif __name__ == '__main__':\n unittest.main()\n\n",
"#standard\nimport os\nimport random as rnd\nimport sys\nimport tempfile as tmpfile\nimport unittest\nimport logging\n\n#external\nimport pandas as pd\n\n#tca\nfrom TCACore import logger\n\nclass Trajectories(object):\n \"\"\"Core class for reading vehicles trajectories\"\"\"\n\n def __init__(self, CHUNK_SIZE = 20000000 ):\n\n self.CHUNK_SIZE = CHUNK_SIZE\n self.temp_files = []\n self.rnd = rnd.Random()\n self.rnd.seed(3)\n\n self.equip_PDM = []\n self.equip_BSM = []\n self.equip_DualPDMBSM = []\n\n self.PDM_DSRC = []\n self.PDM_Cellular = []\n self.PDM_DualComm = []\n\n self.BSM_DSRC = []\n self.BSM_Cellular = []\n self.BSM_DualComm = []\n\n self.PDMBSM_DSRC = []\n self.PDMBSM_Cellular = []\n self.PDMBSM_DualComm = []\n\n self.DSRC_list = []\n self.cellular_list = []\n self.dualcomm_list = []\n\n\n def __del__(self):\n for tmp in self.temp_files:\n os.remove(tmp)\n\n def open_trajectory_file(self, vissim_file, filename, skip_lines = 1, ):\n\n if not vissim_file:\n try:\n return pd.read_csv(filename,\n iterator=True,\n chunksize=self.CHUNK_SIZE,\n skipinitialspace=True,\n index_col=False,\n )\n except:\n logger.error('Error: reading csv file. Please check the format of the file')\n raise\n\n else:\n try:\n return pd.read_csv(filename,\n iterator=True,\n chunksize=self.CHUNK_SIZE,\n sep=';',\n skipinitialspace=True,\n header=1,\n index_col=False,\n skiprows = skip_lines -1 )\n except:\n logger.error('Error: VISSIM fzp file does not have all required fields of: VehNr, t, WorldX, WorldY, and v')\n raise\n\n\n#---------------------------------------------------------------\n def load(self, CF):\n\n self.vissim_file = False\n self.include_accel = False\n\n if CF.Control['FileType'].lower() == 'vissim' or CF.Control['FileType'].lower() == 'fzp':\n self.vissim_file = True\n\n if self.vissim_file:\n line_skip = 0\n CF.Control['AccelColumn'] = True\n with open(CF.Control['TrajectoryFileName']) as in_f:\n line = in_f.readline()\n while 'VehNr;' not in line:\n line = in_f.readline()\n line_skip += 1\n\n if CF.Control['OutputLevel'] >=1:\n logger.info('Loading %s' % (CF.Control['TrajectoryFileName'].split('/')[-1]))\n\n #Determine Market Penetration\n if (CF.Control['PDMMarketPenetration'] != None) or (CF.Control['BSMMarketPenetration'] != None) or \\\n (CF.Control['DualPDMBSMMarketPenetration'] != None):\n\n if not self.vissim_file:\n _infile = self.open_trajectory_file( self.vissim_file, CF.Control['TrajectoryFileName'])\n else:\n _infile = self.open_trajectory_file( self.vissim_file, CF.Control['TrajectoryFileName'],\n skip_lines = line_skip )\n IDs = []\n\n #Determine Equipped Vehicles based on MarkPenetration\n file_num = 0\n for chunk in _infile:\n file_num += 1\n if self.vissim_file:\n IDs = list(set(IDs + list(chunk['VehNr'].unique())))\n else:\n IDs = list(set(IDs + list(chunk[CF.Control['IDColumn']].unique())))\n\n # print 'Temp Files # ', str(file_num)\n\n if CF.Control['OutputLevel'] >=1:\n logger.info('Read in %s vehicle IDs' % (str(len(IDs))))\n\n if CF.Control['DualPDMBSMMarketPenetration'] != None:\n num_Dual_PDMBSM = int(round(len(IDs) * (CF.Control['DualPDMBSMMarketPenetration'] / 100.0)))\n self.equip_DualPDMBSM = self.rnd.sample(IDs, num_Dual_PDMBSM)\n\n Sub_id_list = [ID for ID in self.equip_DualPDMBSM]\n if CF.Control['PDMBSMDSRCMarketPenetration'] != None:\n num_PDMBSM_DSRC = int(round(len(self.equip_DualPDMBSM) * (CF.Control['PDMBSMDSRCMarketPenetration'] / 100.00)))\n self.PDMBSM_DSRC = self.rnd.sample(Sub_id_list, num_PDMBSM_DSRC)\n Sub_id_list = [ID for ID in 
self.equip_DualPDMBSM if ID not in self.PDMBSM_DSRC]\n\n if CF.Control['PDMBSMCellularMarketPenetration'] != None:\n num_PDMBSM_Cellular = int(round(len(self.equip_DualPDMBSM) * (CF.Control['PDMBSMCellularMarketPenetration'] / 100.00)))\n if len(Sub_id_list) < num_PDMBSM_Cellular:\n num_PDMBSM_Cellular = len(Sub_id_list)\n self.PDMBSM_Cellular = self.rnd.sample(Sub_id_list, num_PDMBSM_Cellular)\n Sub_id_list = [ID for ID in self.equip_DualPDMBSM if ID not in self.PDMBSM_Cellular and ID not in self.PDMBSM_DSRC]\n\n if CF.Control['PDMBSMDualCommMarketPenetration'] != None:\n num_PDMBSM_DualComm = int(round(len(self.equip_DualPDMBSM) * (CF.Control['PDMBSMDualCommMarketPenetration'] / 100.00)))\n if len(Sub_id_list) < num_PDMBSM_DualComm:\n num_PDMBSM_DualComm = len(Sub_id_list)\n self.PDMBSM_DualComm = self.rnd.sample(Sub_id_list, num_PDMBSM_DualComm)\n\n if CF.Control['PDMMarketPenetration'] != None:\n num_PDM = int(round(len(IDs) * (CF.Control['PDMMarketPenetration'] / 100.0)))\n if len(self.equip_DualPDMBSM) > 0:\n Sub_id_list = [ID for ID in IDs if ID not in self.equip_DualPDMBSM]\n if len(Sub_id_list) < num_PDM:\n num_PDM = len(Sub_id_list)\n self.equip_PDM = self.rnd.sample(Sub_id_list, num_PDM)\n else:\n if len(IDs) < num_PDM:\n num_PDM = len(IDs)\n self.equip_PDM = self.rnd.sample(IDs, num_PDM)\n\n Sub_id_list = [ID for ID in self.equip_PDM]\n if CF.Control['PDMDSRCMarketPenetration'] != None:\n num_PDM_DSRC = int(round(len(self.equip_PDM) * (CF.Control['PDMDSRCMarketPenetration'] / 100.00)))\n self.PDM_DSRC = self.rnd.sample(Sub_id_list, num_PDM_DSRC)\n Sub_id_list = [ID for ID in self.equip_PDM if ID not in self.PDM_DSRC]\n\n if CF.Control['PDMCellularMarketPenetration'] != None:\n num_PDM_Cellular = int(round(len(self.equip_PDM) * (CF.Control['PDMCellularMarketPenetration'] / 100.00)))\n if len(Sub_id_list) < num_PDM_Cellular:\n num_PDM_Cellular = len(Sub_id_list)\n self.PDM_Cellular = self.rnd.sample(Sub_id_list, num_PDM_Cellular)\n Sub_id_list = [ID for ID in self.equip_PDM if ID not in self.PDM_Cellular and ID not in self.PDM_DSRC]\n\n if CF.Control['PDMDualCommMarketPenetration'] != None:\n num_PDM_DualComm = int(round(len(self.equip_PDM) * (CF.Control['PDMDualCommMarketPenetration'] / 100.00)))\n if len(Sub_id_list) < num_PDM_DualComm:\n num_PDM_DualComm = len(Sub_id_list)\n self.PDM_DualComm = self.rnd.sample(Sub_id_list, num_PDM_DualComm)\n\n if CF.Control['BSMMarketPenetration'] != None:\n num_BSM = int(round(len(IDs) * (CF.Control['BSMMarketPenetration'] / 100.0)))\n if len(self.equip_PDM) > 0 or len(self.equip_DualPDMBSM) > 0:\n Sub_id_list = [ID for ID in IDs if ID not in self.equip_PDM and ID not in self.equip_DualPDMBSM]\n if len(Sub_id_list) < num_BSM:\n num_BSM = len(Sub_id_list)\n self.equip_BSM = self.rnd.sample(Sub_id_list, num_BSM)\n else:\n if len(IDs) < num_BSM:\n num_BSM = len(IDs)\n self.equip_BSM = self.rnd.sample(IDs, num_BSM)\n Sub_id_list = [ID for ID in self.equip_BSM]\n if CF.Control['BSMDSRCMarketPenetration'] != None:\n num_BSM_DSRC = int(round(len(self.equip_BSM) * (CF.Control['BSMDSRCMarketPenetration'] / 100.00)))\n self.BSM_DSRC = self.rnd.sample(Sub_id_list, num_BSM_DSRC)\n Sub_id_list = [ID for ID in self.equip_BSM if ID not in self.BSM_DSRC]\n\n if CF.Control['BSMCellularMarketPenetration'] != None:\n num_BSM_Cellular = int(round(len(self.equip_BSM) * (CF.Control['BSMCellularMarketPenetration'] / 100.00)))\n if len(Sub_id_list) < num_BSM_Cellular:\n num_BSM_Cellular = len(Sub_id_list)\n self.BSM_Cellular = self.rnd.sample(Sub_id_list, 
num_BSM_Cellular)\n Sub_id_list = [ID for ID in self.equip_BSM if ID not in self.BSM_Cellular and ID not in self.BSM_DSRC]\n\n if CF.Control['BSMDualCommMarketPenetration'] != None:\n num_BSM_DualComm = int(round(len(self.equip_BSM) * (CF.Control['BSMDualCommMarketPenetration'] / 100.00)))\n if len(Sub_id_list) < num_BSM_DualComm:\n num_BSM_DualComm = len(Sub_id_list)\n self.BSM_DualComm = self.rnd.sample(Sub_id_list, num_BSM_DualComm)\n self.equip_vehicles = self.equip_PDM + self.equip_BSM + self.equip_DualPDMBSM\n\n elif (len(CF.Control['PDMVehicleIDs']) > 0) or (len(CF.Control['BSMVehicleIDs']) > 0) or (len(CF.Control['DualPDMBSMVehicleIDs']) > 0):\n\n if (len(CF.Control['DualPDMBSMVehicleIDs']) > 0):\n self.equip_DualPDMBSM = CF.Control['DualPDMBSMVehicleIDs']\n if (len(CF.Control['PDMBSMDSRCVehicleIDs']) > 0):\n self.PDMBSM_DSRC = CF.Control['PDMBSMDSRCVehicleIDs']\n if (len(CF.Control['PDMBSMCellularVehicleIDs']) > 0):\n self.PDMBSM_Cellular = CF.Control['PDMBSMCellularVehicleIDs']\n if (len(CF.Control['PDMBSMDualCommVehicleIDs']) > 0):\n self.PDMBSM_DualComm = CF.Control['PDMBSMDualCommVehicleIDs']\n\n if (len(CF.Control['PDMVehicleIDs']) > 0):\n self.equip_PDM = CF.Control['PDMVehicleIDs']\n if (len(CF.Control['PDMDSRCVehicleIDs']) > 0):\n self.PDM_DSRC = CF.Control['PDMDSRCVehicleIDs']\n if (len(CF.Control['PDMCellularVehicleIDs']) > 0):\n self.PDM_Cellular = CF.Control['PDMCellularVehicleIDs']\n if (len(CF.Control['PDMDualCommVehicleIDs']) > 0):\n self.PDM_DualComm = CF.Control['PDMDualCommVehicleIDs']\n\n if (len(CF.Control['BSMVehicleIDs']) > 0):\n self.equip_BSM = CF.Control['BSMVehicleIDs']\n if (len(CF.Control['BSMDSRCVehicleIDs']) > 0):\n self.BSM_DSRC = CF.Control['BSMDSRCVehicleIDs']\n if (len(CF.Control['BSMCellularVehicleIDs']) > 0):\n self.BSM_Cellular = CF.Control['BSMCellularVehicleIDs']\n if (len(CF.Control['BSMDualCommVehicleIDs']) > 0):\n self.BSM_DualComm = CF.Control['BSMDualCommVehicleIDs']\n\n self.equip_vehicles = self.equip_PDM + self.equip_BSM + self.equip_DualPDMBSM\n\n #TODO see if there is a way to reset the iterator\n if not self.vissim_file:\n _infile = self.open_trajectory_file( self.vissim_file, CF.Control['TrajectoryFileName'])\n else:\n _infile = self.open_trajectory_file( self.vissim_file, CF.Control['TrajectoryFileName'],\n skip_lines = line_skip )\n\n if CF.Control['AccelColumn'] != None:\n self.include_accel = True\n\n _lastbit = None\n self.total_len = 0\n for c, chunk in enumerate(_infile):\n\n if _lastbit is not None:\n chunk = pd.concat([chunk, _lastbit])\n\n if self.vissim_file:\n if (len(CF.Control['PDMVehicleTypes']) > 0) or \\\n (len(CF.Control['BSMVehicleTypes']) > 0) or \\\n (len(CF.Control['DualPDMBSMVehicleTypes']) > 0):\n try:\n chunk = chunk[['VehNr', 't', 'WorldX', 'WorldY', 'v', 'a', 'Type']]\n except:\n print('Error missing one of VISSIM key fields: VehNr, t, WorldX, WorldY, v, a, Type')\n sys.exit(None)\n chunk = chunk.rename(columns={'VehNr': 'vehicle_ID', 'v': 'speed', 't': 'time', 'WorldX': 'location_x',\n 'WorldY': 'location_y', 'a': 'accel_instantaneous', 'Type': 'vehicle_type'})\n else:\n try:\n chunk = chunk[['VehNr', 't', 'WorldX', 'WorldY', 'v', 'a']]\n except:\n print('Error missing one of VISSIM key fields: VehNr, t, WorldX, WorldY, v, a')\n sys.exit(None)\n chunk = chunk.rename(columns={'VehNr': 'vehicle_ID', 'v': 'speed', 't': 'time', 'WorldX': 'location_x',\n 'WorldY': 'location_y', 'a': 'accel_instantaneous',})\n else:\n if (len(CF.Control['PDMVehicleTypes']) > 0) or \\\n (len(CF.Control['BSMVehicleTypes']) > 0) 
or \\\n (len(CF.Control['DualPDMBSMVehicleTypes']) > 0):\n\n if not self.include_accel:\n chunk = chunk[[CF.Control['IDColumn'], CF.Control['TimeColumn'], CF.Control['XColumn'],\n CF.Control['YColumn'], CF.Control['SpdColumn'], CF.Control['TypeColumn']]]\n chunk = chunk.rename(columns={\n CF.Control['IDColumn']: 'vehicle_ID',\n CF.Control['SpdColumn']: 'speed',\n CF.Control['TimeColumn']: 'time',\n CF.Control['XColumn']: 'location_x',\n CF.Control['YColumn']: 'location_y',\n CF.Control['TypeColumn']: 'vehicle_type'\n })\n\n else:\n\n chunk = chunk[[CF.Control['IDColumn'], CF.Control['TimeColumn'], CF.Control['XColumn'],\n CF.Control['YColumn'], CF.Control['SpdColumn'], CF.Control['TypeColumn'],\n CF.Control['AccelColumn'] ]]\n\n chunk = chunk.rename(columns={\n CF.Control['IDColumn']: 'vehicle_ID',\n CF.Control['SpdColumn']: 'speed',\n CF.Control['TimeColumn']: 'time',\n CF.Control['XColumn']: 'location_x',\n CF.Control['YColumn']: 'location_y',\n CF.Control['TypeColumn']: 'vehicle_type',\n CF.Control['AccelColumn']: 'accel_instantaneous',\n })\n\n\n else:\n\n if not self.include_accel:\n\n chunk = chunk[[CF.Control['IDColumn'], CF.Control['TimeColumn'], CF.Control['XColumn'],\n CF.Control['YColumn'], CF.Control['SpdColumn']]]\n chunk = chunk.rename(columns={\n CF.Control['IDColumn']: 'vehicle_ID',\n CF.Control['SpdColumn']: 'speed',\n CF.Control['TimeColumn']: 'time',\n CF.Control['XColumn']: 'location_x',\n CF.Control['YColumn']: 'location_y'\n })\n else:\n chunk = chunk[[CF.Control['IDColumn'], CF.Control['TimeColumn'], CF.Control['XColumn'],\n CF.Control['YColumn'], CF.Control['SpdColumn'], CF.Control['AccelColumn']]]\n chunk = chunk.rename(columns={\n CF.Control['IDColumn']: 'vehicle_ID',\n CF.Control['SpdColumn']: 'speed',\n CF.Control['TimeColumn']: 'time',\n CF.Control['XColumn']: 'location_x',\n CF.Control['YColumn']: 'location_y',\n CF.Control['AccelColumn']: 'accel_instantaneous',\n })\n\n if (len(CF.Control['BSMVehicleTypes']) == 0) and (len(CF.Control['BSMVehicleIDs']) == 0) and \\\n (CF.Control['BSMMarketPenetration'] == None) and (CF.Control['DualPDMBSMMarketPenetration'] == None) \\\n and (len(CF.Control['DualPDMBSMVehicleTypes']) == 0) and (len(CF.Control['DualPDMBSMVehicleIDs']) == 0):\n chunk = chunk[(chunk['time'] % 1 == 0)] #remove 1/10 Second values\n\n #Determine Equipped vehicles based on Vehicle Type\n if (len(CF.Control['PDMVehicleTypes']) > 0) or (len(CF.Control['BSMVehicleTypes']) > 0) or (len(CF.Control['DualPDMBSMVehicleTypes']) > 0):\n\n if (len(CF.Control['DualPDMBSMVehicleTypes']) > 0):\n chunk_dualpdmbsm = chunk[chunk['vehicle_type'].isin(CF.Control['DualPDMBSMVehicleTypes'])]\n self.equip_DualPDMBSM = list(set(self.equip_DualPDMBSM + list(set(chunk_dualpdmbsm['vehicle_ID'].tolist()))))\n\n if (len(CF.Control['PDMBSMDSRCVehicleTypes']) > 0):\n chunk_pdmbsm_dsrc = chunk_dualpdmbsm[chunk_dualpdmbsm['vehicle_type'].isin(CF.Control['PDMBSMDSRCVehicleTypes'])]\n self.PDMBSM_DSRC = list(set(self.PDMBSM_DSRC + list(set(chunk_pdmbsm_dsrc['vehicle_ID'].tolist()))))\n\n if (len(CF.Control['PDMBSMCellularVehicleTypes']) > 0):\n chunk_pdmbsm_cellular = chunk_dualpdmbsm[chunk_dualpdmbsm['vehicle_type'].isin(CF.Control['PDMBSMCellularVehicleTypes'])]\n self.PDMBSM_Cellular = list(set(self.PDMBSM_Cellular + list(set(chunk_pdmbsm_cellular['vehicle_ID'].tolist()))))\n\n if (len(CF.Control['PDMBSMDualCommVehicleTypes']) > 0):\n chunk_pdmbsm_dualcomm = chunk_dualpdmbsm[chunk_dualpdmbsm['vehicle_type'].isin(CF.Control['PDMBSMDualCommVehicleTypes'])]\n self.PDMBSM_DualComm = 
list(set(self.PDMBSM_DualComm + list(set(chunk_pdmbsm_dualcomm['vehicle_ID'].tolist()))))\n\n if (len(CF.Control['PDMVehicleTypes']) > 0):\n chunk_pdm = chunk[chunk['vehicle_type'].isin(CF.Control['PDMVehicleTypes'])]\n self.equip_PDM = list(set(self.equip_PDM + list(set(chunk_pdm['vehicle_ID'].tolist()))))\n\n if (len(CF.Control['PDMDSRCVehicleTypes']) > 0):\n chunk_pdm_dsrc = chunk_pdm[chunk_pdm['vehicle_type'].isin(CF.Control['PDMDSRCVehicleTypes'])]\n self.PDM_DSRC = list(set(self.PDM_DSRC + list(set(chunk_pdm_dsrc['vehicle_ID'].tolist()))))\n\n if (len(CF.Control['PDMCellularVehicleTypes']) > 0):\n chunk_pdm_cellular = chunk_pdm[chunk_pdm['vehicle_type'].isin(CF.Control['PDMCellularVehicleTypes'])]\n self.PDM_Cellular = list(set(self.PDM_Cellular + list(set(chunk_pdm_cellular['vehicle_ID'].tolist()))))\n\n if (len(CF.Control['PDMDualCommVehicleTypes']) > 0):\n chunk_pdm_dualcomm = chunk_pdm[chunk_pdm['vehicle_type'].isin(CF.Control['PDMDualCommVehicleTypes'])]\n self.PDM_DualComm = list(set(self.PDM_DualComm + list(set(chunk_pdm_dualcomm['vehicle_ID'].tolist()))))\n\n if (len(CF.Control['BSMVehicleTypes']) > 0):\n chunk_bsm = chunk[chunk['vehicle_type'].isin(CF.Control['BSMVehicleTypes'])]\n self.equip_BSM = list(set(self.equip_BSM + list(set(chunk_bsm['vehicle_ID'].tolist()))))\n\n if (len(CF.Control['BSMDSRCVehicleTypes']) > 0):\n chunk_bsm_dsrc = chunk_bsm[chunk_bsm['vehicle_type'].isin(CF.Control['BSMDSRCVehicleTypes'])]\n self.BSM_DSRC = list(set(self.BSM_DSRC + list(set(chunk_bsm_dsrc['vehicle_ID'].tolist()))))\n\n if (len(CF.Control['BSMCellularVehicleTypes']) > 0):\n chunk_bsm_cellular = chunk_bsm[chunk_bsm['vehicle_type'].isin(CF.Control['BSMCellularVehicleTypes'])]\n self.BSM_Cellular = list(set(self.BSM_Cellular + list(set(chunk_bsm_cellular['vehicle_ID'].tolist()))))\n\n if (len(CF.Control['BSMDualCommVehicleTypes']) > 0):\n chunk_bsm_dualcomm = chunk_bsm[chunk_bsm['vehicle_type'].isin(CF.Control['BSMDualCommVehicleTypes'])]\n self.BSM_DualComm = list(set(self.BSM_DualComm + list(set(chunk_bsm_dualcomm['vehicle_ID'].tolist()))))\n\n self.equip_vehicles = self.equip_PDM + self.equip_BSM + self.equip_DualPDMBSM\n\n chunk = chunk[chunk['vehicle_ID'].isin(self.equip_vehicles)]\n\n\n if len(chunk) > 0:\n chunk = chunk.sort('time')\n\n #If the data needs to be broken into multiple files\n if os.path.getsize(CF.Control['TrajectoryFileName']) > self.CHUNK_SIZE:\n _last_tp = chunk.tail(1)['time'].values[0]\n _lastbit = chunk[ chunk['time'] == _last_tp ]\n chunk = chunk[ chunk['time'] != _last_tp ]\n\n _tmp_file = tmpfile.NamedTemporaryFile(delete=False)\n chunk.to_pickle(_tmp_file.name)\n self.temp_files.append(_tmp_file.name)\n\n self.total_len += len(chunk)\n\n\n self.equip_vehicles = sorted(self.equip_vehicles)\n\n self.DSRC_list = self.PDM_DSRC + self.BSM_DSRC + self.PDMBSM_DSRC\n self.cellular_list = self.PDM_Cellular + self.BSM_Cellular + self.PDMBSM_Cellular\n self.dualcomm_list = self.PDM_DualComm + self.BSM_DualComm + self.PDMBSM_DualComm\n\n if self.total_len == 0:\n print ('Error Vehicle IDs never found or trajectory file has no data')\n sys.exit(0)\n\n if CF.Control['OutputLevel'] > 0:\n logger.info(\"%s number of lines loaded\" % (str(self.total_len)))\n\n if CF.Control['OutputLevel'] >= 1:\n logger.info ('Total number of equipped vehicle = %d' % (len(self.equip_vehicles)))\n logger.info (\"Number of PDM vehicles transmitting via DSRC(%d), Cellular(%d), and DSRC or Cellular(%d)\"\n % (len(self.PDM_DSRC), len(self.PDM_Cellular ), len(self.PDM_DualComm)))\n logger.info 
(\"Number of BSM vehicles transmitting via DSRC(%d), Cellular(%d), and DSRC or Cellular(%d)\"\n % (len(self.BSM_DSRC), len(self.BSM_Cellular), len(self.BSM_DualComm)))\n logger.info (\"Number of Dual PDM-BSM vehicles transmitting DSRC(%d), Cellular(%d), and DSRC or Cellular(%d)\"\n % (len(self.PDMBSM_DSRC), len(self.PDMBSM_Cellular), len(self.PDMBSM_DualComm)))\n\n\n\n def read(self, fileType):\n for tmp_file in self.temp_files:\n df = pd.read_pickle(tmp_file)\n\n # print df.head(5)\n if fileType == 'VISSIM':\n df['location_x'] = df['location_x'] * 100 / 2.54 / 12 #Convert meters to feet\n df['location_y'] = df['location_y'] * 100 / 2.54 / 12 #Convert meters to feet\n\n\n #Remove and VehicleID that are in the same time period\n df.drop_duplicates(cols=['vehicle_ID', 'time'], take_last=True, inplace=True)\n grps = df.groupby('time')\n for tp, grp in grps:\n yield tp, grp.sort('vehicle_ID')\n\n\n\n#*************************************************************************\nclass Trajectories_Tests(unittest.TestCase):\n\n def setUp(self):\n from TCALoadControl import ControlFiles\n from numpy import arange\n import random\n\n r = random.Random()\n r.seed(1234)\n\n self.CF = ControlFiles('test.xml')\n\n self.CF.control_values['OutputLevel'][0] = 0\n\n self.CF.control_values['XColumn'][0] = 'x'\n self.CF.control_values['YColumn'][0] = 'y'\n self.CF.control_values['TimeColumn'][0] = 'time'\n self.CF.control_values['IDColumn'][0] = 'ID'\n self.CF.control_values['SpdColumn'][0] = 'spd'\n self.CF.control_values['TypeColumn'][0] = 'type'\n\n\n with open('test_csv_input_file_del_me.csv', 'wb') as fout:\n\n self.vehicles = sorted(['B856', 'C234', 'D098', 'E342', 'W908', 'P342', 'Q231', 'T932', 'P212', 'A093'])\n\n fout.write('ID,time,spd,x,y,type\\n')\n for num in arange(0, 10.5, 0.5):\n\n for c, vehicle in enumerate(self.vehicles):\n spd = r.randint(0,70) * 1.0\n x = r.random()\n y = r.random()\n\n if c==9:\n type_val = '3'\n elif c>4:\n type_val = '2'\n else:\n type_val = '1'\n\n fout.write(vehicle + ',' + str(num)+ ',' + str(spd) + ',' + str(x)[0:5] + ',' + str(y)[0:5] + ',' + type_val + '\\n')\n\n with open('test_csv_input_file_del_me_with_acc.csv', 'wb') as fout:\n\n self.vehicles = sorted(['B856', 'C234', 'D098', 'E342', 'W908', 'P342', 'Q231', 'T932', 'P212', 'A093'])\n\n a_val = range(-4, 6, 1)\n\n fout.write('ID,time,spd,x,y,type,acc\\n')\n for num in arange(0, 10.5, 0.5):\n\n for c, vehicle in enumerate(self.vehicles):\n spd = r.randint(0,70) * 1.0\n a = a_val[c] * 1.1\n x = r.random()\n y = r.random()\n\n if c==9:\n type_val = '3'\n elif c>4:\n type_val = '2'\n else:\n type_val = '1'\n\n fout.write(vehicle + ',' + str(num)+ ',' + str(spd) + ',' + str(x)[0:5] + ',' + str(y)[0:5]\n + ',' + type_val + ',' + str(a) + '\\n')\n\n\n\n with open('test_vissim_input_file_del_me.fzp', 'wb') as fout:\n fout.write(r\"\"\"Vehicle Record\n\nFile: c:\\users\\m28050\\documents\\projects\\fhwa\\tca\\v_2 vissim\\intersection.inp\nComment:\nDate: Monday, September 30, 2013 11:21:06 AM\nVISSIM: 5.40-08 [38878]\n\nVehNr : Number of the Vehicle\nv : Speed [mph] at the end of the simulation step\nType : Number of the Vehicle Type\nt : Simulation Time [s]\nWorldX : World coordinate x (vehicle front end at the end of the simulation step)\nWorldY : World coordinate y (vehicle front end at the end of the simulation step)\n\n VehNr; v; Type; t; WorldX; WorldY; a;\n 1; 32.18; 1; 54.8; -4925.8665; -2581.9891; -2.32\n 2; 32.33; 2; 55.0; -4928.7503; -2581.9898; 4.52\n 3; 32.47; 2; 55.2; -4931.6471; -2581.9906; 13.23\n 4; 
32.62; 1; 55.4; -4934.5570; -2581.9913; 0.23\n 5; 32.77; 1; 55.6; -4937.4800; -2581.9920; -2.21\n 6; 32.91; 2; 55.8; -4940.4160; -2581.9928; -12.32\n 7; 33.06; 3; 56.0; -4943.3651; -2581.9935; 0.44\n \"\"\")\n\n\n\n # @unittest.skip(\"testing skipping\")\n def test_load_read_csv(self):\n\n self.CF.control_values['TrajectoryFileName'][0] = 'test_csv_input_file_del_me.csv'\n self.CF.control_values['BSMMarketPenetration'][0] = 20\n self.CF.control_values['BSMDSRCMarketPenetration'][0] = 50\n self.CF.control_values['BSMCellularMarketPenetration'][0] = 50\n self.CF.map_dictionary()\n\n trj = Trajectories()\n trj.load(self.CF)\n\n assert len(trj.equip_vehicles) == 2 # 20% of 10\n assert len(trj.equip_PDM) == 0\n assert len(trj.DSRC_list) == 1\n assert len(trj.cellular_list) == 1\n assert trj.DSRC_list[0] != trj.cellular_list[0]\n\n c = 0\n for tp, df in trj.read(False):\n c=c+1\n assert c == 21 # every 10th of a sec BSM\n\n # @unittest.skip(\"testing skipping\")\n def test_load_read_csv_with_accel(self):\n\n self.CF.control_values['TrajectoryFileName'][0] = 'test_csv_input_file_del_me_with_acc.csv'\n self.CF.control_values['AccelColumn'][0] = 'acc'\n self.CF.control_values['BSMMarketPenetration'][0] = 20\n self.CF.control_values['BSMDSRCMarketPenetration'][0] = 50\n self.CF.control_values['BSMCellularMarketPenetration'][0] = 50\n self.CF.map_dictionary()\n\n trj = Trajectories()\n trj.load(self.CF)\n\n c = 0\n for tp, df in trj.read(False):\n if c==1:\n assert df.loc[12, 'accel_instantaneous'] == -2.2\n c=c+1\n assert c == 21 # every 10th of a sec BSM\n\n\n\n\n # @unittest.skip(\"testing skipping\")\n def test_load_read_csv_by_IDs(self):\n\n self.CF.control_values['TrajectoryFileName'][0] = 'test_csv_input_file_del_me.csv'\n self.CF.control_values['PDMVehicleIDs'][0] = ['Q231']\n self.CF.control_values['PDMDSRCVehicleIDs'][0] = ['Q231']\n self.CF.map_dictionary()\n\n trj = Trajectories()\n trj.load(self.CF)\n\n c = 0\n for tp, df in trj.read(False):\n c=c+1\n assert c == 11 #Every 1 sec (PDM)\n assert len(trj.PDM_DSRC) == 1\n\n # @unittest.skip(\"testing skipping\")\n def test_load_read_vissim(self):\n self.CF.control_values['TrajectoryFileName'][0] = 'test_vissim_input_file_del_me.fzp'\n self.CF.control_values['FileType'][0] = 'VISSIM'\n self.CF.control_values['PDMVehicleTypes'][0] = [1]\n self.CF.control_values['BSMVehicleTypes'][0] = [2]\n self.CF.control_values['PDMDSRCVehicleTypes'][0] = [1]\n self.CF.control_values['BSMDualCommVehicleTypes'][0] = [2]\n\n self.CF.map_dictionary()\n\n trj = Trajectories()\n trj.load(self.CF)\n\n assert len(trj.equip_vehicles) == 6\n assert trj.equip_PDM == [1, 4, 5]\n assert trj.equip_BSM == [2, 3, 6]\n assert trj.PDM_DSRC == [1, 4, 5]\n assert trj.BSM_DualComm == [2, 3, 6]\n\n c = 0\n for tp, df in trj.read(True):\n if c ==0:\n assert df['vehicle_ID'][0] == 1\n assert df['time'][0] == 54.8\n c=c+1\n assert c == 6\n\n # @unittest.skip(\"testing skipping\")\n def test_mix_PDE_BSM_markpen(self):\n\n self.CF.control_values['TrajectoryFileName'][0] = 'test_csv_input_file_del_me.csv'\n self.CF.control_values['BSMMarketPenetration'][0] = 20\n self.CF.control_values['PDMMarketPenetration'][0] = 20\n self.CF.map_dictionary()\n\n trj = Trajectories()\n trj.load(self.CF)\n\n assert trj.equip_vehicles == ['C234', 'E342', 'P212', 'Q231']\n assert trj.equip_PDM == ['C234', 'P212']\n assert trj.equip_BSM == ['Q231', 'E342']\n\n c = 0\n for tp, df in trj.read(False):\n c=c+1\n assert c == 21 #every 10th of a sec\n\n # @unittest.skip(\"testing skipping\")\n def 
test_mix_PDE_BSM_type(self):\n\n self.CF.control_values['TrajectoryFileName'][0] = 'test_csv_input_file_del_me.csv'\n self.CF.control_values['PDMVehicleTypes'][0] = [1]\n self.CF.control_values['BSMVehicleTypes'][0] = [2]\n self.CF.map_dictionary()\n\n trj = Trajectories()\n trj.load(self.CF)\n\n assert trj.equip_vehicles == self.vehicles[0:9] #all but last one which is type 3\n assert sorted(trj.equip_PDM) == self.vehicles[0:5] #first 5 vehicles\n assert sorted(trj.equip_BSM) == self.vehicles[5:9] #vehicle 6-8\n\n c = 0\n for tp, df in trj.read(False):\n c=c+1\n assert c == 21 #every 10th of a sec\n\n # @unittest.skip(\"testing skipping\")\n def test_mix_PDE_BSM_type_equal(self):\n\n with open('test_csv_input_file_del_me.csv', 'a') as fout:\n fout.write('GGG1,10.0,34.2,23.4,2324.2,1\\n')\n\n self.CF.control_values['TrajectoryFileName'][0] = 'test_csv_input_file_del_me.csv'\n self.CF.control_values['BSMMarketPenetration'][0] = 50\n self.CF.control_values['PDMMarketPenetration'][0] = 50\n self.CF.map_dictionary()\n\n trj = Trajectories()\n trj.load(self.CF)\n\n assert len(trj.equip_vehicles) == 11\n assert len(trj.equip_PDM) == 6\n assert len(trj.equip_BSM) == 5\n\n\n def tearDown(self):\n import os\n os.remove('test_csv_input_file_del_me_with_acc.csv')\n os.remove('test_csv_input_file_del_me.csv')\n os.remove('test_vissim_input_file_del_me.fzp')\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"scipy.spatial.KDTree"
],
[
"numpy.arange",
"pandas.concat",
"pandas.read_pickle",
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
muntazirabidi/boss-sbi | [
"fae016eb10b64153391499276d238ccdf660df88"
] | [
"bin/make_halo_cnf_data.py"
] | [
"import os\nimport numpy as np \nfrom simbig import halos as Halos\n\nnp.random.seed(918234) \n\ntheta_x_pairs = []\nfor i in range(1000): \n # read in halo catalog\n halos = Halos.Quijote_LHC_HR(i, z=0.5)\n\n # impose random halo mass limit as a proxy for baryonic effect \n Mlim = np.random.uniform(12.5, 13.0)\n\n theta_cosmo = Halos.Quijote_LHC_cosmo(i)\n\n # observable: I'm goign to use Nhalo as a proxy for some observable \n Nhalos = np.sum(np.array(halos['Mass']) > Mlim)\n \n # (parameter, data) pair\n theta_x = np.concatenate([theta_cosmo, [Mlim], [Nhalos]])\n theta_x_pairs.append(theta_x) \n\nnp.save(os.path.join(os.environ['QUIJOTE_DIR'], 'chang', 'halo_cnf_data.npy'), np.array(theta_x_pairs))\n"
] | [
[
"numpy.concatenate",
"numpy.random.uniform",
"numpy.array",
"numpy.random.seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ludovicdmt/python-meegkit | [
"4aa4ba49354b996be20eda41660a550d1bd31f9a"
] | [
"meegkit/utils/trca.py"
] | [
"\"\"\"TRCA utils.\"\"\"\nimport numpy as np\n\nfrom scipy.signal import filtfilt, cheb1ord, cheby1\nfrom scipy import stats\n\n\ndef round_half_up(num, decimals=0):\n \"\"\"Round half up round the last decimal of the number.\n\n The rules are:\n from 0 to 4 rounds down\n from 5 to 9 rounds up\n\n Parameters\n ----------\n num : float\n Number to round\n decimals : number of decimals\n\n Returns\n -------\n num rounded\n \"\"\"\n multiplier = 10 ** decimals\n return int(np.floor(num * multiplier + 0.5) / multiplier)\n\n\ndef normfit(data, ci=0.95):\n \"\"\"Compute the mean, std and confidence interval for them.\n\n Parameters\n ----------\n data : array, shape=()\n Input data.\n ci : float\n Confidence interval (default=0.95).\n\n Returns\n -------\n m : mean\n sigma : std deviation\n [m - h, m + h] : confidence interval of the mean\n [sigmaCI_lower, sigmaCI_upper] : confidence interval of the std\n \"\"\"\n arr = 1.0 * np.array(data)\n num = len(arr)\n avg, std_err = np.mean(arr), stats.sem(arr)\n h_int = std_err * stats.t.ppf((1 + ci) / 2., num - 1)\n var = np.var(data, ddof=1)\n var_ci_upper = var * (num - 1) / stats.chi2.ppf((1 - ci) / 2, num - 1)\n var_ci_lower = var * (num - 1) / stats.chi2.ppf(1 - (1 - ci) / 2, num - 1)\n sigma = np.sqrt(var)\n sigma_ci_lower = np.sqrt(var_ci_lower)\n sigma_ci_upper = np.sqrt(var_ci_upper)\n\n return avg, sigma, [avg - h_int, avg +\n h_int], [sigma_ci_lower, sigma_ci_upper]\n\n\ndef itr(n, p, t):\n \"\"\"Compute information transfer rate (ITR).\n\n Definition in [1]_.\n\n Parameters\n ----------\n n : int\n Number of targets.\n p : float\n Target identification accuracy (0 <= p <= 1).\n t : float\n Average time for a selection (s).\n\n Returns\n -------\n itr : float\n Information transfer rate [bits/min]\n\n References\n ----------\n .. [1] M. Cheng, X. Gao, S. Gao, and D. Xu,\n \"Design and Implementation of a Brain-Computer Interface With High\n Transfer Rates\", IEEE Trans. Biomed. Eng. 49, 1181-1186, 2002.\n\n \"\"\"\n itr = 0\n\n if (p < 0 or 1 < p):\n raise ValueError('Accuracy need to be between 0 and 1.')\n elif (p < 1 / n):\n raise ValueError('ITR might be incorrect because accuracy < chance')\n itr = 0\n elif (p == 1):\n itr = np.log2(n) * 60 / t\n else:\n itr = (np.log2(n) + p * np.log2(p) + (1 - p) *\n np.log2((1 - p) / (n - 1))) * 60 / t\n\n return itr\n\n\ndef bandpass(eeg, sfreq, Wp, Ws):\n \"\"\"Filter bank design for decomposing EEG data into sub-band components.\n\n Parameters\n ----------\n eeg : np.array, shape=(n_samples, n_chans[, n_trials])\n Training data.\n sfreq : int\n Sampling frequency of the data.\n Wp : 2-tuple\n Passband for Chebyshev filter.\n Ws : 2-tuple\n Stopband for Chebyshev filter.\n\n Returns\n -------\n y: np.array, shape=(n_trials, n_chans, n_samples)\n Sub-band components decomposed by a filter bank.\n\n See Also\n --------\n scipy.signal.cheb1ord :\n Chebyshev type I filter order selection.\n\n \"\"\"\n # Chebyshev type I filter order selection.\n N, Wn = cheb1ord(Wp, Ws, 3, 40, fs=sfreq)\n\n # Chebyshev type I filter design\n B, A = cheby1(N, 0.5, Wn, btype=\"bandpass\", fs=sfreq)\n\n # the arguments 'axis=0, padtype='odd', padlen=3*(max(len(B),len(A))-1)'\n # correspond to Matlab filtfilt : https://dsp.stackexchange.com/a/47945\n y = filtfilt(B, A, eeg, axis=0, padtype='odd',\n padlen=3 * (max(len(B), len(A)) - 1))\n return y\n"
] | [
[
"scipy.stats.chi2.ppf",
"numpy.log2",
"scipy.signal.cheby1",
"numpy.sqrt",
"scipy.stats.t.ppf",
"numpy.mean",
"numpy.floor",
"numpy.var",
"scipy.stats.sem",
"numpy.array",
"scipy.signal.cheb1ord"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
jraman/tensorflow | [
"9028828d3b8a2a622f7203a317002cc749531695",
"9028828d3b8a2a622f7203a317002cc749531695"
] | [
"tensorflow/python/framework/ops.py",
"tensorflow/python/keras/engine/network.py"
] | [
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Classes and functions used to construct graphs.\"\"\"\n# pylint: disable=g-bad-name\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport re\nimport sys\nimport threading\nimport types\n\nimport numpy as np\nimport six\nfrom six.moves import map # pylint: disable=redefined-builtin\nfrom six.moves import xrange # pylint: disable=redefined-builtin\n\nfrom tensorflow.core.framework import attr_value_pb2\nfrom tensorflow.core.framework import function_pb2\nfrom tensorflow.core.framework import graph_pb2\nfrom tensorflow.core.framework import node_def_pb2\nfrom tensorflow.core.framework import op_def_pb2\nfrom tensorflow.core.framework import versions_pb2\nfrom tensorflow.core.protobuf import config_pb2\n# pywrap_tensorflow must be imported first to avoid profobuf issues.\n# (b/143110113)\n# pylint: disable=invalid-import-order,g-bad-import-order,unused-import\nfrom tensorflow.python import pywrap_tensorflow\nfrom tensorflow.python import pywrap_tfe\n# pylint: enable=invalid-import-order,g-bad-import-order,unused-import\nfrom tensorflow.python import tf2\nfrom tensorflow.python.client import pywrap_tf_session\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import core\nfrom tensorflow.python.eager import monitoring\nfrom tensorflow.python.eager import tape\nfrom tensorflow.python.framework import c_api_util\nfrom tensorflow.python.framework import composite_tensor\nfrom tensorflow.python.framework import device as pydev\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import indexed_slices\nfrom tensorflow.python.framework import registry\nfrom tensorflow.python.framework import tensor_conversion_registry\nfrom tensorflow.python.framework import tensor_like\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import traceable_stack\nfrom tensorflow.python.framework import versions\nfrom tensorflow.python.ops import control_flow_util\nfrom tensorflow.python.platform import app\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.util import compat\nfrom tensorflow.python.util import decorator_utils\nfrom tensorflow.python.util import deprecation\nfrom tensorflow.python.util import function_utils\nfrom tensorflow.python.util import lock_util\nfrom tensorflow.python.util import memory\nfrom tensorflow.python.util import object_identity\nfrom tensorflow.python.util import tf_contextlib\nfrom tensorflow.python.util import tf_stack\nfrom tensorflow.python.util.compat import collections_abc\nfrom tensorflow.python.util.deprecation import deprecated_args\nfrom tensorflow.python.util.lazy_loader import LazyLoader\nfrom 
tensorflow.python.util.tf_export import kwarg_only\nfrom tensorflow.python.util.tf_export import tf_export\n\nag_ctx = LazyLoader(\n \"ag_ctx\", globals(),\n \"tensorflow.python.autograph.core.ag_ctx\")\n\n\n# Temporary global switches determining if we should enable the work-in-progress\n# calls to the C API. These will be removed once all functionality is supported.\n_USE_C_API = True\n_USE_C_SHAPES = True\n\n_api_usage_gauge = monitoring.BoolGauge(\n \"/tensorflow/api/ops_eager_execution\",\n \"Whether ops.enable_eager_execution() is called.\")\n\n\n# pylint: disable=protected-access\n_TensorLike = tensor_like._TensorLike\n_DTYPES_INTERN_TABLE = dtypes._INTERN_TABLE\n# pylint: enable=protected-access\n\n\ndef tensor_id(tensor):\n \"\"\"Returns a unique identifier for this Tensor.\"\"\"\n return tensor._id # pylint: disable=protected-access\n\n\nclass _UserDeviceSpec(object):\n \"\"\"Store user-specified device and provide computation of merged device.\"\"\"\n\n def __init__(self, device_name_or_function):\n self._device_name_or_function = device_name_or_function\n self.display_name = str(self._device_name_or_function)\n self.function = device_name_or_function\n self.raw_string = None\n\n if isinstance(device_name_or_function, pydev.MergeDevice):\n self.is_null_merge = device_name_or_function.is_null_merge\n\n elif callable(device_name_or_function):\n self.is_null_merge = False\n dev_func = self._device_name_or_function\n func_name = function_utils.get_func_name(dev_func)\n func_code = function_utils.get_func_code(dev_func)\n if func_code:\n fname = func_code.co_filename\n lineno = func_code.co_firstlineno\n else:\n fname = \"unknown\"\n lineno = -1\n self.display_name = \"%s<%s, %d>\" % (func_name, fname, lineno)\n\n elif device_name_or_function is None:\n # NOTE(taylorrobie): This MUST be False. 
None signals a break in the\n # device stack, so `is_null_merge` must be False for such a case to\n # allow callers to safely skip over null merges without missing a None.\n self.is_null_merge = False\n\n else:\n self.raw_string = device_name_or_function\n self.function = pydev.merge_device(device_name_or_function)\n self.is_null_merge = self.function.is_null_merge\n\n # We perform this check in __init__ because it is of non-trivial cost,\n # and self.string_merge is typically called many times.\n self.fast_string_merge = isinstance(self.function, pydev.MergeDevice)\n\n def string_merge(self, node_def):\n if self.fast_string_merge:\n return self.function.shortcut_string_merge(node_def)\n\n return compat.as_str(_device_string(self.function(node_def)))\n\n\nclass NullContextmanager(object):\n\n def __init__(self, *args, **kwargs):\n pass\n\n def __enter__(self):\n pass\n\n def __exit__(self, type_arg, value_arg, traceback_arg):\n return False # False values do not suppress exceptions\n\n\ndef _override_helper(clazz_object, operator, func):\n \"\"\"Overrides (string) operator on Tensors to call func.\n\n Args:\n clazz_object: the class to override for; either Tensor or SparseTensor.\n operator: the string name of the operator to override.\n func: the function that replaces the overridden operator.\n\n Raises:\n ValueError: If operator has already been overwritten,\n or if operator is not allowed to be overwritten.\n \"\"\"\n existing = getattr(clazz_object, operator, None)\n if existing is not None:\n # Check to see if this is a default method-wrapper or slot wrapper which\n # will be true for the comparison operators.\n if not isinstance(existing, type(object.__lt__)):\n raise ValueError(\"operator %s cannot be overwritten again on class %s.\" %\n (operator, clazz_object))\n if operator not in Tensor.OVERLOADABLE_OPERATORS:\n raise ValueError(\"Overriding %s is disallowed\" % operator)\n setattr(clazz_object, operator, func)\n\n\ndef _as_graph_element(obj):\n \"\"\"Convert `obj` to a graph element if possible, otherwise return `None`.\n\n Args:\n obj: Object to convert.\n\n Returns:\n The result of `obj._as_graph_element()` if that method is available;\n otherwise `None`.\n \"\"\"\n conv_fn = getattr(obj, \"_as_graph_element\", None)\n if conv_fn and callable(conv_fn):\n return conv_fn()\n return None\n\n\n_TENSOR_LIKE_TYPES = tuple()\n\n\ndef is_dense_tensor_like(t):\n \"\"\"EXPERIMENTAL: Returns true if `t` implements the tensor interface.\n\n See `register_dense_tensor_like_type()` for the current definition of a\n \"tensor-like type\".\n\n Args:\n t: An object.\n\n Returns:\n True iff `t` is an instance of one of the registered \"tensor-like\" types.\n \"\"\"\n return isinstance(t, _TENSOR_LIKE_TYPES)\n\n\ndef register_dense_tensor_like_type(tensor_type):\n \"\"\"EXPERIMENTAL: Registers `tensor_type` as implementing the tensor interface.\n\n A \"tensor-like type\" can represent a single dense tensor, and implements\n the `name`, `dtype` and `shape` properties.\n\n Args:\n tensor_type: A type implementing the tensor interface.\n\n Raises:\n TypeError: If `tensor_type` does not implement the tensor interface.\n \"\"\"\n if not (hasattr(tensor_type, \"name\") and\n isinstance(tensor_type.name, property)):\n raise TypeError(\"Type %s does not define a `name` property\" %\n tensor_type.__name__)\n if not (hasattr(tensor_type, \"dtype\") and\n isinstance(tensor_type.dtype, property)):\n raise TypeError(\"Type %s does not define a `dtype` property\" %\n tensor_type.__name__)\n if not 
(hasattr(tensor_type, \"shape\") and\n isinstance(tensor_type.shape, property)):\n raise TypeError(\"Type %s does not define a `shape` property\" %\n tensor_type.__name__)\n # We expect this list to be small, so choose quadratic complexity\n # for registration, so that we have a tuple that can be used for\n # more efficient `isinstance` checks later.\n global _TENSOR_LIKE_TYPES\n _TENSOR_LIKE_TYPES = tuple(list(_TENSOR_LIKE_TYPES) + [tensor_type])\n\n\ndef uid():\n \"\"\"A unique (within this program execution) integer.\"\"\"\n return pywrap_tfe.TFE_Py_UID()\n\n\ndef numpy_text(tensor, is_repr=False):\n \"\"\"Human readable representation of a tensor's numpy value.\"\"\"\n if tensor.dtype.is_numpy_compatible:\n # pylint: disable=protected-access\n text = repr(tensor._numpy()) if is_repr else str(tensor._numpy())\n # pylint: enable=protected-access\n else:\n text = \"<unprintable>\"\n if \"\\n\" in text:\n text = \"\\n\" + text\n return text\n\n@tf_export(v1=[\"enable_tensor_equality\"])\ndef enable_tensor_equality():\n \"\"\"Compare Tensors with element-wise comparison and thus be unhashable.\n\n Comparing tensors with element-wise allows comparisons such as\n tf.Variable(1.0) == 1.0. Element-wise equality implies that tensors are\n unhashable. Thus tensors can no longer be directly used in sets or as a key in\n a dictionary.\n \"\"\"\n Tensor._USE_EQUALITY = True # pylint: disable=protected-access\n\n@tf_export(v1=[\"disable_tensor_equality\"])\ndef disable_tensor_equality():\n \"\"\"Compare Tensors by their id and be hashable.\n\n This is a legacy behaviour of TensorFlow and is highly discouraged.\n \"\"\"\n Tensor._USE_EQUALITY = False # pylint: disable=protected-access\n\n\n@tf_export(\"Tensor\")\nclass Tensor(_TensorLike):\n \"\"\"A tensor represents a rectangular array of data.\n\n When writing a TensorFlow program, the main object you manipulate and pass\n around is the `tf.Tensor`. A `tf.Tensor` object represents a rectangular array\n of arbitrary dimension, filled with data of a specific data type.\n\n A `tf.Tensor` has the following properties:\n\n * a data type (float32, int32, or string, for example)\n * a shape\n\n Each element in the Tensor has the same data type, and the data type is always\n known.\n\n In eager execution, which is the default mode in TensorFlow, results are\n calculated immediately.\n\n >>> # Compute some values using a Tensor\n >>> c = tf.constant([[1.0, 2.0], [3.0, 4.0]])\n >>> d = tf.constant([[1.0, 1.0], [0.0, 1.0]])\n >>> e = tf.matmul(c, d)\n >>> print(e)\n tf.Tensor(\n [[1. 3.]\n [3. 7.]], shape=(2, 2), dtype=float32)\n\n\n Note that during eager execution, you may discover your `Tensors` are actually\n of type `EagerTensor`. This is an internal detail, but it does give you\n access to a useful function, `numpy`:\n\n >>> type(e)\n <class '...ops.EagerTensor'>\n >>> print(e.numpy())\n [[1. 3.]\n [3. 7.]]\n\n TensorFlow can define computations without immediately executing them, most\n commonly inside `tf.function`s, as well as in (legacy) Graph mode. 
In those\n cases, the shape (that is, the rank of the Tensor and the size of\n each dimension) might be only partially known.\n\n Most operations produce tensors of fully-known shapes if the shapes of their\n inputs are also fully known, but in some cases it's only possible to find the\n shape of a tensor at execution time.\n\n There are specialized tensors; for these, see `tf.Variable`, `tf.constant`,\n `tf.placeholder`, `tf.SparseTensor`, and `tf.RaggedTensor`.\n\n For more on Tensors, see the [guide](https://tensorflow.org/guide/tensor`).\n \"\"\"\n\n # List of Python operators that we allow to override.\n OVERLOADABLE_OPERATORS = {\n # Binary.\n \"__add__\",\n \"__radd__\",\n \"__sub__\",\n \"__rsub__\",\n \"__mul__\",\n \"__rmul__\",\n \"__div__\",\n \"__rdiv__\",\n \"__truediv__\",\n \"__rtruediv__\",\n \"__floordiv__\",\n \"__rfloordiv__\",\n \"__mod__\",\n \"__rmod__\",\n \"__lt__\",\n \"__le__\",\n \"__gt__\",\n \"__ge__\",\n \"__ne__\",\n \"__eq__\",\n \"__and__\",\n \"__rand__\",\n \"__or__\",\n \"__ror__\",\n \"__xor__\",\n \"__rxor__\",\n \"__getitem__\",\n \"__pow__\",\n \"__rpow__\",\n # Unary.\n \"__invert__\",\n \"__neg__\",\n \"__abs__\",\n \"__matmul__\",\n \"__rmatmul__\"\n }\n\n # Whether to allow hashing or numpy-style equality\n _USE_EQUALITY = tf2.enabled()\n\n def __init__(self, op, value_index, dtype):\n \"\"\"Creates a new `Tensor`.\n\n Args:\n op: An `Operation`. `Operation` that computes this tensor.\n value_index: An `int`. Index of the operation's endpoint that produces\n this tensor.\n dtype: A `DType`. Type of elements stored in this tensor.\n\n Raises:\n TypeError: If the op is not an `Operation`.\n \"\"\"\n if not isinstance(op, Operation):\n raise TypeError(\"op needs to be an Operation: %s\" % op)\n self._op = op\n self._value_index = value_index\n self._dtype = dtypes.as_dtype(dtype)\n # This will be set by self._as_tf_output().\n self._tf_output = None\n # This will be set by self.shape().\n self._shape_val = None\n # List of operations that use this Tensor as input. We maintain this list\n # to easily navigate a computation graph.\n self._consumers = []\n self._id = uid()\n self._name = None\n\n @staticmethod\n def _create_with_tf_output(op, value_index, dtype, tf_output):\n ret = Tensor(op, value_index, dtype)\n ret._tf_output = tf_output\n return ret\n\n @property\n def op(self):\n \"\"\"The `Operation` that produces this tensor as an output.\"\"\"\n return self._op\n\n @property\n def dtype(self):\n \"\"\"The `DType` of elements in this tensor.\"\"\"\n return self._dtype\n\n @property\n def graph(self):\n \"\"\"The `Graph` that contains this tensor.\"\"\"\n return self._op.graph\n\n @property\n def name(self):\n \"\"\"The string name of this tensor.\"\"\"\n if self._name is None:\n if not self._op.name:\n raise ValueError(\"Operation was not named: %s\" % self._op)\n self._name = \"%s:%d\" % (self._op.name, self._value_index)\n return self._name\n\n @property\n def device(self):\n \"\"\"The name of the device on which this tensor will be produced, or None.\"\"\"\n return self._op.device\n\n @property\n def shape(self):\n \"\"\"Returns the `TensorShape` that represents the shape of this tensor.\n\n The shape is computed using shape inference functions that are\n registered in the Op for each `Operation`. See\n `tf.TensorShape`\n for more details of what a shape represents.\n\n The inferred shape of a tensor is used to provide shape\n information without having to execute the underlying kernel. 
This\n can be used for debugging and providing early error messages. For\n example:\n\n ```python\n >>> c = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\n >>> print(c.shape) # will be TensorShape([2, 3])\n (2, 3)\n\n >>> d = tf.constant([[1.0, 0.0], [0.0, 1.0], [1.0, 0.0], [0.0, 1.0]])\n >>> print(d.shape)\n (4, 2)\n\n # Raises a ValueError, because `c` and `d` do not have compatible\n # inner dimensions.\n >>> e = tf.matmul(c, d)\n Traceback (most recent call last):\n ...\n tensorflow.python.framework.errors_impl.InvalidArgumentError: Matrix\n size-incompatible: In[0]: [2,3], In[1]: [4,2] [Op:MatMul] name: MatMul/\n\n # This works because we have compatible shapes.\n >>> f = tf.matmul(c, d, transpose_a=True, transpose_b=True)\n >>> print(f.shape)\n (3, 4)\n\n ```\n\n In some cases, the inferred shape may have unknown dimensions. If\n the caller has additional information about the values of these\n dimensions, `Tensor.set_shape()` can be used to augment the\n inferred shape.\n\n Returns:\n A `tf.TensorShape` representing the shape of this tensor.\n\n \"\"\"\n if self._shape_val is None:\n self._shape_val = self._c_api_shape()\n return self._shape_val\n\n def _c_api_shape(self):\n \"\"\"Returns the TensorShape of this tensor according to the C API.\"\"\"\n c_graph = self._op._graph._c_graph # pylint: disable=protected-access\n shape_vec, unknown_shape = pywrap_tf_session.TF_GraphGetTensorShapeHelper(\n c_graph, self._as_tf_output())\n if unknown_shape:\n return tensor_shape.unknown_shape()\n else:\n shape_vec = [None if d == -1 else d for d in shape_vec]\n return tensor_shape.TensorShape(shape_vec)\n\n @property\n def _shape(self):\n logging.warning(\"Tensor._shape is private, use Tensor.shape \"\n \"instead. Tensor._shape will eventually be removed.\")\n return self.shape\n\n @_shape.setter\n def _shape(self, value):\n raise ValueError(\n \"Tensor._shape cannot be assigned, use Tensor.set_shape instead.\")\n\n def _disallow_when_autograph_disabled(self, task):\n raise errors.OperatorNotAllowedInGraphError(\n \"{} is not allowed: AutoGraph is disabled in this function.\"\n \" Try decorating it directly with @tf.function.\".format(task))\n\n def _disallow_when_autograph_enabled(self, task):\n raise errors.OperatorNotAllowedInGraphError(\n \"{} is not allowed: AutoGraph did not convert this function. Try\"\n \" decorating it directly with @tf.function.\".format(task))\n\n def _disallow_in_graph_mode(self, task):\n raise errors.OperatorNotAllowedInGraphError(\n \"{} is not allowed in Graph execution. 
Use Eager execution or decorate\"\n \" this function with @tf.function.\".format(task))\n\n def _disallow_bool_casting(self):\n if ag_ctx.control_status_ctx().status == ag_ctx.Status.DISABLED:\n self._disallow_when_autograph_disabled(\n \"using a `tf.Tensor` as a Python `bool`\")\n elif ag_ctx.control_status_ctx().status == ag_ctx.Status.ENABLED:\n self._disallow_when_autograph_enabled(\n \"using a `tf.Tensor` as a Python `bool`\")\n else:\n # Default: V1-style Graph execution.\n self._disallow_in_graph_mode(\"using a `tf.Tensor` as a Python `bool`\")\n\n def _disallow_iteration(self):\n if ag_ctx.control_status_ctx().status == ag_ctx.Status.DISABLED:\n self._disallow_when_autograph_disabled(\"iterating over `tf.Tensor`\")\n elif ag_ctx.control_status_ctx().status == ag_ctx.Status.ENABLED:\n self._disallow_when_autograph_enabled(\"iterating over `tf.Tensor`\")\n else:\n # Default: V1-style Graph execution.\n self._disallow_in_graph_mode(\"iterating over `tf.Tensor`\")\n\n def __iter__(self):\n if not context.executing_eagerly():\n self._disallow_iteration()\n\n shape = self._shape_tuple()\n if shape is None:\n raise TypeError(\"Cannot iterate over a tensor with unknown shape.\")\n if not shape:\n raise TypeError(\"Cannot iterate over a scalar tensor.\")\n if shape[0] is None:\n raise TypeError(\n \"Cannot iterate over a tensor with unknown first dimension.\")\n return _TensorIterator(self, shape[0])\n\n def _shape_as_list(self):\n if self.shape.ndims is not None:\n return [dim.value for dim in self.shape.dims]\n else:\n return None\n\n def _shape_tuple(self):\n shape = self._shape_as_list()\n if shape is None:\n return None\n return tuple(shape)\n\n def _rank(self):\n \"\"\"Integer rank of this Tensor, if known, else None.\n\n Returns:\n Integer rank or None\n \"\"\"\n return self.shape.ndims\n\n def get_shape(self):\n \"\"\"Alias of `tf.Tensor.shape`.\"\"\"\n return self.shape\n\n def set_shape(self, shape):\n \"\"\"Updates the shape of this tensor.\n\n This method can be called multiple times, and will merge the given\n `shape` with the current shape of this tensor. It can be used to\n provide additional information about the shape of this tensor that\n cannot be inferred from the graph alone. For example, this can be used\n to provide additional information about the shapes of images:\n\n ```python\n _, image_data = tf.compat.v1.TFRecordReader(...).read(...)\n image = tf.image.decode_png(image_data, channels=3)\n\n # The height and width dimensions of `image` are data dependent, and\n # cannot be computed without executing the op.\n print(image.shape)\n ==> TensorShape([Dimension(None), Dimension(None), Dimension(3)])\n\n # We know that each image in this dataset is 28 x 28 pixels.\n image.set_shape([28, 28, 3])\n print(image.shape)\n ==> TensorShape([Dimension(28), Dimension(28), Dimension(3)])\n ```\n\n NOTE: This shape is not enforced at runtime. Setting incorrect shapes can\n result in inconsistencies between the statically-known graph and the runtime\n value of tensors. 
For runtime validation of the shape, use `tf.ensure_shape`\n instead.\n\n Args:\n shape: A `TensorShape` representing the shape of this tensor, a\n `TensorShapeProto`, a list, a tuple, or None.\n\n Raises:\n ValueError: If `shape` is not compatible with the current shape of\n this tensor.\n \"\"\"\n # Reset cached shape.\n self._shape_val = None\n\n # We want set_shape to be reflected in the C API graph for when we run it.\n if not isinstance(shape, tensor_shape.TensorShape):\n shape = tensor_shape.TensorShape(shape)\n dim_list = []\n if shape.dims is None:\n unknown_shape = True\n else:\n unknown_shape = False\n for dim in shape.dims:\n if dim.value is None:\n dim_list.append(-1)\n else:\n dim_list.append(dim.value)\n try:\n pywrap_tf_session.TF_GraphSetTensorShape_wrapper(\n self._op._graph._c_graph, # pylint: disable=protected-access\n self._as_tf_output(),\n dim_list,\n unknown_shape)\n except errors.InvalidArgumentError as e:\n # Convert to ValueError for backwards compatibility.\n raise ValueError(str(e))\n\n @property\n def value_index(self):\n \"\"\"The index of this tensor in the outputs of its `Operation`.\"\"\"\n return self._value_index\n\n def consumers(self):\n \"\"\"Returns a list of `Operation`s that consume this tensor.\n\n Returns:\n A list of `Operation`s.\n \"\"\"\n consumer_names = pywrap_tf_session.TF_OperationOutputConsumers_wrapper(\n self._as_tf_output())\n # pylint: disable=protected-access\n return [\n self.graph._get_operation_by_name_unsafe(name)\n for name in consumer_names\n ]\n # pylint: enable=protected-access\n\n def _as_node_def_input(self):\n \"\"\"Return a value to use for the NodeDef \"input\" attribute.\n\n The returned string can be used in a NodeDef \"input\" attribute\n to indicate that the NodeDef uses this Tensor as input.\n\n Raises:\n ValueError: if this Tensor's Operation does not have a name.\n\n Returns:\n a string.\n \"\"\"\n if not self._op.name:\n raise ValueError(\"Operation was not named: %s\" % self._op)\n if self._value_index == 0:\n return self._op.name\n else:\n return \"%s:%d\" % (self._op.name, self._value_index)\n\n def _as_tf_output(self):\n # pylint: disable=protected-access\n # NOTE: Beyond preventing unnecessary (re-)allocation, the cached object\n # also guarantees that a dictionary of tf_output objects will retain a\n # deterministic (yet unsorted) order which prevents memory blowup in the\n # cache of executor(s) stored for every session.\n if self._tf_output is None:\n self._tf_output = c_api_util.tf_output(self.op._c_op, self.value_index)\n return self._tf_output\n # pylint: enable=protected-access\n\n def __str__(self):\n return \"Tensor(\\\"%s\\\"%s%s%s)\" % (\n self.name,\n (\", shape=%s\" %\n self.get_shape()) if self.get_shape().ndims is not None else \"\",\n (\", dtype=%s\" % self._dtype.name) if self._dtype else \"\",\n (\", device=%s\" % self.device) if self.device else \"\")\n\n def __repr__(self):\n return \"<tf.Tensor '%s' shape=%s dtype=%s>\" % (self.name, self.get_shape(),\n self._dtype.name)\n\n def __hash__(self):\n g = getattr(self, \"graph\", None)\n if (Tensor._USE_EQUALITY and executing_eagerly_outside_functions() and\n (g is None or g.building_function)):\n raise TypeError(\"Tensor is unhashable. 
\"\n \"Instead, use tensor.ref() as the key.\")\n else:\n return id(self)\n\n def __copy__(self):\n # TODO(b/77597810): get rid of Tensor copies.\n cls = self.__class__\n result = cls.__new__(cls)\n result.__dict__.update(self.__dict__)\n return result\n\n # NOTE(mrry): This enables the Tensor's overloaded \"right\" binary\n # operators to run when the left operand is an ndarray, because it\n # accords the Tensor class higher priority than an ndarray, or a\n # numpy matrix.\n # TODO(mrry): Convert this to using numpy's __numpy_ufunc__\n # mechanism, which allows more control over how Tensors interact\n # with ndarrays.\n __array_priority__ = 100\n\n def __array__(self):\n raise NotImplementedError(\"Cannot convert a symbolic Tensor ({}) to a numpy\"\n \" array.\".format(self.name))\n\n def __len__(self):\n raise TypeError(\"len is not well defined for symbolic Tensors. ({}) \"\n \"Please call `x.shape` rather than `len(x)` for \"\n \"shape information.\".format(self.name))\n\n @staticmethod\n def _override_operator(operator, func):\n _override_helper(Tensor, operator, func)\n\n def __bool__(self):\n \"\"\"Dummy method to prevent a tensor from being used as a Python `bool`.\n\n This overload raises a `TypeError` when the user inadvertently\n treats a `Tensor` as a boolean (most commonly in an `if` or `while`\n statement), in code that was not converted by AutoGraph. For example:\n\n ```python\n if tf.constant(True): # Will raise.\n # ...\n\n if tf.constant(5) < tf.constant(7): # Will raise.\n # ...\n ```\n\n Raises:\n `TypeError`.\n \"\"\"\n self._disallow_bool_casting()\n\n def __nonzero__(self):\n \"\"\"Dummy method to prevent a tensor from being used as a Python `bool`.\n\n This is the Python 2.x counterpart to `__bool__()` above.\n\n Raises:\n `TypeError`.\n \"\"\"\n self._disallow_bool_casting()\n\n def eval(self, feed_dict=None, session=None):\n \"\"\"Evaluates this tensor in a `Session`.\n\n Note: If you are not using `compat.v1` libraries, you should not need this,\n (or `feed_dict` or `Session`). In eager execution (or within `tf.function`)\n you do not need to call `eval`.\n\n Calling this method will execute all preceding operations that\n produce the inputs needed for the operation that produces this\n tensor.\n\n *N.B.* Before invoking `Tensor.eval()`, its graph must have been\n launched in a session, and either a default session must be\n available, or `session` must be specified explicitly.\n\n Args:\n feed_dict: A dictionary that maps `Tensor` objects to feed values. See\n `tf.Session.run` for a description of the valid feed values.\n session: (Optional.) The `Session` to be used to evaluate this tensor. If\n none, the default session will be used.\n\n Returns:\n A numpy array corresponding to the value of this tensor.\n \"\"\"\n return _eval_using_default_session(self, feed_dict, self.graph, session)\n\n @deprecation.deprecated(None, \"Use ref() instead.\")\n def experimental_ref(self):\n return self.ref()\n\n def ref(self):\n # tf.Variable also has the same ref() API. 
If you update the\n # documentation here, please update tf.Variable.ref() as well.\n \"\"\"Returns a hashable reference object to this Tensor.\n\n The primary use case for this API is to put tensors in a set/dictionary.\n We can't put tensors in a set/dictionary as `tensor.__hash__()` is no longer\n available starting Tensorflow 2.0.\n\n The following will raise an exception starting 2.0\n\n >>> x = tf.constant(5)\n >>> y = tf.constant(10)\n >>> z = tf.constant(10)\n >>> tensor_set = {x, y, z}\n Traceback (most recent call last):\n ...\n TypeError: Tensor is unhashable. Instead, use tensor.ref() as the key.\n >>> tensor_dict = {x: 'five', y: 'ten'}\n Traceback (most recent call last):\n ...\n TypeError: Tensor is unhashable. Instead, use tensor.ref() as the key.\n\n Instead, we can use `tensor.ref()`.\n\n >>> tensor_set = {x.ref(), y.ref(), z.ref()}\n >>> x.ref() in tensor_set\n True\n >>> tensor_dict = {x.ref(): 'five', y.ref(): 'ten', z.ref(): 'ten'}\n >>> tensor_dict[y.ref()]\n 'ten'\n\n Also, the reference object provides `.deref()` function that returns the\n original Tensor.\n\n >>> x = tf.constant(5)\n >>> x.ref().deref()\n <tf.Tensor: shape=(), dtype=int32, numpy=5>\n \"\"\"\n return object_identity.Reference(self)\n\n\n# TODO(agarwal): consider getting rid of this.\nclass _EagerTensorBase(Tensor):\n \"\"\"Base class for EagerTensor.\"\"\"\n\n # __complex__, __int__, __float__ and __index__ may copy the tensor to CPU and\n # only work for scalars; values are cast as per numpy.\n def __complex__(self):\n return complex(self._numpy())\n\n def __int__(self):\n return int(self._numpy())\n\n def __long__(self):\n return long(self._numpy())\n\n def __float__(self):\n return float(self._numpy())\n\n def __index__(self):\n return self._numpy().__index__()\n\n def __bool__(self):\n return bool(self._numpy())\n\n __nonzero__ = __bool__\n\n def __format__(self, format_spec):\n return self._numpy().__format__(format_spec)\n\n def __reduce__(self):\n return convert_to_tensor, (self._numpy(),)\n\n def __copy__(self):\n # Eager Tensors are immutable so it's safe to return themselves as a copy.\n return self\n\n def __deepcopy__(self, memo):\n # Eager Tensors are immutable so it's safe to return themselves as a copy.\n del memo\n return self\n\n def __str__(self):\n return \"tf.Tensor(%s, shape=%s, dtype=%s)\" % (numpy_text(self), self.shape,\n self.dtype.name)\n\n def __repr__(self):\n return \"<tf.Tensor: shape=%s, dtype=%s, numpy=%s>\" % (\n self.shape, self.dtype.name, numpy_text(self, is_repr=True))\n\n def __len__(self):\n \"\"\"Returns the length of the first dimension in the Tensor.\"\"\"\n if not self.shape.ndims:\n raise TypeError(\"Scalar tensor has no `len()`\")\n # pylint: disable=protected-access\n try:\n return self._shape_tuple()[0]\n except core._NotOkStatusException as e:\n six.raise_from(core._status_to_exception(e.code, e.message), None)\n\n def _numpy_internal(self):\n raise NotImplementedError()\n\n def _numpy(self):\n # pylint: disable=protected-access\n try:\n return self._numpy_internal()\n except core._NotOkStatusException as e:\n six.raise_from(core._status_to_exception(e.code, e.message), None)\n\n @property\n def dtype(self):\n # Note: using the intern table directly here as this is\n # performance-sensitive in some models.\n return dtypes._INTERN_TABLE[self._datatype_enum()] # pylint: disable=protected-access\n\n def numpy(self):\n \"\"\"Copy of the contents of this Tensor into a NumPy array or scalar.\n\n Unlike NumPy arrays, Tensors are immutable, so this method has 
to copy\n the contents to ensure safety. Use `memoryview` to get a readonly\n view of the contents without doing a copy:\n\n >>> t = tf.constant([42])\n >>> np.array(memoryview(t))\n array([42], dtype=int32)\n\n Note that `memoryview` is only zero-copy for Tensors on CPU. If a Tensor\n is on GPU, it will have to be transferred to CPU first in order for\n `memoryview` to work.\n\n Returns:\n A NumPy array of the same shape and dtype or a NumPy scalar, if this\n Tensor has rank 0.\n\n Raises:\n ValueError: If the dtype of this Tensor does not have a compatible\n NumPy dtype.\n \"\"\"\n # TODO(slebedev): Consider avoiding a copy for non-CPU or remote tensors.\n maybe_arr = self._numpy() # pylint: disable=protected-access\n return maybe_arr.copy() if isinstance(maybe_arr, np.ndarray) else maybe_arr\n\n @property\n def backing_device(self):\n \"\"\"Returns the name of the device holding this tensor's memory.\n\n `.backing_device` is usually the same as `.device`, which returns\n the device on which the kernel of the operation that produced this tensor\n ran. However, some operations can produce tensors on a different device\n (e.g., an operation that executes on the GPU but produces output tensors\n in host memory).\n \"\"\"\n raise NotImplementedError()\n\n def _datatype_enum(self):\n raise NotImplementedError()\n\n def _shape_tuple(self):\n \"\"\"The shape of this Tensor, as a tuple.\n\n This is more performant than tuple(shape().as_list()) as it avoids\n two list and one object creation. Marked private for now as from an API\n perspective, it would be better to have a single performant way of\n getting a shape rather than exposing shape() and shape_tuple()\n (and heaven forbid, shape_list() etc. as well!). Punting on that for now,\n but ideally one would work things out and remove the need for this method.\n\n Returns:\n tuple with the shape.\n \"\"\"\n raise NotImplementedError()\n\n def _rank(self):\n \"\"\"Integer rank of this Tensor.\n\n Unlike regular Tensors, the rank is always known for EagerTensors.\n\n This is more performant than len(self._shape_tuple())\n\n Returns:\n Integer rank\n \"\"\"\n raise NotImplementedError()\n\n def _num_elements(self):\n \"\"\"Number of elements of this Tensor.\n\n Unlike regular Tensors, the number of elements is always known for\n EagerTensors.\n\n This is more performant than tensor.shape.num_elements\n\n Returns:\n Long - num elements in the tensor\n \"\"\"\n raise NotImplementedError()\n\n def _copy_to_device(self, device_name): # pylint: disable=redefined-outer-name\n raise NotImplementedError()\n\n @staticmethod\n def _override_operator(name, func):\n setattr(_EagerTensorBase, name, func)\n\n def _copy_nograd(self, ctx=None, device_name=None):\n \"\"\"Copies tensor to dest device, but doesn't record the operation.\"\"\"\n # Creates a new tensor on the dest device.\n if ctx is None:\n ctx = context.context()\n if device_name is None:\n device_name = ctx.device_name\n # pylint: disable=protected-access\n try:\n ctx.ensure_initialized()\n new_tensor = self._copy_to_device(device_name)\n except core._NotOkStatusException as e:\n six.raise_from(core._status_to_exception(e.code, e.message), None)\n return new_tensor\n\n def _copy(self, ctx=None, device_name=None):\n \"\"\"Copies tensor to dest device.\"\"\"\n new_tensor = self._copy_nograd(ctx, device_name)\n # Record the copy on tape and define backprop copy as well.\n if context.executing_eagerly():\n self_device = self.device\n\n def grad_fun(dresult):\n return [\n 
dresult._copy(device_name=self_device)\n if hasattr(dresult, \"_copy\") else dresult\n ]\n\n tape.record_operation(\"_copy\", [new_tensor], [self], grad_fun)\n return new_tensor\n # pylint: enable=protected-access\n\n @property\n def shape(self):\n if self._tensor_shape is None: # pylint: disable=access-member-before-definition\n # pylint: disable=protected-access\n try:\n # `_tensor_shape` is declared and defined in the definition of\n # `EagerTensor`, in C.\n self._tensor_shape = tensor_shape.TensorShape(self._shape_tuple())\n except core._NotOkStatusException as e:\n six.raise_from(core._status_to_exception(e.code, e.message), None)\n\n return self._tensor_shape\n\n def get_shape(self):\n \"\"\"Alias of Tensor.shape.\"\"\"\n return self.shape\n\n def _shape_as_list(self):\n \"\"\"The shape of the tensor as a list.\"\"\"\n return list(self._shape_tuple())\n\n @property\n def ndim(self):\n \"\"\"Returns the number of Tensor dimensions.\"\"\"\n return self.shape.ndims\n\n @deprecation.deprecated(None, \"Use tf.identity instead.\")\n def cpu(self):\n \"\"\"A copy of this Tensor with contents backed by host memory.\"\"\"\n return self._copy(context.context(), \"CPU:0\")\n\n @deprecation.deprecated(None, \"Use tf.identity instead.\")\n def gpu(self, gpu_index=0):\n \"\"\"A copy of this Tensor with contents backed by memory on the GPU.\n\n Arguments:\n gpu_index: Identifies which GPU to place the contents on the returned\n Tensor in.\n\n Returns:\n A GPU-memory backed Tensor object initialized with the same contents\n as this Tensor.\n \"\"\"\n return self._copy(context.context(), \"GPU:\" + str(gpu_index))\n\n def set_shape(self, shape):\n if not self.shape.is_compatible_with(shape):\n raise ValueError(\n \"Tensor's shape %s is not compatible with supplied shape %s\" %\n (self.shape, shape))\n\n # Methods not supported / implemented for Eager Tensors.\n @property\n def op(self):\n raise AttributeError(\n \"Tensor.op is meaningless when eager execution is enabled.\")\n\n @property\n def graph(self):\n raise AttributeError(\n \"Tensor.graph is meaningless when eager execution is enabled.\")\n\n @property\n def name(self):\n raise AttributeError(\n \"Tensor.name is meaningless when eager execution is enabled.\")\n\n @property\n def value_index(self):\n raise AttributeError(\n \"Tensor.value_index is meaningless when eager execution is enabled.\")\n\n def consumers(self):\n raise NotImplementedError(\n \"Tensor.consumers is meaningless when eager execution is enabled.\")\n\n def _add_consumer(self, consumer):\n raise NotImplementedError(\n \"_add_consumer not supported when eager execution is enabled.\")\n\n def _as_node_def_input(self):\n raise NotImplementedError(\n \"_as_node_def_input not supported when eager execution is enabled.\")\n\n def _as_tf_output(self):\n raise NotImplementedError(\n \"_as_tf_output not supported when eager execution is enabled.\")\n\n def eval(self, feed_dict=None, session=None):\n raise NotImplementedError(\n \"eval is not supported when eager execution is enabled, \"\n \"is .numpy() what you're looking for?\")\n\n\n# This call creates an EagerTensor class, as a subclass of _EagerTensorBase, and\n# registers it with the current module.\nEagerTensor = pywrap_tfe.TFE_Py_InitEagerTensor(_EagerTensorBase)\n\n\nregister_dense_tensor_like_type(Tensor)\n\n\n@tf_export(v1=[\"convert_to_tensor\"])\ndef convert_to_tensor_v1(value,\n dtype=None,\n name=None,\n preferred_dtype=None,\n dtype_hint=None):\n \"\"\"Converts the given `value` to a `Tensor`.\n\n This function 
converts Python objects of various types to `Tensor`\n objects. It accepts `Tensor` objects, numpy arrays, Python lists,\n and Python scalars. For example:\n\n ```python\n import numpy as np\n\n def my_func(arg):\n arg = tf.convert_to_tensor(arg, dtype=tf.float32)\n return tf.matmul(arg, arg) + arg\n\n # The following calls are equivalent.\n value_1 = my_func(tf.constant([[1.0, 2.0], [3.0, 4.0]]))\n value_2 = my_func([[1.0, 2.0], [3.0, 4.0]])\n value_3 = my_func(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32))\n ```\n\n This function can be useful when composing a new operation in Python\n (such as `my_func` in the example above). All standard Python op\n constructors apply this function to each of their Tensor-valued\n inputs, which allows those ops to accept numpy arrays, Python lists,\n and scalars in addition to `Tensor` objects.\n\n Note: This function diverges from default Numpy behavior for `float` and\n `string` types when `None` is present in a Python list or scalar. Rather\n than silently converting `None` values, an error will be thrown.\n\n Args:\n value: An object whose type has a registered `Tensor` conversion function.\n dtype: Optional element type for the returned tensor. If missing, the type\n is inferred from the type of `value`.\n name: Optional name to use if a new `Tensor` is created.\n preferred_dtype: Optional element type for the returned tensor, used when\n dtype is None. In some cases, a caller may not have a dtype in mind when\n converting to a tensor, so preferred_dtype can be used as a soft\n preference. If the conversion to `preferred_dtype` is not possible, this\n argument has no effect.\n dtype_hint: same meaning as preferred_dtype, and overrides it.\n\n Returns:\n A `Tensor` based on `value`.\n\n Raises:\n TypeError: If no conversion function is registered for `value` to `dtype`.\n RuntimeError: If a registered conversion function returns an invalid value.\n ValueError: If the `value` is a tensor not of given `dtype` in graph mode.\n \"\"\"\n preferred_dtype = deprecation.deprecated_argument_lookup(\n \"dtype_hint\", dtype_hint, \"preferred_dtype\", preferred_dtype)\n return convert_to_tensor_v2(value, dtype, preferred_dtype, name)\n\n\n@tf_export(\"convert_to_tensor\", v1=[])\ndef convert_to_tensor_v2(value, dtype=None, dtype_hint=None, name=None):\n \"\"\"Converts the given `value` to a `Tensor`.\n\n This function converts Python objects of various types to `Tensor`\n objects. It accepts `Tensor` objects, numpy arrays, Python lists,\n and Python scalars. For example:\n\n >>> def my_func(arg):\n ... arg = tf.convert_to_tensor(arg, dtype=tf.float32)\n ... return arg\n\n >>> # The following calls are equivalent.\n >>> value_1 = my_func(tf.constant([[1.0, 2.0], [3.0, 4.0]]))\n >>> print(value_1)\n tf.Tensor(\n [[1. 2.]\n [3. 4.]], shape=(2, 2), dtype=float32)\n >>> value_2 = my_func([[1.0, 2.0], [3.0, 4.0]])\n >>> print(value_2)\n tf.Tensor(\n [[1. 2.]\n [3. 4.]], shape=(2, 2), dtype=float32)\n >>> value_3 = my_func(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32))\n >>> print(value_3)\n tf.Tensor(\n [[1. 2.]\n [3. 4.]], shape=(2, 2), dtype=float32)\n\n This function can be useful when composing a new operation in Python\n (such as `my_func` in the example above). 
All standard Python op\n constructors apply this function to each of their Tensor-valued\n inputs, which allows those ops to accept numpy arrays, Python lists,\n and scalars in addition to `Tensor` objects.\n\n Note: This function diverges from default Numpy behavior for `float` and\n `string` types when `None` is present in a Python list or scalar. Rather\n than silently converting `None` values, an error will be thrown.\n\n Args:\n value: An object whose type has a registered `Tensor` conversion function.\n dtype: Optional element type for the returned tensor. If missing, the type\n is inferred from the type of `value`.\n dtype_hint: Optional element type for the returned tensor, used when dtype\n is None. In some cases, a caller may not have a dtype in mind when\n converting to a tensor, so dtype_hint can be used as a soft preference.\n If the conversion to `dtype_hint` is not possible, this argument has no\n effect.\n name: Optional name to use if a new `Tensor` is created.\n\n Returns:\n A `Tensor` based on `value`.\n\n Raises:\n TypeError: If no conversion function is registered for `value` to `dtype`.\n RuntimeError: If a registered conversion function returns an invalid value.\n ValueError: If the `value` is a tensor not of given `dtype` in graph mode.\n \"\"\"\n return convert_to_tensor(\n value=value,\n dtype=dtype,\n name=name,\n preferred_dtype=dtype_hint,\n as_ref=False)\n\n\ndef _error_prefix(name):\n return \"\" if name is None else \"%s: \" % name\n\n\ndef convert_to_tensor(value,\n dtype=None,\n name=None,\n as_ref=False,\n preferred_dtype=None,\n dtype_hint=None,\n ctx=None,\n accepted_result_types=(Tensor,)):\n \"\"\"Implementation of the public convert_to_tensor.\"\"\"\n # TODO(b/142518781): Fix all call-sites and remove redundant arg\n preferred_dtype = preferred_dtype or dtype_hint\n if isinstance(value, EagerTensor):\n if ctx is None:\n ctx = context.context()\n if not ctx.executing_eagerly():\n graph = get_default_graph()\n if not graph.building_function:\n raise RuntimeError(\"Attempting to capture an EagerTensor without \"\n \"building a function.\")\n return graph.capture(value, name=name)\n\n if dtype is not None:\n dtype = dtypes.as_dtype(dtype)\n if isinstance(value, Tensor):\n if dtype is not None and not dtype.is_compatible_with(value.dtype):\n raise ValueError(\n \"Tensor conversion requested dtype %s for Tensor with dtype %s: %r\" %\n (dtype.name, value.dtype.name, value))\n return value\n\n if preferred_dtype is not None:\n preferred_dtype = dtypes.as_dtype(preferred_dtype)\n for base_type, conversion_func in tensor_conversion_registry.get(type(value)):\n # If dtype is None but preferred_dtype is not None, we try to\n # cast to preferred_dtype first.\n ret = None\n if dtype is None and preferred_dtype is not None:\n try:\n ret = conversion_func(\n value, dtype=preferred_dtype, name=name, as_ref=as_ref)\n except (TypeError, ValueError):\n # Could not coerce the conversion to use the preferred dtype.\n pass\n else:\n if (ret is not NotImplemented and\n ret.dtype.base_dtype != preferred_dtype.base_dtype):\n raise TypeError(\"convert_to_tensor did not convert to \"\n \"the preferred dtype: %s vs %s \" %\n (ret.dtype.base_dtype, preferred_dtype.base_dtype))\n\n if ret is None:\n ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)\n\n if ret is NotImplemented:\n continue\n\n if not isinstance(ret, accepted_result_types):\n raise RuntimeError(\n \"%sConversion function %r for type %s returned non-Tensor: %r\" %\n (_error_prefix(name), 
conversion_func, base_type, ret))\n if dtype and not dtype.is_compatible_with(ret.dtype):\n raise RuntimeError(\n \"%sConversion function %r for type %s returned incompatible \"\n \"dtype: requested = %s, actual = %s\" %\n (_error_prefix(name), conversion_func, base_type, dtype.name,\n ret.dtype.name))\n return ret\n raise TypeError(\"%sCannot convert %r with type %s to Tensor: \"\n \"no conversion function registered.\" %\n (_error_prefix(name), value, type(value)))\n\n\ninternal_convert_to_tensor = convert_to_tensor\n\n\ndef internal_convert_n_to_tensor(values,\n dtype=None,\n name=None,\n as_ref=False,\n preferred_dtype=None,\n ctx=None):\n \"\"\"Converts `values` to a list of `Tensor` objects.\n\n Args:\n values: A list of objects that can be consumed by `tf.convert_to_tensor()`.\n dtype: (Optional.) The required `DType` of the returned `Tensor` objects.\n name: (Optional.) A name prefix to used when a new `Tensor` is created, in\n which case element `i` will be given the name `name + '_' + i`.\n as_ref: True if the caller wants the results as ref tensors.\n preferred_dtype: Optional element type for the returned tensors, used when\n dtype is None. In some cases, a caller may not have a dtype in mind when\n converting to a tensor, so preferred_dtype can be used as a soft\n preference. If the conversion to `preferred_dtype` is not possible, this\n argument has no effect.\n ctx: The value of context.context().\n\n Returns:\n A list of `Tensor` and/or `IndexedSlices` objects.\n\n Raises:\n TypeError: If no conversion function is registered for an element in\n `values`.\n RuntimeError: If a registered conversion function returns an invalid\n value.\n \"\"\"\n if not isinstance(values, collections_abc.Sequence):\n raise TypeError(\"values must be a sequence.\")\n ret = []\n if ctx is None:\n ctx = context.context()\n for i, value in enumerate(values):\n n = None if name is None else \"%s_%d\" % (name, i)\n ret.append(\n convert_to_tensor(\n value,\n dtype=dtype,\n name=n,\n as_ref=as_ref,\n preferred_dtype=preferred_dtype,\n ctx=ctx))\n return ret\n\n\ndef convert_n_to_tensor(values, dtype=None, name=None, preferred_dtype=None):\n \"\"\"Converts `values` to a list of `Tensor` objects.\n\n Args:\n values: A list of objects that can be consumed by `tf.convert_to_tensor()`.\n dtype: (Optional.) The required `DType` of the returned `Tensor` objects.\n name: (Optional.) A name prefix to used when a new `Tensor` is created, in\n which case element `i` will be given the name `name + '_' + i`.\n preferred_dtype: Optional element type for the returned tensors, used when\n dtype is None. In some cases, a caller may not have a dtype in mind when\n converting to a tensor, so preferred_dtype can be used as a soft\n preference. If the conversion to `preferred_dtype` is not possible, this\n argument has no effect.\n\n Returns:\n A list of `Tensor` and/or `IndexedSlices` objects.\n\n Raises:\n TypeError: If no conversion function is registered for an element in\n `values`.\n RuntimeError: If a registered conversion function returns an invalid\n value.\n \"\"\"\n return internal_convert_n_to_tensor(\n values=values,\n dtype=dtype,\n name=name,\n preferred_dtype=preferred_dtype,\n as_ref=False)\n\n\ndef convert_to_tensor_or_composite(value, dtype=None, name=None):\n \"\"\"Converts the given object to a `Tensor` or `CompositeTensor`.\n\n If `value` is a `CompositeTensor` it is returned unmodified. 
Otherwise, it\n is converted to a `Tensor` using `convert_to_tensor()`.\n\n Args:\n value: A `CompositeTensor` or an object that can be consumed by\n `convert_to_tensor()`.\n dtype: (Optional.) The required `DType` of the returned `Tensor` or\n `CompositeTensor`.\n name: (Optional.) A name to use if a new `Tensor` is created.\n\n Returns:\n A `Tensor` or `CompositeTensor`, based on `value`.\n\n Raises:\n ValueError: If `dtype` does not match the element type of `value`.\n \"\"\"\n return internal_convert_to_tensor_or_composite(\n value=value, dtype=dtype, name=name, as_ref=False)\n\n\ndef internal_convert_to_tensor_or_composite(value,\n dtype=None,\n name=None,\n as_ref=False):\n \"\"\"Converts the given object to a `Tensor` or `CompositeTensor`.\n\n If `value` is a `CompositeTensor` it is returned unmodified. Otherwise, it\n is converted to a `Tensor` using `convert_to_tensor()`.\n\n Args:\n value: A `CompositeTensor`, or an object that can be consumed by\n `convert_to_tensor()`.\n dtype: (Optional.) The required `DType` of the returned `Tensor` or\n `CompositeTensor`.\n name: (Optional.) A name to use if a new `Tensor` is created.\n as_ref: True if the caller wants the results as ref tensors.\n\n Returns:\n A `Tensor` or `CompositeTensor`, based on `value`.\n\n Raises:\n ValueError: If `dtype` does not match the element type of `value`.\n \"\"\"\n if isinstance(value, composite_tensor.CompositeTensor):\n value_dtype = getattr(value, \"dtype\", None)\n if dtype and not dtypes.as_dtype(dtype).is_compatible_with(value_dtype):\n raise ValueError(\n \"Tensor conversion requested dtype %s for Tensor with dtype %s: %r\" %\n (dtypes.as_dtype(dtype).name, value.dtype.name, str(value)))\n return value\n else:\n return convert_to_tensor(\n value,\n dtype=dtype,\n name=name,\n as_ref=as_ref,\n accepted_result_types=(Tensor, composite_tensor.CompositeTensor))\n\n\ndef internal_convert_n_to_tensor_or_composite(values,\n dtype=None,\n name=None,\n as_ref=False):\n \"\"\"Converts `values` to a list of `Tensor` or `CompositeTensor` objects.\n\n Any `CompositeTensor` objects in `values` are returned unmodified.\n\n Args:\n values: A list of `None`, `CompositeTensor`, or objects that can be consumed\n by `convert_to_tensor()`.\n dtype: (Optional.) The required `DType` of the returned `Tensor`s or\n `CompositeTensor`s.\n name: (Optional.) A name prefix to used when a new `Tensor` is created, in\n which case element `i` will be given the name `name + '_' + i`.\n as_ref: True if the caller wants the results as ref tensors.\n\n Returns:\n A list of `Tensor`, `CompositeTensor`, and/or `None` objects.\n\n Raises:\n TypeError: If no conversion function is registered for an element in\n `values`.\n RuntimeError: If a registered conversion function returns an invalid\n value.\n \"\"\"\n if not isinstance(values, collections_abc.Sequence):\n raise TypeError(\"values must be a sequence.\")\n ret = []\n for i, value in enumerate(values):\n if value is None:\n ret.append(value)\n else:\n n = None if name is None else \"%s_%d\" % (name, i)\n ret.append(\n internal_convert_to_tensor_or_composite(\n value, dtype=dtype, name=n, as_ref=as_ref))\n return ret\n\n\ndef convert_n_to_tensor_or_composite(values, dtype=None, name=None):\n \"\"\"Converts `values` to a list of `Output` or `CompositeTensor` objects.\n\n Any `CompositeTensor` objects in `values` are returned unmodified.\n\n Args:\n values: A list of `None`, `CompositeTensor``, or objects that can be\n consumed by `convert_to_tensor()`.\n dtype: (Optional.) 
The required `DType` of the returned `Tensor`s or\n `CompositeTensor`s.\n name: (Optional.) A name prefix to used when a new `Tensor` is created, in\n which case element `i` will be given the name `name + '_' + i`.\n\n Returns:\n A list of `Tensor` and/or `CompositeTensor` objects.\n\n Raises:\n TypeError: If no conversion function is registered for an element in\n `values`.\n RuntimeError: If a registered conversion function returns an invalid\n value.\n \"\"\"\n return internal_convert_n_to_tensor_or_composite(\n values=values, dtype=dtype, name=name, as_ref=False)\n\n\ndef _device_string(dev_spec):\n if pydev.is_device_spec(dev_spec):\n return dev_spec.to_string()\n else:\n return dev_spec\n\n\ndef _NodeDef(op_type, name, attrs=None):\n \"\"\"Create a NodeDef proto.\n\n Args:\n op_type: Value for the \"op\" attribute of the NodeDef proto.\n name: Value for the \"name\" attribute of the NodeDef proto.\n attrs: Dictionary where the key is the attribute name (a string)\n and the value is the respective \"attr\" attribute of the NodeDef proto (an\n AttrValue).\n\n Returns:\n A node_def_pb2.NodeDef protocol buffer.\n \"\"\"\n node_def = node_def_pb2.NodeDef(op=compat.as_bytes(op_type),\n name=compat.as_bytes(name))\n if attrs:\n for k, v in six.iteritems(attrs):\n node_def.attr[k].CopyFrom(v)\n return node_def\n\n\n# Copied from core/framework/node_def_util.cc\n# TODO(mrry,josh11b): Consolidate this validation in C++ code.\n_VALID_OP_NAME_REGEX = re.compile(\"^[A-Za-z0-9.][A-Za-z0-9_.\\\\-/>]*$\")\n_VALID_SCOPE_NAME_REGEX = re.compile(\"^[A-Za-z0-9_.\\\\-/>]*$\")\n\n\ndef _create_c_op(graph, node_def, inputs, control_inputs, op_def=None):\n \"\"\"Creates a TF_Operation.\n\n Args:\n graph: a `Graph`.\n node_def: `node_def_pb2.NodeDef` for the operation to create.\n inputs: A flattened list of `Tensor`s. This function handles grouping\n tensors into lists as per attributes in the `node_def`.\n control_inputs: A list of `Operation`s to set as control dependencies.\n op_def: Optional. `op_def_pb2.OpDef` for the operation to create. 
If not\n specified, is looked up from the `graph` using `node_def.op`.\n\n Returns:\n A wrapped TF_Operation*.\n \"\"\"\n if op_def is None:\n op_def = graph._get_op_def(node_def.op) # pylint: disable=protected-access\n # TODO(skyewm): op_def_library.apply_op() flattens the incoming inputs.\n # Refactor so we don't have to do this here.\n inputs = _reconstruct_sequence_inputs(op_def, inputs, node_def.attr)\n # pylint: disable=protected-access\n op_desc = pywrap_tf_session.TF_NewOperation(graph._c_graph,\n compat.as_str(node_def.op),\n compat.as_str(node_def.name))\n if node_def.device:\n pywrap_tf_session.TF_SetDevice(op_desc, compat.as_str(node_def.device))\n # Add inputs\n for op_input in inputs:\n if isinstance(op_input, (list, tuple)):\n pywrap_tf_session.TF_AddInputList(op_desc,\n [t._as_tf_output() for t in op_input])\n else:\n pywrap_tf_session.TF_AddInput(op_desc, op_input._as_tf_output())\n\n # Add control inputs\n for control_input in control_inputs:\n pywrap_tf_session.TF_AddControlInput(op_desc, control_input._c_op)\n # pylint: enable=protected-access\n\n # Add attrs\n for name, attr_value in node_def.attr.items():\n serialized = attr_value.SerializeToString()\n # TODO(skyewm): this creates and deletes a new TF_Status for every attr.\n # It might be worth creating a convenient way to re-use the same status.\n pywrap_tf_session.TF_SetAttrValueProto(op_desc, compat.as_str(name),\n serialized)\n\n try:\n c_op = pywrap_tf_session.TF_FinishOperation(op_desc)\n except errors.InvalidArgumentError as e:\n # Convert to ValueError for backwards compatibility.\n raise ValueError(str(e))\n\n return c_op\n\n\n@tf_export(\"Operation\")\nclass Operation(object):\n \"\"\"Represents a graph node that performs computation on tensors.\n\n An `Operation` is a node in a `tf.Graph` that takes zero or more `Tensor`\n objects as input, and produces zero or more `Tensor` objects as output.\n Objects of type `Operation` are created by calling a Python op constructor\n (such as `tf.matmul`) within a `tf.function` or under a `tf.Graph.as_default`\n context manager.\n\n For example, within a `tf.function`, `c = tf.matmul(a, b)` creates an\n `Operation` of type \"MatMul\" that takes tensors `a` and `b` as input, and\n produces `c` as output.\n\n If a `tf.compat.v1.Session` is used, an `Operation` of a `tf.Graph` can be\n executed by passing it to `tf.Session.run`. `op.run()` is a shortcut for\n calling `tf.compat.v1.get_default_session().run(op)`.\n \"\"\"\n\n def __init__(self,\n node_def,\n g,\n inputs=None,\n output_types=None,\n control_inputs=None,\n input_types=None,\n original_op=None,\n op_def=None):\n r\"\"\"Creates an `Operation`.\n\n NOTE: This constructor validates the name of the `Operation` (passed\n as `node_def.name`). Valid `Operation` names match the following\n regular expression:\n\n [A-Za-z0-9.][A-Za-z0-9_.\\\\-/]*\n\n Args:\n node_def: `node_def_pb2.NodeDef`. `NodeDef` for the `Operation`. Used for\n attributes of `node_def_pb2.NodeDef`, typically `name`, `op`, and\n `device`. The `input` attribute is irrelevant here as it will be\n computed when generating the model.\n g: `Graph`. The parent graph.\n inputs: list of `Tensor` objects. The inputs to this `Operation`.\n output_types: list of `DType` objects. List of the types of the `Tensors`\n computed by this operation. 
The length of this list indicates the\n number of output endpoints of the `Operation`.\n control_inputs: list of operations or tensors from which to have a control\n dependency.\n input_types: List of `DType` objects representing the types of the tensors\n accepted by the `Operation`. By default uses `[x.dtype.base_dtype for x\n in inputs]`. Operations that expect reference-typed inputs must specify\n these explicitly.\n original_op: Optional. Used to associate the new `Operation` with an\n existing `Operation` (for example, a replica with the op that was\n replicated).\n op_def: Optional. The `op_def_pb2.OpDef` proto that describes the op type\n that this `Operation` represents.\n\n Raises:\n TypeError: if control inputs are not Operations or Tensors,\n or if `node_def` is not a `NodeDef`,\n or if `g` is not a `Graph`,\n or if `inputs` are not tensors,\n or if `inputs` and `input_types` are incompatible.\n ValueError: if the `node_def` name is not valid.\n \"\"\"\n # For internal use only: `node_def` can be set to a TF_Operation to create\n # an Operation for that op. This is useful for creating Operations for ops\n # indirectly created by C API methods, e.g. the ops created by\n # TF_ImportGraphDef. When `node_def` is a TF_Operation, all optional fields\n # should be None.\n\n if isinstance(node_def, node_def_pb2.NodeDef):\n if node_def.ByteSize() >= (1 << 31) or node_def.ByteSize() < 0:\n raise ValueError(\n \"Cannot create a tensor proto whose content is larger than 2GB.\")\n if not _VALID_OP_NAME_REGEX.match(node_def.name):\n raise ValueError(\"'%s' is not a valid node name\" % node_def.name)\n c_op = None\n elif type(node_def).__name__ == \"TF_Operation\":\n assert inputs is None\n assert output_types is None\n assert control_inputs is None\n assert input_types is None\n assert original_op is None\n assert op_def is None\n c_op = node_def\n else:\n raise TypeError(\"node_def needs to be a NodeDef: %s\" % node_def)\n\n if not isinstance(g, Graph):\n raise TypeError(\"g needs to be a Graph: %s\" % g)\n self._graph = g\n\n if inputs is None:\n inputs = []\n elif not isinstance(inputs, list):\n raise TypeError(\"inputs needs to be a list of Tensors: %s\" % inputs)\n for a in inputs:\n if not isinstance(a, Tensor):\n raise TypeError(\"input needs to be a Tensor: %s\" % a)\n if input_types is None:\n input_types = [i.dtype.base_dtype for i in inputs]\n else:\n if not all(\n x.is_compatible_with(i.dtype) for i, x in zip(inputs, input_types)):\n raise TypeError(\"In op '%s', input types (%s) are not compatible \"\n \"with expected types (%s)\" %\n (node_def.name, [i.dtype for i in inputs], input_types))\n\n # Build the list of control inputs.\n control_input_ops = []\n if control_inputs:\n for c in control_inputs:\n control_op = None\n if isinstance(c, Operation):\n control_op = c\n elif isinstance(c, (Tensor, IndexedSlices)):\n control_op = c.op\n else:\n raise TypeError(\"Control input must be an Operation, \"\n \"a Tensor, or IndexedSlices: %s\" % c)\n control_input_ops.append(control_op)\n\n # This will be set by self.inputs.\n self._inputs_val = None\n\n # pylint: disable=protected-access\n self._original_op = original_op\n self._traceback = tf_stack.extract_stack()\n\n # List of _UserDevSpecs holding code location of device context manager\n # invocations and the users original argument to them.\n self._device_code_locations = None\n # Dict mapping op name to file and line information for op colocation\n # context managers.\n self._colocation_code_locations = None\n 
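# Control flow context (e.g. a CondContext or WhileContext, or None) that\n # was active on the parent graph when this op was created.\n 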
self._control_flow_context = self.graph._get_control_flow_context()\n\n # Gradient function for this op. There are three ways to specify gradient\n # function, and first available gradient gets used, in the following order.\n # 1. self._gradient_function\n # 2. Gradient name registered by \"_gradient_op_type\" attribute.\n # 3. Gradient name registered by op.type.\n self._gradient_function = None\n\n # Initialize self._c_op.\n if c_op:\n self._c_op = c_op\n op_def = g._get_op_def(pywrap_tf_session.TF_OperationOpType(c_op))\n name = self.name\n else:\n if op_def is None:\n op_def = self._graph._get_op_def(node_def.op)\n self._c_op = _create_c_op(self._graph, node_def, inputs,\n control_input_ops, op_def)\n name = compat.as_str(node_def.name)\n # pylint: enable=protected-access\n\n self._is_stateful = op_def.is_stateful\n\n # Initialize self._outputs.\n num_outputs = pywrap_tf_session.TF_OperationNumOutputs(self._c_op)\n self._outputs = []\n for i in range(num_outputs):\n tf_output = c_api_util.tf_output(self._c_op, i)\n output_type = pywrap_tf_session.TF_OperationOutputType(tf_output)\n tensor = Tensor._create_with_tf_output(self, i, output_type, tf_output) # pylint: disable=protected-access\n self._outputs.append(tensor)\n\n self._id_value = self._graph._add_op(self, name) # pylint: disable=protected-access\n\n if not c_op:\n self._control_flow_post_processing(input_tensors=inputs)\n\n def _control_flow_post_processing(self, input_tensors=None):\n \"\"\"Add this op to its control flow context.\n\n This may add new ops and change this op's inputs. self.inputs must be\n available before calling this method.\n\n Args:\n input_tensors: (Optional.) A list of `Tensors` corresponding to the inputs\n of this op, which should be equivalent to `self.inputs`. 
Pass this\n argument to avoid evaluating `self.inputs` unnecessarily.\n \"\"\"\n if input_tensors is None:\n input_tensors = self.inputs\n for input_tensor in input_tensors:\n control_flow_util.CheckInputFromValidContext(self, input_tensor.op)\n if self._control_flow_context is not None:\n self._control_flow_context.AddOp(self)\n\n def colocation_groups(self):\n \"\"\"Returns the list of colocation groups of the op.\"\"\"\n default_colocation_group = [compat.as_bytes(\"loc:@%s\" % self.name)]\n try:\n class_attr = self.get_attr(\"_class\")\n except ValueError:\n # This op has no explicit colocation group, so it is itself its\n # own root of a colocation group.\n return default_colocation_group\n\n attr_groups = [\n class_name for class_name in class_attr\n if class_name.startswith(b\"loc:@\")\n ]\n\n # If there are no colocation groups in the explicit _class field,\n # return the default colocation group.\n return attr_groups if attr_groups else default_colocation_group\n\n def values(self):\n \"\"\"DEPRECATED: Use outputs.\"\"\"\n return tuple(self.outputs)\n\n def _get_control_flow_context(self):\n \"\"\"Returns the control flow context of this op.\n\n Returns:\n A context object.\n \"\"\"\n return self._control_flow_context\n\n def _set_control_flow_context(self, ctx):\n \"\"\"Sets the current control flow context of this op.\n\n Args:\n ctx: a context object.\n \"\"\"\n self._control_flow_context = ctx\n\n @property\n def name(self):\n \"\"\"The full name of this operation.\"\"\"\n return pywrap_tf_session.TF_OperationName(self._c_op)\n\n @property\n def _id(self):\n \"\"\"The unique integer id of this operation.\"\"\"\n return self._id_value\n\n @property\n def device(self):\n \"\"\"The name of the device to which this op has been assigned, if any.\n\n Returns:\n The string name of the device to which this op has been\n assigned, or an empty string if it has not been assigned to a\n device.\n \"\"\"\n return pywrap_tf_session.TF_OperationDevice(self._c_op)\n\n @property\n def _device_assignments(self):\n \"\"\"Code locations for device context managers active at op creation.\n\n This property will return a list of traceable_stack.TraceableObject\n instances where .obj is a string representing the assigned device\n (or information about the function that would be applied to this op\n to compute the desired device) and the filename and lineno members\n record the location of the relevant device context manager.\n\n For example, suppose file_a contained these lines:\n\n file_a.py:\n 15: with tf.device('/gpu:0'):\n 16: node_b = tf.constant(4, name='NODE_B')\n\n Then a TraceableObject t_obj representing the device context manager\n would have these member values:\n\n t_obj.obj -> '/gpu:0'\n t_obj.filename = 'file_a.py'\n t_obj.lineno = 15\n\n and node_b.op._device_assignments would return the list [t_obj].\n\n Returns:\n [str: traceable_stack.TraceableObject, ...] as per this method's\n description, above.\n \"\"\"\n return self._device_code_locations or []\n\n @property\n def _colocation_dict(self):\n \"\"\"Code locations for colocation context managers active at op creation.\n\n This property will return a dictionary for which the keys are nodes with\n which this Operation is colocated, and for which the values are\n traceable_stack.TraceableObject instances. 
The TraceableObject instances\n record the location of the relevant colocation context manager but have the\n \"obj\" field set to None to prevent leaking private data.\n\n For example, suppose file_a contained these lines:\n\n file_a.py:\n 14: node_a = tf.constant(3, name='NODE_A')\n 15: with tf.compat.v1.colocate_with(node_a):\n 16: node_b = tf.constant(4, name='NODE_B')\n\n Then a TraceableObject t_obj representing the colocation context manager\n would have these member values:\n\n t_obj.obj -> None\n t_obj.filename = 'file_a.py'\n t_obj.lineno = 15\n\n and node_b.op._colocation_dict would return the dictionary\n\n { 'NODE_A': t_obj }\n\n Returns:\n {str: traceable_stack.TraceableObject} as per this method's description,\n above.\n \"\"\"\n locations_dict = self._colocation_code_locations or {}\n return locations_dict.copy()\n\n @property\n def _output_types(self):\n \"\"\"List this operation's output types.\n\n Returns:\n List of the types of the Tensors computed by this operation.\n Each element in the list is an integer whose value is one of\n the TF_DataType enums defined in pywrap_tf_session.h\n The length of this list indicates the number of output endpoints\n of the operation.\n \"\"\"\n num_outputs = pywrap_tf_session.TF_OperationNumOutputs(self._c_op)\n output_types = [\n int(pywrap_tf_session.TF_OperationOutputType(self._tf_output(i)))\n for i in xrange(num_outputs)\n ]\n\n return output_types\n\n def _tf_output(self, output_idx):\n \"\"\"Create and return a new TF_Output for output_idx'th output of this op.\"\"\"\n tf_output = pywrap_tf_session.TF_Output()\n tf_output.oper = self._c_op\n tf_output.index = output_idx\n return tf_output\n\n def _tf_input(self, input_idx):\n \"\"\"Create and return a new TF_Input for input_idx'th input of this op.\"\"\"\n tf_input = pywrap_tf_session.TF_Input()\n tf_input.oper = self._c_op\n tf_input.index = input_idx\n return tf_input\n\n def _set_device(self, device): # pylint: disable=redefined-outer-name\n \"\"\"Set the device of this operation.\n\n Args:\n device: string or device.. The device to set.\n \"\"\"\n self._set_device_from_string(compat.as_str(_device_string(device)))\n\n def _set_device_from_string(self, device_str):\n \"\"\"Fast path to set device if the type is known to be a string.\n\n This function is called frequently enough during graph construction that\n there are non-trivial performance gains if the caller can guarantee that\n the specified device is already a string.\n\n Args:\n device_str: A string specifying where to place this op.\n \"\"\"\n pywrap_tf_session.SetRequestedDevice(\n self._graph._c_graph, # pylint: disable=protected-access\n self._c_op, # pylint: disable=protected-access\n device_str)\n\n def _update_input(self, index, tensor):\n \"\"\"Update the input to this operation at the given index.\n\n NOTE: This is for TF internal use only. 
Please don't use it.\n\n Args:\n index: the index of the input to update.\n tensor: the Tensor to be used as the input at the given index.\n\n Raises:\n TypeError: if tensor is not a Tensor,\n or if input tensor type is not convertible to dtype.\n ValueError: if the Tensor is from a different graph.\n \"\"\"\n if not isinstance(tensor, Tensor):\n raise TypeError(\"tensor must be a Tensor: %s\" % tensor)\n _assert_same_graph(self, tensor)\n\n # Reset cached inputs.\n self._inputs_val = None\n pywrap_tf_session.UpdateEdge(\n self._graph._c_graph, # pylint: disable=protected-access\n tensor._as_tf_output(), # pylint: disable=protected-access\n self._tf_input(index))\n\n def _add_while_inputs(self, tensors):\n \"\"\"See AddWhileInputHack in python_api.h.\n\n NOTE: This is for TF internal use only. Please don't use it.\n\n Args:\n tensors: list of Tensors\n\n Raises:\n TypeError: if tensor is not a Tensor,\n or if input tensor type is not convertible to dtype.\n ValueError: if the Tensor is from a different graph.\n \"\"\"\n for tensor in tensors:\n if not isinstance(tensor, Tensor):\n raise TypeError(\"tensor must be a Tensor: %s\" % tensor)\n _assert_same_graph(self, tensor)\n\n # Reset cached inputs.\n self._inputs_val = None\n pywrap_tf_session.AddWhileInputHack(\n self._graph._c_graph, # pylint: disable=protected-access\n tensor._as_tf_output(), # pylint: disable=protected-access\n self._c_op)\n\n def _add_control_inputs(self, ops):\n \"\"\"Add a list of new control inputs to this operation.\n\n Args:\n ops: the list of Operations to add as control input.\n\n Raises:\n TypeError: if ops is not a list of Operations.\n ValueError: if any op in ops is from a different graph.\n \"\"\"\n for op in ops:\n if not isinstance(op, Operation):\n raise TypeError(\"op must be an Operation: %s\" % op)\n pywrap_tf_session.AddControlInput(\n self._graph._c_graph, # pylint: disable=protected-access\n self._c_op, # pylint: disable=protected-access\n op._c_op) # pylint: disable=protected-access\n\n def _add_control_input(self, op):\n \"\"\"Add a new control input to this operation.\n\n Args:\n op: the Operation to add as control input.\n\n Raises:\n TypeError: if op is not an Operation.\n ValueError: if op is from a different graph.\n \"\"\"\n if not isinstance(op, Operation):\n raise TypeError(\"op must be an Operation: %s\" % op)\n pywrap_tf_session.AddControlInput(\n self._graph._c_graph, # pylint: disable=protected-access\n self._c_op, # pylint: disable=protected-access\n op._c_op) # pylint: disable=protected-access\n\n def _remove_all_control_inputs(self):\n \"\"\"Removes any control inputs to this operation.\"\"\"\n pywrap_tf_session.RemoveAllControlInputs(self._graph._c_graph, self._c_op) # pylint: disable=protected-access\n\n def _add_outputs(self, types, shapes):\n \"\"\"Adds new Tensors to self.outputs.\n\n Note: this is generally unsafe to use. 
This is used in certain situations in\n conjunction with _set_type_list_attr.\n\n Arguments:\n types: list of DTypes\n shapes: list of TensorShapes\n \"\"\"\n assert len(types) == len(shapes)\n orig_num_outputs = len(self.outputs)\n for i in range(len(types)):\n t = Tensor(self, orig_num_outputs + i, types[i])\n self._outputs.append(t)\n t.set_shape(shapes[i])\n\n def __str__(self):\n return str(self.node_def)\n\n def __repr__(self):\n return \"<tf.Operation '%s' type=%s>\" % (self.name, self.type)\n\n @property\n def outputs(self):\n \"\"\"The list of `Tensor` objects representing the outputs of this op.\"\"\"\n return self._outputs\n\n @property\n def inputs(self):\n \"\"\"The sequence of `Tensor` objects representing the data inputs of this op.\"\"\"\n if self._inputs_val is None:\n # pylint: disable=protected-access\n self._inputs_val = tuple(\n map(self.graph._get_tensor_by_tf_output,\n pywrap_tf_session.GetOperationInputs(self._c_op)))\n # pylint: enable=protected-access\n return self._inputs_val\n\n @property\n def _input_types(self):\n num_inputs = pywrap_tf_session.TF_OperationNumInputs(self._c_op)\n input_types = [\n dtypes.as_dtype(\n pywrap_tf_session.TF_OperationInputType(self._tf_input(i)))\n for i in xrange(num_inputs)\n ]\n return input_types\n\n @property\n def control_inputs(self):\n \"\"\"The `Operation` objects on which this op has a control dependency.\n\n Before this op is executed, TensorFlow will ensure that the\n operations in `self.control_inputs` have finished executing. This\n mechanism can be used to run ops sequentially for performance\n reasons, or to ensure that the side effects of an op are observed\n in the correct order.\n\n Returns:\n A list of `Operation` objects.\n\n \"\"\"\n control_c_ops = pywrap_tf_session.TF_OperationGetControlInputs_wrapper(\n self._c_op)\n # pylint: disable=protected-access\n return [\n self.graph._get_operation_by_name_unsafe(\n pywrap_tf_session.TF_OperationName(c_op)) for c_op in control_c_ops\n ]\n # pylint: enable=protected-access\n\n @property\n def _control_outputs(self):\n \"\"\"The `Operation` objects which have a control dependency on this op.\n\n Before any of the ops in self._control_outputs can execute tensorflow will\n ensure self has finished executing.\n\n Returns:\n A list of `Operation` objects.\n\n \"\"\"\n control_c_ops = pywrap_tf_session.TF_OperationGetControlOutputs_wrapper(\n self._c_op)\n # pylint: disable=protected-access\n return [\n self.graph._get_operation_by_name_unsafe(\n pywrap_tf_session.TF_OperationName(c_op)) for c_op in control_c_ops\n ]\n # pylint: enable=protected-access\n\n @property\n def type(self):\n \"\"\"The type of the op (e.g. 
`\"MatMul\"`).\"\"\"\n return pywrap_tf_session.TF_OperationOpType(self._c_op)\n\n @property\n def graph(self):\n \"\"\"The `Graph` that contains this operation.\"\"\"\n return self._graph\n\n @property\n def node_def(self):\n # pylint: disable=line-too-long\n \"\"\"Returns the `NodeDef` representation of this operation.\n\n Returns:\n A\n [`NodeDef`](https://www.tensorflow.org/code/tensorflow/core/framework/node_def.proto)\n protocol buffer.\n \"\"\"\n # pylint: enable=line-too-long\n with c_api_util.tf_buffer() as buf:\n pywrap_tf_session.TF_OperationToNodeDef(self._c_op, buf)\n data = pywrap_tf_session.TF_GetBuffer(buf)\n node_def = node_def_pb2.NodeDef()\n node_def.ParseFromString(compat.as_bytes(data))\n return node_def\n\n @property\n def op_def(self):\n # pylint: disable=line-too-long\n \"\"\"Returns the `OpDef` proto that represents the type of this op.\n\n Returns:\n An\n [`OpDef`](https://www.tensorflow.org/code/tensorflow/core/framework/op_def.proto)\n protocol buffer.\n \"\"\"\n # pylint: enable=line-too-long\n return self._graph._get_op_def(self.type)\n\n @property\n def traceback(self):\n \"\"\"Returns the call stack from when this operation was constructed.\"\"\"\n return self._traceback\n\n def _set_attr(self, attr_name, attr_value):\n \"\"\"Private method used to set an attribute in the node_def.\"\"\"\n buf = pywrap_tf_session.TF_NewBufferFromString(\n compat.as_bytes(attr_value.SerializeToString()))\n try:\n self._set_attr_with_buf(attr_name, buf)\n finally:\n pywrap_tf_session.TF_DeleteBuffer(buf)\n\n def _set_attr_with_buf(self, attr_name, attr_buf):\n \"\"\"Set an attr in the node_def with a pre-allocated buffer.\"\"\"\n # pylint: disable=protected-access\n pywrap_tf_session.SetAttr(self._graph._c_graph, self._c_op, attr_name,\n attr_buf)\n # pylint: enable=protected-access\n\n def _set_func_attr(self, attr_name, func_name):\n \"\"\"Private method used to set a function attribute in the node_def.\"\"\"\n func = attr_value_pb2.NameAttrList(name=func_name)\n self._set_attr(attr_name, attr_value_pb2.AttrValue(func=func))\n\n def _set_func_list_attr(self, attr_name, func_names):\n \"\"\"Private method used to set a list(function) attribute in the node_def.\"\"\"\n funcs = [attr_value_pb2.NameAttrList(name=func_name)\n for func_name in func_names]\n funcs_list = attr_value_pb2.AttrValue.ListValue(func=funcs)\n self._set_attr(attr_name, attr_value_pb2.AttrValue(list=funcs_list))\n\n def _set_type_list_attr(self, attr_name, types):\n \"\"\"Private method used to set a list(type) attribute in the node_def.\"\"\"\n if not types:\n return\n if isinstance(types[0], dtypes.DType):\n types = [dt.as_datatype_enum for dt in types]\n types_list = attr_value_pb2.AttrValue.ListValue(type=types)\n self._set_attr(attr_name, attr_value_pb2.AttrValue(list=types_list))\n\n def _set_shape_list_attr(self, attr_name, shapes):\n \"\"\"Private method used to set a list(shape) attribute in the node_def.\"\"\"\n shapes = [s.as_proto() for s in shapes]\n shapes_list = attr_value_pb2.AttrValue.ListValue(shape=shapes)\n self._set_attr(attr_name, attr_value_pb2.AttrValue(list=shapes_list))\n\n def _clear_attr(self, attr_name):\n \"\"\"Private method used to clear an attribute in the node_def.\"\"\"\n # pylint: disable=protected-access\n pywrap_tf_session.ClearAttr(self._graph._c_graph, self._c_op, attr_name)\n # pylint: enable=protected-access\n\n def get_attr(self, name):\n \"\"\"Returns the value of the attr of this op with the given `name`.\n\n Args:\n name: The name of the attr to fetch.\n\n 
Returns:\n The value of the attr, as a Python object.\n\n Raises:\n ValueError: If this op does not have an attr with the given `name`.\n \"\"\"\n fields = (\"s\", \"i\", \"f\", \"b\", \"type\", \"shape\", \"tensor\", \"func\")\n try:\n with c_api_util.tf_buffer() as buf:\n pywrap_tf_session.TF_OperationGetAttrValueProto(self._c_op, name, buf)\n data = pywrap_tf_session.TF_GetBuffer(buf)\n except errors.InvalidArgumentError as e:\n # Convert to ValueError for backwards compatibility.\n raise ValueError(str(e))\n x = attr_value_pb2.AttrValue()\n x.ParseFromString(data)\n\n oneof_value = x.WhichOneof(\"value\")\n if oneof_value is None:\n return []\n if oneof_value == \"list\":\n for f in fields:\n if getattr(x.list, f):\n if f == \"type\":\n return [dtypes.as_dtype(t) for t in x.list.type]\n else:\n return list(getattr(x.list, f))\n return []\n if oneof_value == \"type\":\n return dtypes.as_dtype(x.type)\n assert oneof_value in fields, \"Unsupported field type in \" + str(x)\n return getattr(x, oneof_value)\n\n def _get_attr_type(self, name):\n \"\"\"Returns the `DType` value of the attr of this op with the given `name`.\"\"\"\n try:\n dtype_enum = pywrap_tf_session.TF_OperationGetAttrType(self._c_op, name)\n return _DTYPES_INTERN_TABLE[dtype_enum]\n except errors.InvalidArgumentError as e:\n # Convert to ValueError for backwards compatibility.\n raise ValueError(str(e))\n\n def _get_attr_bool(self, name):\n \"\"\"Returns the `bool` value of the attr of this op with the given `name`.\"\"\"\n try:\n return pywrap_tf_session.TF_OperationGetAttrBool(self._c_op, name)\n except errors.InvalidArgumentError as e:\n # Convert to ValueError for backwards compatibility.\n raise ValueError(str(e))\n\n def _get_attr_int(self, name):\n \"\"\"Returns the `int` value of the attr of this op with the given `name`.\"\"\"\n try:\n return pywrap_tf_session.TF_OperationGetAttrInt(self._c_op, name)\n except errors.InvalidArgumentError as e:\n # Convert to ValueError for backwards compatibility.\n raise ValueError(str(e))\n\n def run(self, feed_dict=None, session=None):\n \"\"\"Runs this operation in a `Session`.\n\n Calling this method will execute all preceding operations that\n produce the inputs needed for this operation.\n\n *N.B.* Before invoking `Operation.run()`, its graph must have been\n launched in a session, and either a default session must be\n available, or `session` must be specified explicitly.\n\n Args:\n feed_dict: A dictionary that maps `Tensor` objects to feed values. See\n `tf.Session.run` for a description of the valid feed values.\n session: (Optional.) The `Session` to be used to run to this operation. If\n none, the default session will be used.\n \"\"\"\n _run_using_default_session(self, feed_dict, self.graph, session)\n\n_gradient_registry = registry.Registry(\"gradient\")\n\n\n@tf_export(\"RegisterGradient\")\nclass RegisterGradient(object):\n \"\"\"A decorator for registering the gradient function for an op type.\n\n This decorator is only used when defining a new op type. 
For an op\n with `m` inputs and `n` outputs, the gradient function is a function\n that takes the original `Operation` and `n` `Tensor` objects\n (representing the gradients with respect to each output of the op),\n and returns `m` `Tensor` objects (representing the partial gradients\n with respect to each input of the op).\n\n For example, assuming that operations of type `\"Sub\"` take two\n inputs `x` and `y`, and return a single output `x - y`, the\n following gradient function would be registered:\n\n ```python\n @tf.RegisterGradient(\"Sub\")\n def _sub_grad(unused_op, grad):\n return grad, tf.negative(grad)\n ```\n\n The decorator argument `op_type` is the string type of an\n operation. This corresponds to the `OpDef.name` field for the proto\n that defines the operation.\n \"\"\"\n\n def __init__(self, op_type):\n \"\"\"Creates a new decorator with `op_type` as the Operation type.\n\n Args:\n op_type: The string type of an operation. This corresponds to the\n `OpDef.name` field for the proto that defines the operation.\n\n Raises:\n TypeError: If `op_type` is not string.\n \"\"\"\n if not isinstance(op_type, six.string_types):\n raise TypeError(\"op_type must be a string\")\n self._op_type = op_type\n\n def __call__(self, f):\n \"\"\"Registers the function `f` as gradient function for `op_type`.\"\"\"\n _gradient_registry.register(f, self._op_type)\n return f\n\n\[email protected]_endpoints(\"NotDifferentiable\", \"NoGradient\")\n@tf_export(\"no_gradient\", v1=[\"no_gradient\", \"NotDifferentiable\", \"NoGradient\"])\ndef no_gradient(op_type):\n \"\"\"Specifies that ops of type `op_type` is not differentiable.\n\n This function should *not* be used for operations that have a\n well-defined gradient that is not yet implemented.\n\n This function is only used when defining a new op type. It may be\n used for ops such as `tf.size()` that are not differentiable. For\n example:\n\n ```python\n tf.no_gradient(\"Size\")\n ```\n\n The gradient computed for 'op_type' will then propagate zeros.\n\n For ops that have a well-defined gradient but are not yet implemented,\n no declaration should be made, and an error *must* be thrown if\n an attempt to request its gradient is made.\n\n Args:\n op_type: The string type of an operation. This corresponds to the\n `OpDef.name` field for the proto that defines the operation.\n\n Raises:\n TypeError: If `op_type` is not a string.\n\n \"\"\"\n if not isinstance(op_type, six.string_types):\n raise TypeError(\"op_type must be a string\")\n _gradient_registry.register(None, op_type)\n\n\n# Aliases for the old names, will be eventually removed.\nNoGradient = no_gradient\nNotDifferentiable = no_gradient\n\n\ndef get_gradient_function(op):\n \"\"\"Returns the function that computes gradients for \"op\".\"\"\"\n if not op.inputs:\n return None\n\n gradient_function = op._gradient_function # pylint: disable=protected-access\n if gradient_function:\n return gradient_function\n\n try:\n op_type = op.get_attr(\"_gradient_op_type\")\n except ValueError:\n op_type = op.type\n return _gradient_registry.lookup(op_type)\n\n\ndef set_shape_and_handle_data_for_outputs(_):\n \"\"\"No op. 
TODO(b/74620627): Remove this.\"\"\"\n pass\n\n\nclass OpStats(object):\n \"\"\"A holder for statistics about an operator.\n\n This class holds information about the resource requirements for an op,\n including the size of its weight parameters on-disk and how many FLOPS it\n requires to execute forward inference.\n\n If you define a new operation, you can create a function that will return a\n set of information about its usage of the CPU and disk space when serialized.\n The function itself takes a Graph object that's been set up so you can call\n methods like get_tensor_by_name to help calculate the results, and a NodeDef\n argument.\n\n \"\"\"\n\n def __init__(self, statistic_type, value=None):\n \"\"\"Sets up the initial placeholders for the statistics.\"\"\"\n self.statistic_type = statistic_type\n self.value = value\n\n @property\n def statistic_type(self):\n return self._statistic_type\n\n @statistic_type.setter\n def statistic_type(self, statistic_type):\n self._statistic_type = statistic_type\n\n @property\n def value(self):\n return self._value\n\n @value.setter\n def value(self, value):\n self._value = value\n\n def __iadd__(self, other):\n if other.statistic_type != self.statistic_type:\n raise ValueError(\"Can't add an OpStat of type %s to one of %s.\" %\n (self.statistic_type, other.statistic_type))\n if self.value is None:\n self.value = other.value\n elif other.value is not None:\n self._value += other.value\n return self\n\n\n_stats_registry = registry.Registry(\"statistical functions\")\n\n\nclass RegisterStatistics(object):\n \"\"\"A decorator for registering the statistics function for an op type.\n\n This decorator can be defined for an op type so that it gives a\n report on the resources used by an instance of an operator, in the\n form of an OpStats object.\n\n Well-known types of statistics include these so far:\n\n - flops: When running a graph, the bulk of the computation happens doing\n numerical calculations like matrix multiplications. This type allows a node\n to return how many floating-point operations it takes to complete. The\n total number of FLOPs for a graph is a good guide to its expected latency.\n\n You can add your own statistics just by picking a new type string, registering\n functions for the ops you care about, and then calling get_stats_for_node_def.\n\n If a statistic for an op is registered multiple times, a KeyError will be\n raised.\n\n Since the statistics is counted on a per-op basis. It is not suitable for\n model parameters (capacity), which is expected to be counted only once, even\n if it is shared by multiple ops. (e.g. 
RNN)\n\n For example, you can define a new metric called doohickey for a Foo operation\n by placing this in your code:\n\n ```python\n @ops.RegisterStatistics(\"Foo\", \"doohickey\")\n def _calc_foo_bojangles(unused_graph, unused_node_def):\n return ops.OpStats(\"doohickey\", 20)\n ```\n\n Then in client code you can retrieve the value by making this call:\n\n ```python\n doohickey = ops.get_stats_for_node_def(graph, node_def, \"doohickey\")\n ```\n\n If the NodeDef is for an op with a registered doohickey function, you'll get\n back the calculated amount in doohickey.value, or None if it's not defined.\n\n \"\"\"\n\n def __init__(self, op_type, statistic_type):\n \"\"\"Saves the `op_type` as the `Operation` type.\"\"\"\n if not isinstance(op_type, six.string_types):\n raise TypeError(\"op_type must be a string.\")\n if \",\" in op_type:\n raise TypeError(\"op_type must not contain a comma.\")\n self._op_type = op_type\n if not isinstance(statistic_type, six.string_types):\n raise TypeError(\"statistic_type must be a string.\")\n if \",\" in statistic_type:\n raise TypeError(\"statistic_type must not contain a comma.\")\n self._statistic_type = statistic_type\n\n def __call__(self, f):\n \"\"\"Registers \"f\" as the statistics function for \"op_type\".\"\"\"\n _stats_registry.register(f, self._op_type + \",\" + self._statistic_type)\n return f\n\n\ndef get_stats_for_node_def(graph, node, statistic_type):\n \"\"\"Looks up the node's statistics function in the registry and calls it.\n\n This function takes a Graph object and a NodeDef from a GraphDef, and if\n there's an associated statistics method, calls it and returns a result. If no\n function has been registered for the particular node type, it returns an empty\n statistics object.\n\n Args:\n graph: A Graph object that's been set up with the node's graph.\n node: A NodeDef describing the operator.\n statistic_type: A string identifying the statistic we're interested in.\n\n Returns:\n An OpStats object containing information about resource usage.\n \"\"\"\n\n try:\n stats_func = _stats_registry.lookup(node.op + \",\" + statistic_type)\n result = stats_func(graph, node)\n except LookupError:\n result = OpStats(statistic_type)\n return result\n\n\ndef name_from_scope_name(name):\n \"\"\"Returns the name of an op given the name of its scope.\n\n Args:\n name: the name of the scope.\n\n Returns:\n the name of the op (equal to scope name minus any trailing slash).\n \"\"\"\n return name[:-1] if (name and name[-1] == \"/\") else name\n\n\n_MUTATION_LOCK_GROUP = 0\n_SESSION_RUN_LOCK_GROUP = 1\n\n\n@tf_export(\"Graph\")\nclass Graph(object):\n \"\"\"A TensorFlow computation, represented as a dataflow graph.\n\n Graphs are used by `tf.function`s to represent the function's computations.\n Each graph contains a set of `tf.Operation` objects, which represent units of\n computation; and `tf.Tensor` objects, which represent the units of data that\n flow between operations.\n\n ### Using graphs directly (deprecated)\n\n A `tf.Graph` can be constructed and used directly without a `tf.function`, as\n was required in TensorFlow 1, but this is deprecated and it is recommended to\n use a `tf.function` instead. If a graph is directly used, other deprecated\n TensorFlow 1 classes are also required to execute the graph, such as a\n `tf.compat.v1.Session`.\n\n A default graph can be registered with the `tf.Graph.as_default` context\n manager. Then, operations will be added to the graph instead of being executed\n eagerly. 
For example:\n\n ```python\n g = tf.Graph()\n with g.as_default():\n # Define operations and tensors in `g`.\n c = tf.constant(30.0)\n assert c.graph is g\n ```\n\n `tf.compat.v1.get_default_graph()` can be used to obtain the default graph.\n\n Important note: This class *is not* thread-safe for graph construction. All\n operations should be created from a single thread, or external\n synchronization must be provided. Unless otherwise specified, all methods\n are not thread-safe.\n\n A `Graph` instance supports an arbitrary number of \"collections\"\n that are identified by name. For convenience when building a large\n graph, collections can store groups of related objects: for\n example, the `tf.Variable` uses a collection (named\n `tf.GraphKeys.GLOBAL_VARIABLES`) for\n all variables that are created during the construction of a graph. The caller\n may define additional collections by specifying a new name.\n \"\"\"\n\n def __init__(self):\n \"\"\"Creates a new, empty Graph.\"\"\"\n # Protects core state that can be returned via public accessors.\n # Thread-safety is provided on a best-effort basis to support buggy\n # programs, and is not guaranteed by the public `tf.Graph` API.\n #\n # NOTE(mrry): This does not protect the various stacks. A warning will\n # be reported if these are used from multiple threads\n self._lock = threading.RLock()\n # The group lock synchronizes Session.run calls with methods that create\n # and mutate ops (e.g. Graph.create_op()). This synchronization is\n # necessary because it's illegal to modify an operation after it's been run.\n # The group lock allows any number of threads to mutate ops at the same time\n # but if any modification is going on, all Session.run calls have to wait.\n # Similarly, if one or more Session.run calls are going on, all mutate ops\n # have to wait until all Session.run calls have finished.\n self._group_lock = lock_util.GroupLock(num_groups=2)\n self._nodes_by_id = {} # GUARDED_BY(self._lock)\n self._next_id_counter = 0 # GUARDED_BY(self._lock)\n self._nodes_by_name = {} # GUARDED_BY(self._lock)\n self._version = 0 # GUARDED_BY(self._lock)\n # Maps a name used in the graph to the next id to use for that name.\n self._names_in_use = {}\n self._stack_state_is_thread_local = False\n self._thread_local = threading.local()\n # Functions that will be applied to choose a device if none is specified.\n # In TF2.x or after switch_to_thread_local(),\n # self._thread_local._device_function_stack is used instead.\n self._graph_device_function_stack = traceable_stack.TraceableStack()\n # Default original_op applied to new ops.\n self._default_original_op = None\n # Current control flow context. 
It could be either CondContext or\n # WhileContext defined in ops/control_flow_ops.py\n self._control_flow_context = None\n # A new node will depend of the union of all of the nodes in the stack.\n # In TF2.x or after switch_to_thread_local(),\n # self._thread_local._control_dependencies_stack is used instead.\n self._graph_control_dependencies_stack = []\n # Arbitrary collections of objects.\n self._collections = {}\n # The graph-level random seed\n self._seed = None\n # A dictionary of attributes that should be applied to all ops.\n self._attr_scope_map = {}\n # A map from op type to the kernel label that should be used.\n self._op_to_kernel_label_map = {}\n # A map from op type to an alternative op type that should be used when\n # computing gradients.\n self._gradient_override_map = {}\n # A map from op type to a gradient function that should be used instead.\n self._gradient_function_map = {}\n # True if the graph is considered \"finalized\". In that case no\n # new operations can be added.\n self._finalized = False\n # Functions defined in the graph\n self._functions = collections.OrderedDict()\n # Default GraphDef versions\n self._graph_def_versions = versions_pb2.VersionDef(\n producer=versions.GRAPH_DEF_VERSION,\n min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER)\n self._building_function = False\n # Stack of colocate_with ops. In TF2.x or after switch_to_thread_local(),\n # self._thread_local._colocation_stack is used instead.\n self._graph_colocation_stack = traceable_stack.TraceableStack()\n # Set of tensors that are dangerous to feed!\n self._unfeedable_tensors = object_identity.ObjectIdentitySet()\n # Set of operations that are dangerous to fetch!\n self._unfetchable_ops = set()\n # A map of tensor handle placeholder to tensor dtype.\n self._handle_feeders = {}\n # A map from tensor handle to its read op.\n self._handle_readers = {}\n # A map from tensor handle to its move op.\n self._handle_movers = {}\n # A map from tensor handle to its delete op.\n self._handle_deleters = {}\n # Allow optimizers and other objects to pseudo-uniquely key graphs (this key\n # will be shared when defining function graphs, for example, so optimizers\n # being called inside function definitions behave as if they were seeing the\n # actual outside graph).\n self._graph_key = \"grap-key-%d/\" % (uid(),)\n # A string with the last reduction method passed to\n # losses.compute_weighted_loss(), or None. This is required only for\n # backward compatibility with Estimator and optimizer V1 use cases.\n self._last_loss_reduction = None\n # Flag that is used to indicate whether loss has been scaled by optimizer.\n # If this flag has been set, then estimator uses it to scale losss back\n # before reporting. This is required only for backward compatibility with\n # Estimator and optimizer V1 use cases.\n self._is_loss_scaled_by_optimizer = False\n self._container = \"\"\n # Set to True if this graph is being built in an\n # AutomaticControlDependencies context.\n self._add_control_dependencies = False\n # Cache for OpDef protobufs retrieved via the C API.\n self._op_def_cache = {}\n # Cache for constant results of `broadcast_gradient_args()`. The keys are\n # tuples of fully-defined shapes: (x_shape_tuple, y_shape_tuple), and the\n # values are tuples of reduction indices: (rx, ry).\n self._bcast_grad_args_cache = {}\n # Cache for constant results of `reduced_shape()`. 
The keys are pairs of\n # tuples: (input_shape_tuple, reduction_indices_tuple), and the values\n # are pairs of tuples: (output_shape_kept_dims, tile_scaling).\n self._reduced_shape_cache = {}\n\n # TODO(skyewm): fold as much of the above as possible into the C\n # implementation\n self._scoped_c_graph = c_api_util.ScopedTFGraph()\n # The C API requires all ops to have shape functions. Disable this\n # requirement (many custom ops do not have shape functions, and we don't\n # want to break these existing cases).\n pywrap_tf_session.SetRequireShapeInferenceFns(self._c_graph, False)\n if tf2.enabled():\n self.switch_to_thread_local()\n\n # Note: this method is private because the API of tf.Graph() is public and\n # frozen, and this functionality is still not ready for public visibility.\n @tf_contextlib.contextmanager\n def _variable_creator_scope(self, creator, priority=100):\n \"\"\"Scope which defines a variable creation function.\n\n Args:\n creator: A callable taking `next_creator` and `kwargs`. See the\n `tf.variable_creator_scope` docstring.\n priority: Creators with a higher `priority` are called first. Within the\n same priority, creators are called inner-to-outer.\n\n Yields:\n `_variable_creator_scope` is a context manager with a side effect, but\n doesn't return a value.\n\n Raises:\n RuntimeError: If variable creator scopes are not properly nested.\n \"\"\"\n # This step keeps a reference to the existing stack, and it also initializes\n # self._thread_local._variable_creator_stack if it doesn't exist yet.\n old = self._variable_creator_stack\n new = list(old)\n new.append((priority, creator))\n # Sorting is stable, so we'll put higher-priority creators later in the list\n # but otherwise maintain registration order.\n new.sort(key=lambda item: item[0])\n self._thread_local._variable_creator_stack = new # pylint: disable=protected-access\n try:\n yield\n finally:\n if self._thread_local._variable_creator_stack is not new: # pylint: disable=protected-access\n raise RuntimeError(\n \"Exiting variable_creator_scope without proper nesting.\")\n self._thread_local._variable_creator_stack = old # pylint: disable=protected-access\n\n # Note: this method is private because the API of tf.Graph() is public and\n # frozen, and this functionality is still not ready for public visibility.\n @property\n def _variable_creator_stack(self):\n if not hasattr(self._thread_local, \"_variable_creator_stack\"):\n self._thread_local._variable_creator_stack = [] # pylint: disable=protected-access\n\n # This previously returned a copy of the stack instead of the stack itself,\n # to guard against accidental mutation. Consider, however, code that wants\n # to save and restore the variable creator stack:\n # def f():\n # original_stack = graph._variable_creator_stack\n # graph._variable_creator_stack = new_stack\n # ... 
# Some code\n # graph._variable_creator_stack = original_stack\n #\n # And lets say you have some code that calls this function with some\n # variable_creator:\n # def g():\n # with variable_scope.variable_creator_scope(creator):\n # f()\n # When exiting the variable creator scope, it would see a different stack\n # object than it expected leading to a \"Exiting variable_creator_scope\n # without proper nesting\" error.\n return self._thread_local._variable_creator_stack # pylint: disable=protected-access\n\n @_variable_creator_stack.setter\n def _variable_creator_stack(self, variable_creator_stack):\n self._thread_local._variable_creator_stack = variable_creator_stack # pylint: disable=protected-access\n\n def _check_not_finalized(self):\n \"\"\"Check if the graph is finalized.\n\n Raises:\n RuntimeError: If the graph finalized.\n \"\"\"\n if self._finalized:\n raise RuntimeError(\"Graph is finalized and cannot be modified.\")\n\n def _add_op(self, op, op_name):\n \"\"\"Adds 'op' to the graph and returns the unique ID for the added Operation.\n\n Args:\n op: the Operation to add.\n op_name: the name of the Operation.\n\n Returns:\n An integer that is a unique ID for the added Operation.\n \"\"\"\n self._check_not_finalized()\n with self._lock:\n self._next_id_counter += 1\n op_id = self._next_id_counter\n self._nodes_by_id[op_id] = op\n self._nodes_by_name[op_name] = op\n self._version = max(self._version, op_id)\n return op_id\n\n @property\n def _c_graph(self):\n if self._scoped_c_graph:\n return self._scoped_c_graph.graph\n return None\n\n @property\n def version(self):\n \"\"\"Returns a version number that increases as ops are added to the graph.\n\n Note that this is unrelated to the\n `tf.Graph.graph_def_versions`.\n\n Returns:\n An integer version that increases as ops are added to the graph.\n \"\"\"\n if self._finalized:\n return self._version\n\n with self._lock:\n return self._version\n\n @property\n def graph_def_versions(self):\n # pylint: disable=line-too-long\n \"\"\"The GraphDef version information of this graph.\n\n For details on the meaning of each version, see\n [`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto).\n\n Returns:\n A `VersionDef`.\n \"\"\"\n # pylint: enable=line-too-long\n with c_api_util.tf_buffer() as buf:\n pywrap_tf_session.TF_GraphVersions(self._c_graph, buf)\n data = pywrap_tf_session.TF_GetBuffer(buf)\n version_def = versions_pb2.VersionDef()\n version_def.ParseFromString(compat.as_bytes(data))\n return version_def\n\n @property\n def seed(self):\n \"\"\"The graph-level random seed of this graph.\"\"\"\n return self._seed\n\n @seed.setter\n def seed(self, seed):\n self._seed = seed\n\n @property\n def finalized(self):\n \"\"\"True if this graph has been finalized.\"\"\"\n return self._finalized\n\n def finalize(self):\n \"\"\"Finalizes this graph, making it read-only.\n\n After calling `g.finalize()`, no new operations can be added to\n `g`. This method is used to ensure that no operations are added\n to a graph when it is shared between multiple threads, for example\n when using a `tf.compat.v1.train.QueueRunner`.\n \"\"\"\n self._finalized = True\n\n def _unsafe_unfinalize(self):\n \"\"\"Opposite of `finalize`.\n\n Internal interface.\n\n NOTE: Unfinalizing a graph could have negative impact on performance,\n especially in a multi-threaded environment. Unfinalizing a graph\n when it is in use by a Session may lead to undefined behavior. 
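    A minimal sketch of the round trip (illustrative only; this is internal
    API, so the usual caveats apply):

    ```python
    g = tf.Graph()
    g.finalize()
    # The graph is now read-only: adding an op raises
    # RuntimeError("Graph is finalized and cannot be modified.").
    g._unsafe_unfinalize()  # pylint: disable=protected-access
    # New ops may be added to `g` again.
    ```
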
Ensure\n that all sessions using a graph are closed before calling this method.\n \"\"\"\n self._finalized = False\n\n def _get_control_flow_context(self):\n \"\"\"Returns the current control flow context.\n\n Returns:\n A context object.\n \"\"\"\n return self._control_flow_context\n\n def _set_control_flow_context(self, ctx):\n \"\"\"Sets the current control flow context.\n\n Args:\n ctx: a context object.\n \"\"\"\n self._control_flow_context = ctx\n\n def _copy_functions_to_graph_def(self, graph_def, starting_bytesize):\n \"\"\"If this graph contains functions, copy them to `graph_def`.\"\"\"\n bytesize = starting_bytesize\n for f in self._functions.values():\n bytesize += f.definition.ByteSize()\n if bytesize >= (1 << 31) or bytesize < 0:\n raise ValueError(\"GraphDef cannot be larger than 2GB.\")\n graph_def.library.function.extend([f.definition])\n if f.grad_func_name:\n grad_def = function_pb2.GradientDef()\n grad_def.function_name = f.name\n grad_def.gradient_func = f.grad_func_name\n graph_def.library.gradient.extend([grad_def])\n\n def _as_graph_def(self, from_version=None, add_shapes=False):\n # pylint: disable=line-too-long\n \"\"\"Returns a serialized `GraphDef` representation of this graph.\n\n The serialized `GraphDef` can be imported into another `Graph`\n (using `tf.import_graph_def`) or used with the\n [C++ Session API](../../../../api_docs/cc/index.md).\n\n This method is thread-safe.\n\n Args:\n from_version: Optional. If this is set, returns a `GraphDef` containing\n only the nodes that were added to this graph since its `version`\n property had the given value.\n add_shapes: If true, adds an \"_output_shapes\" list attr to each node with\n the inferred shapes of each of its outputs.\n\n Returns:\n A tuple containing a\n [`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)\n protocol buffer, and the version of the graph to which that\n `GraphDef` corresponds.\n\n Raises:\n ValueError: If the `graph_def` would be too large.\n\n \"\"\"\n # pylint: enable=line-too-long\n with self._lock:\n with c_api_util.tf_buffer() as buf:\n pywrap_tf_session.TF_GraphToGraphDef(self._c_graph, buf)\n data = pywrap_tf_session.TF_GetBuffer(buf)\n graph = graph_pb2.GraphDef()\n graph.ParseFromString(compat.as_bytes(data))\n # Strip the experimental library field iff it's empty.\n if not graph.library.function:\n graph.ClearField(\"library\")\n\n if add_shapes:\n for node in graph.node:\n op = self._nodes_by_name[node.name]\n if op.outputs:\n node.attr[\"_output_shapes\"].list.shape.extend(\n [output.get_shape().as_proto() for output in op.outputs])\n for function_def in graph.library.function:\n defined_function = self._functions[function_def.signature.name]\n try:\n func_graph = defined_function.graph\n except AttributeError:\n # _DefinedFunction doesn't have a graph, _EagerDefinedFunction\n # does. Both rely on ops.py, so we can't really isinstance check\n # them.\n continue\n input_shapes = function_def.attr[\"_input_shapes\"]\n try:\n func_graph_inputs = func_graph.inputs\n except AttributeError:\n continue\n # TODO(b/141471245): Fix the inconsistency when inputs of func graph\n # are appended during gradient computation of while/cond.\n for input_tensor, _ in zip(func_graph_inputs,\n function_def.signature.input_arg):\n if input_tensor.dtype == dtypes.resource:\n # TODO(allenl): Save and restore handle data, then save the\n # resource placeholder's shape. 
Right now some shape functions get\n # confused if we set the shape of the resource placeholder (to a\n # scalar of course) and there isn't any handle data.\n input_shapes.list.shape.add().CopyFrom(\n tensor_shape.TensorShape(None).as_proto())\n else:\n input_shapes.list.shape.add().CopyFrom(\n input_tensor.get_shape().as_proto())\n for node in function_def.node_def:\n try:\n op = func_graph.get_operation_by_name(node.name)\n except KeyError:\n continue\n outputs = op.outputs\n\n if op.type == \"StatefulPartitionedCall\":\n # Filter out any extra outputs (possibly added by function\n # backpropagation rewriting).\n num_outputs = len(node.attr[\"Tout\"].list.type)\n outputs = outputs[:num_outputs]\n\n node.attr[\"_output_shapes\"].list.shape.extend(\n [output.get_shape().as_proto() for output in outputs])\n\n return graph, self._version\n\n def as_graph_def(self, from_version=None, add_shapes=False):\n # pylint: disable=line-too-long\n \"\"\"Returns a serialized `GraphDef` representation of this graph.\n\n The serialized `GraphDef` can be imported into another `Graph`\n (using `tf.import_graph_def`) or used with the\n [C++ Session API](../../api_docs/cc/index.md).\n\n This method is thread-safe.\n\n Args:\n from_version: Optional. If this is set, returns a `GraphDef` containing\n only the nodes that were added to this graph since its `version`\n property had the given value.\n add_shapes: If true, adds an \"_output_shapes\" list attr to each node with\n the inferred shapes of each of its outputs.\n\n Returns:\n A\n [`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)\n protocol buffer.\n\n Raises:\n ValueError: If the `graph_def` would be too large.\n \"\"\"\n # pylint: enable=line-too-long\n result, _ = self._as_graph_def(from_version, add_shapes)\n return result\n\n def _is_function(self, name):\n \"\"\"Tests whether 'name' is registered in this graph's function library.\n\n Args:\n name: string op name.\n\n Returns:\n bool indicating whether or not 'name' is registered in function library.\n \"\"\"\n return compat.as_str(name) in self._functions\n\n def _get_function(self, name):\n \"\"\"Returns the function definition for 'name'.\n\n Args:\n name: string function name.\n\n Returns:\n The function def proto.\n \"\"\"\n return self._functions.get(compat.as_str(name), None)\n\n def _add_function(self, function):\n \"\"\"Adds a function to the graph.\n\n After the function has been added, you can call to the function by\n passing the function name in place of an op name to\n `Graph.create_op()`.\n\n Args:\n function: A `_DefinedFunction` object.\n\n Raises:\n ValueError: if another function is defined with the same name.\n \"\"\"\n name = function.name\n # Sanity checks on gradient definition.\n if (function.grad_func_name is not None) and (function.python_grad_func is\n not None):\n raise ValueError(\"Gradient defined twice for function %s\" % name)\n\n # Add function to graph\n # pylint: disable=protected-access\n gradient = (\n function._grad_func._c_func.func if function._grad_func else None)\n pywrap_tf_session.TF_GraphCopyFunction(self._c_graph, function._c_func.func,\n gradient)\n # pylint: enable=protected-access\n\n self._functions[compat.as_str(name)] = function\n\n # Need a new-enough consumer to support the functions we add to the graph.\n if self._graph_def_versions.min_consumer < 12:\n self._graph_def_versions.min_consumer = 12\n\n @property\n def building_function(self):\n \"\"\"Returns True iff this graph represents a function.\"\"\"\n return 
self._building_function\n\n # Helper functions to create operations.\n @deprecated_args(None,\n \"Shapes are always computed; don't use the compute_shapes \"\n \"as it has no effect.\", \"compute_shapes\")\n def create_op(\n self,\n op_type,\n inputs,\n dtypes=None, # pylint: disable=redefined-outer-name\n input_types=None,\n name=None,\n attrs=None,\n op_def=None,\n compute_shapes=True,\n compute_device=True):\n \"\"\"Creates an `Operation` in this graph.\n\n This is a low-level interface for creating an `Operation`. Most\n programs will not call this method directly, and instead use the\n Python op constructors, such as `tf.constant()`, which add ops to\n the default graph.\n\n Args:\n op_type: The `Operation` type to create. This corresponds to the\n `OpDef.name` field for the proto that defines the operation.\n inputs: A list of `Tensor` objects that will be inputs to the `Operation`.\n dtypes: (Optional) A list of `DType` objects that will be the types of the\n tensors that the operation produces.\n input_types: (Optional.) A list of `DType`s that will be the types of the\n tensors that the operation consumes. By default, uses the base `DType`\n of each input in `inputs`. Operations that expect reference-typed inputs\n must specify `input_types` explicitly.\n name: (Optional.) A string name for the operation. If not specified, a\n name is generated based on `op_type`.\n attrs: (Optional.) A dictionary where the key is the attribute name (a\n string) and the value is the respective `attr` attribute of the\n `NodeDef` proto that will represent the operation (an `AttrValue`\n proto).\n op_def: (Optional.) The `OpDef` proto that describes the `op_type` that\n the operation will have.\n compute_shapes: (Optional.) Deprecated. Has no effect (shapes are always\n computed).\n compute_device: (Optional.) If True, device functions will be executed to\n compute the device property of the Operation.\n\n Raises:\n TypeError: if any of the inputs is not a `Tensor`.\n ValueError: if colocation conflicts with existing device assignment.\n\n Returns:\n An `Operation` object.\n \"\"\"\n del compute_shapes\n for idx, a in enumerate(inputs):\n if not isinstance(a, Tensor):\n raise TypeError(\"Input #%d is not a tensor: %s\" % (idx, a))\n return self._create_op_internal(op_type, inputs, dtypes, input_types, name,\n attrs, op_def, compute_device)\n\n def _create_op_internal(\n self,\n op_type,\n inputs,\n dtypes=None, # pylint: disable=redefined-outer-name\n input_types=None,\n name=None,\n attrs=None,\n op_def=None,\n compute_device=True):\n \"\"\"Creates an `Operation` in this graph.\n\n Implements `Graph.create_op()` without the overhead of the deprecation\n wrapper.\n\n Args:\n op_type: The `Operation` type to create. This corresponds to the\n `OpDef.name` field for the proto that defines the operation.\n inputs: A list of `Tensor` objects that will be inputs to the `Operation`.\n dtypes: (Optional) A list of `DType` objects that will be the types of the\n tensors that the operation produces.\n input_types: (Optional.) A list of `DType`s that will be the types of the\n tensors that the operation consumes. By default, uses the base `DType`\n of each input in `inputs`. Operations that expect reference-typed inputs\n must specify `input_types` explicitly.\n name: (Optional.) A string name for the operation. If not specified, a\n name is generated based on `op_type`.\n attrs: (Optional.) 
A dictionary where the key is the attribute name (a\n string) and the value is the respective `attr` attribute of the\n `NodeDef` proto that will represent the operation (an `AttrValue`\n proto).\n op_def: (Optional.) The `OpDef` proto that describes the `op_type` that\n the operation will have.\n compute_device: (Optional.) If True, device functions will be executed to\n compute the device property of the Operation.\n\n Raises:\n ValueError: if colocation conflicts with existing device assignment.\n\n Returns:\n An `Operation` object.\n \"\"\"\n self._check_not_finalized()\n if name is None:\n name = op_type\n # If a names ends with a '/' it is a \"name scope\" and we use it as-is,\n # after removing the trailing '/'.\n if name and name[-1] == \"/\":\n name = name_from_scope_name(name)\n else:\n name = self.unique_name(name)\n\n node_def = _NodeDef(op_type, name, attrs)\n\n input_ops = set(t.op for t in inputs)\n control_inputs = self._control_dependencies_for_inputs(input_ops)\n # _create_op_helper mutates the new Operation. `_mutation_lock` ensures a\n # Session.run call cannot occur between creating and mutating the op.\n with self._mutation_lock():\n ret = Operation(\n node_def,\n self,\n inputs=inputs,\n output_types=dtypes,\n control_inputs=control_inputs,\n input_types=input_types,\n original_op=self._default_original_op,\n op_def=op_def)\n self._create_op_helper(ret, compute_device=compute_device)\n return ret\n\n def _create_op_from_tf_operation(self, c_op, compute_device=True):\n \"\"\"Creates an `Operation` in this graph from the supplied TF_Operation.\n\n This method is like create_op() except the new Operation is constructed\n using `c_op`. The returned Operation will have `c_op` as its _c_op\n field. This is used to create Operation objects around TF_Operations created\n indirectly by the C API (e.g. by TF_ImportGraphDef, TF_FinishWhile).\n\n This function does not call Operation._control_flow_post_processing or\n Graph._control_dependencies_for_inputs (since the inputs may not be\n available yet). The caller is responsible for calling these methods.\n\n Args:\n c_op: a wrapped TF_Operation\n compute_device: (Optional.) If True, device functions will be executed to\n compute the device property of the Operation.\n\n Returns:\n An `Operation` object.\n \"\"\"\n self._check_not_finalized()\n ret = Operation(c_op, self)\n # If a name_scope was created with ret.name but no nodes were created in it,\n # the name will still appear in _names_in_use even though the name hasn't\n # been used. This is ok, just leave _names_in_use as-is in this case.\n # TODO(skyewm): make the C API guarantee no name conflicts.\n name_key = ret.name.lower()\n if name_key not in self._names_in_use:\n self._names_in_use[name_key] = 1\n self._create_op_helper(ret, compute_device=compute_device)\n return ret\n\n def _create_op_helper(self, op, compute_device=True):\n \"\"\"Common logic for creating an op in this graph.\"\"\"\n # Apply any additional attributes requested. 
Do not overwrite any existing\n # attributes.\n for key, value in self._attr_scope_map.items():\n try:\n op.get_attr(key)\n except ValueError:\n if callable(value):\n value = value(op.node_def)\n if not isinstance(value, (type(None), attr_value_pb2.AttrValue)):\n raise TypeError(\n \"Callable for scope map key '%s' must return either None or \"\n \"an AttrValue protocol buffer; but it returned: %s\" %\n (key, value))\n if value:\n op._set_attr(key, value) # pylint: disable=protected-access\n\n # Apply a kernel label if one has been specified for this op type.\n try:\n kernel_label = self._op_to_kernel_label_map[op.type]\n op._set_attr(\"_kernel\", # pylint: disable=protected-access\n attr_value_pb2.AttrValue(s=compat.as_bytes(kernel_label)))\n except KeyError:\n pass\n\n op._gradient_function = self._gradient_function_map.get(op.type) # pylint: disable=protected-access\n\n # Apply the overriding op type for gradients if one has been specified for\n # this op type.\n try:\n mapped_op_type = self._gradient_override_map[op.type]\n op._set_attr(\"_gradient_op_type\", # pylint: disable=protected-access\n attr_value_pb2.AttrValue(s=compat.as_bytes(mapped_op_type)))\n except KeyError:\n pass\n\n self._record_op_seen_by_control_dependencies(op)\n\n if compute_device:\n self._apply_device_functions(op)\n\n # Snapshot the colocation stack metadata before we might generate error\n # messages using it. Note that this snapshot depends on the actual stack\n # and is independent of the op's _class attribute.\n # pylint: disable=protected-access\n op._colocation_code_locations = self._snapshot_colocation_stack_metadata()\n # pylint: enable=protected-access\n\n if self._colocation_stack:\n all_colocation_groups = []\n for colocation_op in self._colocation_stack.peek_objs():\n all_colocation_groups.extend(colocation_op.colocation_groups())\n if colocation_op.device:\n # pylint: disable=protected-access\n op._set_device(colocation_op.device)\n # pylint: enable=protected-access\n\n all_colocation_groups = sorted(set(all_colocation_groups))\n # pylint: disable=protected-access\n op._set_attr(\n \"_class\",\n attr_value_pb2.AttrValue(\n list=attr_value_pb2.AttrValue.ListValue(s=all_colocation_groups)))\n # pylint: enable=protected-access\n\n # Sets \"container\" attribute if\n # (1) self._container is not None\n # (2) \"is_stateful\" is set in OpDef\n # (3) \"container\" attribute is in OpDef\n # (4) \"container\" attribute is None\n if self._container and op._is_stateful: # pylint: disable=protected-access\n try:\n container_attr = op.get_attr(\"container\")\n except ValueError:\n # \"container\" attribute is not in OpDef\n pass\n else:\n if not container_attr:\n op._set_attr(\"container\", attr_value_pb2.AttrValue( # pylint: disable=protected-access\n s=compat.as_bytes(self._container)))\n\n def _add_new_tf_operations(self, compute_devices=True):\n \"\"\"Creates `Operations` in this graph for any new TF_Operations.\n\n This is useful for when TF_Operations are indirectly created by the C API\n outside of the Operation constructor (e.g. by TF_ImportGraphDef,\n TF_FinishWhile). This ensures there are corresponding Operations for all\n TF_Operations in the underlying TF_Graph.\n\n Args:\n compute_devices: (Optional.) 
If True, device functions will be executed to\n compute the device properties of each new Operation.\n\n Returns:\n A list of the new `Operation` objects.\n \"\"\"\n # Create all Operation objects before accessing their inputs since an op may\n # be created before its inputs.\n new_ops = [\n self._create_op_from_tf_operation(c_op, compute_device=compute_devices)\n for c_op in c_api_util.new_tf_operations(self)\n ]\n\n # pylint: disable=protected-access\n for op in new_ops:\n new_control_inputs = self._control_dependencies_for_inputs(op.inputs)\n op._add_control_inputs(new_control_inputs)\n op._control_flow_post_processing()\n # pylint: enable=protected-access\n\n return new_ops\n\n def as_graph_element(self, obj, allow_tensor=True, allow_operation=True):\n \"\"\"Returns the object referred to by `obj`, as an `Operation` or `Tensor`.\n\n This function validates that `obj` represents an element of this\n graph, and gives an informative error message if it is not.\n\n This function is the canonical way to get/validate an object of\n one of the allowed types from an external argument reference in the\n Session API.\n\n This method may be called concurrently from multiple threads.\n\n Args:\n obj: A `Tensor`, an `Operation`, or the name of a tensor or operation. Can\n also be any object with an `_as_graph_element()` method that returns a\n value of one of these types. Note: `_as_graph_element` will be called\n inside the graph's lock and so may not modify the graph.\n allow_tensor: If true, `obj` may refer to a `Tensor`.\n allow_operation: If true, `obj` may refer to an `Operation`.\n\n Returns:\n The `Tensor` or `Operation` in the Graph corresponding to `obj`.\n\n Raises:\n TypeError: If `obj` is not a type we support attempting to convert\n to types.\n ValueError: If `obj` is of an appropriate type but invalid. For\n example, an invalid string.\n KeyError: If `obj` is not an object in the graph.\n \"\"\"\n if self._finalized:\n return self._as_graph_element_locked(obj, allow_tensor, allow_operation)\n\n with self._lock:\n return self._as_graph_element_locked(obj, allow_tensor, allow_operation)\n\n def _as_graph_element_locked(self, obj, allow_tensor, allow_operation):\n \"\"\"See `Graph.as_graph_element()` for details.\"\"\"\n # The vast majority of this function is figuring\n # out what an API user might be doing wrong, so\n # that we can give helpful error messages.\n #\n # Ideally, it would be nice to split it up, but we\n # need context to generate nice error messages.\n\n if allow_tensor and allow_operation:\n types_str = \"Tensor or Operation\"\n elif allow_tensor:\n types_str = \"Tensor\"\n elif allow_operation:\n types_str = \"Operation\"\n else:\n raise ValueError(\"allow_tensor and allow_operation can't both be False.\")\n\n temp_obj = _as_graph_element(obj)\n if temp_obj is not None:\n obj = temp_obj\n\n # If obj appears to be a name...\n if isinstance(obj, compat.bytes_or_text_types):\n name = compat.as_str(obj)\n\n if \":\" in name and allow_tensor:\n # Looks like a Tensor name and can be a Tensor.\n try:\n op_name, out_n = name.split(\":\")\n out_n = int(out_n)\n except:\n raise ValueError(\"The name %s looks a like a Tensor name, but is \"\n \"not a valid one. Tensor names must be of the \"\n \"form \\\"<op_name>:<output_index>\\\".\" % repr(name))\n if op_name in self._nodes_by_name:\n op = self._nodes_by_name[op_name]\n else:\n raise KeyError(\"The name %s refers to a Tensor which does not \"\n \"exist. 
The operation, %s, does not exist in the \"\n \"graph.\" % (repr(name), repr(op_name)))\n try:\n return op.outputs[out_n]\n except:\n raise KeyError(\"The name %s refers to a Tensor which does not \"\n \"exist. The operation, %s, exists but only has \"\n \"%s outputs.\" %\n (repr(name), repr(op_name), len(op.outputs)))\n\n elif \":\" in name and not allow_tensor:\n # Looks like a Tensor name but can't be a Tensor.\n raise ValueError(\"Name %s appears to refer to a Tensor, not a %s.\" %\n (repr(name), types_str))\n\n elif \":\" not in name and allow_operation:\n # Looks like an Operation name and can be an Operation.\n if name not in self._nodes_by_name:\n raise KeyError(\"The name %s refers to an Operation not in the \"\n \"graph.\" % repr(name))\n return self._nodes_by_name[name]\n\n elif \":\" not in name and not allow_operation:\n # Looks like an Operation name but can't be an Operation.\n if name in self._nodes_by_name:\n # Yep, it's an Operation name\n err_msg = (\"The name %s refers to an Operation, not a %s.\" %\n (repr(name), types_str))\n else:\n err_msg = (\"The name %s looks like an (invalid) Operation name, \"\n \"not a %s.\" % (repr(name), types_str))\n err_msg += (\" Tensor names must be of the form \"\n \"\\\"<op_name>:<output_index>\\\".\")\n raise ValueError(err_msg)\n\n elif isinstance(obj, Tensor) and allow_tensor:\n # Actually obj is just the object it's referring to.\n if obj.graph is not self:\n raise ValueError(\"Tensor %s is not an element of this graph.\" % obj)\n return obj\n elif isinstance(obj, Operation) and allow_operation:\n # Actually obj is just the object it's referring to.\n if obj.graph is not self:\n raise ValueError(\"Operation %s is not an element of this graph.\" % obj)\n return obj\n else:\n # We give up!\n raise TypeError(\"Can not convert a %s into a %s.\" %\n (type(obj).__name__, types_str))\n\n def get_operations(self):\n \"\"\"Return the list of operations in the graph.\n\n You can modify the operations in place, but modifications\n to the list such as inserts/delete have no effect on the\n list of operations known to the graph.\n\n This method may be called concurrently from multiple threads.\n\n Returns:\n A list of Operations.\n \"\"\"\n if self._finalized:\n return list(self._nodes_by_id.values())\n\n with self._lock:\n return list(self._nodes_by_id.values())\n\n def get_operation_by_name(self, name):\n \"\"\"Returns the `Operation` with the given `name`.\n\n This method may be called concurrently from multiple threads.\n\n Args:\n name: The name of the `Operation` to return.\n\n Returns:\n The `Operation` with the given `name`.\n\n Raises:\n TypeError: If `name` is not a string.\n KeyError: If `name` does not correspond to an operation in this graph.\n \"\"\"\n\n if not isinstance(name, six.string_types):\n raise TypeError(\"Operation names are strings (or similar), not %s.\" %\n type(name).__name__)\n return self.as_graph_element(name, allow_tensor=False, allow_operation=True)\n\n def _get_operation_by_name_unsafe(self, name):\n \"\"\"Returns the `Operation` with the given `name`.\n\n This is a internal unsafe version of get_operation_by_name. It skips many\n checks and does not have user friendly error messages but runs considerably\n faster. 
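    For orientation, the public lookup path that this bypasses behaves like
    the following sketch:

    ```python
    g = tf.Graph()
    with g.as_default():
      c = tf.constant(1.0, name="c")
    assert g.get_operation_by_name("c") is c.op
    assert g.get_tensor_by_name("c:0") is c
    ```
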
This method may be called concurrently from multiple threads.\n\n Args:\n name: The name of the `Operation` to return.\n\n Returns:\n The `Operation` with the given `name`.\n\n Raises:\n KeyError: If `name` does not correspond to an operation in this graph.\n \"\"\"\n\n if self._finalized:\n return self._nodes_by_name[name]\n\n with self._lock:\n return self._nodes_by_name[name]\n\n def _get_operation_by_tf_operation(self, tf_oper):\n op_name = pywrap_tf_session.TF_OperationName(tf_oper)\n return self._get_operation_by_name_unsafe(op_name)\n\n def get_tensor_by_name(self, name):\n \"\"\"Returns the `Tensor` with the given `name`.\n\n This method may be called concurrently from multiple threads.\n\n Args:\n name: The name of the `Tensor` to return.\n\n Returns:\n The `Tensor` with the given `name`.\n\n Raises:\n TypeError: If `name` is not a string.\n KeyError: If `name` does not correspond to a tensor in this graph.\n \"\"\"\n # Names should be strings.\n if not isinstance(name, six.string_types):\n raise TypeError(\"Tensor names are strings (or similar), not %s.\" %\n type(name).__name__)\n return self.as_graph_element(name, allow_tensor=True, allow_operation=False)\n\n def _get_tensor_by_tf_output(self, tf_output):\n \"\"\"Returns the `Tensor` representing `tf_output`.\n\n Note that there is only one such `Tensor`, i.e. multiple calls to this\n function with the same TF_Output value will always return the same `Tensor`\n object.\n\n Args:\n tf_output: A wrapped `TF_Output` (the C API equivalent of `Tensor`).\n\n Returns:\n The `Tensor` that represents `tf_output`.\n \"\"\"\n op = self._get_operation_by_tf_operation(tf_output.oper)\n return op.outputs[tf_output.index]\n\n @property\n def _last_id(self):\n return self._next_id_counter\n\n def _get_op_def(self, type): # pylint: disable=redefined-builtin\n \"\"\"Returns the `OpDef` proto for `type`. `type` is a string.\"\"\"\n # NOTE: No locking is required because the lookup and insertion operations\n # on Python dictionaries are atomic.\n try:\n return self._op_def_cache[type]\n except KeyError:\n with c_api_util.tf_buffer() as buf:\n # pylint: disable=protected-access\n pywrap_tf_session.TF_GraphGetOpDef(self._c_graph, compat.as_bytes(type),\n buf)\n # pylint: enable=protected-access\n data = pywrap_tf_session.TF_GetBuffer(buf)\n op_def = op_def_pb2.OpDef()\n op_def.ParseFromString(compat.as_bytes(data))\n self._op_def_cache[type] = op_def\n return op_def\n\n def as_default(self):\n \"\"\"Returns a context manager that makes this `Graph` the default graph.\n\n This method should be used if you want to create multiple graphs\n in the same process. For convenience, a global default graph is\n provided, and all ops will be added to this graph if you do not\n create a new graph explicitly.\n\n Use this method with the `with` keyword to specify that ops created within\n the scope of a block should be added to this graph. In this case, once\n the scope of the `with` is exited, the previous default graph is set again\n as default. There is a stack, so it's ok to have multiple nested levels\n of `as_default` calls.\n\n The default graph is a property of the current thread. If you\n create a new thread, and wish to use the default graph in that\n thread, you must explicitly add a `with g.as_default():` in that\n thread's function.\n\n The following code examples are equivalent:\n\n ```python\n # 1. Using Graph.as_default():\n g = tf.Graph()\n with g.as_default():\n c = tf.constant(5.0)\n assert c.graph is g\n\n # 2. 
Constructing and making default:\n with tf.Graph().as_default() as g:\n c = tf.constant(5.0)\n assert c.graph is g\n ```\n\n If eager execution is enabled ops created under this context manager will be\n added to the graph instead of executed eagerly.\n\n Returns:\n A context manager for using this graph as the default graph.\n \"\"\"\n return _default_graph_stack.get_controller(self)\n\n @property\n def collections(self):\n \"\"\"Returns the names of the collections known to this graph.\"\"\"\n return list(self._collections)\n\n def add_to_collection(self, name, value):\n \"\"\"Stores `value` in the collection with the given `name`.\n\n Note that collections are not sets, so it is possible to add a value to\n a collection several times.\n\n Args:\n name: The key for the collection. The `GraphKeys` class contains many\n standard names for collections.\n value: The value to add to the collection.\n \"\"\" # pylint: disable=g-doc-exception\n self._check_not_finalized()\n with self._lock:\n if name not in self._collections:\n self._collections[name] = [value]\n else:\n self._collections[name].append(value)\n\n def add_to_collections(self, names, value):\n \"\"\"Stores `value` in the collections given by `names`.\n\n Note that collections are not sets, so it is possible to add a value to\n a collection several times. This function makes sure that duplicates in\n `names` are ignored, but it will not check for pre-existing membership of\n `value` in any of the collections in `names`.\n\n `names` can be any iterable, but if `names` is a string, it is treated as a\n single collection name.\n\n Args:\n names: The keys for the collections to add to. The `GraphKeys` class\n contains many standard names for collections.\n value: The value to add to the collections.\n \"\"\"\n # Make sure names are unique, but treat strings as a single collection name\n names = (names,) if isinstance(names, six.string_types) else set(names)\n for name in names:\n self.add_to_collection(name, value)\n\n def get_collection_ref(self, name):\n \"\"\"Returns a list of values in the collection with the given `name`.\n\n If the collection exists, this returns the list itself, which can\n be modified in place to change the collection. If the collection does\n not exist, it is created as an empty list and the list is returned.\n\n This is different from `get_collection()` which always returns a copy of\n the collection list if it exists and never creates an empty collection.\n\n Args:\n name: The key for the collection. For example, the `GraphKeys` class\n contains many standard names for collections.\n\n Returns:\n The list of values in the collection with the given `name`, or an empty\n list if no value has been added to that collection.\n \"\"\" # pylint: disable=g-doc-exception\n with self._lock:\n coll_list = self._collections.get(name, None)\n if coll_list is None:\n coll_list = []\n self._collections[name] = coll_list\n return coll_list\n\n def get_collection(self, name, scope=None):\n \"\"\"Returns a list of values in the collection with the given `name`.\n\n This is different from `get_collection_ref()` which always returns the\n actual collection list if it exists in that it returns a new list each time\n it is called.\n\n Args:\n name: The key for the collection. For example, the `GraphKeys` class\n contains many standard names for collections.\n scope: (Optional.) A string. If supplied, the resulting list is filtered\n to include only items whose `name` attribute matches `scope` using\n `re.match`. 
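      For instance (a sketch; the collection name and the `"layer1/"` name
      prefix are assumptions for illustration):

      ```python
      g.get_collection("my_vars", scope="layer1/")
      # -> only items whose `.name` starts with "layer1/"
      ```
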
Items without a `name` attribute are never returned if a\n scope is supplied. The choice of `re.match` means that a `scope` without\n special tokens filters by prefix.\n\n Returns:\n The list of values in the collection with the given `name`, or\n an empty list if no value has been added to that collection. The\n list contains the values in the order under which they were\n collected.\n \"\"\" # pylint: disable=g-doc-exception\n with self._lock:\n collection = self._collections.get(name, None)\n if collection is None:\n return []\n if scope is None:\n return list(collection)\n else:\n c = []\n regex = re.compile(scope)\n for item in collection:\n try:\n if regex.match(item.name):\n c.append(item)\n except AttributeError:\n # Collection items with no name are ignored.\n pass\n return c\n\n def get_all_collection_keys(self):\n \"\"\"Returns a list of collections used in this graph.\"\"\"\n with self._lock:\n return [x for x in self._collections if isinstance(x, six.string_types)]\n\n def clear_collection(self, name):\n \"\"\"Clears all values in a collection.\n\n Args:\n name: The key for the collection. The `GraphKeys` class contains many\n standard names for collections.\n \"\"\"\n self._check_not_finalized()\n with self._lock:\n if name in self._collections:\n del self._collections[name]\n\n @tf_contextlib.contextmanager\n def _original_op(self, op):\n \"\"\"Python 'with' handler to help annotate ops with their originator.\n\n An op may have an 'original_op' property that indicates the op on which\n it was based. For example a replica op is based on the op that was\n replicated and a gradient op is based on the op that was differentiated.\n\n All ops created in the scope of this 'with' handler will have\n the given 'op' as their original op.\n\n Args:\n op: The Operation that all ops created in this scope will have as their\n original op.\n\n Yields:\n Nothing.\n \"\"\"\n old_original_op = self._default_original_op\n self._default_original_op = op\n try:\n yield\n finally:\n self._default_original_op = old_original_op\n\n @property\n def _name_stack(self):\n # This may be called from a thread where name_stack doesn't yet exist.\n if not hasattr(self._thread_local, \"_name_stack\"):\n self._thread_local._name_stack = \"\"\n return self._thread_local._name_stack\n\n @_name_stack.setter\n def _name_stack(self, name_stack):\n self._thread_local._name_stack = name_stack\n\n # pylint: disable=g-doc-return-or-yield,line-too-long\n @tf_contextlib.contextmanager\n def name_scope(self, name):\n \"\"\"Returns a context manager that creates hierarchical names for operations.\n\n A graph maintains a stack of name scopes. A `with name_scope(...):`\n statement pushes a new name onto the stack for the lifetime of the context.\n\n The `name` argument will be interpreted as follows:\n\n * A string (not ending with '/') will create a new name scope, in which\n `name` is appended to the prefix of all operations created in the\n context. If `name` has been used before, it will be made unique by\n calling `self.unique_name(name)`.\n * A scope previously captured from a `with g.name_scope(...) 
as\n scope:` statement will be treated as an \"absolute\" name scope, which\n makes it possible to re-enter existing scopes.\n * A value of `None` or the empty string will reset the current name scope\n to the top-level (empty) name scope.\n\n For example:\n\n ```python\n with tf.Graph().as_default() as g:\n c = tf.constant(5.0, name=\"c\")\n assert c.op.name == \"c\"\n c_1 = tf.constant(6.0, name=\"c\")\n assert c_1.op.name == \"c_1\"\n\n # Creates a scope called \"nested\"\n with g.name_scope(\"nested\") as scope:\n nested_c = tf.constant(10.0, name=\"c\")\n assert nested_c.op.name == \"nested/c\"\n\n # Creates a nested scope called \"inner\".\n with g.name_scope(\"inner\"):\n nested_inner_c = tf.constant(20.0, name=\"c\")\n assert nested_inner_c.op.name == \"nested/inner/c\"\n\n # Create a nested scope called \"inner_1\".\n with g.name_scope(\"inner\"):\n nested_inner_1_c = tf.constant(30.0, name=\"c\")\n assert nested_inner_1_c.op.name == \"nested/inner_1/c\"\n\n # Treats `scope` as an absolute name scope, and\n # switches to the \"nested/\" scope.\n with g.name_scope(scope):\n nested_d = tf.constant(40.0, name=\"d\")\n assert nested_d.op.name == \"nested/d\"\n\n with g.name_scope(\"\"):\n e = tf.constant(50.0, name=\"e\")\n assert e.op.name == \"e\"\n ```\n\n The name of the scope itself can be captured by `with\n g.name_scope(...) as scope:`, which stores the name of the scope\n in the variable `scope`. This value can be used to name an\n operation that represents the overall result of executing the ops\n in a scope. For example:\n\n ```python\n inputs = tf.constant(...)\n with g.name_scope('my_layer') as scope:\n weights = tf.Variable(..., name=\"weights\")\n biases = tf.Variable(..., name=\"biases\")\n affine = tf.matmul(inputs, weights) + biases\n output = tf.nn.relu(affine, name=scope)\n ```\n\n NOTE: This constructor validates the given `name`. Valid scope\n names match one of the following regular expressions:\n\n [A-Za-z0-9.][A-Za-z0-9_.\\\\-/]* (for scopes at the root)\n [A-Za-z0-9_.\\\\-/]* (for other scopes)\n\n Args:\n name: A name for the scope.\n\n Returns:\n A context manager that installs `name` as a new name scope.\n\n Raises:\n ValueError: If `name` is not a valid scope name, according to the rules\n above.\n \"\"\"\n if name:\n if isinstance(name, compat.bytes_or_text_types):\n name = compat.as_str(name)\n\n if self._name_stack:\n # Scopes created in a nested scope may have initial characters\n # that are illegal as the initial character of an op name\n # (viz. '-', '\\', '/', and '_').\n if not _VALID_SCOPE_NAME_REGEX.match(name):\n raise ValueError(\"'%s' is not a valid scope name\" % name)\n else:\n # Scopes created in the root must match the more restrictive\n # op name regex, which constrains the initial character.\n if not _VALID_OP_NAME_REGEX.match(name):\n raise ValueError(\"'%s' is not a valid scope name\" % name)\n old_stack = self._name_stack\n if not name: # Both for name=None and name=\"\" we re-set to empty scope.\n new_stack = None\n elif name[-1] == \"/\":\n new_stack = name_from_scope_name(name)\n else:\n new_stack = self.unique_name(name)\n self._name_stack = new_stack\n try:\n yield \"\" if new_stack is None else new_stack + \"/\"\n finally:\n self._name_stack = old_stack\n\n # pylint: enable=g-doc-return-or-yield,line-too-long\n\n def unique_name(self, name, mark_as_used=True):\n \"\"\"Return a unique operation name for `name`.\n\n Note: You rarely need to call `unique_name()` directly. 
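    If you do call it, the numbering behaves like the following sketch:

    ```python
    g = tf.Graph()
    assert g.unique_name("foo") == "foo"
    assert g.unique_name("foo") == "foo_1"
    assert g.unique_name("foo_1") == "foo_1_1"
    ```
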
Most of\n the time you just need to create `with g.name_scope()` blocks to\n generate structured names.\n\n `unique_name` is used to generate structured names, separated by\n `\"/\"`, to help identify operations when debugging a graph.\n Operation names are displayed in error messages reported by the\n TensorFlow runtime, and in various visualization tools such as\n TensorBoard.\n\n If `mark_as_used` is set to `True`, which is the default, a new\n unique name is created and marked as in use. If it's set to `False`,\n the unique name is returned without actually being marked as used.\n This is useful when the caller simply wants to know what the name\n to be created will be.\n\n Args:\n name: The name for an operation.\n mark_as_used: Whether to mark this name as being used.\n\n Returns:\n A string to be passed to `create_op()` that will be used\n to name the operation being created.\n \"\"\"\n if self._name_stack:\n name = self._name_stack + \"/\" + name\n\n # For the sake of checking for names in use, we treat names as case\n # insensitive (e.g. foo = Foo).\n name_key = name.lower()\n i = self._names_in_use.get(name_key, 0)\n # Increment the number for \"name_key\".\n if mark_as_used:\n self._names_in_use[name_key] = i + 1\n if i > 0:\n base_name_key = name_key\n # Make sure the composed name key is not already used.\n while name_key in self._names_in_use:\n name_key = \"%s_%d\" % (base_name_key, i)\n i += 1\n # Mark the composed name_key as used in case someone wants\n # to call unique_name(\"name_1\").\n if mark_as_used:\n self._names_in_use[name_key] = 1\n\n # Return the new name with the original capitalization of the given name.\n name = \"%s_%d\" % (name, i - 1)\n return name\n\n def get_name_scope(self):\n \"\"\"Returns the current name scope.\n\n For example:\n\n ```python\n with tf.name_scope('scope1'):\n with tf.name_scope('scope2'):\n print(tf.compat.v1.get_default_graph().get_name_scope())\n ```\n would print the string `scope1/scope2`.\n\n Returns:\n A string representing the current name scope.\n \"\"\"\n return self._name_stack\n\n @tf_contextlib.contextmanager\n def _colocate_with_for_gradient(self, op, gradient_uid,\n ignore_existing=False):\n with self.colocate_with(op, ignore_existing):\n if gradient_uid is not None and self._control_flow_context is not None:\n self._control_flow_context.EnterGradientColocation(op, gradient_uid)\n try:\n yield\n finally:\n self._control_flow_context.ExitGradientColocation(op, gradient_uid)\n else:\n yield\n\n @tf_contextlib.contextmanager\n def colocate_with(self, op, ignore_existing=False):\n \"\"\"Returns a context manager that specifies an op to colocate with.\n\n Note: this function is not for public use, only for internal libraries.\n\n For example:\n\n ```python\n a = tf.Variable([1.0])\n with g.colocate_with(a):\n b = tf.constant(1.0)\n c = tf.add(a, b)\n ```\n\n `b` and `c` will always be colocated with `a`, no matter where `a`\n is eventually placed.\n\n **NOTE** Using a colocation scope resets any existing device constraints.\n\n If `op` is `None` then `ignore_existing` must be `True` and the new\n scope resets all colocation and device constraints.\n\n Args:\n op: The op to colocate all created ops with, or `None`.\n ignore_existing: If true, only applies colocation of this op within the\n context, rather than applying all colocation properties on the stack.\n If `op` is `None`, this value must be `True`.\n\n Raises:\n ValueError: if op is None but ignore_existing is False.\n\n Yields:\n A context manager that specifies the 
op with which to colocate\n newly created ops.\n \"\"\"\n if op is None and not ignore_existing:\n raise ValueError(\"Trying to reset colocation (op is None) but \"\n \"ignore_existing is not True\")\n op = _op_to_colocate_with(op, self)\n\n # By default, colocate_with resets the device function stack,\n # since colocate_with is typically used in specific internal\n # library functions where colocation is intended to be \"stronger\"\n # than device functions.\n #\n # In the future, a caller may specify that device_functions win\n # over colocation, in which case we can add support.\n device_fn_tmp = self._device_function_stack\n self._device_function_stack = traceable_stack.TraceableStack()\n\n if ignore_existing:\n current_stack = self._colocation_stack\n self._colocation_stack = traceable_stack.TraceableStack()\n\n if op is not None:\n # offset refers to the stack frame used for storing code location.\n # We use 4, the sum of 1 to use our caller's stack frame and 3\n # to jump over layers of context managers above us.\n self._colocation_stack.push_obj(op, offset=4)\n\n try:\n yield\n finally:\n # Restore device function stack\n self._device_function_stack = device_fn_tmp\n if op is not None:\n self._colocation_stack.pop_obj()\n\n # Reset the colocation stack if requested.\n if ignore_existing:\n self._colocation_stack = current_stack\n\n def _add_device_to_stack(self, device_name_or_function, offset=0):\n \"\"\"Add device to stack manually, separate from a context manager.\"\"\"\n total_offset = 1 + offset\n spec = _UserDeviceSpec(device_name_or_function)\n self._device_function_stack.push_obj(spec, offset=total_offset)\n return spec\n\n @tf_contextlib.contextmanager\n def device(self, device_name_or_function):\n # pylint: disable=line-too-long\n \"\"\"Returns a context manager that specifies the default device to use.\n\n The `device_name_or_function` argument may either be a device name\n string, a device function, or None:\n\n * If it is a device name string, all operations constructed in\n this context will be assigned to the device with that name, unless\n overridden by a nested `device()` context.\n * If it is a function, it will be treated as a function from\n Operation objects to device name strings, and invoked each time\n a new Operation is created. The Operation will be assigned to\n the device with the returned name.\n * If it is None, all `device()` invocations from the enclosing context\n will be ignored.\n\n For information about the valid syntax of device name strings, see\n the documentation in\n [`DeviceNameUtils`](https://www.tensorflow.org/code/tensorflow/core/util/device_name_utils.h).\n\n For example:\n\n ```python\n with g.device('/device:GPU:0'):\n # All operations constructed in this context will be placed\n # on GPU 0.\n with g.device(None):\n # All operations constructed in this context will have no\n # assigned device.\n\n # Defines a function from `Operation` to device string.\n def matmul_on_gpu(n):\n if n.type == \"MatMul\":\n return \"/device:GPU:0\"\n else:\n return \"/cpu:0\"\n\n with g.device(matmul_on_gpu):\n # All operations of type \"MatMul\" constructed in this context\n # will be placed on GPU 0; all other operations will be placed\n # on CPU 0.\n ```\n\n **N.B.** The device scope may be overridden by op wrappers or\n other library code. 
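    For instance, colocation constraints win over the device scope, as in
    this sketch (device strings are illustrative):

    ```python
    with g.device('/cpu:0'):
      v = tf.Variable([1.0])        # placed on /cpu:0
    with g.device('/device:GPU:0'):
      assign_op = v.assign([2.0])   # colocated with `v`, per the note above
    ```
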
For example, a variable assignment op\n `v.assign()` must be colocated with the `tf.Variable` `v`, and\n incompatible device scopes will be ignored.\n\n Args:\n device_name_or_function: The device name or function to use in the\n context.\n\n Yields:\n A context manager that specifies the default device to use for newly\n created ops.\n\n Raises:\n RuntimeError: If device scopes are not properly nested.\n \"\"\"\n self._add_device_to_stack(device_name_or_function, offset=2)\n old_top_of_stack = self._device_function_stack.peek_top_obj()\n try:\n yield\n finally:\n new_top_of_stack = self._device_function_stack.peek_top_obj()\n if old_top_of_stack is not new_top_of_stack:\n raise RuntimeError(\"Exiting device scope without proper scope nesting.\")\n self._device_function_stack.pop_obj()\n\n def _apply_device_functions(self, op):\n \"\"\"Applies the current device function stack to the given operation.\"\"\"\n # Apply any device functions in LIFO order, so that the most recently\n # pushed function has the first chance to apply a device to the op.\n # We apply here because the result can depend on the Operation's\n # signature, which is computed in the Operation constructor.\n # pylint: disable=protected-access\n prior_device_string = None\n for device_spec in self._device_function_stack.peek_objs():\n if device_spec.is_null_merge:\n continue\n\n if device_spec.function is None:\n break\n\n device_string = device_spec.string_merge(op)\n\n # Take advantage of the fact that None is a singleton and Python interns\n # strings, since identity checks are faster than equality checks.\n if device_string is not prior_device_string:\n op._set_device_from_string(device_string)\n prior_device_string = device_string\n op._device_code_locations = self._snapshot_device_function_stack_metadata()\n # pylint: enable=protected-access\n\n # pylint: disable=g-doc-return-or-yield\n @tf_contextlib.contextmanager\n def container(self, container_name):\n \"\"\"Returns a context manager that specifies the resource container to use.\n\n Stateful operations, such as variables and queues, can maintain their\n states on devices so that they can be shared by multiple processes.\n A resource container is a string name under which these stateful\n operations are tracked. 
These resources can be released or cleared\n with `tf.Session.reset()`.\n\n For example:\n\n ```python\n with g.container('experiment0'):\n # All stateful Operations constructed in this context will be placed\n # in resource container \"experiment0\".\n v1 = tf.Variable([1.0])\n v2 = tf.Variable([2.0])\n with g.container(\"experiment1\"):\n # All stateful Operations constructed in this context will be\n # placed in resource container \"experiment1\".\n v3 = tf.Variable([3.0])\n q1 = tf.queue.FIFOQueue(10, tf.float32)\n # All stateful Operations constructed in this context will be\n # be created in the \"experiment0\".\n v4 = tf.Variable([4.0])\n q1 = tf.queue.FIFOQueue(20, tf.float32)\n with g.container(\"\"):\n # All stateful Operations constructed in this context will be\n # be placed in the default resource container.\n v5 = tf.Variable([5.0])\n q3 = tf.queue.FIFOQueue(30, tf.float32)\n\n # Resets container \"experiment0\", after which the state of v1, v2, v4, q1\n # will become undefined (such as uninitialized).\n tf.Session.reset(target, [\"experiment0\"])\n ```\n\n Args:\n container_name: container name string.\n\n Returns:\n A context manager for defining resource containers for stateful ops,\n yields the container name.\n \"\"\"\n original_container = self._container\n self._container = container_name\n try:\n yield self._container\n finally:\n self._container = original_container\n\n # pylint: enable=g-doc-return-or-yield\n\n class _ControlDependenciesController(object):\n \"\"\"Context manager for `control_dependencies()`.\"\"\"\n\n def __init__(self, graph, control_inputs):\n \"\"\"Create a new `_ControlDependenciesController`.\n\n A `_ControlDependenciesController` is the context manager for\n `with tf.control_dependencies()` blocks. These normally nest,\n as described in the documentation for `control_dependencies()`.\n\n The `control_inputs` argument list control dependencies that must be\n added to the current set of control dependencies. Because of\n uniquification the set can be empty even if the caller passed a list of\n ops. The special value `None` indicates that we want to start a new\n empty set of control dependencies instead of extending the current set.\n\n In that case we also clear the current control flow context, which is an\n additional mechanism to add control dependencies.\n\n Args:\n graph: The graph that this controller is managing.\n control_inputs: List of ops to use as control inputs in addition to the\n current control dependencies. 
None to indicate that the dependencies\n should be cleared.\n \"\"\"\n self._graph = graph\n if control_inputs is None:\n self._control_inputs_val = []\n self._new_stack = True\n else:\n self._control_inputs_val = control_inputs\n self._new_stack = False\n self._seen_nodes = set()\n self._old_stack = None\n self._old_control_flow_context = None\n\n# pylint: disable=protected-access\n\n def __enter__(self):\n if self._new_stack:\n # Clear the control_dependencies graph.\n self._old_stack = self._graph._control_dependencies_stack\n self._graph._control_dependencies_stack = []\n # Clear the control_flow_context too.\n self._old_control_flow_context = self._graph._get_control_flow_context()\n self._graph._set_control_flow_context(None)\n self._graph._push_control_dependencies_controller(self)\n\n def __exit__(self, unused_type, unused_value, unused_traceback):\n self._graph._pop_control_dependencies_controller(self)\n if self._new_stack:\n self._graph._control_dependencies_stack = self._old_stack\n self._graph._set_control_flow_context(self._old_control_flow_context)\n\n# pylint: enable=protected-access\n\n @property\n def control_inputs(self):\n return self._control_inputs_val\n\n def add_op(self, op):\n if isinstance(op, Tensor):\n op = op.ref()\n self._seen_nodes.add(op)\n\n def op_in_group(self, op):\n if isinstance(op, Tensor):\n op = op.ref()\n return op in self._seen_nodes\n\n def _push_control_dependencies_controller(self, controller):\n self._control_dependencies_stack.append(controller)\n\n def _pop_control_dependencies_controller(self, controller):\n assert self._control_dependencies_stack[-1] is controller\n self._control_dependencies_stack.pop()\n\n def _current_control_dependencies(self):\n ret = set()\n for controller in self._control_dependencies_stack:\n for op in controller.control_inputs:\n ret.add(op)\n return ret\n\n def _control_dependencies_for_inputs(self, input_ops):\n \"\"\"For an op that takes `input_ops` as inputs, compute control inputs.\n\n The returned control dependencies should yield an execution that\n is equivalent to adding all control inputs in\n self._control_dependencies_stack to a newly created op. 
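    For example, under that reading an op created inside
    `with g.control_dependencies([a, b]):` would receive explicit control
    edges to both `a` and `b` (sketch; `a` and `b` are arbitrary ops in this
    graph):

    ```python
    with g.control_dependencies([a, b]):
      c = tf.no_op()   # naively recorded with control inputs {a, b}
    ```
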
However,\n this function attempts to prune the returned control dependencies\n by observing that nodes created within the same `with\n control_dependencies(...):` block may have data dependencies that make\n the explicit approach redundant.\n\n Args:\n input_ops: The data input ops for an op to be created.\n\n Returns:\n A list of control inputs for the op to be created.\n \"\"\"\n ret = []\n for controller in self._control_dependencies_stack:\n # If any of the input_ops already depends on the inputs from controller,\n # we say that the new op is dominated (by that input), and we therefore\n # do not need to add control dependencies for this controller's inputs.\n dominated = False\n for op in input_ops:\n if controller.op_in_group(op):\n dominated = True\n break\n if not dominated:\n # Don't add a control input if we already have a data dependency on i.\n # NOTE(mrry): We do not currently track transitive data dependencies,\n # so we may add redundant control inputs.\n ret.extend(c for c in controller.control_inputs if c not in input_ops)\n return ret\n\n def _record_op_seen_by_control_dependencies(self, op):\n \"\"\"Record that the given op depends on all registered control dependencies.\n\n Args:\n op: An Operation.\n \"\"\"\n for controller in self._control_dependencies_stack:\n controller.add_op(op)\n\n def control_dependencies(self, control_inputs):\n \"\"\"Returns a context manager that specifies control dependencies.\n\n Use with the `with` keyword to specify that all operations constructed\n within the context should have control dependencies on\n `control_inputs`. For example:\n\n ```python\n with g.control_dependencies([a, b, c]):\n # `d` and `e` will only run after `a`, `b`, and `c` have executed.\n d = ...\n e = ...\n ```\n\n Multiple calls to `control_dependencies()` can be nested, and in\n that case a new `Operation` will have control dependencies on the union\n of `control_inputs` from all active contexts.\n\n ```python\n with g.control_dependencies([a, b]):\n # Ops constructed here run after `a` and `b`.\n with g.control_dependencies([c, d]):\n # Ops constructed here run after `a`, `b`, `c`, and `d`.\n ```\n\n You can pass None to clear the control dependencies:\n\n ```python\n with g.control_dependencies([a, b]):\n # Ops constructed here run after `a` and `b`.\n with g.control_dependencies(None):\n # Ops constructed here run normally, not waiting for either `a` or `b`.\n with g.control_dependencies([c, d]):\n # Ops constructed here run after `c` and `d`, also not waiting\n # for either `a` or `b`.\n ```\n\n *N.B.* The control dependencies context applies *only* to ops that\n are constructed within the context. Merely using an op or tensor\n in the context does not add a control dependency. The following\n example illustrates this point:\n\n ```python\n # WRONG\n def my_func(pred, tensor):\n t = tf.matmul(tensor, tensor)\n with tf.control_dependencies([pred]):\n # The matmul op is created outside the context, so no control\n # dependency will be added.\n return t\n\n # RIGHT\n def my_func(pred, tensor):\n with tf.control_dependencies([pred]):\n # The matmul op is created in the context, so a control dependency\n # will be added.\n return tf.matmul(tensor, tensor)\n ```\n\n Also note that though execution of ops created under this scope will trigger\n execution of the dependencies, the ops created under this scope might still\n be pruned from a normal tensorflow graph. 
For example, in the following\n snippet of code the dependencies are never executed:\n\n ```python\n loss = model.loss()\n with tf.control_dependencies(dependencies):\n loss = loss + tf.constant(1) # note: dependencies ignored in the\n # backward pass\n return tf.gradients(loss, model.variables)\n ```\n\n This is because evaluating the gradient graph does not require evaluating\n the constant(1) op created in the forward pass.\n\n Args:\n control_inputs: A list of `Operation` or `Tensor` objects which must be\n executed or computed before running the operations defined in the\n context. Can also be `None` to clear the control dependencies.\n\n Returns:\n A context manager that specifies control dependencies for all\n operations constructed within the context.\n\n Raises:\n TypeError: If `control_inputs` is not a list of `Operation` or\n `Tensor` objects.\n \"\"\"\n if control_inputs is None:\n return self._ControlDependenciesController(self, None)\n # First convert the inputs to ops, and deduplicate them.\n # NOTE(mrry): Other than deduplication, we do not currently track direct\n # or indirect dependencies between control_inputs, which may result in\n # redundant control inputs.\n control_ops = []\n current = self._current_control_dependencies()\n for c in control_inputs:\n # The hasattr(handle) is designed to match ResourceVariables. This is so\n # control dependencies on a variable or on an unread variable don't\n # trigger reads.\n if (isinstance(c, IndexedSlices) or\n (hasattr(c, \"_handle\") and hasattr(c, \"op\"))):\n c = c.op\n c = self.as_graph_element(c)\n if isinstance(c, Tensor):\n c = c.op\n elif not isinstance(c, Operation):\n raise TypeError(\"Control input must be Operation or Tensor: %s\" % c)\n if c not in current:\n control_ops.append(c)\n current.add(c)\n return self._ControlDependenciesController(self, control_ops)\n\n # pylint: disable=g-doc-return-or-yield\n @tf_contextlib.contextmanager\n def _attr_scope(self, attr_map):\n \"\"\"EXPERIMENTAL: A context manager for setting attributes on operators.\n\n This context manager can be used to add additional\n attributes to operators within the scope of the context.\n\n For example:\n\n with ops.Graph().as_default() as g:\n f_1 = Foo() # No extra attributes\n with g._attr_scope({\"_a\": tf.attr_value_pb2.AttrValue(b=False)}):\n f_2 = Foo() # Additional attribute _a=False\n with g._attr_scope({\"_a\": tf.attr_value_pb2.AttrValue(b=True)}):\n f_3 = Foo() # Additional attribute _a=False\n with g._attr_scope({\"_a\": None}):\n f_4 = Foo() # No additional attributes.\n\n Args:\n attr_map: A dictionary mapping attr name strings to AttrValue protocol\n buffers or None.\n\n Returns:\n A context manager that sets the kernel label to be used for one or more\n ops created in that context.\n\n Raises:\n TypeError: If attr_map is not a dictionary mapping\n strings to AttrValue protobufs.\n \"\"\"\n if not isinstance(attr_map, dict):\n raise TypeError(\"attr_map must be a dictionary mapping \"\n \"strings to AttrValue protocol buffers\")\n # The saved_attrs dictionary stores any currently-set labels that\n # will be overridden by this context manager.\n saved_attrs = {}\n # Install the given attribute\n for name, attr in attr_map.items():\n if not (isinstance(name, six.string_types) and\n (isinstance(attr, (type(None), attr_value_pb2.AttrValue)) or\n callable(attr))):\n raise TypeError(\"attr_map must be a dictionary mapping \"\n \"strings to AttrValue protocol buffers or \"\n \"callables that emit AttrValue protocol buffers\")\n try:\n 
saved_attrs[name] = self._attr_scope_map[name]\n except KeyError:\n pass\n if attr is None:\n del self._attr_scope_map[name]\n else:\n self._attr_scope_map[name] = attr\n try:\n yield # The code within the context runs here.\n finally:\n # Remove the attributes set for this context, and restore any saved\n # attributes.\n for name, attr in attr_map.items():\n try:\n self._attr_scope_map[name] = saved_attrs[name]\n except KeyError:\n del self._attr_scope_map[name]\n\n # pylint: enable=g-doc-return-or-yield\n\n # pylint: disable=g-doc-return-or-yield\n @tf_contextlib.contextmanager\n def _kernel_label_map(self, op_to_kernel_label_map):\n \"\"\"EXPERIMENTAL: A context manager for setting kernel labels.\n\n This context manager can be used to select particular\n implementations of kernels within the scope of the context.\n\n For example:\n\n with ops.Graph().as_default() as g:\n f_1 = Foo() # Uses the default registered kernel for the Foo op.\n with g.kernel_label_map({\"Foo\": \"v_2\"}):\n f_2 = Foo() # Uses the registered kernel with label \"v_2\"\n # for the Foo op.\n with g.kernel_label_map({\"Foo\": \"v_3\"}):\n f_3 = Foo() # Uses the registered kernel with label \"v_3\"\n # for the Foo op.\n with g.kernel_label_map({\"Foo\": \"\"}):\n f_4 = Foo() # Uses the default registered kernel\n # for the Foo op.\n\n Args:\n op_to_kernel_label_map: A dictionary mapping op type strings to kernel\n label strings.\n\n Returns:\n A context manager that sets the kernel label to be used for one or more\n ops created in that context.\n\n Raises:\n TypeError: If op_to_kernel_label_map is not a dictionary mapping\n strings to strings.\n \"\"\"\n if not isinstance(op_to_kernel_label_map, dict):\n raise TypeError(\"op_to_kernel_label_map must be a dictionary mapping \"\n \"strings to strings\")\n # The saved_labels dictionary stores any currently-set labels that\n # will be overridden by this context manager.\n saved_labels = {}\n # Install the given label\n for op_type, label in op_to_kernel_label_map.items():\n if not (isinstance(op_type, six.string_types) and\n isinstance(label, six.string_types)):\n raise TypeError(\"op_to_kernel_label_map must be a dictionary mapping \"\n \"strings to strings\")\n try:\n saved_labels[op_type] = self._op_to_kernel_label_map[op_type]\n except KeyError:\n pass\n self._op_to_kernel_label_map[op_type] = label\n try:\n yield # The code within the context runs here.\n finally:\n # Remove the labels set for this context, and restore any saved labels.\n for op_type, label in op_to_kernel_label_map.items():\n try:\n self._op_to_kernel_label_map[op_type] = saved_labels[op_type]\n except KeyError:\n del self._op_to_kernel_label_map[op_type]\n\n # pylint: enable=g-doc-return-or-yield\n\n @tf_contextlib.contextmanager\n def _override_gradient_function(self, gradient_function_map):\n \"\"\"Specify gradient function for the given op type.\"\"\"\n\n # This is an internal API and we don't need nested context for this.\n assert not self._gradient_function_map\n self._gradient_function_map = gradient_function_map\n yield\n self._gradient_function_map = {}\n\n # pylint: disable=g-doc-return-or-yield\n @tf_contextlib.contextmanager\n def gradient_override_map(self, op_type_map):\n \"\"\"EXPERIMENTAL: A context manager for overriding gradient functions.\n\n This context manager can be used to override the gradient function\n that will be used for ops within the scope of the context.\n\n For example:\n\n ```python\n @tf.RegisterGradient(\"CustomSquare\")\n def _custom_square_grad(op, grad):\n # 
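(One possible body, shown only for illustration and not part of the\n      # original example: a gradient function registered this way receives the\n      # forward `op` and the incoming gradient `grad`, and returns the gradient\n      # with respect to each input, e.g. `return 2.0 * op.inputs[0] * grad`\n      # for a squaring op.)\n      # 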
...\n\n    with tf.Graph().as_default() as g:\n      c = tf.constant(5.0)\n      s_1 = tf.square(c)  # Uses the default gradient for tf.square.\n      with g.gradient_override_map({\"Square\": \"CustomSquare\"}):\n        s_2 = tf.square(c)  # Uses _custom_square_grad to compute the\n                            # gradient of s_2.\n    ```\n\n    Args:\n      op_type_map: A dictionary mapping op type strings to alternative op type\n        strings.\n\n    Returns:\n      A context manager that sets the alternative op type to be used for one\n      or more ops created in that context.\n\n    Raises:\n      TypeError: If `op_type_map` is not a dictionary mapping strings to\n        strings.\n    \"\"\"\n    if not isinstance(op_type_map, dict):\n      raise TypeError(\"op_type_map must be a dictionary mapping \"\n                      \"strings to strings\")\n    # The saved_mappings dictionary stores any currently-set mappings that\n    # will be overridden by this context manager.\n    saved_mappings = {}\n    # Install the given override mappings.\n    for op_type, mapped_op_type in op_type_map.items():\n      if not (isinstance(op_type, six.string_types) and\n              isinstance(mapped_op_type, six.string_types)):\n        raise TypeError(\"op_type_map must be a dictionary mapping \"\n                        \"strings to strings\")\n      try:\n        saved_mappings[op_type] = self._gradient_override_map[op_type]\n      except KeyError:\n        pass\n      self._gradient_override_map[op_type] = mapped_op_type\n    try:\n      yield  # The code within the context runs here.\n    finally:\n      # Remove the mappings set for this context, and restore any saved\n      # mappings.\n      for op_type, mapped_op_type in op_type_map.items():\n        try:\n          self._gradient_override_map[op_type] = saved_mappings[op_type]\n        except KeyError:\n          del self._gradient_override_map[op_type]\n\n  # pylint: enable=g-doc-return-or-yield\n\n  def prevent_feeding(self, tensor):\n    \"\"\"Marks the given `tensor` as unfeedable in this graph.\"\"\"\n    self._unfeedable_tensors.add(tensor)\n\n  def is_feedable(self, tensor):\n    \"\"\"Returns `True` if and only if `tensor` is feedable.\"\"\"\n    return tensor not in self._unfeedable_tensors\n\n  def prevent_fetching(self, op):\n    \"\"\"Marks the given `op` as unfetchable in this graph.\"\"\"\n    self._unfetchable_ops.add(op)\n\n  def is_fetchable(self, tensor_or_op):\n    \"\"\"Returns `True` if and only if `tensor_or_op` is fetchable.\"\"\"\n    if isinstance(tensor_or_op, Tensor):\n      return tensor_or_op.op not in self._unfetchable_ops\n    else:\n      return tensor_or_op not in self._unfetchable_ops\n\n  def switch_to_thread_local(self):\n    \"\"\"Make device, colocation and dependencies stacks thread-local.\n\n    Device, colocation and dependencies stacks are not thread-local by default.\n    If multiple threads access them, then the state is shared. This means that\n    one thread may affect the behavior of another thread.\n\n    After this method is called, the stacks become thread-local. If multiple\n    threads access them, then the state is not shared. 
Each thread uses its own\n value; a thread doesn't affect other threads by mutating such a stack.\n\n The initial value for every thread's stack is set to the current value\n of the stack when `switch_to_thread_local()` was first called.\n \"\"\"\n if not self._stack_state_is_thread_local:\n self._stack_state_is_thread_local = True\n\n @property\n def _device_function_stack(self):\n if self._stack_state_is_thread_local:\n # This may be called from a thread where device_function_stack doesn't yet\n # exist.\n # pylint: disable=protected-access\n if not hasattr(self._thread_local, \"_device_function_stack\"):\n stack_copy_for_this_thread = self._graph_device_function_stack.copy()\n self._thread_local._device_function_stack = stack_copy_for_this_thread\n return self._thread_local._device_function_stack\n # pylint: enable=protected-access\n else:\n return self._graph_device_function_stack\n\n @property\n def _device_functions_outer_to_inner(self):\n user_device_specs = self._device_function_stack.peek_objs()\n device_functions = [spec.function for spec in user_device_specs]\n device_functions_outer_to_inner = list(reversed(device_functions))\n return device_functions_outer_to_inner\n\n def _snapshot_device_function_stack_metadata(self):\n \"\"\"Return device function stack as a list of TraceableObjects.\n\n Returns:\n [traceable_stack.TraceableObject, ...] where each TraceableObject's .obj\n member is a displayable name for the user's argument to Graph.device, and\n the filename and lineno members point to the code location where\n Graph.device was called directly or indirectly by the user.\n \"\"\"\n snapshot = []\n for obj in self._device_function_stack.peek_traceable_objs():\n obj_copy = obj.copy_metadata()\n obj_copy.obj = obj.obj.display_name\n snapshot.append(obj_copy)\n return snapshot\n\n @_device_function_stack.setter\n def _device_function_stack(self, device_function_stack):\n if self._stack_state_is_thread_local:\n # pylint: disable=protected-access\n self._thread_local._device_function_stack = device_function_stack\n # pylint: enable=protected-access\n else:\n self._graph_device_function_stack = device_function_stack\n\n @property\n def _colocation_stack(self):\n \"\"\"Return thread-local copy of colocation stack.\"\"\"\n if self._stack_state_is_thread_local:\n # This may be called from a thread where colocation_stack doesn't yet\n # exist.\n # pylint: disable=protected-access\n if not hasattr(self._thread_local, \"_colocation_stack\"):\n stack_copy_for_this_thread = self._graph_colocation_stack.copy()\n self._thread_local._colocation_stack = stack_copy_for_this_thread\n return self._thread_local._colocation_stack\n # pylint: enable=protected-access\n else:\n return self._graph_colocation_stack\n\n def _snapshot_colocation_stack_metadata(self):\n \"\"\"Return colocation stack metadata as a dictionary.\"\"\"\n return {\n traceable_obj.obj.name: traceable_obj.copy_metadata()\n for traceable_obj in self._colocation_stack.peek_traceable_objs()\n }\n\n @_colocation_stack.setter\n def _colocation_stack(self, colocation_stack):\n if self._stack_state_is_thread_local:\n # pylint: disable=protected-access\n self._thread_local._colocation_stack = colocation_stack\n # pylint: enable=protected-access\n else:\n self._graph_colocation_stack = colocation_stack\n\n @property\n def _control_dependencies_stack(self):\n if self._stack_state_is_thread_local:\n # This may be called from a thread where control_dependencies_stack\n # doesn't yet exist.\n if not hasattr(self._thread_local, 
\"_control_dependencies_stack\"):\n self._thread_local._control_dependencies_stack = (\n self._graph_control_dependencies_stack[:])\n return self._thread_local._control_dependencies_stack\n else:\n return self._graph_control_dependencies_stack\n\n @_control_dependencies_stack.setter\n def _control_dependencies_stack(self, control_dependencies):\n if self._stack_state_is_thread_local:\n self._thread_local._control_dependencies_stack = control_dependencies\n else:\n self._graph_control_dependencies_stack = control_dependencies\n\n @property\n def _distribution_strategy_stack(self):\n \"\"\"A stack to maintain distribution strategy context for each thread.\"\"\"\n if not hasattr(self._thread_local, \"_distribution_strategy_stack\"):\n self._thread_local._distribution_strategy_stack = [] # pylint: disable=protected-access\n return self._thread_local._distribution_strategy_stack # pylint: disable=protected-access\n\n @_distribution_strategy_stack.setter\n def _distribution_strategy_stack(self, _distribution_strategy_stack):\n self._thread_local._distribution_strategy_stack = ( # pylint: disable=protected-access\n _distribution_strategy_stack)\n\n @property\n def _global_distribute_strategy_scope(self):\n \"\"\"For implementing `tf.distribute.set_strategy()`.\"\"\"\n if not hasattr(self._thread_local, \"distribute_strategy_scope\"):\n self._thread_local.distribute_strategy_scope = None\n return self._thread_local.distribute_strategy_scope\n\n @_global_distribute_strategy_scope.setter\n def _global_distribute_strategy_scope(self, distribute_strategy_scope):\n self._thread_local.distribute_strategy_scope = (distribute_strategy_scope)\n\n @property\n def _auto_cast_variable_read_dtype(self):\n \"\"\"The dtype that instances of `AutoCastVariable` will be casted to.\n\n This is None if `AutoCastVariables` should not be casted.\n\n See `AutoCastVariable` for more information.\n\n Returns:\n The dtype that instances of `AutoCastVariable` will be casted to.\n \"\"\"\n if not hasattr(self._thread_local, \"_auto_cast_variable_read_dtype\"):\n self._thread_local._auto_cast_variable_read_dtype = None # pylint: disable=protected-access\n return self._thread_local._auto_cast_variable_read_dtype # pylint: disable=protected-access\n\n @_auto_cast_variable_read_dtype.setter\n def _auto_cast_variable_read_dtype(self, dtype):\n if dtype:\n dtype = dtypes.as_dtype(dtype)\n self._thread_local._auto_cast_variable_read_dtype = dtype # pylint: disable=protected-access\n\n @tf_contextlib.contextmanager\n def _enable_auto_casting_variables(self, dtype):\n \"\"\"Context manager to automatically cast AutoCastVariables.\n\n If an AutoCastVariable `var` is used under this context manager, it will be\n casted to `dtype` before being used.\n\n See `AutoCastVariable` for more information.\n\n Args:\n dtype: The dtype that AutoCastVariables should be casted to.\n\n Yields:\n Nothing.\n \"\"\"\n prev_read_dtype = self._auto_cast_variable_read_dtype\n try:\n self._auto_cast_variable_read_dtype = dtype\n yield\n finally:\n self._auto_cast_variable_read_dtype = prev_read_dtype\n\n def _mutation_lock(self):\n \"\"\"Returns a lock to guard code that creates & mutates ops.\n\n See the comment for self._group_lock for more info.\n \"\"\"\n return self._group_lock.group(_MUTATION_LOCK_GROUP)\n\n def _session_run_lock(self):\n \"\"\"Returns a lock to guard code for Session.run.\n\n See the comment for self._group_lock for more info.\n \"\"\"\n return self._group_lock.group(_SESSION_RUN_LOCK_GROUP)\n\n\n# TODO(agarwal): currently device 
directives in an outer eager scope will not\n# apply to inner graph mode code. Fix that.\n\n\n@tf_export(v1=[\"device\"])\ndef device(device_name_or_function):\n \"\"\"Wrapper for `Graph.device()` using the default graph.\n\n See `tf.Graph.device` for more details.\n\n Args:\n device_name_or_function: The device name or function to use in the context.\n\n Returns:\n A context manager that specifies the default device to use for newly\n created ops.\n\n Raises:\n RuntimeError: If eager execution is enabled and a function is passed in.\n \"\"\"\n if context.executing_eagerly():\n if callable(device_name_or_function):\n raise RuntimeError(\n \"tf.device does not support functions when eager execution \"\n \"is enabled.\")\n return context.device(device_name_or_function)\n elif executing_eagerly_outside_functions():\n @tf_contextlib.contextmanager\n def combined(device_name_or_function):\n with get_default_graph().device(device_name_or_function):\n if not callable(device_name_or_function):\n with context.device(device_name_or_function):\n yield\n else:\n yield\n return combined(device_name_or_function)\n else:\n return get_default_graph().device(device_name_or_function)\n\n\n@tf_export(\"device\", v1=[])\ndef device_v2(device_name):\n \"\"\"Specifies the device for ops created/executed in this context.\n\n This function specifies the device to be used for ops created/executed in a\n particular context. Nested contexts will inherit and also create/execute\n their ops on the specified device. If a specific device is not required,\n consider not using this function so that a device can be automatically\n assigned. In general the use of this function is optional. `device_name` can\n be fully specified, as in \"/job:worker/task:1/device:cpu:0\", or partially\n specified, containing only a subset of the \"/\"-separated fields. 
Any fields\n which are specified will override device annotations from outer scopes.\n\n For example:\n\n ```python\n with tf.device('/job:foo'):\n # ops created here have devices with /job:foo\n with tf.device('/job:bar/task:0/device:gpu:2'):\n # ops created here have the fully specified device above\n with tf.device('/device:gpu:1'):\n # ops created here have the device '/job:foo/device:gpu:1'\n ```\n\n Args:\n device_name: The device name to use in the context.\n\n Returns:\n A context manager that specifies the default device to use for newly\n created ops.\n\n Raises:\n RuntimeError: If a function is passed in.\n \"\"\"\n if callable(device_name):\n raise RuntimeError(\"tf.device does not support functions.\")\n return device(device_name)\n\n\n@tf_export(v1=[\"container\"])\ndef container(container_name):\n \"\"\"Wrapper for `Graph.container()` using the default graph.\n\n Args:\n container_name: The container string to use in the context.\n\n Returns:\n A context manager that specifies the default container to use for newly\n created stateful ops.\n \"\"\"\n return get_default_graph().container(container_name)\n\n\ndef _colocate_with_for_gradient(op, gradient_uid, ignore_existing=False):\n if context.executing_eagerly():\n if op is not None:\n if not hasattr(op, \"device\"):\n op = internal_convert_to_tensor_or_indexed_slices(op)\n return device(op.device)\n else:\n return NullContextmanager()\n else:\n default_graph = get_default_graph()\n if isinstance(op, EagerTensor):\n if default_graph.building_function:\n return default_graph.device(op.device)\n else:\n raise ValueError(\"Encountered an Eager-defined Tensor during graph \"\n \"construction, but a function was not being built.\")\n return default_graph._colocate_with_for_gradient(\n op, gradient_uid=gradient_uid, ignore_existing=ignore_existing)\n\n\n# Internal interface to colocate_with. colocate_with has been deprecated from\n# public API. There are still a few internal uses of colocate_with. Add internal\n# only API for those uses to avoid deprecation warning.\ndef colocate_with(op, ignore_existing=False):\n return _colocate_with_for_gradient(op, None, ignore_existing=ignore_existing)\n\n\[email protected](\n date=None, instructions=\"Colocations handled automatically by placer.\")\n@tf_export(v1=[\"colocate_with\"])\ndef _colocate_with(op, ignore_existing=False):\n return colocate_with(op, ignore_existing)\n\n\n@tf_export(\"control_dependencies\")\ndef control_dependencies(control_inputs):\n \"\"\"Wrapper for `Graph.control_dependencies()` using the default graph.\n\n See `tf.Graph.control_dependencies`\n for more details.\n\n When eager execution is enabled, any callable object in the `control_inputs`\n list will be called.\n\n Args:\n control_inputs: A list of `Operation` or `Tensor` objects which must be\n executed or computed before running the operations defined in the context.\n Can also be `None` to clear the control dependencies. 
If eager execution\n is enabled, any callable object in the `control_inputs` list will be\n called.\n\n Returns:\n A context manager that specifies control dependencies for all\n operations constructed within the context.\n \"\"\"\n if context.executing_eagerly():\n if control_inputs:\n # Execute any pending callables.\n for control in control_inputs:\n if callable(control):\n control()\n return NullContextmanager()\n else:\n return get_default_graph().control_dependencies(control_inputs)\n\n\nclass _DefaultStack(threading.local):\n \"\"\"A thread-local stack of objects for providing implicit defaults.\"\"\"\n\n def __init__(self):\n super(_DefaultStack, self).__init__()\n self._enforce_nesting = True\n self.stack = []\n\n def get_default(self):\n return self.stack[-1] if len(self.stack) >= 1 else None\n\n def reset(self):\n self.stack = []\n\n def is_cleared(self):\n return not self.stack\n\n @property\n def enforce_nesting(self):\n return self._enforce_nesting\n\n @enforce_nesting.setter\n def enforce_nesting(self, value):\n self._enforce_nesting = value\n\n @tf_contextlib.contextmanager\n def get_controller(self, default):\n \"\"\"A context manager for manipulating a default stack.\"\"\"\n self.stack.append(default)\n try:\n yield default\n finally:\n # stack may be empty if reset() was called\n if self.stack:\n if self._enforce_nesting:\n if self.stack[-1] is not default:\n raise AssertionError(\n \"Nesting violated for default stack of %s objects\" %\n type(default))\n self.stack.pop()\n else:\n self.stack.remove(default)\n\n\n_default_session_stack = _DefaultStack() # pylint: disable=protected-access\n\n\ndef default_session(session):\n \"\"\"Python \"with\" handler for defining a default session.\n\n This function provides a means of registering a session for handling\n Tensor.eval() and Operation.run() calls. It is primarily intended for use\n by session.Session, but can be used with any object that implements\n the Session.run() interface.\n\n Use with the \"with\" keyword to specify that Tensor.eval() and Operation.run()\n invocations within the scope of a block should be executed by a particular\n session.\n\n The default session applies to the current thread only, so it is always\n possible to inspect the call stack and determine the scope of a default\n session. If you create a new thread, and wish to use the default session\n in that thread, you must explicitly add a \"with ops.default_session(sess):\"\n block in that thread's function.\n\n Example:\n The following code examples are equivalent:\n\n # 1. Using the Session object directly:\n sess = ...\n c = tf.constant(5.0)\n sess.run(c)\n\n # 2. Using default_session():\n sess = ...\n with ops.default_session(sess):\n c = tf.constant(5.0)\n result = c.eval()\n\n # 3. Overriding default_session():\n sess = ...\n with ops.default_session(sess):\n c = tf.constant(5.0)\n with ops.default_session(...):\n c.eval(session=sess)\n\n Args:\n session: The session to be installed as the default session.\n\n Returns:\n A context manager for the default session.\n \"\"\"\n return _default_session_stack.get_controller(session)\n\n\n@tf_export(v1=[\"get_default_session\"])\ndef get_default_session():\n \"\"\"Returns the default session for the current thread.\n\n The returned `Session` will be the innermost session on which a\n `Session` or `Session.as_default()` context has been entered.\n\n NOTE: The default session is a property of the current thread. 
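For example, the session most recently entered with `Session.as_default()`\n  is what this function returns (a minimal sketch; `sess` is just an\n  illustrative name):\n\n  ```python\n  sess = tf.compat.v1.Session()\n  with sess.as_default():\n    assert tf.compat.v1.get_default_session() is sess\n  ```\n\n  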
If you\n create a new thread, and wish to use the default session in that\n thread, you must explicitly add a `with sess.as_default():` in that\n thread's function.\n\n Returns:\n The default `Session` being used in the current thread.\n \"\"\"\n return _default_session_stack.get_default()\n\n\ndef _eval_using_default_session(tensors, feed_dict, graph, session=None):\n \"\"\"Uses the default session to evaluate one or more tensors.\n\n Args:\n tensors: A single Tensor, or a list of Tensor objects.\n feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists,\n numpy ndarrays, TensorProtos, or strings.\n graph: The graph in which the tensors are defined.\n session: (Optional) A different session to use to evaluate \"tensors\".\n\n Returns:\n Either a single numpy ndarray if \"tensors\" is a single tensor; or a list\n of numpy ndarrays that each correspond to the respective element in\n \"tensors\".\n\n Raises:\n ValueError: If no default session is available; the default session\n does not have \"graph\" as its graph; or if \"session\" is specified,\n and it does not have \"graph\" as its graph.\n \"\"\"\n if session is None:\n session = get_default_session()\n if session is None:\n raise ValueError(\"Cannot evaluate tensor using `eval()`: No default \"\n \"session is registered. Use `with \"\n \"sess.as_default()` or pass an explicit session to \"\n \"`eval(session=sess)`\")\n if session.graph is not graph:\n raise ValueError(\"Cannot use the default session to evaluate tensor: \"\n \"the tensor's graph is different from the session's \"\n \"graph. Pass an explicit session to \"\n \"`eval(session=sess)`.\")\n else:\n if session.graph is not graph:\n raise ValueError(\"Cannot use the given session to evaluate tensor: \"\n \"the tensor's graph is different from the session's \"\n \"graph.\")\n return session.run(tensors, feed_dict)\n\n\ndef _run_using_default_session(operation, feed_dict, graph, session=None):\n \"\"\"Uses the default session to run \"operation\".\n\n Args:\n operation: The Operation to be run.\n feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists,\n numpy ndarrays, TensorProtos, or strings.\n graph: The graph in which \"operation\" is defined.\n session: (Optional) A different session to use to run \"operation\".\n\n Raises:\n ValueError: If no default session is available; the default session\n does not have \"graph\" as its graph; or if \"session\" is specified,\n and it does not have \"graph\" as its graph.\n \"\"\"\n if session is None:\n session = get_default_session()\n if session is None:\n raise ValueError(\"Cannot execute operation using `run()`: No default \"\n \"session is registered. Use `with \"\n \"sess.as_default():` or pass an explicit session to \"\n \"`run(session=sess)`\")\n if session.graph is not graph:\n raise ValueError(\"Cannot use the default session to execute operation: \"\n \"the operation's graph is different from the \"\n \"session's graph. 
Pass an explicit session to \"\n \"run(session=sess).\")\n else:\n if session.graph is not graph:\n raise ValueError(\"Cannot use the given session to execute operation: \"\n \"the operation's graph is different from the session's \"\n \"graph.\")\n session.run(operation, feed_dict)\n\n\nclass _DefaultGraphStack(_DefaultStack): # pylint: disable=protected-access\n \"\"\"A thread-local stack of objects for providing an implicit default graph.\"\"\"\n\n def __init__(self):\n super(_DefaultGraphStack, self).__init__()\n self._global_default_graph = None\n\n def get_default(self):\n \"\"\"Override that returns a global default if the stack is empty.\"\"\"\n ret = super(_DefaultGraphStack, self).get_default()\n if ret is None:\n ret = self._GetGlobalDefaultGraph()\n return ret\n\n def _GetGlobalDefaultGraph(self):\n if self._global_default_graph is None:\n # TODO(mrry): Perhaps log that the default graph is being used, or set\n # provide some other feedback to prevent confusion when a mixture of\n # the global default graph and an explicit graph are combined in the\n # same process.\n self._global_default_graph = Graph()\n return self._global_default_graph\n\n def reset(self):\n super(_DefaultGraphStack, self).reset()\n self._global_default_graph = None\n\n @tf_contextlib.contextmanager\n def get_controller(self, default):\n context.context().context_switches.push(default.building_function,\n default.as_default,\n default._device_function_stack)\n try:\n with super(_DefaultGraphStack,\n self).get_controller(default) as g, context.graph_mode():\n yield g\n finally:\n # If an exception is raised here it may be hiding a related exception in\n # the try-block (just above).\n context.context().context_switches.pop()\n\n\n_default_graph_stack = _DefaultGraphStack()\n\n\n# Shared helper used in init_scope and executing_eagerly_outside_functions\n# to obtain the outermost context that is not building a function, and the\n# innermost non empty device stack.\ndef _get_outer_context_and_inner_device_stack():\n \"\"\"Get the outermost context not building a function.\"\"\"\n default_graph = get_default_graph()\n outer_context = None\n innermost_nonempty_device_stack = default_graph._device_function_stack # pylint: disable=protected-access\n\n if not _default_graph_stack.stack:\n # If the default graph stack is empty, then we cannot be building a\n # function. 
Install the global graph (which, in this case, is also the\n # default graph) as the outer context.\n if default_graph.building_function:\n raise RuntimeError(\"The global graph is building a function.\")\n outer_context = default_graph.as_default\n else:\n # Find a context that is not building a function.\n for stack_entry in reversed(context.context().context_switches.stack):\n if not innermost_nonempty_device_stack:\n innermost_nonempty_device_stack = stack_entry.device_stack\n if not stack_entry.is_building_function:\n outer_context = stack_entry.enter_context_fn\n break\n\n if outer_context is None:\n # As a last resort, obtain the global default graph; this graph doesn't\n # necessarily live on the graph stack (and hence it doesn't necessarily\n # live on the context stack), but it is stored in the graph stack's\n # encapsulating object.\n outer_context = _default_graph_stack._GetGlobalDefaultGraph().as_default # pylint: disable=protected-access\n\n if outer_context is None:\n # Sanity check; this shouldn't be triggered.\n raise RuntimeError(\"All graphs are building functions, and no \"\n \"eager context was previously active.\")\n\n return outer_context, innermost_nonempty_device_stack\n\n\n# pylint: disable=g-doc-return-or-yield,line-too-long\n@tf_export(\"init_scope\")\n@tf_contextlib.contextmanager\ndef init_scope():\n \"\"\"A context manager that lifts ops out of control-flow scopes and function-building graphs.\n\n There is often a need to lift variable initialization ops out of control-flow\n scopes, function-building graphs, and gradient tapes. Entering an\n `init_scope` is a mechanism for satisfying these desiderata. In particular,\n entering an `init_scope` has three effects:\n\n (1) All control dependencies are cleared the moment the scope is entered;\n this is equivalent to entering the context manager returned from\n `control_dependencies(None)`, which has the side-effect of exiting\n control-flow scopes like `tf.cond` and `tf.while_loop`.\n\n (2) All operations that are created while the scope is active are lifted\n into the lowest context on the `context_stack` that is not building a\n graph function. Here, a context is defined as either a graph or an eager\n context. Every context switch, i.e., every installation of a graph as\n the default graph and every switch into eager mode, is logged in a\n thread-local stack called `context_switches`; the log entry for a\n context switch is popped from the stack when the context is exited.\n Entering an `init_scope` is equivalent to crawling up\n `context_switches`, finding the first context that is not building a\n graph function, and entering it. A caveat is that if graph mode is\n enabled but the default graph stack is empty, then entering an\n `init_scope` will simply install a fresh graph as the default one.\n\n (3) The gradient tape is paused while the scope is active.\n\n When eager execution is enabled, code inside an init_scope block runs with\n eager execution enabled even when tracing a `tf.function`. 
For example:\n\n ```python\n tf.compat.v1.enable_eager_execution()\n\n @tf.function\n def func():\n # A function constructs TensorFlow graphs,\n # it does not execute eagerly.\n assert not tf.executing_eagerly()\n with tf.init_scope():\n # Initialization runs with eager execution enabled\n assert tf.executing_eagerly()\n ```\n\n Raises:\n RuntimeError: if graph state is incompatible with this initialization.\n \"\"\"\n # pylint: enable=g-doc-return-or-yield,line-too-long\n\n if context.executing_eagerly():\n # Fastpath.\n with tape.stop_recording():\n yield\n else:\n # Retrieve the active name scope: entering an `init_scope` preserves\n # the name scope of the current context.\n scope = get_default_graph().get_name_scope()\n if scope and scope[-1] != \"/\":\n # Names that end with trailing slashes are treated by `name_scope` as\n # absolute.\n scope = scope + \"/\"\n\n outer_context, innermost_nonempty_device_stack = (\n _get_outer_context_and_inner_device_stack())\n\n outer_graph = None\n outer_device_stack = None\n try:\n with outer_context(), name_scope(\n scope, skip_on_eager=False), control_dependencies(\n None), tape.stop_recording():\n context_manager = NullContextmanager\n context_manager_input = None\n if not context.executing_eagerly():\n # The device stack is preserved when lifting into a graph. Eager\n # execution doesn't implement device stacks and in particular it\n # doesn't support device functions, so in general it's not possible\n # to do the same when lifting into the eager context.\n outer_graph = get_default_graph()\n outer_device_stack = outer_graph._device_function_stack # pylint: disable=protected-access\n outer_graph._device_function_stack = innermost_nonempty_device_stack # pylint: disable=protected-access\n elif innermost_nonempty_device_stack is not None:\n for device_spec in innermost_nonempty_device_stack.peek_objs():\n if device_spec.function is None:\n break\n if device_spec.raw_string:\n context_manager = context.device\n context_manager_input = device_spec.raw_string\n break\n # It is currently not possible to have a device function in V2,\n # but in V1 we are unable to apply device functions in eager mode.\n # This means that we will silently skip some of the entries on the\n # device stack in V1 + eager mode.\n\n with context_manager(context_manager_input):\n yield\n finally:\n # If an exception is raised here it may be hiding a related exception in\n # try-block (just above).\n if outer_graph is not None:\n outer_graph._device_function_stack = outer_device_stack # pylint: disable=protected-access\n\n\n@tf_export(v1=[\"executing_eagerly_outside_functions\"])\ndef executing_eagerly_outside_functions():\n \"\"\"Returns True if executing eagerly, even if inside a graph function.\n\n This function will check the outermost context for the program and see if\n it is in eager mode. It is useful comparing to `tf.executing_eagerly()`,\n which checks the current context and will return `False` within a\n `tf.function` body. It can be used to build library that behave differently\n in eager runtime and v1 session runtime (deprecated).\n\n Example:\n\n >>> tf.compat.v1.enable_eager_execution()\n >>> @tf.function\n ... def func():\n ... # A function constructs TensorFlow graphs, it does not execute eagerly,\n ... # but the outer most context is still eager.\n ... assert not tf.executing_eagerly()\n ... 
return tf.compat.v1.executing_eagerly_outside_functions()\n >>> func()\n <tf.Tensor: shape=(), dtype=bool, numpy=True>\n\n Returns:\n boolean, whether the outermost context is in eager mode.\n \"\"\"\n if context.executing_eagerly():\n return True\n else:\n outer_context, _ = _get_outer_context_and_inner_device_stack()\n with outer_context():\n return context.executing_eagerly()\n\n\ndef inside_function():\n return get_default_graph().building_function\n\n\n@tf_export(v1=[\"enable_eager_execution\"])\ndef enable_eager_execution(config=None, device_policy=None,\n execution_mode=None):\n \"\"\"Enables eager execution for the lifetime of this program.\n\n Eager execution provides an imperative interface to TensorFlow. With eager\n execution enabled, TensorFlow functions execute operations immediately (as\n opposed to adding to a graph to be executed later in a `tf.compat.v1.Session`)\n and\n return concrete values (as opposed to symbolic references to a node in a\n computational graph).\n\n For example:\n\n ```python\n tf.compat.v1.enable_eager_execution()\n\n # After eager execution is enabled, operations are executed as they are\n # defined and Tensor objects hold concrete values, which can be accessed as\n # numpy.ndarray`s through the numpy() method.\n assert tf.multiply(6, 7).numpy() == 42\n ```\n\n Eager execution cannot be enabled after TensorFlow APIs have been used to\n create or execute graphs. It is typically recommended to invoke this function\n at program startup and not in a library (as most libraries should be usable\n both with and without eager execution).\n\n Args:\n config: (Optional.) A `tf.compat.v1.ConfigProto` to use to configure the\n environment in which operations are executed. Note that\n `tf.compat.v1.ConfigProto` is also used to configure graph execution (via\n `tf.compat.v1.Session`) and many options within `tf.compat.v1.ConfigProto`\n are not implemented (or are irrelevant) when eager execution is enabled.\n device_policy: (Optional.) Policy controlling how operations requiring\n inputs on a specific device (e.g., a GPU 0) handle inputs on a different\n device (e.g. GPU 1 or CPU). When set to None, an appropriate value will\n be picked automatically. The value picked may change between TensorFlow\n releases.\n Valid values:\n - tf.contrib.eager.DEVICE_PLACEMENT_EXPLICIT: raises an error if the\n placement is not correct.\n - tf.contrib.eager.DEVICE_PLACEMENT_WARN: copies the tensors which are not\n on the right device but logs a warning.\n - tf.contrib.eager.DEVICE_PLACEMENT_SILENT: silently copies the tensors.\n Note that this may hide performance problems as there is no notification\n provided when operations are blocked on the tensor being copied between\n devices.\n - tf.contrib.eager.DEVICE_PLACEMENT_SILENT_FOR_INT32: silently copies\n int32 tensors, raising errors on the other ones.\n execution_mode: (Optional.) Policy controlling how operations dispatched are\n actually executed. When set to None, an appropriate value will be picked\n automatically. The value picked may change between TensorFlow releases.\n Valid values:\n - tf.contrib.eager.SYNC: executes each operation synchronously.\n - tf.contrib.eager.ASYNC: executes each operation asynchronously. 
These\n operations may return \"non-ready\" handles.\n\n Raises:\n ValueError: If eager execution is enabled after creating/executing a\n TensorFlow graph, or if options provided conflict with a previous call\n to this function.\n \"\"\"\n _api_usage_gauge.get_cell().set(True)\n if context.default_execution_mode != context.EAGER_MODE:\n return enable_eager_execution_internal(\n config=config,\n device_policy=device_policy,\n execution_mode=execution_mode,\n server_def=None)\n\n\n@tf_export(v1=[\"disable_eager_execution\"])\ndef disable_eager_execution():\n \"\"\"Disables eager execution.\n\n This function can only be called before any Graphs, Ops, or Tensors have been\n created. It can be used at the beginning of the program for complex migration\n projects from TensorFlow 1.x to 2.x.\n \"\"\"\n _api_usage_gauge.get_cell().set(False)\n context.default_execution_mode = context.GRAPH_MODE\n c = context.context_safe()\n if c is not None:\n c._thread_local_data.is_eager = False # pylint: disable=protected-access\n\n\ndef enable_eager_execution_internal(config=None,\n device_policy=None,\n execution_mode=None,\n server_def=None):\n \"\"\"Enables eager execution for the lifetime of this program.\n\n Most of the doc string for enable_eager_execution is relevant here as well.\n\n Args:\n config: See enable_eager_execution doc string\n device_policy: See enable_eager_execution doc string\n execution_mode: See enable_eager_execution doc string\n server_def: (Optional.) A tensorflow::ServerDef proto. Enables execution on\n remote devices. GrpcServers need to be started by creating an identical\n server_def to this, and setting the appropriate task_indexes, so that the\n servers can communicate. It will then be possible to execute operations on\n remote devices.\n\n Raises:\n ValueError\n\n \"\"\"\n if config is not None and not isinstance(config, config_pb2.ConfigProto):\n raise TypeError(\"config must be a tf.ConfigProto, but got %s\" %\n type(config))\n if device_policy not in (None, context.DEVICE_PLACEMENT_EXPLICIT,\n context.DEVICE_PLACEMENT_WARN,\n context.DEVICE_PLACEMENT_SILENT,\n context.DEVICE_PLACEMENT_SILENT_FOR_INT32):\n raise ValueError(\n \"device_policy must be one of None, tf.contrib.eager.DEVICE_PLACEMENT_*\"\n )\n if execution_mode not in (None, context.SYNC, context.ASYNC):\n raise ValueError(\n \"execution_mode must be one of None, tf.contrib.eager.SYNC, \"\n \"tf.contrib.eager.ASYNC\")\n if context.default_execution_mode == context.GRAPH_MODE:\n graph_mode_has_been_used = (\n _default_graph_stack._global_default_graph is not None) # pylint: disable=protected-access\n if graph_mode_has_been_used:\n raise ValueError(\n \"tf.enable_eager_execution must be called at program startup.\")\n context.default_execution_mode = context.EAGER_MODE\n # pylint: disable=protected-access\n with context._context_lock:\n if context._context is None:\n context._set_context_locked(context.Context(\n config=config,\n device_policy=device_policy,\n execution_mode=execution_mode,\n server_def=server_def))\n elif ((config is not None and config is not context._context._config) or\n (device_policy is not None and\n device_policy is not context._context._device_policy) or\n (execution_mode is not None and\n execution_mode is not context._context._execution_mode)):\n raise ValueError(\n \"Trying to change the options of an active eager\"\n \" execution. Context config: %s, specified config:\"\n \" %s. Context device policy: %s, specified device\"\n \" policy: %s. 
Context execution mode: %s, \"\n \" specified execution mode %s.\" %\n (context._context._config, config, context._context._device_policy,\n device_policy, context._context._execution_mode, execution_mode))\n else:\n # We already created everything, so update the thread local data.\n context._context._thread_local_data.is_eager = True\n\n # Monkey patch to get rid of an unnecessary conditional since the context is\n # now initialized.\n context.context = context.context_safe\n\n\ndef eager_run(main=None, argv=None):\n \"\"\"Runs the program with an optional main function and argv list.\n\n The program will run with eager execution enabled.\n\n Example:\n ```python\n import tensorflow as tf\n # Import subject to future changes:\n from tensorflow.contrib.eager.python import tfe\n\n def main(_):\n u = tf.constant(6.0)\n v = tf.constant(7.0)\n print(u * v)\n\n if __name__ == \"__main__\":\n tfe.run()\n ```\n\n Args:\n main: the main function to run.\n argv: the arguments to pass to it.\n \"\"\"\n enable_eager_execution()\n app.run(main, argv)\n\n\n@tf_export(v1=[\"reset_default_graph\"])\ndef reset_default_graph():\n \"\"\"Clears the default graph stack and resets the global default graph.\n\n NOTE: The default graph is a property of the current thread. This\n function applies only to the current thread. Calling this function while\n a `tf.compat.v1.Session` or `tf.compat.v1.InteractiveSession` is active will\n result in undefined\n behavior. Using any previously created `tf.Operation` or `tf.Tensor` objects\n after calling this function will result in undefined behavior.\n Raises:\n AssertionError: If this function is called within a nested graph.\n \"\"\"\n if not _default_graph_stack.is_cleared():\n raise AssertionError(\"Do not use tf.reset_default_graph() to clear \"\n \"nested graphs. If you need a cleared graph, \"\n \"exit the nesting and create a new graph.\")\n _default_graph_stack.reset()\n\n\n@tf_export(v1=[\"get_default_graph\"])\ndef get_default_graph():\n \"\"\"Returns the default graph for the current thread.\n\n The returned graph will be the innermost graph on which a\n `Graph.as_default()` context has been entered, or a global default\n graph if none has been explicitly created.\n\n NOTE: The default graph is a property of the current thread. 
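For example, the graph most recently entered with `Graph.as_default()` is\n  what this function returns (a minimal sketch; `g` is just an illustrative\n  name):\n\n  ```python\n  g = tf.Graph()\n  with g.as_default():\n    assert tf.compat.v1.get_default_graph() is g\n  ```\n\n  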
If you\n create a new thread, and wish to use the default graph in that\n thread, you must explicitly add a `with g.as_default():` in that\n thread's function.\n\n Returns:\n The default `Graph` being used in the current thread.\n \"\"\"\n return _default_graph_stack.get_default()\n\n\ndef has_default_graph():\n \"\"\"Returns True if there is a default graph.\"\"\"\n return len(_default_graph_stack.stack) >= 1\n\n\ndef get_name_scope():\n \"\"\"Returns the current name scope in the default_graph.\n\n For example:\n\n ```python\n with tf.name_scope('scope1'):\n with tf.name_scope('scope2'):\n print(tf.get_name_scope())\n ```\n would print the string `scope1/scope2`.\n\n Returns:\n A string representing the current name scope.\n \"\"\"\n if context.executing_eagerly():\n return context.context().scope_name.rstrip(\"/\")\n return get_default_graph().get_name_scope()\n\n\ndef _assert_same_graph(original_item, item):\n \"\"\"Fail if the 2 items are from different graphs.\n\n Args:\n original_item: Original item to check against.\n item: Item to check.\n\n Raises:\n ValueError: if graphs do not match.\n \"\"\"\n if original_item.graph is not item.graph:\n raise ValueError(\"%s must be from the same graph as %s.\" %\n (item, original_item))\n\n\ndef _get_graph_from_inputs(op_input_list, graph=None):\n \"\"\"Returns the appropriate graph to use for the given inputs.\n\n This library method provides a consistent algorithm for choosing the graph\n in which an Operation should be constructed:\n\n 1. If the default graph is being used to construct a function, we\n use the default graph.\n 2. If the \"graph\" is specified explicitly, we validate that all of the inputs\n in \"op_input_list\" are compatible with that graph.\n 3. Otherwise, we attempt to select a graph from the first Operation-\n or Tensor-valued input in \"op_input_list\", and validate that all other\n such inputs are in the same graph.\n 4. If the graph was not specified and it could not be inferred from\n \"op_input_list\", we attempt to use the default graph.\n\n Args:\n op_input_list: A list of inputs to an operation, which may include `Tensor`,\n `Operation`, and other objects that may be converted to a graph element.\n graph: (Optional) The explicit graph to use.\n\n Raises:\n TypeError: If op_input_list is not a list or tuple, or if graph is not a\n Graph.\n ValueError: If a graph is explicitly passed and not all inputs are from it,\n or if the inputs are from multiple graphs, or we could not find a graph\n and there was no default graph.\n\n Returns:\n The appropriate graph to use for the given inputs.\n\n \"\"\"\n current_default_graph = get_default_graph()\n if current_default_graph.building_function:\n return current_default_graph\n\n op_input_list = tuple(op_input_list) # Handle generators correctly\n if graph and not isinstance(graph, Graph):\n raise TypeError(\"Input graph needs to be a Graph: %s\" % graph)\n\n # 1. We validate that all of the inputs are from the same graph. This is\n # either the supplied graph parameter, or the first one selected from one\n # the graph-element-valued inputs. In the latter case, we hold onto\n # that input in original_graph_element so we can provide a more\n # informative error if a mismatch is found.\n original_graph_element = None\n for op_input in op_input_list:\n # Determine if this is a valid graph_element.\n # TODO(josh11b): Note that we exclude subclasses of Tensor. 
Need to clean this\n # up.\n graph_element = None\n if (isinstance(op_input, (Operation, _TensorLike)) and\n ((not isinstance(op_input, Tensor)) or type(op_input) == Tensor)): # pylint: disable=unidiomatic-typecheck\n graph_element = op_input\n else:\n graph_element = _as_graph_element(op_input)\n\n if graph_element is not None:\n if not graph:\n original_graph_element = graph_element\n graph = graph_element.graph\n elif original_graph_element is not None:\n _assert_same_graph(original_graph_element, graph_element)\n elif graph_element.graph is not graph:\n raise ValueError(\"%s is not from the passed-in graph.\" % graph_element)\n\n # 2. If all else fails, we use the default graph, which is always there.\n return graph or current_default_graph\n\n\n@tf_export(v1=[\"GraphKeys\"])\nclass GraphKeys(object):\n \"\"\"Standard names to use for graph collections.\n\n The standard library uses various well-known names to collect and\n retrieve values associated with a graph. For example, the\n `tf.Optimizer` subclasses default to optimizing the variables\n collected under `tf.GraphKeys.TRAINABLE_VARIABLES` if none is\n specified, but it is also possible to pass an explicit list of\n variables.\n\n The following standard keys are defined:\n\n * `GLOBAL_VARIABLES`: the default collection of `Variable` objects, shared\n across distributed environment (model variables are subset of these). See\n `tf.compat.v1.global_variables`\n for more details.\n Commonly, all `TRAINABLE_VARIABLES` variables will be in `MODEL_VARIABLES`,\n and all `MODEL_VARIABLES` variables will be in `GLOBAL_VARIABLES`.\n * `LOCAL_VARIABLES`: the subset of `Variable` objects that are local to each\n machine. Usually used for temporarily variables, like counters.\n Note: use `tf.contrib.framework.local_variable` to add to this collection.\n * `MODEL_VARIABLES`: the subset of `Variable` objects that are used in the\n model for inference (feed forward). Note: use\n `tf.contrib.framework.model_variable` to add to this collection.\n * `TRAINABLE_VARIABLES`: the subset of `Variable` objects that will\n be trained by an optimizer. See\n `tf.compat.v1.trainable_variables`\n for more details.\n * `SUMMARIES`: the summary `Tensor` objects that have been created in the\n graph. See\n `tf.compat.v1.summary.merge_all`\n for more details.\n * `QUEUE_RUNNERS`: the `QueueRunner` objects that are used to\n produce input for a computation. See\n `tf.compat.v1.train.start_queue_runners`\n for more details.\n * `MOVING_AVERAGE_VARIABLES`: the subset of `Variable` objects that will also\n keep moving averages. 
See\n `tf.compat.v1.moving_average_variables`\n for more details.\n * `REGULARIZATION_LOSSES`: regularization losses collected during graph\n construction.\n\n The following standard keys are _defined_, but their collections are **not**\n automatically populated as many of the others are:\n\n * `WEIGHTS`\n * `BIASES`\n * `ACTIVATIONS`\n \"\"\"\n\n # Key to collect Variable objects that are global (shared across machines).\n # Default collection for all variables, except local ones.\n GLOBAL_VARIABLES = \"variables\"\n # Key to collect local variables that are local to the machine and are not\n # saved/restored.\n LOCAL_VARIABLES = \"local_variables\"\n # Key to collect local variables which are used to accumulate interal state\n # to be used in tf.metrics.*.\n METRIC_VARIABLES = \"metric_variables\"\n # Key to collect model variables defined by layers.\n MODEL_VARIABLES = \"model_variables\"\n # Key to collect Variable objects that will be trained by the\n # optimizers.\n TRAINABLE_VARIABLES = \"trainable_variables\"\n # Key to collect summaries.\n SUMMARIES = \"summaries\"\n # Key to collect QueueRunners.\n QUEUE_RUNNERS = \"queue_runners\"\n # Key to collect table initializers.\n TABLE_INITIALIZERS = \"table_initializer\"\n # Key to collect asset filepaths. An asset represents an external resource\n # like a vocabulary file.\n ASSET_FILEPATHS = \"asset_filepaths\"\n # Key to collect Variable objects that keep moving averages.\n MOVING_AVERAGE_VARIABLES = \"moving_average_variables\"\n # Key to collect regularization losses at graph construction.\n REGULARIZATION_LOSSES = \"regularization_losses\"\n # Key to collect concatenated sharded variables.\n CONCATENATED_VARIABLES = \"concatenated_variables\"\n # Key to collect savers.\n SAVERS = \"savers\"\n # Key to collect weights\n WEIGHTS = \"weights\"\n # Key to collect biases\n BIASES = \"biases\"\n # Key to collect activations\n ACTIVATIONS = \"activations\"\n # Key to collect update_ops\n UPDATE_OPS = \"update_ops\"\n # Key to collect losses\n LOSSES = \"losses\"\n # Key to collect BaseSaverBuilder.SaveableObject instances for checkpointing.\n SAVEABLE_OBJECTS = \"saveable_objects\"\n # Key to collect all shared resources used by the graph which need to be\n # initialized once per cluster.\n RESOURCES = \"resources\"\n # Key to collect all shared resources used in this graph which need to be\n # initialized once per session.\n LOCAL_RESOURCES = \"local_resources\"\n # Trainable resource-style variables.\n TRAINABLE_RESOURCE_VARIABLES = \"trainable_resource_variables\"\n\n # Key to indicate various ops.\n INIT_OP = \"init_op\"\n LOCAL_INIT_OP = \"local_init_op\"\n READY_OP = \"ready_op\"\n READY_FOR_LOCAL_INIT_OP = \"ready_for_local_init_op\"\n SUMMARY_OP = \"summary_op\"\n GLOBAL_STEP = \"global_step\"\n\n # Used to count the number of evaluations performed during a single evaluation\n # run.\n EVAL_STEP = \"eval_step\"\n TRAIN_OP = \"train_op\"\n\n # Key for control flow context.\n COND_CONTEXT = \"cond_context\"\n WHILE_CONTEXT = \"while_context\"\n\n # Used to store v2 summary names.\n _SUMMARY_COLLECTION = \"_SUMMARY_V2\"\n\n # List of all collections that keep track of variables.\n _VARIABLE_COLLECTIONS = [\n GLOBAL_VARIABLES,\n LOCAL_VARIABLES,\n METRIC_VARIABLES,\n MODEL_VARIABLES,\n TRAINABLE_VARIABLES,\n MOVING_AVERAGE_VARIABLES,\n CONCATENATED_VARIABLES,\n TRAINABLE_RESOURCE_VARIABLES,\n ]\n\n # Key for streaming model ports.\n # NOTE(yuanbyu): internal and experimental.\n _STREAMING_MODEL_PORTS = \"streaming_model_ports\"\n\n 
@decorator_utils.classproperty\n @deprecation.deprecated(None, \"Use `tf.GraphKeys.GLOBAL_VARIABLES` instead.\")\n def VARIABLES(cls): # pylint: disable=no-self-argument\n return cls.GLOBAL_VARIABLES\n\n\ndef dismantle_graph(graph):\n \"\"\"Cleans up reference cycles from a `Graph`.\n\n Helpful for making sure the garbage collector doesn't need to run after a\n temporary `Graph` is no longer needed.\n\n Args:\n graph: A `Graph` object to destroy. Neither it nor any of its ops are usable\n after this function runs.\n \"\"\"\n memory.dismantle_ordered_dict(graph._functions) # pylint: disable=protected-access\n\n # Now clean up Operation<->Graph reference cycles by clearing all of the\n # attributes for the Graph and its ops.\n graph_operations = graph.get_operations()\n for op in graph_operations:\n op.__dict__ = {}\n graph.__dict__ = {}\n\n\n@tf_export(v1=[\"add_to_collection\"])\ndef add_to_collection(name, value):\n \"\"\"Wrapper for `Graph.add_to_collection()` using the default graph.\n\n See `tf.Graph.add_to_collection`\n for more details.\n\n Args:\n name: The key for the collection. For example, the `GraphKeys` class\n contains many standard names for collections.\n value: The value to add to the collection. @compatibility(eager)\n Collections are only supported in eager when variables are created inside\n an EagerVariableStore (e.g. as part of a layer or template).\n @end_compatibility\n \"\"\"\n get_default_graph().add_to_collection(name, value)\n\n\n@tf_export(v1=[\"add_to_collections\"])\ndef add_to_collections(names, value):\n \"\"\"Wrapper for `Graph.add_to_collections()` using the default graph.\n\n See `tf.Graph.add_to_collections`\n for more details.\n\n Args:\n names: The key for the collections. The `GraphKeys` class contains many\n standard names for collections.\n value: The value to add to the collections. @compatibility(eager)\n Collections are only supported in eager when variables are created inside\n an EagerVariableStore (e.g. as part of a layer or template).\n @end_compatibility\n \"\"\"\n get_default_graph().add_to_collections(names, value)\n\n\n@tf_export(v1=[\"get_collection_ref\"])\ndef get_collection_ref(key):\n \"\"\"Wrapper for `Graph.get_collection_ref()` using the default graph.\n\n See `tf.Graph.get_collection_ref`\n for more details.\n\n Args:\n key: The key for the collection. For example, the `GraphKeys` class contains\n many standard names for collections.\n\n Returns:\n The list of values in the collection with the given `name`, or an empty\n list if no value has been added to that collection. Note that this returns\n the collection list itself, which can be modified in place to change the\n collection.\n\n @compatibility(eager)\n Collections are not supported when eager execution is enabled.\n @end_compatibility\n \"\"\"\n return get_default_graph().get_collection_ref(key)\n\n\n@tf_export(v1=[\"get_collection\"])\ndef get_collection(key, scope=None):\n \"\"\"Wrapper for `Graph.get_collection()` using the default graph.\n\n See `tf.Graph.get_collection`\n for more details.\n\n Args:\n key: The key for the collection. For example, the `GraphKeys` class contains\n many standard names for collections.\n scope: (Optional.) If supplied, the resulting list is filtered to include\n only items whose `name` attribute matches using `re.match`. 
Items without\n a `name` attribute are never returned if a scope is supplied and the\n choice or `re.match` means that a `scope` without special tokens filters\n by prefix.\n\n Returns:\n The list of values in the collection with the given `name`, or\n an empty list if no value has been added to that collection. The\n list contains the values in the order under which they were\n collected.\n\n @compatibility(eager)\n Collections are not supported when eager execution is enabled.\n @end_compatibility\n \"\"\"\n return get_default_graph().get_collection(key, scope)\n\n\ndef get_all_collection_keys():\n \"\"\"Returns a list of collections used in the default graph.\"\"\"\n return get_default_graph().get_all_collection_keys()\n\n\ndef name_scope(name, default_name=None, values=None, skip_on_eager=True):\n \"\"\"Internal-only entry point for `name_scope*`.\n\n Internal ops do not use the public API and instead rely on\n `ops.name_scope` regardless of the execution mode. This function\n dispatches to the correct `name_scope*` implementation based on\n the arguments provided and the current mode. Specifically,\n\n * if `values` contains a graph tensor `Graph.name_scope` is used;\n * `name_scope_v1` is used in graph mode;\n * `name_scope_v2` -- in eager mode.\n\n Args:\n name: The name argument that is passed to the op function.\n default_name: The default name to use if the `name` argument is `None`.\n values: The list of `Tensor` arguments that are passed to the op function.\n skip_on_eager: Indicates to return NullContextmanager if executing eagerly.\n By default this is True since naming tensors and operations in eager mode\n have little use and cause unnecessary performance overhead. However, it is\n important to preserve variable names since they are often useful for\n debugging and saved models.\n\n Returns:\n `name_scope*` context manager.\n \"\"\"\n ctx = context.context()\n in_eager_mode = ctx.executing_eagerly()\n if not in_eager_mode:\n return internal_name_scope_v1(name, default_name, values)\n\n if skip_on_eager:\n return NullContextmanager()\n\n name = default_name if name is None else name\n if values:\n # The presence of a graph tensor in `values` overrides the context.\n # TODO(slebedev): this is Keras-specific and should be removed.\n # pylint: disable=unidiomatic-typecheck\n graph_value = next((value for value in values if type(value) == Tensor),\n None)\n # pylint: enable=unidiomatic-typecheck\n if graph_value is not None:\n return graph_value.graph.name_scope(name)\n\n return name_scope_v2(name or \"\")\n\n\nclass internal_name_scope_v1(object): # pylint: disable=invalid-name\n \"\"\"Graph-only version of `name_scope_v1`.\"\"\"\n\n @property\n def name(self):\n return self._name\n\n def __init__(self, name, default_name=None, values=None):\n \"\"\"Initialize the context manager.\n\n Args:\n name: The name argument that is passed to the op function.\n default_name: The default name to use if the `name` argument is `None`.\n values: The list of `Tensor` arguments that are passed to the op function.\n\n Raises:\n TypeError: if `default_name` is passed in but not a string.\n \"\"\"\n if not (default_name is None or isinstance(default_name, six.string_types)):\n raise TypeError(\n \"`default_name` type (%s) is not a string type. 
You likely meant to \"\n \"pass this into the `values` kwarg.\" % type(default_name))\n self._name = default_name if name is None else name\n self._default_name = default_name\n self._values = values\n\n def __enter__(self):\n \"\"\"Start the scope block.\n\n Returns:\n The scope name.\n\n Raises:\n ValueError: if neither `name` nor `default_name` is provided\n but `values` are.\n \"\"\"\n if self._name is None and self._values is not None:\n # We only raise an error if values is not None (provided) because\n # currently tf.name_scope(None) (values=None then) is sometimes used as\n # an idiom to reset to top scope.\n raise ValueError(\n \"At least one of name (%s) and default_name (%s) must be provided.\"\n % (self._name, self._default_name))\n\n g = get_default_graph()\n if self._values and not g.building_function:\n # Specialize based on the knowledge that `_get_graph_from_inputs()`\n # ignores `inputs` when building a function.\n g_from_inputs = _get_graph_from_inputs(self._values)\n if g_from_inputs is not g:\n g = g_from_inputs\n self._g_manager = g.as_default()\n self._g_manager.__enter__()\n else:\n self._g_manager = None\n else:\n self._g_manager = None\n\n try:\n self._name_scope = g.name_scope(self._name)\n return self._name_scope.__enter__()\n except:\n if self._g_manager is not None:\n self._g_manager.__exit__(*sys.exc_info())\n raise\n\n def __exit__(self, *exc_info):\n self._name_scope.__exit__(*exc_info)\n if self._g_manager is not None:\n self._g_manager.__exit__(*exc_info)\n\n\n# Named like a function for backwards compatibility with the\n# @tf_contextlib.contextmanager version, which was switched to a class to avoid\n# some object creation overhead.\n@tf_export(v1=[\"name_scope\"])\nclass name_scope_v1(object): # pylint: disable=invalid-name\n \"\"\"A context manager for use when defining a Python op.\n\n This context manager validates that the given `values` are from the\n same graph, makes that graph the default graph, and pushes a\n name scope in that graph (see\n `tf.Graph.name_scope`\n for more details on that).\n\n For example, to define a new Python op called `my_op`:\n\n ```python\n def my_op(a, b, c, name=None):\n with tf.name_scope(name, \"MyOp\", [a, b, c]) as scope:\n a = tf.convert_to_tensor(a, name=\"a\")\n b = tf.convert_to_tensor(b, name=\"b\")\n c = tf.convert_to_tensor(c, name=\"c\")\n # Define some computation that uses `a`, `b`, and `c`.\n return foo_op(..., name=scope)\n ```\n \"\"\"\n\n @property\n def name(self):\n return self._name\n\n def __init__(self, name, default_name=None, values=None):\n \"\"\"Initialize the context manager.\n\n Args:\n name: The name argument that is passed to the op function.\n default_name: The default name to use if the `name` argument is `None`.\n values: The list of `Tensor` arguments that are passed to the op function.\n\n Raises:\n TypeError: if `default_name` is passed in but not a string.\n \"\"\"\n self._name_scope = name_scope(\n name, default_name, values, skip_on_eager=False)\n self._name = default_name if name is None else name\n\n def __enter__(self):\n return self._name_scope.__enter__()\n\n def __exit__(self, *exc_info):\n return self._name_scope.__exit__(*exc_info)\n\n\ndef enter_eager_name_scope(ctx, name):\n \"\"\"Updates the eager context to enter the given name scope.\"\"\"\n old_name = ctx.scope_name\n if not name:\n scope_name = \"\"\n else:\n if name.endswith(\"/\"):\n # A trailing slash breaks out of nested name scopes, indicating a\n # fully specified scope name, for compatibility with 
Graph.name_scope.\n scope_name = name\n else:\n scope_name = name + \"/\"\n if old_name:\n scope_name = old_name + scope_name\n ctx.scope_name = scope_name\n return scope_name, old_name\n\n\n@tf_export(\"name_scope\", v1=[])\nclass name_scope_v2(object):\n \"\"\"A context manager for use when defining a Python op.\n\n This context manager pushes a name scope, which will make the name of all\n operations added within it have a prefix.\n\n For example, to define a new Python op called `my_op`:\n\n ```python\n def my_op(a, b, c, name=None):\n with tf.name_scope(\"MyOp\") as scope:\n a = tf.convert_to_tensor(a, name=\"a\")\n b = tf.convert_to_tensor(b, name=\"b\")\n c = tf.convert_to_tensor(c, name=\"c\")\n # Define some computation that uses `a`, `b`, and `c`.\n return foo_op(..., name=scope)\n ```\n\n When executed, the Tensors `a`, `b`, `c`, will have names `MyOp/a`, `MyOp/b`,\n and `MyOp/c`.\n\n If the scope name already exists, the name will be made unique by appending\n `_n`. For example, calling `my_op` the second time will generate `MyOp_1/a`,\n etc.\n \"\"\"\n\n def __init__(self, name):\n \"\"\"Initialize the context manager.\n\n Args:\n name: The prefix to use on all names created within the name scope.\n\n Raises:\n ValueError: If name is None, or not a string.\n \"\"\"\n if name is None or not isinstance(name, six.string_types):\n raise ValueError(\"name for name_scope must be a string.\")\n self._name = name\n self._exit_fns = []\n\n @property\n def name(self):\n return self._name\n\n def __enter__(self):\n \"\"\"Start the scope block.\n\n Returns:\n The scope name.\n\n Raises:\n ValueError: if neither `name` nor `default_name` is provided\n but `values` are.\n \"\"\"\n ctx = context.context()\n if ctx.executing_eagerly():\n scope_name, old_scope_name = enter_eager_name_scope(ctx, self._name)\n self._exit_fns.append(\n lambda *a: setattr(ctx, \"scope_name\", old_scope_name))\n else:\n scope = get_default_graph().name_scope(self._name)\n scope_name = scope.__enter__()\n self._exit_fns.append(scope.__exit__)\n return scope_name\n\n def __exit__(self, type_arg, value_arg, traceback_arg):\n exit_fn = self._exit_fns.pop()\n exit_fn(type_arg, value_arg, traceback_arg)\n return False # False values do not suppress exceptions\n\n\ndef strip_name_scope(name, export_scope):\n \"\"\"Removes name scope from a name.\n\n Args:\n name: A `string` name.\n export_scope: Optional `string`. Name scope to remove.\n\n Returns:\n Name with name scope removed, or the original name if export_scope\n is None.\n \"\"\"\n if export_scope:\n if export_scope[-1] == \"/\":\n export_scope = export_scope[:-1]\n\n try:\n # Strips export_scope/, export_scope///,\n # ^export_scope/, loc:@export_scope/.\n str_to_replace = r\"([\\^]|loc:@|^)\" + export_scope + r\"[\\/]+(.*)\"\n return re.sub(str_to_replace, r\"\\1\\2\", compat.as_str(name), count=1)\n except TypeError as e:\n # If the name is not of a type we can process, simply return it.\n logging.warning(e)\n return name\n else:\n return name\n\n\ndef prepend_name_scope(name, import_scope):\n \"\"\"Prepends name scope to a name.\n\n Args:\n name: A `string` name.\n import_scope: Optional `string`. 
Name scope to add.\n\n Returns:\n Name with name scope added, or the original name if import_scope\n is None.\n \"\"\"\n if import_scope:\n if import_scope[-1] == \"/\":\n import_scope = import_scope[:-1]\n\n try:\n str_to_replace = r\"([\\^]|loc:@|^)(.*)\"\n return re.sub(str_to_replace, r\"\\1\" + import_scope + r\"/\\2\",\n compat.as_str(name))\n except TypeError as e:\n # If the name is not of a type we can process, simply return it.\n logging.warning(e)\n return name\n else:\n return name\n\n\n# pylint: disable=g-doc-return-or-yield\n# pylint: disable=not-context-manager\n@tf_export(v1=[\"op_scope\"])\n@tf_contextlib.contextmanager\ndef op_scope(values, name, default_name=None):\n \"\"\"DEPRECATED. Same as name_scope above, just different argument order.\"\"\"\n logging.warn(\"tf.op_scope(values, name, default_name) is deprecated,\"\n \" use tf.name_scope(name, default_name, values)\")\n with name_scope(name, default_name=default_name, values=values) as scope:\n yield scope\n\n\n_proto_function_registry = registry.Registry(\"proto functions\")\n\n\ndef register_proto_function(collection_name,\n proto_type=None,\n to_proto=None,\n from_proto=None):\n \"\"\"Registers `to_proto` and `from_proto` functions for collection_name.\n\n `to_proto` function converts a Python object to the corresponding protocol\n buffer, and returns the protocol buffer.\n\n `from_proto` function converts protocol buffer into a Python object, and\n returns the object..\n\n Args:\n collection_name: Name of the collection.\n proto_type: Protobuf type, such as `saver_pb2.SaverDef`,\n `variable_pb2.VariableDef`, `queue_runner_pb2.QueueRunnerDef`..\n to_proto: Function that implements Python object to protobuf conversion.\n from_proto: Function that implements protobuf to Python object conversion.\n \"\"\"\n if to_proto and not callable(to_proto):\n raise TypeError(\"to_proto must be callable.\")\n if from_proto and not callable(from_proto):\n raise TypeError(\"from_proto must be callable.\")\n\n _proto_function_registry.register((proto_type, to_proto, from_proto),\n collection_name)\n\n\ndef get_collection_proto_type(collection_name):\n \"\"\"Returns the proto_type for collection_name.\"\"\"\n try:\n return _proto_function_registry.lookup(collection_name)[0]\n except LookupError:\n return None\n\n\ndef get_to_proto_function(collection_name):\n \"\"\"Returns the to_proto function for collection_name.\"\"\"\n try:\n return _proto_function_registry.lookup(collection_name)[1]\n except LookupError:\n return None\n\n\ndef get_from_proto_function(collection_name):\n \"\"\"Returns the from_proto function for collection_name.\"\"\"\n try:\n return _proto_function_registry.lookup(collection_name)[2]\n except LookupError:\n return None\n\n\ndef _operation_conversion_error(op, dtype=None, name=None, as_ref=False):\n \"\"\"Produce a nice error if someone converts an Operation to a Tensor.\"\"\"\n raise TypeError((\"Can't convert Operation '%s' to Tensor \"\n \"(target dtype=%r, name=%r, as_ref=%r)\") %\n (op.name, dtype, name, as_ref))\n\n\ndef _op_to_colocate_with(v, graph):\n \"\"\"Operation object corresponding to v to use for colocation constraints.\"\"\"\n if v is None:\n return None\n if isinstance(v, Operation):\n return v\n # We always want to colocate with the reference op.\n # When 'v' is a ResourceVariable, the reference op is the handle creating op.\n #\n # What this should be is:\n # if isinstance(v, ResourceVariable):\n # return v.handle.op\n # However, that would require a circular import dependency.\n # As of 
October 2018, there were attempts underway to remove\n # colocation constraints altogether. Assuming that will\n # happen soon, perhaps this hack to work around the circular\n # import dependency is acceptable.\n if hasattr(v, \"handle\") and isinstance(v.handle, Tensor):\n if graph.building_function:\n return graph.capture(v.handle).op\n else:\n return v.handle.op\n return internal_convert_to_tensor_or_indexed_slices(v, as_ref=True).op\n\n\ndef _is_keras_symbolic_tensor(x):\n return hasattr(x, \"graph\") and getattr(x.graph, \"name\", None) == \"keras_graph\"\n\n\ntensor_conversion_registry.register_tensor_conversion_function(\n Operation, _operation_conversion_error)\n\n\n# These symbols were originally defined in this module; import them for\n# backwards compatibility until all references have been updated to access\n# them from the indexed_slices.py module.\nIndexedSlices = indexed_slices.IndexedSlices\nIndexedSlicesValue = indexed_slices.IndexedSlicesValue\nconvert_to_tensor_or_indexed_slices = \\\n indexed_slices.convert_to_tensor_or_indexed_slices\nconvert_n_to_tensor_or_indexed_slices = \\\n indexed_slices.convert_n_to_tensor_or_indexed_slices\ninternal_convert_to_tensor_or_indexed_slices = \\\n indexed_slices.internal_convert_to_tensor_or_indexed_slices\ninternal_convert_n_to_tensor_or_indexed_slices = \\\n indexed_slices.internal_convert_n_to_tensor_or_indexed_slices\nregister_tensor_conversion_function = \\\n tensor_conversion_registry.register_tensor_conversion_function\n\n\n# Helper functions for op wrapper modules generated by `python_op_gen`.\n\n\ndef to_raw_op(f):\n \"\"\"Make a given op wrapper function `f` raw.\n\n Raw op wrappers can only be called with keyword arguments.\n\n Args:\n f: An op wrapper function to make raw.\n\n Returns:\n Raw `f`.\n \"\"\"\n # Copy `f` to get a new `__dict__`, otherwise `tf_export` will fail\n # due to double-registration.\n f = types.FunctionType(f.__code__, f.__globals__, f.__name__, f.__defaults__,\n f.__closure__)\n return kwarg_only(f)\n\n\ndef raise_from_not_ok_status(e, name):\n message = e.message + (\" name: \" + name if name is not None else \"\")\n # pylint: disable=protected-access\n six.raise_from(core._status_to_exception(e.code, message), None)\n # pylint: enable=protected-access\n\n\ndef add_exit_callback_to_default_func_graph(fn):\n \"\"\"Add a callback to run when the default function graph goes out of scope.\n\n Usage:\n\n ```python\n @tf.function\n def fn(x, v):\n expensive = expensive_object(v)\n add_exit_callback_to_default_func_graph(lambda: expensive.release())\n return g(x, expensive)\n\n fn(x=tf.constant(...), v=...)\n # `expensive` has been released.\n ```\n\n Args:\n fn: A callable that takes no arguments and whose output is ignored.\n To be executed when exiting func graph scope.\n\n Raises:\n RuntimeError: If executed when the current default graph is not a FuncGraph,\n or not currently executing in function creation mode (e.g., if inside\n an init_scope).\n \"\"\"\n default_graph = get_default_graph()\n if not default_graph._building_function: # pylint: disable=protected-access\n raise RuntimeError(\n \"Cannot add scope exit callbacks when not building a function. 
\"\n \"Default graph: {}\".format(default_graph))\n default_graph._add_scope_exit_callback(fn) # pylint: disable=protected-access\n\n\ndef _reconstruct_sequence_inputs(op_def, inputs, attrs):\n \"\"\"Regroups a flat list of input tensors into scalar and sequence inputs.\n\n Args:\n op_def: The `op_def_pb2.OpDef` (for knowing the input types)\n inputs: a list of input `Tensor`s to the op.\n attrs: mapping from attr name to `attr_value_pb2.AttrValue` (these define\n how long each sequence is)\n\n Returns:\n A list of `Tensor`s (corresponding to scalar inputs) and lists of\n `Tensor`s (corresponding to sequence inputs).\n \"\"\"\n grouped_inputs = []\n i = 0\n for input_arg in op_def.input_arg:\n if input_arg.number_attr:\n input_len = attrs[input_arg.number_attr].i\n is_sequence = True\n elif input_arg.type_list_attr:\n input_len = len(attrs[input_arg.type_list_attr].list.type)\n is_sequence = True\n else:\n input_len = 1\n is_sequence = False\n\n if is_sequence:\n grouped_inputs.append(inputs[i:i + input_len])\n else:\n grouped_inputs.append(inputs[i])\n i += input_len\n\n assert i == len(inputs)\n return grouped_inputs\n\n\nclass _TensorIterator(object):\n \"\"\"Iterates over the leading dim of a Tensor. Performs no error checks.\"\"\"\n\n def __init__(self, tensor, dim0):\n self._tensor = tensor\n self._index = 0\n self._limit = dim0\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self._index == self._limit:\n raise StopIteration\n result = self._tensor[self._index]\n self._index += 1\n return result\n\n next = __next__ # python2.x compatibility.\n",
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# pylint: disable=protected-access\n\"\"\"A `Network` is way to compose layers: the topological form of a `Model`.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport copy\nimport itertools\nimport json\nimport os\n\nimport numpy as np\nimport six\nfrom six.moves import zip # pylint: disable=redefined-builtin\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import composite_tensor\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import errors_impl\nfrom tensorflow.python.framework import func_graph\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.keras import backend\nfrom tensorflow.python.keras.engine import base_layer\nfrom tensorflow.python.keras.engine import base_layer_utils\nfrom tensorflow.python.keras.engine import compile_utils\nfrom tensorflow.python.keras.engine import input_layer as input_layer_module\nfrom tensorflow.python.keras.engine import node as node_module\nfrom tensorflow.python.keras.engine import training_utils\nfrom tensorflow.python.keras.saving import hdf5_format\nfrom tensorflow.python.keras.saving import save\nfrom tensorflow.python.keras.saving.saved_model import network_serialization\nfrom tensorflow.python.keras.utils import generic_utils\nfrom tensorflow.python.keras.utils import layer_utils\nfrom tensorflow.python.keras.utils import tf_utils\nfrom tensorflow.python.keras.utils.io_utils import ask_to_proceed_with_overwrite\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops.ragged import ragged_tensor\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.training import checkpoint_management\nfrom tensorflow.python.training import py_checkpoint_reader\nfrom tensorflow.python.training.tracking import base as trackable\nfrom tensorflow.python.training.tracking import data_structures\nfrom tensorflow.python.training.tracking import layer_utils as trackable_layer_utils\nfrom tensorflow.python.training.tracking import tracking\nfrom tensorflow.python.training.tracking import util as trackable_utils\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util import serialization\nfrom tensorflow.python.util import tf_inspect\n\n\n# pylint: disable=g-import-not-at-top\ntry:\n import h5py\nexcept ImportError:\n h5py = None\n\ntry:\n import yaml\nexcept ImportError:\n yaml = None\n# pylint: enable=g-import-not-at-top\n\n\nclass Network(base_layer.Layer):\n \"\"\"A `Network` is a composition of layers.\n\n `Network` is the topological form of a \"model\". 
A `Model`\n is simply a `Network` with added training routines.\n\n Two types of `Networks` exist: Graph Networks and Subclass Networks. Graph\n networks are used in the Keras Functional and Sequential APIs. Subclassed\n networks are used when a user subclasses the `Model` class. In general,\n more Keras features are supported with Graph Networks than with Subclassed\n Networks, specifically:\n\n - Model cloning (`keras.models.clone`)\n - Serialization (`model.get_config()/from_config`, `model.to_json()/to_yaml()`\n - Whole-model saving (`model.save()`)\n\n A Graph Network can be instantiated by passing two arguments to `__init__`.\n The first argument is the `keras.Input` Tensors that represent the inputs\n to the Network. The second argument specifies the output Tensors that\n represent the outputs of this Network. Both arguments can be a nested\n structure of Tensors.\n\n Example:\n\n ```\n inputs = {'x1': keras.Input(shape=(10,)), 'x2': keras.Input(shape=(1,))}\n t = keras.layers.Dense(1, activation='relu')(inputs['x1'])\n outputs = keras.layers.Add()([t, inputs['x2'])\n network = Network(inputs, outputs)\n ```\n\n A Graph Network constructed using the Functional API can also include raw\n TensorFlow functions, with the exception of functions that create Variables\n or assign ops.\n\n Example:\n\n ```\n inputs = keras.Input(shape=(10,))\n x = keras.layers.Dense(1)(inputs)\n outputs = tf.nn.relu(x)\n network = Network(inputs, outputs)\n ```\n\n Subclassed Networks can be instantiated via `name` and (optional) `dynamic`\n keyword arguments. Subclassed Networks keep track of their Layers, and their\n `call` method can be overridden. Subclassed Networks are typically created\n indirectly, by subclassing the `Model` class.\n\n Example:\n\n ```\n class MyModel(keras.Model):\n def __init__(self):\n super(MyModel, self).__init__(name='my_model', dynamic=False)\n\n self.layer1 = keras.layers.Dense(10, activation='relu')\n\n def call(self, inputs):\n return self.layer1(inputs)\n ```\n\n Allowed args in `super().__init__`:\n name: String name of the model.\n dynamic: (Subclassed models only) Set this to `True` if your model should\n only be run eagerly, and should not be used to generate a static\n computation graph. This attribute is automatically set for Functional API\n models.\n trainable: Boolean, whether the model's variables should be trainable.\n dtype: (Subclassed models only) Default dtype of the model's weights (\n default of `None` means use the type of the first input). This attribute\n has no effect on Functional API models, which do not have weights of their\n own.\n \"\"\"\n\n # See tf.Module for the usage of this property.\n # The key of _layer_call_argspecs is a layer. tf.Module._flatten will fail to\n # flatten the key since it is trying to convert Trackable/Layer to a string.\n _TF_MODULE_IGNORED_PROPERTIES = frozenset(itertools.chain(\n ('_layer_call_argspecs', '_compiled_trainable_state'),\n base_layer.Layer._TF_MODULE_IGNORED_PROPERTIES\n ))\n\n def __init__(self, *args, **kwargs): # pylint: disable=super-init-not-called\n # Signature detection\n if (len(args) == 2 or\n len(args) == 1 and 'outputs' in kwargs or\n 'inputs' in kwargs and 'outputs' in kwargs):\n # Graph network\n self._init_graph_network(*args, **kwargs)\n else:\n # Subclassed network\n self._init_subclassed_network(**kwargs)\n\n tf_utils.assert_no_legacy_layers(self.layers)\n\n # Several Network methods have \"no_automatic_dependency_tracking\"\n # annotations. 
Since Network does automatic dependency tracking on attribute\n # assignment, including for common data structures such as lists, by default\n # we'd have quite a few empty dependencies which users don't care about (or\n # would need some way to ignore dependencies automatically, which is confusing\n # when applied to user code). Some attributes, such as _layers, would cause\n # structural issues (_layers being the place where Layers assigned to tracked\n # attributes are stored).\n #\n # Aside from these aesthetic and structural issues, useless dependencies on\n # empty lists shouldn't cause issues; adding or removing them will not break\n # checkpoints, but may cause \"all Python objects matched\" assertions to fail\n # (in which case less strict assertions may be substituted if necessary).\n @trackable.no_automatic_dependency_tracking\n def _base_init(self, name=None, **kwargs):\n # The following are implemented as property functions:\n # self.trainable_weights\n # self.non_trainable_weights\n # self.input_spec\n # self.losses\n # self.updates\n\n generic_utils.validate_kwargs(kwargs, {'trainable', 'dtype', 'dynamic',\n 'autocast'})\n\n super(Network, self).__init__(name=name, **kwargs)\n\n self.output_names = None\n self.input_names = None\n self._is_compiled = False\n self._saved_model_inputs_spec = None\n\n # This is True for Sequential networks and Functional networks.\n self._compute_output_and_mask_jointly = False\n\n if not hasattr(self, 'optimizer'):\n # Don't reset optimizer if already set.\n self.optimizer = None\n\n self._scope = None # Never used.\n self._reuse = None # Never used.\n if context.executing_eagerly():\n self._graph = None\n else:\n self._graph = ops.get_default_graph() # Used in symbolic mode only.\n\n self._trackable_saver = (\n trackable_utils.saver_with_op_caching(self))\n\n @trackable.no_automatic_dependency_tracking\n def _init_graph_network(self, inputs, outputs, name=None, **kwargs):\n generic_utils.validate_kwargs(\n kwargs, {'trainable'},\n 'Functional models may only specify `name` and `trainable` keyword '\n 'arguments during initialization. 
Got an unexpected argument:')\n # Normalize and set self.inputs, self.outputs.\n if isinstance(inputs, list) and len(nest.flatten(inputs)) == 1:\n inputs = inputs[0]\n if isinstance(outputs, list) and len(nest.flatten(outputs)) == 1:\n outputs = outputs[0]\n self._nested_outputs = outputs\n self._nested_inputs = inputs\n self.inputs = nest.flatten(inputs)\n self.outputs = nest.flatten(outputs)\n\n if any(not hasattr(tensor, '_keras_history') for tensor in self.outputs):\n base_layer_utils.create_keras_history(self._nested_outputs)\n\n self._base_init(name=name, **kwargs)\n self._validate_graph_inputs_and_outputs()\n\n # A Network does not create weights of its own, thus it is already\n # built.\n self.built = True\n self._build_input_shape = nest.map_structure(lambda x: x.shape, inputs)\n self._compute_output_and_mask_jointly = True\n self._is_graph_network = True\n # `_expects_training_arg` is True since the `training` argument is always\n # present in the signature of the `call` method of a graph network.\n self._expects_training_arg = True\n self._expects_mask_arg = True\n # A graph network does not autocast inputs, as its layers will cast them\n # instead.\n self._autocast = False\n\n self._input_layers = []\n self._output_layers = []\n self._input_coordinates = []\n self._output_coordinates = []\n\n self._supports_ragged_inputs = None\n\n # This is for performance optimization when calling the Network on new\n # inputs. Every time the Network is called on a set on input tensors,\n # we compute the output tensors, output masks and output shapes in one pass,\n # then cache them here. When any of these outputs is queried later, we\n # retrieve it from there instead of recomputing it.\n self._output_mask_cache = {}\n self._output_tensor_cache = {}\n self._output_shape_cache = {}\n\n # Build self._output_layers:\n for x in self.outputs:\n layer, node_index, tensor_index = x._keras_history # pylint: disable=protected-access\n self._output_layers.append(layer)\n self._output_coordinates.append((layer, node_index, tensor_index))\n\n # Build self._input_layers:\n for x in self.inputs:\n layer, node_index, tensor_index = x._keras_history # pylint: disable=protected-access\n # It's supposed to be an input layer, so only one node\n # and one tensor output.\n assert node_index == 0\n assert tensor_index == 0\n self._input_layers.append(layer)\n self._input_coordinates.append((layer, node_index, tensor_index))\n\n # Keep track of the network's nodes and layers.\n nodes, nodes_by_depth, layers, _ = _map_graph_network(\n self.inputs, self.outputs)\n self._network_nodes = nodes\n self._nodes_by_depth = nodes_by_depth\n self._layers = layers\n self._layer_call_argspecs = {}\n for layer in self._layers:\n self._layer_call_argspecs[layer] = tf_inspect.getfullargspec(layer.call)\n layer._attribute_sentinel.add_parent(self._attribute_sentinel)\n\n # Create the node linking internal inputs to internal outputs.\n node_module.Node(\n outbound_layer=self,\n inbound_layers=[],\n node_indices=[],\n tensor_indices=[],\n input_tensors=self._nested_inputs,\n output_tensors=self._nested_outputs)\n\n # Build self.input_names and self.output_names.\n self._set_output_names()\n self.input_names = []\n self._feed_input_names = []\n self._feed_inputs = []\n self._feed_input_shapes = []\n for layer in self._input_layers:\n self.input_names.append(layer.name)\n if layer.is_placeholder:\n self._feed_input_names.append(layer.name)\n # Use batch_input_shape here because non-eager composite tensors may not\n # have a shape 
attribute that's meaningful (sparse, for instance, has\n # a tensor that's non-constant and needs to be fed). This means that\n # input layers that create placeholders will need to have the\n # batch_input_shape attr to allow for input shape validation.\n self._feed_input_shapes.append(layer._batch_input_shape)\n self._feed_inputs.append(layer.input)\n\n self._compute_tensor_usage_count()\n self._set_save_spec(self._nested_inputs)\n\n def _set_output_names(self):\n \"\"\"Assigns unique names to the Network's outputs.\n\n Output layers with multiple output tensors would otherwise lead to duplicate\n names in self.output_names.\n \"\"\"\n uniquified = []\n output_names = set()\n prefix_count = {}\n for layer in self._output_layers:\n proposal = layer.name\n while proposal in output_names:\n existing_count = prefix_count.get(layer.name, 1)\n proposal = '{}_{}'.format(layer.name, existing_count)\n prefix_count[layer.name] = existing_count + 1\n output_names.add(proposal)\n uniquified.append(proposal)\n self.output_names = uniquified\n\n @trackable.no_automatic_dependency_tracking\n def _init_subclassed_network(self, name=None, **kwargs):\n self._base_init(name=name, **kwargs)\n self._is_graph_network = False\n self._init_call_fn_args()\n self._autocast = kwargs.get('autocast',\n base_layer_utils.v2_dtype_behavior_enabled())\n self._supports_ragged_inputs = None\n self.outputs = None\n self.inputs = None\n self.built = False\n self._build_input_shape = None\n\n @property\n @trackable_layer_utils.cache_recursive_attribute('dynamic')\n def dynamic(self):\n if self._is_graph_network:\n return any(layer.dynamic for layer in self.layers)\n return self._dynamic or any(layer.dynamic for layer in self.layers)\n\n @property\n def _layer_checkpoint_dependencies(self):\n \"\"\"Dictionary of layer dependencies to be included in the checkpoint.\"\"\"\n # Use getattr because this function can be called from __setattr__, at which\n # point the _is_graph_network attribute has not been created.\n if (not getattr(self, '_is_graph_network', False) and\n base_layer_utils.is_subclassed(self)):\n return {} # Only add layer dependencies for graph networks\n\n weight_layer_index = 0\n\n dependencies = {}\n for layer_index, layer in enumerate(self.layers):\n try:\n if layer.weights:\n # Keep a separate index for layers which have weights. This allows\n # users to insert Layers without weights anywhere in the network\n # without breaking checkpoints.\n dependencies['layer_with_weights-%d' % weight_layer_index] = layer\n weight_layer_index += 1\n except ValueError:\n # The layer might have weights, but may not be built yet. 
We just treat\n # it as layer without weight.\n pass\n\n # Even if it doesn't have weights, we should still track everything in\n # case it has/will have Trackable dependencies.\n dependencies['layer-%d' % layer_index] = layer\n return dependencies\n\n @property\n def _checkpoint_dependencies(self):\n dependencies = [\n trackable.TrackableReference(name=name, ref=layer)\n for name, layer in self._layer_checkpoint_dependencies.items()]\n dependencies.extend(super(Network, self)._checkpoint_dependencies)\n return dependencies\n\n def _lookup_dependency(self, name):\n layer_dependencies = self._layer_checkpoint_dependencies\n if name in layer_dependencies:\n return layer_dependencies[name]\n return super(Network, self)._lookup_dependency(name)\n\n def _handle_deferred_layer_dependencies(self, layers):\n \"\"\"Handles layer checkpoint dependencies that are added after init.\"\"\"\n layer_checkpoint_dependencies = self._layer_checkpoint_dependencies\n layer_to_name = {v: k for k, v in layer_checkpoint_dependencies.items()}\n for layer in layers:\n if layer in layer_to_name:\n self._handle_deferred_dependencies(name=layer_to_name[layer],\n trackable=layer)\n\n def __setattr__(self, name, value):\n if not getattr(self, '_self_setattr_tracking', True):\n super(Network, self).__setattr__(name, value)\n return\n\n if all(\n isinstance(v, (base_layer.Layer,\n data_structures.TrackableDataStructure)) or\n trackable_layer_utils.has_weights(v) for v in nest.flatten(value)):\n try:\n self._is_graph_network\n except AttributeError:\n # six.raise_from supresses the original AttributeError from being raised\n six.raise_from(\n RuntimeError('It looks like you are subclassing `Model` and you '\n 'forgot to call `super(YourClass, self).__init__()`.'\n ' Always start with this line.'), None)\n\n super(Network, self).__setattr__(name, value)\n\n # Keep track of metric instance created in subclassed model/layer.\n # We do this so that we can maintain the correct order of metrics by adding\n # the instance to the `metrics` list as soon as it is created.\n from tensorflow.python.keras import metrics as metrics_module # pylint: disable=g-import-not-at-top\n if isinstance(value, metrics_module.Metric):\n self._metrics.append(value)\n\n @property\n @trackable_layer_utils.cache_recursive_attribute('stateful')\n def stateful(self):\n return any(getattr(layer, 'stateful', False) for layer in self.layers)\n\n def reset_states(self):\n for layer in self.layers:\n if hasattr(layer, 'reset_states') and getattr(layer, 'stateful', False):\n layer.reset_states()\n\n @property\n def state_updates(self):\n \"\"\"Returns the `updates` from all layers that are stateful.\n\n This is useful for separating training updates and\n state updates, e.g. 
when we need to update a layer's internal state\n during prediction.\n\n Returns:\n A list of update ops.\n \"\"\"\n state_updates = []\n for layer in self.layers:\n if getattr(layer, 'stateful', False):\n if hasattr(layer, 'updates'):\n state_updates += layer.updates\n return state_updates\n\n @property\n def weights(self):\n \"\"\"Returns the list of all layer variables/weights.\n\n Returns:\n A list of variables.\n \"\"\"\n return self._dedup_weights(self._undeduplicated_weights)\n\n @property\n def _undeduplicated_weights(self):\n \"\"\"Returns the undeduplicated list of all layer variables/weights.\"\"\"\n self._assert_weights_created()\n weights = []\n for layer in self._layers:\n weights += layer.weights\n weights += (self._trainable_weights + self._non_trainable_weights)\n return weights\n\n @property\n @tracking.cached_per_instance\n def _should_compute_mask(self):\n return self._is_graph_network and super(Network, self)._should_compute_mask\n\n def compute_mask(self, inputs, mask):\n if not self._is_graph_network:\n return None\n\n # TODO(omalleyt): b/123540974 This function is not really safe to call\n # by itself because it will duplicate any updates and losses in graph\n # mode by `call`ing the Layers again.\n output_tensors = self._run_internal_graph(inputs, mask=mask)\n return nest.map_structure(lambda t: t._keras_mask, output_tensors)\n\n @property\n def layers(self):\n return list(\n trackable_layer_utils.filter_empty_layer_containers(self._layers))\n\n def get_layer(self, name=None, index=None):\n \"\"\"Retrieves a layer based on either its name (unique) or index.\n\n If `name` and `index` are both provided, `index` will take precedence.\n Indices are based on order of horizontal graph traversal (bottom-up).\n\n Arguments:\n name: String, name of layer.\n index: Integer, index of layer.\n\n Returns:\n A layer instance.\n\n Raises:\n ValueError: In case of invalid layer name or index.\n \"\"\"\n # TODO(fchollet): We could build a dictionary based on layer names\n # since they are constant, but we have not done that yet.\n if index is not None:\n if len(self.layers) <= index:\n raise ValueError('Was asked to retrieve layer at index ' + str(index) +\n ' but model only has ' + str(len(self.layers)) +\n ' layers.')\n else:\n return self.layers[index]\n else:\n if not name:\n raise ValueError('Provide either a layer name or layer index.')\n for layer in self.layers:\n if layer.name == name:\n return layer\n raise ValueError('No such layer: ' + name)\n\n @property\n def trainable_weights(self):\n self._assert_weights_created()\n return self._dedup_weights(\n trackable_layer_utils.gather_trainable_weights(\n trainable=self.trainable,\n sub_layers=self._layers,\n extra_variables=self._trainable_weights))\n\n @property\n def non_trainable_weights(self):\n self._assert_weights_created()\n return self._dedup_weights(\n trackable_layer_utils.gather_non_trainable_weights(\n trainable=self.trainable,\n sub_layers=self._layers,\n extra_variables=self._non_trainable_weights +\n self._trainable_weights))\n\n @property\n def input_spec(self):\n \"\"\"Gets the network's input specs.\n\n Returns:\n A list of `InputSpec` instances (one per input to the model)\n or a single instance if the model has only one input.\n \"\"\"\n return\n\n @base_layer_utils.default\n def build(self, input_shape):\n \"\"\"Builds the model based on input shapes received.\n\n This is to be used for subclassed models, which do not know at instantiation\n time what their inputs look like.\n\n This method only exists 
for users who want to call `model.build()` in a\n standalone way (as a substitute for calling the model on real data to\n build it). It will never be called by the framework (and thus it will\n never throw unexpected errors in an unrelated workflow).\n\n Args:\n input_shape: Single tuple, TensorShape, or list of shapes, where shapes\n are tuples, integers, or TensorShapes.\n\n Raises:\n ValueError:\n 1. In case of invalid user-provided data (not of type tuple,\n list, or TensorShape).\n 2. If the model requires call arguments that are agnostic\n to the input shapes (positional or kwarg in call signature).\n 3. If not all layers were properly built.\n 4. If float type inputs are not supported within the layers.\n\n In each of these cases, the user should build their model by calling it\n on real tensor data.\n \"\"\"\n if self._is_graph_network:\n super(Network, self).build(input_shape)\n return\n\n # If subclass network\n if input_shape is None:\n raise ValueError('Input shape must be defined when calling build on a '\n 'model subclass network.')\n valid_types = (tuple, list, tensor_shape.TensorShape)\n if not isinstance(input_shape, valid_types):\n raise ValueError('Specified input shape is not one of the valid types. '\n 'Please specify a batch input shape of type tuple or '\n 'list of input shapes. User provided '\n 'input type: {}'.format(type(input_shape)))\n\n if input_shape and not self.inputs:\n # We create placeholders for the `None`s in the shape and build the model\n # in a Graph. Since tf.Variable is compatible with both eager execution\n # and graph building, the variables created after building the model in\n # a Graph are still valid when executing eagerly.\n if context.executing_eagerly():\n graph = func_graph.FuncGraph('build_graph')\n else:\n graph = backend.get_graph()\n with graph.as_default():\n if isinstance(input_shape, list):\n x = [base_layer_utils.generate_placeholders_from_shape(shape)\n for shape in input_shape]\n elif isinstance(input_shape, dict):\n x = {\n k: base_layer_utils.generate_placeholders_from_shape(shape)\n for k, shape in input_shape.items()\n }\n else:\n x = base_layer_utils.generate_placeholders_from_shape(input_shape)\n\n kwargs = {}\n call_signature = self._call_full_argspec\n call_args = call_signature.args\n # Exclude `self`, `inputs`, and any argument with a default value.\n if len(call_args) > 2:\n if call_signature.defaults:\n call_args = call_args[2:-len(call_signature.defaults)]\n else:\n call_args = call_args[2:]\n for arg in call_args:\n if arg == 'training':\n # Case where `training` is a positional arg with no default.\n kwargs['training'] = False\n else:\n # Has invalid call signature with unknown positional arguments.\n raise ValueError(\n 'Currently, you cannot build your model if it has '\n 'positional or keyword arguments that are not '\n 'inputs to the model, but are required for its '\n '`call` method. Instead, in order to instantiate '\n 'and build your model, `call` your model on real '\n 'tensor data with all expected call arguments.')\n elif len(call_args) < 2:\n # Signature without `inputs`.\n raise ValueError('You can only call `build` on a model if its `call` '\n 'method accepts an `inputs` argument.')\n try:\n self.call(x, **kwargs)\n except (errors.InvalidArgumentError, TypeError):\n raise ValueError('You cannot build your model by calling `build` '\n 'if your layers do not support float type inputs. 
'\n 'Instead, in order to instantiate and build your '\n 'model, `call` your model on real tensor data (of '\n 'the correct dtype).')\n\n super(Network, self).build(input_shape)\n\n def call(self, inputs, training=None, mask=None):\n \"\"\"Calls the model on new inputs.\n\n In this case `call` just reapplies\n all ops in the graph to the new inputs\n (e.g. build a new computational graph from the provided inputs).\n\n Arguments:\n inputs: A tensor or list of tensors.\n training: Boolean or boolean scalar tensor, indicating whether to run\n the `Network` in training mode or inference mode.\n mask: A mask or list of masks. A mask can be\n either a tensor or None (no mask).\n\n Returns:\n A tensor if there is a single output, or\n a list of tensors if there are more than one outputs.\n \"\"\"\n if not self._is_graph_network:\n raise NotImplementedError('When subclassing the `Model` class, you should'\n ' implement a `call` method.')\n\n return self._run_internal_graph(\n inputs, training=training, mask=mask,\n convert_kwargs_to_constants=base_layer_utils.call_context().saving)\n\n def compute_output_shape(self, input_shape):\n if not self._is_graph_network:\n return super(Network, self).compute_output_shape(input_shape)\n\n # Convert any shapes in tuple format to TensorShapes.\n input_shape = tf_utils.convert_shapes(input_shape, to_tuples=False)\n\n if len(nest.flatten(input_shape)) != len(nest.flatten(self._input_layers)):\n raise ValueError('Invalid input_shape argument ' + str(input_shape) +\n ': model has ' + str(len(self._input_layers)) +\n ' tensor inputs.')\n\n # Use the tuple of TensorShape as the cache key, since tuple is hashable\n # and can be used as hash key.\n cache_key = tuple(tf_utils.convert_shapes(input_shape, to_tuples=True))\n if cache_key in self._output_shape_cache:\n # Cache hit. 
Return shapes as TensorShapes.\n return self._output_shape_cache[cache_key]\n\n layers_to_output_shapes = {}\n for layer, shape in zip(self._input_layers, nest.flatten(input_shape)):\n # It's an input layer: then `compute_output_shape` is identity,\n # and there is only one node and one tensor..\n shape_key = layer.name + '_0_0'\n layers_to_output_shapes[shape_key] = shape\n\n depth_keys = list(self._nodes_by_depth.keys())\n depth_keys.sort(reverse=True)\n # Iterate over nodes, by depth level.\n if len(depth_keys) > 1:\n for depth in depth_keys:\n nodes = self._nodes_by_depth[depth]\n for node in nodes:\n # This is always a single layer, never a list.\n layer = node.outbound_layer\n if layer in self._input_layers:\n # We've already covered the input layers\n # a few lines above.\n continue\n # Potentially redundant list,\n # same size as node.input_tensors.\n layer_input_shapes = []\n for inbound_layer, node_id, tensor_id, _ in node.iterate_inbound():\n input_layer_key = inbound_layer.name + '_%s_%s' % (node_id,\n tensor_id)\n layer_input_shapes.append(layers_to_output_shapes[input_layer_key])\n layer_input_shapes = nest.pack_sequence_as(node.inbound_layers,\n layer_input_shapes)\n # Layers expect shapes to be tuples for `compute_output_shape`.\n layer_input_shapes = tf_utils.convert_shapes(\n layer_input_shapes, to_tuples=True)\n layer_output_shapes = layer.compute_output_shape(layer_input_shapes)\n # Convert back to TensorShapes.\n layer_output_shapes = tf_utils.convert_shapes(\n layer_output_shapes, to_tuples=False)\n\n node_index = layer._inbound_nodes.index(node) # pylint: disable=protected-access\n for j, shape in enumerate(nest.flatten(layer_output_shapes)):\n shape_key = layer.name + '_%s_%s' % (node_index, j)\n layers_to_output_shapes[shape_key] = shape\n\n # Read final output shapes from layers_to_output_shapes.\n output_shapes = []\n for i in range(len(self._output_layers)):\n layer, node_index, tensor_index = self._output_coordinates[i]\n shape_key = layer.name + '_%s_%s' % (node_index, tensor_index)\n output_shapes.append(layers_to_output_shapes[shape_key])\n output_shapes = nest.pack_sequence_as(self._nested_outputs, output_shapes)\n # Store in cache.\n self._output_shape_cache[cache_key] = output_shapes\n\n # Return shapes as TensorShapes.\n return output_shapes\n\n def _run_internal_graph(self, inputs, training=None, mask=None,\n convert_kwargs_to_constants=False):\n \"\"\"Computes output tensors for new inputs.\n\n # Note:\n - Can be run on non-Keras tensors.\n\n Arguments:\n inputs: Tensor or nested structure of Tensors.\n training: Boolean learning phase.\n mask: (Optional) Tensor or nested structure of Tensors.\n convert_kwargs_to_constants: Whether to convert Tensor kwargs to\n constants. This is used when tracing the model call function during\n saving to ensure that external tensors aren't captured.\n\n Returns:\n Two lists: output_tensors, output_masks\n \"\"\"\n # Note: masking support is relevant mainly for Keras.\n # It cannot be factored out without having the fully reimplement the network\n # calling logic on the Keras side. We choose to incorporate it in\n # Network because 1) it may be useful to fully support in tf.layers in\n # the future and 2) Keras is a major user of Network. If you don't\n # use masking, it does not interfere with regular behavior at all and you\n # can ignore it.\n\n if isinstance(inputs, dict) and isinstance(self._nested_inputs,\n (list, tuple)):\n # Backwards compat: Allows passing a dict to a Model constructed with a\n # list. 
Matches dict keys to input names.\n inputs = [\n inputs[inp._keras_history.layer.name] for inp in self._nested_inputs\n ]\n else:\n inputs = nest.flatten(inputs)\n\n if mask is None:\n masks = [None for _ in range(len(inputs))]\n else:\n masks = nest.flatten(mask)\n\n for input_t, mask in zip(inputs, masks):\n input_t._keras_mask = mask\n\n # Dictionary mapping reference tensors to computed tensors.\n tensor_dict = {}\n\n for x, y in zip(self.inputs, inputs):\n # Set shape and dtype based on `keras.Input`s.\n if isinstance(x, ops.Tensor) and isinstance(y, ops.Tensor):\n try:\n y.set_shape(y.shape.merge_with(x.shape))\n except ValueError:\n logging.warning(\n 'Model was constructed with shape {} for input {}, but it was '\n 're-called on a Tensor with incompatible shape {}.'\n .format(x, x.shape, y.shape))\n if isinstance(x, (ops.Tensor, composite_tensor.CompositeTensor)):\n y = math_ops.cast(y, dtype=x.dtype)\n\n x_id = str(id(x))\n tensor_dict[x_id] = [y] * self._tensor_usage_count[x_id]\n\n depth_keys = list(self._nodes_by_depth.keys())\n depth_keys.sort(reverse=True)\n # Ignore the InputLayers when computing the graph.\n depth_keys = depth_keys[1:]\n\n for depth in depth_keys:\n nodes = self._nodes_by_depth[depth]\n for node in nodes:\n # This is always a single layer, never a list.\n layer = node.outbound_layer\n\n if all(\n str(id(tensor)) in tensor_dict\n for tensor in nest.flatten(node.input_tensors)):\n\n # Call layer (reapplying ops to new inputs).\n computed_tensors = nest.map_structure(\n lambda t: tensor_dict[str(id(t))].pop(), node.input_tensors)\n\n # Ensure `training` arg propagation if applicable.\n kwargs = copy.copy(node.arguments) if node.arguments else {}\n if convert_kwargs_to_constants:\n kwargs = _map_tensors_to_constants(kwargs)\n\n argspec = self._layer_call_argspecs[layer].args\n if 'training' in argspec:\n kwargs.setdefault('training', training)\n if (type(kwargs['training']) is ops.Tensor and # pylint: disable=unidiomatic-typecheck\n any([kwargs['training'] is x\n for x in backend._GRAPH_LEARNING_PHASES.values()])):\n kwargs['training'] = training # Materialize placeholder.\n\n # Map Keras tensors in kwargs to their computed value.\n def _map_tensor_if_from_keras_layer(t):\n if (isinstance(t,\n (ops.Tensor, composite_tensor.CompositeTensor)) and\n hasattr(t, '_keras_history')):\n t_id = str(id(t))\n return tensor_dict[t_id].pop()\n return t\n\n kwargs = nest.map_structure(_map_tensor_if_from_keras_layer, kwargs)\n\n # Compute outputs.\n output_tensors = layer(computed_tensors, **kwargs)\n\n # Update tensor_dict.\n for x, y in zip(\n nest.flatten(node.output_tensors), nest.flatten(output_tensors)):\n x_id = str(id(x))\n tensor_dict[x_id] = [y] * self._tensor_usage_count[x_id]\n\n output_tensors = []\n output_shapes = []\n for x in self.outputs:\n assert str(id(x)) in tensor_dict, 'Could not compute output ' + str(x)\n tensor = tensor_dict[str(id(x))].pop()\n output_shapes.append(x.shape)\n output_tensors.append(tensor)\n\n if output_shapes is not None:\n input_shapes = [x.shape for x in inputs]\n cache_key = tuple(tf_utils.convert_shapes(input_shapes, to_tuples=True))\n self._output_shape_cache[cache_key] = nest.pack_sequence_as(\n self._nested_outputs, output_shapes)\n\n output_tensors = nest.pack_sequence_as(self._nested_outputs, output_tensors)\n return output_tensors\n\n def get_config(self):\n if not self._is_graph_network:\n raise NotImplementedError\n return copy.deepcopy(get_network_config(self))\n\n @classmethod\n def from_config(cls, config, 
custom_objects=None):\n \"\"\"Instantiates a Model from its config (output of `get_config()`).\n\n Arguments:\n config: Model config dictionary.\n custom_objects: Optional dictionary mapping names\n (strings) to custom classes or functions to be\n considered during deserialization.\n\n Returns:\n A model instance.\n\n Raises:\n ValueError: In case of improperly formatted config dict.\n \"\"\"\n input_tensors, output_tensors, created_layers = reconstruct_from_config(\n config, custom_objects)\n model = cls(inputs=input_tensors, outputs=output_tensors,\n name=config.get('name'))\n connect_ancillary_layers(model, created_layers)\n return model\n\n def save(self,\n filepath,\n overwrite=True,\n include_optimizer=True,\n save_format=None,\n signatures=None,\n options=None):\n \"\"\"Saves the model to Tensorflow SavedModel or a single HDF5 file.\n\n The savefile includes:\n - The model architecture, allowing to re-instantiate the model.\n - The model weights.\n - The state of the optimizer, allowing to resume training\n exactly where you left off.\n\n This allows you to save the entirety of the state of a model\n in a single file.\n\n Saved models can be reinstantiated via `keras.models.load_model`.\n The model returned by `load_model` is a compiled model ready to be used\n (unless the saved model was never compiled in the first place).\n\n Models built with the Sequential and Functional API can be saved to both the\n HDF5 and SavedModel formats. Subclassed models can only be saved with the\n SavedModel format.\n\n Note that the model weights may have different scoped names after being\n loaded. Scoped names include the model/layer names, such as\n \"dense_1/kernel:0\"`. It is recommended that you use the layer properties to\n access specific variables, e.g. `model.get_layer(\"dense_1\").kernel`.\n\n Arguments:\n filepath: String, path to SavedModel or H5 file to save the model.\n overwrite: Whether to silently overwrite any existing file at the\n target location, or provide the user with a manual prompt.\n include_optimizer: If True, save optimizer's state together.\n save_format: Either 'tf' or 'h5', indicating whether to save the model\n to Tensorflow SavedModel or HDF5. Defaults to 'tf' in TF 2.X, and\n 'h5' in TF 1.X.\n signatures: Signatures to save with the SavedModel. Applicable to the\n 'tf' format only. 
Please see the `signatures` argument in\n `tf.saved_model.save` for details.\n options: Optional `tf.saved_model.SaveOptions` object that specifies\n options for saving to SavedModel.\n\n Example:\n\n ```python\n from keras.models import load_model\n\n model.save('my_model.h5') # creates a HDF5 file 'my_model.h5'\n del model # deletes the existing model\n\n # returns a compiled model\n # identical to the previous one\n model = load_model('my_model.h5')\n ```\n \"\"\"\n save.save_model(self, filepath, overwrite, include_optimizer, save_format,\n signatures, options)\n\n def save_weights(self, filepath, overwrite=True, save_format=None):\n \"\"\"Saves all layer weights.\n\n Either saves in HDF5 or in TensorFlow format based on the `save_format`\n argument.\n\n When saving in HDF5 format, the weight file has:\n - `layer_names` (attribute), a list of strings\n (ordered names of model layers).\n - For every layer, a `group` named `layer.name`\n - For every such layer group, a group attribute `weight_names`,\n a list of strings\n (ordered names of weights tensor of the layer).\n - For every weight in the layer, a dataset\n storing the weight value, named after the weight tensor.\n\n When saving in TensorFlow format, all objects referenced by the network are\n saved in the same format as `tf.train.Checkpoint`, including any `Layer`\n instances or `Optimizer` instances assigned to object attributes. For\n networks constructed from inputs and outputs using `tf.keras.Model(inputs,\n outputs)`, `Layer` instances used by the network are tracked/saved\n automatically. For user-defined classes which inherit from `tf.keras.Model`,\n `Layer` instances must be assigned to object attributes, typically in the\n constructor. See the documentation of `tf.train.Checkpoint` and\n `tf.keras.Model` for details.\n\n While the formats are the same, do not mix `save_weights` and\n `tf.train.Checkpoint`. Checkpoints saved by `Model.save_weights` should be\n loaded using `Model.load_weights`. Checkpoints saved using\n `tf.train.Checkpoint.save` should be restored using the corresponding\n `tf.train.Checkpoint.restore`. Prefer `tf.train.Checkpoint` over\n `save_weights` for training checkpoints.\n\n The TensorFlow format matches objects and variables by starting at a root\n object, `self` for `save_weights`, and greedily matching attribute\n names. For `Model.save` this is the `Model`, and for `Checkpoint.save` this\n is the `Checkpoint` even if the `Checkpoint` has a model attached. This\n means saving a `tf.keras.Model` using `save_weights` and loading into a\n `tf.train.Checkpoint` with a `Model` attached (or vice versa) will not match\n the `Model`'s variables. See the [guide to training\n checkpoints](https://www.tensorflow.org/guide/checkpoint) for details\n on the TensorFlow format.\n\n Arguments:\n filepath: String, path to the file to save the weights to. When saving\n in TensorFlow format, this is the prefix used for checkpoint files\n (multiple files are generated). Note that the '.h5' suffix causes\n weights to be saved in HDF5 format.\n overwrite: Whether to silently overwrite any existing file at the\n target location, or provide the user with a manual prompt.\n save_format: Either 'tf' or 'h5'. A `filepath` ending in '.h5' or\n '.keras' will default to HDF5 if `save_format` is `None`. 
Otherwise\n `None` defaults to 'tf'.\n\n Raises:\n ImportError: If h5py is not available when attempting to save in HDF5\n format.\n ValueError: For invalid/unknown format arguments.\n \"\"\"\n self._assert_weights_created()\n filepath_is_h5 = _is_hdf5_filepath(filepath)\n if save_format is None:\n if filepath_is_h5:\n save_format = 'h5'\n else:\n save_format = 'tf'\n else:\n user_format = save_format.lower().strip()\n if user_format in ('tensorflow', 'tf'):\n save_format = 'tf'\n elif user_format in ('hdf5', 'h5', 'keras'):\n save_format = 'h5'\n else:\n raise ValueError(\n 'Unknown format \"%s\". Was expecting one of {\"tf\", \"h5\"}.' % (\n save_format,))\n if save_format == 'tf' and filepath_is_h5:\n raise ValueError(\n ('save_weights got save_format=\"tf\"/\"tensorflow\", but the '\n 'filepath (\"%s\") looks like an HDF5 file. Omit the \".h5\"/\".keras\" '\n 'when saving in TensorFlow format.')\n % filepath)\n\n if save_format == 'h5' and h5py is None:\n raise ImportError(\n '`save_weights` requires h5py when saving in hdf5.')\n if save_format == 'tf':\n check_filepath = filepath + '.index'\n else:\n check_filepath = filepath\n # If file exists and should not be overwritten:\n if not overwrite and os.path.isfile(check_filepath):\n proceed = ask_to_proceed_with_overwrite(check_filepath)\n if not proceed:\n return\n if save_format == 'h5':\n with h5py.File(filepath, 'w') as f:\n hdf5_format.save_weights_to_hdf5_group(f, self.layers)\n else:\n if context.executing_eagerly():\n session = None\n else:\n session = backend.get_session()\n optimizer = getattr(self, 'optimizer', None)\n if (optimizer\n and not isinstance(optimizer, trackable.Trackable)):\n logging.warning(\n ('This model was compiled with a Keras optimizer (%s) but is being '\n 'saved in TensorFlow format with `save_weights`. The model\\'s '\n 'weights will be saved, but unlike with TensorFlow optimizers in '\n 'the TensorFlow format the optimizer\\'s state will not be '\n 'saved.\\n\\nConsider using a TensorFlow optimizer from `tf.train`.')\n % (optimizer,))\n self._trackable_saver.save(filepath, session=session)\n # Record this checkpoint so it's visible from tf.train.latest_checkpoint.\n checkpoint_management.update_checkpoint_state_internal(\n save_dir=os.path.dirname(filepath),\n model_checkpoint_path=filepath,\n save_relative_paths=True,\n all_model_checkpoint_paths=[filepath])\n\n def load_weights(self, filepath, by_name=False, skip_mismatch=False):\n \"\"\"Loads all layer weights, either from a TensorFlow or an HDF5 weight file.\n\n If `by_name` is False weights are loaded based on the network's\n topology. This means the architecture should be the same as when the weights\n were saved. Note that layers that don't have weights are not taken into\n account in the topological ordering, so adding or removing layers is fine as\n long as they don't have weights.\n\n If `by_name` is True, weights are loaded into layers only if they share the\n same name. This is useful for fine-tuning or transfer-learning models where\n some of the layers have changed.\n\n Only topological loading (`by_name=False`) is supported when loading weights\n from the TensorFlow format. 
Note that topological loading differs slightly\n between TensorFlow and HDF5 formats for user-defined classes inheriting from\n `tf.keras.Model`: HDF5 loads based on a flattened list of weights, while the\n TensorFlow format loads based on the object-local names of attributes to\n which layers are assigned in the `Model`'s constructor.\n\n Arguments:\n filepath: String, path to the weights file to load. For weight files in\n TensorFlow format, this is the file prefix (the same as was passed\n to `save_weights`).\n by_name: Boolean, whether to load weights by name or by topological\n order. Only topological loading is supported for weight files in\n TensorFlow format.\n skip_mismatch: Boolean, whether to skip loading of layers where there is\n a mismatch in the number of weights, or a mismatch in the shape of\n the weight (only valid when `by_name=True`).\n\n Returns:\n When loading a weight file in TensorFlow format, returns the same status\n object as `tf.train.Checkpoint.restore`. When graph building, restore\n ops are run automatically as soon as the network is built (on first call\n for user-defined classes inheriting from `Model`, immediately if it is\n already built).\n\n When loading weights in HDF5 format, returns `None`.\n\n Raises:\n ImportError: If h5py is not available and the weight file is in HDF5\n format.\n ValueError: If `skip_mismatch` is set to `True` when `by_name` is\n `False`.\n \"\"\"\n\n if skip_mismatch and not by_name:\n raise ValueError(\n 'When calling model.load_weights, skip_mismatch can only be set to '\n 'True when by_name is True.')\n\n if _is_hdf5_filepath(filepath):\n save_format = 'h5'\n else:\n try:\n py_checkpoint_reader.NewCheckpointReader(filepath)\n save_format = 'tf'\n except errors_impl.DataLossError:\n # The checkpoint is not readable in TensorFlow format. Try HDF5.\n save_format = 'h5'\n if save_format == 'tf':\n status = self._trackable_saver.restore(filepath)\n if by_name:\n raise NotImplementedError(\n 'Weights may only be loaded based on topology into Models when '\n 'loading TensorFlow-formatted weights (got by_name=True to '\n 'load_weights).')\n if not context.executing_eagerly():\n session = backend.get_session()\n # Restore existing variables (if any) immediately, and set up a\n # streaming restore for any variables created in the future.\n trackable_utils.streaming_restore(status=status, session=session)\n status.assert_nontrivial_match()\n return status\n if h5py is None:\n raise ImportError(\n '`load_weights` requires h5py when loading weights from HDF5.')\n if self._is_graph_network and not self.built:\n raise NotImplementedError(\n 'Unable to load weights saved in HDF5 format into a subclassed '\n 'Model which has not created its variables yet. 
Call the Model '\n 'first, then load the weights.')\n self._assert_weights_created()\n with h5py.File(filepath, 'r') as f:\n if 'layer_names' not in f.attrs and 'model_weights' in f:\n f = f['model_weights']\n if by_name:\n hdf5_format.load_weights_from_hdf5_group_by_name(\n f, self.layers, skip_mismatch=skip_mismatch)\n else:\n hdf5_format.load_weights_from_hdf5_group(f, self.layers)\n\n def _updated_config(self):\n \"\"\"Util shared between different serialization methods.\n\n Returns:\n Model config with Keras version information added.\n \"\"\"\n from tensorflow.python.keras import __version__ as keras_version # pylint: disable=g-import-not-at-top\n\n config = self.get_config()\n model_config = {\n 'class_name': self.__class__.__name__,\n 'config': config,\n 'keras_version': keras_version,\n 'backend': backend.backend()\n }\n return model_config\n\n def to_json(self, **kwargs):\n \"\"\"Returns a JSON string containing the network configuration.\n\n To load a network from a JSON save file, use\n `keras.models.model_from_json(json_string, custom_objects={})`.\n\n Arguments:\n **kwargs: Additional keyword arguments\n to be passed to `json.dumps()`.\n\n Returns:\n A JSON string.\n \"\"\"\n model_config = self._updated_config()\n return json.dumps(\n model_config, default=serialization.get_json_type, **kwargs)\n\n def to_yaml(self, **kwargs):\n \"\"\"Returns a yaml string containing the network configuration.\n\n To load a network from a yaml save file, use\n `keras.models.model_from_yaml(yaml_string, custom_objects={})`.\n\n `custom_objects` should be a dictionary mapping\n the names of custom losses / layers / etc to the corresponding\n functions / classes.\n\n Arguments:\n **kwargs: Additional keyword arguments\n to be passed to `yaml.dump()`.\n\n Returns:\n A YAML string.\n\n Raises:\n ImportError: if yaml module is not found.\n \"\"\"\n if yaml is None:\n raise ImportError(\n 'Requires yaml module installed (`pip install pyyaml`).')\n return yaml.dump(self._updated_config(), **kwargs)\n\n def summary(self, line_length=None, positions=None, print_fn=None):\n \"\"\"Prints a string summary of the network.\n\n Arguments:\n line_length: Total length of printed lines\n (e.g. set this to adapt the display to different\n terminal window sizes).\n positions: Relative or absolute positions of log elements\n in each line. If not provided,\n defaults to `[.33, .55, .67, 1.]`.\n print_fn: Print function to use. Defaults to `print`.\n It will be called on each line of the summary.\n You can set it to a custom function\n in order to capture the string summary.\n\n Raises:\n ValueError: if `summary()` is called before the model is built.\n \"\"\"\n if not self.built:\n raise ValueError('This model has not yet been built. '\n 'Build the model first by calling `build()` or calling '\n '`fit()` with some data, or specify '\n 'an `input_shape` argument in the first layer(s) for '\n 'automatic build.')\n layer_utils.print_summary(self,\n line_length=line_length,\n positions=positions,\n print_fn=print_fn)\n\n def _validate_graph_inputs_and_outputs(self):\n \"\"\"Validates the inputs and outputs of a Graph Network.\"\"\"\n # Check for redundancy in inputs.\n if len({id(i) for i in self.inputs}) != len(self.inputs):\n raise ValueError('The list of inputs passed to the model '\n 'is redundant. 
'\n 'All inputs should only appear once.'\n ' Found: ' + str(self.inputs))\n\n for x in self.inputs:\n # Check that x has appropriate `_keras_history` metadata.\n if not hasattr(x, '_keras_history'):\n cls_name = self.__class__.__name__\n raise ValueError('Input tensors to a ' + cls_name + ' ' +\n 'must come from `tf.keras.Input`. '\n 'Received: ' + str(x) +\n ' (missing previous layer metadata).')\n # Check that x is an input tensor.\n # pylint: disable=protected-access\n layer = x._keras_history.layer\n if len(layer._inbound_nodes) > 1 or (\n layer._inbound_nodes and layer._inbound_nodes[0].inbound_layers):\n cls_name = self.__class__.__name__\n logging.warning(cls_name + ' inputs must come from '\n '`tf.keras.Input` (thus holding past layer metadata), '\n 'they cannot be the output of '\n 'a previous non-Input layer. '\n 'Here, a tensor specified as '\n 'input to \"' + self.name + '\" was not an Input tensor, '\n 'it was generated by layer ' + layer.name + '.\\n'\n 'Note that input tensors are '\n 'instantiated via `tensor = tf.keras.Input(shape)`.\\n'\n 'The tensor that caused the issue was: ' + str(x.name))\n if isinstance(x, ragged_tensor.RaggedTensor):\n self._supports_ragged_inputs = True\n\n # Check compatibility of batch sizes of Input Layers.\n input_batch_sizes = [\n training_utils.get_static_batch_size(x._keras_history.layer)\n for x in self.inputs\n ]\n consistent_batch_size = None\n for batch_size in input_batch_sizes:\n if batch_size is not None:\n if (consistent_batch_size is not None and\n batch_size != consistent_batch_size):\n raise ValueError('The specified batch sizes of the Input Layers'\n ' are incompatible. Found batch sizes: {}'.format(\n input_batch_sizes))\n consistent_batch_size = batch_size\n\n for x in self.outputs:\n if not hasattr(x, '_keras_history'):\n cls_name = self.__class__.__name__\n raise ValueError('Output tensors to a ' + cls_name + ' must be '\n 'the output of a TensorFlow `Layer` '\n '(thus holding past layer metadata). Found: ' + str(x))\n\n def _insert_layers(self, layers, relevant_nodes=None):\n \"\"\"Inserts Layers into the Network after Network creation.\n\n This is only valid for Keras Graph Networks. Layers added via this function\n will be included in the `call` computation and `get_config` of this Network.\n They will not be added to the Network's outputs.\n\n\n Arguments:\n layers: Arbitrary nested structure of Layers. Layers must be reachable\n from one or more of the `keras.Input` Tensors that correspond to this\n Network's inputs.\n relevant_nodes: Nodes from the Layers that should be considered part of\n this Network. If `None`, all Nodes will be considered part of this\n Network.\n\n Raises:\n ValueError: If the layers depend on `Input`s not found in this Model.\n \"\"\"\n layers = nest.flatten(layers)\n tf_utils.assert_no_legacy_layers(layers)\n node_to_depth = {}\n for depth, nodes in self._nodes_by_depth.items():\n node_to_depth.update({node: depth for node in nodes})\n # The nodes of these Layers that are relevant to this Network. 
If not\n # provided, assume all Nodes are relevant\n if not relevant_nodes:\n relevant_nodes = nest.flatten([layer._inbound_nodes for layer in layers])\n network_nodes = set(relevant_nodes + list(node_to_depth.keys()))\n\n def _get_min_depth(node):\n \"\"\"Gets the minimum depth at which node can be computed.\"\"\"\n min_depth = 0\n for layer, node_id, _, _ in node.iterate_inbound(include_arguments=True):\n inbound_node = layer._inbound_nodes[node_id]\n if inbound_node in node_to_depth:\n min_depth = min(min_depth, node_to_depth[inbound_node])\n elif inbound_node not in network_nodes:\n continue\n else:\n # Previous relevant nodes haven't been processed yet.\n return None\n # New node is one shallower than its shallowest input.\n return min_depth - 1\n\n # Insert nodes into `_nodes_by_depth` and other node attrs.\n unprocessed_nodes = copy.copy(relevant_nodes)\n i = 0\n while unprocessed_nodes:\n i += 1\n # Do a sanity check. This can occur if `Input`s from outside this Model\n # are being relied on.\n if i > 10000:\n raise ValueError('Layers could not be added due to missing '\n 'dependencies.')\n\n node = unprocessed_nodes.pop(0)\n depth = _get_min_depth(node)\n if depth is None: # Defer until inbound nodes are processed.\n unprocessed_nodes.append(node)\n continue\n node_key = _make_node_key(node.outbound_layer.name,\n node.outbound_layer._inbound_nodes.index(node))\n if node_key not in self._network_nodes:\n node_to_depth[node] = depth\n self._network_nodes.add(node_key)\n self._nodes_by_depth[depth].append(node)\n\n # Insert layers and update other layer attrs.\n layer_set = set(self._layers)\n deferred_layers = []\n for layer in layers:\n if layer not in layer_set:\n self._layers.append(layer)\n deferred_layers.append(layer)\n self._layer_call_argspecs[layer] = tf_inspect.getfullargspec(layer.call)\n\n # This allows the added layer to broadcast mutations to the current\n # layer, which is necessary to ensure cache correctness.\n layer._attribute_sentinel.add_parent(self._attribute_sentinel)\n layer_set.add(layer)\n self._handle_deferred_layer_dependencies(deferred_layers)\n\n self._compute_tensor_usage_count()\n\n def _compute_tensor_usage_count(self):\n \"\"\"Compute the #. of tensor usages for all the output tensors of layers.\n\n The computed tensor usage count is saved as `self._tensor_usage_count`. 
This\n is later used for saving memory in eager computation by releasing\n no-longer-needed tensors as early as possible.\n \"\"\"\n tensor_usage_count = collections.Counter()\n available_tensors = set(str(id(tensor)) for tensor in self.inputs)\n\n depth_keys = list(self._nodes_by_depth.keys())\n depth_keys.sort(reverse=True)\n depth_keys = depth_keys[1:]\n\n for depth in depth_keys:\n for node in self._nodes_by_depth[depth]:\n input_tensors = {\n str(id(tensor)) for tensor in nest.flatten(node.input_tensors)\n }\n if input_tensors.issubset(available_tensors):\n kwargs = copy.copy(node.arguments) if node.arguments else {}\n\n for tensor in nest.flatten(kwargs):\n if (isinstance(tensor,\n (ops.Tensor, composite_tensor.CompositeTensor)) and\n hasattr(tensor, '_keras_history')):\n tensor_usage_count[str(id(tensor))] += 1\n\n for tensor in nest.flatten(node.input_tensors):\n tensor_usage_count[str(id(tensor))] += 1\n\n for output_tensor in nest.flatten(node.output_tensors):\n available_tensors.add(str(id(output_tensor)))\n\n for tensor in self.outputs:\n tensor_usage_count[str(id(tensor))] += 1\n\n self._tensor_usage_count = tensor_usage_count\n\n def _assert_weights_created(self):\n \"\"\"Asserts that all the weights for the network have been created.\n\n For a non-dynamic network, the weights must already be created after the\n layer has been called. For a dynamic network, the exact list of weights can\n never be known for certain since it may change at any time during execution.\n\n We run this check right before accessing weights or getting the Numpy value\n for the current weights. Otherwise, if the layer has never been called,\n the user would just get an empty list, which is misleading.\n\n Raises:\n ValueError: if the weights of the network has not yet been created.\n \"\"\"\n if self.dynamic:\n return\n if (not self._is_graph_network and\n 'build' in self.__class__.__dict__ and\n not self.built):\n # For any model that has customized build() method but hasn't\n # been invoked yet, this will cover both sequential and subclass model.\n raise ValueError('Weights for model %s have not yet been created. '\n 'Weights are created when the Model is first called on '\n 'inputs or `build()` is called with an `input_shape`.' 
%\n self.name)\n\n def _graph_network_add_loss(self, symbolic_loss):\n new_nodes, new_layers = _map_subgraph_network(self.inputs, [symbolic_loss])\n # Losses must be keyed on inputs no matter what in order to be supported in\n # DistributionStrategy.\n add_loss_layer = base_layer.AddLoss(\n unconditional=False, dtype=symbolic_loss.dtype)\n add_loss_layer(symbolic_loss)\n new_nodes.extend(add_loss_layer.inbound_nodes)\n new_layers.append(add_loss_layer)\n self._insert_layers(new_layers, new_nodes)\n\n def _graph_network_add_metric(self, value, aggregation, name):\n new_nodes, new_layers = _map_subgraph_network(self.inputs, [value])\n add_metric_layer = base_layer.AddMetric(\n aggregation, name, dtype=value.dtype)\n add_metric_layer(value)\n new_nodes.extend(add_metric_layer.inbound_nodes)\n new_layers.append(add_metric_layer)\n self._insert_layers(new_layers, new_nodes)\n\n @trackable.no_automatic_dependency_tracking\n def _set_save_spec(self, inputs):\n if self._saved_model_inputs_spec is not None:\n return # Already set.\n\n input_names = self.input_names\n if not input_names:\n input_names = compile_utils.create_pseudo_input_names(inputs)\n\n flat_inputs = nest.flatten(inputs)\n specs = []\n for name, tensor in zip(input_names, flat_inputs):\n specs.append(\n tf_utils.get_tensor_spec(tensor, dynamic_batch=False, name=name))\n specs = nest.pack_sequence_as(inputs, specs)\n\n self._saved_model_inputs_spec = specs\n\n def _get_save_spec(self, dynamic_batch=True):\n if self._saved_model_inputs_spec is None:\n return None\n\n return nest.map_structure(\n lambda t: tf_utils.get_tensor_spec(t, dynamic_batch=dynamic_batch),\n self._saved_model_inputs_spec)\n\n @property\n def _trackable_saved_model_saver(self):\n return network_serialization.NetworkSavedModelSaver(self)\n\n\ndef _is_hdf5_filepath(filepath):\n return (filepath.endswith('.h5') or filepath.endswith('.keras') or\n filepath.endswith('.hdf5'))\n\n\ndef _make_node_key(layer_name, node_index):\n return layer_name + '_ib-' + str(node_index)\n\n\ndef _map_graph_network(inputs, outputs):\n \"\"\"Validates a network's topology and gather its layers and nodes.\n\n Arguments:\n inputs: List of input tensors.\n outputs: List of outputs tensors.\n\n Returns:\n A tuple `(nodes, nodes_by_depth, layers, layers_by_depth)`.\n - nodes: list of Node instances.\n - nodes_by_depth: dict mapping ints (depth) to lists of node instances.\n - layers: list of Layer instances.\n - layers_by_depth: dict mapping ints (depth) to lists of layer instances.\n\n Raises:\n ValueError: In case the network is not valid (e.g. disconnected graph).\n \"\"\"\n # Network_nodes: set of nodes included in the graph of layers\n # (not all nodes included in the layers are relevant to the current graph).\n network_nodes = set() # ids of all nodes relevant to the Network\n nodes_depths = {} # dict {node: depth value}\n layers_depths = {} # dict {layer: depth value}\n layer_indices = {} # dict {layer: index in traversal}\n nodes_in_decreasing_depth = []\n\n def build_map(tensor,\n finished_nodes,\n nodes_in_progress,\n layer,\n node_index,\n tensor_index):\n \"\"\"Builds a map of the graph of layers.\n\n This recursively updates the map `layer_indices`,\n the list `nodes_in_decreasing_depth` and the set `network_nodes`.\n\n Arguments:\n tensor: Some tensor in a graph.\n finished_nodes: Set of nodes whose subgraphs have been traversed\n completely. Useful to prevent duplicated work.\n nodes_in_progress: Set of nodes that are currently active on the\n recursion stack. 
Useful to detect cycles.\n layer: Layer from which `tensor` comes from. If not provided,\n will be obtained from `tensor._keras_history`.\n node_index: Node index from which `tensor` comes from.\n tensor_index: Tensor_index from which `tensor` comes from.\n\n Raises:\n ValueError: if a cycle is detected.\n \"\"\"\n node = layer._inbound_nodes[node_index] # pylint: disable=protected-access\n\n # Prevent cycles.\n if node in nodes_in_progress:\n raise ValueError('The tensor ' + str(tensor) + ' at layer \"' +\n layer.name + '\" is part of a cycle.')\n\n # Don't repeat work for shared subgraphs\n if node in finished_nodes:\n return\n\n node_key = _make_node_key(layer.name, node_index)\n # Update network_nodes.\n network_nodes.add(node_key)\n\n # Store the traversal order for layer sorting.\n if layer not in layer_indices:\n layer_indices[layer] = len(layer_indices)\n\n nodes_in_progress.add(node)\n\n # Propagate to all previous tensors connected to this node.\n for layer, node_index, tensor_index, tensor in node.iterate_inbound(\n include_arguments=True):\n build_map(tensor, finished_nodes, nodes_in_progress, layer, node_index,\n tensor_index)\n\n finished_nodes.add(node)\n nodes_in_progress.remove(node)\n nodes_in_decreasing_depth.append(node)\n\n finished_nodes = set()\n nodes_in_progress = set()\n for x in outputs:\n layer, node_index, tensor_index = x._keras_history # pylint: disable=protected-access\n build_map(x, finished_nodes, nodes_in_progress,\n layer=layer,\n node_index=node_index,\n tensor_index=tensor_index)\n\n for node in reversed(nodes_in_decreasing_depth):\n # If the depth is not set, the node has no outbound nodes (depth 0).\n depth = nodes_depths.setdefault(node, 0)\n\n # Update the depth of the corresponding layer\n previous_depth = layers_depths.get(node.outbound_layer, 0)\n # If we've seen this layer before at a higher depth,\n # we should use that depth instead of the node depth.\n # This is necessary for shared layers that have inputs at different\n # depth levels in the graph.\n depth = max(depth, previous_depth)\n layers_depths[node.outbound_layer] = depth\n nodes_depths[node] = depth\n\n # Update the depth of inbound nodes.\n # The \"depth\" of a node is the max of the depths\n # of all nodes it is connected to + 1.\n for node_dep in node._get_all_node_dependencies():\n previous_depth = nodes_depths.get(node_dep, 0)\n nodes_depths[node_dep] = max(depth + 1, previous_depth)\n\n # Handle inputs that are not connected to outputs.\n # We do not error out here because the inputs may be used to compute losses\n # and metrics.\n for input_t in inputs:\n input_layer = input_t._keras_history[0]\n if input_layer not in layers_depths:\n layers_depths[input_layer] = 0\n layer_indices[input_layer] = -1\n nodes_depths[input_layer._inbound_nodes[0]] = 0\n network_nodes.add(_make_node_key(input_layer.name, 0))\n\n # Build a dict {depth: list of nodes with this depth}\n nodes_by_depth = collections.defaultdict(list)\n for node, depth in nodes_depths.items():\n nodes_by_depth[depth].append(node)\n\n # Build a dict {depth: list of layers with this depth}\n layers_by_depth = collections.defaultdict(list)\n for layer, depth in layers_depths.items():\n layers_by_depth[depth].append(layer)\n\n # Get sorted list of layer depths.\n depth_keys = list(layers_by_depth.keys())\n depth_keys.sort(reverse=True)\n\n # Set self.layers ordered by depth.\n layers = []\n for depth in depth_keys:\n layers_for_depth = layers_by_depth[depth]\n # Network.layers needs to have a deterministic order:\n # here 
we order them by traversal order.\n layers_for_depth.sort(key=lambda x: layer_indices[x])\n layers.extend(layers_for_depth)\n\n # Get sorted list of node depths.\n depth_keys = list(nodes_by_depth.keys())\n depth_keys.sort(reverse=True)\n\n # Check that all tensors required are computable.\n # computable_tensors: all tensors in the graph\n # that can be computed from the inputs provided.\n computable_tensors = set()\n for x in inputs:\n computable_tensors.add(id(x))\n\n layers_with_complete_input = [] # To provide a better error msg.\n for depth in depth_keys:\n for node in nodes_by_depth[depth]:\n layer = node.outbound_layer\n if layer:\n for x in nest.flatten(node.input_tensors):\n if id(x) not in computable_tensors:\n raise ValueError('Graph disconnected: '\n 'cannot obtain value for tensor ' + str(x) +\n ' at layer \"' + layer.name + '\". '\n 'The following previous layers '\n 'were accessed without issue: ' +\n str(layers_with_complete_input))\n for x in nest.flatten(node.output_tensors):\n computable_tensors.add(id(x))\n layers_with_complete_input.append(layer.name)\n\n # Ensure name unicity, which will be crucial for serialization\n # (since serialized nodes refer to layers by their name).\n all_names = [layer.name for layer in layers]\n for name in all_names:\n if all_names.count(name) != 1:\n raise ValueError('The name \"' + name + '\" is used ' +\n str(all_names.count(name)) + ' times in the model. '\n 'All layer names should be unique.')\n return network_nodes, nodes_by_depth, layers, layers_by_depth\n\n\ndef _map_subgraph_network(inputs, outputs):\n \"\"\"Returns the nodes and layers in the topology from `inputs` to `outputs`.\n\n Args:\n inputs: List of input tensors.\n outputs: List of output tensors.\n\n Returns:\n A tuple of List{Node] and List[Layer].\n \"\"\"\n base_layer_utils.create_keras_history(outputs)\n # Keep only nodes and layers in the topology between inputs and outputs.\n _, nodes_by_depth, layers, _ = _map_graph_network(inputs, outputs)\n return nest.flatten([nodes for nodes in nodes_by_depth.values()]), layers\n\n\ndef _should_skip_first_node(layer):\n \"\"\"Returns True if the first layer node should not be saved or loaded.\"\"\"\n # Networks start with a pre-existing node linking their input to output.\n return issubclass(layer.__class__, Network) and layer._is_graph_network\n\n\ndef _serialize_tensors(kwargs):\n \"\"\"Serializes Tensors passed to `call`.\"\"\"\n\n def _serialize_keras_tensor(t):\n \"\"\"Serializes a single Tensor passed to `call`.\"\"\"\n if hasattr(t, '_keras_history'):\n kh = t._keras_history\n return [kh.layer.name, kh.node_index, kh.tensor_index]\n\n if isinstance(t, np.ndarray):\n return t.tolist()\n\n if isinstance(t, ops.Tensor):\n return backend.get_value(t).tolist()\n\n return t\n\n return nest.map_structure(_serialize_keras_tensor, kwargs)\n\n\ndef _map_tensors_to_constants(kwargs):\n\n def _map_to_constants(t):\n if not hasattr(t, '_keras_history') and isinstance(t, ops.Tensor):\n return constant_op.constant(backend.get_value(t))\n return t\n\n return nest.map_structure(_map_to_constants, kwargs)\n\n\ndef _deserialize_keras_tensors(kwargs, layer_map):\n \"\"\"Deserializes Keras Tensors passed to `call`..\"\"\"\n\n def _deserialize_keras_tensor(t):\n \"\"\"Deserializes a single Keras Tensor passed to `call`.\"\"\"\n if isinstance(t, tf_utils.ListWrapper):\n t = t.as_list()\n layer_name = t[0]\n node_index = t[1]\n tensor_index = t[2]\n\n layer = layer_map[layer_name]\n node = layer._inbound_nodes[node_index]\n return 
nest.flatten(node.output_tensors)[tensor_index]\n return t\n\n kwargs = tf_utils.convert_inner_node_data(kwargs, wrap=True)\n return nest.map_structure(_deserialize_keras_tensor, kwargs)\n\n\ndef connect_ancillary_layers(model, created_layers):\n \"\"\"Adds layers that are not connected to the outputs to the model.\"\"\"\n # Layers not connected to outputs, such as those added in `add_loss`.\n ancillary_layers = [\n layer for layer in created_layers.values() if layer not in model.layers\n ]\n if ancillary_layers:\n relevant_nodes = nest.flatten([\n layer.inbound_nodes[1:]\n if _should_skip_first_node(layer) else layer.inbound_nodes\n for layer in created_layers.values()\n ])\n model._insert_layers(ancillary_layers, relevant_nodes)\n return model\n\n\ndef reconstruct_from_config(config, custom_objects=None, created_layers=None):\n \"\"\"Reconstructs graph from config object.\n\n Args:\n config: Dictionary returned from Network.get_config()\n custom_objects: Optional dictionary mapping names (strings) to custom\n classes or functions to be considered during deserialization.\n created_layers: Optional dictionary mapping names to Layer objects. Any\n layer not in this dictionary will be be created and added to the dict.\n This function will add new nodes to all layers (excluding InputLayers),\n instead of re-using pre-existing nodes in the layers.\n\n Returns:\n Tuple of (input tensors, output tensors, dictionary of created layers)\n \"\"\"\n # Layer instances created during the graph reconstruction process.\n created_layers = created_layers or collections.OrderedDict()\n\n # Maps input data (tuple of inbound layer name, node index) from the config\n # to node indices in the newly generated model. The node indices may be\n # different if the layers have already been called previously.\n node_index_map = {}\n node_count_by_layer = {}\n\n # Dictionary mapping layer instances to\n # node data that specifies a layer call.\n # It acts as a queue that maintains any unprocessed\n # layer call until it becomes possible to process it\n # (i.e. 
until the input tensors to the call all exist).\n unprocessed_nodes = {}\n\n def add_unprocessed_node(layer, node_data):\n if layer not in unprocessed_nodes:\n unprocessed_nodes[layer] = [node_data]\n else:\n unprocessed_nodes[layer].append(node_data)\n\n def get_node_index(layer, config_node_index):\n \"\"\"Returns node index in layer (might differ from config_node_index).\"\"\"\n if isinstance(layer, input_layer_module.InputLayer):\n return 0\n return node_index_map.get((layer.name, config_node_index), None)\n\n def process_node(layer, node_data):\n \"\"\"Deserialize a node.\n\n Arguments:\n layer: layer instance.\n node_data: Nested structure of `ListWrapper`.\n\n Raises:\n ValueError: In case of improperly formatted `node_data`.\n \"\"\"\n input_tensors = []\n for input_data in nest.flatten(node_data):\n input_data = input_data.as_list()\n inbound_layer_name = input_data[0]\n inbound_node_index = input_data[1]\n inbound_tensor_index = input_data[2]\n if len(input_data) == 3:\n kwargs = {}\n elif len(input_data) == 4:\n kwargs = input_data[3]\n kwargs = _deserialize_keras_tensors(kwargs, created_layers)\n else:\n raise ValueError('Improperly formatted model config.')\n\n inbound_layer = created_layers[inbound_layer_name]\n inbound_node_index = get_node_index(inbound_layer, inbound_node_index)\n\n if inbound_node_index is None:\n add_unprocessed_node(layer, node_data)\n return\n inbound_node = inbound_layer._inbound_nodes[inbound_node_index]\n input_tensors.append(\n nest.flatten(inbound_node.output_tensors)[inbound_tensor_index])\n input_tensors = nest.pack_sequence_as(node_data, input_tensors)\n # Call layer on its inputs, thus creating the node\n # and building the layer if needed.\n if input_tensors is not None:\n input_tensors = base_layer_utils.unnest_if_single_tensor(input_tensors)\n output_tensors = layer(input_tensors, **kwargs)\n\n # Update node index map.\n output_index = nest.flatten(output_tensors)[0]._keras_history.node_index\n node_index_map[(layer.name, node_count_by_layer[layer])] = output_index\n node_count_by_layer[layer] += 1\n\n def process_layer(layer_data):\n \"\"\"Deserializes a layer, then call it on appropriate inputs.\n\n Arguments:\n layer_data: layer config dict.\n\n Raises:\n ValueError: In case of improperly formatted `layer_data` dict.\n \"\"\"\n layer_name = layer_data['name']\n\n if layer_name in created_layers:\n layer = created_layers[layer_name]\n else:\n # Instantiate layer.\n from tensorflow.python.keras.layers import deserialize as deserialize_layer # pylint: disable=g-import-not-at-top\n\n layer = deserialize_layer(layer_data, custom_objects=custom_objects)\n created_layers[layer_name] = layer\n\n node_count_by_layer[layer] = int(_should_skip_first_node(layer))\n\n # Gather layer inputs and convert to `ListWrapper` objects.\n inbound_nodes_data = layer_data['inbound_nodes']\n inbound_nodes_data = tf_utils.convert_inner_node_data(\n inbound_nodes_data, wrap=True)\n for node_data in inbound_nodes_data:\n # We don't process nodes (i.e. make layer calls)\n # on the fly because the inbound node may not yet exist,\n # in case of layer shared at different topological depths\n # (e.g. 
a model such as A(B(A(B(x)))))\n add_unprocessed_node(layer, node_data)\n\n # First, we create all layers and enqueue nodes to be processed\n for layer_data in config['layers']:\n process_layer(layer_data)\n # Then we process nodes in order of layer depth.\n # Nodes that cannot yet be processed (if the inbound node\n # does not yet exist) are re-enqueued, and the process\n # is repeated until all nodes are processed.\n while unprocessed_nodes:\n for layer_data in config['layers']:\n layer = created_layers[layer_data['name']]\n if layer in unprocessed_nodes:\n for node_data in unprocessed_nodes.pop(layer):\n process_node(layer, node_data)\n\n input_tensors = []\n output_tensors = []\n\n input_layers = tf_utils.convert_inner_node_data(\n config['input_layers'], wrap=True)\n for layer_data in nest.flatten(input_layers):\n layer_name, node_index, tensor_index = layer_data.as_list()\n assert layer_name in created_layers\n layer = created_layers[layer_name]\n node_index = get_node_index(layer, node_index)\n layer_output_tensors = layer._inbound_nodes[node_index].output_tensors\n input_tensors.append(nest.flatten(layer_output_tensors)[tensor_index])\n\n output_layers = tf_utils.convert_inner_node_data(\n config['output_layers'], wrap=True)\n for layer_data in nest.flatten(output_layers):\n layer_name, node_index, tensor_index = layer_data.as_list()\n assert layer_name in created_layers\n layer = created_layers[layer_name]\n node_index = get_node_index(layer, node_index)\n layer_output_tensors = layer._inbound_nodes[node_index].output_tensors\n output_tensors.append(nest.flatten(layer_output_tensors)[tensor_index])\n\n input_tensors = nest.pack_sequence_as(input_layers, input_tensors)\n output_tensors = nest.pack_sequence_as(output_layers, output_tensors)\n return input_tensors, output_tensors, created_layers\n\n\ndef get_network_config(network, serialize_layer_fn=None):\n \"\"\"Builds the config, which consists of the node graph and serialized layers.\n\n Args:\n network: A Network object.\n serialize_layer_fn: Function used to serialize layers.\n\n Returns:\n Config dictionary.\n \"\"\"\n serialize_layer_fn = (\n serialize_layer_fn or generic_utils.serialize_keras_object)\n config = {\n 'name': network.name,\n }\n node_conversion_map = {}\n for layer in network.layers:\n kept_nodes = 1 if _should_skip_first_node(layer) else 0\n for original_node_index, node in enumerate(layer._inbound_nodes):\n node_key = _make_node_key(layer.name, original_node_index)\n if node_key in network._network_nodes:\n node_conversion_map[node_key] = kept_nodes\n kept_nodes += 1\n layer_configs = []\n for layer in network.layers: # From the earliest layers on.\n filtered_inbound_nodes = []\n for original_node_index, node in enumerate(layer._inbound_nodes):\n node_key = _make_node_key(layer.name, original_node_index)\n if node_key in network._network_nodes:\n # The node is relevant to the model:\n # add to filtered_inbound_nodes.\n if node.arguments:\n kwargs = _serialize_tensors(node.arguments)\n try:\n json.dumps(kwargs)\n except TypeError:\n logging.warning(\n 'Layer ' + layer.name +\n ' was passed non-serializable keyword arguments: ' +\n str(node.arguments) + '. 
They will not be included '\n 'in the serialized model (and thus will be missing '\n 'at deserialization time).')\n kwargs = {}\n else:\n kwargs = {}\n if node.inbound_layers:\n node_data = []\n for inbound_layer, node_id, tensor_id, _ in node.iterate_inbound():\n node_key = _make_node_key(inbound_layer.name, node_id)\n new_node_index = node_conversion_map.get(node_key, 0)\n node_data.append(\n tf_utils.ListWrapper(\n [inbound_layer.name, new_node_index, tensor_id, kwargs]))\n node_data = nest.pack_sequence_as(node.input_tensors, node_data)\n if not nest.is_sequence(node_data):\n node_data = [node_data]\n # Convert ListWrapper to list for backwards compatible configs.\n node_data = tf_utils.convert_inner_node_data(node_data)\n filtered_inbound_nodes.append(node_data)\n\n layer_config = serialize_layer_fn(layer)\n layer_config['name'] = layer.name\n layer_config['inbound_nodes'] = filtered_inbound_nodes\n layer_configs.append(layer_config)\n config['layers'] = layer_configs\n\n # Gather info about inputs and outputs.\n model_inputs = []\n for i in range(len(network._input_layers)):\n layer, node_index, tensor_index = network._input_coordinates[i]\n node_key = _make_node_key(layer.name, node_index)\n if node_key not in network._network_nodes:\n continue\n new_node_index = node_conversion_map[node_key]\n model_inputs.append(\n tf_utils.ListWrapper([layer.name, new_node_index, tensor_index]))\n model_inputs = nest.pack_sequence_as(network._nested_inputs, model_inputs)\n # Preserve external Keras compat for Models with single input.\n if not nest.is_sequence(model_inputs):\n model_inputs = [model_inputs]\n model_inputs = tf_utils.convert_inner_node_data(model_inputs)\n config['input_layers'] = model_inputs\n\n model_outputs = []\n for i in range(len(network._output_layers)):\n layer, node_index, tensor_index = network._output_coordinates[i]\n node_key = _make_node_key(layer.name, node_index)\n if node_key not in network._network_nodes:\n continue\n new_node_index = node_conversion_map[node_key]\n model_outputs.append(\n tf_utils.ListWrapper([layer.name, new_node_index, tensor_index]))\n model_outputs = nest.pack_sequence_as(network._nested_outputs, model_outputs)\n # Preserve external Keras compat for Models with single output.\n if not nest.is_sequence(model_outputs):\n model_outputs = [model_outputs]\n model_outputs = tf_utils.convert_inner_node_data(model_outputs)\n config['output_layers'] = model_outputs\n return config\n"
] | [
[
"tensorflow.python.util.function_utils.get_func_code",
"tensorflow.python.client.pywrap_tf_session.TF_OperationGetAttrBool",
"tensorflow.python.eager.tape.stop_recording",
"tensorflow.python.eager.context.context",
"tensorflow.python.util.tf_export.kwarg_only",
"tensorflow.python.client.pywrap_tf_session.TF_OperationOpType",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.client.pywrap_tf_session.ClearAttr",
"tensorflow.python.client.pywrap_tf_session.TF_OperationGetAttrInt",
"tensorflow.python.platform.tf_logging.warning",
"tensorflow.python.client.pywrap_tf_session.TF_Input",
"tensorflow.python.util.tf_stack.extract_stack",
"tensorflow.python.client.pywrap_tf_session.SetRequireShapeInferenceFns",
"tensorflow.python.util.deprecation.deprecated_args",
"tensorflow.python.client.pywrap_tf_session.TF_AddControlInput",
"tensorflow.python.client.pywrap_tf_session.GetOperationInputs",
"tensorflow.core.framework.attr_value_pb2.NameAttrList",
"tensorflow.python.client.pywrap_tf_session.TF_OperationDevice",
"tensorflow.python.client.pywrap_tf_session.TF_FinishOperation",
"tensorflow.core.framework.function_pb2.GradientDef",
"tensorflow.python.framework.tensor_conversion_registry.register_tensor_conversion_function",
"tensorflow.python.client.pywrap_tf_session.TF_OperationGetAttrValueProto",
"tensorflow.python.client.pywrap_tf_session.SetRequestedDevice",
"tensorflow.python.client.pywrap_tf_session.TF_OperationGetControlInputs_wrapper",
"tensorflow.python.framework.c_api_util.tf_output",
"tensorflow.python.client.pywrap_tf_session.TF_OperationGetAttrType",
"tensorflow.python.eager.context.device",
"tensorflow.python.framework.device.is_device_spec",
"tensorflow.python.client.pywrap_tf_session.RemoveAllControlInputs",
"tensorflow.python.client.pywrap_tf_session.TF_GraphCopyFunction",
"tensorflow.python.client.pywrap_tf_session.TF_GraphToGraphDef",
"tensorflow.python.framework.dtypes.as_dtype",
"tensorflow.python.framework.tensor_shape.unknown_shape",
"tensorflow.python.framework.c_api_util.tf_buffer",
"tensorflow.python.framework.registry.Registry",
"tensorflow.python.platform.tf_logging.warn",
"tensorflow.python.framework.c_api_util.new_tf_operations",
"tensorflow.python.util.compat.as_bytes",
"tensorflow.python.client.pywrap_tf_session.TF_DeleteBuffer",
"tensorflow.core.framework.versions_pb2.VersionDef",
"tensorflow.python.util.lock_util.GroupLock",
"tensorflow.python.framework.c_api_util.ScopedTFGraph",
"tensorflow.python.platform.app.run",
"tensorflow.python.client.pywrap_tf_session.SetAttr",
"tensorflow.python.client.pywrap_tf_session.TF_OperationNumOutputs",
"tensorflow.python.eager.tape.record_operation",
"tensorflow.python.client.pywrap_tf_session.AddControlInput",
"tensorflow.python.client.pywrap_tf_session.TF_OperationGetControlOutputs_wrapper",
"tensorflow.python.util.deprecation.deprecated_endpoints",
"tensorflow.python.pywrap_tfe.TFE_Py_InitEagerTensor",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.core.framework.op_def_pb2.OpDef",
"tensorflow.python.eager.core._status_to_exception",
"tensorflow.python.util.compat.as_str",
"tensorflow.python.client.pywrap_tf_session.TF_GraphVersions",
"tensorflow.python.client.pywrap_tf_session.TF_OperationOutputType",
"tensorflow.core.framework.attr_value_pb2.AttrValue",
"tensorflow.python.pywrap_tfe.TFE_Py_UID",
"tensorflow.python.util.object_identity.ObjectIdentitySet",
"tensorflow.core.framework.node_def_pb2.NodeDef",
"tensorflow.python.tf2.enabled",
"tensorflow.python.util.memory.dismantle_ordered_dict",
"tensorflow.python.util.deprecation.deprecated_argument_lookup",
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.client.pywrap_tf_session.TF_Output",
"tensorflow.python.eager.context.Context",
"tensorflow.python.eager.context.context_safe",
"tensorflow.python.framework.traceable_stack.TraceableStack",
"tensorflow.python.util.function_utils.get_func_name",
"tensorflow.python.framework.device.merge_device",
"tensorflow.python.eager.monitoring.BoolGauge",
"tensorflow.python.client.pywrap_tf_session.TF_OperationNumInputs",
"tensorflow.python.eager.context.graph_mode",
"tensorflow.python.util.object_identity.Reference",
"tensorflow.python.util.deprecation.deprecated",
"tensorflow.python.client.pywrap_tf_session.TF_OperationToNodeDef",
"tensorflow.core.framework.attr_value_pb2.AttrValue.ListValue",
"tensorflow.python.client.pywrap_tf_session.TF_OperationName",
"tensorflow.python.ops.control_flow_util.CheckInputFromValidContext",
"tensorflow.python.client.pywrap_tf_session.TF_GetBuffer",
"tensorflow.core.framework.graph_pb2.GraphDef"
],
[
"tensorflow.python.training.tracking.layer_utils.filter_empty_layer_containers",
"tensorflow.python.util.tf_inspect.getfullargspec",
"tensorflow.python.training.tracking.util.saver_with_op_caching",
"tensorflow.python.keras.saving.hdf5_format.load_weights_from_hdf5_group_by_name",
"tensorflow.python.training.py_checkpoint_reader.NewCheckpointReader",
"tensorflow.python.training.tracking.layer_utils.has_weights",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.keras.engine.base_layer_utils.generate_placeholders_from_shape",
"tensorflow.python.keras.utils.layer_utils.print_summary",
"tensorflow.python.keras.layers.deserialize",
"tensorflow.python.keras.utils.tf_utils.assert_no_legacy_layers",
"tensorflow.python.training.tracking.layer_utils.gather_trainable_weights",
"tensorflow.python.keras.utils.tf_utils.convert_shapes",
"tensorflow.python.keras.backend._GRAPH_LEARNING_PHASES.values",
"tensorflow.python.training.tracking.layer_utils.gather_non_trainable_weights",
"tensorflow.python.training.tracking.base.TrackableReference",
"tensorflow.python.keras.backend.backend",
"tensorflow.python.keras.engine.base_layer.AddMetric",
"tensorflow.python.keras.backend.get_session",
"tensorflow.python.keras.saving.save.save_model",
"tensorflow.python.util.nest.map_structure",
"tensorflow.python.keras.engine.base_layer_utils.unnest_if_single_tensor",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.keras.saving.saved_model.network_serialization.NetworkSavedModelSaver",
"tensorflow.python.platform.tf_logging.warning",
"tensorflow.python.util.nest.is_sequence",
"tensorflow.python.framework.func_graph.FuncGraph",
"tensorflow.python.keras.saving.hdf5_format.load_weights_from_hdf5_group",
"tensorflow.python.keras.saving.hdf5_format.save_weights_to_hdf5_group",
"tensorflow.python.keras.engine.base_layer_utils.call_context",
"tensorflow.python.training.tracking.layer_utils.cache_recursive_attribute",
"tensorflow.python.keras.backend.get_value",
"tensorflow.python.keras.utils.tf_utils.convert_inner_node_data",
"tensorflow.python.keras.engine.compile_utils.create_pseudo_input_names",
"tensorflow.python.util.nest.pack_sequence_as",
"tensorflow.python.keras.utils.tf_utils.get_tensor_spec",
"tensorflow.python.keras.engine.base_layer.AddLoss",
"tensorflow.python.keras.utils.io_utils.ask_to_proceed_with_overwrite",
"tensorflow.python.keras.engine.training_utils.get_static_batch_size",
"tensorflow.python.keras.engine.node.Node",
"tensorflow.python.keras.engine.base_layer_utils.create_keras_history",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.keras.engine.base_layer_utils.v2_dtype_behavior_enabled",
"tensorflow.python.training.tracking.util.streaming_restore",
"tensorflow.python.keras.utils.generic_utils.validate_kwargs",
"tensorflow.python.keras.engine.base_layer_utils.is_subclassed",
"tensorflow.python.keras.utils.tf_utils.ListWrapper",
"tensorflow.python.keras.backend.get_graph",
"tensorflow.python.util.nest.flatten"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.2",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8"
]
}
] |
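Note on consuming rows like the one above: each record pairs source files with the fully qualified TensorFlow/Keras APIs they reference ("apis") and, per file, the library versions under which those calls could plausibly resolve ("possible_versions"). The following is a minimal sketch, not part of the dataset's tooling: it assumes a JSON Lines export using the field names visible in this dump (repo_name, hexsha, file_path, code, apis, possible_versions), which may not match the dataset's actual on-disk format, and the path "dataset.jsonl" is hypothetical.

# Minimal sketch (assumptions noted above): iterate over an assumed JSON Lines
# export of these records and count the most frequently referenced APIs.
import json
from collections import Counter

def iter_records(path):
    """Yield one record dict per non-empty line of an assumed JSON Lines file."""
    with open(path, "r", encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if line:
                yield json.loads(line)

def summarize(path):
    """Aggregate API usage counts across all files in all records."""
    api_counts = Counter()
    for record in iter_records(path):
        # "apis" is assumed to be a list with one sub-list of fully
        # qualified API names per file in the record.
        for file_apis in record.get("apis", []):
            api_counts.update(file_apis)
    return api_counts

if __name__ == "__main__":
    counts = summarize("dataset.jsonl")  # hypothetical path
    for name, n in counts.most_common(10):
        print(f"{n:5d}  {name}")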
ragavvenkatesan/models | [
"420a88c7af20dae8d79dbc1b4351fef41be361c8"
] | [
"research/compression/distillation/resnet.py"
] | [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Contains definitions for the preactivation form of Residual Networks\n(also known as ResNet v2).\n\nResidual networks (ResNets) were originally proposed in:\n[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun\n Deep Residual Learning for Image Recognition. arXiv:1512.03385\n\nThe full preactivation 'v2' ResNet variant implemented in this module was\nintroduced by:\n[2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun\n Identity Mappings in Deep Residual Networks. arXiv: 1603.05027\n\nThe key difference of the full preactivation 'v2' variant compared to the\n'v1' variant in [1] is the use of batch normalization before every weight layer\nrather than after.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport os\n\nimport tensorflow as tf\n\n_BATCH_NORM_DECAY = 0.997\n_BATCH_NORM_EPSILON = 1e-5\n\n\n################################################################################\n# Functions for input processing.\n################################################################################\ndef process_record_dataset(dataset, is_training, batch_size, shuffle_buffer,\n parse_record_fn, num_epochs=1, num_parallel_calls=1):\n \"\"\"Given a Dataset with raw records, parse each record into images and labels,\n and return an iterator over the records.\n Args:\n dataset: A Dataset representing raw records\n is_training: A boolean denoting whether the input is for training.\n batch_size: The number of samples per batch.\n shuffle_buffer: The buffer size to use when shuffling records. A larger\n value results in better randomness, but smaller values reduce startup\n time and use less memory.\n parse_record_fn: A function that takes a raw record and returns the\n corresponding (image, label) pair.\n num_epochs: The number of epochs to repeat the dataset.\n num_parallel_calls: The number of records that are processed in parallel.\n This can be optimized per data set but for generally homogeneous data\n sets, should be approximately the number of available CPU cores.\n\n Returns:\n Dataset of (image, label) pairs ready for iteration.\n \"\"\"\n # We prefetch a batch at a time, This can help smooth out the time taken to\n # load input files as we go through shuffling and processing.\n dataset = dataset.prefetch(buffer_size=batch_size)\n if is_training:\n # Shuffle the records. 
Note that we shuffle before repeating to ensure\n # that the shuffling respects epoch boundaries.\n dataset = dataset.shuffle(buffer_size=shuffle_buffer)\n\n # If we are training over multiple epochs before evaluating, repeat the\n # dataset for the appropriate number of epochs.\n dataset = dataset.repeat(num_epochs)\n\n # Parse the raw records into images and labels\n dataset = dataset.map(lambda value: parse_record_fn(value, is_training),\n num_parallel_calls=num_parallel_calls)\n\n dataset = dataset.batch(batch_size)\n\n # Operations between the final prefetch and the get_next call to the iterator\n # will happen synchronously during run time. We prefetch here again to\n # background all of the above processing work and keep it out of the\n # critical training path.\n dataset = dataset.prefetch(1)\n\n return dataset\n\n\n################################################################################\n# Functions building the ResNet model.\n################################################################################\ndef batch_norm_relu(inputs, training, data_format):\n \"\"\"Performs a batch normalization followed by a ReLU.\"\"\"\n # We set fused=True for a significant performance boost. See\n # https://www.tensorflow.org/performance/performance_guide#common_fused_ops\n inputs = tf.layers.batch_normalization(\n inputs=inputs, axis=1 if data_format == 'channels_first' else 3,\n momentum=_BATCH_NORM_DECAY, epsilon=_BATCH_NORM_EPSILON, center=True,\n scale=True, training=training, fused=True)\n inputs = tf.nn.relu(inputs)\n return inputs\n\n\ndef fixed_padding(inputs, kernel_size, data_format):\n \"\"\"Pads the input along the spatial dimensions independently of input size.\n\n Args:\n inputs: A tensor of size [batch, channels, height_in, width_in] or\n [batch, height_in, width_in, channels] depending on data_format.\n kernel_size: The kernel to be used in the conv2d or max_pool2d operation.\n Should be a positive integer.\n data_format: The input format ('channels_last' or 'channels_first').\n\n Returns:\n A tensor with the same format as the input with the data either intact\n (if kernel_size == 1) or padded (if kernel_size > 1).\n \"\"\"\n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n\n if data_format == 'channels_first':\n padded_inputs = tf.pad(inputs, [[0, 0], [0, 0],\n [pad_beg, pad_end], [pad_beg, pad_end]])\n else:\n padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],\n [pad_beg, pad_end], [0, 0]])\n return padded_inputs\n\n\ndef conv2d_fixed_padding(inputs, filters, kernel_size, strides, data_format):\n \"\"\"Strided 2-D convolution with explicit padding.\"\"\"\n # The padding is consistent and is based only on `kernel_size`, not on the\n # dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone).\n if strides > 1:\n inputs = fixed_padding(inputs, kernel_size, data_format)\n\n return tf.layers.conv2d(\n inputs=inputs, filters=filters, kernel_size=kernel_size, strides=strides,\n padding=('SAME' if strides == 1 else 'VALID'), use_bias=False,\n kernel_initializer=tf.variance_scaling_initializer(),\n data_format=data_format)\n\n\ndef building_block(inputs, filters, training, projection_shortcut, strides,\n data_format):\n \"\"\"Standard building block for residual networks with BN before convolutions.\n\n Args:\n inputs: A tensor of size [batch, channels, height_in, width_in] or\n [batch, height_in, width_in, channels] depending on data_format.\n filters: The number of filters for the convolutions.\n training: A 
Boolean for whether the model is in training or inference\n mode. Needed for batch normalization.\n projection_shortcut: The function to use for projection shortcuts\n (typically a 1x1 convolution when downsampling the input).\n strides: The block's stride. If greater than 1, this block will ultimately\n downsample the input.\n data_format: The input format ('channels_last' or 'channels_first').\n\n Returns:\n The output tensor of the block.\n \"\"\"\n shortcut = inputs\n inputs = batch_norm_relu(inputs, training, data_format)\n\n # The projection shortcut should come after the first batch norm and ReLU\n # since it performs a 1x1 convolution.\n if projection_shortcut is not None:\n shortcut = projection_shortcut(inputs)\n\n inputs = conv2d_fixed_padding(\n inputs=inputs, filters=filters, kernel_size=3, strides=strides,\n data_format=data_format)\n\n inputs = batch_norm_relu(inputs, training, data_format)\n inputs = conv2d_fixed_padding(\n inputs=inputs, filters=filters, kernel_size=3, strides=1,\n data_format=data_format)\n\n return inputs + shortcut\n\n\ndef bottleneck_block(inputs, filters, training, projection_shortcut,\n strides, data_format):\n \"\"\"Bottleneck block variant for residual networks with BN before convolutions.\n\n Args:\n inputs: A tensor of size [batch, channels, height_in, width_in] or\n [batch, height_in, width_in, channels] depending on data_format.\n filters: The number of filters for the first two convolutions. Note\n that the third and final convolution will use 4 times as many filters.\n training: A Boolean for whether the model is in training or inference\n mode. Needed for batch normalization.\n projection_shortcut: The function to use for projection shortcuts\n (typically a 1x1 convolution when downsampling the input).\n strides: The block's stride. If greater than 1, this block will ultimately\n downsample the input.\n data_format: The input format ('channels_last' or 'channels_first').\n\n Returns:\n The output tensor of the block.\n \"\"\"\n shortcut = inputs\n inputs = batch_norm_relu(inputs, training, data_format)\n\n # The projection shortcut should come after the first batch norm and ReLU\n # since it performs a 1x1 convolution.\n if projection_shortcut is not None:\n shortcut = projection_shortcut(inputs)\n\n inputs = conv2d_fixed_padding(\n inputs=inputs, filters=filters, kernel_size=1, strides=1,\n data_format=data_format)\n\n inputs = batch_norm_relu(inputs, training, data_format)\n inputs = conv2d_fixed_padding(\n inputs=inputs, filters=filters, kernel_size=3, strides=strides,\n data_format=data_format)\n\n inputs = batch_norm_relu(inputs, training, data_format)\n inputs = conv2d_fixed_padding(\n inputs=inputs, filters=4 * filters, kernel_size=1, strides=1,\n data_format=data_format)\n\n return inputs + shortcut\n\n\ndef block_layer(inputs, filters, block_fn, blocks, strides, training, name,\n data_format):\n \"\"\"Creates one layer of blocks for the ResNet model.\n\n Args:\n inputs: A tensor of size [batch, channels, height_in, width_in] or\n [batch, height_in, width_in, channels] depending on data_format.\n filters: The number of filters for the first convolution of the layer.\n block_fn: The block to use within the model, either `building_block` or\n `bottleneck_block`.\n blocks: The number of blocks contained in the layer.\n strides: The stride to use for the first convolution of the layer. If\n greater than 1, this layer will ultimately downsample the input.\n training: Either True or False, whether we are currently training the\n model. 
Needed for batch norm.\n name: A string name for the tensor output of the block layer.\n data_format: The input format ('channels_last' or 'channels_first').\n\n Returns:\n The output tensor of the block layer.\n \"\"\"\n # Bottleneck blocks end with 4x the number of filters as they start with\n filters_out = 4 * filters if block_fn is bottleneck_block else filters\n\n def projection_shortcut(inputs):\n return conv2d_fixed_padding(\n inputs=inputs, filters=filters_out, kernel_size=1, strides=strides,\n data_format=data_format)\n\n # Only the first block per block_layer uses projection_shortcut and strides\n inputs = block_fn(inputs, filters, training, projection_shortcut, strides,\n data_format)\n\n for _ in range(1, blocks):\n inputs = block_fn(inputs, filters, training, None, 1, data_format)\n\n return tf.identity(inputs, name)\n\n\nclass Model(object):\n \"\"\"Base class for building the Resnet v2 Model.\n \"\"\"\n\n def __init__(self, resnet_size, num_classes, num_filters, kernel_size,\n conv_stride, first_pool_size, first_pool_stride, probe_pool_size,\n second_pool_size, second_pool_stride, probe_pool_stride,\n block_fn, block_sizes, pool_type, num_probes,\n block_strides, final_size, data_format=None):\n \"\"\"Creates a model for classifying an image.\n\n Args:\n resnet_size: A single integer for the size of the ResNet model.\n probe_pool_size: Number to pool the probes by.\n probe_pool_stride: stride size for the probe pooling layer \n num_classes: The number of classes used as labels.\n num_filters: The number of filters to use for the first block layer\n of the model. This number is then doubled for each subsequent block\n layer.\n kernel_size: The kernel size to use for convolution.\n conv_stride: stride size for the initial convolutional layer\n first_pool_size: Pool size to be used for the first pooling layer.\n If none, the first pooling layer is skipped.\n first_pool_stride: stride size for the first pooling layer. Not used\n if first_pool_size is None.\n second_pool_size: Pool size to be used for the second pooling layer.\n second_pool_stride: stride size for the final pooling layer\n block_fn: Which block layer function should be used? Pass in one of\n the two functions defined above: building_block or bottleneck_block\n block_sizes: A list containing n values, where n is the number of sets of\n block layers desired. Each value should be the number of blocks in the\n i-th set.\n pool_type: 'max' or 'mean'.\n block_strides: List of integers representing the desired stride size for\n each of the sets of block layers. 
Should be same length as block_sizes.\n final_size: The expected size of the model after the second pooling.\n data_format: Input format ('channels_last', 'channels_first', or None).\n If set to None, the format is dependent on whether a GPU is available.\n \"\"\"\n self.resnet_size = resnet_size\n\n if not data_format:\n data_format = (\n 'channels_first' if tf.test.is_built_with_cuda() else 'channels_last')\n\n self.data_format = data_format\n self.num_classes = num_classes\n self.num_filters = num_filters\n self.kernel_size = kernel_size\n self.conv_stride = conv_stride\n self.first_pool_size = first_pool_size\n self.first_pool_stride = first_pool_stride\n self.second_pool_size = second_pool_size\n self.second_pool_stride = second_pool_stride\n self.probe_pool_size = probe_pool_size\n self.probe_pool_stride = probe_pool_stride\n self.block_fn = block_fn\n self.block_sizes = block_sizes\n self.block_strides = block_strides\n self.final_size = final_size\n self.pool_type = pool_type\n self.num_probes = num_probes\n\n def __call__(self, inputs, training):\n \"\"\"Add operations to classify a batch of input images.\n\n Args:\n inputs: A Tensor representing a batch of input images.\n training: A boolean. Set to True to add operations required only when\n training the classifier.\n\n Returns:\n A logits Tensor with shape [<batch_size>, self.num_classes].\n \"\"\"\n with tf.variable_scope('input_transforms'):\n if self.data_format == 'channels_first':\n # Convert the inputs from channels_last (NHWC) to channels_first (NCHW).\n # This provides a large performance boost on GPU. See\n # https://www.tensorflow.org/performance/performance_guide#data_formats\n inputs = tf.transpose(inputs, [0, 3, 1, 2])\n with tf.variable_scope('mentor') as scope:\n # mentor\n mentor = conv2d_fixed_padding(\n inputs=inputs, filters=self.num_filters, kernel_size=self.kernel_size,\n strides=self.conv_stride, data_format=self.data_format)\n mentor = tf.identity(mentor, 'mentor_' + 'initial_conv')\n\n if self.first_pool_size:\n mentor = tf.layers.max_pooling2d(\n inputs=mentor, pool_size=self.first_pool_size,\n strides=self.first_pool_stride, padding='SAME',\n data_format=self.data_format)\n mentor = tf.identity(mentor, 'mentor_' + 'initial_max_pool')\n\n mentor_probes = []\n probe_count = 0\n for i, num_blocks in enumerate(self.block_sizes[0]):\n num_filters = self.num_filters * (2**i)\n mentor = block_layer(\n inputs=mentor, filters=num_filters, block_fn=self.block_fn,\n blocks=num_blocks, strides=self.block_strides[i],\n training=training, name='mentor_' + 'block_layer{}'.format(i + 1),\n data_format=self.data_format)\n \n if probe_count < self.num_probes: \n if self.probe_pool_size > 0:\n if self.pool_type == 'max':\n mentor_probe = tf.layers.max_pooling2d(\n inputs=mentor, pool_size=self.probe_pool_size,\n strides=self.probe_pool_stride, padding='SAME',\n data_format=self.data_format)\n mentor_probe = tf.identity(mentor, 'mentor_'+'probe_max_pool_' \\\n + str(i))\n elif self.pool_type == 'mean':\n mentor_probe = tf.layers.average_pooling2d(\n inputs=mentor, pool_size=self.probe_pool_size,\n strides=self.probe_pool_stride, padding='SAME',\n data_format=self.data_format)\n mentor_probe = tf.identity(mentor, 'mentor_'+'probe_mean_pool_' \\\n + str(i)) \n else:\n mentor_probe = mentor \n mentor_probes.append(mentor_probe)\n probe_count+=1\n mentor = batch_norm_relu(mentor, training, self.data_format)\n mentor = tf.layers.average_pooling2d(\n inputs=mentor, pool_size=self.second_pool_size,\n 
strides=self.second_pool_stride, padding='VALID',\n data_format=self.data_format)\n mentor = tf.identity(mentor, 'mentor_' + 'final_avg_pool')\n\n mentor = tf.reshape(mentor, [-1, self.final_size])\n mentor = tf.layers.dense(inputs=mentor, units=self.num_classes)\n mentor = tf.identity(mentor, 'mentor_' + 'final_dense')\n mentor_probes.append(mentor)\n\n with tf.variable_scope('mentee') as scope:\n # mentee\n mentee = conv2d_fixed_padding(\n inputs=inputs, filters=self.num_filters, kernel_size=self.kernel_size,\n strides=self.conv_stride, data_format=self.data_format)\n mentee = tf.identity(mentee, 'mentee_' + 'initial_conv')\n\n if self.first_pool_size:\n mentee = tf.layers.max_pooling2d(\n inputs=mentee, pool_size=self.first_pool_size,\n strides=self.first_pool_stride, padding='SAME',\n data_format=self.data_format)\n mentee = tf.identity(mentee, 'mentee_' + 'initial_max_pool')\n \n probe_count = 0\n mentee_probes = []\n for i, num_blocks in enumerate(self.block_sizes[1]):\n num_filters = self.num_filters * (2**i)\n mentee = block_layer(\n inputs=mentee, filters=num_filters, block_fn=self.block_fn,\n blocks=num_blocks, strides=self.block_strides[i],\n training=training, name='mentee_' + 'block_layer{}'.format(i + 1),\n data_format=self.data_format)\n if probe_count < self.num_probes: \n if self.probe_pool_size > 0:\n if self.pool_type == 'max':\n mentee_probe = tf.layers.max_pooling2d(\n inputs=mentee, pool_size=self.probe_pool_size,\n strides=self.probe_pool_stride, padding='SAME',\n data_format=self.data_format)\n mentee_probe = tf.identity(mentee, 'mentee_'+'probe_max_pool_' \\\n + str(i))\n elif self.pool_type == 'mean':\n mentee_probe = tf.layers.average_pooling2d(\n inputs=mentee, pool_size=self.probe_pool_size,\n strides=self.probe_pool_stride, padding='SAME',\n data_format=self.data_format)\n mentee_probe = tf.identity(mentee, 'mentee_'+'probe_max_pool_' \\\n + str(i)) \n else:\n mentee_probe=mentee \n mentee_probes.append(mentee_probe)\n probe_count+=1\n\n mentee = batch_norm_relu(mentee, training, self.data_format)\n mentee = tf.layers.average_pooling2d(\n inputs=mentee, pool_size=self.second_pool_size,\n strides=self.second_pool_stride, padding='VALID',\n data_format=self.data_format)\n mentee = tf.identity(mentee, 'mentee_' + 'final_avg_pool')\n mentee = tf.reshape(mentee, [-1, self.final_size])\n mentee = tf.layers.dense(inputs=mentee, units=self.num_classes)\n mentee = tf.identity(mentee, 'mentee_' + 'final_dense') \n mentee_probes.append(mentee)\n\n probe_cost = tf.constant(0.)\n for mentor_feat, mentee_feat in zip(mentor_probes, mentee_probes):\n probe_cost = probe_cost + tf.losses.mean_squared_error (\n mentor_feat, mentee_feat)\n return (mentor, mentee, probe_cost)\n\n################################################################################\n# Functions for running training/eval/validation loops for the model.\n################################################################################\n\ndef learning_rate_with_decay(\n batch_size, batch_denom, num_images, boundary_epochs, decay_rates):\n \"\"\"Get a learning rate that decays step-wise as training progresses.\n\n Args:\n batch_size: the number of examples processed in each training batch.\n batch_denom: this value will be used to scale the base learning rate.\n `0.1 * batch size` is divided by this number, such that when\n batch_denom == batch_size, the initial learning rate will be 0.1.\n num_images: total number of images that will be used for training.\n boundary_epochs: list of ints representing the 
epochs at which we\n decay the learning rate.\n decay_rates: list of floats representing the decay rates to be used\n for scaling the learning rate. Should be the same length as\n boundary_epochs.\n\n Returns:\n Returns a function that takes a single argument - the number of batches\n trained so far (global_step)- and returns the learning rate to be used\n for training the next batch.\n \"\"\"\n with tf.variable_scope('learning_rate'):\n initial_learning_rate = 0.01 * batch_size / batch_denom\n batches_per_epoch = num_images / batch_size\n\n # Multiply the learning rate by 0.1 at 100, 150, and 200 epochs.\n boundaries = [int(batches_per_epoch * epoch) for epoch in boundary_epochs]\n vals = [initial_learning_rate * decay for decay in decay_rates]\n\n def learning_rate_fn(global_step):\n global_step = tf.cast(global_step, tf.int32)\n rval = tf.train.piecewise_constant(global_step, boundaries, vals)\n return rval\n return learning_rate_fn\n\ndef learning_rate_with_decay_2( initial_learning_rate,\n batch_size, batch_denom, num_images, boundary_epochs, decay_rates):\n \"\"\"Get a learning rate that decays step-wise as training progresses.\n\n Args:\n batch_size: the number of examples processed in each training batch.\n batch_denom: this value will be used to scale the base learning rate.\n `0.1 * batch size` is divided by this number, such that when\n batch_denom == batch_size, the initial learning rate will be 0.1.\n num_images: total number of images that will be used for training.\n boundary_epochs: list of ints representing the epochs at which we\n decay the learning rate.\n decay_rates: list of floats representing the decay rates to be used\n for scaling the learning rate. Should be the same length as\n boundary_epochs.\n\n Returns:\n Returns a function that takes a single argument - the number of batches\n trained so far (global_step)- and returns the learning rate to be used\n for training the next batch.\n \"\"\"\n with tf.variable_scope('learning_rate'):\n batches_per_epoch = num_images / batch_size\n\n boundaries = [int(batches_per_epoch * epoch) for epoch in boundary_epochs]\n vals = [initial_learning_rate * decay for decay in decay_rates]\n\n def learning_rate_fn(global_step):\n global_step = tf.cast(global_step, tf.int32)\n rval = tf.train.piecewise_constant(global_step, boundaries, vals)\n return rval\n return learning_rate_fn\n\n\ndef distillation_coeff_fn(intital_distillation, global_step):\n global_step = tf.cast(global_step, tf.int32)\n rval = tf.train.exponential_decay (\n intital_distillation,\n global_step, \n 100000,\n 0.55,\n staircase = False)\n return rval\n\ndef resnet_model_fn(features, labels, mode, model_class, trainee, \n distillation_coeff, probes_coeff, resnet_size, num_probes,\n weight_decay_coeff, learning_rate_fn_mentor, \n learning_rate_fn_mentee, learning_rate_fn_finetune,\n momentum, data_format, pool_probes, pool_type,\n temperature=1, optimizer='momentum', \n loss_filter_fn=None):\n \"\"\"Shared functionality for different resnet model_fns.\n\n Initializes the ResnetModel representing the model layers\n and uses that model to build the necessary EstimatorSpecs for\n the `mode` in question. 
For training, this means building losses,\n the optimizer, and the train op that get passed into the EstimatorSpec.\n For evaluation and prediction, the EstimatorSpec is returned without\n a train op, but with the necessary parameters for the given mode.\n\n Args:\n features: tensor representing input images\n labels: tensor representing class labels for all input images\n mode: current estimator mode; should be one of\n `tf.estimator.ModeKeys.TRAIN`, `EVALUATE`, `PREDICT`\n model_class: a class representing a TensorFlow model that has a __call__\n function. We assume here that this is a subclass of ResnetModel.\n trainee: A string either `'mentee'` or `'mentor`'.\n resnet_size: A list of two integers for the size of the ResNet model for \n mentor followed by mentee.\n weight_decay_coeff: weight decay rate used to regularize learned variables.\n distillation_coeff: Weight for distillation.\n probes_coeff: weight for probes.\n learning_rate_fn_mentor: function that returns the current learning rate given\n the current global_step\n learning_rate_fn_mentee: function that returns the current learning rate given\n the current global_step\n learning_rate_fn_finetune: function that returns the current learning rate given\n the current global_step \n num_probes: How many equally spaced probes do we need. \n momentum: momentum term used for optimization.\n data_format: Input format ('channels_last', 'channels_first', or None).\n If set to None, the format is dependent on whether a GPU is available.\n temperature: A value of temperature to use for distillation. Defaults to 1\n so that it will remain backward compatible.\n loss_filter_fn: function that takes a string variable name and returns\n True if the var should be included in loss calculation, and False\n otherwise. 
If None, batch_normalization variables will be excluded\n from the loss.\n pool_probes: Downsampling for probes.\n pool_type: 'max' or 'mean'.\n optimizer: 'adam', 'adadelta' and 'momentum' are options.\n Returns:\n EstimatorSpec parameterized according to the input params and the\n current mode.\n \"\"\"\n with tf.variable_scope('inputs'):\n # Generate a summary node for the images\n tf.summary.image('images', features, max_outputs=6)\n\n model = model_class(resnet_size = resnet_size,\n pool_probes = pool_probes, \n pool_type = pool_type, \n num_probes = num_probes,\n data_format = data_format)\n logits_mentor, logits_mentee, probe_cost = model(features, \n mode == tf.estimator.ModeKeys.TRAIN)\n\n predictions_mentor = {\n 'classes': tf.argmax(logits_mentor, axis=1),\n 'probabilities': tf.nn.softmax(logits_mentor, \n name='softmax_tensor_mentor'),\n }\n\n predictions_mentee = {\n 'classes': tf.argmax(logits_mentee, axis=1),\n 'probabilities': tf.nn.softmax(logits_mentee, \n name='softmax_tensor_mentee'),\n }\n\n if mode == tf.estimator.ModeKeys.PREDICT:\n if trainee == 'mentor':\n return tf.estimator.EstimatorSpec(mode=mode, \n predictions=predictions_mentor)\n elif trainee == 'mentee' or trainee == 'finetune':\n return tf.estimator.EstimatorSpec(mode=mode, \n predictions=predictions_mentee)\n\n with tf.variable_scope('distillery'):\n temperature_softmax_mentor = tf.nn.softmax((tf.div(logits_mentor, \n temperature)), name ='softmax_temperature_tensor_mentor')\n distillation_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(\n logits = tf.div(logits_mentee,temperature),\n labels = temperature_softmax_mentor))\n\n tf.identity(distillation_loss, name='distillation_loss')\n tf.summary.scalar('distillation_loss', distillation_loss)\n tf.summary.scalar('scaled_distillation_loss', distillation_coeff *\n distillation_loss)\n\n with tf.variable_scope('cross_entropy'):\n # Calculate loss, which includes softmax cross entropy and L2 regularization.\n cross_entropy_mentor = tf.losses.softmax_cross_entropy(\n logits=logits_mentor, onehot_labels=labels)\n # Create a tensor named cross_entropy for logging purposes.\n tf.identity(cross_entropy_mentor, name='cross_entropy_mentor')\n tf.summary.scalar('cross_entropy_mentor', cross_entropy_mentor) \n\n cross_entropy_mentee = tf.losses.softmax_cross_entropy(\n logits=logits_mentee, onehot_labels=labels)\n tf.identity(cross_entropy_mentee, name='cross_entropy_mentee')\n tf.summary.scalar('cross_entropy_mentee', cross_entropy_mentee)\n\n # If no loss_filter_fn is passed, assume we want the default behavior,\n # which is that batch_normalization variables are excluded from loss.\n if not loss_filter_fn:\n def loss_filter_fn(name):\n return 'batch_normalization' not in name\n\n mentor_variables=tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,\n scope='mentor')\n mentee_variables=tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,\n scope='mentee') \n\n with tf.variable_scope('regularizers'): \n if weight_decay_coeff > 0:\n l2_mentor = weight_decay_coeff * tf.add_n(\n [tf.nn.l2_loss(v) for v in mentor_variables\n if loss_filter_fn(v.name)])\n l2_mentee = weight_decay_coeff * tf.add_n(\n [tf.nn.l2_loss(v) for v in mentee_variables\n if loss_filter_fn(v.name)]) \n else:\n l2_mentor = tf.constant(0.)\n l2_mentee = tf.constant(0.)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n with tf.variable_scope('learning_rates'):\n global_step = tf.train.get_or_create_global_step()\n learning_rate_mentor = learning_rate_fn_mentor(global_step)\n learning_rate_mentee 
= learning_rate_fn_mentee(global_step)\n learning_rate_finetune = learning_rate_fn_finetune(global_step) \n\n tf.identity(learning_rate_mentor, name='learning_rate_mentor' )\n tf.summary.scalar('learning_rate_mentor', learning_rate_mentor)\n tf.identity(learning_rate_mentee, name='learning_rate_mentee' )\n tf.summary.scalar('learning_rate_mentee', learning_rate_mentee)\n tf.identity(learning_rate_finetune, name='learning_rate_finetune' )\n tf.summary.scalar('learning_rate_finetune', learning_rate_finetune)\n\n with tf.variable_scope('mentor_cumulative_loss'):\n # Add weight decay and distillation to the loss.\n loss_mentor = cross_entropy_mentor + l2_mentor\n tf.summary.scalar('objective', loss_mentor) \n \n with tf.variable_scope('mentee_cumulative_loss'): \n distillation_coeff_decayed = distillation_coeff_fn(distillation_coeff, \n global_step) \n probe_scale = probes_coeff * distillation_coeff_decayed \n\n tf.identity(probe_cost, name='probe_cost') \n tf.summary.scalar('probe_loss', probe_cost)\n tf.summary.scalar('scaled_probe_loss', probe_scale *\n probe_cost)\n tf.identity(distillation_coeff, name='distillation_coeff_decayed')\n tf.summary.scalar('coeff',distillation_coeff_decayed) \n\n loss_mentee = cross_entropy_mentee + l2_mentee + \\\n distillation_coeff_decayed * distillation_loss + \\\n probe_scale * probe_cost\n tf.summary.scalar('objective', loss_mentee) \n \n with tf.variable_scope('mentee_finetune'):\n loss_finetune = cross_entropy_mentee + l2_mentee\n tf.summary.scalar('objective', loss_finetune) \n\n if optimizer[0] == 'momentum':\n with tf.variable_scope('mentor_momentum_optimizer'): \n optimizer_mentor = tf.train.MomentumOptimizer(\n learning_rate=learning_rate_mentor,\n momentum=momentum)\n elif optimizer[0] == 'adam':\n with tf.variable_scope('mentor_adam_optimizer'): \n optimizer_mentor = tf.train.AdamOptimizer(\n learning_rate=learning_rate_mentor)\n elif optimizer[0] == 'adadelta':\n with tf.variable_scope('mentor_adadelta_optimizer'): \n optimizer_mentor = tf.train.AdadeltaOptimizer(\n learning_rate=learning_rate_mentor)\n\n if optimizer[1] == 'momentum':\n with tf.variable_scope('mentee_momentum_optimizer'): \n optimizer_mentee = tf.train.MomentumOptimizer(\n learning_rate=learning_rate_mentee,\n momentum=momentum)\n elif optimizer[1] == 'adam':\n with tf.variable_scope('mentee_adam_optimizer'): \n optimizer_mentee = tf.train.AdamOptimizer(\n learning_rate=learning_rate_mentee)\n elif optimizer[1] == 'adadelta':\n with tf.variable_scope('mentee_adadelta_optimizer'): \n optimizer_mentee = tf.train.AdadeltaOptimizer(\n learning_rate=learning_rate_mentee)\n\n if optimizer[2] == 'momentum':\n with tf.variable_scope('finetune_momentum_optimizer'): \n optimizer_finetune = tf.train.MomentumOptimizer(\n learning_rate=learning_rate_finetune,\n momentum=momentum)\n elif optimizer[2] == 'adam':\n with tf.variable_scope('finetune_adam_optimizer'): \n optimizer_finetune = tf.train.AdamOptimizer(\n learning_rate=learning_rate_finetune)\n elif optimizer[2] == 'adadelta':\n with tf.variable_scope('finetune_adadelta_optimizer'): \n optimizer_finetune = tf.train.AdadeltaOptimizer(\n learning_rate=learning_rate_finetune)\n\n # Batch norm requires update ops to be added as a dependency to train_op\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n with tf.variable_scope('optimizers'):\n train_op_mentor = optimizer_mentor.minimize(loss_mentor, \n global_step, \n var_list = mentor_variables)\n train_op_mentee = 
optimizer_mentee.minimize(loss_mentee, \n global_step, \n var_list = mentee_variables) \n train_op_finetune = optimizer_finetune.minimize(loss_finetune, \n global_step, \n var_list = mentee_variables) \n else:\n with tf.variable_scope('mentor_cumulative_loss'):\n # Add weight decay and distillation to the loss.\n loss_mentor = cross_entropy_mentor + weight_decay_coeff * l2_mentor\n with tf.variable_scope('mentee_cumulative_loss'): \n loss_mentee = cross_entropy_mentee + weight_decay_coeff * l2_mentee\n with tf.variable_scope('mentee_finetune'):\n loss_finetune = cross_entropy_mentee + weight_decay_coeff * l2_mentee\n train_op_mentor = None\n train_op_mentee = None\n train_op_finetune = None\n\n with tf.variable_scope('metrics'):\n accuracy_mentor = tf.metrics.accuracy(\n tf.argmax(labels, axis=1), predictions_mentor['classes'])\n accuracy_mentee = tf.metrics.accuracy(\n tf.argmax(labels, axis=1), predictions_mentee['classes']) \n metrics = {'accuracy_mentor': accuracy_mentor,\n 'accuracy_mentee': accuracy_mentee}\n\n # Create a tensor named train_accuracy for logging purposes\n tf.identity(accuracy_mentor[1], name='train_accuracy_mentor')\n tf.summary.scalar('train_accuracy_mentor', accuracy_mentor[1])\n tf.identity(accuracy_mentee[1], name='train_accuracy_mentee')\n tf.summary.scalar('train_accuracy_mentee', accuracy_mentee[1])\n\n saver=tf.train.Saver(var_list = tf.global_variables())\n\n if trainee == 'mentor':\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions_mentor,\n loss=loss_mentor,\n train_op=train_op_mentor,\n scaffold=tf.train.Scaffold(saver=saver),\n eval_metric_ops=metrics)\n\n elif trainee == 'mentee':\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions_mentee,\n loss=loss_mentee,\n train_op=train_op_mentee,\n scaffold=tf.train.Scaffold(saver=saver),\n eval_metric_ops=metrics)\n elif trainee == 'finetune':\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions_mentee,\n loss=loss_finetune,\n train_op=train_op_finetune,\n scaffold=tf.train.Scaffold(saver=saver),\n eval_metric_ops=metrics) \n\n\ndef resnet_main(flags, model_function, input_function):\n # Using the Winograd non-fused algorithms provides a small performance boost.\n os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'\n\n # Set up a RunConfig to only save checkpoints once per training cycle.\n run_config = tf.estimator.RunConfig().replace(save_checkpoints_secs=1e9)\n mentor = tf.estimator.Estimator(\n model_fn=model_function, model_dir=flags.model_dir, \n config=run_config,\n params={\n 'resnet_size': [flags.resnet_size_mentor, flags.resnet_size_mentee],\n 'data_format': flags.data_format,\n 'batch_size': flags.batch_size,\n 'distillation_coeff': flags.distillation_coeff,\n 'probes_coeff': flags.probes_coeff,\n 'weight_decay_coeff': flags.weight_decay_coeff,\n 'optimizer': [flags.mentor_optimizer,\n flags.mentee_optimizer,\n flags.finetune_optimizer],\n 'temperature': flags.temperature, \n 'num_probes': flags.num_probes,\n 'pool_probes': flags.pool_probes,\n 'train_epochs_mentor': flags.train_epochs_mentor,\n 'train_epochs_mentee': flags.train_epochs_mentee,\n 'train_epochs_finetune': flags.train_epochs_finetune,\n 'initial_learning_rate_mentor': flags.initial_learning_rate_mentor,\n 'initial_learning_rate_mentee': flags.initial_learning_rate_mentee,\n 'initial_learning_rate_finetune': flags.initial_learning_rate_finetune,\n 'pool_type': flags.pool_type, \n 'trainee': 'mentor'\n })\n\n for i in range(flags.train_epochs_mentor // 
flags.epochs_per_eval):\n tensors_to_log = {\n 'learning_rate': 'learning_rates/learning_rate_mentor',\n 'cross_entropy': 'cross_entropy/cross_entropy_mentor' ,\n 'train_accuracy': 'metrics/train_accuracy_mentor'\n }\n\n logging_hook = tf.train.LoggingTensorHook(\n tensors=tensors_to_log, every_n_iter=100)\n\n def input_fn_train():\n return input_function(True, flags.data_dir, flags.batch_size,\n flags.epochs_per_eval, flags.num_parallel_calls)\n\n print(' *********************** ' )\n print(' Starting a mentor training cycle. [' + str(i) + '/' \n + str(flags.train_epochs_mentor // flags.epochs_per_eval) + ']')\n print(' *********************** ' ) \n \n mentor.train(input_fn=input_fn_train, hooks=[logging_hook])\n\n print('Starting to evaluate.')\n # Evaluate the model and print results\n def input_fn_eval():\n return input_function(False, flags.data_dir, flags.batch_size,\n 1, flags.num_parallel_calls)\n\n eval_results = mentor.evaluate(input_fn=input_fn_eval)\n print(eval_results)\n\n mentee = tf.estimator.Estimator(\n model_fn=model_function, model_dir=flags.model_dir, \n config=run_config,\n params={\n 'resnet_size': [flags.resnet_size_mentor, flags.resnet_size_mentee],\n 'data_format': flags.data_format,\n 'batch_size': flags.batch_size,\n 'distillation_coeff': flags.distillation_coeff,\n 'probes_coeff': flags.probes_coeff, \n 'optimizer': [flags.mentor_optimizer,\n flags.mentee_optimizer,\n flags.finetune_optimizer],\n 'weight_decay_coeff': flags.weight_decay_coeff, \n 'temperature': flags.temperature,\n 'num_probes': flags.num_probes, \n 'pool_probes': flags.pool_probes, \n 'train_epochs_mentor': flags.train_epochs_mentor,\n 'train_epochs_mentee': flags.train_epochs_mentee,\n 'train_epochs_finetune': flags.train_epochs_finetune,\n 'initial_learning_rate_mentor': flags.initial_learning_rate_mentor,\n 'initial_learning_rate_mentee': flags.initial_learning_rate_mentee,\n 'initial_learning_rate_finetune': flags.initial_learning_rate_finetune, \n 'pool_type': flags.pool_type, \n 'trainee': 'mentee'\n })\n\n for i in range(flags.train_epochs_mentee // flags.epochs_per_eval):\n tensors_to_log = {\n 'learning_rate': 'learning_rates/learning_rate_mentee',\n 'cross_entropy': 'cross_entropy/cross_entropy_mentee',\n 'train_accuracy': 'metrics/train_accuracy_mentee',\n 'distillation_loss': 'distillery/distillation_loss',\n 'distillation_coeff':'mentee_cumulative_loss/distillation_coeff_decayed'\n }\n\n logging_hook = tf.train.LoggingTensorHook(\n tensors=tensors_to_log, every_n_iter=100)\n\n def input_fn_train():\n return input_function(True, flags.data_dir, flags.batch_size,\n flags.epochs_per_eval, flags.num_parallel_calls)\n\n print(' *********************** ' )\n print(' Starting a mentee training cycle. 
[' + str(i) + '/' \n + str(flags.train_epochs_mentee // flags.epochs_per_eval) + ']')\n print(' *********************** ' )\n\n mentee.train(input_fn=input_fn_train, hooks=[logging_hook])\n\n print('Starting to evaluate.')\n # Evaluate the model and print results\n def input_fn_eval():\n return input_function(False, flags.data_dir, flags.batch_size,\n 1, flags.num_parallel_calls)\n\n eval_results = mentee.evaluate(input_fn=input_fn_eval)\n print(eval_results)\n\n finetune = tf.estimator.Estimator(\n model_fn=model_function, model_dir=flags.model_dir, \n config=run_config,\n params={\n 'resnet_size': [flags.resnet_size_mentor, flags.resnet_size_mentee],\n 'data_format': flags.data_format,\n 'batch_size': flags.batch_size,\n 'distillation_coeff': flags.distillation_coeff,\n 'probes_coeff': flags.probes_coeff, \n 'optimizer': [flags.mentor_optimizer,\n flags.mentee_optimizer,\n flags.finetune_optimizer],\n 'weight_decay_coeff': flags.weight_decay_coeff, \n 'temperature': flags.temperature,\n 'num_probes': flags.num_probes, \n 'pool_probes': flags.pool_probes,\n 'train_epochs_mentor': flags.train_epochs_mentor,\n 'train_epochs_mentee': flags.train_epochs_mentee,\n 'train_epochs_finetune': flags.train_epochs_finetune,\n 'initial_learning_rate_mentor': flags.initial_learning_rate_mentor,\n 'initial_learning_rate_mentee': flags.initial_learning_rate_mentee,\n 'initial_learning_rate_finetune': flags.initial_learning_rate_finetune,\n 'pool_type': flags.pool_type, \n 'trainee': 'finetune'\n })\n\n for i in range(flags.train_epochs_finetune // flags.epochs_per_eval):\n tensors_to_log = {\n 'learning_rate': 'learning_rates/learning_rate_mentee',\n 'cross_entropy': 'cross_entropy/cross_entropy_mentee',\n 'train_accuracy': 'metrics/train_accuracy_mentee',\n }\n\n logging_hook = tf.train.LoggingTensorHook(\n tensors=tensors_to_log, every_n_iter=100)\n\n def input_fn_train():\n return input_function(True, flags.data_dir, flags.batch_size,\n flags.epochs_per_eval, flags.num_parallel_calls)\n\n print(' *********************** ' )\n print(' Starting a mentee finetune cycle. [' + str(i) + '/' \n + str(flags.train_epochs_finetune // flags.epochs_per_eval) + ']')\n print(' *********************** ' )\n\n finetune.train(input_fn=input_fn_train, hooks=[logging_hook])\n\n print('Starting to evaluate.')\n # Evaluate the model and print results\n def input_fn_eval():\n return input_function(False, flags.data_dir, flags.batch_size,\n 1, flags.num_parallel_calls)\n\n eval_results = finetune.evaluate(input_fn=input_fn_eval)\n print(eval_results)\n\nclass ResnetArgParser(argparse.ArgumentParser):\n \"\"\"Arguments for configuring and running a Resnet Model.\n \"\"\"\n\n def __init__(self, resnet_size_choices=None):\n super(ResnetArgParser, self).__init__()\n self.add_argument(\n '--data_dir', type=str, default='./resnet_data',\n help='The directory where the input data is stored.')\n\n self.add_argument(\n '--num_parallel_calls', type=int, default=5,\n help='The number of records that are processed in parallel '\n 'during input processing. 
This can be optimized per data set but '\n 'for generally homogeneous data sets, should be approximately the '\n 'number of available CPU cores.')\n\n self.add_argument(\n '--model_dir', type=str, default='./resnet_model',\n help='The directory where the model will be stored.')\n\n self.add_argument(\n '--resnet_size_mentor', type=int, default=50,\n choices=resnet_size_choices,\n help='The size of the ResNet Mentor model to use.')\n\n self.add_argument(\n '--resnet_size_mentee', type=int, default=10,\n choices=resnet_size_choices,\n help='The size of the ResNet Mentee model to use.')\n\n self.add_argument(\n '--train_epochs_mentor', type=int, default=100,\n help='The number of epochs to use for training.')\n\n self.add_argument(\n '--train_epochs_mentee', type=int, default=100,\n help='The number of epochs to use for training.')\n\n self.add_argument(\n '--train_epochs_finetune', type=int, default=100,\n help='The number of epochs to use for training.')\n\n self.add_argument(\n '--epochs_per_eval', type=int, default=1,\n help='The number of training epochs to run between evaluations.')\n\n self.add_argument(\n '--batch_size', type=int, default=32,\n help='Batch size for training and evaluation.')\n\n self.add_argument(\n '--mentor_optimizer', type=str, default='momentum',\n help='Optimizer for training and evaluation.')\n\n self.add_argument(\n '--mentee_optimizer', type=str, default='momentum',\n help='Optimizer for training and evaluation.')\n\n self.add_argument(\n '--finetune_optimizer', type=str, default='momentum',\n help='Optimizer for training and evaluation.')\n\n self.add_argument(\n '--data_format', type=str, default=None,\n choices=['channels_first', 'channels_last'],\n help='A flag to override the data format used in the model. '\n 'channels_first provides a performance boost on GPU but '\n 'is not always compatible with CPU. If left unspecified, '\n 'the data format will be chosen automatically based on '\n 'whether TensorFlow was built for CPU or GPU.')\n\n self.add_argument(\n '--distillation_coeff', type=float, default=0.01,\n help='Coefficient of distillation to be applied from parent to'\n 'child. This is only useful when performing distillaiton.')\n\n self.add_argument(\n '--probes_coeff', type=float, default=0.0001,\n help='Coefficient of weight to be applied from parent to'\n 'child. This is only useful when performing mentoring.')\n\n self.add_argument(\n '--weight_decay_coeff', type=float, default=0.0002,\n help='Coefficient of weight to be applied from to the'\n 'weight decay regularizer.')\n\n self.add_argument(\n '--temperature', type=float, default=3,\n help='Temperature to be used for the softmax layer')\n\n self.add_argument(\n '--num_probes', type=int, default=0,\n help='Number of probes to be used')\n\n self.add_argument(\n '--pool_probes', type=int, default=2,\n help='Maxpool probes by')\n\n self.add_argument(\n '--initial_learning_rate_mentor', type=float, default=0.001,\n help='Set initial learning rate for mentor') \n\n self.add_argument(\n '--initial_learning_rate_mentee', type=float, default=0.001,\n help='Set initial learning rate for mentee') \n\n self.add_argument(\n '--initial_learning_rate_finetune', type=float, default=0.001,\n help='Set initial learning rate finetune') \n\n self.add_argument(\n '--pool_type', type=str, default='max',\n help='Pool type for probes.') "
] | [
[
"tensorflow.train.LoggingTensorHook",
"tensorflow.control_dependencies",
"tensorflow.test.is_built_with_cuda",
"tensorflow.cast",
"tensorflow.global_variables",
"tensorflow.nn.l2_loss",
"tensorflow.pad",
"tensorflow.train.AdamOptimizer",
"tensorflow.estimator.RunConfig",
"tensorflow.summary.scalar",
"tensorflow.layers.batch_normalization",
"tensorflow.get_collection",
"tensorflow.summary.image",
"tensorflow.layers.dense",
"tensorflow.train.get_or_create_global_step",
"tensorflow.train.exponential_decay",
"tensorflow.losses.softmax_cross_entropy",
"tensorflow.train.piecewise_constant",
"tensorflow.div",
"tensorflow.train.MomentumOptimizer",
"tensorflow.argmax",
"tensorflow.estimator.Estimator",
"tensorflow.identity",
"tensorflow.train.AdadeltaOptimizer",
"tensorflow.variance_scaling_initializer",
"tensorflow.nn.relu",
"tensorflow.losses.mean_squared_error",
"tensorflow.nn.softmax",
"tensorflow.constant",
"tensorflow.transpose",
"tensorflow.train.Scaffold",
"tensorflow.reshape",
"tensorflow.layers.max_pooling2d",
"tensorflow.estimator.EstimatorSpec",
"tensorflow.layers.average_pooling2d",
"tensorflow.variable_scope"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
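The mentor/mentee code embedded in the row above couples the two networks through a temperature-scaled distillation term: the mentor's logits are divided by a temperature, converted into a soft label distribution, and the mentee pays a soft cross-entropy against that distribution. A minimal NumPy sketch of just that term follows, written under the same convention (temperature defaulting to 3, mean taken over the batch); it is a standalone illustration, and the helper names are illustrative rather than taken from the row.

import numpy as np

def softmax(z, axis=-1):
    # Numerically stable softmax.
    z = z - z.max(axis=axis, keepdims=True)
    e = np.exp(z)
    return e / e.sum(axis=axis, keepdims=True)

def distillation_loss(logits_mentor, logits_mentee, temperature=3.0):
    # Soft cross-entropy between temperature-scaled mentor and mentee logits:
    # the mentor's softened softmax acts as the label distribution for the mentee.
    soft_targets = softmax(logits_mentor / temperature)       # mentor "labels"
    log_probs = np.log(softmax(logits_mentee / temperature))  # mentee log-probabilities
    # Per-example cross-entropy, averaged over the batch.
    return float(np.mean(-(soft_targets * log_probs).sum(axis=-1)))

# Illustrative usage with random logits for a batch of 4 examples and 10 classes.
rng = np.random.default_rng(0)
mentor_logits = rng.normal(size=(4, 10))
mentee_logits = rng.normal(size=(4, 10))
print(distillation_loss(mentor_logits, mentee_logits, temperature=3.0))

In the embedded resnet_model_fn this quantity is then scaled by a distillation coefficient that decays exponentially with the global step before being added to the mentee's cross-entropy and weight-decay losses.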
dumpmemory/google-research | [
"bc87d010ab9086b6e92c3f075410fa6e1f27251b",
"bc87d010ab9086b6e92c3f075410fa6e1f27251b",
"bc87d010ab9086b6e92c3f075410fa6e1f27251b",
"bc87d010ab9086b6e92c3f075410fa6e1f27251b",
"bc87d010ab9086b6e92c3f075410fa6e1f27251b"
] | [
"minigrid_basics/examples/rw_four_directions.py",
"aux_tasks/auxiliary_mc/discounted_dqn_agent.py",
"contrastive_rl/env_utils.py",
"hypertransformer/tf/utils/cache_miniimagenet.py",
"hypertransformer/tf/core/layerwise_test.py"
] | [
"# coding=utf-8\n# Copyright 2022 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"Example that uses Gym-Minigrid, a custom environment, and custom actions.\n\nGym-Minigrid has a larger action space that is not standard in reinforcement\nlearning. By default, the actions are {rotate left, rotate right, forward, pick\nup object, drop object, toggle/activate object, done}. This example uses a class\noverridden to have the standard 4 directional actions: {left, right, up, down}.\n\nHere we have a random agent interacting with the environment. In this case, we\nalso use a custom environment, which is likely what one will do in their\nresearch. We are writing the agent observations to the disk just as a simple way\nto get some feedback of what is going on.\n\nSample run:\n\n ```\n python -m minigrid_basics.examples.rw_four_directions \\\n --gin_bindings=\"MonMiniGridEnv.stochasticity=0.1\"\n ```\n\n\"\"\"\n\nimport os\n\nfrom absl import app\nfrom absl import flags\nimport gin\nimport gym\nimport gym_minigrid # pylint: disable=unused-import\nfrom gym_minigrid.wrappers import RGBImgObsWrapper\nimport matplotlib.pylab as plt\nimport tensorflow as tf\n\nfrom minigrid_basics.custom_wrappers import tabular_wrapper # pylint: disable=unused-import\nfrom minigrid_basics.envs import mon_minigrid\n\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string('file_path', '/tmp/rw_four_directions',\n 'Path in which we will save the observations.')\nflags.DEFINE_multi_string(\n 'gin_bindings', [],\n 'Gin bindings to override default parameter values '\n '(e.g. \"MonMiniGridEnv.stochasticity=0.1\").')\n\n\ndef main(argv):\n if len(argv) > 1:\n raise app.UsageError('Too many command-line arguments.')\n\n gin.parse_config_files_and_bindings(\n [os.path.join(mon_minigrid.GIN_FILES_PREFIX, 'classic_fourrooms.gin')],\n bindings=FLAGS.gin_bindings,\n skip_unknown=False)\n env_id = mon_minigrid.register_environment()\n env = gym.make(env_id)\n env = RGBImgObsWrapper(env) # Get pixel observations\n # Get tabular observation and drop the 'mission' field:\n env = tabular_wrapper.TabularWrapper(env, get_rgb=True)\n env.reset()\n\n num_frames = 0\n max_num_frames = 500\n\n if not tf.io.gfile.exists(FLAGS.file_path):\n tf.io.gfile.makedirs(FLAGS.file_path)\n\n undisc_return = 0\n while num_frames < max_num_frames:\n # Act randomly\n obs, reward, done, _ = env.step(env.action_space.sample())\n undisc_return += reward\n num_frames += 1\n\n print('t:', num_frames, ' s:', obs['state'])\n # Draw environment frame just for simple visualization\n plt.imshow(obs['image'])\n path = os.path.join(FLAGS.file_path, 'obs_{}.png'.format(num_frames))\n plt.savefig(path)\n plt.clf()\n\n if done:\n break\n\n print('Undiscounted return: %.2f' % undisc_return)\n env.close()\n\n\nif __name__ == '__main__':\n app.run(main)\n",
"# coding=utf-8\n# Copyright 2022 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"DQN Agent with time input.\"\"\"\nimport collections\nimport functools\nfrom typing import Tuple\n\nfrom dopamine.jax import losses\nfrom dopamine.jax import networks\nfrom dopamine.jax.agents.dqn import dqn_agent\nfrom flax import linen as nn\nimport gin\nimport jax\nimport jax.numpy as jnp\nimport numpy as onp\nimport optax\nimport tensorflow as tf\n\nfrom aux_tasks.auxiliary_mc import gammas_monte_carlo_replay_buffer as monte_carlo_rb\nfrom aux_tasks.auxiliary_mc import networks as aux_mc_networks\n\nAuxiliaryPredictionDQNNetworkType = collections.namedtuple(\n 'dqn_network_with_random_rewards', ['q_values', 'aux_prediction'])\n\n\[email protected]\nclass DQNNetworkWithAuxiliaryPredictions(nn.Module):\n \"\"\"Generates q_values with per-state auxiliary predictions.\n\n Attributes:\n num_actions: int, number of actions the agent can take at any state.\n num_predictions: int, number of auxiliary predictions.\n rng_key: int, Fixed rng for random reward generation.\n inputs_preprocessed: bool, Whether inputs are already preprocessed.\n \"\"\"\n num_actions: int\n num_predictions: int\n inputs_preprocessed: bool = False\n\n @nn.compact\n def __call__(self, x):\n\n initializer = nn.initializers.xavier_uniform()\n if not self.inputs_preprocessed:\n x = networks.preprocess_atari_inputs(x)\n\n hidden_sizes = [32, 64, 64]\n kernel_sizes = [8, 4, 3]\n stride_sizes = [4, 2, 1]\n for hidden_size, kernel_size, stride_size in zip(hidden_sizes, kernel_sizes,\n stride_sizes):\n x = nn.Conv(\n features=hidden_size,\n kernel_size=(kernel_size, kernel_size),\n strides=(stride_size, stride_size),\n kernel_init=initializer)(x)\n x = nn.relu(x)\n features = x.reshape((-1)) # flatten\n x = nn.Dense(features=512, kernel_init=initializer)(features)\n x = nn.relu(x)\n q_values = nn.Dense(features=self.num_actions, kernel_init=initializer)(x)\n\n # MSE loss for Auxiliary task MC predictions.\n auxiliary_pred = nn.Dense(features=512, kernel_init=initializer)(features)\n auxiliary_pred = nn.relu(auxiliary_pred)\n auxiliary_pred = nn.Dense(\n features=self.num_predictions, kernel_init=initializer)(auxiliary_pred)\n return AuxiliaryPredictionDQNNetworkType(q_values, auxiliary_pred)\n\n\[email protected]\nclass ImpalaEncoderWithAuxiliaryPredictions(nn.Module):\n \"\"\"Impala Network generating q_values with per-state auxiliary predictions.\"\"\"\n num_actions: int\n num_predictions: int\n inputs_preprocessed: bool = False\n stack_sizes: Tuple[int, Ellipsis] = (16, 32, 32)\n num_blocks: int = 2\n\n def setup(self):\n self.encoder = aux_mc_networks.ImpalaEncoder()\n\n @nn.compact\n def __call__(self, x, key=None):\n # Generate a random number generation key if not provided\n initializer = nn.initializers.xavier_uniform()\n if not self.inputs_preprocessed:\n x = networks.preprocess_atari_inputs(x)\n\n x = self.encoder(x)\n features = x.reshape((-1)) # flatten\n\n x = nn.Dense(\n features=512, 
kernel_init=initializer)(features)\n x = nn.relu(x)\n q_values = nn.Dense(features=self.num_actions, kernel_init=initializer)(x)\n\n # MSE loss for Auxiliary task MC predictions.\n auxiliary_pred = nn.Dense(features=512, kernel_init=initializer)(features)\n auxiliary_pred = nn.relu(auxiliary_pred)\n auxiliary_pred = nn.Dense(\n features=self.num_predictions, kernel_init=initializer)(auxiliary_pred)\n\n return AuxiliaryPredictionDQNNetworkType(q_values, auxiliary_pred)\n\n\[email protected]\nclass RandomRewardNetwork(nn.Module):\n \"\"\"Generates random rewards using a noisy network.\n\n Attributes:\n num_actions: int, number of actions the agent can take at any state.\n num_rewards: int, number of random rewards to generate.\n rng_key: int, Fixed rng for random reward generation.\n inputs_preprocessed: bool, Whether inputs are already preprocessed.\n \"\"\"\n num_actions: int\n num_rewards: int\n inputs_preprocessed: bool = False\n\n @nn.compact\n def __call__(self, x, rng_key):\n\n initializer = nn.initializers.xavier_uniform()\n if not self.inputs_preprocessed:\n x = networks.preprocess_atari_inputs(x)\n\n hidden_sizes = [32, 64, 64]\n kernel_sizes = [8, 4, 3]\n stride_sizes = [4, 2, 1]\n for hidden_size, kernel_size, stride_size in zip(hidden_sizes, kernel_sizes,\n stride_sizes):\n x = nn.Conv(\n features=hidden_size,\n kernel_size=(kernel_size, kernel_size),\n strides=(stride_size, stride_size),\n kernel_init=initializer)(x)\n x = nn.relu(x)\n features = x.reshape((-1)) # flatten\n\n # Use a fixed random seed for NoisyNetwork.\n net = networks.NoisyNetwork(rng_key=rng_key, eval_mode=False)\n # Return `self.num_rewards` random outputs.\n rewards = net(features, self.num_rewards)\n x = jax.nn.sigmoid(features) # clip rewards between -1 and 1\n return rewards\n\n\[email protected](jax.jit, static_argnames=('network_def'))\ndef get_rewards(network_def, params, state, rng_key):\n return network_def.apply(params, state, rng_key=rng_key)\n\n\[email protected](\n jax.jit,\n static_argnames=('network_def', 'optimizer', 'cumulative_gamma',\n 'loss_type'))\ndef train(network_def,\n online_params,\n target_params,\n optimizer,\n optimizer_state,\n states,\n auxiliary_mc_returns,\n actions,\n next_states,\n rewards,\n terminals,\n cumulative_gamma,\n auxloss_weight=0.0):\n \"\"\"Run the training step.\"\"\"\n def loss_fn(params, target, auxiliary_target):\n def q_online(state):\n return network_def.apply(params, state)\n\n model_output = jax.vmap(q_online)(states)\n q_values = jnp.squeeze(model_output.q_values)\n replay_chosen_q = jax.vmap(lambda x, y: x[y])(q_values, actions)\n td_loss = jnp.mean(jax.vmap(losses.mse_loss)(target, replay_chosen_q))\n\n # Auxiliary task loss.\n auxiliary_predictions = jnp.squeeze(model_output.aux_prediction)\n aux_loss = jnp.mean(jax.vmap(losses.mse_loss)(\n auxiliary_predictions, auxiliary_target))\n loss = ((1. 
- auxloss_weight) * td_loss +\n auxloss_weight * aux_loss)\n return loss, (td_loss, aux_loss)\n\n def q_target(state):\n return network_def.apply(target_params, state)\n\n target = dqn_agent.target_q(q_target, next_states, rewards, terminals,\n cumulative_gamma)\n grad_fn = jax.value_and_grad(loss_fn, has_aux=True)\n (loss, component_losses), grad = grad_fn(online_params, target,\n auxiliary_mc_returns)\n td_loss, aux_loss = component_losses\n updates, optimizer_state = optimizer.update(grad, optimizer_state,\n params=online_params)\n online_params = optax.apply_updates(online_params, updates)\n return optimizer_state, online_params, loss, td_loss, aux_loss\n\n\[email protected]\nclass DiscountedJaxDQNAgentWithAuxiliaryMC(dqn_agent.JaxDQNAgent):\n \"\"\"An implementation of the DQN agent with replay buffer logging to disk.\"\"\"\n\n def __init__(self,\n num_actions,\n network=DQNNetworkWithAuxiliaryPredictions,\n num_rewards=2,\n auxloss_weight=0.0,\n summary_writer=None,\n preprocess_fn=None,\n seed=None):\n \"\"\"Initializes the agent and constructs the components of its graph.\n\n Args:\n num_actions: int, number of actions the agent can take at any state.\n network: Jax network to use for training.\n num_rewards: int, Number of random rewards to generate at each step.\n auxloss_weight: float: weight for aux loss.\n summary_writer: Tensorflow summary writer for logging summaries.\n preprocess_fn: Preprocessing function.\n seed: int, Agent seed.\n \"\"\"\n network = functools.partial(network, num_predictions=num_rewards)\n self.num_rewards = num_rewards\n self._auxloss_weight = auxloss_weight\n super().__init__(\n num_actions, network=network, summary_writer=summary_writer, seed=seed,\n preprocess_fn=preprocess_fn)\n # Create network for random reward generation.\n\n def _build_replay_buffer(self):\n \"\"\"Creates a monte carlo replay buffer used by the agent.\"\"\"\n\n return monte_carlo_rb.OutOfGraphReplayBufferdiscountedWithMC(\n observation_shape=self.observation_shape,\n stack_size=self.stack_size,\n update_horizon=self.update_horizon,\n gamma=self.gamma,\n observation_dtype=self.observation_dtype,\n list_of_discounts=onp.linspace(0.1, 0.999, self.num_rewards))\n # Pass a compy of `extra_storage_types` to avoid updating it when\n # updating `extra_monte_carlo_storage_types`.\n # extra_monte_carlo_storage_types=extra_storage_types[:],\n # reverse_fill=True)\n\n def _train_step(self):\n \"\"\"Runs a single training step.\"\"\"\n # Run a train op at the rate of self.update_period if enough training steps\n # have been run. 
This matches the Nature DQN behaviour.\n if self._replay.add_count > self.min_replay_history:\n if self.training_steps % self.update_period == 0:\n self._sample_from_replay_buffer()\n states = self.preprocess_fn(self.replay_elements['state'])\n next_states = self.preprocess_fn(self.replay_elements['next_state'])\n self.optimizer_state, self.online_params, loss, td_loss, auxloss = train(\n self.network_def,\n self.online_params,\n self.target_network_params,\n self.optimizer,\n self.optimizer_state,\n states,\n # List of monte carlo returns for all gamma.\n self.replay_elements['monte_carlo_gamma'],\n self.replay_elements['action'],\n next_states,\n self.replay_elements['reward'],\n self.replay_elements['terminal'],\n self.cumulative_gamma,\n self._auxloss_weight)\n if (self.summary_writer is not None and\n self.training_steps > 0 and\n self.training_steps % self.summary_writing_frequency == 0):\n with self.summary_writer.as_default():\n tf.summary.scalar('Losses/Aggregate', loss, step=self.training_steps)\n tf.summary.scalar(\n 'Losses/Auxiliary',\n auxloss,\n step=self.training_steps)\n tf.summary.scalar('Losses/TD', td_loss, step=self.training_steps)\n self.summary_writer.flush()\n if self.training_steps % self.target_update_period == 0:\n self._sync_weights()\n\n self.training_steps += 1\n",
"# coding=utf-8\n# Copyright 2022 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utility for loading the goal-conditioned environments.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport ant_env\nimport fetch_envs\nimport gym\nimport metaworld\nimport numpy as np\nimport point_env\n\nos.environ['SDL_VIDEODRIVER'] = 'dummy'\n\n\ndef euler2quat(euler):\n \"\"\"Convert Euler angles to quaternions.\"\"\"\n euler = np.asarray(euler, dtype=np.float64)\n assert euler.shape[-1] == 3, 'Invalid shape euler {}'.format(euler)\n\n ai, aj, ak = euler[Ellipsis, 2] / 2, -euler[Ellipsis, 1] / 2, euler[Ellipsis, 0] / 2\n si, sj, sk = np.sin(ai), np.sin(aj), np.sin(ak)\n ci, cj, ck = np.cos(ai), np.cos(aj), np.cos(ak)\n cc, cs = ci * ck, ci * sk\n sc, ss = si * ck, si * sk\n\n quat = np.empty(euler.shape[:-1] + (4,), dtype=np.float64)\n quat[Ellipsis, 0] = cj * cc + sj * ss\n quat[Ellipsis, 3] = cj * sc - sj * cs\n quat[Ellipsis, 2] = -(cj * ss + sj * cc)\n quat[Ellipsis, 1] = cj * cs - sj * sc\n return quat\n\n\ndef load(env_name):\n \"\"\"Loads the train and eval environments, as well as the obs_dim.\"\"\"\n # pylint: disable=invalid-name\n kwargs = {}\n if env_name == 'sawyer_push':\n CLASS = SawyerPush\n max_episode_steps = 150\n elif env_name == 'sawyer_drawer':\n CLASS = SawyerDrawer\n max_episode_steps = 150\n elif env_name == 'sawyer_drawer_image':\n CLASS = SawyerDrawerImage\n max_episode_steps = 50\n kwargs['task'] = 'openclose'\n elif env_name == 'sawyer_window_image':\n CLASS = SawyerWindowImage\n kwargs['task'] = 'openclose'\n max_episode_steps = 50\n elif env_name == 'sawyer_push_image':\n CLASS = SawyerPushImage\n max_episode_steps = 150\n kwargs['start_at_obj'] = True\n elif env_name == 'sawyer_bin':\n CLASS = SawyerBin\n max_episode_steps = 150\n elif env_name == 'sawyer_bin_image':\n CLASS = SawyerBinImage\n max_episode_steps = 150\n elif env_name == 'sawyer_window':\n CLASS = SawyerWindow\n max_episode_steps = 150\n elif env_name == 'fetch_reach':\n CLASS = fetch_envs.FetchReachEnv\n max_episode_steps = 50\n elif env_name == 'fetch_push':\n CLASS = fetch_envs.FetchPushEnv\n max_episode_steps = 50\n elif env_name == 'fetch_reach_image':\n CLASS = fetch_envs.FetchReachImage\n max_episode_steps = 50\n elif env_name == 'fetch_push_image':\n CLASS = fetch_envs.FetchPushImage\n max_episode_steps = 50\n kwargs['rand_y'] = True\n elif env_name.startswith('ant_'):\n _, map_name = env_name.split('_')\n assert map_name in ['umaze', 'medium', 'large']\n CLASS = ant_env.AntMaze\n kwargs['map_name'] = map_name\n kwargs['non_zero_reset'] = True\n if map_name == 'umaze':\n max_episode_steps = 700\n else:\n max_episode_steps = 1000\n elif env_name.startswith('point_image'):\n CLASS = point_env.PointImage\n kwargs['walls'] = env_name.split('_')[-1]\n if '11x11' in env_name:\n max_episode_steps = 100\n else:\n max_episode_steps = 50\n elif env_name.startswith('point_'):\n CLASS = 
point_env.PointEnv\n kwargs['walls'] = env_name.split('_')[-1]\n if '11x11' in env_name:\n max_episode_steps = 100\n else:\n max_episode_steps = 50\n else:\n raise NotImplementedError('Unsupported environment: %s' % env_name)\n\n # Disable type checking in line below because different environments have\n # different kwargs, which pytype doesn't reason about.\n gym_env = CLASS(**kwargs) # pytype: disable=wrong-keyword-args\n obs_dim = gym_env.observation_space.shape[0] // 2\n return gym_env, obs_dim, max_episode_steps\n\n\nclass SawyerPush(metaworld.envs.mujoco.env_dict.ALL_V2_ENVIRONMENTS['push-v2']):\n \"\"\"Wrapper for the SawyerPush environment.\"\"\"\n\n def __init__(self,\n goal_min_x=-0.1,\n goal_min_y=0.5,\n goal_max_x=0.1,\n goal_max_y=0.9):\n super(SawyerPush, self).__init__()\n self._random_reset_space.low[3] = goal_min_x\n self._random_reset_space.low[4] = goal_min_y\n self._random_reset_space.high[3] = goal_max_x\n self._random_reset_space.high[4] = goal_max_y\n self._partially_observable = False\n self._freeze_rand_vec = False\n self._set_task_called = True\n self.reset()\n self._freeze_rand_vec = False # Set False to randomize the goal position.\n\n @property\n def observation_space(self):\n return gym.spaces.Box(\n low=np.full(14, -np.inf),\n high=np.full(14, np.inf),\n dtype=np.float32)\n\n def _get_obs(self):\n finger_right, finger_left = (self._get_site_pos('rightEndEffector'),\n self._get_site_pos('leftEndEffector'))\n tcp_center = (finger_right + finger_left) / 2.0\n gripper_distance = np.linalg.norm(finger_right - finger_left)\n gripper_distance = np.clip(gripper_distance / 0.1, 0., 1.)\n obj = self._get_pos_objects()\n # Note: we should ignore the target gripper distance. The arm goal is set\n # to be the same as the puck goal.\n state = np.concatenate([tcp_center, obj, [gripper_distance]])\n goal = np.concatenate([self._target_pos, self._target_pos, [0.5]])\n return np.concatenate([state, goal]).astype(np.float32)\n\n def step(self, action):\n obs = super(SawyerPush, self).step(action)\n dist = np.linalg.norm(self._target_pos - self._get_pos_objects())\n r = float(dist < 0.05) # Taken from the metaworld code.\n return obs, r, False, {}\n\n\nclass SawyerDrawer(\n metaworld.envs.mujoco.env_dict.ALL_V2_ENVIRONMENTS['drawer-close-v2']):\n \"\"\"Wrapper for the SawyerDrawer environment.\"\"\"\n\n def __init__(self):\n super(SawyerDrawer, self).__init__()\n self._random_reset_space.low[0] = 0\n self._random_reset_space.high[0] = 0\n self._partially_observable = False\n self._freeze_rand_vec = False\n self._set_task_called = True\n self._target_pos = np.zeros(0) # We will overwrite this later.\n self.reset()\n self._freeze_rand_vec = False # Set False to randomize the goal position.\n\n def _get_pos_objects(self):\n return self.get_body_com('drawer_link') + np.array([.0, -.16, 0.0])\n\n def reset_model(self):\n super(SawyerDrawer, self).reset_model()\n self._set_obj_xyz(np.random.uniform(-0.15, 0.0))\n self._target_pos = self._get_pos_objects().copy()\n\n self._set_obj_xyz(np.random.uniform(-0.15, 0.0))\n return self._get_obs()\n\n @property\n def observation_space(self):\n return gym.spaces.Box(\n low=np.full(8, -np.inf),\n high=np.full(8, np.inf),\n dtype=np.float32)\n\n def _get_obs(self):\n finger_right, finger_left = (self._get_site_pos('rightEndEffector'),\n self._get_site_pos('leftEndEffector'))\n tcp_center = (finger_right + finger_left) / 2.0\n obj = self._get_pos_objects()\n # Arm position is same as drawer position. 
We only provide the drawer\n # Y coordinate.\n return np.concatenate([tcp_center, [obj[1]],\n self._target_pos, [self._target_pos[1]]])\n\n def step(self, action):\n obs = super(SawyerDrawer, self).step(action)\n return obs, 0.0, False, {}\n\n\nclass SawyerWindow(\n metaworld.envs.mujoco.env_dict.ALL_V2_ENVIRONMENTS['window-open-v2']):\n \"\"\"Wrapper for the SawyerWindow environment.\"\"\"\n\n def __init__(self):\n super(SawyerWindow, self).__init__()\n self._random_reset_space.low[:2] = np.array([0.0, 0.8])\n self._random_reset_space.high[:2] = np.array([0.0, 0.8])\n self._partially_observable = False\n self._freeze_rand_vec = False\n self._set_task_called = True\n self._target_pos = np.zeros(3) # We will overwrite this later.\n self.reset()\n self._freeze_rand_vec = False # Set False to randomize the goal position.\n\n def reset_model(self):\n super(SawyerWindow, self).reset_model()\n self.data.set_joint_qpos('window_slide', np.random.uniform(0.0, 0.2))\n self._target_pos = self._get_pos_objects().copy()\n self.data.set_joint_qpos('window_slide', np.random.uniform(0.0, 0.2))\n return self._get_obs()\n\n @property\n def observation_space(self):\n return gym.spaces.Box(\n low=np.full(8, -np.inf),\n high=np.full(8, np.inf),\n dtype=np.float32)\n\n def _get_obs(self):\n finger_right, finger_left = (self._get_site_pos('rightEndEffector'),\n self._get_site_pos('leftEndEffector'))\n tcp_center = (finger_right + finger_left) / 2.0\n obj = self._get_pos_objects()\n # Arm position is same as window position. Only use X position of window.\n return np.concatenate([tcp_center, [obj[0]],\n self._target_pos,\n [self._target_pos[0]]]).astype(np.float32)\n\n def step(self, action):\n obs = super(SawyerWindow, self).step(action)\n return obs, 0.0, False, {}\n\n\nclass SawyerBin(\n metaworld.envs.mujoco.env_dict.ALL_V2_ENVIRONMENTS['bin-picking-v2']):\n \"\"\"Wrapper for the SawyerBin environment.\"\"\"\n\n def __init__(self):\n self._goal = np.zeros(3)\n super(SawyerBin, self).__init__()\n self._partially_observable = False\n self._freeze_rand_vec = False\n self._set_task_called = True\n self.reset()\n self._freeze_rand_vec = False # Set False to randomize the goal position.\n\n def reset(self):\n super(SawyerBin, self).reset()\n body_id = self.model.body_name2id('bin_goal')\n pos1 = self.sim.data.body_xpos[body_id].copy()\n pos1 += np.random.uniform(-0.05, 0.05, 3)\n pos2 = self._get_pos_objects().copy()\n t = np.random.random()\n self._goal = t * pos1 + (1 - t) * pos2\n self._goal[2] = np.random.uniform(0.03, 0.12)\n return self._get_obs()\n\n def step(self, action):\n super(SawyerBin, self).step(action)\n dist = np.linalg.norm(self._goal - self._get_pos_objects())\n r = float(dist < 0.05) # Taken from metaworld\n done = False\n info = {}\n return self._get_obs(), r, done, info\n\n def _get_obs(self):\n pos_hand = self.get_endeff_pos()\n finger_right, finger_left = (\n self._get_site_pos('rightEndEffector'),\n self._get_site_pos('leftEndEffector')\n )\n gripper_distance_apart = np.linalg.norm(finger_right - finger_left)\n gripper_distance_apart = np.clip(gripper_distance_apart / 0.1, 0., 1.)\n obs = np.concatenate((pos_hand, [gripper_distance_apart],\n self._get_pos_objects()))\n goal = np.concatenate([self._goal + np.array([0.0, 0.0, 0.03]),\n [0.4], self._goal])\n return np.concatenate([obs, goal]).astype(np.float32)\n\n @property\n def observation_space(self):\n return gym.spaces.Box(\n low=np.full(2 * 7, -np.inf),\n high=np.full(2 * 7, np.inf),\n dtype=np.float32)\n\n\nclass 
SawyerDrawerImage(SawyerDrawer):\n \"\"\"Wrapper for the SawyerDrawer environment with image observations.\"\"\"\n\n def __init__(self, camera='corner2', task='openclose'):\n self._task = task\n self._camera_name = camera\n self._dist = []\n self._dist_vec = []\n super(SawyerDrawerImage, self).__init__()\n\n def reset_metrics(self):\n self._dist_vec = []\n self._dist = []\n\n def step(self, action):\n _, _, done, info = super(SawyerDrawerImage, self).step(action)\n y = self._get_pos_objects()[1]\n # L1 distance between current and target drawer location.\n dist = abs(y - self._goal_y)\n self._dist.append(dist)\n r = float(dist < 0.04)\n img = self._get_img()\n return np.concatenate([img, self._goal_img], axis=-1), r, done, info\n\n def _move_hand_to_obj(self):\n for _ in range(20):\n self.data.set_mocap_pos(\n 'mocap', self._get_pos_objects() + np.array([0.0, 0.0, 0.03]))\n self.data.set_mocap_quat('mocap', np.array([1, 0, 1, 0]))\n self.do_simulation([-1, 1], self.frame_skip)\n\n def reset(self):\n if self._dist:\n self._dist_vec.append(self._dist)\n self._dist = []\n\n # reset the cameras\n camera_name = 'behindGripper'\n index = self.model.camera_name2id(camera_name)\n self.model.cam_fovy[index] = 30.0\n\n camera_name = 'topview'\n index = self.model.camera_name2id(camera_name)\n self.model.cam_fovy[index] = 20.0\n self.model.cam_pos[index][1] = 0.7\n\n camera_name = 'corner2'\n index = self.model.camera_name2id(camera_name)\n self.model.cam_fovy[index] = 8.0\n self.model.cam_pos[index][0] = 1.5\n self.model.cam_pos[index][1] = -0.2\n self.model.cam_pos[index][2] = 1.1\n\n camera_name = 'corner3'\n index = self.model.camera_name2id(camera_name)\n self.model.cam_fovy[index] = 30.0\n self.model.cam_pos[index][0] = 0.3\n self.model.cam_pos[index][1] = 0.45\n self.model.cam_pos[index][2] = 0.7\n\n # Get the goal image.\n super(SawyerDrawerImage, self).reset()\n self._move_hand_to_obj()\n self._goal_y = self._get_pos_objects()[1]\n self._goal_img = self._get_img()\n\n # Reset the environment again.\n super(SawyerDrawerImage, self).reset()\n if self._task == 'close':\n self._set_obj_xyz(-0.15)\n elif self._task == 'open':\n self._set_obj_xyz(0.0)\n else:\n assert self._task == 'openclose'\n self._set_obj_xyz(np.random.choice([-0.15, 0.0]))\n self._move_hand_to_obj()\n img = self._get_img()\n\n # Add the initial distance.\n y = self._get_pos_objects()[1]\n # L1 distance between current and target drawer location.\n dist = abs(y - self._goal_y)\n self._dist.append(dist)\n return np.concatenate([img, self._goal_img], axis=-1)\n\n def _get_img(self):\n assert self._camera_name in ['behindGripper', 'topview',\n 'corner2', 'corner3']\n # Hide the goal marker position\n self._set_pos_site('goal', np.inf * self._target_pos)\n # IMPORTANT: Pull the context to the current thread.\n for ctx in self.sim.render_contexts:\n ctx.opengl_context.make_context_current()\n\n img = self.render(offscreen=True,\n resolution=(64, 64),\n camera_name=self._camera_name)\n if self._camera_name in ['behindGripper']:\n img = img[::-1]\n return img.flatten()\n\n @property\n def observation_space(self):\n return gym.spaces.Box(\n low=np.full((64*64*6), 0),\n high=np.full((64*64*6), 255),\n dtype=np.uint8)\n\n\nclass SawyerPushImage(\n metaworld.envs.mujoco.env_dict.ALL_V2_ENVIRONMENTS['push-v2']):\n \"\"\"Wrapper for the SawyerPush environment with image observations.\"\"\"\n\n def __init__(self, camera='corner2', rand_y=True, start_at_obj=False):\n self._start_at_obj = start_at_obj\n self._rand_y = rand_y\n 
self._camera_name = camera\n self._dist = []\n self._dist_vec = []\n super(SawyerPushImage, self).__init__()\n self._partially_observable = False\n self._freeze_rand_vec = False\n self._set_task_called = True\n self.reset()\n self._freeze_rand_vec = False # Set False to randomize the goal position.\n\n def reset(self):\n if self._dist:\n self._dist_vec.append(self._dist)\n self._dist = []\n\n camera_name = 'corner'\n index = self.model.camera_name2id(camera_name)\n self.model.cam_fovy[index] = 20.0\n self.model.cam_pos[index][2] = 0.5\n self.model.cam_pos[index][0] = -1.0\n\n camera_name = 'corner2'\n index = self.model.camera_name2id(camera_name)\n self.model.cam_fovy[index] = 45\n self.model.cam_pos[index][0] = 0.7\n self.model.cam_pos[index][1] = 0.65\n self.model.cam_pos[index][2] = 0.1\n self.model.cam_quat[index] = euler2quat(\n np.array([-np.pi / 2, np.pi / 2, 0.0]))\n\n # Get the goal image.\n s = super(SawyerPushImage, self).reset()\n self._goal = s[:7][3:6]\n self._goal[1] += np.random.uniform(0.0, 0.25)\n if self._rand_y:\n self._goal[0] += np.random.uniform(-0.1, 0.1)\n self._set_obj_xyz(self._goal)\n for _ in range(200):\n self.data.set_mocap_pos('mocap', self._get_pos_objects())\n self._set_obj_xyz(self._goal)\n self.do_simulation([-1, 1], self.frame_skip)\n self._goal_img = self._get_img()\n\n # Reset the environment again.\n s = super(SawyerPushImage, self).reset()\n obj = s[:7][3:6] + np.array([0.0, -0.2, 0.0])\n self._set_obj_xyz(obj)\n self.do_simulation([-1, 1], self.frame_skip)\n if self._start_at_obj:\n for _ in range(20):\n self.data.set_mocap_pos('mocap', self._get_pos_objects())\n self.do_simulation([-1, 1], self.frame_skip)\n img = self._get_img()\n\n # Add the first distances\n obj = self.get_body_com('obj')\n dist = np.linalg.norm(obj - self._goal)\n self._dist.append(dist)\n return np.concatenate([img, self._goal_img], axis=-1)\n\n def step(self, action):\n super(SawyerPushImage, self).step(action)\n obj = self.get_body_com('obj')\n dist = np.linalg.norm(obj - self._goal)\n r = float(dist < 0.05) # Taken from the metaworld code.\n self._dist.append(dist)\n img = self._get_img()\n done = False\n info = {}\n return np.concatenate([img, self._goal_img], axis=-1), r, done, info\n\n def _get_img(self):\n if self._camera_name.startswith('default-'):\n camera_name = self._camera_name.split('default-')[1]\n else:\n camera_name = self._camera_name\n # Hide the goal marker position.\n self._set_pos_site('goal', np.inf * self._target_pos)\n # IMPORTANT: Pull the context to the current thread.\n for ctx in self.sim.render_contexts:\n ctx.opengl_context.make_context_current()\n img = self.render(offscreen=True, resolution=(64, 64),\n camera_name=camera_name)\n if camera_name in ['behindGripper']:\n img = img[::-1]\n return img.flatten()\n\n @property\n def observation_space(self):\n return gym.spaces.Box(\n low=np.full((64*64*6), 0),\n high=np.full((64*64*6), 255),\n dtype=np.uint8)\n\n\nclass SawyerWindowImage(SawyerWindow):\n \"\"\"Wrapper for the SawyerWindow environment with image observations.\"\"\"\n\n def __init__(self, task=None, start_at_obj=True):\n self._start_at_obj = start_at_obj\n self._task = task\n self._camera_name = 'corner2'\n self._dist = []\n self._dist_vec = []\n super(SawyerWindowImage, self).__init__()\n\n def reset_metrics(self):\n self._dist_vec = []\n self._dist = []\n\n def step(self, action):\n _, _, done, info = super(SawyerWindowImage, self).step(action)\n x = self.data.get_joint_qpos('window_slide')\n # L1 distance between current and target 
drawer location.\n dist = abs(x - self._goal_x)\n self._dist.append(dist)\n r = (dist < 0.05)\n img = self._get_img()\n return np.concatenate([img, self._goal_img], axis=-1), r, done, info\n\n def reset(self):\n if self._dist:\n self._dist_vec.append(self._dist)\n self._dist = []\n\n # Reset the cameras.\n camera_name = 'corner2'\n index = self.model.camera_name2id(camera_name)\n if self._start_at_obj:\n self.model.cam_fovy[index] = 10.0\n self.model.cam_pos[index][0] = 1.5\n self.model.cam_pos[index][1] = -0.1\n self.model.cam_pos[index][2] = 1.1\n else:\n self.model.cam_fovy[index] = 17.0\n self.model.cam_pos[index][1] = -0.1\n self.model.cam_pos[index][2] = 1.1\n\n # Get the goal image.\n super(SawyerWindowImage, self).reset()\n goal_slide_pos = np.random.uniform(0, 0.2)\n for _ in range(20):\n self.data.set_mocap_pos('mocap', self._get_pos_objects())\n self.data.set_joint_qpos('window_slide', goal_slide_pos)\n self.do_simulation([-1, 1], self.frame_skip)\n self._goal_x = goal_slide_pos\n self._goal_img = self._get_img()\n\n # Reset the environment again.\n super(SawyerWindowImage, self).reset()\n if self._task == 'open':\n init_slide_pos = 0.0\n elif self._task == 'close':\n init_slide_pos = 0.2\n else:\n assert self._task == 'openclose'\n init_slide_pos = np.random.choice([0.0, 0.2])\n\n if self._start_at_obj:\n for _ in range(50):\n self.data.set_mocap_pos('mocap', self._get_pos_objects())\n self.data.set_joint_qpos('window_slide', init_slide_pos)\n self.do_simulation([-1, 1], self.frame_skip)\n else:\n self.data.set_joint_qpos('window_slide', init_slide_pos)\n self.do_simulation([-1, 1], self.frame_skip)\n img = self._get_img()\n\n # Add the initial distance.\n x = self.data.get_joint_qpos('window_slide')\n # L1 distance between current and target drawer location.\n dist = abs(x - self._goal_x)\n self._dist.append(dist)\n return np.concatenate([img, self._goal_img], axis=-1)\n\n def _get_img(self):\n assert self._camera_name in ['corner', 'topview', 'corner3',\n 'behindGripper', 'corner2']\n # Hide the goal marker position.\n self._set_pos_site('goal', np.inf * self._target_pos)\n # IMPORTANT: Pull the context to the current thread.\n for ctx in self.sim.render_contexts:\n ctx.opengl_context.make_context_current()\n img = self.render(offscreen=True,\n resolution=(64, 64),\n camera_name=self._camera_name)\n if self._camera_name in ['corner', 'topview', 'behindGripper']:\n img = img[::-1]\n return img.flatten()\n\n @property\n def observation_space(self):\n return gym.spaces.Box(\n low=np.full((64*64*6), 0),\n high=np.full((64*64*6), 255),\n dtype=np.uint8)\n\n\nclass SawyerBinImage(\n metaworld.envs.mujoco.env_dict.ALL_V2_ENVIRONMENTS['bin-picking-v2']):\n \"\"\"Wrapper for the SawyerBin environment with image observations.\"\"\"\n\n def __init__(self, camera='corner2', start_at_obj=True, alias=False):\n self._alias = alias\n self._start_at_obj = start_at_obj\n self._dist = []\n self._dist_vec = []\n self._camera_name = camera\n super(SawyerBinImage, self).__init__()\n self._partially_observable = False\n self._freeze_rand_vec = False\n self._set_task_called = True\n self.reset()\n self._freeze_rand_vec = False # Set False to randomize the goal position.\n\n def reset_metrics(self):\n self._dist_vec = []\n self._dist = []\n\n def _hand_obj_dist(self):\n body_id = self.model.body_name2id('hand')\n hand_pos = self.sim.data.body_xpos[body_id]\n obj_pos = self._get_pos_objects()\n return np.linalg.norm(hand_pos - obj_pos)\n\n def _obj_goal_dist(self):\n obj_pos = 
self._get_pos_objects()\n return np.linalg.norm(self._goal[:2] - obj_pos[:2])\n\n def step(self, action):\n super(SawyerBinImage, self).step(action)\n dist = self._obj_goal_dist()\n self._dist.append(dist)\n r = float(dist < 0.05) # Success if within 5cm of the goal.\n img = self._get_img()\n done = False\n info = {}\n return np.concatenate([img, self._goal_img], axis=-1), r, done, info\n\n def reset(self):\n if self._dist:\n self._dist_vec.append(self._dist)\n self._dist = []\n\n # reset the cameras\n camera_name = 'corner2'\n index = self.model.camera_name2id(camera_name)\n self.model.cam_fovy[index] = 14.0\n self.model.cam_pos[index][0] = 1.3\n self.model.cam_pos[index][1] = -0.05\n self.model.cam_pos[index][2] = 0.9\n\n camera_name = 'topview'\n index = self.model.camera_name2id(camera_name)\n self.model.cam_pos[index][1] = 0.7\n self.model.cam_pos[index][2] = 0.9\n\n # Get the goal image.\n super(SawyerBinImage, self).reset()\n body_id = self.model.body_name2id('bin_goal')\n obj_pos = self.sim.data.body_xpos[body_id].copy()\n obj_pos[:2] += np.random.uniform(-0.05, 0.05, 2)\n obj_pos[2] = 0.05\n self._set_obj_xyz(obj_pos)\n hand_offset = np.random.uniform([0.0, 0.0, 0.0],\n [0.0, 0.0, 0.05])\n for t in range(40):\n self.data.set_mocap_pos('mocap', obj_pos + hand_offset)\n self.do_simulation((t > 20) * np.array([1.0, -1.0]), self.frame_skip)\n self._goal = self._get_pos_objects().copy()\n self._goal_img = self._get_img()\n\n # Reset the environment again.\n super(SawyerBinImage, self).reset()\n obj_pos = self._get_pos_objects()\n if self._start_at_obj:\n for t in range(40):\n self.data.set_mocap_pos('mocap', obj_pos + np.array([0.0, 0.0, 0.05]))\n self.do_simulation((t > 40) * np.array([1.0, -1.0]), self.frame_skip)\n img = self._get_img()\n\n # Add the initial distance.\n self._dist.append(self._obj_goal_dist())\n return np.concatenate([img, self._goal_img], axis=-1)\n\n def _get_img(self):\n if self._camera_name.startswith('default-'):\n camera_name = self._camera_name.split('default-')[1]\n else:\n camera_name = self._camera_name\n assert camera_name in ['corner', 'topview', 'corner3',\n 'behindGripper', 'corner2']\n # IMPORTANT: Pull the context to the current thread.\n for ctx in self.sim.render_contexts:\n ctx.opengl_context.make_context_current()\n resolution = (64, 64)\n img = self.render(offscreen=True, resolution=resolution,\n camera_name=camera_name)\n if camera_name in ['corner', 'topview', 'behindGripper']:\n img = img[::-1]\n return img.flatten()\n\n @property\n def observation_space(self):\n return gym.spaces.Box(\n low=np.full((64*64*6), 0),\n high=np.full((64*64*6), 255),\n dtype=np.uint8)\n",
"# coding=utf-8\n# Copyright 2022 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Converts miniimagenet dataset from pickled files to NumPy.\"\"\"\n\nimport dataclasses\nimport os\nimport pickle\n\nfrom typing import Any, Dict, Sequence\n\nfrom absl import app\nfrom absl import flags\n\nimport numpy as np\n\nINPUT_PATH = flags.DEFINE_string(\n 'input_path', '', 'Path with miniImageNet pickle files.')\nOUTPUT_PATH = flags.DEFINE_string(\n 'output_path', '', 'Path with miniImageNet pickle files.')\n\n\[email protected]\nclass Sources:\n data: Dict[Any, Any] = dataclasses.field(default_factory=dict)\n\n\ndef pickle_path(root, split):\n path = os.path.join(root, f'mini-imagenet-cache-{split}.pkl')\n if not os.path.exists(path):\n raise RuntimeError(f'Pickle file {path} is not found!')\n return path\n\n\ndef get_data(root):\n data = {split: pickle.loads(open(pickle_path(root, split), 'rb').read())\n for split in ['train', 'test', 'val']}\n return Sources(data=data)\n\n\ndef get_combined(data):\n outputs = []\n for split in ['train', 'val', 'test']:\n classes = data.data[split]['class_dict']\n images = data.data[split]['image_data']\n for values in classes.values():\n from_class = np.min(values)\n to_class = np.max(values) + 1\n outputs.append(images[from_class:to_class])\n return np.stack(outputs, axis=0)\n\n\ndef main(argv):\n if len(argv) > 1:\n raise app.UsageError('Too many command-line arguments.')\n data = get_data(INPUT_PATH.value)\n combined = get_combined(data)\n assert combined.shape == (100, 600, 84, 84, 3)\n try:\n os.makedirs(OUTPUT_PATH.value)\n finally:\n np.save(os.path.join(OUTPUT_PATH.value, 'miniimagenet'), combined)\n\nif __name__ == '__main__':\n app.run(main)\n",
"# coding=utf-8\n# Copyright 2022 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for `layerwise.py`.\"\"\"\n\nimport tensorflow.compat.v1 as tf\n\nfrom hypertransformer.tf.core import common_ht\nfrom hypertransformer.tf.core import layerwise\nfrom hypertransformer.tf.core import layerwise_defs # pylint:disable=unused-import\n\n\ndef make_layerwise_model_config():\n \"\"\"Makes 'layerwise' model config.\"\"\"\n return common_ht.LayerwiseModelConfig()\n\n\nclass LayerwiseTest(tf.test.TestCase):\n\n def test_number_of_trained_cnn_layers_param_should_give_trained_weights(self):\n \"\"\"Tests the layerswise model with both generated and trained weights.\"\"\"\n tf.reset_default_graph()\n model_config = make_layerwise_model_config()\n model_config.number_of_trained_cnn_layers = 1\n model = layerwise.build_model(\n model_config.cnn_model_name, model_config=model_config)\n images = tf.random.normal((100, 28, 28, 1))\n labels = tf.random.uniform((100,),\n minval=0, maxval=10,\n dtype=tf.dtypes.int32)\n weights = model.train(images, labels)\n self.assertIsNone(weights.weight_blocks[0])\n for weight_block in weights.weight_blocks[1:]:\n self.assertIsNotNone(weight_block)\n model.evaluate(images, weight_blocks=weights)\n self.assertIsInstance(model.layers[0].conv.weights[0], tf.Variable,\n 'First layer is trained directly and should be a'\n 'Variable.')\n self.assertIsInstance(model.layers[1].conv.weights[0], tf.Tensor,\n 'All other layers except for the first are computed'\n 'from a Transformer and should be Tensors.')\n\n def test_negative_number_of_trained_cnn_layers_param_trains_last_layers(self):\n \"\"\"Tests the layerswise model with both generated and trained weights.\"\"\"\n tf.reset_default_graph()\n model_config = make_layerwise_model_config()\n model_config.number_of_trained_cnn_layers = -1\n model = layerwise.build_model(\n model_config.cnn_model_name, model_config=model_config)\n images = tf.random.normal((100, 28, 28, 1))\n labels = tf.random.uniform((100,),\n minval=0, maxval=10,\n dtype=tf.dtypes.int32)\n weights = model.train(images, labels)\n self.assertIsNone(weights.weight_blocks[-2])\n for weight_block in weights.weight_blocks[:-2]:\n self.assertIsNotNone(weight_block)\n model.evaluate(images, weight_blocks=weights)\n self.assertIsInstance(model.layers[-2].conv.weights[0], tf.Variable,\n 'Last layer before the head is trained directly and '\n 'should be a Variable.')\n self.assertIsInstance(model.layers[0].conv.weights[0], tf.Tensor,\n 'All other layers except for the last are computed'\n 'from a Transformer and should be Tensors.')\n\n def test_layer_with_activation_after_bn_different_activation_before_bn(self):\n \"\"\"Tests the option to use activation before or after batchnorm.\"\"\"\n tf.reset_default_graph()\n model_config = make_layerwise_model_config()\n act_fn = tf.ones_like\n layer_act_after = layerwise.ConvLayer(\n name='test_layer_activation_after',\n model_config=model_config,\n act_fn=act_fn,\n act_after_bn=True)\n 
layer_act_before = layerwise.ConvLayer(\n name='test_layer_activation_before',\n model_config=model_config,\n act_fn=act_fn,\n act_after_bn=False)\n images = tf.random.normal((100, 28, 28, 3))\n out_after = layer_act_after(images)\n out_before = layer_act_before(images)\n\n sess = tf.InteractiveSession()\n sess.run(tf.initializers.global_variables())\n self.assertAllEqual(out_after, tf.ones_like(out_after),\n 'When evaluating layerwise.ConvLayer activation after '\n 'BatchNorm was not computed properly.')\n self.assertAllEqual(out_before, tf.zeros_like(out_before),\n 'When evaluating layerwise.ConvLayer activation before '\n 'BatchNorm was not computed properly.')\n\nif __name__ == '__main__':\n tf.test.main()\n"
] | [
[
"tensorflow.io.gfile.exists",
"tensorflow.io.gfile.makedirs",
"matplotlib.pylab.imshow",
"matplotlib.pylab.clf",
"matplotlib.pylab.savefig"
],
[
"tensorflow.summary.scalar",
"numpy.linspace"
],
[
"numpy.random.random",
"numpy.clip",
"numpy.asarray",
"numpy.random.choice",
"numpy.linalg.norm",
"numpy.cos",
"numpy.sin",
"numpy.concatenate",
"numpy.full",
"numpy.random.uniform",
"numpy.array",
"numpy.zeros",
"numpy.empty"
],
[
"numpy.max",
"numpy.stack",
"numpy.min"
],
[
"tensorflow.compat.v1.ones_like",
"tensorflow.compat.v1.initializers.global_variables",
"tensorflow.compat.v1.random.uniform",
"tensorflow.compat.v1.test.main",
"tensorflow.compat.v1.InteractiveSession",
"tensorflow.compat.v1.random.normal",
"tensorflow.compat.v1.zeros_like",
"tensorflow.compat.v1.reset_default_graph"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
VladimirYugay/diw | [
"d1a760f1911e9d09fbe038abffc3aa76d384f86a"
] | [
"scripts/run_mots_depth_inference.py"
] | [
"\"\"\" Script for running depth inference assuming MOTS dataset structure \"\"\"\nimport logging\nimport os\nimport sys\nfrom pathlib import Path, PurePath\n\nimport click\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow.compat.v1 as tf\nfrom IPython.core import ultratb\nfrom PIL import Image\n\nimport diw\nfrom diw.model import Model, get_vars_to_save_and_restore\n\nsys.excepthook = ultratb.FormattedTB(mode=\"Verbose\", color_scheme=\"Linux\", call_pdb=1)\n\n_logger = logging.getLogger(__name__)\n\n\ndef load_image(img_file):\n \"\"\"Load image from disk. Output value range: [0,255].\"\"\"\n return Image.open(img_file).convert(\"RGB\")\n\n\ndef resize_img(img, img_shape):\n \"\"\" resizes an image \"\"\"\n return img.resize(img_shape, Image.LANCZOS).convert(\"RGB\")\n\n\ndef plot_image(image, image_type=\"RGB\"):\n \"\"\" plots image with matplotlib \"\"\"\n plt.figure()\n color_map = None\n if image_type != \"RGB\":\n color_map = plt.cm.get_cmap(\"plasma\").reversed()\n plt.imshow(image, cmap=color_map)\n plt.show() # display it\n return plt\n\n\[email protected]()\[email protected](\n \"--checkpoint_dir\",\n \"checkpoint_dir\",\n default=\"./data/checkpoints/test\",\n type=click.Path(exists=True),\n help=\"Path to the model checkpoint\",\n)\[email protected](\n \"--data_dir\",\n \"data_dir\",\n default=\"./data/test/mots_data\",\n type=click.Path(exists=True),\n help=\"Path to MOTS data\",\n)\[email protected](\n \"--save_img\",\n \"save_img\",\n flag_value=True,\n help=\"Flag to whether save the image of the depth (besides numpy array)\",\n)\[email protected]_option(diw.__version__)\ndef main(data_dir, checkpoint_dir, save_img):\n if save_img:\n plt.figure()\n height, width = 128, 416\n os.environ[\"TF_FORCE_GPU_ALLOW_GROWTH\"] = \"true\" # to fix CUDA bug\n inference_model = Model(\n is_training=False, batch_size=1, img_height=height, img_width=width\n )\n checkpoint = tf.train.latest_checkpoint(checkpoint_dir)\n vars_to_restore = get_vars_to_save_and_restore(checkpoint)\n saver = tf.train.Saver(vars_to_restore)\n with tf.Session() as sess:\n saver.restore(sess, checkpoint)\n sequence_paths = [p for p in Path(data_dir).glob(\"*\") if p.is_dir()]\n for seq_path in sequence_paths:\n model_name = PurePath(checkpoint_dir).parts[-1]\n (seq_path / model_name).mkdir(parents=True, exist_ok=True)\n if save_img:\n (seq_path / (model_name + \"_depth_images\")).mkdir(\n parents=True, exist_ok=True\n )\n img_paths = sorted(\n [p for p in (seq_path / \"img1\").glob(\"*\") if p.is_file()],\n key=lambda path: str(path),\n )\n for img_path in img_paths:\n img_name = img_path.parts[-1].split(\".\")[0]\n print(\"Processing sequence: {}, image: {}\".format(seq_path, img_name))\n image = load_image(str(img_path))\n image = resize_img(image, (width, height))\n image = np.array(image)\n image = image[None, ...]\n depth = inference_model.inference_depth(image, sess)\n depth = depth[0, :, :, 0]\n np.save(str(seq_path / model_name / img_name), depth)\n if save_img:\n plt.imshow(depth, plt.cm.get_cmap(\"plasma\").reversed())\n plt.savefig(\n str(seq_path / (model_name + \"_depth_images\"))\n + \"/\"\n + (img_name + \".png\")\n )\n plt.clf()\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.cm.get_cmap",
"tensorflow.compat.v1.Session",
"matplotlib.pyplot.clf",
"numpy.array",
"tensorflow.compat.v1.train.latest_checkpoint",
"tensorflow.compat.v1.train.Saver",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
soft-matter/pimsviewer | [
"9263ece121a58a0504c6e4d319ec6e18d1bb460a"
] | [
"pimsviewer/dimension.py"
] | [
"import os\nimport numpy as np\nfrom PyQt5 import uic\nfrom PyQt5.QtCore import QDir, Qt, QTimer, pyqtSignal\nfrom PyQt5.QtGui import QImage, QPainter, QPalette, QPixmap\nfrom PyQt5.QtWidgets import (QHBoxLayout, QSlider, QWidget, QAction, QApplication, QFileDialog, QLabel, QMainWindow, QMenu, QMessageBox, QScrollArea, QSizePolicy, QStatusBar, QVBoxLayout, QDockWidget, QPushButton, QStyle, QLineEdit, QCheckBox, QInputDialog)\n\nclass Dimension(QWidget):\n\n _playing = False\n _size = 0\n _position = 0\n _mergeable = False\n _merge = False\n _playable = False\n _fps = 5.0\n _max_playback_fps = 5.0\n\n play_event = pyqtSignal(QWidget)\n\n def __init__(self, name, size=0):\n super(Dimension, self).__init__()\n\n self.name = name\n self._size = size\n\n dirname = os.path.dirname(os.path.realpath(__file__))\n uic.loadUi(os.path.join(dirname, 'dimension.ui'), self)\n\n self.playButton.setIcon(self.style().standardIcon(QStyle.SP_MediaPlay))\n self.playButton.clicked.connect(self.click_event)\n\n self.playTimer = QTimer()\n self.playTimer.timeout.connect(self.play_tick)\n\n self.posButton.pressed.connect(self.update_position_from_btn)\n\n self.slider.setMaximum(self.size-1)\n self.slider.valueChanged.connect(self.update_position_from_slider)\n\n self.mergeButton.clicked.connect(self.update_merge)\n\n if not self.mergeable:\n self.mergeButton.hide()\n\n self._merge = self.mergeButton.isChecked()\n\n self.fps = self._fps\n self.fpsButton.pressed.connect(self.fps_changed)\n\n self.hide()\n\n def merge_image_over_dimension(self, image):\n # problem here: could be two axes with same size\n # TODO: think of a clever fix for this\n try:\n ix = image.shape.index(self._size)\n except ValueError:\n return image\n\n if self.name != 'c':\n # I don't know what to do, sum over axis\n image = np.sum(image, axis=ix)\n\n return image\n\n def enable(self):\n if not self.playable:\n return\n\n self.setEnabled(True)\n self.playButton.setEnabled(True)\n self.posButton.setEnabled(True)\n self.slider.setEnabled(True)\n self.fpsButton.setEnabled(True)\n\n if self.mergeable:\n self.mergeButton.setEnabled(True)\n self.mergeButton.show()\n\n self.show()\n\n def disable(self):\n self.setEnabled(False)\n self.playButton.setEnabled(False)\n self.posButton.setEnabled(False)\n self.slider.setEnabled(False)\n self.fpsButton.setEnabled(False)\n self.mergeButton.setEnabled(False)\n\n def fps_changed(self):\n fps, ok = QInputDialog.getDouble(self, \"Playback framerate\", \"New playback framerate\", self.fps)\n\n if ok:\n self.fps = fps\n\n def click_event(self):\n if not self.playable:\n return\n\n if not self.playing:\n self.playing = True\n else:\n self.playing = False\n\n def play_tick(self):\n if not self.playing:\n return\n\n if self._fps > self._max_playback_fps:\n self.position += int(round(self._fps / self._max_playback_fps))\n else:\n self.position += 1\n\n @property\n def size(self):\n return self._size\n\n @size.setter\n def size(self, size):\n self._size = size\n self.position = 0\n self.playing = False\n self.slider.setMinimum(0)\n self.slider.setMaximum(self.size-1)\n\n @property\n def fps(self):\n return self._fps\n\n @fps.setter\n def fps(self, fps):\n fps = float(fps)\n\n self._fps = fps\n play_fps = fps if fps < self._max_playback_fps else self._max_playback_fps\n self.playTimer.setInterval(int(round(1000.0 / play_fps)))\n self.fpsButton.setText('%d fps' % self.fps)\n\n @property\n def playable(self):\n return self._playable\n\n @playable.setter\n def playable(self, playable):\n self._playable = bool(playable)\n\n 
@property\n def playing(self):\n return self._playing\n\n @playing.setter\n def playing(self, playing):\n self._playing = bool(playing)\n if self._playing:\n self.playTimer.start()\n else:\n self.playTimer.stop()\n\n @property\n def position(self):\n return self._position\n\n def update_position_from_slider(self):\n position = self.slider.value()\n if position >= 0:\n self.position = position\n\n def update_position_from_btn(self):\n position, ok = QInputDialog.getInt(self, \"'%s' position\" % self.name, \"New '%s' position (0-%d)\" % (self.name, self.size-1), self.position, 0, self.size-1)\n\n if ok:\n self.position = position\n\n @position.setter\n def position(self, position):\n old_position = self.position\n\n while position < 0:\n position += self.size\n\n if position < self.size:\n self._position = position\n else:\n self._position = position - self.size\n\n self.slider.setValue(self.position)\n self.posButton.setText('%s=%d' % (self.name, self.position))\n\n if old_position != self.position:\n self.play_event.emit(self)\n\n def update_merge(self):\n self.merge = self.mergeButton.isChecked()\n\n @property\n def merge(self):\n return self._merge\n\n @merge.setter\n def merge(self, merge):\n if not self.mergeable:\n merge = False\n\n if merge != self._merge:\n self._merge = bool(merge)\n self.mergeButton.setChecked(self._merge)\n self.play_event.emit(self)\n\n @property\n def mergeable(self):\n return self._mergeable\n\n @mergeable.setter\n def mergeable(self, mergeable):\n self._mergeable = bool(mergeable)\n if not mergeable:\n self.merge = False\n\n def __len__(self):\n return self.size\n\n def __str__(self):\n classname = self.__class__.__name__\n playing = \"playing\" if self.playing else \"not playing\"\n return \"<%s %s of length %d (%s)>\" % (classname, self.name, self.size, playing)\n\n def __repr__(self):\n return self.__str__()\n\n"
] | [
[
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
lone17/deform-conv | [
"126ebcc283a4325c474332fa170f57d52a59e34d"
] | [
"deform_conv/utils.py"
] | [
"from __future__ import absolute_import, division\n\nfrom tensorflow.python import debug as tf_debug\nimport keras.backend as K\n\n\ndef keras_set_tf_debug():\n sess = K.get_session()\n sess = tf_debug.LocalCLIDebugWrapperSession(sess)\n sess.add_tensor_filter(\"has_inf_or_nan\", tf_debug.has_inf_or_nan)\n K.set_session(sess)\n"
] | [
[
"tensorflow.python.debug.LocalCLIDebugWrapperSession"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
}
] |
katsugeneration/tf2-ndg-benchmarks | [
"ba2d07ef997fac87b3991a54c0a234f7c5425b0f"
] | [
"tf2_ndg_benckmarks/metrics/embedding.py"
] | [
"\"\"\"\nCopyright:\n Copyright 2019 by Katsuya SHIMABUKURO.\nLicense:\n MIT, see LICENSE for details.\n\"\"\"\nimport pathlib\nimport gzip\nimport requests\nimport tqdm\nimport numpy as np\nfrom gensim.models import KeyedVectors\n\n\nFILE_ID = '0B7XkCwpI5KDYNlNUTTlSS21pQmM'\nSOURCE_URL = 'https://drive.google.com/uc?export=download&id={file_id}'\nSOURCE_URL_WITH_CONFIRM = 'https://drive.google.com/uc?export=download&confirm={code}&id={file_id}'\n\n\nclass EmbeddingBase(object):\n \"\"\"Embedding based score calculator base.\"\"\"\n\n def __init__(\n self,\n emb_path: str = '/tmp/vector.bin'):\n \"\"\"Embedding class initialization.\n\n Args:\n emb_path (str): Embedding binary file path. When emb_path is not found, start to download from internet.\n\n \"\"\"\n self.emb_path = emb_path\n\n _emb_path = pathlib.Path(self.emb_path)\n if _emb_path.exists():\n self._load()\n return\n\n _emb_gz_path = pathlib.Path(self.emb_path + '.gz')\n\n # Downloas Google pre-trained vector bin from Google Drive\n\n # Get confirmation code\n res = requests.get(SOURCE_URL.format(**{'file_id': FILE_ID}))\n cookies = res.cookies\n res.close()\n code = cookies[next(filter(lambda k: '_warning_' in k, cookies.keys()))]\n\n # Download file.\n res = requests.get(\n SOURCE_URL_WITH_CONFIRM.format(**{'file_id': FILE_ID, 'code': code}),\n cookies=cookies,\n stream=True)\n pbar = tqdm.tqdm(unit=\"B\", unit_scale=True, desc='Download Google news corpus pre-trained vectors.')\n chunck_size = 1024\n with _emb_gz_path.open('wb') as w:\n for chunck in res.iter_content(chunck_size):\n w.write(chunck)\n pbar.update(len(chunck))\n pbar.close()\n res.close()\n\n # Decompress gzip file.\n with _emb_gz_path.open('rb') as f:\n with _emb_path.open('wb') as w:\n w.write(gzip.decompress(f.read()))\n\n self._load()\n\n def _load(self):\n \"\"\"Load word2vec model.\"\"\"\n self.model = KeyedVectors.load_word2vec_format(self.emb_path, binary=True)\n assert 'dog' in self.model\n\n def _get_vectors_from_sentene(self, sentence):\n \"\"\"Return contains word vector list.\"\"\"\n return [self.model.get_vector(w) for w in sentence.split(' ') if w in self.model]\n\n def _calc_cosine_sim(self, vectors1, vectors2):\n \"\"\"Calculate cosine similarity.\"\"\"\n vectors1 /= np.linalg.norm(vectors1, axis=-1, keepdims=True)\n vectors2 /= np.linalg.norm(vectors2, axis=-1, keepdims=True)\n return np.dot(vectors1, vectors2.T)\n\n\nclass Average(EmbeddingBase):\n \"\"\"Embedding based average score calculator.\"\"\"\n\n def sentence_score(\n self,\n reference: str,\n hypothesis: str) -> float:\n \"\"\"Embedding Average metrics.\n\n Args:\n reference (str): reference sentence.\n hypothesis: (str): hypothesis sentence.\n\n Return:\n float: Embedding Average score\n\n \"\"\"\n emb_ref = np.sum(self._get_vectors_from_sentene(reference), axis=0)\n emb_hyp = np.sum(self._get_vectors_from_sentene(hypothesis), axis=0)\n return self._calc_cosine_sim(emb_ref, emb_hyp)\n\n\nclass VectorExtrema(EmbeddingBase):\n \"\"\"Embedding based vector extrema score calculator.\"\"\"\n\n def sentence_score(\n self,\n reference: str,\n hypothesis: str) -> float:\n \"\"\"Embedding Vector Extrema metrics.\n\n Args:\n reference (str): reference sentence.\n hypothesis: (str): hypothesis sentence.\n\n Return:\n float: Embedding Vector Extrema score\n\n \"\"\"\n def extema(vectors):\n vec_max = np.max(vectors, axis=0)\n vec_min = np.min(vectors, axis=0)\n return list(map(lambda x, y: x if np.abs(x) > np.abs(y) else y, vec_max, vec_min))\n\n extema_ref = 
extema(self._get_vectors_from_sentene(reference))\n extema_hyp = extema(self._get_vectors_from_sentene(hypothesis))\n return self._calc_cosine_sim(extema_ref, extema_hyp)\n\n\nclass GreedyMatching(EmbeddingBase):\n \"\"\"Embedding based greedy matching score calculator.\"\"\"\n\n def sentence_score(\n self,\n reference: str,\n hypothesis: str) -> float:\n \"\"\"Embedding greedy matching metrics.\n\n Args:\n reference (str): reference sentence.\n hypothesis: (str): hypothesis sentence.\n\n Return:\n float: Embedding Greedy Matching score\n\n \"\"\"\n embs_ref = np.array(self._get_vectors_from_sentene(reference))\n embs_hyp = np.array(self._get_vectors_from_sentene(hypothesis))\n\n cs_matrix = self._calc_cosine_sim(embs_ref, embs_hyp) # len(embs_ref) x len(embs_hyp) matrix\n greedy_ref = np.max(cs_matrix, axis=0).mean()\n greedy_hyp = np.max(cs_matrix, axis=1).mean()\n return (greedy_ref + greedy_hyp) / 2.0\n"
] | [
[
"numpy.dot",
"numpy.abs",
"numpy.min",
"numpy.linalg.norm",
"numpy.max"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
EconForge/dolo | [
"9bb75b8f6ea87578393fe748003092ffb745e8d6"
] | [
"dolo/algos/simulations.py"
] | [
"import numpy\nimport pandas\nimport xarray as xr\nimport numpy as np\n\nfrom dolo.compiler.model import Model\nfrom dolo.numeric.optimize.ncpsolve import ncpsolve\nfrom dolo.numeric.optimize.newton import newton as newton_solver\nfrom dolo.numeric.optimize.newton import SerialDifferentiableFunction\n\n## TODO: extend for mc process\n\n\ndef response(model, dr, varname, T=40, impulse: float = None):\n\n i_exo = model.symbols[\"exogenous\"].index(varname)\n\n if impulse is None:\n try:\n impulse = numpy.sqrt(\n model.exogenous.Σ[i_exo, i_exo]\n ) # works only for IID/AR1\n except:\n impulse = numpy.sqrt(model.exogenous.σ) # works only for IID/AR1\n\n e1 = numpy.zeros(len(model.symbols[\"exogenous\"]))\n e1[i_exo] = impulse\n\n exogenous = model.exogenous\n print(exogenous)\n print(T, e1)\n m_simul = model.exogenous.response(T - 1, e1) # this is an xarray T x V\n m_simul = m_simul.expand_dims(\"N\")\n m_simul = m_simul.transpose(\"T\", \"N\", \"V\").data\n\n sim = simulate(model, dr, N=1, T=T, driving_process=m_simul, stochastic=False)\n\n irf = sim.sel(N=0)\n\n return irf\n\n\ndef find_index(sim, values):\n sh = sim.shape\n N = sh[0]\n T = sh[1]\n indices = np.zeros((N, T), dtype=int)\n for n in range(N):\n for t in range(T):\n v = sim[n, t, :]\n ind = np.where((values == v[None, :]).all(axis=1))[0][0]\n indices[n, t] = ind\n return indices\n\n\nfrom dolo.numeric.grids import CartesianGrid, UnstructuredGrid\nfrom dolo.algos.results import AlgoResult\nfrom dolo.numeric.decision_rule import DecisionRule\n\n\ndef simulate(\n model: Model,\n dr: DecisionRule,\n *,\n process=None,\n N=1,\n T=40,\n s0=None,\n i0=None,\n m0=None,\n driving_process=None,\n seed=42,\n stochastic=True,\n):\n \"\"\"Simulate a model using the specified decision rule.\n\n Parameters\n ----------\n\n model: Model\n\n dr: decision rule\n\n process:\n\n s0: ndarray\n initial state where all simulations start\n\n driving_process: ndarray\n realization of exogenous driving process (drawn randomly if None)\n\n N: int\n number of simulations\n T: int\n horizon for the simulations\n seed: int\n used to initialize the random number generator. 
Use it to replicate\n exact same results among simulations\n discard: boolean (False)\n if True, then all simulations containing at least one non finite value\n are discarded\n\n Returns\n -------\n xarray.DataArray:\n returns a ``T x N x n_v`` array where ``n_v``\n is the number of variables.\n \"\"\"\n\n if isinstance(dr, AlgoResult):\n dr = dr.dr\n\n calib = model.calibration\n parms = numpy.array(calib[\"parameters\"])\n\n if s0 is None:\n s0 = calib[\"states\"]\n\n n_x = len(model.symbols[\"controls\"])\n n_s = len(model.symbols[\"states\"])\n\n s_simul = numpy.zeros((T, N, n_s))\n x_simul = numpy.zeros((T, N, n_x))\n\n s_simul[0, :, :] = s0[None, :]\n\n # are we simulating a markov chain or a continuous process ?\n if driving_process is not None:\n if len(driving_process.shape) == 3:\n m_simul = driving_process\n sim_type = \"continuous\"\n if m0 is None:\n m0 = model.calibration[\"exogenous\"]\n x_simul[0, :, :] = dr.eval_ms(m0[None, :], s0[None, :])[0, :]\n elif len(driving_process.shape) == 2:\n i_simul = driving_process\n nodes = dr.exo_grid.nodes\n m_simul = nodes[i_simul]\n # inds = i_simul.ravel()\n # m_simul = np.reshape( np.concatenate( [nodes[i,:][None,:] for i in inds.ravel()], axis=0 ), inds.shape + (-1,) )\n sim_type = \"discrete\"\n x_simul[0, :, :] = dr.eval_is(i0, s0[None, :])[0, :]\n else:\n raise Exception(\"Incorrect specification of driving values.\")\n m0 = m_simul[0, :, :]\n else:\n from dolo.numeric.processes import DiscreteProcess\n\n if process is None:\n if hasattr(dr, \"dprocess\") and hasattr(dr.dprocess, \"simulate\"):\n process = dr.dprocess\n else:\n process = model.exogenous\n\n # detect type of simulation\n if not isinstance(process, DiscreteProcess):\n sim_type = \"continuous\"\n else:\n sim_type = \"discrete\"\n\n if sim_type == \"discrete\":\n if i0 is None:\n i0 = 0\n dp = process\n m_simul = dp.simulate(N, T, i0=i0, stochastic=stochastic)\n i_simul = find_index(m_simul, dp.values)\n m0 = dp.node(i0)\n x0 = dr.eval_is(i0, s0[None, :])[0, :]\n else:\n m_simul = process.simulate(N, T, m0=m0, stochastic=stochastic)\n if isinstance(m_simul, xr.DataArray):\n m_simul = m_simul.data\n sim_type = \"continuous\"\n if m0 is None:\n m0 = model.calibration[\"exogenous\"]\n x0 = dr.eval_ms(m0[None, :], s0[None, :])[0, :]\n x_simul[0, :, :] = x0[None, :]\n\n f = model.functions[\"arbitrage\"]\n g = model.functions[\"transition\"]\n\n numpy.random.seed(seed)\n\n mp = m0\n for i in range(T):\n m = m_simul[i, :, :]\n s = s_simul[i, :, :]\n if sim_type == \"discrete\":\n i_m = i_simul[i, :]\n xx = [\n dr.eval_is(i_m[ii], s[ii, :][None, :])[0, :] for ii in range(s.shape[0])\n ]\n x = np.row_stack(xx)\n else:\n x = dr.eval_ms(m, s)\n\n x_simul[i, :, :] = x\n\n ss = g(mp, s, x, m, parms)\n if i < T - 1:\n s_simul[i + 1, :, :] = ss\n mp = m\n\n if \"auxiliary\" not in model.functions: # TODO: find a better test than this\n l = [s_simul, x_simul]\n varnames = model.symbols[\"states\"] + model.symbols[\"controls\"]\n else:\n aux = model.functions[\"auxiliary\"]\n a_simul = aux(\n m_simul.reshape((N * T, -1)),\n s_simul.reshape((N * T, -1)),\n x_simul.reshape((N * T, -1)),\n parms,\n )\n a_simul = a_simul.reshape(T, N, -1)\n l = [m_simul, s_simul, x_simul, a_simul]\n varnames = (\n model.symbols[\"exogenous\"]\n + model.symbols[\"states\"]\n + model.symbols[\"controls\"]\n + model.symbols[\"auxiliaries\"]\n )\n\n simul = numpy.concatenate(l, axis=2)\n\n if sim_type == \"discrete\":\n varnames = [\"_i_m\"] + varnames\n simul = np.concatenate([i_simul[:, :, None], simul], 
axis=2)\n\n data = xr.DataArray(\n simul,\n dims=[\"T\", \"N\", \"V\"],\n coords={\"T\": range(T), \"N\": range(N), \"V\": varnames},\n )\n\n return data\n\n\ndef tabulate(\n model, dr, state, bounds=None, n_steps=100, s0=None, i0=None, m0=None, **kwargs\n):\n\n import numpy\n\n if isinstance(dr, AlgoResult):\n dr = dr.dr\n\n states_names = model.symbols[\"states\"]\n controls_names = model.symbols[\"controls\"]\n index = states_names.index(str(state))\n\n if bounds is None:\n try:\n endo_grid = dr.endo_grid\n bounds = [endo_grid.min[index], endo_grid.max[index]]\n except:\n domain = model.domain\n bounds = [domain.min[index], domain.max[index]]\n if bounds is None:\n raise Exception(\"No bounds provided for simulation or by model.\")\n\n values = numpy.linspace(bounds[0], bounds[1], n_steps)\n\n if s0 is None:\n s0 = model.calibration[\"states\"]\n\n svec = numpy.row_stack([s0] * n_steps)\n svec[:, index] = values\n\n try:\n dp = dr.dprocess\n except:\n dp = model.exogenous.discretize()\n\n if (i0 is None) and (m0 is None):\n from dolo.numeric.grids import UnstructuredGrid\n\n if isinstance(dp.grid, UnstructuredGrid):\n n_ms = dp.n_nodes\n [q, r] = divmod(n_ms, 2)\n i0 = q - 1 + r\n else:\n m0 = model.calibration[\"exogenous\"]\n\n if i0 is not None:\n m = dp.node(i0)\n xvec = dr.eval_is(i0, svec)\n elif m0 is not None:\n m = m0\n xvec = dr.eval_ms(m0, svec)\n\n mm = numpy.row_stack([m] * n_steps)\n l = [mm, svec, xvec]\n\n series = (\n model.symbols[\"exogenous\"] + model.symbols[\"states\"] + model.symbols[\"controls\"]\n )\n\n if \"auxiliary\" in model.functions:\n p = model.calibration[\"parameters\"]\n pp = numpy.row_stack([p] * n_steps)\n avec = model.functions[\"auxiliary\"](mm, svec, xvec, pp)\n l.append(avec)\n series.extend(model.symbols[\"auxiliaries\"])\n\n import pandas\n\n tb = numpy.concatenate(l, axis=1)\n df = pandas.DataFrame(tb, columns=series)\n\n return df\n\n\ndef tabulate_2d(model, dr, states=None, i0=0, s0=None, n=[12, 13]):\n\n import numpy\n import xarray as xr\n\n if isinstance(dr, AlgoResult):\n dr = dr.dr\n\n if s0 is None:\n s0 = model.calibration[\"states\"]\n if states is None:\n states = model.symbols[\"states\"]\n assert len(states) == 2\n domain = model.get_domain()\n lps = [numpy.linspace(*domain[s], n[i]) for i, s in enumerate(states)]\n i_x = model.symbols[\"states\"].index(states[0])\n i_y = model.symbols[\"states\"].index(states[1])\n vals = []\n vstates = []\n s = s0.copy()\n for xx in lps[0]:\n vv = []\n s[i_x] = xx\n for yy in lps[1]:\n s[i_y] = yy\n x = dr.eval_is(i0, s)\n vv.append(numpy.concatenate([s, x]))\n vals.append(vv)\n vv = numpy.array(vals)\n controls = model.symbols[\"states\"] + model.symbols[\"controls\"]\n # tab = xr.DataArray(vv, dims=[states[0], states[1], 'V'], coords=[lps[0], lps[1], 'V'])\n tab = xr.DataArray(\n vv,\n dims=[states[0], states[1], \"V\"],\n coords={states[0]: lps[0], states[1]: lps[1], \"V\": controls},\n )\n return tab\n\n\ndef plot3d(tab, varname):\n X = numpy.array(tab[tab.dims[0]])\n Y = numpy.array(tab[tab.dims[1]])\n Z = numpy.array(tab.loc[:, :, varname])\n data = [go.Surface(x=X, y=Y, z=Z)]\n layout = go.Layout(\n title=\"Equity\",\n autosize=False,\n width=500,\n height=500,\n # xaxis=go.XAxis(title=tab.dims[0]),\n # yaxis={'title':tab.dims[1]},\n # zaxis={'title':varname},\n xaxis=dict(\n title=\"x Axis\",\n nticks=7,\n titlefont=dict(family=\"Courier New, monospace\", size=18, color=\"#7f7f7f\"),\n ),\n margin=dict(l=65, r=50, b=65, t=90),\n )\n fig = go.Figure(data=data, layout=layout)\n return 
iplot(fig, filename=\"graph_\" + varname)\n\n\ndef plot_decision_rule(model, dr, state, plot_controls=None, i0=None, m0=None, **kwargs):\n\n if isinstance(dr, AlgoResult):\n dr = dr.dr\n\n df = tabulate(model, dr, state, bounds=None, n_steps=100, s0=None, i0=i0, m0=m0)\n\n from matplotlib import pyplot\n\n values = df[str(state)]\n if isinstance(plot_controls, str):\n cn = plot_controls\n pyplot.plot(values, df[cn], **kwargs)\n else:\n for cn in plot_controls:\n pyplot.plot(values, df[cn], label=cn, **kwargs)\n pyplot.legend()\n pyplot.xlabel(\"state = {} | mstate = {}\".format(state, i0))\n"
] | [
[
"matplotlib.pyplot.legend",
"numpy.sqrt",
"numpy.random.seed",
"numpy.linspace",
"pandas.DataFrame",
"numpy.concatenate",
"matplotlib.pyplot.plot",
"numpy.row_stack",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
TS-SE-GROUP/icme2019 | [
"fe9b31db7bf19b08d5e5d41a259f0a297eb21766"
] | [
"mdeepctr/models/xdeepfm.py"
] | [
"# -*- coding:utf-8 -*-\n\"\"\"\nAuthor:\n Weichen Shen,[email protected]\n\nReference:\n [1] Lian J, Zhou X, Zhang F, et al. xDeepFM: Combining Explicit and Implicit Feature Interactions for Recommender Systems[J]. arXiv preprint arXiv:1803.05170, 2018.(https://arxiv.org/pdf/1803.05170.pdf)\n\"\"\"\nimport tensorflow as tf\nfrom ..input_embedding import preprocess_input_embedding\nfrom ..layers.core import PredictionLayer, MLP\nfrom ..layers.interaction import CIN\nfrom ..utils import check_feature_config_dict\nfrom ..layers.utils import concat_fun\n\n\ndef xDeepFM(feature_dim_dict, embedding_size=8, hidden_size=(256, 256), cin_layer_size=(128, 128,), cin_split_half=True, cin_activation='relu', l2_reg_linear=0.00001, l2_reg_embedding=0.00001, l2_reg_deep=0, init_std=0.0001, seed=1024, keep_prob=1, activation='relu', final_activation='sigmoid', use_bn=False, output_dim=1,):\n \"\"\"Instantiates the xDeepFM architecture.\n\n :param feature_dim_dict: dict,to indicate sparse field and dense field like {'sparse':{'field_1':4,'field_2':3,'field_3':2},'dense':['field_4','field_5']}\n :param embedding_size: positive integer,sparse feature embedding_size\n :param hidden_size: list,list of positive integer or empty list, the layer number and units in each layer of deep net\n :param cin_layer_size: list,list of positive integer or empty list, the feature maps in each hidden layer of Compressed Interaction Network\n :param cin_split_half: bool.if set to True, half of the feature maps in each hidden will connect to output unit\n :param cin_activation: activation function used on feature maps\n :param l2_reg_linear: float. L2 regularizer strength applied to linear part\n :param l2_reg_embedding: L2 regularizer strength applied to embedding vector\n :param l2_reg_deep: L2 regularizer strength applied to deep net\n :param init_std: float,to use as the initialize std of embedding vector\n :param seed: integer ,to use as random seed.\n :param keep_prob: float in (0,1]. keep_prob used in deep net\n :param activation: Activation function to use in deep net\n :param final_activation: str,output activation,usually ``'sigmoid'`` or ``'linear'``\n :param use_bn: bool. 
Whether use BatchNormalization before activation or not.in deep net\n :return: A Keras model instance.\n \"\"\"\n check_feature_config_dict(feature_dim_dict)\n\n deep_emb_list, linear_logit, inputs_list = preprocess_input_embedding(feature_dim_dict, embedding_size,\n l2_reg_embedding, l2_reg_linear, init_std,\n seed, True)\n\n fm_input = concat_fun(deep_emb_list, axis=1)\n\n if len(cin_layer_size) > 0:\n exFM_out = CIN(cin_layer_size, cin_activation,\n cin_split_half, seed)(fm_input)\n exFM_logit = tf.keras.layers.Dense(1, activation=None,)(exFM_out)\n\n deep_input = tf.keras.layers.Flatten()(fm_input)\n \n output=[]\n for _ in range(output_dim):\n \n deep_out = MLP(hidden_size, activation, l2_reg_deep, keep_prob,\n use_bn, seed)(deep_input)\n deep_logit = tf.keras.layers.Dense(\n 1, use_bias=False, activation=None)(deep_out)\n \n if len(hidden_size) == 0 and len(cin_layer_size) == 0: # only linear\n final_logit = linear_logit\n elif len(hidden_size) == 0 and len(cin_layer_size) > 0: # linear + CIN\n final_logit = tf.keras.layers.add([linear_logit, exFM_logit])\n elif len(hidden_size) > 0 and len(cin_layer_size) == 0: # linear + Deep\n final_logit = tf.keras.layers.add([linear_logit, deep_logit])\n elif len(hidden_size) > 0 and len(cin_layer_size) > 0: # linear + CIN + Deep\n final_logit = tf.keras.layers.add(\n [linear_logit, deep_logit, exFM_logit])\n else:\n raise NotImplementedError\n \n output.append(PredictionLayer(final_activation)(final_logit))\n\n model = tf.keras.models.Model(inputs=inputs_list, outputs=output)\n return model\n"
] | [
[
"tensorflow.keras.layers.add",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.models.Model"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
xysun/playground | [
"20f9a7e0eb3d24e7cd32d8afd94b767b8fcc00b4"
] | [
"pommerman/envs/v0.py"
] | [
"\"\"\"The baseline Pommerman environment.\n\nThis evironment acts as game manager for Pommerman. Further environments,\nsuch as in v1.py, will inherit from this.\n\"\"\"\nimport json\nimport os\n\nimport numpy as np\nimport time\nfrom gym import spaces\nfrom gym.utils import seeding\nimport gym\n\nfrom .. import characters\nfrom .. import constants\nfrom .. import forward_model\nfrom .. import graphics\nfrom .. import utility\n\n\nclass Pomme(gym.Env):\n '''The base pommerman env.'''\n metadata = {\n 'render.modes': ['human', 'rgb_array', 'rgb_pixel'],\n }\n\n def __init__(self,\n render_fps=None,\n game_type=None,\n board_size=None,\n agent_view_size=None,\n num_rigid=None,\n num_wood=None,\n num_items=None,\n max_steps=1000,\n is_partially_observable=False,\n env=None,\n **kwargs):\n self._render_fps = render_fps\n self._agents = None\n self._game_type = game_type\n self._board_size = board_size\n self._agent_view_size = agent_view_size\n self._num_rigid = num_rigid\n self._num_wood = num_wood\n self._num_items = num_items\n self._max_steps = max_steps\n self._viewer = None\n self._is_partially_observable = is_partially_observable\n self._env = env\n\n self.training_agent = None\n self.model = forward_model.ForwardModel()\n\n # This can be changed through set_render_mode\n # or from the cli tool using '--render_mode=MODE_TYPE'\n self._mode = 'human'\n\n # Observation and Action Spaces. These are both geared towards a single\n # agent even though the environment expects actions and returns\n # observations for all four agents. We do this so that it's clear what\n # the actions and obs are for a single agent. Wrt the observations,\n # they are actually returned as a dict for easier understanding.\n self._set_action_space()\n self._set_observation_space()\n\n def _set_action_space(self):\n self.action_space = spaces.Discrete(6)\n\n def set_render_mode(self, mode):\n self._mode = mode\n\n def _set_observation_space(self):\n \"\"\"The Observation Space for each agent.\n\n There are a total of 3*board_size^2+12 observations:\n - all of the board (board_size^2)\n - bomb blast strength (board_size^2).\n - bomb life (board_size^2)\n - agent's position (2)\n - player ammo counts (1)\n - blast strength (1)\n - can_kick (1)\n - teammate (one of {AgentDummy.value, Agent3.value}).\n - enemies (three of {AgentDummy.value, Agent3.value}).\n \"\"\"\n bss = self._board_size**2\n min_obs = [0] * 3 * bss + [0] * 5 + [constants.Item.AgentDummy.value\n ] * 4\n max_obs = [len(constants.Item)] * bss + [self._board_size\n ] * bss + [25] * bss\n max_obs += [self._board_size] * 2 + [self._num_items] * 2 + [1]\n max_obs += [constants.Item.Agent3.value] * 4\n self.observation_space = spaces.Box(\n np.array(min_obs), np.array(max_obs))\n\n def set_agents(self, agents):\n self._agents = agents\n\n def set_training_agent(self, agent_id):\n self.training_agent = agent_id\n\n def set_init_game_state(self, game_state_file):\n \"\"\"Set the initial game state.\n\n The expected game_state_file JSON format is:\n - agents: list of agents serialized (agent_id, is_alive, position,\n ammo, blast_strength, can_kick)\n - board: board matrix topology (board_size^2)\n - board_size: board size\n - bombs: list of bombs serialized (position, bomber_id, life,\n blast_strength, moving_direction)\n - flames: list of flames serialized (position, life)\n - items: list of item by position\n - step_count: step count\n\n Args:\n game_state_file: JSON File input.\n \"\"\"\n self._init_game_state = None\n if game_state_file:\n with 
open(game_state_file, 'r') as f:\n self._init_game_state = json.loads(f.read())\n\n def make_board(self):\n self._board = utility.make_board(self._board_size, self._num_rigid,\n self._num_wood)\n\n def make_items(self):\n self._items = utility.make_items(self._board, self._num_items)\n\n def act(self, obs):\n agents = [agent for agent in self._agents \\\n if agent.agent_id != self.training_agent]\n return self.model.act(agents, obs, self.action_space)\n\n def get_observations(self):\n self.observations = self.model.get_observations(\n self._board, self._agents, self._bombs,\n self._is_partially_observable, self._agent_view_size,\n self._game_type, self._env)\n return self.observations\n\n def _get_rewards(self):\n return self.model.get_rewards(self._agents, self._game_type,\n self._step_count, self._max_steps)\n\n def _get_done(self):\n return self.model.get_done(self._agents, self._step_count,\n self._max_steps, self._game_type,\n self.training_agent)\n\n def _get_info(self, done, rewards):\n return self.model.get_info(done, rewards, self._game_type, self._agents)\n\n def reset(self):\n assert (self._agents is not None)\n\n if self._init_game_state is not None:\n self.set_json_info()\n else:\n self._step_count = 0\n self.make_board()\n self.make_items()\n self._bombs = []\n self._flames = []\n self._powerups = []\n for agent_id, agent in enumerate(self._agents):\n pos = np.where(self._board == utility.agent_value(agent_id))\n row = pos[0][0]\n col = pos[1][0]\n agent.set_start_position((row, col))\n agent.reset()\n\n return self.get_observations()\n\n def seed(self, seed=None):\n gym.spaces.prng.seed(seed)\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def step(self, actions):\n max_blast_strength = self._agent_view_size or 10\n result = self.model.step(\n actions,\n self._board,\n self._agents,\n self._bombs,\n self._items,\n self._flames,\n max_blast_strength=max_blast_strength)\n self._board, self._agents, self._bombs, self._items, self._flames = \\\n result[:5]\n\n done = self._get_done()\n obs = self.get_observations()\n reward = self._get_rewards()\n info = self._get_info(done, reward)\n\n self._step_count += 1\n return obs, reward, done, info\n\n def render(self,\n mode=None,\n close=False,\n record_pngs_dir=None,\n record_json_dir=None,\n do_sleep=True):\n if close:\n self.close()\n return\n\n mode = mode or self._mode or 'human'\n\n if mode == 'rgb_array':\n rgb_array = graphics.PixelViewer.rgb_array(\n self._board, self._board_size, self._agents,\n self._is_partially_observable, self._agent_view_size)\n return rgb_array[0]\n\n if self._viewer is None:\n if mode == 'rgb_pixel':\n self._viewer = graphics.PixelViewer(\n board_size=self._board_size,\n agents=self._agents,\n agent_view_size=self._agent_view_size,\n partially_observable=self._is_partially_observable)\n else:\n self._viewer = graphics.PommeViewer(\n board_size=self._board_size,\n agents=self._agents,\n partially_observable=self._is_partially_observable,\n agent_view_size=self._agent_view_size,\n game_type=self._game_type)\n\n self._viewer.set_board(self._board)\n self._viewer.set_agents(self._agents)\n self._viewer.set_step(self._step_count)\n self._viewer.render()\n\n # Register all agents which need human input with Pyglet.\n # This needs to be done here as the first `imshow` creates the\n # window. 
Using `push_handlers` allows for easily creating agents\n # that use other Pyglet inputs such as joystick, for example.\n for agent in self._agents:\n if agent.has_user_input():\n self._viewer.window.push_handlers(agent)\n else:\n self._viewer.set_board(self._board)\n self._viewer.set_agents(self._agents)\n self._viewer.set_step(self._step_count)\n self._viewer.render()\n\n if record_pngs_dir:\n self._viewer.save(record_pngs_dir)\n if record_json_dir:\n self.save_json(record_json_dir)\n\n if do_sleep:\n time.sleep(1.0 / self._render_fps)\n\n def close(self):\n if self._viewer is not None:\n self._viewer.close()\n self._viewer = None\n\n for agent in self._agents:\n agent.shutdown()\n\n @staticmethod\n def featurize(obs):\n board = obs[\"board\"].reshape(-1).astype(np.float32)\n bomb_blast_strength = obs[\"bomb_blast_strength\"].reshape(-1) \\\n .astype(np.float32)\n bomb_life = obs[\"bomb_life\"].reshape(-1).astype(np.float32)\n position = utility.make_np_float(obs[\"position\"])\n ammo = utility.make_np_float([obs[\"ammo\"]])\n blast_strength = utility.make_np_float([obs[\"blast_strength\"]])\n can_kick = utility.make_np_float([obs[\"can_kick\"]])\n\n teammate = utility.make_np_float([obs[\"teammate\"].value])\n enemies = utility.make_np_float([e.value for e in obs[\"enemies\"]])\n return np.concatenate(\n (board, bomb_blast_strength, bomb_life, position, ammo,\n blast_strength, can_kick, teammate, enemies))\n\n def save_json(self, record_json_dir):\n info = self.get_json_info()\n count = \"{0:0=3d}\".format(self._step_count)\n suffix = count + '.json'\n path = os.path.join(record_json_dir, suffix)\n with open(path, 'w') as f:\n f.write(json.dumps(info, sort_keys=True, indent=4))\n\n def get_json_info(self):\n \"\"\"Returns a json snapshot of the current game state.\"\"\"\n ret = {\n 'board_size': self._board_size,\n 'step_count': self._step_count,\n 'board': self._board,\n 'agents': self._agents,\n 'bombs': self._bombs,\n 'flames': self._flames,\n 'items': [[k, i] for k, i in self._items.items()]\n }\n for key, value in ret.items():\n ret[key] = json.dumps(value, cls=utility.PommermanJSONEncoder)\n return ret\n\n def set_json_info(self):\n \"\"\"Sets the game state as the init_game_state.\"\"\"\n board_size = int(self._init_game_state['board_size'])\n self._board_size = board_size\n self._step_count = int(self._init_game_state['step_count'])\n\n board_array = json.loads(self._init_game_state['board'])\n self._board = np.ones((board_size, board_size)).astype(np.uint8)\n self._board *= constants.Item.Passage.value\n for x in range(self._board_size):\n for y in range(self._board_size):\n self._board[x, y] = board_array[x][y]\n\n self._items = {}\n item_array = json.loads(self._init_game_state['items'])\n for i in item_array:\n self._items[tuple(i[0])] = i[1]\n\n agent_array = json.loads(self._init_game_state['agents'])\n for a in agent_array:\n agent = next(x for x in self._agents \\\n if x.agent_id == a['agent_id'])\n agent.set_start_position((a['position'][0], a['position'][1]))\n agent.reset(\n int(a['ammo']), bool(a['is_alive']), int(a['blast_strength']),\n bool(a['can_kick']))\n\n self._bombs = []\n bomb_array = json.loads(self._init_game_state['bombs'])\n for b in bomb_array:\n bomber = next(x for x in self._agents \\\n if x.agent_id == b['bomber_id'])\n moving_direction = b['moving_direction']\n if moving_direction is not None:\n moving_direction = constants.Action(moving_direction)\n self._bombs.append(\n characters.Bomb(bomber, tuple(b['position']), int(b['life']),\n 
int(b['blast_strength']), moving_direction))\n\n self._flames = []\n flame_array = json.loads(self._init_game_state['flames'])\n for f in flame_array:\n self._flames.append(\n characters.Flame(tuple(f['position']), f['life']))\n"
] | [
[
"numpy.concatenate",
"numpy.array",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
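The `Pomme` environment in the entry above exposes the usual gym-style hooks: `set_agents`, `reset`, `act`, `step`, `render`, and `close`. Below is a minimal, hypothetical rollout sketch against such an env; constructing `env` and its agent list is a placeholder and would come from the surrounding pommerman package, not from this file.

```python
# Hypothetical rollout loop for a Pomme-style env. Only methods defined in the
# class above are used; building `env` and its agents is left as a placeholder.

def run_episode(env, render=False):
    """Play a single episode and return the final per-agent rewards and info."""
    obs = env.reset()                    # per-agent observations
    done = False
    rewards, info = None, None
    while not done:
        actions = env.act(obs)           # every non-training agent picks an action
        obs, rewards, done, info = env.step(actions)
        if render:
            env.render()                 # uses self._mode ('human' by default)
    env.close()
    return rewards, info
```

Because `act` filters out the agent whose id matches `training_agent`, a learner would be expected to insert its own action into `actions` before calling `step`.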
shubhamsingh987/PySyft | [
"ff967e3735bd7d47667d1d3e5038ba1493ca2e90",
"ff967e3735bd7d47667d1d3e5038ba1493ca2e90",
"ff967e3735bd7d47667d1d3e5038ba1493ca2e90"
] | [
"syft/frameworks/torch/fl/utils.py",
"test/workers/test_websocket_worker.py",
"test/workers/test_virtual.py"
] | [
"import syft as sy\nimport torch\nfrom typing import Dict\nfrom typing import Any\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\ndef extract_batches_per_worker(federated_train_loader: sy.FederatedDataLoader):\n \"\"\"Extracts the batches from the federated_train_loader and stores them\n in a dictionary (keys = data.location).\n\n Args:\n federated_train_loader: the connection object we use to send responses.\n back to the client.\n\n \"\"\"\n logging_interval = 100\n batches = {}\n for worker_id in federated_train_loader.workers:\n worker = federated_train_loader.federated_dataset.datasets[worker_id].location\n batches[worker] = []\n\n for batch_idx, (data, target) in enumerate(federated_train_loader):\n if batch_idx % logging_interval == 0:\n logger.debug(\"Extracted %s batches from federated_train_loader\", batch_idx)\n batches[data.location].append((data, target))\n\n return batches\n\n\ndef add_model(dst_model, src_model):\n \"\"\"Add the parameters of two models.\n\n Args:\n dst_model (torch.nn.Module): the model to which the src_model will be added.\n src_model (torch.nn.Module): the model to be added to dst_model.\n Returns:\n torch.nn.Module: the resulting model of the addition.\n\n \"\"\"\n\n params1 = src_model.named_parameters()\n params2 = dst_model.named_parameters()\n dict_params2 = dict(params2)\n with torch.no_grad():\n for name1, param1 in params1:\n if name1 in dict_params2:\n dict_params2[name1].set_(param1.data + dict_params2[name1].data)\n return dst_model\n\n\ndef scale_model(model, scale):\n \"\"\"Scale the parameters of a model.\n\n Args:\n model (torch.nn.Module): the models whose parameters will be scaled.\n scale (float): the scaling factor.\n Returns:\n torch.nn.Module: the module with scaled parameters.\n\n \"\"\"\n params = model.named_parameters()\n dict_params = dict(params)\n with torch.no_grad():\n for name, param in dict_params.items():\n dict_params[name].set_(dict_params[name].data * scale)\n return model\n\n\ndef federated_avg(models: Dict[Any, torch.nn.Module]) -> torch.nn.Module:\n \"\"\"Calculate the federated average of a dictionary containing models.\n The models are extracted from the dictionary\n via the models.values() command.\n\n Args:\n models (Dict[Any, torch.nn.Module]): a dictionary of models\n for which the federated average is calculated.\n\n Returns:\n torch.nn.Module: the module with averaged parameters.\n \"\"\"\n nr_models = len(models)\n model_list = list(models.values())\n model = model_list[0]\n for i in range(1, nr_models):\n model = add_model(model, model_list[i])\n model = scale_model(model, 1.0 / nr_models)\n return model\n\n\ndef accuracy(pred_softmax, target):\n \"\"\"Calculate the accuray of a given prediction.\n\n This functions assumes pred_softmax to be converted into the final prediction by taking the argmax.\n\n Args:\n pred_softmax: array type(float), providing nr_classes values per element in target.\n target: array type(int), correct classes, taking values in range [0, nr_classes).\n\n Returns:\n accuracy: float, fraction of correct predictions.\n\n \"\"\"\n nr_elems = len(target)\n pred = pred_softmax.argmax(dim=1)\n return (pred.float() == target.view(pred.shape).float()).sum().numpy() / float(nr_elems)\n\n\ndef create_gaussian_mixture_toy_data(nr_samples: int): # pragma: no cover\n \"\"\" Create a simple toy data for binary classification\n\n The data is drawn from two normal distributions\n target = 1: mu = 2, sigma = 1\n target = 0: mu = 0, sigma = 1\n The dataset is balanced with an equal number 
of positive and negative samples\n\n Args:\n nr_samples: number of samples to generate\n\n Returns:\n data, targets\n\n\n \"\"\"\n sample_dim = 2\n one_half = int(nr_samples / 2)\n X1 = torch.randn(one_half, sample_dim, requires_grad=True) - 5\n X2 = torch.randn(one_half, sample_dim, requires_grad=True) + 5\n X = torch.cat([X1, X2], dim=0)\n Y1 = torch.zeros(one_half, requires_grad=False).long()\n Y2 = torch.ones(one_half, requires_grad=False).long()\n Y = torch.cat([Y1, Y2], dim=0)\n return X, Y\n\n\ndef iris_data_partial():\n \"\"\"\n\n Returns: 30 samples from the iris data set: https://archive.ics.uci.edu/ml/datasets/iris\n\n \"\"\"\n data = [\n [5.1, 3.5, 1.4, 0.2],\n [4.9, 3.0, 1.4, 0.2],\n [4.7, 3.2, 1.3, 0.2],\n [4.6, 3.1, 1.5, 0.2],\n [5.0, 3.6, 1.4, 0.2],\n [5.4, 3.9, 1.7, 0.4],\n [4.6, 3.4, 1.4, 0.3],\n [5.0, 3.4, 1.5, 0.2],\n [4.4, 2.9, 1.4, 0.2],\n [4.9, 3.1, 1.5, 0.1],\n ]\n\n target_to_string = {0: \"Iris-setosa\", 1: \"Iris-versicolor\", 2: \"Iris-virginica\"}\n targets = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n\n data += [\n [7.0, 3.2, 4.7, 1.4],\n [6.4, 3.2, 4.5, 1.5],\n [6.9, 3.1, 4.9, 1.5],\n [5.5, 2.3, 4.0, 1.3],\n [6.5, 2.8, 4.6, 1.5],\n [5.7, 2.8, 4.5, 1.3],\n [6.3, 3.3, 4.7, 1.6],\n [4.9, 2.4, 3.3, 1.0],\n [6.6, 2.9, 4.6, 1.3],\n [5.2, 2.7, 3.9, 1.4],\n ]\n\n targets += [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n\n data += [\n [6.3, 3.3, 6.0, 2.5],\n [5.8, 2.7, 5.1, 1.9],\n [7.1, 3.0, 5.9, 2.1],\n [6.3, 2.9, 5.6, 1.8],\n [6.5, 3.0, 5.8, 2.2],\n [7.6, 3.0, 6.6, 2.1],\n [4.9, 2.5, 4.5, 1.7],\n [7.3, 2.9, 6.3, 1.8],\n [6.7, 2.5, 5.8, 1.8],\n [7.2, 3.6, 6.1, 2.5],\n ]\n\n targets += [2, 2, 2, 2, 2, 2, 2, 2, 2, 2]\n\n return torch.tensor(data), torch.tensor(targets)\n",
"import io\nfrom os.path import exists, join\nimport time\nfrom socket import gethostname\nfrom OpenSSL import crypto, SSL\nimport pytest\nimport torch\nimport syft as sy\nfrom syft.generic.frameworks.hook import hook_args\nfrom syft.frameworks.torch.fl import utils\n\nfrom syft.workers.websocket_client import WebsocketClientWorker\nfrom syft.workers.websocket_server import WebsocketServerWorker\n\nfrom test.conftest import instantiate_websocket_client_worker\n\n\nPRINT_IN_UNITTESTS = False\n\n\[email protected](\"secure\", [True, False])\ndef test_websocket_worker_basic(hook, start_proc, secure, tmpdir):\n \"\"\"Evaluates that you can do basic tensor operations using\n WebsocketServerWorker in insecure and secure mode.\"\"\"\n\n def create_self_signed_cert(cert_path, key_path):\n # create a key pair\n k = crypto.PKey()\n k.generate_key(crypto.TYPE_RSA, 1024)\n\n # create a self-signed cert\n cert = crypto.X509()\n cert.gmtime_adj_notBefore(0)\n cert.gmtime_adj_notAfter(1000)\n cert.set_pubkey(k)\n cert.sign(k, \"sha1\")\n\n # store keys and cert\n open(cert_path, \"wb\").write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))\n open(key_path, \"wb\").write(crypto.dump_privatekey(crypto.FILETYPE_PEM, k))\n\n kwargs = {\n \"id\": \"secure_fed\" if secure else \"not_secure_fed\",\n \"host\": \"localhost\",\n \"port\": 8766,\n \"hook\": hook,\n }\n\n if secure:\n # Create cert and keys\n cert_path = tmpdir.join(\"test.crt\")\n key_path = tmpdir.join(\"test.key\")\n create_self_signed_cert(cert_path, key_path)\n kwargs[\"cert_path\"] = cert_path\n kwargs[\"key_path\"] = key_path\n\n process_remote_worker = start_proc(WebsocketServerWorker, **kwargs)\n\n time.sleep(0.1)\n x = torch.ones(5)\n\n if secure:\n # unused args\n del kwargs[\"cert_path\"]\n del kwargs[\"key_path\"]\n\n kwargs[\"secure\"] = secure\n remote_proxy = instantiate_websocket_client_worker(**kwargs)\n\n x = x.send(remote_proxy)\n y = x + x\n y = y.get()\n\n assert (y == torch.ones(5) * 2).all()\n\n del x\n\n remote_proxy.close()\n time.sleep(0.1)\n remote_proxy.remove_worker_from_local_worker_registry()\n process_remote_worker.terminate()\n\n\ndef test_websocket_workers_search(hook, start_remote_worker):\n \"\"\"Evaluates that a client can search and find tensors that belong\n to another party\"\"\"\n # Args for initializing the websocket server and client\n server, remote_proxy = start_remote_worker(id=\"fed2\", hook=hook, port=8767)\n\n # Sample tensor to store on the server\n sample_data = torch.tensor([1, 2, 3, 4]).tag(\"#sample_data\", \"#another_tag\")\n _ = sample_data.send(remote_proxy)\n\n # Search for the tensor located on the server by using its tag\n results = remote_proxy.search([\"#sample_data\", \"#another_tag\"])\n\n assert results\n assert results[0].owner.id == \"me\"\n assert results[0].location.id == \"fed2\"\n\n # Search multiple times should still work\n results = remote_proxy.search([\"#sample_data\", \"#another_tag\"])\n\n assert results\n assert results[0].owner.id == \"me\"\n assert results[0].location.id == \"fed2\"\n\n remote_proxy.close()\n time.sleep(0.1)\n remote_proxy.remove_worker_from_local_worker_registry()\n server.terminate()\n\n\ndef test_list_objects_remote(hook, start_remote_worker):\n server, remote_proxy = start_remote_worker(id=\"fed-list-objects\", hook=hook, port=8765)\n remote_proxy.clear_objects()\n\n x = torch.tensor([1, 2, 3]).send(remote_proxy)\n\n res = remote_proxy.list_tensors_remote()\n\n res_dict = eval(res.replace(\"tensor\", \"torch.tensor\"))\n assert len(res_dict) == 
1\n\n y = torch.tensor([4, 5, 6]).send(remote_proxy)\n res = remote_proxy.list_tensors_remote()\n res_dict = eval(res.replace(\"tensor\", \"torch.tensor\"))\n assert len(res_dict) == 2\n\n # delete x before terminating the websocket connection\n del x\n del y\n time.sleep(0.1)\n remote_proxy.close()\n time.sleep(0.1)\n remote_proxy.remove_worker_from_local_worker_registry()\n server.terminate()\n\n\ndef test_objects_count_remote(hook, start_remote_worker):\n server, remote_proxy = start_remote_worker(id=\"fed-count-objects\", hook=hook, port=8764)\n remote_proxy.clear_objects()\n\n x = torch.tensor([1, 2, 3]).send(remote_proxy)\n\n nr_objects = remote_proxy.tensors_count_remote()\n assert nr_objects == 1\n\n y = torch.tensor([4, 5, 6]).send(remote_proxy)\n nr_objects = remote_proxy.tensors_count_remote()\n assert nr_objects == 2\n\n x.get()\n nr_objects = remote_proxy.tensors_count_remote()\n assert nr_objects == 1\n\n # delete remote object before terminating the websocket connection\n del y\n time.sleep(0.1)\n remote_proxy.close()\n time.sleep(0.1)\n remote_proxy.remove_worker_from_local_worker_registry()\n server.terminate()\n\n\ndef test_clear_objects_remote(hook, start_remote_worker):\n server, remote_proxy = start_remote_worker(id=\"fed-clear-objects\", hook=hook, port=8769)\n\n x = torch.tensor([1, 2, 3]).send(remote_proxy, garbage_collect_data=False)\n y = torch.tensor(4).send(remote_proxy, garbage_collect_data=False)\n\n nr_objects = remote_proxy.tensors_count_remote()\n assert nr_objects == 2\n\n remote_proxy.clear_objects_remote()\n nr_objects = remote_proxy.objects_count_remote()\n assert nr_objects == 0\n\n remote_proxy.close()\n remote_proxy.remove_worker_from_local_worker_registry()\n server.terminate()\n\n\ndef test_connect_close(hook, start_remote_worker):\n server, remote_proxy = start_remote_worker(id=\"fed-connect-close\", hook=hook, port=8770)\n\n x = torch.tensor([1, 2, 3])\n x_ptr = x.send(remote_proxy)\n\n assert remote_proxy.tensors_count_remote() == 1\n\n remote_proxy.close()\n\n time.sleep(0.1)\n\n remote_proxy.connect()\n\n assert remote_proxy.tensors_count_remote() == 1\n\n x_val = x_ptr.get()\n assert (x_val == x).all()\n\n remote_proxy.close()\n remote_proxy.remove_worker_from_local_worker_registry()\n\n time.sleep(0.1)\n\n server.terminate()\n\n\ndef test_websocket_worker_multiple_output_response(hook, start_remote_worker):\n \"\"\"Evaluates that you can do basic tensor operations using\n WebsocketServerWorker.\"\"\"\n server, remote_proxy = start_remote_worker(id=\"socket_multiple_output\", hook=hook, port=8771)\n\n x = torch.tensor([1.0, 3, 2])\n x = x.send(remote_proxy)\n\n p1, p2 = torch.sort(x)\n x1, x2 = p1.get(), p2.get()\n\n assert (x1 == torch.tensor([1.0, 2, 3])).all()\n assert (x2 == torch.tensor([0, 2, 1])).all()\n\n x.get() # retrieve remote object before closing the websocket connection\n\n remote_proxy.close()\n server.terminate()\n\n\ndef test_send_command_whitelist(hook, start_remote_worker):\n server, remote_proxy = start_remote_worker(\n id=\"worker_call_api_good_methods\", hook=hook, port=8772\n )\n whitelisted_methods = {\n \"torch\": {\"tensor\": [1, 2, 3], \"rand\": (2, 3), \"randn\": (2, 3), \"zeros\": (2, 3)}\n }\n\n for framework, methods in whitelisted_methods.items():\n attr = getattr(remote_proxy.remote, framework)\n\n for method, inp in methods.items():\n x = getattr(attr, method)(inp)\n\n if \"rand\" not in method:\n assert (x.get() == getattr(torch, method)(inp)).all()\n\n remote_proxy.close()\n server.terminate()\n\n\ndef 
test_send_command_not_whitelisted(hook, start_remote_worker):\n server, remote_proxy = start_remote_worker(\n id=\"worker_call_api_bad_method\", hook=hook, port=8773\n )\n\n method_not_exist = \"openmind\"\n\n for framework in remote_proxy.remote.frameworks:\n if framework in dir(remote_proxy.remote):\n attr = getattr(remote_proxy.remote, framework)\n with pytest.raises(AttributeError):\n getattr(attr, method_not_exist)\n\n remote_proxy.close()\n server.terminate()\n\n\[email protected]\ndef test_evaluate(hook, start_proc): # pragma: no cover\n\n sy.local_worker.clear_objects()\n sy.generic.frameworks.hook.hook_args.hook_method_args_functions = {}\n sy.generic.frameworks.hook.hook_args.hook_method_response_functions = {}\n sy.generic.frameworks.hook.hook_args.get_tensor_type_functions = {}\n sy.generic.frameworks.hook.hook_args.register_response_functions = {}\n\n data, target = utils.iris_data_partial()\n\n dataset = sy.BaseDataset(data=data, targets=target)\n\n kwargs = {\"id\": \"evaluate_remote\", \"host\": \"localhost\", \"port\": 8790, \"hook\": hook}\n dataset_key = \"iris\"\n # TODO: check why unit test sometimes fails when WebsocketServerWorker is started from the unit test. Fails when run after test_federated_client.py\n # process_remote_worker = start_proc(WebsocketServerWorker, dataset=(dataset, dataset_key), verbose=True, **kwargs)\n\n remote_proxy = instantiate_websocket_client_worker(**kwargs)\n\n def loss_fn(pred, target):\n return torch.nn.functional.cross_entropy(input=pred, target=target)\n\n class Net(torch.nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.fc1 = torch.nn.Linear(4, 3)\n\n torch.nn.init.xavier_normal_(self.fc1.weight)\n\n def forward(self, x):\n x = torch.nn.functional.relu(self.fc1(x))\n return x\n\n model_untraced = Net()\n model = torch.jit.trace(model_untraced, data)\n loss_traced = torch.jit.trace(loss_fn, (torch.tensor([[0.3, 0.5, 0.2]]), torch.tensor([1])))\n\n pred = model(data)\n loss_before = loss_fn(target=target, pred=pred)\n if PRINT_IN_UNITTESTS: # pragma: no cover\n print(f\"Loss: {loss_before}\")\n\n # Create and send train config\n train_config = sy.TrainConfig(\n batch_size=4,\n model=model,\n loss_fn=loss_traced,\n model_id=None,\n loss_fn_id=None,\n optimizer_args=None,\n epochs=1,\n )\n train_config.send(remote_proxy)\n\n result = remote_proxy.evaluate(\n dataset_key=dataset_key, return_histograms=True, nr_bins=3, return_loss=True\n )\n\n len_dataset = result[\"nr_predictions\"]\n hist_target = result[\"histogram_target\"]\n\n if PRINT_IN_UNITTESTS: # pragma: no cover\n print(f\"Evaluation result before training: {result}\")\n\n assert len_dataset == 30\n assert (hist_target == [10, 10, 10]).all()\n\n remote_proxy.close()\n remote_proxy.remove_worker_from_local_worker_registry()\n # process_remote_worker.terminate()\n",
"from time import time\nfrom unittest.mock import patch\n\nimport pytest\nimport torch\n\nimport syft as sy\nfrom syft import serde\nfrom syft.generic.pointers.object_wrapper import ObjectWrapper\nfrom syft.messaging.message import ObjectMessage\nfrom syft.messaging.message import ObjectRequestMessage\nfrom syft.workers.virtual import VirtualWorker\n\nfrom syft.exceptions import GetNotPermittedError\nfrom syft.exceptions import ObjectNotFoundError\n\n\ndef test_send_msg():\n \"\"\"Tests sending a message with a specific ID\n\n This is a simple test to ensure that the BaseWorker interface\n can properly send/receive a message containing a tensor.\n \"\"\"\n\n # get pointer to local worker\n me = sy.torch.hook.local_worker\n\n # pending time to simulate lantency (optional)\n me.message_pending_time = 0.1\n\n # create a new worker (to send the object to)\n worker_id = sy.ID_PROVIDER.pop()\n bob = VirtualWorker(sy.torch.hook, id=f\"bob{worker_id}\")\n\n # initialize the object and save it's id\n obj = torch.Tensor([100, 100])\n obj_id = obj.id\n\n # Send data to bob\n start_time = time()\n me.send_msg(ObjectMessage(obj), bob)\n elapsed_time = time() - start_time\n\n me.message_pending_time = 0\n\n # ensure that object is now on bob's machine\n assert obj_id in bob.object_store._objects\n # ensure that object was sent 0.1 secs later\n assert elapsed_time > 0.1\n\n\ndef test_send_msg_using_tensor_api():\n \"\"\"Tests sending a message with a specific ID\n\n This is a simple test to ensure that the high level tensor .send()\n method correctly sends a message to another worker.\n \"\"\"\n\n # create worker to send object to\n worker_id = sy.ID_PROVIDER.pop()\n bob = VirtualWorker(sy.torch.hook, id=f\"bob{worker_id}\")\n\n # create a tensor to send (default on local_worker)\n obj = torch.Tensor([100, 100])\n\n # save the object's id\n obj_id = obj.id\n\n # send the object to Bob (from local_worker)\n _ = obj.send(bob)\n\n # ensure tensor made it to Bob\n assert obj_id in bob.object_store._objects\n\n\ndef test_recv_msg():\n \"\"\"Tests the recv_msg command with 2 tests\n\n The first test uses recv_msg to send an object to alice.\n\n The second test uses recv_msg to request the object\n previously sent to alice.\"\"\"\n\n # TEST 1: send tensor to alice\n\n # create a worker to send data to\n worker_id = sy.ID_PROVIDER.pop()\n alice = VirtualWorker(sy.torch.hook, id=f\"alice{worker_id}\")\n\n # create object to send\n obj = torch.Tensor([100, 100])\n\n # create/serialize message\n message = ObjectMessage(obj)\n bin_msg = serde.serialize(message)\n\n # have alice receive message\n alice.recv_msg(bin_msg)\n\n # ensure that object is now in alice's registry\n assert obj.id in alice.object_store._objects\n\n # Test 2: get tensor back from alice\n\n # Create message: Get tensor from alice\n message = ObjectRequestMessage(obj.id, None, \"\")\n\n # serialize message\n bin_msg = serde.serialize(message)\n\n # call receive message on alice\n resp = alice.recv_msg(bin_msg)\n\n obj_2 = sy.serde.deserialize(resp)\n\n # assert that response is correct type\n assert type(resp) == bytes\n\n # ensure that the object we receive is correct\n assert obj_2.id == obj.id\n\n\ndef tests_worker_convenience_methods():\n \"\"\"Tests send and get object methods on BaseWorker\n\n This test comes in two parts. 
The first uses the simple\n BaseWorker.send_obj and BaseWorker.request_obj to send a\n tensor to Alice and to get the worker back from Alice.\n\n The second part shows that the same methods work between\n bob and alice directly.\n \"\"\"\n\n me = sy.torch.hook.local_worker\n worker_id = sy.ID_PROVIDER.pop()\n bob = VirtualWorker(sy.torch.hook, id=f\"bob{worker_id}\")\n worker_id = sy.ID_PROVIDER.pop()\n alice = VirtualWorker(sy.torch.hook, id=f\"alice{worker_id}\")\n obj = torch.Tensor([100, 100])\n\n # Send data to alice\n me.send_obj(obj, alice)\n\n # Get data from alice\n resp_alice = me.request_obj(obj.id, alice)\n\n assert (resp_alice == obj).all()\n\n obj2 = torch.Tensor([200, 200])\n\n # Set data on self\n bob.set_obj(obj2)\n\n # Get data from self\n resp_bob_self = bob.get_obj(obj2.id)\n\n assert (resp_bob_self == obj2).all()\n\n # Get data from bob as alice\n resp_bob_alice = alice.request_obj(obj2.id, bob)\n\n assert (resp_bob_alice == obj2).all()\n\n\ndef test_search():\n worker_id = sy.ID_PROVIDER.pop()\n bob = VirtualWorker(sy.torch.hook, id=f\"bob{worker_id}\")\n\n x = (\n torch.tensor([1, 2, 3, 4, 5])\n .tag(\"#fun\", \"#mnist\")\n .describe(\"The images in the MNIST training dataset.\")\n .send(bob)\n )\n\n y = (\n torch.tensor([1, 2, 3, 4, 5])\n .tag(\"#not_fun\", \"#cifar\")\n .describe(\"The images in the MNIST training dataset.\")\n .send(bob)\n )\n\n z = (\n torch.tensor([1, 2, 3, 4, 5])\n .tag(\"#fun\", \"#boston_housing\")\n .describe(\"The images in the MNIST training dataset.\")\n .send(bob)\n )\n\n a = (\n torch.tensor([1, 2, 3, 4, 5])\n .tag(\"#not_fun\", \"#boston_housing\")\n .describe(\"The images in the MNIST training dataset.\")\n .send(bob)\n )\n\n assert len(bob.search(\"#fun\")) == 2\n assert len(bob.search(\"#mnist\")) == 1\n assert len(bob.search(\"#cifar\")) == 1\n assert len(bob.search(\"#not_fun\")) == 2\n assert len(bob.search([\"#not_fun\", \"#boston_housing\"])) == 1\n\n\ndef test_obj_not_found(workers):\n \"\"\"Test for useful error message when trying to call a method on\n a tensor which does not exist on a worker anymore.\"\"\"\n\n bob = workers[\"bob\"]\n\n x = torch.tensor([1, 2, 3, 4, 5]).send(bob)\n\n bob.object_store.clear_objects()\n\n with pytest.raises(ObjectNotFoundError):\n y = x + x\n\n\ndef test_get_not_permitted(workers):\n bob = workers[\"bob\"]\n x = torch.tensor([1, 2, 3, 4, 5]).send(bob)\n with patch.object(torch.Tensor, \"allow\") as mock_allowed_to_get:\n mock_allowed_to_get.return_value = False\n with pytest.raises(GetNotPermittedError):\n x.get()\n mock_allowed_to_get.assert_called_once()\n\n\ndef test_spinup_time(hook):\n \"\"\"Tests to ensure that virtual workers intialized with 10000 data points\n load in under 1 seconds. 
This is needed to ensure that virtual workers\n spun up inside web frameworks are created quickly enough to not cause timeout errors\"\"\"\n data = []\n for i in range(10000):\n data.append(torch.Tensor(5, 5).random_(100))\n start_time = time()\n dummy = sy.VirtualWorker(hook, id=\"dummy\", data=data)\n end_time = time()\n assert (end_time - start_time) < 1\n\n\ndef test_send_jit_scriptmodule(hook, workers): # pragma: no cover\n bob = workers[\"bob\"]\n\n @torch.jit.script\n def foo(x):\n return x + 2\n\n foo_wrapper = ObjectWrapper(obj=foo, id=99)\n foo_ptr = hook.local_worker.send(foo_wrapper, bob)\n\n res = foo_ptr(torch.tensor(4))\n assert res == torch.tensor(6)\n\n\ndef test_send_command_whitelist(hook, workers):\n bob = workers[\"bob\"]\n whitelisted_methods = {\n \"torch\": {\"tensor\": [1, 2, 3], \"rand\": (2, 3), \"randn\": (2, 3), \"zeros\": (2, 3)}\n }\n\n for framework, methods in whitelisted_methods.items():\n attr = getattr(bob.remote, framework)\n\n for method, inp in methods.items():\n x = getattr(attr, method)(inp)\n\n if \"rand\" not in method:\n assert (x.get() == getattr(torch, method)(inp)).all()\n\n\ndef test_send_command_not_whitelisted(hook, workers):\n bob = workers[\"bob\"]\n\n method_not_exist = \"openmind\"\n\n for framework in bob.remote.frameworks:\n if framework in dir(bob.remote):\n attr = getattr(bob.remote, framework)\n\n with pytest.raises(AttributeError):\n getattr(attr, method_not_exist)\n"
] | [
[
"torch.ones",
"torch.zeros",
"torch.cat",
"torch.randn",
"torch.tensor",
"torch.no_grad"
],
[
"torch.ones",
"torch.jit.trace",
"torch.nn.functional.cross_entropy",
"torch.nn.init.xavier_normal_",
"torch.tensor",
"torch.nn.Linear",
"torch.sort"
],
[
"torch.Tensor",
"torch.tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
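The `add_model`/`scale_model`/`federated_avg` helpers in the entry above average model parameters in place via `set_`. The sketch below shows the same averaging idea as a standalone variant over `state_dict`s, so it runs with plain torch and no syft; the model architecture and worker names are illustrative only.

```python
import torch


def average_state_dicts(models):
    """Average the parameters of several modules that share one architecture."""
    state_dicts = [m.state_dict() for m in models]
    return {
        name: torch.stack([sd[name].float() for sd in state_dicts]).mean(dim=0)
        for name in state_dicts[0]
    }


if __name__ == "__main__":
    # Three hypothetical workers, each holding its own copy of a small model.
    worker_models = {w: torch.nn.Linear(4, 3) for w in ("alice", "bob", "charlie")}
    merged = torch.nn.Linear(4, 3)
    merged.load_state_dict(average_state_dicts(list(worker_models.values())))
    print(merged.weight.shape)  # torch.Size([3, 4])
```

Averaging over `state_dict`s instead of mutating the first model also sidesteps the side effect in `federated_avg` above, where the first entry of `models` is accumulated into in place.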
kartik-gatsby/optimized-ising-model | [
"1a9b0210deb26d73f93aec5b0804baaebf9c6ff9"
] | [
"ising_low.py"
] | [
"import numpy as np\nfrom random import random\nimport matplotlib.pyplot as plt\nimport time\nimport logging\n\nlogging.basicConfig(level=logging.INFO,filename='simulation.log', filemode='w',format='%(asctime)s - %(message)s',datefmt='%d-%b-%y %H:%M:%S')\nnp.seterr(all='warn')\n\n#################################################\n# #\n# SIMULATION MACROS #\n# #\n#################################################\n\"\"\"__________________________________________\nSimulation MACROs:\nT_max and T_min is range of temperature.\nnt is number of Temperature points.\nsweeps are number of mc steps per spin.\nmin_meas is minimum number Measurement.\nj_knife_factor is jack knife factor is used when number of measurement interval < 2 x Correlation time.\nAll some_variables0 are default value.\n------------------------------------------\"\"\"\nlogging.info(\"Starting Ising Model Simulation\")\nT_min = 1.5; T_max = 3\nnt = int((T_max-T_min)*10+1)\nsweeps0 = 1000\nmax_sweeps = sweeps0*10\nmin_meas = 100\nj_knife_factor0 = 1\nstartTime = time.time()\nT = np.linspace(T_min, T_max, nt)\n\"\"\"\nWe will work with expanding lattices. We will store expanded lattice for particular temperature. Stored lattice would be used as initial configuration for higher dimenssion lattic size. We have two methods for expanding lattice: zooming and stacking. We recommend stacking for use.\n\"\"\"\nstates = {_: None for _ in T}\n#lattice_sizes = 3**(np.arange(2,5))\n################OR##################\nlattice_sizes = 2**(np.arange(4,8))\n\n#################################################\n# #\n# FUNCTIONS #\n# #\n#################################################\n\"\"\"Onsagar's solutions\"\"\"\ndef onsagar_specific_heat(X):\n const = -(2/2.269)**2*2/np.pi\n return const*np.log(abs(np.ones(len(X))-X/2.269))\ndef onsagar_mag(X):\n lst1 = (1-(np.sinh(np.log(1+np.sqrt(2))*2.269/X[X<2.269]))**(-4))**(1/8)\n lst2 = 0*X[X>=2.269]\n return np.concatenate((lst1,lst2))\n\n\n\"\"\"Monte Carlo Metropolis algorithm\"\"\"\ndef monteCarlo(n, state, energy, mag, beta, sweeps,max_sweeps):\n if sweeps > max_sweeps:\n sweeps = max_sweeps\n exp_betas = np.exp(-beta*np.arange(0,9))\n energies, mags = np.zeros(sweeps), np.zeros(sweeps)\n # random state indices\n J = np.random.randint(0, n, size=(sweeps, n*n))\n K = np.random.randint(0, n, size=(sweeps, n*n))\n #loop\n for t in range(sweeps):\n for tt in range(n*n):\n # random indices\n j, k = J[t, tt], K[t, tt]\n s = state[j,k]\n neighbour_sum = (state[(j-1)%n, k] +\n state[j, (k-1)%n] + state[j, (k+1)%n] +\n state[(j+1)%n, k])\n energy_diff = 2*s*neighbour_sum\n if energy_diff < 0 or random() < exp_betas[energy_diff]:\n s *= -1\n energy += energy_diff\n mag += 2*s\n state[j, k] = s\n energies[t], mags[t] = energy, mag\n return energies, mags\n\n\n\"\"\"Calculation of auto-correlation\"\"\" \ndef autocorrelation(M):\n start_time = time.time()\n tau = 1\n sweeps = len(M)\n auto = np.zeros(sweeps)\n for t in range(sweeps):\n some_time = sweeps-t\n first_term = np.average(M[:some_time]*M[t:sweeps])\n S1 = np.average(M[:some_time])\n S2 = np.average(M[t:sweeps])\n auto_temp = first_term - S1*S2\n if auto_temp > 0:\n auto[t] = auto_temp\n else:#remove oscillating part\n break \n if auto[0] != 0:\n auto = auto[auto>0]\n auto = auto/auto[0] #normalization\n len_auto = len(auto)\n if len_auto > 1: #draw a straight line if you have atleast two points\n tau = int(-1/np.polyfit(np.arange(len_auto), np.log(auto), 1, w=np.sqrt(auto))[0])\n tau = max(tau,1)\n logging.info(f\"Correlation time = {tau}\")\n 
return tau\n\n\n\"\"\"\nCalculation of specific heat or Susceptibility and errorbar.\nCX is Specific Heat or Susceptibility.\nCX_i is Specific Heat or Susceptibility without i-th measurement.\n\"\"\"\ndef jackKnife(EM,factor=1):\n n = len(EM)\n CX = np.var(EM)\n CX_i = np.zeros(n)\n for i in range(n):\n CX_i[i] = np.var(np.delete(EM,i))\n under = np.sum(np.square(np.full(n,CX) - CX_i))\n CX_err = np.sqrt(under*factor)\n return CX, CX_err\n\n\"\"\"\nStacking Lattices: Stacking z lattice and taking advantage of periodic boundary condition. The energy and magnetization would also increase as system size increase as they are extensive state variables. Other trick to explore is Zoom.\n\"\"\"\ndef stackLattice(z,state,energy,mag):\n h_stack_state = state\n for _ in range(z-1):\n h_stack_state = np.hstack((h_stack_state,state))\n v_stack_state = h_stack_state\n for _ in range(z-1):\n v_stack_state = np.vstack((v_stack_state,h_stack_state))\n return (v_stack_state, z*z*energy, z*z*mag)\n\n#################################################\n# #\n# MAIN #\n# #\n#################################################\n\"\"\"we will plot the following wrt temperature, T\"\"\"\nplotEnergy = np.zeros(nt)\nplotMag = np.zeros(nt)\nplotChi = np.zeros(nt)\nplotChi_err = np.zeros(nt)\nplotSH = np.zeros(nt)\nplotSH_err = np.zeros(nt)\nplotCorrelation = np.zeros(nt)\n\n\n\"\"\"\nPreparing n x n lattice with all spins up.\nHere, z is a zoom factor or a stacking factor.\n\"\"\"\nn = min(lattice_sizes)\nN = n*n\nz = lattice_sizes[1]//lattice_sizes[0]\nstate = np.ones((n,n),dtype=\"int\")\nenergy, mag = -N, N\n\"\"\"lattice size loop\"\"\"\nfor n in lattice_sizes:\n logging.info(f\"Lattice size is {n}x{n}\")\n print(f\"Lattice size is {n}x{n}\")\n N = n*n\n \"\"\"temperature loop\"\"\"\n for k in range(nt):\n temp = T[k]\n Beta=1/temp\n if states[temp] != None:\n (state,energy,mag) = states[temp]\n logging.info(\"_\"*35)\n logging.info(\"Temperature is %0.2f, time elapsed %d\" %(temp,time.time()-startTime))\n sweeps = sweeps0; j_knife_factor = j_knife_factor0; measurements = 0\n E, M = np.zeros(0), np.zeros(0)\n while measurements < min_meas:\n energies, mags = monteCarlo(n, state, energy, mag, Beta, sweeps, max_sweeps//10)\n energy, mag = energies[-1], mags[-1]\n E = np.concatenate((E,energies))\n M = np.concatenate((M,mags))\n delta_int = eq_time = 2*autocorrelation(M)\n measurements = len(E[eq_time::delta_int])\n logging.info(f\"{measurements} measurements are possible\")\n if measurements < min_meas:\n _energies_ = len(E)\n if _energies_ < max_sweeps:\n sweeps = delta_int*(min_meas-measurements)\n logging.info(f\"\\tdoing {sweeps} more sweeps\")\n else:\n delta_int = (_energies_-eq_time)//min_meas\n j_knife_factor = eq_time/delta_int\n measurements = len(E[eq_time::delta_int])\n logging.info(f\"We will do {measurements} measurements\")\n \n \n #doing measurements\n E = E[eq_time::delta_int]\n M = M[eq_time::delta_int]\n plotMag[k] = np.average(M)/N\n Chi, Chi_err = jackKnife(M,j_knife_factor)\n plotChi[k] =Chi*Beta/N\n plotChi_err[k] =Chi_err*Beta/N\n plotEnergy[k] = np.average(E)/N\n sp_heat, sp_heat_err = jackKnife(E,j_knife_factor)\n plotSH[k] = sp_heat*Beta*Beta/N\n plotSH_err[k] = sp_heat_err*Beta*Beta/N\n plotCorrelation[k] = eq_time//2\n \n \n #lattice expansion\n states[temp] = stackLattice(z,state,energy,mag)\n #states[temp] = zoomLattice(z,state,energy,mag)\n \n \n #PLOTS##PLOTS##PLOTS##PLOTS##PLOTS##PLOTS##PLOTS##PLOTS#\n f = plt.figure(figsize=(16, 9));\n title_name = \"Size:\"+str(n)+\"x\"+str(n)\n 
plt.title(title_name, color='b');\n\n sp = f.add_subplot(2, 2, 1 );\n plt.scatter(T, plotEnergy, s=50, marker='o', color='IndianRed')\n plt.xlabel(\"Temperature (T)\", fontsize=20);\n plt.ylabel(\"Energy \", fontsize=20); plt.axis('tight');\n\n sp = f.add_subplot(2, 2, 2 );\n plt.scatter(T, abs(np.array(plotMag)), s=50, marker='o', color='IndianRed', label = \"data\")\n temp_list = np.linspace(T_min, T_max, 10000)\n plt.plot(temp_list, onsagar_mag(temp_list) , color='blue', label = \"Onsager Solution\") \n plt.legend()\n plt.xlabel(\"Temperature (T)\", fontsize=20); \n plt.ylabel(\"Magnetization \", fontsize=20); plt.axis('tight');\n\n sp = f.add_subplot(2, 2, 3 );\n plt.errorbar(T, plotSH, yerr = plotSH_err, fmt='o', color='IndianRed', label = \"data\")\n plt.plot(temp_list, onsagar_specific_heat(temp_list), color='RoyalBlue', label = \"Onsager Solution\") \n plt.legend()\n plt.xlabel(\"Temperature (T)\", fontsize=20); \n plt.ylabel(\"Specific Heat \", fontsize=20); plt.axis('tight'); \n\n sp = f.add_subplot(2, 2, 4 );\n plt.errorbar(T, plotChi, yerr = plotChi_err, fmt='o', color='IndianRed', label = \"data\")\n plt.xlabel(\"Temperature (T)\", fontsize=20); \n plt.ylabel(\"Susceptibility\", fontsize=20); plt.axis('tight');\n\n timeIs = time.strftime(\"%H-%M-%S\")\n plt.savefig(timeIs+'.pdf')\n \n #storing measurements in in a file\n with open(str(n)+\"data\",\"w\") as file:\n file.write(\"##Temp\\tEnergy\\tMag\\tSp_ht\\tSp_ht_err\\tChi\\tChi_err\\ttau\\n\")\n for i in range(nt):\n file.write(str(T[i])+\"\\t\"+str(plotEnergy[i])+\"\\t\"+str(plotMag[i])+\"\\t\"+str(plotSH[i])+\"\\t\"+str(plotSH_err[i])+\"\\t\"+str(plotChi[i])+\"\\t\"+str(plotChi_err[i])+\"\\t\"+str(plotCorrelation[i])+\"\\t\"+\"\\n\")\n"
] | [
[
"matplotlib.pyplot.legend",
"numpy.sqrt",
"numpy.linspace",
"numpy.concatenate",
"numpy.seterr",
"numpy.var",
"numpy.random.randint",
"numpy.hstack",
"numpy.arange",
"numpy.full",
"matplotlib.pyplot.errorbar",
"matplotlib.pyplot.axis",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.log",
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"numpy.delete",
"numpy.array",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.scatter",
"numpy.ones",
"matplotlib.pyplot.xlabel",
"numpy.average",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
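The `monteCarlo` routine in the entry above mixes the core Metropolis update with bookkeeping (pre-drawn random indices, energy and magnetisation traces). A stripped-down sketch of a single Metropolis sweep under the same conventions (periodic boundaries, Boltzmann factors precomputed as exp(-beta*dE) and indexed by the energy change) is shown below; the lattice size and temperature are placeholders.

```python
import numpy as np
from random import random


def metropolis_sweep(state, beta):
    """One Metropolis sweep (n*n attempted single-spin flips), periodic boundaries."""
    n = state.shape[0]
    exp_betas = np.exp(-beta * np.arange(0, 9))  # acceptance factors, indexed by dE in {0, 4, 8}
    for _ in range(n * n):
        j, k = np.random.randint(0, n, size=2)
        s = state[j, k]
        neighbour_sum = (state[(j - 1) % n, k] + state[(j + 1) % n, k] +
                         state[j, (k - 1) % n] + state[j, (k + 1) % n])
        energy_diff = 2 * s * neighbour_sum
        if energy_diff < 0 or random() < exp_betas[energy_diff]:
            state[j, k] = -s
    return state


if __name__ == "__main__":
    spins = np.ones((16, 16), dtype=int)   # fully ordered start, as in the script above
    metropolis_sweep(spins, beta=1.0 / 2.0)
    print(spins.sum())                      # net magnetisation after one sweep
```

One call performs n*n attempted single-spin flips, which matches one "sweep" in the units used by the script above.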
RangeKing/PaddleViT | [
"0e25958686e04ed8872cf67fba0dfd6918e9b4dd",
"0e25958686e04ed8872cf67fba0dfd6918e9b4dd",
"0e25958686e04ed8872cf67fba0dfd6918e9b4dd",
"0e25958686e04ed8872cf67fba0dfd6918e9b4dd"
] | [
"image_classification/MLP-Mixer/load_pytorch_weights.py",
"image_classification/PiT/main_multi_gpu_distill.py",
"image_classification/XCiT/load_pytorch_weights.py",
"facial_expression/datasets.py"
] | [
"# Copyright (c) 2021 PPViT Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"convert pytorch model weights to paddle pdparams\"\"\"\nimport os\nimport numpy as np\nimport paddle\nimport torch\nimport timm\nfrom mlp_mixer import build_mlp_mixer as build_model\nfrom config import get_config\n\n\ndef print_model_named_params(model):\n print('----------------------------------')\n for name, param in model.named_parameters():\n print(name, param.shape)\n print('----------------------------------')\n\n\ndef print_model_named_buffers(model):\n print('----------------------------------')\n for name, param in model.named_buffers():\n print(name, param.shape)\n print('----------------------------------')\n\n\ndef torch_to_paddle_mapping(model_name, config):\n mapping = [\n ('stem.proj', 'patch_embed.patch_embed'),\n ]\n\n for stage_idx in range(config.MODEL.MIXER.DEPTH):\n th_prefix = f'blocks.{stage_idx}'\n pp_prefix = f'mixer_layers.{stage_idx}'\n\n layer_mapping = [\n (f'{th_prefix}.norm1', f'{pp_prefix}.norm1'),\n (f'{th_prefix}.norm2', f'{pp_prefix}.norm2'),\n (f'{th_prefix}.mlp_tokens.fc1', f'{pp_prefix}.mlp_tokens.fc1'),\n (f'{th_prefix}.mlp_tokens.fc2', f'{pp_prefix}.mlp_tokens.fc2'),\n (f'{th_prefix}.mlp_channels.fc1', f'{pp_prefix}.mlp_channels.fc1'),\n (f'{th_prefix}.mlp_channels.fc2', f'{pp_prefix}.mlp_channels.fc2'),\n ]\n mapping.extend(layer_mapping)\n\n head_mapping = [\n ('norm', 'norm'),\n ('head', 'head'),\n ]\n mapping.extend(head_mapping)\n\n return mapping\n\n\n\ndef convert(torch_model, paddle_model, model_name, config):\n def _set_value(th_name, pd_name, transpose=True):\n th_shape = th_params[th_name].shape\n pd_shape = tuple(pd_params[pd_name].shape) # paddle shape default type is list\n #assert th_shape == pd_shape, f'{th_shape} != {pd_shape}'\n print(f'**SET** {th_name} {th_shape} **TO** {pd_name} {pd_shape}')\n if isinstance(th_params[th_name], torch.nn.parameter.Parameter):\n value = th_params[th_name].data.numpy()\n else:\n value = th_params[th_name].numpy()\n\n if len(value.shape) == 2 and transpose:\n value = value.transpose((1, 0))\n pd_params[pd_name].set_value(value)\n\n # 1. get paddle and torch model parameters\n pd_params = {}\n th_params = {}\n for name, param in paddle_model.named_parameters():\n pd_params[name] = param\n for name, param in torch_model.named_parameters():\n th_params[name] = param\n\n for name, param in paddle_model.named_buffers():\n pd_params[name] = param\n for name, param in torch_model.named_buffers():\n th_params[name] = param\n\n # 2. 
get name mapping pairs\n mapping = torch_to_paddle_mapping(model_name, config)\n\n\n missing_keys_th = []\n missing_keys_pd = []\n zip_map = list(zip(*mapping))\n th_keys = list(zip_map[0])\n pd_keys = list(zip_map[1])\n\n for key in th_params:\n missing = False\n if key not in th_keys:\n missing = True\n if key.endswith('.weight'):\n if key[:-7] in th_keys:\n missing = False\n if key.endswith('.bias'):\n if key[:-5] in th_keys:\n missing = False\n if missing:\n missing_keys_th.append(key)\n\n for key in pd_params:\n missing = False\n if key not in pd_keys:\n missing = True\n if key.endswith('.weight'):\n if key[:-7] in pd_keys:\n missing = False\n if key.endswith('.bias'):\n if key[:-5] in pd_keys:\n missing = False\n if missing:\n missing_keys_pd.append(key)\n\n\n print('====================================')\n print('missing_keys_pytorch:')\n print(missing_keys_th)\n print('missing_keys_paddle:')\n print(missing_keys_pd)\n print('====================================')\n\n # 3. set torch param values to paddle params: may needs transpose on weights\n for th_name, pd_name in mapping:\n if th_name in th_params and pd_name in pd_params: # nn.Parameters\n _set_value(th_name, pd_name)\n else:\n if f'{th_name}.weight' in th_params and f'{pd_name}.weight' in pd_params:\n th_name_w = f'{th_name}.weight'\n pd_name_w = f'{pd_name}.weight'\n _set_value(th_name_w, pd_name_w)\n if f'{th_name}.bias' in th_params and f'{pd_name}.bias' in pd_params:\n th_name_b = f'{th_name}.bias'\n pd_name_b = f'{pd_name}.bias'\n _set_value(th_name_b, pd_name_b)\n\n return paddle_model\n\n\ndef main():\n paddle.set_device('cpu')\n model_name_list = [\n 'mixer_b16_224',\n 'mixer_l16_224',\n ]\n\n for model_name in model_name_list:\n print(f'============= NOW: {model_name} =============')\n sz = 224\n config = get_config(f'./configs/{model_name}.yaml')\n\n paddle_model = build_model(config)\n\n paddle_model.eval()\n print_model_named_params(paddle_model)\n print_model_named_buffers(paddle_model)\n\n print('+++++++++++++++++++++++++++++++++++')\n device = torch.device('cpu')\n torch_model = timm.create_model(model_name, pretrained=True)\n torch_model = torch_model.to(device)\n torch_model.eval()\n print_model_named_params(torch_model)\n print_model_named_buffers(torch_model)\n\n # convert weights\n paddle_model = convert(torch_model, paddle_model, model_name, config)\n\n # check correctness\n x = np.random.randn(2, 3, sz, sz).astype('float32')\n x_paddle = paddle.to_tensor(x)\n x_torch = torch.Tensor(x).to(device)\n\n out_torch = torch_model(x_torch)\n out_paddle = paddle_model(x_paddle)\n\n out_torch = out_torch.data.cpu().numpy()\n out_paddle = out_paddle.cpu().numpy()\n\n print(out_torch.shape, out_paddle.shape)\n print(out_torch[0, 0:100])\n print('========================================================')\n print(out_paddle[0, 0:100])\n assert np.allclose(out_torch, out_paddle, atol = 1e-3)\n\n # save weights for paddle model\n model_path = os.path.join(f'./{model_name}.pdparams')\n paddle.save(paddle_model.state_dict(), model_path)\n print(f'{model_name} done')\n print('all done')\n\n\nif __name__ == \"__main__\":\n main()\n",
"# Copyright (c) 2021 PPViT Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"PiT train and eval using multiple GPU without teacher model and distillation\"\"\"\nimport sys\nimport os\nimport time\nimport argparse\nimport random\nimport math\nimport numpy as np\nimport paddle\nfrom datasets import get_dataloader\nfrom datasets import get_dataset\nfrom config import get_config\nfrom config import update_config\nfrom utils import AverageMeter\nfrom utils import get_logger\nfrom utils import write_log\nfrom utils import all_reduce_mean\nfrom utils import skip_weight_decay_fn\nfrom mixup import Mixup\nfrom model_ema import ModelEma\nfrom losses import LabelSmoothingCrossEntropyLoss\nfrom losses import SoftTargetCrossEntropyLoss\nfrom losses import DistillationLoss\nfrom regnet import build_regnet as build_teacher_model\nfrom pit import build_pit as build_model\n\n\ndef get_arguments():\n \"\"\"return argumeents, this will overwrite the config by (1) yaml file (2) argument values\"\"\"\n parser = argparse.ArgumentParser('PiT')\n parser.add_argument('-cfg', type=str, default=None)\n parser.add_argument('-dataset', type=str, default=None)\n parser.add_argument('-data_path', type=str, default=None)\n parser.add_argument('-output', type=str, default=None)\n parser.add_argument('-batch_size', type=int, default=None)\n parser.add_argument('-batch_size_eval', type=int, default=None)\n parser.add_argument('-image_size', type=int, default=None)\n parser.add_argument('-accum_iter', type=int, default=None)\n parser.add_argument('-pretrained', type=str, default=None)\n parser.add_argument('-teacher_model_path', type=str, default=None)\n parser.add_argument('-resume', type=str, default=None)\n parser.add_argument('-last_epoch', type=int, default=None)\n parser.add_argument('-eval', action='store_true')\n parser.add_argument('-amp', action='store_true')\n arguments = parser.parse_args()\n return arguments\n\n\ndef train(dataloader,\n model,\n optimizer,\n criterion,\n epoch,\n total_epochs,\n total_batches,\n debug_steps=100,\n accum_iter=1,\n model_ema=None,\n mixup_fn=None,\n amp_grad_scaler=None,\n local_logger=None,\n master_logger=None):\n \"\"\"Training for one epoch\n Args:\n dataloader: paddle.io.DataLoader, dataloader instance\n model: nn.Layer, a ViT model\n optimizer: nn.optimizer\n criterion: nn.XXLoss\n epoch: int, current epoch\n total_epochs: int, total num of epochs\n total_batches: int, total num of batches for one epoch\n debug_steps: int, num of iters to log info, default: 100\n accum_iter: int, num of iters for accumulating gradients, default: 1\n model_ema: ModelEma, model moving average instance\n mixup_fn: Mixup, mixup instance, default: None\n amp_grad_scaler: GradScaler, if not None pass the GradScaler and enable AMP, default: None\n local_logger: logger for local process/gpu, default: None\n master_logger: logger for main process, default: None\n Returns:\n train_loss_meter.avg: float, average loss on current process/gpu\n train_acc_meter.avg: 
float, average acc@1 on current process/gpu\n master_loss_meter.avg: float, average loss on all processes/gpus\n master_acc_meter.avg: float, average acc@1 on all processes/gpus\n train_time: float, training time\n \"\"\"\n time_st = time.time()\n train_loss_meter = AverageMeter()\n train_acc_meter = AverageMeter()\n master_loss_meter = AverageMeter()\n master_acc_meter = AverageMeter()\n\n model.train()\n optimizer.clear_grad()\n\n for batch_id, data in enumerate(dataloader):\n # get data\n images = data[0]\n label = data[1]\n label_orig = label.clone()\n batch_size = images.shape[0]\n\n if mixup_fn is not None:\n images, label = mixup_fn(images, label_orig)\n\n # forward\n with paddle.amp.auto_cast(amp_grad_scaler is not None):\n output = model(images)\n loss = criterion(images, output, label)\n\n loss_value = loss.item()\n if not math.isfinite(loss_value):\n print(\"Loss is {}, stopping training\".format(loss_value))\n sys.exit(1)\n\n loss = loss / accum_iter\n\n # backward and step\n if amp_grad_scaler is None: # fp32\n loss.backward()\n if ((batch_id + 1) % accum_iter == 0) or (batch_id + 1 == len(dataloader)):\n optimizer.step()\n optimizer.clear_grad()\n else: # amp\n scaled_loss = amp_grad_scaler.scale(loss)\n scaled_loss.backward()\n if ((batch_id + 1) % accum_iter == 0) or (batch_id + 1 == len(dataloader)):\n # amp for param group reference: https://github.com/PaddlePaddle/Paddle/issues/37188\n amp_grad_scaler.step(optimizer)\n amp_grad_scaler.update()\n optimizer.clear_grad()\n\n if model_ema is not None and paddle.distributed.get_rank() == 0:\n model_ema.update(model)\n\n # average of output and kd_output, same as eval mode\n pred = paddle.nn.functional.softmax((output[0] + output[1]) / 2)\n acc = paddle.metric.accuracy(pred,\n label_orig if mixup_fn else label_orig.unsqueeze(1)).item()\n\n # sync from other gpus for overall loss and acc\n master_loss = all_reduce_mean(loss_value)\n master_acc = all_reduce_mean(acc)\n master_batch_size = all_reduce_mean(batch_size)\n\n master_loss_meter.update(master_loss, master_batch_size)\n master_acc_meter.update(master_acc, master_batch_size)\n train_loss_meter.update(loss_value, batch_size)\n train_acc_meter.update(acc, batch_size)\n\n if batch_id % debug_steps == 0 or batch_id + 1 == len(dataloader):\n general_message = (f\"Epoch[{epoch:03d}/{total_epochs:03d}], \"\n f\"Step[{batch_id:04d}/{total_batches:04d}], \"\n f\"Lr: {optimizer.get_lr():04f}, \")\n local_message = (general_message +\n f\"Loss: {loss_value:.4f} ({train_loss_meter.avg:.4f}), \"\n f\"Avg Acc: {train_acc_meter.avg:.4f}\")\n master_message = (general_message +\n f\"Loss: {master_loss:.4f} ({master_loss_meter.avg:.4f}), \"\n f\"Avg Acc: {master_acc_meter.avg:.4f}\")\n write_log(local_logger, master_logger, local_message, master_message)\n\n paddle.distributed.barrier()\n train_time = time.time() - time_st\n return (train_loss_meter.avg,\n train_acc_meter.avg,\n master_loss_meter.avg,\n master_acc_meter.avg,\n train_time)\n\n\[email protected]_grad()\ndef validate(dataloader,\n model,\n criterion,\n total_batches,\n debug_steps=100,\n local_logger=None,\n master_logger=None):\n \"\"\"Validation for the whole dataset\n Args:\n dataloader: paddle.io.DataLoader, dataloader instance\n model: nn.Layer, a ViT model\n total_batches: int, total num of batches for one epoch\n debug_steps: int, num of iters to log info, default: 100\n local_logger: logger for local process/gpu, default: None\n master_logger: logger for main process, default: None\n Returns:\n val_loss_meter.avg: 
float, average loss on current process/gpu\n val_acc1_meter.avg: float, average top1 accuracy on current processes/gpus\n val_acc5_meter.avg: float, average top5 accuracy on current processes/gpus\n master_loss_meter.avg: float, average loss on all processes/gpus\n master_acc1_meter.avg: float, average top1 accuracy on all processes/gpus\n master_acc5_meter.avg: float, average top5 accuracy on all processes/gpus\n val_time: float, validation time\n \"\"\"\n model.eval()\n val_loss_meter = AverageMeter()\n val_acc1_meter = AverageMeter()\n val_acc5_meter = AverageMeter()\n master_loss_meter = AverageMeter()\n master_acc1_meter = AverageMeter()\n master_acc5_meter = AverageMeter()\n\n time_st = time.time()\n\n for batch_id, data in enumerate(dataloader):\n # get data\n images = data[0]\n label = data[1]\n batch_size = images.shape[0]\n\n output = model(images)\n loss = criterion(output, label)\n loss_value = loss.item()\n\n pred = paddle.nn.functional.softmax(output)\n acc1 = paddle.metric.accuracy(pred, label.unsqueeze(1)).item()\n acc5 = paddle.metric.accuracy(pred, label.unsqueeze(1), k=5).item()\n\n # sync from other gpus for overall loss and acc\n master_loss = all_reduce_mean(loss_value)\n master_acc1 = all_reduce_mean(acc1)\n master_acc5 = all_reduce_mean(acc5)\n master_batch_size = all_reduce_mean(batch_size)\n\n master_loss_meter.update(master_loss, master_batch_size)\n master_acc1_meter.update(master_acc1, master_batch_size)\n master_acc5_meter.update(master_acc5, master_batch_size)\n val_loss_meter.update(loss_value, batch_size)\n val_acc1_meter.update(acc1, batch_size)\n val_acc5_meter.update(acc5, batch_size)\n\n if batch_id % debug_steps == 0:\n local_message = (f\"Step[{batch_id:04d}/{total_batches:04d}], \"\n f\"Avg Loss: {val_loss_meter.avg:.4f}, \"\n f\"Avg Acc@1: {val_acc1_meter.avg:.4f}, \"\n f\"Avg Acc@5: {val_acc5_meter.avg:.4f}\")\n master_message = (f\"Step[{batch_id:04d}/{total_batches:04d}], \"\n f\"Avg Loss: {master_loss_meter.avg:.4f}, \"\n f\"Avg Acc@1: {master_acc1_meter.avg:.4f}, \"\n f\"Avg Acc@5: {master_acc5_meter.avg:.4f}\")\n write_log(local_logger, master_logger, local_message, master_message)\n paddle.distributed.barrier()\n val_time = time.time() - time_st\n return (val_loss_meter.avg,\n val_acc1_meter.avg,\n val_acc5_meter.avg,\n master_loss_meter.avg,\n master_acc1_meter.avg,\n master_acc5_meter.avg,\n val_time)\n\n\ndef main_worker(*args):\n \"\"\"main method for each process\"\"\"\n # STEP 0: Preparation\n paddle.device.set_device('gpu')\n paddle.distributed.init_parallel_env()\n world_size = paddle.distributed.get_world_size()\n local_rank = paddle.distributed.get_rank()\n config = args[0]\n last_epoch = config.TRAIN.LAST_EPOCH\n seed = config.SEED + local_rank\n paddle.seed(seed)\n np.random.seed(seed)\n random.seed(seed)\n local_logger, master_logger = get_logger(config.SAVE)\n message = (f'----- world_size = {world_size}, local_rank = {local_rank} \\n'\n f'----- {config}')\n write_log(local_logger, master_logger, message)\n\n # STEP 1: Create model\n model = build_model(config)\n\n # define model ema\n model_ema = None\n if not config.EVAL and config.TRAIN.MODEL_EMA and local_rank == 0:\n model_ema = ModelEma(model, decay=config.TRAIN.MODEL_EMA_DECAY)\n if config.TRAIN.MODEL_EMA_FORCE_CPU:\n model_ema.to('cpu')\n\n # STEP 2: Create train and val dataloader\n if not config.EVAL:\n dataset_train = args[1]\n dataloader_train = get_dataloader(config, dataset_train, True, True)\n total_batch_train = len(dataloader_train)\n message = f'----- Total # 
of train batch (single gpu): {total_batch_train}'\n write_log(local_logger, master_logger, message)\n\n dataset_val = args[2]\n dataloader_val = get_dataloader(config, dataset_val, False, True)\n total_batch_val = len(dataloader_val)\n message = f'----- Total # of val batch (single gpu): {total_batch_val}'\n write_log(local_logger, master_logger, message)\n\n # STEP 3: (Optional) Define Mixup function\n mixup_fn = None\n if (config.TRAIN.MIXUP_PROB > 0 or config.TRAIN.CUTMIX_ALPHA > 0 or\n config.TRAIN.CUTMIX_MINMAX is not None):\n mixup_fn = Mixup(mixup_alpha=config.TRAIN.MIXUP_ALPHA,\n cutmix_alpha=config.TRAIN.CUTMIX_ALPHA,\n cutmix_minmax=config.TRAIN.CUTMIX_MINMAX,\n prob=config.TRAIN.MIXUP_PROB,\n switch_prob=config.TRAIN.MIXUP_SWITCH_PROB,\n mode=config.TRAIN.MIXUP_MODE,\n label_smoothing=config.TRAIN.SMOOTHING)#\n\n # STEP 4: Define loss/criterion\n if mixup_fn is not None:\n criterion = SoftTargetCrossEntropyLoss()\n elif config.TRAIN.SMOOTHING:\n criterion = LabelSmoothingCrossEntropyLoss()\n else:\n criterion = paddle.nn.CrossEntropyLoss()\n # Use CrossEntropyLoss for val\n criterion_val = paddle.nn.CrossEntropyLoss()\n\n # STEP 5: Create Teacher model and distill loss\n teacher_model = None\n if not config.EVAL:\n if config.TRAIN.DISTILLATION_TYPE != 'none':\n write_log(local_logger, master_logger,\n f'----- Load teacher model: {config.TRAIN.TEACHER_MODEL}')\n teacher_model = build_teacher_model()\n assert os.path.isfile(config.TRAIN.TEACHER_MODEL)\n teacher_model_state = paddle.load(config.TRAIN.TEACHER_MODEL)\n teacher_model.set_state_dict(teacher_model_state)\n teacher_model.eval()\n teacher_model = paddle.DataParallel(teacher_model)\n # wrap the criterion:\n criterion = DistillationLoss(criterion,\n teacher_model,\n config.TRAIN.DISTILLATION_TYPE,\n config.TRAIN.DISTILLATION_ALPHA,\n config.TRAIN.DISTILLATION_TAU)\n\n # STEP 5: Define optimizer and lr_scheduler\n if not config.EVAL:\n # set lr according to batch size and world size\n if config.TRAIN.LINEAR_SCALED_LR is not None:\n effective_batch_size = config.DATA.BATCH_SIZE * config.TRAIN.ACCUM_ITER * world_size\n config.TRAIN.BASE_LR = (\n config.TRAIN.BASE_LR * effective_batch_size / config.TRAIN.LINEAR_SCALED_LR\n )\n config.TRAIN.WARMUP_START_LR = (\n config.TRAIN.WARMUP_START_LR* effective_batch_size / config.TRAIN.LINEAR_SCALED_LR\n )\n config.TRAIN.END_LR = (\n config.TRAIN.END_LR * effective_batch_size / config.TRAIN.LINEAR_SCALED_LR\n )\n message = (f'Base lr is scaled to: {config.TRAIN.BASE_LR}, '\n f'warmup start lr is scaled to: {config.TRAIN.WARMUP_START_LR}, '\n f'end lr is scaled to: {config.TRAIN.BASE_LR}')\n write_log(local_logger, master_logger, message)\n # define scaler for amp training\n amp_grad_scaler = paddle.amp.GradScaler() if config.AMP else None\n # warmup + cosine lr scheduler\n if config.TRAIN.WARMUP_EPOCHS > 0:\n cosine_lr_scheduler = paddle.optimizer.lr.CosineAnnealingDecay(\n learning_rate=config.TRAIN.BASE_LR,\n T_max=config.TRAIN.NUM_EPOCHS - config.TRAIN.WARMUP_EPOCHS,\n eta_min=config.TRAIN.END_LR,\n last_epoch=-1) # do not set last epoch, handled in warmup sched get_lr()\n lr_scheduler = paddle.optimizer.lr.LinearWarmup(\n learning_rate=cosine_lr_scheduler, # use cosine lr sched after warmup\n warmup_steps=config.TRAIN.WARMUP_EPOCHS, # only support position integet\n start_lr=config.TRAIN.WARMUP_START_LR,\n end_lr=config.TRAIN.BASE_LR,\n last_epoch=config.TRAIN.LAST_EPOCH)\n else:\n lr_scheduler = paddle.optimizer.lr.CosineAnnealingDecay(\n learning_rate=config.TRAIN.BASE_LR,\n 
T_max=config.TRAIN.NUM_EPOCHS,\n eta_min=config.TRAIN.END_LR,\n last_epoch=config.TRAIN.LAST_EPOCH)\n\n # set gradient clip\n if config.TRAIN.GRAD_CLIP:\n clip = paddle.nn.ClipGradByGlobalNorm(config.TRAIN.GRAD_CLIP)\n else:\n clip = None\n # set optimizer\n optimizer = paddle.optimizer.AdamW(\n parameters=model.parameters(),\n learning_rate=lr_scheduler, # set to scheduler\n beta1=config.TRAIN.OPTIMIZER.BETAS[0],\n beta2=config.TRAIN.OPTIMIZER.BETAS[1],\n weight_decay=config.TRAIN.WEIGHT_DECAY,\n epsilon=config.TRAIN.OPTIMIZER.EPS,\n grad_clip=clip,\n apply_decay_param_fun=skip_weight_decay_fn(\n model, # skip bn and bias\n ['pos_embed', 'cls_token', 'dist_token']), # skip custom ops\n )\n\n # STEP 6: (Optional) Load pretrained model weights for evaluation or finetuning\n if config.MODEL.PRETRAINED:\n assert os.path.isfile(config.MODEL.PRETRAINED) is True\n model_state = paddle.load(config.MODEL.PRETRAINED)\n if 'model' in model_state: # load state_dict with multi items: model, optimier, and epoch\n # pretrain only load model weight, opt and epoch are ignored\n if 'model_ema' in model_state:\n model_state = model_state['model_ema']\n else:\n model_state = model_state['model']\n model.set_state_dict(model_state)\n message = f\"----- Pretrained: Load model state from {config.MODEL.PRETRAINED}\"\n write_log(local_logger, master_logger, message)\n\n # STEP 7: (Optional) Load model weights and status for resume training\n if config.MODEL.RESUME:\n assert os.path.isfile(config.MODEL.RESUME) is True\n model_state = paddle.load(config.MODEL.RESUME)\n if 'model' in model_state: # load state_dict with multi items: model, optimier, and epoch\n model.set_state_dict(model_state['model'])\n\n if 'optimizer' in model_state:\n optimizer.set_state_dict(model_state['optimizer'])\n if 'epoch' in model_state:\n config.TRAIN.LAST_EPOCH = model_state['epoch']\n last_epoch = model_state['epoch']\n if 'lr_scheduler' in model_state:\n lr_scheduler.set_state_dict(model_state['lr_scheduler'])\n if 'amp_grad_scaler' in model_state and amp_grad_scaler is not None:\n amp_grad_scaler.load_state_dict(model_state['amp_grad_scaler'])\n if config.TRAIN.MODEL_EMA and local_rank == 0:\n model_ema.module.set_state_dict(model_state['model_ema'])\n\n lr_scheduler.step(last_epoch + 1)\n\n message = (f\"----- Resume Training: Load model from {config.MODEL.RESUME}, w/t \"\n f\"opt = [{'optimizer' in model_state}], \"\n f\"lr_scheduler = [{'lr_scheduler' in model_state}], \"\n f\"model_ema = [{'model_ema' in model_state}], \"\n f\"epoch = [{model_state.get('epoch', -1)}], \"\n f\"amp_grad_scaler = [{'amp_grad_scaler' in model_state}]\")\n write_log(local_logger, master_logger, message)\n else: # direct load pdparams without other items\n message = f\"----- Resume Training: Load {config.MODEL.RESUME}, w/o opt/epoch/scaler\"\n write_log(local_logger, master_logger, message, 'warning')\n model.set_state_dict(model_state)\n lr_scheduler.step(last_epoch + 1)\n\n # STEP 8: Enable model data parallelism on multi processes\n model = paddle.DataParallel(model)\n\n # STEP 9: (Optional) Run evaluation and return\n if config.EVAL:\n write_log(local_logger, master_logger, \"----- Start Validation\")\n val_loss, val_acc1, val_acc5, avg_loss, avg_acc1, avg_acc5, val_time = validate(\n dataloader=dataloader_val,\n model=model,\n criterion=criterion_val,\n total_batches=total_batch_val,\n debug_steps=config.REPORT_FREQ,\n local_logger=local_logger,\n master_logger=master_logger)\n local_message = (\"----- Validation: \" +\n f\"Validation Loss: 
{val_loss:.4f}, \" +\n f\"Validation Acc@1: {val_acc1:.4f}, \" +\n f\"Validation Acc@5: {val_acc5:.4f}, \" +\n f\"time: {val_time:.2f}\")\n master_message = (\"----- Validation: \" +\n f\"Validation Loss: {avg_loss:.4f}, \" +\n f\"Validation Acc@1: {avg_acc1:.4f}, \" +\n f\"Validation Acc@5: {avg_acc5:.4f}, \" +\n f\"time: {val_time:.2f}\")\n write_log(local_logger, master_logger, local_message, master_message)\n return\n\n # STEP 10: Run training\n write_log(local_logger, master_logger, f\"----- Start training from epoch {last_epoch+1}.\")\n for epoch in range(last_epoch + 1, config.TRAIN.NUM_EPOCHS + 1):\n # Train one epoch\n write_log(local_logger, master_logger, f\"Train epoch {epoch}. LR={optimizer.get_lr():.6e}\")\n train_loss, train_acc, avg_loss, avg_acc, train_time = train(\n dataloader=dataloader_train,\n model=model,\n optimizer=optimizer,\n criterion=criterion,\n epoch=epoch,\n total_epochs=config.TRAIN.NUM_EPOCHS,\n total_batches=total_batch_train,\n debug_steps=config.REPORT_FREQ,\n accum_iter=config.TRAIN.ACCUM_ITER,\n model_ema=model_ema,\n mixup_fn=mixup_fn,\n amp_grad_scaler=amp_grad_scaler,\n local_logger=local_logger,\n master_logger=master_logger)\n\n # update lr\n lr_scheduler.step()\n\n general_message = (f\"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], \"\n f\"Lr: {optimizer.get_lr():.4f}, \"\n f\"time: {train_time:.2f}, \")\n local_message = (general_message +\n f\"Train Loss: {train_loss:.4f}, \"\n f\"Train Acc: {train_acc:.4f}\")\n master_message = (general_message +\n f\"Train Loss: {avg_loss:.4f}, \"\n f\"Train Acc: {avg_acc:.4f}\")\n write_log(local_logger, master_logger, local_message, master_message)\n\n # Evaluation (optional)\n if epoch % config.VALIDATE_FREQ == 0 or epoch == config.TRAIN.NUM_EPOCHS:\n write_log(local_logger, master_logger, f'----- Validation after Epoch: {epoch}')\n val_loss, val_acc1, val_acc5, avg_loss, avg_acc1, avg_acc5, val_time = validate(\n dataloader=dataloader_val,\n model=model,\n criterion=criterion_val,\n total_batches=total_batch_val,\n debug_steps=config.REPORT_FREQ,\n local_logger=local_logger,\n master_logger=master_logger)\n local_message = (f\"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], \" +\n f\"Validation Loss: {val_loss:.4f}, \" +\n f\"Validation Acc@1: {val_acc1:.4f}, \" +\n f\"Validation Acc@5: {val_acc5:.4f}, \" +\n f\"time: {val_time:.2f}\")\n master_message = (f\"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], \" +\n f\"Validation Loss: {avg_loss:.4f}, \" +\n f\"Validation Acc@1: {avg_acc1:.4f}, \" +\n f\"Validation Acc@5: {avg_acc5:.4f}, \" +\n f\"time: {val_time:.2f}\")\n write_log(local_logger, master_logger, local_message, master_message)\n\n # Save model weights and training status\n if local_rank == 0:\n if epoch % config.SAVE_FREQ == 0 or epoch == config.TRAIN.NUM_EPOCHS:\n model_path = os.path.join(\n config.SAVE, f\"Epoch-{epoch}-Loss-{avg_loss}.pdparams\")\n state_dict = dict()\n state_dict['model'] = model.state_dict()\n if model_ema is not None:\n state_dict['model_ema'] = model_ema.state_dict()\n state_dict['optimizer'] = optimizer.state_dict()\n state_dict['epoch'] = epoch\n if lr_scheduler is not None:\n state_dict['lr_scheduler'] = lr_scheduler.state_dict()\n if amp_grad_scaler is not None:\n state_dict['amp_grad_scaler'] = amp_grad_scaler.state_dict()\n paddle.save(state_dict, model_path)\n message = (f\"----- Save model: {model_path}\")\n write_log(local_logger, master_logger, message)\n\n\ndef main():\n # config is updated in order: (1) default in config.py, (2) 
yaml file, (3) arguments\n config = update_config(get_config(), get_arguments())\n\n # set output folder\n config.SAVE = os.path.join(config.SAVE,\n f\"{'eval' if config.EVAL else 'train'}-{time.strftime('%Y%m%d-%H-%M')}\")\n if not os.path.exists(config.SAVE):\n os.makedirs(config.SAVE, exist_ok=True)\n\n # get train dataset if in train mode and val dataset\n dataset_train = get_dataset(config, is_train=True) if not config.EVAL else None\n dataset_val = get_dataset(config, is_train=False)\n\n # dist spawn lunch: use CUDA_VISIBLE_DEVICES to set available gpus\n paddle.distributed.spawn(main_worker, args=(config, dataset_train, dataset_val))\n\n\nif __name__ == \"__main__\":\n main()\n",
"# Copyright (c) 2021 PPViT Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"convert pytorch model weights to paddle pdparams\"\"\"\nimport os\nimport numpy as np\nimport paddle\nimport torch\nimport timm\nfrom xcit_torch.xcit import *\nfrom xcit import build_xcit as build_model\nfrom config import get_config\n\n\ndef print_model_named_params(model):\n print('----------------------------------')\n for name, param in model.named_parameters():\n print(name, param.shape)\n print('----------------------------------')\n\n\ndef print_model_named_buffers(model):\n print('----------------------------------')\n for name, param in model.named_buffers():\n print(name, param.shape)\n print('----------------------------------')\n\n\ndef torch_to_paddle_mapping(model_name, config):\n mapping = [\n ('cls_token', 'cls_token'),\n ('pos_embeder.token_projection', 'pos_embeder.token_projection'),\n ('patch_embed.proj.0.0', 'patch_embed.proj.0.0'), # bn\n ('patch_embed.proj.0.1', 'patch_embed.proj.0.1'), # bn\n ('patch_embed.proj.2.0', 'patch_embed.proj.2.0'),\n ('patch_embed.proj.2.1', 'patch_embed.proj.2.1'),\n ('patch_embed.proj.4.0', 'patch_embed.proj.4.0'),\n ('patch_embed.proj.4.1', 'patch_embed.proj.4.1'), # bn\n ('patch_embed.proj.6.0', 'patch_embed.proj.6.0'),\n ('patch_embed.proj.6.1', 'patch_embed.proj.6.1'), # bn\n ]\n\n for stage_idx in range(config.MODEL.DEPTH):\n pp_prefix = f'blocks.{stage_idx}'\n th_prefix = f'blocks.{stage_idx}'\n\n layer_mapping = [\n (f'{th_prefix}.gamma1', f'{pp_prefix}.gamma1'),\n (f'{th_prefix}.gamma2', f'{pp_prefix}.gamma2'),\n (f'{th_prefix}.gamma3', f'{pp_prefix}.gamma3'),\n\n (f'{th_prefix}.norm1', f'{pp_prefix}.norm1'),\n (f'{th_prefix}.norm2', f'{pp_prefix}.norm2'),\n (f'{th_prefix}.norm3', f'{pp_prefix}.norm3'),\n\n (f'{th_prefix}.attn.temperature', f'{pp_prefix}.attn.temperature'),\n (f'{th_prefix}.attn.qkv', f'{pp_prefix}.attn.qkv'),\n (f'{th_prefix}.attn.proj', f'{pp_prefix}.attn.proj'),\n\n (f'{th_prefix}.local_mp.conv1', f'{pp_prefix}.local_mp.conv1'),\n (f'{th_prefix}.local_mp.conv2', f'{pp_prefix}.local_mp.conv2'),\n (f'{th_prefix}.local_mp.bn', f'{pp_prefix}.local_mp.bn'),\n\n (f'{th_prefix}.mlp.fc1', f'{pp_prefix}.mlp.fc1'),\n (f'{th_prefix}.mlp.fc2', f'{pp_prefix}.mlp.fc2'),\n ]\n mapping.extend(layer_mapping)\n\n for i in range(2):\n layer_mapping = [\n (f'cls_attn_blocks.{i}.gamma1', f'cls_attn_blocks.{i}.gamma1'),\n (f'cls_attn_blocks.{i}.gamma2', f'cls_attn_blocks.{i}.gamma2'),\n (f'cls_attn_blocks.{i}.norm1', f'cls_attn_blocks.{i}.norm1'),\n (f'cls_attn_blocks.{i}.norm2', f'cls_attn_blocks.{i}.norm2'),\n (f'cls_attn_blocks.{i}.attn.qkv', f'cls_attn_blocks.{i}.attn.qkv'),\n (f'cls_attn_blocks.{i}.attn.proj', f'cls_attn_blocks.{i}.attn.proj'),\n (f'cls_attn_blocks.{i}.mlp.fc1', f'cls_attn_blocks.{i}.mlp.fc1'),\n (f'cls_attn_blocks.{i}.mlp.fc2', f'cls_attn_blocks.{i}.mlp.fc2'),\n ]\n mapping.extend(layer_mapping)\n\n head_mapping = [\n ('norm', 'norm'),\n ('head', 'head'),\n ]\n mapping.extend(head_mapping)\n\n 
return mapping\n\n\n\ndef convert(torch_model, paddle_model, model_name, config):\n def _set_value(th_name, pd_name, transpose=True):\n th_shape = th_params[th_name].shape\n pd_shape = tuple(pd_params[pd_name].shape) # paddle shape default type is list\n #assert th_shape == pd_shape, f'{th_shape} != {pd_shape}'\n print(f'**SET** {th_name} {th_shape} **TO** {pd_name} {pd_shape}')\n if isinstance(th_params[th_name], torch.nn.parameter.Parameter):\n value = th_params[th_name].cpu().data.numpy()\n else:\n value = th_params[th_name].cpu().numpy()\n\n if len(value.shape) == 2 and transpose:\n value = value.transpose((1, 0))\n pd_params[pd_name].set_value(value)\n\n # 1. get paddle and torch model parameters\n pd_params = {}\n th_params = {}\n for name, param in paddle_model.named_parameters():\n pd_params[name] = param\n for name, param in torch_model.named_parameters():\n th_params[name] = param\n\n for name, param in paddle_model.named_buffers():\n pd_params[name] = param\n for name, param in torch_model.named_buffers():\n th_params[name] = param\n\n # 2. get name mapping pairs\n mapping = torch_to_paddle_mapping(model_name, config)\n\n\n missing_keys_th = []\n missing_keys_pd = []\n zip_map = list(zip(*mapping))\n th_keys = list(zip_map[0])\n pd_keys = list(zip_map[1])\n\n for key in th_params:\n missing = False\n if key not in th_keys:\n missing = True\n if key.endswith('.weight'):\n if key[:-7] in th_keys:\n missing = False\n if key.endswith('.bias'):\n if key[:-5] in th_keys:\n missing = False\n if key.endswith('.running_mean'):\n if key[:-13] in th_keys:\n missing = False\n if key.endswith('.running_var'):\n if key[:-12] in th_keys:\n missing = False\n if missing:\n missing_keys_th.append(key)\n\n for key in pd_params:\n missing = False\n if key not in pd_keys:\n missing = True\n if key.endswith('.weight'):\n if key[:-7] in pd_keys:\n missing = False\n if key.endswith('.bias'):\n if key[:-5] in pd_keys:\n missing = False\n if key.endswith('._mean'):\n if key[:-6] in pd_keys:\n missing = False\n if key.endswith('._variance'):\n if key[:-10] in pd_keys:\n missing = False\n if missing:\n missing_keys_pd.append(key)\n\n\n print('====================================')\n print('missing_keys_pytorch:')\n print(missing_keys_th)\n print('missing_keys_paddle:')\n print(missing_keys_pd)\n print('====================================')\n\n # 3. 
set torch param values to paddle params: may needs transpose on weights\n for th_name, pd_name in mapping:\n if th_name in th_params and pd_name in pd_params: # nn.Parameters\n _set_value(th_name, pd_name)\n else:\n if f'{th_name}.weight' in th_params and f'{pd_name}.weight' in pd_params:\n th_name_w = f'{th_name}.weight'\n pd_name_w = f'{pd_name}.weight'\n _set_value(th_name_w, pd_name_w)\n if f'{th_name}.bias' in th_params and f'{pd_name}.bias' in pd_params:\n th_name_b = f'{th_name}.bias'\n pd_name_b = f'{pd_name}.bias'\n _set_value(th_name_b, pd_name_b)\n if f'{th_name}.running_mean' in th_params and f'{pd_name}._mean' in pd_params:\n th_name_w = f'{th_name}.running_mean'\n pd_name_w = f'{pd_name}._mean'\n _set_value(th_name_w, pd_name_w)\n if f'{th_name}.running_var' in th_params and f'{pd_name}._variance' in pd_params:\n th_name_b = f'{th_name}.running_var'\n pd_name_b = f'{pd_name}._variance'\n _set_value(th_name_b, pd_name_b)\n\n return paddle_model\n\n\ndef main():\n paddle.set_device('gpu')\n\n #m_sz = ['nano']\n m_sz = ['nano', 'tiny', 'small', 'medium','large']\n p_sz = ['p8', 'p16']\n sz = [12, 24]\n img_szs = [224, 384]\n dists = [True, False]\n\n model_name_list = []\n\n for m in m_sz:\n for p in p_sz:\n for s in sz:\n for img_sz in img_szs:\n if m == 'nano' and s == 24:\n continue\n if m == 'medium' and s == 12:\n continue\n if m == 'large' and s == 12:\n continue\n name = f'xcit_{m}_{s}_{p}_{img_sz}'\n model_name_list.append(name)\n print(model_name_list)\n\n for model_name in model_name_list:\n for dist in dists:\n print(f'============= NOW: {model_name} DIST: {dist} =============')\n\n if dist:\n if os.path.isfile(os.path.join(f'./{model_name}_dist.pdparams')):\n print('pdparams exists, skip')\n continue\n else:\n if os.path.isfile(os.path.join(f'./{model_name}.pdparams')):\n print('pdparams exists, skip')\n continue\n\n\n sz = int(model_name[-3:])\n if sz == 384 and not dist:\n continue\n config = get_config(f'./configs/{model_name}.yaml')\n\n #config.defrost()\n #config.TRAIN.DISTILLATION_TYPE = 'none' if not dist else 'hard'\n paddle_model = build_model(config)\n \n paddle_model.eval()\n print_model_named_params(paddle_model)\n print_model_named_buffers(paddle_model)\n \n print('+++++++++++++++++++++++++++++++++++')\n device = torch.device('cuda:1')\n\n #torch_model = timm.create_model(model_name, pretrained=True)\n torch_model = eval(f'{model_name[:-4]}(pretrained=False)')\n if dist:\n state_dict = torch.load(f'/workspace/pth_models_0330/xcit_pth/{model_name}_dist.pth')\n else:\n state_dict = torch.load(f'/workspace/pth_models_0330/xcit_pth/{model_name}.pth')\n #print(state_dict.keys())\n #print(state_dict['model_ema'].keys())\n #torch_model.load_state_dict(state_dict['model_ema'], strict=True)\n torch_model.load_state_dict(state_dict['model'], strict=True)\n torch_model = torch_model.to(device)\n torch_model.eval()\n print_model_named_params(torch_model)\n print_model_named_buffers(torch_model)\n \n # convert weights\n paddle_model = convert(torch_model, paddle_model, model_name, config)\n \n # check correctness\n x = np.random.randn(1, 3, sz, sz).astype('float32')\n x_paddle = paddle.to_tensor(x)\n x_torch = torch.Tensor(x).to(device)\n \n out_torch = torch_model(x_torch)\n out_paddle = paddle_model(x_paddle)\n \n out_torch = out_torch.data.cpu().numpy()\n out_paddle = out_paddle.cpu().numpy()\n \n print(out_torch.shape, out_paddle.shape)\n print(out_torch[0, 0:100])\n print('========================================================')\n print(out_paddle[0, 0:100])\n 
assert np.allclose(out_torch, out_paddle, atol = 1e-4)\n \n # save weights for paddle model\n if dist:\n model_path = os.path.join(f'./{model_name}_distill.pdparams')\n else:\n model_path = os.path.join(f'./{model_name}.pdparams')\n\n paddle.save(paddle_model.state_dict(), model_path)\n print(f'{model_name} done')\n print('all done')\n\n\nif __name__ == \"__main__\":\n main()\n",
"import os\nimport math\nimport numpy as np\nimport random\nimport glob\nimport PIL\nfrom paddle.io import Dataset\nfrom paddle.io import DataLoader\nfrom paddle.io import DistributedBatchSampler\nfrom paddle.vision import transforms\nfrom paddle.vision import image_load\nfrom random_erasing import RandomErasing\nfrom config import get_config\n\n\nclass ABAWDataset(Dataset):\n def __init__(self, file_folder, anno_folder, data_list=None, class_type='all', is_train=True, transform_ops=None):\n super().__init__()\n assert class_type in ['all', 'coarse', 'negative']\n anno_folder = os.path.join(anno_folder, 'Train_Set' if is_train else \"Validation_Set\")\n class_names_original = ['Neutral', 'Anger', 'Disgust', 'Fear', 'Happiness', 'Sadness', 'Surprise', 'Other']\n class_names_coarse = ['Neutral', 'Happiness', 'Surprise', 'Other', 'Negative']\n class_names_negative = ['Anger', 'Disgust', 'Fear', 'Sadness']\n class_mapping = {\n 'all': None,\n 'coarse': [0, 4, 4, 4, 1, 4, 2, 3],\n 'negative': [-1, 0, 1, 2, -1, 3, -1, -1]\n }\n self.transforms = transform_ops\n self.file_folder = file_folder\n\n if data_list is not None and os.path.isfile(data_list):\n print(f'----- Loading data list form: {data_list}')\n self.data_list = []\n with open(data_list, 'r') as infile:\n for line in infile:\n self.data_list.append(\n (line.split(' ')[0], int(line.split(' ')[1]))\n )\n else:\n print(f'----- Generating data list form: {anno_folder}')\n save_path = f'./train_list_{class_type}.txt' if is_train else f'./val_list_{class_type}.txt' \n self.data_list = self.gen_list(file_folder,\n anno_folder,\n class_mapping=class_mapping[class_type],\n save_path=save_path)\n print(f'----- Total images: {len(self.data_list)}')\n\n def __len__(self):\n return len(self.data_list)\n\n def __getitem__(self, index):\n data = image_load(os.path.join(self.file_folder, self.data_list[index][0])).convert('RGB')\n data = self.transforms(data)\n label = self.data_list[index][1]\n image_path = self.data_list[index][0]\n\n return data, label, image_path\n\n def gen_list(self, file_folder, anno_folder, class_mapping=None, save_path=None):\n \"\"\"Generate list of data samples where each line contains image path and its label\n Input:\n file_folder: folder path of images (aligned)\n anno_folder: folder path of annotations, e.g., ./EXPR_Classification_Challenge/\n class_mapping: list, class mapping for negative and coarse\n save_path: path of a txt file for saving list, default None\n Output:\n out_list: list of tuple contains relative file path and its label \n \"\"\"\n out_list = []\n for label_file in glob.glob(os.path.join(anno_folder, '*.txt')):\n with open(label_file, 'r') as infile:\n print(f'----- Reading labels from: {os.path.basename(label_file)}')\n vid_name = os.path.basename(label_file)[0:-4]\n for idx, line in enumerate(infile):\n if idx == 0:\n classnames = line.split(',')\n else:\n label = int(line)\n if label == -1: # eliminate data with -1 label\n continue\n if class_mapping is not None:\n label = class_mapping[label]\n if label == -1: # eliminate data with -1 label (negative)\n continue\n\n image_name = f'{str(idx).zfill(5)}.jpg'\n if os.path.isfile(os.path.join(file_folder, vid_name, image_name)):\n out_list.append((os.path.join(vid_name, image_name), label)) # tuple\n if save_path is not None:\n with open(save_path, 'w') as ofile:\n for path, label in out_list:\n ofile.write(f'{path} {label}\\n')\n print(f'List saved to: {save_path}')\n\n return out_list\n \n\nclass RandomApply():\n def __init__(self, transforms, 
prob=0.5):\n self.prob = prob\n self.transforms = transforms\n def __call__(self, x):\n if random.random() > self.prob:\n for t in self.transforms:\n x = t(x)\n return x\n\nclass GaussianBlur():\n def __init__(self, sigma_min=0.1, sigma_max=2.0):\n self.sigma_min = sigma_min\n self.sigma_max = sigma_max\n\n def __call__(self, x):\n sigma = np.random.uniform(self.sigma_min, self.sigma_max)\n x = x.filter(PIL.ImageFilter.GaussianBlur(radius=sigma))\n return x\n\n\ndef get_train_transforms(config):\n aug_op_list = []\n aug_op_list.append(RandomApply([transforms.RandomRotation(degrees=6)], prob=0.5))\n aug_op_list.append(\n transforms.RandomResizedCrop((config.DATA.IMAGE_SIZE, config.DATA.IMAGE_SIZE),\n scale=(0.08, 1.0), ratio=(1., 1.), interpolation='bicubic'))\n aug_op_list.append(transforms.RandomHorizontalFlip())\n aug_op_list.append(RandomApply([transforms.Grayscale()], prob=0.2))\n aug_op_list.append(RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4)], prob=0.8))\n aug_op_list.append(RandomApply([GaussianBlur(0.1, 2.0)], prob=0.5))\n aug_op_list.append(transforms.ToTensor())\n aug_op_list.append(transforms.Normalize(mean=config.DATA.IMAGENET_MEAN,\n std=config.DATA.IMAGENET_STD))\n if config.TRAIN.RANDOM_ERASE_PROB > 0.:\n random_erasing = RandomErasing(prob=config.TRAIN.RANDOM_ERASE_PROB,\n mode=config.TRAIN.RANDOM_ERASE_MODE,\n max_count=config.TRAIN.RANDOM_ERASE_COUNT,\n num_splits=config.TRAIN.RANDOM_ERASE_SPLIT)\n aug_op_list.append(random_erasing)\n transforms_train = transforms.Compose(aug_op_list)\n return transforms_train\n\n\ndef get_val_transforms(config):\n scale_size = int(math.floor(config.DATA.IMAGE_SIZE / config.DATA.CROP_PCT))\n transforms_val = transforms.Compose([\n transforms.Resize(scale_size, 'bicubic'), # single int for resize shorter side of image\n transforms.CenterCrop((config.DATA.IMAGE_SIZE, config.DATA.IMAGE_SIZE)),\n transforms.ToTensor(),\n transforms.Normalize(mean=config.DATA.IMAGENET_MEAN, std=config.DATA.IMAGENET_STD)])\n return transforms_val\n\n\ndef get_dataset(config, is_train=True):\n if config.DATA.DATASET == \"ABAW\":\n if is_train:\n transform_ops = get_train_transforms(config)\n else:\n transform_ops = get_val_transforms(config)\n dataset = ABAWDataset(file_folder=config.DATA.DATA_FOLDER,\n anno_folder=config.DATA.ANNO_FOLDER,\n data_list=config.DATA.DATA_LIST_TRAIN if is_train else config.DATA.DATA_LIST_VAL,\n class_type=config.DATA.CLASS_TYPE,\n is_train=is_train,\n transform_ops=transform_ops)\n else:\n raise NotImplementedError(\n \"Wrong dataset name: [{config.DATA.DATASET}]. 
Only 'imagenet2012' is supported now\")\n return dataset\n\n\ndef get_dataloader(config, dataset, is_train=True, use_dist_sampler=False):\n \"\"\"Get dataloader from dataset, allows multiGPU settings.\n Multi-GPU loader is implements as distributedBatchSampler.\n\n Args:\n config: see config.py for details\n dataset: paddle.io.dataset object\n is_train: bool, when False, shuffle is off and BATCH_SIZE_EVAL is used, default: True\n use_dist_sampler: if True, DistributedBatchSampler is used, default: False\n Returns:\n dataloader: paddle.io.DataLoader object.\n \"\"\"\n batch_size = config.DATA.BATCH_SIZE if is_train else config.DATA.BATCH_SIZE_EVAL\n\n if use_dist_sampler is True:\n sampler = DistributedBatchSampler(dataset=dataset,\n batch_size=batch_size,\n shuffle=is_train,\n drop_last=is_train)\n dataloader = DataLoader(dataset=dataset,\n batch_sampler=sampler,\n num_workers=config.DATA.NUM_WORKERS)\n else:\n dataloader = DataLoader(dataset=dataset,\n batch_size=batch_size,\n num_workers=config.DATA.NUM_WORKERS,\n shuffle=is_train,\n drop_last=is_train)\n return dataloader\n\n\n\ndef main():\n config = get_config()\n # train dataset\n #transform_ops = get_train_transforms(config)\n #dataset = ABAWDataset(file_folder='./abaw_dataset/aligned_MTCNNXue/',\n # anno_folder='./abaw_dataset/Third_ABAW_Annotations/EXPR_Classification_Challenge/Train_Set',\n # #data_list='./train_list_all.txt',\n # data_list=None,\n # class_type='negative',\n # is_train=True,\n # transform_ops=transform_ops)\n # val dataset\n transform_ops = get_val_transforms(config)\n dataset = ABAWDataset(file_folder='./abaw_dataset/aligned_MTCNNXue/',\n anno_folder='./abaw_dataset/Third_ABAW_Annotations/EXPR_Classification_Challenge',\n data_list=None,\n class_type='coarse',\n is_train=False,\n transform_ops=transform_ops)\n\n for idx, sample in enumerate(dataset):\n if idx == 10:\n break\n print(sample[0], sample[1])\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"torch.device",
"numpy.random.randn",
"numpy.allclose",
"torch.Tensor"
],
[
"numpy.random.seed"
],
[
"numpy.allclose",
"torch.Tensor",
"torch.load",
"numpy.random.randn",
"torch.device"
],
[
"numpy.random.uniform"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Priyansh2/csnli | [
"de31f3f5ae0a956496b76a4643fa9ce7f3736d29"
] | [
"nmt/onmt/translate/Translator.py"
] | [
"import torch\nfrom torch.autograd import Variable\n\nimport onmt.translate.Beam\nimport onmt.io\n\n\nclass Translator(object):\n \"\"\"\n Uses a model to translate a batch of sentences.\n\n\n Args:\n model (:obj:`onmt.modules.NMTModel`):\n NMT model to use for translation\n fields (dict of Fields): data fields\n beam_size (int): size of beam to use\n n_best (int): number of translations produced\n max_length (int): maximum length output to produce\n global_scores (:obj:`GlobalScorer`):\n object to rescore final translations\n copy_attn (bool): use copy attention during translation\n cuda (bool): use cuda\n beam_trace (bool): trace beam search for debugging\n \"\"\"\n def __init__(self, model, fields,\n beam_size, n_best=1,\n max_length=100,\n global_scorer=None,\n copy_attn=False,\n cuda=False,\n beam_trace=False,\n min_length=0,\n stepwise_penalty=False):\n self.model = model\n self.fields = fields\n self.n_best = n_best\n self.max_length = max_length\n self.global_scorer = global_scorer\n self.copy_attn = copy_attn\n self.beam_size = beam_size\n self.cuda = cuda\n self.min_length = min_length\n self.stepwise_penalty = stepwise_penalty\n\n # for debugging\n self.beam_accum = None\n if beam_trace:\n self.beam_accum = {\n \"predicted_ids\": [],\n \"beam_parent_ids\": [],\n \"scores\": [],\n \"log_probs\": []}\n\n def translate_batch(self, batch, data):\n \"\"\"\n Translate a batch of sentences.\n\n Mostly a wrapper around :obj:`Beam`.\n\n Args:\n batch (:obj:`Batch`): a batch from a dataset object\n data (:obj:`Dataset`): the dataset object\n\n\n Todo:\n Shouldn't need the original dataset.\n \"\"\"\n\n # (0) Prep each of the components of the search.\n # And helper method for reducing verbosity.\n beam_size = self.beam_size\n batch_size = batch.batch_size\n data_type = data.data_type\n vocab = self.fields[\"tgt\"].vocab\n beam = [onmt.translate.Beam(beam_size, n_best=self.n_best,\n cuda=self.cuda,\n global_scorer=self.global_scorer,\n pad=vocab.stoi[onmt.io.PAD_WORD],\n eos=vocab.stoi[onmt.io.EOS_WORD],\n bos=vocab.stoi[onmt.io.BOS_WORD],\n min_length=self.min_length,\n stepwise_penalty=self.stepwise_penalty)\n for __ in range(batch_size)]\n\n # Help functions for working with beams and batches\n def var(a): return Variable(a, volatile=True)\n\n def rvar(a): return var(a.repeat(1, beam_size, 1))\n\n def bottle(m):\n return m.view(batch_size * beam_size, -1)\n\n def unbottle(m):\n return m.view(beam_size, batch_size, -1)\n\n # (1) Run the encoder on the src.\n src = onmt.io.make_features(batch, 'src', data_type)\n src_lengths = None\n if data_type == 'text':\n _, src_lengths = batch.src\n\n enc_states, memory_bank = self.model.encoder(src, src_lengths)\n dec_states = self.model.decoder.init_decoder_state(\n src, memory_bank, enc_states)\n\n if src_lengths is None:\n src_lengths = torch.Tensor(batch_size).type_as(memory_bank.data)\\\n .long()\\\n .fill_(memory_bank.size(0))\n\n # (2) Repeat src objects `beam_size` times.\n src_map = rvar(batch.src_map.data) \\\n if data_type == 'text' and self.copy_attn else None\n memory_bank = rvar(memory_bank.data)\n memory_lengths = src_lengths.repeat(beam_size)\n dec_states.repeat_beam_size_times(beam_size)\n\n # (3) run the decoder to generate sentences, using beam search.\n for i in range(self.max_length):\n if all((b.done() for b in beam)):\n break\n\n # Construct batch x beam_size nxt words.\n # Get all the pending current beam words and arrange for forward.\n inp = var(torch.stack([b.get_current_state() for b in beam])\n .t().contiguous().view(1, 
-1))\n\n # Turn any copied words to UNKs\n # 0 is unk\n if self.copy_attn:\n inp = inp.masked_fill(\n inp.gt(len(self.fields[\"tgt\"].vocab) - 1), 0)\n\n # Temporary kludge solution to handle changed dim expectation\n # in the decoder\n inp = inp.unsqueeze(2)\n\n # Run one step.\n dec_out, dec_states, attn = self.model.decoder(\n inp, memory_bank, dec_states, memory_lengths=memory_lengths)\n dec_out = dec_out.squeeze(0)\n # dec_out: beam x rnn_size\n\n # (b) Compute a vector of batch x beam word scores.\n if not self.copy_attn:\n out = self.model.generator.forward(dec_out).data\n out = unbottle(out)\n # beam x tgt_vocab\n beam_attn = unbottle(attn[\"std\"])\n else:\n out = self.model.generator.forward(dec_out,\n attn[\"copy\"].squeeze(0),\n src_map)\n # beam x (tgt_vocab + extra_vocab)\n out = data.collapse_copy_scores(\n unbottle(out.data),\n batch, self.fields[\"tgt\"].vocab, data.src_vocabs)\n # beam x tgt_vocab\n out = out.log()\n beam_attn = unbottle(attn[\"copy\"])\n # (c) Advance each beam.\n for j, b in enumerate(beam):\n b.advance(out[:, j],\n beam_attn.data[:, j, :memory_lengths[j]])\n dec_states.beam_update(j, b.get_current_origin(), beam_size)\n\n # (4) Extract sentences from beam.\n ret = self._from_beam(beam)\n ret[\"gold_score\"] = [0] * batch_size\n if \"tgt\" in batch.__dict__:\n ret[\"gold_score\"] = self._run_target(batch, data)\n ret[\"batch\"] = batch\n return ret\n\n def _from_beam(self, beam):\n ret = {\"predictions\": [],\n \"scores\": [],\n \"attention\": []}\n for b in beam:\n n_best = self.n_best\n scores, ks = b.sort_finished(minimum=n_best)\n hyps, attn = [], []\n for i, (times, k) in enumerate(ks[:n_best]):\n hyp, att = b.get_hyp(times, k)\n hyps.append(hyp)\n attn.append(att)\n ret[\"predictions\"].append(hyps)\n ret[\"scores\"].append(scores)\n ret[\"attention\"].append(attn)\n return ret\n\n def _run_target(self, batch, data):\n data_type = data.data_type\n if data_type == 'text':\n _, src_lengths = batch.src\n else:\n src_lengths = None\n src = onmt.io.make_features(batch, 'src', data_type)\n tgt_in = onmt.io.make_features(batch, 'tgt')[:-1]\n\n # (1) run the encoder on the src\n enc_states, memory_bank = self.model.encoder(src, src_lengths)\n dec_states = \\\n self.model.decoder.init_decoder_state(src, memory_bank, enc_states)\n\n # (2) if a target is specified, compute the 'goldScore'\n # (i.e. log likelihood) of the target under the model\n tt = torch.cuda if self.cuda else torch\n gold_scores = tt.FloatTensor(batch.batch_size).fill_(0)\n dec_out, dec_states, attn = self.model.decoder(\n tgt_in, memory_bank, dec_states, memory_lengths=src_lengths)\n\n tgt_pad = self.fields[\"tgt\"].vocab.stoi[onmt.io.PAD_WORD]\n for dec, tgt in zip(dec_out, batch.tgt[1:].data):\n # Log prob of each word.\n out = self.model.generator.forward(dec)\n tgt = tgt.unsqueeze(1)\n scores = out.data.gather(1, tgt)\n scores.masked_fill_(tgt.eq(tgt_pad), 0)\n gold_scores += scores\n return gold_scores\n"
] | [
[
"torch.Tensor",
"torch.autograd.Variable"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Erotemic/vtool_ibeis | [
"b5dfd5bec43dacc8ea9fc3d6a7f17cd661b678c5",
"b5dfd5bec43dacc8ea9fc3d6a7f17cd661b678c5",
"b5dfd5bec43dacc8ea9fc3d6a7f17cd661b678c5",
"b5dfd5bec43dacc8ea9fc3d6a7f17cd661b678c5"
] | [
"tests/test_coverage_max_reduce.py",
"vtool_ibeis/other.py",
"vtool_ibeis/distance.py",
"vtool_ibeis/segmentation.py"
] | [
"#Is it possible to use numpy.ufunc.reduce over an iterator of ndarrays?\n\n#I have a generator function that yields ndarrays (all of the same shape and dtype) and I would like to find the maximum value at each index.\n\n#Currently I have code that looks like this:\n\n\ndef main():\n import numpy as np\n import cv2\n\n shape = (250, 300)\n dsize = shape[::-1]\n\n affmat_list = np.array([\n [[ 1.57351554e+00, 0.00000000e+00, 1.09061039e+02],\n [ -3.61827926e-01, 7.46059970e-01, 2.50669551e+01]],\n [[ 3.05754491e+00, 0.00000000e+00, 8.28024922e+01],\n [ -2.13866309e-01, 1.72124200e+00, 1.72744669e+02]],\n [[ 2.58008254e+00, 0.00000000e+00, 1.52155447e+02],\n [ -2.08041241e+00, 2.46195663e+00, 1.09493821e+02]],\n [[ 2.01791864e+00, 0.00000000e+00, 2.45704669e+02],\n [ -1.07590956e+00, 3.33499949e+00, 1.66233498e+02]],\n [[ 3.32012638e+00, 0.00000000e+00, 1.03847866e+02],\n [ -2.36557589e+00, 3.02063109e+00, 1.59907802e+02]],\n [[ 4.94371474e+00, 0.00000000e+00, 7.92717193e+01],\n [ -2.67846198e+00, 3.66854256e+00, 1.47888210e+02]]])\n\n fx2_score = np.ones(len(affmat_list))\n\n patch = np.array([\n [ 0.0014, 0.0016, 0.0017, 0.0019, 0.0020, 0.0021, 0.0022, 0.0023, 0.0023, 0.0023, 0.0023, 0.0023, 0.0022, 0.0021, 0.0020, 0.0019, 0.0017, 0.0016, 0.0014],\n [ 0.0016, 0.0017, 0.0019, 0.0021, 0.0022, 0.0023, 0.0024, 0.0025, 0.0026, 0.0026, 0.0026, 0.0025, 0.0024, 0.0023, 0.0022, 0.0021, 0.0019, 0.0017, 0.0016],\n [ 0.0017, 0.0019, 0.0021, 0.0023, 0.0024, 0.0026, 0.0027, 0.0028, 0.0028, 0.0028, 0.0028, 0.0028, 0.0027, 0.0026, 0.0024, 0.0023, 0.0021, 0.0019, 0.0017],\n [ 0.0019, 0.0021, 0.0023, 0.0025, 0.0026, 0.0028, 0.0029, 0.0030, 0.0031, 0.0031, 0.0031, 0.0030, 0.0029, 0.0028, 0.0026, 0.0025, 0.0023, 0.0021, 0.0019],\n [ 0.0020, 0.0022, 0.0024, 0.0026, 0.0028, 0.0030, 0.0031, 0.0032, 0.0033, 0.0033, 0.0033, 0.0032, 0.0031, 0.0030, 0.0028, 0.0026, 0.0024, 0.0022, 0.0020],\n [ 0.0021, 0.0023, 0.0026, 0.0028, 0.0030, 0.0032, 0.0033, 0.0034, 0.0035, 0.0035, 0.0035, 0.0034, 0.0033, 0.0032, 0.0030, 0.0028, 0.0026, 0.0023, 0.0021],\n [ 0.0022, 0.0024, 0.0027, 0.0029, 0.0031, 0.0033, 0.0034, 0.0036, 0.0036, 0.0036, 0.0036, 0.0036, 0.0034, 0.0033, 0.0031, 0.0029, 0.0027, 0.0024, 0.0022],\n [ 0.0023, 0.0025, 0.0028, 0.0030, 0.0032, 0.0034, 0.0036, 0.0037, 0.0037, 0.0038, 0.0037, 0.0037, 0.0036, 0.0034, 0.0032, 0.0030, 0.0028, 0.0025, 0.0023],\n [ 0.0023, 0.0026, 0.0028, 0.0031, 0.0033, 0.0035, 0.0036, 0.0037, 0.0038, 0.0038, 0.0038, 0.0037, 0.0036, 0.0035, 0.0033, 0.0031, 0.0028, 0.0026, 0.0023],\n [ 0.0023, 0.0026, 0.0028, 0.0031, 0.0033, 0.0035, 0.0036, 0.0038, 0.0038, 0.0039, 0.0038, 0.0038, 0.0036, 0.0035, 0.0033, 0.0031, 0.0028, 0.0026, 0.0023],\n [ 0.0023, 0.0026, 0.0028, 0.0031, 0.0033, 0.0035, 0.0036, 0.0037, 0.0038, 0.0038, 0.0038, 0.0037, 0.0036, 0.0035, 0.0033, 0.0031, 0.0028, 0.0026, 0.0023],\n [ 0.0023, 0.0025, 0.0028, 0.0030, 0.0032, 0.0034, 0.0036, 0.0037, 0.0037, 0.0038, 0.0037, 0.0037, 0.0036, 0.0034, 0.0032, 0.0030, 0.0028, 0.0025, 0.0023],\n [ 0.0022, 0.0024, 0.0027, 0.0029, 0.0031, 0.0033, 0.0034, 0.0036, 0.0036, 0.0036, 0.0036, 0.0036, 0.0034, 0.0033, 0.0031, 0.0029, 0.0027, 0.0024, 0.0022],\n [ 0.0021, 0.0023, 0.0026, 0.0028, 0.0030, 0.0032, 0.0033, 0.0034, 0.0035, 0.0035, 0.0035, 0.0034, 0.0033, 0.0032, 0.0030, 0.0028, 0.0026, 0.0023, 0.0021],\n [ 0.0020, 0.0022, 0.0024, 0.0026, 0.0028, 0.0030, 0.0031, 0.0032, 0.0033, 0.0033, 0.0033, 0.0032, 0.0031, 0.0030, 0.0028, 0.0026, 0.0024, 0.0022, 0.0020],\n [ 0.0019, 0.0021, 0.0023, 0.0025, 0.0026, 0.0028, 0.0029, 0.0030, 0.0031, 0.0031, 
0.0031, 0.0030, 0.0029, 0.0028, 0.0026, 0.0025, 0.0023, 0.0021, 0.0019],\n [ 0.0017, 0.0019, 0.0021, 0.0023, 0.0024, 0.0026, 0.0027, 0.0028, 0.0028, 0.0028, 0.0028, 0.0028, 0.0027, 0.0026, 0.0024, 0.0023, 0.0021, 0.0019, 0.0017],\n [ 0.0016, 0.0017, 0.0019, 0.0021, 0.0022, 0.0023, 0.0024, 0.0025, 0.0026, 0.0026, 0.0026, 0.0025, 0.0024, 0.0023, 0.0022, 0.0021, 0.0019, 0.0017, 0.0016],\n [ 0.0014, 0.0016, 0.0017, 0.0019, 0.0020, 0.0021, 0.0022, 0.0023, 0.0023, 0.0023, 0.0023, 0.0023, 0.0022, 0.0021, 0.0020, 0.0019, 0.0017, 0.0016, 0.0014]\n ])\n\n def warped_patch_generator():\n padded_patch = np.zeros(shape, dtype=np.float32)\n patch_h, patch_w = patch.shape\n warped = np.zeros(shape, dtype=np.float32)\n for count, (M, score) in enumerate(zip(affmat_list, fx2_score)):\n print(count)\n np.multiply(patch, score, out=padded_patch[:patch.shape[0], :patch.shape[1]] )\n cv2.warpAffine(padded_patch, M, dsize, dst=warped,\n flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT,\n borderValue=0)\n yield warped\n #yield warped\n\n print(\"THREE\")\n from six.moves import reduce\n import functools\n dstimg3 = np.zeros(shape, dtype=np.float32)\n maximum_partial = functools.partial(np.maximum, out=dstimg3)\n dstimg3 = reduce(maximum_partial, warped_patch_generator())\n\n print(\"ONE\")\n dstimg1 = np.zeros(shape, dtype=np.float32)\n print(\"ONE\")\n for warped in warped_patch_generator():\n #dstimg1 = np.maximum(dstimg1, warped)\n np.maximum(dstimg1, warped, out=dstimg1)\n\n print(\"FOUR\")\n input_copy_ = np.array([w.copy() for w in warped_patch_generator()])\n dstimg4 = input_copy_.max(0)\n\n print(\"TWO\")\n dstimg2 = np.zeros(shape, dtype=np.float32)\n input_iter_ = list((w for w in warped_patch_generator()))\n np.maximum.reduce(input_iter_, axis=0, dtype=np.float32, out=dstimg2)\n\n x = np.where(dstimg1.ravel() != dstimg2.ravel())[0]\n print(dstimg2.take(x))\n print(dstimg1.take(x))\n np.allclose(dstimg1, dstimg2)\n\n import matplotlib.pyplot as plt\n plt.figure(1)\n plt.subplot(221)\n plt.imshow(dstimg1)\n plt.subplot(222)\n plt.imshow(dstimg2)\n plt.subplot(223)\n plt.imshow(dstimg3)\n plt.subplot(224)\n plt.imshow(dstimg4)\n\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n\n#I would have thought that I would be allowed to write something like this:\n# dstimg = np.maximum.reduce(warped_patch_generator())\n",
"# -*- coding: utf-8 -*\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport numpy as np\nimport utool as ut\nimport ubelt as ub\nimport functools # NOQA\nfrom six import next\nfrom six.moves import zip, range\n\n\ndef safe_vstack(tup, default_shape=(0,), default_dtype=np.float):\n \"\"\" stacks a tuple even if it is empty \"\"\"\n try:\n return np.vstack(tup)\n except ValueError:\n return np.empty(default_shape, dtype=default_dtype)\n\n\ndef pad_vstack(arrs, fill_value=0):\n \"\"\" Stacks values and pads arrays with different lengths with zeros \"\"\"\n total = max(map(len, arrs))\n padded = [np.hstack([a, np.full(total - len(a), fill_value)]) for a in arrs]\n return np.vstack(padded)\n\n\ndef safe_cat(tup, axis=0, default_shape=(0,), default_dtype=np.float):\n \"\"\"\n stacks a tuple even if it is empty\n Also deals with numpy bug where cat fails if an element in sequence is empty\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from vtool_ibeis.other import * # NOQA\n >>> import vtool_ibeis as vt\n >>> # test1\n >>> tup = []\n >>> ut.assert_eq(vt.safe_cat(tup, axis=0).shape, (0,))\n >>> # test2\n >>> tup = (np.array([[1, 2, 3]]), np.array([[]]))\n >>> s = vt.safe_cat(tup, axis=0)\n >>> print(ub.hzcat(['s = ', ub.repr2(s)])\n >>> ut.assert_eq(s.shape, (1, 3))\n >>> # test3\n >>> tup = (np.array([[1, 2, 3]]), np.array([[3, 4, 5]]))\n >>> s = vt.safe_cat(tup, axis=1)\n >>> print(ub.hzcat(['s = ', ub.repr2(s)])\n >>> ut.assert_eq(s.shape, (1, 6))\n >>> # test3\n >>> tup = (np.array(1), np.array(2), np.array(3))\n >>> s = vt.safe_cat(tup, axis=1)\n >>> print(ub.hzcat(['s = ', ub.repr2(s)])\n >>> ut.assert_eq(s.shape, (1, 6))\n \"\"\"\n if tup is None or len(tup) == 0:\n stack = np.empty(default_shape, dtype=default_dtype)\n else:\n try:\n stack = np.concatenate(tup, axis=axis)\n except ValueError as ex1:\n try:\n # Ensure everything is at least a 1d array\n tup_ = [np.atleast_1d(np.asarray(a)) for a in tup]\n # remove empty parts\n tup_ = [a for a in tup_ if a.size > 0]\n stack = np.concatenate(tup_, axis=axis)\n except ValueError:\n # if axis == 0:\n # stack = np.hstack(tup)\n # elif axis == 1:\n # stack = np.vstack(tup)\n # elif axis == 3:\n # stack = np.dstack(tup)\n # else:\n raise ex1\n return stack\n # try:\n # return np.concatenate(tup, axis=axis)\n # except ValueError:\n\n\ndef median_abs_dev(arr_list, **kwargs):\n \"\"\"\n References:\n https://en.wikipedia.org/wiki/Median_absolute_deviation\n \"\"\"\n return np.median(np.abs(arr_list - np.median(arr_list, **kwargs)), **kwargs)\n\n\ndef argsort_groups(scores_list, reverse=False, rng=np.random, randomize_levels=True):\n \"\"\"\n Sorts each group normally, but randomizes order of level values.\n\n TODO: move to vtool_ibeis\n\n Args:\n scores_list (list):\n reverse (bool): (default = True)\n rng (module): random number generator(default = numpy.random)\n\n CommandLine:\n python -m ibeis.init.filter_annots --exec-argsort_groups\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from vtool_ibeis.other import * # NOQA\n >>> scores_list = [\n >>> np.array([np.nan, np.nan], dtype=np.float32),\n >>> np.array([np.nan, 2], dtype=np.float32),\n >>> np.array([4, 1, 1], dtype=np.float32),\n >>> np.array([7, 3, 3, 0, 9, 7, 5, 8], dtype=np.float32),\n >>> np.array([2, 4], dtype=np.float32),\n >>> np.array([np.nan, 4, np.nan, 8, np.nan, 9], dtype=np.float32),\n >>> ]\n >>> reverse = True\n >>> rng = np.random.RandomState(0)\n >>> idxs_list = argsort_groups(scores_list, reverse, rng)\n >>> result = 'idxs_list = %s' % 
(ut.repr4(idxs_list, with_dtype=False),)\n >>> print(result)\n\n \"\"\"\n scores_list_ = [np.array(scores, copy=True).astype(np.float) for scores in scores_list]\n breakers_list = [rng.rand(len(scores)) for scores in scores_list_]\n # replace nan with -inf, or inf randomize order between equal values\n replval = -np.inf if reverse else np.inf\n # Ensure that nans are ordered last\n for scores in scores_list_:\n scores[np.isnan(scores)] = replval\n # The last column is sorted by first with lexsort\n scorebreaker_list = [np.array((breakers, scores))\n for scores, breakers in zip(scores_list_, breakers_list)]\n if reverse:\n idxs_list = [np.lexsort(scorebreaker)[::-1] for scorebreaker in scorebreaker_list]\n else:\n idxs_list = [np.lexsort(scorebreaker) for scorebreaker in scorebreaker_list]\n return idxs_list\n\n\ndef check_sift_validity(sift_uint8, lbl=None, verbose=ut.NOT_QUIET):\n \"\"\"\n checks if a SIFT descriptor is valid\n \"\"\"\n if lbl is None:\n lbl = ut.get_varname_from_stack(sift_uint8, N=1)\n print('[checksift] Checking valididty of %d SIFT descriptors. lbl=%s' % (\n sift_uint8.shape[0], lbl))\n is_correct_shape = len(sift_uint8.shape) == 2 and sift_uint8.shape[1] == 128\n is_correct_dtype = sift_uint8.dtype == np.uint8\n if not is_correct_shape:\n print('[checksift] * incorrect shape = %r' % (sift_uint8.shape,))\n elif verbose:\n print('[checksift] * correct shape = %r' % (sift_uint8.shape,))\n\n if not is_correct_dtype:\n print('[checksift] * incorrect dtype = %r' % (sift_uint8.dtype,))\n elif verbose:\n print('[checksift] * correct dtype = %r' % (sift_uint8.dtype,))\n\n num_sifts = sift_uint8.shape[0]\n sift_float01 = sift_uint8 / 512.0\n\n # Check L2 norm\n sift_norm = np.linalg.norm(sift_float01, axis=1)\n is_normal = np.isclose(sift_norm, 1.0, atol=.04)\n bad_locs_norm = np.where(np.logical_not(is_normal))[0]\n if len(bad_locs_norm) > 0:\n print('[checksift] * bad norm = %4d/%d' % (len(bad_locs_norm), num_sifts))\n else:\n print('[checksift] * correctly normalized')\n\n # Check less than thresh=.2\n # This check actually is not valid because the SIFT descriptors is\n # normalized after it is thresholded\n #bad_locs_thresh = np.where((sift_float01 > .2).sum(axis=1))[0]\n #print('[checksift] * bad thresh = %4d/%d' % (len(bad_locs_thresh), num_sifts))\n #if len(bad_locs_thresh) > 0:\n # above_thresh = sift_float01[(sift_float01 > .2)]\n # print('[checksift] * components under thresh = %d' % (sift_float01 <= 2).sum())\n # print('[checksift] * components above thresh stats = ' +\n # ut.get_stats_str(above_thresh, precision=2))\n\n isok = len(bad_locs_norm) == 0 and is_correct_shape and is_correct_dtype\n if not isok:\n print('[checksift] ERROR. 
SIFT CHECK FAILED')\n return isok\n\n\ndef get_crop_slices(isfill):\n fill_colxs = [np.where(row)[0] for row in isfill]\n fill_rowxs = [np.where(col)[0] for col in isfill.T]\n nRows, nCols = isfill.shape[0:2]\n\n filled_columns = intersect1d_reduce(fill_colxs)\n filled_rows = intersect1d_reduce(fill_rowxs)\n consec_rows_list = ut.group_consecutives(filled_rows)\n consec_cols_list = ut.group_consecutives(filled_columns)\n\n def get_consec_endpoint(consec_index_list, endpoint):\n \"\"\"\n consec_index_list = consec_cols_list\n endpoint = 0\n \"\"\"\n for consec_index in consec_index_list:\n if np.any(np.array(consec_index) == endpoint):\n return consec_index\n\n def get_min_consec_endpoint(consec_rows_list, endpoint):\n consec_index = get_consec_endpoint(consec_rows_list, endpoint)\n if consec_index is None:\n return endpoint\n return max(consec_index)\n\n def get_max_consec_endpoint(consec_rows_list, endpoint):\n consec_index = get_consec_endpoint(consec_rows_list, endpoint)\n if consec_index is None:\n return endpoint + 1\n return min(consec_index)\n\n consec_rows_top = get_min_consec_endpoint(consec_rows_list, 0)\n consec_rows_bottom = get_max_consec_endpoint(consec_rows_list, nRows - 1)\n remove_cols_left = get_min_consec_endpoint(consec_cols_list, 0)\n remove_cols_right = get_max_consec_endpoint(consec_cols_list, nCols - 1)\n rowslice = slice(consec_rows_top, consec_rows_bottom)\n colslice = slice(remove_cols_left, remove_cols_right)\n return rowslice, colslice\n\n\ndef get_undirected_edge_ids(directed_edges):\n r\"\"\"\n Args:\n directed_edges (ndarray[ndims=2]):\n\n Returns:\n list: edgeid_list\n\n CommandLine:\n python -m vtool_ibeis.other --exec-get_undirected_edge_ids\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from vtool_ibeis.other import * # NOQA\n >>> directed_edges = np.array([[1, 2], [2, 1], [2, 3], [3, 1], [1, 1], [2, 3], [3, 2]])\n >>> edgeid_list = get_undirected_edge_ids(directed_edges)\n >>> result = ('edgeid_list = %s' % (ub.repr2(edgeid_list),))\n >>> print(result)\n edgeid_list = [0 0 1 2 3 1 1]\n \"\"\"\n #import vtool_ibeis as vt\n undirected_edges = to_undirected_edges(directed_edges)\n edgeid_list = compute_unique_data_ids(undirected_edges)\n return edgeid_list\n\n\ndef to_undirected_edges(directed_edges, upper=False):\n assert len(directed_edges.shape) == 2 and directed_edges.shape[1] == 2\n #flipped = qaid_arr < daid_arr\n if upper:\n flipped = directed_edges.T[0] > directed_edges.T[1]\n else:\n flipped = directed_edges.T[0] < directed_edges.T[1]\n # standardize edge order\n edges_dupl = directed_edges.copy()\n edges_dupl[flipped, 0:2] = edges_dupl[flipped, 0:2][:, ::-1]\n undirected_edges = edges_dupl\n return undirected_edges\n\n\ndef find_best_undirected_edge_indexes(directed_edges, score_arr=None):\n r\"\"\"\n Args:\n directed_edges (ndarray[ndims=2]):\n score_arr (ndarray):\n\n Returns:\n list: unique_edge_xs\n\n CommandLine:\n python -m vtool_ibeis.other --test-find_best_undirected_edge_indexes\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from vtool_ibeis.other import * # NOQA\n >>> directed_edges = np.array([[1, 2], [2, 1], [2, 3], [3, 1], [1, 1], [2, 3], [3, 2]])\n >>> score_arr = np.array([1, 1, 1, 1, 1, 1, 2])\n >>> unique_edge_xs = find_best_undirected_edge_indexes(directed_edges, score_arr)\n >>> result = str(unique_edge_xs)\n >>> print(result)\n [0 3 4 6]\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from vtool_ibeis.other import * # NOQA\n >>> directed_edges = np.array([[1, 2], [2, 1], [2, 3], [3, 1], [1, 1], [2, 3], [3, 2]])\n >>> score_arr = None\n >>> 
unique_edge_xs = find_best_undirected_edge_indexes(directed_edges, score_arr)\n >>> result = str(unique_edge_xs)\n >>> print(result)\n [0 2 3 4]\n \"\"\"\n import vtool_ibeis as vt\n #assert len(directed_edges.shape) == 2 and directed_edges.shape[1] == 2\n ##flipped = qaid_arr < daid_arr\n #flipped = directed_edges.T[0] < directed_edges.T[1]\n ## standardize edge order\n #edges_dupl = directed_edges.copy()\n #edges_dupl[flipped, 0:2] = edges_dupl[flipped, 0:2][:, ::-1]\n #edgeid_list = vt.compute_unique_data_ids(edges_dupl)\n edgeid_list = get_undirected_edge_ids(directed_edges)\n unique_edgeids, groupxs = vt.group_indices(edgeid_list)\n # if there is more than one edge in a group take the one with the highest score\n if score_arr is None:\n unique_edge_xs_list = [groupx[0] for groupx in groupxs]\n else:\n assert len(score_arr) == len(directed_edges)\n score_groups = vt.apply_grouping(score_arr, groupxs)\n score_argmaxs = [score_group.argmax() for score_group in score_groups]\n unique_edge_xs_list = [\n groupx[argmax] for groupx, argmax in zip(groupxs, score_argmaxs)\n ]\n unique_edge_xs = np.array(sorted(unique_edge_xs_list), dtype=np.int32)\n return unique_edge_xs\n\n\ndef argsort_records(arrays, reverse=False):\n r\"\"\"\n Sorts arrays that form records.\n Same as lexsort(arrays[::-1]) --- ie. rows are reversed.\n\n Args:\n arrays (ndarray): array of records\n reverse (bool): (default = False)\n\n Returns:\n ndarray: sortx - sorted indicies\n\n CommandLine:\n python -m vtool_ibeis.other --exec-argsort_records\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from vtool_ibeis.other import * # NOQA\n >>> arrays = np.array([\n >>> [1, 1, 1, 2, 2, 2, 3, 4, 5],\n >>> [2, 0, 2, 6, 4, 3, 2, 5, 6],\n >>> [1, 1, 0, 2, 3, 4, 5, 6, 7],\n >>> ],)\n >>> reverse = False\n >>> sortx = argsort_records(arrays, reverse)\n >>> result = ('sortx = %s' % (str(sortx),))\n >>> print('lxsrt = %s' % (np.lexsort(arrays[::-1]),))\n >>> print(result)\n sortx = [1 2 0 5 4 3 6 7 8]\n \"\"\"\n sorting_records = np.rec.fromarrays(arrays)\n sort_stride = (-reverse * 2) + 1\n sortx = sorting_records.argsort()[::sort_stride]\n return sortx\n\n\ndef unique_rows(arr, directed=True):\n \"\"\"\n Order or columns does not matter if directed = False\n \"\"\"\n if directed:\n idx_list = compute_unique_data_ids(arr)\n else:\n idx_list = get_undirected_edge_ids(arr)\n _, unique_rowx = np.unique(idx_list, return_index=True)\n unique_arr = arr.take(unique_rowx, axis=0)\n return unique_arr\n\n\ndef compute_ndarray_unique_rowids_unsafe(arr):\n \"\"\"\n arr = np.random.randint(2, size=(10000, 10))\n vt.compute_unique_data_ids_(list(map(tuple, arr)))\n len(vt.compute_unique_data_ids_(list(map(tuple, arr))))\n len(np.unique(vt.compute_unique_data_ids_(list(map(tuple, arr)))))\n\n %timeit vt.compute_unique_data_ids_(list(map(tuple, arr)))\n %timeit compute_ndarray_unique_rowids_unsafe(arr)\n\n \"\"\"\n # no checks performed\n void_dtype = np.dtype((np.void, arr.dtype.itemsize * arr.shape[1]))\n #assert arr.flags['C_CONTIGUOUS']\n arr_void_view = arr.view(void_dtype)\n unique, rowids = np.unique(arr_void_view, return_inverse=True)\n return rowids\n #np.ascontiguousarray(arr).data == arr.data\n #assert arr.data == arr_void_view.data\n\n\ndef nonunique_row_flags(arr):\n import vtool_ibeis as vt\n unique_rowx = unique_row_indexes(arr)\n unique_flags = vt.index_to_boolmask(unique_rowx, len(arr))\n nonunique_flags = np.logical_not(unique_flags)\n return nonunique_flags\n\n\ndef nonunique_row_indexes(arr):\n \"\"\" rows that are not unique (does not 
include the first instance of each pattern)\n\n Args:\n arr (ndarray): 2d array\n\n Returns:\n ndarray: nonunique_rowx\n\n SeeAlso:\n unique_row_indexes\n nonunique_row_flags\n\n CommandLine:\n python -m vtool_ibeis.other --test-unique_row_indexes\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from vtool_ibeis.other import * # NOQA\n >>> arr = np.array([[0, 0], [0, 1], [1, 0], [1, 1], [0, 0], [.534, .432], [.534, .432], [1, 0], [0, 1]])\n >>> nonunique_rowx = unique_row_indexes(arr)\n >>> result = ('nonunique_rowx = %s' % (ub.repr2(nonunique_rowx),))\n >>> print(result)\n nonunique_rowx = np.array([4, 6, 7, 8], dtype=np.int64)\n \"\"\"\n nonunique_flags = nonunique_row_flags(arr)\n nonunique_rowx = np.where(nonunique_flags)[0]\n return nonunique_rowx\n\n\ndef compute_unique_data_ids(data):\n \"\"\"\n This is actually faster than compute_unique_integer_data_ids it seems\n\n CommandLine:\n python -m vtool_ibeis.other --test-compute_unique_data_ids\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from vtool_ibeis.other import * # NOQA\n >>> data = np.array([[0, 0], [0, 1], [1, 0], [1, 1], [0, 0], [.534, .432], [.534, .432], [1, 0], [0, 1]])\n >>> dataid_list = compute_unique_data_ids(data)\n >>> result = 'dataid_list = ' + ub.repr2(dataid_list, with_dtype=True)\n >>> print(result)\n dataid_list = np.array([0, 1, 2, 3, 0, 4, 4, 2, 1], dtype=np.int32)\n \"\"\"\n # construct a unique id for every edge\n hashable_rows = [tuple(row_.tolist()) for row_ in data]\n dataid_list = np.array(compute_unique_data_ids_(hashable_rows), dtype=np.int32)\n return dataid_list\n\n\ndef compute_unique_data_ids_(hashable_rows, iddict_=None):\n if iddict_ is None:\n iddict_ = {}\n for row in hashable_rows:\n if row not in iddict_:\n iddict_[row] = len(iddict_)\n dataid_list = ut.dict_take(iddict_, hashable_rows)\n return dataid_list\n\n\ndef compute_unique_arr_dataids(arr):\n \"\"\" specialized version for speed when arr is an ndarray \"\"\"\n iddict_ = {}\n hashable_rows = list(map(tuple, arr.tolist()))\n for row in hashable_rows:\n if row not in iddict_:\n iddict_[row] = len(iddict_)\n dataid_list = np.array([iddict_[row] for row in hashable_rows])\n return dataid_list\n\n\ndef compute_unique_integer_data_ids(data):\n r\"\"\"\n This is actually slower than compute_unique_data_ids it seems\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from vtool_ibeis.other import * # NOQA\n >>> # build test data\n >>> data = np.array([[0, 0], [0, 1], [1, 1], [0, 0], [0, 0], [0, 1], [1, 1], [0, 0], [9, 0]])\n >>> data = np.random.randint(1000, size=(1000, 2))\n >>> # execute function\n >>> result1 = compute_unique_data_ids(data)\n >>> result2 = compute_unique_integer_data_ids(data)\n >>> # verify results\n >>> print(result)\n\n %timeit compute_unique_data_ids(data)\n %timeit compute_unique_integer_data_ids(data)\n \"\"\"\n # construct a unique id for every edge\n ncols = data.shape[1]\n # get the number of decimal places to shift\n exp_step = np.ceil(np.log10(data.max()))\n offsets = [int(10 ** (ix * exp_step)) for ix in reversed(range(0, ncols))]\n dataid_list = np.array([\n sum([\n item * offset\n for item, offset in zip(row, offsets)\n ])\n for row in data])\n return dataid_list\n\n\ndef trytake(list_, index_list):\n return None if list_ is None else list_take_(list_, index_list)\n\n\ndef list_take_(list_, index_list):\n if isinstance(list_, np.ndarray):\n return list_.take(index_list, axis=0)\n else:\n return list(ub.take(list_, index_list))\n\n\ndef compress2(arr, flag_list, axis=None, out=None):\n \"\"\"\n Wrapper around numpy compress 
that makes the signature more similar to take\n \"\"\"\n return np.compress(flag_list, arr, axis=axis, out=out)\n\n\ndef take2(arr, index_list, axis=None, out=None):\n \"\"\"\n Wrapper around numpy compress that makes the signature more similar to take\n \"\"\"\n return np.take(arr, index_list, axis=axis, out=out)\n\n\ndef list_compress_(list_, flag_list):\n if isinstance(list_, np.ndarray):\n return list_.compress(flag_list, axis=0)\n else:\n return list(ub.compress(list_, flag_list))\n\n\ndef index_partition(item_list, part1_items):\n \"\"\"\n returns two lists. The first are the indecies of items in item_list that\n are in part1_items. the second is the indices in item_list that are not\n in part1_items. items in part1_items that are not in item_list are\n ignored\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from vtool_ibeis.other import * # NOQA\n >>> item_list = ['dist', 'fg', 'distinctiveness']\n >>> part1_items = ['fg', 'distinctiveness']\n >>> part1_indexes, part2_indexes = index_partition(item_list, part1_items)\n >>> ut.assert_eq(part1_indexes.tolist(), [1, 2])\n >>> ut.assert_eq(part2_indexes.tolist(), [0])\n \"\"\"\n part1_indexes_ = [\n item_list.index(item)\n for item in part1_items\n if item in item_list\n ]\n part1_indexes = np.array(part1_indexes_)\n part2_indexes = np.setdiff1d(np.arange(len(item_list)), part1_indexes)\n # FIXME: use dtype np.int_\n part1_indexes = part1_indexes.astype(np.int32)\n part2_indexes = part2_indexes.astype(np.int32)\n return part1_indexes, part2_indexes\n\n\n# def partition_Nones(item_list):\n# \"\"\"\n# Example:\n# >>> # ENABLE_DOCTEST\n# >>> from vtool_ibeis.other import * # NOQA\n# >>> item_list = ['foo', None, None, 'bar']\n# >>> part1_indexes, part2_indexes = partition_Nones(item_list)\n# \"\"\"\n# # part1_indexes_ = ut.list_where(item_list)\n# part1_indexes_ = [index for index, item in enumerate(item_list) if item is not None]\n# part1_indexes = np.array(part1_indexes_)\n# part2_indexes = np.setdiff1d(np.arange(len(item_list)), part1_indexes)\n# return part1_indexes, part2_indexes\n\n\ndef rebuild_partition(part1_vals, part2_vals, part1_indexes, part2_indexes):\n r\"\"\"\n Inverts work done by index_partition\n\n Args:\n part1_vals (list):\n part2_vals (list):\n part1_indexes (dict):\n part2_indexes (dict):\n\n CommandLine:\n python -m vtool_ibeis.other --test-rebuild_partition\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from vtool_ibeis.other import * # NOQA\n >>> item_list = ['dist', 'fg', 'distinctiveness']\n >>> part1_items = ['fg', 'distinctiveness']\n >>> part1_indexes, part2_indexes = index_partition(item_list, part1_items)\n >>> part1_vals = ut.take(item_list, part1_indexes)\n >>> part2_vals = ut.take(item_list, part2_indexes)\n >>> val_list = rebuild_partition(part1_vals, part2_vals, part1_indexes, part2_indexes)\n >>> assert val_list == item_list, 'incorrect inversin'\n >>> print(val_list)\n \"\"\"\n val_list = [None] * (len(part1_indexes) + len(part2_indexes))\n for idx, val in zip(part1_indexes, part1_vals):\n val_list[idx] = val\n for idx, val in zip(part2_indexes, part2_vals):\n val_list[idx] = val\n return val_list\n\n\ndef weighted_average_scoring(fsv, weight_filtxs, nonweight_filtxs):\n r\"\"\"\n does \\frac{\\sum_i w^f_i * w^d_i * r_i}{\\sum_i w^f_i, w^d_i}\n to get a weighed average of ratio scores\n\n If we normalize the weight part to add to 1 then we can get per-feature\n scores.\n\n References:\n http://en.wikipedia.org/wiki/Weighted_arithmetic_mean\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from vtool_ibeis.other import 
* # NOQA\n >>> fsv = np.array([\n ... [ 0.82992172, 1.56136119, 0.66465378],\n ... [ 0.8000412 , 2.14719748, 1. ],\n ... [ 0.80848503, 2.6816361 , 1. ],\n ... [ 0.86761665, 2.70189977, 1. ],\n ... [ 0.8004055 , 1.58753884, 0.92178345],])\n >>> weight_filtxs = np.array([1, 2], dtype=np.int32)\n >>> nonweight_filtxs = np.array([0], dtype=np.int32)\n >>> new_fs = weighted_average_scoring(fsv, weight_filtxs, nonweight_filtxs)\n >>> result = new_fs\n >>> print(result)\n\n \"\"\"\n weight_fs = fsv.T.take(weight_filtxs, axis=0).T.prod(axis=1)\n nonweight_fs = fsv.T.take(nonweight_filtxs, axis=0).T.prod(axis=1)\n weight_fs_norm01 = weight_fs / weight_fs.sum()\n #weight_fs_norm01[np.isnan(weight_fs_norm01)] = 0.0\n # If weights are nan, fill them with zeros\n weight_fs_norm01 = np.nan_to_num(weight_fs_norm01)\n new_fs = np.multiply(nonweight_fs, weight_fs_norm01)\n return new_fs\n\n\ndef assert_zipcompress(arr_list, flags_list, axis=None):\n num_flags = [len(flags) for flags in flags_list]\n if axis is None:\n num_arrs = [arr.size for arr in arr_list]\n else:\n num_arrs = [arr.shape[axis] for arr in arr_list]\n assert num_flags == num_arrs, 'not able to zipcompress'\n\n\ndef zipcompress_safe(arr_list, flags_list, axis=None):\n arr_list = list(arr_list)\n flags_list = list(flags_list)\n assert_zipcompress(arr_list, flags_list, axis=axis)\n return zipcompress(arr_list, flags_list, axis)\n\n\ndef zipcompress(arr_list, flags_list, axis=None):\n return [np.compress(flags, arr, axis=axis) for arr, flags in zip(arr_list, flags_list)]\n\n\ndef ziptake(arr_list, indices_list, axis=None):\n return [arr.take(indices, axis=axis) for arr, indices in zip(arr_list, indices_list)]\n\n\ndef zipcat(arr1_list, arr2_list, axis=None):\n r\"\"\"\n Args:\n arr1_list (list):\n arr2_list (list):\n axis (None): (default = None)\n\n Returns:\n list:\n\n CommandLine:\n python -m vtool_ibeis.other --exec-zipcat --show\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from vtool_ibeis.other import * # NOQA\n >>> arr1_list = [np.array([0, 0, 0]), np.array([0, 0, 0, 0])]\n >>> arr2_list = [np.array([1, 1, 1]), np.array([1, 1, 1, 1])]\n >>> axis = None\n >>> arr3_list = zipcat(arr1_list, arr2_list, axis)\n >>> arr3_list0 = zipcat(arr1_list, arr2_list, axis=0)\n >>> arr3_list1 = zipcat(arr1_list, arr2_list, axis=1)\n >>> arr3_list2 = zipcat(arr1_list, arr2_list, axis=2)\n >>> print('arr3_list = %s' % (ut.repr3(arr3_list),))\n >>> print('arr3_list0 = %s' % (ut.repr3(arr3_list0),))\n >>> print('arr3_list2 = %s' % (ut.repr3(arr3_list2),))\n \"\"\"\n import vtool_ibeis as vt\n assert len(arr1_list) == len(arr2_list), 'lists must correspond'\n if axis is None:\n arr1_iter = arr1_list\n arr2_iter = arr2_list\n else:\n arr1_iter = [vt.atleast_nd(arr1, axis + 1) for arr1 in arr1_list]\n arr2_iter = [vt.atleast_nd(arr2, axis + 1) for arr2 in arr2_list]\n arrs_iter = list(zip(arr1_iter, arr2_iter))\n arr3_list = [np.concatenate(arrs, axis=axis) for arrs in arrs_iter]\n return arr3_list\n\n\ndef atleast_nd(arr, n, tofront=False):\n r\"\"\"\n View inputs as arrays with at least n dimensions.\n TODO: Commit to numpy\n\n Args:\n arr (array_like): One array-like object. Non-array inputs are\n converted to arrays. Arrays that already have n or more dimensions\n are preserved.\n n (int):\n tofront (bool): if True new dims are added to the front of the array\n\n CommandLine:\n python -m vtool_ibeis.other --exec-atleast_nd --show\n\n Returns:\n ndarray :\n An array with ``a.ndim >= n``. 
Copies are avoided where possible,\n and views with three or more dimensions are returned. For example,\n a 1-D array of shape ``(N,)`` becomes a view of shape\n ``(1, N, 1)``, and a 2-D array of shape ``(M, N)`` becomes a view of shape\n ``(M, N, 1)``.\n\n See Also:\n atleast_1d, atleast_2d, atleast_3d\n\n Example0:\n >>> # ENABLE_DOCTEST\n >>> from vtool_ibeis.other import * # NOQA\n >>> n = 2\n >>> arr = np.array([1, 1, 1])\n >>> arr_ = atleast_nd(arr, n)\n >>> result = ub.repr2(arr_.tolist())\n >>> print(result)\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from vtool_ibeis.other import * # NOQA\n >>> n = 4\n >>> arr1 = [1, 1, 1]\n >>> arr2 = np.array(0)\n >>> arr3 = np.array([[[[[1]]]]])\n >>> arr1_ = atleast_nd(arr1, n)\n >>> arr2_ = atleast_nd(arr2, n)\n >>> arr3_ = atleast_nd(arr3, n)\n >>> result1 = ub.repr2(arr1_.tolist())\n >>> result2 = ub.repr2(arr2_.tolist())\n >>> result3 = ub.repr2(arr3_.tolist())\n >>> result = '\\n'.join([result1, result2, result3])\n >>> print(result)\n \"\"\"\n arr_ = np.asanyarray(arr)\n ndims = len(arr_.shape)\n if n is not None and ndims < n:\n # append the required number of dimensions to the end\n if tofront:\n expander = (None,) * (n - ndims) + (Ellipsis,)\n else:\n expander = (Ellipsis,) + (None,) * (n - ndims)\n arr_ = arr_[expander]\n return arr_\n\n\ndef ensure_shape(arr, dimshape):\n \"\"\"\n Ensures that an array takes a certain shape. The total size of the array\n must not change.\n\n Args:\n arr (ndarray): array to change the shape of\n dimshape (tuple): desired shape (Nones can be used to broadcast\n dimensions)\n\n Returns:\n ndarray: arr_ - the input array, which has been modified inplace.\n\n CommandLine:\n python -m vtool_ibeis.other ensure_shape\n\n Doctest:\n >>> from vtool_ibeis.other import * # NOQA\n >>> arr = np.zeros((7, 7))\n >>> dimshape = (None, None, 3)\n >>> arr2 = ensure_shape(np.array([[1, 2]]), (None, 2))\n >>> assert arr2.shape == (1, 2)\n >>> arr3 = ensure_shape(np.array([]), (None, 2))\n >>> assert arr3.shape == (0, 2)\n \"\"\"\n if isinstance(dimshape, tuple):\n n = len(dimshape)\n else:\n n = dimshape\n dimshape = None\n arr_ = atleast_nd(arr, n)\n if dimshape is not None:\n newshape = tuple([\n d1 if d2 is None else d2\n for d1, d2 in zip(arr_.shape, dimshape)])\n arr_.shape = newshape\n return arr_\n\n\ndef significant_shape(arr):\n \"\"\" find the shape without trailing 1's \"\"\"\n sig_dim = 0\n for i, dim in enumerate(arr.shape, start=1):\n if dim != 1:\n sig_dim = i\n sig_shape = arr.shape[0:sig_dim]\n return sig_shape\n\n\ndef atleast_shape(arr, dimshape):\n \"\"\"\n Ensures that an array takes a certain shape. 
The total size of the array\n must not change.\n\n Args:\n arr (ndarray): array to change the shape of\n dimshape (tuple): desired shape (Nones can be used to broadcast\n dimensions)\n\n Returns:\n ndarray: arr_ - the input array, which has been modified inplace.\n\n CommandLine:\n python -m vtool_ibeis.other ensure_shape\n\n Doctest:\n >>> from vtool_ibeis.other import * # NOQA\n >>> arr = np.zeros((7, 7))\n >>> assert atleast_shape(arr, (1, 1, 3,)).shape == (7, 7, 3)\n >>> assert atleast_shape(arr, (1, 1, 2, 4,)).shape == (7, 7, 2, 4)\n >>> assert atleast_shape(arr, (1, 1,)).shape == (7, 7,)\n >>> assert atleast_shape(arr, (1, 1, 1)).shape == (7, 7, 1)\n >>> assert atleast_shape(np.zeros(()), (1,)).shape == (1,)\n >>> assert atleast_shape(np.zeros(()), tuple()).shape == tuple()\n >>> assert atleast_shape(np.zeros(()), (1, 2, 3,)).shape == (1, 2, 3)\n >>> ut.assert_raises(ValueError, atleast_shape, arr, (2, 2))\n >>> assert atleast_shape(np.zeros((7, 7, 3)), (1, 1, 3)).shape == (7, 7, 3)\n >>> ut.assert_raises(ValueError, atleast_shape, np.zeros((7, 7, 3)), (1, 1, 4))\n\n \"\"\"\n n = len(dimshape)\n sig_shape = significant_shape(arr)\n if n < len(sig_shape):\n raise ValueError(\n 'len(dimshape)={} must be >= than '\n 'len(significant_shape(arr)={})'.format(n, sig_shape))\n arr_ = atleast_nd(arr, n)\n for d1, d2 in zip(arr_.shape, dimshape):\n if d2 > 1 and d1 != 1 and d1 != d2:\n raise ValueError('cannot broadcast {} to {}'.format(\n arr_.shape, dimshape\n ))\n reps = tuple(1 if d2 is None or (d1 == d2) else d2\n for d1, d2 in zip(arr_.shape, dimshape))\n arr_ = np.tile(arr_, reps)\n return arr_\n\n\ndef atleast_3channels(arr, copy=True):\n r\"\"\"\n Ensures that there are 3 channels in the image\n\n Args:\n arr (ndarray[N, M, ...]): the image\n copy (bool): Always copies if True, if False, then copies only when the\n size of the array must change.\n\n Returns:\n ndarray: with shape (N, M, C), where C in {3, 4}\n\n CommandLine:\n python -m vtool_ibeis.other atleast_3channels\n\n Doctest:\n >>> from vtool_ibeis.image import * # NOQA\n >>> import vtool_ibeis as vt\n >>> assert atleast_3channels(np.zeros((10, 10))).shape[-1] == 3\n >>> assert atleast_3channels(np.zeros((10, 10, 1))).shape[-1] == 3\n >>> assert atleast_3channels(np.zeros((10, 10, 3))).shape[-1] == 3\n >>> assert atleast_3channels(np.zeros((10, 10, 4))).shape[-1] == 4\n \"\"\"\n # atleast_shape(arr, (None, None, 3))\n ndims = len(arr.shape)\n if ndims == 2:\n res = np.tile(arr[:, :, None], 3)\n return res\n elif ndims == 3:\n h, w, c = arr.shape\n if c == 1:\n res = np.tile(arr, 3)\n elif c in [3, 4]:\n res = arr.copy() if copy else arr\n else:\n raise ValueError('Cannot handle ndims={}'.format(ndims))\n else:\n raise ValueError('Cannot handle arr.shape={}'.format(arr.shape))\n return res\n\n\ndef iter_reduce_ufunc(ufunc, arr_iter, out=None):\n \"\"\"\n constant memory iteration and reduction\n\n applys ufunc from left to right over the input arrays\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from vtool_ibeis.other import * # NOQA\n >>> arr_list = [\n ... np.array([0, 1, 2, 3, 8, 9]),\n ... np.array([4, 1, 2, 3, 4, 5]),\n ... np.array([0, 5, 2, 3, 4, 5]),\n ... np.array([1, 1, 6, 3, 4, 5]),\n ... np.array([0, 1, 2, 7, 4, 5])\n ... ]\n >>> memory = np.array([9, 9, 9, 9, 9, 9])\n >>> gen_memory = memory.copy()\n >>> def arr_gen(arr_list, gen_memory):\n ... for arr in arr_list:\n ... gen_memory[:] = arr\n ... 
yield gen_memory\n >>> print('memory = %r' % (memory,))\n >>> print('gen_memory = %r' % (gen_memory,))\n >>> ufunc = np.maximum\n >>> res1 = iter_reduce_ufunc(ufunc, iter(arr_list), out=None)\n >>> res2 = iter_reduce_ufunc(ufunc, iter(arr_list), out=memory)\n >>> res3 = iter_reduce_ufunc(ufunc, arr_gen(arr_list, gen_memory), out=memory)\n >>> print('res1 = %r' % (res1,))\n >>> print('res2 = %r' % (res2,))\n >>> print('res3 = %r' % (res3,))\n >>> print('memory = %r' % (memory,))\n >>> print('gen_memory = %r' % (gen_memory,))\n >>> assert np.all(res1 == res2)\n >>> assert np.all(res2 == res3)\n \"\"\"\n # Get first item in iterator\n try:\n initial = next(arr_iter)\n except StopIteration:\n return None\n # Populate the outvariable if specified otherwise make a copy of the first\n # item to be the output memory\n if out is not None:\n out[:] = initial\n else:\n out = initial.copy()\n # Iterate and reduce\n for arr in arr_iter:\n ufunc(out, arr, out=out)\n return out\n\n\ndef clipnorm(arr, min_, max_, out=None):\n \"\"\"\n normalizes arr to the range 0 to 1 using min_ and max_ as clipping bounds\n \"\"\"\n if max_ == 1 and min_ == 0:\n if out is not None:\n out[:] = arr\n else:\n out = arr.copy()\n return out\n out_args = tuple() if out is None else (out,)\n arr_ = np.subtract(arr, min_, *out_args)\n arr_ = np.divide(arr_, max_ - min_, *out_args)\n arr_ = np.clip(arr_, 0.0, 1.0, *out_args)\n return arr_\n\n\ndef intersect1d_reduce(arr_list, assume_unique=False):\n arr_iter = iter(arr_list)\n out = next(arr_iter)\n for arr in arr_iter:\n out = np.intersect1d(out, arr, assume_unique=assume_unique)\n return out\n\n\ndef componentwise_dot(arr1, arr2):\n \"\"\"\n a dot product is a componentwise multiplication of\n two vector and then a sum.\n\n Args:\n arr1 (ndarray)\n arr2 (ndarray):\n\n Returns:\n ndarray: cosangle\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from vtool_ibeis.other import * # NOQA\n >>> np.random.seed(0)\n >>> arr1 = np.random.rand(3, 128)\n >>> arr1 = arr1 / np.linalg.norm(arr1, axis=1)[:, None]\n >>> arr2 = arr1\n >>> cosangle = componentwise_dot(arr1, arr2)\n >>> result = str(cosangle)\n >>> print(result)\n [ 1. 1. 
1.]\n \"\"\"\n cosangle = np.multiply(arr1, arr2).sum(axis=-1).T\n return cosangle\n\n\ndef intersect2d_indices(A, B):\n r\"\"\"\n Args:\n A (ndarray[ndims=2]):\n B (ndarray[ndims=2]):\n\n Returns:\n tuple: (ax_list, bx_list)\n\n CommandLine:\n python -m vtool_ibeis.other --test-intersect2d_indices\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from vtool_ibeis.other import * # NOQA\n >>> # build test data\n >>> A = np.array([[ 158, 171], [ 542, 297], [ 955, 1113], [ 255, 1254], [ 976, 1255], [ 170, 1265]])\n >>> B = np.array([[ 117, 211], [ 158, 171], [ 255, 1254], [ 309, 328], [ 447, 1148], [ 750, 357], [ 976, 1255]])\n >>> # execute function\n >>> (ax_list, bx_list) = intersect2d_indices(A, B)\n >>> # verify results\n >>> result = str((ax_list, bx_list))\n >>> print(result)\n \"\"\"\n flag_list1, flag_list2 = intersect2d_flags(A, B)\n ax_list = np.flatnonzero(flag_list1)\n bx_list = np.flatnonzero(flag_list2)\n return ax_list, bx_list\n\n\ndef intersect2d_flags(A, B):\n r\"\"\"\n Checks intersection of rows of A against rows of B\n\n Args:\n A (ndarray[ndims=2]):\n B (ndarray[ndims=2]):\n\n Returns:\n tuple: (flag_list1, flag_list2)\n\n CommandLine:\n python -m vtool_ibeis.other --test-intersect2d_flags\n\n SeeAlso:\n np.in1d - the one dimensional version\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from vtool_ibeis.other import * # NOQA\n >>> A = np.array([[609, 307], [ 95, 344], [ 1, 690]])\n >>> B = np.array([[ 422, 1148], [ 422, 968], [ 481, 1148], [ 750, 1132], [ 759, 159]])\n >>> (flag_list1, flag_list2) = intersect2d_flags(A, B)\n >>> result = str((flag_list1, flag_list2))\n >>> print(result)\n \"\"\"\n A_, B_, C_ = intersect2d_structured_numpy(A, B)\n flag_list1 = flag_intersection(A_, C_)\n flag_list2 = flag_intersection(B_, C_)\n return flag_list1, flag_list2\n\n\ndef flag_intersection(arr1, arr2):\n r\"\"\"\n Flags the rows in `arr1` that contain items in `arr2`\n\n Returns:\n ndarray: flags where len(flags) == len(arr1)\n\n Example0:\n >>> # ENABLE_DOCTEST\n >>> from vtool_ibeis.other import * # NOQA\n >>> arr1 = np.array([0, 1, 2, 3, 4, 5])\n >>> arr2 = np.array([2, 6, 4])\n >>> flags = flag_intersection(arr1, arr2)\n >>> assert len(flags) == len(arr1)\n >>> result = ('flags = %s' % (ub.repr2(flags),))\n >>> print(result)\n\n Example1:\n >>> # ENABLE_DOCTEST\n >>> from vtool_ibeis.other import * # NOQA\n >>> import vtool_ibeis as vt\n >>> arr1 = np.array([[0, 0], [0, 1], [0, 2], [0, 3], [0, 4], [0, 5]])\n >>> arr2 = np.array([[0, 2], [0, 6], [0, 4], [3, 0]])\n >>> arr1, arr2 = vt.structure_rows(arr1, arr2)\n >>> flags = flag_intersection(arr1, arr2)\n >>> assert len(flags) == len(arr1)\n >>> result = ('flags = %s' % (ub.repr2(flags),))\n >>> print(result)\n\n Example2:\n >>> # ENABLE_DOCTEST\n >>> from vtool_ibeis.other import * # NOQA\n >>> arr1 = np.array([0, 1, 2, 3, 4, 5])\n >>> arr2 = np.array([])\n >>> flags = flag_intersection(arr1, arr2)\n >>> assert len(flags) == len(arr1)\n >>> flags = flag_intersection(np.array([]), np.array([2, 6, 4]))\n >>> assert len(flags) == 0\n\n Timeit:\n >>> setup = ut.codeblock(\n >>> r'''\n import vtool_ibeis as vt\n import numpy as np\n rng = np.random.RandomState(0)\n arr1 = rng.randint(0, 100, 100000).reshape(-1, 2)\n arr2 = rng.randint(0, 100, 1000).reshape(-1, 2)\n arr1_, arr2_ = vt.structure_rows(arr1, arr2)\n ''')\n >>> stmt_list = ut.codeblock(\n >>> '''\n np.array([row in arr2_ for row in arr1_])\n np.logical_or.reduce([arr1_ == row_ for row_ in arr2_]).ravel()\n vt.iter_reduce_ufunc(np.logical_or, (arr1_ == row_ for row_ in 
arr2_)).ravel()\n ''').split('\\n')\n >>> out = ut.timeit_compare(stmt_list, setup=setup, iterations=3)\n \"\"\"\n import vtool_ibeis as vt\n if arr1.size == 0 or arr2.size == 0:\n flags = np.full(arr1.shape[0], False, dtype=np.bool)\n #return np.empty((0,), dtype=np.bool)\n else:\n # flags = np.logical_or.reduce([arr1 == row for row in arr2]).T[0]\n flags = vt.iter_reduce_ufunc(np.logical_or, (arr1 == row_ for row_ in arr2)).ravel()\n return flags\n\n\ndef structure_rows(*arrs):\n r\"\"\"\n CommandLine:\n python -m vtool_ibeis.other structure_rows\n\n SeeAlso:\n unstructure_rows\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from vtool_ibeis.other import * # NOQA\n >>> arr1 = np.array([[609, 307], [ 95, 344], [ 1, 690]])\n >>> arr2 = np.array([[ 422, 1148], [ 422, 968], [ 481, 1148], [ 750, 1132], [ 759, 159]])\n >>> arrs = (arr1, arr2)\n >>> structured_arrs = structure_rows(*arrs)\n >>> unstructured_arrs = unstructure_rows(*structured_arrs)\n >>> assert np.all(unstructured_arrs[0] == arrs[0])\n >>> assert np.all(unstructured_arrs[1] == arrs[1])\n >>> union_ = np.union1d(*structured_arrs)\n >>> union, = unstructure_rows(union_)\n >>> assert len(union.shape) == 2\n \"\"\"\n arr0 = arrs[0]\n ncols = arr0.shape[1]\n dtype = {'names': ['f%d' % (i,) for i in range(ncols)],\n 'formats': ncols * [arr0.dtype]}\n for arr in arrs:\n assert len(arr.shape) == 2, 'arrays must be 2d'\n assert arr.dtype == arr0.dtype, 'arrays must share the same dtype'\n assert arr.shape[1] == ncols, 'arrays must share column shape'\n structured_arrs = []\n for arr in arrs:\n arr_ = np.ascontiguousarray(arr).view(dtype)\n structured_arrs.append(arr_)\n return structured_arrs\n\n\ndef unstructure_rows(*structured_arrs):\n r\"\"\"\n SeeAlso:\n structure_rows\n \"\"\"\n # TODO: assert arr.dtype.fields are all the same type\n unstructured_arrs = [arr.view(list(arr.dtype.fields.values())[0][0])\n for arr in structured_arrs]\n unstructured_arrs = []\n for arr_ in structured_arrs:\n dtype = list(arr_.dtype.fields.values())[0][0]\n arr = arr_.view(dtype).reshape(-1, 2)\n unstructured_arrs.append(arr)\n return unstructured_arrs\n\n\ndef intersect2d_structured_numpy(arr1, arr2, assume_unique=False):\n \"\"\"\n Args:\n arr1: unstructured 2d array\n arr2: unstructured 2d array\n\n Returns:\n A_, B_, C_ - structured versions of arr1, and arr2, and their structured intersection\n\n References:\n http://stackoverflow.com/questions/16970982/find-unique-rows-in-numpy-array\n http://stackoverflow.com/questions/8317022/get-intersecting-rows-across-two-2d-numpy-arrays\n \"\"\"\n ncols = arr1.shape[1]\n assert arr1.dtype == arr2.dtype, (\n 'arr1 and arr2 must have the same dtypes.'\n 'arr1.dtype=%r, arr2.dtype=%r' % (arr1.dtype, arr2.dtype))\n # [('f%d' % i, arr1.dtype) for i in range(ncols)]\n #dtype = np.dtype([('f%d' % i, arr1.dtype) for i in range(ncols)])\n #dtype = {'names': ['f{}'.format(i) for i in range(ncols)],\n # 'formats': ncols * [arr1.dtype]}\n dtype = {'names': ['f%d' % (i,) for i in range(ncols)],\n 'formats': ncols * [arr1.dtype]}\n #try:\n A_ = np.ascontiguousarray(arr1).view(dtype)\n B_ = np.ascontiguousarray(arr2).view(dtype)\n C_ = np.intersect1d(A_, B_, assume_unique=assume_unique)\n #C = np.intersect1d(arr1.view(dtype),\n # arr2.view(dtype),\n # assume_unique=assume_unique)\n #except ValueError:\n # C = np.intersect1d(A.copy().view(dtype),\n # B.copy().view(dtype),\n # assume_unique=assume_unique)\n return A_, B_, C_\n\n\ndef intersect2d_numpy(A, B, assume_unique=False, return_indices=False):\n \"\"\"\n References::\n 
http://stackoverflow.com/questions/8317022/get-intersecting-rows-across-two-2d-numpy-arrays/8317155#8317155\n\n Args:\n A (ndarray[ndims=2]):\n B (ndarray[ndims=2]):\n assume_unique (bool):\n\n Returns:\n ndarray[ndims=2]: C\n\n CommandLine:\n python -m vtool_ibeis.other --test-intersect2d_numpy\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from vtool_ibeis.other import * # NOQA\n >>> # build test data\n >>> A = np.array([[ 0, 78, 85, 283, 396, 400, 403, 412, 535, 552],\n ... [152, 98, 32, 260, 387, 285, 22, 103, 55, 261]]).T\n >>> B = np.array([[403, 85, 412, 85, 815, 463, 613, 552],\n ... [ 22, 32, 103, 116, 188, 199, 217, 254]]).T\n >>> assume_unique = False\n >>> # execute function\n >>> C, Ax, Bx = intersect2d_numpy(A, B, return_indices=True)\n >>> # verify results\n >>> result = str((C.T, Ax, Bx))\n >>> print(result)\n (array([[ 85, 403, 412],\n [ 32, 22, 103]]), array([2, 6, 7]), array([0, 1, 2]))\n\n Example2:\n >>> # ENABLE_DOCTEST\n >>> from vtool_ibeis.other import * # NOQA\n >>> A = np.array([[1, 2, 3], [1, 1, 1]])\n >>> B = np.array([[1, 2, 3], [1, 2, 14]])\n >>> C, Ax, Bx = intersect2d_numpy(A, B, return_indices=True)\n >>> result = str((C, Ax, Bx))\n >>> print(result)\n (array([[1, 2, 3]]), array([0]), array([0]))\n \"\"\"\n nrows, ncols = A.shape\n A_, B_, C_ = intersect2d_structured_numpy(A, B, assume_unique)\n # This last bit is optional if you're okay with \"C\" being a structured array...\n C = C_.view(A.dtype).reshape(-1, ncols)\n if return_indices:\n ax_list = np.flatnonzero(flag_intersection(A_, C_))\n bx_list = np.flatnonzero(flag_intersection(B_, C_))\n return C, ax_list, bx_list\n else:\n return C\n\n\ndef nearest_point(x, y, pts, mode='random'):\n \"\"\" finds the nearest point(s) in pts to (x, y) \"\"\"\n dists = (pts.T[0] - x) ** 2 + (pts.T[1] - y) ** 2\n fx = dists.argmin()\n mindist = dists[fx]\n other_fx = np.where(mindist == dists)[0]\n if len(other_fx) > 0:\n if mode == 'random':\n np.random.shuffle(other_fx)\n fx = other_fx[0]\n if mode == 'all':\n fx = other_fx\n if mode == 'first':\n fx = fx\n return fx, mindist\n\n\ndef get_uncovered_mask(covered_array, covering_array):\n r\"\"\"\n Args:\n covered_array (ndarray):\n covering_array (ndarray):\n\n Returns:\n ndarray: flags\n\n CommandLine:\n python -m vtool_ibeis.other --test-get_uncovered_mask\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from vtool_ibeis.other import * # NOQA\n >>> covered_array = [1, 2, 3, 4, 5]\n >>> covering_array = [2, 4, 5]\n >>> flags = get_uncovered_mask(covered_array, covering_array)\n >>> result = str(flags)\n >>> print(result)\n [ True False True False False]\n\n Example2:\n >>> # ENABLE_DOCTEST\n >>> from vtool_ibeis.other import * # NOQA\n >>> covered_array = [1, 2, 3, 4, 5]\n >>> covering_array = []\n >>> flags = get_uncovered_mask(covered_array, covering_array)\n >>> result = str(flags)\n >>> print(result)\n [ True True True True True]\n\n Example3:\n >>> # ENABLE_DOCTEST\n >>> from vtool_ibeis.other import * # NOQA\n >>> covered_array = np.array([\n ... [1, 2, 3],\n ... [4, 5, 6],\n ... [7, 8, 9],\n ... 
], dtype=np.int32)\n >>> covering_array = [2, 4, 5]\n >>> flags = get_uncovered_mask(covered_array, covering_array)\n >>> result = ub.repr2(flags, with_dtype=True)\n >>> print(result)\n np.array([[ True, False, True],\n [False, False, True],\n [ True, True, True]], dtype=bool)\n\n Ignore::\n covering_array = [1, 2, 3, 4, 5, 6, 7]\n %timeit get_uncovered_mask(covered_array, covering_array)\n 100000 loops, best of 3: 18.6 µs per loop\n %timeit get_uncovered_mask2(covered_array, covering_array)\n 100000 loops, best of 3: 16.9 µs per loop\n\n\n \"\"\"\n import vtool_ibeis as vt\n if len(covering_array) == 0:\n return np.ones(np.shape(covered_array), dtype=np.bool)\n else:\n flags_iter = (np.not_equal(covered_array, item) for item in covering_array)\n mask_array = vt.iter_reduce_ufunc(np.logical_and, flags_iter)\n return mask_array\n #if len(covering_array) == 0:\n # return np.ones(np.shape(covered_array), dtype=np.bool)\n #else:\n # flags_list = (np.not_equal(covered_array, item) for item in covering_array)\n # mask_array = and_lists(*flags_list)\n # return mask_array\n\n\n#def get_uncovered_mask2(covered_array, covering_array):\n# if len(covering_array) == 0:\n# return np.ones(np.shape(covered_array), dtype=np.bool)\n# else:\n# flags_iter = (np.not_equal(covered_array, item) for item in covering_array)\n# mask_array = vt.iter_reduce_ufunc(np.logical_and, flags_iter)\n# return mask_array\n\n\ndef get_covered_mask(covered_array, covering_array):\n return ~get_uncovered_mask(covered_array, covering_array)\n\n\ndef mult_lists(*args):\n return np.multiply.reduce(args)\n\n\ndef or_lists(*args):\n \"\"\"\n Like np.logical_and, but can take more than 2 arguments\n\n SeeAlso:\n and_lists\n \"\"\"\n flags = np.logical_or.reduce(args)\n return flags\n\n\ndef and_lists(*args):\n \"\"\"\n Like np.logical_and, but can take more than 2 arguments\n\n CommandLine:\n python -m vtool_ibeis.other --test-and_lists\n\n SeeAlso:\n or_lists\n\n Example1:\n >>> # ENABLE_DOCTEST\n >>> from vtool_ibeis.other import * # NOQA\n >>> arg1 = np.array([1, 1, 1, 1,])\n >>> arg2 = np.array([1, 1, 0, 1,])\n >>> arg3 = np.array([0, 1, 0, 1,])\n >>> args = (arg1, arg2, arg3)\n >>> flags = and_lists(*args)\n >>> result = str(flags)\n >>> print(result)\n [False True False True]\n\n Example2:\n >>> # ENABLE_DOCTEST\n >>> from vtool_ibeis.other import * # NOQA\n >>> size = 10000\n >>> rng = np.random.RandomState(0)\n >>> arg1 = rng.randint(2, size=size)\n >>> arg2 = rng.randint(2, size=size)\n >>> arg3 = rng.randint(2, size=size)\n >>> args = (arg1, arg2, arg3)\n >>> flags = and_lists(*args)\n >>> # ensure equal division\n >>> segments = 5\n >>> validx = np.where(flags)[0]\n >>> endx = int(segments * (validx.size // (segments)))\n >>> parts = np.split(validx[:endx], segments)\n >>> result = str(list(map(np.sum, parts)))\n >>> print(result)\n [243734, 714397, 1204989, 1729375, 2235191]\n\n %timeit reduce(np.logical_and, args)\n %timeit np.logical_and.reduce(args) # wins with more data\n \"\"\"\n return np.logical_and.reduce(args)\n\n\ndef rowwise_operation(arr1, arr2, op):\n \"\"\"\n DEPRICATE THIS IS POSSIBLE WITH STRICTLY BROADCASTING AND\n USING np.newaxis\n\n DEPRICATE, numpy has better ways of doing this.\n Is the rowwise name correct? Should it be colwise?\n\n performs an operation between an\n (N x A x B ... 
x Z) array with an\n (N x 1) array\n \"\"\"\n # FIXME: not sure this is the correct terminology\n assert arr1.shape[0] == arr2.shape[0]\n broadcast_dimensions = arr1.shape[1:] # need padding for\n tileshape = tuple(list(broadcast_dimensions) + [1])\n arr2_ = np.rollaxis(np.tile(arr2, tileshape), -1)\n rowwise_result = op(arr1, arr2_)\n return rowwise_result\n\n\ndef colwise_operation(arr1, arr2, op):\n arr1T = arr1.T\n arr2T = arr2.T\n rowwise_result = rowwise_operation(arr1T, arr2T, op)\n colwise_result = rowwise_result.T\n return colwise_result\n\n\ndef compare_matrix_columns(matrix, columns, comp_op=np.equal, logic_op=np.logical_or):\n \"\"\"\n REPLACE WITH:\n qfx2_invalid = logic_op.reduce([comp_op([:, None], qfx2_normnid) for col1 in qfx2_topnid.T])\n\n \"\"\"\n # FIXME: Generalize\n #row_matrix = matrix.T\n #row_list = columns.T\n return compare_matrix_to_rows(matrix.T, columns.T, comp_op=comp_op, logic_op=logic_op).T\n\n\ndef compare_matrix_to_rows(row_matrix, row_list, comp_op=np.equal, logic_op=np.logical_or):\n \"\"\"\n Compares each row in row_list to each row in row matrix using comp_op\n Both must have the same number of columns.\n Performs logic_op on the results of each individual row\n\n SeeAlso:\n ibeis.algo.hots.nn_weights.mark_name_valid_normalizers\n\n compop = np.equal\n logic_op = np.logical_or\n \"\"\"\n row_result_list = [np.array([comp_op(matrow, row) for matrow in row_matrix])\n for row in row_list]\n output = row_result_list[0]\n for row_result in row_result_list[1:]:\n logic_op(output, row_result, out=output)\n #output = logic_op(output, row_result)\n return output\n\n\ndef norm01(array, dim=None):\n \"\"\"\n normalizes a numpy array from 0 to 1 based in its extent\n\n Args:\n array (ndarray):\n dim (int):\n\n Returns:\n ndarray:\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from vtool_ibeis.other import * # NOQA\n >>> array = np.array([ 22, 1, 3, 2, 10, 42, ])\n >>> dim = None\n >>> array_norm = norm01(array, dim)\n >>> result = ub.repr2(array_norm, precision=3)\n >>> print(result)\n \"\"\"\n if not ut.is_float(array):\n array = array.astype(np.float32)\n array_max = array.max(dim)\n array_min = array.min(dim)\n array_exnt = np.subtract(array_max, array_min)\n array_norm = np.divide(np.subtract(array, array_min), array_exnt)\n return array_norm\n\n\ndef weighted_geometic_mean_unnormalized(data, weights):\n import vtool_ibeis as vt\n terms = [x ** w for x, w in zip(data, weights)]\n termprod = vt.iter_reduce_ufunc(np.multiply, iter(terms))\n return termprod\n\n\ndef weighted_geometic_mean(data, weights):\n r\"\"\"\n Args:\n data (list of ndarrays):\n weights (ndarray):\n\n Returns:\n ndarray: gmean_\n\n CommandLine:\n python -m vtool_ibeis.other --test-weighted_geometic_mean\n\n References:\n https://en.wikipedia.org/wiki/Weighted_geometric_mean\n\n SeeAlso:\n scipy.stats.mstats.gmean\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from vtool_ibeis.other import * # NOQA\n >>> data = [.9, .5]\n >>> weights = np.array([1.0, .5])\n >>> gmean_ = weighted_geometic_mean(data, weights)\n >>> result = ('gmean_ = %.3f' % (gmean_,))\n >>> print(result)\n gmean_ = 0.740\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from vtool_ibeis.other import * # NOQA\n >>> rng = np.random.RandomState(0)\n >>> img1 = rng.rand(4, 4)\n >>> img2 = rng.rand(4, 4)\n >>> data = [img1, img2]\n >>> weights = np.array([.5, .5])\n >>> gmean_ = weighted_geometic_mean(data, weights)\n >>> result = ub.hzcat(['gmean_ = ', ub.repr2(gmean_, precision=2, with_dtype=True)])\n >>> print(result)\n\n Ignore:\n res1 = 
((img1 ** .5 * img2 ** .5)) ** 1\n res2 = np.sqrt(img1 * img2)\n \"\"\"\n import vtool_ibeis as vt\n terms = [np.asarray(x ** w) for x, w in zip(data, weights)]\n termprod = vt.iter_reduce_ufunc(np.multiply, iter(terms))\n exponent = 1 / np.sum(weights)\n gmean_ = termprod ** exponent\n return gmean_\n\n\ndef grab_webcam_image():\n \"\"\"\n References:\n http://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_gui/py_video_display/py_video_display.html\n\n CommandLine:\n python -m vtool_ibeis.other --test-grab_webcam_image --show\n\n Example:\n >>> # SCRIPT\n >>> from vtool_ibeis.other import * # NOQA\n >>> import vtool_ibeis as vt\n >>> img = grab_webcam_image()\n >>> # xdoctest: +REQUIRES(--show)\n >>> import plottool_ibeis as pt\n >>> pt.imshow(img)\n >>> vt.imwrite('webcap.jpg', img)\n >>> ut.show_if_requested()\n \"\"\"\n import cv2\n cap = cv2.VideoCapture(0)\n # Capture frame-by-frame\n ret, img = cap.read()\n # When everything done, release the capture\n cap.release()\n return img\n\n\n#def xor_swap(arr1, arr2, inplace=True):\n# if not inplace:\n# arr1 = arr1.copy()\n# arr2 = arr2.copy()\n# np.bitwise_xor(arr1, arr2, out=arr1)\n# np.bitwise_xor(arr1, arr2, out=arr2)\n# np.bitwise_xor(arr1, arr2, out=arr1)\n# return arr1, arr2\n\n\ndef find_first_true_indices(flags_list):\n \"\"\"\n TODO: move to vtool_ibeis\n\n returns a list of indexes where the index is the first True position\n in the corresponding sublist or None if it does not exist\n\n in other words: for each row finds the smallest True column number or None\n\n Args:\n flags_list (list): list of lists of booleans\n\n CommandLine:\n python -m utool.util_list --test-find_first_true_indices\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from vtool_ibeis.other import * # NOQA\n >>> # build test data\n >>> flags_list = [[True, False, True],\n ... [False, False, False],\n ... [False, True, True],\n ... [False, False, True]]\n >>> # execute function\n >>> index_list = find_first_true_indices(flags_list)\n >>> # verify results\n >>> result = str(index_list)\n >>> print(result)\n [0, None, 1, 2]\n \"\"\"\n def tryget_fisrt_true(flags):\n index_list = np.where(flags)[0]\n index = None if len(index_list) == 0 else index_list[0]\n return index\n index_list = [tryget_fisrt_true(flags) for flags in flags_list]\n return index_list\n\n\ndef find_k_true_indicies(flags_list, k):\n r\"\"\"\n Uses output of either this function or find_first_true_indices\n to find the next index of true flags\n\n Args:\n flags_list (list): list of lists of booleans\n\n CommandLine:\n python -m utool.util_list --test-find_next_true_indices\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from vtool_ibeis.other import * # NOQA\n >>> flags_list = [[False, False, True],\n ... [False, False, False],\n ... [False, True, True],\n ... 
[True, True, True]]\n >>> k = 2\n >>> indices = find_k_true_indicies(flags_list, k)\n >>> result = str(indices)\n >>> print(result)\n [array([2]), None, array([1, 2]), array([0, 1])]\n \"\"\"\n\n if False:\n import vtool_ibeis as vt\n flags_list = np.array(flags_list)\n rowxs, colxs = np.where(flags_list)\n first_k_groupxs = [groupx[0:k] for groupx in vt.group_indices(rowxs)[1]]\n chosen_xs = np.hstack(first_k_groupxs)\n flat_xs = np.ravel_multi_index((rowxs.take(chosen_xs), colxs.take(chosen_xs)), flags_list.shape)\n flat_xs\n def tryget_k_true(flags):\n index_list = np.where(flags)[0]\n index = None if len(index_list) == 0 else index_list[0:k]\n return index\n index_list = [tryget_k_true(flags) for flags in flags_list]\n return index_list\n\n\ndef find_next_true_indices(flags_list, offset_list):\n r\"\"\"\n Uses output of either this function or find_first_true_indices\n to find the next index of true flags\n\n Args:\n flags_list (list): list of lists of booleans\n\n CommandLine:\n python -m utool.util_list --test-find_next_true_indices\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from vtool_ibeis.other import * # NOQA\n >>> # build test data\n >>> flags_list = [[True, False, True],\n ... [False, False, False],\n ... [False, True, True],\n ... [False, False, True]]\n >>> offset_list = find_first_true_indices(flags_list)\n >>> # execute function\n >>> index_list = find_next_true_indices(flags_list, offset_list)\n >>> # verify results\n >>> result = str(index_list)\n >>> print(result)\n [2, None, 2, None]\n \"\"\"\n def tryget_next_true(flags, offset_):\n offset = offset_ + 1\n relative_flags = flags[offset:]\n rel_index_list = np.where(relative_flags)[0]\n index = None if len(rel_index_list) == 0 else rel_index_list[0] + offset\n return index\n index_list = [None if offset is None else tryget_next_true(flags, offset)\n for flags, offset in zip(flags_list, offset_list)]\n return index_list\n\n\ndef ensure_rng(seed=None):\n \"\"\"\n Returns a numpy random number generator given a seed.\n \"\"\"\n if seed is None:\n rng = np.random\n elif isinstance(seed, np.random.RandomState):\n rng = seed\n else:\n rng = np.random.RandomState(seed)\n return rng\n\n\ndef safe_extreme(arr, op, fill=np.nan, finite=False, nans=True):\n \"\"\"\n Applies an exterme operation to an 1d array (typically max/min) but ensures\n a value is always returned even in operations without identities. 
The\n default identity must be specified using the `fill` argument.\n\n Args:\n arr (ndarray): 1d array to take extreme of\n op (func): vectorized operation like np.max to apply to array\n fill (float): return type if arr has no elements (default = nan)\n finite (bool): if True ignores non-finite values (default = False)\n nans (bool): if False ignores nans (default = True)\n \"\"\"\n if arr is None:\n extreme = fill\n else:\n arr = np.asarray(arr)\n if finite:\n arr = arr.compress(np.isfinite(arr))\n if not nans:\n arr = arr.compress(np.logical_not(np.isnan(arr)))\n if len(arr) == 0:\n extreme = fill\n else:\n extreme = op(arr)\n return extreme\n\n\ndef safe_argmax(arr, fill=np.nan, finite=False, nans=True):\n \"\"\"\n Doctest:\n >>> from vtool_ibeis.other import *\n >>> assert safe_argmax([np.nan, np.nan], nans=False) == 0\n >>> assert safe_argmax([-100, np.nan], nans=False) == 0\n >>> assert safe_argmax([np.nan, -100], nans=False) == 1\n >>> assert safe_argmax([-100, 0], nans=False) == 1\n >>> assert np.isnan(safe_argmax([]))\n \"\"\"\n if len(arr) == 0:\n return fill\n extreme = safe_max(arr, fill=fill, finite=finite, nans=nans)\n if np.isnan(extreme):\n arg_extreme = np.where(np.isnan(arr))[0][0]\n else:\n arg_extreme = np.where(arr == extreme)[0][0]\n return arg_extreme\n\n\ndef safe_max(arr, fill=np.nan, finite=False, nans=True):\n r\"\"\"\n Args:\n arr (ndarray): 1d array to take max of\n fill (float): return type if arr has no elements (default = nan)\n finite (bool): if True ignores non-finite values (default = False)\n nans (bool): if False ignores nans (default = True)\n\n CommandLine:\n python -m vtool_ibeis.other safe_max --show\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from vtool_ibeis.other import * # NOQA\n >>> arrs = [[], [np.nan], [-np.inf, np.nan, np.inf], [np.inf], [np.inf, 1], [0, 1]]\n >>> arrs = [np.array(arr) for arr in arrs]\n >>> fill = np.nan\n >>> results1 = [safe_max(arr, fill, finite=False, nans=True) for arr in arrs]\n >>> results2 = [safe_max(arr, fill, finite=True, nans=True) for arr in arrs]\n >>> results3 = [safe_max(arr, fill, finite=True, nans=False) for arr in arrs]\n >>> results4 = [safe_max(arr, fill, finite=False, nans=False) for arr in arrs]\n >>> results = [results1, results2, results3, results4]\n >>> result = ('results = %s' % (ub.repr2(results, nl=1),))\n >>> print(result)\n results = [\n [nan, nan, nan, inf, inf, 1],\n [nan, nan, nan, nan, 1.0, 1],\n [nan, nan, nan, nan, 1.0, 1],\n [nan, nan, inf, inf, inf, 1],\n ]\n \"\"\"\n return safe_extreme(arr, np.max, fill, finite, nans)\n\n\ndef safe_min(arr, fill=np.nan, finite=False, nans=True):\n \"\"\"\n Example:\n >>> # ENABLE_DOCTEST\n >>> from vtool_ibeis.other import * # NOQA\n >>> arrs = [[], [np.nan], [-np.inf, np.nan, np.inf], [np.inf], [np.inf, 1], [0, 1]]\n >>> arrs = [np.array(arr) for arr in arrs]\n >>> fill = np.nan\n >>> results1 = [safe_min(arr, fill, finite=False, nans=True) for arr in arrs]\n >>> results2 = [safe_min(arr, fill, finite=True, nans=True) for arr in arrs]\n >>> results3 = [safe_min(arr, fill, finite=True, nans=False) for arr in arrs]\n >>> results4 = [safe_min(arr, fill, finite=False, nans=False) for arr in arrs]\n >>> results = [results1, results2, results3, results4]\n >>> result = ('results = %s' % (ub.repr2(results, nl=1),))\n >>> print(result)\n results = [\n [nan, nan, nan, inf, 1.0, 0],\n [nan, nan, nan, nan, 1.0, 0],\n [nan, nan, nan, nan, 1.0, 0],\n [nan, nan, -inf, inf, 1.0, 0],\n ]\n \"\"\"\n return safe_extreme(arr, np.min, fill, finite, nans)\n\n\ndef 
safe_div(a, b):\n return None if a is None or b is None else a / b\n\n\ndef multigroup_lookup_naive(lazydict, keys_list, subkeys_list, custom_func):\n r\"\"\"\n Slow version of multigroup_lookup. Makes a call to custom_func for each\n item in zip(keys_list, subkeys_list).\n\n SeeAlso:\n vt.multigroup_lookup\n \"\"\"\n data_lists = []\n for keys, subkeys in zip(keys_list, subkeys_list):\n subvals_list = [\n custom_func(lazydict, key, [subkey])[0]\n for key, subkey in zip(keys, subkeys)\n ]\n data_lists.append(subvals_list)\n return data_lists\n\n\ndef multigroup_lookup(lazydict, keys_list, subkeys_list, custom_func):\n r\"\"\"\n Efficiently calls custom_func for each item in zip(keys_list, subkeys_list)\n by grouping subkeys to minimize the number of calls to custom_func.\n\n We are given multiple lists of keys, and subvals.\n The goal is to group the subvals by keys and apply the subval lookups\n (a call to a function) to the key only once and at the same time.\n\n Args:\n lazydict (dict of utool.LazyDict):\n keys_list (list):\n subkeys_list (list):\n custom_func (func): must have signature custom_func(lazydict, key, subkeys)\n\n SeeAlso:\n vt.multigroup_lookup_naive - unoptomized version, but simple to read\n\n Example:\n >>> # SLOW_DOCTEST\n >>> # xdoctest: +SKIP\n >>> from vtool_ibeis.other import * # NOQA\n >>> import vtool_ibeis as vt\n >>> fpath_list = [ut.grab_test_imgpath(key) for key in ut.util_grabdata.get_valid_test_imgkeys()]\n >>> lazydict = {count: vt.testdata_annot_metadata(fpath) for count, fpath in enumerate(fpath_list)}\n >>> aids_list = np.array([(3, 2), (0, 2), (1, 2), (2, 3)])\n >>> fms = np.array([[2, 5], [2, 3], [2, 1], [3, 4]])\n >>> keys_list = aids_list.T\n >>> subkeys_list = fms.T\n >>> def custom_func(lazydict, key, subkeys):\n >>> annot = lazydict[key]\n >>> kpts = annot['kpts']\n >>> rchip = annot['rchip']\n >>> kpts_m = kpts.take(subkeys, axis=0)\n >>> warped_patches = vt.get_warped_patches(rchip, kpts_m)[0]\n >>> return warped_patches\n >>> data_lists1 = multigroup_lookup(lazydict, keys_list, subkeys_list, custom_func)\n >>> data_lists2 = multigroup_lookup_naive(lazydict, keys_list, subkeys_list, custom_func)\n >>> vt.sver_c_wrapper.asserteq(data_lists1, data_lists2)\n\n Example:\n >>> keys_list = [np.array([]), np.array([]), np.array([])]\n >>> subkeys_list = [np.array([]), np.array([]), np.array([])]\n \"\"\"\n import vtool_ibeis as vt\n # Group the keys in each multi-list individually\n multi_groups = [vt.group_indices(keys) for keys in keys_list]\n # Combine keys across multi-lists usings a dict_stack\n dict_list = [dict(zip(k, v)) for k, v in multi_groups]\n nested_order = ut.dict_stack2(dict_list, default=[])\n # Use keys and values for explicit ordering\n group_key_list = list(nested_order.keys())\n if len(group_key_list) == 0:\n return multigroup_lookup_naive(lazydict, keys_list, subkeys_list, custom_func)\n group_subxs_list = list(nested_order.values())\n # Extract unique and flat subkeys.\n # Maintain an information to invert back into multi-list form\n group_uf_subkeys_list = []\n group_invx_list = []\n group_cumsum_list = []\n for key, subxs in zip(group_key_list, group_subxs_list):\n # Group subkeys for each key\n subkey_group = vt.ziptake(subkeys_list, subxs, axis=0)\n flat_subkeys, group_cumsum = ut.invertible_flatten2(subkey_group)\n unique_subkeys, invx = np.unique(flat_subkeys, return_inverse=True)\n # Append info\n group_uf_subkeys_list.append(unique_subkeys)\n group_invx_list.append(invx)\n group_cumsum_list.append(group_cumsum)\n # Apply 
custom function (lookup) to unique each key and its flat subkeys\n group_subvals_list = [\n custom_func(lazydict, key, subkeys)\n for key, subkeys in zip(group_key_list, group_uf_subkeys_list)\n ]\n # Efficiently invert values back into input shape\n # First invert the subkey groupings\n multi_subvals_list = [[] for _ in range(len(multi_groups))]\n _iter = zip(group_key_list, group_subvals_list, group_cumsum_list, group_invx_list)\n for key, subvals, group_cumsum, invx in _iter:\n nonunique_subvals = list(ub.take(subvals, invx))\n unflat_subvals_list = ut.unflatten2(nonunique_subvals, group_cumsum)\n for subvals_list, unflat_subvals in zip(multi_subvals_list, unflat_subvals_list):\n subvals_list.append(unflat_subvals)\n # Then invert the key groupings\n data_lists = []\n multi_groupxs_list = list(zip(*group_subxs_list))\n for subvals_list, groupxs in zip(multi_subvals_list, multi_groupxs_list):\n datas = vt.invert_apply_grouping(subvals_list, groupxs)\n data_lists.append(datas)\n return data_lists\n\n\ndef asserteq(output1, output2, thresh=1E-8, nestpath=None, level=0, lbl1=None,\n lbl2=None, output_lbl=None, verbose=True, iswarning=False):\n \"\"\"\n recursive equality checks\n\n asserts that output1 and output2 are close to equal.\n \"\"\"\n failed = False\n if lbl1 is None:\n lbl1 = ut.get_varname_from_stack(output1, N=1)\n if lbl2 is None:\n lbl2 = ut.get_varname_from_stack(output2, N=1)\n # Setup\n if nestpath is None:\n # record the path through the nested structure as testing goes on\n nestpath = []\n # print out these variables in all error cases\n common_keys = ['lbl1', 'lbl2', 'level', 'nestpath']\n # CHECK: types\n try:\n assert type(output1) == type(output2), 'types are not equal'\n except AssertionError as ex:\n print(type(output1))\n print(type(output2))\n ut.printex(ex, 'FAILED TYPE CHECKS',\n keys=common_keys + [(type, 'output1'), (type, 'output2')],\n iswarning=iswarning)\n failed = True\n if not iswarning:\n raise\n # CHECK: length\n if hasattr(output1, '__len__'):\n try:\n assert len(output1) == len(output2), 'lens are not equal'\n except AssertionError as ex:\n keys = common_keys + [(len, 'output1'), (len, 'output2'), ]\n ut.printex(ex, 'FAILED LEN CHECKS. 
', keys=keys)\n raise\n # CHECK: ndarrays\n if isinstance(output1, np.ndarray):\n ndarray_keys = ['output1.shape', 'output2.shape']\n # CHECK: ndarray shape\n try:\n assert output1.shape == output2.shape, 'ndarray shapes are unequal'\n except AssertionError as ex:\n keys = common_keys + ndarray_keys\n ut.printex(ex, 'FAILED NUMPY SHAPE CHECKS.', keys=keys,\n iswarning=iswarning)\n failed = True\n if not iswarning:\n raise\n # CHECK: ndarray equality\n try:\n passed, error = ut.almost_eq(output1, output2, thresh,\n ret_error=True)\n assert np.all(passed), 'ndarrays are unequal.'\n except AssertionError as ex:\n # Statistics on value difference and value difference\n # above the thresholds\n diff_stats = ut.get_stats(error) # NOQA\n error_stats = ut.get_stats(error[error >= thresh]) # NOQA\n keys = common_keys + ndarray_keys + [\n (len, 'output1'), (len, 'output2'), ('diff_stats'),\n ('error_stats'), ('thresh'),\n ]\n PRINT_VAL_SAMPLE = True\n if PRINT_VAL_SAMPLE:\n keys += ['output1', 'output2']\n ut.printex(ex, 'FAILED NUMPY CHECKS.', keys=keys,\n iswarning=iswarning)\n failed = True\n if not iswarning:\n raise\n # CHECK: list/tuple items\n elif isinstance(output1, (tuple, list)):\n for count, (item1, item2) in enumerate(zip(output1, output2)):\n # recursive call\n try:\n asserteq(\n item1, item2, lbl1=lbl2, lbl2=lbl1, thresh=thresh,\n nestpath=nestpath + [count], level=level + 1)\n except AssertionError as ex:\n ut.printex(ex, 'recursive call failed',\n keys=common_keys + ['item1', 'item2', 'count'],\n iswarning=iswarning)\n failed = True\n if not iswarning:\n raise\n # CHECK: scalars\n else:\n try:\n assert output1 == output2, 'output1 != output2'\n except AssertionError as ex:\n print('nestpath= %r' % (nestpath,))\n ut.printex(ex, 'FAILED SCALAR CHECK.',\n keys=common_keys + ['output1', 'output2'],\n iswarning=iswarning)\n failed = True\n if not iswarning:\n raise\n if verbose and level == 0:\n if not failed:\n print('PASSED %s == %s' % (lbl1, lbl2))\n else:\n print('WARNING %s != %s' % (lbl1, lbl2))\n\n\ndef compare_implementations(func1, func2, args, show_output=False, lbl1='', lbl2='', output_lbl=None):\n \"\"\"\n tests two different implementations of the same function\n \"\"\"\n print('+ --- BEGIN COMPARE IMPLEMENTATIONS ---')\n func1_name = ut.get_funcname(func1)\n func2_name = ut.get_funcname(func2)\n print('func1_name = %r' % (func1_name,))\n print('func2_name = %r' % (func2_name,))\n # test both versions\n with ub.Timer('time func1=' + func1_name) as t1:\n output1 = func1(*args)\n with ub.Timer('time func2=' + func2_name) as t2:\n output2 = func2(*args)\n if t2.ellapsed == 0:\n t2.ellapsed = 1e9\n print('speedup = %r' % (t1.ellapsed / t2.ellapsed))\n try:\n asserteq(output1, output2, lbl1=lbl1, lbl2=lbl2, output_lbl=output_lbl)\n print('implementations are in agreement :) ')\n except AssertionError as ex:\n # prints out a nested list corresponding to nested structure\n ut.printex(ex, 'IMPLEMENTATIONS DO NOT AGREE', keys=[\n ('func1_name'),\n ('func2_name'), ]\n )\n raise\n finally:\n depth_profile1 = ut.depth_profile(output1)\n depth_profile2 = ut.depth_profile(output2)\n type_profile1 = ut.list_type_profile(output1)\n type_profile2 = ut.list_type_profile(output2)\n print('depth_profile1 = ' + ub.repr2(depth_profile1))\n print('depth_profile2 = ' + ub.repr2(depth_profile2))\n print('type_profile1 = ' + (type_profile1))\n print('type_profile2 = ' + (type_profile2))\n print('L ___ END COMPARE IMPLEMENTATIONS ___')\n return output1\n\n\ndef greedy_setcover(universe, subsets, 
weights=None):\n \"\"\"\n Copied implmentation of greedy set cover from stack overflow. Needs work.\n\n References:\n http://stackoverflow.com/questions/7942312/of-greedy-set-cover-faster\n\n Example:\n >>> # SLOW_DOCTEST\n >>> # xdoctest: +SKIP\n >>> from vtool_ibeis.other import * # NOQA\n >>> import vtool_ibeis as vt\n >>> universe = set([1,2,3,4])\n >>> subsets = [set([1,2]), set([1]), set([1,2,3]), set([1]), set([3,4]),\n >>> set([4]), set([1,2]), set([3,4]), set([1,2,3,4])]\n >>> weights = [1, 1, 2, 2, 2, 3, 3, 4, 4]\n >>> chosen, costs = greedy_setcover(universe, subsets, weights)\n >>> print('Cover: %r' % (chosen,))\n >>> print('Total Cost: %r=sum(%r)' % (sum(costs), costs))\n \"\"\"\n #unchosen = subsets.copy()\n uncovered = universe\n chosen = []\n costs = []\n\n def findMin(subsets, uncovered, weights):\n minCost = np.inf\n minElement = -1\n for i, s in enumerate(subsets):\n num_isect = len(s.intersection(uncovered))\n try:\n cost = weights[i] / num_isect\n if cost < minCost:\n minCost = cost\n minElement = i\n except ZeroDivisionError:\n pass\n return subsets[minElement], weights[minElement]\n\n while len(uncovered) != 0:\n S_i, cost = findMin(subsets, uncovered, weights)\n chosen.append(S_i)\n uncovered = uncovered.difference(S_i)\n costs.append(cost)\n return chosen, costs\n\n\ndef find_elbow_point(curve):\n \"\"\"\n Finds the on the curve point furthest from the line defined by the\n endpoints of the curve.\n\n Args:\n curve (ndarray): a monotonic curve\n\n Returns:\n int: tradeoff_idx - this is an elbow point in the curve\n\n References:\n http://stackoverflow.com/questions/2018178/trade-off-point-on-curve\n\n CommandLine:\n python -m vtool_ibeis.other find_elbow_point --show\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from vtool_ibeis.other import * # NOQA\n >>> curve = np.exp(np.linspace(0, 10, 100))\n >>> tradeoff_idx = find_elbow_point(curve)\n >>> result = ('tradeoff_idx = %s' % (ub.repr2(tradeoff_idx),))\n >>> print(result)\n >>> assert tradeoff_idx == 76\n >>> # xdoctest: +REQUIRES(--show)\n >>> import plottool_ibeis as pt\n >>> import vtool_ibeis as vt\n >>> point = [tradeoff_idx, curve[tradeoff_idx]]\n >>> segment = np.array([[0, len(curve) - 1], [curve[0], curve[-1]]])\n >>> e1, e2 = segment.T\n >>> dist_point = vt.closest_point_on_line_segment(point, e1, e2)\n >>> dist_line = np.array([dist_point, point]).T\n >>> pt.plot(curve, 'r', label='curve')\n >>> pt.plot(point[0], point[1], 'go', markersize=10, label='tradeoff point')\n >>> pt.plot(dist_line[0], dist_line[1], '-xb')\n >>> pt.plot(segment[0], segment[1], '-xb')\n >>> pt.legend()\n >>> ut.show_if_requested()\n \"\"\"\n num_points = len(curve)\n all_coords = np.vstack((np.arange(num_points), curve)).T\n np.array([np.arange(num_points), curve])\n first_point = all_coords[0]\n line_vec = all_coords[-1] - all_coords[0]\n line_vec_norm = line_vec / np.sqrt(np.sum(line_vec ** 2))\n vec_from_first = all_coords - first_point\n tiled_line_vec_norm = np.tile(line_vec_norm, (num_points, 1))\n scalar_product = np.sum(vec_from_first * tiled_line_vec_norm, axis=1)\n vec_from_first_parallel = np.outer(scalar_product, line_vec_norm)\n vec_to_line = vec_from_first - vec_from_first_parallel\n dist_to_line = np.sqrt(np.sum(vec_to_line ** 2, axis=1))\n tradeoff_idx = np.argmax(dist_to_line)\n return tradeoff_idx\n\n\ndef zstar_value(conf_level=.95):\n \"\"\"\n References:\n http://stackoverflow.com/questions/28242593/correct-way-to-obtain-confidence-interval-with-scipy\n \"\"\"\n import scipy.stats as spstats\n #distribution =\n 
#spstats.t.interval(.95, df=(ss - 1))[1]\n #spstats.norm.interval(.95, df=1)[1]\n zstar = spstats.norm.interval(conf_level)[1]\n #zstar = spstats.norm.ppf(spstats.norm.cdf(0) + (conf_level / 2))\n return zstar\n\n\ndef calc_error_bars_from_sample(sample_size, num_positive, pop, conf_level=.95):\n \"\"\"\n Determines a error bars of sample\n\n References:\n https://www.qualtrics.com/blog/determining-sample-size/\n http://www.surveysystem.com/sscalc.htm\n https://en.wikipedia.org/wiki/Sample_size_determination\n http://www.surveysystem.com/sample-size-formula.htm\n http://courses.wcupa.edu/rbove/Berenson/10th%20ed%20CD-ROM%20topics/section8_7.pdf\n https://en.wikipedia.org/wiki/Standard_normal_table\n https://www.unc.edu/~rls/s151-2010/class23.pdf\n \"\"\"\n #zValC_lookup = {.95: 3.8416, .99: 6.6564,}\n # We sampled ss from a population of pop and got num_positive true cases.\n ss = sample_size\n # Calculate at this confidence level\n zval = zstar_value(conf_level)\n # Calculate our plus/minus error in positive percentage\n pos_frac = (num_positive / ss)\n pf = (pop - ss) / (pop - 1)\n err_frac = zval * np.sqrt((pos_frac) * (1 - pos_frac) * pf / ss)\n lines = []\n lines.append('population_size = %r' % (pop,))\n lines.append('sample_size = %r' % (ss,))\n lines.append('num_positive = %r' % (num_positive,))\n lines.append('positive rate is %.2f%% ± %.2f%% @ %r confidence' % (\n 100 * pos_frac, 100 * err_frac, conf_level))\n lines.append('positive num is %d ± %d @ %r confidence' % (\n int(np.round(pop * pos_frac)), int(np.round(pop * err_frac)), conf_level))\n print(ut.msgblock('Calculate Sample Error Margin', '\\n'.join(lines)))\n\n\ndef calc_sample_from_error_bars(err_frac, pop, conf_level=.95, prior=.5):\n \"\"\"\n Determines a reasonable sample size to achieve desired error bars.\n\n import sympy\n p, n, N, z = sympy.symbols('prior, ss, pop, zval')\n me = sympy.symbols('err_frac')\n expr = (z * sympy.sqrt((p * (1 - p) / n) * ((N - n) / (N - 1))))\n equation = sympy.Eq(me, expr)\n nexpr = sympy.solve(equation, [n])[0]\n nexpr = sympy.simplify(nexpr)\n\n import autopep8\n print(autopep8.fix_lines(['ss = ' + str(nexpr)], autopep8._get_options({}, False)))\n\n ss = -pop * prior* (zval**2) *(prior - 1) / ((err_frac ** 2) * pop - (err_frac**2) - prior * (zval**2) * (prior - 1))\n ss = pop * prior * zval ** 2 * (prior - 1) / (-err_frac ** 2 * pop + err_frac ** 2 + prior * zval ** 2 * (prior - 1))\n \"\"\"\n # How much confidence ydo you want (in fraction of positive results)\n #zVal_lookup = {.95: 1.96, .99: 2.58,}\n zval = zstar_value(conf_level)\n\n std = .5\n zval * std * (1 - std) / err_frac\n\n #margin_error = err_frac\n #margin_error = zval * np.sqrt(prior * (1 - prior) / ss)\n\n #margin_error_small = zval * np.sqrt((prior * (1 - prior) / ss) * ((pop - ss) / (pop - 1)))\n #prior = .5 # initial uncertainty\n\n # Used for large samples\n #ss_large = (prior * (1 - prior)) / ((margin_error / zval) ** 2)\n\n # Used for small samples\n ss_numer = pop * prior * zval ** 2 * (1 - prior)\n ss_denom = (err_frac ** 2 * pop + err_frac ** 2 + prior * zval ** 2 * (1 - prior))\n ss_small = ss_numer / ss_denom\n\n #ss_ = ((zval ** 2) * 0.25) / (err_frac ** 2)\n #ss = int(np.ceil(ss_ / (1 + ((ss_ - 1) / pop))))\n ss = int(np.ceil(ss_small))\n lines = []\n lines.append('population_size = %r' % (pop,))\n lines.append('positive_prior = %r' % (prior,))\n lines.append('Desired confidence = %.2f' % (conf_level,))\n lines.append('Desired error rate is %.2f%%' % (err_frac * 100))\n lines.append('Desired number of 
errors is %d' % (int(round(err_frac * pop))))\n lines.append('Need sample sample size of %r to achive requirements' % (ss,))\n print(ut.msgblock('Calculate Required Sample Size', '\\n'.join(lines)))\n\n\ndef inbounds(num, low, high, eq=False):\n r\"\"\"\n Args:\n num (scalar or ndarray):\n low (scalar or ndarray):\n high (scalar or ndarray):\n eq (bool):\n\n Returns:\n scalar or ndarray: is_inbounds\n\n CommandLine:\n xdoctest -m ~/code/vtool_ibeis/vtool_ibeis/other.py inbounds\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from vtool_ibeis.other import * # NOQA\n >>> import utool as ut\n >>> num = np.array([[ 0. , 0.431, 0.279],\n ... [ 0.204, 0.352, 0.08 ],\n ... [ 0.107, 0.325, 0.179]])\n >>> low = .1\n >>> high = .4\n >>> eq = False\n >>> is_inbounds = inbounds(num, low, high, eq)\n >>> result = ub.repr2(is_inbounds, with_dtype=True)\n >>> print(result)\n\n \"\"\"\n import operator as op\n less = op.le if eq else op.lt\n greater = op.ge if eq else op.gt\n and_ = np.logical_and if isinstance(num, np.ndarray) else op.and_\n is_inbounds = and_(greater(num, low), less(num, high))\n return is_inbounds\n\n\ndef fromiter_nd(iter_, shape, dtype):\n \"\"\"\n Like np.fromiter but handles iterators that generated\n n-dimensional arrays. Slightly faster than np.array.\n\n maybe commit to numpy?\n\n Args:\n iter_ (iter): an iterable that generates homogenous ndarrays\n shape (tuple): the expected output shape\n dtype (dtype): the numpy datatype of the generated ndarrays\n\n Note:\n The iterable must yeild a numpy array. It cannot yeild a Python list.\n\n CommandLine:\n python -m vtool_ibeis.other fromiter_nd --show\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from vtool_ibeis.other import * # NOQA\n >>> dtype = np.float\n >>> total = 11\n >>> rng = np.random.RandomState(0)\n >>> iter_ = (rng.rand(5, 7, 3) for _ in range(total))\n >>> shape = (total, 5, 7, 3)\n >>> result = fromiter_nd(iter_, shape, dtype)\n >>> assert result.shape == shape\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from vtool_ibeis.other import * # NOQA\n >>> dtype = np.int\n >>> qfxs = np.array([1, 2, 3])\n >>> dfxs = np.array([4, 5, 6])\n >>> iter_ = (np.array(x) for x in ut.product(qfxs, dfxs))\n >>> total = len(qfxs) * len(dfxs)\n >>> shape = (total, 2)\n >>> result = fromiter_nd(iter_, shape, dtype)\n >>> assert result.shape == shape\n \"\"\"\n num_rows = shape[0]\n chunksize = np.prod(shape[1:])\n itemsize = np.dtype(dtype).itemsize\n # Create dtype that makes an entire ndarray appear as a single item\n chunk_dtype = np.dtype((np.void, itemsize * chunksize))\n arr = np.fromiter(iter_, count=num_rows, dtype=chunk_dtype)\n # Convert back to original dtype and shape\n arr = arr.view(dtype)\n arr.shape = shape\n return arr\n\n\ndef make_video2(images, outdir):\n import vtool_ibeis as vt\n from os.path import join\n n = str(int(np.ceil(np.log10(len(images)))))\n fmt = 'frame_%0' + n + 'd.png'\n ub.ensuredir(outdir)\n for count, img in enumerate(images):\n fname = join(outdir, fmt % (count))\n vt.imwrite(fname, img)\n\n\ndef make_video(images, outvid=None, fps=5, size=None,\n is_color=True, format='XVID'):\n \"\"\"\n Create a video from a list of images.\n\n References:\n http://www.xavierdupre.fr/blog/2016-03-30_nojs.html\n http://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_gui/py_video_display/py_video_display.html\n\n @param outvid output video\n @param images list of images to use in the video\n @param fps frame per second\n @param size size of each frame\n @param is_color color\n @param format see 
http://www.fourcc.org/codecs.php\n\n The function relies on http://opencv-python-tutroals.readthedocs.org/en/latest/.\n By default, the video will have the size of the first image.\n It will resize every image to this size before adding them to the video.\n \"\"\"\n # format = 'MJPG'\n # format = 'FMP4'\n import cv2\n fourcc = cv2.VideoWriter_fourcc(*str(format))\n vid = None\n for img in images:\n if vid is None:\n if size is None:\n size = img.shape[1], img.shape[0]\n vid = cv2.VideoWriter(outvid, fourcc, float(fps), size, is_color)\n if size[0] != img.shape[1] and size[1] != img.shape[0]:\n img = cv2.resize(img, size)\n vid.write(img)\n vid.release()\n return vid\n\n\ndef take_col_per_row(arr, colx_list):\n \"\"\" takes a column from each row\n\n Ignore:\n num_rows = 1000\n num_cols = 4\n\n arr = np.arange(10 * 4).reshape(10, 4)\n colx_list = (np.random.rand(10) * 4).astype(np.int)\n\n %timeit np.array([row[cx] for (row, cx) in zip(arr, colx_list)])\n %timeit arr.ravel().take(np.ravel_multi_index((np.arange(len(colx_list)), colx_list), arr.shape))\n %timeit arr.ravel().take(colx_list + np.arange(arr.shape[0]) * arr.shape[1])\n \"\"\"\n # out = np.array([row[cx] for (row, cx) in zip(arr, colx_list)])\n multix_list = np.ravel_multi_index((np.arange(len(colx_list)), colx_list), arr.shape)\n out = arr.ravel().take(multix_list)\n return out\n\n\nif __name__ == '__main__':\n \"\"\"\n CommandLine:\n xdoctest -m vtool_ibeis.other\n \"\"\"\n import xdoctest\n xdoctest.doctest_module(__file__)\n",
"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport numpy as np\nimport utool as ut\nimport ubelt as ub\nimport itertools\nfrom six.moves import range, zip\nfrom collections import OrderedDict\nimport scipy.spatial.distance as spdist\nfrom .util_math import TAU\n\nTEMP_VEC_DTYPE = np.float64\n\n\ndef testdata_hist():\n import vtool_ibeis as vt\n rng = np.random.RandomState(0)\n hist1 = vt.demodata.testdata_dummy_sift(rng=rng)\n hist2 = vt.demodata.testdata_dummy_sift(rng=rng)\n return hist1, hist2\n\n\ndef testdata_sift2():\n sift1 = np.zeros(128)\n sift2 = np.ones(128)\n sift3 = np.zeros(128)\n sift4 = np.zeros(128)\n sift5 = np.zeros(128)\n sift1[0] = 1\n sift3[-1] = 1\n sift4[0::2] = 1\n sift5[1::2] = 1\n\n def normalize_sift(sift):\n # normalize\n sift_norm = sift / np.linalg.norm(sift)\n # clip\n sift_norm = np.clip(sift_norm, 0, .2)\n # re-normalize\n sift_norm = sift_norm / np.linalg.norm(sift_norm)\n # cast hack\n sift_norm = np.clip(sift_norm * 512.0, 0, 255).astype(np.uint8)\n return sift_norm\n sift1 = normalize_sift(sift1)\n sift2 = normalize_sift(sift2)\n sift3 = normalize_sift(sift3)\n sift4 = normalize_sift(sift4)\n sift5 = normalize_sift(sift5)\n\n return sift1, sift2, sift3, sift4, sift5\n\n\ndef wrapped_distance(arr1, arr2, base, out=None):\n \"\"\"\n base = TAU corresponds to ori diff\n \"\"\"\n arr_diff = np.subtract(arr1, arr2)\n abs_diff = np.abs(arr_diff)\n mod_diff1 = np.mod(abs_diff, base)\n mod_diff2 = np.subtract(base, mod_diff1)\n arr_dist = np.minimum(mod_diff1, mod_diff2)\n if out is not None:\n out[:] = arr_dist\n return arr_dist\n\n\ndef signed_ori_distance(ori1, ori2):\n r\"\"\"\n Args:\n ori1 (ndarray):\n ori2 (ndarray):\n\n Returns:\n ndarray: ori_dist\n\n CommandLine:\n python -m vtool_ibeis.distance --exec-signed_ori_distance\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from vtool_ibeis.distance import * # NOQA\n >>> ori1 = np.array([0, 0, 3, 4, 0, 0])\n >>> ori2 = np.array([3, 4, 0, 0, np.pi, np.pi - .1])\n >>> ori_dist = signed_ori_distance(ori1, ori2)\n >>> result = ('ori_dist = %s' % (ub.repr2(ori_dist, precision=3),))\n >>> #xdoctest: +IGNORE_WHITESPACE\n >>> print(result)\n \"\"\"\n ori_dist = ori2 - ori1\n ori_dist = (ori_dist + np.pi) % TAU - np.pi\n return ori_dist\n\n\ndef ori_distance(ori1, ori2, out=None):\n r\"\"\"\n Returns the unsigned distance between two angles\n\n References:\n http://stackoverflow.com/questions/1878907/the-smallest-difference-between-2-angles\n\n Timeit:\n >>> #xdoctest: +SKIP\n >>> #xdoctest: +IGNORE_WHITESPACE\n >>> import utool as ut\n >>> setup = ub.codeblock(\n >>> r'''\n # STARTBLOCK\n import numpy as np\n tau = np.pi * 2\n rng = np.random.RandomState(53)\n ori1 = (rng.rand(100000) * tau) - np.pi\n ori2 = (rng.rand(100000) * tau) - np.pi\n\n def func_outvars():\n ori_dist = np.abs(ori1 - ori2)\n np.mod(ori_dist, tau, out=ori_dist)\n np.minimum(ori_dist, np.subtract(tau, ori_dist), out=ori_dist)\n return ori_dist\n\n def func_orig():\n ori_dist = np.abs(ori1 - ori2) % tau\n ori_dist = np.minimum(ori_dist, tau - ori_dist)\n return ori_dist\n # ENDBLOCK\n ''')\n >>> stmt_list = ub.codeblock(\n >>> '''\n func_outvars()\n func_orig()\n '''\n >>> ).split('\\n')\n >>> ut.util_dev.rrr()\n >>> ut.util_dev.timeit_compare(stmt_list, setup, int(1E3))\n\n CommandLine:\n python -m vtool_ibeis.distance --test-ori_distance\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from vtool_ibeis.distance import * # NOQA\n >>> rng = np.random.RandomState(0)\n >>> ori1 = 
(rng.rand(10) * TAU) - np.pi\n >>> ori2 = (rng.rand(10) * TAU) - np.pi\n >>> dist_ = ori_distance(ori1, ori2)\n >>> result = ub.repr2(ori1, precision=1)\n >>> result += '\\n' + ub.repr2(ori2, precision=1)\n >>> result += '\\n' + ub.repr2(dist_, precision=1)\n >>> #xdoctest: +IGNORE_WHITESPACE\n >>> print(result)\n\n Example2:\n >>> # ENABLE_DOCTEST\n >>> from vtool_ibeis.distance import * # NOQA\n >>> ori1 = np.array([ 0.3, 7.0, 0.0, 3.1], dtype=np.float64)\n >>> ori2 = np.array([ 6.8, -1.0, 0.0, -3.1], dtype=np.float64)\n >>> dist_ = ori_distance(ori1, ori2)\n >>> result = ub.repr2(dist_, precision=2)\n >>> #xdoctest: +IGNORE_WHITESPACE\n >>> print(result)\n\n Example3:\n >>> # ENABLE_DOCTEST\n >>> from vtool_ibeis.distance import * # NOQA\n >>> ori1 = .3\n >>> ori2 = 6.8\n >>> dist_ = ori_distance(ori1, ori2)\n >>> result = ub.repr2(dist_, precision=2)\n >>> print(result)\n\n Ignore:\n # This also works\n ori_dist = np.abs(np.arctan2(np.sin(ori1 - ori2), np.cos(ori1 - ori2)))\n %timeit np.abs(np.arctan2(np.sin(ori1 - ori2), np.cos(ori1 - ori2)))\n \"\"\"\n return cyclic_distance(ori1, ori2, modulo=TAU, out=out)\n\n\ndef cyclic_distance(arr1, arr2, modulo, out=None):\n r\"\"\"\n returns an unsigned distance\n\n Args:\n arr1 (ndarray):\n arr2 (ndarray):\n modulo (float or int):\n out (ndarray): (default = None)\n\n Returns:\n ndarray: arr_dist\n\n CommandLine:\n python -m vtool_ibeis.distance cyclic_distance\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from vtool_ibeis.distance import * # NOQA\n >>> out = None\n >>> modulo = 8\n >>> offset = 0 # doesnt matter what offset is\n >>> arr1 = np.hstack([np.arange(offset, modulo + offset), np.nan])\n >>> arr2 = arr1[:, None]\n >>> arr_dist = cyclic_distance(arr1, arr2, modulo, out)\n >>> result = ('arr_dist =\\n%s' % (ub.repr2(arr_dist),))\n >>> #xdoctest: +IGNORE_WHITESPACE\n >>> print(result)\n \"\"\"\n arr_diff = np.subtract(arr1, arr2, out=out)\n abs_diff = np.abs(arr_diff, out=out)\n mod_diff1 = np.mod(abs_diff, modulo, out=out)\n mod_diff2 = np.subtract(modulo, mod_diff1)\n arr_dist = np.minimum(mod_diff1, mod_diff2, out=out)\n return arr_dist\n\n\ndef signed_cyclic_distance(arr1, arr2, modulo, out=None):\n arr_diff = np.subtract(arr1, arr2, out=out)\n half_mod = modulo / 2\n arr_dist = (arr_diff + half_mod) % modulo - half_mod\n return arr_dist\n\n\ndef det_distance(det1, det2):\n \"\"\" Returns how far off determinants are from one another\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from vtool_ibeis.distance import * # NOQA\n >>> rng = np.random.RandomState(53)\n >>> det1 = rng.rand(5)\n >>> det2 = rng.rand(5)\n >>> scaledist = det_distance(det1, det2)\n >>> result = ub.repr2(scaledist, precision=2, threshold=2)\n >>> #xdoctest: +IGNORE_WHITESPACE\n >>> print(result)\n \"\"\"\n det_dist = det1 / det2\n # Flip ratios that are less than 1\n _flip_flag = det_dist < 1\n det_dist[_flip_flag] = np.reciprocal(det_dist[_flip_flag])\n return det_dist\n\n\ndef L1(hist1, hist2, dtype=TEMP_VEC_DTYPE):\n \"\"\" returns L1 (aka manhatten or grid) distance between two histograms \"\"\"\n return (np.abs(np.asarray(hist1, dtype) - np.asarray(hist2, dtype))).sum(-1)\n\n\ndef L2_sqrd(hist1, hist2, dtype=TEMP_VEC_DTYPE):\n \"\"\" returns the squared L2 distance\n\n # FIXME:\n if hist1.shape = (0,) and hist.shape = (0,) then result=0.0\n\n SeeAlso:\n L2\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from vtool_ibeis.distance import * # NOQA\n >>> import numpy\n >>> ut.exec_funckw(L2_sqrd, globals())\n >>> rng = np.random.RandomState(53)\n >>> hist1 = rng.rand(5, 2)\n 
>>> hist2 = rng.rand(5, 2)\n >>> l2dist = L2_sqrd(hist1, hist2)\n >>> result = ub.repr2(l2dist, precision=2, threshold=2)\n >>> #xdoctest: +IGNORE_WHITESPACE\n >>> print(result)\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from vtool_ibeis.distance import * # NOQA\n >>> hist1 = 3\n >>> hist2 = 0\n >>> result = L2_sqrd(hist1, hist2)\n >>> print(result)\n \"\"\"\n # Carefull, this will not return the correct result if the types are unsigned.\n hist1_ = np.asarray(hist1, dtype)\n hist2_ = np.asarray(hist2, dtype)\n return ((hist1_ - hist2_) ** 2).sum(-1) # this is faster\n\n\ndef understanding_pseudomax_props(mode=2):\n \"\"\"\n Function showing some properties of distances between normalized pseudomax vectors\n\n CommandLine:\n python -m vtool_ibeis.distance --test-understanding_pseudomax_props\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from vtool_ibeis.distance import * # NOQA\n >>> for mode in [0, 1, 2, 3]:\n ... print('+---')\n ... print('mode = %r' % (mode,))\n ... result = understanding_pseudomax_props(mode)\n ... print('L___')\n >>> print(result)\n \"\"\"\n import vtool_ibeis as vt\n pseudo_max = 512\n rng = np.random.RandomState(0)\n num = 10\n if mode == 0:\n dim = 2\n p1_01 = (vt.normalize_rows(rng.rand(num, dim)))\n p2_01 = (vt.normalize_rows(rng.rand(num, dim)))\n elif mode == 1:\n p1_01 = vt.demodata.testdata_dummy_sift(num, rng) / pseudo_max\n p2_01 = vt.demodata.testdata_dummy_sift(num, rng) / pseudo_max\n elif mode == 2:\n # Build theoretically maximally distant normalized vectors (type 1)\n dim = 128\n p1_01 = np.zeros((1, dim))\n p2_01 = np.zeros((1, dim))\n p2_01[:, 0::2] = 1\n p1_01[:, 1::2] = 1\n p1_01 = vt.normalize_rows(p1_01)\n p2_01 = vt.normalize_rows(p2_01)\n elif mode == 3:\n # Build theoretically maximally distant vectors (type 2)\n # This mode will clip if cast to uint8, thus failing the test\n dim = 128\n p1_01 = np.zeros((1, dim))\n p2_01 = np.zeros((1, dim))\n p2_01[:, 0] = 1\n p1_01[:, 1:] = 1\n p1_01 = vt.normalize_rows(p1_01)\n p2_01 = vt.normalize_rows(p2_01)\n pass\n print('ndims = %r' % (p1_01.shape[1]))\n\n p1_01 = p1_01.astype(TEMP_VEC_DTYPE)\n p2_01 = p2_01.astype(TEMP_VEC_DTYPE)\n\n p1_256 = p1_01 * pseudo_max\n p2_256 = p2_01 * pseudo_max\n\n dist_sqrd_01 = vt.L2_sqrd(p1_01, p2_01)\n dist_sqrd_256 = vt.L2_sqrd(p1_256, p2_256)\n\n dist_01 = np.sqrt(dist_sqrd_01)\n dist_256 = np.sqrt(dist_sqrd_256)\n\n print('dist_sqrd_01 = %s' % (ub.repr2(dist_sqrd_01, precision=2),))\n print('dist_sqrd_256 = %s' % (ub.repr2(dist_sqrd_256, precision=2),))\n print('dist_01 = %s' % (ub.repr2(dist_01, precision=2),))\n print('dist_256 = %s' % (ub.repr2(dist_256, precision=2),))\n\n print('--')\n print('sqrt(2) = %f' % (np.sqrt(2)))\n print('--')\n\n assert np.all(dist_01 == vt.L2(p1_01, p2_01))\n assert np.all(dist_256 == vt.L2(p1_256, p2_256))\n\n const_sqrd = dist_sqrd_256 / dist_sqrd_01\n const = dist_256 / dist_01\n\n print('const = %r' % (const[0],))\n print('const_sqrd = %r' % (const_sqrd[0],))\n print('1 / const = %r' % (1 / const[0],))\n print('1 / const_sqrd = %r' % (1 / const_sqrd[0],))\n\n assert ub.allsame(const)\n assert ub.allsame(const_sqrd)\n\n assert np.all(const == np.sqrt(const_sqrd))\n\n # Assert that distance conversions work\n assert np.all(dist_256 / const == dist_01)\n assert np.all(dist_sqrd_256 / const_sqrd == dist_sqrd_01)\n print('Conversions work')\n\n print('Maximal L2 distance between any two NON-NEGATIVE L2-NORMALIZED'\n ' vectors should always be sqrt(2)')\n\n\ndef L2(hist1, hist2):\n \"\"\" returns L2 (aka euclidean or standard) distance 
between two histograms \"\"\"\n return np.sqrt(L2_sqrd(hist1, hist2))\n\n\ndef hist_isect(hist1, hist2):\n \"\"\" returns histogram intersection distance between two histograms \"\"\"\n numer = (np.dstack([hist1, hist2])).min(-1).sum(-1)\n denom = hist2.sum(-1)\n hisect_dist = 1 - (numer / denom)\n if len(hisect_dist) == 1:\n hisect_dist = hisect_dist[0]\n return hisect_dist\n\nVALID_DISTS = [\n 'L1',\n 'L2',\n 'L2_sift',\n 'L2_sqrd',\n 'bar_L2_sift',\n 'bar_cos_sift',\n 'cos_sift',\n 'det_distance',\n 'emd',\n 'hist_isect',\n 'nearest_point',\n 'ori_distance',\n]\n\n\ndef compute_distances(hist1, hist2, dist_list=['L1', 'L2']):\n r\"\"\"\n Args:\n hist1 (ndarray):\n hist2 (ndarray):\n dist_list (list): (default = ['L1', 'L2'])\n\n Returns:\n dict: dist_dict\n\n CommandLine:\n python -m vtool_ibeis.distance --test-compute_distances\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from vtool_ibeis.distance import * # NOQA\n >>> hist1 = np.array([[1, 2], [2, 1], [0, 0]])\n >>> hist2 = np.array([[1, 2], [3, 1], [2, 2]])\n >>> dist_list = ['L1', 'L2']\n >>> dist_dict = compute_distances(hist1, hist2, dist_list)\n >>> result = ub.repr2(dist_dict, precision=3)\n >>> print(result)\n \"\"\"\n dtype_ = np.float64\n hist1 = np.array(hist1, dtype=dtype_)\n hist2 = np.array(hist2, dtype=dtype_)\n # TODO: enumerate value distances\n dist_funcs = [globals()[type_] for type_ in dist_list]\n val_list = [func(hist1, hist2) for func in dist_funcs]\n dist_dict = OrderedDict(list(zip(dist_list, val_list)))\n return dist_dict\n\n\ndef bar_L2_sift(hist1, hist2):\n \"\"\"\n Normalized SIFT L2\n\n Args:\n hist1 (ndarray): Nx128 array of uint8 with pseudomax trick\n hist2 (ndarray): Nx128 array of uint8 with pseudomax trick\n\n CommandLine:\n python -m vtool_ibeis.distance --test-bar_L2_sift\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from vtool_ibeis.distance import * # NOQA\n >>> hist1, hist2 = testdata_hist()\n >>> barl2_dist = bar_L2_sift(hist1, hist2)\n >>> result = ub.repr2(barl2_dist, precision=2)\n >>> #xdoctest: +IGNORE_WHITESPACE\n >>> print(result)\n \"\"\"\n return 1.0 - L2_sift(hist1, hist2)\n\n\ndef L2_sift(hist1, hist2):\n \"\"\"\n Normalized SIFT L2\n\n Args:\n hist1 (ndarray): Nx128 array of uint8 with pseudomax trick\n hist2 (ndarray): Nx128 array of uint8 with pseudomax trick\n\n Returns:\n ndarray: euclidean distance between 0-1 normalized sift descriptors\n\n CommandLine:\n python -m vtool_ibeis.distance --test-L2_sift\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from vtool_ibeis.distance import * # NOQA\n >>> hist1, hist2 = testdata_hist()\n >>> sift1, sift2, sift3, sift4, sift5 = testdata_sift2()\n >>> l2_dist = L2_sift(hist1, hist2)\n >>> max_dist = L2_sift(sift4, sift5)\n >>> assert np.isclose(max_dist, 1.0)\n >>> result = ub.repr2(l2_dist, precision=2)\n >>> #xdoctest: +IGNORE_WHITESPACE\n >>> print(result)\n \"\"\"\n # The corret number is 512, because thats what is used in siftdesc.cpp\n # remove the pseudo max hack\n psuedo_max = 512.0\n max_l2_dist = np.sqrt(2) # maximum L2 distance should always be sqrt 2\n sift1 = hist1.astype(TEMP_VEC_DTYPE) / psuedo_max\n sift2 = hist2.astype(TEMP_VEC_DTYPE) / psuedo_max\n l2_dist = L2(sift1, sift2)\n sift_dist = l2_dist / max_l2_dist\n return sift_dist\n\n\ndef L2_root_sift(hist1, hist2):\n \"\"\"\n Normalized Root-SIFT L2\n\n Args:\n hist1 (ndarray): Nx128 array of uint8 with pseudomax trick\n hist2 (ndarray): Nx128 array of uint8 with pseudomax trick\n\n Returns:\n ndarray: euclidean distance between 0-1 normalized sift descriptors\n \"\"\"\n # remove the 
pseudo max hack\n psuedo_max = 512.0\n max_root_l2_dist = 2 # This is a guess\n sift1 = hist1.astype(TEMP_VEC_DTYPE) / psuedo_max\n sift2 = hist2.astype(TEMP_VEC_DTYPE) / psuedo_max\n root_sift1 = np.sqrt(sift1)\n root_sift2 = np.sqrt(sift2)\n l2_dist = L2(root_sift1, root_sift2)\n # Usure if correct;\n l2_root_dist = l2_dist / max_root_l2_dist\n return l2_root_dist\n\n\ndef L2_sift_sqrd(hist1, hist2):\n \"\"\"\n Normalized SIFT L2**2\n\n Args:\n hist1 (ndarray): Nx128 array of uint8 with pseudomax trick\n hist2 (ndarray): Nx128 array of uint8 with pseudomax trick\n\n Returns:\n ndarray: squared euclidean distance between 0-1 normalized sift descriptors\n \"\"\"\n # remove the pseudo max hack\n psuedo_max = 512.0\n max_l2_dist_sqrd = 2\n sift1 = hist1.astype(TEMP_VEC_DTYPE) / psuedo_max\n sift2 = hist2.astype(TEMP_VEC_DTYPE) / psuedo_max\n l2_sqrd_dist = L2_sqrd(sift1, sift2)\n return l2_sqrd_dist / max_l2_dist_sqrd\n\n\ndef bar_cos_sift(hist1, hist2):\n \"\"\" 1 - cos dist \"\"\"\n return 1.0 - cos_sift(hist1, hist2)\n\n\ndef cos_sift(hist1, hist2):\n \"\"\"\n cos dist\n\n CommandLine:\n python -m vtool_ibeis.distance --test-cos_sift\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from vtool_ibeis.distance import * # NOQA\n >>> hist1, hist2 = testdata_hist()\n >>> l2_dist = cos_sift(hist1, hist2)\n >>> #xdoctest: +IGNORE_WHITESPACE\n >>> result = ub.repr2(l2_dist, precision=2)\n >>> print(result)\n \"\"\"\n psuedo_max = 512.0\n sift1 = hist1.astype(TEMP_VEC_DTYPE) / psuedo_max\n sift2 = hist2.astype(TEMP_VEC_DTYPE) / psuedo_max\n return (sift1 * sift2).sum(-1)\n\n\ndef cosine_dist(hist1, hist2):\n return (hist1 * hist2).sum(-1)\n\n\ndef _assert_siftvec(sift):\n import vtool_ibeis as vt\n assert vt.check_sift_validity(sift)\n\n\ndef emd(hist1, hist2, cost_matrix='sift'):\n \"\"\"\n earth mover's distance by robjects(lpSovle::lp.transport)\n require: lpsolve55-5.5.0.9.win32-py2.7.exe\n\n CommandLine:\n python -m vtool_ibeis.distance --test-emd\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from vtool_ibeis.distance import * # NOQA\n >>> hist1, hist2 = testdata_hist()\n >>> emd_dists = emd(hist1, hist2)\n >>> result = ub.repr2(emd_dists, precision=2)\n >>> #xdoctest: +IGNORE_WHITESPACE\n >>> print(result)\n np.array([ 2063.99, 2078.02, 2109.03, 2011.99, 2130.99, 2089.01,\n 2030.99, 2294.98, 2026.02, 2426.01])\n\n References:\n pip install pyemd\n https://github.com/andreasjansson/python-emd\n http://www.cs.huji.ac.il/~werman/Papers/ECCV2008.pdf\n http://stackoverflow.com/questions/15706339/compute-emd-2umpy-arrays-using-opencv\n http://www.cs.huji.ac.il/~ofirpele/FastEMD/code/\n http://www.cs.huji.ac.il/~ofirpele/publications/ECCV2008.pdf\n \"\"\"\n import pyemd\n if cost_matrix == 'sift':\n # Build cost matrix where bin-to-bin cost is 0,\n # neighbor cost is 1, and other cost is 2\n N = 8\n cost_matrix = np.full((128, 128), 2)\n i, j = np.meshgrid(np.arange(128), np.arange(128))\n cost_matrix[i == j] = 0\n absdiff = np.abs(i - j)\n is_neighbor = np.abs(np.minimum(absdiff, N - absdiff)) == 1\n cost_matrix[is_neighbor] = 1.0\n #print(cost_matrix[0:16, 0:16])\n\n if len(hist1.shape) == 2:\n dist = np.array([\n pyemd.emd(hist1_.astype(np.float), hist2_.astype(np.float), cost_matrix)\n for hist1_, hist2_ in zip(hist1, hist2)])\n else:\n dist = pyemd.emd(hist1.astype(np.float), hist2.astype(np.float), cost_matrix)\n return dist\n\n\ndef nearest_point(x, y, pts, conflict_mode='next', __next_counter=[0]):\n \"\"\" finds the nearest point(s) in pts to (x, y)\n\n TODO: depricate\n \"\"\"\n #with 
ut.embed_on_exception_context:\n dists = (pts.T[0] - x) ** 2 + (pts.T[1] - y) ** 2\n fx = dists.argmin()\n mindist = dists[fx]\n other_fx = np.where(mindist == dists)[0]\n if len(other_fx) > 0:\n if conflict_mode == 'random':\n np.random.shuffle(other_fx)\n fx = other_fx[0]\n elif conflict_mode == 'next':\n __next_counter[0] += 1\n idx = __next_counter[0] % len(other_fx)\n fx = other_fx[idx]\n elif conflict_mode == 'all':\n fx = other_fx\n elif conflict_mode == 'first':\n fx = fx\n else:\n raise AssertionError('unknown conflict_mode=%r' % (conflict_mode,))\n return fx, mindist\n\n\ndef closest_point(pt, pt_arr, distfunc=L2_sqrd):\n \"\"\" finds the nearest point(s) in pts to (x, y)\n pt = np.array([1])\n pt_arr = np.array([1.1, 2, .95, 20])[:, None]\n distfunc = vt.L2_sqrd\n \"\"\"\n #import vtool_ibeis as vt\n assert len(pt_arr) > 0\n dists = distfunc(pt, pt_arr)\n xlist = dists.argsort()\n if len(xlist) > 1:\n if dists[xlist[0]] == dists[xlist[1]]:\n print('conflict')\n index = xlist[0]\n dist = dists[index]\n return index, dist\n\n\ndef haversine(latlon1, latlon2):\n r\"\"\"\n Calculate the great circle distance between two points\n on the earth (specified in decimal degrees)\n\n Args:\n latlon1 (ndarray):\n latlon2 (ndarray):\n\n References:\n en.wikipedia.org/wiki/Haversine_formula\n gis.stackexchange.com/questions/81551/matching-gps-tracks\n stackoverflow.com/questions/4913349/haversine-distance-gps-points\n\n CommandLine:\n python -m vtool_ibeis.distance --exec-haversine\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from vtool_ibeis.distance import * # NOQA\n >>> import scipy.spatial.distance as spdist\n >>> import vtool_ibeis as vt\n >>> import functools\n >>> gpsarr_track_list_ = [\n ... np.array([[ -80.21895315, -158.81099213],\n ... [ -12.08338926, 67.50368014],\n ... [ -11.08338926, 67.50368014],\n ... [ -11.08338926, 67.50368014],]\n ... ),\n ... np.array([[ 9.77816711, -17.27471498],\n ... [ -51.67678814, -158.91065495],])\n ... 
]\n >>> latlon1 = gpsarr_track_list_[0][0]\n >>> latlon2 = gpsarr_track_list_[0][1]\n >>> kilometers = vt.haversine(latlon1, latlon2)\n >>> haversin_pdist = functools.partial(spdist.pdist, metric=vt.haversine)\n >>> dist_vector_list = list(map(haversin_pdist, gpsarr_track_list_))\n >>> dist_matrix_list = list(map(spdist.squareform, dist_vector_list))\n >>> #xdoctest: +IGNORE_WHITESPACE\n >>> result = ('dist_matrix_list = %s' % (ut.repr3(dist_matrix_list, precision=2, with_dtype=True),))\n >>> print(result)\n \"\"\"\n # FIXME; lat, lon should be different columns not different rows\n # convert decimal degrees to radians\n lat1, lon1 = np.radians(latlon1)\n lat2, lon2 = np.radians(latlon2)\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = (np.sin(dlat / 2) ** 2) + np.cos(lat1) * np.cos(lat2) * (np.sin(dlon / 2) ** 2)\n c = 2 * np.arcsin(np.sqrt(a))\n EARTH_RADIUS_KM = 6367.0\n kilometers = EARTH_RADIUS_KM * c\n return kilometers\n\n\ndef safe_pdist(arr, *args, **kwargs):\n \"\"\"\n Kwargs:\n metric = ut.absdiff\n\n SeeAlso:\n scipy.spatial.distance.pdist\n \"\"\"\n if arr is None or len(arr) < 2:\n return None\n else:\n if len(arr.shape) == 1:\n return spdist.pdist(arr[:, None], *args, **kwargs)\n else:\n return spdist.pdist(arr, *args, **kwargs)\n\n\ndef pdist_indicies(num):\n return list(itertools.combinations(range(num), 2))\n\n\ndef pdist_argsort(x):\n \"\"\"\n Sorts 2d indicies by their distnace matrix output from scipy.spatial.distance\n x = np.array([ 3.05555556e-03, 1.47619797e+04, 1.47619828e+04])\n\n Args:\n x (ndarray):\n\n Returns:\n ndarray: sortx_2d\n\n CommandLine:\n python -m vtool_ibeis.distance --test-pdist_argsort\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from vtool_ibeis.distance import * # NOQA\n >>> x = np.array([ 21695.78, 10943.76, 10941.44, 25867.64, 10752.03,\n >>> 10754.35, 4171.86, 2.32, 14923.89, 14926.2 ],\n >>> dtype=np.float64)\n >>> sortx_2d = pdist_argsort(x)\n >>> result = ('sortx_2d = %s' % (str(sortx_2d),))\n >>> print(result)\n sortx_2d = [(2, 3), (1, 4), (1, 2), (1, 3), (0, 3), (0, 2), (2, 4), (3, 4), (0, 1), (0, 4)]\n \"\"\"\n OLD = True\n #compare_idxs = [(r, c) for r, c in itertools.product(range(len(x) / 2),\n #range(len(x) / 2)) if (c > r)]\n if OLD:\n mat = spdist.squareform(x)\n matu = np.triu(mat)\n sortx_row, sortx_col = np.unravel_index(matu.ravel().argsort(), matu.shape)\n # only take where col is larger than row due to upper triu\n sortx_2d = [(r, c) for r, c in zip(sortx_row, sortx_col) if (c > r)]\n else:\n num_rows = len(x) // 2\n compare_idxs = ut.flatten([[(r, c) for c in range(r + 1, num_rows)]\n for r in range(num_rows)])\n sortx = x.argsort()\n sortx_2d = ut.take(compare_idxs, sortx)\n return sortx_2d\n\n\nif __name__ == '__main__':\n r\"\"\"\n CommandLine:\n python -m vtool_ibeis.distance all\n \"\"\"\n import xdoctest\n xdoctest.doctest_module(__file__)\n",
"from __future__ import absolute_import, division, print_function\nfrom six.moves import range, zip # NOQA\nimport numpy as np\nimport cv2\nimport utool as ut\nimport ubelt as ub\n\nDEBUG_SEGM = False\n\n\ndef printDBG(msg):\n if DEBUG_SEGM:\n print(msg)\n pass\n\n\ndef resize_img_and_bbox(img_fpath, bbox_, new_size=None, sqrt_area=400.0):\n printDBG('[segm] imread(%r) ' % img_fpath)\n full_img = cv2.imread(img_fpath)\n (full_h, full_w) = full_img.shape[:2] # Image Shape\n printDBG('[segm] full_img.shape=%r' % (full_img.shape,))\n (rw_, rh_) = bbox_[2:]\n # Ensure that we know the new chip size\n if new_size is None:\n target_area = float(sqrt_area) ** 2\n\n def _resz(w, h):\n ht = np.sqrt(target_area * h / w)\n wt = w * ht / h\n return (int(round(wt)), int(round(ht)))\n new_size_ = _resz(rw_, rh_)\n else:\n new_size_ = new_size\n # Get Scale Factors\n fx = new_size_[0] / rw_\n fy = new_size_[1] / rh_\n printDBG('[segm] fx=%r fy=%r' % (fx, fy))\n dsize = (int(round(fx * full_w)), int(round(fy * full_h)))\n printDBG('[segm] dsize=%r' % (dsize,))\n # Resize the image\n img_resz = cv2.resize(full_img, dsize, interpolation=cv2.INTER_LANCZOS4)\n # Get new ANNOTATION in resized image\n bbox_resz = np.array(np.round(bbox_ * fx), dtype=np.int64)\n return img_resz, bbox_resz\n\n\ndef clean_mask(mask, num_dilate=3, num_erode=3, window_frac=.025):\n \"\"\"\n Clean the mask\n (num_erode, num_dilate) = (1, 1)\n (w, h) = (10, 10)\n \"\"\"\n w = h = int(round(min(mask.shape) * window_frac))\n element = cv2.getStructuringElement(cv2.MORPH_CROSS, (w, h))\n _mask = mask\n # compute the closing\n for ix in range(num_dilate):\n _mask = cv2.dilate(_mask, element)\n for ix in range(num_erode):\n _mask = cv2.erode(_mask, element)\n return _mask\n\n\ndef fill_holes(mask):\n mode = cv2.RETR_CCOMP\n method = cv2.CHAIN_APPROX_SIMPLE\n image, contours, hierarchy = cv2.findContours(mask, mode, method)\n out = cv2.drawContours(image, contours, -1, (1, 0, 0))\n return out\n\n\ndef demo_grabcut(bgr_img):\n r\"\"\"\n Args:\n img (ndarray[uint8_t, ndim=2]): image data\n\n CommandLine:\n python -m vtool_ibeis.segmentation --test-demo_grabcut --show\n\n SeeAlso:\n python -m ibeis.algo.preproc.preproc_probchip --test-postprocess_dev\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from vtool_ibeis.segmentation import * # NOQA\n >>> # build test data\n >>> import utool as ut\n >>> import plottool_ibeis as pt\n >>> import vtool_ibeis as vt\n >>> img_fpath = ut.grab_test_imgpath('easy1.png')\n >>> bgr_img = vt.imread(img_fpath)\n >>> # execute function\n >>> print(bgr_img.shape)\n >>> result = demo_grabcut(bgr_img)\n >>> # verify results\n >>> print(result)\n >>> ## xdoctest: +REQUIRES(--show)\n >>> pt.show_if_requested()\n \"\"\"\n import plottool_ibeis as pt\n from plottool_ibeis import interact_impaint\n label_colors = [ 255, 170, 50, 0]\n label_values = [cv2.GC_FGD, cv2.GC_PR_FGD, cv2.GC_PR_BGD, cv2.GC_BGD]\n h, w = bgr_img.shape[0:2]\n init_mask = np.zeros((h, w), dtype=np.float32) # Initialize: mask\n # Set inside to cv2.GC_PR_FGD (probably forground)\n init_mask[ :, :] = label_colors[label_values.index(cv2.GC_PR_BGD)]\n # Set border to cv2.GC_BGD (definitely background)\n init_mask[ 0, :] = label_colors[label_values.index(cv2.GC_BGD)]\n init_mask[-1, :] = label_colors[label_values.index(cv2.GC_BGD)]\n init_mask[:, 0] = label_colors[label_values.index(cv2.GC_BGD)]\n init_mask[:, -1] = label_colors[label_values.index(cv2.GC_BGD)]\n #import vtool_ibeis as vt\n cached_mask_fpath = 'tmp_mask.png'\n if ub.argflag('--nocache'):\n 
ut.delete(cached_mask_fpath)\n print('unique init mask colors')\n print(np.unique(init_mask))\n custom_mask = interact_impaint.cached_impaint(bgr_img, cached_mask_fpath,\n label_colors=label_colors,\n init_mask=init_mask)\n print('unique custom mask colors')\n print(np.unique(custom_mask))\n print('delete tmp_mask.png to redo')\n #if ut.checkpath(cached_mask_fpath):\n # custom_mask = vt.imread(cached_mask_fpath, grayscale=True)\n #else:\n # custom_mask = interact_impaint.impaint_mask(bgr_img, label_colors, init_mask=init_mask)\n # vt.imwrite(cached_mask_fpath, custom_mask)\n\n prior_mask = custom_mask.copy()\n\n # Convert colors to out labels\n label_locs = [custom_mask == color for color in label_colors]\n # Put user labels in there\n for label_loc, value in zip(label_locs, label_values):\n prior_mask[label_loc] = value\n prior_mask = prior_mask.astype(np.uint8)\n print('running grabcut')\n #print('prior_mask.dtype = %r' % (prior_mask.dtype,))\n #print('bgr_img.dtype = %r' % (bgr_img.dtype,))\n with ut.Timer('grabcut'):\n post_mask = grabcut(bgr_img, prior_mask)\n if post_mask.dtype == np.uint8:\n post_mask = post_mask.astype(np.float) / 255.0\n seg_chip = mask_colored_img(bgr_img, post_mask, 'bgr')\n print('finished running grabcut')\n pt.imshow(post_mask * 255, pnum=(1, 2, 1))\n pt.imshow(seg_chip, pnum=(1, 2, 2))\n\n\ndef grabcut(bgr_img, prior_mask, binary=True):\n \"\"\"\n Referencs:\n http://docs.opencv.org/trunk/doc/py_tutorials/py_imgproc/py_grabcut/py_grabcut.html\n \"\"\"\n # Grab Cut Parameters\n (h, w) = bgr_img.shape[0:2]\n rect = (0, 0, w, h)\n num_iters = 5\n mode = cv2.GC_INIT_WITH_MASK\n bgd_model = np.zeros((1, 13 * 5), np.float64)\n fgd_model = np.zeros((1, 13 * 5), np.float64)\n # Grab Cut Execution\n post_mask = prior_mask.copy()\n cv2.grabCut(bgr_img, post_mask, rect, bgd_model, fgd_model, num_iters, mode=mode)\n if binary:\n is_forground = (post_mask == cv2.GC_FGD) + (post_mask == cv2.GC_PR_FGD)\n post_mask = np.where(is_forground, 255, 0).astype('uint8')\n else:\n label_colors = [ 255, 170, 50, 0]\n label_values = [cv2.GC_FGD, cv2.GC_PR_FGD, cv2.GC_PR_BGD, cv2.GC_BGD]\n pos_list = [post_mask == value for value in label_values]\n for pos, color in zip(pos_list, label_colors):\n post_mask[pos] = color\n return post_mask\n\n\ninto_hsv_flags = {\n 'bgr': cv2.COLOR_BGR2HSV,\n 'rgb': cv2.COLOR_RGB2HSV,\n}\n\nfrom_hsv_flags = {\n 'bgr': cv2.COLOR_HSV2BGR,\n}\n\n\ndef mask_colored_img(img_rgb, mask, encoding='bgr'):\n if mask.dtype == np.uint8:\n mask = mask.astype(np.float) / 255.0\n into_hsv_flag = into_hsv_flags[encoding]\n from_hsv_flag = from_hsv_flags[encoding]\n # Mask out value component\n img_hsv = cv2.cvtColor(img_rgb, into_hsv_flag)\n img_hsv = np.array(img_hsv, dtype=np.float) / 255.0\n VAL_INDEX = 2\n img_hsv[:, :, VAL_INDEX] *= mask\n img_hsv = np.array(np.round(img_hsv * 255.0), dtype=np.uint8)\n masked_img_rgb = cv2.cvtColor(img_hsv, from_hsv_flag)\n return masked_img_rgb\n\n\n# Open CV relevant values:\n# grabcut_mode = cv2.GC_EVAL\n# grabcut_mode = cv2.GC_INIT_WITH_RECT\n# cv2.GC_BGD, cv2.GC_PR_BGD, cv2.GC_PR_FGD, cv2.GC_FGD\n#@profile\ndef grabcut2(rgb_chip):\n (h, w) = rgb_chip.shape[0:2]\n _mask = np.zeros((h, w), dtype=np.uint8) # Initialize: mask\n # Set inside to cv2.GC_PR_FGD (probably forground)\n _mask[ :, :] = cv2.GC_PR_FGD\n # Set border to cv2.GC_BGD (definitely background)\n _mask[ 0, :] = cv2.GC_BGD\n _mask[-1, :] = cv2.GC_BGD\n _mask[:, 0] = cv2.GC_BGD\n _mask[:, -1] = cv2.GC_BGD\n # Grab Cut Parameters\n rect = (0, 0, w, h)\n num_iters = 
5\n mode = cv2.GC_INIT_WITH_MASK\n bgd_model = np.zeros((1, 13 * 5), np.float64)\n fgd_model = np.zeros((1, 13 * 5), np.float64)\n # Grab Cut Execution\n cv2.grabCut(rgb_chip, _mask, rect, bgd_model, fgd_model, num_iters, mode=mode)\n is_forground = (_mask == cv2.GC_FGD) + (_mask == cv2.GC_PR_FGD)\n chip_mask = np.where(is_forground, 255, 0).astype('uint8')\n # Crop\n chip_mask = clean_mask(chip_mask)\n chip_mask = np.array(chip_mask, np.float) / 255.0\n # Mask value component of HSV space\n seg_chip = mask_colored_img(rgb_chip, chip_mask, 'rgb')\n return seg_chip\n\n\ndef segment(img_fpath, bbox_, new_size=None):\n \"\"\" Runs grabcut \"\"\"\n printDBG('[segm] segment(img_fpath=%r, bbox=%r)>' % (img_fpath, bbox_))\n num_iters = 5\n bgd_model = np.zeros((1, 13 * 5), np.float64)\n fgd_model = np.zeros((1, 13 * 5), np.float64)\n mode = cv2.GC_INIT_WITH_MASK\n # Initialize\n # !!! CV2 READS (H,W) !!!\n # WH Unsafe\n img_resz, bbox_resz = resize_img_and_bbox(img_fpath, bbox_, new_size=new_size)\n # WH Unsafe\n (img_h, img_w) = img_resz.shape[:2] # Image Shape\n printDBG(' * img_resz.shape=%r' % ((img_h, img_w),))\n # WH Safe\n tlbr = ut.xywh_to_tlbr(bbox_resz, (img_w, img_h)) # Rectangle ANNOTATION\n (x1, y1, x2, y2) = tlbr\n rect = tuple(bbox_resz) # Initialize: rect\n printDBG(' * rect=%r' % (rect,))\n printDBG(' * tlbr=%r' % (tlbr,))\n # WH Unsafe\n _mask = np.zeros((img_h, img_w), dtype=np.uint8) # Initialize: mask\n _mask[y1:y2, x1:x2] = cv2.GC_PR_FGD # Set ANNOTATION to cv2.GC_PR_FGD\n # Grab Cut\n tt = ut.Timer(' * cv2.grabCut()', verbose=DEBUG_SEGM)\n cv2.grabCut(img_resz, _mask, rect, bgd_model, fgd_model, num_iters, mode=mode)\n tt.toc()\n img_mask = np.where((_mask == cv2.GC_FGD) + (_mask == cv2.GC_PR_FGD), 255, 0).astype('uint8')\n # Crop\n chip = img_resz[y1:y2, x1:x2]\n chip_mask = img_mask[y1:y2, x1:x2]\n chip_mask = clean_mask(chip_mask)\n chip_mask = np.array(chip_mask, np.float) / 255.0\n # Mask the value of HSV\n chip_hsv = cv2.cvtColor(chip, cv2.COLOR_RGB2HSV)\n chip_hsv = np.array(chip_hsv, dtype=np.float) / 255.0\n chip_hsv[:, :, 2] *= chip_mask\n chip_hsv = np.array(np.round(chip_hsv * 255.0), dtype=np.uint8)\n seg_chip = cv2.cvtColor(chip_hsv, cv2.COLOR_HSV2RGB)\n return seg_chip, img_mask\n\n\nif __name__ == '__main__':\n \"\"\"\n CommandLine:\n xdoctest -m vtool_ibeis.segmentation\n \"\"\"\n import xdoctest\n xdoctest.doctest_module(__file__)\n"
] | [
[
"matplotlib.pyplot.imshow",
"numpy.maximum",
"numpy.allclose",
"numpy.multiply",
"matplotlib.pyplot.subplot",
"numpy.maximum.reduce",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"numpy.take",
"numpy.sqrt",
"numpy.asarray",
"numpy.nan_to_num",
"numpy.dtype",
"numpy.concatenate",
"numpy.all",
"numpy.round",
"numpy.where",
"numpy.divide",
"numpy.hstack",
"numpy.unique",
"numpy.clip",
"numpy.arange",
"numpy.subtract",
"numpy.logical_and.reduce",
"numpy.lexsort",
"numpy.flatnonzero",
"numpy.full",
"numpy.intersect1d",
"numpy.logical_or.reduce",
"numpy.asanyarray",
"numpy.argmax",
"numpy.ceil",
"numpy.outer",
"numpy.isclose",
"numpy.logical_not",
"numpy.multiply",
"numpy.isnan",
"numpy.ascontiguousarray",
"numpy.median",
"numpy.not_equal",
"numpy.fromiter",
"numpy.array",
"numpy.random.RandomState",
"numpy.sum",
"numpy.multiply.reduce",
"scipy.stats.norm.interval",
"numpy.isfinite",
"numpy.rec.fromarrays",
"numpy.linalg.norm",
"numpy.compress",
"numpy.tile",
"numpy.empty",
"numpy.random.shuffle",
"numpy.shape",
"numpy.prod",
"numpy.vstack"
],
[
"numpy.radians",
"numpy.minimum",
"numpy.sqrt",
"numpy.asarray",
"numpy.all",
"scipy.spatial.distance.squareform",
"numpy.where",
"numpy.clip",
"numpy.arange",
"numpy.subtract",
"numpy.full",
"numpy.sin",
"numpy.triu",
"numpy.reciprocal",
"numpy.zeros",
"numpy.array",
"numpy.random.RandomState",
"numpy.abs",
"numpy.linalg.norm",
"numpy.random.shuffle",
"numpy.ones",
"numpy.cos",
"numpy.dstack",
"scipy.spatial.distance.pdist",
"numpy.mod"
],
[
"numpy.sqrt",
"numpy.unique",
"numpy.round",
"numpy.array",
"numpy.zeros",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |