repo_name (string, length 8–130) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence) |
---|---|---|---|---|
PlatterDataset/feature | [
"2ebdc1b28498b709a0c91e60c19bfc731006bc50"
] | [
"synchronization/SyncNetInstance.py"
] | [
"#!/usr/bin/python\n#-*- coding: utf-8 -*-\n# Video 25 FPS, Audio 16000HZ\n\nimport torch\nimport numpy\nimport time, pdb, argparse, subprocess, os, math, glob\nimport cv2\nimport python_speech_features\n\nfrom scipy import signal\nfrom scipy.io import wavfile\nfrom SyncNetModel import *\nfrom shutil import rmtree\n\n\n# ==================== Get OFFSET ====================\ndef get_median(data1):\n data = sorted(data1)\n size = len(data)\n if size % 2 == 0: # 判断列表长度为偶数\n median = (data[size//2]+data[size//2-1])/2\n data[0] = median\n if size % 2 == 1: # 判断列表长度为奇数\n median = data[(size-1)//2]\n data[0] = median\n return data[0]\n\n\ndef calc_pdist(feat1, feat2, vshift=40):\n \n win_size = vshift*2+1\n\n feat2p = torch.nn.functional.pad(feat2,(0,0,vshift,vshift))\n\n dists = []\n\n for i in range(0,len(feat1)):\n\n dists.append(torch.nn.functional.pairwise_distance(feat1[[i],:].repeat(win_size, 1), feat2p[i:i+win_size,:]))\n\n return dists\n\n# ==================== MAIN DEF ====================\n\nclass SyncNetInstance(torch.nn.Module):\n\n def __init__(self, dropout = 0, num_layers_in_fc_layers = 1024):\n super(SyncNetInstance, self).__init__();\n\n self.__S__ = S(num_layers_in_fc_layers = num_layers_in_fc_layers).cuda();\n\n def evaluate(self, opt, videofile, num):\n\n self.__S__.eval();\n\n # ========== ==========\n # Convert files\n # ========== ==========\n\n if os.path.exists(os.path.join(opt.tmp_dir,opt.reference)):\n rmtree(os.path.join(opt.tmp_dir,opt.reference))\n\n os.makedirs(os.path.join(opt.tmp_dir,opt.reference))\n\n command = (\"ffmpeg -y -i %s -threads 1 -f image2 %s\" % (videofile,os.path.join(opt.tmp_dir,opt.reference,'%06d.jpg'))) \n output = subprocess.call(command, shell=True, stdout=None)\n\n command = (\"ffmpeg -y -i %s -async 1 -ac 1 -vn -acodec pcm_s16le -ar 16000 %s\" % (videofile,os.path.join(opt.tmp_dir,opt.reference,'audio.wav'))) \n output = subprocess.call(command, shell=True, stdout=None)\n \n # ========== ==========\n # Load video \n # ========== ==========\n\n images = []\n \n flist = glob.glob(os.path.join(opt.tmp_dir,opt.reference,'*.jpg'))\n flist.sort()\n\n for fname in flist:\n images.append(cv2.imread(fname))\n\n im = numpy.stack(images,axis=3)\n im = numpy.expand_dims(im,axis=0)\n im = numpy.transpose(im,(0,3,4,1,2))\n\n imtv = torch.autograd.Variable(torch.from_numpy(im.astype(float)).float())\n\n # ========== ==========\n # Load audio\n # ========== ==========\n\n sample_rate, audio = wavfile.read(os.path.join(opt.tmp_dir,opt.reference,'audio.wav'))\n mfcc = zip(*python_speech_features.mfcc(audio,sample_rate))\n mfcc = numpy.stack([numpy.array(i) for i in mfcc])\n torch.save(mfcc,'./mfcc_saver/mfcc'+str(num)+'.pt')\n ww = open('./mfcc_saver/mfcc'+str(num)+'.txt','w')\n ww.write(str(mfcc))\n cc = numpy.expand_dims(numpy.expand_dims(mfcc,axis=0),axis=0)\n cct = torch.autograd.Variable(torch.from_numpy(cc.astype(float)).float())\n\n # ========== ==========\n # Check audio and video input length\n # ========== ==========\n\n if (float(len(audio))/16000) != (float(len(images))/25) :\n print(\"WARNING: Audio (%.4fs) and video (%.4fs) lengths are different.\"%(float(len(audio))/16000,float(len(images))/25))\n\n min_length = min(len(images),math.floor(len(audio)/640))\n \n # ========== ==========\n # Generate video and audio feats\n # ========== ==========\n\n lastframe = min_length-5\n im_feat = []\n cc_feat = []\n wr = open('./'+str(opt.reference)+'_'+str(num)+'_resultoff.txt','w')\n tS = time.time()\n for i in range(0,lastframe,opt.batch_size):\n \n 
im_batch = [ imtv[:,:,vframe:vframe+5,:,:] for vframe in range(i,min(lastframe,i+opt.batch_size)) ]\n im_in = torch.cat(im_batch,0)\n im_out = self.__S__.forward_lip(im_in.cuda());\n im_feat.append(im_out.data.cpu())\n\n cc_batch = [ cct[:,:,:,vframe*4:vframe*4+20] for vframe in range(i,min(lastframe,i+opt.batch_size)) ]\n cc_in = torch.cat(cc_batch,0)\n cc_out = self.__S__.forward_aud(cc_in.cuda())\n cc_feat.append(cc_out.data.cpu())\n\n im_feat = torch.cat(im_feat,0)\n cc_feat = torch.cat(cc_feat,0)\n\n # ========== ==========\n # Compute offset\n # ========== ==========\n \n print('Compute time %.3f sec.' % (time.time()-tS))\n\n dists = calc_pdist(im_feat,cc_feat,vshift=opt.vshift)\n mdist = torch.mean(torch.stack(dists,1),1)\n off = []\n avg_dist = []\n\n for t in range(0,len(im_feat)):\n tt = 10000\n offy = 0\n of = 0\n of_m = 0\n dis_mid = 0\n dis_min = 1000000000\n for k in range(0,len(dists[t])):\n if t == 0:\n avg_dist.append(dists[t][k])\n else:\n avg_dist[k] += dists[t][k]\n\n if (t+1)% 100 == 0 or t == len(im_feat)-1:\n if avg_dist[k] < dis_min:\n dis_min = avg_dist[k]\n of = k\n\n if dists[t][k]<tt:\n tt = dists[t][k]\n offy = k\n if (t+1)%100 == 0 or t == len(im_feat) -1:\n dis_mid = get_median(avg_dist)\n for k in range(len(avg_dist)):\n avg_dist[k] = 0\n wr.write(str(t%100)+' ')\n wr.write(str((opt.vshift-of) * 0.04)+'s ')\n if (t+1)%100 != 0:\n wr.write(\"conf = \"+str((dis_mid.item()-dis_min.item())/((t+1)%100))+'\\n')#confidence改成medium\n else:\n wr.write(\"conf = \"+str((dis_mid.item()-dis_min.item())/100)+'\\n')\n off.append(opt.vshift-offy)\n off = numpy.array(off)\n\n minval, minidx = torch.min(mdist,0)\n\n offset = opt.vshift-minidx\n conf = torch.median(mdist) - minval\n\n fdist = numpy.stack([dist[minidx].numpy() for dist in dists])\n # fdist = numpy.pad(fdist, (3,3), 'constant', constant_values=15)\n fconf = torch.median(mdist).numpy() - fdist\n fconfm = signal.medfilt(fconf,kernel_size=9)\n \n numpy.set_printoptions(formatter={'float': '{: 0.3f}'.format})\n print('Framewise conf: ')\n print(fconfm)\n print('AV offset: \\t%d \\nMin dist: \\t%.3f\\nConfidence: \\t%.3f' % (offset,minval,conf))\n\n dists_npy = numpy.array([ dist.numpy() for dist in dists ])\n return off, conf.numpy(), dists_npy\n\n def extract_feature(self, opt, videofile):\n\n self.__S__.eval();\n \n # ========== ==========\n # Load video \n # ========== ==========\n cap = cv2.VideoCapture(videofile)\n\n frame_num = 1;\n images = []\n while frame_num:\n frame_num += 1\n ret, image = cap.read()\n if ret == 0:\n break\n\n images.append(image)\n\n im = numpy.stack(images,axis=3)\n im = numpy.expand_dims(im,axis=0)\n im = numpy.transpose(im,(0,3,4,1,2))\n\n imtv = torch.autograd.Variable(torch.from_numpy(im.astype(float)).float())\n \n # ========== ==========\n # Generate video feats\n # ========== ==========\n\n lastframe = len(images)-4\n im_feat = []\n\n tS = time.time()\n for i in range(0,lastframe,opt.batch_size):\n \n im_batch = [ imtv[:,:,vframe:vframe+5,:,:] for vframe in range(i,min(lastframe,i+opt.batch_size)) ]\n im_in = torch.cat(im_batch,0)\n im_out = self.__S__.forward_lipfeat(im_in.cuda());\n im_feat.append(im_out.data.cpu())\n\n im_feat = torch.cat(im_feat,0)\n\n # ========== ==========\n # Compute offset\n # ========== ==========\n \n print('Compute time %.3f sec.' 
% (time.time()-tS))\n\n return im_feat\n\n\n def loadParameters(self, path):\n loaded_state = torch.load(path, map_location=lambda storage, loc: storage);\n\n self_state = self.__S__.state_dict();\n\n for name, param in loaded_state.items():\n\n self_state[name].copy_(param);\n"
] | [
[
"torch.min",
"torch.stack",
"numpy.transpose",
"numpy.array",
"scipy.signal.medfilt",
"torch.load",
"torch.median",
"torch.nn.functional.pad",
"numpy.set_printoptions",
"numpy.expand_dims",
"numpy.stack",
"torch.cat"
]
] |
googleinterns/deepspeech-reconstruction | [
"72f28d1e9064d221b3421c302a8725a8c71859ee"
] | [
"src/deepspeech_training/util/config.py"
] | [
"from __future__ import absolute_import, division, print_function\n\nimport os\nimport sys\nimport tensorflow.compat.v1 as tfv1\n\nfrom attrdict import AttrDict\nfrom xdg import BaseDirectory as xdg\n\nfrom src.flags import FLAGS\nfrom .gpu import get_available_gpus\nfrom .logging import log_error\nfrom .text import Alphabet, UTF8Alphabet\nfrom .helpers import parse_file_size\n\nclass ConfigSingleton:\n _config = None\n\n def __getattr__(self, name):\n if not ConfigSingleton._config:\n raise RuntimeError(\"Global configuration not yet initialized.\")\n if not hasattr(ConfigSingleton._config, name):\n raise RuntimeError(\"Configuration option {} not found in config.\".format(name))\n return ConfigSingleton._config[name]\n\n\nConfig = ConfigSingleton() # pylint: disable=invalid-name\n\ndef initialize_globals():\n c = AttrDict()\n\n # Read-buffer\n FLAGS.read_buffer = parse_file_size(FLAGS.read_buffer)\n\n # Set default dropout rates\n if FLAGS.dropout_rate2 < 0:\n FLAGS.dropout_rate2 = FLAGS.dropout_rate\n if FLAGS.dropout_rate3 < 0:\n FLAGS.dropout_rate3 = FLAGS.dropout_rate\n if FLAGS.dropout_rate6 < 0:\n FLAGS.dropout_rate6 = FLAGS.dropout_rate\n\n # Set default checkpoint dir\n if not FLAGS.checkpoint_dir:\n FLAGS.checkpoint_dir = xdg.save_data_path(os.path.join('deepspeech', 'checkpoints'))\n\n if FLAGS.load_train not in ['last', 'best', 'init', 'auto']:\n FLAGS.load_train = 'auto'\n\n if FLAGS.load_evaluate not in ['last', 'best', 'auto']:\n FLAGS.load_evaluate = 'auto'\n\n # Set default summary dir\n if not FLAGS.summary_dir:\n FLAGS.summary_dir = xdg.save_data_path(os.path.join('deepspeech', 'summaries'))\n\n # Standard session configuration that'll be used for all new sessions.\n c.session_config = tfv1.ConfigProto(allow_soft_placement=True, log_device_placement=FLAGS.log_placement,\n inter_op_parallelism_threads=FLAGS.inter_op_parallelism_threads,\n intra_op_parallelism_threads=FLAGS.intra_op_parallelism_threads,\n gpu_options=tfv1.GPUOptions(allow_growth=FLAGS.use_allow_growth))\n\n # CPU device\n c.cpu_device = '/cpu:0'\n\n # Available GPU devices\n c.available_devices = get_available_gpus(c.session_config)\n\n # If there is no GPU available, we fall back to CPU based operation\n if not c.available_devices:\n c.available_devices = [c.cpu_device]\n\n if FLAGS.utf8:\n c.alphabet = UTF8Alphabet()\n else:\n c.alphabet = Alphabet(os.path.abspath(FLAGS.alphabet_config_path))\n\n # Geometric Constants\n # ===================\n\n # For an explanation of the meaning of the geometric constants, please refer to\n # doc/Geometry.md\n\n # Number of MFCC features\n c.n_input = 26 # TODO: Determine this programmatically from the sample rate\n\n # The number of frames in the context\n c.n_context = 9 # TODO: Determine the optimal value using a validation data set\n\n # Number of units in hidden layers\n c.n_hidden = FLAGS.n_hidden\n\n c.n_hidden_1 = c.n_hidden\n\n c.n_hidden_2 = c.n_hidden\n\n c.n_hidden_5 = c.n_hidden\n\n # LSTM cell state dimension\n c.n_cell_dim = c.n_hidden\n\n # The number of units in the third layer, which feeds in to the LSTM\n c.n_hidden_3 = c.n_cell_dim\n\n # Units in the sixth layer = number of characters in the target language plus one\n c.n_hidden_6 = c.alphabet.size() + 1 # +1 for CTC blank label\n\n # Size of audio window in samples\n if (FLAGS.feature_win_len * FLAGS.audio_sample_rate) % 1000 != 0:\n log_error('--feature_win_len value ({}) in milliseconds ({}) multiplied '\n 'by --audio_sample_rate value ({}) must be an integer value. 
Adjust '\n 'your --feature_win_len value or resample your audio accordingly.'\n ''.format(FLAGS.feature_win_len, FLAGS.feature_win_len / 1000, FLAGS.audio_sample_rate))\n sys.exit(1)\n\n c.audio_window_samples = FLAGS.audio_sample_rate * (FLAGS.feature_win_len / 1000)\n\n # Stride for feature computations in samples\n if (FLAGS.feature_win_step * FLAGS.audio_sample_rate) % 1000 != 0:\n log_error('--feature_win_step value ({}) in milliseconds ({}) multiplied '\n 'by --audio_sample_rate value ({}) must be an integer value. Adjust '\n 'your --feature_win_step value or resample your audio accordingly.'\n ''.format(FLAGS.feature_win_step, FLAGS.feature_win_step / 1000, FLAGS.audio_sample_rate))\n sys.exit(1)\n\n c.audio_step_samples = FLAGS.audio_sample_rate * (FLAGS.feature_win_step / 1000)\n\n if FLAGS.one_shot_infer:\n if not os.path.exists(FLAGS.one_shot_infer):\n log_error('Path specified in --one_shot_infer is not a valid file.')\n sys.exit(1)\n\n if FLAGS.train_cudnn and FLAGS.load_cudnn:\n log_error('Trying to use --train_cudnn, but --load_cudnn '\n 'was also specified. The --load_cudnn flag is only '\n 'needed when converting a CuDNN RNN checkpoint to '\n 'a CPU-capable graph. If your system is capable of '\n 'using CuDNN RNN, you can just specify the CuDNN RNN '\n 'checkpoint normally with --save_checkpoint_dir.')\n sys.exit(1)\n\n # If separate save and load flags were not specified, default to load and save\n # from the same dir.\n if not FLAGS.save_checkpoint_dir:\n FLAGS.save_checkpoint_dir = FLAGS.checkpoint_dir\n\n if not FLAGS.load_checkpoint_dir:\n FLAGS.load_checkpoint_dir = FLAGS.checkpoint_dir\n\n ConfigSingleton._config = c # pylint: disable=protected-access\n"
] | [
[
"tensorflow.compat.v1.GPUOptions"
]
] |
Jasonkks/mlcnet | [
"8f89c860c709733c8baa663607004fc48d76291d"
] | [
"pcdet/datasets/augmentor/data_augmentor.py"
] | [
"from functools import partial\nimport torch\nimport random\nimport numpy as np\nfrom ...ops.roiaware_pool3d import roiaware_pool3d_utils\nfrom ...utils import common_utils, box_utils\nfrom . import augmentor_utils, database_sampler\n\n\nclass DataAugmentor(object):\n def __init__(self, root_path, augmentor_configs, class_names, logger=None):\n self.root_path = root_path\n self.class_names = class_names\n self.logger = logger\n\n self.data_augmentor_queue = []\n aug_config_list = augmentor_configs if isinstance(augmentor_configs, list) \\\n else augmentor_configs.AUG_CONFIG_LIST\n\n for cur_cfg in aug_config_list:\n if not isinstance(augmentor_configs, list):\n if cur_cfg.NAME in augmentor_configs.DISABLE_AUG_LIST:\n continue\n cur_augmentor = getattr(self, cur_cfg.NAME)(config=cur_cfg)\n self.data_augmentor_queue.append(cur_augmentor)\n\n def gt_sampling(self, config=None):\n db_sampler = database_sampler.DataBaseSampler(\n root_path=self.root_path,\n sampler_cfg=config,\n class_names=self.class_names,\n logger=self.logger\n )\n return db_sampler\n\n def __getstate__(self):\n d = dict(self.__dict__)\n del d['logger']\n return d\n\n def __setstate__(self, d):\n self.__dict__.update(d)\n\n def object_size_normalization(self, data_dict=None, config=None):\n if data_dict is None:\n return partial(self.object_size_normalization, config=config)\n \n gt_boxes, points = data_dict['gt_boxes'], data_dict['points']\n if gt_boxes.shape[1] > 7:\n gt_boxes = gt_boxes[:,:7]\n offset = np.array(config['OFFSET'])\n # get masks of points inside boxes\n point_masks = roiaware_pool3d_utils.points_in_boxes_cpu(\n torch.from_numpy(points[:, 0:3]), torch.from_numpy(gt_boxes)).numpy()\n\n num_obj = gt_boxes.shape[0]\n obj_points_list = []\n gt_boxes_size = gt_boxes[:, 3:6]\n new_gt_boxes_size = gt_boxes_size + offset\n scale_factor = new_gt_boxes_size / gt_boxes_size\n # scale the objects\n for i in range(num_obj):\n point_mask = point_masks[i]\n obj_points = points[point_mask > 0] # get object points within the gt box\n obj_points[:, :3] -= gt_boxes[i, :3] # relative to box center\n obj_points[:, :3] *= scale_factor[i] # scale\n obj_points[:, :3] += gt_boxes[i, :3] # back to global coordinate\n obj_points_list.append(obj_points)\n\n # remove points inside boxes\n points = box_utils.remove_points_in_boxes3d(points, gt_boxes)\n # scale the boxes\n gt_boxes[:, 3:6] *= scale_factor\n # remove points inside boxes\n points = box_utils.remove_points_in_boxes3d(points, gt_boxes)\n\n # merge points\n # points = box_utils.remove_points_in_boxes3d(points, gt_boxes)\n obj_points = np.concatenate(obj_points_list, axis=0)\n points = np.concatenate([points, obj_points], axis=0)\n\n data_dict['points'] = points\n data_dict['gt_boxes'][:,:7] = gt_boxes\n return data_dict\n \n def random_world_flip(self, data_dict=None, config=None):\n if data_dict is None:\n return partial(self.random_world_flip, config=config)\n \n gt_boxes = data_dict['gt_boxes'] if 'gt_boxes' in data_dict else None\n points = data_dict['points']\n\n for cur_axis in config['ALONG_AXIS_LIST']:\n assert cur_axis in ['x', 'y']\n if 'gt_boxes' in data_dict:\n gt_boxes, points, world_flip_enabled = getattr(augmentor_utils, 'random_flip_along_%s' % cur_axis)(\n gt_boxes, points, return_enable=True\n )\n else:\n points, world_flip_enabled = getattr(augmentor_utils, 'random_flip_along_%s_points' % cur_axis)(\n points, return_enable=True\n )\n if 'gt_boxes' in data_dict:\n data_dict['gt_boxes'] = gt_boxes\n data_dict['points'] = points\n 
data_dict['world_flip_enabled'] = world_flip_enabled\n return data_dict\n\n def random_world_rotation(self, data_dict=None, config=None):\n if data_dict is None:\n return partial(self.random_world_rotation, config=config)\n rot_range = config['WORLD_ROT_ANGLE']\n if not isinstance(rot_range, list):\n rot_range = [-rot_range, rot_range]\n\n if 'gt_boxes' in data_dict:\n gt_boxes, points, world_rotation = augmentor_utils.global_rotation(\n data_dict['gt_boxes'], data_dict['points'], rot_range=rot_range, return_rotation=True\n )\n else:\n points, world_rotation = augmentor_utils.global_rotation_points(\n data_dict['points'], rot_range=rot_range, return_rotation=True\n )\n\n if 'gt_boxes' in data_dict:\n data_dict['gt_boxes'] = gt_boxes\n data_dict['points'] = points\n data_dict['world_rotation'] = world_rotation\n return data_dict\n\n def random_world_scaling(self, data_dict=None, config=None):\n if data_dict is None:\n return partial(self.random_world_scaling, config=config)\n if 'gt_boxes' in data_dict:\n gt_boxes, points, scale_ratio = augmentor_utils.global_scaling(\n data_dict['gt_boxes'], data_dict['points'], config['WORLD_SCALE_RANGE']\n )\n else:\n points, scale_ratio = augmentor_utils.global_scaling_points(data_dict['points'], config['WORLD_SCALE_RANGE'])\n \n data_dict['world_scaling'] = scale_ratio\n if 'gt_boxes' in data_dict:\n data_dict['gt_boxes'] = gt_boxes\n data_dict['points'] = points\n return data_dict\n\n def random_world_scaling_xyz(self, data_dict=None, config=None):\n if data_dict is None:\n return partial(self.random_world_scaling_xyz, config=config)\n gt_boxes = data_dict['gt_boxes']\n points = data_dict['points']\n scale_range = config['SCALE_RANGE']\n noise_scale = np.random.uniform(scale_range[0], scale_range[1], 3)\n points[:, :3] *= noise_scale\n gt_boxes[:, :3] *= noise_scale\n gt_boxes[:, 3:6] *= noise_scale\n data_dict['points'] = points\n data_dict['gt_boxes'] = gt_boxes\n data_dict['world_scaling_xyz'] = noise_scale\n return data_dict\n\n def jitter_point_cloud(self, data_dict=None, config=None):\n if data_dict is None:\n return partial(self.jitter_point_cloud, config=config)\n sigma = config['SIGMA']\n clip = config['CLIP']\n assert(clip > 0)\n points = data_dict['points']\n jittered_data = np.clip(sigma * np.random.randn(points.shape[0], points.shape[1]), -1*clip, clip)\n points += jittered_data\n data_dict['points'] = points\n data_dict['jittered'] = True\n data_dict['jitter_values'] = jittered_data\n return data_dict\n\n def random_world_shift(self, data_dict=None, config=None):\n if data_dict is None:\n return partial(self.random_world_shift, config=config)\n shift_range = config['RANGE']\n shifts = np.random.uniform(-shift_range, shift_range, 3)\n data_dict['points'] += shifts\n data_dict['world_shifts'] = shifts\n return data_dict\n\n def forward(self, data_dict, augment=True):\n \"\"\"\n Args:\n data_dict:\n points: (N, 3 + C_in)\n gt_boxes: optional, (N, 7) [x, y, z, dx, dy, dz, heading]\n gt_names: optional, (N), string\n ...\n\n Returns:\n \"\"\"\n if augment:\n for cur_augmentor in self.data_augmentor_queue:\n data_dict = cur_augmentor(data_dict=data_dict)\n\n if 'gt_boxes' in data_dict:\n data_dict['gt_boxes'][:, 6] = common_utils.limit_period(\n data_dict['gt_boxes'][:, 6], offset=0.5, period=2 * np.pi\n )\n if 'road_plane' in data_dict:\n data_dict.pop('road_plane')\n if 'gt_boxes' in data_dict and 'gt_boxes_mask' in data_dict:\n gt_boxes_mask = data_dict['gt_boxes_mask']\n data_dict['gt_boxes'] = data_dict['gt_boxes'][gt_boxes_mask]\n 
data_dict['gt_names'] = data_dict['gt_names'][gt_boxes_mask]\n data_dict.pop('gt_boxes_mask')\n return data_dict\n"
] | [
[
"numpy.random.uniform",
"numpy.random.randn",
"torch.from_numpy",
"numpy.array",
"numpy.concatenate"
]
] |
reetikaag/human-activity-recognition | [
"1e6760a88ca52fe9a8a8ca60d000cd3426851156"
] | [
"Efficient-3DCNNs/thop/count_hooks.py"
] | [
"import argparse\n\nimport torch\nimport torch.nn as nn\n\nmultiply_adds = 1\n\n\ndef count_conv2d(m, x, y):\n\t# TODO: add support for pad and dilation\n\tx = x[0]\n\n\tcin = m.in_channels\n\tcout = m.out_channels\n\tkh, kw = m.kernel_size\n\tbatch_size = x.size()[0]\n\n\tout_w = y.size(2)\n\tout_h = y.size(3)\n\n\t# ops per output element\n\t# kernel_mul = kh * kw * cin\n\t# kernel_add = kh * kw * cin - 1\n\tkernel_ops = multiply_adds * kh * kw * cin // m.groups\n\tbias_ops = 1 if m.bias is not None else 0\n\tops_per_element = kernel_ops + bias_ops\n\n\t# total ops\n\t# num_out_elements = y.numel()\n\toutput_elements = batch_size * out_w * out_h * cout\n\ttotal_ops = output_elements * ops_per_element\n\n\t# in case same conv is used multiple times\n\tm.total_ops += torch.Tensor([int(total_ops)])\n\n\ndef count_conv3d(m, x, y):\n\t# TODO: add support for pad and dilation\n\tx = x[0]\n\n\tcin = m.in_channels\n\tcout = m.out_channels\n\tkd, kh, kw = m.kernel_size\n\tbatch_size = x.size()[0]\n \n\tout_d = y.size(2)\n\tout_w = y.size(3)\n\tout_h = y.size(4)\n\n\t# ops per output element\n\t# kernel_mul = kh * kw * cin\n\t# kernel_add = kh * kw * cin - 1\n\tkernel_ops = multiply_adds * kd * kh * kw * cin // m.groups\n\tbias_ops = 1 if m.bias is not None else 0\n\tops_per_element = kernel_ops + bias_ops\n\n\t# total ops\n\t# num_out_elements = y.numel()\n\toutput_elements = batch_size * out_d * out_w * out_h * cout\n\ttotal_ops = output_elements * ops_per_element\n\n\t# in case same conv is used multiple times\n\tm.total_ops += torch.Tensor([int(total_ops)]).to(\"cuda\")\n\n\ndef count_bn2d(m, x, y):\n\tx = x[0]\n\n\tnelements = x.numel()\n\ttotal_sub = nelements\n\ttotal_div = nelements\n\ttotal_ops = total_sub + total_div\n\n\tm.total_ops += torch.Tensor([int(total_ops)]).to(\"cuda\")\n\n\ndef count_relu(m, x, y):\n\tx = x[0]\n\n\tnelements = x.numel()\n\ttotal_ops = nelements\n\n\tm.total_ops += torch.Tensor([int(total_ops)]).to(\"cuda\")\n\n\ndef count_softmax(m, x, y):\n\tx = x[0]\n\n\tbatch_size, nfeatures = x.size()\n\n\ttotal_exp = nfeatures\n\ttotal_add = nfeatures - 1\n\ttotal_div = nfeatures\n\ttotal_ops = batch_size * (total_exp + total_add + total_div)\n\n\tm.total_ops += torch.Tensor([int(total_ops)]).to(\"cuda\")\n\n\ndef count_maxpool(m, x, y):\n\tkernel_ops = torch.prod(torch.Tensor([m.kernel_size])) - 1\n\tnum_elements = y.numel()\n\ttotal_ops = kernel_ops * num_elements\n\n\tm.total_ops += torch.Tensor([int(total_ops)]).to(\"cuda\")\n\n\ndef count_avgpool(m, x, y):\n\ttotal_add = torch.prod(torch.Tensor([m.kernel_size])) - 1\n\ttotal_div = 1\n\tkernel_ops = total_add + total_div\n\tnum_elements = y.numel()\n\ttotal_ops = kernel_ops * num_elements\n\n\tm.total_ops += torch.Tensor([int(total_ops)]).to(\"cuda\")\n\n\ndef count_linear(m, x, y):\n\t# per output element\n\ttotal_mul = m.in_features\n\ttotal_add = m.in_features - 1\n\tnum_elements = y.numel()\n\ttotal_ops = (total_mul + total_add) * num_elements\n\n\tm.total_ops += torch.Tensor([int(total_ops)]).to(\"cuda\")\n"
] | [
[
"torch.Tensor"
]
] |
haorang/285 | [
"3b7369b8eb4433952c9cdf27d4feaa015a6c40e4"
] | [
"stable_baselines3/dqn/dqn.py"
] | [
"from typing import Any, Dict, List, Optional, Tuple, Type, Union\n\nimport numpy as np\nimport torch as th\nfrom torch.nn import functional as F\n\nfrom stable_baselines3.common import logger\nfrom stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm\nfrom stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule\nfrom stable_baselines3.common.utils import get_linear_fn, is_vectorized_observation, polyak_update\nfrom stable_baselines3.dqn.policies import DQNPolicy\n\n\nclass DQN(OffPolicyAlgorithm):\n \"\"\"\n Deep Q-Network (DQN)\n\n Paper: https://arxiv.org/abs/1312.5602, https://www.nature.com/articles/nature14236\n Default hyperparameters are taken from the nature paper,\n except for the optimizer and learning rate that were taken from Stable Baselines defaults.\n\n :param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)\n :param env: The environment to learn from (if registered in Gym, can be str)\n :param learning_rate: The learning rate, it can be a function\n of the current progress (from 1 to 0)\n :param buffer_size: size of the replay buffer\n :param learning_starts: how many steps of the model to collect transitions for before learning starts\n :param batch_size: Minibatch size for each gradient update\n :param tau: the soft update coefficient (\"Polyak update\", between 0 and 1) default 1 for hard update\n :param gamma: the discount factor\n :param train_freq: Update the model every ``train_freq`` steps. Set to `-1` to disable.\n :param gradient_steps: How many gradient steps to do after each rollout\n (see ``train_freq`` and ``n_episodes_rollout``)\n Set to ``-1`` means to do as many gradient steps as steps done in the environment\n during the rollout.\n :param n_episodes_rollout: Update the model every ``n_episodes_rollout`` episodes.\n Note that this cannot be used at the same time as ``train_freq``. Set to `-1` to disable.\n :param optimize_memory_usage: Enable a memory efficient variant of the replay buffer\n at a cost of more complexity.\n See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195\n :param target_update_interval: update the target network every ``target_update_interval``\n environment steps.\n :param exploration_fraction: fraction of entire training period over which the exploration rate is reduced\n :param exploration_initial_eps: initial value of random action probability\n :param exploration_final_eps: final value of random action probability\n :param max_grad_norm: The maximum value for the gradient clipping\n :param tensorboard_log: the log location for tensorboard (if None, no logging)\n :param create_eval_env: Whether to create a second environment that will be\n used for evaluating the agent periodically. (Only available when passing string for the environment)\n :param policy_kwargs: additional arguments to be passed to the policy on creation\n :param verbose: the verbosity level: 0 no output, 1 info, 2 debug\n :param seed: Seed for the pseudo random generators\n :param device: Device (cpu, cuda, ...) 
on which the code should be run.\n Setting it to auto, the code will be run on the GPU if possible.\n :param _init_setup_model: Whether or not to build the network at the creation of the instance\n \"\"\"\n\n def __init__(\n self,\n policy: Union[str, Type[DQNPolicy]],\n env: Union[GymEnv, str],\n learning_rate: Union[float, Schedule] = 1e-4,\n buffer_size: int = 1000000,\n learning_starts: int = 50000,\n batch_size: Optional[int] = 32,\n tau: float = 1.0,\n gamma: float = 0.99,\n train_freq: int = 4,\n gradient_steps: int = 1,\n n_episodes_rollout: int = -1,\n optimize_memory_usage: bool = False,\n target_update_interval: int = 10000,\n exploration_fraction: float = 0.1,\n exploration_initial_eps: float = 1.0,\n exploration_final_eps: float = 0.05,\n max_grad_norm: float = 10,\n tensorboard_log: Optional[str] = None,\n create_eval_env: bool = False,\n policy_kwargs: Optional[Dict[str, Any]] = None,\n verbose: int = 0,\n seed: Optional[int] = None,\n device: Union[th.device, str] = \"auto\",\n _init_setup_model: bool = True,\n ):\n\n super(DQN, self).__init__(\n policy,\n env,\n DQNPolicy,\n learning_rate,\n buffer_size,\n learning_starts,\n batch_size,\n tau,\n gamma,\n train_freq,\n gradient_steps,\n n_episodes_rollout,\n action_noise=None, # No action noise\n policy_kwargs=policy_kwargs,\n tensorboard_log=tensorboard_log,\n verbose=verbose,\n device=device,\n create_eval_env=create_eval_env,\n seed=seed,\n sde_support=False,\n optimize_memory_usage=optimize_memory_usage,\n )\n\n self.exploration_initial_eps = exploration_initial_eps\n self.exploration_final_eps = exploration_final_eps\n self.exploration_fraction = exploration_fraction\n self.target_update_interval = target_update_interval\n self.max_grad_norm = max_grad_norm\n # \"epsilon\" for the epsilon-greedy exploration\n self.exploration_rate = 0.0\n # Linear schedule will be defined in `_setup_model()`\n self.exploration_schedule = None\n self.q_net, self.q_net_target = None, None\n\n if _init_setup_model:\n self._setup_model()\n\n def _setup_model(self) -> None:\n super(DQN, self)._setup_model()\n self._create_aliases()\n self.exploration_schedule = get_linear_fn(\n self.exploration_initial_eps, self.exploration_final_eps, self.exploration_fraction\n )\n\n def _create_aliases(self) -> None:\n self.q_net = self.policy.q_net\n self.q_net_target = self.policy.q_net_target\n\n def _on_step(self) -> None:\n \"\"\"\n Update the exploration rate and target network if needed.\n This method is called in ``collect_rollouts()`` after each step in the environment.\n \"\"\"\n if self.num_timesteps % self.target_update_interval == 0:\n polyak_update(self.q_net.parameters(), self.q_net_target.parameters(), self.tau)\n\n self.exploration_rate = self.exploration_schedule(self._current_progress_remaining)\n logger.record(\"rollout/exploration rate\", self.exploration_rate)\n\n def train(self, gradient_steps: int, batch_size: int = 100) -> None:\n # Update learning rate according to schedule\n self._update_learning_rate(self.policy.optimizer)\n\n losses = []\n for gradient_step in range(gradient_steps):\n # Sample replay buffer\n replay_data = self.replay_buffer.sample(batch_size, env=self._vec_normalize_env)\n\n with th.no_grad():\n # Compute the target Q values\n target_q = self.q_net_target(replay_data.next_observations)\n # Follow greedy policy: use the one with the highest value\n target_q, _ = target_q.max(dim=1)\n # Avoid potential broadcast issue\n target_q = target_q.reshape(-1, 1)\n # 1-step TD target\n target_q = replay_data.rewards + 
(1 - replay_data.dones) * self.gamma * target_q\n\n # Get current Q estimates\n current_q = self.q_net(replay_data.observations)\n\n # Retrieve the q-values for the actions from the replay buffer\n current_q = th.gather(current_q, dim=1, index=replay_data.actions.long())\n\n # Compute Huber loss (less sensitive to outliers)\n loss = F.smooth_l1_loss(current_q, target_q)\n losses.append(loss.item())\n\n # Optimize the policy\n self.policy.optimizer.zero_grad()\n loss.backward()\n # Clip gradient norm\n th.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)\n self.policy.optimizer.step()\n\n # Increase update counter\n self._n_updates += gradient_steps\n\n logger.record(\"train/n_updates\", self._n_updates, exclude=\"tensorboard\")\n logger.record(\"train/loss\", np.mean(losses))\n\n def predict(\n self,\n observation: np.ndarray,\n state: Optional[np.ndarray] = None,\n mask: Optional[np.ndarray] = None,\n deterministic: bool = False,\n ) -> Tuple[np.ndarray, Optional[np.ndarray]]:\n \"\"\"\n Overrides the base_class predict function to include epsilon-greedy exploration.\n\n :param observation: the input observation\n :param state: The last states (can be None, used in recurrent policies)\n :param mask: The last masks (can be None, used in recurrent policies)\n :param deterministic: Whether or not to return deterministic actions.\n :return: the model's action and the next state\n (used in recurrent policies)\n \"\"\"\n if not deterministic and np.random.rand() < self.exploration_rate:\n if is_vectorized_observation(observation, self.observation_space):\n n_batch = observation.shape[0]\n action = np.array([self.action_space.sample() for _ in range(n_batch)])\n else:\n action = np.array(self.action_space.sample())\n else:\n action, state = self.policy.predict(observation, state, mask, deterministic)\n return action, state\n\n def learn(\n self,\n total_timesteps: int,\n callback: MaybeCallback = None,\n log_interval: int = 4,\n eval_env: Optional[GymEnv] = None,\n eval_freq: int = -1,\n n_eval_episodes: int = 5,\n tb_log_name: str = \"DQN\",\n eval_log_path: Optional[str] = None,\n reset_num_timesteps: bool = True,\n ) -> OffPolicyAlgorithm:\n\n return super(DQN, self).learn(\n total_timesteps=total_timesteps,\n callback=callback,\n log_interval=log_interval,\n eval_env=eval_env,\n eval_freq=eval_freq,\n n_eval_episodes=n_eval_episodes,\n tb_log_name=tb_log_name,\n eval_log_path=eval_log_path,\n reset_num_timesteps=reset_num_timesteps,\n )\n\n def _excluded_save_params(self) -> List[str]:\n return super(DQN, self)._excluded_save_params() + [\"q_net\", \"q_net_target\"]\n\n def _get_torch_save_params(self) -> Tuple[List[str], List[str]]:\n state_dicts = [\"policy\", \"policy.optimizer\"]\n\n return state_dicts, []\n"
] | [
[
"numpy.mean",
"torch.no_grad",
"numpy.random.rand",
"torch.nn.functional.smooth_l1_loss"
]
] |
b1quint/astropy | [
"a170a74739e4356c169429a42e554f9777b53f4d"
] | [
"astropy/stats/lombscargle/implementations/tests/test_mle.py"
] | [
"import pytest\nimport numpy as np\nfrom numpy.testing import assert_allclose\n\nfrom astropy.stats.lombscargle.implementations.mle import design_matrix, periodic_fit\n\n\[email protected]\ndef t():\n rand = np.random.RandomState(42)\n return 10 * rand.rand(10)\n\n\[email protected]('freq', [1.0, 2])\[email protected]('dy', [None, 2.0])\[email protected]('bias', [True, False])\ndef test_design_matrix(t, freq, dy, bias):\n X = design_matrix(t, freq, dy, bias=bias)\n assert X.shape == (t.shape[0], 2 + bool(bias))\n if bias:\n assert_allclose(X[:, 0], 1. / (dy or 1.0))\n assert_allclose(X[:, -2], np.sin(2 * np.pi * freq * t) / (dy or 1.0))\n assert_allclose(X[:, -1], np.cos(2 * np.pi * freq * t) / (dy or 1.0))\n\n\[email protected]('nterms', range(4))\ndef test_multiterm_design_matrix(t, nterms):\n dy = 2.0\n freq = 1.5\n X = design_matrix(t, freq, dy=dy, bias=True, nterms=nterms)\n assert X.shape == (t.shape[0], 1 + 2 * nterms)\n assert_allclose(X[:, 0], 1. / dy)\n for i in range(1, nterms + 1):\n assert_allclose(X[:, 2 * i - 1], np.sin(2 * np.pi * i * freq * t) / dy)\n assert_allclose(X[:, 2 * i], np.cos(2 * np.pi * i * freq * t) / dy)\n\n\[email protected]('nterms', range(1, 4))\[email protected]('freq', [1, 2])\[email protected]('fit_mean', [True, False])\ndef test_exact_mle_fit(nterms, freq, fit_mean):\n rand = np.random.RandomState(42)\n t = 10 * rand.rand(30)\n theta = -1 + rand.rand(2 * nterms + 1)\n y = np.zeros(t.shape)\n if fit_mean:\n y = theta[0] * np.ones(t.shape)\n for i in range(1, nterms + 1):\n y += theta[2 * i - 1] * np.sin(2 * np.pi * i * freq * t)\n y += theta[2 * i] * np.cos(2 * np.pi * i * freq * t)\n\n y_fit = periodic_fit(t, y, dy=1, frequency=freq, t_fit=t, nterms=nterms,\n center_data=False, fit_mean=fit_mean)\n assert_allclose(y, y_fit)\n"
] | [
[
"numpy.ones",
"numpy.zeros",
"numpy.cos",
"numpy.random.RandomState",
"numpy.testing.assert_allclose",
"numpy.sin"
]
] |
chaekit/pytorch | [
"132f5c1f36698361149ea99ca3504bd2acfdc19f"
] | [
"torch/nn/parallel/distributed.py"
] | [
"import copy\nimport inspect\nimport itertools\nimport logging\nimport os\nimport warnings\nfrom contextlib import contextmanager\nfrom typing import NamedTuple\n\nimport torch\nimport torch.distributed as dist\n\nRPC_AVAILABLE = False\nif dist.is_available():\n from torch.distributed.distributed_c10d import ReduceOp\n from torch.distributed.distributed_c10d import _get_default_group\nif torch.distributed.rpc.is_available():\n RPC_AVAILABLE = True\n from torch.distributed.rpc import RRef\nfrom torch._utils import _get_device_index\n\nfrom ..modules import Module\nfrom ._functions import _get_stream\nfrom .scatter_gather import scatter_kwargs, gather, is_namedtuple\n\n\ndef _find_tensors(obj):\n r\"\"\"\n Recursively find all tensors contained in the specified object.\n \"\"\"\n if RPC_AVAILABLE and isinstance(obj, RRef):\n # If the current node is the owner of the RRef, unwrap it and try to\n # find Tensors.\n # TODO: Expand to remote RRefs.\n if obj.is_owner():\n return _find_tensors(obj.local_value())\n if isinstance(obj, torch.Tensor):\n return [obj]\n if isinstance(obj, (list, tuple)):\n return itertools.chain(*map(_find_tensors, obj))\n if isinstance(obj, dict):\n return itertools.chain(*map(_find_tensors, obj.values()))\n return []\n\n\ndef _dump_DDP_relevant_env_vars():\n relevant_env_vars = [\n \"RANK\",\n \"LOCAL_RANK\",\n \"WORLD_SIZE\",\n \"MASTER_PORT\",\n \"MASTER_ADDR\",\n \"CUDA_VISIBLE_DEVICES\",\n \"GLOO_SOCKET_IFNAME\",\n \"GLOO_DEVICE_TRANSPORT\",\n \"NCCL_SOCKET_IFNAME\",\n \"NCCL_BLOCKING_WAIT\",\n \"NCCL_DEBUG\",\n \"NCCL_DEBUG_SUBSYS\",\n \"NCCL_IB_DISABLE\",\n # More NCCL env vars:\n \"NCCL_P2P_DISABLE\",\n \"NCCL_P2P_LEVEL\",\n \"NCCL_SHM_DISABLE\",\n \"NCCL_SOCKET_NTHREADS\",\n \"NCCL_NSOCKS_PERTHREAD\",\n \"NCCL_BUFFSIZE\",\n \"NCCL_NTHREADS\",\n \"NCCL_RINGS\",\n \"NCCL_MAX_NCHANNELS\",\n \"NCCL_MIN_NCHANNELS\",\n \"NCCL_CHECKS_DISABLE\",\n \"NCCL_CHECK_POINTERS\",\n \"NCCL_LAUNCH_MODE\",\n \"NCCL_IB_HCA\",\n \"NCCL_IB_TIMEOUT\",\n \"NCCL_IB_RETRY_CNT\",\n \"NCCL_IB_GID_INDEX\",\n \"NCCL_IB_SL\",\n \"NCCL_IB_TC\",\n \"NCCL_IB_AR_THRESHOLD\",\n \"NCCL_IB_CUDA_SUPPORT\",\n \"NCCL_NET_GDR_LEVEL\",\n \"NCCL_NET_GDR_READ\",\n \"NCCL_SINGLE_RING_THRESHOLD\",\n \"NCCL_LL_THRESHOLD\",\n \"NCCL_TREE_THRESHOLD\",\n \"NCCL_ALGO\",\n \"NCCL_PROTO\",\n \"NCCL_IGNORE_CPU_AFFINITY\",\n \"NCCL_DEBUG_FILE\",\n \"NCCL_COLLNET_ENABLE\",\n \"NCCL_TOPO_FILE\",\n \"NCCL_TOPO_DUMP_FILE\",\n ]\n formatted_output = \"\"\n for var in relevant_env_vars:\n value = os.environ[var] if var in os.environ else \"N/A\"\n formatted_output += \"env:%s=%s\\n\" % (var, value)\n print(formatted_output)\n\n\nclass _DDPUnevenInputsConfig(NamedTuple):\n ddp_join_enabled: bool\n ddp_join_divide_by_initial_world_size: bool\n\n\nclass DistributedDataParallel(Module):\n r\"\"\"Implements distributed data parallelism that is based on\n ``torch.distributed`` package at the module level.\n\n This container parallelizes the application of the given module by\n splitting the input across the specified devices by chunking in the batch\n dimension. The module is replicated on each machine and each device, and\n each such replica handles a portion of the input. 
During the backwards\n pass, gradients from each node are averaged.\n\n The batch size should be larger than the number of GPUs used locally.\n\n See also: :ref:`distributed-basics` and :ref:`cuda-nn-ddp-instead`.\n The same constraints on input as in :class:`torch.nn.DataParallel` apply.\n\n Creation of this class requires that ``torch.distributed`` to be already\n initialized, by calling :func:`torch.distributed.init_process_group`.\n\n ``DistributedDataParallel`` is proven to be significantly faster than\n :class:`torch.nn.DataParallel` for single-node multi-GPU data\n parallel training.\n\n To use ``DistributedDataParallel`` on a host with N GPUs, you should spawn\n up ``N`` processes, ensuring that each process exclusively works on a single\n GPU from 0 to N-1. This can be done by either setting\n ``CUDA_VISIBLE_DEVICES`` for every process or by calling:\n\n >>> torch.cuda.set_device(i)\n\n where i is from 0 to N-1. In each process, you should refer the following\n to construct this module:\n\n >>> torch.distributed.init_process_group(\n >>> backend='nccl', world_size=N, init_method='...'\n >>> )\n >>> model = DistributedDataParallel(model, device_ids=[i], output_device=i)\n\n In order to spawn up multiple processes per node, you can use either\n ``torch.distributed.launch`` or ``torch.multiprocessing.spawn``.\n\n .. note::\n Please refer to `PyTorch Distributed Overview <https://pytorch.org/tutorials/beginner/dist_overview.html>`__\n for a brief introduction to all features related to distributed training.\n\n .. note::\n ``DistributedDataParallel`` can be used in conjunction with\n :class:`torch.distributed.optim.ZeroRedundancyOptimizer` to reduce\n per-rank optimizer states memory footprint. Please refer to\n `ZeroRedundancyOptimizer recipe <https://pytorch.org/tutorials/recipes/zero_redundancy_optimizer.html>`__\n for more details.\n\n .. note:: ``nccl`` backend is currently the fastest and highly recommended\n backend when using GPUs. This applies to both single-node and\n multi-node distributed training.\n\n .. note:: This module also supports mixed-precision distributed training.\n This means that your model can have different types of parameters such\n as mixed types of ``fp16`` and ``fp32``, the gradient reduction on these\n mixed types of parameters will just work fine.\n\n .. note:: If you use ``torch.save`` on one process to checkpoint the module,\n and ``torch.load`` on some other processes to recover it, make sure that\n ``map_location`` is configured properly for every process. Without\n ``map_location``, ``torch.load`` would recover the module to devices\n where the module was saved from.\n\n .. note:: When a model is trained on ``M`` nodes with ``batch=N``, the\n gradient will be ``M`` times smaller when compared to the same model\n trained on a single node with ``batch=M*N`` if the loss is summed (NOT\n averaged as usual) across instances in a batch (because the gradients\n between different nodes are averaged). You should take this into\n consideration when you want to obtain a mathematically equivalent\n training process compared to the local training counterpart. But in most\n cases, you can just treat a DistributedDataParallel wrapped model, a\n DataParallel wrapped model and an ordinary model on a single GPU as the\n same (E.g. using the same learning rate for equivalent batch size).\n\n .. note::\n Parameters are never broadcast between processes. 
The module performs\n an all-reduce step on gradients and assumes that they will be modified\n by the optimizer in all processes in the same way. Buffers\n (e.g. BatchNorm stats) are broadcast from the module in process of rank\n 0, to all other replicas in the system in every iteration.\n\n .. note::\n If you are using DistributedDataParallel in conjunction with the\n :ref:`distributed-rpc-framework`, you should always use\n :meth:`torch.distributed.autograd.backward` to compute gradients and\n :class:`torch.distributed.optim.DistributedOptimizer` for optimizing\n parameters.\n\n Example::\n\n >>> import torch.distributed.autograd as dist_autograd\n >>> from torch.nn.parallel import DistributedDataParallel as DDP\n >>> from torch import optim\n >>> from torch.distributed.optim import DistributedOptimizer\n >>> from torch.distributed.rpc import RRef\n >>>\n >>> t1 = torch.rand((3, 3), requires_grad=True)\n >>> t2 = torch.rand((3, 3), requires_grad=True)\n >>> rref = rpc.remote(\"worker1\", torch.add, args=(t1, t2))\n >>> ddp_model = DDP(my_model)\n >>>\n >>> # Setup optimizer\n >>> optimizer_params = [rref]\n >>> for param in ddp_model.parameters():\n >>> optimizer_params.append(RRef(param))\n >>>\n >>> dist_optim = DistributedOptimizer(\n >>> optim.SGD,\n >>> optimizer_params,\n >>> lr=0.05,\n >>> )\n >>>\n >>> with dist_autograd.context() as context_id:\n >>> pred = ddp_model(rref.to_here())\n >>> loss = loss_func(pred, loss)\n >>> dist_autograd.backward(context_id, loss)\n >>> dist_optim.step()\n\n .. note::\n To let a non-DDP model load a state dict from a DDP model,\n :meth:`~torch.nn.modules.utils.consume_prefix_in_state_dict_if_present`\n needs to be applied to strip the prefix \"module.\" in the DDP state dict before loading.\n\n .. warning::\n Constructor, forward method, and differentiation of the output (or a\n function of the output of this module) are distributed synchronization\n points. Take that into account in case different processes might be\n executing different code.\n\n .. warning::\n This module assumes all parameters are registered in the model by the\n time it is created. No parameters should be added nor removed later.\n Same applies to buffers.\n\n .. warning::\n This module assumes all parameters are registered in the model of each\n distributed processes are in the same order. The module itself will\n conduct gradient ``allreduce`` following the reverse order of the\n registered parameters of the model. In other words, it is users'\n responsibility to ensure that each distributed process has the exact\n same model and thus the exact same parameter registration order.\n\n .. warning::\n This module allows parameters with non-rowmajor-contiguous strides.\n For example, your model may contain some parameters whose\n :class:`torch.memory_format` is ``torch.contiguous_format``\n and others whose format is ``torch.channels_last``. However,\n corresponding parameters in different processes must have the\n same strides.\n\n .. warning::\n This module doesn't work with :func:`torch.autograd.grad` (i.e. it will\n only work if gradients are to be accumulated in ``.grad`` attributes of\n parameters).\n\n .. warning::\n If you plan on using this module with a ``nccl`` backend or a ``gloo``\n backend (that uses Infiniband), together with a DataLoader that uses\n multiple workers, please change the multiprocessing start method to\n ``forkserver`` (Python 3 only) or ``spawn``. 
Unfortunately\n Gloo (that uses Infiniband) and NCCL2 are not fork safe, and you will\n likely experience deadlocks if you don't change this setting.\n\n .. warning::\n Forward and backward hooks defined on :attr:`module` and its submodules\n won't be invoked anymore, unless the hooks are initialized in the\n :meth:`forward` method.\n\n .. warning::\n You should never try to change your model's parameters after wrapping\n up your model with ``DistributedDataParallel``. Because, when\n wrapping up your model with ``DistributedDataParallel``, the constructor\n of ``DistributedDataParallel`` will register the additional gradient\n reduction functions on all the parameters of the model itself at the\n time of construction. If you change the model's parameters afterwards,\n gradient redunction functions no longer match the correct set of\n parameters.\n\n .. warning::\n Using ``DistributedDataParallel`` in conjunction with the\n :ref:`distributed-rpc-framework` is experimental and subject to change.\n\n .. warning::\n The ``gradient_as_bucket_view`` mode does not yet work with Automatic\n Mixed Precision (AMP). AMP maintains stashed gradients that are used for\n unscaling gradients. With ``gradient_as_bucket_view=True``, these\n stashed gradients will point to communication buckets in the first\n iteration. In the next iteration, the communication buckets are mutated\n and thus these stashed gradients will be unexpectedly mutated as well,\n which might lead to wrong results.\n\n Args:\n module (Module): module to be parallelized\n device_ids (list of int or torch.device): CUDA devices.\n 1) For single-device modules, ``device_ids`` can\n contain exactly one device id, which represents the only\n CUDA device where the input module corresponding to this process resides.\n Alternatively, ``device_ids`` can also be ``None``.\n 2) For multi-device modules and CPU modules,\n ``device_ids`` must be ``None``.\n\n When ``device_ids`` is ``None`` for both cases,\n both the input data for the forward pass and the actual module\n must be placed on the correct device.\n (default: ``None``)\n output_device (int or torch.device): Device location of output for\n single-device CUDA modules. For multi-device modules and\n CPU modules, it must be ``None``, and the module itself\n dictates the output location. (default: ``device_ids[0]``\n for single-device modules)\n broadcast_buffers (bool): Flag that enables syncing (broadcasting)\n buffers of the module at beginning of the ``forward``\n function. (default: ``True``)\n process_group: The process group to be used for distributed data\n all-reduction. If ``None``, the default process group, which\n is created by :func:`torch.distributed.init_process_group`,\n will be used. (default: ``None``)\n bucket_cap_mb: ``DistributedDataParallel`` will bucket parameters into\n multiple buckets so that gradient reduction of each\n bucket can potentially overlap with backward computation.\n :attr:`bucket_cap_mb` controls the bucket size in\n MegaBytes (MB). (default: 25)\n find_unused_parameters (bool): Traverse the autograd graph from all\n tensors contained in the return value of the\n wrapped module's ``forward`` function. Parameters\n that don't receive gradients as part of this\n graph are preemptively marked as being ready to\n be reduced. Note that all ``forward`` outputs\n that are derived from module parameters must\n participate in calculating loss and later the\n gradient computation. 
If they don't, this wrapper\n will hang waiting for autograd to produce\n gradients for those parameters. Any outputs\n derived from module parameters that are otherwise\n unused can be detached from the autograd graph\n using ``torch.Tensor.detach``. (default: ``False``)\n check_reduction: This argument is deprecated.\n gradient_as_bucket_view (bool): This is a prototype feature and subject\n to changes. When set to ``True``, gradients will be views\n pointing to different offsets of ``allreduce`` communication\n buckets. This can reduce peak memory usage, where the\n saved memory size will be equal to the total gradients\n size. Moreover, it avoids the overhead of copying between\n gradients and ``allreduce`` communication buckets. When\n gradients are views, ``detach_()`` cannot be called on the\n gradients. If hitting such errors, please fix it by\n referring to the :meth:`~torch.optim.Optimizer.zero_grad`\n function in ``torch/optim/optimizer.py`` as a solution.\n\n\n Attributes:\n module (Module): the module to be parallelized.\n\n Example::\n\n >>> torch.distributed.init_process_group(backend='nccl', world_size=4, init_method='...')\n >>> net = torch.nn.parallel.DistributedDataParallel(model, pg)\n \"\"\"\n\n def __init__(\n self,\n module,\n device_ids=None,\n output_device=None,\n dim=0,\n broadcast_buffers=True,\n process_group=None,\n bucket_cap_mb=25,\n find_unused_parameters=False,\n check_reduction=False,\n gradient_as_bucket_view=False,\n ):\n\n super(DistributedDataParallel, self).__init__()\n\n assert any((p.requires_grad for p in module.parameters())), (\n \"DistributedDataParallel is not needed when a module \"\n \"doesn't have any parameter that requires a gradient.\"\n )\n\n if device_ids is not None and len(device_ids) > 1:\n raise ValueError(\"device_ids can only be None or contain a single element.\")\n\n self.is_multi_device_module = len({p.device for p in module.parameters()}) > 1\n distinct_device_types = {p.device.type for p in module.parameters()}\n if len(distinct_device_types) != 1:\n raise ValueError(\n \"DistributedDataParallel's input module must be on \"\n \"the same type of devices, but input module parameters locate in {}.\".format(\n distinct_device_types\n )\n )\n self.device_type = list(distinct_device_types)[0]\n\n if (\n device_ids is None\n or len(device_ids) == 0 # For backward compatibility.\n or self.device_type == \"cpu\"\n or self.is_multi_device_module\n ):\n if device_ids or output_device:\n raise ValueError(\n \"DistributedDataParallel device_ids and output_device arguments \"\n \"only work with single-device/multiple-device GPU modules or CPU modules, \"\n \"but got device_ids {}, output_device {}, and module parameters {}.\".format(\n device_ids,\n output_device,\n {p.device for p in module.parameters()},\n )\n )\n\n self.device_ids = None\n self.output_device = None\n else:\n self.device_ids = [_get_device_index(x, True) for x in device_ids]\n\n if output_device is None:\n output_device = device_ids[0]\n\n self.output_device = _get_device_index(output_device, True)\n\n if process_group is None:\n self.process_group = _get_default_group()\n else:\n self.process_group = process_group\n\n self.dim = dim\n self.module = module\n self.device = list(self.module.parameters())[0].device\n self.broadcast_buffers = broadcast_buffers\n self.find_unused_parameters = find_unused_parameters\n self.require_backward_grad_sync = True\n self.require_forward_param_sync = True\n self.ddp_uneven_inputs_config = _DDPUnevenInputsConfig(\n 
ddp_join_enabled=False, ddp_join_divide_by_initial_world_size=False\n )\n self.gradient_as_bucket_view = gradient_as_bucket_view\n if hasattr(module, \"_ddp_params_and_buffers_to_ignore\"):\n self.parameters_to_ignore = module._ddp_params_and_buffers_to_ignore\n else:\n self.parameters_to_ignore = []\n\n if check_reduction:\n # This argument is no longer used since the reducer\n # will ensure reduction completes even if some parameters\n # do not receive gradients.\n warnings.warn(\n \"The `check_reduction` argument in `DistributedDataParallel` \"\n \"module is deprecated. Please avoid using it.\"\n )\n\n # Check that a module does not have Uninitialized parameters\n for param in module.parameters():\n if isinstance(param, torch.nn.parameter.UninitializedParameter):\n raise RuntimeError(\n \"Modules with uninitialized parameters can't be used with `DistributedDataParallel`. \"\n \"Run a dummy forward pass to correctly initialize the modules\"\n )\n # used for intra-node param sync and inter-node sync as wel\n self.broadcast_bucket_size = int(250 * 1024 * 1024)\n\n # reduction bucket size\n self.bucket_bytes_cap = int(bucket_cap_mb * 1024 * 1024)\n # Whether to perform input tensor CPU to GPU copies on a side-stream\n self.use_side_stream_for_tensor_copies = (\n os.environ.get(\"PYTORCH_DDP_USE_SIDE_STREAM\", \"1\") == \"1\"\n )\n\n # TODO(wayi@): Remove this field since SPMD is no longer supported,\n # and also remove all the relevant unnecessary loops.\n # Module replication within process (single-process multi device)\n self._module_copies = [self.module]\n # Build parameters for reducer.\n parameters, expect_sparse_gradient = self._build_params_for_reducer()\n # Verify model equivalence.\n dist._verify_model_across_ranks(self.process_group, parameters)\n # Sync params and buffers. Ensures all DDP models start off at the same value.\n self._sync_params_and_buffers(authoritative_rank=0)\n # Builds reducer.\n self._ddp_init_helper(parameters, expect_sparse_gradient)\n\n def _sync_params_and_buffers(self, authoritative_rank=0):\n module_states = []\n for name, param in self.module.state_dict().items():\n if name not in self.parameters_to_ignore:\n module_states.append(param)\n\n if len(module_states) > 0:\n self._distributed_broadcast_coalesced(\n module_states, self.broadcast_bucket_size, authoritative_rank\n )\n\n def _ddp_init_helper(self, parameters, expect_sparse_gradient):\n \"\"\"\n Initialization helper function that does the following:\n (1) bucketing the parameters for reductions\n (2) resetting the bucketing states\n (3) registering the grad hooks\n (4) Logging constructin-time DDP logging data\n (5) passing a handle of DDP to SyncBatchNorm Layer\n \"\"\"\n # The bucket size limit is specified in the constructor.\n # Additionally, we allow for a single small bucket for parameters\n # that are defined first, such that their gradients don't spill into\n # a much larger bucket, adding unnecessary latency after gradient\n # computation finishes. 
Experiments showed 1MB is a reasonable value.\n bucket_indices = dist._compute_bucket_assignment_by_size(\n parameters[0],\n [dist._DEFAULT_FIRST_BUCKET_BYTES, self.bucket_bytes_cap],\n expect_sparse_gradient[0],\n )\n\n # Note: reverse list of buckets because we want to approximate the\n # order in which their gradients are produced, and assume they\n # are used in the forward pass in the order they are defined.\n self.reducer = dist.Reducer(\n parameters,\n list(reversed(bucket_indices)),\n self.process_group,\n expect_sparse_gradient,\n self.bucket_bytes_cap,\n self.find_unused_parameters,\n self.gradient_as_bucket_view,\n )\n\n self.logger = dist.Logger(self.reducer)\n\n # Set logging data that can be got during construction time.\n self.logger.set_construction_data_and_log(\n self.module.__class__.__name__,\n [] if self.device_ids is None else self.device_ids,\n -1 if self.output_device is None else self.output_device,\n self.broadcast_buffers,\n )\n\n # passing a handle to torch.nn.SyncBatchNorm layer\n self._passing_sync_batchnorm_handle(self._module_copies)\n\n def __getstate__(self):\n self._check_default_group()\n attrs = copy.copy(self.__dict__)\n del attrs[\"process_group\"]\n del attrs[\"reducer\"]\n del attrs[\"logger\"]\n return attrs\n\n def __setstate__(self, state):\n # If serializable, then the process group should be the default one\n self.process_group = _get_default_group()\n super(DistributedDataParallel, self).__setstate__(state)\n self.__dict__.setdefault(\"require_forward_param_sync\", True)\n self.__dict__.setdefault(\"require_backward_grad_sync\", True)\n parameters, expect_sparse_gradient = self._build_params_for_reducer()\n self._ddp_init_helper(parameters, expect_sparse_gradient)\n\n def _build_params_for_reducer(self):\n # Build tuple of (module, parameter) for all parameters that require grads.\n modules_and_parameters = [\n [\n (module, parameter)\n for module_name, module in replica.named_modules()\n for parameter in [\n param\n # Note that we access module.named_parameters instead of\n # parameters(module). 
parameters(module) is only needed in the\n # single-process multi device case, where it accesses replicated\n # parameters through _former_parameters.\n for param_name, param in module.named_parameters(recurse=False)\n if param.requires_grad\n and f\"{module_name}.{param_name}\"\n not in self.parameters_to_ignore\n ]\n ]\n for replica in self._module_copies\n ]\n\n # Deduplicate any parameters that might be shared across child modules.\n memo = set()\n modules_and_parameters = [\n # \"p not in memo\" is the deduplication check.\n # \"not memo.add(p)\" is always True, and it's only there to cause \"add(p)\" if needed.\n [(m, p) for m, p in replica_mps if p not in memo and not memo.add(p)]\n for replica_mps in modules_and_parameters\n ]\n\n # Build list of parameters.\n parameters = [\n list(parameter for _, parameter in replica)\n for replica in modules_and_parameters\n ]\n\n # Checks if a module will produce a sparse gradient.\n def produces_sparse_gradient(module):\n if isinstance(module, torch.nn.Embedding) or isinstance(\n module, torch.nn.EmbeddingBag\n ):\n return module.sparse\n return False\n\n # Build list of booleans indicating whether or not to expect sparse\n # gradients for the corresponding parameters.\n expect_sparse_gradient = [\n list(produces_sparse_gradient(module) for module, _ in replica)\n for replica in modules_and_parameters\n ]\n\n # The following modules_params and modules_buffers are used for\n # param/buffer sync in _sync_params.\n self.modules_params = [\n list(self._get_parameters(m)) for m in self._module_copies\n ]\n # Collect buffers for modules, filtering out buffers that should be ignored.\n named_module_buffers = [\n [(buffer, buffer_name) for buffer_name, buffer in m.named_buffers()]\n for m in self._module_copies\n ]\n self.modules_buffers = [\n [\n buffer\n for (buffer, buffer_name) in module_buffers\n if buffer_name not in self.parameters_to_ignore\n ]\n for module_buffers in named_module_buffers\n ]\n\n return parameters, expect_sparse_gradient\n\n def _get_parameters(self, m, recurse=True):\n \"\"\"\n Returns a generator of module parameters\n \"\"\"\n\n def model_parameters(m):\n ps = (\n m._former_parameters.values()\n if hasattr(m, \"_former_parameters\")\n else m.parameters(recurse=False)\n )\n for p in ps:\n yield p\n\n for m in m.modules() if recurse else [m]:\n for p in model_parameters(m):\n yield p\n\n def _check_default_group(self):\n pickle_not_supported = False\n try:\n if self.process_group != _get_default_group():\n pickle_not_supported = True\n except RuntimeError:\n pickle_not_supported = True\n\n if pickle_not_supported:\n raise RuntimeError(\n \"DDP Pickling/Unpickling are only supported \"\n \"when using DDP with the default process \"\n \"group. That is, when you have called \"\n \"init_process_group and have not passed \"\n \"process_group argument to DDP constructor\"\n )\n\n @contextmanager\n def no_sync(self):\n r\"\"\"\n A context manager to disable gradient synchronizations across DDP\n processes. 
Within this context, gradients will be accumulated on module\n variables, which will later be synchronized in the first\n forward-backward pass exiting the context.\n\n Example::\n\n >>> ddp = torch.nn.parallel.DistributedDataParallel(model, pg)\n >>> with ddp.no_sync():\n >>> for input in inputs:\n >>> ddp(input).backward() # no synchronization, accumulate grads\n >>> ddp(another_input).backward() # synchronize grads\n \"\"\"\n old_require_backward_grad_sync = self.require_backward_grad_sync\n self.require_backward_grad_sync = False\n try:\n yield\n finally:\n self.require_backward_grad_sync = old_require_backward_grad_sync\n\n def forward(self, *inputs, **kwargs):\n self.reducer.save_thread_local_state()\n if torch.is_grad_enabled() and self.require_backward_grad_sync:\n self.logger.set_runtime_stats_and_log()\n self.reducer.prepare_for_forward()\n if self.ddp_uneven_inputs_config.ddp_join_enabled:\n ones = torch.ones(1, device=self.device)\n work = dist.all_reduce(ones, group=self.process_group, async_op=True)\n self.reducer._set_forward_pass_work_handle(\n work,\n self.ddp_uneven_inputs_config.ddp_join_divide_by_initial_world_size,\n )\n\n # Calling _rebuild_buckets before forward compuation,\n # It may allocate new buckets before deallocating old buckets\n # inside _rebuild_buckets. To save peak memory usage,\n # call _rebuild_buckets before the peak memory usage increases\n # during forward computation.\n # This should be called only once during whole training period.\n if torch.is_grad_enabled() and self.reducer._rebuild_buckets():\n logging.info(\"Reducer buckets have been rebuilt in this iteration.\")\n\n if self.require_forward_param_sync:\n self._sync_params()\n\n if self.ddp_uneven_inputs_config.ddp_join_enabled:\n # Notify joined ranks whether they should sync in backwards pass or not.\n self._check_global_requires_backward_grad_sync(is_joined_rank=False)\n\n if self.device_ids:\n inputs, kwargs = self.to_kwargs(inputs, kwargs, self.device_ids[0])\n output = self.module(*inputs[0], **kwargs[0])\n else:\n output = self.module(*inputs, **kwargs)\n\n if torch.is_grad_enabled() and self.require_backward_grad_sync:\n self.require_forward_param_sync = True\n # We'll return the output object verbatim since it is a freeform\n # object. We need to find any tensors in this object, though,\n # because we need to figure out which parameters were used during\n # this forward pass, to ensure we short circuit reduction for any\n # unused parameters. Only if `find_unused_parameters` is set.\n if self.find_unused_parameters:\n self.reducer.prepare_for_backward(list(_find_tensors(output)))\n else:\n self.reducer.prepare_for_backward([])\n else:\n self.require_forward_param_sync = False\n\n return output\n\n def scatter(self, inputs, kwargs, device_ids):\n return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)\n\n def _recursive_to(self, inputs, target_gpu):\n r\"\"\"\n Recursively moves input to the target_gpu.\n \"\"\"\n\n def to_map(obj):\n if isinstance(obj, torch.Tensor):\n if not self.use_side_stream_for_tensor_copies:\n return (obj.to(target_gpu),)\n else:\n # Perform CPU -> GPU copies in a background stream. 
This code is\n # motivated from similar logic in torch/nn/parallel/_functions.py\n stream = _get_stream(target_gpu)\n with torch.cuda.stream(stream):\n output = obj.to(target_gpu)\n # synchronize with the copy stream\n with torch.cuda.device(target_gpu):\n current_stream = torch.cuda.current_stream()\n # Sync the current stream with the copy stream\n current_stream.wait_stream(stream)\n # Ensure tensor memory is not reused until work on\n # main stream is complete\n output.record_stream(current_stream)\n return (output,)\n if is_namedtuple(obj):\n return [type(obj)(*args) for args in zip(*map(to_map, obj))]\n if isinstance(obj, tuple) and len(obj) > 0:\n return list(zip(*map(to_map, obj)))\n if isinstance(obj, list) and len(obj) > 0:\n return [list(i) for i in zip(*map(to_map, obj))]\n if isinstance(obj, dict) and len(obj) > 0:\n return [type(obj)(i) for i in zip(*map(to_map, obj.items()))]\n return [obj]\n\n # Avoid reference cycle\n try:\n res = to_map(inputs)\n finally:\n to_map = None\n return res\n\n def to_kwargs(self, inputs, kwargs, device_id):\n inputs = self._recursive_to(inputs, device_id) if inputs else []\n kwargs = self._recursive_to(kwargs, device_id) if kwargs else []\n if len(inputs) < len(kwargs):\n inputs.extend([() for _ in range(len(kwargs) - len(inputs))])\n elif len(kwargs) < len(inputs):\n kwargs.extend([{} for _ in range(len(inputs) - len(kwargs))])\n inputs = tuple(inputs)\n kwargs = tuple(kwargs)\n return inputs, kwargs\n\n def gather(self, outputs, output_device):\n return gather(outputs, output_device, dim=self.dim)\n\n def train(self, mode=True):\n super(DistributedDataParallel, self).train(mode)\n for module in self._module_copies[1:]:\n module.train(mode)\n return self\n\n # When running in join mode, schedules an allreduce to match the one in the\n # forward pass to determine the no. of currently active processes and whether\n # all processes have joined.\n def _schedule_shadow_all_reduce_for_fwd_pass(self):\n all_active_procs = torch.zeros(1, device=self.device)\n dist.all_reduce(all_active_procs, group=self.process_group)\n return all_active_procs.item()\n\n # When running in join mode, schedules an allreduce to notify joined ranks\n # of whether backwards pass synchronization will run this iteraton or not.\n def _check_global_requires_backward_grad_sync(self, is_joined_rank):\n if not is_joined_rank and self.require_backward_grad_sync:\n requires_sync_tensor = torch.ones(1, device=self.device)\n else:\n requires_sync_tensor = torch.zeros(1, device=self.device)\n\n work = dist.all_reduce(\n requires_sync_tensor, group=self.process_group, async_op=True\n )\n return work, requires_sync_tensor\n\n # When running in join mode, checks and performs sync of module buffers if\n # the models have buffers that should be synchronized in the forward pass.\n def _check_and_sync_module_buffers(self):\n if self.will_sync_module_buffers():\n authoritative_rank = self._find_common_rank(self._distributed_rank, False)\n self._distributed_broadcast_coalesced(\n self.modules_buffers[0], self.broadcast_bucket_size, authoritative_rank\n )\n\n # When running in join model, agrees upon a common rank and broadcast model\n # parameters to all other ranks.\n def _sync_final_model(self, is_last_joiner):\n # Agree upon the process that will be the authoritative model copy.\n # The current rank is a candidate for being the authoritative copy if\n # is_last_joiner=True. 
We break ties via picking the larger rank.\n self._authoritative_rank = self._find_common_rank(\n self._distributed_rank, is_last_joiner\n )\n self._sync_params_and_buffers(authoritative_rank=self._authoritative_rank)\n\n # Schedule allreduce ops to match those scheduled in the reducer's backward\n # pass.\n def _match_all_reduce_for_bwd_pass(self):\n allreduce_work = []\n # Schedule allreduce in the same order as Reducer schedules them, i.e.\n # the order of the buckets. Retrieving the bucket order from the reducer\n # ensures that we keep the same order in join mode, such as when bucket\n # order is rebuilt dynamically.\n all_bucket_tensors = self.reducer.get_bucket_tensors()\n for bucket_tensors in all_bucket_tensors:\n # Joined processes contribute zero gradient. In the case that\n # divide_by_initial_world_size=True, we divide grads by the static\n # world size, if not, the dividing factor is reduced by the number\n # of joined processes.\n zero_tensors = [torch.zeros_like(t) for t in bucket_tensors]\n work = self.process_group.allreduce(zero_tensors)\n allreduce_work.append(work)\n for work in allreduce_work:\n work.wait()\n\n # Allreduces the used parameter mapping across ranks.\n def _match_unused_params_allreduce(self):\n locally_used_param_maps = self.reducer._get_local_used_maps()\n self.process_group.allreduce(locally_used_param_maps)\n\n @contextmanager\n def join(self, divide_by_initial_world_size=True, enable=True):\n r\"\"\"\n A context manager to be used in conjunction with an instance of\n :class:`torch.nn.parallel.DistributedDataParallel` to be\n able to train with uneven inputs across participating processes.\n\n This context manager will keep track of already-joined DDP processes,\n and \"shadow\" the forward and backward passes by inserting collective\n communication operations to match with the ones created by non-joined\n DDP processes. This will ensure each collective call has a corresponding\n call by already-joined DDP processes, preventing hangs or errors that\n would otherwise happen when training with uneven inputs across\n processes.\n\n Once all DDP processes have joined, the context manager will broadcast\n the model corresponding to the last joined process to all processes to\n ensure the model is the same across all processes\n (which is guaranteed by DDP).\n\n To use this to enable training with uneven inputs across processes,\n simply wrap this context manager around your training loop. No further\n modifications to the model or data loading is required.\n\n .. warning::\n This module currently does not support custom distributed collective\n operations in the forward pass, such as ``SyncBatchNorm`` or other\n custom defined collectives in the model's forward pass.\n\n Args:\n divide_by_initial_world_size (bool): If ``True``, will divide\n gradients by the initial ``world_size`` DDP training was launched\n with. If ``False``, will compute the effective world size\n (number of ranks that have not depleted their inputs yet) and\n divide gradients by that during allreduce. Set\n ``divide_by_initial_world_size=True`` to ensure every input\n sample including the uneven inputs have equal weight in terms of\n how much they contribute to the global gradient. This is\n achieved by always dividing the gradient by the initial\n ``world_size`` even when we encounter uneven inputs. If you set\n this to ``False``, we divide the gradient by the remaining\n number of nodes. 
This ensures parity with training on a smaller\n ``world_size`` although it also means the uneven inputs would\n contribute more towards the global gradient. Typically, you\n would want to set this to ``True`` for cases where the last few\n inputs of your training job are uneven. In extreme cases, where\n there is a large discrepancy in the number of inputs, setting\n this to ``False`` might provide better results.\n enable (bool): Whether to enable uneven input detection or not. Pass\n in ``enable=False`` to disable in cases where you know that\n inputs are even across participating processes. Default is\n ``True``.\n\n\n Example::\n\n >>> import torch\n >>> import torch.distributed as dist\n >>> import os\n >>> import torch.multiprocessing as mp\n >>> import torch.nn as nn\n >>> # On each spawned worker\n >>> def worker(rank):\n >>> dist.init_process_group(\"nccl\", rank=rank, world_size=2)\n >>> torch.cuda.set_device(rank)\n >>> model = nn.Linear(1, 1, bias=False).to(rank)\n >>> model = torch.nn.parallel.DistributedDataParallel(\n >>> model, device_ids=[rank], output_device=rank\n >>> )\n >>> # Rank 1 gets one more input than rank 0.\n >>> inputs = [torch.tensor([1]).float() for _ in range(10 + rank)]\n >>> with model.join():\n >>> for _ in range(5):\n >>> for inp in inputs:\n >>> loss = model(inp).sum()\n >>> loss.backward()\n >>> # Without the join() API, the below synchronization will hang\n >>> # blocking for rank 1's allreduce to complete.\n >>> torch.cuda.synchronize(device=rank)\n \"\"\"\n # Log uneven input API usage.\n self.logger._set_uneven_input_join()\n try:\n has_error = False\n self.ddp_uneven_inputs_config = _DDPUnevenInputsConfig(\n ddp_join_enabled=enable,\n ddp_join_divide_by_initial_world_size=divide_by_initial_world_size,\n )\n yield\n except Exception as e:\n # Set to skip any processing in the finally block.\n has_error = True\n raise e\n finally:\n # Skip any processing to let the exception immediately be raised if\n # there was one.\n if enable and not has_error:\n all_procs_joined = False\n is_last_joiner = True\n i = 0\n WARN_THRESHOLD = 1000\n warnings.simplefilter(\"once\")\n while not all_procs_joined:\n if i > WARN_THRESHOLD:\n my_rank = self._distributed_rank\n warnings.warn(\n \"Detected uneven input skew of greater \"\n f\"than {WARN_THRESHOLD}. This means that rank {my_rank} \"\n f\"has at least {WARN_THRESHOLD} fewer inputs than \"\n \"other currently active ranks. This level of skew could \"\n \"lead to performance degradation during training.\"\n )\n # Schedules allreduce to match fwd pass allreduce in non-joined procs\n num_active_procs = self._schedule_shadow_all_reduce_for_fwd_pass()\n if num_active_procs == 0:\n all_procs_joined = True\n else:\n # Some DDP process still needs to be joined.\n if is_last_joiner:\n is_last_joiner = False\n # It will rebuild buckets only once during training period\n self.reducer._rebuild_buckets()\n # Schedule a corresponding broadcast if we are syncing module\n # buffers in the forward pass.\n self._check_and_sync_module_buffers()\n\n (\n work,\n should_sync_backwards_tensor,\n ) = self._check_global_requires_backward_grad_sync(\n is_joined_rank=True\n )\n work.wait()\n # If nonzero, then we should sync in the bwd pass.\n should_sync_backwards = should_sync_backwards_tensor.item() != 0\n # Forward param sync is disabled in the next iteration\n # if we are skipping grad sync this iteration. 
Hence, we\n # set require_forward_param_sync appropriately here.\n self.require_forward_param_sync = should_sync_backwards\n if not should_sync_backwards:\n continue\n # Schedules one allreduce per gradient bucket to match\n # the backwards pass allreduce.\n self._match_all_reduce_for_bwd_pass()\n # Check if we need to allreduce locally unused params.\n if self.find_unused_parameters:\n self._match_unused_params_allreduce()\n # It will push rebuilt params only once during training period\n self.reducer._push_all_rebuilt_params()\n i += 1\n\n # All procs joined. Agree on authoritative rank and broadcast the model.\n self._sync_final_model(is_last_joiner)\n\n def register_comm_hook(self, state: object, hook: callable):\n r\"\"\"\n Registers a communication hook which is an enhancement that provides a\n flexible hook to users where they can specify how DDP aggregates gradients\n across multiple workers.\n\n This hook would be very useful for researchers to try out new ideas. For\n example, this hook can be used to implement several algorithms like GossipGrad\n and gradient compression which involve different communication strategies for\n parameter syncs while running Distributed DataParallel training.\n\n Args:\n state (object): Passed to the hook to maintain any state information during the training process.\n Examples include error feedback in gradient compression,\n peers to communicate with next in GossipGrad, etc.\n\n It is locally stored by each worker\n and shared by all the gradient tensors on the worker.\n hook (callable): Averages gradient tensors across workers and defined as:\n ``hook(state: object, bucket: dist.GradBucket) -> torch.futures.Future``:\n\n This function is called once the bucket is ready. The\n hook can perform whatever processing is needed and return\n a Future indicating completion of any async work (ex: allreduce).\n If the hook doesn't perform any communication, it can also\n just return a completed Future. The Future should hold the\n new value of grad bucket's tensors. Once a bucket is ready,\n c10d reducer would call this hook and use the tensors returned\n by the Future and copy grads to individual parameters.\n\n We also provide an API called ``get_future`` to retrieve a\n Future associated with the completion of ``c10d.ProcessGroup.work``.\n\n .. warning ::\n Grad bucket's tensors will not be predivided by world_size. User is responsible\n to divide by the world_size in case of operations like allreduce.\n\n .. warning ::\n DDP communication hook can only be registered once and should be registered\n before calling backward.\n\n .. warning ::\n The Future object that hook returns should contain a result that has the same\n shape with the tensors inside grad bucket.\n\n .. warning ::\n DDP communication hook does not support single-process multiple-device mode.\n Gradbucket tensors should consist of only a single tensor.\n\n .. warning ::\n ``get_future`` API supports only NCCL backend and will return a ``torch._C.Future``\n which is an internal type and should be used with caution. It can still be used by\n ``register_comm_hook`` API, but it is subject to some subtle differences compared\n to ``torch.futures.Future``.\n\n .. 
warning ::\n DDP communication hook is experimental and subject to change.\n\n Example::\n Below is an example of a noop hook that returns the same tensors.\n\n >>> def noop(state: object, bucket: dist.GradBucket): -> torch.futures.Future\n >>> fut = torch.futures.Future()\n >>> fut.set_result(bucket.get_tensors())\n >>> return fut\n\n >>> ddp.register_comm_hook(state = None, hook = noop)\n\n Example::\n Below is an example of a Parallel SGD algorithm where gradients are encoded before\n allreduce, and then decoded after allreduce.\n\n >>> def encode_and_decode(state: object, bucket: dist.GradBucket): -> torch.futures.Future\n >>> tensors = [t / process_group.world_size for t in bucket.get_tensors()]\n >>> encoded_tensors = encode(tensors) # encode gradients\n >>> fut = process_group.allreduce(encoded_tensors).get_future()\n >>> # Define the then callback to decode.\n >>> def decode(fut):\n >>> decoded_tensors = decode(fut.value()) # decode gradients\n >>> return decoded_tensors\n >>> return fut.then(decode)\n\n >>> ddp.register_comm_hook(state = None, hook = encode_and_decode)\n \"\"\"\n self._check_comm_hook(hook)\n self.logger._set_comm_hook_name(hook.__qualname__)\n dist._register_comm_hook(self.reducer, state, hook)\n\n def _register_builtin_comm_hook(self, comm_hook_type):\n r\"\"\"\n Registers a built-in communication hook that specifies how DDP\n aggregates gradients across multiple workers.\n The built-in hooks aim to provide efficient C++ implementations for certain hooks,\n which might not be as efficient if implemented in Python using a Python communication hook.\n\n Args:\n comm_hook_type (dist.BuiltinCommHookType): type of communication hook, such as\n ALLREDUCE, FP16_COMPRESS, etc.\n\n .. warning ::\n DDP communication hook can only be registered once and should be registered\n before calling backward.\n\n .. warning ::\n DDP communication hook does not support single-process multiple-device mode.\n Gradbucket tensors should consist of only a single tensor.\n\n .. warning ::\n DDP communication hook is experimental and subject to change.\n\n Example::\n Below is an example of a FP16 compression where gradients are\n compressed into 16-bit floating-point numbers before allreduce, and\n then decompressed after allreduce.\n\n >>> ddp._register_builtin_comm_hook(dist.BuiltinCommHookType.FP16_COMPRESS)\n\n \"\"\"\n self.logger._set_comm_hook_name(str(comm_hook_type))\n dist._register_builtin_comm_hook(self.reducer, comm_hook_type)\n\n def _distributed_broadcast_coalesced(\n self, tensors, buffer_size, authoritative_rank=0\n ):\n dist._broadcast_coalesced(\n self.process_group, tensors, buffer_size, authoritative_rank\n )\n\n def will_sync_module_buffers(self):\n return (\n self.require_forward_param_sync\n and self.broadcast_buffers\n and len(self.modules_buffers[0]) > 0\n )\n\n def _find_common_rank(self, input_rank, rank_cond):\n # -1 indicates that this rank is not under consideration to be the\n # common_rank\n rank_to_use = torch.tensor(\n [input_rank if rank_cond else -1],\n device=self.device,\n )\n dist.all_reduce(rank_to_use, op=ReduceOp.MAX, group=self.process_group)\n if rank_to_use.item() == -1:\n raise ValueError(\n \"BUG! 
Expected rank_cond to be true for at least one process.\"\n )\n return rank_to_use.item()\n\n def _sync_params(self):\n with torch.no_grad():\n # module buffer sync\n if self.will_sync_module_buffers():\n # Synchronize buffers across processes.\n # If we are running DDP with the join manager, we have to agree\n # upon a rank to sync module buffers from, since rank 0 may\n # already have been joined and have stale module buffers.\n if self.ddp_uneven_inputs_config.ddp_join_enabled:\n authoritative_rank = self._find_common_rank(\n self._distributed_rank, True\n )\n else:\n # The process with rank 0 is considered the authoritative copy.\n authoritative_rank = 0\n self._distributed_broadcast_coalesced(\n self.modules_buffers[0],\n self.broadcast_bucket_size,\n authoritative_rank,\n )\n\n def _passing_sync_batchnorm_handle(self, module_copies):\n for dev_idx, module in enumerate(module_copies):\n for layer in module.modules():\n if isinstance(layer, torch.nn.modules.SyncBatchNorm):\n assert (\n self.device_type != \"cpu\"\n ), \"SyncBatchNorm layers only work with GPU modules\"\n layer._specify_ddp_gpu_num(1)\n\n def _check_comm_hook(self, hook):\n if not callable(hook):\n raise TypeError(\"Communication hook must be callable.\")\n\n sig = inspect.signature(hook)\n if (\n sig.parameters[\"bucket\"].annotation != inspect._empty\n and sig.parameters[\"bucket\"].annotation != dist.GradBucket\n ):\n raise ValueError(\n \"Communication hook: bucket annotation should be dist.GradBucket.\"\n )\n\n if sig.return_annotation != inspect._empty and (\n sig.return_annotation != torch.futures.Future\n and sig.return_annotation != torch._C.Future\n ):\n raise ValueError(\n \"Communication hook: return annotation should be torch.futures.Future or torch._C.Future.\"\n )\n\n @property\n def _distributed_rank(self):\n return dist.get_rank(self.process_group)\n\n @staticmethod\n def _set_params_and_buffers_to_ignore_for_model(\n module, params_and_buffers_to_ignore\n ):\n # This is a workaround to set parameters and buffers DDP should ignore\n # during synchronization. It will be removed when the API is finalized\n # as part of addressing https://github.com/pytorch/pytorch/issues/43690.\n module._ddp_params_and_buffers_to_ignore = params_and_buffers_to_ignore\n\n def get_ddp_logging_data(self):\n r\"\"\"\n This interface can be called after DistributedDataParallel() is\n constructed. It returns DDPLoggingData for debugging and analysis.\n More detailed explanation of the fields in DDPLoggingData are in\n ``torch/c10/util/Logging.h``.\n \"\"\"\n return self.logger._get_ddp_logging_data()\n\n def set_ddp_runtime_logging_sample_rate(self, sample_rate):\n r\"\"\"\n This interface allows users to set sample_rate of collecting\n runtime stats. The runtime stats will be recorded for the\n first 10 iterations, after 10 iteratons runtime stats will be\n recorded once every \"sample_rate\" training iterations. In\n default, runtime stats are recorded for the first 10 iterations,\n after 10 iterations runtime stats are recorded once every\n \"kDDPRuntimeLoggingSampleRate=100\" training iterations.\n \"\"\"\n if sample_rate < 1:\n raise ValueError(\n \"DDP runtime logging sample rate should be equal or greater than 1\"\n )\n self.reducer._set_ddp_runtime_logging_sample_rate(sample_rate)\n"
] | [
[
"torch.distributed.distributed_c10d._get_default_group",
"torch.distributed._register_comm_hook",
"torch.no_grad",
"torch._utils._get_device_index",
"torch.is_grad_enabled",
"torch.distributed.Logger",
"torch.distributed._register_builtin_comm_hook",
"torch.distributed.is_available",
"torch.distributed._broadcast_coalesced",
"torch.cuda.current_stream",
"torch.distributed._verify_model_across_ranks",
"torch.ones",
"torch.distributed.get_rank",
"torch.tensor",
"torch.distributed._compute_bucket_assignment_by_size",
"torch.distributed.all_reduce",
"torch.cuda.device",
"torch.distributed.rpc.is_available",
"torch.zeros_like",
"torch.zeros",
"torch.cuda.stream"
]
] |
fahmirevo/sign-language-recognition | [
"ff5e3f4ffb7ecba15667be8870db62717f1fab66"
] | [
"test.py"
] | [
"from keras.models import load_model\nimport numpy as np\n\nX = np.load(\"dataset/X_test.npy\")\nY = np.load(\"dataset/Y_test.npy\")\n\nmodel = load_model(\"model\")\n\nscore = model.evaluate(X, Y)\n\nprint(score[0], score[1])\n\n# print(np.argmax(model.predict(X[:200]), axis=1))\n# print(np.argmax(model.predict(X), axis=1) == np.argmax(Y, axis=1))\n# print(model.predict(X[:50]))\n"
] | [
[
"numpy.load"
]
] |
deloragaskins/deepchem | [
"234ab699cdb997e5963966a8b6926cb2cda7c064"
] | [
"deepchem/molnet/load_function/factors_datasets.py"
] | [
"\"\"\"\nFACTOR dataset loader\n\"\"\"\nimport os\nimport logging\nimport time\n\nimport numpy as np\nimport deepchem\nfrom deepchem.molnet.load_function.kaggle_features import merck_descriptors\n\nlogger = logging.getLogger(__name__)\n\nTRAIN_URL = \"https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/FACTORS_training_disguised_combined_full.csv.gz\"\nVALID_URL = \"https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/FACTORS_test1_disguised_combined_full.csv.gz\"\nTEST_URL = \"https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/FACTORS_test2_disguised_combined_full.csv.gz\"\n\nTRAIN_FILENAME = \"FACTORS_training_disguised_combined_full.csv.gz\"\nVALID_FILENAME = \"FACTORS_test1_disguised_combined_full.csv.gz\"\nTEST_FILENAME = \"FACTORS_test2_disguised_combined_full.csv.gz\"\n\n\ndef remove_missing_entries(dataset):\n \"\"\"Remove missing entries.\n\n Some of the datasets have missing entries that sneak in as zero'd out\n feature vectors. Get rid of them.\n \"\"\"\n for i, (X, y, w, ids) in enumerate(dataset.itershards()):\n available_rows = X.any(axis=1)\n logger.info(\"Shard %d has %d missing entries.\" %\n (i, np.count_nonzero(~available_rows)))\n X = X[available_rows]\n y = y[available_rows]\n w = w[available_rows]\n ids = ids[available_rows]\n dataset.set_shard(i, X, y, w, ids)\n\n\ndef get_transformers(train_dataset):\n \"\"\"Gets transformers applied to the dataset\"\"\"\n\n transformers = list()\n # TODO: Check if anything needs to be added\n\n return transformers\n\n\ndef gen_factors(FACTORS_tasks,\n data_dir,\n train_dir,\n valid_dir,\n test_dir,\n shard_size=2000):\n \"\"\"Loads the FACTORS dataset; does not do train/test split\"\"\"\n\n time1 = time.time()\n\n train_files = os.path.join(data_dir, TRAIN_FILENAME)\n valid_files = os.path.join(data_dir, VALID_FILENAME)\n test_files = os.path.join(data_dir, TEST_FILENAME)\n\n if not os.path.exists(train_files):\n logger.info(\"Downloading train file...\")\n deepchem.utils.data_utils.download_url(url=TRAIN_URL, dest_dir=data_dir)\n logger.info(\"Training file download complete.\")\n\n logger.info(\"Downloading validation file...\")\n deepchem.utils.data_utils.download_url(url=VALID_URL, dest_dir=data_dir)\n logger.info(\"Validation file download complete.\")\n\n logger.info(\"Downloading test file...\")\n deepchem.utils.data_utils.download_url(url=TEST_URL, dest_dir=data_dir)\n logger.info(\"Test file download complete\")\n\n # Featurize the FACTORS dataset\n logger.info(\"About to featurize the FACTORS dataset\")\n featurizer = deepchem.feat.UserDefinedFeaturizer(merck_descriptors)\n loader = deepchem.data.UserCSVLoader(\n tasks=FACTORS_tasks, id_field=\"Molecule\", featurizer=featurizer)\n\n logger.info(\"Featurizing the train dataset...\")\n train_dataset = loader.featurize(train_files, shard_size=shard_size)\n\n logger.info(\"Featurizing the validation dataset...\")\n valid_dataset = loader.featurize(valid_files, shard_size=shard_size)\n\n logger.info(\"Featurizing the test dataset...\")\n test_dataset = loader.featurize(test_files, shard_size=shard_size)\n\n logger.info(\"Remove missing entries from dataset\")\n remove_missing_entries(train_dataset)\n remove_missing_entries(valid_dataset)\n remove_missing_entries(test_dataset)\n\n # Shuffle the training data\n logger.info(\"Shuffling the training dataset\")\n train_dataset.sparse_shuffle()\n\n # Apply transformations\n logger.info(\"Transforming datasets with transformers\")\n transformers = get_transformers(train_dataset)\n\n for transformer in transformers:\n 
logger.info(\"Performing transformations with {}\".format(\n transformer.__class__.__name__))\n\n logger.info(\"Transforming the training dataset...\")\n train_dataset = transformer.transform(train_dataset)\n\n logger.info(\"Transforming the validation dataset...\")\n valid_dataset = transformer.transform(valid_dataset)\n\n logger.info(\"Transforming the test dataset...\")\n test_dataset = transformer.transform(test_dataset)\n\n logger.info(\"Transformations complete.\")\n logger.info(\"Moving datasets to corresponding directories\")\n\n train_dataset.move(train_dir)\n logger.info(\"Train dataset moved.\")\n\n valid_dataset.move(valid_dir)\n logger.info(\"Validation dataset moved.\")\n\n test_dataset.move(test_dir)\n logger.info(\"Test dataset moved.\")\n\n time2 = time.time()\n\n # TIMING\n logger.info(\"TIMING: FACTORS fitting took %0.3f s\" % (time2 - time1))\n\n return train_dataset, valid_dataset, test_dataset\n\n\ndef load_factors(shard_size=2000, featurizer=None, split=None, reload=True):\n \"\"\"Loads FACTOR dataset; does not do train/test split\n\n The Factors dataset is an in-house dataset from Merck that was first introduced in the following paper:\n Ramsundar, Bharath, et al. \"Is multitask deep learning practical for pharma?.\" Journal of chemical information and modeling 57.8 (2017): 2068-2076.\n\n It contains 1500 Merck in-house compounds that were measured\n for IC50 of inhibition on 12 serine proteases. Unlike most of\n the other datasets featured in MoleculeNet, the Factors\n collection does not have structures for the compounds tested\n since they were proprietary Merck compounds. However, the\n collection does feature pre-computed descriptors for these\n compounds.\n\n Note that the original train/valid/test split from the source\n data was preserved here, so this function doesn't allow for\n alternate modes of splitting. Similarly, since the source data\n came pre-featurized, it is not possible to apply alternative\n featurizations.\n\n Parameters\n ----------\n shard_size: int, optional\n Size of the DiskDataset shards to write on disk\n featurizer: optional\n Ignored since featurization pre-computed\n split: optional\n Ignored since split pre-computed\n reload: bool, optional\n Whether to automatically re-load from disk\n\n \"\"\"\n\n FACTORS_tasks = [\n 'T_00001', 'T_00002', 'T_00003', 'T_00004', 'T_00005', 'T_00006',\n 'T_00007', 'T_00008', 'T_00009', 'T_00010', 'T_00011', 'T_00012'\n ]\n\n data_dir = deepchem.utils.data_utils.get_data_dir()\n data_dir = os.path.join(data_dir, \"factors\")\n\n if not os.path.exists(data_dir):\n os.mkdir(data_dir)\n\n train_dir = os.path.join(data_dir, \"train_dir\")\n valid_dir = os.path.join(data_dir, \"valid_dir\")\n test_dir = os.path.join(data_dir, \"test_dir\")\n\n if (os.path.exists(train_dir) and os.path.exists(valid_dir) and\n os.path.exists(test_dir)):\n\n logger.info(\"Reloading existing datasets\")\n train_dataset = deepchem.data.DiskDataset(train_dir)\n valid_dataset = deepchem.data.DiskDataset(valid_dir)\n test_dataset = deepchem.data.DiskDataset(test_dir)\n\n else:\n logger.info(\"Featurizing datasets\")\n train_dataset, valid_dataset, test_dataset = gen_factors(\n FACTORS_tasks=FACTORS_tasks,\n data_dir=data_dir,\n train_dir=train_dir,\n valid_dir=valid_dir,\n test_dir=test_dir,\n shard_size=shard_size)\n\n transformers = get_transformers(train_dataset)\n\n return FACTORS_tasks, (train_dataset, valid_dataset,\n test_dataset), transformers\n"
] | [
[
"numpy.count_nonzero"
]
] |
msc5/junior-iw | [
"d356e015fcd3a3be638097a1acc02d5dea4751aa"
] | [
"src/data/datasets/BAIR/BAIR.py"
] | [
"import os\nimport io\nimport numpy as np\nfrom PIL import Image\nimport torch\n\nfrom torchvision.transforms import ToTensor\n\n\nclass BAIR (object):\n\n \"\"\"Data Handler that loads robot pushing data.\"\"\"\n\n def __init__(self, data_root, train=True, seq_len=20, image_size=64):\n self.root_dir = data_root\n if train:\n self.data_dir = '%s/processed_data/train' % self.root_dir\n self.ordered = False\n else:\n self.data_dir = '%s/processed_data/test' % self.root_dir\n self.ordered = True\n self.dirs = []\n for d1 in os.listdir(self.data_dir):\n for d2 in os.listdir('%s/%s' % (self.data_dir, d1)):\n self.dirs.append('%s/%s/%s' % (self.data_dir, d1, d2))\n self.seq_len = seq_len\n self.image_size = image_size\n self.seed_is_set = False # multi threaded loading\n self.d = 0\n self.totensor = ToTensor()\n\n def set_seed(self, seed):\n if not self.seed_is_set:\n self.seed_is_set = True\n np.random.seed(seed)\n\n def __len__(self):\n return len(self.dirs)\n\n def get_seq(self):\n if self.ordered:\n d = self.dirs[self.d]\n if self.d == len(self.dirs) - 1:\n self.d = 0\n else:\n self.d += 1\n else:\n d = self.dirs[np.random.randint(len(self.dirs))]\n image_seq = []\n for i in range(self.seq_len):\n fname = '%s/%d.png' % (d, i)\n # im = imread(fname).reshape(1, 64, 64, 3)\n # im = np.array(Image.open(fname)).reshape((1, 3, 64, 64))\n im = self.totensor(Image.open(fname)).reshape(1, 3, 64, 64)\n image_seq.append(im)\n image_seq = torch.cat(image_seq, axis=0)\n return image_seq\n\n def __getitem__(self, index):\n self.set_seed(index)\n return self.get_seq()\n\n\nif __name__ == \"__main__\":\n from torch.utils.data import DataLoader\n train_dataset = BAIR('src/data/datasets/BAIR/raw', train=True)\n train_dataloader = DataLoader(train_dataloader, batch_size=4)\n print(len(train_dataset, train_dataloader))\n"
] | [
[
"torch.utils.data.DataLoader",
"numpy.random.seed",
"torch.cat"
]
] |
glaswasser/python-vision | [
"706c314a86b8f35c313bb3e907ae84317dca1a0b"
] | [
"samples/snippets/detect/label-products.py"
] | [
"\nfrom detect import (detect_logos, detect_text)\nimport pandas as pd\nimport re\nimport os\n#from __future__ import print_function\nfrom google.cloud import vision\n\n\nimages_path = \"C:\\\\Users\\\\heinz\\\\Yagora GmbH\\\\Ievgen Kyrda - Crawler\\\\images\\\\foodnewsgermany_images/\"\nfile_names = os.listdir(os.path.dirname(images_path))\n\nfile_paths = [images_path + f for f in file_names]\n\nlogos = [detect_logos(f) for f in file_paths]\n\ntexts = [detect_text(f)[0].description for f in file_paths]\n# remove line break symbols\ntexts = [x.replace(\"\\n\", \", \") for x in texts]\n\ncontained = []\n#contained[1] = \"test\"\nfor i in range(len(logos)): # loop over future rows of df\n tmp = []\n for j in logos[i]: # for every logo-row, check if in text\n if j.lower() in texts[i].lower():\n tmp.append(logos[i])\n else:\n tmp.append(None)\n contained.append(tmp)\n\ndetect_df = pd.DataFrame(\n list(zip(file_names, texts, logos, contained, file_paths)),\n columns = [\"files\", \"texts\", \"logos\", \"probable_brand\", \"file_path\"]\n)\ndetect_df\n\n# other ideas:\n# if logo in existing logos, add logo\n\n\n\nfrom PIL import Image\nfrom io import BytesIO\nfrom IPython.display import HTML\nimport base64\n\n\npd.set_option('display.max_colwidth', -1)\n\ndef get_thumbnail(path):\n i = Image.open(path)\n i.thumbnail((150, 150), Image.LANCZOS)\n return i\n\n\ndef image_base64(im):\n if isinstance(im, str):\n im = get_thumbnail(im)\n with BytesIO() as buffer:\n im.save(buffer, 'jpeg')\n return base64.b64encode(buffer.getvalue()).decode()\n\ndef image_formatter(im):\n return f'<img src=\"data:image/jpeg;base64,{image_base64(im)}\">'\n\n#dogs['file'] = dogs.id.map(lambda id: f'../input/train/{id}.jpg')\n\ndetect_df['image'] = detect_df.file_path.map(lambda f: get_thumbnail(f))\n\nHTML(detect_df.to_html(formatters={'image': image_formatter}, escape=False))"
] | [
[
"pandas.set_option"
]
] |
mustafamerttunali/Tensorflow-Training-GUI | [
"1992185fd18e768f30c5bb5edd08ea709be97b09"
] | [
"tests/test_basic.py"
] | [
"import os\nimport numpy as np\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\nfrom multiprocessing import Process\n\n\ndef startTensorboard(logdir):\n # Start tensorboard with system call\n os.system(\"tensorboard --logdir {}\".format(logdir))\n\n\ndef fitModel():\n # Create your model\n model = Sequential()\n model.add(Dense(32, activation='relu', input_dim=100))\n model.add(Dense(1, activation='sigmoid'))\n model.compile(optimizer='rmsprop',\n loss='binary_crossentropy',\n metrics=['accuracy'])\n\n # Some mock training data\n data = np.random.random((1000, 100))\n labels = np.random.randint(2, size=(1000, 1))\n\n # Run the fit function\n model.fit(data, labels, epochs=100, batch_size=32)\n\n\nif __name__ == '__main__':\n # Run both processes simultaneously\n Process(target=startTensorboard, args=(\"logs\",)).start()\n Process(target=fitModel).start()"
] | [
[
"numpy.random.random",
"tensorflow.keras.models.Sequential",
"numpy.random.randint",
"tensorflow.keras.layers.Dense"
]
] |
YonginKwon/glow | [
"7d316d028e1792534416755bf80af422adccdaa9"
] | [
"torch_glow/tests/nodes/adaptive_avg_pool2d_test.py"
] | [
"from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport torch\nimport torch.nn.functional as F\n\nfrom tests.utils import jitVsGlow\nimport unittest\n\n\nclass TestAdaptiveAvgPool2d(unittest.TestCase):\n def test_adaptive_avg_pool2d_basic(self):\n \"\"\"Basic test of PyTorch adaptive_avg_pool2d Node.\"\"\"\n\n def test_f(inputs):\n return F.adaptive_avg_pool2d(inputs, (5, 5))\n\n inputs = torch.randn(3, 6, 14, 14)\n\n jitVsGlow(test_f, inputs, expected_fused_ops={\n \"aten::adaptive_avg_pool2d\"})\n\n def test_adaptive_avg_pool2d_nonsquare_inputs(self):\n \"\"\"Test of PyTorch adaptive_avg_pool2d Node with non-square inputs.\"\"\"\n\n def test_f(inputs):\n return F.adaptive_avg_pool2d(inputs, (3, 3))\n\n inputs = torch.randn(3, 6, 13, 14)\n\n jitVsGlow(test_f, inputs, expected_fused_ops={\n \"aten::adaptive_avg_pool2d\"})\n\n def test_adaptive_avg_pool2d_nonsquare_outputs(self):\n \"\"\"Test of PyTorch adaptive_avg_pool2d Node with non-square outputs.\"\"\"\n\n def test_f(inputs):\n return F.adaptive_avg_pool2d(inputs, (5, 3))\n\n inputs = torch.randn(3, 6, 14, 14)\n\n jitVsGlow(test_f, inputs, expected_fused_ops={\n \"aten::adaptive_avg_pool2d\"})\n"
] | [
[
"torch.randn",
"torch.nn.functional.adaptive_avg_pool2d"
]
] |
sdat2/seager19 | [
"9c3acbc5332da787de1eda2600a82490ff20fa11"
] | [
"src/visualisation/arrow.py"
] | [
"\"\"\"Arrow plots for mechanism.\"\"\"\nimport os\nfrom src.plot_utils import ps_defaults\nfrom src.constants import FIGURE_PATH\nfrom typing import Optional\nimport matplotlib.pyplot as plt\n\n\ndef plot_arrow_plot(save_path: Optional[str] = None, show_plots: bool = False) -> None:\n \"\"\"\n Plot the arrow plot to show that I have reproduced the paper.\n\n Args:\n save_path (Optional[str], optional): Where to save the plot to.\n Defaults to None. If None will not save.\n show_plots (bool, optional): Whether to show plots. Defaults to False.\n \"\"\"\n ps_defaults(use_tex=False)\n\n color_d = {\n \"EEEE\": \"blue\",\n \"EECE\": \"green\",\n \"EEEC\": \"orange\",\n \"EECC\": \"red\",\n }\n\n def plot_error(x: float, y: float, yerr: float, mem: str) -> None:\n plt.fill_between(\n [x - 0.2, x + 0.2],\n [y + yerr, y + yerr],\n [y - yerr, y - yerr],\n color=color_d[mem],\n alpha=0.5,\n )\n plt.plot([x - 0.2, x + 0.2], [y, y], \"black\", linewidth=1)\n\n xlim = [0.5, 3.5]\n head_length = 0.02\n decrease_arrow = 0.01\n ax = plt.axes()\n ecmwf = 0.411\n # ax.arrow(0, 0, 0, 1, head_width=0.02, head_length=0.02, fc='k', ec='k')\n ax.arrow(\n 1,\n ecmwf,\n 0,\n 0.054 - head_length - decrease_arrow,\n head_width=0.02,\n head_length=head_length,\n fc=\"k\",\n ec=\"k\",\n )\n plot_error(1, ecmwf + 0.054, 0.005, \"EECE\")\n ax.arrow(\n 2,\n ecmwf,\n 0,\n 0.31 - head_length - decrease_arrow,\n head_width=0.02,\n head_length=head_length,\n fc=\"k\",\n ec=\"k\",\n )\n plot_error(2, ecmwf + 0.31, 0.03, \"EEEC\")\n ax.arrow(\n 3,\n ecmwf,\n 0,\n 0.47 - head_length - decrease_arrow,\n head_width=0.02,\n head_length=head_length,\n fc=\"k\",\n ec=\"k\",\n )\n plot_error(3, ecmwf + 0.47, 0.04, \"EECC\")\n plt.plot(xlim, [ecmwf, ecmwf], color=\"blue\", label=\"ECMWF/ORAS4 $= 0.411$ K \")\n plt.plot(\n xlim, [ecmwf + 0.478, ecmwf + 0.478], color=\"red\", label=\"CMIP5 MMM $= 0.889$ K\"\n )\n\n # plt.xticks([0, 1, 2, 3], [\"ECMWF\", \"W\", \"RH\", \"RH+W\"])\n plt.xticks(\n [1, 2, 3],\n [\n \"W\\n\" + r\"$+ 0.054 \\pm 0.005$ K \",\n \"RH\\n \" + r\"$+ 0.31 \\pm 0.03$ K\",\n \"RH+W\\n \" + r\"$+ 0.47 \\pm 0.04$ K\",\n ],\n )\n\n plt.xlim(xlim)\n plt.ylabel(\"1958-2017, Trend in nino3.4 [K]\")\n\n plt.legend(\n bbox_to_anchor=(0.0, 1.02, 1, 0.102),\n loc=\"lower left\",\n mode=\"expand\",\n ncol=2,\n )\n plt.tight_layout()\n\n if save_path is not None:\n plt.savefig(save_path)\n if show_plots:\n plt.show()\n else:\n plt.clf()\n\n\ndef plot_arrow_plot_6(\n save_path: Optional[str] = None, show_plots: bool = False\n) -> None:\n \"\"\"\n Plot the arrow plot to show how it performs in cmip6.\n\n Args:\n save_path (Optional[str], optional): Where to save the plot to.\n Defaults to None. If None will not save.\n show_plots (bool, optional): Whether to show plots. 
Defaults to False.\n \"\"\"\n ps_defaults(use_tex=False)\n\n color_d = {\n \"EEEE\": \"blue\",\n \"EECE\": \"green\",\n \"EEEC\": \"orange\",\n \"EECC\": \"red\",\n }\n\n def plot_error(x: float, y: float, yerr: float, mem: str) -> None:\n plt.fill_between(\n [x - 0.2, x + 0.2],\n [y + yerr, y + yerr],\n [y - yerr, y - yerr],\n color=color_d[mem],\n alpha=0.5,\n )\n plt.plot([x - 0.2, x + 0.2], [y, y], \"black\", linewidth=1)\n\n xlim = [0.5, 3.5]\n head_length = 0.02\n decrease_arrow = 0.01\n ax = plt.axes()\n ecmwf = 0.411\n # ax.arrow(0, 0, 0, 1, head_width=0.02, head_length=0.02, fc='k', ec='k')\n wind = 0.07\n wind_error = 0.01\n rh = 0.15\n rh_error = 0.02\n cmip6 = 0.772\n rh_and_wind = 0.29\n rh_and_wind_error = 0.04\n\n ax.arrow(\n 1,\n ecmwf,\n 0,\n wind - head_length - decrease_arrow,\n head_width=0.02,\n head_length=head_length,\n fc=\"k\",\n ec=\"k\",\n )\n plot_error(1, ecmwf + wind, wind_error, \"EECE\")\n ax.arrow(\n 2,\n ecmwf,\n 0,\n rh - head_length - decrease_arrow,\n head_width=0.02,\n head_length=head_length,\n fc=\"k\",\n ec=\"k\",\n )\n plot_error(2, ecmwf + rh, rh_error, \"EEEC\")\n ax.arrow(\n 3,\n ecmwf,\n 0,\n rh_and_wind - head_length - decrease_arrow,\n head_width=0.02,\n head_length=head_length,\n fc=\"k\",\n ec=\"k\",\n )\n plot_error(3, ecmwf + rh_and_wind, rh_and_wind_error, \"EECC\")\n plt.plot(xlim, [ecmwf, ecmwf], color=\"blue\", label=\"ECMWF/ORAS4 $= 0.411$ K \")\n plt.plot(\n xlim,\n [cmip6, cmip6],\n color=\"red\",\n label=\"CMIP6 MMM $= 0.772$ K\",\n )\n\n # plt.xticks([0, 1, 2, 3], [\"ECMWF\", \"W\", \"RH\", \"RH+W\"])\n plt.xticks(\n [1, 2, 3],\n [\n \"W\\n\"\n + r\"$+ $\"\n + str(wind)\n + r\" $\\pm$ \"\n + r\"$\"\n + str(wind_error)\n + r\"$\"\n + \" K \",\n \"RH\\n \" + r\"$+ $ $0.15$ $\\pm$ $0.02$ K\",\n \"RH+W\\n \" + r\"$+ $ $0.29$ $\\pm$ $0.04$ K\",\n ],\n )\n\n plt.xlim(xlim)\n plt.ylabel(\"1958-2017, Trend in nino3.4 [K]\")\n\n plt.legend(\n bbox_to_anchor=(0.0, 1.02, 1, 0.102),\n loc=\"lower left\",\n mode=\"expand\",\n ncol=2,\n )\n plt.tight_layout()\n\n if save_path is not None:\n plt.savefig(save_path)\n if show_plots:\n plt.show()\n else:\n plt.clf()\n\n\nif __name__ == \"__main__\":\n # python src/visualisation.arrow()\n plot_arrow_plot_6(save_path=os.path.join(FIGURE_PATH, \"mech_arrow_cmip6.pdf\"))\n plot_arrow_plot_6(save_path=os.path.join(FIGURE_PATH, \"mech_arrow_cmip6.png\"))\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.fill_between"
]
] |
vespos/pcdsdevices | [
"7c4728df62ea58b6491d1cb36bb39d27d6dd9fca"
] | [
"pcdsdevices/tests/test_ccm.py"
] | [
"import logging\nimport time\n\nimport numpy as np\nimport pytest\nfrom ophyd.sim import fake_device_cache, make_fake_device\n\nfrom .. import ccm\nfrom ..sim import FastMotor\n\nlogger = logging.getLogger(__name__)\n\n\nSAMPLE_ALIO = 4.575 # Current value as of writing this file\nSAMPLE_THETA = 1.2 # Modest angle\nSAMPLE_WAVELENGTH = 1.5 # hard xray\n\n\n# Make sure the calcs are properly inverted\ndef test_theta_alio_inversion():\n logger.debug('test_theta_alio_inversion')\n theta = ccm.alio_to_theta(SAMPLE_ALIO, ccm.default_theta0, ccm.default_gr,\n ccm.default_gd)\n alio_calc = ccm.theta_to_alio(theta, ccm.default_theta0, ccm.default_gr,\n ccm.default_gd)\n # Unlike the other inversions, this is just an approximation\n assert np.isclose(alio_calc, SAMPLE_ALIO)\n\n\ndef test_wavelength_theta_inversion():\n logger.debug('test_wavelength_theta_inversion')\n wavelength = ccm.theta_to_wavelength(SAMPLE_THETA, ccm.default_dspacing)\n theta = ccm.wavelength_to_theta(wavelength, ccm.default_dspacing)\n logger.debug('%s, %s', wavelength, theta)\n assert np.isclose(theta, SAMPLE_THETA)\n theta = ccm.wavelength_to_theta(SAMPLE_WAVELENGTH, ccm.default_dspacing)\n wavelength = ccm.theta_to_wavelength(theta, ccm.default_dspacing)\n logger.debug('%s, %s', wavelength, theta)\n assert np.isclose(wavelength, SAMPLE_WAVELENGTH)\n\n\ndef test_energy_wavelength_inversion():\n logger.debug('test_energy_wavelength_inversion')\n energy = ccm.wavelength_to_energy(SAMPLE_WAVELENGTH)\n wavelength_calc = ccm.energy_to_wavelength(energy)\n assert wavelength_calc == SAMPLE_WAVELENGTH\n\n\[email protected](scope='function')\ndef fake_ccm():\n return make_fake_ccm()\n\n\nclass FakeAlio(FastMotor):\n kill = None\n home = None\n\n\ndef make_fake_ccm():\n fake_device_cache[ccm.CCMMotor] = FastMotor\n fake_device_cache[ccm.CCMAlio] = FakeAlio\n FakeCCM = make_fake_device(ccm.CCM)\n fake_ccm = FakeCCM(alio_prefix='ALIO', theta2fine_prefix='THETA',\n theta2coarse_prefix='THTA', chi2_prefix='CHI',\n x_down_prefix='X:DOWN', x_up_prefix='X:UP',\n y_down_prefix='Y:DOWN', y_up_north_prefix='Y:UP:NORTH',\n y_up_south_prefix='Y:UP:SOUTH', in_pos=8, out_pos=0,\n name='fake_ccm')\n\n def init_pos(mot, pos=0):\n mot.user_readback.sim_put(0)\n mot.user_setpoint.sim_put(0)\n mot.user_setpoint.sim_set_limits((0, 0))\n mot.motor_spg.sim_put(2)\n mot.part_number.sim_put('tasdf')\n\n init_pos(fake_ccm.x.down)\n init_pos(fake_ccm.x.up)\n init_pos(fake_ccm.y.down)\n init_pos(fake_ccm.y.up_north)\n init_pos(fake_ccm.y.up_south)\n\n fake_ccm.alio.set(SAMPLE_ALIO)\n fake_ccm.energy.alio.set(SAMPLE_ALIO)\n fake_ccm.energy_with_vernier.alio.set(SAMPLE_ALIO)\n fake_ccm.energy_with_vernier.vernier.setpoint.sim_put(0)\n\n return fake_ccm\n\n\ndef test_fake_ccm(fake_ccm):\n logger.debug('test_fake_ccm')\n fake_ccm.get()\n\n\n# Make sure we set up the forward/inverse to use the right methods\ndef test_ccm_calc(fake_ccm):\n logger.debug('test_ccm_calc')\n calc = fake_ccm.energy\n\n logger.debug('physics pos is %s', calc.position)\n logger.debug('real pos is %s', calc.real_position)\n logger.debug('sample alio is %s', SAMPLE_ALIO)\n\n theta_func = ccm.alio_to_theta(\n SAMPLE_ALIO,\n calc.theta0_rad_val,\n calc.gr_val,\n calc.gd_val,\n )\n wavelength_func = ccm.theta_to_wavelength(theta_func, calc.dspacing_val)\n energy_func = ccm.wavelength_to_energy(wavelength_func)\n energy = calc.energy.position\n assert energy == energy_func\n\n calc.alio.move(0)\n calc.move(energy, wait=False)\n assert np.isclose(calc.alio.position, SAMPLE_ALIO)\n\n 
calc.alio.move(calc.alio.position)\n calc.move(energy=calc.energy.position, wait=False)\n assert np.isclose(calc.alio.position, SAMPLE_ALIO)\n\n\n# Make sure sync'd axes work and that unk/in/out states work\[email protected](5)\ndef test_ccm_main(fake_ccm):\n logger.debug('test_ccm_main')\n fake_ccm.y.move(5, wait=False)\n assert fake_ccm.y.down.user_setpoint.get() == 5\n assert fake_ccm.y.up_north.user_setpoint.get() == 5\n assert fake_ccm.y.up_south.user_setpoint.get() == 5\n\n assert fake_ccm.removed\n assert not fake_ccm.inserted\n\n fake_ccm.x.down.user_readback.sim_put(8)\n fake_ccm.x.up.user_readback.sim_put(8)\n assert not fake_ccm.removed\n assert fake_ccm.inserted\n\n fake_ccm.x.down.user_readback.sim_put(4)\n fake_ccm.x.up.user_readback.sim_put(4)\n assert not fake_ccm.removed\n assert not fake_ccm.inserted\n\n fake_ccm.insert(wait=False)\n assert fake_ccm.x.down.user_setpoint.get() == 8\n assert fake_ccm.x.up.user_setpoint.get() == 8\n\n fake_ccm.remove(wait=False)\n assert fake_ccm.x.down.user_setpoint.get() == 0\n assert fake_ccm.x.up.user_setpoint.get() == 0\n\n\[email protected](5)\ndef test_vernier(fake_ccm):\n logger.debug('test_vernier')\n\n pseudopos = fake_ccm.energy_with_vernier\n\n # Moving with vernier should move the energy request motor too\n pseudopos.move(7, wait=False)\n assert np.isclose(pseudopos.energy.position, 7)\n assert pseudopos.vernier.position == 7000\n\n pseudopos.move(8, wait=False)\n assert np.isclose(pseudopos.energy.position, 8)\n assert pseudopos.vernier.position == 8000\n\n pseudopos.move(9, wait=False)\n assert np.isclose(pseudopos.energy.position, 9)\n assert pseudopos.vernier.position == 9000\n\n # Small moves (less than 30eV) should be skipped on the energy request\n pseudopos.move(9.001, wait=False)\n assert np.isclose(pseudopos.energy.position, 9.001)\n assert pseudopos.vernier.position == 9000\n\n # Unless we set the option for not skipping them\n pseudopos.vernier.skip_small_moves = False\n pseudopos.move(9.002, wait=False)\n assert np.isclose(pseudopos.energy.position, 9.002)\n assert pseudopos.vernier.position == 9002\n\n\[email protected](5)\ndef test_set_current_position(fake_ccm):\n logger.debug('test_set_current_position')\n mot = fake_ccm.energy.energy\n for energy in range(6, 14):\n mot.set_current_position(energy)\n assert np.isclose(mot.position, energy)\n\n\[email protected](5)\ndef test_check_valid_constant(fake_ccm):\n logger.debug('test_check_valid_constant')\n\n # First call to make_valid sends the first monitor update\n def make_valid(sig, valid):\n if valid:\n sig.put(1)\n else:\n sig.put(0)\n\n def make_conn(sig, conn):\n sig._metadata['connected'] = conn\n\n def output(sig):\n return fake_ccm._check_valid_constant(sig, sig.get())\n\n test_sig = fake_ccm.dspacing\n\n # Can we get to all the enum values?\n make_conn(test_sig, False)\n assert output(test_sig) == ccm.CCMConstantWarning.ALWAYS_DISCONNECT\n make_conn(test_sig, True)\n make_valid(test_sig, False)\n assert output(test_sig) == ccm.CCMConstantWarning.INVALID_CONNECT\n make_conn(test_sig, False)\n assert output(test_sig) == ccm.CCMConstantWarning.INVALID_DISCONNECT\n make_conn(test_sig, True)\n make_valid(test_sig, True)\n assert output(test_sig) == ccm.CCMConstantWarning.NO_WARNING\n make_conn(test_sig, False)\n assert output(test_sig) == ccm.CCMConstantWarning.VALID_DISCONNECT\n\n # theta0_deg is allowed to be zero, unlike the others\n test_sig2 = fake_ccm.theta0_deg\n make_conn(test_sig2, True)\n make_valid(test_sig2, False)\n assert output(test_sig2) == 
ccm.CCMConstantWarning.NO_WARNING\n\n\[email protected](5)\ndef test_show_constant_warning(fake_ccm, caplog):\n logger.debug('test_show_constant_warning')\n for warning in (\n ccm.CCMConstantWarning.NO_WARNING,\n ccm.CCMConstantWarning.ALWAYS_DISCONNECT,\n ccm.CCMConstantWarning.VALID_DISCONNECT,\n ccm.CCMConstantWarning.INVALID_DISCONNECT,\n ccm.CCMConstantWarning.INVALID_CONNECT,\n ):\n caplog.clear()\n with caplog.at_level(logging.WARNING):\n fake_ccm._show_constant_warning(\n warning,\n fake_ccm.dspacing,\n 0.111111,\n 0.222222,\n )\n if warning == ccm.CCMConstantWarning.NO_WARNING:\n assert len(caplog.records) == 0\n else:\n assert len(caplog.records) == 1\n\n\[email protected](5)\ndef test_warn_invalid_constants(fake_ccm, caplog):\n logger.debug('test_warn_invalid_constants')\n # Trick the warning into thinking we've be initialized for a while\n fake_ccm._init_time = time.monotonic() - 1000\n fake_ccm.theta0_deg.put(0)\n fake_ccm.dspacing.put(0)\n fake_ccm.gr.put(0)\n fake_ccm.gd.put(0)\n # We expect three warnings from the fake PVs that start at 0\n caplog.clear()\n with caplog.at_level(logging.WARNING):\n fake_ccm.warn_invalid_constants(only_new=False)\n assert len(caplog.records) == 3\n # We expect the warnings to not repeat\n caplog.clear()\n fake_ccm.warn_invalid_constants(only_new=True)\n assert len(caplog.records) == 0\n # Unless we ask them to\n caplog.clear()\n fake_ccm.warn_invalid_constants(only_new=False)\n assert len(caplog.records) == 3\n # Let's fix the issue and make sure no warnings are shown\n fake_ccm.reset_calc_constant_defaults(confirm=False)\n caplog.clear()\n fake_ccm.warn_invalid_constants(only_new=False)\n assert len(caplog.records) == 0\n\n\[email protected](5)\ndef test_disconnected_ccm():\n ccm.CCM(alio_prefix='ALIO', theta2fine_prefix='THETA',\n theta2coarse_prefix='THTA', chi2_prefix='CHI',\n x_down_prefix='X:DOWN', x_up_prefix='X:UP',\n y_down_prefix='Y:DOWN', y_up_north_prefix='Y:UP:NORTH',\n y_up_south_prefix='Y:UP:SOUTH', in_pos=8, out_pos=0,\n name='ccm')\n"
] | [
[
"numpy.isclose"
]
] |
ElliotCheung/simpeg | [
"ce5bde154179ca63798a62a12787a7ec3535472c"
] | [
"SimPEG/electromagnetics/analytics/FDEM.py"
] | [
"from __future__ import division\nimport numpy as np\nfrom scipy.constants import mu_0, pi, epsilon_0\nfrom scipy.special import erf\nfrom SimPEG import utils\nimport warnings\n\n\ndef hzAnalyticDipoleF(r, freq, sigma, secondary=True, mu=mu_0):\n \"\"\"\n The analytical expression is given in Equation 4.56 in Ward and Hohmann,\n 1988, and the example reproduces their Figure 4.2.\n\n\n .. plot::\n\n import numpy as np\n import matplotlib.pyplot as plt\n from SimPEG import electromagnetics as EM\n freq = np.logspace(-1, 5, 301)\n test = EM.analytics.hzAnalyticDipoleF(\n 100, freq, 0.01, secondary=False)\n plt.loglog(freq, test.real, 'C0-', label='Real')\n plt.loglog(freq, -test.real, 'C0--')\n plt.loglog(freq, test.imag, 'C1-', label='Imaginary')\n plt.loglog(freq, -test.imag, 'C1--')\n plt.title('Response at $r=100$ m')\n plt.xlim([1e-1, 1e5])\n plt.ylim([1e-12, 1e-6])\n plt.xlabel('Frequency (Hz)')\n plt.ylabel('$H_z$ (A/m)')\n plt.legend(loc=6)\n plt.show()\n\n\n **Reference**\n\n - Ward, S. H., and G. W. Hohmann, 1988, Electromagnetic theory for\n geophysical applications, Chapter 4 of Electromagnetic Methods in Applied\n Geophysics: SEG, Investigations in Geophysics No. 3, 130--311; DOI:\n `10.1190/1.9781560802631.ch4\n <https://doi.org/10.1190/1.9781560802631.ch4>`_.\n\n \"\"\"\n r = np.abs(r)\n k = np.sqrt(-1j * 2.0 * np.pi * freq * mu * sigma)\n\n m = 1\n front = m / (2.0 * np.pi * (k**2) * (r**5))\n back = 9 - (\n 9 + 9j * k * r - 4 * (k**2) * (r**2) - 1j * (k**3) * (r**3)\n ) * np.exp(-1j * k * r)\n hz = front * back\n\n if secondary:\n hp = -1 / (4 * np.pi * r**3)\n hz = hz - hp\n\n if hz.ndim == 1:\n hz = utils.mkvc(hz, 2)\n\n return hz\n\n\ndef MagneticDipoleWholeSpace(\n XYZ, srcLoc, sig, f, moment, fieldType=\"b\", mu_r=1, eps_r=1, **kwargs\n):\n \"\"\"\n Analytical solution for a dipole in a whole-space.\n\n The analytical expression is given in Equation 2.57 in Ward and Hohmann,\n 1988, and the example reproduces their Figure 2.2.\n\n TODOs:\n - set it up to instead take a mesh & survey\n - add divide by zero safety\n\n\n .. plot::\n\n import numpy as np\n from SimPEG import electromagnetics as EM\n import matplotlib.pyplot as plt\n from scipy.constants import mu_0\n freqs = np.logspace(-2, 5, 301)\n Bx, By, Bz = EM.analytics.FDEM.MagneticDipoleWholeSpace(\n [0, 100, 0], [0, 0, 0], 1e-2, freqs, moment='Z')\n plt.figure()\n plt.loglog(freqs, Bz.real/mu_0, 'C0', label='Real')\n plt.loglog(freqs, -Bz.real/mu_0, 'C0--')\n plt.loglog(freqs, Bz.imag/mu_0, 'C1', label='Imaginary')\n plt.loglog(freqs, -Bz.imag/mu_0, 'C1--')\n plt.legend()\n plt.xlim([1e-2, 1e5])\n plt.ylim([1e-13, 1e-6])\n plt.show()\n\n **Reference**\n\n - Ward, S. H., and G. W. Hohmann, 1988, Electromagnetic theory for\n geophysical applications, Chapter 4 of Electromagnetic Methods in Applied\n Geophysics: SEG, Investigations in Geophysics No. 
3, 130--311; DOI:\n `10.1190/1.9781560802631.ch4\n <https://doi.org/10.1190/1.9781560802631.ch4>`_.\n\n \"\"\"\n\n orient = kwargs.pop(\"orientation\", None)\n if orient is not None:\n raise TypeError(\n \"orientation kwarg has been removed, please use the moment argument\",\n )\n magnitude = moment\n moment = orient\n else:\n magnitude = 1\n mu = kwargs.pop(\"mu\", None)\n if mu is not None:\n raise TypeError(\"mu kwarg has been removed, please use the mu_r argument.\")\n mu_r = mu / mu_0\n\n mu = mu_0 * mu_r\n eps = epsilon_0 * eps_r\n w = 2 * np.pi * f\n\n if isinstance(moment, str):\n if moment == \"X\":\n mx, my, mz = 1.0, 0.0, 0.0\n elif moment == \"Y\":\n mx, my, mz = 0.0, 1.0, 0.0\n elif moment == \"Z\":\n mx, my, mz = 0.0, 0.0, 1.0\n else:\n raise NotImplementedError(\"String type for moment not recognized\")\n mx, my, mz = mx * magnitude, my * magnitude, mz * magnitude\n else:\n mx, my, mz = moment[0], moment[1], moment[2]\n\n XYZ = utils.asArray_N_x_Dim(XYZ, 3)\n\n dx = XYZ[:, 0] - srcLoc[0]\n dy = XYZ[:, 1] - srcLoc[1]\n dz = XYZ[:, 2] - srcLoc[2]\n\n r = np.sqrt(dx**2.0 + dy**2.0 + dz**2.0)\n k = np.sqrt(-1j * w * mu * sig + w**2 * mu * eps)\n kr = k * r\n\n if fieldType in [\"h\", \"b\"]:\n front = 1 / (4.0 * pi * r**3.0) * np.exp(-1j * kr)\n mid = -(kr**2.0) + 3.0 * 1j * kr + 3.0\n\n Fx = front * (\n mx * ((dx / r) ** 2.0 * mid + (kr**2.0 - 1j * kr - 1.0))\n + my * ((dy * dx / r**2.0) * mid)\n + mz * ((dx * dz / r**2.0) * mid)\n )\n\n Fy = front * (\n mx * ((dx * dy / r**2.0) * mid)\n + my * ((dy / r) ** 2.0 * mid + (kr**2.0 - 1j * kr - 1.0))\n + mz * ((dy * dz / r**2.0) * mid)\n )\n\n Fz = front * (\n mx * ((dx * dz / r**2.0) * mid)\n + my * ((dy * dz / r**2.0) * mid)\n + mz * ((dz / r) ** 2.0 * mid + (kr**2.0 - 1j * kr - 1.0))\n )\n\n if fieldType == \"b\":\n Fx, Fy, Fz = mu * Fx, mu * Fy, mu * Fz\n\n elif fieldType == \"e\":\n\n front = 1j * w * mu * (1 + 1j * kr) / (4.0 * pi * r**3.0) * np.exp(-1j * kr)\n\n Fx = front * (my * (dz / r) + mz * (-dy / r))\n\n Fy = front * (mx * (-dz / r) + mz * (dx / r))\n\n Fz = front * (mx * (dy / r) + my * (-dx / r))\n\n return Fx, Fy, Fz\n\n\ndef ElectricDipoleWholeSpace(\n XYZ, srcLoc, sig, f, moment=\"X\", fieldType=\"e\", mu_r=1, eps_r=1, **kwargs\n):\n\n orient = kwargs.pop(\"orientation\", None)\n if orient is not None:\n raise TypeError(\n \"orientation kwarg has been removed, please use the moment argument.\"\n )\n mu = kwargs.pop(\"mu\", None)\n if mu is not None:\n raise TypeError(\"mu kwarg has been removed, please use the mu_r argument.\")\n cur = kwargs.pop(\"current\", None)\n if cur is not None:\n raise TypeError(\n \"current kwarg has been removed, please use the moment argument.\",\n )\n else:\n magnitude = 1\n length = kwargs.pop(\"length\", None)\n if length is not None:\n raise TypeError(\n \"length kwarg has been removed, please use the moment argument.\"\n )\n\n mu = mu_0 * mu_r\n eps = epsilon_0 * eps_r\n w = 2 * np.pi * f\n\n if isinstance(moment, str):\n if moment.upper() == \"X\":\n mx, my, mz = 1.0, 0.0, 0.0\n elif moment.upper() == \"Y\":\n mx, my, mz = 0.0, 1.0, 0.0\n elif moment.upper() == \"Z\":\n mx, my, mz = 0.0, 0.0, 1.0\n else:\n raise NotImplementedError(\"String type for moment not recognized\")\n mx, my, mz = mx * magnitude, my * magnitude, mz * magnitude\n\n else:\n mx, my, mz = moment[0], moment[1], moment[2]\n\n XYZ = utils.asArray_N_x_Dim(XYZ, 3)\n\n dx = XYZ[:, 0] - srcLoc[0]\n dy = XYZ[:, 1] - srcLoc[1]\n dz = XYZ[:, 2] - srcLoc[2]\n\n r = np.sqrt(dx**2.0 + dy**2.0 + dz**2.0)\n k = np.sqrt(-1j 
* w * mu * sig + w**2 * mu * eps)\n kr = k * r\n\n if fieldType == \"e\":\n\n front = 1 / (4.0 * np.pi * sig * r**3) * np.exp(-1j * k * r)\n mid = -(k**2) * r**2 + 3 * 1j * k * r + 3\n\n Fx = front * (\n mx * ((dx**2 / r**2) * mid + (k**2 * r**2 - 1j * k * r - 1.0))\n + my * (dy * dx / r**2) * mid\n + mz * (dz * dx / r**2) * mid\n )\n\n Fy = front * (\n mx * (dx * dy / r**2) * mid\n + my * ((dy**2 / r**2) * mid + (k**2 * r**2 - 1j * k * r - 1.0))\n + mz * (dz * dy / r**2) * mid\n )\n\n Fz = front * (\n mx * (dx * dz / r**2) * mid\n + my * (dy * dz / r**2) * mid\n + mz * ((dz**2 / r**2) * mid + (k**2 * r**2 - 1j * k * r - 1.0))\n )\n\n elif fieldType in [\"h\", \"b\"]:\n\n front = (1 + 1j * kr) / (4.0 * np.pi * r**2) * np.exp(-1j * k * r)\n\n Fx = front * (my * (dz / r) + mz * (-dy / r))\n\n Fy = front * (mx * (-dz / r) + mz * (dx / r))\n\n Fz = front * (mx * (dy / r) + my * (-dx / r))\n\n if fieldType == \"b\":\n Fx, Fy, Fz = mu * Fx, mu * Fy, mu * Fz\n\n return Fx, Fy, Fz\n"
] | [
[
"numpy.sqrt",
"numpy.abs",
"numpy.exp"
]
] |
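The `hzAnalyticDipoleF` docstring in this entry already carries a plotting example; the sketch below reproduces essentially the same call, assuming SimPEG and matplotlib are installed (it illustrates the documented usage, not additional functionality).

```python
import numpy as np
import matplotlib.pyplot as plt
from SimPEG import electromagnetics as EM

# Total-field H_z of a unit vertical magnetic dipole at r = 100 m in a
# 0.01 S/m whole-space, over a log-spaced frequency range (as in the docstring).
freq = np.logspace(-1, 5, 301)
hz = EM.analytics.hzAnalyticDipoleF(100, freq, 0.01, secondary=False)

plt.loglog(freq, hz.real, 'C0-', label='Real')
plt.loglog(freq, -hz.real, 'C0--')
plt.loglog(freq, hz.imag, 'C1-', label='Imaginary')
plt.loglog(freq, -hz.imag, 'C1--')
plt.xlabel('Frequency (Hz)')
plt.ylabel('$H_z$ (A/m)')
plt.legend()
plt.show()
```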
jiacheng1gujiaxin/poseface | [
"316924e224477f881240712a13a925bdd27adf4c"
] | [
"img2pose/utils/renderer.py"
] | [
"import cv2\nimport numpy as np\nfrom Sim3DR import RenderPipeline\n\nfrom .pose_operations import plot_3d_landmark\n\n\ndef _to_ctype(arr):\n if not arr.flags.c_contiguous:\n return arr.copy(order=\"C\")\n return arr\n\n\ndef get_colors(img, ver):\n h, w, _ = img.shape\n ver[0, :] = np.minimum(np.maximum(ver[0, :], 0), w - 1) # x\n ver[1, :] = np.minimum(np.maximum(ver[1, :], 0), h - 1) # y\n ind = np.round(ver).astype(np.int32)\n colors = img[ind[1, :], ind[0, :], :] / 255.0 # n x 3\n\n return colors.copy()\n\n\nclass Renderer:\n def __init__(\n self,\n vertices_path=\"../pose_references/vertices_trans.npy\",\n triangles_path=\"../pose_references/triangles.npy\",\n ):\n self.vertices = np.load(vertices_path)\n self.triangles = _to_ctype(np.load(triangles_path).T)\n self.vertices[:, 0] *= -1\n\n self.cfg = {\n \"intensity_ambient\": 0.3,\n \"color_ambient\": (1, 1, 1),\n \"intensity_directional\": 0.6,\n \"color_directional\": (1, 1, 1),\n \"intensity_specular\": 0.1,\n \"specular_exp\": 5,\n \"light_pos\": (0, 0, 5),\n \"view_pos\": (0, 0, 5),\n }\n\n self.render_app = RenderPipeline(**self.cfg)\n\n def transform_vertices(self, img, poses, global_intrinsics=None):\n (w, h) = img.size\n if global_intrinsics is None:\n global_intrinsics = np.array(\n [[w + h, 0, w // 2], [0, w + h, h // 2], [0, 0, 1]]\n )\n\n transformed_vertices = []\n for pose in poses:\n projected_lms = np.zeros_like(self.vertices)\n projected_lms[:, :2], lms_3d_trans_proj = plot_3d_landmark(\n self.vertices, pose, global_intrinsics\n )\n projected_lms[:, 2] = lms_3d_trans_proj[:, 2] * -1\n\n range_x = np.max(projected_lms[:, 0]) - np.min(projected_lms[:, 0])\n range_y = np.max(projected_lms[:, 1]) - np.min(projected_lms[:, 1])\n\n s = (h + w) / pose[5]\n projected_lms[:, 2] *= s\n projected_lms[:, 2] += (range_x + range_y) * 3\n\n transformed_vertices.append(projected_lms)\n\n return transformed_vertices\n\n def render(self, img, transformed_vertices, alpha=0.9, save_path=None):\n img = np.asarray(img)\n overlap = img.copy()\n\n for vertices in transformed_vertices:\n vertices = _to_ctype(vertices) # transpose\n overlap = self.render_app(vertices, self.triangles, overlap)\n\n res = cv2.addWeighted(img, 1 - alpha, overlap, alpha, 0)\n\n if save_path is not None:\n cv2.imwrite(save_path, res)\n print(f\"Save visualization result to {save_path}\")\n\n return res\n\n def save_to_obj(self, img, ver_lst, height, save_path):\n n_obj = len(ver_lst) # count obj\n\n if n_obj <= 0:\n return\n\n n_vertex = ver_lst[0].T.shape[1]\n n_face = self.triangles.shape[0]\n\n with open(save_path, \"w\") as f:\n for i in range(n_obj):\n ver = ver_lst[i].T\n colors = get_colors(img, ver)\n\n for j in range(n_vertex):\n x, y, z = ver[:, j]\n f.write(\n f\"v {x:.2f} {height - y:.2f} {z:.2f} {colors[j, 2]:.2f} \"\n f\"{colors[j, 1]:.2f} {colors[j, 0]:.2f}\\n\"\n )\n\n for i in range(n_obj):\n offset = i * n_vertex\n for j in range(n_face):\n idx1, idx2, idx3 = self.triangles[j] # m x 3\n f.write(\n f\"f {idx3 + 1 + offset} {idx2 + 1 + offset} \"\n f\"{idx1 + 1 + offset}\\n\"\n )\n\n print(f\"Dump tp {save_path}\")\n"
] | [
[
"numpy.load",
"numpy.zeros_like",
"numpy.round",
"numpy.asarray",
"numpy.max",
"numpy.min",
"numpy.maximum",
"numpy.array"
]
] |
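A short usage sketch of the `Renderer` class in this entry. The import path, the input image, the `.npy` reference paths and the pose vector are all placeholders: `transform_vertices` expects a PIL-style image (it reads `img.size`) and a list of 6-element poses, and the class itself needs the Sim3DR `RenderPipeline` to be installed.

```python
import numpy as np
from PIL import Image

from renderer import Renderer  # placeholder import path for this file

# Input image; transform_vertices reads (w, h) from img.size, so keep it as a PIL image.
img = Image.open("face.jpg")

# One illustrative 6-element pose vector; only pose[5] is used directly here
# (as a scale divisor), the rest is consumed by plot_3d_landmark.
poses = [np.array([0.0, 0.0, 0.0, 0.0, 0.0, 1.0])]

renderer = Renderer(
    vertices_path="pose_references/vertices_trans.npy",
    triangles_path="pose_references/triangles.npy",
)
transformed = renderer.transform_vertices(img, poses)
result = renderer.render(img, transformed, alpha=0.9, save_path="render.png")
```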
macarro/imputena | [
"3a94ae1419a2af0d9707b20546ee078929ce99e8"
] | [
"imputena/simple_imputation/linear_regression.py"
] | [
"import pandas as pd\nimport numpy as np\nfrom sklearn import linear_model\nimport logging\n\n\ndef linear_regression(\n data=None, dependent=None, predictors=None, regressions='available',\n noise=False, inplace=False):\n \"\"\"Performs simple or multiple linear regression imputation on the data.\n First, the regression equation for the dependent variable given the\n predictor variables is computed. For this step, all rows that contain a\n missing value in either the dependent variable or any of the predictor\n variable is ignored via pairwise deletion. Then, missing valued in the\n dependent column in imputed using the regression equation. If, in the same\n row as a missing value in the dependent variable the value for any\n predictor variable is missing, a regression model based on all available\n predictors in calculated just to impute those values where the\n predictor(s) are missing. This behavior can be changed by assigning to\n the parameter regressions the value 'complete'. In this case, rows in\n which a predictor variable is missing do not get imputed. If stochastic\n regression imputation should be performed, set noise=True. In this\n case, a random value is chosen from a normal distribution with the width\n of the standard error of the regression model and added to the imputed\n value. If the parameter predictors is omitted, all variables other than\n the dependent are used as predictors. If the parameter dependent is\n omitted, the operation is performed on all columns that contain missing\n values.\n\n :param data: The data on which to perform the linear regression imputation.\n :type data: pandas.DataFrame\n :param dependent: The dependent variable in which the missing values\n should be imputed.\n :type dependent: String, optional\n :param predictors: The predictor variables on which the dependent variable\n is dependent.\n :type predictors: array-like, optional\n :param regressions: If 'available': Impute missing values by modeling a\n regression based on all available predictors if some predictors have\n missing values themselves. 
If 'complete': Only impute with a\n regression model based on all predictors and leave missing values in\n rows in which some predictor value is missing itself unimputed.\n :type regressions: {'available', 'complete'}, default 'available'\n :param noise: Whether to add noise to the imputed values (stochastic\n regression imputation)\n :type noise: bool, default False\n :param inplace: If True, do operation inplace and return None.\n :type inplace: bool, default False\n :return: The dataframe with linear regression imputation performed for the\n incomplete variable(s) or None if inplace=True.\n :rtype: pandas.DataFrame or None\n :raises: TypeError, ValueError\n \"\"\"\n # Check if data is a dataframe:\n if not isinstance(data, pd.DataFrame):\n raise TypeError('The data has to be a DataFrame.')\n # Check if the dependent variable is actually a column of the dataframe:\n if dependent is not None and dependent not in data.columns:\n raise ValueError(\n '\\'' + dependent + '\\' is not a column of the data.')\n # Check if each of the predictor variables is actually a column of the\n # dataframe:\n if predictors is not None:\n for column in predictors:\n if column not in data.columns:\n raise ValueError(\n '\\'' + column + '\\' is not a column of the data.')\n # Assign value to do_available_regressions\n if regressions == 'available':\n do_available_regressions = True\n elif regressions == 'complete':\n do_available_regressions = False\n else:\n raise ValueError(regressions + 'could not be understood')\n # Assign a reference or copy to res, depending on inplace:\n if inplace:\n res = data\n else:\n res = data.copy()\n # If dependent is not set, apply the operation to each column that contains\n # missing data:\n if dependent is None:\n for column in data.columns:\n if data[column].isna().any():\n res.loc[:, :] = linear_regression_one_dependent(\n res, column, predictors, do_available_regressions,\n noise)\n # Otherwise apply the operation to the dependent column only:\n else:\n res.loc[:, :] = linear_regression_one_dependent(\n data, dependent, predictors, do_available_regressions, noise)\n # Return dataframe if the operation is not to be performed inplace:\n if not inplace:\n return res\n\n\ndef linear_regression_one_dependent(\n data, dependent, predictors, do_available_regressions, noise):\n \"\"\"Auxiliary function that performs linear regression imputation for the\n dependent column. 
The difference with linear_regression() is that in\n that function dependent can be None, in which case this function is\n called for each column containing missing values,\n\n :param data: The data on which to perform the linear regression imputation.\n :type data: pandas.DataFrame\n :param dependent: The dependent variable in which the missing values\n should be imputed.\n :type dependent: String\n :param predictors: The predictor variables on which the dependent variable\n is dependent.\n :type predictors: array-like\n :param do_available_regressions: Whether to do regressions for all\n available predictor combinations or only on complete ones\n :type do_available_regressions: bool\n :param noise: Whether to add noise to the imputed values (stochastic\n regression imputation)\n :type noise: bool\n :return: The dataframe with linear regression imputation performed for the\n incomplete variable.\n :rtype: pandas.DataFrame\n \"\"\"\n # This auxiliary function always returns a copy:\n res = data.copy()\n # If predictors is None, all variables except for the dependent one are\n # considered predictors:\n if predictors is None:\n predictors = list(data.columns)\n predictors.remove(dependent)\n # Predictor combination sets and lists\n limited_predictors_combs = set()\n predictors_combs_done = []\n predictors_combs_todo = [tuple(predictors)]\n # Perform the operation:\n while len(predictors_combs_todo) > 0:\n # Select iteration predictors\n it_predictors = predictors_combs_todo.pop(0)\n # Log iteration beginning:\n logging.info('Applying regression imputation with predictors: ' + str(\n it_predictors))\n # Perform iteration:\n res.loc[:, :] = linear_regression_iter(\n res, dependent, list(it_predictors), noise,\n limited_predictors_combs)\n # Update predictor combinations done and to do\n predictors_combs_done.append(it_predictors)\n if do_available_regressions:\n predictors_combs_todo = list(\n set(limited_predictors_combs) - set(predictors_combs_done))\n # Log iteration end:\n logging.info('Predictor combinations done: ' + str(\n predictors_combs_done))\n logging.info('Predictor combinations to do: ' + str(\n predictors_combs_todo))\n return res\n\n\ndef linear_regression_iter(\n data, dependent, predictors, noise, limited_predictors_combs):\n \"\"\"Auxiliary function that performs (simple or multiple) linear\n regression imputation on the data, for the dependent column only. In rows\n that contain a missing value for any predictor variable, the value of the\n dependent variable does not get imputed. 
The operation is always\n performed on a copy of the data, which is returned.\n\n :param data: The data on which to perform the linear regression imputation.\n :type data: pandas.DataFrame\n :param dependent: The dependent variable in which the missing values\n should be imputed.\n :type dependent: String\n :param predictors: The predictor variables on which the dependent variable\n is dependent.\n :type predictors: array-like\n :param noise: Whether to add noise to the imputed value (stochastic\n regression imputation)\n :type noise: bool\n :param limited_predictors_combs: Reference to the set which contains all\n limited predictor combinations that are necessary to use because\n some predictor had a missing value in some row.\n :type limited_predictors_combs: set\n :return: A copy of the dataframe with linear regression imputation\n performed for the incomplete variable.\n :rtype: pandas.DataFrame\n \"\"\"\n # Perform pairwise deletion before calculating the regression\n data_pairwise_deleted = data.copy()\n variables = predictors.copy()\n variables.append(dependent)\n data_pairwise_deleted.dropna(subset=variables, inplace=True)\n # Calculate the regression:\n x = data_pairwise_deleted[predictors]\n y = data_pairwise_deleted[dependent]\n model = linear_model.LinearRegression()\n model.fit(x, y)\n # Extract the regression parameters from the model\n intercept = model.intercept_\n coefs = model.coef_\n # Log regression equation:\n eq = str(dependent) + ' = ' + str(intercept)\n for idx, coef in enumerate(coefs):\n eq += ' + ' + str(coef) + '*' + predictors[idx]\n logging.info('Regression equation: ' + eq)\n # Calculate standard error:\n std_error = (model.predict(x) - y).std()\n logging.info('Standard error: ' + str(std_error))\n # Implementation using apply:\n return data.apply(\n lambda row: get_imputed_row(\n row, dependent, predictors, intercept, coefs, noise, std_error,\n limited_predictors_combs),\n axis=1, result_type='broadcast')\n\n\ndef get_imputed_row(\n row, dependent, predictors, intercept, coefs, noise, std_error,\n limited_predictors_combs):\n \"\"\"Auxiliary function that receives a row of a DataFrame and returns the\n same row. If the row contains a missing value for the dependent variable,\n it gets imputed according to the regression equation specified by\n predictors, intercept and coefs.\n\n :param row: The row for which the missing value should be imputed\n :type row: pandas.Series\n :param dependent: The dependent variable for which the row might contain a\n missing value\n :type dependent: String\n :param predictors: The predictor variables on which the dependent variable\n is dependent.\n :type predictors: array-like\n :param intercept: The y-intercept of the regression equation.\n :type intercept: scalar\n :param coefs: The coefficients of the regression equation, in the same\n order as the predictors.\n :type coefs: array-like,\n :param noise: Whether to add noise to the imputed value (stochastic\n regression imputation)\n :type noise: bool\n :param std_error: The standard error of the regression model. 
Required\n if noise=True\n :type std_error: scalar\n :param limited_predictors_combs: Reference to the set which contains all\n limited predictor combinations that are necessary to use because\n some predictor had a missing value in some row.\n :type limited_predictors_combs: set\n :return: The row, with the missing value imputed if it contains one.\n :rtype: pandas.Series\n \"\"\"\n res = row.copy()\n if pd.isnull(res[dependent]):\n # Check whether there are predictors for which the value is NA\n na_predictors = tuple(\n row[predictors][row[predictors].isnull()].index.to_list())\n # If the row contains NA values for one or several predictors,\n # add the combination of predictors to na_predictor_combs, in order\n # to perform regression without them:\n if na_predictors != ():\n limited_predictors = tuple(set(predictors) - set(na_predictors))\n # Add the limited_predictors to the set only if the combination\n # isn't empty:\n if limited_predictors != ():\n limited_predictors_combs.add(limited_predictors)\n # If the row doesn't contain missing values for any predictor, impute:\n else:\n value = intercept\n for idx, coef in enumerate(coefs):\n value += coef * row[predictors[idx]]\n # If noise == True, add noise (stochastic regression imputation)\n if noise:\n value += std_error * np.random.randn()\n res[dependent] = value\n return res\n"
] | [
[
"pandas.isnull",
"sklearn.linear_model.LinearRegression",
"numpy.random.randn"
]
] |
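A minimal call sketch for `linear_regression` from this entry on a toy DataFrame. The column names and values are invented for illustration, and the import assumes the package exposes the function at top level; otherwise it can be imported from `imputena.simple_imputation.linear_regression`.

```python
import numpy as np
import pandas as pd

from imputena import linear_regression  # assumed top-level export

# Toy data: 'y' has one missing value to be imputed from 'x1' and 'x2'.
df = pd.DataFrame({
    'x1': [1.0, 2.0, 3.0, 4.0, 5.0],
    'x2': [2.0, 1.0, 4.0, 3.0, 5.0],
    'y':  [3.1, 2.9, 7.2, np.nan, 10.1],
})

# Deterministic regression imputation:
imputed = linear_regression(data=df, dependent='y', predictors=['x1', 'x2'])

# Stochastic variant: noise drawn from the regression's standard error is added.
imputed_stochastic = linear_regression(
    data=df, dependent='y', predictors=['x1', 'x2'], noise=True)

print(imputed)
```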
nmningmei/metacognition | [
"734082e247cc7fc9d277563e2676e10692617a3f"
] | [
"3 experiments_confidence/batch/e2 (experiment and chance scores) (cpj).py"
] | [
"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 12 16:07:58 2018\n\n@author: nmei\n\nin exp2 (e2) there were 3 possible awareness ratings ( (e.g. 1- no experience, 2 brief glimpse 3 almost clear or clear perception)\nBUT if can make a binary classification by focussing on 1 and 2 which are the majority of the trials.\n\n\n\"\"\"\nif __name__ == '__main__':\n import os\n import pandas as pd\n import numpy as np\n import utils\n # define result saving directory\n dir_saving = 'results_e2'\n if not os.path.exists(dir_saving):\n os.mkdir(dir_saving)\n \n try:# the subject level processing\n df1 = pd.read_csv('e2.csv').iloc[:,1:]\n except: # when I test the script\n df1 = pd.read_csv('../e2.csv').iloc[:,1:]\n df = df1.copy()\n # select the columns that I need\n df = df[['blocks.thisN',\n 'trials.thisN',\n 'key_resp_2.keys',\n 'resp.corr',\n 'resp_mrating.keys',\n 'participant',]]\n # rename the columns\n df.columns = ['blocks',\n 'trials',\n 'awareness',\n 'correctness',\n 'confidence',\n 'participant',]\n # preallocate the data frame structure\n results = dict(sub = [],\n model = [],\n score = [],\n window = [],\n correctness = [],\n awareness = [],\n confidence = [],\n chance = [],\n )\n # use success, awareness, and confidence as features\n np.random.seed(12345)\n # use judgement features\n feature_names = [\n 'correctness',\n 'awareness',\n 'confidence',\n ]\n target_name = 'confidence'\n experiment = 'e2'\n # for some of the variables, we need to rescale them to a more preferable range like 0-1\n name_for_scale = ['awareness']\n # 'ack', 'cc', 'ck', 'cpj', 'em', 'es', 'fd', 'jmac', 'lidia', 'ls','mimi', 'pr', 'pss', 'sva', 'tj'\n # get one of the participants' data\n participant = 'cpj'\n df_sub = df[df['participant'] == participant]\n # pick 1- no experience, 2 brief glimpse for binary classification\n df_sub = df_sub[df_sub['awareness'] != 3]\n # for 1-back to 4-back\n for n_back in np.arange(1,5):\n # experiment score\n results = utils.classification(\n df_sub.dropna(), # take out nan rows\n feature_names, # feature columns\n target_name, # target column\n results, # the saving structure\n participant, # participant's name\n experiment, # experiment name\n window = n_back, # N-back\n chance = False, # it is NOT estimating the chance level but the empirical classification experiment\n name_for_scale = name_for_scale # scale some of the variables\n )\n # empirical chance level\n results = utils.classification(\n df_sub.dropna(),\n feature_names,\n target_name,\n results,\n participant,\n experiment,\n window = n_back,\n chance = True, # it is to estimate the empirical chance level\n name_for_scale = name_for_scale\n )\n results_to_save = pd.DataFrame(results)\n results_to_save.to_csv(os.path.join(dir_saving,'{}.csv'.format(participant)))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
] | [
[
"numpy.arange",
"pandas.DataFrame",
"numpy.random.seed",
"pandas.read_csv"
]
] |
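The script in this entry turns the three-level awareness rating into a binary problem by keeping only ratings 1 and 2 before classification; the sketch below isolates that preprocessing step on a toy frame (the `utils.classification` call is omitted because that helper is not part of this file).

```python
import pandas as pd

# Toy stand-in for the e2.csv columns the script selects and renames.
df = pd.DataFrame({
    'blocks.thisN': [0, 0, 0, 1],
    'trials.thisN': [0, 1, 2, 0],
    'key_resp_2.keys': [1, 2, 3, 1],   # awareness: 1 no experience, 2 brief glimpse, 3 clear
    'resp.corr': [1, 0, 1, 1],
    'resp_mrating.keys': [2, 1, 3, 2],
    'participant': ['cpj'] * 4,
})
df.columns = ['blocks', 'trials', 'awareness',
              'correctness', 'confidence', 'participant']

# Keep one participant and drop the "almost clear or clear" trials (rating 3),
# leaving a binary awareness variable for the N-back classification.
df_sub = df[df['participant'] == 'cpj']
df_sub = df_sub[df_sub['awareness'] != 3]
print(df_sub)
```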
christianwbrock/algol-reduction | [
"5e85734d9e9e31985ead3ce40e67535418351010",
"5e85734d9e9e31985ead3ce40e67535418351010"
] | [
"reduction/test/plot_algol_h_alpha_line.py",
"reduction/scripts/generate_report.py"
] | [
"\nimport matplotlib.pyplot as plt\nfrom reduction.algol_h_alpha_line_model import AlgolHAlphaModel\n\n\nif __name__ == '__main__':\n\n AlgolHAlphaModel().plot(plt.axes())\n plt.show()\n",
"\"\"\"\\\nGenerate LaTeX report displaying spectra normalized around the H_alpha line.\n\"\"\"\n\nimport logging\nimport os\nimport os.path\nfrom argparse import ArgumentParser\nfrom collections import namedtuple, defaultdict\n\nimport numpy as np\nfrom astropy import constants as const\nfrom astropy.convolution import Box1DKernel\nfrom astropy.convolution import convolve\nfrom astropy.coordinates import EarthLocation\nfrom matplotlib import cm\nfrom matplotlib import pyplot as plt\nfrom matplotlib import rcParams as plot_params\n\nfrom reduction.algol_h_alpha_line_model import AlgolHAlphaModel\nfrom reduction.commandline import poly_glob, filename_parser, verbose_parser, get_loglevel\nfrom reduction.constants import H_ALPHA\nfrom reduction.normalize import normalize\nfrom reduction.spectrum import Spectrum\nfrom reduction.stars.algol import Algol, algol_coordinate\nfrom reduction.utils.ranges import closed_range\n\nlogger = logging.getLogger(__name__)\n\nDiff = namedtuple('Diff', 'wavelength diff phase maxima')\n\n\ndef main():\n\n plot_params['figure.dpi'] = 150\n\n # TODO comment\n max_diff = 0.25\n\n # range around H_alpha in A to be ignored for spectrum normalization\n padding = 10.0\n\n disc_range = closed_range(H_ALPHA.value - padding, H_ALPHA.value + padding)\n continuum_ranges = closed_range(6520, 6610) & ~disc_range\n\n parser = ArgumentParser(parents=[filename_parser('spectrum'), verbose_parser],\n description='Generate LaTeX report displaying spectra normalized around the H_alpha line.')\n\n parser.add_argument('-o', '--output', type=str, default='output',\n help='output folder where TeX file and images are stored')\n parser.add_argument('-f', '--force', action='store_true',\n help='Use this option to remove an existing output folder.')\n parser.add_argument('--deg', type=int, default=3,\n help='Degree of the normalization polynomial (default: %(default)s)')\n parser.add_argument('--cmap', default='bwr',\n help='A valid matplotlib colormap name (default: %(default)s)')\n\n args = parser.parse_args()\n\n logging.basicConfig(level=get_loglevel(logger, args))\n\n os.makedirs(args.output, exist_ok=args.force)\n logger.info(\"write report to '%s'\", os.path.abspath(args.output))\n\n if args.cmap not in cm.datad.keys():\n logger.warning('Invalid colormap not in %s', cm.datad.keys())\n args.cmap = parser.get_default('cmap')\n\n observer_location = EarthLocation.from_geodetic(lon=15.0, lat=50.0)\n algol = Algol()\n\n tex_file = open(os.path.join(args.output, \"report.tex\"), \"w\")\n\n tex_file.write(\"\\\\documentclass{article}\\n\")\n tex_file.write(\"\\\\usepackage[utf8]{inputenc}\\n\")\n tex_file.write(\"\\\\usepackage{graphicx}\\n\")\n tex_file.write(\"\\\\usepackage{seqsplit}\\n\")\n tex_file.write(\"\\\\usepackage{longtable}\\n\")\n tex_file.write(\"\\\\usepackage[hidelinks]{hyperref}\\n\")\n tex_file.write(\"\\\\title{Project Algol\\\\\\\\Spectrum reduction}\\n\")\n tex_file.write(\"\\\\date{\\\\today}\\n\")\n tex_file.write(\"\\\\author{%s\\\\\\\\by Christian Brock}\\n\" % os.path.basename(__file__).replace('_', '\\\\_'))\n tex_file.write(\"\\\\begin{document}\\n\")\n tex_file.write(\"\\\\maketitle\\n\")\n tex_file.write(\"\\\\begin{verbatim}\\n\")\n\n for k,v in args.__dict__.items():\n tex_file.write(\"--%s %s\\n\" % (k, v))\n\n tex_file.write(\"\\\\end{verbatim}\\n\")\n tex_file.write(\"\\\\tableofcontents\\n\")\n\n diff_image_name = \"diff_by_phase.png\"\n diff_image_wm_name = \"diff_by_phase_with_maxima.png\"\n sorted_diff_image_name = \"diff_sorted_phase.png\"\n 
sorted_diff_image_wm_name = \"diff_sorted_phase_with_maxima.png\"\n snr_by_observer_name = \"snr_by_observer.png\"\n\n tex_file.write(\"\\n\")\n tex_file.write(\"\\\\section{Final Result}\\n\")\n tex_file.write(\"\\n\")\n tex_file.write(\"\\\\includegraphics[width=\\\\textwidth]{%s}\\n\" % diff_image_name)\n tex_file.write(\"\\n\")\n tex_file.write(\"\\\\includegraphics[width=\\\\textwidth]{%s}\\n\" % diff_image_wm_name)\n tex_file.write(\"\\n\")\n tex_file.write(\"\\\\includegraphics[width=\\\\textwidth]{%s}\\n\" % sorted_diff_image_name)\n tex_file.write(\"\\n\")\n tex_file.write(\"\\\\includegraphics[width=\\\\textwidth]{%s}\\n\" % sorted_diff_image_wm_name)\n tex_file.write(\"\\n\")\n\n # list of Diffs\n diffs_by_phase = []\n snr_by_observer = defaultdict(list)\n\n filenames = poly_glob(args.filenames)\n\n # pass #1 loads all spectra found in the command line arguments\n spectra = []\n for n, filename in enumerate(filenames, start=1):\n\n logger.info(\"pass1 %d/%d: %s\", n, len(filenames), filename)\n\n for spectrum in Spectrum.load(filename, slice(None)):\n\n obs_time = spectrum.obs_date\n if not obs_time:\n logger.error(\"%s has no observation date\", spectrum.filename)\n continue\n\n spectra.append(spectrum)\n\n prev_observer = None\n prev_day = None\n\n # pass #2\n # group all spectra by observer and date\n for n, spectrum in enumerate(sorted(spectra, key=lambda sp: (sp.observer, sp.obs_date)), start=1):\n\n logger.info(\"pass2 %d/%d: %s\", n, len(spectra), spectrum.short_name)\n\n if spectrum.observer != prev_observer:\n tex_file.write(\"\\section{%s}\\n\\n\" % spectrum.observer)\n prev_observer = spectrum.observer\n prev_day = None\n\n obs_day = spectrum.obs_date.iso[:10]\n if obs_day != prev_day:\n tex_file.write(\"\\subsection{%s}\\n\\n\" % obs_day)\n prev_day = obs_day\n\n xs = spectrum.xs\n ys = spectrum.ys\n\n # cut first and last 15 values which may contain invalid (zero) values\n xs = xs[15:-15]\n ys = ys[15:-15]\n\n # normalize the maximum value to 1\n ys = ys / ys.max()\n\n obs_time = spectrum.obs_date\n res = spectrum.resolution\n\n # compute obs_time at solar system center\n light_travel_time = obs_time.light_travel_time(algol_coordinate, location=observer_location)\n obs_time += light_travel_time\n\n algol_rv_a = algol.rv_A(obs_time)\n radial_velocity_correction = algol_coordinate.radial_velocity_correction(obstime=obs_time,\n location=observer_location)\n rv_predicted_a = algol_rv_a - radial_velocity_correction\n phase = algol.AB.phase(obs_time)\n\n def as_redshift(radial_velocity):\n return H_ALPHA * (radial_velocity / const.c).to(1)\n\n redshift_predicted_a = as_redshift(rv_predicted_a)\n\n # 2.354 is the scale between sigma and FWHM of a gaussian\n sigma = H_ALPHA / (res or 15000) / 2.354\n\n model_algol_a = AlgolHAlphaModel(redshift=redshift_predicted_a, sigma=sigma)\n model_algol_a.scale.fixed = True\n model_algol_a.redshift.fixed = True\n model_algol_a.sigma.fixed = True\n\n # TODO: calculate algol spectrum from the single spectra of components A, B and C\n # model_algol_b = AlgolHAlphaModel(redshift=redshift_predicted_b, sigma=sigma)\n # model_algol_c = AlgolHAlphaModel(redshift=redshift_predicted_c, sigma=sigma)\n\n # part_a, part_b, part_c = (1, 0, 0)\n # model_algol = part_a * model_algol_a + part_b * model_algol_b + part_c * model_algol_c\n\n normalization = normalize(xs, ys, ref_ys=model_algol_a(xs), degree_or_range=args.deg,\n continuum_ranges=continuum_ranges)\n\n normalized = normalization.norm\n snr = normalization.snr\n 
normalization.plot(plt.figure().add_subplot(111))\n\n image_normalized = \"%05d_norm1.png\" % n\n plt.title(\"Normalization: %s\" % model_algol_a)\n plt.savefig(os.path.join(args.output, image_normalized))\n plt.close()\n\n image_diff = \"%05d_diff.png\" % n\n\n xlim = np.asarray(model_algol_a.get_xlimits())\n xlim[0] = max(xlim[0], continuum_ranges.lower_bound())\n xlim[1] = min(xlim[1], continuum_ranges.upper_bound())\n\n # compute difference spectrum between the normalized observed and the reference spectrum\n # This is assumed to be the spectrum of the circum stellar disc\n diff_xs = xs - model_algol_a.redshift\n diff_ys = normalized - model_algol_a(xs)\n\n diff_mask = [x in disc_range for x in diff_xs]\n\n diff_xs = diff_xs[diff_mask]\n diff_ys = diff_ys[diff_mask]\n\n maxima = _find_maxima(diff_xs, diff_ys, H_ALPHA.value)\n\n diffs_by_phase.append(Diff(diff_xs, diff_ys, phase, maxima))\n if spectrum.resolution:\n snr_by_observer[spectrum.observer].append([spectrum.resolution, snr])\n\n create_diff_plot(model_algol_a, model_algol_a, normalized, maxima, spectrum.short_name, xlim, xs, ys,\n os.path.join(args.output, image_diff))\n\n def display(q, format_string):\n return ((format_string + \" %s\") % (q.value, q.unit)).replace('Angstrom', r'\\AA')\n\n def display_rv(rv):\n return r\"%.1f km/s, %.2f \\AA\" % (rv.to('km/s').value, as_redshift(rv).to('AA').value)\n\n tex_file.write(\"\\n\")\n tex_file.write(\"\\\\begin{center}\\n\")\n tex_file.write(\"\\\\begin{tabular}{|l|l|}\\n\")\n tex_file.write(\"\\\\hline\\n\")\n tex_file.write(\"Observer & %s \\\\\\\\\\n\" % spectrum.observer.replace('_', '\\\\_'))\n tex_file.write(\"Filename & \\\\seqsplit{%s} \\\\\\\\\\n\" % spectrum.short_name.replace('_', '\\\\_'))\n tex_file.write(\"\\\\hline\\n\")\n tex_file.write(\"Resolution $\\\\delta\\\\lambda/\\\\lambda$ & %s \\\\\\\\\\n\" % spectrum.resolution)\n tex_file.write(\"Sigma & %s \\\\\\\\\\n\" % display(sigma.to('AA'), \"%.2f\"))\n tex_file.write(\"SNR & %.0f \\\\\\\\\\n\" % snr)\n tex_file.write(\"\\\\hline\\n\")\n tex_file.write(\"Observation date $(UTC)$ & %s \\\\\\\\\\n\" % spectrum.obs_date.iso)\n tex_file.write(\"Light travel time& %s \\\\\\\\\\n\" % display(light_travel_time.to('min'), \"%.1f\"))\n tex_file.write(\"Phase & $%.2f$ \\\\\\\\\\n\" % phase)\n tex_file.write(\"\\\\hline\\n\")\n tex_file.write(\"Algol radial velocity & %s \\\\\\\\\\n\" % display_rv(algol_rv_a))\n tex_file.write(\"Barycentric correction & %s \\\\\\\\\\n\" % display_rv(radial_velocity_correction))\n tex_file.write(\"Final radial velocity& %s \\\\\\\\\\n\" % display_rv(rv_predicted_a))\n tex_file.write(\"\\\\hline\\n\")\n tex_file.write(\"Redshift, form data & %s \\\\\\\\\\n\" % display(redshift_predicted_a.to('AA'), \"%.2f\"))\n tex_file.write(\"\\\\hline\\n\")\n tex_file.write(\"\\\\end{tabular}\\n\")\n tex_file.write(\"\\\\end{center}\\n\")\n\n tex_file.write(\"\\n\")\n tex_file.write(\"\\\\includegraphics[width=\\\\textwidth]{%s}\\n\" % image_diff)\n tex_file.write(\"\\n\")\n tex_file.write(\"\\\\includegraphics[width=\\\\textwidth]{%s}\\n\" % image_normalized)\n tex_file.write(\"\\n\")\n tex_file.write(\"\\\\pagebreak\\n\")\n tex_file.write(\"\\n\")\n\n # end pass #2 spectra\n\n diffs_by_phase = sorted(diffs_by_phase, key=lambda diff: diff.phase)\n\n # TODO what is vmin, vmax?\n vmin = max(-max_diff, np.min([np.nanmin(diff.diff) for diff in diffs_by_phase]))\n vmax = min(+max_diff, np.max([np.nanmax(diff.diff) for diff in diffs_by_phase]))\n\n plot_diff(args.cmap, args.output, diff_image_name, 
diffs_by_phase, disc_range, vmin, vmax, False)\n plot_diff(args.cmap, args.output, diff_image_wm_name, diffs_by_phase, disc_range, vmin, vmax, True)\n\n plot_sorted_diff(args.cmap, args.output, sorted_diff_image_name, diffs_by_phase, disc_range, vmin, vmax, False)\n plot_sorted_diff(args.cmap, args.output, sorted_diff_image_wm_name, diffs_by_phase, disc_range, vmin, vmax, True)\n\n plot_snr_by_observer(args.output, snr_by_observer_name, snr_by_observer)\n\n\n tex_file.write(\"\\\\appendix\\n\")\n tex_file.write(\"\\\\section{SNRs and Resolutions}\\n\")\n tex_file.write(\"\\n\")\n tex_file.write(\"\\\\includegraphics[width=\\\\textwidth]{%s}\\n\" % snr_by_observer_name)\n tex_file.write(\"\\n\")\n\n # generate a txt file containing the maxima around H_alpha assumed to be hot-spots\n # the content is also written as table to the tex file\n max_file = open(os.path.join(args.output, \"maxima.dat\"), \"w\")\n max_file.write(\"#phase,w1,v1,y1,w2,v2,y2\\n\")\n tex_file.write(\"\\\\section{maxima of differences}\\n\")\n tex_file.write(\"\\n\")\n tex_file.write(\"The raw date is stored in {\\\\tt %s}\\n\" % \"maxima.dat\")\n tex_file.write(\"\\n\")\n tex_file.write(\"\\\\begin{longtable}{|l|lll|lll|}\\n\")\n tex_file.write(\"\\\\hline\\n\")\n tex_file.write(\"phase & $\\AA$ & $km/s$ & y & $\\AA$ & $km/s$ & y \\\\\\\\\\n\")\n tex_file.write(\"\\\\hline\\n\")\n\n for diff in diffs_by_phase:\n\n if len(diff.maxima) == 2:\n x1, y1 = diff.maxima[0]\n x2, y2 = diff.maxima[1]\n elif len(diff.maxima) == 1:\n x, y = diff.maxima[0]\n if x < H_ALPHA.value:\n x1, y1 = x, y\n x2, y2 = None, None\n else:\n x1, y1 = None, None\n x2, y2 = x, y\n else: # happens if both maxima are at the border\n continue\n\n v1 = ((x1 - H_ALPHA.value) / H_ALPHA.value * const.c).to('km/s').value if x1 else None\n v2 = ((x2 - H_ALPHA.value) / H_ALPHA.value * const.c).to('km/s').value if x2 else None\n\n def _(value, fmt):\n return fmt % value if value else ''\n\n tex_file.write(\"%.5f & %s & %s & %s & %s & %s & %s\\\\\\\\\\n\" %\n (diff.phase, _(x1, '%.1f'), _(v1, '%.0f'), _(y1, '%.3f'),\n _(x2, '%.1f'), _(v2, '%.0f'), _(y2, '%.3f')))\n\n max_file.write(\"%.5f,%s,%s,%s,%s,%s,%s\\n\" %\n (diff.phase, _(x1, '%.1f'), _(v1, '%.0f'), _(y1, '%.3f'),\n _(x2, '%.1f'), _(v2, '%.0f'), _(y2, '%.3f')))\n\n tex_file.write(\"\\\\hline\\n\")\n tex_file.write(\"\\\\end{longtable}\\n\")\n tex_file.write(\"\\n\")\n tex_file.write(\"\\n\")\n tex_file.write(\"\\\\section{spectra by phase}\\n\")\n tex_file.write(\"\\n\")\n tex_file.write(\"\\\\begin{longtable}{|l|l|l|l|}\\n\")\n tex_file.write(\"\\\\hline\\n\")\n tex_file.write(\"phase & observer & date & filename \\\\\\\\\\n\")\n tex_file.write(\"\\\\hline\\n\")\n\n for spectrum in sorted(spectra, key=lambda sp: algol.AB.phase(sp.obs_date)):\n tex_file.write(\"%.5f & %s & %s & \\\\seqsplit{%s}\\\\\\\\\\n\" %\n (algol.AB.phase(spectrum.obs_date), spectrum.observer.replace('_', '\\\\_'),\n spectrum.obs_date.iso[:10], spectrum.short_name.replace('_', '\\\\_')))\n\n tex_file.write(\"\\\\hline\\n\")\n tex_file.write(\"\\\\end{longtable}\\n\")\n tex_file.write(\"\\\\end{document}\\n\")\n\n max_file.close()\n tex_file.close()\n\n\ndef plot_sorted_diff(args_cmap, args_output, sorted_diff_image_name, diffs_by_phase, disc_range, vmin, vmax, plot_maxima):\n # create the trailed spectrum *sorted* by phase plot\n fig = plt.figure(figsize=[6.4, 4.8 * 2])\n plot = fig.add_subplot(111)\n plot.set_xlim(disc_range.lower_bound(), disc_range.upper_bound())\n plot.set_ylabel('Spectra sorted by phase')\n 
plot.set_xlabel('Wavelength ($\\AA$)')\n sc = None\n\n left_xs = []\n left_ys = []\n right_xs = []\n right_ys = []\n\n for i, diff in enumerate(diffs_by_phase):\n assert len(diff.wavelength) == len(diff.diff)\n\n ys = 1.0 * i * np.ones(len(diff.wavelength))\n sc = plot.scatter(diff.wavelength, ys, s=1, c=diff.diff, cmap=args_cmap, vmin=min(vmin, -vmax),\n vmax=max(vmax, -vmin))\n\n if 0.15 <= diff.phase <= 0.85:\n for x, y in diff.maxima:\n if x < H_ALPHA.value:\n left_xs.append(x)\n left_ys.append(i)\n else:\n right_xs.append(x)\n right_ys.append(i)\n\n plot.vlines(H_ALPHA.value, *plot.get_ylim())\n\n if plot_maxima:\n plot.plot(left_xs, left_ys, 'k')\n plot.plot(right_xs, right_ys, 'k')\n\n ax2 = plot.twiny()\n ax2.set_xlim(((np.asarray(plot.get_xlim()) - H_ALPHA.value) / H_ALPHA.value * const.c).to('km/s').value)\n ax2.set_xlabel('Radial velocity ($km/s$)')\n\n fig.colorbar(sc)\n plt.savefig(os.path.join(args_output, sorted_diff_image_name))\n plt.close()\n\n\ndef plot_snr_by_observer(args_output, filename, snr_by_observer):\n\n assert isinstance(filename, str)\n assert isinstance(snr_by_observer, dict)\n\n fig = plt.figure()\n plot = fig.add_subplot(111)\n plot.set_xlabel('Resolution $\\lambda / \\delta \\lambda$')\n plot.set_ylabel('SNR')\n\n for observer, resolutions_and_snrs in sorted(snr_by_observer.items()):\n resolutions = [i[0] for i in resolutions_and_snrs]\n snrs = [i[1] for i in resolutions_and_snrs]\n plot.scatter(resolutions, snrs, label=observer)\n\n plot.legend()\n plt.savefig(os.path.join(args_output, filename))\n plt.close(os.path.join(args_output, filename))\n\n\ndef plot_diff(args_cmap, args_output, diff_image_name, diffs_by_phase, disc_range, vmin, vmax, plot_maxima):\n \"\"\"\n Create the trailed spectrum by phase plot\n \"\"\"\n fig = plt.figure(figsize=[6.4, 4.8 * 2])\n plot = fig.add_subplot(111)\n plot.set_ylim(-0.5, 1.5)\n plot.set_xlim(disc_range.lower_bound(), disc_range.upper_bound())\n plot.set_ylabel('Phase')\n plot.set_xlabel('Wavelength ($\\AA$)')\n for diff in diffs_by_phase:\n\n assert len(diff.wavelength) == len(diff.diff)\n\n for offset in [-1, 0, 1]:\n ys = (diff.phase + offset) * np.ones(len(diff.wavelength))\n sc = plot.scatter(diff.wavelength, ys, s=1, c=diff.diff, cmap=args_cmap, vmin=min(vmin, -vmax),\n vmax=max(vmax, -vmin))\n plot.vlines(H_ALPHA.value, *plot.get_ylim())\n\n if plot_maxima:\n left_xs = []\n left_ys = []\n right_xs = []\n right_ys = []\n\n for diff in diffs_by_phase:\n\n if 0.15 <= diff.phase <= 0.85:\n for x, y in diff.maxima:\n if x < H_ALPHA.value:\n left_ys.append(diff.phase)\n left_xs.append(x)\n else:\n right_xs.append(x)\n right_ys.append(diff.phase)\n plot.plot(left_xs, left_ys, 'k')\n plot.plot(right_xs, right_ys, 'k')\n\n ax2 = plot.twiny()\n ax2.set_xlim(((np.asarray(plot.get_xlim()) - H_ALPHA.value) / H_ALPHA.value * const.c).to('km/s').value)\n ax2.set_xlabel('Radial velocity ($km/s$)')\n\n fig.colorbar(sc)\n\n plt.savefig(os.path.join(args_output, diff_image_name))\n plt.close()\n return sc\n\n\ndef create_diff_plot(final_model, initial_model, normalized, maxima, title, xlim, xs, ys, image_path):\n\n redshift = final_model.redshift\n\n plot = plt.figure().add_subplot(111)\n plot.set_ylim(-0.5, 1.5)\n plot.set_xlim(xlim)\n\n plot.plot(xs, 0.6 * ys, label='measured')\n plot.plot(xs, normalized, label='normalized')\n\n plot.plot(xs, initial_model(xs), label='predicted %s' % initial_model)\n if final_model is not initial_model:\n plot.plot(xs, final_model(xs), label='fitted %s' % final_model)\n\n plot.plot(xs, 
normalized - final_model(xs), label='normalized - fitted')\n\n if maxima:\n for x, y in maxima:\n plot.vlines(x + redshift, ymin=0, ymax=y, label='maxima')\n\n plot.hlines(0, xlim[0], xlim[1])\n plot.vlines(H_ALPHA.value + redshift, *plot.get_ylim())\n plot.set_title(title)\n plot.legend(loc='upper right')\n\n plt.savefig(image_path)\n plt.close()\n\n\ndef _find_maxima(xs, ys, center):\n \"\"\"\\\n Find maxima of ys below and above the center wave length, i.e. H_alpha\n \"\"\"\n\n result = []\n\n for r in [closed_range(np.min(xs), center), closed_range(center, np.max(xs))]:\n\n mask = [x in r for x in xs]\n\n ys_in_r = ys[mask]\n arg = np.argmax(ys_in_r)\n\n if arg == 0 or arg + 1 == len(ys_in_r):\n logger.debug('ignore maximum in %s at %s bound', r, 'lower' if arg == 0 else 'upper')\n continue\n\n y = ys_in_r[arg]\n x = xs[mask][arg]\n\n logger.debug('maximum in %s at x=%.1f, y=%.2f', r, x, y)\n result.append((x, y))\n\n return tuple(result)\n\n\ndef _find_minimum(xs, ys, dx, range_AA, box_size_AA):\n xs = np.asarray(xs)\n ys = np.asarray(ys)\n\n width = int(np.ceil(box_size_AA / dx))\n\n kernel = Box1DKernel(width)\n\n ys = convolve(ys, kernel=kernel, boundary=None)\n\n assert len(xs) == len(ys)\n\n # remove convolution boundaries\n clip = kernel.array.size // 2\n xs = xs[clip:-clip]\n ys = ys[clip:-clip]\n\n mask = [H_ALPHA.value - range_AA <= x <= H_ALPHA.value + range_AA for x in xs]\n xs = xs[mask]\n ys = ys[mask]\n\n i = np.argmin(ys)\n return xs[i]\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.axes"
],
[
"matplotlib.cm.datad.keys",
"numpy.ceil",
"numpy.nanmax",
"matplotlib.pyplot.figure",
"numpy.argmin",
"matplotlib.pyplot.savefig",
"numpy.asarray",
"numpy.argmax",
"numpy.nanmin",
"matplotlib.pyplot.title",
"numpy.max",
"numpy.min",
"matplotlib.pyplot.close"
]
] |
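generate_report.py in this entry locates the two difference-spectrum maxima around H-alpha with its `_find_maxima` helper; the sketch below restates that logic as a standalone function on synthetic data (it is not the module's own code, and the H-alpha constant here is just the approximate rest wavelength in Angstrom).

```python
import numpy as np

H_ALPHA_AA = 6562.8  # approximate H-alpha rest wavelength in Angstrom


def find_maxima(xs, ys, center):
    """Largest value below and above `center`, ignoring maxima on a range boundary
    (a standalone restatement of the _find_maxima logic in this entry)."""
    result = []
    for mask in (xs <= center, xs >= center):
        xs_r, ys_r = xs[mask], ys[mask]
        arg = int(np.argmax(ys_r))
        if arg == 0 or arg + 1 == len(ys_r):
            continue  # the maximum sits on the boundary, skip it
        result.append((xs_r[arg], ys_r[arg]))
    return tuple(result)


# Synthetic "normalized minus model" difference with a bump on each side of H-alpha:
xs = np.linspace(H_ALPHA_AA - 10, H_ALPHA_AA + 10, 401)
ys = np.exp(-(xs - (H_ALPHA_AA - 4)) ** 2) + 0.8 * np.exp(-(xs - (H_ALPHA_AA + 5)) ** 2)
print(find_maxima(xs, ys, H_ALPHA_AA))
```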
lidongyv/Explicit-Context-Mapping-for-Stereo-Matching | [
"9b2e63982daf5629045de0bf0694d8ccb111b2f1"
] | [
"cmf/models/cmfsm.py"
] | [
"# -*- coding: utf-8 -*-\n# @Author: yulidong\n# @Date: 2018-07-17 10:44:43\n# @Last Modified by: yulidong\n# @Last Modified time: 2019-03-01 14:12:35\n# -*- coding: utf-8 -*-\n# @Author: lidong\n# @Date: 2018-03-20 18:01:52\n# @Last Modified by: yulidong\n# @Last Modified time: 2018-07-16 22:16:14\nimport time\nimport torch\nimport numpy as np\nimport torch.nn as nn\nimport math\nfrom math import ceil\nfrom torch.autograd import Variable\nfrom torch.nn.functional import cosine_similarity as cosine_s\nfrom cmf import caffe_pb2\nfrom cmf.models.utils import *\nrsn_specs = {\n 'scene': \n {\n 'n_classes': 9,\n 'input_size': (540, 960),\n 'block_config': [3, 4, 23, 3],\n },\n\n}\n\ngroup_dim=32\npramid_dim=8\ngroup_norm_group_num = 32\n\n\ndef convbn(in_planes, out_planes, kernel_size, stride, pad, dilation):\n\n return nn.Sequential(\n nn.Conv2d(\n in_planes,\n out_planes,\n kernel_size=kernel_size,\n stride=stride,\n padding=dilation if dilation > 1 else pad,\n dilation=dilation,\n bias=False), nn.GroupNorm(group_norm_group_num, out_planes))\n\n\ndef convbn_3d(in_planes, out_planes, kernel_size, stride, pad):\n\n return nn.Sequential(\n nn.Conv3d(\n in_planes,\n out_planes,\n kernel_size=kernel_size,\n padding=pad,\n stride=stride,\n bias=False), nn.GroupNorm(group_norm_group_num, out_planes))\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride, downsample, pad, dilation):\n super(BasicBlock, self).__init__()\n\n self.conv1 = nn.Sequential(\n convbn(inplanes, planes, 3, stride, pad, dilation),\n nn.ReLU(inplace=True))\n\n self.conv2 = convbn(planes, planes, 3, 1, pad, dilation)\n\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.conv2(out)\n\n if self.downsample is not None:\n x = self.downsample(x)\n\n out += x\n\n return out\n\n\nclass matchshifted(nn.Module):\n def __init__(self):\n super(matchshifted, self).__init__()\n\n def forward(self, left, right, shift):\n batch, filters, height, width = left.size()\n shifted_left = F.pad(\n torch.index_select(\n left, 3,\n Variable(torch.LongTensor(\n [i for i in range(shift, width)])).cuda()),\n (shift, 0, 0, 0))\n shifted_right = F.pad(\n torch.index_select(\n right, 3,\n Variable(torch.LongTensor(\n [i for i in range(width - shift)])).cuda()),\n (shift, 0, 0, 0))\n out = torch.cat((shifted_left, shifted_right), 1).view(\n batch, filters * 2, 1, height, width)\n return out\n\n\nclass disparityregression(nn.Module):\n def __init__(self, maxdisp):\n super().__init__()\n self.disp = Variable(\n torch.Tensor(\n np.reshape(np.array(range(maxdisp)),\n [1, maxdisp, 1, 1])).cuda(),\n requires_grad=False)\n\n def forward(self, x):\n disp = self.disp.repeat(x.size()[0], 1, x.size()[2], x.size()[3])\n out = torch.sum(x * disp, 1)\n return out\n\n\nclass feature_extraction(nn.Module):\n def __init__(self):\n super(feature_extraction, self).__init__()\n self.inplanes = 32\n self.firstconv = nn.Sequential(\n convbn(3, 32, 3, 1, 1, 1),\n # nn.GroupNorm(group_dim, 32),\n nn.ReLU(inplace=True),\n convbn(32, 32, 3, 1, 1, 1),\n nn.ReLU(inplace=True),\n convbn(32, 32, 3, 1, 1, 1),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 32, kernel_size=3, padding=1, stride=1, bias=False))\n self.secondconv = nn.Sequential(\n nn.GroupNorm(group_dim, 32),\n nn.ReLU(inplace=True),\n convbn(32, 32, 3, 2, 1, 1),\n nn.ReLU(inplace=True),\n convbn(32, 32, 3, 1, 1, 1),\n nn.ReLU(inplace=True))\n\n self.layer1 = self._make_layer(BasicBlock, 32, 3, 1, 1, 1)\n self.layer2 = 
self._make_layer(BasicBlock, 64, 16, 2, 1, 1)\n self.layer3 = self._make_layer(BasicBlock, 128, 3, 1, 1, 1)\n self.layer4 = self._make_layer(BasicBlock, 128, 3, 1, 1, 2)\n\n self.branch1 = nn.Sequential(\n nn.AvgPool2d((64, 64), stride=(64, 64)),\n convbn(128, 32, 1, 1, 0, 1),\n nn.ReLU(inplace=True))\n\n self.branch2 = nn.Sequential(\n nn.AvgPool2d((32, 32), stride=(32, 32)),\n convbn(128, 32, 1, 1, 0, 1),\n nn.ReLU(inplace=True))\n\n self.branch3 = nn.Sequential(\n nn.AvgPool2d((16, 16), stride=(16, 16)),\n convbn(128, 32, 1, 1, 0, 1),\n nn.ReLU(inplace=True))\n\n self.branch4 = nn.Sequential(\n nn.AvgPool2d((8, 8), stride=(8, 8)),\n convbn(128, 32, 1, 1, 0, 1),\n nn.ReLU(inplace=True))\n\n self.lastconv = nn.Sequential(\n convbn(320, 128, 3, 1, 1, 1),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 32, kernel_size=1, padding=0, stride=1, bias=False))\n\n def _make_layer(self, block, planes, blocks, stride, pad, dilation):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(\n self.inplanes,\n planes * block.expansion,\n kernel_size=1,\n stride=stride,\n bias=False),\n nn.GroupNorm(group_norm_group_num, planes * block.expansion),\n )\n\n layers = []\n layers.append(\n block(self.inplanes, planes, stride, downsample, pad, dilation))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes, 1, None, pad, dilation))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n output_all = self.firstconv(x)\n output=self.secondconv(output_all)\n output_rt = self.layer1(output)\n output_raw = self.layer2(output_rt)\n output = self.layer3(output_raw)\n output_skip = self.layer4(output)\n\n output_branch1 = self.branch1(output_skip)\n output_branch1 = F.interpolate(\n output_branch1, (output_skip.size()[2], output_skip.size()[3]),\n mode='bilinear',\n align_corners=False)\n\n output_branch2 = self.branch2(output_skip)\n output_branch2 = F.interpolate(\n output_branch2, (output_skip.size()[2], output_skip.size()[3]),\n mode='bilinear',\n align_corners=False)\n\n output_branch3 = self.branch3(output_skip)\n output_branch3 = F.interpolate(\n output_branch3, (output_skip.size()[2], output_skip.size()[3]),\n mode='bilinear',\n align_corners=False)\n\n output_branch4 = self.branch4(output_skip)\n output_branch4 = F.interpolate(\n output_branch4, (output_skip.size()[2], output_skip.size()[3]),\n mode='bilinear',\n align_corners=False)\n\n output_feature = torch.cat(\n (output_raw, output_skip, output_branch4, output_branch3,\n output_branch2, output_branch1), 1)\n output_feature = self.lastconv(output_feature)\n\n return output_feature, output_rt,output_all\n\n\n\nclass hourglass(nn.Module):\n def __init__(self, inplanes):\n super().__init__()\n\n self.conv1 = nn.Sequential(\n convbn_3d(inplanes, inplanes * 2, kernel_size=3, stride=2, pad=1),\n nn.ReLU(inplace=True))\n\n self.conv2 = convbn_3d(\n inplanes * 2, inplanes * 2, kernel_size=3, stride=1, pad=1)\n\n self.conv3 = nn.Sequential(\n convbn_3d(\n inplanes * 2, inplanes * 2, kernel_size=3, stride=2, pad=1),\n nn.ReLU(inplace=True))\n\n self.conv4 = nn.Sequential(\n convbn_3d(\n inplanes * 2, inplanes * 2, kernel_size=3, stride=1, pad=1),\n nn.ReLU(inplace=True))\n\n self.conv5 = nn.Sequential(\n nn.ConvTranspose3d(\n inplanes * 2,\n inplanes * 2,\n kernel_size=3,\n padding=1,\n output_padding=1,\n stride=2,\n bias=False), nn.GroupNorm(group_norm_group_num,\n inplanes * 2)) # +conv2\n\n self.conv6 = nn.Sequential(\n 
nn.ConvTranspose3d(\n inplanes * 2,\n inplanes,\n kernel_size=3,\n padding=1,\n output_padding=(1,1,1),\n stride=2,\n bias=False), nn.GroupNorm(group_norm_group_num,\n inplanes)) # +x\n\n def forward(self, x, presqu, postsqu):\n\n out = self.conv1(x) # in:1/4 out:1/8\n pre = self.conv2(out) # in:1/8 out:1/8\n if postsqu is not None:\n pre = F.relu(pre + postsqu, inplace=True)\n else:\n pre = F.relu(pre, inplace=True)\n\n out = self.conv3(pre) # in:1/8 out:1/16\n out = self.conv4(out) # in:1/16 out:1/16\n\n if presqu is not None:\n post = F.relu(\n self.conv5(out) + presqu, inplace=True) # in:1/16 out:1/8\n else:\n post = F.relu(self.conv5(out) + pre, inplace=True)\n\n out = self.conv6(post) # in:1/8 out:1/4\n\n return out, pre, post\nclass similarity_measure1(nn.Module):\n def __init__(self):\n super(similarity_measure1, self).__init__()\n self.inplanes = 32\n self.conv0 = nn.Conv2d(66, 32, kernel_size=1, stride=1, padding=0,\n bias=False,dilation=1)\n self.relu0 = nn.LeakyReLU(inplace=True) \n self.conv1 = nn.Conv2d(32, 16, kernel_size=1, stride=1, padding=0,\n bias=False,dilation=1) \n self.relu1 = nn.LeakyReLU(inplace=True)\n self.conv2 = nn.Conv2d(16, 8, kernel_size=1, stride=1, padding=0,\n bias=False,dilation=1)\n self.relu2 = nn.LeakyReLU(inplace=True)\n self.conv3 = nn.Conv2d(8, 1, kernel_size=1, stride=1, padding=0,\n bias=False,dilation=1)\n #self.relu3 = nn.Sigmoid()\n # self.conv4 = nn.Conv2d(16, 8, kernel_size=1, stride=1, padding=0,\n # bias=False,dilation=1)\n # self.relu4 = nn.LeakyReLU(inplace=True)\n # self.conv5 = nn.Conv2d(8, 1, kernel_size=1, stride=1, padding=0,\n # bias=False,dilation=1)\n # self.relu5 = nn.ReLU(inplace=True)\n #self.s1=nn.Parameter(torch.ones(1)).float()*0.5\n\n for m in self.modules():\n if isinstance(m,nn.Conv2d):\n nn.init.kaiming_normal_(m.weight,mode='fan_out',nonlinearity='relu')\n elif isinstance(m, nn.GroupNorm):\n nn.init.constant_(m.weight,1)\n nn.init.constant_(m.bias,0)\n def forward(self, x):\n\n output = self.conv0(x)\n output = self.relu0(output)\n output = self.conv1(output)\n output = self.relu1(output)\n output = self.conv2(output)\n output = self.relu2(output)\n output = self.conv3(output)\n #output = self.relu3(output)\n # output = self.conv4(output)\n # output = self.relu4(output)\n # output = self.conv5(output)\n # #output = torch.abs(output)\n # output = self.relu5(output)\n\n # print(output.shape)\n # print(torch.mean(output).item(),torch.max(output).item(),torch.min(output).item())\n\n # output = output/torch.max(output)\n # output = output-torch.min(output)\n # output = 1-output\n # output = torch.exp(-output)\n #print(torch.mean(output).item(),torch.max(output).item(),torch.min(output).item())\n return output\nclass similarity_measure2(nn.Module):\n def __init__(self):\n super(similarity_measure2, self).__init__()\n self.inplanes = 32\n self.conv0 = nn.Conv2d(3, 3, kernel_size=1, stride=1, padding=0,\n bias=False,dilation=1)\n self.relu0 = nn.LeakyReLU(inplace=True) \n self.conv1 = nn.Conv2d(3, 2, kernel_size=1, stride=1, padding=0,\n bias=False,dilation=1) \n self.relu1 = nn.LeakyReLU(inplace=True)\n self.conv2 = nn.Conv2d(2, 1, kernel_size=1, stride=1, padding=0,\n bias=False,dilation=1)\n self.relu2 = nn.LeakyReLU(inplace=True) \n #self.s2=nn.Parameter(torch.ones(1)).float()*0.5\n\n for m in self.modules():\n if isinstance(m,nn.Conv2d):\n nn.init.kaiming_normal_(m.weight,mode='fan_out',nonlinearity='relu')\n elif isinstance(m, nn.GroupNorm):\n nn.init.constant_(m.weight,1)\n nn.init.constant_(m.bias,0)\n def 
forward(self, x):\n\n output = self.conv0(x)\n output = self.relu0(output)\n output = self.conv1(output)\n output = self.relu1(output)\n output = self.conv2(output)\n output = self.relu2(output)\n return output\n\n\ndef matrix_generation():\n scale=4\n x=torch.arange(-scale//2,scale//2+1).float()\n x=torch.cat([x[:x.shape[0]//2],x[x.shape[0]//2+1:]]).unsqueeze(0)\n distance_matrix=x.expand(scale,scale).unsqueeze(0)\n\n distance_matrix=torch.cat([distance_matrix,distance_matrix.transpose(2,1)],0)\n distance_matrix=distance_matrix.unsqueeze(0)\n distance_matrix1=distance_matrix+0\n distance_matrix2=distance_matrix+0\n distance_matrix3=distance_matrix+0\n distance_matrix4=distance_matrix+0\n distance_matrix5=distance_matrix+0\n distance_matrix6=distance_matrix+0\n distance_matrix7=distance_matrix+0\n distance_matrix8=distance_matrix+0\n x=torch.arange(1,scale+1).float()\n x=x.expand(scale,scale).unsqueeze(0)\n #x=x.repeat(hr_feature.shape[0],hr_feature.shape[-2]//scale,hr_feature.shape[-1]//scale).float().cuda()\n distance_matrix1[:,0,:,:]=scale-x+1\n distance_matrix2[:,0,:,:]=x\n distance_matrix5[:,0,:,:]=distance_matrix2[:,0,:,:]\n distance_matrix6[:,0,:,:]=distance_matrix1[:,0,:,:]\n distance_matrix7[:,0,:,:]=distance_matrix2[:,0,:,:]\n distance_matrix8[:,0,:,:]=distance_matrix1[:,0,:,:]\n x=torch.arange(1,scale+1).float()\n x=x.expand(scale,scale).unsqueeze(0).transpose(2,1)\n\n distance_matrix3[:,1,:,:]=(scale-x+1)\n distance_matrix4[:,1,:,:]=x\n distance_matrix5[:,1,:,:]=distance_matrix3[:,1,:,:]\n distance_matrix6[:,1,:,:]=distance_matrix3[:,1,:,:]\n distance_matrix7[:,1,:,:]=distance_matrix4[:,1,:,:]\n distance_matrix8[:,1,:,:]=distance_matrix4[:,1,:,:]\n # print(distance_matrix3)\n \n return distance_matrix.cuda(),distance_matrix1.cuda(),distance_matrix2.cuda(),distance_matrix3.cuda(),distance_matrix4.cuda(), \\\n distance_matrix5.cuda(),distance_matrix6.cuda(),distance_matrix7.cuda(),distance_matrix8.cuda()\n\n\nclass eight_related_context_mapping(nn.Module):\n def __init__(self):\n super(eight_related_context_mapping,self).__init__()\n self.similarity1=similarity_measure1()\n #need to remove\n #self.similarity2=similarity_measure2()\n # self.fuse=nn.Sequential(nn.Conv2d(2, 1, kernel_size=1, stride=1, padding=0,\n # bias=False,dilation=1),nn.LeakyReLU(inplace=True))\n #self.fuse.weight.data.fill_(1)\n self.sigmoid=nn.Sigmoid()\n self.distance_matrix,self.distance_matrix1,self.distance_matrix2,self.distance_matrix3,self.distance_matrix4, \\\n self.distance_matrix5,self.distance_matrix6,self.distance_matrix7,self.distance_matrix8=matrix_generation()\n def forward(self, lr_feature, hr_feature,lr_feature_r, hr_feature_r):\n \n #self.fuse.weight.data=torch.abs(self.fuse.weight.data)\n with torch.no_grad():\n scale=hr_feature.shape[-1]//lr_feature.shape[-1]\n if scale%2!=0:\n exit()\n\n padding1=hr_feature[:,:1,:,:scale]*0-100\n padding2=hr_feature[:,:1,:scale,:]*0-100\n\n distance_matrix=self.distance_matrix.repeat(hr_feature.shape[0],1,hr_feature.shape[-2]//scale,hr_feature.shape[-1]//scale).float()\n distance_matrix1=self.distance_matrix1.repeat(hr_feature.shape[0],1,hr_feature.shape[-2]//scale,hr_feature.shape[-1]//scale).float()\n distance_matrix2=self.distance_matrix2.repeat(hr_feature.shape[0],1,hr_feature.shape[-2]//scale,hr_feature.shape[-1]//scale).float()\n distance_matrix3=self.distance_matrix3.repeat(hr_feature.shape[0],1,hr_feature.shape[-2]//scale,hr_feature.shape[-1]//scale).float()\n 
distance_matrix4=self.distance_matrix4.repeat(hr_feature.shape[0],1,hr_feature.shape[-2]//scale,hr_feature.shape[-1]//scale).float()\n distance_matrix5=self.distance_matrix1.repeat(hr_feature.shape[0],1,hr_feature.shape[-2]//scale,hr_feature.shape[-1]//scale).float()\n distance_matrix6=self.distance_matrix2.repeat(hr_feature.shape[0],1,hr_feature.shape[-2]//scale,hr_feature.shape[-1]//scale).float()\n distance_matrix7=self.distance_matrix3.repeat(hr_feature.shape[0],1,hr_feature.shape[-2]//scale,hr_feature.shape[-1]//scale).float()\n distance_matrix8=self.distance_matrix4.repeat(hr_feature.shape[0],1,hr_feature.shape[-2]//scale,hr_feature.shape[-1]//scale).float()\n #center\n #reference image\n lr_feature=lr_feature.unsqueeze(-1).expand(lr_feature.shape[0],lr_feature.shape[1],lr_feature.shape[2],lr_feature.shape[3],scale) \\\n .contiguous().view(lr_feature.shape[0],lr_feature.shape[1],lr_feature.shape[2],lr_feature.shape[3]*scale) \\\n .unsqueeze(-2).expand(lr_feature.shape[0],lr_feature.shape[1],lr_feature.shape[2],scale,lr_feature.shape[3]*scale) \\\n .contiguous().view(lr_feature.shape[0],lr_feature.shape[1],lr_feature.shape[2]*scale,lr_feature.shape[3]*scale)\n\n representation=torch.cat([lr_feature,hr_feature,distance_matrix],1)\n weight=self.similarity1(representation)\n\n #target image\n # lr_feature_r=lr_feature_r.unsqueeze(-1).expand(lr_feature_r.shape[0],lr_feature_r.shape[1],lr_feature_r.shape[2],lr_feature_r.shape[3],scale) \\\n # .contiguous().view(lr_feature_r.shape[0],lr_feature_r.shape[1],lr_feature_r.shape[2],lr_feature_r.shape[3]*scale) \\\n # .unsqueeze(-2).expand(lr_feature_r.shape[0],lr_feature_r.shape[1],lr_feature_r.shape[2],scale,lr_feature_r.shape[3]*scale) \\\n # .contiguous().view(lr_feature_r.shape[0],lr_feature_r.shape[1],lr_feature_r.shape[2]*scale,lr_feature_r.shape[3]*scale)\n\n # representation_target=torch.cat([lr_feature_r,hr_feature_r,distance_matrix],1)\n # weight_target=self.similarity1(representation_target)\n\n #left\n #reference\n representation_l=torch.cat([lr_feature[:,:,:,:-scale],hr_feature[:,:,:,scale:],distance_matrix1[:,:,:,:-scale]],1)\n weight_l=self.similarity1(representation_l)\n weight_l=torch.cat([padding1,weight_l],-1)\n #target\n # representation_l_target=torch.cat([lr_feature_r[:,:,:,:-scale],hr_feature_r[:,:,:,scale:],distance_matrix2[:,:,:,:-scale]],1)\n # weight_l_target=self.similarity1(representation_l_target)\n # weight_l_target=torch.cat([padding1,weight_l_target],-1)\n #right\n #reference\n representation_r=torch.cat([lr_feature[:,:,:,scale:],hr_feature[:,:,:,:-scale],distance_matrix2[:,:,:,scale:]],1)\n weight_r=self.similarity1(representation_r)\n weight_r=torch.cat([weight_r,padding1],-1)\n\n #target image\n # representation_r_target=torch.cat([lr_feature_r[:,:,:,scale:],hr_feature_r[:,:,:,:-scale],distance_matrix1[:,:,:,scale:]],1)\n # weight_r_target=self.similarity1(representation_r_target)\n # weight_r_target=torch.cat([weight_r_target,padding1],-1)\n #top\n #reference\n representation_t=torch.cat([lr_feature[:,:,:-scale,:],hr_feature[:,:,scale:,:],distance_matrix3[:,:,:-scale,:]],1)\n weight_t=self.similarity1(representation_t)\n weight_t=torch.cat([padding2,weight_t],-2)\n #target\n # representation_t_target=torch.cat([lr_feature_r[:,:,:-scale,:],hr_feature_r[:,:,scale:,:],distance_matrix3[:,:,:-scale,:]],1)\n # weight_t_target=self.similarity1(representation_t_target)\n # weight_t_target=torch.cat([padding2,weight_t_target],-2)\n #bottom\n #reference\n 
representation_b=torch.cat([lr_feature[:,:,scale:,:],hr_feature[:,:,:-scale,:],distance_matrix4[:,:,scale:,:]],1)\n weight_b=self.similarity1(representation_b)\n weight_b=torch.cat([weight_b,padding2],-2)\n\n #left-top\n #reference\n representation_lt=torch.cat([lr_feature[:,:,:-scale,:-scale],hr_feature[:,:,scale:,scale:],distance_matrix5[:,:,:-scale,:-scale]],1)\n weight_lt=self.similarity1(representation_lt)\n weight_lt=torch.cat([padding2,torch.cat([padding1[...,scale:,:],weight_lt],-1)],-2)\n #target\n # representation_l_target=torch.cat([lr_feature_r[:,:,:,:-scale],hr_feature_r[:,:,:,scale:],distance_matrix2[:,:,:,:-scale]],1)\n # weight_l_target=self.similarity1(representation_l_target)\n # weight_l_target=torch.cat([padding1,weight_l_target],-1)\n #right-top\n #reference\n representation_rt=torch.cat([lr_feature[:,:,:-scale,scale:],hr_feature[:,:,scale:,:-scale],distance_matrix6[:,:,:-scale,scale:]],1)\n weight_rt=self.similarity1(representation_rt)\n weight_rt=torch.cat([padding2,torch.cat([weight_rt,padding1[...,scale:,:]],-1)],-2)\n\n #target image\n # representation_r_target=torch.cat([lr_feature_r[:,:,:,scale:],hr_feature_r[:,:,:,:-scale],distance_matrix1[:,:,:,scale:]],1)\n # weight_r_target=self.similarity1(representation_r_target)\n # weight_r_target=torch.cat([weight_r_target,padding1],-1)\n #left-bottom\n #reference\n representation_lb=torch.cat([lr_feature[:,:,scale:,:-scale],hr_feature[:,:,:-scale:,scale:],distance_matrix7[:,:,scale:,:-scale]],1)\n weight_lb=self.similarity1(representation_lb)\n weight_lb=torch.cat([torch.cat([padding1[...,scale:,:],weight_lb],-1),padding2],-2)\n #target\n # representation_t_target=torch.cat([lr_feature_r[:,:,:-scale,:],hr_feature_r[:,:,scale:,:],distance_matrix3[:,:,:-scale,:]],1)\n # weight_t_target=self.similarity1(representation_t_target)\n # weight_t_target=torch.cat([padding2,weight_t_target],-2)\n #right-bottom\n #reference\n representation_rb=torch.cat([lr_feature[:,:,scale:,scale:],hr_feature[:,:,:-scale,:-scale],distance_matrix8[:,:,scale:,scale:]],1)\n weight_rb=self.similarity1(representation_rb)\n weight_rb=torch.cat([torch.cat([weight_rb,padding1[...,:-scale,:]],-1),padding2],-2)\n\n\n weight_all=torch.cat([weight,weight_l,weight_r,weight_t,weight_b,weight_lt,weight_rt,weight_lb,weight_rb],dim=1)\n weight_norm=F.softmax(weight_all, dim=1)\n #weight_fuse=F.softmax(weight_norm*weight_all)\n #target\n # representation_b_target=torch.cat([lr_feature_r[:,:,scale:,:],hr_feature_r[:,:,:-scale,:],distance_matrix4[:,:,scale:,:]],1)\n # weight_b_target=self.similarity1(representation_b_target)\n # weight_b_target=torch.cat([weight_b_target,padding2],-2)\n\n # weight_all=torch.cat([weight,weight_r,weight_l,weight_t,weight_b],dim=1)\n # weight_norm=F.softmax(weight_all, dim=1)\n # weight_all_target=torch.cat([weight_target,weight_r_target,weight_l_target,weight_t_target,weight_b_target],dim=1)\n # weight_norm_target=F.softmax(weight_all_target, dim=1)\n\n # return weight*weight_norm[:,0:1,:,:],weight_target*weight_norm_target[:,0:1,:,:], \\\n # weight_r*weight_norm[:,1:2,:,:],weight_r_target*weight_norm_target[:,1:2,:,:], \\\n # weight_l*weight_norm[:,2:3,:,:],weight_l_target*weight_norm_target[:,2:3,:,:], \\\n # weight_t*weight_norm[:,3:4,:,:],weight_t_target*weight_norm_target[:,3:4,:,:], \\\n # weight_b*weight_norm[:,4:5,:,:],weight_b_target*weight_norm_target[:,4:5,:,:]\n # return self.sigmoid(weight)*weight_norm[:,0:1,...], \\\n # self.sigmoid(weight_l)*weight_norm[:,1:2,...], \\\n # self.sigmoid(weight_r)*weight_norm[:,2:3,...], 
\\\n # self.sigmoid(weight_t)*weight_norm[:,3:4,...], \\\n # self.sigmoid(weight_b)*weight_norm[:,4:5,...],\\\n # self.sigmoid(weight_lt)*weight_norm[:,5:6,...], \\\n # self.sigmoid(weight_rt)*weight_norm[:,6:7,...], \\\n # self.sigmoid(weight_lb)*weight_norm[:,7:8,...], \\\n # self.sigmoid(weight_rb)*weight_norm[:,8:9,...]\n #print(torch.mean(torch.max(weight_norm,dim=1)[0]),torch.max(weight_all,dim=1)[0])\n #print(torch.mean(torch.topk(weight_all,3,dim=1)[0].float()),torch.mean(torch.topk(weight_all,3,dim=1)[1].float()))\n #print(torch.mean(torch.topk(weight_all,1,dim=1)[0].float()),torch.mean(torch.topk(weight_all,1,dim=1)[1].float()))\n if torch.mean(torch.topk(weight_all,1,dim=1)[0].float())<0:\n print(torch.mean(torch.topk(weight_all,3,dim=1)[0].float()),torch.mean(torch.topk(weight_all,3,dim=1)[1].float()))\n print(torch.mean(torch.topk(weight_all,1,dim=1)[0].float()),torch.mean(torch.topk(weight_all,1,dim=1)[1].float()))\n #print(torch.mean(torch.min(weight_norm,dim=1)[0]),torch.min(weight_all,dim=1)[0])\n return weight_norm[:,0:1,...], \\\n weight_norm[:,1:2,...], \\\n weight_norm[:,2:3,...], \\\n weight_norm[:,3:4,...], \\\n weight_norm[:,4:5,...],\\\n weight_norm[:,5:6,...], \\\n weight_norm[:,6:7,...], \\\n weight_norm[:,7:8,...], \\\n weight_norm[:,8:9,...]\nclass cmfsm(nn.Module):\n\n\n def __init__(self, \n maxdisp=192):\n\n super(cmfsm, self).__init__()\n self.maxdisp = maxdisp\n self.feature_extraction = feature_extraction()\n\n self.dres0 = nn.Sequential(\n convbn_3d(64, 32, 3, 1, 1),\n nn.ReLU(inplace=True),\n convbn_3d(32, 32, 3, 1, 1),\n nn.ReLU(inplace=True))\n\n self.dres1 = nn.Sequential(\n convbn_3d(32, 32, 3, 1, 1),\n nn.ReLU(inplace=True),\n convbn_3d(32, 32, 3, 1, 1))\n\n self.dres2 = hourglass(32)\n\n self.dres3 = hourglass(32)\n\n self.dres4 = hourglass(32)\n\n self.classif1 = nn.Sequential(\n convbn_3d(32, 32, 3, 1, 1),\n nn.ReLU(inplace=True),\n nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1, bias=False))\n\n self.classif2 = nn.Sequential(\n convbn_3d(32, 32, 3, 1, 1),\n nn.ReLU(inplace=True),\n nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1, bias=False))\n\n self.classif3 = nn.Sequential(\n convbn_3d(32, 32, 3, 1, 1),\n nn.ReLU(inplace=True),\n nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1, bias=False))\n self.mapping_matrix=eight_related_context_mapping()\n\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n elif isinstance(m, nn.Conv3d):\n n = m.kernel_size[0] * m.kernel_size[1] * \\\n m.kernel_size[2] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm3d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n m.bias.data.zero_()\n\n def forward(self, left, right):\n start=time.time()\n refimg_fea, half,all_feature= self.feature_extraction(left)\n targetimg_fea, _ ,all_feature_right= self.feature_extraction(right)\n scale=all_feature.shape[-1]//refimg_fea.shape[-1]\n #mapping,mapping_r,mapping_l,mapping_t,mapping_b=self.mapping_matrix(refimg_fea,all_feature)\n #target\n #[mapping,mapping_r,mapping_l,mapping_t,mapping_b],[mapping_target,mapping_target_r,mapping_target_l]=self.mapping_matrix(refimg_fea,all_feature,targetimg_fea,all_feature_right)\n #time=0.1s\n weight,weight_l,weight_r,weight_t,weight_b,weight_lt,weight_rt,weight_lb,weight_rb=self.mapping_matrix(refimg_fea,all_feature,targetimg_fea,all_feature_right)\n #mapping,mapping_target=self.mapping_matrix(refimg_fea,all_feature,targetimg_fea,all_feature_right)\n # matching\n cost = Variable(\n torch.FloatTensor(refimg_fea.size()[0],\n refimg_fea.size()[1] * 2, self.maxdisp // scale,\n refimg_fea.size()[2],\n refimg_fea.size()[3]).zero_()).cuda()\n\n for i in range(self.maxdisp // scale):\n if i > 0:\n cost[:, :refimg_fea.size()[1], i, :, i:] = refimg_fea[:, :, :,\n i:]\n cost[:, refimg_fea.size()[1]:, i, :,\n i:] = targetimg_fea[:, :, :, :-i]\n else:\n cost[:, :refimg_fea.size()[1], i, :, :] = refimg_fea\n cost[:, refimg_fea.size()[1]:, i, :, :] = targetimg_fea\n cost = cost.contiguous()\n \n cost0 = self.dres0(cost)\n cost0 = self.dres1(cost0) + cost0\n out1, pre1, post1 = self.dres2(cost0, None, None)\n out1 = out1 + cost0\n\n out2, pre2, post2 = self.dres3(out1, pre1, post1)\n out2 = out2 + cost0\n\n out3, pre3, post3 = self.dres4(out2, pre1, post2)\n out3 = out3 + cost0\n\n cost1 = self.classif1(out1)\n #cost2 = self.classif2(out2) + cost1\n #cost3 = self.classif3(out3) + cost2\n #torch.Size([1, 1, 256, 512])\n # weight_all=torch.cat([weight,weight_r,weight_l,weight_t,weight_b],dim=1)\n # weight_norm=F.softmax(weight_all, dim=1)\n\n # t=time.time()\n cost1 = torch.squeeze(cost1, 1)\n\n pred1 = F.softmax(cost1, dim=1)\n pred1 = disparityregression(self.maxdisp//scale)(pred1)\n #torch.Size([1, 64, 128])\n\n pred1=scale*pred1.unsqueeze(-1).expand(pred1.shape[0],pred1.shape[1],pred1.shape[2],scale) \\\n .contiguous().view(pred1.shape[0],pred1.shape[1],pred1.shape[2]*scale) \\\n .unsqueeze(-2).expand(pred1.shape[0],pred1.shape[1],scale,pred1.shape[2]*scale) \\\n .contiguous().view(pred1.shape[0],pred1.shape[1]*scale,pred1.shape[2]*scale)\n\n pred1_map=pred1*weight\n pred1_map[...,scale:]+=pred1[...,:-scale]*weight_l[...,scale:]\n pred1_map[...,:-scale]+=pred1[...,scale:]*weight_r[...,:-scale]\n pred1_map[...,scale:,:]+=pred1[...,:-scale,:]*weight_t[...,scale:,:]\n pred1_map[...,:-scale,:]+=pred1[...,scale:,:]*weight_b[...,:-scale,:]\n\n pred1_map[...,scale:,scale:]+=pred1[...,:-scale,:-scale]*weight_lt[...,scale:,scale:]\n pred1_map[...,scale:,:-scale]+=pred1[...,:-scale,scale:]*weight_rt[...,scale:,:-scale]\n pred1_map[...,:-scale,scale:]+=pred1[...,scale:,:-scale]*weight_lb[...,:-scale,scale:]\n pred1_map[...,:-scale,:-scale]+=pred1[...,scale:,scale:]*weight_rb[...,:-scale,:-scale]\n cost2 = self.classif2(out2)\n cost2 = torch.squeeze(cost2, 1)+cost1\n\n pred2 = F.softmax(cost2, dim=1)\n pred2 = disparityregression(self.maxdisp//scale)(pred2)\n\n 
pred2=scale*pred2.unsqueeze(-1).expand(pred2.shape[0],pred2.shape[1],pred2.shape[2],scale) \\\n .contiguous().view(pred2.shape[0],pred2.shape[1],pred2.shape[2]*scale) \\\n .unsqueeze(-2).expand(pred2.shape[0],pred2.shape[1],scale,pred2.shape[2]*scale) \\\n .contiguous().view(pred2.shape[0],pred2.shape[1]*scale,pred2.shape[2]*scale)\n\n pred2_map=pred2*weight\n pred2_map[...,scale:]+=pred2[...,:-scale]*weight_l[...,scale:]\n pred2_map[...,:-scale]+=pred2[...,scale:]*weight_r[...,:-scale]\n pred2_map[...,scale:,:]+=pred2[...,:-scale,:]*weight_t[...,scale:,:]\n pred2_map[...,:-scale,:]+=pred2[...,scale:,:]*weight_b[...,:-scale,:]\n\n pred2_map[...,scale:,scale:]+=pred2[...,:-scale,:-scale]*weight_lt[...,scale:,scale:]\n pred2_map[...,scale:,:-scale]+=pred2[...,:-scale,scale:]*weight_rt[...,scale:,:-scale]\n pred2_map[...,:-scale,scale:]+=pred2[...,scale:,:-scale]*weight_lb[...,:-scale,scale:]\n pred2_map[...,:-scale,:-scale]+=pred2[...,scale:,scale:]*weight_rb[...,:-scale,:-scale]\n\n\n cost3 = self.classif3(out3)\n cost3 = torch.squeeze(cost3, 1)+cost2\n \n pred3 = F.softmax(cost3, dim=1)\n # print(torch.max(pred3,dim=1)[0])\n # print(torch.min(pred3,dim=1)[0])\n pred3 = disparityregression(self.maxdisp//scale)(pred3)\n\n pred3=scale*pred3.unsqueeze(-1).expand(pred3.shape[0],pred3.shape[1],pred3.shape[2],scale) \\\n .contiguous().view(pred3.shape[0],pred3.shape[1],pred3.shape[2]*scale) \\\n .unsqueeze(-2).expand(pred3.shape[0],pred3.shape[1],scale,pred3.shape[2]*scale) \\\n .contiguous().view(pred3.shape[0],pred3.shape[1]*scale,pred3.shape[2]*scale)\n\n pred3_map=pred3*weight\n pred3_map[...,scale:]+=pred3[...,:-scale]*weight_l[...,scale:]\n pred3_map[...,:-scale]+=pred3[...,scale:]*weight_r[...,:-scale]\n pred3_map[...,scale:,:]+=pred3[...,:-scale,:]*weight_t[...,scale:,:]\n pred3_map[...,:-scale,:]+=pred3[...,scale:,:]*weight_b[...,:-scale,:]\n\n pred3_map[...,scale:,scale:]+=pred3[...,:-scale,:-scale]*weight_lt[...,scale:,scale:]\n pred3_map[...,scale:,:-scale]+=pred3[...,:-scale,scale:]*weight_rt[...,scale:,:-scale]\n pred3_map[...,:-scale,scale:]+=pred3[...,scale:,:-scale]*weight_lb[...,:-scale,scale:]\n pred3_map[...,:-scale,:-scale]+=pred3[...,scale:,scale:]*weight_rb[...,:-scale,:-scale]\n\n\n #pred3 = self.srr(pred3, left, refimg_fea, half)\n #print(time.time()-start)\n return pred1_map, pred2_map, pred3_map\n #return pred3\n\n\n\n"
] | [
[
"torch.sum",
"torch.nn.init.kaiming_normal_",
"torch.nn.GroupNorm",
"torch.nn.init.constant_",
"torch.no_grad",
"torch.nn.ConvTranspose3d",
"torch.nn.ReLU",
"torch.topk",
"torch.arange",
"torch.nn.Conv2d",
"torch.nn.Sequential",
"torch.nn.AvgPool2d",
"torch.nn.Sigmoid",
"torch.cat",
"torch.nn.Conv3d",
"torch.squeeze",
"torch.nn.LeakyReLU"
]
] |
khirotaka/testbed | [
"e32384a3267d5282fb9f2df22597dfa7fb9aa17d"
] | [
"examples/sw.py"
] | [
"import time\nimport numpy as np\nfrom testbed._rust import sliding_window\n\n\nx = np.random.randn(5000, 5)\n\n\ns = time.time()\nrustout = sliding_window(x, 100, 1)\nprint(\"=\" * 50)\nprint(\"Rust Speed: \", time.time() - s)\nprint(rustout.shape)\n\n\ndef sw(array, ws, over):\n sl = len(array)\n return [array[i:i+ws] for i in range(0, sl-ws, over)]\n\n\nprint(\"=\" * 50)\ns = time.time()\ntmp = sw(x, 100, 1)\ntmp = np.stack(tmp, 0)\nprint(\"Python Speed: \", time.time() - s)\nprint(tmp.shape)\n"
] | [
[
"numpy.stack",
"numpy.random.randn"
]
] |
katianaz/GiftHelper | [
"1fbff4e7902c25950a5f50f04f0b2c834842ccbe"
] | [
"informacoes_emails.py"
] | [
"import pontuacao_categorias\r\nimport pandas as pd\r\n\r\nnomes = []\r\nnomes_presenteados = []\r\nenderecos_emails = []\r\n\r\nfor p in range(len(pontuacao_categorias.tabela.index)):\r\n nomes.append(pontuacao_categorias.tabela['3'][p])\r\n nomes_presenteados.append(pontuacao_categorias.tabela['4'][p])\r\n enderecos_emails.append(pontuacao_categorias.tabela['2'][p])\r\n\r\ninformacoes = {'Nome': nomes,\r\n 'Email': enderecos_emails,\r\n 'Presenteado': nomes_presenteados,\r\n 'Sugestoes': pontuacao_categorias.sugestoes}\r\n\r\ninfos = pd.DataFrame(informacoes, columns=['Nome', 'Email', 'Presenteado', 'Sugestoes'])\r\n\r\ninfos.to_csv('infos_emails.csv', encoding='latin-1')\r\n"
] | [
[
"pandas.DataFrame"
]
] |
tfgraph/tfgraph | [
"19ae968b3060275c631dc601757646abaf1f58a1"
] | [
"examples/example_sparsifier_graph.py"
] | [
"#!/usr/bin/python3\n\nimport tensorflow as tf\nimport tfgraph\n\n\ndef main():\n with tf.Session() as sess:\n g: tfgraph.Graph = tfgraph.GraphConstructor.unweighted_random(sess, \"G\", 10, 85)\n g_sparse: tfgraph.Graph = tfgraph.GraphConstructor.as_sparsifier(sess, g, 0.75)\n\n print(g)\n print(g.m)\n\n print(g_sparse)\n print(g_sparse.m)\n\n print(g_sparse.m / g.m)\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"tensorflow.Session"
]
] |
HeyLifeHD/rp-bp | [
"9c59b1bc0267400747477467c45f96364d5528e1"
] | [
"rpbp/analysis/profile_construction/visualize_metagene_profile_bayes_factor.py"
] | [
"#! /usr/bin/env python3\n\nimport matplotlib\nmatplotlib.use('agg')\n\nimport argparse\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport logging\n\ndefault_title = \"Metagene profile Bayes' factors\"\ndefault_xlabel = \"Offset, relative to translation \\ninitiation site\"\ndefault_ylabel = \"Bayes' factor\"\ndefault_font_size = 15\n\ndefault_series_label = \"\"\n\ndef main():\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description=\"This script visualizes the Bayes' factors for a metagene profile.\\n\\n\"\n \"This script contains some hard-coded field names.\")\n parser.add_argument('bayes_factors', help=\"The metagene profile (csv) file\")\n parser.add_argument('length', help=\"The profile lengths to visualize\", type=int)\n parser.add_argument('out', help=\"The (output) image file\")\n \n parser.add_argument('--title', help=\"The title for the figure\", default=default_title)\n parser.add_argument('--xlabel', help=\"The label for the x-axis\", default=default_xlabel)\n parser.add_argument('--ylabel', help=\"The label for the y-axis\", default=default_ylabel)\n parser.add_argument('--series-label', help=\"The label for the legend\", default=default_series_label)\n parser.add_argument('--font-size', help=\"The font size for the title, axis labels, and \"\n \"xticks labels\", type=int, default=default_font_size)\n\n args = parser.parse_args()\n\n bayes_factors = pd.read_csv(args.bayes_factors)\n\n mask_length = bayes_factors['length'] == args.length\n group = bayes_factors.loc[mask_length]\n\n bfs = group['bayes_factor_mean']\n offsets = group['offset']\n bf_range = max(bfs) - min(bfs)\n \n fig, ax = plt.subplots(figsize=(10,5))\n ax.plot(offsets, bfs, label=args.series_label, color='b')\n ax.scatter(offsets, bfs, color='b')\n\n xlim = (min(offsets), max(offsets))\n\n ymin = min(bfs) - 0.1*bf_range\n ymax = max(bfs) + 0.1*bf_range\n ylim = (ymin, ymax)\n\n # and draw a line at \"bf=5\"\n plt.plot(xlim, (5, 5), color='k', linewidth=2, linestyle=':')\n\n # and a horizontal line at the maximum bf\n plt.plot(xlim, (max(bfs), max(bfs)), color='r', linewidth=1, linestyle=\"-.\")\n\n # and a vertical line at \"offset=-12\"\n ax.plot((-12, -12), ylim, color='g', linestyle=\"--\")\n \n ax.set_xlim(xlim)\n ax.set_ylim(ylim)\n\n # finally, add the labels, etc.\n plt.suptitle(args.title, fontsize=args.font_size, y=1.03)\n ax.set_xlabel(args.xlabel, fontsize=args.font_size)\n ax.set_ylabel(args.ylabel, fontsize=args.font_size)\n\n ax.tick_params(axis='both', which='major', labelsize=args.font_size)\n #ax.legend(loc=\"upper right\")\n\n fig.tight_layout()\n fig.savefig(args.out, bbox_inches='tight')\n\nif __name__ == '__main__':\n main()\n\n"
] | [
[
"pandas.read_csv",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.suptitle",
"matplotlib.use",
"matplotlib.pyplot.plot"
]
] |
B612-Asteroid-Institute/thor | [
"d3d1dcbe86f67a62c90b4cde3fc577e414825cf2"
] | [
"thor/orbit.py"
] | [
"import numpy as np\n\nfrom .utils import _checkTime\nfrom .vectors import calcNae\nfrom .vectors import calcDelta\nfrom .vectors import calcXae\nfrom .vectors import calcXa\nfrom .vectors import calcNhat\nfrom .vectors import calcR1\nfrom .vectors import calcR2\nfrom .projections import cartesianToGnomonic\nfrom .coordinates import transformCoordinates\n\n__all__ = [\"TestOrbit\"]\n\nclass TestOrbit:\n \"\"\"\n TestOrbit: Class that calculates and stores the rotation matrices \n for a guess of heliocentric distance and velocity. To be used in \n tandem with the Cell class.\n \n Parameters\n ----------\n elements : `~numpy.ndarray` (6)\n Cartesian ecliptic orbital elements with postions in units of AU\n and velocities in units of AU per day. \n t0 : `~astropy.time.core.Time` (1)\n Epoch at which orbital elements are defined.\n \"\"\"\n def __init__(self, elements, epoch):\n _checkTime(epoch, \"epoch\")\n \n self.elements = elements\n self.epoch = epoch\n \n def prepare(self, verbose=True):\n \"\"\"\n Calculate rotation matrices. \n \n Populates the following class properties:\n n_hat : vector normal to the plane of orbit \n R1 : rotation matrix to rotate towards x-y plane\n R2 : rotation matrix to rotate towards x-axis\n M : final rotation matrix\n \n Parameters\n ----------\n verbose : bool, optional\n Print progress statements.\n [Default = True]\n \n Returns\n -------\n None\n \"\"\"\n if verbose is True:\n print(\"Calculating vector normal to plane of orbit...\")\n self.n_hat = calcNhat(self.elements[:3])\n \n if verbose is True:\n print(\"Calculating R1 rotation matrix...\")\n self.R1 = calcR1(self.elements[:3], self.n_hat)\n self.x_a_xy = np.array(self.R1 @ self.elements[:3])[0]\n \n if verbose is True:\n print(\"Calculating R2 rotation matrix...\")\n self.R2 = calcR2(self.x_a_xy)\n \n if verbose is True:\n print(\"Calculating final rotation matrix...\")\n self.M = self.R2 @ self.R1\n \n if verbose is True:\n print(\"Done.\")\n print(\"\")\n return\n \n def applyToObservations(self, observations, verbose=True):\n \"\"\"\n Apply the prepared rotations to the given observations. Adds the gnomonic \n plane coordinates to observations (columns: theta_x_deg, theta_y_deg) \n \n Parameters\n ----------\n observations : `~pandas.DataFrame`\n DataFrame of observations defined at the same epoch as this test orbit, \n to project into the test orbit's frame.\n verbose : bool, optional\n Print progress statements? 
\n [Default = True]\n \n Returns\n -------\n None\n \"\"\"\n \n if verbose is True:\n print(\"Applying rotation matrices to observations...\")\n print(\"Converting to ecliptic coordinates...\")\n\n #velocities_present = False\n #if \"vRAcosDec\" in observations.columns and \"vDec\" in observations.columns:\n # coords_eq_r = observations[[\"RA_deg\", \"Dec_deg\"]].values\n # coords_eq_v = observations[[\"vRAcosDec\", \"vDec\"]].values\n # coords_eq_v[:, 0] /= np.cos(np.radians(coords_eq_r[:, 1]))\n # coords_eq = np.hstack([\n # np.ones((len(coords_eq_r), 1)), \n # coords_eq_r, \n # np.zeros((len(coords_eq_r), 1)),\n # coords_eq_v\n # ]) \n # velocities_present = True\n\n #else:\n coords_eq = observations[[\"RA_deg\", \"Dec_deg\"]].values\n coords_eq = np.hstack([np.ones((len(coords_eq), 1)), coords_eq]) \n coords_ec = transformCoordinates(coords_eq, \n \"equatorial\", \n \"ecliptic\",\n representation_in=\"spherical\",\n representation_out=\"spherical\"\n )\n \n if verbose is True:\n print(\"Calculating object to observer unit vector...\")\n n_ae = calcNae(coords_ec[:, 1:3])\n x_e = observations[[\"obs_x\", \"obs_y\", \"obs_z\"]].values\n \n if verbose is True:\n print(\"Calculating object to observer distance assuming r = {} AU...\".format(np.linalg.norm(self.elements[:3])))\n delta = np.zeros(len(n_ae))\n for i in range(len(delta)):\n delta[i] = calcDelta(np.linalg.norm(self.elements[:3]), x_e[i, :], n_ae[i, :])\n \n if verbose is True:\n print(\"Calculating object to observer position vector...\")\n x_ae = np.zeros([len(delta), 3])\n for i, (delta_i, n_ae_i) in enumerate(zip(delta, n_ae)):\n x_ae[i] = calcXae(delta_i, n_ae_i)\n \n if verbose is True:\n print(\"Calculating heliocentric object position vector...\")\n x_a = np.zeros([len(x_ae), 3])\n for i, (x_ae_i, x_e_i) in enumerate(zip(x_ae, x_e)):\n x_a[i] = calcXa(x_ae_i, x_e_i)\n \n if verbose is True:\n print(\"Applying rotation matrix M to heliocentric object position vector...\")\n coords_cart_rotated = np.array(self.M @ x_a.T).T\n \n if verbose is True:\n print(\"Performing gnomonic projection...\")\n gnomonic_coords = cartesianToGnomonic(coords_cart_rotated)\n \n\n observations[\"obj_x\"] = x_a[:, 0]\n observations[\"obj_y\"] = x_a[:, 1]\n observations[\"obj_z\"] = x_a[:, 2]\n observations[\"theta_x_deg\"] = np.degrees(gnomonic_coords[:, 0])\n observations[\"theta_y_deg\"] = np.degrees(gnomonic_coords[:, 1])\n observations[\"test_obj_x\"] = self.elements[0]\n observations[\"test_obj_y\"] = self.elements[1]\n observations[\"test_obj_z\"] = self.elements[2]\n observations[\"test_obj_vx\"] = self.elements[3]\n observations[\"test_obj_vy\"] = self.elements[4]\n observations[\"test_obj_vz\"] = self.elements[5]\n\n if verbose is True:\n print(\"Done.\")\n print(\"\")\n return \n\n def applyToEphemeris(self, ephemeris, verbose=True):\n \"\"\"\n Apply the prepared rotations to the given ephemerides. Adds the gnomonic \n plane coordinates to observations (columns: theta_x_deg, theta_y_deg, vtheta_x, and vtheta_y) \n \n Parameters\n ----------\n ephemeris : `~pandas.DataFrame`\n DataFrame of ephemeris generated by a THOR backend defined at the same epoch as this test orbit, \n to project into the test orbit's frame.\n verbose : bool, optional\n Print progress statements? 
\n [Default = True]\n \n Returns\n -------\n None\n \"\"\"\n coords_cart = ephemeris[[\"obj_x\", \"obj_y\", \"obj_z\", \"obj_vx\", \"obj_vy\", \"obj_vz\"]].values\n coords_cart_rotated = np.zeros_like(coords_cart)\n \n if verbose is True:\n print(\"Applying rotation matrix M to heliocentric object position vector...\")\n coords_cart_rotated[:, :3] = np.array(self.M @ coords_cart[:, :3].T).T\n\n if verbose is True:\n print(\"Applying rotation matrix M to heliocentric object velocity vector...\")\n # Calculate relative velocity, then rotate to projected frame\n coords_cart[:, 3:] = coords_cart[:, 3:] - self.elements[3:].reshape(1, -1)\n coords_cart_rotated[:, 3:] = np.array(self.M @ coords_cart[:, 3:].T).T\n \n if verbose is True:\n print(\"Performing gnomonic projection...\")\n gnomonic_coords = cartesianToGnomonic(coords_cart_rotated)\n \n ephemeris[\"theta_x_deg\"] = np.degrees(gnomonic_coords[:, 0])\n ephemeris[\"theta_y_deg\"] = np.degrees(gnomonic_coords[:, 1])\n ephemeris[\"vtheta_x_deg\"] = np.degrees(gnomonic_coords[:, 2])\n ephemeris[\"vtheta_y_deg\"] = np.degrees(gnomonic_coords[:, 3])\n ephemeris[\"test_obj_x\"] = self.elements[0]\n ephemeris[\"test_obj_y\"] = self.elements[1]\n ephemeris[\"test_obj_z\"] = self.elements[2]\n ephemeris[\"test_obj_vx\"] = self.elements[3]\n ephemeris[\"test_obj_vy\"] = self.elements[4]\n ephemeris[\"test_obj_vz\"] = self.elements[5]\n\n if verbose is True:\n print(\"Done.\")\n print(\"\")\n return "
] | [
[
"numpy.degrees",
"numpy.zeros_like",
"numpy.linalg.norm",
"numpy.array"
]
] |
bullocke/yatsm_nrt | [
"b0ded56032bf9f9dcdf6b7b749f6554ade56de1e"
] | [
"yatsm/cache.py"
] | [
"\"\"\" Functions related to writing to and retrieving from cache files\n\"\"\"\nimport os\n\nimport numpy as np\n\nfrom log_yatsm import logger\n\n_image_ID_str = 'image_IDs'\n\n\ndef get_line_cache_name(dataset_config, n_images, row, nbands):\n \"\"\" Returns cache filename for specified config and line number\n\n Args:\n dataset_config (dict): configuration information about the dataset\n n_images (int): number of images in dataset\n row (int): line of the dataset for output\n nbands (int): number of bands in dataset\n\n Returns:\n str: filename of cache file\n\n \"\"\"\n path = dataset_config.get('cache_line_dir')\n if not path:\n return\n\n filename = 'yatsm_r%i_n%i_b%i.npy.npz' % (row, n_images, nbands)\n\n return os.path.join(path, filename)\n\n\ndef get_line_cache_pattern(row, nbands, regex=False):\n \"\"\" Returns a pattern for a cache file from a certain row\n\n This function is useful for finding all cache files from a line, ignoring\n the number of images in the file.\n\n Args:\n row (int): line of the dataset for output\n nbands (int): number of bands in dataset\n regex (bool, optional): return a regular expression instead of glob\n style (default: False)\n\n Returns:\n str: filename pattern for cache files from line ``row``\n\n \"\"\"\n wildcard = '.*' if regex else '*'\n pattern = 'yatsm_r{l}_n{w}_b{b}.npy.npz'.format(\n l=row, w=wildcard, b=nbands)\n\n return pattern\n\n\ndef test_cache(dataset_config):\n \"\"\" Test cache directory for ability to read from or write to\n\n Args:\n dataset_config (dict): dictionary of dataset configuration options\n\n Returns:\n tuple: tuple of bools describing ability to read from and write to\n cache directory\n\n \"\"\"\n # Try to find / use cache\n read_cache = False\n write_cache = False\n\n cache_dir = dataset_config.get('cache_line_dir')\n if cache_dir:\n # Test existence\n if os.path.isdir(cache_dir):\n if os.access(cache_dir, os.R_OK):\n read_cache = True\n if os.access(cache_dir, os.W_OK):\n write_cache = True\n if read_cache and not write_cache:\n logger.warning('Cache directory exists but is not writable')\n else:\n # If it doesn't already exist, can we create it?\n try:\n os.makedirs(cache_dir)\n except:\n logger.warning('Could not create cache directory')\n else:\n read_cache = True\n write_cache = True\n\n logger.debug('Attempt reading in from cache directory?: {b}'.format(\n b=read_cache))\n logger.debug('Attempt writing to cache directory?: {b}'.format(\n b=write_cache))\n\n return read_cache, write_cache\n\n\ndef read_cache_file(cache_filename, image_IDs=None):\n \"\"\" Returns image data from a cache file\n\n If ``image_IDs`` is not None this function will try to ensure data from\n cache file come from the list of image IDs provided. If cache file does not\n contain a list of image IDs, it will skip the check and return cache data.\n\n Args:\n cache_filename (str): cache filename\n image_IDs (iterable, optional): list of image IDs corresponding to data\n in cache file. 
If not specified, function will not check for\n correspondence (default: None)\n\n Returns:\n np.ndarray, or None: Return Y as np.ndarray if possible and if the\n cache file passes the consistency check specified by ``image_IDs``,\n else None\n\n \"\"\"\n try:\n cache = np.load(cache_filename)\n except IOError:\n return None\n\n if _image_ID_str in cache.files and image_IDs is not None:\n if not np.array_equal(image_IDs, cache[_image_ID_str]):\n logger.warning('Cache file data in {f} do not match images '\n 'specified'.format(f=cache_filename))\n return None\n\n return cache['Y']\n\n\ndef write_cache_file(cache_filename, Y, image_IDs):\n \"\"\" Writes data to a cache file using np.savez_compressed\n\n Args:\n cache_filename (str): cache filename\n Y (np.ndarray): data to write to cache file\n image_IDs (iterable): list of image IDs corresponding to data in cache\n file. If not specified, function will not check for correspondence\n\n \"\"\"\n np.savez_compressed(cache_filename, **{\n 'Y': Y, _image_ID_str: image_IDs\n })\n\n\n# Cache file updating\ndef update_cache_file(images, image_IDs,\n old_cache_filename, new_cache_filename,\n line, reader):\n \"\"\" Modify an existing cache file to contain data within `images`\n\n This should be useful for updating a set of cache files to reflect\n modifications to the timeseries dataset without completely reading the\n data into another cache file.\n\n For example, the cache file could be updated to reflect the deletion of\n a misregistered or cloudy image. Another common example would be for\n updating cache files to include newly acquired observations.\n\n Note that this updater will not handle updating cache files to include\n new bands.\n\n Args:\n images (iterable): list of new image filenames\n image_IDs (iterable): list of new image identifying strings\n old_cache_filename (str): filename of cache file to update\n new_cache_filename (str): filename of new cache file which includes\n modified data\n line (int): the line of data to be updated\n reader (callable): GDAL or BIP image reader function from\n :mod:`yatsm.io.stack_line_readers`\n\n Raises:\n ValueError: Raise error if old cache file does not record ``image_IDs``\n\n \"\"\"\n images = np.asarray(images)\n image_IDs = np.asarray(image_IDs)\n\n # Cannot proceed if old cache file doesn't store filenames\n old_cache = np.load(old_cache_filename)\n if _image_ID_str not in old_cache.files:\n raise ValueError('Cannot update cache.'\n 'Old cache file does not store image IDs.')\n old_IDs = old_cache[_image_ID_str]\n old_Y = old_cache['Y']\n nband, _, ncol = old_Y.shape\n\n # Create new Y and add in values retained from old cache\n new_Y = np.zeros((nband, image_IDs.size, ncol),\n dtype=old_Y.dtype.type)\n new_IDs = np.zeros(image_IDs.size, dtype=image_IDs.dtype)\n\n # Check deletions -- find which indices to retain in new cache\n retain_old = np.where(np.in1d(old_IDs, image_IDs))[0]\n if retain_old.size == 0:\n logger.warning('No image IDs in common in old cache file.')\n else:\n logger.debug(' retaining {r} of {n} images'.format(\n r=retain_old.size, n=old_IDs.size))\n # Find indices of old data to insert into new data\n idx_old_IDs = np.argsort(old_IDs)\n sorted_old_IDs = old_IDs[idx_old_IDs]\n idx_IDs = np.searchsorted(sorted_old_IDs,\n image_IDs[np.in1d(image_IDs, old_IDs)])\n\n retain_old = idx_old_IDs[idx_IDs]\n\n # Indices to insert into new data\n retain_new = np.where(np.in1d(image_IDs, old_IDs))[0]\n\n new_Y[:, retain_new, :] = old_Y[:, retain_old, :]\n new_IDs[retain_new] = 
old_IDs[retain_old]\n\n # Check additions -- find which indices we need to insert\n insert = np.where(np.in1d(image_IDs, old_IDs, invert=True))[0]\n\n if retain_old.size == 0 and insert.size == 0:\n raise ValueError('Cannot update cache file -- '\n 'no data retained or added')\n\n # Read in the remaining data from disk\n if insert.size > 0:\n logger.debug('Inserting {n} new images into cache'.format(\n n=insert.size))\n insert_Y = reader.read_row(images[insert], line)\n new_Y[:, insert, :] = insert_Y\n new_IDs[insert] = image_IDs[insert]\n\n np.testing.assert_equal(new_IDs, image_IDs)\n\n # Save\n write_cache_file(new_cache_filename, new_Y, image_IDs)\n"
] | [
[
"numpy.load",
"numpy.zeros",
"numpy.testing.assert_equal",
"numpy.argsort",
"numpy.asarray",
"numpy.in1d",
"numpy.array_equal",
"numpy.savez_compressed"
]
] |
mewbak/hypertools | [
"bc2947737be8bd5a6e2a3bdca84132f6fee8989c"
] | [
"examples/plot_hue.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\n=============================\nGrouping data by category\n=============================\n\nWhen plotting, its useful to have a way to color points by some category or\nvariable. Hypertools does this using the `hue` kwarg, which takes a list\nof string category labels or numerical values. If text labels are passed, the\ndata is restructured according to those labels and plotted in different colors\naccording to your color palette. If numerical values are passed, the values\nare binned (default resolution: 100) and plotted according to your color\npalette.\n\"\"\"\n\n# Code source: Andrew Heusser\n# License: MIT\n\n# import\nimport hypertools as hyp\nimport numpy as np\n\n# load example data\ngeo = hyp.load('weights_sample')\ndata = geo.get_data()\n\n# simulate random groups\nhue=[]\nfor idx,i in enumerate(data):\n tmp=[]\n for iidx,ii in enumerate(i):\n tmp.append(int(np.random.randint(1000, size=1)))\n hue.append(tmp)\n\n# plot\ngeo.plot(fmt='.', hue=hue)\n"
] | [
[
"numpy.random.randint"
]
] |
gewoonrik/pullreqs-dnn | [
"dbafd1866c1cd44424d238618e5ca54841c358c0"
] | [
"preprocess.py"
] | [
"#!/usr/bin/env python\n#\n# (c) 2016 -- onwards Georgios Gousios <[email protected]>, Rik Nijessen <[email protected]>\n#\n\n\nfrom __future__ import print_function\n\nimport pickle\nimport random\nimport urllib\nimport numpy as np\nimport argparse\n\nfrom config import *\nfrom code_tokenizer import CodeTokenizer\nfrom my_tokenizer import MyTokenizer\nfrom keras.preprocessing.sequence import pad_sequences\n\n\n@timeit\ndef load_pr_csv(file):\n \"\"\"\n Load a PR dataset, including all engineered features\n :return: A pandas dataframe with all data loaded\n \"\"\"\n print(\"Loading pull requests file \", file)\n pullreqs = pd.read_csv(file)\n pullreqs.set_index(['project_name', 'github_id'])\n return pullreqs\n\n\ndef ensure_diffs():\n \"\"\"\n Make sure that the PR diffs have been downloaded in the appropriate dir\n \"\"\"\n if not os.path.exists(DIFFS_DIR):\n print(\"Downloading pull request diffs\")\n import tarfile\n\n urllib.urlretrieve(DIFFS_DATA_URL, DIFFS_FILE)\n tar = tarfile.open(DIFFS_FILE, \"r:gz\")\n tar.extractall()\n tar.close()\n\n\ndef read_title_and_comments(file):\n str = open(file).read()\n splitted = str.split(\"\\n\")\n title = splitted[0]\n # remove title and empty space\n comment = str[2:]\n return title, comment\n\n@timeit\ndef create_code_tokenizer(code, vocabulary_size):\n tokenizer = CodeTokenizer(nb_words=vocabulary_size)\n tokenizer.fit_on_texts(code)\n word_index = tokenizer.word_index\n print('Found %s unique tokens.' % len(word_index))\n return tokenizer\n\ndef create_text_tokenizer(texts, vocabulary_size):\n tokenizer = MyTokenizer(nb_words=vocabulary_size)\n tokenizer.fit_on_texts(texts)\n word_index = tokenizer.word_index\n print('Found %s unique tokens.' % len(word_index))\n return tokenizer\n\n\n@timeit\ndef tokenize(tokenizer, texts, maxlen):\n print(\"Tokenizing\")\n sequences = tokenizer.texts_to_sequences(texts)\n return pad_sequences(sequences, maxlen=maxlen)\n\n\ndef load_data(pullreqs):\n diffs = []\n titles = []\n comments = []\n labels = []\n successful = failed = 0\n for i, row in pullreqs.iterrows():\n try:\n name = (row['project_name']).replace('/','@')+\"@\"+str(row['github_id'])+'.patch'\n\n diff_file = os.path.join(DIFFS_DIR, name)\n comment_file = os.path.join(TXTS_DIR, name.replace(\".patch\",\".txt\"))\n\n diff = open(diff_file).read()\n title, comment = read_title_and_comments(comment_file)\n\n diffs.append(diff)\n titles.append(title)\n comments.append(comment)\n labels.append(int(row['merged'] * 1))\n successful += 1\n except:\n failed += 1\n pass\n print(\"%s diffs loaded, %s diffs failed\" % (successful, failed), end='\\r')\n\n print(\"\")\n return diffs, comments, titles, labels\n\n\n@timeit\ndef create_dataset(prefix=\"default\",\n diff_vocabulary_size=20000,\n comment_vocabulary_size=20000,\n title_vocabulary_size=20000,\n max_diff_length=100,\n max_comment_length=100,\n max_title_length=100):\n \"\"\"\n Create a dataset for further processing\n :param prefix: Name for the dataset\n :param balance_ratio: The ratio between merged and unmerged PRs to include\n :param num_diffs: Total number of diffs to load. 
Any value below 1 means load all diffs.\n :param langs: Only include PRs for repos whose primary language is within this array\n :param diff_vocabulary_size: (Max) size of the diff vocabulary to use for tokenizing\n :param comment_vocabulary_size: (Max) size of the comment vocabulary to use for tokenizing\n :param title_vocabulary_size: (Max) size of the title vocabulary to use for tokenizing\n :param max_diff_length: Maximum length of the input diff sequences\n :param max_comment_length: Maximum length of the input comment sequences\n :param max_title_length: Maximum length of the input title sequences\n :return: A training and testing dataset, along with the config used to produce it\n \"\"\"\n config = locals()\n\n pullreqs_train = load_pr_csv(train_csv_file % prefix)\n pullreqs_test = load_pr_csv(test_csv_file % prefix)\n pullreqs_validation = load_pr_csv(validation_csv_file % prefix)\n\n ensure_diffs()\n\n tr_diffs, tr_comments, tr_titles, tr_labels = load_data(pullreqs_train)\n val_diffs, val_comments, val_titles, val_labels = load_data(pullreqs_validation)\n te_diffs, te_comments, te_titles, te_labels = load_data(pullreqs_test)\n\n code_tokenizer = create_code_tokenizer(tr_diffs+val_diffs, diff_vocabulary_size)\n\n diff_train = tokenize(code_tokenizer, tr_diffs, max_diff_length)\n diff_val = tokenize(code_tokenizer, val_diffs, max_diff_length)\n diff_test = tokenize(code_tokenizer, te_diffs, max_diff_length)\n\n comment_tokenizer = create_text_tokenizer(tr_comments+val_comments, comment_vocabulary_size)\n\n comment_train = tokenize(comment_tokenizer, tr_comments, max_comment_length)\n comment_val = tokenize(code_tokenizer, val_comments, max_comment_length)\n comment_test = tokenize(comment_tokenizer, te_comments, max_comment_length)\n\n title_tokenizer = create_text_tokenizer(tr_titles+val_titles, title_vocabulary_size)\n\n title_train = tokenize(title_tokenizer, tr_titles, max_title_length)\n title_val = tokenize(code_tokenizer, val_titles, max_title_length)\n title_test = tokenize(title_tokenizer, te_titles, max_title_length)\n\n\n y_train = np.asarray(tr_labels)\n y_val = np.asarray(val_labels)\n y_test = np.asarray(te_labels)\n\n\n print('Shape of diff tensor:', diff_train.shape)\n print('Shape of comment tensor:', comment_train.shape)\n print('Shape of title tensor:', title_train.shape)\n print('Shape of label tensor:', y_train.shape)\n\n\n # Save dataset\n with open(diff_vocab_file % prefix, 'w') as f:\n pickle.dump(code_tokenizer, f)\n\n with open(comment_vocab_file % prefix, 'w') as f:\n pickle.dump(comment_tokenizer, f)\n\n with open(title_vocab_file % prefix, 'w') as f:\n pickle.dump(title_tokenizer, f)\n\n with open(diff_train_file % prefix, 'w') as f:\n pickle.dump(diff_train, f)\n\n with open(comment_train_file % prefix, 'w') as f:\n pickle.dump(comment_train, f)\n\n with open(title_train_file % prefix, 'w') as f:\n pickle.dump(title_train, f)\n\n with open(y_train_file % prefix, 'w') as f:\n pickle.dump(y_train, f)\n\n with open(diff_val_file % prefix, 'w') as f:\n pickle.dump(diff_val, f)\n\n with open(comment_val_file % prefix, 'w') as f:\n pickle.dump(comment_val, f)\n\n with open(title_val_file % prefix, 'w') as f:\n pickle.dump(title_val, f)\n\n with open(y_val_file % prefix, 'w') as f:\n pickle.dump(y_val, f)\n\n # save testdata\n with open(diff_test_file % prefix, 'w') as f:\n pickle.dump(diff_test, f)\n\n with open(comment_test_file % prefix, 'w') as f:\n pickle.dump(comment_test, f)\n\n\n with open(title_test_file % prefix, 'w') as f:\n pickle.dump(title_test, 
f)\n\n with open(y_test_file % prefix, 'w') as f:\n pickle.dump(y_test, f)\n\n\n with open(config_file % prefix, 'w') as f:\n pickle.dump(config, f)\n\n return diff_train, comment_train, title_train, y_train, diff_val, comment_val, title_val, y_val, diff_test, comment_test, title_test, y_test, config\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--prefix', default='default')\nparser.add_argument('--diff_vocabulary_size', type=int, default=50000)\nparser.add_argument('--comment_vocabulary_size', type=int, default=50000)\nparser.add_argument('--title_vocabulary_size', type=int, default=10000)\nparser.add_argument('--max_diff_sequence_length', type=int, default=150)\nparser.add_argument('--max_comment_sequence_length', type=int, default=150)\nparser.add_argument('--max_title_sequence_length', type=int, default=150)\n\n\nargs = parser.parse_args()\n\nif __name__ == '__main__':\n create_dataset(args.prefix, args.diff_vocabulary_size, args.comment_vocabulary_size, args.title_vocabulary_size, args.max_diff_sequence_length, args.max_comment_sequence_length, args.max_title_sequence_length)\n\n"
] | [
[
"numpy.asarray"
]
] |
andrewcistola/value-based-healthcare | [
"12583c33bff8dee83a7daf5aaaf1e7c39883a279"
] | [
"READMIT/alpha/fp_VBHC_READMIT_BEA_FIPS_alpha.py"
] | [
"# FractureProof\n## Value Based Healthcare Project\n### Outcome \n#### CMS Hospital Wiide Readmission Rate 2018 \n### Predictors\n#### BEA 2018 County wide Economic Measures\n### Table Key\n#### State County FIPS\n\n### Set working directory to project folder\nos.chdir(\"C:/Users/drewc/GitHub/allocativ\") # Set wd to project repository\n\n### Set file title and path\ntitle = \"fp_VBHC_READMIT_BEA_FIPS_alpha\"\npath = \"fp/VBHC/READMIT/\"\n\n## Section A: Collect Possible Predictors from Public Access Data\n\n### Import Python Libraries\nimport os # Operating system navigation\nimport sqlite3 # SQLite database manager\n\n### Import data science libraries\nimport pandas as pd # Widely used data manipulation library with R/Excel like tables named 'data frames'\nimport numpy as np # Widely used matrix library for numerical processes\n\n### Import scikit-learn libraries: data preparation \nfrom sklearn.preprocessing import StandardScaler # Standard scaling for easier use of machine learning algorithms\nfrom sklearn.impute import SimpleImputer # Univariate imputation for missing data\n\n### Step 1: Import and Join Data\n\n### Import ACS\ndf_bea = pd.read_csv(\"hnb/BEA/2018/BEA_2018_FIPS_full.csv\", low_memory = 'false') # Import dataset saved as csv in _data folder\n\n### Import CMS Data and Join\ndf_cms = pd.read_csv(\"hnb/CMS/CMS_2018_FIPS_full.csv\", low_memory = 'false') # Import dataset saved as csv in _data folder\ndf_cms = df_cms.filter([\"Rate of readmission after discharge from hospital (hospital-wide)\", \"FIPS\"]) # Keep only selected columns\ndf_join = pd.merge(df_cms, df_bea, on = \"FIPS\", how = \"inner\") # Join by column while keeping only items that exist in both, select outer or left for other options\ndf_cms = 0 # Clear variable\ndf_acs = 0 # Clear variable\n\n### Rename and Verify\ndf_step1 = df_join\ndf_join = 0\ndf_step1.info() # Get class, memory, and column info: names, data types, obs.\ndf_step1.head() # Print first 5 observations\n\n### Step 2: Data Manipulation\n\n### Import Datasets\n\n### Drop ID variables\ndf_man = df_step1.drop(columns = [\"FIPS\"]) # Drop Unwanted Columns\n\n### Rename outcome and test\ndf_man = df_man.rename(columns = {\"Rate of readmission after discharge from hospital (hospital-wide)\": \"outcome\"}) # Rename multiple columns in place\n\n### Rename and Verify\ndf_step2 = df_man\ndf_man = 0\ndf_step2.info() # Get class, memory, and column info: names, data types, obs.\ndf_step2.head() # Print first 5 observations\n\n## Step 3: Data Standardization\n\n### Remove outcome and test\ndf_NA = df_step2\noutcome = df_NA.pop(\"outcome\") # 'pop' column from df\n\n### Drop features with less than 75% data\ndf_NA = df_NA.dropna(axis = 1, thresh = 0.75*len(df_NA)) # Drop features less than 75% non-NA count for all columns\n\n### Impute missing values\ndf_NA = pd.DataFrame(SimpleImputer(strategy = \"median\").fit_transform(df_NA), columns = df_NA.columns) # Impute missing data\n\n### Standard Scale Values\ndf_NA = pd.DataFrame(StandardScaler().fit_transform(df_NA.values), columns = df_NA.columns) # convert the normalized features into a tabular format with the help of DataFrame.\n\n### Reattach outcome\ndf_NA.insert(0, \"outcome\", outcome) # reinsert in index\n\n### Drop all remaining rows (should be none)\ndf_NA = df_NA.dropna() # Drop all rows with NA values\n\n### Rename and Verify\ndf_step3 = df_NA\ndf_NA = 0\ndf_step3.info() # Get class, memory, and column info: names, data types, obs.\ndf_step3.head() # Print first 5 observations\n\n## Section B: 
Identify Significant Predictors with Reduction Algorithms\n\n### Import scikit-learn: machine learning\nfrom sklearn.decomposition import PCA # Principal compnents analysis from sklearn\nfrom sklearn.ensemble import RandomForestClassifier # Random Forest classification component\nfrom sklearn.ensemble import RandomForestRegressor # Random Forest classification component\nfrom sklearn.feature_selection import RFECV # Recursive Feature elimination with cross validation\nfrom sklearn.linear_model import LinearRegression # Used for machine learning with quantitative outcome\n\n### Step 4: Principal Component Analysis\n\n### Setup initial PCA model\ndf_pca = df_step3.drop(columns = [\"outcome\"]) # Drop outcome variable\ndegree = len(df_step3.columns) - 2 # Save number of features -1 to get degrees of freedom\npca = PCA(n_components = degree) # you will pass the number of components to make PCA model based on degrees of freedom\n\n### Fit initial PCA model\npca.fit(df_pca) # fit to data\n\n### Setup final PCA model\ndf_ev = pd.DataFrame(pca.explained_variance_) # Print explained variance of components\ndf_ev = df_ev[(df_ev[0] > 1)] # Save eigenvalues above 1\ncomponents = len(df_ev.index) # Save count of values for Variable reduction\npca = PCA(n_components = components) # you will pass the number of components to make PCA model\n\n### Fit final PCA model\npca.fit_transform(df_pca) # finally call fit_transform on the aggregate data to create PCA results object\n\n### Collect feature list from PCA\ndf_pca2 = pd.DataFrame(pca.components_, columns = df_pca.columns) # Export eigenvectors to data frame\ndf_pca2[\"Variance\"] = pca.explained_variance_ratio_ # Save eigenvalues as their own column\ndf_pca2 = df_pca2[df_pca2.Variance > df_pca2.Variance.mean()] # Susbet by eigenvalues with above average exlained variance ratio\ndf_pca2 = df_pca2.abs() # get absolute value for column or data frame\ndf_pca3 = pd.DataFrame(df_pca2.max(), columns = [\"MaxEV\"]) # select maximum eigenvector for each feature\ndf_pc = df_pca3[df_pca3.MaxEV > df_pca3.MaxEV.mean()] # Susbet by above average max eigenvalues \ndf_pc = df_pc.reset_index() # Add a new index of ascending values, existing index becomes column named \"index\"\ndf_pc = df_pc.rename(columns = {\"index\": \"Features\"}) # Rename multiple columns in place\n\n### Rename and Verify\ndf_step4 = df_pc\ndf_step4.info() # Get class, memory, and column info: names, data types, obs.\ndf_step4.head() # Print first 5 observations\n\n### Step 5: Random Forest Regressor\n\n### Setup RF model\nY = df_step3[\"outcome\"] # Isolate Outcome variable\nX = df_step3.drop(columns = [\"outcome\"]) # Drop Unwanted Columns # Save features columns as predictor data frame\nforest = RandomForestRegressor(n_estimators = 1000, max_depth = 10) #Use default values except for number of trees. For a further explanation see readme included in repository. 
\n\n### Fit Forest model\nforest.fit(X, Y) # This will take time\n\n### Collect features from RF\ngini = forest.feature_importances_ # Output importances of features\nl_gini = list(zip(X, gini)) # Create list of variables alongside importance scores \ndf_gini = pd.DataFrame(l_gini, columns = [\"Features\", \"Gini\"]) # Create data frame of importances with variables and gini column names\ndf_gini = df_gini.sort_values(by = [\"Gini\"], ascending = False) # Sort data frame by gini value in desceding order\ndf_gini = df_gini[(df_gini[\"Gini\"] > df_gini[\"Gini\"].mean())] # Subset by Gini values higher than mean\n\n### Rename and Verify\ndf_step5 = df_gini\ndf_step5.info() # Get class, memory, and column info: names, data types, obs.\ndf_step5.head() # Print first 5 observations\n\n### Step 6: Recursive Feature Elimination\n\n### Collect features from RF and PC\ndf_pc_gini = pd.merge(df_pc, df_gini, on = \"Features\", how = \"inner\") # Join by column while keeping only items that exist in both, select outer or left for other options\npc_gini_features = df_pc_gini[\"Features\"].tolist() # Save features from data frame\ndf_rfecv = df_step3[pc_gini_features] # Add selected features to df\n\n### Setup RFE model\nX = df_rfecv # Save features columns as predictor data frame\nY = df_step3[\"outcome\"] # Use outcome data frame \nRFE = LinearRegression() # Use regression coefficient as estimator\nselector = RFECV(estimator = RFE, min_features_to_select = 10) # define selection parameters, in this case all features are selected. See Readme for more ifo\n\n### Fit RFE model\nselected = selector.fit(X, Y) # This will take time\n\n### Collect features from RFE model\nar_rfe = selected.support_ # Save Boolean values as numpy array\nl_rfe = list(zip(X, ar_rfe)) # Create list of variables alongside RFE value \ndf_rfe = pd.DataFrame(l_rfe, columns = [\"Features\", \"RFE\"]) # Create data frame of importances with variables and gini column names\ndf_rfe = df_rfe[df_rfe.RFE == True] # Select Variables that were True\ndf_rfe = df_rfe.reset_index() # Reset Index\ndf_rfe = df_rfe.filter([\"Features\"]) # Keep only selected columns\n\n### Rename and Verify\ndf_step6 = df_rfe\ndf_step6.info() # Get class, memory, and column info: names, data types, obs.\ndf_step6.head() # Print first 5 observations\n\n## Section C: Evaluate Significant Features with Modeling and Prediction\n\n### Import scikit-learn libraries: regression\nfrom sklearn.linear_model import LogisticRegression # Used for machine learning with categorical outcome\nfrom sklearn.linear_model import LinearRegression # Used for machine learning with quantitative outcome\n\n### Import scikit-learn: neural network\nfrom sklearn.neural_network import MLPRegressor\n\n### Step 7: Multiple Regression\n\n### Setup MR Model\nfeatures = list(df_step6[\"Features\"]) # Save chosen featres as list\nx = df_step3.filter(features) # Keep only selected columns from rfe\ny = df_step3[\"outcome\"] # Add outcome variable\nLR = LinearRegression() # Linear Regression in scikit learn\n\n### Fit MR model\nregression = LR.fit(x, y) # Fit model\n\n### Collect features from MR model\ncoef = regression.coef_ # Coefficient models as scipy array\nl_reg = list(zip(x, coef)) # Create list of variables alongside coefficient \ndf_reg = pd.DataFrame(l_reg, columns = [\"Features\", \"Coefficients\"]) # Create data frame of importances with variables and gini column names\n\n### Export feature attributes\ndf_pc_gini_reg = pd.merge(df_pc_gini, df_reg, on = \"Features\", how = \"inner\") # Join 
by column while keeping only items that exist in both, select outer or left for other options\ndf_pc_gini_reg.to_csv(r\"fp/VBHC/READMIT/fp_VBHC_READMIT_BEA_FIPS_alpha.csv\") # Export df as csv\nprint(df_pc_gini_reg)\n\n### Collect prediction results\ndetermination = regression.score(x, y) # rsq value, ceofficient of determination\nprint(determination)\n\n### Rename and Verify\ndf_step7 = df_pc_gini_reg\ndf_step7.info() # Get class, memory, and column info: names, data types, obs.\ndf_step7.head() # Print first 5 observations"
] | [
[
"sklearn.impute.SimpleImputer",
"pandas.read_csv",
"pandas.DataFrame",
"sklearn.linear_model.LinearRegression",
"sklearn.ensemble.RandomForestRegressor",
"pandas.merge",
"sklearn.feature_selection.RFECV",
"sklearn.preprocessing.StandardScaler",
"sklearn.decomposition.PCA"
]
] |
niallscc/Optimus | [
"35218401556e5acc4beb2859084128ebcd1ab4e5"
] | [
"optimus/engines/base/dataframe/columns.py"
] | [
"from functools import reduce\n\nfrom sklearn.preprocessing import MinMaxScaler, MaxAbsScaler, StandardScaler\n\nfrom optimus.engines.base.columns import BaseColumns\nfrom optimus.helpers.columns import parse_columns, name_col\nfrom optimus.helpers.constants import Actions\nfrom optimus.helpers.raiseit import RaiseIt\n\n\nclass DataFrameBaseColumns(BaseColumns):\n\n def __init__(self, df):\n super(DataFrameBaseColumns, self).__init__(df)\n\n @staticmethod\n def exec_agg(exprs, compute=None):\n \"\"\"\n Exectute and aggregation\n Expression in Non dask dataframe can not handle compute. See exec_agg dask implementation\n :param exprs:\n :param compute:\n :return:\n \"\"\"\n return exprs\n\n def qcut(self, columns, num_buckets, handle_invalid=\"skip\"):\n pass\n\n @staticmethod\n def correlation(input_cols, method=\"pearson\", output=\"json\"):\n pass\n\n @staticmethod\n def scatter(columns, buckets=10):\n pass\n\n def standard_scaler(self, input_cols=\"*\", output_cols=None):\n df = self.root\n\n def _standard_scaler(_value):\n return StandardScaler().fit_transform(_value.values.reshape(-1, 1))\n\n return df.cols.apply(input_cols, func=_standard_scaler, output_cols=output_cols, meta_action=Actions.STANDARD_SCALER.value)\n\n def max_abs_scaler(self, input_cols=\"*\", output_cols=None):\n\n df = self.root\n\n def _max_abs_scaler(_value):\n return MaxAbsScaler().fit_transform(_value.values.reshape(-1, 1))\n\n return df.cols.apply(input_cols, func=_max_abs_scaler, output_cols=output_cols,meta_action=Actions.MAX_ABS_SCALER.value )\n\n def min_max_scaler(self, input_cols, output_cols=None):\n # https://github.com/dask/dask/issues/2690\n\n df = self.root\n\n def _min_max_scaler(_value):\n return MinMaxScaler().fit_transform(_value.values.reshape(-1, 1))\n\n return df.cols.apply(input_cols, func=_min_max_scaler, output_cols=output_cols, meta_action=Actions.MIN_MAX_SCALER.value )\n\n def replace_regex(self, input_cols, regex=None, value=\"\", output_cols=None):\n \"\"\"\n Use a Regex to replace values\n :param input_cols: '*', list of columns names or a single column name.\n :param output_cols:\n :param regex: values to look at to be replaced\n :param value: new value to replace the old one\n :return:\n \"\"\"\n\n df = self.root\n\n def _replace_regex(_value, _regex, _replace):\n return _value.replace(_regex, _replace, regex=True)\n\n return df.cols.apply(input_cols, func=_replace_regex, args=(regex, value,), output_cols=output_cols,\n filter_col_by_dtypes=df.constants.STRING_TYPES + df.constants.NUMERIC_TYPES)\n\n def reverse(self, input_cols, output_cols=None):\n def _reverse(value):\n return str(value)[::-1]\n\n df = self.root\n return df.cols.apply(input_cols, _reverse, func_return_type=str,\n filter_col_by_dtypes=df.constants.STRING_TYPES,\n output_cols=output_cols, set_index=True)\n\n @staticmethod\n def astype(*args, **kwargs):\n pass\n\n @staticmethod\n def apply_by_dtypes(columns, func, func_return_type, args=None, func_type=None, data_type=None):\n pass\n\n @staticmethod\n def to_timestamp(input_cols, date_format=None, output_cols=None):\n pass\n\n def nest(self, input_cols, separator=\"\", output_col=None, shape=\"string\", drop=False):\n df = self.root\n\n dfd = df.data\n\n if output_col is None:\n output_col = name_col(input_cols)\n\n input_cols = parse_columns(df, input_cols)\n\n output_ordered_columns = df.cols.names()\n\n # cudfd do nor support apply or agg join for this operation\n if shape == \"vector\" or shape == \"array\":\n raise NotImplementedError(\"Not implemented yet\")\n # 
https://stackoverflow.com/questions/43898035/pandas-combine-column-values-into-a-list-in-a-new-column/43898233\n # t['combined'] = t.values.tolist()\n\n # dfds = [dfd[input_col] for input_col in input_cols]\n # dfd[output_col] = dfd[input_cols].values.tolist()\n elif shape == \"string\":\n dfds = [dfd[input_col].astype(str) for input_col in input_cols]\n dfd = dfd.assign(**{output_col:reduce((lambda x, y: x + separator + y), dfds)})\n\n if output_col not in output_ordered_columns:\n col_index = output_ordered_columns.index(input_cols[-1]) + 1\n output_ordered_columns[col_index:col_index] = [output_col]\n\n if drop is True:\n for input_col in input_cols:\n if input_col in output_ordered_columns and input_col != output_col:\n output_ordered_columns.remove(input_col)\n\n return self.root.new(dfd).cols.select(output_ordered_columns)\n"
] | [
[
"sklearn.preprocessing.StandardScaler",
"sklearn.preprocessing.MaxAbsScaler",
"sklearn.preprocessing.MinMaxScaler"
]
] |
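
The record above wraps sklearn's three column scalers (`StandardScaler`, `MaxAbsScaler`, `MinMaxScaler`) behind Optimus `cols.apply` helpers that fit-transform a single reshaped column. A minimal sketch of that same fit-transform-on-one-column pattern, assuming only NumPy and scikit-learn; the data and variable names are illustrative, not from the record:

```python
import numpy as np
from sklearn.preprocessing import MinMaxScaler, MaxAbsScaler, StandardScaler

# One numeric column reshaped to the (n_samples, 1) shape sklearn expects,
# mirroring the `_value.values.reshape(-1, 1)` calls in the record above.
column = np.array([1.0, 5.0, -3.0, 10.0]).reshape(-1, 1)

print(StandardScaler().fit_transform(column).ravel())  # zero mean, unit variance
print(MaxAbsScaler().fit_transform(column).ravel())    # divided by the max absolute value
print(MinMaxScaler().fit_transform(column).ravel())    # rescaled into [0, 1]
```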
dspub99/betazero | [
"b1adf9885166e6fb4974952292653efeea1b19dc"
] | [
"mctsPlayer.py"
] | [
"#!/usr/bin/env python\n\nimport numpy as np\n\nfrom randomPlayer import RandomPlayer\nimport game\nimport play\n\n# Run MCTS with MC to estimate the rest of the game.\n# http://mcts.ai/about/index.html\n# http://ccg.doc.gold.ac.uk/wp-content/uploads/2016/10/browne_tciaig12_1.pdf\n\nclass UCT:\n def __init__(self, c):\n self._c = c\n\n def parts(self, pNode, node):\n return (node.sum/node.n, 2*self._c*np.sqrt(2*np.log(pNode.n) / node.n))\n\n def __call__(self, pNode, node):\n if node.n == 0:\n return np.inf\n\n (exploit, explore) = self.parts( pNode, node )\n return exploit + explore\n\nclass UCTNegamax:\n def __init__(self, c):\n self._uct = UCT(c)\n\n def __call__(self, pNode, node):\n if node.n == 0:\n return np.inf\n\n # pNode.chi gives us negamax\n # Actually, our scores (like node.sum/node.n) are in [0,1] not [-1,1].\n # So to change to the opponent's perspective, we might prefer\n # scoreOpponent_A = 1 - score\n # to\n # scoreOpponent_B = -score\n # Note that scoreOpponent_B = scoreOpponent_A - 1. This offset of -1 in exploit\n # won't affect which node maximizes exploit + explore.\n (exploit, explore) = self._uct.parts( pNode, node )\n return pNode.chi*exploit + explore\n\nclass Node:\n def __init__(self, nprand, ttt, chi, maxPlies, parent=None, move=None):\n self._nprand = nprand\n # each Node has a clone of ttt with the Node's game state\n self.maxPlies = maxPlies\n self.chi = chi\n self.parent = parent\n self.ttt = ttt\n self.move = move\n self.sum = 0\n self.n = 0\n self.children = []\n self._needMoves = list(self.ttt.validMoves())\n\n def dump(self):\n n = 0\n queue = [self]\n while len(queue) > 0:\n # queue[0].ttt.dump()\n s = [str(n), \" \"*n]\n newQueue = []\n n += 1\n for node in queue:\n s.append(\"%d/%d(%d)\" % (2*node.sum, 2*node.n, node.maxPlies))\n newQueue.extend(node.children)\n print (' '.join(s))\n queue = newQueue\n\n\n def check_parentage(self):\n # Am I may children's parent?\n for c in self.children:\n assert(c.parent == self)\n c.check_parentage()\n\n def bestChild(self, uct):\n assert(len(self.children)>0)\n\n phis = []\n for c in self.children:\n # print (\"CHILD:\", uct(self, c))\n phis.append(uct(self, c))\n phis = np.array(phis)\n\n i = self._nprand.choice(np.where(phis > phis.max() - 1e-6)[0])\n return self.children[i]\n\n def findBoard(self, ttt):\n # exactly one ply ahead\n for c in self.children:\n if ttt.equivBoard(c.ttt.board()):\n return c\n return None\n\n def select(self, uct):\n # \"Starting at the root node, a child selection policy is recursively applied to descend\n # through the tree until the most urgent expandable node is reached. A node is expandable if\n # it represents a nonterminal state and has unvisited (i.e. unexpanded) children\"\n\n if len(self._needMoves) > 0:\n return self\n\n if len(self.children)==0:\n return None\n\n return self.bestChild(uct).select(uct)\n\n def expand(self):\n # \"One (or more) child nodes are added to expand the tree, according to the\n # available actions.\"\n\n assert( len(self._needMoves) > 0 )\n\n if self.maxPlies==0:\n # just run another sim from here\n return self\n\n m = self._nprand.choice(self._needMoves)\n self._needMoves.remove(m)\n ttt = self.ttt.clone()\n ttt.add(m)\n c = Node(self._nprand, ttt, -self.chi, self.maxPlies - 1, self, m.clone())\n self.children.append(c)\n return c\n\n def backpropagate(self, score):\n # \"The simulation result is “backed up” (i.e. 
backpropagated)\n # through the selected nodes to update their statistics.\"\n\n self.n += 1\n self.sum += score\n if self.parent is not None:\n self.parent.backpropagate(score)\n\n def __str__(self):\n return \"sum = %.4f n = %d nChildren = %d self = %s parent = %s\" % (self.sum, self.n, len(self.children), id(self), id(self.parent))\n\n\nclass MCTSPlayer:\n\n def __init__(self, nPlay, maxPlies, bNegamax, cUct = 1/np.sqrt(2), bDump=False):\n self._nPlay = nPlay\n self._maxPlies = maxPlies\n if bNegamax:\n self._uct = UCTNegamax(cUct)\n else:\n self._uct = UCT(cUct)\n self._cUct = cUct\n self._bNegamax = bNegamax\n self._bDump = bDump\n self._uctMove = UCT(0)\n self._rp = RandomPlayer()\n self._nprand = np.random.RandomState()\n\n self._root = None\n\n def __str__(self):\n return (\"%s nPlay = %d maxPlies = %d bNegamax = %s cUct = %.4f\" %\n (self.__class__.__name__, self._nPlay, self._maxPlies,\n self._bNegamax, self._cUct))\n\n def _simulate(self, node):\n # \"A simulation is run from the new node(s) according to the\n # default policy to produce an outcome.\"\n return play.playRest(self._rp, self._rp, node.ttt.clone(), False, 99999)[0]\n\n def setSeed(self, seed):\n self._nprand.seed(seed)\n self._rp.setSeed(seed+1)\n\n def move(self, ttt):\n if self._root is not None:\n self._root = self._root.findBoard(ttt)\n\n if self._root is None:\n self._root = Node(self._nprand, ttt, 1, maxPlies=self._maxPlies)\n\n marker = ttt.whoseTurn()\n for _ in range(self._nPlay):\n nodeLeaf = self._root.select(self._uct)\n if nodeLeaf is not None:\n nodeSim = nodeLeaf.expand()\n if nodeSim is not None:\n # print (\"START:\", nodeSim.maxPlies, nodeSim.move)\n w = self._simulate(nodeSim)\n if w == ttt.whoseTurn():\n score = 1\n elif w == game.Draw:\n score = .5\n else:\n score = 0\n # print (\"SCORE:\", marker, w, score)\n nodeSim.backpropagate(score)\n\n\n if self._bDump:\n self._root.dump()\n self._root = self._root.bestChild(self._uctMove)\n return self._root.move\n\n\n def tests(self):\n self._root.check_parentage()\n\n\nif __name__ == \"__main__\":\n from ticTacToe import TicTacToe\n from mmPlayer import MMPlayer\n from mcPlayer import MCPlayer\n\n\n nPlay = 100\n maxPlies = 1000\n bNegamax = True\n cUct = 1/np.sqrt(2)\n if True:\n mcts = MCTSPlayer(nPlay = nPlay, maxPlies = maxPlies, bNegamax = bNegamax,\n cUct = cUct, bDump=True)\n mcts.setSeed(1)\n mc10 = MCPlayer(nPlay=10)\n mc10.setSeed(2)\n play.play(TicTacToe, mcts, mc10, bShow = True)\n else:\n score = []\n for _ in range(100):\n mcts = MCTSPlayer(nPlay = nPlay, maxPlies = maxPlies, bNegamax = bNegamax,\n cUct = cUct)\n # mc10 vs. mc10 gives .79, fyi\n # mcts100_mp=1_c=1e6 vs. mc 10 gives .82\n # mcts100_mp=1_c=1/sqrt(2) vs. mc 10 gives .82\n # mcts100_mp=1_c=0 vs. mc 10 gives .82\n # mcts100_mp=2_c=0 vs. mc 10 gives .855\n # mcts100_mp=3_c=0 vs. mc 10 gives .83\n # mcts100_mp=3_c=1/sqrt(2) vs. mc 10 gives .86\n # mcts100_mp=3_c=1/sqrt(2)_negamax vs. mc 10 gives .86\n # mcts100_mp=1000_c=1/sqrt(2)_negamax vs. mc 10 gives .83\n # mcts1000_mp=1000_c=1/sqrt(2)_negamax vs. mc 10 gives .94\n # mcts1000_mp=1000_c=1/sqrt(2) vs. mc 10 gives .83\n w = play.play(TicTacToe, MCPlayer(nPlay=100), mcts, bShow = False)\n if w == 'X':\n score.append(1)\n elif w == 'D':\n score.append(.5)\n else:\n score.append(0)\n print (np.array(score).mean())\n\n\n\n\n\n\n"
] | [
[
"numpy.array",
"numpy.random.RandomState",
"numpy.log",
"numpy.sqrt"
]
] |
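
The mctsPlayer.py record above selects children with the UCT rule: exploitation `sum / n` plus an exploration bonus `2 * c * sqrt(2 * ln(N_parent) / n)`, with unvisited children scored as infinity. A standalone sketch of that scoring function, assuming plain visit counts instead of the record's `Node` objects (`uct_value` and its arguments are illustrative names):

```python
import numpy as np

def uct_value(parent_visits, child_sum, child_visits, c=1 / np.sqrt(2)):
    """UCT score as in the record above: mean reward plus exploration bonus.

    Unvisited children return +inf so they are always expanded first.
    """
    if child_visits == 0:
        return np.inf
    exploit = child_sum / child_visits
    explore = 2 * c * np.sqrt(2 * np.log(parent_visits) / child_visits)
    return exploit + explore

# Example: a parent visited 20 times choosing between two children.
print(uct_value(20, child_sum=7.5, child_visits=10))  # well-explored, strong child
print(uct_value(20, child_sum=2.0, child_visits=2))   # weaker but under-explored child
```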
Global19-atlassian-net/datasets | [
"db298928fe0e45907fcd61443d2319665a933afc"
] | [
"tensorflow_datasets/core/dataset_utils.py"
] | [
"# coding=utf-8\n# Copyright 2020 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utilities for dealing with tf.data.Dataset.\"\"\"\n\nimport tensorflow.compat.v2 as tf\nfrom tensorflow_datasets.core import tf_compat\nfrom tensorflow_datasets.core import utils\n\n\ndef _eager_dataset_iterator(dataset):\n for item in dataset:\n flat = tf.nest.flatten(item)\n flat = [t if isinstance(t, tf.RaggedTensor) else t.numpy() for t in flat]\n yield tf.nest.pack_sequence_as(item, flat)\n\n\ndef _graph_dataset_iterator(ds_iter, graph=None):\n \"\"\"Constructs a Python generator from a tf.data.Iterator.\"\"\"\n with utils.maybe_with_graph(graph, create_if_none=False):\n init = ds_iter.initializer\n ds_item = ds_iter.get_next()\n with utils.nogpu_session(graph) as sess:\n sess.run(init)\n while True:\n try:\n yield sess.run(ds_item)\n except tf.errors.OutOfRangeError:\n break\n\n\ndef as_numpy(dataset, *, graph=None):\n \"\"\"Converts a `tf.data.Dataset` to an iterable of NumPy arrays.\n\n `as_numpy` converts a possibly nested structure of `tf.data.Dataset`s\n and `tf.Tensor`s to iterables of NumPy arrays and NumPy arrays, respectively.\n\n Note that because TensorFlow has support for ragged tensors and NumPy has\n no equivalent representation,\n [`tf.RaggedTensor`s](https://www.tensorflow.org/api_docs/python/tf/RaggedTensor)\n are left as-is for the user to deal with them (e.g. using `to_list()`).\n In TF 1 (i.e. graph mode), `tf.RaggedTensor`s are returned as\n `tf.ragged.RaggedTensorValue`s.\n\n Example:\n\n ```\n ds = tfds.load(name=\"mnist\", split=\"train\")\n ds_numpy = tfds.as_numpy(ds) # Convert `tf.data.Dataset` to Python generator\n for ex in ds_numpy:\n # `{'image': np.array(shape=(28, 28, 1)), 'labels': np.array(shape=())}`\n print(ex)\n ```\n\n Args:\n dataset: a possibly nested structure of `tf.data.Dataset`s and/or\n `tf.Tensor`s.\n graph: `tf.Graph`, optional, explicitly set the graph to use.\n\n Returns:\n A structure matching `dataset` where `tf.data.Dataset`s are converted to\n generators of NumPy arrays and `tf.Tensor`s are converted to NumPy arrays.\n \"\"\"\n nested_ds = dataset\n del dataset\n\n # Flatten\n flat_ds = tf.nest.flatten(nested_ds)\n flat_np = []\n\n # Type check for Tensors and Datasets\n for ds_el in flat_ds:\n types = [type(el) for el in flat_ds]\n types = tf.nest.pack_sequence_as(nested_ds, types)\n if not (\n isinstance(ds_el, (tf.Tensor, tf.RaggedTensor)) or\n tf_compat.is_dataset(ds_el)):\n raise ValueError(\"Arguments to as_numpy must be tf.Tensors or \"\n \"tf.data.Datasets. 
Got: %s\" % types)\n\n if tf.executing_eagerly():\n # Eager mode\n for ds_el in flat_ds:\n if isinstance(ds_el, tf.Tensor):\n np_el = ds_el.numpy()\n elif isinstance(ds_el, tf.RaggedTensor):\n np_el = ds_el\n elif tf_compat.is_dataset(ds_el):\n np_el = _eager_dataset_iterator(ds_el)\n else:\n assert False\n flat_np.append(np_el)\n else:\n # Graph mode\n\n # First create iterators for datasets\n with utils.maybe_with_graph(graph, create_if_none=False):\n ds_iters = [\n tf.compat.v1.data.make_initializable_iterator(ds_el)\n for ds_el in flat_ds if tf_compat.is_dataset(ds_el)\n ]\n ds_iters = [_graph_dataset_iterator(ds_iter, graph) for ds_iter in ds_iters]\n\n # Then create numpy arrays for tensors\n with utils.nogpu_session(graph) as sess: # Shared session for tf.Tensor\n # Calling sess.run once so that randomness is shared.\n np_arrays = sess.run([tensor for tensor in flat_ds\n if not tf_compat.is_dataset(tensor)])\n\n # Merge the dataset iterators and np arrays\n iter_ds = iter(ds_iters)\n iter_array = iter(np_arrays)\n flat_np = [\n next(iter_ds) if tf_compat.is_dataset(ds_el) else next(iter_array)\n for ds_el in flat_ds\n ]\n\n # Nest\n return tf.nest.pack_sequence_as(nested_ds, flat_np)\n\n\ndef dataset_shape_is_fully_defined(ds):\n output_shapes = tf.compat.v1.data.get_output_shapes(ds)\n return all([ts.is_fully_defined() for ts in tf.nest.flatten(output_shapes)])\n\n\ndef features_shape_is_fully_defined(features):\n return all([tf.TensorShape(info.shape).is_fully_defined() for info in\n tf.nest.flatten(features.get_tensor_info())])\n"
] | [
[
"tensorflow.compat.v2.compat.v1.data.get_output_shapes",
"tensorflow.compat.v2.nest.flatten",
"tensorflow.compat.v2.compat.v1.data.make_initializable_iterator",
"tensorflow.compat.v2.TensorShape",
"tensorflow.compat.v2.executing_eagerly",
"tensorflow.compat.v2.nest.pack_sequence_as"
]
] |
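
The dataset_utils.py record above implements `tfds.as_numpy`, which converts `tf.data.Dataset`s and `tf.Tensor`s into NumPy iterables. A minimal eager-mode usage sketch, assuming TensorFlow and tensorflow_datasets are installed; the tiny in-memory dataset stands in for a real `tfds.load(...)` call:

```python
import tensorflow as tf
import tensorflow_datasets as tfds

# A small dict-structured dataset standing in for `tfds.load(...)`.
ds = tf.data.Dataset.from_tensor_slices({"x": [[1, 2], [3, 4]], "y": [0, 1]})

# In eager mode, as_numpy yields plain dicts of NumPy arrays.
for example in tfds.as_numpy(ds):
    print(example["x"], example["y"])
```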
JingyaHuang/transformers | [
"6589e510fa4e6c442059de2fab84752535de9b23"
] | [
"tests/models/bloom/test_modeling_bloom.py"
] | [
"# coding=utf-8\n# Copyright 2022 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport math\nimport unittest\n\nfrom transformers import BloomConfig, is_torch_available\nfrom transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device\n\nfrom ...generation.test_generation_utils import GenerationTesterMixin\nfrom ...test_configuration_common import ConfigTester\nfrom ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask\n\n\nif is_torch_available():\n import torch\n\n from transformers import (\n BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,\n BloomForCausalLM,\n BloomForSequenceClassification,\n BloomForTokenClassification,\n BloomModel,\n BloomTokenizerFast,\n )\n\n\n@require_torch\nclass BloomModelTester:\n def __init__(\n self,\n parent,\n batch_size=14,\n seq_length=7,\n is_training=True,\n use_token_type_ids=False,\n use_input_mask=True,\n use_labels=True,\n use_mc_token_ids=True,\n vocab_size=99,\n hidden_size=32,\n num_hidden_layers=5,\n num_attention_heads=4,\n intermediate_size=37,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n type_vocab_size=16,\n type_sequence_label_size=2,\n initializer_range=0.02,\n num_labels=3,\n num_choices=4,\n scope=None,\n ):\n self.parent = parent\n self.batch_size = batch_size\n self.seq_length = seq_length\n self.is_training = is_training\n self.use_token_type_ids = use_token_type_ids\n self.use_input_mask = use_input_mask\n self.use_labels = use_labels\n self.use_mc_token_ids = use_mc_token_ids\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.intermediate_size = intermediate_size\n self.hidden_act = hidden_act\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.max_position_embeddings = max_position_embeddings\n self.type_vocab_size = type_vocab_size\n self.type_sequence_label_size = type_sequence_label_size\n self.initializer_range = initializer_range\n self.num_labels = num_labels\n self.num_choices = num_choices\n self.scope = None\n self.bos_token_id = vocab_size - 1\n self.eos_token_id = vocab_size - 1\n self.pad_token_id = vocab_size - 1\n\n def get_large_model_config(self):\n return BloomConfig.from_pretrained(\"bigscience/bloom\")\n\n def prepare_config_and_inputs(self, gradient_checkpointing=False):\n input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)\n\n input_mask = None\n if self.use_input_mask:\n input_mask = random_attention_mask([self.batch_size, self.seq_length])\n\n sequence_labels = None\n if self.use_labels:\n sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)\n\n config = self.get_config(gradient_checkpointing=gradient_checkpointing)\n\n return (config, input_ids, input_mask, sequence_labels)\n\n def get_config(self, 
gradient_checkpointing=False, slow_but_exact=True):\n return BloomConfig(\n vocab_size=self.vocab_size,\n seq_length=self.seq_length,\n hidden_size=self.hidden_size,\n n_layer=self.num_hidden_layers,\n n_head=self.num_attention_heads,\n resid_pdrop=self.hidden_dropout_prob,\n attn_pdrop=self.attention_probs_dropout_prob,\n n_positions=self.max_position_embeddings,\n type_vocab_size=self.type_vocab_size,\n initializer_range=self.initializer_range,\n use_cache=True,\n bos_token_id=self.bos_token_id,\n eos_token_id=self.eos_token_id,\n pad_token_id=self.pad_token_id,\n num_labels=self.num_labels,\n gradient_checkpointing=gradient_checkpointing,\n slow_but_exact=slow_but_exact,\n dtype=\"float32\",\n )\n\n def create_and_check_bloom_model(self, config, input_ids, input_mask, *args):\n model = BloomModel(config=config)\n model.to(torch_device)\n model.eval()\n\n result = model(input_ids)\n\n self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))\n self.parent.assertEqual(len(result.past_key_values), config.n_layer)\n\n def create_and_check_bloom_model_past(self, config, input_ids, input_mask, *args):\n model = BloomModel(config=config)\n\n model.to(torch_device)\n model.eval()\n\n # first forward pass\n outputs = model(input_ids, attention_mask=torch.ones_like(input_ids), use_cache=True)\n outputs_use_cache_conf = model(input_ids, attention_mask=torch.ones_like(input_ids))\n outputs_no_past = model(input_ids, use_cache=False, attention_mask=torch.ones_like(input_ids))\n\n self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))\n self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)\n\n past = outputs[\"past_key_values\"]\n\n # create hypothetical next token and extent to next_input_ids\n next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)\n\n # append to next input_ids and token_type_ids\n next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)\n\n output_from_no_past = model(next_input_ids)[\"last_hidden_state\"]\n output_from_past = model(next_tokens, past_key_values=past)[\"last_hidden_state\"]\n\n # select random slice\n random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()\n output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()\n output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()\n\n # test that outputs are equal for slice\n self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))\n\n def create_and_check_bloom_model_attention_mask_past(self, config, input_ids, input_mask, *args):\n model = BloomModel(config=config)\n model.to(torch_device)\n model.eval()\n\n # create attention mask\n attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)\n half_seq_length = self.seq_length // 2\n attn_mask[:, half_seq_length:] = 0\n\n # first forward pass\n output, past = model(input_ids, attention_mask=attn_mask).to_tuple()\n\n # create hypothetical next token and extent to next_input_ids\n next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)\n\n # change a random masked slice from input_ids\n random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1\n random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)\n input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens\n\n # append to next input_ids and attn_mask\n next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)\n attn_mask = torch.cat(\n 
[attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],\n dim=1,\n )\n\n # get two different outputs\n output_from_no_past = model(next_input_ids, attention_mask=attn_mask)[\"last_hidden_state\"]\n output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask)[\"last_hidden_state\"]\n\n # select random slice\n random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()\n output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()\n output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()\n\n # test that outputs are equal for slice\n self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))\n\n def create_and_check_bloom_model_past_large_inputs(self, config, input_ids, input_mask, *args):\n model = BloomModel(config=config)\n model.to(torch_device)\n model.eval()\n\n # first forward pass\n outputs = model(input_ids, attention_mask=input_mask, use_cache=True)\n\n output, past = outputs.to_tuple()\n\n # create hypothetical next token and extent to next_input_ids\n next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)\n next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)\n\n # append to next input_ids and token_type_ids\n next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)\n next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)\n\n output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[\"last_hidden_state\"]\n output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past)[\n \"last_hidden_state\"\n ]\n self.parent.assertTrue(output_from_past.shape[1] == next_tokens.shape[1])\n\n # select random slice\n random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()\n output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()\n output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()\n\n # test that outputs are equal for slice\n self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))\n\n def create_and_check_lm_head_model(self, config, input_ids, input_mask, *args):\n model = BloomForCausalLM(config)\n model.to(torch_device)\n model.eval()\n\n result = model(input_ids, labels=input_ids)\n self.parent.assertEqual(result.loss.shape, ())\n self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))\n\n def create_and_check_sequence_classification_model(self, config, input_ids, input_mask, *args):\n config.num_labels = self.num_labels\n model = BloomForSequenceClassification(config)\n model.to(torch_device)\n model.eval()\n\n result = model(input_ids, attention_mask=input_mask)\n self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))\n\n def create_and_check_token_classification_model(self, config, input_ids, input_mask, *args):\n model = BloomForTokenClassification(config)\n model.to(torch_device)\n model.eval()\n\n result = model(input_ids, attention_mask=input_mask)\n self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))\n\n def create_and_check_forward_and_backwards(\n self, config, input_ids, input_mask, *args, gradient_checkpointing=False\n ):\n model = BloomForCausalLM(config)\n model.to(torch_device)\n if gradient_checkpointing:\n model.gradient_checkpointing_enable()\n\n result = model(input_ids, labels=input_ids)\n 
self.parent.assertEqual(result.loss.shape, ())\n self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))\n result.loss.backward()\n\n def create_and_check_bloom_weight_initialization(self, config, *args):\n model = BloomModel(config)\n model_std = model.config.initializer_range / math.sqrt(2 * model.config.n_layer)\n for key in model.state_dict().keys():\n if \"c_proj\" in key and \"weight\" in key:\n self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)\n self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)\n\n def prepare_config_and_inputs_for_common(self):\n config_and_inputs = self.prepare_config_and_inputs()\n\n config, input_ids, input_mask, sequence_labels = config_and_inputs\n\n inputs_dict = {\"input_ids\": input_ids}\n\n return config, inputs_dict\n\n\n@require_torch\nclass BloomModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):\n\n all_model_classes = (\n (\n BloomModel,\n BloomForCausalLM,\n BloomForSequenceClassification,\n BloomForTokenClassification,\n )\n if is_torch_available()\n else ()\n )\n\n all_generative_model_classes = (BloomForCausalLM,) if is_torch_available() else ()\n fx_compatible = False\n test_missing_keys = False\n test_pruning = False\n test_torchscript = True # torch.autograd functions seems to be not supported\n\n def setUp(self):\n self.model_tester = BloomModelTester(self)\n self.config_tester = ConfigTester(self, config_class=BloomConfig, n_embd=37)\n\n def test_config(self):\n self.config_tester.run_common_tests()\n\n def test_bloom_model(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_bloom_model(*config_and_inputs)\n\n def test_bloom_model_past(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_bloom_model_past(*config_and_inputs)\n\n def test_bloom_model_att_mask_past(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_bloom_model_attention_mask_past(*config_and_inputs)\n\n def test_bloom_model_past_large_inputs(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_bloom_model_past_large_inputs(*config_and_inputs)\n\n def test_bloom_lm_head_model(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_lm_head_model(*config_and_inputs)\n\n def test_bloom_sequence_classification_model(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_sequence_classification_model(*config_and_inputs)\n\n def test_bloom_token_classification_model(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_token_classification_model(*config_and_inputs)\n\n def test_bloom_gradient_checkpointing(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)\n\n def test_bloom_weight_initialization(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_bloom_weight_initialization(*config_and_inputs)\n\n @slow\n def test_model_from_pretrained(self):\n for model_name in BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:\n model = BloomModel.from_pretrained(model_name)\n 
self.assertIsNotNone(model)\n\n @slow\n @require_torch_gpu\n def test_simple_generation(self):\n path_350m = \"bigscience/bloom-350m\"\n model = BloomForCausalLM.from_pretrained(path_350m, torch_dtype=\"auto\", use_cache=True).cuda()\n model = model.eval()\n tokenizer = BloomTokenizerFast.from_pretrained(path_350m)\n\n input_sentence = \"I enjoy walking with my cute dog\"\n EXPECTED_OUTPUT = (\n \"I enjoy walking with my cute dog, and I love to watch the kids play. I am a very active person, and I am\"\n \" a very good listener. I am a very good person, and I am a very good person. I am a\"\n )\n\n input_ids = tokenizer.encode(input_sentence, return_tensors=\"pt\")\n greedy_output = model.generate(input_ids.cuda(), max_length=50)\n\n self.assertEqual(tokenizer.decode(greedy_output[0], skip_special_tokens=True), EXPECTED_OUTPUT)\n\n @slow\n @require_torch_gpu\n def test_batch_generation(self):\n path_350m = \"bigscience/bloom-350m\"\n model = BloomForCausalLM.from_pretrained(path_350m, torch_dtype=\"auto\", use_cache=True).cuda()\n model = model.eval()\n tokenizer = BloomTokenizerFast.from_pretrained(path_350m, padding_side=\"left\")\n\n input_sentence = [\"I enjoy walking with my cute dog\", \"I enjoy walking with my cute dog\"]\n\n input_ids = tokenizer.batch_encode_plus(input_sentence, return_tensors=\"pt\", padding=True)\n greedy_output = model.generate(\n input_ids[\"input_ids\"].cuda(), attention_mask=input_ids[\"attention_mask\"], max_length=50, do_sample=False\n )\n\n self.assertEqual(\n tokenizer.decode(greedy_output[0], skip_special_tokens=True),\n tokenizer.decode(greedy_output[1], skip_special_tokens=True),\n )\n\n @slow\n @require_torch_gpu\n def test_batch_generation_padd(self):\n path_350m = \"bigscience/bloom-350m\"\n model = BloomForCausalLM.from_pretrained(path_350m, torch_dtype=\"auto\", use_cache=True).cuda()\n model = model.eval()\n tokenizer = BloomTokenizerFast.from_pretrained(path_350m, padding_side=\"left\")\n\n input_sentence = [\"I enjoy walking with my cute dog\", \"Hello my name is\"]\n input_sentence_without_pad = \"Hello my name is\"\n\n input_ids = tokenizer.batch_encode_plus(input_sentence, return_tensors=\"pt\", padding=True)\n input_ids_without_pad = tokenizer.encode(input_sentence_without_pad, return_tensors=\"pt\")\n\n greedy_output = model.generate(\n input_ids[\"input_ids\"].cuda(), attention_mask=input_ids[\"attention_mask\"], max_length=50, do_sample=False\n )\n greedy_output_without_pad = model.generate(input_ids_without_pad.cuda(), max_length=50, do_sample=False)\n\n # test token values\n self.assertEqual(greedy_output[-1, 3:].tolist(), greedy_output_without_pad[0, :-3].tolist())\n\n # test reconstructions\n self.assertEqual(\n tokenizer.decode(greedy_output[-1, 3:], skip_special_tokens=True),\n tokenizer.decode(greedy_output_without_pad[0, :-3], skip_special_tokens=True),\n )\n\n\n@require_torch\nclass BloomEmbeddingTest(unittest.TestCase):\n \"\"\"\n The goal here is to compare the embeddings generated by the model trained\n using Megatron-LM with the one from the transformers library, with a small GPT2-like model\n to ensure that the conversion from Megatron-LM to transformers has been done successfully.\n The script compares the logits of the embedding layer and the transformer layers.\n\n WARNING: It is expected that these logits will not have exactly the same statistics when running\n the code on CPU or GPU. 
For more info, please visit:\n - https://github.com/pytorch/pytorch/issues/76052#issuecomment-1103193548\n - https://discuss.pytorch.org/t/reproducibility-issue-between-intel-and-amd-cpus/144779/9\n\n\n You need to install tokenizers following this readme:\n - https://huggingface.co/bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles\n\n Tokenizer used during training:\n - https://huggingface.co/bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles\n\n # TODO change the script (or just add skip) when building the env with tokenizers 0.12.0\n \"\"\"\n\n def setUp(self):\n super().setUp()\n self.path_bigscience_model = \"bigscience/bigscience-small-testing\"\n\n @require_torch\n def test_embeddings(self):\n model = BloomForCausalLM.from_pretrained(self.path_bigscience_model, torch_dtype=\"auto\") # load in fp32\n model.eval()\n\n EMBEDDINGS_DS_BEFORE_LN_BF_16_MEAN = {\n 3478: 0.0002307891845703125,\n 368: -0.000568389892578125,\n 109586: -0.0003910064697265625,\n 35433: -0.000194549560546875,\n 2: 0.0004138946533203125,\n 77: 0.000659942626953125,\n 132619: -0.00031280517578125,\n 2175: 0.000457763671875,\n 23714: 0.000263214111328125,\n 73173: -0.000286102294921875,\n 144252: 0.00052642822265625,\n }\n EMBEDDINGS_DS_BEFORE_LN_BF_16_MIN = {\n 3478: -0.00921630859375,\n 368: -0.010009765625,\n 109586: -0.01031494140625,\n 35433: -0.01177978515625,\n 2: -0.0074462890625,\n 77: -0.00848388671875,\n 132619: -0.009521484375,\n 2175: -0.0074462890625,\n 23714: -0.0145263671875,\n 73173: -0.007415771484375,\n 144252: -0.01007080078125,\n }\n EMBEDDINGS_DS_BEFORE_LN_BF_16_MAX = {\n 3478: 0.0128173828125,\n 368: 0.01214599609375,\n 109586: 0.0111083984375,\n 35433: 0.01019287109375,\n 2: 0.0157470703125,\n 77: 0.0174560546875,\n 132619: 0.0078125,\n 2175: 0.0113525390625,\n 23714: 0.0146484375,\n 73173: 0.01116943359375,\n 144252: 0.01141357421875,\n }\n EMBEDDINGS_DS_BEFORE_LN_BF_16_SUM = {\"value\": 0.08203125}\n\n EMBEDDINGS_DS_BEFORE_LN_F_16_MEAN = {\n 132619: -0.00031256675720214844,\n 3478: 0.00023090839385986328,\n 368: -0.0005702972412109375,\n 109586: -0.00039124488830566406,\n 35433: -0.000194549560546875,\n 2: 0.0004146099090576172,\n 2175: 0.0004572868347167969,\n 23714: 0.00026416778564453125,\n 73173: -0.0002865791320800781,\n 144252: 0.0005254745483398438,\n 77: 0.0006618499755859375,\n }\n EMBEDDINGS_DS_BEFORE_LN_F_16_MIN = {\n 3478: -0.00921630859375,\n 368: -0.010009765625,\n 109586: -0.01031494140625,\n 35433: -0.01177978515625,\n 2: -0.0074462890625,\n 77: -0.00848388671875,\n 132619: -0.009521484375,\n 2175: -0.0074462890625,\n 23714: -0.0145263671875,\n 73173: -0.007415771484375,\n 144252: -0.01007080078125,\n }\n EMBEDDINGS_DS_BEFORE_LN_F_16_MAX = {\n 3478: 0.0128173828125,\n 368: 0.01214599609375,\n 109586: 0.0111083984375,\n 35433: 0.01019287109375,\n 2: 0.0157470703125,\n 77: 0.0174560546875,\n 132619: 0.0078125,\n 2175: 0.0113525390625,\n 23714: 0.0146484375,\n 73173: 0.01116943359375,\n 144252: 0.01141357421875,\n }\n EMBEDDINGS_DS_BEFORE_LN_F_16_SUM = {\"value\": 0.0821533203125}\n\n EMBEDDINGS_DS_BEFORE_LN_F_32_MEAN = {\n 132619: -0.00031267106533050537,\n 3478: 0.00023087859153747559,\n 368: -0.0005701072514057159,\n 109586: -0.0003911703824996948,\n 35433: -0.0001944899559020996,\n 2: 0.0004146844148635864,\n 2175: 0.00045740045607089996,\n 23714: 0.0002641640603542328,\n 73173: -0.0002864748239517212,\n 144252: 
0.0005256589502096176,\n 77: 0.0006617321632802486,\n }\n EMBEDDINGS_DS_BEFORE_LN_F_32_MIN = {\n 3478: -0.00921630859375,\n 368: -0.010009765625,\n 109586: -0.01031494140625,\n 35433: -0.01177978515625,\n 2: -0.0074462890625,\n 77: -0.00848388671875,\n 132619: -0.009521484375,\n 2175: -0.0074462890625,\n 23714: -0.0145263671875,\n 73173: -0.007415771484375,\n 144252: -0.01007080078125,\n }\n EMBEDDINGS_DS_BEFORE_LN_F_32_MAX = {\n 3478: 0.0128173828125,\n 368: 0.01214599609375,\n 109586: 0.0111083984375,\n 35433: 0.01019287109375,\n 2: 0.0157470703125,\n 77: 0.0174560546875,\n 132619: 0.0078125,\n 2175: 0.0113525390625,\n 23714: 0.0146484375,\n 73173: 0.01116943359375,\n 144252: 0.01141357421875,\n }\n EMBEDDINGS_DS_BEFORE_LN_F_32_SUM = {\"value\": 0.08217757940292358}\n\n TEST_EMBEDDINGS = {\n \"torch.bfloat16\": {\n \"mean\": EMBEDDINGS_DS_BEFORE_LN_BF_16_MEAN,\n \"max\": EMBEDDINGS_DS_BEFORE_LN_BF_16_MAX,\n \"min\": EMBEDDINGS_DS_BEFORE_LN_BF_16_MIN,\n \"sum\": EMBEDDINGS_DS_BEFORE_LN_BF_16_SUM,\n },\n \"torch.float32\": {\n \"mean\": EMBEDDINGS_DS_BEFORE_LN_F_32_MEAN,\n \"max\": EMBEDDINGS_DS_BEFORE_LN_F_32_MAX,\n \"min\": EMBEDDINGS_DS_BEFORE_LN_F_32_MIN,\n \"sum\": EMBEDDINGS_DS_BEFORE_LN_F_32_SUM,\n },\n \"torch.float\": {\n \"mean\": EMBEDDINGS_DS_BEFORE_LN_F_32_MEAN,\n \"max\": EMBEDDINGS_DS_BEFORE_LN_F_32_MAX,\n \"min\": EMBEDDINGS_DS_BEFORE_LN_F_32_MIN,\n \"sum\": EMBEDDINGS_DS_BEFORE_LN_F_32_SUM,\n },\n \"torch.float16\": {\n \"mean\": EMBEDDINGS_DS_BEFORE_LN_F_16_MEAN,\n \"max\": EMBEDDINGS_DS_BEFORE_LN_F_16_MAX,\n \"min\": EMBEDDINGS_DS_BEFORE_LN_F_16_MIN,\n \"sum\": EMBEDDINGS_DS_BEFORE_LN_F_16_SUM,\n },\n }\n\n # fmt: off\n EXAMPLE_IDS = [3478, 368, 109586, 35433, 2, 77, 132619, 3478, 368, 109586, 35433, 2, 2175, 23714, 73173, 144252, 2, 77, 132619, 3478]\n # fmt: on\n\n EMBEDDINGS_DS_AFTER_LN_MEAN = {\n 3478: -6.580352783203125e-05,\n 368: 0.0001316070556640625,\n 109586: -0.00030517578125,\n 35433: 4.00543212890625e-05,\n 2: -7.2479248046875e-05,\n 77: -8.96453857421875e-05,\n 132619: 0.0001583099365234375,\n 2175: 2.1219253540039062e-05,\n 23714: -0.000247955322265625,\n 73173: -0.00021839141845703125,\n 144252: -0.0001430511474609375,\n }\n EMBEDDINGS_DS_AFTER_LN_MIN = {\n 3478: -1.6953125,\n 368: -1.6875,\n 109586: -1.6875,\n 35433: -2.125,\n 2: -1.390625,\n 77: -1.5390625,\n 132619: -1.875,\n 2175: -1.4609375,\n 23714: -2.296875,\n 73173: -1.3515625,\n 144252: -1.78125,\n }\n EMBEDDINGS_DS_AFTER_LN_MAX = {\n 3478: 2.265625,\n 368: 2.28125,\n 109586: 1.953125,\n 35433: 1.90625,\n 2: 2.703125,\n 77: 2.828125,\n 132619: 1.65625,\n 2175: 2.015625,\n 23714: 2.234375,\n 73173: 2.171875,\n 144252: 1.828125,\n }\n\n EMBEDDINGS_DS_AFTER_LN = {\n \"mean\": EMBEDDINGS_DS_AFTER_LN_MEAN,\n \"min\": EMBEDDINGS_DS_AFTER_LN_MIN,\n \"max\": EMBEDDINGS_DS_AFTER_LN_MAX,\n }\n\n tensor_ids = torch.LongTensor([EXAMPLE_IDS])\n with torch.no_grad():\n embeddings = model.transformer.word_embeddings(tensor_ids)\n embeddings_ln = model.transformer.word_embeddings_layernorm(embeddings) #\n # first check the embeddings before LN\n output_dict = {\"min\": {}, \"max\": {}, \"mean\": {}, \"sum\": {\"value\": embeddings.sum().item()}}\n for i, idx in enumerate(EXAMPLE_IDS):\n output_dict[\"min\"][idx] = embeddings.min(dim=-1).values[0][i].item()\n output_dict[\"max\"][idx] = embeddings.max(dim=-1).values[0][i].item()\n output_dict[\"mean\"][idx] = embeddings.mean(dim=-1)[0][i].item()\n\n for key in TEST_EMBEDDINGS[str(model.dtype)].keys():\n 
self.assertDictEqual(TEST_EMBEDDINGS[str(model.dtype)][key], output_dict[key])\n\n output_dict_norm = {\"min\": {}, \"max\": {}, \"mean\": {}}\n for i, idx in enumerate(EXAMPLE_IDS):\n output_dict_norm[\"min\"][idx] = embeddings_ln.min(dim=-1).values[0][i].item()\n output_dict_norm[\"max\"][idx] = embeddings_ln.max(dim=-1).values[0][i].item()\n output_dict_norm[\"mean\"][idx] = embeddings_ln.mean(dim=-1)[0][i].item()\n\n # This test does not pass when places = 2\n for i, key in enumerate(output_dict_norm.keys()):\n for j, idx in enumerate(output_dict[key].keys()):\n self.assertAlmostEqual(EMBEDDINGS_DS_AFTER_LN[key][idx], output_dict_norm[key][idx], places=1)\n\n @require_torch\n def test_hidden_states_transformers(self):\n cuda_available = torch.cuda.is_available()\n model = BloomModel.from_pretrained(self.path_bigscience_model, use_cache=False, torch_dtype=\"auto\").to(\n torch_device\n )\n model.eval()\n\n # fmt: off\n EXAMPLE_IDS = [3478, 368, 109586, 35433, 2, 77, 132619, 3478, 368, 109586, 35433, 2, 2175, 23714, 73173, 144252, 2, 77, 132619, 3478]\n # fmt: on\n\n MEAN_VALUE_LAST_LM = -4.3392181396484375e-05\n MIN_MAX_DICT = {\"min\": -2.0625, \"max\": 2.75}\n tensor_ids = torch.LongTensor([EXAMPLE_IDS])\n\n with torch.no_grad():\n logits = model(tensor_ids.to(torch_device))\n output_dict = {\n \"min\": logits.last_hidden_state.min(dim=-1).values[0][0].item(),\n \"max\": logits.last_hidden_state.max(dim=-1).values[0][0].item(),\n }\n\n if cuda_available:\n self.assertAlmostEqual(MEAN_VALUE_LAST_LM, logits.last_hidden_state.mean().item(), places=4)\n else:\n self.assertAlmostEqual(MEAN_VALUE_LAST_LM, logits.last_hidden_state.mean().item(), places=3)\n\n self.assertDictEqual(MIN_MAX_DICT, output_dict)\n\n @require_torch\n def test_logits(self):\n cuda_available = torch.cuda.is_available()\n model = BloomForCausalLM.from_pretrained(self.path_bigscience_model, use_cache=False, torch_dtype=\"auto\").to(\n torch_device\n ) # load in bf16\n model.eval()\n\n # fmt: off\n EXAMPLE_IDS = [3478, 368, 109586, 35433, 2, 77, 132619, 3478, 368, 109586, 35433, 2, 2175, 23714, 73173, 144252, 2, 77, 132619, 3478]\n # fmt: on\n\n MEAN_LOGITS_GPU_1 = -1.823902130126953e-05\n MEAN_LOGITS_GPU_2 = 1.9431114196777344e-05\n\n tensor_ids = torch.LongTensor([EXAMPLE_IDS]).to(torch_device)\n with torch.no_grad():\n output = model(tensor_ids).logits\n\n output_gpu_1, output_gpu_2 = output.split(125440, dim=-1)\n if cuda_available:\n self.assertEqual(output_gpu_1.mean().item(), MEAN_LOGITS_GPU_1)\n self.assertEqual(output_gpu_2.mean().item(), MEAN_LOGITS_GPU_2)\n else:\n self.assertAlmostEqual(output_gpu_1.mean().item(), MEAN_LOGITS_GPU_1, places=6) # 1e-06 precision!!\n self.assertAlmostEqual(output_gpu_2.mean().item(), MEAN_LOGITS_GPU_2, places=6)\n"
] | [
[
"torch.ones_like",
"torch.ones",
"torch.no_grad",
"torch.cuda.is_available",
"torch.LongTensor",
"torch.cat",
"torch.allclose"
]
] |
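
The Bloom test record above leans on a small set of torch primitives (its `apis` column) to build attention masks, append hypothetical next tokens, and compare sliced outputs from cached versus uncached forward passes. A model-free sketch of that masking/concatenation/comparison pattern, assuming only PyTorch; shapes and the trivially equal tensors in the `allclose` call are illustrative, since the real comparison needs a loaded model:

```python
import torch

batch_size, seq_length = 2, 6
input_ids = torch.randint(0, 99, (batch_size, seq_length))

# Mask out the second half of the sequence, as the attention-mask-past test does.
attn_mask = torch.ones(input_ids.shape, dtype=torch.long)
attn_mask[:, seq_length // 2:] = 0

# Append one hypothetical next token to both the ids and the mask.
next_tokens = torch.randint(0, 99, (batch_size, 1))
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
attn_mask = torch.cat([attn_mask, torch.ones((batch_size, 1), dtype=torch.long)], dim=1)

# The tests then assert that slices of two forward passes agree; shown here
# on a trivially equal pair just to illustrate the tolerance-based check.
slice_a = torch.randn(batch_size, 8)
assert torch.allclose(slice_a, slice_a.clone(), atol=1e-3)
print(next_input_ids.shape, attn_mask.shape)
```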
fjaragones/fastai | [
"be48d209a4526191f71dc7adaef090828897b9ec"
] | [
"old/fastai/structured.py"
] | [
"from .imports import *\n\nfrom sklearn_pandas import DataFrameMapper\nfrom sklearn.preprocessing import LabelEncoder, Imputer, StandardScaler\nfrom pandas.api.types import is_string_dtype, is_numeric_dtype\nfrom sklearn.ensemble import forest\nfrom sklearn.tree import export_graphviz\n\n\ndef set_plot_sizes(sml, med, big):\n plt.rc('font', size=sml) # controls default text sizes\n plt.rc('axes', titlesize=sml) # fontsize of the axes title\n plt.rc('axes', labelsize=med) # fontsize of the x and y labels\n plt.rc('xtick', labelsize=sml) # fontsize of the tick labels\n plt.rc('ytick', labelsize=sml) # fontsize of the tick labels\n plt.rc('legend', fontsize=sml) # legend fontsize\n plt.rc('figure', titlesize=big) # fontsize of the figure title\n\ndef parallel_trees(m, fn, n_jobs=8):\n return list(ProcessPoolExecutor(n_jobs).map(fn, m.estimators_))\n\ndef draw_tree(t, df, size=10, ratio=0.6, precision=0):\n \"\"\" Draws a representation of a random forest in IPython.\n\n Parameters:\n -----------\n t: The tree you wish to draw\n df: The data used to train the tree. This is used to get the names of the features.\n \"\"\"\n s=export_graphviz(t, out_file=None, feature_names=df.columns, filled=True,\n special_characters=True, rotate=True, precision=precision)\n IPython.display.display(graphviz.Source(re.sub('Tree {',\n f'Tree {{ size={size}; ratio={ratio}', s)))\n\ndef combine_date(years, months=1, days=1, weeks=None, hours=None, minutes=None,\n seconds=None, milliseconds=None, microseconds=None, nanoseconds=None):\n years = np.asarray(years) - 1970\n months = np.asarray(months) - 1\n days = np.asarray(days) - 1\n types = ('<M8[Y]', '<m8[M]', '<m8[D]', '<m8[W]', '<m8[h]',\n '<m8[m]', '<m8[s]', '<m8[ms]', '<m8[us]', '<m8[ns]')\n vals = (years, months, days, weeks, hours, minutes, seconds,\n milliseconds, microseconds, nanoseconds)\n return sum(np.asarray(v, dtype=t) for t, v in zip(types, vals)\n if v is not None)\n\ndef get_sample(df,n):\n \"\"\" Gets a random sample of n rows from df, without replacement.\n\n Parameters:\n -----------\n df: A pandas data frame, that you wish to sample from.\n n: The number of rows you wish to sample.\n\n Returns:\n --------\n return value: A random sample of n rows of df.\n\n Examples:\n ---------\n >>> df = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['a', 'b', 'a']})\n >>> df\n col1 col2\n 0 1 a\n 1 2 b\n 2 3 a\n\n >>> get_sample(df, 2)\n col1 col2\n 1 2 b\n 2 3 a\n \"\"\"\n idxs = sorted(np.random.permutation(len(df))[:n])\n return df.iloc[idxs].copy()\n\ndef add_datepart(df, fldname, drop=True, time=False, errors=\"raise\"):\t\n \"\"\"add_datepart converts a column of df from a datetime64 to many columns containing\n the information from the date. This applies changes inplace.\n\n Parameters:\n -----------\n df: A pandas data frame. 
df gain several new columns.\n fldname: A string that is the name of the date column you wish to expand.\n If it is not a datetime64 series, it will be converted to one with pd.to_datetime.\n drop: If true then the original date column will be removed.\n time: If true time features: Hour, Minute, Second will be added.\n\n Examples:\n ---------\n\n >>> df = pd.DataFrame({ 'A' : pd.to_datetime(['3/11/2000', '3/12/2000', '3/13/2000'], infer_datetime_format=False) })\n >>> df\n\n A\n 0 2000-03-11\n 1 2000-03-12\n 2 2000-03-13\n\n >>> add_datepart(df, 'A')\n >>> df\n\n AYear AMonth AWeek ADay ADayofweek ADayofyear AIs_month_end AIs_month_start AIs_quarter_end AIs_quarter_start AIs_year_end AIs_year_start AElapsed\n 0 2000 3 10 11 5 71 False False False False False False 952732800\n 1 2000 3 10 12 6 72 False False False False False False 952819200\n 2 2000 3 11 13 0 73 False False False False False False 952905600\n \"\"\"\n fld = df[fldname]\n fld_dtype = fld.dtype\n if isinstance(fld_dtype, pd.core.dtypes.dtypes.DatetimeTZDtype):\n fld_dtype = np.datetime64\n\n if not np.issubdtype(fld_dtype, np.datetime64):\n df[fldname] = fld = pd.to_datetime(fld, infer_datetime_format=True, errors=errors)\n targ_pre = re.sub('[Dd]ate$', '', fldname)\n attr = ['Year', 'Month', 'Week', 'Day', 'Dayofweek', 'Dayofyear',\n 'Is_month_end', 'Is_month_start', 'Is_quarter_end', 'Is_quarter_start', 'Is_year_end', 'Is_year_start']\n if time: attr = attr + ['Hour', 'Minute', 'Second']\n for n in attr: df[targ_pre + n] = getattr(fld.dt, n.lower())\n df[targ_pre + 'Elapsed'] = fld.astype(np.int64) // 10 ** 9\n if drop: df.drop(fldname, axis=1, inplace=True)\n\ndef is_date(x): return np.issubdtype(x.dtype, np.datetime64)\n\ndef train_cats(df):\n \"\"\"Change any columns of strings in a panda's dataframe to a column of\n categorical values. This applies the changes inplace.\n\n Parameters:\n -----------\n df: A pandas dataframe. Any columns of strings will be changed to\n categorical values.\n\n Examples:\n ---------\n\n >>> df = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['a', 'b', 'a']})\n >>> df\n col1 col2\n 0 1 a\n 1 2 b\n 2 3 a\n\n note the type of col2 is string\n\n >>> train_cats(df)\n >>> df\n\n col1 col2\n 0 1 a\n 1 2 b\n 2 3 a\n\n now the type of col2 is category\n \"\"\"\n for n,c in df.items():\n if is_string_dtype(c): df[n] = c.astype('category').cat.as_ordered()\n\ndef apply_cats(df, trn):\n \"\"\"Changes any columns of strings in df into categorical variables using trn as\n a template for the category codes.\n\n Parameters:\n -----------\n df: A pandas dataframe. Any columns of strings will be changed to\n categorical values. The category codes are determined by trn.\n\n trn: A pandas dataframe. 
When creating a category for df, it looks up the\n what the category's code were in trn and makes those the category codes\n for df.\n\n Examples:\n ---------\n >>> df = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['a', 'b', 'a']})\n >>> df\n col1 col2\n 0 1 a\n 1 2 b\n 2 3 a\n\n note the type of col2 is string\n\n >>> train_cats(df)\n >>> df\n\n col1 col2\n 0 1 a\n 1 2 b\n 2 3 a\n\n now the type of col2 is category {a : 1, b : 2}\n\n >>> df2 = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['b', 'a', 'a']})\n >>> apply_cats(df2, df)\n\n col1 col2\n 0 1 b\n 1 2 a\n 2 3 a\n\n now the type of col is category {a : 1, b : 2}\n \"\"\"\n for n,c in df.items():\n if (n in trn.columns) and (trn[n].dtype.name=='category'):\n df[n] = c.astype('category').cat.as_ordered()\n df[n].cat.set_categories(trn[n].cat.categories, ordered=True, inplace=True)\n\ndef fix_missing(df, col, name, na_dict):\n \"\"\" Fill missing data in a column of df with the median, and add a {name}_na column\n which specifies if the data was missing.\n\n Parameters:\n -----------\n df: The data frame that will be changed.\n\n col: The column of data to fix by filling in missing data.\n\n name: The name of the new filled column in df.\n\n na_dict: A dictionary of values to create na's of and the value to insert. If\n name is not a key of na_dict the median will fill any missing data. Also\n if name is not a key of na_dict and there is no missing data in col, then\n no {name}_na column is not created.\n\n\n Examples:\n ---------\n >>> df = pd.DataFrame({'col1' : [1, np.NaN, 3], 'col2' : [5, 2, 2]})\n >>> df\n col1 col2\n 0 1 5\n 1 nan 2\n 2 3 2\n\n >>> fix_missing(df, df['col1'], 'col1', {})\n >>> df\n col1 col2 col1_na\n 0 1 5 False\n 1 2 2 True\n 2 3 2 False\n\n\n >>> df = pd.DataFrame({'col1' : [1, np.NaN, 3], 'col2' : [5, 2, 2]})\n >>> df\n col1 col2\n 0 1 5\n 1 nan 2\n 2 3 2\n\n >>> fix_missing(df, df['col2'], 'col2', {})\n >>> df\n col1 col2\n 0 1 5\n 1 nan 2\n 2 3 2\n\n\n >>> df = pd.DataFrame({'col1' : [1, np.NaN, 3], 'col2' : [5, 2, 2]})\n >>> df\n col1 col2\n 0 1 5\n 1 nan 2\n 2 3 2\n\n >>> fix_missing(df, df['col1'], 'col1', {'col1' : 500})\n >>> df\n col1 col2 col1_na\n 0 1 5 False\n 1 500 2 True\n 2 3 2 False\n \"\"\"\n if is_numeric_dtype(col):\n if pd.isnull(col).sum() or (name in na_dict):\n df[name+'_na'] = pd.isnull(col)\n filler = na_dict[name] if name in na_dict else col.median()\n df[name] = col.fillna(filler)\n na_dict[name] = filler\n return na_dict\n\ndef numericalize(df, col, name, max_n_cat):\n \"\"\" Changes the column col from a categorical type to it's integer codes.\n\n Parameters:\n -----------\n df: A pandas dataframe. df[name] will be filled with the integer codes from\n col.\n\n col: The column you wish to change into the categories.\n name: The column name you wish to insert into df. This column will hold the\n integer codes.\n\n max_n_cat: If col has more categories than max_n_cat it will not change the\n it to its integer codes. 
If max_n_cat is None, then col will always be\n converted.\n\n Examples:\n ---------\n >>> df = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['a', 'b', 'a']})\n >>> df\n col1 col2\n 0 1 a\n 1 2 b\n 2 3 a\n\n note the type of col2 is string\n\n >>> train_cats(df)\n >>> df\n\n col1 col2\n 0 1 a\n 1 2 b\n 2 3 a\n\n now the type of col2 is category { a : 1, b : 2}\n\n >>> numericalize(df, df['col2'], 'col3', None)\n\n col1 col2 col3\n 0 1 a 1\n 1 2 b 2\n 2 3 a 1\n \"\"\"\n if not is_numeric_dtype(col) and ( max_n_cat is None or len(col.cat.categories)>max_n_cat):\n df[name] = col.cat.codes+1\n\ndef scale_vars(df, mapper):\n warnings.filterwarnings('ignore', category=sklearn.exceptions.DataConversionWarning)\n if mapper is None:\n map_f = [([n],StandardScaler()) for n in df.columns if is_numeric_dtype(df[n])]\n mapper = DataFrameMapper(map_f).fit(df)\n df[mapper.transformed_names_] = mapper.transform(df)\n return mapper\n\ndef proc_df(df, y_fld=None, skip_flds=None, ignore_flds=None, do_scale=False, na_dict=None,\n preproc_fn=None, max_n_cat=None, subset=None, mapper=None):\n \"\"\" proc_df takes a data frame df and splits off the response variable, and\n changes the df into an entirely numeric dataframe. For each column of df \n which is not in skip_flds nor in ignore_flds, na values are replaced by the\n median value of the column.\n\n Parameters:\n -----------\n df: The data frame you wish to process.\n\n y_fld: The name of the response variable\n\n skip_flds: A list of fields that dropped from df.\n\n ignore_flds: A list of fields that are ignored during processing.\n\n do_scale: Standardizes each column in df. Takes Boolean Values(True,False)\n\n na_dict: a dictionary of na columns to add. Na columns are also added if there\n are any missing values.\n\n preproc_fn: A function that gets applied to df.\n\n max_n_cat: The maximum number of categories to break into dummy values, instead\n of integer codes.\n\n subset: Takes a random subset of size subset from df.\n\n mapper: If do_scale is set as True, the mapper variable\n calculates the values used for scaling of variables during training time (mean and standard deviation).\n\n Returns:\n --------\n [x, y, nas, mapper(optional)]:\n\n x: x is the transformed version of df. 
x will not have the response variable\n and is entirely numeric.\n\n y: y is the response variable\n\n nas: returns a dictionary of which nas it created, and the associated median.\n\n mapper: A DataFrameMapper which stores the mean and standard deviation of the corresponding continuous\n variables which is then used for scaling of during test-time.\n\n Examples:\n ---------\n >>> df = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['a', 'b', 'a']})\n >>> df\n col1 col2\n 0 1 a\n 1 2 b\n 2 3 a\n\n note the type of col2 is string\n\n >>> train_cats(df)\n >>> df\n\n col1 col2\n 0 1 a\n 1 2 b\n 2 3 a\n\n now the type of col2 is category { a : 1, b : 2}\n\n >>> x, y, nas = proc_df(df, 'col1')\n >>> x\n\n col2\n 0 1\n 1 2\n 2 1\n\n >>> data = DataFrame(pet=[\"cat\", \"dog\", \"dog\", \"fish\", \"cat\", \"dog\", \"cat\", \"fish\"],\n children=[4., 6, 3, 3, 2, 3, 5, 4],\n salary=[90, 24, 44, 27, 32, 59, 36, 27])\n\n >>> mapper = DataFrameMapper([(:pet, LabelBinarizer()),\n ([:children], StandardScaler())])\n\n >>>round(fit_transform!(mapper, copy(data)), 2)\n\n 8x4 Array{Float64,2}:\n 1.0 0.0 0.0 0.21\n 0.0 1.0 0.0 1.88\n 0.0 1.0 0.0 -0.63\n 0.0 0.0 1.0 -0.63\n 1.0 0.0 0.0 -1.46\n 0.0 1.0 0.0 -0.63\n 1.0 0.0 0.0 1.04\n 0.0 0.0 1.0 0.21\n \"\"\"\n if not ignore_flds: ignore_flds=[]\n if not skip_flds: skip_flds=[]\n if subset: df = get_sample(df,subset)\n else: df = df.copy()\n ignored_flds = df.loc[:, ignore_flds]\n df.drop(ignore_flds, axis=1, inplace=True)\n if preproc_fn: preproc_fn(df)\n if y_fld is None: y = None\n else:\n if not is_numeric_dtype(df[y_fld]): df[y_fld] = df[y_fld].cat.codes\n y = df[y_fld].values\n skip_flds += [y_fld]\n df.drop(skip_flds, axis=1, inplace=True)\n\n if na_dict is None: na_dict = {}\n else: na_dict = na_dict.copy()\n na_dict_initial = na_dict.copy()\n for n,c in df.items(): na_dict = fix_missing(df, c, n, na_dict)\n if len(na_dict_initial.keys()) > 0:\n df.drop([a + '_na' for a in list(set(na_dict.keys()) - set(na_dict_initial.keys()))], axis=1, inplace=True)\n if do_scale: mapper = scale_vars(df, mapper)\n for n,c in df.items(): numericalize(df, c, n, max_n_cat)\n df = pd.get_dummies(df, dummy_na=True)\n df = pd.concat([ignored_flds, df], axis=1)\n res = [df, y, na_dict]\n if do_scale: res = res + [mapper]\n return res\n\ndef rf_feat_importance(m, df):\n return pd.DataFrame({'cols':df.columns, 'imp':m.feature_importances_}\n ).sort_values('imp', ascending=False)\n\ndef set_rf_samples(n):\n \"\"\" Changes Scikit learn's random forests to give each tree a random sample of\n n random rows.\n \"\"\"\n forest._generate_sample_indices = (lambda rs, n_samples:\n forest.check_random_state(rs).randint(0, n_samples, n))\n\ndef reset_rf_samples():\n \"\"\" Undoes the changes produced by set_rf_samples.\n \"\"\"\n forest._generate_sample_indices = (lambda rs, n_samples:\n forest.check_random_state(rs).randint(0, n_samples, n_samples))\n\ndef get_nn_mappers(df, cat_vars, contin_vars):\n # Replace nulls with 0 for continuous, \"\" for categorical.\n for v in contin_vars: df[v] = df[v].fillna(df[v].max()+100,)\n for v in cat_vars: df[v].fillna('#NA#', inplace=True)\n\n # list of tuples, containing variable and instance of a transformer for that variable\n # for categoricals, use LabelEncoder to map to integers. For continuous, standardize\n cat_maps = [(o, LabelEncoder()) for o in cat_vars]\n contin_maps = [([o], StandardScaler()) for o in contin_vars]\n return DataFrameMapper(cat_maps).fit(df), DataFrameMapper(contin_maps).fit(df)\n"
] | [
[
"pandas.api.types.is_string_dtype",
"pandas.api.types.is_numeric_dtype",
"sklearn.preprocessing.LabelEncoder",
"sklearn.ensemble.forest.check_random_state",
"sklearn.preprocessing.StandardScaler",
"sklearn.tree.export_graphviz"
]
] |
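
The structured.py record above documents fastai's tabular preprocessing at length: `train_cats` turns string columns into ordered categoricals, `fix_missing` median-fills numerics while adding a `_na` indicator, and `numericalize` swaps categories for integer codes. A condensed pandas-only sketch of those three steps under the same column conventions; the frame and column names are illustrative, and this is not the library's full `proc_df`:

```python
import numpy as np
import pandas as pd
from pandas.api.types import is_numeric_dtype, is_string_dtype

df = pd.DataFrame({"col1": [1.0, np.nan, 3.0], "col2": ["a", "b", "a"]})

for name, col in df.items():
    if is_string_dtype(col):
        # train_cats: strings become ordered categoricals.
        df[name] = col.astype("category").cat.as_ordered()
    elif is_numeric_dtype(col) and col.isna().any():
        # fix_missing: median-fill plus a boolean `<name>_na` indicator column.
        df[name + "_na"] = col.isna()
        df[name] = col.fillna(col.median())

# numericalize: category codes shifted by one so missing maps to 0.
df["col2"] = df["col2"].cat.codes + 1
print(df)
```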
dizcza/pytorch-mighty | [
"942c53b529377c9100bffc2f7f20ec740763e6ae"
] | [
"mighty/trainer/autoencoder.py"
] | [
"from typing import Union\n\nimport torch\nimport torch.nn as nn\nimport torch.utils.data\nfrom torch.optim.lr_scheduler import _LRScheduler, ReduceLROnPlateau\nfrom torch.optim.optimizer import Optimizer\n\nfrom mighty.loss import LossPenalty\nfrom mighty.models import AutoencoderLinear\nfrom mighty.monitor.monitor import MonitorAutoencoder\nfrom mighty.utils.var_online import MeanOnline\nfrom mighty.utils.signal import peak_to_signal_noise_ratio\nfrom mighty.utils.common import input_from_batch, batch_to_cuda\nfrom mighty.utils.data import DataLoader\nfrom .embedding import TrainerEmbedding\n\n\n__all__ = [\n \"TrainerAutoencoder\"\n]\n\n\nclass TrainerAutoencoder(TrainerEmbedding):\n \"\"\"\n An unsupervised AutoEncoder trainer that not only transforms inputs to\n meaningful embeddings but also aims to restore the input signal from it.\n\n\n Parameters\n ----------\n model : nn.Module\n A neural network to train.\n criterion : nn.Module\n A loss function.\n data_loader : DataLoader\n A data loader.\n optimizer : Optimizer\n An optimizer (Adam, SGD, etc.).\n scheduler : _LRScheduler or ReduceLROnPlateau, or None\n A learning rate scheduler.\n Default: None\n accuracy_measure : AccuracyEmbedding, optional\n Calculates the accuracy of embedding vectors.\n Default: ``AccuracyEmbedding()``\n **kwargs\n Passed to the base class.\n \"\"\"\n\n watch_modules = TrainerEmbedding.watch_modules + (AutoencoderLinear,)\n\n def __init__(self,\n model: nn.Module,\n criterion: nn.Module,\n data_loader: DataLoader,\n optimizer: Optimizer,\n scheduler: Union[_LRScheduler, ReduceLROnPlateau] = None,\n **kwargs):\n super().__init__(model, criterion=criterion, data_loader=data_loader,\n optimizer=optimizer, scheduler=scheduler, **kwargs)\n\n def _init_monitor(self, mutual_info) -> MonitorAutoencoder:\n monitor = MonitorAutoencoder(\n mutual_info=mutual_info,\n normalize_inverse=self.data_loader.normalize_inverse\n )\n return monitor\n\n def _init_online_measures(self):\n online = super()._init_online_measures()\n\n # peak signal-to-noise ratio\n online['psnr-train'] = MeanOnline()\n online['psnr-test'] = MeanOnline()\n\n return online\n\n def _get_loss(self, batch, output):\n input = input_from_batch(batch)\n latent, reconstructed = output\n if isinstance(self.criterion, LossPenalty):\n loss = self.criterion(reconstructed, input, latent)\n else:\n loss = self.criterion(reconstructed, input)\n return loss\n\n def _on_forward_pass_batch(self, batch, output, train):\n input = input_from_batch(batch)\n latent, reconstructed = output\n if isinstance(self.criterion, nn.BCEWithLogitsLoss):\n reconstructed = reconstructed.sigmoid()\n psnr = peak_to_signal_noise_ratio(input, reconstructed)\n fold = 'train' if train else 'test'\n if torch.isfinite(psnr):\n self.online[f'psnr-{fold}'].update(psnr.cpu())\n super()._on_forward_pass_batch(batch, latent, train)\n\n def _epoch_finished(self, loss):\n self.plot_autoencoder()\n for fold in ('train', 'test'):\n self.monitor.plot_psnr(self.online[f'psnr-{fold}'].get_mean(),\n mode=fold)\n super()._epoch_finished(loss)\n\n def plot_autoencoder(self):\n \"\"\"\n Plots AutoEncoder reconstruction.\n \"\"\"\n batch = self.data_loader.sample()\n batch = batch_to_cuda(batch)\n mode_saved = self.model.training\n self.model.train(False)\n with torch.no_grad():\n latent, reconstructed = self._forward(batch)\n if isinstance(self.criterion, nn.BCEWithLogitsLoss):\n reconstructed = reconstructed.sigmoid()\n self._plot_autoencoder(batch, reconstructed)\n self.model.train(mode_saved)\n\n def 
_plot_autoencoder(self, batch, reconstructed, mode='train'):\n input = input_from_batch(batch)\n self.monitor.plot_autoencoder(input, reconstructed, mode=mode)\n"
] | [
[
"torch.no_grad",
"torch.isfinite"
]
] |
RussellM2020/RoboticTasks | [
"c7157c986cdbbf08cc0ea296205ef2dbcf6fc487"
] | [
"rllab/misc/instrument.py"
] | [
"import os\nimport re\nimport subprocess\nimport base64\nimport os.path as osp\nimport pickle as pickle\nimport inspect\nimport hashlib\nimport sys\nfrom contextlib import contextmanager\n\nimport errno\n\nfrom rllab.core.serializable import Serializable\nfrom rllab import config\nfrom rllab.misc.console import mkdir_p\nfrom rllab.misc import ext\nfrom io import StringIO\nimport datetime\nimport dateutil.tz\nimport json\nimport time\nimport numpy as np\n\nfrom rllab.misc.ext import AttrDict\nfrom rllab.viskit.core import flatten\nimport collections\n\n\nclass StubBase(object):\n def __getitem__(self, item):\n return StubMethodCall(self, \"__getitem__\", args=[item], kwargs=dict())\n\n def __getattr__(self, item):\n try:\n return super(self.__class__, self).__getattribute__(item)\n except AttributeError:\n if item.startswith(\"__\") and item.endswith(\"__\"):\n raise\n return StubAttr(self, item)\n\n def __pow__(self, power, modulo=None):\n return StubMethodCall(self, \"__pow__\", [power, modulo], dict())\n\n def __call__(self, *args, **kwargs):\n return StubMethodCall(self.obj, self.attr_name, args, kwargs)\n\n def __add__(self, other):\n return StubMethodCall(self, \"__add__\", [other], dict())\n\n def __rmul__(self, other):\n return StubMethodCall(self, \"__rmul__\", [other], dict())\n\n def __div__(self, other):\n return StubMethodCall(self, \"__div__\", [other], dict())\n\n def __rdiv__(self, other):\n return StubMethodCall(BinaryOp(), \"rdiv\", [self, other], dict()) # self, \"__rdiv__\", [other], dict())\n\n def __rpow__(self, power, modulo=None):\n return StubMethodCall(self, \"__rpow__\", [power, modulo], dict())\n\n\nclass BinaryOp(Serializable):\n def __init__(self):\n Serializable.quick_init(self, locals())\n\n def rdiv(self, a, b):\n return b / a\n # def __init__(self, opname, a, b):\n # self.opname = opname\n # self.a = a\n # self.b = b\n\n\nclass StubAttr(StubBase):\n def __init__(self, obj, attr_name):\n self.__dict__[\"_obj\"] = obj\n self.__dict__[\"_attr_name\"] = attr_name\n\n @property\n def obj(self):\n return self.__dict__[\"_obj\"]\n\n @property\n def attr_name(self):\n return self.__dict__[\"_attr_name\"]\n\n def __str__(self):\n return \"StubAttr(%s, %s)\" % (str(self.obj), str(self.attr_name))\n\n\nclass StubMethodCall(StubBase, Serializable):\n def __init__(self, obj, method_name, args, kwargs):\n self._serializable_initialized = False\n Serializable.quick_init(self, locals())\n self.obj = obj\n self.method_name = method_name\n self.args = args\n self.kwargs = kwargs\n\n def __str__(self):\n return \"StubMethodCall(%s, %s, %s, %s)\" % (\n str(self.obj), str(self.method_name), str(self.args), str(self.kwargs))\n\n\nclass StubClass(StubBase):\n def __init__(self, proxy_class):\n self.proxy_class = proxy_class\n\n def __call__(self, *args, **kwargs):\n if len(args) > 0:\n # Convert the positional arguments to keyword arguments\n spec = inspect.getargspec(self.proxy_class.__init__)\n kwargs = dict(list(zip(spec.args[1:], args)), **kwargs)\n args = tuple()\n return StubObject(self.proxy_class, *args, **kwargs)\n\n def __getstate__(self):\n return dict(proxy_class=self.proxy_class)\n\n def __setstate__(self, dict):\n self.proxy_class = dict[\"proxy_class\"]\n\n def __getattr__(self, item):\n if hasattr(self.proxy_class, item):\n return StubAttr(self, item)\n raise AttributeError\n\n def __str__(self):\n return \"StubClass(%s)\" % self.proxy_class\n\n\nclass StubObject(StubBase):\n def __init__(self, __proxy_class, *args, **kwargs):\n if len(args) > 0:\n spec = 
inspect.getargspec(__proxy_class.__init__)\n kwargs = dict(list(zip(spec.args[1:], args)), **kwargs)\n args = tuple()\n self.proxy_class = __proxy_class\n self.args = args\n self.kwargs = kwargs\n\n def __getstate__(self):\n return dict(args=self.args, kwargs=self.kwargs, proxy_class=self.proxy_class)\n\n def __setstate__(self, dict):\n self.args = dict[\"args\"]\n self.kwargs = dict[\"kwargs\"]\n self.proxy_class = dict[\"proxy_class\"]\n\n def __getattr__(self, item):\n # why doesnt the commented code work?\n # return StubAttr(self, item)\n # checks bypassed to allow for accesing instance fileds\n if hasattr(self.proxy_class, item):\n return StubAttr(self, item)\n raise AttributeError('Cannot get attribute %s from %s' % (item, self.proxy_class))\n\n def __str__(self):\n return \"StubObject(%s, *%s, **%s)\" % (str(self.proxy_class), str(self.args), str(self.kwargs))\n\n\nclass VariantDict(AttrDict):\n def __init__(self, d, hidden_keys):\n super(VariantDict, self).__init__(d)\n self._hidden_keys = hidden_keys\n\n def dump(self):\n return {k: v for k, v in self.items() if k not in self._hidden_keys}\n\n\nclass VariantGenerator(object):\n \"\"\"\n Usage:\n\n vg = VariantGenerator()\n vg.add(\"param1\", [1, 2, 3])\n vg.add(\"param2\", ['x', 'y'])\n vg.variants() => # all combinations of [1,2,3] x ['x','y']\n\n Supports noncyclic dependency among parameters:\n vg = VariantGenerator()\n vg.add(\"param1\", [1, 2, 3])\n vg.add(\"param2\", lambda param1: [param1+1, param1+2])\n vg.variants() => # ..\n \"\"\"\n\n def __init__(self):\n self._variants = []\n self._populate_variants()\n self._hidden_keys = []\n for k, vs, cfg in self._variants:\n if cfg.get(\"hide\", False):\n self._hidden_keys.append(k)\n\n def add(self, key, vals, **kwargs):\n self._variants.append((key, vals, kwargs))\n\n def _populate_variants(self):\n methods = inspect.getmembers(\n self.__class__, predicate=lambda x: inspect.isfunction(x) or inspect.ismethod(x))\n methods = [x[1].__get__(self, self.__class__)\n for x in methods if getattr(x[1], '__is_variant', False)]\n for m in methods:\n self.add(m.__name__, m, **getattr(m, \"__variant_config\", dict()))\n\n def variants(self, randomized=False):\n ret = list(self.ivariants())\n if randomized:\n np.random.shuffle(ret)\n return list(map(self.variant_dict, ret))\n\n def variant_dict(self, variant):\n return VariantDict(variant, self._hidden_keys)\n\n def to_name_suffix(self, variant):\n suffix = []\n for k, vs, cfg in self._variants:\n if not cfg.get(\"hide\", False):\n suffix.append(k + \"_\" + str(variant[k]))\n return \"_\".join(suffix)\n\n def ivariants(self):\n dependencies = list()\n for key, vals, _ in self._variants:\n if hasattr(vals, \"__call__\"):\n args = inspect.getargspec(vals).args\n if hasattr(vals, 'im_self') or hasattr(vals, \"__self__\"):\n # remove the first 'self' parameter\n args = args[1:]\n dependencies.append((key, set(args)))\n else:\n dependencies.append((key, set()))\n sorted_keys = []\n # topo sort all nodes\n while len(sorted_keys) < len(self._variants):\n # get all nodes with zero in-degree\n free_nodes = [k for k, v in dependencies if len(v) == 0]\n if len(free_nodes) == 0:\n error_msg = \"Invalid parameter dependency: \\n\"\n for k, v in dependencies:\n if len(v) > 0:\n error_msg += k + \" depends on \" + \" & \".join(v) + \"\\n\"\n raise ValueError(error_msg)\n dependencies = [(k, v)\n for k, v in dependencies if k not in free_nodes]\n # remove the free nodes from the remaining dependencies\n for _, v in dependencies:\n 
v.difference_update(free_nodes)\n sorted_keys += free_nodes\n return self._ivariants_sorted(sorted_keys)\n\n def _ivariants_sorted(self, sorted_keys):\n if len(sorted_keys) == 0:\n yield dict()\n else:\n first_keys = sorted_keys[:-1]\n first_variants = self._ivariants_sorted(first_keys)\n last_key = sorted_keys[-1]\n last_vals = [v for k, v, _ in self._variants if k == last_key][0]\n if hasattr(last_vals, \"__call__\"):\n last_val_keys = inspect.getargspec(last_vals).args\n if hasattr(last_vals, 'im_self') or hasattr(last_vals, '__self__'):\n last_val_keys = last_val_keys[1:]\n else:\n last_val_keys = None\n for variant in first_variants:\n if hasattr(last_vals, \"__call__\"):\n last_variants = last_vals(\n **{k: variant[k] for k in last_val_keys})\n for last_choice in last_variants:\n yield AttrDict(variant, **{last_key: last_choice})\n else:\n for last_choice in last_vals:\n yield AttrDict(variant, **{last_key: last_choice})\n\n\ndef variant(*args, **kwargs):\n def _variant(fn):\n fn.__is_variant = True\n fn.__variant_config = kwargs\n return fn\n\n if len(args) == 1 and isinstance(args[0], collections.Callable):\n return _variant(args[0])\n return _variant\n\n\ndef stub(glbs):\n # replace the __init__ method in all classes\n # hacky!!!\n for k, v in list(glbs.items()):\n # look at all variables that are instances of a class (not yet Stub)\n if isinstance(v, type) and v != StubClass:\n glbs[k] = StubClass(v) # and replaces them by a the same but Stub\n\n\ndef query_yes_no(question, default=\"yes\"):\n \"\"\"Ask a yes/no question via raw_input() and return their answer.\n\n \"question\" is a string that is presented to the user.\n \"default\" is the presumed answer if the user just hits <Enter>.\n It must be \"yes\" (the default), \"no\" or None (meaning\n an answer is required of the user).\n\n The \"answer\" return value is True for \"yes\" or False for \"no\".\n \"\"\"\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")\n\n\nexp_count = 0\nnow = datetime.datetime.now(dateutil.tz.tzlocal())\ntimestamp = now.strftime('%Y_%m_%d_%H_%M_%S')\nremote_confirmed = False\n\n\ndef run_experiment_lite(\n stub_method_call=None,\n batch_tasks=None,\n exp_prefix=\"experiment\",\n exp_name=None,\n log_dir=None,\n script=\"scripts/run_experiment_lite.py\",\n python_command=\"python\",\n mode=\"local\",\n dry=False,\n docker_image=None,\n aws_config=None,\n env=None,\n variant=None,\n use_gpu=False,\n sync_s3_pkl=False,\n sync_s3_png=False,\n sync_s3_log=False,\n sync_log_on_termination=True,\n confirm_remote=True,\n terminate_machine=True,\n periodic_sync=True,\n periodic_sync_interval=15,\n sync_all_data_node_to_s3=True,\n use_cloudpickle=None,\n pre_commands=None,\n added_project_directories=[],\n **kwargs):\n \"\"\"\n Serialize the stubbed method call and run the experiment using the specified mode.\n :param stub_method_call: A stubbed method call.\n :param script: The name of the entrance point python script\n :param mode: Where & how to run the experiment. 
Should be one of \"local\", \"local_docker\", \"ec2\",\n and \"lab_kube\".\n :param dry: Whether to do a dry-run, which only prints the commands without executing them.\n :param exp_prefix: Name prefix for the experiments\n :param docker_image: name of the docker image. Ignored if using local mode.\n :param aws_config: configuration for AWS. Only used under EC2 mode\n :param env: extra environment variables\n :param kwargs: All other parameters will be passed directly to the entrance python script.\n :param variant: If provided, should be a dictionary of parameters\n :param use_gpu: Whether the launched task is running on GPU. This triggers a few configuration changes including\n certain environment flags\n :param sync_s3_pkl: Whether to sync pkl files during execution of the experiment (they will always be synced at\n the end of the experiment)\n :param sync_s3_png: Whether to sync png files during execution of the experiment (they will always be synced at\n the end of the experiment)\n :param sync_s3_log: Whether to sync log files during execution of the experiment (they will always be synced at\n the end of the experiment)\n :param confirm_remote: Whether to confirm before launching experiments remotely\n :param terminate_machine: Whether to terminate machine after experiment finishes. Only used when using\n mode=\"ec2\". This is useful when one wants to debug after an experiment finishes abnormally.\n :param periodic_sync: Whether to synchronize certain experiment files periodically during execution.\n :param periodic_sync_interval: Time interval between each periodic sync, in seconds.\n \"\"\"\n assert stub_method_call is not None or batch_tasks is not None, \"Must provide at least either stub_method_call or batch_tasks\"\n\n\n\n \n if use_cloudpickle is None:\n for maybe_stub in (batch_tasks or [stub_method_call]):\n # decide mode\n if isinstance(maybe_stub, StubBase):\n use_cloudpickle = False\n else:\n assert hasattr(maybe_stub, '__call__')\n use_cloudpickle = True\n # ensure variant exists\n if variant is None:\n variant = dict()\n\n if batch_tasks is None:\n batch_tasks = [\n dict(\n kwargs,\n pre_commands=pre_commands,\n stub_method_call=stub_method_call,\n exp_name=exp_name,\n log_dir=log_dir,\n env=env,\n variant=variant,\n use_cloudpickle=use_cloudpickle\n )\n ]\n\n global exp_count\n global remote_confirmed\n config.USE_GPU = use_gpu\n\n # params_list = []\n\n for task in batch_tasks:\n call = task.pop(\"stub_method_call\")\n if use_cloudpickle:\n import cloudpickle\n data = base64.b64encode(cloudpickle.dumps(call)).decode(\"utf-8\")\n else:\n data = base64.b64encode(pickle.dumps(call)).decode(\"utf-8\")\n task[\"args_data\"] = data\n exp_count += 1\n params = dict(kwargs)\n if task.get(\"exp_name\", None) is None:\n task[\"exp_name\"] = \"%s_%s_%04d\" % (\n exp_prefix, timestamp, exp_count)\n if task.get(\"log_dir\", None) is None:\n task[\"log_dir\"] = config.LOG_DIR + \"/local/\" + \\\n exp_prefix.replace(\"_\", \"-\") + \"/\" + task[\"exp_name\"]\n if task.get(\"variant\", None) is not None:\n variant = task.pop(\"variant\")\n if \"exp_name\" not in variant:\n variant[\"exp_name\"] = task[\"exp_name\"]\n task[\"variant_data\"] = base64.b64encode(pickle.dumps(variant)).decode(\"utf-8\")\n elif \"variant\" in task:\n del task[\"variant\"]\n task[\"remote_log_dir\"] = osp.join(\n config.AWS_S3_PATH, exp_prefix.replace(\"_\", \"-\"), task[\"exp_name\"])\n task[\"env\"] = task.get(\"env\", dict()) or dict()\n task[\"env\"][\"RLLAB_USE_GPU\"] = str(use_gpu)\n\n if mode not in 
[\"local\", \"local_docker\"] and not remote_confirmed and not dry and confirm_remote:\n remote_confirmed = query_yes_no(\n \"Running in (non-dry) mode %s. Confirm?\" % mode)\n if not remote_confirmed:\n sys.exit(1)\n\n if hasattr(mode, \"__call__\"):\n if docker_image is None:\n docker_image = config.DOCKER_IMAGE\n mode(\n task,\n docker_image=docker_image,\n use_gpu=use_gpu,\n exp_prefix=exp_prefix,\n script=script,\n python_command=python_command,\n sync_s3_pkl=sync_s3_pkl,\n sync_log_on_termination=sync_log_on_termination,\n periodic_sync=periodic_sync,\n periodic_sync_interval=periodic_sync_interval,\n sync_all_data_node_to_s3=sync_all_data_node_to_s3,\n )\n elif mode == \"local\":\n for task in batch_tasks:\n del task[\"remote_log_dir\"]\n env = task.pop(\"env\", None)\n command = to_local_command(\n task,\n python_command=python_command,\n script=osp.join(config.PROJECT_PATH, script),\n use_gpu=use_gpu\n )\n print(command)\n if dry:\n return\n try:\n if env is None:\n env = dict()\n subprocess.call(\n command, shell=True, env=dict(os.environ, **env))\n except Exception as e:\n print(e)\n if isinstance(e, KeyboardInterrupt):\n raise\n elif mode == \"local_docker\":\n if docker_image is None:\n docker_image = config.DOCKER_IMAGE\n for task in batch_tasks:\n del task[\"remote_log_dir\"]\n env = task.pop(\"env\", None)\n command = to_docker_command(\n task, # these are the params. Pre and Post command can be here\n docker_image=docker_image,\n script=script,\n env=env,\n use_gpu=use_gpu,\n use_tty=True,\n python_command=python_command,\n )\n print(command)\n if dry:\n return\n p = subprocess.Popen(command, shell=True)\n try:\n p.wait()\n except KeyboardInterrupt:\n try:\n print(\"terminating\")\n p.terminate()\n except OSError:\n print(\"os error!\")\n pass\n p.wait()\n elif mode == \"ec2\":\n if docker_image is None:\n docker_image = config.DOCKER_IMAGE\n s3_code_path = s3_sync_code(config, dry=dry, added_project_directories=added_project_directories)\n launch_ec2(batch_tasks,\n exp_prefix=exp_prefix,\n docker_image=docker_image,\n python_command=python_command,\n script=script,\n aws_config=aws_config,\n dry=dry,\n terminate_machine=terminate_machine,\n use_gpu=use_gpu,\n code_full_path=s3_code_path,\n sync_s3_pkl=sync_s3_pkl,\n sync_s3_png=sync_s3_png,\n sync_s3_log=sync_s3_log,\n sync_log_on_termination=sync_log_on_termination,\n periodic_sync=periodic_sync,\n periodic_sync_interval=periodic_sync_interval)\n elif mode == \"lab_kube\":\n # assert env is None\n # first send code folder to s3\n s3_code_path = s3_sync_code(config, dry=dry)\n if docker_image is None:\n docker_image = config.DOCKER_IMAGE\n for task in batch_tasks:\n # if 'env' in task:\n # assert task.pop('env') is None\n # TODO: dangerous when there are multiple tasks?\n task[\"resources\"] = params.pop(\n \"resources\", config.KUBE_DEFAULT_RESOURCES)\n task[\"node_selector\"] = params.pop(\n \"node_selector\", config.KUBE_DEFAULT_NODE_SELECTOR)\n task[\"exp_prefix\"] = exp_prefix\n pod_dict = to_lab_kube_pod(\n task, code_full_path=s3_code_path, docker_image=docker_image, script=script, is_gpu=use_gpu,\n python_command=python_command,\n sync_s3_pkl=sync_s3_pkl, periodic_sync=periodic_sync,\n periodic_sync_interval=periodic_sync_interval,\n sync_all_data_node_to_s3=sync_all_data_node_to_s3,\n terminate_machine=terminate_machine,\n )\n pod_str = json.dumps(pod_dict, indent=1)\n if dry:\n print(pod_str)\n dir = \"{pod_dir}/{exp_prefix}\".format(\n pod_dir=config.POD_DIR, exp_prefix=exp_prefix)\n ensure_dir(dir)\n fname = 
\"{dir}/{exp_name}.json\".format(\n dir=dir,\n exp_name=task[\"exp_name\"]\n )\n with open(fname, \"w\") as fh:\n fh.write(pod_str)\n kubecmd = \"kubectl create -f %s\" % fname\n print(kubecmd)\n if dry:\n return\n retry_count = 0\n wait_interval = 1\n while retry_count <= 5:\n try:\n return_code = subprocess.call(kubecmd, shell=True)\n if return_code == 0:\n break\n retry_count += 1\n print(\"trying again...\")\n time.sleep(wait_interval)\n except Exception as e:\n if isinstance(e, KeyboardInterrupt):\n raise\n print(e)\n else:\n raise NotImplementedError\n\n\n_find_unsafe = re.compile(r'[a-zA-Z0-9_^@%+=:,./-]').search\n\n\ndef ensure_dir(dirname):\n \"\"\"\n Ensure that a named directory exists; if it does not, attempt to create it.\n \"\"\"\n try:\n os.makedirs(dirname)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n\n\ndef _shellquote(s):\n \"\"\"Return a shell-escaped version of the string *s*.\"\"\"\n if not s:\n return \"''\"\n\n if _find_unsafe(s) is None:\n return s\n\n # use single quotes, and put single quotes into double quotes\n # the string $'b is then quoted as '$'\"'\"'b'\n\n return \"'\" + s.replace(\"'\", \"'\\\"'\\\"'\") + \"'\"\n\n\ndef _to_param_val(v):\n if v is None:\n return \"\"\n elif isinstance(v, list):\n return \" \".join(map(_shellquote, list(map(str, v))))\n else:\n return _shellquote(str(v))\n\n\ndef to_local_command(params, python_command=\"python\", script=osp.join(config.PROJECT_PATH,\n 'scripts/run_experiment.py'),\n use_gpu=False):\n command = python_command + \" \" + script\n if use_gpu and not config.USE_TF:\n command = \"THEANO_FLAGS='device=gpu,dnn.enabled=auto,floatX=float32' \" + command\n for k, v in config.ENV.items():\n command = (\"%s=%s \" % (k, v)) + command\n pre_commands = params.pop(\"pre_commands\", None)\n post_commands = params.pop(\"post_commands\", None)\n if pre_commands is not None or post_commands is not None:\n print(\"Not executing the pre_commands: \", pre_commands, \", nor post_commands: \", post_commands)\n\n for k, v in params.items():\n if isinstance(v, dict):\n for nk, nv in v.items():\n if str(nk) == \"_name\":\n command += \" --%s %s\" % (k, _to_param_val(nv))\n else:\n command += \\\n \" --%s_%s %s\" % (k, nk, _to_param_val(nv))\n else:\n command += \" --%s %s\" % (k, _to_param_val(v))\n return command\n\n\ndef to_docker_command(params, docker_image, python_command=\"python\", script='scripts/run_experiment_lite.py',\n pre_commands=None, use_tty=False,\n mujoco_path=None,\n post_commands=None, dry=False, use_gpu=False, env=None, local_code_dir=None):\n \"\"\"\n :param params: The parameters for the experiment. 
If logging directory parameters are provided, we will create\n docker volume mapping to make sure that the logging files are created at the correct locations\n :param docker_image: docker image to run the command on\n :param script: script command for running experiment\n :return:\n \"\"\"\n log_dir = params.get(\"log_dir\")\n docker_args = params.pop(\"docker_args\", \"\")\n if pre_commands is None:\n pre_commands = params.pop(\"pre_commands\", None)\n if post_commands is None:\n post_commands = params.pop(\"post_commands\", None)\n if mujoco_path is None:\n mujoco_path = config.MUJOCO_KEY_PATH\n # script = 'rllab/' + script\n # if not dry:\n\n # create volume for logging directory\n if use_gpu:\n command_prefix = \"nvidia-docker run\"\n else:\n command_prefix = \"docker run\"\n docker_log_dir = config.DOCKER_LOG_DIR\n\n if env is None:\n env = dict()\n env = dict(\n env,\n AWS_ACCESS_KEY_ID=config.AWS_ACCESS_KEY,\n AWS_SECRET_ACCESS_KEY=config.AWS_ACCESS_SECRET,\n )\n if env is not None:\n for k, v in env.items():\n command_prefix += \" -e \\\"{k}={v}\\\"\".format(k=k, v=v)\n command_prefix += \" -v {local_mujoco_key_dir}:{docker_mujoco_key_dir}\".format(\n local_mujoco_key_dir=mujoco_path, docker_mujoco_key_dir='/root/.mujoco')\n command_prefix += \" -v {local_log_dir}:{docker_log_dir}\".format(\n local_log_dir=log_dir,\n docker_log_dir=docker_log_dir\n )\n command_prefix += docker_args\n if local_code_dir is None:\n local_code_dir = config.PROJECT_PATH\n command_prefix += \" -v {local_code_dir}:{docker_code_dir}\".format(\n local_code_dir=local_code_dir,\n docker_code_dir=config.DOCKER_CODE_DIR\n )\n params = dict(params, log_dir=docker_log_dir)\n if use_tty:\n command_prefix += \" -ti \" + docker_image + \" /bin/bash -c \"\n else:\n command_prefix += \" -i \" + docker_image + \" /bin/bash -c \"\n command_list = list()\n if pre_commands is not None:\n command_list.extend(pre_commands)\n command_list.append(\"echo \\\"Running in docker\\\"\")\n command_list.append(to_local_command(\n params, python_command=python_command, script=osp.join(config.DOCKER_CODE_DIR, script), use_gpu=use_gpu))\n # We for 2 min sleep after termination to allow for last syncs.\n if post_commands is None:\n post_commands = ['sleep 120']\n command_list.extend(post_commands)\n return command_prefix + \"'\" + \"; \".join(command_list) + \"'\"\n\n\ndef dedent(s):\n lines = [l.strip() for l in s.split('\\n')]\n return '\\n'.join(lines)\n\n\ndef launch_ec2(params_list, exp_prefix, docker_image, code_full_path,\n python_command=\"python\",\n script='scripts/run_experiment.py',\n aws_config=None, dry=False, terminate_machine=True, use_gpu=False, sync_s3_pkl=False,\n sync_s3_png=False,\n sync_s3_log=False,\n sync_log_on_termination=True,\n periodic_sync=True, periodic_sync_interval=15):\n if len(params_list) == 0:\n return\n\n default_config = dict(\n image_id=config.AWS_IMAGE_ID,\n instance_type=config.AWS_INSTANCE_TYPE,\n key_name=config.AWS_KEY_NAME,\n spot=config.AWS_SPOT,\n spot_price=config.AWS_SPOT_PRICE,\n iam_instance_profile_name=config.AWS_IAM_INSTANCE_PROFILE_NAME,\n security_groups=config.AWS_SECURITY_GROUPS,\n security_group_ids=config.AWS_SECURITY_GROUP_IDS,\n network_interfaces=config.AWS_NETWORK_INTERFACES,\n )\n\n if aws_config is None:\n aws_config = dict()\n aws_config = dict(default_config, **aws_config)\n\n sio = StringIO()\n sio.write(\"#!/bin/bash\\n\")\n sio.write(\"{\\n\")\n sio.write(\"\"\"\n die() { status=$1; shift; echo \"FATAL: $*\"; exit $status; }\n \"\"\")\n sio.write(\"\"\"\n 
EC2_INSTANCE_ID=\"`wget -q -O - http://169.254.169.254/latest/meta-data/instance-id`\"\n \"\"\")\n sio.write(\"\"\"\n aws ec2 create-tags --resources $EC2_INSTANCE_ID --tags Key=Name,Value={exp_name} --region {aws_region}\n \"\"\".format(exp_name=params_list[0].get(\"exp_name\"), aws_region=config.AWS_REGION_NAME))\n if config.LABEL:\n sio.write(\"\"\"\n aws ec2 create-tags --resources $EC2_INSTANCE_ID --tags Key=owner,Value={label} --region {aws_region}\n \"\"\".format(label=config.LABEL, aws_region=config.AWS_REGION_NAME))\n sio.write(\"\"\"\n aws ec2 create-tags --resources $EC2_INSTANCE_ID --tags Key=exp_prefix,Value={exp_prefix} --region {aws_region}\n \"\"\".format(exp_prefix=exp_prefix, aws_region=config.AWS_REGION_NAME))\n sio.write(\"\"\"\n service docker start\n \"\"\")\n sio.write(\"\"\"\n docker --config /home/ubuntu/.docker pull {docker_image}\n \"\"\".format(docker_image=docker_image))\n sio.write(\"\"\"\n export AWS_DEFAULT_REGION={aws_region}\n \"\"\".format(aws_region=config.AWS_REGION_NAME))\n if config.FAST_CODE_SYNC:\n # sio.write(\"\"\"\n # aws s3 cp {code_full_path} /tmp/rllab_code.tar.gz --region {aws_region}\n # \"\"\".format(code_full_path=code_full_path, local_code_path=config.DOCKER_CODE_DIR,\n # aws_region=config.AWS_REGION_NAME))\n sio.write(\"\"\"\n aws s3 cp {code_full_path} /tmp/rllab_code.tar.gz\n \"\"\".format(code_full_path=code_full_path, local_code_path=config.DOCKER_CODE_DIR))\n sio.write(\"\"\"\n mkdir -p {local_code_path}\n \"\"\".format(code_full_path=code_full_path, local_code_path=config.DOCKER_CODE_DIR,\n aws_region=config.AWS_REGION_NAME))\n sio.write(\"\"\"\n tar -zxvf /tmp/rllab_code.tar.gz -C {local_code_path}\n \"\"\".format(code_full_path=code_full_path, local_code_path=config.DOCKER_CODE_DIR,\n aws_region=config.AWS_REGION_NAME))\n else:\n # sio.write(\"\"\"\n # aws s3 cp --recursive {code_full_path} {local_code_path} --region {aws_region}\n # \"\"\".format(code_full_path=code_full_path, local_code_path=config.DOCKER_CODE_DIR,\n # aws_region=config.AWS_REGION_NAME))\n sio.write(\"\"\"\n aws s3 cp --recursive {code_full_path} {local_code_path}\n \"\"\".format(code_full_path=code_full_path, local_code_path=config.DOCKER_CODE_DIR))\n\n s3_mujoco_key_path = config.AWS_CODE_SYNC_S3_PATH + '/.mujoco/'\n # sio.write(\"\"\"\n # aws s3 cp --recursive {} {} --region {}\n # \"\"\".format(s3_mujoco_key_path, config.MUJOCO_KEY_PATH, config.AWS_REGION_NAME))\n sio.write(\"\"\"\n aws s3 cp --recursive {} {}\n \"\"\".format(s3_mujoco_key_path, config.MUJOCO_KEY_PATH))\n sio.write(\"\"\"\n cd {local_code_path}\n \"\"\".format(local_code_path=config.DOCKER_CODE_DIR))\n\n for params in params_list:\n log_dir = params.get(\"log_dir\")\n remote_log_dir = params.pop(\"remote_log_dir\")\n env = params.pop(\"env\", None)\n\n sio.write(\"\"\"\n aws ec2 create-tags --resources $EC2_INSTANCE_ID --tags Key=Name,Value={exp_name} --region {aws_region}\n \"\"\".format(exp_name=params.get(\"exp_name\"), aws_region=config.AWS_REGION_NAME))\n sio.write(\"\"\"\n mkdir -p {log_dir}\n \"\"\".format(log_dir=log_dir))\n if periodic_sync:\n include_png = \" --include '*.png' \" if sync_s3_png else \" \"\n include_pkl = \" --include '*.pkl' \" if sync_s3_pkl else \" \"\n include_log = \" --include '*.log' \" if sync_s3_log else \" \"\n # sio.write(\"\"\"\n # while /bin/true; do\n # aws s3 sync --exclude '*' {include_png} {include_pkl} {include_log}--include '*.csv' --include '*.json' {log_dir} {remote_log_dir} --region {aws_region}\n # sleep {periodic_sync_interval}\n # done & 
echo sync initiated\"\"\".format(include_png=include_png, include_pkl=include_pkl, include_log=include_log,\n # log_dir=log_dir, remote_log_dir=remote_log_dir,\n # aws_region=config.AWS_REGION_NAME,\n # periodic_sync_interval=periodic_sync_interval))\n sio.write(\"\"\"\n while /bin/true; do\n aws s3 sync --exclude '*' {include_png} {include_pkl} {include_log}--include '*.csv' --include '*.json' {log_dir} {remote_log_dir}\n sleep {periodic_sync_interval}\n done & echo sync initiated\"\"\".format(include_png=include_png, include_pkl=include_pkl, include_log=include_log,\n log_dir=log_dir, remote_log_dir=remote_log_dir,\n periodic_sync_interval=periodic_sync_interval))\n if sync_log_on_termination:\n # sio.write(\"\"\"\n # while /bin/true; do\n # if [ -z $(curl -Is http://169.254.169.254/latest/meta-data/spot/termination-time | head -1 | grep 404 | cut -d \\ -f 2) ]\n # then\n # logger \"Running shutdown hook.\"\n # aws s3 cp /home/ubuntu/user_data.log {remote_log_dir}/stdout.log --region {aws_region}\n # aws s3 cp --recursive {log_dir} {remote_log_dir} --region {aws_region}\n # break\n # else\n # # Spot instance not yet marked for termination.\n # sleep 5\n # fi\n # done & echo log sync initiated\n # \"\"\".format(log_dir=log_dir, remote_log_dir=remote_log_dir, aws_region=config.AWS_REGION_NAME))\n sio.write(\"\"\"\n while /bin/true; do\n if [ -z $(curl -Is http://169.254.169.254/latest/meta-data/spot/termination-time | head -1 | grep 404 | cut -d \\ -f 2) ]\n then\n logger \"Running shutdown hook.\"\n aws s3 cp /home/ubuntu/user_data.log {remote_log_dir}/stdout.log\n aws s3 cp --recursive {log_dir} {remote_log_dir}\n break\n else\n # Spot instance not yet marked for termination.\n sleep 5\n fi\n done & echo log sync initiated\n \"\"\".format(log_dir=log_dir, remote_log_dir=remote_log_dir))\n if use_gpu:\n sio.write(\"\"\"\n for i in {1..800}; do su -c \"nvidia-modprobe -u -c=0\" ubuntu && break || sleep 3; done\n systemctl start nvidia-docker\n \"\"\")\n sio.write(\"\"\"\n {command}\n \"\"\".format(command=to_docker_command(params, docker_image, python_command=python_command, script=script,\n use_gpu=use_gpu, env=env,\n local_code_dir=config.DOCKER_CODE_DIR)))\n # sio.write(\"\"\"\n # aws s3 cp --recursive {log_dir} {remote_log_dir} --region {aws_region}\n # \"\"\".format(log_dir=log_dir, remote_log_dir=remote_log_dir, aws_region=config.AWS_REGION_NAME))\n sio.write(\"\"\"\n aws s3 cp --recursive {log_dir} {remote_log_dir}\n \"\"\".format(log_dir=log_dir, remote_log_dir=remote_log_dir))\n # sio.write(\"\"\"\n # aws s3 cp /home/ubuntu/user_data.log {remote_log_dir}/stdout.log --region {aws_region}\n # \"\"\".format(remote_log_dir=remote_log_dir, aws_region=config.AWS_REGION_NAME))\n sio.write(\"\"\"\n aws s3 cp /home/ubuntu/user_data.log {remote_log_dir}/stdout.log\n \"\"\".format(remote_log_dir=remote_log_dir))\n\n if terminate_machine:\n sio.write(\"\"\"\n EC2_INSTANCE_ID=\"`wget -q -O - http://169.254.169.254/latest/meta-data/instance-id || die \\\"wget instance-id has failed: $?\\\"`\"\n aws ec2 terminate-instances --instance-ids $EC2_INSTANCE_ID --region {aws_region}\n \"\"\".format(aws_region=config.AWS_REGION_NAME))\n sio.write(\"} >> /home/ubuntu/user_data.log 2>&1\\n\")\n\n full_script = dedent(sio.getvalue())\n\n import boto3\n import botocore\n if aws_config[\"spot\"]:\n ec2 = boto3.client(\n \"ec2\",\n region_name=config.AWS_REGION_NAME,\n aws_access_key_id=config.AWS_ACCESS_KEY,\n aws_secret_access_key=config.AWS_ACCESS_SECRET,\n )\n else:\n ec2 = boto3.resource(\n \"ec2\",\n 
region_name=config.AWS_REGION_NAME,\n aws_access_key_id=config.AWS_ACCESS_KEY,\n aws_secret_access_key=config.AWS_ACCESS_SECRET,\n )\n\n if len(full_script) > 10000 or len(base64.b64encode(full_script.encode()).decode(\"utf-8\")) > 10000:\n # Script too long; need to upload script to s3 first.\n # We're being conservative here since the actual limit is 16384 bytes\n s3_path = upload_file_to_s3(full_script)\n sio = StringIO()\n sio.write(\"#!/bin/bash\\n\")\n sio.write(\"\"\"\n aws s3 cp {s3_path} /home/ubuntu/remote_script.sh --region {aws_region} && \\\\\n chmod +x /home/ubuntu/remote_script.sh && \\\\\n bash /home/ubuntu/remote_script.sh\n \"\"\".format(s3_path=s3_path, aws_region=config.AWS_REGION_NAME))\n user_data = dedent(sio.getvalue())\n else:\n user_data = full_script\n print(full_script)\n with open(\"/tmp/full_script\", \"w\") as f:\n f.write(full_script)\n\n instance_args = dict(\n ImageId=aws_config[\"image_id\"],\n KeyName=aws_config[\"key_name\"],\n UserData=user_data,\n InstanceType=aws_config[\"instance_type\"],\n EbsOptimized=config.EBS_OPTIMIZED,\n SecurityGroups=aws_config[\"security_groups\"],\n SecurityGroupIds=aws_config[\"security_group_ids\"],\n NetworkInterfaces=aws_config[\"network_interfaces\"],\n IamInstanceProfile=dict(\n Name=aws_config[\"iam_instance_profile_name\"],\n ),\n **config.AWS_EXTRA_CONFIGS,\n )\n\n if len(instance_args[\"NetworkInterfaces\"]) > 0:\n # disable_security_group = query_yes_no(\n # \"Cannot provide both network interfaces and security groups info. Do you want to disable security group settings?\",\n # default=\"yes\",\n # )\n disable_security_group = True\n if disable_security_group:\n instance_args.pop(\"SecurityGroups\")\n instance_args.pop(\"SecurityGroupIds\")\n\n if aws_config.get(\"placement\", None) is not None:\n instance_args[\"Placement\"] = aws_config[\"placement\"]\n if not aws_config[\"spot\"]:\n instance_args[\"MinCount\"] = 1\n instance_args[\"MaxCount\"] = 1\n print(\"************************************************************\")\n print(instance_args[\"UserData\"])\n print(\"************************************************************\")\n if aws_config[\"spot\"]:\n instance_args[\"UserData\"] = base64.b64encode(instance_args[\"UserData\"].encode()).decode(\"utf-8\")\n spot_args = dict(\n DryRun=dry,\n InstanceCount=1,\n LaunchSpecification=instance_args,\n SpotPrice=aws_config[\"spot_price\"],\n # ClientToken=params_list[0][\"exp_name\"],\n )\n import pprint\n pprint.pprint(spot_args)\n if not dry:\n response = ec2.request_spot_instances(**spot_args)\n print(response)\n spot_request_id = response['SpotInstanceRequests'][\n 0]['SpotInstanceRequestId']\n for _ in range(10):\n try:\n ec2.create_tags(\n Resources=[spot_request_id],\n Tags=[\n {'Key': 'Name', 'Value': params_list[0][\"exp_name\"]}\n ],\n )\n break\n except botocore.exceptions.ClientError:\n continue\n else:\n import pprint\n pprint.pprint(instance_args)\n ec2.create_instances(\n DryRun=dry,\n **instance_args\n )\n\n\nS3_CODE_PATH = None\n\n\ndef s3_sync_code(config, dry=False, added_project_directories=[]):\n global S3_CODE_PATH\n if S3_CODE_PATH is not None:\n return S3_CODE_PATH\n base = config.AWS_CODE_SYNC_S3_PATH\n has_git = True\n\n if config.FAST_CODE_SYNC:\n try:\n current_commit = subprocess.check_output(\n [\"git\", \"rev-parse\", \"HEAD\"]).strip().decode(\"utf-8\")\n except subprocess.CalledProcessError as _:\n print(\"Warning: failed to execute git commands\")\n current_commit = None\n\n file_name = str(timestamp) + \"_\" + hashlib.sha224(\n 
subprocess.check_output([\"pwd\"]) + str(current_commit).encode() + str(timestamp).encode()\n ).hexdigest() + \".tar.gz\"\n\n file_path = \"/tmp/\" + file_name\n\n tar_cmd = [\"tar\", \"-zcvf\", file_path, \"-C\", config.PROJECT_PATH]\n\n for pattern in config.FAST_CODE_SYNC_IGNORES:\n tar_cmd += [\"--exclude\", pattern]\n tar_cmd += [\"-h\", \".\"]\n\n for path in added_project_directories:\n tar_cmd.append(\"-C\")\n tar_cmd.append(path)\n tar_cmd += [\".\"]\n\n remote_path = \"%s/%s\" % (base, file_name)\n\n upload_cmd = [\"aws\", \"s3\", \"cp\", file_path, remote_path]\n\n mujoco_key_cmd = [\n \"aws\", \"s3\", \"sync\", config.MUJOCO_KEY_PATH, \"{}/.mujoco/\".format(base)]\n\n print(\" \".join(tar_cmd))\n print(\" \".join(upload_cmd))\n print(\" \".join(mujoco_key_cmd))\n\n if not dry:\n subprocess.check_call(tar_cmd)\n subprocess.check_call(upload_cmd)\n try:\n subprocess.check_call(mujoco_key_cmd)\n except Exception as e:\n print(e)\n\n S3_CODE_PATH = remote_path\n return remote_path\n else:\n try:\n current_commit = subprocess.check_output(\n [\"git\", \"rev-parse\", \"HEAD\"]).strip().decode(\"utf-8\")\n clean_state = len(\n subprocess.check_output([\"git\", \"status\", \"--porcelain\"])) == 0\n except subprocess.CalledProcessError as _:\n print(\"Warning: failed to execute git commands\")\n has_git = False\n dir_hash = base64.b64encode(subprocess.check_output([\"pwd\"])).decode(\"utf-8\")\n code_path = \"%s_%s\" % (\n dir_hash,\n (current_commit if clean_state else \"%s_dirty_%s\" % (current_commit, timestamp)) if\n has_git else timestamp\n )\n full_path = \"%s/%s\" % (base, code_path)\n cache_path = \"%s/%s\" % (base, dir_hash)\n cache_cmds = [\"aws\", \"s3\", \"cp\", \"--recursive\"] + \\\n flatten([\"--exclude\", \"%s\" % pattern] for pattern in config.CODE_SYNC_IGNORES) + \\\n [cache_path, full_path]\n cmds = [\"aws\", \"s3\", \"cp\", \"--recursive\"] + \\\n flatten([\"--exclude\", \"%s\" % pattern] for pattern in config.CODE_SYNC_IGNORES) + \\\n [\".\", full_path]\n caching_cmds = [\"aws\", \"s3\", \"cp\", \"--recursive\"] + \\\n flatten([\"--exclude\", \"%s\" % pattern] for pattern in config.CODE_SYNC_IGNORES) + \\\n [full_path, cache_path]\n mujoco_key_cmd = [\n \"aws\", \"s3\", \"sync\", config.MUJOCO_KEY_PATH, \"{}/.mujoco/\".format(base)]\n print(cache_cmds, cmds, caching_cmds, mujoco_key_cmd)\n if not dry:\n subprocess.check_call(cache_cmds)\n subprocess.check_call(cmds)\n subprocess.check_call(caching_cmds)\n try:\n subprocess.check_call(mujoco_key_cmd)\n except Exception:\n print('Unable to sync mujoco keys!')\n S3_CODE_PATH = full_path\n return full_path\n\n\ndef upload_file_to_s3(script_content):\n import tempfile\n import uuid\n f = tempfile.NamedTemporaryFile(delete=False)\n f.write(script_content.encode())\n f.close()\n remote_path = os.path.join(\n config.AWS_CODE_SYNC_S3_PATH, \"oversize_bash_scripts\", str(uuid.uuid4()))\n subprocess.check_call([\"aws\", \"s3\", \"cp\", f.name, remote_path])\n os.unlink(f.name)\n return remote_path\n\n\ndef to_lab_kube_pod(\n params, docker_image, code_full_path,\n python_command=\"python\",\n script='scripts/run_experiment.py',\n is_gpu=False,\n sync_s3_pkl=False,\n periodic_sync=True,\n periodic_sync_interval=15,\n sync_all_data_node_to_s3=False,\n terminate_machine=True\n):\n \"\"\"\n :param params: The parameters for the experiment. 
If logging directory parameters are provided, we will create\n docker volume mapping to make sure that the logging files are created at the correct locations\n :param docker_image: docker image to run the command on\n :param script: script command for running experiment\n :return:\n \"\"\"\n log_dir = params.get(\"log_dir\")\n remote_log_dir = params.pop(\"remote_log_dir\")\n resources = params.pop(\"resources\")\n node_selector = params.pop(\"node_selector\")\n exp_prefix = params.pop(\"exp_prefix\")\n\n kube_env = [\n {\"name\": k, \"value\": v}\n for k, v in (params.pop(\"env\", None) or dict()).items()\n ]\n mkdir_p(log_dir)\n pre_commands = list()\n pre_commands.append('mkdir -p ~/.aws')\n pre_commands.append('mkdir ~/.mujoco')\n # fetch credentials from the kubernetes secret file\n pre_commands.append('echo \"[default]\" >> ~/.aws/credentials')\n pre_commands.append(\n \"echo \\\"aws_access_key_id = %s\\\" >> ~/.aws/credentials\" % config.AWS_ACCESS_KEY)\n pre_commands.append(\n \"echo \\\"aws_secret_access_key = %s\\\" >> ~/.aws/credentials\" % config.AWS_ACCESS_SECRET)\n s3_mujoco_key_path = config.AWS_CODE_SYNC_S3_PATH + '/.mujoco/'\n pre_commands.append(\n 'aws s3 cp --recursive {} {}'.format(s3_mujoco_key_path, '~/.mujoco'))\n\n if config.FAST_CODE_SYNC:\n pre_commands.append('aws s3 cp %s /tmp/rllab_code.tar.gz' % code_full_path)\n pre_commands.append('mkdir -p %s' % config.DOCKER_CODE_DIR)\n pre_commands.append('tar -zxvf /tmp/rllab_code.tar.gz -C %s' % config.DOCKER_CODE_DIR)\n else:\n pre_commands.append('aws s3 cp --recursive %s %s' %\n (code_full_path, config.DOCKER_CODE_DIR))\n pre_commands.append('cd %s' % config.DOCKER_CODE_DIR)\n pre_commands.append('mkdir -p %s' %\n (log_dir))\n\n if sync_all_data_node_to_s3:\n print('Syncing all data from node to s3.')\n if periodic_sync:\n if sync_s3_pkl:\n pre_commands.append(\"\"\"\n while /bin/true; do\n aws s3 sync {log_dir} {remote_log_dir} --region {aws_region} --quiet\n sleep {periodic_sync_interval}\n done & echo sync initiated\"\"\".format(log_dir=log_dir, remote_log_dir=remote_log_dir,\n aws_region=config.AWS_REGION_NAME,\n periodic_sync_interval=periodic_sync_interval))\n else:\n pre_commands.append(\"\"\"\n while /bin/true; do\n aws s3 sync {log_dir} {remote_log_dir} --region {aws_region} --quiet\n sleep {periodic_sync_interval}\n done & echo sync initiated\"\"\".format(log_dir=log_dir, remote_log_dir=remote_log_dir,\n aws_region=config.AWS_REGION_NAME,\n periodic_sync_interval=periodic_sync_interval))\n else:\n if periodic_sync:\n if sync_s3_pkl:\n pre_commands.append(\"\"\"\n while /bin/true; do\n aws s3 sync --exclude '*' --include '*.csv' --include '*.json' --include '*.pkl' {log_dir} {remote_log_dir} --region {aws_region} --quiet\n sleep {periodic_sync_interval}\n done & echo sync initiated\"\"\".format(log_dir=log_dir, remote_log_dir=remote_log_dir,\n aws_region=config.AWS_REGION_NAME,\n periodic_sync_interval=periodic_sync_interval))\n else:\n pre_commands.append(\"\"\"\n while /bin/true; do\n aws s3 sync --exclude '*' --include '*.csv' --include '*.json' {log_dir} {remote_log_dir} --region {aws_region} --quiet\n sleep {periodic_sync_interval}\n done & echo sync initiated\"\"\".format(log_dir=log_dir, remote_log_dir=remote_log_dir,\n aws_region=config.AWS_REGION_NAME,\n periodic_sync_interval=periodic_sync_interval))\n # copy the file to s3 after execution\n post_commands = list()\n post_commands.append('aws s3 cp --recursive %s %s' %\n (log_dir,\n remote_log_dir))\n if not terminate_machine:\n 
post_commands.append('sleep infinity')\n command_list = list()\n if pre_commands is not None:\n command_list.extend(pre_commands)\n command_list.append(\"echo \\\"Running in docker\\\"\")\n command_list.append(\n \"%s 2>&1 | tee -a %s\" % (\n to_local_command(params, python_command=python_command, script=script),\n \"%s/stdouterr.log\" % log_dir\n )\n )\n if post_commands is not None:\n command_list.extend(post_commands)\n command = \"; \".join(command_list)\n pod_name = config.KUBE_PREFIX + params[\"exp_name\"]\n # underscore is not allowed in pod names\n pod_name = pod_name.replace(\"_\", \"-\")\n print(\"Is gpu: \", is_gpu)\n if not is_gpu:\n return {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"name\": pod_name,\n \"labels\": {\n \"owner\": config.LABEL,\n \"expt\": pod_name,\n \"exp_time\": timestamp,\n \"exp_prefix\": exp_prefix,\n },\n },\n \"spec\": {\n \"containers\": [\n {\n \"name\": \"foo\",\n \"image\": docker_image,\n \"command\": [\n \"/bin/bash\",\n \"-c\",\n \"-li\", # to load conda env file\n command,\n ],\n \"resources\": resources,\n \"imagePullPolicy\": \"Always\",\n }\n ],\n \"restartPolicy\": \"Never\",\n \"nodeSelector\": node_selector,\n \"dnsPolicy\": \"Default\",\n }\n }\n return {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"name\": pod_name,\n \"labels\": {\n \"owner\": config.LABEL,\n \"expt\": pod_name,\n \"exp_time\": timestamp,\n \"exp_prefix\": exp_prefix,\n },\n },\n \"spec\": {\n \"containers\": [\n {\n \"name\": \"foo\",\n \"image\": docker_image,\n \"env\": kube_env,\n \"command\": [\n \"/bin/bash\",\n \"-c\",\n \"-li\", # to load conda env file\n command,\n ],\n \"resources\": resources,\n \"imagePullPolicy\": \"Always\",\n # gpu specific\n \"volumeMounts\": [\n {\n \"name\": \"nvidia\",\n \"mountPath\": \"/usr/local/nvidia\",\n \"readOnly\": True,\n }\n ],\n \"securityContext\": {\n \"privileged\": True,\n }\n }\n ],\n \"volumes\": [\n {\n \"name\": \"nvidia\",\n \"hostPath\": {\n \"path\": \"/var/lib/docker/volumes/nvidia_driver_352.63/_data\",\n }\n }\n ],\n \"restartPolicy\": \"Never\",\n \"nodeSelector\": node_selector,\n \"dnsPolicy\": \"Default\",\n }\n }\n\n\ndef concretize(maybe_stub):\n if isinstance(maybe_stub, StubMethodCall):\n obj = concretize(maybe_stub.obj)\n method = getattr(obj, maybe_stub.method_name)\n args = concretize(maybe_stub.args)\n kwargs = concretize(maybe_stub.kwargs)\n return method(*args, **kwargs)\n elif isinstance(maybe_stub, StubClass):\n return maybe_stub.proxy_class\n elif isinstance(maybe_stub, StubAttr):\n obj = concretize(maybe_stub.obj)\n attr_name = maybe_stub.attr_name\n attr_val = getattr(obj, attr_name)\n return concretize(attr_val)\n elif isinstance(maybe_stub, StubObject):\n if not hasattr(maybe_stub, \"__stub_cache\"):\n args = concretize(maybe_stub.args)\n kwargs = concretize(maybe_stub.kwargs)\n try:\n maybe_stub.__stub_cache = maybe_stub.proxy_class(\n *args, **kwargs)\n except Exception as e:\n print((\"Error while instantiating %s\" % maybe_stub.proxy_class))\n import traceback\n traceback.print_exc()\n ret = maybe_stub.__stub_cache\n return ret\n elif isinstance(maybe_stub, dict):\n # make sure that there's no hidden caveat\n ret = dict()\n for k, v in maybe_stub.items():\n ret[concretize(k)] = concretize(v)\n return ret\n elif isinstance(maybe_stub, (list, tuple)):\n return maybe_stub.__class__(list(map(concretize, maybe_stub)))\n else:\n return maybe_stub\n"
] | [
[
"numpy.random.shuffle"
]
] |
jlo118/DLlab2 | [
"01978907f48cfeb5cc406564a64454dc6b4f8485"
] | [
"Q2.py"
] | [
"import pandas\r\nfrom keras.models import Sequential\r\nfrom keras.layers.core import Dense, Activation\r\nfrom keras.callbacks import TensorBoard\r\n# load dataset\r\nfrom sklearn.model_selection import train_test_split\r\nimport pandas as pd\r\n\r\ndataset = pd.read_csv(\"framingham.csv\", header=None).values\r\nimport numpy as np\r\n\r\nX_train, X_test, Y_train, Y_test = train_test_split(dataset[:,0:15], dataset[:,15],\r\n test_size=0.33, random_state=87)\r\n\r\nnp.random.seed(100)\r\nnnokay = Sequential() # create model\r\nnnokay.add(Dense(20, input_dim=15, activation='tanh')) # hidden layer\r\nnnokay.add(Dense(30, activation='tanh')) #add whole layer\r\nnnokay.add(Dense(60, activation='tanh'))\r\nnnokay.add(Dense(20, activation='tanh'))\r\nnnokay.add(Dense(15, activation='tanh'))\r\nnnokay.add(Dense(60, activation='tanh'))\r\nnnokay.add(Dense(1, activation='tanh')) # output layer\r\nnnokay.compile(loss='binary_crossentropy', optimizer='sgd', metrics=['accuracy'])\r\n\r\nnnokay.fit(X_train, Y_train, epochs=250, verbose=0,\r\n callbacks=[TensorBoard(log_dir = '/tmp/auto')])\r\n#print(nnokay.summary())\r\n#print(nnokay.evaluate(X_test, Y_test, verbose=0))\r\n\r\nscore = nnokay.evaluate(X_test, Y_test)\r\nprint('test accuracy', score[1])\r\n\r\n"
] | [
[
"pandas.read_csv",
"numpy.random.seed",
"sklearn.model_selection.train_test_split"
]
] |
raoyongming/CAL | [
"76475ff56e399b276630d8bf3a4f5594803609a6"
] | [
"reid/modeling/baseline.py"
] | [
"import torch\nfrom torch import nn\nimport torch.nn.functional as F\nimport sys\n\nfrom .backbones.resnet import ResNet\nsys.path.append('.')\n\n\nEPSILON = 1e-12\n\n\ndef weights_init_kaiming(m):\n classname = m.__class__.__name__\n if classname.find('Linear') != -1:\n nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out')\n nn.init.constant_(m.bias, 0.0)\n elif classname.find('Conv') != -1:\n nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')\n if m.bias is not None:\n nn.init.constant_(m.bias, 0.0)\n elif classname.find('BatchNorm') != -1:\n if m.affine:\n nn.init.constant_(m.weight, 1.0)\n nn.init.constant_(m.bias, 0.0)\n\n\ndef weights_init_classifier(m):\n classname = m.__class__.__name__\n if classname.find('Linear') != -1:\n nn.init.normal_(m.weight, std=0.001)\n if m.bias:\n nn.init.constant_(m.bias, 0.0)\n\nclass BasicConv2d(nn.Module):\n\n def __init__(self, in_channels, out_channels, **kwargs):\n super(BasicConv2d, self).__init__()\n self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)\n self.bn = nn.BatchNorm2d(out_channels, eps=0.001)\n\n def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n return F.relu(x, inplace=True)\n\n\n\nclass SELayer(nn.Module):\n def __init__(self, channel, reduction=16):\n super(SELayer, self).__init__()\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.fc = nn.Sequential(\n nn.Linear(channel, channel // reduction, bias=False),\n nn.ReLU(inplace=True),\n nn.Linear(channel // reduction, channel, bias=False),\n nn.Sigmoid()\n )\n\n def forward(self, x):\n b, c, _, _ = x.size()\n y = self.avg_pool(x).view(b, c)\n y = self.fc(y).view(b, c, 1, 1)\n return y\n\n\nclass BAP(nn.Module):\n def __init__(self, pool='GAP'):\n super(BAP, self).__init__()\n assert pool in ['GAP', 'GMP']\n if pool == 'GAP':\n self.pool = None\n else:\n self.pool = nn.AdaptiveMaxPool2d(1)\n\n def forward(self, features, attentions, counterfactual=False):\n B, C, H, W = features.size()\n _, M, AH, AW = attentions.size()\n\n # match size\n if AH != H or AW != W:\n attentions = F.upsample_bilinear(attentions, size=(H, W))\n\n # feature_matrix: (B, M, C) -> (B, M * C)\n if self.pool is None:\n feature_matrix = (torch.einsum('imjk,injk->imn', (attentions, features)) / float(H * W)).view(B, -1)\n else:\n feature_matrix = []\n for i in range(M):\n AiF = self.pool(features * attentions[:, i:i + 1, ...]).view(B, -1)\n feature_matrix.append(AiF)\n feature_matrix = torch.cat(feature_matrix, dim=1)\n\n # sign-sqrt\n feature_matrix_raw = torch.sign(feature_matrix) * torch.sqrt(torch.abs(feature_matrix) + EPSILON)\n\n # l2 normalization along dimension M and C\n feature_matrix = F.normalize(feature_matrix_raw, dim=-1)\n\n if counterfactual:\n if self.training:\n fake_att = torch.zeros_like(attentions).uniform_(0, 2)\n else:\n fake_att = torch.ones_like(attentions)\n # mean_feature = features.mean(3).mean(2).view(B, 1, C)\n # counterfactual_feature = mean_feature.expand(B, M, C).contiguous().view(B, -1)\n counterfactual_feature = (torch.einsum('imjk,injk->imn', (fake_att, features)) / float(H * W)).view(B, -1)\n\n counterfactual_feature = torch.sign(counterfactual_feature) * torch.sqrt(torch.abs(counterfactual_feature) + EPSILON)\n\n counterfactual_feature = F.normalize(counterfactual_feature, dim=-1)\n return feature_matrix, counterfactual_feature\n else:\n return feature_matrix\n\nclass MultiHeadAtt(nn.Module):\n \"\"\"\n Extend the channel attention into MultiHeadAtt. \n It is modified from \"Zhang H, Wu C, Zhang Z, et al. 
Resnest: Split-attention networks.\" \n \"\"\"\n def __init__(self, in_channels, channels,\n radix=4, reduction_factor=4,\n rectify=False, norm_layer=nn.BatchNorm2d):\n super(MultiHeadAtt, self).__init__()\n\n inter_channels = max(in_channels*radix//reduction_factor, 32)\n self.radix = radix\n self.channels = channels\n \n self.relu = nn.ReLU(inplace=True)\n self.fc1 = nn.Conv2d(channels, inter_channels, 1, groups=1)\n self.bn1 = norm_layer(inter_channels)\n self.fc2 = nn.Conv2d(inter_channels, channels*radix, 1, groups=1)\n\n\n def forward(self, x):\n batch, channel = x.shape[:2]\n splited = torch.split(x, channel//self.radix, dim=1)\n gap = sum(splited)\n gap = F.adaptive_avg_pool2d(gap, 1)\n gap = self.fc1(gap)\n gap = self.bn1(gap)\n gap = self.relu(gap)\n\n atten = self.fc2(gap).view((batch, self.radix, self.channels))\n atten = F.softmax(atten, dim=1).view(batch, -1, 1, 1)\n atten = torch.split(atten, channel//self.radix, dim=1)\n\n out= torch.cat([att*split for (att, split) in zip(atten, splited)],1)\n return out.contiguous()\n\n\nclass BN2d(nn.Module):\n def __init__(self, planes):\n super(BN2d, self).__init__()\n self.bottleneck2 = nn.BatchNorm2d(planes)\n self.bottleneck2.bias.requires_grad_(False) # no shift\n self.bottleneck2.apply(weights_init_kaiming)\n\n def forward(self, x):\n return self.bottleneck2(x)\n\n\n\n\nclass Baseline(nn.Module):\n in_planes = 2048\n\n def __init__(self, num_classes, last_stride, model_path, using_cal):\n super(Baseline, self).__init__()\n self.using_cal = using_cal\n self.base = ResNet(last_stride)\n self.base.load_param(model_path)\n self.radix = 2\n self.base_1 = nn.Sequential(*list(self.base.children())[0:3])\n self.BN1 = BN2d(64)\n self.att1 = SELayer(64,8)\n self.att_s1=MultiHeadAtt(64,int(64/self.radix),radix=self.radix)\n self.base_2 = nn.Sequential(*list(self.base.children())[3:4])\n self.BN2 = BN2d(256)\n self.att2 = SELayer(256,32)\n self.att_s2=MultiHeadAtt(256,int(256/self.radix),radix=self.radix)\n self.base_3 = nn.Sequential(*list(self.base.children())[4:5])\n self.BN3 = BN2d(512)\n self.att3 = SELayer(512,64)\n self.att_s3 = MultiHeadAtt(512,int(512/self.radix),radix=self.radix)\n self.base_4 = nn.Sequential(*list(self.base.children())[5:6])\n self.BN4 = BN2d(1024)\n self.att4 = SELayer(1024,128)\n self.att_s4=MultiHeadAtt(1024,int(1024/self.radix),radix=self.radix)\n self.base_5 = nn.Sequential(*list(self.base.children())[6:])\n self.BN5 = BN2d(2048)\n self.att5 = SELayer(2048,256)\n self.att_s5=MultiHeadAtt(2048,int(2048/self.radix),radix=self.radix)\n\n self.M = 8\n\n self.attentions = BasicConv2d(2048, self.M, kernel_size=1)\n self.bap = BAP(pool='GAP')\n\n self.gap = nn.AdaptiveAvgPool2d(1)\n\n self.num_classes = num_classes\n\n self.bottleneck = nn.BatchNorm1d(self.in_planes)\n self.bottleneck.bias.requires_grad_(False) # no shift\n self.bottleneck.apply(weights_init_kaiming)\n\n\n self.classifier = nn.Linear(self.in_planes, self.num_classes, bias=False)\n self.classifier_bap = nn.Linear(self.in_planes*self.M, self.in_planes, bias=False)\n\n self.classifier.apply(weights_init_classifier)\n self.classifier_bap.apply(weights_init_classifier)\n\n \n def forward(self, x):\n\n ############\n x_1 = self.base_1(x)\n x_1 = self.att_s1(x_1)\n x_1 = self.BN1(x_1)\n y_1 = self.att1(x_1)\n x_att1=x_1*y_1.expand_as(x_1)\n\n\n x_2 = self.base_2(x_att1)\n x_2 = self.att_s2(x_2)\n x_2 = self.BN2(x_2)\n y_2 = self.att2(x_2)\n x_att2=x_2*y_2.expand_as(x_2)\n\n x_3 = self.base_3(x_att2)\n x_3 = self.att_s3(x_3)\n x_3 = self.BN3(x_3)\n y_3 = 
self.att3(x_3)\n x_att3=x_3*y_3.expand_as(x_3)\n\n x_4 = self.base_4(x_att3)\n x_4 = self.att_s4(x_4)\n x_4 = self.BN4(x_4)\n y_4 = self.att4(x_4)\n x_att4=x_4*y_4.expand_as(x_4)\n\n x_5 = self.base_5(x_att4)\n x_5 = self.att_s5(x_5)\n x_5 = self.BN5(x_5)\n y_5 = self.att5(x_5)\n x=x_5*y_5.expand_as(x_5) \n ############\n\n # x = self.base(x) replace above with this to use base network\n\n attention_maps = self.attentions(x)\n\n \n\n global_feat,global_feat_hat = self.bap(x, attention_maps,counterfactual=True)\n global_feat = global_feat.view(global_feat.shape[0], -1)\n global_feat_hat = global_feat_hat.view(global_feat.shape[0], -1)\n\n global_feat = self.classifier_bap(global_feat)\n global_feat_hat = self.classifier_bap(global_feat_hat)\n \n \n feat_hat = self.bottleneck(global_feat_hat)\n feat = self.bottleneck(global_feat) # normalize for angular softmax\n\n cls_score = self.classifier(feat)\n cls_score_hat = self.classifier(feat_hat)\n\n if self.training:\n if self.using_cal: \n return cls_score, cls_score-cls_score_hat, global_feat # global feature for triplet loss\n else:\n return cls_score, global_feat\n else:\n return cls_score\n"
] | [
[
"torch.nn.functional.softmax",
"torch.nn.Conv2d",
"torch.nn.Sigmoid",
"torch.cat",
"torch.nn.init.kaiming_normal_",
"torch.nn.BatchNorm2d",
"torch.nn.BatchNorm1d",
"torch.nn.init.normal_",
"torch.sign",
"torch.nn.functional.adaptive_avg_pool2d",
"torch.ones_like",
"torch.nn.AdaptiveMaxPool2d",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.functional.normalize",
"torch.einsum",
"torch.nn.Linear",
"torch.nn.init.constant_",
"torch.split",
"torch.zeros_like",
"torch.nn.functional.relu",
"torch.abs",
"torch.nn.ReLU",
"torch.nn.functional.upsample_bilinear"
]
] |
cheewoei1997/sentiment-analysis | [
"e936824de57a8cd40586a1a19145c6205b6c0843"
] | [
"sample_application/__init__.py"
] | [
"from flask import Flask, render_template, flash, request\nfrom flask_bootstrap import Bootstrap\nfrom flask_appconfig import AppConfig\nfrom flask_wtf import Form, RecaptchaField\nfrom flask_wtf.file import FileField\nfrom wtforms import TextField, HiddenField, ValidationError, RadioField,\\\n BooleanField, SubmitField, IntegerField, FormField, validators\nfrom wtforms.validators import Required\n\nimport nltk\nfrom nltk.corpus import stopwords\n# from nltk.classify import SklearnClassifier\nfrom nltk.classify import NaiveBayesClassifier\nfrom nltk.collocations import BigramCollocationFinder\n\nimport sklearn\nfrom nltk.classify.scikitlearn import SklearnClassifier\nfrom sklearn.svm import SVC, LinearSVC, NuSVC\nfrom sklearn.naive_bayes import MultinomialNB, BernoulliNB\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score\n\nimport os\nfrom random import shuffle\n\nnltk.download('punkt')\n\n\n# from analyser import set_data\n\n\nclass SentimentForm(Form):\n sentence = TextField('Type your sentence here', validators=[Required()])\n classifier = RadioField('This is a radio field', choices=[\n ('lsvc', 'LinearSVC'),\n ('bernb', 'BernoulliNB'),\n ('multi', 'Multinomial'),\n ('logreg', 'Logistic Regression'),\n ('svc', 'SVC'),\n ])\n\n submit_button = SubmitField('Submit')\n\n\ndef create_app(configfile=None):\n app = Flask(__name__)\n AppConfig(app, configfile) # Flask-Appconfig is not necessary, but\n # highly recommend =)\n # https://github.com/mbr/flask-appconfig\n Bootstrap(app)\n\n # in a real app, these should be configured through Flask-Appconfig\n app.config['SECRET_KEY'] = 'devkey'\n app.config['RECAPTCHA_PUBLIC_KEY'] = \\\n '6Lfol9cSAAAAADAkodaYl9wvQCwBMr3qGR_PPHcw'\n\n \n @app.route('/', methods=('GET', 'POST'))\n def index():\n # form = ExampleForm()\n form = SentimentForm()\n form.validate_on_submit() # to get error messages to the browser\n # flash('critical message', 'critical')\n # flash('error message', 'error')\n # flash('warning message', 'warning')\n # flash('info message', 'info')\n # flash('debug message', 'debug')\n # flash('different message', 'different')\n # flash('uncategorized message')\n sentences = ['the show is not only great, but also fantastic and a masterpiece',\n 'today is definitely a day for walking the dog',]\n\n\n if form.validate_on_submit():\n if request.method == 'POST':\n # switch out request.form with the 20 sentences\n result = request.form\n input_sentence = set_data(result)\n train_data = get_dataset(input_sentence)\n\n choice = result['classifier']\n choice_dict = {\n 'bernb': 'Bernoulli Naive Bayes',\n 'multi': 'Multinomial Naive Bayes',\n 'logreg': 'Logistic Regression',\n 'svc': 'Support Vector Classifier',\n 'lsvc': 'Linear Support Vector Classifier',\n }\n\n if choice == 'bernb':\n stats = set_classifier(BernoulliNB(), train_data, input_sentence)\n elif choice == 'multi':\n stats = set_classifier(MultinomialNB(), train_data, input_sentence)\n elif choice == 'logreg':\n stats = set_classifier(LogisticRegression(), train_data, input_sentence)\n elif choice == 'svc':\n stats = set_classifier(SVC(), train_data, input_sentence)\n elif choice == 'lsvc':\n stats = set_classifier(LinearSVC(), train_data, input_sentence)\n else:\n print('Something went terribly wrong')\n\n stats_dict = {\n 'posPercent': stats[0],\n 'negPercent': stats[1],\n 'pos': stats[2],\n 'neg': stats[3],\n 'sentence': result['sentence'],\n 'train_data': train_data,\n 'choice': choice_dict[str(choice)],\n }\n\n return 
render_template('result.html', context=stats_dict)\n \n else:\n print('ELSEEEE')\n print(request.form)\n # print(form.csrf_token)\n return render_template('error.html', form=form) \n\n return render_template('index.html', form=form)\n\n\n # @app.route('/result/')\n # def result():\n # print('Hola this is result')\n # return render_template('result.html')\n\n\n return app\n\n\ndef word_feats(words):\n return dict([(words, True)])\n\n\ndef set_data(requested):\n sentence = requested['sentence']\n target = sentence.lower()\n target = nltk.word_tokenize(target)\n return target\n\n\ndef get_dataset(target):\n # Loads the positive and negative words\n pos_words = open(os.path.join('datasets', 'positive-words.txt'), 'r').read()\n neg_words = open(os.path.join('datasets', 'negative-words.txt'), 'r').read()\n\n # Tokenize the words\n pos_words = nltk.word_tokenize(pos_words)\n neg_words = nltk.word_tokenize(neg_words)\n shuffle(pos_words)\n shuffle(neg_words)\n neg_words = neg_words[:2139]\n\n # Keep both positive and negative into posneg\n posneg = pos_words + neg_words\n\n neu_words = []\n [neu_words.append(neu) for neu in target if neu not in posneg]\n\n positive_features = [(word_feats(pos), 'pos') for pos in pos_words]\n negative_features = [(word_feats(neg), 'neg') for neg in neg_words]\n neutral_features = [(word_feats(neu.lower()), 'neu') for neu in neu_words]\n\n print('Positive feats:', len(positive_features))\n print('Negative feats:', len(negative_features))\n print('Neutral feats:', neutral_features)\n\n train_set = positive_features + negative_features + neutral_features\n return train_set\n\n\ndef set_classifier(chosen_classifier, train_set, sentence):\n classifier = SklearnClassifier(chosen_classifier)\n classifier.train(train_set)\n\n neg = 0\n pos = 0\n print('set_classifier', sentence)\n\n for word in sentence:\n classResult = classifier.classify(word_feats(word))\n print(word_feats(word))\n print(classResult)\n if classResult == 'neg':\n neg = neg + 1\n if classResult == 'pos':\n pos = pos + 1\n\n posPercent = str(float(pos)/len(sentence))\n negPercent = str(float(neg)/len(sentence))\n \n # print ('Accuracy:', nltk.classify.util.accuracy(classifier, sentence))\n # classifier.show_most_informative_features()\n # print('Score:', score)\n\n print('Positive: ' + posPercent)\n print('Negative: ' + negPercent)\n print('Pos', pos)\n print('Neg', neg)\n\n return posPercent, negPercent, pos, neg\n \n\nif __name__ == '__main__':\n create_app().run(debug=True)\n\n\n# ==============================================================================\n\n\n# from flask import Flask, render_template, flash, request\n# from flask_bootstrap import Bootstrap\n# from flask_appconfig import AppConfig\n# from flask_wtf import Form, RecaptchaField\n# from flask_wtf.file import FileField\n# from wtforms import TextField, HiddenField, ValidationError, RadioField,\\\n# BooleanField, SubmitField, IntegerField, FormField, validators\n# from wtforms.validators import Required\n\n# import nltk\n# from nltk.corpus import stopwords\n# # from nltk.classify import SklearnClassifier\n# from nltk.classify import NaiveBayesClassifier\n# from nltk.collocations import BigramCollocationFinder\n\n# import sklearn\n# from nltk.classify.scikitlearn import SklearnClassifier\n# from sklearn.svm import SVC, LinearSVC, NuSVC\n# from sklearn.naive_bayes import MultinomialNB, BernoulliNB\n# from sklearn.linear_model import LogisticRegression\n# from sklearn.metrics import accuracy_score\n\n# import os\n# from random import 
shuffle\n\n# nltk.download('punkt')\n\n\n# # from analyser import set_data\n\n\n# class SentimentForm(Form):\n# sentence = TextField('Type your sentence here', validators=[Required()])\n# classifier = RadioField('This is a radio field', choices=[\n# ('lsvc', 'LinearSVC'),\n# ('bernb', 'BernoulliNB'),\n# ('multi', 'Multinomial'),\n# ('logreg', 'Logistic Regression'),\n# ('svc', 'SVC'),\n# ])\n\n# submit_button = SubmitField('Submit')\n\n\n# def create_app(configfile=None):\n# app = Flask(__name__)\n# AppConfig(app, configfile) # Flask-Appconfig is not necessary, but\n# # highly recommend =)\n# # https://github.com/mbr/flask-appconfig\n# Bootstrap(app)\n\n# # in a real app, these should be configured through Flask-Appconfig\n# app.config['SECRET_KEY'] = 'devkey'\n# app.config['RECAPTCHA_PUBLIC_KEY'] = \\\n# '6Lfol9cSAAAAADAkodaYl9wvQCwBMr3qGR_PPHcw'\n\n \n# @app.route('/', methods=('GET', 'POST'))\n# def index():\n# # form = ExampleForm()\n# form = SentimentForm()\n# form.validate_on_submit() # to get error messages to the browser\n# # flash('critical message', 'critical')\n# # flash('error message', 'error')\n# # flash('warning message', 'warning')\n# # flash('info message', 'info')\n# # flash('debug message', 'debug')\n# # flash('different message', 'different')\n# # flash('uncategorized message')\n\n# if form.validate_on_submit():\n# if request.method == 'POST':\n# # switch out request.form with the 20 sentences\n# result = request.form\n# input_sentence = set_data(result)\n# train_data = get_dataset(input_sentence)\n\n# choice = result['classifier']\n# choice_dict = {\n# 'bernb': 'Bernoulli Naive Bayes',\n# 'multi': 'Multinomial Naive Bayes',\n# 'logreg': 'Logistic Regression',\n# 'svc': 'Support Vector Classifier',\n# 'lsvc': 'Linear Support Vector Classifier',\n# }\n\n# if choice == 'bernb':\n# stats = set_classifier(BernoulliNB(), train_data, input_sentence)\n# elif choice == 'multi':\n# stats = set_classifier(MultinomialNB(), train_data, input_sentence)\n# elif choice == 'logreg':\n# stats = set_classifier(LogisticRegression(), train_data, input_sentence)\n# elif choice == 'svc':\n# stats = set_classifier(SVC(), train_data, input_sentence)\n# elif choice == 'lsvc':\n# stats = set_classifier(LinearSVC(), train_data, input_sentence)\n# else:\n# print('Something went terribly wrong')\n\n# stats_dict = {\n# 'posPercent': stats[0],\n# 'negPercent': stats[1],\n# 'pos': stats[2],\n# 'neg': stats[3],\n# 'sentence': result['sentence'],\n# 'train_data': train_data,\n# 'choice': choice_dict[str(choice)],\n# }\n\n# return render_template('result.html', context=stats_dict)\n \n# else:\n# print('ELSEEEE')\n# print(request.form)\n# # print(form.csrf_token)\n# return render_template('error.html', form=form) \n\n# return render_template('index.html', form=form)\n\n\n# # @app.route('/result/')\n# # def result():\n# # print('Hola this is result')\n# # return render_template('result.html')\n\n\n# return app\n\n\n# def word_feats(words):\n# return dict([(words, True)])\n\n\n# def set_data(requested):\n# sentence = requested['sentence']\n# target = sentence.lower()\n# target = nltk.word_tokenize(target)\n# return target\n\n\n# def get_dataset(target):\n# # Loads the positive and negative words\n# pos_words = open(os.path.join('datasets', 'positive-words.txt'), 'r').read()\n# neg_words = open(os.path.join('datasets', 'negative-words.txt'), 'r').read()\n\n# # Tokenize the words\n# pos_words = nltk.word_tokenize(pos_words)\n# neg_words = nltk.word_tokenize(neg_words)\n# shuffle(pos_words)\n# 
shuffle(neg_words)\n# neg_words = neg_words[:2139]\n\n# # Keep both positive and negative into posneg\n# posneg = pos_words + neg_words\n\n# neu_words = []\n# [neu_words.append(neu) for neu in target if neu not in posneg]\n\n# positive_features = [(word_feats(pos), 'pos') for pos in pos_words]\n# negative_features = [(word_feats(neg), 'neg') for neg in neg_words]\n# neutral_features = [(word_feats(neu.lower()), 'neu') for neu in neu_words]\n\n# print('Positive feats:', len(positive_features))\n# print('Negative feats:', len(negative_features))\n# print('Neutral feats:', neutral_features)\n\n# train_set = positive_features + negative_features + neutral_features\n# return train_set\n\n\n# def set_classifier(chosen_classifier, train_set, sentence):\n# classifier = SklearnClassifier(chosen_classifier)\n# classifier.train(train_set)\n\n# neg = 0\n# pos = 0\n# print('set_classifier', sentence)\n\n# for word in sentence:\n# classResult = classifier.classify(word_feats(word))\n# print(word_feats(word))\n# print(classResult)\n# if classResult == 'neg':\n# neg = neg + 1\n# if classResult == 'pos':\n# pos = pos + 1\n\n# posPercent = str(float(pos)/len(sentence))\n# negPercent = str(float(neg)/len(sentence))\n \n# # print ('Accuracy:', nltk.classify.util.accuracy(classifier, sentence))\n# # classifier.show_most_informative_features()\n# # print('Score:', score)\n\n# print('Positive: ' + posPercent)\n# print('Negative: ' + negPercent)\n# print('Pos', pos)\n# print('Neg', neg)\n\n# return posPercent, negPercent, pos, neg\n \n\n# if __name__ == '__main__':\n# create_app().run(debug=True)\n\n"
] | [
[
"sklearn.svm.SVC",
"sklearn.naive_bayes.MultinomialNB",
"sklearn.svm.LinearSVC",
"sklearn.naive_bayes.BernoulliNB",
"sklearn.linear_model.LogisticRegression"
]
] |
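The APIs indexed for this row are the scikit-learn estimators that the app's `set_classifier()` wraps in NLTK's `SklearnClassifier`. The sketch below isolates that wrapping pattern outside the Flask plumbing; the toy word lists are illustrative only and stand in for the positive-words.txt / negative-words.txt datasets the row's code loads.

```python
# Standalone sketch of the SklearnClassifier wrapping used by set_classifier().
# The training words are made up; the real app builds its feature sets from
# word-list files on disk.
from nltk.classify.scikitlearn import SklearnClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.svm import SVC, LinearSVC


def word_feats(word):
    # single-word feature dict, mirroring the row's word_feats()
    return {word: True}


train_set = ([(word_feats(w), 'pos') for w in ('great', 'fantastic', 'masterpiece')]
             + [(word_feats(w), 'neg') for w in ('awful', 'terrible', 'boring')])

for estimator in (BernoulliNB(), MultinomialNB(), LogisticRegression(),
                  SVC(), LinearSVC()):
    clf = SklearnClassifier(estimator)
    clf.train(train_set)
    print(type(estimator).__name__, clf.classify(word_feats('great')))
```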
semitable/multiagent-particle-envs | [
"2cef12f72a9192a819ef289646526801c39fb909"
] | [
"mpe/environment.py"
] | [
"import gym\nfrom gym import spaces\nfrom gym.envs.registration import EnvSpec\nimport numpy as np\nfrom mpe.multi_discrete import MultiDiscrete\nimport copy\n\n# environment for all agents in the multiagent world\n# currently code assumes that no agents will be created/destroyed at runtime!\nclass MultiAgentEnv(gym.Env):\n metadata = {\n 'render.modes' : ['human', 'rgb_array']\n }\n\n def __init__(self, world, reset_callback=None, reward_callback=None,\n observation_callback=None, info_callback=None,\n done_callback=None, shared_viewer=True):\n\n world = copy.deepcopy(world)\n self.world = world\n self.agents = self.world.policy_agents\n # set required vectorized gym env property\n self.n = len(world.policy_agents)\n # scenario callbacks\n self.reset_callback = reset_callback\n self.reward_callback = reward_callback\n self.observation_callback = observation_callback\n self.info_callback = info_callback\n self.done_callback = done_callback\n # environment parameters\n self.discrete_action_space = True\n # if true, action is a number 0...N, otherwise action is a one-hot N-dimensional vector\n self.discrete_action_input = False\n # if true, even the action is continuous, action will be performed discretely\n self.force_discrete_action = world.discrete_action if hasattr(world, 'discrete_action') else False\n # if true, every agent has the same reward\n self.shared_reward = world.collaborative if hasattr(world, 'collaborative') else False\n self.time = 0\n\n # configure spaces\n self.action_space = []\n self.observation_space = []\n for agent in self.agents:\n total_action_space = []\n # physical action space\n if self.discrete_action_space:\n u_action_space = spaces.Discrete(world.dim_p * 2 + 1)\n else:\n u_action_space = spaces.Box(low=-agent.u_range, high=+agent.u_range, shape=(world.dim_p,), dtype=np.float32)\n if agent.movable:\n total_action_space.append(u_action_space)\n # communication action space\n if self.discrete_action_space:\n c_action_space = spaces.Discrete(world.dim_c)\n else:\n c_action_space = spaces.Box(low=0.0, high=1.0, shape=(world.dim_c,), dtype=np.float32)\n if not agent.silent:\n total_action_space.append(c_action_space)\n # total action space\n if len(total_action_space) > 1:\n # all action spaces are discrete, so simplify to MultiDiscrete action space\n if all([isinstance(act_space, spaces.Discrete) for act_space in total_action_space]):\n act_space = MultiDiscrete([[0, act_space.n - 1] for act_space in total_action_space])\n else:\n act_space = spaces.Tuple(total_action_space)\n self.action_space.append(act_space)\n else:\n self.action_space.append(total_action_space[0])\n # observation space\n obs_dim = len(observation_callback(agent, self.world))\n self.observation_space.append(spaces.Box(low=-np.inf, high=+np.inf, shape=(obs_dim,), dtype=np.float32))\n agent.action.c = np.zeros(self.world.dim_c)\n\n self.action_space = spaces.Tuple(tuple(self.action_space))\n self.observation_space = spaces.Tuple(tuple(self.observation_space))\n self.n_agents = self.n\n\n # rendering\n self.shared_viewer = shared_viewer\n if self.shared_viewer:\n self.viewers = [None]\n else:\n self.viewers = [None] * self.n\n self._reset_render()\n\n def seed(self, seed):\n self.world.seed(seed)\n\n def step(self, action_n):\n\n one_hot_actions = []\n for act, acsp in zip(action_n, self.action_space):\n one_hot = np.zeros(acsp.n)\n one_hot[act] = 1.0\n one_hot_actions.append(one_hot)\n action_n = one_hot_actions\n\n obs_n = []\n reward_n = []\n done_n = []\n info_n = {'n': []}\n self.agents = 
self.world.policy_agents\n # set action for each agent\n for i, agent in enumerate(self.agents):\n self._set_action(action_n[i], agent, self.action_space[i])\n # advance world state\n self.world.step()\n # record observation for each agent\n for agent in self.agents:\n obs_n.append(self._get_obs(agent))\n reward_n.append(self._get_reward(agent))\n done_n.append(self._get_done(agent))\n\n info_n['n'].append(self._get_info(agent))\n\n # all agents get total reward in cooperative case\n reward = np.sum(reward_n)\n if self.shared_reward:\n reward_n = [reward] * self.n\n\n return tuple(obs_n), reward_n, done_n, info_n\n\n def reset(self):\n # reset world\n self.reset_callback(self.world)\n # reset renderer\n self._reset_render()\n # record observations for each agent\n obs_n = []\n self.agents = self.world.policy_agents\n for agent in self.agents:\n obs_n.append(self._get_obs(agent))\n return tuple(obs_n)\n\n # get info used for benchmarking\n def _get_info(self, agent):\n if self.info_callback is None:\n return {}\n return self.info_callback(agent, self.world)\n\n # get observation for a particular agent\n def _get_obs(self, agent):\n if self.observation_callback is None:\n return np.zeros(0)\n return self.observation_callback(agent, self.world).astype(np.float32)\n\n # get dones for a particular agent\n # unused right now -- agents are allowed to go beyond the viewing screen\n def _get_done(self, agent):\n if self.done_callback is None:\n return False\n return self.done_callback(agent, self.world)\n\n # get reward for a particular agent\n def _get_reward(self, agent):\n if self.reward_callback is None:\n return 0.0\n return self.reward_callback(agent, self.world)\n\n # set env action for a particular agent\n def _set_action(self, action, agent, action_space, time=None):\n agent.action.u = np.zeros(self.world.dim_p)\n agent.action.c = np.zeros(self.world.dim_c)\n # process action\n if isinstance(action_space, MultiDiscrete):\n act = []\n size = action_space.high - action_space.low + 1\n index = 0\n for s in size:\n act.append(action[index:(index+s)])\n index += s\n action = act\n else:\n action = [action]\n\n if agent.movable:\n # physical action\n if self.discrete_action_input:\n agent.action.u = np.zeros(self.world.dim_p)\n # process discrete action\n if action[0] == 1: agent.action.u[0] = -1.0\n if action[0] == 2: agent.action.u[0] = +1.0\n if action[0] == 3: agent.action.u[1] = -1.0\n if action[0] == 4: agent.action.u[1] = +1.0\n else:\n if self.force_discrete_action:\n d = np.argmax(action[0])\n action[0][:] = 0.0\n action[0][d] = 1.0\n if self.discrete_action_space:\n agent.action.u[0] += action[0][1] - action[0][2]\n agent.action.u[1] += action[0][3] - action[0][4]\n else:\n agent.action.u = action[0]\n sensitivity = 5.0\n if agent.accel is not None:\n sensitivity = agent.accel\n agent.action.u *= sensitivity\n action = action[1:]\n if not agent.silent:\n # communication action\n if self.discrete_action_input:\n agent.action.c = np.zeros(self.world.dim_c)\n agent.action.c[action[0]] = 1.0\n else:\n agent.action.c = action[0]\n action = action[1:]\n # make sure we used all elements of action\n assert len(action) == 0\n\n # reset rendering assets\n def _reset_render(self):\n self.render_geoms = None\n self.render_geoms_xform = None\n\n # render environment\n def render(self, mode='human'):\n if mode == 'human':\n alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n message = ''\n for agent in self.world.agents:\n comm = []\n for other in self.world.agents:\n if other is agent: continue\n if 
np.all(other.state.c == 0):\n word = '_'\n else:\n word = alphabet[np.argmax(other.state.c)]\n message += (other.name + ' to ' + agent.name + ': ' + word + ' ')\n print(message)\n\n for i in range(len(self.viewers)):\n # create viewers (if necessary)\n if self.viewers[i] is None:\n # import rendering only if we need it (and don't import for headless machines)\n #from gym.envs.classic_control import rendering\n from mpe import rendering\n self.viewers[i] = rendering.Viewer(700,700)\n\n # create rendering geometry\n if self.render_geoms is None:\n # import rendering only if we need it (and don't import for headless machines)\n #from gym.envs.classic_control import rendering\n from mpe import rendering\n self.render_geoms = []\n self.render_geoms_xform = []\n for entity in self.world.entities:\n geom = rendering.make_circle(entity.size)\n xform = rendering.Transform()\n if 'agent' in entity.name:\n geom.set_color(*entity.color, alpha=0.5)\n else:\n geom.set_color(*entity.color)\n geom.add_attr(xform)\n self.render_geoms.append(geom)\n self.render_geoms_xform.append(xform)\n\n # add geoms to viewer\n for viewer in self.viewers:\n viewer.geoms = []\n for geom in self.render_geoms:\n viewer.add_geom(geom)\n\n results = []\n for i in range(len(self.viewers)):\n from mpe import rendering\n # update bounds to center around agent\n cam_range = 1\n if self.shared_viewer:\n pos = np.zeros(self.world.dim_p)\n else:\n pos = self.agents[i].state.p_pos\n self.viewers[i].set_bounds(pos[0]-cam_range,pos[0]+cam_range,pos[1]-cam_range,pos[1]+cam_range)\n # update geometry positions\n for e, entity in enumerate(self.world.entities):\n self.render_geoms_xform[e].set_translation(*entity.state.p_pos)\n # render to display or array\n results.append(self.viewers[i].render(return_rgb_array = mode=='rgb_array'))\n\n if self.shared_viewer:\n assert len(results) == 1\n return results[0]\n\n return results\n\n # create receptor field locations in local coordinate frame\n def _make_receptor_locations(self, agent):\n receptor_type = 'polar'\n range_min = 0.05 * 2.0\n range_max = 1.00\n dx = []\n # circular receptive field\n if receptor_type == 'polar':\n for angle in np.linspace(-np.pi, +np.pi, 8, endpoint=False):\n for distance in np.linspace(range_min, range_max, 3):\n dx.append(distance * np.array([np.cos(angle), np.sin(angle)]))\n # add origin\n dx.append(np.array([0.0, 0.0]))\n # grid receptive field\n if receptor_type == 'grid':\n for x in np.linspace(-range_max, +range_max, 5):\n for y in np.linspace(-range_max, +range_max, 5):\n dx.append(np.array([x,y]))\n return dx\n\n def close(self):\n for viewer in self.viewers:\n if viewer:\n viewer.close()\n\n\n# vectorized wrapper for a batch of multi-agent environments\n# assumes all environments have the same observation and action space\nclass BatchMultiAgentEnv(gym.Env):\n metadata = {\n 'runtime.vectorized': True,\n 'render.modes' : ['human', 'rgb_array']\n }\n\n def __init__(self, env_batch):\n self.env_batch = env_batch\n\n @property\n def n(self):\n return np.sum([env.n for env in self.env_batch])\n\n @property\n def action_space(self):\n return self.env_batch[0].action_space\n\n @property\n def observation_space(self):\n return self.env_batch[0].observation_space\n\n def step(self, action_n, time):\n obs_n = []\n reward_n = []\n done_n = []\n info_n = {'n': []}\n i = 0\n for env in self.env_batch:\n obs, reward, done, _ = env.step(action_n[i:(i+env.n)], time)\n i += env.n\n obs_n += obs\n # reward = [r / len(self.env_batch) for r in reward]\n reward_n += reward\n 
done_n += done\n return obs_n, reward_n, done_n, info_n\n\n def reset(self):\n obs_n = []\n for env in self.env_batch:\n obs_n += env.reset()\n return obs_n\n\n # render environment\n def render(self, mode='human', close=True):\n results_n = []\n for env in self.env_batch:\n results_n += env.render(mode, close)\n return results_n\n"
] | [
[
"numpy.sum",
"numpy.zeros",
"numpy.cos",
"numpy.argmax",
"numpy.all",
"numpy.array",
"numpy.sin",
"numpy.linspace"
]
] |
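Among the numpy calls indexed for this row, the `linspace`/`cos`/`sin`/`array` group comes from `MultiAgentEnv._make_receptor_locations()`. A minimal standalone reconstruction of that polar receptive field:

```python
# Polar receptive field as built in _make_receptor_locations(): 8 angles x
# 3 distances plus the origin, i.e. 25 receptor locations in total.
import numpy as np

range_min, range_max = 0.05 * 2.0, 1.00
dx = []
for angle in np.linspace(-np.pi, +np.pi, 8, endpoint=False):
    for distance in np.linspace(range_min, range_max, 3):
        dx.append(distance * np.array([np.cos(angle), np.sin(angle)]))
dx.append(np.array([0.0, 0.0]))  # origin receptor
print(len(dx))  # -> 25
```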
pyjsdev/googlemap_flask | [
"9d0dd899a9cbf756b3d83c33e3d8a47e7db40cc5",
"9d0dd899a9cbf756b3d83c33e3d8a47e7db40cc5"
] | [
"examples/charts/file/hover_span.py",
"examples/models/image_url.py"
] | [
"import pandas as pd\n\nfrom bokeh.charts import Line, Scatter, show, output_file, defaults\nfrom bokeh.layouts import gridplot\nfrom bokeh.models import HoverTool\nfrom bokeh.sampledata.degrees import data\n\ndefaults.width = 500\ndefaults.height = 300\n\nTOOLS='box_zoom,box_select,hover,crosshair,reset'\n\nTOOLTIPS = [ (\"y\", \"$~y\"), (\"x\", \"$~x\") ]\n\ndata = data[['Biology', 'Business', 'Computer Science', \"Year\"]]\ndata = pd.melt(data, id_vars=['Year'],\n value_vars=['Biology', 'Business', 'Computer Science'],\n value_name='Count', var_name='Degree')\n\nvline = Line(data, y='Count', color='Degree', title=\"Lines VLine\", ylabel='measures',\n tools=TOOLS)\n\nhline = Line(data, y='Count', color='Degree', title=\"Lines HLine\",\n ylabel='measures', tools=TOOLS)\n\nint_vline = Line(data, y='Count', color='Degree', title=\"Lines VLine Interp\",\n ylabel='measures', tools=TOOLS)\n\nint_hline = Line(data, y='Count', color='Degree', title=\"Lines HLine Interp\",\n ylabel='measures', tools=TOOLS)\n\nscatter_point = Scatter(data, x='Year', y='Count', color='Degree',\n title=\"Scatter mouse\", ylabel='measures', legend=True,\n tools=TOOLS)\n\nscatter = Scatter(data, x='Year', y='Count', color='Degree',\n title=\"Scatter V Line\", ylabel='measures', legend=True, tools=TOOLS)\n\nint_point_line = Line(data, x='Year', y='Count', color='Degree',\n title=\"Lines Mouse Interp.\", ylabel='measures', tools=TOOLS)\n\npoint_line = Line(data, x='Year', y='Count', color='Degree',\n title=\"Lines Mouse\", ylabel='measures', tools=TOOLS)\n\n\nhhover = hline.select(HoverTool)\nhhover.mode = 'hline'\nhhover.line_policy = 'next'\n\nvhover = vline.select(HoverTool)\nvhover.mode = 'vline'\nvhover.line_policy = 'nearest'\n\nint_hhover = int_hline.select(HoverTool)\nint_hhover.mode = 'hline'\nint_hhover.line_policy = 'interp'\n\nint_vhover = int_vline.select(HoverTool)\nint_vhover.mode = 'vline'\nint_vhover.line_policy = 'interp'\n\niphover = int_point_line.select(HoverTool)\niphover.mode = 'mouse'\niphover.line_policy = 'interp'\n\ntphover = point_line.select(HoverTool)\ntphover.mode = 'mouse'\n\nshover = scatter.select(HoverTool)\nshover.mode = 'vline'\n\nshoverp = scatter_point.select(HoverTool)\nshoverp.mode = 'mouse'\n\n# set up tooltips\nint_vhover.tooltips = int_hhover.tooltips = TOOLTIPS\ntphover.tooltips = iphover.tooltips = TOOLTIPS\nshover.tooltips = shoverp.tooltips = TOOLTIPS\nvhover.tooltips = hhover.tooltips = TOOLTIPS\n\noutput_file(\"hover_span.html\", title=\"hover_span.py example\")\n\nshow(gridplot(hline, vline, int_hline, int_vline,\n int_point_line, point_line, scatter_point, scatter,\n ncols=2))\n",
"import numpy as np\n\nfrom bokeh.util.browser import view\nfrom bokeh.document import Document\nfrom bokeh.embed import file_html\nfrom bokeh.models.glyphs import ImageURL\nfrom bokeh.models import ColumnDataSource, Range1d, Plot, LinearAxis, Grid\nfrom bokeh.resources import INLINE\n\nurl = \"http://bokeh.pydata.org/en/latest/_static/images/logo.png\"\nN = 5\n\nsource = ColumnDataSource(dict(\n url = [url]*N,\n x1 = np.linspace( 0, 150, N),\n y1 = np.linspace( 0, 150, N),\n w1 = np.linspace( 10, 50, N),\n h1 = np.linspace( 10, 50, N),\n x2 = np.linspace(-50, 150, N),\n y2 = np.linspace( 0, 200, N),\n))\n\nxdr = Range1d(start=-100, end=200)\nydr = Range1d(start=-100, end=200)\n\nplot = Plot(x_range=xdr, y_range=ydr)\nplot.title.text = \"ImageURL\"\nplot.toolbar_location = None\n\nimage1 = ImageURL(url=\"url\", x=\"x1\", y=\"y1\", w=\"w1\", h=\"h1\", anchor=\"center\", global_alpha=0.2)\nplot.add_glyph(source, image1)\n\nimage2 = ImageURL(url=\"url\", x=\"x2\", y=\"y2\", w=20, h=20, anchor=\"top_left\")\nplot.add_glyph(source, image2)\n\nimage3 = ImageURL(url=dict(value=url), x=200, y=-100, anchor=\"bottom_right\")\nplot.add_glyph(source, image3)\n\nxaxis = LinearAxis()\nplot.add_layout(xaxis, 'below')\n\nyaxis = LinearAxis()\nplot.add_layout(yaxis,'left')\n\nplot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))\nplot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))\n\ndoc = Document( )\ndoc.add_root(plot)\n\nif __name__ == \"__main__\":\n doc.validate()\n filename = \"image_url.html\"\n with open(filename, \"w\") as f:\n f.write(file_html(doc, INLINE, \"Image URL Example\"))\n print(\"Wrote %s\" % filename)\n view(filename)\n"
] | [
[
"pandas.melt"
],
[
"numpy.linspace"
]
] |
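The `pandas.melt` call indexed for hover_span.py reshapes the wide per-degree columns into a long (Year, Degree, Count) table before plotting. A small sketch with a stand-in DataFrame (the real script uses `bokeh.sampledata.degrees.data`):

```python
# Wide-to-long reshape as in hover_span.py; the DataFrame below is a stand-in
# for the bokeh degrees sample data.
import pandas as pd

data = pd.DataFrame({
    'Year': [2000, 2001],
    'Biology': [10.0, 12.0],
    'Business': [20.0, 22.0],
    'Computer Science': [5.0, 7.0],
})

long_data = pd.melt(data, id_vars=['Year'],
                    value_vars=['Biology', 'Business', 'Computer Science'],
                    value_name='Count', var_name='Degree')
print(long_data)
```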
itamblyn/pytorch_geometric | [
"67ed16492863378b8434b03713a75924f0cc5df1",
"86308313d6f1af56e5931e2ca89bb1a867c10ff3"
] | [
"torch_geometric/nn/conv/han_conv.py",
"torch_geometric/transforms/one_hot_degree.py"
] | [
"from typing import Union, Dict, Optional, List\r\n\r\nimport torch\r\nfrom torch import Tensor, nn\r\nimport torch.nn.functional as F\r\n\r\nfrom torch_geometric.typing import NodeType, EdgeType, Metadata, Adj\r\nfrom torch_geometric.nn.dense import Linear\r\nfrom torch_geometric.utils import softmax\r\nfrom torch_geometric.nn.conv import MessagePassing\r\nfrom torch_geometric.nn.inits import glorot, reset\r\n\r\n\r\ndef group(xs: List[Tensor], q: nn.Parameter,\r\n k_lin: nn.Module) -> Optional[Tensor]:\r\n if len(xs) == 0:\r\n return None\r\n else:\r\n num_edge_types = len(xs)\r\n out = torch.stack(xs)\r\n attn_score = (q * torch.tanh(k_lin(out)).mean(1)).sum(-1)\r\n attn = F.softmax(attn_score, dim=0)\r\n out = torch.sum(attn.view(num_edge_types, 1, -1) * out, dim=0)\r\n return out\r\n\r\n\r\nclass HANConv(MessagePassing):\r\n r\"\"\"\r\n The Heterogenous Graph Attention Operator from the\r\n `\"Heterogenous Graph Attention Network\"\r\n <https://arxiv.org/pdf/1903.07293.pdf>`_ paper.\r\n\r\n .. note::\r\n\r\n For an example of using HANConv, see `examples/hetero/han_imdb.py\r\n <https://github.com/pyg-team/pytorch_geometric/blob/master/examples/\r\n hetero/han_imdb.py>`_.\r\n\r\n Args:\r\n in_channels (int or Dict[str, int]): Size of each input sample of every\r\n node type, or :obj:`-1` to derive the size from the first input(s)\r\n to the forward method.\r\n out_channels (int): Size of each output sample.\r\n metadata (Tuple[List[str], List[Tuple[str, str, str]]]): The metadata\r\n of the heterogeneous graph, *i.e.* its node and edge types given\r\n by a list of strings and a list of string triplets, respectively.\r\n See :meth:`torch_geometric.data.HeteroData.metadata` for more\r\n information.\r\n heads (int, optional): Number of multi-head-attentions.\r\n (default: :obj:`1`)\r\n negative_slope (float, optional): LeakyReLU angle of the negative\r\n slope. (default: :obj:`0.2`)\r\n dropout (float, optional): Dropout probability of the normalized\r\n attention coefficients which exposes each node to a stochastically\r\n sampled neighborhood during training. 
(default: :obj:`0`)\r\n **kwargs (optional): Additional arguments of\r\n :class:`torch_geometric.nn.conv.MessagePassing`.\r\n \"\"\"\r\n def __init__(\r\n self,\r\n in_channels: Union[int, Dict[str, int]],\r\n out_channels: int,\r\n metadata: Metadata,\r\n heads: int = 1,\r\n negative_slope=0.2,\r\n dropout: float = 0.0,\r\n **kwargs,\r\n ):\r\n super().__init__(aggr='add', node_dim=0, **kwargs)\r\n\r\n if not isinstance(in_channels, dict):\r\n in_channels = {node_type: in_channels for node_type in metadata[0]}\r\n\r\n self.heads = heads\r\n self.in_channels = in_channels\r\n self.out_channels = out_channels\r\n self.negative_slope = negative_slope\r\n self.metadata = metadata\r\n self.dropout = dropout\r\n self.k_lin = nn.Linear(out_channels, out_channels)\r\n self.q = nn.Parameter(torch.Tensor(1, out_channels))\r\n\r\n self.proj = nn.ModuleDict()\r\n for node_type, in_channels in self.in_channels.items():\r\n self.proj[node_type] = Linear(in_channels, out_channels)\r\n\r\n self.lin_src = nn.ParameterDict()\r\n self.lin_dst = nn.ParameterDict()\r\n dim = out_channels // heads\r\n for edge_type in metadata[1]:\r\n edge_type = '__'.join(edge_type)\r\n self.lin_src[edge_type] = nn.Parameter(torch.Tensor(1, heads, dim))\r\n self.lin_dst[edge_type] = nn.Parameter(torch.Tensor(1, heads, dim))\r\n\r\n self.reset_parameters()\r\n\r\n def reset_parameters(self):\r\n reset(self.proj)\r\n glorot(self.lin_src)\r\n glorot(self.lin_dst)\r\n self.k_lin.reset_parameters()\r\n glorot(self.q)\r\n\r\n def forward(\r\n self, x_dict: Dict[NodeType, Tensor],\r\n edge_index_dict: Dict[EdgeType,\r\n Adj]) -> Dict[NodeType, Optional[Tensor]]:\r\n r\"\"\"\r\n Args:\r\n x_dict (Dict[str, Tensor]): A dictionary holding input node\r\n features for each individual node type.\r\n edge_index_dict: (Dict[str, Union[Tensor, SparseTensor]]): A\r\n dictionary holding graph connectivity information for each\r\n individual edge type, either as a :obj:`torch.LongTensor` of\r\n shape :obj:`[2, num_edges]` or a\r\n :obj:`torch_sparse.SparseTensor`.\r\n\r\n :rtype: :obj:`Dict[str, Optional[Tensor]]` - The ouput node embeddings\r\n for each node type.\r\n In case a node type does not receive any message, its output will\r\n be set to :obj:`None`.\r\n \"\"\"\r\n H, D = self.heads, self.out_channels // self.heads\r\n x_node_dict, out_dict = {}, {}\r\n\r\n # Iterate over node types:\r\n for node_type, x_node in x_dict.items():\r\n x_node_dict[node_type] = self.proj[node_type](x_node).view(\r\n -1, H, D)\r\n out_dict[node_type] = []\r\n\r\n # Iterate over edge types:\r\n for edge_type, edge_index in edge_index_dict.items():\r\n src_type, _, dst_type = edge_type\r\n edge_type = '__'.join(edge_type)\r\n lin_src = self.lin_src[edge_type]\r\n lin_dst = self.lin_dst[edge_type]\r\n x_dst = x_node_dict[dst_type]\r\n alpha_src = (x_node_dict[src_type] * lin_src).sum(dim=-1)\r\n alpha_dst = (x_dst * lin_dst).sum(dim=-1)\r\n alpha = (alpha_src, alpha_dst)\r\n # propagate_type: (x_dst: Tensor, alpha: PairTensor)\r\n out = self.propagate(edge_index, x_dst=x_dst, alpha=alpha,\r\n size=None)\r\n\r\n out = F.relu(out)\r\n out_dict[dst_type].append(out)\r\n\r\n # iterate over node types:\r\n for node_type, outs in out_dict.items():\r\n out = group(outs, self.q, self.k_lin)\r\n\r\n if out is None:\r\n out_dict[node_type] = None\r\n continue\r\n out_dict[node_type] = out\r\n\r\n return out_dict\r\n\r\n def message(self, x_dst_i: Tensor, alpha_i: Tensor, alpha_j: Tensor,\r\n index: Tensor, ptr: Optional[Tensor],\r\n size_i: Optional[int]) -> 
Tensor:\r\n\r\n alpha = alpha_j + alpha_i\r\n alpha = F.leaky_relu(alpha, self.negative_slope)\r\n alpha = softmax(alpha, index, ptr, size_i)\r\n alpha = F.dropout(alpha, p=self.dropout, training=self.training)\r\n out = x_dst_i * alpha.view(-1, self.heads, 1)\r\n return out.view(-1, self.out_channels)\r\n\r\n def __repr__(self) -> str:\r\n return (f'{self.__class__.__name__}({self.out_channels}, '\r\n f'heads={self.heads})')\r\n",
"import torch\nimport torch.nn.functional as F\n\nfrom torch_geometric.utils import degree\nfrom torch_geometric.transforms import BaseTransform\n\n\nclass OneHotDegree(BaseTransform):\n r\"\"\"Adds the node degree as one hot encodings to the node features.\n\n Args:\n max_degree (int): Maximum degree.\n in_degree (bool, optional): If set to :obj:`True`, will compute the\n in-degree of nodes instead of the out-degree.\n (default: :obj:`False`)\n cat (bool, optional): Concat node degrees to node features instead\n of replacing them. (default: :obj:`True`)\n \"\"\"\n def __init__(self, max_degree, in_degree=False, cat=True):\n self.max_degree = max_degree\n self.in_degree = in_degree\n self.cat = cat\n\n def __call__(self, data):\n idx, x = data.edge_index[1 if self.in_degree else 0], data.x\n deg = degree(idx, data.num_nodes, dtype=torch.long)\n deg = F.one_hot(deg, num_classes=self.max_degree + 1).to(torch.float)\n\n if x is not None and self.cat:\n x = x.view(-1, 1) if x.dim() == 1 else x\n data.x = torch.cat([x, deg.to(x.dtype)], dim=-1)\n else:\n data.x = deg\n\n return data\n\n def __repr__(self) -> str:\n return f'{self.__class__.__name__}({self.max_degree})'\n"
] | [
[
"torch.stack",
"torch.nn.Linear",
"torch.nn.functional.dropout",
"torch.nn.functional.softmax",
"torch.nn.ParameterDict",
"torch.nn.functional.relu",
"torch.nn.functional.leaky_relu",
"torch.nn.ModuleDict",
"torch.Tensor"
],
[
"torch.nn.functional.one_hot"
]
] |
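For the first file in this row (han_conv.py), the `torch.stack` / `softmax` / `tanh` calls all come from the semantic-level attention in `group()`; the `one_hot` call indexed for the second file is the degree encoding in `OneHotDegree`. A standalone reconstruction of `group()` with illustrative shapes (3 edge types, 4 destination nodes, 8 channels):

```python
# Semantic-level attention as in HANConv's group(): stack per-edge-type
# embeddings, score them with a learnable query via tanh(k_lin(.)), softmax
# over edge types, and take the attention-weighted sum.
import torch
import torch.nn.functional as F
from torch import nn

out_channels = 8
q = nn.Parameter(torch.randn(1, out_channels))
k_lin = nn.Linear(out_channels, out_channels)

xs = [torch.randn(4, out_channels) for _ in range(3)]      # one tensor per edge type
out = torch.stack(xs)                                      # [3, 4, C]
attn_score = (q * torch.tanh(k_lin(out)).mean(1)).sum(-1)  # [3]
attn = F.softmax(attn_score, dim=0)
fused = torch.sum(attn.view(len(xs), 1, -1) * out, dim=0)  # [4, C]
print(fused.shape)  # torch.Size([4, 8])
```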
anibalsolon/brainhack-donostia.github.io | [
"ad4f30f938923af7ff85fed542972f94f2032d13"
] | [
"populate_projects.py"
] | [
"import os\nimport pandas as pd\nfrom string import Template\nimport wget\n\ncsv_file_path = \"https://docs.google.com/spreadsheets/d/1AlflVlTg1KmajQrWBOUBT2XeoAUqfjB9SCQfDIPvSXo/export?format=csv&gid=565678921\"\nproject_card_path = \"assets/templates/project_card.html\"\nprojects_page_path = \"assets/templates/template_projects.md\"\n\n\ndef populate_project_card(title, description, leader):\n with open(str(project_card_path), 'r') as card:\n card_tpl = Template(card.read())\n card_html = card_tpl.substitute(projectTitle=title,\n projectDescription=description,\n projectLeader=leader)\n card.close()\n return card_html\n\n\ndef populate_projects_page(html):\n with open(str(projects_page_path), 'r') as prj:\n prj_tpl = Template(prj.read())\n prj_html = prj_tpl.substitute(projectCards=html,\n link=\"/projects/\")\n prj.close()\n return prj_html\n\n\ndef main():\n # Download CSV file\n filename = wget.download(csv_file_path)\n\n # Read CSV file\n df = pd.read_csv(filename)\n df = df[df[\"Leader:\"].notna()]\n\n prj_card = \"\"\n\n for pj_index, prj_row in df.iterrows():\n prj_title = prj_row[\"Project title:\"]\n prj_descr = prj_row[\"Project description:\"]\n prj_leader = prj_row[\"Leader:\"]\n\n prj_card += populate_project_card(prj_title, prj_descr, prj_leader)\n\n prj_page = populate_projects_page(prj_card)\n\n with open(\"projects.md\", \"wb\") as f:\n f.write(prj_page.encode(\"utf-8\"))\n\n os.remove(filename)\n\nif __name__ == \"__main__\":\n main()"
] | [
[
"pandas.read_csv"
]
] |
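This row's script boils down to `pandas.read_csv` on a downloaded sheet, a `notna()` filter on the leader column, and `string.Template` substitution per project. The sketch below uses an in-memory CSV instead of the Google Sheets download and a hypothetical inline template in place of project_card.html:

```python
# Read-filter-template loop as in populate_projects.py, with in-memory
# stand-ins for the downloaded CSV and the HTML card template.
import io
from string import Template

import pandas as pd

csv_text = """Project title:,Project description:,Leader:
Brain atlas,Map brains,Ada
Orphan row,No leader,
"""

df = pd.read_csv(io.StringIO(csv_text))
df = df[df["Leader:"].notna()]          # drop projects without a leader

card_tpl = Template("<div>$projectTitle by $projectLeader: $projectDescription</div>")
cards = "".join(
    card_tpl.substitute(projectTitle=row["Project title:"],
                        projectDescription=row["Project description:"],
                        projectLeader=row["Leader:"])
    for _, row in df.iterrows()
)
print(cards)
```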
kylepgr/heart-disease-pred | [
"d128cc815dde4839ba18e887113bb47387499ce1"
] | [
"heart_app/views.py"
] | [
"from typing_extensions import SupportsIndex\r\nfrom django.shortcuts import render\r\n\r\n# Create your views here.\r\nfrom django.http import HttpResponse\r\nfrom .forms import InputForm\r\nimport pandas as pd\r\nimport numpy as np\r\nimport pickle\r\nfrom pymongo import MongoClient\r\n\r\nclient = MongoClient('localhost', 27017)\r\ndb = client['PatientDB']\r\n\r\n\r\nloaded_model = pickle.load(open(\"C:/Users/Kyle/Untitled Folder/finalized_model.pkl\", 'rb'))\r\n\r\ndef index(request):\r\n if request.method == \"POST\":\r\n myform = InputForm(request.POST)\r\n if myform.is_valid():\r\n age = myform.cleaned_data['age_v']\r\n sex = myform.cleaned_data['sex_v']\r\n\r\n cp = myform.cleaned_data['cp_v']\r\n thalach = myform.cleaned_data['thalach_v']\r\n exang = myform.cleaned_data['exang_v']\r\n oldpeak = myform.cleaned_data['oldpeak_v']\r\n slope = myform.cleaned_data['slope_v']\r\n\r\n ca = myform.cleaned_data['ca_v']\r\n\r\n m_inputs = [[age, sex, cp, thalach, exang, oldpeak, slope, ca]]\r\n \r\n\r\n y_pred = [np.exp(point)/np.sum(np.exp(point), axis=0)\r\n for point in m_inputs]\r\n\r\n \r\n return render(request, 'index.html', {'prediction': round(y_pred.mean())})\r\n\r\n\r\n else:\r\n myform = InputForm()\r\n\r\n \r\n\r\n return render(request, 'index.html', {'form': myform})\r\n\r\ndef updateDataBase(request):\r\n temp={}\r\n \r\n temp['age']= myform.cleaned_data['age_v']\r\n temp['sex']= myform.cleaned_data['sex_v']\r\n temp['cp']= myform.cleaned_data['cp_v']\r\n temp['thalach']= myform.cleaned_data['thalach_v']\r\n temp['exang']= myform.cleaned_data['exang_v']\r\n temp['oldpeak']= myform.cleaned_data['oldpeak_v']\r\n temp['slope']= myform.cleaned_data['slope_v']\r\n temp['ca']= myform.cleaned_data['ca_v']\r\n \r\n collectionD.insert_one(temp)\r\n countOfrow = collectionD.find().count()\r\n context = {\"Row Count\": countOfrow}\r\n \r\n return render(request,'viewDB.html',context)\r\n \r\n \r\n\r\n\r\n \r\n"
] | [
[
"numpy.exp"
]
] |
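The single `numpy.exp` entry indexed here is the softmax-style expression applied to the form inputs in `index()`. A standalone sketch with made-up feature values; note that the row's code then calls `.mean()` on a Python list, so averaging through `np.mean` (as below) is needed for the rounding step to work.

```python
# Softmax-style normalisation as in the row's y_pred computation; the input
# row (age, sex, cp, thalach, exang, oldpeak, slope, ca) is made up.
import numpy as np

m_inputs = [[63, 1, 3, 150, 0, 2.3, 0, 0]]

y_pred = [np.exp(point) / np.sum(np.exp(point), axis=0) for point in m_inputs]
print(round(float(np.mean(y_pred))))  # -> 0 for a single 8-feature row
```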
JanaLasser/agent_based_COVID_SEIRX | [
"c4e28d472a0484fe1a125ba6974683973141c09e"
] | [
"src/scseirx/model_SEIRX.py"
] | [
"import numpy as np\nimport networkx as nx\nfrom math import gamma\nfrom scipy.optimize import root_scalar\n\nfrom mesa import Model\nfrom mesa.time import RandomActivation, SimultaneousActivation\nfrom mesa.datacollection import DataCollector\n\nfrom scseirx.testing_strategy import Testing\n\n## data collection functions ##\ndef get_N_diagnostic_tests(model):\n return model.number_of_diagnostic_tests\n\n\ndef get_N_preventive_screening_tests(model):\n return model.number_of_preventive_screening_tests\n\n\ndef get_infection_state(agent):\n if agent.exposed == True: return 'exposed'\n elif agent.infectious == True: return 'infectious'\n elif agent.recovered == True: return 'recovered'\n else: return 'susceptible'\n\ndef get_quarantine_state(agent):\n if agent.quarantined == True: return True\n else: return False\n\n\ndef get_undetected_infections(model):\n return model.undetected_infections\n\n\ndef get_predetected_infections(model):\n return model.predetected_infections\n\n\ndef get_pending_test_infections(model):\n return model.pending_test_infections\n\n\ndef get_diagnostic_test_detected_infections_student(model):\n return model.positive_tests[model.Testing.diagnostic_test_type]['student']\ndef get_diagnostic_test_detected_infections_teacher(model):\n return model.positive_tests[model.Testing.diagnostic_test_type]['teacher']\ndef get_diagnostic_test_detected_infections_family_member(model):\n return model.positive_tests[model.Testing.diagnostic_test_type]['family_member']\ndef get_diagnostic_test_detected_infections_resident(model):\n return model.positive_tests[model.Testing.diagnostic_test_type]['resident']\ndef get_diagnostic_test_detected_infections_employee(model):\n return model.positive_tests[model.Testing.diagnostic_test_type]['employee']\ndef get_diagnostic_test_detected_infections_unistudent(model):\n return model.positive_tests[model.Testing.diagnostic_test_type]['unistudent']\ndef get_diagnostic_test_detected_infections_lecturer(model):\n return model.positive_tests[model.Testing.diagnostic_test_type]['lecturer']\n\ndiagnostic_test_detected_infections_funcs = {\n 'student':get_diagnostic_test_detected_infections_student,\n 'teacher':get_diagnostic_test_detected_infections_teacher,\n 'family_member':get_diagnostic_test_detected_infections_family_member,\n 'resident':get_diagnostic_test_detected_infections_resident,\n 'employee':get_diagnostic_test_detected_infections_employee,\n 'unistudent':get_diagnostic_test_detected_infections_unistudent,\n 'lecturer':get_diagnostic_test_detected_infections_lecturer\n}\n\ndef get_preventive_test_detected_infections_student(model):\n return model.positive_tests[model.Testing.preventive_screening_test_type]['student']\ndef get_preventive_test_detected_infections_teacher(model):\n return model.positive_tests[model.Testing.preventive_screening_test_type]['teacher']\ndef get_preventive_test_detected_infections_family_member(model):\n return model.positive_tests[model.Testing.preventive_screening_test_type]['family_member']\ndef get_preventive_test_detected_infections_resident(model):\n return model.positive_tests[model.Testing.preventive_screening_test_type]['resident']\ndef get_preventive_test_detected_infections_employee(model):\n return model.positive_tests[model.Testing.preventive_screening_test_type]['employee']\ndef get_preventive_test_detected_infections_unistudent(model):\n return model.positive_tests[model.Testing.preventive_screening_test_type]['unistudent']\ndef get_preventive_test_detected_infections_lecturer(model):\n return 
model.positive_tests[model.Testing.preventive_screening_test_type]['lecturer']\n\npreventive_test_detected_infections_funcs = {\n 'student':get_preventive_test_detected_infections_student,\n 'teacher':get_preventive_test_detected_infections_teacher,\n 'family_member':get_preventive_test_detected_infections_family_member,\n 'resident':get_preventive_test_detected_infections_resident,\n 'employee':get_preventive_test_detected_infections_employee,\n 'unistudent':get_preventive_test_detected_infections_unistudent,\n 'lecturer':get_preventive_test_detected_infections_lecturer\n}\n\n\n# parameter sanity check functions\n\n\ndef check_positive(var):\n\tassert var >= 0, 'negative number'\n\treturn var\n\n\ndef check_bool(var):\n\tassert type(var) == bool, 'not a bool'\n\treturn var\n\n\ndef check_positive_int(var):\n if var == None:\n return var\n assert type(var) == int, 'not an integer'\n assert var >= 0, 'negative number'\n return var\n\n\ndef check_contact_type_dict(var):\n\tassert type(var) == dict, 'not a dictionary'\n\tassert set(var.keys()).issubset({'very_far', 'far', 'intermediate', 'close'}), \\\n\t\t'does not contain the correct contact types (has to be very_far, far, intermediate or close)'\n\tassert all((isinstance(i, int) or isinstance(i, float)) for i in var.values()), \\\n\t\t'contact type weights are not numeric'\n\n\treturn var\n\n\ndef check_K1_contact_types(var):\n for area in var:\n assert area in ['very_far', 'far', 'intermediate',\n 'close'], 'K1 contact type not recognised'\n return var\n\n\ndef check_testing(var):\n assert var in ['diagnostic', 'background', 'preventive',\n 'background+preventive', False], \\\n 'unknown testing mode: {}'.format(var)\n\n return var\n\n\n\ndef check_probability(var):\n\tassert (type(var) == float) or (var == 0) or (var == 1), \\\n\t\t '{} not a float'.format(var)\n\tassert var >= 0, 'probability negative'\n\tassert var <= 1, 'probability larger than 1'\n\treturn var\n\n\ndef check_graph(var):\n assert type(var) in [nx.Graph, nx.MultiGraph], 'not a networkx graph'\n assert len(var.nodes) > 0, 'graph has no nodes'\n assert len(var.edges) > 0, 'graph has no edges'\n areas = [e[2]['contact_type'] for e in var.edges(data=True)]\n areas = set(areas)\n for a in areas:\n assert a in {'very_far', 'far', 'intermediate',\n 'close'}, 'contact type {} not recognised'.format(a)\n return var\n\n\ndef check_index_case(var, agent_types):\n\tallowed_strings = agent_types[:]\n\tallowed_strings.extend(['continuous'])\n\tassert var in allowed_strings, 'unknown index case mode'\n\treturn var\n\n\ndef check_discount(var):\n if var['slope'] != None:\n assert var['slope'] <= 0, 'slope needs to be <= 0 or None'\n assert np.abs(var['slope']) <= 1, 'absolute value of slope needs to be <= 1'\n assert var['intercept'], 'intercept needs to be positive'\n assert var['intercept'], 'intercept needs to be <= 1'\n return var\n\n\ndef get_weibull_shape(k, mu, var):\n '''\n Calculates the shape parameter of a Weibull distribution, given its mean\n mu and its variance var\n '''\n return var / mu**2 - gamma(1 + 2/k) / gamma(1+1/k)**2 + 1\n\n\n\ndef get_weibull_scale(mu, k):\n '''\n Calculates the scale parameter of a Weibull distribution, given its mean\n mu and its shape parameter k\n '''\n return mu / gamma(1 + 1/k)\n\n\ndef weibull_two_param(shape, scale):\n '''\n A two-parameter Weibull distribution, based on numpy ramdon's single\n parameter distribution. 
We use this distribution in the simulation to draw\n random epidemiological parameters for agents from the given distribution\n See https://numpy.org/doc/stable/reference/random/generated/numpy.random.weibull.html\n '''\n return scale * np.random.weibull(shape)\n\n\nclass SEIRX(Model):\n '''\n A model with a number of different agents that reproduces\n the SEIRX dynamics of pandemic spread in a facility. Note:\n all times are set to correspond to days\n\n G: networkx undirected graph, interaction graph between agents. Edges have\n to have edge the edge attribute 'contact_type' specifying the closeness of\n contacts, which can be ['very far', 'far', 'intermediate' and 'close'].\n Nodes have to have the node attribute 'type' which specifies the agent type\n of the given node (for example 'student' or 'teacher' in a school scenario).\n In addition, nodes can have the attribute 'unit', which assigns them to a\n unit in space (for example a 'class' in a school scenario).\n\n verbosity: integer in [0, 1, 2], controls text output to std out to track\n simulation progress and transmission dynamics. Default = 0.\n\n testing, default = 'diagnostic'\n 'diagnostic': only diagnostic tests for symptomatic agents\n 'background': adds background screens of all agents after a positive\n diagnostic test\n 'preventive': adds preventive screens of agent groups to diagnostic\n testing. Screens happen in time intervals specified \n separately for each agent group in the variable \n 'screening_interval'.\n 'background+preventive': preventive screens AND background screens on\n top of diagnostic testing.\n\n infection_duration, default = 11 NOTE: includes the time an agent is exposed\n but not yet infectious at the beginning of an infection\n positive integer: mean or median of the infection duration in days\n list of two floats: mean and standard deviation of a distribution\n specifying the infection duration in days. These\n numbers will be used to construct a Weibull\n distribution from which the infection duration will\n be drawn for every agent individually\n\n exposure_duration, default = 4. Sets the time from transmission to becoming\n infectious\n positive integer: mean or median of the exposure duration in days\n list of two floats: mean and standard deviation of a distribution\n specifying the exposure duration in days. These\n numbers will be used to construct a Weibull\n distributoin from which the exposure duration will\n be drawn for every agent individually.\n\n time_until_symptoms, default = 6. Sets the time from transmission to\n (potentially) developing symptoms. Symptom probability has to be set for\n each agent group individually using the parameter 'symptom_probability'\n positive integer: mean or median of the time until symptoms in days\n list of two floats: mean and standard deviation of a distribution\n specifying the time until symptoms in days. These\n numbers will be used to construct a Weibull\n distribution from which the time until symptoms will\n be drawn for every agent individually.\n\n quarantine_duration, default = 14. Positive integer, sets the time a\n positively tested agent is quarantined in days\n\n infection_risk_contact_type_weights: dictionary of the form\n {'very_far':float, 'far':float, 'intermediate':float, 'close':float}\n that sets transmission risk multipliers for different contact types of\n agents specified in the contact network G. Default: {'very_far': 0.1,\n 'far': 0.5, 'intermediate': 1, 'close': 3}\n\n subclinical_modifier: default = 1.0. 
Float, modifies the infectiousness of\n asymptomatic cases. Example: if subclinical_modifier = 0.5, the\n infectiousness of an asymptomatic case will be reduced to 50%.\n\n K1_contact_types: list of strings from ['very_far', 'far', 'intermediate',\n 'close']. Definition of contact types for which agents are considered\n \"K1 contact persons\" if they had contact to a positively tested person wtith\n a specified contact intensity. Default = ['close'].\n\n diagnostic_test_type, default = 'one_day_PCR'. String, specifies the test\n technology and test result turnover time used for diagnostic testing. For\n example 'same_day_antigen' or 'two_day_PCR'. See module \"Testing\" for\n different implemented testing techologies.\n\n preventive_screening_test_type:, default = 'one_day_PCR', String, specifies\n the test technology and test result turnover time used for preventive\n sreening. For example 'same_day_antigen' or 'two_day_PCR'. See module\n \"Testing\" for different implemented testing techologies.\n\n follow_up_testing_interval, default = None. Positive integer, sets the time\n a follow-up screen (background screen) is initiated after an initial screen\n triggered by a positive test result. Only applies if the testing strategy is\n 'background' or preventive.\n\n liberating_testing, default = False. Boolean, flag that specifies, whether\n or not an agent is released from quarantine after returning a negative test\n result.\n\n\tindex_case, default = 'employee' (nursing home scenario) or 'teacher'\n (school scenario). Specifies how infections are introduced into the facility.\n agent_type: If an agent type (for example 'student' or 'teacher' in\n the school scenario) is specified, a single randomly\n chosen agent from this agent group will become the index\n case and no further index cases will be introduced into\n the scenario.\n 'continuous': In this case, agents have a continuous risk to become\n index cases in every simulation step. The risk has to\n be specified for every agent group individually, using\n the 'index_probability' parameter. If only a single\n agent group has a non-zero index probability, then only\n agents from this group can become index cases.\n\n\n agent_types: dictionary of the structure\n {\n agent type:\n {\n screening interval : integer, number of days between each preventive\n screen in this agent group\n\n index probability : float in the range [0, 1], sets the probability\n to become an index case in each time step\n\n mask : bool\n whether or not the agent type is wearing a mask\n }\n }\n\n The dictionary's keys are the names of the agent types which have to\n correspond to the node attributes in the contact graph. The screening\n interval sets the time-delay between preventive screens of this agent group,\n the index probability sets the probability of a member of this agent group\n becoming an index case in every time step\n\n seed: positive integer, fixes the seed of the simulation to enable\n repeatable simulation runs. 
If seed = None, the simulation will be\n initialized at random.\n '''\n\n def __init__(self, G,\n verbosity = 0,\n base_transmission_risk = 0.05,\n testing='diagnostic',\n exposure_duration = [5.0, 1.9],\n time_until_symptoms = [6.4, 0.8],\n infection_duration = [10.91, 3.95],\n quarantine_duration = 10,\n subclinical_modifier = 0.6,\n infection_risk_contact_type_weights = {\n 'very_far': 0.1,\n 'far': 0.25,\n 'intermediate': 0.5,\n 'close': 1},\n K1_contact_types = ['close'],\n diagnostic_test_type = 'one_day_PCR',\n preventive_screening_test_type = 'same_day_antigen',\n follow_up_testing_interval = None,\n liberating_testing = False,\n index_case = 'teacher',\n agent_types = {\n 'teacher': {'screening_interval': None,\n 'index_probability': 0,\n 'mask':False,\n 'vaccination_ratio': 0},\n 'student': {'screening_interval': None,\n 'index_probability': 0,\n 'mask':False,\n 'vaccination_ratio': 0},\n 'family_member':{'screening_interval': None,\n 'index_probability': 0,\n 'mask':False,\n 'vaccination_ratio': 0}},\n age_transmission_risk_discount = \\\n {'slope':-0.02,\n 'intercept':1},\n age_symptom_modification = \\\n {'slope':-0.02545,\n 'intercept':0.854545},\n mask_filter_efficiency = {'exhale':0, 'inhale':0},\n transmission_risk_ventilation_modifier = 0,\n transmission_risk_vaccination_modifier = {\n 'reception':1,\n 'transmission':0},\n seed = None):\n\n # mesa models already implement fixed seeds through their own random\n # number generations. Sadly, we need to use the Weibull distribution\n # here, which is not implemented in mesa's random number generation\n # module. Therefore, we need to initialize the numpy random number\n # generator with the given seed as well\n if seed != None:\n np.random.seed(seed)\n\n # sets the (daily) transmission risk for a household contact without\n # any precautions. Target infection ratios are taken from literature\n # and the value of the base_transmission_risk is calibrated such that\n # the simulation produces the correct infection ratios in a household\n # setting with the given distributions for epidemiological parameters\n # of agents\n self.base_transmission_risk = base_transmission_risk\n \t# sets the level of detail of text output to stdout (0 = no output)\n self.verbosity = check_positive_int(verbosity)\n # flag to turn off the testing & tracing strategy\n self.testing = check_testing(testing)\n self.running = True # needed for the batch runner implemented by mesa\n # set the interaction mode to simultaneous activation\n self.schedule = SimultaneousActivation(self)\n\n\n # internal step counter used to launch screening tests\n self.Nstep = 0\n\n # since we may have weekday-specific contact networks, we need\n # to keep track of the day of the week. 
Since the index case\n # per default is introduced at step 0 in index case mode, we\n # need to offset the starting weekday by a random number of weekdays\n # to prevent artifacts from always starting on the same day of the week\n\n self.weekday_offset = self.random.randint(1, 8)\n self.weekday = self.Nstep + self.weekday_offset\n\n ## epidemiological parameters: can be either a single integer or the\n # mean and standard deviation of a distribution\n self.epi_params = {}\n # counter to track the number of pathological parameter combinations\n # that had to be re-rolled (only here for debugging and control reasons)\n self.param_rerolls = 0\n\n for param, param_name in zip([exposure_duration, time_until_symptoms,\n infection_duration],['exposure_duration', 'time_until_symptoms',\n 'infection_duration']):\n\n if isinstance(param, int):\n self.epi_params[param_name] = check_positive_int(param)\n\n elif isinstance(param, list) and len(param) == 2:\n\n mu = check_positive(param[0])\n var = check_positive(param[1]**2)\n shape = root_scalar(get_weibull_shape, args=(mu, var),\n method='toms748', bracket=[0.2, 500]).root\n scale = get_weibull_scale(mu, shape)\n\n self.epi_params[param_name] = [shape, scale]\n else:\n print('{} format not recognized, should be either a single '+\\\n 'int or a tuple of two positive numbers'.format(param_name))\n\n\n # duration of quarantine\n self.quarantine_duration = check_positive_int(quarantine_duration)\n\n self.infection_risk_area_weights = check_contact_type_dict(\n infection_risk_contact_type_weights)\n\n # modifier for infectiosness for asymptomatic cases\n self.subclinical_modifier = check_positive(subclinical_modifier)\n # modifiers for the infection risk, depending on contact type\n self.infection_risk_contact_type_weights = infection_risk_contact_type_weights\n\n # modifications for age-dependent transmission and reception risks and\n # symptom probabilities\n self.age_transmission_risk_discount = \\\n check_discount(age_transmission_risk_discount)\n\n self.age_symptom_modification = age_symptom_modification\n #check_discount(age_symptom_modification)\n\n self.mask_filter_efficiency = mask_filter_efficiency\n self.transmission_risk_ventilation_modifier = \\\n transmission_risk_ventilation_modifier\n self.transmission_risk_vaccination_modifier = \\\n transmission_risk_vaccination_modifier\n ## agents and their interactions\n # interaction graph of agents\n self.G = check_graph(G)\n # add weights as edge attributes so they can be visualised easily\n if type(self.G) == nx.MultiGraph:\n for (u, v, key, contact_type) in self.G.edges(keys=True,\n data='contact_type'):\n self.G[u][v][key]['weight'] = \\\n self.infection_risk_contact_type_weights[contact_type]\n else:\n for e in G.edges(data=True):\n G[e[0]][e[1]]['weight'] = self.infection_risk_contact_type_weights\\\n \t[G[e[0]][e[1]]['contact_type']]\n\n # extract the different agent types from the contact graph\n self.agent_types = list(agent_types.keys())\n # dictionary of available agent classes with agent types and classes\n self.agent_classes = {}\n if 'resident' in agent_types:\n from scseirx.agent_resident import resident\n self.agent_classes['resident'] = resident\n if 'employee' in agent_types:\n from scseirx.agent_employee import employee\n self.agent_classes['employee'] = employee\n if 'student' in agent_types:\n from scseirx.agent_student import student\n self.agent_classes['student'] = student\n if 'teacher' in agent_types:\n from scseirx.agent_teacher import teacher\n 
self.agent_classes['teacher'] = teacher\n if 'family_member' in agent_types:\n from scseirx.agent_family_member import family_member\n self.agent_classes['family_member'] = family_member\n if 'lecturer' in agent_types:\n from scseirx.agent_lecturer import lecturer\n self.agent_classes['lecturer'] = lecturer\n if 'unistudent' in agent_types:\n from scseirx.agent_unistudent import unistudent\n self.agent_classes['unistudent'] = unistudent\n\n ## set agent characteristics for all agent groups\n # list of agent characteristics\n params = ['screening_interval','index_probability', 'mask' ,'vaccination_ratio',\n 'voluntary_testing_rate']\n\n # default values that are used in case a characteristic is not specified\n # for an agent group\n defaults = {'screening_interval':None,\n 'index_probability':0,\n 'mask':False,\n 'vaccination_ratio':0,\n 'voluntary_testing_rate':1\n }\n\n # sanity checks that are applied to parameters passed to the class\n # constructor to make sure they conform to model expectations\n check_funcs = [check_positive_int, check_probability, check_bool,\n check_probability, check_probability]\n\n # member dicts that store the parameter values for each agent group\n self.screening_intervals = {}\n self.index_probabilities = {}\n self.masks = {}\n self.vaccination_probabilities = {}\n self.voluntary_testing_rates = {}\n\n\n param_dicts = [self.screening_intervals, self.index_probabilities,\n self.masks, self.vaccination_probabilities, self.voluntary_testing_rates]\n\n # iterate over all possible agent parameters and agent groups: set the\n # respective value to the value passed through the constructor or to\n # the default value if no value has been passed\n for param,param_dict,check_func in zip(params,param_dicts,check_funcs):\n for at in self.agent_types:\n try:\n param_dict.update({at:check_func(agent_types[at][param])})\n except KeyError:\n param_dict.update({at:defaults[param]})\n\n # pass all parameters relevant for the testing strategy to the testing\n # class. 
NOTE: this separation is not a strictly necessary design\n # decision but I like to keep the parameters related to testing and\n # tracing in a separate place\n self.Testing = Testing(self, diagnostic_test_type,\n preventive_screening_test_type,\n check_positive_int(follow_up_testing_interval),\n self.screening_intervals,\n check_bool(liberating_testing),\n check_K1_contact_types(K1_contact_types),\n verbosity)\n\n\n # specifies either continuous probability for index cases in agent\n # groups based on the 'index_probability' for each agent group, or a\n # single (randomly chosen) index case in the passed agent group\n self.index_case = check_index_case(index_case, self.agent_types)\n\n self.num_agents = {}\n\n ## add agents\n # extract the agent nodes from the graph and add them to the scheduler\n for agent_type in self.agent_types:\n IDs = [x for x,y in G.nodes(data=True) if y['type'] == agent_type]\n self.num_agents.update({agent_type:len(IDs)})\n\n # get the agent locations (units) from the graph node attributes\n units = [self.G.nodes[ID]['unit'] for ID in IDs]\n\n # determine the agents that will be vaccinated, given the \n # vaccination ratio of the respective agent group\n vaccination_status = np.asarray([False] * len(IDs))\n if self.vaccination_probabilities[agent_type] > 0:\n n = round(self.vaccination_probabilities[agent_type] * len(IDs))\n idx = list(range(len(IDs)))\n rnd_idx = np.asarray(self.random.sample(idx, n))\n vaccination_status[rnd_idx] = True\n\n\n for ID, unit, vaccinated in zip(IDs, units, vaccination_status):\n\n tmp_epi_params = {}\n # for each of the three epidemiological parameters, check if\n # the parameter is an integer (if yes, pass it directly to the\n # agent constructor), or if it is specified by the shape and\n # scale parameters of a Weibull distribution. 
In the latter\n # case, draw a new number for every agent from the distribution\n # NOTE: parameters drawn from the distribution are rounded to\n # the nearest integer\n while True:\n for param_name, param in self.epi_params.items():\n if isinstance(param, int):\n tmp_epi_params[param_name] = param\n\n else:\n tmp_epi_params[param_name] = \\\n round(weibull_two_param(param[0], param[1]))\n\n if tmp_epi_params['exposure_duration'] > 0 and \\\n tmp_epi_params['time_until_symptoms'] >= \\\n tmp_epi_params['exposure_duration'] and\\\n tmp_epi_params['infection_duration'] > \\\n tmp_epi_params['exposure_duration']:\n break\n else:\n self.param_rerolls += 1\n if verbosity > 1:\n print('pathological epi-param case found!')\n print(tmp_epi_params)\n\n # check if the agent participates in voluntary testing\n p = self.voluntary_testing_rates[agent_type]\n voluntary_testing = np.random.choice([True, False],\n p=[p, 1-p])\n\n # construct the agent object\n a = self.agent_classes[agent_type](ID, unit, self,\n tmp_epi_params['exposure_duration'],\n tmp_epi_params['time_until_symptoms'],\n tmp_epi_params['infection_duration'],\n vaccinated,\n voluntary_testing,\n verbosity)\n self.schedule.add(a)\n\n\n\t\t# infect the first agent in single index case mode\n if self.index_case != 'continuous':\n infection_targets = [\n a for a in self.schedule.agents if a.type == index_case]\n # pick a random agent to infect in the selected agent group\n target = self.random.randint(0, len(infection_targets) - 1)\n infection_targets[target].exposed = True\n if self.verbosity > 0:\n print('{} exposed: {}'.format(index_case,\n infection_targets[target].ID))\n\n\n # list of agents that were tested positive this turn\n self.newly_positive_agents = []\n # flag that indicates if there were new positive tests this turn\n self.new_positive_tests = False\n # dictionary of flags that indicate whether a given agent group has\n # been creened this turn\n self.screened_agents= {\n 'reactive':{agent_type: False for agent_type in self.agent_types},\n 'follow_up':{agent_type: False for agent_type in self.agent_types},\n 'preventive':{agent_type: False for agent_type in self.agent_types}}\n\n\n # dictionary of counters that count the days since a given agent group\n # was screened. Initialized differently for different index case modes\n if (self.index_case == 'continuous') or \\\n \t (not np.any(list(self.Testing.screening_intervals.values()))):\n \tself.days_since_last_agent_screen = {agent_type: 0 for agent_type in\n \tself.agent_types}\n # NOTE: if we initialize these variables with 0 in the case of a single\n # index case, we introduce a bias since in 'single index case mode' the\n # first index case will always become exposed in step 0. 
To realize\n # random states of the preventive sceening procedure with respect to the\n # incidence of the index case, we have to randomly pick the days since\n # the last screen for the agent group from which the index case is\n else:\n \tself.days_since_last_agent_screen = {}\n \tfor agent_type in self.agent_types:\n \t\tif self.Testing.screening_intervals[agent_type] != None:\n \t\t\tself.days_since_last_agent_screen.update({\n \t\t\t\tagent_type: self.random.choice(range(0,\n \t\t\t\t self.Testing.screening_intervals[agent_type] + 1))})\n \t\telse:\n \t\t\tself.days_since_last_agent_screen.update({agent_type: 0})\n\n # dictionary of flags that indicates whether a follow-up screen for a\n # given agent group is scheduled\n self.scheduled_follow_up_screen = {agent_type: False for agent_type in\n \tself.agent_types}\n\n # counters\n self.number_of_diagnostic_tests = 0\n self.number_of_preventive_screening_tests = 0\n self.positive_tests = {self.Testing.preventive_screening_test_type:\n {agent_type:0 for agent_type in self.agent_types},\n self.Testing.diagnostic_test_type:\n {agent_type:0 for agent_type in self.agent_types}}\n\n self.undetected_infections = 0\n self.predetected_infections = 0\n self.pending_test_infections = 0\n self.quarantine_counters = {agent_type:0 for agent_type in agent_types.keys()}\n self.false_negative = 0\n\n # data collectors to save population counts and agent states every\n # time step\n\n model_reporters = {\n 'N_diagnostic_tests':get_N_diagnostic_tests,\n 'N_preventive_screening_tests':get_N_preventive_screening_tests,\n 'undetected_infections':get_undetected_infections,\n 'predetected_infections':get_predetected_infections,\n 'pending_test_infections':get_pending_test_infections\n }\n\n for agent_type in self.agent_types:\n model_reporters.update({\n 'diagnostic_test_detected_infections_{}'.format(agent_type):\\\n diagnostic_test_detected_infections_funcs[agent_type]\n })\n model_reporters.update({\n 'preventive_test_detected_infections_{}'.format(agent_type):\\\n preventive_test_detected_infections_funcs[agent_type]\n })\n\n\n self.datacollector = DataCollector(\n model_reporters=model_reporters,\n agent_reporters=\n \t{\n \t'infection_state': get_infection_state,\n 'quarantine_state': get_quarantine_state\n })\n\n\n ## transmission risk modifiers\n def get_transmission_risk_contact_type_modifier(self, source, target):\n # construct the edge key as combination between agent IDs and weekday\n n1 = source.ID\n n2 = target.ID\n tmp = [n1, n2]\n tmp.sort()\n n1, n2 = tmp\n key = '{}{}d{}'.format(n1, n2, self.weekday)\n contact_weight = self.G.get_edge_data(n1, n2, key)['weight']\n\n # the link weight is a multiplicative modifier of the link strength.\n # contacts of type \"close\" have, by definition, a weight of 1. Contacts\n # of type intermediate, far or very far have a weight < 1 and therefore\n # are less likely to transmit an infection. For example, if the contact\n # type far has a weight of 0.2, a contact of type far has only a 20%\n # chance of transmitting an infection, when compared to a contact of\n # type close. To calculate the probability of success p in the Bernoulli\n # trial, we need to reduce the base risk (or base probability of success)\n # by the modifications introduced by preventive measures. These\n # modifications are formulated in terms of \"probability of failure\", or\n # \"q\". 
A low contact weight has a high probability of failure, therefore\n # we return q = 1 - contact_weight here.\n q1 = 1 - contact_weight\n\n return q1\n\n\n def get_transmission_risk_age_modifier_transmission(self, source):\n '''linear function such that at age 18 the risk is that of an adult (=1).\n The slope of the line needs to be calibrated.\n '''\n age = source.age\n max_age = 18\n if age <= max_age:\n age_weight = self.age_transmission_risk_discount['slope'] * \\\n np.abs(age - max_age) + self.age_transmission_risk_discount['intercept']\n\n # The age weight can be interpreted as multiplicative factor that\n # reduces the chance for transmission with decreasing age. The slope\n # of the age_transmission_discount function is the decrease (in % of\n # the transmission risk for an 18 year old or above) of transmission\n # risk with every year a person is younger than 18 (the intercept is\n # 1 by definition).\n # To calculate the probability of success p in the Bernoulli\n # trial, we need to reduce the base risk (or base probability of \n # success) by the modifications introduced by preventive measures. \n # These modifications are formulated in terms of \"probability of \n # failure\", or \"q\". A low age weight has a high probability of \n # failure, therefore we return q = 1 - age_weight here.\n q2 = 1 - age_weight\n else:\n q2 = 0\n\n return q2\n\n\n def get_transmission_risk_age_modifier_reception(self, target):\n '''linear function such that at age 18 the risk is that of an adult (=1).\n The slope of the line needs to be calibrated.\n '''\n age = target.age\n max_age = 18\n if age <= max_age:\n age_weight = self.age_transmission_risk_discount['slope'] * \\\n np.abs(age - max_age) + self.age_transmission_risk_discount['intercept']\n # see description in get_transmission_risk_age_modifier_transmission\n q3 = 1 - age_weight\n else:\n q3 = 0\n\n return q3\n\n\n # infectiousness is constant and high until symptom onset and then\n # decreases monotonically until agents are not infectious anymore\n # at the end of the infection_duration\n def get_transmission_risk_progression_modifier(self, source):\n if source.days_since_exposure < source.exposure_duration:\n progression_weight = 0\n elif source.days_since_exposure <= source.time_until_symptoms:\n progression_weight = 1\n elif source.days_since_exposure > source.time_until_symptoms and \\\n source.days_since_exposure <= source.infection_duration:\n # we add 1 in the denominator, such that the source is also\n # (slightly) infectious on the last day of the infection_duration\n progression_weight = \\\n (source.days_since_exposure - source.time_until_symptoms) / \\\n (source.infection_duration - source.time_until_symptoms + 1)\n else:\n progression_weight = 0\n # see description in get_transmission_risk_age_modifier_transmission\n q4 = 1 - progression_weight\n\n return q4\n\n def get_transmission_risk_subclinical_modifier(self, source):\n if source.symptomatic_course == False:\n subclinical_weight = self.subclinical_modifier\n else:\n subclinical_weight = 1\n # see description in get_transmission_risk_age_modifier_transmission\n q5 = 1 - subclinical_weight\n return q5\n\n def get_transmission_risk_exhale_modifier(self, source):\n if source.mask:\n exhale_weight = self.mask_filter_efficiency['exhale']\n else:\n exhale_weight = 1\n # see description in get_transmission_risk_age_modifier_transmission\n q6 = 1 - exhale_weight\n return q6\n\n\n def get_transmission_risk_inhale_modifier(self, target):\n if target.mask:\n inhale_weight = 
self.mask_filter_efficiency['inhale']\n else:\n inhale_weight = 1\n # see description in get_transmission_risk_age_modifier_transmission\n q7 = 1 - inhale_weight\n return q7\n\n\n def get_transmission_risk_ventilation_modifier(self):\n ventilation_weight = self.transmission_risk_ventilation_modifier\n # see description in get_transmission_risk_age_modifier_transmission\n q8 = 1 - ventilation_weight\n return q8\n\n def get_transmission_risk_vaccination_modifier_reception(self, a):\n if a.vaccinated:\n q9 = self.transmission_risk_vaccination_modifier['reception']\n else:\n q9 = 0\n return q9\n\n def get_transmission_risk_vaccination_modifier_transmission(self, a):\n if a.vaccinated:\n q10 = self.transmission_risk_vaccination_modifier['transmission']\n else:\n q10 = 0\n return q10\n\n def test_agent(self, a, test_type):\n a.tested = True\n a.pending_test = test_type\n if test_type == self.Testing.diagnostic_test_type:\n self.number_of_diagnostic_tests += 1\n else:\n self.number_of_preventive_screening_tests += 1\n\n if a.exposed:\n # tests that happen in the period of time in which the agent is\n # exposed but not yet infectious. \n # Note: tests[test_type]['time_until_testable'] is negative for\n # tests that can detect an infection before agents become infectious\n if a.days_since_exposure >= a.exposure_duration + \\\n self.Testing.tests[test_type]['time_until_testable']:\n \n if self.verbosity > 1:\n print('{} {} sent positive sample (even though not infectious yet)'\n .format(a.type, a.ID))\n a.sample = 'positive'\n self.predetected_infections += 1\n self.positive_tests[test_type][a.type] += 1\n else:\n if self.verbosity > 1: print('{} {} sent negative sample'\n .format(a.type, a.ID))\n a.sample = 'negative'\n\n elif a.infectious:\n # tests that happen in the period of time in which the agent is\n # infectious and the infection is detectable by a given test\n # Note: tests[test_type]['time_until_testable'] is negative for \n # tests that can detect an infection before agents become \n # infectious. 
tests[test_type]['time_testable'] is negative for\n # tests that cease to detect an infection before agents stop being\n # infectious\n if a.days_since_exposure >= a.exposure_duration + \\\n self.Testing.tests[test_type]['time_until_testable'] and \\\n a.days_since_exposure <= a.infection_duration + \\\n self.Testing.tests[test_type]['time_testable']:\n if self.verbosity > 1:\n print('{} {} sent positive sample'.format(a.type, a.ID))\n a.sample = 'positive'\n self.positive_tests[test_type][a.type] += 1\n\n # track the undetected infections to assess how important they are\n # for infection spread\n else:\n if self.verbosity > 1:\n print('{} {} sent negative sample (even though infectious)'\n .format(a.type, a.ID))\n a.sample = 'negative'\n self.undetected_infections += 1\n\n else:\n if self.verbosity > 1: print('{} {} sent negative sample'\n .format(a.type, a.ID))\n a.sample = 'negative'\n\n # for same-day testing, immediately act on the results of the test\n if a.days_since_tested >= self.Testing.tests[test_type]['time_until_test_result']:\n a.act_on_test_result()\n\n def screen_agents(self, agent_group, test_type, screen_type):\n # only test agents that have not been tested already in this simulation\n # step and that are not already known positive cases\n\n if self.verbosity > 0:\n print('initiating {} {} screen'\\\n .format(screen_type, agent_group))\n\n untested_agents = [a for a in self.schedule.agents if\n (a.tested == False and a.known_positive == False\n and a.type == agent_group)]\n\n if len(untested_agents) > 0:\n self.screened_agents[screen_type][agent_group] = True\n self.days_since_last_agent_screen[agent_group] = 0\n\n # only test agents if they participate in voluntary testing\n if screen_type == 'preventive':\n for a in untested_agents:\n if a.voluntary_testing:\n self.test_agent(a, test_type)\n else:\n if self.verbosity > 1:\n print('not testing {} {}, not participating in voluntary testing'\\\n .format(agent_group, a.ID))\n else:\n for a in untested_agents:\n self.test_agent(a, test_type)\n\n if self.verbosity > 0:\n print()\n else:\n if self.verbosity > 0:\n print('no agents tested because all agents have already been tested')\n\n # the type of the test used in the pending test result is stored in the\n # variable pending_test\n\n def collect_test_results(self):\n agents_with_test_results = [a for a in self.schedule.agents if\n (a.pending_test and\n a.days_since_tested >= self.Testing.tests[a.pending_test]['time_until_test_result'])]\n\n return agents_with_test_results\n\n def trace_contacts(self, a):\n if a.quarantined == False:\n a.quarantined = True\n a.quarantine_start = self.Nstep\n\n if self.verbosity > 0:\n print('qurantined {} {}'.format(a.type, a.ID))\n\n # find all agents that share edges with the agent\n # that are classified as K1 contact types in the testing\n # strategy\n if a in self.G.nodes():\n K1_contacts = [e[1] for e in self.G.edges(a.ID, data=True) if\n e[2]['contact_type'] in self.Testing.K1_contact_types]\n K1_contacts = [a for a in self.schedule.agents if a.ID in K1_contacts]\n\n for K1_contact in K1_contacts:\n if self.verbosity > 0:\n print('quarantined {} {} (K1 contact of {} {})'\n .format(K1_contact.type, K1_contact.ID, a.type, a.ID))\n K1_contact.quarantined = True\n K1_contact.quarantine_start = self.Nstep\n\n def test_symptomatic_agents(self):\n # find symptomatic agents that have not been tested yet and are not\n # in quarantine and test them\n newly_symptomatic_agents = np.asarray([a for a in self.schedule.agents\n if (a.symptoms == 
True and a.tested == False and a.quarantined == False)])\n\n for a in newly_symptomatic_agents:\n # all symptomatic agents are quarantined by default\n if self.verbosity > 0:\n print('quarantined: {} {}'.format(a.type, a.ID))\n a.quarantined = True\n a.quarantine_start = self.Nstep\n\n self.test_agent(a, self.Testing.diagnostic_test_type)\n\n def quarantine_contacts(self):\n # trace and quarantine contacts of newly positive agents\n if len(self.newly_positive_agents) > 0:\n if self.verbosity > 0: print('new positive test(s) from {}'\n .format([a.ID for a in self.newly_positive_agents]))\n\n # send all K1 contacts of positive agents into quarantine\n for a in self.newly_positive_agents:\n self.trace_contacts(a)\n\n # indicate that a screen should happen because there are new\n # positive test results\n self.new_positive_tests = True\n self.newly_positive_agents = []\n\n else:\n self.new_positive_tests = False\n\n\n def step(self):\n self.weekday = (self.Nstep + self.weekday_offset) % 7 + 1\n # if the connection graph is time-resloved, set the graph that is\n # used to determine connections in this step to the sub-graph corres-\n # ponding to the current day of the week\n if self.dynamic_connections:\n self.G = self.weekday_connections[self.weekday]\n\n if self.verbosity > 0:\n print('weekday {}'.format(self.weekday))\n\n if self.testing:\n for agent_type in self.agent_types:\n for screen_type in ['reactive', 'follow_up', 'preventive']:\n self.screened_agents[screen_type][agent_type] = False\n\n if self.verbosity > 0:\n print('* testing and tracing *')\n\n self.test_symptomatic_agents()\n\n\n # collect and act on new test results\n agents_with_test_results = self.collect_test_results()\n for a in agents_with_test_results:\n a.act_on_test_result()\n\n self.quarantine_contacts()\n\n # screening:\n # a screen should take place if\n # (a) there are new positive test results\n # (b) as a follow-up screen for a screen that was initiated because\n # of new positive cases\n # (c) if there is a preventive screening policy and it is time for\n # a preventive screen in a given agent group\n\n # (a)\n if (self.testing == 'background' or self.testing == 'background+preventive')\\\n and self.new_positive_tests == True:\n for agent_type in self.screening_agents:\n self.screen_agents(\n agent_type, self.Testing.diagnostic_test_type, 'reactive')\n self.scheduled_follow_up_screen[agent_type] = True\n\n # (b)\n elif (self.testing == 'background' or self.testing == 'background+preventive') and \\\n self.Testing.follow_up_testing_interval != None and \\\n sum(list(self.scheduled_follow_up_screen.values())) > 0:\n for agent_type in self.screening_agents:\n if self.scheduled_follow_up_screen[agent_type] and\\\n self.days_since_last_agent_screen[agent_type] >=\\\n self.Testing.follow_up_testing_interval:\n self.screen_agents(\n agent_type, self.Testing.diagnostic_test_type, 'follow_up')\n else:\n if self.verbosity > 0:\n print('not initiating {} follow-up screen (last screen too close)'\\\n .format(agent_type))\n\n # (c) \n elif (self.testing == 'preventive' or self.testing == 'background+preventive')and \\\n np.any(list(self.Testing.screening_intervals.values())):\n\n for agent_type in self.screening_agents:\n interval = self.Testing.screening_intervals[agent_type]\n assert interval in [7, 3, 2, None], \\\n 'testing interval {} for agent type {} not supported!'\\\n .format(interval, agent_type)\n\n # (c.1) testing every 7 days = testing on Mondays\n if interval == 7 and self.weekday == 1:\n 
self.screen_agents(agent_type,\n self.Testing.preventive_screening_test_type,\\\n 'preventive')\n # (c.2) testing every 3 days = testing on Mo & Turs\n elif interval == 3 and self.weekday in [1, 4]:\n self.screen_agents(agent_type,\n self.Testing.preventive_screening_test_type,\\\n 'preventive')\n # (c.3) testing every 2 days = testing on Mo, Wed & Fri\n elif interval == 2 and self.weekday in [1, 3, 5]:\n self.screen_agents(agent_type,\n self.Testing.preventive_screening_test_type,\\\n 'preventive')\n # No interval specified = no testing, even if testing\n # mode == preventive\n elif interval == None:\n pass\n else:\n if self.verbosity > 0:\n print('not initiating {} preventive screen (wrong weekday)'\\\n .format(agent_type))\n else:\n # do nothing\n pass\n\n for agent_type in self.agent_types:\n if not (self.screened_agents['reactive'][agent_type] or \\\n self.screened_agents['follow_up'][agent_type] or \\\n self.screened_agents['preventive'][agent_type]):\n self.days_since_last_agent_screen[agent_type] += 1\n\n\n if self.verbosity > 0: print('* agent interaction *')\n self.datacollector.collect(self)\n self.schedule.step()\n self.Nstep += 1\n"
] | [
[
"numpy.random.weibull",
"numpy.random.seed",
"numpy.random.choice",
"numpy.abs",
"numpy.asarray",
"scipy.optimize.root_scalar"
]
] |
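The scseirx row above converts epidemiological parameters given as `[mean, std]` pairs into Weibull shape and scale via `root_scalar(get_weibull_shape, args=(mu, var), method='toms748', bracket=[0.2, 500])` and `get_weibull_scale(mu, shape)`. A minimal standalone sketch of that moment-matching step, assuming the helpers implement the standard Weibull mean and variance formulas (the residual function below is a re-derivation, not the package's code):

```python
# Sketch of the mean/std -> Weibull (shape, scale) conversion used for
# exposure_duration, time_until_symptoms and infection_duration above.
# Assumption: scseirx's get_weibull_shape / get_weibull_scale implement the
# standard moment equations; this residual is a re-derivation, not package code.
from math import gamma
from scipy.optimize import root_scalar

def weibull_shape_residual(shape, mu, var):
    # eliminate the scale via the mean (mu = scale * Gamma(1 + 1/shape)) and
    # return the implied variance minus the target variance
    scale = mu / gamma(1 + 1 / shape)
    return scale ** 2 * (gamma(1 + 2 / shape) - gamma(1 + 1 / shape) ** 2) - var

mu, sigma = 5.0, 1.9              # illustrative numbers, e.g. exposure_duration = [5, 1.9]
shape = root_scalar(weibull_shape_residual, args=(mu, sigma ** 2),
                    method='toms748', bracket=[0.2, 500]).root
scale = mu / gamma(1 + 1 / shape)
print(round(shape, 2), round(scale, 2))
```

Each agent then draws its own durations from this distribution and re-rolls (counted in `param_rerolls`) whenever the draw is pathological, for example symptom onset before the end of the exposure period.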
sankar-mukherjee/DCASE-2018---Task-4- | [
"f8034641efef6e60ea721abc5569d9c1aa8ee56d"
] | [
"task4_crnn.py"
] | [
"# !/usr/bin/env python\n# -*- coding: utf-8 -*-\n#########################################################################\n# This code is an adaptation from Toni Heittola's code [task1 baseline dcase 2018](https://github.com/DCASE-REPO/dcase2018_baseline/tree/master/task1/)\n# Copyright Nicolas Turpault, Romain Serizel, Hamid Eghbal-zadeh, Ankit Parag Shah, 2018, v1.0\n# This software is distributed under the terms of the License MIT\n#########################################################################\nimport dcase_util\nimport sys\nimport numpy\nimport os\nimport random\nimport pickle\n\nimport tensorflow as tf\nfrom keras import backend as K\nimport keras\n\n#from evaluation_measures import get_f_measure_by_class, event_based_evaluation, segment_based_evaluation\nfrom evaluation_measures import get_f_measure_by_class, event_based_evaluation\nfrom Dataset_dcase2018 import DCASE2018_Task4_DevelopmentSet\n\ndcase_util.utils.setup_logging(logging_file='task4.log')\nprint(keras.__version__)\n\nrandom.seed(10)\nnumpy.random.seed(42)\n\ntf.set_random_seed(1234)\nsess = tf.Session(graph=tf.get_default_graph())\nK.set_session(sess)\n\n\ndef main(parameters):\n log = dcase_util.ui.ui.FancyLogger()\n log.title('DCASE2018 / Task4')\n\n overwirte_preprocessing = False\n overwrite_learning = False\n overwrite_testing = True\n\n # =====================================================================\n # Parameters\n # =====================================================================\n # Process parameters\n param = dcase_util.containers.DCASEAppParameterContainer(\n parameters,\n path_structure={\n 'FEATURE_EXTRACTOR': [\n 'DATASET',\n 'FEATURE_EXTRACTOR'\n ],\n 'FEATURE_NORMALIZER': [\n 'DATASET',\n 'FEATURE_EXTRACTOR'\n ],\n 'LEARNER': [\n 'DATASET',\n 'FEATURE_EXTRACTOR',\n 'FEATURE_NORMALIZER',\n 'FEATURE_SEQUENCER',\n 'LEARNER'\n ],\n 'RECOGNIZER': [\n 'DATASET',\n 'FEATURE_EXTRACTOR',\n 'FEATURE_NORMALIZER',\n 'FEATURE_SEQUENCER',\n 'LEARNER',\n 'RECOGNIZER'\n ],\n }\n ).process()\n\n # Make sure all system paths exists\n dcase_util.utils.Path().create(\n paths=list(param['path'].values())\n )\n\n # Initialize\n keras_model_first_pass = None\n keras_model_second_pass = None\n\n # =====================================================================\n # Dataset\n # =====================================================================\n # Get dataset and initialize it\n\n db = DCASE2018_Task4_DevelopmentSet(included_content_types=['all'],\n local_path=\"\",\n data_path=param.get_path('path.dataset'),\n audio_paths=[\n os.path.join(\"dataset\", \"audio\", \"train\", \"weak\"),\n os.path.join(\"dataset\", \"audio\", \"train\", \"unlabel_in_domain\"),\n os.path.join(\"dataset\", \"audio\", \"train\", \"unlabel_out_of_domain\"),\n os.path.join(\"dataset\", \"audio\", \"test\")\n ]\n ).initialize()\n\n # Active folds\n folds = db.folds(\n mode=param.get_path('dataset.parameters.evaluation_mode')\n )\n\n active_fold_list = param.get_path('dataset.parameters.fold_list')\n if active_fold_list:\n folds = list(set(folds).intersection(active_fold_list))\n\n # =====================================================================\n # Feature extraction stage\n # =====================================================================\n if param.get_path('flow.feature_extraction'):\n log.section_header('Feature Extraction / Train material')\n\n # Prepare feature extractor\n mel_extractor = dcase_util.features.MelExtractor(\n **param.get_path('feature_extractor.parameters.mel')\n )\n\n # Loop over all 
audio files in the dataset and extract features for them.\n # for audio_filename in db.audio_files:\n for audio_filename in db.audio_files:\n # Get filename for feature data from audio filename\n feature_filename = dcase_util.utils.Path(\n path=audio_filename\n ).modify(\n path_base=param.get_path('path.application.feature_extractor'),\n filename_extension='.cpickle'\n )\n\n if not os.path.isfile(feature_filename) or overwirte_preprocessing:\n log.line(\n data=os.path.split(audio_filename)[1],\n indent=2\n )\n\n # Load audio data\n audio = dcase_util.containers.AudioContainer().load(\n filename=audio_filename,\n mono=True,\n fs=param.get_path('feature_extractor.fs')\n )\n\n # Extract features and store them into FeatureContainer, and save it to the disk\n dcase_util.containers.FeatureContainer(\n data=mel_extractor.extract(audio.data),\n time_resolution=param.get_path('feature_extractor.hop_length_seconds')\n ).save(\n filename=feature_filename\n )\n\n log.foot()\n\n # =====================================================================\n # Feature normalization stage\n # =====================================================================\n\n if param.get_path('flow.feature_normalization'):\n log.section_header('Feature Normalization')\n\n # Get filename for the normalization factors\n features_norm_filename = os.path.join(\n param.get_path('path.application.feature_normalizer'),\n 'normalize_values.cpickle'\n )\n\n if not os.path.isfile(features_norm_filename) or overwirte_preprocessing:\n normalizer = dcase_util.data.Normalizer(\n filename=features_norm_filename\n )\n\n # Loop through all training data, two train folds\n for fold in folds:\n for filename in db.train(fold=fold).unique_files:\n # Get feature filename\n feature_filename = dcase_util.utils.Path(\n path=filename\n ).modify(\n path_base=param.get_path('path.application.feature_extractor'),\n filename_extension='.cpickle',\n )\n\n # Load feature matrix\n features = dcase_util.containers.FeatureContainer().load(\n filename=feature_filename\n )\n\n # Accumulate statistics\n normalizer.accumulate(\n data=features.data\n )\n\n # Finalize and save\n normalizer.finalize().save()\n\n log.foot()\n\n # Create processing chain for features\n feature_processing_chain = dcase_util.processors.ProcessingChain()\n for chain in param.get_path('feature_processing_chain'):\n processor_name = chain.get('processor_name')\n init_parameters = chain.get('init_parameters', {})\n\n # Inject parameters\n if processor_name == 'dcase_util.processors.NormalizationProcessor':\n init_parameters['filename'] = features_norm_filename\n\n if init_parameters.get('enable') is None or init_parameters.get('enable') is True:\n feature_processing_chain.push_processor(\n processor_name=processor_name,\n init_parameters=init_parameters,\n )\n\n # =====================================================================\n # Learning stage\n # =====================================================================\n if param.get_path('flow.learning'):\n log.section_header('Learning')\n\n # setup keras parameters\n dcase_util.keras.setup_keras(\n seed=param.get_path('learner.parameters.random_seed'),\n profile=param.get_path('learner.parameters.keras_profile'),\n backend=param.get_path('learner.parameters.backend'),\n device=param.get_path('learner.parameters.device'),\n verbose=False\n )\n\n # encoder used to convert text labels into vector\n many_hot_encoder = dcase_util.data.ManyHotEncoder(\n label_list=db.tags(),\n time_resolution=1\n )\n\n # 
=====================================================================\n # Training first pass\n # =====================================================================\n\n fold = 1\n # Get model filename\n fold1_model_filename = os.path.join(\n param.get_path('path.application.learner'),\n 'model_fold_{fold}.h5'.format(fold=fold)\n )\n\n if not os.path.isfile(fold1_model_filename) or overwrite_learning:\n # Split the dataset into training and validation files\n training_files, validation_files = db.validation_split(\n fold=fold,\n split_type='random',\n validation_amount=param.get_path('learner.parameters.model.first_pass.validation_amount'),\n verbose=True\n )\n\n batch_size = param.get_path('learner.parameters.model.first_pass.fit.batch_size')\n shuffle = param.get_path('learner.parameters.model.first_pass.fit.shuffle')\n\n # Get items (with labels) associated with training files\n training_items = db.train(fold=fold).filter(file_list=training_files)\n\n # Create the generator, which convert filename and item into arrays batch_X, batch_y in right formats\n training_generator = data_generator(training_items, param.get_path('path.application.feature_extractor'),\n many_hot_encoder, feature_processing_chain,\n batch_size=batch_size, shuffle=shuffle)\n\n validation_items = db.train(fold=fold).filter(file_list=validation_files)\n validation_generator = data_generator(validation_items, param.get_path('path.application.feature_extractor'),\n many_hot_encoder, feature_processing_chain,\n batch_size=batch_size, shuffle=False)\n\n # Update constants with useful information to setup the model\n model_parameter_constants = {\n 'NB_CLASSES': db.tag_count(),\n 'INPUT_FREQUENCIES': param.get_path('feature_extractor.parameters.mel.n_mels'),\n 'INPUT_SEQUENCE_LENGTH': param.get_path('feature_sequencer.sequence_length'),\n }\n model_parameter_constants.update(param.get_path('learner.parameters.model.constants', {}))\n\n # Load the sequential keras model defined in the YAML.\n keras_model_first_pass = dcase_util.keras.create_sequential_model(\n model_parameter_list=param.get_path('learner.parameters.model.first_pass.config'),\n constants=model_parameter_constants\n )\n\n # Print the model configuration\n keras_model_first_pass.summary(print_fn=log.line)\n\n # Create optimizer object from info given in YAML\n param.set_path(\n path='learner.parameters.compile.optimizer',\n new_value=dcase_util.keras.create_optimizer(\n class_name=param.get_path('learner.parameters.optimizer.class_name'),\n config=param.get_path('learner.parameters.optimizer.config')\n )\n )\n # Compile model\n keras_model_first_pass.compile(\n **param.get_path('learner.parameters.compile')\n )\n\n epochs = param.get_path('learner.parameters.model.first_pass.fit.epochs')\n\n # Setup callbacks used during training\n callback_list = [\n dcase_util.keras.ProgressLoggerCallback(\n epochs=epochs,\n metric=param.get_path('learner.parameters.compile.metrics')[0],\n loss=param.get_path('learner.parameters.compile.loss'),\n output_type='logging',\n **param.get_path('learner.parameters.callbacks.ProgressLoggerCallback')\n )\n ]\n if param.get_path('learner.parameters.callbacks.StopperCallback'):\n callback_list.append(\n dcase_util.keras.StopperCallback(\n epochs=epochs,\n **param.get_path('learner.parameters.callbacks.StopperCallback')\n )\n )\n\n if param.get_path('learner.parameters.callbacks.StasherCallback'):\n callback_list.append(\n dcase_util.keras.StasherCallback(\n epochs=epochs,\n 
**param.get_path('learner.parameters.callbacks.StasherCallback')\n )\n )\n\n processing_interval = param.get_path(\n 'learner.parameters.callbacks.ProgressLoggerCallback.processing_interval'\n )\n epochs = param.get_path('learner.parameters.model.first_pass.fit.epochs')\n\n # Iterate through epoch to be able to manually update callbacks\n for epoch_start in range(0, epochs, processing_interval):\n epoch_end = epoch_start + processing_interval\n\n # Make sure we have only specified amount of epochs\n if epoch_end > epochs:\n epoch_end = epochs\n\n # Train keras_model_first_pass\n keras_model_first_pass.fit_generator(\n generator=training_generator,\n steps_per_epoch=len(training_files) // batch_size,\n validation_data=validation_generator,\n validation_steps=len(validation_files) // batch_size,\n callbacks=callback_list,\n verbose=0,\n initial_epoch=epoch_start,\n epochs=epoch_end\n )\n\n # Get f_measures of the current epoch\n val_macro_f_measure = get_f_measure_by_class(keras_model_first_pass, db.tag_count(), validation_generator,\n len(validation_files) // batch_size)\n val_macro_f_measure = val_macro_f_measure.mean()\n\n tra_macro_f_measure = get_f_measure_by_class(keras_model_first_pass, db.tag_count(), training_generator,\n len(training_files) // batch_size,\n )\n tra_macro_f_measure = tra_macro_f_measure.mean()\n\n # Inject external metric values to the callbacks\n for callback in callback_list:\n if hasattr(callback, 'set_external_metric_value'):\n callback.set_external_metric_value(\n metric_label='val_macro_f_measure',\n metric_value=val_macro_f_measure\n )\n callback.set_external_metric_value(\n metric_label='tra_macro_f_measure',\n metric_value=tra_macro_f_measure\n )\n\n # Manually update callbacks\n for callback in callback_list:\n if hasattr(callback, 'update'):\n callback.update()\n\n # Check we need to stop training\n stop_training = False\n for callback in callback_list:\n if hasattr(callback, 'stop'):\n if callback.stop():\n log.line(\"Early stropping\")\n stop_training = True\n\n if stop_training:\n # Stop the training loop\n break\n\n # Fetch best model\n for callback in callback_list:\n if isinstance(callback, dcase_util.keras.StasherCallback):\n callback.log()\n best_weights = callback.get_best()['weights']\n if best_weights:\n keras_model_first_pass.set_weights(best_weights)\n break\n\n # Save trained model\n keras_model_first_pass.save(fold1_model_filename)\n\n log.foot()\n\n # =======\n # Calculate best thresholds\n # =======\n thresholds_filename = os.path.join(\n param.get_path('path.application.learner'),\n 'thresholds_{fold}.p'.format(fold=fold)\n )\n\n if not os.path.isfile(thresholds_filename) or overwrite_learning:\n training_files, validation_files = db.validation_split(\n fold=fold,\n split_type='random',\n validation_amount=param.get_path('learner.parameters.model.first_pass.validation_amount'),\n verbose=True\n )\n batch_size = param.get_path('learner.parameters.model.first_pass.fit.batch_size')\n validation_items = db.train(fold=fold).filter(file_list=validation_files)\n validation_generator = data_generator(validation_items, param.get_path('path.application.feature_extractor'),\n many_hot_encoder, feature_processing_chain,\n batch_size=batch_size, shuffle=False)\n\n # Load model if not trained during this run\n if not keras_model_first_pass:\n keras_model_first_pass = keras.models.load_model(fold1_model_filename)\n\n thresholds = [0] * db.tag_count()\n max_f_measure = [-numpy.inf] * db.tag_count()\n for threshold in numpy.arange(0., 1 + 1e-6, 0.1):\n # 
Assign current threshold to each class\n current_thresholds = [threshold] * db.tag_count()\n\n # Calculate f_measures with the current thresholds\n macro_f_measure = get_f_measure_by_class(keras_model_first_pass, db.tag_count(), validation_generator,\n len(validation_files) // batch_size,\n current_thresholds)\n\n # Update thresholds for class with better f_measures\n for i, label in enumerate(db.tags()):\n f_measure = macro_f_measure[i]\n if f_measure > max_f_measure[i]:\n max_f_measure[i] = f_measure\n thresholds[i] = threshold\n\n for i, label in enumerate(db.tags()):\n log.line(\"{:30}, threshold: {}\".format(label, thresholds[i]))\n\n thresholds_filename = os.path.join(\n param.get_path('path.application.learner'),\n 'thresholds.p'.format(fold=fold)\n )\n pickle.dump(thresholds, open(thresholds_filename, \"wb\"))\n\n else:\n thresholds = pickle.load(open(thresholds_filename, \"rb\"))\n\n # =====================================================================\n # Predict stage from weak to predict unlabel_in_domain tags\n # =====================================================================\n\n log.section_header('Predict 1st pass, add labels to unlabel_in_domain data')\n\n # Get results filename\n fold_results_filename = os.path.join(\n param.get_path('path.application.recognizer'),\n 'pred_weak_fold_{fold}.txt'.format(fold=fold)\n )\n\n if not os.path.isfile(fold_results_filename) or overwrite_testing:\n # Initialize results container\n res = dcase_util.containers.MetaDataContainer(\n filename=fold_results_filename\n )\n\n # Load model if not yet loaded\n if not keras_model_first_pass:\n keras_model_first_pass = keras.models.load_model(fold1_model_filename)\n\n # Loop through all test files from the current cross-validation fold\n for item in db.test(fold=fold):\n # Get feature filename\n feature_filename = dcase_util.utils.Path(\n path=item.filename\n ).modify(\n path_base=param.get_path('path.application.feature_extractor'),\n filename_extension='.cpickle'\n )\n\n features = feature_processing_chain.process(\n filename=feature_filename\n )\n\n input_data = features.data.reshape(features.shape[:-1]).T # (500, 64)\n input_data = input_data.reshape((1,)+input_data.shape) # (1, 500, 64)\n\n # Get network output\n probabilities = keras_model_first_pass.predict(x=input_data)\n\n # Binarization of the network output\n frame_decisions = dcase_util.data.ProbabilityEncoder().binarization(\n probabilities=probabilities,\n binarization_type='class_threshold',\n threshold=thresholds,\n time_axis=0\n )\n\n estimated_tags = dcase_util.data.DecisionEncoder(\n label_list=db.tags()\n ).many_hot(\n frame_decisions=frame_decisions,\n time_axis=0\n )\n\n # Store result into results container\n res.append(\n {\n 'filename': item.filename,\n 'tags': estimated_tags[0]\n }\n )\n\n # Save results container\n res.save()\n\n log.foot()\n\n # =====================================================================\n # Learning stage 2nd pass, learn from weak and unlabel_in_domain annotated data\n # =====================================================================\n\n fold = 2\n\n log.line(data='Fold [{fold}]'.format(fold=fold), indent=2)\n\n # Get model filename\n fold2_model_filename = os.path.join(\n param.get_path('path.application.learner'),\n 'model_fold_{fold}.h5'.format(fold=fold)\n )\n\n if not os.path.isfile(fold2_model_filename) or overwrite_learning:\n\n model_parameter_constants = {\n 'NB_CLASSES': db.tag_count(),\n 'INPUT_FREQUENCIES': 
param.get_path('feature_extractor.parameters.mel.n_mels'),\n 'INPUT_SEQUENCE_LENGTH': param.get_path('feature_sequencer.sequence_length'),\n }\n model_parameter_constants.update(param.get_path('learner.parameters.model.constants', {}))\n\n keras_model_second_pass = dcase_util.keras.create_sequential_model(\n model_parameter_list=param.get_path('learner.parameters.model.second_pass.config'),\n constants=model_parameter_constants\n )\n\n keras_model_second_pass.summary(print_fn=log.line)\n\n # Create optimizer object\n param.set_path(\n path='learner.parameters.compile.optimizer',\n new_value=dcase_util.keras.create_optimizer(\n class_name=param.get_path('learner.parameters.optimizer.class_name'),\n config=param.get_path('learner.parameters.optimizer.config')\n )\n )\n # Compile model\n keras_model_second_pass.compile(\n **param.get_path('learner.parameters.compile')\n )\n\n # Get annotations from the 1st pass model\n fold1_results_filename = os.path.join(\n param.get_path('path.application.recognizer'),\n 'pred_weak_fold_{fold}.txt'.format(fold=1)\n )\n # Load annotations\n predictions_first_pass = dcase_util.containers.MetaDataContainer(\n filename=fold1_results_filename\n ).load()\n\n # Split the dataset into train and validation. If \"weak\" is provided, files from weak.csv are used to\n # validate the model. Else, give a percentage which will be used\n if param.get_path('learner.parameters.model.second_pass.validation_amount') == \"weak\":\n training_files = predictions_first_pass.unique_files\n training_items = predictions_first_pass\n validation_files = db.train(fold=1).unique_files\n validation_items = db.train(fold=1)\n else:\n # Get validation files\n training_files, validation_files = db.validation_split(\n fold=fold,\n split_type='random',\n validation_amount=param.get_path('learner.parameters.model.second_pass.validation_amount'),\n verbose=False\n )\n training_fold2 = predictions_first_pass + db.train(fold=1)\n\n training_items = training_fold2.filter(file_list=training_files)\n validation_items = training_fold2.filter(file_list=validation_files)\n\n processing_interval = param.get_path(\n 'learner.parameters.callbacks.ProgressLoggerCallback.processing_interval'\n )\n epochs = param.get_path('learner.parameters.model.second_pass.fit.epochs')\n\n batch_size = param.get_path('learner.parameters.model.second_pass.fit.batch_size')\n shuffle = param.get_path('learner.parameters.model.second_pass.fit.shuffle')\n\n # Create generators, which convert filename and item into arrays batch_X, batch_y in right formats\n training_generator = data_generator(training_items, param.get_path('path.application.feature_extractor'),\n many_hot_encoder, feature_processing_chain,\n batch_size=batch_size, shuffle=shuffle, mode=\"strong\")\n\n validation_generator = data_generator(validation_items, param.get_path('path.application.feature_extractor'),\n many_hot_encoder,\n feature_processing_chain,\n batch_size=batch_size, shuffle=False, mode=\"strong\")\n\n # Initialize callbacks used during training\n callback_list = [\n dcase_util.keras.ProgressLoggerCallback(\n epochs=param.get_path('learner.parameters.model.second_pass.fit.epochs'),\n metric=param.get_path('learner.parameters.compile.metrics')[0],\n loss=param.get_path('learner.parameters.compile.loss'),\n output_type='logging',\n **param.get_path('learner.parameters.callbacks.ProgressLoggerCallback')\n )\n ]\n if param.get_path('learner.parameters.callbacks.StopperCallback'):\n callback_list.append(\n dcase_util.keras.StopperCallback(\n 
epochs=param.get_path('learner.parameters.model.second_pass.fit.epochs'),\n **param.get_path('learner.parameters.callbacks.StopperCallback')\n )\n )\n\n if param.get_path('learner.parameters.callbacks.StasherCallback'):\n callback_list.append(\n dcase_util.keras.StasherCallback(\n epochs=param.get_path('learner.parameters.model.second_pass.fit.epochs'),\n **param.get_path('learner.parameters.callbacks.StasherCallback')\n )\n )\n\n for epoch_start in range(0, epochs, processing_interval):\n epoch_end = epoch_start + processing_interval\n\n # Make sure we have only specified amount of epochs\n if epoch_end > epochs:\n epoch_end = epochs\n\n # Train keras_model_second_pass\n keras_model_second_pass.fit_generator(\n generator=training_generator,\n steps_per_epoch=len(training_files) // batch_size,\n validation_data=validation_generator,\n validation_steps=len(validation_files) // batch_size,\n callbacks=callback_list,\n verbose=0,\n initial_epoch=epoch_start,\n epochs=epoch_end\n )\n\n # Calculate external metrics, f_measure of the current epoch\n val_macro_f_measure = get_f_measure_by_class(keras_model_second_pass, db.tag_count(), validation_generator,\n len(validation_files) // batch_size, )\n val_macro_f_measure = val_macro_f_measure.mean()\n\n tra_macro_f_measure = get_f_measure_by_class(keras_model_second_pass, db.tag_count(), training_generator,\n len(training_files) // batch_size,\n )\n tra_macro_f_measure = tra_macro_f_measure.mean()\n\n # Inject external metric values to the callbacks\n for callback in callback_list:\n if hasattr(callback, 'set_external_metric_value'):\n callback.set_external_metric_value(\n metric_label='val_macro_f_measure',\n metric_value=val_macro_f_measure\n )\n callback.set_external_metric_value(\n metric_label='tra_macro_f_measure',\n metric_value=tra_macro_f_measure\n )\n\n # Manually update callbacks\n for callback in callback_list:\n if hasattr(callback, 'update'):\n callback.update()\n\n # Check we need to stop training\n stop_training = False\n for callback in callback_list:\n if hasattr(callback, 'stop'):\n if callback.stop():\n log.line(\"Early stropping\")\n stop_training = True\n\n if stop_training:\n # Stop the training loop\n break\n\n # Fetch best model\n for callback in callback_list:\n if isinstance(callback, dcase_util.keras.StasherCallback):\n callback.log()\n best_weights = callback.get_best()['weights']\n if best_weights:\n keras_model_second_pass.set_weights(best_weights)\n break\n\n # Save trained model\n keras_model_second_pass.save(fold2_model_filename)\n\n log.foot()\n\n # =====================================================================\n # Testing stage, get strong annotations\n # =====================================================================\n\n if param.get_path('flow.testing'):\n log.section_header('Testing')\n\n # Get results filename\n fold_results_filename = os.path.join(\n param.get_path('path.application.recognizer'),\n 'res_fold_{fold}.txt'.format(fold=2)\n )\n\n # Get model filename\n fold2_model_filename = os.path.join(\n param.get_path('path.application.learner'),\n 'model_fold_{fold}.h5'.format(fold=2)\n )\n\n if not os.path.isfile(fold_results_filename) or overwrite_testing:\n # Load model if not yet loaded\n if not keras_model_second_pass:\n keras_model_second_pass = keras.models.load_model(fold2_model_filename)\n\n # Initialize results container\n res = dcase_util.containers.MetaDataContainer(\n filename=fold_results_filename\n )\n\n # Loop through all test files from the current cross-validation fold\n for 
item in db.test(fold=2):\n # Get feature filename\n feature_filename = dcase_util.utils.Path(\n path=item.filename\n ).modify(\n path_base=param.get_path('path.application.feature_extractor'),\n filename_extension='.cpickle'\n )\n\n # Get features array\n features = feature_processing_chain.process(\n filename=feature_filename\n )\n\n input_data = features.data.reshape(features.shape[:-1]).T # (500, 64)\n # Create a batch with only one file\n input_data = input_data.reshape((1,) + input_data.shape) # (1, 500, 64)\n\n # Get network output for strong data\n probabilities = keras_model_second_pass.predict(input_data)\n\n # only one file in the batch\n probabilities = probabilities[0]\n\n if param.get_path('recognizer.frame_binarization.enable'):\n # Binarization of the network output\n frame_decisions = dcase_util.data.ProbabilityEncoder().binarization(\n probabilities=probabilities,\n binarization_type=param.get_path('recognizer.frame_binarization.binarization_type'),\n threshold=param.get_path('recognizer.frame_binarization.threshold'),\n time_axis=0\n )\n else:\n frame_decisions = dcase_util.data.ProbabilityEncoder().binarization(\n probabilities=probabilities,\n binarization_type=\"global_threshold\",\n threshold=0.5,\n time_axis=0\n )\n\n decision_encoder = dcase_util.data.DecisionEncoder(\n label_list=db.tags()\n )\n\n if param.get_path('recognizer.process_activity.enable'):\n frame_decisions = decision_encoder.process_activity(\n frame_decisions,\n window_length=param.get_path('recognizer.process_activity.window_length'),\n time_axis=0)\n\n for i, label in enumerate(db.tags()):\n\n # given a list of ones, give the onset and offset in frames\n estimated_events = decision_encoder.find_contiguous_regions(\n activity_array=frame_decisions[:, i]\n )\n\n for [onset, offset] in estimated_events:\n hop_length_seconds = param.get_path('feature_extractor.hop_length_seconds')\n # Store result into results container, convert frames to seconds\n res.append(\n {\n 'filename': item.filename,\n 'event_label': label,\n 'onset': onset * hop_length_seconds,\n 'offset': offset * hop_length_seconds\n }\n )\n\n # Save results container\n res.save()\n log.foot()\n\n # =====================================================================\n # Evaluation stage, get results\n # =====================================================================\n\n if param.get_path('flow.evaluation'):\n log.section_header('Evaluation')\n\n stats_filename = os.path.join(param.get_path('path.application.recognizer'), 'evaluation.txt')\n\n if not os.path.isfile(stats_filename) or overwrite_testing:\n fold_results_filename = os.path.join(\n param.get_path('path.application.recognizer'),\n 'res_fold_{fold}.txt'.format(fold=fold)\n )\n\n # test data used to evaluate the system\n reference_event_list = db.eval(fold=fold)\n\n # predictions done during the step test before\n estimated_event_list = dcase_util.containers.MetaDataContainer().load(\n filename=fold_results_filename\n )\n\n # Calculate the metric\n event_based_metric = event_based_evaluation(reference_event_list, estimated_event_list)\n\n with open(stats_filename, \"w\") as stats_file:\n stats_file.write(event_based_metric.__str__())\n\n log.line(event_based_metric.__str__(), indent=4)\n\n log.foot()\n\n\ndef data_generator(items, feature_path, many_hot_encoder, feature_processing_chain, batch_size=1, shuffle=True, mode='weak'):\n \"\"\" Transform MetaDataContainer into batches of data\n\n Parameters\n ----------\n\n items : MetaDataContainer, items to be generated\n\n 
feature_path : String, base path where features are stored\n\n many_hot_encoder : ManyHotEncoder, class to encode data\n\n feature_processing_chain : ProcessingChain, chain to process data\n\n batch_size : int, size of the batch to be returned\n\n shuffle : bool, shuffle the items before creating the batch\n\n mode : \"weak\" or \"strong\", indicate to return labels as tags (1/file) or event_labels (1/frame)\n\n Return\n ------\n\n (batch_X, batch_y): generator, arrays containing batches of data.\n\n \"\"\"\n while True:\n batch_X = []\n batch_y = []\n if shuffle:\n random.shuffle(items)\n for item in items:\n # Get feature filename\n feature_filename = dcase_util.utils.Path(\n path=item.filename\n ).modify(\n path_base=feature_path,\n filename_extension='.cpickle',\n )\n\n features = feature_processing_chain.process(\n filename=feature_filename\n )\n input_data = features.data.reshape(features.shape[:-1]).T\n\n # Target\n targets = item.tags\n targets = many_hot_encoder.encode(targets, length_frames=1).data.flatten()\n if mode == \"strong\":\n targets = numpy.repeat(targets.reshape((1,) + targets.shape), input_data.shape[0], axis=0)\n\n if batch_size == 1:\n batch_X = input_data.reshape((1,) + input_data.shape)\n batch_y = targets.reshape((1,) + targets.shape)\n else:\n batch_X.append(input_data)\n batch_y.append(targets)\n if len(batch_X) == batch_size and len(batch_y) == batch_size:\n yield numpy.array(batch_X), numpy.array(batch_y)\n\n batch_X = []\n batch_y = []\n\n\n\nif __name__ == \"__main__\":\n # Read parameters file\n parameters = dcase_util.containers.DictContainer().load(\n filename='task4_crnn.yaml'\n )\n\n try:\n sys.exit(main(parameters))\n except (ValueError, IOError) as e:\n sys.exit(e)\n"
] | [
[
"numpy.random.seed",
"numpy.arange",
"tensorflow.set_random_seed",
"tensorflow.get_default_graph",
"numpy.array"
]
] |
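The first-pass tagging model in the row above chooses one binarization threshold per class by sweeping `numpy.arange(0., 1 + 1e-6, 0.1)` and keeping, for each class, the threshold with the best F-score on the validation generator. A self-contained sketch of that per-class search on plain arrays; replacing `get_f_measure_by_class` with an explicit binary F1 is an assumption about what that helper returns:

```python
# Per-class threshold sweep mirroring the loop over numpy.arange(0., 1 + 1e-6, 0.1).
# Assumption: get_f_measure_by_class yields one F-score per class; here it is
# replaced by an explicit binary F1 over (n_samples, n_classes) arrays.
import numpy as np

def select_class_thresholds(probabilities, targets, grid=None):
    if grid is None:
        grid = np.arange(0.0, 1.0 + 1e-6, 0.1)
    n_classes = probabilities.shape[1]
    best_f = np.full(n_classes, -np.inf)
    best_t = np.zeros(n_classes)
    for threshold in grid:
        predictions = (probabilities >= threshold).astype(int)
        tp = (predictions * targets).sum(axis=0)
        precision = tp / np.maximum(predictions.sum(axis=0), 1)
        recall = tp / np.maximum(targets.sum(axis=0), 1)
        f1 = 2 * precision * recall / np.maximum(precision + recall, 1e-12)
        better = f1 > best_f
        best_f[better], best_t[better] = f1[better], threshold
    return best_t, best_f

# toy demo with random scores and 0/1 targets
rng = np.random.default_rng(0)
thresholds, _ = select_class_thresholds(rng.random((100, 4)),
                                        (rng.random((100, 4)) > 0.7).astype(int))
print(thresholds)
```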
rockscie/async_blp | [
"acb8777ccf2499681bde87d76ca780b61219699c"
] | [
"tests/test_instruments_request.py"
] | [
"import pandas as pd\nimport pytest\n\nfrom async_blp.instruments_requests import InstrumentRequestBase\n\n\[email protected]\nclass TestInstrumentRequestBase:\n\n def test__weight(self):\n request = InstrumentRequestBase('query', max_results=5)\n request.response_fields = ['field_1', 'field_2']\n\n assert request.weight == 10\n\n async def test__process(self, security_lookup_msg):\n request = InstrumentRequestBase('query', max_results=5)\n request.response_fields = ['security', 'description']\n\n request.send_queue_message(security_lookup_msg)\n request.send_queue_message(None)\n\n data, _ = await request.process()\n\n expected_data = pd.DataFrame([['F US Equity', 'Ford Motors Co']],\n columns=['security', 'description'])\n\n pd.testing.assert_frame_equal(expected_data, data)\n"
] | [
[
"pandas.DataFrame",
"pandas.testing.assert_frame_equal"
]
] |
olegs22/Quickquasar_QA | [
"df74994780216846501710b79b4dce7d025809c9"
] | [
"run_quickquasars.py"
] | [
"import numpy as np\nimport os\nimport shutil\nimport glob as glob\n\ndef get_slurm_script(script_name,command,outdir,idir,mail,log,part,nodes,threads,time,job_name):\n if os.path.isdir(outdir+'/run') == False:\n os.mkdir(outdir+'/run')\n file_name = outdir + '/run/' + script_name\n f = open(file_name,'w')\n slurm_dict = dict()\n slurm_dict['line_0'] = '#SBATCH -C haswell\\n'\n slurm_dict['line_1'] = '#SBATCH --partition='+part+'\\n'\n slurm_dict['line_2'] = '#SBATCH --account=desi\\n'\n slurm_dict['line_3'] = '#SBATCH --nodes='+str(nodes)+'\\n'\n slurm_dict['line_4'] = '#SBATCH --time='+time+'\\n'\n slurm_dict['line_5'] = '#SBATCH --job-name='+job_name+'\\n'\n slurm_dict['line_6'] = '#SBATCH --output='+log+'\\n'\n slurm_dict['line_7'] = '#SBATCH --mail-user='+mail+'\\n'\n slurm_dict['line_8'] = 'idir='+idir+'\\n'\n slurm_dict['line_9'] = 'outdir='+outdir+'\\n'\n slurm_dict['line_10'] = 'nodes='+str(nodes)+'\\n' # CHECK MATCHING #SBATCH --nodes ABOVE !!!!\n slurm_dict['line_11'] = 'nthreads='+str(threads)+'\\n' # TO BE TUNED ; CAN HIT NODE MEMORY LIMIT ; 4 is max on edison for nside=16 and ~50 QSOs/deg2\n slurm_dict['line_12'] = 'echo \"get list of skewers to run ...\"\\n'\n slurm_dict['line_13'] = 'files=`\\ls -1 $idir/*/*/transmission*.fits*`\\n'\n slurm_dict['line_14'] = 'nfiles=`echo $files | wc -w`\\n'\n slurm_dict['line_15'] = 'nfilespernode=$((nfiles/nodes+1))\\n'\n slurm_dict['line_16'] = 'echo \"n files =\" $nfiles\\n'\n slurm_dict['line_17'] = 'echo \"n files per node =\" $nfilespernode\\n'\n slurm_dict['line_18'] = 'first=1\\n'\n slurm_dict['line_19'] = 'last=$nfilespernode\\n'\n slurm_dict['line_20'] = 'for node in `seq $nodes` ; do\\n'\n slurm_dict['line_21'] = ' echo \"starting node $node\"\\n'\n slurm_dict['line_22'] = ' # list of files to run\\n'\n slurm_dict['line_23'] = ' if (( $node == $nodes )) ; then\\n'\n slurm_dict['line_24'] = ' last=\"\"\\n'\n slurm_dict['line_25'] = ' fi\\n'\n slurm_dict['line_26'] = ' echo ${first}-${last}\\n'\n slurm_dict['line_27'] = ' tfiles=`echo $files | cut -d \" \" -f ${first}-${last}`\\n'\n slurm_dict['line_28'] = ' first=$(( first + nfilespernode ))\\n'\n slurm_dict['line_29'] = ' last=$(( last + nfilespernode ))\\n'\n \n set_up = \" srun -N 1 -n 1 -c $nthreads quickquasars -i $tfiles --nproc $nthreads --outdir $outdir/spectra-16 \"\n slurm_dict['line_30'] = set_up + command +'\\n'\n slurm_dict['line_31'] = ' done\\n'\n slurm_dict['line_32'] = 'wait\\n'\n slurm_dict['line_33'] = 'echo \"END\"\\n'\n for i in range(len(slurm_dict)):\n f.write(slurm_dict['line_' + str(i)])\n return None\n\nif __name__ == \"__main__\":\n import argparse\n from pathlib import Path\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--outdir',type=str,help='output directory of the quickquasar run')\n parser.add_argument('--idir',type=str,help='directory from where to fetch the input data')\n parser.add_argument('--mail',type=str,default=' ',help='email to sent status of the job')\n parser.add_argument('--log',type=str,default =' ',help='directory to output the log of the job run')\n parser.add_argument('--qos',type=str,default='regular',help='which queue')\n parser.add_argument('--nodes',type=int,default=40,help='number numbers to use')\n parser.add_argument('--threads',type=int,default=4,help='number of thread to use per node')\n parser.add_argument('--time',default='00:30:00',type=str)\n parser.add_argument('--name',type=str,default='lyasim',help='name of the job')\n parser.add_argument('--seed-generator',type=int,default=15430289,help='seed to run 
quickquasar')\n parser.add_argument('--nruns',type=int,default=1,help='number of quickquasar runs with the same arguments')\n args = parser.parse_args()\n\n outfile = open('submit.sh','w+')\n np.random.seed(args.seed_generator)\n for k in range(args.nruns):\n #make the output dirs\n output_dirs = args.outdir + '_'+str(k)\n if os.path.isdir(output_dirs) == False:\n os.mkdir(output_dirs)\n if os.path.isdir(output_dirs+'/logs') == False:\n os.mkdir(output_dirs+'/logs')\n if os.path.isdir(output_dirs+'/spectra-16') == False:\n os.mkdir(output_dirs+'/spectra-16')\n \n \n seed = np.random.randint(12345,98765,size=1)\n\n #read config file for quickquasart\n file = open('config.txt','r')\n lines = []\n for l in file:\n lines.append(l)\n \n for i in range(len(lines)):\n line_comp = lines[i].split()\n if len(line_comp) != 1:\n lines[i] = '--' + line_comp[0] + ' ' + line_comp[1] + ' ' \n else:\n lines[i] = '--' + line_comp[0] + ' '\n \n command = \"\".join(lines) + '--seed '+str(seed[0]) \n\n name = 'run_quickquasar.sh'\n get_slurm_script(name,command,output_dirs,args.idir,args.mail,args.log,args.qos,args.nodes,args.threads,args.time,args.name) \n \n \n outfile.write('sbatch '+output_dirs+'/run/'+name+'\\n')\n outfile.close()\n"
] | [
[
"numpy.random.randint",
"numpy.random.seed"
]
] |
marlon27/Light_FAMD | [
"fe4328f15f6145798869908fa126eabe75e85391"
] | [
"light_famd/mca.py"
] | [
"\"\"\"Multiple Correspondence Analysis (MCA)\"\"\"\n\nimport numpy as np\nfrom sklearn import utils\n\nfrom . import ca\nfrom . import one_hot\n\n\n\nclass MCA(ca.CA):\n\n def fit(self, X, y=None):\n if self.check_input:\n utils.check_array(X, dtype=[str, np.number])\n \n n_initial_columns = X.shape[1]\n\n # One-hot encode the data\n self.one_hot_ = one_hot.OneHotEncoder().fit(X)\n \n _X_t= self.one_hot_.transform(X) \n \n _0_freq_serie= (_X_t == 0).sum(axis=0)/ len(_X_t)\n \n self._usecols=_0_freq_serie[_0_freq_serie < 0.99].index\n print('MCA PROCESS ELIMINATED {0} COLUMNS SINCE THEIR MISS_RATES >= 99%'.format( _X_t.shape[1] - len(self._usecols) ))\n \n n_new_columns = len(self._usecols)\n self.total_inertia_ = (n_new_columns - n_initial_columns) / n_initial_columns\n # Apply CA to the indicator matrix\n super().fit(_X_t.loc[:,self._usecols])\n\n return self\n\n def _transform(self, X):\n return super()._transform(self.one_hot_.transform(X).loc[:,self._usecols])\n\n\n\n def transform(self, X):\n \"\"\"Computes the row principal coordinates of a dataset.\"\"\"\n utils.validation.check_is_fitted(self, 'singular_values_')\n if self.check_input:\n utils.check_array(X, dtype=[str, np.number])\n return self._transform(X)\n\n"
] | [
[
"sklearn.utils.validation.check_is_fitted",
"sklearn.utils.check_array"
]
] |
shayxu-ai/A-Repository-for-Machine-Learning | [
"4b4cea15bb005d1c58f4395fde97cadf44fb0186"
] | [
"测试/tensorflow_hello/2.practices_on_nlp.py"
] | [
"# -*- coding: utf-8 -*-\n# @Time: 2020/2/5,005 22:02\n# @Last Update: 2020/2/5,005 22:02\n# @Author: 徐缘\n# @FileName: 2.practices_on_nlp.py\n# @Software: PyCharm\n\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals # 导入一些熟悉的陌生人\n# 绝对引入,精确除法,print,unicode类型字符串。都是为了适配python2,不加也罢\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras.layers import Dense, Flatten, Conv2D\nfrom tensorflow.keras import Model\nfrom tensorflow import keras\n\n\nimport tensorflow_hub as hub # 模型库\nimport tensorflow_datasets as tfds # 数据|库 https://tensorflow.google.cn/datasets/api_docs/python/tfds?hl=en\ntfds.disable_progress_bar()\n\n\ndef version():\n \"\"\"\n 国际惯例,先看下版本\n \"\"\"\n print(\"Eager mode: \", tf.executing_eagerly())\n print(\"Hub version: \", hub.__version__)\n print(\"tfds version\", tfds.__version__)\n print(\"GPU is\", \"available\" if tf.config.experimental.list_physical_devices(\"GPU\") else \"NOT AVAILABLE\")\n\n\ndef tf_hub_hello():\n \"\"\"\n 预训练word2vector(迁移学习) + 全连接层\n loss: 0.329\n accuracy: 0.858 我记得 cnn 文本分类可以有95%呢\n\n \"\"\"\n train_data, validation_data, test_data = tfds.load(\n name=\"imdb_reviews\", split=('train[:60%]', 'train[60%:]', 'test'),\n as_supervised=True)\n train_examples_batch, train_labels_batch = next(iter(train_data.batch(10)))\n print(train_examples_batch)\n print(train_labels_batch)\n\n embedding = \"https://hub.tensorflow.google.cn/google/tf2-preview/gnews-swivel-20dim/1\"\n hub_layer = hub.KerasLayer(embedding, input_shape=[],\n dtype=tf.string, trainable=True)\n print(hub_layer(train_examples_batch[:3]))\n\n model = tf.keras.Sequential()\n model.add(hub_layer)\n model.add(tf.keras.layers.Dense(16, activation='relu'))\n model.add(tf.keras.layers.Dense(1, activation='sigmoid'))\n\n # model.summary()\n\n model.compile(optimizer='adam',\n loss='binary_crossentropy',\n metrics=['accuracy'])\n\n history = model.fit(train_data.shuffle(10000).batch(512),\n epochs=20,\n validation_data=validation_data.batch(512),\n verbose=1)\n\n results = model.evaluate(test_data.batch(512), verbose=2)\n\n for name, value in zip(model.metrics_names, results):\n print(\"%s: %.3f\" % (name, value))\n\n\ndef preprocess_text():\n \"\"\"\n\n\n \"\"\"\n (train_data, test_data), info = tfds.load(\n # Use the version pre-encoded with an ~8k vocabulary.\n 'imdb_reviews/subwords8k',\n # Return the train/test datasets as a tuple.\n split=(tfds.Split.TRAIN, tfds.Split.TEST),\n # Return (example, label) pairs from the dataset (instead of a dictionary).\n as_supervised=True,\n # Also return the `info` structure.\n with_info=True)\n\n encoder = info.features['text'].encoder\n print('Vocabulary size: {}'.format(encoder.vocab_size))\n\n sample_string = 'Hello TensorFlow.'\n\n encoded_string = encoder.encode(sample_string)\n print('Encoded string is {}'.format(encoded_string))\n\n original_string = encoder.decode(encoded_string)\n print('The original string: \"{}\"'.format(original_string))\n\n assert original_string == sample_string\n\n for ts in encoded_string:\n print('{} ----> {}'.format(ts, encoder.decode([ts])))\n\n for train_example, train_label in train_data.take(1):\n print('Encoded text:', train_example[:10].numpy())\n print('Label:', train_label.numpy())\n\n encoder.decode(train_example)\n\n BUFFER_SIZE = 1000\n\n train_batches = (\n train_data\n .shuffle(BUFFER_SIZE)\n .padded_batch(32, train_data.output_shapes))\n\n test_batches = (\n test_data\n .padded_batch(32, train_data.output_shapes))\n\n for example_batch, label_batch in 
train_batches.take(2):\n print(\"Batch shape:\", example_batch.shape)\n print(\"label shape:\", label_batch.shape)\n\n model = keras.Sequential([\n keras.layers.Embedding(encoder.vocab_size, 16),\n keras.layers.GlobalAveragePooling1D(),\n keras.layers.Dense(1, activation='sigmoid')])\n\n model.summary()\n\n model.compile(optimizer='adam',\n loss='binary_crossentropy',\n metrics=['accuracy'])\n\n history = model.fit(train_batches,\n epochs=10,\n validation_data=test_batches,\n validation_steps=30)\n\n loss, accuracy = model.evaluate(test_batches)\n\n print(\"Loss: \", loss)\n print(\"Accuracy: \", accuracy)\n\n history_dict = history.history\n history_dict.keys()\n\n import matplotlib.pyplot as plt\n\n acc = history_dict['accuracy']\n val_acc = history_dict['val_accuracy']\n loss = history_dict['loss']\n val_loss = history_dict['val_loss']\n\n epochs = range(1, len(acc) + 1)\n\n # \"bo\" is for \"blue dot\"\n plt.plot(epochs, loss, 'bo', label='Training loss')\n # b is for \"solid blue line\"\n plt.plot(epochs, val_loss, 'b', label='Validation loss')\n plt.title('Training and validation loss')\n plt.xlabel('Epochs')\n plt.ylabel('Loss')\n plt.legend()\n\n plt.show()\n\n plt.clf() # clear figure\n\n plt.plot(epochs, acc, 'bo', label='Training acc')\n plt.plot(epochs, val_acc, 'b', label='Validation acc')\n plt.title('Training and validation accuracy')\n plt.xlabel('Epochs')\n plt.ylabel('Accuracy')\n plt.legend(loc='lower right')\n\n plt.show()\n return\n\n\nif __name__ == '__main__':\n # version()\n preprocess_text()\n\n\n"
] | [
[
"matplotlib.pyplot.legend",
"tensorflow.keras.layers.Embedding",
"tensorflow.executing_eagerly",
"tensorflow.keras.layers.GlobalAveragePooling1D",
"tensorflow.keras.Sequential",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"tensorflow.keras.layers.Dense",
"matplotlib.pyplot.plot",
"tensorflow.config.experimental.list_physical_devices",
"matplotlib.pyplot.xlabel"
]
] |
friedenhe/OpenMDAO | [
"db1d7e22a8bf9f66afa82ec3544b7244d5545f6d"
] | [
"openmdao/components/interp_util/interp.py"
] | [
"\"\"\"\nBase class for interpolation methods that calculate values for each dimension independently.\n\nBased on Tables in NPSS, and was added to bridge the gap between some of the slower scipy\nimplementations.\n\"\"\"\nimport numpy as np\n\nfrom openmdao.components.interp_util.interp_akima import InterpAkima, Interp1DAkima\nfrom openmdao.components.interp_util.interp_bsplines import InterpBSplines\nfrom openmdao.components.interp_util.interp_cubic import InterpCubic\nfrom openmdao.components.interp_util.interp_lagrange2 import InterpLagrange2, Interp3DLagrange2\nfrom openmdao.components.interp_util.interp_lagrange3 import InterpLagrange3, Interp3DLagrange3\nfrom openmdao.components.interp_util.interp_scipy import InterpScipy\nfrom openmdao.components.interp_util.interp_slinear import InterpLinear, Interp3DSlinear, \\\n Interp1DSlinear, Interp2DSlinear\n\nfrom openmdao.components.interp_util.outofbounds_error import OutOfBoundsError\nfrom openmdao.utils.om_warnings import warn_deprecation\n\n\nINTERP_METHODS = {\n 'slinear': InterpLinear,\n 'lagrange2': InterpLagrange2,\n 'lagrange3': InterpLagrange3,\n 'cubic': InterpCubic,\n 'akima': InterpAkima,\n 'scipy_cubic': InterpScipy,\n 'scipy_slinear': InterpScipy,\n 'scipy_quintic': InterpScipy,\n 'bsplines': InterpBSplines,\n '1D-slinear': Interp1DSlinear,\n '2D-slinear': Interp2DSlinear,\n '3D-slinear': Interp3DSlinear,\n '3D-lagrange2': Interp3DLagrange2,\n '3D-lagrange3': Interp3DLagrange3,\n '1D-akima': Interp1DAkima,\n 'trilinear': Interp3DSlinear, # Deprecated\n 'akima1D': Interp1DAkima, # Deprecated\n}\n\nTABLE_METHODS = ['slinear', 'lagrange2', 'lagrange3', 'cubic', 'akima',\n 'scipy_cubic', 'scipy_slinear', 'scipy_quintic',\n 'trilinear', 'akima1D', # These two are Deprecated\n '3D-slinear', '2D-slinear', '1D-slinear',\n '1D-akima',\n '3D-lagrange2', '3D-lagrange3']\nSPLINE_METHODS = ['slinear', 'lagrange2', 'lagrange3', 'cubic', 'akima', 'bsplines',\n 'scipy_cubic', 'scipy_slinear', 'scipy_quintic']\n\n\nclass InterpND(object):\n \"\"\"\n Interpolation on a regular grid of arbitrary dimensions.\n\n The data must be defined on a regular grid; the grid spacing however may be uneven. Several\n interpolation methods are supported. These are defined in the child classes. Gradients are\n provided for all interpolation methods. Gradients with respect to grid values are also\n available optionally.\n\n Parameters\n ----------\n method : str\n Name of interpolation method.\n points : ndarray or tuple of ndarray\n The points defining the regular grid in n dimensions.\n For 1D interpolation, this can be an ndarray of table locations.\n For table interpolation, it can be a tuple or an ndarray. If it is a tuple, it should\n contain one ndarray for each table dimension.\n For spline evaluation, num_cp can be specified instead of points.\n values : ndarray or tuple of ndarray or None\n These must be specified for interpolation.\n The data on the regular grid in n dimensions.\n x_interp : ndarray or None\n If we are always interpolating at a fixed set of locations, then they can be\n specified here.\n extrapolate : bool\n If False, when interpolated values are requested outside of the domain of the input\n data, a ValueError is raised. If True, then the methods are allowed to extrapolate.\n Default is True (raise an exception).\n num_cp : None or int\n Optional. When specified, use a linear distribution of num_cp control points. 
If you\n are using 'bsplines' as the method, then num_cp must be set instead of points.\n **kwargs : dict\n Interpolator-specific options to pass onward.\n\n Attributes\n ----------\n extrapolate : bool\n If False, when interpolated values are requested outside of the domain of the input data,\n a ValueError is raised. If True, then the methods are allowed to extrapolate.\n Default is True.\n grid : tuple\n Collection of points that determine the regular grid.\n table : <InterpTable>\n Table object that contains algorithm that performs the interpolation.\n values : array_like, shape (m1, ..., mn, ...)\n The data on the regular grid in n dimensions.\n x_interp : ndarray\n Cached non-decreasing vector of points to be interpolated when used as an order-reducing\n spline.\n _compute_d_dvalues : bool\n When set to True, compute gradients with respect to the grid values.\n _compute_d_dx : bool\n When set to True, compute gradients with respect to the interpolated point location.\n _d_dx : ndarray\n Cache of computed gradients with respect to evaluation point.\n _d_dvalues : ndarray\n Cache of computed gradients with respect to table values.\n _interp : class\n Class specified as interpolation algorithm, used to regenerate if needed.\n _interp_config : dict\n Configuration object that stores the number of points required for each interpolation\n method.\n _interp_options : dict\n Dictionary of cached interpolator-specific options.\n _xi : ndarray\n Cache of current evaluation point.\n \"\"\"\n\n def __init__(self, method=\"slinear\", points=None, values=None, x_interp=None, extrapolate=False,\n num_cp=None, **kwargs):\n \"\"\"\n Initialize an InterpND object.\n\n This object can be setup and used to interpolate on a curve or multi-dimensional table.\n\n It can also be used to setup an interpolating spline that can be evaluated at fixed\n locations.\n\n For interpolation, specify values and points.\n\n For spline evaluation, specifiy x_interp and either points or num_cp.\n \"\"\"\n if not isinstance(method, str):\n msg = \"Argument 'method' should be a string.\"\n raise ValueError(msg)\n elif method not in INTERP_METHODS:\n all_m = ', '.join(['\"' + m + '\"' for m in INTERP_METHODS])\n raise ValueError('Interpolation method \"%s\" is not defined. Valid methods are '\n '%s.' 
% (method, all_m))\n elif method == 'akima1D':\n warn_deprecation(\"The 'akima1D' method has been renamed to '1D-akima'.\")\n elif method == 'trilinear':\n warn_deprecation(\"The 'trilinear' method has been renamed to '3D-slinear'.\")\n\n self.extrapolate = extrapolate\n\n # The table points are always defined, by specifying either the points directly, or num_cp.\n if points is None:\n if num_cp is not None:\n points = [np.linspace(0.0, 1.0, num_cp)]\n else:\n msg = \"Either 'points' or 'num_cp' must be specified.\"\n raise ValueError(msg)\n else:\n\n if isinstance(points, np.ndarray):\n points = [points]\n\n for i, p in enumerate(points):\n n_p = len(p)\n if not np.all(np.diff(p) > 0.):\n raise ValueError(\"The points in dimension %d must be strictly \"\n \"ascending\" % i)\n if not np.asarray(p).ndim == 1:\n raise ValueError(\"The points in dimension %d must be \"\n \"1-dimensional\" % i)\n\n # Table Interpolation\n if x_interp is None:\n\n if values is None:\n msg = \"Either 'values' or 'x_interp' must be specified.\"\n raise ValueError(msg)\n\n if method == 'bsplines':\n msg = \"Method 'bsplines' is not supported for table interpolation.\"\n raise ValueError(msg)\n\n if not hasattr(values, 'ndim'):\n # allow reasonable duck-typed values\n values = np.asarray(values)\n\n if hasattr(values, 'dtype') and hasattr(values, 'astype'):\n if not np.issubdtype(values.dtype, np.inexact):\n values = values.astype(float)\n\n if len(points) > values.ndim:\n raise ValueError(\"There are %d point arrays, but values has %d \"\n \"dimensions\" % (len(points), values.ndim))\n\n if (method.startswith('scipy') or method == 'akima') and \\\n (np.iscomplexobj(values[:]) or np.any(np.iscomplex(points[0]))):\n msg = f\"Interpolation method '{method}' does not support complex points or values.\"\n raise ValueError(msg)\n\n for i, p in enumerate(points):\n n_p = len(p)\n if values.shape[i] != n_p:\n raise ValueError(\"There are %d points and %d values in \"\n \"dimension %d\" % (len(p), values.shape[i], i))\n\n self.grid = tuple([np.asarray(p) for p in points])\n self.values = values\n self.x_interp = x_interp\n\n self._xi = None\n self._d_dx = None\n self._d_dvalues = None\n self._compute_d_dvalues = False\n self._compute_d_dx = True\n\n # Cache spline coefficients.\n interp = INTERP_METHODS[method]\n\n if method.startswith('scipy'):\n kwargs['interp_method'] = method\n\n table = interp(self.grid, values, interp, **kwargs)\n table.check_config()\n self.table = table\n self._interp = interp\n self._interp_options = kwargs\n\n def interpolate(self, x, compute_derivative=False):\n \"\"\"\n Interpolate at the sample coordinates.\n\n Parameters\n ----------\n x : ndarray or tuple\n Locations to interpolate.\n compute_derivative : bool\n Set to True to compute derivatives with respect to x.\n\n Returns\n -------\n ndarray\n Value of interpolant at all sample points.\n ndarray\n Value of derivative of interpolated output with respect to input x. 
(Only when\n compute_derivative is True).\n \"\"\"\n self._compute_d_dx = compute_derivative\n self.table._compute_d_dx = compute_derivative\n self.table._compute_d_dvalues = False\n\n if isinstance(x, np.ndarray):\n if len(x.shape) < 2:\n if len(self.grid) > 1:\n # Input is an array containing multi-D coordinates of a single point.\n x = np.atleast_2d(x)\n else:\n # Input is an array of separate points on a 1D table.\n x = np.atleast_2d(x).T\n else:\n # Input is a list or tuple of separate points.\n x = np.atleast_2d(x)\n\n # cache latest evaluation point for gradient method's use later\n self._xi = x\n\n xnew = self._interpolate(x)\n\n if compute_derivative:\n return xnew, self._d_dx\n else:\n return xnew\n\n def evaluate_spline(self, values, compute_derivative=False):\n \"\"\"\n Interpolate at all fixed output coordinates given the new table values.\n\n Parameters\n ----------\n values : ndarray(n_points)\n New data values for all points on the regular grid.\n compute_derivative : bool\n Set to True to compute derivatives with respect to x.\n\n Returns\n -------\n ndarray\n Value of interpolant at all sample points.\n ndarray\n Value of derivative of interpolated output with respect to values.\n \"\"\"\n self._compute_d_dvalues = compute_derivative\n self.table._compute_d_dvalues = compute_derivative\n self.table._compute_d_dx = False\n\n if len(values.shape) == 1:\n values = np.expand_dims(values, axis=0)\n\n # cache latest evaluation point for gradient method's use later\n self._xi = self.x_interp.copy()\n\n result = self._evaluate_spline(values)\n if result.shape[0] == 1:\n # Not vectorized, so drop the extra dimension.\n result = result.ravel()\n\n if compute_derivative:\n d_dvalues = self.spline_gradient()\n if d_dvalues.shape[0] == 1:\n d_dvalues = d_dvalues[0]\n return result, d_dvalues\n else:\n return result\n\n def _interpolate(self, xi):\n \"\"\"\n Interpolate at the sample coordinates.\n\n This method is called from OpenMDAO, and is not meant for standalone use.\n\n Parameters\n ----------\n xi : ndarray of shape (..., ndim)\n The coordinates to sample the gridded data.\n\n Returns\n -------\n ndarray\n Value of interpolant at all sample points.\n \"\"\"\n if not self.extrapolate:\n for i, p in enumerate(xi.T):\n if np.isnan(p).any():\n raise OutOfBoundsError(\"One of the requested xi contains a NaN\",\n i, np.NaN, self.grid[i][0], self.grid[i][-1])\n\n eps = 1e-14 * self.grid[i][-1]\n if np.any(p < self.grid[i][0] - eps) or np.any(p > self.grid[i][-1] + eps):\n p1 = np.where(self.grid[i][0] > p)[0]\n p2 = np.where(p > self.grid[i][-1])[0]\n # First violating entry is enough to direct the user.\n violated_idx = set(p1).union(p2).pop()\n value = p[violated_idx]\n raise OutOfBoundsError(\"One of the requested xi is out of bounds\",\n i, value, self.grid[i][0], self.grid[i][-1])\n\n if self._compute_d_dvalues:\n # If the table grid or values are component inputs, then we need to create a new table\n # each iteration.\n interp = self._interp\n self.table = interp(self.grid, self.values, interp, **self._interp_options)\n if not self.table._supports_d_dvalues:\n raise RuntimeError(f'Method {self.table._name} does not support the '\n '\"training_data_gradients\" option.')\n\n self.table._compute_d_dvalues = True\n\n table = self.table\n if table.vectorized(xi):\n result, derivs_x, derivs_val, derivs_grid = table.evaluate_vectorized(xi)\n\n else:\n n_nodes, nx = xi.shape\n result = np.empty((n_nodes, ), dtype=xi.dtype)\n derivs_x = np.empty((n_nodes, nx), dtype=xi.dtype)\n 
derivs_val = None\n\n # TODO: it might be possible to vectorize over n_nodes.\n for j in range(n_nodes):\n val, d_x, d_values, d_grid = table.evaluate(xi[j, ...])\n result[j] = val\n derivs_x[j, :] = d_x.ravel()\n if d_values is not None:\n if derivs_val is None:\n dv_shape = [n_nodes]\n dv_shape.extend(self.values.shape)\n derivs_val = np.zeros(dv_shape, dtype=xi.dtype)\n in_slice = table._full_slice\n full_slice = [slice(j, j + 1)]\n full_slice.extend(in_slice)\n shape = derivs_val[tuple(full_slice)].shape\n derivs_val[tuple(full_slice)] = d_values.reshape(shape)\n\n # Cache derivatives\n self._d_dx = derivs_x\n self._d_dvalues = derivs_val\n\n return result\n\n def _evaluate_spline(self, values):\n \"\"\"\n Interpolate at all fixed output coordinates given the new table values.\n\n This method is called from OpenMDAO, and is not meant for standalone use.\n\n Parameters\n ----------\n values : ndarray(n_nodes x n_points)\n The data on the regular grid in n dimensions.\n\n Returns\n -------\n ndarray\n Value of interpolant at all sample points.\n \"\"\"\n xi = self.x_interp\n self.values = values\n\n table = self.table\n if table._vectorized:\n\n if table._name == 'bsplines':\n # bsplines is fully vectorized.\n table.values = values\n result, _, derivs_val, _ = table.evaluate_vectorized(xi)\n\n else:\n # Scipy implementation vectorized over lookups, but not over multiple table values.\n interp = self._interp\n n_nodes, _ = values.shape\n nx = np.prod(xi.shape)\n\n result = np.empty((n_nodes, nx), dtype=values.dtype)\n derivs_val = None\n\n for j in range(n_nodes):\n\n table = interp(self.grid, values[j, :], interp, **self._interp_options)\n table._compute_d_dvalues = False\n table._compute_d_dx = False\n\n result[j, :], _, _, _ = table.evaluate_vectorized(xi.reshape((nx, 1)))\n\n else:\n interp = self._interp\n n_nodes, _ = values.shape\n nx = np.prod(xi.shape)\n result = np.empty((n_nodes, nx), dtype=values.dtype)\n derivs_val = None\n\n # TODO: it might be possible to vectorize over n_nodes.\n for j in range(n_nodes):\n\n table = interp(self.grid, values[j, :], interp, **self._interp_options)\n table._compute_d_dvalues = True\n table._compute_d_dx = False\n\n for k in range(nx):\n x_pt = np.atleast_2d(xi[k])\n val, _, d_values, _ = table.evaluate(x_pt)\n result[j, k] = val\n if d_values is not None:\n if derivs_val is None:\n dv_shape = [n_nodes, nx]\n dv_shape.extend(values.shape[1:])\n derivs_val = np.zeros(dv_shape, dtype=values.dtype)\n in_slice = table._full_slice\n full_slice = [slice(j, j + 1), slice(k, k + 1)]\n full_slice.extend(in_slice)\n shape = derivs_val[tuple(full_slice)].shape\n derivs_val[tuple(full_slice)] = d_values.reshape(shape)\n\n # Cache derivatives\n self._d_dvalues = derivs_val\n\n self.table = table\n return result\n\n def gradient(self, xi):\n \"\"\"\n Compute the gradients at the specified point.\n\n Most of the gradients are computed as the interpolation itself is performed,\n but are cached and returned separately by this method.\n\n If the point for evaluation differs from the point used to produce\n the currently cached gradient, the interpolation is re-performed in\n order to return the correct gradient.\n\n Parameters\n ----------\n xi : ndarray of shape (..., ndim)\n The coordinates to sample the gridded data at.\n\n Returns\n -------\n ndarray\n Vector of gradients of the interpolated values with respect to each value in xi.\n \"\"\"\n if (self._xi is None) or (not np.array_equal(xi, self._xi)):\n # If inputs have changed since last computation, then 
re-interpolate.\n self.interpolate(xi)\n\n return self._gradient().reshape(np.asarray(xi).shape)\n\n def _gradient(self):\n \"\"\"\n Return the pre-computed gradients.\n\n Returns\n -------\n ndarray\n Vector of gradients of the interpolated values with respect to each value in xi.\n \"\"\"\n return self._d_dx\n\n def training_gradients(self, pt):\n \"\"\"\n Compute the training gradient for the vector of training points.\n\n Parameters\n ----------\n pt : ndarray\n Training point values.\n\n Returns\n -------\n ndarray\n Gradient of output with respect to training point values.\n \"\"\"\n if self.table._vectorized:\n return self.table.training_gradients(pt)\n\n else:\n grid = self.grid\n interp = self._interp\n opts = self._interp_options\n\n for i, axis in enumerate(grid):\n ngrid = axis.size\n values = np.zeros(ngrid)\n deriv_i = np.zeros(ngrid)\n\n for j in range(ngrid):\n values[j] = 1.0\n table = interp([grid[i]], values, interp, **opts)\n table._compute_d_dvalues = False\n deriv_i[j], _, _, _ = table.evaluate(pt[i:i + 1])\n values[j] = 0.0\n\n if i == 0:\n deriv_running = deriv_i.copy()\n else:\n deriv_running = np.outer(deriv_running, deriv_i)\n\n return deriv_running\n\n def spline_gradient(self):\n \"\"\"\n Return derivative of spline with respect to its control points.\n\n Returns\n -------\n ndarray\n Gradient of output with respect to training point values.\n \"\"\"\n vec_size, n_cp = self.values.shape\n x_interp = self.x_interp\n n_interp = len(x_interp)\n\n d_dvalues = self._d_dvalues\n if d_dvalues is not None:\n dy_ddata = np.zeros((vec_size, n_interp, n_cp), dtype=d_dvalues.dtype)\n\n if d_dvalues.shape[0] == vec_size:\n # Akima precomputes derivs at all points in vec_size.\n dy_ddata[:] = d_dvalues\n else:\n # Bsplines computed derivative is the same at all points in vec_size.\n dy_ddata[:] = np.broadcast_to(d_dvalues.toarray(), (vec_size, n_interp, n_cp))\n else:\n # Note: These derivatives are independent of control point y values, so they will never\n # be complex dtype.\n dy_ddata = np.zeros((n_interp, n_cp))\n\n # This way works for the rest of the interpolation methods.\n for k in range(n_interp):\n val = self.training_gradients(x_interp[k:k + 1])\n dy_ddata[k, :] = val\n dy_ddata = np.broadcast_to(dy_ddata, (vec_size, n_interp, n_cp))\n\n return dy_ddata\n"
] | [
[
"numpy.iscomplex",
"numpy.atleast_2d",
"numpy.empty",
"numpy.zeros",
"numpy.diff",
"numpy.outer",
"numpy.issubdtype",
"numpy.any",
"numpy.asarray",
"numpy.array_equal",
"numpy.expand_dims",
"numpy.prod",
"numpy.broadcast_to",
"numpy.iscomplexobj",
"numpy.where",
"numpy.linspace",
"numpy.isnan"
]
] |
mremilien/object-deformnet | [
"bb07fe05f1ee3983835ebe071252541cee5c42f8"
] | [
"data/shape_dataset.py"
] | [
"import h5py\nimport numpy as np\nimport torch.utils.data as data\n\n\nclass ShapeDataset(data.Dataset):\n def __init__(self, h5_file, mode, n_points=2048, augment=False):\n assert (mode == 'train' or mode == 'val'), 'Mode must be \"train\" or \"val\".'\n self.mode = mode\n self.n_points = n_points\n self.augment = augment\n # load data from h5py file\n with h5py.File(h5_file, 'r') as f:\n self.length = f[self.mode].attrs['len']\n self.data = f[self.mode]['data'][:]\n self.label = f[self.mode]['label'][:]\n # augmentation parameters\n self.sigma = 0.01\n self.clip = 0.02\n self.shift_range = 0.02\n\n def __len__(self):\n return self.length\n\n def __getitem__(self, index):\n xyz = self.data[index]\n label = self.label[index] - 1 # data saved indexed from 1\n # randomly downsample\n np_data = xyz.shape[0]\n assert np_data >= self.n_points, 'Not enough points in shape.'\n idx = np.random.choice(np_data, self.n_points)\n xyz = xyz[idx, :]\n # data augmentation\n if self.augment:\n jitter = np.clip(self.sigma*np.random.randn(self.n_points, 3), -self.clip, self.clip)\n xyz[:, :3] += jitter\n shift = np.random.uniform(-self.shift_range, self.shift_range, (1, 3))\n xyz[:, :3] += shift\n return xyz, label\n"
] | [
[
"numpy.random.uniform",
"numpy.random.randn",
"numpy.random.choice"
]
] |
dileep-kishore/deeplearning-examples | [
"2b230ea17f366f602044d44cc8abcac419d4e521"
] | [
"deeplearning_examples/loaders/Churn.py"
] | [
"# @Author: dileep\n# @Last Modified by: dileep\n\nfrom collections import OrderedDict\nimport os\nfrom typing import Tuple, Iterable, Sequence, Dict, Union\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nfrom sklearn.model_selection import train_test_split\nfrom . import datapath\nfrom ..preprocessing import Encoder\nfrom ..sampling import hold_out\n\n#TODO: Make this a subclass of torch.utils.data.Dataset\nclass Churn:\n \"\"\"\n Class for loading the `churn` dataset to predict whether customer `exited` or not\n Parameters:\n ----------\n features : Iterable[str]\n List of features to be used in training and testing.\n NOTE: Do not include the dependent variable\n Options: {RowNumber,CustomerId,Surname,CreditScore,Geography,Gender,\n Age,Tenure,Balance,NumOfProducts,HasCrCard,IsActiveMember,\n EstimatedSalary}\n Attributes:\n ----------\n raw_data : pd.Series\n Raw data returned in the form of a pandas dataframe\n train_data : Tuple[np.ndarray, np.ndarray]\n Tuple of (features, targets) where each is a numpy ndarray\n test_data : Tuple[np.ndarray, np.ndarray]\n Tuple of (features, targets) where each is a numpy ndarray\n \"\"\"\n _feature_dict = {\n 'multi-category': {'Geography'},\n 'binary-category': {'Gender', 'HasCrCard', 'IsActiveMember', 'Exited'},\n 'int': {'CreditScore', 'Age', 'Tenure', 'NumOfProducts'},\n 'float': {'Balance', 'EstimatedSalary'}\n }\n\n def __init__(self, features: Union[Iterable[str], str] = 'all') -> None:\n churn_path = os.path.join(datapath(), 'churn/Churn_Modeling.csv')\n self.raw_data = pd.read_csv(churn_path, index_col=0)\n if features == 'all':\n features = self.all_features\n assert self._validate_features(features), \"Invalid features given\"\n self._features = features + ['Exited']\n\n def __call__(self):\n raw_train, raw_test = hold_out(self.raw_data[self._features])\n feat_meta = self._get_feat_meta(self._features)\n data_encoder = Encoder(feat_meta)\n return data_encoder.encode(raw_train, raw_test, 'Exited')\n\n @property\n def all_features(self) -> Iterable[str]:\n \"\"\"\n Returns all the possible features that can be used\n Returns:\n -------\n Iterable[str]\n A list of all possible features\n \"\"\"\n features = list(self.raw_data.columns)\n return [f for f in features if f not in {'Exited', 'RowNumber', 'CustomerId', 'Surname'}]\n\n def _validate_features(self, features: Iterable[str]) -> bool:\n \"\"\"\n Returns whether the input set of features are valid\n Parameters:\n ----------\n features : Iterable[str]\n Features input to the class\n Returns:\n -------\n bool\n True/False based on validity\n \"\"\"\n all_features = set()\n for f_set in self._feature_dict.values():\n all_features.update(f_set)\n return not any(filter(lambda f: f not in all_features, features))\n\n def _get_feat_meta(self, features: Iterable[str]) -> Dict[str, str]:\n \"\"\"\n Returns the type for each feature\n Parameters:\n ----------\n features : Iterable[str]\n A list of features that are to be used for classification\n Returns:\n -------\n Dict[str, str]\n Dictionary of features and their corresponding types\n \"\"\"\n invert_fdict = {frozenset(v): k for k, v in self._feature_dict.items()}\n feat_meta: Dict[str, str] = OrderedDict()\n for feat in features:\n for feat_group, data_type in invert_fdict.items():\n if feat in feat_group:\n feat_meta[feat] = data_type\n continue\n return feat_meta\n\n def encode_features(self, features: Iterable[str]) -> Tuple[np.ndarray, np.ndarray]:\n cat_features = 
(self._feature_dict['binary-category'] or\n self._feature_dict['multi-category'])\n for feat in features:\n if feat in cat_features:\n self.pp\n\n def split_data(self, features: Iterable[str]) -> Sequence[np.ndarray]:\n \"\"\"\n Splits the raw data into training and testing using the features as a filter\n Parameters:\n ----------\n features : Iterable[str]\n Features that are to be used in the training and testing data\n Returns:\n -------\n Sequence[np.ndarray]\n Sequence of x_train, x_test, y_train, y_test\n \"\"\"\n pass\n"
] | [
[
"pandas.read_csv"
]
] |
Joshinn-io/augur | [
"e9410887f58af2b26c350edf08e3f70ff783bdc5"
] | [
"tests/test_metrics/test_issue_metrics.py"
] | [
"#SPDX-License-Identifier: MIT\n\nimport pytest\nimport pandas as pd\n\ndef test_issues_new(metrics):\n #repo_id\n assert metrics.issues_new(1, 1 , period='year').iloc[0]['issues'] > 0\n\n #repo_group_id\n assert metrics.issues_new(10, period='year').iloc[1]['issues'] > 0\n\n #begin_date & end_date\n assert metrics.issues_new(10, 25430, period='week', begin_date='2017',\n end_date='2017-10').iloc[1]['issues'] > 0\n assert metrics.issues_new(10, period='month', begin_date='2017-05',\n end_date='2018').iloc[2]['issues'] > 0\n\ndef test_issues_active(metrics):\n # repo\n assert metrics.issues_active(1, 1, period='year').iloc[0]['issues'] > 0\n\n # repo_group\n assert metrics.issues_active(10, period='year').iloc[0]['issues'] > 0\n\n # begin_date & end_date\n assert metrics.issues_active(10, 25430, period='month', begin_date='2020-02',\n end_date='2020-03').iloc[0]['issues'] > 0\n\n assert metrics.issues_active(10, period='week', begin_date='2020-01',\n end_date='2020-03') .iloc[0]['issues'] > 0\n\ndef test_issues_closed(metrics):\n # repo\n assert metrics.issues_closed(10, 25430, period='year').iloc[0]['issues'] > 0\n\n #repo_group\n assert metrics.issues_closed(10, period='year').iloc[0]['issues'] > 0\n\n # begin_date & end_date\n assert metrics.issues_closed(10, 25430, period='week', begin_date='2019',\n end_date='2020-02').iloc[0]['issues'] > 0\n\n assert metrics.issues_closed(10, period='month', begin_date='2018-05',\n end_date='2019-08-15').iloc[0]['issues'] > 0\n\ndef test_issue_duration(metrics):\n # repo\n assert metrics.issue_duration(10, 25430).iloc[0]['duration'] == '20 days 03:08:22.000000000'\n\n # repo_group\n assert metrics.issue_duration(10).iloc[0]['duration'] == '20 days 03:08:22.000000000'\n\ndef test_issue_participants(metrics):\n # repo\n assert metrics.issue_participants(10, 25430).iloc[0]['participants'] > 0\n\n # repo_group\n assert metrics.issue_participants(10).iloc[0]['participants'] > 0\n\ndef test_issue_throughput(metrics):\n # repo\n assert metrics.issue_throughput(10, 25430).iloc[0]['throughput'] >= 0\n\n # repo_group\n assert metrics.issue_throughput(10).iloc[0]['throughput'] >= 0\n\ndef test_issue_backlog(metrics):\n #repo_id\n assert metrics.issue_backlog(10, 25430).iloc[0]['issue_backlog'] > 0\n\n #repo_group_id\n assert metrics.issue_backlog(10).iloc[0]['issue_backlog'] > 0\n\n\ndef test_issues_first_time_closed(metrics):\n\n # repo id\n assert metrics.issues_first_time_closed(10, repo_id=25430, period='year').isin(\n [pd.Timestamp('2019', tz='UTC')]).any().any()\n\n # repo_group_id\n assert metrics.issues_first_time_closed(10, period='year').isin(\n [pd.Timestamp('2020', tz='UTC')]).any().any()\n\n # begin_date and end_date\n assert metrics.issues_first_time_closed(10, period='year', begin_date='2019-1-1 00:00:00',\n end_date='2019-12-31 23:59:59').isin([pd.Timestamp('2019-01-01 00:00:00', tz='UTC')]).any().any()\n\n assert metrics.issues_first_time_closed(10, repo_id=25430, period='year', begin_date='2019-1-1 00:00:00',\n end_date='2019-12-31 23:59:59').isin([pd.Timestamp('2019-01-01 00:00:00', tz='UTC')]).any().any()\n\n\ndef test_open_issues_count(metrics):\n # repo\n assert metrics.open_issues_count(10, 25430).iloc[0]['open_count'] > 0\n\n # repo_group\n assert metrics.open_issues_count(10).iloc[0]['open_count'] > 0\n\ndef test_closed_issues_count(metrics):\n # repo\n assert metrics.closed_issues_count(10, 25430).iloc[0]['closed_count'] > 0\n\n # repo_group\n assert metrics.closed_issues_count(10).iloc[0]['closed_count'] > 0\n\ndef 
test_issues_open_age(metrics):\n #repo group\n assert metrics.issues_open_age(10).iloc[0]['open_date'] > 0\n # repo\n assert metrics.issues_open_age(10, 25430).iloc[0]['open_date'] > 0\n\ndef test_issues_closed_resolution_duration(metrics):\n # repo group\n assert metrics.issues_closed_resolution_duration(10).iloc[0]['diffdate'] >= 0\n # repo\n assert metrics.issues_closed_resolution_duration(10, 25430).iloc[0]['diffdate'] >= 0\n\ndef test_average_issue_resolution_time(metrics):\n #repo\n assert metrics.average_issue_resolution_time(10, 25430).isin(\n ['augur', '61 days 12:20:43.791667']).any().any()\n\n # repo_group\n assert metrics.average_issue_resolution_time(10).isin(\n ['grimoirelab', ' 67 days 22:41:55.260417']).any().any()\n\ndef test_issues_maintainer_response_duration(metrics):\n assert metrics.issues_maintainer_response_duration(10, 25430).iloc[0].average_days_comment > 0\n assert metrics.issues_maintainer_response_duration(10).iloc[0].average_days_comment > 0\n assert metrics.issues_maintainer_response_duration(10, 25430).iloc[0].average_days_comment > 0\n\ndef test_issue_comments_mean(metrics):\n assert metrics.issue_comments_mean(10).any().any()\n assert metrics.issue_comments_mean(10, 25430).any().any()\n assert metrics.issue_comments_mean(10, group_by='year').any().any()\n assert metrics.issue_comments_mean(10, 25430, group_by='year').any().any()\n\ndef test_issue_comments_mean_std(metrics):\n assert metrics.issue_comments_mean_std(10).any().any()\n assert metrics.issue_comments_mean_std(10, 25430).any().any()\n assert metrics.issue_comments_mean_std(10, group_by='year').any().any()\n assert metrics.issue_comments_mean_std(10, 25430, group_by='year').any().any()\n"
] | [
[
"pandas.Timestamp"
]
] |
c-martinez/compactness | [
"679a1644e0cd3ded278e9917efe171b5e89fc780"
] | [
"pydescriptors/helpers.py"
] | [
"import numpy as _np\n\nfrom .moments import immoment3D as _immoment3D\n\ndef getSphere(side):\n \"\"\"Create a 3D volume of sideXsideXside, where voxels representing a\n sphere are ones and background is zeros.\n\n Keyword arguments:\n side -- the number of voxels the 3D volume should have on each side.\n\n Returns:\n A (side,side,side) shaped matrix of zeros and ones.\n \"\"\"\n volume = _np.zeros((side, side, side))\n r = side / 2\n Xs, Ys = _np.meshgrid(_np.arange(-r, r), _np.arange(-r, r))\n for k, z in enumerate(_np.arange(-r, r)):\n volume[:, :, k] = _np.sqrt(Xs ** 2 + Ys ** 2 + z ** 2) < r\n return volume\n\n\ndef rotate3D(X, Y, Z, rx, ry):\n \"\"\"Rotates a 3D object along one ordinate axis at a time.\n\n Keyword arguments:\n X -- The X coordinate of the voxels to be rotated.\n Y -- The Y coordinate of the voxels to be rotated.\n Z -- The Z coordinate of the voxels to be rotated.\n\n Returns:\n X,Y,Z coordinates of the rotated voxels.\n \"\"\"\n R = _np.eye(3)\n Rx = _np.array([[1, 0, 0],\n [0, _np.cos(rx), -_np.sin(rx)],\n [0, _np.sin(rx), _np.cos(rx)]])\n Ry = _np.array([[_np.cos(ry), 0, _np.sin(ry)],\n [0, 1, 0],\n [-_np.sin(ry), 0, _np.cos(ry)]])\n R = _np.dot(R, Rx)\n R = _np.dot(R, Ry)\n\n XYZ = _np.vstack([X, Y, Z])\n XYZ_ = _np.dot(XYZ.T, R)\n\n return XYZ_[:, 0], XYZ_[:, 1], XYZ_[:, 2]\n\n\ndef recenter(X, Y, Z):\n # TODO: Document, write unit test\n m000 = _immoment3D(X, Y, Z, 0, 0, 0)\n m100 = _immoment3D(X, Y, Z, 1, 0, 0)\n m010 = _immoment3D(X, Y, Z, 0, 1, 0)\n m001 = _immoment3D(X, Y, Z, 0, 0, 1)\n\n # Find centroid\n cx = m100 / m000\n cy = m010 / m000\n cz = m001 / m000\n\n # Recentering\n X_ = X - cx\n Y_ = Y - cy\n Z_ = Z - cz\n\n return X_, Y_, Z_\n"
] | [
[
"numpy.vstack",
"numpy.eye",
"numpy.zeros",
"numpy.cos",
"numpy.arange",
"numpy.sqrt",
"numpy.sin",
"numpy.dot"
]
] |
yaront/MutSig | [
"456dc793ab2dbd955b5cef098fd14539d428de0b"
] | [
"scripts/Emdometrial/Statistics/mut_analysis.py"
] | [
"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 26 20:42:43 2018\n\n@author: tomer\n\"\"\"\n\n#%%\n# =================================================\n# # Mutation per gene\n# =================================================\n\nimport numpy as np\nimport pandas as pd\n\n#%%\n\n#tumor = sys.argv[1]\n#tumor = tumor.split('/')[-1].split('.')[0]\n#print tumor\n\ntumor = 'UCEC'\n\n#%% Reading data\n\nprint(\"Starting: \" + tumor)\n\nmut_data = pd.read_table('./../../../databases/Endometrial/TCGA_MAFs/' + tumor + '.maf', sep = '\\t')\nbmi_data = pd.read_table('./../../../databases/Endometrial/information/TCGA_bmi_data.txt', sep = '\\t')\npat_bmi = bmi_data[bmi_data['bmi'] != '--']\npat_bmi = pat_bmi[(18.5 < pd.to_numeric(pat_bmi['bmi'])) & (pd.to_numeric(pat_bmi['bmi']) < 90)]\n\npatients = list(set(np.unique(['-'.join(x.split('-')[0:3]) for x in mut_data['Tumor_Sample_Barcode']])).intersection(list(pat_bmi['submitter_id'].values)))\n\npat_bmi = pat_bmi[[(x in patients) for x in pat_bmi['submitter_id'].values]].sort_values(by = ['bmi'])\npat_mut = mut_data[[(x in patients) for x in ['-'.join(x.split('-')[0:3]) for x in mut_data['Tumor_Sample_Barcode']]]]\npat_mut = pat_mut[pat_mut['Variant_Classification'].isin(['Frame_Shift_Del', 'Frame_Shift_Ins', 'In_Frame_Del', 'In_Frame_Ins', 'Missense_Mutation', 'Nonsense_Mutation', 'Nonstop_Mutation', 'Translation_Start_Site'])]\n\n#%% Creating table of mutations per BMI and mutation burden per patient\n\ngene_bmi_mut = pd.DataFrame(0, columns = ['BMI','Total_Mutations'] + list(np.unique(pat_mut['Hugo_Symbol'])), index = np.sort(pat_bmi[['submitter_id','bmi']])[:,1])\ngene_bmi_mut['BMI'] = np.sort(pat_bmi[['submitter_id','bmi']])[:,0]\n\npat_name_mut = ['-'.join(x.split('-')[0:3]) for x in pat_mut['Tumor_Sample_Barcode']]\n\nfor pat in gene_bmi_mut.index:\n gene_bmi_mut.loc[pat,'Total_Mutations'] = pat_name_mut.count(pat)\n\ngene_bmi_mut = gene_bmi_mut[gene_bmi_mut['Total_Mutations'] < 3000]\n\n\n#%% Assigning mutations per gene per patient\n\nprint(\"Calculating mutations for \" + tumor)\n\nfor g in np.unique(pat_mut['Hugo_Symbol']):\n gene_mut = pat_mut[pat_mut['Hugo_Symbol'] == g]\n gene_pat = ['-'.join(x.split('-')[0:3]) for x in gene_mut['Tumor_Sample_Barcode']]\n\n for p in np.unique(gene_pat):\n gene_bmi_mut.loc[p,g] = gene_pat.count(p)\n\ngene_bmi_mut = gene_bmi_mut.transpose()\n\nnorm_gene_bmi_mut = []\n\n\n#%% Finding the slope\n\nprint(\"Calculating slope for \" + tumor)\n\ninds = {bmi: ind for ind,bmi in enumerate(set(pd.to_numeric(gene_bmi_mut.loc['BMI',:])))}\nbmi_ind = [inds[bmi] for bmi in pd.to_numeric(gene_bmi_mut.loc['BMI',:])]\n\nslope = []\nfor i,j in gene_bmi_mut.iloc[2:,:].iterrows():\n norm_mut = pd.to_numeric(j) / pd.to_numeric(gene_bmi_mut.loc['Total_Mutations'])\n norm_gene_bmi_mut.append(norm_mut)\n weight_mut = np.bincount(np.array(bmi_ind),weights=list(map(float,norm_mut.values))) / np.bincount(np.array(bmi_ind))\n slope.append(np.polyfit(list(range(len(weight_mut))), weight_mut,1)[0])\n\nnorm_gene_bmi_mut = pd.DataFrame(norm_gene_bmi_mut)\nnorm_gene_bmi_mut = pd.concat([gene_bmi_mut.loc[['BMI','Total_Mutations'],:],norm_gene_bmi_mut])\nnorm_gene_bmi_mut.index = gene_bmi_mut.index\n\ngene_bmi_mut['Slope'] = [-np.inf,-np.inf] + slope\ngene_bmi_mut = gene_bmi_mut.sort_values(by = ['Slope'])\ngene_bmi_mut.loc[['BMI','Total_Mutations'],'Slope'] = '-'\n\nnorm_gene_bmi_mut['Slope'] = [-np.inf,-np.inf] + slope\nnorm_gene_bmi_mut = norm_gene_bmi_mut.sort_values(by = 
['Slope'])\nnorm_gene_bmi_mut.loc[['BMI','Total_Mutations'],'Slope'] = '-'\n\n\n#%% Writing the data\n\nprint(\"Writing \" + tumor)\n\ngene_bmi_mut.to_csv('./../output/' + tumor + '_bmi_gene_mut.txt', header = True, index = True, sep = '\\t')\nnorm_gene_bmi_mut.to_csv('./../output/' + tumor + '_bmi_gene_mut_norm.txt', header = True, index = True, sep = '\\t')\n\nwriter = pd.ExcelWriter('./../output/' + tumor + '_bmi_gene_mut_slope.xlsx', engine='xlsxwriter')\ngene_bmi_mut.to_excel(writer, sheet_name = tumor + '_binary')\nnorm_gene_bmi_mut.to_excel(writer, sheet_name = tumor + '_norm')\nwriter.save()\n\nprint(\"Done: \" + tumor)\n\n\n\n\n\n"
] | [
[
"pandas.read_table",
"pandas.to_numeric",
"pandas.DataFrame",
"pandas.ExcelWriter",
"pandas.concat",
"numpy.sort",
"numpy.array",
"numpy.unique"
]
] |
polikutinevgeny/FrontsCNN | [
"a9f48d5afcdd7e0fe561840d94af36c0fedf1c15"
] | [
"dataset_results.py"
] | [
"import gc\nimport numpy as np\n\n\ndef dataset_results(dataset, model, binary=False):\n x = np.array([dataset[i][0][0] for i in range(len(dataset))])\n y_true = np.array([dataset[i][1][0] for i in range(len(dataset))])\n y_pred = model.predict(x, batch_size=1, verbose=0).flatten()\n if binary:\n y_true = y_true[..., 0].flatten()\n else:\n y_true = np.argmax(y_true, axis=-1).flatten()\n del x\n gc.collect()\n return y_true, y_pred\n"
] | [
[
"numpy.argmax"
]
] |
MaxSobolMark/mbrl-lib | [
"bc8ccfe8a56b58d3ce5bae2c4ccdadd82ecdb594"
] | [
"mbrl/env/pets_reacher.py"
] | [
"import os\nfrom typing import Tuple\n\nimport numpy as np\nfrom numpy.random import MT19937, RandomState, SeedSequence\nimport torch\nfrom gym import utils\nfrom gym.envs.mujoco import mujoco_env\n\n\nclass Reacher3DEnv(mujoco_env.MujocoEnv, utils.EzPickle):\n def __init__(self, task_id=None, hide_goal=False):\n self.viewer = None\n utils.EzPickle.__init__(self)\n dir_path = os.path.dirname(os.path.realpath(__file__))\n self.goal = np.zeros(3)\n self._hide_goal = hide_goal\n mujoco_env.MujocoEnv.__init__(\n self, os.path.join(dir_path, \"assets/reacher3d.xml\"), 2)\n self._task_id = task_id\n if task_id is not None:\n self._rng = RandomState(MT19937(SeedSequence(task_id)))\n self.goal = self._rng.normal(loc=0, scale=0.1, size=[3])\n\n def step(self, a):\n self.do_simulation(a, self.frame_skip)\n ob = self._get_obs()\n # print('[pets_reacher:22] ob[7:10]: ', ob[7:10])\n reward = -np.sum(\n np.square(Reacher3DEnv.get_EE_pos(ob[None]) - self.goal))\n reward -= 0.01 * np.square(a).sum()\n done = False\n return ob, reward, done, dict(reward_dist=0, reward_ctrl=0)\n\n def viewer_setup(self):\n self.viewer.cam.trackbodyid = 1\n self.viewer.cam.distance = 2.5\n self.viewer.cam.elevation = -30\n self.viewer.cam.azimuth = 270\n\n def reset_model(self):\n qpos, qvel = np.copy(self.init_qpos), np.copy(self.init_qvel)\n if self._task_id is not None:\n qpos[-3:] += self.goal\n else:\n qpos[-3:] += np.random.normal(loc=0, scale=0.1, size=[3])\n self.goal = qpos[-3:]\n qvel[-3:] = 0\n self.set_state(qpos, qvel)\n return self._get_obs()\n\n def _get_obs(self):\n if not self._hide_goal:\n return np.concatenate([\n self.data.qpos.flat,\n self.data.qvel.flat[:-3],\n ])\n return np.concatenate([\n self.data.qpos.flat[:-3],\n self.data.qvel.flat[:-3],\n ])\n\n @staticmethod\n def get_EE_pos(states, are_tensors=False):\n theta1, theta2, theta3, theta4, theta5, theta6, _ = (\n states[:, :1],\n states[:, 1:2],\n states[:, 2:3],\n states[:, 3:4],\n states[:, 4:5],\n states[:, 5:6],\n states[:, 6:],\n )\n\n if not are_tensors:\n\n rot_axis = np.concatenate(\n [\n np.cos(theta2) * np.cos(theta1),\n np.cos(theta2) * np.sin(theta1),\n -np.sin(theta2),\n ],\n axis=1,\n )\n rot_perp_axis = np.concatenate(\n [-np.sin(theta1),\n np.cos(theta1),\n np.zeros(theta1.shape)],\n axis=1)\n cur_end = np.concatenate(\n [\n 0.1 * np.cos(theta1) +\n 0.4 * np.cos(theta1) * np.cos(theta2),\n 0.1 * np.sin(theta1) +\n 0.4 * np.sin(theta1) * np.cos(theta2) - 0.188,\n -0.4 * np.sin(theta2),\n ],\n axis=1,\n )\n\n for length, hinge, roll in [(0.321, theta4, theta3),\n (0.16828, theta6, theta5)]:\n perp_all_axis = np.cross(rot_axis, rot_perp_axis)\n x = np.cos(hinge) * rot_axis\n y = np.sin(hinge) * np.sin(roll) * rot_perp_axis\n z = -np.sin(hinge) * np.cos(roll) * perp_all_axis\n new_rot_axis = x + y + z\n new_rot_perp_axis = np.cross(new_rot_axis, rot_axis)\n new_rot_perp_axis[np.linalg.norm(\n new_rot_perp_axis, axis=1) < 1e-30] = rot_perp_axis[\n np.linalg.norm(new_rot_perp_axis, axis=1) < 1e-30]\n new_rot_perp_axis /= np.linalg.norm(new_rot_perp_axis,\n axis=1,\n keepdims=True)\n rot_axis, rot_perp_axis, cur_end = (\n new_rot_axis,\n new_rot_perp_axis,\n cur_end + length * new_rot_axis,\n )\n\n return cur_end\n else:\n rot_axis = torch.cat(\n [\n torch.cos(theta2) * torch.cos(theta1),\n torch.cos(theta2) * torch.sin(theta1),\n -torch.sin(theta2),\n ],\n dim=1,\n )\n rot_perp_axis = torch.cat([\n -torch.sin(theta1),\n torch.cos(theta1),\n torch.zeros_like(theta1)\n ],\n dim=1)\n cur_end = torch.cat(\n [\n 0.1 * torch.cos(theta1) +\n 0.4 
* torch.cos(theta1) * torch.cos(theta2),\n 0.1 * torch.sin(theta1) +\n 0.4 * torch.sin(theta1) * torch.cos(theta2) - 0.188,\n -0.4 * torch.sin(theta2),\n ],\n dim=1,\n )\n\n for length, hinge, roll in [(0.321, theta4, theta3),\n (0.16828, theta6, theta5)]:\n perp_all_axis = torch.cross(rot_axis, rot_perp_axis)\n x = torch.cos(hinge) * rot_axis\n y = torch.sin(hinge) * torch.sin(roll) * rot_perp_axis\n z = -torch.sin(hinge) * torch.cos(roll) * perp_all_axis\n new_rot_axis = x + y + z\n new_rot_perp_axis = torch.cross(new_rot_axis, rot_axis)\n new_rot_perp_axis[torch.linalg.norm(\n new_rot_perp_axis, dim=1) < 1e-30] = rot_perp_axis[\n torch.linalg.norm(new_rot_perp_axis, dim=1) < 1e-30]\n new_rot_perp_axis /= torch.linalg.norm(new_rot_perp_axis,\n dim=1,\n keepdims=True)\n rot_axis, rot_perp_axis, cur_end = (\n new_rot_axis,\n new_rot_perp_axis,\n cur_end + length * new_rot_axis,\n )\n\n return cur_end\n\n @staticmethod\n def get_reward(ob, action):\n # This is a bit tricky to implement, implement when needed\n print('NOT SUPPOSED TO RUN THIS!')\n raise NotImplementedError\n\n def forward_postprocess_fn(\n self, inputs: torch.Tensor, mean: torch.Tensor, logvar: torch.Tensor,\n min_logvar: torch.nn.parameter.Parameter\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n if not self._hide_goal:\n mean[..., 7:10] = inputs[..., 7:10]\n logvar[..., 7:10] = torch.full(logvar[..., 7:10].shape,\n -float('inf'))\n return mean, logvar\n"
] | [
[
"numpy.zeros",
"torch.linalg.norm",
"torch.cos",
"torch.zeros_like",
"numpy.cross",
"numpy.cos",
"numpy.copy",
"torch.sin",
"numpy.random.SeedSequence",
"torch.cross",
"numpy.random.normal",
"numpy.sin",
"numpy.concatenate",
"numpy.square",
"numpy.linalg.norm"
]
] |
mosesnah-shared/whip-project-targeting | [
"7f47598635f027e2cb05ad33b66ed67627d20329"
] | [
"MuJoCo/modules/utils.py"
] | [
"# [Built-in modules]\nimport os\nimport re\nimport sys\nimport shutil\nimport time, datetime\nimport math as myMath\nimport glob\n\n# [3rd party modules]\nimport cv2\nimport numpy as np\nimport xml.etree.ElementTree as ET\n\nimport sympy as sp\nfrom sympy.utilities.lambdify import lambdify, implemented_function\n\nfrom scipy.special import lambertw\nfrom scipy.integrate import quad\nfrom scipy.spatial.transform import Rotation as R\n\n# [Local modules]\nfrom modules.constants import Constants\n\nclass MyVideo:\n \"\"\"\n Description\n ----------\n\n Arguments\n ---------\n\n Returns\n -------\n \"\"\"\n def __init__( self, vid_dir = None, height = 1440, width = 850, fps = 60 ):\n\n # self.height = height\n # self.width = width\n\n self.height = 2880\n self.width = 1800\n\n self.vid_dir = vid_dir if not None else \".\"\n self.fps = fps\n\n fourcc = cv2.VideoWriter_fourcc( *'MP4V' ) # 4-character code of codec used to compress the frames.\n # For example, VideoWriter::fourcc('P','I','M','1') is a MPEG-1 codec,\n # VideoWriter::fourcc('M','J','P','G') is a motion-jpeg codec etc.\n # List of codes can be obtained at Video Codecs by FOURCC page.\n # self.outVideo = cv2.VideoWriter( self.vid_dir + \"/video.mp4\", fourcc, fps, ( self.height, self.width ) )\n self.outVideo = cv2.VideoWriter( self.vid_dir + \"/video.mp4\", fourcc, fps, ( self.height//2, self.width//2 ) )\n\n def write( self, myViewer ):\n data = myViewer.read_pixels( self.height, self.width, depth = False ) # Get the pixel from the render screen\n data = cv2.cvtColor( data, cv2.COLOR_BGR2RGB )\n\n # data = cv2.resize( data,( self.height, self.width ) )\n data = cv2.resize( data,( self.height//2, self.width//2 ) )\n\n self.outVideo.write( np.flip( data, axis = 0 ) )\n\n def release( self ):\n self.outVideo.release()\n\ndef length_elem2elem( mjModel, mjData, elem_name1, elem_name2 ):\n type1 = get_elem_type( mjModel, elem_name1 )\n type2 = get_elem_type( mjModel, elem_name2 )\n\n # The euclidean distance between two elements, calling using \"get_geom_xpos\" or \"get_site_xpos\" or \"get_body_xpos\" methods\n return np.linalg.norm( getattr( mjData, \"get_\" + type1 + \"_\" + \"xpos\" )( elem_name1 )\n - getattr( mjData, \"get_\" + type2 + \"_\" + \"xpos\" )( elem_name2 ) , ord = 2 )\n\n\ndef get_elem_type( mjModel, elem_name ):\n \"\"\"\n The naming convention of our mujoco simulation is \"{elem}_name\", where elem = [geom, site, body]\n The string before the first underbar '_' describes the elem(ent) of the model.\n This function parses the string and returns the first string (i.e., the element of the model)\n \"\"\"\n return elem_name.split( '_' )[ 0 ] # Parse and get the first string before \"_\"\n\ndef get_property( mjModel, elem_name, prop_name ):\n # Get the property of the name\n\n # The name of the elements start with \"XXXX_\", hence getting the string before the underbar.\n type = get_elem_type( mjModel, elem_name )\n\n for idx, s in enumerate( getattr( mjModel, type + \"_\" + \"names\" ) ): # run through the list of \"geom_names\" or \"body_names\"\n if elem_name == s:\n tmp = getattr( mjModel, type + \"_\" + prop_name )\n return tmp[ idx ]\n\n # If couldn't match in list, raise error\n raise NameError( 'Cannot find geom_name with {0} in list, please check'.format( elem_name ) )\n\n\ndef snake2camel( s ):\n \"\"\"\n Switch string s from snake_form_naming to CamelCase\n \"\"\"\n\n return ''.join( word.title() for word in s.split( '_' ) )\n\ndef camel2snake( s ):\n \"\"\"\n Switch string s from CamelCase to 
snake_form_naming\n [REF] https://stackoverflow.com/questions/1175208/elegant-python-function-to-convert-camelcase-to-snake-case\n \"\"\"\n re.sub( r'(?<!^)(?=[A-Z])', '_', s ).lower()\n\ndef clear_dir( dir ):\n \"\"\" Cleaning up the contents in the directory \"\"\"\n\n\n\ndef args_cleanup( args, s ):\n \"\"\"\n Description\n -----------\n Clean-up the substring s for keys in args\n\n Arguments\n ---------\n args: The dictionary to be parsed\n s : Substring to be discarded. e.g. s = '--', then \"--record\" --> \"record\"\n\n \"\"\"\n if not isinstance( args, dict ) or not isinstance( s, str ):\n raise ValueError( \"Wrong input type. args should be type dict and s should be type str. {0:} and {1:} are rather given\".format(\n type( args ), type( str ) ) )\n\n for old_key in list( args ) :\n new_key = old_key.replace( s, '' )\n args[ new_key ] = args.pop( old_key )\n\n return args\n\n\ndef rot2quat( rot ):\n # Taking the SO(3) matrix as an input and return the quaternion\n\n return quat\n\ndef euler2quaternion( euler_angs ):\n \"\"\"\n Description\n -----------\n This code is directly from the following reference\n [REF] https://computergraphics.stackexchange.com/questions/8195/how-to-convert-euler-angles-to-quaternions-and-get-the-same-euler-angles-back-fr\n Converting a R4 quaternion vector (w, x, y, z) to Euler Angle (Roll, Pitch, Yaw)\n\n Arguments\n ---------\n [NAME] [TYPE] [DESCRIPTION]\n (1) yaw, pitch, roll The euler angles of the given quaternion vector.\n\n [OUTPUTS]\n -----------\n [NAME] [TYPE] [DESCRIPTION]\n (1) quatVec List The quaternion vector, ordered in w, x, y and z\n\n\n \"\"\"\n yaw, pitch, roll = euler_angs[ : ]\n\n cy = np.cos( yaw * 0.5 )\n sy = np.sin( yaw * 0.5 )\n cp = np.cos( pitch * 0.5 )\n sp = np.sin( pitch * 0.5 )\n cr = np.cos( roll * 0.5 )\n sr = np.sin( roll * 0.5 )\n\n\n w = cr * cp * cy + sr * sp * sy;\n x = sr * cp * cy - cr * sp * sy;\n y = cr * sp * cy + sr * cp * sy;\n z = cr * cp * sy - sr * sp * cy;\n\n return w,x,y,z\n\n\ndef quaternion2euler( quatVec ): # Inputting quaternion matrix and outputing the yaw, pitch, roll of the euler angle.\n \"\"\"\n Description\n -----------\n Converting a R4 quaternion vector (w, x, y, z) to Euler Angle (Roll, Pitch, Yaw)\n This code is directly from the following reference\n [REF] https://computergraphics.stackexchange.com/questions/8195/how-to-convert-euler-angles-to-quaternions-and-get-the-same-euler-angles-back-fr\n\n Arguments\n ---------\n [NAME] [TYPE] [DESCRIPTION]\n (1) quatVec List The quaternion vector, ordered in w, x, y and z\n\n Outputs\n --------\n [NAME] [TYPE] [DESCRIPTION]\n (1) yaw, pitch, roll The euler angles of the given quaternion vector.\n\n\n \"\"\"\n\n if len( quatVec ) != 4:\n raise ValueError( \"Wrong size of input argument. 
Given size is [{0:d}] while it should be 4\".format(\n len( quatVec ) ) )\n\n w, x, y ,z = quatVec[:]\n\n t0 = + 2.0 * ( w * x + y * z )\n t1 = + 1.0 - 2.0 * ( x * x + y * y )\n roll = myMath.atan2( t0, t1 )\n\n t2 = + 2.0 * ( w * y - z * x )\n t2 = + 1.0 if t2 > +1.0 else t2\n t2 = - 1.0 if t2 < -1.0 else t2\n pitch = myMath.asin( t2 )\n\n t3 = + 2.0 * ( w * z + x * y )\n t4 = + 1.0 - 2.0 * ( y * y + z * z )\n yaw = myMath.atan2( t3, t4 )\n\n return yaw, pitch, roll\n\ndef str2bool( s ):\n \"\"\"\n\n Description:\n ----------\n Converting an input string to a boolean\n\n Arguments:\n ----------\n [NAME] [TYPE] [DESCRIPTION]\n (1) s dict, str The string which\n\n Returns:\n ----------\n True/False depending on the given input strin gv\n\n \"\"\"\n if isinstance( s, dict ):\n for key, _ in s.items():\n s[ key ] = str2bool( s[ key ] )\n else:\n return v.lower() in ( \"yes\", \"true\", \"t\", \"1\" )\n\ndef str2float( s ):\n \"\"\"\n\n Description:\n ----------\n Converting an input string to a float arraay\n\n Arguments:\n ----------\n [NAME] [TYPE] [DESCRIPTION]\n (1) s str The string which will be parsed to float array\n\n Returns:\n ----------\n The parsed float array\n\n \"\"\"\n if not isinstance( s, str ):\n raise ValueError( \"Input argument should be string, but {} is given\".format( type( s ) ) )\n\n return [ float( i ) for i in re.findall( r\"[-+]?\\d*\\.\\d+|[-+]?\\d+\", s ) ]\n\ndef my_mkdir( ):\n\n dir = Constants.TMP_DIR # Temporarily saving at tmp\n dir += datetime.datetime.now().strftime( \"%Y%m%d_%H%M%S/\" ) # Appending the date when this directory is called.\n if not os.path.exists( dir ): # If directory not exist\n os.makedirs( dir, exist_ok = True ) # mkdir -p functionality via exist_ok\n\n return dir\n\n\ndef my_mvdir( from_dir, to_dir ):\n shutil.move( from_dir , to_dir )\n\n\n\n\ndef my_rmdir( dir ):\n\n if not isinstance( dir, str ):\n raise ValueError( \"Input directory should be a str, {} is given\".format( type ( dir ) ) )\n\n try:\n shutil.rmtree( dir )\n except:\n print( \"{0:s} Doesn't exist, hence cannot remove the directory\".format( dir ) )\n\n print( \"Erasing Directory [{0:s}]\".format( dir ) )\n\ndef my_print( **kwargs ):\n \"\"\"\n Description:\n ----------\n ** double asterisk means giving the argument as dictionary\n By using double asterisk \"kwargs\" as input argument,\n\n Arguments:\n ----------\n\n Returns:\n ----------\n \"\"\"\n\n prec = kwargs[ \"prec\" ] if \"prec\" in kwargs else 5\n f = kwargs[ \"file\" ] if \"file\" in kwargs else sys.stdout # If there is a keyword called \"file\" then use that as our standard output\n\n tmpMaxLen = len( max( kwargs.keys( ), key = len ) ) # Getting the maximum length of a string list\n\n for args in kwargs:\n\n if 'file' == args.lower( ):\n # Ignore the file's value, since it should not be added to the \"output.txt\" log file.\n continue\n\n\n print( \"[{1:{0}s}]:\".format( tmpMaxLen, args ), end = ' ', file = f ) # Printing out the name of the array\n # {1:{0}s} Enables to set a variable as format length.\n tmpData = kwargs[ args ]\n\n if isinstance( tmpData, ( float, int ) ):\n tmpPrint = \"{2:{1}.{0}f}\".format( prec, prec + 2, tmpData )\n\n elif isinstance( tmpData, list ):\n tmpPrint = np.array2string( np.array( tmpData ).flatten(), precision = prec, separator = ',' )\n\n elif isinstance( tmpData, np.ndarray ):\n tmpPrint = np.array2string( tmpData.flatten() , precision = prec, separator = ',' )\n\n elif isinstance( tmpData, str ):\n tmpPrint = tmpData\n\n elif tmpData is None:\n tmpPrint = \"None\"\n\n 
else:\n raise ValueError( \"CHECK INPUT\")\n\n print( tmpPrint, file = f )\n\ndef solve_eq_posture( q0 ):\n\n q1_0 = q0[ 0 ]\n q2_0 = q0[ 1 ]\n q3_0 = q0[ 2 ]\n q4_0 = q0[ 3 ]\n\n q1 = sp.Symbol( 'q1' )\n q2 = sp.Symbol( 'q2' )\n q3 = sp.Symbol( 'q3' )\n q4 = sp.Symbol( 'q4' )\n\n eqn1 = 0.52444712807465876380774716380984*sp.cos(q2)*sp.sin(q1) - 0.12721953522735995889547666592989*sp.cos(q1)*sp.sin(q2) - 0.05501625493258266441642945210333*sp.sin(q4)*(sp.sin(q1)*sp.sin(q3) + sp.cos(q1)*sp.cos(q3)*sp.sin(q2)) - 0.063807174539763700238381716189906*sp.cos(q1)*sp.cos(q2)*sp.sin(q4) - 0.042749427781976545581699156173272*sp.cos(q1)*sp.cos(q4)*sp.sin(q2) + 0.1762293392050615636890142923221*sp.cos(q2)*sp.cos(q4)*sp.sin(q1) + 0.1762293392050615636890142923221*sp.cos(q1)*sp.cos(q3)*sp.sin(q4) - 0.063807174539763700238381716189906*sp.cos(q3)*sp.cos(q4)*sp.sin(q1) + 0.042749427781976545581699156173272*sp.cos(q1)*sp.cos(q2)*sp.sin(q3)*sp.sin(q4) + 0.063807174539763700238381716189906*sp.cos(q1)*sp.cos(q4)*sp.sin(q2)*sp.sin(q3) + 0.1762293392050615636890142923221*sp.sin(q1)*sp.sin(q2)*sp.sin(q3)*sp.sin(q4) + q1 - q1_0\n eqn2 = 0.1966778910733553153988850681344*sp.cos(q1)*sp.sin(q2) - 0.12721953522735995889547666592989*sp.cos(q2)*sp.sin(q1) + 0.020788410744410568131712579997838*sp.sin(q4)*(sp.sin(q1)*sp.sin(q3) + sp.cos(q1)*sp.cos(q3)*sp.sin(q2)) + 0.015478241093474287559672575298464*sp.cos(q1)*sp.cos(q2)*sp.sin(q4) + 0.066089435759419945526360606891103*sp.cos(q1)*sp.cos(q4)*sp.sin(q2) - 0.042749427781976545581699156173272*sp.cos(q2)*sp.cos(q4)*sp.sin(q1) - 0.042749427781976545581699156173272*sp.cos(q1)*sp.cos(q3)*sp.sin(q4) + 0.015478241093474287559672575298464*sp.cos(q3)*sp.cos(q4)*sp.sin(q1) - 0.066089435759419945526360606891103*sp.cos(q1)*sp.cos(q2)*sp.sin(q3)*sp.sin(q4) - 0.015478241093474287559672575298464*sp.cos(q1)*sp.cos(q4)*sp.sin(q2)*sp.sin(q3) - 0.042749427781976545581699156173272*sp.sin(q1)*sp.sin(q2)*sp.sin(q3)*sp.sin(q4) + q2 - q2_0\n eqn3 = 0.1637248203220158515591720060911*sp.cos(q2)*sp.sin(q1) - 0.061864967327922570916598488111049*sp.cos(q1)*sp.sin(q2) - 0.083555731966853175052278857037891*sp.sin(q4)*(sp.sin(q1)*sp.sin(q3) + sp.cos(q1)*sp.cos(q3)*sp.sin(q2)) - 0.019919678510073035582195188908372*sp.cos(q1)*sp.cos(q2)*sp.sin(q4) - 0.020788410744410568131712579997838*sp.cos(q1)*sp.cos(q4)*sp.sin(q2) + 0.05501625493258266441642945210333*sp.cos(q2)*sp.cos(q4)*sp.sin(q1) + 0.05501625493258266441642945210333*sp.cos(q1)*sp.cos(q3)*sp.sin(q4) - 0.019919678510073035582195188908372*sp.cos(q3)*sp.cos(q4)*sp.sin(q1) + 0.020788410744410568131712579997838*sp.cos(q1)*sp.cos(q2)*sp.sin(q3)*sp.sin(q4) + 0.019919678510073035582195188908372*sp.cos(q1)*sp.cos(q4)*sp.sin(q2)*sp.sin(q3) + 0.05501625493258266441642945210333*sp.sin(q1)*sp.sin(q2)*sp.sin(q3)*sp.sin(q4) + q3 - q3_0\n eqn4 = 0.046062245513354471704303705337225*sp.cos(q1)*sp.sin(q2) - 0.18988602913048024944941971625667*sp.cos(q2)*sp.sin(q1) + 0.019919678510073035582195188908372*sp.sin(q4)*(sp.sin(q1)*sp.sin(q3) + sp.cos(q1)*sp.cos(q3)*sp.sin(q2)) + 0.10117159250577656415259752975544*sp.cos(q1)*sp.cos(q2)*sp.sin(q4) + 0.015478241093474287559672575298464*sp.cos(q1)*sp.cos(q4)*sp.sin(q2) - 0.063807174539763700238381716189906*sp.cos(q2)*sp.cos(q4)*sp.sin(q1) - 0.063807174539763700238381716189906*sp.cos(q1)*sp.cos(q3)*sp.sin(q4) + 0.10117159250577656415259752975544*sp.cos(q3)*sp.cos(q4)*sp.sin(q1) - 0.015478241093474287559672575298464*sp.cos(q1)*sp.cos(q2)*sp.sin(q3)*sp.sin(q4) - 
0.10117159250577656415259752975544*sp.cos(q1)*sp.cos(q4)*sp.sin(q2)*sp.sin(q3) - 0.063807174539763700238381716189906*sp.sin(q1)*sp.sin(q2)*sp.sin(q3)*sp.sin(q4) + q4 - q4_0\n\n sol = sp.solvers.nsolve( ( eqn1, eqn2, eqn3, eqn4 ), ( q1, q2, q3, q4 ), q0 )\n sol = np.array( sol )\n return np.array( [ sol[ 0 ][ 0 ], sol[ 1 ][ 0 ], sol[ 2 ][ 0 ], sol[ 3 ][ 0 ] ] )\n\nif __name__ == '__main__':\n pass\n"
] | [
[
"numpy.array",
"numpy.sin",
"numpy.cos",
"numpy.flip"
]
] |
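The code field in the row above converts a unit quaternion to yaw/pitch/roll with the usual atan2/asin formulas (wrapped in the repo's own myMath module). A minimal standalone sketch of that conversion, using the standard-library math module instead of myMath (an assumption) and a made-up test value, is:

import math

def quat_to_euler(quat):
    # quat = [w, x, y, z]; returns (yaw, pitch, roll) in radians
    if len(quat) != 4:
        raise ValueError("Given size is [{0:d}] while it should be 4".format(len(quat)))
    w, x, y, z = quat
    t0 = 2.0 * (w * x + y * z)
    t1 = 1.0 - 2.0 * (x * x + y * y)
    roll = math.atan2(t0, t1)
    t2 = 2.0 * (w * y - z * x)
    t2 = max(-1.0, min(1.0, t2))          # clamp so asin never sees |t2| > 1
    pitch = math.asin(t2)
    t3 = 2.0 * (w * z + x * y)
    t4 = 1.0 - 2.0 * (y * y + z * z)
    yaw = math.atan2(t3, t4)
    return yaw, pitch, roll

print(quat_to_euler([1.0, 0.0, 0.0, 0.0]))    # identity rotation -> (0.0, 0.0, 0.0)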
qpiel/gammapy | [
"cfb976909e63f4d5d578e1495245c0baad69482b"
] | [
"gammapy/stats/tests/test_significance.py"
] | [
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nfrom numpy.testing import assert_allclose\nfrom ...stats import (\n significance_to_probability_normal,\n probability_to_significance_normal,\n probability_to_significance_normal_limit,\n significance_to_probability_normal_limit,\n)\n\n\ndef test_significance_to_probability_normal():\n significance = 5\n p = significance_to_probability_normal(significance)\n assert_allclose(p, 2.8665157187919328e-07)\n\n s = probability_to_significance_normal(p)\n assert_allclose(s, significance)\n\n\ndef test_significance_to_probability_normal_limit():\n significance = 5\n p = significance_to_probability_normal_limit(significance)\n assert_allclose(p, 2.792513e-07)\n\n s = probability_to_significance_normal_limit(p)\n assert_allclose(s, significance)\n"
] | [
[
"numpy.testing.assert_allclose"
]
] |
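The gammapy test above round-trips a significance of 5 through significance_to_probability_normal and back, expecting a one-sided tail probability of about 2.87e-07. The same relationship can be sketched with scipy alone (this is not gammapy's implementation, just the underlying normal-distribution identity):

from scipy.stats import norm

significance = 5.0
p = norm.sf(significance)     # one-sided tail probability, ~2.8665e-07
s = norm.isf(p)               # inverse survival function recovers ~5.0
print(p, s)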
tsfw/yolov3 | [
"bf6d03d9a84a0ac1e94bcc4f9a026f7d32dfbdab"
] | [
"dataReader.py"
] | [
"import os\nimport config\nimport json\nimport tensorflow as tf\nimport numpy as np\nfrom collections import defaultdict\n\nclass Reader:\n def __init__(self, mode, data_dir, anchors_path, num_classes, tfrecord_num = 12, input_shape = 416, max_boxes = 20):\n \"\"\"\n Introduction\n ------------\n 构造函数\n Parameters\n ----------\n data_dir: 文件路径\n mode: 数据集模式\n anchors: 数据集聚类得到的anchor\n num_classes: 数据集图片类别数量\n input_shape: 图像输入模型的大小\n max_boxes: 每张图片最大的box数量\n jitter: 随机长宽比系数\n hue: 调整hsv颜色空间系数\n sat: 调整饱和度系数\n cont: 调整对比度系数\n bri: 调整亮度系数\n \"\"\"\n self.data_dir = data_dir\n self.input_shape = input_shape\n self.max_boxes = max_boxes\n self.mode = mode\n self.annotations_file = {'train' : config.train_annotations_file, 'val' : config.val_annotations_file}\n self.data_file = {'train': config.train_data_file, 'val': config.val_data_file}\n self.anchors_path = anchors_path\n self.anchors = self._get_anchors()\n self.num_classes = num_classes\n file_pattern = self.data_dir + \"/*\" + self.mode + '.tfrecords'\n self.TfrecordFile = tf.gfile.Glob(file_pattern)\n self.class_names = self._get_class(config.classes_path)\n if len(self.TfrecordFile) == 0:\n self.convert_to_tfrecord(self.data_dir, tfrecord_num)\n\n def _get_anchors(self):\n \"\"\"\n Introduction\n ------------\n 获取anchors\n Returns\n -------\n anchors: anchor数组\n \"\"\"\n anchors_path = os.path.expanduser(self.anchors_path)\n with open(anchors_path) as f:\n anchors = f.readline()\n anchors = [float(x) for x in anchors.split(',')]\n return np.array(anchors).reshape(-1, 2)\n\n def _get_class(self, classes_path):\n \"\"\"\n Introduction\n ------------\n 获取类别名字\n Returns\n -------\n class_names: coco数据集类别对应的名字\n \"\"\"\n classes_path = os.path.expanduser(classes_path)\n with open(classes_path) as f:\n class_names = f.readlines()\n class_names = [c.strip() for c in class_names]\n return class_names\n\n def Preprocess_true_boxes(self, true_boxes):\n \"\"\"\n Introduction\n ------------\n 对训练数据的ground truth box进行预处理\n Parameters\n ----------\n true_boxes: ground truth box 形状为[boxes, 5], x_min, y_min, x_max, y_max, class_id\n \"\"\"\n num_layers = len(self.anchors) // 3\n anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]\n true_boxes = np.array(true_boxes, dtype='float32')\n input_shape = np.array([self.input_shape, self.input_shape], dtype='int32')\n boxes_xy = (true_boxes[..., 0:2] + true_boxes[..., 2:4]) // 2.\n boxes_wh = true_boxes[..., 2:4] - true_boxes[..., 0:2]\n true_boxes[..., 0:2] = boxes_xy / input_shape[::-1]\n true_boxes[..., 2:4] = boxes_wh / input_shape[::-1]\n\n grid_shapes = [input_shape // 32, input_shape // 16, input_shape // 8]\n y_true = [np.zeros((grid_shapes[l][0], grid_shapes[l][1], len(anchor_mask[l]), 5 + self.num_classes), dtype='float32') for l in range(num_layers)]\n # 这里扩充维度是为了后面应用广播计算每个图中所有box的anchor互相之间的iou\n anchors = np.expand_dims(self.anchors, 0)\n anchors_max = anchors / 2.\n anchors_min = -anchors_max\n # 因为之前对box做了padding, 因此需要去除全0行\n valid_mask = boxes_wh[..., 0] > 0\n wh = boxes_wh[valid_mask]\n # 为了应用广播扩充维度\n wh = np.expand_dims(wh, -2)\n # wh 的shape为[box_num, 1, 2]\n boxes_max = wh / 2.\n boxes_min = -boxes_max\n\n intersect_min = np.maximum(boxes_min, anchors_min)\n intersect_max = np.minimum(boxes_max, anchors_max)\n intersect_wh = np.maximum(intersect_max - intersect_min, 0.)\n intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]\n box_area = wh[..., 0] * wh[..., 1]\n anchor_area = anchors[..., 0] * anchors[..., 1]\n iou = intersect_area / (box_area + anchor_area - intersect_area)\n\n # 
找出和ground truth box的iou最大的anchor box, 然后将对应不同比例的负责该ground turth box 的位置置为ground truth box坐标\n best_anchor = np.argmax(iou, axis = -1)\n for t, n in enumerate(best_anchor):\n for l in range(num_layers):\n if n in anchor_mask[l]:\n i = np.floor(true_boxes[t, 0] * grid_shapes[l][1]).astype('int32')\n j = np.floor(true_boxes[t, 1] * grid_shapes[l][0]).astype('int32')\n k = anchor_mask[l].index(n)\n c = true_boxes[t, 4].astype('int32')\n y_true[l][j, i, k, 0:4] = true_boxes[t, 0:4]\n y_true[l][j, i, k, 4] = 1.\n y_true[l][j, i, k, 5 + c] = 1.\n return y_true[0], y_true[1], y_true[2]\n\n\n\n def read_annotations(self):\n \"\"\"\n Introduction\n ------------\n 读取COCO数据集图片路径和对应的标注\n Parameters\n ----------\n data_file: 文件路径\n \"\"\"\n image_data = []\n boxes_data = []\n name_box_id = defaultdict(list)\n with open(self.annotations_file[self.mode], encoding='utf-8') as file:\n data = json.load(file)\n annotations = data['annotations']\n for ant in annotations:\n id = ant['image_id']\n name = os.path.join(self.data_file[self.mode], '%012d.jpg' % id)\n cat = ant['category_id']\n if cat >= 1 and cat <= 11:\n cat = cat - 1\n elif cat >= 13 and cat <= 25:\n cat = cat - 2\n elif cat >= 27 and cat <= 28:\n cat = cat - 3\n elif cat >= 31 and cat <= 44:\n cat = cat - 5\n elif cat >= 46 and cat <= 65:\n cat = cat - 6\n elif cat == 67:\n cat = cat - 7\n elif cat == 70:\n cat = cat - 9\n elif cat >= 72 and cat <= 82:\n cat = cat - 10\n elif cat >= 84 and cat <= 90:\n cat = cat - 11\n name_box_id[name].append([ant['bbox'], cat])\n\n for key in name_box_id.keys():\n boxes = []\n image_data.append(key)\n box_infos = name_box_id[key]\n for info in box_infos:\n x_min = info[0][0]\n y_min = info[0][1]\n x_max = x_min + info[0][2]\n y_max = y_min + info[0][3]\n boxes.append(np.array([x_min, y_min, x_max, y_max, info[1]]))\n boxes_data.append(np.array(boxes))\n\n return image_data, boxes_data\n\n\n def convert_to_tfrecord(self, tfrecord_path, num_tfrecords):\n \"\"\"\n Introduction\n ------------\n 将图片和boxes数据存储为tfRecord\n Parameters\n ----------\n tfrecord_path: tfrecord文件存储路径\n num_tfrecords: 分成多少个tfrecord\n \"\"\"\n image_data, boxes_data = self.read_annotations()\n images_num = int(len(image_data) / num_tfrecords)\n for index_records in range(num_tfrecords):\n output_file = os.path.join(tfrecord_path, str(index_records) + '_' + self.mode + '.tfrecords')\n with tf.python_io.TFRecordWriter(output_file) as record_writer:\n for index in range(index_records * images_num, (index_records + 1) * images_num):\n with tf.gfile.FastGFile(image_data[index], 'rb') as file:\n image = file.read()\n xmin, xmax, ymin, ymax, label = [], [], [], [], []\n for box in boxes_data[index]:\n xmin.append(box[0])\n ymin.append(box[1])\n xmax.append(box[2])\n ymax.append(box[3])\n label.append(box[4])\n example = tf.train.Example(features = tf.train.Features(\n feature = {\n 'image/encoded' : tf.train.Feature(bytes_list = tf.train.BytesList(value = [image])),\n 'image/object/bbox/xmin' : tf.train.Feature(float_list = tf.train.FloatList(value = xmin)),\n 'image/object/bbox/xmax': tf.train.Feature(float_list = tf.train.FloatList(value = xmax)),\n 'image/object/bbox/ymin': tf.train.Feature(float_list = tf.train.FloatList(value = ymin)),\n 'image/object/bbox/ymax': tf.train.Feature(float_list = tf.train.FloatList(value = ymax)),\n 'image/object/bbox/label': tf.train.Feature(float_list = tf.train.FloatList(value = label)),\n }\n ))\n record_writer.write(example.SerializeToString())\n if index % 1000 == 0:\n print('Processed {} of {} 
images'.format(index + 1, len(image_data)))\n\n\n def parser(self, serialized_example):\n \"\"\"\n Introduction\n ------------\n 解析tfRecord数据\n Parameters\n ----------\n serialized_example: 序列化的每条数据\n \"\"\"\n features = tf.parse_single_example(\n serialized_example,\n features = {\n 'image/encoded' : tf.FixedLenFeature([], dtype = tf.string),\n 'image/object/bbox/xmin' : tf.VarLenFeature(dtype = tf.float32),\n 'image/object/bbox/xmax': tf.VarLenFeature(dtype = tf.float32),\n 'image/object/bbox/ymin': tf.VarLenFeature(dtype = tf.float32),\n 'image/object/bbox/ymax': tf.VarLenFeature(dtype = tf.float32),\n 'image/object/bbox/label': tf.VarLenFeature(dtype = tf.float32)\n }\n )\n image = tf.image.decode_jpeg(features['image/encoded'], channels = 3)\n image = tf.image.convert_image_dtype(image, tf.uint8)\n xmin = tf.expand_dims(features['image/object/bbox/xmin'].values, axis = 0)\n ymin = tf.expand_dims(features['image/object/bbox/ymin'].values, axis = 0)\n xmax = tf.expand_dims(features['image/object/bbox/xmax'].values, axis = 0)\n ymax = tf.expand_dims(features['image/object/bbox/ymax'].values, axis = 0)\n label = tf.expand_dims(features['image/object/bbox/label'].values, axis = 0)\n bbox = tf.concat(axis = 0, values = [xmin, ymin, xmax, ymax, label])\n bbox = tf.transpose(bbox, [1, 0])\n image, bbox = self.Preprocess(image, bbox)\n bbox_true_13, bbox_true_26, bbox_true_52 = tf.py_func(self.Preprocess_true_boxes, [bbox], [tf.float32, tf.float32, tf.float32])\n return image, bbox, bbox_true_13, bbox_true_26, bbox_true_52\n\n def Preprocess(self, image, bbox):\n \"\"\"\n Introduction\n ------------\n 对图片进行预处理,增强数据集\n Parameters\n ----------\n image: tensorflow解析的图片\n bbox: 图片中对应的box坐标\n \"\"\"\n image_width, image_high = tf.cast(tf.shape(image)[1], tf.float32), tf.cast(tf.shape(image)[0], tf.float32)\n input_width = tf.cast(self.input_shape, tf.float32)\n input_high = tf.cast(self.input_shape, tf.float32)\n new_high = image_high * tf.minimum(input_width / image_width, input_high / image_high)\n new_width = image_width * tf.minimum(input_width / image_width, input_high / image_high)\n # 将图片按照固定长宽比进行padding缩放\n dx = (input_width - new_width) / 2\n dy = (input_high - new_high) / 2\n image = tf.image.resize_images(image, [tf.cast(new_high, tf.int32), tf.cast(new_width, tf.int32)], method = tf.image.ResizeMethod.BICUBIC)\n new_image = tf.image.pad_to_bounding_box(image, tf.cast(dy, tf.int32), tf.cast(dx, tf.int32), tf.cast(input_high, tf.int32), tf.cast(input_width, tf.int32))\n image_ones = tf.ones_like(image)\n image_ones_padded = tf.image.pad_to_bounding_box(image_ones, tf.cast(dy, tf.int32), tf.cast(dx, tf.int32), tf.cast(input_high, tf.int32), tf.cast(input_width, tf.int32))\n image_color_padded = (1 - image_ones_padded) * 128\n image = image_color_padded + new_image\n # 矫正bbox坐标\n xmin, ymin, xmax, ymax, label = tf.split(value = bbox, num_or_size_splits=5, axis = 1)\n xmin = xmin * new_width / image_width + dx\n xmax = xmax * new_width / image_width + dx\n ymin = ymin * new_high / image_high + dy\n ymax = ymax * new_high / image_high + dy\n bbox = tf.concat([xmin, ymin, xmax, ymax, label], 1)\n if self.mode == 'train':\n # 随机左右翻转图片\n def _flip_left_right_boxes(boxes):\n xmin, ymin, xmax, ymax, label = tf.split(value = boxes, num_or_size_splits = 5, axis = 1)\n flipped_xmin = tf.subtract(input_width, xmax)\n flipped_xmax = tf.subtract(input_width, xmin)\n flipped_boxes = tf.concat([flipped_xmin, ymin, flipped_xmax, ymax, label], 1)\n return flipped_boxes\n flip_left_right = 
tf.greater(tf.random_uniform([], dtype = tf.float32, minval = 0, maxval = 1), 0.5)\n image = tf.cond(flip_left_right, lambda : tf.image.flip_left_right(image), lambda : image)\n bbox = tf.cond(flip_left_right, lambda: _flip_left_right_boxes(bbox), lambda: bbox)\n # 将图片归一化到0和1之间\n image = image / 255.\n image = tf.clip_by_value(image, clip_value_min = 0.0, clip_value_max = 1.0)\n bbox = tf.clip_by_value(bbox, clip_value_min = 0, clip_value_max = input_width - 1)\n bbox = tf.cond(tf.greater(tf.shape(bbox)[0], config.max_boxes), lambda: bbox[:config.max_boxes], lambda: tf.pad(bbox, paddings = [[0, config.max_boxes - tf.shape(bbox)[0]], [0, 0]], mode = 'CONSTANT'))\n return image, bbox\n\n\n def build_dataset(self, batch_size):\n \"\"\"\n Introduction\n ------------\n 建立数据集dataset\n Parameters\n ----------\n batch_size: batch大小\n Return\n ------\n dataset: 返回tensorflow的dataset\n \"\"\"\n dataset = tf.data.TFRecordDataset(filenames = self.TfrecordFile)\n dataset = dataset.map(self.parser, num_parallel_calls = 10)\n if self.mode == 'train':\n dataset = dataset.repeat().shuffle(9000).batch(batch_size).prefetch(batch_size)\n else:\n dataset = dataset.repeat().batch(batch_size).prefetch(batch_size)\n return dataset\n"
] | [
[
"tensorflow.data.TFRecordDataset",
"tensorflow.VarLenFeature",
"tensorflow.image.flip_left_right",
"tensorflow.train.FloatList",
"tensorflow.concat",
"tensorflow.train.BytesList",
"tensorflow.split",
"tensorflow.minimum",
"tensorflow.gfile.Glob",
"tensorflow.gfile.FastGFile",
"numpy.expand_dims",
"tensorflow.clip_by_value",
"tensorflow.image.decode_jpeg",
"tensorflow.python_io.TFRecordWriter",
"tensorflow.FixedLenFeature",
"tensorflow.transpose",
"numpy.minimum",
"tensorflow.shape",
"tensorflow.subtract",
"tensorflow.ones_like",
"tensorflow.expand_dims",
"numpy.argmax",
"tensorflow.random_uniform",
"tensorflow.cast",
"tensorflow.image.convert_image_dtype",
"numpy.maximum",
"tensorflow.py_func",
"numpy.floor",
"numpy.array"
]
] |
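In the dataReader.py blob above, Preprocess_true_boxes matches each ground-truth box to its best anchor by centering both at the origin and computing IoU with numpy broadcasting. A self-contained sketch of just that step, with toy box and anchor sizes (the values are assumptions, not the repo's anchors), is:

import numpy as np

wh = np.array([[30., 60.], [100., 80.]])                   # ground-truth widths/heights
anchors = np.array([[10., 13.], [62., 45.], [116., 90.]])  # candidate anchor sizes

wh = np.expand_dims(wh, -2)              # shape [num_boxes, 1, 2] for broadcasting
anchors_e = np.expand_dims(anchors, 0)   # shape [1, num_anchors, 2]

boxes_max, boxes_min = wh / 2., -wh / 2.                   # boxes centered at the origin
anchors_max, anchors_min = anchors_e / 2., -anchors_e / 2.

intersect_wh = np.maximum(np.minimum(boxes_max, anchors_max)
                          - np.maximum(boxes_min, anchors_min), 0.)
intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
box_area = wh[..., 0] * wh[..., 1]
anchor_area = anchors_e[..., 0] * anchors_e[..., 1]
iou = intersect_area / (box_area + anchor_area - intersect_area)

best_anchor = np.argmax(iou, axis=-1)    # best-matching anchor index per box
print(iou.shape, best_anchor)            # (2, 3) [1 2]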
sebtac/MLxE | [
"93baa6b7c9fd14e54abd7199e868fb828e9a7c52"
] | [
"a3c_master_sewak.py"
] | [
"\"\"\" A3C in Code - Centralized/ Gobal Network Parameter Server/ Controller\n\nBased On:\n \nA3C Code as in the book Deep Reinforcement Learning, Chapter 12.\n\nRuntime: Python 3.6.5\nDependencies: numpy, matplotlib, tensorflow (/ tensorflow-gpu), gym\nDocStrings: GoogleStyle\n\nAuthor : Mohit Sewak ([email protected])\nInspired from: A3C implementation on TensorFLow official github repository (Tensorflow/models/research)\n\n**********************************************************************\n\nAdjusted by Seabstian Taciak as part of develeopment of MLxE Architecture\n\n@author: sebtac\n@contact: https://www.linkedin.com/in/sebastian-taciak-5893861/\n\n\"\"\"\n\n# SET BEFORE RUNNIG\n\n# AGENT TYPE\n# 0 - Sewak Base Agent (Fixed)\n# 1 - Sewak DNN Adjusted\n# 2 - Sewak \"Task\" Modified\n# 3 - Sewak ISTB (Iterative, Synchronous Thread Based)\n\nAgent_Type = 3\n\nlearning_rate = 0.0001\n\nimport multiprocessing\ncores = multiprocessing.cpu_count() # DEFAULT SETTING\n#cores = 1 # FOR DEBUGGING\n\n# GENERAL IMPORTS\nimport sys\nsys.path.append(r'C:\\Users\\surface\\Documents\\Python\\RL\\MLxE\\Mohit Sewak RL\\Mohit12_A3C')\nimport time\nimport winsound\nimport logging\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nlogging.basicConfig()\nlogger = logging.getLogger()\nlogger.setLevel(logging.DEBUG)\n\n# DEEP LEARING and ENVIRONEMENT RELATER IMPORTS\nimport tensorflow as tf\nimport tensorflow_addons as tfa # ST for DNN Adjustment\nimport gym\n\n# CUSTOM SEWAK's MODULES with OPTIONAL SEBTAC ADJUSTMENTS\nfrom experience_replay_sewak import SimpleListBasedMemory\n\nif Agent_Type == 0:\n from actorcritic_model_sewak import ActorCriticModel as ACModel # For Sewak Fixed version\n from a3c_worker_sewak_base import A3C_Worker # the intial Sewak's implementation with fixes of the Policy_Loss Calcultion\nelif Agent_Type == 1:\n from actorcritic_model_sewak import ActorCriticModel_Dimond as ACModel\n from a3c_worker_sewak_DNN_Adjusted import A3C_Worker\nelif Agent_Type == 2:\n from actorcritic_model_sewak import ActorCriticModel_Dimond as ACModel\n from a3c_worker_sewak_Task_Modifications import A3C_Worker\nelif Agent_Type == 3:\n from actorcritic_model_sewak import ActorCriticModel_DoubleDimond as ACModel\n from a3c_worker_sewak_ISTB import A3C_Worker\n\n# SEWAK's Implementation Fix\n\"\"\"\n- Policy Loss Calcualtion\n- Using actual play in example generation (was random)\n\"\"\"\n\n# DNN Adjustments\n\"\"\"\n- Adding monotonic decrease in Learing Rate relative to the number of episodes run with:\n self.alpha_power = 0.998\n self.alpha_limit = 0.000001\n- Modifying the Model to: common_network_size=[128,256,128], policy_network_size=[64,128,64], value_network_size=[64,128,64]\n- Changing the Optimizer to RectifiedAdam -- requaires tensorflow_addons\n- Changing Gamma coeffcient to 0.97\n\"\"\"\n\n# Task Specific Modifications\n\"\"\"\n- Modified state representation with addition of 5th parameter representing the squared distance of the cart from the center of the plane\n- Adverse Initial Position\n- Negative Reward: -10.0 (originally 0.0)\n- Monotonically Decreasing Discount Factor (Gamma Coefficent)\n- Goal Specific Reward for cart being close to center of the pland and the pole being close to vertical\n\"\"\"\n\nclass A3C_Master():\n \"\"\"A3C Master\n\n Centralized Master class of A3C used for hosting the global network parameters and spawning the agents.\n\n Args:\n env_name (str): Name of a valid gym environment\n model_dir (str): Directory for saving the model during training, 
and loading the same while playing\n learning_rate (float): The learning rate (alpha) for the optimizer\n\n Examples:\n agent = A3C_Master()\n agent.train()\n agent.play()\n\n \"\"\"\n\n def __init__(self, Agent_Type=Agent_Type, env_name='CartPole-v0', model_dir=\"models\", learning_rate=learning_rate): #ST 0.001 for Fixed, 0.0001 otherwise \n self.env_name = env_name\n self.model_dir = model_dir\n self.alpha = learning_rate\n if not os.path.exists(model_dir):\n os.makedirs(model_dir)\n self.env = gym.make(self.env_name)\n self.action_size = self.env.action_space.n\n \n if Agent_Type <= 1:\n self.state_size = self.env.observation_space.shape[0] # For None TaH imlementations\n elif Agent_Type == 2:\n self.state_size = self.env.observation_space.shape[0] + 1 # ST for TaH implementation\n elif Agent_Type == 3:\n self.state_size = self.env.observation_space.shape[0] + 1 # ST for TaH implementation\n \n if Agent_Type == 0:\n self.optimizer = tf.keras.optimizers.Adam(self.alpha)\n else:\n self.optimizer = tfa.optimizers.RectifiedAdam(self.alpha) # ST DNN Adjustment\n \n logger.debug(\"StateSize:{}, ActionSize:{}\".format(self.state_size, self.action_size))\n self.master_model = ACModel(self.action_size)\n self.master_model(tf.convert_to_tensor(np.random.random((1, self.state_size)), dtype=tf.float32))\n\n def train(self, cores):\n \"\"\"Train the A3C agent\n Main function to train the A3C agent after instantiation.\n\n This method uses the number of processor cores to spawns as many Workers. The workers are spawned as\n multiple parallel threads instead of multiple parallel processes. Being a threaded execution, the workers\n share memory and hence can write directly into the shared global variables.\n\n A more optimal, completely asynchronous implementation could be to spawn the workers as different processes\n using a task queue or multiprocessing. In case if this is adopted, then the shared variables need to made\n accessible in the distributed environment.\n\n \"\"\"\n\n a3c_workers = [A3C_Worker(self.master_model, \n self.optimizer, \n i, \n self.env_name, \n self.model_dir, \n workers_num = cores, \n learning_rate = learning_rate)\n for i in range(cores)]\n for i, worker in enumerate(a3c_workers):\n logger.info(\"Starting worker {}\".format(i))\n worker.start()\n [worker.join() for worker in a3c_workers]\n self.plot_training_statistics()\n\n def play(self):\n \"\"\"Play the environment using a trained agent\n\n This function opens a (graphical) window that will play a trained agent. 
The function will try to retrieve\n the model saved in the model_dir with filename formatted to contain the associated env_name.\n If the model is not found, then the function will first call the train function to start the training.\n\n \"\"\"\n env = self.env.unwrapped\n state = env.reset()\n model = self.master_model\n model_path = os.path.join(self.model_dir, 'model_{}.h5'.format(self.env_name))\n if not os.path.exists(model_path):\n logger.info('A3CMaster: No model found at {}, starting fresh training before playing!'.format(model_path))\n self.train()\n logger.info('A3CMaster: Playing env, Loading model from: {}'.format(model_path))\n print(\"Model Path:\", model_path)\n #model.load_weights(model_path)\n done = False\n step_counter = 0\n reward_sum = 0\n try:\n while not done:\n env.render(mode='rgb_array')\n policy, value = model(tf.convert_to_tensor(state[None, :], dtype=tf.float32))\n policy = tf.nn.softmax(policy)\n action = np.argmax(policy)\n state, reward, done, _ = env.step(action)\n reward_sum += reward\n logger.info(\"{}. Reward: {}, action: {}\".format(step_counter, reward_sum, action))\n step_counter += 1\n except KeyboardInterrupt:\n print(\"Received Keyboard Interrupt. Shutting down.\")\n finally:\n env.close()\n\n def plot_training_statistics(self, training_statistics=None):\n \"\"\"Plot training statistics\n\n This function plot the training statistics like the steps, rewards, discounted_rewards, and loss in each\n of the training episode.\n\n \"\"\"\n training_statistics = A3C_Worker.global_shared_training_stats if training_statistics is None \\\n else training_statistics\n all_episodes = []\n all_steps = []\n all_rewards = []\n all_discounted_rewards = []\n all_losses = []\n for stats in training_statistics:\n worker, episode, steps, reward, discounted_rewards, loss = stats\n all_episodes.append(episode)\n all_steps.append(steps)\n all_rewards.append(reward)\n all_discounted_rewards.append(discounted_rewards)\n all_losses.append(loss)\n self._make_double_axis_plot(all_episodes, all_steps, all_rewards)\n self._make_double_axis_plot(all_episodes,all_discounted_rewards,all_losses, label_y1=\"Discounted Reward\",\n label_y2=\"Loss\", color_y1=\"cyan\", color_y2=\"black\")\n \n np.savetxt('run.csv', all_steps, delimiter=',', fmt='%d')\n\n @staticmethod\n def _make_double_axis_plot(data_x, data_y1, data_y2, x_label='Episodes (e)', label_y1='Steps To Episode Completion',\n label_y2='Reward in each Episode', color_y1=\"red\", color_y2=\"blue\"):\n \"\"\"Internal helper function for plotting dual axis plots\n \"\"\"\n fig, ax1 = plt.subplots()\n ax1.set_xlabel(x_label)\n ax1.set_ylabel(label_y1, color=color_y1)\n ax1.plot(data_x, data_y1, color=color_y1)\n ax2 = ax1.twinx()\n ax2.set_ylabel(label_y2, color=color_y2)\n ax2.plot(data_x, data_y2, color=color_y2)\n fig.tight_layout()\n plt.show()\n\n\nif __name__ == \"__main__\":\n \"\"\"Main function for testing the A3C Master code's implementation\n \"\"\"\n agent = A3C_Master(Agent_Type=Agent_Type)\n agent.train(cores)\n #agent.play()\n \n for i in range(10):\n winsound.Beep(500,500)\n"
] | [
[
"tensorflow.keras.optimizers.Adam",
"numpy.savetxt",
"matplotlib.pyplot.subplots",
"numpy.argmax",
"numpy.random.random",
"matplotlib.pyplot.show",
"tensorflow.convert_to_tensor",
"tensorflow.nn.softmax"
]
] |
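a3c_master_sewak.py above reports training progress with a dual-axis figure built in _make_double_axis_plot. The twinx pattern it relies on can be reproduced on its own with synthetic data (the numbers below are placeholders, not training results):

import matplotlib.pyplot as plt

episodes = list(range(1, 51))
steps = [10 + e for e in episodes]       # synthetic steps-per-episode curve
rewards = [0.5 * e for e in episodes]    # synthetic reward-per-episode curve

fig, ax1 = plt.subplots()
ax1.set_xlabel('Episodes (e)')
ax1.set_ylabel('Steps To Episode Completion', color='red')
ax1.plot(episodes, steps, color='red')

ax2 = ax1.twinx()                        # second y-axis sharing the same x-axis
ax2.set_ylabel('Reward in each Episode', color='blue')
ax2.plot(episodes, rewards, color='blue')

fig.tight_layout()
plt.show()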
geflaspohler/deep-OTD | [
"0daec276669776952b5142149007175b8a3c4d87"
] | [
"examples/cdv/plttraj.py"
] | [
"import numpy as np\nimport matplotlib\nfrom matplotlib import pyplot as plt\n\nmatplotlib.rcParams['mathtext.fontset'] = 'stix'\nmatplotlib.rcParams['font.size'] = 9\n\nndim = 6\ndata = np.genfromtxt('dOTD_tst1.out')\n\nxticks = [900, 1100, 1300]\nyticks = [[0.7, 0.8, 0.9, 1],\n [-0.2, 0, 0.2, 0.4],\n [-0.5, 0, 0.5],\n [-1, -0.5, 0],\n [-0.5, 0, 0.5],\n [-0.5, 0, 0.5, 1]]\n\ndef latexify(ticklabels):\n \"\"\"Manually set LaTeX format for tick labels.\"\"\"\n return [r\"$\" + str(label) + \"$\" for label in ticklabels]\n \nfor ii in range(ndim):\n fig = plt.figure(figsize=(2.2,1.3), constrained_layout=True)\n fig.set_constrained_layout_pads(w_pad=0, h_pad=0)\n ax = plt.axes()\n plt.plot(data[:,0], data[:,ii+1], 'k-', linewidth=0.75)\n plt.xlabel('$t$')\n plt.ylabel('$z_{' + str(ii+1) + '}$')\n plt.xlim(xticks[0], xticks[-1])\n plt.ylim(yticks[ii][0], yticks[ii][-1])\n ax.set_xticks(xticks)\n ax.set_yticks(yticks[ii])\n ax.set_xticklabels(latexify(xticks))\n ax.set_yticklabels(latexify(yticks[ii]))\n ax.yaxis.set_label_coords(-0.2, 0.5)\n ax.tick_params(direction='in', length=2)\n plt.savefig('traj' + str(ii+1) + '.pdf')\n\n\n"
] | [
[
"matplotlib.pyplot.figure",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.plot",
"numpy.genfromtxt",
"matplotlib.pyplot.xlabel"
]
] |
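plttraj.py above hand-formats tick labels as mathtext strings via latexify before saving each trajectory panel. A reduced version of that pattern on a single synthetic trace (the sine data is a stand-in for dOTD_tst1.out) looks like:

import numpy as np
import matplotlib.pyplot as plt

def latexify(ticklabels):
    # wrap each label in $...$ so matplotlib renders it with mathtext
    return [r"$" + str(label) + "$" for label in ticklabels]

t = np.linspace(900, 1300, 200)
z = np.sin(t / 50.0)

fig, ax = plt.subplots(figsize=(2.2, 1.3), constrained_layout=True)
ax.plot(t, z, 'k-', linewidth=0.75)
xticks = [900, 1100, 1300]
ax.set_xticks(xticks)
ax.set_xticklabels(latexify(xticks))
ax.set_xlabel('$t$')
ax.set_ylabel('$z_{1}$')
plt.savefig('traj_example.pdf')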
nate-russell/Jungle | [
"114d744ed66fec11b8d5e62444253892a7ffa5cd"
] | [
"jungle/code/sorting.py"
] | [
"'''\nSorting Examples for showcasing and developing Jungle features\n'''\nimport inspect\nfrom jungle import JungleExperiment, JungleProfiler\nimport numpy as np\n\nprint('Finished Loading Modules')\n\nclass Sorting_Prototype:\n\n print('\\n---Test Sort N---')\n @JungleExperiment(reps=1, n=[100, 500])\n def test_sort_n(self, n=100, seed=1234):\n ''' Test sorting an iterable of size n with a random distribution '''\n # make data to sort with random distribution\n np.random.seed(seed)\n list_2_sort = list(np.random.randn(n))\n\n @JungleProfiler()\n def sort_n(l):\n sorted_list = self.sort(l)\n return sorted_list\n\n # Sort and check sort status\n sorted_list, _ = sort_n(list_2_sort)\n sort_status = all(sorted_list[i] <= sorted_list[i + 1] for i in range(len(sorted_list) - 1))\n return sort_status\n\n print('\\n---Test Block Sort---')\n @JungleExperiment(reps=1, n_blocks=[2, 4], block_size=[50, 100])\n @JungleProfiler()\n def test_block_random_sort(self, n_blocks=4, block_size=100):\n print('n_blocks: %s' % n_blocks)\n print('block_size: %s' % block_size)\n return 'something'\n\n\n\n\n\n\nclass NP_QuickSort(Sorting_Prototype):\n\n def sort(self, l):\n return np.sort(l, kind='quicksort')\n\n\nclass NP_MergeSort(Sorting_Prototype):\n\n def sort(self, l):\n return np.sort(l, kind='mergesort')\n\n\nclass NP_HeapSort(Sorting_Prototype):\n\n def sort(self, l):\n return np.sort(l, kind='heapsort')\n\n\nif __name__ == '__main__':\n print('\\n__main__\\n')\n\n print('\\n---Starting Call #1---')\n m1 = NP_QuickSort()\n jc1 = m1.test_sort_n()\n\n print('\\n---Starting Call #2---')\n m2 = NP_MergeSort()\n jc2 = m2.test_sort_n()\n\n print('\\n---Starting Call #3---')\n m1 = NP_QuickSort()\n jc1 = m1.test_block_random_sort()\n\n print('\\n---Starting Call #4---')\n m2 = NP_MergeSort()\n jc2 = m2.test_block_random_sort()\n"
] | [
[
"numpy.sort",
"numpy.random.seed",
"numpy.random.randn"
]
] |
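sorting.py above routes every sort through np.sort with a different kind argument and then checks that the output is monotonically non-decreasing. Stripped of the JungleExperiment/JungleProfiler decorators, the core comparison is simply:

import numpy as np

np.random.seed(1234)
data = np.random.randn(100)

for kind in ('quicksort', 'mergesort', 'heapsort'):
    out = np.sort(data, kind=kind)
    ok = all(out[i] <= out[i + 1] for i in range(len(out) - 1))
    print(kind, 'sorted:', ok)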
gmathez/Project_ADA_2018_Bruttin_Mathez_Petitpierre | [
"e237300b3d9fb966b0eb747dd66816cc6cfc11b3"
] | [
"main.py"
] | [
"# Import kivy tools\nfrom kivy.app import App\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.uix.recycleboxlayout import RecycleBoxLayout\nfrom kivy.uix.label import Label\nfrom kivy.uix.button import Button\nfrom kivy.uix.checkbox import CheckBox\nfrom kivy.uix.spinner import Spinner\nfrom kivy.uix.recycleview import RecycleView\nfrom kivy.uix.recycleview.views import RecycleDataViewBehavior\nfrom kivy.uix.behaviors import FocusBehavior\nfrom kivy.uix.recycleview.layout import LayoutSelectionBehavior\nfrom kivy.properties import BooleanProperty, ObjectProperty\nfrom kivy.uix.screenmanager import ScreenManager, Screen\nfrom kivy.lang import Builder\n\n# Import the kv files\nBuilder.load_file('./src/rv.kv')\nBuilder.load_file('./src/screenhome.kv')\nBuilder.load_file('./src/screenprofile.kv')\nBuilder.load_file('./src/screensettings.kv')\nBuilder.load_file('./src/screenproduct.kv')\nBuilder.load_file('./src/screenquantities.kv')\nBuilder.load_file('./src/screenfinal.kv')\nBuilder.load_file('./src/manager.kv')\n\n# Other imports\nimport pandas as pd\nimport re\nfrom Algo_main import algo # Import the algorithm for NutriScore computation\n\nclass SelectableRecycleBoxLayout(FocusBehavior, LayoutSelectionBehavior,\n RecycleBoxLayout):\n ''' Add selection and focus behaviour to the view '''\n pass\n\nclass SelectableGrid(RecycleDataViewBehavior, GridLayout):\n ''' Add selection support to the Label '''\n\n index = None\n selected = BooleanProperty(False)\n selectable = BooleanProperty(True)\n\n def refresh_view_attrs(self, rv, index, data):\n ''' Catch and handle the view changes '''\n\n self.index = index\n self.ids['id_label1'].text = data['label1']['text']\n self.ids['id_label2'].text = data['label2']['text']\n self.ids['id_label3'].text = data['label3']['text']\n return super(SelectableGrid, self).refresh_view_attrs(\n rv, index, data)\n\n def on_touch_down(self, touch):\n ''' Add selection on touch down '''\n\n if super(SelectableGrid, self).on_touch_down(touch):\n return True\n\n if self.collide_point(*touch.pos) and self.selectable:\n return self.parent.select_with_touch(self.index, touch)\n\n def apply_selection(self, rv, index, is_selected):\n ''' Respond to the selection of items '''\n\n self.selected = is_selected \n \n\nclass SelectableQuantity(RecycleDataViewBehavior, GridLayout):\n ''' Add selection support to the Label '''\n\n index = None\n selected = BooleanProperty(False)\n selectable = BooleanProperty(True)\n\n def refresh_view_attrs(self, rv, index, data):\n ''' Catch and handle the view changes '''\n\n self.index = index\n self.ids['id_label1'].text = data['label1']['text']\n self.ids['id_label2'].text = data['label2']['text']\n self.ids['id_label3'].text = data['label3']['text']\n return super(SelectableQuantity, self).refresh_view_attrs(\n rv, index, data) \n\nclass RV(RecycleView):\n ''' Class for the RecycleView Controller '''\n\n def __init__(self, **kwargs):\n super(RV, self).__init__(**kwargs)\n\n def upload(self, query, active):\n ''' Search data according to the user input '''\n\n # Reset data\n self.data = []\n\n # Check if the Raw Food CheckBox is active or not\n if active:\n self.parent.parent.getSelection('API', query, True)\n self.data = [{'label1': {'text': 'API'}, 'label2': {'text': query}, 'label3': {'text': 'Add/Remove'}}]\n \n else:\n isinside = allTrue\n for item in query.split(): # Split the query in keywords\n isinside = isinside & \\\n (DF['product_name'].str.contains(item, case=False) | \\\n 
DF['Brands'].str.contains(item, case=False))\n\n if any(isinside):\n selection = DF[isinside] # Select products to display\n \n for row in selection.itertuples(): # Iterate through the columns of DF\n d = {'label1': {'text': str(row[0])}, \\\n 'label2': {'text': str(row[1])},\n 'label3': {'text': str(row[-1])}} # barcode, product_name, brand\n self.data.append(d)\n else:\n isinside = DF.index.str.contains(query, case=False) # Search for Barcode\n\n if any(isinside):\n selection = DF[isinside]\n\n for row in selection.itertuples():\n d = {'label1': {'text': str(row[0])}, \\\n 'label2': {'text': str(row[1])},\n 'label3': {'text': str(row[-1])}} # barcode, product_name, brand\n self.data.append(d) \n\n else:\n # In case no product is found\n self.data = [{'label1': {'text': ''}, \\\n 'label2': {'text': 'No product found'}, 'label3': {'text': ''}}]\n def getQuantities(self, dict):\n ''' Gather data for display on Quantities Screen '''\n\n self.data = []\n code = dict['code']\n product_name = dict['product_name']\n quantity = dict['quantity']\n\n for index in range(len(code)):\n d = {'label1': {'text': code[index]}, 'label2': {'text': product_name[index]}, \\\n 'label3': {'text': quantity[index]}}\n self.data.append(d)\n\nclass ScreenHome(Screen):\n ''' Class for the Home Screen. No variables or functions needed for this screen '''\n pass\n\nclass ScreenProfile(Screen):\n ''' Class for the Profile Screen '''\n\n def updateDF(self):\n global DF\n DF = pd.read_csv('https://drive.google.com/uc?export=download&id=1aLUh1UoQcS9lBa6oVRln-DuskxK5uK3y', \\\n index_col=[0], low_memory = False)\n\n DF.to_csv('./data/OpenFoodFacts_final.csv.gz', compression='gzip')\n self.ids['update'].text = 'Updated'\n self.ids['update'].background_color = (0,1,0,1)\n\n def update(self):\n self.ids['update'].text = 'Updating'\n self.ids['update'].background_color = (50/255,164/255,206/255,1) \n\n\nclass ScreenSettings(Screen):\n ''' Class for the Settings Screen '''\n\n settings = {'rec': True,'name': '', 'surname': '', 'age': 0, 'sex': True, 'weight': 0, \\\n 'email': '', 'activity': 0, 'days': 0}\n id_profile = -999\n\n def resetForm(self):\n ''' Reset the indicators of invalid input '''\n\n self.ids.sex.color = (1,1,1,1)\n self.ids.activity.color = (1,1,1,1)\n self.ids.age.hint_text_color = (0.5, 0.5, 0.5, 1.0)\n self.ids.weight.hint_text_color = (0.5, 0.5, 0.5, 1.0)\n self.ids.days.hint_text_color = (0.5, 0.5, 0.5, 1.0)\n self.ids.email.hint_text_color = (0.5, 0.5, 0.5, 1.0)\n self.ids.name.hint_text_color = (0.5, 0.5, 0.5, 1.0)\n self.ids.surname.hint_text_color = (0.5, 0.5, 0.5, 1.0)\n\n def setForm(self, id_profile):\n self.id_profile = id_profile\n self.settings = {'rec': True,'name': '', 'surname': '', 'age': 0, 'sex': True, 'weight': 0, \\\n 'email': '', 'activity': 0, 'days': 0}\n\n if int(self.id_profile) >= 0:\n self.ids.name.text = str(profile_list.iloc[self.id_profile]['name'])\n self.ids.surname.text= str(profile_list.iloc[self.id_profile]['surname'])\n self.ids.age.text = str(profile_list.iloc[self.id_profile]['age'])\n if bool(profile_list.iloc[self.id_profile]['sex']):\n self.ids.male.active = True\n self.ids.female.active = False\n\n else:\n self.ids.male.active = False\n self.ids.female.active = True\n\n self.ids.weight.text = str(profile_list.iloc[self.id_profile]['weight'])\n self.ids.email.text = str(profile_list.iloc[self.id_profile]['email'])\n self.ids.days.text = str(profile_list.iloc[self.id_profile]['days'])\n if int(profile_list.iloc[self.id_profile]['activity']) == 1.8:\n 
self.ids.seated.active = False\n self.ids.both.active = False\n self.ids.standing.active = True\n\n elif int(profile_list.iloc[self.id_profile]['activity']) == 1.6:\n self.ids.seated.active = False\n self.ids.both.active = True\n self.ids.standing.active = False\n\n else:\n self.ids.seated.active = True\n self.ids.both.active = False\n self.ids.standing.active = False\n elif int(self.id_profile) == -999:\n self.ids.name.text = ''\n self.ids.surname.text = ''\n self.ids.age.text = ''\n self.ids.male.active = False\n self.ids.female.active = False\n self.ids.email.text = ''\n self.ids.weight.text = ''\n self.ids.seated.active = False\n self.ids.both.active = False\n self.ids.standing.active = False\n self.ids.days.text = ''\n else:\n self.changeScreen(False)\n\n def changeScreen(self, valid):\n ''' Handle the validity of the inputs and the change of current screen '''\n\n if valid:\n self.resetForm()\n # Check name validity\n if self.ids.name.text.strip() == '':\n self.ids.name.hint_text_color = (1,0,0,1)\n return False\n # Check surname validity\n elif self.ids.surname.text.strip() == '':\n self.ids.surname.hint_text_color = (1,0,0,1)\n return False\n # Check age validity\n elif self.ids.age.text.strip() == '' or int(self.ids.age.text) <= 0 or \\\n int(self.ids.age.text) >= 120:\n self.ids.age.text = ''\n self.ids.age.hint_text_color = (1,0,0,1)\n return False\n # Check sex validity\n elif not(self.ids.male.active or self.ids.female.active):\n self.ids.sex.color = (1,0,0,1) \n return False\n # Check email validity\n elif not re.match(r\"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$)\", self.ids.email.text):\n self.ids.email.text = ''\n self.ids.email.hint_text_color = (1,0,0,1)\n return False\n # Check weight validity\n elif self.ids.weight.text.strip() == '' or int(self.ids.weight.text) <= 0:\n self.ids.weight.text = ''\n self.ids.weight.hint_text_color = (1,0,0,1)\n return False \n # Check activity validity\n elif not(self.ids.seated.active or self.ids.both.active or self.ids.standing.active):\n self.ids.activity.color = (1,0,0,1)\n return False\n # Check days validity\n elif self.ids.days.text.strip() == '' or int(self.ids.days.text) <= 0:\n self.ids.days.text = ''\n self.ids.days.hint_text_color = (1,0,0,1)\n return False\n \n else: # Validation of the form and reset\n self.settings['rec'] = True\n self.settings['name'] = self.ids.name.text\n self.settings['surname'] = self.ids.surname.text\n self.settings['age'] = int(self.ids.age.text)\n self.settings['weight'] = int(self.ids.weight.text)\n self.settings['email'] = self.ids.email.text\n self.settings['days'] = int(self.ids.days.text)\n self.settings['sex'] = self.ids.male.active\n\n if self.ids.seated.active:\n self.settings['activity'] = 1.4\n\n if self.ids.both.active:\n self.settings['activity'] = 1.6\n\n if self.ids.standing.active:\n self.settings['activity'] = 1.8\n\n self.resetForm()\n\n else: # If the user pass the settings screen\n self.settings['rec'] = False\n\n self.manager.setSettings(self.settings, self.id_profile)\n # Change the current screen\n self.manager.current = 'Product Screen'\n\nclass ScreenProduct(Screen):\n ''' Class for the Product Screen '''\n\n temp_dict = {'code':'', 'product_name': ''}\n\n def getSelection(self, text1, text2, state):\n # Select or deselect temporarly a product\n if state:\n self.temp_dict['code'] = text1\n self.temp_dict['product_name'] = text2\n\n else:\n self.temp_dict['code'] = ''\n self.temp_dict['product_name'] = ''\n\nclass ScreenQuantities(Screen):\n ''' Class for the 
Quantities Screen '''\n\n temp_dict = {'code': [], 'product_name': [], 'quantity': [], 'color': []}\n\n def initQuantity(self, data):\n ''' Initialize the dictionary of the products '''\n\n if self.temp_dict['quantity'] == []:\n self.temp_dict = data\n\n self.ids.rv.getQuantities(data)\n\n def updateQuantity(self, index, text1, text2, text3): \n ''' Store the quantities input by the user '''\n\n l = len(self.temp_dict['quantity'])\n\n if text3 == '' or text3 == '-' or int(text3) < 0:\n text3 = '0'\n\n if index < l:\n self.temp_dict['code'][index] = text1\n self.temp_dict['product_name'][index] = text2\n self.temp_dict['quantity'][index] = text3\n \n # Append the list of quantities if needed\n else:\n temp = ['0' for i in range(index-l)] \n self.temp_dict['code'] = self.temp_dict['code'] + temp + [text1]\n self.temp_dict['product_name'] = self.temp_dict['product_name'] + temp + [text2]\n self.temp_dict['quantity'] = self.temp_dict['quantity'] + temp + [text3]\n\n # Update the data displayed\n self.initQuantity(self.temp_dict)\n\nclass ScreenFinal(Screen):\n ''' Class for the Final Screen. No variables or functions needed for this screen '''\n pass\n\nclass Manager(ScreenManager):\n ''' Class for the Manager Controller. Store main data '''\n selected_products = {'code': [], 'product_name': [], 'quantity': []}\n settings = {'Rec': True, 'Name': '', 'Surname': '', 'Email': '', 'Age': 0, 'Sex': True, 'Pal': 0, \\\n 'Weight': 0, 'Day': 0}\n\n def getProfiles(self):\n self.ids.screen_profile.ids.profile_spinner.values = \\\n [str(index + 1) + ' : ' + str(profile_list['name'][index]) + ' ' + str(profile_list['surname'][index]) \\\n for index in profile_list.index]\n\n def toSettings(self, text):\n if text == 'new':\n id_profile = -999\n elif text == 'pass':\n id_profile = -1000\n else:\n items = text.split()\n id_profile = items[0].strip()\n id_profile = int(id_profile) - 1\n\n self.ids.screen_settings.setForm(id_profile)\n if id_profile != -1000:\n self.current = 'Settings Screen'\n \n\n def addProduct(self):\n ''' Add product to main storage '''\n item1 = self.ids.screen_product.temp_dict['code']\n item2 = self.ids.screen_product.temp_dict['product_name']\n\n if item1 != '' and item2 != '':\n self.selected_products['code'].append(item1)\n self.selected_products['product_name'].append(item2)\n self.selected_products['quantity'].append('0')\n\n def deleteProduct(self):\n ''' Remove product of main storage '''\n item1 = self.ids.screen_product.temp_dict['code']\n item2 = self.ids.screen_product.temp_dict['product_name']\n\n if item1 in self.selected_products['code'] and item2 in self.selected_products['product_name']:\n self.selected_products['code'].remove(item1)\n self.selected_products['product_name'].remove(item2)\n self.selected_products['quantity'].pop()\n\n def getQuantities(self, data):\n ''' Add quantities to main storage '''\n\n self.selected_products['quantity'] = data['quantity']\n l = len(self.selected_products['quantity'])\n\n for item in range(l):\n\n if self.selected_products['quantity'][item] == '':\n self.selected_products['quantity'][item] = '0'\n \n self.current = 'Final Screen'\n\n def setSettings(self, data, new):\n ''' Add settings to main storage '''\n\n self.settings['Rec'] = data['rec']\n self.settings['Name'] = data['name']\n self.settings['Surname'] = data['surname']\n self.settings['Email'] = data['email']\n self.settings['Pal'] = data['activity']\n self.settings['Weight'] = data['weight']\n self.settings['Day'] = data['days']\n self.settings['Sex'] = data['sex']\n 
self.settings['Age'] = data['age']\n \n update = True\n\n if new == -999:\n temp_df = pd.DataFrame.from_dict({'index': [len(profile_list)], \\\n 'name': [data['name']], 'surname': [data['surname']], \\\n 'age': [data['age']], 'sex': [data['sex']], 'email': [data['email']], \\\n 'weight': [data['weight']], \\\n 'activity': [data['activity']], 'days': [data['days']]}).set_index('index')\n new_profile_list = pd.concat([profile_list, temp_df]) \n elif new == -1000:\n update = False\n else:\n temp_df = pd.DataFrame.from_dict({'name': [data['name']], 'surname': [data['surname']], \\\n 'age': [data['age']], 'sex': [data['sex']], 'email': [data['email']], 'weight': [data['weight']], \\\n 'activity': [data['activity']], 'days': [data['days']]})\n new_profile_list= profile_list\n new_profile_list.iloc[new] = temp_df.iloc[0]\n\n if update:\n new_profile_list.to_csv('./data/profile.csv', sep=';')\n\n\n def computation(self):\n ''' Call algo for computation of NutriScore and recommendation. Display results '''\n dict_product = {'Product': [], 'API': []}\n\n for index in range(len(self.selected_products['code'])):\n \n # Separation of API and OpenFoodFacts data\n if str(self.selected_products['code'][index]) == 'API':\n dict_product['API'].append((str(self.selected_products[\n 'product_name'][index]), int(self.selected_products['quantity'][index])))\n \n else:\n dict_product['Product'].append((str(self.selected_products[\n 'code'][index]), int(self.selected_products['quantity'][index])))\n\n # Run the algorithm to get the recommendation to print on-screen\n text_app_beverages, text_app_nonbeverages = algo(dict_product, self.settings, DF)\n self.ids.screen_final.ids.beverages.text = text_app_beverages\n self.ids.screen_final.ids.non_beverages.text = text_app_nonbeverages\n\nclass NutriScoreApp(App):\n ''' Main class of the App '''\n\n def build(self):\n ''' Import the database for the whole application '''\n global DF, allTrue, profile_list\n\n try:\n DF = pd.read_csv('./data/OpenFoodFacts_final.csv.gz', low_memory=False, index_col = [0])\n allTrue = DF['product_name'].str.contains('', case=False) # True Vector of length len(DF)\n profile_list = pd.read_csv('./data/profile.csv', sep=';', index_col=[0])\n\n except:\n print('Fatal error: files missing') \n \n return Manager()\n\nif __name__ == '__main__':\n NutriScoreApp().run()\n\n"
] | [
[
"pandas.read_csv",
"pandas.concat",
"pandas.DataFrame.from_dict"
]
] |
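main.py above validates the email field of the settings form with a single re.match call. The same pattern can be exercised in isolation (the sample addresses are made up):

import re

EMAIL_RE = r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)"

for candidate in ("[email protected]", "missing-at-sign.com", "[email protected]"):
    print(candidate, bool(re.match(EMAIL_RE, candidate)))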
edervishaj/spotify-recsys-challenge | [
"5e7844a77ce3c26658400f161d2d74d682f30e69"
] | [
"personal/Ervin/run_knn_collaborative_item.py"
] | [
"from utils.datareader import Datareader\nfrom utils.evaluator import Evaluator\nfrom utils.submitter import Submitter\nfrom utils.post_processing import eurm_to_recommendation_list_submission\nfrom utils.post_processing import eurm_to_recommendation_list\nfrom utils.pre_processing import norm_l1_row, norm_max_row, norm_max_col\nfrom recommenders.knn_collaborative_item import Knn_collaborative_item\nimport recommenders.similarity.similarity as sm\nimport scipy.sparse as sps\nimport sys\nimport numpy as np\nfrom personal.Ervin.other_similarity import position_similarity\n\n\n\n'''\nThis file contains just an example on how to run the algorithm.\nThe parameter used are just the result of a first research of the optimum value.\nTo run this file just set the parameter at the start of the main function or set from console as argv parameter.\nAs argv you can even set mode of execution (online, offline) and the name of the result file\n'''\nif __name__ == '__main__':\n\n ### Select execution mode: 'offline', 'online' ###\n mode = \"offline\"\n name = \"CFitem\"\n knn = 200\n topk = 750\n\n if len(sys.argv) > 1:\n mode = sys.argv[1]\n name = sys.argv[2]\n knn = int(sys.argv[3])\n topk = int(sys.argv[4])\n\n complete_name = mode+\"_\"+name+\"_knn=\"+str(knn)+\"_topk=\"+str(topk)\n\n if mode == \"offline\":\n\n \"\"\"Test Set\"\"\"\n #Data initialization\n dr = Datareader(verbose=True, mode=mode, only_load=True)\n\n #Evaluetor initialization\n ev = Evaluator(dr)\n\n #Recommender algorithm initialization\n rec = Knn_collaborative_item()\n\n #Getting for the recommender algorithm\n urm = dr.get_urm()\n urm.data = np.ones(len(urm.data))\n position_urm = dr.get_position_matrix(position_type='last')\n pos_urm = position_urm.T.tocoo().tocsr()\n pid = dr.get_test_pids()\n\n #Fitting data\n rec.fit(urm, pid)\n\n #Computing similarity/model\n rec.compute_model(top_k= knn, sm_type=sm.TVERSKY, shrink=200, alpha=0.1, beta=1, binary=True, verbose=True)\n rec.model = rec.model.tocsr()\n rec.model.eliminate_zeros()\n # rec.model = norm_max_row(rec.model)\n\n print('Initial model has {:2} data'.format(len(rec.model.data)))\n\n print('[ Updating the model ]')\n rec.model = position_similarity(rec.model, pos_urm, knn=knn, verbose=True)\n rec.model.eliminate_zeros()\n\n print('New model has {:2} data'.format(len(rec.model.data)))\n\n #Computing ratings\n rec.compute_rating(top_k=topk,verbose=True, small=True, remove_seed=False)\n\n #evaluation and saving\n sps.save_npz(complete_name+\".npz\", rec.eurm)\n ev.evaluate(recommendation_list=eurm_to_recommendation_list(rec.eurm, datareader=dr, remove_seed=True),\n name=name, old_mode=False)\n\n if mode == \"online\":\n\n \"\"\"Submission\"\"\"\n #Data initialization\n dr = Datareader(verbose=True, mode=mode, only_load=False)\n\n #Recommender algorithm initialization\n rec = Knn_collaborative_item()\n\n #Submitter initialization\n sb = Submitter(dr)\n\n #Getting for the recommender algorithm\n urm = dr.get_urm()\n pid = dr.get_test_pids()\n\n #Fitting data\n rec.fit(urm, pid)\n\n #Computing similarity/model\n rec.compute_model(top_k=knn, sm_type=sm.TVERSKY,shrink=200, alpha=0.1, beta=1, binary=True, verbose=True)\n\n #Computing ratings\n rec.compute_rating(top_k=topk, verbose=True, small=True)\n\n #submission\n sps.save_npz(complete_name+\".npz\", rec.eurm)\n sb.submit(recommendation_list=eurm_to_recommendation_list_submission(rec.eurm), name=name, track=\"main\", verify=True, gzipped=False)\n\n\n\n\n"
] | [
[
"scipy.sparse.save_npz"
]
] |
Valentinkvn/Udacity-Full-Autonomous-Vehicle-Project | [
"b1313345a09f84c122a91c1145230fe69da0d20f"
] | [
"ros/src/styx/bridge.py"
] | [
"\nimport rospy\n\nimport tf\nfrom geometry_msgs.msg import PoseStamped, Quaternion, TwistStamped\nfrom dbw_mkz_msgs.msg import SteeringReport, ThrottleCmd, BrakeCmd, SteeringCmd\nfrom std_msgs.msg import Float32 as Float\nfrom std_msgs.msg import Bool\nfrom sensor_msgs.msg import PointCloud2\nfrom sensor_msgs.msg import Image\nimport sensor_msgs.point_cloud2 as pcl2\nfrom std_msgs.msg import Header\nfrom cv_bridge import CvBridge, CvBridgeError\n\nfrom styx_msgs.msg import TrafficLight, TrafficLightArray, Lane\nimport numpy as np\nfrom PIL import Image as PIL_Image\nfrom io import BytesIO\nimport base64\n\nimport math\n\nTYPE = {\n 'bool': Bool,\n 'float': Float,\n 'pose': PoseStamped,\n 'pcl': PointCloud2,\n 'twist': TwistStamped,\n 'steer': SteeringReport,\n 'trafficlights': TrafficLightArray,\n 'steer_cmd': SteeringCmd,\n 'brake_cmd': BrakeCmd,\n 'throttle_cmd': ThrottleCmd,\n 'path_draw': Lane,\n 'image':Image\n}\n\nNUM_IMAGES_TO_SKIP = 2\n\nclass Bridge(object):\n def __init__(self, conf, server):\n rospy.init_node('styx_server')\n self.server = server\n self.vel = 0.\n self.yaw = None\n self.angular_vel = 0.\n self.bridge = CvBridge()\n self.img_count = 0\n\n self.callbacks = {\n '/vehicle/steering_cmd': self.callback_steering,\n '/vehicle/throttle_cmd': self.callback_throttle,\n '/vehicle/brake_cmd': self.callback_brake,\n '/final_waypoints': self.callback_path\n }\n\n self.subscribers = [rospy.Subscriber(e.topic, TYPE[e.type], self.callbacks[e.topic])\n for e in conf.subscribers]\n\n self.publishers = {e.name: rospy.Publisher(e.topic, TYPE[e.type], queue_size=1)\n for e in conf.publishers}\n\n def create_light(self, x, y, z, yaw, state):\n light = TrafficLight()\n\n light.header = Header()\n light.header.stamp = rospy.Time.now()\n light.header.frame_id = '/world'\n\n light.pose = self.create_pose(x, y, z, yaw)\n light.state = state\n\n return light\n\n def create_pose(self, x, y, z, yaw=0.):\n pose = PoseStamped()\n\n pose.header = Header()\n pose.header.stamp = rospy.Time.now()\n pose.header.frame_id = '/world'\n\n pose.pose.position.x = x\n pose.pose.position.y = y\n pose.pose.position.z = z\n\n q = tf.transformations.quaternion_from_euler(0., 0., math.pi * yaw/180.)\n pose.pose.orientation = Quaternion(*q)\n\n return pose\n\n def create_float(self, val):\n fl = Float()\n fl.data = val\n return fl\n\n def create_twist(self, velocity, angular):\n tw = TwistStamped()\n tw.twist.linear.x = velocity\n tw.twist.angular.z = angular\n return tw\n\n def create_steer(self, val):\n st = SteeringReport()\n st.steering_wheel_angle_cmd = val * math.pi/180.\n st.enabled = True\n st.speed = self.vel\n return st\n\n def calc_angular(self, yaw):\n angular_vel = 0.\n if self.yaw is not None:\n angular_vel = (yaw - self.yaw)/(rospy.get_time() - self.prev_time)\n self.yaw = yaw\n self.prev_time = rospy.get_time()\n return angular_vel\n\n def create_point_cloud_message(self, pts):\n header = Header()\n header.stamp = rospy.Time.now()\n header.frame_id = '/world'\n cloud_message = pcl2.create_cloud_xyz32(header, pts)\n return cloud_message\n\n def broadcast_transform(self, name, position, orientation):\n br = tf.TransformBroadcaster()\n br.sendTransform(position,\n orientation,\n rospy.Time.now(),\n name,\n \"world\")\n\n def publish_odometry(self, data):\n pose = self.create_pose(data['x'], data['y'], data['z'], data['yaw'])\n\n position = (data['x'], data['y'], data['z'])\n orientation = tf.transformations.quaternion_from_euler(0, 0, math.pi * data['yaw']/180.)\n 
self.broadcast_transform(\"base_link\", position, orientation)\n\n self.publishers['current_pose'].publish(pose)\n self.vel = data['velocity']* 0.44704\n self.angular = self.calc_angular(data['yaw'] * math.pi/180.)\n self.publishers['current_velocity'].publish(self.create_twist(self.vel, self.angular))\n\n\n def publish_controls(self, data):\n steering, throttle, brake = data['steering_angle'], data['throttle'], data['brake']\n self.publishers['steering_report'].publish(self.create_steer(steering))\n self.publishers['throttle_report'].publish(self.create_float(throttle))\n self.publishers['brake_report'].publish(self.create_float(brake))\n\n def publish_obstacles(self, data):\n for obs in data['obstacles']:\n pose = self.create_pose(obs[0], obs[1], obs[2])\n self.publishers['obstacle'].publish(pose)\n header = Header()\n header.stamp = rospy.Time.now()\n header.frame_id = '/world'\n cloud = pcl2.create_cloud_xyz32(header, data['obstacles'])\n self.publishers['obstacle_points'].publish(cloud)\n\n def publish_lidar(self, data):\n self.publishers['lidar'].publish(self.create_point_cloud_message(zip(data['lidar_x'], data['lidar_y'], data['lidar_z'])))\n\n def publish_traffic(self, data):\n x, y, z = data['light_pos_x'], data['light_pos_y'], data['light_pos_z'],\n yaw = [math.atan2(dy, dx) for dx, dy in zip(data['light_pos_dx'], data['light_pos_dy'])]\n status = data['light_state']\n\n lights = TrafficLightArray()\n header = Header()\n header.stamp = rospy.Time.now()\n header.frame_id = '/world'\n lights.lights = [self.create_light(*e) for e in zip(x, y, z, yaw, status)]\n self.publishers['trafficlights'].publish(lights)\n\n def publish_dbw_status(self, data):\n self.publishers['dbw_status'].publish(Bool(data))\n\n def publish_camera(self, data):\n self.img_count += 1\n if self.img_count >= NUM_IMAGES_TO_SKIP:\n # rospy.logwarn(\"Publish camera data\")\n imgString = data[\"image\"]\n image = PIL_Image.open(BytesIO(base64.b64decode(imgString)))\n image_array = np.asarray(image)\n image_message = self.bridge.cv2_to_imgmsg(image_array, encoding=\"rgb8\")\n self.publishers['image'].publish(image_message)\n self.img_count = 0\n\n def callback_steering(self, data):\n self.server('steer', data={'steering_angle': str(data.steering_wheel_angle_cmd)})\n\n def callback_throttle(self, data):\n self.server('throttle', data={'throttle': str(data.pedal_cmd)})\n\n def callback_brake(self, data):\n self.server('brake', data={'brake': str(data.pedal_cmd)})\n\n def callback_path(self, data):\n x_values = []\n y_values = []\n z_values = []\n for waypoint in data.waypoints:\n x = waypoint.pose.pose.position.x\n y = waypoint.pose.pose.position.y\n z = waypoint.pose.pose.position.z+0.5\n x_values.append(x)\n y_values.append(y)\n z_values.append(z)\n\n self.server('drawline', data={'next_x': x_values, 'next_y': y_values, 'next_z': z_values})\n"
] | [
[
"numpy.asarray"
]
] |
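The apis entry for this row records only numpy.asarray; in bridge.py it appears in Bridge.publish_camera, which decodes the simulator's base64 frame with PIL and converts it to an array before handing it to cv_bridge. Below is a minimal standalone sketch of that conversion pattern, with the ROS and cv_bridge parts left out and a tiny in-memory test image standing in for the simulator frame (both substitutions are illustrative, not part of the original file).

    import base64
    from io import BytesIO

    import numpy as np
    from PIL import Image as PIL_Image

    # A tiny in-memory RGB frame standing in for the simulator's base64 camera image.
    buffer = BytesIO()
    PIL_Image.new("RGB", (4, 4), color=(255, 0, 0)).save(buffer, format="PNG")
    img_string = base64.b64encode(buffer.getvalue())

    # Same decode-and-convert pattern as Bridge.publish_camera.
    image = PIL_Image.open(BytesIO(base64.b64decode(img_string)))
    image_array = np.asarray(image)
    print(image_array.shape, image_array.dtype)  # (4, 4, 3) uint8

The decode-and-convert pair of lines matches publish_camera verbatim; only the image source and the downstream cv2_to_imgmsg/publish call differ here.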
HeegyuKim/face_recognition | [
"d96d2c94225e49d3dd8f2cae4444d35d5c88d13b"
] | [
"mfr2.py"
] | [
"import os\nimport shutil\n\nimport os\nfrom glob import glob\nimport pandas as pd\nimport random\nfrom collections import defaultdict\nfrom PIL import Image\nfrom torch.utils.data import Dataset, DataLoader\n\ndef get_all_images(dir):\n types = [\"jpeg\", \"jpg\", \"png\"]\n files = []\n for t in types:\n path = os.path.join(dir, \"**\", \"*.\" + t)\n files.extend(glob(path))\n \n return files\n\n\ndef casia(dir):\n files = get_all_images(dir)\n users = defaultdict(set)\n rows = []\n\n for file in files:\n user = file.split(\"/\")[-2]\n users[user].add(file)\n rows.append({\n \"image\": file,\n \"id\": user\n })\n\n df = pd.DataFrame(rows)\n positives = []\n negatives = []\n\n for user, files in users.items():\n if len(files) <= 1:\n continue\n \n samples = random.sample(files, 2)\n positives.append({\n \"image1\": samples[0],\n \"image2\": samples[1],\n \"id1\": user,\n \"id2\": user,\n \"label\": 1\n })\n \n user_ids = list(users.keys())\n for i in range(0, len(user_ids), 2):\n if i == len(user_ids) - 1:\n continue\n\n id1, id2 = user_ids[i], user_ids[i + 1]\n files1, files2 = users[id1], users[id2]\n\n if len(files1) < 2 or len(files2) < 2:\n break\n \n samples1, samples2 = random.sample(files1, 2), random.sample(files2, 2)\n for j in range(2):\n negatives.append({\n \"image1\": samples1[j],\n \"image2\": samples2[j],\n \"id1\": id1,\n \"id2\": id2,\n \"label\": -1\n })\n \n test_set = pd.DataFrame(positives + negatives)\n return df, test_set\n\n# trainset, testset = casia(\"train/\")\n# trainset.to_csv(\"train.csv\", index=False)\n# testset.to_csv(\"train_eval.csv\", index=False)\n\nfor file in glob(\"dataset/validation/**/*.png\", recursive=True):\n tokens = file.split(\"/\")\n filename = tokens[-1]\n id = tokens[-3]\n\n dst = f\"mfeval/{id}/{filename}\"\n os.makedirs(os.path.abspath(os.path.dirname(dst)), exist_ok=True)\n shutil.copyfile(file, dst)"
] | [
[
"pandas.DataFrame"
]
] |
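The single tracked call for mfr2.py is pandas.DataFrame, which casia() uses twice: once to turn the per-image dict list into a frame and once to build the positive/negative verification-pair table. A short self-contained sketch of that pattern follows; the file paths are made-up placeholders (the real script globs jpeg/jpg/png files under a directory).

    import pandas as pd

    # Per-image rows, as accumulated in casia() while walking the file list.
    rows = [
        {"image": "train/0001/a.jpg", "id": "0001"},
        {"image": "train/0001/b.jpg", "id": "0001"},
        {"image": "train/0002/a.jpg", "id": "0002"},
    ]
    df = pd.DataFrame(rows)
    print(df["id"].value_counts().to_dict())  # {'0001': 2, '0002': 1}

    # Verification pairs, as in the positives/negatives lists of casia().
    pairs = [
        {"image1": rows[0]["image"], "image2": rows[1]["image"],
         "id1": "0001", "id2": "0001", "label": 1},
    ]
    test_set = pd.DataFrame(pairs)
    print(test_set.columns.tolist())  # ['image1', 'image2', 'id1', 'id2', 'label']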
cmarlin/agents | [
"1729e06f42237b34dab8bd9d8c01980c2d2b391c"
] | [
"tf_agents/experimental/examples/sac/haarnoja18/sac_train_eval.py"
] | [
"# coding=utf-8\n# Copyright 2020 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\nr\"\"\"Train and Eval SAC.\n\nAll hyperparameters come from the SAC paper\nhttps://arxiv.org/pdf/1812.05905.pdf\n\"\"\"\nimport functools\nimport os\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\n\nimport gin\nimport reverb\nimport tensorflow as tf\n\nfrom tf_agents.agents.sac import sac_agent\nfrom tf_agents.agents.sac import tanh_normal_projection_network\nfrom tf_agents.environments import suite_mujoco\nfrom tf_agents.keras_layers import inner_reshape\nfrom tf_agents.metrics import py_metrics\nfrom tf_agents.networks import nest_map\nfrom tf_agents.networks import sequential\nfrom tf_agents.policies import greedy_policy\nfrom tf_agents.policies import py_tf_eager_policy\nfrom tf_agents.policies import random_py_policy\nfrom tf_agents.replay_buffers import reverb_replay_buffer\nfrom tf_agents.replay_buffers import reverb_utils\nfrom tf_agents.train import actor\nfrom tf_agents.train import learner\nfrom tf_agents.train import triggers\nfrom tf_agents.train.utils import spec_utils\nfrom tf_agents.train.utils import train_utils\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string('root_dir', os.getenv('TEST_UNDECLARED_OUTPUTS_DIR'),\n 'Root directory for writing logs/summaries/checkpoints.')\nflags.DEFINE_integer(\n 'reverb_port', None,\n 'Port for reverb server, if None, use a randomly chosen unused port.')\nflags.DEFINE_integer('num_iterations', 3000000,\n 'Total number train/eval iterations to perform.')\nflags.DEFINE_integer(\n 'eval_interval', 10000,\n 'Number of train steps between evaluations. 
Set to 0 to skip.')\nflags.DEFINE_multi_string('gin_file', None, 'Paths to the gin-config files.')\nflags.DEFINE_multi_string('gin_bindings', None, 'Gin binding parameters.')\n\n\ndense = functools.partial(\n tf.keras.layers.Dense,\n activation=tf.keras.activations.relu,\n kernel_initializer='glorot_uniform')\n\n\ndef create_fc_network(layer_units):\n return sequential.Sequential([dense(num_units) for num_units in layer_units])\n\n\ndef create_identity_layer():\n return tf.keras.layers.Lambda(lambda x: x)\n\n\ndef create_sequential_critic_network(obs_fc_layer_units,\n action_fc_layer_units,\n joint_fc_layer_units):\n \"\"\"Create a sequential critic network.\"\"\"\n # Split the inputs into observations and actions.\n def split_inputs(inputs):\n return {'observation': inputs[0], 'action': inputs[1]}\n\n # Create an observation network.\n obs_network = (create_fc_network(obs_fc_layer_units) if obs_fc_layer_units\n else create_identity_layer())\n\n # Create an action network.\n action_network = (create_fc_network(action_fc_layer_units)\n if action_fc_layer_units else create_identity_layer())\n\n # Create a joint network.\n joint_network = (create_fc_network(joint_fc_layer_units)\n if joint_fc_layer_units else create_identity_layer())\n\n # Final layer.\n value_layer = tf.keras.layers.Dense(1, kernel_initializer='glorot_uniform')\n\n return sequential.Sequential([\n tf.keras.layers.Lambda(split_inputs),\n nest_map.NestMap({\n 'observation': obs_network,\n 'action': action_network\n }),\n nest_map.NestFlatten(),\n tf.keras.layers.Concatenate(),\n joint_network,\n value_layer,\n inner_reshape.InnerReshape(current_shape=[1], new_shape=[])\n ], name='sequential_critic')\n\n\nclass _TanhNormalProjectionNetworkWrapper(\n tanh_normal_projection_network.TanhNormalProjectionNetwork):\n \"\"\"Wrapper to pass predefined `outer_rank` to underlying projection net.\"\"\"\n\n def __init__(self, sample_spec, predefined_outer_rank=1):\n super(_TanhNormalProjectionNetworkWrapper, self).__init__(sample_spec)\n self.predefined_outer_rank = predefined_outer_rank\n\n def call(self, inputs, network_state=(), **kwargs):\n kwargs['outer_rank'] = self.predefined_outer_rank\n if 'step_type' in kwargs:\n del kwargs['step_type']\n return super(_TanhNormalProjectionNetworkWrapper,\n self).call(inputs, **kwargs)\n\n\ndef create_sequential_actor_network(actor_fc_layers, action_tensor_spec):\n \"\"\"Create a sequential actor network.\"\"\"\n def tile_as_nest(non_nested_output):\n return tf.nest.map_structure(lambda _: non_nested_output,\n action_tensor_spec)\n\n return sequential.Sequential(\n [dense(num_units) for num_units in actor_fc_layers] +\n [tf.keras.layers.Lambda(tile_as_nest)] + [\n nest_map.NestMap(\n tf.nest.map_structure(_TanhNormalProjectionNetworkWrapper,\n action_tensor_spec))\n ])\n\n\[email protected]\ndef train_eval(\n root_dir,\n env_name='HalfCheetah-v2',\n # Training params\n initial_collect_steps=10000,\n num_iterations=3200000,\n actor_fc_layers=(256, 256),\n critic_obs_fc_layers=None,\n critic_action_fc_layers=None,\n critic_joint_fc_layers=(256, 256),\n # Agent params\n batch_size=256,\n actor_learning_rate=3e-4,\n critic_learning_rate=3e-4,\n alpha_learning_rate=3e-4,\n gamma=0.99,\n target_update_tau=0.005,\n target_update_period=1,\n reward_scale_factor=0.1,\n # Replay params\n reverb_port=None,\n replay_capacity=1000000,\n # Others\n policy_save_interval=10000,\n replay_buffer_save_interval=100000,\n eval_interval=10000,\n eval_episodes=30,\n debug_summaries=False,\n 
summarize_grads_and_vars=False):\n \"\"\"Trains and evaluates SAC.\"\"\"\n logging.info('Training SAC on: %s', env_name)\n collect_env = suite_mujoco.load(env_name)\n eval_env = suite_mujoco.load(env_name)\n\n _, action_tensor_spec, time_step_tensor_spec = (\n spec_utils.get_tensor_specs(collect_env))\n\n train_step = train_utils.create_train_step()\n\n actor_net = create_sequential_actor_network(\n actor_fc_layers=actor_fc_layers, action_tensor_spec=action_tensor_spec)\n\n critic_net = create_sequential_critic_network(\n obs_fc_layer_units=critic_obs_fc_layers,\n action_fc_layer_units=critic_action_fc_layers,\n joint_fc_layer_units=critic_joint_fc_layers)\n\n agent = sac_agent.SacAgent(\n time_step_tensor_spec,\n action_tensor_spec,\n actor_network=actor_net,\n critic_network=critic_net,\n actor_optimizer=tf.keras.optimizers.Adam(\n learning_rate=actor_learning_rate),\n critic_optimizer=tf.keras.optimizers.Adam(\n learning_rate=critic_learning_rate),\n alpha_optimizer=tf.keras.optimizers.Adam(\n learning_rate=alpha_learning_rate),\n target_update_tau=target_update_tau,\n target_update_period=target_update_period,\n td_errors_loss_fn=tf.math.squared_difference,\n gamma=gamma,\n reward_scale_factor=reward_scale_factor,\n gradient_clipping=None,\n debug_summaries=debug_summaries,\n summarize_grads_and_vars=summarize_grads_and_vars,\n train_step_counter=train_step)\n agent.initialize()\n\n table_name = 'uniform_table'\n table = reverb.Table(\n table_name,\n max_size=replay_capacity,\n sampler=reverb.selectors.Uniform(),\n remover=reverb.selectors.Fifo(),\n rate_limiter=reverb.rate_limiters.MinSize(1))\n\n reverb_checkpoint_dir = os.path.join(root_dir, learner.TRAIN_DIR,\n learner.REPLAY_BUFFER_CHECKPOINT_DIR)\n reverb_checkpointer = reverb.platform.checkpointers_lib.DefaultCheckpointer(\n path=reverb_checkpoint_dir)\n reverb_server = reverb.Server([table],\n port=reverb_port,\n checkpointer=reverb_checkpointer)\n reverb_replay = reverb_replay_buffer.ReverbReplayBuffer(\n agent.collect_data_spec,\n sequence_length=2,\n table_name=table_name,\n local_server=reverb_server)\n rb_observer = reverb_utils.ReverbAddTrajectoryObserver(\n reverb_replay.py_client,\n table_name,\n sequence_length=2,\n stride_length=1)\n\n dataset = reverb_replay.as_dataset(\n sample_batch_size=batch_size, num_steps=2).prefetch(50)\n experience_dataset_fn = lambda: dataset\n\n saved_model_dir = os.path.join(root_dir, learner.POLICY_SAVED_MODEL_DIR)\n env_step_metric = py_metrics.EnvironmentSteps()\n learning_triggers = [\n triggers.PolicySavedModelTrigger(\n saved_model_dir,\n agent,\n train_step,\n interval=policy_save_interval,\n metadata_metrics={triggers.ENV_STEP_METADATA_KEY: env_step_metric}),\n triggers.ReverbCheckpointTrigger(\n train_step,\n interval=replay_buffer_save_interval,\n reverb_client=reverb_replay.py_client),\n # TODO(b/165023684): Add SIGTERM handler to checkpoint before preemption.\n triggers.StepPerSecondLogTrigger(train_step, interval=1000),\n ]\n\n agent_learner = learner.Learner(\n root_dir,\n train_step,\n agent,\n experience_dataset_fn,\n triggers=learning_triggers)\n\n random_policy = random_py_policy.RandomPyPolicy(\n collect_env.time_step_spec(), collect_env.action_spec())\n initial_collect_actor = actor.Actor(\n collect_env,\n random_policy,\n train_step,\n steps_per_run=initial_collect_steps,\n observers=[rb_observer])\n logging.info('Doing initial collect.')\n initial_collect_actor.run()\n\n tf_collect_policy = agent.collect_policy\n collect_policy = py_tf_eager_policy.PyTFEagerPolicy(\n 
tf_collect_policy, use_tf_function=True)\n\n collect_actor = actor.Actor(\n collect_env,\n collect_policy,\n train_step,\n steps_per_run=1,\n metrics=actor.collect_metrics(10),\n summary_dir=os.path.join(root_dir, learner.TRAIN_DIR),\n observers=[rb_observer, env_step_metric])\n\n tf_greedy_policy = greedy_policy.GreedyPolicy(agent.policy)\n eval_greedy_policy = py_tf_eager_policy.PyTFEagerPolicy(\n tf_greedy_policy, use_tf_function=True)\n\n eval_actor = actor.Actor(\n eval_env,\n eval_greedy_policy,\n train_step,\n episodes_per_run=eval_episodes,\n metrics=actor.eval_metrics(eval_episodes),\n summary_dir=os.path.join(root_dir, 'eval'),\n )\n\n if eval_interval:\n logging.info('Evaluating.')\n eval_actor.run_and_log()\n\n logging.info('Training.')\n for _ in range(num_iterations):\n collect_actor.run()\n agent_learner.run(iterations=1)\n\n if eval_interval and agent_learner.train_step_numpy % eval_interval == 0:\n logging.info('Evaluating.')\n eval_actor.run_and_log()\n\n rb_observer.close()\n reverb_server.stop()\n\n\ndef main(_):\n logging.set_verbosity(logging.INFO)\n tf.compat.v1.enable_v2_behavior()\n\n gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_bindings)\n\n train_eval(\n FLAGS.root_dir,\n num_iterations=FLAGS.num_iterations,\n reverb_port=FLAGS.reverb_port,\n eval_interval=FLAGS.eval_interval)\n\n\nif __name__ == '__main__':\n flags.mark_flag_as_required('root_dir')\n app.run(main)\n"
] | [
[
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.layers.Concatenate",
"tensorflow.nest.map_structure",
"tensorflow.keras.layers.Lambda",
"tensorflow.keras.layers.Dense",
"tensorflow.compat.v1.enable_v2_behavior"
]
] |
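The tracked TensorFlow calls for sac_train_eval.py (Dense, Lambda, Concatenate, Adam, nest.map_structure, enable_v2_behavior) are used inside tf_agents' sequential and nest_map wrappers to assemble the actor and critic. The sketch below reproduces the critic's observation/action/joint Dense-and-Concatenate shape with plain Keras only, so it runs without tf_agents; the 3e-4 learning rate, (256, 256) widths, and glorot_uniform initializer come from the train_eval defaults above, while the input sizes are assumed placeholder shapes.

    import tensorflow as tf

    obs_in = tf.keras.Input(shape=(17,), name="observation")  # placeholder obs dim
    act_in = tf.keras.Input(shape=(6,), name="action")        # placeholder action dim

    def dense(units):
        # Mirrors the functools.partial(Dense, relu, glorot_uniform) helper above.
        return tf.keras.layers.Dense(
            units,
            activation=tf.keras.activations.relu,
            kernel_initializer="glorot_uniform")

    # Observation tower (with a pass-through Lambda, as create_identity_layer does),
    # action tower, concatenated joint trunk, then a scalar value head.
    obs_net = dense(256)(tf.keras.layers.Lambda(lambda x: x)(obs_in))
    act_net = dense(256)(act_in)
    joint = tf.keras.layers.Concatenate()([obs_net, act_net])
    joint = dense(256)(joint)
    q_value = tf.keras.layers.Dense(1, kernel_initializer="glorot_uniform")(joint)

    critic = tf.keras.Model([obs_in, act_in], q_value, name="sequential_critic_sketch")
    critic.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-4), loss="mse")
    critic.summary()

In the original file the same layers sit inside sequential.Sequential and nest_map.NestMap so the critic can accept the {'observation': ..., 'action': ...} nest that SacAgent passes in; the plain functional form here is only meant to show which Keras calls the apis column refers to.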
jdavidagudelo/tensorflow-models | [
"6f019beec73b01861363bf717706e27f4210b979",
"6f019beec73b01861363bf717706e27f4210b979",
"6f019beec73b01861363bf717706e27f4210b979"
] | [
"research/compression/entropy_coder/core/entropy_coder_train.py",
"research/object_detection/core/anchor_generator.py",
"research/morph_net/framework/concat_and_slice_regularizers_test.py"
] | [
"# Copyright 2017 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Train an entropy coder model.\"\"\"\n\nimport time\n\nimport tensorflow as tf\n\nfrom research.compression.entropy_coder.core import code_loader\nfrom research.compression.entropy_coder.core import config_helper\n\n# pylint: enable=unused-import\nfrom research.compression.entropy_coder.model import model_factory\n\nFLAGS = tf.app.flags.FLAGS\n\n# Hardware resources configuration.\ntf.app.flags.DEFINE_string('master', '',\n \"\"\"Name of the TensorFlow master to use.\"\"\")\ntf.app.flags.DEFINE_string('train_dir', None,\n \"\"\"Directory where to write event logs.\"\"\")\ntf.app.flags.DEFINE_integer('task', None,\n \"\"\"Task id of the replica running the training.\"\"\")\ntf.app.flags.DEFINE_integer('ps_tasks', 0, \"\"\"Number of tasks in the ps job.\n If 0 no ps job is used.\"\"\")\n\n# Model selection and configuration.\ntf.app.flags.DEFINE_string('model', None, \"\"\"Underlying encoder model.\"\"\")\ntf.app.flags.DEFINE_string('model_config', None,\n \"\"\"Model config protobuf given as text file.\"\"\")\n\n# Training data and parameters configuration.\ntf.app.flags.DEFINE_string('input_config', None,\n \"\"\"Path to the training input config file.\"\"\")\ntf.app.flags.DEFINE_string('train_config', None,\n \"\"\"Path to the training experiment config file.\"\"\")\n\n\ndef train():\n if FLAGS.train_dir is None:\n raise ValueError('Parameter train_dir must be provided')\n if FLAGS.task is None:\n raise ValueError('Parameter task must be provided')\n if FLAGS.model is None:\n raise ValueError('Parameter model must be provided')\n\n input_config_string = config_helper.GetConfigString(FLAGS.input_config)\n input_config = config_helper.InputConfig(input_config_string)\n\n # Training parameters.\n train_config_string = config_helper.GetConfigString(FLAGS.train_config)\n train_config = config_helper.TrainConfig(train_config_string)\n\n batch_size = train_config.batch_size\n initial_learning_rate = train_config.learning_rate\n decay_rate = train_config.decay_rate\n samples_per_decay = train_config.samples_per_decay\n\n # Parameters for learning-rate decay.\n # The formula is decay_rate ** floor(steps / decay_steps).\n decay_steps = samples_per_decay / batch_size\n decay_steps = max(decay_steps, 1)\n\n first_code = code_loader.ReadFirstCode(input_config.data)\n first_code_height = (\n first_code.features.feature['code_shape'].int64_list.value[0])\n first_code_width = (\n first_code.features.feature['code_shape'].int64_list.value[1])\n max_bit_depth = (\n first_code.features.feature['code_shape'].int64_list.value[2])\n print('Maximum code depth: {}'.format(max_bit_depth))\n\n with tf.Graph().as_default():\n ps_ops = [\"Variable\", \"VariableV2\", \"AutoReloadVariable\", \"VarHandleOp\"]\n with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks,\n ps_ops=ps_ops)):\n codes = 
code_loader.LoadBinaryCode(\n input_config=input_config,\n batch_size=batch_size)\n if input_config.unique_code_size:\n print('Input code size: {} x {}'.format(first_code_height,\n first_code_width))\n codes.set_shape(\n [batch_size, first_code_height, first_code_width, max_bit_depth])\n else:\n codes.set_shape([batch_size, None, None, max_bit_depth])\n codes_effective_shape = tf.shape(codes)\n\n global_step = tf.contrib.framework.create_global_step()\n\n # Apply learning-rate decay.\n learning_rate = tf.train.exponential_decay(\n learning_rate=initial_learning_rate,\n global_step=global_step,\n decay_steps=decay_steps,\n decay_rate=decay_rate,\n staircase=True)\n tf.summary.scalar('Learning Rate', learning_rate)\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,\n epsilon=1.0)\n\n # Create the entropy coder model.\n model = model_factory.GetModelRegistry().CreateModel(FLAGS.model)\n model_config_string = config_helper.GetConfigString(FLAGS.model_config)\n model.Initialize(global_step, optimizer, model_config_string)\n model.BuildGraph(codes)\n\n summary_op = tf.summary.merge_all()\n\n # Verify that the model can actually be trained.\n if model.train_op is None:\n raise ValueError('Input model {} is not trainable'.format(FLAGS.model))\n\n # We disable the summary thread run by Supervisor class by passing\n # summary_op=None. We still pass save_summaries_secs because it is used by\n # the global step counter thread.\n is_chief = (FLAGS.task == 0)\n sv = tf.train.Supervisor(logdir=FLAGS.train_dir,\n is_chief=is_chief,\n global_step=global_step,\n # saver=model.saver,\n summary_op=None,\n save_summaries_secs=120,\n save_model_secs=600,\n recovery_wait_secs=30)\n\n sess = sv.PrepareSession(FLAGS.master)\n sv.StartQueueRunners(sess)\n\n step = sess.run(global_step)\n print('Trainer initial step: {}.'.format(step))\n\n # Once everything has been setup properly, save the configs.\n if is_chief:\n config_helper.SaveConfig(FLAGS.train_dir, 'input_config.json',\n input_config_string)\n config_helper.SaveConfig(FLAGS.train_dir, 'model_config.json',\n model_config_string)\n config_helper.SaveConfig(FLAGS.train_dir, 'train_config.json',\n train_config_string)\n\n # Train the model.\n next_summary_time = time.time()\n while not sv.ShouldStop():\n feed_dict = None\n\n # Once in a while, update the summaries on the chief worker.\n if is_chief and next_summary_time < time.time():\n summary_str = sess.run(summary_op, feed_dict=feed_dict)\n sv.SummaryComputed(sess, summary_str)\n next_summary_time = time.time() + sv.save_summaries_secs\n else:\n tf_tensors = {\n 'train': model.train_op,\n 'code_length': model.average_code_length\n }\n np_tensors = sess.run(tf_tensors, feed_dict=feed_dict)\n print(np_tensors['code_length'])\n\n sv.Stop()\n\n\ndef main(argv=None): # pylint: disable=unused-argument\n train()\n\n\nif __name__ == '__main__':\n tf.app.run()\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Base anchor generator.\n\nThe job of the anchor generator is to create (or load) a collection\nof bounding boxes to be used as anchors.\n\nGenerated anchors are assumed to match some convolutional grid or list of grid\nshapes. For example, we might want to generate anchors matching an 8x8\nfeature map and a 4x4 feature map. If we place 3 anchors per grid location\non the first feature map and 6 anchors per grid location on the second feature\nmap, then 3*8*8 + 6*4*4 = 288 anchors are generated in total.\n\nTo support fully convolutional settings, feature map shapes are passed\ndynamically at generation time. The number of anchors to place at each location\nis static --- implementations of AnchorGenerator must always be able return\nthe number of anchors that it uses per location for each feature map.\n\"\"\"\nfrom abc import ABCMeta\nfrom abc import abstractmethod\n\nimport tensorflow as tf\n\n\nclass AnchorGenerator(object):\n \"\"\"Abstract base class for anchor generators.\"\"\"\n __metaclass__ = ABCMeta\n\n @abstractmethod\n def name_scope(self):\n \"\"\"Name scope.\n\n Must be defined by implementations.\n\n Returns:\n a string representing the name scope of the anchor generation operation.\n \"\"\"\n pass\n\n @property\n def check_num_anchors(self):\n \"\"\"Whether to dynamically check the number of anchors generated.\n\n Can be overridden by implementations that would like to disable this\n behavior.\n\n Returns:\n a boolean controlling whether the Generate function should dynamically\n check the number of anchors generated against the mathematically\n expected number of anchors.\n \"\"\"\n return True\n\n @abstractmethod\n def num_anchors_per_location(self):\n \"\"\"Returns the number of anchors per spatial location.\n\n Returns:\n a list of integers, one for each expected feature map to be passed to\n the `generate` function.\n \"\"\"\n pass\n\n def generate(self, feature_map_shape_list, **params):\n \"\"\"Generates a collection of bounding boxes to be used as anchors.\n\n TODO(rathodv): remove **params from argument list and make stride and\n offsets (for multiple_grid_anchor_generator) constructor arguments.\n\n Args:\n feature_map_shape_list: list of (height, width) pairs in the format\n [(height_0, width_0), (height_1, width_1), ...] that the generated\n anchors must align with. 
Pairs can be provided as 1-dimensional\n integer tensors of length 2 or simply as tuples of integers.\n **params: parameters for anchor generation op\n\n Returns:\n boxes_list: a list of BoxLists each holding anchor boxes corresponding to\n the input feature map shapes.\n\n Raises:\n ValueError: if the number of feature map shapes does not match the length\n of NumAnchorsPerLocation.\n \"\"\"\n if self.check_num_anchors and (\n len(feature_map_shape_list) != len(self.num_anchors_per_location())):\n raise ValueError('Number of feature maps is expected to equal the length '\n 'of `num_anchors_per_location`.')\n with tf.name_scope(self.name_scope()):\n anchors_list = self._generate(feature_map_shape_list, **params)\n if self.check_num_anchors:\n with tf.control_dependencies([\n self._assert_correct_number_of_anchors(\n anchors_list, feature_map_shape_list)]):\n for item in anchors_list:\n item.set(tf.identity(item.get()))\n return anchors_list\n\n @abstractmethod\n def _generate(self, feature_map_shape_list, **params):\n \"\"\"To be overridden by implementations.\n\n Args:\n feature_map_shape_list: list of (height, width) pairs in the format\n [(height_0, width_0), (height_1, width_1), ...] that the generated\n anchors must align with.\n **params: parameters for anchor generation op\n\n Returns:\n boxes_list: a list of BoxList, each holding a collection of N anchor\n boxes.\n \"\"\"\n pass\n\n def _assert_correct_number_of_anchors(self, anchors_list,\n feature_map_shape_list):\n \"\"\"Assert that correct number of anchors was generated.\n\n Args:\n anchors_list: A list of box_list.BoxList object holding anchors generated.\n feature_map_shape_list: list of (height, width) pairs in the format\n [(height_0, width_0), (height_1, width_1), ...] that the generated\n anchors must align with.\n Returns:\n Op that raises InvalidArgumentError if the number of anchors does not\n match the number of expected anchors.\n \"\"\"\n expected_num_anchors = 0\n actual_num_anchors = 0\n for num_anchors_per_location, feature_map_shape, anchors in zip(\n self.num_anchors_per_location(), feature_map_shape_list, anchors_list):\n expected_num_anchors += (num_anchors_per_location\n * feature_map_shape[0]\n * feature_map_shape[1])\n actual_num_anchors += anchors.num_boxes()\n return tf.assert_equal(expected_num_anchors, actual_num_anchors)\n",
"# Copyright 2018 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for framework.concat_and_slice_regularizers.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom research.morph_net.framework import concat_and_slice_regularizers\nfrom research.morph_net.testing import op_regularizer_stub\n\n\nclass ConcatAndSliceRegularizersTest(tf.test.TestCase):\n\n def setUp(self):\n self._reg_vec1 = [0.1, 0.3, 0.6, 0.2]\n self._alive_vec1 = [False, True, True, False]\n self._reg_vec2 = [0.2, 0.4, 0.5]\n self._alive_vec2 = [False, True, False]\n self._reg1 = op_regularizer_stub.OpRegularizerStub(self._reg_vec1,\n self._alive_vec1)\n self._reg2 = op_regularizer_stub.OpRegularizerStub(self._reg_vec2,\n self._alive_vec2)\n\n def testConcatRegularizer(self):\n concat_reg = concat_and_slice_regularizers.ConcatRegularizer(\n [self._reg1, self._reg2])\n with self.test_session():\n self.assertAllEqual(self._alive_vec1 + self._alive_vec2,\n concat_reg.alive_vector.eval())\n self.assertAllClose(self._reg_vec1 + self._reg_vec2,\n concat_reg.regularization_vector.eval(), 1e-5)\n\n def testSliceRegularizer(self):\n concat_reg = concat_and_slice_regularizers.SlicingReferenceRegularizer(\n lambda: self._reg1, 1, 2)\n with self.test_session():\n self.assertAllEqual(self._alive_vec1[1:3],\n concat_reg.alive_vector.eval())\n self.assertAllClose(self._reg_vec1[1:3],\n concat_reg.regularization_vector.eval(), 1e-5)\n\n\nif __name__ == '__main__':\n tf.test.main()\n"
] | [
[
"tensorflow.summary.scalar",
"tensorflow.shape",
"tensorflow.app.flags.DEFINE_string",
"tensorflow.app.run",
"tensorflow.summary.merge_all",
"tensorflow.train.AdamOptimizer",
"tensorflow.Graph",
"tensorflow.contrib.framework.create_global_step",
"tensorflow.train.exponential_decay",
"tensorflow.train.Supervisor",
"tensorflow.train.replica_device_setter",
"tensorflow.app.flags.DEFINE_integer"
],
[
"tensorflow.assert_equal"
],
[
"tensorflow.test.main"
]
] |
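For entropy_coder_train.py the tracked calls centre on the TF 1.x training setup: a Graph, a global step, tf.train.exponential_decay, a learning-rate summary, and AdamOptimizer with epsilon=1.0. A minimal sketch of that learning-rate wiring follows; it is written against tf.compat.v1 so it runs under TensorFlow 2, and it substitutes tf.train.get_or_create_global_step for the tf.contrib.framework.create_global_step helper used in the original. The batch size, decay rate, and samples_per_decay values are illustrative stand-ins for the TrainConfig fields.

    import tensorflow.compat.v1 as tf
    tf.disable_eager_execution()

    # Stand-ins for train_config.batch_size / learning_rate / decay_rate / samples_per_decay.
    batch_size = 32
    initial_learning_rate = 1e-4
    decay_rate = 0.96
    samples_per_decay = 1_000_000
    decay_steps = max(samples_per_decay / batch_size, 1)

    with tf.Graph().as_default():
        global_step = tf.train.get_or_create_global_step()
        # Learning rate is decay_rate ** floor(global_step / decay_steps),
        # matching the comment in the original train() function.
        learning_rate = tf.train.exponential_decay(
            learning_rate=initial_learning_rate,
            global_step=global_step,
            decay_steps=decay_steps,
            decay_rate=decay_rate,
            staircase=True)
        tf.summary.scalar('Learning Rate', learning_rate)
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, epsilon=1.0)

        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            print(sess.run(learning_rate))  # 1e-4 at step 0

The original derives decay_steps the same way, samples_per_decay / batch_size clamped to at least 1, before handing the decayed rate to AdamOptimizer and the model's Initialize call.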
gurukiran07/pandas | [
"3cce96f515917170ea9bce731ffcc913750464b8"
] | [
"pandas/tests/groupby/test_groupby.py"
] | [
"from datetime import datetime\nfrom decimal import Decimal\nfrom io import StringIO\n\nimport numpy as np\nimport pytest\n\nfrom pandas.compat import IS64\nfrom pandas.errors import PerformanceWarning\n\nimport pandas as pd\nfrom pandas import (\n Categorical,\n DataFrame,\n Grouper,\n Index,\n MultiIndex,\n Series,\n Timestamp,\n date_range,\n read_csv,\n to_datetime,\n)\nimport pandas._testing as tm\nfrom pandas.core.base import SpecificationError\nimport pandas.core.common as com\n\n\ndef test_repr():\n # GH18203\n result = repr(Grouper(key=\"A\", level=\"B\"))\n expected = \"Grouper(key='A', level='B', axis=0, sort=False)\"\n assert result == expected\n\n\[email protected](\"dtype\", [\"int64\", \"int32\", \"float64\", \"float32\"])\ndef test_basic(dtype):\n\n data = Series(np.arange(9) // 3, index=np.arange(9), dtype=dtype)\n\n index = np.arange(9)\n np.random.shuffle(index)\n data = data.reindex(index)\n\n grouped = data.groupby(lambda x: x // 3)\n\n for k, v in grouped:\n assert len(v) == 3\n\n agged = grouped.aggregate(np.mean)\n assert agged[1] == 1\n\n tm.assert_series_equal(agged, grouped.agg(np.mean)) # shorthand\n tm.assert_series_equal(agged, grouped.mean())\n tm.assert_series_equal(grouped.agg(np.sum), grouped.sum())\n\n expected = grouped.apply(lambda x: x * x.sum())\n transformed = grouped.transform(lambda x: x * x.sum())\n assert transformed[7] == 12\n tm.assert_series_equal(transformed, expected)\n\n value_grouped = data.groupby(data)\n tm.assert_series_equal(\n value_grouped.aggregate(np.mean), agged, check_index_type=False\n )\n\n # complex agg\n agged = grouped.aggregate([np.mean, np.std])\n\n msg = r\"nested renamer is not supported\"\n with pytest.raises(SpecificationError, match=msg):\n grouped.aggregate({\"one\": np.mean, \"two\": np.std})\n\n group_constants = {0: 10, 1: 20, 2: 30}\n agged = grouped.agg(lambda x: group_constants[x.name] + x.mean())\n assert agged[1] == 21\n\n # corner cases\n msg = \"Must produce aggregated value\"\n # exception raised is type Exception\n with pytest.raises(Exception, match=msg):\n grouped.aggregate(lambda x: x * 2)\n\n\ndef test_groupby_nonobject_dtype(mframe, df_mixed_floats):\n key = mframe.index.codes[0]\n grouped = mframe.groupby(key)\n result = grouped.sum()\n\n expected = mframe.groupby(key.astype(\"O\")).sum()\n tm.assert_frame_equal(result, expected)\n\n # GH 3911, mixed frame non-conversion\n df = df_mixed_floats.copy()\n df[\"value\"] = range(len(df))\n\n def max_value(group):\n return group.loc[group[\"value\"].idxmax()]\n\n applied = df.groupby(\"A\").apply(max_value)\n result = applied.dtypes\n expected = df.dtypes\n tm.assert_series_equal(result, expected)\n\n\ndef test_groupby_return_type():\n\n # GH2893, return a reduced type\n df1 = DataFrame(\n [\n {\"val1\": 1, \"val2\": 20},\n {\"val1\": 1, \"val2\": 19},\n {\"val1\": 2, \"val2\": 27},\n {\"val1\": 2, \"val2\": 12},\n ]\n )\n\n def func(dataf):\n return dataf[\"val2\"] - dataf[\"val2\"].mean()\n\n with tm.assert_produces_warning(FutureWarning):\n result = df1.groupby(\"val1\", squeeze=True).apply(func)\n assert isinstance(result, Series)\n\n df2 = DataFrame(\n [\n {\"val1\": 1, \"val2\": 20},\n {\"val1\": 1, \"val2\": 19},\n {\"val1\": 1, \"val2\": 27},\n {\"val1\": 1, \"val2\": 12},\n ]\n )\n\n def func(dataf):\n return dataf[\"val2\"] - dataf[\"val2\"].mean()\n\n with tm.assert_produces_warning(FutureWarning):\n result = df2.groupby(\"val1\", squeeze=True).apply(func)\n assert isinstance(result, Series)\n\n # GH3596, return a consistent type (regression in 
0.11 from 0.10.1)\n df = DataFrame([[1, 1], [1, 1]], columns=[\"X\", \"Y\"])\n with tm.assert_produces_warning(FutureWarning):\n result = df.groupby(\"X\", squeeze=False).count()\n assert isinstance(result, DataFrame)\n\n\ndef test_inconsistent_return_type():\n # GH5592\n # inconsistent return type\n df = DataFrame(\n {\n \"A\": [\"Tiger\", \"Tiger\", \"Tiger\", \"Lamb\", \"Lamb\", \"Pony\", \"Pony\"],\n \"B\": Series(np.arange(7), dtype=\"int64\"),\n \"C\": date_range(\"20130101\", periods=7),\n }\n )\n\n def f(grp):\n return grp.iloc[0]\n\n expected = df.groupby(\"A\").first()[[\"B\"]]\n result = df.groupby(\"A\").apply(f)[[\"B\"]]\n tm.assert_frame_equal(result, expected)\n\n def f(grp):\n if grp.name == \"Tiger\":\n return None\n return grp.iloc[0]\n\n result = df.groupby(\"A\").apply(f)[[\"B\"]]\n e = expected.copy()\n e.loc[\"Tiger\"] = np.nan\n tm.assert_frame_equal(result, e)\n\n def f(grp):\n if grp.name == \"Pony\":\n return None\n return grp.iloc[0]\n\n result = df.groupby(\"A\").apply(f)[[\"B\"]]\n e = expected.copy()\n e.loc[\"Pony\"] = np.nan\n tm.assert_frame_equal(result, e)\n\n # 5592 revisited, with datetimes\n def f(grp):\n if grp.name == \"Pony\":\n return None\n return grp.iloc[0]\n\n result = df.groupby(\"A\").apply(f)[[\"C\"]]\n e = df.groupby(\"A\").first()[[\"C\"]]\n e.loc[\"Pony\"] = pd.NaT\n tm.assert_frame_equal(result, e)\n\n # scalar outputs\n def f(grp):\n if grp.name == \"Pony\":\n return None\n return grp.iloc[0].loc[\"C\"]\n\n result = df.groupby(\"A\").apply(f)\n e = df.groupby(\"A\").first()[\"C\"].copy()\n e.loc[\"Pony\"] = np.nan\n e.name = None\n tm.assert_series_equal(result, e)\n\n\ndef test_pass_args_kwargs(ts, tsframe):\n def f(x, q=None, axis=0):\n return np.percentile(x, q, axis=axis)\n\n g = lambda x: np.percentile(x, 80, axis=0)\n\n # Series\n ts_grouped = ts.groupby(lambda x: x.month)\n agg_result = ts_grouped.agg(np.percentile, 80, axis=0)\n apply_result = ts_grouped.apply(np.percentile, 80, axis=0)\n trans_result = ts_grouped.transform(np.percentile, 80, axis=0)\n\n agg_expected = ts_grouped.quantile(0.8)\n trans_expected = ts_grouped.transform(g)\n\n tm.assert_series_equal(apply_result, agg_expected)\n tm.assert_series_equal(agg_result, agg_expected)\n tm.assert_series_equal(trans_result, trans_expected)\n\n agg_result = ts_grouped.agg(f, q=80)\n apply_result = ts_grouped.apply(f, q=80)\n trans_result = ts_grouped.transform(f, q=80)\n tm.assert_series_equal(agg_result, agg_expected)\n tm.assert_series_equal(apply_result, agg_expected)\n tm.assert_series_equal(trans_result, trans_expected)\n\n # DataFrame\n df_grouped = tsframe.groupby(lambda x: x.month)\n agg_result = df_grouped.agg(np.percentile, 80, axis=0)\n apply_result = df_grouped.apply(DataFrame.quantile, 0.8)\n expected = df_grouped.quantile(0.8)\n tm.assert_frame_equal(apply_result, expected, check_names=False)\n tm.assert_frame_equal(agg_result, expected)\n\n agg_result = df_grouped.agg(f, q=80)\n apply_result = df_grouped.apply(DataFrame.quantile, q=0.8)\n tm.assert_frame_equal(agg_result, expected)\n tm.assert_frame_equal(apply_result, expected, check_names=False)\n\n\ndef test_len():\n df = tm.makeTimeDataFrame()\n grouped = df.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day])\n assert len(grouped) == len(df)\n\n grouped = df.groupby([lambda x: x.year, lambda x: x.month])\n expected = len({(x.year, x.month) for x in df.index})\n assert len(grouped) == expected\n\n # issue 11016\n df = DataFrame({\"a\": [np.nan] * 3, \"b\": [1, 2, 3]})\n assert 
len(df.groupby(\"a\")) == 0\n assert len(df.groupby(\"b\")) == 3\n assert len(df.groupby([\"a\", \"b\"])) == 3\n\n\ndef test_basic_regression():\n # regression\n result = Series([1.0 * x for x in list(range(1, 10)) * 10])\n\n data = np.random.random(1100) * 10.0\n groupings = Series(data)\n\n grouped = result.groupby(groupings)\n grouped.mean()\n\n\[email protected](\n \"dtype\", [\"float64\", \"float32\", \"int64\", \"int32\", \"int16\", \"int8\"]\n)\ndef test_with_na_groups(dtype):\n index = Index(np.arange(10))\n values = Series(np.ones(10), index, dtype=dtype)\n labels = Series(\n [np.nan, \"foo\", \"bar\", \"bar\", np.nan, np.nan, \"bar\", \"bar\", np.nan, \"foo\"],\n index=index,\n )\n\n # this SHOULD be an int\n grouped = values.groupby(labels)\n agged = grouped.agg(len)\n expected = Series([4, 2], index=[\"bar\", \"foo\"])\n\n tm.assert_series_equal(agged, expected, check_dtype=False)\n\n # assert issubclass(agged.dtype.type, np.integer)\n\n # explicitly return a float from my function\n def f(x):\n return float(len(x))\n\n agged = grouped.agg(f)\n expected = Series([4.0, 2.0], index=[\"bar\", \"foo\"])\n\n tm.assert_series_equal(agged, expected)\n\n\ndef test_indices_concatenation_order():\n\n # GH 2808\n\n def f1(x):\n y = x[(x.b % 2) == 1] ** 2\n if y.empty:\n multiindex = MultiIndex(levels=[[]] * 2, codes=[[]] * 2, names=[\"b\", \"c\"])\n res = DataFrame(columns=[\"a\"], index=multiindex)\n return res\n else:\n y = y.set_index([\"b\", \"c\"])\n return y\n\n def f2(x):\n y = x[(x.b % 2) == 1] ** 2\n if y.empty:\n return DataFrame()\n else:\n y = y.set_index([\"b\", \"c\"])\n return y\n\n def f3(x):\n y = x[(x.b % 2) == 1] ** 2\n if y.empty:\n multiindex = MultiIndex(\n levels=[[]] * 2, codes=[[]] * 2, names=[\"foo\", \"bar\"]\n )\n res = DataFrame(columns=[\"a\", \"b\"], index=multiindex)\n return res\n else:\n return y\n\n df = DataFrame({\"a\": [1, 2, 2, 2], \"b\": range(4), \"c\": range(5, 9)})\n\n df2 = DataFrame({\"a\": [3, 2, 2, 2], \"b\": range(4), \"c\": range(5, 9)})\n\n # correct result\n result1 = df.groupby(\"a\").apply(f1)\n result2 = df2.groupby(\"a\").apply(f1)\n tm.assert_frame_equal(result1, result2)\n\n # should fail (not the same number of levels)\n msg = \"Cannot concat indices that do not have the same number of levels\"\n with pytest.raises(AssertionError, match=msg):\n df.groupby(\"a\").apply(f2)\n with pytest.raises(AssertionError, match=msg):\n df2.groupby(\"a\").apply(f2)\n\n # should fail (incorrect shape)\n with pytest.raises(AssertionError, match=msg):\n df.groupby(\"a\").apply(f3)\n with pytest.raises(AssertionError, match=msg):\n df2.groupby(\"a\").apply(f3)\n\n\ndef test_attr_wrapper(ts):\n grouped = ts.groupby(lambda x: x.weekday())\n\n result = grouped.std()\n expected = grouped.agg(lambda x: np.std(x, ddof=1))\n tm.assert_series_equal(result, expected)\n\n # this is pretty cool\n result = grouped.describe()\n expected = {name: gp.describe() for name, gp in grouped}\n expected = DataFrame(expected).T\n tm.assert_frame_equal(result, expected)\n\n # get attribute\n result = grouped.dtype\n expected = grouped.agg(lambda x: x.dtype)\n tm.assert_series_equal(result, expected)\n\n # make sure raises error\n msg = \"'SeriesGroupBy' object has no attribute 'foo'\"\n with pytest.raises(AttributeError, match=msg):\n getattr(grouped, \"foo\")\n\n\ndef test_frame_groupby(tsframe):\n grouped = tsframe.groupby(lambda x: x.weekday())\n\n # aggregate\n aggregated = grouped.aggregate(np.mean)\n assert len(aggregated) == 5\n assert len(aggregated.columns) == 
4\n\n # by string\n tscopy = tsframe.copy()\n tscopy[\"weekday\"] = [x.weekday() for x in tscopy.index]\n stragged = tscopy.groupby(\"weekday\").aggregate(np.mean)\n tm.assert_frame_equal(stragged, aggregated, check_names=False)\n\n # transform\n grouped = tsframe.head(30).groupby(lambda x: x.weekday())\n transformed = grouped.transform(lambda x: x - x.mean())\n assert len(transformed) == 30\n assert len(transformed.columns) == 4\n\n # transform propagate\n transformed = grouped.transform(lambda x: x.mean())\n for name, group in grouped:\n mean = group.mean()\n for idx in group.index:\n tm.assert_series_equal(transformed.xs(idx), mean, check_names=False)\n\n # iterate\n for weekday, group in grouped:\n assert group.index[0].weekday() == weekday\n\n # groups / group_indices\n groups = grouped.groups\n indices = grouped.indices\n\n for k, v in groups.items():\n samething = tsframe.index.take(indices[k])\n assert (samething == v).all()\n\n\ndef test_frame_groupby_columns(tsframe):\n mapping = {\"A\": 0, \"B\": 0, \"C\": 1, \"D\": 1}\n grouped = tsframe.groupby(mapping, axis=1)\n\n # aggregate\n aggregated = grouped.aggregate(np.mean)\n assert len(aggregated) == len(tsframe)\n assert len(aggregated.columns) == 2\n\n # transform\n tf = lambda x: x - x.mean()\n groupedT = tsframe.T.groupby(mapping, axis=0)\n tm.assert_frame_equal(groupedT.transform(tf).T, grouped.transform(tf))\n\n # iterate\n for k, v in grouped:\n assert len(v.columns) == 2\n\n\ndef test_frame_set_name_single(df):\n grouped = df.groupby(\"A\")\n\n result = grouped.mean()\n assert result.index.name == \"A\"\n\n result = df.groupby(\"A\", as_index=False).mean()\n assert result.index.name != \"A\"\n\n result = grouped.agg(np.mean)\n assert result.index.name == \"A\"\n\n result = grouped.agg({\"C\": np.mean, \"D\": np.std})\n assert result.index.name == \"A\"\n\n result = grouped[\"C\"].mean()\n assert result.index.name == \"A\"\n result = grouped[\"C\"].agg(np.mean)\n assert result.index.name == \"A\"\n result = grouped[\"C\"].agg([np.mean, np.std])\n assert result.index.name == \"A\"\n\n msg = r\"nested renamer is not supported\"\n with pytest.raises(SpecificationError, match=msg):\n grouped[\"C\"].agg({\"foo\": np.mean, \"bar\": np.std})\n\n\ndef test_multi_func(df):\n col1 = df[\"A\"]\n col2 = df[\"B\"]\n\n grouped = df.groupby([col1.get, col2.get])\n agged = grouped.mean()\n expected = df.groupby([\"A\", \"B\"]).mean()\n\n # TODO groupby get drops names\n tm.assert_frame_equal(\n agged.loc[:, [\"C\", \"D\"]], expected.loc[:, [\"C\", \"D\"]], check_names=False\n )\n\n # some \"groups\" with no data\n df = DataFrame(\n {\n \"v1\": np.random.randn(6),\n \"v2\": np.random.randn(6),\n \"k1\": np.array([\"b\", \"b\", \"b\", \"a\", \"a\", \"a\"]),\n \"k2\": np.array([\"1\", \"1\", \"1\", \"2\", \"2\", \"2\"]),\n },\n index=[\"one\", \"two\", \"three\", \"four\", \"five\", \"six\"],\n )\n # only verify that it works for now\n grouped = df.groupby([\"k1\", \"k2\"])\n grouped.agg(np.sum)\n\n\ndef test_multi_key_multiple_functions(df):\n grouped = df.groupby([\"A\", \"B\"])[\"C\"]\n\n agged = grouped.agg([np.mean, np.std])\n expected = DataFrame({\"mean\": grouped.agg(np.mean), \"std\": grouped.agg(np.std)})\n tm.assert_frame_equal(agged, expected)\n\n\ndef test_frame_multi_key_function_list():\n data = DataFrame(\n {\n \"A\": [\n \"foo\",\n \"foo\",\n \"foo\",\n \"foo\",\n \"bar\",\n \"bar\",\n \"bar\",\n \"bar\",\n \"foo\",\n \"foo\",\n \"foo\",\n ],\n \"B\": [\n \"one\",\n \"one\",\n \"one\",\n \"two\",\n \"one\",\n \"one\",\n 
\"one\",\n \"two\",\n \"two\",\n \"two\",\n \"one\",\n ],\n \"C\": [\n \"dull\",\n \"dull\",\n \"shiny\",\n \"dull\",\n \"dull\",\n \"shiny\",\n \"shiny\",\n \"dull\",\n \"shiny\",\n \"shiny\",\n \"shiny\",\n ],\n \"D\": np.random.randn(11),\n \"E\": np.random.randn(11),\n \"F\": np.random.randn(11),\n }\n )\n\n grouped = data.groupby([\"A\", \"B\"])\n funcs = [np.mean, np.std]\n agged = grouped.agg(funcs)\n expected = pd.concat(\n [grouped[\"D\"].agg(funcs), grouped[\"E\"].agg(funcs), grouped[\"F\"].agg(funcs)],\n keys=[\"D\", \"E\", \"F\"],\n axis=1,\n )\n assert isinstance(agged.index, MultiIndex)\n assert isinstance(expected.index, MultiIndex)\n tm.assert_frame_equal(agged, expected)\n\n\[email protected](\"op\", [lambda x: x.sum(), lambda x: x.mean()])\ndef test_groupby_multiple_columns(df, op):\n data = df\n grouped = data.groupby([\"A\", \"B\"])\n\n result1 = op(grouped)\n\n keys = []\n values = []\n for n1, gp1 in data.groupby(\"A\"):\n for n2, gp2 in gp1.groupby(\"B\"):\n keys.append((n1, n2))\n values.append(op(gp2.loc[:, [\"C\", \"D\"]]))\n\n mi = MultiIndex.from_tuples(keys, names=[\"A\", \"B\"])\n expected = pd.concat(values, axis=1).T\n expected.index = mi\n\n # a little bit crude\n for col in [\"C\", \"D\"]:\n result_col = op(grouped[col])\n pivoted = result1[col]\n exp = expected[col]\n tm.assert_series_equal(result_col, exp)\n tm.assert_series_equal(pivoted, exp)\n\n # test single series works the same\n result = data[\"C\"].groupby([data[\"A\"], data[\"B\"]]).mean()\n expected = data.groupby([\"A\", \"B\"]).mean()[\"C\"]\n\n tm.assert_series_equal(result, expected)\n\n\ndef test_as_index_select_column():\n # GH 5764\n df = DataFrame([[1, 2], [1, 4], [5, 6]], columns=[\"A\", \"B\"])\n result = df.groupby(\"A\", as_index=False)[\"B\"].get_group(1)\n expected = Series([2, 4], name=\"B\")\n tm.assert_series_equal(result, expected)\n\n result = df.groupby(\"A\", as_index=False)[\"B\"].apply(lambda x: x.cumsum())\n expected = Series(\n [2, 6, 6], name=\"B\", index=MultiIndex.from_tuples([(0, 0), (0, 1), (1, 2)])\n )\n tm.assert_series_equal(result, expected)\n\n\ndef test_groupby_as_index_select_column_sum_empty_df():\n # GH 35246\n df = DataFrame(columns=[\"A\", \"B\", \"C\"])\n left = df.groupby(by=\"A\", as_index=False)[\"B\"].sum()\n assert type(left) is DataFrame\n assert left.to_dict() == {\"A\": {}, \"B\": {}}\n\n\ndef test_groupby_as_index_agg(df):\n grouped = df.groupby(\"A\", as_index=False)\n\n # single-key\n\n result = grouped.agg(np.mean)\n expected = grouped.mean()\n tm.assert_frame_equal(result, expected)\n\n result2 = grouped.agg({\"C\": np.mean, \"D\": np.sum})\n expected2 = grouped.mean()\n expected2[\"D\"] = grouped.sum()[\"D\"]\n tm.assert_frame_equal(result2, expected2)\n\n grouped = df.groupby(\"A\", as_index=True)\n\n msg = r\"nested renamer is not supported\"\n with pytest.raises(SpecificationError, match=msg):\n grouped[\"C\"].agg({\"Q\": np.sum})\n\n # multi-key\n\n grouped = df.groupby([\"A\", \"B\"], as_index=False)\n\n result = grouped.agg(np.mean)\n expected = grouped.mean()\n tm.assert_frame_equal(result, expected)\n\n result2 = grouped.agg({\"C\": np.mean, \"D\": np.sum})\n expected2 = grouped.mean()\n expected2[\"D\"] = grouped.sum()[\"D\"]\n tm.assert_frame_equal(result2, expected2)\n\n expected3 = grouped[\"C\"].sum()\n expected3 = DataFrame(expected3).rename(columns={\"C\": \"Q\"})\n result3 = grouped[\"C\"].agg({\"Q\": np.sum})\n tm.assert_frame_equal(result3, expected3)\n\n # GH7115 & GH8112 & GH8582\n df = DataFrame(np.random.randint(0, 
100, (50, 3)), columns=[\"jim\", \"joe\", \"jolie\"])\n ts = Series(np.random.randint(5, 10, 50), name=\"jim\")\n\n gr = df.groupby(ts)\n gr.nth(0) # invokes set_selection_from_grouper internally\n tm.assert_frame_equal(gr.apply(sum), df.groupby(ts).apply(sum))\n\n for attr in [\"mean\", \"max\", \"count\", \"idxmax\", \"cumsum\", \"all\"]:\n gr = df.groupby(ts, as_index=False)\n left = getattr(gr, attr)()\n\n gr = df.groupby(ts.values, as_index=True)\n right = getattr(gr, attr)().reset_index(drop=True)\n\n tm.assert_frame_equal(left, right)\n\n\ndef test_ops_not_as_index(reduction_func):\n # GH 10355, 21090\n # Using as_index=False should not modify grouped column\n\n if reduction_func in (\"corrwith\",):\n pytest.skip(\"Test not applicable\")\n\n if reduction_func in (\"nth\", \"ngroup\"):\n pytest.skip(\"Skip until behavior is determined (GH #5755)\")\n\n df = DataFrame(np.random.randint(0, 5, size=(100, 2)), columns=[\"a\", \"b\"])\n expected = getattr(df.groupby(\"a\"), reduction_func)()\n if reduction_func == \"size\":\n expected = expected.rename(\"size\")\n expected = expected.reset_index()\n\n g = df.groupby(\"a\", as_index=False)\n\n result = getattr(g, reduction_func)()\n tm.assert_frame_equal(result, expected)\n\n result = g.agg(reduction_func)\n tm.assert_frame_equal(result, expected)\n\n result = getattr(g[\"b\"], reduction_func)()\n tm.assert_frame_equal(result, expected)\n\n result = g[\"b\"].agg(reduction_func)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_as_index_series_return_frame(df):\n grouped = df.groupby(\"A\", as_index=False)\n grouped2 = df.groupby([\"A\", \"B\"], as_index=False)\n\n result = grouped[\"C\"].agg(np.sum)\n expected = grouped.agg(np.sum).loc[:, [\"A\", \"C\"]]\n assert isinstance(result, DataFrame)\n tm.assert_frame_equal(result, expected)\n\n result2 = grouped2[\"C\"].agg(np.sum)\n expected2 = grouped2.agg(np.sum).loc[:, [\"A\", \"B\", \"C\"]]\n assert isinstance(result2, DataFrame)\n tm.assert_frame_equal(result2, expected2)\n\n result = grouped[\"C\"].sum()\n expected = grouped.sum().loc[:, [\"A\", \"C\"]]\n assert isinstance(result, DataFrame)\n tm.assert_frame_equal(result, expected)\n\n result2 = grouped2[\"C\"].sum()\n expected2 = grouped2.sum().loc[:, [\"A\", \"B\", \"C\"]]\n assert isinstance(result2, DataFrame)\n tm.assert_frame_equal(result2, expected2)\n\n\ndef test_as_index_series_column_slice_raises(df):\n # GH15072\n grouped = df.groupby(\"A\", as_index=False)\n msg = r\"Column\\(s\\) C already selected\"\n\n with pytest.raises(IndexError, match=msg):\n grouped[\"C\"].__getitem__(\"D\")\n\n\ndef test_groupby_as_index_cython(df):\n data = df\n\n # single-key\n grouped = data.groupby(\"A\", as_index=False)\n result = grouped.mean()\n expected = data.groupby([\"A\"]).mean()\n expected.insert(0, \"A\", expected.index)\n expected.index = np.arange(len(expected))\n tm.assert_frame_equal(result, expected)\n\n # multi-key\n grouped = data.groupby([\"A\", \"B\"], as_index=False)\n result = grouped.mean()\n expected = data.groupby([\"A\", \"B\"]).mean()\n\n arrays = list(zip(*expected.index.values))\n expected.insert(0, \"A\", arrays[0])\n expected.insert(1, \"B\", arrays[1])\n expected.index = np.arange(len(expected))\n tm.assert_frame_equal(result, expected)\n\n\ndef test_groupby_as_index_series_scalar(df):\n grouped = df.groupby([\"A\", \"B\"], as_index=False)\n\n # GH #421\n\n result = grouped[\"C\"].agg(len)\n expected = grouped.agg(len).loc[:, [\"A\", \"B\", \"C\"]]\n tm.assert_frame_equal(result, expected)\n\n\ndef 
test_groupby_as_index_corner(df, ts):\n msg = \"as_index=False only valid with DataFrame\"\n with pytest.raises(TypeError, match=msg):\n ts.groupby(lambda x: x.weekday(), as_index=False)\n\n msg = \"as_index=False only valid for axis=0\"\n with pytest.raises(ValueError, match=msg):\n df.groupby(lambda x: x.lower(), as_index=False, axis=1)\n\n\ndef test_groupby_multiple_key(df):\n df = tm.makeTimeDataFrame()\n grouped = df.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day])\n agged = grouped.sum()\n tm.assert_almost_equal(df.values, agged.values)\n\n grouped = df.T.groupby(\n [lambda x: x.year, lambda x: x.month, lambda x: x.day], axis=1\n )\n\n agged = grouped.agg(lambda x: x.sum())\n tm.assert_index_equal(agged.index, df.columns)\n tm.assert_almost_equal(df.T.values, agged.values)\n\n agged = grouped.agg(lambda x: x.sum())\n tm.assert_almost_equal(df.T.values, agged.values)\n\n\ndef test_groupby_multi_corner(df):\n # test that having an all-NA column doesn't mess you up\n df = df.copy()\n df[\"bad\"] = np.nan\n agged = df.groupby([\"A\", \"B\"]).mean()\n\n expected = df.groupby([\"A\", \"B\"]).mean()\n expected[\"bad\"] = np.nan\n\n tm.assert_frame_equal(agged, expected)\n\n\ndef test_omit_nuisance(df):\n grouped = df.groupby(\"A\")\n\n result = grouped.mean()\n expected = df.loc[:, [\"A\", \"C\", \"D\"]].groupby(\"A\").mean()\n tm.assert_frame_equal(result, expected)\n\n agged = grouped.agg(np.mean)\n exp = grouped.mean()\n tm.assert_frame_equal(agged, exp)\n\n df = df.loc[:, [\"A\", \"C\", \"D\"]]\n df[\"E\"] = datetime.now()\n grouped = df.groupby(\"A\")\n result = grouped.agg(np.sum)\n expected = grouped.sum()\n tm.assert_frame_equal(result, expected)\n\n # won't work with axis = 1\n grouped = df.groupby({\"A\": 0, \"C\": 0, \"D\": 1, \"E\": 1}, axis=1)\n msg = \"'DatetimeArray' does not implement reduction 'sum'\"\n with pytest.raises(TypeError, match=msg):\n grouped.agg(lambda x: x.sum(0, numeric_only=False))\n\n\ndef test_omit_nuisance_sem(df):\n # GH 38774 - sem should work with nuisance columns\n grouped = df.groupby(\"A\")\n result = grouped.sem()\n expected = df.loc[:, [\"A\", \"C\", \"D\"]].groupby(\"A\").sem()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_omit_nuisance_python_multiple(three_group):\n grouped = three_group.groupby([\"A\", \"B\"])\n\n agged = grouped.agg(np.mean)\n exp = grouped.mean()\n tm.assert_frame_equal(agged, exp)\n\n\ndef test_empty_groups_corner(mframe):\n # handle empty groups\n df = DataFrame(\n {\n \"k1\": np.array([\"b\", \"b\", \"b\", \"a\", \"a\", \"a\"]),\n \"k2\": np.array([\"1\", \"1\", \"1\", \"2\", \"2\", \"2\"]),\n \"k3\": [\"foo\", \"bar\"] * 3,\n \"v1\": np.random.randn(6),\n \"v2\": np.random.randn(6),\n }\n )\n\n grouped = df.groupby([\"k1\", \"k2\"])\n result = grouped.agg(np.mean)\n expected = grouped.mean()\n tm.assert_frame_equal(result, expected)\n\n grouped = mframe[3:5].groupby(level=0)\n agged = grouped.apply(lambda x: x.mean())\n agged_A = grouped[\"A\"].apply(np.mean)\n tm.assert_series_equal(agged[\"A\"], agged_A)\n assert agged.index.name == \"first\"\n\n\ndef test_nonsense_func():\n df = DataFrame([0])\n msg = r\"unsupported operand type\\(s\\) for \\+: 'int' and 'str'\"\n with pytest.raises(TypeError, match=msg):\n df.groupby(lambda x: x + \"foo\")\n\n\ndef test_wrap_aggregated_output_multindex(mframe):\n df = mframe.T\n df[\"baz\", \"two\"] = \"peekaboo\"\n\n keys = [np.array([0, 0, 1]), np.array([0, 0, 1])]\n agged = df.groupby(keys).agg(np.mean)\n assert isinstance(agged.columns, MultiIndex)\n\n 
def aggfun(ser):\n if ser.name == (\"foo\", \"one\"):\n raise TypeError\n else:\n return ser.sum()\n\n agged2 = df.groupby(keys).aggregate(aggfun)\n assert len(agged2.columns) + 1 == len(df.columns)\n\n\ndef test_groupby_level_apply(mframe):\n\n result = mframe.groupby(level=0).count()\n assert result.index.name == \"first\"\n result = mframe.groupby(level=1).count()\n assert result.index.name == \"second\"\n\n result = mframe[\"A\"].groupby(level=0).count()\n assert result.index.name == \"first\"\n\n\ndef test_groupby_level_mapper(mframe):\n deleveled = mframe.reset_index()\n\n mapper0 = {\"foo\": 0, \"bar\": 0, \"baz\": 1, \"qux\": 1}\n mapper1 = {\"one\": 0, \"two\": 0, \"three\": 1}\n\n result0 = mframe.groupby(mapper0, level=0).sum()\n result1 = mframe.groupby(mapper1, level=1).sum()\n\n mapped_level0 = np.array([mapper0.get(x) for x in deleveled[\"first\"]])\n mapped_level1 = np.array([mapper1.get(x) for x in deleveled[\"second\"]])\n expected0 = mframe.groupby(mapped_level0).sum()\n expected1 = mframe.groupby(mapped_level1).sum()\n expected0.index.name, expected1.index.name = \"first\", \"second\"\n\n tm.assert_frame_equal(result0, expected0)\n tm.assert_frame_equal(result1, expected1)\n\n\ndef test_groupby_level_nonmulti():\n # GH 1313, GH 13901\n s = Series([1, 2, 3, 10, 4, 5, 20, 6], Index([1, 2, 3, 1, 4, 5, 2, 6], name=\"foo\"))\n expected = Series([11, 22, 3, 4, 5, 6], Index(range(1, 7), name=\"foo\"))\n\n result = s.groupby(level=0).sum()\n tm.assert_series_equal(result, expected)\n result = s.groupby(level=[0]).sum()\n tm.assert_series_equal(result, expected)\n result = s.groupby(level=-1).sum()\n tm.assert_series_equal(result, expected)\n result = s.groupby(level=[-1]).sum()\n tm.assert_series_equal(result, expected)\n\n msg = \"level > 0 or level < -1 only valid with MultiIndex\"\n with pytest.raises(ValueError, match=msg):\n s.groupby(level=1)\n with pytest.raises(ValueError, match=msg):\n s.groupby(level=-2)\n msg = \"No group keys passed!\"\n with pytest.raises(ValueError, match=msg):\n s.groupby(level=[])\n msg = \"multiple levels only valid with MultiIndex\"\n with pytest.raises(ValueError, match=msg):\n s.groupby(level=[0, 0])\n with pytest.raises(ValueError, match=msg):\n s.groupby(level=[0, 1])\n msg = \"level > 0 or level < -1 only valid with MultiIndex\"\n with pytest.raises(ValueError, match=msg):\n s.groupby(level=[1])\n\n\ndef test_groupby_complex():\n # GH 12902\n a = Series(data=np.arange(4) * (1 + 2j), index=[0, 0, 1, 1])\n expected = Series((1 + 2j, 5 + 10j))\n\n result = a.groupby(level=0).sum()\n tm.assert_series_equal(result, expected)\n\n with tm.assert_produces_warning(FutureWarning):\n result = a.sum(level=0)\n tm.assert_series_equal(result, expected)\n\n\ndef test_groupby_series_indexed_differently():\n s1 = Series(\n [5.0, -9.0, 4.0, 100.0, -5.0, 55.0, 6.7],\n index=Index([\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\"]),\n )\n s2 = Series(\n [1.0, 1.0, 4.0, 5.0, 5.0, 7.0], index=Index([\"a\", \"b\", \"d\", \"f\", \"g\", \"h\"])\n )\n\n grouped = s1.groupby(s2)\n agged = grouped.mean()\n exp = s1.groupby(s2.reindex(s1.index).get).mean()\n tm.assert_series_equal(agged, exp)\n\n\ndef test_groupby_with_hier_columns():\n tuples = list(\n zip(\n *[\n [\"bar\", \"bar\", \"baz\", \"baz\", \"foo\", \"foo\", \"qux\", \"qux\"],\n [\"one\", \"two\", \"one\", \"two\", \"one\", \"two\", \"one\", \"two\"],\n ]\n )\n )\n index = MultiIndex.from_tuples(tuples)\n columns = MultiIndex.from_tuples(\n [(\"A\", \"cat\"), (\"B\", \"dog\"), (\"B\", \"cat\"), (\"A\", 
\"dog\")]\n )\n df = DataFrame(np.random.randn(8, 4), index=index, columns=columns)\n\n result = df.groupby(level=0).mean()\n tm.assert_index_equal(result.columns, columns)\n\n result = df.groupby(level=0, axis=1).mean()\n tm.assert_index_equal(result.index, df.index)\n\n result = df.groupby(level=0).agg(np.mean)\n tm.assert_index_equal(result.columns, columns)\n\n result = df.groupby(level=0).apply(lambda x: x.mean())\n tm.assert_index_equal(result.columns, columns)\n\n result = df.groupby(level=0, axis=1).agg(lambda x: x.mean(1))\n tm.assert_index_equal(result.columns, Index([\"A\", \"B\"]))\n tm.assert_index_equal(result.index, df.index)\n\n # add a nuisance column\n sorted_columns, _ = columns.sortlevel(0)\n df[\"A\", \"foo\"] = \"bar\"\n result = df.groupby(level=0).mean()\n tm.assert_index_equal(result.columns, df.columns[:-1])\n\n\ndef test_grouping_ndarray(df):\n grouped = df.groupby(df[\"A\"].values)\n\n result = grouped.sum()\n expected = df.groupby(\"A\").sum()\n tm.assert_frame_equal(\n result, expected, check_names=False\n ) # Note: no names when grouping by value\n\n\ndef test_groupby_wrong_multi_labels():\n data = \"\"\"index,foo,bar,baz,spam,data\n0,foo1,bar1,baz1,spam2,20\n1,foo1,bar2,baz1,spam3,30\n2,foo2,bar2,baz1,spam2,40\n3,foo1,bar1,baz2,spam1,50\n4,foo3,bar1,baz2,spam1,60\"\"\"\n\n data = read_csv(StringIO(data), index_col=0)\n\n grouped = data.groupby([\"foo\", \"bar\", \"baz\", \"spam\"])\n\n result = grouped.agg(np.mean)\n expected = grouped.mean()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_groupby_series_with_name(df):\n result = df.groupby(df[\"A\"]).mean()\n result2 = df.groupby(df[\"A\"], as_index=False).mean()\n assert result.index.name == \"A\"\n assert \"A\" in result2\n\n result = df.groupby([df[\"A\"], df[\"B\"]]).mean()\n result2 = df.groupby([df[\"A\"], df[\"B\"]], as_index=False).mean()\n assert result.index.names == (\"A\", \"B\")\n assert \"A\" in result2\n assert \"B\" in result2\n\n\ndef test_seriesgroupby_name_attr(df):\n # GH 6265\n result = df.groupby(\"A\")[\"C\"]\n assert result.count().name == \"C\"\n assert result.mean().name == \"C\"\n\n testFunc = lambda x: np.sum(x) * 2\n assert result.agg(testFunc).name == \"C\"\n\n\ndef test_consistency_name():\n # GH 12363\n\n df = DataFrame(\n {\n \"A\": [\"foo\", \"bar\", \"foo\", \"bar\", \"foo\", \"bar\", \"foo\", \"foo\"],\n \"B\": [\"one\", \"one\", \"two\", \"two\", \"two\", \"two\", \"one\", \"two\"],\n \"C\": np.random.randn(8) + 1.0,\n \"D\": np.arange(8),\n }\n )\n\n expected = df.groupby([\"A\"]).B.count()\n result = df.B.groupby(df.A).count()\n tm.assert_series_equal(result, expected)\n\n\ndef test_groupby_name_propagation(df):\n # GH 6124\n def summarize(df, name=None):\n return Series({\"count\": 1, \"mean\": 2, \"omissions\": 3}, name=name)\n\n def summarize_random_name(df):\n # Provide a different name for each Series. 
In this case, groupby\n # should not attempt to propagate the Series name since they are\n # inconsistent.\n return Series({\"count\": 1, \"mean\": 2, \"omissions\": 3}, name=df.iloc[0][\"A\"])\n\n metrics = df.groupby(\"A\").apply(summarize)\n assert metrics.columns.name is None\n metrics = df.groupby(\"A\").apply(summarize, \"metrics\")\n assert metrics.columns.name == \"metrics\"\n metrics = df.groupby(\"A\").apply(summarize_random_name)\n assert metrics.columns.name is None\n\n\ndef test_groupby_nonstring_columns():\n df = DataFrame([np.arange(10) for x in range(10)])\n grouped = df.groupby(0)\n result = grouped.mean()\n expected = df.groupby(df[0]).mean()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_groupby_mixed_type_columns():\n # GH 13432, unorderable types in py3\n df = DataFrame([[0, 1, 2]], columns=[\"A\", \"B\", 0])\n expected = DataFrame([[1, 2]], columns=[\"B\", 0], index=Index([0], name=\"A\"))\n\n result = df.groupby(\"A\").first()\n tm.assert_frame_equal(result, expected)\n\n result = df.groupby(\"A\").sum()\n tm.assert_frame_equal(result, expected)\n\n\n# TODO: Ensure warning isn't emitted in the first place\[email protected](\"ignore:Mean of:RuntimeWarning\")\ndef test_cython_grouper_series_bug_noncontig():\n arr = np.empty((100, 100))\n arr.fill(np.nan)\n obj = Series(arr[:, 0])\n inds = np.tile(range(10), 10)\n\n result = obj.groupby(inds).agg(Series.median)\n assert result.isna().all()\n\n\ndef test_series_grouper_noncontig_index():\n index = Index(tm.rands_array(10, 100))\n\n values = Series(np.random.randn(50), index=index[::2])\n labels = np.random.randint(0, 5, 50)\n\n # it works!\n grouped = values.groupby(labels)\n\n # accessing the index elements causes segfault\n f = lambda x: len(set(map(id, x.index)))\n grouped.agg(f)\n\n\ndef test_convert_objects_leave_decimal_alone():\n\n s = Series(range(5))\n labels = np.array([\"a\", \"b\", \"c\", \"d\", \"e\"], dtype=\"O\")\n\n def convert_fast(x):\n return Decimal(str(x.mean()))\n\n def convert_force_pure(x):\n # base will be length 0\n assert len(x.values.base) > 0\n return Decimal(str(x.mean()))\n\n grouped = s.groupby(labels)\n\n result = grouped.agg(convert_fast)\n assert result.dtype == np.object_\n assert isinstance(result[0], Decimal)\n\n result = grouped.agg(convert_force_pure)\n assert result.dtype == np.object_\n assert isinstance(result[0], Decimal)\n\n\ndef test_groupby_dtype_inference_empty():\n # GH 6733\n df = DataFrame({\"x\": [], \"range\": np.arange(0, dtype=\"int64\")})\n assert df[\"x\"].dtype == np.float64\n\n result = df.groupby(\"x\").first()\n exp_index = Index([], name=\"x\", dtype=np.float64)\n expected = DataFrame({\"range\": Series([], index=exp_index, dtype=\"int64\")})\n tm.assert_frame_equal(result, expected, by_blocks=True)\n\n\ndef test_groupby_unit64_float_conversion():\n # GH: 30859 groupby converts unit64 to floats sometimes\n df = DataFrame({\"first\": [1], \"second\": [1], \"value\": [16148277970000000000]})\n result = df.groupby([\"first\", \"second\"])[\"value\"].max()\n expected = Series(\n [16148277970000000000],\n MultiIndex.from_product([[1], [1]], names=[\"first\", \"second\"]),\n name=\"value\",\n )\n tm.assert_series_equal(result, expected)\n\n\ndef test_groupby_list_infer_array_like(df):\n result = df.groupby(list(df[\"A\"])).mean()\n expected = df.groupby(df[\"A\"]).mean()\n tm.assert_frame_equal(result, expected, check_names=False)\n\n with pytest.raises(KeyError, match=r\"^'foo'$\"):\n df.groupby(list(df[\"A\"][:-1]))\n\n # pathological case of ambiguity\n 
df = DataFrame({\"foo\": [0, 1], \"bar\": [3, 4], \"val\": np.random.randn(2)})\n\n result = df.groupby([\"foo\", \"bar\"]).mean()\n expected = df.groupby([df[\"foo\"], df[\"bar\"]]).mean()[[\"val\"]]\n\n\ndef test_groupby_keys_same_size_as_index():\n # GH 11185\n freq = \"s\"\n index = date_range(\n start=Timestamp(\"2015-09-29T11:34:44-0700\"), periods=2, freq=freq\n )\n df = DataFrame([[\"A\", 10], [\"B\", 15]], columns=[\"metric\", \"values\"], index=index)\n result = df.groupby([Grouper(level=0, freq=freq), \"metric\"]).mean()\n expected = df.set_index([df.index, \"metric\"])\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_groupby_one_row():\n # GH 11741\n msg = r\"^'Z'$\"\n df1 = DataFrame(np.random.randn(1, 4), columns=list(\"ABCD\"))\n with pytest.raises(KeyError, match=msg):\n df1.groupby(\"Z\")\n df2 = DataFrame(np.random.randn(2, 4), columns=list(\"ABCD\"))\n with pytest.raises(KeyError, match=msg):\n df2.groupby(\"Z\")\n\n\ndef test_groupby_nat_exclude():\n # GH 6992\n df = DataFrame(\n {\n \"values\": np.random.randn(8),\n \"dt\": [\n np.nan,\n Timestamp(\"2013-01-01\"),\n np.nan,\n Timestamp(\"2013-02-01\"),\n np.nan,\n Timestamp(\"2013-02-01\"),\n np.nan,\n Timestamp(\"2013-01-01\"),\n ],\n \"str\": [np.nan, \"a\", np.nan, \"a\", np.nan, \"a\", np.nan, \"b\"],\n }\n )\n grouped = df.groupby(\"dt\")\n\n expected = [Index([1, 7]), Index([3, 5])]\n keys = sorted(grouped.groups.keys())\n assert len(keys) == 2\n for k, e in zip(keys, expected):\n # grouped.groups keys are np.datetime64 with system tz\n # not to be affected by tz, only compare values\n tm.assert_index_equal(grouped.groups[k], e)\n\n # confirm obj is not filtered\n tm.assert_frame_equal(grouped.grouper.groupings[0].obj, df)\n assert grouped.ngroups == 2\n\n expected = {\n Timestamp(\"2013-01-01 00:00:00\"): np.array([1, 7], dtype=np.intp),\n Timestamp(\"2013-02-01 00:00:00\"): np.array([3, 5], dtype=np.intp),\n }\n\n for k in grouped.indices:\n tm.assert_numpy_array_equal(grouped.indices[k], expected[k])\n\n tm.assert_frame_equal(grouped.get_group(Timestamp(\"2013-01-01\")), df.iloc[[1, 7]])\n tm.assert_frame_equal(grouped.get_group(Timestamp(\"2013-02-01\")), df.iloc[[3, 5]])\n\n with pytest.raises(KeyError, match=r\"^NaT$\"):\n grouped.get_group(pd.NaT)\n\n nan_df = DataFrame(\n {\"nan\": [np.nan, np.nan, np.nan], \"nat\": [pd.NaT, pd.NaT, pd.NaT]}\n )\n assert nan_df[\"nan\"].dtype == \"float64\"\n assert nan_df[\"nat\"].dtype == \"datetime64[ns]\"\n\n for key in [\"nan\", \"nat\"]:\n grouped = nan_df.groupby(key)\n assert grouped.groups == {}\n assert grouped.ngroups == 0\n assert grouped.indices == {}\n with pytest.raises(KeyError, match=r\"^nan$\"):\n grouped.get_group(np.nan)\n with pytest.raises(KeyError, match=r\"^NaT$\"):\n grouped.get_group(pd.NaT)\n\n\ndef test_groupby_two_group_keys_all_nan():\n # GH #36842: Grouping over two group keys shouldn't raise an error\n df = DataFrame({\"a\": [np.nan, np.nan], \"b\": [np.nan, np.nan], \"c\": [1, 2]})\n result = df.groupby([\"a\", \"b\"]).indices\n assert result == {}\n\n\ndef test_groupby_2d_malformed():\n d = DataFrame(index=range(2))\n d[\"group\"] = [\"g1\", \"g2\"]\n d[\"zeros\"] = [0, 0]\n d[\"ones\"] = [1, 1]\n d[\"label\"] = [\"l1\", \"l2\"]\n tmp = d.groupby([\"group\"]).mean()\n res_values = np.array([[0, 1], [0, 1]], dtype=np.int64)\n tm.assert_index_equal(tmp.columns, Index([\"zeros\", \"ones\"]))\n tm.assert_numpy_array_equal(tmp.values, res_values)\n\n\ndef test_int32_overflow():\n B = np.concatenate((np.arange(10000), np.arange(10000), 
np.arange(5000)))\n A = np.arange(25000)\n df = DataFrame({\"A\": A, \"B\": B, \"C\": A, \"D\": B, \"E\": np.random.randn(25000)})\n\n left = df.groupby([\"A\", \"B\", \"C\", \"D\"]).sum()\n right = df.groupby([\"D\", \"C\", \"B\", \"A\"]).sum()\n assert len(left) == len(right)\n\n\ndef test_groupby_sort_multi():\n df = DataFrame(\n {\n \"a\": [\"foo\", \"bar\", \"baz\"],\n \"b\": [3, 2, 1],\n \"c\": [0, 1, 2],\n \"d\": np.random.randn(3),\n }\n )\n\n tups = [tuple(row) for row in df[[\"a\", \"b\", \"c\"]].values]\n tups = com.asarray_tuplesafe(tups)\n result = df.groupby([\"a\", \"b\", \"c\"], sort=True).sum()\n tm.assert_numpy_array_equal(result.index.values, tups[[1, 2, 0]])\n\n tups = [tuple(row) for row in df[[\"c\", \"a\", \"b\"]].values]\n tups = com.asarray_tuplesafe(tups)\n result = df.groupby([\"c\", \"a\", \"b\"], sort=True).sum()\n tm.assert_numpy_array_equal(result.index.values, tups)\n\n tups = [tuple(x) for x in df[[\"b\", \"c\", \"a\"]].values]\n tups = com.asarray_tuplesafe(tups)\n result = df.groupby([\"b\", \"c\", \"a\"], sort=True).sum()\n tm.assert_numpy_array_equal(result.index.values, tups[[2, 1, 0]])\n\n df = DataFrame(\n {\"a\": [0, 1, 2, 0, 1, 2], \"b\": [0, 0, 0, 1, 1, 1], \"d\": np.random.randn(6)}\n )\n grouped = df.groupby([\"a\", \"b\"])[\"d\"]\n result = grouped.sum()\n\n def _check_groupby(df, result, keys, field, f=lambda x: x.sum()):\n tups = [tuple(row) for row in df[keys].values]\n tups = com.asarray_tuplesafe(tups)\n expected = f(df.groupby(tups)[field])\n for k, v in expected.items():\n assert result[k] == v\n\n _check_groupby(df, result, [\"a\", \"b\"], \"d\")\n\n\ndef test_dont_clobber_name_column():\n df = DataFrame(\n {\"key\": [\"a\", \"a\", \"a\", \"b\", \"b\", \"b\"], \"name\": [\"foo\", \"bar\", \"baz\"] * 2}\n )\n\n result = df.groupby(\"key\").apply(lambda x: x)\n tm.assert_frame_equal(result, df)\n\n\ndef test_skip_group_keys():\n\n tsf = tm.makeTimeDataFrame()\n\n grouped = tsf.groupby(lambda x: x.month, group_keys=False)\n result = grouped.apply(lambda x: x.sort_values(by=\"A\")[:3])\n\n pieces = [group.sort_values(by=\"A\")[:3] for key, group in grouped]\n\n expected = pd.concat(pieces)\n tm.assert_frame_equal(result, expected)\n\n grouped = tsf[\"A\"].groupby(lambda x: x.month, group_keys=False)\n result = grouped.apply(lambda x: x.sort_values()[:3])\n\n pieces = [group.sort_values()[:3] for key, group in grouped]\n\n expected = pd.concat(pieces)\n tm.assert_series_equal(result, expected)\n\n\ndef test_no_nonsense_name(float_frame):\n # GH #995\n s = float_frame[\"C\"].copy()\n s.name = None\n\n result = s.groupby(float_frame[\"A\"]).agg(np.sum)\n assert result.name is None\n\n\ndef test_multifunc_sum_bug():\n # GH #1065\n x = DataFrame(np.arange(9).reshape(3, 3))\n x[\"test\"] = 0\n x[\"fl\"] = [1.3, 1.5, 1.6]\n\n grouped = x.groupby(\"test\")\n result = grouped.agg({\"fl\": \"sum\", 2: \"size\"})\n assert result[\"fl\"].dtype == np.float64\n\n\ndef test_handle_dict_return_value(df):\n def f(group):\n return {\"max\": group.max(), \"min\": group.min()}\n\n def g(group):\n return Series({\"max\": group.max(), \"min\": group.min()})\n\n result = df.groupby(\"A\")[\"C\"].apply(f)\n expected = df.groupby(\"A\")[\"C\"].apply(g)\n\n assert isinstance(result, Series)\n tm.assert_series_equal(result, expected)\n\n\[email protected](\"grouper\", [\"A\", [\"A\", \"B\"]])\ndef test_set_group_name(df, grouper):\n def f(group):\n assert group.name is not None\n return group\n\n def freduce(group):\n assert group.name is not None\n return 
group.sum()\n\n def foo(x):\n return freduce(x)\n\n grouped = df.groupby(grouper)\n\n # make sure all these work\n grouped.apply(f)\n grouped.aggregate(freduce)\n grouped.aggregate({\"C\": freduce, \"D\": freduce})\n grouped.transform(f)\n\n grouped[\"C\"].apply(f)\n grouped[\"C\"].aggregate(freduce)\n grouped[\"C\"].aggregate([freduce, foo])\n grouped[\"C\"].transform(f)\n\n\ndef test_group_name_available_in_inference_pass():\n # gh-15062\n df = DataFrame({\"a\": [0, 0, 1, 1, 2, 2], \"b\": np.arange(6)})\n\n names = []\n\n def f(group):\n names.append(group.name)\n return group.copy()\n\n df.groupby(\"a\", sort=False, group_keys=False).apply(f)\n\n expected_names = [0, 1, 2]\n assert names == expected_names\n\n\ndef test_no_dummy_key_names(df):\n # see gh-1291\n result = df.groupby(df[\"A\"].values).sum()\n assert result.index.name is None\n\n result = df.groupby([df[\"A\"].values, df[\"B\"].values]).sum()\n assert result.index.names == (None, None)\n\n\ndef test_groupby_sort_multiindex_series():\n # series multiindex groupby sort argument was not being passed through\n # _compress_group_index\n # GH 9444\n index = MultiIndex(\n levels=[[1, 2], [1, 2]],\n codes=[[0, 0, 0, 0, 1, 1], [1, 1, 0, 0, 0, 0]],\n names=[\"a\", \"b\"],\n )\n mseries = Series([0, 1, 2, 3, 4, 5], index=index)\n index = MultiIndex(\n levels=[[1, 2], [1, 2]], codes=[[0, 0, 1], [1, 0, 0]], names=[\"a\", \"b\"]\n )\n mseries_result = Series([0, 2, 4], index=index)\n\n result = mseries.groupby(level=[\"a\", \"b\"], sort=False).first()\n tm.assert_series_equal(result, mseries_result)\n result = mseries.groupby(level=[\"a\", \"b\"], sort=True).first()\n tm.assert_series_equal(result, mseries_result.sort_index())\n\n\ndef test_groupby_reindex_inside_function():\n\n periods = 1000\n ind = date_range(start=\"2012/1/1\", freq=\"5min\", periods=periods)\n df = DataFrame({\"high\": np.arange(periods), \"low\": np.arange(periods)}, index=ind)\n\n def agg_before(func, fix=False):\n \"\"\"\n Run an aggregate func on the subset of data.\n \"\"\"\n\n def _func(data):\n d = data.loc[data.index.map(lambda x: x.hour < 11)].dropna()\n if fix:\n data[data.index[0]]\n if len(d) == 0:\n return None\n return func(d)\n\n return _func\n\n grouped = df.groupby(lambda x: datetime(x.year, x.month, x.day))\n closure_bad = grouped.agg({\"high\": agg_before(np.max)})\n closure_good = grouped.agg({\"high\": agg_before(np.max, True)})\n\n tm.assert_frame_equal(closure_bad, closure_good)\n\n\ndef test_groupby_multiindex_missing_pair():\n # GH9049\n df = DataFrame(\n {\n \"group1\": [\"a\", \"a\", \"a\", \"b\"],\n \"group2\": [\"c\", \"c\", \"d\", \"c\"],\n \"value\": [1, 1, 1, 5],\n }\n )\n df = df.set_index([\"group1\", \"group2\"])\n df_grouped = df.groupby(level=[\"group1\", \"group2\"], sort=True)\n\n res = df_grouped.agg(\"sum\")\n idx = MultiIndex.from_tuples(\n [(\"a\", \"c\"), (\"a\", \"d\"), (\"b\", \"c\")], names=[\"group1\", \"group2\"]\n )\n exp = DataFrame([[2], [1], [5]], index=idx, columns=[\"value\"])\n\n tm.assert_frame_equal(res, exp)\n\n\ndef test_groupby_multiindex_not_lexsorted():\n # GH 11640\n\n # define the lexsorted version\n lexsorted_mi = MultiIndex.from_tuples(\n [(\"a\", \"\"), (\"b1\", \"c1\"), (\"b2\", \"c2\")], names=[\"b\", \"c\"]\n )\n lexsorted_df = DataFrame([[1, 3, 4]], columns=lexsorted_mi)\n assert lexsorted_df.columns._is_lexsorted()\n\n # define the non-lexsorted version\n not_lexsorted_df = DataFrame(\n columns=[\"a\", \"b\", \"c\", \"d\"], data=[[1, \"b1\", \"c1\", 3], [1, \"b2\", \"c2\", 4]]\n )\n 
not_lexsorted_df = not_lexsorted_df.pivot_table(\n index=\"a\", columns=[\"b\", \"c\"], values=\"d\"\n )\n not_lexsorted_df = not_lexsorted_df.reset_index()\n assert not not_lexsorted_df.columns._is_lexsorted()\n\n # compare the results\n tm.assert_frame_equal(lexsorted_df, not_lexsorted_df)\n\n expected = lexsorted_df.groupby(\"a\").mean()\n with tm.assert_produces_warning(PerformanceWarning):\n result = not_lexsorted_df.groupby(\"a\").mean()\n tm.assert_frame_equal(expected, result)\n\n # a transforming function should work regardless of sort\n # GH 14776\n df = DataFrame(\n {\"x\": [\"a\", \"a\", \"b\", \"a\"], \"y\": [1, 1, 2, 2], \"z\": [1, 2, 3, 4]}\n ).set_index([\"x\", \"y\"])\n assert not df.index._is_lexsorted()\n\n for level in [0, 1, [0, 1]]:\n for sort in [False, True]:\n result = df.groupby(level=level, sort=sort).apply(DataFrame.drop_duplicates)\n expected = df\n tm.assert_frame_equal(expected, result)\n\n result = (\n df.sort_index()\n .groupby(level=level, sort=sort)\n .apply(DataFrame.drop_duplicates)\n )\n expected = df.sort_index()\n tm.assert_frame_equal(expected, result)\n\n\ndef test_index_label_overlaps_location():\n # checking we don't have any label/location confusion in the\n # wake of GH5375\n df = DataFrame(list(\"ABCDE\"), index=[2, 0, 2, 1, 1])\n g = df.groupby(list(\"ababb\"))\n actual = g.filter(lambda x: len(x) > 2)\n expected = df.iloc[[1, 3, 4]]\n tm.assert_frame_equal(actual, expected)\n\n ser = df[0]\n g = ser.groupby(list(\"ababb\"))\n actual = g.filter(lambda x: len(x) > 2)\n expected = ser.take([1, 3, 4])\n tm.assert_series_equal(actual, expected)\n\n # ... and again, with a generic Index of floats\n df.index = df.index.astype(float)\n g = df.groupby(list(\"ababb\"))\n actual = g.filter(lambda x: len(x) > 2)\n expected = df.iloc[[1, 3, 4]]\n tm.assert_frame_equal(actual, expected)\n\n ser = df[0]\n g = ser.groupby(list(\"ababb\"))\n actual = g.filter(lambda x: len(x) > 2)\n expected = ser.take([1, 3, 4])\n tm.assert_series_equal(actual, expected)\n\n\ndef test_transform_doesnt_clobber_ints():\n # GH 7972\n n = 6\n x = np.arange(n)\n df = DataFrame({\"a\": x // 2, \"b\": 2.0 * x, \"c\": 3.0 * x})\n df2 = DataFrame({\"a\": x // 2 * 1.0, \"b\": 2.0 * x, \"c\": 3.0 * x})\n\n gb = df.groupby(\"a\")\n result = gb.transform(\"mean\")\n\n gb2 = df2.groupby(\"a\")\n expected = gb2.transform(\"mean\")\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\n \"sort_column\",\n [\"ints\", \"floats\", \"strings\", [\"ints\", \"floats\"], [\"ints\", \"strings\"]],\n)\[email protected](\n \"group_column\", [\"int_groups\", \"string_groups\", [\"int_groups\", \"string_groups\"]]\n)\ndef test_groupby_preserves_sort(sort_column, group_column):\n # Test to ensure that groupby always preserves sort order of original\n # object. 
Issue #8588 and #9651\n\n df = DataFrame(\n {\n \"int_groups\": [3, 1, 0, 1, 0, 3, 3, 3],\n \"string_groups\": [\"z\", \"a\", \"z\", \"a\", \"a\", \"g\", \"g\", \"g\"],\n \"ints\": [8, 7, 4, 5, 2, 9, 1, 1],\n \"floats\": [2.3, 5.3, 6.2, -2.4, 2.2, 1.1, 1.1, 5],\n \"strings\": [\"z\", \"d\", \"a\", \"e\", \"word\", \"word2\", \"42\", \"47\"],\n }\n )\n\n # Try sorting on different types and with different group types\n\n df = df.sort_values(by=sort_column)\n g = df.groupby(group_column)\n\n def test_sort(x):\n tm.assert_frame_equal(x, x.sort_values(by=sort_column))\n\n g.apply(test_sort)\n\n\ndef test_pivot_table_values_key_error():\n # This test is designed to replicate the error in issue #14938\n df = DataFrame(\n {\n \"eventDate\": date_range(datetime.today(), periods=20, freq=\"M\").tolist(),\n \"thename\": range(0, 20),\n }\n )\n\n df[\"year\"] = df.set_index(\"eventDate\").index.year\n df[\"month\"] = df.set_index(\"eventDate\").index.month\n\n with pytest.raises(KeyError, match=\"'badname'\"):\n df.reset_index().pivot_table(\n index=\"year\", columns=\"month\", values=\"badname\", aggfunc=\"count\"\n )\n\n\[email protected](\"columns\", [\"C\", [\"C\"]])\[email protected](\"keys\", [[\"A\"], [\"A\", \"B\"]])\[email protected](\n \"values\",\n [\n [True],\n [0],\n [0.0],\n [\"a\"],\n Categorical([0]),\n [to_datetime(0)],\n date_range(0, 1, 1, tz=\"US/Eastern\"),\n pd.array([0], dtype=\"Int64\"),\n pd.array([0], dtype=\"Float64\"),\n pd.array([False], dtype=\"boolean\"),\n ],\n)\[email protected](\"method\", [\"attr\", \"agg\", \"apply\"])\[email protected](\n \"op\", [\"idxmax\", \"idxmin\", \"mad\", \"min\", \"max\", \"sum\", \"prod\", \"skew\"]\n)\ndef test_empty_groupby(columns, keys, values, method, op, request):\n # GH8093 & GH26411\n\n if isinstance(values, Categorical) and len(keys) == 1 and method == \"apply\":\n mark = pytest.mark.xfail(raises=TypeError, match=\"'str' object is not callable\")\n request.node.add_marker(mark)\n elif (\n isinstance(values, Categorical)\n and len(keys) == 1\n and op in [\"idxmax\", \"idxmin\"]\n ):\n mark = pytest.mark.xfail(\n raises=ValueError, match=\"attempt to get arg(min|max) of an empty sequence\"\n )\n request.node.add_marker(mark)\n elif (\n isinstance(values, Categorical)\n and len(keys) == 1\n and not isinstance(columns, list)\n ):\n mark = pytest.mark.xfail(\n raises=TypeError, match=\"'Categorical' does not implement\"\n )\n request.node.add_marker(mark)\n elif (\n isinstance(values, Categorical)\n and len(keys) == 1\n and op in [\"mad\", \"min\", \"max\", \"sum\", \"prod\", \"skew\"]\n ):\n mark = pytest.mark.xfail(\n raises=AssertionError, match=\"(DataFrame|Series) are different\"\n )\n request.node.add_marker(mark)\n elif (\n isinstance(values, Categorical)\n and len(keys) == 2\n and op in [\"min\", \"max\", \"sum\"]\n and method != \"apply\"\n ):\n mark = pytest.mark.xfail(\n raises=AssertionError, match=\"(DataFrame|Series) are different\"\n )\n request.node.add_marker(mark)\n elif (\n isinstance(values, pd.core.arrays.BooleanArray)\n and op in [\"sum\", \"prod\"]\n and method != \"apply\"\n ):\n mark = pytest.mark.xfail(\n raises=AssertionError, match=\"(DataFrame|Series) are different\"\n )\n request.node.add_marker(mark)\n\n override_dtype = None\n if isinstance(values[0], bool) and op in (\"prod\", \"sum\") and method != \"apply\":\n # sum/product of bools is an integer\n override_dtype = \"int64\"\n\n df = DataFrame({\"A\": values, \"B\": values, \"C\": values}, columns=list(\"ABC\"))\n\n if hasattr(values, \"dtype\"):\n 
# check that we did the construction right\n assert (df.dtypes == values.dtype).all()\n\n df = df.iloc[:0]\n\n gb = df.groupby(keys)[columns]\n if method == \"attr\":\n result = getattr(gb, op)()\n else:\n result = getattr(gb, method)(op)\n\n expected = df.set_index(keys)[columns]\n if override_dtype is not None:\n expected = expected.astype(override_dtype)\n if len(keys) == 1:\n expected.index.name = keys[0]\n tm.assert_equal(result, expected)\n\n\ndef test_tuple_as_grouping():\n # https://github.com/pandas-dev/pandas/issues/18314\n df = DataFrame(\n {\n (\"a\", \"b\"): [1, 1, 1, 1],\n \"a\": [2, 2, 2, 2],\n \"b\": [2, 2, 2, 2],\n \"c\": [1, 1, 1, 1],\n }\n )\n\n with pytest.raises(KeyError, match=r\"('a', 'b')\"):\n df[[\"a\", \"b\", \"c\"]].groupby((\"a\", \"b\"))\n\n result = df.groupby((\"a\", \"b\"))[\"c\"].sum()\n expected = Series([4], name=\"c\", index=Index([1], name=(\"a\", \"b\")))\n tm.assert_series_equal(result, expected)\n\n\ndef test_tuple_correct_keyerror():\n # https://github.com/pandas-dev/pandas/issues/18798\n df = DataFrame(1, index=range(3), columns=MultiIndex.from_product([[1, 2], [3, 4]]))\n with pytest.raises(KeyError, match=r\"^\\(7, 8\\)$\"):\n df.groupby((7, 8)).mean()\n\n\ndef test_groupby_agg_ohlc_non_first():\n # GH 21716\n df = DataFrame(\n [[1], [1]],\n columns=[\"foo\"],\n index=date_range(\"2018-01-01\", periods=2, freq=\"D\"),\n )\n\n expected = DataFrame(\n [[1, 1, 1, 1, 1], [1, 1, 1, 1, 1]],\n columns=MultiIndex.from_tuples(\n (\n (\"foo\", \"sum\", \"foo\"),\n (\"foo\", \"ohlc\", \"open\"),\n (\"foo\", \"ohlc\", \"high\"),\n (\"foo\", \"ohlc\", \"low\"),\n (\"foo\", \"ohlc\", \"close\"),\n )\n ),\n index=date_range(\"2018-01-01\", periods=2, freq=\"D\"),\n )\n\n result = df.groupby(Grouper(freq=\"D\")).agg([\"sum\", \"ohlc\"])\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_groupby_multiindex_nat():\n # GH 9236\n values = [\n (pd.NaT, \"a\"),\n (datetime(2012, 1, 2), \"a\"),\n (datetime(2012, 1, 2), \"b\"),\n (datetime(2012, 1, 3), \"a\"),\n ]\n mi = MultiIndex.from_tuples(values, names=[\"date\", None])\n ser = Series([3, 2, 2.5, 4], index=mi)\n\n result = ser.groupby(level=1).mean()\n expected = Series([3.0, 2.5], index=[\"a\", \"b\"])\n tm.assert_series_equal(result, expected)\n\n\ndef test_groupby_empty_list_raises():\n # GH 5289\n values = zip(range(10), range(10))\n df = DataFrame(values, columns=[\"apple\", \"b\"])\n msg = \"Grouper and axis must be same length\"\n with pytest.raises(ValueError, match=msg):\n df.groupby([[]])\n\n\ndef test_groupby_multiindex_series_keys_len_equal_group_axis():\n # GH 25704\n index_array = [[\"x\", \"x\"], [\"a\", \"b\"], [\"k\", \"k\"]]\n index_names = [\"first\", \"second\", \"third\"]\n ri = MultiIndex.from_arrays(index_array, names=index_names)\n s = Series(data=[1, 2], index=ri)\n result = s.groupby([\"first\", \"third\"]).sum()\n\n index_array = [[\"x\"], [\"k\"]]\n index_names = [\"first\", \"third\"]\n ei = MultiIndex.from_arrays(index_array, names=index_names)\n expected = Series([3], index=ei)\n\n tm.assert_series_equal(result, expected)\n\n\ndef test_groupby_groups_in_BaseGrouper():\n # GH 26326\n # Test if DataFrame grouped with a pandas.Grouper has correct groups\n mi = MultiIndex.from_product([[\"A\", \"B\"], [\"C\", \"D\"]], names=[\"alpha\", \"beta\"])\n df = DataFrame({\"foo\": [1, 2, 1, 2], \"bar\": [1, 2, 3, 4]}, index=mi)\n result = df.groupby([Grouper(level=\"alpha\"), \"beta\"])\n expected = df.groupby([\"alpha\", \"beta\"])\n assert result.groups == expected.groups\n\n result = 
df.groupby([\"beta\", Grouper(level=\"alpha\")])\n expected = df.groupby([\"beta\", \"alpha\"])\n assert result.groups == expected.groups\n\n\[email protected](\"group_name\", [\"x\", [\"x\"]])\ndef test_groupby_axis_1(group_name):\n # GH 27614\n df = DataFrame(\n np.arange(12).reshape(3, 4), index=[0, 1, 0], columns=[10, 20, 10, 20]\n )\n df.index.name = \"y\"\n df.columns.name = \"x\"\n\n results = df.groupby(group_name, axis=1).sum()\n expected = df.T.groupby(group_name).sum().T\n tm.assert_frame_equal(results, expected)\n\n # test on MI column\n iterables = [[\"bar\", \"baz\", \"foo\"], [\"one\", \"two\"]]\n mi = MultiIndex.from_product(iterables=iterables, names=[\"x\", \"x1\"])\n df = DataFrame(np.arange(18).reshape(3, 6), index=[0, 1, 0], columns=mi)\n results = df.groupby(group_name, axis=1).sum()\n expected = df.T.groupby(group_name).sum().T\n tm.assert_frame_equal(results, expected)\n\n\[email protected](\n \"op, expected\",\n [\n (\n \"shift\",\n {\n \"time\": [\n None,\n None,\n Timestamp(\"2019-01-01 12:00:00\"),\n Timestamp(\"2019-01-01 12:30:00\"),\n None,\n None,\n ]\n },\n ),\n (\n \"bfill\",\n {\n \"time\": [\n Timestamp(\"2019-01-01 12:00:00\"),\n Timestamp(\"2019-01-01 12:30:00\"),\n Timestamp(\"2019-01-01 14:00:00\"),\n Timestamp(\"2019-01-01 14:30:00\"),\n Timestamp(\"2019-01-01 14:00:00\"),\n Timestamp(\"2019-01-01 14:30:00\"),\n ]\n },\n ),\n (\n \"ffill\",\n {\n \"time\": [\n Timestamp(\"2019-01-01 12:00:00\"),\n Timestamp(\"2019-01-01 12:30:00\"),\n Timestamp(\"2019-01-01 12:00:00\"),\n Timestamp(\"2019-01-01 12:30:00\"),\n Timestamp(\"2019-01-01 14:00:00\"),\n Timestamp(\"2019-01-01 14:30:00\"),\n ]\n },\n ),\n ],\n)\ndef test_shift_bfill_ffill_tz(tz_naive_fixture, op, expected):\n # GH19995, GH27992: Check that timezone does not drop in shift, bfill, and ffill\n tz = tz_naive_fixture\n data = {\n \"id\": [\"A\", \"B\", \"A\", \"B\", \"A\", \"B\"],\n \"time\": [\n Timestamp(\"2019-01-01 12:00:00\"),\n Timestamp(\"2019-01-01 12:30:00\"),\n None,\n None,\n Timestamp(\"2019-01-01 14:00:00\"),\n Timestamp(\"2019-01-01 14:30:00\"),\n ],\n }\n df = DataFrame(data).assign(time=lambda x: x.time.dt.tz_localize(tz))\n\n grouped = df.groupby(\"id\")\n result = getattr(grouped, op)()\n expected = DataFrame(expected).assign(time=lambda x: x.time.dt.tz_localize(tz))\n tm.assert_frame_equal(result, expected)\n\n\ndef test_groupby_only_none_group():\n # see GH21624\n # this was crashing with \"ValueError: Length of passed values is 1, index implies 0\"\n df = DataFrame({\"g\": [None], \"x\": 1})\n actual = df.groupby(\"g\")[\"x\"].transform(\"sum\")\n expected = Series([np.nan], name=\"x\")\n\n tm.assert_series_equal(actual, expected)\n\n\ndef test_groupby_duplicate_index():\n # GH#29189 the groupby call here used to raise\n ser = Series([2, 5, 6, 8], index=[2.0, 4.0, 4.0, 5.0])\n gb = ser.groupby(level=0)\n\n result = gb.mean()\n expected = Series([2, 5.5, 8], index=[2.0, 4.0, 5.0])\n tm.assert_series_equal(result, expected)\n\n\[email protected](\n \"idx\", [Index([\"a\", \"a\"]), MultiIndex.from_tuples(((\"a\", \"a\"), (\"a\", \"a\")))]\n)\[email protected](\"ignore:tshift is deprecated:FutureWarning\")\ndef test_dup_labels_output_shape(groupby_func, idx):\n if groupby_func in {\"size\", \"ngroup\", \"cumcount\"}:\n pytest.skip(\"Not applicable\")\n\n df = DataFrame([[1, 1]], columns=idx)\n grp_by = df.groupby([0])\n\n args = []\n if groupby_func in {\"fillna\", \"nth\"}:\n args.append(0)\n elif groupby_func == \"corrwith\":\n args.append(df)\n elif groupby_func == 
\"tshift\":\n df.index = [Timestamp(\"today\")]\n args.extend([1, \"D\"])\n\n result = getattr(grp_by, groupby_func)(*args)\n\n assert result.shape == (1, 2)\n tm.assert_index_equal(result.columns, idx)\n\n\ndef test_groupby_crash_on_nunique(axis):\n # Fix following 30253\n df = DataFrame({(\"A\", \"B\"): [1, 2], (\"A\", \"C\"): [1, 3], (\"D\", \"B\"): [0, 0]})\n\n axis_number = df._get_axis_number(axis)\n if not axis_number:\n df = df.T\n\n result = df.groupby(axis=axis_number, level=0).nunique()\n\n expected = DataFrame({\"A\": [1, 2], \"D\": [1, 1]})\n if not axis_number:\n expected = expected.T\n\n tm.assert_frame_equal(result, expected)\n\n # same thing, but empty columns\n gb = df[[]].groupby(axis=axis_number, level=0)\n res = gb.nunique()\n exp = expected[[]]\n tm.assert_frame_equal(res, exp)\n\n\ndef test_groupby_list_level():\n # GH 9790\n expected = DataFrame(np.arange(0, 9).reshape(3, 3))\n result = expected.groupby(level=[0]).mean()\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\n \"max_seq_items, expected\",\n [\n (5, \"{0: [0], 1: [1], 2: [2], 3: [3], 4: [4]}\"),\n (4, \"{0: [0], 1: [1], 2: [2], 3: [3], ...}\"),\n (1, \"{0: [0], ...}\"),\n ],\n)\ndef test_groups_repr_truncates(max_seq_items, expected):\n # GH 1135\n df = DataFrame(np.random.randn(5, 1))\n df[\"a\"] = df.index\n\n with pd.option_context(\"display.max_seq_items\", max_seq_items):\n result = df.groupby(\"a\").groups.__repr__()\n assert result == expected\n\n result = df.groupby(np.array(df.a)).groups.__repr__()\n assert result == expected\n\n\ndef test_group_on_two_row_multiindex_returns_one_tuple_key():\n # GH 18451\n df = DataFrame([{\"a\": 1, \"b\": 2, \"c\": 99}, {\"a\": 1, \"b\": 2, \"c\": 88}])\n df = df.set_index([\"a\", \"b\"])\n\n grp = df.groupby([\"a\", \"b\"])\n result = grp.indices\n expected = {(1, 2): np.array([0, 1], dtype=np.int64)}\n\n assert len(result) == 1\n key = (1, 2)\n assert (result[key] == expected[key]).all()\n\n\[email protected](\n \"klass, attr, value\",\n [\n (DataFrame, \"level\", \"a\"),\n (DataFrame, \"as_index\", False),\n (DataFrame, \"sort\", False),\n (DataFrame, \"group_keys\", False),\n (DataFrame, \"squeeze\", True),\n (DataFrame, \"observed\", True),\n (DataFrame, \"dropna\", False),\n pytest.param(\n Series,\n \"axis\",\n 1,\n marks=pytest.mark.xfail(\n reason=\"GH 35443: Attribute currently not passed on to series\"\n ),\n ),\n (Series, \"level\", \"a\"),\n (Series, \"as_index\", False),\n (Series, \"sort\", False),\n (Series, \"group_keys\", False),\n (Series, \"squeeze\", True),\n (Series, \"observed\", True),\n (Series, \"dropna\", False),\n ],\n)\[email protected](\n \"ignore:The `squeeze` parameter is deprecated:FutureWarning\"\n)\ndef test_subsetting_columns_keeps_attrs(klass, attr, value):\n # GH 9959 - When subsetting columns, don't drop attributes\n df = DataFrame({\"a\": [1], \"b\": [2], \"c\": [3]})\n if attr != \"axis\":\n df = df.set_index(\"a\")\n\n expected = df.groupby(\"a\", **{attr: value})\n result = expected[[\"b\"]] if klass is DataFrame else expected[\"b\"]\n assert getattr(result, attr) == getattr(expected, attr)\n\n\ndef test_subsetting_columns_axis_1():\n # GH 37725\n g = DataFrame({\"A\": [1], \"B\": [2], \"C\": [3]}).groupby([0, 0, 1], axis=1)\n match = \"Cannot subset columns when using axis=1\"\n with pytest.raises(ValueError, match=match):\n g[[\"A\", \"B\"]].sum()\n\n\[email protected](\"func\", [\"sum\", \"any\", \"shift\"])\ndef test_groupby_column_index_name_lost(func):\n # GH: 29764 groupby loses index sometimes\n 
expected = Index([\"a\"], name=\"idx\")\n df = DataFrame([[1]], columns=expected)\n df_grouped = df.groupby([1])\n result = getattr(df_grouped, func)().columns\n tm.assert_index_equal(result, expected)\n\n\ndef test_groupby_duplicate_columns():\n # GH: 31735\n df = DataFrame(\n {\"A\": [\"f\", \"e\", \"g\", \"h\"], \"B\": [\"a\", \"b\", \"c\", \"d\"], \"C\": [1, 2, 3, 4]}\n ).astype(object)\n df.columns = [\"A\", \"B\", \"B\"]\n result = df.groupby([0, 0, 0, 0]).min()\n expected = DataFrame([[\"e\", \"a\", 1]], columns=[\"A\", \"B\", \"B\"])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_groupby_series_with_tuple_name():\n # GH 37755\n ser = Series([1, 2, 3, 4], index=[1, 1, 2, 2], name=(\"a\", \"a\"))\n ser.index.name = (\"b\", \"b\")\n result = ser.groupby(level=0).last()\n expected = Series([2, 4], index=[1, 2], name=(\"a\", \"a\"))\n expected.index.name = (\"b\", \"b\")\n tm.assert_series_equal(result, expected)\n\n\[email protected](not IS64, reason=\"GH#38778: fail on 32-bit system\")\[email protected](\n \"func, values\", [(\"sum\", [97.0, 98.0]), (\"mean\", [24.25, 24.5])]\n)\ndef test_groupby_numerical_stability_sum_mean(func, values):\n # GH#38778\n data = [1e16, 1e16, 97, 98, -5e15, -5e15, -5e15, -5e15]\n df = DataFrame({\"group\": [1, 2] * 4, \"a\": data, \"b\": data})\n result = getattr(df.groupby(\"group\"), func)()\n expected = DataFrame({\"a\": values, \"b\": values}, index=Index([1, 2], name=\"group\"))\n tm.assert_frame_equal(result, expected)\n\n\[email protected](not IS64, reason=\"GH#38778: fail on 32-bit system\")\ndef test_groupby_numerical_stability_cumsum():\n # GH#38934\n data = [1e16, 1e16, 97, 98, -5e15, -5e15, -5e15, -5e15]\n df = DataFrame({\"group\": [1, 2] * 4, \"a\": data, \"b\": data})\n result = df.groupby(\"group\").cumsum()\n exp_data = (\n [1e16] * 2 + [1e16 + 96, 1e16 + 98] + [5e15 + 97, 5e15 + 98] + [97.0, 98.0]\n )\n expected = DataFrame({\"a\": exp_data, \"b\": exp_data})\n tm.assert_frame_equal(result, expected, check_exact=True)\n\n\ndef test_groupby_mean_duplicate_index(rand_series_with_duplicate_datetimeindex):\n dups = rand_series_with_duplicate_datetimeindex\n result = dups.groupby(level=0).mean()\n expected = dups.groupby(dups.index).mean()\n tm.assert_series_equal(result, expected)\n"
] | [
[
"pandas._testing.assert_numpy_array_equal",
"numpy.ones",
"numpy.sum",
"pandas.Series",
"pandas.array",
"pandas._testing.assert_frame_equal",
"pandas.Categorical",
"pandas._testing.assert_series_equal",
"pandas.Grouper",
"pandas._testing.assert_produces_warning",
"pandas._testing.assert_equal",
"pandas._testing.assert_almost_equal",
"pandas.to_datetime",
"pandas.Timestamp",
"pandas.date_range",
"pandas.core.common.asarray_tuplesafe",
"pandas.MultiIndex.from_product",
"numpy.arange",
"pandas.MultiIndex.from_tuples",
"pandas.concat",
"numpy.std",
"pandas.Index",
"numpy.array",
"numpy.percentile",
"numpy.random.shuffle",
"pandas._testing.makeTimeDataFrame",
"numpy.empty",
"pandas.MultiIndex.from_arrays",
"pandas.DataFrame",
"numpy.random.randn",
"numpy.random.random",
"pandas._testing.assert_index_equal",
"pandas._testing.rands_array",
"pandas.MultiIndex",
"numpy.random.randint",
"pandas.option_context"
]
] |
kbrose/pytorch | [
"fc0b8e60337ae46b90ed5d2f6d1f623f0f8d6581"
] | [
"test/test_sort_and_select.py"
] | [
"import torch\nimport numpy as np\n\nimport random\nfrom torch._six import nan\nfrom itertools import permutations, product\n\nfrom torch.testing import all_types, all_types_and\nfrom torch.testing._internal.common_utils import \\\n (TEST_WITH_ROCM, TestCase, run_tests, make_tensor, slowTest)\nfrom torch.testing._internal.common_device_type import \\\n (instantiate_device_type_tests, dtypes, onlyOnCPUAndCUDA,\n skipCUDAIfRocm, onlyCUDA, dtypesIfCUDA, dtypesIfCPU, onlyCPU, largeTensorTest)\n\n# TODO: remove this\nSIZE = 100\n\nclass TestSortAndSelect(TestCase):\n\n def assertIsOrdered(self, order, x, mxx, ixx, task):\n SIZE = x.size(1)\n if order == 'descending':\n def check_order(a, b):\n # `a != a` because we put NaNs\n # at the end of ascending sorted lists,\n # and the beginning of descending ones.\n return ((a != a) | (a >= b)).all().item()\n elif order == 'ascending':\n def check_order(a, b):\n # see above\n return ((b != b) | (a <= b)).all().item()\n else:\n error('unknown order \"{}\", must be \"ascending\" or \"descending\"'.format(order))\n\n are_ordered = True\n for k in range(1, SIZE):\n self.assertTrue(check_order(mxx[:, k - 1], mxx[:, k]),\n 'torch.sort ({}) values unordered for {}'.format(order, task))\n\n seen = set()\n indicesCorrect = True\n size0 = x.size(0)\n size = x.size(x.dim() - 1)\n x = x.tolist()\n mxx = mxx.tolist()\n ixx = ixx.tolist()\n for k in range(size0):\n seen.clear()\n for j in range(size):\n self.assertEqual(x[k][ixx[k][j]], mxx[k][j],\n msg='torch.sort ({}) indices wrong for {}'.format(order, task))\n seen.add(ixx[k][j])\n self.assertEqual(len(seen), size)\n\n def test_sort(self, device):\n # on CUDA 2048 vs >2048 have different code path for the dim being sorted\n for SIZE in (4, 2049):\n x = torch.rand(4, SIZE, device=device)\n res1val, res1ind = torch.sort(x)\n\n # Test inplace\n y = x.clone()\n y_inds = torch.tensor((), dtype=torch.int64, device=device)\n torch.sort(y, out=(y, y_inds))\n x_vals, x_inds = torch.sort(x)\n self.assertEqual(x_vals, y)\n self.assertEqual(x_inds, y_inds)\n\n # Test use of result tensor\n res2val = torch.tensor((), device=device)\n res2ind = torch.tensor((), device=device, dtype=torch.long)\n torch.sort(x, out=(res2val, res2ind))\n self.assertEqual(res1val, res2val, atol=0, rtol=0)\n self.assertEqual(res1ind, res2ind, atol=0, rtol=0)\n self.assertEqual(torch.argsort(x), res1ind)\n self.assertEqual(x.argsort(), res1ind)\n\n # Test sorting of random numbers\n self.assertIsOrdered('ascending', x, res2val, res2ind, 'random')\n\n # Test simple sort\n self.assertEqual(\n torch.sort(torch.tensor((50, 40, 30, 20, 10), device=device))[0],\n torch.tensor((10, 20, 30, 40, 50), device=device),\n atol=0, rtol=0\n )\n\n # Test that we still have proper sorting with duplicate keys\n x = torch.floor(torch.rand(4, SIZE, device=device) * 10)\n torch.sort(x, out=(res2val, res2ind))\n self.assertIsOrdered('ascending', x, res2val, res2ind, 'random with duplicate keys')\n\n # DESCENDING SORT\n x = torch.rand(4, SIZE, device=device)\n res1val, res1ind = torch.sort(x, x.dim() - 1, True)\n\n # Test use of result tensor\n res2val = torch.tensor((), device=device)\n res2ind = torch.tensor((), device=device, dtype=torch.long)\n torch.sort(x, x.dim() - 1, True, out=(res2val, res2ind))\n self.assertEqual(res1val, res2val, atol=0, rtol=0)\n self.assertEqual(res1ind, res2ind, atol=0, rtol=0)\n self.assertEqual(torch.argsort(x, x.dim() - 1, True), res1ind)\n self.assertEqual(x.argsort(x.dim() - 1, True), res1ind)\n\n # Test sorting of random numbers\n 
self.assertIsOrdered('descending', x, res2val, res2ind, 'random')\n\n # Test simple sort task\n self.assertEqual(\n torch.sort(torch.tensor((10, 20, 30, 40, 50), device=device), 0, True)[0],\n torch.tensor((50, 40, 30, 20, 10), device=device),\n atol=0, rtol=0\n )\n\n # Test that we still have proper sorting with duplicate keys\n self.assertIsOrdered('descending', x, res2val, res2ind, 'random with duplicate keys')\n\n # Test sorting with NaNs\n x = torch.rand(4, SIZE, device=device)\n x[1][2] = float('NaN')\n x[3][0] = float('NaN')\n torch.sort(x, out=(res2val, res2ind))\n self.assertIsOrdered('ascending', x, res2val, res2ind,\n 'random with NaNs')\n torch.sort(x, out=(res2val, res2ind), descending=True)\n self.assertIsOrdered('descending', x, res2val, res2ind,\n 'random with NaNs')\n\n # FIXME: remove torch.bool from unsupported types once support is added for cub sort\n @dtypes(*set(torch.testing.get_all_dtypes()) - {torch.bool, torch.complex64, torch.complex128})\n def test_stable_sort(self, device, dtype):\n if TEST_WITH_ROCM and dtype == torch.bfloat16:\n return\n sizes = (100, 1000, 10000)\n for ncopies in sizes:\n x = torch.tensor([0, 1] * ncopies, dtype=dtype, device=device)\n _, idx = x.sort(stable=True)\n self.assertEqual(\n idx[:ncopies],\n torch.arange(start=0, end=2 * ncopies, step=2, device=device)\n )\n self.assertEqual(\n idx[ncopies:],\n torch.arange(start=1, end=2 * ncopies, step=2, device=device)\n )\n\n @onlyCUDA\n @dtypes(torch.uint8)\n @largeTensorTest('200GB') # Unfortunately 80GB A100 is not large enough\n def test_sort_large(self, device, dtype):\n t0 = torch.randperm(8192, device=device).to(dtype)\n t = t0.view(1, 8192).expand(2 ** 18 + 1, -1).contiguous()\n v, i = t.sort()\n del t\n iv, im = i.var_mean(dim=0)\n del i\n vv, vm = v.var_mean(dim=0)\n del v\n self.assertEqual(vv, torch.zeros_like(vv))\n self.assertEqual(iv, torch.zeros_like(iv))\n self.assertEqual(vm, torch.arange(255, dtype=dtype, device=device))\n self.assertEqual(im, t0.sort().indices)\n\n def _test_sort_discontiguous(self, device, dtype):\n # on CUDA 2048 vs >2048 have different code path for the dim being sorted\n sizes = (5, 7, 2049)\n for shape in permutations(sizes):\n for perm in permutations((0, 1, 2)):\n for dim in range(3):\n t = torch.randn(shape, device=device, dtype=dtype).permute(perm)\n r1 = t.sort(dim=dim)\n r2 = t.contiguous().sort(dim=dim)\n self.assertEqual(r1, r2)\n n = t.size(dim)\n\n # assert ordered\n self.assertTrue((r1.values.narrow(dim, 1, n - 1) >= r1.values.narrow(dim, 0, n - 1)).all())\n\n # assert that different segments does not mix, which can easily happen\n # if the stride is not handled correctly\n self.assertTrue((t.unsqueeze(-1).transpose(dim, -1) == r1.values.unsqueeze(-1)).any(dim=dim).any(dim=-1).all())\n\n # assert stride is preserved\n if self.device_type == 'cuda':\n # FIXME: this behavior should be true for all cases, not\n # just the one specified in if condition\n self.assertEqual(r1.values.stride(), t.stride())\n self.assertEqual(r1.indices.stride(), t.stride())\n\n @onlyCUDA\n @dtypes(torch.float32)\n def test_sort_discontiguous(self, device, dtype):\n self._test_sort_discontiguous(device, dtype)\n\n @slowTest # this test is slow on CPU, but not on CUDA\n @onlyCPU\n @dtypes(torch.float32)\n def test_sort_discontiguous_slow(self, device, dtype):\n self._test_sort_discontiguous(device, dtype)\n\n # FIXME: remove torch.bool from unsupported types once support is added for cub sort\n @dtypes(*set(torch.testing.get_all_dtypes()) - {torch.bool, 
torch.complex64, torch.complex128})\n def test_stable_sort_against_numpy(self, device, dtype):\n if TEST_WITH_ROCM and dtype == torch.bfloat16:\n return\n if dtype in torch.testing.floating_types_and(torch.float16, torch.bfloat16):\n inf = float('inf')\n neg_inf = -float('inf')\n nan = float('nan')\n else:\n if dtype != torch.bool:\n # no torch.iinfo support for torch.bool\n inf = torch.iinfo(dtype).max\n neg_inf = torch.iinfo(dtype).min\n else:\n inf = True\n neg_inf = ~inf\n # no nan for integral types, we use inf instead for simplicity\n nan = inf\n\n def generate_samples():\n from itertools import chain, combinations\n\n for sizes in [(1025,), (10000,)]:\n size = sizes[0]\n # binary strings\n yield (torch.tensor([0, 1] * size, dtype=dtype, device=device), 0)\n\n if self.device_type == 'cuda':\n return\n\n yield (torch.tensor([0, 1] * 100, dtype=dtype, device=device), 0)\n\n def repeated_index_fill(t, dim, idxs, vals):\n res = t\n for idx, val in zip(idxs, vals):\n res = res.index_fill(dim, idx, val)\n return res\n\n for sizes in [(1, 10), (10, 1), (10, 10), (10, 10, 10)]:\n size = min(*sizes)\n x = (torch.randn(*sizes, device=device) * size).to(dtype)\n yield (x, 0)\n\n # Generate tensors which are being filled at random locations\n # with values from the non-empty subsets of the set (inf, neg_inf, nan)\n # for each dimension.\n n_fill_vals = 3 # cardinality of (inf, neg_inf, nan)\n for dim in range(len(sizes)):\n idxs = (torch.randint(high=size, size=(size // 10,)) for i in range(n_fill_vals))\n vals = (inf, neg_inf, nan)\n subsets = chain.from_iterable(combinations(list(zip(idxs, vals)), r)\n for r in range(1, n_fill_vals + 1))\n for subset in subsets:\n idxs_subset, vals_subset = zip(*subset)\n yield (repeated_index_fill(x, dim, idxs_subset, vals_subset), dim)\n\n for sample, dim in generate_samples():\n _, idx_torch = sample.sort(dim=dim, stable=True)\n if dtype is torch.bfloat16:\n sample_numpy = sample.float().cpu().numpy()\n else:\n sample_numpy = sample.cpu().numpy()\n idx_numpy = np.argsort(sample_numpy, axis=dim, kind='stable')\n self.assertEqual(idx_torch, idx_numpy)\n\n @dtypes(*(torch.testing.get_all_int_dtypes() + torch.testing.get_all_fp_dtypes()))\n def test_msort(self, device, dtype):\n if TEST_WITH_ROCM and dtype == torch.bfloat16:\n return\n\n def test(shape):\n tensor = make_tensor(shape, device, dtype, low=-9, high=9)\n if tensor.size() != torch.Size([]):\n if dtype is torch.bfloat16:\n expected = torch.from_numpy(np.msort(tensor.float().cpu().numpy())).bfloat16()\n else:\n expected = torch.from_numpy(np.msort(tensor.cpu().numpy()))\n else:\n expected = tensor # numpy.msort() does not support empty shapes tensor\n\n result = torch.msort(tensor)\n self.assertEqual(result, expected)\n\n out = torch.empty_like(result)\n torch.msort(tensor, out=out)\n self.assertEqual(out, expected)\n\n shapes = (\n [],\n [0, ],\n [20, ],\n [1, 20],\n [30, 30],\n [10, 20, 30]\n )\n for shape in shapes:\n test(shape)\n\n def test_topk(self, device):\n def topKViaSort(t, k, dim, dir):\n sorted, indices = t.sort(dim, dir)\n return sorted.narrow(dim, 0, k), indices.narrow(dim, 0, k)\n\n def compareTensors(t, res1, ind1, res2, ind2, dim):\n # Values should be exactly equivalent\n self.assertEqual(res1, res2, atol=0, rtol=0)\n\n # Indices might differ based on the implementation, since there is\n # no guarantee of the relative order of selection\n if not ind1.eq(ind2).all():\n # To verify that the indices represent equivalent elements,\n # gather from the input using the topk indices and 
compare against\n # the sort indices\n vals = t.gather(dim, ind2)\n self.assertEqual(res1, vals, atol=0, rtol=0)\n\n def compare(t, k, dim, dir):\n topKVal, topKInd = t.topk(k, dim, dir, True)\n sortKVal, sortKInd = topKViaSort(t, k, dim, dir)\n compareTensors(t, sortKVal, sortKInd, topKVal, topKInd, dim)\n\n t = torch.rand(random.randint(1, SIZE),\n random.randint(1, SIZE),\n random.randint(1, SIZE), device=device)\n\n for _kTries in range(3):\n for _dimTries in range(3):\n for transpose in (True, False):\n for dir in (True, False):\n testTensor = t\n if transpose:\n dim1 = random.randrange(t.ndimension())\n dim2 = dim1\n while dim1 == dim2:\n dim2 = random.randrange(t.ndimension())\n\n testTensor = t.transpose(dim1, dim2)\n\n dim = random.randrange(testTensor.ndimension())\n k = random.randint(1, testTensor.size(dim))\n compare(testTensor, k, dim, dir)\n\n def test_topk_arguments(self, device):\n q = torch.randn(10, 2, 10, device=device)\n # Make sure True isn't mistakenly taken as the 2nd dimension (interpreted as 1)\n self.assertRaises(TypeError, lambda: q.topk(4, True))\n\n @skipCUDAIfRocm\n def test_unique_dim(self, device):\n self.assertFalse(hasattr(torch, 'unique_dim'))\n\n def run_test(device, dtype):\n x = torch.tensor([[[1., 1.],\n [0., 1.],\n [2., 1.],\n [0., 1.]],\n [[1., 1.],\n [0., 1.],\n [2., 1.],\n [0., 1.]]],\n dtype=dtype,\n device=device)\n x_empty = torch.empty(5, 0, dtype=dtype, device=device)\n x_ill_formed_empty = torch.empty(5, 0, 0, dtype=dtype, device=device)\n x_ill_formed_empty_another = torch.empty(5, 0, 5, dtype=dtype, device=device)\n expected_unique_dim0 = torch.tensor([[[1., 1.],\n [0., 1.],\n [2., 1.],\n [0., 1.]]],\n dtype=dtype,\n device=device)\n expected_inverse_dim0 = torch.tensor([0, 0])\n expected_counts_dim0 = torch.tensor([2])\n expected_unique_dim1 = torch.tensor([[[0., 1.],\n [1., 1.],\n [2., 1.]],\n [[0., 1.],\n [1., 1.],\n [2., 1.]]],\n dtype=dtype,\n device=device)\n expected_unique_dim1_bool = torch.tensor([[[False, True], [True, True]],\n [[False, True], [True, True]]],\n dtype=torch.bool,\n device=device)\n expected_inverse_dim1 = torch.tensor([1, 0, 2, 0])\n expected_inverse_dim1_bool = torch.tensor([1, 0, 1, 0])\n expected_counts_dim1 = torch.tensor([2, 1, 1])\n expected_counts_dim1_bool = torch.tensor([2, 2])\n expected_unique_dim2 = torch.tensor([[[1., 1.],\n [0., 1.],\n [2., 1.],\n [0., 1.]],\n [[1., 1.],\n [0., 1.],\n [2., 1.],\n [0., 1.]]],\n dtype=dtype,\n device=device)\n expected_inverse_dim2 = torch.tensor([0, 1])\n expected_counts_dim2 = torch.tensor([1, 1])\n expected_unique_empty = torch.tensor([], dtype=dtype, device=device)\n expected_inverse_empty = torch.tensor([], dtype=torch.long, device=device)\n expected_counts_empty = torch.tensor([], dtype=torch.long, device=device)\n # dim0\n x_unique = torch.unique(x, dim=0)\n self.assertEqual(expected_unique_dim0, x_unique)\n\n x_unique, x_inverse = torch.unique(\n x,\n return_inverse=True,\n dim=0)\n self.assertEqual(expected_unique_dim0, x_unique)\n self.assertEqual(expected_inverse_dim0, x_inverse)\n\n x_unique, x_counts = torch.unique(\n x,\n return_inverse=False,\n return_counts=True,\n dim=0)\n self.assertEqual(expected_unique_dim0, x_unique)\n self.assertEqual(expected_counts_dim0, x_counts)\n\n x_unique, x_inverse, x_counts = torch.unique(\n x,\n return_inverse=True,\n return_counts=True,\n dim=0)\n self.assertEqual(expected_unique_dim0, x_unique)\n self.assertEqual(expected_inverse_dim0, x_inverse)\n self.assertEqual(expected_counts_dim0, x_counts)\n\n # dim1\n 
x_unique = torch.unique(x, dim=1)\n if x.dtype == torch.bool:\n self.assertEqual(expected_unique_dim1_bool, x_unique)\n else:\n self.assertEqual(expected_unique_dim1, x_unique)\n\n x_unique, x_inverse = torch.unique(\n x,\n return_inverse=True,\n dim=1)\n if x.dtype == torch.bool:\n self.assertEqual(expected_unique_dim1_bool, x_unique)\n self.assertEqual(expected_inverse_dim1_bool, x_inverse)\n else:\n self.assertEqual(expected_unique_dim1, x_unique)\n self.assertEqual(expected_inverse_dim1, x_inverse)\n\n x_unique, x_counts = torch.unique(\n x,\n return_inverse=False,\n return_counts=True,\n dim=1)\n if x.dtype == torch.bool:\n self.assertEqual(expected_unique_dim1_bool, x_unique)\n self.assertEqual(expected_counts_dim1_bool, x_counts)\n else:\n self.assertEqual(expected_unique_dim1, x_unique)\n self.assertEqual(expected_counts_dim1, x_counts)\n\n x_unique, x_inverse, x_counts = torch.unique(\n x,\n return_inverse=True,\n return_counts=True,\n dim=1)\n if x.dtype == torch.bool:\n self.assertEqual(expected_unique_dim1_bool, x_unique)\n self.assertEqual(expected_inverse_dim1_bool, x_inverse)\n self.assertEqual(expected_counts_dim1_bool, x_counts)\n else:\n self.assertEqual(expected_unique_dim1, x_unique)\n self.assertEqual(expected_inverse_dim1, x_inverse)\n self.assertEqual(expected_counts_dim1, x_counts)\n\n # dim2\n x_unique = torch.unique(x, dim=2)\n self.assertEqual(expected_unique_dim2, x_unique)\n\n x_unique, x_inverse = torch.unique(\n x,\n return_inverse=True,\n dim=2)\n self.assertEqual(expected_unique_dim2, x_unique)\n self.assertEqual(expected_inverse_dim2, x_inverse)\n\n x_unique, x_counts = torch.unique(\n x,\n return_inverse=False,\n return_counts=True,\n dim=2)\n self.assertEqual(expected_unique_dim2, x_unique)\n self.assertEqual(expected_counts_dim2, x_counts)\n\n x_unique, x_inverse, x_counts = torch.unique(\n x,\n return_inverse=True,\n return_counts=True,\n dim=2)\n self.assertEqual(expected_unique_dim2, x_unique)\n self.assertEqual(expected_inverse_dim2, x_inverse)\n self.assertEqual(expected_counts_dim2, x_counts)\n\n # test empty tensor\n x_unique, x_inverse, x_counts = torch.unique(\n x_empty,\n return_inverse=True,\n return_counts=True,\n dim=1)\n self.assertEqual(expected_unique_empty, x_unique)\n self.assertEqual(expected_inverse_empty, x_inverse)\n self.assertEqual(expected_counts_empty, x_counts)\n\n # test not a well formed tensor\n # Checking for runtime error, as this is the expected behaviour\n with self.assertRaises(RuntimeError):\n torch.unique(\n x_ill_formed_empty,\n return_inverse=True,\n return_counts=True,\n dim=1)\n\n # test along dim2\n with self.assertRaises(RuntimeError):\n torch.unique(\n x_ill_formed_empty_another,\n return_inverse=True,\n return_counts=True,\n dim=2)\n\n # test consecutive version\n y = torch.tensor(\n [[0, 1],\n [0, 1],\n [0, 1],\n [1, 2],\n [1, 2],\n [3, 4],\n [0, 1],\n [0, 1],\n [3, 4],\n [1, 2]],\n dtype=dtype,\n device=device\n )\n expected_y_unique = torch.tensor(\n [[0, 1],\n [1, 2],\n [3, 4],\n [0, 1],\n [3, 4],\n [1, 2]],\n dtype=dtype,\n device=device\n )\n expected_y_inverse = torch.tensor([0, 0, 0, 1, 1, 2, 3, 3, 4, 5], dtype=torch.int64, device=device)\n expected_y_counts = torch.tensor([3, 2, 1, 2, 1, 1], dtype=torch.int64, device=device)\n expected_y_inverse_bool = torch.tensor([0, 0, 0, 1, 1, 1, 2, 2, 3, 3], dtype=torch.int64, device=device)\n expected_y_counts_bool = torch.tensor([3, 3, 2, 2], dtype=torch.int64, device=device)\n y_unique, y_inverse, y_counts = torch.unique_consecutive(y, return_inverse=True, 
return_counts=True, dim=0)\n if x.dtype == torch.bool:\n self.assertEqual(expected_y_inverse_bool, y_inverse)\n self.assertEqual(expected_y_counts_bool, y_counts)\n else:\n self.assertEqual(expected_y_inverse, y_inverse)\n self.assertEqual(expected_y_counts, y_counts)\n\n run_test(device, torch.float)\n run_test(device, torch.double)\n run_test(device, torch.long)\n run_test(device, torch.uint8)\n run_test(device, torch.bool)\n\n @onlyCUDA\n def test_topk_noncontiguous_gpu(self, device):\n t = torch.randn(20, device=device)[::2]\n top1, idx1 = t.topk(5)\n top2, idx2 = t.contiguous().topk(5)\n self.assertEqual(top1, top2)\n self.assertEqual(idx1, idx2)\n\n def _test_topk_dtype(self, device, dtype, integral, size):\n if integral:\n a = torch.randint(torch.iinfo(dtype).min, torch.iinfo(dtype).max,\n size=(size,), dtype=dtype, device=device)\n else:\n a = torch.randn(size=(size,), dtype=dtype, device=device)\n\n sort_topk = a.sort()[0][-(size // 2):].flip(0)\n topk = a.topk(size // 2)\n self.assertEqual(sort_topk, topk[0]) # check values\n self.assertEqual(sort_topk, a[topk[1]]) # check indices\n\n @dtypes(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64)\n def test_topk_integral(self, device, dtype):\n small = 10\n large = 4096\n for curr_size in (small, large):\n self._test_topk_dtype(device, dtype, True, curr_size)\n\n @onlyCUDA\n @dtypes(torch.bfloat16)\n @skipCUDAIfRocm\n def test_topk_bfloat16(self, device, dtype):\n\n small = 10\n large = 8192\n for curr_size in (small, large):\n self._test_topk_dtype(device, dtype, False, curr_size)\n\n @dtypesIfCUDA(*torch.testing.get_all_fp_dtypes())\n @dtypes(torch.float, torch.double, torch.bfloat16)\n def test_topk_nonfinite(self, device, dtype):\n if TEST_WITH_ROCM and dtype == torch.bfloat16:\n return\n\n x = torch.tensor([float('nan'), float('inf'), 1e4, 0, -1e4, -float('inf')], device=device, dtype=dtype)\n val, idx = x.topk(4)\n expect = torch.tensor([float('nan'), float('inf'), 1e4, 0], device=device, dtype=dtype)\n self.assertEqual(val, expect)\n self.assertEqual(idx, [0, 1, 2, 3])\n\n val, idx = x.topk(4, largest=False)\n expect = torch.tensor([-float('inf'), -1e4, 0, 1e4], device=device, dtype=dtype)\n self.assertEqual(val, expect)\n self.assertEqual(idx, [5, 4, 3, 2])\n\n def test_topk_4d(self, device):\n x = torch.ones(2, 3072, 2, 2, device=device)\n x[:, 1, :, :] *= 2.\n x[:, 10, :, :] *= 1.5\n val, ind = torch.topk(x, k=2, dim=1)\n expected_ind = torch.ones(2, 2, 2, 2, dtype=torch.long, device=device)\n expected_ind[:, 1, :, :] = 10\n expected_val = torch.ones(2, 2, 2, 2, device=device)\n expected_val[:, 0, :, :] *= 2.\n expected_val[:, 1, :, :] *= 1.5\n self.assertEqual(val, expected_val, atol=0, rtol=0)\n self.assertEqual(ind, expected_ind, atol=0, rtol=0)\n\n @onlyOnCPUAndCUDA\n @dtypesIfCUDA(*(torch.testing.get_all_dtypes(include_complex=False,\n include_bool=False,\n include_half=False,\n include_bfloat16=True)))\n @dtypes(*(torch.testing.get_all_dtypes(include_complex=False, include_bool=False, include_half=False, include_bfloat16=False)))\n def test_topk_zero(self, device, dtype):\n if TEST_WITH_ROCM and dtype == torch.bfloat16:\n return\n\n # https://github.com/pytorch/pytorch/issues/49205\n t = torch.rand(2, 2, device=device).to(dtype=dtype)\n val, idx = torch.topk(t, k=0, largest=False)\n self.assertEqual(val.size(), torch.Size([2, 0]))\n self.assertEqual(idx.size(), torch.Size([2, 0]))\n\n def _test_unique_scalar_empty(self, dtype, device, f):\n # test scalar\n x = torch.tensor(0, dtype=dtype, device=device)\n 
unique, inverse, counts = f(x, return_inverse=True, return_counts=True)\n expected_unique = torch.tensor([0], dtype=dtype, device=device)\n expected_inverse = torch.tensor(0, device=device)\n expected_counts = torch.tensor([1], device=device)\n self.assertEqual(unique, expected_unique)\n self.assertEqual(inverse, expected_inverse)\n self.assertEqual(counts, expected_counts)\n\n # test zero sized tensor\n x = torch.zeros((0, 0, 3), dtype=dtype, device=device)\n unique, inverse, counts = f(x, return_inverse=True, return_counts=True)\n expected_unique = torch.tensor([], dtype=dtype, device=device)\n expected_inverse = torch.empty((0, 0, 3), dtype=torch.long, device=device)\n expected_counts = torch.tensor([], dtype=torch.long, device=device)\n self.assertEqual(unique, expected_unique)\n self.assertEqual(inverse, expected_inverse)\n self.assertEqual(counts, expected_counts)\n\n def _test_unique_with_expects(self, device, dtype, f, x, expected_unique, expected_inverse, expected_counts, additional_shape):\n def ensure_tuple(x):\n if isinstance(x, torch.Tensor):\n return (x,)\n return x\n\n for return_inverse in [True, False]:\n for return_counts in [True, False]:\n # test with expected\n ret = ensure_tuple(f(x, return_inverse=return_inverse, return_counts=return_counts))\n self.assertEqual(len(ret), 1 + int(return_inverse) + int(return_counts))\n self.assertEqual(expected_unique, ret[0])\n if return_inverse:\n self.assertEqual(expected_inverse, ret[1])\n if return_counts:\n count_index = 1 + int(return_inverse)\n self.assertEqual(expected_counts, ret[count_index])\n\n # tests per-element unique on a higher rank tensor.\n y = x.view(additional_shape)\n y_unique, y_inverse, y_counts = f(y, return_inverse=True, return_counts=True)\n self.assertEqual(expected_unique, y_unique)\n self.assertEqual(expected_inverse.view(additional_shape), y_inverse)\n self.assertEqual(expected_counts, y_counts)\n\n @dtypesIfCPU(*set(torch.testing.get_all_dtypes()) - {torch.complex64, torch.complex128})\n @dtypes(*set(torch.testing.get_all_dtypes()) - {torch.bfloat16, torch.complex64, torch.complex128})\n def test_unique(self, device, dtype):\n if dtype is torch.half and self.device_type == 'cpu':\n return # CPU does not have half support\n\n def ensure_tuple(x):\n if isinstance(x, torch.Tensor):\n return (x,)\n return x\n\n if dtype is torch.bool:\n x = torch.tensor([True, False, False, False, True, False, True, False], dtype=torch.bool, device=device)\n expected_unique = torch.tensor([False, True], dtype=torch.bool, device=device)\n expected_inverse = torch.tensor([1, 0, 0, 0, 1, 0, 1, 0], dtype=torch.long, device=device)\n expected_counts = torch.tensor([5, 3], dtype=torch.long, device=device)\n else:\n x = torch.tensor([1, 2, 3, 2, 8, 5, 2, 3], dtype=dtype, device=device)\n expected_unique = torch.tensor([1, 2, 3, 5, 8], dtype=dtype, device=device)\n expected_inverse = torch.tensor([0, 1, 2, 1, 4, 3, 1, 2], device=device)\n expected_counts = torch.tensor([1, 3, 2, 1, 1], device=device)\n\n # test sorted unique\n fs = (\n lambda x, **kwargs: torch.unique(x, sorted=True, **kwargs),\n lambda x, **kwargs: x.unique(sorted=True, **kwargs),\n )\n x_sliced = torch.empty(x.size(0) * 2, dtype=dtype, device=device)[::2].copy_(x)\n xs = (x, x_sliced)\n for f, x in product(fs, xs):\n self._test_unique_with_expects(device, dtype, f, x, expected_unique, expected_inverse, expected_counts, (2, 2, 2))\n self._test_unique_scalar_empty(dtype, device, f)\n\n # test unsorted unique\n fs = (\n lambda x, **kwargs: torch.unique(x, 
sorted=False, **kwargs),\n lambda x, **kwargs: x.unique(sorted=False, **kwargs)\n )\n for f, x in product(fs, xs):\n self._test_unique_scalar_empty(dtype, device, f)\n for return_inverse, return_counts in product((True, False), repeat=2):\n ret = ensure_tuple(f(x, return_inverse=return_inverse, return_counts=return_counts))\n self.assertEqual(len(ret), 1 + int(return_inverse) + int(return_counts))\n x_list = x.tolist()\n x_unique_list = ret[0].tolist()\n self.assertEqual(expected_unique.tolist(), sorted(x_unique_list))\n if return_inverse:\n x_inverse_list = ret[1].tolist()\n for i, j in enumerate(x_inverse_list):\n self.assertEqual(x_list[i], x_unique_list[j])\n if return_counts:\n count_index = 1 + int(return_inverse)\n x_counts_list = ret[count_index].tolist()\n for i, j in zip(x_unique_list, x_counts_list):\n count = 0\n for k in x_list:\n if k == i:\n count += 1\n self.assertEqual(j, count)\n\n @dtypesIfCPU(*set(torch.testing.get_all_dtypes()) - {torch.complex64, torch.complex128})\n @dtypes(*set(torch.testing.get_all_dtypes()) - {torch.bfloat16, torch.complex64, torch.complex128})\n def test_unique_consecutive(self, device, dtype):\n if dtype is torch.half and self.device_type == 'cpu':\n return # CPU does not have half support\n\n if dtype is torch.bool:\n x = torch.tensor([True, False, False, False, True, True, False, False, False], dtype=torch.bool, device=device)\n expected_unique = torch.tensor([True, False, True, False], dtype=torch.bool, device=device)\n expected_inverse = torch.tensor([0, 1, 1, 1, 2, 2, 3, 3, 3], dtype=torch.long, device=device)\n expected_counts = torch.tensor([1, 3, 2, 3], dtype=torch.long, device=device)\n else:\n x = torch.tensor([1, 2, 2, 2, 5, 5, 2, 2, 3], dtype=dtype, device=device)\n expected_unique = torch.tensor([1, 2, 5, 2, 3], dtype=dtype, device=device)\n expected_inverse = torch.tensor([0, 1, 1, 1, 2, 2, 3, 3, 4], device=device)\n expected_counts = torch.tensor([1, 3, 2, 2, 1], device=device)\n\n for f in [torch.unique_consecutive, lambda x, **kwargs: x.unique_consecutive(**kwargs)]:\n self._test_unique_with_expects(device, dtype, f, x, expected_unique, expected_inverse, expected_counts, (3, 3))\n self._test_unique_scalar_empty(dtype, device, f)\n\n @dtypes(torch.double)\n def test_kthvalue(self, device, dtype):\n SIZE = 50\n x = torch.rand(SIZE, SIZE, SIZE, dtype=dtype, device=device)\n x0 = x.clone()\n\n k = random.randint(1, SIZE)\n res1val, res1ind = torch.kthvalue(x, k, keepdim=False)\n res2val, res2ind = torch.sort(x)\n\n self.assertEqual(res1val[:, :], res2val[:, :, k - 1], atol=0, rtol=0)\n self.assertEqual(res1ind[:, :], res2ind[:, :, k - 1], atol=0, rtol=0)\n # test use of result tensors\n k = random.randint(1, SIZE)\n res1val = torch.tensor([], dtype=dtype, device=device)\n res1ind = torch.tensor([], dtype=torch.long, device=device)\n torch.kthvalue(x, k, keepdim=False, out=(res1val, res1ind))\n res2val, res2ind = torch.sort(x)\n self.assertEqual(res1val[:, :], res2val[:, :, k - 1], atol=0, rtol=0)\n self.assertEqual(res1ind[:, :], res2ind[:, :, k - 1], atol=0, rtol=0)\n\n # test non-default dim\n k = random.randint(1, SIZE)\n res1val, res1ind = torch.kthvalue(x, k, 0, keepdim=False)\n res2val, res2ind = torch.sort(x, 0)\n self.assertEqual(res1val, res2val[k - 1], atol=0, rtol=0)\n self.assertEqual(res1ind, res2ind[k - 1], atol=0, rtol=0)\n\n # non-contiguous\n y = x.narrow(1, 0, 1)\n y0 = y.contiguous()\n k = random.randint(1, SIZE)\n res1val, res1ind = torch.kthvalue(y, k)\n res2val, res2ind = torch.kthvalue(y0, k)\n 
self.assertEqual(res1val, res2val, atol=0, rtol=0)\n self.assertEqual(res1ind, res2ind, atol=0, rtol=0)\n\n # non-contiguous [Reference: https://github.com/pytorch/pytorch/issues/45721]\n non_contig_t = torch.tensor([0, -1, 1, -2, 2], dtype=dtype, device=device)[::2]\n expected_val, expected_ind = non_contig_t.contiguous().kthvalue(2)\n non_contig_cpu_t = non_contig_t.cpu()\n expected_val_cpu, expected_ind_cpu = non_contig_cpu_t.kthvalue(2)\n\n out_val, out_ind = non_contig_t.kthvalue(2)\n self.assertEqual(expected_val, out_val, atol=0, rtol=0)\n self.assertEqual(expected_ind, out_ind, atol=0, rtol=0)\n self.assertEqual(expected_val_cpu, out_val, atol=0, rtol=0)\n self.assertEqual(expected_ind_cpu, out_ind, atol=0, rtol=0)\n\n # check that the input wasn't modified\n self.assertEqual(x, x0, atol=0, rtol=0)\n\n # simple test case (with repetitions)\n y = torch.tensor((3., 5, 4, 1, 1, 5), dtype=dtype, device=device)\n self.assertEqual(torch.kthvalue(y, 3)[0], 3, atol=0, rtol=0)\n self.assertEqual(torch.kthvalue(y, 2)[0], 1, atol=0, rtol=0)\n\n # simple test case (with NaN)\n SIZE = 50\n x = torch.rand(SIZE, SIZE, SIZE, dtype=dtype, device=device)\n x[torch.arange(SIZE), :, torch.randint(50, (50,))] = nan\n ks = [random.randint(1, SIZE), 1, SIZE, SIZE - 1]\n res2val, res2ind = torch.sort(x)\n for k in ks:\n res1val, res1ind = torch.kthvalue(x, k, keepdim=False)\n self.assertEqual(res1val[:, :], res2val[:, :, k - 1], atol=0, rtol=0)\n self.assertEqual(res1ind[:, :], res2ind[:, :, k - 1], atol=0, rtol=0)\n\n # test overlapping output\n @dtypes(torch.double)\n @onlyOnCPUAndCUDA # Fails on XLA\n def test_kthvalue_overlap(self, device, dtype):\n S = 10\n k = 5\n a = torch.randn(S, device=device)\n indices = torch.empty((), device=device, dtype=torch.long)\n with self.assertRaisesRegex(RuntimeError, \"unsupported operation:\"):\n torch.kthvalue(a, k, out=(a, indices))\n\n @dtypes(torch.float)\n @onlyOnCPUAndCUDA # Fails on XLA\n def test_kthvalue_scalar(self, device, dtype):\n # Test scalar input (test case from https://github.com/pytorch/pytorch/issues/30818)\n # Tests that passing a scalar tensor or 1D tensor with 1 element work either way\n res = torch.tensor(2, device=device, dtype=dtype).kthvalue(1)\n ref = torch.tensor([2], device=device, dtype=dtype).kthvalue(1)\n self.assertEqual(res[0], ref[0].squeeze())\n self.assertEqual(res[1], ref[1].squeeze())\n\n @dtypes(*all_types())\n @dtypesIfCUDA(*all_types_and(torch.half))\n def test_isin(self, device, dtype):\n def assert_isin_equal(a, b):\n # Compare to the numpy reference implementation.\n x = torch.isin(a, b)\n a = a.cpu().numpy() if torch.is_tensor(a) else np.array(a)\n b = b.cpu().numpy() if torch.is_tensor(b) else np.array(b)\n y = np.isin(a, b)\n self.assertEqual(x, y)\n\n # multi-dim tensor, multi-dim tensor\n a = torch.arange(24, device=device, dtype=dtype).reshape([2, 3, 4])\n b = torch.tensor([[10, 20, 30], [0, 1, 3], [11, 22, 33]], device=device, dtype=dtype)\n assert_isin_equal(a, b)\n\n # zero-dim tensor\n zero_d = torch.tensor(3, device=device, dtype=dtype)\n assert_isin_equal(zero_d, b)\n assert_isin_equal(a, zero_d)\n assert_isin_equal(zero_d, zero_d)\n\n # empty tensor\n empty = torch.tensor([], device=device, dtype=dtype)\n assert_isin_equal(empty, b)\n assert_isin_equal(a, empty)\n assert_isin_equal(empty, empty)\n\n # scalar\n assert_isin_equal(a, 6)\n assert_isin_equal(5, b)\n\n def define_expected(lst, invert=False):\n expected = torch.tensor(lst, device=device)\n if invert:\n expected = expected.logical_not()\n return 
expected\n\n # Adapted from numpy's in1d tests\n for mult in [1, 10]:\n for invert in [False, True]:\n a = torch.tensor([5, 7, 1, 2], device=device, dtype=dtype)\n b = torch.tensor([2, 4, 3, 1, 5] * mult, device=device, dtype=dtype)\n ec = define_expected([True, False, True, True], invert=invert)\n c = torch.isin(a, b, assume_unique=True, invert=invert)\n self.assertEqual(c, ec)\n\n a[0] = 8\n ec = define_expected([False, False, True, True], invert=invert)\n c = torch.isin(a, b, assume_unique=True, invert=invert)\n self.assertEqual(c, ec)\n\n a[0], a[3] = 4, 8\n ec = define_expected([True, False, True, False], invert=invert)\n c = torch.isin(a, b, assume_unique=True, invert=invert)\n self.assertEqual(c, ec)\n\n a = torch.tensor([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5], device=device, dtype=dtype)\n b = torch.tensor([2, 3, 4] * mult, device=device, dtype=dtype)\n ec = define_expected([False, True, False, True, True, True, True, True, True,\n False, True, False, False, False], invert=invert)\n c = torch.isin(a, b, invert=invert)\n self.assertEqual(c, ec)\n\n b = torch.tensor([2, 3, 4] * mult + [5, 5, 4] * mult, device=device, dtype=dtype)\n ec = define_expected([True, True, True, True, True, True, True, True, True, True,\n True, False, True, True], invert=invert)\n c = torch.isin(a, b, invert=invert)\n self.assertEqual(c, ec)\n\n a = torch.tensor([5, 7, 1, 2], device=device, dtype=dtype)\n b = torch.tensor([2, 4, 3, 1, 5] * mult, device=device, dtype=dtype)\n ec = define_expected([True, False, True, True], invert=invert)\n c = torch.isin(a, b, invert=invert)\n self.assertEqual(c, ec)\n\n a = torch.tensor([5, 7, 1, 1, 2], device=device, dtype=dtype)\n b = torch.tensor([2, 4, 3, 3, 1, 5] * mult, device=device, dtype=dtype)\n ec = define_expected([True, False, True, True, True], invert=invert)\n c = torch.isin(a, b, invert=invert)\n self.assertEqual(c, ec)\n\n a = torch.tensor([5, 5], device=device, dtype=dtype)\n b = torch.tensor([2, 2] * mult, device=device, dtype=dtype)\n ec = define_expected([False, False], invert=invert)\n c = torch.isin(a, b, invert=invert)\n self.assertEqual(c, ec)\n\n # multi-dimensional input case using sort-based algo\n for assume_unique in [False, True]:\n a = torch.arange(6, device=device, dtype=dtype).reshape([2, 3])\n b = torch.arange(3, 30, device=device, dtype=dtype)\n ec = define_expected([[False, False, False], [True, True, True]], invert=invert)\n c = torch.isin(a, b, invert=invert, assume_unique=assume_unique)\n self.assertEqual(c, ec)\n\n def test_isin_different_dtypes(self, device):\n supported_types = all_types() if device == 'cpu' else all_types_and(torch.half)\n for mult in [1, 10]:\n for assume_unique in [False, True]:\n for dtype1, dtype2 in product(supported_types, supported_types):\n a = torch.tensor([1, 2, 3], device=device, dtype=dtype1)\n b = torch.tensor([3, 4, 5] * mult, device=device, dtype=dtype2)\n ec = torch.tensor([False, False, True], device=device)\n c = torch.isin(a, b, assume_unique=assume_unique)\n self.assertEqual(c, ec)\n\n @onlyCUDA\n @dtypes(*all_types())\n def test_isin_different_devices(self, device, dtype):\n a = torch.arange(6, device=device, dtype=dtype).reshape([2, 3])\n b = torch.arange(3, 30, device='cpu', dtype=dtype)\n with self.assertRaises(RuntimeError):\n torch.isin(a, b)\n\n c = torch.arange(6, device='cpu', dtype=dtype).reshape([2, 3])\n d = torch.arange(3, 30, device=device, dtype=dtype)\n with self.assertRaises(RuntimeError):\n torch.isin(c, d)\n\n\ninstantiate_device_type_tests(TestSortAndSelect, 
globals())\n\nif __name__ == '__main__':\n run_tests()\n"
] | [
[
"torch.empty",
"torch.randint",
"torch.testing._internal.common_utils.run_tests",
"torch.rand",
"torch.argsort",
"numpy.argsort",
"torch.testing._internal.common_device_type.largeTensorTest",
"torch.testing._internal.common_utils.make_tensor",
"torch.msort",
"torch.testing.get_all_fp_dtypes",
"torch.randn",
"torch.iinfo",
"torch.testing._internal.common_device_type.dtypes",
"numpy.isin",
"torch.arange",
"torch.unique",
"torch.testing.floating_types_and",
"torch.sort",
"torch.ones",
"torch.tensor",
"torch.unique_consecutive",
"torch.testing.get_all_int_dtypes",
"numpy.array",
"torch.isin",
"torch.testing.get_all_dtypes",
"torch.empty_like",
"torch.Size",
"torch.testing.all_types",
"torch.zeros_like",
"torch.topk",
"torch.is_tensor",
"torch.randperm",
"torch.zeros",
"torch.kthvalue",
"torch.testing.all_types_and"
]
] |
yssource/pandas-ta | [
"98478f8bf049a4c8748d6f3c795f4f335ced05ca"
] | [
"pandas_ta/performance/log_return.py"
] | [
"# -*- coding: utf-8 -*-\nfrom numpy import log as nplog\nfrom pandas_ta.utils import get_offset, verify_series\n\n\ndef log_return(close, length=None, cumulative=False, offset=None, **kwargs):\n \"\"\"Indicator: Log Return\"\"\"\n # Validate Arguments\n close = verify_series(close)\n length = int(length) if length and length > 0 else 1\n offset = get_offset(offset)\n\n # Calculate Result\n log_return = nplog(close).diff(periods=length)\n\n if cumulative:\n log_return = log_return.cumsum()\n\n # Offset\n if offset != 0:\n log_return = log_return.shift(offset)\n\n # Handle fills\n if \"fillna\" in kwargs:\n log_return.fillna(kwargs[\"fillna\"], inplace=True)\n if \"fill_method\" in kwargs:\n log_return.fillna(method=kwargs[\"fill_method\"], inplace=True)\n\n # Name & Category\n log_return.name = f\"{'CUM' if cumulative else ''}LOGRET_{length}\"\n log_return.category = \"performance\"\n\n return log_return\n\n\nlog_return.__doc__ = \\\n\"\"\"Log Return\n\nCalculates the logarithmic return of a Series.\nSee also: help(df.ta.log_return) for additional **kwargs a valid 'df'.\n\nSources:\n https://stackoverflow.com/questions/31287552/logarithmic-returns-in-pandas-dataframe\n\nCalculation:\n Default Inputs:\n length=1, cumulative=False\n LOGRET = log( close.diff(periods=length) )\n CUMLOGRET = LOGRET.cumsum() if cumulative\n\nArgs:\n close (pd.Series): Series of 'close's\n length (int): It's period. Default: 20\n cumulative (bool): If True, returns the cumulative returns. Default: False\n offset (int): How many periods to offset the result. Default: 0\n\nKwargs:\n fillna (value, optional): pd.DataFrame.fillna(value)\n fill_method (value, optional): Type of fill method\n\nReturns:\n pd.Series: New feature generated.\n\"\"\"\n"
] | [
[
"numpy.log"
]
] |
Solara570/demo-solara | [
"3ce6df1fd68089c427bbd46fb0857e8b76428ca6"
] | [
"articles/inversion.py"
] | [
"#coding=utf-8\n\n################################################################################################\n# A 3-part series on circle inversion, Descartes' theorem along with its variants, and more! #\n# #\n# Part 1: An Introduction to Circle Inversion - https://zhuanlan.zhihu.com/p/86644341 #\n# Part 2: Four Circles & Descartes' Theorem (1) - https://zhuanlan.zhihu.com/p/105819963 #\n# Part 3: Four Circles & Descartes' Theorem (2) - https://zhuanlan.zhihu.com/p/106874090 #\n################################################################################################\n\nimport numpy as np\nimport itertools as it\nfrom manimlib.constants import *\nfrom manimlib.utils.color import *\nfrom manimlib.utils.space_ops import *\nfrom manimlib.utils.simple_functions import *\nfrom manimlib.animation.composition import AnimationGroup\nfrom manimlib.animation.creation import ShowCreation, Write, DrawBorderThenFill\nfrom manimlib.animation.fading import FadeOut, FadeInFromDown\nfrom manimlib.animation.transform import Transform, ReplacementTransform, MoveToTarget, ApplyMethod\nfrom manimlib.mobject.mobject import Mobject\nfrom manimlib.mobject.coordinate_systems import Axes, NumberPlane, ThreeDAxes\nfrom manimlib.mobject.geometry import Circle, Line, Dot, SmallDot, Square, Polygon, RegularPolygon, \\\n Arrow, Sector, Vector\nfrom manimlib.mobject.numbers import DecimalNumber\nfrom manimlib.mobject.value_tracker import ValueTracker\nfrom manimlib.mobject.shape_matchers import BackgroundRectangle, SurroundingRectangle\nfrom manimlib.mobject.three_dimensions import Sphere\nfrom manimlib.mobject.svg.brace import Brace\nfrom manimlib.mobject.svg.tex_mobject import TexMobject, TextMobject\nfrom manimlib.mobject.types.vectorized_mobject import VMobject, VGroup, VectorizedPoint, DashedVMobject\nfrom manimlib.scene.scene import Scene\nfrom manimlib.scene.three_d_scene import ThreeDScene\n\nfrom short.apollonian_gasket import calc_centers_by_radii, calc_new_agc_info, AGCircle, \\\n ApollonianGasket, ApollonianGasketScene\nfrom short.ford_circles import get_coprime_numers_by_denom, get_stroke_width_by_height, \\\n AssembledFraction, ZoomInOnFordCircles\n\n\n#####\n## Constants\nMAX_NORM = 1e2\nCB_DARK = \"#825201\"\nCB_LIGHT = \"#B69B4C\"\n\n\n#####\n## General Methods\ndef complex_inversion(z, z0, r):\n return z0 + np.conjugate(r**2 / (z-z0))\n\ndef R3_inversion(point, inv_center, radius):\n z = R3_to_complex(point)\n z0 = R3_to_complex(inv_center)\n w = complex_inversion(z, z0, radius)\n return complex_to_R3(w)\n\ndef inversion(point, inv_center, radius):\n # Just a rename\n return R3_inversion(point, inv_center, radius)\n\ndef is_close_in_R3(p1, p2, thres = 1e-6):\n \"\"\"Check if two points are close in R^3.\"\"\"\n return np.linalg.norm(p1 - p2) < thres\n\ndef is_close(z1, z2, thres = 1e-6):\n \"\"\"Check if two complex numbers are close to each other.\"\"\"\n return np.abs(z1 - z2) < thres\n\ndef get_tangent_point(c1, c2, thres = 1e-4):\n \"\"\"Return the tangency point of circles 'c1' and 'c2'.\"\"\"\n p1 = c1.get_center()\n p2 = c2.get_center()\n r1 = c1.get_height() / 2\n r2 = c2.get_height() / 2\n d = get_norm(p2 - p1)\n if is_close(d, r1-r2, thres):\n return p1 + r1*normalize(p2-p1)\n elif is_close(d, r2-r1, thres):\n return p2 + r2*normalize(p1-p2)\n elif is_close(d, r1+r2, thres):\n return (r1*p2+r2*p1) / (r1+r2)\n else:\n raise Exception(\"These two circles aren't tangent.\")\n\ndef get_para_and_perp_components(point, lp1, lp2):\n v = lp2 - point\n v0 = lp2 - lp1\n v_para = 
fdiv(np.dot(v, v0), np.dot(v0, v0)) * v0\n v_perp = v - v_para\n return v_para, v_perp\n\ndef distance_to_the_line(point, lp1, lp2):\n \"\"\"Return the distance from 'point' to the line given by 'lp1' and 'lp2'.\"\"\"\n v_para, v_perp = get_para_and_perp_components(point, lp1, lp2)\n return np.linalg.norm(v_perp)\n\ndef is_on_the_line(point, lp1, lp2, thres = 1e-6):\n \"\"\"Check if 'point' is on the line given by two points 'lp1' and 'lp2'.\"\"\"\n return is_close(distance_to_the_line(point, lp1, lp2), thres)\n\ndef get_random_vector(max_step):\n \"\"\"Return a random vector with a maximum length of 'max_step'.\"\"\"\n return max_step*np.random.random() * rotate_vector(RIGHT, TAU*np.random.random())\n\ndef get_nearest_int(num):\n return int(np.round(num, 0))\n\ndef solve_quadratic_equation(a, b, c):\n delta = b**2 - 4*a*c\n x1 = (-b-np.sqrt(delta)) /(2*a)\n x2 = (-b+np.sqrt(delta)) /(2*a)\n print(a, b, c, x1, x2)\n return x1, x2\n\ndef get_next_terms(k1, k2, k3):\n \"\"\"Return two adjacent terms in the loxodromic sequence.\"\"\"\n b = -2*(k1+k2+k3)\n c = 2*(k1**2+k2**2+k3**2) - (k1+k2+k3)**2\n return list(map(get_nearest_int, solve_quadratic_equation(1, b, c)))\n\ndef get_sequence_string(arr):\n arr_copy = list(map(str, arr))\n arr_copy.insert(0, \"...\")\n arr_copy.append(\"...\")\n return \", \".join(arr_copy)\n\n\n#####\n## Mobjects\nclass FineCircle(Circle):\n CONFIG = {\n # In manim, circles are approximated by multiple cubic Beziers,\n # so it's necessary to increase the number of components for\n # high-precision calculations.\n \"num_components\": 100,\n }\n\n\nclass ExtendedLine(Line):\n def __init__(self, sp, ep, n = 10, **kwargs):\n unit_vec = normalize(ep - sp)\n new_sp = sp - n * unit_vec\n new_ep = ep + n * unit_vec\n Line.__init__(self, new_sp, new_ep, **kwargs)\n\n\nclass DotLabel(VMobject):\n CONFIG = {\n \"position\" : UP,\n \"label_buff\" : 0.25,\n }\n def __init__(self, label_text, dot, **kwargs):\n VMobject.__init__(self, **kwargs)\n self.dot = dot\n label = TexMobject(label_text, **kwargs)\n if self.position is not None:\n label.add_updater(\n lambda l: l.next_to(self.dot.get_center(), self.position, buff = self.label_buff)\n )\n self.add(label)\n\n def set_label(self, label):\n label.next_to(self.dot.get_center())\n\n\nclass TwoDotsSegment(Line):\n def __init__(self, dot_1, dot_2, **kwargs):\n self.dot_1 = dot_1\n self.dot_2 = dot_2\n sp, ep = self.get_dots_centers()\n Line.__init__(self, start = sp, end = ep, **kwargs)\n self.add_updater(self.set_start_and_end)\n\n def get_dots_centers(self):\n return self.dot_1.get_center(), self.dot_2.get_center()\n\n def set_start_and_end(self, line_mob):\n sp, ep = self.get_dots_centers()\n line_mob.put_start_and_end_on(sp, ep)\n\n\nclass LengthLabel(DecimalNumber):\n CONFIG = {\n \"num_decimal_places\" : 3,\n \"label_height\" : 0.3,\n \"label_buff\" : 0.3,\n \"offset\" : 0,\n \"is_on_opposite_side\" : False,\n }\n def __init__(self, line_mob, **kwargs):\n DecimalNumber.__init__(self, **kwargs)\n self.line_mob = line_mob\n self.add_updater(self.set_label)\n\n def set_label(self, label):\n label.set_value(self.line_mob.get_length())\n label.set_height(self.label_height)\n label.rotate(self.line_mob.get_angle())\n side_factor = -1 if self.is_on_opposite_side else 1\n label.move_to(\n self.line_mob.get_center() \\\n + self.line_mob.get_vector() / 2 * self.offset \\\n + side_factor * rotate_vector(self.line_mob.get_unit_vector(), PI/2) * self.label_buff\n )\n\n def set_offset(self, offset):\n self.offset = offset\n return self\n\n 
def switch_side(self):\n self.is_on_opposite_side = not self.is_on_opposite_side\n return self\n\n\nclass ManyDotsPolygon(VMobject):\n def __init__(self, *dots, **kwargs):\n VMobject.__init__(self, **kwargs)\n self.dots = dots\n dots_centers = self.get_dots_centers()\n polygon = Polygon(*dots_centers, **kwargs)\n polygon.add_updater(self.set_vertices)\n self.add(polygon)\n\n def get_dots_centers(self):\n return [dot.get_center() for dot in self.dots]\n\n def set_vertices(self, polygon_mob):\n vertices = self.get_dots_centers()\n polygon_mob.set_points_as_corners([*vertices, vertices[0]])\n\n\nclass AngleIndicator(VMobject):\n CONFIG = {\n \"color\" : RED,\n \"radius\" : 0.2,\n \"fill_opacity\" : 0.6,\n \"is_minor_arc\" : True,\n }\n def __init__(self, dot_A, dot_C, dot_B, **kwargs):\n VMobject.__init__(self, **kwargs)\n self.dot_A = dot_A\n self.dot_C = dot_C\n self.dot_B = dot_B\n sector = Sector()\n sector.add_updater(self.set_sector)\n self.add(sector)\n self.sector = sector\n\n def get_point_center(self, point_or_mob):\n if isinstance(point_or_mob, Mobject):\n return point_or_mob.get_center()\n else:\n return point_or_mob\n\n def get_point_centers(self):\n return tuple(map(self.get_point_center, [self.dot_A, self.dot_C, self.dot_B]))\n\n def set_sector(self, mob):\n pt_A, pt_C, pt_B = self.get_point_centers()\n start_angle, angle = self.get_angles()\n outer_radius = min([self.radius, get_norm(pt_C - pt_A)/2, get_norm(pt_C - pt_B)/2])\n new_sector = Sector(\n start_angle = start_angle, angle = angle, outer_radius = outer_radius,\n color = self.color, fill_opacity = self.fill_opacity, stroke_width = 0\n )\n new_sector.move_arc_center_to(self.get_point_center(self.dot_C))\n mob.become(new_sector)\n \n def get_angles(self):\n pt_A, pt_C, pt_B = self.get_point_centers()\n start_angle = angle_of_vector(pt_A - pt_C)\n end_angle = angle_of_vector(pt_B - pt_C)\n angle = (end_angle - start_angle) % TAU\n if self.is_minor_arc and angle > PI:\n angle -= TAU\n return start_angle, angle\n\n\nclass RightAngleIndicator(VMobject):\n CONFIG = {\n \"color\" : WHITE,\n \"side_length\" : 0.2,\n \"line_width\" : 1,\n \"square_opacity\" : 0.5,\n }\n def __init__(self, dot_A, dot_C, dot_B, **kwargs):\n VMobject.__init__(self, **kwargs)\n self.dot_A = dot_A\n self.dot_C = dot_C\n self.dot_B = dot_B\n line = VMobject(stroke_width = self.line_width, fill_opacity = 0)\n square = VMobject(stroke_width = 0, fill_color = self.color, fill_opacity = self.square_opacity)\n line.add_updater(self.set_line)\n square.add_updater(self.set_square)\n self.add(square, line)\n self.line = line\n self.square = square\n\n def get_point_center(self, point_or_mob):\n if isinstance(point_or_mob, Mobject):\n return point_or_mob.get_center()\n else:\n return point_or_mob\n\n def get_point_centers(self):\n return tuple(map(self.get_point_center, [self.dot_A, self.dot_C, self.dot_B]))\n\n def get_norm_vectors(self):\n pt_A, pt_C, pt_B = self.get_point_centers()\n norm_vec_CA = normalize(pt_A - pt_C)\n norm_vec_CB = normalize(pt_B - pt_C)\n return norm_vec_CA, norm_vec_CB\n\n def get_corner_points(self):\n pt_A, pt_C, pt_B = self.get_point_centers()\n norm_vec_CA, norm_vec_CB = self.get_norm_vectors()\n side_length = min([self.side_length, get_norm(pt_A - pt_C)/2, get_norm(pt_B - pt_C)/2])\n return (\n pt_C,\n pt_C + norm_vec_CA * side_length,\n pt_C + norm_vec_CA * side_length + norm_vec_CB * side_length,\n pt_C + norm_vec_CB * side_length\n )\n\n def set_line(self, line_mob):\n p, q, r, s = self.get_corner_points()\n 
line_mob.set_points_as_corners([q, r, s])\n\n def set_square(self, square_mob):\n p, q, r, s = self.get_corner_points()\n square_mob.set_points_as_corners([p, q, r, s, p])\n\n\nclass InversedDot(VMobject):\n CONFIG = {\n \"color\" : PINK,\n \"stroke_width\" : 3,\n \"fill_opacity\" : 1,\n \"is_hollow\" : True,\n \"center_color\" : BLACK,\n }\n def __init__(self, orig_dot, circle, **kwargs):\n self.orig_dot = orig_dot\n self.circle = circle\n VMobject.__init__(self, **kwargs)\n\n def generate_points(self):\n if self.is_hollow:\n self.fill_color = self.center_color\n else:\n self.fill_color = self.color\n self.stroke_width = 0\n inv_dot = Dot(ORIGIN, color = self.color)\n self.inv_dot = inv_dot\n self.add(inv_dot)\n self.add_updater_to_inversed_dot()\n\n def add_updater_to_inversed_dot(self):\n self.inv_dot.add_updater(self.move_inversed_dot)\n\n def move_inversed_dot(self, inv_dot):\n point = self.orig_dot.get_center()\n inv_center = self.circle.get_center()\n radius = self.circle.get_height() / 2.\n if is_close_in_R3(point, inv_center):\n pass\n else:\n inv_dot.move_to(inversion(point, inv_center, radius))\n\n\nclass InversedVMobject(VMobject):\n CONFIG = {\n \"is_analytical\" : True,\n \"match_original_style\" : False,\n \"use_dashed_vmob\" : True,\n \"dashed_vmob_config\": {\n \"num_dashes\" : 50,\n \"positive_space_ratio\" : 0.6,\n },\n }\n def __init__(self, orig_vmob, circle, **kwargs):\n VMobject.__init__(self, **kwargs)\n self.orig_vmob = orig_vmob\n self.circle = circle\n self.orig_vmob_type = \"Others\"\n self.initialize_orig_vmob_type()\n self.add_updater_to_inversed_vmobject()\n\n def add_updater_to_inversed_vmobject(self):\n self.add_updater(self.set_inversed_vmobject)\n\n def initialize_orig_vmob_type(self):\n if isinstance(self.orig_vmob, Line):\n self.orig_vmob_type = \"Line\"\n elif isinstance(self.orig_vmob, Circle):\n self.orig_vmob_type = \"Circle\"\n else:\n self.orig_vmob_type = \"Others\"\n\n def set_orig_vmob_type(self, orig_vmob_type):\n self.orig_vmob_type = orig_vmob_type\n\n def set_inversed_vmobject(self, inv_vmob):\n inv_center = self.circle.get_center()\n radius = self.circle.get_height() / 2.\n if self.is_analytical and self.orig_vmob_type == \"Line\":\n # If it's a line...\n lp1, lp2 = self.orig_vmob.get_start_and_end()\n if is_on_the_line(inv_center, lp1, lp2):\n # If it's a line passing through the inversion center,\n # then the inversion is just the line itself.\n temp_vmob = ExtendedLine(lp1, lp2)\n else:\n # If it's a line NOT through the inversion center,\n # then the inversion is a circle passing through the inversion center.\n v_para, v_perp = get_para_and_perp_components(inv_center, lp1, lp2)\n d = distance_to_the_line(inv_center, lp1, lp2)\n # d = np.linalg.norm(v_perp)\n inv_vmob_radius = fdiv(radius**2, 2*d)\n closepoint = inv_center + v_perp\n inv_vmob_closepoint = inversion(closepoint, inv_center, radius)\n inv_vmob_center = (inv_center + inv_vmob_closepoint) / 2.\n temp_vmob = FineCircle(radius = inv_vmob_radius)\n temp_vmob.move_to(inv_vmob_center)\n elif self.is_analytical and self.orig_vmob_type == \"Circle\":\n # If it's a circle...\n orig_vmob_center = self.orig_vmob.get_center()\n orig_vmob_radius = self.orig_vmob.get_height() / 2.\n center_vec = orig_vmob_center - inv_center\n d = get_norm(center_vec)\n if is_close(orig_vmob_radius, d):\n # If it's a circle passing through the inversion center,\n # then the inversion is a line perps to the line through the circle centers.\n foot = inv_center + fdiv(radius**2, 2*d) * 
normalize(center_vec)\n lp1 = foot + rotate_vector(center_vec, PI/2)\n lp2 = foot + rotate_vector(center_vec, -PI/2)\n temp_vmob = ExtendedLine(lp1, lp2)\n else:\n # If it's a circle NOT through the inversion center,\n # then the inversion is a circle NOT through the inversion center.\n dp1 = orig_vmob_center - orig_vmob_radius * normalize(center_vec)\n dp2 = orig_vmob_center + orig_vmob_radius * normalize(center_vec)\n inv_dp1 = inversion(dp1, inv_center, radius)\n inv_dp2 = inversion(dp2, inv_center, radius)\n inv_vmob_radius = get_norm(inv_dp2 - inv_dp1) / 2.\n inv_vmob_center = (inv_dp2 + inv_dp1) / 2.\n temp_vmob = FineCircle(radius = inv_vmob_radius)\n temp_vmob.move_to(inv_vmob_center)\n else:\n temp_vmob = self.orig_vmob.copy()\n temp_vmob.apply_function(lambda p: inversion(p, inv_center, radius))\n if self.use_dashed_vmob:\n temp_vmob = DashedVMobject(temp_vmob, **self.dashed_vmob_config)\n inv_vmob.become(temp_vmob)\n if self.match_original_style:\n inv_vmob.match_style(self.orig_vmob)\n\n\nclass FourCirclesNormalForm(VMobject):\n CONFIG = {\n \"circle_colors\" : [MAROON_B, RED, GREEN, BLUE],\n \"r\" : 1.2,\n \"l\" : 9,\n \"use_dashed_vmob\" : True,\n \"dashed_vmob_config\" : {\n \"num_dashes\" : 30,\n \"positive_space_ratio\" : 0.6,\n }\n }\n def __init__(self, **kwargs):\n VMobject.__init__(self, **kwargs)\n c1 = Circle(radius = self.r, **kwargs).shift(self.r*LEFT)\n c2 = Circle(radius = self.r, **kwargs).shift(self.r*RIGHT)\n c3 = Line(self.l*LEFT, self.l*RIGHT, **kwargs).shift(self.r*DOWN)\n c4 = Line(self.l*LEFT, self.l*RIGHT, **kwargs).shift(self.r*UP)\n for mob, color in zip([c1, c2, c3, c4], self.circle_colors):\n mob.set_color(color)\n if self.use_dashed_vmob:\n self.add(DashedVMobject(mob, **self.dashed_vmob_config))\n else:\n self.add(mob)\n\n\nclass DescartesFourCircles(VMobject):\n CONFIG = {\n \"outer_circle_index\" : None,\n \"orig_circle_color\" : BLUE,\n \"new_circle_color\" : YELLOW,\n \"show_new_circles\" : True,\n \"show_new_circles_centers\" : False,\n }\n def __init__(self, ccdot1, ccdot2, ccdot3, **kwargs):\n self.ccdot1 = ccdot1\n self.ccdot2 = ccdot2\n self.ccdot3 = ccdot3\n VMobject.__init__(self, **kwargs)\n self.add_orig_circles()\n self.add_orig_circles_updaters()\n self.generate_new_circles()\n if self.show_new_circles:\n self.add_new_circles()\n if self.show_new_circles_centers:\n self.add_new_circles_centers()\n \n def add_orig_circles(self):\n self.c1, self.c2, self.c3 = self.cs = VGroup(*[\n Circle(arc_center = cc, radius = r, color = self.orig_circle_color)\n for cc, r in zip(self.get_orig_circle_centers(), self.calc_radii_by_centers())\n ])\n self.add(self.cs)\n\n def add_orig_circles_updaters(self):\n def get_center(k):\n return self.get_orig_circle_centers()[k]\n def get_abs_radius(k):\n return np.abs(self.calc_radii_by_centers()[k])\n # Since enumerate() won't work here (seriously?),\n # I have to use a much more direct approach - list them all.\n self.c1.add_updater(lambda c: c.move_to(get_center(0)))\n self.c1.add_updater(lambda c: c.set_height(2*get_abs_radius(0)))\n self.c2.add_updater(lambda c: c.move_to(get_center(1)))\n self.c2.add_updater(lambda c: c.set_height(2*get_abs_radius(1)))\n self.c3.add_updater(lambda c: c.move_to(get_center(2)))\n self.c3.add_updater(lambda c: c.set_height(2*get_abs_radius(2)))\n\n def get_orig_circles(self):\n return self.cs\n\n def get_orig_circle_centers(self):\n return [dot.get_center() for dot in (self.ccdot1, self.ccdot2, self.ccdot3)]\n\n def get_orig_circle_radii(self):\n return 
self.calc_radii_by_centers()\n\n def get_orig_circle_curvatures(self):\n return [fdiv(1, radius) for radius in self.calc_radii_by_centers()]\n\n def calc_radii_by_centers(self):\n p1, p2, p3 = self.get_orig_circle_centers()\n d12 = get_norm(p2 - p1)\n d23 = get_norm(p3 - p2)\n d13 = get_norm(p3 - p1)\n sum_r = (d12 + d23 + d13) / 2.\n if self.outer_circle_index == 1:\n # If circle 1 contains other two circles...\n return [-sum_r, sum_r-d12, sum_r-d13]\n elif self.outer_circle_index == 2:\n # If circle 2 contains other two circles...\n return [sum_r-d12, -sum_r, sum_r-d23]\n elif self.outer_circle_index == 3:\n # If circle 3 contains other two circles...\n return [sum_r-d13, sum_r-d23, -sum_r]\n else:\n return [sum_r-d23, sum_r-d13, sum_r-d12]\n\n def generate_new_circles(self):\n self.c4_1, self.c4_2 = self.new_circles = VGroup(*[\n Circle(arc_center = new_cc, radius = new_r, color = self.new_circle_color)\n for new_cc, new_r in self.calc_new_circles_centers_and_radii()\n ])\n self.generate_new_circles_centers()\n self.add_new_circles_updaters()\n\n def calc_new_circles_centers_and_radii(self):\n k1, k2, k3 = self.get_orig_circle_curvatures()\n z1, z2, z3 = map(R3_to_complex, self.get_orig_circle_centers())\n # Calculate the curvatures of new circles\n sum_k = k1 + k2 + k3\n sum_k2 = k1**2 + k2**2 + k3**2\n sum_k_cycle_prod = k1*k2 + k2*k3 + k3*k1\n b = (-2)*sum_k\n c = sum_k2 - 2*sum_k_cycle_prod\n delta = b**2 - 4*c\n k4_1 = (-b + np.sqrt(delta)) / 2\n k4_2 = (-b - np.sqrt(delta)) / 2\n # Calculate the centers of new circles\n # arxiv.org/abs/math/0101066v1 - Eqn 2.3\n sum_kz = k1*z1 + k2*z2 + k3*z3\n sum_k2z = k1**2 * z1 + k2**2 * z2 + k3**2 * z3\n coeff_1 = (sum_k - k4_1) * k4_1\n const_1 = 2 * sum_k2z - (sum_k + k4_1) * sum_kz\n z4_1 = const_1 / coeff_1\n coeff_2 = (sum_k - k4_2) * k4_2\n const_2 = 2 * sum_k2z - (sum_k + k4_2) * sum_kz\n z4_2 = const_2 / coeff_2\n return [[complex_to_R3(z4_1), fdiv(1, k4_1)], [complex_to_R3(z4_2), fdiv(1, k4_2)]]\n\n def generate_new_circles_centers(self):\n ccdot4_1 = Dot(color = self.new_circle_color)\n ccdot4_1.add_updater(lambda m: m.move_to(self.c4_1.get_center()))\n ccdot4_2 = Dot(color = self.new_circle_color)\n ccdot4_2.add_updater(lambda m: m.move_to(self.c4_2.get_center()))\n self.ccdot4_1 = ccdot4_1\n self.ccdot4_2 = ccdot4_2\n\n def add_new_circles_updaters(self):\n def get_new_center(k):\n return self.calc_new_circles_centers_and_radii()[k][0]\n def get_abs_new_radius(k):\n return np.abs(self.calc_new_circles_centers_and_radii()[k][1])\n # Since enumerate() won't work here (seriously?),\n # I have to use a much more direct approach - list them all.\n self.c4_1.add_updater(lambda c: c.move_to(get_new_center(0)))\n self.c4_1.add_updater(lambda c: c.set_height(2*get_abs_new_radius(0)))\n self.c4_2.add_updater(lambda c: c.move_to(get_new_center(1)))\n self.c4_2.add_updater(lambda c: c.set_height(2*get_abs_new_radius(1)))\n\n def add_new_circles(self):\n if not hasattr(self, \"new_circles\"):\n self.new_circles = generate_new_circles()\n self.add(self.new_circles)\n\n def get_new_circles(self):\n if not hasattr(self, \"new_circles\"):\n self.new_circles = generate_new_circles()\n return self.new_circles\n\n def add_new_circles_centers(self):\n self.add(self.ccdot4_1, self.ccdot4_2)\n\n def remove_new_circles_center(self):\n self.remove(self.ccdot4_1, self.ccdot4_2)\n\n\n\n#####\n## Inversion Introduction Scenes\nclass ConceptsInInversion(Scene):\n CONFIG = {\n \"color_circle\" : YELLOW,\n \"color_radius\" : RED,\n \"color_P\" : WHITE,\n }\n 
def construct(self):\n self.add_backgrounds()\n self.move_around_point_P()\n\n def add_backgrounds(self):\n circle_O = Circle(radius = 3.5, color = self.color_circle)\n circle_O.shift(3*LEFT)\n remark_circle = TextMobject(\"反演圆\", color = self.color_circle)\n remark_circle.next_to(circle_O.get_bottom(), UP)\n dot_O = Dot(circle_O.get_center(), color = self.color_circle)\n label_O = DotLabel(\"O\", dot_O, color = self.color_circle, position = DOWN)\n remark_O = TextMobject(\"反演中心\", color = self.color_circle)\n remark_O.next_to(label_O, LEFT, buff = 0.15)\n radius = Line(circle_O.get_center(), circle_O.get_left())\n label_radius = TexMobject(\"R\").scale(0.8)\n remark_radius = TextMobject(\"反演幂\").scale(0.8)\n brace_radius = Brace(radius, UP)\n brace_radius.put_at_tip(label_radius)\n remark_radius.next_to(label_radius, LEFT, buff = 0.15)\n group_radius = VGroup(radius, label_radius, brace_radius, remark_radius)\n group_radius.set_color(self.color_radius)\n group_radius.rotate(-PI/12, about_point = dot_O.get_center())\n def_inversion = TextMobject(\"反演变换:$P \\\\mapsto P'$\")\n rlt_inversion = TexMobject(\"|OP| \\\\times |OP'|=\", \"R^2\")\n rlt_inversion.next_to(def_inversion, DOWN, aligned_edge = RIGHT)\n rlt_inversion[-1].set_color(self.color_radius)\n remarks = VGroup(def_inversion, rlt_inversion)\n remarks.to_corner(DR)\n dot_P = Dot(LEFT, color = self.color_P)\n label_P = DotLabel(\"P\", dot_P, color = self.color_P, position = DL, label_buff = 0.2)\n dot_Pi = InversedDot(dot_P, circle_O, color = self.color_P)\n label_Pi = DotLabel(\"P'\", dot_Pi, color = self.color_P, position = DR, label_buff = 0.2)\n line_OP = TwoDotsSegment(dot_O, dot_P, stroke_width = 2)\n line_OPi = TwoDotsSegment(dot_O, dot_Pi, stroke_width = 2)\n self.add(remarks)\n self.add(group_radius)\n self.add(circle_O, dot_O, label_O, remark_O, remark_circle)\n self.add(dot_P, dot_Pi, label_P, label_Pi, line_OP, line_OPi)\n self.circle_O = circle_O\n self.dot_P = dot_P\n\n def move_around_point_P(self):\n self.dot_P.save_state()\n for dx, dy in [(-0.2, 0.3), (0.1, -0.4), (4, 0.3), (1, 1)]:\n vec = np.array([dx, dy, 0])\n self.play(self.dot_P.shift, vec, run_time = 1)\n self.wait()\n self.play(self.dot_P.move_to, self.circle_O.get_right())\n self.wait()\n self.play(self.dot_P.restore, run_time = 1)\n self.wait()\n\n\nclass InversionExamples(Scene):\n CONFIG = {\n \"color_circle\" : YELLOW,\n }\n def construct(self):\n circle_O = Circle(radius = 3.5, color = self.color_circle)\n circle_O.shift(3*LEFT)\n remark_circle = TextMobject(\"反演圆\", color = self.color_circle)\n remark_circle.next_to(circle_O.get_bottom(), UP)\n dot_O = Dot(circle_O.get_center(), color = self.color_circle)\n label_O = DotLabel(\"O\", dot_O, color = self.color_circle, position = DOWN)\n init_shape = Square(side_length = 1.2, color = BLUE).rotate(TAU/13)\n init_shape.next_to(circle_O.get_right(), LEFT, buff = 0.5)\n init_shape.save_state()\n inv_shape = InversedVMobject(init_shape, circle_O, use_dashed_vmob = False)\n new_shapes = [\n RegularPolygon(n = 6, start_angle = PI/7, color = PINK).scale(0.8),\n TexMobject(\"42\", color = RED).scale(2.5).rotate(-PI/9),\n TexMobject(\"\\\\pi\", color = MAROON_B).scale(5).rotate(PI/15),\n ]\n\n self.add(circle_O, remark_circle, dot_O, label_O)\n self.add(init_shape, inv_shape)\n for new_shape in new_shapes:\n # new_shape.set_color(BLUE)\n new_shape.next_to(circle_O.get_right(), LEFT, buff = 0.6)\n self.play(Transform(init_shape, new_shape), run_time = 1)\n self.wait()\n init_shape.generate_target()\n 
init_shape.target.become(new_shape)\n init_shape.target.shift(get_random_vector(0.5))\n random_angle = 0.5*np.random.random()\n init_shape.target.rotate(random_angle)\n self.play(MoveToTarget(init_shape, path_arc = random_angle, run_time = 1)),\n self.wait()\n self.play(ApplyMethod(init_shape.restore))\n self.wait()\n\n\nclass LineToLineInversion(Scene):\n CONFIG = {\n \"color_circle\" : YELLOW,\n \"color_orig\" : BLUE,\n \"color_inv\" : RED,\n }\n def construct(self):\n self.add_backgrounds()\n self.show_line_to_line_inversion()\n\n def add_backgrounds(self):\n circle_O = Circle(radius = 2.5, color = self.color_circle)\n remark_circle = TextMobject(\"反演圆\", color = self.color_circle)\n remark_circle.next_to(circle_O.get_bottom(), UP)\n dot_O = Dot(circle_O.get_center(), color = self.color_circle)\n label_O = DotLabel(\"O\", dot_O, color = self.color_circle, position = DOWN)\n conclusion = TextMobject(\"经过反演中心的直线\", \"$\\\\mapsto$\", \"经过反演中心的直线\")\n conclusion.scale(0.8)\n conclusion[0].set_color(self.color_orig)\n conclusion[2].set_color(self.color_inv)\n conclusion.to_corner(DR)\n self.add(circle_O, remark_circle, dot_O, label_O)\n self.add(conclusion)\n self.circle_O = circle_O\n \n def show_line_to_line_inversion(self):\n angle_tracker = ValueTracker(-PI/11)\n position_tracker = ValueTracker(1.4)\n angle_tracker.save_state()\n position_tracker.save_state()\n orig_line = ExtendedLine(LEFT, RIGHT, color = self.color_orig, stroke_width = 8)\n orig_line.add_updater(lambda m: m.rotate(angle_tracker.get_value() - m.get_angle()))\n inv_line = ExtendedLine(LEFT, RIGHT, color = self.color_inv, stroke_width = 4)\n inv_line.add_updater(lambda m: m.rotate(angle_tracker.get_value() - m.get_angle()))\n dot_P = Dot(color = self.color_orig)\n dot_P.add_updater(\n lambda m: m.move_to(\n position_tracker.get_value() * rotate_vector(RIGHT, angle_tracker.get_value())\n )\n )\n dot_Pi = InversedDot(dot_P, self.circle_O, is_hollow = False, color = self.color_inv)\n label_P = DotLabel(\"P\", dot_P, position = DOWN, color = self.color_orig)\n label_Pi = DotLabel(\"P'\", dot_Pi, position = DOWN, color = self.color_inv)\n \n def get_lb():\n return LEFT_SIDE + UP * LEFT_SIDE[0] * np.tan(angle_tracker.get_value())\n def get_rb():\n return RIGHT_SIDE + UP * RIGHT_SIDE[0] * np.tan(angle_tracker.get_value())\n def is_oolb(m):\n return m.get_right()[0] < LEFT_SIDE[0]\n def is_oorb(m):\n return m.get_left()[0] > RIGHT_SIDE[0]\n\n oolb_arrow = Arrow(ORIGIN, LEFT, color = self.color_inv).scale(2)\n oolb_arrow.add_updater(lambda m: m.set_angle(angle_tracker.get_value() + PI))\n oolb_arrow.add_updater(lambda m: m.next_to(get_lb(), DOWN, aligned_edge = LEFT, buff = 0.2))\n oorb_arrow = Arrow(ORIGIN, RIGHT, color = self.color_inv).scale(2)\n oorb_arrow.add_updater(lambda m: m.set_angle(angle_tracker.get_value()))\n oorb_arrow.add_updater(lambda m: m.next_to(get_rb(), DOWN, aligned_edge = RIGHT, buff = 0.2))\n oolb_label = TexMobject(\"P'\", color = self.color_inv, background_stroke_width = 0)\n oolb_label.add_updater(lambda m: m.next_to(oolb_arrow, DOWN, buff = 0.2))\n oorb_label = TexMobject(\"P'\", color = self.color_inv, background_stroke_width = 0)\n oorb_label.add_updater(lambda m: m.next_to(oorb_arrow, DOWN, buff = 0.2))\n oolb_group = VGroup(oolb_arrow, oolb_label)\n oorb_group = VGroup(oorb_arrow, oorb_label)\n oolb_group.add_updater(lambda m: m.set_fill(opacity = 1 if is_oolb(label_Pi) else 0))\n oolb_group.add_updater(lambda m: m.set_stroke(opacity = 1 if is_oolb(label_Pi) else 0))\n oorb_group.add_updater(lambda 
m: m.set_fill(opacity = 1 if is_oorb(label_Pi) else 0))\n oorb_group.add_updater(lambda m: m.set_stroke(opacity = 1 if is_oorb(label_Pi) else 0))\n\n self.add(orig_line, inv_line, dot_P, dot_Pi, label_P, label_Pi)\n self.add(oolb_group, oorb_group)\n for d_position, d_angle in [(2, 0), (1, PI/10), (-5, 0), (-3, -PI/7), (4, PI/11)]:\n self.play(\n ApplyMethod(position_tracker.increment_value, d_position),\n ApplyMethod(angle_tracker.increment_value, d_angle),\n run_time = 2,\n )\n self.wait()\n self.play(\n ApplyMethod(angle_tracker.restore),\n ApplyMethod(position_tracker.restore),\n run_time = 2,\n )\n self.wait()\n\n\nclass LineToCircleInversion(Scene):\n CONFIG = {\n \"color_circle\" : YELLOW,\n \"color_orig\" : BLUE,\n \"color_inv\" : RED,\n \"line_config\" : {\n \"stroke_width\" : 2,\n \"color\" : WHITE,\n },\n }\n def construct(self):\n self.add_backgrounds()\n self.add_shapes()\n self.show_line_to_circle_inversion()\n\n def add_backgrounds(self):\n circle_O = Circle(radius = 3, color = self.color_circle)\n circle_O.shift(3*LEFT+0.5*UP)\n remark_circle = TextMobject(\"反演圆\", color = self.color_circle)\n remark_circle.next_to(circle_O.get_bottom(), UP)\n dot_O = Dot(circle_O.get_center(), color = self.color_circle)\n label_O = DotLabel(\"O\", dot_O, color = self.color_circle, position = DOWN)\n conclusion1 = TextMobject(\"不经过反演中心的直线\", \"$\\\\mapsto$\", \"经过反演中心的圆\")\n conclusion1[0].set_color(self.color_orig)\n conclusion1[-1].set_color(self.color_inv)\n conclusion2 = TextMobject(\"经过反演中心的圆\", \"$\\\\mapsto$\", \"不经过反演中心的直线\")\n conclusion2[0].set_color(self.color_inv)\n conclusion2[-1].set_color(self.color_orig)\n conclusions = VGroup(conclusion1, conclusion2)\n for c in conclusions:\n c.scale(0.8)\n conclusions.arrange_submobjects(DOWN, index_of_submobject_to_align = 1)\n conclusions.to_corner(DR)\n bg_rect = BackgroundRectangle(conclusions)\n self.add(circle_O, remark_circle)\n self.add_foreground_mobjects(dot_O, label_O, bg_rect, conclusions)\n self.dot_O = dot_O\n self.circle_O = circle_O\n self.conclusions = conclusions\n self.bg_rect = bg_rect\n\n def add_shapes(self):\n position_tracker = ValueTracker(2)\n line_angle_tracker = ValueTracker(PI*9/19)\n circle_angle_tracker = ValueTracker(PI/5)\n line = ExtendedLine(LEFT, RIGHT, color = self.color_orig)\n line.add_updater(lambda m: m.move_to(position_tracker.get_value() * RIGHT))\n line.add_updater(lambda m: m.rotate(line_angle_tracker.get_value() - m.get_angle()))\n inv_line = InversedVMobject(line, self.circle_O, use_dashed_vmob = False, color = self.color_inv)\n inv_line_center = SmallDot(color = self.color_inv)\n inv_line_center.add_updater(lambda m: m.move_to(inv_line.get_center()))\n dot_Ai = Dot(color = self.color_inv)\n dot_Ai.add_updater(\n lambda m: m.move_to(inv_line.get_center() * 2 - self.circle_O.get_center())\n )\n dot_Pi = Dot(color = self.color_inv)\n dot_Pi.add_updater(\n lambda m: m.move_to(\n inv_line.get_center() \\\n + rotate_vector(\n inv_line.get_center() - self.circle_O.get_center(),\n circle_angle_tracker.get_value()\n )\n )\n )\n dot_P = InversedDot(dot_Pi, self.circle_O, is_hollow = False, color = self.color_orig)\n dot_A = InversedDot(dot_Ai, self.circle_O, is_hollow = False, color = self.color_orig)\n line_OA, line_OAi, line_OP, line_OPi, line_AP, line_AiPi = aux_lines = VGroup(*[\n TwoDotsSegment(pt_1, pt_2, **self.line_config)\n for pt_1, pt_2 in [\n (self.dot_O, dot_A), (self.dot_O, dot_Ai),\n (self.dot_O, dot_P), (self.dot_O, dot_Pi),\n (dot_A, dot_P), (dot_Ai, dot_Pi)\n ]\n ])\n ai_AiOPi = 
AngleIndicator(dot_Ai, self.dot_O, dot_Pi, color = MAROON_B, radius = 0.8)\n rtai_OAP = RightAngleIndicator(self.dot_O, dot_A, dot_P)\n rtai_OPiAi = RightAngleIndicator(self.dot_O, dot_Pi, dot_Ai)\n label_P = TexMobject(\"P\", color = self.color_orig)\n label_Pi = TexMobject(\"P'\", color = self.color_inv)\n label_A = TexMobject(\"A\", color = self.color_orig)\n label_Ai = TexMobject(\"A'\", color = self.color_inv)\n label_A.add_updater(\n lambda m: m.move_to(\n dot_A.get_center() + 0.3 * normalize(dot_A.get_center() - self.dot_O.get_center())\n )\n )\n label_P.add_updater(\n lambda m: m.move_to(\n dot_P.get_center() + 0.3 * normalize(dot_A.get_center() - self.dot_O.get_center())\n )\n )\n label_Ai.add_updater(\n lambda m: m.move_to(\n dot_Ai.get_center() + 0.4 * rotate_vector(\n normalize(dot_Ai.get_center() - inv_line_center.get_center()), -PI/4\n )\n )\n )\n label_Pi.add_updater(\n lambda m: m.move_to(\n dot_Pi.get_center() + 0.4 * normalize(dot_Pi.get_center() - inv_line_center.get_center())\n )\n )\n\n def get_ub():\n return line.get_center() + TOP + RIGHT * TOP[1] / np.tan(line_angle_tracker.get_value())\n def get_bb():\n return line.get_center() + BOTTOM + RIGHT * BOTTOM[1] / np.tan(line_angle_tracker.get_value())\n def is_ooub(m):\n return m.get_bottom()[1] > TOP[1]\n def is_oobb(m):\n return m.get_top()[1] < BOTTOM[1]\n ooub_arrow = Arrow(ORIGIN, LEFT, color = self.color_orig).scale(2)\n ooub_arrow.add_updater(lambda m: m.set_angle(line_angle_tracker.get_value()))\n ooub_arrow.add_updater(lambda m: m.next_to(get_ub(), RIGHT, aligned_edge = TOP, buff = 0.2))\n oobb_arrow = Arrow(ORIGIN, RIGHT, color = self.color_orig).scale(2)\n oobb_arrow.add_updater(lambda m: m.set_angle(line_angle_tracker.get_value() + PI))\n oobb_arrow.add_updater(lambda m: m.next_to(get_bb(), RIGHT, aligned_edge = BOTTOM, buff = 0.2))\n oolb_label = TexMobject(\"P\", color = self.color_orig, background_stroke_width = 0)\n oolb_label.add_updater(lambda m: m.next_to(ooub_arrow, RIGHT, buff = 0.2))\n oorb_label = TexMobject(\"P\", color = self.color_orig, background_stroke_width = 0)\n oorb_label.add_updater(lambda m: m.next_to(oobb_arrow, RIGHT, buff = 0.2))\n ooub_group = VGroup(ooub_arrow, oolb_label)\n oobb_group = VGroup(oobb_arrow, oorb_label)\n ooub_group.add_updater(lambda m: m.set_fill(opacity = 1 if is_ooub(label_P) else 0))\n ooub_group.add_updater(lambda m: m.set_stroke(opacity = 1 if is_ooub(label_P) else 0))\n oobb_group.add_updater(lambda m: m.set_fill(opacity = 1 if is_oobb(label_P) else 0))\n oobb_group.add_updater(lambda m: m.set_stroke(opacity = 1 if is_oobb(label_P) else 0))\n\n self.add(line, inv_line)\n self.add(dot_A, dot_P, dot_Ai, dot_Pi)\n self.add(label_P, label_Pi, label_A, label_Ai)\n self.add(aux_lines)\n self.add(ai_AiOPi, rtai_OAP, rtai_OPiAi)\n self.add(ooub_group, oobb_group)\n\n self.position_tracker = position_tracker\n self.line_angle_tracker = line_angle_tracker\n self.circle_angle_tracker = circle_angle_tracker\n\n def show_line_to_circle_inversion(self):\n play_args = [\n [0, PI/12, 0, 2],\n [0, 0, PI*7/5, 4],\n [-2, PI/8, -PI/5, 3],\n [0, 0, PI*19/10, 6],\n [1.5, -PI/7, PI*2/5, 4],\n ]\n restore_arg = [\n -sum([arg[k] for arg in play_args])\n for k in range(len(play_args[0]))\n ]\n restore_arg[1] = (restore_arg[1] + PI) % (2*PI) - PI\n restore_arg[2] = (restore_arg[2] + PI) % (2*PI) - PI\n restore_arg[-1] = 3\n play_args.append(restore_arg)\n for d_center, d_line_angle, d_circle_angle, run_time in play_args:\n self.play(\n ApplyMethod(self.position_tracker.increment_value, 
d_center),\n ApplyMethod(self.line_angle_tracker.increment_value, d_line_angle),\n ApplyMethod(self.circle_angle_tracker.increment_value, d_circle_angle),\n run_time = run_time,\n )\n self.wait()\n\n\nclass InversionCreateSimilarTriangles(Scene):\n CONFIG = {\n \"random_seed\" : 5+7-0,\n \"num_of_nudges\" : 5,\n \"max_step\" : 1,\n \"color_A\" : RED,\n \"color_B\" : BLUE,\n \"color_combined\" : MAROON_B,\n \"color_circle\": YELLOW,\n }\n def construct(self):\n self.add_remark()\n self.show_figure_animation()\n\n def add_remark(self):\n cond_1 = TexMobject(\"{|OP|\", \"\\\\over\", \"|OQ|}\", \"=\", \"{|OQ'|\", \"\\\\over\", \"|OP'|}\")\n cond_2 = TexMobject(\"\\\\angle POQ\", \"=\", \"\\\\angle Q'OP'\")\n conds = VGroup(cond_1, cond_2)\n conds.arrange_submobjects(DOWN, buff = 0.5)\n conds_rect = SurroundingRectangle(conds, color = WHITE)\n arrow = TexMobject(\"\\\\Downarrow\")\n arrow.next_to(conds_rect, DOWN)\n concl = TexMobject(\"\\\\triangle OPQ\", \"\\\\sim\", \"\\\\triangle OQ'P'\")\n concl.next_to(arrow, DOWN)\n for mob in (cond_1[0], cond_1[2], concl[0]):\n mob.set_color(self.color_A)\n for mob in (cond_1[-1], cond_1[-3], concl[-1]):\n mob.set_color(self.color_B)\n for mob in (cond_2[0], cond_2[-1]):\n mob.set_color(self.color_combined)\n remark = VGroup(conds, conds_rect, arrow, concl)\n remark.to_corner(DR)\n self.add(remark)\n\n def show_figure_animation(self):\n circle = Circle(radius = 3, color = self.color_circle)\n circle.move_to(3.5*LEFT)\n dot_O = Dot(color = self.color_combined)\n dot_O.add_updater(lambda m: m.move_to(circle.get_center()))\n dot_P = Dot(point = 1.2*UP+LEFT, color = self.color_A)\n dot_Q = Dot(point = 0.5*DOWN+1.9*LEFT, color = self.color_A)\n dot_Pi = InversedDot(dot_P, circle, is_hollow = False, color = self.color_B)\n dot_Qi = InversedDot(dot_Q, circle, is_hollow = False, color = self.color_B)\n triangle_OPQ = ManyDotsPolygon(\n dot_O, dot_P, dot_Q, color = self.color_A,\n stroke_width = 5, fill_opacity = 0.4\n )\n triangle_OPiQi = ManyDotsPolygon(\n dot_O, dot_Pi, dot_Qi, color = self.color_B,\n stroke_width = 2, fill_opacity = 0.3\n )\n label_O, label_P, label_Pi, label_Q, label_Qi = (\n DotLabel(\n text, dot, color = color, position = position,\n background_stroke_width = 5,\n ).scale(0.8)\n for text, dot, color, position in zip(\n [\"O\", \"P\", \"P'\", \"Q\", \"Q'\"],\n [dot_O, dot_P, dot_Pi, dot_Q, dot_Qi],\n [self.color_combined, self.color_A, self.color_B, self.color_A, self.color_B],\n [LEFT, UP, UP, DOWN, DOWN]\n )\n )\n self.add(dot_O, dot_P, dot_Q, dot_Pi, dot_Qi)\n self.add(circle, triangle_OPQ, triangle_OPiQi)\n self.add(label_O, label_P, label_Pi, label_Q, label_Qi)\n dot_P.save_state()\n dot_Q.save_state()\n for k in range(self.num_of_nudges):\n nudge_P = get_random_vector(self.max_step)\n nudge_Q = get_random_vector(self.max_step)\n self.play(\n ApplyMethod(dot_P.shift, nudge_P),\n ApplyMethod(dot_Q.shift, nudge_Q),\n run_time = 2\n )\n self.wait()\n self.play(dot_P.restore, dot_Q.restore, run_time = 2)\n self.wait()\n\n\nclass CircleToCircleInversionProof(Scene):\n CONFIG = {\n \"color_O\" : YELLOW,\n \"color_A\" : RED,\n \"color_B\" : BLUE,\n \"color_combined\" : MAROON_B,\n \"label_buff\" : 0.1,\n \"label_scaling_factor\" : 0.75,\n \"line_config\" : {\n \"stroke_width\" : 2,\n \"color\" : WHITE,\n },\n }\n def construct(self):\n self.add_backgrounds()\n self.show_left_and_right_points()\n self.show_random_point()\n self.show_similar_triangles()\n self.show_complementary_property()\n self.show_inversion_result()\n\n def 
add_backgrounds(self):\n circle_O = Circle(radius = 3.2, color = self.color_O)\n circle_O.shift(3.5*LEFT)\n dot_O = Dot(circle_O.get_center(), color = self.color_O)\n remark_O = TextMobject(\"反演圆\", color = YELLOW)\n remark_O.next_to(circle_O.get_bottom(), UP, buff = 0.4)\n circle_C = Circle(radius = 0.8, stroke_width = 2)\n circle_C.next_to(circle_O.get_right(), LEFT, buff = 0.5)\n dot_C = Dot(circle_C.get_center())\n label_O, label_C = (\n DotLabel(\n text, dot, color = color, position = DOWN, label_buff = self.label_buff\n ).scale(self.label_scaling_factor)\n for text, dot, color in zip([\"O\", \"C\"], [dot_O, dot_C], [self.color_O, WHITE])\n )\n for orig_mob in (circle_C, dot_C, label_C):\n orig_mob.set_sheen_direction(RIGHT)\n orig_mob.set_color([self.color_A, self.color_B])\n inv_circle_template = InversedVMobject(circle_C, circle_O, use_dashed_vmob = False)\n inv_circle = Circle(radius = inv_circle_template.get_width()/2)\n inv_circle.move_to(inv_circle_template.get_center())\n inv_circle.set_sheen_direction(LEFT)\n inv_circle.set_color([self.color_A, self.color_B])\n self.add(circle_O, dot_O, circle_C, dot_C)\n self.add(label_O, label_C)\n self.add(remark_O)\n self.wait()\n\n self.circle_O = circle_O\n self.dot_O = dot_O\n self.remark_O = remark_O\n self.circle_C = circle_C\n self.dot_C = dot_C\n self.inv_circle = inv_circle\n\n def show_left_and_right_points(self):\n dot_A = Dot(color = self.color_A)\n dot_A.move_to(self.circle_C.get_left())\n dot_B = Dot(color = self.color_B)\n dot_B.move_to(self.circle_C.get_right())\n dot_Ai = InversedDot(dot_A, self.circle_O, is_hollow = False, color = self.color_A)\n dot_Bi = InversedDot(dot_B, self.circle_O, is_hollow = False, color = self.color_B)\n dot_Q = Dot((dot_Ai.get_center() + dot_Bi.get_center()) / 2)\n line_OB = Line(self.dot_O.get_center(), dot_B.get_center(), **self.line_config)\n line_OAi = Line(self.dot_O.get_center(), dot_Ai.get_center(), **self.line_config)\n label_A, label_Ai, label_B, label_Bi = (\n DotLabel(\n text, dot, color = color, position = position, label_buff = self.label_buff\n ).scale(self.label_scaling_factor)\n for text, dot, color, position in zip(\n [\"A\", \"A'\", \"B\", \"B'\"],\n [dot_A, dot_Ai, dot_B, dot_Bi],\n [self.color_A, self.color_A, self.color_B, self.color_B],\n [DL, DR, DR, DL]\n )\n )\n remark_AB = TextMobject(\"圆心连线 \\\\\\\\ 的交点...\").scale(0.6)\n remark_AB.next_to(VGroup(dot_A, dot_B), DOWN, buff = 1)\n arrows_AB = VGroup(*[\n Arrow(remark_AB.get_critical_point(direction), dot, buff = 0.1)\n for direction, dot in zip([UL, UR], [dot_A, dot_B])\n ])\n remark_AiBi = TextMobject(\"...以及它们的反点\").scale(0.8)\n remark_AiBi.next_to(VGroup(dot_Ai, dot_Bi), DOWN, buff = 1)\n arrows_AiBi = VGroup(*[\n Arrow(remark_AiBi.get_critical_point(direction), dot, buff = 0.1)\n for direction, dot in zip([UR, UL], [dot_Ai, dot_Bi])\n ])\n self.play(ShowCreation(line_OB))\n self.play(Write(dot_A), Write(dot_B), Write(label_A), Write(label_B))\n self.wait()\n self.play(Write(remark_AB), ShowCreation(arrows_AB))\n self.wait()\n self.play(\n ReplacementTransform(dot_A.deepcopy(), dot_Ai),\n ReplacementTransform(dot_B.deepcopy(), dot_Bi),\n )\n self.play(Write(label_Ai), Write(label_Bi))\n self.wait()\n self.play(\n ReplacementTransform(remark_AB, remark_AiBi),\n ReplacementTransform(arrows_AB, arrows_AiBi)\n )\n self.play(ReplacementTransform(line_OB, line_OAi))\n self.play(FadeOut(VGroup(remark_AiBi, arrows_AiBi)))\n self.wait()\n\n self.dot_A = dot_A\n self.dot_Ai = dot_Ai\n self.dot_B = dot_B\n self.dot_Bi = 
dot_Bi\n self.dot_Q = dot_Q\n self.line_OAi = line_OAi\n self.dots_AB = VGroup(dot_A, dot_Ai, dot_B, dot_Bi)\n self.labels_AB = VGroup(label_A, label_Ai, label_B, label_Bi)\n\n def show_random_point(self):\n angle_tracker = ValueTracker(PI/3)\n dot_P = Dot()\n dot_P.add_updater(\n lambda m: m.move_to(\n self.circle_C.point_at_angle(angle_tracker.get_value() % TAU)\n )\n )\n dot_P.add_updater(\n lambda m: m.set_color(\n interpolate_color(\n self.color_A, self.color_B,\n (dot_P.get_center()[0] - self.dot_A.get_center()[0]) / (self.dot_B.get_center()[0] - self.dot_A.get_center()[0])\n )\n )\n )\n label_P = DotLabel(\"P\", dot_P, position = None)\n label_P.scale(0.8)\n label_P.add_updater(lambda m: m.set_color(dot_P.get_color()))\n label_P.add_updater(\n lambda m: m.move_to(dot_P.get_center() * 1.4 - self.dot_C.get_center() * 0.4)\n )\n arrow_P = Vector(DR, buff = 0, color = WHITE).scale(0.5)\n arrow_P.add_updater(lambda m: m.next_to(dot_P, UL, buff = 0.1))\n remark_P = TextMobject(\"圆上任意一点...\").scale(0.75)\n remark_P.add_updater(lambda m: m.next_to(arrow_P, UL, buff = 0.1))\n dot_Pi = InversedDot(dot_P, self.circle_O, is_hollow = False)\n dot_Pi.add_updater(lambda m: m.set_color(dot_P.get_color()))\n label_Pi = DotLabel(\"P'\", dot_Pi, position = None)\n label_Pi.scale(0.8)\n label_Pi.add_updater(lambda m: m.set_color(dot_Pi.get_color()))\n label_Pi.add_updater(\n lambda m: m.move_to(dot_Pi.get_center() * 1.1 - self.inv_circle.get_center() * 0.1)\n )\n arrow_Pi = Vector(DL, buff = 0, color = WHITE).scale(0.5)\n arrow_Pi.add_updater(lambda m: m.next_to(dot_Pi, UR, buff = 0.1))\n remark_Pi = TextMobject(\"...以及它的反点\").scale(0.75)\n remark_Pi.add_updater(lambda m: m.next_to(arrow_Pi, UR, buff = 0.1))\n line_OP, line_OPi, line_AP, line_AiPi, line_BP, line_BiPi = aux_lines = VGroup(*[\n TwoDotsSegment(pt_1, pt_2, **self.line_config)\n for pt_1, pt_2 in [\n (self.dot_O, dot_P), (self.dot_O, dot_Pi), (self.dot_A, dot_P),\n (self.dot_Ai, dot_Pi), (self.dot_B, dot_P), (self.dot_Bi, dot_Pi)\n ]\n ])\n rtai_APB = RightAngleIndicator(self.dot_A, dot_P, self.dot_B)\n rtai_BiPiAi = RightAngleIndicator(self.dot_Bi, dot_Pi, self.dot_Ai, side_length = 0.5)\n self.play(Write(dot_P), Write(label_P))\n self.play(ShowCreation(arrow_P), Write(remark_P))\n self.play(Write(line_AP), Write(line_BP))\n self.play(ShowCreation(rtai_APB))\n self.wait()\n self.play(ReplacementTransform(dot_P.deepcopy(), dot_Pi))\n self.play(Write(label_Pi))\n self.play(\n ReplacementTransform(arrow_P.deepcopy(), arrow_Pi),\n ReplacementTransform(remark_P.deepcopy(), remark_Pi),\n )\n self.play(angle_tracker.increment_value, PI/6, run_time = 2)\n self.play(FadeOut(VGroup(arrow_P, remark_P, arrow_Pi, remark_Pi)))\n self.wait()\n self.play(Write(VGroup(line_OP, line_OPi, line_AiPi, line_BiPi)))\n self.wait()\n\n self.dot_P = dot_P\n self.dot_Pi = dot_Pi\n self.rtai_APB = rtai_APB\n self.rtai_BiPiAi = rtai_BiPiAi\n self.angle_tracker = angle_tracker\n self.aux_lines = aux_lines\n self.dots_P = VGroup(dot_P, dot_Pi)\n self.labels_P = VGroup(label_P, label_Pi)\n self.rtais = VGroup(self.rtai_APB, self.rtai_BiPiAi)\n\n def show_similar_triangles(self):\n ai_OAP = AngleIndicator(self.dot_O, self.dot_A, self.dot_P, radius = 0.3, color = self.color_A)\n ai_OBP = AngleIndicator(self.dot_O, self.dot_B, self.dot_P, radius = 0.4, color = self.color_B)\n ai_OPiAi = AngleIndicator(self.dot_O, self.dot_Pi, self.dot_Ai, radius = 0.3, color = self.color_A)\n ai_OPiBi = AngleIndicator(self.dot_O, self.dot_Pi, self.dot_Bi, radius = 0.4, color = 
self.color_B)\n triangle_OAP, triangle_OPiAi, triangle_OBP, triangle_OPiBi = [\n ManyDotsPolygon(\n pt_1, pt_2, pt_3, color = self.color_combined,\n stroke_width = 0, fill_opacity = 0.4\n )\n for pt_1, pt_2, pt_3 in (\n (self.dot_O, self.dot_A, self.dot_P),\n (self.dot_O, self.dot_Pi, self.dot_Ai),\n (self.dot_O, self.dot_B, self.dot_P),\n (self.dot_O, self.dot_Pi, self.dot_Bi),\n )\n ]\n remark_sim_A = TexMobject(\"\\\\triangle OAP\", \"\\\\sim\", \"\\\\triangle OP'A'\")\n remark_sim_B = TexMobject(\"\\\\triangle OBP\", \"\\\\sim\", \"\\\\triangle OP'B'\")\n remark_arrow = TexMobject(\"\\\\Downarrow\")\n remark_angle_A = TexMobject(\"\\\\angle OAP\", \"=\", \"\\\\angle OP'A'\")\n remark_angle_B = TexMobject(\"\\\\angle OBP\", \"=\", \"\\\\angle OP'B'\")\n remarks_A = VGroup(remark_sim_A, remark_arrow, remark_angle_A)\n remarks_B = VGroup(remark_sim_B, remark_arrow, remark_angle_B)\n remarks_A.arrange_submobjects(DOWN)\n remarks_A.next_to(self.dot_Q, DOWN, buff = 1)\n remark_sim_B.move_to(remark_sim_A.get_center())\n remark_angle_B.move_to(remark_angle_A.get_center())\n for remark, color in ([remark_sim_A, self.color_combined], [remark_sim_B, self.color_combined], \\\n [remark_angle_A, self.color_A], [remark_angle_B, self.color_B]):\n remark[0].set_color(color)\n remark[-1].set_color(color)\n self.play(Write(remark_sim_A))\n self.play(FadeInFromDown(VGroup(remark_arrow, remark_angle_A)))\n self.wait()\n self.play(ShowCreation(triangle_OAP), ShowCreation(ai_OAP))\n self.wait()\n self.play(\n ReplacementTransform(triangle_OAP, triangle_OPiAi),\n ReplacementTransform(ai_OAP.deepcopy(), ai_OPiAi),\n )\n self.play(FadeOut(triangle_OPiAi))\n self.wait()\n self.play(ReplacementTransform(remarks_A, remarks_B))\n self.wait()\n self.play(ShowCreation(triangle_OBP), ShowCreation(ai_OBP))\n self.wait()\n self.play(\n ReplacementTransform(triangle_OBP, triangle_OPiBi),\n ReplacementTransform(ai_OBP.deepcopy(), ai_OPiBi),\n )\n self.play(FadeOut(remarks_B), FadeOut(triangle_OPiBi))\n self.wait()\n\n self.ai_OAP = ai_OAP\n self.ai_OBP = ai_OBP\n self.ai_OPiAi = ai_OPiAi\n self.ai_OPiBi = ai_OPiBi\n self.ais = VGroup(ai_OAP, ai_OBP, ai_OPiAi, ai_OPiBi)\n\n def show_complementary_property(self):\n ai_OAP_copy = self.ai_OAP.deepcopy()\n ai_OBP_copy = self.ai_OBP.deepcopy()\n rtai_APB_copy = self.rtai_APB.deepcopy()\n for ai_copy in (ai_OAP_copy, ai_OBP_copy, rtai_APB_copy):\n ai_copy.clear_updaters()\n comp_prop = VGroup(ai_OAP_copy, TexMobject(\"=\"), ai_OBP_copy, TexMobject(\"+\"), rtai_APB_copy)\n comp_prop.arrange_submobjects(RIGHT)\n comp_prop.scale(1.2)\n comp_prop.next_to(self.circle_O.get_top(), DOWN, buff = 1)\n self.play(\n ReplacementTransform(self.ai_OAP.deepcopy(), ai_OAP_copy),\n ReplacementTransform(self.ai_OBP.deepcopy(), ai_OBP_copy),\n ReplacementTransform(self.rtai_APB.deepcopy(), rtai_APB_copy),\n )\n self.play(Write(comp_prop[1]), Write(comp_prop[3]))\n self.wait()\n self.play(ReplacementTransform(rtai_APB_copy.deepcopy(), self.rtai_BiPiAi))\n self.wait()\n for ai in self.ais:\n ai.clear_updaters()\n self.play(\n FadeOut(comp_prop),\n FadeOut(self.ais),\n FadeOut(self.labels_AB), FadeOut(self.labels_P),\n )\n self.wait()\n\n def show_inversion_result(self):\n inv_circle_copy = self.inv_circle.deepcopy()\n self.play(self.angle_tracker.set_value, PI, run_time = 2)\n self.wait()\n def update_inv_circle(inv_circle):\n angle = self.angle_tracker.get_value()\n if (angle <= -PI) or (angle > PI):\n alpha = 1\n else:\n QPi = self.dot_Pi.get_center() - self.dot_Q.get_center()\n QAi = 
self.dot_Ai.get_center() - self.dot_Q.get_center()\n theta = angle_between(QPi, QAi)\n if self.dot_Pi.get_center()[1] < self.dot_Q.get_center()[1]:\n theta = 2*PI - theta\n alpha = theta / (2*PI)\n inv_circle.become(inv_circle_copy.get_subcurve(0, alpha))\n self.inv_circle.add_updater(update_inv_circle)\n self.add(self.inv_circle)\n self.play(\n ApplyMethod(self.angle_tracker.increment_value, -2*PI),\n run_time = 5,\n )\n self.inv_circle.clear_updaters()\n for line in self.aux_lines:\n line.clear_updaters()\n self.play(\n FadeOut(self.dots_AB), FadeOut(self.dots_P), FadeOut(self.rtais),\n FadeOut(self.line_OAi), FadeOut(self.aux_lines)\n )\n self.wait()\n color_template = Square(\n stroke_width = 0, fill_opacity = 1, fill_color = [self.color_A, self.color_B]\n )\n conclusion = TextMobject(\"不经过反演中心的圆\", \"$\\\\mapsto$\", \"不经过反演中心的圆\")\n conclusion.scale(0.8)\n conclusion[0].set_color_by_gradient(self.color_A, self.color_B)\n conclusion[2].set_color_by_gradient(self.color_B, self.color_A)\n conclusion.to_corner(DR)\n self.play(Write(conclusion))\n self.wait(3)\n self.play(FadeOut(conclusion), FadeOut(self.inv_circle))\n self.wait()\n\n\nclass ConcentricPropertyDoesNotHold(Scene):\n def setup(self):\n N = 8\n self.circle_radii = [0.9-0.1*k for k in range(N)]\n self.dot_radii = [0.08-0.005*k for k in range(N)]\n self.circle_colors = color_gradient([BLUE, GREEN, RED], N)\n\n def construct(self):\n orig_circles = VGroup(*[\n Circle(radius = radius, stroke_width = 1.5,color = color)\n for radius, color in zip(self.circle_radii, self.circle_colors)]\n )\n orig_circles.shift(2*LEFT+0.5*DOWN)\n orig_circles_centers = VGroup(*[\n Dot(circle.get_center(), radius = radius, color = color)\n for circle, radius, color in zip(orig_circles, self.dot_radii, self.circle_colors)\n ])\n # Dot(orig_circles.get_center())\n circle = Circle(radius = 3, color = YELLOW)\n circle.shift(3.8*LEFT+0.5*DOWN)\n circle_center = Dot(circle.get_center(), color = YELLOW)\n inv_circles = VGroup(*[\n InversedVMobject(orig_circle, circle).clear_updaters().set_color(color)\n for orig_circle, color in zip(orig_circles, self.circle_colors)\n ])\n inv_circles_centers = VGroup(*[\n Dot(inv_circle.get_center(), color = color)\n for inv_circle, color in zip(inv_circles, self.circle_colors)\n ])\n\n circle_text = TextMobject(\"反演圆\", color = YELLOW)\n circle_text.next_to(circle.get_bottom(), UP, buff = 0.4)\n orig_circles_text = TextMobject(\"同心的圆\", color = WHITE)\n orig_circles_text.next_to(orig_circles, UP)\n orig_circles_text.to_edge(UP, buff = 0.4)\n inv_circles_text = TextMobject(\"不同心的像\", color = WHITE)\n inv_circles_text.next_to(inv_circles, UP)\n inv_circles_text.to_edge(UP, buff = 0.4)\n arrow = Arrow(orig_circles_text.get_right(), inv_circles_text.get_left())\n\n self.add(circle, circle_center)\n self.add(orig_circles, orig_circles_centers)\n self.add(inv_circles, inv_circles_centers)\n self.add(circle_text, orig_circles_text, inv_circles_text, arrow)\n self.wait()\n\n\nclass DemonstratePtolemyInequality(Scene):\n CONFIG = {\n \"R\" : 2.7,\n \"angle_A\" : -PI*2/3,\n \"angle_B\" : PI*4/5,\n \"angle_D\" : -PI/5,\n \"radius_C\" : 3.2,\n \"angle_C\" : PI/5,\n }\n def construct(self):\n radius_tracker = ValueTracker(self.radius_C)\n angle_tracker = ValueTracker(self.angle_C)\n circle = Circle(radius = self.R, color = WHITE, stroke_width = 1)\n circle.shift(DOWN)\n dashed_circle = DashedVMobject(circle, num_dashes = 100, positive_space_ratio = 0.5)\n dot_A, dot_B, dot_C, dot_D = dots = VGroup(*[\n Dot(circle.point_at_angle(angle 
% TAU), color = WHITE)\n for angle in (self.angle_A, self.angle_B, self.angle_C, self.angle_D)\n ])\n dot_C.add_updater(\n lambda m: m.move_to(\n circle.get_center() + radius_tracker.get_value() * \\\n rotate_vector(RIGHT, angle_tracker.get_value())\n )\n )\n dot_labels = VGroup(*[\n DotLabel(text, dot, position = position, label_buff = 0.1)\n for text, dot, position in zip(\n [\"A\", \"B\", \"C\", \"D\"], dots, [DL, UL, UR, DR]\n )\n ])\n lines = VGroup(*[\n TwoDotsSegment(dot_1, dot_2)\n for dot_1, dot_2 in (\n [dot_B, dot_A], [dot_A, dot_C], [dot_A, dot_D],\n [dot_B, dot_C], [dot_B, dot_D], [dot_C, dot_D],\n )\n ])\n length_labels = VGroup(*[LengthLabel(line) for line in lines])\n length_labels[0].switch_side()\n length_labels[2].switch_side()\n length_labels[1].set_offset(-0.4)\n length_labels[-2].set_offset(-0.4)\n\n def get_sums():\n AB, AC, AD, BC, BD, CD = [line.get_length() for line in lines]\n sum_lhs = AB * CD + AD * BC\n sum_rhs = AC * BD\n return sum_lhs, sum_rhs\n relation_eq = TexMobject(\n \"|AB| \\\\cdot |CD| + |AD| \\\\cdot |BC|\", \"=\", \"|AC| \\\\cdot |BD|\",\n background_stroke_width = 0,\n )\n relation_neq = TexMobject(\n \"|AB| \\\\cdot |CD| + |AD| \\\\cdot |BC|\", \">\", \"|AC| \\\\cdot |BD|\",\n background_stroke_width = 0,\n )\n relation_eq[1].set_color(GREEN)\n relation_neq[1].set_color(RED)\n relation_eq.to_edge(UP, buff = 1.2)\n for eq_mob, neq_mob in zip(relation_eq, relation_neq):\n neq_mob.move_to(eq_mob.get_center())\n lhs, eq_sign, rhs = relation_eq\n neq_sign = relation_neq[1]\n label_lhs = DecimalNumber(num_decimal_places = 4, show_ellipsis = True)\n label_rhs = DecimalNumber(num_decimal_places = 4, show_ellipsis = True)\n label_lhs.add_updater(lambda m: m.set_value(get_sums()[0]))\n label_rhs.add_updater(lambda m: m.set_value(get_sums()[1]))\n brace_lhs = Brace(lhs, UP, buff = 0.1)\n brace_rhs = Brace(rhs, UP, buff = 0.1)\n brace_lhs.put_at_tip(label_lhs)\n brace_rhs.put_at_tip(label_rhs)\n\n def get_indication_color(thres = 1e-2):\n return GREEN if is_close(radius_tracker.get_value(), self.R, thres = thres) else RED\n def get_indication_opacity(thres = 1e-2):\n return 0 if is_close(radius_tracker.get_value(), self.R, thres = thres) else 1\n figure_group = VGroup(dashed_circle, dots, lines, length_labels, dot_labels)\n figure_group.add_updater(lambda m: m.set_color(get_indication_color()))\n relation_group = VGroup(lhs, eq_sign, rhs, neq_sign, brace_lhs, brace_rhs, label_lhs, label_rhs)\n label_lhs.add_updater(lambda m: m.set_color(get_indication_color()))\n label_rhs.add_updater(lambda m: m.set_color(get_indication_color()))\n eq_sign.add_updater(lambda m: m.set_opacity(1 - get_indication_opacity()))\n neq_sign.add_updater(lambda m: m.set_opacity(get_indication_opacity()))\n self.add(figure_group)\n self.add(relation_group)\n\n deltas = [\n (0.5, -0.1), (0, -0.4), (-1, 0.3), (0, 0.4),\n (-1, 0), (0.3, -0.2), (0.7, -0.3),\n ]\n radius_tracker.save_state()\n angle_tracker.save_state()\n for d_radius, d_angle in deltas:\n self.play(\n ApplyMethod(radius_tracker.increment_value, d_radius),\n ApplyMethod(angle_tracker.increment_value, d_angle),\n run_time = 2,\n )\n self.wait()\n self.play(\n ApplyMethod(radius_tracker.restore),\n ApplyMethod(angle_tracker.restore),\n run_time = 2,\n )\n self.wait()\n\n\nclass PtolemyInversionFigure(Scene):\n CONFIG = {\n \"R\" : 3.8,\n \"r\" : 1.3,\n \"angle_A\" : PI,\n \"angle_B\" : PI/3,\n \"angle_C\" : -PI/9,\n \"angle_D\" : -PI*2/7,\n \"color_circle\" : YELLOW,\n \"color_ABD\" : BLUE,\n }\n def construct(self):\n 
circle_ABD = Circle(radius = self.r, color = self.color_ABD, stroke_width = 3)\n circle_ABD.shift(0.2*LEFT)\n dot_A, dot_B, dot_C, dot_D = dots = VGroup(*[\n Dot(circle_ABD.point_at_angle(angle % TAU), color = WHITE)\n for angle in (self.angle_A, self.angle_B, self.angle_C, self.angle_D)\n ])\n dot_A.set_color(self.color_circle)\n dot_C.shift(0.4*RIGHT)\n circle = Circle(radius = self.R, color = self.color_circle, stroke_width = 5)\n circle.move_to(dot_A.get_center())\n remark_circle = TextMobject(\"反演圆\", color = self.color_circle)\n remark_circle.next_to(circle.get_bottom(), UP)\n label_A, label_B, label_C, label_D = dot_labels = VGroup(*[\n DotLabel(text, dot, position = position, label_buff = 0.2)\n for text, dot, position in zip(\n [\"A\", \"B\", \"C\", \"D\"], dots, [DL, UP, DOWN, DOWN]\n )\n ])\n label_A.set_color(self.color_circle)\n dot_Bi, dot_Ci, dot_Di = inv_dots = VGroup(*[\n InversedDot(dot, circle, is_hollow = False, color = WHITE)\n for dot in (dot_B, dot_C, dot_D)\n ])\n label_Bi, label_Ci, label_Di = inv_dot_labels = VGroup(*[\n DotLabel(text, dot, position = RIGHT, label_buff = 0.2)\n for text, dot in zip([\"B'\", \"C'\", \"D'\"], [dot_Bi, dot_Ci, dot_Di])\n ])\n lines = VGroup(*[\n TwoDotsSegment(dot_1, dot_2, stroke_width = 1)\n for dot_1, dot_2 in (\n [dot_A, dot_B], [dot_A, dot_C], [dot_A, dot_D],\n [dot_B, dot_C], [dot_B, dot_D], [dot_C, dot_D],\n [dot_A, dot_Bi], [dot_A, dot_Ci], [dot_A, dot_Di],\n [dot_Bi, dot_Ci], [dot_Bi, dot_Di], [dot_Ci, dot_Di],\n )\n ])\n inv_circle_ABD = InversedVMobject(circle_ABD, circle, use_dashed_vmob = False)\n inv_circle_ABD.add_updater(lambda m: m.set_color(self.color_ABD))\n inv_circle_ABD.add_updater(lambda m: m.set_stroke(width = 2))\n self.add(circle, remark_circle, circle_ABD, inv_circle_ABD)\n self.add(dots, dot_labels, inv_dots, inv_dot_labels, lines)\n self.add()\n self.wait()\n\n\n#####\n## Inversion Advanced P1 Scenes\nclass KissingCirclesPuzzle(Scene):\n def construct(self):\n self.show_figure()\n self.show_question()\n\n def show_figure(self):\n type_text_1 = TextMobject(\"外切-外切-外切\")\n type_text_2 = TextMobject(\"内切-内切-外切\")\n type_text_1.move_to(LEFT_SIDE/2)\n type_text_2.move_to(RIGHT_SIDE/2)\n type_text_1.to_edge(DOWN)\n type_text_2.to_edge(DOWN)\n dot_l1, dot_l2, dot_l3 = dots_l = VGroup(*[\n VectorizedPoint(np.array([coords[0], coords[1], 0]), color = BLUE)\n for coords in [(-3.9, 1.5), (-4.9, 0.0), (-2.8, -1.0)]\n ])\n dot_r1, dot_r2, dot_r3 = dots_r = VGroup(*[\n VectorizedPoint(np.array([coords[0], coords[1], 0]), color = BLUE)\n for coords in [(4.6, 0.3), (3.9, 0.6), (3.5, 1.6)]\n ])\n dfc_l = DescartesFourCircles(*dots_l, show_new_circles = False)\n dfc_r = DescartesFourCircles(*dots_r, show_new_circles = False, outer_circle_index = 2)\n for dfc in [dfc_l, dfc_r]:\n for mob in dfc.get_orig_circles():\n mob.set_stroke(width = 2, color = BLUE)\n self.add(type_text_1, type_text_2)\n self.add(dfc_l, dfc_r)\n self.dfc_l = dfc_l\n self.dfc_r = dfc_r\n self.dots_l = dots_l\n self.dots_r = dots_r\n\n def show_question(self):\n question = TextMobject(\"能否添加第四个圆,使之与其他三个圆都相切?\")\n question.to_edge(UP, buff = 0.2)\n self.add(question)\n self.wait()\n\n \nclass KissingCirclesSimplified(Scene):\n def construct(self):\n line1 = ExtendedLine(UL, UR)\n line2 = ExtendedLine(DL, DR)\n center_circle = Circle(radius = 1)\n figure_group = VGroup(line1, line2, center_circle)\n for mob in figure_group:\n mob.set_stroke(width = 2, color = BLUE)\n question = TextMobject(\"能否添加第四个“圆”,使之与其他三个“圆”都相切?\")\n question.next_to(figure_group, 
UP, buff = 0.5)\n group = VGroup(question, figure_group)\n group.move_to(ORIGIN)\n self.add(group)\n self.wait()\n\n\nclass KissingCirclesSimplifiedAnswer(Scene):\n def construct(self):\n line1 = ExtendedLine(UL, UR, stroke_width = 2, color = BLUE)\n line2 = ExtendedLine(DL, DR, stroke_width = 2, color = BLUE)\n center_circle = Circle(radius = 1, stroke_width = 2, color = BLUE)\n new_circles = VGroup(*[\n Circle(radius = 1, color = color, fill_opacity = 0.1, stroke_width = 5) \\\n .next_to(center_circle, direction, buff = 0)\n for direction, color in zip([LEFT, RIGHT], [RED, ORANGE])\n ])\n numbers = VGroup(*[\n TexMobject(f\"{num}\", color = circle.get_color()).move_to(circle.get_center())\n for num, circle in zip([\"1\", \"2\"], new_circles)\n ])\n group = VGroup(line1, line2, center_circle, new_circles, numbers)\n group.move_to(ORIGIN)\n self.add(group)\n self.wait()\n\n\nclass KissingCirclesSimplifiedExplanation(Scene):\n CONFIG = {\n \"dashed_vmob_config\" : {\n \"num_dashes\" : 30,\n \"positive_space_ratio\" : 0.6,\n },\n \"line_colors\" : [GREEN, BLUE],\n \"center_color\" : MAROON_B,\n \"circle_colors\" : [RED, ORANGE],\n }\n def construct(self):\n self.add_backgrounds()\n self.show_process()\n\n def add_backgrounds(self):\n N = 5\n line1 = Line(UP + N*LEFT, UP + N*RIGHT, stroke_width = 2, color = self.line_colors[0])\n line2 = Line(DOWN + N*LEFT, DOWN + N*RIGHT, stroke_width = 2, color = self.line_colors[1])\n center_circle = FineCircle(radius = 1, stroke_width = 2, color = self.center_color)\n new_circle1 = FineCircle(radius = 1, stroke_width = 5, color = self.circle_colors[0])\n new_circle1.next_to(center_circle, LEFT, buff = 0)\n new_circle2 = FineCircle(radius = 1, stroke_width = 5, color = self.circle_colors[1])\n new_circle2.next_to(center_circle, RIGHT, buff = 0)\n inv_old_group = VGroup(line1, line2, center_circle)\n inv_new_group = VGroup(new_circle1, new_circle2)\n inv_group = VGroup(inv_old_group, inv_new_group)\n inv_group.rotate(-PI*2/5)\n inv_group.shift(3*RIGHT)\n circle = FineCircle(radius = 3.5, color = YELLOW)\n circle.shift(2*LEFT)\n circle_center = Dot(circle.get_center(), color = YELLOW)\n remark_circle = TextMobject(\"反演圆\", color = YELLOW)\n remark_circle.next_to(circle.get_bottom(), UP)\n remark_center = VGroup(*[\n Arrow(DL, UR, color = YELLOW, buff = 0).scale(0.3),\n TextMobject(\"反演中心\", color = YELLOW).scale(0.8),\n ])\n remark_center.arrange_submobjects(DL, buff = 0)\n remark_center.next_to(circle_center, DL, buff = 0.1)\n orig_old_group = VGroup(*[\n InversedVMobject(mob, circle, use_dashed_vmob = False, match_original_style = True)\n for mob in inv_old_group\n ])\n orig_new_group = VGroup(*[\n InversedVMobject(mob, circle, use_dashed_vmob = False, match_original_style = True)\n for mob in inv_new_group\n ])\n for mob in orig_old_group:\n mob.clear_updaters()\n mob.set_stroke(width = 2)\n for mob in orig_new_group:\n mob.clear_updaters()\n mob.set_stroke(width = 5)\n mob.set_fill(opacity = 0.1)\n self.add(orig_old_group)\n self.add(circle, circle_center, remark_circle, remark_center)\n self.circle = circle\n self.inv_old_group = inv_old_group\n self.inv_new_group = inv_new_group\n self.orig_old_group = orig_old_group\n self.orig_new_group = orig_new_group\n \n def show_process(self):\n dashed_inv_old_group = VGroup(*[\n DashedVMobject(mob, **self.dashed_vmob_config)\n for mob in self.inv_old_group\n ])\n dashed_inv_new_group = VGroup(*[\n DashedVMobject(mob, **self.dashed_vmob_config)\n for mob in self.inv_new_group\n ])\n 
self.play(ShowCreation(dashed_inv_old_group, lag_ratio = 0.05), run_time = 3)\n self.wait()\n dashed_copys = VGroup(*[dashed_inv_old_group[-1].deepcopy() for k in range(2)])\n dashed_copys.generate_target()\n for mob_copy, mob_template in zip(dashed_copys.target, dashed_inv_new_group):\n mob_copy.match_style(mob_template)\n mob_copy.move_to(mob_template.get_center())\n self.play(MoveToTarget(dashed_copys), run_time = 3)\n self.remove(dashed_copys)\n self.add(dashed_inv_new_group)\n self.wait()\n self.play(DrawBorderThenFill(self.orig_new_group), run_time = 3)\n self.wait(2)\n self.play(\n FadeOut(dashed_inv_new_group),\n FadeOut(dashed_inv_old_group),\n FadeOut(self.orig_new_group),\n )\n self.wait()\n\n\nclass DifferentTangentTypesWithSameConclusion(KissingCirclesPuzzle):\n CONFIG = {\n \"random_seed\" : 570,\n \"num_of_nudges\" : 5, \n \"max_step\" : 0.5,\n \"color_1\" : ORANGE,\n \"color_2\" : RED,\n }\n def construct(self):\n super().show_figure()\n self.dots_l.save_state()\n self.dots_r.save_state()\n for dfc in [self.dfc_l, self.dfc_r]:\n dfc.add_new_circles()\n dfc.get_orig_circles().set_stroke(width = 2)\n c4_1, c4_2 = dfc.get_new_circles()\n c4_1.set_color(self.color_1)\n c4_2.set_color(self.color_2)\n self.add(self.dfc_l, self.dfc_r)\n for k in range(self.num_of_nudges):\n for dot in it.chain(self.dots_l, self.dots_r):\n dot.generate_target()\n dot.target.shift(get_random_vector(self.max_step))\n anims = AnimationGroup(*[\n MoveToTarget(dot, path_arc = PI/3., run_time = 1.5)\n for dot in it.chain(self.dots_l, self.dots_r)\n ], run_time = 2)\n self.play(anims)\n self.wait()\n self.play(self.dots_l.restore, self.dots_r.restore, run_time = 1.5)\n\n\nclass LineToCircleInversionRevisited(LineToCircleInversion):\n def construct(self):\n super().construct()\n self.remove_conclusions()\n self.add_explanation()\n\n def remove_conclusions(self):\n self.remove(self.bg_rect)\n self.remove(self.conclusions)\n\n def add_explanation(self):\n radius = Line(\n self.circle_O.get_left(), self.circle_O.get_center(),\n color = self.color_circle, stroke_width = 1,\n )\n radius_text = TexMobject(\"R\", color = self.color_circle)\n radius_text.next_to(radius, UP, buff = 0.1)\n radius_group = VGroup(radius, radius_text)\n radius_group.rotate(-PI/12, about_point = self.circle_O.get_center())\n remark_length = TexMobject(\"|OA| = d\", \"\\\\Downarrow\", \"|OA'| = \\dfrac{R^2}{d}\")\n remark_length.arrange_submobjects(DOWN)\n remark_length.scale(1.2)\n remark_length[0].set_color(self.color_orig)\n remark_length[-1].set_color(self.color_inv)\n remark_length.to_edge(RIGHT)\n self.add(radius_group, remark_length)\n self.wait()\n\n\nclass CircleToCircleInversionRevisited(CircleToCircleInversionProof):\n def construct(self):\n super().add_backgrounds()\n super().show_left_and_right_points()\n super().show_random_point()\n super().show_similar_triangles()\n self.arrange_elements()\n self.add_explanation()\n\n def arrange_elements(self):\n self.angle_tracker.set_value(PI/3)\n self.remove(self.remark_O)\n self.remove(self.ai_OAP, self.ai_OBP, self.ai_OPiAi, self.ai_OPiBi)\n self.add(self.inv_circle)\n self.add(self.dots_P, self.labels_P)\n self.add(self.dots_AB, self.labels_AB)\n self.add(self.aux_lines, self.rtais)\n dot_I = Dot(self.inv_circle.get_center())\n label_I = DotLabel(\"I\", dot_I, position = DOWN, label_buff = 0.15).scale(0.8)\n for mob in (dot_I, label_I):\n mob.set_sheen_direction(RIGHT)\n mob.set_color([self.color_B, self.color_A])\n remark_I = TextMobject(\"反形的圆心(并非$C$的反点!)\")\n 
remark_I.scale(0.5)\n remark_I.next_to(label_I, DOWN, buff = 0.1)\n self.add(dot_I, label_I, remark_I)\n\n def add_explanation(self):\n for circle, color, text, angle in zip(\n [self.circle_O, self.circle_C], [self.color_O, MAROON_B],\n [\"R\", \"r\"], [-PI/12, PI/3]\n ):\n radius = Line(\n circle.get_left(), circle.get_center(),\n color = color, stroke_width = 1,\n )\n radius_text = TexMobject(text, color = color)\n radius_text.next_to(radius, UP, buff = 0.1)\n radius_group = VGroup(radius, radius_text)\n radius_group.rotate(angle, about_point = circle.get_center())\n self.add(radius_group)\n remark_length_A = TexMobject(\"|OA| = d-r\", \"\\\\Rightarrow\", \"|OA'| = \\dfrac{R^2}{d-r}\")\n remark_length_B = TexMobject(\"|OB| = d+r\", \"\\\\Rightarrow\", \"|OB'| = \\dfrac{R^2}{d+r}\")\n remark_length_A[0].set_color(self.color_A)\n remark_length_A[-1].set_color(self.color_A)\n remark_length_B[0].set_color(self.color_B)\n remark_length_B[-1].set_color(self.color_B)\n length_group = VGroup(remark_length_A, remark_length_B)\n length_group.arrange_submobjects(DOWN, buff = 0.4)\n brace = Brace(length_group, RIGHT)\n arrow = TexMobject(\"\\\\Rightarrow\")\n remarks = VGroup(\n TexMobject(\"|A'B'| = \\\\dfrac{2 R^2 r}{|d^2-r^2|}\"),\n TexMobject(\"|OI| = \\\\dfrac{R^2 d}{|d^2-r^2|}\")\n )\n remarks.arrange_submobjects(DOWN, aligned_edge = LEFT)\n remarks.set_color(MAROON_B)\n result_group = VGroup(brace, arrow, remarks)\n result_group.arrange_submobjects(RIGHT)\n result_group.next_to(length_group, RIGHT)\n remark_group = VGroup(length_group, result_group)\n remark_group.center().to_edge(DOWN, buff = 0.2)\n bg_rect = BackgroundRectangle(remark_group, fill_opacity = 0.9)\n self.add(bg_rect, remark_group)\n self.wait()\n\n\nclass DescartesTheoremExamples(Scene):\n CONFIG = {\n \"circle_colors\" : [MAROON_B, RED, GREEN, BLUE],\n \"curvs_outer\" : [3, 6, 7, 34],\n \"curvs_inner\" : [10, 15, 19, -6],\n }\n def setup(self):\n self.text_color_map = dict(\n zip([\"{k_1}\", \"{k_2}\", \"{k_3}\", \"{k_4}\"], self.circle_colors)\n )\n\n def construct(self):\n self.add_title()\n self.add_outer_dfc()\n self.add_inner_dfc()\n\n def add_title(self):\n title = TexMobject(\n \"\\\\left(\", \"{k_1}\", \"+\", \"{k_2}\", \"+\", \"{k_3}\", \"+\", \"{k_4}\", \"\\\\right) ^2\",\n \"= 2 \\\\left(\", \"{k_1}\",\"^2 +\",\"{k_2}\",\"^2 +\",\"{k_3}\",\"^2 +\",\"{k_4}\",\"^2\", \"\\\\right)\"\n )\n title.set_color_by_tex_to_color_map(self.text_color_map)\n title.scale(1.2)\n title.to_edge(UP, buff = 0.2)\n self.add(title)\n\n def add_outer_dfc(self):\n r1, r2, r3, r4 = [1./curv for curv in self.curvs_outer]\n p1, p2, p3 = [\n VectorizedPoint(center)\n for center in calc_centers_by_radii(r1, r2, r3, init_angle = PI*2/3)\n ]\n outer_dfc = DescartesFourCircles(p1, p2, p3, show_new_circles = False)\n c1, c2, c3 = outer_dfc.get_orig_circles()\n c4 = outer_dfc.get_new_circles()[0]\n outer_circles = VGroup(c1, c2, c3, c4)\n outer_circles.clear_updaters()\n outer_circles.set_height(5.5)\n outer_circles.to_corner(DL)\n texts = VGroup(*[\n TexMobject(f\"k_{num}\", \"=\", f\"{curv}\") \\\n .scale(0.8) \\\n .move_to(circle.get_center())\n for num, curv, circle in zip(range(1, 5), self.curvs_outer, outer_circles)\n ])\n for circle, text, color in zip(outer_circles, texts, self.circle_colors):\n circle.set_color(color)\n text.set_color(color)\n texts[-1].shift(2.5*RIGHT+1.2*UP)\n arrow = Arrow(\n texts[-1].get_bottom(), outer_circles[-1].get_right(),\n path_arc = -PI*2/3, buff = 0.1,\n ).set_color(self.circle_colors[-1])\n outer_group = 
VGroup(outer_circles, texts, arrow)\n self.add(outer_group)\n\n def add_inner_dfc(self):\n r1, r2, r3, r4 = [1./curv for curv in self.curvs_inner]\n p1, p2, p3 = [\n VectorizedPoint(center)\n for center in calc_centers_by_radii(r1, r2, r3, init_angle = -PI/7)\n ]\n inner_dfc = DescartesFourCircles(p1, p2, p3, show_new_circles = False)\n c1, c2, c3 = inner_dfc.get_orig_circles()\n c4 = inner_dfc.get_new_circles()[1]\n inner_circles = VGroup(c1, c2, c3, c4)\n inner_circles.clear_updaters()\n inner_circles.set_height(5.5)\n inner_circles.to_corner(DR)\n inner_texts = VGroup(*[\n TexMobject(f\"k_{num}\", \"=\", f\"{curv}\") \\\n .scale(0.8) \\\n .move_to(circle.get_center())\n for num, curv, circle in zip(range(1, 5), self.curvs_inner, inner_circles)\n ])\n for circle, text, color in zip(inner_circles, inner_texts, self.circle_colors):\n circle.set_color(color)\n text.set_color(color)\n inner_texts[-1].shift(2.8*LEFT+2.7*UP)\n inner_arrow = Arrow(\n inner_texts[-1].get_critical_point(DOWN),\n inner_texts[-1].get_critical_point(DOWN)+0.7*DR,\n buff = 0.1,\n ).set_color(self.circle_colors[-1])\n inner_group = VGroup(inner_circles, inner_texts, inner_arrow)\n self.add(inner_group)\n self.wait()\n self.inner_circles = inner_circles\n self.inner_texts = inner_texts\n self.inner_arrow = inner_arrow\n\n\nclass DFCInversionProofP1(DescartesTheoremExamples):\n CONFIG = {\n \"remark_scale_text\" : \"示意图,图像并非真实比例\",\n \"orig_label_texts\" : [\"C_1\", \"C_2\", \"C_3\", \"C_4\"],\n \"inv_label_texts\" : [\"C_1'\", \"C_2'\", \"C_3'\", \"C_4'\"],\n }\n def construct(self):\n super().add_inner_dfc()\n self.arrange_elements()\n self.add_labels()\n self.add_inversion_center()\n self.add_mapsto_symbol()\n self.add_not_to_scale_remark()\n self.wait()\n\n def arrange_elements(self):\n self.remove(self.inner_texts, self.inner_arrow)\n self.inner_circles.center().shift(4*UP)\n normal_form = FourCirclesNormalForm()\n normal_form.shift(4*DOWN)\n self.add(normal_form)\n self.normal_form = normal_form\n\n def add_labels(self):\n orig_labels = VGroup()\n for n, (circle, text) in enumerate(zip(self.inner_circles, self.orig_label_texts)):\n label = TexMobject(text).scale(1.2)\n label.set_color(circle.get_color())\n label.move_to(circle.get_center())\n orig_labels.add(label)\n inv_labels = VGroup()\n for n, (circle, text) in enumerate(zip(self.normal_form, self.inv_label_texts)):\n label = TexMobject(text).scale(1.2)\n label.set_color(circle.get_color())\n label.move_to(circle.get_center())\n inv_labels.add(label)\n c1, c2, c3, c4 = self.inner_circles\n l1, l2, l3, l4 = orig_labels\n c1i, c2i, c3i, c4i = self.normal_form\n l1i, l2i, l3i, l4i = inv_labels\n l4.next_to(c4.get_bottom(), UP, buff = 0.3)\n l3i.next_to(c3i, DOWN).to_edge(RIGHT)\n l4i.next_to(c4i, UP).to_edge(RIGHT)\n self.add(orig_labels, inv_labels)\n self.orig_labels = orig_labels\n self.inv_labels = inv_labels\n\n def add_inversion_center(self):\n c1, c2, c3, c4 = self.inner_circles\n inv_center = get_tangent_point(c3, c4)\n dot_O = Dot(inv_center, color = YELLOW)\n label_O = TexMobject(\"O\", color = YELLOW).next_to(dot_O, UP)\n remark_O = TextMobject(\"反演中心\", color = YELLOW)\n remark_O.next_to(dot_O, RIGHT, buff = 1.5)\n arrow_O = Arrow(remark_O.get_left(), dot_O.get_right(), color = YELLOW, buff = 0.2)\n orig_center_group = VGroup(dot_O, label_O, remark_O, arrow_O)\n inv_dot_O = VectorizedPoint()\n inv_dot_O.next_to(self.normal_form[-1], UP, buff = 1.4)\n inv_dot_O.shift(2*RIGHT)\n inv_center_group = orig_center_group.deepcopy()\n 
inv_center_group.shift(inv_dot_O.get_center() - dot_O.get_center())\n self.add(orig_center_group, inv_center_group)\n self.orig_center_group = orig_center_group\n self.inv_center_group = inv_center_group\n\n def add_mapsto_symbol(self):\n mapsto = TexMobject(\"\\\\mapsto\")\n mapsto.rotate(-PI/2)\n mapsto.scale(2.5)\n mapsto.next_to(self.inner_circles, DOWN)\n remark_mapsto = TextMobject(\"反演变换\")\n remark_mapsto.next_to(mapsto, LEFT)\n self.add(mapsto, remark_mapsto)\n\n def add_not_to_scale_remark(self):\n remark_scale = TextMobject(\"(\" + self.remark_scale_text + \")\")\n remark_scale.scale(0.75)\n remark_scale.next_to(6.5*DL, RIGHT, buff = 0)\n self.add(remark_scale)\n\n\nclass DFCInversionProofP2(DFCInversionProofP1):\n CONFIG = {\n \"remark_scale_text\" : \"示意图,反演圆未标出,且图像并非真实比例\",\n \"inv_label_texts\" : [\"C_1'\", \"C_2'\", \"C_3':y=-1\", \"C_4':y=1\"],\n \"inv_center_coord_text\" : \"(x_0, y_0) \\\\, (y_0>1)\",\n \"circle_center_coord_texts\" : [\"(-1,0)\", \"(1,0)\"],\n }\n def construct(self):\n super().construct()\n self.change_center_remarks()\n self.add_coord_system()\n self.change_inv_labels()\n self.wait()\n\n def change_center_remarks(self):\n for center_group in (self.orig_center_group, self.inv_center_group):\n dot, label, remark, arrow = center_group\n self.remove(remark, arrow)\n if center_group is self.inv_center_group:\n coord = TexMobject(self.inv_center_coord_text)\n coord.next_to(dot, RIGHT)\n coord.set_color(dot.get_color())\n self.add(coord)\n\n def add_coord_system(self):\n c1, c2, c3, c4 = self.normal_form\n center_point = (c1.get_center() + c2.get_center()) / 2\n unit_size = c1.get_height()/2\n coord_system = Axes(\n center_point = center_point,\n number_line_config = {\"unit_size\" : unit_size},\n y_min = -1.8, y_max = 2.8,\n )\n self.add(coord_system)\n self.coord_system = coord_system\n\n def change_inv_labels(self):\n l1i, l2i, l3i, l4i = self.inv_labels\n for label, x_coord, coord_text in zip([l1i, l2i], [-1, 1], self.circle_center_coord_texts):\n center = self.coord_system.c2p(x_coord, 0)\n label.next_to(center, UP)\n dot_i = Dot(center, radius = 0.1).set_color(label.get_color())\n coord_i = TexMobject(coord_text).set_color(label.get_color()).next_to(center, DOWN)\n self.add(dot_i, coord_i)\n\n\n#####\n## Inversion Advanced P2 Scenes\nclass ApollonianGasketConstruction(ApollonianGasketScene):\n CONFIG = {\n \"max_iter\" : 8,\n \"curvatures\" : [2, 2, 3],\n \"init_angle\" : 0,\n \"curv_thres\" : 30000,\n \"ag_config\": {\n \"agc_config\" : {\n \"radius_thres\" : 1e-3,\n \"circle_color\" : BLUE,\n \"label_color\" : WHITE,\n },\n },\n \"color_curr\" : YELLOW,\n \"wait_time\" : 2,\n }\n def construct(self):\n r1, r2, r3 = [1./curv for curv in self.curvatures]\n p1, p2, p3 = calc_centers_by_radii(r1, r2, r3, init_angle = self.init_angle)\n agc1 = AGCircle(p1, r1, parents = None, **self.ag_config[\"agc_config\"])\n agc2 = AGCircle(p2, r2, parents = None, **self.ag_config[\"agc_config\"])\n agc3 = AGCircle(p3, r3, parents = None, **self.ag_config[\"agc_config\"])\n remark = TextMobject(\"(圆内数字为该圆的曲率)\")\n remark.scale(0.75).to_corner(DL)\n self.add(remark)\n for k in range(self.max_iter):\n agcs_copy = [agc.deepcopy() for agc in (agc1, agc2, agc3)]\n ag = ApollonianGasket(\n *agcs_copy, num_iter = k,\n curv_thres = self.curv_thres, **self.ag_config\n )\n iter_num = VGroup(\n TextMobject(\"迭代次数:\"), TexMobject(f\"{k}\")\n ).arrange_submobjects(RIGHT).scale(1.5)\n iter_num.to_edge(LEFT, buff = 1)\n ag.scale(3.8)\n ag.shift(np.array([0, 3.8, 0]) - ag.get_top() + 
3*RIGHT)\n VGroup(*ag.agc_list[-1]).set_color(self.color_curr)\n self.add(ag, iter_num)\n self.wait(self.wait_time)\n if k != self.max_iter-1:\n self.remove(ag, iter_num)\n \n\nclass ApollonianGasketExample1(Scene):\n CONFIG = {\n \"max_iter\" : 20,\n \"curvatures\" : [3, 6, 7],\n \"curvature_texts\" : [-2, 3, 6, 7],\n \"init_angle\" : 0,\n \"curv_thres\" : 4000,\n \"ag_config\": {\n \"agc_config\" : {\n \"radius_thres\" : 1e-3,\n \"circle_color\" : BLUE,\n \"label_color\" : WHITE,\n },\n },\n \"ag_scaling_factor\" : 5.2,\n }\n def construct(self):\n r1, r2, r3 = [1./curv for curv in self.curvatures]\n p1, p2, p3 = calc_centers_by_radii(r1, r2, r3, init_angle = self.init_angle)\n agc1 = AGCircle(p1, r1, parents = None, **self.ag_config[\"agc_config\"])\n agc2 = AGCircle(p2, r2, parents = None, **self.ag_config[\"agc_config\"])\n agc3 = AGCircle(p3, r3, parents = None, **self.ag_config[\"agc_config\"])\n ag_seed = ApollonianGasket(\n *[agc.deepcopy() for agc in (agc1, agc2, agc3)],\n num_iter = 0, curv_thres = self.curv_thres, **self.ag_config\n )\n ag_result = ApollonianGasket(\n *[agc.deepcopy() for agc in (agc1, agc2, agc3)],\n num_iter = self.max_iter, curv_thres = self.curv_thres, **self.ag_config\n )\n ag_seed_center = ag_seed[0][0].get_right()\n ag_result_center = ag_result[0][0].get_right()\n arrow = Arrow(LEFT, RIGHT)\n figure_group = VGroup(ag_seed, ag_result, arrow)\n for ag, center, direction in zip(\n [ag_seed, ag_result], [ag_seed_center, ag_result_center], [4*LEFT, 4*RIGHT]):\n ag.scale(self.ag_scaling_factor)\n ag.shift(direction - center)\n figure_group.shift(DOWN)\n k1, k2, k3, k4 = list(map(str, self.curvature_texts))\n title = TexMobject(\n f\"({k1}+{k2}+{k3}+{k4})^2 = 2\\\\left[({k1})^2+{k2}^2+{k3}^2+{k4}^2 \\\\right]\"\n )\n title.set_width(13)\n title.set_color(YELLOW)\n title.to_edge(UP)\n self.add(figure_group, title)\n self.wait()\n\n\nclass ApollonianGasketExample2(ApollonianGasketExample1):\n CONFIG = {\n \"max_iter\" : 20,\n \"curvatures\" : [5, 8, 12],\n \"curvature_texts\" : [-3, 5, 8, 12],\n \"curv_thres\" : 5000,\n \"ag_config\": {\n \"agc_config\" : {\n \"radius_thres\" : 5e-4,\n \"circle_color\" : BLUE,\n \"label_color\" : WHITE,\n },\n },\n \"ag_scaling_factor\" : 8,\n }\n\n\nclass LoxodromicSpiralInTangentCircles(Scene):\n CONFIG = {\n \"max_iter\" : 20,\n \"agc_config\" : {\n \"radius_thres\" : 1,\n \"circle_color\" : BLUE,\n \"label_color\" : WHITE,\n },\n \"curve_config\" : {\n \"color\" : YELLOW,\n \"stroke_width\" : 2,\n },\n \"alpha\" : 0.6,\n \"dashed_line_config\" : {\n \"color\" : GREY,\n \"stroke_width\" : 0.5,\n \"num_dashes\" : 200,\n \"positive_space_ratio\" : 0.6,\n }\n }\n def construct(self):\n self.generate_circles()\n self.generate_curves()\n self.generate_labels()\n self.generate_lines()\n self.add_elements()\n self.zooming_in()\n\n def generate_circles(self):\n agcm2 = AGCircle(2/3.*UP, 1/3., **self.agc_config)\n agcm1 = AGCircle(RIGHT/2, 1/2., **self.agc_config)\n agczr = AGCircle(ORIGIN, -1, **self.agc_config)\n agcp1 = AGCircle(LEFT/2, 1/2., **self.agc_config)\n agcp2 = AGCircle(2/3.*DOWN, 1/3., **self.agc_config)\n agc_list = [agcm2, agcm1, agczr, agcp1, agcp2]\n for n in range(self.max_iter):\n A, B, C, known_agc = agc_list[:4]\n agc_m_k, agc_m_c = calc_new_agc_info(A, B, C, known_agc = known_agc)\n agc_m = AGCircle(agc_m_c, 1./agc_m_k, parents = (A, B, C), **self.agc_config)\n known_agc, C, B, A = agc_list[-4:]\n agc_p_k, agc_p_c = calc_new_agc_info(C, B, A, known_agc = known_agc)\n agc_p = AGCircle(agc_p_c, 1./agc_p_k, parents 
= (C, B, A), **self.agc_config)\n agc_list.insert(0, agc_m)\n agc_list.append(agc_p)\n agc_group = VGroup(*agc_list)\n agc_group.set_height(7.8)\n self.agc_list = agc_list\n self.agc_group = agc_group\n\n def generate_curves(self):\n agc_ps = self.agc_list[-self.max_iter-4:]\n agc_ps_points = []\n loxo_curve_p_solid = VMobject(**self.curve_config)\n for k in range(len(agc_ps)-2):\n if k != 0:\n c1, c2, c3 = agc_ps[k], agc_ps[k+1], agc_ps[k+2]\n pt1 = get_tangent_point(c1, c2)\n pt2 = get_tangent_point(c2, c3)\n p = c2.get_center()\n if k != 1:\n agc_ps_points.extend(\n [pt1, p*(1-self.alpha)+pt1*self.alpha, p*(1-self.alpha)+pt2*self.alpha, pt2]\n )\n else:\n agc_ps_points.extend(\n [pt1, p*0.7+pt1*0.3, p*0.6+pt2*0.4, pt2]\n )\n else:\n c1, c2 = agc_ps[1], agc_ps[2]\n pt = get_tangent_point(c1, c2)\n agc_ps_points.extend([8*LEFT, 7*LEFT, 6*LEFT, pt])\n loxo_curve_p_solid.append_points(agc_ps_points)\n loxo_curve_m_solid = loxo_curve_p_solid.deepcopy()\n loxo_curve_m_solid.rotate(PI, about_point = self.agc_group.get_center())\n self.loxo_curve_p_solid = loxo_curve_p_solid\n self.loxo_curve_m_solid = loxo_curve_m_solid\n \n def generate_labels(self):\n labels = VGroup(*[\n TexMobject(\"C_{%d}\" % num, background_stroke_width = 0)\n for num in range(-self.max_iter-2, self.max_iter+3)\n ])\n for label, circle in zip(labels, self.agc_group):\n label.set_height(circle.get_height()*0.15)\n label.move_to(circle.get_center())\n label_c0 = labels[self.max_iter+2]\n label_c0.set_height(0.8)\n label_c0.next_to(self.agc_group.get_critical_point(UL), DR, buff = 0.1)\n self.labels = labels\n\n def generate_lines(self):\n agc_ps = self.agc_list[-self.max_iter-2:]\n line_p_solid = VMobject(**self.dashed_line_config)\n line_p_solid_corners = [8*LEFT]\n for circle in agc_ps:\n line_p_solid_corners.append(circle.get_center())\n line_p_solid.set_points_as_corners(line_p_solid_corners)\n line_m_solid = line_p_solid.deepcopy()\n line_m_solid.rotate(PI, about_point = self.agc_group.get_center())\n self.line_p_solid = line_p_solid\n self.line_m_solid = line_m_solid\n\n def add_elements(self):\n figure = VGroup(\n self.agc_group, self.loxo_curve_p_solid, self.loxo_curve_m_solid,\n self.line_p_solid, self.line_m_solid, self.labels,\n )\n self.add(figure)\n self.figure = figure\n\n def zooming_in(self):\n self.figure.save_state()\n self.wait(0.5)\n self.play(\n ApplyMethod(self.figure.shift, -self.agc_group[-1].get_center()),\n run_time = 2,\n )\n self.wait()\n for k in range(10):\n self.play(\n ApplyMethod(self.figure.scale, 2.5, {\"about_point\" : self.agc_group[-1].get_center()}),\n run_time = 2,\n )\n self.wait()\n self.play(self.figure.restore, run_time = 15)\n self.wait(2)\n\n\nclass ShowFordCircles(ZoomInOnFordCircles):\n CONFIG = {\n \"q_max\" : 30,\n }\n def construct(self):\n self.setup_axes()\n self.setup_circles_and_labels()\n self.add_remarks()\n self.first_zoom_in()\n self.wait()\n\n def first_zoom_in(self):\n self.zoom_in_on(1/2., 6)\n\n def add_remarks(self):\n nl_text = TextMobject(\"数轴\")\n nl_arrow = Arrow(ORIGIN, UP).match_height(nl_text)\n nl_remark = VGroup(nl_arrow, nl_text)\n nl_remark.scale(0.8)\n nl_remark.set_color(LIGHT_GREY)\n nl_remark.arrange_submobjects(RIGHT, buff = 0.1)\n nl_remark.next_to(self.axes.coords_to_point(0, 0), DOWN, buff = 0.1)\n nl_remark.to_edge(LEFT, buff = 0.15)\n frac_remark = TextMobject(\"圆内分数为圆心横坐标\")\n frac_remark.scale(0.6)\n frac_remark.to_corner(DL, buff = 0.15)\n self.add(nl_remark, frac_remark)\n\n\nclass ShowFordCirclesDetails(ShowFordCircles):\n CONFIG = 
{\n \"q_max\" : 100,\n }\n def construct(self):\n super().construct()\n self.further_zoom_in()\n\n def setup_circles_and_labels(self):\n circles = VGroup()\n labels = VGroup()\n for q in range(1, self.q_max+1):\n for p in get_coprime_numers_by_denom(q):\n if (q <= 40) or (0.6 <= p/q <= 0.8):\n circle = self.generate_circle_by_fraction(p, q)\n circle.add_updater(\n lambda m: m.set_stroke(width = get_stroke_width_by_height(m.get_height()))\n )\n label = AssembledFraction(p, q)\n label.set_height(circle.get_height() * self.label_height_factor)\n label.move_to(circle.get_center())\n circles.add(circle)\n labels.add(label)\n self.add(circles, labels)\n self.circles = circles\n self.labels = labels\n\n def further_zoom_in(self):\n self.acl = VGroup(self.axes, self.circles, self.labels)\n self.acl.save_state()\n self.wait(0.5)\n self.play_zooming_animation(1/np.sqrt(2), 9, run_time = 5)\n self.wait()\n self.play_zooming_animation(0.73, 5, run_time = 4)\n self.wait()\n self.play_zooming_animation(0.74, 5, run_time = 4)\n self.wait()\n self.play(self.acl.restore, run_time = 5)\n self.wait(2)\n\n\nclass ProveFordCirclesPropertiesP1(Scene):\n CONFIG = {\n \"c1_frac\" : [2, 3],\n \"c2_frac\" : [3, 4],\n \"c3_frac\" : [5, 7],\n \"circle_config\" : {\"stroke_color\" : BLUE, \"stroke_width\" : 2,},\n \"line_config\" : {\"stroke_color\" : GREY, \"stroke_width\" : 2,},\n \"aux_line_config\" : {\"stroke_color\" : GREY, \"stroke_width\" : 0.8,},\n \"polygon_config\" : {\"fill_color\" : GREY, \"fill_opacity\" : 0.4, \"stroke_width\" : 0,},\n }\n def setup(self):\n a, b = self.c1_frac\n c, d = self.c2_frac\n p, q = self.c3_frac\n r1 = 1/(2*b**2)\n r2 = 1/(2*d**2)\n r3 = 1/(2*q**2)\n c1_center = a/b*RIGHT + r1*UP\n c2_center = c/d*RIGHT + r2*UP\n c3_center = p/q*RIGHT + r3*UP\n c1 = Circle(arc_center = c1_center, radius = r1, **self.circle_config)\n c2 = Circle(arc_center = c2_center, radius = r2, **self.circle_config)\n c3 = Circle(arc_center = c3_center, radius = r3, **self.circle_config)\n c1_dot = SmallDot(color = GREY)\n c1_dot.add_updater(lambda m: m.move_to(c1.get_center()))\n c2_dot = SmallDot(color = GREY)\n c2_dot.add_updater(lambda m: m.move_to(c2.get_center()))\n c3_dot = SmallDot(color = GREY)\n c3_dot.add_updater(lambda m: m.move_to(c3.get_center()))\n line = Line(\n 2*c1.get_bottom()-c2.get_bottom(),\n 2*c2.get_bottom()-c1.get_bottom(),\n **self.line_config\n )\n VGroup(c1, c2, c3, line).set_height(6).center().to_edge(UP)\n aux_line_1 = Line(c1.get_center(), c1.get_bottom(), **self.aux_line_config)\n aux_line_2 = Line(c2.get_center(), c2.get_bottom(), **self.aux_line_config)\n aux_line_3 = Line(c1.get_center(), c2.get_center(), **self.aux_line_config)\n aux_line_4 = Line(c1.get_bottom(), c2.get_bottom(), **self.aux_line_config) \\\n .shift(c2.get_height()/2*UP)\n polygon = Polygon(\n c1.get_center(), c2.get_center(), aux_line_4.get_start_and_end()[0],\n **self.polygon_config,\n )\n l1 = TexMobject(\"\\\\dfrac{a}{b}\").next_to(c1, DOWN)\n l2 = TexMobject(\"\\\\dfrac{c}{d}\").next_to(c2, DOWN)\n l3 = TexMobject(\"\\\\dfrac{a+c}{b+d}\").next_to(c3, DOWN)\n self.orig_group = VGroup(c1, c2, line, c1_dot, c2_dot, l1, l2)\n self.aux_group = VGroup(aux_line_1, aux_line_2, aux_line_3, aux_line_4, polygon)\n self.new_group = VGroup(c3, c3_dot, l3)\n \n def construct(self):\n self.add(self.orig_group, self.aux_group)\n self.wait()\n\n\nclass ProveFordCirclesPropertiesP2(ProveFordCirclesPropertiesP1):\n def construct(self):\n self.add(self.orig_group, self.new_group)\n self.wait()\n\n\nclass 
ShowFordCirclesFareySum(ZoomInOnFordCircles):\n pass\n # A rename, that's it.\n\n\nclass DFCInversionProofP3(DFCInversionProofP2):\n CONFIG = {\n \"remark_scale_text\" : \"示意图,反演圆未标出,且图像并非真实比例\",\n \"inv_label_texts\" : [\"C_1'\", \"C_2'\", \"C_3':\\\\mathrm{Im}(z)=-1\", \"C_4':\\\\mathrm{Im}(z)=1\"],\n \"inv_center_coord_text\" : \"z_0 = x_0+iy_0\\\\, (y_0>1)\",\n \"circle_center_coord_texts\" : [\"-1\", \"1\"],\n }\n def construct(self):\n super().construct()\n self.wait()\n\n def add_coord_system(self):\n c1, c2, c3, c4 = self.normal_form\n center_point = (c1.get_center() + c2.get_center()) / 2\n unit_size = c1.get_height()/2\n coord_system = NumberPlane(\n center_point = center_point,\n number_line_config = {\"unit_size\" : unit_size},\n y_min = -3, y_max = 3,\n background_line_style = {\n \"stroke_color\" : GREY,\n \"stroke_width\" : 1.5,\n \"stroke_opacity\" : 0.8,\n },\n )\n aux_coord_system = Axes(\n center_point = center_point,\n number_line_config = {\"unit_size\" : unit_size},\n y_min = -3, y_max = 3,\n stroke_opacity = 0.8,\n )\n self.add(coord_system, aux_coord_system)\n self.coord_system = coord_system\n\n\nclass NormalFormIn3D(ThreeDScene):\n CONFIG = {\n \"axis_unit_size\" : 1.5,\n \"axis_min\" : -1.5,\n \"axis_max\" : 2.8,\n \"resolution\" : (60, 120),\n \"plane_colors\" : [GREEN, BLUE],\n \"sphere_colors\" : [MAROON_B, RED, PINK],\n }\n def construct(self):\n self.add_3d_stuff()\n self.add_2d_stuff()\n\n def add_3d_stuff(self):\n self.set_camera_orientation(theta = 70 * DEGREES, phi = 50 * DEGREES)\n axes = ThreeDAxes(\n x_min = self.axis_min, x_max = self.axis_max,\n y_min = self.axis_min, y_max = self.axis_max,\n z_min = self.axis_min, z_max = self.axis_max,\n number_line_config = {\"unit_size\" : self.axis_unit_size},\n )\n sphere_centers = [\n axis.number_to_point(1)\n for axis in [axes.x_axis, axes.y_axis, axes.z_axis]\n ]\n radius = 1/np.sqrt(2) * self.axis_unit_size\n sphere_dots = VGroup(*[\n Sphere(\n radius = 0.08, resolution = self.resolution,\n fill_opacity = 1, stroke_width = 0,\n ).move_to(sphere_center).set_color(color)\n for sphere_center, color in zip(sphere_centers, self.sphere_colors)\n ])\n spheres = VGroup(*[\n Sphere(\n radius = radius, resolution = self.resolution,\n fill_opacity = 0.6, stroke_width = 0.5,\n ).move_to(sphere_center).set_color(color)\n for sphere_center, color in zip(sphere_centers, self.sphere_colors)\n ])\n planes = VGroup(*[\n VGroup(*[\n Square(\n side_length = 1, fill_opacity = fill_opacity,\n stroke_color = GREY, stroke_width = 0.3, stroke_opacity = 0.2,\n )\n for k in range(n**2)\n ]).arrange_in_grid(n, n, buff = 0) \\\n .apply_matrix(z_to_vector([1, 1, 1])) \\\n .move_to(np.average(sphere_centers)) \\\n .shift(radius * normalize(direction)) \\\n .set_color(color)\n for n, fill_opacity, direction, color in zip(\n [7, 8], [0.2, 0.3], [np.ones(3), -np.ones(3)], self.plane_colors,\n )\n ])\n figure_group = VGroup(axes, planes, sphere_dots, spheres)\n figure_group.shift(RIGHT*2+0.5*OUT)\n self.add(figure_group)\n self.add(axes)\n self.add(planes)\n self.add(sphere_dots, spheres)\n\n def add_2d_stuff(self):\n sphere_remarks = VGroup(*[\n TextMobject(\n \"球:圆心为\" + f\"$({int(x)},{int(y)},{int(z)})$\" + \\\n \",半径为\" + \"$\\\\dfrac{1}{\\\\sqrt{2}}$\"\n ).set_color(color)\n for (x, y, z), color in zip([RIGHT, UP, OUT], self.sphere_colors)\n ]).arrange_submobjects(DOWN)\n plane_remarks = VGroup(*[\n TexMobject(\n \"\\\\text{平面:}\" + \"x+y+z=1\" + sign + \"\\\\dfrac{\\\\sqrt{3}}{\\\\sqrt{2}\"\n ).set_color(color)\n for sign, color in 
zip([\"+\", \"-\"], self.plane_colors)\n ]).arrange_submobjects(DOWN)\n remarks = VGroup(sphere_remarks, plane_remarks)\n remarks.arrange_submobjects(DOWN, aligned_edge = LEFT)\n remarks.scale(0.8)\n remarks.to_corner(DR)\n self.add_fixed_in_frame_mobjects(remarks)\n self.wait()\n\n\n#####\n## Banner\nclass Banner_Intro(Scene):\n CONFIG = {\n \"circle_color\" : YELLOW,\n \"text_color\" : BLUE,\n \"inv_text_color\" : BLUE,\n \"circle_center\" : 0.8*UP,\n \"circle_radius\" : 3,\n \"grid_side_length\" : 0.5,\n \"x_range\" : 300,\n \"y_range\" : 300,\n \"dist_thres\" : 300,\n }\n def construct(self):\n circle = Circle(color = self.circle_color, radius = self.circle_radius, stroke_width = 5)\n circle.move_to(self.circle_center)\n dot = SmallDot(self.circle_center, color = self.circle_color)\n text = TextMobject(\"Inversion\", color = self.text_color, background_stroke_width = 3)\n text.rotate(PI/2.)\n text.move_to(0.4*RIGHT)\n text.apply_complex_function(np.exp)\n text.rotate(-PI/2.)\n text.scale(1.5)\n text.move_to(0.9*DOWN)\n inv_text = InversedVMobject(text, circle, use_dashed_vmob = False)\n inv_text.suspend_updating()\n inv_text.set_background_stroke(color = \"#303030\", width = 3)\n inv_text.set_stroke(width = 0)\n inv_text.set_fill(color = self.inv_text_color, opacity = 0.5)\n grid = VGroup(*[\n Square(\n side_length = self.grid_side_length,\n stroke_width = 0, fill_opacity = 0.3,\n fill_color = CB_DARK if (i+j)%2==0 else CB_LIGHT\n ).move_to(self.circle_center + (i*RIGHT+j*UP)*self.grid_side_length)\n for i in range(-self.x_range, self.x_range+1, 1)\n for j in range(-self.y_range, self.y_range+1, 1)\n if np.sqrt(i**2+j**2) * self.grid_side_length < self.dist_thres\n ])\n for square in grid:\n if is_close_in_R3(square.get_center(), self.circle_center):\n grid.remove(square)\n inv_grid = InversedVMobject(grid, circle, use_dashed_vmob = False)\n self.add(inv_grid, circle, dot, text, inv_text)\n self.wait()\n\n\nclass Banner_AdvancedP1(ApollonianGasketScene):\n CONFIG = {\n \"curvatures\" : [570, 968, 1112],\n \"init_angle\" : PI/7,\n \"num_iter\" : 20,\n \"curv_thres\" : 1e6,\n \"ag_config\" : {\n \"agc_config\" : {\n \"radius_thres\" : 5e-6,\n \"circle_color\" : YELLOW,\n \"label_color\" : WHITE,\n },\n },\n \"part_text\" : \"上篇\",\n }\n def construct(self):\n super().construct()\n ag = self.ag\n ag.set_height(7)\n circle_myst = ag.agc_list[0][0]\n label_myst = circle_myst.label\n label_question = TexMobject(\"???\")\n label_question.match_height(label_myst)\n label_question.move_to(label_myst)\n self.remove(label_myst)\n self.add(label_question)\n part = TextMobject(self.part_text)\n part.to_corner(DR)\n self.add(part)\n\n\nclass Banner_AdvancedP2(Banner_AdvancedP1):\n CONFIG = {\n \"part_text\" : \"下篇\",\n }\n\n\n\n"
] | [
[
"numpy.sqrt",
"numpy.ones",
"numpy.array",
"numpy.average",
"numpy.conjugate",
"numpy.abs",
"numpy.random.random",
"numpy.round",
"numpy.dot",
"numpy.linalg.norm"
]
] |
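
The PlatterDataset/feature row above embeds manim scenes (DescartesTheoremExamples, ApollonianGasketExample1/2) built around the Descartes circle identity (k1+k2+k3+k4)^2 = 2(k1^2+k2^2+k3^2+k4^2) and the curvature quadruples (-2, 3, 6, 7) and (-3, 5, 8, 12). As a quick sanity check of that identity — a minimal plain-Python sketch with no manim dependency, using only numbers already present in the row — one could run:

def descartes_identity_holds(k1, k2, k3, k4):
    # Descartes circle theorem: four mutually tangent circles with signed
    # curvatures k1..k4 satisfy (k1+k2+k3+k4)^2 == 2*(k1^2+k2^2+k3^2+k4^2).
    return (k1 + k2 + k3 + k4) ** 2 == 2 * (k1**2 + k2**2 + k3**2 + k4**2)

print(descartes_identity_holds(-2, 3, 6, 7))   # True: 14**2 == 196 == 2*98
print(descartes_identity_holds(-3, 5, 8, 12))  # True: 22**2 == 484 == 2*242

The negative curvature in each quadruple is, by the usual sign convention, the enclosing circle, which matches how the scenes above attach the fourth (outer) circle to the three seed circles.
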
hannesb0/MSWH | [
"ce214f26369106c124052638e93cc38fbd58cc91"
] | [
"mswh/comm/tests/test_sql.py"
] | [
"import logging\r\nimport os\r\nimport unittest\r\n\r\nfrom mswh.comm.sql import Sql\r\n\r\nimport pandas as pd\r\n\r\nlogging.basicConfig(level=logging.DEBUG)\r\n\r\n\r\n# has setUpClass method, thus run the test on the entire class\r\nclass SqlTests(unittest.TestCase):\r\n \"\"\"Tests the db-python read-write capabilities.\"\"\"\r\n\r\n @classmethod\r\n def setUpClass(cls):\r\n \"\"\"Initiates the sqlite db engine\r\n for the test db file.\r\n \"\"\"\r\n test_db_name = \"test.db\"\r\n test_db_fulpath = os.path.join(os.path.dirname(__file__), test_db_name)\r\n cls.test_db_fulpath = test_db_fulpath\r\n\r\n print(test_db_fulpath)\r\n # create test db if it does not exist\r\n\r\n if not os.path.exists(test_db_fulpath):\r\n os.system(\"touch \" + test_db_fulpath)\r\n\r\n cls.sql_api = Sql(test_db_fulpath)\r\n\r\n # example dict to write to db\r\n cls.df = pd.DataFrame(\r\n data=[[\"a\", 1], [\"b\", 2]], columns=[\"comp\", \"cost\"]\r\n )\r\n\r\n # example dict to write to db as table\r\n cls.dict = {\"k1\": [12, 13, 14], \"k2\": [\"a\", \"b\", \"c\"]}\r\n\r\n # example csv data\r\n cls.path_to_csv = os.path.join(os.path.dirname(__file__), \"table.csv\")\r\n\r\n # sql code to execute\r\n cls.raw_sql = \"\"\"CREATE TABLE sys_components\r\n(\r\n Component TEXT NOT NULL ,\r\n Function TEXT NOT NULL ,\r\n\r\nPRIMARY KEY (Component)\r\n);\"\"\"\r\n\r\n @classmethod\r\n def tearDownClass(cls):\r\n \"\"\"Clean up for any reinitiation of the test,\r\n but keep the result. Any new run will overwrite\r\n the result.\r\n \"\"\"\r\n store_db_name = \"test_done.db\"\r\n # close the test db\r\n cls.sql_api.db.close()\r\n store_db_fulpath = os.path.join(\r\n os.path.dirname(__file__), store_db_name\r\n )\r\n # rename file, overwrite if exists\r\n if os.path.exists(store_db_fulpath):\r\n os.remove(store_db_fulpath)\r\n\r\n os.rename(cls.test_db_fulpath, store_db_fulpath)\r\n\r\n def test_a_pd2table(self):\r\n \"\"\"Tests write pandas dataframe to\r\n db as a table.\r\n \"\"\"\r\n self.sql_api.pd2table(self.df, \"pd2table\")\r\n\r\n def test_b_csv2table(self):\r\n \"\"\"Tests write csv file to\r\n db as a table.\r\n \"\"\"\r\n self.sql_api.csv2table(self.path_to_csv, \"csv2table\")\r\n\r\n def test_c_table2pd(self):\r\n \"\"\"Reads a single table from db as a pd.df\"\"\"\r\n df = self.sql_api.table2pd(\"pd2table\")\r\n self.assertTrue((df == self.df).all().all())\r\n\r\n def test_d_commit(self):\r\n \"\"\"Use sql to write to db (e.g. create, alter)\"\"\"\r\n self.assertTrue(self.sql_api.commit(self.raw_sql))\r\n\r\n def test_e_tables2dict(self):\r\n \"\"\"Read all tables from db into a dictionary\r\n of dataframes.\r\n \"\"\"\r\n data = self.sql_api.tables2dict()\r\n self.assertEqual(data[\"pd2table\"].iloc[1, 1], 2)\r\n"
] | [
[
"pandas.DataFrame"
]
] |
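
The hannesb0/MSWH row above is a unittest module exercising the mswh.comm.sql.Sql wrapper: it writes a pandas DataFrame and a csv file into a SQLite file as tables, reads a table back into a DataFrame, runs raw SQL through commit(), and dumps all tables into a dict of DataFrames. A minimal usage sketch of the same round trip — method names and the .db connection attribute are taken from the test itself, not from package documentation, and the db file name here is illustrative — might look like:

import os
import pandas as pd
from mswh.comm.sql import Sql

db_path = os.path.join(os.getcwd(), "example.db")
if not os.path.exists(db_path):
    open(db_path, "a").close()  # same effect as the test's `touch`

sql_api = Sql(db_path)

# DataFrame -> table -> DataFrame, as in test_a_pd2table / test_c_table2pd
df = pd.DataFrame(data=[["a", 1], ["b", 2]], columns=["comp", "cost"])
sql_api.pd2table(df, "components")
df_back = sql_api.table2pd("components")
assert (df_back == df).all().all()

# Raw DDL goes through commit(), as in test_d_commit
sql_api.commit("CREATE TABLE notes (note_id INTEGER PRIMARY KEY, txt TEXT);")

# All tables as a dict of DataFrames, as in test_e_tables2dict
tables = sql_api.tables2dict()

# The underlying connection is exposed as .db, as used in tearDownClass
sql_api.db.close()
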
chrhck/pyABC | [
"731cfdec26bef3898bf6e244daa5c8f83f3fe19d",
"731cfdec26bef3898bf6e244daa5c8f83f3fe19d"
] | [
"test/visualization/test_visualization.py",
"pyabc/epsilon/temperature.py"
] | [
"import pyabc\nimport tempfile\nimport pytest\nimport os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\n# create and run some model\n\n\ndef model(p):\n return {'ss0': p['p0'] + 0.1 * np.random.uniform(),\n 'ss1': p['p1'] + 0.1 * np.random.uniform()}\n\n\np_true = {'p0': 3, 'p1': 4}\nobservation = {'ss0': p_true['p0'], 'ss1': p_true['p1']}\nlimits = {'p0': (0, 5), 'p1': (1, 8)}\nprior = pyabc.Distribution(**{\n key: pyabc.RV('uniform', limits[key][0], limits[key][1] - limits[key][0])\n for key in p_true.keys()})\n\ndb_path = \"sqlite:///\" \\\n + os.path.join(tempfile.gettempdir(), \"test_visualize.db\")\n\n\ndistance = pyabc.PNormDistance(p=2)\nn_history = 2\nsampler = pyabc.sampler.MulticoreEvalParallelSampler(n_procs=2)\n\nfor _ in range(n_history):\n abc = pyabc.ABCSMC(model, prior, distance, 20, sampler=sampler)\n abc.new(db_path, observation)\n abc.run(minimum_epsilon=.1, max_nr_populations=3)\n\n\nhistories = []\nlabels = []\nfor j in range(n_history):\n history = pyabc.History(db_path)\n history.id = j + 1\n histories.append(history)\n labels.append(\"Some run \" + str(j))\n\n\ndef test_epsilons():\n pyabc.visualization.plot_epsilons(histories, labels)\n plt.close()\n\n\ndef test_sample_numbers():\n pyabc.visualization.plot_sample_numbers(\n histories, rotation=43, size=(5, 5))\n _, ax = plt.subplots()\n pyabc.visualization.plot_sample_numbers(histories, labels, ax=ax)\n with pytest.raises(ValueError):\n pyabc.visualization.plot_sample_numbers(histories, [labels[0]])\n plt.close()\n\n\ndef test_sample_numbers_trajectory():\n pyabc.visualization.plot_sample_numbers_trajectory(\n histories, labels, yscale='log', rotation=90)\n _, ax = plt.subplots()\n pyabc.visualization.plot_sample_numbers_trajectory(\n histories, labels, yscale='log10', size=(8, 8), ax=ax)\n plt.close()\n\n\ndef test_acceptance_rates_trajectory():\n pyabc.visualization.plot_acceptance_rates_trajectory(\n histories, labels, yscale='log', rotation=76)\n _, ax = plt.subplots()\n pyabc.visualization.plot_acceptance_rates_trajectory(\n histories, labels, yscale='log10', rotation=76, size=(10, 5), ax=ax)\n plt.close()\n\n\ndef test_total_sample_numbers():\n pyabc.visualization.plot_total_sample_numbers(histories)\n pyabc.visualization.plot_total_sample_numbers(\n histories, labels, yscale='log', size=(10, 5))\n _, ax = plt.subplots()\n pyabc.visualization.plot_total_sample_numbers(\n histories, rotation=75, yscale='log10', ax=ax)\n plt.close()\n\n\ndef test_effective_sample_sizes():\n pyabc.visualization.plot_effective_sample_sizes(\n histories, labels, rotation=45, relative=True)\n plt.close()\n\n\ndef test_histograms():\n # 1d\n pyabc.visualization.plot_histogram_1d(\n histories[0], 'p0', bins=20,\n xmin=limits['p0'][0], xmax=limits['p0'][1], size=(5, 5), refval=p_true)\n # 2d\n pyabc.visualization.plot_histogram_2d(histories[0], 'p0', 'p1')\n pyabc.visualization.plot_histogram_2d(\n histories[0], 'p0', 'p1', xmin=limits['p0'][0], xmax=limits['p0'][1],\n ymin=limits['p1'][0], ymax=limits['p1'][1], size=(5, 6), refval=p_true)\n # matrix\n pyabc.visualization.plot_histogram_matrix(\n histories[0], bins=1000, size=(6, 7), refval=p_true)\n plt.close()\n\n\ndef test_kdes():\n history = histories[0]\n df, w = history.get_distribution(m=0, t=None)\n pyabc.visualization.plot_kde_1d(\n df, w, x='p0',\n xmin=limits['p0'][0], xmax=limits['p0'][1],\n label=\"PDF\")\n pyabc.visualization.plot_kde_2d(df, w, x='p0', y='p1')\n pyabc.visualization.plot_kde_matrix(df, w)\n\n # also use the highlevel 
interfaces\n pyabc.visualization.plot_kde_1d_highlevel(history, x='p0', size=(4, 5),\n refval=p_true)\n pyabc.visualization.plot_kde_2d_highlevel(history, x='p0', y='p1',\n size=(7, 5),\n refval=p_true)\n pyabc.visualization.plot_kde_matrix_highlevel(history, height=27.43,\n refval=p_true)\n plt.close()\n\n\ndef test_credible_intervals():\n pyabc.visualization.plot_credible_intervals(histories[0])\n pyabc.visualization.plot_credible_intervals(\n histories[0], levels=[0.2, 0.5, 0.9],\n show_kde_max_1d=True, show_kde_max=True, show_mean=True,\n refval=p_true)\n pyabc.visualization.plot_credible_intervals_for_time(\n histories, levels=[0.5, 0.99],\n show_kde_max_1d=True, show_kde_max=True, show_mean=True,\n refvals=p_true)\n plt.close()\n\n\ndef test_model_probabilities():\n pyabc.visualization.plot_model_probabilities(histories[0])\n plt.close()\n\n\ndef test_data_callback():\n def plot_data(sum_stat, weight, ax, **kwargs):\n ax.plot(sum_stat['ss0'], alpha=weight, **kwargs)\n\n def plot_data_aggregated(sum_stats, weights, ax, **kwargs):\n data = np.array([sum_stat['ss0'] for sum_stat in sum_stats])\n weights = np.array(weights).reshape((-1, 1))\n mean = (data * weights).sum(axis=0)\n plot_data({'ss0': mean}, 1.0, ax)\n\n pyabc.visualization.plot_data_callback(\n histories[0], plot_data, plot_data_aggregated)\n\n\ndef test_data_default():\n obs_dict = {1: 0.7, 2: np.array([43, 423, 5.5]),\n 3: pd.DataFrame({'a': [1, 2], 'b': [4, 6]})}\n sim_dict = {1: 6.5, 2: np.array([32, 5, 6]),\n 3: pd.DataFrame({'a': [1.55, -0.1], 'b': [54, 6]})}\n pyabc.visualization.plot_data_default(obs_dict, sim_dict)\n for i in range(5):\n obs_dict[i] = i + 1\n sim_dict[i] = i + 2\n pyabc.visualization.plot_data_default(obs_dict, sim_dict)\n plt.close()\n",
"import numpy as np\nimport scipy as sp\nimport pandas as pd\nimport numbers\nfrom typing import Callable, List, Union\nimport logging\n\nfrom .base import Epsilon\nfrom ..distance import SCALE_LIN\nfrom ..sampler import Sampler\nfrom ..storage import save_dict_to_json\n\nlogger = logging.getLogger(\"Epsilon\")\n\n\nclass TemperatureBase(Epsilon):\n \"\"\"\n A temperature scheme handles the decrease of the temperatures employed\n by a :class:`pyabc.acceptor.StochasticAcceptor` over time.\n\n This class is not functional on its own, its derivatives must be used.\n \"\"\"\n\n\nclass ListTemperature(TemperatureBase):\n \"\"\"\n Pass a list of temperature values to use successively.\n\n Parameters\n ----------\n values:\n The array of temperatures to use successively.\n For exact inference, finish with 1.\n \"\"\"\n\n def __init__(self, values: List[float]):\n self.values = values\n\n def __call__(self,\n t: int) -> float:\n return self.values[t]\n\n\nclass Temperature(TemperatureBase):\n \"\"\"\n This class implements a highly adaptive and configurable temperature\n scheme. Via the argument `schemes`, arbitrary temperature schemes can be\n passed to calculate the next generation's temperature, via `aggregate_fun`\n one can define how to combine multiple guesses, via `initial_temperature`\n the initial temperature can be set.\n\n Parameters\n ----------\n schemes: Union[Callable, List[Callable]], optional\n Temperature schemes returning proposed\n temperatures for the next time point, e.g.\n instances of :class:`pyabc.epsilon.TemperatureScheme`.\n aggregate_fun: Callable[List[float], float], optional\n The function to aggregate the schemes by, of the form\n ``Callable[List[float], float]``.\n Defaults to taking the minimum.\n initial_temperature: float, optional\n The initial temperature. If None provided, an AcceptanceRateScheme\n is used.\n enforce_exact_final_temperature: bool, optional\n Whether to force the final temperature (if max_nr_populations < inf)\n to be 1.0, giving exact inference.\n log_file: str, optional\n A log file for storing data of the temperature that are currently not\n saved in the database. 
The data are saved in json format.\n\n Properties\n ----------\n max_nr_populations: int\n The maximum number of iterations as passed to ABCSMC.\n May be inf, but not all schemes can handle that (and will complain).\n temperatures: Dict[int, float]\n Times as keys and temperatures as values.\n \"\"\"\n\n def __init__(\n self,\n schemes: Union[Callable, List[Callable]] = None,\n aggregate_fun: Callable[[List[float]], float] = None,\n initial_temperature: float = None,\n enforce_exact_final_temperature: bool = True,\n log_file: str = None):\n self.schemes = schemes\n\n if aggregate_fun is None:\n # use minimum over all proposed temperature values\n aggregate_fun = min\n self.aggregate_fun = aggregate_fun\n\n if initial_temperature is None:\n initial_temperature = AcceptanceRateScheme()\n self.initial_temperature = initial_temperature\n\n self.enforce_exact_final_temperature = enforce_exact_final_temperature\n self.log_file = log_file\n\n # to be filled later\n self.max_nr_populations = None\n self.temperatures = {}\n self.temperature_proposals = {}\n\n def initialize(self,\n t: int,\n get_weighted_distances: Callable[[], pd.DataFrame],\n get_all_records: Callable[[], List[dict]],\n max_nr_populations: int,\n acceptor_config: dict):\n self.max_nr_populations = max_nr_populations\n\n # set default schemes\n if self.schemes is None:\n # this combination proved rather stable\n acc_rate_scheme = AcceptanceRateScheme()\n decay_scheme = (\n ExpDecayFixedIterScheme() if np.isfinite(max_nr_populations)\n else ExpDecayFixedRatioScheme())\n self.schemes = [acc_rate_scheme, decay_scheme]\n\n # set initial temperature for time t\n self._update(t, get_weighted_distances, get_all_records,\n 1.0, acceptor_config)\n\n def configure_sampler(self, sampler: Sampler):\n if callable(self.initial_temperature):\n self.initial_temperature.configure_sampler(sampler)\n for scheme in self.schemes:\n scheme.configure_sampler(sampler)\n\n def update(self,\n t: int,\n get_weighted_distances: Callable[[], pd.DataFrame],\n get_all_records: Callable[[], List[dict]],\n acceptance_rate: float,\n acceptor_config: dict):\n # set temperature for time t\n self._update(t, get_weighted_distances,\n get_all_records, acceptance_rate,\n acceptor_config)\n\n def _update(self,\n t: int,\n get_weighted_distances: Callable[[], pd.DataFrame],\n get_all_records: Callable[[], List[dict]],\n acceptance_rate: float,\n acceptor_config):\n \"\"\"\n Compute the temperature for time `t`.\n \"\"\"\n # scheme arguments\n kwargs = dict(\n t=t,\n get_weighted_distances=get_weighted_distances,\n get_all_records=get_all_records,\n max_nr_populations=self.max_nr_populations,\n pdf_norm=acceptor_config['pdf_norm'],\n kernel_scale=acceptor_config['kernel_scale'],\n prev_temperature=self.temperatures.get(t-1, None),\n acceptance_rate=acceptance_rate,\n )\n\n if t >= self.max_nr_populations - 1 \\\n and self.enforce_exact_final_temperature:\n # t is last time\n temps = [1.0]\n elif not self.temperatures: # need an initial value\n if callable(self.initial_temperature):\n # execute scheme\n temps = [self.initial_temperature(**kwargs)]\n elif isinstance(self.initial_temperature, numbers.Number):\n temps = [self.initial_temperature]\n else:\n raise ValueError(\n \"Initial temperature must be a float or a callable\")\n else:\n # evaluate schemes\n temps = []\n for scheme in self.schemes:\n temp = scheme(**kwargs)\n temps.append(temp)\n\n # compute next temperature based on proposals and fallback\n # should not be higher than before\n fallback = 
self.temperatures[t-1] \\\n if t-1 in self.temperatures else np.inf\n temperature = self.aggregate_fun(temps)\n # also a value lower than 1.0 does not make sense\n temperature = max(min(temperature, fallback), 1.0)\n\n if not np.isfinite(temperature):\n raise ValueError(\"Temperature must be finite.\")\n # record found value\n self.temperatures[t] = temperature\n\n # logging\n logger.debug(f\"Proposed temperatures for {t}: {temps}.\")\n self.temperature_proposals[t] = temps\n if self.log_file:\n save_dict_to_json(self.temperature_proposals, self.log_file)\n\n def __call__(self,\n t: int) -> float:\n return self.temperatures[t]\n\n\nclass TemperatureScheme:\n \"\"\"\n A TemperatureScheme suggests the next temperature value. It is used as\n one of potentially multiple schemes employed in the Temperature class.\n This class is abstract.\n\n Parameters\n ----------\n t:\n The time to compute for.\n get_weighted_distances:\n Callable to obtain the weights and kernel values to be used for\n the scheme.\n get_all_records:\n Callable returning a List[dict] of all recorded particles.\n max_nr_populations:\n The maximum number of populations that are supposed to be taken.\n pdf_norm:\n The normalization constant c that will be used in the acceptance step.\n kernel_scale:\n Scale on which the pdf values are (linear or logarithmic).\n prev_temperature:\n The temperature that was used last time (or None if not applicable).\n acceptance_rate:\n The recently obtained rate.\n \"\"\"\n\n def __init__(self):\n pass\n\n def configure_sampler(self, sampler: Sampler):\n \"\"\"\n Modify the sampler. As in, and redirected from,\n :func:`pyabc.epsilon.Temperature.configure_sampler`.\n \"\"\"\n\n def __call__(self,\n t: int,\n get_weighted_distances: Callable[[], pd.DataFrame],\n get_all_records: Callable[[], List[dict]],\n max_nr_populations: int,\n pdf_norm: float,\n kernel_scale: str,\n prev_temperature: float,\n acceptance_rate: float):\n pass\n\n\nclass AcceptanceRateScheme(TemperatureScheme):\n \"\"\"\n Try to keep the acceptance rate constant at a value of\n `target_rate`. Note that this scheme will fail to\n reduce the temperature sufficiently in later iterations, if the\n problem's inherent acceptance rate is lower, but it has been\n observed to give big feasible temperature leaps in early iterations.\n In particular, this scheme can be used to propose an initial temperature.\n\n Parameters\n ----------\n target_rate: float, optional\n The target acceptance rate to match.\n min_rate: float, optional\n The minimum rate below which not to apply the acceptance step scheme\n any more. Setting this to a value of e.g. 
0.05 can make sense\n 1) because it may be unlikely that the acceptance rate scheme will\n propose a useful temperature at such low acceptance levels, and\n 2) to avoid uneccessary computations.\n \"\"\"\n\n def __init__(self, target_rate: float = 0.3, min_rate: float = None):\n self.target_rate = target_rate\n self.min_rate = min_rate\n\n def configure_sampler(self, sampler: Sampler):\n sampler.sample_factory.record_rejected = True\n\n def __call__(self,\n t: int,\n get_weighted_distances: Callable[[], pd.DataFrame],\n get_all_records: Callable[[], List[dict]],\n max_nr_populations: int,\n pdf_norm: float,\n kernel_scale: str,\n prev_temperature: float,\n acceptance_rate: float):\n # check minimum rate\n if self.min_rate is not None and acceptance_rate < self.min_rate:\n return np.inf\n\n # execute function (expensive if in calibration)\n records = get_all_records()\n # convert to dataframe for easier extraction\n records = pd.DataFrame(records)\n\n # previous and current transition densities\n t_pd_prev = np.array(records['transition_pd_prev'], dtype=float)\n t_pd = np.array(records['transition_pd'], dtype=float)\n # acceptance kernel likelihoods\n pds = np.array(records['distance'], dtype=float)\n\n # compute importance weights\n weights = t_pd / t_pd_prev\n # len would suffice, but maybe rather not rely on things to be normed\n weights /= sum(weights)\n\n temperature = match_acceptance_rate(\n weights, pds, pdf_norm, kernel_scale, self.target_rate)\n\n return temperature\n\n\ndef match_acceptance_rate(\n weights, pds, pdf_norm, kernel_scale, target_rate):\n \"\"\"\n For large temperature, changes become effective on an exponential scale,\n thus we optimize the logarithm of the inverse temperature beta.\n\n For a temperature close to 1, subtler changes are neccesary, however here\n the logarhtm is nearly linear anyway.\n \"\"\"\n # objective function which we wish to find a root for\n def obj(b):\n beta = np.exp(b)\n\n # compute rescaled posterior densities\n if kernel_scale == SCALE_LIN:\n acc_probs = (pds / pdf_norm) ** beta\n else: # kernel_scale == SCALE_LOG\n acc_probs = np.exp((pds - pdf_norm) * beta)\n\n # to acceptance probabilities to be sure\n acc_probs = np.minimum(acc_probs, 1.0)\n\n # objective function\n val = np.sum(weights * acc_probs) - target_rate\n return val\n\n # TODO the lower boundary min_b is somewhat arbitrary\n min_b = -100\n if obj(0) > 0:\n # function is monotonically decreasing\n # smallest possible value already > 0\n b_opt = 0\n elif obj(min_b) < 0:\n # it is obj(-inf) > 0 always\n logger.info(\"AcceptanceRateScheme: Numerics limit temperature.\")\n b_opt = min_b\n else:\n # perform binary search\n b_opt = sp.optimize.bisect(obj, min_b, 0, maxiter=100000)\n\n beta_opt = np.exp(b_opt)\n\n temperature = 1. / beta_opt\n return temperature\n\n\nclass ExpDecayFixedIterScheme(TemperatureScheme):\n \"\"\"\n The next temperature is set as\n\n .. math::\n T_j = T_{max}^{(n-j)/n}\n\n where n denotes the number of populations, and j=1,...,n the iteration.\n This translates to\n\n .. math::\n T_j = T_{j-1}^{(n-j)/(n-(j-1))}.\n\n This ensures that a temperature of 1.0 is reached after exactly the\n remaining number of steps.\n\n So, in both cases the sequence of temperatures follows an exponential\n decay, also known as a geometric progression, or a linear progression\n in log-space.\n\n Note that the formula is applied anew in each iteration.\n This is advantageous if also other schemes are used s.t. 
T_{j-1}\n is smaller than by the above.\n\n Parameters\n ----------\n\n alpha: float\n Factor by which to reduce the temperature, if `max_nr_populations`\n is infinite.\n \"\"\"\n\n def __init__(self):\n pass\n\n def __call__(self,\n t: int,\n get_weighted_distances: Callable[[], pd.DataFrame],\n get_all_records: Callable[[], List[dict]],\n max_nr_populations: int,\n pdf_norm: float,\n kernel_scale: str,\n prev_temperature: float,\n acceptance_rate: float):\n # needs a finite number of iterations\n if max_nr_populations == np.inf:\n raise ValueError(\n \"The ExpDecayFixedIterScheme requires a finite \"\n \"`max_nr_populations`.\")\n\n # needs a starting temperature\n # if not available, return infinite temperature\n if prev_temperature is None:\n return np.inf\n\n # base temperature\n temp_base = prev_temperature\n\n # how many steps left?\n t_to_go = max_nr_populations - t\n\n # compute next temperature according to exponential decay\n temperature = temp_base ** ((t_to_go - 1) / t_to_go)\n\n return temperature\n\n\nclass ExpDecayFixedRatioScheme(TemperatureScheme):\n \"\"\"\n The next temperature is chosen as\n\n .. math::\n T_j = \\\\alpha \\\\cdot T_{j-1}.\n\n Like the :class:`pyabc.epsilon.ExpDecayFixedIterScheme`,\n this yields a geometric progression, however with a fixed ratio,\n irrespective of the number of iterations. If a finite number of\n iterations is specified in ABCSMC, there is no influence on the final\n jump to a temperature of 1.0.\n\n This is quite similar to the :class:`pyabc.epsilon.DalyScheme`, although\n simpler in implementation. The alpha value here corresponds to a value of\n 1 - alpha there.\n\n Parameters\n ----------\n alpha: float, optional\n The ratio of subsequent temperatures.\n min_rate: float, optional\n A minimum acceptance rate. If this rate has been violated in the\n previous iteration, the alpha value is increased.\n max_rate: float, optional\n Maximum rate to not be exceeded, otherwise the alpha value is\n decreased.\n \"\"\"\n def __init__(self, alpha: float = 0.5,\n min_rate: float = 1e-4, max_rate: float = 0.5):\n self.alpha = alpha\n self.min_rate = min_rate\n self.max_rate = max_rate\n self.alphas = {}\n\n def __call__(self,\n t: int,\n get_weighted_distances: Callable[[], pd.DataFrame],\n get_all_records: Callable[[], List[dict]],\n max_nr_populations: int,\n pdf_norm: float,\n kernel_scale: str,\n prev_temperature: float,\n acceptance_rate: float):\n if prev_temperature is None:\n return np.inf\n\n # previous alpha\n alpha = self.alphas.get(t-1, self.alpha)\n\n # check if acceptance rate criterion violated\n if acceptance_rate > self.max_rate and t > 1:\n logger.debug(\"ExpDecayFixedRatioScheme: \"\n \"Reacting to high acceptance rate.\")\n alpha = max(alpha / 2, alpha - (1 - alpha) * 2)\n if acceptance_rate < self.min_rate:\n logger.debug(\"ExpDecayFixedRatioScheme: \"\n \"Reacting to low acceptance rate.\")\n # increase alpha\n alpha = alpha + (1 - alpha) / 2\n # record\n self.alphas[t] = alpha\n\n # reduce temperature\n temperature = self.alphas[t] * prev_temperature\n\n return temperature\n\n\nclass PolynomialDecayFixedIterScheme(TemperatureScheme):\n \"\"\"\n Compute next temperature as pre-last entry in\n\n >>> np.linspace(1, (temp_base)**(1 / temp_decay_exponent),\n >>> t_to_go + 1) ** temp_decay_exponent)\n\n Requires finite `max_nr_populations`.\n\n Note that this is similar to the\n :class:`pyabc.epsilon.ExpDecayFixedIterScheme`, which is\n indeed the limit for `exponent -> infinity`. 
For smaller\n exponent, the sequence makes larger steps for low temperatures. This\n can be useful in cases, where lower temperatures (which are usually\n more expensive) can be traversed in few larger steps, however also\n the opposite may be true, i.e. that more steps at low temperatures\n are advantageous.\n\n Parameters\n ----------\n exponent: float, optional\n The exponent to use in the scheme.\n \"\"\"\n\n def __init__(self, exponent: float = 3):\n self.exponent = exponent\n\n def __call__(self,\n t: int,\n get_weighted_distances: Callable[[], pd.DataFrame],\n get_all_records: Callable[[], List[dict]],\n max_nr_populations: int,\n pdf_norm: float,\n kernel_scale: str,\n prev_temperature: float,\n acceptance_rate: float):\n # needs a starting temperature\n # if not available, return infinite temperature\n if prev_temperature is None:\n return np.inf\n\n # base temperature\n temp_base = prev_temperature\n\n # check if we can compute a decay step\n if max_nr_populations == np.inf:\n raise ValueError(\"Can only perform PolynomialDecayScheme step \"\n \"with a finite max_nr_populations.\")\n\n # how many steps left?\n t_to_go = max_nr_populations - t\n\n # compute sequence\n temps = np.linspace(1, (temp_base)**(1 / self.exponent),\n t_to_go+1) ** self.exponent\n\n logger.debug(f\"Temperatures proposed by polynomial decay method: \"\n f\"{temps}.\")\n\n # pre-last step is the next step\n temperature = temps[-2]\n return temperature\n\n\nclass DalyScheme(TemperatureScheme):\n \"\"\"\n This scheme is loosely based on [#daly2017]_, however note that it does\n not try to replicate it entirely. In particular, the implementation\n of pyABC does not allow the sampling to be stopped when encountering\n too low acceptance rates, such that this can only be done ex-posteriori\n here.\n\n Parameters\n ----------\n alpha: float, optional\n The ratio by which to decrease the temperature value. More\n specifically, the next temperature is given as\n `(1-alpha) * temperature`.\n min_rate: float, optional\n A minimum acceptance rate. If this rate has been violated in the\n previous iteration, the alpha value is decreased.\n\n\n .. [#daly2017] Daly Aidan C., Cooper Jonathan, Gavaghan David J.,\n and Holmes Chris. \"Comparing two sequential Monte Carlo samplers\n for exact and approximate Bayesian inference on biological\n models\". Journal of The Royal Society Interface, 2017.\n \"\"\"\n\n def __init__(self, alpha: float = 0.5, min_rate: float = 1e-4):\n self.alpha = alpha\n self.min_rate = min_rate\n self.k = {}\n\n def __call__(self,\n t: int,\n get_weighted_distances: Callable[[], pd.DataFrame],\n get_all_records: Callable[[], List[dict]],\n max_nr_populations: int,\n pdf_norm: float,\n kernel_scale: str,\n prev_temperature: float,\n acceptance_rate: float):\n # needs a starting temperature\n # if not available, return infinite temperature\n if prev_temperature is None:\n return np.inf\n\n # base temperature\n temp_base = prev_temperature\n\n # addressing the std, not the var\n eps_base = np.sqrt(temp_base)\n\n if not self.k:\n # initial iteration\n self.k[t - 1] = eps_base\n\n k_base = self.k[t - 1]\n\n if acceptance_rate < self.min_rate:\n logger.debug(\"DalyScheme: Reacting to low acceptance rate.\")\n # reduce reduction\n k_base = self.alpha * k_base\n\n self.k[t] = min(k_base, self.alpha * eps_base)\n eps = eps_base - self.k[t]\n temperature = eps**2\n\n return temperature\n\n\nclass FrielPettittScheme(TemperatureScheme):\n \"\"\"\n Basically takes linear steps in log-space. 
See [#vyshemirsky2008]_.\n\n .. [#vyshemirsky2008] Vyshemirsky, Vladislav, and Mark A. Girolami.\n \"Bayesian ranking of biochemical system models.\"\n Bioinformatics 24.6 (2007): 833-839.\n \"\"\"\n\n def __call__(self,\n t: int,\n get_weighted_distances: Callable[[], pd.DataFrame],\n get_all_records: Callable[[], List[dict]],\n max_nr_populations: int,\n pdf_norm: float,\n kernel_scale: str,\n prev_temperature: float,\n acceptance_rate: float):\n # needs a starting temperature\n # if not available, return infinite temperature\n if prev_temperature is None:\n return np.inf\n\n # check if we can compute a decay step\n if max_nr_populations == np.inf:\n raise ValueError(\"Can only perform FrielPettittScheme step with a \"\n \"finite max_nr_populations.\")\n\n # base temperature\n temp_base = prev_temperature\n beta_base = 1. / temp_base\n\n # time to go\n t_to_go = max_nr_populations - t\n\n beta = beta_base + ((1. - beta_base) * 1 / t_to_go) ** 2\n\n temperature = 1. / beta\n return temperature\n\n\nclass EssScheme(TemperatureScheme):\n \"\"\"\n Try to keep the effective sample size (ESS) constant.\n\n Parameters\n ----------\n target_relative_ess: float\n Targe relative effective sample size.\n \"\"\"\n\n def __init__(self, target_relative_ess: float = 0.8):\n self.target_relative_ess = target_relative_ess\n\n def __call__(self,\n t: int,\n get_weighted_distances: Callable[[], pd.DataFrame],\n get_all_records: Callable[[], List[dict]],\n max_nr_populations: int,\n pdf_norm: float,\n kernel_scale: str,\n prev_temperature: float,\n acceptance_rate: float):\n # execute function (expensive if in calibration)\n df = get_weighted_distances()\n\n weights = np.array(df['w'], dtype=float)\n pdfs = np.array(df['distance'], dtype=float)\n\n # compute rescaled posterior densities\n if kernel_scale == SCALE_LIN:\n values = pdfs / pdf_norm\n else: # kernel_scale == SCALE_LOG\n values = np.exp(pdfs - pdf_norm)\n\n # to probability mass function (i.e. normalize)\n weights /= np.sum(weights)\n\n target_ess = len(weights) * self.target_relative_ess\n\n if prev_temperature is None:\n beta_base = 0.0\n else:\n beta_base = 1. / prev_temperature\n\n # objective to minimize\n def obj(beta):\n return (_ess(values, weights, beta) - target_ess)**2\n\n bounds = sp.optimize.Bounds(lb=np.array([beta_base]),\n ub=np.array([1.]))\n # TODO make more efficient by providing gradients\n ret = sp.optimize.minimize(\n obj, x0=np.array([0.5 * (1 + beta_base)]),\n bounds=bounds)\n beta = ret.x\n\n temperature = 1. / beta\n return temperature\n\n\ndef _ess(pdfs, weights, beta):\n \"\"\"\n Effective sample size (ESS) of importance samples.\n \"\"\"\n num = np.sum(weights * pdfs**beta)**2\n den = np.sum((weights * pdfs**beta)**2)\n return num / den\n"
] | [
[
"numpy.random.uniform",
"pandas.DataFrame",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.close",
"numpy.array"
],
[
"numpy.sqrt",
"numpy.sum",
"scipy.optimize.bisect",
"pandas.DataFrame",
"numpy.exp",
"numpy.array",
"numpy.linspace",
"numpy.isfinite",
"numpy.minimum"
]
] |
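The ExpDecayFixedIterScheme in the pyabc epsilon module above reapplies T_next = T_prev ** ((t_to_go - 1) / t_to_go) each generation, so the schedule is linear in log-space and reaches 1.0 on the final step. A minimal sketch of that schedule, assuming a hypothetical starting temperature of 100 and five remaining populations:

temperatures = []
temp, t_to_go = 100.0, 5                            # hypothetical starting point
while t_to_go > 0:
    temp = temp ** ((t_to_go - 1) / t_to_go)        # geometric decay toward 1.0
    temperatures.append(temp)
    t_to_go -= 1
print(temperatures)                                 # last entry is exactly 1.0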
supernord/tools-iuc | [
"95f1ae4ed1cdd56114df76d215f9e1ed549aa4c5"
] | [
"tools/vsnp/vsnp_statistics.py"
] | [
"#!/usr/bin/env python\n\nimport argparse\nimport csv\nimport gzip\nimport os\nfrom functools import partial\n\nimport numpy\nimport pandas\nfrom Bio import SeqIO\n\n\ndef nice_size(size):\n # Returns a readably formatted string with the size\n words = ['bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB']\n prefix = ''\n try:\n size = float(size)\n if size < 0:\n size = abs(size)\n prefix = '-'\n except Exception:\n return '??? bytes'\n for ind, word in enumerate(words):\n step = 1024 ** (ind + 1)\n if step > size:\n size = size / float(1024 ** ind)\n if word == 'bytes': # No decimals for bytes\n return \"%s%d bytes\" % (prefix, size)\n return \"%s%.1f %s\" % (prefix, size, word)\n return '??? bytes'\n\n\ndef output_statistics(fastq_files, idxstats_files, metrics_files, output_file, gzipped, dbkey):\n # Produce an Excel spreadsheet that\n # contains a row for each sample.\n columns = ['Reference', 'File Size', 'Mean Read Length', 'Mean Read Quality', 'Reads Passing Q30',\n 'Total Reads', 'All Mapped Reads', 'Unmapped Reads', 'Unmapped Reads Percentage of Total',\n 'Reference with Coverage', 'Average Depth of Coverage', 'Good SNP Count']\n data_frames = []\n for i, fastq_file in enumerate(fastq_files):\n idxstats_file = idxstats_files[i]\n metrics_file = metrics_files[i]\n file_name_base = os.path.basename(fastq_file)\n # Read fastq_file into a data frame.\n _open = partial(gzip.open, mode='rt') if gzipped else open\n with _open(fastq_file) as fh:\n identifiers = []\n seqs = []\n letter_annotations = []\n for seq_record in SeqIO.parse(fh, \"fastq\"):\n identifiers.append(seq_record.id)\n seqs.append(seq_record.seq)\n letter_annotations.append(seq_record.letter_annotations[\"phred_quality\"])\n # Convert lists to Pandas series.\n s1 = pandas.Series(identifiers, name='id')\n s2 = pandas.Series(seqs, name='seq')\n # Gather Series into a data frame.\n fastq_df = pandas.DataFrame(dict(id=s1, seq=s2)).set_index(['id'])\n total_reads = int(len(fastq_df.index) / 4)\n current_sample_df = pandas.DataFrame(index=[file_name_base], columns=columns)\n # Reference\n current_sample_df.at[file_name_base, 'Reference'] = dbkey\n # File Size\n current_sample_df.at[file_name_base, 'File Size'] = nice_size(os.path.getsize(fastq_file))\n # Mean Read Length\n sampling_size = 10000\n if sampling_size > total_reads:\n sampling_size = total_reads\n fastq_df = fastq_df.iloc[3::4].sample(sampling_size)\n dict_mean = {}\n list_length = []\n i = 0\n for id, seq, in fastq_df.iterrows():\n dict_mean[id] = numpy.mean(letter_annotations[i])\n list_length.append(len(seq.array[0]))\n i += 1\n current_sample_df.at[file_name_base, 'Mean Read Length'] = '%.1f' % numpy.mean(list_length)\n # Mean Read Quality\n df_mean = pandas.DataFrame.from_dict(dict_mean, orient='index', columns=['ave'])\n current_sample_df.at[file_name_base, 'Mean Read Quality'] = '%.1f' % df_mean['ave'].mean()\n # Reads Passing Q30\n reads_gt_q30 = len(df_mean[df_mean['ave'] >= 30])\n reads_passing_q30 = '{:10.2f}'.format(reads_gt_q30 / sampling_size)\n current_sample_df.at[file_name_base, 'Reads Passing Q30'] = reads_passing_q30\n # Total Reads\n current_sample_df.at[file_name_base, 'Total Reads'] = total_reads\n # All Mapped Reads\n all_mapped_reads, unmapped_reads = process_idxstats_file(idxstats_file)\n current_sample_df.at[file_name_base, 'All Mapped Reads'] = all_mapped_reads\n # Unmapped Reads\n current_sample_df.at[file_name_base, 'Unmapped Reads'] = unmapped_reads\n # Unmapped Reads Percentage of Total\n if unmapped_reads > 0:\n unmapped_reads_percentage = 
'{:10.2f}'.format(unmapped_reads / total_reads)\n else:\n unmapped_reads_percentage = 0\n current_sample_df.at[file_name_base, 'Unmapped Reads Percentage of Total'] = unmapped_reads_percentage\n # Reference with Coverage\n ref_with_coverage, avg_depth_of_coverage, good_snp_count = process_metrics_file(metrics_file)\n current_sample_df.at[file_name_base, 'Reference with Coverage'] = ref_with_coverage\n # Average Depth of Coverage\n current_sample_df.at[file_name_base, 'Average Depth of Coverage'] = avg_depth_of_coverage\n # Good SNP Count\n current_sample_df.at[file_name_base, 'Good SNP Count'] = good_snp_count\n data_frames.append(current_sample_df)\n output_df = pandas.concat(data_frames)\n output_df.to_csv(output_file, sep='\\t', quoting=csv.QUOTE_NONE, escapechar='\\\\')\n\n\ndef process_idxstats_file(idxstats_file):\n all_mapped_reads = 0\n unmapped_reads = 0\n with open(idxstats_file, \"r\") as fh:\n for i, line in enumerate(fh):\n line = line.rstrip('\\r\\n')\n items = line.split(\"\\t\")\n if i == 0:\n # NC_002945.4 4349904 213570 4047\n all_mapped_reads = int(items[2])\n elif i == 1:\n # * 0 0 82774\n unmapped_reads = int(items[3])\n return all_mapped_reads, unmapped_reads\n\n\ndef process_metrics_file(metrics_file):\n ref_with_coverage = '0%'\n avg_depth_of_coverage = 0\n good_snp_count = 0\n with open(metrics_file, \"r\") as ifh:\n for i, line in enumerate(ifh):\n if i == 0:\n # Skip comments.\n continue\n line = line.rstrip('\\r\\n')\n items = line.split(\"\\t\")\n if i == 1:\n # MarkDuplicates 10.338671 98.74%\n ref_with_coverage = items[3]\n avg_depth_of_coverage = items[2]\n elif i == 2:\n # VCFfilter 611\n good_snp_count = items[1]\n return ref_with_coverage, avg_depth_of_coverage, good_snp_count\n\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument('--dbkey', action='store', dest='dbkey', help='Reference dbkey')\nparser.add_argument('--gzipped', action='store_true', dest='gzipped', required=False, default=False, help='Input files are gzipped')\nparser.add_argument('--input_idxstats_dir', action='store', dest='input_idxstats_dir', required=False, default=None, help='Samtools idxstats input directory')\nparser.add_argument('--input_metrics_dir', action='store', dest='input_metrics_dir', required=False, default=None, help='vSNP add zero coverage metrics input directory')\nparser.add_argument('--input_reads_dir', action='store', dest='input_reads_dir', required=False, default=None, help='Samples input directory')\nparser.add_argument('--list_paired', action='store_true', dest='list_paired', required=False, default=False, help='Input samples is a list of paired reads')\nparser.add_argument('--output', action='store', dest='output', help='Output Excel statistics file')\nparser.add_argument('--read1', action='store', dest='read1', help='Required: single read')\nparser.add_argument('--read2', action='store', dest='read2', required=False, default=None, help='Optional: paired read')\nparser.add_argument('--samtools_idxstats', action='store', dest='samtools_idxstats', help='Output of samtools_idxstats')\nparser.add_argument('--vsnp_azc', action='store', dest='vsnp_azc', help='Output of vsnp_add_zero_coverage')\n\nargs = parser.parse_args()\n\nfastq_files = []\nidxstats_files = []\nmetrics_files = []\n# Accumulate inputs.\nif args.read1 is not None:\n # The inputs are not dataset collections, so\n # read1, read2 (possibly) and vsnp_azc will also\n # not be None.\n fastq_files.append(args.read1)\n idxstats_files.append(args.samtools_idxstats)\n 
metrics_files.append(args.vsnp_azc)\n if args.read2 is not None:\n fastq_files.append(args.read2)\n idxstats_files.append(args.samtools_idxstats)\n metrics_files.append(args.vsnp_azc)\nelse:\n for file_name in sorted(os.listdir(args.input_reads_dir)):\n fastq_files.append(os.path.join(args.input_reads_dir, file_name))\n for file_name in sorted(os.listdir(args.input_idxstats_dir)):\n idxstats_files.append(os.path.join(args.input_idxstats_dir, file_name))\n if args.list_paired:\n # Add the idxstats file for reverse.\n idxstats_files.append(os.path.join(args.input_idxstats_dir, file_name))\n for file_name in sorted(os.listdir(args.input_metrics_dir)):\n metrics_files.append(os.path.join(args.input_metrics_dir, file_name))\n if args.list_paired:\n # Add the metrics file for reverse.\n metrics_files.append(os.path.join(args.input_metrics_dir, file_name))\noutput_statistics(fastq_files, idxstats_files, metrics_files, args.output, args.gzipped, args.dbkey)\n"
] | [
[
"pandas.Series",
"pandas.DataFrame",
"pandas.DataFrame.from_dict",
"pandas.concat",
"numpy.mean"
]
] |
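The output_statistics() routine above samples reads, averages each read's phred scores, and reports the fraction whose mean quality is at least 30. A small self-contained sketch of that bookkeeping with synthetic quality arrays (no FASTQ parsing or Biopython involved):

import numpy as np

phred_per_read = [np.random.randint(20, 41, size=150) for _ in range(1000)]   # synthetic reads
read_means = np.array([q.mean() for q in phred_per_read])                     # per-read mean quality
mean_read_quality = '%.1f' % read_means.mean()
reads_passing_q30 = '{:10.2f}'.format((read_means >= 30).sum() / len(read_means))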
Abhijeet8901/CS231n | [
"c8e715028b453899d5069cdb34faf3fc2959c270"
] | [
"assignment2/cs231n/optim.py"
] | [
"import numpy as np\n\n\"\"\"\nThis file implements various first-order update rules that are commonly used\nfor training neural networks. Each update rule accepts current weights and the\ngradient of the loss with respect to those weights and produces the next set of\nweights. Each update rule has the same interface:\n\ndef update(w, dw, config=None):\n\nInputs:\n - w: A numpy array giving the current weights.\n - dw: A numpy array of the same shape as w giving the gradient of the\n loss with respect to w.\n - config: A dictionary containing hyperparameter values such as learning\n rate, momentum, etc. If the update rule requires caching values over many\n iterations, then config will also hold these cached values.\n\nReturns:\n - next_w: The next point after the update.\n - config: The config dictionary to be passed to the next iteration of the\n update rule.\n\nNOTE: For most update rules, the default learning rate will probably not\nperform well; however the default values of the other hyperparameters should\nwork well for a variety of different problems.\n\nFor efficiency, update rules may perform in-place updates, mutating w and\nsetting next_w equal to w.\n\"\"\"\n\n\ndef sgd(w, dw, config=None):\n \"\"\"\n Performs vanilla stochastic gradient descent.\n\n config format:\n - learning_rate: Scalar learning rate.\n \"\"\"\n if config is None:\n config = {}\n config.setdefault(\"learning_rate\", 1e-2)\n\n w -= config[\"learning_rate\"] * dw\n return w, config\n\n\ndef sgd_momentum(w, dw, config=None):\n \"\"\"\n Performs stochastic gradient descent with momentum.\n\n config format:\n - learning_rate: Scalar learning rate.\n - momentum: Scalar between 0 and 1 giving the momentum value.\n Setting momentum = 0 reduces to sgd.\n - velocity: A numpy array of the same shape as w and dw used to store a\n moving average of the gradients.\n \"\"\"\n if config is None:\n config = {}\n config.setdefault(\"learning_rate\", 1e-2)\n config.setdefault(\"momentum\", 0.9)\n v = config.get(\"velocity\", np.zeros_like(w))\n\n next_w=None\n ###########################################################################\n # TODO: Implement the momentum update formula. Store the updated value in #\n # the next_w variable. You should also use and update the velocity v. 
#\n ###########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n v= config[\"momentum\"]*v - config[\"learning_rate\"]*dw\n next_w=w+v\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n config[\"velocity\"] = v\n\n return next_w, config\n\n\ndef rmsprop(w, dw, config=None):\n \"\"\"\n Uses the RMSProp update rule, which uses a moving average of squared\n gradient values to set adaptive per-parameter learning rates.\n\n config format:\n - learning_rate: Scalar learning rate.\n - decay_rate: Scalar between 0 and 1 giving the decay rate for the squared\n gradient cache.\n - epsilon: Small scalar used for smoothing to avoid dividing by zero.\n - cache: Moving average of second moments of gradients.\n \"\"\"\n if config is None:\n config = {}\n config.setdefault(\"learning_rate\", 1e-2)\n config.setdefault(\"decay_rate\", 0.99)\n config.setdefault(\"epsilon\", 1e-8)\n config.setdefault(\"cache\", np.zeros_like(w))\n\n next_w = None\n ###########################################################################\n # TODO: Implement the RMSprop update formula, storing the next value of w #\n # in the next_w variable. Don't forget to update cache value stored in #\n # config['cache']. #\n ###########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n cache=config[\"cache\"]\n cache=config[\"decay_rate\"]*cache + (1-config[\"decay_rate\"])*dw**2\n w+=(-config[\"learning_rate\"]*dw)/(np.sqrt(cache)+config[\"epsilon\"])\n next_w=w\n config[\"cache\"]=cache\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n\n return next_w, config\n\n\ndef adam(w, dw, config=None):\n \"\"\"\n Uses the Adam update rule, which incorporates moving averages of both the\n gradient and its square and a bias correction term.\n\n config format:\n - learning_rate: Scalar learning rate.\n - beta1: Decay rate for moving average of first moment of gradient.\n - beta2: Decay rate for moving average of second moment of gradient.\n - epsilon: Small scalar used for smoothing to avoid dividing by zero.\n - m: Moving average of gradient.\n - v: Moving average of squared gradient.\n - t: Iteration number.\n \"\"\"\n if config is None:\n config = {}\n config.setdefault(\"learning_rate\", 1e-3)\n config.setdefault(\"beta1\", 0.9)\n config.setdefault(\"beta2\", 0.999)\n config.setdefault(\"epsilon\", 1e-8)\n config.setdefault(\"m\", np.zeros_like(w))\n config.setdefault(\"v\", np.zeros_like(w))\n config.setdefault(\"t\", 0)\n\n next_w = None\n ###########################################################################\n # TODO: Implement the Adam update formula, storing the next value of w in #\n # the next_w variable. Don't forget to update the m, v, and t variables #\n # stored in config. #\n # #\n # NOTE: In order to match the reference output, please modify t _before_ #\n # using it in any calculations. 
#\n ###########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n lr=config[\"learning_rate\"]\n b1,b2,ep=config[\"beta1\"],config[\"beta2\"],config[\"epsilon\"]\n m=config[\"m\"]\n v=config[\"v\"]\n t=config[\"t\"]\n t+=1\n m=b1*m+(1-b1)*dw\n mt=m/(1-b1**t)\n v=b2*v+(1-b2)*dw**2\n vt=v/(1-b2**t)\n w-=(lr*mt)/(np.sqrt(vt)+ep)\n config[\"m\"],config[\"v\"],config[\"t\"]=m,v,t\n next_w=w\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n\n return next_w, config\n"
] | [
[
"numpy.sqrt",
"numpy.zeros_like"
]
] |
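The adam() rule above keeps bias-corrected first- and second-moment estimates in the config dictionary. A self-contained sketch of one update step with hypothetical hyperparameters and a made-up gradient:

import numpy as np

w, dw = np.zeros(4), np.array([0.1, -0.2, 0.3, -0.4])   # weights and stand-in gradient
m, v, t = np.zeros_like(w), np.zeros_like(w), 0
lr, b1, b2, eps = 1e-3, 0.9, 0.999, 1e-8

t += 1                                                   # increment t before the correction terms
m = b1 * m + (1 - b1) * dw                               # first-moment moving average
v = b2 * v + (1 - b2) * dw ** 2                          # second-moment moving average
mt, vt = m / (1 - b1 ** t), v / (1 - b2 ** t)            # bias correction
w = w - lr * mt / (np.sqrt(vt) + eps)                    # parameter update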
modichirag/21cmhod | [
"0807a7b0b880f4ba5bc7161b843d500ddcece5a7"
] | [
"code/distributeHI.py"
] | [
"import numpy as np\nimport re, os\nfrom pmesh.pm import ParticleMesh\nfrom nbodykit.lab import BigFileCatalog, BigFileMesh, MultipleSpeciesCatalog, FFTPower\nfrom nbodykit import setup_logging\nfrom mpi4py import MPI\n\nimport HImodels\n# enable logging, we have some clue what's going on.\nsetup_logging('info')\n\n#Get model as parameter\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument('-s', '--size', help='for small or big box', default='small')\nparser.add_argument('-m', '--model', help='model name to use')\nargs = parser.parse_args()\nif args.model == None:\n print('Specify a model name')\n sys.exit()\n#print(args, args.model)\n\nmodel = args.model #'ModelD'\nboxsize = args.size\n\n\n#\n#\n#Global, fixed things\nscratchyf = '/global/cscratch1/sd/yfeng1/m3127/'\nscratchcm = '/global/cscratch1/sd/chmodi/m3127/H1mass/'\nproject = '/project/projectdirs/m3127/H1mass/'\ncosmodef = {'omegam':0.309167, 'h':0.677, 'omegab':0.048}\nalist = [0.1429,0.1538,0.1667,0.1818,0.2000,0.2222,0.2500,0.2857,0.3333]\n\n\n#Parameters, box size, number of mesh cells, simulation, ...\nif boxsize == 'small':\n bs, nc, ncsim, sim, prefix = 256, 512, 2560, 'highres/%d-9100-fixed'%2560, 'highres'\nelif boxsize == 'big':\n bs, nc, ncsim, sim, prefix = 1024, 1024, 10240, 'highres/%d-9100-fixed'%10240, 'highres'\nelse:\n print('Box size not understood, should be \"big\" or \"small\"')\n sys.exit()\n\n\n# It's useful to have my rank for printing...\npm = ParticleMesh(BoxSize=bs, Nmesh=[nc, nc, nc])\nrank = pm.comm.rank\ncomm = pm.comm\n\n\n#Which model & configuration to use\nmodeldict = {'ModelA':HImodels.ModelA, 'ModelB':HImodels.ModelB, 'ModelC':HImodels.ModelC}\nmodedict = {'ModelA':'galaxies', 'ModelB':'galaxies', 'ModelC':'halos'} \nHImodel = modeldict[model] #HImodels.ModelB\nmodelname = model\nmode = modedict[model]\nofolder = '../data/outputs/'\n\n\n\n\ndef distribution(aa, halocat, cencat, satcat, outfolder, mbins=None):\n '''Compute the fraction of HI in halos, centrals, satellites'''\n\n if rank==0: print('Calculating distribution')\n\n if mbins is None: mbins = np.logspace(9, 15, 100)\n hmass = halocat['Mass'].compute()\n\n\n htotal, hsize, h1total = [], [], []\n for im in range(mbins.size-1):\n mask = (hmass >= mbins[im]) & (hmass < mbins[im+1])\n rankweight = (hmass*mask).sum()\n htotal.append(comm.allreduce(rankweight))\n rankweight = (mask).sum()\n hsize.append(comm.allreduce(rankweight))\n \n h1bin = []\n for cat in [halocat['HImass'], cencat['HImass'], cencat['HIsat']]:\n rankweight = (cat.compute()*mask).sum()\n h1bin.append(comm.allreduce(rankweight))\n h1total.append(h1bin)\n\n \n #\n if rank==0:\n tosave = np.zeros((len(hsize), 5))\n tosave[:, 1] = hsize\n tosave[:, 0] = htotal / (tosave[:, 1])\n tosave[:, 2:] = h1total/ (tosave[:, 1].reshape(-1, 1))\n tosave[np.isnan(tosave)] = 0\n header = 'Halo Mass, Number Halos, HI halos, HI centrals, HI satellites'\n np.savetxt(outfolder + \"HI_dist_{:6.4f}.txt\".format(aa), tosave, fmt='%0.6e', header=header)\n \n \n\nif __name__==\"__main__\":\n if rank==0: print('Starting')\n suff='-m1_00p3mh-alpha-0p8-subvol'\n outfolder = ofolder + suff[1:]\n if bs == 1024: outfolder = outfolder + \"-big\"\n outfolder += \"/%s/\"%modelname\n if rank == 0: print(outfolder)\n #outfolder = ofolder + suff[1:] + \"/%s/\"%modelname\n try: \n os.makedirs(outfolder)\n except : pass\n\n for aa in alist:\n if rank == 0: print('\\n ############## Redshift = %0.2f ############## \\n'%(1/aa-1))\n halocat = BigFileCatalog(scratchyf + sim+ 
'/fastpm_%0.4f//'%aa, dataset='LL-0.200')\n mp = halocat.attrs['MassTable'][1]*1e10##\n halocat['Mass'] = halocat['Length'].compute() * mp\n cencat = BigFileCatalog(scratchcm + sim+'/fastpm_%0.4f/cencat'%aa+suff)\n satcat = BigFileCatalog(scratchcm + sim+'/fastpm_%0.4f/satcat'%aa+suff)\n #\n\n HImodelz = HImodel(aa)\n halocat['HImass'], cencat['HImass'], satcat['HImass'] = HImodelz.assignHI(halocat, cencat, satcat)\n cencat['HIsat'] = HImodelz.getinsat(satcat['HImass'].compute(), satcat['GlobalID'].compute(), \n cencat.csize, cencat['Mass'].size, cencat.comm).local\n \n\n mbins = 10**np.arange(9, 15.1, 0.2)\n distribution(aa, halocat, cencat, satcat, outfolder, mbins=mbins)\n\n"
] | [
[
"numpy.arange",
"numpy.isnan",
"numpy.logspace"
]
] |
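The distribution() helper above accumulates a halo count and summed masses per logarithmic mass bin before MPI-reducing them. A serial sketch of the same masking logic with synthetic halo masses (no nbodykit or MPI):

import numpy as np

hmass = 10 ** np.random.uniform(9, 15, size=10000)       # synthetic halo masses
mbins = 10 ** np.arange(9, 15.1, 0.2)
htotal, hsize = [], []
for im in range(mbins.size - 1):
    mask = (hmass >= mbins[im]) & (hmass < mbins[im + 1])
    htotal.append((hmass * mask).sum())                  # summed mass in the bin
    hsize.append(mask.sum())                             # halo count in the bin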
jveverka/data-lab | [
"c2a43fd2c34520a9d490f29feff3035bdc70c0d6"
] | [
"ml-services/od-yolov3-tf2/yolov3_tf2/utils.py"
] | [
"from absl import logging\nimport numpy as np\nimport tensorflow as tf\nimport cv2\n\nYOLOV3_LAYER_LIST = [\n 'yolo_darknet',\n 'yolo_conv_0',\n 'yolo_output_0',\n 'yolo_conv_1',\n 'yolo_output_1',\n 'yolo_conv_2',\n 'yolo_output_2',\n]\n\nYOLOV3_TINY_LAYER_LIST = [\n 'yolo_darknet',\n 'yolo_conv_0',\n 'yolo_output_0',\n 'yolo_conv_1',\n 'yolo_output_1',\n]\n\n\ndef load_darknet_weights(model, weights_file, tiny=False):\n wf = open(weights_file, 'rb')\n major, minor, revision, seen, _ = np.fromfile(wf, dtype=np.int32, count=5)\n\n if tiny:\n layers = YOLOV3_TINY_LAYER_LIST\n else:\n layers = YOLOV3_LAYER_LIST\n\n for layer_name in layers:\n sub_model = model.get_layer(layer_name)\n for i, layer in enumerate(sub_model.layers):\n if not layer.name.startswith('conv2d'):\n continue\n batch_norm = None\n if i + 1 < len(sub_model.layers) and \\\n sub_model.layers[i + 1].name.startswith('batch_norm'):\n batch_norm = sub_model.layers[i + 1]\n\n logging.info(\"{}/{} {}\".format(\n sub_model.name, layer.name, 'bn' if batch_norm else 'bias'))\n\n filters = layer.filters\n size = layer.kernel_size[0]\n in_dim = layer.input_shape[-1]\n\n if batch_norm is None:\n conv_bias = np.fromfile(wf, dtype=np.float32, count=filters)\n else:\n # darknet [beta, gamma, mean, variance]\n bn_weights = np.fromfile(\n wf, dtype=np.float32, count=4 * filters)\n # tf [gamma, beta, mean, variance]\n bn_weights = bn_weights.reshape((4, filters))[[1, 0, 2, 3]]\n\n # darknet shape (out_dim, in_dim, height, width)\n conv_shape = (filters, in_dim, size, size)\n conv_weights = np.fromfile(\n wf, dtype=np.float32, count=np.product(conv_shape))\n # tf shape (height, width, in_dim, out_dim)\n conv_weights = conv_weights.reshape(\n conv_shape).transpose([2, 3, 1, 0])\n\n if batch_norm is None:\n layer.set_weights([conv_weights, conv_bias])\n else:\n layer.set_weights([conv_weights])\n batch_norm.set_weights(bn_weights)\n\n assert len(wf.read()) == 0, 'failed to read all data'\n wf.close()\n\n\ndef broadcast_iou(box_1, box_2):\n # box_1: (..., (x1, y1, x2, y2))\n # box_2: (N, (x1, y1, x2, y2))\n\n # broadcast boxes\n box_1 = tf.expand_dims(box_1, -2)\n box_2 = tf.expand_dims(box_2, 0)\n # new_shape: (..., N, (x1, y1, x2, y2))\n new_shape = tf.broadcast_dynamic_shape(tf.shape(box_1), tf.shape(box_2))\n box_1 = tf.broadcast_to(box_1, new_shape)\n box_2 = tf.broadcast_to(box_2, new_shape)\n\n int_w = tf.maximum(tf.minimum(box_1[..., 2], box_2[..., 2]) -\n tf.maximum(box_1[..., 0], box_2[..., 0]), 0)\n int_h = tf.maximum(tf.minimum(box_1[..., 3], box_2[..., 3]) -\n tf.maximum(box_1[..., 1], box_2[..., 1]), 0)\n int_area = int_w * int_h\n box_1_area = (box_1[..., 2] - box_1[..., 0]) * \\\n (box_1[..., 3] - box_1[..., 1])\n box_2_area = (box_2[..., 2] - box_2[..., 0]) * \\\n (box_2[..., 3] - box_2[..., 1])\n return int_area / (box_1_area + box_2_area - int_area)\n\n\ndef draw_outputs(img, outputs, class_names):\n boxes, objectness, classes, nums = outputs\n boxes, objectness, classes, nums = boxes[0], objectness[0], classes[0], nums[0]\n wh = np.flip(img.shape[0:2])\n for i in range(nums):\n x1y1 = tuple((np.array(boxes[i][0:2]) * wh).astype(np.int32))\n x2y2 = tuple((np.array(boxes[i][2:4]) * wh).astype(np.int32))\n img = cv2.rectangle(img, x1y1, x2y2, (255, 0, 0), 2)\n img = cv2.putText(img, '{} {:.4f}'.format(\n class_names[int(classes[i])], objectness[i]),\n x1y1, cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)\n return img\n\n\ndef draw_labels(x, y, class_names):\n img = x.numpy()\n boxes, classes = tf.split(y, (4, 1), axis=-1)\n 
classes = classes[..., 0]\n wh = np.flip(img.shape[0:2])\n for i in range(len(boxes)):\n x1y1 = tuple((np.array(boxes[i][0:2]) * wh).astype(np.int32))\n x2y2 = tuple((np.array(boxes[i][2:4]) * wh).astype(np.int32))\n img = cv2.rectangle(img, x1y1, x2y2, (255, 0, 0), 2)\n img = cv2.putText(img, class_names[classes[i]],\n x1y1, cv2.FONT_HERSHEY_COMPLEX_SMALL,\n 1, (0, 0, 255), 2)\n return img\n\n\ndef freeze_all(model, frozen=True):\n model.trainable = not frozen\n if isinstance(model, tf.keras.Model):\n for l in model.layers:\n freeze_all(l, frozen)"
] | [
[
"numpy.fromfile",
"tensorflow.shape",
"tensorflow.broadcast_to",
"tensorflow.minimum",
"tensorflow.expand_dims",
"numpy.product",
"numpy.flip",
"numpy.array",
"tensorflow.split",
"tensorflow.maximum"
]
] |
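load_darknet_weights() above reads flat float32 buffers and reorders them from darknet's (out_dim, in_dim, height, width) layout to TensorFlow's (height, width, in_dim, out_dim). A small sketch of just that reshape/transpose on a synthetic buffer:

import numpy as np

filters, in_dim, size = 32, 3, 3
flat = np.arange(filters * in_dim * size * size, dtype=np.float32)   # fake weight buffer
conv_shape = (filters, in_dim, size, size)                           # darknet layout
conv_weights = flat.reshape(conv_shape).transpose([2, 3, 1, 0])      # TensorFlow layout
assert conv_weights.shape == (size, size, in_dim, filters)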
instance01/qubo-nn | [
"6f8058565f4b6ab4a8300501fc2f67cdaeed482f"
] | [
"qubo_nn/plots/gen_tsne_gen4.py"
] | [
"import pickle\nimport numpy as np\nfrom MulticoreTSNE import MulticoreTSNE as TSNE\nfrom qubo_nn.data import LMDBDataLoader\nfrom qubo_nn.config import Config\n\n\ncfg_id = '27_gen4'\ncfg = Config('../').get_cfg(cfg_id)\ncfg[\"use_big\"] = False\nlmdb_loader = LMDBDataLoader(cfg, reverse=False, base_path='../')\n\nX = []\ny = []\nfor i, data in enumerate(lmdb_loader.train_data_loader):\n if i > 43: # 44 batches á 500 = 22k (from total of 440k), so 5%\n break\n X.extend(data[0].tolist())\n y.extend(data[1].tolist())\n\nX = np.array(X)\nX = X.reshape(-1, 64**2)\nprint(X.shape)\n\nfor i in [10, 20, 30, 50, 70, 100, 200, 500, 1000]:\n tsne = TSNE(\n n_jobs=10,\n n_iter=5000,\n perplexity=i,\n # perplexity=500., # Best.\n verbose=1\n )\n Y = tsne.fit_transform(X)\n\n with open('tsne_gen4_data%d.pickle' % i, 'wb+') as f:\n pickle.dump((Y, y), f)\n"
] | [
[
"numpy.array"
]
] |
OSADP/TCA | [
"25bc1c1db00393cc6b8c6764610bf381494dfcb9"
] | [
"old_versions/TCA_2_2/TCA_V_2_2_1/code/TCASpacePartitioning.py"
] | [
"#standard\nimport unittest\nimport math\n# from collections import OrderedDict\nfrom random import uniform\n\n#external\nimport pandas as pd\nfrom scipy.spatial import KDTree\n\n\n\ndef Find_RSE_range(df, RSEs, minrange):\n\n sub_df = df[['vehicle_ID', 'location_x', 'location_y']]\n\n\n tree = KDTree(sub_df[['location_x', 'location_y']].values)\n rse_points = list(RSEs.RSEListLocations.values())\n locs_index = tree.query_ball_point(rse_points, r=minrange)\n\n #link RSE back to vehicles\n rse_vehicles = {}\n for c, RSE in enumerate(RSEs.RSEListLocations.keys()):\n if len(locs_index[c]) > 0:\n vlist = sub_df.iloc[locs_index[c]]['vehicle_ID'].tolist()\n rse_vehicles[RSE] = vlist\n else:\n rse_vehicles[RSE] = []\n\n return rse_vehicles\n\n\nclass BufferContentCheck(unittest.TestCase):\n def setUp(self):\n pass\n\n def test_whole(self):\n minrange = 4.00\n num_vehicles = 10000\n num_RSE = 30\n\n # Vehicles_loc = {x:(uniform(0, 200), uniform(0, 200)) for x in range(num_vehicles)}\n # df = pd.DataFrame({\n # 'Vid' : ['V' + str(x) for x in Vehicles_loc.keys()],\n # 'x' : [Vehicles_loc[x][0] for x in Vehicles_loc],\n # 'y' : [Vehicles_loc[x][1] for x in Vehicles_loc],\n # })\n # df = df.set_index(['Vid'], drop=False)\n\n # RSEs = OrderedDict({'RSE' + str(x):(uniform(0, 200), uniform(0, 200)) for x in range(num_RSE)})\n\n # rse_info = Find_RSE_range(df, RSEs, minrange)\n\n\n\n\nif __name__ == '__main__':\n unittest.main()\n\n"
] | [
[
"scipy.spatial.KDTree"
]
] |
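Find_RSE_range() above builds a KDTree over vehicle coordinates and uses query_ball_point to collect, for each RSE, the vehicles within communication range. A self-contained sketch with a few synthetic points:

import numpy as np
from scipy.spatial import KDTree

vehicle_xy = np.array([[0.0, 0.0], [1.0, 1.0], [10.0, 10.0]])
rse_xy = [(0.5, 0.5), (9.5, 9.5)]
tree = KDTree(vehicle_xy)
locs_index = tree.query_ball_point(rse_xy, r=2.0)
# locs_index[0] holds indices of vehicles near the first RSE, locs_index[1] near the second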
muntazirabidi/boss-sbi | [
"fae016eb10b64153391499276d238ccdf660df88"
] | [
"bin/make_halo_cnf_data.py"
] | [
"import os\nimport numpy as np \nfrom simbig import halos as Halos\n\nnp.random.seed(918234) \n\ntheta_x_pairs = []\nfor i in range(1000): \n # read in halo catalog\n halos = Halos.Quijote_LHC_HR(i, z=0.5)\n\n # impose random halo mass limit as a proxy for baryonic effect \n Mlim = np.random.uniform(12.5, 13.0)\n\n theta_cosmo = Halos.Quijote_LHC_cosmo(i)\n\n # observable: I'm goign to use Nhalo as a proxy for some observable \n Nhalos = np.sum(np.array(halos['Mass']) > Mlim)\n \n # (parameter, data) pair\n theta_x = np.concatenate([theta_cosmo, [Mlim], [Nhalos]])\n theta_x_pairs.append(theta_x) \n\nnp.save(os.path.join(os.environ['QUIJOTE_DIR'], 'chang', 'halo_cnf_data.npy'), np.array(theta_x_pairs))\n"
] | [
[
"numpy.random.uniform",
"numpy.concatenate",
"numpy.random.seed",
"numpy.array"
]
] |
ludovicdmt/python-meegkit | [
"4aa4ba49354b996be20eda41660a550d1bd31f9a"
] | [
"meegkit/utils/trca.py"
] | [
"\"\"\"TRCA utils.\"\"\"\nimport numpy as np\n\nfrom scipy.signal import filtfilt, cheb1ord, cheby1\nfrom scipy import stats\n\n\ndef round_half_up(num, decimals=0):\n \"\"\"Round half up round the last decimal of the number.\n\n The rules are:\n from 0 to 4 rounds down\n from 5 to 9 rounds up\n\n Parameters\n ----------\n num : float\n Number to round\n decimals : number of decimals\n\n Returns\n -------\n num rounded\n \"\"\"\n multiplier = 10 ** decimals\n return int(np.floor(num * multiplier + 0.5) / multiplier)\n\n\ndef normfit(data, ci=0.95):\n \"\"\"Compute the mean, std and confidence interval for them.\n\n Parameters\n ----------\n data : array, shape=()\n Input data.\n ci : float\n Confidence interval (default=0.95).\n\n Returns\n -------\n m : mean\n sigma : std deviation\n [m - h, m + h] : confidence interval of the mean\n [sigmaCI_lower, sigmaCI_upper] : confidence interval of the std\n \"\"\"\n arr = 1.0 * np.array(data)\n num = len(arr)\n avg, std_err = np.mean(arr), stats.sem(arr)\n h_int = std_err * stats.t.ppf((1 + ci) / 2., num - 1)\n var = np.var(data, ddof=1)\n var_ci_upper = var * (num - 1) / stats.chi2.ppf((1 - ci) / 2, num - 1)\n var_ci_lower = var * (num - 1) / stats.chi2.ppf(1 - (1 - ci) / 2, num - 1)\n sigma = np.sqrt(var)\n sigma_ci_lower = np.sqrt(var_ci_lower)\n sigma_ci_upper = np.sqrt(var_ci_upper)\n\n return avg, sigma, [avg - h_int, avg +\n h_int], [sigma_ci_lower, sigma_ci_upper]\n\n\ndef itr(n, p, t):\n \"\"\"Compute information transfer rate (ITR).\n\n Definition in [1]_.\n\n Parameters\n ----------\n n : int\n Number of targets.\n p : float\n Target identification accuracy (0 <= p <= 1).\n t : float\n Average time for a selection (s).\n\n Returns\n -------\n itr : float\n Information transfer rate [bits/min]\n\n References\n ----------\n .. [1] M. Cheng, X. Gao, S. Gao, and D. Xu,\n \"Design and Implementation of a Brain-Computer Interface With High\n Transfer Rates\", IEEE Trans. Biomed. Eng. 49, 1181-1186, 2002.\n\n \"\"\"\n itr = 0\n\n if (p < 0 or 1 < p):\n raise ValueError('Accuracy need to be between 0 and 1.')\n elif (p < 1 / n):\n raise ValueError('ITR might be incorrect because accuracy < chance')\n itr = 0\n elif (p == 1):\n itr = np.log2(n) * 60 / t\n else:\n itr = (np.log2(n) + p * np.log2(p) + (1 - p) *\n np.log2((1 - p) / (n - 1))) * 60 / t\n\n return itr\n\n\ndef bandpass(eeg, sfreq, Wp, Ws):\n \"\"\"Filter bank design for decomposing EEG data into sub-band components.\n\n Parameters\n ----------\n eeg : np.array, shape=(n_samples, n_chans[, n_trials])\n Training data.\n sfreq : int\n Sampling frequency of the data.\n Wp : 2-tuple\n Passband for Chebyshev filter.\n Ws : 2-tuple\n Stopband for Chebyshev filter.\n\n Returns\n -------\n y: np.array, shape=(n_trials, n_chans, n_samples)\n Sub-band components decomposed by a filter bank.\n\n See Also\n --------\n scipy.signal.cheb1ord :\n Chebyshev type I filter order selection.\n\n \"\"\"\n # Chebyshev type I filter order selection.\n N, Wn = cheb1ord(Wp, Ws, 3, 40, fs=sfreq)\n\n # Chebyshev type I filter design\n B, A = cheby1(N, 0.5, Wn, btype=\"bandpass\", fs=sfreq)\n\n # the arguments 'axis=0, padtype='odd', padlen=3*(max(len(B),len(A))-1)'\n # correspond to Matlab filtfilt : https://dsp.stackexchange.com/a/47945\n y = filtfilt(B, A, eeg, axis=0, padtype='odd',\n padlen=3 * (max(len(B), len(A)) - 1))\n return y\n"
] | [
[
"numpy.log2",
"scipy.signal.cheb1ord",
"scipy.stats.t.ppf",
"scipy.stats.chi2.ppf",
"numpy.var",
"scipy.stats.sem",
"numpy.floor",
"scipy.signal.cheby1",
"numpy.sqrt",
"numpy.array",
"numpy.mean"
]
] |
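The itr() helper above implements the information-transfer-rate formula cited in its docstring. A quick self-contained check with illustrative values (40 targets, 90% accuracy, 4 s per selection), which comes out to roughly 65 bits/min:

import numpy as np

n, p, t = 40, 0.9, 4.0
bits = np.log2(n) + p * np.log2(p) + (1 - p) * np.log2((1 - p) / (n - 1))
print(bits * 60 / t)   # ~64.9 bits/min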
jraman/tensorflow | [
"9028828d3b8a2a622f7203a317002cc749531695"
] | [
"tensorflow/python/framework/ops.py"
] | [
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Classes and functions used to construct graphs.\"\"\"\n# pylint: disable=g-bad-name\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport re\nimport sys\nimport threading\nimport types\n\nimport numpy as np\nimport six\nfrom six.moves import map # pylint: disable=redefined-builtin\nfrom six.moves import xrange # pylint: disable=redefined-builtin\n\nfrom tensorflow.core.framework import attr_value_pb2\nfrom tensorflow.core.framework import function_pb2\nfrom tensorflow.core.framework import graph_pb2\nfrom tensorflow.core.framework import node_def_pb2\nfrom tensorflow.core.framework import op_def_pb2\nfrom tensorflow.core.framework import versions_pb2\nfrom tensorflow.core.protobuf import config_pb2\n# pywrap_tensorflow must be imported first to avoid profobuf issues.\n# (b/143110113)\n# pylint: disable=invalid-import-order,g-bad-import-order,unused-import\nfrom tensorflow.python import pywrap_tensorflow\nfrom tensorflow.python import pywrap_tfe\n# pylint: enable=invalid-import-order,g-bad-import-order,unused-import\nfrom tensorflow.python import tf2\nfrom tensorflow.python.client import pywrap_tf_session\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import core\nfrom tensorflow.python.eager import monitoring\nfrom tensorflow.python.eager import tape\nfrom tensorflow.python.framework import c_api_util\nfrom tensorflow.python.framework import composite_tensor\nfrom tensorflow.python.framework import device as pydev\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import indexed_slices\nfrom tensorflow.python.framework import registry\nfrom tensorflow.python.framework import tensor_conversion_registry\nfrom tensorflow.python.framework import tensor_like\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import traceable_stack\nfrom tensorflow.python.framework import versions\nfrom tensorflow.python.ops import control_flow_util\nfrom tensorflow.python.platform import app\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.util import compat\nfrom tensorflow.python.util import decorator_utils\nfrom tensorflow.python.util import deprecation\nfrom tensorflow.python.util import function_utils\nfrom tensorflow.python.util import lock_util\nfrom tensorflow.python.util import memory\nfrom tensorflow.python.util import object_identity\nfrom tensorflow.python.util import tf_contextlib\nfrom tensorflow.python.util import tf_stack\nfrom tensorflow.python.util.compat import collections_abc\nfrom tensorflow.python.util.deprecation import deprecated_args\nfrom tensorflow.python.util.lazy_loader import LazyLoader\nfrom 
tensorflow.python.util.tf_export import kwarg_only\nfrom tensorflow.python.util.tf_export import tf_export\n\nag_ctx = LazyLoader(\n \"ag_ctx\", globals(),\n \"tensorflow.python.autograph.core.ag_ctx\")\n\n\n# Temporary global switches determining if we should enable the work-in-progress\n# calls to the C API. These will be removed once all functionality is supported.\n_USE_C_API = True\n_USE_C_SHAPES = True\n\n_api_usage_gauge = monitoring.BoolGauge(\n \"/tensorflow/api/ops_eager_execution\",\n \"Whether ops.enable_eager_execution() is called.\")\n\n\n# pylint: disable=protected-access\n_TensorLike = tensor_like._TensorLike\n_DTYPES_INTERN_TABLE = dtypes._INTERN_TABLE\n# pylint: enable=protected-access\n\n\ndef tensor_id(tensor):\n \"\"\"Returns a unique identifier for this Tensor.\"\"\"\n return tensor._id # pylint: disable=protected-access\n\n\nclass _UserDeviceSpec(object):\n \"\"\"Store user-specified device and provide computation of merged device.\"\"\"\n\n def __init__(self, device_name_or_function):\n self._device_name_or_function = device_name_or_function\n self.display_name = str(self._device_name_or_function)\n self.function = device_name_or_function\n self.raw_string = None\n\n if isinstance(device_name_or_function, pydev.MergeDevice):\n self.is_null_merge = device_name_or_function.is_null_merge\n\n elif callable(device_name_or_function):\n self.is_null_merge = False\n dev_func = self._device_name_or_function\n func_name = function_utils.get_func_name(dev_func)\n func_code = function_utils.get_func_code(dev_func)\n if func_code:\n fname = func_code.co_filename\n lineno = func_code.co_firstlineno\n else:\n fname = \"unknown\"\n lineno = -1\n self.display_name = \"%s<%s, %d>\" % (func_name, fname, lineno)\n\n elif device_name_or_function is None:\n # NOTE(taylorrobie): This MUST be False. 
None signals a break in the\n # device stack, so `is_null_merge` must be False for such a case to\n # allow callers to safely skip over null merges without missing a None.\n self.is_null_merge = False\n\n else:\n self.raw_string = device_name_or_function\n self.function = pydev.merge_device(device_name_or_function)\n self.is_null_merge = self.function.is_null_merge\n\n # We perform this check in __init__ because it is of non-trivial cost,\n # and self.string_merge is typically called many times.\n self.fast_string_merge = isinstance(self.function, pydev.MergeDevice)\n\n def string_merge(self, node_def):\n if self.fast_string_merge:\n return self.function.shortcut_string_merge(node_def)\n\n return compat.as_str(_device_string(self.function(node_def)))\n\n\nclass NullContextmanager(object):\n\n def __init__(self, *args, **kwargs):\n pass\n\n def __enter__(self):\n pass\n\n def __exit__(self, type_arg, value_arg, traceback_arg):\n return False # False values do not suppress exceptions\n\n\ndef _override_helper(clazz_object, operator, func):\n \"\"\"Overrides (string) operator on Tensors to call func.\n\n Args:\n clazz_object: the class to override for; either Tensor or SparseTensor.\n operator: the string name of the operator to override.\n func: the function that replaces the overridden operator.\n\n Raises:\n ValueError: If operator has already been overwritten,\n or if operator is not allowed to be overwritten.\n \"\"\"\n existing = getattr(clazz_object, operator, None)\n if existing is not None:\n # Check to see if this is a default method-wrapper or slot wrapper which\n # will be true for the comparison operators.\n if not isinstance(existing, type(object.__lt__)):\n raise ValueError(\"operator %s cannot be overwritten again on class %s.\" %\n (operator, clazz_object))\n if operator not in Tensor.OVERLOADABLE_OPERATORS:\n raise ValueError(\"Overriding %s is disallowed\" % operator)\n setattr(clazz_object, operator, func)\n\n\ndef _as_graph_element(obj):\n \"\"\"Convert `obj` to a graph element if possible, otherwise return `None`.\n\n Args:\n obj: Object to convert.\n\n Returns:\n The result of `obj._as_graph_element()` if that method is available;\n otherwise `None`.\n \"\"\"\n conv_fn = getattr(obj, \"_as_graph_element\", None)\n if conv_fn and callable(conv_fn):\n return conv_fn()\n return None\n\n\n_TENSOR_LIKE_TYPES = tuple()\n\n\ndef is_dense_tensor_like(t):\n \"\"\"EXPERIMENTAL: Returns true if `t` implements the tensor interface.\n\n See `register_dense_tensor_like_type()` for the current definition of a\n \"tensor-like type\".\n\n Args:\n t: An object.\n\n Returns:\n True iff `t` is an instance of one of the registered \"tensor-like\" types.\n \"\"\"\n return isinstance(t, _TENSOR_LIKE_TYPES)\n\n\ndef register_dense_tensor_like_type(tensor_type):\n \"\"\"EXPERIMENTAL: Registers `tensor_type` as implementing the tensor interface.\n\n A \"tensor-like type\" can represent a single dense tensor, and implements\n the `name`, `dtype` and `shape` properties.\n\n Args:\n tensor_type: A type implementing the tensor interface.\n\n Raises:\n TypeError: If `tensor_type` does not implement the tensor interface.\n \"\"\"\n if not (hasattr(tensor_type, \"name\") and\n isinstance(tensor_type.name, property)):\n raise TypeError(\"Type %s does not define a `name` property\" %\n tensor_type.__name__)\n if not (hasattr(tensor_type, \"dtype\") and\n isinstance(tensor_type.dtype, property)):\n raise TypeError(\"Type %s does not define a `dtype` property\" %\n tensor_type.__name__)\n if not 
(hasattr(tensor_type, \"shape\") and\n isinstance(tensor_type.shape, property)):\n raise TypeError(\"Type %s does not define a `shape` property\" %\n tensor_type.__name__)\n # We expect this list to be small, so choose quadratic complexity\n # for registration, so that we have a tuple that can be used for\n # more efficient `isinstance` checks later.\n global _TENSOR_LIKE_TYPES\n _TENSOR_LIKE_TYPES = tuple(list(_TENSOR_LIKE_TYPES) + [tensor_type])\n\n\ndef uid():\n \"\"\"A unique (within this program execution) integer.\"\"\"\n return pywrap_tfe.TFE_Py_UID()\n\n\ndef numpy_text(tensor, is_repr=False):\n \"\"\"Human readable representation of a tensor's numpy value.\"\"\"\n if tensor.dtype.is_numpy_compatible:\n # pylint: disable=protected-access\n text = repr(tensor._numpy()) if is_repr else str(tensor._numpy())\n # pylint: enable=protected-access\n else:\n text = \"<unprintable>\"\n if \"\\n\" in text:\n text = \"\\n\" + text\n return text\n\n@tf_export(v1=[\"enable_tensor_equality\"])\ndef enable_tensor_equality():\n \"\"\"Compare Tensors with element-wise comparison and thus be unhashable.\n\n Comparing tensors with element-wise allows comparisons such as\n tf.Variable(1.0) == 1.0. Element-wise equality implies that tensors are\n unhashable. Thus tensors can no longer be directly used in sets or as a key in\n a dictionary.\n \"\"\"\n Tensor._USE_EQUALITY = True # pylint: disable=protected-access\n\n@tf_export(v1=[\"disable_tensor_equality\"])\ndef disable_tensor_equality():\n \"\"\"Compare Tensors by their id and be hashable.\n\n This is a legacy behaviour of TensorFlow and is highly discouraged.\n \"\"\"\n Tensor._USE_EQUALITY = False # pylint: disable=protected-access\n\n\n@tf_export(\"Tensor\")\nclass Tensor(_TensorLike):\n \"\"\"A tensor represents a rectangular array of data.\n\n When writing a TensorFlow program, the main object you manipulate and pass\n around is the `tf.Tensor`. A `tf.Tensor` object represents a rectangular array\n of arbitrary dimension, filled with data of a specific data type.\n\n A `tf.Tensor` has the following properties:\n\n * a data type (float32, int32, or string, for example)\n * a shape\n\n Each element in the Tensor has the same data type, and the data type is always\n known.\n\n In eager execution, which is the default mode in TensorFlow, results are\n calculated immediately.\n\n >>> # Compute some values using a Tensor\n >>> c = tf.constant([[1.0, 2.0], [3.0, 4.0]])\n >>> d = tf.constant([[1.0, 1.0], [0.0, 1.0]])\n >>> e = tf.matmul(c, d)\n >>> print(e)\n tf.Tensor(\n [[1. 3.]\n [3. 7.]], shape=(2, 2), dtype=float32)\n\n\n Note that during eager execution, you may discover your `Tensors` are actually\n of type `EagerTensor`. This is an internal detail, but it does give you\n access to a useful function, `numpy`:\n\n >>> type(e)\n <class '...ops.EagerTensor'>\n >>> print(e.numpy())\n [[1. 3.]\n [3. 7.]]\n\n TensorFlow can define computations without immediately executing them, most\n commonly inside `tf.function`s, as well as in (legacy) Graph mode. 
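The `enable_tensor_equality` / `disable_tensor_equality` switches above trade element-wise `==` for hashability. Below is a minimal sketch of that trade-off, assuming a TF 2.x install with eager execution (where element-wise equality is the default); it is illustrative, not part of this module.

```python
import tensorflow as tf

x = tf.constant([1, 2, 3])
y = tf.constant([1, 0, 3])

# With tensor equality enabled (the TF2 default), `==` compares element-wise.
print(x == y)        # tf.Tensor([ True False  True], shape=(3,), dtype=bool)

# The flip side: such tensors are unhashable, so they cannot key a dict or set.
try:
    {x: "value"}
except TypeError as err:
    print("unhashable:", err)

# Tensor.ref() (defined later in this module) is the supported way to key a
# dict without flipping the global tf.compat.v1.disable_tensor_equality switch.
print({x.ref(): "value"}[x.ref()])
```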
In those\n cases, the shape (that is, the rank of the Tensor and the size of\n each dimension) might be only partially known.\n\n Most operations produce tensors of fully-known shapes if the shapes of their\n inputs are also fully known, but in some cases it's only possible to find the\n shape of a tensor at execution time.\n\n There are specialized tensors; for these, see `tf.Variable`, `tf.constant`,\n `tf.placeholder`, `tf.SparseTensor`, and `tf.RaggedTensor`.\n\n For more on Tensors, see the [guide](https://tensorflow.org/guide/tensor`).\n \"\"\"\n\n # List of Python operators that we allow to override.\n OVERLOADABLE_OPERATORS = {\n # Binary.\n \"__add__\",\n \"__radd__\",\n \"__sub__\",\n \"__rsub__\",\n \"__mul__\",\n \"__rmul__\",\n \"__div__\",\n \"__rdiv__\",\n \"__truediv__\",\n \"__rtruediv__\",\n \"__floordiv__\",\n \"__rfloordiv__\",\n \"__mod__\",\n \"__rmod__\",\n \"__lt__\",\n \"__le__\",\n \"__gt__\",\n \"__ge__\",\n \"__ne__\",\n \"__eq__\",\n \"__and__\",\n \"__rand__\",\n \"__or__\",\n \"__ror__\",\n \"__xor__\",\n \"__rxor__\",\n \"__getitem__\",\n \"__pow__\",\n \"__rpow__\",\n # Unary.\n \"__invert__\",\n \"__neg__\",\n \"__abs__\",\n \"__matmul__\",\n \"__rmatmul__\"\n }\n\n # Whether to allow hashing or numpy-style equality\n _USE_EQUALITY = tf2.enabled()\n\n def __init__(self, op, value_index, dtype):\n \"\"\"Creates a new `Tensor`.\n\n Args:\n op: An `Operation`. `Operation` that computes this tensor.\n value_index: An `int`. Index of the operation's endpoint that produces\n this tensor.\n dtype: A `DType`. Type of elements stored in this tensor.\n\n Raises:\n TypeError: If the op is not an `Operation`.\n \"\"\"\n if not isinstance(op, Operation):\n raise TypeError(\"op needs to be an Operation: %s\" % op)\n self._op = op\n self._value_index = value_index\n self._dtype = dtypes.as_dtype(dtype)\n # This will be set by self._as_tf_output().\n self._tf_output = None\n # This will be set by self.shape().\n self._shape_val = None\n # List of operations that use this Tensor as input. We maintain this list\n # to easily navigate a computation graph.\n self._consumers = []\n self._id = uid()\n self._name = None\n\n @staticmethod\n def _create_with_tf_output(op, value_index, dtype, tf_output):\n ret = Tensor(op, value_index, dtype)\n ret._tf_output = tf_output\n return ret\n\n @property\n def op(self):\n \"\"\"The `Operation` that produces this tensor as an output.\"\"\"\n return self._op\n\n @property\n def dtype(self):\n \"\"\"The `DType` of elements in this tensor.\"\"\"\n return self._dtype\n\n @property\n def graph(self):\n \"\"\"The `Graph` that contains this tensor.\"\"\"\n return self._op.graph\n\n @property\n def name(self):\n \"\"\"The string name of this tensor.\"\"\"\n if self._name is None:\n if not self._op.name:\n raise ValueError(\"Operation was not named: %s\" % self._op)\n self._name = \"%s:%d\" % (self._op.name, self._value_index)\n return self._name\n\n @property\n def device(self):\n \"\"\"The name of the device on which this tensor will be produced, or None.\"\"\"\n return self._op.device\n\n @property\n def shape(self):\n \"\"\"Returns the `TensorShape` that represents the shape of this tensor.\n\n The shape is computed using shape inference functions that are\n registered in the Op for each `Operation`. See\n `tf.TensorShape`\n for more details of what a shape represents.\n\n The inferred shape of a tensor is used to provide shape\n information without having to execute the underlying kernel. 
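The `op`, `name`, `value_index` and `dtype` properties above encode the "<op name>:<output index>" naming scheme for graph tensors. A small graph-mode sketch (assuming `tf.compat.v1`-style graph construction is available) that exercises them:

```python
import tensorflow as tf

g = tf.Graph()
with g.as_default():
    a = tf.constant([[1.0, 2.0]], name="a")
    b = tf.constant([[3.0], [4.0]], name="b")
    c = tf.matmul(a, b, name="c")

# A graph tensor is named "<producing op>:<output index>", per Tensor.name.
print(c.name)           # 'c:0'
print(c.op.name)        # 'c'
print(c.value_index)    # 0
print(c.dtype)          # <dtype: 'float32'>
print(c.graph is g)     # True
```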
This\n can be used for debugging and providing early error messages. For\n example:\n\n ```python\n >>> c = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\n >>> print(c.shape) # will be TensorShape([2, 3])\n (2, 3)\n\n >>> d = tf.constant([[1.0, 0.0], [0.0, 1.0], [1.0, 0.0], [0.0, 1.0]])\n >>> print(d.shape)\n (4, 2)\n\n # Raises a ValueError, because `c` and `d` do not have compatible\n # inner dimensions.\n >>> e = tf.matmul(c, d)\n Traceback (most recent call last):\n ...\n tensorflow.python.framework.errors_impl.InvalidArgumentError: Matrix\n size-incompatible: In[0]: [2,3], In[1]: [4,2] [Op:MatMul] name: MatMul/\n\n # This works because we have compatible shapes.\n >>> f = tf.matmul(c, d, transpose_a=True, transpose_b=True)\n >>> print(f.shape)\n (3, 4)\n\n ```\n\n In some cases, the inferred shape may have unknown dimensions. If\n the caller has additional information about the values of these\n dimensions, `Tensor.set_shape()` can be used to augment the\n inferred shape.\n\n Returns:\n A `tf.TensorShape` representing the shape of this tensor.\n\n \"\"\"\n if self._shape_val is None:\n self._shape_val = self._c_api_shape()\n return self._shape_val\n\n def _c_api_shape(self):\n \"\"\"Returns the TensorShape of this tensor according to the C API.\"\"\"\n c_graph = self._op._graph._c_graph # pylint: disable=protected-access\n shape_vec, unknown_shape = pywrap_tf_session.TF_GraphGetTensorShapeHelper(\n c_graph, self._as_tf_output())\n if unknown_shape:\n return tensor_shape.unknown_shape()\n else:\n shape_vec = [None if d == -1 else d for d in shape_vec]\n return tensor_shape.TensorShape(shape_vec)\n\n @property\n def _shape(self):\n logging.warning(\"Tensor._shape is private, use Tensor.shape \"\n \"instead. Tensor._shape will eventually be removed.\")\n return self.shape\n\n @_shape.setter\n def _shape(self, value):\n raise ValueError(\n \"Tensor._shape cannot be assigned, use Tensor.set_shape instead.\")\n\n def _disallow_when_autograph_disabled(self, task):\n raise errors.OperatorNotAllowedInGraphError(\n \"{} is not allowed: AutoGraph is disabled in this function.\"\n \" Try decorating it directly with @tf.function.\".format(task))\n\n def _disallow_when_autograph_enabled(self, task):\n raise errors.OperatorNotAllowedInGraphError(\n \"{} is not allowed: AutoGraph did not convert this function. Try\"\n \" decorating it directly with @tf.function.\".format(task))\n\n def _disallow_in_graph_mode(self, task):\n raise errors.OperatorNotAllowedInGraphError(\n \"{} is not allowed in Graph execution. 
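To make the `shape` docstring above concrete: static shape inference runs at graph-construction time, and the `-1` sentinels returned by the C API surface as `None` dimensions. A minimal graph-mode sketch using `tf.compat.v1.placeholder`:

```python
import tensorflow as tf

g = tf.Graph()
with g.as_default():
    # Leave the batch dimension unknown; inference still tracks the rest.
    x = tf.compat.v1.placeholder(tf.float32, shape=[None, 3], name="x")
    w = tf.constant([[1.0], [2.0], [3.0]])   # statically known shape (3, 1)
    y = tf.matmul(x, w)

print(x.shape)            # (None, 3) -- unknown dims come back as None
print(y.shape)            # (None, 1) -- inferred without executing anything
print(y.shape.as_list())  # [None, 1]
```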
Use Eager execution or decorate\"\n \" this function with @tf.function.\".format(task))\n\n def _disallow_bool_casting(self):\n if ag_ctx.control_status_ctx().status == ag_ctx.Status.DISABLED:\n self._disallow_when_autograph_disabled(\n \"using a `tf.Tensor` as a Python `bool`\")\n elif ag_ctx.control_status_ctx().status == ag_ctx.Status.ENABLED:\n self._disallow_when_autograph_enabled(\n \"using a `tf.Tensor` as a Python `bool`\")\n else:\n # Default: V1-style Graph execution.\n self._disallow_in_graph_mode(\"using a `tf.Tensor` as a Python `bool`\")\n\n def _disallow_iteration(self):\n if ag_ctx.control_status_ctx().status == ag_ctx.Status.DISABLED:\n self._disallow_when_autograph_disabled(\"iterating over `tf.Tensor`\")\n elif ag_ctx.control_status_ctx().status == ag_ctx.Status.ENABLED:\n self._disallow_when_autograph_enabled(\"iterating over `tf.Tensor`\")\n else:\n # Default: V1-style Graph execution.\n self._disallow_in_graph_mode(\"iterating over `tf.Tensor`\")\n\n def __iter__(self):\n if not context.executing_eagerly():\n self._disallow_iteration()\n\n shape = self._shape_tuple()\n if shape is None:\n raise TypeError(\"Cannot iterate over a tensor with unknown shape.\")\n if not shape:\n raise TypeError(\"Cannot iterate over a scalar tensor.\")\n if shape[0] is None:\n raise TypeError(\n \"Cannot iterate over a tensor with unknown first dimension.\")\n return _TensorIterator(self, shape[0])\n\n def _shape_as_list(self):\n if self.shape.ndims is not None:\n return [dim.value for dim in self.shape.dims]\n else:\n return None\n\n def _shape_tuple(self):\n shape = self._shape_as_list()\n if shape is None:\n return None\n return tuple(shape)\n\n def _rank(self):\n \"\"\"Integer rank of this Tensor, if known, else None.\n\n Returns:\n Integer rank or None\n \"\"\"\n return self.shape.ndims\n\n def get_shape(self):\n \"\"\"Alias of `tf.Tensor.shape`.\"\"\"\n return self.shape\n\n def set_shape(self, shape):\n \"\"\"Updates the shape of this tensor.\n\n This method can be called multiple times, and will merge the given\n `shape` with the current shape of this tensor. It can be used to\n provide additional information about the shape of this tensor that\n cannot be inferred from the graph alone. For example, this can be used\n to provide additional information about the shapes of images:\n\n ```python\n _, image_data = tf.compat.v1.TFRecordReader(...).read(...)\n image = tf.image.decode_png(image_data, channels=3)\n\n # The height and width dimensions of `image` are data dependent, and\n # cannot be computed without executing the op.\n print(image.shape)\n ==> TensorShape([Dimension(None), Dimension(None), Dimension(3)])\n\n # We know that each image in this dataset is 28 x 28 pixels.\n image.set_shape([28, 28, 3])\n print(image.shape)\n ==> TensorShape([Dimension(28), Dimension(28), Dimension(3)])\n ```\n\n NOTE: This shape is not enforced at runtime. Setting incorrect shapes can\n result in inconsistencies between the statically-known graph and the runtime\n value of tensors. 
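The `set_shape` docstring above is about refining, not enforcing, a static shape. A short sketch of both the merge and the incompatibility check (the `ValueError` is raised at construction time, not at run time):

```python
import tensorflow as tf

g = tf.Graph()
with g.as_default():
    x = tf.compat.v1.placeholder(tf.float32, shape=None)  # fully unknown shape
    print(x.shape)             # <unknown>

    # Merge in caller-side knowledge; this only refines the static shape.
    x.set_shape([28, 28, 3])
    print(x.shape)             # (28, 28, 3)

    # An incompatible refinement is rejected immediately.
    try:
        x.set_shape([10, 10])
    except ValueError as err:
        print("incompatible:", err)
```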
For runtime validation of the shape, use `tf.ensure_shape`\n instead.\n\n Args:\n shape: A `TensorShape` representing the shape of this tensor, a\n `TensorShapeProto`, a list, a tuple, or None.\n\n Raises:\n ValueError: If `shape` is not compatible with the current shape of\n this tensor.\n \"\"\"\n # Reset cached shape.\n self._shape_val = None\n\n # We want set_shape to be reflected in the C API graph for when we run it.\n if not isinstance(shape, tensor_shape.TensorShape):\n shape = tensor_shape.TensorShape(shape)\n dim_list = []\n if shape.dims is None:\n unknown_shape = True\n else:\n unknown_shape = False\n for dim in shape.dims:\n if dim.value is None:\n dim_list.append(-1)\n else:\n dim_list.append(dim.value)\n try:\n pywrap_tf_session.TF_GraphSetTensorShape_wrapper(\n self._op._graph._c_graph, # pylint: disable=protected-access\n self._as_tf_output(),\n dim_list,\n unknown_shape)\n except errors.InvalidArgumentError as e:\n # Convert to ValueError for backwards compatibility.\n raise ValueError(str(e))\n\n @property\n def value_index(self):\n \"\"\"The index of this tensor in the outputs of its `Operation`.\"\"\"\n return self._value_index\n\n def consumers(self):\n \"\"\"Returns a list of `Operation`s that consume this tensor.\n\n Returns:\n A list of `Operation`s.\n \"\"\"\n consumer_names = pywrap_tf_session.TF_OperationOutputConsumers_wrapper(\n self._as_tf_output())\n # pylint: disable=protected-access\n return [\n self.graph._get_operation_by_name_unsafe(name)\n for name in consumer_names\n ]\n # pylint: enable=protected-access\n\n def _as_node_def_input(self):\n \"\"\"Return a value to use for the NodeDef \"input\" attribute.\n\n The returned string can be used in a NodeDef \"input\" attribute\n to indicate that the NodeDef uses this Tensor as input.\n\n Raises:\n ValueError: if this Tensor's Operation does not have a name.\n\n Returns:\n a string.\n \"\"\"\n if not self._op.name:\n raise ValueError(\"Operation was not named: %s\" % self._op)\n if self._value_index == 0:\n return self._op.name\n else:\n return \"%s:%d\" % (self._op.name, self._value_index)\n\n def _as_tf_output(self):\n # pylint: disable=protected-access\n # NOTE: Beyond preventing unnecessary (re-)allocation, the cached object\n # also guarantees that a dictionary of tf_output objects will retain a\n # deterministic (yet unsorted) order which prevents memory blowup in the\n # cache of executor(s) stored for every session.\n if self._tf_output is None:\n self._tf_output = c_api_util.tf_output(self.op._c_op, self.value_index)\n return self._tf_output\n # pylint: enable=protected-access\n\n def __str__(self):\n return \"Tensor(\\\"%s\\\"%s%s%s)\" % (\n self.name,\n (\", shape=%s\" %\n self.get_shape()) if self.get_shape().ndims is not None else \"\",\n (\", dtype=%s\" % self._dtype.name) if self._dtype else \"\",\n (\", device=%s\" % self.device) if self.device else \"\")\n\n def __repr__(self):\n return \"<tf.Tensor '%s' shape=%s dtype=%s>\" % (self.name, self.get_shape(),\n self._dtype.name)\n\n def __hash__(self):\n g = getattr(self, \"graph\", None)\n if (Tensor._USE_EQUALITY and executing_eagerly_outside_functions() and\n (g is None or g.building_function)):\n raise TypeError(\"Tensor is unhashable. 
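`consumers()` together with the `op` property makes it possible to walk the graph outward from a tensor, and `__str__` reports only statically known metadata. A minimal graph-mode sketch:

```python
import tensorflow as tf

g = tf.Graph()
with g.as_default():
    a = tf.constant(1.0, name="a")
    b = tf.add(a, 2.0, name="b")
    c = tf.multiply(a, 3.0, name="c")

# Every op that takes `a` as an input shows up in a.consumers().
print([op.name for op in a.consumers()])   # ['b', 'c']

# __str__ reports name, shape and dtype without running anything.
print(a)   # Tensor("a:0", shape=(), dtype=float32)
```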
\"\n \"Instead, use tensor.ref() as the key.\")\n else:\n return id(self)\n\n def __copy__(self):\n # TODO(b/77597810): get rid of Tensor copies.\n cls = self.__class__\n result = cls.__new__(cls)\n result.__dict__.update(self.__dict__)\n return result\n\n # NOTE(mrry): This enables the Tensor's overloaded \"right\" binary\n # operators to run when the left operand is an ndarray, because it\n # accords the Tensor class higher priority than an ndarray, or a\n # numpy matrix.\n # TODO(mrry): Convert this to using numpy's __numpy_ufunc__\n # mechanism, which allows more control over how Tensors interact\n # with ndarrays.\n __array_priority__ = 100\n\n def __array__(self):\n raise NotImplementedError(\"Cannot convert a symbolic Tensor ({}) to a numpy\"\n \" array.\".format(self.name))\n\n def __len__(self):\n raise TypeError(\"len is not well defined for symbolic Tensors. ({}) \"\n \"Please call `x.shape` rather than `len(x)` for \"\n \"shape information.\".format(self.name))\n\n @staticmethod\n def _override_operator(operator, func):\n _override_helper(Tensor, operator, func)\n\n def __bool__(self):\n \"\"\"Dummy method to prevent a tensor from being used as a Python `bool`.\n\n This overload raises a `TypeError` when the user inadvertently\n treats a `Tensor` as a boolean (most commonly in an `if` or `while`\n statement), in code that was not converted by AutoGraph. For example:\n\n ```python\n if tf.constant(True): # Will raise.\n # ...\n\n if tf.constant(5) < tf.constant(7): # Will raise.\n # ...\n ```\n\n Raises:\n `TypeError`.\n \"\"\"\n self._disallow_bool_casting()\n\n def __nonzero__(self):\n \"\"\"Dummy method to prevent a tensor from being used as a Python `bool`.\n\n This is the Python 2.x counterpart to `__bool__()` above.\n\n Raises:\n `TypeError`.\n \"\"\"\n self._disallow_bool_casting()\n\n def eval(self, feed_dict=None, session=None):\n \"\"\"Evaluates this tensor in a `Session`.\n\n Note: If you are not using `compat.v1` libraries, you should not need this,\n (or `feed_dict` or `Session`). In eager execution (or within `tf.function`)\n you do not need to call `eval`.\n\n Calling this method will execute all preceding operations that\n produce the inputs needed for the operation that produces this\n tensor.\n\n *N.B.* Before invoking `Tensor.eval()`, its graph must have been\n launched in a session, and either a default session must be\n available, or `session` must be specified explicitly.\n\n Args:\n feed_dict: A dictionary that maps `Tensor` objects to feed values. See\n `tf.Session.run` for a description of the valid feed values.\n session: (Optional.) The `Session` to be used to evaluate this tensor. If\n none, the default session will be used.\n\n Returns:\n A numpy array corresponding to the value of this tensor.\n \"\"\"\n return _eval_using_default_session(self, feed_dict, self.graph, session)\n\n @deprecation.deprecated(None, \"Use ref() instead.\")\n def experimental_ref(self):\n return self.ref()\n\n def ref(self):\n # tf.Variable also has the same ref() API. 
If you update the\n # documentation here, please update tf.Variable.ref() as well.\n \"\"\"Returns a hashable reference object to this Tensor.\n\n The primary use case for this API is to put tensors in a set/dictionary.\n We can't put tensors in a set/dictionary as `tensor.__hash__()` is no longer\n available starting Tensorflow 2.0.\n\n The following will raise an exception starting 2.0\n\n >>> x = tf.constant(5)\n >>> y = tf.constant(10)\n >>> z = tf.constant(10)\n >>> tensor_set = {x, y, z}\n Traceback (most recent call last):\n ...\n TypeError: Tensor is unhashable. Instead, use tensor.ref() as the key.\n >>> tensor_dict = {x: 'five', y: 'ten'}\n Traceback (most recent call last):\n ...\n TypeError: Tensor is unhashable. Instead, use tensor.ref() as the key.\n\n Instead, we can use `tensor.ref()`.\n\n >>> tensor_set = {x.ref(), y.ref(), z.ref()}\n >>> x.ref() in tensor_set\n True\n >>> tensor_dict = {x.ref(): 'five', y.ref(): 'ten', z.ref(): 'ten'}\n >>> tensor_dict[y.ref()]\n 'ten'\n\n Also, the reference object provides `.deref()` function that returns the\n original Tensor.\n\n >>> x = tf.constant(5)\n >>> x.ref().deref()\n <tf.Tensor: shape=(), dtype=int32, numpy=5>\n \"\"\"\n return object_identity.Reference(self)\n\n\n# TODO(agarwal): consider getting rid of this.\nclass _EagerTensorBase(Tensor):\n \"\"\"Base class for EagerTensor.\"\"\"\n\n # __complex__, __int__, __float__ and __index__ may copy the tensor to CPU and\n # only work for scalars; values are cast as per numpy.\n def __complex__(self):\n return complex(self._numpy())\n\n def __int__(self):\n return int(self._numpy())\n\n def __long__(self):\n return long(self._numpy())\n\n def __float__(self):\n return float(self._numpy())\n\n def __index__(self):\n return self._numpy().__index__()\n\n def __bool__(self):\n return bool(self._numpy())\n\n __nonzero__ = __bool__\n\n def __format__(self, format_spec):\n return self._numpy().__format__(format_spec)\n\n def __reduce__(self):\n return convert_to_tensor, (self._numpy(),)\n\n def __copy__(self):\n # Eager Tensors are immutable so it's safe to return themselves as a copy.\n return self\n\n def __deepcopy__(self, memo):\n # Eager Tensors are immutable so it's safe to return themselves as a copy.\n del memo\n return self\n\n def __str__(self):\n return \"tf.Tensor(%s, shape=%s, dtype=%s)\" % (numpy_text(self), self.shape,\n self.dtype.name)\n\n def __repr__(self):\n return \"<tf.Tensor: shape=%s, dtype=%s, numpy=%s>\" % (\n self.shape, self.dtype.name, numpy_text(self, is_repr=True))\n\n def __len__(self):\n \"\"\"Returns the length of the first dimension in the Tensor.\"\"\"\n if not self.shape.ndims:\n raise TypeError(\"Scalar tensor has no `len()`\")\n # pylint: disable=protected-access\n try:\n return self._shape_tuple()[0]\n except core._NotOkStatusException as e:\n six.raise_from(core._status_to_exception(e.code, e.message), None)\n\n def _numpy_internal(self):\n raise NotImplementedError()\n\n def _numpy(self):\n # pylint: disable=protected-access\n try:\n return self._numpy_internal()\n except core._NotOkStatusException as e:\n six.raise_from(core._status_to_exception(e.code, e.message), None)\n\n @property\n def dtype(self):\n # Note: using the intern table directly here as this is\n # performance-sensitive in some models.\n return dtypes._INTERN_TABLE[self._datatype_enum()] # pylint: disable=protected-access\n\n def numpy(self):\n \"\"\"Copy of the contents of this Tensor into a NumPy array or scalar.\n\n Unlike NumPy arrays, Tensors are immutable, so this method has 
to copy\n the contents to ensure safety. Use `memoryview` to get a readonly\n view of the contents without doing a copy:\n\n >>> t = tf.constant([42])\n >>> np.array(memoryview(t))\n array([42], dtype=int32)\n\n Note that `memoryview` is only zero-copy for Tensors on CPU. If a Tensor\n is on GPU, it will have to be transferred to CPU first in order for\n `memoryview` to work.\n\n Returns:\n A NumPy array of the same shape and dtype or a NumPy scalar, if this\n Tensor has rank 0.\n\n Raises:\n ValueError: If the dtype of this Tensor does not have a compatible\n NumPy dtype.\n \"\"\"\n # TODO(slebedev): Consider avoiding a copy for non-CPU or remote tensors.\n maybe_arr = self._numpy() # pylint: disable=protected-access\n return maybe_arr.copy() if isinstance(maybe_arr, np.ndarray) else maybe_arr\n\n @property\n def backing_device(self):\n \"\"\"Returns the name of the device holding this tensor's memory.\n\n `.backing_device` is usually the same as `.device`, which returns\n the device on which the kernel of the operation that produced this tensor\n ran. However, some operations can produce tensors on a different device\n (e.g., an operation that executes on the GPU but produces output tensors\n in host memory).\n \"\"\"\n raise NotImplementedError()\n\n def _datatype_enum(self):\n raise NotImplementedError()\n\n def _shape_tuple(self):\n \"\"\"The shape of this Tensor, as a tuple.\n\n This is more performant than tuple(shape().as_list()) as it avoids\n two list and one object creation. Marked private for now as from an API\n perspective, it would be better to have a single performant way of\n getting a shape rather than exposing shape() and shape_tuple()\n (and heaven forbid, shape_list() etc. as well!). Punting on that for now,\n but ideally one would work things out and remove the need for this method.\n\n Returns:\n tuple with the shape.\n \"\"\"\n raise NotImplementedError()\n\n def _rank(self):\n \"\"\"Integer rank of this Tensor.\n\n Unlike regular Tensors, the rank is always known for EagerTensors.\n\n This is more performant than len(self._shape_tuple())\n\n Returns:\n Integer rank\n \"\"\"\n raise NotImplementedError()\n\n def _num_elements(self):\n \"\"\"Number of elements of this Tensor.\n\n Unlike regular Tensors, the number of elements is always known for\n EagerTensors.\n\n This is more performant than tensor.shape.num_elements\n\n Returns:\n Long - num elements in the tensor\n \"\"\"\n raise NotImplementedError()\n\n def _copy_to_device(self, device_name): # pylint: disable=redefined-outer-name\n raise NotImplementedError()\n\n @staticmethod\n def _override_operator(name, func):\n setattr(_EagerTensorBase, name, func)\n\n def _copy_nograd(self, ctx=None, device_name=None):\n \"\"\"Copies tensor to dest device, but doesn't record the operation.\"\"\"\n # Creates a new tensor on the dest device.\n if ctx is None:\n ctx = context.context()\n if device_name is None:\n device_name = ctx.device_name\n # pylint: disable=protected-access\n try:\n ctx.ensure_initialized()\n new_tensor = self._copy_to_device(device_name)\n except core._NotOkStatusException as e:\n six.raise_from(core._status_to_exception(e.code, e.message), None)\n return new_tensor\n\n def _copy(self, ctx=None, device_name=None):\n \"\"\"Copies tensor to dest device.\"\"\"\n new_tensor = self._copy_nograd(ctx, device_name)\n # Record the copy on tape and define backprop copy as well.\n if context.executing_eagerly():\n self_device = self.device\n\n def grad_fun(dresult):\n return [\n 
dresult._copy(device_name=self_device)\n if hasattr(dresult, \"_copy\") else dresult\n ]\n\n tape.record_operation(\"_copy\", [new_tensor], [self], grad_fun)\n return new_tensor\n # pylint: enable=protected-access\n\n @property\n def shape(self):\n if self._tensor_shape is None: # pylint: disable=access-member-before-definition\n # pylint: disable=protected-access\n try:\n # `_tensor_shape` is declared and defined in the definition of\n # `EagerTensor`, in C.\n self._tensor_shape = tensor_shape.TensorShape(self._shape_tuple())\n except core._NotOkStatusException as e:\n six.raise_from(core._status_to_exception(e.code, e.message), None)\n\n return self._tensor_shape\n\n def get_shape(self):\n \"\"\"Alias of Tensor.shape.\"\"\"\n return self.shape\n\n def _shape_as_list(self):\n \"\"\"The shape of the tensor as a list.\"\"\"\n return list(self._shape_tuple())\n\n @property\n def ndim(self):\n \"\"\"Returns the number of Tensor dimensions.\"\"\"\n return self.shape.ndims\n\n @deprecation.deprecated(None, \"Use tf.identity instead.\")\n def cpu(self):\n \"\"\"A copy of this Tensor with contents backed by host memory.\"\"\"\n return self._copy(context.context(), \"CPU:0\")\n\n @deprecation.deprecated(None, \"Use tf.identity instead.\")\n def gpu(self, gpu_index=0):\n \"\"\"A copy of this Tensor with contents backed by memory on the GPU.\n\n Arguments:\n gpu_index: Identifies which GPU to place the contents on the returned\n Tensor in.\n\n Returns:\n A GPU-memory backed Tensor object initialized with the same contents\n as this Tensor.\n \"\"\"\n return self._copy(context.context(), \"GPU:\" + str(gpu_index))\n\n def set_shape(self, shape):\n if not self.shape.is_compatible_with(shape):\n raise ValueError(\n \"Tensor's shape %s is not compatible with supplied shape %s\" %\n (self.shape, shape))\n\n # Methods not supported / implemented for Eager Tensors.\n @property\n def op(self):\n raise AttributeError(\n \"Tensor.op is meaningless when eager execution is enabled.\")\n\n @property\n def graph(self):\n raise AttributeError(\n \"Tensor.graph is meaningless when eager execution is enabled.\")\n\n @property\n def name(self):\n raise AttributeError(\n \"Tensor.name is meaningless when eager execution is enabled.\")\n\n @property\n def value_index(self):\n raise AttributeError(\n \"Tensor.value_index is meaningless when eager execution is enabled.\")\n\n def consumers(self):\n raise NotImplementedError(\n \"Tensor.consumers is meaningless when eager execution is enabled.\")\n\n def _add_consumer(self, consumer):\n raise NotImplementedError(\n \"_add_consumer not supported when eager execution is enabled.\")\n\n def _as_node_def_input(self):\n raise NotImplementedError(\n \"_as_node_def_input not supported when eager execution is enabled.\")\n\n def _as_tf_output(self):\n raise NotImplementedError(\n \"_as_tf_output not supported when eager execution is enabled.\")\n\n def eval(self, feed_dict=None, session=None):\n raise NotImplementedError(\n \"eval is not supported when eager execution is enabled, \"\n \"is .numpy() what you're looking for?\")\n\n\n# This call creates an EagerTensor class, as a subclass of _EagerTensorBase, and\n# registers it with the current module.\nEagerTensor = pywrap_tfe.TFE_Py_InitEagerTensor(_EagerTensorBase)\n\n\nregister_dense_tensor_like_type(Tensor)\n\n\n@tf_export(v1=[\"convert_to_tensor\"])\ndef convert_to_tensor_v1(value,\n dtype=None,\n name=None,\n preferred_dtype=None,\n dtype_hint=None):\n \"\"\"Converts the given `value` to a `Tensor`.\n\n This function 
converts Python objects of various types to `Tensor`\n objects. It accepts `Tensor` objects, numpy arrays, Python lists,\n and Python scalars. For example:\n\n ```python\n import numpy as np\n\n def my_func(arg):\n arg = tf.convert_to_tensor(arg, dtype=tf.float32)\n return tf.matmul(arg, arg) + arg\n\n # The following calls are equivalent.\n value_1 = my_func(tf.constant([[1.0, 2.0], [3.0, 4.0]]))\n value_2 = my_func([[1.0, 2.0], [3.0, 4.0]])\n value_3 = my_func(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32))\n ```\n\n This function can be useful when composing a new operation in Python\n (such as `my_func` in the example above). All standard Python op\n constructors apply this function to each of their Tensor-valued\n inputs, which allows those ops to accept numpy arrays, Python lists,\n and scalars in addition to `Tensor` objects.\n\n Note: This function diverges from default Numpy behavior for `float` and\n `string` types when `None` is present in a Python list or scalar. Rather\n than silently converting `None` values, an error will be thrown.\n\n Args:\n value: An object whose type has a registered `Tensor` conversion function.\n dtype: Optional element type for the returned tensor. If missing, the type\n is inferred from the type of `value`.\n name: Optional name to use if a new `Tensor` is created.\n preferred_dtype: Optional element type for the returned tensor, used when\n dtype is None. In some cases, a caller may not have a dtype in mind when\n converting to a tensor, so preferred_dtype can be used as a soft\n preference. If the conversion to `preferred_dtype` is not possible, this\n argument has no effect.\n dtype_hint: same meaning as preferred_dtype, and overrides it.\n\n Returns:\n A `Tensor` based on `value`.\n\n Raises:\n TypeError: If no conversion function is registered for `value` to `dtype`.\n RuntimeError: If a registered conversion function returns an invalid value.\n ValueError: If the `value` is a tensor not of given `dtype` in graph mode.\n \"\"\"\n preferred_dtype = deprecation.deprecated_argument_lookup(\n \"dtype_hint\", dtype_hint, \"preferred_dtype\", preferred_dtype)\n return convert_to_tensor_v2(value, dtype, preferred_dtype, name)\n\n\n@tf_export(\"convert_to_tensor\", v1=[])\ndef convert_to_tensor_v2(value, dtype=None, dtype_hint=None, name=None):\n \"\"\"Converts the given `value` to a `Tensor`.\n\n This function converts Python objects of various types to `Tensor`\n objects. It accepts `Tensor` objects, numpy arrays, Python lists,\n and Python scalars. For example:\n\n >>> def my_func(arg):\n ... arg = tf.convert_to_tensor(arg, dtype=tf.float32)\n ... return arg\n\n >>> # The following calls are equivalent.\n >>> value_1 = my_func(tf.constant([[1.0, 2.0], [3.0, 4.0]]))\n >>> print(value_1)\n tf.Tensor(\n [[1. 2.]\n [3. 4.]], shape=(2, 2), dtype=float32)\n >>> value_2 = my_func([[1.0, 2.0], [3.0, 4.0]])\n >>> print(value_2)\n tf.Tensor(\n [[1. 2.]\n [3. 4.]], shape=(2, 2), dtype=float32)\n >>> value_3 = my_func(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32))\n >>> print(value_3)\n tf.Tensor(\n [[1. 2.]\n [3. 4.]], shape=(2, 2), dtype=float32)\n\n This function can be useful when composing a new operation in Python\n (such as `my_func` in the example above). 
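The `dtype` vs `dtype_hint`/`preferred_dtype` distinction described above is easiest to see side by side: the hint is applied when it can be honoured and silently dropped otherwise, while `dtype` is a hard requirement. A small eager-mode sketch:

```python
import tensorflow as tf

# The hint is honoured when the conversion is lossless.
print(tf.convert_to_tensor([1, 2, 3], dtype_hint=tf.float32).dtype)   # float32

# ...and ignored when it cannot be (floats won't silently become ints).
print(tf.convert_to_tensor([1.5, 2.5], dtype_hint=tf.int32).dtype)    # float32

# `dtype`, by contrast, is strict and fails loudly.
try:
    tf.convert_to_tensor([1.5, 2.5], dtype=tf.int32)
except (TypeError, ValueError) as err:
    print("strict dtype rejected:", err)
```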
All standard Python op\n constructors apply this function to each of their Tensor-valued\n inputs, which allows those ops to accept numpy arrays, Python lists,\n and scalars in addition to `Tensor` objects.\n\n Note: This function diverges from default Numpy behavior for `float` and\n `string` types when `None` is present in a Python list or scalar. Rather\n than silently converting `None` values, an error will be thrown.\n\n Args:\n value: An object whose type has a registered `Tensor` conversion function.\n dtype: Optional element type for the returned tensor. If missing, the type\n is inferred from the type of `value`.\n dtype_hint: Optional element type for the returned tensor, used when dtype\n is None. In some cases, a caller may not have a dtype in mind when\n converting to a tensor, so dtype_hint can be used as a soft preference.\n If the conversion to `dtype_hint` is not possible, this argument has no\n effect.\n name: Optional name to use if a new `Tensor` is created.\n\n Returns:\n A `Tensor` based on `value`.\n\n Raises:\n TypeError: If no conversion function is registered for `value` to `dtype`.\n RuntimeError: If a registered conversion function returns an invalid value.\n ValueError: If the `value` is a tensor not of given `dtype` in graph mode.\n \"\"\"\n return convert_to_tensor(\n value=value,\n dtype=dtype,\n name=name,\n preferred_dtype=dtype_hint,\n as_ref=False)\n\n\ndef _error_prefix(name):\n return \"\" if name is None else \"%s: \" % name\n\n\ndef convert_to_tensor(value,\n dtype=None,\n name=None,\n as_ref=False,\n preferred_dtype=None,\n dtype_hint=None,\n ctx=None,\n accepted_result_types=(Tensor,)):\n \"\"\"Implementation of the public convert_to_tensor.\"\"\"\n # TODO(b/142518781): Fix all call-sites and remove redundant arg\n preferred_dtype = preferred_dtype or dtype_hint\n if isinstance(value, EagerTensor):\n if ctx is None:\n ctx = context.context()\n if not ctx.executing_eagerly():\n graph = get_default_graph()\n if not graph.building_function:\n raise RuntimeError(\"Attempting to capture an EagerTensor without \"\n \"building a function.\")\n return graph.capture(value, name=name)\n\n if dtype is not None:\n dtype = dtypes.as_dtype(dtype)\n if isinstance(value, Tensor):\n if dtype is not None and not dtype.is_compatible_with(value.dtype):\n raise ValueError(\n \"Tensor conversion requested dtype %s for Tensor with dtype %s: %r\" %\n (dtype.name, value.dtype.name, value))\n return value\n\n if preferred_dtype is not None:\n preferred_dtype = dtypes.as_dtype(preferred_dtype)\n for base_type, conversion_func in tensor_conversion_registry.get(type(value)):\n # If dtype is None but preferred_dtype is not None, we try to\n # cast to preferred_dtype first.\n ret = None\n if dtype is None and preferred_dtype is not None:\n try:\n ret = conversion_func(\n value, dtype=preferred_dtype, name=name, as_ref=as_ref)\n except (TypeError, ValueError):\n # Could not coerce the conversion to use the preferred dtype.\n pass\n else:\n if (ret is not NotImplemented and\n ret.dtype.base_dtype != preferred_dtype.base_dtype):\n raise TypeError(\"convert_to_tensor did not convert to \"\n \"the preferred dtype: %s vs %s \" %\n (ret.dtype.base_dtype, preferred_dtype.base_dtype))\n\n if ret is None:\n ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)\n\n if ret is NotImplemented:\n continue\n\n if not isinstance(ret, accepted_result_types):\n raise RuntimeError(\n \"%sConversion function %r for type %s returned non-Tensor: %r\" %\n (_error_prefix(name), 
conversion_func, base_type, ret))\n if dtype and not dtype.is_compatible_with(ret.dtype):\n raise RuntimeError(\n \"%sConversion function %r for type %s returned incompatible \"\n \"dtype: requested = %s, actual = %s\" %\n (_error_prefix(name), conversion_func, base_type, dtype.name,\n ret.dtype.name))\n return ret\n raise TypeError(\"%sCannot convert %r with type %s to Tensor: \"\n \"no conversion function registered.\" %\n (_error_prefix(name), value, type(value)))\n\n\ninternal_convert_to_tensor = convert_to_tensor\n\n\ndef internal_convert_n_to_tensor(values,\n dtype=None,\n name=None,\n as_ref=False,\n preferred_dtype=None,\n ctx=None):\n \"\"\"Converts `values` to a list of `Tensor` objects.\n\n Args:\n values: A list of objects that can be consumed by `tf.convert_to_tensor()`.\n dtype: (Optional.) The required `DType` of the returned `Tensor` objects.\n name: (Optional.) A name prefix to used when a new `Tensor` is created, in\n which case element `i` will be given the name `name + '_' + i`.\n as_ref: True if the caller wants the results as ref tensors.\n preferred_dtype: Optional element type for the returned tensors, used when\n dtype is None. In some cases, a caller may not have a dtype in mind when\n converting to a tensor, so preferred_dtype can be used as a soft\n preference. If the conversion to `preferred_dtype` is not possible, this\n argument has no effect.\n ctx: The value of context.context().\n\n Returns:\n A list of `Tensor` and/or `IndexedSlices` objects.\n\n Raises:\n TypeError: If no conversion function is registered for an element in\n `values`.\n RuntimeError: If a registered conversion function returns an invalid\n value.\n \"\"\"\n if not isinstance(values, collections_abc.Sequence):\n raise TypeError(\"values must be a sequence.\")\n ret = []\n if ctx is None:\n ctx = context.context()\n for i, value in enumerate(values):\n n = None if name is None else \"%s_%d\" % (name, i)\n ret.append(\n convert_to_tensor(\n value,\n dtype=dtype,\n name=n,\n as_ref=as_ref,\n preferred_dtype=preferred_dtype,\n ctx=ctx))\n return ret\n\n\ndef convert_n_to_tensor(values, dtype=None, name=None, preferred_dtype=None):\n \"\"\"Converts `values` to a list of `Tensor` objects.\n\n Args:\n values: A list of objects that can be consumed by `tf.convert_to_tensor()`.\n dtype: (Optional.) The required `DType` of the returned `Tensor` objects.\n name: (Optional.) A name prefix to used when a new `Tensor` is created, in\n which case element `i` will be given the name `name + '_' + i`.\n preferred_dtype: Optional element type for the returned tensors, used when\n dtype is None. In some cases, a caller may not have a dtype in mind when\n converting to a tensor, so preferred_dtype can be used as a soft\n preference. If the conversion to `preferred_dtype` is not possible, this\n argument has no effect.\n\n Returns:\n A list of `Tensor` and/or `IndexedSlices` objects.\n\n Raises:\n TypeError: If no conversion function is registered for an element in\n `values`.\n RuntimeError: If a registered conversion function returns an invalid\n value.\n \"\"\"\n return internal_convert_n_to_tensor(\n values=values,\n dtype=dtype,\n name=name,\n preferred_dtype=preferred_dtype,\n as_ref=False)\n\n\ndef convert_to_tensor_or_composite(value, dtype=None, name=None):\n \"\"\"Converts the given object to a `Tensor` or `CompositeTensor`.\n\n If `value` is a `CompositeTensor` it is returned unmodified. 
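The loop above walks the registered conversion functions for `type(value)` in priority order, skipping any that return `NotImplemented`. The public hook that feeds that registry is `tf.register_tensor_conversion_function`; the `Celsius` class and `_celsius_to_tensor` function below are invented purely for illustration:

```python
import tensorflow as tf

class Celsius(object):
    """Toy value type used to illustrate the conversion registry."""

    def __init__(self, degrees):
        self.degrees = degrees

def _celsius_to_tensor(value, dtype=None, name=None, as_ref=False):
    # Must accept the (value, dtype, name, as_ref) signature used by the loop
    # above; returning NotImplemented would defer to lower-priority functions.
    del as_ref
    return tf.constant(value.degrees, dtype=dtype or tf.float32, name=name)

tf.register_tensor_conversion_function(Celsius, _celsius_to_tensor)

t = tf.convert_to_tensor(Celsius(21.5))
print(t.dtype, t.numpy())   # <dtype: 'float32'> 21.5
```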
Otherwise, it\n is converted to a `Tensor` using `convert_to_tensor()`.\n\n Args:\n value: A `CompositeTensor` or an object that can be consumed by\n `convert_to_tensor()`.\n dtype: (Optional.) The required `DType` of the returned `Tensor` or\n `CompositeTensor`.\n name: (Optional.) A name to use if a new `Tensor` is created.\n\n Returns:\n A `Tensor` or `CompositeTensor`, based on `value`.\n\n Raises:\n ValueError: If `dtype` does not match the element type of `value`.\n \"\"\"\n return internal_convert_to_tensor_or_composite(\n value=value, dtype=dtype, name=name, as_ref=False)\n\n\ndef internal_convert_to_tensor_or_composite(value,\n dtype=None,\n name=None,\n as_ref=False):\n \"\"\"Converts the given object to a `Tensor` or `CompositeTensor`.\n\n If `value` is a `CompositeTensor` it is returned unmodified. Otherwise, it\n is converted to a `Tensor` using `convert_to_tensor()`.\n\n Args:\n value: A `CompositeTensor`, or an object that can be consumed by\n `convert_to_tensor()`.\n dtype: (Optional.) The required `DType` of the returned `Tensor` or\n `CompositeTensor`.\n name: (Optional.) A name to use if a new `Tensor` is created.\n as_ref: True if the caller wants the results as ref tensors.\n\n Returns:\n A `Tensor` or `CompositeTensor`, based on `value`.\n\n Raises:\n ValueError: If `dtype` does not match the element type of `value`.\n \"\"\"\n if isinstance(value, composite_tensor.CompositeTensor):\n value_dtype = getattr(value, \"dtype\", None)\n if dtype and not dtypes.as_dtype(dtype).is_compatible_with(value_dtype):\n raise ValueError(\n \"Tensor conversion requested dtype %s for Tensor with dtype %s: %r\" %\n (dtypes.as_dtype(dtype).name, value.dtype.name, str(value)))\n return value\n else:\n return convert_to_tensor(\n value,\n dtype=dtype,\n name=name,\n as_ref=as_ref,\n accepted_result_types=(Tensor, composite_tensor.CompositeTensor))\n\n\ndef internal_convert_n_to_tensor_or_composite(values,\n dtype=None,\n name=None,\n as_ref=False):\n \"\"\"Converts `values` to a list of `Tensor` or `CompositeTensor` objects.\n\n Any `CompositeTensor` objects in `values` are returned unmodified.\n\n Args:\n values: A list of `None`, `CompositeTensor`, or objects that can be consumed\n by `convert_to_tensor()`.\n dtype: (Optional.) The required `DType` of the returned `Tensor`s or\n `CompositeTensor`s.\n name: (Optional.) A name prefix to used when a new `Tensor` is created, in\n which case element `i` will be given the name `name + '_' + i`.\n as_ref: True if the caller wants the results as ref tensors.\n\n Returns:\n A list of `Tensor`, `CompositeTensor`, and/or `None` objects.\n\n Raises:\n TypeError: If no conversion function is registered for an element in\n `values`.\n RuntimeError: If a registered conversion function returns an invalid\n value.\n \"\"\"\n if not isinstance(values, collections_abc.Sequence):\n raise TypeError(\"values must be a sequence.\")\n ret = []\n for i, value in enumerate(values):\n if value is None:\n ret.append(value)\n else:\n n = None if name is None else \"%s_%d\" % (name, i)\n ret.append(\n internal_convert_to_tensor_or_composite(\n value, dtype=dtype, name=n, as_ref=as_ref))\n return ret\n\n\ndef convert_n_to_tensor_or_composite(values, dtype=None, name=None):\n \"\"\"Converts `values` to a list of `Output` or `CompositeTensor` objects.\n\n Any `CompositeTensor` objects in `values` are returned unmodified.\n\n Args:\n values: A list of `None`, `CompositeTensor``, or objects that can be\n consumed by `convert_to_tensor()`.\n dtype: (Optional.) 
The required `DType` of the returned `Tensor`s or\n `CompositeTensor`s.\n name: (Optional.) A name prefix to used when a new `Tensor` is created, in\n which case element `i` will be given the name `name + '_' + i`.\n\n Returns:\n A list of `Tensor` and/or `CompositeTensor` objects.\n\n Raises:\n TypeError: If no conversion function is registered for an element in\n `values`.\n RuntimeError: If a registered conversion function returns an invalid\n value.\n \"\"\"\n return internal_convert_n_to_tensor_or_composite(\n values=values, dtype=dtype, name=name, as_ref=False)\n\n\ndef _device_string(dev_spec):\n if pydev.is_device_spec(dev_spec):\n return dev_spec.to_string()\n else:\n return dev_spec\n\n\ndef _NodeDef(op_type, name, attrs=None):\n \"\"\"Create a NodeDef proto.\n\n Args:\n op_type: Value for the \"op\" attribute of the NodeDef proto.\n name: Value for the \"name\" attribute of the NodeDef proto.\n attrs: Dictionary where the key is the attribute name (a string)\n and the value is the respective \"attr\" attribute of the NodeDef proto (an\n AttrValue).\n\n Returns:\n A node_def_pb2.NodeDef protocol buffer.\n \"\"\"\n node_def = node_def_pb2.NodeDef(op=compat.as_bytes(op_type),\n name=compat.as_bytes(name))\n if attrs:\n for k, v in six.iteritems(attrs):\n node_def.attr[k].CopyFrom(v)\n return node_def\n\n\n# Copied from core/framework/node_def_util.cc\n# TODO(mrry,josh11b): Consolidate this validation in C++ code.\n_VALID_OP_NAME_REGEX = re.compile(\"^[A-Za-z0-9.][A-Za-z0-9_.\\\\-/>]*$\")\n_VALID_SCOPE_NAME_REGEX = re.compile(\"^[A-Za-z0-9_.\\\\-/>]*$\")\n\n\ndef _create_c_op(graph, node_def, inputs, control_inputs, op_def=None):\n \"\"\"Creates a TF_Operation.\n\n Args:\n graph: a `Graph`.\n node_def: `node_def_pb2.NodeDef` for the operation to create.\n inputs: A flattened list of `Tensor`s. This function handles grouping\n tensors into lists as per attributes in the `node_def`.\n control_inputs: A list of `Operation`s to set as control dependencies.\n op_def: Optional. `op_def_pb2.OpDef` for the operation to create. 
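The two regular expressions above are what `Operation.__init__` later validates node names against. A self-contained check of a few candidate names (patterns copied verbatim from this module):

```python
import re

_VALID_OP_NAME_REGEX = re.compile("^[A-Za-z0-9.][A-Za-z0-9_.\\-/>]*$")
_VALID_SCOPE_NAME_REGEX = re.compile("^[A-Za-z0-9_.\\-/>]*$")

for name in ["MatMul", "layer_1/weights", "my-op", "_private", "has space"]:
    print("%-16s valid op name: %s" %
          (name, bool(_VALID_OP_NAME_REGEX.match(name))))
# -> the first three pass; a leading underscore or a space is rejected.

# Scope names are laxer: the first-character class is relaxed, and empty is fine.
print(bool(_VALID_SCOPE_NAME_REGEX.match("_private")))   # True
print(bool(_VALID_SCOPE_NAME_REGEX.match("")))           # True
```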
If not\n specified, is looked up from the `graph` using `node_def.op`.\n\n Returns:\n A wrapped TF_Operation*.\n \"\"\"\n if op_def is None:\n op_def = graph._get_op_def(node_def.op) # pylint: disable=protected-access\n # TODO(skyewm): op_def_library.apply_op() flattens the incoming inputs.\n # Refactor so we don't have to do this here.\n inputs = _reconstruct_sequence_inputs(op_def, inputs, node_def.attr)\n # pylint: disable=protected-access\n op_desc = pywrap_tf_session.TF_NewOperation(graph._c_graph,\n compat.as_str(node_def.op),\n compat.as_str(node_def.name))\n if node_def.device:\n pywrap_tf_session.TF_SetDevice(op_desc, compat.as_str(node_def.device))\n # Add inputs\n for op_input in inputs:\n if isinstance(op_input, (list, tuple)):\n pywrap_tf_session.TF_AddInputList(op_desc,\n [t._as_tf_output() for t in op_input])\n else:\n pywrap_tf_session.TF_AddInput(op_desc, op_input._as_tf_output())\n\n # Add control inputs\n for control_input in control_inputs:\n pywrap_tf_session.TF_AddControlInput(op_desc, control_input._c_op)\n # pylint: enable=protected-access\n\n # Add attrs\n for name, attr_value in node_def.attr.items():\n serialized = attr_value.SerializeToString()\n # TODO(skyewm): this creates and deletes a new TF_Status for every attr.\n # It might be worth creating a convenient way to re-use the same status.\n pywrap_tf_session.TF_SetAttrValueProto(op_desc, compat.as_str(name),\n serialized)\n\n try:\n c_op = pywrap_tf_session.TF_FinishOperation(op_desc)\n except errors.InvalidArgumentError as e:\n # Convert to ValueError for backwards compatibility.\n raise ValueError(str(e))\n\n return c_op\n\n\n@tf_export(\"Operation\")\nclass Operation(object):\n \"\"\"Represents a graph node that performs computation on tensors.\n\n An `Operation` is a node in a `tf.Graph` that takes zero or more `Tensor`\n objects as input, and produces zero or more `Tensor` objects as output.\n Objects of type `Operation` are created by calling a Python op constructor\n (such as `tf.matmul`) within a `tf.function` or under a `tf.Graph.as_default`\n context manager.\n\n For example, within a `tf.function`, `c = tf.matmul(a, b)` creates an\n `Operation` of type \"MatMul\" that takes tensors `a` and `b` as input, and\n produces `c` as output.\n\n If a `tf.compat.v1.Session` is used, an `Operation` of a `tf.Graph` can be\n executed by passing it to `tf.Session.run`. `op.run()` is a shortcut for\n calling `tf.compat.v1.get_default_session().run(op)`.\n \"\"\"\n\n def __init__(self,\n node_def,\n g,\n inputs=None,\n output_types=None,\n control_inputs=None,\n input_types=None,\n original_op=None,\n op_def=None):\n r\"\"\"Creates an `Operation`.\n\n NOTE: This constructor validates the name of the `Operation` (passed\n as `node_def.name`). Valid `Operation` names match the following\n regular expression:\n\n [A-Za-z0-9.][A-Za-z0-9_.\\\\-/]*\n\n Args:\n node_def: `node_def_pb2.NodeDef`. `NodeDef` for the `Operation`. Used for\n attributes of `node_def_pb2.NodeDef`, typically `name`, `op`, and\n `device`. The `input` attribute is irrelevant here as it will be\n computed when generating the model.\n g: `Graph`. The parent graph.\n inputs: list of `Tensor` objects. The inputs to this `Operation`.\n output_types: list of `DType` objects. List of the types of the `Tensors`\n computed by this operation. 
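To ground the `Operation` docstring above: an op constructor called under `Graph.as_default()` produces an `Operation` whose inputs and outputs are graph tensors, and a `tf.compat.v1.Session` can execute it. A minimal sketch:

```python
import tensorflow as tf

g = tf.Graph()
with g.as_default():
    a = tf.constant([[1.0, 2.0]], name="a")
    b = tf.constant([[3.0], [4.0]], name="b")
    c = tf.matmul(a, b, name="c")
    op = c.op

print(op.type)                        # 'MatMul'
print([t.name for t in op.inputs])    # ['a:0', 'b:0']
print([t.name for t in op.outputs])   # ['c:0']

# With a v1 Session the output tensor can be evaluated directly.
with tf.compat.v1.Session(graph=g) as sess:
    print(sess.run(c))                # [[11.]]
```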
The length of this list indicates the\n number of output endpoints of the `Operation`.\n control_inputs: list of operations or tensors from which to have a control\n dependency.\n input_types: List of `DType` objects representing the types of the tensors\n accepted by the `Operation`. By default uses `[x.dtype.base_dtype for x\n in inputs]`. Operations that expect reference-typed inputs must specify\n these explicitly.\n original_op: Optional. Used to associate the new `Operation` with an\n existing `Operation` (for example, a replica with the op that was\n replicated).\n op_def: Optional. The `op_def_pb2.OpDef` proto that describes the op type\n that this `Operation` represents.\n\n Raises:\n TypeError: if control inputs are not Operations or Tensors,\n or if `node_def` is not a `NodeDef`,\n or if `g` is not a `Graph`,\n or if `inputs` are not tensors,\n or if `inputs` and `input_types` are incompatible.\n ValueError: if the `node_def` name is not valid.\n \"\"\"\n # For internal use only: `node_def` can be set to a TF_Operation to create\n # an Operation for that op. This is useful for creating Operations for ops\n # indirectly created by C API methods, e.g. the ops created by\n # TF_ImportGraphDef. When `node_def` is a TF_Operation, all optional fields\n # should be None.\n\n if isinstance(node_def, node_def_pb2.NodeDef):\n if node_def.ByteSize() >= (1 << 31) or node_def.ByteSize() < 0:\n raise ValueError(\n \"Cannot create a tensor proto whose content is larger than 2GB.\")\n if not _VALID_OP_NAME_REGEX.match(node_def.name):\n raise ValueError(\"'%s' is not a valid node name\" % node_def.name)\n c_op = None\n elif type(node_def).__name__ == \"TF_Operation\":\n assert inputs is None\n assert output_types is None\n assert control_inputs is None\n assert input_types is None\n assert original_op is None\n assert op_def is None\n c_op = node_def\n else:\n raise TypeError(\"node_def needs to be a NodeDef: %s\" % node_def)\n\n if not isinstance(g, Graph):\n raise TypeError(\"g needs to be a Graph: %s\" % g)\n self._graph = g\n\n if inputs is None:\n inputs = []\n elif not isinstance(inputs, list):\n raise TypeError(\"inputs needs to be a list of Tensors: %s\" % inputs)\n for a in inputs:\n if not isinstance(a, Tensor):\n raise TypeError(\"input needs to be a Tensor: %s\" % a)\n if input_types is None:\n input_types = [i.dtype.base_dtype for i in inputs]\n else:\n if not all(\n x.is_compatible_with(i.dtype) for i, x in zip(inputs, input_types)):\n raise TypeError(\"In op '%s', input types (%s) are not compatible \"\n \"with expected types (%s)\" %\n (node_def.name, [i.dtype for i in inputs], input_types))\n\n # Build the list of control inputs.\n control_input_ops = []\n if control_inputs:\n for c in control_inputs:\n control_op = None\n if isinstance(c, Operation):\n control_op = c\n elif isinstance(c, (Tensor, IndexedSlices)):\n control_op = c.op\n else:\n raise TypeError(\"Control input must be an Operation, \"\n \"a Tensor, or IndexedSlices: %s\" % c)\n control_input_ops.append(control_op)\n\n # This will be set by self.inputs.\n self._inputs_val = None\n\n # pylint: disable=protected-access\n self._original_op = original_op\n self._traceback = tf_stack.extract_stack()\n\n # List of _UserDevSpecs holding code location of device context manager\n # invocations and the users original argument to them.\n self._device_code_locations = None\n # Dict mapping op name to file and line information for op colocation\n # context managers.\n self._colocation_code_locations = None\n 
self._control_flow_context = self.graph._get_control_flow_context()\n\n # Gradient function for this op. There are three ways to specify gradient\n # function, and first available gradient gets used, in the following order.\n # 1. self._gradient_function\n # 2. Gradient name registered by \"_gradient_op_type\" attribute.\n # 3. Gradient name registered by op.type.\n self._gradient_function = None\n\n # Initialize self._c_op.\n if c_op:\n self._c_op = c_op\n op_def = g._get_op_def(pywrap_tf_session.TF_OperationOpType(c_op))\n name = self.name\n else:\n if op_def is None:\n op_def = self._graph._get_op_def(node_def.op)\n self._c_op = _create_c_op(self._graph, node_def, inputs,\n control_input_ops, op_def)\n name = compat.as_str(node_def.name)\n # pylint: enable=protected-access\n\n self._is_stateful = op_def.is_stateful\n\n # Initialize self._outputs.\n num_outputs = pywrap_tf_session.TF_OperationNumOutputs(self._c_op)\n self._outputs = []\n for i in range(num_outputs):\n tf_output = c_api_util.tf_output(self._c_op, i)\n output_type = pywrap_tf_session.TF_OperationOutputType(tf_output)\n tensor = Tensor._create_with_tf_output(self, i, output_type, tf_output) # pylint: disable=protected-access\n self._outputs.append(tensor)\n\n self._id_value = self._graph._add_op(self, name) # pylint: disable=protected-access\n\n if not c_op:\n self._control_flow_post_processing(input_tensors=inputs)\n\n def _control_flow_post_processing(self, input_tensors=None):\n \"\"\"Add this op to its control flow context.\n\n This may add new ops and change this op's inputs. self.inputs must be\n available before calling this method.\n\n Args:\n input_tensors: (Optional.) A list of `Tensors` corresponding to the inputs\n of this op, which should be equivalent to `self.inputs`. 
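The `control_inputs` handling in the constructor above is what `tf.control_dependencies` ultimately populates; the resulting control edges are visible on the finished `Operation`. A small graph-mode sketch:

```python
import tensorflow as tf

g = tf.Graph()
with g.as_default():
    a = tf.constant(1.0, name="a")
    b = tf.constant(2.0, name="b")
    with tf.control_dependencies([a]):
        c = tf.identity(b, name="c")

# The control edge recorded at construction time is visible on the Operation.
print([op.name for op in c.op.control_inputs])   # ['a']
```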
Pass this\n argument to avoid evaluating `self.inputs` unnecessarily.\n \"\"\"\n if input_tensors is None:\n input_tensors = self.inputs\n for input_tensor in input_tensors:\n control_flow_util.CheckInputFromValidContext(self, input_tensor.op)\n if self._control_flow_context is not None:\n self._control_flow_context.AddOp(self)\n\n def colocation_groups(self):\n \"\"\"Returns the list of colocation groups of the op.\"\"\"\n default_colocation_group = [compat.as_bytes(\"loc:@%s\" % self.name)]\n try:\n class_attr = self.get_attr(\"_class\")\n except ValueError:\n # This op has no explicit colocation group, so it is itself its\n # own root of a colocation group.\n return default_colocation_group\n\n attr_groups = [\n class_name for class_name in class_attr\n if class_name.startswith(b\"loc:@\")\n ]\n\n # If there are no colocation groups in the explicit _class field,\n # return the default colocation group.\n return attr_groups if attr_groups else default_colocation_group\n\n def values(self):\n \"\"\"DEPRECATED: Use outputs.\"\"\"\n return tuple(self.outputs)\n\n def _get_control_flow_context(self):\n \"\"\"Returns the control flow context of this op.\n\n Returns:\n A context object.\n \"\"\"\n return self._control_flow_context\n\n def _set_control_flow_context(self, ctx):\n \"\"\"Sets the current control flow context of this op.\n\n Args:\n ctx: a context object.\n \"\"\"\n self._control_flow_context = ctx\n\n @property\n def name(self):\n \"\"\"The full name of this operation.\"\"\"\n return pywrap_tf_session.TF_OperationName(self._c_op)\n\n @property\n def _id(self):\n \"\"\"The unique integer id of this operation.\"\"\"\n return self._id_value\n\n @property\n def device(self):\n \"\"\"The name of the device to which this op has been assigned, if any.\n\n Returns:\n The string name of the device to which this op has been\n assigned, or an empty string if it has not been assigned to a\n device.\n \"\"\"\n return pywrap_tf_session.TF_OperationDevice(self._c_op)\n\n @property\n def _device_assignments(self):\n \"\"\"Code locations for device context managers active at op creation.\n\n This property will return a list of traceable_stack.TraceableObject\n instances where .obj is a string representing the assigned device\n (or information about the function that would be applied to this op\n to compute the desired device) and the filename and lineno members\n record the location of the relevant device context manager.\n\n For example, suppose file_a contained these lines:\n\n file_a.py:\n 15: with tf.device('/gpu:0'):\n 16: node_b = tf.constant(4, name='NODE_B')\n\n Then a TraceableObject t_obj representing the device context manager\n would have these member values:\n\n t_obj.obj -> '/gpu:0'\n t_obj.filename = 'file_a.py'\n t_obj.lineno = 15\n\n and node_b.op._device_assignments would return the list [t_obj].\n\n Returns:\n [str: traceable_stack.TraceableObject, ...] as per this method's\n description, above.\n \"\"\"\n return self._device_code_locations or []\n\n @property\n def _colocation_dict(self):\n \"\"\"Code locations for colocation context managers active at op creation.\n\n This property will return a dictionary for which the keys are nodes with\n which this Operation is colocated, and for which the values are\n traceable_stack.TraceableObject instances. 
The TraceableObject instances\n record the location of the relevant colocation context manager but have the\n \"obj\" field set to None to prevent leaking private data.\n\n For example, suppose file_a contained these lines:\n\n file_a.py:\n 14: node_a = tf.constant(3, name='NODE_A')\n 15: with tf.compat.v1.colocate_with(node_a):\n 16: node_b = tf.constant(4, name='NODE_B')\n\n Then a TraceableObject t_obj representing the colocation context manager\n would have these member values:\n\n t_obj.obj -> None\n t_obj.filename = 'file_a.py'\n t_obj.lineno = 15\n\n and node_b.op._colocation_dict would return the dictionary\n\n { 'NODE_A': t_obj }\n\n Returns:\n {str: traceable_stack.TraceableObject} as per this method's description,\n above.\n \"\"\"\n locations_dict = self._colocation_code_locations or {}\n return locations_dict.copy()\n\n @property\n def _output_types(self):\n \"\"\"List this operation's output types.\n\n Returns:\n List of the types of the Tensors computed by this operation.\n Each element in the list is an integer whose value is one of\n the TF_DataType enums defined in pywrap_tf_session.h\n The length of this list indicates the number of output endpoints\n of the operation.\n \"\"\"\n num_outputs = pywrap_tf_session.TF_OperationNumOutputs(self._c_op)\n output_types = [\n int(pywrap_tf_session.TF_OperationOutputType(self._tf_output(i)))\n for i in xrange(num_outputs)\n ]\n\n return output_types\n\n def _tf_output(self, output_idx):\n \"\"\"Create and return a new TF_Output for output_idx'th output of this op.\"\"\"\n tf_output = pywrap_tf_session.TF_Output()\n tf_output.oper = self._c_op\n tf_output.index = output_idx\n return tf_output\n\n def _tf_input(self, input_idx):\n \"\"\"Create and return a new TF_Input for input_idx'th input of this op.\"\"\"\n tf_input = pywrap_tf_session.TF_Input()\n tf_input.oper = self._c_op\n tf_input.index = input_idx\n return tf_input\n\n def _set_device(self, device): # pylint: disable=redefined-outer-name\n \"\"\"Set the device of this operation.\n\n Args:\n device: string or device.. The device to set.\n \"\"\"\n self._set_device_from_string(compat.as_str(_device_string(device)))\n\n def _set_device_from_string(self, device_str):\n \"\"\"Fast path to set device if the type is known to be a string.\n\n This function is called frequently enough during graph construction that\n there are non-trivial performance gains if the caller can guarantee that\n the specified device is already a string.\n\n Args:\n device_str: A string specifying where to place this op.\n \"\"\"\n pywrap_tf_session.SetRequestedDevice(\n self._graph._c_graph, # pylint: disable=protected-access\n self._c_op, # pylint: disable=protected-access\n device_str)\n\n def _update_input(self, index, tensor):\n \"\"\"Update the input to this operation at the given index.\n\n NOTE: This is for TF internal use only. 
Please don't use it.\n\n Args:\n index: the index of the input to update.\n tensor: the Tensor to be used as the input at the given index.\n\n Raises:\n TypeError: if tensor is not a Tensor,\n or if input tensor type is not convertible to dtype.\n ValueError: if the Tensor is from a different graph.\n \"\"\"\n if not isinstance(tensor, Tensor):\n raise TypeError(\"tensor must be a Tensor: %s\" % tensor)\n _assert_same_graph(self, tensor)\n\n # Reset cached inputs.\n self._inputs_val = None\n pywrap_tf_session.UpdateEdge(\n self._graph._c_graph, # pylint: disable=protected-access\n tensor._as_tf_output(), # pylint: disable=protected-access\n self._tf_input(index))\n\n def _add_while_inputs(self, tensors):\n \"\"\"See AddWhileInputHack in python_api.h.\n\n NOTE: This is for TF internal use only. Please don't use it.\n\n Args:\n tensors: list of Tensors\n\n Raises:\n TypeError: if tensor is not a Tensor,\n or if input tensor type is not convertible to dtype.\n ValueError: if the Tensor is from a different graph.\n \"\"\"\n for tensor in tensors:\n if not isinstance(tensor, Tensor):\n raise TypeError(\"tensor must be a Tensor: %s\" % tensor)\n _assert_same_graph(self, tensor)\n\n # Reset cached inputs.\n self._inputs_val = None\n pywrap_tf_session.AddWhileInputHack(\n self._graph._c_graph, # pylint: disable=protected-access\n tensor._as_tf_output(), # pylint: disable=protected-access\n self._c_op)\n\n def _add_control_inputs(self, ops):\n \"\"\"Add a list of new control inputs to this operation.\n\n Args:\n ops: the list of Operations to add as control input.\n\n Raises:\n TypeError: if ops is not a list of Operations.\n ValueError: if any op in ops is from a different graph.\n \"\"\"\n for op in ops:\n if not isinstance(op, Operation):\n raise TypeError(\"op must be an Operation: %s\" % op)\n pywrap_tf_session.AddControlInput(\n self._graph._c_graph, # pylint: disable=protected-access\n self._c_op, # pylint: disable=protected-access\n op._c_op) # pylint: disable=protected-access\n\n def _add_control_input(self, op):\n \"\"\"Add a new control input to this operation.\n\n Args:\n op: the Operation to add as control input.\n\n Raises:\n TypeError: if op is not an Operation.\n ValueError: if op is from a different graph.\n \"\"\"\n if not isinstance(op, Operation):\n raise TypeError(\"op must be an Operation: %s\" % op)\n pywrap_tf_session.AddControlInput(\n self._graph._c_graph, # pylint: disable=protected-access\n self._c_op, # pylint: disable=protected-access\n op._c_op) # pylint: disable=protected-access\n\n def _remove_all_control_inputs(self):\n \"\"\"Removes any control inputs to this operation.\"\"\"\n pywrap_tf_session.RemoveAllControlInputs(self._graph._c_graph, self._c_op) # pylint: disable=protected-access\n\n def _add_outputs(self, types, shapes):\n \"\"\"Adds new Tensors to self.outputs.\n\n Note: this is generally unsafe to use. 
This is used in certain situations in\n conjunction with _set_type_list_attr.\n\n Arguments:\n types: list of DTypes\n shapes: list of TensorShapes\n \"\"\"\n assert len(types) == len(shapes)\n orig_num_outputs = len(self.outputs)\n for i in range(len(types)):\n t = Tensor(self, orig_num_outputs + i, types[i])\n self._outputs.append(t)\n t.set_shape(shapes[i])\n\n def __str__(self):\n return str(self.node_def)\n\n def __repr__(self):\n return \"<tf.Operation '%s' type=%s>\" % (self.name, self.type)\n\n @property\n def outputs(self):\n \"\"\"The list of `Tensor` objects representing the outputs of this op.\"\"\"\n return self._outputs\n\n @property\n def inputs(self):\n \"\"\"The sequence of `Tensor` objects representing the data inputs of this op.\"\"\"\n if self._inputs_val is None:\n # pylint: disable=protected-access\n self._inputs_val = tuple(\n map(self.graph._get_tensor_by_tf_output,\n pywrap_tf_session.GetOperationInputs(self._c_op)))\n # pylint: enable=protected-access\n return self._inputs_val\n\n @property\n def _input_types(self):\n num_inputs = pywrap_tf_session.TF_OperationNumInputs(self._c_op)\n input_types = [\n dtypes.as_dtype(\n pywrap_tf_session.TF_OperationInputType(self._tf_input(i)))\n for i in xrange(num_inputs)\n ]\n return input_types\n\n @property\n def control_inputs(self):\n \"\"\"The `Operation` objects on which this op has a control dependency.\n\n Before this op is executed, TensorFlow will ensure that the\n operations in `self.control_inputs` have finished executing. This\n mechanism can be used to run ops sequentially for performance\n reasons, or to ensure that the side effects of an op are observed\n in the correct order.\n\n Returns:\n A list of `Operation` objects.\n\n \"\"\"\n control_c_ops = pywrap_tf_session.TF_OperationGetControlInputs_wrapper(\n self._c_op)\n # pylint: disable=protected-access\n return [\n self.graph._get_operation_by_name_unsafe(\n pywrap_tf_session.TF_OperationName(c_op)) for c_op in control_c_ops\n ]\n # pylint: enable=protected-access\n\n @property\n def _control_outputs(self):\n \"\"\"The `Operation` objects which have a control dependency on this op.\n\n Before any of the ops in self._control_outputs can execute tensorflow will\n ensure self has finished executing.\n\n Returns:\n A list of `Operation` objects.\n\n \"\"\"\n control_c_ops = pywrap_tf_session.TF_OperationGetControlOutputs_wrapper(\n self._c_op)\n # pylint: disable=protected-access\n return [\n self.graph._get_operation_by_name_unsafe(\n pywrap_tf_session.TF_OperationName(c_op)) for c_op in control_c_ops\n ]\n # pylint: enable=protected-access\n\n @property\n def type(self):\n \"\"\"The type of the op (e.g. 
`\"MatMul\"`).\"\"\"\n return pywrap_tf_session.TF_OperationOpType(self._c_op)\n\n @property\n def graph(self):\n \"\"\"The `Graph` that contains this operation.\"\"\"\n return self._graph\n\n @property\n def node_def(self):\n # pylint: disable=line-too-long\n \"\"\"Returns the `NodeDef` representation of this operation.\n\n Returns:\n A\n [`NodeDef`](https://www.tensorflow.org/code/tensorflow/core/framework/node_def.proto)\n protocol buffer.\n \"\"\"\n # pylint: enable=line-too-long\n with c_api_util.tf_buffer() as buf:\n pywrap_tf_session.TF_OperationToNodeDef(self._c_op, buf)\n data = pywrap_tf_session.TF_GetBuffer(buf)\n node_def = node_def_pb2.NodeDef()\n node_def.ParseFromString(compat.as_bytes(data))\n return node_def\n\n @property\n def op_def(self):\n # pylint: disable=line-too-long\n \"\"\"Returns the `OpDef` proto that represents the type of this op.\n\n Returns:\n An\n [`OpDef`](https://www.tensorflow.org/code/tensorflow/core/framework/op_def.proto)\n protocol buffer.\n \"\"\"\n # pylint: enable=line-too-long\n return self._graph._get_op_def(self.type)\n\n @property\n def traceback(self):\n \"\"\"Returns the call stack from when this operation was constructed.\"\"\"\n return self._traceback\n\n def _set_attr(self, attr_name, attr_value):\n \"\"\"Private method used to set an attribute in the node_def.\"\"\"\n buf = pywrap_tf_session.TF_NewBufferFromString(\n compat.as_bytes(attr_value.SerializeToString()))\n try:\n self._set_attr_with_buf(attr_name, buf)\n finally:\n pywrap_tf_session.TF_DeleteBuffer(buf)\n\n def _set_attr_with_buf(self, attr_name, attr_buf):\n \"\"\"Set an attr in the node_def with a pre-allocated buffer.\"\"\"\n # pylint: disable=protected-access\n pywrap_tf_session.SetAttr(self._graph._c_graph, self._c_op, attr_name,\n attr_buf)\n # pylint: enable=protected-access\n\n def _set_func_attr(self, attr_name, func_name):\n \"\"\"Private method used to set a function attribute in the node_def.\"\"\"\n func = attr_value_pb2.NameAttrList(name=func_name)\n self._set_attr(attr_name, attr_value_pb2.AttrValue(func=func))\n\n def _set_func_list_attr(self, attr_name, func_names):\n \"\"\"Private method used to set a list(function) attribute in the node_def.\"\"\"\n funcs = [attr_value_pb2.NameAttrList(name=func_name)\n for func_name in func_names]\n funcs_list = attr_value_pb2.AttrValue.ListValue(func=funcs)\n self._set_attr(attr_name, attr_value_pb2.AttrValue(list=funcs_list))\n\n def _set_type_list_attr(self, attr_name, types):\n \"\"\"Private method used to set a list(type) attribute in the node_def.\"\"\"\n if not types:\n return\n if isinstance(types[0], dtypes.DType):\n types = [dt.as_datatype_enum for dt in types]\n types_list = attr_value_pb2.AttrValue.ListValue(type=types)\n self._set_attr(attr_name, attr_value_pb2.AttrValue(list=types_list))\n\n def _set_shape_list_attr(self, attr_name, shapes):\n \"\"\"Private method used to set a list(shape) attribute in the node_def.\"\"\"\n shapes = [s.as_proto() for s in shapes]\n shapes_list = attr_value_pb2.AttrValue.ListValue(shape=shapes)\n self._set_attr(attr_name, attr_value_pb2.AttrValue(list=shapes_list))\n\n def _clear_attr(self, attr_name):\n \"\"\"Private method used to clear an attribute in the node_def.\"\"\"\n # pylint: disable=protected-access\n pywrap_tf_session.ClearAttr(self._graph._c_graph, self._c_op, attr_name)\n # pylint: enable=protected-access\n\n def get_attr(self, name):\n \"\"\"Returns the value of the attr of this op with the given `name`.\n\n Args:\n name: The name of the attr to fetch.\n\n 
Returns:\n The value of the attr, as a Python object.\n\n Raises:\n ValueError: If this op does not have an attr with the given `name`.\n \"\"\"\n fields = (\"s\", \"i\", \"f\", \"b\", \"type\", \"shape\", \"tensor\", \"func\")\n try:\n with c_api_util.tf_buffer() as buf:\n pywrap_tf_session.TF_OperationGetAttrValueProto(self._c_op, name, buf)\n data = pywrap_tf_session.TF_GetBuffer(buf)\n except errors.InvalidArgumentError as e:\n # Convert to ValueError for backwards compatibility.\n raise ValueError(str(e))\n x = attr_value_pb2.AttrValue()\n x.ParseFromString(data)\n\n oneof_value = x.WhichOneof(\"value\")\n if oneof_value is None:\n return []\n if oneof_value == \"list\":\n for f in fields:\n if getattr(x.list, f):\n if f == \"type\":\n return [dtypes.as_dtype(t) for t in x.list.type]\n else:\n return list(getattr(x.list, f))\n return []\n if oneof_value == \"type\":\n return dtypes.as_dtype(x.type)\n assert oneof_value in fields, \"Unsupported field type in \" + str(x)\n return getattr(x, oneof_value)\n\n def _get_attr_type(self, name):\n \"\"\"Returns the `DType` value of the attr of this op with the given `name`.\"\"\"\n try:\n dtype_enum = pywrap_tf_session.TF_OperationGetAttrType(self._c_op, name)\n return _DTYPES_INTERN_TABLE[dtype_enum]\n except errors.InvalidArgumentError as e:\n # Convert to ValueError for backwards compatibility.\n raise ValueError(str(e))\n\n def _get_attr_bool(self, name):\n \"\"\"Returns the `bool` value of the attr of this op with the given `name`.\"\"\"\n try:\n return pywrap_tf_session.TF_OperationGetAttrBool(self._c_op, name)\n except errors.InvalidArgumentError as e:\n # Convert to ValueError for backwards compatibility.\n raise ValueError(str(e))\n\n def _get_attr_int(self, name):\n \"\"\"Returns the `int` value of the attr of this op with the given `name`.\"\"\"\n try:\n return pywrap_tf_session.TF_OperationGetAttrInt(self._c_op, name)\n except errors.InvalidArgumentError as e:\n # Convert to ValueError for backwards compatibility.\n raise ValueError(str(e))\n\n def run(self, feed_dict=None, session=None):\n \"\"\"Runs this operation in a `Session`.\n\n Calling this method will execute all preceding operations that\n produce the inputs needed for this operation.\n\n *N.B.* Before invoking `Operation.run()`, its graph must have been\n launched in a session, and either a default session must be\n available, or `session` must be specified explicitly.\n\n Args:\n feed_dict: A dictionary that maps `Tensor` objects to feed values. See\n `tf.Session.run` for a description of the valid feed values.\n session: (Optional.) The `Session` to be used to run to this operation. If\n none, the default session will be used.\n \"\"\"\n _run_using_default_session(self, feed_dict, self.graph, session)\n\n_gradient_registry = registry.Registry(\"gradient\")\n\n\n@tf_export(\"RegisterGradient\")\nclass RegisterGradient(object):\n \"\"\"A decorator for registering the gradient function for an op type.\n\n This decorator is only used when defining a new op type. 
For an op\n with `m` inputs and `n` outputs, the gradient function is a function\n that takes the original `Operation` and `n` `Tensor` objects\n (representing the gradients with respect to each output of the op),\n and returns `m` `Tensor` objects (representing the partial gradients\n with respect to each input of the op).\n\n For example, assuming that operations of type `\"Sub\"` take two\n inputs `x` and `y`, and return a single output `x - y`, the\n following gradient function would be registered:\n\n ```python\n @tf.RegisterGradient(\"Sub\")\n def _sub_grad(unused_op, grad):\n return grad, tf.negative(grad)\n ```\n\n The decorator argument `op_type` is the string type of an\n operation. This corresponds to the `OpDef.name` field for the proto\n that defines the operation.\n \"\"\"\n\n def __init__(self, op_type):\n \"\"\"Creates a new decorator with `op_type` as the Operation type.\n\n Args:\n op_type: The string type of an operation. This corresponds to the\n `OpDef.name` field for the proto that defines the operation.\n\n Raises:\n TypeError: If `op_type` is not string.\n \"\"\"\n if not isinstance(op_type, six.string_types):\n raise TypeError(\"op_type must be a string\")\n self._op_type = op_type\n\n def __call__(self, f):\n \"\"\"Registers the function `f` as gradient function for `op_type`.\"\"\"\n _gradient_registry.register(f, self._op_type)\n return f\n\n\[email protected]_endpoints(\"NotDifferentiable\", \"NoGradient\")\n@tf_export(\"no_gradient\", v1=[\"no_gradient\", \"NotDifferentiable\", \"NoGradient\"])\ndef no_gradient(op_type):\n \"\"\"Specifies that ops of type `op_type` is not differentiable.\n\n This function should *not* be used for operations that have a\n well-defined gradient that is not yet implemented.\n\n This function is only used when defining a new op type. It may be\n used for ops such as `tf.size()` that are not differentiable. For\n example:\n\n ```python\n tf.no_gradient(\"Size\")\n ```\n\n The gradient computed for 'op_type' will then propagate zeros.\n\n For ops that have a well-defined gradient but are not yet implemented,\n no declaration should be made, and an error *must* be thrown if\n an attempt to request its gradient is made.\n\n Args:\n op_type: The string type of an operation. This corresponds to the\n `OpDef.name` field for the proto that defines the operation.\n\n Raises:\n TypeError: If `op_type` is not a string.\n\n \"\"\"\n if not isinstance(op_type, six.string_types):\n raise TypeError(\"op_type must be a string\")\n _gradient_registry.register(None, op_type)\n\n\n# Aliases for the old names, will be eventually removed.\nNoGradient = no_gradient\nNotDifferentiable = no_gradient\n\n\ndef get_gradient_function(op):\n \"\"\"Returns the function that computes gradients for \"op\".\"\"\"\n if not op.inputs:\n return None\n\n gradient_function = op._gradient_function # pylint: disable=protected-access\n if gradient_function:\n return gradient_function\n\n try:\n op_type = op.get_attr(\"_gradient_op_type\")\n except ValueError:\n op_type = op.type\n return _gradient_registry.lookup(op_type)\n\n\ndef set_shape_and_handle_data_for_outputs(_):\n \"\"\"No op. 
TODO(b/74620627): Remove this.\"\"\"\n pass\n\n\nclass OpStats(object):\n \"\"\"A holder for statistics about an operator.\n\n This class holds information about the resource requirements for an op,\n including the size of its weight parameters on-disk and how many FLOPS it\n requires to execute forward inference.\n\n If you define a new operation, you can create a function that will return a\n set of information about its usage of the CPU and disk space when serialized.\n The function itself takes a Graph object that's been set up so you can call\n methods like get_tensor_by_name to help calculate the results, and a NodeDef\n argument.\n\n \"\"\"\n\n def __init__(self, statistic_type, value=None):\n \"\"\"Sets up the initial placeholders for the statistics.\"\"\"\n self.statistic_type = statistic_type\n self.value = value\n\n @property\n def statistic_type(self):\n return self._statistic_type\n\n @statistic_type.setter\n def statistic_type(self, statistic_type):\n self._statistic_type = statistic_type\n\n @property\n def value(self):\n return self._value\n\n @value.setter\n def value(self, value):\n self._value = value\n\n def __iadd__(self, other):\n if other.statistic_type != self.statistic_type:\n raise ValueError(\"Can't add an OpStat of type %s to one of %s.\" %\n (self.statistic_type, other.statistic_type))\n if self.value is None:\n self.value = other.value\n elif other.value is not None:\n self._value += other.value\n return self\n\n\n_stats_registry = registry.Registry(\"statistical functions\")\n\n\nclass RegisterStatistics(object):\n \"\"\"A decorator for registering the statistics function for an op type.\n\n This decorator can be defined for an op type so that it gives a\n report on the resources used by an instance of an operator, in the\n form of an OpStats object.\n\n Well-known types of statistics include these so far:\n\n - flops: When running a graph, the bulk of the computation happens doing\n numerical calculations like matrix multiplications. This type allows a node\n to return how many floating-point operations it takes to complete. The\n total number of FLOPs for a graph is a good guide to its expected latency.\n\n You can add your own statistics just by picking a new type string, registering\n functions for the ops you care about, and then calling get_stats_for_node_def.\n\n If a statistic for an op is registered multiple times, a KeyError will be\n raised.\n\n Since the statistics is counted on a per-op basis. It is not suitable for\n model parameters (capacity), which is expected to be counted only once, even\n if it is shared by multiple ops. (e.g. 
RNN)\n\n For example, you can define a new metric called doohickey for a Foo operation\n by placing this in your code:\n\n ```python\n @ops.RegisterStatistics(\"Foo\", \"doohickey\")\n def _calc_foo_bojangles(unused_graph, unused_node_def):\n return ops.OpStats(\"doohickey\", 20)\n ```\n\n Then in client code you can retrieve the value by making this call:\n\n ```python\n doohickey = ops.get_stats_for_node_def(graph, node_def, \"doohickey\")\n ```\n\n If the NodeDef is for an op with a registered doohickey function, you'll get\n back the calculated amount in doohickey.value, or None if it's not defined.\n\n \"\"\"\n\n def __init__(self, op_type, statistic_type):\n \"\"\"Saves the `op_type` as the `Operation` type.\"\"\"\n if not isinstance(op_type, six.string_types):\n raise TypeError(\"op_type must be a string.\")\n if \",\" in op_type:\n raise TypeError(\"op_type must not contain a comma.\")\n self._op_type = op_type\n if not isinstance(statistic_type, six.string_types):\n raise TypeError(\"statistic_type must be a string.\")\n if \",\" in statistic_type:\n raise TypeError(\"statistic_type must not contain a comma.\")\n self._statistic_type = statistic_type\n\n def __call__(self, f):\n \"\"\"Registers \"f\" as the statistics function for \"op_type\".\"\"\"\n _stats_registry.register(f, self._op_type + \",\" + self._statistic_type)\n return f\n\n\ndef get_stats_for_node_def(graph, node, statistic_type):\n \"\"\"Looks up the node's statistics function in the registry and calls it.\n\n This function takes a Graph object and a NodeDef from a GraphDef, and if\n there's an associated statistics method, calls it and returns a result. If no\n function has been registered for the particular node type, it returns an empty\n statistics object.\n\n Args:\n graph: A Graph object that's been set up with the node's graph.\n node: A NodeDef describing the operator.\n statistic_type: A string identifying the statistic we're interested in.\n\n Returns:\n An OpStats object containing information about resource usage.\n \"\"\"\n\n try:\n stats_func = _stats_registry.lookup(node.op + \",\" + statistic_type)\n result = stats_func(graph, node)\n except LookupError:\n result = OpStats(statistic_type)\n return result\n\n\ndef name_from_scope_name(name):\n \"\"\"Returns the name of an op given the name of its scope.\n\n Args:\n name: the name of the scope.\n\n Returns:\n the name of the op (equal to scope name minus any trailing slash).\n \"\"\"\n return name[:-1] if (name and name[-1] == \"/\") else name\n\n\n_MUTATION_LOCK_GROUP = 0\n_SESSION_RUN_LOCK_GROUP = 1\n\n\n@tf_export(\"Graph\")\nclass Graph(object):\n \"\"\"A TensorFlow computation, represented as a dataflow graph.\n\n Graphs are used by `tf.function`s to represent the function's computations.\n Each graph contains a set of `tf.Operation` objects, which represent units of\n computation; and `tf.Tensor` objects, which represent the units of data that\n flow between operations.\n\n ### Using graphs directly (deprecated)\n\n A `tf.Graph` can be constructed and used directly without a `tf.function`, as\n was required in TensorFlow 1, but this is deprecated and it is recommended to\n use a `tf.function` instead. If a graph is directly used, other deprecated\n TensorFlow 1 classes are also required to execute the graph, such as a\n `tf.compat.v1.Session`.\n\n A default graph can be registered with the `tf.Graph.as_default` context\n manager. Then, operations will be added to the graph instead of being executed\n eagerly. 
For example:\n\n ```python\n g = tf.Graph()\n with g.as_default():\n # Define operations and tensors in `g`.\n c = tf.constant(30.0)\n assert c.graph is g\n ```\n\n `tf.compat.v1.get_default_graph()` can be used to obtain the default graph.\n\n Important note: This class *is not* thread-safe for graph construction. All\n operations should be created from a single thread, or external\n synchronization must be provided. Unless otherwise specified, all methods\n are not thread-safe.\n\n A `Graph` instance supports an arbitrary number of \"collections\"\n that are identified by name. For convenience when building a large\n graph, collections can store groups of related objects: for\n example, the `tf.Variable` uses a collection (named\n `tf.GraphKeys.GLOBAL_VARIABLES`) for\n all variables that are created during the construction of a graph. The caller\n may define additional collections by specifying a new name.\n \"\"\"\n\n def __init__(self):\n \"\"\"Creates a new, empty Graph.\"\"\"\n # Protects core state that can be returned via public accessors.\n # Thread-safety is provided on a best-effort basis to support buggy\n # programs, and is not guaranteed by the public `tf.Graph` API.\n #\n # NOTE(mrry): This does not protect the various stacks. A warning will\n # be reported if these are used from multiple threads\n self._lock = threading.RLock()\n # The group lock synchronizes Session.run calls with methods that create\n # and mutate ops (e.g. Graph.create_op()). This synchronization is\n # necessary because it's illegal to modify an operation after it's been run.\n # The group lock allows any number of threads to mutate ops at the same time\n # but if any modification is going on, all Session.run calls have to wait.\n # Similarly, if one or more Session.run calls are going on, all mutate ops\n # have to wait until all Session.run calls have finished.\n self._group_lock = lock_util.GroupLock(num_groups=2)\n self._nodes_by_id = {} # GUARDED_BY(self._lock)\n self._next_id_counter = 0 # GUARDED_BY(self._lock)\n self._nodes_by_name = {} # GUARDED_BY(self._lock)\n self._version = 0 # GUARDED_BY(self._lock)\n # Maps a name used in the graph to the next id to use for that name.\n self._names_in_use = {}\n self._stack_state_is_thread_local = False\n self._thread_local = threading.local()\n # Functions that will be applied to choose a device if none is specified.\n # In TF2.x or after switch_to_thread_local(),\n # self._thread_local._device_function_stack is used instead.\n self._graph_device_function_stack = traceable_stack.TraceableStack()\n # Default original_op applied to new ops.\n self._default_original_op = None\n # Current control flow context. 
It could be either CondContext or\n # WhileContext defined in ops/control_flow_ops.py\n self._control_flow_context = None\n # A new node will depend of the union of all of the nodes in the stack.\n # In TF2.x or after switch_to_thread_local(),\n # self._thread_local._control_dependencies_stack is used instead.\n self._graph_control_dependencies_stack = []\n # Arbitrary collections of objects.\n self._collections = {}\n # The graph-level random seed\n self._seed = None\n # A dictionary of attributes that should be applied to all ops.\n self._attr_scope_map = {}\n # A map from op type to the kernel label that should be used.\n self._op_to_kernel_label_map = {}\n # A map from op type to an alternative op type that should be used when\n # computing gradients.\n self._gradient_override_map = {}\n # A map from op type to a gradient function that should be used instead.\n self._gradient_function_map = {}\n # True if the graph is considered \"finalized\". In that case no\n # new operations can be added.\n self._finalized = False\n # Functions defined in the graph\n self._functions = collections.OrderedDict()\n # Default GraphDef versions\n self._graph_def_versions = versions_pb2.VersionDef(\n producer=versions.GRAPH_DEF_VERSION,\n min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER)\n self._building_function = False\n # Stack of colocate_with ops. In TF2.x or after switch_to_thread_local(),\n # self._thread_local._colocation_stack is used instead.\n self._graph_colocation_stack = traceable_stack.TraceableStack()\n # Set of tensors that are dangerous to feed!\n self._unfeedable_tensors = object_identity.ObjectIdentitySet()\n # Set of operations that are dangerous to fetch!\n self._unfetchable_ops = set()\n # A map of tensor handle placeholder to tensor dtype.\n self._handle_feeders = {}\n # A map from tensor handle to its read op.\n self._handle_readers = {}\n # A map from tensor handle to its move op.\n self._handle_movers = {}\n # A map from tensor handle to its delete op.\n self._handle_deleters = {}\n # Allow optimizers and other objects to pseudo-uniquely key graphs (this key\n # will be shared when defining function graphs, for example, so optimizers\n # being called inside function definitions behave as if they were seeing the\n # actual outside graph).\n self._graph_key = \"grap-key-%d/\" % (uid(),)\n # A string with the last reduction method passed to\n # losses.compute_weighted_loss(), or None. This is required only for\n # backward compatibility with Estimator and optimizer V1 use cases.\n self._last_loss_reduction = None\n # Flag that is used to indicate whether loss has been scaled by optimizer.\n # If this flag has been set, then estimator uses it to scale losss back\n # before reporting. This is required only for backward compatibility with\n # Estimator and optimizer V1 use cases.\n self._is_loss_scaled_by_optimizer = False\n self._container = \"\"\n # Set to True if this graph is being built in an\n # AutomaticControlDependencies context.\n self._add_control_dependencies = False\n # Cache for OpDef protobufs retrieved via the C API.\n self._op_def_cache = {}\n # Cache for constant results of `broadcast_gradient_args()`. The keys are\n # tuples of fully-defined shapes: (x_shape_tuple, y_shape_tuple), and the\n # values are tuples of reduction indices: (rx, ry).\n self._bcast_grad_args_cache = {}\n # Cache for constant results of `reduced_shape()`. 
The keys are pairs of\n # tuples: (input_shape_tuple, reduction_indices_tuple), and the values\n # are pairs of tuples: (output_shape_kept_dims, tile_scaling).\n self._reduced_shape_cache = {}\n\n # TODO(skyewm): fold as much of the above as possible into the C\n # implementation\n self._scoped_c_graph = c_api_util.ScopedTFGraph()\n # The C API requires all ops to have shape functions. Disable this\n # requirement (many custom ops do not have shape functions, and we don't\n # want to break these existing cases).\n pywrap_tf_session.SetRequireShapeInferenceFns(self._c_graph, False)\n if tf2.enabled():\n self.switch_to_thread_local()\n\n # Note: this method is private because the API of tf.Graph() is public and\n # frozen, and this functionality is still not ready for public visibility.\n @tf_contextlib.contextmanager\n def _variable_creator_scope(self, creator, priority=100):\n \"\"\"Scope which defines a variable creation function.\n\n Args:\n creator: A callable taking `next_creator` and `kwargs`. See the\n `tf.variable_creator_scope` docstring.\n priority: Creators with a higher `priority` are called first. Within the\n same priority, creators are called inner-to-outer.\n\n Yields:\n `_variable_creator_scope` is a context manager with a side effect, but\n doesn't return a value.\n\n Raises:\n RuntimeError: If variable creator scopes are not properly nested.\n \"\"\"\n # This step keeps a reference to the existing stack, and it also initializes\n # self._thread_local._variable_creator_stack if it doesn't exist yet.\n old = self._variable_creator_stack\n new = list(old)\n new.append((priority, creator))\n # Sorting is stable, so we'll put higher-priority creators later in the list\n # but otherwise maintain registration order.\n new.sort(key=lambda item: item[0])\n self._thread_local._variable_creator_stack = new # pylint: disable=protected-access\n try:\n yield\n finally:\n if self._thread_local._variable_creator_stack is not new: # pylint: disable=protected-access\n raise RuntimeError(\n \"Exiting variable_creator_scope without proper nesting.\")\n self._thread_local._variable_creator_stack = old # pylint: disable=protected-access\n\n # Note: this method is private because the API of tf.Graph() is public and\n # frozen, and this functionality is still not ready for public visibility.\n @property\n def _variable_creator_stack(self):\n if not hasattr(self._thread_local, \"_variable_creator_stack\"):\n self._thread_local._variable_creator_stack = [] # pylint: disable=protected-access\n\n # This previously returned a copy of the stack instead of the stack itself,\n # to guard against accidental mutation. Consider, however, code that wants\n # to save and restore the variable creator stack:\n # def f():\n # original_stack = graph._variable_creator_stack\n # graph._variable_creator_stack = new_stack\n # ... 
# Some code\n # graph._variable_creator_stack = original_stack\n #\n # And lets say you have some code that calls this function with some\n # variable_creator:\n # def g():\n # with variable_scope.variable_creator_scope(creator):\n # f()\n # When exiting the variable creator scope, it would see a different stack\n # object than it expected leading to a \"Exiting variable_creator_scope\n # without proper nesting\" error.\n return self._thread_local._variable_creator_stack # pylint: disable=protected-access\n\n @_variable_creator_stack.setter\n def _variable_creator_stack(self, variable_creator_stack):\n self._thread_local._variable_creator_stack = variable_creator_stack # pylint: disable=protected-access\n\n def _check_not_finalized(self):\n \"\"\"Check if the graph is finalized.\n\n Raises:\n RuntimeError: If the graph finalized.\n \"\"\"\n if self._finalized:\n raise RuntimeError(\"Graph is finalized and cannot be modified.\")\n\n def _add_op(self, op, op_name):\n \"\"\"Adds 'op' to the graph and returns the unique ID for the added Operation.\n\n Args:\n op: the Operation to add.\n op_name: the name of the Operation.\n\n Returns:\n An integer that is a unique ID for the added Operation.\n \"\"\"\n self._check_not_finalized()\n with self._lock:\n self._next_id_counter += 1\n op_id = self._next_id_counter\n self._nodes_by_id[op_id] = op\n self._nodes_by_name[op_name] = op\n self._version = max(self._version, op_id)\n return op_id\n\n @property\n def _c_graph(self):\n if self._scoped_c_graph:\n return self._scoped_c_graph.graph\n return None\n\n @property\n def version(self):\n \"\"\"Returns a version number that increases as ops are added to the graph.\n\n Note that this is unrelated to the\n `tf.Graph.graph_def_versions`.\n\n Returns:\n An integer version that increases as ops are added to the graph.\n \"\"\"\n if self._finalized:\n return self._version\n\n with self._lock:\n return self._version\n\n @property\n def graph_def_versions(self):\n # pylint: disable=line-too-long\n \"\"\"The GraphDef version information of this graph.\n\n For details on the meaning of each version, see\n [`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto).\n\n Returns:\n A `VersionDef`.\n \"\"\"\n # pylint: enable=line-too-long\n with c_api_util.tf_buffer() as buf:\n pywrap_tf_session.TF_GraphVersions(self._c_graph, buf)\n data = pywrap_tf_session.TF_GetBuffer(buf)\n version_def = versions_pb2.VersionDef()\n version_def.ParseFromString(compat.as_bytes(data))\n return version_def\n\n @property\n def seed(self):\n \"\"\"The graph-level random seed of this graph.\"\"\"\n return self._seed\n\n @seed.setter\n def seed(self, seed):\n self._seed = seed\n\n @property\n def finalized(self):\n \"\"\"True if this graph has been finalized.\"\"\"\n return self._finalized\n\n def finalize(self):\n \"\"\"Finalizes this graph, making it read-only.\n\n After calling `g.finalize()`, no new operations can be added to\n `g`. This method is used to ensure that no operations are added\n to a graph when it is shared between multiple threads, for example\n when using a `tf.compat.v1.train.QueueRunner`.\n \"\"\"\n self._finalized = True\n\n def _unsafe_unfinalize(self):\n \"\"\"Opposite of `finalize`.\n\n Internal interface.\n\n NOTE: Unfinalizing a graph could have negative impact on performance,\n especially in a multi-threaded environment. Unfinalizing a graph\n when it is in use by a Session may lead to undefined behavior. 
Ensure\n that all sessions using a graph are closed before calling this method.\n \"\"\"\n self._finalized = False\n\n def _get_control_flow_context(self):\n \"\"\"Returns the current control flow context.\n\n Returns:\n A context object.\n \"\"\"\n return self._control_flow_context\n\n def _set_control_flow_context(self, ctx):\n \"\"\"Sets the current control flow context.\n\n Args:\n ctx: a context object.\n \"\"\"\n self._control_flow_context = ctx\n\n def _copy_functions_to_graph_def(self, graph_def, starting_bytesize):\n \"\"\"If this graph contains functions, copy them to `graph_def`.\"\"\"\n bytesize = starting_bytesize\n for f in self._functions.values():\n bytesize += f.definition.ByteSize()\n if bytesize >= (1 << 31) or bytesize < 0:\n raise ValueError(\"GraphDef cannot be larger than 2GB.\")\n graph_def.library.function.extend([f.definition])\n if f.grad_func_name:\n grad_def = function_pb2.GradientDef()\n grad_def.function_name = f.name\n grad_def.gradient_func = f.grad_func_name\n graph_def.library.gradient.extend([grad_def])\n\n def _as_graph_def(self, from_version=None, add_shapes=False):\n # pylint: disable=line-too-long\n \"\"\"Returns a serialized `GraphDef` representation of this graph.\n\n The serialized `GraphDef` can be imported into another `Graph`\n (using `tf.import_graph_def`) or used with the\n [C++ Session API](../../../../api_docs/cc/index.md).\n\n This method is thread-safe.\n\n Args:\n from_version: Optional. If this is set, returns a `GraphDef` containing\n only the nodes that were added to this graph since its `version`\n property had the given value.\n add_shapes: If true, adds an \"_output_shapes\" list attr to each node with\n the inferred shapes of each of its outputs.\n\n Returns:\n A tuple containing a\n [`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)\n protocol buffer, and the version of the graph to which that\n `GraphDef` corresponds.\n\n Raises:\n ValueError: If the `graph_def` would be too large.\n\n \"\"\"\n # pylint: enable=line-too-long\n with self._lock:\n with c_api_util.tf_buffer() as buf:\n pywrap_tf_session.TF_GraphToGraphDef(self._c_graph, buf)\n data = pywrap_tf_session.TF_GetBuffer(buf)\n graph = graph_pb2.GraphDef()\n graph.ParseFromString(compat.as_bytes(data))\n # Strip the experimental library field iff it's empty.\n if not graph.library.function:\n graph.ClearField(\"library\")\n\n if add_shapes:\n for node in graph.node:\n op = self._nodes_by_name[node.name]\n if op.outputs:\n node.attr[\"_output_shapes\"].list.shape.extend(\n [output.get_shape().as_proto() for output in op.outputs])\n for function_def in graph.library.function:\n defined_function = self._functions[function_def.signature.name]\n try:\n func_graph = defined_function.graph\n except AttributeError:\n # _DefinedFunction doesn't have a graph, _EagerDefinedFunction\n # does. Both rely on ops.py, so we can't really isinstance check\n # them.\n continue\n input_shapes = function_def.attr[\"_input_shapes\"]\n try:\n func_graph_inputs = func_graph.inputs\n except AttributeError:\n continue\n # TODO(b/141471245): Fix the inconsistency when inputs of func graph\n # are appended during gradient computation of while/cond.\n for input_tensor, _ in zip(func_graph_inputs,\n function_def.signature.input_arg):\n if input_tensor.dtype == dtypes.resource:\n # TODO(allenl): Save and restore handle data, then save the\n # resource placeholder's shape. 
Right now some shape functions get\n # confused if we set the shape of the resource placeholder (to a\n # scalar of course) and there isn't any handle data.\n input_shapes.list.shape.add().CopyFrom(\n tensor_shape.TensorShape(None).as_proto())\n else:\n input_shapes.list.shape.add().CopyFrom(\n input_tensor.get_shape().as_proto())\n for node in function_def.node_def:\n try:\n op = func_graph.get_operation_by_name(node.name)\n except KeyError:\n continue\n outputs = op.outputs\n\n if op.type == \"StatefulPartitionedCall\":\n # Filter out any extra outputs (possibly added by function\n # backpropagation rewriting).\n num_outputs = len(node.attr[\"Tout\"].list.type)\n outputs = outputs[:num_outputs]\n\n node.attr[\"_output_shapes\"].list.shape.extend(\n [output.get_shape().as_proto() for output in outputs])\n\n return graph, self._version\n\n def as_graph_def(self, from_version=None, add_shapes=False):\n # pylint: disable=line-too-long\n \"\"\"Returns a serialized `GraphDef` representation of this graph.\n\n The serialized `GraphDef` can be imported into another `Graph`\n (using `tf.import_graph_def`) or used with the\n [C++ Session API](../../api_docs/cc/index.md).\n\n This method is thread-safe.\n\n Args:\n from_version: Optional. If this is set, returns a `GraphDef` containing\n only the nodes that were added to this graph since its `version`\n property had the given value.\n add_shapes: If true, adds an \"_output_shapes\" list attr to each node with\n the inferred shapes of each of its outputs.\n\n Returns:\n A\n [`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)\n protocol buffer.\n\n Raises:\n ValueError: If the `graph_def` would be too large.\n \"\"\"\n # pylint: enable=line-too-long\n result, _ = self._as_graph_def(from_version, add_shapes)\n return result\n\n def _is_function(self, name):\n \"\"\"Tests whether 'name' is registered in this graph's function library.\n\n Args:\n name: string op name.\n\n Returns:\n bool indicating whether or not 'name' is registered in function library.\n \"\"\"\n return compat.as_str(name) in self._functions\n\n def _get_function(self, name):\n \"\"\"Returns the function definition for 'name'.\n\n Args:\n name: string function name.\n\n Returns:\n The function def proto.\n \"\"\"\n return self._functions.get(compat.as_str(name), None)\n\n def _add_function(self, function):\n \"\"\"Adds a function to the graph.\n\n After the function has been added, you can call to the function by\n passing the function name in place of an op name to\n `Graph.create_op()`.\n\n Args:\n function: A `_DefinedFunction` object.\n\n Raises:\n ValueError: if another function is defined with the same name.\n \"\"\"\n name = function.name\n # Sanity checks on gradient definition.\n if (function.grad_func_name is not None) and (function.python_grad_func is\n not None):\n raise ValueError(\"Gradient defined twice for function %s\" % name)\n\n # Add function to graph\n # pylint: disable=protected-access\n gradient = (\n function._grad_func._c_func.func if function._grad_func else None)\n pywrap_tf_session.TF_GraphCopyFunction(self._c_graph, function._c_func.func,\n gradient)\n # pylint: enable=protected-access\n\n self._functions[compat.as_str(name)] = function\n\n # Need a new-enough consumer to support the functions we add to the graph.\n if self._graph_def_versions.min_consumer < 12:\n self._graph_def_versions.min_consumer = 12\n\n @property\n def building_function(self):\n \"\"\"Returns True iff this graph represents a function.\"\"\"\n return 
self._building_function\n\n # Helper functions to create operations.\n @deprecated_args(None,\n \"Shapes are always computed; don't use the compute_shapes \"\n \"as it has no effect.\", \"compute_shapes\")\n def create_op(\n self,\n op_type,\n inputs,\n dtypes=None, # pylint: disable=redefined-outer-name\n input_types=None,\n name=None,\n attrs=None,\n op_def=None,\n compute_shapes=True,\n compute_device=True):\n \"\"\"Creates an `Operation` in this graph.\n\n This is a low-level interface for creating an `Operation`. Most\n programs will not call this method directly, and instead use the\n Python op constructors, such as `tf.constant()`, which add ops to\n the default graph.\n\n Args:\n op_type: The `Operation` type to create. This corresponds to the\n `OpDef.name` field for the proto that defines the operation.\n inputs: A list of `Tensor` objects that will be inputs to the `Operation`.\n dtypes: (Optional) A list of `DType` objects that will be the types of the\n tensors that the operation produces.\n input_types: (Optional.) A list of `DType`s that will be the types of the\n tensors that the operation consumes. By default, uses the base `DType`\n of each input in `inputs`. Operations that expect reference-typed inputs\n must specify `input_types` explicitly.\n name: (Optional.) A string name for the operation. If not specified, a\n name is generated based on `op_type`.\n attrs: (Optional.) A dictionary where the key is the attribute name (a\n string) and the value is the respective `attr` attribute of the\n `NodeDef` proto that will represent the operation (an `AttrValue`\n proto).\n op_def: (Optional.) The `OpDef` proto that describes the `op_type` that\n the operation will have.\n compute_shapes: (Optional.) Deprecated. Has no effect (shapes are always\n computed).\n compute_device: (Optional.) If True, device functions will be executed to\n compute the device property of the Operation.\n\n Raises:\n TypeError: if any of the inputs is not a `Tensor`.\n ValueError: if colocation conflicts with existing device assignment.\n\n Returns:\n An `Operation` object.\n \"\"\"\n del compute_shapes\n for idx, a in enumerate(inputs):\n if not isinstance(a, Tensor):\n raise TypeError(\"Input #%d is not a tensor: %s\" % (idx, a))\n return self._create_op_internal(op_type, inputs, dtypes, input_types, name,\n attrs, op_def, compute_device)\n\n def _create_op_internal(\n self,\n op_type,\n inputs,\n dtypes=None, # pylint: disable=redefined-outer-name\n input_types=None,\n name=None,\n attrs=None,\n op_def=None,\n compute_device=True):\n \"\"\"Creates an `Operation` in this graph.\n\n Implements `Graph.create_op()` without the overhead of the deprecation\n wrapper.\n\n Args:\n op_type: The `Operation` type to create. This corresponds to the\n `OpDef.name` field for the proto that defines the operation.\n inputs: A list of `Tensor` objects that will be inputs to the `Operation`.\n dtypes: (Optional) A list of `DType` objects that will be the types of the\n tensors that the operation produces.\n input_types: (Optional.) A list of `DType`s that will be the types of the\n tensors that the operation consumes. By default, uses the base `DType`\n of each input in `inputs`. Operations that expect reference-typed inputs\n must specify `input_types` explicitly.\n name: (Optional.) A string name for the operation. If not specified, a\n name is generated based on `op_type`.\n attrs: (Optional.) 
A dictionary where the key is the attribute name (a\n string) and the value is the respective `attr` attribute of the\n `NodeDef` proto that will represent the operation (an `AttrValue`\n proto).\n op_def: (Optional.) The `OpDef` proto that describes the `op_type` that\n the operation will have.\n compute_device: (Optional.) If True, device functions will be executed to\n compute the device property of the Operation.\n\n Raises:\n ValueError: if colocation conflicts with existing device assignment.\n\n Returns:\n An `Operation` object.\n \"\"\"\n self._check_not_finalized()\n if name is None:\n name = op_type\n # If a names ends with a '/' it is a \"name scope\" and we use it as-is,\n # after removing the trailing '/'.\n if name and name[-1] == \"/\":\n name = name_from_scope_name(name)\n else:\n name = self.unique_name(name)\n\n node_def = _NodeDef(op_type, name, attrs)\n\n input_ops = set(t.op for t in inputs)\n control_inputs = self._control_dependencies_for_inputs(input_ops)\n # _create_op_helper mutates the new Operation. `_mutation_lock` ensures a\n # Session.run call cannot occur between creating and mutating the op.\n with self._mutation_lock():\n ret = Operation(\n node_def,\n self,\n inputs=inputs,\n output_types=dtypes,\n control_inputs=control_inputs,\n input_types=input_types,\n original_op=self._default_original_op,\n op_def=op_def)\n self._create_op_helper(ret, compute_device=compute_device)\n return ret\n\n def _create_op_from_tf_operation(self, c_op, compute_device=True):\n \"\"\"Creates an `Operation` in this graph from the supplied TF_Operation.\n\n This method is like create_op() except the new Operation is constructed\n using `c_op`. The returned Operation will have `c_op` as its _c_op\n field. This is used to create Operation objects around TF_Operations created\n indirectly by the C API (e.g. by TF_ImportGraphDef, TF_FinishWhile).\n\n This function does not call Operation._control_flow_post_processing or\n Graph._control_dependencies_for_inputs (since the inputs may not be\n available yet). The caller is responsible for calling these methods.\n\n Args:\n c_op: a wrapped TF_Operation\n compute_device: (Optional.) If True, device functions will be executed to\n compute the device property of the Operation.\n\n Returns:\n An `Operation` object.\n \"\"\"\n self._check_not_finalized()\n ret = Operation(c_op, self)\n # If a name_scope was created with ret.name but no nodes were created in it,\n # the name will still appear in _names_in_use even though the name hasn't\n # been used. This is ok, just leave _names_in_use as-is in this case.\n # TODO(skyewm): make the C API guarantee no name conflicts.\n name_key = ret.name.lower()\n if name_key not in self._names_in_use:\n self._names_in_use[name_key] = 1\n self._create_op_helper(ret, compute_device=compute_device)\n return ret\n\n def _create_op_helper(self, op, compute_device=True):\n \"\"\"Common logic for creating an op in this graph.\"\"\"\n # Apply any additional attributes requested. 
Do not overwrite any existing\n # attributes.\n for key, value in self._attr_scope_map.items():\n try:\n op.get_attr(key)\n except ValueError:\n if callable(value):\n value = value(op.node_def)\n if not isinstance(value, (type(None), attr_value_pb2.AttrValue)):\n raise TypeError(\n \"Callable for scope map key '%s' must return either None or \"\n \"an AttrValue protocol buffer; but it returned: %s\" %\n (key, value))\n if value:\n op._set_attr(key, value) # pylint: disable=protected-access\n\n # Apply a kernel label if one has been specified for this op type.\n try:\n kernel_label = self._op_to_kernel_label_map[op.type]\n op._set_attr(\"_kernel\", # pylint: disable=protected-access\n attr_value_pb2.AttrValue(s=compat.as_bytes(kernel_label)))\n except KeyError:\n pass\n\n op._gradient_function = self._gradient_function_map.get(op.type) # pylint: disable=protected-access\n\n # Apply the overriding op type for gradients if one has been specified for\n # this op type.\n try:\n mapped_op_type = self._gradient_override_map[op.type]\n op._set_attr(\"_gradient_op_type\", # pylint: disable=protected-access\n attr_value_pb2.AttrValue(s=compat.as_bytes(mapped_op_type)))\n except KeyError:\n pass\n\n self._record_op_seen_by_control_dependencies(op)\n\n if compute_device:\n self._apply_device_functions(op)\n\n # Snapshot the colocation stack metadata before we might generate error\n # messages using it. Note that this snapshot depends on the actual stack\n # and is independent of the op's _class attribute.\n # pylint: disable=protected-access\n op._colocation_code_locations = self._snapshot_colocation_stack_metadata()\n # pylint: enable=protected-access\n\n if self._colocation_stack:\n all_colocation_groups = []\n for colocation_op in self._colocation_stack.peek_objs():\n all_colocation_groups.extend(colocation_op.colocation_groups())\n if colocation_op.device:\n # pylint: disable=protected-access\n op._set_device(colocation_op.device)\n # pylint: enable=protected-access\n\n all_colocation_groups = sorted(set(all_colocation_groups))\n # pylint: disable=protected-access\n op._set_attr(\n \"_class\",\n attr_value_pb2.AttrValue(\n list=attr_value_pb2.AttrValue.ListValue(s=all_colocation_groups)))\n # pylint: enable=protected-access\n\n # Sets \"container\" attribute if\n # (1) self._container is not None\n # (2) \"is_stateful\" is set in OpDef\n # (3) \"container\" attribute is in OpDef\n # (4) \"container\" attribute is None\n if self._container and op._is_stateful: # pylint: disable=protected-access\n try:\n container_attr = op.get_attr(\"container\")\n except ValueError:\n # \"container\" attribute is not in OpDef\n pass\n else:\n if not container_attr:\n op._set_attr(\"container\", attr_value_pb2.AttrValue( # pylint: disable=protected-access\n s=compat.as_bytes(self._container)))\n\n def _add_new_tf_operations(self, compute_devices=True):\n \"\"\"Creates `Operations` in this graph for any new TF_Operations.\n\n This is useful for when TF_Operations are indirectly created by the C API\n outside of the Operation constructor (e.g. by TF_ImportGraphDef,\n TF_FinishWhile). This ensures there are corresponding Operations for all\n TF_Operations in the underlying TF_Graph.\n\n Args:\n compute_devices: (Optional.) 
If True, device functions will be executed to\n compute the device properties of each new Operation.\n\n Returns:\n A list of the new `Operation` objects.\n \"\"\"\n # Create all Operation objects before accessing their inputs since an op may\n # be created before its inputs.\n new_ops = [\n self._create_op_from_tf_operation(c_op, compute_device=compute_devices)\n for c_op in c_api_util.new_tf_operations(self)\n ]\n\n # pylint: disable=protected-access\n for op in new_ops:\n new_control_inputs = self._control_dependencies_for_inputs(op.inputs)\n op._add_control_inputs(new_control_inputs)\n op._control_flow_post_processing()\n # pylint: enable=protected-access\n\n return new_ops\n\n def as_graph_element(self, obj, allow_tensor=True, allow_operation=True):\n \"\"\"Returns the object referred to by `obj`, as an `Operation` or `Tensor`.\n\n This function validates that `obj` represents an element of this\n graph, and gives an informative error message if it is not.\n\n This function is the canonical way to get/validate an object of\n one of the allowed types from an external argument reference in the\n Session API.\n\n This method may be called concurrently from multiple threads.\n\n Args:\n obj: A `Tensor`, an `Operation`, or the name of a tensor or operation. Can\n also be any object with an `_as_graph_element()` method that returns a\n value of one of these types. Note: `_as_graph_element` will be called\n inside the graph's lock and so may not modify the graph.\n allow_tensor: If true, `obj` may refer to a `Tensor`.\n allow_operation: If true, `obj` may refer to an `Operation`.\n\n Returns:\n The `Tensor` or `Operation` in the Graph corresponding to `obj`.\n\n Raises:\n TypeError: If `obj` is not a type we support attempting to convert\n to types.\n ValueError: If `obj` is of an appropriate type but invalid. For\n example, an invalid string.\n KeyError: If `obj` is not an object in the graph.\n \"\"\"\n if self._finalized:\n return self._as_graph_element_locked(obj, allow_tensor, allow_operation)\n\n with self._lock:\n return self._as_graph_element_locked(obj, allow_tensor, allow_operation)\n\n def _as_graph_element_locked(self, obj, allow_tensor, allow_operation):\n \"\"\"See `Graph.as_graph_element()` for details.\"\"\"\n # The vast majority of this function is figuring\n # out what an API user might be doing wrong, so\n # that we can give helpful error messages.\n #\n # Ideally, it would be nice to split it up, but we\n # need context to generate nice error messages.\n\n if allow_tensor and allow_operation:\n types_str = \"Tensor or Operation\"\n elif allow_tensor:\n types_str = \"Tensor\"\n elif allow_operation:\n types_str = \"Operation\"\n else:\n raise ValueError(\"allow_tensor and allow_operation can't both be False.\")\n\n temp_obj = _as_graph_element(obj)\n if temp_obj is not None:\n obj = temp_obj\n\n # If obj appears to be a name...\n if isinstance(obj, compat.bytes_or_text_types):\n name = compat.as_str(obj)\n\n if \":\" in name and allow_tensor:\n # Looks like a Tensor name and can be a Tensor.\n try:\n op_name, out_n = name.split(\":\")\n out_n = int(out_n)\n except:\n raise ValueError(\"The name %s looks a like a Tensor name, but is \"\n \"not a valid one. Tensor names must be of the \"\n \"form \\\"<op_name>:<output_index>\\\".\" % repr(name))\n if op_name in self._nodes_by_name:\n op = self._nodes_by_name[op_name]\n else:\n raise KeyError(\"The name %s refers to a Tensor which does not \"\n \"exist. 
The operation, %s, does not exist in the \"\n \"graph.\" % (repr(name), repr(op_name)))\n try:\n return op.outputs[out_n]\n except:\n raise KeyError(\"The name %s refers to a Tensor which does not \"\n \"exist. The operation, %s, exists but only has \"\n \"%s outputs.\" %\n (repr(name), repr(op_name), len(op.outputs)))\n\n elif \":\" in name and not allow_tensor:\n # Looks like a Tensor name but can't be a Tensor.\n raise ValueError(\"Name %s appears to refer to a Tensor, not a %s.\" %\n (repr(name), types_str))\n\n elif \":\" not in name and allow_operation:\n # Looks like an Operation name and can be an Operation.\n if name not in self._nodes_by_name:\n raise KeyError(\"The name %s refers to an Operation not in the \"\n \"graph.\" % repr(name))\n return self._nodes_by_name[name]\n\n elif \":\" not in name and not allow_operation:\n # Looks like an Operation name but can't be an Operation.\n if name in self._nodes_by_name:\n # Yep, it's an Operation name\n err_msg = (\"The name %s refers to an Operation, not a %s.\" %\n (repr(name), types_str))\n else:\n err_msg = (\"The name %s looks like an (invalid) Operation name, \"\n \"not a %s.\" % (repr(name), types_str))\n err_msg += (\" Tensor names must be of the form \"\n \"\\\"<op_name>:<output_index>\\\".\")\n raise ValueError(err_msg)\n\n elif isinstance(obj, Tensor) and allow_tensor:\n # Actually obj is just the object it's referring to.\n if obj.graph is not self:\n raise ValueError(\"Tensor %s is not an element of this graph.\" % obj)\n return obj\n elif isinstance(obj, Operation) and allow_operation:\n # Actually obj is just the object it's referring to.\n if obj.graph is not self:\n raise ValueError(\"Operation %s is not an element of this graph.\" % obj)\n return obj\n else:\n # We give up!\n raise TypeError(\"Can not convert a %s into a %s.\" %\n (type(obj).__name__, types_str))\n\n def get_operations(self):\n \"\"\"Return the list of operations in the graph.\n\n You can modify the operations in place, but modifications\n to the list such as inserts/delete have no effect on the\n list of operations known to the graph.\n\n This method may be called concurrently from multiple threads.\n\n Returns:\n A list of Operations.\n \"\"\"\n if self._finalized:\n return list(self._nodes_by_id.values())\n\n with self._lock:\n return list(self._nodes_by_id.values())\n\n def get_operation_by_name(self, name):\n \"\"\"Returns the `Operation` with the given `name`.\n\n This method may be called concurrently from multiple threads.\n\n Args:\n name: The name of the `Operation` to return.\n\n Returns:\n The `Operation` with the given `name`.\n\n Raises:\n TypeError: If `name` is not a string.\n KeyError: If `name` does not correspond to an operation in this graph.\n \"\"\"\n\n if not isinstance(name, six.string_types):\n raise TypeError(\"Operation names are strings (or similar), not %s.\" %\n type(name).__name__)\n return self.as_graph_element(name, allow_tensor=False, allow_operation=True)\n\n def _get_operation_by_name_unsafe(self, name):\n \"\"\"Returns the `Operation` with the given `name`.\n\n This is a internal unsafe version of get_operation_by_name. It skips many\n checks and does not have user friendly error messages but runs considerably\n faster. 
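A minimal sketch of the public name-based lookups, `get_operation_by_name` and `get_tensor_by_name` (the op name \"c\" is illustrative):\n\n    ```python\n    g = tf.Graph()\n    with g.as_default():\n      c = tf.constant(5.0, name=\"c\")\n    assert g.get_operation_by_name(\"c\") is c.op\n    assert g.get_tensor_by_name(\"c:0\") is c  # \"<op_name>:<output_index>\"\n    ```\n\n    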
This method may be called concurrently from multiple threads.\n\n Args:\n name: The name of the `Operation` to return.\n\n Returns:\n The `Operation` with the given `name`.\n\n Raises:\n KeyError: If `name` does not correspond to an operation in this graph.\n \"\"\"\n\n if self._finalized:\n return self._nodes_by_name[name]\n\n with self._lock:\n return self._nodes_by_name[name]\n\n def _get_operation_by_tf_operation(self, tf_oper):\n op_name = pywrap_tf_session.TF_OperationName(tf_oper)\n return self._get_operation_by_name_unsafe(op_name)\n\n def get_tensor_by_name(self, name):\n \"\"\"Returns the `Tensor` with the given `name`.\n\n This method may be called concurrently from multiple threads.\n\n Args:\n name: The name of the `Tensor` to return.\n\n Returns:\n The `Tensor` with the given `name`.\n\n Raises:\n TypeError: If `name` is not a string.\n KeyError: If `name` does not correspond to a tensor in this graph.\n \"\"\"\n # Names should be strings.\n if not isinstance(name, six.string_types):\n raise TypeError(\"Tensor names are strings (or similar), not %s.\" %\n type(name).__name__)\n return self.as_graph_element(name, allow_tensor=True, allow_operation=False)\n\n def _get_tensor_by_tf_output(self, tf_output):\n \"\"\"Returns the `Tensor` representing `tf_output`.\n\n Note that there is only one such `Tensor`, i.e. multiple calls to this\n function with the same TF_Output value will always return the same `Tensor`\n object.\n\n Args:\n tf_output: A wrapped `TF_Output` (the C API equivalent of `Tensor`).\n\n Returns:\n The `Tensor` that represents `tf_output`.\n \"\"\"\n op = self._get_operation_by_tf_operation(tf_output.oper)\n return op.outputs[tf_output.index]\n\n @property\n def _last_id(self):\n return self._next_id_counter\n\n def _get_op_def(self, type): # pylint: disable=redefined-builtin\n \"\"\"Returns the `OpDef` proto for `type`. `type` is a string.\"\"\"\n # NOTE: No locking is required because the lookup and insertion operations\n # on Python dictionaries are atomic.\n try:\n return self._op_def_cache[type]\n except KeyError:\n with c_api_util.tf_buffer() as buf:\n # pylint: disable=protected-access\n pywrap_tf_session.TF_GraphGetOpDef(self._c_graph, compat.as_bytes(type),\n buf)\n # pylint: enable=protected-access\n data = pywrap_tf_session.TF_GetBuffer(buf)\n op_def = op_def_pb2.OpDef()\n op_def.ParseFromString(compat.as_bytes(data))\n self._op_def_cache[type] = op_def\n return op_def\n\n def as_default(self):\n \"\"\"Returns a context manager that makes this `Graph` the default graph.\n\n This method should be used if you want to create multiple graphs\n in the same process. For convenience, a global default graph is\n provided, and all ops will be added to this graph if you do not\n create a new graph explicitly.\n\n Use this method with the `with` keyword to specify that ops created within\n the scope of a block should be added to this graph. In this case, once\n the scope of the `with` is exited, the previous default graph is set again\n as default. There is a stack, so it's ok to have multiple nested levels\n of `as_default` calls.\n\n The default graph is a property of the current thread. If you\n create a new thread, and wish to use the default graph in that\n thread, you must explicitly add a `with g.as_default():` in that\n thread's function.\n\n The following code examples are equivalent:\n\n ```python\n # 1. Using Graph.as_default():\n g = tf.Graph()\n with g.as_default():\n c = tf.constant(5.0)\n assert c.graph is g\n\n # 2. 
Constructing and making default:\n with tf.Graph().as_default() as g:\n c = tf.constant(5.0)\n assert c.graph is g\n ```\n\n If eager execution is enabled ops created under this context manager will be\n added to the graph instead of executed eagerly.\n\n Returns:\n A context manager for using this graph as the default graph.\n \"\"\"\n return _default_graph_stack.get_controller(self)\n\n @property\n def collections(self):\n \"\"\"Returns the names of the collections known to this graph.\"\"\"\n return list(self._collections)\n\n def add_to_collection(self, name, value):\n \"\"\"Stores `value` in the collection with the given `name`.\n\n Note that collections are not sets, so it is possible to add a value to\n a collection several times.\n\n Args:\n name: The key for the collection. The `GraphKeys` class contains many\n standard names for collections.\n value: The value to add to the collection.\n \"\"\" # pylint: disable=g-doc-exception\n self._check_not_finalized()\n with self._lock:\n if name not in self._collections:\n self._collections[name] = [value]\n else:\n self._collections[name].append(value)\n\n def add_to_collections(self, names, value):\n \"\"\"Stores `value` in the collections given by `names`.\n\n Note that collections are not sets, so it is possible to add a value to\n a collection several times. This function makes sure that duplicates in\n `names` are ignored, but it will not check for pre-existing membership of\n `value` in any of the collections in `names`.\n\n `names` can be any iterable, but if `names` is a string, it is treated as a\n single collection name.\n\n Args:\n names: The keys for the collections to add to. The `GraphKeys` class\n contains many standard names for collections.\n value: The value to add to the collections.\n \"\"\"\n # Make sure names are unique, but treat strings as a single collection name\n names = (names,) if isinstance(names, six.string_types) else set(names)\n for name in names:\n self.add_to_collection(name, value)\n\n def get_collection_ref(self, name):\n \"\"\"Returns a list of values in the collection with the given `name`.\n\n If the collection exists, this returns the list itself, which can\n be modified in place to change the collection. If the collection does\n not exist, it is created as an empty list and the list is returned.\n\n This is different from `get_collection()` which always returns a copy of\n the collection list if it exists and never creates an empty collection.\n\n Args:\n name: The key for the collection. For example, the `GraphKeys` class\n contains many standard names for collections.\n\n Returns:\n The list of values in the collection with the given `name`, or an empty\n list if no value has been added to that collection.\n \"\"\" # pylint: disable=g-doc-exception\n with self._lock:\n coll_list = self._collections.get(name, None)\n if coll_list is None:\n coll_list = []\n self._collections[name] = coll_list\n return coll_list\n\n def get_collection(self, name, scope=None):\n \"\"\"Returns a list of values in the collection with the given `name`.\n\n This is different from `get_collection_ref()` which always returns the\n actual collection list if it exists in that it returns a new list each time\n it is called.\n\n Args:\n name: The key for the collection. For example, the `GraphKeys` class\n contains many standard names for collections.\n scope: (Optional.) A string. If supplied, the resulting list is filtered\n to include only items whose `name` attribute matches `scope` using\n `re.match`. 
Items without a `name` attribute are never returned if a\n scope is supplied. The choice of `re.match` means that a `scope` without\n special tokens filters by prefix.\n\n Returns:\n The list of values in the collection with the given `name`, or\n an empty list if no value has been added to that collection. The\n list contains the values in the order under which they were\n collected.\n \"\"\" # pylint: disable=g-doc-exception\n with self._lock:\n collection = self._collections.get(name, None)\n if collection is None:\n return []\n if scope is None:\n return list(collection)\n else:\n c = []\n regex = re.compile(scope)\n for item in collection:\n try:\n if regex.match(item.name):\n c.append(item)\n except AttributeError:\n # Collection items with no name are ignored.\n pass\n return c\n\n def get_all_collection_keys(self):\n \"\"\"Returns a list of collections used in this graph.\"\"\"\n with self._lock:\n return [x for x in self._collections if isinstance(x, six.string_types)]\n\n def clear_collection(self, name):\n \"\"\"Clears all values in a collection.\n\n Args:\n name: The key for the collection. The `GraphKeys` class contains many\n standard names for collections.\n \"\"\"\n self._check_not_finalized()\n with self._lock:\n if name in self._collections:\n del self._collections[name]\n\n @tf_contextlib.contextmanager\n def _original_op(self, op):\n \"\"\"Python 'with' handler to help annotate ops with their originator.\n\n An op may have an 'original_op' property that indicates the op on which\n it was based. For example a replica op is based on the op that was\n replicated and a gradient op is based on the op that was differentiated.\n\n All ops created in the scope of this 'with' handler will have\n the given 'op' as their original op.\n\n Args:\n op: The Operation that all ops created in this scope will have as their\n original op.\n\n Yields:\n Nothing.\n \"\"\"\n old_original_op = self._default_original_op\n self._default_original_op = op\n try:\n yield\n finally:\n self._default_original_op = old_original_op\n\n @property\n def _name_stack(self):\n # This may be called from a thread where name_stack doesn't yet exist.\n if not hasattr(self._thread_local, \"_name_stack\"):\n self._thread_local._name_stack = \"\"\n return self._thread_local._name_stack\n\n @_name_stack.setter\n def _name_stack(self, name_stack):\n self._thread_local._name_stack = name_stack\n\n # pylint: disable=g-doc-return-or-yield,line-too-long\n @tf_contextlib.contextmanager\n def name_scope(self, name):\n \"\"\"Returns a context manager that creates hierarchical names for operations.\n\n A graph maintains a stack of name scopes. A `with name_scope(...):`\n statement pushes a new name onto the stack for the lifetime of the context.\n\n The `name` argument will be interpreted as follows:\n\n * A string (not ending with '/') will create a new name scope, in which\n `name` is appended to the prefix of all operations created in the\n context. If `name` has been used before, it will be made unique by\n calling `self.unique_name(name)`.\n * A scope previously captured from a `with g.name_scope(...) 
as\n scope:` statement will be treated as an \"absolute\" name scope, which\n makes it possible to re-enter existing scopes.\n * A value of `None` or the empty string will reset the current name scope\n to the top-level (empty) name scope.\n\n For example:\n\n ```python\n with tf.Graph().as_default() as g:\n c = tf.constant(5.0, name=\"c\")\n assert c.op.name == \"c\"\n c_1 = tf.constant(6.0, name=\"c\")\n assert c_1.op.name == \"c_1\"\n\n # Creates a scope called \"nested\"\n with g.name_scope(\"nested\") as scope:\n nested_c = tf.constant(10.0, name=\"c\")\n assert nested_c.op.name == \"nested/c\"\n\n # Creates a nested scope called \"inner\".\n with g.name_scope(\"inner\"):\n nested_inner_c = tf.constant(20.0, name=\"c\")\n assert nested_inner_c.op.name == \"nested/inner/c\"\n\n # Create a nested scope called \"inner_1\".\n with g.name_scope(\"inner\"):\n nested_inner_1_c = tf.constant(30.0, name=\"c\")\n assert nested_inner_1_c.op.name == \"nested/inner_1/c\"\n\n # Treats `scope` as an absolute name scope, and\n # switches to the \"nested/\" scope.\n with g.name_scope(scope):\n nested_d = tf.constant(40.0, name=\"d\")\n assert nested_d.op.name == \"nested/d\"\n\n with g.name_scope(\"\"):\n e = tf.constant(50.0, name=\"e\")\n assert e.op.name == \"e\"\n ```\n\n The name of the scope itself can be captured by `with\n g.name_scope(...) as scope:`, which stores the name of the scope\n in the variable `scope`. This value can be used to name an\n operation that represents the overall result of executing the ops\n in a scope. For example:\n\n ```python\n inputs = tf.constant(...)\n with g.name_scope('my_layer') as scope:\n weights = tf.Variable(..., name=\"weights\")\n biases = tf.Variable(..., name=\"biases\")\n affine = tf.matmul(inputs, weights) + biases\n output = tf.nn.relu(affine, name=scope)\n ```\n\n NOTE: This constructor validates the given `name`. Valid scope\n names match one of the following regular expressions:\n\n [A-Za-z0-9.][A-Za-z0-9_.\\\\-/]* (for scopes at the root)\n [A-Za-z0-9_.\\\\-/]* (for other scopes)\n\n Args:\n name: A name for the scope.\n\n Returns:\n A context manager that installs `name` as a new name scope.\n\n Raises:\n ValueError: If `name` is not a valid scope name, according to the rules\n above.\n \"\"\"\n if name:\n if isinstance(name, compat.bytes_or_text_types):\n name = compat.as_str(name)\n\n if self._name_stack:\n # Scopes created in a nested scope may have initial characters\n # that are illegal as the initial character of an op name\n # (viz. '-', '\\', '/', and '_').\n if not _VALID_SCOPE_NAME_REGEX.match(name):\n raise ValueError(\"'%s' is not a valid scope name\" % name)\n else:\n # Scopes created in the root must match the more restrictive\n # op name regex, which constrains the initial character.\n if not _VALID_OP_NAME_REGEX.match(name):\n raise ValueError(\"'%s' is not a valid scope name\" % name)\n old_stack = self._name_stack\n if not name: # Both for name=None and name=\"\" we re-set to empty scope.\n new_stack = None\n elif name[-1] == \"/\":\n new_stack = name_from_scope_name(name)\n else:\n new_stack = self.unique_name(name)\n self._name_stack = new_stack\n try:\n yield \"\" if new_stack is None else new_stack + \"/\"\n finally:\n self._name_stack = old_stack\n\n # pylint: enable=g-doc-return-or-yield,line-too-long\n\n def unique_name(self, name, mark_as_used=True):\n \"\"\"Return a unique operation name for `name`.\n\n Note: You rarely need to call `unique_name()` directly. 
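As a minimal sketch of its behavior, repeated requests for the same base\n    name simply gain numeric suffixes:\n\n    ```python\n    g = tf.Graph()\n    assert g.unique_name(\"foo\") == \"foo\"\n    assert g.unique_name(\"foo\") == \"foo_1\"\n    assert g.unique_name(\"foo\") == \"foo_2\"\n    ```\n\n    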
Most of\n the time you just need to create `with g.name_scope()` blocks to\n generate structured names.\n\n `unique_name` is used to generate structured names, separated by\n `\"/\"`, to help identify operations when debugging a graph.\n Operation names are displayed in error messages reported by the\n TensorFlow runtime, and in various visualization tools such as\n TensorBoard.\n\n If `mark_as_used` is set to `True`, which is the default, a new\n unique name is created and marked as in use. If it's set to `False`,\n the unique name is returned without actually being marked as used.\n This is useful when the caller simply wants to know what the name\n to be created will be.\n\n Args:\n name: The name for an operation.\n mark_as_used: Whether to mark this name as being used.\n\n Returns:\n A string to be passed to `create_op()` that will be used\n to name the operation being created.\n \"\"\"\n if self._name_stack:\n name = self._name_stack + \"/\" + name\n\n # For the sake of checking for names in use, we treat names as case\n # insensitive (e.g. foo = Foo).\n name_key = name.lower()\n i = self._names_in_use.get(name_key, 0)\n # Increment the number for \"name_key\".\n if mark_as_used:\n self._names_in_use[name_key] = i + 1\n if i > 0:\n base_name_key = name_key\n # Make sure the composed name key is not already used.\n while name_key in self._names_in_use:\n name_key = \"%s_%d\" % (base_name_key, i)\n i += 1\n # Mark the composed name_key as used in case someone wants\n # to call unique_name(\"name_1\").\n if mark_as_used:\n self._names_in_use[name_key] = 1\n\n # Return the new name with the original capitalization of the given name.\n name = \"%s_%d\" % (name, i - 1)\n return name\n\n def get_name_scope(self):\n \"\"\"Returns the current name scope.\n\n For example:\n\n ```python\n with tf.name_scope('scope1'):\n with tf.name_scope('scope2'):\n print(tf.compat.v1.get_default_graph().get_name_scope())\n ```\n would print the string `scope1/scope2`.\n\n Returns:\n A string representing the current name scope.\n \"\"\"\n return self._name_stack\n\n @tf_contextlib.contextmanager\n def _colocate_with_for_gradient(self, op, gradient_uid,\n ignore_existing=False):\n with self.colocate_with(op, ignore_existing):\n if gradient_uid is not None and self._control_flow_context is not None:\n self._control_flow_context.EnterGradientColocation(op, gradient_uid)\n try:\n yield\n finally:\n self._control_flow_context.ExitGradientColocation(op, gradient_uid)\n else:\n yield\n\n @tf_contextlib.contextmanager\n def colocate_with(self, op, ignore_existing=False):\n \"\"\"Returns a context manager that specifies an op to colocate with.\n\n Note: this function is not for public use, only for internal libraries.\n\n For example:\n\n ```python\n a = tf.Variable([1.0])\n with g.colocate_with(a):\n b = tf.constant(1.0)\n c = tf.add(a, b)\n ```\n\n `b` and `c` will always be colocated with `a`, no matter where `a`\n is eventually placed.\n\n **NOTE** Using a colocation scope resets any existing device constraints.\n\n If `op` is `None` then `ignore_existing` must be `True` and the new\n scope resets all colocation and device constraints.\n\n Args:\n op: The op to colocate all created ops with, or `None`.\n ignore_existing: If true, only applies colocation of this op within the\n context, rather than applying all colocation properties on the stack.\n If `op` is `None`, this value must be `True`.\n\n Raises:\n ValueError: if op is None but ignore_existing is False.\n\n Yields:\n A context manager that specifies the 
op with which to colocate\n newly created ops.\n \"\"\"\n if op is None and not ignore_existing:\n raise ValueError(\"Trying to reset colocation (op is None) but \"\n \"ignore_existing is not True\")\n op = _op_to_colocate_with(op, self)\n\n # By default, colocate_with resets the device function stack,\n # since colocate_with is typically used in specific internal\n # library functions where colocation is intended to be \"stronger\"\n # than device functions.\n #\n # In the future, a caller may specify that device_functions win\n # over colocation, in which case we can add support.\n device_fn_tmp = self._device_function_stack\n self._device_function_stack = traceable_stack.TraceableStack()\n\n if ignore_existing:\n current_stack = self._colocation_stack\n self._colocation_stack = traceable_stack.TraceableStack()\n\n if op is not None:\n # offset refers to the stack frame used for storing code location.\n # We use 4, the sum of 1 to use our caller's stack frame and 3\n # to jump over layers of context managers above us.\n self._colocation_stack.push_obj(op, offset=4)\n\n try:\n yield\n finally:\n # Restore device function stack\n self._device_function_stack = device_fn_tmp\n if op is not None:\n self._colocation_stack.pop_obj()\n\n # Reset the colocation stack if requested.\n if ignore_existing:\n self._colocation_stack = current_stack\n\n def _add_device_to_stack(self, device_name_or_function, offset=0):\n \"\"\"Add device to stack manually, separate from a context manager.\"\"\"\n total_offset = 1 + offset\n spec = _UserDeviceSpec(device_name_or_function)\n self._device_function_stack.push_obj(spec, offset=total_offset)\n return spec\n\n @tf_contextlib.contextmanager\n def device(self, device_name_or_function):\n # pylint: disable=line-too-long\n \"\"\"Returns a context manager that specifies the default device to use.\n\n The `device_name_or_function` argument may either be a device name\n string, a device function, or None:\n\n * If it is a device name string, all operations constructed in\n this context will be assigned to the device with that name, unless\n overridden by a nested `device()` context.\n * If it is a function, it will be treated as a function from\n Operation objects to device name strings, and invoked each time\n a new Operation is created. The Operation will be assigned to\n the device with the returned name.\n * If it is None, all `device()` invocations from the enclosing context\n will be ignored.\n\n For information about the valid syntax of device name strings, see\n the documentation in\n [`DeviceNameUtils`](https://www.tensorflow.org/code/tensorflow/core/util/device_name_utils.h).\n\n For example:\n\n ```python\n with g.device('/device:GPU:0'):\n # All operations constructed in this context will be placed\n # on GPU 0.\n with g.device(None):\n # All operations constructed in this context will have no\n # assigned device.\n\n # Defines a function from `Operation` to device string.\n def matmul_on_gpu(n):\n if n.type == \"MatMul\":\n return \"/device:GPU:0\"\n else:\n return \"/cpu:0\"\n\n with g.device(matmul_on_gpu):\n # All operations of type \"MatMul\" constructed in this context\n # will be placed on GPU 0; all other operations will be placed\n # on CPU 0.\n ```\n\n **N.B.** The device scope may be overridden by op wrappers or\n other library code. 
For example, a variable assignment op\n `v.assign()` must be colocated with the `tf.Variable` `v`, and\n incompatible device scopes will be ignored.\n\n Args:\n device_name_or_function: The device name or function to use in the\n context.\n\n Yields:\n A context manager that specifies the default device to use for newly\n created ops.\n\n Raises:\n RuntimeError: If device scopes are not properly nested.\n \"\"\"\n self._add_device_to_stack(device_name_or_function, offset=2)\n old_top_of_stack = self._device_function_stack.peek_top_obj()\n try:\n yield\n finally:\n new_top_of_stack = self._device_function_stack.peek_top_obj()\n if old_top_of_stack is not new_top_of_stack:\n raise RuntimeError(\"Exiting device scope without proper scope nesting.\")\n self._device_function_stack.pop_obj()\n\n def _apply_device_functions(self, op):\n \"\"\"Applies the current device function stack to the given operation.\"\"\"\n # Apply any device functions in LIFO order, so that the most recently\n # pushed function has the first chance to apply a device to the op.\n # We apply here because the result can depend on the Operation's\n # signature, which is computed in the Operation constructor.\n # pylint: disable=protected-access\n prior_device_string = None\n for device_spec in self._device_function_stack.peek_objs():\n if device_spec.is_null_merge:\n continue\n\n if device_spec.function is None:\n break\n\n device_string = device_spec.string_merge(op)\n\n # Take advantage of the fact that None is a singleton and Python interns\n # strings, since identity checks are faster than equality checks.\n if device_string is not prior_device_string:\n op._set_device_from_string(device_string)\n prior_device_string = device_string\n op._device_code_locations = self._snapshot_device_function_stack_metadata()\n # pylint: enable=protected-access\n\n # pylint: disable=g-doc-return-or-yield\n @tf_contextlib.contextmanager\n def container(self, container_name):\n \"\"\"Returns a context manager that specifies the resource container to use.\n\n Stateful operations, such as variables and queues, can maintain their\n states on devices so that they can be shared by multiple processes.\n A resource container is a string name under which these stateful\n operations are tracked. 
These resources can be released or cleared\n with `tf.Session.reset()`.\n\n For example:\n\n ```python\n with g.container('experiment0'):\n # All stateful Operations constructed in this context will be placed\n # in resource container \"experiment0\".\n v1 = tf.Variable([1.0])\n v2 = tf.Variable([2.0])\n with g.container(\"experiment1\"):\n # All stateful Operations constructed in this context will be\n # placed in resource container \"experiment1\".\n v3 = tf.Variable([3.0])\n q1 = tf.queue.FIFOQueue(10, tf.float32)\n # All stateful Operations constructed in this context will be\n # be created in the \"experiment0\".\n v4 = tf.Variable([4.0])\n q1 = tf.queue.FIFOQueue(20, tf.float32)\n with g.container(\"\"):\n # All stateful Operations constructed in this context will be\n # be placed in the default resource container.\n v5 = tf.Variable([5.0])\n q3 = tf.queue.FIFOQueue(30, tf.float32)\n\n # Resets container \"experiment0\", after which the state of v1, v2, v4, q1\n # will become undefined (such as uninitialized).\n tf.Session.reset(target, [\"experiment0\"])\n ```\n\n Args:\n container_name: container name string.\n\n Returns:\n A context manager for defining resource containers for stateful ops,\n yields the container name.\n \"\"\"\n original_container = self._container\n self._container = container_name\n try:\n yield self._container\n finally:\n self._container = original_container\n\n # pylint: enable=g-doc-return-or-yield\n\n class _ControlDependenciesController(object):\n \"\"\"Context manager for `control_dependencies()`.\"\"\"\n\n def __init__(self, graph, control_inputs):\n \"\"\"Create a new `_ControlDependenciesController`.\n\n A `_ControlDependenciesController` is the context manager for\n `with tf.control_dependencies()` blocks. These normally nest,\n as described in the documentation for `control_dependencies()`.\n\n The `control_inputs` argument list control dependencies that must be\n added to the current set of control dependencies. Because of\n uniquification the set can be empty even if the caller passed a list of\n ops. The special value `None` indicates that we want to start a new\n empty set of control dependencies instead of extending the current set.\n\n In that case we also clear the current control flow context, which is an\n additional mechanism to add control dependencies.\n\n Args:\n graph: The graph that this controller is managing.\n control_inputs: List of ops to use as control inputs in addition to the\n current control dependencies. 
None to indicate that the dependencies\n should be cleared.\n \"\"\"\n self._graph = graph\n if control_inputs is None:\n self._control_inputs_val = []\n self._new_stack = True\n else:\n self._control_inputs_val = control_inputs\n self._new_stack = False\n self._seen_nodes = set()\n self._old_stack = None\n self._old_control_flow_context = None\n\n# pylint: disable=protected-access\n\n def __enter__(self):\n if self._new_stack:\n # Clear the control_dependencies graph.\n self._old_stack = self._graph._control_dependencies_stack\n self._graph._control_dependencies_stack = []\n # Clear the control_flow_context too.\n self._old_control_flow_context = self._graph._get_control_flow_context()\n self._graph._set_control_flow_context(None)\n self._graph._push_control_dependencies_controller(self)\n\n def __exit__(self, unused_type, unused_value, unused_traceback):\n self._graph._pop_control_dependencies_controller(self)\n if self._new_stack:\n self._graph._control_dependencies_stack = self._old_stack\n self._graph._set_control_flow_context(self._old_control_flow_context)\n\n# pylint: enable=protected-access\n\n @property\n def control_inputs(self):\n return self._control_inputs_val\n\n def add_op(self, op):\n if isinstance(op, Tensor):\n op = op.ref()\n self._seen_nodes.add(op)\n\n def op_in_group(self, op):\n if isinstance(op, Tensor):\n op = op.ref()\n return op in self._seen_nodes\n\n def _push_control_dependencies_controller(self, controller):\n self._control_dependencies_stack.append(controller)\n\n def _pop_control_dependencies_controller(self, controller):\n assert self._control_dependencies_stack[-1] is controller\n self._control_dependencies_stack.pop()\n\n def _current_control_dependencies(self):\n ret = set()\n for controller in self._control_dependencies_stack:\n for op in controller.control_inputs:\n ret.add(op)\n return ret\n\n def _control_dependencies_for_inputs(self, input_ops):\n \"\"\"For an op that takes `input_ops` as inputs, compute control inputs.\n\n The returned control dependencies should yield an execution that\n is equivalent to adding all control inputs in\n self._control_dependencies_stack to a newly created op. 
However,\n this function attempts to prune the returned control dependencies\n by observing that nodes created within the same `with\n control_dependencies(...):` block may have data dependencies that make\n the explicit approach redundant.\n\n Args:\n input_ops: The data input ops for an op to be created.\n\n Returns:\n A list of control inputs for the op to be created.\n \"\"\"\n ret = []\n for controller in self._control_dependencies_stack:\n # If any of the input_ops already depends on the inputs from controller,\n # we say that the new op is dominated (by that input), and we therefore\n # do not need to add control dependencies for this controller's inputs.\n dominated = False\n for op in input_ops:\n if controller.op_in_group(op):\n dominated = True\n break\n if not dominated:\n # Don't add a control input if we already have a data dependency on i.\n # NOTE(mrry): We do not currently track transitive data dependencies,\n # so we may add redundant control inputs.\n ret.extend(c for c in controller.control_inputs if c not in input_ops)\n return ret\n\n def _record_op_seen_by_control_dependencies(self, op):\n \"\"\"Record that the given op depends on all registered control dependencies.\n\n Args:\n op: An Operation.\n \"\"\"\n for controller in self._control_dependencies_stack:\n controller.add_op(op)\n\n def control_dependencies(self, control_inputs):\n \"\"\"Returns a context manager that specifies control dependencies.\n\n Use with the `with` keyword to specify that all operations constructed\n within the context should have control dependencies on\n `control_inputs`. For example:\n\n ```python\n with g.control_dependencies([a, b, c]):\n # `d` and `e` will only run after `a`, `b`, and `c` have executed.\n d = ...\n e = ...\n ```\n\n Multiple calls to `control_dependencies()` can be nested, and in\n that case a new `Operation` will have control dependencies on the union\n of `control_inputs` from all active contexts.\n\n ```python\n with g.control_dependencies([a, b]):\n # Ops constructed here run after `a` and `b`.\n with g.control_dependencies([c, d]):\n # Ops constructed here run after `a`, `b`, `c`, and `d`.\n ```\n\n You can pass None to clear the control dependencies:\n\n ```python\n with g.control_dependencies([a, b]):\n # Ops constructed here run after `a` and `b`.\n with g.control_dependencies(None):\n # Ops constructed here run normally, not waiting for either `a` or `b`.\n with g.control_dependencies([c, d]):\n # Ops constructed here run after `c` and `d`, also not waiting\n # for either `a` or `b`.\n ```\n\n *N.B.* The control dependencies context applies *only* to ops that\n are constructed within the context. Merely using an op or tensor\n in the context does not add a control dependency. The following\n example illustrates this point:\n\n ```python\n # WRONG\n def my_func(pred, tensor):\n t = tf.matmul(tensor, tensor)\n with tf.control_dependencies([pred]):\n # The matmul op is created outside the context, so no control\n # dependency will be added.\n return t\n\n # RIGHT\n def my_func(pred, tensor):\n with tf.control_dependencies([pred]):\n # The matmul op is created in the context, so a control dependency\n # will be added.\n return tf.matmul(tensor, tensor)\n ```\n\n Also note that though execution of ops created under this scope will trigger\n execution of the dependencies, the ops created under this scope might still\n be pruned from a normal tensorflow graph. 
For example, in the following\n snippet of code the dependencies are never executed:\n\n ```python\n loss = model.loss()\n with tf.control_dependencies(dependencies):\n loss = loss + tf.constant(1) # note: dependencies ignored in the\n # backward pass\n return tf.gradients(loss, model.variables)\n ```\n\n This is because evaluating the gradient graph does not require evaluating\n the constant(1) op created in the forward pass.\n\n Args:\n control_inputs: A list of `Operation` or `Tensor` objects which must be\n executed or computed before running the operations defined in the\n context. Can also be `None` to clear the control dependencies.\n\n Returns:\n A context manager that specifies control dependencies for all\n operations constructed within the context.\n\n Raises:\n TypeError: If `control_inputs` is not a list of `Operation` or\n `Tensor` objects.\n \"\"\"\n if control_inputs is None:\n return self._ControlDependenciesController(self, None)\n # First convert the inputs to ops, and deduplicate them.\n # NOTE(mrry): Other than deduplication, we do not currently track direct\n # or indirect dependencies between control_inputs, which may result in\n # redundant control inputs.\n control_ops = []\n current = self._current_control_dependencies()\n for c in control_inputs:\n # The hasattr(handle) is designed to match ResourceVariables. This is so\n # control dependencies on a variable or on an unread variable don't\n # trigger reads.\n if (isinstance(c, IndexedSlices) or\n (hasattr(c, \"_handle\") and hasattr(c, \"op\"))):\n c = c.op\n c = self.as_graph_element(c)\n if isinstance(c, Tensor):\n c = c.op\n elif not isinstance(c, Operation):\n raise TypeError(\"Control input must be Operation or Tensor: %s\" % c)\n if c not in current:\n control_ops.append(c)\n current.add(c)\n return self._ControlDependenciesController(self, control_ops)\n\n # pylint: disable=g-doc-return-or-yield\n @tf_contextlib.contextmanager\n def _attr_scope(self, attr_map):\n \"\"\"EXPERIMENTAL: A context manager for setting attributes on operators.\n\n This context manager can be used to add additional\n attributes to operators within the scope of the context.\n\n For example:\n\n with ops.Graph().as_default() as g:\n f_1 = Foo() # No extra attributes\n with g._attr_scope({\"_a\": tf.attr_value_pb2.AttrValue(b=False)}):\n f_2 = Foo() # Additional attribute _a=False\n with g._attr_scope({\"_a\": tf.attr_value_pb2.AttrValue(b=True)}):\n f_3 = Foo() # Additional attribute _a=False\n with g._attr_scope({\"_a\": None}):\n f_4 = Foo() # No additional attributes.\n\n Args:\n attr_map: A dictionary mapping attr name strings to AttrValue protocol\n buffers or None.\n\n Returns:\n A context manager that sets the kernel label to be used for one or more\n ops created in that context.\n\n Raises:\n TypeError: If attr_map is not a dictionary mapping\n strings to AttrValue protobufs.\n \"\"\"\n if not isinstance(attr_map, dict):\n raise TypeError(\"attr_map must be a dictionary mapping \"\n \"strings to AttrValue protocol buffers\")\n # The saved_attrs dictionary stores any currently-set labels that\n # will be overridden by this context manager.\n saved_attrs = {}\n # Install the given attribute\n for name, attr in attr_map.items():\n if not (isinstance(name, six.string_types) and\n (isinstance(attr, (type(None), attr_value_pb2.AttrValue)) or\n callable(attr))):\n raise TypeError(\"attr_map must be a dictionary mapping \"\n \"strings to AttrValue protocol buffers or \"\n \"callables that emit AttrValue protocol buffers\")\n try:\n 
saved_attrs[name] = self._attr_scope_map[name]\n except KeyError:\n pass\n if attr is None:\n del self._attr_scope_map[name]\n else:\n self._attr_scope_map[name] = attr\n try:\n yield # The code within the context runs here.\n finally:\n # Remove the attributes set for this context, and restore any saved\n # attributes.\n for name, attr in attr_map.items():\n try:\n self._attr_scope_map[name] = saved_attrs[name]\n except KeyError:\n del self._attr_scope_map[name]\n\n # pylint: enable=g-doc-return-or-yield\n\n # pylint: disable=g-doc-return-or-yield\n @tf_contextlib.contextmanager\n def _kernel_label_map(self, op_to_kernel_label_map):\n \"\"\"EXPERIMENTAL: A context manager for setting kernel labels.\n\n This context manager can be used to select particular\n implementations of kernels within the scope of the context.\n\n For example:\n\n with ops.Graph().as_default() as g:\n f_1 = Foo() # Uses the default registered kernel for the Foo op.\n with g.kernel_label_map({\"Foo\": \"v_2\"}):\n f_2 = Foo() # Uses the registered kernel with label \"v_2\"\n # for the Foo op.\n with g.kernel_label_map({\"Foo\": \"v_3\"}):\n f_3 = Foo() # Uses the registered kernel with label \"v_3\"\n # for the Foo op.\n with g.kernel_label_map({\"Foo\": \"\"}):\n f_4 = Foo() # Uses the default registered kernel\n # for the Foo op.\n\n Args:\n op_to_kernel_label_map: A dictionary mapping op type strings to kernel\n label strings.\n\n Returns:\n A context manager that sets the kernel label to be used for one or more\n ops created in that context.\n\n Raises:\n TypeError: If op_to_kernel_label_map is not a dictionary mapping\n strings to strings.\n \"\"\"\n if not isinstance(op_to_kernel_label_map, dict):\n raise TypeError(\"op_to_kernel_label_map must be a dictionary mapping \"\n \"strings to strings\")\n # The saved_labels dictionary stores any currently-set labels that\n # will be overridden by this context manager.\n saved_labels = {}\n # Install the given label\n for op_type, label in op_to_kernel_label_map.items():\n if not (isinstance(op_type, six.string_types) and\n isinstance(label, six.string_types)):\n raise TypeError(\"op_to_kernel_label_map must be a dictionary mapping \"\n \"strings to strings\")\n try:\n saved_labels[op_type] = self._op_to_kernel_label_map[op_type]\n except KeyError:\n pass\n self._op_to_kernel_label_map[op_type] = label\n try:\n yield # The code within the context runs here.\n finally:\n # Remove the labels set for this context, and restore any saved labels.\n for op_type, label in op_to_kernel_label_map.items():\n try:\n self._op_to_kernel_label_map[op_type] = saved_labels[op_type]\n except KeyError:\n del self._op_to_kernel_label_map[op_type]\n\n # pylint: enable=g-doc-return-or-yield\n\n @tf_contextlib.contextmanager\n def _override_gradient_function(self, gradient_function_map):\n \"\"\"Specify gradient function for the given op type.\"\"\"\n\n # This is an internal API and we don't need nested context for this.\n assert not self._gradient_function_map\n self._gradient_function_map = gradient_function_map\n yield\n self._gradient_function_map = {}\n\n # pylint: disable=g-doc-return-or-yield\n @tf_contextlib.contextmanager\n def gradient_override_map(self, op_type_map):\n \"\"\"EXPERIMENTAL: A context manager for overriding gradient functions.\n\n This context manager can be used to override the gradient function\n that will be used for ops within the scope of the context.\n\n For example:\n\n ```python\n @tf.RegisterGradient(\"CustomSquare\")\n def _custom_square_grad(op, grad):\n # 
...\n\n with tf.Graph().as_default() as g:\n c = tf.constant(5.0)\n s_1 = tf.square(c) # Uses the default gradient for tf.square.\n with g.gradient_override_map({\"Square\": \"CustomSquare\"}):\n s_2 = tf.square(s_2) # Uses _custom_square_grad to compute the\n # gradient of s_2.\n ```\n\n Args:\n op_type_map: A dictionary mapping op type strings to alternative op type\n strings.\n\n Returns:\n A context manager that sets the alternative op type to be used for one\n or more ops created in that context.\n\n Raises:\n TypeError: If `op_type_map` is not a dictionary mapping strings to\n strings.\n \"\"\"\n if not isinstance(op_type_map, dict):\n raise TypeError(\"op_type_map must be a dictionary mapping \"\n \"strings to strings\")\n # The saved_mappings dictionary stores any currently-set mappings that\n # will be overridden by this context manager.\n saved_mappings = {}\n # Install the given label\n for op_type, mapped_op_type in op_type_map.items():\n if not (isinstance(op_type, six.string_types) and\n isinstance(mapped_op_type, six.string_types)):\n raise TypeError(\"op_type_map must be a dictionary mapping \"\n \"strings to strings\")\n try:\n saved_mappings[op_type] = self._gradient_override_map[op_type]\n except KeyError:\n pass\n self._gradient_override_map[op_type] = mapped_op_type\n try:\n yield # The code within the context runs here.\n finally:\n # Remove the labels set for this context, and restore any saved labels.\n for op_type, mapped_op_type in op_type_map.items():\n try:\n self._gradient_override_map[op_type] = saved_mappings[op_type]\n except KeyError:\n del self._gradient_override_map[op_type]\n\n # pylint: enable=g-doc-return-or-yield\n\n def prevent_feeding(self, tensor):\n \"\"\"Marks the given `tensor` as unfeedable in this graph.\"\"\"\n self._unfeedable_tensors.add(tensor)\n\n def is_feedable(self, tensor):\n \"\"\"Returns `True` if and only if `tensor` is feedable.\"\"\"\n return tensor not in self._unfeedable_tensors\n\n def prevent_fetching(self, op):\n \"\"\"Marks the given `op` as unfetchable in this graph.\"\"\"\n self._unfetchable_ops.add(op)\n\n def is_fetchable(self, tensor_or_op):\n \"\"\"Returns `True` if and only if `tensor_or_op` is fetchable.\"\"\"\n if isinstance(tensor_or_op, Tensor):\n return tensor_or_op.op not in self._unfetchable_ops\n else:\n return tensor_or_op not in self._unfetchable_ops\n\n def switch_to_thread_local(self):\n \"\"\"Make device, colocation and dependencies stacks thread-local.\n\n Device, colocation and dependencies stacks are not thread-local be default.\n If multiple threads access them, then the state is shared. This means that\n one thread may affect the behavior of another thread.\n\n After this method is called, the stacks become thread-local. If multiple\n threads access them, then the state is not shared. 
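As a minimal\n    sketch (the helper thread and names below are illustrative), a device scope\n    entered on one thread no longer affects ops built on another thread once\n    the stacks are thread-local:\n\n    ```python\n    import threading\n\n    g = tf.Graph()\n    g.switch_to_thread_local()\n\n    def build():\n      with g.as_default():\n        op = tf.constant(1.0, name=\"worker_const\").op\n        assert op.device == \"\"  # the other thread's /cpu:0 scope is not seen\n\n    with g.as_default(), g.device(\"/cpu:0\"):\n      worker = threading.Thread(target=build)\n      worker.start()\n      worker.join()\n    ```\n\n    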
Each thread uses its own\n value; a thread doesn't affect other threads by mutating such a stack.\n\n The initial value for every thread's stack is set to the current value\n of the stack when `switch_to_thread_local()` was first called.\n \"\"\"\n if not self._stack_state_is_thread_local:\n self._stack_state_is_thread_local = True\n\n @property\n def _device_function_stack(self):\n if self._stack_state_is_thread_local:\n # This may be called from a thread where device_function_stack doesn't yet\n # exist.\n # pylint: disable=protected-access\n if not hasattr(self._thread_local, \"_device_function_stack\"):\n stack_copy_for_this_thread = self._graph_device_function_stack.copy()\n self._thread_local._device_function_stack = stack_copy_for_this_thread\n return self._thread_local._device_function_stack\n # pylint: enable=protected-access\n else:\n return self._graph_device_function_stack\n\n @property\n def _device_functions_outer_to_inner(self):\n user_device_specs = self._device_function_stack.peek_objs()\n device_functions = [spec.function for spec in user_device_specs]\n device_functions_outer_to_inner = list(reversed(device_functions))\n return device_functions_outer_to_inner\n\n def _snapshot_device_function_stack_metadata(self):\n \"\"\"Return device function stack as a list of TraceableObjects.\n\n Returns:\n [traceable_stack.TraceableObject, ...] where each TraceableObject's .obj\n member is a displayable name for the user's argument to Graph.device, and\n the filename and lineno members point to the code location where\n Graph.device was called directly or indirectly by the user.\n \"\"\"\n snapshot = []\n for obj in self._device_function_stack.peek_traceable_objs():\n obj_copy = obj.copy_metadata()\n obj_copy.obj = obj.obj.display_name\n snapshot.append(obj_copy)\n return snapshot\n\n @_device_function_stack.setter\n def _device_function_stack(self, device_function_stack):\n if self._stack_state_is_thread_local:\n # pylint: disable=protected-access\n self._thread_local._device_function_stack = device_function_stack\n # pylint: enable=protected-access\n else:\n self._graph_device_function_stack = device_function_stack\n\n @property\n def _colocation_stack(self):\n \"\"\"Return thread-local copy of colocation stack.\"\"\"\n if self._stack_state_is_thread_local:\n # This may be called from a thread where colocation_stack doesn't yet\n # exist.\n # pylint: disable=protected-access\n if not hasattr(self._thread_local, \"_colocation_stack\"):\n stack_copy_for_this_thread = self._graph_colocation_stack.copy()\n self._thread_local._colocation_stack = stack_copy_for_this_thread\n return self._thread_local._colocation_stack\n # pylint: enable=protected-access\n else:\n return self._graph_colocation_stack\n\n def _snapshot_colocation_stack_metadata(self):\n \"\"\"Return colocation stack metadata as a dictionary.\"\"\"\n return {\n traceable_obj.obj.name: traceable_obj.copy_metadata()\n for traceable_obj in self._colocation_stack.peek_traceable_objs()\n }\n\n @_colocation_stack.setter\n def _colocation_stack(self, colocation_stack):\n if self._stack_state_is_thread_local:\n # pylint: disable=protected-access\n self._thread_local._colocation_stack = colocation_stack\n # pylint: enable=protected-access\n else:\n self._graph_colocation_stack = colocation_stack\n\n @property\n def _control_dependencies_stack(self):\n if self._stack_state_is_thread_local:\n # This may be called from a thread where control_dependencies_stack\n # doesn't yet exist.\n if not hasattr(self._thread_local, 
\"_control_dependencies_stack\"):\n self._thread_local._control_dependencies_stack = (\n self._graph_control_dependencies_stack[:])\n return self._thread_local._control_dependencies_stack\n else:\n return self._graph_control_dependencies_stack\n\n @_control_dependencies_stack.setter\n def _control_dependencies_stack(self, control_dependencies):\n if self._stack_state_is_thread_local:\n self._thread_local._control_dependencies_stack = control_dependencies\n else:\n self._graph_control_dependencies_stack = control_dependencies\n\n @property\n def _distribution_strategy_stack(self):\n \"\"\"A stack to maintain distribution strategy context for each thread.\"\"\"\n if not hasattr(self._thread_local, \"_distribution_strategy_stack\"):\n self._thread_local._distribution_strategy_stack = [] # pylint: disable=protected-access\n return self._thread_local._distribution_strategy_stack # pylint: disable=protected-access\n\n @_distribution_strategy_stack.setter\n def _distribution_strategy_stack(self, _distribution_strategy_stack):\n self._thread_local._distribution_strategy_stack = ( # pylint: disable=protected-access\n _distribution_strategy_stack)\n\n @property\n def _global_distribute_strategy_scope(self):\n \"\"\"For implementing `tf.distribute.set_strategy()`.\"\"\"\n if not hasattr(self._thread_local, \"distribute_strategy_scope\"):\n self._thread_local.distribute_strategy_scope = None\n return self._thread_local.distribute_strategy_scope\n\n @_global_distribute_strategy_scope.setter\n def _global_distribute_strategy_scope(self, distribute_strategy_scope):\n self._thread_local.distribute_strategy_scope = (distribute_strategy_scope)\n\n @property\n def _auto_cast_variable_read_dtype(self):\n \"\"\"The dtype that instances of `AutoCastVariable` will be casted to.\n\n This is None if `AutoCastVariables` should not be casted.\n\n See `AutoCastVariable` for more information.\n\n Returns:\n The dtype that instances of `AutoCastVariable` will be casted to.\n \"\"\"\n if not hasattr(self._thread_local, \"_auto_cast_variable_read_dtype\"):\n self._thread_local._auto_cast_variable_read_dtype = None # pylint: disable=protected-access\n return self._thread_local._auto_cast_variable_read_dtype # pylint: disable=protected-access\n\n @_auto_cast_variable_read_dtype.setter\n def _auto_cast_variable_read_dtype(self, dtype):\n if dtype:\n dtype = dtypes.as_dtype(dtype)\n self._thread_local._auto_cast_variable_read_dtype = dtype # pylint: disable=protected-access\n\n @tf_contextlib.contextmanager\n def _enable_auto_casting_variables(self, dtype):\n \"\"\"Context manager to automatically cast AutoCastVariables.\n\n If an AutoCastVariable `var` is used under this context manager, it will be\n casted to `dtype` before being used.\n\n See `AutoCastVariable` for more information.\n\n Args:\n dtype: The dtype that AutoCastVariables should be casted to.\n\n Yields:\n Nothing.\n \"\"\"\n prev_read_dtype = self._auto_cast_variable_read_dtype\n try:\n self._auto_cast_variable_read_dtype = dtype\n yield\n finally:\n self._auto_cast_variable_read_dtype = prev_read_dtype\n\n def _mutation_lock(self):\n \"\"\"Returns a lock to guard code that creates & mutates ops.\n\n See the comment for self._group_lock for more info.\n \"\"\"\n return self._group_lock.group(_MUTATION_LOCK_GROUP)\n\n def _session_run_lock(self):\n \"\"\"Returns a lock to guard code for Session.run.\n\n See the comment for self._group_lock for more info.\n \"\"\"\n return self._group_lock.group(_SESSION_RUN_LOCK_GROUP)\n\n\n# TODO(agarwal): currently device 
directives in an outer eager scope will not\n# apply to inner graph mode code. Fix that.\n\n\n@tf_export(v1=[\"device\"])\ndef device(device_name_or_function):\n \"\"\"Wrapper for `Graph.device()` using the default graph.\n\n See `tf.Graph.device` for more details.\n\n Args:\n device_name_or_function: The device name or function to use in the context.\n\n Returns:\n A context manager that specifies the default device to use for newly\n created ops.\n\n Raises:\n RuntimeError: If eager execution is enabled and a function is passed in.\n \"\"\"\n if context.executing_eagerly():\n if callable(device_name_or_function):\n raise RuntimeError(\n \"tf.device does not support functions when eager execution \"\n \"is enabled.\")\n return context.device(device_name_or_function)\n elif executing_eagerly_outside_functions():\n @tf_contextlib.contextmanager\n def combined(device_name_or_function):\n with get_default_graph().device(device_name_or_function):\n if not callable(device_name_or_function):\n with context.device(device_name_or_function):\n yield\n else:\n yield\n return combined(device_name_or_function)\n else:\n return get_default_graph().device(device_name_or_function)\n\n\n@tf_export(\"device\", v1=[])\ndef device_v2(device_name):\n \"\"\"Specifies the device for ops created/executed in this context.\n\n This function specifies the device to be used for ops created/executed in a\n particular context. Nested contexts will inherit and also create/execute\n their ops on the specified device. If a specific device is not required,\n consider not using this function so that a device can be automatically\n assigned. In general the use of this function is optional. `device_name` can\n be fully specified, as in \"/job:worker/task:1/device:cpu:0\", or partially\n specified, containing only a subset of the \"/\"-separated fields. 
Any fields\n which are specified will override device annotations from outer scopes.\n\n For example:\n\n ```python\n with tf.device('/job:foo'):\n # ops created here have devices with /job:foo\n with tf.device('/job:bar/task:0/device:gpu:2'):\n # ops created here have the fully specified device above\n with tf.device('/device:gpu:1'):\n # ops created here have the device '/job:foo/device:gpu:1'\n ```\n\n Args:\n device_name: The device name to use in the context.\n\n Returns:\n A context manager that specifies the default device to use for newly\n created ops.\n\n Raises:\n RuntimeError: If a function is passed in.\n \"\"\"\n if callable(device_name):\n raise RuntimeError(\"tf.device does not support functions.\")\n return device(device_name)\n\n\n@tf_export(v1=[\"container\"])\ndef container(container_name):\n \"\"\"Wrapper for `Graph.container()` using the default graph.\n\n Args:\n container_name: The container string to use in the context.\n\n Returns:\n A context manager that specifies the default container to use for newly\n created stateful ops.\n \"\"\"\n return get_default_graph().container(container_name)\n\n\ndef _colocate_with_for_gradient(op, gradient_uid, ignore_existing=False):\n if context.executing_eagerly():\n if op is not None:\n if not hasattr(op, \"device\"):\n op = internal_convert_to_tensor_or_indexed_slices(op)\n return device(op.device)\n else:\n return NullContextmanager()\n else:\n default_graph = get_default_graph()\n if isinstance(op, EagerTensor):\n if default_graph.building_function:\n return default_graph.device(op.device)\n else:\n raise ValueError(\"Encountered an Eager-defined Tensor during graph \"\n \"construction, but a function was not being built.\")\n return default_graph._colocate_with_for_gradient(\n op, gradient_uid=gradient_uid, ignore_existing=ignore_existing)\n\n\n# Internal interface to colocate_with. colocate_with has been deprecated from\n# public API. There are still a few internal uses of colocate_with. Add internal\n# only API for those uses to avoid deprecation warning.\ndef colocate_with(op, ignore_existing=False):\n return _colocate_with_for_gradient(op, None, ignore_existing=ignore_existing)\n\n\[email protected](\n date=None, instructions=\"Colocations handled automatically by placer.\")\n@tf_export(v1=[\"colocate_with\"])\ndef _colocate_with(op, ignore_existing=False):\n return colocate_with(op, ignore_existing)\n\n\n@tf_export(\"control_dependencies\")\ndef control_dependencies(control_inputs):\n \"\"\"Wrapper for `Graph.control_dependencies()` using the default graph.\n\n See `tf.Graph.control_dependencies`\n for more details.\n\n When eager execution is enabled, any callable object in the `control_inputs`\n list will be called.\n\n Args:\n control_inputs: A list of `Operation` or `Tensor` objects which must be\n executed or computed before running the operations defined in the context.\n Can also be `None` to clear the control dependencies. 
If eager execution\n is enabled, any callable object in the `control_inputs` list will be\n called.\n\n Returns:\n A context manager that specifies control dependencies for all\n operations constructed within the context.\n \"\"\"\n if context.executing_eagerly():\n if control_inputs:\n # Execute any pending callables.\n for control in control_inputs:\n if callable(control):\n control()\n return NullContextmanager()\n else:\n return get_default_graph().control_dependencies(control_inputs)\n\n\nclass _DefaultStack(threading.local):\n \"\"\"A thread-local stack of objects for providing implicit defaults.\"\"\"\n\n def __init__(self):\n super(_DefaultStack, self).__init__()\n self._enforce_nesting = True\n self.stack = []\n\n def get_default(self):\n return self.stack[-1] if len(self.stack) >= 1 else None\n\n def reset(self):\n self.stack = []\n\n def is_cleared(self):\n return not self.stack\n\n @property\n def enforce_nesting(self):\n return self._enforce_nesting\n\n @enforce_nesting.setter\n def enforce_nesting(self, value):\n self._enforce_nesting = value\n\n @tf_contextlib.contextmanager\n def get_controller(self, default):\n \"\"\"A context manager for manipulating a default stack.\"\"\"\n self.stack.append(default)\n try:\n yield default\n finally:\n # stack may be empty if reset() was called\n if self.stack:\n if self._enforce_nesting:\n if self.stack[-1] is not default:\n raise AssertionError(\n \"Nesting violated for default stack of %s objects\" %\n type(default))\n self.stack.pop()\n else:\n self.stack.remove(default)\n\n\n_default_session_stack = _DefaultStack() # pylint: disable=protected-access\n\n\ndef default_session(session):\n \"\"\"Python \"with\" handler for defining a default session.\n\n This function provides a means of registering a session for handling\n Tensor.eval() and Operation.run() calls. It is primarily intended for use\n by session.Session, but can be used with any object that implements\n the Session.run() interface.\n\n Use with the \"with\" keyword to specify that Tensor.eval() and Operation.run()\n invocations within the scope of a block should be executed by a particular\n session.\n\n The default session applies to the current thread only, so it is always\n possible to inspect the call stack and determine the scope of a default\n session. If you create a new thread, and wish to use the default session\n in that thread, you must explicitly add a \"with ops.default_session(sess):\"\n block in that thread's function.\n\n Example:\n The following code examples are equivalent:\n\n # 1. Using the Session object directly:\n sess = ...\n c = tf.constant(5.0)\n sess.run(c)\n\n # 2. Using default_session():\n sess = ...\n with ops.default_session(sess):\n c = tf.constant(5.0)\n result = c.eval()\n\n # 3. Overriding default_session():\n sess = ...\n with ops.default_session(sess):\n c = tf.constant(5.0)\n with ops.default_session(...):\n c.eval(session=sess)\n\n Args:\n session: The session to be installed as the default session.\n\n Returns:\n A context manager for the default session.\n \"\"\"\n return _default_session_stack.get_controller(session)\n\n\n@tf_export(v1=[\"get_default_session\"])\ndef get_default_session():\n \"\"\"Returns the default session for the current thread.\n\n The returned `Session` will be the innermost session on which a\n `Session` or `Session.as_default()` context has been entered.\n\n NOTE: The default session is a property of the current thread. 
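A minimal sketch\n    of that behavior, assuming the v1 `Session` API is in use:\n\n    ```python\n    import threading\n\n    sess = tf.compat.v1.Session()\n\n    def check():\n      # A freshly started thread does not inherit the default session.\n      assert tf.compat.v1.get_default_session() is None\n\n    with sess.as_default():\n      assert tf.compat.v1.get_default_session() is sess\n      t = threading.Thread(target=check)\n      t.start()\n      t.join()\n    ```\n\n    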
If you\n create a new thread, and wish to use the default session in that\n thread, you must explicitly add a `with sess.as_default():` in that\n thread's function.\n\n Returns:\n The default `Session` being used in the current thread.\n \"\"\"\n return _default_session_stack.get_default()\n\n\ndef _eval_using_default_session(tensors, feed_dict, graph, session=None):\n \"\"\"Uses the default session to evaluate one or more tensors.\n\n Args:\n tensors: A single Tensor, or a list of Tensor objects.\n feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists,\n numpy ndarrays, TensorProtos, or strings.\n graph: The graph in which the tensors are defined.\n session: (Optional) A different session to use to evaluate \"tensors\".\n\n Returns:\n Either a single numpy ndarray if \"tensors\" is a single tensor; or a list\n of numpy ndarrays that each correspond to the respective element in\n \"tensors\".\n\n Raises:\n ValueError: If no default session is available; the default session\n does not have \"graph\" as its graph; or if \"session\" is specified,\n and it does not have \"graph\" as its graph.\n \"\"\"\n if session is None:\n session = get_default_session()\n if session is None:\n raise ValueError(\"Cannot evaluate tensor using `eval()`: No default \"\n \"session is registered. Use `with \"\n \"sess.as_default()` or pass an explicit session to \"\n \"`eval(session=sess)`\")\n if session.graph is not graph:\n raise ValueError(\"Cannot use the default session to evaluate tensor: \"\n \"the tensor's graph is different from the session's \"\n \"graph. Pass an explicit session to \"\n \"`eval(session=sess)`.\")\n else:\n if session.graph is not graph:\n raise ValueError(\"Cannot use the given session to evaluate tensor: \"\n \"the tensor's graph is different from the session's \"\n \"graph.\")\n return session.run(tensors, feed_dict)\n\n\ndef _run_using_default_session(operation, feed_dict, graph, session=None):\n \"\"\"Uses the default session to run \"operation\".\n\n Args:\n operation: The Operation to be run.\n feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists,\n numpy ndarrays, TensorProtos, or strings.\n graph: The graph in which \"operation\" is defined.\n session: (Optional) A different session to use to run \"operation\".\n\n Raises:\n ValueError: If no default session is available; the default session\n does not have \"graph\" as its graph; or if \"session\" is specified,\n and it does not have \"graph\" as its graph.\n \"\"\"\n if session is None:\n session = get_default_session()\n if session is None:\n raise ValueError(\"Cannot execute operation using `run()`: No default \"\n \"session is registered. Use `with \"\n \"sess.as_default():` or pass an explicit session to \"\n \"`run(session=sess)`\")\n if session.graph is not graph:\n raise ValueError(\"Cannot use the default session to execute operation: \"\n \"the operation's graph is different from the \"\n \"session's graph. 
Pass an explicit session to \"\n \"run(session=sess).\")\n else:\n if session.graph is not graph:\n raise ValueError(\"Cannot use the given session to execute operation: \"\n \"the operation's graph is different from the session's \"\n \"graph.\")\n session.run(operation, feed_dict)\n\n\nclass _DefaultGraphStack(_DefaultStack): # pylint: disable=protected-access\n \"\"\"A thread-local stack of objects for providing an implicit default graph.\"\"\"\n\n def __init__(self):\n super(_DefaultGraphStack, self).__init__()\n self._global_default_graph = None\n\n def get_default(self):\n \"\"\"Override that returns a global default if the stack is empty.\"\"\"\n ret = super(_DefaultGraphStack, self).get_default()\n if ret is None:\n ret = self._GetGlobalDefaultGraph()\n return ret\n\n def _GetGlobalDefaultGraph(self):\n if self._global_default_graph is None:\n # TODO(mrry): Perhaps log that the default graph is being used, or set\n # provide some other feedback to prevent confusion when a mixture of\n # the global default graph and an explicit graph are combined in the\n # same process.\n self._global_default_graph = Graph()\n return self._global_default_graph\n\n def reset(self):\n super(_DefaultGraphStack, self).reset()\n self._global_default_graph = None\n\n @tf_contextlib.contextmanager\n def get_controller(self, default):\n context.context().context_switches.push(default.building_function,\n default.as_default,\n default._device_function_stack)\n try:\n with super(_DefaultGraphStack,\n self).get_controller(default) as g, context.graph_mode():\n yield g\n finally:\n # If an exception is raised here it may be hiding a related exception in\n # the try-block (just above).\n context.context().context_switches.pop()\n\n\n_default_graph_stack = _DefaultGraphStack()\n\n\n# Shared helper used in init_scope and executing_eagerly_outside_functions\n# to obtain the outermost context that is not building a function, and the\n# innermost non empty device stack.\ndef _get_outer_context_and_inner_device_stack():\n \"\"\"Get the outermost context not building a function.\"\"\"\n default_graph = get_default_graph()\n outer_context = None\n innermost_nonempty_device_stack = default_graph._device_function_stack # pylint: disable=protected-access\n\n if not _default_graph_stack.stack:\n # If the default graph stack is empty, then we cannot be building a\n # function. 
Install the global graph (which, in this case, is also the\n # default graph) as the outer context.\n if default_graph.building_function:\n raise RuntimeError(\"The global graph is building a function.\")\n outer_context = default_graph.as_default\n else:\n # Find a context that is not building a function.\n for stack_entry in reversed(context.context().context_switches.stack):\n if not innermost_nonempty_device_stack:\n innermost_nonempty_device_stack = stack_entry.device_stack\n if not stack_entry.is_building_function:\n outer_context = stack_entry.enter_context_fn\n break\n\n if outer_context is None:\n # As a last resort, obtain the global default graph; this graph doesn't\n # necessarily live on the graph stack (and hence it doesn't necessarily\n # live on the context stack), but it is stored in the graph stack's\n # encapsulating object.\n outer_context = _default_graph_stack._GetGlobalDefaultGraph().as_default # pylint: disable=protected-access\n\n if outer_context is None:\n # Sanity check; this shouldn't be triggered.\n raise RuntimeError(\"All graphs are building functions, and no \"\n \"eager context was previously active.\")\n\n return outer_context, innermost_nonempty_device_stack\n\n\n# pylint: disable=g-doc-return-or-yield,line-too-long\n@tf_export(\"init_scope\")\n@tf_contextlib.contextmanager\ndef init_scope():\n \"\"\"A context manager that lifts ops out of control-flow scopes and function-building graphs.\n\n There is often a need to lift variable initialization ops out of control-flow\n scopes, function-building graphs, and gradient tapes. Entering an\n `init_scope` is a mechanism for satisfying these desiderata. In particular,\n entering an `init_scope` has three effects:\n\n (1) All control dependencies are cleared the moment the scope is entered;\n this is equivalent to entering the context manager returned from\n `control_dependencies(None)`, which has the side-effect of exiting\n control-flow scopes like `tf.cond` and `tf.while_loop`.\n\n (2) All operations that are created while the scope is active are lifted\n into the lowest context on the `context_stack` that is not building a\n graph function. Here, a context is defined as either a graph or an eager\n context. Every context switch, i.e., every installation of a graph as\n the default graph and every switch into eager mode, is logged in a\n thread-local stack called `context_switches`; the log entry for a\n context switch is popped from the stack when the context is exited.\n Entering an `init_scope` is equivalent to crawling up\n `context_switches`, finding the first context that is not building a\n graph function, and entering it. A caveat is that if graph mode is\n enabled but the default graph stack is empty, then entering an\n `init_scope` will simply install a fresh graph as the default one.\n\n (3) The gradient tape is paused while the scope is active.\n\n When eager execution is enabled, code inside an init_scope block runs with\n eager execution enabled even when tracing a `tf.function`. 
For example:\n\n ```python\n tf.compat.v1.enable_eager_execution()\n\n @tf.function\n def func():\n # A function constructs TensorFlow graphs,\n # it does not execute eagerly.\n assert not tf.executing_eagerly()\n with tf.init_scope():\n # Initialization runs with eager execution enabled\n assert tf.executing_eagerly()\n ```\n\n Raises:\n RuntimeError: if graph state is incompatible with this initialization.\n \"\"\"\n # pylint: enable=g-doc-return-or-yield,line-too-long\n\n if context.executing_eagerly():\n # Fastpath.\n with tape.stop_recording():\n yield\n else:\n # Retrieve the active name scope: entering an `init_scope` preserves\n # the name scope of the current context.\n scope = get_default_graph().get_name_scope()\n if scope and scope[-1] != \"/\":\n # Names that end with trailing slashes are treated by `name_scope` as\n # absolute.\n scope = scope + \"/\"\n\n outer_context, innermost_nonempty_device_stack = (\n _get_outer_context_and_inner_device_stack())\n\n outer_graph = None\n outer_device_stack = None\n try:\n with outer_context(), name_scope(\n scope, skip_on_eager=False), control_dependencies(\n None), tape.stop_recording():\n context_manager = NullContextmanager\n context_manager_input = None\n if not context.executing_eagerly():\n # The device stack is preserved when lifting into a graph. Eager\n # execution doesn't implement device stacks and in particular it\n # doesn't support device functions, so in general it's not possible\n # to do the same when lifting into the eager context.\n outer_graph = get_default_graph()\n outer_device_stack = outer_graph._device_function_stack # pylint: disable=protected-access\n outer_graph._device_function_stack = innermost_nonempty_device_stack # pylint: disable=protected-access\n elif innermost_nonempty_device_stack is not None:\n for device_spec in innermost_nonempty_device_stack.peek_objs():\n if device_spec.function is None:\n break\n if device_spec.raw_string:\n context_manager = context.device\n context_manager_input = device_spec.raw_string\n break\n # It is currently not possible to have a device function in V2,\n # but in V1 we are unable to apply device functions in eager mode.\n # This means that we will silently skip some of the entries on the\n # device stack in V1 + eager mode.\n\n with context_manager(context_manager_input):\n yield\n finally:\n # If an exception is raised here it may be hiding a related exception in\n # try-block (just above).\n if outer_graph is not None:\n outer_graph._device_function_stack = outer_device_stack # pylint: disable=protected-access\n\n\n@tf_export(v1=[\"executing_eagerly_outside_functions\"])\ndef executing_eagerly_outside_functions():\n \"\"\"Returns True if executing eagerly, even if inside a graph function.\n\n This function will check the outermost context for the program and see if\n it is in eager mode. It is useful comparing to `tf.executing_eagerly()`,\n which checks the current context and will return `False` within a\n `tf.function` body. It can be used to build library that behave differently\n in eager runtime and v1 session runtime (deprecated).\n\n Example:\n\n >>> tf.compat.v1.enable_eager_execution()\n >>> @tf.function\n ... def func():\n ... # A function constructs TensorFlow graphs, it does not execute eagerly,\n ... # but the outer most context is still eager.\n ... assert not tf.executing_eagerly()\n ... 
return tf.compat.v1.executing_eagerly_outside_functions()\n >>> func()\n <tf.Tensor: shape=(), dtype=bool, numpy=True>\n\n Returns:\n boolean, whether the outermost context is in eager mode.\n \"\"\"\n if context.executing_eagerly():\n return True\n else:\n outer_context, _ = _get_outer_context_and_inner_device_stack()\n with outer_context():\n return context.executing_eagerly()\n\n\ndef inside_function():\n return get_default_graph().building_function\n\n\n@tf_export(v1=[\"enable_eager_execution\"])\ndef enable_eager_execution(config=None, device_policy=None,\n execution_mode=None):\n \"\"\"Enables eager execution for the lifetime of this program.\n\n Eager execution provides an imperative interface to TensorFlow. With eager\n execution enabled, TensorFlow functions execute operations immediately (as\n opposed to adding to a graph to be executed later in a `tf.compat.v1.Session`)\n and\n return concrete values (as opposed to symbolic references to a node in a\n computational graph).\n\n For example:\n\n ```python\n tf.compat.v1.enable_eager_execution()\n\n # After eager execution is enabled, operations are executed as they are\n # defined and Tensor objects hold concrete values, which can be accessed as\n # numpy.ndarray`s through the numpy() method.\n assert tf.multiply(6, 7).numpy() == 42\n ```\n\n Eager execution cannot be enabled after TensorFlow APIs have been used to\n create or execute graphs. It is typically recommended to invoke this function\n at program startup and not in a library (as most libraries should be usable\n both with and without eager execution).\n\n Args:\n config: (Optional.) A `tf.compat.v1.ConfigProto` to use to configure the\n environment in which operations are executed. Note that\n `tf.compat.v1.ConfigProto` is also used to configure graph execution (via\n `tf.compat.v1.Session`) and many options within `tf.compat.v1.ConfigProto`\n are not implemented (or are irrelevant) when eager execution is enabled.\n device_policy: (Optional.) Policy controlling how operations requiring\n inputs on a specific device (e.g., a GPU 0) handle inputs on a different\n device (e.g. GPU 1 or CPU). When set to None, an appropriate value will\n be picked automatically. The value picked may change between TensorFlow\n releases.\n Valid values:\n - tf.contrib.eager.DEVICE_PLACEMENT_EXPLICIT: raises an error if the\n placement is not correct.\n - tf.contrib.eager.DEVICE_PLACEMENT_WARN: copies the tensors which are not\n on the right device but logs a warning.\n - tf.contrib.eager.DEVICE_PLACEMENT_SILENT: silently copies the tensors.\n Note that this may hide performance problems as there is no notification\n provided when operations are blocked on the tensor being copied between\n devices.\n - tf.contrib.eager.DEVICE_PLACEMENT_SILENT_FOR_INT32: silently copies\n int32 tensors, raising errors on the other ones.\n execution_mode: (Optional.) Policy controlling how operations dispatched are\n actually executed. When set to None, an appropriate value will be picked\n automatically. The value picked may change between TensorFlow releases.\n Valid values:\n - tf.contrib.eager.SYNC: executes each operation synchronously.\n - tf.contrib.eager.ASYNC: executes each operation asynchronously. 
These\n operations may return \"non-ready\" handles.\n\n Raises:\n ValueError: If eager execution is enabled after creating/executing a\n TensorFlow graph, or if options provided conflict with a previous call\n to this function.\n \"\"\"\n _api_usage_gauge.get_cell().set(True)\n if context.default_execution_mode != context.EAGER_MODE:\n return enable_eager_execution_internal(\n config=config,\n device_policy=device_policy,\n execution_mode=execution_mode,\n server_def=None)\n\n\n@tf_export(v1=[\"disable_eager_execution\"])\ndef disable_eager_execution():\n \"\"\"Disables eager execution.\n\n This function can only be called before any Graphs, Ops, or Tensors have been\n created. It can be used at the beginning of the program for complex migration\n projects from TensorFlow 1.x to 2.x.\n \"\"\"\n _api_usage_gauge.get_cell().set(False)\n context.default_execution_mode = context.GRAPH_MODE\n c = context.context_safe()\n if c is not None:\n c._thread_local_data.is_eager = False # pylint: disable=protected-access\n\n\ndef enable_eager_execution_internal(config=None,\n device_policy=None,\n execution_mode=None,\n server_def=None):\n \"\"\"Enables eager execution for the lifetime of this program.\n\n Most of the doc string for enable_eager_execution is relevant here as well.\n\n Args:\n config: See enable_eager_execution doc string\n device_policy: See enable_eager_execution doc string\n execution_mode: See enable_eager_execution doc string\n server_def: (Optional.) A tensorflow::ServerDef proto. Enables execution on\n remote devices. GrpcServers need to be started by creating an identical\n server_def to this, and setting the appropriate task_indexes, so that the\n servers can communicate. It will then be possible to execute operations on\n remote devices.\n\n Raises:\n ValueError\n\n \"\"\"\n if config is not None and not isinstance(config, config_pb2.ConfigProto):\n raise TypeError(\"config must be a tf.ConfigProto, but got %s\" %\n type(config))\n if device_policy not in (None, context.DEVICE_PLACEMENT_EXPLICIT,\n context.DEVICE_PLACEMENT_WARN,\n context.DEVICE_PLACEMENT_SILENT,\n context.DEVICE_PLACEMENT_SILENT_FOR_INT32):\n raise ValueError(\n \"device_policy must be one of None, tf.contrib.eager.DEVICE_PLACEMENT_*\"\n )\n if execution_mode not in (None, context.SYNC, context.ASYNC):\n raise ValueError(\n \"execution_mode must be one of None, tf.contrib.eager.SYNC, \"\n \"tf.contrib.eager.ASYNC\")\n if context.default_execution_mode == context.GRAPH_MODE:\n graph_mode_has_been_used = (\n _default_graph_stack._global_default_graph is not None) # pylint: disable=protected-access\n if graph_mode_has_been_used:\n raise ValueError(\n \"tf.enable_eager_execution must be called at program startup.\")\n context.default_execution_mode = context.EAGER_MODE\n # pylint: disable=protected-access\n with context._context_lock:\n if context._context is None:\n context._set_context_locked(context.Context(\n config=config,\n device_policy=device_policy,\n execution_mode=execution_mode,\n server_def=server_def))\n elif ((config is not None and config is not context._context._config) or\n (device_policy is not None and\n device_policy is not context._context._device_policy) or\n (execution_mode is not None and\n execution_mode is not context._context._execution_mode)):\n raise ValueError(\n \"Trying to change the options of an active eager\"\n \" execution. Context config: %s, specified config:\"\n \" %s. Context device policy: %s, specified device\"\n \" policy: %s. 
Context execution mode: %s, \"\n \" specified execution mode %s.\" %\n (context._context._config, config, context._context._device_policy,\n device_policy, context._context._execution_mode, execution_mode))\n else:\n # We already created everything, so update the thread local data.\n context._context._thread_local_data.is_eager = True\n\n # Monkey patch to get rid of an unnecessary conditional since the context is\n # now initialized.\n context.context = context.context_safe\n\n\ndef eager_run(main=None, argv=None):\n \"\"\"Runs the program with an optional main function and argv list.\n\n The program will run with eager execution enabled.\n\n Example:\n ```python\n import tensorflow as tf\n # Import subject to future changes:\n from tensorflow.contrib.eager.python import tfe\n\n def main(_):\n u = tf.constant(6.0)\n v = tf.constant(7.0)\n print(u * v)\n\n if __name__ == \"__main__\":\n tfe.run()\n ```\n\n Args:\n main: the main function to run.\n argv: the arguments to pass to it.\n \"\"\"\n enable_eager_execution()\n app.run(main, argv)\n\n\n@tf_export(v1=[\"reset_default_graph\"])\ndef reset_default_graph():\n \"\"\"Clears the default graph stack and resets the global default graph.\n\n NOTE: The default graph is a property of the current thread. This\n function applies only to the current thread. Calling this function while\n a `tf.compat.v1.Session` or `tf.compat.v1.InteractiveSession` is active will\n result in undefined\n behavior. Using any previously created `tf.Operation` or `tf.Tensor` objects\n after calling this function will result in undefined behavior.\n Raises:\n AssertionError: If this function is called within a nested graph.\n \"\"\"\n if not _default_graph_stack.is_cleared():\n raise AssertionError(\"Do not use tf.reset_default_graph() to clear \"\n \"nested graphs. If you need a cleared graph, \"\n \"exit the nesting and create a new graph.\")\n _default_graph_stack.reset()\n\n\n@tf_export(v1=[\"get_default_graph\"])\ndef get_default_graph():\n \"\"\"Returns the default graph for the current thread.\n\n The returned graph will be the innermost graph on which a\n `Graph.as_default()` context has been entered, or a global default\n graph if none has been explicitly created.\n\n NOTE: The default graph is a property of the current thread. 
If you\n create a new thread, and wish to use the default graph in that\n thread, you must explicitly add a `with g.as_default():` in that\n thread's function.\n\n Returns:\n The default `Graph` being used in the current thread.\n \"\"\"\n return _default_graph_stack.get_default()\n\n\ndef has_default_graph():\n \"\"\"Returns True if there is a default graph.\"\"\"\n return len(_default_graph_stack.stack) >= 1\n\n\ndef get_name_scope():\n \"\"\"Returns the current name scope in the default_graph.\n\n For example:\n\n ```python\n with tf.name_scope('scope1'):\n with tf.name_scope('scope2'):\n print(tf.get_name_scope())\n ```\n would print the string `scope1/scope2`.\n\n Returns:\n A string representing the current name scope.\n \"\"\"\n if context.executing_eagerly():\n return context.context().scope_name.rstrip(\"/\")\n return get_default_graph().get_name_scope()\n\n\ndef _assert_same_graph(original_item, item):\n \"\"\"Fail if the 2 items are from different graphs.\n\n Args:\n original_item: Original item to check against.\n item: Item to check.\n\n Raises:\n ValueError: if graphs do not match.\n \"\"\"\n if original_item.graph is not item.graph:\n raise ValueError(\"%s must be from the same graph as %s.\" %\n (item, original_item))\n\n\ndef _get_graph_from_inputs(op_input_list, graph=None):\n \"\"\"Returns the appropriate graph to use for the given inputs.\n\n This library method provides a consistent algorithm for choosing the graph\n in which an Operation should be constructed:\n\n 1. If the default graph is being used to construct a function, we\n use the default graph.\n 2. If the \"graph\" is specified explicitly, we validate that all of the inputs\n in \"op_input_list\" are compatible with that graph.\n 3. Otherwise, we attempt to select a graph from the first Operation-\n or Tensor-valued input in \"op_input_list\", and validate that all other\n such inputs are in the same graph.\n 4. If the graph was not specified and it could not be inferred from\n \"op_input_list\", we attempt to use the default graph.\n\n Args:\n op_input_list: A list of inputs to an operation, which may include `Tensor`,\n `Operation`, and other objects that may be converted to a graph element.\n graph: (Optional) The explicit graph to use.\n\n Raises:\n TypeError: If op_input_list is not a list or tuple, or if graph is not a\n Graph.\n ValueError: If a graph is explicitly passed and not all inputs are from it,\n or if the inputs are from multiple graphs, or we could not find a graph\n and there was no default graph.\n\n Returns:\n The appropriate graph to use for the given inputs.\n\n \"\"\"\n current_default_graph = get_default_graph()\n if current_default_graph.building_function:\n return current_default_graph\n\n op_input_list = tuple(op_input_list) # Handle generators correctly\n if graph and not isinstance(graph, Graph):\n raise TypeError(\"Input graph needs to be a Graph: %s\" % graph)\n\n # 1. We validate that all of the inputs are from the same graph. This is\n # either the supplied graph parameter, or the first one selected from one\n # the graph-element-valued inputs. In the latter case, we hold onto\n # that input in original_graph_element so we can provide a more\n # informative error if a mismatch is found.\n original_graph_element = None\n for op_input in op_input_list:\n # Determine if this is a valid graph_element.\n # TODO(josh11b): Note that we exclude subclasses of Tensor. 
Need to clean this\n # up.\n graph_element = None\n if (isinstance(op_input, (Operation, _TensorLike)) and\n ((not isinstance(op_input, Tensor)) or type(op_input) == Tensor)): # pylint: disable=unidiomatic-typecheck\n graph_element = op_input\n else:\n graph_element = _as_graph_element(op_input)\n\n if graph_element is not None:\n if not graph:\n original_graph_element = graph_element\n graph = graph_element.graph\n elif original_graph_element is not None:\n _assert_same_graph(original_graph_element, graph_element)\n elif graph_element.graph is not graph:\n raise ValueError(\"%s is not from the passed-in graph.\" % graph_element)\n\n # 2. If all else fails, we use the default graph, which is always there.\n return graph or current_default_graph\n\n\n@tf_export(v1=[\"GraphKeys\"])\nclass GraphKeys(object):\n \"\"\"Standard names to use for graph collections.\n\n The standard library uses various well-known names to collect and\n retrieve values associated with a graph. For example, the\n `tf.Optimizer` subclasses default to optimizing the variables\n collected under `tf.GraphKeys.TRAINABLE_VARIABLES` if none is\n specified, but it is also possible to pass an explicit list of\n variables.\n\n The following standard keys are defined:\n\n * `GLOBAL_VARIABLES`: the default collection of `Variable` objects, shared\n across distributed environment (model variables are subset of these). See\n `tf.compat.v1.global_variables`\n for more details.\n Commonly, all `TRAINABLE_VARIABLES` variables will be in `MODEL_VARIABLES`,\n and all `MODEL_VARIABLES` variables will be in `GLOBAL_VARIABLES`.\n * `LOCAL_VARIABLES`: the subset of `Variable` objects that are local to each\n machine. Usually used for temporarily variables, like counters.\n Note: use `tf.contrib.framework.local_variable` to add to this collection.\n * `MODEL_VARIABLES`: the subset of `Variable` objects that are used in the\n model for inference (feed forward). Note: use\n `tf.contrib.framework.model_variable` to add to this collection.\n * `TRAINABLE_VARIABLES`: the subset of `Variable` objects that will\n be trained by an optimizer. See\n `tf.compat.v1.trainable_variables`\n for more details.\n * `SUMMARIES`: the summary `Tensor` objects that have been created in the\n graph. See\n `tf.compat.v1.summary.merge_all`\n for more details.\n * `QUEUE_RUNNERS`: the `QueueRunner` objects that are used to\n produce input for a computation. See\n `tf.compat.v1.train.start_queue_runners`\n for more details.\n * `MOVING_AVERAGE_VARIABLES`: the subset of `Variable` objects that will also\n keep moving averages. 
See\n `tf.compat.v1.moving_average_variables`\n for more details.\n * `REGULARIZATION_LOSSES`: regularization losses collected during graph\n construction.\n\n The following standard keys are _defined_, but their collections are **not**\n automatically populated as many of the others are:\n\n * `WEIGHTS`\n * `BIASES`\n * `ACTIVATIONS`\n \"\"\"\n\n # Key to collect Variable objects that are global (shared across machines).\n # Default collection for all variables, except local ones.\n GLOBAL_VARIABLES = \"variables\"\n # Key to collect local variables that are local to the machine and are not\n # saved/restored.\n LOCAL_VARIABLES = \"local_variables\"\n # Key to collect local variables which are used to accumulate interal state\n # to be used in tf.metrics.*.\n METRIC_VARIABLES = \"metric_variables\"\n # Key to collect model variables defined by layers.\n MODEL_VARIABLES = \"model_variables\"\n # Key to collect Variable objects that will be trained by the\n # optimizers.\n TRAINABLE_VARIABLES = \"trainable_variables\"\n # Key to collect summaries.\n SUMMARIES = \"summaries\"\n # Key to collect QueueRunners.\n QUEUE_RUNNERS = \"queue_runners\"\n # Key to collect table initializers.\n TABLE_INITIALIZERS = \"table_initializer\"\n # Key to collect asset filepaths. An asset represents an external resource\n # like a vocabulary file.\n ASSET_FILEPATHS = \"asset_filepaths\"\n # Key to collect Variable objects that keep moving averages.\n MOVING_AVERAGE_VARIABLES = \"moving_average_variables\"\n # Key to collect regularization losses at graph construction.\n REGULARIZATION_LOSSES = \"regularization_losses\"\n # Key to collect concatenated sharded variables.\n CONCATENATED_VARIABLES = \"concatenated_variables\"\n # Key to collect savers.\n SAVERS = \"savers\"\n # Key to collect weights\n WEIGHTS = \"weights\"\n # Key to collect biases\n BIASES = \"biases\"\n # Key to collect activations\n ACTIVATIONS = \"activations\"\n # Key to collect update_ops\n UPDATE_OPS = \"update_ops\"\n # Key to collect losses\n LOSSES = \"losses\"\n # Key to collect BaseSaverBuilder.SaveableObject instances for checkpointing.\n SAVEABLE_OBJECTS = \"saveable_objects\"\n # Key to collect all shared resources used by the graph which need to be\n # initialized once per cluster.\n RESOURCES = \"resources\"\n # Key to collect all shared resources used in this graph which need to be\n # initialized once per session.\n LOCAL_RESOURCES = \"local_resources\"\n # Trainable resource-style variables.\n TRAINABLE_RESOURCE_VARIABLES = \"trainable_resource_variables\"\n\n # Key to indicate various ops.\n INIT_OP = \"init_op\"\n LOCAL_INIT_OP = \"local_init_op\"\n READY_OP = \"ready_op\"\n READY_FOR_LOCAL_INIT_OP = \"ready_for_local_init_op\"\n SUMMARY_OP = \"summary_op\"\n GLOBAL_STEP = \"global_step\"\n\n # Used to count the number of evaluations performed during a single evaluation\n # run.\n EVAL_STEP = \"eval_step\"\n TRAIN_OP = \"train_op\"\n\n # Key for control flow context.\n COND_CONTEXT = \"cond_context\"\n WHILE_CONTEXT = \"while_context\"\n\n # Used to store v2 summary names.\n _SUMMARY_COLLECTION = \"_SUMMARY_V2\"\n\n # List of all collections that keep track of variables.\n _VARIABLE_COLLECTIONS = [\n GLOBAL_VARIABLES,\n LOCAL_VARIABLES,\n METRIC_VARIABLES,\n MODEL_VARIABLES,\n TRAINABLE_VARIABLES,\n MOVING_AVERAGE_VARIABLES,\n CONCATENATED_VARIABLES,\n TRAINABLE_RESOURCE_VARIABLES,\n ]\n\n # Key for streaming model ports.\n # NOTE(yuanbyu): internal and experimental.\n _STREAMING_MODEL_PORTS = \"streaming_model_ports\"\n\n 
@decorator_utils.classproperty\n @deprecation.deprecated(None, \"Use `tf.GraphKeys.GLOBAL_VARIABLES` instead.\")\n def VARIABLES(cls): # pylint: disable=no-self-argument\n return cls.GLOBAL_VARIABLES\n\n\ndef dismantle_graph(graph):\n \"\"\"Cleans up reference cycles from a `Graph`.\n\n Helpful for making sure the garbage collector doesn't need to run after a\n temporary `Graph` is no longer needed.\n\n Args:\n graph: A `Graph` object to destroy. Neither it nor any of its ops are usable\n after this function runs.\n \"\"\"\n memory.dismantle_ordered_dict(graph._functions) # pylint: disable=protected-access\n\n # Now clean up Operation<->Graph reference cycles by clearing all of the\n # attributes for the Graph and its ops.\n graph_operations = graph.get_operations()\n for op in graph_operations:\n op.__dict__ = {}\n graph.__dict__ = {}\n\n\n@tf_export(v1=[\"add_to_collection\"])\ndef add_to_collection(name, value):\n \"\"\"Wrapper for `Graph.add_to_collection()` using the default graph.\n\n See `tf.Graph.add_to_collection`\n for more details.\n\n Args:\n name: The key for the collection. For example, the `GraphKeys` class\n contains many standard names for collections.\n value: The value to add to the collection. @compatibility(eager)\n Collections are only supported in eager when variables are created inside\n an EagerVariableStore (e.g. as part of a layer or template).\n @end_compatibility\n \"\"\"\n get_default_graph().add_to_collection(name, value)\n\n\n@tf_export(v1=[\"add_to_collections\"])\ndef add_to_collections(names, value):\n \"\"\"Wrapper for `Graph.add_to_collections()` using the default graph.\n\n See `tf.Graph.add_to_collections`\n for more details.\n\n Args:\n names: The key for the collections. The `GraphKeys` class contains many\n standard names for collections.\n value: The value to add to the collections. @compatibility(eager)\n Collections are only supported in eager when variables are created inside\n an EagerVariableStore (e.g. as part of a layer or template).\n @end_compatibility\n \"\"\"\n get_default_graph().add_to_collections(names, value)\n\n\n@tf_export(v1=[\"get_collection_ref\"])\ndef get_collection_ref(key):\n \"\"\"Wrapper for `Graph.get_collection_ref()` using the default graph.\n\n See `tf.Graph.get_collection_ref`\n for more details.\n\n Args:\n key: The key for the collection. For example, the `GraphKeys` class contains\n many standard names for collections.\n\n Returns:\n The list of values in the collection with the given `name`, or an empty\n list if no value has been added to that collection. Note that this returns\n the collection list itself, which can be modified in place to change the\n collection.\n\n @compatibility(eager)\n Collections are not supported when eager execution is enabled.\n @end_compatibility\n \"\"\"\n return get_default_graph().get_collection_ref(key)\n\n\n@tf_export(v1=[\"get_collection\"])\ndef get_collection(key, scope=None):\n \"\"\"Wrapper for `Graph.get_collection()` using the default graph.\n\n See `tf.Graph.get_collection`\n for more details.\n\n Args:\n key: The key for the collection. For example, the `GraphKeys` class contains\n many standard names for collections.\n scope: (Optional.) If supplied, the resulting list is filtered to include\n only items whose `name` attribute matches using `re.match`. 
Items without\n a `name` attribute are never returned if a scope is supplied and the\n choice or `re.match` means that a `scope` without special tokens filters\n by prefix.\n\n Returns:\n The list of values in the collection with the given `name`, or\n an empty list if no value has been added to that collection. The\n list contains the values in the order under which they were\n collected.\n\n @compatibility(eager)\n Collections are not supported when eager execution is enabled.\n @end_compatibility\n \"\"\"\n return get_default_graph().get_collection(key, scope)\n\n\ndef get_all_collection_keys():\n \"\"\"Returns a list of collections used in the default graph.\"\"\"\n return get_default_graph().get_all_collection_keys()\n\n\ndef name_scope(name, default_name=None, values=None, skip_on_eager=True):\n \"\"\"Internal-only entry point for `name_scope*`.\n\n Internal ops do not use the public API and instead rely on\n `ops.name_scope` regardless of the execution mode. This function\n dispatches to the correct `name_scope*` implementation based on\n the arguments provided and the current mode. Specifically,\n\n * if `values` contains a graph tensor `Graph.name_scope` is used;\n * `name_scope_v1` is used in graph mode;\n * `name_scope_v2` -- in eager mode.\n\n Args:\n name: The name argument that is passed to the op function.\n default_name: The default name to use if the `name` argument is `None`.\n values: The list of `Tensor` arguments that are passed to the op function.\n skip_on_eager: Indicates to return NullContextmanager if executing eagerly.\n By default this is True since naming tensors and operations in eager mode\n have little use and cause unnecessary performance overhead. However, it is\n important to preserve variable names since they are often useful for\n debugging and saved models.\n\n Returns:\n `name_scope*` context manager.\n \"\"\"\n ctx = context.context()\n in_eager_mode = ctx.executing_eagerly()\n if not in_eager_mode:\n return internal_name_scope_v1(name, default_name, values)\n\n if skip_on_eager:\n return NullContextmanager()\n\n name = default_name if name is None else name\n if values:\n # The presence of a graph tensor in `values` overrides the context.\n # TODO(slebedev): this is Keras-specific and should be removed.\n # pylint: disable=unidiomatic-typecheck\n graph_value = next((value for value in values if type(value) == Tensor),\n None)\n # pylint: enable=unidiomatic-typecheck\n if graph_value is not None:\n return graph_value.graph.name_scope(name)\n\n return name_scope_v2(name or \"\")\n\n\nclass internal_name_scope_v1(object): # pylint: disable=invalid-name\n \"\"\"Graph-only version of `name_scope_v1`.\"\"\"\n\n @property\n def name(self):\n return self._name\n\n def __init__(self, name, default_name=None, values=None):\n \"\"\"Initialize the context manager.\n\n Args:\n name: The name argument that is passed to the op function.\n default_name: The default name to use if the `name` argument is `None`.\n values: The list of `Tensor` arguments that are passed to the op function.\n\n Raises:\n TypeError: if `default_name` is passed in but not a string.\n \"\"\"\n if not (default_name is None or isinstance(default_name, six.string_types)):\n raise TypeError(\n \"`default_name` type (%s) is not a string type. 
You likely meant to \"\n \"pass this into the `values` kwarg.\" % type(default_name))\n self._name = default_name if name is None else name\n self._default_name = default_name\n self._values = values\n\n def __enter__(self):\n \"\"\"Start the scope block.\n\n Returns:\n The scope name.\n\n Raises:\n ValueError: if neither `name` nor `default_name` is provided\n but `values` are.\n \"\"\"\n if self._name is None and self._values is not None:\n # We only raise an error if values is not None (provided) because\n # currently tf.name_scope(None) (values=None then) is sometimes used as\n # an idiom to reset to top scope.\n raise ValueError(\n \"At least one of name (%s) and default_name (%s) must be provided.\"\n % (self._name, self._default_name))\n\n g = get_default_graph()\n if self._values and not g.building_function:\n # Specialize based on the knowledge that `_get_graph_from_inputs()`\n # ignores `inputs` when building a function.\n g_from_inputs = _get_graph_from_inputs(self._values)\n if g_from_inputs is not g:\n g = g_from_inputs\n self._g_manager = g.as_default()\n self._g_manager.__enter__()\n else:\n self._g_manager = None\n else:\n self._g_manager = None\n\n try:\n self._name_scope = g.name_scope(self._name)\n return self._name_scope.__enter__()\n except:\n if self._g_manager is not None:\n self._g_manager.__exit__(*sys.exc_info())\n raise\n\n def __exit__(self, *exc_info):\n self._name_scope.__exit__(*exc_info)\n if self._g_manager is not None:\n self._g_manager.__exit__(*exc_info)\n\n\n# Named like a function for backwards compatibility with the\n# @tf_contextlib.contextmanager version, which was switched to a class to avoid\n# some object creation overhead.\n@tf_export(v1=[\"name_scope\"])\nclass name_scope_v1(object): # pylint: disable=invalid-name\n \"\"\"A context manager for use when defining a Python op.\n\n This context manager validates that the given `values` are from the\n same graph, makes that graph the default graph, and pushes a\n name scope in that graph (see\n `tf.Graph.name_scope`\n for more details on that).\n\n For example, to define a new Python op called `my_op`:\n\n ```python\n def my_op(a, b, c, name=None):\n with tf.name_scope(name, \"MyOp\", [a, b, c]) as scope:\n a = tf.convert_to_tensor(a, name=\"a\")\n b = tf.convert_to_tensor(b, name=\"b\")\n c = tf.convert_to_tensor(c, name=\"c\")\n # Define some computation that uses `a`, `b`, and `c`.\n return foo_op(..., name=scope)\n ```\n \"\"\"\n\n @property\n def name(self):\n return self._name\n\n def __init__(self, name, default_name=None, values=None):\n \"\"\"Initialize the context manager.\n\n Args:\n name: The name argument that is passed to the op function.\n default_name: The default name to use if the `name` argument is `None`.\n values: The list of `Tensor` arguments that are passed to the op function.\n\n Raises:\n TypeError: if `default_name` is passed in but not a string.\n \"\"\"\n self._name_scope = name_scope(\n name, default_name, values, skip_on_eager=False)\n self._name = default_name if name is None else name\n\n def __enter__(self):\n return self._name_scope.__enter__()\n\n def __exit__(self, *exc_info):\n return self._name_scope.__exit__(*exc_info)\n\n\ndef enter_eager_name_scope(ctx, name):\n \"\"\"Updates the eager context to enter the given name scope.\"\"\"\n old_name = ctx.scope_name\n if not name:\n scope_name = \"\"\n else:\n if name.endswith(\"/\"):\n # A trailing slash breaks out of nested name scopes, indicating a\n # fully specified scope name, for compatibility with 
Graph.name_scope.\n scope_name = name\n else:\n scope_name = name + \"/\"\n if old_name:\n scope_name = old_name + scope_name\n ctx.scope_name = scope_name\n return scope_name, old_name\n\n\n@tf_export(\"name_scope\", v1=[])\nclass name_scope_v2(object):\n \"\"\"A context manager for use when defining a Python op.\n\n This context manager pushes a name scope, which will make the name of all\n operations added within it have a prefix.\n\n For example, to define a new Python op called `my_op`:\n\n ```python\n def my_op(a, b, c, name=None):\n with tf.name_scope(\"MyOp\") as scope:\n a = tf.convert_to_tensor(a, name=\"a\")\n b = tf.convert_to_tensor(b, name=\"b\")\n c = tf.convert_to_tensor(c, name=\"c\")\n # Define some computation that uses `a`, `b`, and `c`.\n return foo_op(..., name=scope)\n ```\n\n When executed, the Tensors `a`, `b`, `c`, will have names `MyOp/a`, `MyOp/b`,\n and `MyOp/c`.\n\n If the scope name already exists, the name will be made unique by appending\n `_n`. For example, calling `my_op` the second time will generate `MyOp_1/a`,\n etc.\n \"\"\"\n\n def __init__(self, name):\n \"\"\"Initialize the context manager.\n\n Args:\n name: The prefix to use on all names created within the name scope.\n\n Raises:\n ValueError: If name is None, or not a string.\n \"\"\"\n if name is None or not isinstance(name, six.string_types):\n raise ValueError(\"name for name_scope must be a string.\")\n self._name = name\n self._exit_fns = []\n\n @property\n def name(self):\n return self._name\n\n def __enter__(self):\n \"\"\"Start the scope block.\n\n Returns:\n The scope name.\n\n Raises:\n ValueError: if neither `name` nor `default_name` is provided\n but `values` are.\n \"\"\"\n ctx = context.context()\n if ctx.executing_eagerly():\n scope_name, old_scope_name = enter_eager_name_scope(ctx, self._name)\n self._exit_fns.append(\n lambda *a: setattr(ctx, \"scope_name\", old_scope_name))\n else:\n scope = get_default_graph().name_scope(self._name)\n scope_name = scope.__enter__()\n self._exit_fns.append(scope.__exit__)\n return scope_name\n\n def __exit__(self, type_arg, value_arg, traceback_arg):\n exit_fn = self._exit_fns.pop()\n exit_fn(type_arg, value_arg, traceback_arg)\n return False # False values do not suppress exceptions\n\n\ndef strip_name_scope(name, export_scope):\n \"\"\"Removes name scope from a name.\n\n Args:\n name: A `string` name.\n export_scope: Optional `string`. Name scope to remove.\n\n Returns:\n Name with name scope removed, or the original name if export_scope\n is None.\n \"\"\"\n if export_scope:\n if export_scope[-1] == \"/\":\n export_scope = export_scope[:-1]\n\n try:\n # Strips export_scope/, export_scope///,\n # ^export_scope/, loc:@export_scope/.\n str_to_replace = r\"([\\^]|loc:@|^)\" + export_scope + r\"[\\/]+(.*)\"\n return re.sub(str_to_replace, r\"\\1\\2\", compat.as_str(name), count=1)\n except TypeError as e:\n # If the name is not of a type we can process, simply return it.\n logging.warning(e)\n return name\n else:\n return name\n\n\ndef prepend_name_scope(name, import_scope):\n \"\"\"Prepends name scope to a name.\n\n Args:\n name: A `string` name.\n import_scope: Optional `string`. 
Name scope to add.\n\n Returns:\n Name with name scope added, or the original name if import_scope\n is None.\n \"\"\"\n if import_scope:\n if import_scope[-1] == \"/\":\n import_scope = import_scope[:-1]\n\n try:\n str_to_replace = r\"([\\^]|loc:@|^)(.*)\"\n return re.sub(str_to_replace, r\"\\1\" + import_scope + r\"/\\2\",\n compat.as_str(name))\n except TypeError as e:\n # If the name is not of a type we can process, simply return it.\n logging.warning(e)\n return name\n else:\n return name\n\n\n# pylint: disable=g-doc-return-or-yield\n# pylint: disable=not-context-manager\n@tf_export(v1=[\"op_scope\"])\n@tf_contextlib.contextmanager\ndef op_scope(values, name, default_name=None):\n \"\"\"DEPRECATED. Same as name_scope above, just different argument order.\"\"\"\n logging.warn(\"tf.op_scope(values, name, default_name) is deprecated,\"\n \" use tf.name_scope(name, default_name, values)\")\n with name_scope(name, default_name=default_name, values=values) as scope:\n yield scope\n\n\n_proto_function_registry = registry.Registry(\"proto functions\")\n\n\ndef register_proto_function(collection_name,\n proto_type=None,\n to_proto=None,\n from_proto=None):\n \"\"\"Registers `to_proto` and `from_proto` functions for collection_name.\n\n `to_proto` function converts a Python object to the corresponding protocol\n buffer, and returns the protocol buffer.\n\n `from_proto` function converts protocol buffer into a Python object, and\n returns the object..\n\n Args:\n collection_name: Name of the collection.\n proto_type: Protobuf type, such as `saver_pb2.SaverDef`,\n `variable_pb2.VariableDef`, `queue_runner_pb2.QueueRunnerDef`..\n to_proto: Function that implements Python object to protobuf conversion.\n from_proto: Function that implements protobuf to Python object conversion.\n \"\"\"\n if to_proto and not callable(to_proto):\n raise TypeError(\"to_proto must be callable.\")\n if from_proto and not callable(from_proto):\n raise TypeError(\"from_proto must be callable.\")\n\n _proto_function_registry.register((proto_type, to_proto, from_proto),\n collection_name)\n\n\ndef get_collection_proto_type(collection_name):\n \"\"\"Returns the proto_type for collection_name.\"\"\"\n try:\n return _proto_function_registry.lookup(collection_name)[0]\n except LookupError:\n return None\n\n\ndef get_to_proto_function(collection_name):\n \"\"\"Returns the to_proto function for collection_name.\"\"\"\n try:\n return _proto_function_registry.lookup(collection_name)[1]\n except LookupError:\n return None\n\n\ndef get_from_proto_function(collection_name):\n \"\"\"Returns the from_proto function for collection_name.\"\"\"\n try:\n return _proto_function_registry.lookup(collection_name)[2]\n except LookupError:\n return None\n\n\ndef _operation_conversion_error(op, dtype=None, name=None, as_ref=False):\n \"\"\"Produce a nice error if someone converts an Operation to a Tensor.\"\"\"\n raise TypeError((\"Can't convert Operation '%s' to Tensor \"\n \"(target dtype=%r, name=%r, as_ref=%r)\") %\n (op.name, dtype, name, as_ref))\n\n\ndef _op_to_colocate_with(v, graph):\n \"\"\"Operation object corresponding to v to use for colocation constraints.\"\"\"\n if v is None:\n return None\n if isinstance(v, Operation):\n return v\n # We always want to colocate with the reference op.\n # When 'v' is a ResourceVariable, the reference op is the handle creating op.\n #\n # What this should be is:\n # if isinstance(v, ResourceVariable):\n # return v.handle.op\n # However, that would require a circular import dependency.\n # As of 
October 2018, there were attempts underway to remove\n # colocation constraints altogether. Assuming that will\n # happen soon, perhaps this hack to work around the circular\n # import dependency is acceptable.\n if hasattr(v, \"handle\") and isinstance(v.handle, Tensor):\n if graph.building_function:\n return graph.capture(v.handle).op\n else:\n return v.handle.op\n return internal_convert_to_tensor_or_indexed_slices(v, as_ref=True).op\n\n\ndef _is_keras_symbolic_tensor(x):\n return hasattr(x, \"graph\") and getattr(x.graph, \"name\", None) == \"keras_graph\"\n\n\ntensor_conversion_registry.register_tensor_conversion_function(\n Operation, _operation_conversion_error)\n\n\n# These symbols were originally defined in this module; import them for\n# backwards compatibility until all references have been updated to access\n# them from the indexed_slices.py module.\nIndexedSlices = indexed_slices.IndexedSlices\nIndexedSlicesValue = indexed_slices.IndexedSlicesValue\nconvert_to_tensor_or_indexed_slices = \\\n indexed_slices.convert_to_tensor_or_indexed_slices\nconvert_n_to_tensor_or_indexed_slices = \\\n indexed_slices.convert_n_to_tensor_or_indexed_slices\ninternal_convert_to_tensor_or_indexed_slices = \\\n indexed_slices.internal_convert_to_tensor_or_indexed_slices\ninternal_convert_n_to_tensor_or_indexed_slices = \\\n indexed_slices.internal_convert_n_to_tensor_or_indexed_slices\nregister_tensor_conversion_function = \\\n tensor_conversion_registry.register_tensor_conversion_function\n\n\n# Helper functions for op wrapper modules generated by `python_op_gen`.\n\n\ndef to_raw_op(f):\n \"\"\"Make a given op wrapper function `f` raw.\n\n Raw op wrappers can only be called with keyword arguments.\n\n Args:\n f: An op wrapper function to make raw.\n\n Returns:\n Raw `f`.\n \"\"\"\n # Copy `f` to get a new `__dict__`, otherwise `tf_export` will fail\n # due to double-registration.\n f = types.FunctionType(f.__code__, f.__globals__, f.__name__, f.__defaults__,\n f.__closure__)\n return kwarg_only(f)\n\n\ndef raise_from_not_ok_status(e, name):\n message = e.message + (\" name: \" + name if name is not None else \"\")\n # pylint: disable=protected-access\n six.raise_from(core._status_to_exception(e.code, message), None)\n # pylint: enable=protected-access\n\n\ndef add_exit_callback_to_default_func_graph(fn):\n \"\"\"Add a callback to run when the default function graph goes out of scope.\n\n Usage:\n\n ```python\n @tf.function\n def fn(x, v):\n expensive = expensive_object(v)\n add_exit_callback_to_default_func_graph(lambda: expensive.release())\n return g(x, expensive)\n\n fn(x=tf.constant(...), v=...)\n # `expensive` has been released.\n ```\n\n Args:\n fn: A callable that takes no arguments and whose output is ignored.\n To be executed when exiting func graph scope.\n\n Raises:\n RuntimeError: If executed when the current default graph is not a FuncGraph,\n or not currently executing in function creation mode (e.g., if inside\n an init_scope).\n \"\"\"\n default_graph = get_default_graph()\n if not default_graph._building_function: # pylint: disable=protected-access\n raise RuntimeError(\n \"Cannot add scope exit callbacks when not building a function. 
\"\n \"Default graph: {}\".format(default_graph))\n default_graph._add_scope_exit_callback(fn) # pylint: disable=protected-access\n\n\ndef _reconstruct_sequence_inputs(op_def, inputs, attrs):\n \"\"\"Regroups a flat list of input tensors into scalar and sequence inputs.\n\n Args:\n op_def: The `op_def_pb2.OpDef` (for knowing the input types)\n inputs: a list of input `Tensor`s to the op.\n attrs: mapping from attr name to `attr_value_pb2.AttrValue` (these define\n how long each sequence is)\n\n Returns:\n A list of `Tensor`s (corresponding to scalar inputs) and lists of\n `Tensor`s (corresponding to sequence inputs).\n \"\"\"\n grouped_inputs = []\n i = 0\n for input_arg in op_def.input_arg:\n if input_arg.number_attr:\n input_len = attrs[input_arg.number_attr].i\n is_sequence = True\n elif input_arg.type_list_attr:\n input_len = len(attrs[input_arg.type_list_attr].list.type)\n is_sequence = True\n else:\n input_len = 1\n is_sequence = False\n\n if is_sequence:\n grouped_inputs.append(inputs[i:i + input_len])\n else:\n grouped_inputs.append(inputs[i])\n i += input_len\n\n assert i == len(inputs)\n return grouped_inputs\n\n\nclass _TensorIterator(object):\n \"\"\"Iterates over the leading dim of a Tensor. Performs no error checks.\"\"\"\n\n def __init__(self, tensor, dim0):\n self._tensor = tensor\n self._index = 0\n self._limit = dim0\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self._index == self._limit:\n raise StopIteration\n result = self._tensor[self._index]\n self._index += 1\n return result\n\n next = __next__ # python2.x compatibility.\n"
] | [
[
"tensorflow.python.framework.tensor_shape.unknown_shape",
"tensorflow.python.client.pywrap_tf_session.TF_OperationToNodeDef",
"tensorflow.python.util.deprecation.deprecated_endpoints",
"tensorflow.python.client.pywrap_tf_session.TF_OperationGetControlOutputs_wrapper",
"tensorflow.python.eager.core._status_to_exception",
"tensorflow.python.util.object_identity.Reference",
"tensorflow.python.client.pywrap_tf_session.TF_OperationDevice",
"tensorflow.python.platform.app.run",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.client.pywrap_tf_session.TF_OperationOutputType",
"tensorflow.python.client.pywrap_tf_session.TF_FinishOperation",
"tensorflow.python.client.pywrap_tf_session.TF_OperationGetAttrInt",
"tensorflow.python.client.pywrap_tf_session.RemoveAllControlInputs",
"tensorflow.python.framework.dtypes.as_dtype",
"tensorflow.python.client.pywrap_tf_session.TF_OperationGetAttrValueProto",
"tensorflow.python.eager.tape.record_operation",
"tensorflow.core.framework.graph_pb2.GraphDef",
"tensorflow.python.util.compat.as_str",
"tensorflow.python.util.compat.as_bytes",
"tensorflow.python.eager.context.device",
"tensorflow.python.pywrap_tfe.TFE_Py_InitEagerTensor",
"tensorflow.core.framework.versions_pb2.VersionDef",
"tensorflow.python.framework.registry.Registry",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.util.deprecation.deprecated_argument_lookup",
"tensorflow.python.framework.c_api_util.tf_output",
"tensorflow.python.eager.monitoring.BoolGauge",
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.eager.context.Context",
"tensorflow.python.client.pywrap_tf_session.TF_OperationNumInputs",
"tensorflow.python.client.pywrap_tf_session.TF_OperationOpType",
"tensorflow.python.client.pywrap_tf_session.SetRequestedDevice",
"tensorflow.python.pywrap_tfe.TFE_Py_UID",
"tensorflow.python.client.pywrap_tf_session.TF_OperationGetAttrType",
"tensorflow.core.framework.attr_value_pb2.AttrValue.ListValue",
"tensorflow.python.util.memory.dismantle_ordered_dict",
"tensorflow.python.framework.device.merge_device",
"tensorflow.python.client.pywrap_tf_session.TF_Input",
"tensorflow.python.client.pywrap_tf_session.TF_GraphCopyFunction",
"tensorflow.python.client.pywrap_tf_session.SetRequireShapeInferenceFns",
"tensorflow.python.client.pywrap_tf_session.GetOperationInputs",
"tensorflow.python.eager.context.context_safe",
"tensorflow.python.framework.traceable_stack.TraceableStack",
"tensorflow.python.client.pywrap_tf_session.TF_OperationGetControlInputs_wrapper",
"tensorflow.python.util.deprecation.deprecated",
"tensorflow.python.platform.tf_logging.warn",
"tensorflow.python.client.pywrap_tf_session.ClearAttr",
"tensorflow.python.client.pywrap_tf_session.TF_Output",
"tensorflow.python.util.tf_stack.extract_stack",
"tensorflow.core.framework.attr_value_pb2.AttrValue",
"tensorflow.python.eager.context.context",
"tensorflow.python.framework.device.is_device_spec",
"tensorflow.python.eager.context.graph_mode",
"tensorflow.core.framework.op_def_pb2.OpDef",
"tensorflow.python.framework.tensor_conversion_registry.register_tensor_conversion_function",
"tensorflow.python.client.pywrap_tf_session.TF_AddControlInput",
"tensorflow.python.util.deprecation.deprecated_args",
"tensorflow.python.framework.c_api_util.new_tf_operations",
"tensorflow.core.framework.function_pb2.GradientDef",
"tensorflow.python.util.function_utils.get_func_code",
"tensorflow.python.client.pywrap_tf_session.TF_OperationNumOutputs",
"tensorflow.python.client.pywrap_tf_session.TF_GraphToGraphDef",
"tensorflow.python.client.pywrap_tf_session.TF_OperationName",
"tensorflow.python.util.object_identity.ObjectIdentitySet",
"tensorflow.core.framework.attr_value_pb2.NameAttrList",
"tensorflow.python.framework.c_api_util.tf_buffer",
"tensorflow.python.util.function_utils.get_func_name",
"tensorflow.python.client.pywrap_tf_session.TF_GraphVersions",
"tensorflow.python.eager.tape.stop_recording",
"tensorflow.python.ops.control_flow_util.CheckInputFromValidContext",
"tensorflow.python.util.lock_util.GroupLock",
"tensorflow.python.platform.tf_logging.warning",
"tensorflow.python.client.pywrap_tf_session.TF_DeleteBuffer",
"tensorflow.python.client.pywrap_tf_session.AddControlInput",
"tensorflow.python.tf2.enabled",
"tensorflow.python.util.tf_export.kwarg_only",
"tensorflow.python.client.pywrap_tf_session.TF_OperationGetAttrBool",
"tensorflow.python.client.pywrap_tf_session.TF_GetBuffer",
"tensorflow.python.client.pywrap_tf_session.SetAttr",
"tensorflow.core.framework.node_def_pb2.NodeDef",
"tensorflow.python.framework.c_api_util.ScopedTFGraph"
]
] |
ragavvenkatesan/models | [
"420a88c7af20dae8d79dbc1b4351fef41be361c8"
] | [
"research/compression/distillation/resnet.py"
] | [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Contains definitions for the preactivation form of Residual Networks\n(also known as ResNet v2).\n\nResidual networks (ResNets) were originally proposed in:\n[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun\n Deep Residual Learning for Image Recognition. arXiv:1512.03385\n\nThe full preactivation 'v2' ResNet variant implemented in this module was\nintroduced by:\n[2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun\n Identity Mappings in Deep Residual Networks. arXiv: 1603.05027\n\nThe key difference of the full preactivation 'v2' variant compared to the\n'v1' variant in [1] is the use of batch normalization before every weight layer\nrather than after.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport os\n\nimport tensorflow as tf\n\n_BATCH_NORM_DECAY = 0.997\n_BATCH_NORM_EPSILON = 1e-5\n\n\n################################################################################\n# Functions for input processing.\n################################################################################\ndef process_record_dataset(dataset, is_training, batch_size, shuffle_buffer,\n parse_record_fn, num_epochs=1, num_parallel_calls=1):\n \"\"\"Given a Dataset with raw records, parse each record into images and labels,\n and return an iterator over the records.\n Args:\n dataset: A Dataset representing raw records\n is_training: A boolean denoting whether the input is for training.\n batch_size: The number of samples per batch.\n shuffle_buffer: The buffer size to use when shuffling records. A larger\n value results in better randomness, but smaller values reduce startup\n time and use less memory.\n parse_record_fn: A function that takes a raw record and returns the\n corresponding (image, label) pair.\n num_epochs: The number of epochs to repeat the dataset.\n num_parallel_calls: The number of records that are processed in parallel.\n This can be optimized per data set but for generally homogeneous data\n sets, should be approximately the number of available CPU cores.\n\n Returns:\n Dataset of (image, label) pairs ready for iteration.\n \"\"\"\n # We prefetch a batch at a time, This can help smooth out the time taken to\n # load input files as we go through shuffling and processing.\n dataset = dataset.prefetch(buffer_size=batch_size)\n if is_training:\n # Shuffle the records. 
Note that we shuffle before repeating to ensure\n # that the shuffling respects epoch boundaries.\n dataset = dataset.shuffle(buffer_size=shuffle_buffer)\n\n # If we are training over multiple epochs before evaluating, repeat the\n # dataset for the appropriate number of epochs.\n dataset = dataset.repeat(num_epochs)\n\n # Parse the raw records into images and labels\n dataset = dataset.map(lambda value: parse_record_fn(value, is_training),\n num_parallel_calls=num_parallel_calls)\n\n dataset = dataset.batch(batch_size)\n\n # Operations between the final prefetch and the get_next call to the iterator\n # will happen synchronously during run time. We prefetch here again to\n # background all of the above processing work and keep it out of the\n # critical training path.\n dataset = dataset.prefetch(1)\n\n return dataset\n\n\n################################################################################\n# Functions building the ResNet model.\n################################################################################\ndef batch_norm_relu(inputs, training, data_format):\n \"\"\"Performs a batch normalization followed by a ReLU.\"\"\"\n # We set fused=True for a significant performance boost. See\n # https://www.tensorflow.org/performance/performance_guide#common_fused_ops\n inputs = tf.layers.batch_normalization(\n inputs=inputs, axis=1 if data_format == 'channels_first' else 3,\n momentum=_BATCH_NORM_DECAY, epsilon=_BATCH_NORM_EPSILON, center=True,\n scale=True, training=training, fused=True)\n inputs = tf.nn.relu(inputs)\n return inputs\n\n\ndef fixed_padding(inputs, kernel_size, data_format):\n \"\"\"Pads the input along the spatial dimensions independently of input size.\n\n Args:\n inputs: A tensor of size [batch, channels, height_in, width_in] or\n [batch, height_in, width_in, channels] depending on data_format.\n kernel_size: The kernel to be used in the conv2d or max_pool2d operation.\n Should be a positive integer.\n data_format: The input format ('channels_last' or 'channels_first').\n\n Returns:\n A tensor with the same format as the input with the data either intact\n (if kernel_size == 1) or padded (if kernel_size > 1).\n \"\"\"\n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n\n if data_format == 'channels_first':\n padded_inputs = tf.pad(inputs, [[0, 0], [0, 0],\n [pad_beg, pad_end], [pad_beg, pad_end]])\n else:\n padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],\n [pad_beg, pad_end], [0, 0]])\n return padded_inputs\n\n\ndef conv2d_fixed_padding(inputs, filters, kernel_size, strides, data_format):\n \"\"\"Strided 2-D convolution with explicit padding.\"\"\"\n # The padding is consistent and is based only on `kernel_size`, not on the\n # dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone).\n if strides > 1:\n inputs = fixed_padding(inputs, kernel_size, data_format)\n\n return tf.layers.conv2d(\n inputs=inputs, filters=filters, kernel_size=kernel_size, strides=strides,\n padding=('SAME' if strides == 1 else 'VALID'), use_bias=False,\n kernel_initializer=tf.variance_scaling_initializer(),\n data_format=data_format)\n\n\ndef building_block(inputs, filters, training, projection_shortcut, strides,\n data_format):\n \"\"\"Standard building block for residual networks with BN before convolutions.\n\n Args:\n inputs: A tensor of size [batch, channels, height_in, width_in] or\n [batch, height_in, width_in, channels] depending on data_format.\n filters: The number of filters for the convolutions.\n training: A 
Boolean for whether the model is in training or inference\n mode. Needed for batch normalization.\n projection_shortcut: The function to use for projection shortcuts\n (typically a 1x1 convolution when downsampling the input).\n strides: The block's stride. If greater than 1, this block will ultimately\n downsample the input.\n data_format: The input format ('channels_last' or 'channels_first').\n\n Returns:\n The output tensor of the block.\n \"\"\"\n shortcut = inputs\n inputs = batch_norm_relu(inputs, training, data_format)\n\n # The projection shortcut should come after the first batch norm and ReLU\n # since it performs a 1x1 convolution.\n if projection_shortcut is not None:\n shortcut = projection_shortcut(inputs)\n\n inputs = conv2d_fixed_padding(\n inputs=inputs, filters=filters, kernel_size=3, strides=strides,\n data_format=data_format)\n\n inputs = batch_norm_relu(inputs, training, data_format)\n inputs = conv2d_fixed_padding(\n inputs=inputs, filters=filters, kernel_size=3, strides=1,\n data_format=data_format)\n\n return inputs + shortcut\n\n\ndef bottleneck_block(inputs, filters, training, projection_shortcut,\n strides, data_format):\n \"\"\"Bottleneck block variant for residual networks with BN before convolutions.\n\n Args:\n inputs: A tensor of size [batch, channels, height_in, width_in] or\n [batch, height_in, width_in, channels] depending on data_format.\n filters: The number of filters for the first two convolutions. Note\n that the third and final convolution will use 4 times as many filters.\n training: A Boolean for whether the model is in training or inference\n mode. Needed for batch normalization.\n projection_shortcut: The function to use for projection shortcuts\n (typically a 1x1 convolution when downsampling the input).\n strides: The block's stride. If greater than 1, this block will ultimately\n downsample the input.\n data_format: The input format ('channels_last' or 'channels_first').\n\n Returns:\n The output tensor of the block.\n \"\"\"\n shortcut = inputs\n inputs = batch_norm_relu(inputs, training, data_format)\n\n # The projection shortcut should come after the first batch norm and ReLU\n # since it performs a 1x1 convolution.\n if projection_shortcut is not None:\n shortcut = projection_shortcut(inputs)\n\n inputs = conv2d_fixed_padding(\n inputs=inputs, filters=filters, kernel_size=1, strides=1,\n data_format=data_format)\n\n inputs = batch_norm_relu(inputs, training, data_format)\n inputs = conv2d_fixed_padding(\n inputs=inputs, filters=filters, kernel_size=3, strides=strides,\n data_format=data_format)\n\n inputs = batch_norm_relu(inputs, training, data_format)\n inputs = conv2d_fixed_padding(\n inputs=inputs, filters=4 * filters, kernel_size=1, strides=1,\n data_format=data_format)\n\n return inputs + shortcut\n\n\ndef block_layer(inputs, filters, block_fn, blocks, strides, training, name,\n data_format):\n \"\"\"Creates one layer of blocks for the ResNet model.\n\n Args:\n inputs: A tensor of size [batch, channels, height_in, width_in] or\n [batch, height_in, width_in, channels] depending on data_format.\n filters: The number of filters for the first convolution of the layer.\n block_fn: The block to use within the model, either `building_block` or\n `bottleneck_block`.\n blocks: The number of blocks contained in the layer.\n strides: The stride to use for the first convolution of the layer. If\n greater than 1, this layer will ultimately downsample the input.\n training: Either True or False, whether we are currently training the\n model. 
Needed for batch norm.\n name: A string name for the tensor output of the block layer.\n data_format: The input format ('channels_last' or 'channels_first').\n\n Returns:\n The output tensor of the block layer.\n \"\"\"\n # Bottleneck blocks end with 4x the number of filters as they start with\n filters_out = 4 * filters if block_fn is bottleneck_block else filters\n\n def projection_shortcut(inputs):\n return conv2d_fixed_padding(\n inputs=inputs, filters=filters_out, kernel_size=1, strides=strides,\n data_format=data_format)\n\n # Only the first block per block_layer uses projection_shortcut and strides\n inputs = block_fn(inputs, filters, training, projection_shortcut, strides,\n data_format)\n\n for _ in range(1, blocks):\n inputs = block_fn(inputs, filters, training, None, 1, data_format)\n\n return tf.identity(inputs, name)\n\n\nclass Model(object):\n \"\"\"Base class for building the Resnet v2 Model.\n \"\"\"\n\n def __init__(self, resnet_size, num_classes, num_filters, kernel_size,\n conv_stride, first_pool_size, first_pool_stride, probe_pool_size,\n second_pool_size, second_pool_stride, probe_pool_stride,\n block_fn, block_sizes, pool_type, num_probes,\n block_strides, final_size, data_format=None):\n \"\"\"Creates a model for classifying an image.\n\n Args:\n resnet_size: A single integer for the size of the ResNet model.\n probe_pool_size: Number to pool the probes by.\n probe_pool_stride: stride size for the probe pooling layer \n num_classes: The number of classes used as labels.\n num_filters: The number of filters to use for the first block layer\n of the model. This number is then doubled for each subsequent block\n layer.\n kernel_size: The kernel size to use for convolution.\n conv_stride: stride size for the initial convolutional layer\n first_pool_size: Pool size to be used for the first pooling layer.\n If none, the first pooling layer is skipped.\n first_pool_stride: stride size for the first pooling layer. Not used\n if first_pool_size is None.\n second_pool_size: Pool size to be used for the second pooling layer.\n second_pool_stride: stride size for the final pooling layer\n block_fn: Which block layer function should be used? Pass in one of\n the two functions defined above: building_block or bottleneck_block\n block_sizes: A list containing n values, where n is the number of sets of\n block layers desired. Each value should be the number of blocks in the\n i-th set.\n pool_type: 'max' or 'mean'.\n block_strides: List of integers representing the desired stride size for\n each of the sets of block layers. 
Should be same length as block_sizes.\n final_size: The expected size of the model after the second pooling.\n data_format: Input format ('channels_last', 'channels_first', or None).\n If set to None, the format is dependent on whether a GPU is available.\n \"\"\"\n self.resnet_size = resnet_size\n\n if not data_format:\n data_format = (\n 'channels_first' if tf.test.is_built_with_cuda() else 'channels_last')\n\n self.data_format = data_format\n self.num_classes = num_classes\n self.num_filters = num_filters\n self.kernel_size = kernel_size\n self.conv_stride = conv_stride\n self.first_pool_size = first_pool_size\n self.first_pool_stride = first_pool_stride\n self.second_pool_size = second_pool_size\n self.second_pool_stride = second_pool_stride\n self.probe_pool_size = probe_pool_size\n self.probe_pool_stride = probe_pool_stride\n self.block_fn = block_fn\n self.block_sizes = block_sizes\n self.block_strides = block_strides\n self.final_size = final_size\n self.pool_type = pool_type\n self.num_probes = num_probes\n\n def __call__(self, inputs, training):\n \"\"\"Add operations to classify a batch of input images.\n\n Args:\n inputs: A Tensor representing a batch of input images.\n training: A boolean. Set to True to add operations required only when\n training the classifier.\n\n Returns:\n A logits Tensor with shape [<batch_size>, self.num_classes].\n \"\"\"\n with tf.variable_scope('input_transforms'):\n if self.data_format == 'channels_first':\n # Convert the inputs from channels_last (NHWC) to channels_first (NCHW).\n # This provides a large performance boost on GPU. See\n # https://www.tensorflow.org/performance/performance_guide#data_formats\n inputs = tf.transpose(inputs, [0, 3, 1, 2])\n with tf.variable_scope('mentor') as scope:\n # mentor\n mentor = conv2d_fixed_padding(\n inputs=inputs, filters=self.num_filters, kernel_size=self.kernel_size,\n strides=self.conv_stride, data_format=self.data_format)\n mentor = tf.identity(mentor, 'mentor_' + 'initial_conv')\n\n if self.first_pool_size:\n mentor = tf.layers.max_pooling2d(\n inputs=mentor, pool_size=self.first_pool_size,\n strides=self.first_pool_stride, padding='SAME',\n data_format=self.data_format)\n mentor = tf.identity(mentor, 'mentor_' + 'initial_max_pool')\n\n mentor_probes = []\n probe_count = 0\n for i, num_blocks in enumerate(self.block_sizes[0]):\n num_filters = self.num_filters * (2**i)\n mentor = block_layer(\n inputs=mentor, filters=num_filters, block_fn=self.block_fn,\n blocks=num_blocks, strides=self.block_strides[i],\n training=training, name='mentor_' + 'block_layer{}'.format(i + 1),\n data_format=self.data_format)\n \n if probe_count < self.num_probes: \n if self.probe_pool_size > 0:\n if self.pool_type == 'max':\n mentor_probe = tf.layers.max_pooling2d(\n inputs=mentor, pool_size=self.probe_pool_size,\n strides=self.probe_pool_stride, padding='SAME',\n data_format=self.data_format)\n mentor_probe = tf.identity(mentor, 'mentor_'+'probe_max_pool_' \\\n + str(i))\n elif self.pool_type == 'mean':\n mentor_probe = tf.layers.average_pooling2d(\n inputs=mentor, pool_size=self.probe_pool_size,\n strides=self.probe_pool_stride, padding='SAME',\n data_format=self.data_format)\n mentor_probe = tf.identity(mentor, 'mentor_'+'probe_mean_pool_' \\\n + str(i)) \n else:\n mentor_probe = mentor \n mentor_probes.append(mentor_probe)\n probe_count+=1\n mentor = batch_norm_relu(mentor, training, self.data_format)\n mentor = tf.layers.average_pooling2d(\n inputs=mentor, pool_size=self.second_pool_size,\n 
strides=self.second_pool_stride, padding='VALID',\n data_format=self.data_format)\n mentor = tf.identity(mentor, 'mentor_' + 'final_avg_pool')\n\n mentor = tf.reshape(mentor, [-1, self.final_size])\n mentor = tf.layers.dense(inputs=mentor, units=self.num_classes)\n mentor = tf.identity(mentor, 'mentor_' + 'final_dense')\n mentor_probes.append(mentor)\n\n with tf.variable_scope('mentee') as scope:\n # mentee\n mentee = conv2d_fixed_padding(\n inputs=inputs, filters=self.num_filters, kernel_size=self.kernel_size,\n strides=self.conv_stride, data_format=self.data_format)\n mentee = tf.identity(mentee, 'mentee_' + 'initial_conv')\n\n if self.first_pool_size:\n mentee = tf.layers.max_pooling2d(\n inputs=mentee, pool_size=self.first_pool_size,\n strides=self.first_pool_stride, padding='SAME',\n data_format=self.data_format)\n mentee = tf.identity(mentee, 'mentee_' + 'initial_max_pool')\n \n probe_count = 0\n mentee_probes = []\n for i, num_blocks in enumerate(self.block_sizes[1]):\n num_filters = self.num_filters * (2**i)\n mentee = block_layer(\n inputs=mentee, filters=num_filters, block_fn=self.block_fn,\n blocks=num_blocks, strides=self.block_strides[i],\n training=training, name='mentee_' + 'block_layer{}'.format(i + 1),\n data_format=self.data_format)\n if probe_count < self.num_probes: \n if self.probe_pool_size > 0:\n if self.pool_type == 'max':\n mentee_probe = tf.layers.max_pooling2d(\n inputs=mentee, pool_size=self.probe_pool_size,\n strides=self.probe_pool_stride, padding='SAME',\n data_format=self.data_format)\n mentee_probe = tf.identity(mentee, 'mentee_'+'probe_max_pool_' \\\n + str(i))\n elif self.pool_type == 'mean':\n mentee_probe = tf.layers.average_pooling2d(\n inputs=mentee, pool_size=self.probe_pool_size,\n strides=self.probe_pool_stride, padding='SAME',\n data_format=self.data_format)\n mentee_probe = tf.identity(mentee, 'mentee_'+'probe_max_pool_' \\\n + str(i)) \n else:\n mentee_probe=mentee \n mentee_probes.append(mentee_probe)\n probe_count+=1\n\n mentee = batch_norm_relu(mentee, training, self.data_format)\n mentee = tf.layers.average_pooling2d(\n inputs=mentee, pool_size=self.second_pool_size,\n strides=self.second_pool_stride, padding='VALID',\n data_format=self.data_format)\n mentee = tf.identity(mentee, 'mentee_' + 'final_avg_pool')\n mentee = tf.reshape(mentee, [-1, self.final_size])\n mentee = tf.layers.dense(inputs=mentee, units=self.num_classes)\n mentee = tf.identity(mentee, 'mentee_' + 'final_dense') \n mentee_probes.append(mentee)\n\n probe_cost = tf.constant(0.)\n for mentor_feat, mentee_feat in zip(mentor_probes, mentee_probes):\n probe_cost = probe_cost + tf.losses.mean_squared_error (\n mentor_feat, mentee_feat)\n return (mentor, mentee, probe_cost)\n\n################################################################################\n# Functions for running training/eval/validation loops for the model.\n################################################################################\n\ndef learning_rate_with_decay(\n batch_size, batch_denom, num_images, boundary_epochs, decay_rates):\n \"\"\"Get a learning rate that decays step-wise as training progresses.\n\n Args:\n batch_size: the number of examples processed in each training batch.\n batch_denom: this value will be used to scale the base learning rate.\n `0.1 * batch size` is divided by this number, such that when\n batch_denom == batch_size, the initial learning rate will be 0.1.\n num_images: total number of images that will be used for training.\n boundary_epochs: list of ints representing the 
epochs at which we\n decay the learning rate.\n decay_rates: list of floats representing the decay rates to be used\n for scaling the learning rate. Should be the same length as\n boundary_epochs.\n\n Returns:\n Returns a function that takes a single argument - the number of batches\n trained so far (global_step)- and returns the learning rate to be used\n for training the next batch.\n \"\"\"\n with tf.variable_scope('learning_rate'):\n initial_learning_rate = 0.01 * batch_size / batch_denom\n batches_per_epoch = num_images / batch_size\n\n # Multiply the learning rate by 0.1 at 100, 150, and 200 epochs.\n boundaries = [int(batches_per_epoch * epoch) for epoch in boundary_epochs]\n vals = [initial_learning_rate * decay for decay in decay_rates]\n\n def learning_rate_fn(global_step):\n global_step = tf.cast(global_step, tf.int32)\n rval = tf.train.piecewise_constant(global_step, boundaries, vals)\n return rval\n return learning_rate_fn\n\ndef learning_rate_with_decay_2( initial_learning_rate,\n batch_size, batch_denom, num_images, boundary_epochs, decay_rates):\n \"\"\"Get a learning rate that decays step-wise as training progresses.\n\n Args:\n batch_size: the number of examples processed in each training batch.\n batch_denom: this value will be used to scale the base learning rate.\n `0.1 * batch size` is divided by this number, such that when\n batch_denom == batch_size, the initial learning rate will be 0.1.\n num_images: total number of images that will be used for training.\n boundary_epochs: list of ints representing the epochs at which we\n decay the learning rate.\n decay_rates: list of floats representing the decay rates to be used\n for scaling the learning rate. Should be the same length as\n boundary_epochs.\n\n Returns:\n Returns a function that takes a single argument - the number of batches\n trained so far (global_step)- and returns the learning rate to be used\n for training the next batch.\n \"\"\"\n with tf.variable_scope('learning_rate'):\n batches_per_epoch = num_images / batch_size\n\n boundaries = [int(batches_per_epoch * epoch) for epoch in boundary_epochs]\n vals = [initial_learning_rate * decay for decay in decay_rates]\n\n def learning_rate_fn(global_step):\n global_step = tf.cast(global_step, tf.int32)\n rval = tf.train.piecewise_constant(global_step, boundaries, vals)\n return rval\n return learning_rate_fn\n\n\ndef distillation_coeff_fn(intital_distillation, global_step):\n global_step = tf.cast(global_step, tf.int32)\n rval = tf.train.exponential_decay (\n intital_distillation,\n global_step, \n 100000,\n 0.55,\n staircase = False)\n return rval\n\ndef resnet_model_fn(features, labels, mode, model_class, trainee, \n distillation_coeff, probes_coeff, resnet_size, num_probes,\n weight_decay_coeff, learning_rate_fn_mentor, \n learning_rate_fn_mentee, learning_rate_fn_finetune,\n momentum, data_format, pool_probes, pool_type,\n temperature=1, optimizer='momentum', \n loss_filter_fn=None):\n \"\"\"Shared functionality for different resnet model_fns.\n\n Initializes the ResnetModel representing the model layers\n and uses that model to build the necessary EstimatorSpecs for\n the `mode` in question. 
For training, this means building losses,\n the optimizer, and the train op that get passed into the EstimatorSpec.\n For evaluation and prediction, the EstimatorSpec is returned without\n a train op, but with the necessary parameters for the given mode.\n\n Args:\n features: tensor representing input images\n labels: tensor representing class labels for all input images\n mode: current estimator mode; should be one of\n `tf.estimator.ModeKeys.TRAIN`, `EVALUATE`, `PREDICT`\n model_class: a class representing a TensorFlow model that has a __call__\n function. We assume here that this is a subclass of ResnetModel.\n trainee: A string either `'mentee'` or `'mentor`'.\n resnet_size: A list of two integers for the size of the ResNet model for \n mentor followed by mentee.\n weight_decay_coeff: weight decay rate used to regularize learned variables.\n distillation_coeff: Weight for distillation.\n probes_coeff: weight for probes.\n learning_rate_fn_mentor: function that returns the current learning rate given\n the current global_step\n learning_rate_fn_mentee: function that returns the current learning rate given\n the current global_step\n learning_rate_fn_finetune: function that returns the current learning rate given\n the current global_step \n num_probes: How many equally spaced probes do we need. \n momentum: momentum term used for optimization.\n data_format: Input format ('channels_last', 'channels_first', or None).\n If set to None, the format is dependent on whether a GPU is available.\n temperature: A value of temperature to use for distillation. Defaults to 1\n so that it will remain backward compatible.\n loss_filter_fn: function that takes a string variable name and returns\n True if the var should be included in loss calculation, and False\n otherwise. 
If None, batch_normalization variables will be excluded\n from the loss.\n pool_probes: Downsampling for probes.\n pool_type: 'max' or 'mean'.\n optimizer: 'adam', 'adadelta' and 'momentum' are options.\n Returns:\n EstimatorSpec parameterized according to the input params and the\n current mode.\n \"\"\"\n with tf.variable_scope('inputs'):\n # Generate a summary node for the images\n tf.summary.image('images', features, max_outputs=6)\n\n model = model_class(resnet_size = resnet_size,\n pool_probes = pool_probes, \n pool_type = pool_type, \n num_probes = num_probes,\n data_format = data_format)\n logits_mentor, logits_mentee, probe_cost = model(features, \n mode == tf.estimator.ModeKeys.TRAIN)\n\n predictions_mentor = {\n 'classes': tf.argmax(logits_mentor, axis=1),\n 'probabilities': tf.nn.softmax(logits_mentor, \n name='softmax_tensor_mentor'),\n }\n\n predictions_mentee = {\n 'classes': tf.argmax(logits_mentee, axis=1),\n 'probabilities': tf.nn.softmax(logits_mentee, \n name='softmax_tensor_mentee'),\n }\n\n if mode == tf.estimator.ModeKeys.PREDICT:\n if trainee == 'mentor':\n return tf.estimator.EstimatorSpec(mode=mode, \n predictions=predictions_mentor)\n elif trainee == 'mentee' or trainee == 'finetune':\n return tf.estimator.EstimatorSpec(mode=mode, \n predictions=predictions_mentee)\n\n with tf.variable_scope('distillery'):\n temperature_softmax_mentor = tf.nn.softmax((tf.div(logits_mentor, \n temperature)), name ='softmax_temperature_tensor_mentor')\n distillation_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(\n logits = tf.div(logits_mentee,temperature),\n labels = temperature_softmax_mentor))\n\n tf.identity(distillation_loss, name='distillation_loss')\n tf.summary.scalar('distillation_loss', distillation_loss)\n tf.summary.scalar('scaled_distillation_loss', distillation_coeff *\n distillation_loss)\n\n with tf.variable_scope('cross_entropy'):\n # Calculate loss, which includes softmax cross entropy and L2 regularization.\n cross_entropy_mentor = tf.losses.softmax_cross_entropy(\n logits=logits_mentor, onehot_labels=labels)\n # Create a tensor named cross_entropy for logging purposes.\n tf.identity(cross_entropy_mentor, name='cross_entropy_mentor')\n tf.summary.scalar('cross_entropy_mentor', cross_entropy_mentor) \n\n cross_entropy_mentee = tf.losses.softmax_cross_entropy(\n logits=logits_mentee, onehot_labels=labels)\n tf.identity(cross_entropy_mentee, name='cross_entropy_mentee')\n tf.summary.scalar('cross_entropy_mentee', cross_entropy_mentee)\n\n # If no loss_filter_fn is passed, assume we want the default behavior,\n # which is that batch_normalization variables are excluded from loss.\n if not loss_filter_fn:\n def loss_filter_fn(name):\n return 'batch_normalization' not in name\n\n mentor_variables=tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,\n scope='mentor')\n mentee_variables=tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,\n scope='mentee') \n\n with tf.variable_scope('regularizers'): \n if weight_decay_coeff > 0:\n l2_mentor = weight_decay_coeff * tf.add_n(\n [tf.nn.l2_loss(v) for v in mentor_variables\n if loss_filter_fn(v.name)])\n l2_mentee = weight_decay_coeff * tf.add_n(\n [tf.nn.l2_loss(v) for v in mentee_variables\n if loss_filter_fn(v.name)]) \n else:\n l2_mentor = tf.constant(0.)\n l2_mentee = tf.constant(0.)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n with tf.variable_scope('learning_rates'):\n global_step = tf.train.get_or_create_global_step()\n learning_rate_mentor = learning_rate_fn_mentor(global_step)\n learning_rate_mentee 
= learning_rate_fn_mentee(global_step)\n learning_rate_finetune = learning_rate_fn_finetune(global_step) \n\n tf.identity(learning_rate_mentor, name='learning_rate_mentor' )\n tf.summary.scalar('learning_rate_mentor', learning_rate_mentor)\n tf.identity(learning_rate_mentee, name='learning_rate_mentee' )\n tf.summary.scalar('learning_rate_mentee', learning_rate_mentee)\n tf.identity(learning_rate_finetune, name='learning_rate_finetune' )\n tf.summary.scalar('learning_rate_finetune', learning_rate_finetune)\n\n with tf.variable_scope('mentor_cumulative_loss'):\n # Add weight decay and distillation to the loss.\n loss_mentor = cross_entropy_mentor + l2_mentor\n tf.summary.scalar('objective', loss_mentor) \n \n with tf.variable_scope('mentee_cumulative_loss'): \n distillation_coeff_decayed = distillation_coeff_fn(distillation_coeff, \n global_step) \n probe_scale = probes_coeff * distillation_coeff_decayed \n\n tf.identity(probe_cost, name='probe_cost') \n tf.summary.scalar('probe_loss', probe_cost)\n tf.summary.scalar('scaled_probe_loss', probe_scale *\n probe_cost)\n tf.identity(distillation_coeff, name='distillation_coeff_decayed')\n tf.summary.scalar('coeff',distillation_coeff_decayed) \n\n loss_mentee = cross_entropy_mentee + l2_mentee + \\\n distillation_coeff_decayed * distillation_loss + \\\n probe_scale * probe_cost\n tf.summary.scalar('objective', loss_mentee) \n \n with tf.variable_scope('mentee_finetune'):\n loss_finetune = cross_entropy_mentee + l2_mentee\n tf.summary.scalar('objective', loss_finetune) \n\n if optimizer[0] == 'momentum':\n with tf.variable_scope('mentor_momentum_optimizer'): \n optimizer_mentor = tf.train.MomentumOptimizer(\n learning_rate=learning_rate_mentor,\n momentum=momentum)\n elif optimizer[0] == 'adam':\n with tf.variable_scope('mentor_adam_optimizer'): \n optimizer_mentor = tf.train.AdamOptimizer(\n learning_rate=learning_rate_mentor)\n elif optimizer[0] == 'adadelta':\n with tf.variable_scope('mentor_adadelta_optimizer'): \n optimizer_mentor = tf.train.AdadeltaOptimizer(\n learning_rate=learning_rate_mentor)\n\n if optimizer[1] == 'momentum':\n with tf.variable_scope('mentee_momentum_optimizer'): \n optimizer_mentee = tf.train.MomentumOptimizer(\n learning_rate=learning_rate_mentee,\n momentum=momentum)\n elif optimizer[1] == 'adam':\n with tf.variable_scope('mentee_adam_optimizer'): \n optimizer_mentee = tf.train.AdamOptimizer(\n learning_rate=learning_rate_mentee)\n elif optimizer[1] == 'adadelta':\n with tf.variable_scope('mentee_adadelta_optimizer'): \n optimizer_mentee = tf.train.AdadeltaOptimizer(\n learning_rate=learning_rate_mentee)\n\n if optimizer[2] == 'momentum':\n with tf.variable_scope('finetune_momentum_optimizer'): \n optimizer_finetune = tf.train.MomentumOptimizer(\n learning_rate=learning_rate_finetune,\n momentum=momentum)\n elif optimizer[2] == 'adam':\n with tf.variable_scope('finetune_adam_optimizer'): \n optimizer_finetune = tf.train.AdamOptimizer(\n learning_rate=learning_rate_finetune)\n elif optimizer[2] == 'adadelta':\n with tf.variable_scope('finetune_adadelta_optimizer'): \n optimizer_finetune = tf.train.AdadeltaOptimizer(\n learning_rate=learning_rate_finetune)\n\n # Batch norm requires update ops to be added as a dependency to train_op\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n with tf.variable_scope('optimizers'):\n train_op_mentor = optimizer_mentor.minimize(loss_mentor, \n global_step, \n var_list = mentor_variables)\n train_op_mentee = 
optimizer_mentee.minimize(loss_mentee, \n global_step, \n var_list = mentee_variables) \n train_op_finetune = optimizer_finetune.minimize(loss_finetune, \n global_step, \n var_list = mentee_variables) \n else:\n with tf.variable_scope('mentor_cumulative_loss'):\n # Add weight decay and distillation to the loss.\n loss_mentor = cross_entropy_mentor + weight_decay_coeff * l2_mentor\n with tf.variable_scope('mentee_cumulative_loss'): \n loss_mentee = cross_entropy_mentee + weight_decay_coeff * l2_mentee\n with tf.variable_scope('mentee_finetune'):\n loss_finetune = cross_entropy_mentee + weight_decay_coeff * l2_mentee\n train_op_mentor = None\n train_op_mentee = None\n train_op_finetune = None\n\n with tf.variable_scope('metrics'):\n accuracy_mentor = tf.metrics.accuracy(\n tf.argmax(labels, axis=1), predictions_mentor['classes'])\n accuracy_mentee = tf.metrics.accuracy(\n tf.argmax(labels, axis=1), predictions_mentee['classes']) \n metrics = {'accuracy_mentor': accuracy_mentor,\n 'accuracy_mentee': accuracy_mentee}\n\n # Create a tensor named train_accuracy for logging purposes\n tf.identity(accuracy_mentor[1], name='train_accuracy_mentor')\n tf.summary.scalar('train_accuracy_mentor', accuracy_mentor[1])\n tf.identity(accuracy_mentee[1], name='train_accuracy_mentee')\n tf.summary.scalar('train_accuracy_mentee', accuracy_mentee[1])\n\n saver=tf.train.Saver(var_list = tf.global_variables())\n\n if trainee == 'mentor':\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions_mentor,\n loss=loss_mentor,\n train_op=train_op_mentor,\n scaffold=tf.train.Scaffold(saver=saver),\n eval_metric_ops=metrics)\n\n elif trainee == 'mentee':\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions_mentee,\n loss=loss_mentee,\n train_op=train_op_mentee,\n scaffold=tf.train.Scaffold(saver=saver),\n eval_metric_ops=metrics)\n elif trainee == 'finetune':\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions_mentee,\n loss=loss_finetune,\n train_op=train_op_finetune,\n scaffold=tf.train.Scaffold(saver=saver),\n eval_metric_ops=metrics) \n\n\ndef resnet_main(flags, model_function, input_function):\n # Using the Winograd non-fused algorithms provides a small performance boost.\n os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'\n\n # Set up a RunConfig to only save checkpoints once per training cycle.\n run_config = tf.estimator.RunConfig().replace(save_checkpoints_secs=1e9)\n mentor = tf.estimator.Estimator(\n model_fn=model_function, model_dir=flags.model_dir, \n config=run_config,\n params={\n 'resnet_size': [flags.resnet_size_mentor, flags.resnet_size_mentee],\n 'data_format': flags.data_format,\n 'batch_size': flags.batch_size,\n 'distillation_coeff': flags.distillation_coeff,\n 'probes_coeff': flags.probes_coeff,\n 'weight_decay_coeff': flags.weight_decay_coeff,\n 'optimizer': [flags.mentor_optimizer,\n flags.mentee_optimizer,\n flags.finetune_optimizer],\n 'temperature': flags.temperature, \n 'num_probes': flags.num_probes,\n 'pool_probes': flags.pool_probes,\n 'train_epochs_mentor': flags.train_epochs_mentor,\n 'train_epochs_mentee': flags.train_epochs_mentee,\n 'train_epochs_finetune': flags.train_epochs_finetune,\n 'initial_learning_rate_mentor': flags.initial_learning_rate_mentor,\n 'initial_learning_rate_mentee': flags.initial_learning_rate_mentee,\n 'initial_learning_rate_finetune': flags.initial_learning_rate_finetune,\n 'pool_type': flags.pool_type, \n 'trainee': 'mentor'\n })\n\n for i in range(flags.train_epochs_mentor // 
flags.epochs_per_eval):\n tensors_to_log = {\n 'learning_rate': 'learning_rates/learning_rate_mentor',\n 'cross_entropy': 'cross_entropy/cross_entropy_mentor' ,\n 'train_accuracy': 'metrics/train_accuracy_mentor'\n }\n\n logging_hook = tf.train.LoggingTensorHook(\n tensors=tensors_to_log, every_n_iter=100)\n\n def input_fn_train():\n return input_function(True, flags.data_dir, flags.batch_size,\n flags.epochs_per_eval, flags.num_parallel_calls)\n\n print(' *********************** ' )\n print(' Starting a mentor training cycle. [' + str(i) + '/' \n + str(flags.train_epochs_mentor // flags.epochs_per_eval) + ']')\n print(' *********************** ' ) \n \n mentor.train(input_fn=input_fn_train, hooks=[logging_hook])\n\n print('Starting to evaluate.')\n # Evaluate the model and print results\n def input_fn_eval():\n return input_function(False, flags.data_dir, flags.batch_size,\n 1, flags.num_parallel_calls)\n\n eval_results = mentor.evaluate(input_fn=input_fn_eval)\n print(eval_results)\n\n mentee = tf.estimator.Estimator(\n model_fn=model_function, model_dir=flags.model_dir, \n config=run_config,\n params={\n 'resnet_size': [flags.resnet_size_mentor, flags.resnet_size_mentee],\n 'data_format': flags.data_format,\n 'batch_size': flags.batch_size,\n 'distillation_coeff': flags.distillation_coeff,\n 'probes_coeff': flags.probes_coeff, \n 'optimizer': [flags.mentor_optimizer,\n flags.mentee_optimizer,\n flags.finetune_optimizer],\n 'weight_decay_coeff': flags.weight_decay_coeff, \n 'temperature': flags.temperature,\n 'num_probes': flags.num_probes, \n 'pool_probes': flags.pool_probes, \n 'train_epochs_mentor': flags.train_epochs_mentor,\n 'train_epochs_mentee': flags.train_epochs_mentee,\n 'train_epochs_finetune': flags.train_epochs_finetune,\n 'initial_learning_rate_mentor': flags.initial_learning_rate_mentor,\n 'initial_learning_rate_mentee': flags.initial_learning_rate_mentee,\n 'initial_learning_rate_finetune': flags.initial_learning_rate_finetune, \n 'pool_type': flags.pool_type, \n 'trainee': 'mentee'\n })\n\n for i in range(flags.train_epochs_mentee // flags.epochs_per_eval):\n tensors_to_log = {\n 'learning_rate': 'learning_rates/learning_rate_mentee',\n 'cross_entropy': 'cross_entropy/cross_entropy_mentee',\n 'train_accuracy': 'metrics/train_accuracy_mentee',\n 'distillation_loss': 'distillery/distillation_loss',\n 'distillation_coeff':'mentee_cumulative_loss/distillation_coeff_decayed'\n }\n\n logging_hook = tf.train.LoggingTensorHook(\n tensors=tensors_to_log, every_n_iter=100)\n\n def input_fn_train():\n return input_function(True, flags.data_dir, flags.batch_size,\n flags.epochs_per_eval, flags.num_parallel_calls)\n\n print(' *********************** ' )\n print(' Starting a mentee training cycle. 
[' + str(i) + '/' \n + str(flags.train_epochs_mentee // flags.epochs_per_eval) + ']')\n print(' *********************** ' )\n\n mentee.train(input_fn=input_fn_train, hooks=[logging_hook])\n\n print('Starting to evaluate.')\n # Evaluate the model and print results\n def input_fn_eval():\n return input_function(False, flags.data_dir, flags.batch_size,\n 1, flags.num_parallel_calls)\n\n eval_results = mentee.evaluate(input_fn=input_fn_eval)\n print(eval_results)\n\n finetune = tf.estimator.Estimator(\n model_fn=model_function, model_dir=flags.model_dir, \n config=run_config,\n params={\n 'resnet_size': [flags.resnet_size_mentor, flags.resnet_size_mentee],\n 'data_format': flags.data_format,\n 'batch_size': flags.batch_size,\n 'distillation_coeff': flags.distillation_coeff,\n 'probes_coeff': flags.probes_coeff, \n 'optimizer': [flags.mentor_optimizer,\n flags.mentee_optimizer,\n flags.finetune_optimizer],\n 'weight_decay_coeff': flags.weight_decay_coeff, \n 'temperature': flags.temperature,\n 'num_probes': flags.num_probes, \n 'pool_probes': flags.pool_probes,\n 'train_epochs_mentor': flags.train_epochs_mentor,\n 'train_epochs_mentee': flags.train_epochs_mentee,\n 'train_epochs_finetune': flags.train_epochs_finetune,\n 'initial_learning_rate_mentor': flags.initial_learning_rate_mentor,\n 'initial_learning_rate_mentee': flags.initial_learning_rate_mentee,\n 'initial_learning_rate_finetune': flags.initial_learning_rate_finetune,\n 'pool_type': flags.pool_type, \n 'trainee': 'finetune'\n })\n\n for i in range(flags.train_epochs_finetune // flags.epochs_per_eval):\n tensors_to_log = {\n 'learning_rate': 'learning_rates/learning_rate_mentee',\n 'cross_entropy': 'cross_entropy/cross_entropy_mentee',\n 'train_accuracy': 'metrics/train_accuracy_mentee',\n }\n\n logging_hook = tf.train.LoggingTensorHook(\n tensors=tensors_to_log, every_n_iter=100)\n\n def input_fn_train():\n return input_function(True, flags.data_dir, flags.batch_size,\n flags.epochs_per_eval, flags.num_parallel_calls)\n\n print(' *********************** ' )\n print(' Starting a mentee finetune cycle. [' + str(i) + '/' \n + str(flags.train_epochs_finetune // flags.epochs_per_eval) + ']')\n print(' *********************** ' )\n\n finetune.train(input_fn=input_fn_train, hooks=[logging_hook])\n\n print('Starting to evaluate.')\n # Evaluate the model and print results\n def input_fn_eval():\n return input_function(False, flags.data_dir, flags.batch_size,\n 1, flags.num_parallel_calls)\n\n eval_results = finetune.evaluate(input_fn=input_fn_eval)\n print(eval_results)\n\nclass ResnetArgParser(argparse.ArgumentParser):\n \"\"\"Arguments for configuring and running a Resnet Model.\n \"\"\"\n\n def __init__(self, resnet_size_choices=None):\n super(ResnetArgParser, self).__init__()\n self.add_argument(\n '--data_dir', type=str, default='./resnet_data',\n help='The directory where the input data is stored.')\n\n self.add_argument(\n '--num_parallel_calls', type=int, default=5,\n help='The number of records that are processed in parallel '\n 'during input processing. 
This can be optimized per data set but '\n 'for generally homogeneous data sets, should be approximately the '\n 'number of available CPU cores.')\n\n self.add_argument(\n '--model_dir', type=str, default='./resnet_model',\n help='The directory where the model will be stored.')\n\n self.add_argument(\n '--resnet_size_mentor', type=int, default=50,\n choices=resnet_size_choices,\n help='The size of the ResNet Mentor model to use.')\n\n self.add_argument(\n '--resnet_size_mentee', type=int, default=10,\n choices=resnet_size_choices,\n help='The size of the ResNet Mentee model to use.')\n\n self.add_argument(\n '--train_epochs_mentor', type=int, default=100,\n help='The number of epochs to use for training.')\n\n self.add_argument(\n '--train_epochs_mentee', type=int, default=100,\n help='The number of epochs to use for training.')\n\n self.add_argument(\n '--train_epochs_finetune', type=int, default=100,\n help='The number of epochs to use for training.')\n\n self.add_argument(\n '--epochs_per_eval', type=int, default=1,\n help='The number of training epochs to run between evaluations.')\n\n self.add_argument(\n '--batch_size', type=int, default=32,\n help='Batch size for training and evaluation.')\n\n self.add_argument(\n '--mentor_optimizer', type=str, default='momentum',\n help='Optimizer for training and evaluation.')\n\n self.add_argument(\n '--mentee_optimizer', type=str, default='momentum',\n help='Optimizer for training and evaluation.')\n\n self.add_argument(\n '--finetune_optimizer', type=str, default='momentum',\n help='Optimizer for training and evaluation.')\n\n self.add_argument(\n '--data_format', type=str, default=None,\n choices=['channels_first', 'channels_last'],\n help='A flag to override the data format used in the model. '\n 'channels_first provides a performance boost on GPU but '\n 'is not always compatible with CPU. If left unspecified, '\n 'the data format will be chosen automatically based on '\n 'whether TensorFlow was built for CPU or GPU.')\n\n self.add_argument(\n '--distillation_coeff', type=float, default=0.01,\n help='Coefficient of distillation to be applied from parent to'\n 'child. This is only useful when performing distillaiton.')\n\n self.add_argument(\n '--probes_coeff', type=float, default=0.0001,\n help='Coefficient of weight to be applied from parent to'\n 'child. This is only useful when performing mentoring.')\n\n self.add_argument(\n '--weight_decay_coeff', type=float, default=0.0002,\n help='Coefficient of weight to be applied from to the'\n 'weight decay regularizer.')\n\n self.add_argument(\n '--temperature', type=float, default=3,\n help='Temperature to be used for the softmax layer')\n\n self.add_argument(\n '--num_probes', type=int, default=0,\n help='Number of probes to be used')\n\n self.add_argument(\n '--pool_probes', type=int, default=2,\n help='Maxpool probes by')\n\n self.add_argument(\n '--initial_learning_rate_mentor', type=float, default=0.001,\n help='Set initial learning rate for mentor') \n\n self.add_argument(\n '--initial_learning_rate_mentee', type=float, default=0.001,\n help='Set initial learning rate for mentee') \n\n self.add_argument(\n '--initial_learning_rate_finetune', type=float, default=0.001,\n help='Set initial learning rate finetune') \n\n self.add_argument(\n '--pool_type', type=str, default='max',\n help='Pool type for probes.') "
] | [
[
"tensorflow.summary.scalar",
"tensorflow.variance_scaling_initializer",
"tensorflow.summary.image",
"tensorflow.reshape",
"tensorflow.train.MomentumOptimizer",
"tensorflow.train.AdadeltaOptimizer",
"tensorflow.variable_scope",
"tensorflow.nn.l2_loss",
"tensorflow.identity",
"tensorflow.layers.average_pooling2d",
"tensorflow.nn.softmax",
"tensorflow.train.LoggingTensorHook",
"tensorflow.train.exponential_decay",
"tensorflow.estimator.RunConfig",
"tensorflow.train.Scaffold",
"tensorflow.train.get_or_create_global_step",
"tensorflow.constant",
"tensorflow.transpose",
"tensorflow.get_collection",
"tensorflow.layers.batch_normalization",
"tensorflow.estimator.EstimatorSpec",
"tensorflow.global_variables",
"tensorflow.cast",
"tensorflow.layers.max_pooling2d",
"tensorflow.losses.mean_squared_error",
"tensorflow.losses.softmax_cross_entropy",
"tensorflow.layers.dense",
"tensorflow.control_dependencies",
"tensorflow.estimator.Estimator",
"tensorflow.pad",
"tensorflow.train.piecewise_constant",
"tensorflow.div",
"tensorflow.train.AdamOptimizer",
"tensorflow.test.is_built_with_cuda",
"tensorflow.argmax",
"tensorflow.nn.relu"
]
] |
dumpmemory/google-research | [
"bc87d010ab9086b6e92c3f075410fa6e1f27251b"
] | [
"minigrid_basics/examples/rw_four_directions.py"
] | [
"# coding=utf-8\n# Copyright 2022 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"Example that uses Gym-Minigrid, a custom environment, and custom actions.\n\nGym-Minigrid has a larger action space that is not standard in reinforcement\nlearning. By default, the actions are {rotate left, rotate right, forward, pick\nup object, drop object, toggle/activate object, done}. This example uses a class\noverridden to have the standard 4 directional actions: {left, right, up, down}.\n\nHere we have a random agent interacting with the environment. In this case, we\nalso use a custom environment, which is likely what one will do in their\nresearch. We are writing the agent observations to the disk just as a simple way\nto get some feedback of what is going on.\n\nSample run:\n\n ```\n python -m minigrid_basics.examples.rw_four_directions \\\n --gin_bindings=\"MonMiniGridEnv.stochasticity=0.1\"\n ```\n\n\"\"\"\n\nimport os\n\nfrom absl import app\nfrom absl import flags\nimport gin\nimport gym\nimport gym_minigrid # pylint: disable=unused-import\nfrom gym_minigrid.wrappers import RGBImgObsWrapper\nimport matplotlib.pylab as plt\nimport tensorflow as tf\n\nfrom minigrid_basics.custom_wrappers import tabular_wrapper # pylint: disable=unused-import\nfrom minigrid_basics.envs import mon_minigrid\n\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string('file_path', '/tmp/rw_four_directions',\n 'Path in which we will save the observations.')\nflags.DEFINE_multi_string(\n 'gin_bindings', [],\n 'Gin bindings to override default parameter values '\n '(e.g. \"MonMiniGridEnv.stochasticity=0.1\").')\n\n\ndef main(argv):\n if len(argv) > 1:\n raise app.UsageError('Too many command-line arguments.')\n\n gin.parse_config_files_and_bindings(\n [os.path.join(mon_minigrid.GIN_FILES_PREFIX, 'classic_fourrooms.gin')],\n bindings=FLAGS.gin_bindings,\n skip_unknown=False)\n env_id = mon_minigrid.register_environment()\n env = gym.make(env_id)\n env = RGBImgObsWrapper(env) # Get pixel observations\n # Get tabular observation and drop the 'mission' field:\n env = tabular_wrapper.TabularWrapper(env, get_rgb=True)\n env.reset()\n\n num_frames = 0\n max_num_frames = 500\n\n if not tf.io.gfile.exists(FLAGS.file_path):\n tf.io.gfile.makedirs(FLAGS.file_path)\n\n undisc_return = 0\n while num_frames < max_num_frames:\n # Act randomly\n obs, reward, done, _ = env.step(env.action_space.sample())\n undisc_return += reward\n num_frames += 1\n\n print('t:', num_frames, ' s:', obs['state'])\n # Draw environment frame just for simple visualization\n plt.imshow(obs['image'])\n path = os.path.join(FLAGS.file_path, 'obs_{}.png'.format(num_frames))\n plt.savefig(path)\n plt.clf()\n\n if done:\n break\n\n print('Undiscounted return: %.2f' % undisc_return)\n env.close()\n\n\nif __name__ == '__main__':\n app.run(main)\n"
] | [
[
"tensorflow.io.gfile.exists",
"matplotlib.pylab.savefig",
"tensorflow.io.gfile.makedirs",
"matplotlib.pylab.clf",
"matplotlib.pylab.imshow"
]
] |
VladimirYugay/diw | [
"d1a760f1911e9d09fbe038abffc3aa76d384f86a"
] | [
"scripts/run_mots_depth_inference.py"
] | [
"\"\"\" Script for running depth inference assuming MOTS dataset structure \"\"\"\nimport logging\nimport os\nimport sys\nfrom pathlib import Path, PurePath\n\nimport click\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow.compat.v1 as tf\nfrom IPython.core import ultratb\nfrom PIL import Image\n\nimport diw\nfrom diw.model import Model, get_vars_to_save_and_restore\n\nsys.excepthook = ultratb.FormattedTB(mode=\"Verbose\", color_scheme=\"Linux\", call_pdb=1)\n\n_logger = logging.getLogger(__name__)\n\n\ndef load_image(img_file):\n \"\"\"Load image from disk. Output value range: [0,255].\"\"\"\n return Image.open(img_file).convert(\"RGB\")\n\n\ndef resize_img(img, img_shape):\n \"\"\" resizes an image \"\"\"\n return img.resize(img_shape, Image.LANCZOS).convert(\"RGB\")\n\n\ndef plot_image(image, image_type=\"RGB\"):\n \"\"\" plots image with matplotlib \"\"\"\n plt.figure()\n color_map = None\n if image_type != \"RGB\":\n color_map = plt.cm.get_cmap(\"plasma\").reversed()\n plt.imshow(image, cmap=color_map)\n plt.show() # display it\n return plt\n\n\[email protected]()\[email protected](\n \"--checkpoint_dir\",\n \"checkpoint_dir\",\n default=\"./data/checkpoints/test\",\n type=click.Path(exists=True),\n help=\"Path to the model checkpoint\",\n)\[email protected](\n \"--data_dir\",\n \"data_dir\",\n default=\"./data/test/mots_data\",\n type=click.Path(exists=True),\n help=\"Path to MOTS data\",\n)\[email protected](\n \"--save_img\",\n \"save_img\",\n flag_value=True,\n help=\"Flag to whether save the image of the depth (besides numpy array)\",\n)\[email protected]_option(diw.__version__)\ndef main(data_dir, checkpoint_dir, save_img):\n if save_img:\n plt.figure()\n height, width = 128, 416\n os.environ[\"TF_FORCE_GPU_ALLOW_GROWTH\"] = \"true\" # to fix CUDA bug\n inference_model = Model(\n is_training=False, batch_size=1, img_height=height, img_width=width\n )\n checkpoint = tf.train.latest_checkpoint(checkpoint_dir)\n vars_to_restore = get_vars_to_save_and_restore(checkpoint)\n saver = tf.train.Saver(vars_to_restore)\n with tf.Session() as sess:\n saver.restore(sess, checkpoint)\n sequence_paths = [p for p in Path(data_dir).glob(\"*\") if p.is_dir()]\n for seq_path in sequence_paths:\n model_name = PurePath(checkpoint_dir).parts[-1]\n (seq_path / model_name).mkdir(parents=True, exist_ok=True)\n if save_img:\n (seq_path / (model_name + \"_depth_images\")).mkdir(\n parents=True, exist_ok=True\n )\n img_paths = sorted(\n [p for p in (seq_path / \"img1\").glob(\"*\") if p.is_file()],\n key=lambda path: str(path),\n )\n for img_path in img_paths:\n img_name = img_path.parts[-1].split(\".\")[0]\n print(\"Processing sequence: {}, image: {}\".format(seq_path, img_name))\n image = load_image(str(img_path))\n image = resize_img(image, (width, height))\n image = np.array(image)\n image = image[None, ...]\n depth = inference_model.inference_depth(image, sess)\n depth = depth[0, :, :, 0]\n np.save(str(seq_path / model_name / img_name), depth)\n if save_img:\n plt.imshow(depth, plt.cm.get_cmap(\"plasma\").reversed())\n plt.savefig(\n str(seq_path / (model_name + \"_depth_images\"))\n + \"/\"\n + (img_name + \".png\")\n )\n plt.clf()\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"tensorflow.compat.v1.train.Saver",
"tensorflow.compat.v1.Session",
"matplotlib.pyplot.figure",
"tensorflow.compat.v1.train.latest_checkpoint",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.show",
"matplotlib.pyplot.cm.get_cmap",
"numpy.array"
]
] |
soft-matter/pimsviewer | [
"9263ece121a58a0504c6e4d319ec6e18d1bb460a"
] | [
"pimsviewer/dimension.py"
] | [
"import os\nimport numpy as np\nfrom PyQt5 import uic\nfrom PyQt5.QtCore import QDir, Qt, QTimer, pyqtSignal\nfrom PyQt5.QtGui import QImage, QPainter, QPalette, QPixmap\nfrom PyQt5.QtWidgets import (QHBoxLayout, QSlider, QWidget, QAction, QApplication, QFileDialog, QLabel, QMainWindow, QMenu, QMessageBox, QScrollArea, QSizePolicy, QStatusBar, QVBoxLayout, QDockWidget, QPushButton, QStyle, QLineEdit, QCheckBox, QInputDialog)\n\nclass Dimension(QWidget):\n\n _playing = False\n _size = 0\n _position = 0\n _mergeable = False\n _merge = False\n _playable = False\n _fps = 5.0\n _max_playback_fps = 5.0\n\n play_event = pyqtSignal(QWidget)\n\n def __init__(self, name, size=0):\n super(Dimension, self).__init__()\n\n self.name = name\n self._size = size\n\n dirname = os.path.dirname(os.path.realpath(__file__))\n uic.loadUi(os.path.join(dirname, 'dimension.ui'), self)\n\n self.playButton.setIcon(self.style().standardIcon(QStyle.SP_MediaPlay))\n self.playButton.clicked.connect(self.click_event)\n\n self.playTimer = QTimer()\n self.playTimer.timeout.connect(self.play_tick)\n\n self.posButton.pressed.connect(self.update_position_from_btn)\n\n self.slider.setMaximum(self.size-1)\n self.slider.valueChanged.connect(self.update_position_from_slider)\n\n self.mergeButton.clicked.connect(self.update_merge)\n\n if not self.mergeable:\n self.mergeButton.hide()\n\n self._merge = self.mergeButton.isChecked()\n\n self.fps = self._fps\n self.fpsButton.pressed.connect(self.fps_changed)\n\n self.hide()\n\n def merge_image_over_dimension(self, image):\n # problem here: could be two axes with same size\n # TODO: think of a clever fix for this\n try:\n ix = image.shape.index(self._size)\n except ValueError:\n return image\n\n if self.name != 'c':\n # I don't know what to do, sum over axis\n image = np.sum(image, axis=ix)\n\n return image\n\n def enable(self):\n if not self.playable:\n return\n\n self.setEnabled(True)\n self.playButton.setEnabled(True)\n self.posButton.setEnabled(True)\n self.slider.setEnabled(True)\n self.fpsButton.setEnabled(True)\n\n if self.mergeable:\n self.mergeButton.setEnabled(True)\n self.mergeButton.show()\n\n self.show()\n\n def disable(self):\n self.setEnabled(False)\n self.playButton.setEnabled(False)\n self.posButton.setEnabled(False)\n self.slider.setEnabled(False)\n self.fpsButton.setEnabled(False)\n self.mergeButton.setEnabled(False)\n\n def fps_changed(self):\n fps, ok = QInputDialog.getDouble(self, \"Playback framerate\", \"New playback framerate\", self.fps)\n\n if ok:\n self.fps = fps\n\n def click_event(self):\n if not self.playable:\n return\n\n if not self.playing:\n self.playing = True\n else:\n self.playing = False\n\n def play_tick(self):\n if not self.playing:\n return\n\n if self._fps > self._max_playback_fps:\n self.position += int(round(self._fps / self._max_playback_fps))\n else:\n self.position += 1\n\n @property\n def size(self):\n return self._size\n\n @size.setter\n def size(self, size):\n self._size = size\n self.position = 0\n self.playing = False\n self.slider.setMinimum(0)\n self.slider.setMaximum(self.size-1)\n\n @property\n def fps(self):\n return self._fps\n\n @fps.setter\n def fps(self, fps):\n fps = float(fps)\n\n self._fps = fps\n play_fps = fps if fps < self._max_playback_fps else self._max_playback_fps\n self.playTimer.setInterval(int(round(1000.0 / play_fps)))\n self.fpsButton.setText('%d fps' % self.fps)\n\n @property\n def playable(self):\n return self._playable\n\n @playable.setter\n def playable(self, playable):\n self._playable = bool(playable)\n\n 
@property\n def playing(self):\n return self._playing\n\n @playing.setter\n def playing(self, playing):\n self._playing = bool(playing)\n if self._playing:\n self.playTimer.start()\n else:\n self.playTimer.stop()\n\n @property\n def position(self):\n return self._position\n\n def update_position_from_slider(self):\n position = self.slider.value()\n if position >= 0:\n self.position = position\n\n def update_position_from_btn(self):\n position, ok = QInputDialog.getInt(self, \"'%s' position\" % self.name, \"New '%s' position (0-%d)\" % (self.name, self.size-1), self.position, 0, self.size-1)\n\n if ok:\n self.position = position\n\n @position.setter\n def position(self, position):\n old_position = self.position\n\n while position < 0:\n position += self.size\n\n if position < self.size:\n self._position = position\n else:\n self._position = position - self.size\n\n self.slider.setValue(self.position)\n self.posButton.setText('%s=%d' % (self.name, self.position))\n\n if old_position != self.position:\n self.play_event.emit(self)\n\n def update_merge(self):\n self.merge = self.mergeButton.isChecked()\n\n @property\n def merge(self):\n return self._merge\n\n @merge.setter\n def merge(self, merge):\n if not self.mergeable:\n merge = False\n\n if merge != self._merge:\n self._merge = bool(merge)\n self.mergeButton.setChecked(self._merge)\n self.play_event.emit(self)\n\n @property\n def mergeable(self):\n return self._mergeable\n\n @mergeable.setter\n def mergeable(self, mergeable):\n self._mergeable = bool(mergeable)\n if not mergeable:\n self.merge = False\n\n def __len__(self):\n return self.size\n\n def __str__(self):\n classname = self.__class__.__name__\n playing = \"playing\" if self.playing else \"not playing\"\n return \"<%s %s of length %d (%s)>\" % (classname, self.name, self.size, playing)\n\n def __repr__(self):\n return self.__str__()\n\n"
] | [
[
"numpy.sum"
]
] |
lone17/deform-conv | [
"126ebcc283a4325c474332fa170f57d52a59e34d"
] | [
"deform_conv/utils.py"
] | [
"from __future__ import absolute_import, division\n\nfrom tensorflow.python import debug as tf_debug\nimport keras.backend as K\n\n\ndef keras_set_tf_debug():\n sess = K.get_session()\n sess = tf_debug.LocalCLIDebugWrapperSession(sess)\n sess.add_tensor_filter(\"has_inf_or_nan\", tf_debug.has_inf_or_nan)\n K.set_session(sess)\n"
] | [
[
"tensorflow.python.debug.LocalCLIDebugWrapperSession"
]
] |
katsugeneration/tf2-ndg-benchmarks | [
"ba2d07ef997fac87b3991a54c0a234f7c5425b0f"
] | [
"tf2_ndg_benckmarks/metrics/embedding.py"
] | [
"\"\"\"\nCopyright:\n Copyright 2019 by Katsuya SHIMABUKURO.\nLicense:\n MIT, see LICENSE for details.\n\"\"\"\nimport pathlib\nimport gzip\nimport requests\nimport tqdm\nimport numpy as np\nfrom gensim.models import KeyedVectors\n\n\nFILE_ID = '0B7XkCwpI5KDYNlNUTTlSS21pQmM'\nSOURCE_URL = 'https://drive.google.com/uc?export=download&id={file_id}'\nSOURCE_URL_WITH_CONFIRM = 'https://drive.google.com/uc?export=download&confirm={code}&id={file_id}'\n\n\nclass EmbeddingBase(object):\n \"\"\"Embedding based score calculator base.\"\"\"\n\n def __init__(\n self,\n emb_path: str = '/tmp/vector.bin'):\n \"\"\"Embedding class initialization.\n\n Args:\n emb_path (str): Embedding binary file path. When emb_path is not found, start to download from internet.\n\n \"\"\"\n self.emb_path = emb_path\n\n _emb_path = pathlib.Path(self.emb_path)\n if _emb_path.exists():\n self._load()\n return\n\n _emb_gz_path = pathlib.Path(self.emb_path + '.gz')\n\n # Downloas Google pre-trained vector bin from Google Drive\n\n # Get confirmation code\n res = requests.get(SOURCE_URL.format(**{'file_id': FILE_ID}))\n cookies = res.cookies\n res.close()\n code = cookies[next(filter(lambda k: '_warning_' in k, cookies.keys()))]\n\n # Download file.\n res = requests.get(\n SOURCE_URL_WITH_CONFIRM.format(**{'file_id': FILE_ID, 'code': code}),\n cookies=cookies,\n stream=True)\n pbar = tqdm.tqdm(unit=\"B\", unit_scale=True, desc='Download Google news corpus pre-trained vectors.')\n chunck_size = 1024\n with _emb_gz_path.open('wb') as w:\n for chunck in res.iter_content(chunck_size):\n w.write(chunck)\n pbar.update(len(chunck))\n pbar.close()\n res.close()\n\n # Decompress gzip file.\n with _emb_gz_path.open('rb') as f:\n with _emb_path.open('wb') as w:\n w.write(gzip.decompress(f.read()))\n\n self._load()\n\n def _load(self):\n \"\"\"Load word2vec model.\"\"\"\n self.model = KeyedVectors.load_word2vec_format(self.emb_path, binary=True)\n assert 'dog' in self.model\n\n def _get_vectors_from_sentene(self, sentence):\n \"\"\"Return contains word vector list.\"\"\"\n return [self.model.get_vector(w) for w in sentence.split(' ') if w in self.model]\n\n def _calc_cosine_sim(self, vectors1, vectors2):\n \"\"\"Calculate cosine similarity.\"\"\"\n vectors1 /= np.linalg.norm(vectors1, axis=-1, keepdims=True)\n vectors2 /= np.linalg.norm(vectors2, axis=-1, keepdims=True)\n return np.dot(vectors1, vectors2.T)\n\n\nclass Average(EmbeddingBase):\n \"\"\"Embedding based average score calculator.\"\"\"\n\n def sentence_score(\n self,\n reference: str,\n hypothesis: str) -> float:\n \"\"\"Embedding Average metrics.\n\n Args:\n reference (str): reference sentence.\n hypothesis: (str): hypothesis sentence.\n\n Return:\n float: Embedding Average score\n\n \"\"\"\n emb_ref = np.sum(self._get_vectors_from_sentene(reference), axis=0)\n emb_hyp = np.sum(self._get_vectors_from_sentene(hypothesis), axis=0)\n return self._calc_cosine_sim(emb_ref, emb_hyp)\n\n\nclass VectorExtrema(EmbeddingBase):\n \"\"\"Embedding based vector extrema score calculator.\"\"\"\n\n def sentence_score(\n self,\n reference: str,\n hypothesis: str) -> float:\n \"\"\"Embedding Vector Extrema metrics.\n\n Args:\n reference (str): reference sentence.\n hypothesis: (str): hypothesis sentence.\n\n Return:\n float: Embedding Vector Extrema score\n\n \"\"\"\n def extema(vectors):\n vec_max = np.max(vectors, axis=0)\n vec_min = np.min(vectors, axis=0)\n return list(map(lambda x, y: x if np.abs(x) > np.abs(y) else y, vec_max, vec_min))\n\n extema_ref = 
extema(self._get_vectors_from_sentene(reference))\n extema_hyp = extema(self._get_vectors_from_sentene(hypothesis))\n return self._calc_cosine_sim(extema_ref, extema_hyp)\n\n\nclass GreedyMatching(EmbeddingBase):\n \"\"\"Embedding based greedy matching score calculator.\"\"\"\n\n def sentence_score(\n self,\n reference: str,\n hypothesis: str) -> float:\n \"\"\"Embedding greedy matching metrics.\n\n Args:\n reference (str): reference sentence.\n hypothesis: (str): hypothesis sentence.\n\n Return:\n float: Embedding Greedy Matching score\n\n \"\"\"\n embs_ref = np.array(self._get_vectors_from_sentene(reference))\n embs_hyp = np.array(self._get_vectors_from_sentene(hypothesis))\n\n cs_matrix = self._calc_cosine_sim(embs_ref, embs_hyp) # len(embs_ref) x len(embs_hyp) matrix\n greedy_ref = np.max(cs_matrix, axis=0).mean()\n greedy_hyp = np.max(cs_matrix, axis=1).mean()\n return (greedy_ref + greedy_hyp) / 2.0\n"
] | [
[
"numpy.abs",
"numpy.max",
"numpy.min",
"numpy.dot",
"numpy.linalg.norm"
]
] |
EconForge/dolo | [
"9bb75b8f6ea87578393fe748003092ffb745e8d6"
] | [
"dolo/algos/simulations.py"
] | [
"import numpy\nimport pandas\nimport xarray as xr\nimport numpy as np\n\nfrom dolo.compiler.model import Model\nfrom dolo.numeric.optimize.ncpsolve import ncpsolve\nfrom dolo.numeric.optimize.newton import newton as newton_solver\nfrom dolo.numeric.optimize.newton import SerialDifferentiableFunction\n\n## TODO: extend for mc process\n\n\ndef response(model, dr, varname, T=40, impulse: float = None):\n\n i_exo = model.symbols[\"exogenous\"].index(varname)\n\n if impulse is None:\n try:\n impulse = numpy.sqrt(\n model.exogenous.Σ[i_exo, i_exo]\n ) # works only for IID/AR1\n except:\n impulse = numpy.sqrt(model.exogenous.σ) # works only for IID/AR1\n\n e1 = numpy.zeros(len(model.symbols[\"exogenous\"]))\n e1[i_exo] = impulse\n\n exogenous = model.exogenous\n print(exogenous)\n print(T, e1)\n m_simul = model.exogenous.response(T - 1, e1) # this is an xarray T x V\n m_simul = m_simul.expand_dims(\"N\")\n m_simul = m_simul.transpose(\"T\", \"N\", \"V\").data\n\n sim = simulate(model, dr, N=1, T=T, driving_process=m_simul, stochastic=False)\n\n irf = sim.sel(N=0)\n\n return irf\n\n\ndef find_index(sim, values):\n sh = sim.shape\n N = sh[0]\n T = sh[1]\n indices = np.zeros((N, T), dtype=int)\n for n in range(N):\n for t in range(T):\n v = sim[n, t, :]\n ind = np.where((values == v[None, :]).all(axis=1))[0][0]\n indices[n, t] = ind\n return indices\n\n\nfrom dolo.numeric.grids import CartesianGrid, UnstructuredGrid\nfrom dolo.algos.results import AlgoResult\nfrom dolo.numeric.decision_rule import DecisionRule\n\n\ndef simulate(\n model: Model,\n dr: DecisionRule,\n *,\n process=None,\n N=1,\n T=40,\n s0=None,\n i0=None,\n m0=None,\n driving_process=None,\n seed=42,\n stochastic=True,\n):\n \"\"\"Simulate a model using the specified decision rule.\n\n Parameters\n ----------\n\n model: Model\n\n dr: decision rule\n\n process:\n\n s0: ndarray\n initial state where all simulations start\n\n driving_process: ndarray\n realization of exogenous driving process (drawn randomly if None)\n\n N: int\n number of simulations\n T: int\n horizon for the simulations\n seed: int\n used to initialize the random number generator. 
Use it to replicate\n exact same results among simulations\n discard: boolean (False)\n if True, then all simulations containing at least one non finite value\n are discarded\n\n Returns\n -------\n xarray.DataArray:\n returns a ``T x N x n_v`` array where ``n_v``\n is the number of variables.\n \"\"\"\n\n if isinstance(dr, AlgoResult):\n dr = dr.dr\n\n calib = model.calibration\n parms = numpy.array(calib[\"parameters\"])\n\n if s0 is None:\n s0 = calib[\"states\"]\n\n n_x = len(model.symbols[\"controls\"])\n n_s = len(model.symbols[\"states\"])\n\n s_simul = numpy.zeros((T, N, n_s))\n x_simul = numpy.zeros((T, N, n_x))\n\n s_simul[0, :, :] = s0[None, :]\n\n # are we simulating a markov chain or a continuous process ?\n if driving_process is not None:\n if len(driving_process.shape) == 3:\n m_simul = driving_process\n sim_type = \"continuous\"\n if m0 is None:\n m0 = model.calibration[\"exogenous\"]\n x_simul[0, :, :] = dr.eval_ms(m0[None, :], s0[None, :])[0, :]\n elif len(driving_process.shape) == 2:\n i_simul = driving_process\n nodes = dr.exo_grid.nodes\n m_simul = nodes[i_simul]\n # inds = i_simul.ravel()\n # m_simul = np.reshape( np.concatenate( [nodes[i,:][None,:] for i in inds.ravel()], axis=0 ), inds.shape + (-1,) )\n sim_type = \"discrete\"\n x_simul[0, :, :] = dr.eval_is(i0, s0[None, :])[0, :]\n else:\n raise Exception(\"Incorrect specification of driving values.\")\n m0 = m_simul[0, :, :]\n else:\n from dolo.numeric.processes import DiscreteProcess\n\n if process is None:\n if hasattr(dr, \"dprocess\") and hasattr(dr.dprocess, \"simulate\"):\n process = dr.dprocess\n else:\n process = model.exogenous\n\n # detect type of simulation\n if not isinstance(process, DiscreteProcess):\n sim_type = \"continuous\"\n else:\n sim_type = \"discrete\"\n\n if sim_type == \"discrete\":\n if i0 is None:\n i0 = 0\n dp = process\n m_simul = dp.simulate(N, T, i0=i0, stochastic=stochastic)\n i_simul = find_index(m_simul, dp.values)\n m0 = dp.node(i0)\n x0 = dr.eval_is(i0, s0[None, :])[0, :]\n else:\n m_simul = process.simulate(N, T, m0=m0, stochastic=stochastic)\n if isinstance(m_simul, xr.DataArray):\n m_simul = m_simul.data\n sim_type = \"continuous\"\n if m0 is None:\n m0 = model.calibration[\"exogenous\"]\n x0 = dr.eval_ms(m0[None, :], s0[None, :])[0, :]\n x_simul[0, :, :] = x0[None, :]\n\n f = model.functions[\"arbitrage\"]\n g = model.functions[\"transition\"]\n\n numpy.random.seed(seed)\n\n mp = m0\n for i in range(T):\n m = m_simul[i, :, :]\n s = s_simul[i, :, :]\n if sim_type == \"discrete\":\n i_m = i_simul[i, :]\n xx = [\n dr.eval_is(i_m[ii], s[ii, :][None, :])[0, :] for ii in range(s.shape[0])\n ]\n x = np.row_stack(xx)\n else:\n x = dr.eval_ms(m, s)\n\n x_simul[i, :, :] = x\n\n ss = g(mp, s, x, m, parms)\n if i < T - 1:\n s_simul[i + 1, :, :] = ss\n mp = m\n\n if \"auxiliary\" not in model.functions: # TODO: find a better test than this\n l = [s_simul, x_simul]\n varnames = model.symbols[\"states\"] + model.symbols[\"controls\"]\n else:\n aux = model.functions[\"auxiliary\"]\n a_simul = aux(\n m_simul.reshape((N * T, -1)),\n s_simul.reshape((N * T, -1)),\n x_simul.reshape((N * T, -1)),\n parms,\n )\n a_simul = a_simul.reshape(T, N, -1)\n l = [m_simul, s_simul, x_simul, a_simul]\n varnames = (\n model.symbols[\"exogenous\"]\n + model.symbols[\"states\"]\n + model.symbols[\"controls\"]\n + model.symbols[\"auxiliaries\"]\n )\n\n simul = numpy.concatenate(l, axis=2)\n\n if sim_type == \"discrete\":\n varnames = [\"_i_m\"] + varnames\n simul = np.concatenate([i_simul[:, :, None], simul], 
axis=2)\n\n data = xr.DataArray(\n simul,\n dims=[\"T\", \"N\", \"V\"],\n coords={\"T\": range(T), \"N\": range(N), \"V\": varnames},\n )\n\n return data\n\n\ndef tabulate(\n model, dr, state, bounds=None, n_steps=100, s0=None, i0=None, m0=None, **kwargs\n):\n\n import numpy\n\n if isinstance(dr, AlgoResult):\n dr = dr.dr\n\n states_names = model.symbols[\"states\"]\n controls_names = model.symbols[\"controls\"]\n index = states_names.index(str(state))\n\n if bounds is None:\n try:\n endo_grid = dr.endo_grid\n bounds = [endo_grid.min[index], endo_grid.max[index]]\n except:\n domain = model.domain\n bounds = [domain.min[index], domain.max[index]]\n if bounds is None:\n raise Exception(\"No bounds provided for simulation or by model.\")\n\n values = numpy.linspace(bounds[0], bounds[1], n_steps)\n\n if s0 is None:\n s0 = model.calibration[\"states\"]\n\n svec = numpy.row_stack([s0] * n_steps)\n svec[:, index] = values\n\n try:\n dp = dr.dprocess\n except:\n dp = model.exogenous.discretize()\n\n if (i0 is None) and (m0 is None):\n from dolo.numeric.grids import UnstructuredGrid\n\n if isinstance(dp.grid, UnstructuredGrid):\n n_ms = dp.n_nodes\n [q, r] = divmod(n_ms, 2)\n i0 = q - 1 + r\n else:\n m0 = model.calibration[\"exogenous\"]\n\n if i0 is not None:\n m = dp.node(i0)\n xvec = dr.eval_is(i0, svec)\n elif m0 is not None:\n m = m0\n xvec = dr.eval_ms(m0, svec)\n\n mm = numpy.row_stack([m] * n_steps)\n l = [mm, svec, xvec]\n\n series = (\n model.symbols[\"exogenous\"] + model.symbols[\"states\"] + model.symbols[\"controls\"]\n )\n\n if \"auxiliary\" in model.functions:\n p = model.calibration[\"parameters\"]\n pp = numpy.row_stack([p] * n_steps)\n avec = model.functions[\"auxiliary\"](mm, svec, xvec, pp)\n l.append(avec)\n series.extend(model.symbols[\"auxiliaries\"])\n\n import pandas\n\n tb = numpy.concatenate(l, axis=1)\n df = pandas.DataFrame(tb, columns=series)\n\n return df\n\n\ndef tabulate_2d(model, dr, states=None, i0=0, s0=None, n=[12, 13]):\n\n import numpy\n import xarray as xr\n\n if isinstance(dr, AlgoResult):\n dr = dr.dr\n\n if s0 is None:\n s0 = model.calibration[\"states\"]\n if states is None:\n states = model.symbols[\"states\"]\n assert len(states) == 2\n domain = model.get_domain()\n lps = [numpy.linspace(*domain[s], n[i]) for i, s in enumerate(states)]\n i_x = model.symbols[\"states\"].index(states[0])\n i_y = model.symbols[\"states\"].index(states[1])\n vals = []\n vstates = []\n s = s0.copy()\n for xx in lps[0]:\n vv = []\n s[i_x] = xx\n for yy in lps[1]:\n s[i_y] = yy\n x = dr.eval_is(i0, s)\n vv.append(numpy.concatenate([s, x]))\n vals.append(vv)\n vv = numpy.array(vals)\n controls = model.symbols[\"states\"] + model.symbols[\"controls\"]\n # tab = xr.DataArray(vv, dims=[states[0], states[1], 'V'], coords=[lps[0], lps[1], 'V'])\n tab = xr.DataArray(\n vv,\n dims=[states[0], states[1], \"V\"],\n coords={states[0]: lps[0], states[1]: lps[1], \"V\": controls},\n )\n return tab\n\n\ndef plot3d(tab, varname):\n X = numpy.array(tab[tab.dims[0]])\n Y = numpy.array(tab[tab.dims[1]])\n Z = numpy.array(tab.loc[:, :, varname])\n data = [go.Surface(x=X, y=Y, z=Z)]\n layout = go.Layout(\n title=\"Equity\",\n autosize=False,\n width=500,\n height=500,\n # xaxis=go.XAxis(title=tab.dims[0]),\n # yaxis={'title':tab.dims[1]},\n # zaxis={'title':varname},\n xaxis=dict(\n title=\"x Axis\",\n nticks=7,\n titlefont=dict(family=\"Courier New, monospace\", size=18, color=\"#7f7f7f\"),\n ),\n margin=dict(l=65, r=50, b=65, t=90),\n )\n fig = go.Figure(data=data, layout=layout)\n return 
iplot(fig, filename=\"graph_\" + varname)\n\n\ndef plot_decision_rule(plot_controls=None, **kwargs):\n\n if isinstance(dr, AlgoResult):\n dr = dr.dr\n\n df = tabulate(dr, state, bounds=None, n_steps=100, s0=None, i0=None, m0=None)\n\n from matplotlib import pyplot\n\n if isinstance(plot_controls, str):\n cn = plot_controls\n pyplot.plot(values, df[cn], **kwargs)\n else:\n for cn in plot_controls:\n pyplot.plot(values, df[cn], label=cn, **kwargs)\n pyplot.legend()\n pyplot.xlabel(\"state = {} | mstate = {}\".format(state, i0))\n"
] | [
[
"numpy.sqrt",
"matplotlib.pyplot.legend",
"numpy.zeros",
"pandas.DataFrame",
"numpy.random.seed",
"numpy.row_stack",
"numpy.array",
"matplotlib.pyplot.plot",
"numpy.concatenate",
"numpy.linspace"
]
] |
TS-SE-GROUP/icme2019 | [
"fe9b31db7bf19b08d5e5d41a259f0a297eb21766"
] | [
"mdeepctr/models/xdeepfm.py"
] | [
"# -*- coding:utf-8 -*-\n\"\"\"\nAuthor:\n Weichen Shen,[email protected]\n\nReference:\n [1] Lian J, Zhou X, Zhang F, et al. xDeepFM: Combining Explicit and Implicit Feature Interactions for Recommender Systems[J]. arXiv preprint arXiv:1803.05170, 2018.(https://arxiv.org/pdf/1803.05170.pdf)\n\"\"\"\nimport tensorflow as tf\nfrom ..input_embedding import preprocess_input_embedding\nfrom ..layers.core import PredictionLayer, MLP\nfrom ..layers.interaction import CIN\nfrom ..utils import check_feature_config_dict\nfrom ..layers.utils import concat_fun\n\n\ndef xDeepFM(feature_dim_dict, embedding_size=8, hidden_size=(256, 256), cin_layer_size=(128, 128,), cin_split_half=True, cin_activation='relu', l2_reg_linear=0.00001, l2_reg_embedding=0.00001, l2_reg_deep=0, init_std=0.0001, seed=1024, keep_prob=1, activation='relu', final_activation='sigmoid', use_bn=False, output_dim=1,):\n \"\"\"Instantiates the xDeepFM architecture.\n\n :param feature_dim_dict: dict,to indicate sparse field and dense field like {'sparse':{'field_1':4,'field_2':3,'field_3':2},'dense':['field_4','field_5']}\n :param embedding_size: positive integer,sparse feature embedding_size\n :param hidden_size: list,list of positive integer or empty list, the layer number and units in each layer of deep net\n :param cin_layer_size: list,list of positive integer or empty list, the feature maps in each hidden layer of Compressed Interaction Network\n :param cin_split_half: bool.if set to True, half of the feature maps in each hidden will connect to output unit\n :param cin_activation: activation function used on feature maps\n :param l2_reg_linear: float. L2 regularizer strength applied to linear part\n :param l2_reg_embedding: L2 regularizer strength applied to embedding vector\n :param l2_reg_deep: L2 regularizer strength applied to deep net\n :param init_std: float,to use as the initialize std of embedding vector\n :param seed: integer ,to use as random seed.\n :param keep_prob: float in (0,1]. keep_prob used in deep net\n :param activation: Activation function to use in deep net\n :param final_activation: str,output activation,usually ``'sigmoid'`` or ``'linear'``\n :param use_bn: bool. 
Whether use BatchNormalization before activation or not.in deep net\n :return: A Keras model instance.\n \"\"\"\n check_feature_config_dict(feature_dim_dict)\n\n deep_emb_list, linear_logit, inputs_list = preprocess_input_embedding(feature_dim_dict, embedding_size,\n l2_reg_embedding, l2_reg_linear, init_std,\n seed, True)\n\n fm_input = concat_fun(deep_emb_list, axis=1)\n\n if len(cin_layer_size) > 0:\n exFM_out = CIN(cin_layer_size, cin_activation,\n cin_split_half, seed)(fm_input)\n exFM_logit = tf.keras.layers.Dense(1, activation=None,)(exFM_out)\n\n deep_input = tf.keras.layers.Flatten()(fm_input)\n \n output=[]\n for _ in range(output_dim):\n \n deep_out = MLP(hidden_size, activation, l2_reg_deep, keep_prob,\n use_bn, seed)(deep_input)\n deep_logit = tf.keras.layers.Dense(\n 1, use_bias=False, activation=None)(deep_out)\n \n if len(hidden_size) == 0 and len(cin_layer_size) == 0: # only linear\n final_logit = linear_logit\n elif len(hidden_size) == 0 and len(cin_layer_size) > 0: # linear + CIN\n final_logit = tf.keras.layers.add([linear_logit, exFM_logit])\n elif len(hidden_size) > 0 and len(cin_layer_size) == 0: # linear + Deep\n final_logit = tf.keras.layers.add([linear_logit, deep_logit])\n elif len(hidden_size) > 0 and len(cin_layer_size) > 0: # linear + CIN + Deep\n final_logit = tf.keras.layers.add(\n [linear_logit, deep_logit, exFM_logit])\n else:\n raise NotImplementedError\n \n output.append(PredictionLayer(final_activation)(final_logit))\n\n model = tf.keras.models.Model(inputs=inputs_list, outputs=output)\n return model\n"
] | [
[
"tensorflow.keras.layers.add",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.layers.Dense"
]
] |
xysun/playground | [
"20f9a7e0eb3d24e7cd32d8afd94b767b8fcc00b4"
] | [
"pommerman/envs/v0.py"
] | [
"\"\"\"The baseline Pommerman environment.\n\nThis evironment acts as game manager for Pommerman. Further environments,\nsuch as in v1.py, will inherit from this.\n\"\"\"\nimport json\nimport os\n\nimport numpy as np\nimport time\nfrom gym import spaces\nfrom gym.utils import seeding\nimport gym\n\nfrom .. import characters\nfrom .. import constants\nfrom .. import forward_model\nfrom .. import graphics\nfrom .. import utility\n\n\nclass Pomme(gym.Env):\n '''The base pommerman env.'''\n metadata = {\n 'render.modes': ['human', 'rgb_array', 'rgb_pixel'],\n }\n\n def __init__(self,\n render_fps=None,\n game_type=None,\n board_size=None,\n agent_view_size=None,\n num_rigid=None,\n num_wood=None,\n num_items=None,\n max_steps=1000,\n is_partially_observable=False,\n env=None,\n **kwargs):\n self._render_fps = render_fps\n self._agents = None\n self._game_type = game_type\n self._board_size = board_size\n self._agent_view_size = agent_view_size\n self._num_rigid = num_rigid\n self._num_wood = num_wood\n self._num_items = num_items\n self._max_steps = max_steps\n self._viewer = None\n self._is_partially_observable = is_partially_observable\n self._env = env\n\n self.training_agent = None\n self.model = forward_model.ForwardModel()\n\n # This can be changed through set_render_mode\n # or from the cli tool using '--render_mode=MODE_TYPE'\n self._mode = 'human'\n\n # Observation and Action Spaces. These are both geared towards a single\n # agent even though the environment expects actions and returns\n # observations for all four agents. We do this so that it's clear what\n # the actions and obs are for a single agent. Wrt the observations,\n # they are actually returned as a dict for easier understanding.\n self._set_action_space()\n self._set_observation_space()\n\n def _set_action_space(self):\n self.action_space = spaces.Discrete(6)\n\n def set_render_mode(self, mode):\n self._mode = mode\n\n def _set_observation_space(self):\n \"\"\"The Observation Space for each agent.\n\n There are a total of 3*board_size^2+12 observations:\n - all of the board (board_size^2)\n - bomb blast strength (board_size^2).\n - bomb life (board_size^2)\n - agent's position (2)\n - player ammo counts (1)\n - blast strength (1)\n - can_kick (1)\n - teammate (one of {AgentDummy.value, Agent3.value}).\n - enemies (three of {AgentDummy.value, Agent3.value}).\n \"\"\"\n bss = self._board_size**2\n min_obs = [0] * 3 * bss + [0] * 5 + [constants.Item.AgentDummy.value\n ] * 4\n max_obs = [len(constants.Item)] * bss + [self._board_size\n ] * bss + [25] * bss\n max_obs += [self._board_size] * 2 + [self._num_items] * 2 + [1]\n max_obs += [constants.Item.Agent3.value] * 4\n self.observation_space = spaces.Box(\n np.array(min_obs), np.array(max_obs))\n\n def set_agents(self, agents):\n self._agents = agents\n\n def set_training_agent(self, agent_id):\n self.training_agent = agent_id\n\n def set_init_game_state(self, game_state_file):\n \"\"\"Set the initial game state.\n\n The expected game_state_file JSON format is:\n - agents: list of agents serialized (agent_id, is_alive, position,\n ammo, blast_strength, can_kick)\n - board: board matrix topology (board_size^2)\n - board_size: board size\n - bombs: list of bombs serialized (position, bomber_id, life,\n blast_strength, moving_direction)\n - flames: list of flames serialized (position, life)\n - items: list of item by position\n - step_count: step count\n\n Args:\n game_state_file: JSON File input.\n \"\"\"\n self._init_game_state = None\n if game_state_file:\n with 
open(game_state_file, 'r') as f:\n self._init_game_state = json.loads(f.read())\n\n def make_board(self):\n self._board = utility.make_board(self._board_size, self._num_rigid,\n self._num_wood)\n\n def make_items(self):\n self._items = utility.make_items(self._board, self._num_items)\n\n def act(self, obs):\n agents = [agent for agent in self._agents \\\n if agent.agent_id != self.training_agent]\n return self.model.act(agents, obs, self.action_space)\n\n def get_observations(self):\n self.observations = self.model.get_observations(\n self._board, self._agents, self._bombs,\n self._is_partially_observable, self._agent_view_size,\n self._game_type, self._env)\n return self.observations\n\n def _get_rewards(self):\n return self.model.get_rewards(self._agents, self._game_type,\n self._step_count, self._max_steps)\n\n def _get_done(self):\n return self.model.get_done(self._agents, self._step_count,\n self._max_steps, self._game_type,\n self.training_agent)\n\n def _get_info(self, done, rewards):\n return self.model.get_info(done, rewards, self._game_type, self._agents)\n\n def reset(self):\n assert (self._agents is not None)\n\n if self._init_game_state is not None:\n self.set_json_info()\n else:\n self._step_count = 0\n self.make_board()\n self.make_items()\n self._bombs = []\n self._flames = []\n self._powerups = []\n for agent_id, agent in enumerate(self._agents):\n pos = np.where(self._board == utility.agent_value(agent_id))\n row = pos[0][0]\n col = pos[1][0]\n agent.set_start_position((row, col))\n agent.reset()\n\n return self.get_observations()\n\n def seed(self, seed=None):\n gym.spaces.prng.seed(seed)\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def step(self, actions):\n max_blast_strength = self._agent_view_size or 10\n result = self.model.step(\n actions,\n self._board,\n self._agents,\n self._bombs,\n self._items,\n self._flames,\n max_blast_strength=max_blast_strength)\n self._board, self._agents, self._bombs, self._items, self._flames = \\\n result[:5]\n\n done = self._get_done()\n obs = self.get_observations()\n reward = self._get_rewards()\n info = self._get_info(done, reward)\n\n self._step_count += 1\n return obs, reward, done, info\n\n def render(self,\n mode=None,\n close=False,\n record_pngs_dir=None,\n record_json_dir=None,\n do_sleep=True):\n if close:\n self.close()\n return\n\n mode = mode or self._mode or 'human'\n\n if mode == 'rgb_array':\n rgb_array = graphics.PixelViewer.rgb_array(\n self._board, self._board_size, self._agents,\n self._is_partially_observable, self._agent_view_size)\n return rgb_array[0]\n\n if self._viewer is None:\n if mode == 'rgb_pixel':\n self._viewer = graphics.PixelViewer(\n board_size=self._board_size,\n agents=self._agents,\n agent_view_size=self._agent_view_size,\n partially_observable=self._is_partially_observable)\n else:\n self._viewer = graphics.PommeViewer(\n board_size=self._board_size,\n agents=self._agents,\n partially_observable=self._is_partially_observable,\n agent_view_size=self._agent_view_size,\n game_type=self._game_type)\n\n self._viewer.set_board(self._board)\n self._viewer.set_agents(self._agents)\n self._viewer.set_step(self._step_count)\n self._viewer.render()\n\n # Register all agents which need human input with Pyglet.\n # This needs to be done here as the first `imshow` creates the\n # window. 
Using `push_handlers` allows for easily creating agents\n # that use other Pyglet inputs such as joystick, for example.\n for agent in self._agents:\n if agent.has_user_input():\n self._viewer.window.push_handlers(agent)\n else:\n self._viewer.set_board(self._board)\n self._viewer.set_agents(self._agents)\n self._viewer.set_step(self._step_count)\n self._viewer.render()\n\n if record_pngs_dir:\n self._viewer.save(record_pngs_dir)\n if record_json_dir:\n self.save_json(record_json_dir)\n\n if do_sleep:\n time.sleep(1.0 / self._render_fps)\n\n def close(self):\n if self._viewer is not None:\n self._viewer.close()\n self._viewer = None\n\n for agent in self._agents:\n agent.shutdown()\n\n @staticmethod\n def featurize(obs):\n board = obs[\"board\"].reshape(-1).astype(np.float32)\n bomb_blast_strength = obs[\"bomb_blast_strength\"].reshape(-1) \\\n .astype(np.float32)\n bomb_life = obs[\"bomb_life\"].reshape(-1).astype(np.float32)\n position = utility.make_np_float(obs[\"position\"])\n ammo = utility.make_np_float([obs[\"ammo\"]])\n blast_strength = utility.make_np_float([obs[\"blast_strength\"]])\n can_kick = utility.make_np_float([obs[\"can_kick\"]])\n\n teammate = utility.make_np_float([obs[\"teammate\"].value])\n enemies = utility.make_np_float([e.value for e in obs[\"enemies\"]])\n return np.concatenate(\n (board, bomb_blast_strength, bomb_life, position, ammo,\n blast_strength, can_kick, teammate, enemies))\n\n def save_json(self, record_json_dir):\n info = self.get_json_info()\n count = \"{0:0=3d}\".format(self._step_count)\n suffix = count + '.json'\n path = os.path.join(record_json_dir, suffix)\n with open(path, 'w') as f:\n f.write(json.dumps(info, sort_keys=True, indent=4))\n\n def get_json_info(self):\n \"\"\"Returns a json snapshot of the current game state.\"\"\"\n ret = {\n 'board_size': self._board_size,\n 'step_count': self._step_count,\n 'board': self._board,\n 'agents': self._agents,\n 'bombs': self._bombs,\n 'flames': self._flames,\n 'items': [[k, i] for k, i in self._items.items()]\n }\n for key, value in ret.items():\n ret[key] = json.dumps(value, cls=utility.PommermanJSONEncoder)\n return ret\n\n def set_json_info(self):\n \"\"\"Sets the game state as the init_game_state.\"\"\"\n board_size = int(self._init_game_state['board_size'])\n self._board_size = board_size\n self._step_count = int(self._init_game_state['step_count'])\n\n board_array = json.loads(self._init_game_state['board'])\n self._board = np.ones((board_size, board_size)).astype(np.uint8)\n self._board *= constants.Item.Passage.value\n for x in range(self._board_size):\n for y in range(self._board_size):\n self._board[x, y] = board_array[x][y]\n\n self._items = {}\n item_array = json.loads(self._init_game_state['items'])\n for i in item_array:\n self._items[tuple(i[0])] = i[1]\n\n agent_array = json.loads(self._init_game_state['agents'])\n for a in agent_array:\n agent = next(x for x in self._agents \\\n if x.agent_id == a['agent_id'])\n agent.set_start_position((a['position'][0], a['position'][1]))\n agent.reset(\n int(a['ammo']), bool(a['is_alive']), int(a['blast_strength']),\n bool(a['can_kick']))\n\n self._bombs = []\n bomb_array = json.loads(self._init_game_state['bombs'])\n for b in bomb_array:\n bomber = next(x for x in self._agents \\\n if x.agent_id == b['bomber_id'])\n moving_direction = b['moving_direction']\n if moving_direction is not None:\n moving_direction = constants.Action(moving_direction)\n self._bombs.append(\n characters.Bomb(bomber, tuple(b['position']), int(b['life']),\n 
int(b['blast_strength']), moving_direction))\n\n self._flames = []\n flame_array = json.loads(self._init_game_state['flames'])\n for f in flame_array:\n self._flames.append(\n characters.Flame(tuple(f['position']), f['life']))\n"
] | [
[
"numpy.array",
"numpy.ones",
"numpy.concatenate"
]
] |
shubhamsingh987/PySyft | [
"ff967e3735bd7d47667d1d3e5038ba1493ca2e90",
"ff967e3735bd7d47667d1d3e5038ba1493ca2e90"
] | [
"syft/frameworks/torch/fl/utils.py",
"test/workers/test_websocket_worker.py"
] | [
"import syft as sy\nimport torch\nfrom typing import Dict\nfrom typing import Any\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\ndef extract_batches_per_worker(federated_train_loader: sy.FederatedDataLoader):\n \"\"\"Extracts the batches from the federated_train_loader and stores them\n in a dictionary (keys = data.location).\n\n Args:\n federated_train_loader: the connection object we use to send responses.\n back to the client.\n\n \"\"\"\n logging_interval = 100\n batches = {}\n for worker_id in federated_train_loader.workers:\n worker = federated_train_loader.federated_dataset.datasets[worker_id].location\n batches[worker] = []\n\n for batch_idx, (data, target) in enumerate(federated_train_loader):\n if batch_idx % logging_interval == 0:\n logger.debug(\"Extracted %s batches from federated_train_loader\", batch_idx)\n batches[data.location].append((data, target))\n\n return batches\n\n\ndef add_model(dst_model, src_model):\n \"\"\"Add the parameters of two models.\n\n Args:\n dst_model (torch.nn.Module): the model to which the src_model will be added.\n src_model (torch.nn.Module): the model to be added to dst_model.\n Returns:\n torch.nn.Module: the resulting model of the addition.\n\n \"\"\"\n\n params1 = src_model.named_parameters()\n params2 = dst_model.named_parameters()\n dict_params2 = dict(params2)\n with torch.no_grad():\n for name1, param1 in params1:\n if name1 in dict_params2:\n dict_params2[name1].set_(param1.data + dict_params2[name1].data)\n return dst_model\n\n\ndef scale_model(model, scale):\n \"\"\"Scale the parameters of a model.\n\n Args:\n model (torch.nn.Module): the models whose parameters will be scaled.\n scale (float): the scaling factor.\n Returns:\n torch.nn.Module: the module with scaled parameters.\n\n \"\"\"\n params = model.named_parameters()\n dict_params = dict(params)\n with torch.no_grad():\n for name, param in dict_params.items():\n dict_params[name].set_(dict_params[name].data * scale)\n return model\n\n\ndef federated_avg(models: Dict[Any, torch.nn.Module]) -> torch.nn.Module:\n \"\"\"Calculate the federated average of a dictionary containing models.\n The models are extracted from the dictionary\n via the models.values() command.\n\n Args:\n models (Dict[Any, torch.nn.Module]): a dictionary of models\n for which the federated average is calculated.\n\n Returns:\n torch.nn.Module: the module with averaged parameters.\n \"\"\"\n nr_models = len(models)\n model_list = list(models.values())\n model = model_list[0]\n for i in range(1, nr_models):\n model = add_model(model, model_list[i])\n model = scale_model(model, 1.0 / nr_models)\n return model\n\n\ndef accuracy(pred_softmax, target):\n \"\"\"Calculate the accuray of a given prediction.\n\n This functions assumes pred_softmax to be converted into the final prediction by taking the argmax.\n\n Args:\n pred_softmax: array type(float), providing nr_classes values per element in target.\n target: array type(int), correct classes, taking values in range [0, nr_classes).\n\n Returns:\n accuracy: float, fraction of correct predictions.\n\n \"\"\"\n nr_elems = len(target)\n pred = pred_softmax.argmax(dim=1)\n return (pred.float() == target.view(pred.shape).float()).sum().numpy() / float(nr_elems)\n\n\ndef create_gaussian_mixture_toy_data(nr_samples: int): # pragma: no cover\n \"\"\" Create a simple toy data for binary classification\n\n The data is drawn from two normal distributions\n target = 1: mu = 2, sigma = 1\n target = 0: mu = 0, sigma = 1\n The dataset is balanced with an equal number 
of positive and negative samples\n\n Args:\n nr_samples: number of samples to generate\n\n Returns:\n data, targets\n\n\n \"\"\"\n sample_dim = 2\n one_half = int(nr_samples / 2)\n X1 = torch.randn(one_half, sample_dim, requires_grad=True) - 5\n X2 = torch.randn(one_half, sample_dim, requires_grad=True) + 5\n X = torch.cat([X1, X2], dim=0)\n Y1 = torch.zeros(one_half, requires_grad=False).long()\n Y2 = torch.ones(one_half, requires_grad=False).long()\n Y = torch.cat([Y1, Y2], dim=0)\n return X, Y\n\n\ndef iris_data_partial():\n \"\"\"\n\n Returns: 30 samples from the iris data set: https://archive.ics.uci.edu/ml/datasets/iris\n\n \"\"\"\n data = [\n [5.1, 3.5, 1.4, 0.2],\n [4.9, 3.0, 1.4, 0.2],\n [4.7, 3.2, 1.3, 0.2],\n [4.6, 3.1, 1.5, 0.2],\n [5.0, 3.6, 1.4, 0.2],\n [5.4, 3.9, 1.7, 0.4],\n [4.6, 3.4, 1.4, 0.3],\n [5.0, 3.4, 1.5, 0.2],\n [4.4, 2.9, 1.4, 0.2],\n [4.9, 3.1, 1.5, 0.1],\n ]\n\n target_to_string = {0: \"Iris-setosa\", 1: \"Iris-versicolor\", 2: \"Iris-virginica\"}\n targets = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n\n data += [\n [7.0, 3.2, 4.7, 1.4],\n [6.4, 3.2, 4.5, 1.5],\n [6.9, 3.1, 4.9, 1.5],\n [5.5, 2.3, 4.0, 1.3],\n [6.5, 2.8, 4.6, 1.5],\n [5.7, 2.8, 4.5, 1.3],\n [6.3, 3.3, 4.7, 1.6],\n [4.9, 2.4, 3.3, 1.0],\n [6.6, 2.9, 4.6, 1.3],\n [5.2, 2.7, 3.9, 1.4],\n ]\n\n targets += [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n\n data += [\n [6.3, 3.3, 6.0, 2.5],\n [5.8, 2.7, 5.1, 1.9],\n [7.1, 3.0, 5.9, 2.1],\n [6.3, 2.9, 5.6, 1.8],\n [6.5, 3.0, 5.8, 2.2],\n [7.6, 3.0, 6.6, 2.1],\n [4.9, 2.5, 4.5, 1.7],\n [7.3, 2.9, 6.3, 1.8],\n [6.7, 2.5, 5.8, 1.8],\n [7.2, 3.6, 6.1, 2.5],\n ]\n\n targets += [2, 2, 2, 2, 2, 2, 2, 2, 2, 2]\n\n return torch.tensor(data), torch.tensor(targets)\n",
"import io\nfrom os.path import exists, join\nimport time\nfrom socket import gethostname\nfrom OpenSSL import crypto, SSL\nimport pytest\nimport torch\nimport syft as sy\nfrom syft.generic.frameworks.hook import hook_args\nfrom syft.frameworks.torch.fl import utils\n\nfrom syft.workers.websocket_client import WebsocketClientWorker\nfrom syft.workers.websocket_server import WebsocketServerWorker\n\nfrom test.conftest import instantiate_websocket_client_worker\n\n\nPRINT_IN_UNITTESTS = False\n\n\[email protected](\"secure\", [True, False])\ndef test_websocket_worker_basic(hook, start_proc, secure, tmpdir):\n \"\"\"Evaluates that you can do basic tensor operations using\n WebsocketServerWorker in insecure and secure mode.\"\"\"\n\n def create_self_signed_cert(cert_path, key_path):\n # create a key pair\n k = crypto.PKey()\n k.generate_key(crypto.TYPE_RSA, 1024)\n\n # create a self-signed cert\n cert = crypto.X509()\n cert.gmtime_adj_notBefore(0)\n cert.gmtime_adj_notAfter(1000)\n cert.set_pubkey(k)\n cert.sign(k, \"sha1\")\n\n # store keys and cert\n open(cert_path, \"wb\").write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))\n open(key_path, \"wb\").write(crypto.dump_privatekey(crypto.FILETYPE_PEM, k))\n\n kwargs = {\n \"id\": \"secure_fed\" if secure else \"not_secure_fed\",\n \"host\": \"localhost\",\n \"port\": 8766,\n \"hook\": hook,\n }\n\n if secure:\n # Create cert and keys\n cert_path = tmpdir.join(\"test.crt\")\n key_path = tmpdir.join(\"test.key\")\n create_self_signed_cert(cert_path, key_path)\n kwargs[\"cert_path\"] = cert_path\n kwargs[\"key_path\"] = key_path\n\n process_remote_worker = start_proc(WebsocketServerWorker, **kwargs)\n\n time.sleep(0.1)\n x = torch.ones(5)\n\n if secure:\n # unused args\n del kwargs[\"cert_path\"]\n del kwargs[\"key_path\"]\n\n kwargs[\"secure\"] = secure\n remote_proxy = instantiate_websocket_client_worker(**kwargs)\n\n x = x.send(remote_proxy)\n y = x + x\n y = y.get()\n\n assert (y == torch.ones(5) * 2).all()\n\n del x\n\n remote_proxy.close()\n time.sleep(0.1)\n remote_proxy.remove_worker_from_local_worker_registry()\n process_remote_worker.terminate()\n\n\ndef test_websocket_workers_search(hook, start_remote_worker):\n \"\"\"Evaluates that a client can search and find tensors that belong\n to another party\"\"\"\n # Args for initializing the websocket server and client\n server, remote_proxy = start_remote_worker(id=\"fed2\", hook=hook, port=8767)\n\n # Sample tensor to store on the server\n sample_data = torch.tensor([1, 2, 3, 4]).tag(\"#sample_data\", \"#another_tag\")\n _ = sample_data.send(remote_proxy)\n\n # Search for the tensor located on the server by using its tag\n results = remote_proxy.search([\"#sample_data\", \"#another_tag\"])\n\n assert results\n assert results[0].owner.id == \"me\"\n assert results[0].location.id == \"fed2\"\n\n # Search multiple times should still work\n results = remote_proxy.search([\"#sample_data\", \"#another_tag\"])\n\n assert results\n assert results[0].owner.id == \"me\"\n assert results[0].location.id == \"fed2\"\n\n remote_proxy.close()\n time.sleep(0.1)\n remote_proxy.remove_worker_from_local_worker_registry()\n server.terminate()\n\n\ndef test_list_objects_remote(hook, start_remote_worker):\n server, remote_proxy = start_remote_worker(id=\"fed-list-objects\", hook=hook, port=8765)\n remote_proxy.clear_objects()\n\n x = torch.tensor([1, 2, 3]).send(remote_proxy)\n\n res = remote_proxy.list_tensors_remote()\n\n res_dict = eval(res.replace(\"tensor\", \"torch.tensor\"))\n assert len(res_dict) == 
1\n\n y = torch.tensor([4, 5, 6]).send(remote_proxy)\n res = remote_proxy.list_tensors_remote()\n res_dict = eval(res.replace(\"tensor\", \"torch.tensor\"))\n assert len(res_dict) == 2\n\n # delete x before terminating the websocket connection\n del x\n del y\n time.sleep(0.1)\n remote_proxy.close()\n time.sleep(0.1)\n remote_proxy.remove_worker_from_local_worker_registry()\n server.terminate()\n\n\ndef test_objects_count_remote(hook, start_remote_worker):\n server, remote_proxy = start_remote_worker(id=\"fed-count-objects\", hook=hook, port=8764)\n remote_proxy.clear_objects()\n\n x = torch.tensor([1, 2, 3]).send(remote_proxy)\n\n nr_objects = remote_proxy.tensors_count_remote()\n assert nr_objects == 1\n\n y = torch.tensor([4, 5, 6]).send(remote_proxy)\n nr_objects = remote_proxy.tensors_count_remote()\n assert nr_objects == 2\n\n x.get()\n nr_objects = remote_proxy.tensors_count_remote()\n assert nr_objects == 1\n\n # delete remote object before terminating the websocket connection\n del y\n time.sleep(0.1)\n remote_proxy.close()\n time.sleep(0.1)\n remote_proxy.remove_worker_from_local_worker_registry()\n server.terminate()\n\n\ndef test_clear_objects_remote(hook, start_remote_worker):\n server, remote_proxy = start_remote_worker(id=\"fed-clear-objects\", hook=hook, port=8769)\n\n x = torch.tensor([1, 2, 3]).send(remote_proxy, garbage_collect_data=False)\n y = torch.tensor(4).send(remote_proxy, garbage_collect_data=False)\n\n nr_objects = remote_proxy.tensors_count_remote()\n assert nr_objects == 2\n\n remote_proxy.clear_objects_remote()\n nr_objects = remote_proxy.objects_count_remote()\n assert nr_objects == 0\n\n remote_proxy.close()\n remote_proxy.remove_worker_from_local_worker_registry()\n server.terminate()\n\n\ndef test_connect_close(hook, start_remote_worker):\n server, remote_proxy = start_remote_worker(id=\"fed-connect-close\", hook=hook, port=8770)\n\n x = torch.tensor([1, 2, 3])\n x_ptr = x.send(remote_proxy)\n\n assert remote_proxy.tensors_count_remote() == 1\n\n remote_proxy.close()\n\n time.sleep(0.1)\n\n remote_proxy.connect()\n\n assert remote_proxy.tensors_count_remote() == 1\n\n x_val = x_ptr.get()\n assert (x_val == x).all()\n\n remote_proxy.close()\n remote_proxy.remove_worker_from_local_worker_registry()\n\n time.sleep(0.1)\n\n server.terminate()\n\n\ndef test_websocket_worker_multiple_output_response(hook, start_remote_worker):\n \"\"\"Evaluates that you can do basic tensor operations using\n WebsocketServerWorker.\"\"\"\n server, remote_proxy = start_remote_worker(id=\"socket_multiple_output\", hook=hook, port=8771)\n\n x = torch.tensor([1.0, 3, 2])\n x = x.send(remote_proxy)\n\n p1, p2 = torch.sort(x)\n x1, x2 = p1.get(), p2.get()\n\n assert (x1 == torch.tensor([1.0, 2, 3])).all()\n assert (x2 == torch.tensor([0, 2, 1])).all()\n\n x.get() # retrieve remote object before closing the websocket connection\n\n remote_proxy.close()\n server.terminate()\n\n\ndef test_send_command_whitelist(hook, start_remote_worker):\n server, remote_proxy = start_remote_worker(\n id=\"worker_call_api_good_methods\", hook=hook, port=8772\n )\n whitelisted_methods = {\n \"torch\": {\"tensor\": [1, 2, 3], \"rand\": (2, 3), \"randn\": (2, 3), \"zeros\": (2, 3)}\n }\n\n for framework, methods in whitelisted_methods.items():\n attr = getattr(remote_proxy.remote, framework)\n\n for method, inp in methods.items():\n x = getattr(attr, method)(inp)\n\n if \"rand\" not in method:\n assert (x.get() == getattr(torch, method)(inp)).all()\n\n remote_proxy.close()\n server.terminate()\n\n\ndef 
test_send_command_not_whitelisted(hook, start_remote_worker):\n server, remote_proxy = start_remote_worker(\n id=\"worker_call_api_bad_method\", hook=hook, port=8773\n )\n\n method_not_exist = \"openmind\"\n\n for framework in remote_proxy.remote.frameworks:\n if framework in dir(remote_proxy.remote):\n attr = getattr(remote_proxy.remote, framework)\n with pytest.raises(AttributeError):\n getattr(attr, method_not_exist)\n\n remote_proxy.close()\n server.terminate()\n\n\[email protected]\ndef test_evaluate(hook, start_proc): # pragma: no cover\n\n sy.local_worker.clear_objects()\n sy.generic.frameworks.hook.hook_args.hook_method_args_functions = {}\n sy.generic.frameworks.hook.hook_args.hook_method_response_functions = {}\n sy.generic.frameworks.hook.hook_args.get_tensor_type_functions = {}\n sy.generic.frameworks.hook.hook_args.register_response_functions = {}\n\n data, target = utils.iris_data_partial()\n\n dataset = sy.BaseDataset(data=data, targets=target)\n\n kwargs = {\"id\": \"evaluate_remote\", \"host\": \"localhost\", \"port\": 8790, \"hook\": hook}\n dataset_key = \"iris\"\n # TODO: check why unit test sometimes fails when WebsocketServerWorker is started from the unit test. Fails when run after test_federated_client.py\n # process_remote_worker = start_proc(WebsocketServerWorker, dataset=(dataset, dataset_key), verbose=True, **kwargs)\n\n remote_proxy = instantiate_websocket_client_worker(**kwargs)\n\n def loss_fn(pred, target):\n return torch.nn.functional.cross_entropy(input=pred, target=target)\n\n class Net(torch.nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.fc1 = torch.nn.Linear(4, 3)\n\n torch.nn.init.xavier_normal_(self.fc1.weight)\n\n def forward(self, x):\n x = torch.nn.functional.relu(self.fc1(x))\n return x\n\n model_untraced = Net()\n model = torch.jit.trace(model_untraced, data)\n loss_traced = torch.jit.trace(loss_fn, (torch.tensor([[0.3, 0.5, 0.2]]), torch.tensor([1])))\n\n pred = model(data)\n loss_before = loss_fn(target=target, pred=pred)\n if PRINT_IN_UNITTESTS: # pragma: no cover\n print(f\"Loss: {loss_before}\")\n\n # Create and send train config\n train_config = sy.TrainConfig(\n batch_size=4,\n model=model,\n loss_fn=loss_traced,\n model_id=None,\n loss_fn_id=None,\n optimizer_args=None,\n epochs=1,\n )\n train_config.send(remote_proxy)\n\n result = remote_proxy.evaluate(\n dataset_key=dataset_key, return_histograms=True, nr_bins=3, return_loss=True\n )\n\n len_dataset = result[\"nr_predictions\"]\n hist_target = result[\"histogram_target\"]\n\n if PRINT_IN_UNITTESTS: # pragma: no cover\n print(f\"Evaluation result before training: {result}\")\n\n assert len_dataset == 30\n assert (hist_target == [10, 10, 10]).all()\n\n remote_proxy.close()\n remote_proxy.remove_worker_from_local_worker_registry()\n # process_remote_worker.terminate()\n"
] | [
[
"torch.ones",
"torch.randn",
"torch.no_grad",
"torch.tensor",
"torch.zeros",
"torch.cat"
],
[
"torch.ones",
"torch.nn.Linear",
"torch.nn.init.xavier_normal_",
"torch.tensor",
"torch.nn.functional.cross_entropy",
"torch.sort",
"torch.jit.trace"
]
] |
kartik-gatsby/optimized-ising-model | [
"1a9b0210deb26d73f93aec5b0804baaebf9c6ff9"
] | [
"ising_low.py"
] | [
"import numpy as np\nfrom random import random\nimport matplotlib.pyplot as plt\nimport time\nimport logging\n\nlogging.basicConfig(level=logging.INFO,filename='simulation.log', filemode='w',format='%(asctime)s - %(message)s',datefmt='%d-%b-%y %H:%M:%S')\nnp.seterr(all='warn')\n\n#################################################\n# #\n# SIMULATION MACROS #\n# #\n#################################################\n\"\"\"__________________________________________\nSimulation MACROs:\nT_max and T_min is range of temperature.\nnt is number of Temperature points.\nsweeps are number of mc steps per spin.\nmin_meas is minimum number Measurement.\nj_knife_factor is jack knife factor is used when number of measurement interval < 2 x Correlation time.\nAll some_variables0 are default value.\n------------------------------------------\"\"\"\nlogging.info(\"Starting Ising Model Simulation\")\nT_min = 1.5; T_max = 3\nnt = int((T_max-T_min)*10+1)\nsweeps0 = 1000\nmax_sweeps = sweeps0*10\nmin_meas = 100\nj_knife_factor0 = 1\nstartTime = time.time()\nT = np.linspace(T_min, T_max, nt)\n\"\"\"\nWe will work with expanding lattices. We will store expanded lattice for particular temperature. Stored lattice would be used as initial configuration for higher dimenssion lattic size. We have two methods for expanding lattice: zooming and stacking. We recommend stacking for use.\n\"\"\"\nstates = {_: None for _ in T}\n#lattice_sizes = 3**(np.arange(2,5))\n################OR##################\nlattice_sizes = 2**(np.arange(4,8))\n\n#################################################\n# #\n# FUNCTIONS #\n# #\n#################################################\n\"\"\"Onsagar's solutions\"\"\"\ndef onsagar_specific_heat(X):\n const = -(2/2.269)**2*2/np.pi\n return const*np.log(abs(np.ones(len(X))-X/2.269))\ndef onsagar_mag(X):\n lst1 = (1-(np.sinh(np.log(1+np.sqrt(2))*2.269/X[X<2.269]))**(-4))**(1/8)\n lst2 = 0*X[X>=2.269]\n return np.concatenate((lst1,lst2))\n\n\n\"\"\"Monte Carlo Metropolis algorithm\"\"\"\ndef monteCarlo(n, state, energy, mag, beta, sweeps,max_sweeps):\n if sweeps > max_sweeps:\n sweeps = max_sweeps\n exp_betas = np.exp(-beta*np.arange(0,9))\n energies, mags = np.zeros(sweeps), np.zeros(sweeps)\n # random state indices\n J = np.random.randint(0, n, size=(sweeps, n*n))\n K = np.random.randint(0, n, size=(sweeps, n*n))\n #loop\n for t in range(sweeps):\n for tt in range(n*n):\n # random indices\n j, k = J[t, tt], K[t, tt]\n s = state[j,k]\n neighbour_sum = (state[(j-1)%n, k] +\n state[j, (k-1)%n] + state[j, (k+1)%n] +\n state[(j+1)%n, k])\n energy_diff = 2*s*neighbour_sum\n if energy_diff < 0 or random() < exp_betas[energy_diff]:\n s *= -1\n energy += energy_diff\n mag += 2*s\n state[j, k] = s\n energies[t], mags[t] = energy, mag\n return energies, mags\n\n\n\"\"\"Calculation of auto-correlation\"\"\" \ndef autocorrelation(M):\n start_time = time.time()\n tau = 1\n sweeps = len(M)\n auto = np.zeros(sweeps)\n for t in range(sweeps):\n some_time = sweeps-t\n first_term = np.average(M[:some_time]*M[t:sweeps])\n S1 = np.average(M[:some_time])\n S2 = np.average(M[t:sweeps])\n auto_temp = first_term - S1*S2\n if auto_temp > 0:\n auto[t] = auto_temp\n else:#remove oscillating part\n break \n if auto[0] != 0:\n auto = auto[auto>0]\n auto = auto/auto[0] #normalization\n len_auto = len(auto)\n if len_auto > 1: #draw a straight line if you have atleast two points\n tau = int(-1/np.polyfit(np.arange(len_auto), np.log(auto), 1, w=np.sqrt(auto))[0])\n tau = max(tau,1)\n logging.info(f\"Correlation time = {tau}\")\n 
return tau\n\n\n\"\"\"\nCalculation of specific heat or Susceptibility and errorbar.\nCX is Specific Heat or Susceptibility.\nCX_i is Specific Heat or Susceptibility without i-th measurement.\n\"\"\"\ndef jackKnife(EM,factor=1):\n n = len(EM)\n CX = np.var(EM)\n CX_i = np.zeros(n)\n for i in range(n):\n CX_i[i] = np.var(np.delete(EM,i))\n under = np.sum(np.square(np.full(n,CX) - CX_i))\n CX_err = np.sqrt(under*factor)\n return CX, CX_err\n\n\"\"\"\nStacking Lattices: Stacking z lattice and taking advantage of periodic boundary condition. The energy and magnetization would also increase as system size increase as they are extensive state variables. Other trick to explore is Zoom.\n\"\"\"\ndef stackLattice(z,state,energy,mag):\n h_stack_state = state\n for _ in range(z-1):\n h_stack_state = np.hstack((h_stack_state,state))\n v_stack_state = h_stack_state\n for _ in range(z-1):\n v_stack_state = np.vstack((v_stack_state,h_stack_state))\n return (v_stack_state, z*z*energy, z*z*mag)\n\n#################################################\n# #\n# MAIN #\n# #\n#################################################\n\"\"\"we will plot the following wrt temperature, T\"\"\"\nplotEnergy = np.zeros(nt)\nplotMag = np.zeros(nt)\nplotChi = np.zeros(nt)\nplotChi_err = np.zeros(nt)\nplotSH = np.zeros(nt)\nplotSH_err = np.zeros(nt)\nplotCorrelation = np.zeros(nt)\n\n\n\"\"\"\nPreparing n x n lattice with all spins up.\nHere, z is a zoom factor or a stacking factor.\n\"\"\"\nn = min(lattice_sizes)\nN = n*n\nz = lattice_sizes[1]//lattice_sizes[0]\nstate = np.ones((n,n),dtype=\"int\")\nenergy, mag = -N, N\n\"\"\"lattice size loop\"\"\"\nfor n in lattice_sizes:\n logging.info(f\"Lattice size is {n}x{n}\")\n print(f\"Lattice size is {n}x{n}\")\n N = n*n\n \"\"\"temperature loop\"\"\"\n for k in range(nt):\n temp = T[k]\n Beta=1/temp\n if states[temp] != None:\n (state,energy,mag) = states[temp]\n logging.info(\"_\"*35)\n logging.info(\"Temperature is %0.2f, time elapsed %d\" %(temp,time.time()-startTime))\n sweeps = sweeps0; j_knife_factor = j_knife_factor0; measurements = 0\n E, M = np.zeros(0), np.zeros(0)\n while measurements < min_meas:\n energies, mags = monteCarlo(n, state, energy, mag, Beta, sweeps, max_sweeps//10)\n energy, mag = energies[-1], mags[-1]\n E = np.concatenate((E,energies))\n M = np.concatenate((M,mags))\n delta_int = eq_time = 2*autocorrelation(M)\n measurements = len(E[eq_time::delta_int])\n logging.info(f\"{measurements} measurements are possible\")\n if measurements < min_meas:\n _energies_ = len(E)\n if _energies_ < max_sweeps:\n sweeps = delta_int*(min_meas-measurements)\n logging.info(f\"\\tdoing {sweeps} more sweeps\")\n else:\n delta_int = (_energies_-eq_time)//min_meas\n j_knife_factor = eq_time/delta_int\n measurements = len(E[eq_time::delta_int])\n logging.info(f\"We will do {measurements} measurements\")\n \n \n #doing measurements\n E = E[eq_time::delta_int]\n M = M[eq_time::delta_int]\n plotMag[k] = np.average(M)/N\n Chi, Chi_err = jackKnife(M,j_knife_factor)\n plotChi[k] =Chi*Beta/N\n plotChi_err[k] =Chi_err*Beta/N\n plotEnergy[k] = np.average(E)/N\n sp_heat, sp_heat_err = jackKnife(E,j_knife_factor)\n plotSH[k] = sp_heat*Beta*Beta/N\n plotSH_err[k] = sp_heat_err*Beta*Beta/N\n plotCorrelation[k] = eq_time//2\n \n \n #lattice expansion\n states[temp] = stackLattice(z,state,energy,mag)\n #states[temp] = zoomLattice(z,state,energy,mag)\n \n \n #PLOTS##PLOTS##PLOTS##PLOTS##PLOTS##PLOTS##PLOTS##PLOTS#\n f = plt.figure(figsize=(16, 9));\n title_name = \"Size:\"+str(n)+\"x\"+str(n)\n 
plt.title(title_name, color='b');\n\n sp = f.add_subplot(2, 2, 1 );\n plt.scatter(T, plotEnergy, s=50, marker='o', color='IndianRed')\n plt.xlabel(\"Temperature (T)\", fontsize=20);\n plt.ylabel(\"Energy \", fontsize=20); plt.axis('tight');\n\n sp = f.add_subplot(2, 2, 2 );\n plt.scatter(T, abs(np.array(plotMag)), s=50, marker='o', color='IndianRed', label = \"data\")\n temp_list = np.linspace(T_min, T_max, 10000)\n plt.plot(temp_list, onsagar_mag(temp_list) , color='blue', label = \"Onsager Solution\") \n plt.legend()\n plt.xlabel(\"Temperature (T)\", fontsize=20); \n plt.ylabel(\"Magnetization \", fontsize=20); plt.axis('tight');\n\n sp = f.add_subplot(2, 2, 3 );\n plt.errorbar(T, plotSH, yerr = plotSH_err, fmt='o', color='IndianRed', label = \"data\")\n plt.plot(temp_list, onsagar_specific_heat(temp_list), color='RoyalBlue', label = \"Onsager Solution\") \n plt.legend()\n plt.xlabel(\"Temperature (T)\", fontsize=20); \n plt.ylabel(\"Specific Heat \", fontsize=20); plt.axis('tight'); \n\n sp = f.add_subplot(2, 2, 4 );\n plt.errorbar(T, plotChi, yerr = plotChi_err, fmt='o', color='IndianRed', label = \"data\")\n plt.xlabel(\"Temperature (T)\", fontsize=20); \n plt.ylabel(\"Susceptibility\", fontsize=20); plt.axis('tight');\n\n timeIs = time.strftime(\"%H-%M-%S\")\n plt.savefig(timeIs+'.pdf')\n \n #storing measurements in in a file\n with open(str(n)+\"data\",\"w\") as file:\n file.write(\"##Temp\\tEnergy\\tMag\\tSp_ht\\tSp_ht_err\\tChi\\tChi_err\\ttau\\n\")\n for i in range(nt):\n file.write(str(T[i])+\"\\t\"+str(plotEnergy[i])+\"\\t\"+str(plotMag[i])+\"\\t\"+str(plotSH[i])+\"\\t\"+str(plotSH_err[i])+\"\\t\"+str(plotChi[i])+\"\\t\"+str(plotChi_err[i])+\"\\t\"+str(plotCorrelation[i])+\"\\t\"+\"\\n\")\n"
] | [
[
"numpy.ones",
"numpy.var",
"numpy.log",
"matplotlib.pyplot.ylabel",
"numpy.full",
"numpy.vstack",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"numpy.seterr",
"matplotlib.pyplot.title",
"numpy.delete",
"numpy.average",
"numpy.linspace",
"matplotlib.pyplot.scatter",
"numpy.zeros",
"matplotlib.pyplot.axis",
"numpy.arange",
"numpy.hstack",
"numpy.array",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.errorbar",
"numpy.sqrt",
"numpy.concatenate",
"numpy.random.randint",
"matplotlib.pyplot.xlabel"
]
] |
RangeKing/PaddleViT | [
"0e25958686e04ed8872cf67fba0dfd6918e9b4dd",
"0e25958686e04ed8872cf67fba0dfd6918e9b4dd"
] | [
"image_classification/MLP-Mixer/load_pytorch_weights.py",
"image_classification/PiT/main_multi_gpu_distill.py"
] | [
"# Copyright (c) 2021 PPViT Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"convert pytorch model weights to paddle pdparams\"\"\"\nimport os\nimport numpy as np\nimport paddle\nimport torch\nimport timm\nfrom mlp_mixer import build_mlp_mixer as build_model\nfrom config import get_config\n\n\ndef print_model_named_params(model):\n print('----------------------------------')\n for name, param in model.named_parameters():\n print(name, param.shape)\n print('----------------------------------')\n\n\ndef print_model_named_buffers(model):\n print('----------------------------------')\n for name, param in model.named_buffers():\n print(name, param.shape)\n print('----------------------------------')\n\n\ndef torch_to_paddle_mapping(model_name, config):\n mapping = [\n ('stem.proj', 'patch_embed.patch_embed'),\n ]\n\n for stage_idx in range(config.MODEL.MIXER.DEPTH):\n th_prefix = f'blocks.{stage_idx}'\n pp_prefix = f'mixer_layers.{stage_idx}'\n\n layer_mapping = [\n (f'{th_prefix}.norm1', f'{pp_prefix}.norm1'),\n (f'{th_prefix}.norm2', f'{pp_prefix}.norm2'),\n (f'{th_prefix}.mlp_tokens.fc1', f'{pp_prefix}.mlp_tokens.fc1'),\n (f'{th_prefix}.mlp_tokens.fc2', f'{pp_prefix}.mlp_tokens.fc2'),\n (f'{th_prefix}.mlp_channels.fc1', f'{pp_prefix}.mlp_channels.fc1'),\n (f'{th_prefix}.mlp_channels.fc2', f'{pp_prefix}.mlp_channels.fc2'),\n ]\n mapping.extend(layer_mapping)\n\n head_mapping = [\n ('norm', 'norm'),\n ('head', 'head'),\n ]\n mapping.extend(head_mapping)\n\n return mapping\n\n\n\ndef convert(torch_model, paddle_model, model_name, config):\n def _set_value(th_name, pd_name, transpose=True):\n th_shape = th_params[th_name].shape\n pd_shape = tuple(pd_params[pd_name].shape) # paddle shape default type is list\n #assert th_shape == pd_shape, f'{th_shape} != {pd_shape}'\n print(f'**SET** {th_name} {th_shape} **TO** {pd_name} {pd_shape}')\n if isinstance(th_params[th_name], torch.nn.parameter.Parameter):\n value = th_params[th_name].data.numpy()\n else:\n value = th_params[th_name].numpy()\n\n if len(value.shape) == 2 and transpose:\n value = value.transpose((1, 0))\n pd_params[pd_name].set_value(value)\n\n # 1. get paddle and torch model parameters\n pd_params = {}\n th_params = {}\n for name, param in paddle_model.named_parameters():\n pd_params[name] = param\n for name, param in torch_model.named_parameters():\n th_params[name] = param\n\n for name, param in paddle_model.named_buffers():\n pd_params[name] = param\n for name, param in torch_model.named_buffers():\n th_params[name] = param\n\n # 2. 
get name mapping pairs\n mapping = torch_to_paddle_mapping(model_name, config)\n\n\n missing_keys_th = []\n missing_keys_pd = []\n zip_map = list(zip(*mapping))\n th_keys = list(zip_map[0])\n pd_keys = list(zip_map[1])\n\n for key in th_params:\n missing = False\n if key not in th_keys:\n missing = True\n if key.endswith('.weight'):\n if key[:-7] in th_keys:\n missing = False\n if key.endswith('.bias'):\n if key[:-5] in th_keys:\n missing = False\n if missing:\n missing_keys_th.append(key)\n\n for key in pd_params:\n missing = False\n if key not in pd_keys:\n missing = True\n if key.endswith('.weight'):\n if key[:-7] in pd_keys:\n missing = False\n if key.endswith('.bias'):\n if key[:-5] in pd_keys:\n missing = False\n if missing:\n missing_keys_pd.append(key)\n\n\n print('====================================')\n print('missing_keys_pytorch:')\n print(missing_keys_th)\n print('missing_keys_paddle:')\n print(missing_keys_pd)\n print('====================================')\n\n # 3. set torch param values to paddle params: may needs transpose on weights\n for th_name, pd_name in mapping:\n if th_name in th_params and pd_name in pd_params: # nn.Parameters\n _set_value(th_name, pd_name)\n else:\n if f'{th_name}.weight' in th_params and f'{pd_name}.weight' in pd_params:\n th_name_w = f'{th_name}.weight'\n pd_name_w = f'{pd_name}.weight'\n _set_value(th_name_w, pd_name_w)\n if f'{th_name}.bias' in th_params and f'{pd_name}.bias' in pd_params:\n th_name_b = f'{th_name}.bias'\n pd_name_b = f'{pd_name}.bias'\n _set_value(th_name_b, pd_name_b)\n\n return paddle_model\n\n\ndef main():\n paddle.set_device('cpu')\n model_name_list = [\n 'mixer_b16_224',\n 'mixer_l16_224',\n ]\n\n for model_name in model_name_list:\n print(f'============= NOW: {model_name} =============')\n sz = 224\n config = get_config(f'./configs/{model_name}.yaml')\n\n paddle_model = build_model(config)\n\n paddle_model.eval()\n print_model_named_params(paddle_model)\n print_model_named_buffers(paddle_model)\n\n print('+++++++++++++++++++++++++++++++++++')\n device = torch.device('cpu')\n torch_model = timm.create_model(model_name, pretrained=True)\n torch_model = torch_model.to(device)\n torch_model.eval()\n print_model_named_params(torch_model)\n print_model_named_buffers(torch_model)\n\n # convert weights\n paddle_model = convert(torch_model, paddle_model, model_name, config)\n\n # check correctness\n x = np.random.randn(2, 3, sz, sz).astype('float32')\n x_paddle = paddle.to_tensor(x)\n x_torch = torch.Tensor(x).to(device)\n\n out_torch = torch_model(x_torch)\n out_paddle = paddle_model(x_paddle)\n\n out_torch = out_torch.data.cpu().numpy()\n out_paddle = out_paddle.cpu().numpy()\n\n print(out_torch.shape, out_paddle.shape)\n print(out_torch[0, 0:100])\n print('========================================================')\n print(out_paddle[0, 0:100])\n assert np.allclose(out_torch, out_paddle, atol = 1e-3)\n\n # save weights for paddle model\n model_path = os.path.join(f'./{model_name}.pdparams')\n paddle.save(paddle_model.state_dict(), model_path)\n print(f'{model_name} done')\n print('all done')\n\n\nif __name__ == \"__main__\":\n main()\n",
"# Copyright (c) 2021 PPViT Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"PiT train and eval using multiple GPU without teacher model and distillation\"\"\"\nimport sys\nimport os\nimport time\nimport argparse\nimport random\nimport math\nimport numpy as np\nimport paddle\nfrom datasets import get_dataloader\nfrom datasets import get_dataset\nfrom config import get_config\nfrom config import update_config\nfrom utils import AverageMeter\nfrom utils import get_logger\nfrom utils import write_log\nfrom utils import all_reduce_mean\nfrom utils import skip_weight_decay_fn\nfrom mixup import Mixup\nfrom model_ema import ModelEma\nfrom losses import LabelSmoothingCrossEntropyLoss\nfrom losses import SoftTargetCrossEntropyLoss\nfrom losses import DistillationLoss\nfrom regnet import build_regnet as build_teacher_model\nfrom pit import build_pit as build_model\n\n\ndef get_arguments():\n \"\"\"return argumeents, this will overwrite the config by (1) yaml file (2) argument values\"\"\"\n parser = argparse.ArgumentParser('PiT')\n parser.add_argument('-cfg', type=str, default=None)\n parser.add_argument('-dataset', type=str, default=None)\n parser.add_argument('-data_path', type=str, default=None)\n parser.add_argument('-output', type=str, default=None)\n parser.add_argument('-batch_size', type=int, default=None)\n parser.add_argument('-batch_size_eval', type=int, default=None)\n parser.add_argument('-image_size', type=int, default=None)\n parser.add_argument('-accum_iter', type=int, default=None)\n parser.add_argument('-pretrained', type=str, default=None)\n parser.add_argument('-teacher_model_path', type=str, default=None)\n parser.add_argument('-resume', type=str, default=None)\n parser.add_argument('-last_epoch', type=int, default=None)\n parser.add_argument('-eval', action='store_true')\n parser.add_argument('-amp', action='store_true')\n arguments = parser.parse_args()\n return arguments\n\n\ndef train(dataloader,\n model,\n optimizer,\n criterion,\n epoch,\n total_epochs,\n total_batches,\n debug_steps=100,\n accum_iter=1,\n model_ema=None,\n mixup_fn=None,\n amp_grad_scaler=None,\n local_logger=None,\n master_logger=None):\n \"\"\"Training for one epoch\n Args:\n dataloader: paddle.io.DataLoader, dataloader instance\n model: nn.Layer, a ViT model\n optimizer: nn.optimizer\n criterion: nn.XXLoss\n epoch: int, current epoch\n total_epochs: int, total num of epochs\n total_batches: int, total num of batches for one epoch\n debug_steps: int, num of iters to log info, default: 100\n accum_iter: int, num of iters for accumulating gradients, default: 1\n model_ema: ModelEma, model moving average instance\n mixup_fn: Mixup, mixup instance, default: None\n amp_grad_scaler: GradScaler, if not None pass the GradScaler and enable AMP, default: None\n local_logger: logger for local process/gpu, default: None\n master_logger: logger for main process, default: None\n Returns:\n train_loss_meter.avg: float, average loss on current process/gpu\n train_acc_meter.avg: 
float, average acc@1 on current process/gpu\n master_loss_meter.avg: float, average loss on all processes/gpus\n master_acc_meter.avg: float, average acc@1 on all processes/gpus\n train_time: float, training time\n \"\"\"\n time_st = time.time()\n train_loss_meter = AverageMeter()\n train_acc_meter = AverageMeter()\n master_loss_meter = AverageMeter()\n master_acc_meter = AverageMeter()\n\n model.train()\n optimizer.clear_grad()\n\n for batch_id, data in enumerate(dataloader):\n # get data\n images = data[0]\n label = data[1]\n label_orig = label.clone()\n batch_size = images.shape[0]\n\n if mixup_fn is not None:\n images, label = mixup_fn(images, label_orig)\n\n # forward\n with paddle.amp.auto_cast(amp_grad_scaler is not None):\n output = model(images)\n loss = criterion(images, output, label)\n\n loss_value = loss.item()\n if not math.isfinite(loss_value):\n print(\"Loss is {}, stopping training\".format(loss_value))\n sys.exit(1)\n\n loss = loss / accum_iter\n\n # backward and step\n if amp_grad_scaler is None: # fp32\n loss.backward()\n if ((batch_id + 1) % accum_iter == 0) or (batch_id + 1 == len(dataloader)):\n optimizer.step()\n optimizer.clear_grad()\n else: # amp\n scaled_loss = amp_grad_scaler.scale(loss)\n scaled_loss.backward()\n if ((batch_id + 1) % accum_iter == 0) or (batch_id + 1 == len(dataloader)):\n # amp for param group reference: https://github.com/PaddlePaddle/Paddle/issues/37188\n amp_grad_scaler.step(optimizer)\n amp_grad_scaler.update()\n optimizer.clear_grad()\n\n if model_ema is not None and paddle.distributed.get_rank() == 0:\n model_ema.update(model)\n\n # average of output and kd_output, same as eval mode\n pred = paddle.nn.functional.softmax((output[0] + output[1]) / 2)\n acc = paddle.metric.accuracy(pred,\n label_orig if mixup_fn else label_orig.unsqueeze(1)).item()\n\n # sync from other gpus for overall loss and acc\n master_loss = all_reduce_mean(loss_value)\n master_acc = all_reduce_mean(acc)\n master_batch_size = all_reduce_mean(batch_size)\n\n master_loss_meter.update(master_loss, master_batch_size)\n master_acc_meter.update(master_acc, master_batch_size)\n train_loss_meter.update(loss_value, batch_size)\n train_acc_meter.update(acc, batch_size)\n\n if batch_id % debug_steps == 0 or batch_id + 1 == len(dataloader):\n general_message = (f\"Epoch[{epoch:03d}/{total_epochs:03d}], \"\n f\"Step[{batch_id:04d}/{total_batches:04d}], \"\n f\"Lr: {optimizer.get_lr():04f}, \")\n local_message = (general_message +\n f\"Loss: {loss_value:.4f} ({train_loss_meter.avg:.4f}), \"\n f\"Avg Acc: {train_acc_meter.avg:.4f}\")\n master_message = (general_message +\n f\"Loss: {master_loss:.4f} ({master_loss_meter.avg:.4f}), \"\n f\"Avg Acc: {master_acc_meter.avg:.4f}\")\n write_log(local_logger, master_logger, local_message, master_message)\n\n paddle.distributed.barrier()\n train_time = time.time() - time_st\n return (train_loss_meter.avg,\n train_acc_meter.avg,\n master_loss_meter.avg,\n master_acc_meter.avg,\n train_time)\n\n\[email protected]_grad()\ndef validate(dataloader,\n model,\n criterion,\n total_batches,\n debug_steps=100,\n local_logger=None,\n master_logger=None):\n \"\"\"Validation for the whole dataset\n Args:\n dataloader: paddle.io.DataLoader, dataloader instance\n model: nn.Layer, a ViT model\n total_batches: int, total num of batches for one epoch\n debug_steps: int, num of iters to log info, default: 100\n local_logger: logger for local process/gpu, default: None\n master_logger: logger for main process, default: None\n Returns:\n val_loss_meter.avg: 
float, average loss on current process/gpu\n val_acc1_meter.avg: float, average top1 accuracy on current processes/gpus\n val_acc5_meter.avg: float, average top5 accuracy on current processes/gpus\n master_loss_meter.avg: float, average loss on all processes/gpus\n master_acc1_meter.avg: float, average top1 accuracy on all processes/gpus\n master_acc5_meter.avg: float, average top5 accuracy on all processes/gpus\n val_time: float, validation time\n \"\"\"\n model.eval()\n val_loss_meter = AverageMeter()\n val_acc1_meter = AverageMeter()\n val_acc5_meter = AverageMeter()\n master_loss_meter = AverageMeter()\n master_acc1_meter = AverageMeter()\n master_acc5_meter = AverageMeter()\n\n time_st = time.time()\n\n for batch_id, data in enumerate(dataloader):\n # get data\n images = data[0]\n label = data[1]\n batch_size = images.shape[0]\n\n output = model(images)\n loss = criterion(output, label)\n loss_value = loss.item()\n\n pred = paddle.nn.functional.softmax(output)\n acc1 = paddle.metric.accuracy(pred, label.unsqueeze(1)).item()\n acc5 = paddle.metric.accuracy(pred, label.unsqueeze(1), k=5).item()\n\n # sync from other gpus for overall loss and acc\n master_loss = all_reduce_mean(loss_value)\n master_acc1 = all_reduce_mean(acc1)\n master_acc5 = all_reduce_mean(acc5)\n master_batch_size = all_reduce_mean(batch_size)\n\n master_loss_meter.update(master_loss, master_batch_size)\n master_acc1_meter.update(master_acc1, master_batch_size)\n master_acc5_meter.update(master_acc5, master_batch_size)\n val_loss_meter.update(loss_value, batch_size)\n val_acc1_meter.update(acc1, batch_size)\n val_acc5_meter.update(acc5, batch_size)\n\n if batch_id % debug_steps == 0:\n local_message = (f\"Step[{batch_id:04d}/{total_batches:04d}], \"\n f\"Avg Loss: {val_loss_meter.avg:.4f}, \"\n f\"Avg Acc@1: {val_acc1_meter.avg:.4f}, \"\n f\"Avg Acc@5: {val_acc5_meter.avg:.4f}\")\n master_message = (f\"Step[{batch_id:04d}/{total_batches:04d}], \"\n f\"Avg Loss: {master_loss_meter.avg:.4f}, \"\n f\"Avg Acc@1: {master_acc1_meter.avg:.4f}, \"\n f\"Avg Acc@5: {master_acc5_meter.avg:.4f}\")\n write_log(local_logger, master_logger, local_message, master_message)\n paddle.distributed.barrier()\n val_time = time.time() - time_st\n return (val_loss_meter.avg,\n val_acc1_meter.avg,\n val_acc5_meter.avg,\n master_loss_meter.avg,\n master_acc1_meter.avg,\n master_acc5_meter.avg,\n val_time)\n\n\ndef main_worker(*args):\n \"\"\"main method for each process\"\"\"\n # STEP 0: Preparation\n paddle.device.set_device('gpu')\n paddle.distributed.init_parallel_env()\n world_size = paddle.distributed.get_world_size()\n local_rank = paddle.distributed.get_rank()\n config = args[0]\n last_epoch = config.TRAIN.LAST_EPOCH\n seed = config.SEED + local_rank\n paddle.seed(seed)\n np.random.seed(seed)\n random.seed(seed)\n local_logger, master_logger = get_logger(config.SAVE)\n message = (f'----- world_size = {world_size}, local_rank = {local_rank} \\n'\n f'----- {config}')\n write_log(local_logger, master_logger, message)\n\n # STEP 1: Create model\n model = build_model(config)\n\n # define model ema\n model_ema = None\n if not config.EVAL and config.TRAIN.MODEL_EMA and local_rank == 0:\n model_ema = ModelEma(model, decay=config.TRAIN.MODEL_EMA_DECAY)\n if config.TRAIN.MODEL_EMA_FORCE_CPU:\n model_ema.to('cpu')\n\n # STEP 2: Create train and val dataloader\n if not config.EVAL:\n dataset_train = args[1]\n dataloader_train = get_dataloader(config, dataset_train, True, True)\n total_batch_train = len(dataloader_train)\n message = f'----- Total # 
of train batch (single gpu): {total_batch_train}'\n write_log(local_logger, master_logger, message)\n\n dataset_val = args[2]\n dataloader_val = get_dataloader(config, dataset_val, False, True)\n total_batch_val = len(dataloader_val)\n message = f'----- Total # of val batch (single gpu): {total_batch_val}'\n write_log(local_logger, master_logger, message)\n\n # STEP 3: (Optional) Define Mixup function\n mixup_fn = None\n if (config.TRAIN.MIXUP_PROB > 0 or config.TRAIN.CUTMIX_ALPHA > 0 or\n config.TRAIN.CUTMIX_MINMAX is not None):\n mixup_fn = Mixup(mixup_alpha=config.TRAIN.MIXUP_ALPHA,\n cutmix_alpha=config.TRAIN.CUTMIX_ALPHA,\n cutmix_minmax=config.TRAIN.CUTMIX_MINMAX,\n prob=config.TRAIN.MIXUP_PROB,\n switch_prob=config.TRAIN.MIXUP_SWITCH_PROB,\n mode=config.TRAIN.MIXUP_MODE,\n label_smoothing=config.TRAIN.SMOOTHING)#\n\n # STEP 4: Define loss/criterion\n if mixup_fn is not None:\n criterion = SoftTargetCrossEntropyLoss()\n elif config.TRAIN.SMOOTHING:\n criterion = LabelSmoothingCrossEntropyLoss()\n else:\n criterion = paddle.nn.CrossEntropyLoss()\n # Use CrossEntropyLoss for val\n criterion_val = paddle.nn.CrossEntropyLoss()\n\n # STEP 5: Create Teacher model and distill loss\n teacher_model = None\n if not config.EVAL:\n if config.TRAIN.DISTILLATION_TYPE != 'none':\n write_log(local_logger, master_logger,\n f'----- Load teacher model: {config.TRAIN.TEACHER_MODEL}')\n teacher_model = build_teacher_model()\n assert os.path.isfile(config.TRAIN.TEACHER_MODEL)\n teacher_model_state = paddle.load(config.TRAIN.TEACHER_MODEL)\n teacher_model.set_state_dict(teacher_model_state)\n teacher_model.eval()\n teacher_model = paddle.DataParallel(teacher_model)\n # wrap the criterion:\n criterion = DistillationLoss(criterion,\n teacher_model,\n config.TRAIN.DISTILLATION_TYPE,\n config.TRAIN.DISTILLATION_ALPHA,\n config.TRAIN.DISTILLATION_TAU)\n\n # STEP 5: Define optimizer and lr_scheduler\n if not config.EVAL:\n # set lr according to batch size and world size\n if config.TRAIN.LINEAR_SCALED_LR is not None:\n effective_batch_size = config.DATA.BATCH_SIZE * config.TRAIN.ACCUM_ITER * world_size\n config.TRAIN.BASE_LR = (\n config.TRAIN.BASE_LR * effective_batch_size / config.TRAIN.LINEAR_SCALED_LR\n )\n config.TRAIN.WARMUP_START_LR = (\n config.TRAIN.WARMUP_START_LR* effective_batch_size / config.TRAIN.LINEAR_SCALED_LR\n )\n config.TRAIN.END_LR = (\n config.TRAIN.END_LR * effective_batch_size / config.TRAIN.LINEAR_SCALED_LR\n )\n message = (f'Base lr is scaled to: {config.TRAIN.BASE_LR}, '\n f'warmup start lr is scaled to: {config.TRAIN.WARMUP_START_LR}, '\n f'end lr is scaled to: {config.TRAIN.BASE_LR}')\n write_log(local_logger, master_logger, message)\n # define scaler for amp training\n amp_grad_scaler = paddle.amp.GradScaler() if config.AMP else None\n # warmup + cosine lr scheduler\n if config.TRAIN.WARMUP_EPOCHS > 0:\n cosine_lr_scheduler = paddle.optimizer.lr.CosineAnnealingDecay(\n learning_rate=config.TRAIN.BASE_LR,\n T_max=config.TRAIN.NUM_EPOCHS - config.TRAIN.WARMUP_EPOCHS,\n eta_min=config.TRAIN.END_LR,\n last_epoch=-1) # do not set last epoch, handled in warmup sched get_lr()\n lr_scheduler = paddle.optimizer.lr.LinearWarmup(\n learning_rate=cosine_lr_scheduler, # use cosine lr sched after warmup\n warmup_steps=config.TRAIN.WARMUP_EPOCHS, # only support position integet\n start_lr=config.TRAIN.WARMUP_START_LR,\n end_lr=config.TRAIN.BASE_LR,\n last_epoch=config.TRAIN.LAST_EPOCH)\n else:\n lr_scheduler = paddle.optimizer.lr.CosineAnnealingDecay(\n learning_rate=config.TRAIN.BASE_LR,\n 
T_max=config.TRAIN.NUM_EPOCHS,\n eta_min=config.TRAIN.END_LR,\n last_epoch=config.TRAIN.LAST_EPOCH)\n\n # set gradient clip\n if config.TRAIN.GRAD_CLIP:\n clip = paddle.nn.ClipGradByGlobalNorm(config.TRAIN.GRAD_CLIP)\n else:\n clip = None\n # set optimizer\n optimizer = paddle.optimizer.AdamW(\n parameters=model.parameters(),\n learning_rate=lr_scheduler, # set to scheduler\n beta1=config.TRAIN.OPTIMIZER.BETAS[0],\n beta2=config.TRAIN.OPTIMIZER.BETAS[1],\n weight_decay=config.TRAIN.WEIGHT_DECAY,\n epsilon=config.TRAIN.OPTIMIZER.EPS,\n grad_clip=clip,\n apply_decay_param_fun=skip_weight_decay_fn(\n model, # skip bn and bias\n ['pos_embed', 'cls_token', 'dist_token']), # skip custom ops\n )\n\n # STEP 6: (Optional) Load pretrained model weights for evaluation or finetuning\n if config.MODEL.PRETRAINED:\n assert os.path.isfile(config.MODEL.PRETRAINED) is True\n model_state = paddle.load(config.MODEL.PRETRAINED)\n if 'model' in model_state: # load state_dict with multi items: model, optimier, and epoch\n # pretrain only load model weight, opt and epoch are ignored\n if 'model_ema' in model_state:\n model_state = model_state['model_ema']\n else:\n model_state = model_state['model']\n model.set_state_dict(model_state)\n message = f\"----- Pretrained: Load model state from {config.MODEL.PRETRAINED}\"\n write_log(local_logger, master_logger, message)\n\n # STEP 7: (Optional) Load model weights and status for resume training\n if config.MODEL.RESUME:\n assert os.path.isfile(config.MODEL.RESUME) is True\n model_state = paddle.load(config.MODEL.RESUME)\n if 'model' in model_state: # load state_dict with multi items: model, optimier, and epoch\n model.set_state_dict(model_state['model'])\n\n if 'optimizer' in model_state:\n optimizer.set_state_dict(model_state['optimizer'])\n if 'epoch' in model_state:\n config.TRAIN.LAST_EPOCH = model_state['epoch']\n last_epoch = model_state['epoch']\n if 'lr_scheduler' in model_state:\n lr_scheduler.set_state_dict(model_state['lr_scheduler'])\n if 'amp_grad_scaler' in model_state and amp_grad_scaler is not None:\n amp_grad_scaler.load_state_dict(model_state['amp_grad_scaler'])\n if config.TRAIN.MODEL_EMA and local_rank == 0:\n model_ema.module.set_state_dict(model_state['model_ema'])\n\n lr_scheduler.step(last_epoch + 1)\n\n message = (f\"----- Resume Training: Load model from {config.MODEL.RESUME}, w/t \"\n f\"opt = [{'optimizer' in model_state}], \"\n f\"lr_scheduler = [{'lr_scheduler' in model_state}], \"\n f\"model_ema = [{'model_ema' in model_state}], \"\n f\"epoch = [{model_state.get('epoch', -1)}], \"\n f\"amp_grad_scaler = [{'amp_grad_scaler' in model_state}]\")\n write_log(local_logger, master_logger, message)\n else: # direct load pdparams without other items\n message = f\"----- Resume Training: Load {config.MODEL.RESUME}, w/o opt/epoch/scaler\"\n write_log(local_logger, master_logger, message, 'warning')\n model.set_state_dict(model_state)\n lr_scheduler.step(last_epoch + 1)\n\n # STEP 8: Enable model data parallelism on multi processes\n model = paddle.DataParallel(model)\n\n # STEP 9: (Optional) Run evaluation and return\n if config.EVAL:\n write_log(local_logger, master_logger, \"----- Start Validation\")\n val_loss, val_acc1, val_acc5, avg_loss, avg_acc1, avg_acc5, val_time = validate(\n dataloader=dataloader_val,\n model=model,\n criterion=criterion_val,\n total_batches=total_batch_val,\n debug_steps=config.REPORT_FREQ,\n local_logger=local_logger,\n master_logger=master_logger)\n local_message = (\"----- Validation: \" +\n f\"Validation Loss: 
{val_loss:.4f}, \" +\n f\"Validation Acc@1: {val_acc1:.4f}, \" +\n f\"Validation Acc@5: {val_acc5:.4f}, \" +\n f\"time: {val_time:.2f}\")\n master_message = (\"----- Validation: \" +\n f\"Validation Loss: {avg_loss:.4f}, \" +\n f\"Validation Acc@1: {avg_acc1:.4f}, \" +\n f\"Validation Acc@5: {avg_acc5:.4f}, \" +\n f\"time: {val_time:.2f}\")\n write_log(local_logger, master_logger, local_message, master_message)\n return\n\n # STEP 10: Run training\n write_log(local_logger, master_logger, f\"----- Start training from epoch {last_epoch+1}.\")\n for epoch in range(last_epoch + 1, config.TRAIN.NUM_EPOCHS + 1):\n # Train one epoch\n write_log(local_logger, master_logger, f\"Train epoch {epoch}. LR={optimizer.get_lr():.6e}\")\n train_loss, train_acc, avg_loss, avg_acc, train_time = train(\n dataloader=dataloader_train,\n model=model,\n optimizer=optimizer,\n criterion=criterion,\n epoch=epoch,\n total_epochs=config.TRAIN.NUM_EPOCHS,\n total_batches=total_batch_train,\n debug_steps=config.REPORT_FREQ,\n accum_iter=config.TRAIN.ACCUM_ITER,\n model_ema=model_ema,\n mixup_fn=mixup_fn,\n amp_grad_scaler=amp_grad_scaler,\n local_logger=local_logger,\n master_logger=master_logger)\n\n # update lr\n lr_scheduler.step()\n\n general_message = (f\"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], \"\n f\"Lr: {optimizer.get_lr():.4f}, \"\n f\"time: {train_time:.2f}, \")\n local_message = (general_message +\n f\"Train Loss: {train_loss:.4f}, \"\n f\"Train Acc: {train_acc:.4f}\")\n master_message = (general_message +\n f\"Train Loss: {avg_loss:.4f}, \"\n f\"Train Acc: {avg_acc:.4f}\")\n write_log(local_logger, master_logger, local_message, master_message)\n\n # Evaluation (optional)\n if epoch % config.VALIDATE_FREQ == 0 or epoch == config.TRAIN.NUM_EPOCHS:\n write_log(local_logger, master_logger, f'----- Validation after Epoch: {epoch}')\n val_loss, val_acc1, val_acc5, avg_loss, avg_acc1, avg_acc5, val_time = validate(\n dataloader=dataloader_val,\n model=model,\n criterion=criterion_val,\n total_batches=total_batch_val,\n debug_steps=config.REPORT_FREQ,\n local_logger=local_logger,\n master_logger=master_logger)\n local_message = (f\"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], \" +\n f\"Validation Loss: {val_loss:.4f}, \" +\n f\"Validation Acc@1: {val_acc1:.4f}, \" +\n f\"Validation Acc@5: {val_acc5:.4f}, \" +\n f\"time: {val_time:.2f}\")\n master_message = (f\"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], \" +\n f\"Validation Loss: {avg_loss:.4f}, \" +\n f\"Validation Acc@1: {avg_acc1:.4f}, \" +\n f\"Validation Acc@5: {avg_acc5:.4f}, \" +\n f\"time: {val_time:.2f}\")\n write_log(local_logger, master_logger, local_message, master_message)\n\n # Save model weights and training status\n if local_rank == 0:\n if epoch % config.SAVE_FREQ == 0 or epoch == config.TRAIN.NUM_EPOCHS:\n model_path = os.path.join(\n config.SAVE, f\"Epoch-{epoch}-Loss-{avg_loss}.pdparams\")\n state_dict = dict()\n state_dict['model'] = model.state_dict()\n if model_ema is not None:\n state_dict['model_ema'] = model_ema.state_dict()\n state_dict['optimizer'] = optimizer.state_dict()\n state_dict['epoch'] = epoch\n if lr_scheduler is not None:\n state_dict['lr_scheduler'] = lr_scheduler.state_dict()\n if amp_grad_scaler is not None:\n state_dict['amp_grad_scaler'] = amp_grad_scaler.state_dict()\n paddle.save(state_dict, model_path)\n message = (f\"----- Save model: {model_path}\")\n write_log(local_logger, master_logger, message)\n\n\ndef main():\n # config is updated in order: (1) default in config.py, (2) 
yaml file, (3) arguments\n config = update_config(get_config(), get_arguments())\n\n # set output folder\n config.SAVE = os.path.join(config.SAVE,\n f\"{'eval' if config.EVAL else 'train'}-{time.strftime('%Y%m%d-%H-%M')}\")\n if not os.path.exists(config.SAVE):\n os.makedirs(config.SAVE, exist_ok=True)\n\n # get train dataset if in train mode and val dataset\n dataset_train = get_dataset(config, is_train=True) if not config.EVAL else None\n dataset_val = get_dataset(config, is_train=False)\n\n # dist spawn lunch: use CUDA_VISIBLE_DEVICES to set available gpus\n paddle.distributed.spawn(main_worker, args=(config, dataset_train, dataset_val))\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.random.randn",
"numpy.allclose",
"torch.device",
"torch.Tensor"
],
[
"numpy.random.seed"
]
] |
Priyansh2/csnli | [
"de31f3f5ae0a956496b76a4643fa9ce7f3736d29"
] | [
"nmt/onmt/translate/Translator.py"
] | [
"import torch\nfrom torch.autograd import Variable\n\nimport onmt.translate.Beam\nimport onmt.io\n\n\nclass Translator(object):\n \"\"\"\n Uses a model to translate a batch of sentences.\n\n\n Args:\n model (:obj:`onmt.modules.NMTModel`):\n NMT model to use for translation\n fields (dict of Fields): data fields\n beam_size (int): size of beam to use\n n_best (int): number of translations produced\n max_length (int): maximum length output to produce\n global_scores (:obj:`GlobalScorer`):\n object to rescore final translations\n copy_attn (bool): use copy attention during translation\n cuda (bool): use cuda\n beam_trace (bool): trace beam search for debugging\n \"\"\"\n def __init__(self, model, fields,\n beam_size, n_best=1,\n max_length=100,\n global_scorer=None,\n copy_attn=False,\n cuda=False,\n beam_trace=False,\n min_length=0,\n stepwise_penalty=False):\n self.model = model\n self.fields = fields\n self.n_best = n_best\n self.max_length = max_length\n self.global_scorer = global_scorer\n self.copy_attn = copy_attn\n self.beam_size = beam_size\n self.cuda = cuda\n self.min_length = min_length\n self.stepwise_penalty = stepwise_penalty\n\n # for debugging\n self.beam_accum = None\n if beam_trace:\n self.beam_accum = {\n \"predicted_ids\": [],\n \"beam_parent_ids\": [],\n \"scores\": [],\n \"log_probs\": []}\n\n def translate_batch(self, batch, data):\n \"\"\"\n Translate a batch of sentences.\n\n Mostly a wrapper around :obj:`Beam`.\n\n Args:\n batch (:obj:`Batch`): a batch from a dataset object\n data (:obj:`Dataset`): the dataset object\n\n\n Todo:\n Shouldn't need the original dataset.\n \"\"\"\n\n # (0) Prep each of the components of the search.\n # And helper method for reducing verbosity.\n beam_size = self.beam_size\n batch_size = batch.batch_size\n data_type = data.data_type\n vocab = self.fields[\"tgt\"].vocab\n beam = [onmt.translate.Beam(beam_size, n_best=self.n_best,\n cuda=self.cuda,\n global_scorer=self.global_scorer,\n pad=vocab.stoi[onmt.io.PAD_WORD],\n eos=vocab.stoi[onmt.io.EOS_WORD],\n bos=vocab.stoi[onmt.io.BOS_WORD],\n min_length=self.min_length,\n stepwise_penalty=self.stepwise_penalty)\n for __ in range(batch_size)]\n\n # Help functions for working with beams and batches\n def var(a): return Variable(a, volatile=True)\n\n def rvar(a): return var(a.repeat(1, beam_size, 1))\n\n def bottle(m):\n return m.view(batch_size * beam_size, -1)\n\n def unbottle(m):\n return m.view(beam_size, batch_size, -1)\n\n # (1) Run the encoder on the src.\n src = onmt.io.make_features(batch, 'src', data_type)\n src_lengths = None\n if data_type == 'text':\n _, src_lengths = batch.src\n\n enc_states, memory_bank = self.model.encoder(src, src_lengths)\n dec_states = self.model.decoder.init_decoder_state(\n src, memory_bank, enc_states)\n\n if src_lengths is None:\n src_lengths = torch.Tensor(batch_size).type_as(memory_bank.data)\\\n .long()\\\n .fill_(memory_bank.size(0))\n\n # (2) Repeat src objects `beam_size` times.\n src_map = rvar(batch.src_map.data) \\\n if data_type == 'text' and self.copy_attn else None\n memory_bank = rvar(memory_bank.data)\n memory_lengths = src_lengths.repeat(beam_size)\n dec_states.repeat_beam_size_times(beam_size)\n\n # (3) run the decoder to generate sentences, using beam search.\n for i in range(self.max_length):\n if all((b.done() for b in beam)):\n break\n\n # Construct batch x beam_size nxt words.\n # Get all the pending current beam words and arrange for forward.\n inp = var(torch.stack([b.get_current_state() for b in beam])\n .t().contiguous().view(1, 
-1))\n\n # Turn any copied words to UNKs\n # 0 is unk\n if self.copy_attn:\n inp = inp.masked_fill(\n inp.gt(len(self.fields[\"tgt\"].vocab) - 1), 0)\n\n # Temporary kludge solution to handle changed dim expectation\n # in the decoder\n inp = inp.unsqueeze(2)\n\n # Run one step.\n dec_out, dec_states, attn = self.model.decoder(\n inp, memory_bank, dec_states, memory_lengths=memory_lengths)\n dec_out = dec_out.squeeze(0)\n # dec_out: beam x rnn_size\n\n # (b) Compute a vector of batch x beam word scores.\n if not self.copy_attn:\n out = self.model.generator.forward(dec_out).data\n out = unbottle(out)\n # beam x tgt_vocab\n beam_attn = unbottle(attn[\"std\"])\n else:\n out = self.model.generator.forward(dec_out,\n attn[\"copy\"].squeeze(0),\n src_map)\n # beam x (tgt_vocab + extra_vocab)\n out = data.collapse_copy_scores(\n unbottle(out.data),\n batch, self.fields[\"tgt\"].vocab, data.src_vocabs)\n # beam x tgt_vocab\n out = out.log()\n beam_attn = unbottle(attn[\"copy\"])\n # (c) Advance each beam.\n for j, b in enumerate(beam):\n b.advance(out[:, j],\n beam_attn.data[:, j, :memory_lengths[j]])\n dec_states.beam_update(j, b.get_current_origin(), beam_size)\n\n # (4) Extract sentences from beam.\n ret = self._from_beam(beam)\n ret[\"gold_score\"] = [0] * batch_size\n if \"tgt\" in batch.__dict__:\n ret[\"gold_score\"] = self._run_target(batch, data)\n ret[\"batch\"] = batch\n return ret\n\n def _from_beam(self, beam):\n ret = {\"predictions\": [],\n \"scores\": [],\n \"attention\": []}\n for b in beam:\n n_best = self.n_best\n scores, ks = b.sort_finished(minimum=n_best)\n hyps, attn = [], []\n for i, (times, k) in enumerate(ks[:n_best]):\n hyp, att = b.get_hyp(times, k)\n hyps.append(hyp)\n attn.append(att)\n ret[\"predictions\"].append(hyps)\n ret[\"scores\"].append(scores)\n ret[\"attention\"].append(attn)\n return ret\n\n def _run_target(self, batch, data):\n data_type = data.data_type\n if data_type == 'text':\n _, src_lengths = batch.src\n else:\n src_lengths = None\n src = onmt.io.make_features(batch, 'src', data_type)\n tgt_in = onmt.io.make_features(batch, 'tgt')[:-1]\n\n # (1) run the encoder on the src\n enc_states, memory_bank = self.model.encoder(src, src_lengths)\n dec_states = \\\n self.model.decoder.init_decoder_state(src, memory_bank, enc_states)\n\n # (2) if a target is specified, compute the 'goldScore'\n # (i.e. log likelihood) of the target under the model\n tt = torch.cuda if self.cuda else torch\n gold_scores = tt.FloatTensor(batch.batch_size).fill_(0)\n dec_out, dec_states, attn = self.model.decoder(\n tgt_in, memory_bank, dec_states, memory_lengths=src_lengths)\n\n tgt_pad = self.fields[\"tgt\"].vocab.stoi[onmt.io.PAD_WORD]\n for dec, tgt in zip(dec_out, batch.tgt[1:].data):\n # Log prob of each word.\n out = self.model.generator.forward(dec)\n tgt = tgt.unsqueeze(1)\n scores = out.data.gather(1, tgt)\n scores.masked_fill_(tgt.eq(tgt_pad), 0)\n gold_scores += scores\n return gold_scores\n"
] | [
[
"torch.autograd.Variable",
"torch.Tensor"
]
] |
Erotemic/vtool_ibeis | [
"b5dfd5bec43dacc8ea9fc3d6a7f17cd661b678c5"
] | [
"tests/test_coverage_max_reduce.py"
] | [
"#Is it possible to use numpy.ufunc.reduce over an iterator of ndarrays?\n\n#I have a generator function that yields ndarrays (all of the same shape and dtype) and I would like to find the maximum value at each index.\n\n#Currently I have code that looks like this:\n\n\ndef main():\n import numpy as np\n import cv2\n\n shape = (250, 300)\n dsize = shape[::-1]\n\n affmat_list = np.array([\n [[ 1.57351554e+00, 0.00000000e+00, 1.09061039e+02],\n [ -3.61827926e-01, 7.46059970e-01, 2.50669551e+01]],\n [[ 3.05754491e+00, 0.00000000e+00, 8.28024922e+01],\n [ -2.13866309e-01, 1.72124200e+00, 1.72744669e+02]],\n [[ 2.58008254e+00, 0.00000000e+00, 1.52155447e+02],\n [ -2.08041241e+00, 2.46195663e+00, 1.09493821e+02]],\n [[ 2.01791864e+00, 0.00000000e+00, 2.45704669e+02],\n [ -1.07590956e+00, 3.33499949e+00, 1.66233498e+02]],\n [[ 3.32012638e+00, 0.00000000e+00, 1.03847866e+02],\n [ -2.36557589e+00, 3.02063109e+00, 1.59907802e+02]],\n [[ 4.94371474e+00, 0.00000000e+00, 7.92717193e+01],\n [ -2.67846198e+00, 3.66854256e+00, 1.47888210e+02]]])\n\n fx2_score = np.ones(len(affmat_list))\n\n patch = np.array([\n [ 0.0014, 0.0016, 0.0017, 0.0019, 0.0020, 0.0021, 0.0022, 0.0023, 0.0023, 0.0023, 0.0023, 0.0023, 0.0022, 0.0021, 0.0020, 0.0019, 0.0017, 0.0016, 0.0014],\n [ 0.0016, 0.0017, 0.0019, 0.0021, 0.0022, 0.0023, 0.0024, 0.0025, 0.0026, 0.0026, 0.0026, 0.0025, 0.0024, 0.0023, 0.0022, 0.0021, 0.0019, 0.0017, 0.0016],\n [ 0.0017, 0.0019, 0.0021, 0.0023, 0.0024, 0.0026, 0.0027, 0.0028, 0.0028, 0.0028, 0.0028, 0.0028, 0.0027, 0.0026, 0.0024, 0.0023, 0.0021, 0.0019, 0.0017],\n [ 0.0019, 0.0021, 0.0023, 0.0025, 0.0026, 0.0028, 0.0029, 0.0030, 0.0031, 0.0031, 0.0031, 0.0030, 0.0029, 0.0028, 0.0026, 0.0025, 0.0023, 0.0021, 0.0019],\n [ 0.0020, 0.0022, 0.0024, 0.0026, 0.0028, 0.0030, 0.0031, 0.0032, 0.0033, 0.0033, 0.0033, 0.0032, 0.0031, 0.0030, 0.0028, 0.0026, 0.0024, 0.0022, 0.0020],\n [ 0.0021, 0.0023, 0.0026, 0.0028, 0.0030, 0.0032, 0.0033, 0.0034, 0.0035, 0.0035, 0.0035, 0.0034, 0.0033, 0.0032, 0.0030, 0.0028, 0.0026, 0.0023, 0.0021],\n [ 0.0022, 0.0024, 0.0027, 0.0029, 0.0031, 0.0033, 0.0034, 0.0036, 0.0036, 0.0036, 0.0036, 0.0036, 0.0034, 0.0033, 0.0031, 0.0029, 0.0027, 0.0024, 0.0022],\n [ 0.0023, 0.0025, 0.0028, 0.0030, 0.0032, 0.0034, 0.0036, 0.0037, 0.0037, 0.0038, 0.0037, 0.0037, 0.0036, 0.0034, 0.0032, 0.0030, 0.0028, 0.0025, 0.0023],\n [ 0.0023, 0.0026, 0.0028, 0.0031, 0.0033, 0.0035, 0.0036, 0.0037, 0.0038, 0.0038, 0.0038, 0.0037, 0.0036, 0.0035, 0.0033, 0.0031, 0.0028, 0.0026, 0.0023],\n [ 0.0023, 0.0026, 0.0028, 0.0031, 0.0033, 0.0035, 0.0036, 0.0038, 0.0038, 0.0039, 0.0038, 0.0038, 0.0036, 0.0035, 0.0033, 0.0031, 0.0028, 0.0026, 0.0023],\n [ 0.0023, 0.0026, 0.0028, 0.0031, 0.0033, 0.0035, 0.0036, 0.0037, 0.0038, 0.0038, 0.0038, 0.0037, 0.0036, 0.0035, 0.0033, 0.0031, 0.0028, 0.0026, 0.0023],\n [ 0.0023, 0.0025, 0.0028, 0.0030, 0.0032, 0.0034, 0.0036, 0.0037, 0.0037, 0.0038, 0.0037, 0.0037, 0.0036, 0.0034, 0.0032, 0.0030, 0.0028, 0.0025, 0.0023],\n [ 0.0022, 0.0024, 0.0027, 0.0029, 0.0031, 0.0033, 0.0034, 0.0036, 0.0036, 0.0036, 0.0036, 0.0036, 0.0034, 0.0033, 0.0031, 0.0029, 0.0027, 0.0024, 0.0022],\n [ 0.0021, 0.0023, 0.0026, 0.0028, 0.0030, 0.0032, 0.0033, 0.0034, 0.0035, 0.0035, 0.0035, 0.0034, 0.0033, 0.0032, 0.0030, 0.0028, 0.0026, 0.0023, 0.0021],\n [ 0.0020, 0.0022, 0.0024, 0.0026, 0.0028, 0.0030, 0.0031, 0.0032, 0.0033, 0.0033, 0.0033, 0.0032, 0.0031, 0.0030, 0.0028, 0.0026, 0.0024, 0.0022, 0.0020],\n [ 0.0019, 0.0021, 0.0023, 0.0025, 0.0026, 0.0028, 0.0029, 0.0030, 0.0031, 0.0031, 
0.0031, 0.0030, 0.0029, 0.0028, 0.0026, 0.0025, 0.0023, 0.0021, 0.0019],\n [ 0.0017, 0.0019, 0.0021, 0.0023, 0.0024, 0.0026, 0.0027, 0.0028, 0.0028, 0.0028, 0.0028, 0.0028, 0.0027, 0.0026, 0.0024, 0.0023, 0.0021, 0.0019, 0.0017],\n [ 0.0016, 0.0017, 0.0019, 0.0021, 0.0022, 0.0023, 0.0024, 0.0025, 0.0026, 0.0026, 0.0026, 0.0025, 0.0024, 0.0023, 0.0022, 0.0021, 0.0019, 0.0017, 0.0016],\n [ 0.0014, 0.0016, 0.0017, 0.0019, 0.0020, 0.0021, 0.0022, 0.0023, 0.0023, 0.0023, 0.0023, 0.0023, 0.0022, 0.0021, 0.0020, 0.0019, 0.0017, 0.0016, 0.0014]\n ])\n\n def warped_patch_generator():\n padded_patch = np.zeros(shape, dtype=np.float32)\n patch_h, patch_w = patch.shape\n warped = np.zeros(shape, dtype=np.float32)\n for count, (M, score) in enumerate(zip(affmat_list, fx2_score)):\n print(count)\n np.multiply(patch, score, out=padded_patch[:patch.shape[0], :patch.shape[1]] )\n cv2.warpAffine(padded_patch, M, dsize, dst=warped,\n flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT,\n borderValue=0)\n yield warped\n #yield warped\n\n print(\"THREE\")\n from six.moves import reduce\n import functools\n dstimg3 = np.zeros(shape, dtype=np.float32)\n maximum_partial = functools.partial(np.maximum, out=dstimg3)\n dstimg3 = reduce(maximum_partial, warped_patch_generator())\n\n print(\"ONE\")\n dstimg1 = np.zeros(shape, dtype=np.float32)\n print(\"ONE\")\n for warped in warped_patch_generator():\n #dstimg1 = np.maximum(dstimg1, warped)\n np.maximum(dstimg1, warped, out=dstimg1)\n\n print(\"FOUR\")\n input_copy_ = np.array([w.copy() for w in warped_patch_generator()])\n dstimg4 = input_copy_.max(0)\n\n print(\"TWO\")\n dstimg2 = np.zeros(shape, dtype=np.float32)\n input_iter_ = list((w for w in warped_patch_generator()))\n np.maximum.reduce(input_iter_, axis=0, dtype=np.float32, out=dstimg2)\n\n x = np.where(dstimg1.ravel() != dstimg2.ravel())[0]\n print(dstimg2.take(x))\n print(dstimg1.take(x))\n np.allclose(dstimg1, dstimg2)\n\n import matplotlib.pyplot as plt\n plt.figure(1)\n plt.subplot(221)\n plt.imshow(dstimg1)\n plt.subplot(222)\n plt.imshow(dstimg2)\n plt.subplot(223)\n plt.imshow(dstimg3)\n plt.subplot(224)\n plt.imshow(dstimg4)\n\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n\n#I would have thought that I would be allowed to write something like this:\n# dstimg = np.maximum.reduce(warped_patch_generator())\n"
] | [
[
"numpy.allclose",
"numpy.multiply",
"numpy.zeros",
"numpy.maximum",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.show",
"numpy.maximum.reduce",
"numpy.array"
]
] |